changeset 3859:19804e7fd496 onnv_61

PSARC 2006/665 Neptune 10 Gbit Ethernet Driver
6502748 Deliver N2 nxge driver for Neptune on sun4v, sun4u and x64
author ml29623
date Mon, 19 Mar 2007 19:37:22 -0700
parents c1c1d0ea79ef
children 52d51463c927
files usr/src/pkgdefs/Makefile usr/src/pkgdefs/SUNWnxge.i/Makefile usr/src/pkgdefs/SUNWnxge.i/pkginfo.tmpl usr/src/pkgdefs/SUNWnxge.i/postinstall usr/src/pkgdefs/SUNWnxge.i/postremove usr/src/pkgdefs/SUNWnxge.i/prototype_com usr/src/pkgdefs/SUNWnxge.i/prototype_i386 usr/src/pkgdefs/SUNWnxge.u/Makefile usr/src/pkgdefs/SUNWnxge.u/pkginfo.tmpl usr/src/pkgdefs/SUNWnxge.u/postinstall usr/src/pkgdefs/SUNWnxge.u/postremove usr/src/pkgdefs/SUNWnxge.u/prototype_com usr/src/pkgdefs/SUNWnxge.u/prototype_sparc usr/src/pkgdefs/SUNWnxge.v/Makefile usr/src/pkgdefs/SUNWnxge.v/pkginfo.tmpl usr/src/pkgdefs/SUNWnxge.v/postinstall usr/src/pkgdefs/SUNWnxge.v/prototype_com usr/src/pkgdefs/SUNWnxge.v/prototype_sparc usr/src/uts/common/Makefile.files usr/src/uts/common/Makefile.rules usr/src/uts/common/io/nxge/npi/npi.c usr/src/uts/common/io/nxge/npi/npi.h usr/src/uts/common/io/nxge/npi/npi_espc.c usr/src/uts/common/io/nxge/npi/npi_espc.h usr/src/uts/common/io/nxge/npi/npi_fflp.c usr/src/uts/common/io/nxge/npi/npi_fflp.h usr/src/uts/common/io/nxge/npi/npi_ipp.c usr/src/uts/common/io/nxge/npi/npi_ipp.h usr/src/uts/common/io/nxge/npi/npi_mac.c usr/src/uts/common/io/nxge/npi/npi_mac.h usr/src/uts/common/io/nxge/npi/npi_rxdma.c usr/src/uts/common/io/nxge/npi/npi_rxdma.h usr/src/uts/common/io/nxge/npi/npi_txc.c usr/src/uts/common/io/nxge/npi/npi_txc.h usr/src/uts/common/io/nxge/npi/npi_txdma.c usr/src/uts/common/io/nxge/npi/npi_txdma.h usr/src/uts/common/io/nxge/npi/npi_vir.c usr/src/uts/common/io/nxge/npi/npi_vir.h usr/src/uts/common/io/nxge/npi/npi_zcp.c usr/src/uts/common/io/nxge/npi/npi_zcp.h usr/src/uts/common/io/nxge/nxge.conf usr/src/uts/common/io/nxge/nxge_classify.c usr/src/uts/common/io/nxge/nxge_espc.c usr/src/uts/common/io/nxge/nxge_fflp.c usr/src/uts/common/io/nxge/nxge_fflp_hash.c usr/src/uts/common/io/nxge/nxge_fm.c usr/src/uts/common/io/nxge/nxge_fzc.c usr/src/uts/common/io/nxge/nxge_hcall.s usr/src/uts/common/io/nxge/nxge_hw.c usr/src/uts/common/io/nxge/nxge_ipp.c usr/src/uts/common/io/nxge/nxge_kstats.c usr/src/uts/common/io/nxge/nxge_mac.c usr/src/uts/common/io/nxge/nxge_main.c usr/src/uts/common/io/nxge/nxge_ndd.c usr/src/uts/common/io/nxge/nxge_rxdma.c usr/src/uts/common/io/nxge/nxge_send.c usr/src/uts/common/io/nxge/nxge_txc.c usr/src/uts/common/io/nxge/nxge_txdma.c usr/src/uts/common/io/nxge/nxge_virtual.c usr/src/uts/common/io/nxge/nxge_zcp.c usr/src/uts/common/sys/Makefile usr/src/uts/common/sys/nxge/nxge.h usr/src/uts/common/sys/nxge/nxge_common.h usr/src/uts/common/sys/nxge/nxge_common_impl.h usr/src/uts/common/sys/nxge/nxge_defs.h usr/src/uts/common/sys/nxge/nxge_espc.h usr/src/uts/common/sys/nxge/nxge_espc_hw.h usr/src/uts/common/sys/nxge/nxge_fflp.h usr/src/uts/common/sys/nxge/nxge_fflp_hash.h usr/src/uts/common/sys/nxge/nxge_fflp_hw.h usr/src/uts/common/sys/nxge/nxge_flow.h usr/src/uts/common/sys/nxge/nxge_fm.h usr/src/uts/common/sys/nxge/nxge_fzc.h usr/src/uts/common/sys/nxge/nxge_hw.h usr/src/uts/common/sys/nxge/nxge_impl.h usr/src/uts/common/sys/nxge/nxge_ipp.h usr/src/uts/common/sys/nxge/nxge_ipp_hw.h usr/src/uts/common/sys/nxge/nxge_mac.h usr/src/uts/common/sys/nxge/nxge_mac_hw.h usr/src/uts/common/sys/nxge/nxge_mii.h usr/src/uts/common/sys/nxge/nxge_n2_esr_hw.h usr/src/uts/common/sys/nxge/nxge_phy_hw.h usr/src/uts/common/sys/nxge/nxge_rxdma.h usr/src/uts/common/sys/nxge/nxge_rxdma_hw.h usr/src/uts/common/sys/nxge/nxge_sr_hw.h usr/src/uts/common/sys/nxge/nxge_txc.h usr/src/uts/common/sys/nxge/nxge_txc_hw.h usr/src/uts/common/sys/nxge/nxge_txdma.h 
usr/src/uts/common/sys/nxge/nxge_txdma_hw.h usr/src/uts/common/sys/nxge/nxge_virtual.h usr/src/uts/common/sys/nxge/nxge_zcp.h usr/src/uts/common/sys/nxge/nxge_zcp_hw.h usr/src/uts/i86pc/Makefile usr/src/uts/i86pc/Makefile.i86pc.shared usr/src/uts/i86pc/nxge/Makefile usr/src/uts/sun4u/Makefile.sun4u.shared usr/src/uts/sun4u/nxge/Makefile usr/src/uts/sun4v/Makefile.files usr/src/uts/sun4v/Makefile.rules usr/src/uts/sun4v/io/nxge/npi/npi.c usr/src/uts/sun4v/io/nxge/npi/npi.h usr/src/uts/sun4v/io/nxge/npi/npi_espc.c usr/src/uts/sun4v/io/nxge/npi/npi_espc.h usr/src/uts/sun4v/io/nxge/npi/npi_fflp.c usr/src/uts/sun4v/io/nxge/npi/npi_fflp.h usr/src/uts/sun4v/io/nxge/npi/npi_ipp.c usr/src/uts/sun4v/io/nxge/npi/npi_ipp.h usr/src/uts/sun4v/io/nxge/npi/npi_mac.c usr/src/uts/sun4v/io/nxge/npi/npi_mac.h usr/src/uts/sun4v/io/nxge/npi/npi_rxdma.c usr/src/uts/sun4v/io/nxge/npi/npi_rxdma.h usr/src/uts/sun4v/io/nxge/npi/npi_txc.c usr/src/uts/sun4v/io/nxge/npi/npi_txc.h usr/src/uts/sun4v/io/nxge/npi/npi_txdma.c usr/src/uts/sun4v/io/nxge/npi/npi_txdma.h usr/src/uts/sun4v/io/nxge/npi/npi_vir.c usr/src/uts/sun4v/io/nxge/npi/npi_vir.h usr/src/uts/sun4v/io/nxge/npi/npi_zcp.c usr/src/uts/sun4v/io/nxge/npi/npi_zcp.h usr/src/uts/sun4v/io/nxge/nxge_classify.c usr/src/uts/sun4v/io/nxge/nxge_espc.c usr/src/uts/sun4v/io/nxge/nxge_fflp.c usr/src/uts/sun4v/io/nxge/nxge_fflp_hash.c usr/src/uts/sun4v/io/nxge/nxge_fm.c usr/src/uts/sun4v/io/nxge/nxge_fzc.c usr/src/uts/sun4v/io/nxge/nxge_hcall.s usr/src/uts/sun4v/io/nxge/nxge_hw.c usr/src/uts/sun4v/io/nxge/nxge_ipp.c usr/src/uts/sun4v/io/nxge/nxge_kstats.c usr/src/uts/sun4v/io/nxge/nxge_mac.c usr/src/uts/sun4v/io/nxge/nxge_main.c usr/src/uts/sun4v/io/nxge/nxge_ndd.c usr/src/uts/sun4v/io/nxge/nxge_rxdma.c usr/src/uts/sun4v/io/nxge/nxge_send.c usr/src/uts/sun4v/io/nxge/nxge_txc.c usr/src/uts/sun4v/io/nxge/nxge_txdma.c usr/src/uts/sun4v/io/nxge/nxge_virtual.c usr/src/uts/sun4v/io/nxge/nxge_zcp.c usr/src/uts/sun4v/nxge/Makefile usr/src/uts/sun4v/sys/Makefile usr/src/uts/sun4v/sys/nxge/nxge.h usr/src/uts/sun4v/sys/nxge/nxge_common.h usr/src/uts/sun4v/sys/nxge/nxge_common_impl.h usr/src/uts/sun4v/sys/nxge/nxge_defs.h usr/src/uts/sun4v/sys/nxge/nxge_espc.h usr/src/uts/sun4v/sys/nxge/nxge_espc_hw.h usr/src/uts/sun4v/sys/nxge/nxge_fflp.h usr/src/uts/sun4v/sys/nxge/nxge_fflp_hash.h usr/src/uts/sun4v/sys/nxge/nxge_fflp_hw.h usr/src/uts/sun4v/sys/nxge/nxge_flow.h usr/src/uts/sun4v/sys/nxge/nxge_fm.h usr/src/uts/sun4v/sys/nxge/nxge_fzc.h usr/src/uts/sun4v/sys/nxge/nxge_hw.h usr/src/uts/sun4v/sys/nxge/nxge_impl.h usr/src/uts/sun4v/sys/nxge/nxge_ipp.h usr/src/uts/sun4v/sys/nxge/nxge_ipp_hw.h usr/src/uts/sun4v/sys/nxge/nxge_mac.h usr/src/uts/sun4v/sys/nxge/nxge_mac_hw.h usr/src/uts/sun4v/sys/nxge/nxge_mii.h usr/src/uts/sun4v/sys/nxge/nxge_n2_esr_hw.h usr/src/uts/sun4v/sys/nxge/nxge_phy_hw.h usr/src/uts/sun4v/sys/nxge/nxge_rxdma.h usr/src/uts/sun4v/sys/nxge/nxge_rxdma_hw.h usr/src/uts/sun4v/sys/nxge/nxge_sr_hw.h usr/src/uts/sun4v/sys/nxge/nxge_txc.h usr/src/uts/sun4v/sys/nxge/nxge_txc_hw.h usr/src/uts/sun4v/sys/nxge/nxge_txdma.h usr/src/uts/sun4v/sys/nxge/nxge_txdma_hw.h usr/src/uts/sun4v/sys/nxge/nxge_virtual.h usr/src/uts/sun4v/sys/nxge/nxge_zcp.h usr/src/uts/sun4v/sys/nxge/nxge_zcp_hw.h
diffstat 171 files changed, 72436 insertions(+), 71448 deletions(-)
--- a/usr/src/pkgdefs/Makefile	Mon Mar 19 18:02:35 2007 -0700
+++ b/usr/src/pkgdefs/Makefile	Mon Mar 19 19:37:22 2007 -0700
@@ -78,6 +78,7 @@
 	SUNWn2cp.v \
 	SUNWn2cpact.v \
 	SUNWniumx.v \
+	SUNWnxge.u \
 	SUNWnxge.v \
 	SUNWonmtst.u \
 	SUNWonmtst.v \
@@ -127,6 +128,7 @@
 	SUNWlxr \
 	SUNWlxu \
 	SUNWmv88sx \
+	SUNWnxge.i \
 	SUNWonmtst.i \
 	SUNWos86r  \
 	SUNWpsdcr  \
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/pkgdefs/SUNWnxge.i/Makefile	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,40 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+# Use is subject to license terms.
+#
+# ident	"%Z%%M%	%I%	%E% SMI"
+#
+
+include ../Makefile.com
+
+DATAFILES += depend  i.renameold
+
+
+
+.KEEP_STATE:
+
+all: $(FILES) postinstall postremove
+
+install: all pkg
+
+include ../Makefile.targ
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/pkgdefs/SUNWnxge.i/pkginfo.tmpl	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,49 @@
+#
+# Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+# Use is subject to license terms.
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+# ident	"%Z%%M%	%I%	%E% SMI"
+#
+# This required package information file describes characteristics of the
+# package, such as package abbreviation, full package name, package version,
+# and package architecture.
+#
+PKG="SUNWnxge"
+NAME="Sun NIU leaf driver"
+ARCH="i386.i86pc"
+VERSION="ONVERS,REV=0.0.0"
+SUNW_PRODNAME="SunOS"
+SUNW_PRODVERS="RELEASE/VERSION"
+SUNW_PKGTYPE="root"
+SUNW_PKG_ALLZONES="true"
+SUNW_PKG_HOLLOW="true"
+SUNW_PKG_THISZONE="false"
+MAXINST="1000"
+CATEGORY="system"
+DESC="Sun NIU 10Gb/1Gb leaf driver"
+VENDOR="Sun Microsystems, Inc."
+HOTLINE="Please contact your local service provider"
+EMAIL=""
+CLASSES="none renameold"
+BASEDIR=/
+SUNW_PKGVERS="1.0"
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/pkgdefs/SUNWnxge.i/postinstall	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,81 @@
+#!/sbin/sh
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+# Use is subject to license terms.
+#
+# ident	"%Z%%M%	%I%	%E% SMI"
+#
+
+set -u
+
+PATH="/usr/bin:/usr/sbin:${PATH}"
+export PATH
+
+# 
+# Driver info 
+# 
+DRV=nxge
+DRVALIAS=" \"pciex108e,abcd\" \"SUNW,niusl\""
+
+DRVPERM='* 0600 root sys'
+# POLICY='read_priv_set=net_rawaccess write_priv_set=net_rawaccess'
+MAJORDEV=11
+
+#
+# Select the correct add_drv options to execute.
+#
+if [ "${BASEDIR}" = "/" ]; then
+        #
+        # Irrespective of whether hardware exists
+        # or not don't attempt to attach driver
+        # to the hardware. This is to avoid problems
+        # with installing a 32 bit driver on a 64 bit
+        # running system.
+        #
+        ADD_DRV="add_drv -n"
+else
+        #
+        # On a client,
+        # modify the system files and touch/reconfigure
+        # for reconfigure reboot
+        #
+        ADD_DRV="add_drv -b ${BASEDIR}"
+fi
+
+#
+# Make sure add_drv has *not* been previously executed
+# before attempting to add the driver.
+#
+grep -w "${DRV}" ${BASEDIR}/etc/name_to_major > /dev/null 2>&1
+if [ $? -eq 1 ]; then
+        ${ADD_DRV} -m "${DRVPERM}" -i "${DRVALIAS}" ${DRV}
+        if [ $? -ne 0 ]; then
+                echo "\nFailed add_drv!\n" >&2
+                exit 1
+        fi
+else
+        echo " add_drv Failed;  ${DRV} is already in ${BASEDIR}/etc/name_to_major"
+        exit 0
+fi
+
+exit 0
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/pkgdefs/SUNWnxge.i/postremove	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,38 @@
+#!/sbin/sh
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+# Use is subject to license terms.
+#
+# ident	"%Z%%M%	%I%	%E% SMI"
+#
+
+BD=${BASEDIR:-/}
+if grep -w nxge $BD/etc/name_to_major > /dev/null 2>&1
+then
+	rem_drv -b ${BD} nxge
+	if [ $? -ne 0 ]
+	then
+		exit 1 	
+	fi
+fi
+exit 0
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/pkgdefs/SUNWnxge.i/prototype_com	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,48 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+# Use is subject to license terms.
+#
+# ident	"%Z%%M%	%I%	%E% SMI"
+#
+# This required package information file contains a list of package contents.
+# The 'pkgmk' command uses this file to identify the contents of a package
+# and their location on the development machine when building the package.
+# Can be created via a text editor or through use of the 'pkgproto' command.
+
+#!search <pathname pathname ...>	# where to find pkg objects
+#!include <filename>			# include another 'prototype' file
+#!default <mode> <owner> <group>	# default used if not specified on entry
+#!<param>=<value>			# puts parameter in pkg environment
+
+# packaging files
+i pkginfo
+i copyright
+i depend
+i postinstall
+i postremove
+i i.renameold
+#
+# source locations relative to the prototype file
+#
+# SUNWnxge.i
+#
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/pkgdefs/SUNWnxge.i/prototype_i386	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,58 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+# Use is subject to license terms.
+#
+# ident	"%Z%%M%	%I%	%E% SMI"
+#
+# This required package information file contains a list of package contents.
+# The 'pkgmk' command uses this file to identify the contents of a package
+# and their location on the development machine when building the package.
+# Can be created via a text editor or through use of the 'pkgproto' command.
+
+#!search <pathname pathname ...>	# where to find pkg objects
+#!include <filename>			# include another 'prototype' file
+#!default <mode> <owner> <group>	# default used if not specified on entry
+#!<param>=<value>			# puts parameter in pkg environment
+
+# packaging files
+#
+# Include ISA independent files (prototype_com)
+#
+!include prototype_com
+#
+#
+#
+# List files which are SPARC specific here
+#
+# source locations relative to the prototype file
+#
+#
+# SUNWnxge.i
+#
+d none platform 755 root sys
+d none platform/i86pc 755 root sys
+d none platform/i86pc/kernel 755 root sys
+d none platform/i86pc/kernel/drv 755 root sys
+e renameold platform/i86pc/kernel/drv/nxge.conf 0644 root sys
+d none platform/i86pc/kernel/drv/amd64 755 root sys
+f none platform/i86pc/kernel/drv/amd64/nxge 755 root sys
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/pkgdefs/SUNWnxge.u/Makefile	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,38 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+# Use is subject to license terms.
+#
+# ident	"%Z%%M%	%I%	%E% SMI"
+#
+
+include ../Makefile.com
+
+DATAFILES += depend i.renameold
+
+.KEEP_STATE:
+
+all: $(FILES) postinstall postremove
+
+install: all pkg
+
+include ../Makefile.targ
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/pkgdefs/SUNWnxge.u/pkginfo.tmpl	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,49 @@
+#
+# Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+# Use is subject to license terms.
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+# ident	"%Z%%M%	%I%	%E% SMI"
+#
+# This required package information file describes characteristics of the
+# package, such as package abbreviation, full package name, package version,
+# and package architecture.
+#
+PKG="SUNWnxge"
+NAME="Sun NIU leaf driver"
+ARCH="sparc.sun4u"
+VERSION="ONVERS,REV=0.0.0"
+SUNW_PRODNAME="SunOS"
+SUNW_PRODVERS="RELEASE/VERSION"
+SUNW_PKGTYPE="root"
+SUNW_PKG_ALLZONES="true"
+SUNW_PKG_HOLLOW="true"
+SUNW_PKG_THISZONE="false"
+MAXINST="1000"
+CATEGORY="system"
+DESC="Sun NIU 10Gb/1Gb driver"
+VENDOR="Sun Microsystems, Inc."
+HOTLINE="Please contact your local service provider"
+EMAIL=""
+CLASSES="none renameold"
+BASEDIR=/
+SUNW_PKGVERS="1.0"
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/pkgdefs/SUNWnxge.u/postinstall	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,81 @@
+#!/sbin/sh
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+# Use is subject to license terms.
+#
+# ident	"%Z%%M%	%I%	%E% SMI"
+#
+
+set -u
+
+PATH="/usr/bin:/usr/sbin:${PATH}"
+export PATH
+
+# 
+# Driver info 
+# 
+DRV=nxge
+DRVALIAS=" \"pciex108e,abcd\" \"SUNW,niusl\""
+
+DRVPERM='* 0600 root sys'
+# POLICY='read_priv_set=net_rawaccess write_priv_set=net_rawaccess'
+MAJORDEV=11
+
+#
+# Select the correct add_drv options to execute.
+#
+if [ "${BASEDIR}" = "/" ]; then
+        #
+        # Irrespective of whether hardware exists
+        # or not don't attempt to attach driver
+        # to the hardware. This is to avoid problems
+        # with installing a 32 bit driver on a 64 bit
+        # running system.
+        #
+        ADD_DRV="add_drv -n"
+else
+        #
+        # On a client,
+        # modify the system files and touch/reconfigure
+        # for reconfigure reboot
+        #
+        ADD_DRV="add_drv -b ${BASEDIR}"
+fi
+
+#
+# Make sure add_drv has *not* been previously executed
+# before attempting to add the driver.
+#
+grep -w "${DRV}" ${BASEDIR}/etc/name_to_major > /dev/null 2>&1
+if [ $? -eq 1 ]; then
+        ${ADD_DRV} -m "${DRVPERM}" -i "${DRVALIAS}" ${DRV}
+        if [ $? -ne 0 ]; then
+                echo "\nFailed add_drv!\n" >&2
+                exit 1
+        fi
+else
+        echo " add_drv Failed;  ${DRV} is already in ${BASEDIR}/etc/name_to_major"
+        exit 0
+fi
+
+exit 0
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/pkgdefs/SUNWnxge.u/postremove	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,38 @@
+#!/sbin/sh
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+# Use is subject to license terms.
+#
+# ident	"%Z%%M%	%I%	%E% SMI"
+#
+
+BD=${BASEDIR:-/}
+if grep -w nxge $BD/etc/name_to_major > /dev/null 2>&1
+then
+	rem_drv -b ${BD} nxge
+	if [ $? -ne 0 ]
+	then
+		exit 1 	
+	fi
+fi
+exit 0
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/pkgdefs/SUNWnxge.u/prototype_com	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,48 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+# Use is subject to license terms.
+#
+# ident	"%Z%%M%	%I%	%E% SMI"
+#
+# This required package information file contains a list of package contents.
+# The 'pkgmk' command uses this file to identify the contents of a package
+# and their location on the development machine when building the package.
+# Can be created via a text editor or through use of the 'pkgproto' command.
+
+#!search <pathname pathname ...>	# where to find pkg objects
+#!include <filename>			# include another 'prototype' file
+#!default <mode> <owner> <group>	# default used if not specified on entry
+#!<param>=<value>			# puts parameter in pkg environment
+
+# packaging files
+i pkginfo
+i copyright
+i depend
+i postinstall
+i postremove
+i i.renameold
+#
+# source locations relative to the prototype file
+#
+# SUNWnxge.u
+#
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/pkgdefs/SUNWnxge.u/prototype_sparc	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,58 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+# Use is subject to license terms.
+#
+# ident	"%Z%%M%	%I%	%E% SMI"
+#
+# This required package information file contains a list of package contents.
+# The 'pkgmk' command uses this file to identify the contents of a package
+# and their location on the development machine when building the package.
+# Can be created via a text editor or through use of the 'pkgproto' command.
+
+#!search <pathname pathname ...>	# where to find pkg objects
+#!include <filename>			# include another 'prototype' file
+#!default <mode> <owner> <group>	# default used if not specified on entry
+#!<param>=<value>			# puts parameter in pkg environment
+
+# packaging files
+#
+# Include ISA independent files (prototype_com)
+#
+!include prototype_com
+#
+#
+#
+# List files which are SPARC specific here
+#
+# source locations relative to the prototype file
+#
+#
+# SUNWnxge.u
+#
+d none platform 755 root sys
+d none platform/sun4u 755 root sys
+d none platform/sun4u/kernel 755 root sys
+d none platform/sun4u/kernel/drv 755 root sys
+e renameold platform/sun4u/kernel/drv/nxge.conf 0644 root sys
+d none platform/sun4u/kernel/drv/sparcv9 755 root sys
+f none platform/sun4u/kernel/drv/sparcv9/nxge 755 root sys
--- a/usr/src/pkgdefs/SUNWnxge.v/Makefile	Mon Mar 19 18:02:35 2007 -0700
+++ b/usr/src/pkgdefs/SUNWnxge.v/Makefile	Mon Mar 19 19:37:22 2007 -0700
@@ -19,7 +19,7 @@
 # CDDL HEADER END
 #
 #
-# Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+# Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 # Use is subject to license terms.
 #
 # ident	"%Z%%M%	%I%	%E% SMI"
@@ -27,7 +27,7 @@
 
 include ../Makefile.com
 
-DATAFILES += depend
+DATAFILES += depend i.renameold
 
 .KEEP_STATE:
 
--- a/usr/src/pkgdefs/SUNWnxge.v/pkginfo.tmpl	Mon Mar 19 18:02:35 2007 -0700
+++ b/usr/src/pkgdefs/SUNWnxge.v/pkginfo.tmpl	Mon Mar 19 19:37:22 2007 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+# Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 # Use is subject to license terms.
 #
 # CDDL HEADER START
@@ -21,14 +21,14 @@
 #
 # CDDL HEADER END
 #
-#ident	"%Z%%M%	%I%	%E% SMI"
+# ident	"%Z%%M%	%I%	%E% SMI"
 #
 # This required package information file describes characteristics of the
 # package, such as package abbreviation, full package name, package version,
 # and package architecture.
 #
 PKG="SUNWnxge"
-NAME="UltraSPARC-T2 NIU leaf driver"
+NAME="Sun NIU leaf driver"
 ARCH="sparc.sun4v"
 VERSION="ONVERS,REV=0.0.0"
 SUNW_PRODNAME="SunOS"
@@ -39,11 +39,11 @@
 SUNW_PKG_THISZONE="false"
 MAXINST="1000"
 CATEGORY="system"
-DESC="UltraSPARC-T2 NIU leaf driver"
+DESC="Sun NIU leaf driver"
 VENDOR="Sun Microsystems, Inc."
 HOTLINE="Please contact your local service provider"
 EMAIL=""
-CLASSES="none"
+CLASSES="none renameold"
 BASEDIR=/
 SUNW_PKGVERS="1.0"
 
--- a/usr/src/pkgdefs/SUNWnxge.v/postinstall	Mon Mar 19 18:02:35 2007 -0700
+++ b/usr/src/pkgdefs/SUNWnxge.v/postinstall	Mon Mar 19 19:37:22 2007 -0700
@@ -20,110 +20,62 @@
 # CDDL HEADER END
 #
 #
-# Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+# Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 # Use is subject to license terms.
 #
 # ident	"%Z%%M%	%I%	%E% SMI"
+#
 
-# Function: check_add_drv()
-#
-# This function will check if the module has an entry in etc/name_to_major
-# If not simply calls add_drv with the arguments given. If there is
-# such an entry in name_to_major file, it adds entries in driver_aliases
-# driver_classes and minor_perm if necessary.
-# The syntax of this function is the same as add_drv. 
+set -u
+
+PATH="/usr/bin:/usr/sbin:${PATH}"
+export PATH
 
-check_add_drv()
-{
-	if [ "$BASEDIR" = "" ]
-	then
-		BASEDIR=/  
-	fi
-	alias=""
-	class=""
-	ADD_ALIAS=0
-	ADD_CLASS=0
-	ADD_MINOR=0
-	OPTIND=1
-	IS_NET_DRIVER=0
+# 
+# Driver info 
+# 
+DRV=nxge
+DRVALIAS=" \"pciex108e,abcd\" \"SUNW,niusl\""
 
-	cmd="add_drv"
+DRVPERM='* 0600 root sys'
+# POLICY='read_priv_set=net_rawaccess write_priv_set=net_rawaccess'
+MAJORDEV=11
 
-	NO_CMD=
-	while getopts i:b:m:c:N  opt
-	do
-		case $opt in
-			N )	NO_CMD=1;;
-			i )	ADD_ALIAS=1	
-				alias=$OPTARG
-				cmd=$cmd" -i '$alias'"
-				;;
-			m )	ADD_MINOR=1
-				minor=$OPTARG
-				cmd=$cmd" -m '$minor'"
-				;;
-			c)	ADD_CLASS=1
-				class=$OPTARG
-				cmd=$cmd" -c $class"
-				;;
-			b)	BASEDIR=$OPTARG
-				cmd=$cmd" -b $BASEDIR"
-				;;
-			\?) 	echo "check_add_drv can not handle this option"
-				return
-				;;
-			esac
-	done 
-	shift `/usr/bin/expr $OPTIND - 1`
-	
-	drvname=$1
-
-	cmd=$cmd" "$drvname
-
-	drvname=`echo $drvname | /usr/bin/sed 's;.*/;;g'`
-
-	/usr/bin/grep "^$drvname[ 	]" $BASEDIR/etc/name_to_major >  /dev/null 2>&1
+#
+# Select the correct add_drv options to execute.
+#
+if [ "${BASEDIR}" = "/" ]; then
+        #
+        # Irrespective of whether hardware exists
+        # or not don't attempt to attach driver
+        # to the hardware. This is to avoid problems
+        # with installing a 32 bit driver on a 64 bit
+        # running system.
+        #
+        ADD_DRV="add_drv -n"
+else
+        #
+        # On a client,
+        # modify the system files and touch/reconfigure
+        # for reconfigure reboot
+        #
+        ADD_DRV="add_drv -b ${BASEDIR}"
+fi
 
-	if [ "$NO_CMD" = "" -a $? -ne 0 ] 
-	then
-		eval $cmd
-	else	
-		# entry already in name_to_major, add alias, class, minorperm
-		# if necessary
-		if [ $ADD_ALIAS = 1 ]	
-		then
-			for i in $alias
-			do
-				/usr/bin/egrep "^$drvname[ 	]+$i" $BASEDIR/etc/driver_aliases>/dev/null 2>&1
-				if [ $? -ne 0 ]
-				then
-					echo "$drvname $i" >> $BASEDIR/etc/driver_aliases	
-				fi
-			done
-		fi
+#
+# Make sure add_drv has *not* been previously executed
+# before attempting to add the driver.
+#
+grep -w "${DRV}" ${BASEDIR}/etc/name_to_major > /dev/null 2>&1
+if [ $? -eq 1 ]; then
+        ${ADD_DRV} -m "${DRVPERM}" -i "${DRVALIAS}" ${DRV}
+        if [ $? -ne 0 ]; then
+                echo "\nFailed add_drv!\n" >&2
+                exit 1
+        fi
+else
+        echo " add_drv Failed;  ${DRV} is already in ${BASEDIR}/etc/name_to_major"
+        exit 0
+fi
 
-		if [ $ADD_CLASS = 1 ]
-		then
-			/usr/bin/egrep "^$drvname[ 	]+$class( |	|$)" $BASEDIR/etc/driver_classes > /dev/null 2>&1
-			if [ $? -ne 0 ]
-			then 
-				echo "$drvname\t$class" >> $BASEDIR/etc/driver_classes
-			fi
-		fi
-
-		if [ $ADD_MINOR = 1 ]
-		then
-			/usr/bin/grep "^$drvname:" $BASEDIR/etc/minor_perm > /dev/null 2>&1
-			if [ $? -ne 0 ]
-			then 
-				minorentry="$drvname:$minor"
-				echo $minorentry >> $BASEDIR/etc/minor_perm
-			fi
-		fi
-
-	fi
-
-	
-}
-
-check_add_drv -b "${BASEDIR}" -i '"SUNW,niusl"' -m '* 0666 root sys' nxge
+exit 0
--- a/usr/src/pkgdefs/SUNWnxge.v/prototype_com	Mon Mar 19 18:02:35 2007 -0700
+++ b/usr/src/pkgdefs/SUNWnxge.v/prototype_com	Mon Mar 19 19:37:22 2007 -0700
@@ -19,7 +19,7 @@
 # CDDL HEADER END
 #
 #
-# Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+# Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 # Use is subject to license terms.
 #
 # ident	"%Z%%M%	%I%	%E% SMI"
@@ -40,6 +40,7 @@
 i depend
 i postinstall
 i postremove
+i i.renameold
 #
 # source locations relative to the prototype file
 #
--- a/usr/src/pkgdefs/SUNWnxge.v/prototype_sparc	Mon Mar 19 18:02:35 2007 -0700
+++ b/usr/src/pkgdefs/SUNWnxge.v/prototype_sparc	Mon Mar 19 19:37:22 2007 -0700
@@ -19,7 +19,7 @@
 # CDDL HEADER END
 #
 #
-# Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+# Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 # Use is subject to license terms.
 #
 # ident	"%Z%%M%	%I%	%E% SMI"
@@ -53,5 +53,6 @@
 d none platform/sun4v 755 root sys
 d none platform/sun4v/kernel 755 root sys
 d none platform/sun4v/kernel/drv 755 root sys
+e renameold platform/sun4v/kernel/drv/nxge.conf 0644 root sys
 d none platform/sun4v/kernel/drv/sparcv9 755 root sys
 f none platform/sun4v/kernel/drv/sparcv9/nxge 755 root sys
--- a/usr/src/uts/common/Makefile.files	Mon Mar 19 18:02:35 2007 -0700
+++ b/usr/src/uts/common/Makefile.files	Mon Mar 19 19:37:22 2007 -0700
@@ -1365,3 +1365,21 @@
 #
 E1000G_OBJS +=	e1000g_debug.o e1000_hw.o e1000g_main.o e1000g_alloc.o \
 		e1000g_tx.o e1000g_rx.o e1000g_stat.o e1000g_ndd.o
+#
+#	NIU 10G/1G driver module
+#
+NXGE_OBJS =	nxge_mac.o nxge_ipp.o nxge_rxdma.o 		\
+		nxge_txdma.o nxge_txc.o	nxge_main.o		\
+		nxge_hw.o nxge_fzc.o nxge_virtual.o		\
+		nxge_send.o nxge_classify.o nxge_fflp.o		\
+		nxge_fflp_hash.o nxge_ndd.o nxge_kstats.o	\
+		nxge_zcp.o nxge_fm.o nxge_espc.o
+
+NXGE_NPI_OBJS =	\
+		npi.o npi_mac.o	npi_ipp.o			\
+		npi_txdma.o npi_rxdma.o	npi_txc.o		\
+		npi_zcp.o npi_espc.o npi_fflp.o			\
+		npi_vir.o
+
+NXGE_HCALL_OBJS =	\
+		nxge_hcall.o
--- a/usr/src/uts/common/Makefile.rules	Mon Mar 19 18:02:35 2007 -0700
+++ b/usr/src/uts/common/Makefile.rules	Mon Mar 19 19:37:22 2007 -0700
@@ -599,6 +599,17 @@
 	$(COMPILE.c) -o $@ $<
 	$(CTFCONVERT_O)
 
+$(OBJS_DIR)/%.o:		$(UTSBASE)/common/io/nxge/%.c
+	$(COMPILE.c) -o $@ $<
+	$(CTFCONVERT_O)
+
+$(OBJS_DIR)/%.o:		$(UTSBASE)/common/io/nxge/npi/%.c
+	$(COMPILE.c) -o $@ $<
+	$(CTFCONVERT_O)
+
+$(OBJS_DIR)/%.o:		$(UTSBASE)/common/io/nxge/%.s
+	$(COMPILE.s) -o $@ $<
+
 $(OBJS_DIR)/%.o:		$(UTSBASE)/common/io/pci-ide/%.c
 	$(COMPILE.c) -o $@ $<
 	$(CTFCONVERT_O)
@@ -1353,6 +1364,15 @@
 $(LINTS_DIR)/%.ln:		$(UTSBASE)/common/io/net80211/%.c
 	@($(LHEAD) $(LINT.c) $< $(LTAIL))
 
+$(LINTS_DIR)/%.ln:		$(UTSBASE)/common/io/nxge/%.c
+	@($(LHEAD) $(LINT.c) $< $(LTAIL))
+
+$(LINTS_DIR)/%.ln:		$(UTSBASE)/common/io/nxge/%.s
+	@($(LHEAD) $(LINT.c) $< $(LTAIL))
+
+$(LINTS_DIR)/%.ln:		$(UTSBASE)/common/io/nxge/npi/%.c
+	@($(LHEAD) $(LINT.c) $< $(LTAIL))
+
 $(LINTS_DIR)/%.ln:		$(UTSBASE)/common/io/pci-ide/%.c
 	@($(LHEAD) $(LINT.c) $< $(LTAIL))
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nxge/npi/npi.c	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,107 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#include <npi.h>
+#include <sys/nxge/nxge_impl.h>
+
+nxge_os_mutex_t npidebuglock;
+int npi_debug_init = 0;
+uint64_t npi_debug_level = 0;
+
+void
+npi_debug_msg(npi_handle_function_t function, uint64_t level, char *fmt, ...)
+{
+	char msg_buffer[1024];
+	char prefix_buffer[32];
+	int cmn_level = CE_CONT;
+	va_list ap;
+
+	if ((level & npi_debug_level) ||
+		(level & NPI_REG_CTL) ||
+		(level & NPI_ERR_CTL)) {
+
+		if (npi_debug_init == 0) {
+			MUTEX_INIT(&npidebuglock, NULL, MUTEX_DRIVER, NULL);
+			npi_debug_init = 1;
+		}
+
+		MUTEX_ENTER(&npidebuglock);
+
+		if (level & NPI_ERR_CTL) {
+			cmn_level = CE_WARN;
+		}
+
+		va_start(ap, fmt);
+		(void) vsprintf(msg_buffer, fmt, ap);
+		va_end(ap);
+
+		(void) sprintf(prefix_buffer, "%s%d(%d):", "npi",
+				function.instance, function.function);
+
+		MUTEX_EXIT(&npidebuglock);
+		cmn_err(cmn_level, "!%s %s\n", prefix_buffer, msg_buffer);
+	}
+}
+
+void
+npi_rtrace_buf_init(rtrace_t *rt)
+{
+	int i;
+
+	rt->next_idx = 0;
+	rt->last_idx = MAX_RTRACE_ENTRIES - 1;
+	rt->wrapped = B_FALSE;
+	for (i = 0; i < MAX_RTRACE_ENTRIES; i++) {
+		rt->buf[i].ctl_addr = TRACE_CTL_INVALID;
+		rt->buf[i].val_l32 = 0;
+		rt->buf[i].val_h32 = 0;
+	}
+}
+
+void
+npi_rtrace_update(npi_handle_t handle, boolean_t wr, rtrace_t *rt,
+		    uint32_t addr, uint64_t val)
+{
+	int idx;
+	idx = rt->next_idx;
+	if (wr == B_TRUE)
+		rt->buf[idx].ctl_addr = (addr & TRACE_ADDR_MASK)
+						| TRACE_CTL_WR;
+	else
+		rt->buf[idx].ctl_addr = (addr & TRACE_ADDR_MASK);
+	rt->buf[idx].ctl_addr |= (((handle.function.function
+				<< TRACE_FUNC_SHIFT) & TRACE_FUNC_MASK) |
+				((handle.function.instance
+				<< TRACE_INST_SHIFT) & TRACE_INST_MASK));
+	rt->buf[idx].val_l32 = val & 0xFFFFFFFF;
+	rt->buf[idx].val_h32 = (val >> 32) & 0xFFFFFFFF;
+	rt->next_idx++;
+	if (rt->next_idx > rt->last_idx) {
+		rt->next_idx = 0;
+		rt->wrapped = B_TRUE;
+	}
+}
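
The two functions above form a small register-access trace: npi_rtrace_buf_init() clears the ring buffer and npi_rtrace_update() records each access, packing the register offset, function number, instance number and read/write direction into the 32-bit ctl_addr word before advancing (and wrapping) the ring index. A minimal standalone sketch of that packing, reusing the TRACE_* values defined in npi.h (the next file in this changeset); it is illustrative only, not part of the driver:

#include <stdint.h>
#include <stdio.h>

#define	TRACE_ADDR_MASK		0x00FFFFFF	/* bits 0-23: register offset */
#define	TRACE_FUNC_MASK		0x03000000	/* bits 24-25: function number */
#define	TRACE_INST_MASK		0x3C000000	/* bits 26-29: instance number */
#define	TRACE_CTL_WR		0x40000000	/* bit 30: write access */
#define	TRACE_FUNC_SHIFT	24
#define	TRACE_INST_SHIFT	26

static uint32_t
pack_ctl_addr(uint32_t addr, uint16_t function, uint16_t instance, int is_write)
{
	uint32_t ctl = addr & TRACE_ADDR_MASK;

	if (is_write)
		ctl |= TRACE_CTL_WR;
	ctl |= ((uint32_t)function << TRACE_FUNC_SHIFT) & TRACE_FUNC_MASK;
	ctl |= ((uint32_t)instance << TRACE_INST_SHIFT) & TRACE_INST_MASK;
	return (ctl);
}

int
main(void)
{
	/* a write to register offset 0x1000 by function 1, instance 2 */
	printf("ctl_addr = 0x%08x\n", pack_ctl_addr(0x1000, 1, 2, 1));
	return (0);	/* prints ctl_addr = 0x49001000 */
}
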
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nxge/npi/npi.h	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,247 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _NPI_H
+#define	_NPI_H
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#include <nxge_common_impl.h>
+
+typedef	uint32_t			npi_status_t;
+
+/* Common Block ID */
+
+#define	MAC_BLK_ID			0x1
+#define	TXMAC_BLK_ID			0x2
+#define	RXMAC_BLK_ID			0x3
+#define	MIF_BLK_ID			0x4
+#define	IPP_BLK_ID			0x5
+#define	TXC_BLK_ID			0x6
+#define	TXDMA_BLK_ID			0x7
+#define	RXDMA_BLK_ID			0x8
+#define	ZCP_BLK_ID			0x9
+#define	ESPC_BLK_ID			0xa
+#define	FFLP_BLK_ID			0xb
+#define	PHY_BLK_ID			0xc
+#define	ETHER_SERDES_BLK_ID		0xd
+#define	PCIE_SERDES_BLK_ID		0xe
+#define	VIR_BLK_ID			0xf
+
+/* Common HW error code */
+/* HW unable to exit from reset state. */
+#define	RESET_FAILED			0x81
+
+/* Write operation failed on indirect write. */
+#define	WRITE_FAILED			0x82
+/* Read operation failed on indirect read.	 */
+#define	READ_FAILED			0x83
+
+/* Error code boundary */
+
+#define	COMMON_SW_ERR_START		0x40
+#define	COMMON_SW_ERR_END		0x4f
+#define	BLK_SPEC_SW_ERR_START		0x50
+#define	BLK_SPEC_SW_ERR_END		0x7f
+#define	COMMON_HW_ERR_START		0x80
+#define	COMMON_HW_ERR_END		0x8f
+#define	BLK_SPEC_HW_ERR_START		0x90
+#define	BLK_SPEC_HW_ERR_END		0xbf
+
+#define	IS_PORT				0x00100000
+#define	IS_CHAN				0x00200000
+
+/* Common SW errors code */
+
+#define	PORT_INVALID			0x41	/* Invalid port number */
+#define	CHANNEL_INVALID			0x42	/* Invalid dma channel number */
+#define	OPCODE_INVALID			0x43	/* Invalid opcode */
+#define	REGISTER_INVALID		0x44	/* Invalid register number */
+#define	COUNTER_INVALID			0x45	/* Invalid counter number */
+#define	CONFIG_INVALID			0x46	/* Invalid config input */
+#define	LOGICAL_PAGE_INVALID		0x47	/* Invalid logical page # */
+#define	VLAN_INVALID			0x48	/* Invalid Vlan ID */
+#define	RDC_TAB_INVALID			0x49	/* Invalid RDC Group Number */
+#define	LOCATION_INVALID		0x4a	/* Invalid Entry Location */
+
+#define	NPI_SUCCESS			0		/* Operation succeed */
+#define	NPI_FAILURE			0x80000000	/* Operation failed */
+
+#define	NPI_CNT_CLR_VAL			0
+
+/*
+ * Block identifier starts at bit 8.
+ */
+#define	NPI_BLOCK_ID_SHIFT		8
+
+/*
+ * Port, channel and misc. information starts at bit 12.
+ */
+#define	NPI_PORT_CHAN_SHIFT			12
+
+/*
+ * Software Block specific error codes start at 0x50.
+ */
+#define	NPI_BK_ERROR_START		0x50
+
+/*
+ * Hardware block specific error codes start at 0x90.
+ */
+#define	NPI_BK_HW_ER_START		0x90
+
+/* Structures for register tracing */
+
+typedef struct _rt_buf {
+	uint32_t	ctl_addr;
+	uint32_t	val_l32;
+	uint32_t	val_h32;
+} rt_buf_t;
+
+/*
+ * Control Address field format
+ *
+ * Bit 0 - 23: Address
+ * Bit 24 - 25: Function Number
+ * Bit 26 - 29: Instance Number
+ * Bit 30: Read/Write Direction bit
+ * Bit 31: Invalid bit
+ */
+
+#define	MAX_RTRACE_ENTRIES	1024
+#define	MAX_RTRACE_IOC_ENTRIES	64
+#define	TRACE_ADDR_MASK		0x00FFFFFF
+#define	TRACE_FUNC_MASK		0x03000000
+#define	TRACE_INST_MASK		0x3C000000
+#define	TRACE_CTL_WR		0x40000000
+#define	TRACE_CTL_INVALID	0x80000000
+#define	TRACE_FUNC_SHIFT	24
+#define	TRACE_INST_SHIFT	26
+#define	MSG_BUF_SIZE		1024
+
+
+typedef struct _rtrace {
+	uint16_t	next_idx;
+	uint16_t	last_idx;
+	boolean_t	wrapped;
+	rt_buf_t	buf[MAX_RTRACE_ENTRIES];
+} rtrace_t;
+
+typedef struct _err_inject {
+	uint8_t		blk_id;
+	uint8_t		chan;
+	uint32_t	err_id;
+	uint32_t	control;
+} err_inject_t;
+
+/* Configuration options */
+typedef enum config_op {
+	DISABLE = 0,
+	ENABLE,
+	INIT
+} config_op_t;
+
+/* I/O options */
+typedef enum io_op {
+	OP_SET = 0,
+	OP_GET,
+	OP_UPDATE,
+	OP_CLEAR
+} io_op_t;
+
+/* Counter options */
+typedef enum counter_op {
+	SNAP_STICKY = 0,
+	SNAP_ACCUMULATE,
+	CLEAR
+} counter_op_t;
+
+/* NPI attribute */
+typedef struct _npi_attr_t {
+	uint32_t type;
+	uint32_t idata[16];
+	uint32_t odata[16];
+} npi_attr_t;
+
+/* NPI Handle */
+typedef	struct	_npi_handle_function {
+	uint16_t		instance;
+	uint16_t		function;
+} npi_handle_function_t;
+
+/* NPI Handle */
+typedef	struct	_npi_handle {
+	npi_reg_handle_t	regh;
+	npi_reg_ptr_t		regp;
+	boolean_t		is_vraddr; /* virtualization region address */
+	npi_handle_function_t	function;
+	void * nxgep;
+} npi_handle_t;
+
+/* NPI Counter */
+typedef struct _npi_counter_t {
+	uint32_t id;
+	char *name;
+	uint32_t val;
+} npi_counter_t;
+
+/*
+ * Common definitions for NPI RXDMA and TXDMA functions.
+ */
+typedef struct _dma_log_page {
+	uint8_t			page_num;
+	boolean_t		valid;
+	uint8_t			func_num;
+	uint64_t		mask;
+	uint64_t		value;
+	uint64_t		reloc;
+} dma_log_page_t, *p_dma_log_page_t;
+
+extern	rtrace_t npi_rtracebuf;
+void npi_rtrace_buf_init(rtrace_t *);
+void npi_rtrace_update(npi_handle_t, boolean_t, rtrace_t *,
+			uint32_t, uint64_t);
+
+void npi_debug_msg(npi_handle_function_t, uint64_t,
+	char *, ...);
+
+#ifdef	NPI_DEBUG
+#define	NPI_DEBUG_MSG(params) npi_debug_msg params
+#else
+#define	NPI_DEBUG_MSG(params)
+#endif
+
+#define	NPI_ERROR_MSG(params) npi_debug_msg params
+#define	NPI_REG_DUMP_MSG(params) npi_debug_msg params
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _NPI_H */
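
npi_status_t values throughout the NPI layer follow the composition rule laid out above: NPI_FAILURE in the top bit, the block identifier shifted left by NPI_BLOCK_ID_SHIFT, port or channel information (when present) from bit NPI_PORT_CHAN_SHIFT upward, and the common or block-specific error code in the low byte. A short self-contained sketch of composing and decoding such a status, using only values defined in this header (the per-block macros in npi_espc.h later in this changeset are built the same way):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t npi_status_t;

#define	NPI_FAILURE		0x80000000
#define	NPI_BLOCK_ID_SHIFT	8
#define	ESPC_BLK_ID		0xa
#define	WRITE_FAILED		0x82	/* common HW error: indirect write failed */

int
main(void)
{
	/* equivalent to NPI_FAILURE | NPI_ESPC_EEPROM_WRITE_FAILED */
	npi_status_t status = NPI_FAILURE |
	    (ESPC_BLK_ID << NPI_BLOCK_ID_SHIFT) | WRITE_FAILED;

	printf("status     = 0x%08x\n", status);			/* 0x80000a82 */
	printf("failed     = %d\n", (status & NPI_FAILURE) != 0);
	printf("block id   = 0x%x\n", (status >> NPI_BLOCK_ID_SHIFT) & 0xf);
	printf("error code = 0x%x\n", status & 0xff);		/* WRITE_FAILED */
	return (0);
}
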
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nxge/npi/npi_espc.c	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,352 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#include <npi_espc.h>
+#include <nxge_espc.h>
+
+npi_status_t
+npi_espc_pio_enable(npi_handle_t handle)
+{
+	NXGE_REG_WR64(handle, ESPC_REG_ADDR(ESPC_PIO_EN_REG), 0x1);
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_espc_pio_disable(npi_handle_t handle)
+{
+	NXGE_REG_WR64(handle, ESPC_PIO_EN_REG, 0);
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_espc_eeprom_entry(npi_handle_t handle, io_op_t op, uint32_t addr,
+			uint8_t *data)
+{
+	uint64_t val = 0;
+
+	if ((addr & ~EPC_EEPROM_ADDR_BITS) != 0) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			" npi_espc_eeprom_entry"
+			" Invalid input addr <0x%x>\n",
+			addr));
+		return (NPI_FAILURE | NPI_ESPC_EEPROM_ADDR_INVALID);
+	}
+
+	switch (op) {
+	case OP_SET:
+		val = EPC_WRITE_INITIATE | (addr << EPC_EEPROM_ADDR_SHIFT) |
+			*data;
+		NXGE_REG_WR64(handle, ESPC_REG_ADDR(ESPC_PIO_STATUS_REG), val);
+		EPC_WAIT_RW_COMP(handle, &val, EPC_WRITE_COMPLETE);
+		if ((val & EPC_WRITE_COMPLETE) == 0) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				" npi_espc_eeprom_entry"
+				" HW Error: EEPROM_WR <0x%x>\n",
+				val));
+			return (NPI_FAILURE | NPI_ESPC_EEPROM_WRITE_FAILED);
+		}
+		break;
+	case OP_GET:
+		val = EPC_READ_INITIATE | (addr << EPC_EEPROM_ADDR_SHIFT);
+		NXGE_REG_WR64(handle, ESPC_REG_ADDR(ESPC_PIO_STATUS_REG), val);
+		EPC_WAIT_RW_COMP(handle, &val, EPC_READ_COMPLETE);
+		if ((val & EPC_READ_COMPLETE) == 0) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				" npi_espc_eeprom_entry"
+				" HW Error: EEPROM_RD <0x%x>",
+				val));
+			return (NPI_FAILURE | NPI_ESPC_EEPROM_READ_FAILED);
+		}
+		NXGE_REG_RD64(handle, ESPC_REG_ADDR(ESPC_PIO_STATUS_REG), &val);
+		*data = val & EPC_EEPROM_DATA_MASK;
+		break;
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_espc_eeprom_entry"
+				    " Invalid Input addr <0x%x>\n", addr));
+		return (NPI_FAILURE | NPI_ESPC_OPCODE_INVALID);
+	}
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_espc_mac_addr_get(npi_handle_t handle, uint8_t *data)
+{
+	mac_addr_0_t mac0;
+	mac_addr_1_t mac1;
+
+	NXGE_REG_RD64(handle, ESPC_MAC_ADDR_0, &mac0.value);
+	data[0] = mac0.bits.w0.byte0;
+	data[1] = mac0.bits.w0.byte1;
+	data[2] = mac0.bits.w0.byte2;
+	data[3] = mac0.bits.w0.byte3;
+
+	NXGE_REG_RD64(handle, ESPC_MAC_ADDR_1, &mac1.value);
+	data[4] = mac1.bits.w0.byte4;
+	data[5] = mac1.bits.w0.byte5;
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_espc_num_ports_get(npi_handle_t handle, uint8_t *data)
+{
+	uint64_t val = 0;
+
+	NXGE_REG_RD64(handle, ESPC_NUM_PORTS_MACS, &val);
+	val &= NUM_PORTS_MASK;
+	*data = (uint8_t)val;
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_espc_num_macs_get(npi_handle_t handle, uint8_t *data)
+{
+	uint64_t val = 0;
+
+	NXGE_REG_RD64(handle, ESPC_NUM_PORTS_MACS, &val);
+	val &= NUM_MAC_ADDRS_MASK;
+	val = (val >> NUM_MAC_ADDRS_SHIFT);
+	*data = (uint8_t)val;
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_espc_model_str_get(npi_handle_t handle, char *data)
+{
+	uint64_t val = 0;
+	uint16_t str_len;
+	int i, j;
+
+	NXGE_REG_RD64(handle, ESPC_MOD_STR_LEN, &val);
+	val &= MOD_STR_LEN_MASK;
+	str_len = (uint8_t)val;
+
+	if (str_len > MAX_MOD_STR_LEN) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				" npi_espc_model_str_get"
+				" Model string length %d exceeds max %d\n",
+				str_len, MAX_MOD_STR_LEN));
+		return (NPI_FAILURE | NPI_ESPC_STR_LEN_INVALID);
+	}
+
+	/*
+	 * Might have to reverse the order depending on how the string
+	 * is written.
+	 */
+	for (i = 0, j = 0; i < str_len; j++) {
+		NXGE_REG_RD64(handle, ESPC_MOD_STR(j), &val);
+		data[i++] = ((char *)&val)[3];
+		data[i++] = ((char *)&val)[2];
+		data[i++] = ((char *)&val)[1];
+		data[i++] = ((char *)&val)[0];
+	}
+
+	data[str_len] = '\0';
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_espc_bd_model_str_get(npi_handle_t handle, char *data)
+{
+	uint64_t val = 0;
+	uint16_t str_len;
+	int i, j;
+
+	NXGE_REG_RD64(handle, ESPC_BD_MOD_STR_LEN, &val);
+	val &= BD_MOD_STR_LEN_MASK;
+	str_len = (uint8_t)val;
+
+	if (str_len > MAX_BD_MOD_STR_LEN) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				" npi_espc_bd_model_str_get"
+				" Board Model string length %d "
+				"exceeds max %d\n",
+				str_len, MAX_BD_MOD_STR_LEN));
+		return (NPI_FAILURE | NPI_ESPC_STR_LEN_INVALID);
+	}
+
+	/*
+	 * Might have to reverse the order depending on how the string
+	 * is written.
+	 */
+	for (i = 0, j = 0; i < str_len; j++) {
+		NXGE_REG_RD64(handle, ESPC_BD_MOD_STR(j), &val);
+		data[i++] = ((char *)&val)[3];
+		data[i++] = ((char *)&val)[2];
+		data[i++] = ((char *)&val)[1];
+		data[i++] = ((char *)&val)[0];
+	}
+
+	data[str_len] = '\0';
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_espc_phy_type_get(npi_handle_t handle, uint8_t *data)
+{
+	phy_type_t	phy;
+
+	NXGE_REG_RD64(handle, ESPC_PHY_TYPE, &phy.value);
+	data[0] = phy.bits.w0.pt0_phy_type;
+	data[1] = phy.bits.w0.pt1_phy_type;
+	data[2] = phy.bits.w0.pt2_phy_type;
+	data[3] = phy.bits.w0.pt3_phy_type;
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_espc_port_phy_type_get(npi_handle_t handle, uint8_t *data, uint8_t portn)
+{
+	phy_type_t	phy;
+
+	ASSERT(IS_PORT_NUM_VALID(portn));
+
+	NXGE_REG_RD64(handle, ESPC_PHY_TYPE, &phy.value);
+	switch (portn) {
+	case 0:
+		*data = phy.bits.w0.pt0_phy_type;
+		break;
+	case 1:
+		*data = phy.bits.w0.pt1_phy_type;
+		break;
+	case 2:
+		*data = phy.bits.w0.pt2_phy_type;
+		break;
+	case 3:
+		*data = phy.bits.w0.pt3_phy_type;
+		break;
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				" npi_espc_port_phy_type_get"
+				" Invalid Input: portn <%d>",
+				portn));
+		return (NPI_FAILURE | NPI_ESPC_PORT_INVALID);
+	}
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_espc_max_frame_get(npi_handle_t handle, uint16_t *data)
+{
+	uint64_t val = 0;
+
+	NXGE_REG_RD64(handle, ESPC_MAX_FM_SZ, &val);
+	val &= MAX_FM_SZ_MASK;
+	*data = (uint16_t)val;
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_espc_version_get(npi_handle_t handle, uint16_t *data)
+{
+	uint64_t val = 0;
+
+	NXGE_REG_RD64(handle, ESPC_VER_IMGSZ, &val);
+	val &= VER_NUM_MASK;
+	*data = (uint16_t)val;
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_espc_img_sz_get(npi_handle_t handle, uint16_t *data)
+{
+	uint64_t val = 0;
+
+	NXGE_REG_RD64(handle, ESPC_VER_IMGSZ, &val);
+	val &= IMG_SZ_MASK;
+	val = val >> IMG_SZ_SHIFT;
+	*data = (uint16_t)val;
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_espc_chksum_get(npi_handle_t handle, uint8_t *data)
+{
+	uint64_t val = 0;
+
+	NXGE_REG_RD64(handle, ESPC_CHKSUM, &val);
+	val &= CHKSUM_MASK;
+	*data = (uint8_t)val;
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_espc_intr_num_get(npi_handle_t handle, uint8_t *data)
+{
+	intr_num_t	intr;
+
+	NXGE_REG_RD64(handle, ESPC_INTR_NUM, &intr.value);
+	data[0] = intr.bits.w0.pt0_intr_num;
+	data[1] = intr.bits.w0.pt1_intr_num;
+	data[2] = intr.bits.w0.pt2_intr_num;
+	data[3] = intr.bits.w0.pt3_intr_num;
+
+	return (NPI_SUCCESS);
+}
+
+void
+npi_espc_dump(npi_handle_t handle)
+{
+	int i;
+	uint64_t val = 0;
+
+	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+				    "Dumping SEEPROM registers directly:\n\n"));
+
+	for (i = 0; i < 23; i++) {
+		NXGE_REG_RD64(handle, ESPC_NCR_REGN(i), &val);
+		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+					    "reg[%d]      0x%llx\n",
+					    i, val & 0xffffffff));
+	}
+
+	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL, "\n\n"));
+}
+
+uint32_t
+npi_espc_reg_get(npi_handle_t handle, int reg_idx)
+{
+	uint64_t val = 0;
+	uint32_t reg_val = 0;
+
+	NXGE_REG_RD64(handle, ESPC_NCR_REGN(reg_idx), &val);
+	reg_val = val & 0xffffffff;
+
+	return (reg_val);
+}
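
npi_espc_model_str_get() and npi_espc_bd_model_str_get() above copy four ASCII characters of the string out of every SEEPROM register they read. Below is a small portable sketch of that per-register extraction written with shifts instead of pointer casts; it assumes the four characters sit in the low 32 bits, most-significant byte first, and the sample value is hypothetical. (The driver's own "might have to reverse the order" comment flags that its pointer-cast indexing is byte-order dependent.)

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* hypothetical SEEPROM word whose low 32 bits hold "Nept" */
	uint64_t val = 0x4e657074ULL;
	char data[5];

	/* pull out the four characters, most-significant byte first */
	data[0] = (char)((val >> 24) & 0xff);
	data[1] = (char)((val >> 16) & 0xff);
	data[2] = (char)((val >> 8) & 0xff);
	data[3] = (char)(val & 0xff);
	data[4] = '\0';

	printf("%s\n", data);	/* prints "Nept" */
	return (0);
}
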
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nxge/npi/npi_espc.h	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,87 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _NPI_ESPC_H
+#define	_NPI_ESPC_H
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#include <npi.h>
+#include <nxge_espc_hw.h>
+
+#define	EPC_WAIT_RW_COMP(handle, val_p, comp_bit) {\
+	uint32_t cnt = MAX_PIO_RETRIES;\
+	do {\
+		NXGE_DELAY(EPC_RW_WAIT);\
+		NXGE_REG_RD64(handle, ESPC_REG_ADDR(ESPC_PIO_STATUS_REG),\
+				val_p); cnt--;\
+	} while ((((*(val_p)) & comp_bit) == 0) && (cnt > 0));\
+}
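+
+/*
+ * Usage sketch for EPC_WAIT_RW_COMP (illustrative; the completion bit name
+ * below is a placeholder, not an actual define).  The macro polls the PIO
+ * status register into *val_p until the requested bit is set or the retry
+ * count runs out, so the caller still has to test the bit afterwards:
+ *
+ *	uint64_t val = 0;
+ *
+ *	EPC_WAIT_RW_COMP(handle, &val, <completion_bit>);
+ *	if ((val & <completion_bit>) == 0)
+ *		return (NPI_FAILURE | NPI_ESPC_EEPROM_READ_FAILED);
+ */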
+
+/* ESPC specific errors */
+
+#define	ESPC_EEPROM_ADDR_INVALID	0x51
+#define	ESPC_STR_LEN_INVALID		0x91
+
+/* ESPC error return macros */
+
+#define	NPI_ESPC_EEPROM_ADDR_INVALID	((ESPC_BLK_ID << 8) |\
+					ESPC_EEPROM_ADDR_INVALID)
+#define	NPI_ESPC_EEPROM_WRITE_FAILED	((ESPC_BLK_ID << 8) | WRITE_FAILED)
+#define	NPI_ESPC_EEPROM_READ_FAILED	((ESPC_BLK_ID << 8) | READ_FAILED)
+#define	NPI_ESPC_OPCODE_INVALID		((ESPC_BLK_ID << 8) | OPCODE_INVALID)
+#define	NPI_ESPC_STR_LEN_INVALID	((ESPC_BLK_ID << 8) |\
+					ESPC_STR_LEN_INVALID)
+#define	NPI_ESPC_PORT_INVALID		((ESPC_BLK_ID << 8) | PORT_INVALID)
+
+npi_status_t npi_espc_pio_enable(npi_handle_t);
+npi_status_t npi_espc_pio_disable(npi_handle_t);
+npi_status_t npi_espc_eeprom_entry(npi_handle_t, io_op_t,
+				uint32_t, uint8_t *);
+npi_status_t npi_espc_mac_addr_get(npi_handle_t, uint8_t *);
+npi_status_t npi_espc_num_ports_get(npi_handle_t, uint8_t *);
+npi_status_t npi_espc_num_macs_get(npi_handle_t, uint8_t *);
+npi_status_t npi_espc_model_str_get(npi_handle_t, char *);
+npi_status_t npi_espc_bd_model_str_get(npi_handle_t, char *);
+npi_status_t npi_espc_phy_type_get(npi_handle_t, uint8_t *);
+npi_status_t npi_espc_port_phy_type_get(npi_handle_t, uint8_t *,
+				uint8_t);
+npi_status_t npi_espc_max_frame_get(npi_handle_t, uint16_t *);
+npi_status_t npi_espc_version_get(npi_handle_t, uint16_t *);
+npi_status_t npi_espc_img_sz_get(npi_handle_t, uint16_t *);
+npi_status_t npi_espc_chksum_get(npi_handle_t, uint8_t *);
+npi_status_t npi_espc_intr_num_get(npi_handle_t, uint8_t *);
+uint32_t npi_espc_reg_get(npi_handle_t, int);
+void npi_espc_dump(npi_handle_t);
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _NPI_ESPC_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nxge/npi/npi_fflp.c	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,2720 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#include <npi_fflp.h>
+#include <nxge_common.h>
+
+/* macros to compute class configuration register offset */
+
+#define	  GET_TCAM_CLASS_OFFSET(cls) \
+	(FFLP_TCAM_CLS_BASE_OFFSET + (cls - 2) * 8)
+#define	  GET_TCAM_KEY_OFFSET(cls) \
+	(FFLP_TCAM_KEY_BASE_OFFSET + (cls - 4) * 8)
+#define	  GET_FLOW_KEY_OFFSET(cls) \
+	(FFLP_FLOW_KEY_BASE_OFFSET + (cls - 4) * 8)
+
+#define	  HASHTBL_PART_REG_STEP 8192
+#define	  HASHTBL_PART_REG_VIR_OFFSET 0x2100
+#define	  HASHTBL_PART_REG_VIR_STEP 0x4000
+#define	  GET_HASHTBL_PART_OFFSET_NVIR(partid, reg)	\
+	((partid  * HASHTBL_PART_REG_STEP) + reg)
+
+#define	  GET_HASHTBL_PART_OFFSET(handle, partid, reg)	\
+	    (handle.is_vraddr ?					\
+	    (((partid & 0x1) * HASHTBL_PART_REG_VIR_STEP) +	\
+	    (reg & 0x8) + (HASHTBL_PART_REG_VIR_OFFSET)) :	\
+	    (partid * HASHTBL_PART_REG_STEP) + reg)
+
+#define	 FFLP_PART_OFFSET(partid, reg) ((partid  * 8) + reg)
+#define	 FFLP_VLAN_OFFSET(vid, reg) ((vid  * 8) + reg)
+
+#define	 TCAM_COMPLETION_TRY_COUNT 10
+#define	 BIT_ENABLE	0x1
+#define	 BIT_DISABLE	0x0
+
+#define	 FCRAM_PARTITION_VALID(partid) \
+	((partid < NXGE_MAX_RDC_GRPS))
+#define	FFLP_VLAN_VALID(vid) \
+	((vid > 0) && (vid < NXGE_MAX_VLANS))
+#define	FFLP_PORT_VALID(port) \
+	((port < MAX_PORTS_PER_NXGE))
+#define	FFLP_RDC_TABLE_VALID(table) \
+	((table < NXGE_MAX_RDC_GRPS))
+#define	TCAM_L3_USR_CLASS_VALID(class) \
+	((class >= TCAM_CLASS_IP_USER_4) && (class <= TCAM_CLASS_IP_USER_7))
+#define	TCAM_L2_USR_CLASS_VALID(class) \
+	((class == TCAM_CLASS_ETYPE_1) || (class == TCAM_CLASS_ETYPE_2))
+#define	TCAM_L3_CLASS_VALID(class) \
+	((class >= TCAM_CLASS_IP_USER_4) && (class <= TCAM_CLASS_SCTP_IPV6))
+#define	TCAM_CLASS_VALID(class) \
+	((class >= TCAM_CLASS_ETYPE_1) && (class <= TCAM_CLASS_RARP))
+
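+/*
+ * Worked example (illustrative): in the non-virtualized case the hash
+ * table registers of a partition sit HASHTBL_PART_REG_STEP (8192) bytes
+ * apart, so GET_HASHTBL_PART_OFFSET_NVIR(2, FFLP_HASH_TBL_ADDR_REG)
+ * evaluates to (2 * 8192) + FFLP_HASH_TBL_ADDR_REG, i.e. the address
+ * register of partition 2 is 0x4000 bytes above that of partition 0.
+ */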
+
+uint64_t fflp_fzc_offset[] = {
+	FFLP_ENET_VLAN_TBL_REG, FFLP_L2_CLS_ENET1_REG, FFLP_L2_CLS_ENET2_REG,
+	FFLP_TCAM_KEY_IP_USR4_REG, FFLP_TCAM_KEY_IP_USR5_REG,
+	FFLP_TCAM_KEY_IP_USR6_REG, FFLP_TCAM_KEY_IP_USR7_REG,
+	FFLP_TCAM_KEY_IP4_TCP_REG, FFLP_TCAM_KEY_IP4_UDP_REG,
+	FFLP_TCAM_KEY_IP4_AH_ESP_REG, FFLP_TCAM_KEY_IP4_SCTP_REG,
+	FFLP_TCAM_KEY_IP6_TCP_REG, FFLP_TCAM_KEY_IP6_UDP_REG,
+	FFLP_TCAM_KEY_IP6_AH_ESP_REG, FFLP_TCAM_KEY_IP6_SCTP_REG,
+	FFLP_TCAM_KEY_0_REG, FFLP_TCAM_KEY_1_REG, FFLP_TCAM_KEY_2_REG,
+	FFLP_TCAM_KEY_3_REG, FFLP_TCAM_MASK_0_REG, FFLP_TCAM_MASK_1_REG,
+	FFLP_TCAM_MASK_2_REG, FFLP_TCAM_MASK_3_REG, FFLP_TCAM_CTL_REG,
+	FFLP_VLAN_PAR_ERR_REG, FFLP_TCAM_ERR_REG, HASH_LKUP_ERR_LOG1_REG,
+	HASH_LKUP_ERR_LOG2_REG, FFLP_FCRAM_ERR_TST0_REG,
+	FFLP_FCRAM_ERR_TST1_REG, FFLP_FCRAM_ERR_TST2_REG, FFLP_ERR_MSK_REG,
+	FFLP_CFG_1_REG, FFLP_DBG_TRAIN_VCT_REG, FFLP_TCP_CFLAG_MSK_REG,
+	FFLP_FCRAM_REF_TMR_REG,  FFLP_FLOW_KEY_IP_USR4_REG,
+	FFLP_FLOW_KEY_IP_USR5_REG, FFLP_FLOW_KEY_IP_USR6_REG,
+	FFLP_FLOW_KEY_IP_USR7_REG, FFLP_FLOW_KEY_IP4_TCP_REG,
+	FFLP_FLOW_KEY_IP4_UDP_REG, FFLP_FLOW_KEY_IP4_AH_ESP_REG,
+	FFLP_FLOW_KEY_IP4_SCTP_REG, FFLP_FLOW_KEY_IP6_TCP_REG,
+	FFLP_FLOW_KEY_IP6_UDP_REG, FFLP_FLOW_KEY_IP6_AH_ESP_REG,
+	FFLP_FLOW_KEY_IP6_SCTP_REG, FFLP_H1POLY_REG, FFLP_H2POLY_REG,
+	FFLP_FLW_PRT_SEL_REG
+};
+
+const char *fflp_fzc_name[] = {
+	"FFLP_ENET_VLAN_TBL_REG", "FFLP_L2_CLS_ENET1_REG",
+	"FFLP_L2_CLS_ENET2_REG", "FFLP_TCAM_KEY_IP_USR4_REG",
+	"FFLP_TCAM_KEY_IP_USR5_REG", "FFLP_TCAM_KEY_IP_USR6_REG",
+	"FFLP_TCAM_KEY_IP_USR7_REG", "FFLP_TCAM_KEY_IP4_TCP_REG",
+	"FFLP_TCAM_KEY_IP4_UDP_REG", "FFLP_TCAM_KEY_IP4_AH_ESP_REG",
+	"FFLP_TCAM_KEY_IP4_SCTP_REG", "FFLP_TCAM_KEY_IP6_TCP_REG",
+	"FFLP_TCAM_KEY_IP6_UDP_REG", "FFLP_TCAM_KEY_IP6_AH_ESP_REG",
+	"FFLP_TCAM_KEY_IP6_SCTP_REG", "FFLP_TCAM_KEY_0_REG",
+	"FFLP_TCAM_KEY_1_REG", "FFLP_TCAM_KEY_2_REG", "FFLP_TCAM_KEY_3_REG",
+	"FFLP_TCAM_MASK_0_REG", "FFLP_TCAM_MASK_1_REG", "FFLP_TCAM_MASK_2_REG",
+	"FFLP_TCAM_MASK_3_REG", "FFLP_TCAM_CTL_REG", "FFLP_VLAN_PAR_ERR_REG",
+	"FFLP_TCAM_ERR_REG", "HASH_LKUP_ERR_LOG1_REG",
+	"HASH_LKUP_ERR_LOG2_REG", "FFLP_FCRAM_ERR_TST0_REG",
+	"FFLP_FCRAM_ERR_TST1_REG", "FFLP_FCRAM_ERR_TST2_REG",
+	"FFLP_ERR_MSK_REG", "FFLP_CFG_1_REG", "FFLP_DBG_TRAIN_VCT_REG",
+	"FFLP_TCP_CFLAG_MSK_REG", "FFLP_FCRAM_REF_TMR_REG",
+	"FFLP_FLOW_KEY_IP_USR4_REG", "FFLP_FLOW_KEY_IP_USR5_REG",
+	"FFLP_FLOW_KEY_IP_USR6_REG", "FFLP_FLOW_KEY_IP_USR7_REG",
+	"FFLP_FLOW_KEY_IP4_TCP_REG", "FFLP_FLOW_KEY_IP4_UDP_REG",
+	"FFLP_FLOW_KEY_IP4_AH_ESP_REG", "FFLP_FLOW_KEY_IP4_SCTP_REG",
+	"FFLP_FLOW_KEY_IP6_TCP_REG", "FFLP_FLOW_KEY_IP6_UDP_REG",
+	"FFLP_FLOW_KEY_IP6_AH_ESP_REG",
+	"FFLP_FLOW_KEY_IP6_SCTP_REG", "FFLP_H1POLY_REG", "FFLP_H2POLY_REG",
+	"FFLP_FLW_PRT_SEL_REG"
+};
+
+uint64_t fflp_reg_offset[] = {
+	FFLP_HASH_TBL_ADDR_REG, FFLP_HASH_TBL_DATA_REG,
+	FFLP_HASH_TBL_DATA_LOG_REG
+};
+
+const char *fflp_reg_name[] = {
+	"FFLP_HASH_TBL_ADDR_REG", "FFLP_HASH_TBL_DATA_REG",
+	"FFLP_HASH_TBL_DATA_LOG_REG"
+};
+
+
+
+
+npi_status_t
+npi_fflp_dump_regs(npi_handle_t handle)
+{
+
+	uint64_t value;
+	int num_regs, i;
+
+	num_regs = sizeof (fflp_fzc_offset) / sizeof (uint64_t);
+	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+		"\nFFLP_FZC Register Dump \n"));
+	for (i = 0; i < num_regs; i++) {
+		REG_PIO_READ64(handle, fflp_fzc_offset[i], &value);
+		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+			" %8llx %s\t %8llx \n",
+			fflp_fzc_offset[i], fflp_fzc_name[i], value));
+
+	}
+
+	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+					    "\nFFLP Register Dump\n"));
+	num_regs = sizeof (fflp_reg_offset) / sizeof (uint64_t);
+
+	for (i = 0; i < num_regs; i++) {
+		REG_PIO_READ64(handle, fflp_reg_offset[i], &value);
+		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+			" %8llx %s\t %8llx \n",
+			fflp_reg_offset[i], fflp_reg_name[i], value));
+
+	}
+
+	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+					    "\n FFLP Register Dump done\n"));
+
+	return (NPI_SUCCESS);
+}
+
+void
+npi_fflp_vlan_tbl_dump(npi_handle_t handle)
+{
+	uint64_t offset;
+	vlan_id_t vlan_id;
+	uint64_t value;
+	vlan_id_t start = 0, stop = NXGE_MAX_VLANS;
+
+	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+		"\nVlan Table Dump \n"));
+
+	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+		"VID\t Offset\t Value\n"));
+
+	for (vlan_id = start; vlan_id < stop; vlan_id++) {
+		offset = FFLP_VLAN_OFFSET(vlan_id, FFLP_ENET_VLAN_TBL_REG);
+		REG_PIO_READ64(handle, offset, &value);
+		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+			    "%x\t %llx\t %llx\n", vlan_id, offset, value));
+	}
+
+}
+
+static uint64_t
+npi_fflp_tcam_check_completion(npi_handle_t handle, tcam_op_t op_type);
+
+/*
+ * npi_fflp_tcam_check_completion()
+ * Returns TCAM completion status.
+ *
+ * Input:
+ *           op_type :        Read, Write, Compare
+ *           handle  :        OS specific handle
+ *
+ * Output:
+ *        For Read and write operations:
+ *        0   Successful
+ *        -1  Fail/timeout
+ *
+ *       For Compare operations (debug only )
+ *        TCAM_REG_CTL read value    on success
+ *                     value contains match location
+ *        NPI_TCAM_COMP_NO_MATCH          no match
+ *
+ */
+static uint64_t
+npi_fflp_tcam_check_completion(npi_handle_t handle, tcam_op_t op_type)
+{
+
+	uint32_t try_counter, tcam_delay = 10;
+	tcam_ctl_t tctl;
+
+	try_counter = TCAM_COMPLETION_TRY_COUNT;
+
+	switch (op_type) {
+	case TCAM_RWC_STAT:
+
+		READ_TCAM_REG_CTL(handle, &tctl.value);
+		while ((try_counter) &&
+				(tctl.bits.ldw.stat != TCAM_CTL_RWC_RWC_STAT)) {
+			try_counter--;
+			NXGE_DELAY(tcam_delay);
+			READ_TCAM_REG_CTL(handle, &tctl.value);
+		}
+
+		if (!try_counter) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " TCAM RWC_STAT operation"
+					    " failed to complete \n"));
+			return (NPI_FFLP_TCAM_HW_ERROR);
+		}
+
+		tctl.value = 0;
+		break;
+
+	case TCAM_RWC_MATCH:
+		READ_TCAM_REG_CTL(handle, &tctl.value);
+
+		while ((try_counter) &&
+			(tctl.bits.ldw.match != TCAM_CTL_RWC_RWC_MATCH)) {
+			try_counter--;
+			NXGE_DELAY(tcam_delay);
+			READ_TCAM_REG_CTL(handle, &tctl.value);
+		}
+
+		if (!try_counter) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " TCAM Match operation"
+				    "failed to find match \n"));
+			tctl.value = NPI_TCAM_COMP_NO_MATCH;
+		}
+
+
+		break;
+
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+		" Invalid TCAM completion Request \n"));
+		return (NPI_FFLP_ERROR |
+		    NPI_TCAM_ERROR | OPCODE_INVALID);
+	}
+
+	return (tctl.value);
+}
+
+/*
+ * npi_fflp_tcam_entry_invalidate()
+ *
+ * invalidates entry at tcam location
+ *
+ * Input
+ * handle  :        OS specific handle
+ * location	:	TCAM location
+ *
+ * Return
+ *   NPI_SUCCESS
+ *   NPI_FFLP_TCAM_HW_ERROR
+ *
+ */
+npi_status_t
+npi_fflp_tcam_entry_invalidate(npi_handle_t handle, tcam_location_t location)
+{
+
+	tcam_ctl_t tctl, tctl_stat;
+
+/*
+ * Need to write zero to class field.
+ * Class field is bits [195:191].
+ * This corresponds to TCAM key 0 register
+ *
+ */
+
+
+	WRITE_TCAM_REG_MASK0(handle, 0xffULL);
+	WRITE_TCAM_REG_KEY0(handle, 0x0ULL);
+	tctl.value = 0;
+	tctl.bits.ldw.location = location;
+	tctl.bits.ldw.rwc = TCAM_CTL_RWC_TCAM_WR;
+
+	WRITE_TCAM_REG_CTL(handle, tctl.value);
+
+	tctl_stat.value = npi_fflp_tcam_check_completion(handle,
+				TCAM_RWC_STAT);
+
+	if (tctl_stat.value & NPI_FAILURE)
+		return (NPI_FFLP_TCAM_HW_ERROR);
+
+	return (NPI_SUCCESS);
+
+}
+
+/*
+ * npi_fflp_tcam_entry_match()
+ *
+ * lookup a tcam entry in the TCAM
+ *
+ * Input
+ * handle  :        OS specific handle
+ * tcam_ptr   :     TCAM entry ptr
+ *
+ * Return
+ *
+ *	 NPI_FAILURE | NPI_XX_ERROR:	     Operational Error (HW etc ...)
+ *	 NPI_TCAM_NO_MATCH:		     no match
+ *	 0 - TCAM_SIZE:			     matching entry location (if match)
+ */
+int
+npi_fflp_tcam_entry_match(npi_handle_t handle,  tcam_entry_t *tcam_ptr)
+{
+
+	uint64_t tcam_stat = 0;
+	tcam_ctl_t tctl, tctl_stat;
+
+	WRITE_TCAM_REG_MASK0(handle, tcam_ptr->mask0);
+	WRITE_TCAM_REG_MASK1(handle, tcam_ptr->mask1);
+	WRITE_TCAM_REG_MASK2(handle, tcam_ptr->mask2);
+	WRITE_TCAM_REG_MASK3(handle, tcam_ptr->mask3);
+
+	WRITE_TCAM_REG_KEY0(handle, tcam_ptr->key0);
+	WRITE_TCAM_REG_KEY1(handle, tcam_ptr->key1);
+	WRITE_TCAM_REG_KEY2(handle, tcam_ptr->key2);
+	WRITE_TCAM_REG_KEY3(handle, tcam_ptr->key3);
+
+	tctl.value = 0;
+	tctl.bits.ldw.rwc = TCAM_CTL_RWC_TCAM_CMP;
+
+	WRITE_TCAM_REG_CTL(handle, tctl.value);
+
+	tcam_stat = npi_fflp_tcam_check_completion(handle, TCAM_RWC_STAT);
+	if (tcam_stat & NPI_FAILURE) {
+		return ((uint32_t)tcam_stat);
+	}
+
+	tctl_stat.value = npi_fflp_tcam_check_completion(handle,
+				TCAM_RWC_MATCH);
+
+	if (tctl_stat.bits.ldw.match == TCAM_CTL_RWC_RWC_MATCH) {
+		return ((uint32_t)tctl_stat.bits.ldw.location);
+	}
+
+	return ((uint32_t)tctl_stat.value);
+
+}
+
+/*
+ * npi_fflp_tcam_entry_read ()
+ *
+ * Reads a tcam entry from the TCAM location, location
+ *
+ * Input:
+ * handle  :        OS specific handle
+ * location  :		TCAM location
+ * tcam_ptr  :		TCAM entry pointer
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_FFLP_TCAM_RD_ERROR
+ *
+ */
+npi_status_t
+npi_fflp_tcam_entry_read(npi_handle_t handle,
+						    tcam_location_t location,
+						    struct tcam_entry *tcam_ptr)
+{
+
+	uint64_t tcam_stat;
+	tcam_ctl_t tctl;
+
+	tctl.value = 0;
+	tctl.bits.ldw.location = location;
+	tctl.bits.ldw.rwc = TCAM_CTL_RWC_TCAM_RD;
+
+	WRITE_TCAM_REG_CTL(handle, tctl.value);
+
+	tcam_stat = npi_fflp_tcam_check_completion(handle, TCAM_RWC_STAT);
+
+	if (tcam_stat & NPI_FAILURE) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+		    "TCAM read failed loc %d \n", location));
+		return (NPI_FFLP_TCAM_RD_ERROR);
+	}
+
+	READ_TCAM_REG_MASK0(handle, &tcam_ptr->mask0);
+	READ_TCAM_REG_MASK1(handle, &tcam_ptr->mask1);
+	READ_TCAM_REG_MASK2(handle, &tcam_ptr->mask2);
+	READ_TCAM_REG_MASK3(handle, &tcam_ptr->mask3);
+
+	READ_TCAM_REG_KEY0(handle, &tcam_ptr->key0);
+	READ_TCAM_REG_KEY1(handle, &tcam_ptr->key1);
+	READ_TCAM_REG_KEY2(handle, &tcam_ptr->key2);
+	READ_TCAM_REG_KEY3(handle, &tcam_ptr->key3);
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_fflp_tcam_entry_write()
+ *
+ * writes a tcam entry to the TCAM location, location
+ *
+ * Input:
+ * handle  :        OS specific handle
+ * location :	TCAM location
+ * tcam_ptr :	TCAM entry pointer
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_FFLP_TCAM_WR_ERROR
+ *
+ */
+npi_status_t
+npi_fflp_tcam_entry_write(npi_handle_t handle,
+			    tcam_location_t location,
+			    tcam_entry_t *tcam_ptr)
+{
+
+	uint64_t tcam_stat;
+
+	tcam_ctl_t tctl;
+
+	WRITE_TCAM_REG_MASK0(handle, tcam_ptr->mask0);
+	WRITE_TCAM_REG_MASK1(handle, tcam_ptr->mask1);
+	WRITE_TCAM_REG_MASK2(handle, tcam_ptr->mask2);
+	WRITE_TCAM_REG_MASK3(handle, tcam_ptr->mask3);
+
+	WRITE_TCAM_REG_KEY0(handle, tcam_ptr->key0);
+	WRITE_TCAM_REG_KEY1(handle, tcam_ptr->key1);
+	WRITE_TCAM_REG_KEY2(handle, tcam_ptr->key2);
+	WRITE_TCAM_REG_KEY3(handle, tcam_ptr->key3);
+
+	NPI_DEBUG_MSG((handle.function, NPI_FFLP_CTL,
+			    " tcam write: location %x\n"
+			    " key:  %llx %llx %llx %llx \n"
+			    " mask: %llx %llx %llx %llx \n",
+			    location, tcam_ptr->key0, tcam_ptr->key1,
+			    tcam_ptr->key2, tcam_ptr->key3,
+			    tcam_ptr->mask0, tcam_ptr->mask1,
+			    tcam_ptr->mask2, tcam_ptr->mask3));
+	tctl.value = 0;
+	tctl.bits.ldw.location = location;
+	tctl.bits.ldw.rwc = TCAM_CTL_RWC_TCAM_WR;
+	NPI_DEBUG_MSG((handle.function, NPI_FFLP_CTL,
+			    " tcam write: ctl value %llx \n", tctl.value));
+	WRITE_TCAM_REG_CTL(handle, tctl.value);
+
+	tcam_stat = npi_fflp_tcam_check_completion(handle, TCAM_RWC_STAT);
+
+	if (tcam_stat & NPI_FAILURE) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+		    "TCAM Write failed loc %d \n", location));
+		return (NPI_FFLP_TCAM_WR_ERROR);
+	}
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_fflp_tcam_asc_ram_entry_write()
+ *
+ * writes to the TCAM associated RAM at the given TCAM location
+ *
+ * Input:
+ * handle  :        OS specific handle
+ * location :	TCAM associated RAM location
+ * ram_data :	Value to write
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_FFLP_ASC_RAM_WR_ERROR
+ *
+ */
+npi_status_t
+npi_fflp_tcam_asc_ram_entry_write(npi_handle_t handle,
+				    tcam_location_t location,
+				    uint64_t ram_data)
+{
+
+	uint64_t tcam_stat = 0;
+	tcam_ctl_t tctl;
+
+
+	WRITE_TCAM_REG_KEY1(handle, ram_data);
+
+	tctl.value = 0;
+	tctl.bits.ldw.location = location;
+	tctl.bits.ldw.rwc = TCAM_CTL_RWC_RAM_WR;
+
+	NPI_DEBUG_MSG((handle.function, NPI_FFLP_CTL,
+		    " tcam ascr write: location %x data %llx ctl value %llx \n",
+		    location, ram_data, tctl.value));
+	WRITE_TCAM_REG_CTL(handle, tctl.value);
+	tcam_stat = npi_fflp_tcam_check_completion(handle, TCAM_RWC_STAT);
+
+	if (tcam_stat & NPI_FAILURE) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+		    "TCAM RAM write failed loc %d \n", location));
+		return (NPI_FFLP_ASC_RAM_WR_ERROR);
+	}
+
+	return (NPI_SUCCESS);
+}
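+
+/*
+ * Illustrative usage sketch (not part of the original change; key, mask
+ * and RAM values are hypothetical).  A TCAM entry and its associated RAM
+ * word are typically programmed back to back for the same location:
+ *
+ *	tcam_entry_t entry;
+ *	npi_status_t rs;
+ *
+ *	bzero(&entry, sizeof (entry));
+ *	entry.key0 = key0_value;
+ *	entry.mask0 = mask0_value;
+ *	rs = npi_fflp_tcam_entry_write(handle, location, &entry);
+ *	if (rs == NPI_SUCCESS)
+ *		rs = npi_fflp_tcam_asc_ram_entry_write(handle, location,
+ *		    ram_word);
+ */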
+
+/*
+ * npi_fflp_tcam_asc_ram_entry_read()
+ *
+ * reads the TCAM associated RAM contents at the given TCAM location
+ *
+ * Input:
+ * handle  :        OS specific handle
+ * location :	TCAM associated RAM location
+ * ram_data :	ptr to return contents
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_FFLP_ASC_RAM_RD_ERROR
+ *
+ */
+npi_status_t
+npi_fflp_tcam_asc_ram_entry_read(npi_handle_t handle,
+				    tcam_location_t location,
+				    uint64_t *ram_data)
+{
+
+	uint64_t tcam_stat;
+	tcam_ctl_t tctl;
+
+
+	tctl.value = 0;
+	tctl.bits.ldw.location = location;
+	tctl.bits.ldw.rwc = TCAM_CTL_RWC_RAM_RD;
+
+	WRITE_TCAM_REG_CTL(handle, tctl.value);
+
+	tcam_stat = npi_fflp_tcam_check_completion(handle, TCAM_RWC_STAT);
+
+	if (tcam_stat & NPI_FAILURE) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+		    "TCAM RAM read failed loc %d \n", location));
+		return (NPI_FFLP_ASC_RAM_RD_ERROR);
+	}
+
+	READ_TCAM_REG_KEY1(handle, ram_data);
+
+	return (NPI_SUCCESS);
+}
+
+/* FFLP FCRAM Related functions */
+/* The following are FCRAM datapath functions */
+
+/*
+ * npi_fflp_fcram_entry_write ()
+ * Populates an FCRAM entry
+ * Inputs:
+ *         handle:	opaque handle interpreted by the underlying OS
+ *	   partid:	Partition ID
+ *	   location:	Index to the FCRAM.
+ *			 Corresponds to last 20 bits of H1 value
+ *	   fcram_ptr:	Pointer to the FCRAM contents to be used for writing
+ *	   format:	Entry Format. Determines the size of the write.
+ *			      FCRAM_ENTRY_OPTIM:   8 bytes (a 64 bit write)
+ *			      FCRAM_ENTRY_EX_IP4:  32 bytes (4 X 64 bit write)
+ *			      FCRAM_ENTRY_EX_IP6:  56 bytes (7 X 64 bit write)
+ *
+ * Outputs:
+ *         NPI success/failure status code
+ */
+npi_status_t
+npi_fflp_fcram_entry_write(npi_handle_t handle, part_id_t partid,
+			    uint32_t location, fcram_entry_t *fcram_ptr,
+			    fcram_entry_format_t format)
+
+{
+
+	int num_subareas = 0;
+	uint64_t addr_reg, data_reg;
+	int subarea;
+	int autoinc;
+	hash_tbl_addr_t addr;
+	switch (format) {
+	case FCRAM_ENTRY_OPTIM:
+		if (location % 8) {
+			/* need to be 8 byte aligned */
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			    " FCRAM_ENTRY_OPTIM Write:"
+			    " unaligned location %llx \n",
+			    location));
+			return (NPI_FFLP_FCRAM_LOC_INVALID);
+		}
+
+		num_subareas = 1;
+		autoinc = 0;
+		break;
+
+	case FCRAM_ENTRY_EX_IP4:
+		if (location % 32) {
+			/* need to be 32 byte aligned */
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			    " FCRAM_ENTRY_EX_IP4 Write:"
+			    " unaligned location %llx \n",
+			    location));
+			return (NPI_FFLP_FCRAM_LOC_INVALID);
+		}
+
+		num_subareas = 4;
+		autoinc = 1;
+		break;
+
+	case FCRAM_ENTRY_EX_IP6:
+		if (location % 64) {
+			/* need to be 64 byte aligned */
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			    " FCRAM_ENTRY_EX_IP6 Write:"
+			    " unaligned location %llx \n",
+			    location));
+			return (NPI_FFLP_FCRAM_LOC_INVALID);
+		}
+
+		num_subareas = 7;
+		autoinc = 1;
+		break;
+
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+		    " fcram_entry_write:"
+		    " unknown format param location %llx\n",
+		    location));
+		return (NPI_FFLP_ERROR | NPI_FCRAM_ERROR | OPCODE_INVALID);
+	}
+
+	addr.value = 0;
+	addr.bits.ldw.autoinc = autoinc;
+	addr.bits.ldw.addr = location;
+	addr_reg = GET_HASHTBL_PART_OFFSET(handle, partid,
+					    FFLP_HASH_TBL_ADDR_REG);
+	data_reg = GET_HASHTBL_PART_OFFSET(handle, partid,
+					    FFLP_HASH_TBL_DATA_REG);
+/* write to addr reg */
+	REG_PIO_WRITE64(handle, addr_reg, addr.value);
+/* write data to the data register */
+
+	for (subarea = 0; subarea < num_subareas; subarea++) {
+		REG_PIO_WRITE64(handle, data_reg, fcram_ptr->value[subarea]);
+	}
+
+	return (NPI_SUCCESS);
+}
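+
+/*
+ * Illustrative usage sketch (hypothetical locals).  An FCRAM_ENTRY_OPTIM
+ * entry is a single 64-bit word, and its location must be 8-byte aligned
+ * or the write is rejected with NPI_FFLP_FCRAM_LOC_INVALID:
+ *
+ *	fcram_entry_t fc;
+ *
+ *	fc.value[0] = optim_entry_value;
+ *	(void) npi_fflp_fcram_entry_write(handle, partid, location,
+ *	    &fc, FCRAM_ENTRY_OPTIM);
+ */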
+
+/*
+ * npi_fflp_fcram_entry_read ()
+ * Reads an FCRAM entry
+ * Inputs:
+ *         handle:	opaque handle interpreted by the underlying OS
+ *	   partid:	Partition ID
+ *	   location:	Index to the FCRAM.
+ *                  Corresponds to last 20 bits of H1 value
+ *
+ *	   fcram_ptr:	Pointer to the FCRAM contents to be updated
+ *	   format:	Entry Format. Determines the size of the read.
+ *			      FCRAM_ENTRY_OPTIM:   8 bytes (a 64 bit read)
+ *			      FCRAM_ENTRY_EX_IP4:  32 bytes (4 X 64 bit read )
+ *			      FCRAM_ENTRY_EX_IP6:  56 bytes (7 X 64 bit read )
+ * Return:
+ * NPI Success/Failure status code
+ *
+ */
+npi_status_t
+npi_fflp_fcram_entry_read(npi_handle_t handle,  part_id_t partid,
+			    uint32_t location, fcram_entry_t *fcram_ptr,
+			    fcram_entry_format_t format)
+{
+
+	int num_subareas = 0;
+	uint64_t addr_reg, data_reg;
+	int subarea, autoinc;
+	hash_tbl_addr_t addr;
+	switch (format) {
+	case FCRAM_ENTRY_OPTIM:
+		if (location % 8) {
+			/* need to be 8 byte aligned */
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			    " FCRAM_ENTRY_OPTIM Read:"
+			    " unaligned location %llx \n",
+			    location));
+			return (NPI_FFLP_FCRAM_LOC_INVALID);
+		}
+
+		num_subareas = 1;
+		autoinc = 0;
+		break;
+
+	case FCRAM_ENTRY_EX_IP4:
+		if (location % 32) {
+			/* need to be 32 byte aligned */
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			    " FCRAM_ENTRY_EX_IP4 Read:"
+			    " unaligned location %llx \n",
+			    location));
+			return (NPI_FFLP_FCRAM_LOC_INVALID);
+		}
+
+		num_subareas = 4;
+		autoinc = 1;
+		break;
+
+	case FCRAM_ENTRY_EX_IP6:
+		if (location % 64) {
+			/* need to be 64 byte aligned */
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			    " FCRAM_ENTRY_EX_IP6 Read:"
+			    " unaligned location %llx \n",
+			    location));
+			return (NPI_FFLP_FCRAM_LOC_INVALID);
+		}
+
+		num_subareas = 7;
+		autoinc = 1;
+		break;
+
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+		    " fcram_entry_read:"
+		    " unknown format param location %llx\n",
+		    location));
+		return (NPI_FFLP_SW_PARAM_ERROR);
+	}
+
+	addr.value = 0;
+	addr.bits.ldw.autoinc = autoinc;
+	addr.bits.ldw.addr = location;
+	addr_reg = GET_HASHTBL_PART_OFFSET(handle, partid,
+			FFLP_HASH_TBL_ADDR_REG);
+	data_reg = GET_HASHTBL_PART_OFFSET(handle, partid,
+			FFLP_HASH_TBL_DATA_REG);
+/* write to addr reg */
+	REG_PIO_WRITE64(handle, addr_reg, addr.value);
+/* read data from the data register */
+	for (subarea = 0; subarea < num_subareas; subarea++) {
+		REG_PIO_READ64(handle, data_reg, &fcram_ptr->value[subarea]);
+	}
+
+
+	return (NPI_SUCCESS);
+
+}
+
+/*
+ * npi_fflp_fcram_entry_invalidate ()
+ * Invalidate FCRAM entry at the given location
+ * Inputs:
+ *	handle:		opaque handle interpreted by the underlying OS
+ *	partid:		Partition ID
+ *	location:	location of the FCRAM/hash entry.
+ *
+ * Return:
+ * NPI Success/Failure status code
+ */
+npi_status_t
+npi_fflp_fcram_entry_invalidate(npi_handle_t handle, part_id_t partid,
+				    uint32_t location)
+{
+
+	hash_tbl_addr_t addr;
+	uint64_t addr_reg, data_reg;
+	hash_hdr_t	   hdr;
+
+
+	if (location % 8) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			" FCRAM_ENTRY_Invalidate:"
+			" unaligned location %llx \n",
+			location));
+			/* need to be 8 byte aligned */
+		return (NPI_FFLP_FCRAM_LOC_INVALID);
+	}
+
+	addr.value = 0;
+	addr.bits.ldw.addr = location;
+	addr_reg = GET_HASHTBL_PART_OFFSET(handle, partid,
+			FFLP_HASH_TBL_ADDR_REG);
+	data_reg = GET_HASHTBL_PART_OFFSET(handle, partid,
+			FFLP_HASH_TBL_DATA_REG);
+
+/* write to addr reg */
+	REG_PIO_WRITE64(handle, addr_reg, addr.value);
+
+	REG_PIO_READ64(handle, data_reg, &hdr.value);
+	hdr.exact_hdr.valid = 0;
+	REG_PIO_WRITE64(handle, data_reg, hdr.value);
+
+	return (NPI_SUCCESS);
+
+}
+
+/*
+ * npi_fflp_fcram_subarea_write ()
+ * Writes to FCRAM entry subarea i.e the 8 bytes within the 64 bytes
+ * pointed by the  last 20 bits of  H1. Effectively, this accesses
+ * specific 8 bytes within the hash table bucket.
+ *
+ *  H1-->  |-----------------|
+ *	   |	subarea 0    |
+ *	   |_________________|
+ *	   | Subarea 1	     |
+ *	   |_________________|
+ *	   | .......	     |
+ *	   |_________________|
+ *	   | Subarea 7       |
+ *	   |_________________|
+ *
+ * Inputs:
+ *         handle:	opaque handle interpreted by the underlying OS
+ *	   partid:	Partition ID
+ *	   location:	location of the subarea. It is derived from:
+ *			Bucket = [19:15][14:0]       (20 bits of H1)
+ *			location = (Bucket << 3 ) + subarea * 8
+ *				 = [22:18][17:3] || subarea * 8
+ *	   data:	Data
+ *
+ * Return:
+ * NPI Success/Failure status code
+ */
+npi_status_t
+npi_fflp_fcram_subarea_write(npi_handle_t handle, part_id_t partid,
+			    uint32_t location, uint64_t data)
+{
+
+	hash_tbl_addr_t addr;
+	uint64_t addr_reg, data_reg;
+
+
+	if (location % 8) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			" fcram_subarea_write:"
+			" unaligned location %llx \n",
+			location));
+		/* need to be 8 byte aligned */
+		return (NPI_FFLP_FCRAM_LOC_INVALID);
+	}
+
+	addr.value = 0;
+	addr.bits.ldw.addr = location;
+	addr_reg = GET_HASHTBL_PART_OFFSET(handle, partid,
+			FFLP_HASH_TBL_ADDR_REG);
+	data_reg = GET_HASHTBL_PART_OFFSET(handle, partid,
+			FFLP_HASH_TBL_DATA_REG);
+
+/* write to addr reg */
+	REG_PIO_WRITE64(handle, addr_reg, addr.value);
+	REG_PIO_WRITE64(handle, data_reg, data);
+
+	return (NPI_SUCCESS);
+
+}
+
+/*
+ * npi_fflp_fcram_subarea_read ()
+ * Reads an FCRAM entry subarea i.e the 8 bytes within the 64 bytes
+ * pointed by  the last 20 bits of  H1. Effectively, this accesses
+ * specific 8 bytes within the hash table bucket.
+ *
+ *  H1-->  |-----------------|
+ *	   |	subarea 0    |
+ *	   |_________________|
+ *	   | Subarea 1	     |
+ *	   |_________________|
+ *	   | .......	     |
+ *	   |_________________|
+ *	   | Subarea 7       |
+ *	   |_________________|
+ *
+ * Inputs:
+ *         handle:	opaque handle interpreted by the underlying OS
+ *	   partid:	Partition ID
+ *	   location:	location of the subarea. It is derived from:
+ *			Bucket = [19:15][14:0]       (20 bits of H1)
+ *			location = (Bucket << 3 ) + subarea * 8
+ *				 = [22:18][17:3] || subarea * 8
+ *	   data:	ptr to write subarea contents to.
+ *
+ * Return:
+ * NPI Success/Failure status code
+ */
+npi_status_t
+npi_fflp_fcram_subarea_read(npi_handle_t handle, part_id_t partid,
+			    uint32_t location, uint64_t *data)
+
+{
+
+	hash_tbl_addr_t addr;
+	uint64_t addr_reg, data_reg;
+
+	if (location % 8) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " fcram_subarea_read:"
+				    " unaligned location %llx \n",
+				    location));
+		/* need to be 8 byte aligned */
+		return (NPI_FFLP_FCRAM_LOC_INVALID);
+	}
+
+	addr.value = 0;
+	addr.bits.ldw.addr = location;
+	addr_reg = GET_HASHTBL_PART_OFFSET(handle, partid,
+						    FFLP_HASH_TBL_ADDR_REG);
+	data_reg = GET_HASHTBL_PART_OFFSET(handle, partid,
+						    FFLP_HASH_TBL_DATA_REG);
+
+/* write to addr reg */
+	REG_PIO_WRITE64(handle, addr_reg, addr.value);
+	REG_PIO_READ64(handle, data_reg, data);
+
+	return (NPI_SUCCESS);
+
+}
+
+/*
+ * The following are zero function fflp configuration functions.
+ */
+
+/*
+ * npi_fflp_cfg_fcram_partition()
+ * Partitions and configures the FCRAM
+ */
+npi_status_t
+npi_fflp_cfg_fcram_partition(npi_handle_t handle, part_id_t partid,
+				    uint8_t base_mask, uint8_t base_reloc)
+
+{
+/*
+ * assumes that the base mask and relocation are computed somewhere
+ * and kept in the state data structure. Alternatively, one can pass
+ * a partition size and a starting address and this routine can compute
+ * the mask and reloc values.
+ */
+
+	flow_prt_sel_t sel;
+	uint64_t offset;
+
+	ASSERT(FCRAM_PARTITION_VALID(partid));
+	if (!FCRAM_PARTITION_VALID(partid)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_fflp_cfg_fcram_partition:"
+				    " Invalid Partition %d \n",
+				    partid));
+		return (NPI_FFLP_FCRAM_PART_INVALID);
+	}
+
+	offset = FFLP_PART_OFFSET(partid, FFLP_FLW_PRT_SEL_REG);
+	sel.value = 0;
+	sel.bits.ldw.mask = base_mask;
+	sel.bits.ldw.base = base_reloc;
+	sel.bits.ldw.ext = BIT_DISABLE; /* disable */
+	REG_PIO_WRITE64(handle, offset, sel.value);
+	return (NPI_SUCCESS);
+
+}
+
+/*
+ * npi_fflp_cfg_fcram_partition_enable
+ * Enable previously configured FCRAM partition
+ *
+ * Input
+ *         handle:	opaque handle interpreted by the underlying OS
+ *         partid:	 partition ID, Corresponds to the RDC table
+ *
+ * Return
+ *      0			Successful
+ *      Non zero  error code    Enable failed, and reason.
+ *
+ */
+npi_status_t
+npi_fflp_cfg_fcram_partition_enable(npi_handle_t handle, part_id_t partid)
+
+{
+
+	flow_prt_sel_t sel;
+	uint64_t offset;
+
+	ASSERT(FCRAM_PARTITION_VALID(partid));
+	if (!FCRAM_PARTITION_VALID(partid)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " fcram_partition enable:"
+				    " Invalid Partition %d \n",
+				    partid));
+		return (NPI_FFLP_FCRAM_PART_INVALID);
+	}
+
+	offset = FFLP_PART_OFFSET(partid, FFLP_FLW_PRT_SEL_REG);
+
+	REG_PIO_READ64(handle, offset, &sel.value);
+	sel.bits.ldw.ext = BIT_ENABLE; /* enable */
+	REG_PIO_WRITE64(handle, offset, sel.value);
+
+	return (NPI_SUCCESS);
+
+}
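+
+/*
+ * Illustrative sequence (hypothetical mask and relocation values): an
+ * FCRAM partition is configured first and only then enabled.
+ *
+ *	(void) npi_fflp_cfg_fcram_partition(handle, partid, base_mask,
+ *	    base_reloc);
+ *	(void) npi_fflp_cfg_fcram_partition_enable(handle, partid);
+ */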
+
+/*
+ * npi_fflp_cfg_fcram_partition_disable
+ * Disable previously configured FCRAM partition
+ *
+ * Input
+ *         handle:	opaque handle interpreted by the underlying OS
+ *         partid:	partition ID, Corresponds to the RDC table
+ *
+ * Return:
+ * NPI Success/Failure status code
+ */
+npi_status_t
+npi_fflp_cfg_fcram_partition_disable(npi_handle_t handle, part_id_t partid)
+
+{
+
+	flow_prt_sel_t sel;
+	uint64_t offset;
+
+	ASSERT(FCRAM_PARTITION_VALID(partid));
+	if (!FCRAM_PARTITION_VALID(partid)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " fcram_partition disable:"
+				    " Invalid Partition %d \n",
+				    partid));
+		return (NPI_FFLP_FCRAM_PART_INVALID);
+	}
+	offset = FFLP_PART_OFFSET(partid, FFLP_FLW_PRT_SEL_REG);
+	REG_PIO_READ64(handle, offset, &sel.value);
+	sel.bits.ldw.ext = BIT_DISABLE; /* disable */
+	REG_PIO_WRITE64(handle, offset, sel.value);
+	return (NPI_SUCCESS);
+}
+
+/*
+ *  npi_fflp_cfg_cam_errorcheck_disable
+ *  Disables FCRAM and TCAM error checking
+ */
+npi_status_t
+npi_fflp_cfg_cam_errorcheck_disable(npi_handle_t handle)
+
+{
+
+	fflp_cfg_1_t fflp_cfg;
+	uint64_t offset;
+	offset = FFLP_CFG_1_REG;
+
+	REG_PIO_READ64(handle, offset, &fflp_cfg.value);
+
+	fflp_cfg.bits.ldw.errordis = BIT_ENABLE;
+	REG_PIO_WRITE64(handle, offset, fflp_cfg.value);
+
+	return (NPI_SUCCESS);
+
+}
+
+/*
+ *  npi_fflp_cfg_cam_errorcheck_enable
+ *  Enables FCRAM and TCAM error checking
+ */
+npi_status_t
+npi_fflp_cfg_cam_errorcheck_enable(npi_handle_t handle)
+
+{
+	fflp_cfg_1_t fflp_cfg;
+	uint64_t offset;
+	offset = FFLP_CFG_1_REG;
+
+	REG_PIO_READ64(handle, offset, &fflp_cfg.value);
+
+	fflp_cfg.bits.ldw.errordis = BIT_DISABLE;
+	REG_PIO_WRITE64(handle, offset, fflp_cfg.value);
+
+	return (NPI_SUCCESS);
+
+}
+
+/*
+ *  npi_fflp_cfg_llcsnap_enable
+ *  Enables input parser llcsnap recognition
+ */
+npi_status_t
+npi_fflp_cfg_llcsnap_enable(npi_handle_t handle)
+
+{
+
+	fflp_cfg_1_t fflp_cfg;
+	uint64_t offset;
+	offset = FFLP_CFG_1_REG;
+
+	REG_PIO_READ64(handle, offset, &fflp_cfg.value);
+
+	fflp_cfg.bits.ldw.llcsnap = BIT_ENABLE;
+	REG_PIO_WRITE64(handle, offset, fflp_cfg.value);
+
+	return (NPI_SUCCESS);
+
+}
+
+/*
+ *  npi_fflp_cfg_llcsnap_disable
+ *  Disables input parser llcsnap recognition
+ */
+npi_status_t
+npi_fflp_cfg_llcsnap_disable(npi_handle_t handle)
+
+{
+
+
+	fflp_cfg_1_t fflp_cfg;
+	uint64_t offset;
+	offset = FFLP_CFG_1_REG;
+
+	REG_PIO_READ64(handle, offset, &fflp_cfg.value);
+
+	fflp_cfg.bits.ldw.llcsnap = BIT_DISABLE;
+	REG_PIO_WRITE64(handle, offset, fflp_cfg.value);
+
+	return (NPI_SUCCESS);
+
+}
+
+/*
+ * npi_fflp_cfg_fcram_refresh_time
+ * Set FCRAM min and max refresh time.
+ *
+ * Input
+ *      handle			opaque handle interpreted by the underlying OS
+ *	min_time		Minimum Refresh time count
+ *	max_time		maximum Refresh Time count
+ *	sys_time		System Clock rate
+ *
+ *	The counters are 16 bit counters. The maximum refresh time is
+ *	3.9us/clock cycle and the minimum is 400ns/clock cycle.
+ *	It is not clear whether the cycle here is the FCRAM clock cycle;
+ *	if it is, the sys_time parameter is not needed, as there won't be
+ *	any configuration variation due to the system clock cycle.
+ *
+ * Return:
+ * NPI Success/Failure status code
+ */
+npi_status_t
+npi_fflp_cfg_fcram_refresh_time(npi_handle_t handle, uint32_t min_time,
+				    uint32_t max_time, uint32_t sys_time)
+
+{
+
+	uint64_t offset;
+	fcram_ref_tmr_t refresh_timer_reg;
+	uint16_t max, min;
+
+	offset = FFLP_FCRAM_REF_TMR_REG;
+/* need to figure out how to derive the numbers */
+	max = max_time * sys_time;
+	min = min_time * sys_time;
+/* for now, just set with #def values */
+
+	max = FCRAM_REFRESH_DEFAULT_MAX_TIME;
+	min = FCRAM_REFRESH_DEFAULT_MIN_TIME;
+	REG_PIO_READ64(handle, offset, &refresh_timer_reg.value);
+	refresh_timer_reg.bits.ldw.min = min;
+	refresh_timer_reg.bits.ldw.max = max;
+	REG_PIO_WRITE64(handle, offset, refresh_timer_reg.value);
+	return (NPI_SUCCESS);
+}
+
+/*
+ *  npi_fflp_fcram_get_lookup_err_log
+ *  Reports hash table (fcram) lookup errors
+ *
+ *  Input
+ *      handle			opaque handle interpreted by the underlying OS
+ *      err_stat		Pointer to return Error bits
+ *
+ *
+ * Return:
+ * NPI success/failure status code
+ */
+npi_status_t
+npi_fflp_fcram_get_lookup_err_log(npi_handle_t handle,
+				    hash_lookup_err_log_t *err_stat)
+
+{
+
+	hash_lookup_err_log1_t err_log1;
+	hash_lookup_err_log2_t err_log2;
+	uint64_t  err_log1_offset, err_log2_offset;
+	err_log1.value = 0;
+	err_log2.value = 0;
+
+	err_log1_offset = HASH_LKUP_ERR_LOG1_REG;
+	err_log2_offset = HASH_LKUP_ERR_LOG2_REG;
+
+	REG_PIO_READ64(handle, err_log1_offset, &err_log1.value);
+	REG_PIO_READ64(handle, err_log2_offset, &err_log2.value);
+
+	if (err_log1.value) {
+/* nonzero means there are some errors */
+		err_stat->lookup_err = BIT_ENABLE;
+		err_stat->syndrome = err_log2.bits.ldw.syndrome;
+		err_stat->subarea = err_log2.bits.ldw.subarea;
+		err_stat->h1 = err_log2.bits.ldw.h1;
+		err_stat->multi_bit = err_log1.bits.ldw.mult_bit;
+		err_stat->multi_lkup = err_log1.bits.ldw.mult_lk;
+		err_stat->ecc_err = err_log1.bits.ldw.ecc_err;
+		err_stat->uncor_err = err_log1.bits.ldw.cu;
+	} else {
+		err_stat->lookup_err = BIT_DISABLE;
+	}
+
+	return (NPI_SUCCESS);
+
+}
+
+/*
+ * npi_fflp_fcram_get_pio_err_log
+ * Reports hash table PIO read errors for the given partition.
+ * By default, it clears the error bit that was set by the HW.
+ *
+ * Input
+ *      handle:		opaque handle interpreted by the underlying OS
+ *	partid:		partition ID
+ *      err_stat	Pointer to return Error bits
+ *
+ * Return
+ *	NPI success/failure status code
+ */
+npi_status_t
+npi_fflp_fcram_get_pio_err_log(npi_handle_t handle, part_id_t partid,
+				    hash_pio_err_log_t *err_stat)
+{
+
+	hash_tbl_data_log_t err_log;
+	uint64_t offset;
+
+	ASSERT(FCRAM_PARTITION_VALID(partid));
+	if (!FCRAM_PARTITION_VALID(partid)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			" fcram_get_pio_err_log:"
+			" Invalid Partition %d \n",
+			partid));
+		return (NPI_FFLP_FCRAM_PART_INVALID);
+	}
+
+	offset = GET_HASHTBL_PART_OFFSET_NVIR(partid,
+			FFLP_HASH_TBL_DATA_LOG_REG);
+
+	REG_PIO_READ64(handle, offset, &err_log.value);
+
+	if (err_log.bits.ldw.pio_err == BIT_ENABLE) {
+/* nonzero means there are some errors */
+		err_stat->pio_err = BIT_ENABLE;
+		err_stat->syndrome = err_log.bits.ldw.syndrome;
+		err_stat->addr = err_log.bits.ldw.fcram_addr;
+		err_log.value = 0;
+		REG_PIO_WRITE64(handle, offset, err_log.value);
+	} else {
+		err_stat->pio_err = BIT_DISABLE;
+	}
+
+	return (NPI_SUCCESS);
+
+}
+
+/*
+ * npi_fflp_fcram_clr_pio_err_log
+ * Clears the FCRAM PIO error status for the partition.
+ * If there are FCRAM errors, as indicated by the err bit set by HW,
+ * then the SW clears them by clearing the bit.
+ *
+ * Input
+ *      handle:		opaque handle interpreted by the underlying OS
+ *	partid:		partition ID
+ *
+ *
+ * Return
+ *	NPI success/failure status code
+ */
+npi_status_t
+npi_fflp_fcram_clr_pio_err_log(npi_handle_t handle, part_id_t partid)
+{
+	uint64_t offset;
+
+	hash_tbl_data_log_t err_log;
+
+	ASSERT(FCRAM_PARTITION_VALID(partid));
+	if (!FCRAM_PARTITION_VALID(partid)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			" fcram_clr_pio_err_log:"
+			" Invalid Partition %d \n",
+			partid));
+
+		return (NPI_FFLP_FCRAM_PART_INVALID);
+	}
+
+	offset = GET_HASHTBL_PART_OFFSET_NVIR(partid,
+			FFLP_HASH_TBL_DATA_LOG_REG);
+
+	err_log.value = 0;
+	REG_PIO_WRITE64(handle, offset, err_log.value);
+
+
+	return (NPI_SUCCESS);
+
+}
+
+/*
+ * npi_fflp_tcam_get_err_log
+ * Reports TCAM PIO read and lookup errors.
+ * If there are TCAM errors as indicated by err bit set by HW,
+ *  then the SW will clear it by clearing the bit.
+ *
+ * Input
+ *      handle:		opaque handle interpreted by the underlying OS
+ *	err_stat:	 structure to report various TCAM errors.
+ *                       will be updated if there are TCAM errors.
+ *
+ *
+ * Return
+ *	NPI_SUCCESS	Success
+ *
+ *
+ */
+npi_status_t
+npi_fflp_tcam_get_err_log(npi_handle_t handle, tcam_err_log_t *err_stat)
+{
+	tcam_err_t err_log;
+	uint64_t offset;
+
+	offset = FFLP_TCAM_ERR_REG;
+	err_log.value = 0;
+
+	REG_PIO_READ64(handle, offset, &err_log.value);
+
+	if (err_log.bits.ldw.err == BIT_ENABLE) {
+/* non-zero means err */
+		err_stat->tcam_err = BIT_ENABLE;
+		if (err_log.bits.ldw.p_ecc) {
+			err_stat->parity_err = 0;
+			err_stat->ecc_err = 1;
+		} else {
+			err_stat->parity_err = 1;
+			err_stat->ecc_err = 0;
+
+		}
+		err_stat->syndrome = err_log.bits.ldw.syndrome;
+		err_stat->location = err_log.bits.ldw.addr;
+
+
+		err_stat->multi_lkup = err_log.bits.ldw.mult;
+			/* now clear the error */
+		err_log.value = 0;
+		REG_PIO_WRITE64(handle, offset, err_log.value);
+
+	} else {
+		err_stat->tcam_err = 0;
+	}
+	return (NPI_SUCCESS);
+
+}
+
+/*
+ * npi_fflp_tcam_clr_err_log
+ * Clears TCAM PIO read and lookup error status.
+ * If there are TCAM errors as indicated by err bit set by HW,
+ *  then the SW will clear it by clearing the bit.
+ *
+ * Input
+ *         handle:	opaque handle interpreted by the underlying OS
+ *
+ *
+ * Return
+ *	NPI_SUCCESS	Success
+ *
+ *
+ */
+npi_status_t
+npi_fflp_tcam_clr_err_log(npi_handle_t handle)
+{
+	tcam_err_t err_log;
+	uint64_t offset;
+
+	offset = FFLP_TCAM_ERR_REG;
+	err_log.value = 0;
+	REG_PIO_WRITE64(handle, offset, err_log.value);
+
+	return (NPI_SUCCESS);
+
+}
+
+/*
+ * npi_fflp_fcram_err_synd_test
+ * Tests the FCRAM error detection logic.
+ * The error detection logic for the syndrome is tested.
+ * tst0->synd (8bits) are set to select the syndrome bits
+ * to be XOR'ed
+ *
+ * Input
+ *      handle:	opaque handle interpreted by the underlying OS
+ *	syndrome_bits:	 Syndrome bits to select bits to be xor'ed
+ *
+ *
+ * Return
+ *	NPI_SUCCESS	Success
+ *
+ *
+ */
+npi_status_t
+npi_fflp_fcram_err_synd_test(npi_handle_t handle, uint8_t syndrome_bits)
+{
+
+	uint64_t t0_offset;
+	fcram_err_tst0_t tst0;
+	t0_offset = FFLP_FCRAM_ERR_TST0_REG;
+
+	tst0.value = 0;
+	tst0.bits.ldw.syndrome_mask = syndrome_bits;
+
+	REG_PIO_WRITE64(handle, t0_offset, tst0.value);
+
+	return (NPI_SUCCESS);
+
+}
+
+/*
+ * npi_fflp_fcram_err_data_test
+ * Tests the FCRAM error detection logic.
+ * The error detection logic for the datapath is tested.
+ * bits [63:0] are set to select the data bits to be xor'ed
+ *
+ * Input
+ *      handle:	opaque handle interpreted by the underlying OS
+ *	data:	 data bits to select bits to be xor'ed
+ *
+ *
+ * Return
+ *	NPI_SUCCESS	Success
+ *
+ *
+ */
+npi_status_t
+npi_fflp_fcram_err_data_test(npi_handle_t handle, fcram_err_data_t *data)
+{
+
+	uint64_t t1_offset, t2_offset;
+	fcram_err_tst1_t tst1; /* for data bits [31:0] */
+	fcram_err_tst2_t tst2; /* for data bits [63:32] */
+
+	t1_offset = FFLP_FCRAM_ERR_TST1_REG;
+	t2_offset = FFLP_FCRAM_ERR_TST2_REG;
+	tst1.value = 0;
+	tst2.value = 0;
+	tst1.bits.ldw.dat = data->bits.ldw.dat;
+	tst2.bits.ldw.dat = data->bits.hdw.dat;
+
+	REG_PIO_WRITE64(handle, t1_offset, tst1.value);
+	REG_PIO_WRITE64(handle, t2_offset, tst2.value);
+
+	return (NPI_SUCCESS);
+
+}
+
+/*
+ * npi_fflp_cfg_enet_vlan_table_assoc
+ * associates port vlan id to rdc table.
+ *
+ * Input
+ *     handle			opaque handle interpreted by the underlying OS
+ *     mac_portn		port number
+ *     vlan_id			VLAN ID
+ *     rdc_table		RDC Table #
+ *     priority			priority
+ *
+ * Output
+ *
+ *	NPI success/failure status code
+ *
+ */
+npi_status_t
+npi_fflp_cfg_enet_vlan_table_assoc(npi_handle_t handle, uint8_t mac_portn,
+				    vlan_id_t vlan_id, uint8_t rdc_table,
+				    uint8_t priority)
+{
+
+	fflp_enet_vlan_tbl_t cfg;
+	uint64_t offset;
+	uint8_t vlan_parity[8] = {0, 1, 1, 2, 1, 2, 2, 3};
+	uint8_t parity_bit;
+
+	ASSERT(FFLP_VLAN_VALID(vlan_id));
+	if (!FFLP_VLAN_VALID(vlan_id)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			" fflp_cfg_enet_vlan_table:"
+			" Invalid vlan ID %d \n",
+			vlan_id));
+		return (NPI_FFLP_VLAN_INVALID);
+	}
+
+	ASSERT(FFLP_PORT_VALID(mac_portn));
+	if (!FFLP_PORT_VALID(mac_portn)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			" fflp_cfg_enet_vlan_table:"
+			" Invalid port num %d \n",
+			mac_portn));
+		return (NPI_FFLP_PORT_INVALID);
+	}
+
+	ASSERT(FFLP_RDC_TABLE_VALID(rdc_table));
+	if (!FFLP_RDC_TABLE_VALID(rdc_table)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			" fflp_cfg_enet_vlan_table:"
+			" Invalid RDC Table %d \n",
+			rdc_table));
+		return (NPI_FFLP_RDC_TABLE_INVALID);
+	}
+
+	offset = FFLP_VLAN_OFFSET(vlan_id, FFLP_ENET_VLAN_TBL_REG);
+	REG_PIO_READ64(handle, offset, &cfg.value);
+
+	switch (mac_portn) {
+		case 0:
+			cfg.bits.ldw.vlanrdctbln0 = rdc_table;
+			if (priority)
+				cfg.bits.ldw.vpr0 = BIT_ENABLE;
+			else
+				cfg.bits.ldw.vpr0 = BIT_DISABLE;
+				/* set the parity bits */
+			parity_bit = vlan_parity[cfg.bits.ldw.vlanrdctbln0] +
+				vlan_parity[cfg.bits.ldw.vlanrdctbln1] +
+				cfg.bits.ldw.vpr0 + cfg.bits.ldw.vpr1;
+			cfg.bits.ldw.parity0 = parity_bit & 0x1;
+			break;
+		case 1:
+			cfg.bits.ldw.vlanrdctbln1 = rdc_table;
+			if (priority)
+				cfg.bits.ldw.vpr1 = BIT_ENABLE;
+			else
+				cfg.bits.ldw.vpr1 = BIT_DISABLE;
+				/* set the parity bits */
+			parity_bit = vlan_parity[cfg.bits.ldw.vlanrdctbln0] +
+				vlan_parity[cfg.bits.ldw.vlanrdctbln1] +
+				cfg.bits.ldw.vpr0 + cfg.bits.ldw.vpr1;
+			cfg.bits.ldw.parity0 = parity_bit & 0x1;
+
+			break;
+		case 2:
+			cfg.bits.ldw.vlanrdctbln2 = rdc_table;
+			if (priority)
+				cfg.bits.ldw.vpr2 = BIT_ENABLE;
+			else
+				cfg.bits.ldw.vpr2 = BIT_DISABLE;
+				/* set the parity bits */
+			parity_bit = vlan_parity[cfg.bits.ldw.vlanrdctbln2] +
+				vlan_parity[cfg.bits.ldw.vlanrdctbln3] +
+				cfg.bits.ldw.vpr2 + cfg.bits.ldw.vpr3;
+			cfg.bits.ldw.parity1 = parity_bit & 0x1;
+
+			break;
+		case 3:
+			cfg.bits.ldw.vlanrdctbln3 = rdc_table;
+			if (priority)
+				cfg.bits.ldw.vpr3 = BIT_ENABLE;
+			else
+				cfg.bits.ldw.vpr3 = BIT_DISABLE;
+				/* set the parity bits */
+			parity_bit = vlan_parity[cfg.bits.ldw.vlanrdctbln2] +
+				vlan_parity[cfg.bits.ldw.vlanrdctbln3] +
+				cfg.bits.ldw.vpr2 + cfg.bits.ldw.vpr3;
+			cfg.bits.ldw.parity1 = parity_bit & 0x1;
+			break;
+		default:
+			return (NPI_FFLP_SW_PARAM_ERROR);
+	}
+
+	REG_PIO_WRITE64(handle, offset, cfg.value);
+	return (NPI_SUCCESS);
+}
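+
+/*
+ * Illustrative usage sketch (hypothetical values).  Associating VLAN 10 on
+ * MAC port 0 with RDC table 1, without giving VLAN priority over L2DA:
+ *
+ *	(void) npi_fflp_cfg_enet_vlan_table_assoc(handle, 0, 10, 1, 0);
+ *
+ * The vlan_parity[] table above holds the bit counts of the possible RDC
+ * table numbers; each parity bit in the register covers two adjacent
+ * vlanrdctbln fields plus their vpr bits, as computed in the cases above.
+ */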
+
+/*
+ * npi_fflp_cfg_enet_vlan_table_set_pri
+ * sets the vlan based classification priority with respect to L2DA
+ * classification.
+ *
+ * Input
+ *     handle		opaque handle interpreted by the underlying OS
+ *     mac_portn	port number
+ *     vlan_id		VLAN ID
+ *     priority 	priority
+ *			1: vlan classification has higher priority
+ *			0: l2da classification has higher priority
+ *
+ * Output
+ *
+ *	NPI success/failure status code
+ */
+npi_status_t
+npi_fflp_cfg_enet_vlan_table_set_pri(npi_handle_t handle, uint8_t mac_portn,
+				    vlan_id_t vlan_id, uint8_t priority)
+{
+
+	fflp_enet_vlan_tbl_t cfg;
+	uint64_t offset;
+	uint64_t old_value;
+
+	ASSERT(FFLP_VLAN_VALID(vlan_id));
+	if (!FFLP_VLAN_VALID(vlan_id)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			" enet_vlan_table set pri:"
+			" Invalid vlan ID %d \n",
+			vlan_id));
+		return (NPI_FFLP_VLAN_INVALID);
+	}
+
+	ASSERT(FFLP_PORT_VALID(mac_portn));
+	if (!FFLP_PORT_VALID(mac_portn)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			" enet_vlan_table set pri:"
+			" Invalid port num %d \n",
+			mac_portn));
+		return (NPI_FFLP_PORT_INVALID);
+	}
+
+
+	offset = FFLP_ENET_VLAN_TBL_REG + (vlan_id  << 3);
+	REG_PIO_READ64(handle, offset, &cfg.value);
+	old_value = cfg.value;
+	switch (mac_portn) {
+		case 0:
+			if (priority)
+				cfg.bits.ldw.vpr0 = BIT_ENABLE;
+			else
+				cfg.bits.ldw.vpr0 = BIT_DISABLE;
+			break;
+		case 1:
+			if (priority)
+				cfg.bits.ldw.vpr1 = BIT_ENABLE;
+			else
+				cfg.bits.ldw.vpr1 = BIT_DISABLE;
+			break;
+		case 2:
+			if (priority)
+				cfg.bits.ldw.vpr2 = BIT_ENABLE;
+			else
+				cfg.bits.ldw.vpr2 = BIT_DISABLE;
+			break;
+		case 3:
+			if (priority)
+				cfg.bits.ldw.vpr3 = BIT_ENABLE;
+			else
+				cfg.bits.ldw.vpr3 = BIT_DISABLE;
+			break;
+		default:
+			return (NPI_FFLP_SW_PARAM_ERROR);
+	}
+	if (old_value != cfg.value) {
+		if (mac_portn > 1)
+			cfg.bits.ldw.parity1++;
+		else
+			cfg.bits.ldw.parity0++;
+
+		REG_PIO_WRITE64(handle, offset, cfg.value);
+	}
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_fflp_cfg_vlan_table_clear
+ * Clears the vlan RDC table
+ *
+ * Input
+ *     handle		opaque handle interpreted by the underlying OS
+ *     vlan_id		VLAN ID
+ *
+ * Output
+ *
+ *	NPI success/failure status code
+ *
+ */
+npi_status_t
+npi_fflp_cfg_vlan_table_clear(npi_handle_t handle, vlan_id_t vlan_id)
+{
+
+	uint64_t offset;
+	uint64_t clear = 0ULL;
+	vlan_id_t start_vlan = 0;
+
+	if ((vlan_id < start_vlan) || (vlan_id >= NXGE_MAX_VLANS)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			" enet_vlan_table clear:"
+			" Invalid vlan ID %d \n",
+			vlan_id));
+		return (NPI_FFLP_VLAN_INVALID);
+	}
+
+
+	offset = FFLP_VLAN_OFFSET(vlan_id, FFLP_ENET_VLAN_TBL_REG);
+
+	REG_PIO_WRITE64(handle, offset, clear);
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_fflp_vlan_tbl_get_err_log
+ * Reports VLAN Table  errors.
+ * If there are VLAN Table errors as indicated by err bit set by HW,
+ *  then the SW will clear it by clearing the bit.
+ *
+ * Input
+ *      handle:		opaque handle interpreted by the underlying OS
+ *	err_stat:	 structure to report various VLAN table errors.
+ *                       will be updated if there are errors.
+ *
+ *
+ * Return
+ *	NPI_SUCCESS	Success
+ *
+ *
+ */
+npi_status_t
+npi_fflp_vlan_tbl_get_err_log(npi_handle_t handle, vlan_tbl_err_log_t *err_stat)
+{
+	vlan_par_err_t err_log;
+	uint64_t offset;
+
+
+	offset = FFLP_VLAN_PAR_ERR_REG;
+	err_log.value = 0;
+
+	REG_PIO_READ64(handle, offset, &err_log.value);
+
+	if (err_log.bits.ldw.err == BIT_ENABLE) {
+/* non-zero means err */
+		err_stat->err = BIT_ENABLE;
+		err_stat->multi = err_log.bits.ldw.m_err;
+		err_stat->addr = err_log.bits.ldw.addr;
+		err_stat->data = err_log.bits.ldw.data;
+/* now clear the error */
+		err_log.value = 0;
+		REG_PIO_WRITE64(handle, offset, err_log.value);
+
+	} else {
+		err_stat->err = 0;
+	}
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_fflp_vlan_tbl_clr_err_log
+ * Clears VLAN Table PIO  error status.
+ * If there are VLAN Table errors as indicated by err bit set by HW,
+ *  then the SW will clear it by clearing the bit.
+ *
+ * Input
+ *         handle:	opaque handle interpreted by the underlying OS
+ *
+ *
+ * Return
+ *	NPI_SUCCESS	Success
+ *
+ *
+ */
+npi_status_t
+npi_fflp_vlan_tbl_clr_err_log(npi_handle_t handle)
+{
+	vlan_par_err_t err_log;
+	uint64_t offset;
+
+	offset = FFLP_VLAN_PAR_ERR_REG;
+	err_log.value = 0;
+
+	REG_PIO_WRITE64(handle, offset, err_log.value);
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_fflp_cfg_enet_usr_cls_set()
+ * Configures a user configurable ethernet class
+ *
+ * Input
+ *      handle:		opaque handle interpreted by the underlying OS
+ *      class:       Ethernet Class  class
+ *		     (TCAM_CLASS_ETYPE_1 or TCAM_CLASS_ETYPE_2)
+ *      enet_type:   16 bit Ethernet Type value, corresponding to bytes
+ *                   [13:14] of the Ethernet frame.
+ *
+ *  by default, the class will be disabled until explicitly enabled.
+ *
+ * Return
+ * NPI success/failure status code
+ */
+npi_status_t
+npi_fflp_cfg_enet_usr_cls_set(npi_handle_t handle,
+			    tcam_class_t class, uint16_t enet_type)
+{
+	uint64_t offset;
+	tcam_class_prg_ether_t cls_cfg;
+	cls_cfg.value = 0x0;
+
+/* check if etype is valid */
+	ASSERT(TCAM_L2_USR_CLASS_VALID(class));
+	if (!TCAM_L2_USR_CLASS_VALID(class)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			" npi_fflp_cfg_enet_usr_cls_set:"
+			" Invalid class %d \n",
+			class));
+		return (NPI_FFLP_TCAM_CLASS_INVALID);
+	}
+	offset = GET_TCAM_CLASS_OFFSET(class);
+
+/*
+ * etype check code
+ *
+ * if (check_fail)
+ *  return (NPI_FAILURE | NPI_SW_ERROR);
+ */
+
+	cls_cfg.bits.ldw.etype = enet_type;
+	cls_cfg.bits.ldw.valid = BIT_DISABLE;
+	REG_PIO_WRITE64(handle, offset, cls_cfg.value);
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_fflp_cfg_enet_usr_cls_enable()
+ * Enable previously configured TCAM user configurable Ethernet classes.
+ *
+ * Input
+ *      handle:	opaque handle interpreted by the underlying OS
+ *      class:       Ethernet Class  class
+ *		     (TCAM_CLASS_ETYPE_1 or TCAM_CLASS_ETYPE_2)
+ *
+ * Return
+ * NPI success/failure status code
+ */
+npi_status_t
+npi_fflp_cfg_enet_usr_cls_enable(npi_handle_t handle, tcam_class_t class)
+{
+	uint64_t offset;
+	tcam_class_prg_ether_t cls_cfg;
+
+	ASSERT(TCAM_L2_USR_CLASS_VALID(class));
+	if (!TCAM_L2_USR_CLASS_VALID(class)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			" npi_fflp_cfg_enet_usr_cls_enable:"
+			" Invalid class %d \n",
+			class));
+		return (NPI_FFLP_TCAM_CLASS_INVALID);
+	}
+
+	offset = GET_TCAM_CLASS_OFFSET(class);
+
+	REG_PIO_READ64(handle, offset, &cls_cfg.value);
+	cls_cfg.bits.ldw.valid = BIT_ENABLE;
+	REG_PIO_WRITE64(handle, offset, cls_cfg.value);
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_fflp_cfg_enet_usr_cls_disable()
+ * Disables previously configured TCAM user configurable Ethernet classes.
+ *
+ * Input
+ *      handle:	opaque handle interpreted by the underlying OS
+ *      class:       Ethernet Class  class
+ *		     (TCAM_CLASS_ETYPE_1 or TCAM_CLASS_ETYPE_2)
+ *
+ * Return
+ * NPI success/failure status code
+ */
+npi_status_t
+npi_fflp_cfg_enet_usr_cls_disable(npi_handle_t handle, tcam_class_t class)
+{
+	uint64_t offset;
+	tcam_class_prg_ether_t cls_cfg;
+
+	ASSERT(TCAM_L2_USR_CLASS_VALID(class));
+	if (!TCAM_L2_USR_CLASS_VALID(class)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			" npi_fflp_cfg_enet_usr_cls_disable:"
+			" Invalid class %d \n",
+			class));
+		return (NPI_FFLP_TCAM_CLASS_INVALID);
+	}
+
+	offset = GET_TCAM_CLASS_OFFSET(class);
+
+	REG_PIO_READ64(handle, offset, &cls_cfg.value);
+	cls_cfg.bits.ldw.valid = BIT_DISABLE;
+
+	REG_PIO_WRITE64(handle, offset, cls_cfg.value);
+	return (NPI_SUCCESS);
+}
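+
+/*
+ * Illustrative sequence (the ethertype value is hypothetical).  A user
+ * Ethernet class is programmed disabled by the _set() routine and must be
+ * enabled explicitly afterwards:
+ *
+ *	(void) npi_fflp_cfg_enet_usr_cls_set(handle, TCAM_CLASS_ETYPE_1,
+ *	    my_etype);
+ *	(void) npi_fflp_cfg_enet_usr_cls_enable(handle, TCAM_CLASS_ETYPE_1);
+ */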
+
+/*
+ * npi_fflp_cfg_ip_usr_cls_set()
+ * Configures the TCAM user configurable IP classes.
+ *
+ * Input
+ *      handle:		opaque handle interpreted by the underlying OS
+ *      class:       IP Class  class
+ *		     (TCAM_CLASS_IP_USER_4 <= class <= TCAM_CLASS_IP_USER_7)
+ *      tos:         IP TOS bits
+ *      tos_mask:    IP TOS bits mask. bits with mask bits set will be used
+ *      proto:       IP Proto
+ *      ver:         IP Version
+ * by default, the class is disabled until explicitly enabled
+ *
+ * Return
+ * NPI success/failure status code
+ */
+npi_status_t
+npi_fflp_cfg_ip_usr_cls_set(npi_handle_t handle, tcam_class_t class,
+			    uint8_t tos, uint8_t tos_mask,
+			    uint8_t proto, uint8_t ver)
+{
+	uint64_t offset;
+	tcam_class_prg_ip_t ip_cls_cfg;
+
+	ASSERT(TCAM_L3_USR_CLASS_VALID(class));
+	if (!TCAM_L3_USR_CLASS_VALID(class)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			" npi_fflp_cfg_ip_usr_cls_set:"
+			" Invalid class %d \n",
+			class));
+		return (NPI_FFLP_TCAM_CLASS_INVALID);
+	}
+
+	offset = GET_TCAM_CLASS_OFFSET(class);
+
+	ip_cls_cfg.bits.ldw.pid = proto;
+	ip_cls_cfg.bits.ldw.ipver = ver;
+	ip_cls_cfg.bits.ldw.tos = tos;
+	ip_cls_cfg.bits.ldw.tosmask = tos_mask;
+	ip_cls_cfg.bits.ldw.valid = 0;
+	REG_PIO_WRITE64(handle, offset, ip_cls_cfg.value);
+	return (NPI_SUCCESS);
+
+}
+
+/*
+ * npi_fflp_cfg_ip_usr_cls_enable()
+ * Enable previously configured TCAM user configurable IP classes.
+ *
+ * Input
+ *      handle:	opaque handle interpreted by the underlying OS
+ *      class:       IP Class  class
+ *		     (TCAM_CLASS_IP_USER_4 <= class <= TCAM_CLASS_IP_USER_7)
+ *
+ * Return
+ * NPI success/failure status code
+ */
+npi_status_t
+npi_fflp_cfg_ip_usr_cls_enable(npi_handle_t handle, tcam_class_t class)
+{
+	uint64_t offset;
+	tcam_class_prg_ip_t ip_cls_cfg;
+
+	ASSERT(TCAM_L3_USR_CLASS_VALID(class));
+	if (!TCAM_L3_USR_CLASS_VALID(class)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			" npi_fflp_cfg_ip_usr_cls_enable:"
+			" Invalid class %d \n",
+			class));
+		return (NPI_FFLP_TCAM_CLASS_INVALID);
+	}
+
+	offset = GET_TCAM_CLASS_OFFSET(class);
+	REG_PIO_READ64(handle, offset, &ip_cls_cfg.value);
+	ip_cls_cfg.bits.ldw.valid = 1;
+
+	REG_PIO_WRITE64(handle, offset, ip_cls_cfg.value);
+	return (NPI_SUCCESS);
+
+}
+
+/*
+ * npi_fflp_cfg_ip_usr_cls_disable()
+ * Disables previously configured TCAM user configurable IP classes.
+ *
+ * Input
+ *      handle:	opaque handle interpreted by the underlying OS
+ *      class:       IP Class  class
+ *		     (TCAM_CLASS_IP_USER_4 <= class <= TCAM_CLASS_IP_USER_7)
+ *
+ * Return
+ * NPI success/failure status code
+ */
+npi_status_t
+npi_fflp_cfg_ip_usr_cls_disable(npi_handle_t handle, tcam_class_t class)
+{
+	uint64_t offset;
+	tcam_class_prg_ip_t ip_cls_cfg;
+
+	ASSERT(TCAM_L3_USR_CLASS_VALID(class));
+	if (!TCAM_L3_USR_CLASS_VALID(class)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			" npi_fflp_cfg_ip_usr_cls_disable:"
+			" Invalid class %d \n",
+			class));
+		return (NPI_FFLP_TCAM_CLASS_INVALID);
+	}
+
+	offset = GET_TCAM_CLASS_OFFSET(class);
+
+	REG_PIO_READ64(handle, offset, &ip_cls_cfg.value);
+	ip_cls_cfg.bits.ldw.valid = 0;
+
+	REG_PIO_WRITE64(handle, offset, ip_cls_cfg.value);
+	return (NPI_SUCCESS);
+
+}
+
+/*
+ * npi_fflp_cfg_ip_cls_tcam_key ()
+ *
+ * Configures the TCAM key generation for the IP classes
+ *
+ * Input
+ *      handle:	opaque handle interpreted by the underlying OS
+ *      l3_class:        IP class to configure key generation
+ *      cfg:             Configuration bits:
+ *                   discard:      Discard all frames of this class
+ *                   use_ip_saddr: use ip src address (for ipv6)
+ *                   use_ip_daddr: use ip dest address (for ipv6)
+ *                   lookup_enable: Enable Lookup
+ *
+ *
+ * Return
+ * NPI success/failure status code
+ */
+npi_status_t
+npi_fflp_cfg_ip_cls_tcam_key(npi_handle_t handle,
+			    tcam_class_t l3_class, tcam_key_cfg_t *cfg)
+{
+	uint64_t offset;
+	tcam_class_key_ip_t tcam_cls_cfg;
+
+	ASSERT(TCAM_L3_CLASS_VALID(l3_class));
+	if (!(TCAM_L3_CLASS_VALID(l3_class))) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			" npi_fflp_cfg_ip_cls_tcam_key:"
+			" Invalid class %d \n",
+			l3_class));
+		return (NPI_FFLP_TCAM_CLASS_INVALID);
+	}
+
+	if ((cfg->use_ip_daddr) &&
+		(cfg->use_ip_saddr == cfg->use_ip_daddr)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			    " npi_fflp_cfg_ip_cls_tcam_key:"
+			    " Invalid configuration %x for class %d \n",
+			    *cfg, l3_class));
+		return (NPI_FFLP_SW_PARAM_ERROR);
+	}
+
+
+	offset = GET_TCAM_KEY_OFFSET(l3_class);
+	tcam_cls_cfg.value = 0;
+
+	if (cfg->discard) {
+		tcam_cls_cfg.bits.ldw.discard = 1;
+	}
+
+	if (cfg->use_ip_saddr) {
+		tcam_cls_cfg.bits.ldw.ipaddr = 1;
+	}
+
+	if (cfg->use_ip_daddr) {
+		tcam_cls_cfg.bits.ldw.ipaddr = 0;
+	}
+
+	if (cfg->lookup_enable) {
+		tcam_cls_cfg.bits.ldw.tsel = 1;
+	}
+
+	REG_PIO_WRITE64(handle, offset, tcam_cls_cfg.value);
+	return (NPI_SUCCESS);
+}
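+
+/*
+ * Usage sketch (illustrative only, kept out of the build by an
+ * example-only #ifdef guard): enable TCAM lookup for one L3 class,
+ * keying on the IP destination address.  TCAM_CLASS_TCP_IPV6 is only an
+ * example; any class accepted by TCAM_L3_CLASS_VALID() could be used.
+ */
+#ifdef	NPI_FFLP_EXAMPLES
+static npi_status_t
+example_cfg_ip_cls_tcam_key(npi_handle_t handle)
+{
+	tcam_key_cfg_t cfg;
+
+	cfg.discard = 0;
+	cfg.use_ip_saddr = 0;	/* saddr and daddr are mutually exclusive */
+	cfg.use_ip_daddr = 1;
+	cfg.lookup_enable = 1;
+	return (npi_fflp_cfg_ip_cls_tcam_key(handle,
+	    TCAM_CLASS_TCP_IPV6, &cfg));
+}
+#endif	/* NPI_FFLP_EXAMPLES */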
+
+/*
+ * npi_fflp_cfg_ip_cls_flow_key ()
+ *
+ * Configures the flow key generation for the IP classes
+ * Flow key is used to generate the H1 hash function value
+ * The fields used for the generation are configured using this
+ * NPI function.
+ *
+ * Input
+ *      handle:	opaque handle interpreted by the underlying OS
+ *      l3_class:        IP class to configure flow key generation
+ *      cfg:             Configuration bits:
+ *                   use_proto:     Use IP proto field
+ *                   use_dport:     use l4 destination port
+ *                   use_sport:     use l4 source port
+ *                   ip_opts_exist: IP Options Present
+ *                   use_daddr:     use ip dest address
+ *                   use_saddr:     use ip source address
+ *                   use_vlan:      use VLAN ID
+ *                   use_l2da:      use L2 Dest MAC Address
+ *                   use_portnum:   use L2 virtual port number
+ *
+ *
+ * Return
+ * NPI success/failure status code
+ */
+npi_status_t
+npi_fflp_cfg_ip_cls_flow_key(npi_handle_t handle, tcam_class_t l3_class,
+							    flow_key_cfg_t *cfg)
+{
+	uint64_t offset;
+	flow_class_key_ip_t flow_cfg_reg;
+
+	ASSERT(TCAM_L3_CLASS_VALID(l3_class));
+	if (!(TCAM_L3_CLASS_VALID(l3_class))) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			" npi_fflp_cfg_ip_cls_flow_key:"
+			" Invalid class %d \n",
+			l3_class));
+		return (NPI_FFLP_TCAM_CLASS_INVALID);
+	}
+
+
+	offset = GET_FLOW_KEY_OFFSET(l3_class);
+	flow_cfg_reg.value = 0; /* default */
+
+	if (cfg->use_proto) {
+		flow_cfg_reg.bits.ldw.proto = 1;
+	}
+
+	if (cfg->use_dport) {
+		flow_cfg_reg.bits.ldw.l4_1 = 2;
+		if (cfg->ip_opts_exist)
+			flow_cfg_reg.bits.ldw.l4_1 = 3;
+	}
+
+	if (cfg->use_sport) {
+		flow_cfg_reg.bits.ldw.l4_0 = 2;
+		if (cfg->ip_opts_exist)
+			flow_cfg_reg.bits.ldw.l4_0 = 3;
+	}
+
+	if (cfg->use_daddr) {
+		flow_cfg_reg.bits.ldw.ipda = BIT_ENABLE;
+	}
+
+	if (cfg->use_saddr) {
+		flow_cfg_reg.bits.ldw.ipsa = BIT_ENABLE;
+	}
+
+	if (cfg->use_vlan) {
+		flow_cfg_reg.bits.ldw.vlan = BIT_ENABLE;
+	}
+
+	if (cfg->use_l2da) {
+		flow_cfg_reg.bits.ldw.l2da = BIT_ENABLE;
+	}
+
+	if (cfg->use_portnum) {
+		flow_cfg_reg.bits.ldw.port = BIT_ENABLE;
+	}
+
+	REG_PIO_WRITE64(handle, offset, flow_cfg_reg.value);
+	return (NPI_SUCCESS);
+
+}
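+
+/*
+ * Usage sketch (illustrative only, kept out of the build by an
+ * example-only #ifdef guard): hash TCP/IPv4 flows on the classic
+ * 5-tuple (protocol, source/destination address, source/destination
+ * port).  The class choice is an assumption made for the example.
+ */
+#ifdef	NPI_FFLP_EXAMPLES
+static npi_status_t
+example_cfg_ip_cls_flow_key(npi_handle_t handle)
+{
+	flow_key_cfg_t cfg;
+
+	cfg.use_proto = 1;
+	cfg.use_dport = 1;
+	cfg.use_sport = 1;
+	cfg.ip_opts_exist = 0;
+	cfg.use_daddr = 1;
+	cfg.use_saddr = 1;
+	cfg.use_vlan = 0;
+	cfg.use_l2da = 0;
+	cfg.use_portnum = 0;
+	return (npi_fflp_cfg_ip_cls_flow_key(handle,
+	    TCAM_CLASS_TCP_IPV4, &cfg));
+}
+#endif	/* NPI_FFLP_EXAMPLES */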
+
+npi_status_t
+npi_fflp_cfg_ip_cls_flow_key_get(npi_handle_t handle,
+				    tcam_class_t l3_class,
+				    flow_key_cfg_t *cfg)
+{
+	uint64_t offset;
+	flow_class_key_ip_t flow_cfg_reg;
+
+	ASSERT(TCAM_L3_CLASS_VALID(l3_class));
+	if (!(TCAM_L3_CLASS_VALID(l3_class))) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_fflp_cfg_ip_cls_flow_key_get:"
+				    " Invalid class %d \n",
+				    l3_class));
+		return (NPI_FFLP_TCAM_CLASS_INVALID);
+	}
+
+	offset = GET_FLOW_KEY_OFFSET(l3_class);
+
+	cfg->use_proto = 0;
+	cfg->use_dport = 0;
+	cfg->use_sport = 0;
+	cfg->ip_opts_exist = 0;
+	cfg->use_daddr = 0;
+	cfg->use_saddr = 0;
+	cfg->use_vlan = 0;
+	cfg->use_l2da = 0;
+	cfg->use_portnum  = 0;
+
+	REG_PIO_READ64(handle, offset, &flow_cfg_reg.value);
+
+	if (flow_cfg_reg.bits.ldw.proto) {
+		cfg->use_proto = 1;
+	}
+
+	if (flow_cfg_reg.bits.ldw.l4_1 == 2) {
+		cfg->use_dport = 1;
+	}
+
+	if (flow_cfg_reg.bits.ldw.l4_1 == 3) {
+		cfg->use_dport = 1;
+		cfg->ip_opts_exist = 1;
+	}
+
+	if (flow_cfg_reg.bits.ldw.l4_0 == 2) {
+		cfg->use_sport = 1;
+	}
+
+	if (flow_cfg_reg.bits.ldw.l4_0 == 3) {
+		cfg->use_sport = 1;
+		cfg->ip_opts_exist = 1;
+	}
+
+	if (flow_cfg_reg.bits.ldw.ipda) {
+		cfg->use_daddr = 1;
+	}
+
+	if (flow_cfg_reg.bits.ldw.ipsa) {
+		cfg->use_saddr = 1;
+	}
+
+	if (flow_cfg_reg.bits.ldw.vlan) {
+		cfg->use_vlan = 1;
+	}
+
+	if (flow_cfg_reg.bits.ldw.l2da) {
+		cfg->use_l2da = 1;
+	}
+
+	if (flow_cfg_reg.bits.ldw.port) {
+		cfg->use_portnum = 1;
+	}
+
+	NPI_DEBUG_MSG((handle.function, NPI_FFLP_CTL,
+			    " npi_fflp_cfg_ip_cls_flow_key_get %llx \n",
+			    flow_cfg_reg.value));
+
+	return (NPI_SUCCESS);
+
+}
+
+npi_status_t
+npi_fflp_cfg_ip_cls_tcam_key_get(npi_handle_t handle,
+			    tcam_class_t l3_class, tcam_key_cfg_t *cfg)
+{
+	uint64_t offset;
+	tcam_class_key_ip_t tcam_cls_cfg;
+
+	ASSERT(TCAM_L3_CLASS_VALID(l3_class));
+	if (!(TCAM_L3_CLASS_VALID(l3_class))) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_fflp_cfg_ip_cls_tcam_key_get:"
+				    " Invalid class %d \n",
+				    l3_class));
+		return (NPI_FFLP_TCAM_CLASS_INVALID);
+	}
+
+
+	offset = GET_TCAM_KEY_OFFSET(l3_class);
+
+	REG_PIO_READ64(handle, offset, &tcam_cls_cfg.value);
+
+	cfg->discard = 0;
+	cfg->use_ip_saddr = 0;
+	cfg->use_ip_daddr = 1;
+	cfg->lookup_enable = 0;
+
+	if (tcam_cls_cfg.bits.ldw.discard)
+			cfg->discard = 1;
+
+	if (tcam_cls_cfg.bits.ldw.ipaddr) {
+		cfg->use_ip_saddr = 1;
+		cfg->use_ip_daddr = 0;
+	}
+
+	if (tcam_cls_cfg.bits.ldw.tsel) {
+		cfg->lookup_enable	= 1;
+	}
+
+	NPI_DEBUG_MSG((handle.function, NPI_CTL,
+				    " npi_fflp_cfg_ip_cls_tcam_key_get %llx \n",
+				    tcam_cls_cfg.value));
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_fflp_cfg_fcram_access ()
+ *
+ * Sets the ratio between the FCRAM pio and lookup access
+ * Input:
+ * handle:	opaque handle interpreted by the underlying OS
+ * access_ratio: 0  Lookup has the highest priority
+ *		 15 PIO has maximum possible priority
+ *
+ * Return
+ * NPI success/failure status code
+ */
+npi_status_t
+npi_fflp_cfg_fcram_access(npi_handle_t handle, uint8_t access_ratio)
+{
+
+	fflp_cfg_1_t fflp_cfg;
+	uint64_t offset;
+	offset = FFLP_CFG_1_REG;
+
+	if (access_ratio > 0xf) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			" npi_fflp_cfg_fcram_access:"
+			" Invalid access ratio %d \n",
+			access_ratio));
+		return (NPI_FFLP_ERROR | NPI_FFLP_SW_PARAM_ERROR);
+	}
+
+	REG_PIO_READ64(handle, offset, &fflp_cfg.value);
+	fflp_cfg.bits.ldw.fflpinitdone = 0;
+	fflp_cfg.bits.ldw.fcramratio = access_ratio;
+	REG_PIO_WRITE64(handle, offset, fflp_cfg.value);
+	REG_PIO_READ64(handle, offset, &fflp_cfg.value);
+	fflp_cfg.bits.ldw.fflpinitdone = 1;
+	REG_PIO_WRITE64(handle, offset, fflp_cfg.value);
+	return (NPI_SUCCESS);
+
+}
+
+/*
+ * npi_fflp_cfg_tcam_access ()
+ *
+ * Sets the ratio between the TCAM pio and lookup access
+ * Input:
+ * handle:	opaque handle interpreted by the underlying OS
+ * access_ratio: 0  Lookup has the highest priority
+ *		 15 PIO has maximum possible priority
+ * Return
+ * NPI success/failure status code
+ */
+npi_status_t
+npi_fflp_cfg_tcam_access(npi_handle_t handle, uint8_t access_ratio)
+{
+	fflp_cfg_1_t fflp_cfg;
+	uint64_t offset;
+	offset = FFLP_CFG_1_REG;
+
+	if (access_ratio > 0xf) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			" npi_fflp_cfg_tcam_access:"
+			" Invalid access ratio %d \n",
+			access_ratio));
+		return (NPI_FFLP_ERROR | NPI_FFLP_SW_PARAM_ERROR);
+	}
+
+	REG_PIO_READ64(handle, offset, &fflp_cfg.value);
+	fflp_cfg.bits.ldw.fflpinitdone = 0;
+	fflp_cfg.bits.ldw.camratio = access_ratio;
+
+	/* Since the CAM latency is fixed, set it here as well. */
+	fflp_cfg.bits.ldw.camlatency = TCAM_DEFAULT_LATENCY;
+	REG_PIO_WRITE64(handle, offset, fflp_cfg.value);
+	REG_PIO_READ64(handle, offset, &fflp_cfg.value);
+	fflp_cfg.bits.ldw.fflpinitdone = 1;
+	REG_PIO_WRITE64(handle, offset, fflp_cfg.value);
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_fflp_cfg_hash_h1poly()
+ * Initializes the H1 hash generation logic.
+ *
+ * Input
+ *      handle:	opaque handle interpreted by the underlying OS
+ *      init_value:       The initial value (seed)
+ *
+ * Return
+ * NPI success/failure status code
+ */
+npi_status_t
+npi_fflp_cfg_hash_h1poly(npi_handle_t handle, uint32_t init_value)
+{
+
+
+	hash_h1poly_t h1_cfg;
+	uint64_t offset;
+	offset = FFLP_H1POLY_REG;
+
+	h1_cfg.value = 0;
+	h1_cfg.bits.ldw.init_value = init_value;
+
+	REG_PIO_WRITE64(handle, offset, h1_cfg.value);
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_fflp_cfg_hash_h2poly()
+ * Initializes the H2 hash generation logic.
+ *
+ * Input
+ *      handle:	opaque handle interpreted by the underlying OS
+ *      init_value:       The initial value (seed)
+ *
+ * Return
+ * NPI_SUCCESS
+ *
+ */
+npi_status_t
+npi_fflp_cfg_hash_h2poly(npi_handle_t handle, uint16_t init_value)
+{
+
+
+	hash_h2poly_t h2_cfg;
+	uint64_t offset;
+	offset = FFLP_H2POLY_REG;
+
+	h2_cfg.value = 0;
+	h2_cfg.bits.ldw.init_value = init_value;
+
+	REG_PIO_WRITE64(handle, offset, h2_cfg.value);
+	return (NPI_SUCCESS);
+
+
+}
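+
+/*
+ * Usage sketch (illustrative only, kept out of the build by an
+ * example-only #ifdef guard): seed the H1 and H2 hash polynomials.
+ * The seed values below are arbitrary examples.
+ */
+#ifdef	NPI_FFLP_EXAMPLES
+static void
+example_cfg_hash_seeds(npi_handle_t handle)
+{
+	(void) npi_fflp_cfg_hash_h1poly(handle, 0xbab4face);
+	(void) npi_fflp_cfg_hash_h2poly(handle, 0x8888);
+}
+#endif	/* NPI_FFLP_EXAMPLES */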
+
+/*
+ *  npi_fflp_cfg_fcram_reset
+ *  Initializes the FCRAM reset sequence.
+ *
+ *  Input
+ *      handle:		opaque handle interpreted by the underlying OS
+ *	strength:		FCRAM Drive strength
+ *				   strong, weak or normal
+ *				   HW recommended value:
+ *	qs:			FCRAM QS mode selection
+ *				   qs mode or free running
+ *				   HW recommended value is:
+ *
+ * Return:
+ * NPI success/failure status code
+ */
+
+npi_status_t
+npi_fflp_cfg_fcram_reset(npi_handle_t handle,
+	fflp_fcram_output_drive_t strength, fflp_fcram_qs_t qs)
+{
+	fflp_cfg_1_t fflp_cfg;
+	uint64_t offset;
+	offset = FFLP_CFG_1_REG;
+
+	/* These bits have to be configured before FCRAM reset is issued */
+	fflp_cfg.value = 0;
+	fflp_cfg.bits.ldw.pio_fio_rst = 1;
+	REG_PIO_WRITE64(handle, offset, fflp_cfg.value);
+
+	NXGE_DELAY(5); /* TODO: What is the correct delay? */
+
+	fflp_cfg.bits.ldw.pio_fio_rst = 0;
+	REG_PIO_WRITE64(handle, offset, fflp_cfg.value);
+	fflp_cfg.bits.ldw.fcramqs = qs;
+	fflp_cfg.bits.ldw.fcramoutdr = strength;
+	fflp_cfg.bits.ldw.fflpinitdone = 1;
+	REG_PIO_WRITE64(handle, offset, fflp_cfg.value);
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_fflp_cfg_init_done(npi_handle_t handle)
+
+{
+
+	fflp_cfg_1_t fflp_cfg;
+	uint64_t offset;
+	offset = FFLP_CFG_1_REG;
+
+	REG_PIO_READ64(handle, offset, &fflp_cfg.value);
+	fflp_cfg.bits.ldw.fflpinitdone = 1;
+	REG_PIO_WRITE64(handle, offset, fflp_cfg.value);
+	return (NPI_SUCCESS);
+
+}
+
+npi_status_t
+npi_fflp_cfg_init_start(npi_handle_t handle)
+
+{
+
+	fflp_cfg_1_t fflp_cfg;
+	uint64_t offset;
+	offset = FFLP_CFG_1_REG;
+
+	REG_PIO_READ64(handle, offset, &fflp_cfg.value);
+	fflp_cfg.bits.ldw.fflpinitdone = 0;
+	REG_PIO_WRITE64(handle, offset, fflp_cfg.value);
+	return (NPI_SUCCESS);
+
+}
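+
+/*
+ * Usage sketch (illustrative only, kept out of the build by an
+ * example-only #ifdef guard): a possible sequence that clears
+ * fflpinitdone, adjusts the FCRAM and TCAM access ratios, and then
+ * marks initialization done.  The ratio values are arbitrary examples;
+ * note that the two access-ratio calls also toggle fflpinitdone
+ * internally.
+ */
+#ifdef	NPI_FFLP_EXAMPLES
+static void
+example_cfg_access_ratios(npi_handle_t handle)
+{
+	(void) npi_fflp_cfg_init_start(handle);
+	(void) npi_fflp_cfg_fcram_access(handle, 0);	/* favor lookups */
+	(void) npi_fflp_cfg_tcam_access(handle, 0);	/* favor lookups */
+	(void) npi_fflp_cfg_init_done(handle);
+}
+#endif	/* NPI_FFLP_EXAMPLES */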
+
+/*
+ * Enables the TCAM search function.
+ *
+ */
+npi_status_t
+npi_fflp_cfg_tcam_enable(npi_handle_t handle)
+
+{
+
+	fflp_cfg_1_t fflp_cfg;
+	uint64_t offset;
+	offset = FFLP_CFG_1_REG;
+	REG_PIO_READ64(handle, offset, &fflp_cfg.value);
+	fflp_cfg.bits.ldw.tcam_disable = 0;
+	REG_PIO_WRITE64(handle, offset, fflp_cfg.value);
+	return (NPI_SUCCESS);
+
+}
+
+/*
+ * Disables the TCAM search function.
+ * While the TCAM is in disabled state, all TCAM matches would return NO_MATCH
+ *
+ */
+npi_status_t
+npi_fflp_cfg_tcam_disable(npi_handle_t handle)
+
+{
+
+	fflp_cfg_1_t fflp_cfg;
+	uint64_t offset;
+	offset = FFLP_CFG_1_REG;
+	REG_PIO_READ64(handle, offset, &fflp_cfg.value);
+	fflp_cfg.bits.ldw.tcam_disable = 1;
+	REG_PIO_WRITE64(handle, offset, fflp_cfg.value);
+	return (NPI_SUCCESS);
+
+}
+
+/*
+ * npi_fflp_event_mask_config():
+ *	This function is called to operate on the event mask
+ *	register, which masks or unmasks the FFLP error sources
+ *	that generate interrupts and status bits.
+ */
+npi_status_t
+npi_fflp_event_mask_config(npi_handle_t handle, io_op_t op_mode,
+		fflp_event_mask_cfg_t *mask_cfgp)
+{
+	int		status = NPI_SUCCESS;
+	fflp_err_mask_t mask_reg;
+
+	switch (op_mode) {
+	case OP_GET:
+
+		REG_PIO_READ64(handle, FFLP_ERR_MSK_REG, &mask_reg.value);
+		*mask_cfgp = mask_reg.value & FFLP_ERR_MASK_ALL;
+		break;
+
+	case OP_SET:
+		mask_reg.value = (~(*mask_cfgp) & FFLP_ERR_MASK_ALL);
+		REG_PIO_WRITE64(handle, FFLP_ERR_MSK_REG, mask_reg.value);
+		break;
+
+	case OP_UPDATE:
+		REG_PIO_READ64(handle, FFLP_ERR_MSK_REG, &mask_reg.value);
+		mask_reg.value |=  (~(*mask_cfgp) & FFLP_ERR_MASK_ALL);
+		REG_PIO_WRITE64(handle, FFLP_ERR_MSK_REG, mask_reg.value);
+		break;
+
+	case OP_CLEAR:
+		mask_reg.value = FFLP_ERR_MASK_ALL;
+		REG_PIO_WRITE64(handle, FFLP_ERR_MSK_REG, mask_reg.value);
+		break;
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+		    " npi_fflp_event_mask_config",
+		    " eventmask <0x%x>", op_mode));
+		return (NPI_FFLP_ERROR | NPI_FFLP_SW_PARAM_ERROR);
+	}
+
+	return (status);
+}
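+
+/*
+ * Usage sketch (illustrative only, kept out of the build by an
+ * example-only #ifdef guard): unmask every FFLP error source with
+ * OP_SET (the hardware register stores the inverted configuration),
+ * then read the raw mask bits back with OP_GET.
+ */
+#ifdef	NPI_FFLP_EXAMPLES
+static npi_status_t
+example_fflp_event_mask(npi_handle_t handle)
+{
+	fflp_event_mask_cfg_t mask = CFG_FFLP_MASK_ALL;
+	npi_status_t rs;
+
+	rs = npi_fflp_event_mask_config(handle, OP_SET, &mask);
+	if (rs != NPI_SUCCESS)
+		return (rs);
+	return (npi_fflp_event_mask_config(handle, OP_GET, &mask));
+}
+#endif	/* NPI_FFLP_EXAMPLES */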
+
+/*
+ * Read vlan error bits
+ */
+void
+npi_fflp_vlan_error_get(npi_handle_t handle, p_vlan_par_err_t p_err)
+{
+	REG_PIO_READ64(handle, FFLP_VLAN_PAR_ERR_REG, &p_err->value);
+}
+
+/*
+ * clear vlan error bits
+ */
+void
+npi_fflp_vlan_error_clear(npi_handle_t handle)
+{
+	vlan_par_err_t p_err;
+	p_err.value  = 0;
+	p_err.bits.ldw.m_err = 0;
+	p_err.bits.ldw.err = 0;
+	REG_PIO_WRITE64(handle, FFLP_VLAN_PAR_ERR_REG, p_err.value);
+
+}
+
+/*
+ * Read TCAM error bits
+ */
+void
+npi_fflp_tcam_error_get(npi_handle_t handle, p_tcam_err_t p_err)
+{
+	REG_PIO_READ64(handle, FFLP_TCAM_ERR_REG, &p_err->value);
+}
+
+/*
+ * clear TCAM error bits
+ */
+void
+npi_fflp_tcam_error_clear(npi_handle_t handle)
+{
+	tcam_err_t p_err;
+
+	p_err.value  = 0;
+	p_err.bits.ldw.p_ecc = 0;
+	p_err.bits.ldw.mult = 0;
+	p_err.bits.ldw.err = 0;
+	REG_PIO_WRITE64(handle, FFLP_TCAM_ERR_REG, p_err.value);
+
+}
+
+/*
+ * Read FCRAM error bits
+ */
+void
+npi_fflp_fcram_error_get(npi_handle_t handle,
+	p_hash_tbl_data_log_t p_err, uint8_t partition)
+{
+	uint64_t offset;
+
+	offset = FFLP_HASH_TBL_DATA_LOG_REG + partition * 8192;
+	REG_PIO_READ64(handle, offset, &p_err->value);
+}
+
+/*
+ * clear FCRAM error bits
+ */
+void
+npi_fflp_fcram_error_clear(npi_handle_t handle, uint8_t partition)
+{
+	hash_tbl_data_log_t p_err;
+	uint64_t offset;
+
+	p_err.value  = 0;
+	p_err.bits.ldw.pio_err = 0;
+	offset = FFLP_HASH_TBL_DATA_LOG_REG + partition * 8192;
+
+	REG_PIO_WRITE64(handle, offset,
+			    p_err.value);
+
+}
+
+/*
+ * Read FCRAM lookup error log1 bits
+ */
+void
+npi_fflp_fcram_error_log1_get(npi_handle_t handle,
+			    p_hash_lookup_err_log1_t log1)
+{
+	REG_PIO_READ64(handle, HASH_LKUP_ERR_LOG1_REG,
+				    &log1->value);
+}
+
+/*
+ * Read FCRAM lookup error log2 bits
+ */
+void
+npi_fflp_fcram_error_log2_get(npi_handle_t handle,
+		    p_hash_lookup_err_log2_t log2)
+{
+	REG_PIO_READ64(handle, HASH_LKUP_ERR_LOG2_REG,
+			    &log2->value);
+}
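+
+/*
+ * Usage sketch (illustrative only, kept out of the build by an
+ * example-only #ifdef guard): read the VLAN and TCAM error registers
+ * and clear any latched error indications.  A real handler would also
+ * log the individual error fields before clearing them.
+ */
+#ifdef	NPI_FFLP_EXAMPLES
+static void
+example_fflp_error_scan(npi_handle_t handle)
+{
+	vlan_par_err_t vlan_err;
+	tcam_err_t tcam_err;
+
+	npi_fflp_vlan_error_get(handle, &vlan_err);
+	if (vlan_err.bits.ldw.err)
+		npi_fflp_vlan_error_clear(handle);
+
+	npi_fflp_tcam_error_get(handle, &tcam_err);
+	if (tcam_err.bits.ldw.err)
+		npi_fflp_tcam_error_clear(handle);
+}
+#endif	/* NPI_FFLP_EXAMPLES */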
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nxge/npi/npi_fflp.h	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,1187 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _NPI_FFLP_H
+#define	_NPI_FFLP_H
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+
+#include <npi.h>
+#include <nxge_fflp_hw.h>
+#include <nxge_fflp.h>
+
+
+typedef uint8_t part_id_t;
+typedef uint8_t tcam_location_t;
+typedef uint16_t vlan_id_t;
+
+typedef	enum _tcam_op {
+	TCAM_RWC_STAT	= 0x1,
+	TCAM_RWC_MATCH	= 0x2
+} tcam_op_t;
+
+
+#define	NPI_TCAM_COMP_NO_MATCH	0x8000000000000ULL
+
+/*
+ * NPI FFLP ERROR Codes
+ */
+
+#define	NPI_FFLP_BLK_CODE	FFLP_BLK_ID << 8
+#define	NPI_FFLP_ERROR		(NPI_FAILURE | NPI_FFLP_BLK_CODE)
+#define	NPI_TCAM_ERROR		0x10
+#define	NPI_FCRAM_ERROR		0x20
+#define	NPI_GEN_FFLP		0x30
+#define	NPI_FFLP_SW_PARAM_ERROR	0x40
+#define	NPI_FFLP_HW_ERROR	0x80
+
+
+#define	NPI_FFLP_RESET_ERROR	(NPI_FFLP_ERROR | NPI_GEN_FFLP | RESET_FAILED)
+#define	NPI_FFLP_RDC_TABLE_INVALID	(NPI_FFLP_ERROR | RDC_TAB_INVALID)
+#define	NPI_FFLP_VLAN_INVALID		(NPI_FFLP_ERROR | VLAN_INVALID)
+#define	NPI_FFLP_PORT_INVALID		(NPI_FFLP_ERROR | PORT_INVALID)
+#define	NPI_FFLP_TCAM_RD_ERROR		\
+	(NPI_FFLP_ERROR | NPI_TCAM_ERROR | READ_FAILED)
+#define	NPI_FFLP_TCAM_WR_ERROR		\
+	(NPI_FFLP_ERROR | NPI_TCAM_ERROR | WRITE_FAILED)
+#define	NPI_FFLP_TCAM_LOC_INVALID	\
+	(NPI_FFLP_ERROR | NPI_TCAM_ERROR | LOCATION_INVALID)
+#define	NPI_FFLP_ASC_RAM_RD_ERROR	\
+	(NPI_FFLP_ERROR | NPI_TCAM_ERROR | READ_FAILED)
+#define	NPI_FFLP_ASC_RAM_WR_ERROR	\
+	(NPI_FFLP_ERROR | NPI_TCAM_ERROR | WRITE_FAILED)
+#define	NPI_FFLP_FCRAM_READ_ERROR	\
+	(NPI_FFLP_ERROR | NPI_FCRAM_ERROR | READ_FAILED)
+#define	NPI_FFLP_FCRAM_WR_ERROR		\
+	(NPI_FFLP_ERROR | NPI_FCRAM_ERROR | WRITE_FAILED)
+#define	NPI_FFLP_FCRAM_PART_INVALID	\
+	(NPI_FFLP_ERROR | NPI_FCRAM_ERROR | RDC_TAB_INVALID)
+#define	NPI_FFLP_FCRAM_LOC_INVALID	\
+	(NPI_FFLP_ERROR | NPI_FCRAM_ERROR | LOCATION_INVALID)
+
+#define	TCAM_CLASS_INVALID		\
+	(NPI_FFLP_SW_PARAM_ERROR | 0xb)
+/* have only 0xc, 0xd, 0xe and 0xf left for sw error codes */
+#define	NPI_FFLP_TCAM_CLASS_INVALID	\
+	(NPI_FFLP_ERROR | NPI_TCAM_ERROR | TCAM_CLASS_INVALID)
+#define	NPI_FFLP_TCAM_HW_ERROR		\
+	(NPI_FFLP_ERROR | NPI_FFLP_HW_ERROR | NPI_TCAM_ERROR)
+#define	NPI_FFLP_FCRAM_HW_ERROR		\
+	(NPI_FFLP_ERROR | NPI_FFLP_HW_ERROR | NPI_FCRAM_ERROR)
+
+
+/*
+ * FFLP NPI defined event masks (mapped to the hardware defined masks).
+ */
+typedef	enum _fflp_event_mask_cfg_e {
+	CFG_FFLP_ENT_MSK_VLAN_MASK = FFLP_ERR_VLAN_MASK,
+	CFG_FFLP_ENT_MSK_TCAM_MASK = FFLP_ERR_TCAM_MASK,
+	CFG_FFLP_ENT_MSK_HASH_TBL_LKUP_MASK = FFLP_ERR_HASH_TBL_LKUP_MASK,
+	CFG_FFLP_ENT_MSK_HASH_TBL_DAT_MASK = FFLP_ERR_HASH_TBL_DAT_MASK,
+
+	CFG_FFLP_MASK_ALL	= (FFLP_ERR_VLAN_MASK | FFLP_ERR_TCAM_MASK |
+						FFLP_ERR_HASH_TBL_LKUP_MASK |
+						FFLP_ERR_HASH_TBL_DAT_MASK)
+} fflp_event_mask_cfg_t;
+
+
+/* FFLP FCRAM Related Functions */
+/* The following are FCRAM datapath functions */
+
+/*
+ * npi_fflp_fcram_entry_write ()
+ * Populates an FCRAM entry
+ * Inputs:
+ *         handle:	opaque handle interpreted by the underlying OS
+ *	   partid:	Partition ID
+ *	   location:	Index to the FCRAM.
+ *			Corresponds to last 20 bits of H1 value
+ *	   fcram_ptr:	Pointer to the FCRAM contents to be used for writing
+ *	   format:	Entry Format. Determines the size of the write.
+ *			      FCRAM_ENTRY_OPTIM:   8 bytes (a 64 bit write)
+ *			      FCRAM_ENTRY_EX_IP4:  32 bytes (4 X 64 bit write)
+ *			      FCRAM_ENTRY_EX_IP6:  56 bytes (7 X 64 bit write)
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ * NPI_HW_ERR
+ * NPI_SW_ERR
+ *
+ */
+
+npi_status_t npi_fflp_fcram_entry_write(npi_handle_t, part_id_t,
+			    uint32_t, fcram_entry_t *,
+			    fcram_entry_format_t);
+
+/*
+ * npi_fflp_fcram_entry_read ()
+ * Reads an FCRAM entry
+ * Inputs:
+ *         handle:	opaque handle interpreted by the underlying OS
+ *	   partid:	Partition ID
+ *	   location:	Index to the FCRAM.
+ *			Corresponds to last 20 bits of H1 value
+ *	   fcram_ptr:	Pointer to the FCRAM contents to be updated
+ *	   format:	Entry Format. Determines the size of the read.
+ *			      FCRAM_ENTRY_OPTIM:   8 bytes (a 64 bit read)
+ *			      FCRAM_ENTRY_EX_IP4:  32 bytes (4 X 64 bit read )
+ *			      FCRAM_ENTRY_EX_IP6:  56 bytes (7 X 64 bit read )
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ * NPI_HW_ERR
+ * NPI_SW_ERR
+ */
+
+npi_status_t npi_fflp_fcram_entry_read(npi_handle_t,  part_id_t,
+				    uint32_t, fcram_entry_t *,
+				    fcram_entry_format_t);
+
+/*
+ * npi_fflp_fcram_entry_invalidate ()
+ * Invalidate FCRAM entry at the given location
+ * Inputs:
+ *	handle:		opaque handle interpreted by the underlying OS
+ *	partid:		Partition ID
+ *	location:	location of the FCRAM/hash entry.
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ * NPI_HW_ERR
+ * NPI_SW_ERR
+ *
+ */
+
+npi_status_t
+npi_fflp_fcram_entry_invalidate(npi_handle_t, part_id_t,
+				    uint32_t);
+
+/*
+ * npi_fflp_fcram_subarea_write ()
+ * Writes to an FCRAM entry subarea, i.e., the 8 bytes within the 64 bytes
+ * pointed to by the last 20 bits of H1. Effectively, this accesses a
+ * specific 8 bytes within the hash table bucket.
+ *
+ *  H1-->  |-----------------|
+ *	   |	subarea 0    |
+ *	   |_________________|
+ *	   | Subarea 1	     |
+ *	   |_________________|
+ *	   | .......	     |
+ *	   |_________________|
+ *	   | Subarea 7       |
+ *	   |_________________|
+ *
+ * Inputs:
+ *         handle:	opaque handle interpreted by the underlying OS
+ *	   partid:	Partition ID
+ *	   location:	location of the subarea. It is derived from:
+ *			Bucket = [19:15][14:0]       (20 bits of H1)
+ *			location = (Bucket << 3 ) + subarea * 8
+ *				 = [22:18][17:3] || subarea * 8
+ *	   data:	Data
+ * Return:
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ * NPI_HW_ERR
+ * NPI_SW_ERR
+ *
+ */
+
+
+npi_status_t npi_fflp_fcram_subarea_write(npi_handle_t, part_id_t,
+				    uint32_t, uint64_t);
+/*
+ * npi_fflp_fcram_subarea_read ()
+ * Reads an FCRAM entry subarea, i.e., the 8 bytes within the 64 bytes
+ * pointed to by the last 20 bits of H1. Effectively, this accesses a
+ * specific 8 bytes within the hash table bucket.
+ *
+ *  H1-->  |-----------------|
+ *	   |	subarea 0    |
+ *	   |_________________|
+ *	   | Subarea 1	     |
+ *	   |_________________|
+ *	   | .......	     |
+ *	   |_________________|
+ *	   | Subarea 7       |
+ *	   |_________________|
+ *
+ * Inputs:
+ *         handle:	opaque handle interpreted by the underlying OS
+ *	   partid:	Partition ID
+ *	   location:	location of the subarea. It is derived from:
+ *			Bucket = [19:15][14:0]       (20 bits of H1)
+ *			location = (Bucket << 3 ) + subarea * 8
+ *				 = [22:18][17:3] || subarea * 8
+ *	   data:	ptr to write the subarea contents to.
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ * NPI_HW_ERR
+ * NPI_SW_ERR
+ *
+ */
+
+npi_status_t npi_fflp_fcram_subarea_read  (npi_handle_t,
+			part_id_t, uint32_t, uint64_t *);
+
+
+/* The following are zero function fflp configuration functions */
+/*
+ * npi_fflp_cfg_fcram_partition()
+ * Partitions and configures the FCRAM
+ *
+ * Input
+ *     partid			partition ID
+ *				Corresponds to the RDC table
+ *     part_size		Size of the partition
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ * NPI_HW_ERR
+ * NPI_SW_ERR
+ *
+ */
+npi_status_t npi_fflp_cfg_fcram_partition(npi_handle_t, part_id_t,
+				uint8_t, uint8_t);
+
+/*
+ * npi_fflp_cfg_fcram_partition_enable
+ * Enable previously configured FCRAM partition
+ *
+ * Input
+ *     partid			partition ID
+ *				Corresponds to the RDC table
+ *
+ * Return
+ *      0			Successful
+ *      Non zero  error code    Enable failed, and reason.
+ *
+ */
+npi_status_t npi_fflp_cfg_fcram_partition_enable(npi_handle_t,
+				part_id_t);
+
+/*
+ * npi_fflp_cfg_fcram_partition_disable
+ * Disable previously configured FCRAM partition
+ *
+ * Input
+ *     partid			partition ID
+ *				Corresponds to the RDC table
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ * NPI_HW_ERR
+ * NPI_SW_ERR
+ *
+ */
+
+npi_status_t npi_fflp_cfg_fcram_partition_disable(npi_handle_t,
+				part_id_t);
+
+
+/*
+ *  npi_fflp_cfg_fcram_reset
+ *  Initializes the FCRAM reset sequence (including FFLP).
+ *
+ *  Input
+ *	strength:		FCRAM Drive strength
+ *				   strong, weak or normal
+ *				   HW recommended value:
+ *	qs:			FCRAM QS mode selection
+ *				   qs mode or free running
+ *				   HW recommended value is:
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ * NPI_HW_ERR
+ * NPI_SW_ERR
+ *
+ */
+
+npi_status_t npi_fflp_cfg_fcram_reset(npi_handle_t,
+				    fflp_fcram_output_drive_t,
+				    fflp_fcram_qs_t);
+
+
+
+/*
+ *  npi_fflp_cfg_tcam_reset
+ *  Initializes the FFLP reset sequence
+ * Doesn't configure the FCRAM params.
+ *
+ *  Input
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ * NPI_HW_ERR
+ * NPI_SW_ERR
+ *
+ */
+
+npi_status_t npi_fflp_cfg_tcam_reset(npi_handle_t);
+
+/*
+ *  npi_fflp_cfg_tcam_enable
+ *  Enables the TCAM function
+ *
+ *  Input
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ * NPI_HW_ERR
+ * NPI_SW_ERR
+ *
+ */
+
+npi_status_t npi_fflp_cfg_tcam_enable(npi_handle_t);
+
+/*
+ *  npi_fflp_cfg_tcam_disable
+ *  Disables the TCAM function
+ *
+ *  Input
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ * NPI_HW_ERR
+ * NPI_SW_ERR
+ *
+ */
+
+npi_status_t npi_fflp_cfg_tcam_disable(npi_handle_t);
+
+
+/*
+ *  npi_fflp_cfg_cam_errorcheck_disable
+ *  Disables FCRAM and TCAM error checking
+ *
+ *  Input
+ *
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ * NPI_HW_ERR
+ * NPI_SW_ERR
+ *
+ */
+
+npi_status_t npi_fflp_cfg_cam_errorcheck_disable(npi_handle_t);
+
+/*
+ *  npi_fflp_cfg_cam_errorcheck_enable
+ *  Enables FCRAM and TCAM error checking
+ *
+ *  Input
+ *
+ *
+ *  Return
+ *      0			Successful
+ *      Non zero  error code    Enable failed, and reason.
+ *
+ */
+npi_status_t npi_fflp_cfg_cam_errorcheck_enable(npi_handle_t);
+
+
+/*
+ *  npi_fflp_cfg_llcsnap_enable
+ *  Enables input parser llcsnap recognition
+ *
+ *  Input
+ *
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ * NPI_HW_ERR
+ * NPI_SW_ERR
+ *
+ *
+ */
+npi_status_t npi_fflp_cfg_llcsnap_enable(npi_handle_t);
+
+/*
+ *  npi_fflp_cfg_llcsnap_disable
+ *  Disables input parser llcsnap recognition
+ *
+ *  Input
+ *
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ * NPI_HW_ERR
+ * NPI_SW_ERR
+ *
+ *
+ */
+
+npi_status_t npi_fflp_cfg_llcsnap_disable(npi_handle_t);
+
+/*
+ * npi_fflp_cfg_fcram_refresh_time
+ * Set FCRAM min and max refresh time.
+ *
+ * Input
+ *	min_time		Minimum Refresh time count
+ *	max_time		maximum Refresh Time count
+ *	sys_time		System Clock rate
+ *
+ *	The counters are 16-bit counters. The maximum refresh time is
+ *      3.9us/clock cycle; the minimum is 400ns/clock cycle.
+ *	It is unclear whether the cycle here is the FCRAM clock cycle.
+ *	If it is, the sys_time parameter is not needed, since there
+ *      would be no configuration variation due to the system clock
+ *      cycle.
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ * NPI_HW_ERR
+ * NPI_SW_ERR
+ *
+ */
+
+npi_status_t npi_fflp_cfg_fcram_refresh_time(npi_handle_t,
+		uint32_t, uint32_t, uint32_t);
+
+
+/*
+ * npi_fflp_cfg_fcram_access ()
+ *
+ * Sets the ratio between the FCRAM pio and lookup access
+ * Input:
+ * access_ratio: 0  Lookup has the highest priority
+ *		 15 PIO has maximum possible priority
+ *
+ */
+
+npi_status_t npi_fflp_cfg_fcram_access(npi_handle_t,
+					uint8_t);
+
+
+/*
+ * npi_fflp_cfg_tcam_access ()
+ *
+ * Sets the ratio between the TCAM pio and lookup access
+ * Input:
+ * access_ratio: 0  Lookup has the highest priority
+ *		 15 PIO has maximum possible priority
+ *
+ */
+
+npi_status_t npi_fflp_cfg_tcam_access(npi_handle_t, uint8_t);
+
+
+/*
+ *  npi_fflp_fcram_get_lookup_err_log
+ *  Reports hash table (fcram) lookup errors
+ *
+ *  Input
+ *      status			Pointer to return Error bits
+ *
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ * NPI_HW_ERR
+ * NPI_SW_ERR
+ *
+ */
+
+npi_status_t npi_fflp_fcram_get_lookup_err_log(npi_handle_t,
+				    hash_lookup_err_log_t *);
+
+
+
+/*
+ * npi_fflp_fcram_get_pio_err_log
+ * Reports hash table PIO read errors.
+ *
+ * Input
+ *	partid:		partition ID
+ *      err_stat	pointer to return Error bits
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ * NPI_HW_ERR
+ * NPI_SW_ERR
+ *
+ */
+npi_status_t npi_fflp_fcram_get_pio_err_log(npi_handle_t,
+				part_id_t, hash_pio_err_log_t *);
+
+
+/*
+ * npi_fflp_fcram_clr_pio_err_log
+ * Clears FCRAM PIO  error status for the partition.
+ * If there are TCAM errors as indicated by err bit set by HW,
+ *  then the SW will clear it by clearing the bit.
+ *
+ * Input
+ *	partid:		partition ID
+ *
+ *
+ * Return
+ *	NPI_SUCCESS	Success
+ *
+ *
+ */
+
+npi_status_t npi_fflp_fcram_clr_pio_err_log(npi_handle_t,
+						part_id_t);
+
+
+
+/*
+ * npi_fflp_fcram_err_data_test
+ * Tests the FCRAM error detection logic.
+ * The error detection logic for the datapath is tested.
+ * bits [63:0] are set to select the data bits to be xored
+ *
+ * Input
+ *	data:	 data bits to select bits to be xored
+ *
+ *
+ * Return
+ *	NPI_SUCCESS	Success
+ *
+ *
+ */
+npi_status_t npi_fflp_fcram_err_data_test(npi_handle_t, fcram_err_data_t *);
+
+
+/*
+ * npi_fflp_fcram_err_synd_test
+ * Tests the FCRAM error detection logic.
+ * The error detection logic for the syndrome is tested.
+ * tst0->synd (8bits) are set to select the syndrome bits
+ * to be XOR'ed
+ *
+ * Input
+ *	syndrome_bits:	 Syndrome bits to select bits to be xor'ed
+ *
+ *
+ * Return
+ *	NPI_SUCCESS	Success
+ *
+ *
+ */
+npi_status_t npi_fflp_fcram_err_synd_test(npi_handle_t, uint8_t);
+
+
+/*
+ * npi_fflp_cfg_vlan_table_clear
+ * Clears the vlan RDC table
+ *
+ * Input
+ *     vlan_id		VLAN ID
+ *
+ * Output
+ *
+ *	NPI_SUCCESS			Successful
+ *
+ */
+
+npi_status_t npi_fflp_cfg_vlan_table_clear(npi_handle_t, vlan_id_t);
+
+/*
+ * npi_fflp_cfg_enet_vlan_table_assoc
+ * Associates a port VLAN ID with an RDC table and sets its priority
+ * with respect to the L2DA RDC table.
+ *
+ * Input
+ *     mac_portn		port number
+ *     vlan_id			VLAN ID
+ *     rdc_table		RDC Table #
+ *     priority			priority
+ *				1: vlan classification has higher priority
+ *				0: l2da classification has higher priority
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ * NPI_HW_ERR
+ * NPI_SW_ERR
+ *
+ */
+
+npi_status_t npi_fflp_cfg_enet_vlan_table_assoc(npi_handle_t,
+				    uint8_t, vlan_id_t,
+				    uint8_t, uint8_t);
+
+
+/*
+ * npi_fflp_cfg_enet_vlan_table_set_pri
+ * Sets the VLAN-based classification priority with respect to
+ * L2DA classification.
+ *
+ * Input
+ *     mac_portn	port number
+ *     vlan_id		VLAN ID
+ *     priority 	priority
+ *			1: vlan classification has higher priority
+ *			0: l2da classification has higher priority
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ * NPI_HW_ERR
+ * NPI_SW_ERR
+ *
+ */
+
+npi_status_t npi_fflp_cfg_enet_vlan_table_set_pri(npi_handle_t,
+				    uint8_t, vlan_id_t,
+				    uint8_t);
+
+/*
+ * npi_fflp_cfg_enet_usr_cls_set()
+ * Configures a user configurable ethernet class
+ *
+ * Input
+ *      class:       Ethernet Class
+ *		     class (TCAM_CLASS_ETYPE or  TCAM_CLASS_ETYPE_2)
+ *      enet_type:   16 bit Ethernet Type value, corresponding ethernet bytes
+ *                        [13:14] in the frame.
+ *
+ *  by default, the class will be disabled until explicitly enabled.
+ *
+ * Return
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ * NPI_HW_ERR
+ * NPI_SW_ERR
+ *
+ *
+ *
+ */
+
+npi_status_t npi_fflp_cfg_enet_usr_cls_set(npi_handle_t,
+				    tcam_class_t, uint16_t);
+
+/*
+ * npi_fflp_cfg_enet_usr_cls_enable()
+ * Enable previously configured TCAM user configurable Ethernet classes.
+ *
+ * Input
+ *      class:       Ethernet Class
+ *		     (TCAM_CLASS_ETYPE or  TCAM_CLASS_ETYPE_2)
+ *
+ * Return
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ * NPI_HW_ERR
+ * NPI_SW_ERR
+ *
+ */
+
+npi_status_t npi_fflp_cfg_enet_usr_cls_enable(npi_handle_t, tcam_class_t);
+
+/*
+ * npi_fflp_cfg_enet_usr_cls_disable()
+ * Disables previously configured TCAM user configurable Ethernet classes.
+ *
+ * Input
+ *      class:       Ethernet Class
+ *		     class = (TCAM_CLASS_ETYPE or  TCAM_CLASS_ETYPE_2)
+ *
+ * Return
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ * NPI_HW_ERR
+ * NPI_SW_ERR
+ *
+ */
+
+
+npi_status_t npi_fflp_cfg_enet_usr_cls_disable(npi_handle_t, tcam_class_t);
+
+
+/*
+ * npi_fflp_cfg_ip_usr_cls_set()
+ * Configures the TCAM user configurable IP classes.
+ *
+ * Input
+ *      class:       IP Class
+ *		     (TCAM_CLASS_IP_USER_4 <= class <= TCAM_CLASS_IP_USER_7)
+ *      tos:         IP TOS bits
+ *      tos_mask:    IP TOS bits mask. bits with mask bits set will be used
+ *      proto:       IP Proto
+ *      ver:         IP Version
+ * By default, the class is disabled until explicitly enabled.
+ *
+ * Return
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ * NPI_HW_ERR
+ * NPI_SW_ERR
+ *
+ */
+
+npi_status_t npi_fflp_cfg_ip_usr_cls_set(npi_handle_t,
+					tcam_class_t,
+					uint8_t, uint8_t,
+					uint8_t, uint8_t);
+
+/*
+ * npi_fflp_cfg_ip_usr_cls_enable()
+ * Enable previously configured TCAM user configurable IP classes.
+ *
+ * Input
+ *      class:       IP Class
+ *		     (TCAM_CLASS_IP_USER_4 <= class <= TCAM_CLASS_IP_USER_7)
+ *
+ * Return
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ * NPI_HW_ERR
+ * NPI_SW_ERR
+ *
+ */
+
+npi_status_t npi_fflp_cfg_ip_usr_cls_enable(npi_handle_t, tcam_class_t);
+
+/*
+ * npi_fflp_cfg_ip_usr_cls_disable()
+ * Disables previously configured TCAM user configurable IP classes.
+ *
+ * Input
+ *      class:       IP Class
+ *		     (TCAM_CLASS_IP_USER_4 <= class <= TCAM_CLASS_IP_USER_7)
+ *
+ * Return
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ * NPI_HW_ERR
+ * NPI_SW_ERR
+ *
+ */
+
+
+npi_status_t npi_fflp_cfg_ip_usr_cls_disable(npi_handle_t, tcam_class_t);
+
+
+/*
+ * npi_fflp_cfg_ip_cls_tcam_key ()
+ *
+ * Configures the TCAM key generation for the IP classes
+ *
+ * Input
+ *      l3_class:        IP class to configure key generation
+ *      cfg:             Configuration bits:
+ *                   discard:      Discard all frames of this class
+ *                   use_ip_saddr: use ip src address (for ipv6)
+ *                   use_ip_daddr: use ip dest address (for ipv6)
+ *                   lookup_enable: Enable Lookup
+ *
+ *
+ * Return
+ * NPI_SUCCESS
+ * NPI_HW_ERR
+ * NPI_SW_ERR
+ *
+ */
+
+
+npi_status_t npi_fflp_cfg_ip_cls_tcam_key(npi_handle_t,
+				    tcam_class_t, tcam_key_cfg_t *);
+
+/*
+ * npi_fflp_cfg_ip_cls_flow_key ()
+ *
+ * Configures the flow key generation for the IP classes
+ * Flow key is used to generate the H1 hash function value
+ * The fields used for the generation are configured using this
+ * NPI function.
+ *
+ * Input
+ *      l3_class:        IP class to configure flow key generation
+ *      cfg:             Configuration bits:
+ *                   use_proto:     Use IP proto field
+ *                   use_dport:     use l4 destination port
+ *                   use_sport:     use l4 source port
+ *                   ip_opts_exist: IP Options Present
+ *                   use_daddr:     use ip dest address
+ *                   use_saddr:     use ip source address
+ *                   use_vlan:      use VLAN ID
+ *                   use_l2da:      use L2 Dest MAC Address
+ *                   use_portnum:   use L2 virtual port number
+ *
+ *
+ * Return
+ * NPI_SUCCESS
+ * NPI_HW_ERR
+ * NPI_SW_ERR
+ *
+ */
+
+npi_status_t npi_fflp_cfg_ip_cls_flow_key(npi_handle_t,
+			    tcam_class_t, flow_key_cfg_t *);
+
+
+
+npi_status_t npi_fflp_cfg_ip_cls_flow_key_get(npi_handle_t,
+				    tcam_class_t,
+				    flow_key_cfg_t *);
+
+
+npi_status_t npi_fflp_cfg_ip_cls_tcam_key_get(npi_handle_t,
+				    tcam_class_t, tcam_key_cfg_t *);
+/*
+ * npi_fflp_cfg_hash_h1poly()
+ * Initializes the H1 hash generation logic.
+ *
+ * Input
+ *      init_value:       The initial value (seed)
+ *
+ * Return
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ * NPI_HW_ERR
+ * NPI_SW_ERR
+ *
+ */
+
+npi_status_t npi_fflp_cfg_hash_h1poly(npi_handle_t, uint32_t);
+
+
+
+/*
+ * npi_fflp_cfg_hash_h2poly()
+ * Initializes the H2 hash generation logic.
+ *
+ * Input
+ *      init_value:       The initial value (seed)
+ *
+ * Return
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ * NPI_HW_ERR
+ * NPI_SW_ERR
+ *
+ */
+
+npi_status_t npi_fflp_cfg_hash_h2poly(npi_handle_t, uint16_t);
+
+
+/*
+ * Reset the fflp block (actually the FCRAM)
+ * Waits until reset is completed
+ *
+ * input
+ * strength	fcram output drive strength: weak, normal or strong
+ * qs		qs mode. Normal or free running
+ *
+ * return value
+ *	  NPI_SUCCESS
+ *	  NPI_SW_ERR
+ *	  NPI_HW_ERR
+ */
+
+npi_status_t npi_fflp_fcram_reset(npi_handle_t,
+			    fflp_fcram_output_drive_t,
+			    fflp_fcram_qs_t);
+
+
+/* FFLP TCAM Related Functions */
+
+
+/*
+ * npi_fflp_tcam_entry_match()
+ *
+ * Tests for TCAM match of the tcam entry
+ *
+ * Input
+ * tcam_ptr
+ *
+ * Return
+ *   NPI_SUCCESS
+ *   NPI_SW_ERR
+ *   NPI_HW_ERR
+ *
+ */
+
+int npi_fflp_tcam_entry_match(npi_handle_t, tcam_entry_t *);
+
+/*
+ * npi_fflp_tcam_entry_write()
+ *
+ * Writes a TCAM entry at the given TCAM location.
+ *
+ * Input
+ * location
+ * tcam_ptr
+ *
+ * Return
+ *   NPI_SUCCESS
+ *   NPI_SW_ERR
+ *   NPI_HW_ERR
+ *
+ */
+
+npi_status_t npi_fflp_tcam_entry_write(npi_handle_t,
+				tcam_location_t,
+				tcam_entry_t *);
+
+/*
+ * npi_fflp_tcam_entry_read ()
+ *
+ * Reads a TCAM entry from the given TCAM location.
+ *
+ * Input:
+ * location
+ * tcam_ptr
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_HW_ERR
+ * NPI_SW_ERR
+ *
+ */
+
+
+npi_status_t npi_fflp_tcam_entry_read(npi_handle_t,
+					tcam_location_t,
+					tcam_entry_t *);
+
+/*
+ * npi_fflp_tcam_entry_invalidate()
+ *
+ * invalidates entry at tcam location
+ *
+ * Input
+ * location
+ *
+ * Return
+ *   NPI_SUCCESS
+ *   NPI_SW_ERR
+ *   NPI_HW_ERR
+ *
+ */
+
+npi_status_t npi_fflp_tcam_entry_invalidate(npi_handle_t,
+				    tcam_location_t);
+
+
+/*
+ * npi_fflp_tcam_asc_ram_entry_write()
+ *
+ * Writes the TCAM associated RAM entry at the given TCAM location.
+ *
+ * Input:
+ * location	tcam associatedRAM location
+ * ram_data	Value to write
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_HW_ERR
+ * NPI_SW_ERR
+ *
+ */
+
+npi_status_t npi_fflp_tcam_asc_ram_entry_write(npi_handle_t,
+				    tcam_location_t,
+				    uint64_t);
+
+
+/*
+ * npi_fflp_tcam_asc_ram_entry_read()
+ *
+ * Reads the TCAM associated RAM contents at the given TCAM location.
+ *
+ * Input:
+ * location	tcam associatedRAM location
+ * ram_data	ptr to return contents
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_HW_ERR
+ * NPI_SW_ERR
+ *
+ */
+
+npi_status_t npi_fflp_tcam_asc_ram_entry_read(npi_handle_t,
+				    tcam_location_t,
+				    uint64_t *);
+
+/*
+ * npi_fflp_tcam_get_err_log
+ * Reports TCAM PIO read and lookup errors.
+ * If there are TCAM errors as indicated by err bit set by HW,
+ *  then the SW will clear it by clearing the bit.
+ *
+ * Input
+ *	err_stat:	 structure to report various TCAM errors.
+ *                       will be updated if there are TCAM errors.
+ *
+ *
+ * Return
+ *	NPI_SUCCESS	Success
+ *
+ *
+ */
+npi_status_t npi_fflp_tcam_get_err_log(npi_handle_t, tcam_err_log_t *);
+
+
+
+/*
+ * npi_fflp_tcam_clr_err_log
+ * Clears TCAM PIO read and lookup error status.
+ * If there are TCAM errors as indicated by err bit set by HW,
+ *  then the SW will clear it by clearing the bit.
+ *
+ * Input
+ *	err_stat:	 structure to report various TCAM errors.
+ *                       will be updated if there are TCAM errors.
+ *
+ *
+ * Return
+ *	NPI_SUCCESS	Success
+ *
+ *
+ */
+
+npi_status_t npi_fflp_tcam_clr_err_log(npi_handle_t);
+
+
+
+
+
+/*
+ * npi_fflp_vlan_tbl_clr_err_log
+ * Clears VLAN Table PIO  error status.
+ * If there are VLAN Table errors as indicated by err bit set by HW,
+ *  then the SW will clear it by clearing the bit.
+ *
+ * Input
+ *	err_stat:	 structure to report various VLAN Table errors.
+ *                       will be updated if there are  errors.
+ *
+ *
+ * Return
+ *	NPI_SUCCESS	Success
+ *
+ *
+ */
+
+npi_status_t npi_fflp_vlan_tbl_clr_err_log(npi_handle_t);
+
+
+/*
+ * npi_fflp_vlan_tbl_get_err_log
+ * Reports VLAN Table  errors.
+ * If there are VLAN Table errors as indicated by err bit set by HW,
+ *  then the SW will clear it by clearing the bit.
+ *
+ * Input
+ *	err_stat:	 structure to report various VLAN table errors.
+ *                       will be updated if there are errors.
+ *
+ *
+ * Return
+ *	NPI_SUCCESS	Success
+ *
+ *
+ */
+npi_status_t npi_fflp_vlan_tbl_get_err_log(npi_handle_t,
+				    vlan_tbl_err_log_t *);
+
+
+
+
+/*
+ * npi_fflp_event_mask_config():
+ *	This function is called to operate on the event mask
+ *	register, which masks or unmasks the FFLP error sources
+ *	that generate interrupts and status bits.
+ *
+ * Parameters:
+ *	handle		- NPI handle
+ *	op_mode		- OP_GET: get hardware event mask
+ *			  OP_SET: set hardware interrupt event masks
+ *			  OP_UPDATE: update hardware interrupt event masks
+ *			  OP_CLEAR: clear all hardware event masks
+ *	cfgp		- pointer to NPI defined event mask
+ *			  enum data type.
+ * Return:
+ *	NPI_SUCCESS		- If set is complete successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE		-
+ *	NPI_FFLP_ERROR | NPI_FFLP_SW_PARAM_ERROR
+ *
+ */
+npi_status_t
+npi_fflp_event_mask_config(npi_handle_t, io_op_t,
+			    fflp_event_mask_cfg_t *);
+
+npi_status_t npi_fflp_dump_regs(npi_handle_t);
+
+
+/* Error status read and clear functions */
+
+void	npi_fflp_vlan_error_get(npi_handle_t,
+				    p_vlan_par_err_t);
+void	npi_fflp_vlan_error_clear(npi_handle_t);
+void	npi_fflp_tcam_error_get(npi_handle_t,
+				    p_tcam_err_t);
+void	npi_fflp_tcam_error_clear(npi_handle_t);
+
+void	npi_fflp_fcram_error_get(npi_handle_t,
+				    p_hash_tbl_data_log_t,
+				    uint8_t);
+void npi_fflp_fcram_error_clear(npi_handle_t, uint8_t);
+
+void npi_fflp_fcram_error_log1_get(npi_handle_t,
+				    p_hash_lookup_err_log1_t);
+
+void npi_fflp_fcram_error_log2_get(npi_handle_t,
+			    p_hash_lookup_err_log2_t);
+
+void npi_fflp_vlan_tbl_dump(npi_handle_t);
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _NPI_FFLP_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nxge/npi/npi_ipp.c	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,565 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#include <npi_ipp.h>
+
+uint64_t ipp_fzc_offset[] = {
+		IPP_CONFIG_REG,
+		IPP_DISCARD_PKT_CNT_REG,
+		IPP_TCP_CKSUM_ERR_CNT_REG,
+		IPP_ECC_ERR_COUNTER_REG,
+		IPP_INT_STATUS_REG,
+		IPP_INT_MASK_REG,
+		IPP_PFIFO_RD_DATA0_REG,
+		IPP_PFIFO_RD_DATA1_REG,
+		IPP_PFIFO_RD_DATA2_REG,
+		IPP_PFIFO_RD_DATA3_REG,
+		IPP_PFIFO_RD_DATA4_REG,
+		IPP_PFIFO_WR_DATA0_REG,
+		IPP_PFIFO_WR_DATA1_REG,
+		IPP_PFIFO_WR_DATA2_REG,
+		IPP_PFIFO_WR_DATA3_REG,
+		IPP_PFIFO_WR_DATA4_REG,
+		IPP_PFIFO_RD_PTR_REG,
+		IPP_PFIFO_WR_PTR_REG,
+		IPP_DFIFO_RD_DATA0_REG,
+		IPP_DFIFO_RD_DATA1_REG,
+		IPP_DFIFO_RD_DATA2_REG,
+		IPP_DFIFO_RD_DATA3_REG,
+		IPP_DFIFO_RD_DATA4_REG,
+		IPP_DFIFO_WR_DATA0_REG,
+		IPP_DFIFO_WR_DATA1_REG,
+		IPP_DFIFO_WR_DATA2_REG,
+		IPP_DFIFO_WR_DATA3_REG,
+		IPP_DFIFO_WR_DATA4_REG,
+		IPP_DFIFO_RD_PTR_REG,
+		IPP_DFIFO_WR_PTR_REG,
+		IPP_STATE_MACHINE_REG,
+		IPP_CKSUM_STATUS_REG,
+		IPP_FFLP_CKSUM_INFO_REG,
+		IPP_DEBUG_SELECT_REG,
+		IPP_DFIFO_ECC_SYNDROME_REG,
+		IPP_DFIFO_EOPM_RD_PTR_REG,
+		IPP_ECC_CTRL_REG
+};
+
+const char *ipp_fzc_name[] = {
+		"IPP_CONFIG_REG",
+		"IPP_DISCARD_PKT_CNT_REG",
+		"IPP_TCP_CKSUM_ERR_CNT_REG",
+		"IPP_ECC_ERR_COUNTER_REG",
+		"IPP_INT_STATUS_REG",
+		"IPP_INT_MASK_REG",
+		"IPP_PFIFO_RD_DATA0_REG",
+		"IPP_PFIFO_RD_DATA1_REG",
+		"IPP_PFIFO_RD_DATA2_REG",
+		"IPP_PFIFO_RD_DATA3_REG",
+		"IPP_PFIFO_RD_DATA4_REG",
+		"IPP_PFIFO_WR_DATA0_REG",
+		"IPP_PFIFO_WR_DATA1_REG",
+		"IPP_PFIFO_WR_DATA2_REG",
+		"IPP_PFIFO_WR_DATA3_REG",
+		"IPP_PFIFO_WR_DATA4_REG",
+		"IPP_PFIFO_RD_PTR_REG",
+		"IPP_PFIFO_WR_PTR_REG",
+		"IPP_DFIFO_RD_DATA0_REG",
+		"IPP_DFIFO_RD_DATA1_REG",
+		"IPP_DFIFO_RD_DATA2_REG",
+		"IPP_DFIFO_RD_DATA3_REG",
+		"IPP_DFIFO_RD_DATA4_REG",
+		"IPP_DFIFO_WR_DATA0_REG",
+		"IPP_DFIFO_WR_DATA1_REG",
+		"IPP_DFIFO_WR_DATA2_REG",
+		"IPP_DFIFO_WR_DATA3_REG",
+		"IPP_DFIFO_WR_DATA4_REG",
+		"IPP_DFIFO_RD_PTR_REG",
+		"IPP_DFIFO_WR_PTR_REG",
+		"IPP_STATE_MACHINE_REG",
+		"IPP_CKSUM_STATUS_REG",
+		"IPP_FFLP_CKSUM_INFO_REG",
+		"IPP_DEBUG_SELECT_REG",
+		"IPP_DFIFO_ECC_SYNDROME_REG",
+		"IPP_DFIFO_EOPM_RD_PTR_REG",
+		"IPP_ECC_CTRL_REG",
+};
+
+npi_status_t
+npi_ipp_dump_regs(npi_handle_t handle, uint8_t port)
+{
+	uint64_t		value, offset;
+	int 			num_regs, i;
+
+	ASSERT(IS_PORT_NUM_VALID(port));
+
+	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+		"\nIPP PORT Register Dump for port %d\n", port));
+
+	num_regs = sizeof (ipp_fzc_offset) / sizeof (uint64_t);
+	for (i = 0; i < num_regs; i++) {
+		offset = IPP_REG_ADDR(port, ipp_fzc_offset[i]);
+		NXGE_REG_RD64(handle, offset, &value);
+		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL, "0x%08llx "
+			"%s\t 0x%08llx \n",
+			offset, ipp_fzc_name[i], value));
+	}
+
+	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+		"\n IPP FZC Register Dump for port %d done\n", port));
+
+	return (NPI_SUCCESS);
+}
+
+void
+npi_ipp_read_regs(npi_handle_t handle, uint8_t port)
+{
+	uint64_t		value, offset;
+	int 			num_regs, i;
+
+	ASSERT(IS_PORT_NUM_VALID(port));
+
+	NPI_DEBUG_MSG((handle.function, NPI_IPP_CTL,
+		"\nIPP PORT Register read (to clear) for port %d\n", port));
+
+	num_regs = sizeof (ipp_fzc_offset) / sizeof (uint64_t);
+	for (i = 0; i < num_regs; i++) {
+		offset = IPP_REG_ADDR(port, ipp_fzc_offset[i]);
+		NXGE_REG_RD64(handle, offset, &value);
+	}
+
+}
+
+/*
+ * IPP Reset Routine
+ */
+npi_status_t
+npi_ipp_reset(npi_handle_t handle, uint8_t portn)
+{
+	uint64_t val = 0;
+	uint32_t cnt = MAX_PIO_RETRIES;
+
+	ASSERT(IS_PORT_NUM_VALID(portn));
+
+	IPP_REG_RD(handle, portn, IPP_CONFIG_REG, &val);
+	val |= IPP_SOFT_RESET;
+	IPP_REG_WR(handle, portn, IPP_CONFIG_REG, val);
+
+	do {
+		NXGE_DELAY(IPP_RESET_WAIT);
+		IPP_REG_RD(handle, portn, IPP_CONFIG_REG, &val);
+		cnt--;
+	} while (((val & IPP_SOFT_RESET) != 0) && (cnt > 0));
+
+	if (cnt == 0) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_ipp_reset"
+				    " HW Error: IPP_RESET  <0x%x>", val));
+		return (NPI_FAILURE | NPI_IPP_RESET_FAILED(portn));
+	}
+
+	return (NPI_SUCCESS);
+}
+
+
+/*
+ * IPP Configuration Routine
+ */
+npi_status_t
+npi_ipp_config(npi_handle_t handle, config_op_t op, uint8_t portn,
+		ipp_config_t config)
+{
+	uint64_t val = 0;
+
+	ASSERT(IS_PORT_NUM_VALID(portn));
+
+	switch (op) {
+
+	case ENABLE:
+	case DISABLE:
+		if ((config == 0) || ((config & ~CFG_IPP_ALL) != 0)) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				" npi_ipp_config",
+				" Invalid Input config <0x%x>",
+				config));
+			return (NPI_FAILURE | NPI_IPP_CONFIG_INVALID(portn));
+		}
+
+		IPP_REG_RD(handle, portn, IPP_CONFIG_REG, &val);
+
+		if (op == ENABLE)
+			val |= config;
+		else
+			val &= ~config;
+		break;
+
+	case INIT:
+		if ((config & ~CFG_IPP_ALL) != 0) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				" npi_ipp_config"
+				" Invalid Input config <0x%x>",
+				config));
+			return (NPI_FAILURE | NPI_IPP_CONFIG_INVALID(portn));
+		}
+		IPP_REG_RD(handle, portn, IPP_CONFIG_REG, &val);
+
+
+		val &= (IPP_IP_MAX_PKT_BYTES_MASK);
+		val |= config;
+		break;
+
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_ipp_config"
+				    " Invalid Input op <0x%x>", op));
+		return (NPI_FAILURE | NPI_IPP_OPCODE_INVALID(portn));
+	}
+
+	IPP_REG_WR(handle, portn, IPP_CONFIG_REG, val);
+	return (NPI_SUCCESS);
+}
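+
+/*
+ * Usage sketch (illustrative only, kept out of the build by an
+ * example-only #ifdef guard): initialize the IPP block with a typical
+ * set of features enabled.  The particular feature combination is an
+ * assumption made for the example.
+ */
+#ifdef	NPI_IPP_EXAMPLES
+static npi_status_t
+example_ipp_init(npi_handle_t handle, uint8_t portn)
+{
+	return (npi_ipp_config(handle, INIT, portn,
+	    CFG_IPP | CFG_IPP_DFIFO_ECC_CORRECT | CFG_IPP_DROP_BAD_CRC |
+	    CFG_IPP_TCP_UDP_CKSUM));
+}
+#endif	/* NPI_IPP_EXAMPLES */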
+
+npi_status_t
+npi_ipp_set_max_pktsize(npi_handle_t handle, uint8_t portn, uint32_t bytes)
+{
+	uint64_t val = 0;
+
+	ASSERT(IS_PORT_NUM_VALID(portn));
+
+	if (bytes > IPP_IP_MAX_PKT_BYTES_MASK) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			" npi_ipp_set_max_pktsize"
+			" Invalid Input Max bytes <0x%x>",
+			bytes));
+		return (NPI_FAILURE | NPI_IPP_MAX_PKT_BYTES_INVALID(portn));
+	}
+
+	IPP_REG_RD(handle, portn, IPP_CONFIG_REG, &val);
+	val &= ~(IPP_IP_MAX_PKT_BYTES_MASK << IPP_IP_MAX_PKT_BYTES_SHIFT);
+
+	val |= (bytes << IPP_IP_MAX_PKT_BYTES_SHIFT);
+	IPP_REG_WR(handle, portn, IPP_CONFIG_REG, val);
+
+	return (NPI_SUCCESS);
+}
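+
+/*
+ * Usage sketch (illustrative only, kept out of the build by an
+ * example-only #ifdef guard): raise the maximum accepted IP packet
+ * size to a jumbo-frame sized value.  The byte count is an arbitrary
+ * example and must fit within IPP_IP_MAX_PKT_BYTES_MASK.
+ */
+#ifdef	NPI_IPP_EXAMPLES
+static npi_status_t
+example_ipp_set_jumbo(npi_handle_t handle, uint8_t portn)
+{
+	return (npi_ipp_set_max_pktsize(handle, portn, 0x2400));
+}
+#endif	/* NPI_IPP_EXAMPLES */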
+
+/*
+ * IPP Interrupt Configuration Routine
+ */
+npi_status_t
+npi_ipp_iconfig(npi_handle_t handle, config_op_t op, uint8_t portn,
+		ipp_iconfig_t iconfig)
+{
+	uint64_t val = 0;
+
+	ASSERT(IS_PORT_NUM_VALID(portn));
+
+	switch (op) {
+	case ENABLE:
+	case DISABLE:
+
+		if ((iconfig == 0) || ((iconfig & ~ICFG_IPP_ALL) != 0)) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				" npi_ipp_iconfig"
+				" Invalid Input iconfig <0x%x>",
+				iconfig));
+			return (NPI_FAILURE | NPI_IPP_CONFIG_INVALID(portn));
+		}
+
+		IPP_REG_RD(handle, portn, IPP_INT_MASK_REG, &val);
+		if (op == ENABLE)
+			val &= ~iconfig;
+		else
+			val |= iconfig;
+		IPP_REG_WR(handle, portn, IPP_INT_MASK_REG, val);
+
+		break;
+	case INIT:
+
+		if ((iconfig & ~ICFG_IPP_ALL) != 0) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				" npi_ipp_iconfig"
+				" Invalid Input iconfig <0x%x>",
+				iconfig));
+			return (NPI_FAILURE | NPI_IPP_CONFIG_INVALID(portn));
+		}
+		IPP_REG_WR(handle, portn, IPP_INT_MASK_REG, ~iconfig);
+
+		break;
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			" npi_ipp_iconfig"
+			" Invalid Input iconfig <0x%x>",
+			iconfig));
+		return (NPI_FAILURE | NPI_IPP_OPCODE_INVALID(portn));
+	}
+
+	return (NPI_SUCCESS);
+}
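+
+/*
+ * Usage sketch (illustrative only, kept out of the build by an
+ * example-only #ifdef guard): enable all IPP interrupt sources by
+ * clearing their disable bits in the interrupt mask register.
+ */
+#ifdef	NPI_IPP_EXAMPLES
+static npi_status_t
+example_ipp_enable_intrs(npi_handle_t handle, uint8_t portn)
+{
+	return (npi_ipp_iconfig(handle, ENABLE, portn, ICFG_IPP_ALL));
+}
+#endif	/* NPI_IPP_EXAMPLES */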
+
+npi_status_t
+npi_ipp_get_status(npi_handle_t handle, uint8_t portn, ipp_status_t *status)
+{
+	uint64_t val;
+
+	ASSERT(IS_PORT_NUM_VALID(portn));
+
+	IPP_REG_RD(handle, portn, IPP_INT_STATUS_REG, &val);
+
+	status->value = val;
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_ipp_get_pfifo_rd_ptr(npi_handle_t handle, uint8_t portn, uint16_t *rd_ptr)
+{
+	uint64_t value;
+
+	ASSERT(IS_PORT_NUM_VALID(portn));
+
+	IPP_REG_RD(handle, portn, IPP_PFIFO_RD_PTR_REG, &value);
+	*rd_ptr = value & 0xfff;
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_ipp_get_pfifo_wr_ptr(npi_handle_t handle, uint8_t portn, uint16_t *wr_ptr)
+{
+	uint64_t value;
+
+	ASSERT(IS_PORT_NUM_VALID(portn));
+
+	IPP_REG_RD(handle, portn, IPP_PFIFO_WR_PTR_REG, &value);
+	*wr_ptr = value & 0xfff;
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_ipp_get_dfifo_rd_ptr(npi_handle_t handle, uint8_t portn, uint16_t *rd_ptr)
+{
+	uint64_t value;
+
+	ASSERT(IS_PORT_NUM_VALID(portn));
+
+	IPP_REG_RD(handle, portn, IPP_DFIFO_RD_PTR_REG, &value);
+	*rd_ptr = (uint16_t)(value & ((portn < 2) ? IPP_XMAC_DFIFO_PTR_MASK :
+					IPP_BMAC_DFIFO_PTR_MASK));
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_ipp_get_dfifo_wr_ptr(npi_handle_t handle, uint8_t portn, uint16_t *wr_ptr)
+{
+	uint64_t value;
+
+	ASSERT(IS_PORT_NUM_VALID(portn));
+
+	IPP_REG_RD(handle, portn, IPP_DFIFO_WR_PTR_REG, &value);
+	*wr_ptr = (uint16_t)(value & ((portn < 2) ? IPP_XMAC_DFIFO_PTR_MASK :
+					IPP_BMAC_DFIFO_PTR_MASK));
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_ipp_write_pfifo(npi_handle_t handle, uint8_t portn, uint8_t addr,
+		uint32_t d0, uint32_t d1, uint32_t d2, uint32_t d3, uint32_t d4)
+{
+	uint64_t val;
+
+	ASSERT(IS_PORT_NUM_VALID(portn));
+
+	if (addr >= 64) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			" npi_ipp_write_pfifo"
+			" Invalid PFIFO address <0x%x>", addr));
+		return (NPI_FAILURE | NPI_IPP_FIFO_ADDR_INVALID(portn));
+	}
+
+	IPP_REG_RD(handle, portn, IPP_CONFIG_REG, &val);
+	val |= IPP_PRE_FIFO_PIO_WR_EN;
+	IPP_REG_WR(handle, portn, IPP_CONFIG_REG, val);
+
+	IPP_REG_WR(handle, portn, IPP_PFIFO_WR_PTR_REG, addr);
+	IPP_REG_WR(handle, portn, IPP_PFIFO_WR_DATA0_REG, d0);
+	IPP_REG_WR(handle, portn, IPP_PFIFO_WR_DATA1_REG, d1);
+	IPP_REG_WR(handle, portn, IPP_PFIFO_WR_DATA2_REG, d2);
+	IPP_REG_WR(handle, portn, IPP_PFIFO_WR_DATA3_REG, d3);
+	IPP_REG_WR(handle, portn, IPP_PFIFO_WR_DATA4_REG, d4);
+
+	val &= ~IPP_PRE_FIFO_PIO_WR_EN;
+	IPP_REG_WR(handle, portn, IPP_CONFIG_REG, val);
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_ipp_read_pfifo(npi_handle_t handle, uint8_t portn, uint8_t addr,
+		uint32_t *d0, uint32_t *d1, uint32_t *d2, uint32_t *d3,
+		uint32_t *d4)
+{
+	ASSERT(IS_PORT_NUM_VALID(portn));
+
+	if (addr >= 64) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			" npi_ipp_read_pfifo"
+			" Invalid PFIFO address <0x%x>", addr));
+		return (NPI_FAILURE | NPI_IPP_FIFO_ADDR_INVALID(portn));
+	}
+
+	IPP_REG_WR(handle, portn, IPP_PFIFO_RD_PTR_REG, addr);
+	IPP_REG_RD(handle, portn, IPP_PFIFO_RD_DATA0_REG, d0);
+	IPP_REG_RD(handle, portn, IPP_PFIFO_RD_DATA1_REG, d1);
+	IPP_REG_RD(handle, portn, IPP_PFIFO_RD_DATA2_REG, d2);
+	IPP_REG_RD(handle, portn, IPP_PFIFO_RD_DATA3_REG, d3);
+	IPP_REG_RD(handle, portn, IPP_PFIFO_RD_DATA4_REG, d4);
+
+	return (NPI_SUCCESS);
+}
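+
+/*
+ * Usage sketch (illustrative only, kept out of the build by an
+ * example-only #ifdef guard): write a test pattern to pre-FIFO entry 0
+ * and read it back.  The pattern values are arbitrary; comparing the
+ * written and read data is left to the caller.
+ */
+#ifdef	NPI_IPP_EXAMPLES
+static npi_status_t
+example_ipp_pfifo_check(npi_handle_t handle, uint8_t portn)
+{
+	uint32_t d0, d1, d2, d3, d4;
+	npi_status_t rs;
+
+	rs = npi_ipp_write_pfifo(handle, portn, 0,
+	    0xdeadbeef, 0x01234567, 0x89abcdef, 0x0f0f0f0f, 0xa5a5a5a5);
+	if (rs != NPI_SUCCESS)
+		return (rs);
+	return (npi_ipp_read_pfifo(handle, portn, 0,
+	    &d0, &d1, &d2, &d3, &d4));
+}
+#endif	/* NPI_IPP_EXAMPLES */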
+
+npi_status_t
+npi_ipp_write_dfifo(npi_handle_t handle, uint8_t portn, uint16_t addr,
+		uint32_t d0, uint32_t d1, uint32_t d2, uint32_t d3, uint32_t d4)
+{
+	uint64_t val;
+
+	ASSERT(IS_PORT_NUM_VALID(portn));
+
+	if (addr >= 2048) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			" npi_ipp_write_dfifo"
+			" Invalid DFIFO address <0x%x>", addr));
+		return (NPI_FAILURE | NPI_IPP_FIFO_ADDR_INVALID(portn));
+	}
+
+	IPP_REG_RD(handle, portn, IPP_CONFIG_REG, &val);
+	val |= IPP_DFIFO_PIO_WR_EN;
+	IPP_REG_WR(handle, portn, IPP_CONFIG_REG, val);
+
+	IPP_REG_WR(handle, portn, IPP_DFIFO_WR_PTR_REG, addr);
+	IPP_REG_WR(handle, portn, IPP_DFIFO_WR_DATA0_REG, d0);
+	IPP_REG_WR(handle, portn, IPP_DFIFO_WR_DATA1_REG, d1);
+	IPP_REG_WR(handle, portn, IPP_DFIFO_WR_DATA2_REG, d2);
+	IPP_REG_WR(handle, portn, IPP_DFIFO_WR_DATA3_REG, d3);
+	IPP_REG_WR(handle, portn, IPP_DFIFO_WR_DATA4_REG, d4);
+
+	val &= ~IPP_DFIFO_PIO_WR_EN;
+	IPP_REG_WR(handle, portn, IPP_CONFIG_REG, val);
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_ipp_read_dfifo(npi_handle_t handle, uint8_t portn, uint16_t addr,
+		uint32_t *d0, uint32_t *d1, uint32_t *d2, uint32_t *d3,
+		uint32_t *d4)
+{
+	ASSERT(IS_PORT_NUM_VALID(portn));
+
+	if (addr >= 2048) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			" npi_ipp_read_dfifo"
+			" Invalid DFIFO address <0x%x>", addr));
+		return (NPI_FAILURE | NPI_IPP_FIFO_ADDR_INVALID(portn));
+	}
+
+	IPP_REG_WR(handle, portn, IPP_DFIFO_RD_PTR_REG, addr);
+	IPP_REG_RD(handle, portn, IPP_DFIFO_RD_DATA0_REG, d0);
+	IPP_REG_RD(handle, portn, IPP_DFIFO_RD_DATA1_REG, d1);
+	IPP_REG_RD(handle, portn, IPP_DFIFO_RD_DATA2_REG, d2);
+	IPP_REG_RD(handle, portn, IPP_DFIFO_RD_DATA3_REG, d3);
+	IPP_REG_RD(handle, portn, IPP_DFIFO_RD_DATA4_REG, d4);
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_ipp_get_ecc_syndrome(npi_handle_t handle, uint8_t portn, uint16_t *syndrome)
+{
+	uint64_t val;
+
+	ASSERT(IS_PORT_NUM_VALID(portn));
+
+	IPP_REG_RD(handle, portn, IPP_DFIFO_ECC_SYNDROME_REG, &val);
+
+	*syndrome = (uint16_t)val;
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_ipp_get_dfifo_eopm_rdptr(npi_handle_t handle, uint8_t portn,
+							uint16_t *rdptr)
+{
+	uint64_t val;
+
+	ASSERT(IS_PORT_NUM_VALID(portn));
+
+	IPP_REG_RD(handle, portn, IPP_DFIFO_EOPM_RD_PTR_REG, &val);
+
+	*rdptr = (uint16_t)val;
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_ipp_get_state_mach(npi_handle_t handle, uint8_t portn, uint32_t *sm)
+{
+	uint64_t val;
+
+	ASSERT(IS_PORT_NUM_VALID(portn));
+
+	IPP_REG_RD(handle, portn, IPP_STATE_MACHINE_REG, &val);
+
+	*sm = (uint32_t)val;
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_ipp_get_ecc_err_count(npi_handle_t handle, uint8_t portn, uint8_t *err_cnt)
+{
+	ASSERT(IS_PORT_NUM_VALID(portn));
+
+	IPP_REG_RD(handle, portn, IPP_ECC_ERR_COUNTER_REG, err_cnt);
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_ipp_get_pkt_dis_count(npi_handle_t handle, uint8_t portn, uint16_t *dis_cnt)
+{
+	ASSERT(IS_PORT_NUM_VALID(portn));
+
+	IPP_REG_RD(handle, portn, IPP_DISCARD_PKT_CNT_REG, dis_cnt);
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_ipp_get_cs_err_count(npi_handle_t handle, uint8_t portn, uint16_t *err_cnt)
+{
+	ASSERT(IS_PORT_NUM_VALID(portn));
+
+	IPP_REG_RD(handle, portn, IPP_ECC_ERR_COUNTER_REG, err_cnt);
+
+	return (NPI_SUCCESS);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nxge/npi/npi_ipp.h	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,188 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _NPI_IPP_H
+#define	_NPI_IPP_H
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#include <npi.h>
+#include <nxge_ipp_hw.h>
+
+/* IBTP IPP Configuration */
+
+typedef enum ipp_config_e {
+	CFG_IPP =			IPP_EN,
+	CFG_IPP_DFIFO_ECC_CORRECT =	IPP_DFIFO_ECC_CORRECT_EN,
+	CFG_IPP_DROP_BAD_CRC =		IPP_DROP_BAD_CRC_EN,
+	CFG_IPP_TCP_UDP_CKSUM =		IPP_TCP_UDP_CKSUM_EN,
+	CFG_IPP_DFIFO_PIO_WR =		IPP_DFIFO_PIO_WR_EN,
+	CFG_IPP_PRE_FIFO_PIO_WR =	IPP_PRE_FIFO_PIO_WR_EN,
+	CFG_IPP_FFLP_CKSUM_INFO_PIO_WR = IPP_FFLP_CKSUM_INFO_PIO_WR_EN,
+	CFG_IPP_ALL =			(IPP_EN | IPP_DFIFO_ECC_CORRECT_EN |
+			IPP_DROP_BAD_CRC_EN | IPP_TCP_UDP_CKSUM_EN |
+			IPP_DFIFO_PIO_WR_EN | IPP_PRE_FIFO_PIO_WR_EN)
+} ipp_config_t;
+
+typedef enum ipp_iconfig_e {
+	ICFG_IPP_PKT_DISCARD_OVFL =	IPP_PKT_DISCARD_CNT_INTR_DIS,
+	ICFG_IPP_BAD_TCPIP_CKSUM_OVFL =	IPP_BAD_TCPIP_CKSUM_CNT_INTR_DIS,
+	ICFG_IPP_PRE_FIFO_UNDERRUN =	IPP_PRE_FIFO_UNDERRUN_INTR_DIS,
+	ICFG_IPP_PRE_FIFO_OVERRUN =	IPP_PRE_FIFO_OVERRUN_INTR_DIS,
+	ICFG_IPP_PRE_FIFO_PERR =	IPP_PRE_FIFO_PERR_INTR_DIS,
+	ICFG_IPP_DFIFO_ECC_UNCORR_ERR =	IPP_DFIFO_ECC_UNCORR_ERR_INTR_DIS,
+	ICFG_IPP_DFIFO_MISSING_EOP_SOP = IPP_DFIFO_MISSING_EOP_SOP_INTR_DIS,
+	ICFG_IPP_ECC_ERR_OVFL =		IPP_ECC_ERR_CNT_MAX_INTR_DIS,
+	ICFG_IPP_ALL =			(IPP_PKT_DISCARD_CNT_INTR_DIS |
+			IPP_BAD_TCPIP_CKSUM_CNT_INTR_DIS |
+			IPP_PRE_FIFO_UNDERRUN_INTR_DIS |
+			IPP_PRE_FIFO_OVERRUN_INTR_DIS |
+			IPP_PRE_FIFO_PERR_INTR_DIS |
+			IPP_DFIFO_ECC_UNCORR_ERR_INTR_DIS |
+			IPP_DFIFO_MISSING_EOP_SOP_INTR_DIS |
+			IPP_ECC_ERR_CNT_MAX_INTR_DIS)
+} ipp_iconfig_t;
+
+typedef enum ipp_counter_e {
+	CNT_IPP_DISCARD_PKT		= 0x00000001,
+	CNT_IPP_TCP_CKSUM_ERR		= 0x00000002,
+	CNT_IPP_ECC_ERR			= 0x00000004,
+	CNT_IPP_ALL			= 0x00000007
+} ipp_counter_t;
+
+
+typedef enum ipp_port_cnt_idx_e {
+	HWCI_IPP_PKT_DISCARD = 0,
+	HWCI_IPP_TCP_CKSUM_ERR,
+	HWCI_IPP_ECC_ERR,
+	CI_IPP_MISSING_EOP_SOP,
+	CI_IPP_UNCORR_ERR,
+	CI_IPP_PERR,
+	CI_IPP_FIFO_OVERRUN,
+	CI_IPP_FIFO_UNDERRUN,
+	CI_IPP_PORT_CNT_ARR_SIZE
+} ipp_port_cnt_idx_t;
+
+/* IPP specific errors */
+
+#define	IPP_MAX_PKT_BYTES_INVALID	0x50
+#define	IPP_FIFO_ADDR_INVALID		0x51
+
+/* IPP error return macros */
+
+#define	NPI_IPP_PORT_INVALID(portn)\
+		((IPP_BLK_ID << NPI_BLOCK_ID_SHIFT) | PORT_INVALID |\
+				IS_PORT | (portn << NPI_PORT_CHAN_SHIFT))
+#define	NPI_IPP_OPCODE_INVALID(portn)\
+		((IPP_BLK_ID << NPI_BLOCK_ID_SHIFT) | OPCODE_INVALID |\
+				IS_PORT | (portn << NPI_PORT_CHAN_SHIFT))
+#define	NPI_IPP_CONFIG_INVALID(portn)\
+		((IPP_BLK_ID << NPI_BLOCK_ID_SHIFT) | CONFIG_INVALID |\
+				IS_PORT | (portn << NPI_PORT_CHAN_SHIFT))
+#define	NPI_IPP_MAX_PKT_BYTES_INVALID(portn)\
+		((IPP_BLK_ID << NPI_BLOCK_ID_SHIFT) |\
+		IPP_MAX_PKT_BYTES_INVALID |\
+				IS_PORT | (portn << NPI_PORT_CHAN_SHIFT))
+#define	NPI_IPP_COUNTER_INVALID(portn)\
+		((IPP_BLK_ID << NPI_BLOCK_ID_SHIFT) | COUNTER_INVALID |\
+				IS_PORT | (portn << NPI_PORT_CHAN_SHIFT))
+#define	NPI_IPP_RESET_FAILED(portn)\
+		((IPP_BLK_ID << NPI_BLOCK_ID_SHIFT) | RESET_FAILED |\
+				IS_PORT | (portn << NPI_PORT_CHAN_SHIFT))
+#define	NPI_IPP_FIFO_ADDR_INVALID(portn)\
+		((IPP_BLK_ID << NPI_BLOCK_ID_SHIFT) |\
+		IPP_FIFO_ADDR_INVALID |\
+				IS_PORT | (portn << NPI_PORT_CHAN_SHIFT))
+
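+/* Per-port IPP register access: 64-bit PIO at IPP_REG_ADDR(portn, reg) */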
+#define	IPP_REG_RD(handle, portn, reg, val) {\
+	NXGE_REG_RD64(handle, IPP_REG_ADDR(portn, reg), val);\
+}
+
+#define	IPP_REG_WR(handle, portn, reg, val) {\
+	NXGE_REG_WR64(handle, IPP_REG_ADDR(portn, reg), val);\
+}
+
+/* IPP NPI function prototypes */
+npi_status_t npi_ipp_get_pfifo_rd_ptr(npi_handle_t, uint8_t,
+			    uint16_t *);
+
+npi_status_t npi_ipp_get_pfifo_wr_ptr(npi_handle_t, uint8_t,
+			    uint16_t *);
+
+npi_status_t npi_ipp_write_pfifo(npi_handle_t, uint8_t,
+			uint8_t, uint32_t, uint32_t, uint32_t,
+			uint32_t, uint32_t);
+
+npi_status_t npi_ipp_read_pfifo(npi_handle_t, uint8_t,
+			uint8_t, uint32_t *, uint32_t *, uint32_t *,
+			uint32_t *, uint32_t *);
+
+npi_status_t npi_ipp_write_dfifo(npi_handle_t, uint8_t,
+			uint16_t, uint32_t, uint32_t, uint32_t,
+			uint32_t, uint32_t);
+
+npi_status_t npi_ipp_read_dfifo(npi_handle_t, uint8_t,
+			uint16_t, uint32_t *, uint32_t *, uint32_t *,
+			uint32_t *, uint32_t *);
+
+npi_status_t npi_ipp_reset(npi_handle_t, uint8_t);
+npi_status_t npi_ipp_config(npi_handle_t, config_op_t, uint8_t,
+			ipp_config_t);
+npi_status_t npi_ipp_set_max_pktsize(npi_handle_t, uint8_t,
+			uint32_t);
+npi_status_t npi_ipp_iconfig(npi_handle_t, config_op_t, uint8_t,
+			ipp_iconfig_t);
+npi_status_t npi_ipp_get_status(npi_handle_t, uint8_t,
+			ipp_status_t *);
+npi_status_t npi_ipp_counters(npi_handle_t, counter_op_t,
+			ipp_counter_t, uint8_t, npi_counter_t *);
+npi_status_t npi_ipp_get_ecc_syndrome(npi_handle_t, uint8_t,
+			uint16_t *);
+npi_status_t npi_ipp_get_dfifo_eopm_rdptr(npi_handle_t, uint8_t,
+			uint16_t *);
+npi_status_t npi_ipp_get_state_mach(npi_handle_t, uint8_t,
+			uint32_t *);
+npi_status_t npi_ipp_get_dfifo_rd_ptr(npi_handle_t, uint8_t,
+			uint16_t *);
+npi_status_t npi_ipp_get_dfifo_wr_ptr(npi_handle_t, uint8_t,
+			uint16_t *);
+npi_status_t npi_ipp_get_ecc_err_count(npi_handle_t, uint8_t,
+			uint8_t *);
+npi_status_t npi_ipp_get_pkt_dis_count(npi_handle_t, uint8_t,
+			uint16_t *);
+npi_status_t npi_ipp_get_cs_err_count(npi_handle_t, uint8_t,
+			uint16_t *);
+npi_status_t npi_ipp_dump_regs(npi_handle_t, uint8_t);
+void npi_ipp_read_regs(npi_handle_t, uint8_t);
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _NPI_IPP_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nxge/npi/npi_mac.c	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,3515 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#include <npi_mac.h>
+
+#define	MIF_DELAY	500
+
+#define	MAX_FRAME_SZ1	0x5EE
+#define	MAX_FRAME_SZ2	0x5F6
+#define	MAX_FRAME_SZ3	0x7D6
+#define	MAX_FRAME_SZ4	0x232E
+#define	MAX_FRAME_SZ5	0x2406
+
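+/*
+ * XMAC_WAIT_REG/BMAC_WAIT_REG poll a MAC software-reset register until
+ * its two low-order reset bits clear or MAX_PIO_RETRIES expires; the
+ * last value read is left in "val" so callers can detect a timeout.
+ */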
+#define	XMAC_WAIT_REG(handle, portn, reg, val) {\
+	uint32_t cnt = MAX_PIO_RETRIES;\
+	do {\
+		NXGE_DELAY(MAC_RESET_WAIT);\
+		XMAC_REG_RD(handle, portn, reg, &val);\
+		cnt--;\
+	} while (((val & 0x3) != 0) && (cnt > 0));\
+}
+
+#define	BMAC_WAIT_REG(handle, portn, reg, val) {\
+	uint32_t cnt = MAX_PIO_RETRIES;\
+	do {\
+		NXGE_DELAY(MAC_RESET_WAIT);\
+		BMAC_REG_RD(handle, portn, reg, &val);\
+		cnt--;\
+	} while (((val & 0x3) != 0) && (cnt > 0));\
+}
+
+#define	MIF_WAIT_REG(handle, m_frame, t_delay, interval, max_delay) {	  \
+	do {								  \
+		NXGE_DELAY(interval);					  \
+		MIF_REG_RD(handle, MIF_OUTPUT_FRAME_REG, &m_frame.value); \
+		t_delay++;						  \
+	} while ((m_frame.bits.w0.ta_lsb == 0) && t_delay < max_delay);	  \
+}
+
+uint64_t xmac_offset[] = {
+	XTXMAC_SW_RST_REG,
+	XRXMAC_SW_RST_REG,
+	XTXMAC_STATUS_REG,
+	XRXMAC_STATUS_REG,
+	XMAC_CTRL_STAT_REG,
+	XTXMAC_STAT_MSK_REG,
+	XRXMAC_STAT_MSK_REG,
+	XMAC_C_S_MSK_REG,
+	XMAC_CONFIG_REG,
+	XMAC_IPG_REG,
+	XMAC_MIN_REG,
+	XMAC_MAX_REG,
+	XMAC_ADDR0_REG,
+	XMAC_ADDR1_REG,
+	XMAC_ADDR2_REG,
+	XRXMAC_BT_CNT_REG,
+	XRXMAC_BC_FRM_CNT_REG,
+	XRXMAC_MC_FRM_CNT_REG,
+	XRXMAC_FRAG_CNT_REG,
+	XRXMAC_HIST_CNT1_REG,
+	XRXMAC_HIST_CNT2_REG,
+	XRXMAC_HIST_CNT3_REG,
+	XRXMAC_HIST_CNT4_REG,
+	XRXMAC_HIST_CNT5_REG,
+	XRXMAC_HIST_CNT6_REG,
+	XRXMAC_MPSZER_CNT_REG,
+	XRXMAC_CRC_ER_CNT_REG,
+	XRXMAC_CD_VIO_CNT_REG,
+	XRXMAC_AL_ER_CNT_REG,
+	XTXMAC_FRM_CNT_REG,
+	XTXMAC_BYTE_CNT_REG,
+	XMAC_LINK_FLT_CNT_REG,
+	XRXMAC_HIST_CNT7_REG,
+	XMAC_SM_REG,
+	XMAC_INTERN1_REG,
+	XMAC_ADDR_CMPEN_REG,
+	XMAC_ADDR3_REG,
+	XMAC_ADDR4_REG,
+	XMAC_ADDR5_REG,
+	XMAC_ADDR6_REG,
+	XMAC_ADDR7_REG,
+	XMAC_ADDR8_REG,
+	XMAC_ADDR9_REG,
+	XMAC_ADDR10_REG,
+	XMAC_ADDR11_REG,
+	XMAC_ADDR12_REG,
+	XMAC_ADDR13_REG,
+	XMAC_ADDR14_REG,
+	XMAC_ADDR15_REG,
+	XMAC_ADDR16_REG,
+	XMAC_ADDR17_REG,
+	XMAC_ADDR18_REG,
+	XMAC_ADDR19_REG,
+	XMAC_ADDR20_REG,
+	XMAC_ADDR21_REG,
+	XMAC_ADDR22_REG,
+	XMAC_ADDR23_REG,
+	XMAC_ADDR24_REG,
+	XMAC_ADDR25_REG,
+	XMAC_ADDR26_REG,
+	XMAC_ADDR27_REG,
+	XMAC_ADDR28_REG,
+	XMAC_ADDR29_REG,
+	XMAC_ADDR30_REG,
+	XMAC_ADDR31_REG,
+	XMAC_ADDR32_REG,
+	XMAC_ADDR33_REG,
+	XMAC_ADDR34_REG,
+	XMAC_ADDR35_REG,
+	XMAC_ADDR36_REG,
+	XMAC_ADDR37_REG,
+	XMAC_ADDR38_REG,
+	XMAC_ADDR39_REG,
+	XMAC_ADDR40_REG,
+	XMAC_ADDR41_REG,
+	XMAC_ADDR42_REG,
+	XMAC_ADDR43_REG,
+	XMAC_ADDR44_REG,
+	XMAC_ADDR45_REG,
+	XMAC_ADDR46_REG,
+	XMAC_ADDR47_REG,
+	XMAC_ADDR48_REG,
+	XMAC_ADDR49_REG,
+	XMAC_ADDR50_REG,
+	XMAC_ADDR_FILT0_REG,
+	XMAC_ADDR_FILT1_REG,
+	XMAC_ADDR_FILT2_REG,
+	XMAC_ADDR_FILT12_MASK_REG,
+	XMAC_ADDR_FILT0_MASK_REG,
+	XMAC_HASH_TBL0_REG,
+	XMAC_HASH_TBL1_REG,
+	XMAC_HASH_TBL2_REG,
+	XMAC_HASH_TBL3_REG,
+	XMAC_HASH_TBL4_REG,
+	XMAC_HASH_TBL5_REG,
+	XMAC_HASH_TBL6_REG,
+	XMAC_HASH_TBL7_REG,
+	XMAC_HASH_TBL8_REG,
+	XMAC_HASH_TBL9_REG,
+	XMAC_HASH_TBL10_REG,
+	XMAC_HASH_TBL11_REG,
+	XMAC_HASH_TBL12_REG,
+	XMAC_HASH_TBL13_REG,
+	XMAC_HASH_TBL14_REG,
+	XMAC_HASH_TBL15_REG,
+	XMAC_HOST_INF0_REG,
+	XMAC_HOST_INF1_REG,
+	XMAC_HOST_INF2_REG,
+	XMAC_HOST_INF3_REG,
+	XMAC_HOST_INF4_REG,
+	XMAC_HOST_INF5_REG,
+	XMAC_HOST_INF6_REG,
+	XMAC_HOST_INF7_REG,
+	XMAC_HOST_INF8_REG,
+	XMAC_HOST_INF9_REG,
+	XMAC_HOST_INF10_REG,
+	XMAC_HOST_INF11_REG,
+	XMAC_HOST_INF12_REG,
+	XMAC_HOST_INF13_REG,
+	XMAC_HOST_INF14_REG,
+	XMAC_HOST_INF15_REG,
+	XMAC_HOST_INF16_REG,
+	XMAC_HOST_INF17_REG,
+	XMAC_HOST_INF18_REG,
+	XMAC_HOST_INF19_REG,
+	XMAC_PA_DATA0_REG,
+	XMAC_PA_DATA1_REG,
+	XMAC_DEBUG_SEL_REG,
+	XMAC_TRAINING_VECT_REG,
+};
+
+const char *xmac_name[] = {
+	"XTXMAC_SW_RST_REG",
+	"XRXMAC_SW_RST_REG",
+	"XTXMAC_STATUS_REG",
+	"XRXMAC_STATUS_REG",
+	"XMAC_CTRL_STAT_REG",
+	"XTXMAC_STAT_MSK_REG",
+	"XRXMAC_STAT_MSK_REG",
+	"XMAC_C_S_MSK_REG",
+	"XMAC_CONFIG_REG",
+	"XMAC_IPG_REG",
+	"XMAC_MIN_REG",
+	"XMAC_MAX_REG",
+	"XMAC_ADDR0_REG",
+	"XMAC_ADDR1_REG",
+	"XMAC_ADDR2_REG",
+	"XRXMAC_BT_CNT_REG",
+	"XRXMAC_BC_FRM_CNT_REG",
+	"XRXMAC_MC_FRM_CNT_REG",
+	"XRXMAC_FRAG_CNT_REG",
+	"XRXMAC_HIST_CNT1_REG",
+	"XRXMAC_HIST_CNT2_REG",
+	"XRXMAC_HIST_CNT3_REG",
+	"XRXMAC_HIST_CNT4_REG",
+	"XRXMAC_HIST_CNT5_REG",
+	"XRXMAC_HIST_CNT6_REG",
+	"XRXMAC_MPSZER_CNT_REG",
+	"XRXMAC_CRC_ER_CNT_REG",
+	"XRXMAC_CD_VIO_CNT_REG",
+	"XRXMAC_AL_ER_CNT_REG",
+	"XTXMAC_FRM_CNT_REG",
+	"XTXMAC_BYTE_CNT_REG",
+	"XMAC_LINK_FLT_CNT_REG",
+	"XRXMAC_HIST_CNT7_REG",
+	"XMAC_SM_REG",
+	"XMAC_INTERN1_REG",
+	"XMAC_ADDR_CMPEN_REG",
+	"XMAC_ADDR3_REG",
+	"XMAC_ADDR4_REG",
+	"XMAC_ADDR5_REG",
+	"XMAC_ADDR6_REG",
+	"XMAC_ADDR7_REG",
+	"XMAC_ADDR8_REG",
+	"XMAC_ADDR9_REG",
+	"XMAC_ADDR10_REG",
+	"XMAC_ADDR11_REG",
+	"XMAC_ADDR12_REG",
+	"XMAC_ADDR13_REG",
+	"XMAC_ADDR14_REG",
+	"XMAC_ADDR15_REG",
+	"XMAC_ADDR16_REG",
+	"XMAC_ADDR17_REG",
+	"XMAC_ADDR18_REG",
+	"XMAC_ADDR19_REG",
+	"XMAC_ADDR20_REG",
+	"XMAC_ADDR21_REG",
+	"XMAC_ADDR22_REG",
+	"XMAC_ADDR23_REG",
+	"XMAC_ADDR24_REG",
+	"XMAC_ADDR25_REG",
+	"XMAC_ADDR26_REG",
+	"XMAC_ADDR27_REG",
+	"XMAC_ADDR28_REG",
+	"XMAC_ADDR29_REG",
+	"XMAC_ADDR30_REG",
+	"XMAC_ADDR31_REG",
+	"XMAC_ADDR32_REG",
+	"XMAC_ADDR33_REG",
+	"XMAC_ADDR34_REG",
+	"XMAC_ADDR35_REG",
+	"XMAC_ADDR36_REG",
+	"XMAC_ADDR37_REG",
+	"XMAC_ADDR38_REG",
+	"XMAC_ADDR39_REG",
+	"XMAC_ADDR40_REG",
+	"XMAC_ADDR41_REG",
+	"XMAC_ADDR42_REG",
+	"XMAC_ADDR43_REG",
+	"XMAC_ADDR44_REG",
+	"XMAC_ADDR45_REG",
+	"XMAC_ADDR46_REG",
+	"XMAC_ADDR47_REG",
+	"XMAC_ADDR48_REG",
+	"XMAC_ADDR49_REG",
+	"XMAC_ADDR50_REG",
+	"XMAC_ADDR_FILT0_REG",
+	"XMAC_ADDR_FILT1_REG",
+	"XMAC_ADDR_FILT2_REG",
+	"XMAC_ADDR_FILT12_MASK_REG",
+	"XMAC_ADDR_FILT0_MASK_REG",
+	"XMAC_HASH_TBL0_REG",
+	"XMAC_HASH_TBL1_REG",
+	"XMAC_HASH_TBL2_REG",
+	"XMAC_HASH_TBL3_REG",
+	"XMAC_HASH_TBL4_REG",
+	"XMAC_HASH_TBL5_REG",
+	"XMAC_HASH_TBL6_REG",
+	"XMAC_HASH_TBL7_REG",
+	"XMAC_HASH_TBL8_REG",
+	"XMAC_HASH_TBL9_REG",
+	"XMAC_HASH_TBL10_REG",
+	"XMAC_HASH_TBL11_REG",
+	"XMAC_HASH_TBL12_REG",
+	"XMAC_HASH_TBL13_REG",
+	"XMAC_HASH_TBL14_REG",
+	"XMAC_HASH_TBL15_REG",
+	"XMAC_HOST_INF0_REG",
+	"XMAC_HOST_INF1_REG",
+	"XMAC_HOST_INF2_REG",
+	"XMAC_HOST_INF3_REG",
+	"XMAC_HOST_INF4_REG",
+	"XMAC_HOST_INF5_REG",
+	"XMAC_HOST_INF6_REG",
+	"XMAC_HOST_INF7_REG",
+	"XMAC_HOST_INF8_REG",
+	"XMAC_HOST_INF9_REG",
+	"XMAC_HOST_INF10_REG",
+	"XMAC_HOST_INF11_REG",
+	"XMAC_HOST_INF12_REG",
+	"XMAC_HOST_INF13_REG",
+	"XMAC_HOST_INF14_REG",
+	"XMAC_HOST_INF15_REG",
+	"XMAC_HOST_INF16_REG",
+	"XMAC_HOST_INF17_REG",
+	"XMAC_HOST_INF18_REG",
+	"XMAC_HOST_INF19_REG",
+	"XMAC_PA_DATA0_REG",
+	"XMAC_PA_DATA1_REG",
+	"XMAC_DEBUG_SEL_REG",
+	"XMAC_TRAINING_VECT_REG",
+};
+
+uint64_t bmac_offset[] = {
+	BTXMAC_SW_RST_REG,
+	BRXMAC_SW_RST_REG,
+	MAC_SEND_PAUSE_REG,
+	BTXMAC_STATUS_REG,
+	BRXMAC_STATUS_REG,
+	BMAC_CTRL_STAT_REG,
+	BTXMAC_STAT_MSK_REG,
+	BRXMAC_STAT_MSK_REG,
+	BMAC_C_S_MSK_REG,
+	TXMAC_CONFIG_REG,
+	RXMAC_CONFIG_REG,
+	MAC_CTRL_CONFIG_REG,
+	MAC_XIF_CONFIG_REG,
+	BMAC_MIN_REG,
+	BMAC_MAX_REG,
+	MAC_PA_SIZE_REG,
+	MAC_CTRL_TYPE_REG,
+	BMAC_ADDR0_REG,
+	BMAC_ADDR1_REG,
+	BMAC_ADDR2_REG,
+	BMAC_ADDR3_REG,
+	BMAC_ADDR4_REG,
+	BMAC_ADDR5_REG,
+	BMAC_ADDR6_REG,
+	BMAC_ADDR7_REG,
+	BMAC_ADDR8_REG,
+	BMAC_ADDR9_REG,
+	BMAC_ADDR10_REG,
+	BMAC_ADDR11_REG,
+	BMAC_ADDR12_REG,
+	BMAC_ADDR13_REG,
+	BMAC_ADDR14_REG,
+	BMAC_ADDR15_REG,
+	BMAC_ADDR16_REG,
+	BMAC_ADDR17_REG,
+	BMAC_ADDR18_REG,
+	BMAC_ADDR19_REG,
+	BMAC_ADDR20_REG,
+	BMAC_ADDR21_REG,
+	BMAC_ADDR22_REG,
+	BMAC_ADDR23_REG,
+	MAC_FC_ADDR0_REG,
+	MAC_FC_ADDR1_REG,
+	MAC_FC_ADDR2_REG,
+	MAC_ADDR_FILT0_REG,
+	MAC_ADDR_FILT1_REG,
+	MAC_ADDR_FILT2_REG,
+	MAC_ADDR_FILT12_MASK_REG,
+	MAC_ADDR_FILT00_MASK_REG,
+	MAC_HASH_TBL0_REG,
+	MAC_HASH_TBL1_REG,
+	MAC_HASH_TBL2_REG,
+	MAC_HASH_TBL3_REG,
+	MAC_HASH_TBL4_REG,
+	MAC_HASH_TBL5_REG,
+	MAC_HASH_TBL6_REG,
+	MAC_HASH_TBL7_REG,
+	MAC_HASH_TBL8_REG,
+	MAC_HASH_TBL9_REG,
+	MAC_HASH_TBL10_REG,
+	MAC_HASH_TBL11_REG,
+	MAC_HASH_TBL12_REG,
+	MAC_HASH_TBL13_REG,
+	MAC_HASH_TBL14_REG,
+	MAC_HASH_TBL15_REG,
+	RXMAC_FRM_CNT_REG,
+	MAC_LEN_ER_CNT_REG,
+	BMAC_AL_ER_CNT_REG,
+	BMAC_CRC_ER_CNT_REG,
+	BMAC_CD_VIO_CNT_REG,
+	BMAC_SM_REG,
+	BMAC_ALTAD_CMPEN_REG,
+	BMAC_HOST_INF0_REG,
+	BMAC_HOST_INF1_REG,
+	BMAC_HOST_INF2_REG,
+	BMAC_HOST_INF3_REG,
+	BMAC_HOST_INF4_REG,
+	BMAC_HOST_INF5_REG,
+	BMAC_HOST_INF6_REG,
+	BMAC_HOST_INF7_REG,
+	BMAC_HOST_INF8_REG,
+	BTXMAC_BYTE_CNT_REG,
+	BTXMAC_FRM_CNT_REG,
+	BRXMAC_BYTE_CNT_REG,
+};
+
+const char *bmac_name[] = {
+	"BTXMAC_SW_RST_REG",
+	"BRXMAC_SW_RST_REG",
+	"MAC_SEND_PAUSE_REG",
+	"BTXMAC_STATUS_REG",
+	"BRXMAC_STATUS_REG",
+	"BMAC_CTRL_STAT_REG",
+	"BTXMAC_STAT_MSK_REG",
+	"BRXMAC_STAT_MSK_REG",
+	"BMAC_C_S_MSK_REG",
+	"TXMAC_CONFIG_REG",
+	"RXMAC_CONFIG_REG",
+	"MAC_CTRL_CONFIG_REG",
+	"MAC_XIF_CONFIG_REG",
+	"BMAC_MIN_REG",
+	"BMAC_MAX_REG",
+	"MAC_PA_SIZE_REG",
+	"MAC_CTRL_TYPE_REG",
+	"BMAC_ADDR0_REG",
+	"BMAC_ADDR1_REG",
+	"BMAC_ADDR2_REG",
+	"BMAC_ADDR3_REG",
+	"BMAC_ADDR4_REG",
+	"BMAC_ADDR5_REG",
+	"BMAC_ADDR6_REG",
+	"BMAC_ADDR7_REG",
+	"BMAC_ADDR8_REG",
+	"BMAC_ADDR9_REG",
+	"BMAC_ADDR10_REG",
+	"BMAC_ADDR11_REG",
+	"BMAC_ADDR12_REG",
+	"BMAC_ADDR13_REG",
+	"BMAC_ADDR14_REG",
+	"BMAC_ADDR15_REG",
+	"BMAC_ADDR16_REG",
+	"BMAC_ADDR17_REG",
+	"BMAC_ADDR18_REG",
+	"BMAC_ADDR19_REG",
+	"BMAC_ADDR20_REG",
+	"BMAC_ADDR21_REG",
+	"BMAC_ADDR22_REG",
+	"BMAC_ADDR23_REG",
+	"MAC_FC_ADDR0_REG",
+	"MAC_FC_ADDR1_REG",
+	"MAC_FC_ADDR2_REG",
+	"MAC_ADDR_FILT0_REG",
+	"MAC_ADDR_FILT1_REG",
+	"MAC_ADDR_FILT2_REG",
+	"MAC_ADDR_FILT12_MASK_REG",
+	"MAC_ADDR_FILT00_MASK_REG",
+	"MAC_HASH_TBL0_REG",
+	"MAC_HASH_TBL1_REG",
+	"MAC_HASH_TBL2_REG",
+	"MAC_HASH_TBL3_REG",
+	"MAC_HASH_TBL4_REG",
+	"MAC_HASH_TBL5_REG",
+	"MAC_HASH_TBL6_REG",
+	"MAC_HASH_TBL7_REG",
+	"MAC_HASH_TBL8_REG",
+	"MAC_HASH_TBL9_REG",
+	"MAC_HASH_TBL10_REG",
+	"MAC_HASH_TBL11_REG",
+	"MAC_HASH_TBL12_REG",
+	"MAC_HASH_TBL13_REG",
+	"MAC_HASH_TBL14_REG",
+	"MAC_HASH_TBL15_REG",
+	"RXMAC_FRM_CNT_REG",
+	"MAC_LEN_ER_CNT_REG",
+	"BMAC_AL_ER_CNT_REG",
+	"BMAC_CRC_ER_CNT_REG",
+	"BMAC_CD_VIO_CNT_REG",
+	"BMAC_SM_REG",
+	"BMAC_ALTAD_CMPEN_REG",
+	"BMAC_HOST_INF0_REG",
+	"BMAC_HOST_INF1_REG",
+	"BMAC_HOST_INF2_REG",
+	"BMAC_HOST_INF3_REG",
+	"BMAC_HOST_INF4_REG",
+	"BMAC_HOST_INF5_REG",
+	"BMAC_HOST_INF6_REG",
+	"BMAC_HOST_INF7_REG",
+	"BMAC_HOST_INF8_REG",
+	"BTXMAC_BYTE_CNT_REG",
+	"BTXMAC_FRM_CNT_REG",
+	"BRXMAC_BYTE_CNT_REG",
+};
+
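+/*
+ * npi_mac_dump_regs():
+ *	Dump the per-port MAC registers through NPI_REG_DUMP_MSG(): the
+ *	XMAC register set for ports 0 and 1, the BMAC register set for
+ *	ports 2 and 3.
+ */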
+npi_status_t
+npi_mac_dump_regs(npi_handle_t handle, uint8_t port)
+{
+
+	uint64_t value;
+	int num_regs, i;
+
+	ASSERT(IS_PORT_NUM_VALID(port));
+
+	switch (port) {
+	case 0:
+	case 1:
+		num_regs = sizeof (xmac_offset) / sizeof (uint64_t);
+		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+				    "\nXMAC Register Dump for port %d\n",
+				    port));
+		for (i = 0; i < num_regs; i++) {
+			XMAC_REG_RD(handle, port, xmac_offset[i], &value);
+			NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+				"%08llx %s\t %08llx \n",
+				(XMAC_REG_ADDR((port), (xmac_offset[i]))),
+				xmac_name[i], value));
+		}
+
+		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+			    "\n XMAC Register Dump for port %d done\n",
+			    port));
+		break;
+
+	case 2:
+	case 3:
+		num_regs = sizeof (bmac_offset) / sizeof (uint64_t);
+		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+				    "\nBMAC Register Dump for port %d\n",
+				    port));
+		for (i = 0; i < num_regs; i++) {
+			BMAC_REG_RD(handle, port, bmac_offset[i], &value);
+			NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+				"%08llx %s\t %08llx \n",
+				(BMAC_REG_ADDR((port), (bmac_offset[i]))),
+				bmac_name[i], value));
+		}
+
+		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+			    "\n BMAC Register Dump for port %d done\n",
+			    port));
+		break;
+	}
+
+	return (NPI_SUCCESS);
+}
+
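+/*
+ * PCS link interrupt control: npi_mac_pcs_link_intr_enable() clears the
+ * mask bit in PCS_CONFIG_REG and npi_mac_pcs_link_intr_disable() sets
+ * it; the XPCS variants toggle the csr_rx_link_stat bit in
+ * XPCS_MASK_1_REG instead.
+ */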
+npi_status_t
+npi_mac_pcs_link_intr_enable(npi_handle_t handle, uint8_t portn)
+{
+	pcs_cfg_t pcs_cfg;
+
+	ASSERT(IS_PORT_NUM_VALID(portn));
+
+	PCS_REG_RD(handle, portn, PCS_CONFIG_REG, &pcs_cfg.value);
+	pcs_cfg.bits.w0.mask = 0;
+	PCS_REG_WR(handle, portn, PCS_CONFIG_REG, pcs_cfg.value);
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_mac_pcs_link_intr_disable(npi_handle_t handle, uint8_t portn)
+{
+	pcs_cfg_t pcs_cfg;
+
+	ASSERT(IS_PORT_NUM_VALID(portn));
+
+	PCS_REG_RD(handle, portn, PCS_CONFIG_REG, &pcs_cfg.val.lsw);
+	pcs_cfg.bits.w0.mask = 1;
+	PCS_REG_WR(handle, portn, PCS_CONFIG_REG, pcs_cfg.val.lsw);
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_xmac_xpcs_link_intr_enable(npi_handle_t handle, uint8_t portn)
+{
+	xpcs_stat1_t xpcs_mask1;
+
+	ASSERT(IS_XMAC_PORT_NUM_VALID(portn));
+
+	XPCS_REG_RD(handle, portn, XPCS_MASK_1_REG, &xpcs_mask1.val.lsw);
+	xpcs_mask1.bits.w0.csr_rx_link_stat = 1;
+	XPCS_REG_WR(handle, portn, XPCS_MASK_1_REG, xpcs_mask1.val.lsw);
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_xmac_xpcs_link_intr_disable(npi_handle_t handle, uint8_t portn)
+{
+	xpcs_stat1_t xpcs_mask1;
+
+	ASSERT(IS_XMAC_PORT_NUM_VALID(portn));
+
+	XPCS_REG_RD(handle, portn, XPCS_MASK_1_REG, &xpcs_mask1.val.lsw);
+	xpcs_mask1.bits.w0.csr_rx_link_stat = 0;
+	XPCS_REG_WR(handle, portn, XPCS_MASK_1_REG, xpcs_mask1.val.lsw);
+
+	return (NPI_SUCCESS);
+}
+
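+/*
+ * npi_mac_mif_link_intr_disable():
+ *	Point the MIF at the PHY address for this port and turn off MIF
+ *	link-status polling.
+ */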
+npi_status_t
+npi_mac_mif_link_intr_disable(npi_handle_t handle, uint8_t portn)
+{
+	mif_cfg_t mif_cfg;
+
+	ASSERT(IS_PORT_NUM_VALID(portn));
+
+	MIF_REG_RD(handle, MIF_CONFIG_REG, &mif_cfg.val.lsw);
+
+	mif_cfg.bits.w0.phy_addr = portn;
+	mif_cfg.bits.w0.poll_en = 0;
+
+	MIF_REG_WR(handle, MIF_CONFIG_REG, mif_cfg.val.lsw);
+
+	NXGE_DELAY(20);
+
+	return (NPI_SUCCESS);
+}
+
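+/*
+ * npi_mac_hashtab_entry():
+ *	Read (OP_GET) or write (OP_SET) one 16-bit entry of the MAC hash
+ *	table, selecting the XMAC or BMAC register block by port number.
+ */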
+npi_status_t
+npi_mac_hashtab_entry(npi_handle_t handle, io_op_t op, uint8_t portn,
+			uint8_t entryn, uint16_t *data)
+{
+	uint64_t val;
+
+	ASSERT((op == OP_GET) || (op == OP_SET));
+	ASSERT(IS_PORT_NUM_VALID(portn));
+
+	ASSERT(entryn < MAC_MAX_HASH_ENTRY);
+	if (entryn >= MAC_MAX_HASH_ENTRY) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_mac_hashtab_entry"
+				    " Invalid Input: entryn <0x%x>",
+				    entryn));
+		return (NPI_FAILURE | NPI_MAC_HASHTAB_ENTRY_INVALID(portn));
+	}
+
+	if (op == OP_SET) {
+		val = *data;
+		if ((portn == XMAC_PORT_0) || (portn == XMAC_PORT_1)) {
+			XMAC_REG_WR(handle, portn,
+					XMAC_HASH_TBLN_REG_ADDR(entryn), val);
+		} else {
+			BMAC_REG_WR(handle, portn,
+					BMAC_HASH_TBLN_REG_ADDR(entryn), val);
+		}
+	} else {
+		if ((portn == XMAC_PORT_0) || (portn == XMAC_PORT_1)) {
+			XMAC_REG_RD(handle, portn,
+					XMAC_HASH_TBLN_REG_ADDR(entryn), &val);
+		} else {
+			BMAC_REG_RD(handle, portn,
+					BMAC_HASH_TBLN_REG_ADDR(entryn), &val);
+		}
+		*data = val & 0xFFFF;
+	}
+
+	return (NPI_SUCCESS);
+}
+
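+/*
+ * npi_mac_hostinfo_entry():
+ *	Read or write one host-info entry for the given port; XMAC and
+ *	BMAC ports have different maximum entry counts.
+ */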
+npi_status_t
+npi_mac_hostinfo_entry(npi_handle_t handle, io_op_t op, uint8_t portn,
+				uint8_t entryn, hostinfo_t *hostinfo)
+{
+	ASSERT((op == OP_GET) || (op == OP_SET));
+	ASSERT(IS_PORT_NUM_VALID(portn));
+
+	if ((portn == XMAC_PORT_0) || (portn == XMAC_PORT_1)) {
+		ASSERT(entryn < XMAC_MAX_HOST_INFO_ENTRY);
+		if (entryn >= XMAC_MAX_HOST_INFO_ENTRY) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_mac_hostinfo_entry"
+					    " Invalid Input: entryn <0x%x>",
+					    entryn));
+			return (NPI_FAILURE |
+				NPI_MAC_HOSTINFO_ENTRY_INVALID(portn));
+		}
+	} else {
+		ASSERT(entryn < BMAC_MAX_HOST_INFO_ENTRY);
+		if (entryn >= BMAC_MAX_HOST_INFO_ENTRY) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_mac_hostinfo_entry"
+					    " Invalid Input: entryn <0x%x>",
+					    entryn));
+			return (NPI_FAILURE |
+				NPI_MAC_HOSTINFO_ENTRY_INVALID(portn));
+		}
+	}
+
+	if (op == OP_SET) {
+		if ((portn == XMAC_PORT_0) || (portn == XMAC_PORT_1)) {
+			XMAC_REG_WR(handle, portn,
+					XMAC_HOST_INFN_REG_ADDR(entryn),
+					hostinfo->value);
+		} else {
+			BMAC_REG_WR(handle, portn,
+					BMAC_HOST_INFN_REG_ADDR(entryn),
+					hostinfo->value);
+		}
+	} else {
+		if ((portn == XMAC_PORT_0) || (portn == XMAC_PORT_1)) {
+			XMAC_REG_RD(handle, portn,
+					XMAC_HOST_INFN_REG_ADDR(entryn),
+					&hostinfo->value);
+		} else {
+			BMAC_REG_RD(handle, portn,
+					BMAC_HOST_INFN_REG_ADDR(entryn),
+					&hostinfo->value);
+		}
+	}
+
+	return (NPI_SUCCESS);
+}
+
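+/*
+ * npi_mac_altaddr_enable():
+ *	Set the compare-enable bit for alternate MAC address entry
+ *	"addrn" in XMAC_ADDR_CMPEN_REG (XMAC ports) or
+ *	BMAC_ALTAD_CMPEN_REG (BMAC ports).
+ */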
+npi_status_t
+npi_mac_altaddr_enable(npi_handle_t handle, uint8_t portn, uint8_t addrn)
+{
+	uint64_t val;
+
+	ASSERT(IS_PORT_NUM_VALID(portn));
+
+	if ((portn == XMAC_PORT_0) || (portn == XMAC_PORT_1)) {
+		ASSERT(addrn <= XMAC_MAX_ALT_ADDR_ENTRY);
+		if (addrn > XMAC_MAX_ALT_ADDR_ENTRY) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_mac_altaddr_enable"
+					    " Invalid Input: addrn <0x%x>",
+					    addrn));
+			return (NPI_FAILURE |
+				NPI_MAC_ALT_ADDR_ENTRY_INVALID(portn));
+		}
+		XMAC_REG_RD(handle, portn, XMAC_ADDR_CMPEN_REG, &val);
+		val |= (1 << addrn);
+		XMAC_REG_WR(handle, portn, XMAC_ADDR_CMPEN_REG, val);
+	} else {
+		ASSERT(addrn <= BMAC_MAX_ALT_ADDR_ENTRY);
+		if (addrn > BMAC_MAX_ALT_ADDR_ENTRY) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_mac_altaddr_enable"
+					    " Invalid Input: addrn <0x%x>",
+					    addrn));
+			return (NPI_FAILURE |
+				NPI_MAC_ALT_ADDR_ENTRY_INVALID(portn));
+		}
+		BMAC_REG_RD(handle, portn, BMAC_ALTAD_CMPEN_REG, &val);
+		val |= (1 << addrn);
+		BMAC_REG_WR(handle, portn, BMAC_ALTAD_CMPEN_REG, val);
+	}
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * While all bits of XMAC_ADDR_CMPEN_REG are for alternate MAC addresses,
+ * bit0 of BMAC_ALTAD_CMPEN_REG is for unique MAC address.
+ */
+npi_status_t
+npi_mac_altaddr_disable(npi_handle_t handle, uint8_t portn, uint8_t addrn)
+{
+	uint64_t val;
+
+	ASSERT(IS_PORT_NUM_VALID(portn));
+
+	if ((portn == XMAC_PORT_0) || (portn == XMAC_PORT_1)) {
+		ASSERT(addrn <= XMAC_MAX_ALT_ADDR_ENTRY);
+		if (addrn > XMAC_MAX_ALT_ADDR_ENTRY) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					" npi_mac_altaddr_disable"
+					" Invalid Input: addrn <0x%x>",
+					addrn));
+			return (NPI_FAILURE |
+				NPI_MAC_ALT_ADDR_ENTRY_INVALID(portn));
+		}
+		XMAC_REG_RD(handle, portn, XMAC_ADDR_CMPEN_REG, &val);
+		val &= ~(1 << addrn);
+		XMAC_REG_WR(handle, portn, XMAC_ADDR_CMPEN_REG, val);
+	} else {
+		ASSERT(addrn <= BMAC_MAX_ALT_ADDR_ENTRY);
+		if (addrn > BMAC_MAX_ALT_ADDR_ENTRY) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					" npi_mac_altaddr_disable"
+					" Invalid Input: addrn <0x%x>",
+				    addrn));
+			return (NPI_FAILURE |
+				NPI_MAC_ALT_ADDR_ENTRY_INVALID(portn));
+		}
+		BMAC_REG_RD(handle, portn, BMAC_ALTAD_CMPEN_REG, &val);
+		val &= ~(1 << addrn);
+		BMAC_REG_WR(handle, portn, BMAC_ALTAD_CMPEN_REG, val);
+	}
+
+	return (NPI_SUCCESS);
+}
+
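+/*
+ * npi_mac_altaddr_entry():
+ *	Read or write the three 16-bit words that make up one alternate
+ *	MAC address entry for the given port.
+ */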
+npi_status_t
+npi_mac_altaddr_entry(npi_handle_t handle, io_op_t op, uint8_t portn,
+			uint8_t entryn, npi_mac_addr_t *data)
+{
+	uint64_t val0, val1, val2;
+
+	ASSERT(IS_PORT_NUM_VALID(portn));
+	ASSERT((op == OP_GET) || (op == OP_SET));
+
+	if ((portn == XMAC_PORT_0) || (portn == XMAC_PORT_1)) {
+		ASSERT(entryn <= XMAC_MAX_ALT_ADDR_ENTRY);
+		if (entryn > XMAC_MAX_ALT_ADDR_ENTRY) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_mac_altaddr_entry"
+					    " Invalid Input: entryn <0x%x>",
+					    entryn));
+			return (NPI_FAILURE |
+				NPI_MAC_ALT_ADDR_ENTRY_INVALID(portn));
+		}
+		if (op == OP_SET) {
+			val0 = data->w0;
+			val1 = data->w1;
+			val2 = data->w2;
+			XMAC_REG_WR(handle, portn,
+				XMAC_ALT_ADDR0N_REG_ADDR(entryn), val0);
+			XMAC_REG_WR(handle, portn,
+				XMAC_ALT_ADDR1N_REG_ADDR(entryn), val1);
+			XMAC_REG_WR(handle, portn,
+				XMAC_ALT_ADDR2N_REG_ADDR(entryn), val2);
+		} else {
+			XMAC_REG_RD(handle, portn,
+				XMAC_ALT_ADDR0N_REG_ADDR(entryn), &val0);
+			XMAC_REG_RD(handle, portn,
+				XMAC_ALT_ADDR1N_REG_ADDR(entryn), &val1);
+			XMAC_REG_RD(handle, portn,
+				XMAC_ALT_ADDR2N_REG_ADDR(entryn), &val2);
+			data->w0 = val0 & 0xFFFF;
+			data->w1 = val1 & 0xFFFF;
+			data->w2 = val2 & 0xFFFF;
+		}
+	} else {
+		ASSERT(entryn <= BMAC_MAX_ALT_ADDR_ENTRY);
+		if (entryn > BMAC_MAX_ALT_ADDR_ENTRY) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_mac_altaddr_entry"
+					    " Invalid Input: entryn <0x%x>",
+					    entryn));
+			return (NPI_FAILURE |
+				NPI_MAC_ALT_ADDR_ENTRY_INVALID(portn));
+		}
+		if (op == OP_SET) {
+			val0 = data->w0;
+			val1 = data->w1;
+			val2 = data->w2;
+			BMAC_REG_WR(handle, portn,
+				BMAC_ALT_ADDR0N_REG_ADDR(entryn), val0);
+			BMAC_REG_WR(handle, portn,
+				BMAC_ALT_ADDR1N_REG_ADDR(entryn), val1);
+			BMAC_REG_WR(handle, portn,
+				BMAC_ALT_ADDR2N_REG_ADDR(entryn), val2);
+		} else {
+			BMAC_REG_RD(handle, portn,
+				BMAC_ALT_ADDR0N_REG_ADDR(entryn), &val0);
+			BMAC_REG_RD(handle, portn,
+				BMAC_ALT_ADDR1N_REG_ADDR(entryn), &val1);
+			BMAC_REG_RD(handle, portn,
+				BMAC_ALT_ADDR2N_REG_ADDR(entryn), &val2);
+			data->w0 = val0 & 0xFFFF;
+			data->w1 = val1 & 0xFFFF;
+			data->w2 = val2 & 0xFFFF;
+		}
+	}
+
+	return (NPI_SUCCESS);
+}
+
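+/*
+ * npi_mac_port_attr():
+ *	Get or set one per-port MAC attribute (port mode, frame sizes,
+ *	burst size, PA size, control type, IPG values, station address
+ *	and address filters); arguments pass through the npi_attr_t
+ *	idata/odata arrays.
+ */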
+npi_status_t
+npi_mac_port_attr(npi_handle_t handle, io_op_t op, uint8_t portn,
+			npi_attr_t *attrp)
+{
+	uint64_t val = 0;
+	uint32_t attr;
+
+	ASSERT(IS_PORT_NUM_VALID(portn));
+	ASSERT((op == OP_GET) || (op == OP_SET));
+
+	switch (attrp->type) {
+	case MAC_PORT_MODE:
+		switch (portn) {
+		case XMAC_PORT_0:
+		case XMAC_PORT_1:
+			if (op == OP_SET) {
+				attr = attrp->idata[0];
+				ASSERT((attr == MAC_MII_MODE) ||	\
+					(attr == MAC_GMII_MODE) ||	\
+					(attr == MAC_XGMII_MODE));
+				if ((attr != MAC_MII_MODE) &&
+					(attr != MAC_GMII_MODE) &&
+					(attr != MAC_XGMII_MODE)) {
+					NPI_ERROR_MSG((handle.function,
+						    NPI_ERR_CTL,
+						    " npi_mac_port_attr"
+						    " Invalid Input:"
+						    " MAC_PORT_MODE <0x%x>",
+						    attr));
+					return (NPI_FAILURE |
+					NPI_MAC_PORT_ATTR_INVALID(portn));
+				}
+				XMAC_REG_RD(handle, portn, XMAC_CONFIG_REG,
+						&val);
+				val &= ~XMAC_XIF_MII_MODE_MASK;
+				switch (attr) {
+				case MAC_MII_MODE:
+					val |= (XMAC_XIF_MII_MODE <<
+						XMAC_XIF_MII_MODE_SHIFT);
+					break;
+				case MAC_GMII_MODE:
+					val |= (XMAC_XIF_GMII_MODE <<
+						XMAC_XIF_MII_MODE_SHIFT);
+					break;
+				case MAC_XGMII_MODE:
+					val |= (XMAC_XIF_XGMII_MODE <<
+						XMAC_XIF_MII_MODE_SHIFT);
+					break;
+				default:
+					return (NPI_FAILURE |
+					NPI_MAC_PORT_ATTR_INVALID(portn));
+				}
+				XMAC_REG_WR(handle, portn, XMAC_CONFIG_REG,
+						val);
+			} else {
+				XMAC_REG_RD(handle, portn, XMAC_CONFIG_REG,
+						&val);
+				val &= XMAC_XIF_MII_MODE_MASK;
+				attr = val >> XMAC_XIF_MII_MODE_SHIFT;
+				attrp->odata[0] = attr;
+			}
+			break;
+		case BMAC_PORT_0:
+		case BMAC_PORT_1:
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_mac_port_attr"
+					    " Invalid Input:"
+					    " MAC_PORT_MODE <0x%x>",
+					    attrp->type));
+			return (NPI_FAILURE |
+				NPI_MAC_PORT_ATTR_INVALID(portn));
+		default:
+			return (NPI_FAILURE | NPI_MAC_PORT_INVALID(portn));
+		}
+		break;
+
+	case MAC_PORT_FRAME_SIZE: {
+		uint32_t min_fsize;
+		uint32_t max_fsize;
+
+		switch (portn) {
+		case XMAC_PORT_0:
+		case XMAC_PORT_1:
+			if (op == OP_SET) {
+				min_fsize = attrp->idata[0];
+				max_fsize = attrp->idata[1];
+				ASSERT((min_fsize &	\
+					~XMAC_MIN_TX_FRM_SZ_MASK) == 0);
+				if ((min_fsize & ~XMAC_MIN_TX_FRM_SZ_MASK)
+						!= 0) {
+					NPI_ERROR_MSG((handle.function,
+						    NPI_ERR_CTL,
+						    " npi_mac_port_attr"
+						    " MAC_PORT_FRAME_SIZE:"
+						    " Invalid Input:"
+						    " xmac_min_fsize <0x%x>",
+						    min_fsize));
+					return (NPI_FAILURE |
+					NPI_MAC_PORT_ATTR_INVALID(portn));
+				}
+				ASSERT((max_fsize &	\
+					~XMAC_MAX_FRM_SZ_MASK) == 0);
+				if ((max_fsize & ~XMAC_MAX_FRM_SZ_MASK)
+						!= 0) {
+					NPI_ERROR_MSG((handle.function,
+						    NPI_ERR_CTL,
+						    " npi_mac_port_attr"
+						    " MAC_PORT_FRAME_SIZE:"
+						    " Invalid Input:"
+						    " xmac_max_fsize <0x%x>",
+						    max_fsize));
+					return (NPI_FAILURE |
+					NPI_MAC_PORT_ATTR_INVALID(portn));
+				}
+				XMAC_REG_RD(handle, portn, XMAC_MIN_REG, &val);
+				val &= ~(XMAC_MIN_TX_FRM_SZ_MASK |
+					XMAC_MIN_RX_FRM_SZ_MASK);
+				val |= (min_fsize << XMAC_MIN_TX_FRM_SZ_SHIFT);
+				val |= (min_fsize << XMAC_MIN_RX_FRM_SZ_SHIFT);
+				XMAC_REG_WR(handle, portn, XMAC_MIN_REG, val);
+				XMAC_REG_WR(handle, portn, XMAC_MAX_REG,
+						max_fsize);
+			} else {
+				XMAC_REG_RD(handle, portn, XMAC_MIN_REG, &val);
+				min_fsize = (val & XMAC_MIN_TX_FRM_SZ_MASK)
+						>> XMAC_MIN_TX_FRM_SZ_SHIFT;
+				XMAC_REG_RD(handle, portn, XMAC_MAX_REG, &val);
+				max_fsize = val & XMAC_MAX_FRM_SZ_MASK;
+				attrp->odata[0] = min_fsize;
+				attrp->odata[1] = max_fsize;
+			}
+			break;
+		case BMAC_PORT_0:
+		case BMAC_PORT_1:
+			if (op == OP_SET) {
+				min_fsize = attrp->idata[0];
+				max_fsize = attrp->idata[1];
+				ASSERT((min_fsize & ~BMAC_MIN_FRAME_MASK) == 0);
+				if ((min_fsize & ~BMAC_MIN_FRAME_MASK)
+						!= 0) {
+					NPI_ERROR_MSG((handle.function,
+						    NPI_ERR_CTL,
+						    " npi_mac_port_attr"
+						    " MAC_FRAME_SIZE:"
+						    " Invalid Input:"
+						    " bmac_min_fsize <0x%x>",
+						    min_fsize));
+					return (NPI_FAILURE |
+					NPI_MAC_PORT_ATTR_INVALID(portn));
+				}
+				ASSERT((max_fsize & ~BMAC_MAX_FRAME_MASK) == 0);
+				if ((max_fsize & ~BMAC_MAX_FRAME_MASK)
+						!= 0) {
+					NPI_ERROR_MSG((handle.function,
+						    NPI_ERR_CTL,
+						    " npi_mac_port_attr"
+						    " MAC_FRAME_SIZE:"
+						    " Invalid Input:"
+						    " bmac_max_fsize <0x%x>",
+						    max_fsize));
+					return (NPI_FAILURE |
+					NPI_MAC_PORT_ATTR_INVALID(portn));
+				}
+				BMAC_REG_RD(handle, portn, BMAC_MAX_REG, &val);
+				val &= ~BMAC_MAX_FRAME_MASK;
+				if (max_fsize <= MAX_FRAME_SZ1)
+					val |= MAX_FRAME_SZ1;
+				else if ((max_fsize > MAX_FRAME_SZ1) &&
+					(max_fsize <= MAX_FRAME_SZ2))
+					val |= MAX_FRAME_SZ2;
+				else if ((max_fsize > MAX_FRAME_SZ2) &&
+					(max_fsize <= MAX_FRAME_SZ3))
+					val |= MAX_FRAME_SZ3;
+				else if ((max_fsize > MAX_FRAME_SZ3) &&
+					(max_fsize <= MAX_FRAME_SZ4))
+					val |= MAX_FRAME_SZ4;
+				else if ((max_fsize > MAX_FRAME_SZ4) &&
+					(max_fsize <= MAX_FRAME_SZ5))
+					val |= MAX_FRAME_SZ5;
+				BMAC_REG_WR(handle, portn, BMAC_MAX_REG, val);
+				BMAC_REG_WR(handle, portn, BMAC_MIN_REG,
+						min_fsize);
+			} else {
+				BMAC_REG_RD(handle, portn, BMAC_MIN_REG, &val);
+				min_fsize = val & BMAC_MIN_FRAME_MASK;
+				BMAC_REG_RD(handle, portn, BMAC_MAX_REG, &val);
+				max_fsize = val & BMAC_MAX_FRAME_MASK;
+				attrp->odata[0] = min_fsize;
+				attrp->odata[1] = max_fsize;
+			}
+			break;
+		default:
+			return (NPI_FAILURE | NPI_MAC_PORT_INVALID(portn));
+		}
+	}	break;
+
+	case BMAC_PORT_MAX_BURST_SIZE: {
+		uint32_t burst_size;
+		switch (portn) {
+		case XMAC_PORT_0:
+		case XMAC_PORT_1:
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_mac_port_attr"
+					    " BMAC_PORT_MAX_BURST_SIZE:"
+					    " Invalid Input: portn <%d>",
+					    portn));
+			return (NPI_FAILURE | NPI_MAC_PORT_ATTR_INVALID(portn));
+		case BMAC_PORT_0:
+		case BMAC_PORT_1:
+			/* NOTE: Not used in Full duplex mode */
+			if (op == OP_SET) {
+				burst_size = attrp->idata[0];
+				ASSERT((burst_size & ~0x7FFF) == 0);
+				if ((burst_size & ~0x7FFF) != 0) {
+					NPI_ERROR_MSG((handle.function,
+						    NPI_ERR_CTL,
+						    " npi_mac_port_attr"
+						    " BMAC_MAX_BURST_SIZE:"
+						    " Invalid Input:"
+						    " burst_size <0x%x>",
+						    burst_size));
+					return (NPI_FAILURE |
+					NPI_MAC_PORT_ATTR_INVALID(portn));
+				}
+				BMAC_REG_RD(handle, portn, BMAC_MAX_REG, &val);
+				val &= ~BMAC_MAX_BURST_MASK;
+				val |= (burst_size << BMAC_MAX_BURST_SHIFT);
+				BMAC_REG_WR(handle, portn, BMAC_MAX_REG, val);
+			} else {
+				BMAC_REG_RD(handle, portn, BMAC_MAX_REG, &val);
+				burst_size = (val & BMAC_MAX_BURST_MASK)
+						>> BMAC_MAX_BURST_SHIFT;
+				attrp->odata[0] = burst_size;
+			}
+			break;
+		default:
+			return (NPI_FAILURE | NPI_MAC_PORT_INVALID(portn));
+		}
+	}	break;
+
+	case BMAC_PORT_PA_SIZE: {
+		uint32_t pa_size;
+		switch (portn) {
+		case XMAC_PORT_0:
+		case XMAC_PORT_1:
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_mac_port_attr"
+					    " BMAC_PORT_PA_SIZE:"
+					    " Invalid Input: portn <%d>",
+					    portn));
+			return (NPI_FAILURE | NPI_MAC_PORT_ATTR_INVALID(portn));
+		case BMAC_PORT_0:
+		case BMAC_PORT_1:
+			if (op == OP_SET) {
+				pa_size = attrp->idata[0];
+				ASSERT((pa_size & ~0x3FF) == 0);
+				if ((pa_size & ~0x3FF) != 0) {
+					NPI_ERROR_MSG((handle.function,
+					    NPI_ERR_CTL,
+					    " npi_mac_port_attr"
+					    " BMAC_PORT_PA_SIZE:"
+					    " Invalid Input: pa_size <0x%x>",
+					    pa_size));
+
+					return (NPI_FAILURE |
+					NPI_MAC_PORT_ATTR_INVALID(portn));
+				}
+				BMAC_REG_RD(handle, portn, MAC_PA_SIZE_REG,
+					    &val);
+				val &= ~BMAC_PA_SIZE_MASK;
+				val |= (pa_size << 0);
+				BMAC_REG_WR(handle, portn, MAC_PA_SIZE_REG,
+					    val);
+			} else {
+				BMAC_REG_RD(handle, portn, MAC_PA_SIZE_REG,
+					    &val);
+				pa_size = (val & BMAC_PA_SIZE_MASK) >> 0;
+				attrp->odata[0] = pa_size;
+			}
+			break;
+		default:
+			return (NPI_FAILURE | NPI_MAC_PORT_INVALID(portn));
+		}
+	}	break;
+
+	case BMAC_PORT_CTRL_TYPE: {
+		uint32_t ctrl_type;
+		switch (portn) {
+		case XMAC_PORT_0:
+		case XMAC_PORT_1:
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_mac_port_attr"
+					    " BMAC_PORT_CTRL_TYPE:"
+					    " Invalid Input: portn <%d>",
+					    portn));
+			return (NPI_FAILURE | NPI_MAC_PORT_ATTR_INVALID(portn));
+		case BMAC_PORT_0:
+		case BMAC_PORT_1:
+			if (op == OP_SET) {
+				ctrl_type = attrp->idata[0];
+				ASSERT((ctrl_type & ~0xFFFF) == 0);
+				if ((ctrl_type & ~0xFFFF) != 0) {
+					NPI_ERROR_MSG((handle.function,
+						    NPI_ERR_CTL,
+						    " npi_mac_port_attr"
+						    " BMAC_PORT_CTRL_TYPE:"
+						    " Invalid Input:"
+						    " ctrl_type <0x%x>",
+						    ctrl_type));
+					return (NPI_FAILURE |
+					NPI_MAC_PORT_ATTR_INVALID(portn));
+				}
+				BMAC_REG_WR(handle, portn, MAC_CTRL_TYPE_REG,
+						ctrl_type);
+			} else {
+				BMAC_REG_RD(handle, portn, MAC_CTRL_TYPE_REG,
+						&val);
+				ctrl_type = (val & 0xFFFF);
+				attrp->odata[0] = ctrl_type;
+			}
+			break;
+		default:
+			return (NPI_FAILURE | NPI_MAC_PORT_INVALID(portn));
+		}
+	}	break;
+
+	case XMAC_10G_PORT_IPG:
+		{
+		uint32_t	ipg0;
+
+		switch (portn) {
+		case XMAC_PORT_0:
+		case XMAC_PORT_1:
+			if (op == OP_SET) {
+				ipg0 = attrp->idata[0];
+				ASSERT((ipg0 == XGMII_IPG_12_15) ||	\
+					(ipg0 == XGMII_IPG_16_19) ||	\
+					(ipg0 == XGMII_IPG_20_23));
+				if ((ipg0 != XGMII_IPG_12_15) &&
+					(ipg0 != XGMII_IPG_16_19) &&
+					(ipg0 != XGMII_IPG_20_23)) {
+					NPI_ERROR_MSG((handle.function,
+						    NPI_ERR_CTL,
+						    " npi_mac_port_attr"
+						    " MAC_10G_PORT_IPG:"
+						    " Invalid Input:"
+						    " xgmii_ipg <0x%x>",
+						    ipg0));
+					return (NPI_FAILURE |
+					NPI_MAC_PORT_ATTR_INVALID(portn));
+				}
+
+				XMAC_REG_RD(handle, portn, XMAC_IPG_REG, &val);
+				val &= ~(XMAC_IPG_VALUE_MASK |
+					XMAC_IPG_VALUE1_MASK);
+
+				switch (ipg0) {
+				case XGMII_IPG_12_15:
+					val |= (IPG_12_15_BYTE <<
+						XMAC_IPG_VALUE_SHIFT);
+					break;
+				case XGMII_IPG_16_19:
+					val |= (IPG_16_19_BYTE <<
+						XMAC_IPG_VALUE_SHIFT);
+					break;
+				case XGMII_IPG_20_23:
+					val |= (IPG_20_23_BYTE <<
+						XMAC_IPG_VALUE_SHIFT);
+					break;
+				default:
+					return (NPI_FAILURE |
+					NPI_MAC_PORT_ATTR_INVALID(portn));
+				}
+				XMAC_REG_WR(handle, portn, XMAC_IPG_REG, val);
+			} else {
+				XMAC_REG_RD(handle, portn, XMAC_IPG_REG, &val);
+				ipg0 = (val & XMAC_IPG_VALUE_MASK) >>
+					XMAC_IPG_VALUE_SHIFT;
+				switch (ipg0) {
+				case IPG_12_15_BYTE:
+					attrp->odata[0] = XGMII_IPG_12_15;
+					break;
+				case IPG_16_19_BYTE:
+					attrp->odata[0] = XGMII_IPG_16_19;
+					break;
+				case IPG_20_23_BYTE:
+					attrp->odata[0] = XGMII_IPG_20_23;
+					break;
+				default:
+					return (NPI_FAILURE |
+					NPI_MAC_PORT_ATTR_INVALID(portn));
+				}
+			}
+			break;
+		case BMAC_PORT_0:
+		case BMAC_PORT_1:
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					" npi_mac_port_attr"
+					" MAC_PORT_IPG:"
+					" Invalid Input: portn <%d>",
+					portn));
+		default:
+			return (NPI_FAILURE | NPI_MAC_PORT_INVALID(portn));
+		}
+		break;
+	}
+
+	case XMAC_PORT_IPG:
+		{
+		uint32_t	ipg1;
+		switch (portn) {
+		case XMAC_PORT_0:
+		case XMAC_PORT_1:
+			if (op == OP_SET) {
+				ipg1 = attrp->idata[0];
+				ASSERT((ipg1 == MII_GMII_IPG_12) ||	\
+					(ipg1 == MII_GMII_IPG_13) ||	\
+					(ipg1 == MII_GMII_IPG_14) ||	\
+					(ipg1 == MII_GMII_IPG_15) ||	\
+					(ipg1 == MII_GMII_IPG_16));
+				if ((ipg1 != MII_GMII_IPG_12) &&
+					(ipg1 != MII_GMII_IPG_13) &&
+					(ipg1 != MII_GMII_IPG_14) &&
+					(ipg1 != MII_GMII_IPG_15) &&
+					(ipg1 != MII_GMII_IPG_16)) {
+					NPI_ERROR_MSG((handle.function,
+						    NPI_ERR_CTL,
+						    " npi_mac_port_attr"
+						    " XMAC_PORT_IPG:"
+						    " Invalid Input:"
+						    " mii_gmii_ipg <0x%x>",
+						    ipg1));
+					return (NPI_FAILURE |
+					NPI_MAC_PORT_ATTR_INVALID(portn));
+				}
+
+				XMAC_REG_RD(handle, portn, XMAC_IPG_REG, &val);
+				val &= ~(XMAC_IPG_VALUE_MASK |
+					XMAC_IPG_VALUE1_MASK);
+
+				switch (ipg1) {
+				case MII_GMII_IPG_12:
+					val |= (IPG1_12_BYTES <<
+						XMAC_IPG_VALUE1_SHIFT);
+					break;
+				case MII_GMII_IPG_13:
+					val |= (IPG1_13_BYTES <<
+						XMAC_IPG_VALUE1_SHIFT);
+					break;
+				case MII_GMII_IPG_14:
+					val |= (IPG1_14_BYTES <<
+						XMAC_IPG_VALUE1_SHIFT);
+					break;
+				case MII_GMII_IPG_15:
+					val |= (IPG1_15_BYTES <<
+						XMAC_IPG_VALUE1_SHIFT);
+					break;
+				case MII_GMII_IPG_16:
+					val |= (IPG1_16_BYTES <<
+						XMAC_IPG_VALUE1_SHIFT);
+					break;
+				default:
+					return (NPI_FAILURE |
+					NPI_MAC_PORT_ATTR_INVALID(portn));
+				}
+				XMAC_REG_WR(handle, portn, XMAC_IPG_REG, val);
+			} else {
+				XMAC_REG_RD(handle, portn, XMAC_IPG_REG, &val);
+				ipg1 = (val & XMAC_IPG_VALUE1_MASK) >>
+					XMAC_IPG_VALUE1_SHIFT;
+				switch (ipg1) {
+				case IPG1_12_BYTES:
+					attrp->odata[1] = MII_GMII_IPG_12;
+					break;
+				case IPG1_13_BYTES:
+					attrp->odata[1] = MII_GMII_IPG_13;
+					break;
+				case IPG1_14_BYTES:
+					attrp->odata[1] = MII_GMII_IPG_14;
+					break;
+				case IPG1_15_BYTES:
+					attrp->odata[1] = MII_GMII_IPG_15;
+					break;
+				case IPG1_16_BYTES:
+					attrp->odata[1] = MII_GMII_IPG_16;
+					break;
+				default:
+					return (NPI_FAILURE |
+					NPI_MAC_PORT_ATTR_INVALID(portn));
+				}
+			}
+			break;
+		case BMAC_PORT_0:
+		case BMAC_PORT_1:
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_mac_port_attr"
+					    " MAC_PORT_IPG:"
+					    " Invalid Input: portn <%d>",
+					    portn));
+		default:
+			return (NPI_FAILURE | NPI_MAC_PORT_INVALID(portn));
+		}
+		break;
+	}
+
+	case MAC_PORT_ADDR: {
+		uint32_t addr0;
+		uint32_t addr1;
+		uint32_t addr2;
+
+		switch (portn) {
+		case XMAC_PORT_0:
+		case XMAC_PORT_1:
+			if (op == OP_SET) {
+				addr0 = attrp->idata[0];
+				addr1 = attrp->idata[1];
+				addr2 = attrp->idata[2];
+				ASSERT((addr0 & ~0xFFFF) == 0);
+				if ((addr0 & ~0xFFFF) != 0) {
+					NPI_ERROR_MSG((handle.function,
+						    NPI_ERR_CTL,
+						    " npi_mac_port_attr"
+						    " MAC_PORT_ADDR:"
+						    " Invalid Input:"
+						    " addr0 <0x%x>", addr0));
+
+					return (NPI_FAILURE |
+					NPI_MAC_PORT_ATTR_INVALID(portn));
+				}
+				ASSERT((addr1 & ~0xFFFF) == 0);
+				if ((addr1 & ~0xFFFF) != 0) {
+					NPI_ERROR_MSG((handle.function,
+						    NPI_ERR_CTL,
+						    " npi_mac_port_attr"
+						    " MAC_PORT_ADDR:"
+						    " Invalid Input:"
+						    " addr1 <0x%x>", addr1));
+					return (NPI_FAILURE |
+					NPI_MAC_PORT_ATTR_INVALID(portn));
+				}
+				ASSERT((addr2 & ~0xFFFF) == 0);
+				if ((addr2 & ~0xFFFF) != 0) {
+					NPI_ERROR_MSG((handle.function,
+						    NPI_ERR_CTL,
+						    " npi_mac_port_attr"
+						    " MAC_PORT_ADDR:"
+						    " Invalid Input:"
+						    " addr2 <0x%x>",
+						    addr2));
+
+					return (NPI_FAILURE |
+					NPI_MAC_PORT_ATTR_INVALID(portn));
+				}
+				XMAC_REG_WR(handle, portn, XMAC_ADDR0_REG,
+						addr0);
+				XMAC_REG_WR(handle, portn, XMAC_ADDR1_REG,
+						addr1);
+				XMAC_REG_WR(handle, portn, XMAC_ADDR2_REG,
+						addr2);
+			} else {
+				XMAC_REG_RD(handle, portn, XMAC_ADDR0_REG,
+						&addr0);
+				XMAC_REG_RD(handle, portn, XMAC_ADDR1_REG,
+						&addr1);
+				XMAC_REG_RD(handle, portn, XMAC_ADDR2_REG,
+						&addr2);
+				attrp->odata[0] = addr0 & MAC_ADDR_REG_MASK;
+				attrp->odata[1] = addr1 & MAC_ADDR_REG_MASK;
+				attrp->odata[2] = addr2 & MAC_ADDR_REG_MASK;
+			}
+			break;
+		case BMAC_PORT_0:
+		case BMAC_PORT_1:
+			if (op == OP_SET) {
+				addr0 = attrp->idata[0];
+				addr1 = attrp->idata[1];
+				addr2 = attrp->idata[2];
+				ASSERT((addr0 & ~0xFFFF) == 0);
+				if ((addr0 & ~0xFFFF) != 0) {
+					NPI_ERROR_MSG((handle.function,
+						    NPI_ERR_CTL,
+						    " npi_mac_port_attr"
+						    " MAC_PORT_ADDR:"
+						    " Invalid Input:"
+						    " addr0 <0x%x>",
+						    addr0));
+					return (NPI_FAILURE |
+					NPI_MAC_PORT_ATTR_INVALID(portn));
+				}
+				ASSERT((addr1 & ~0xFFFF) == 0);
+				if ((addr1 & ~0xFFFF) != 0) {
+					NPI_ERROR_MSG((handle.function,
+						    NPI_ERR_CTL,
+						    " npi_mac_port_attr"
+						    " MAC_PORT_ADDR:"
+						    " Invalid Input:"
+						    " addr1 <0x%x>",
+						    addr1));
+					return (NPI_FAILURE |
+					NPI_MAC_PORT_ATTR_INVALID(portn));
+				}
+				ASSERT((addr2 & ~0xFFFF) == 0);
+				if ((addr2 & ~0xFFFF) != 0) {
+					NPI_ERROR_MSG((handle.function,
+						    NPI_ERR_CTL,
+						    " npi_mac_port_attr"
+						    " MAC_PORT_ADDR:"
+						    " Invalid Input:"
+						    " addr2 <0x%x>",
+						    addr2));
+					return (NPI_FAILURE |
+					NPI_MAC_PORT_ATTR_INVALID(portn));
+				}
+				BMAC_REG_WR(handle, portn, BMAC_ADDR0_REG,
+						addr0);
+				BMAC_REG_WR(handle, portn, BMAC_ADDR1_REG,
+						addr1);
+				BMAC_REG_WR(handle, portn, BMAC_ADDR2_REG,
+						addr2);
+			} else {
+				BMAC_REG_RD(handle, portn, BMAC_ADDR0_REG,
+						&addr0);
+				BMAC_REG_RD(handle, portn, BMAC_ADDR1_REG,
+						&addr1);
+				BMAC_REG_RD(handle, portn, BMAC_ADDR2_REG,
+						&addr2);
+				attrp->odata[0] = addr0 & MAC_ADDR_REG_MASK;
+				attrp->odata[1] = addr1 & MAC_ADDR_REG_MASK;
+				attrp->odata[2] = addr2 & MAC_ADDR_REG_MASK;
+			}
+			break;
+		default:
+			return (NPI_FAILURE | NPI_MAC_PORT_INVALID(portn));
+		}
+	}	break;
+
+	case MAC_PORT_ADDR_FILTER: {
+		uint32_t addr0;
+		uint32_t addr1;
+		uint32_t addr2;
+
+		switch (portn) {
+		case XMAC_PORT_0:
+		case XMAC_PORT_1:
+			if (op == OP_SET) {
+				addr0 = attrp->idata[0];
+				addr1 = attrp->idata[1];
+				addr2 = attrp->idata[2];
+				ASSERT((addr0 & ~0xFFFF) == 0);
+				if ((addr0 & ~0xFFFF) != 0) {
+					NPI_ERROR_MSG((handle.function,
+						    NPI_ERR_CTL,
+						    " npi_mac_port_attr"
+						    " MAC_PORT_ADDR_FILTER:"
+						    " Invalid Input:"
+						    " addr0 <0x%x>",
+						    addr0));
+					return (NPI_FAILURE |
+					NPI_MAC_PORT_ATTR_INVALID(portn));
+				}
+				ASSERT((addr1 & ~0xFFFF) == 0);
+				if ((addr1 & ~0xFFFF) != 0) {
+					NPI_ERROR_MSG((handle.function,
+						    NPI_ERR_CTL,
+						    " npi_mac_port_attr"
+						    " MAC_PORT_ADDR_FILTER:"
+						    " Invalid Input:"
+						    " addr1 <0x%x>",
+						    addr1));
+					return (NPI_FAILURE |
+					NPI_MAC_PORT_ATTR_INVALID(portn));
+				}
+				ASSERT((addr2 & ~0xFFFF) == 0);
+				if ((addr2 & ~0xFFFF) != 0) {
+					NPI_ERROR_MSG((handle.function,
+						    NPI_ERR_CTL,
+						    " npi_mac_port_attr"
+						    " MAC_PORT_ADDR_FILTER:"
+						    " Invalid Input:"
+						    " addr2 <0x%x>",
+						    addr2));
+					return (NPI_FAILURE |
+					NPI_MAC_PORT_ATTR_INVALID(portn));
+				}
+				XMAC_REG_WR(handle, portn,
+						XMAC_ADDR_FILT0_REG, addr0);
+				XMAC_REG_WR(handle, portn,
+						XMAC_ADDR_FILT1_REG, addr1);
+				XMAC_REG_WR(handle, portn,
+						XMAC_ADDR_FILT2_REG, addr2);
+			} else {
+				XMAC_REG_RD(handle, portn,
+						XMAC_ADDR_FILT0_REG, &addr0);
+				XMAC_REG_RD(handle, portn,
+						XMAC_ADDR_FILT1_REG, &addr1);
+				XMAC_REG_RD(handle, portn,
+						XMAC_ADDR_FILT2_REG, &addr2);
+				attrp->odata[0] = addr0 & MAC_ADDR_REG_MASK;
+				attrp->odata[1] = addr1 & MAC_ADDR_REG_MASK;
+				attrp->odata[2] = addr2 & MAC_ADDR_REG_MASK;
+			}
+			break;
+		case BMAC_PORT_0:
+		case BMAC_PORT_1:
+			if (op == OP_SET) {
+				addr0 = attrp->idata[0];
+				addr1 = attrp->idata[1];
+				addr2 = attrp->idata[2];
+				ASSERT((addr0 & ~0xFFFF) == 0);
+				if ((addr0 & ~0xFFFF) != 0) {
+					NPI_ERROR_MSG((handle.function,
+						    NPI_ERR_CTL,
+						    " npi_mac_port_attr"
+						    " MAC_PORT_ADDR_FILTER:"
+						    " Invalid Input:"
+						    " addr0 <0x%x>",
+						    addr0));
+					return (NPI_FAILURE |
+					NPI_MAC_PORT_ATTR_INVALID(portn));
+				}
+				ASSERT((addr1 & ~0xFFFF) == 0);
+				if ((addr1 & ~0xFFFF) != 0) {
+					NPI_ERROR_MSG((handle.function,
+						    NPI_ERR_CTL,
+						    " npi_mac_port_attr"
+						    " MAC_PORT_ADDR_FILTER:"
+						    " Invalid Input:"
+						    " addr1 <0x%x>",
+						    addr1));
+					return (NPI_FAILURE |
+					NPI_MAC_PORT_ATTR_INVALID(portn));
+				}
+				ASSERT((addr2 & ~0xFFFF) == 0);
+				if ((addr2 & ~0xFFFF) != 0) {
+					NPI_ERROR_MSG((handle.function,
+						    NPI_ERR_CTL,
+						    " npi_mac_port_attr"
+						    " MAC_PORT_ADDR_FILTER:"
+						    " Invalid Input:"
+						    " addr2 <0x%x>",
+						    addr2));
+					return (NPI_FAILURE |
+					NPI_MAC_PORT_ATTR_INVALID(portn));
+				}
+				BMAC_REG_WR(handle, portn, MAC_ADDR_FILT0_REG,
+						addr0);
+				BMAC_REG_WR(handle, portn, MAC_ADDR_FILT1_REG,
+						addr1);
+				BMAC_REG_WR(handle, portn, MAC_ADDR_FILT2_REG,
+						addr2);
+			} else {
+				BMAC_REG_RD(handle, portn, MAC_ADDR_FILT0_REG,
+						&addr0);
+				BMAC_REG_RD(handle, portn, MAC_ADDR_FILT1_REG,
+						&addr1);
+				BMAC_REG_RD(handle, portn, MAC_ADDR_FILT2_REG,
+						&addr2);
+				attrp->odata[0] = addr0 & MAC_ADDR_REG_MASK;
+				attrp->odata[1] = addr1 & MAC_ADDR_REG_MASK;
+				attrp->odata[2] = addr2 & MAC_ADDR_REG_MASK;
+			}
+			break;
+		default:
+			return (NPI_FAILURE | NPI_MAC_PORT_INVALID(portn));
+		}
+	}	break;
+
+	case MAC_PORT_ADDR_FILTER_MASK: {
+		uint32_t mask_1_2;
+		uint32_t mask_0;
+
+		switch (portn) {
+		case XMAC_PORT_0:
+		case XMAC_PORT_1:
+			if (op == OP_SET) {
+				mask_0 = attrp->idata[0];
+				mask_1_2 = attrp->idata[1];
+				ASSERT((mask_0 & ~0xFFFF) == 0);
+				if ((mask_0 & ~0xFFFF) != 0) {
+					NPI_ERROR_MSG((handle.function,
+						    NPI_ERR_CTL,
+						    " npi_mac_port_attr"
+						    " MAC_ADDR_FILTER_MASK:"
+						    " Invalid Input:"
+						    " mask_0 <0x%x>",
+						    mask_0));
+					return (NPI_FAILURE |
+					NPI_MAC_PORT_ATTR_INVALID(portn));
+				}
+				ASSERT((mask_1_2 & ~0xFF) == 0);
+				if ((mask_1_2 & ~0xFF) != 0) {
+					NPI_ERROR_MSG((handle.function,
+						    NPI_ERR_CTL,
+						    " npi_mac_port_attr"
+						    " MAC_ADDR_FILTER_MASK:"
+						    " Invalid Input:"
+						    " mask_1_2 <0x%x>",
+						    mask_1_2));
+					return (NPI_FAILURE |
+					NPI_MAC_PORT_ATTR_INVALID(portn));
+				}
+				XMAC_REG_WR(handle, portn,
+					XMAC_ADDR_FILT0_MASK_REG, mask_0);
+				XMAC_REG_WR(handle, portn,
+					XMAC_ADDR_FILT12_MASK_REG, mask_1_2);
+			} else {
+				XMAC_REG_RD(handle, portn,
+					XMAC_ADDR_FILT0_MASK_REG, &mask_0);
+				XMAC_REG_RD(handle, portn,
+					XMAC_ADDR_FILT12_MASK_REG, &mask_1_2);
+				attrp->odata[0] = mask_0 & 0xFFFF;
+				attrp->odata[1] = mask_1_2 & 0xFF;
+			}
+			break;
+		case BMAC_PORT_0:
+		case BMAC_PORT_1:
+			if (op == OP_SET) {
+				mask_0 = attrp->idata[0];
+				mask_1_2 = attrp->idata[1];
+				BMAC_REG_WR(handle, portn,
+					MAC_ADDR_FILT00_MASK_REG, mask_0);
+				BMAC_REG_WR(handle, portn,
+					MAC_ADDR_FILT12_MASK_REG, mask_1_2);
+			} else {
+				BMAC_REG_RD(handle, portn,
+					MAC_ADDR_FILT00_MASK_REG, &mask_0);
+				BMAC_REG_RD(handle, portn,
+					MAC_ADDR_FILT12_MASK_REG, &mask_1_2);
+				attrp->odata[0] = mask_0;
+				attrp->odata[1] = mask_1_2;
+			}
+			break;
+		default:
+			return (NPI_FAILURE | NPI_MAC_PORT_INVALID(portn));
+		}
+	}	break;
+
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_mac_port_attr"
+				    " Invalid Input:"
+				    " attr <0x%x>", attrp->type));
+		return (NPI_FAILURE | NPI_MAC_PORT_ATTR_INVALID(portn));
+	}
+
+	return (NPI_SUCCESS);
+}
+
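+/*
+ * npi_xmac_reset():
+ *	Issue the requested XMAC TX or RX register/logic reset and poll
+ *	until the reset bits self-clear; a non-zero value after the poll
+ *	is reported as a TX or RX MAC reset failure.
+ */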
+npi_status_t
+npi_xmac_reset(npi_handle_t handle, uint8_t portn, npi_mac_reset_t mode)
+{
+	uint64_t val;
+	boolean_t txmac = B_FALSE;
+
+	ASSERT(IS_XMAC_PORT_NUM_VALID(portn));
+
+	switch (mode) {
+	case XTX_MAC_REG_RESET:
+		XMAC_REG_WR(handle, portn, XTXMAC_SW_RST_REG, XTXMAC_REG_RST);
+		XMAC_WAIT_REG(handle, portn, XTXMAC_SW_RST_REG, val);
+		txmac = B_TRUE;
+		break;
+	case XRX_MAC_REG_RESET:
+		XMAC_REG_WR(handle, portn, XRXMAC_SW_RST_REG, XRXMAC_REG_RST);
+		XMAC_WAIT_REG(handle, portn, XRXMAC_SW_RST_REG, val);
+		break;
+	case XTX_MAC_LOGIC_RESET:
+		XMAC_REG_WR(handle, portn, XTXMAC_SW_RST_REG, XTXMAC_SOFT_RST);
+		XMAC_WAIT_REG(handle, portn, XTXMAC_SW_RST_REG, val);
+		txmac = B_TRUE;
+		break;
+	case XRX_MAC_LOGIC_RESET:
+		XMAC_REG_WR(handle, portn, XRXMAC_SW_RST_REG, XRXMAC_SOFT_RST);
+		XMAC_WAIT_REG(handle, portn, XRXMAC_SW_RST_REG, val);
+		break;
+	case XTX_MAC_RESET_ALL:
+		XMAC_REG_WR(handle, portn, XTXMAC_SW_RST_REG,
+					XTXMAC_SOFT_RST | XTXMAC_REG_RST);
+		XMAC_WAIT_REG(handle, portn, XTXMAC_SW_RST_REG, val);
+		txmac = B_TRUE;
+		break;
+	case XRX_MAC_RESET_ALL:
+		XMAC_REG_WR(handle, portn, XRXMAC_SW_RST_REG,
+					XRXMAC_SOFT_RST | XRXMAC_REG_RST);
+		XMAC_WAIT_REG(handle, portn, XRXMAC_SW_RST_REG, val);
+		break;
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_mac_reset"
+				    " Invalid Input: mode <0x%x>",
+				    mode));
+		return (NPI_FAILURE | NPI_MAC_RESET_MODE_INVALID(portn));
+	}
+
+	if (val != 0) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_xmac_reset"
+				    " HW ERROR: MAC_RESET  failed <0x%x>",
+				    val));
+
+		if (txmac)
+			return (NPI_FAILURE | NPI_TXMAC_RESET_FAILED(portn));
+		else
+			return (NPI_FAILURE | NPI_RXMAC_RESET_FAILED(portn));
+	}
+
+	return (NPI_SUCCESS);
+}
+
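+/*
+ * npi_xmac_xif_config():
+ *	ENABLE/DISABLE set or clear individual XIF configuration bits;
+ *	INIT programs every CFG_XMAC_XIF_* bit to match the supplied
+ *	mask.  Selecting loopback also deselects the POR clock source.
+ */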
+npi_status_t
+npi_xmac_xif_config(npi_handle_t handle, config_op_t op, uint8_t portn,
+			xmac_xif_config_t config)
+{
+	uint64_t val = 0;
+
+	ASSERT(IS_XMAC_PORT_NUM_VALID(portn));
+
+	switch (op) {
+	case ENABLE:
+	case DISABLE:
+		ASSERT((config != 0) && ((config & ~CFG_XMAC_XIF_ALL) == 0));
+		if ((config == 0) || (config & ~CFG_XMAC_XIF_ALL) != 0) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_xmac_xif_config"
+					    " Invalid Input:"
+					    " config <0x%x>", config));
+			return (NPI_FAILURE | NPI_MAC_OPCODE_INVALID(portn));
+		}
+		if (op == ENABLE) {
+			XMAC_REG_RD(handle, portn, XMAC_CONFIG_REG, &val);
+			if (config & CFG_XMAC_XIF_LED_FORCE)
+				val |= XMAC_XIF_FORCE_LED_ON;
+			if (config & CFG_XMAC_XIF_LED_POLARITY)
+				val |= XMAC_XIF_LED_POLARITY;
+			if (config & CFG_XMAC_XIF_SEL_POR_CLK_SRC)
+				val |= XMAC_XIF_SEL_POR_CLK_SRC;
+			if (config & CFG_XMAC_XIF_TX_OUTPUT)
+				val |= XMAC_XIF_TX_OUTPUT_EN;
+
+			if (config & CFG_XMAC_XIF_LOOPBACK) {
+				val &= ~XMAC_XIF_SEL_POR_CLK_SRC;
+				val |= XMAC_XIF_LOOPBACK;
+			}
+
+			if (config & CFG_XMAC_XIF_LFS)
+				val &= ~XMAC_XIF_LFS_DISABLE;
+			if (config & CFG_XMAC_XIF_XPCS_BYPASS)
+				val |= XMAC_XIF_XPCS_BYPASS;
+			if (config & CFG_XMAC_XIF_1G_PCS_BYPASS)
+				val |= XMAC_XIF_1G_PCS_BYPASS;
+			if (config & CFG_XMAC_XIF_SEL_CLK_25MHZ)
+				val |= XMAC_XIF_SEL_CLK_25MHZ;
+			XMAC_REG_WR(handle, portn, XMAC_CONFIG_REG, val);
+
+		} else {
+			XMAC_REG_RD(handle, portn, XMAC_CONFIG_REG, &val);
+			if (config & CFG_XMAC_XIF_LED_FORCE)
+				val &= ~XMAC_XIF_FORCE_LED_ON;
+			if (config & CFG_XMAC_XIF_LED_POLARITY)
+				val &= ~XMAC_XIF_LED_POLARITY;
+			if (config & CFG_XMAC_XIF_SEL_POR_CLK_SRC)
+				val &= ~XMAC_XIF_SEL_POR_CLK_SRC;
+			if (config & CFG_XMAC_XIF_TX_OUTPUT)
+				val &= ~XMAC_XIF_TX_OUTPUT_EN;
+			if (config & CFG_XMAC_XIF_LOOPBACK)
+				val &= ~XMAC_XIF_LOOPBACK;
+			if (config & CFG_XMAC_XIF_LFS)
+				val |= XMAC_XIF_LFS_DISABLE;
+			if (config & CFG_XMAC_XIF_XPCS_BYPASS)
+				val &= ~XMAC_XIF_XPCS_BYPASS;
+			if (config & CFG_XMAC_XIF_1G_PCS_BYPASS)
+				val &= ~XMAC_XIF_1G_PCS_BYPASS;
+			if (config & CFG_XMAC_XIF_SEL_CLK_25MHZ)
+				val &= ~XMAC_XIF_SEL_CLK_25MHZ;
+			XMAC_REG_WR(handle, portn, XMAC_CONFIG_REG, val);
+		}
+		break;
+	case INIT:
+		ASSERT((config & ~CFG_XMAC_XIF_ALL) == 0);
+		if ((config & ~CFG_XMAC_XIF_ALL) != 0) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_xmac_xif_config"
+					    " Invalid Input: config <0x%x>",
+					    config));
+			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
+		}
+		XMAC_REG_RD(handle, portn, XMAC_CONFIG_REG, &val);
+
+		if (config & CFG_XMAC_XIF_LED_FORCE)
+			val |= XMAC_XIF_FORCE_LED_ON;
+		else
+			val &= ~XMAC_XIF_FORCE_LED_ON;
+		if (config & CFG_XMAC_XIF_LED_POLARITY)
+			val |= XMAC_XIF_LED_POLARITY;
+		else
+			val &= ~XMAC_XIF_LED_POLARITY;
+		if (config & CFG_XMAC_XIF_SEL_POR_CLK_SRC)
+			val |= XMAC_XIF_SEL_POR_CLK_SRC;
+		else
+			val &= ~XMAC_XIF_SEL_POR_CLK_SRC;
+		if (config & CFG_XMAC_XIF_TX_OUTPUT)
+			val |= XMAC_XIF_TX_OUTPUT_EN;
+		else
+			val &= ~XMAC_XIF_TX_OUTPUT_EN;
+
+		if (config & CFG_XMAC_XIF_LOOPBACK) {
+			val &= ~XMAC_XIF_SEL_POR_CLK_SRC;
+			val |= XMAC_XIF_LOOPBACK;
+#ifdef	AXIS_DEBUG_LB
+			val |= XMAC_RX_MAC2IPP_PKT_CNT_EN;
+#endif
+		} else {
+			val &= ~XMAC_XIF_LOOPBACK;
+		}
+
+		if (config & CFG_XMAC_XIF_LFS)
+			val &= ~XMAC_XIF_LFS_DISABLE;
+		else
+			val |= XMAC_XIF_LFS_DISABLE;
+		if (config & CFG_XMAC_XIF_XPCS_BYPASS)
+			val |= XMAC_XIF_XPCS_BYPASS;
+		else
+			val &= ~XMAC_XIF_XPCS_BYPASS;
+		if (config & CFG_XMAC_XIF_1G_PCS_BYPASS)
+			val |= XMAC_XIF_1G_PCS_BYPASS;
+		else
+			val &= ~XMAC_XIF_1G_PCS_BYPASS;
+		if (config & CFG_XMAC_XIF_SEL_CLK_25MHZ)
+			val |= XMAC_XIF_SEL_CLK_25MHZ;
+		else
+			val &= ~XMAC_XIF_SEL_CLK_25MHZ;
+		XMAC_REG_WR(handle, portn, XMAC_CONFIG_REG, val);
+
+		break;
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_xmac_xif_config"
+				    " Invalid Input: op <0x%x>", op));
+		return (NPI_FAILURE | NPI_MAC_OPCODE_INVALID(portn));
+	}
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_xmac_tx_config(npi_handle_t handle, config_op_t op, uint8_t portn,
+			xmac_tx_config_t config)
+{
+	uint64_t val = 0;
+
+	ASSERT(IS_XMAC_PORT_NUM_VALID(portn));
+
+	switch (op) {
+	case ENABLE:
+	case DISABLE:
+		ASSERT((config != 0) && ((config & ~CFG_XMAC_TX_ALL) == 0));
+		if ((config == 0) || (config & ~CFG_XMAC_TX_ALL) != 0) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_xmac_tx_config"
+				    " Invalid Input: config <0x%x>",
+				    config));
+			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
+		}
+		if (op == ENABLE) {
+			XMAC_REG_RD(handle, portn, XMAC_CONFIG_REG, &val);
+			if (config & CFG_XMAC_TX)
+				val |= XMAC_TX_CFG_TX_ENABLE;
+			if (config & CFG_XMAC_TX_STRETCH_MODE)
+				val |= XMAC_TX_CFG_STRETCH_MD;
+			if (config & CFG_XMAC_VAR_IPG)
+				val |= XMAC_TX_CFG_VAR_MIN_IPG_EN;
+			if (config & CFG_XMAC_TX_CRC)
+				val &= ~XMAC_TX_CFG_ALWAYS_NO_CRC;
+			XMAC_REG_WR(handle, portn, XMAC_CONFIG_REG, val);
+		} else {
+			XMAC_REG_RD(handle, portn, XMAC_CONFIG_REG, &val);
+			if (config & CFG_XMAC_TX)
+				val &= ~XMAC_TX_CFG_TX_ENABLE;
+			if (config & CFG_XMAC_TX_STRETCH_MODE)
+				val &= ~XMAC_TX_CFG_STRETCH_MD;
+			if (config & CFG_XMAC_VAR_IPG)
+				val &= ~XMAC_TX_CFG_VAR_MIN_IPG_EN;
+			if (config & CFG_XMAC_TX_CRC)
+				val |= XMAC_TX_CFG_ALWAYS_NO_CRC;
+			XMAC_REG_WR(handle, portn, XMAC_CONFIG_REG, val);
+		}
+		break;
+	case INIT:
+		ASSERT((config & ~CFG_XMAC_TX_ALL) == 0);
+		if ((config & ~CFG_XMAC_TX_ALL) != 0) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_xmac_tx_config"
+					    " Invalid Input: config <0x%x>",
+					    config));
+			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
+		}
+		XMAC_REG_RD(handle, portn, XMAC_CONFIG_REG, &val);
+		if (config & CFG_XMAC_TX)
+			val |= XMAC_TX_CFG_TX_ENABLE;
+		else
+			val &= ~XMAC_TX_CFG_TX_ENABLE;
+		if (config & CFG_XMAC_TX_STRETCH_MODE)
+			val |= XMAC_TX_CFG_STRETCH_MD;
+		else
+			val &= ~XMAC_TX_CFG_STRETCH_MD;
+		if (config & CFG_XMAC_VAR_IPG)
+			val |= XMAC_TX_CFG_VAR_MIN_IPG_EN;
+		else
+			val &= ~XMAC_TX_CFG_VAR_MIN_IPG_EN;
+		if (config & CFG_XMAC_TX_CRC)
+			val &= ~XMAC_TX_CFG_ALWAYS_NO_CRC;
+		else
+			val |= XMAC_TX_CFG_ALWAYS_NO_CRC;
+
+		XMAC_REG_WR(handle, portn, XMAC_CONFIG_REG, val);
+		break;
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_xmac_tx_config"
+				    " Invalid Input: op <0x%x>",
+				    op));
+		return (NPI_FAILURE | NPI_MAC_OPCODE_INVALID(portn));
+	}
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_xmac_rx_config(npi_handle_t handle, config_op_t op, uint8_t portn,
+			xmac_rx_config_t config)
+{
+	uint64_t val = 0;
+
+	ASSERT(IS_XMAC_PORT_NUM_VALID(portn));
+
+	switch (op) {
+	case ENABLE:
+	case DISABLE:
+		ASSERT((config != 0) && ((config & ~CFG_XMAC_RX_ALL) == 0));
+		if ((config == 0) || (config & ~CFG_XMAC_RX_ALL) != 0) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_xmac_rx_config"
+					    " Invalid Input: config <0x%x>",
+					    config));
+			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
+		}
+		if (op == ENABLE) {
+			XMAC_REG_RD(handle, portn, XMAC_CONFIG_REG, &val);
+			if (config & CFG_XMAC_RX)
+				val |= XMAC_RX_CFG_RX_ENABLE;
+			if (config & CFG_XMAC_RX_PROMISCUOUS)
+				val |= XMAC_RX_CFG_PROMISC;
+			if (config & CFG_XMAC_RX_PROMISCUOUSGROUP)
+				val |= XMAC_RX_CFG_PROMISC_GROUP;
+			if (config & CFG_XMAC_RX_ERRCHK)
+				val &= ~XMAC_RX_CFG_ERR_CHK_DISABLE;
+			if (config & CFG_XMAC_RX_CRC_CHK)
+				val &= ~XMAC_RX_CFG_CRC_CHK_DISABLE;
+			if (config & CFG_XMAC_RX_RESV_MULTICAST)
+				val |= XMAC_RX_CFG_RESERVED_MCAST;
+			if (config & CFG_XMAC_RX_CODE_VIO_CHK)
+				val &= ~XMAC_RX_CFG_CD_VIO_CHK;
+			if (config & CFG_XMAC_RX_HASH_FILTER)
+				val |= XMAC_RX_CFG_HASH_FILTER_EN;
+			if (config & CFG_XMAC_RX_ADDR_FILTER)
+				val |= XMAC_RX_CFG_ADDR_FILTER_EN;
+			if (config & CFG_XMAC_RX_STRIP_CRC)
+				val |= XMAC_RX_CFG_STRIP_CRC;
+			if (config & CFG_XMAC_RX_PAUSE)
+				val |= XMAC_RX_CFG_RX_PAUSE_EN;
+			if (config & CFG_XMAC_RX_PASS_FC_FRAME)
+				val |= XMAC_RX_CFG_PASS_FLOW_CTRL;
+			XMAC_REG_WR(handle, portn, XMAC_CONFIG_REG, val);
+		} else {
+			XMAC_REG_RD(handle, portn, XMAC_CONFIG_REG, &val);
+			if (config & CFG_XMAC_RX)
+				val &= ~XMAC_RX_CFG_RX_ENABLE;
+			if (config & CFG_XMAC_RX_PROMISCUOUS)
+				val &= ~XMAC_RX_CFG_PROMISC;
+			if (config & CFG_XMAC_RX_PROMISCUOUSGROUP)
+				val &= ~XMAC_RX_CFG_PROMISC_GROUP;
+			if (config & CFG_XMAC_RX_ERRCHK)
+				val |= XMAC_RX_CFG_ERR_CHK_DISABLE;
+			if (config & CFG_XMAC_RX_CRC_CHK)
+				val |= XMAC_RX_CFG_CRC_CHK_DISABLE;
+			if (config & CFG_XMAC_RX_RESV_MULTICAST)
+				val &= ~XMAC_RX_CFG_RESERVED_MCAST;
+			if (config & CFG_XMAC_RX_CODE_VIO_CHK)
+				val |= XMAC_RX_CFG_CD_VIO_CHK;
+			if (config & CFG_XMAC_RX_HASH_FILTER)
+				val &= ~XMAC_RX_CFG_HASH_FILTER_EN;
+			if (config & CFG_XMAC_RX_ADDR_FILTER)
+				val &= ~XMAC_RX_CFG_ADDR_FILTER_EN;
+			if (config & CFG_XMAC_RX_STRIP_CRC)
+				val &= ~XMAC_RX_CFG_STRIP_CRC;
+			if (config & CFG_XMAC_RX_PAUSE)
+				val &= ~XMAC_RX_CFG_RX_PAUSE_EN;
+			if (config & CFG_XMAC_RX_PASS_FC_FRAME)
+				val &= ~XMAC_RX_CFG_PASS_FLOW_CTRL;
+			XMAC_REG_WR(handle, portn, XMAC_CONFIG_REG, val);
+		}
+		break;
+	case INIT:
+		ASSERT((config & ~CFG_XMAC_RX_ALL) == 0);
+		if ((config & ~CFG_XMAC_RX_ALL) != 0) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_xmac_rx_config"
+					    " Invalid Input: config <0x%x>",
+					    config));
+			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
+		}
+		XMAC_REG_RD(handle, portn, XMAC_CONFIG_REG, &val);
+		if (config & CFG_XMAC_RX)
+			val |= XMAC_RX_CFG_RX_ENABLE;
+		else
+			val &= ~XMAC_RX_CFG_RX_ENABLE;
+		if (config & CFG_XMAC_RX_PROMISCUOUS)
+			val |= XMAC_RX_CFG_PROMISC;
+		else
+			val &= ~XMAC_RX_CFG_PROMISC;
+		if (config & CFG_XMAC_RX_PROMISCUOUSGROUP)
+			val |= XMAC_RX_CFG_PROMISC_GROUP;
+		else
+			val &= ~XMAC_RX_CFG_PROMISC_GROUP;
+		if (config & CFG_XMAC_RX_ERRCHK)
+			val &= ~XMAC_RX_CFG_ERR_CHK_DISABLE;
+		else
+			val |= XMAC_RX_CFG_ERR_CHK_DISABLE;
+		if (config & CFG_XMAC_RX_CRC_CHK)
+			val &= ~XMAC_RX_CFG_CRC_CHK_DISABLE;
+		else
+			val |= XMAC_RX_CFG_CRC_CHK_DISABLE;
+		if (config & CFG_XMAC_RX_RESV_MULTICAST)
+			val |= XMAC_RX_CFG_RESERVED_MCAST;
+		else
+			val &= ~XMAC_RX_CFG_RESERVED_MCAST;
+		if (config & CFG_XMAC_RX_CODE_VIO_CHK)
+			val &= ~XMAC_RX_CFG_CD_VIO_CHK;
+		else
+			val |= XMAC_RX_CFG_CD_VIO_CHK;
+		if (config & CFG_XMAC_RX_HASH_FILTER)
+			val |= XMAC_RX_CFG_HASH_FILTER_EN;
+		else
+			val &= ~XMAC_RX_CFG_HASH_FILTER_EN;
+		if (config & CFG_XMAC_RX_ADDR_FILTER)
+			val |= XMAC_RX_CFG_ADDR_FILTER_EN;
+		else
+			val &= ~XMAC_RX_CFG_ADDR_FILTER_EN;
+		if (config & CFG_XMAC_RX_PAUSE)
+			val |= XMAC_RX_CFG_RX_PAUSE_EN;
+		else
+			val &= ~XMAC_RX_CFG_RX_PAUSE_EN;
+		if (config & CFG_XMAC_RX_STRIP_CRC)
+			val |= XMAC_RX_CFG_STRIP_CRC;
+		else
+			val &= ~XMAC_RX_CFG_STRIP_CRC;
+		if (config & CFG_XMAC_RX_PASS_FC_FRAME)
+			val |= XMAC_RX_CFG_PASS_FLOW_CTRL;
+		else
+			val &= ~XMAC_RX_CFG_PASS_FLOW_CTRL;
+
+		XMAC_REG_WR(handle, portn, XMAC_CONFIG_REG, val);
+		break;
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_xmac_rx_config"
+					    " Invalid Input: op <0x%x>", op));
+		return (NPI_FAILURE | NPI_MAC_OPCODE_INVALID(portn));
+	}
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_xmac_tx_iconfig(npi_handle_t handle, config_op_t op, uint8_t portn,
+		    xmac_tx_iconfig_t iconfig)
+{
+	uint64_t val = 0;
+
+	ASSERT(IS_XMAC_PORT_NUM_VALID(portn));
+
+	switch (op) {
+	case ENABLE:
+	case DISABLE:
+		ASSERT((iconfig != 0) && ((iconfig & ~ICFG_XMAC_TX_ALL) == 0));
+		if ((iconfig == 0) || (iconfig & ~ICFG_XMAC_TX_ALL) != 0) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_xmac_tx_iconfig"
+				    " Invalid Input: iconfig <0x%x>",
+				    iconfig));
+			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
+		}
+		XMAC_REG_RD(handle, portn, XTXMAC_STAT_MSK_REG, &val);
+		if (op == ENABLE)
+			val &= ~iconfig;
+		else
+			val |= iconfig;
+		XMAC_REG_WR(handle, portn, XTXMAC_STAT_MSK_REG, val);
+
+		break;
+	case INIT:
+		ASSERT((iconfig & ~ICFG_XMAC_TX_ALL) == 0);
+		if ((iconfig & ~ICFG_XMAC_TX_ALL) != 0) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_xmac_tx_iconfig"
+				    " Invalid Input: iconfig <0x%x>",
+				    iconfig));
+			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
+		}
+		XMAC_REG_WR(handle, portn, XTXMAC_STAT_MSK_REG, ~iconfig);
+
+		break;
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_xmac_tx_iconfig"
+				    " Invalid Input: iconfig <0x%x>",
+				    iconfig));
+		return (NPI_FAILURE | NPI_MAC_OPCODE_INVALID(portn));
+	}
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_xmac_rx_iconfig(npi_handle_t handle, config_op_t op, uint8_t portn,
+		    xmac_rx_iconfig_t iconfig)
+{
+	uint64_t val = 0;
+
+	ASSERT(IS_XMAC_PORT_NUM_VALID(portn));
+
+	switch (op) {
+	case ENABLE:
+	case DISABLE:
+		ASSERT((iconfig != 0) && ((iconfig & ~ICFG_XMAC_RX_ALL) == 0));
+		if ((iconfig == 0) || (iconfig & ~ICFG_XMAC_RX_ALL) != 0) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_xmac_rx_iconfig"
+					    " Invalid Input: iconfig <0x%x>",
+					    iconfig));
+			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
+		}
+		XMAC_REG_RD(handle, portn, XRXMAC_STAT_MSK_REG, &val);
+		if (op == ENABLE)
+			val &= ~iconfig;
+		else
+			val |= iconfig;
+		XMAC_REG_WR(handle, portn, XRXMAC_STAT_MSK_REG, val);
+
+		break;
+	case INIT:
+		ASSERT((iconfig & ~ICFG_XMAC_RX_ALL) == 0);
+		if ((iconfig & ~ICFG_XMAC_RX_ALL) != 0) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_xmac_rx_iconfig"
+					    " Invalid Input: iconfig <0x%x>",
+					    iconfig));
+			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
+		}
+		XMAC_REG_WR(handle, portn, XRXMAC_STAT_MSK_REG, ~iconfig);
+
+		break;
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_xmac_rx_iconfig"
+				    " Invalid Input: iconfig <0x%x>",
+				    iconfig));
+		return (NPI_FAILURE | NPI_MAC_OPCODE_INVALID(portn));
+	}
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_xmac_ctl_iconfig(npi_handle_t handle, config_op_t op, uint8_t portn,
+			xmac_ctl_iconfig_t iconfig)
+{
+	uint64_t val = 0;
+
+	ASSERT(IS_XMAC_PORT_NUM_VALID(portn));
+
+	switch (op) {
+	case ENABLE:
+	case DISABLE:
+		ASSERT((iconfig != 0) &&	\
+			((iconfig & ~ICFG_XMAC_CTRL_ALL) == 0));
+		if ((iconfig == 0) || (iconfig & ~ICFG_XMAC_CTRL_ALL) != 0) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_xmac_ctl_iconfig"
+					    " Invalid Input: iconfig <0x%x>",
+					    iconfig));
+			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
+		}
+		XMAC_REG_RD(handle, portn, XMAC_C_S_MSK_REG, &val);
+		if (op == ENABLE)
+			val &= ~iconfig;
+		else
+			val |= iconfig;
+		XMAC_REG_WR(handle, portn, XMAC_C_S_MSK_REG, val);
+
+		break;
+	case INIT:
+		ASSERT((iconfig & ~ICFG_XMAC_CTRL_ALL) == 0);
+		if ((iconfig & ~ICFG_XMAC_CTRL_ALL) != 0) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_xmac_ctl_iconfig"
+					    " Invalid Input: iconfig <0x%x>",
+					    iconfig));
+			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
+		}
+		XMAC_REG_WR(handle, portn, XMAC_C_S_MSK_REG, ~iconfig);
+
+		break;
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_xmac_ctl_iconfig"
+					    " Invalid Input: iconfig <0x%x>",
+					    iconfig));
+		return (NPI_FAILURE | NPI_MAC_OPCODE_INVALID(portn));
+	}
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_xmac_tx_get_istatus(npi_handle_t handle, uint8_t portn,
+			xmac_tx_iconfig_t *istatus)
+{
+	uint64_t val;
+
+	ASSERT(IS_XMAC_PORT_NUM_VALID(portn));
+
+	XMAC_REG_RD(handle, portn, XTXMAC_STATUS_REG, &val);
+	*istatus = (uint32_t)val;
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_xmac_rx_get_istatus(npi_handle_t handle, uint8_t portn,
+			xmac_rx_iconfig_t *istatus)
+{
+	uint64_t val;
+
+	ASSERT(IS_XMAC_PORT_NUM_VALID(portn));
+
+	XMAC_REG_RD(handle, portn, XRXMAC_STATUS_REG, &val);
+	*istatus = (uint32_t)val;
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_xmac_ctl_get_istatus(npi_handle_t handle, uint8_t portn,
+			xmac_ctl_iconfig_t *istatus)
+{
+	uint64_t val;
+
+	ASSERT(IS_XMAC_PORT_NUM_VALID(portn));
+
+	XMAC_REG_RD(handle, portn, XMAC_CTRL_STAT_REG, &val);
+	*istatus = (uint32_t)val;
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_xmac_xpcs_reset(npi_handle_t handle, uint8_t portn)
+{
+	uint64_t val;
+
+	ASSERT(IS_XMAC_PORT_NUM_VALID(portn));
+
+	XPCS_REG_RD(handle, portn, XPCS_CTRL_1_REG, &val);
+	val |= XPCS_CTRL1_RST;
+	XPCS_REG_WR(handle, portn, XPCS_CTRL_1_REG, val);
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_xmac_xpcs_enable(npi_handle_t handle, uint8_t portn)
+{
+	uint64_t val;
+
+	ASSERT(IS_XMAC_PORT_NUM_VALID(portn));
+
+	XPCS_REG_RD(handle, portn, XPCS_CFG_VENDOR_1_REG, &val);
+	val |= XPCS_CFG_XPCS_ENABLE;
+	XPCS_REG_WR(handle, portn, XPCS_CFG_VENDOR_1_REG, val);
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_xmac_xpcs_disable(npi_handle_t handle, uint8_t portn)
+{
+	uint64_t val;
+
+	ASSERT(IS_XMAC_PORT_NUM_VALID(portn));
+
+	XPCS_REG_RD(handle, portn, XPCS_CFG_VENDOR_1_REG, &val);
+	val &= ~XPCS_CFG_XPCS_ENABLE;
+	XPCS_REG_WR(handle, portn, XPCS_CFG_VENDOR_1_REG, val);
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_xmac_xpcs_read(npi_handle_t handle, uint8_t portn, uint8_t xpcs_reg,
+			uint32_t *value)
+{
+	uint32_t reg;
+	uint64_t val;
+
+	ASSERT(IS_XMAC_PORT_NUM_VALID(portn));
+
+	switch (xpcs_reg) {
+	case XPCS_REG_CONTROL1:
+		reg = XPCS_CTRL_1_REG;
+		break;
+	case XPCS_REG_STATUS1:
+		reg = XPCS_STATUS_1_REG;
+		break;
+	case XPCS_REG_DEVICE_ID:
+		reg = XPCS_DEV_ID_REG;
+		break;
+	case XPCS_REG_SPEED_ABILITY:
+		reg = XPCS_SPEED_ABILITY_REG;
+		break;
+	case XPCS_REG_DEVICE_IN_PKG:
+		reg = XPCS_DEV_IN_PKG_REG;
+		break;
+	case XPCS_REG_CONTROL2:
+		reg = XPCS_CTRL_2_REG;
+		break;
+	case XPCS_REG_STATUS2:
+		reg = XPCS_STATUS_2_REG;
+		break;
+	case XPCS_REG_PKG_ID:
+		reg = XPCS_PKG_ID_REG;
+		break;
+	case XPCS_REG_STATUS:
+		reg = XPCS_STATUS_REG;
+		break;
+	case XPCS_REG_TEST_CONTROL:
+		reg = XPCS_TEST_CTRL_REG;
+		break;
+	case XPCS_REG_CONFIG_VENDOR1:
+		reg = XPCS_CFG_VENDOR_1_REG;
+		break;
+	case XPCS_REG_DIAG_VENDOR2:
+		reg = XPCS_DIAG_VENDOR_2_REG;
+		break;
+	case XPCS_REG_MASK1:
+		reg = XPCS_MASK_1_REG;
+		break;
+	case XPCS_REG_PACKET_COUNTER:
+		reg = XPCS_PKT_CNTR_REG;
+		break;
+	case XPCS_REG_TX_STATEMACHINE:
+		reg = XPCS_TX_STATE_MC_REG;
+		break;
+	case XPCS_REG_DESCWERR_COUNTER:
+		reg = XPCS_DESKEW_ERR_CNTR_REG;
+		break;
+	case XPCS_REG_SYMBOL_ERR_L0_1_COUNTER:
+		reg = XPCS_SYM_ERR_CNTR_L0_L1_REG;
+		break;
+	case XPCS_REG_SYMBOL_ERR_L2_3_COUNTER:
+		reg = XPCS_SYM_ERR_CNTR_L2_L3_REG;
+		break;
+	case XPCS_REG_TRAINING_VECTOR:
+		reg = XPCS_TRAINING_VECTOR_REG;
+		break;
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_xmac_xpcs_read"
+				    " Invalid Input: xpcs_reg <0x%x>",
+				    xpcs_reg));
+		return (NPI_FAILURE | NPI_MAC_REG_INVALID(portn));
+	}
+	XPCS_REG_RD(handle, portn, reg, &val);
+	*value = val & 0xFFFFFFFF;
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_xmac_xpcs_write(npi_handle_t handle, uint8_t portn, uint8_t xpcs_reg,
+			uint32_t value)
+{
+	uint32_t reg;
+	uint64_t val;
+
+	ASSERT(IS_XMAC_PORT_NUM_VALID(portn));
+
+	switch (xpcs_reg) {
+	case XPCS_REG_CONTROL1:
+		reg = XPCS_CTRL_1_REG;
+		break;
+	case XPCS_REG_TEST_CONTROL:
+		reg = XPCS_TEST_CTRL_REG;
+		break;
+	case XPCS_REG_CONFIG_VENDOR1:
+		reg = XPCS_CFG_VENDOR_1_REG;
+		break;
+	case XPCS_REG_DIAG_VENDOR2:
+		reg = XPCS_DIAG_VENDOR_2_REG;
+		break;
+	case XPCS_REG_MASK1:
+		reg = XPCS_MASK_1_REG;
+		break;
+	case XPCS_REG_PACKET_COUNTER:
+		reg = XPCS_PKT_CNTR_REG;
+		break;
+	case XPCS_REG_DESCWERR_COUNTER:
+		reg = XPCS_DESKEW_ERR_CNTR_REG;
+		break;
+	case XPCS_REG_TRAINING_VECTOR:
+		reg = XPCS_TRAINING_VECTOR_REG;
+		break;
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_xmac_xpcs_write"
+				    " Invalid Input: xpcs_reg <0x%x>",
+				    xpcs_reg));
+		return (NPI_FAILURE | NPI_MAC_PCS_REG_INVALID(portn));
+	}
+	val = value;
+
+	XPCS_REG_WR(handle, portn, reg, val);
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_bmac_reset(npi_handle_t handle, uint8_t portn, npi_mac_reset_t mode)
+{
+	uint64_t val = 0;
+	boolean_t txmac = B_FALSE;
+
+	ASSERT(IS_BMAC_PORT_NUM_VALID(portn));
+
+	switch (mode) {
+	case TX_MAC_RESET:
+		BMAC_REG_WR(handle, portn, BTXMAC_SW_RST_REG, 0x1);
+		BMAC_WAIT_REG(handle, portn, BTXMAC_SW_RST_REG, val);
+		txmac = B_TRUE;
+		break;
+	case RX_MAC_RESET:
+		BMAC_REG_WR(handle, portn, BRXMAC_SW_RST_REG, 0x1);
+		BMAC_WAIT_REG(handle, portn, BRXMAC_SW_RST_REG, val);
+		break;
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_bmac_reset"
+				    " Invalid Input: mode <0x%x>",
+				    mode));
+		return (NPI_FAILURE | NPI_MAC_RESET_MODE_INVALID(portn));
+	}
+
+	if (val != 0) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_bmac_reset"
+				    " BMAC_RESET HW Error: ret <0x%x>",
+				    val));
+		if (txmac)
+			return (NPI_FAILURE | NPI_TXMAC_RESET_FAILED(portn));
+		else
+			return (NPI_FAILURE | NPI_RXMAC_RESET_FAILED(portn));
+	}
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_mac_pcs_reset(npi_handle_t handle, uint8_t portn)
+{
+	/* Reset the PCS by setting the reset bit in the PCS MII control reg */
+	uint64_t val = 0;
+
+	ASSERT(IS_PORT_NUM_VALID(portn));
+
+	PCS_REG_RD(handle, portn, PCS_MII_CTRL_REG, &val);
+	val |= PCS_MII_RESET;
+	PCS_REG_WR(handle, portn, PCS_MII_CTRL_REG, val);
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_mac_get_link_status(npi_handle_t handle, uint8_t portn,
+			boolean_t *link_up)
+{
+	uint64_t val;
+
+	ASSERT(IS_PORT_NUM_VALID(portn));
+
+	PCS_REG_RD(handle, portn, PCS_MII_STATUS_REG, &val);
+
+	if (val & PCS_MII_STATUS_LINK_STATUS) {
+		*link_up = B_TRUE;
+	} else {
+		*link_up = B_FALSE;
+	}
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_bmac_tx_config(npi_handle_t handle, config_op_t op, uint8_t portn,
+			bmac_tx_config_t config)
+{
+	uint64_t val = 0;
+
+	ASSERT(IS_BMAC_PORT_NUM_VALID(portn));
+
+	switch (op) {
+	case ENABLE:
+	case DISABLE:
+		ASSERT((config != 0) && ((config & ~CFG_BMAC_TX_ALL) == 0));
+		if ((config == 0) || (config & ~CFG_BMAC_TX_ALL) != 0) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_bmac_tx_config"
+					    " Invalid Input: config <0x%x>",
+					    config));
+			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
+		}
+		if (op == ENABLE) {
+			BMAC_REG_RD(handle, portn, TXMAC_CONFIG_REG, &val);
+			if (config & CFG_BMAC_TX)
+				val |= MAC_TX_CFG_TXMAC_ENABLE;
+			if (config & CFG_BMAC_TX_CRC)
+				val &= ~MAC_TX_CFG_NO_FCS;
+			BMAC_REG_WR(handle, portn, TXMAC_CONFIG_REG, val);
+		} else {
+			BMAC_REG_RD(handle, portn, TXMAC_CONFIG_REG, &val);
+			if (config & CFG_BMAC_TX)
+				val &= ~MAC_TX_CFG_TXMAC_ENABLE;
+			if (config & CFG_BMAC_TX_CRC)
+				val |= MAC_TX_CFG_NO_FCS;
+			BMAC_REG_WR(handle, portn, TXMAC_CONFIG_REG, val);
+		}
+		break;
+	case INIT:
+		ASSERT((config & ~CFG_BMAC_TX_ALL) == 0);
+		if ((config & ~CFG_BMAC_TX_ALL) != 0) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_bmac_tx_config"
+					    " Invalid Input: config <0x%x>",
+					    config));
+			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
+		}
+		BMAC_REG_RD(handle, portn, TXMAC_CONFIG_REG, &val);
+		if (config & CFG_BMAC_TX)
+			val |= MAC_TX_CFG_TXMAC_ENABLE;
+		else
+			val &= ~MAC_TX_CFG_TXMAC_ENABLE;
+		if (config & CFG_BMAC_TX_CRC)
+			val &= ~MAC_TX_CFG_NO_FCS;
+		else
+			val |= MAC_TX_CFG_NO_FCS;
+		BMAC_REG_WR(handle, portn, TXMAC_CONFIG_REG, val);
+		break;
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_bmac_tx_config"
+				    " Invalid Input: op <0x%x>",
+				    op));
+		return (NPI_FAILURE | NPI_MAC_OPCODE_INVALID(portn));
+	}
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_bmac_rx_config(npi_handle_t handle, config_op_t op, uint8_t portn,
+			bmac_rx_config_t config)
+{
+	uint64_t val = 0;
+
+	ASSERT(IS_BMAC_PORT_NUM_VALID(portn));
+
+	switch (op) {
+	case ENABLE:
+	case DISABLE:
+		ASSERT((config != 0) && ((config & ~CFG_BMAC_RX_ALL) == 0));
+		if ((config == 0) || (config & ~CFG_BMAC_RX_ALL) != 0) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_bmac_rx_config"
+					    " Invalid Input: config <0x%x>",
+					    config));
+			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
+		}
+		if (op == ENABLE) {
+			BMAC_REG_RD(handle, portn, RXMAC_CONFIG_REG, &val);
+			if (config & CFG_BMAC_RX)
+				val |= MAC_RX_CFG_RXMAC_ENABLE;
+			if (config & CFG_BMAC_RX_STRIP_PAD)
+				val |= MAC_RX_CFG_STRIP_PAD;
+			if (config & CFG_BMAC_RX_STRIP_CRC)
+				val |= MAC_RX_CFG_STRIP_FCS;
+			if (config & CFG_BMAC_RX_PROMISCUOUS)
+				val |= MAC_RX_CFG_PROMISC;
+			if (config & CFG_BMAC_RX_PROMISCUOUSGROUP)
+				val |= MAC_RX_CFG_PROMISC_GROUP;
+			if (config & CFG_BMAC_RX_HASH_FILTER)
+				val |= MAC_RX_CFG_HASH_FILTER_EN;
+			if (config & CFG_BMAC_RX_ADDR_FILTER)
+				val |= MAC_RX_CFG_ADDR_FILTER_EN;
+			if (config & CFG_BMAC_RX_DISCARD_ON_ERR)
+				val &= ~MAC_RX_CFG_DISABLE_DISCARD;
+			BMAC_REG_WR(handle, portn, RXMAC_CONFIG_REG, val);
+		} else {
+			BMAC_REG_RD(handle, portn, RXMAC_CONFIG_REG, &val);
+			if (config & CFG_BMAC_RX)
+				val &= ~MAC_RX_CFG_RXMAC_ENABLE;
+			if (config & CFG_BMAC_RX_STRIP_PAD)
+				val &= ~MAC_RX_CFG_STRIP_PAD;
+			if (config & CFG_BMAC_RX_STRIP_CRC)
+				val &= ~MAC_RX_CFG_STRIP_FCS;
+			if (config & CFG_BMAC_RX_PROMISCUOUS)
+				val &= ~MAC_RX_CFG_PROMISC;
+			if (config & CFG_BMAC_RX_PROMISCUOUSGROUP)
+				val &= ~MAC_RX_CFG_PROMISC_GROUP;
+			if (config & CFG_BMAC_RX_HASH_FILTER)
+				val &= ~MAC_RX_CFG_HASH_FILTER_EN;
+			if (config & CFG_BMAC_RX_ADDR_FILTER)
+				val &= ~MAC_RX_CFG_ADDR_FILTER_EN;
+			if (config & CFG_BMAC_RX_DISCARD_ON_ERR)
+				val |= MAC_RX_CFG_DISABLE_DISCARD;
+			BMAC_REG_WR(handle, portn, RXMAC_CONFIG_REG, val);
+		}
+		break;
+	case INIT:
+		ASSERT((config & ~CFG_BMAC_RX_ALL) == 0);
+		if ((config & ~CFG_BMAC_RX_ALL) != 0) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_bmac_rx_config"
+					    " Invalid Input: config <0x%x>",
+					    config));
+			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
+		}
+		BMAC_REG_RD(handle, portn, RXMAC_CONFIG_REG, &val);
+		if (config & CFG_BMAC_RX)
+			val |= MAC_RX_CFG_RXMAC_ENABLE;
+		else
+			val &= ~MAC_RX_CFG_RXMAC_ENABLE;
+		if (config & CFG_BMAC_RX_STRIP_PAD)
+			val |= MAC_RX_CFG_STRIP_PAD;
+		else
+			val &= ~MAC_RX_CFG_STRIP_PAD;
+		if (config & CFG_BMAC_RX_STRIP_CRC)
+			val |= MAC_RX_CFG_STRIP_FCS;
+		else
+			val &= ~MAC_RX_CFG_STRIP_FCS;
+		if (config & CFG_BMAC_RX_PROMISCUOUS)
+			val |= MAC_RX_CFG_PROMISC;
+		else
+			val &= ~MAC_RX_CFG_PROMISC;
+		if (config & CFG_BMAC_RX_PROMISCUOUSGROUP)
+			val |= MAC_RX_CFG_PROMISC_GROUP;
+		else
+			val &= ~MAC_RX_CFG_PROMISC_GROUP;
+		if (config & CFG_BMAC_RX_HASH_FILTER)
+			val |= MAC_RX_CFG_HASH_FILTER_EN;
+		else
+			val &= ~MAC_RX_CFG_HASH_FILTER_EN;
+		if (config & CFG_BMAC_RX_ADDR_FILTER)
+			val |= MAC_RX_CFG_ADDR_FILTER_EN;
+		else
+			val &= ~MAC_RX_CFG_ADDR_FILTER_EN;
+		if (config & CFG_BMAC_RX_DISCARD_ON_ERR)
+			val &= ~MAC_RX_CFG_DISABLE_DISCARD;
+		else
+			val |= MAC_RX_CFG_DISABLE_DISCARD;
+
+		BMAC_REG_WR(handle, portn, RXMAC_CONFIG_REG, val);
+		break;
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_bmac_rx_config"
+					    " Invalid Input: op <0x%x>", op));
+		return (NPI_FAILURE | NPI_MAC_OPCODE_INVALID(portn));
+	}
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_bmac_rx_iconfig(npi_handle_t handle, config_op_t op, uint8_t portn,
+		    bmac_rx_iconfig_t iconfig)
+{
+	uint64_t val = 0;
+
+	ASSERT(IS_BMAC_PORT_NUM_VALID(portn));
+
+	switch (op) {
+	case ENABLE:
+	case DISABLE:
+		ASSERT((iconfig != 0) && ((iconfig & ~ICFG_BMAC_RX_ALL) == 0));
+		if ((iconfig == 0) || (iconfig & ~ICFG_BMAC_RX_ALL) != 0) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_bmac_rx_iconfig"
+					    " Invalid Input: iconfig <0x%x>",
+					    iconfig));
+			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
+		}
+		BMAC_REG_RD(handle, portn, BRXMAC_STAT_MSK_REG, &val);
+		if (op == ENABLE)
+			val &= ~iconfig;
+		else
+			val |= iconfig;
+		BMAC_REG_WR(handle, portn, BRXMAC_STAT_MSK_REG, val);
+
+		break;
+	case INIT:
+		ASSERT((iconfig & ~ICFG_BMAC_RX_ALL) == 0);
+		if ((iconfig & ~ICFG_BMAC_RX_ALL) != 0) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_bmac_rx_iconfig"
+					    " Invalid Input: iconfig <0x%x>",
+					    iconfig));
+			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
+		}
+		BMAC_REG_WR(handle, portn, BRXMAC_STAT_MSK_REG, ~iconfig);
+
+		break;
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_bmac_rx_iconfig"
+				    " Invalid Input: iconfig <0x%x>",
+				    iconfig));
+		return (NPI_FAILURE | NPI_MAC_OPCODE_INVALID(portn));
+	}
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_bmac_xif_config(npi_handle_t handle, config_op_t op, uint8_t portn,
+		    bmac_xif_config_t config)
+{
+	uint64_t val = 0;
+
+	ASSERT(IS_BMAC_PORT_NUM_VALID(portn));
+
+	switch (op) {
+	case ENABLE:
+	case DISABLE:
+		ASSERT((config != 0) && ((config & ~CFG_BMAC_XIF_ALL) == 0));
+		if ((config == 0) || (config & ~CFG_BMAC_XIF_ALL) != 0) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_bmac_xif_config"
+					    " Invalid Input: config <0x%x>",
+					    config));
+			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
+		}
+		if (op == ENABLE) {
+			BMAC_REG_RD(handle, portn, MAC_XIF_CONFIG_REG, &val);
+			if (config & CFG_BMAC_XIF_TX_OUTPUT)
+				val |= MAC_XIF_TX_OUTPUT_EN;
+			if (config & CFG_BMAC_XIF_LOOPBACK)
+				val |= MAC_XIF_MII_INT_LOOPBACK;
+			if (config & CFG_BMAC_XIF_GMII_MODE)
+				val |= MAC_XIF_GMII_MODE;
+			if (config & CFG_BMAC_XIF_LINKLED)
+				val |= MAC_XIF_LINK_LED;
+			if (config & CFG_BMAC_XIF_LED_POLARITY)
+				val |= MAC_XIF_LED_POLARITY;
+			if (config & CFG_BMAC_XIF_SEL_CLK_25MHZ)
+				val |= MAC_XIF_SEL_CLK_25MHZ;
+			BMAC_REG_WR(handle, portn, MAC_XIF_CONFIG_REG, val);
+		} else {
+			BMAC_REG_RD(handle, portn, MAC_XIF_CONFIG_REG, &val);
+			if (config & CFG_BMAC_XIF_TX_OUTPUT)
+				val &= ~MAC_XIF_TX_OUTPUT_EN;
+			if (config & CFG_BMAC_XIF_LOOPBACK)
+				val &= ~MAC_XIF_MII_INT_LOOPBACK;
+			if (config & CFG_BMAC_XIF_GMII_MODE)
+				val &= ~MAC_XIF_GMII_MODE;
+			if (config & CFG_BMAC_XIF_LINKLED)
+				val &= ~MAC_XIF_LINK_LED;
+			if (config & CFG_BMAC_XIF_LED_POLARITY)
+				val &= ~MAC_XIF_LED_POLARITY;
+			if (config & CFG_BMAC_XIF_SEL_CLK_25MHZ)
+				val &= ~MAC_XIF_SEL_CLK_25MHZ;
+			BMAC_REG_WR(handle, portn, MAC_XIF_CONFIG_REG, val);
+		}
+		break;
+	case INIT:
+		ASSERT((config & ~CFG_BMAC_XIF_ALL) == 0);
+		if ((config & ~CFG_BMAC_XIF_ALL) != 0) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_bmac_xif_config"
+					    " Invalid Input: config <0x%x>",
+					    config));
+			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
+		}
+		BMAC_REG_RD(handle, portn, MAC_XIF_CONFIG_REG, &val);
+		if (config & CFG_BMAC_XIF_TX_OUTPUT)
+			val |= MAC_XIF_TX_OUTPUT_EN;
+		else
+			val &= ~MAC_XIF_TX_OUTPUT_EN;
+		if (config & CFG_BMAC_XIF_LOOPBACK)
+			val |= MAC_XIF_MII_INT_LOOPBACK;
+		else
+			val &= ~MAC_XIF_MII_INT_LOOPBACK;
+		if (config & CFG_BMAC_XIF_GMII_MODE)
+			val |= MAC_XIF_GMII_MODE;
+		else
+			val &= ~MAC_XIF_GMII_MODE;
+		if (config & CFG_BMAC_XIF_LINKLED)
+			val |= MAC_XIF_LINK_LED;
+		else
+			val &= ~MAC_XIF_LINK_LED;
+		if (config & CFG_BMAC_XIF_LED_POLARITY)
+			val |= MAC_XIF_LED_POLARITY;
+		else
+			val &= ~MAC_XIF_LED_POLARITY;
+		if (config & CFG_BMAC_XIF_SEL_CLK_25MHZ)
+			val |= MAC_XIF_SEL_CLK_25MHZ;
+		else
+			val &= ~MAC_XIF_SEL_CLK_25MHZ;
+		BMAC_REG_WR(handle, portn, MAC_XIF_CONFIG_REG, val);
+		break;
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_bmac_xif_config"
+				    " Invalid Input: op <0x%x>",
+				    op));
+		return (NPI_FAILURE | NPI_MAC_OPCODE_INVALID(portn));
+	}
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_bmac_tx_iconfig(npi_handle_t handle, config_op_t op, uint8_t portn,
+		    bmac_tx_iconfig_t iconfig)
+{
+	uint64_t val = 0;
+
+	ASSERT(IS_BMAC_PORT_NUM_VALID(portn));
+
+	switch (op) {
+	case ENABLE:
+	case DISABLE:
+		ASSERT((iconfig != 0) && ((iconfig & ~ICFG_BMAC_TX_ALL) == 0));
+		if ((iconfig == 0) || (iconfig & ~ICFG_BMAC_TX_ALL) != 0) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_bmac_tx_iconfig"
+					    " Invalid Input: iconfig <0x%x>",
+					    iconfig));
+			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
+		}
+		BMAC_REG_RD(handle, portn, BTXMAC_STAT_MSK_REG, &val);
+		if (op == ENABLE)
+			val &= ~iconfig;
+		else
+			val |= iconfig;
+		BMAC_REG_WR(handle, portn, BTXMAC_STAT_MSK_REG, val);
+
+		break;
+	case INIT:
+		ASSERT((iconfig & ~ICFG_BMAC_TX_ALL) == 0);
+		if ((iconfig & ~ICFG_BMAC_TX_ALL) != 0) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_bmac_tx_iconfig"
+					    " Invalid Input: iconfig <0x%x>",
+					    iconfig));
+			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
+		}
+		BMAC_REG_WR(handle, portn, BTXMAC_STAT_MSK_REG, ~iconfig);
+
+		break;
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_bmac_tx_iconfig"
+				    " Invalid Input: iconfig <0x%x>",
+				    iconfig));
+		return (NPI_FAILURE | NPI_MAC_OPCODE_INVALID(portn));
+	}
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_bmac_ctl_iconfig(npi_handle_t handle, config_op_t op, uint8_t portn,
+			bmac_ctl_iconfig_t iconfig)
+{
+	uint64_t val = 0;
+
+	ASSERT(IS_BMAC_PORT_NUM_VALID(portn));
+
+	switch (op) {
+	case ENABLE:
+	case DISABLE:
+		ASSERT((iconfig != 0) && ((iconfig & ~ICFG_BMAC_CTL_ALL) == 0));
+		if ((iconfig == 0) || (iconfig & ~ICFG_BMAC_CTL_ALL) != 0) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_bmac_ctl_iconfig"
+					    " Invalid Input: iconfig <0x%x>",
+					    iconfig));
+			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
+		}
+		BMAC_REG_RD(handle, portn, BMAC_C_S_MSK_REG, &val);
+		if (op == ENABLE)
+			val &= ~iconfig;
+		else
+			val |= iconfig;
+		BMAC_REG_WR(handle, portn, BMAC_C_S_MSK_REG, val);
+
+		break;
+	case INIT:
+		ASSERT((iconfig & ~ICFG_BMAC_CTL_ALL) == 0);
+		if ((iconfig & ~ICFG_BMAC_CTL_ALL) != 0) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_bmac_ctl_iconfig"
+					    " Invalid Input: iconfig <0x%x>",
+					    iconfig));
+			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
+		}
+		BMAC_REG_WR(handle, portn, BMAC_C_S_MSK_REG, ~iconfig);
+
+		break;
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_bmac_ctl_iconfig"
+				    " Invalid Input: iconfig <0x%x>",
+				    iconfig));
+		return (NPI_FAILURE | NPI_MAC_OPCODE_INVALID(portn));
+	}
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_bmac_tx_get_istatus(npi_handle_t handle, uint8_t portn,
+			bmac_tx_iconfig_t *istatus)
+{
+	uint64_t val = 0;
+
+	ASSERT(IS_BMAC_PORT_NUM_VALID(portn));
+
+	BMAC_REG_RD(handle, portn, BTXMAC_STATUS_REG, &val);
+	*istatus = (uint32_t)val;
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_bmac_rx_get_istatus(npi_handle_t handle, uint8_t portn,
+			bmac_rx_iconfig_t *istatus)
+{
+	uint64_t val = 0;
+
+	ASSERT(IS_BMAC_PORT_NUM_VALID(portn));
+
+	BMAC_REG_RD(handle, portn, BRXMAC_STATUS_REG, &val);
+	*istatus = (uint32_t)val;
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_bmac_ctl_get_istatus(npi_handle_t handle, uint8_t portn,
+				bmac_ctl_iconfig_t *istatus)
+{
+	uint64_t val = 0;
+
+	ASSERT(IS_BMAC_PORT_NUM_VALID(portn));
+
+	BMAC_REG_RD(handle, portn, BMAC_CTRL_STAT_REG, &val);
+	*istatus = (uint32_t)val;
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_mac_mif_mdio_read(npi_handle_t handle, uint8_t portn, uint8_t device,
+			uint16_t xcvr_reg, uint16_t *value)
+{
+	mif_frame_t frame;
+	uint_t delay;
+
+	frame.value = 0;
+	frame.bits.w0.st = FRAME45_ST;		/* Clause 45	*/
+	frame.bits.w0.op = FRAME45_OP_ADDR;	/* Select address	*/
+	frame.bits.w0.phyad = portn;		/* Port number	*/
+	frame.bits.w0.regad = device;		/* Device number	*/
+	frame.bits.w0.ta_msb = 1;
+	frame.bits.w0.ta_lsb = 0;
+	frame.bits.w0.data = xcvr_reg;	/* register address */
+
+	NPI_DEBUG_MSG((handle.function, MIF_CTL,
+		"mdio read port %d addr val=0x%x\n", portn, frame.value));
+
+	MIF_REG_WR(handle, MIF_OUTPUT_FRAME_REG, frame.value);
+
+	delay = 0;
+	MIF_WAIT_REG(handle, frame, delay, MIF_DELAY, MIF_DELAY);
+
+	NPI_DEBUG_MSG((handle.function, MIF_CTL,
+		"mdio read port %d addr poll=0x%x\n", portn, frame.value));
+
+	if (delay == MIF_DELAY) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					"mdio read no response1\n"));
+	}
+
+	frame.bits.w0.st = FRAME45_ST; /* Clause 45 */
+	frame.bits.w0.op = FRAME45_OP_READ; /* Read */
+	frame.bits.w0.phyad = portn; /* Port Number */
+	frame.bits.w0.regad = device; /* Device Number */
+	frame.bits.w0.ta_msb = 1;
+	frame.bits.w0.ta_lsb = 0;
+
+	NPI_DEBUG_MSG((handle.function, MIF_CTL,
+		"mdio read port %d data frame=0x%x\n", portn, frame.value));
+
+	MIF_REG_WR(handle, MIF_OUTPUT_FRAME_REG, frame.value);
+
+	delay = 0;
+	MIF_WAIT_REG(handle, frame, delay, MIF_DELAY, MIF_DELAY);
+
+	NPI_DEBUG_MSG((handle.function, MIF_CTL,
+		"mdio read port %d data poll=0x%x\n", portn, frame.value));
+
+	*value = frame.bits.w0.data;
+	NPI_DEBUG_MSG((handle.function, MIF_CTL,
+		"mdio read port=%d val=0x%x\n", portn, *value));
+
+	if (delay == MIF_DELAY) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			"mdio read no response2\n"));
+	}
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_mac_mif_mii_read(npi_handle_t handle, uint8_t portn, uint8_t xcvr_reg,
+			uint16_t *value)
+{
+	mif_frame_t frame;
+	uint_t delay;
+
+	frame.bits.w0.st = 0x1; /* Clause 22 */
+	frame.bits.w0.op = 0x2;
+	frame.bits.w0.phyad = portn;
+	frame.bits.w0.regad = xcvr_reg;
+	frame.bits.w0.ta_msb = 1;
+	frame.bits.w0.ta_lsb = 0;
+	MIF_REG_WR(handle, MIF_OUTPUT_FRAME_REG, frame.value);
+
+	delay = 0;
+	MIF_WAIT_REG(handle, frame, delay, MIF_DELAY, MAX_PIO_RETRIES);
+
+	if (delay == MAX_PIO_RETRIES)
+		return (NPI_FAILURE | NPI_MAC_MII_READ_FAILED(portn));
+
+	*value = frame.bits.w0.data;
+	NPI_DEBUG_MSG((handle.function, MIF_CTL,
+			"mif mii read port %d reg=0x%x frame=0x%x\n", portn,
+			xcvr_reg, frame.bits.w0.data));
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_mac_mif_mdio_write(npi_handle_t handle, uint8_t portn, uint8_t device,
+			uint16_t xcvr_reg, uint16_t value)
+{
+	mif_frame_t frame;
+	uint_t delay;
+
+	frame.value = 0;
+	frame.bits.w0.st = FRAME45_ST; /* Clause 45 */
+	frame.bits.w0.op = FRAME45_OP_ADDR; /* Select Address */
+	frame.bits.w0.phyad = portn; /* Port Number */
+	frame.bits.w0.regad = device; /* Device Number */
+	frame.bits.w0.ta_msb = 1;
+	frame.bits.w0.ta_lsb = 0;
+	frame.bits.w0.data = xcvr_reg;	/* register address */
+
+	MIF_REG_WR(handle, MIF_OUTPUT_FRAME_REG, frame.value);
+
+	NPI_DEBUG_MSG((handle.function, MIF_CTL,
+		"mdio write port %d addr val=0x%x\n", portn, frame.value));
+
+	delay = 0;
+	MIF_WAIT_REG(handle, frame, delay, MIF_DELAY, MIF_DELAY);
+
+	NPI_DEBUG_MSG((handle.function, MIF_CTL,
+		"mdio write port %d addr poll=0x%x\n", portn, frame.value));
+
+	if (delay == MIF_DELAY) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				"mdio write no response1\n"));
+	}
+
+	frame.bits.w0.st = FRAME45_ST; /* Clause 45 */
+	frame.bits.w0.op = FRAME45_OP_WRITE; /* Write */
+	frame.bits.w0.phyad = portn; /* Port number   */
+	frame.bits.w0.regad = device; /* Device number */
+	frame.bits.w0.ta_msb = 1;
+	frame.bits.w0.ta_lsb = 0;
+	frame.bits.w0.data = value;
+	MIF_REG_WR(handle, MIF_OUTPUT_FRAME_REG, frame.value);
+
+	NPI_DEBUG_MSG((handle.function, MIF_CTL,
+		"mdio write port %d data val=0x%x\n", portn, frame.value));
+
+	delay = 0;
+	MIF_WAIT_REG(handle, frame, delay, MIF_DELAY, MIF_DELAY);
+
+	NPI_DEBUG_MSG((handle.function, MIF_CTL,
+		"mdio write port %d data poll=0x%x\n", portn, frame.value));
+
+	if (delay == MIF_DELAY) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				"mdio write no response2\n"));
+	}
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_mac_mif_mii_write(npi_handle_t handle, uint8_t portn, uint8_t xcvr_reg,
+			uint16_t value)
+{
+	mif_frame_t frame;
+	uint_t delay;
+
+	frame.bits.w0.st = 0x1; /* Clause 22 */
+	frame.bits.w0.op = 0x1;
+	frame.bits.w0.phyad = portn;
+	frame.bits.w0.regad = xcvr_reg;
+	frame.bits.w0.ta_msb = 1;
+	frame.bits.w0.ta_lsb = 0;
+	frame.bits.w0.data = value;
+	MIF_REG_WR(handle, MIF_OUTPUT_FRAME_REG, frame.value);
+
+	delay = 0;
+	MIF_WAIT_REG(handle, frame, delay, MIF_DELAY, MAX_PIO_RETRIES);
+
+	NPI_DEBUG_MSG((handle.function, MIF_CTL,
+			"mif mii write port %d reg=0x%x frame=0x%x\n", portn,
+			xcvr_reg, frame.value));
+
+	if (delay == MAX_PIO_RETRIES)
+		return (NPI_FAILURE | NPI_MAC_MII_WRITE_FAILED(portn));
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_mac_pcs_mii_read(npi_handle_t handle, uint8_t portn, uint8_t xcvr_reg,
+			uint16_t *value)
+{
+	pcs_anar_t pcs_anar;
+	pcs_anar_t pcs_anlpar;
+	pcs_stat_t pcs_stat;
+	pcs_stat_mc_t pcs_stat_mc;
+	mii_anar_t anar;
+	mii_anar_t anlpar;
+	mii_aner_t aner;
+	mii_esr_t esr;
+	mii_gsr_t gsr;
+	uint64_t val = 0;
+
+	ASSERT(IS_PORT_NUM_VALID(portn));
+
+	switch (xcvr_reg) {
+	case MII_BMCR:
+		PCS_REG_RD(handle, portn, PCS_MII_CTRL_REG, &val);
+		*value = (uint16_t)val;
+		break;
+	case MII_BMSR:
+		PCS_REG_RD(handle, portn, PCS_MII_STATUS_REG, &val);
+		pcs_stat.value = val;
+		PCS_REG_RD(handle, portn, PCS_STATE_MACHINE_REG, &val);
+		pcs_stat_mc.value = val;
+		if ((pcs_stat_mc.bits.w0.link_cfg_stat == 0xB) &&
+			(pcs_stat_mc.bits.w0.word_sync != 0)) {
+			pcs_stat.bits.w0.link_stat = 1;
+		} else if (pcs_stat_mc.bits.w0.link_cfg_stat != 0xB) {
+			pcs_stat.bits.w0.link_stat = 0;
+		}
+		*value = (uint16_t)pcs_stat.value;
+		break;
+	case MII_ESR:
+		PCS_REG_RD(handle, portn, PCS_MII_ADVERT_REG, &val);
+		pcs_anar.value = (uint16_t)val;
+		esr.value = 0;
+		esr.bits.link_1000fdx = pcs_anar.bits.w0.full_duplex;
+		esr.bits.link_1000hdx = pcs_anar.bits.w0.half_duplex;
+		*value = esr.value;
+		break;
+	case MII_ANAR:
+		PCS_REG_RD(handle, portn, PCS_MII_ADVERT_REG, &val);
+		pcs_anar.value = (uint16_t)val;
+		anar.value = 0;
+		anar.bits.cap_pause = pcs_anar.bits.w0.pause;
+		anar.bits.cap_asmpause = pcs_anar.bits.w0.asm_pause;
+		*value = anar.value;
+		break;
+	case MII_ANLPAR:
+		PCS_REG_RD(handle, portn, PCS_MII_LPA_REG, &val);
+		pcs_anlpar.value = (uint16_t)val;
+		anlpar.value = 0;
+		anlpar.bits.cap_pause = pcs_anlpar.bits.w0.pause;
+		anlpar.bits.cap_asmpause = pcs_anlpar.bits.w0.asm_pause;
+		*value = anlpar.value;
+		break;
+	case MII_ANER:
+		PCS_REG_RD(handle, portn, PCS_MII_ADVERT_REG, &val);
+		pcs_anar.value = (uint16_t)val;
+		aner.value = 0;
+		aner.bits.lp_an_able = pcs_anar.bits.w0.full_duplex |
+						pcs_anar.bits.w0.half_duplex;
+		*value = aner.value;
+		break;
+	case MII_GSR:
+		PCS_REG_RD(handle, portn, PCS_MII_LPA_REG, &val);
+		pcs_anar.value = (uint16_t)val;
+		gsr.value = 0;
+		gsr.bits.link_1000fdx = pcs_anar.bits.w0.full_duplex;
+		gsr.bits.link_1000hdx = pcs_anar.bits.w0.half_duplex;
+		*value = gsr.value;
+		break;
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_mac_pcs_mii_read"
+				    " Invalid Input: xcvr_reg <0x%x>",
+				    xcvr_reg));
+		return (NPI_FAILURE | NPI_MAC_REG_INVALID(portn));
+	}
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_mac_pcs_mii_write(npi_handle_t handle, uint8_t portn, uint8_t xcvr_reg,
+			uint16_t value)
+{
+	pcs_anar_t pcs_anar;
+	mii_anar_t anar;
+	mii_gcr_t gcr;
+	uint64_t val;
+
+	ASSERT(IS_PORT_NUM_VALID(portn));
+
+	switch (xcvr_reg) {
+	case MII_BMCR:
+		val = (uint16_t)value;
+		PCS_REG_WR(handle, portn, PCS_MII_CTRL_REG, val);
+		break;
+	case MII_ANAR:
+		PCS_REG_RD(handle, portn, PCS_MII_ADVERT_REG, &val);
+		pcs_anar.value = (uint16_t)val;
+		anar.value = value;
+		pcs_anar.bits.w0.asm_pause = anar.bits.cap_asmpause;
+		pcs_anar.bits.w0.pause = anar.bits.cap_pause;
+		val = pcs_anar.value;
+		PCS_REG_WR(handle, portn, PCS_MII_ADVERT_REG, val);
+		break;
+	case MII_GCR:
+		PCS_REG_RD(handle, portn, PCS_MII_ADVERT_REG, &val);
+		pcs_anar.value = (uint16_t)val;
+		gcr.value = value;
+		pcs_anar.bits.w0.full_duplex = gcr.bits.link_1000fdx;
+		pcs_anar.bits.w0.half_duplex = gcr.bits.link_1000hdx;
+		val = pcs_anar.value;
+		PCS_REG_WR(handle, portn, PCS_MII_ADVERT_REG, val);
+		break;
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_mac_pcs_mii_write"
+				    " Invalid Input: xcvr_reg <0x%x>",
+				    xcvr_reg));
+		return (NPI_FAILURE | NPI_MAC_REG_INVALID(portn));
+	}
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_mac_mif_link_intr_enable(npi_handle_t handle, uint8_t portn,
+				uint8_t xcvr_reg, uint16_t mask)
+{
+	mif_cfg_t mif_cfg;
+
+	ASSERT(IS_PORT_NUM_VALID(portn));
+
+	ASSERT(xcvr_reg <= NXGE_MAX_MII_REGS);
+	if (xcvr_reg > NXGE_MAX_MII_REGS) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_mac_mif_link_intr_enable"
+				    " Invalid Input: xcvr_reg <0x%x>",
+				    xcvr_reg));
+		return (NPI_FAILURE | NPI_MAC_REG_INVALID(portn));
+	}
+
+	MIF_REG_RD(handle, MIF_CONFIG_REG, &mif_cfg.value);
+
+	mif_cfg.bits.w0.phy_addr = portn;		/* Port number */
+	mif_cfg.bits.w0.reg_addr = xcvr_reg;		/* Register address */
+	mif_cfg.bits.w0.indirect_md = 0; 		/* Clause 22 */
+	mif_cfg.bits.w0.poll_en = 1;
+
+	MIF_REG_WR(handle, MIF_MASK_REG, ~mask);
+	MIF_REG_WR(handle, MIF_CONFIG_REG, mif_cfg.value);
+
+	NXGE_DELAY(20);
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_mac_mif_mdio_link_intr_enable(npi_handle_t handle, uint8_t portn,
+			uint8_t device, uint16_t xcvr_reg, uint16_t mask)
+{
+	mif_cfg_t mif_cfg;
+	mif_frame_t frame;
+	uint_t delay;
+
+	ASSERT(IS_PORT_NUM_VALID(portn));
+
+	frame.bits.w0.st = FRAME45_ST;		/* Clause 45 */
+	frame.bits.w0.op = FRAME45_OP_ADDR;	/* Select address */
+	frame.bits.w0.phyad = portn;	/* Port number */
+	frame.bits.w0.regad = device;	/* Device number */
+	frame.bits.w0.ta_msb = 1;
+	frame.bits.w0.ta_lsb = 0;
+	frame.bits.w0.data = xcvr_reg;	/* register address */
+
+	MIF_REG_WR(handle, MIF_OUTPUT_FRAME_REG, frame.value);
+
+	delay = 0;
+	MIF_WAIT_REG(handle, frame, delay, MIF_DELAY, MAX_PIO_RETRIES);
+	if (delay == MAX_PIO_RETRIES)
+		return (NPI_FAILURE);
+
+	MIF_REG_RD(handle, MIF_CONFIG_REG, &mif_cfg.value);
+
+	mif_cfg.bits.w0.phy_addr = portn;		/* Port number */
+	mif_cfg.bits.w0.reg_addr = device;		/* Register address */
+	mif_cfg.bits.w0.indirect_md = 1; 		/* Clause 45 */
+	mif_cfg.bits.w0.poll_en = 1;
+
+	MIF_REG_WR(handle, MIF_MASK_REG, ~mask);
+	MIF_REG_WR(handle, MIF_CONFIG_REG, mif_cfg.value);
+
+	NXGE_DELAY(20);
+
+	return (NPI_SUCCESS);
+}
+
+void
+npi_mac_mif_set_indirect_mode(npi_handle_t handle, boolean_t on_off)
+{
+	mif_cfg_t mif_cfg;
+
+	MIF_REG_RD(handle, MIF_CONFIG_REG, &mif_cfg.value);
+	mif_cfg.bits.w0.indirect_md = on_off;
+	MIF_REG_WR(handle, MIF_CONFIG_REG, mif_cfg.value);
+}
+
+npi_status_t
+npi_bmac_send_pause(npi_handle_t handle, uint8_t portn, uint16_t pause_time)
+{
+	uint64_t val;
+
+	ASSERT(IS_BMAC_PORT_NUM_VALID(portn));
+
+	val = MAC_SEND_PAUSE_SEND | pause_time;
+	BMAC_REG_WR(handle, portn, MAC_SEND_PAUSE_REG, val);
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_xmac_xif_led(npi_handle_t handle, uint8_t portn, boolean_t on_off)
+{
+	uint64_t val = 0;
+
+	ASSERT(IS_XMAC_PORT_NUM_VALID(portn));
+
+	XMAC_REG_RD(handle, portn, XMAC_CONFIG_REG, &val);
+
+	if (on_off) {
+		val |= XMAC_XIF_LED_POLARITY;
+		val &= ~XMAC_XIF_FORCE_LED_ON;
+	} else {
+		val &= ~XMAC_XIF_LED_POLARITY;
+		val |= XMAC_XIF_FORCE_LED_ON;
+	}
+
+	XMAC_REG_WR(handle, portn, XMAC_CONFIG_REG, val);
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_xmac_zap_tx_counters(npi_handle_t handle, uint8_t portn)
+{
+	ASSERT(IS_XMAC_PORT_NUM_VALID(portn));
+
+	XMAC_REG_WR(handle, portn, XTXMAC_FRM_CNT_REG, 0);
+	XMAC_REG_WR(handle, portn, XTXMAC_BYTE_CNT_REG, 0);
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_xmac_zap_rx_counters(npi_handle_t handle, uint8_t portn)
+{
+	ASSERT(IS_XMAC_PORT_NUM_VALID(portn));
+
+	XMAC_REG_WR(handle, portn, XRXMAC_BT_CNT_REG, 0);
+	XMAC_REG_WR(handle, portn, XRXMAC_BC_FRM_CNT_REG, 0);
+	XMAC_REG_WR(handle, portn, XRXMAC_MC_FRM_CNT_REG, 0);
+	XMAC_REG_WR(handle, portn, XRXMAC_FRAG_CNT_REG, 0);
+	XMAC_REG_WR(handle, portn, XRXMAC_HIST_CNT1_REG, 0);
+	XMAC_REG_WR(handle, portn, XRXMAC_HIST_CNT2_REG, 0);
+	XMAC_REG_WR(handle, portn, XRXMAC_HIST_CNT3_REG, 0);
+	XMAC_REG_WR(handle, portn, XRXMAC_HIST_CNT4_REG, 0);
+	XMAC_REG_WR(handle, portn, XRXMAC_HIST_CNT5_REG, 0);
+	XMAC_REG_WR(handle, portn, XRXMAC_HIST_CNT6_REG, 0);
+	XMAC_REG_WR(handle, portn, XRXMAC_MPSZER_CNT_REG, 0);
+	XMAC_REG_WR(handle, portn, XRXMAC_CRC_ER_CNT_REG, 0);
+	XMAC_REG_WR(handle, portn, XRXMAC_CD_VIO_CNT_REG, 0);
+	XMAC_REG_WR(handle, portn, XRXMAC_AL_ER_CNT_REG, 0);
+	XMAC_REG_WR(handle, portn, XMAC_LINK_FLT_CNT_REG, 0);
+
+	return (NPI_SUCCESS);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nxge/npi/npi_mac.h	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,573 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _NPI_MAC_H
+#define	_NPI_MAC_H
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#include <npi.h>
+#include <nxge_mac_hw.h>
+#include <nxge_mii.h>
+
+typedef struct _npi_mac_addr {
+	uint16_t	w0;
+	uint16_t	w1;
+	uint16_t	w2;
+} npi_mac_addr_t;
+
+typedef enum npi_mac_attr {
+	MAC_PORT_MODE = 0,
+	MAC_PORT_FRAME_SIZE,
+	MAC_PORT_ADDR,
+	MAC_PORT_ADDR_FILTER,
+	MAC_PORT_ADDR_FILTER_MASK,
+	XMAC_PORT_IPG,
+	XMAC_10G_PORT_IPG,
+	BMAC_PORT_MAX_BURST_SIZE,
+	BMAC_PORT_PA_SIZE,
+	BMAC_PORT_CTRL_TYPE
+} npi_mac_attr_t;
+
+/* MAC Mode options */
+
+typedef enum npi_mac_mode_e {
+	MAC_MII_MODE = 0,
+	MAC_GMII_MODE,
+	MAC_XGMII_MODE
+} npi_mac_mode_t;
+
+typedef enum npi_mac_reset_e {
+	TX_MAC_RESET = 1,
+	RX_MAC_RESET,
+	XTX_MAC_REG_RESET,
+	XRX_MAC_REG_RESET,
+	XTX_MAC_LOGIC_RESET,
+	XRX_MAC_LOGIC_RESET,
+	XTX_MAC_RESET_ALL,
+	XRX_MAC_RESET_ALL,
+	BMAC_RESET_ALL,
+	XMAC_RESET_ALL
+} npi_mac_reset_t;
+
+typedef enum xmac_tx_iconfig_e {
+	ICFG_XMAC_TX_FRAME_XMIT 	= XMAC_TX_FRAME_XMIT,
+	ICFG_XMAC_TX_UNDERRUN		= XMAC_TX_UNDERRUN,
+	ICFG_XMAC_TX_MAX_PACKET_ERR	= XMAC_TX_MAX_PACKET_ERR,
+	ICFG_XMAC_TX_OVERFLOW		= XMAC_TX_OVERFLOW,
+	ICFG_XMAC_TX_FIFO_XFR_ERR	= XMAC_TX_FIFO_XFR_ERR,
+	ICFG_XMAC_TX_BYTE_CNT_EXP	= XMAC_TX_BYTE_CNT_EXP,
+	ICFG_XMAC_TX_FRAME_CNT_EXP	= XMAC_TX_FRAME_CNT_EXP,
+	ICFG_XMAC_TX_ALL = (XMAC_TX_FRAME_XMIT | XMAC_TX_UNDERRUN |
+				XMAC_TX_MAX_PACKET_ERR | XMAC_TX_OVERFLOW |
+				XMAC_TX_FIFO_XFR_ERR |  XMAC_TX_BYTE_CNT_EXP |
+				XMAC_TX_FRAME_CNT_EXP)
+} xmac_tx_iconfig_t;
+
+typedef enum xmac_rx_iconfig_e {
+	ICFG_XMAC_RX_FRAME_RCVD		= XMAC_RX_FRAME_RCVD,
+	ICFG_XMAC_RX_OVERFLOW		= XMAC_RX_OVERFLOW,
+	ICFG_XMAC_RX_UNDERFLOW		= XMAC_RX_UNDERFLOW,
+	ICFG_XMAC_RX_CRC_ERR_CNT_EXP	= XMAC_RX_CRC_ERR_CNT_EXP,
+	ICFG_XMAC_RX_LEN_ERR_CNT_EXP	= XMAC_RX_LEN_ERR_CNT_EXP,
+	ICFG_XMAC_RX_VIOL_ERR_CNT_EXP	= XMAC_RX_VIOL_ERR_CNT_EXP,
+	ICFG_XMAC_RX_OCT_CNT_EXP	= XMAC_RX_OCT_CNT_EXP,
+	ICFG_XMAC_RX_HST_CNT1_EXP	= XMAC_RX_HST_CNT1_EXP,
+	ICFG_XMAC_RX_HST_CNT2_EXP	= XMAC_RX_HST_CNT2_EXP,
+	ICFG_XMAC_RX_HST_CNT3_EXP	= XMAC_RX_HST_CNT3_EXP,
+	ICFG_XMAC_RX_HST_CNT4_EXP	= XMAC_RX_HST_CNT4_EXP,
+	ICFG_XMAC_RX_HST_CNT5_EXP	= XMAC_RX_HST_CNT5_EXP,
+	ICFG_XMAC_RX_HST_CNT6_EXP	= XMAC_RX_HST_CNT6_EXP,
+	ICFG_XMAC_RX_BCAST_CNT_EXP	= XMAC_RX_BCAST_CNT_EXP,
+	ICFG_XMAC_RX_MCAST_CNT_EXP	= XMAC_RX_MCAST_CNT_EXP,
+	ICFG_XMAC_RX_FRAG_CNT_EXP	= XMAC_RX_FRAG_CNT_EXP,
+	ICFG_XMAC_RX_ALIGNERR_CNT_EXP	= XMAC_RX_ALIGNERR_CNT_EXP,
+	ICFG_XMAC_RX_LINK_FLT_CNT_EXP	= XMAC_RX_LINK_FLT_CNT_EXP,
+	ICFG_XMAC_RX_HST_CNT7_EXP	= XMAC_RX_HST_CNT7_EXP,
+	ICFG_XMAC_RX_REMOTE_FLT_DET	= XMAC_RX_REMOTE_FLT_DET,
+	ICFG_XMAC_RX_LOCAL_FLT_DET	= XMAC_RX_LOCAL_FLT_DET,
+	ICFG_XMAC_RX_ALL = (XMAC_RX_FRAME_RCVD | XMAC_RX_OVERFLOW |
+				XMAC_RX_UNDERFLOW | XMAC_RX_CRC_ERR_CNT_EXP |
+				XMAC_RX_LEN_ERR_CNT_EXP |
+				XMAC_RX_VIOL_ERR_CNT_EXP |
+				XMAC_RX_OCT_CNT_EXP | XMAC_RX_HST_CNT1_EXP |
+				XMAC_RX_HST_CNT2_EXP | XMAC_RX_HST_CNT3_EXP |
+				XMAC_RX_HST_CNT4_EXP | XMAC_RX_HST_CNT5_EXP |
+				XMAC_RX_HST_CNT6_EXP | XMAC_RX_BCAST_CNT_EXP |
+				XMAC_RX_MCAST_CNT_EXP | XMAC_RX_FRAG_CNT_EXP |
+				XMAC_RX_ALIGNERR_CNT_EXP |
+				XMAC_RX_LINK_FLT_CNT_EXP |
+				XMAC_RX_HST_CNT7_EXP |
+				XMAC_RX_REMOTE_FLT_DET | XMAC_RX_LOCAL_FLT_DET)
+} xmac_rx_iconfig_t;
+
+typedef enum xmac_ctl_iconfig_e {
+	ICFG_XMAC_CTRL_PAUSE_RCVD	= XMAC_CTRL_PAUSE_RCVD,
+	ICFG_XMAC_CTRL_PAUSE_STATE	= XMAC_CTRL_PAUSE_STATE,
+	ICFG_XMAC_CTRL_NOPAUSE_STATE	= XMAC_CTRL_NOPAUSE_STATE,
+	ICFG_XMAC_CTRL_ALL = (XMAC_CTRL_PAUSE_RCVD | XMAC_CTRL_PAUSE_STATE |
+				XMAC_CTRL_NOPAUSE_STATE)
+} xmac_ctl_iconfig_t;
+
+
+typedef enum bmac_tx_iconfig_e {
+	ICFG_BMAC_TX_FRAME_SENT 	= MAC_TX_FRAME_XMIT,
+	ICFG_BMAC_TX_UNDERFLOW		= MAC_TX_UNDERRUN,
+	ICFG_BMAC_TX_MAXPKTSZ_ERR	= MAC_TX_MAX_PACKET_ERR,
+	ICFG_BMAC_TX_BYTE_CNT_EXP	= MAC_TX_BYTE_CNT_EXP,
+	ICFG_BMAC_TX_FRAME_CNT_EXP	= MAC_TX_FRAME_CNT_EXP,
+	ICFG_BMAC_TX_ALL = (MAC_TX_FRAME_XMIT | MAC_TX_UNDERRUN |
+				MAC_TX_MAX_PACKET_ERR | MAC_TX_BYTE_CNT_EXP |
+				MAC_TX_FRAME_CNT_EXP)
+} bmac_tx_iconfig_t;
+
+typedef enum bmac_rx_iconfig_e {
+	ICFG_BMAC_RX_FRAME_RCVD		= MAC_RX_FRAME_RECV,
+	ICFG_BMAC_RX_OVERFLOW		= MAC_RX_OVERFLOW,
+	ICFG_BMAC_RX_FRAME_CNT_EXP	= MAC_RX_FRAME_COUNT,
+	ICFG_BMAC_RX_CRC_ERR_CNT_EXP	= MAC_RX_ALIGN_ERR,
+	ICFG_BMAC_RX_LEN_ERR_CNT_EXP	= MAC_RX_CRC_ERR,
+	ICFG_BMAC_RX_VIOL_ERR_CNT_EXP	= MAC_RX_LEN_ERR,
+	ICFG_BMAC_RX_BYTE_CNT_EXP	= MAC_RX_VIOL_ERR,
+	ICFG_BMAC_RX_ALIGNERR_CNT_EXP	= MAC_RX_BYTE_CNT_EXP,
+	ICFG_BMAC_RX_ALL = (MAC_RX_FRAME_RECV | MAC_RX_OVERFLOW |
+				MAC_RX_FRAME_COUNT | MAC_RX_ALIGN_ERR |
+				MAC_RX_CRC_ERR | MAC_RX_LEN_ERR |
+				MAC_RX_VIOL_ERR | MAC_RX_BYTE_CNT_EXP)
+} bmac_rx_iconfig_t;
+
+typedef enum bmac_ctl_iconfig_e {
+	ICFG_BMAC_CTL_RCVPAUSE		= MAC_CTRL_PAUSE_RECEIVED,
+	ICFG_BMAC_CTL_INPAUSE_ST	= MAC_CTRL_PAUSE_STATE,
+	ICFG_BMAC_CTL_INNOTPAUSE_ST	= MAC_CTRL_NOPAUSE_STATE,
+	ICFG_BMAC_CTL_ALL = (MAC_CTRL_PAUSE_RECEIVED | MAC_CTRL_PAUSE_STATE |
+				MAC_CTRL_NOPAUSE_STATE)
+} bmac_ctl_iconfig_t;
+
+typedef	enum xmac_tx_config_e {
+	CFG_XMAC_TX			= 0x00000001,
+	CFG_XMAC_TX_STRETCH_MODE	= 0x00000002,
+	CFG_XMAC_VAR_IPG		= 0x00000004,
+	CFG_XMAC_TX_CRC			= 0x00000008,
+	CFG_XMAC_TX_ALL			= 0x0000000F
+} xmac_tx_config_t;
+
+typedef enum xmac_rx_config_e {
+	CFG_XMAC_RX			= 0x00000001,
+	CFG_XMAC_RX_PROMISCUOUS		= 0x00000002,
+	CFG_XMAC_RX_PROMISCUOUSGROUP	= 0x00000004,
+	CFG_XMAC_RX_ERRCHK		= 0x00000008,
+	CFG_XMAC_RX_CRC_CHK		= 0x00000010,
+	CFG_XMAC_RX_RESV_MULTICAST	= 0x00000020,
+	CFG_XMAC_RX_CODE_VIO_CHK	= 0x00000040,
+	CFG_XMAC_RX_HASH_FILTER		= 0x00000080,
+	CFG_XMAC_RX_ADDR_FILTER		= 0x00000100,
+	CFG_XMAC_RX_STRIP_CRC		= 0x00000200,
+	CFG_XMAC_RX_PAUSE		= 0x00000400,
+	CFG_XMAC_RX_PASS_FC_FRAME	= 0x00000800,
+	CFG_XMAC_RX_MAC2IPP_PKT_CNT	= 0x00001000,
+	CFG_XMAC_RX_ALL			= 0x00001FFF
+} xmac_rx_config_t;
+
+typedef	enum xmac_xif_config_e {
+	CFG_XMAC_XIF_LED_FORCE		= 0x00000001,
+	CFG_XMAC_XIF_LED_POLARITY	= 0x00000002,
+	CFG_XMAC_XIF_SEL_POR_CLK_SRC	= 0x00000004,
+	CFG_XMAC_XIF_TX_OUTPUT		= 0x00000008,
+	CFG_XMAC_XIF_LOOPBACK		= 0x00000010,
+	CFG_XMAC_XIF_LFS		= 0x00000020,
+	CFG_XMAC_XIF_XPCS_BYPASS	= 0x00000040,
+	CFG_XMAC_XIF_1G_PCS_BYPASS	= 0x00000080,
+	CFG_XMAC_XIF_SEL_CLK_25MHZ	= 0x00000100,
+	CFG_XMAC_XIF_ALL		= 0x000001FF
+} xmac_xif_config_t;
+
+typedef	enum bmac_tx_config_e {
+	CFG_BMAC_TX			= 0x00000001,
+	CFG_BMAC_TX_CRC			= 0x00000002,
+	CFG_BMAC_TX_ALL			= 0x00000003
+} bmac_tx_config_t;
+
+typedef enum bmac_rx_config_e {
+	CFG_BMAC_RX			= 0x00000001,
+	CFG_BMAC_RX_STRIP_PAD		= 0x00000002,
+	CFG_BMAC_RX_STRIP_CRC		= 0x00000004,
+	CFG_BMAC_RX_PROMISCUOUS		= 0x00000008,
+	CFG_BMAC_RX_PROMISCUOUSGROUP	= 0x00000010,
+	CFG_BMAC_RX_HASH_FILTER		= 0x00000020,
+	CFG_BMAC_RX_ADDR_FILTER		= 0x00000040,
+	CFG_BMAC_RX_DISCARD_ON_ERR	= 0x00000080,
+	CFG_BMAC_RX_ALL			= 0x000000FF
+} bmac_rx_config_t;
+
+typedef	enum bmac_xif_config_e {
+	CFG_BMAC_XIF_TX_OUTPUT		= 0x00000001,
+	CFG_BMAC_XIF_LOOPBACK		= 0x00000002,
+	CFG_BMAC_XIF_GMII_MODE		= 0x00000008,
+	CFG_BMAC_XIF_LINKLED		= 0x00000020,
+	CFG_BMAC_XIF_LED_POLARITY	= 0x00000040,
+	CFG_BMAC_XIF_SEL_CLK_25MHZ	= 0x00000080,
+	CFG_BMAC_XIF_ALL		= 0x000000FF
+} bmac_xif_config_t;
+
+
+typedef enum xmac_ipg_e {
+	XGMII_IPG_12_15 = 0,
+	XGMII_IPG_16_19,
+	XGMII_IPG_20_23,
+	MII_GMII_IPG_12,
+	MII_GMII_IPG_13,
+	MII_GMII_IPG_14,
+	MII_GMII_IPG_15,
+	MII_GMII_IPG_16
+} xmac_ipg_t;
+
+typedef	enum xpcs_reg_e {
+	XPCS_REG_CONTROL1,
+	XPCS_REG_STATUS1,
+	XPCS_REG_DEVICE_ID,
+	XPCS_REG_SPEED_ABILITY,
+	XPCS_REG_DEVICE_IN_PKG,
+	XPCS_REG_CONTROL2,
+	XPCS_REG_STATUS2,
+	XPCS_REG_PKG_ID,
+	XPCS_REG_STATUS,
+	XPCS_REG_TEST_CONTROL,
+	XPCS_REG_CONFIG_VENDOR1,
+	XPCS_REG_DIAG_VENDOR2,
+	XPCS_REG_MASK1,
+	XPCS_REG_PACKET_COUNTER,
+	XPCS_REG_TX_STATEMACHINE,
+	XPCS_REG_DESCWERR_COUNTER,
+	XPCS_REG_SYMBOL_ERR_L0_1_COUNTER,
+	XPCS_REG_SYMBOL_ERR_L2_3_COUNTER,
+	XPCS_REG_TRAINING_VECTOR
+} xpcs_reg_t;
+
+#define	IS_XMAC_PORT_NUM_VALID(portn)\
+	((portn == XMAC_PORT_0) || (portn == XMAC_PORT_1))
+
+#define	IS_BMAC_PORT_NUM_VALID(portn)\
+	((portn == BMAC_PORT_0) || (portn == BMAC_PORT_1))
+
+#define	XMAC_REG_WR(handle, portn, reg, val)\
+	NXGE_REG_WR64(handle, XMAC_REG_ADDR((portn), (reg)), (val))
+
+#define	XMAC_REG_RD(handle, portn, reg, val_p)\
+	NXGE_REG_RD64(handle, XMAC_REG_ADDR((portn), (reg)), (val_p))
+
+#define	BMAC_REG_WR(handle, portn, reg, val)\
+	NXGE_REG_WR64(handle, BMAC_REG_ADDR((portn), (reg)), (val))
+
+#define	BMAC_REG_RD(handle, portn, reg, val_p)\
+	NXGE_REG_RD64(handle, BMAC_REG_ADDR((portn), (reg)), (val_p))
+
+#define	PCS_REG_WR(handle, portn, reg, val)\
+	NXGE_REG_WR64(handle, PCS_REG_ADDR((portn), (reg)), (val))
+
+#define	PCS_REG_RD(handle, portn, reg, val_p)\
+	NXGE_REG_RD64(handle, PCS_REG_ADDR((portn), (reg)), (val_p))
+
+#define	XPCS_REG_WR(handle, portn, reg, val)\
+	NXGE_REG_WR64(handle, XPCS_ADDR((portn), (reg)), (val))
+
+#define	XPCS_REG_RD(handle, portn, reg, val_p)\
+	NXGE_REG_RD64(handle, XPCS_ADDR((portn), (reg)), (val_p))
+
+#define	MIF_REG_WR(handle, reg, val)\
+	NXGE_REG_WR64(handle, MIF_ADDR((reg)), (val))
+
+#define	MIF_REG_RD(handle, reg, val_p)\
+	NXGE_REG_RD64(handle, MIF_ADDR((reg)), (val_p))
+
+
+/*
+ * When MIF_REG_RD is called inside a poll loop and the poll takes a
+ * long time to complete, each iteration prints an rt_show_reg result
+ * and the rtrace "register show" output can become too cluttered to
+ * read.  The solution is to call MIF_REG_RD_NO_SHOW instead of
+ * MIF_REG_RD in such a loop (see the sketch after the #endif below).
+ * When COSIM or REG_SHOW is not defined, this macro is the same as
+ * MIF_REG_RD.  When both COSIM and REG_SHOW are defined, this macro
+ * calls NXGE_REG_RD64_NO_SHOW, which does not call rt_show_reg.
+ */
+#if defined(COSIM) && defined(REG_SHOW)
+#define	MIF_REG_RD_NO_SHOW(handle, reg, val_p)\
+	NXGE_REG_RD64_NO_SHOW(handle, MIF_ADDR((reg)), (val_p))
+#else
+	/*	If not COSIM or REG_SHOW, still show */
+#define	MIF_REG_RD_NO_SHOW(handle, reg, val_p)\
+	NXGE_REG_RD64(handle, MIF_ADDR((reg)), (val_p))
+#endif
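+
+/*
+ * Illustrative sketch (an assumption, not part of the driver sources):
+ * a hypothetical poll loop that waits on a MIF status bit without
+ * flooding the rtrace output.  MIF_STATUS_REG, MIF_POLL_DONE_BIT and
+ * the retry count are placeholder names; only the macro calling
+ * convention comes from the definitions above.
+ *
+ *	uint64_t val;
+ *	int tries = 1000;
+ *
+ *	do {
+ *		MIF_REG_RD_NO_SHOW(handle, MIF_STATUS_REG, &val);
+ *	} while (((val & MIF_POLL_DONE_BIT) == 0) && --tries);
+ */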
+
+#define	ESR_REG_WR(handle, reg, val)\
+	NXGE_REG_WR64(handle, ESR_ADDR((reg)), (val))
+
+#define	ESR_REG_RD(handle, reg, val_p)\
+	NXGE_REG_RD64(handle, ESR_ADDR((reg)), (val_p))
+
+/* Macros to read/modify MAC attributes */
+
+#define	SET_MAC_ATTR1(handle, p, portn, attr, val, stat) {\
+	p.type = attr;\
+	p.idata[0] = (uint32_t)val;\
+	stat = npi_mac_port_attr(handle, OP_SET, portn, (npi_attr_t *)&p);\
+}
+
+#define	SET_MAC_ATTR2(handle, p, portn, attr, val0, val1, stat) {\
+	p.type = attr;\
+	p.idata[0] = (uint32_t)val0;\
+	p.idata[1] = (uint32_t)val1;\
+	stat = npi_mac_port_attr(handle, OP_SET, portn, (npi_attr_t *)&p);\
+}
+
+#define	SET_MAC_ATTR3(handle, p, portn, attr, val0, val1, val2, stat) {\
+	p.type = attr;\
+	p.idata[0] = (uint32_t)val0;\
+	p.idata[1] = (uint32_t)val1;\
+	p.idata[2] = (uint32_t)val2;\
+	stat = npi_mac_port_attr(handle, OP_SET, portn, (npi_attr_t *)&p);\
+}
+
+#define	SET_MAC_ATTR4(handle, p, portn, attr, val0, val1, val2, val3, stat) {\
+	p.type = attr;\
+	p.idata[0] = (uint32_t)val0;\
+	p.idata[1] = (uint32_t)val1;\
+	p.idata[2] = (uint32_t)val2;\
+	p.idata[3] = (uint32_t)val3;\
+	stat = npi_mac_port_attr(handle, OP_SET, portn, (npi_attr_t *)&p);\
+}
+
+#define	GET_MAC_ATTR1(handle, p, portn, attr, val, stat) {\
+	p.type = attr;\
+	if ((stat = npi_mac_port_attr(handle, OP_GET, portn, \
+					(npi_attr_t *)&p)) == NPI_SUCCESS) {\
+		val = p.odata[0];\
+	}\
+}
+
+#define	GET_MAC_ATTR2(handle, p, portn, attr, val0, val1, stat) {\
+	p.type = attr;\
+	if ((stat = npi_mac_port_attr(handle, OP_GET, portn, \
+					(npi_attr_t *)&p)) == NPI_SUCCESS) {\
+		val0 = p.odata[0];\
+		val1 = p.odata[1];\
+	}\
+}
+
+#define	GET_MAC_ATTR3(handle, p, portn, attr, val0, val1, \
+			val2, stat) {\
+	p.type = attr;\
+	if ((stat = npi_mac_port_attr(handle, OP_GET, portn, \
+					(npi_attr_t *)&p)) == NPI_SUCCESS) {\
+		val0 = p.odata[0];\
+		val1 = p.odata[1];\
+		val2 = p.odata[2];\
+	}\
+}
+
+#define	GET_MAC_ATTR4(handle, p, portn, attr, val0, val1, \
+			val2, val3, stat) {\
+	p.type = attr;\
+	if ((stat = npi_mac_port_attr(handle, OP_GET, portn, \
+					(npi_attr_t *)&p)) == NPI_SUCCESS) {\
+		val0 = p.odata[0];\
+		val1 = p.odata[1];\
+		val2 = p.odata[2];\
+		val3 = p.odata[3];\
+	}\
+}
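+
+/*
+ * Illustrative sketch of how the attribute helpers above are typically
+ * invoked.  This is an assumption for documentation purposes only: the
+ * attribute name MAC_PORT_MTU is a placeholder for one of the MAC
+ * attribute types accepted by npi_mac_port_attr(), and the parameter
+ * block is assumed to be declared as an npi_attr_t.
+ *
+ *	npi_attr_t	param;
+ *	npi_status_t	status;
+ *	uint32_t	mtu;
+ *
+ *	SET_MAC_ATTR1(handle, param, portn, MAC_PORT_MTU, 9000, status);
+ *	if (status == NPI_SUCCESS)
+ *		GET_MAC_ATTR1(handle, param, portn, MAC_PORT_MTU,
+ *		    mtu, status);
+ */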
+
+/* MAC specific errors */
+
+#define	MAC_PORT_ATTR_INVALID		0x50
+#define	MAC_RESET_MODE_INVALID		0x51
+#define	MAC_HASHTAB_ENTRY_INVALID	0x52
+#define	MAC_HOSTINFO_ENTRY_INVALID	0x53
+#define	MAC_ALT_ADDR_ENTRY_INVALID	0x54
+
+/* MAC error return macros */
+
+#define	NPI_MAC_PORT_INVALID(portn)	((MAC_BLK_ID << NPI_BLOCK_ID_SHIFT) |\
+					PORT_INVALID | IS_PORT | (portn << 12))
+#define	NPI_MAC_OPCODE_INVALID(portn)	((MAC_BLK_ID << NPI_BLOCK_ID_SHIFT) |\
+					OPCODE_INVALID |\
+					IS_PORT | (portn << 12))
+#define	NPI_MAC_HASHTAB_ENTRY_INVALID(portn)\
+					((MAC_BLK_ID << NPI_BLOCK_ID_SHIFT) |\
+					MAC_HASHTAB_ENTRY_INVALID |\
+					IS_PORT | (portn << 12))
+#define	NPI_MAC_HOSTINFO_ENTRY_INVALID(portn)\
+					((MAC_BLK_ID << NPI_BLOCK_ID_SHIFT) |\
+					MAC_HOSTINFO_ENTRY_INVALID |\
+					IS_PORT | (portn << 12))
+#define	NPI_MAC_ALT_ADDR_ENTRY_INVALID(portn)\
+					((MAC_BLK_ID << NPI_BLOCK_ID_SHIFT) |\
+					MAC_ALT_ADDR_ENTRY_INVALID |\
+					IS_PORT | (portn << 12))
+#define	NPI_MAC_PORT_ATTR_INVALID(portn)\
+					((MAC_BLK_ID << NPI_BLOCK_ID_SHIFT) |\
+					MAC_PORT_ATTR_INVALID |\
+					IS_PORT | (portn << 12))
+#define	NPI_MAC_RESET_MODE_INVALID(portn)\
+					((MAC_BLK_ID << NPI_BLOCK_ID_SHIFT) |\
+					MAC_RESET_MODE_INVALID |\
+					IS_PORT | (portn << 12))
+#define	NPI_MAC_PCS_REG_INVALID(portn)	((MAC_BLK_ID << NPI_BLOCK_ID_SHIFT) |\
+					REGISTER_INVALID |\
+					IS_PORT | (portn << 12))
+#define	NPI_TXMAC_RESET_FAILED(portn)	((TXMAC_BLK_ID << NPI_BLOCK_ID_SHIFT) |\
+					RESET_FAILED | IS_PORT | (portn << 12))
+#define	NPI_RXMAC_RESET_FAILED(portn)	((RXMAC_BLK_ID << NPI_BLOCK_ID_SHIFT) |\
+					RESET_FAILED | IS_PORT | (portn << 12))
+#define	NPI_MAC_CONFIG_INVALID(portn)	((MAC_BLK_ID << NPI_BLOCK_ID_SHIFT) |\
+					CONFIG_INVALID |\
+					IS_PORT | (portn << 12))
+#define	NPI_MAC_REG_INVALID(portn)	((MAC_BLK_ID << NPI_BLOCK_ID_SHIFT) |\
+					REGISTER_INVALID |\
+					IS_PORT | (portn << 12))
+#define	NPI_MAC_MII_READ_FAILED(portn)	((MIF_BLK_ID << NPI_BLOCK_ID_SHIFT) |\
+					READ_FAILED | IS_PORT | (portn << 12))
+#define	NPI_MAC_MII_WRITE_FAILED(portn)	((MIF_BLK_ID << NPI_BLOCK_ID_SHIFT) |\
+					WRITE_FAILED | IS_PORT | (portn << 12))
+
+/* library functions prototypes */
+
+/* general mac functions */
+npi_status_t npi_mac_hashtab_entry(npi_handle_t, io_op_t,
+				uint8_t, uint8_t, uint16_t *);
+npi_status_t npi_mac_hostinfo_entry(npi_handle_t, io_op_t,
+				uint8_t, uint8_t,
+				hostinfo_t *);
+npi_status_t npi_mac_altaddr_enable(npi_handle_t, uint8_t,
+				uint8_t);
+npi_status_t npi_mac_altaddr_disble(npi_handle_t, uint8_t,
+				uint8_t);
+npi_status_t npi_mac_altaddr_entry(npi_handle_t, io_op_t,
+				uint8_t, uint8_t,
+				npi_mac_addr_t *);
+npi_status_t npi_mac_port_attr(npi_handle_t, io_op_t, uint8_t,
+				npi_attr_t *);
+npi_status_t npi_mac_get_link_status(npi_handle_t, uint8_t,
+				boolean_t *);
+npi_status_t npi_mac_get_10g_link_status(npi_handle_t, uint8_t,
+				boolean_t *);
+npi_status_t npi_mac_mif_mii_read(npi_handle_t, uint8_t,
+				uint8_t, uint16_t *);
+npi_status_t npi_mac_mif_mii_write(npi_handle_t, uint8_t,
+				uint8_t, uint16_t);
+npi_status_t npi_mac_mif_link_intr_enable(npi_handle_t, uint8_t,
+				uint8_t, uint16_t);
+npi_status_t npi_mac_mif_mdio_read(npi_handle_t, uint8_t,
+				uint8_t, uint16_t,
+				uint16_t *);
+npi_status_t npi_mac_mif_mdio_write(npi_handle_t, uint8_t,
+				uint8_t, uint16_t,
+				uint16_t);
+npi_status_t npi_mac_mif_mdio_link_intr_enable(npi_handle_t,
+				uint8_t, uint8_t,
+				uint16_t, uint16_t);
+npi_status_t npi_mac_mif_link_intr_disable(npi_handle_t, uint8_t);
+npi_status_t npi_mac_pcs_mii_read(npi_handle_t, uint8_t,
+				uint8_t, uint16_t *);
+npi_status_t npi_mac_pcs_mii_write(npi_handle_t, uint8_t,
+				uint8_t, uint16_t);
+npi_status_t npi_mac_pcs_link_intr_enable(npi_handle_t, uint8_t);
+npi_status_t npi_mac_pcs_link_intr_disable(npi_handle_t, uint8_t);
+npi_status_t npi_mac_pcs_reset(npi_handle_t, uint8_t);
+
+/* xmac functions */
+npi_status_t npi_xmac_reset(npi_handle_t, uint8_t,
+				npi_mac_reset_t);
+npi_status_t npi_xmac_xif_config(npi_handle_t, config_op_t,
+				uint8_t, xmac_xif_config_t);
+npi_status_t npi_xmac_tx_config(npi_handle_t, config_op_t,
+				uint8_t, xmac_tx_config_t);
+npi_status_t npi_xmac_rx_config(npi_handle_t, config_op_t,
+				uint8_t, xmac_rx_config_t);
+npi_status_t npi_xmac_tx_iconfig(npi_handle_t, config_op_t,
+				uint8_t, xmac_tx_iconfig_t);
+npi_status_t npi_xmac_rx_iconfig(npi_handle_t, config_op_t,
+				uint8_t, xmac_rx_iconfig_t);
+npi_status_t npi_xmac_ctl_iconfig(npi_handle_t, config_op_t,
+				uint8_t, xmac_ctl_iconfig_t);
+npi_status_t npi_xmac_tx_get_istatus(npi_handle_t, uint8_t,
+				xmac_tx_iconfig_t *);
+npi_status_t npi_xmac_rx_get_istatus(npi_handle_t, uint8_t,
+				xmac_rx_iconfig_t *);
+npi_status_t npi_xmac_ctl_get_istatus(npi_handle_t, uint8_t,
+				xmac_ctl_iconfig_t *);
+npi_status_t npi_xmac_xpcs_reset(npi_handle_t, uint8_t);
+npi_status_t npi_xmac_xpcs_enable(npi_handle_t, uint8_t);
+npi_status_t npi_xmac_xpcs_disable(npi_handle_t, uint8_t);
+npi_status_t npi_xmac_xpcs_read(npi_handle_t, uint8_t,
+				uint8_t, uint32_t *);
+npi_status_t npi_xmac_xpcs_write(npi_handle_t, uint8_t,
+				uint8_t, uint32_t);
+npi_status_t npi_xmac_xpcs_link_intr_enable(npi_handle_t, uint8_t);
+npi_status_t npi_xmac_xpcs_link_intr_disable(npi_handle_t,
+				uint8_t);
+npi_status_t npi_xmac_xif_led(npi_handle_t, uint8_t,
+				boolean_t);
+npi_status_t npi_xmac_zap_tx_counters(npi_handle_t, uint8_t);
+npi_status_t npi_xmac_zap_rx_counters(npi_handle_t, uint8_t);
+
+/* bmac functions */
+npi_status_t npi_bmac_reset(npi_handle_t, uint8_t,
+				npi_mac_reset_t mode);
+npi_status_t npi_bmac_tx_config(npi_handle_t, config_op_t,
+				uint8_t, bmac_tx_config_t);
+npi_status_t npi_bmac_rx_config(npi_handle_t, config_op_t,
+				uint8_t, bmac_rx_config_t);
+npi_status_t npi_bmac_rx_iconfig(npi_handle_t, config_op_t,
+				uint8_t, bmac_rx_iconfig_t);
+npi_status_t npi_bmac_xif_config(npi_handle_t, config_op_t,
+				uint8_t, bmac_xif_config_t);
+npi_status_t npi_bmac_tx_iconfig(npi_handle_t, config_op_t,
+				uint8_t, bmac_tx_iconfig_t);
+npi_status_t npi_bmac_ctl_iconfig(npi_handle_t, config_op_t,
+				uint8_t, bmac_ctl_iconfig_t);
+npi_status_t npi_bmac_tx_get_istatus(npi_handle_t, uint8_t,
+				bmac_tx_iconfig_t *);
+npi_status_t npi_bmac_rx_get_istatus(npi_handle_t, uint8_t,
+				bmac_rx_iconfig_t *);
+npi_status_t npi_bmac_ctl_get_istatus(npi_handle_t, uint8_t,
+				bmac_ctl_iconfig_t *);
+npi_status_t npi_bmac_send_pause(npi_handle_t, uint8_t,
+				uint16_t);
+npi_status_t npi_mac_dump_regs(npi_handle_t, uint8_t);
+
+/* MIF common functions */
+void npi_mac_mif_set_indirect_mode(npi_handle_t, boolean_t);
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _NPI_MAC_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nxge/npi/npi_rxdma.c	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,2287 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#include <npi_rxdma.h>
+#include <nxge_common.h>
+
+#define	 RXDMA_RESET_TRY_COUNT	4
+#define	 RXDMA_RESET_DELAY	5
+
+#define	 RXDMA_OP_DISABLE	0
+#define	 RXDMA_OP_ENABLE	1
+#define	 RXDMA_OP_RESET	2
+
+#define	 RCR_TIMEOUT_ENABLE	1
+#define	 RCR_TIMEOUT_DISABLE	2
+#define	 RCR_THRESHOLD	4
+
+/* assume the weight is expressed in units of byte frames */
+#define	WEIGHT_FACTOR 3/2
+
+uint64_t rdc_dmc_offset[] = {
+	RXDMA_CFIG1_REG, RXDMA_CFIG2_REG, RBR_CFIG_A_REG, RBR_CFIG_B_REG,
+	RBR_KICK_REG, RBR_STAT_REG, RBR_HDH_REG, RBR_HDL_REG,
+	RCRCFIG_A_REG, RCRCFIG_B_REG, RCRSTAT_A_REG, RCRSTAT_B_REG,
+	RCRSTAT_C_REG, RX_DMA_ENT_MSK_REG, RX_DMA_CTL_STAT_REG, RCR_FLSH_REG,
+	RXMISC_DISCARD_REG
+};
+
+const char *rdc_dmc_name[] = {
+	"RXDMA_CFIG1", "RXDMA_CFIG2", "RBR_CFIG_A", "RBR_CFIG_B",
+	"RBR_KICK", "RBR_STAT", "RBR_HDH", "RBR_HDL",
+	"RCRCFIG_A", "RCRCFIG_B", "RCRSTAT_A", "RCRSTAT_B",
+	"RCRSTAT_C", "RX_DMA_ENT_MSK", "RX_DMA_CTL_STAT", "RCR_FLSH",
+	"RXMISC_DISCARD"
+};
+
+uint64_t rdc_fzc_offset [] = {
+	RX_LOG_PAGE_VLD_REG, RX_LOG_PAGE_MASK1_REG, RX_LOG_PAGE_VAL1_REG,
+	RX_LOG_PAGE_MASK2_REG, RX_LOG_PAGE_VAL2_REG, RX_LOG_PAGE_RELO1_REG,
+	RX_LOG_PAGE_RELO2_REG, RX_LOG_PAGE_HDL_REG, RDC_RED_PARA_REG,
+	RED_DIS_CNT_REG
+};
+
+
+const char *rdc_fzc_name [] = {
+	"RX_LOG_PAGE_VLD", "RX_LOG_PAGE_MASK1", "RX_LOG_PAGE_VAL1",
+	"RX_LOG_PAGE_MASK2", "RX_LOG_PAGE_VAL2", "RX_LOG_PAGE_RELO1",
+	"RX_LOG_PAGE_RELO2", "RX_LOG_PAGE_HDL", "RDC_RED_PARA", "RED_DIS_CNT"
+};
+
+
+/*
+ * Dump the MEM_ADD register first so all the data registers
+ * will have valid data buffer pointers.
+ */
+uint64_t rx_fzc_offset[] = {
+	RX_DMA_CK_DIV_REG, DEF_PT0_RDC_REG, DEF_PT1_RDC_REG, DEF_PT2_RDC_REG,
+	DEF_PT3_RDC_REG, RX_ADDR_MD_REG, PT_DRR_WT0_REG, PT_DRR_WT1_REG,
+	PT_DRR_WT2_REG, PT_DRR_WT3_REG, PT_USE0_REG, PT_USE1_REG,
+	PT_USE2_REG, PT_USE3_REG, RED_RAN_INIT_REG, RX_ADDR_MD_REG,
+	RDMC_PRE_PAR_ERR_REG, RDMC_SHA_PAR_ERR_REG,
+	RDMC_MEM_DATA4_REG, RDMC_MEM_DATA3_REG, RDMC_MEM_DATA2_REG,
+	RDMC_MEM_DATA1_REG, RDMC_MEM_DATA0_REG,
+	RDMC_MEM_ADDR_REG,
+	RX_CTL_DAT_FIFO_STAT_REG, RX_CTL_DAT_FIFO_MASK_REG,
+	RX_CTL_DAT_FIFO_STAT_DBG_REG,
+	RDMC_TRAINING_VECTOR_REG,
+};
+
+
+const char *rx_fzc_name[] = {
+	"RX_DMA_CK_DIV", "DEF_PT0_RDC", "DEF_PT1_RDC", "DEF_PT2_RDC",
+	"DEF_PT3_RDC", "RX_ADDR_MD", "PT_DRR_WT0", "PT_DRR_WT1",
+	"PT_DRR_WT2", "PT_DRR_WT3", "PT_USE0", "PT_USE1",
+	"PT_USE2", "PT_USE3", "RED_RAN_INIT", "RX_ADDR_MD",
+	"RDMC_PRE_PAR_ERR", "RDMC_SHA_PAR_ERR",
+	"RDMC_MEM_DATA4", "RDMC_MEM_DATA3", "RDMC_MEM_DATA2",
+	"RDMC_MEM_DATA1", "RDMC_MEM_DATA0",
+	"RDMC_MEM_ADDR",
+	"RX_CTL_DAT_FIFO_STAT", "RX_CTL_DAT_FIFO_MASK",
+	"RDMC_TRAINING_VECTOR_REG",
+	"RX_CTL_DAT_FIFO_STAT_DBG_REG"
+};
+
+
+npi_status_t
+npi_rxdma_cfg_rdc_ctl(npi_handle_t handle, uint8_t rdc, uint8_t op);
+npi_status_t
+npi_rxdma_cfg_rdc_rcr_ctl(npi_handle_t handle, uint8_t rdc, uint8_t op,
+				uint16_t param);
+
+
+/*
+ * npi_rxdma_dump_rdc_regs
+ * Dumps the contents of rdc csrs and fzc registers
+ *
+ * Input:
+ *      handle:	opaque handle interpreted by the underlying OS
+ *         rdc:      RX DMA number
+ *
+ * return:
+ *     NPI_SUCCESS
+ *     NPI_RXDMA_RDC_INVALID
+ *
+ */
+npi_status_t
+npi_rxdma_dump_rdc_regs(npi_handle_t handle, uint8_t rdc)
+{
+
+	uint64_t value, offset;
+	int num_regs, i;
+#ifdef NPI_DEBUG
+	extern uint64_t npi_debug_level;
+	uint64_t old_npi_debug_level = npi_debug_level;
+#endif
+	ASSERT(RXDMA_CHANNEL_VALID(rdc));
+	if (!RXDMA_CHANNEL_VALID(rdc)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			    "npi_rxdma_dump_rdc_regs"
+			    " Illegal RDC number %d \n",
+			    rdc));
+		return (NPI_RXDMA_RDC_INVALID);
+	}
+#ifdef NPI_DEBUG
+	npi_debug_level |= DUMP_ALWAYS;
+#endif
+	num_regs = sizeof (rdc_dmc_offset) / sizeof (uint64_t);
+	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+			    "\nDMC Register Dump for Channel %d\n",
+			    rdc));
+	for (i = 0; i < num_regs; i++) {
+		RXDMA_REG_READ64(handle, rdc_dmc_offset[i], rdc, &value);
+		offset = NXGE_RXDMA_OFFSET(rdc_dmc_offset[i], handle.is_vraddr,
+				rdc);
+		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+			"%08llx %s\t %08llx \n",
+			offset, rdc_dmc_name[i], value));
+	}
+
+	NPI_DEBUG_MSG((handle.function, DUMP_ALWAYS,
+			    "\nFZC_DMC Register Dump for Channel %d\n",
+			    rdc));
+	num_regs = sizeof (rdc_fzc_offset) / sizeof (uint64_t);
+
+	for (i = 0; i < num_regs; i++) {
+		offset = REG_FZC_RDC_OFFSET(rdc_fzc_offset[i], rdc);
+		NXGE_REG_RD64(handle, offset, &value);
+		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+				    "%8llx %s\t %8llx \n",
+				    rdc_fzc_offset[i], rdc_fzc_name[i],
+				    value));
+
+	}
+
+	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+			    "\n Register Dump for Channel %d done\n",
+			    rdc));
+#ifdef NPI_DEBUG
+	npi_debug_level = old_npi_debug_level;
+#endif
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_rxdma_dump_fzc_regs
+ * Dumps the contents of the common FZC_DMC registers
+ *
+ * Input:
+ *      handle:	opaque handle interpreted by the underlying OS
+ *
+ * return:
+ *     NPI_SUCCESS
+ */
+npi_status_t
+npi_rxdma_dump_fzc_regs(npi_handle_t handle)
+{
+
+	uint64_t value;
+	int num_regs, i;
+
+
+	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+			    "\nFZC_DMC Common Register Dump\n"));
+	num_regs = sizeof (rx_fzc_offset) / sizeof (uint64_t);
+
+	for (i = 0; i < num_regs; i++) {
+		NXGE_REG_RD64(handle, rx_fzc_offset[i], &value);
+		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+			"0x%08llx %s\t 0x%08llx \n",
+			    rx_fzc_offset[i],
+			rx_fzc_name[i], value));
+	}
+	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+			    "\n FZC_DMC Register Dump Done \n"));
+
+	return (NPI_SUCCESS);
+}
+
+
+
+/*
+ * per rdc config functions
+ */
+npi_status_t
+npi_rxdma_cfg_logical_page_disable(npi_handle_t handle, uint8_t rdc,
+				    uint8_t page_num)
+{
+	log_page_vld_t page_vld;
+	uint64_t valid_offset;
+
+	ASSERT(RXDMA_CHANNEL_VALID(rdc));
+	if (!RXDMA_CHANNEL_VALID(rdc)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    "rxdma_cfg_logical_page_disable"
+				    " Illegal RDC number %d \n",
+				    rdc));
+		return (NPI_RXDMA_RDC_INVALID);
+	}
+
+	ASSERT(RXDMA_PAGE_VALID(page_num));
+	if (!RXDMA_PAGE_VALID(page_num)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    "rxdma_cfg_logical_page_disable"
+				    " Illegal page number %d \n",
+				    page_num));
+		return (NPI_RXDMA_PAGE_INVALID);
+	}
+
+	valid_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_VLD_REG, rdc);
+	NXGE_REG_RD64(handle, valid_offset, &page_vld.value);
+
+	if (page_num == 0)
+		page_vld.bits.ldw.page0 = 0;
+
+	if (page_num == 1)
+		page_vld.bits.ldw.page1 = 0;
+
+	NXGE_REG_WR64(handle, valid_offset, page_vld.value);
+	return (NPI_SUCCESS);
+
+}
+
+npi_status_t
+npi_rxdma_cfg_logical_page(npi_handle_t handle, uint8_t rdc,
+			    dma_log_page_t *pg_cfg)
+{
+	log_page_vld_t page_vld;
+	log_page_mask_t page_mask;
+	log_page_value_t page_value;
+	log_page_relo_t page_reloc;
+	uint64_t value_offset, reloc_offset, mask_offset;
+	uint64_t valid_offset;
+
+	ASSERT(RXDMA_CHANNEL_VALID(rdc));
+	if (!RXDMA_CHANNEL_VALID(rdc)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " rxdma_cfg_logical_page"
+				    " Illegal RDC number %d \n",
+				    rdc));
+		return (NPI_RXDMA_RDC_INVALID);
+	}
+
+	ASSERT(RXDMA_PAGE_VALID(pg_cfg->page_num));
+	if (!RXDMA_PAGE_VALID(pg_cfg->page_num)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " rxdma_cfg_logical_page"
+				    " Illegal page number %d \n",
+				    pg_cfg->page_num));
+		return (NPI_RXDMA_PAGE_INVALID);
+	}
+
+	valid_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_VLD_REG, rdc);
+	NXGE_REG_RD64(handle, valid_offset, &page_vld.value);
+
+	if (!pg_cfg->valid) {
+		if (pg_cfg->page_num == 0)
+			page_vld.bits.ldw.page0 = 0;
+
+		if (pg_cfg->page_num == 1)
+			page_vld.bits.ldw.page1 = 0;
+		NXGE_REG_WR64(handle, valid_offset, page_vld.value);
+		return (NPI_SUCCESS);
+	}
+
+	if (pg_cfg->page_num == 0) {
+		mask_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_MASK1_REG, rdc);
+		value_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_VAL1_REG, rdc);
+		reloc_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_RELO1_REG, rdc);
+		page_vld.bits.ldw.page0 = 1;
+	}
+
+	if (pg_cfg->page_num == 1) {
+		mask_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_MASK2_REG, rdc);
+		value_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_VAL2_REG, rdc);
+		reloc_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_RELO2_REG, rdc);
+		page_vld.bits.ldw.page1 = 1;
+	}
+
+
+	page_vld.bits.ldw.func = pg_cfg->func_num;
+
+	page_mask.value = 0;
+	page_value.value = 0;
+	page_reloc.value = 0;
+
+
+	page_mask.bits.ldw.mask = pg_cfg->mask >> LOG_PAGE_ADDR_SHIFT;
+	page_value.bits.ldw.value = pg_cfg->value >> LOG_PAGE_ADDR_SHIFT;
+	page_reloc.bits.ldw.relo = pg_cfg->reloc >> LOG_PAGE_ADDR_SHIFT;
+
+
+	NXGE_REG_WR64(handle, mask_offset, page_mask.value);
+	NXGE_REG_WR64(handle, value_offset, page_value.value);
+	NXGE_REG_WR64(handle, reloc_offset, page_reloc.value);
+
+
+/* enable the logical page */
+	NXGE_REG_WR64(handle, valid_offset, page_vld.value);
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_rxdma_cfg_logical_page_handle(npi_handle_t handle, uint8_t rdc,
+				    uint64_t page_handle)
+{
+	uint64_t offset;
+	log_page_hdl_t page_hdl;
+
+	ASSERT(RXDMA_CHANNEL_VALID(rdc));
+	if (!RXDMA_CHANNEL_VALID(rdc)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+		    "rxdma_cfg_logical_page_handle"
+		    " Illegal RDC number %d \n", rdc));
+		return (NPI_RXDMA_RDC_INVALID);
+	}
+
+
+	page_hdl.value = 0;
+
+	page_hdl.bits.ldw.handle = (uint32_t)page_handle;
+	offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_HDL_REG, rdc);
+	NXGE_REG_WR64(handle, offset, page_hdl.value);
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * RX DMA functions
+ */
+npi_status_t
+npi_rxdma_cfg_rdc_ctl(npi_handle_t handle, uint8_t rdc, uint8_t op)
+{
+
+	rxdma_cfig1_t cfg;
+	uint32_t count = RXDMA_RESET_TRY_COUNT;
+	uint32_t delay_time = RXDMA_RESET_DELAY;
+	uint32_t error = NPI_RXDMA_ERROR_ENCODE(NPI_RXDMA_RESET_ERR, rdc);
+
+	ASSERT(RXDMA_CHANNEL_VALID(rdc));
+	if (!RXDMA_CHANNEL_VALID(rdc)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    "npi_rxdma_cfg_rdc_ctl"
+				    " Illegal RDC number %d \n", rdc));
+		return (NPI_RXDMA_RDC_INVALID);
+	}
+
+
+	switch (op) {
+		case RXDMA_OP_ENABLE:
+			RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
+						&cfg.value);
+			cfg.bits.ldw.en = 1;
+			RXDMA_REG_WRITE64(handle, RXDMA_CFIG1_REG,
+					    rdc, cfg.value);
+
+			NXGE_DELAY(delay_time);
+			RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
+						&cfg.value);
+			while ((count--) && (cfg.bits.ldw.qst == 0)) {
+				NXGE_DELAY(delay_time);
+				RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
+						&cfg.value);
+			}
+
+			if (cfg.bits.ldw.qst == 0) {
+				NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_rxdma_cfg_rdc_ctl"
+				    " RXDMA_OP_ENABLE Failed for RDC %d \n",
+				    rdc));
+				return (error);
+			}
+
+			break;
+		case RXDMA_OP_DISABLE:
+			RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
+						&cfg.value);
+			cfg.bits.ldw.en = 0;
+			RXDMA_REG_WRITE64(handle, RXDMA_CFIG1_REG,
+					    rdc, cfg.value);
+
+			NXGE_DELAY(delay_time);
+			RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
+						&cfg.value);
+			while ((count--) && (cfg.bits.ldw.qst == 0)) {
+				NXGE_DELAY(delay_time);
+				RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
+						&cfg.value);
+			}
+			if (cfg.bits.ldw.qst == 0) {
+				NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_rxdma_cfg_rdc_ctl"
+				    " RXDMA_OP_DISABLE Failed for RDC %d \n",
+				    rdc));
+				return (error);
+			}
+
+			break;
+		case RXDMA_OP_RESET:
+			cfg.value = 0;
+			cfg.bits.ldw.rst = 1;
+			RXDMA_REG_WRITE64(handle,
+					    RXDMA_CFIG1_REG,
+					    rdc, cfg.value);
+			NXGE_DELAY(delay_time);
+			RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
+						&cfg.value);
+			while ((count--) && (cfg.bits.ldw.rst)) {
+				NXGE_DELAY(delay_time);
+				RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
+						&cfg.value);
+			}
+			if (count == 0) {
+				NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_rxdma_cfg_rdc_ctl"
+					    " Reset Failed for RDC %d \n",
+					    rdc));
+				return (error);
+			}
+			break;
+		default:
+			return (NPI_RXDMA_SW_PARAM_ERROR);
+	}
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_rxdma_cfg_rdc_enable(npi_handle_t handle, uint8_t rdc)
+{
+	return (npi_rxdma_cfg_rdc_ctl(handle, rdc, RXDMA_OP_ENABLE));
+}
+
+npi_status_t
+npi_rxdma_cfg_rdc_disable(npi_handle_t handle, uint8_t rdc)
+{
+	return (npi_rxdma_cfg_rdc_ctl(handle, rdc, RXDMA_OP_DISABLE));
+}
+
+npi_status_t
+npi_rxdma_cfg_rdc_reset(npi_handle_t handle, uint8_t rdc)
+{
+	return (npi_rxdma_cfg_rdc_ctl(handle, rdc, RXDMA_OP_RESET));
+}
+
+/*
+ * npi_rxdma_cfg_default_port_rdc()
+ * Set the default rdc for the port
+ *
+ * Inputs:
+ *	handle:		register handle interpreted by the underlying OS
+ *	portnm:		Physical Port Number
+ *	rdc:	RX DMA Channel number
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_RXDMA_RDC_INVALID
+ * NPI_RXDMA_PORT_INVALID
+ *
+ */
+npi_status_t npi_rxdma_cfg_default_port_rdc(npi_handle_t handle,
+				    uint8_t portnm, uint8_t rdc)
+{
+
+	uint64_t offset;
+	def_pt_rdc_t cfg;
+
+	ASSERT(RXDMA_CHANNEL_VALID(rdc));
+	if (!RXDMA_CHANNEL_VALID(rdc)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    "rxdma_cfg_default_port_rdc"
+				    " Illegal RDC number %d \n",
+				    rdc));
+		return (NPI_RXDMA_RDC_INVALID);
+	}
+
+	ASSERT(RXDMA_PORT_VALID(portnm));
+	if (!RXDMA_PORT_VALID(portnm)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    "rxdma_cfg_default_port_rdc"
+				    " Illegal Port number %d \n",
+				    portnm));
+		return (NPI_RXDMA_PORT_INVALID);
+	}
+
+	offset = DEF_PT_RDC_REG(portnm);
+	cfg.value = 0;
+	cfg.bits.ldw.rdc = rdc;
+	NXGE_REG_WR64(handle, offset, cfg.value);
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_rxdma_cfg_rdc_rcr_ctl(npi_handle_t handle, uint8_t rdc,
+			    uint8_t op, uint16_t param)
+{
+	rcrcfig_b_t rcr_cfgb;
+
+	ASSERT(RXDMA_CHANNEL_VALID(rdc));
+	if (!RXDMA_CHANNEL_VALID(rdc)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    "rxdma_cfg_rdc_rcr_ctl"
+				    " Illegal RDC number %d \n",
+				    rdc));
+		return (NPI_RXDMA_RDC_INVALID);
+	}
+
+
+	RXDMA_REG_READ64(handle, RCRCFIG_B_REG, rdc, &rcr_cfgb.value);
+
+	switch (op) {
+		case RCR_TIMEOUT_ENABLE:
+			rcr_cfgb.bits.ldw.timeout = (uint8_t)param;
+			rcr_cfgb.bits.ldw.entout = 1;
+			break;
+
+		case RCR_THRESHOLD:
+			rcr_cfgb.bits.ldw.pthres = param;
+			break;
+
+		case RCR_TIMEOUT_DISABLE:
+			rcr_cfgb.bits.ldw.entout = 0;
+			break;
+
+		default:
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			    "rxdma_cfg_rdc_rcr_ctl"
+			    " Illegal opcode %x \n",
+			    op));
+			return (NPI_RXDMA_OPCODE_INVALID(rdc));
+	}
+
+	RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG, rdc, rcr_cfgb.value);
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_rxdma_cfg_rdc_rcr_timeout_disable(npi_handle_t handle, uint8_t rdc)
+{
+	return (npi_rxdma_cfg_rdc_rcr_ctl(handle, rdc,
+	    RCR_TIMEOUT_DISABLE, 0));
+}
+
+npi_status_t
+npi_rxdma_cfg_rdc_rcr_threshold(npi_handle_t handle, uint8_t rdc,
+				    uint16_t rcr_threshold)
+{
+	return (npi_rxdma_cfg_rdc_rcr_ctl(handle, rdc,
+	    RCR_THRESHOLD, rcr_threshold));
+
+}
+
+npi_status_t
+npi_rxdma_cfg_rdc_rcr_timeout(npi_handle_t handle, uint8_t rdc,
+			    uint8_t rcr_timeout)
+{
+	return (npi_rxdma_cfg_rdc_rcr_ctl(handle, rdc,
+	    RCR_TIMEOUT_ENABLE, rcr_timeout));
+
+}
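+
+/*
+ * Illustrative sketch (an assumption, not a recommended tuning):
+ * configuring receive interrupt coalescing on an RDC with the wrappers
+ * above.  The threshold and timeout values are placeholders.
+ *
+ *	(void) npi_rxdma_cfg_rdc_rcr_threshold(handle, rdc, 8);
+ *	(void) npi_rxdma_cfg_rdc_rcr_timeout(handle, rdc, 16);
+ *
+ * and, when coalescing by time is no longer wanted:
+ *
+ *	(void) npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, rdc);
+ */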
+
+/*
+ * npi_rxdma_cfg_rdc_ring()
+ * Configure the RDC channel receive rings (RBR and RCR) and mailbox
+ */
+npi_status_t
+npi_rxdma_cfg_rdc_ring(npi_handle_t handle, uint8_t rdc,
+			    rdc_desc_cfg_t *rdc_desc_cfg)
+{
+	rbr_cfig_a_t cfga;
+	rbr_cfig_b_t cfgb;
+	rxdma_cfig1_t cfg1;
+	rxdma_cfig2_t cfg2;
+	rcrcfig_a_t rcr_cfga;
+	rcrcfig_b_t rcr_cfgb;
+
+	ASSERT(RXDMA_CHANNEL_VALID(rdc));
+	if (!RXDMA_CHANNEL_VALID(rdc)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    "rxdma_cfg_rdc_ring"
+				    " Illegal RDC number %d \n",
+				    rdc));
+		return (NPI_RXDMA_RDC_INVALID);
+	}
+
+
+	cfga.value = 0;
+	cfgb.value = 0;
+	cfg1.value = 0;
+	cfg2.value = 0;
+
+	if (rdc_desc_cfg->mbox_enable == 1) {
+		cfg1.bits.ldw.mbaddr_h =
+		    (rdc_desc_cfg->mbox_addr >> 32) & 0xfff;
+		cfg2.bits.ldw.mbaddr =
+		    ((rdc_desc_cfg->mbox_addr &
+			    RXDMA_CFIG2_MBADDR_L_MASK) >>
+			    RXDMA_CFIG2_MBADDR_L_SHIFT);
+
+
+		/*
+		 * Enable the RDC only after all of the configuration
+		 * has been set; otherwise a configuration fatal error
+		 * will be returned (especially if the Hypervisor set
+		 * up the logical pages with non-zero values).  This
+		 * NPI function only sets up the configuration.
+		 */
+	}
+
+
+	if (rdc_desc_cfg->full_hdr == 1)
+		cfg2.bits.ldw.full_hdr = 1;
+
+	if (RXDMA_BUFF_OFFSET_VALID(rdc_desc_cfg->offset)) {
+		cfg2.bits.ldw.offset = rdc_desc_cfg->offset;
+	} else {
+		cfg2.bits.ldw.offset = SW_OFFSET_NO_OFFSET;
+	}
+
+		/* rbr config */
+
+	cfga.value = (rdc_desc_cfg->rbr_addr & (RBR_CFIG_A_STDADDR_MASK |
+					    RBR_CFIG_A_STDADDR_BASE_MASK));
+
+	if ((rdc_desc_cfg->rbr_len < RBR_DEFAULT_MIN_LEN) ||
+		    (rdc_desc_cfg->rbr_len > RBR_DEFAULT_MAX_LEN)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    "npi_rxdma_cfg_rdc_ring"
+				    " Illegal RBR Queue Length %d \n",
+				    rdc_desc_cfg->rbr_len));
+		return (NPI_RXDMA_ERROR_ENCODE(NPI_RXDMA_RBRSZIE_INVALID, rdc));
+	}
+
+
+	cfga.bits.hdw.len = rdc_desc_cfg->rbr_len;
+	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
+		"npi_rxdma_cfg_rdc_ring"
+		" CFGA 0x%llx hdw.len %d (RBR LEN %d)\n",
+		cfga.value, cfga.bits.hdw.len,
+		rdc_desc_cfg->rbr_len));
+
+	if (rdc_desc_cfg->page_size == SIZE_4KB)
+		cfgb.bits.ldw.bksize = RBR_BKSIZE_4K;
+	else if (rdc_desc_cfg->page_size == SIZE_8KB)
+		cfgb.bits.ldw.bksize = RBR_BKSIZE_8K;
+	else if (rdc_desc_cfg->page_size == SIZE_16KB)
+		cfgb.bits.ldw.bksize = RBR_BKSIZE_16K;
+	else if (rdc_desc_cfg->page_size == SIZE_32KB)
+		cfgb.bits.ldw.bksize = RBR_BKSIZE_32K;
+	else {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			    "rxdma_cfg_rdc_ring"
+			    " blksize: Illegal buffer size %d \n",
+			    rdc_desc_cfg->page_size));
+		return (NPI_RXDMA_BUFSZIE_INVALID);
+	}
+
+	if (rdc_desc_cfg->valid0) {
+
+		if (rdc_desc_cfg->size0 == SIZE_256B)
+			cfgb.bits.ldw.bufsz0 = RBR_BUFSZ0_256B;
+		else if (rdc_desc_cfg->size0 == SIZE_512B)
+			cfgb.bits.ldw.bufsz0 = RBR_BUFSZ0_512B;
+		else if (rdc_desc_cfg->size0 == SIZE_1KB)
+			cfgb.bits.ldw.bufsz0 = RBR_BUFSZ0_1K;
+		else if (rdc_desc_cfg->size0 == SIZE_2KB)
+			cfgb.bits.ldw.bufsz0 = RBR_BUFSZ0_2K;
+		else {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " rxdma_cfg_rdc_ring"
+				    " blksize0: Illegal buffer size %x \n",
+				    rdc_desc_cfg->size0));
+			return (NPI_RXDMA_BUFSZIE_INVALID);
+		}
+		cfgb.bits.ldw.vld0 = 1;
+	} else {
+		cfgb.bits.ldw.vld0 = 0;
+	}
+
+
+	if (rdc_desc_cfg->valid1) {
+		if (rdc_desc_cfg->size1 == SIZE_1KB)
+			cfgb.bits.ldw.bufsz1 = RBR_BUFSZ1_1K;
+		else if (rdc_desc_cfg->size1 == SIZE_2KB)
+			cfgb.bits.ldw.bufsz1 = RBR_BUFSZ1_2K;
+		else if (rdc_desc_cfg->size1 == SIZE_4KB)
+			cfgb.bits.ldw.bufsz1 = RBR_BUFSZ1_4K;
+		else if (rdc_desc_cfg->size1 == SIZE_8KB)
+			cfgb.bits.ldw.bufsz1 = RBR_BUFSZ1_8K;
+		else {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " rxdma_cfg_rdc_ring"
+				    " blksize1: Illegal buffer size %x \n",
+				    rdc_desc_cfg->size1));
+			return (NPI_RXDMA_BUFSZIE_INVALID);
+		}
+		cfgb.bits.ldw.vld1 = 1;
+	} else {
+		cfgb.bits.ldw.vld1 = 0;
+	}
+
+
+	if (rdc_desc_cfg->valid2) {
+		if (rdc_desc_cfg->size2 == SIZE_2KB)
+			cfgb.bits.ldw.bufsz2 = RBR_BUFSZ2_2K;
+		else if (rdc_desc_cfg->size2 == SIZE_4KB)
+			cfgb.bits.ldw.bufsz2 = RBR_BUFSZ2_4K;
+		else if (rdc_desc_cfg->size2 == SIZE_8KB)
+			cfgb.bits.ldw.bufsz2 = RBR_BUFSZ2_8K;
+		else if (rdc_desc_cfg->size2 == SIZE_16KB)
+			cfgb.bits.ldw.bufsz2 = RBR_BUFSZ2_16K;
+		else {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " rxdma_cfg_rdc_ring"
+				    " blksize2: Illegal buffer size %x \n",
+				    rdc_desc_cfg->size2));
+			return (NPI_RXDMA_BUFSZIE_INVALID);
+		}
+		cfgb.bits.ldw.vld2 = 1;
+	} else {
+		cfgb.bits.ldw.vld2 = 0;
+	}
+
+
+	rcr_cfga.value = (rdc_desc_cfg->rcr_addr &
+			    (RCRCFIG_A_STADDR_MASK |
+			    RCRCFIG_A_STADDR_BASE_MASK));
+
+
+	if ((rdc_desc_cfg->rcr_len < RCR_DEFAULT_MIN_LEN) ||
+		    (rdc_desc_cfg->rcr_len > NXGE_RCR_MAX)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			    " rxdma_cfg_rdc_ring"
+			    " Illegal RCR Queue Length %d \n",
+			    rdc_desc_cfg->rcr_len));
+		return (NPI_RXDMA_ERROR_ENCODE(NPI_RXDMA_RCRSZIE_INVALID, rdc));
+	}
+
+	rcr_cfga.bits.hdw.len = rdc_desc_cfg->rcr_len;
+
+
+	rcr_cfgb.value = 0;
+	if (rdc_desc_cfg->rcr_timeout_enable == 1) {
+		/* check if the rcr timeout value is valid */
+
+		if (RXDMA_RCR_TO_VALID(rdc_desc_cfg->rcr_timeout)) {
+			rcr_cfgb.bits.ldw.timeout = rdc_desc_cfg->rcr_timeout;
+			rcr_cfgb.bits.ldw.entout = 1;
+		} else {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " rxdma_cfg_rdc_ring"
+				    " Illegal RCR Timeout value %d \n",
+				    rdc_desc_cfg->rcr_timeout));
+			rcr_cfgb.bits.ldw.entout = 0;
+		}
+	} else {
+		rcr_cfgb.bits.ldw.entout = 0;
+	}
+
+		/* check if the rcr threshold value is valid */
+	if (RXDMA_RCR_THRESH_VALID(rdc_desc_cfg->rcr_threshold)) {
+		rcr_cfgb.bits.ldw.pthres = rdc_desc_cfg->rcr_threshold;
+	} else {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			    " rxdma_cfg_rdc_ring"
+			    " Illegal RCR Threshold value %d \n",
+			    rdc_desc_cfg->rcr_threshold));
+		rcr_cfgb.bits.ldw.pthres = 1;
+	}
+
+		/* now do the actual HW configuration */
+	RXDMA_REG_WRITE64(handle, RXDMA_CFIG1_REG, rdc, cfg1.value);
+	RXDMA_REG_WRITE64(handle, RXDMA_CFIG2_REG, rdc, cfg2.value);
+
+
+	RXDMA_REG_WRITE64(handle, RBR_CFIG_A_REG, rdc, cfga.value);
+	RXDMA_REG_WRITE64(handle, RBR_CFIG_B_REG, rdc, cfgb.value);
+
+	RXDMA_REG_WRITE64(handle, RCRCFIG_A_REG, rdc, rcr_cfga.value);
+	RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG, rdc, rcr_cfgb.value);
+
+	return (NPI_SUCCESS);
+
+}
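+
+/*
+ * Illustrative sketch of the configure-before-enable ordering noted in
+ * the comment above.  This is an assumption for documentation only:
+ * mbox_dma_addr, rbr_dma_addr and rcr_dma_addr stand for DMA addresses
+ * of buffers the caller has already allocated, and the field values
+ * are placeholders.
+ *
+ *	rdc_desc_cfg_t	desc;
+ *
+ *	bzero(&desc, sizeof (desc));
+ *	desc.mbox_enable = 1;
+ *	desc.mbox_addr = mbox_dma_addr;
+ *	desc.rbr_addr = rbr_dma_addr;
+ *	desc.rbr_len = RBR_DEFAULT_MIN_LEN;
+ *	desc.page_size = SIZE_4KB;
+ *	desc.valid0 = 1;
+ *	desc.size0 = SIZE_2KB;
+ *	desc.rcr_addr = rcr_dma_addr;
+ *	desc.rcr_len = RCR_DEFAULT_MIN_LEN;
+ *	desc.rcr_threshold = 1;
+ *
+ *	if (npi_rxdma_cfg_rdc_ring(handle, rdc, &desc) == NPI_SUCCESS)
+ *		(void) npi_rxdma_cfg_rdc_enable(handle, rdc);
+ */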
+
+/*
+ * npi_rxdma_red_discard_stat_get
+ * Gets the current discard count due to RED.
+ * The counter overflow bit is cleared if it has been set.
+ *
+ * Inputs:
+ *      handle:	opaque handle interpreted by the underlying OS
+ *	rdc:		RX DMA Channel number
+ *	cnt:	Ptr to structure to write current RDC discard stat
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_RXDMA_RDC_INVALID
+ *
+ */
+npi_status_t
+npi_rxdma_red_discard_stat_get(npi_handle_t handle, uint8_t rdc,
+				    rx_disc_cnt_t *cnt)
+{
+	uint64_t offset;
+
+	ASSERT(RXDMA_CHANNEL_VALID(rdc));
+	if (!RXDMA_CHANNEL_VALID(rdc)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_rxdma_red_discard_stat_get"
+				    " Illegal RDC Number %d \n",
+				    rdc));
+		return (NPI_RXDMA_RDC_INVALID);
+	}
+
+	offset = RDC_RED_RDC_DISC_REG(rdc);
+	NXGE_REG_RD64(handle, offset, &cnt->value);
+	if (cnt->bits.ldw.oflow) {
+		NPI_DEBUG_MSG((handle.function, NPI_ERR_CTL,
+			    " npi_rxdma_red_discard_stat_get"
+			    " Counter overflow for channel %d "
+			    " ..... clearing \n",
+			    rdc));
+		cnt->bits.ldw.oflow = 0;
+		NXGE_REG_WR64(handle, offset, cnt->value);
+		cnt->bits.ldw.oflow = 1;
+	}
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_rxdma_red_discard_oflow_clear
+ * Clear RED discard counter overflow bit
+ *
+ * Inputs:
+ *      handle:	opaque handle interpreted by the underlying OS
+ *	rdc:		RX DMA Channel number
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_RXDMA_RDC_INVALID
+ *
+ */
+npi_status_t
+npi_rxdma_red_discard_oflow_clear(npi_handle_t handle, uint8_t rdc)
+
+{
+	uint64_t offset;
+	rx_disc_cnt_t cnt;
+
+	ASSERT(RXDMA_CHANNEL_VALID(rdc));
+	if (!RXDMA_CHANNEL_VALID(rdc)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			    " npi_rxdma_red_discard_oflow_clear"
+			    " Illegal RDC Number %d \n",
+			    rdc));
+		return (NPI_RXDMA_RDC_INVALID);
+	}
+
+	offset = RDC_RED_RDC_DISC_REG(rdc);
+	NXGE_REG_RD64(handle, offset, &cnt.value);
+	if (cnt.bits.ldw.oflow) {
+		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
+			    " npi_rxdma_red_discard_oflow_clear"
+			    " Counter overflow for channel %d "
+			    " ..... clearing \n",
+			    rdc));
+		cnt.bits.ldw.oflow = 0;
+		NXGE_REG_WR64(handle, offset, cnt.value);
+	}
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_rxdma_misc_discard_stat_get
+ * Gets the current discard count for the rdc due to the
+ * buffer pool being empty.
+ * The counter overflow bit is cleared if it has been set.
+ *
+ * Inputs:
+ *      handle:	opaque handle interpreted by the underlying OS
+ *	rdc:		RX DMA Channel number
+ *	cnt:	Ptr to structure to write current RDC discard stat
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_RXDMA_RDC_INVALID
+ *
+ */
+npi_status_t
+npi_rxdma_misc_discard_stat_get(npi_handle_t handle, uint8_t rdc,
+				    rx_disc_cnt_t *cnt)
+{
+	ASSERT(RXDMA_CHANNEL_VALID(rdc));
+	if (!RXDMA_CHANNEL_VALID(rdc)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_rxdma_misc_discard_stat_get"
+				    " Illegal RDC Number %d \n",
+				    rdc));
+		return (NPI_RXDMA_RDC_INVALID);
+	}
+
+	RXDMA_REG_READ64(handle, RXMISC_DISCARD_REG, rdc, &cnt->value);
+	if (cnt->bits.ldw.oflow) {
+		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
+			    " npi_rxdma_misc_discard_stat_get"
+			    " Counter overflow for channel %d "
+			    " ..... clearing \n",
+			    rdc));
+		cnt->bits.ldw.oflow = 0;
+		RXDMA_REG_WRITE64(handle, RXMISC_DISCARD_REG, rdc, cnt->value);
+		cnt->bits.ldw.oflow = 1;
+	}
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_rxdma_misc_discard_oflow_clear
+ * Clear the overflow bit of the buffer pool empty discard counter
+ * for the rdc
+ *
+ * Inputs:
+ *      handle:	opaque handle interpreted by the underlying OS
+ *	rdc:		RX DMA Channel number
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_RXDMA_RDC_INVALID
+ *
+ */
+npi_status_t
+npi_rxdma_misc_discard_oflow_clear(npi_handle_t handle, uint8_t rdc)
+{
+	rx_disc_cnt_t cnt;
+
+	ASSERT(RXDMA_CHANNEL_VALID(rdc));
+	if (!RXDMA_CHANNEL_VALID(rdc)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			    " npi_rxdma_misc_discard_oflow_clear"
+			    " Illegal RDC Number %d \n",
+			    rdc));
+		return (NPI_RXDMA_RDC_INVALID);
+	}
+
+	RXDMA_REG_READ64(handle, RXMISC_DISCARD_REG, rdc, &cnt.value);
+	if (cnt.bits.ldw.oflow) {
+		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
+			    " npi_rxdma_misc_discard_oflow_clear"
+			    " Counter overflow for channel %d "
+			    " ..... clearing \n",
+			    rdc));
+		cnt.bits.ldw.oflow = 0;
+		RXDMA_REG_WRITE64(handle, RXMISC_DISCARD_REG, rdc, cnt.value);
+	}
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_rxdma_ring_perr_stat_get
+ * Gets the current RDC memory parity error logs.
+ * The error bits are cleared if they have been set.
+ *
+ * Inputs:
+ * handle:	opaque handle interpreted by the underlying OS
+ * pre_log:	Structure to write current RDC Prefetch memory
+ *		Parity Error stat
+ * sha_log:	Structure to write current RDC Shadow memory
+ *		Parity Error stat
+ *
+ * Return:
+ * NPI_SUCCESS
+ *
+ */
+npi_status_t
+npi_rxdma_ring_perr_stat_get(npi_handle_t handle,
+			    rdmc_par_err_log_t *pre_log,
+			    rdmc_par_err_log_t *sha_log)
+{
+	uint64_t pre_offset, sha_offset;
+	rdmc_par_err_log_t clr;
+	int clr_bits = 0;
+
+	pre_offset = RDMC_PRE_PAR_ERR_REG;
+	sha_offset = RDMC_SHA_PAR_ERR_REG;
+	NXGE_REG_RD64(handle, pre_offset, &pre_log->value);
+	NXGE_REG_RD64(handle, sha_offset, &sha_log->value);
+
+	clr.value = pre_log->value;
+	if (pre_log->bits.ldw.err) {
+		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
+			    " npi_rxdma_ring_perr_stat_get"
+			    " PRE ERR Bit set ..... clearing \n"));
+		clr.bits.ldw.err = 0;
+		clr_bits++;
+	}
+
+	if (pre_log->bits.ldw.merr) {
+		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
+			    " npi_rxdma_ring_perr_stat_get"
+			    " PRE MERR Bit set ..... clearing \n"));
+		clr.bits.ldw.merr = 0;
+		clr_bits++;
+	}
+
+	if (clr_bits) {
+		NXGE_REG_WR64(handle, pre_offset, clr.value);
+	}
+
+	clr_bits = 0;
+	clr.value = sha_log->value;
+	if (sha_log->bits.ldw.err) {
+		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
+			    " npi_rxdma_ring_perr_stat_get"
+			    " SHA ERR Bit set ..... clearing \n"));
+		clr.bits.ldw.err = 0;
+		clr_bits++;
+	}
+
+	if (sha_log->bits.ldw.merr) {
+		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
+			    " npi_rxdma_ring_perr_stat_get"
+			    " SHA MERR Bit set ..... clearing \n"));
+		clr.bits.ldw.merr = 0;
+		clr_bits++;
+	}
+
+	if (clr_bits) {
+		NXGE_REG_WR64(handle, sha_offset, clr.value);
+	}
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_rxdma_ring_perr_stat_clear
+ * Clear the RDC memory parity error log bits
+ *
+ * Inputs:
+ *      handle:	opaque handle interpreted by the underlying OS
+ * Return:
+ * NPI_SUCCESS
+ *
+ */
+npi_status_t
+npi_rxdma_ring_perr_stat_clear(npi_handle_t handle)
+{
+	uint64_t pre_offset, sha_offset;
+	rdmc_par_err_log_t clr;
+	int clr_bits = 0;
+	pre_offset = RDMC_PRE_PAR_ERR_REG;
+	sha_offset = RDMC_SHA_PAR_ERR_REG;
+
+	NXGE_REG_RD64(handle, pre_offset, &clr.value);
+
+	if (clr.bits.ldw.err) {
+		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
+			    " npi_rxdma_ring_perr_stat_clear"
+			    " PRE ERR Bit set ..... clearing \n"));
+		clr.bits.ldw.err = 0;
+		clr_bits++;
+	}
+
+	if (clr.bits.ldw.merr) {
+		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
+			    " npi_rxdma_ring_perr_stat_clear"
+			    " PRE MERR Bit set ..... clearing \n"));
+		clr.bits.ldw.merr = 0;
+		clr_bits++;
+	}
+
+	if (clr_bits) {
+		NXGE_REG_WR64(handle, pre_offset, clr.value);
+	}
+
+	clr_bits = 0;
+	NXGE_REG_RD64(handle, sha_offset, &clr.value);
+	if (clr.bits.ldw.err) {
+		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
+			    " npi_rxdma_ring_perr_stat_clear"
+			    " SHA ERR Bit set ..... clearing \n"));
+		clr.bits.ldw.err = 0;
+		clr_bits++;
+	}
+
+	if (clr.bits.ldw.merr) {
+		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
+			    " npi_rxdma_ring_perr_stat_clear"
+			    " SHA MERR Bit set ..... clearing \n"));
+		clr.bits.ldw.merr = 0;
+		clr_bits++;
+	}
+
+	if (clr_bits) {
+		NXGE_REG_WR64(handle, sha_offset, clr.value);
+	}
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * Access the RDMC Memory: used for debugging
+ */
+npi_status_t
+npi_rxdma_rdmc_memory_io(npi_handle_t handle,
+			    rdmc_mem_access_t *data, uint8_t op)
+{
+	uint64_t d0_offset, d1_offset, d2_offset, d3_offset, d4_offset;
+	uint64_t addr_offset;
+	rdmc_mem_addr_t addr;
+	rdmc_mem_data_t d0, d1, d2, d3, d4;
+	d0.value = 0;
+	d1.value = 0;
+	d2.value = 0;
+	d3.value = 0;
+	d4.value = 0;
+	addr.value = 0;
+
+
+	if ((data->location != RDMC_MEM_ADDR_PREFETCH) &&
+		    (data->location != RDMC_MEM_ADDR_SHADOW)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			    " npi_rxdma_rdmc_memory_io"
+			    " Illegal memory Type %x \n",
+			    data->location));
+		return (NPI_RXDMA_OPCODE_INVALID(0));
+	}
+
+	addr_offset = RDMC_MEM_ADDR_REG;
+	addr.bits.ldw.addr = data->addr;
+	addr.bits.ldw.pre_shad = data->location;
+
+	d0_offset = RDMC_MEM_DATA0_REG;
+	d1_offset = RDMC_MEM_DATA1_REG;
+	d2_offset = RDMC_MEM_DATA2_REG;
+	d3_offset = RDMC_MEM_DATA3_REG;
+	d4_offset = RDMC_MEM_DATA4_REG;
+
+
+	if (op == RDMC_MEM_WRITE) {
+		d0.bits.ldw.data = data->data[0];
+		d1.bits.ldw.data = data->data[1];
+		d2.bits.ldw.data = data->data[2];
+		d3.bits.ldw.data = data->data[3];
+		d4.bits.ldw.data = data->data[4];
+		NXGE_REG_WR64(handle, addr_offset, addr.value);
+		NXGE_REG_WR64(handle, d0_offset, d0.value);
+		NXGE_REG_WR64(handle, d1_offset, d1.value);
+		NXGE_REG_WR64(handle, d2_offset, d2.value);
+		NXGE_REG_WR64(handle, d3_offset, d3.value);
+		NXGE_REG_WR64(handle, d4_offset, d4.value);
+	} else if (op == RDMC_MEM_READ) {
+		NXGE_REG_WR64(handle, addr_offset, addr.value);
+		NXGE_REG_RD64(handle, d4_offset, &d4.value);
+		NXGE_REG_RD64(handle, d3_offset, &d3.value);
+		NXGE_REG_RD64(handle, d2_offset, &d2.value);
+		NXGE_REG_RD64(handle, d1_offset, &d1.value);
+		NXGE_REG_RD64(handle, d0_offset, &d0.value);
+
+		data->data[0] = d0.bits.ldw.data;
+		data->data[1] = d1.bits.ldw.data;
+		data->data[2] = d2.bits.ldw.data;
+		data->data[3] = d3.bits.ldw.data;
+		data->data[4] = d4.bits.ldw.data;
+	} else {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			    " npi_rxdma_rdmc_memory_io"
+			    " Illegal opcode %x \n",
+			    op));
+		return (NPI_RXDMA_OPCODE_INVALID(0));
+
+	}
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * system wide conf functions
+ */
+npi_status_t
+npi_rxdma_cfg_clock_div_set(npi_handle_t handle, uint16_t count)
+{
+	uint64_t offset;
+	rx_dma_ck_div_t clk_div;
+
+	offset = RX_DMA_CK_DIV_REG;
+
+	clk_div.value = 0;
+	clk_div.bits.ldw.cnt = count;
+	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
+		    " npi_rxdma_cfg_clock_div_set: add 0x%llx "
+		    "handle 0x%llx value 0x%llx",
+		    handle.regp, handle.regh, clk_div.value));
+
+	NXGE_REG_WR64(handle, offset, clk_div.value);
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_rxdma_cfg_red_rand_init(npi_handle_t handle, uint16_t init_value)
+{
+	uint64_t offset;
+	red_ran_init_t rand_reg;
+
+	offset = RED_RAN_INIT_REG;
+
+	rand_reg.value = 0;
+	rand_reg.bits.ldw.init = init_value;
+	rand_reg.bits.ldw.enable = 1;
+	NXGE_REG_WR64(handle, offset, rand_reg.value);
+
+	return (NPI_SUCCESS);
+
+}
+
+npi_status_t
+npi_rxdma_cfg_red_rand_disable(npi_handle_t handle)
+{
+	uint64_t offset;
+	red_ran_init_t rand_reg;
+
+	offset = RED_RAN_INIT_REG;
+
+	NXGE_REG_RD64(handle, offset, &rand_reg.value);
+	rand_reg.bits.ldw.enable = 0;
+	NXGE_REG_WR64(handle, offset, rand_reg.value);
+
+	return (NPI_SUCCESS);
+
+}
+
+npi_status_t
+npi_rxdma_cfg_32bitmode_enable(npi_handle_t handle)
+{
+	uint64_t offset;
+	rx_addr_md_t md_reg;
+	offset = RX_ADDR_MD_REG;
+	md_reg.value = 0;
+	md_reg.bits.ldw.mode32 = 1;
+
+	NXGE_REG_WR64(handle, offset, md_reg.value);
+	return (NPI_SUCCESS);
+
+}
+
+npi_status_t
+npi_rxdma_cfg_32bitmode_disable(npi_handle_t handle)
+{
+	uint64_t offset;
+	rx_addr_md_t md_reg;
+	offset = RX_ADDR_MD_REG;
+	md_reg.value = 0;
+
+	NXGE_REG_WR64(handle, offset, md_reg.value);
+	return (NPI_SUCCESS);
+
+}
+
+npi_status_t
+npi_rxdma_cfg_ram_access_enable(npi_handle_t handle)
+{
+	uint64_t offset;
+	rx_addr_md_t md_reg;
+	offset = RX_ADDR_MD_REG;
+	NXGE_REG_RD64(handle, offset, &md_reg.value);
+	md_reg.bits.ldw.ram_acc = 1;
+	NXGE_REG_WR64(handle, offset, md_reg.value);
+	return (NPI_SUCCESS);
+
+}
+
+npi_status_t
+npi_rxdma_cfg_ram_access_disable(npi_handle_t handle)
+{
+	uint64_t offset;
+	rx_addr_md_t md_reg;
+	offset = RX_ADDR_MD_REG;
+	NXGE_REG_RD64(handle, offset, &md_reg.value);
+	md_reg.bits.ldw.ram_acc = 0;
+	NXGE_REG_WR64(handle, offset, md_reg.value);
+	return (NPI_SUCCESS);
+
+}
+
+npi_status_t
+npi_rxdma_cfg_port_ddr_weight(npi_handle_t handle,
+				    uint8_t portnm, uint32_t weight)
+{
+
+	pt_drr_wt_t wt_reg;
+	uint64_t offset;
+
+	ASSERT(RXDMA_PORT_VALID(portnm));
+	if (!RXDMA_PORT_VALID(portnm)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			    " rxdma_cfg_port_ddr_weight"
+			    " Illegal Port Number %d \n",
+			    portnm));
+		return (NPI_RXDMA_PORT_INVALID);
+	}
+
+	offset = PT_DRR_WT_REG(portnm);
+	wt_reg.value = 0;
+	wt_reg.bits.ldw.wt = weight;
+	NXGE_REG_WR64(handle, offset, wt_reg.value);
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_rxdma_port_usage_get(npi_handle_t handle,
+				    uint8_t portnm, uint32_t *blocks)
+{
+
+	pt_use_t use_reg;
+	uint64_t offset;
+
+	ASSERT(RXDMA_PORT_VALID(portnm));
+	if (!RXDMA_PORT_VALID(portnm)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			    " rxdma_port_usage_get"
+			    " Illegal Port Number %d \n",
+			    portnm));
+		return (NPI_RXDMA_PORT_INVALID);
+	}
+
+	offset = PT_USE_REG(portnm);
+	NXGE_REG_RD64(handle, offset, &use_reg.value);
+	*blocks = use_reg.bits.ldw.cnt;
+	return (NPI_SUCCESS);
+
+}
+
+npi_status_t
+npi_rxdma_cfg_wred_param(npi_handle_t handle, uint8_t rdc,
+				    rdc_red_para_t *wred_params)
+{
+	rdc_red_para_t wred_reg;
+	uint64_t offset;
+
+	ASSERT(RXDMA_CHANNEL_VALID(rdc));
+	if (!RXDMA_CHANNEL_VALID(rdc)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			    " rxdma_cfg_wred_param"
+			    " Illegal RDC Number %d \n",
+			    rdc));
+		return (NPI_RXDMA_RDC_INVALID);
+	}
+
+	/*
+	 * need to update RDC_RED_PARA_REG as well as bit defs in
+	 * the hw header file
+	 */
+	offset = RDC_RED_RDC_PARA_REG(rdc);
+
+	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
+		" npi_rxdma_cfg_wred_param: "
+		"set RED_PARA: passed value 0x%llx "
+		"win 0x%x thre 0x%x sync 0x%x thre_sync 0x%x",
+		wred_params->value,
+		wred_params->bits.ldw.win,
+		wred_params->bits.ldw.thre,
+		wred_params->bits.ldw.win_syn,
+		wred_params->bits.ldw.thre_sync));
+
+	wred_reg.value = 0;
+	wred_reg.bits.ldw.win = wred_params->bits.ldw.win;
+	wred_reg.bits.ldw.thre = wred_params->bits.ldw.thre;
+	wred_reg.bits.ldw.win_syn = wred_params->bits.ldw.win_syn;
+	wred_reg.bits.ldw.thre_sync = wred_params->bits.ldw.thre_sync;
+	NXGE_REG_WR64(handle, offset, wred_reg.value);
+
+	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
+		"set RED_PARA: value 0x%llx "
+		"win 0x%x thre 0x%x sync 0x%x thre_sync 0x%x",
+		wred_reg.value,
+		wred_reg.bits.ldw.win,
+		wred_reg.bits.ldw.thre,
+		wred_reg.bits.ldw.win_syn,
+		wred_reg.bits.ldw.thre_sync));
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_rxdma_cfg_rdc_table()
+ * Configure/populate the RDC table
+ *
+ * Inputs:
+ *	handle:		register handle interpreted by the underlying OS
+ *	table:		RDC Group Number
+ *	rdc[]:	 Array of RX DMA Channels
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_RXDMA_TABLE_INVALID
+ *
+ */
+npi_status_t
+npi_rxdma_cfg_rdc_table(npi_handle_t handle,
+			    uint8_t table, uint8_t rdc[])
+{
+	uint64_t offset;
+	int tbl_offset;
+	rdc_tbl_t tbl_reg;
+	tbl_reg.value = 0;
+
+	ASSERT(RXDMA_TABLE_VALID(table));
+	if (!RXDMA_TABLE_VALID(table)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			    " npi_rxdma_cfg_rdc_table"
+			    " Illegal RDC Table Number %d \n",
+			    table));
+		return (NPI_RXDMA_TABLE_INVALID);
+	}
+
+	offset = REG_RDC_TABLE_OFFSET(table);
+	for (tbl_offset = 0; tbl_offset < NXGE_MAX_RDCS; tbl_offset++) {
+		tbl_reg.bits.ldw.rdc = rdc[tbl_offset];
+		NXGE_REG_WR64(handle, offset, tbl_reg.value);
+		offset += 8;
+	}
+
+	return (NPI_SUCCESS);
+
+}
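+
+/*
+ * Illustrative sketch (an assumption for documentation only): populate
+ * RDC group 0 by spreading its entries over the first four channels
+ * and make channel 0 the group default.  The group number and channel
+ * layout are placeholders.
+ *
+ *	uint8_t	rdcs[NXGE_MAX_RDCS];
+ *	int	i;
+ *
+ *	for (i = 0; i < NXGE_MAX_RDCS; i++)
+ *		rdcs[i] = i % 4;
+ *
+ *	(void) npi_rxdma_cfg_rdc_table(handle, 0, rdcs);
+ *	(void) npi_rxdma_cfg_rdc_table_default_rdc(handle, 0, 0);
+ */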
+
+npi_status_t
+npi_rxdma_cfg_rdc_table_default_rdc(npi_handle_t handle,
+			    uint8_t table, uint8_t rdc)
+{
+	uint64_t offset;
+	rdc_tbl_t tbl_reg;
+	tbl_reg.value = 0;
+
+	ASSERT(RXDMA_TABLE_VALID(table));
+	if (!RXDMA_TABLE_VALID(table)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			    " npi_rxdma_cfg_rdc_table_default_rdc"
+			    " Illegal RDC Table Number %d \n",
+			    table));
+		return (NPI_RXDMA_TABLE_INVALID);
+	}
+
+	offset = REG_RDC_TABLE_OFFSET(table);
+	tbl_reg.bits.ldw.rdc = rdc;
+	NXGE_REG_WR64(handle, offset, tbl_reg.value);
+	return (NPI_SUCCESS);
+
+}
+
+npi_status_t
+npi_rxdma_dump_rdc_table(npi_handle_t handle,
+			    uint8_t table)
+{
+	uint64_t offset;
+	int tbl_offset;
+	uint64_t value;
+
+	ASSERT(RXDMA_TABLE_VALID(table));
+	if (!RXDMA_TABLE_VALID(table)) {
+		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+			    " npi_rxdma_dump_rdc_table"
+			    " Illegal RDC Table Number %d \n",
+			    table));
+		return (NPI_RXDMA_TABLE_INVALID);
+	}
+	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+			    "\n Register Dump for RDC Table %d \n",
+			    table));
+	offset = REG_RDC_TABLE_OFFSET(table);
+	for (tbl_offset = 0; tbl_offset < NXGE_MAX_RDCS; tbl_offset++) {
+		NXGE_REG_RD64(handle, offset, &value);
+		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+					    " 0x%08llx 0x%08llx \n",
+					    offset, value));
+		offset += 8;
+	}
+	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+			    "\n Register Dump for RDC Table %d done\n",
+			    table));
+	return (NPI_SUCCESS);
+
+}
+
+npi_status_t
+npi_rxdma_rdc_rbr_stat_get(npi_handle_t handle, uint8_t rdc,
+			    rbr_stat_t *rbr_stat)
+{
+
+	ASSERT(RXDMA_CHANNEL_VALID(rdc));
+	if (!RXDMA_CHANNEL_VALID(rdc)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			    " rxdma_rdc_rbr_stat_get"
+			    " Illegal RDC Number %d \n",
+			    rdc));
+		return (NPI_RXDMA_RDC_INVALID);
+	}
+
+	RXDMA_REG_READ64(handle, RBR_STAT_REG, rdc, &rbr_stat->value);
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_rxdma_rdc_rbr_head_get
+ * Gets the current rbr head pointer.
+ *
+ * Inputs:
+ *      handle:	opaque handle interpreted by the underlying OS
+ *	rdc:		RX DMA Channel number
+ *	hdptr		ptr to write the rbr head value
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_RXDMA_RDC_INVALID
+ */
+npi_status_t
+npi_rxdma_rdc_rbr_head_get(npi_handle_t handle,
+			    uint8_t rdc, addr44_t *hdptr)
+{
+	rbr_hdh_t hh_ptr;
+	rbr_hdl_t hl_ptr;
+
+	ASSERT(RXDMA_CHANNEL_VALID(rdc));
+	if (!RXDMA_CHANNEL_VALID(rdc)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			    " rxdma_rdc_rbr_head_get"
+			    " Illegal RDC Number %d \n",
+			    rdc));
+		return (NPI_RXDMA_RDC_INVALID);
+	}
+	hh_ptr.value = 0;
+	hl_ptr.value = 0;
+	RXDMA_REG_READ64(handle, RBR_HDH_REG, rdc, &hh_ptr.value);
+	RXDMA_REG_READ64(handle, RBR_HDL_REG, rdc, &hl_ptr.value);
+	hdptr->bits.ldw = hl_ptr.bits.ldw.head_l << 2;
+	hdptr->bits.hdw = hh_ptr.bits.ldw.head_h;
+	return (NPI_SUCCESS);
+
+}
+
+npi_status_t
+npi_rxdma_rdc_rcr_qlen_get(npi_handle_t handle, uint8_t rdc,
+			    uint16_t *rcr_qlen)
+{
+
+	rcrstat_a_t stats;
+
+	ASSERT(RXDMA_CHANNEL_VALID(rdc));
+	if (!RXDMA_CHANNEL_VALID(rdc)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			    " rxdma_rdc_rcr_qlen_get"
+			    " Illegal RDC Number %d \n",
+			    rdc));
+		return (NPI_RXDMA_RDC_INVALID);
+	}
+
+	RXDMA_REG_READ64(handle, RCRSTAT_A_REG, rdc, &stats.value);
+	*rcr_qlen =  stats.bits.ldw.qlen;
+	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
+		    " rxdma_rdc_rcr_qlen_get"
+		    " RDC %d qlen %x qlen %x\n",
+		    rdc, *rcr_qlen, stats.bits.ldw.qlen));
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_rxdma_rdc_rcr_tail_get(npi_handle_t handle,
+			    uint8_t rdc, addr44_t *tail_addr)
+{
+
+	rcrstat_b_t th_ptr;
+	rcrstat_c_t tl_ptr;
+
+	ASSERT(RXDMA_CHANNEL_VALID(rdc));
+	if (!RXDMA_CHANNEL_VALID(rdc)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " rxdma_rdc_rcr_tail_get"
+				    " Illegal RDC Number %d \n",
+				    rdc));
+		return (NPI_RXDMA_RDC_INVALID);
+	}
+	th_ptr.value = 0;
+	tl_ptr.value = 0;
+	RXDMA_REG_READ64(handle, RCRSTAT_B_REG, rdc, &th_ptr.value);
+	RXDMA_REG_READ64(handle, RCRSTAT_C_REG, rdc, &tl_ptr.value);
+	tail_addr->bits.ldw = tl_ptr.bits.ldw.tlptr_l << 3;
+	tail_addr->bits.hdw = th_ptr.bits.ldw.tlptr_h;
+	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
+			    " rxdma_rdc_rcr_tail_get"
+			    " RDC %d rcr_tail %llx tl %x\n",
+			    rdc, tl_ptr.value,
+			    tl_ptr.bits.ldw.tlptr_l));
+
+	return (NPI_SUCCESS);
+
+
+}
+
+/*
+ * npi_rxdma_rxctl_fifo_error_intr_set
+ * Configure the RX control FIFO error interrupt generation
+ *
+ * Inputs:
+ *      handle:	opaque handle interpreted by the underlying OS
+ *	mask:	rx_ctl_dat_fifo_mask_t specifying the errors
+ * The valid fields in the rx_ctl_dat_fifo_mask_t structure are
+ * zcp_eop_err, ipp_eop_err and id_mismatch.  If a field is set
+ * to 1, interrupt generation is enabled for the corresponding
+ * error condition.  In the hardware, the bit(s) have to be
+ * cleared to enable the interrupt.
+ *
+ * Return:
+ * NPI_SUCCESS
+ *
+ */
+npi_status_t
+npi_rxdma_rxctl_fifo_error_intr_set(npi_handle_t handle,
+				    rx_ctl_dat_fifo_mask_t *mask)
+{
+	uint64_t offset;
+	rx_ctl_dat_fifo_mask_t intr_mask;
+	offset = RX_CTL_DAT_FIFO_MASK_REG;
+	NXGE_REG_RD64(handle, offset, &intr_mask.value);
+
+	if (mask->bits.ldw.ipp_eop_err) {
+		intr_mask.bits.ldw.ipp_eop_err = 0;
+	}
+
+	if (mask->bits.ldw.zcp_eop_err) {
+		intr_mask.bits.ldw.zcp_eop_err = 0;
+	}
+
+	if (mask->bits.ldw.id_mismatch) {
+		intr_mask.bits.ldw.id_mismatch = 0;
+	}
+
+	NXGE_REG_WR64(handle, offset, intr_mask.value);
+	return (NPI_SUCCESS);
+}
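+
+/*
+ * Illustrative sketch (an assumption for documentation only): request
+ * interrupts for all three FIFO error conditions described above.
+ *
+ *	rx_ctl_dat_fifo_mask_t	mask;
+ *
+ *	mask.value = 0;
+ *	mask.bits.ldw.ipp_eop_err = 1;
+ *	mask.bits.ldw.zcp_eop_err = 1;
+ *	mask.bits.ldw.id_mismatch = 1;
+ *	(void) npi_rxdma_rxctl_fifo_error_intr_set(handle, &mask);
+ */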
+
+/*
+ * npi_rxdma_rxctl_fifo_error_intr_get
+ * Read the RX control FIFO error status
+ *
+ * Inputs:
+ *      handle:	opaque handle interpreted by the underlying OS
+ *	stat:	rx_ctl_dat_fifo_stat_t to read the errors to
+ * valid fields in  rx_ctl_dat_fifo_stat_t structure are:
+ * zcp_eop_err, ipp_eop_err, id_mismatch.
+ * Return:
+ * NPI_SUCCESS
+ *
+ */
+npi_status_t
+npi_rxdma_rxctl_fifo_error_intr_get(npi_handle_t handle,
+			    rx_ctl_dat_fifo_stat_t *stat)
+{
+	uint64_t offset = RX_CTL_DAT_FIFO_STAT_REG;
+	NXGE_REG_RD64(handle, offset, &stat->value);
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_rxdma_rdc_rcr_pktread_update(npi_handle_t handle, uint8_t channel,
+				    uint16_t pkts_read)
+{
+
+	rx_dma_ctl_stat_t	cs;
+	uint16_t min_read = 0;
+
+	ASSERT(RXDMA_CHANNEL_VALID(channel));
+	if (!RXDMA_CHANNEL_VALID(channel)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+		    " npi_rxdma_rdc_rcr_pktread_update "
+		    " channel %d", channel));
+		return (NPI_FAILURE | NPI_RXDMA_CHANNEL_INVALID(channel));
+	}
+
+	if ((pkts_read < min_read) || (pkts_read > 512)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+		    " npi_rxdma_rdc_rcr_pktread_update "
+		    " pkts %d out of bound", pkts_read));
+		return (NPI_RXDMA_OPCODE_INVALID(pkts_read));
+	}
+
+	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
+				&cs.value);
+	cs.bits.ldw.pktread = pkts_read;
+	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG,
+				    channel, cs.value);
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_rxdma_rdc_rcr_bufread_update(npi_handle_t handle, uint8_t channel,
+					    uint16_t bufs_read)
+{
+
+	rx_dma_ctl_stat_t	cs;
+	uint16_t min_read = 0;
+
+	ASSERT(RXDMA_CHANNEL_VALID(channel));
+	if (!RXDMA_CHANNEL_VALID(channel)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+		    " npi_rxdma_rdc_rcr_bufread_update "
+		    " channel %d", channel));
+		return (NPI_FAILURE | NPI_RXDMA_CHANNEL_INVALID(channel));
+	}
+
+	if ((bufs_read < min_read) || (bufs_read > 512)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+		    " npi_rxdma_rdc_rcr_bufread_update ",
+		    " bufs read %d out of bound", bufs_read));
+		return (NPI_RXDMA_OPCODE_INVALID(bufs_read));
+	}
+
+	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
+				&cs.value);
+	cs.bits.ldw.ptrread = bufs_read;
+	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG,
+				    channel, cs.value);
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_rxdma_rdc_rcr_read_update(npi_handle_t handle, uint8_t channel,
+				    uint16_t pkts_read, uint16_t bufs_read)
+{
+
+	rx_dma_ctl_stat_t	cs;
+
+	ASSERT(RXDMA_CHANNEL_VALID(channel));
+	if (!RXDMA_CHANNEL_VALID(channel)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+		    " npi_rxdma_rdc_rcr_read_update ",
+		    " channel %d", channel));
+		return (NPI_FAILURE | NPI_RXDMA_CHANNEL_INVALID(channel));
+	}
+
+	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
+	    " npi_rxdma_rdc_rcr_read_update "
+	    " bufs read %d pkt read %d",
+		bufs_read, pkts_read));
+
+	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
+				&cs.value);
+
+	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
+		" npi_rxdma_rdc_rcr_read_update: "
+		" value: 0x%llx bufs read %d pkt read %d",
+		cs.value,
+		cs.bits.ldw.ptrread, cs.bits.ldw.pktread));
+
+	cs.bits.ldw.pktread = pkts_read;
+	cs.bits.ldw.ptrread = bufs_read;
+
+	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG,
+				    channel, cs.value);
+
+	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
+				&cs.value);
+
+	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
+	    " npi_rxdma_rdc_rcr_read_update: read back after update "
+	    " value: 0x%llx bufs read %d pkt read %d",
+		cs.value,
+		cs.bits.ldw.ptrread, cs.bits.ldw.pktread));
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_rxdma_channel_mex_set():
+ *	This function is called to arm the DMA channel with
+ *	mailbox updating capability. Software needs to rearm
+ *	for each update by writing to the control and status register.
+ *
+ * Parameters:
+ *	handle		- NPI handle (virtualization flag must be defined).
+ *	channel		- logical RXDMA channel from 0 to 23.
+ *			  (If virtualization flag is not set, then
+ *			   logical channel is the same as the hardware
+ *			   channel number).
+ *
+ * Return:
+ *	NPI_SUCCESS		- If enabling the channel with mailbox
+ *				  update completes successfully.
+ *
+ *	Error:
+ *	NPI error status code
+ */
+npi_status_t
+npi_rxdma_channel_mex_set(npi_handle_t handle, uint8_t channel)
+{
+	return (npi_rxdma_channel_control(handle, RXDMA_MEX_SET, channel));
+}
+
+/*
+ * npi_rxdma_channel_rcrto_clear():
+ *	This function is called to reset RCRTO bit to 0.
+ *
+ * Parameters:
+ *	handle		- NPI handle (virtualization flag must be defined).
+ *	channel		- logical RXDMA channel from 0 to 23.
+ *			  (If virtualization flag is not set, then
+ *			   logical channel is the same as the hardware
+ *			   channel number).
+ * Return:
+ *	NPI_SUCCESS
+ *
+ *	Error:
+ *	NPI error status code
+ */
+npi_status_t
+npi_rxdma_channel_rcrto_clear(npi_handle_t handle, uint8_t channel)
+{
+	return (npi_rxdma_channel_control(handle, RXDMA_RCRTO_CLEAR, channel));
+}
+
+/*
+ * npi_rxdma_channel_pt_drop_pkt_clear():
+ *	This function is called to clear the port drop packet bit (debug).
+ *
+ * Parameters:
+ *	handle		- NPI handle (virtualization flag must be defined).
+ *	channel		- logical RXDMA channel from 0 to 23.
+ *			  (If virtualization flag is not set, then
+ *			   logical channel is the same as the hardware
+ *			   channel number).
+ * Return:
+ *	NPI_SUCCESS
+ *
+ *	Error:
+ *	NPI error status code
+ */
+npi_status_t
+npi_rxdma_channel_pt_drop_pkt_clear(npi_handle_t handle, uint8_t channel)
+{
+	return (npi_rxdma_channel_control(handle, RXDMA_PT_DROP_PKT_CLEAR,
+			channel));
+}
+
+/*
+ * npi_rxdma_channel_wred_drop_clear():
+ *	This function is called to clear the WRED drop bit (debug only).
+ *
+ * Parameters:
+ *	handle		- NPI handle (virtualization flag must be defined).
+ *	channel		- logical RXDMA channel from 0 to 23.
+ *			  (If virtualization flag is not set, then
+ *			   logical channel is the same as the hardware
+ *			   channel number).
+ * Return:
+ *	NPI_SUCCESS
+ *
+ *	Error:
+ *	NPI error status code
+ */
+npi_status_t
+npi_rxdma_channel_wred_drop_clear(npi_handle_t handle, uint8_t channel)
+{
+	return (npi_rxdma_channel_control(handle, RXDMA_WRED_DROP_CLEAR,
+			channel));
+}
+
+/*
+ * npi_rxdma_channel_rcr_shfull_clear():
+ *	This function is called to clear RCR shadow full bit.
+ *
+ * Parameters:
+ *	handle		- NPI handle (virtualization flag must be defined).
+ *	channel		- logical RXDMA channel from 0 to 23.
+ *			  (If virtualization flag is not set, then
+ *			   logical channel is the same as the hardware
+ *			   channel number).
+ * Return:
+ *	NPI_SUCCESS
+ *
+ *	Error:
+ *	NPI error status code
+ */
+npi_status_t
+npi_rxdma_channel_rcr_shfull_clear(npi_handle_t handle, uint8_t channel)
+{
+	return (npi_rxdma_channel_control(handle, RXDMA_RCR_SFULL_CLEAR,
+			channel));
+}
+
+/*
+ * npi_rxdma_channel_rcrfull_clear():
+ *	This function is called to clear RCR full bit.
+ *
+ * Parameters:
+ *	handle		- NPI handle (virtualization flag must be defined).
+ *	channel		- logical RXDMA channel from 0 to 23.
+ *			  (If virtualization flag is not set, then
+ *			   logical channel is the same as the hardware
+ *			   channel number).
+ * Return:
+ *	NPI_SUCCESS
+ *
+ *	Error:
+ *	NPI error status code
+ */
+npi_status_t
+npi_rxdma_channel_rcrfull_clear(npi_handle_t handle, uint8_t channel)
+{
+	return (npi_rxdma_channel_control(handle, RXDMA_RCR_FULL_CLEAR,
+			channel));
+}
+
+npi_status_t
+npi_rxdma_channel_rbr_empty_clear(npi_handle_t handle, uint8_t channel)
+{
+	return (npi_rxdma_channel_control(handle,
+		RXDMA_RBR_EMPTY_CLEAR, channel));
+}
+
+npi_status_t
+npi_rxdma_channel_cs_clear_all(npi_handle_t handle, uint8_t channel)
+{
+	return (npi_rxdma_channel_control(handle, RXDMA_CS_CLEAR_ALL, channel));
+}
+
+/*
+ * npi_rxdma_channel_control():
+ *	This function is called to control a receive DMA channel
+ *	for arming the channel with mailbox updates, resetting
+ *	various event status bits (control and status register).
+ *
+ * Parameters:
+ *	handle		- NPI handle (virtualization flag must be defined).
+ *	control		- NPI defined control type supported:
+ *				- RXDMA_MEX_SET
+ * 				- RXDMA_RCRTO_CLEAR
+ *				- RXDMA_PT_DROP_PKT_CLEAR
+ *				- RXDMA_WRED_DROP_CLEAR
+ *				- RXDMA_RCR_SFULL_CLEAR
+ *				- RXDMA_RCR_FULL_CLEAR
+ *				- RXDMA_RBR_PRE_EMPTY_CLEAR
+ *				- RXDMA_RBR_EMPTY_CLEAR
+ *	channel		- logical RXDMA channel from 0 to 23.
+ *			  (If virtualization flag is not set, then
+ *			   logical channel is the same as the hardware
+ *			   channel number).
+ * Return:
+ *	NPI_SUCCESS
+ *
+ *	Error:
+ *	NPI error status code
+ */
+npi_status_t
+npi_rxdma_channel_control(npi_handle_t handle, rxdma_cs_cntl_t control,
+			uint8_t channel)
+{
+
+	rx_dma_ctl_stat_t	cs;
+
+	ASSERT(RXDMA_CHANNEL_VALID(channel));
+	if (!RXDMA_CHANNEL_VALID(channel)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+		    " npi_rxdma_channel_control",
+		    " channel %d", channel));
+		return (NPI_FAILURE | NPI_RXDMA_CHANNEL_INVALID(channel));
+	}
+
+	switch (control) {
+	case RXDMA_MEX_SET:
+		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
+				&cs.value);
+		cs.bits.hdw.mex = 1;
+		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG,
+				channel, cs.value);
+		break;
+
+	case RXDMA_RCRTO_CLEAR:
+		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
+				&cs.value);
+		cs.bits.hdw.rcrto = 0;
+		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
+				cs.value);
+		break;
+
+	case RXDMA_PT_DROP_PKT_CLEAR:
+		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
+				&cs.value);
+		cs.bits.hdw.port_drop_pkt = 0;
+		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
+				cs.value);
+		break;
+
+	case RXDMA_WRED_DROP_CLEAR:
+		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
+				&cs.value);
+		cs.bits.hdw.wred_drop = 0;
+		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
+				cs.value);
+		break;
+
+	case RXDMA_RCR_SFULL_CLEAR:
+		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
+				&cs.value);
+		cs.bits.hdw.rcr_shadow_full = 0;
+		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
+				cs.value);
+		break;
+
+	case RXDMA_RCR_FULL_CLEAR:
+		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
+				&cs.value);
+		cs.bits.hdw.rcrfull = 0;
+		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
+				cs.value);
+		break;
+
+	case RXDMA_RBR_PRE_EMPTY_CLEAR:
+		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
+				&cs.value);
+		cs.bits.hdw.rbr_pre_empty = 0;
+		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
+				cs.value);
+		break;
+
+	case RXDMA_RBR_EMPTY_CLEAR:
+		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
+				&cs.value);
+		cs.bits.hdw.rbr_empty = 1;
+		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
+				cs.value);
+		break;
+
+	case RXDMA_CS_CLEAR_ALL:
+		cs.value = 0;
+		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
+				cs.value);
+		break;
+
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    "npi_rxdma_channel_control",
+				    "control %d", control));
+		return (NPI_FAILURE | NPI_RXDMA_OPCODE_INVALID(channel));
+	}
+
+	return (NPI_SUCCESS);
+}
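+
+/*
+ * Usage sketch (illustrative only, not part of the driver): both calls
+ * below acknowledge a completion ring timeout on logical channel 0; the
+ * wrapper is simply a shorthand for the control primitive.  "hndl" is
+ * assumed to be a valid npi_handle_t obtained elsewhere.
+ *
+ *	(void) npi_rxdma_channel_rcrto_clear(hndl, 0);
+ *	(void) npi_rxdma_channel_control(hndl, RXDMA_RCRTO_CLEAR, 0);
+ */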
+
+/*
+ * npi_rxdma_control_status():
+ *	This function is called to operate on the control
+ *	and status register.
+ *
+ * Parameters:
+ *	handle		- NPI handle
+ *	op_mode		- OP_GET: get hardware control and status
+ *			  OP_SET: set hardware control and status
+ *			  OP_UPDATE: update hardware control and status.
+ *			  OP_CLEAR: clear control and status register to 0s.
+ *	channel		- hardware RXDMA channel from 0 to 23.
+ *	cs_p		- pointer to hardware defined control and status
+ *			  structure.
+ * Return:
+ *	NPI_SUCCESS
+ *
+ *	Error:
+ *	NPI error status code
+ */
+npi_status_t
+npi_rxdma_control_status(npi_handle_t handle, io_op_t op_mode,
+			uint8_t channel, p_rx_dma_ctl_stat_t cs_p)
+{
+	int			status = NPI_SUCCESS;
+	rx_dma_ctl_stat_t	cs;
+
+	ASSERT(RXDMA_CHANNEL_VALID(channel));
+	if (!RXDMA_CHANNEL_VALID(channel)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+		    "npi_rxdma_control_status",
+		    "channel %d", channel));
+		return (NPI_FAILURE | NPI_RXDMA_CHANNEL_INVALID(channel));
+	}
+
+	switch (op_mode) {
+	case OP_GET:
+		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
+				&cs_p->value);
+		break;
+
+	case OP_SET:
+		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
+			cs_p->value);
+		break;
+
+	case OP_UPDATE:
+		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
+				&cs.value);
+		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
+			cs_p->value | cs.value);
+		break;
+
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+		    "npi_rxdma_control_status",
+		    "control %d", op_mode));
+		return (NPI_FAILURE | NPI_RXDMA_OPCODE_INVALID(channel));
+	}
+
+	return (status);
+}
+
+/*
+ * npi_rxdma_event_mask():
+ *	This function is called to operate on the event mask
+ *	register which is used for generating interrupts.
+ *
+ * Parameters:
+ *	handle		- NPI handle
+ *	op_mode		- OP_GET: get hardware event mask
+ *			  OP_SET: set hardware interrupt event masks
+ *			  OP_CLEAR: clear control and status register to 0s.
+ *	channel		- hardware RXDMA channel from 0 to 23.
+ *	mask_p		- pointer to hardware defined event mask
+ *			  structure.
+ * Return:
+ *	NPI_SUCCESS		- If the set completes successfully.
+ *
+ *	Error:
+ *	NPI error status code
+ */
+npi_status_t
+npi_rxdma_event_mask(npi_handle_t handle, io_op_t op_mode,
+		uint8_t channel, p_rx_dma_ent_msk_t mask_p)
+{
+	int			status = NPI_SUCCESS;
+	rx_dma_ent_msk_t	mask;
+
+	ASSERT(RXDMA_CHANNEL_VALID(channel));
+	if (!RXDMA_CHANNEL_VALID(channel)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+		    "npi_rxdma_event_mask",
+		    "channel %d", channel));
+		return (NPI_FAILURE | NPI_RXDMA_CHANNEL_INVALID(channel));
+	}
+
+	switch (op_mode) {
+	case OP_GET:
+		RXDMA_REG_READ64(handle, RX_DMA_ENT_MSK_REG, channel,
+				&mask_p->value);
+		break;
+
+	case OP_SET:
+		RXDMA_REG_WRITE64(handle, RX_DMA_ENT_MSK_REG, channel,
+				mask_p->value);
+		break;
+
+	case OP_UPDATE:
+		RXDMA_REG_READ64(handle, RX_DMA_ENT_MSK_REG, channel,
+				&mask.value);
+		RXDMA_REG_WRITE64(handle, RX_DMA_ENT_MSK_REG, channel,
+			mask_p->value | mask.value);
+		break;
+
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+		    "npi_rxdma_event_mask",
+		    "eventmask %d", op_mode));
+		return (NPI_FAILURE | NPI_RXDMA_OPCODE_INVALID(channel));
+	}
+
+	return (status);
+}
+
+/*
+ * npi_rxdma_event_mask_config():
+ *	This function is called to operate on the event mask
+ *	register which is used for generating interrupts
+ *	and status register.
+ *
+ * Parameters:
+ *	handle		- NPI handle
+ *	op_mode		- OP_GET: get hardware event mask
+ *			  OP_SET: set hardware interrupt event masks
+ *			  OP_CLEAR: clear control and status register to 0s.
+ *	channel		- hardware RXDMA channel from 0 to 23.
+ *	mask_cfgp		- pointer to NPI defined event mask
+ *			  enum data type.
+ * Return:
+ *	NPI_SUCCESS		- If the set completes successfully.
+ *
+ *	Error:
+ *	NPI error status code
+ */
+npi_status_t
+npi_rxdma_event_mask_config(npi_handle_t handle, io_op_t op_mode,
+		uint8_t channel, rxdma_ent_msk_cfg_t *mask_cfgp)
+{
+	int		status = NPI_SUCCESS;
+	uint64_t	value;
+
+	ASSERT(RXDMA_CHANNEL_VALID(channel));
+	if (!RXDMA_CHANNEL_VALID(channel)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+		    "npi_rxdma_event_mask_config",
+		    "channel %d", channel));
+		return (NPI_FAILURE | NPI_RXDMA_CHANNEL_INVALID(channel));
+	}
+
+	switch (op_mode) {
+	case OP_GET:
+		RXDMA_REG_READ64(handle, RX_DMA_ENT_MSK_REG, channel,
+				mask_cfgp);
+		break;
+
+	case OP_SET:
+		RXDMA_REG_WRITE64(handle, RX_DMA_ENT_MSK_REG, channel,
+				*mask_cfgp);
+		break;
+
+	case OP_UPDATE:
+		RXDMA_REG_READ64(handle, RX_DMA_ENT_MSK_REG, channel, &value);
+		RXDMA_REG_WRITE64(handle, RX_DMA_ENT_MSK_REG, channel,
+			*mask_cfgp | value);
+		break;
+
+	case OP_CLEAR:
+		RXDMA_REG_WRITE64(handle, RX_DMA_ENT_MSK_REG, channel,
+			CFG_RXDMA_MASK_ALL);
+		break;
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+		    "npi_rxdma_event_mask_config",
+		    "eventmask %d", op_mode));
+		return (NPI_FAILURE | NPI_RXDMA_OPCODE_INVALID(channel));
+	}
+
+	return (status);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nxge/npi/npi_rxdma.h	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,1335 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _NPI_RXDMA_H
+#define	_NPI_RXDMA_H
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#include <npi.h>
+
+#include "nxge_defs.h"
+#include "nxge_hw.h"
+#include <nxge_rxdma_hw.h>
+
+/*
+ * Register offset (0x200 bytes for each channel) for receive ring registers.
+ */
+#define	NXGE_RXDMA_OFFSET(x, v, channel) (x + \
+		(!v ? DMC_OFFSET(channel) : \
+		    RDMC_PIOVADDR_OFFSET(channel)))
+
+
+#define	 REG_FZC_RDC_OFFSET(reg, rdc) (reg + RX_LOG_DMA_OFFSET(rdc))
+
+#define	 REG_RDC_TABLE_OFFSET(table) \
+	    (RDC_TBL_REG + table * (NXGE_MAX_RDCS * 8))
+
+#define	RXDMA_REG_READ64(handle, reg, channel, data_p) {\
+	NXGE_REG_RD64(handle, (NXGE_RXDMA_OFFSET(reg, handle.is_vraddr,\
+			channel)), (data_p))\
+}
+
+#define	RXDMA_REG_READ32(handle, reg, channel) \
+	NXGE_NPI_PIO_READ32(handle, (NXGE_RXDMA_OFFSET(reg, handle.is_vraddr,\
+			channel)))
+
+
+#define	RXDMA_REG_WRITE64(handle, reg, channel, data) {\
+	NXGE_REG_WR64(handle, (NXGE_RXDMA_OFFSET(reg, handle.is_vraddr,\
+			channel)), (data))\
+}
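+
+/*
+ * Worked example (sketch): with the 0x200 bytes per channel spacing noted
+ * above, a read of RX_DMA_CTL_STAT_REG for channel 2 through a
+ * non-virtualized handle (handle.is_vraddr == 0) resolves to
+ *
+ *	NXGE_RXDMA_OFFSET(RX_DMA_CTL_STAT_REG, 0, 2)
+ *	    == RX_DMA_CTL_STAT_REG + DMC_OFFSET(2)
+ *	    == RX_DMA_CTL_STAT_REG + (2 * 0x200)
+ *
+ * assuming DMC_OFFSET() is the 0x200 per-channel stride noted above; a
+ * virtualized handle uses the RDMC_PIOVADDR_OFFSET() stride instead.
+ */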
+
+/*
+ * RX NPI error codes
+ */
+#define	RXDMA_ER_ST			(RXDMA_BLK_ID << NPI_BLOCK_ID_SHIFT)
+#define	RXDMA_ID_SHIFT(n)		(n << NPI_PORT_CHAN_SHIFT)
+
+
+#define	NPI_RXDMA_ERROR			RXDMA_ER_ST
+
+#define	NPI_RXDMA_SW_PARAM_ERROR	(NPI_RXDMA_ERROR | 0x40)
+#define	NPI_RXDMA_HW_ERROR	(NPI_RXDMA_ERROR | 0x80)
+
+#define	NPI_RXDMA_RDC_INVALID		(NPI_RXDMA_ERROR | CHANNEL_INVALID)
+#define	NPI_RXDMA_PAGE_INVALID		(NPI_RXDMA_ERROR | LOGICAL_PAGE_INVALID)
+#define	NPI_RXDMA_RESET_ERR		(NPI_RXDMA_HW_ERROR | RESET_FAILED)
+#define	NPI_RXDMA_DISABLE_ERR		(NPI_RXDMA_HW_ERROR | 0x0000a)
+#define	NPI_RXDMA_ENABLE_ERR		(NPI_RXDMA_HW_ERROR | 0x0000b)
+#define	NPI_RXDMA_FUNC_INVALID		(NPI_RXDMA_SW_PARAM_ERROR | 0x0000a)
+#define	NPI_RXDMA_BUFSZIE_INVALID	(NPI_RXDMA_SW_PARAM_ERROR | 0x0000b)
+#define	NPI_RXDMA_RBRSZIE_INVALID	(NPI_RXDMA_SW_PARAM_ERROR | 0x0000c)
+#define	NPI_RXDMA_RCRSZIE_INVALID	(NPI_RXDMA_SW_PARAM_ERROR | 0x0000d)
+#define	NPI_RXDMA_PORT_INVALID		(NPI_RXDMA_ERROR | PORT_INVALID)
+#define	NPI_RXDMA_TABLE_INVALID		(NPI_RXDMA_ERROR | RDC_TAB_INVALID)
+
+#define	NPI_RXDMA_CHANNEL_INVALID(n)	(RXDMA_ID_SHIFT(n) |	\
+					NPI_RXDMA_ERROR | CHANNEL_INVALID)
+#define	NPI_RXDMA_OPCODE_INVALID(n)	(RXDMA_ID_SHIFT(n) |	\
+					NPI_RXDMA_ERROR | OPCODE_INVALID)
+
+
+#define	NPI_RXDMA_ERROR_ENCODE(err, rdc)	\
+	(RXDMA_ID_SHIFT(rdc) | RXDMA_ER_ST | err)
+
+
+#define	RXDMA_CHANNEL_VALID(rdc) \
+	((rdc < NXGE_MAX_RDCS))
+
+#define	RXDMA_PORT_VALID(port) \
+	((port < MAX_PORTS_PER_NXGE))
+
+#define	RXDMA_TABLE_VALID(table) \
+	((table < NXGE_MAX_RDC_GROUPS))
+
+
+#define	RXDMA_PAGE_VALID(page) \
+	((page == 0) || (page == 1))
+
+#define	RXDMA_BUFF_OFFSET_VALID(offset) \
+	((offset == SW_OFFSET_NO_OFFSET) || \
+	    (offset == SW_OFFSET_64) || \
+	    (offset == SW_OFFSET_128))
+
+
+#define	RXDMA_RCR_TO_VALID(tov) ((tov) && (tov < 64))
+#define	RXDMA_RCR_THRESH_VALID(thresh) ((thresh) && (thresh < 512))
+
+
+/*
+ * RXDMA NPI defined control types.
+ */
+typedef	enum _rxdma_cs_cntl_e {
+	RXDMA_CS_CLEAR_ALL		= 0x1,
+	RXDMA_MEX_SET			= 0x2,
+	RXDMA_RCRTO_CLEAR		= 0x8,
+	RXDMA_PT_DROP_PKT_CLEAR		= 0x10,
+	RXDMA_WRED_DROP_CLEAR		= 0x20,
+	RXDMA_RCR_SFULL_CLEAR		= 0x40,
+	RXDMA_RCR_FULL_CLEAR		= 0x80,
+	RXDMA_RBR_PRE_EMPTY_CLEAR	= 0x100,
+	RXDMA_RBR_EMPTY_CLEAR		= 0x200
+} rxdma_cs_cntl_t;
+
+/*
+ * RXDMA NPI defined event masks (mapped to the hardware defined masks).
+ */
+typedef	enum _rxdma_ent_msk_cfg_e {
+	CFG_RXDMA_ENT_MSK_CFIGLOGPGE_MASK = RX_DMA_ENT_MSK_CFIGLOGPGE_MASK,
+	CFG_RXDMA_ENT_MSK_RBRLOGPGE_MASK  = RX_DMA_ENT_MSK_RBRLOGPGE_MASK,
+	CFG_RXDMA_ENT_MSK_RBRFULL_MASK	  = RX_DMA_ENT_MSK_RBRFULL_MASK,
+	CFG_RXDMA_ENT_MSK_RBREMPTY_MASK	  = RX_DMA_ENT_MSK_RBREMPTY_MASK,
+	CFG_RXDMA_ENT_MSK_RCRFULL_MASK	  = RX_DMA_ENT_MSK_RCRFULL_MASK,
+	CFG_RXDMA_ENT_MSK_RCRINCON_MASK	  = RX_DMA_ENT_MSK_RCRINCON_MASK,
+	CFG_RXDMA_ENT_MSK_CONFIG_ERR	  = RX_DMA_ENT_MSK_CONFIG_ERR_MASK,
+	CFG_RXDMA_ENT_MSK_RCR_SH_FULL_MASK = RX_DMA_ENT_MSK_RCRSH_FULL_MASK,
+	CFG_RXDMA_ENT_MSK_RBR_PRE_EMTY_MASK = RX_DMA_ENT_MSK_RBR_PRE_EMPTY_MASK,
+	CFG_RXDMA_ENT_MSK_WRED_DROP_MASK   = RX_DMA_ENT_MSK_WRED_DROP_MASK,
+	CFG_RXDMA_ENT_MSK_PT_DROP_PKT_MASK = RX_DMA_ENT_MSK_PTDROP_PKT_MASK,
+	CFG_RXDMA_ENT_MSK_RBR_PRE_PAR_MASK = RX_DMA_ENT_MSK_RBR_PRE_PAR_MASK,
+	CFG_RXDMA_ENT_MSK_RCR_SHA_PAR_MASK = RX_DMA_ENT_MSK_RCR_SHA_PAR_MASK,
+	CFG_RXDMA_ENT_MSK_RCRTO_MASK	  = RX_DMA_ENT_MSK_RCRTO_MASK,
+	CFG_RXDMA_ENT_MSK_THRES_MASK	  = RX_DMA_ENT_MSK_THRES_MASK,
+	CFG_RXDMA_ENT_MSK_DC_FIFO_ERR_MASK  = RX_DMA_ENT_MSK_DC_FIFO_ERR_MASK,
+	CFG_RXDMA_ENT_MSK_RCR_ACK_ERR_MASK  = RX_DMA_ENT_MSK_RCR_ACK_ERR_MASK,
+	CFG_RXDMA_ENT_MSK_RSP_DAT_ERR_MASK  = RX_DMA_ENT_MSK_RSP_DAT_ERR_MASK,
+	CFG_RXDMA_ENT_MSK_BYTE_EN_BUS_MASK  = RX_DMA_ENT_MSK_BYTE_EN_BUS_MASK,
+	CFG_RXDMA_ENT_MSK_RSP_CNT_ERR_MASK  = RX_DMA_ENT_MSK_RSP_CNT_ERR_MASK,
+	CFG_RXDMA_ENT_MSK_RBR_TMOUT_MASK  = RX_DMA_ENT_MSK_RBR_TMOUT_MASK,
+
+	CFG_RXDMA_MASK_ALL	  = (RX_DMA_ENT_MSK_CFIGLOGPGE_MASK |
+					RX_DMA_ENT_MSK_RBRLOGPGE_MASK |
+					RX_DMA_ENT_MSK_RBRFULL_MASK |
+					RX_DMA_ENT_MSK_RBREMPTY_MASK |
+					RX_DMA_ENT_MSK_RCRFULL_MASK |
+					RX_DMA_ENT_MSK_RCRINCON_MASK |
+					RX_DMA_ENT_MSK_CONFIG_ERR_MASK |
+					RX_DMA_ENT_MSK_RCRSH_FULL_MASK |
+					RX_DMA_ENT_MSK_RBR_PRE_EMPTY_MASK |
+					RX_DMA_ENT_MSK_WRED_DROP_MASK |
+					RX_DMA_ENT_MSK_PTDROP_PKT_MASK |
+					RX_DMA_ENT_MSK_RBR_PRE_PAR_MASK |
+					RX_DMA_ENT_MSK_RCR_SHA_PAR_MASK |
+					RX_DMA_ENT_MSK_RCRTO_MASK |
+					RX_DMA_ENT_MSK_THRES_MASK |
+					RX_DMA_ENT_MSK_DC_FIFO_ERR_MASK |
+					RX_DMA_ENT_MSK_RCR_ACK_ERR_MASK |
+					RX_DMA_ENT_MSK_RSP_DAT_ERR_MASK |
+					RX_DMA_ENT_MSK_BYTE_EN_BUS_MASK |
+					RX_DMA_ENT_MSK_RSP_CNT_ERR_MASK |
+					RX_DMA_ENT_MSK_RBR_TMOUT_MASK)
+} rxdma_ent_msk_cfg_t;
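+
+/*
+ * Sketch (illustrative only): the CFG_RXDMA_ENT_MSK_* values map directly
+ * onto the hardware mask bits, so a composite mask can be built by OR-ing
+ * them together before calling npi_rxdma_event_mask_config().  Whether a
+ * set bit enables or masks the corresponding interrupt is defined by the
+ * hardware; this only shows how the values compose.  "hndl" is an assumed
+ * npi_handle_t obtained elsewhere.
+ *
+ *	rxdma_ent_msk_cfg_t cfg;
+ *
+ *	cfg = CFG_RXDMA_ENT_MSK_RCRTO_MASK | CFG_RXDMA_ENT_MSK_THRES_MASK;
+ *	(void) npi_rxdma_event_mask_config(hndl, OP_SET, 0, &cfg);
+ */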
+
+
+
+typedef union _addr44 {
+	uint64_t	addr;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t rsrvd:20;
+		uint32_t hdw:12;
+		uint32_t ldw;
+#else
+		uint32_t ldw;
+		uint32_t hdw:12;
+		uint32_t rsrvd:20;
+#endif
+	} bits;
+} addr44_t;
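+
+/*
+ * Sketch (illustrative only): an addr44_t carries a 44-bit DMA address as
+ * a 32-bit low word plus a 12-bit high word.  A caller holding the RCR
+ * tail returned by npi_rxdma_rdc_rcr_tail_get() could rebuild the flat
+ * address as follows ("tail" is an assumed local addr44_t):
+ *
+ *	uint64_t flat;
+ *
+ *	flat = ((uint64_t)tail.bits.hdw << 32) | tail.bits.ldw;
+ */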
+
+
+/*
+ * npi_rxdma_cfg_default_port_rdc()
+ * Set the default rdc for the port
+ *
+ * Inputs:
+ *	handle:		register handle interpreted by the underlying OS
+ *	portnm:		Physical Port Number
+ *	rdc:	RX DMA Channel number
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_RXDMA_RDC_INVALID
+ * NPI_RXDMA_PORT_INVALID
+ *
+ */
+
+npi_status_t npi_rxdma_cfg_default_port_rdc(npi_handle_t,
+				    uint8_t, uint8_t);
+
+/*
+ * npi_rxdma_cfg_rdc_table()
+ * Configure/populate the RDC table
+ *
+ * Inputs:
+ *	handle:		register handle interpreted by the underlying OS
+ *	table:		RDC Group Number
+ *	rdc[]:	 Array of RX DMA Channels
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_RXDMA_TABLE_INVALID
+ *
+ */
+
+npi_status_t npi_rxdma_cfg_rdc_table(npi_handle_t,
+			    uint8_t, uint8_t []);
+
+npi_status_t npi_rxdma_cfg_rdc_table_default_rdc(npi_handle_t,
+					    uint8_t, uint8_t);
+npi_status_t npi_rxdma_cfg_rdc_rcr_timeout_disable(npi_handle_t,
+					    uint8_t);
+
+
+/*
+ * npi_rxdma_32bitmode_enable()
+ * Enable 32 bit mode
+ *
+ * Inputs:
+ *	handle:		register handle interpreted by the underlying OS
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ * NPI_HW_ERR
+ *
+ */
+
+npi_status_t npi_rxdma_cfg_32bitmode_enable(npi_handle_t);
+
+
+/*
+ * npi_rxdma_32bitmode_disable()
+ * disable 32 bit mode
+ *
+ * Inputs:
+ *	handle:		register handle interpreted by the underlying OS
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ * NPI_HW_ERR
+ *
+ */
+
+
+npi_status_t npi_rxdma_cfg_32bitmode_disable(npi_handle_t);
+
+/*
+ * npi_rxdma_cfg_ram_access_enable()
+ * Enable PIO access to shadow and prefetch memory.
+ * In the case of DMA errors, software may need to
+ * initialize the shadow and prefetch memories to
+ * sane value (may be clear it) before re-enabling
+ * the DMA channel.
+ *
+ * Inputs:
+ *	handle:		register handle interpreted by the underlying OS
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ * NPI_HW_ERR
+ *
+ */
+
+npi_status_t npi_rxdma_cfg_ram_access_enable(npi_handle_t);
+
+
+/*
+ * npi_rxdma_cfg_ram_access_disable()
+ * Disable PIO access to shadow and prefetch memory.
+ * This is the normal operation mode.
+ *
+ * Inputs:
+ *	handle:		register handle interpreted by the underlying OS
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ * NPI_HW_ERR
+ *
+ */
+
+npi_status_t npi_rxdma_cfg_ram_access_disable(npi_handle_t);
+
+
+/*
+ * npi_rxdma_cfg_clock_div_set()
+ * init the clock division, used for RX timers
+ * This determines the granularity of RX DMA countdown timers
+ * It depends on the system clock. For example if the system
+ * clock is 300 MHz, a value of 30000 will yield a granularity
+ * of 100usec.
+ *
+ * Inputs:
+ *	handle:		register handle interpreted by the underlying OS
+ *	count:		System clock divider
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ * NPI_SW_ERR
+ * NPI_HW_ERR
+ *
+ */
+
+npi_status_t npi_rxdma_cfg_clock_div_set(npi_handle_t, uint16_t);
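+
+/*
+ * Worked example (sketch): with the 300 MHz system clock mentioned above,
+ * a divider of 30000 yields one timer tick every 30000 / 300,000,000 s,
+ * i.e. 100 usec.  For a hypothetical 350 MHz clock the same 100 usec
+ * granularity would need a divider of 35000 ("hndl" is an assumed
+ * npi_handle_t):
+ *
+ *	(void) npi_rxdma_cfg_clock_div_set(hndl, 35000);
+ */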
+
+/*
+ * npi_rxdma_cfg_red_rand_init()
+ * init the WRED Discard
+ * By default, it is enabled
+ *
+ * Inputs:
+ *	handle:		register handle interpreted by the underlying OS
+ *	init_value:	WRED init value
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ * NPI_SW_ERR
+ * NPI_HW_ERR
+ *
+ */
+
+npi_status_t npi_rxdma_cfg_red_rand_init(npi_handle_t, uint16_t);
+
+/*
+ * npi_rxdma_cfg_wred_disable()
+ * Disable the WRED discard
+ * By default, it is enabled
+ *
+ * Inputs:
+ *	handle:		register handle interpreted by the underlying OS
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ * NPI_SW_ERR
+ * NPI_HW_ERR
+ *
+ */
+
+
+npi_status_t npi_rxdma_cfg_wred_disable(npi_handle_t);
+
+/*
+ * npi_rxdma_cfg_wred_param()
+ * Configure per rxdma channel WRED parameters
+ * By default, it is enabled
+ *
+ * Inputs:
+ *	handle:		register handle interpreted by the underlying OS
+ *	rdc:	RX DMA Channel number
+ *	wred_params:	WRED configuration parameters
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ * NPI_SW_ERR
+ * NPI_HW_ERR
+ *
+ */
+
+
+
+npi_status_t npi_rxdma_cfg_wred_param(npi_handle_t, uint8_t,
+				    rdc_red_para_t *);
+
+
+/*
+ * npi_rxdma_port_ddr_weight
+ * Set the DDR weight for a port.
+ *
+ * Inputs:
+ *	handle:		register handle interpreted by the underlying OS
+ *	portnm:		Physical Port Number
+ *	weight:		Port relative weight (in approx. bytes)
+ *			Default values are:
+ *			0x400 (port 0 and 1) corresponding to 10 standard
+ *			      size (1500 bytes) Frames
+ *			0x66 (port 2 and 3) corresponding to 10% 10Gig ports
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ * NPI_HW_ERR
+ * NPI_SW_ERR
+ *
+ */
+
+npi_status_t npi_rxdma_cfg_port_ddr_weight(npi_handle_t,
+				    uint8_t, uint32_t);
+
+
+/*
+ * npi_rxdma_port_usage_get()
+ * Gets the port usage, in terms of 16 byte blocks
+ *
+ * NOTE: The register count is cleared upon reading.
+ *
+ * Inputs:
+ *	handle:		register handle interpreted by the underlying OS
+ *	portnm:		Physical Port Number
+ *	blocks:		ptr to save current count.
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ * NPI_HW_ERR
+ * NPI_SW_ERR
+ *
+ */
+
+npi_status_t npi_rxdma_port_usage_get(npi_handle_t,
+				    uint8_t, uint32_t *);
+
+
+/*
+ * npi_rxdma_cfg_logical_page()
+ * Configure per rxdma channel Logical page
+ *
+ * To disable the logical page, set valid = 0;
+ *
+ * Inputs:
+ *	handle:		register handle interpreted by the underlying OS
+ *	rdc:		RX DMA Channel number
+ *	page_params:	Logical Page configuration parameters
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ * NPI_SW_ERR
+ * NPI_HW_ERR
+ *
+ */
+
+
+
+npi_status_t npi_rxdma_cfg_logical_page(npi_handle_t, uint8_t,
+				    dma_log_page_t *);
+
+
+/*
+ * npi_rxdma_cfg_logical_page_handle()
+ * Configure per rxdma channel Logical page handle
+ *
+ *
+ * Inputs:
+ *	handle:		register handle interpreted by the underlying OS
+ *	rdc:		RX DMA Channel number
+ *	pg_handle:	Logical Page handle
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ * NPI_SW_ERR
+ * NPI_HW_ERR
+ *
+ */
+
+
+npi_status_t npi_rxdma_cfg_logical_page_handle(npi_handle_t, uint8_t,
+				    uint64_t);
+
+
+
+
+npi_status_t npi_rxdma_cfg_logical_page_disable(npi_handle_t,
+				    uint8_t, uint8_t);
+
+typedef enum _bsize {
+	SIZE_0B = 0x0,
+	SIZE_64B,
+	SIZE_128B,
+	SIZE_192B,
+	SIZE_256B,
+	SIZE_512B,
+	SIZE_1KB,
+	SIZE_2KB,
+	SIZE_4KB,
+	SIZE_8KB,
+	SIZE_16KB,
+	SIZE_32KB
+} bsize_t;
+
+
+
+/*
+ * npi_rxdma_cfg_rdc_ring()
+ * Configure the RDC channel Rcv Buffer Ring
+ *
+ * Inputs:
+ *	rdc:		RX DMA Channel number
+ *	rdc_params:	RDC configuration parameters
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ * NPI_SW_ERR
+ * NPI_HW_ERR
+ *
+ */
+
+typedef struct _rdc_desc_cfg_t {
+	uint8_t mbox_enable;	/* Enable mailbox update */
+	uint8_t full_hdr;	/* Enable full (18b) header */
+	uint8_t offset;	/* 64 byte offsets */
+	uint8_t valid2;	/* size 2 is valid */
+	bsize_t size2;	/* Size 2 length */
+	uint8_t valid1;	/* size 1 is valid */
+	bsize_t size1;	/* Size 1 length */
+	uint8_t valid0;	/* size 0 is valid */
+	bsize_t size0;	/* Size 0 length */
+	bsize_t page_size;   /* Page or buffer Size */
+	uint8_t	rcr_timeout_enable;
+	uint8_t	rcr_timeout;
+	uint16_t	rcr_threshold;
+	uint16_t rcr_len;	   /* RCR Descriptor size (entries) */
+	uint16_t rbr_len;	   /* RBR Descriptor size (entries) */
+	uint64_t mbox_addr;	   /* Mailbox Address */
+	uint64_t rcr_addr;	   /* RCR Address */
+	uint64_t rbr_addr;	   /* RBR Address */
+} rdc_desc_cfg_t;
+
+
+
+npi_status_t npi_rxdma_cfg_rdc_ring(npi_handle_t, uint8_t,
+				    rdc_desc_cfg_t *);
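+
+/*
+ * Sketch (illustrative only): a caller typically zeroes an rdc_desc_cfg_t,
+ * fills in the ring geometry and hands it to npi_rxdma_cfg_rdc_ring().
+ * All values below are made up for illustration; the DMA addresses
+ * (mbox_dma_addr, rcr_dma_addr, rbr_dma_addr) are assumed to come from
+ * the caller's ring allocation code.
+ *
+ *	rdc_desc_cfg_t cfg;
+ *
+ *	bzero(&cfg, sizeof (cfg));
+ *	cfg.mbox_enable = 1;
+ *	cfg.mbox_addr = mbox_dma_addr;
+ *	cfg.valid0 = 1;
+ *	cfg.size0 = SIZE_256B;
+ *	cfg.page_size = SIZE_4KB;
+ *	cfg.rbr_len = 1024;
+ *	cfg.rbr_addr = rbr_dma_addr;
+ *	cfg.rcr_len = 2048;
+ *	cfg.rcr_addr = rcr_dma_addr;
+ *	cfg.rcr_threshold = 8;
+ *	cfg.rcr_timeout_enable = 1;
+ *	cfg.rcr_timeout = 16;
+ *
+ *	(void) npi_rxdma_cfg_rdc_ring(hndl, rdc, &cfg);
+ */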
+
+
+
+
+/*
+ * npi_rxdma_rdc_rcr_flush
+ * Forces RX completion ring update
+ *
+ * Inputs:
+ *	rdc:		RX DMA Channel number
+ *
+ * Return:
+ *
+ */
+
+#define	npi_rxdma_rdc_rcr_flush(handle, rdc) \
+	RXDMA_REG_WRITE64(handle, RCR_FLSH_REG, rdc, \
+		    (RCR_FLSH_SET << RCR_FLSH_SHIFT))
+
+
+
+/*
+ * npi_rxdma_rdc_rcr_read_update
+ * Update the number of rcr packets and buffers processed
+ *
+ * Inputs:
+ *	channel:	RX DMA Channel number
+ *	num_pkts:	Number of pkts processed by SW.
+ *			    A packet could consist of multiple
+ *			    buffers, as in the case of jumbo packets.
+ *	num_bufs:	Number of buffer processed by SW.
+ *
+ * Return:
+ *	NPI_FAILURE		-
+ *		NPI_RXDMA_OPCODE_INVALID	-
+ *		NPI_RXDMA_CHANNEL_INVALID	-
+ *
+ */
+
+npi_status_t npi_rxdma_rdc_rcr_read_update(npi_handle_t, uint8_t,
+				    uint16_t, uint16_t);
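+
+/*
+ * Usage sketch (illustrative only): after the receive handler has walked
+ * a batch of completion ring entries it reports the totals back so the
+ * hardware can reclaim them; "npkts" and "nbufs" are assumed to be the
+ * counts accumulated while processing the ring, "hndl" an npi_handle_t.
+ *
+ *	(void) npi_rxdma_rdc_rcr_read_update(hndl, rdc, npkts, nbufs);
+ *	npi_rxdma_rdc_rcr_flush(hndl, rdc);
+ */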
+/*
+ * npi_rxdma_rdc_rcr_pktread_update
+ * Update the number of packets processed
+ *
+ * Inputs:
+ *	channel:	RX DMA Channel number
+ *	num_pkts:	Number of pkts processed by SW.
+ *			A packet could consist of multiple
+ *			buffers, as in the case of jumbo packets.
+ *
+ * Return:
+ *	NPI_FAILURE		-
+ *		NPI_RXDMA_OPCODE_INVALID	-
+ *		NPI_RXDMA_CHANNEL_INVALID	-
+ *
+ */
+
+npi_status_t npi_rxdma_rdc_rcr_pktread_update(npi_handle_t,
+					uint8_t, uint16_t);
+
+
+
+/*
+ * npi_rxdma_rdc_rcr_bufread_update
+ * Update the number of buffers processed
+ *
+ * Inputs:
+ *	channel:		RX DMA Channel number
+ *	num_bufs:	Number of buffers processed by SW.  Multiple
+ *			buffers can be part of a single packet.
+ *
+ * Return:
+ *	NPI_FAILURE		-
+ *		NPI_RXDMA_OPCODE_INVALID	-
+ *		NPI_RXDMA_CHANNEL_INVALID	-
+ *
+ */
+
+npi_status_t npi_rxdma_rdc_rcr_bufread_update(npi_handle_t,
+					uint8_t, uint16_t);
+
+
+
+/*
+ * npi_rxdma_rdc_rbr_kick
+ * Kick RDC RBR
+ *
+ * Inputs:
+ *	rdc:		RX DMA Channel number
+ *	num_buffers:	Number of Buffers posted to the RBR
+ *
+ * Return:
+ *
+ */
+
+#define	npi_rxdma_rdc_rbr_kick(handle, rdc, num_buffers) \
+	RXDMA_REG_WRITE64(handle, RBR_KICK_REG, rdc, num_buffers)
+
+
+/*
+ * npi_rxdma_rdc_rbr_head_get
+ * Gets the current rbr head pointer.
+ *
+ * Inputs:
+ *	rdc:		RX DMA Channel number
+ *	hdptr		ptr to write the rbr head value
+ *
+ * Return:
+ *
+ */
+
+npi_status_t npi_rxdma_rdc_rbr_head_get(npi_handle_t,
+				    uint8_t, addr44_t  *);
+
+
+
+/*
+ * npi_rxdma_rdc_rbr_stat_get
+ * Returns the RBR stat. The stat consists of the
+ * number of RX buffers in the ring. It also indicates
+ * if there has been an overflow.
+ *
+ * Inputs:
+ *	rdc:		RX DMA Channel number
+ *	rbr_stat_t:	Structure to update stat
+ *
+ * Return:
+ *
+ */
+
+npi_status_t npi_rxdma_rdc_rbr_stat_get(npi_handle_t, uint8_t,
+				    rbr_stat_t *);
+
+
+
+/*
+ * npi_rxdma_cfg_rdc_reset
+ * Resets the RDC channel
+ *
+ * Inputs:
+ *	rdc:		RX DMA Channel number
+ *
+ * Return:
+ *
+ */
+
+npi_status_t npi_rxdma_cfg_rdc_reset(npi_handle_t, uint8_t);
+
+
+/*
+ * npi_rxdma_rdc_enable
+ * Enables the RDC channel
+ *
+ * Inputs:
+ *	rdc:		RX DMA Channel number
+ *
+ * Return:
+ *
+ */
+
+npi_status_t npi_rxdma_cfg_rdc_enable(npi_handle_t, uint8_t);
+
+/*
+ * npi_rxdma_rdc_disable
+ * Disables the RDC channel
+ *
+ * Inputs:
+ *	rdc:		RX DMA Channel number
+ *
+ * Return:
+ *
+ */
+
+npi_status_t npi_rxdma_cfg_rdc_disable(npi_handle_t, uint8_t);
+
+
+/*
+ * npi_rxdma_cfg_rdc_rcr_timeout()
+ * Configure the RDC channel completion ring timeout.
+ * If a frame has been received, an event is generated
+ * at least at the expiration of the timeout.
+ *
+ * Enables timeout by default.
+ *
+ * Inputs:
+ *	rdc:		RX DMA Channel number
+ *	rcr_timeout:	Completion Ring timeout value
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ * NPI_SW_ERR
+ * NPI_HW_ERR
+ *
+ */
+
+npi_status_t npi_rxdma_cfg_rdc_rcr_timeout(npi_handle_t, uint8_t,
+				    uint8_t);
+
+
+/*
+ * npi_rxdma_cfg_rdc_rcr_threshold()
+ * Configure the RDC channel completion ring threshold.
+ * An event is generated if the number of frames received
+ * surpasses the threshold value.
+ *
+ * Inputs:
+ *	rdc:		RX DMA Channel number
+ *	rcr_threshold:	Completion Ring Threshold count
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ * NPI_SW_ERR
+ * NPI_HW_ERR
+ *
+ */
+
+npi_status_t npi_rxdma_cfg_rdc_rcr_threshold(npi_handle_t, uint8_t,
+				    uint16_t);
+
+
+npi_status_t npi_rxdma_cfg_rdc_rcr_timeout_disable(npi_handle_t, uint8_t);
+
+typedef struct _rdc_error_stat_t {
+	uint8_t fault:1;
+    uint8_t	multi_fault:1;
+    uint8_t	rbr_fault:1;
+    uint8_t	buff_fault:1;
+    uint8_t	rcr_fault:1;
+	addr44_t fault_addr;
+} rdc_error_stat_t;
+
+#if OLD
+/*
+ * npi_rxdma_rdc_error_stat_get
+ * Gets the current Error stat for the RDC.
+ *
+ * Inputs:
+ *	rdc:		RX DMA Channel number
+ *	error_stat	Structure to write current RDC Error stat
+ *
+ * Return:
+ *
+ */
+
+npi_status_t npi_rxdma_rdc_error_stat_get(npi_handle_t,
+				    uint8_t, rdc_error_stat_t *);
+
+#endif
+
+/*
+ * npi_rxdma_rdc_rcr_tail_get
+ * Gets the current RCR tail address for the RDC.
+ *
+ * Inputs:
+ *	rdc:		RX DMA Channel number
+ *	tail_addr	Structure to write current RDC RCR tail address
+ *
+ * Return:
+ *
+ */
+
+npi_status_t npi_rxdma_rdc_rcr_tail_get(npi_handle_t,
+				    uint8_t, addr44_t *);
+
+
+npi_status_t npi_rxdma_rdc_rcr_qlen_get(npi_handle_t,
+				    uint8_t, uint16_t *);
+
+
+
+typedef struct _rdc_discard_stat_t {
+    uint8_t	nobuf_ovflow;
+    uint8_t	red_ovflow;
+    uint32_t	nobuf_discard;
+    uint32_t	red_discard;
+} rdc_discard_stat_t;
+
+
+/*
+ * npi_rxdma_rdc_discard_stat_get
+ * Gets the current discard stats for the RDC.
+ *
+ * Inputs:
+ *	rdc:		RX DMA Channel number
+ *	rcr_stat	Structure to write current RDC discard stat
+ *
+ * Return:
+ *
+ */
+
+npi_status_t npi_rxdma_rdc_discard_stat_get(npi_handle_t,
+				    uint8_t, rdc_discard_stat_t);
+
+
+/*
+ * npi_rx_port_discard_stat_get
+ * Gets the current input (IPP) discard stats for the rx port.
+ *
+ * Inputs:
+ *	rdc:		RX DMA Channel number
+ *	rx_disc_cnt_t	Structure to write current RDC discard stat
+ *
+ * Return:
+ *
+ */
+
+npi_status_t npi_rx_port_discard_stat_get(npi_handle_t,
+				    uint8_t,
+				    rx_disc_cnt_t *);
+
+
+/*
+ * npi_rxdma_red_discard_stat_get
+ * Gets the current discard count due to RED.
+ * The counter overflow bit is cleared, if it has been set.
+ *
+ * Inputs:
+ *	rdc:		RX DMA Channel number
+ *	rx_disc_cnt_t	Structure to write current RDC discard stat
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_RXDMA_RDC_INVALID
+ *
+ */
+
+npi_status_t npi_rxdma_red_discard_stat_get(npi_handle_t, uint8_t,
+				    rx_disc_cnt_t *);
+
+
+
+/*
+ * npi_rxdma_red_discard_oflow_clear
+ * Clear RED discard counter overflow bit
+ *
+ * Inputs:
+ *	rdc:		RX DMA Channel number
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_RXDMA_RDC_INVALID
+ *
+ */
+
+npi_status_t npi_rxdma_red_discard_oflow_clear(npi_handle_t,
+					uint8_t);
+
+
+
+
+/*
+ * npi_rxdma_misc_discard_stat_get
+ * Gets the current discard count for the rdc due to
+ * buffer pool empty
+ * The counter overflow bit is cleared, if it has been set.
+ *
+ * Inputs:
+ *	rdc:		RX DMA Channel number
+ *	rx_disc_cnt_t	Structure to write current RDC discard stat
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_RXDMA_RDC_INVALID
+ *
+ */
+
+npi_status_t npi_rxdma_misc_discard_stat_get(npi_handle_t, uint8_t,
+				    rx_disc_cnt_t *);
+
+
+
+/*
+ * npi_rxdma_misc_discard_oflow_clear
+ * Clear the overflow bit of the buffer pool empty discard
+ * counter for the rdc
+ *
+ *
+ * Inputs:
+ *	rdc:		RX DMA Channel number
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_RXDMA_RDC_INVALID
+ *
+ */
+
+npi_status_t npi_rxdma_misc_discard_oflow_clear(npi_handle_t,
+					uint8_t);
+
+
+
+/*
+ * npi_rxdma_ring_perr_stat_get
+ * Gets the current RDC Memory parity error
+ * The counter overflow bit is cleared, if it has been set.
+ *
+ * Inputs:
+ * pre_cnt:	Structure to write current RDC Prefetch memory
+ *		Parity Error stat
+ * sha_cnt:	Structure to write current RDC Shadow memory
+ *		Parity Error stat
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_RXDMA_RDC_INVALID
+ *
+ */
+
+npi_status_t npi_rxdma_ring_perr_stat_get(npi_handle_t,
+				    rdmc_par_err_log_t *,
+				    rdmc_par_err_log_t *);
+
+
+/*
+ * npi_rxdma_ring_perr_stat_clear
+ * Clear RDC Memory Parity Error counter overflow bits
+ *
+ * Inputs:
+ * Return:
+ * NPI_SUCCESS
+ *
+ */
+
+npi_status_t npi_rxdma_ring_perr_stat_clear(npi_handle_t);
+
+
+/* Access the RDMC Memory: used for debugging */
+
+npi_status_t npi_rxdma_rdmc_memory_io(npi_handle_t,
+			    rdmc_mem_access_t *, uint8_t);
+
+
+
+/*
+ * npi_rxdma_rxctl_fifo_error_intr_set
+ * Configure the RX ctrl fifo error interrupt generation
+ *
+ * Inputs:
+ *	mask:	rx_ctl_dat_fifo_mask_t specifying the errors
+ *
+ * Return:
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ *
+ */
+
+npi_status_t npi_rxdma_rxctl_fifo_error_intr_set(npi_handle_t,
+				    rx_ctl_dat_fifo_mask_t *);
+
+/*
+ * npi_rxdma_rxctl_fifo_error_status_get
+ * Read the RX ctrl fifo error status
+ *
+ * Inputs:
+ *	stat:	rx_ctl_dat_fifo_stat_t to read the errors to
+ * valid fields in  rx_ctl_dat_fifo_stat_t structure are:
+ * zcp_eop_err, ipp_eop_err, id_mismatch.
+ * Return:
+ * NPI_SUCCESS
+ * NPI_FAILURE
+ *
+ */
+
+npi_status_t npi_rxdma_rxctl_fifo_error_status_get(npi_handle_t,
+				    rx_ctl_dat_fifo_stat_t *);
+
+
+/*
+ * npi_rxdma_channel_mex_set():
+ *	This function is called to arm the DMA channel with
+ *	mailbox updating capability. Software needs to rearm
+ *	for each update by writing to the control and status register.
+ *
+ * Parameters:
+ *	handle		- NPI handle (virtualization flag must be defined).
+ *	channel		- logical RXDMA channel from 0 to 23.
+ *			  (If virtualization flag is not set, then
+ *			   logical channel is the same as the hardware
+ *			   channel number).
+ *
+ * Return:
+ *	NPI_SUCCESS		- If enabling the channel with mailbox
+ *				  update completes successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE	-
+ *		NPI_RXDMA_CHANNEL_INVALID -
+ */
+npi_status_t npi_rxdma_channel_mex_set(npi_handle_t, uint8_t);
+
+/*
+ * npi_rxdma_channel_rcrto_clear():
+ *	This function is called to reset RCRTO bit to 0.
+ *
+ * Parameters:
+ *	handle		- NPI handle (virtualization flag must be defined).
+ *	channel		- logical RXDMA channel from 0 to 23.
+ *			  (If virtualization flag is not set, then
+ *			   logical channel is the same as the hardware
+ *			   channel number).
+ * Return:
+ *	NPI_SUCCESS
+ *
+ *	Error:
+ *	NPI_FAILURE	-
+ *		NPI_RXDMA_CHANNEL_INVALID -
+ */
+npi_status_t npi_rxdma_channel_rcrto_clear(npi_handle_t, uint8_t);
+
+/*
+ * npi_rxdma_channel_pt_drop_pkt_clear():
+ *	This function is called to clear the port drop packet bit (debug).
+ *
+ * Parameters:
+ *	handle		- NPI handle (virtualization flag must be defined).
+ *	channel		- logical RXDMA channel from 0 to 23.
+ *			  (If virtualization flag is not set, then
+ *			   logical channel is the same as the hardware
+ *			   channel number).
+ * Return:
+ *	NPI_SUCCESS
+ *
+ *	Error:
+ *	NPI_FAILURE	-
+ *		NPI_RXDMA_CHANNEL_INVALID -
+ */
+npi_status_t npi_rxdma_channel_pt_drop_pkt_clear(npi_handle_t, uint8_t);
+
+/*
+ * npi_rxdma_channel_wred_drop_clear():
+ *	This function is called to clear the WRED drop bit (debug only).
+ *
+ * Parameters:
+ *	handle		- NPI handle (virtualization flag must be defined).
+ *	channel		- logical RXDMA channel from 0 to 23.
+ *			  (If virtualization flag is not set, then
+ *			   logical channel is the same as the hardware
+ *			   channel number).
+ * Return:
+ *	NPI_SUCCESS
+ *
+ *	Error:
+ *	NPI_FAILURE	-
+ *		NPI_RXDMA_CHANNEL_INVALID -
+ */
+npi_status_t npi_rxdma_channel_wred_drop_clear(npi_handle_t, uint8_t);
+
+/*
+ * npi_rxdma_channel_rcr_shfull_clear():
+ *	This function is called to clear RCR shadow full bit.
+ *
+ * Parameters:
+ *	handle		- NPI handle (virtualization flag must be defined).
+ *	channel		- logical RXDMA channel from 0 to 23.
+ *			  (If virtualization flag is not set, then
+ *			   logical channel is the same as the hardware
+ *			   channel number).
+ * Return:
+ *	NPI_SUCCESS
+ *
+ *	Error:
+ *	NPI_FAILURE	-
+ *		NPI_RXDMA_CHANNEL_INVALID -
+ */
+npi_status_t npi_rxdma_channel_rcr_shfull_clear(npi_handle_t, uint8_t);
+
+/*
+ * npi_rxdma_channel_rcrfull_clear():
+ *	This function is called to clear RCR full bit.
+ *
+ * Parameters:
+ *	handle		- NPI handle (virtualization flag must be defined).
+ *	channel		- logical RXDMA channel from 0 to 23.
+ *			  (If virtualization flag is not set, then
+ *			   logical channel is the same as the hardware
+ *			   channel number).
+ * Return:
+ *	NPI_SUCCESS
+ *
+ *	Error:
+ *	NPI_FAILURE	-
+ *		NPI_RXDMA_CHANNEL_INVALID -
+ */
+npi_status_t npi_rxdma_channel_rcrfull_clear(npi_handle_t, uint8_t);
+
+/*
+ * npi_rxdma_channel_rbr_pre_empty_clear():
+ *	This function is called to clear the rbr_pre_empty
+ *	(RBR prefetch empty) bit in the control and status register.
+ *
+ * Parameters:
+ *	handle		- NPI handle (virtualization flag must be defined).
+ *	channel		- logical RXDMA channel from 0 to 23.
+ *			  (If virtualization flag is not set, then
+ *			   logical channel is the same as the hardware
+ *			   channel number).
+ * Return:
+ *	NPI_SUCCESS
+ *
+ *	Error:
+ *	NPI_FAILURE		-
+ *		NPI_RXDMA_CHANNEL_INVALID -
+ */
+npi_status_t npi_rxdma_channel_rbr_pre_empty_clear(npi_handle_t, uint8_t);
+
+/*
+ * npi_rxdma_channel_control():
+ *	This function is called to control a receive DMA channel
+ *	for arming the channel with mailbox updates, resetting
+ *	various event status bits (control and status register).
+ *
+ * Parameters:
+ *	handle		- NPI handle (virtualization flag must be defined).
+ *	control		- NPI defined control type supported:
+ *				- RXDMA_MEX_SET
+ * 				- RXDMA_RCRTO_CLEAR
+ *				- RXDMA_PT_DROP_PKT_CLEAR
+ *				- RXDMA_WRED_DROP_CLEAR
+ *				- RXDMA_RCR_SFULL_CLEAR
+ *				- RXDMA_RCR_FULL_CLEAR
+ *				- RXDMA_RBR_PRE_EMPTY_CLEAR
+ *	channel		- logical RXDMA channel from 0 to 23.
+ *			  (If virtualization flag is not set, then
+ *			   logical channel is the same as the hardware
+ *			   channel number).
+ * Return:
+ *	NPI_SUCCESS
+ *
+ *	Error:
+ *	NPI_FAILURE		-
+ *		NPI_RXDMA_OPCODE_INVALID	-
+ *		NPI_RXDMA_CHANNEL_INVALID	-
+ */
+npi_status_t npi_rxdma_channel_control(npi_handle_t,
+				rxdma_cs_cntl_t, uint8_t);
+
+/*
+ * npi_rxdma_control_status():
+ *	This function is called to operate on the control
+ *	and status register.
+ *
+ * Parameters:
+ *	handle		- NPI handle
+ *	op_mode		- OP_GET: get hardware control and status
+ *			  OP_SET: set hardware control and status
+ *			  OP_UPDATE: update hardware control and status.
+ *			  OP_CLEAR: clear control and status register to 0s.
+ *	channel		- hardware RXDMA channel from 0 to 23.
+ *	cs_p		- pointer to hardware defined control and status
+ *			  structure.
+ * Return:
+ *	NPI_SUCCESS
+ *
+ *	Error:
+ *	NPI_FAILURE		-
+ *		NPI_RXDMA_OPCODE_INVALID	-
+ *		NPI_RXDMA_CHANNEL_INVALID	-
+ */
+npi_status_t npi_rxdma_control_status(npi_handle_t, io_op_t,
+			uint8_t, p_rx_dma_ctl_stat_t);
+
+/*
+ * npi_rxdma_event_mask():
+ *	This function is called to operate on the event mask
+ *	register which is used for generating interrupts.
+ *
+ * Parameters:
+ *	handle		- NPI handle
+ *	op_mode		- OP_GET: get hardware event mask
+ *			  OP_SET: set hardware interrupt event masks
+ *			  OP_CLEAR: clear control and status register to 0s.
+ *	channel		- hardware RXDMA channel from 0 to 23.
+ *	mask_p		- pointer to hardware defined event mask
+ *			  structure.
+ * Return:
+ *	NPI_SUCCESS		- If the set completes successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE		-
+ *		NPI_RXDMA_OPCODE_INVALID	-
+ *		NPI_RXDMA_CHANNEL_INVALID	-
+ */
+npi_status_t npi_rxdma_event_mask(npi_handle_t, io_op_t,
+		uint8_t, p_rx_dma_ent_msk_t);
+
+/*
+ * npi_rxdma_event_mask_config():
+ *	This function is called to operate on the event mask
+ *	register which is used for generating interrupts
+ *	and status register.
+ *
+ * Parameters:
+ *	handle		- NPI handle
+ *	op_mode		- OP_GET: get hardware event mask
+ *			  OP_SET: set hardware interrupt event masks
+ *			  OP_CLEAR: clear control and status register to 0s.
+ *	channel		- hardware RXDMA channel from 0 to 23.
+ *	cfgp		- pointer to NPI defined event mask
+ *			  enum data type.
+ * Return:
+ *	NPI_SUCCESS		- If the set completes successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE		-
+ *		NPI_RXDMA_OPCODE_INVALID	-
+ *		NPI_RXDMA_CHANNEL_INVALID	-
+ */
+npi_status_t npi_rxdma_event_mask_config(npi_handle_t, io_op_t,
+		uint8_t, rxdma_ent_msk_cfg_t *);
+
+
+/*
+ * npi_rxdma_dump_rdc_regs
+ * Dumps the contents of rdc csrs and fzc registers
+ *
+ * Input:
+ *         rdc:      RX DMA number
+ *
+ * return:
+ *     NPI_SUCCESS
+ *     NPI_FAILURE
+ *     NPI_RXDMA_RDC_INVALID
+ *
+ */
+
+npi_status_t npi_rxdma_dump_rdc_regs(npi_handle_t, uint8_t);
+
+
+/*
+ * npi_rxdma_dump_fzc_regs
+ * Dumps the contents of rdc csrs and fzc registers
+ *
+ * Input:
+ *         rdc:      RX DMA number
+ *
+ * return:
+ *     NPI_SUCCESS
+ *     NPI_FAILURE
+ *     NPI_RXDMA_RDC_INVALID
+ *
+ */
+
+npi_status_t npi_rxdma_dump_fzc_regs(npi_handle_t);
+
+npi_status_t npi_rxdma_channel_rbr_empty_clear(npi_handle_t,
+							uint8_t);
+npi_status_t npi_rxdma_rxctl_fifo_error_intr_get(npi_handle_t,
+				rx_ctl_dat_fifo_stat_t *);
+
+npi_status_t npi_rxdma_rxctl_fifo_error_intr_set(npi_handle_t,
+				rx_ctl_dat_fifo_mask_t *);
+
+npi_status_t npi_rxdma_dump_rdc_table(npi_handle_t, uint8_t);
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _NPI_RXDMA_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nxge/npi/npi_txc.c	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,1063 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#include <npi_txc.h>
+
+/*
+ * Transmit Controller (TXC) Functions.
+ */
+
+uint64_t txc_fzc_dmc_offset[] = {
+	TXC_DMA_MAX_BURST_REG,
+	TXC_DMA_MAX_LENGTH_REG
+};
+
+const char *txc_fzc_dmc_name[] = {
+	"TXC_DMA_MAX_BURST_REG",
+	"TXC_DMA_MAX_LENGTH_REG"
+};
+
+uint64_t txc_fzc_offset [] = {
+	TXC_CONTROL_REG,
+	TXC_TRAINING_REG,
+	TXC_DEBUG_SELECT_REG,
+	TXC_MAX_REORDER_REG,
+	TXC_INT_STAT_DBG_REG,
+	TXC_INT_STAT_REG,
+	TXC_INT_MASK_REG
+};
+
+const char *txc_fzc_name [] = {
+	"TXC_CONTROL_REG",
+	"TXC_TRAINING_REG",
+	"TXC_DEBUG_SELECT_REG",
+	"TXC_MAX_REORDER_REG",
+	"TXC_INT_STAT_DBG_REG",
+	"TXC_INT_STAT_REG",
+	"TXC_INT_MASK_REG"
+};
+
+uint64_t txc_fzc_port_offset[] = {
+	TXC_PORT_CTL_REG,
+	TXC_PORT_DMA_ENABLE_REG,
+	TXC_PKT_STUFFED_REG,
+	TXC_PKT_XMIT_REG,
+	TXC_ROECC_CTL_REG,
+	TXC_ROECC_ST_REG,
+	TXC_RO_DATA0_REG,
+	TXC_RO_DATA1_REG,
+	TXC_RO_DATA2_REG,
+	TXC_RO_DATA3_REG,
+	TXC_RO_DATA4_REG,
+	TXC_SFECC_CTL_REG,
+	TXC_SFECC_ST_REG,
+	TXC_SF_DATA0_REG,
+	TXC_SF_DATA1_REG,
+	TXC_SF_DATA2_REG,
+	TXC_SF_DATA3_REG,
+	TXC_SF_DATA4_REG,
+	TXC_RO_TIDS_REG,
+	TXC_RO_STATE0_REG,
+	TXC_RO_STATE1_REG,
+	TXC_RO_STATE2_REG,
+	TXC_RO_STATE3_REG,
+	TXC_RO_CTL_REG,
+	TXC_RO_ST_DATA0_REG,
+	TXC_RO_ST_DATA1_REG,
+	TXC_RO_ST_DATA2_REG,
+	TXC_RO_ST_DATA3_REG,
+	TXC_PORT_PACKET_REQ_REG
+};
+
+const char *txc_fzc_port_name[] = {
+	"TXC_PORT_CTL_REG",
+	"TXC_PORT_DMA_ENABLE_REG",
+	"TXC_PKT_STUFFED_REG",
+	"TXC_PKT_XMIT_REG",
+	"TXC_ROECC_CTL_REG",
+	"TXC_ROECC_ST_REG",
+	"TXC_RO_DATA0_REG",
+	"TXC_RO_DATA1_REG",
+	"TXC_RO_DATA2_REG",
+	"TXC_RO_DATA3_REG",
+	"TXC_RO_DATA4_REG",
+	"TXC_SFECC_CTL_REG",
+	"TXC_SFECC_ST_REG",
+	"TXC_SF_DATA0_REG",
+	"TXC_SF_DATA1_REG",
+	"TXC_SF_DATA2_REG",
+	"TXC_SF_DATA3_REG",
+	"TXC_SF_DATA4_REG",
+	"TXC_RO_TIDS_REG",
+	"TXC_RO_STATE0_REG",
+	"TXC_RO_STATE1_REG",
+	"TXC_RO_STATE2_REG",
+	"TXC_RO_STATE3_REG",
+	"TXC_RO_CTL_REG",
+	"TXC_RO_ST_DATA0_REG",
+	"TXC_RO_ST_DATA1_REG",
+	"TXC_RO_ST_DATA2_REG",
+	"TXC_RO_ST_DATA3_REG",
+	"TXC_PORT_PACKET_REQ_REG"
+};
+
+/*
+ * npi_txc_dump_tdc_fzc_regs
+ * Dumps the contents of TXC csrs and fzc registers
+ *
+ * Input:
+ *	handle		- NPI handle
+ *         tdc:      TX DMA number
+ *
+ * return:
+ *     NPI_SUCCESS
+ *     NPI_FAILURE
+ *     NPI_TXC_CHANNEL_INVALID
+ *
+ */
+npi_status_t
+npi_txc_dump_tdc_fzc_regs(npi_handle_t handle, uint8_t tdc)
+{
+	uint64_t		value, offset;
+	int 			num_regs, i;
+
+	ASSERT(TXDMA_CHANNEL_VALID(tdc));
+	if (!TXDMA_CHANNEL_VALID(tdc)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			"npi_txc_dump_tdc_fzc_regs"
+			" Invalid TDC number %d \n",
+			tdc));
+		return (NPI_FAILURE | NPI_TXC_CHANNEL_INVALID(tdc));
+	}
+
+	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+		    "\nTXC FZC DMC Register Dump for Channel %d\n",
+			    tdc));
+
+	num_regs = sizeof (txc_fzc_dmc_offset) / sizeof (uint64_t);
+	for (i = 0; i < num_regs; i++) {
+		offset = TXC_FZC_REG_CN_OFFSET(txc_fzc_dmc_offset[i], tdc);
+		NXGE_REG_RD64(handle, offset, &value);
+		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL, "0x%08llx "
+			"%s\t 0x%08llx \n",
+			offset, txc_fzc_dmc_name[i], value));
+	}
+
+	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+		"\n TXC FZC Register Dump for Channel %d done\n", tdc));
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_txc_dump_fzc_regs
+ * Dumps the contents of txc csrs and fzc registers
+ *
+ *
+ * return:
+ *     NPI_SUCCESS
+ *     NPI_FAILURE
+ *
+ */
+npi_status_t
+npi_txc_dump_fzc_regs(npi_handle_t handle)
+{
+
+	uint64_t value;
+	int num_regs, i;
+
+	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+		"\nTXC FZC Common Register Dump\n"));
+
+	num_regs = sizeof (txc_fzc_offset) / sizeof (uint64_t);
+	for (i = 0; i < num_regs; i++) {
+		NXGE_REG_RD64(handle, txc_fzc_offset[i], &value);
+		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL, "0x%08llx "
+			"%s\t 0x%08llx \n",
+			txc_fzc_offset[i], txc_fzc_name[i], value));
+	}
+	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+		"\n TXC FZC Common Register Dump Done \n"));
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_txc_dump_port_fzc_regs
+ * Dumps the contents of TXC csrs and fzc registers
+ *
+ * Input:
+ *	handle		- NPI handle
+ *         port:      port number
+ *
+ * return:
+ *     NPI_SUCCESS
+ *     NPI_FAILURE
+ *
+ */
+npi_status_t
+npi_txc_dump_port_fzc_regs(npi_handle_t handle, uint8_t port)
+{
+	uint64_t		value, offset;
+	int 			num_regs, i;
+
+	ASSERT(IS_PORT_NUM_VALID(port));
+
+	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+		"\nTXC FZC PORT Register Dump for port %d\n", port));
+
+	num_regs = sizeof (txc_fzc_port_offset) / sizeof (uint64_t);
+	for (i = 0; i < num_regs; i++) {
+		offset = TXC_FZC_REG_PT_OFFSET(txc_fzc_port_offset[i], port);
+		NXGE_REG_RD64(handle, offset, &value);
+		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL, "0x%08llx "
+			"%s\t 0x%08llx \n",
+			offset, txc_fzc_port_name[i], value));
+	}
+
+	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+		"\n TXC FZC Register Dump for port %d done\n", port));
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_txc_dma_max_burst():
+ *	This function is called to configure the max burst bytes.
+ *
+ * Parameters:
+ *	handle		- NPI handle
+ *	op_mode		- OP_GET: get max burst value
+ *			- OP_SET: set max burst value
+ *	channel		- channel number (0 - 23)
+ *	dma_max_burst_p - pointer used to return or supply the max burst value.
+ * Return:
+ *	NPI_SUCCESS	- If the operation completes successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE	-
+ *		NPI_TXC_OPCODE_INVALID
+ *		NPI_TXC_CHANNEL_INVALID
+ */
+npi_status_t
+npi_txc_dma_max_burst(npi_handle_t handle, io_op_t op_mode, uint8_t channel,
+		uint32_t *dma_max_burst_p)
+{
+	uint64_t val;
+
+	ASSERT(TXDMA_CHANNEL_VALID(channel));
+	if (!TXDMA_CHANNEL_VALID(channel)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_txc_dma_max_burst"
+				    " Invalid Input: channel <0x%x>",
+				    channel));
+		return (NPI_FAILURE | NPI_TXC_CHANNEL_INVALID(channel));
+	}
+
+	switch (op_mode) {
+	case OP_GET:
+		TXC_FZC_REG_READ64(handle, TXC_DMA_MAX_BURST_REG, channel,
+					&val);
+		*dma_max_burst_p = (uint32_t)val;
+		break;
+
+	case OP_SET:
+		TXC_FZC_REG_WRITE64(handle,
+			TXC_DMA_MAX_BURST_REG, channel, *dma_max_burst_p);
+		break;
+
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_txc_dma_max_burst"
+				    " Invalid Input: burst <0x%x>",
+				    op_mode));
+		return (NPI_FAILURE | NPI_TXC_OPCODE_INVALID(channel));
+	}
+
+	return (NPI_SUCCESS);
+}
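+
+/*
+ * Usage sketch (illustrative only): read the current max burst value for
+ * channel 0 and then double it.  "hndl" is an assumed npi_handle_t; error
+ * checking is omitted for brevity.
+ *
+ *	uint32_t burst;
+ *
+ *	(void) npi_txc_dma_max_burst(hndl, OP_GET, 0, &burst);
+ *	burst *= 2;
+ *	(void) npi_txc_dma_max_burst(hndl, OP_SET, 0, &burst);
+ */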
+
+/*
+ * npi_txc_dma_max_burst_set():
+ *	This function is called to set the max burst bytes.
+ *
+ * Parameters:
+ *	handle		- NPI handle
+ *	channel		- channel number (0 - 23)
+ *	max_burst 	- max burst to set
+ * Return:
+ *	NPI_SUCCESS	- If the operation completes successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE	-
+ */
+npi_status_t
+npi_txc_dma_max_burst_set(npi_handle_t handle, uint8_t channel,
+		uint32_t max_burst)
+{
+	ASSERT(TXDMA_CHANNEL_VALID(channel));
+	if (!TXDMA_CHANNEL_VALID(channel)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_txc_dma_max_burst_set"
+				    " Invalid Input: channel <0x%x>",
+				    channel));
+		return (NPI_FAILURE | NPI_TXC_CHANNEL_INVALID(channel));
+	}
+
+	TXC_FZC_REG_WRITE64(handle, TXC_DMA_MAX_BURST_REG,
+		channel, (uint64_t)max_burst);
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_txc_dma_bytes_transmitted():
+ *	This function is called to get # of bytes transmitted by
+ *	DMA (hardware register is cleared on read).
+ *
+ * Parameters:
+ *	handle		- NPI handle
+ *	channel		- channel number (0 - 23)
+ *	dma_bytes_p 	- pointer to store bytes transmitted.
+ * Return:
+ *	NPI_SUCCESS	- If the get completes successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE	-
+ *		NPI_TXC_PORT_INVALID
+ */
+npi_status_t
+npi_txc_dma_bytes_transmitted(npi_handle_t handle, uint8_t channel,
+		uint32_t *dma_bytes_p)
+{
+	uint64_t val;
+
+	ASSERT(TXDMA_CHANNEL_VALID(channel));
+	if (!TXDMA_CHANNEL_VALID(channel)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_txc_dma_bytes_transmitted"
+				    " Invalid Input: channel %d",
+				    channel));
+		return (NPI_FAILURE | NPI_TXC_CHANNEL_INVALID(channel));
+	}
+
+	TXC_FZC_REG_READ64(handle, TXC_DMA_MAX_LENGTH_REG, channel, &val);
+	*dma_bytes_p = (uint32_t)val;
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_txc_control():
+ *	This function is called to get or set the control register.
+ *
+ * Parameters:
+ *	handle		- NPI handle
+ *	op_mode		- OP_GET: get control register value
+ *			  OP_SET: set control register value
+ *	txc_control_p	- pointer to hardware defined data structure.
+ * Return:
+ *	NPI_SUCCESS	- If the operation completes successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE	-
+ *		NPI_TXC_OPCODE_INVALID
+ *		NPI_TXC_PORT_INVALID
+ */
+npi_status_t
+npi_txc_control(npi_handle_t handle, io_op_t op_mode,
+		p_txc_control_t txc_control_p)
+{
+	switch (op_mode) {
+	case OP_GET:
+		NXGE_REG_RD64(handle, TXC_CONTROL_REG, &txc_control_p->value);
+		break;
+
+	case OP_SET:
+		NXGE_REG_WR64(handle, TXC_CONTROL_REG,
+			txc_control_p->value);
+		break;
+
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_txc_control"
+				    " Invalid Input:  control 0x%x",
+				    op_mode));
+		return (NPI_FAILURE | NPI_TXC_OPCODE_INVALID(op_mode));
+	}
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_txc_global_enable():
+ *	This function is called to globally enable TXC.
+ *
+ * Parameters:
+ *	handle		- NPI handle
+ * Return:
+ *	NPI_SUCCESS	- If enable is complete successfully.
+ *
+ *	Error:
+ */
+npi_status_t
+npi_txc_global_enable(npi_handle_t handle)
+{
+	txc_control_t	cntl;
+	uint64_t	val;
+
+	cntl.value = 0;
+	cntl.bits.ldw.txc_enabled = 1;
+
+	NXGE_REG_RD64(handle, TXC_CONTROL_REG, &val);
+	NXGE_REG_WR64(handle, TXC_CONTROL_REG, val | cntl.value);
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_txc_global_disable():
+ *	This function is called to globally disable TXC.
+ *
+ * Parameters:
+ *	handle		- NPI handle
+ * Return:
+ *	NPI_SUCCESS	- If disable is complete successfully.
+ *
+ *	Error:
+ */
+npi_status_t
+npi_txc_global_disable(npi_handle_t handle)
+{
+	txc_control_t	cntl;
+	uint64_t	val;
+
+	/* Clear only the TXC enable bit; preserve the port enable bits. */
+	cntl.value = 0;
+	cntl.bits.ldw.txc_enabled = 1;
+
+	NXGE_REG_RD64(handle, TXC_CONTROL_REG, &val);
+	NXGE_REG_WR64(handle, TXC_CONTROL_REG, val & ~cntl.value);
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_txc_control_clear():
+ *	This function is called to clear all bits of the TXC port
+ *	control register.
+ *
+ * Parameters:
+ *	handle		- NPI handle
+ *	port		- port number (0 - 3)
+ * Return:
+ *	NPI_SUCCESS	- If reset all bits to 0s is complete successfully.
+ *
+ *	Error:
+ */
+npi_status_t
+npi_txc_control_clear(npi_handle_t handle, uint8_t port)
+{
+	ASSERT(IS_PORT_NUM_VALID(port));
+
+	NXGE_REG_WR64(handle, TXC_PORT_CTL_REG, TXC_PORT_CNTL_CLEAR);
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_txc_training_set():
+ *	This function is called to set the debug training vector.
+ *
+ * Parameters:
+ *	handle			- NPI handle
+ *	vector			- training vector to set.
+ * Return:
+ *	NPI_SUCCESS
+ *
+ *	Error:
+ *	NPI_FAILURE		-
+ */
+npi_status_t
+npi_txc_training_set(npi_handle_t handle, uint32_t vector)
+{
+	NXGE_REG_WR64(handle, TXC_TRAINING_REG, (uint64_t)vector);
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_txc_training_get():
+ *	This function is called to get the debug training vector.
+ *
+ * Parameters:
+ *	handle			- NPI handle
+ *	vector_p		- pointer to store training vector.
+ * Return:
+ *	NPI_SUCCESS
+ *
+ *	Error:
+ *	NPI_FAILURE		-
+ */
+npi_status_t
+npi_txc_training_get(npi_handle_t handle, uint32_t *vector_p)
+{
+	uint64_t val;
+
+	NXGE_REG_RD64(handle, TXC_TRAINING_REG, &val);
+	*vector_p = (uint32_t)(val & TXC_TRAINING_VECTOR_MASK);
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_txc_port_enable():
+ *	This function is called to enable a particular port.
+ *
+ * Parameters:
+ *	handle		- NPI handle
+ *	port		- port number (0 - 3)
+ * Return:
+ *	NPI_SUCCESS	- If port is enabled successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE	-
+ *		NPI_TXC_PORT_INVALID
+ */
+npi_status_t
+npi_txc_port_enable(npi_handle_t handle, uint8_t port)
+{
+	uint64_t val;
+
+	ASSERT(IS_PORT_NUM_VALID(port));
+
+	NXGE_REG_RD64(handle, TXC_CONTROL_REG, &val);
+	NXGE_REG_WR64(handle, TXC_CONTROL_REG, val | (1 << port));
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_txc_port_disable():
+ *	This function is called to disable a particular port.
+ *
+ * Parameters:
+ *	handle		- NPI handle
+ *	port		- port number (0 - 3)
+ * Return:
+ *	NPI_SUCCESS	- If port is disabled successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE	-
+ *		NPI_TXC_PORT_INVALID
+ */
+npi_status_t
+npi_txc_port_disable(npi_handle_t handle, uint8_t port)
+{
+	uint64_t val;
+
+	ASSERT(IS_PORT_NUM_VALID(port));
+
+	NXGE_REG_RD64(handle, TXC_CONTROL_REG, &val);
+	NXGE_REG_WR64(handle, TXC_CONTROL_REG, (val & ~(1 << port)));
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_txc_port_dma_enable():
+ *	This function is called to bind DMA channels (bitmap) to a port.
+ *
+ * Parameters:
+ *	handle			- NPI handle
+ *	port			- port number (0 - 3)
+ *	port_dma_list_bitmap	- channels bitmap
+ *				  (bits 0 - 23, one bit per channel;
+ *				  set a bit to 1 to bind that channel)
+ * Return:
+ *	NPI_SUCCESS		- If channels are bound successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE	-
+ *		NPI_TXC_PORT_INVALID
+ */
+npi_status_t
+npi_txc_port_dma_enable(npi_handle_t handle, uint8_t port,
+		uint32_t port_dma_list_bitmap)
+{
+
+	ASSERT(IS_PORT_NUM_VALID(port));
+
+	TXC_FZC_CNTL_REG_WRITE64(handle, TXC_PORT_DMA_ENABLE_REG, port,
+		port_dma_list_bitmap);
+	return (NPI_SUCCESS);
+}
+
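+/*
+ * npi_txc_port_dma_list_get():
+ *	This function is called to get the DMA channel bindings (bitmap)
+ *	of a port.
+ *
+ * Parameters:
+ *	handle			- NPI handle
+ *	port			- port number (0 - 3)
+ *	port_dma_list_bitmap	- pointer to store the channels bitmap
+ *				  (bits 0 - 23, one bit per channel)
+ * Return:
+ *	NPI_SUCCESS		- If the bitmap is read successfully.
+ */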
+npi_status_t
+npi_txc_port_dma_list_get(npi_handle_t handle, uint8_t port,
+		uint32_t *port_dma_list_bitmap)
+{
+	uint64_t val;
+
+	ASSERT(IS_PORT_NUM_VALID(port));
+
+	TXC_FZC_CNTL_REG_READ64(handle, TXC_PORT_DMA_ENABLE_REG, port, &val);
+	*port_dma_list_bitmap = (uint32_t)(val & TXC_DMA_DMA_LIST_MASK);
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_txc_port_dma_channel_enable():
+ *	This function is called to bind a channel to a port.
+ *
+ * Parameters:
+ *	handle			- NPI handle
+ *	port			- port number (0 - 3)
+ *	channel			- channel number (0 - 23)
+ * Return:
+ *	NPI_SUCCESS		- If channel is bound successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE		-
+ *		NPI_TXC_PORT_INVALID	-
+ */
+npi_status_t
+npi_txc_port_dma_channel_enable(npi_handle_t handle, uint8_t port,
+		uint8_t channel)
+{
+	uint64_t val;
+
+	ASSERT(IS_PORT_NUM_VALID(port));
+
+	ASSERT(TXDMA_CHANNEL_VALID(channel));
+	if (!TXDMA_CHANNEL_VALID(channel)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_txc_port_dma_channel_enable"
+				    " Invalid Input: channel <0x%x>", channel));
+		return (NPI_FAILURE | NPI_TXC_CHANNEL_INVALID(channel));
+	}
+
+	TXC_FZC_CNTL_REG_READ64(handle, TXC_PORT_DMA_ENABLE_REG, port, &val);
+	TXC_FZC_CNTL_REG_WRITE64(handle, TXC_PORT_DMA_ENABLE_REG, port,
+				(val | (1 << channel)));
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_txc_port_dma_channel_disable():
+ *	This function is called to unbind a channel to a port.
+ *
+ * Parameters:
+ *	handle			- NPI handle
+ *	port			- port number (0 - 3)
+ *	channel			- channel number (0 - 23)
+ * Return:
+ *	NPI_SUCCESS		- If channel is unbound successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE		-
+ *		NPI_TXC_PORT_INVALID	-
+ */
+npi_status_t
+npi_txc_port_dma_channel_disable(npi_handle_t handle, uint8_t port,
+		uint8_t channel)
+{
+	uint64_t val;
+
+	ASSERT(IS_PORT_NUM_VALID(port));
+
+	ASSERT(TXDMA_CHANNEL_VALID(channel));
+	if (!TXDMA_CHANNEL_VALID(channel)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_txc_port_dma_channel_disable"
+				    " Invalid Input: channel <0x%x>", channel));
+		return (NPI_FAILURE | NPI_TXC_CHANNEL_INVALID(channel));
+	}
+
+	TXC_FZC_CNTL_REG_READ64(handle, TXC_PORT_DMA_ENABLE_REG, port, &val);
+	TXC_FZC_CNTL_REG_WRITE64(handle, TXC_PORT_DMA_ENABLE_REG, port,
+				val & ~(1 << channel));
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_txc_reorder_set():
+ *	This function is called to set the per port reorder resources.
+ *
+ * Parameters:
+ *	handle			- NPI handle
+ *	port			- port to set
+ *	reorder			- reorder resources (4 bits)
+ * Return:
+ *	NPI_SUCCESS
+ *
+ *	Error:
+ *	NPI_FAILURE		-
+ */
+npi_status_t
+npi_txc_reorder_set(npi_handle_t handle, uint8_t port, uint8_t *reorder)
+{
+	uint64_t val;
+
+	ASSERT(IS_PORT_NUM_VALID(port));
+
+	NXGE_REG_RD64(handle, TXC_MAX_REORDER_REG, &val);
+
+	val |= (*reorder << TXC_MAX_REORDER_SHIFT(port));
+
+	NXGE_REG_WR64(handle, TXC_MAX_REORDER_REG, val);
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_txc_reorder_get():
+ *	This function is called to get the txc reorder resources.
+ *
+ * Parameters:
+ *	handle			- NPI handle
+ *	port			- port to get
+ *	reorder			- data to be stored at
+ * Return:
+ *	NPI_SUCCESS
+ *
+ *	Error:
+ *	NPI_FAILURE		-
+ */
+npi_status_t
+npi_txc_reorder_get(npi_handle_t handle, uint8_t port, uint32_t *reorder)
+{
+	uint64_t val;
+
+	ASSERT(IS_PORT_NUM_VALID(port));
+
+	NXGE_REG_RD64(handle, TXC_MAX_REORDER_REG, &val);
+
+	*reorder = (uint8_t)(val >> TXC_MAX_REORDER_SHIFT(port));
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_txc_pkt_stuffed_get():
+ *	This function is called to get total # of packets processed
+ *	by reorder engine and packetAssy engine.
+ *
+ * Parameters:
+ *	handle		- NPI handle
+ *	port		- port number (0 - 3)
+ *	pkt_assy_p 	- packets processed by Assy engine.
+ *	pkt_reorder_p	- packets processed by reorder engine.
+ *
+ * Return:
+ *	NPI_SUCCESS	- If get is complete successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE	-
+ *		NPI_TXC_PORT_INVALID
+ */
+npi_status_t
+npi_txc_pkt_stuffed_get(npi_handle_t handle, uint8_t port,
+		uint32_t *pkt_assy_p, uint32_t *pkt_reorder_p)
+{
+	uint64_t		value;
+
+	ASSERT(IS_PORT_NUM_VALID(port));
+
+	TXC_FZC_CNTL_REG_READ64(handle, TXC_PKT_STUFFED_REG, port, &value);
+	*pkt_assy_p = ((uint32_t)((value & TXC_PKT_STUFF_PKTASY_MASK) >>
+		TXC_PKT_STUFF_PKTASY_SHIFT));
+	*pkt_reorder_p = ((uint32_t)((value & TXC_PKT_STUFF_REORDER_MASK) >>
+		TXC_PKT_STUFF_REORDER_SHIFT));
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_txc_pkt_xmt_to_mac_get():
+ *	This function is called to get total # of packets transmitted
+ *	to the MAC.
+ *
+ * Parameters:
+ *	handle		- NPI handle
+ *	port		- port number (0 - 3)
+ *	mac_bytes_p 	- bytes transmitted to the MAC.
+ *	mac_pkts_p	- packets transmitted to the MAC.
+ *
+ * Return:
+ *	NPI_SUCCESS	- If get is complete successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE	-
+ *	NPI_TXC_PORT_INVALID
+ */
+npi_status_t
+npi_txc_pkt_xmt_to_mac_get(npi_handle_t handle, uint8_t port,
+		uint32_t *mac_bytes_p, uint32_t *mac_pkts_p)
+{
+	uint64_t		value;
+
+	ASSERT(IS_PORT_NUM_VALID(port));
+
+	TXC_FZC_CNTL_REG_READ64(handle, TXC_PKT_XMIT_REG, port, &value);
+	*mac_pkts_p = ((uint32_t)((value & TXC_PKTS_XMIT_MASK) >>
+		TXC_PKTS_XMIT_SHIFT));
+	*mac_bytes_p = ((uint32_t)((value & TXC_BYTES_XMIT_MASK) >>
+		TXC_BYTES_XMIT_SHIFT));
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_txc_ro_states_get():
+ *	This function is called to get TXC's reorder state-machine states.
+ *
+ * Parameters:
+ *	handle		- NPI handle
+ *	port		- port number
+ *	*states		- TXC Re-order states.
+ *
+ * Return:
+ *	NPI_SUCCESS	- If get is complete successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE	-
+ *	NPI_TXC_PORT_INVALID
+ */
+npi_status_t
+npi_txc_ro_states_get(npi_handle_t handle, uint8_t port,
+				txc_ro_states_t *states)
+{
+	txc_ro_ctl_t	ctl;
+	txc_ro_tids_t	tids;
+	txc_ro_state0_t	s0;
+	txc_ro_state1_t	s1;
+	txc_ro_state2_t	s2;
+	txc_ro_state3_t	s3;
+	txc_roecc_st_t	ecc;
+	txc_ro_data0_t	d0;
+	txc_ro_data1_t	d1;
+	txc_ro_data2_t	d2;
+	txc_ro_data3_t	d3;
+	txc_ro_data4_t	d4;
+
+	ASSERT(IS_PORT_NUM_VALID(port));
+
+	TXC_FZC_CNTL_REG_READ64(handle, TXC_ROECC_ST_REG, port, &ecc.value);
+	if ((ecc.bits.ldw.correct_error) || (ecc.bits.ldw.uncorrect_error)) {
+		TXC_FZC_CNTL_REG_READ64(handle, TXC_RO_DATA0_REG, port,
+								&d0.value);
+		TXC_FZC_CNTL_REG_READ64(handle, TXC_RO_DATA1_REG, port,
+								&d1.value);
+		TXC_FZC_CNTL_REG_READ64(handle, TXC_RO_DATA2_REG, port,
+								&d2.value);
+		TXC_FZC_CNTL_REG_READ64(handle, TXC_RO_DATA3_REG, port,
+								&d3.value);
+		TXC_FZC_CNTL_REG_READ64(handle, TXC_RO_DATA4_REG, port,
+								&d4.value);
+		states->d0.value = d0.value;
+		states->d1.value = d1.value;
+		states->d2.value = d2.value;
+		states->d3.value = d3.value;
+		states->d4.value = d4.value;
+
+		ecc.bits.ldw.ecc_address = 0;
+		ecc.bits.ldw.correct_error = 0;
+		ecc.bits.ldw.uncorrect_error = 0;
+		ecc.bits.ldw.clr_st = 1;
+		TXC_FZC_CNTL_REG_WRITE64(handle, TXC_ROECC_ST_REG, port,
+						ecc.value);
+	}
+
+	TXC_FZC_CNTL_REG_READ64(handle, TXC_RO_CTL_REG, port, &ctl.value);
+	TXC_FZC_CNTL_REG_READ64(handle, TXC_RO_STATE0_REG, port, &s0.value);
+	TXC_FZC_CNTL_REG_READ64(handle, TXC_RO_STATE1_REG, port, &s1.value);
+	TXC_FZC_CNTL_REG_READ64(handle, TXC_RO_STATE2_REG, port, &s2.value);
+	TXC_FZC_CNTL_REG_READ64(handle, TXC_RO_STATE3_REG, port, &s3.value);
+	TXC_FZC_CNTL_REG_READ64(handle, TXC_RO_TIDS_REG, port, &tids.value);
+
+	states->roecc.value = ecc.value;
+	states->st0.value = s0.value;
+	states->st1.value = s1.value;
+	states->st2.value = s2.value;
+	states->st3.value = s3.value;
+	states->ctl.value = ctl.value;
+	states->tids.value = tids.value;
+
+	ctl.bits.ldw.clr_fail_state = 1;
+	TXC_FZC_CNTL_REG_WRITE64(handle, TXC_RO_CTL_REG, port, ctl.value);
+
+	return (NPI_SUCCESS);
+}
+
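+/*
+ * npi_txc_ro_ecc_state_clr():
+ *	This function is called to clear the reorder ECC state register.
+ *
+ * Parameters:
+ *	handle		- NPI handle
+ *	port		- port number (0 - 3)
+ * Return:
+ *	NPI_SUCCESS	- If clear is complete successfully.
+ */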
+npi_status_t
+npi_txc_ro_ecc_state_clr(npi_handle_t handle, uint8_t port)
+{
+	ASSERT(IS_PORT_NUM_VALID(port));
+
+	TXC_FZC_CNTL_REG_WRITE64(handle, TXC_ROECC_ST_REG, port, 0);
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_txc_sf_states_get():
+ *	This function is called to get TXC's store-forward state-machine states.
+ *
+ * Parameters:
+ *	handle		- NPI handle
+ *	port		- port number
+ *	states		- TXC Store-forward states
+ *
+ * Return:
+ *	NPI_SUCCESS	- If get is complete successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE	-
+ *	NPI_TXC_PORT_INVALID
+ */
+#ifdef lint
+/*ARGSUSED*/
+#endif
+npi_status_t
+npi_txc_sf_states_get(npi_handle_t handle, uint8_t port,
+				txc_sf_states_t *states)
+{
+	txc_sfecc_st_t	ecc;
+	txc_sf_data0_t	d0;
+	txc_sf_data1_t	d1;
+	txc_sf_data2_t	d2;
+	txc_sf_data3_t	d3;
+	txc_sf_data4_t	d4;
+
+	ASSERT(IS_PORT_NUM_VALID(port));
+
+	TXC_FZC_CNTL_REG_READ64(handle, TXC_SFECC_ST_REG, port, &ecc.value);
+	if ((ecc.bits.ldw.correct_error) || (ecc.bits.ldw.uncorrect_error)) {
+		TXC_FZC_CNTL_REG_READ64(handle, TXC_SF_DATA0_REG, port,
+								&d0.value);
+		TXC_FZC_CNTL_REG_READ64(handle, TXC_SF_DATA1_REG, port,
+								&d1.value);
+		TXC_FZC_CNTL_REG_READ64(handle, TXC_SF_DATA2_REG, port,
+								&d2.value);
+		TXC_FZC_CNTL_REG_READ64(handle, TXC_SF_DATA3_REG, port,
+								&d3.value);
+		TXC_FZC_CNTL_REG_READ64(handle, TXC_SF_DATA4_REG, port,
+								&d4.value);
+		/*
+		 * Copy the captured data only when an ECC error was
+		 * reported; the data registers are not read otherwise.
+		 */
+		states->d0.value = d0.value;
+		states->d1.value = d1.value;
+		states->d2.value = d2.value;
+		states->d3.value = d3.value;
+		states->d4.value = d4.value;
+
+		ecc.bits.ldw.ecc_address = 0;
+		ecc.bits.ldw.correct_error = 0;
+		ecc.bits.ldw.uncorrect_error = 0;
+		ecc.bits.ldw.clr_st = 1;
+		TXC_FZC_CNTL_REG_WRITE64(handle, TXC_SFECC_ST_REG, port,
+						ecc.value);
+	}
+
+	states->sfecc.value = ecc.value;
+
+	return (NPI_SUCCESS);
+}
+
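+/*
+ * npi_txc_sf_ecc_state_clr():
+ *	This function is called to clear the store-forward ECC state register.
+ *
+ * Parameters:
+ *	handle		- NPI handle
+ *	port		- port number (0 - 3)
+ * Return:
+ *	NPI_SUCCESS	- If clear is complete successfully.
+ */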
+npi_status_t
+npi_txc_sf_ecc_state_clr(npi_handle_t handle, uint8_t port)
+{
+	ASSERT(IS_PORT_NUM_VALID(port));
+
+	TXC_FZC_CNTL_REG_WRITE64(handle, TXC_SFECC_ST_REG, port, 0);
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_txc_global_istatus_get():
+ *	This function is called to get TXC's global interrupt status.
+ *
+ * Parameters:
+ *	handle		- NPI handle
+ *	istatus		- TXC global interrupt status
+ *
+ * Return:
+ */
+void
+npi_txc_global_istatus_get(npi_handle_t handle, txc_int_stat_t *istatus)
+{
+	txc_int_stat_t	status;
+
+	NXGE_REG_RD64(handle, TXC_INT_STAT_REG, &status.value);
+
+	istatus->value = status.value;
+}
+
+/*
+ * npi_txc_global_istatus_clear():
+ *	This function is called to clear TXC's global interrupt status.
+ *
+ * Parameters:
+ *	handle		- NPI handle
+ *	istatus		- TXC global interrupt status
+ *
+ * Return:
+ */
+void
+npi_txc_global_istatus_clear(npi_handle_t handle, uint64_t istatus)
+{
+	NXGE_REG_WR64(handle, TXC_INT_STAT_REG, istatus);
+}
+
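+/*
+ * npi_txc_global_imask_set():
+ *	This function is called to set the interrupt mask bits of a port
+ *	(each port owns 6 mask bits in TXC_INT_MASK_REG).
+ *
+ * Parameters:
+ *	handle		- NPI handle
+ *	portn		- port number (0 - 3)
+ *	istatus		- mask bits to set for the port
+ *
+ * Return:
+ */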
+void
+npi_txc_global_imask_set(npi_handle_t handle, uint8_t portn, uint8_t istatus)
+{
+	uint64_t val;
+
+	NXGE_REG_RD64(handle, TXC_INT_MASK_REG, &val);
+	switch (portn) {
+	case 0:
+		val &= 0xFFFFFF00;
+		val |= istatus & 0x3F;
+		break;
+	case 1:
+		val &= 0xFFFF00FF;
+		val |= (istatus << 8) & 0x3F00;
+		break;
+	case 2:
+		val &= 0xFF00FFFF;
+		val |= (istatus << 16) & 0x3F0000;
+		break;
+	case 3:
+		val &= 0x00FFFFFF;
+		val |= (istatus << 24) & 0x3F000000;
+		break;
+	default:
+		;
+	}
+	NXGE_REG_WR64(handle, TXC_INT_MASK_REG, val);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nxge/npi/npi_txc.h	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,138 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _NPI_TXC_H
+#define	_NPI_TXC_H
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#include <npi.h>
+#include <nxge_txc_hw.h>
+
+/*
+ * Transmit Controller (TXC) NPI error codes
+ */
+#define	TXC_ER_ST			(TXC_BLK_ID << NPI_BLOCK_ID_SHIFT)
+#define	TXC_ID_SHIFT(n)			(n << NPI_PORT_CHAN_SHIFT)
+
+#define	NPI_TXC_PORT_INVALID(n)		(TXC_ID_SHIFT(n) | IS_PORT |\
+					TXC_ER_ST | PORT_INVALID)
+
+#define	NPI_TXC_CHANNEL_INVALID(n)	(TXC_ID_SHIFT(n) | IS_PORT |\
+					TXC_ER_ST | CHANNEL_INVALID)
+
+#define	NPI_TXC_OPCODE_INVALID(n)	(TXC_ID_SHIFT(n) | IS_PORT |\
+					TXC_ER_ST | OPCODE_INVALID)
+
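+/*
+ * For example, an invalid channel argument is normally reported as a
+ * composed status (a usage sketch, not an exhaustive list):
+ *
+ *	return (NPI_FAILURE | NPI_TXC_CHANNEL_INVALID(channel));
+ */
+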
+/*
+ * Register offset (0x1000 bytes for each channel) for TXC registers.
+ */
+#define	NXGE_TXC_FZC_OFFSET(x, cn)	(x + TXC_FZC_CHANNEL_OFFSET(cn))
+
+/*
+ * Register offset (0x100 bytes for each port) for TXC Function zero
+ * control registers.
+ */
+#define	NXGE_TXC_FZC_CNTL_OFFSET(x, port) (x + \
+			TXC_FZC_CNTL_PORT_OFFSET(port))
+/*
+ * PIO macros to read and write the transmit control registers.
+ */
+#define	TXC_FZC_REG_READ64(handle, reg, cn, val_p)	\
+		NXGE_REG_RD64(handle, \
+		(NXGE_TXC_FZC_OFFSET(reg, cn)), val_p)
+
+#define	TXC_FZC_REG_WRITE64(handle, reg, cn, data)	\
+		NXGE_REG_WR64(handle, \
+		(NXGE_TXC_FZC_OFFSET(reg, cn)), data)
+
+#define	TXC_FZC_CNTL_REG_READ64(handle, reg, port, val_p)	\
+		NXGE_REG_RD64(handle, \
+		(NXGE_TXC_FZC_CNTL_OFFSET(reg, port)), val_p)
+
+#define	TXC_FZC_CNTL_REG_WRITE64(handle, reg, port, data)	\
+		NXGE_REG_WR64(handle, \
+		(NXGE_TXC_FZC_CNTL_OFFSET(reg, port)), data)
+
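+/*
+ * For example, a per-channel register read through the FZC PIO macro
+ * (a usage sketch; the caller supplies a valid NPI handle and channel):
+ *
+ *	uint64_t val;
+ *	TXC_FZC_REG_READ64(handle, TXC_DMA_MAX_BURST_REG, channel, &val);
+ */
+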
+/*
+ * TXC (Transmit Controller) prototypes.
+ */
+npi_status_t npi_txc_dma_max_burst(npi_handle_t, io_op_t,
+		uint8_t, uint32_t *);
+npi_status_t npi_txc_dma_max_burst_set(npi_handle_t, uint8_t,
+		uint32_t);
+npi_status_t npi_txc_dma_bytes_transmitted(npi_handle_t,
+		uint8_t, uint32_t *);
+npi_status_t npi_txc_control(npi_handle_t, io_op_t,
+		p_txc_control_t);
+npi_status_t npi_txc_global_enable(npi_handle_t);
+npi_status_t npi_txc_global_disable(npi_handle_t);
+npi_status_t npi_txc_control_clear(npi_handle_t, uint8_t);
+npi_status_t npi_txc_training_set(npi_handle_t, uint32_t);
+npi_status_t npi_txc_training_get(npi_handle_t, uint32_t *);
+npi_status_t npi_txc_port_control_get(npi_handle_t, uint8_t,
+		uint32_t *);
+npi_status_t npi_txc_port_enable(npi_handle_t, uint8_t);
+npi_status_t npi_txc_port_disable(npi_handle_t, uint8_t);
+npi_status_t npi_txc_port_dma_enable(npi_handle_t, uint8_t,
+		uint32_t);
+npi_status_t npi_txc_port_dma_list_get(npi_handle_t, uint8_t,
+		uint32_t *);
+npi_status_t npi_txc_port_dma_channel_enable(npi_handle_t, uint8_t,
+		uint8_t);
+npi_status_t npi_txc_port_dma_channel_disable(npi_handle_t, uint8_t,
+		uint8_t);
+
+npi_status_t npi_txc_pkt_stuffed_get(npi_handle_t, uint8_t,
+		uint32_t *, uint32_t *);
+npi_status_t npi_txc_pkt_xmt_to_mac_get(npi_handle_t, uint8_t,
+		uint32_t *, uint32_t *);
+npi_status_t npi_txc_reorder_get(npi_handle_t, uint8_t,
+		uint32_t *);
+npi_status_t npi_txc_dump_tdc_fzc_regs(npi_handle_t, uint8_t);
+npi_status_t npi_txc_dump_fzc_regs(npi_handle_t);
+npi_status_t npi_txc_dump_port_fzc_regs(npi_handle_t, uint8_t);
+npi_status_t npi_txc_ro_states_get(npi_handle_t, uint8_t,
+		txc_ro_states_t *);
+npi_status_t npi_txc_ro_ecc_state_clr(npi_handle_t, uint8_t);
+npi_status_t npi_txc_sf_states_get(npi_handle_t, uint8_t,
+		txc_sf_states_t *);
+npi_status_t npi_txc_sf_ecc_state_clr(npi_handle_t, uint8_t);
+void npi_txc_global_istatus_get(npi_handle_t, txc_int_stat_t *);
+void npi_txc_global_istatus_clear(npi_handle_t, uint64_t);
+void npi_txc_global_imask_set(npi_handle_t, uint8_t,
+		uint8_t);
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _NPI_TXC_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nxge/npi/npi_txdma.c	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,2077 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#include <npi_txdma.h>
+
+#define	TXDMA_WAIT_LOOP		10000
+#define	TXDMA_WAIT_MSEC		5
+
+static npi_status_t npi_txdma_control_reset_wait(npi_handle_t handle,
+	uint8_t channel);
+static npi_status_t npi_txdma_control_stop_wait(npi_handle_t handle,
+	uint8_t channel);
+static npi_status_t npi_txdma_control_resume_wait(npi_handle_t handle,
+	uint8_t channel);
+
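+/*
+ * Per-channel DMC register offsets and names, per-channel FZC (logical
+ * page) register offsets and names, and common FZC register offsets and
+ * names, used by the register dump routines below.
+ */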
+uint64_t tdc_dmc_offset[] = {
+	TX_RNG_CFIG_REG,
+	TX_RING_HDL_REG,
+	TX_RING_KICK_REG,
+	TX_ENT_MSK_REG,
+	TX_CS_REG,
+	TXDMA_MBH_REG,
+	TXDMA_MBL_REG,
+	TX_DMA_PRE_ST_REG,
+	TX_RNG_ERR_LOGH_REG,
+	TX_RNG_ERR_LOGL_REG,
+	TDMC_INTR_DBG_REG,
+	TX_CS_DBG_REG
+};
+
+const char *tdc_dmc_name[] = {
+	"TX_RNG_CFIG_REG",
+	"TX_RING_HDL_REG",
+	"TX_RING_KICK_REG",
+	"TX_ENT_MSK_REG",
+	"TX_CS_REG",
+	"TXDMA_MBH_REG",
+	"TXDMA_MBL_REG",
+	"TX_DMA_PRE_ST_REG",
+	"TX_RNG_ERR_LOGH_REG",
+	"TX_RNG_ERR_LOGL_REG",
+	"TDMC_INTR_DBG_REG",
+	"TX_CS_DBG_REG"
+};
+
+uint64_t tdc_fzc_offset [] = {
+	TX_LOG_PAGE_VLD_REG,
+	TX_LOG_PAGE_MASK1_REG,
+	TX_LOG_PAGE_VAL1_REG,
+	TX_LOG_PAGE_MASK2_REG,
+	TX_LOG_PAGE_VAL2_REG,
+	TX_LOG_PAGE_RELO1_REG,
+	TX_LOG_PAGE_RELO2_REG,
+	TX_LOG_PAGE_HDL_REG
+};
+
+const char *tdc_fzc_name [] = {
+	"TX_LOG_PAGE_VLD_REG",
+	"TX_LOG_PAGE_MASK1_REG",
+	"TX_LOG_PAGE_VAL1_REG",
+	"TX_LOG_PAGE_MASK2_REG",
+	"TX_LOG_PAGE_VAL2_REG",
+	"TX_LOG_PAGE_RELO1_REG",
+	"TX_LOG_PAGE_RELO2_REG",
+	"TX_LOG_PAGE_HDL_REG"
+};
+
+uint64_t tx_fzc_offset[] = {
+	TX_ADDR_MD_REG,
+	TDMC_INJ_PAR_ERR_REG,
+	TDMC_DBG_SEL_REG,
+	TDMC_TRAINING_REG
+};
+
+const char *tx_fzc_name[] = {
+	"TX_ADDR_MD_REG",
+	"TDMC_INJ_PAR_ERR_REG",
+	"TDMC_DBG_SEL_REG",
+	"TDMC_TRAINING_REG"
+};
+
+#define	NUM_TDC_DMC_REGS	(sizeof (tdc_dmc_offset) / sizeof (uint64_t))
+#define	NUM_TDC_FZC_REGS	(sizeof (tdc_fzc_offset) / sizeof (uint64_t))
+#define	NUM_TX_FZC_REGS	(sizeof (tx_fzc_offset) / sizeof (uint64_t))
+
+/*
+ * npi_txdma_dump_tdc_regs
+ * Dumps the contents of tdc csrs and fzc registers
+ *
+ * Input:
+ *         tdc:      TX DMA number
+ *
+ * return:
+ *     NPI_SUCCESS
+ *     NPI_FAILURE
+ *     NPI_TXDMA_CHANNEL_INVALID
+ *
+ */
+npi_status_t
+npi_txdma_dump_tdc_regs(npi_handle_t handle, uint8_t tdc)
+{
+
+	uint64_t		value, offset;
+	int 			num_regs, i;
+
+	ASSERT(TXDMA_CHANNEL_VALID(tdc));
+	if (!TXDMA_CHANNEL_VALID(tdc)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			"npi_txdma_dump_tdc_regs"
+			" Invalid TDC number %d \n",
+			tdc));
+
+		return (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(tdc));
+	}
+
+	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+		    "\nTXDMA DMC Register Dump for Channel %d\n",
+			    tdc));
+
+	num_regs = NUM_TDC_DMC_REGS;
+	for (i = 0; i < num_regs; i++) {
+		TXDMA_REG_READ64(handle, tdc_dmc_offset[i], tdc, &value);
+		offset = NXGE_TXDMA_OFFSET(tdc_dmc_offset[i], handle.is_vraddr,
+				tdc);
+		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL, "0x%08llx "
+			"%s\t 0x%016llx \n",
+			offset, tdc_dmc_name[i],
+			value));
+	}
+
+	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+		"\nTXDMA FZC_DMC Register Dump for Channel %d\n",
+		tdc));
+
+	num_regs = NUM_TDC_FZC_REGS;
+	for (i = 0; i < num_regs; i++) {
+		offset = NXGE_TXLOG_OFFSET(tdc_fzc_offset[i], tdc);
+		NXGE_REG_RD64(handle, offset, &value);
+		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL, "0x%08llx "
+			"%s\t %016llx \n",
+			offset, tdc_fzc_name[i],
+			value));
+	}
+
+	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+		"\n TXDMA Register Dump for Channel %d done\n", tdc));
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_txdma_dump_fzc_regs
+ * Dumps the contents of the common FZC_DMC registers
+ *
+ * Input:
+ *         handle:   NPI handle
+ *
+ * return:
+ *     NPI_SUCCESS
+ *
+ */
+npi_status_t
+npi_txdma_dump_fzc_regs(npi_handle_t handle)
+{
+
+	uint64_t value;
+	int num_regs, i;
+
+	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+		"\nFZC_DMC Common Register Dump\n"));
+
+	num_regs = NUM_TX_FZC_REGS;
+	for (i = 0; i < num_regs; i++) {
+		NXGE_REG_RD64(handle, tx_fzc_offset[i], &value);
+		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL, "0x%08llx "
+			"%s\t 0x%08llx \n",
+			tx_fzc_offset[i],
+			tx_fzc_name[i], value));
+	}
+	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+		"\n TXDMA FZC_DMC Register Dump Done \n"));
+
+	return (NPI_SUCCESS);
+}
+
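+/*
+ * npi_txdma_tdc_regs_zero
+ * Clears (writes 0s to) all per-channel DMC registers of a TX DMA channel
+ *
+ * Input:
+ *         tdc:      TX DMA number
+ *
+ * return:
+ *     NPI_SUCCESS
+ *     NPI_FAILURE
+ *     NPI_TXDMA_CHANNEL_INVALID
+ *
+ */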
+npi_status_t
+npi_txdma_tdc_regs_zero(npi_handle_t handle, uint8_t tdc)
+{
+	uint64_t		value;
+	int 			num_regs, i;
+
+	ASSERT(TXDMA_CHANNEL_VALID(tdc));
+	if (!TXDMA_CHANNEL_VALID(tdc)) {
+		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+			"npi_txdma_tdc_regs_zero"
+			" Invalid TDC number %d \n",
+			tdc));
+		return (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(tdc));
+	}
+
+	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+		    "\nTXDMA DMC Register (zero) for Channel %d\n",
+			    tdc));
+
+	num_regs = NUM_TDC_DMC_REGS;
+	value = 0;
+	for (i = 0; i < num_regs; i++) {
+		TXDMA_REG_WRITE64(handle, tdc_dmc_offset[i], tdc,
+			value);
+	}
+
+	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+		"\nTXDMA FZC_DMC Register clear for Channel %d\n",
+		tdc));
+
+	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+		"\n TXDMA Register Clear to 0s for Channel %d done\n", tdc));
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_txdma_mode32_set():
+ *	This function is called to enable or disable the 32 bit
+ *	addressing mode.
+ *
+ * Parameters:
+ *	handle		- NPI handle
+ *	mode_enable	- B_TRUE  (enable 32 bit mode)
+ *			  B_FALSE (disable 32 bit mode)
+ *
+ * Return:
+ *	NPI_SUCCESS		- If set is complete successfully.
+ *
+ *	Error:
+ *	NONE
+ */
+npi_status_t
+npi_txdma_mode32_set(npi_handle_t handle, boolean_t mode_enable)
+{
+	tx_addr_md_t		mode32;
+
+	mode32.value = 0;
+	if (mode_enable) {
+		mode32.bits.ldw.mode32 = 1;
+	} else {
+		mode32.bits.ldw.mode32 = 0;
+	}
+	NXGE_REG_WR64(handle, TX_ADDR_MD_REG, mode32.value);
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_txdma_log_page_set():
+ *	This function is called to configure a logical page
+ *	(valid bit, mask, value, relocation).
+ *
+ * Parameters:
+ *	handle		- NPI handle
+ *	cfgp		- pointer to NPI defined data structure:
+ *				- page valid
+ * 				- mask
+ *				- value
+ *				- relocation
+ *	channel		- hardware TXDMA channel from 0 to 23.
+ *
+ * Return:
+ *	NPI_SUCCESS		- If configurations are set successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE -
+ *		NPI_TXDMA_CHANNEL_INVALID	-
+ *		NPI_TXDMA_FUNC_INVALID	-
+ *		NPI_TXDMA_PAGE_INVALID	-
+ */
+npi_status_t
+npi_txdma_log_page_set(npi_handle_t handle, uint8_t channel,
+		p_dma_log_page_t cfgp)
+{
+	log_page_vld_t		vld;
+	int			status;
+	uint64_t		val;
+	dma_log_page_t		cfg;
+
+	DMA_LOG_PAGE_FN_VALIDATE(channel, cfgp->page_num, cfgp->func_num,
+		status);
+	if (status) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_txdma_log_page_set"
+				    " npi_status <0x%x>", status));
+		return (status);
+	}
+
+	TX_LOG_REG_WRITE64(handle, TX_LOG_PAGE_VLD_REG, channel, 0);
+	TX_LOG_REG_READ64(handle, TX_LOG_PAGE_VLD_REG, channel, &val);
+
+	NPI_DEBUG_MSG((handle.function, NPI_TDC_CTL,
+			    "\n==> npi_txdma_log_page_set: WRITE 0 and "
+			    " READ back 0x%llx\n ", val));
+
+	vld.value = 0;
+	TX_LOG_REG_READ64(handle, TX_LOG_PAGE_VLD_REG, channel, &val);
+
+	val &= 0x3;
+	vld.value |= val;
+
+	vld.value = 0;
+	vld.bits.ldw.func = cfgp->func_num;
+
+	if (!cfgp->page_num) {
+		TX_LOG_REG_WRITE64(handle, TX_LOG_PAGE_MASK1_REG,
+			channel, (cfgp->mask & DMA_LOG_PAGE_MASK_MASK));
+		TX_LOG_REG_WRITE64(handle, TX_LOG_PAGE_VAL1_REG,
+			channel, (cfgp->value & DMA_LOG_PAGE_VALUE_MASK));
+		TX_LOG_REG_WRITE64(handle, TX_LOG_PAGE_RELO1_REG,
+			channel, (cfgp->reloc & DMA_LOG_PAGE_RELO_MASK));
+	} else {
+		TX_LOG_REG_WRITE64(handle, TX_LOG_PAGE_MASK2_REG,
+			channel, (cfgp->mask & DMA_LOG_PAGE_MASK_MASK));
+		TX_LOG_REG_WRITE64(handle, TX_LOG_PAGE_VAL2_REG,
+			channel, (cfgp->value & DMA_LOG_PAGE_VALUE_MASK));
+		TX_LOG_REG_WRITE64(handle, TX_LOG_PAGE_RELO2_REG,
+			channel, (cfgp->reloc & DMA_LOG_PAGE_RELO_MASK));
+	}
+
+	TX_LOG_REG_WRITE64(handle, TX_LOG_PAGE_VLD_REG, channel,
+		vld.value | (cfgp->valid << cfgp->page_num));
+
+	NPI_DEBUG_MSG((handle.function, NPI_REG_CTL,
+				    "\n==> npi_txdma_log_page_set: vld value "
+				    " 0x%llx function %d page_valid01 0x%x\n",
+				    vld.value,
+				    vld.bits.ldw.func,
+		(cfgp->valid << cfgp->page_num)));
+
+
+	cfg.page_num = 0;
+	cfg.func_num = 0;
+	(void) npi_txdma_log_page_get(handle, channel, &cfg);
+	cfg.page_num = 1;
+	(void) npi_txdma_log_page_get(handle, channel, &cfg);
+
+	return (status);
+}
+
+/*
+ * npi_txdma_log_page_get():
+ *	This function is called to get a logical page
+ *	(valid bit, mask, value, relocation).
+ *
+ * Parameters:
+ *	handle		- NPI handle
+ *	cfgp		- Get the following values (NPI defined structure):
+ *				- page valid
+ * 				- mask
+ *				- value
+ *				- relocation
+ *	channel		- hardware TXDMA channel from 0 to 23.
+ *
+ * Return:
+ *	NPI_SUCCESS		- If configurations are read successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE -
+ *		NPI_TXDMA_CHANNEL_INVALID	-
+ *		NPI_TXDMA_FUNC_INVALID	-
+ *		NPI_TXDMA_PAGE_INVALID	-
+ */
+npi_status_t
+npi_txdma_log_page_get(npi_handle_t handle, uint8_t channel,
+		p_dma_log_page_t cfgp)
+{
+	log_page_vld_t		vld;
+	int			status;
+	uint64_t		val;
+
+	DMA_LOG_PAGE_VALIDATE(channel, cfgp->page_num, status);
+	if (status) {
+		NPI_ERROR_MSG((handle.function, NPI_REG_CTL,
+					    " npi_txdma_log_page_get"
+					    " npi_status <0x%x>", status));
+		return (status);
+	}
+
+	vld.value = 0;
+	vld.bits.ldw.func = cfgp->func_num;
+	TX_LOG_REG_READ64(handle, TX_LOG_PAGE_VLD_REG, channel, &val);
+
+	NPI_DEBUG_MSG((handle.function, NPI_TDC_CTL,
+				    "\n==> npi_txdma_log_page_get: read value "
+				    " function %d  value 0x%llx\n",
+				    cfgp->func_num, val));
+
+	vld.value |= val;
+	cfgp->func_num = vld.bits.ldw.func;
+
+	if (!cfgp->page_num) {
+		TX_LOG_REG_READ64(handle, TX_LOG_PAGE_MASK1_REG, channel, &val);
+		cfgp->mask = val & DMA_LOG_PAGE_MASK_MASK;
+		TX_LOG_REG_READ64(handle, TX_LOG_PAGE_VAL1_REG, channel, &val);
+		cfgp->value = val & DMA_LOG_PAGE_VALUE_MASK;
+		TX_LOG_REG_READ64(handle, TX_LOG_PAGE_RELO1_REG, channel, &val);
+		cfgp->reloc = val & DMA_LOG_PAGE_RELO_MASK;
+		cfgp->valid = vld.bits.ldw.page0;
+	} else {
+		TX_LOG_REG_READ64(handle, TX_LOG_PAGE_MASK2_REG, channel, &val);
+		cfgp->mask = val & DMA_LOG_PAGE_MASK_MASK;
+		TX_LOG_REG_READ64(handle, TX_LOG_PAGE_VAL2_REG, channel, &val);
+		cfgp->value = val & DMA_LOG_PAGE_VALUE_MASK;
+		TX_LOG_REG_READ64(handle, TX_LOG_PAGE_RELO2_REG, channel, &val);
+		cfgp->reloc = val & DMA_LOG_PAGE_RELO_MASK;
+		cfgp->valid = vld.bits.ldw.page1;
+	}
+
+	return (status);
+}
+
+/*
+ * npi_txdma_log_page_handle_set():
+ *	This function is called to program a page handle
+ *	(bits [63:44] of a 64-bit address to generate
+ *	a 64 bit address)
+ *
+ * Parameters:
+ *	handle		- NPI handle
+ *	hdl_p		- pointer to a logical page handle
+ *			  hardware data structure (log_page_hdl_t).
+ *	channel		- hardware TXDMA channel from 0 to 23.
+ *
+ * Return:
+ *	NPI_SUCCESS		- If configurations are set successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE -
+ *		NPI_TXDMA_CHANNEL_INVALID	-
+ *		NPI_TXDMA_FUNC_INVALID	-
+ *		NPI_TXDMA_PAGE_INVALID	-
+ */
+npi_status_t
+npi_txdma_log_page_handle_set(npi_handle_t handle, uint8_t channel,
+		p_log_page_hdl_t hdl_p)
+{
+	int			status = NPI_SUCCESS;
+
+	ASSERT(TXDMA_CHANNEL_VALID(channel));
+	if (!TXDMA_CHANNEL_VALID(channel)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_txdma_log_page_handle_set"
+				    " Invalid Input: channel <0x%x>",
+				    channel));
+		return (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(channel));
+	}
+
+	TX_LOG_REG_WRITE64(handle, TX_LOG_PAGE_HDL_REG,
+		channel, hdl_p->value);
+
+	return (status);
+}
+
+/*
+ * npi_txdma_log_page_config():
+ *	This function is called to perform IO operations on
+ *	a logical page: set, get or clear the
+ *	valid bit, mask, value and relocation.
+ *
+ * Parameters:
+ *	handle		- NPI handle
+ *	op_mode		- OP_GET, OP_SET, OP_CLEAR
+ *	type		- NPI specific config type
+ *			   TXDMA_LOG_PAGE_MASK
+ *			   TXDMA_LOG_PAGE_VALUE
+ *			   TXDMA_LOG_PAGE_RELOC
+ *			   TXDMA_LOG_PAGE_VALID
+ *			   TXDMA_LOG_PAGE_ALL
+ *	channel		- hardware TXDMA channel from 0 to 23.
+ *	cfgp		- pointer to the NPI config structure.
+ * Return:
+ *	NPI_SUCCESS		- If configurations are read successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE		-
+ *		NPI_TXDMA_OPCODE_INVALID	-
+ *		NPI_TXDMA_CHANNEL_INVALID	-
+ *		NPI_TXDMA_FUNC_INVALID	-
+ *		NPI_TXDMA_PAGE_INVALID	-
+ */
+npi_status_t
+npi_txdma_log_page_config(npi_handle_t handle, io_op_t op_mode,
+		txdma_log_cfg_t type, uint8_t channel,
+		p_dma_log_page_t cfgp)
+{
+	int			status = NPI_SUCCESS;
+	uint64_t		val;
+
+	ASSERT(TXDMA_CHANNEL_VALID(channel));
+	if (!TXDMA_CHANNEL_VALID(channel)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_txdma_log_page_config"
+				    " Invalid Input: channel <0x%x>",
+				    channel));
+		return (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(channel));
+	}
+
+	switch (op_mode) {
+	case OP_GET:
+		switch (type) {
+		case TXDMA_LOG_PAGE_ALL:
+			return (npi_txdma_log_page_get(handle, channel,
+					cfgp));
+		case TXDMA_LOG_PAGE_MASK:
+			if (!cfgp->page_num) {
+				TX_LOG_REG_READ64(handle, TX_LOG_PAGE_MASK1_REG,
+						channel, &val);
+				cfgp->mask = val & DMA_LOG_PAGE_MASK_MASK;
+			} else {
+				TX_LOG_REG_READ64(handle, TX_LOG_PAGE_MASK2_REG,
+						channel, &val);
+				cfgp->mask = val & DMA_LOG_PAGE_MASK_MASK;
+			}
+			break;
+
+		case TXDMA_LOG_PAGE_VALUE:
+			if (!cfgp->page_num) {
+				TX_LOG_REG_READ64(handle, TX_LOG_PAGE_VAL1_REG,
+						channel, &val);
+				cfgp->value = val & DMA_LOG_PAGE_VALUE_MASK;
+			} else {
+				TX_LOG_REG_READ64(handle, TX_LOG_PAGE_VAL2_REG,
+						channel, &val);
+				cfgp->value = val & DMA_LOG_PAGE_VALUE_MASK;
+			}
+			break;
+
+		case TXDMA_LOG_PAGE_RELOC:
+			if (!cfgp->page_num) {
+				TX_LOG_REG_READ64(handle, TX_LOG_PAGE_RELO1_REG,
+						channel, &val);
+				cfgp->reloc = val & DMA_LOG_PAGE_RELO_MASK;
+			} else {
+				TX_LOG_REG_READ64(handle, TX_LOG_PAGE_RELO2_REG,
+						channel, &val);
+				cfgp->reloc = val & DMA_LOG_PAGE_RELO_MASK;
+			}
+			break;
+
+		default:
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_txdma_log_page_config"
+					    " Invalid Input: pageconfig <0x%x>",
+					    type));
+			return (NPI_FAILURE |
+				NPI_TXDMA_OPCODE_INVALID(channel));
+		}
+
+		break;
+
+	case OP_SET:
+	case OP_CLEAR:
+		if (op_mode == OP_CLEAR) {
+			cfgp->valid = 0;
+			cfgp->mask = cfgp->func_num = 0;
+			cfgp->value = cfgp->reloc = 0;
+		}
+		switch (type) {
+		case TXDMA_LOG_PAGE_ALL:
+			return (npi_txdma_log_page_set(handle, channel,
+					cfgp));
+		case TXDMA_LOG_PAGE_MASK:
+			if (!cfgp->page_num) {
+				TX_LOG_REG_WRITE64(handle,
+				TX_LOG_PAGE_MASK1_REG, channel,
+				(cfgp->mask & DMA_LOG_PAGE_MASK_MASK));
+			} else {
+				TX_LOG_REG_WRITE64(handle,
+				TX_LOG_PAGE_MASK2_REG,
+				channel, (cfgp->mask & DMA_LOG_PAGE_MASK_MASK));
+			}
+			break;
+
+		case TXDMA_LOG_PAGE_VALUE:
+			if (!cfgp->page_num) {
+				TX_LOG_REG_WRITE64(handle,
+				TX_LOG_PAGE_VAL1_REG, channel,
+				(cfgp->value & DMA_LOG_PAGE_VALUE_MASK));
+			} else {
+				TX_LOG_REG_WRITE64(handle,
+				TX_LOG_PAGE_VAL2_REG, channel,
+				(cfgp->value & DMA_LOG_PAGE_VALUE_MASK));
+			}
+			break;
+
+		case TXDMA_LOG_PAGE_RELOC:
+			if (!cfgp->page_num) {
+				TX_LOG_REG_WRITE64(handle,
+				TX_LOG_PAGE_RELO1_REG, channel,
+				(cfgp->reloc & DMA_LOG_PAGE_RELO_MASK));
+			} else {
+				TX_LOG_REG_WRITE64(handle,
+				TX_LOG_PAGE_RELO2_REG, channel,
+				(cfgp->reloc & DMA_LOG_PAGE_RELO_MASK));
+			}
+			break;
+
+		default:
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_txdma_log_page_config"
+					    " Invalid Input: pageconfig <0x%x>",
+					    type));
+			return (NPI_FAILURE |
+				NPI_TXDMA_OPCODE_INVALID(channel));
+		}
+
+		break;
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_txdma_log_page_config"
+					    " Invalid Input: op <0x%x>",
+					    op_mode));
+		return (NPI_FAILURE | NPI_TXDMA_OPCODE_INVALID(channel));
+	}
+
+	return (status);
+}
+
+/*
+ * npi_txdma_log_page_vld_config():
+ *	This function is called to configure the logical
+ *	page valid register.
+ *
+ * Parameters:
+ *	handle		- NPI handle
+ *	op_mode		- OP_GET: get valid page configuration
+ *			  OP_SET: set valid page configuration
+ *			  OP_UPDATE: update valid page configuration
+ *			  OP_CLEAR: reset both valid pages to
+ *			  not defined (0).
+ *	channel		- hardware TXDMA channel from 0 to 23.
+ *	vld_p		- pointer to hardware defined log page valid register.
+ * Return:
+ *	NPI_SUCCESS		- If set is complete successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE -
+ *		NPI_TXDMA_CHANNEL_INVALID -
+ *		NPI_TXDMA_OPCODE_INVALID -
+ */
+npi_status_t
+npi_txdma_log_page_vld_config(npi_handle_t handle, io_op_t op_mode,
+		uint8_t channel, p_log_page_vld_t vld_p)
+{
+	int			status = NPI_SUCCESS;
+	log_page_vld_t		vld;
+
+	ASSERT(TXDMA_CHANNEL_VALID(channel));
+	if (!TXDMA_CHANNEL_VALID(channel)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_txdma_log_page_vld_config"
+				    " Invalid Input: channel <0x%x>",
+				    channel));
+		return (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(channel));
+	}
+
+	switch (op_mode) {
+	case OP_GET:
+		TX_LOG_REG_READ64(handle, TX_LOG_PAGE_VLD_REG, channel,
+					&vld_p->value);
+		break;
+
+	case OP_SET:
+		TX_LOG_REG_WRITE64(handle, TX_LOG_PAGE_VLD_REG,
+					channel, vld_p->value);
+		break;
+
+	case OP_UPDATE:
+		TX_LOG_REG_READ64(handle, TX_LOG_PAGE_VLD_REG, channel,
+					&vld.value);
+		TX_LOG_REG_WRITE64(handle, TX_LOG_PAGE_VLD_REG,
+					channel, vld.value | vld_p->value);
+		break;
+
+	case OP_CLEAR:
+		TX_LOG_REG_WRITE64(handle, TX_LOG_PAGE_VLD_REG,
+					channel, 0);
+		break;
+
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_txdma_log_page_vld_config"
+				    " Invalid Input: pagevld <0x%x>",
+				    op_mode));
+		return (NPI_FAILURE | NPI_TXDMA_OPCODE_INVALID(channel));
+	}
+
+	return (status);
+}
+
+/*
+ * npi_txdma_channel_reset():
+ *	This function is called to reset a transmit DMA channel.
+ *	(This function is used to reset a channel and reinitialize
+ *	 all other bits except RST_STATE).
+ *
+ * Parameters:
+ *	handle		- NPI handle (virtualization flag must be defined).
+ *	channel		- logical TXDMA channel from 0 to 23.
+ *			  (If virtualization flag is not set, then
+ *			   logical channel is the same as the hardware
+ *			   channel number).
+ *
+ * Return:
+ *	NPI_SUCCESS		- If reset is complete successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE	-
+ *		NPI_TXDMA_CHANNEL_INVALID -
+ *		NPI_TXDMA_RESET_FAILED -
+ */
+npi_status_t
+npi_txdma_channel_reset(npi_handle_t handle, uint8_t channel)
+{
+	NPI_DEBUG_MSG((handle.function, NPI_TDC_CTL,
+			    " npi_txdma_channel_reset"
+			    " RESETTING channel %d",
+			    channel));
+	return (npi_txdma_channel_control(handle, TXDMA_RESET, channel));
+}
+
+/*
+ * npi_txdma_channel_init_enable():
+ *	This function is called to start a transmit DMA channel after reset.
+ *
+ * Parameters:
+ *	handle		- NPI handle (virtualization flag must be defined).
+ *	channel		- logical TXDMA channel from 0 to 23.
+ *			  (If virtualization flag is not set, then
+ *			   logical channel is the same as the hardware
+ *			   channel number).
+ * Return:
+ *	NPI_SUCCESS		- If DMA channel is started successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE	-
+ *		NPI_TXDMA_CHANNEL_INVALID -
+ */
+npi_status_t
+npi_txdma_channel_init_enable(npi_handle_t handle, uint8_t channel)
+{
+	return (npi_txdma_channel_control(handle, TXDMA_INIT_START, channel));
+}
+
+/*
+ * npi_txdma_channel_enable():
+ *	This function is called to start a transmit DMA channel.
+ *
+ * Parameters:
+ *	handle		- NPI handle (virtualization flag must be defined).
+ *	channel		- logical TXDMA channel from 0 to 23.
+ *			  (If virtualization flag is not set, then
+ *			   logical channel is the same as the hardware
+ *			   channel number).
+ * Return:
+ *	NPI_SUCCESS		- If DMA channel is started successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE	-
+ *		NPI_TXDMA_CHANNEL_INVALID -
+ */
+
+npi_status_t
+npi_txdma_channel_enable(npi_handle_t handle, uint8_t channel)
+{
+	return (npi_txdma_channel_control(handle, TXDMA_START, channel));
+}
+
+/*
+ * npi_txdma_channel_disable():
+ *	This function is called to stop a transmit DMA channel.
+ *
+ * Parameters:
+ *	handle		- NPI handle (virtualization flag must be defined).
+ *	channel		- logical TXDMA channel from 0 to 23.
+ *			  (If virtualization flag is not set, then
+ *			   logical channel is the same as the hardware
+ *			   channel number).
+ * Return:
+ *	NPI_SUCCESS		- If DMA channel is stopped successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE	-
+ *		NPI_TXDMA_CHANNEL_INVALID -
+ *		NPI_TXDMA_STOP_FAILED -
+ */
+npi_status_t
+npi_txdma_channel_disable(npi_handle_t handle, uint8_t channel)
+{
+	return (npi_txdma_channel_control(handle, TXDMA_STOP, channel));
+}
+
+/*
+ * npi_txdma_channel_resume():
+ *	This function is called to restart a transmit DMA channel.
+ *
+ * Parameters:
+ *	handle		- NPI handle (virtualization flag must be defined).
+ *	channel		- logical TXDMA channel from 0 to 23.
+ *			  (If virtualization flag is not set, then
+ *			   logical channel is the same as the hardware
+ *			   channel number).
+ * Return:
+ *	NPI_SUCCESS		- If DMA channel is resumed successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE	-
+ *		NPI_TXDMA_CHANNEL_INVALID -
+ *		NPI_TXDMA_RESUME_FAILED -
+ */
+npi_status_t
+npi_txdma_channel_resume(npi_handle_t handle, uint8_t channel)
+{
+	return (npi_txdma_channel_control(handle, TXDMA_RESUME, channel));
+}
+
+/*
+ * npi_txdma_channel_mmk_clear():
+ *	This function is called to clear MMK bit.
+ *
+ * Parameters:
+ *	handle		- NPI handle (virtualization flag must be defined).
+ *	channel		- logical TXDMA channel from 0 to 23.
+ *			  (If virtualization flag is not set, then
+ *			   logical channel is the same as the hardware
+ *			   channel number).
+ * Return:
+ *	NPI_SUCCESS		- If MMK is reset successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE	-
+ *		NPI_TXDMA_CHANNEL_INVALID -
+ */
+npi_status_t
+npi_txdma_channel_mmk_clear(npi_handle_t handle, uint8_t channel)
+{
+	return (npi_txdma_channel_control(handle, TXDMA_CLEAR_MMK, channel));
+}
+
+/*
+ * npi_txdma_channel_mbox_enable():
+ *	This function is called to enable the mailbox update.
+ *
+ * Parameters:
+ *	handle		- NPI handle (virtualization flag must be defined).
+ *	channel		- logical TXDMA channel from 0 to 23.
+ *			  (If virtualization flag is not set, then
+ *			   logical channel is the same as the hardware
+ *			   channel number).
+ * Return:
+ *	NPI_SUCCESS		- If mailbox is enabled successfully.
+ *
+ *	Error:
+ *	NPI_HW_ERROR		-
+ *	NPI_FAILURE	-
+ *		NPI_TXDMA_CHANNEL_INVALID -
+ */
+npi_status_t
+npi_txdma_channel_mbox_enable(npi_handle_t handle, uint8_t channel)
+{
+	return (npi_txdma_channel_control(handle, TXDMA_MBOX_ENABLE, channel));
+}
+
+/*
+ * npi_txdma_channel_control():
+ *	This function is called to control a transmit DMA channel
+ *	for reset, start or stop.
+ *
+ * Parameters:
+ *	handle		- NPI handle (virtualization flag must be defined).
+ *	control		- NPI defined control type supported
+ *				- TXDMA_INIT_RESET
+ * 				- TXDMA_INIT_START
+ *				- TXDMA_RESET
+ *				- TXDMA_START
+ *				- TXDMA_STOP
+ *	channel		- logical TXDMA channel from 0 to 23.
+ *			  (If virtualization flag is not set, then
+ *			   logical channel is the same as the hardware
+ *			   channel number).
+ *
+ * Return:
+ *	NPI_SUCCESS		- If reset is complete successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE		-
+ *		NPI_TXDMA_OPCODE_INVALID	-
+ *		NPI_TXDMA_CHANNEL_INVALID	-
+ *		NPI_TXDMA_RESET_FAILED	-
+ *		NPI_TXDMA_STOP_FAILED	-
+ *		NPI_TXDMA_RESUME_FAILED	-
+ */
+npi_status_t
+npi_txdma_channel_control(npi_handle_t handle, txdma_cs_cntl_t control,
+		uint8_t channel)
+{
+	int		status = NPI_SUCCESS;
+	tx_cs_t		cs;
+
+	ASSERT(TXDMA_CHANNEL_VALID(channel));
+	if (!TXDMA_CHANNEL_VALID(channel)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_txdma_channel_control"
+				    " Invalid Input: channel <0x%x>",
+				    channel));
+		return (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(channel));
+	}
+
+	switch (control) {
+	case TXDMA_INIT_RESET:
+		cs.value = 0;
+		TXDMA_REG_READ64(handle, TX_CS_REG, channel, &cs.value);
+		cs.bits.ldw.rst = 1;
+		TXDMA_REG_WRITE64(handle, TX_CS_REG, channel, cs.value);
+		return (npi_txdma_control_reset_wait(handle, channel));
+
+	case TXDMA_INIT_START:
+		cs.value = 0;
+		TXDMA_REG_WRITE64(handle, TX_CS_REG, channel, cs.value);
+		break;
+
+	case TXDMA_RESET:
+		/*
+		 * Sets reset bit only (Hardware will reset all
+		 * the RW bits but leave the RO bits alone.
+		 */
+		cs.value = 0;
+		cs.bits.ldw.rst = 1;
+		TXDMA_REG_WRITE64(handle, TX_CS_REG, channel, cs.value);
+		return (npi_txdma_control_reset_wait(handle, channel));
+
+	case TXDMA_START:
+		/* Enable the DMA channel */
+		TXDMA_REG_READ64(handle, TX_CS_REG, channel, &cs.value);
+		cs.bits.ldw.stop_n_go = 0;
+		TXDMA_REG_WRITE64(handle, TX_CS_REG, channel, cs.value);
+		break;
+
+	case TXDMA_STOP:
+		/* Disable the DMA channel */
+		TXDMA_REG_READ64(handle, TX_CS_REG, channel, &cs.value);
+		cs.bits.ldw.stop_n_go = 1;
+		TXDMA_REG_WRITE64(handle, TX_CS_REG, channel, cs.value);
+		status = npi_txdma_control_stop_wait(handle, channel);
+		if (status) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    "Cannot stop channel %d (TXC hung!)",
+				    channel));
+		}
+		break;
+
+	case TXDMA_RESUME:
+		/* Resume the packet transmission after stopping */
+		TXDMA_REG_READ64(handle, TX_CS_REG, channel, &cs.value);
+		cs.value &= ~TX_CS_STOP_N_GO_MASK;
+		TXDMA_REG_WRITE64(handle, TX_CS_REG, channel, cs.value);
+		return (npi_txdma_control_resume_wait(handle, channel));
+
+	case TXDMA_CLEAR_MMK:
+		/* Write 1 to MK bit to clear the MMK bit */
+		TXDMA_REG_READ64(handle, TX_CS_REG, channel, &cs.value);
+		cs.bits.ldw.mk = 1;
+		TXDMA_REG_WRITE64(handle, TX_CS_REG, channel, cs.value);
+		break;
+
+	case TXDMA_MBOX_ENABLE:
+		/*
+		 * Write 1 to MB bit to enable mailbox update
+		 * (cleared to 0 by hardware after update).
+		 */
+		TXDMA_REG_READ64(handle, TX_CS_REG, channel, &cs.value);
+		cs.bits.ldw.mb = 1;
+		TXDMA_REG_WRITE64(handle, TX_CS_REG, channel, cs.value);
+		break;
+
+	default:
+		status =  (NPI_FAILURE | NPI_TXDMA_OPCODE_INVALID(channel));
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_txdma_channel_control"
+				    " Invalid Input: control <0x%x>",
+				    control));
+	}
+
+	return (status);
+}
+
+/*
+ * npi_txdma_control_status():
+ *	This function is called to operate on the control
+ *	and status register.
+ *
+ * Parameters:
+ *	handle		- NPI handle
+ *	op_mode		- OP_GET: get hardware control and status
+ *			  OP_SET: set hardware control and status
+ *			  OP_UPDATE: update hardware control and status.
+ *			  OP_CLEAR: clear control and status register to 0s.
+ *	channel		- hardware TXDMA channel from 0 to 23.
+ *	cs_p		- pointer to hardware defined control and status
+ *			  structure.
+ * Return:
+ *	NPI_SUCCESS		- If set is complete successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE		-
+ *		NPI_TXDMA_OPCODE_INVALID	-
+ *		NPI_TXDMA_CHANNEL_INVALID	-
+ *		NPI_TXDMA_FUNC_INVALID	-
+ */
+npi_status_t
+npi_txdma_control_status(npi_handle_t handle, io_op_t op_mode,
+		uint8_t channel, p_tx_cs_t cs_p)
+{
+	int		status = NPI_SUCCESS;
+	tx_cs_t		txcs;
+
+	ASSERT(TXDMA_CHANNEL_VALID(channel));
+	if (!TXDMA_CHANNEL_VALID(channel)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_txdma_control_status"
+				    " Invalid Input: channel <0x%x>",
+				    channel));
+		return (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(channel));
+	}
+
+	switch (op_mode) {
+	case OP_GET:
+		TXDMA_REG_READ64(handle, TX_CS_REG, channel, &cs_p->value);
+		break;
+
+	case OP_SET:
+		TXDMA_REG_WRITE64(handle, TX_CS_REG, channel, cs_p->value);
+		break;
+
+	case OP_UPDATE:
+		TXDMA_REG_READ64(handle, TX_CS_REG, channel, &txcs.value);
+		TXDMA_REG_WRITE64(handle, TX_CS_REG, channel,
+			cs_p->value | txcs.value);
+		break;
+
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_txdma_control_status"
+				    " Invalid Input: control <0x%x>",
+				    op_mode));
+		return (NPI_FAILURE | NPI_TXDMA_OPCODE_INVALID(channel));
+	}
+
+	return (status);
+
+}
+
+/*
+ * npi_txdma_event_mask():
+ *	This function is called to operate on the event mask
+ *	register, which is used for generating interrupts.
+ *
+ * Parameters:
+ *	handle		- NPI handle
+ *	op_mode		- OP_GET: get hardware event mask
+ *			  OP_SET: set hardware interrupt event masks
+ *			  OP_CLEAR: clear control and status register to 0s.
+ *	channel		- hardware TXDMA channel from 0 to 23.
+ *	mask_p		- pointer to hardware defined event mask
+ *			  structure.
+ * Return:
+ *	NPI_SUCCESS		- If set is complete successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE		-
+ *		NPI_TXDMA_OPCODE_INVALID	-
+ *		NPI_TXDMA_CHANNEL_INVALID	-
+ */
+npi_status_t
+npi_txdma_event_mask(npi_handle_t handle, io_op_t op_mode,
+		uint8_t channel, p_tx_dma_ent_msk_t mask_p)
+{
+	int			status = NPI_SUCCESS;
+	tx_dma_ent_msk_t	mask;
+
+	ASSERT(TXDMA_CHANNEL_VALID(channel));
+	if (!TXDMA_CHANNEL_VALID(channel)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_txdma_event_mask"
+					    " Invalid Input: channel <0x%x>",
+					    channel));
+		return (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(channel));
+	}
+
+	switch (op_mode) {
+	case OP_GET:
+		TXDMA_REG_READ64(handle, TX_ENT_MSK_REG, channel,
+				&mask_p->value);
+		break;
+
+	case OP_SET:
+		TXDMA_REG_WRITE64(handle, TX_ENT_MSK_REG, channel,
+				mask_p->value);
+		break;
+
+	case OP_UPDATE:
+		TXDMA_REG_READ64(handle, TX_ENT_MSK_REG, channel, &mask.value);
+		TXDMA_REG_WRITE64(handle, TX_ENT_MSK_REG, channel,
+			mask_p->value | mask.value);
+		break;
+
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_txdma_event_mask"
+				    " Invalid Input: eventmask <0x%x>",
+				    op_mode));
+		return (NPI_FAILURE | NPI_TXDMA_OPCODE_INVALID(channel));
+	}
+
+	return (status);
+}
+
+/*
+ * npi_txdma_event_mask_config():
+ *	This function is called to operate on the event mask
+ *	register, which is used for generating interrupts.
+ *
+ * Parameters:
+ *	handle		- NPI handle
+ *	op_mode		- OP_GET: get hardware event mask
+ *			  OP_SET: set hardware interrupt event masks
+ *			  OP_CLEAR: clear control and status register to 0s.
+ *	channel		- hardware TXDMA channel from 0 to 23.
+ *	cfgp		- pointer to NPI defined event mask
+ *			  enum data type.
+ * Return:
+ *	NPI_SUCCESS		- If set is complete successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE		-
+ *		NPI_TXDMA_OPCODE_INVALID	-
+ *		NPI_TXDMA_CHANNEL_INVALID	-
+ */
+npi_status_t
+npi_txdma_event_mask_config(npi_handle_t handle, io_op_t op_mode,
+		uint8_t channel, txdma_ent_msk_cfg_t *mask_cfgp)
+{
+	int		status = NPI_SUCCESS;
+	uint64_t	value;
+
+	ASSERT(TXDMA_CHANNEL_VALID(channel));
+	if (!TXDMA_CHANNEL_VALID(channel)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_txdma_event_mask_config"
+				    " Invalid Input: channel <0x%x>",
+				    channel));
+
+		return (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(channel));
+	}
+
+	switch (op_mode) {
+	case OP_GET:
+		TXDMA_REG_READ64(handle, TX_ENT_MSK_REG, channel, mask_cfgp);
+		break;
+
+	case OP_SET:
+		TXDMA_REG_WRITE64(handle, TX_ENT_MSK_REG, channel,
+				*mask_cfgp);
+		break;
+
+	case OP_UPDATE:
+		TXDMA_REG_READ64(handle, TX_ENT_MSK_REG, channel, &value);
+		TXDMA_REG_WRITE64(handle, TX_ENT_MSK_REG, channel,
+			*mask_cfgp | value);
+		break;
+
+	case OP_CLEAR:
+		TXDMA_REG_WRITE64(handle, TX_ENT_MSK_REG, channel,
+			CFG_TXDMA_MASK_ALL);
+		break;
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_txdma_event_mask_config"
+				    " Invalid Input: eventmask <0x%x>",
+				    op_mode));
+		return (NPI_FAILURE | NPI_TXDMA_OPCODE_INVALID(channel));
+	}
+
+	return (status);
+}
+
+/*
+ * npi_txdma_event_mask_mk_out():
+ *	This function is called to mask out the packet transmit marked event.
+ *
+ * Parameters:
+ *	handle		- NPI handle
+ *	channel		- hardware TXDMA channel from 0 to 23.
+ * Return:
+ *	NPI_SUCCESS		- If set is complete successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE		-
+ *		NPI_TXDMA_CHANNEL_INVALID	-
+ */
+npi_status_t
+npi_txdma_event_mask_mk_out(npi_handle_t handle, uint8_t channel)
+{
+	txdma_ent_msk_cfg_t event_mask;
+	int		status = NPI_SUCCESS;
+
+	ASSERT(TXDMA_CHANNEL_VALID(channel));
+	if (!TXDMA_CHANNEL_VALID(channel)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_txdma_event_mask_mk_out"
+				    " Invalid Input: channel <0x%x>",
+				    channel));
+		return (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(channel));
+	}
+
+	TXDMA_REG_READ64(handle, TX_ENT_MSK_REG, channel, &event_mask);
+	TXDMA_REG_WRITE64(handle, TX_ENT_MSK_REG, channel,
+		event_mask & (~TX_ENT_MSK_MK_MASK));
+
+	return (status);
+}
+
+/*
+ * npi_txdma_event_mask_mk_in():
+ *	This function is called to set the mask for the packet marked event.
+ *
+ * Parameters:
+ *	handle		- NPI handle
+ *	channel		- hardware TXDMA channel from 0 to 23.
+ * Return:
+ *	NPI_SUCCESS		- If set is complete successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE		-
+ *		NPI_TXDMA_CHANNEL_INVALID	-
+ */
+npi_status_t
+npi_txdma_event_mask_mk_in(npi_handle_t handle, uint8_t channel)
+{
+	txdma_ent_msk_cfg_t event_mask;
+	int		status = NPI_SUCCESS;
+
+	ASSERT(TXDMA_CHANNEL_VALID(channel));
+	if (!TXDMA_CHANNEL_VALID(channel)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_txdma_event_mask_mk_in"
+				    " Invalid Input: channel <0x%x>",
+				    channel));
+		return (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(channel));
+	}
+
+	TXDMA_REG_READ64(handle, TX_ENT_MSK_REG, channel, &event_mask);
+	TXDMA_REG_WRITE64(handle, TX_ENT_MSK_REG, channel,
+		event_mask | TX_ENT_MSK_MK_MASK);
+
+	return (status);
+}
+
+/*
+ * npi_txdma_ring_addr_set():
+ *	This function is called to configure the transmit descriptor
+ *	ring address and its size.
+ *
+ * Parameters:
+ *	handle		- NPI handle (virtualization flag must be defined
+ *			  if its register pointer is from the virtual region).
+ *	channel		- logical TXDMA channel from 0 to 23.
+ *			  (If virtualization flag is not set, then
+ *			   logical channel is the same as the hardware
+ *			   channel number).
+ *	start_addr	- starting address of the descriptor
+ *	len		- maximum length of the descriptor
+ *			  (in number of 64 bytes block).
+ * Return:
+ *	NPI_SUCCESS		- If set is complete successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE		-
+ *		NPI_TXDMA_OPCODE_INVALID	-
+ *		NPI_TXDMA_CHANNEL_INVALID	-
+ */
+npi_status_t
+npi_txdma_ring_addr_set(npi_handle_t handle, uint8_t channel,
+		uint64_t start_addr, uint32_t len)
+{
+	int		status = NPI_SUCCESS;
+	tx_rng_cfig_t	cfg;
+
+	ASSERT(TXDMA_CHANNEL_VALID(channel));
+	if (!TXDMA_CHANNEL_VALID(channel)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_txdma_ring_addr_set"
+				    " Invalid Input: channel <0x%x>",
+				    channel));
+		return (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(channel));
+	}
+
+	cfg.value = ((start_addr & TX_RNG_CFIG_ADDR_MASK) |
+			(((uint64_t)len) << TX_RNG_CFIG_LEN_SHIFT));
+	TXDMA_REG_WRITE64(handle, TX_RNG_CFIG_REG, channel, cfg.value);
+
+	return (status);
+}
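A usage sketch (editorial): the len argument is programmed in 64-byte blocks, so a driver would typically convert its descriptor count first. `chan`, `ring_ioaddr` and `ndescs` are assumed driver state:

	uint32_t len64 = (ndescs * TXDMA_DESC_SIZE) / 64;	/* ring size in 64-byte blocks */

	(void) npi_txdma_ring_addr_set(handle, chan, ring_ioaddr, len64);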
+
+/*
+ * npi_txdma_ring_config():
+ *	This function is called to config a descriptor ring
+ *	by using the hardware defined data.
+ *
+ * Parameters:
+ *	handle		- NPI handle (virtualization flag must be defined
+ *			  if its register pointer is from the virtual region).
+ *	channel		- logical TXDMA channel from 0 to 23.
+ *			  (If virtualization flag is not set, then
+ *			   logical channel is the same as the hardware
+ *			   channel number).
+ *	op_mode		- OP_GET: get transmit ring configuration
+ *			  OP_SET: set transmit ring configuration
+ *	reg_data	- pointer to hardware defined transmit ring
+ *			  configuration data structure.
+ * Return:
+ *	NPI_SUCCESS		- If set/get is complete successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE		-
+ *		NPI_TXDMA_CHANNEL_INVALID	-
+ */
+npi_status_t
+npi_txdma_ring_config(npi_handle_t handle, io_op_t op_mode,
+		uint8_t channel, uint64_t *reg_data)
+{
+	int		status = NPI_SUCCESS;
+
+	ASSERT(TXDMA_CHANNEL_VALID(channel));
+	if (!TXDMA_CHANNEL_VALID(channel)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_txdma_ring_config"
+				    " Invalid Input: channel <0x%x>",
+				    channel));
+		return (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(channel));
+	}
+
+	switch (op_mode) {
+	case OP_GET:
+		TXDMA_REG_READ64(handle, TX_RNG_CFIG_REG, channel, reg_data);
+		break;
+
+	case OP_SET:
+		TXDMA_REG_WRITE64(handle, TX_RNG_CFIG_REG, channel,
+			*reg_data);
+		break;
+
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_txdma_ring_config"
+				    " Invalid Input: ring_config <0x%x>",
+				    op_mode));
+		return (NPI_FAILURE | NPI_TXDMA_OPCODE_INVALID(channel));
+	}
+
+	return (status);
+}
+
+/*
+ * npi_txdma_mbox_config():
+ *	This function is called to config the mailbox address
+ *
+ * Parameters:
+ *	handle		- NPI handle (virtualization flag must be defined
+ *			  if its register pointer is from the virtual region).
+ *	channel		- logical TXDMA channel from 0 to 23.
+ *			  (If virtualization flag is not set, then
+ *			   logical channel is the same as the hardware
+ *			   channel number).
+ *	op_mode		- OP_GET: get the mailbox address
+ *			  OP_SET: set the mailbox address
+ *	reg_data	- pointer to the mailbox address.
+ * Return:
+ *	NPI_SUCCESS		- If set is complete successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE		-
+ *		NPI_TXDMA_OPCODE_INVALID	-
+ *		NPI_TXDMA_CHANNEL_INVALID	-
+ */
+npi_status_t
+npi_txdma_mbox_config(npi_handle_t handle, io_op_t op_mode,
+		uint8_t channel, uint64_t *mbox_addr)
+{
+	int		status = NPI_SUCCESS;
+	txdma_mbh_t	mh;
+	txdma_mbl_t	ml;
+
+	ASSERT(TXDMA_CHANNEL_VALID(channel));
+	if (!TXDMA_CHANNEL_VALID(channel)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_txdma_mbox_config"
+				    " Invalid Input: channel <0x%x>",
+				    channel));
+		return (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(channel));
+	}
+
+	mh.value = ml.value = 0;
+
+	switch (op_mode) {
+	case OP_GET:
+		TXDMA_REG_READ64(handle, TXDMA_MBH_REG, channel, &mh.value);
+		TXDMA_REG_READ64(handle, TXDMA_MBL_REG, channel, &ml.value);
+		*mbox_addr = ml.value;
+		*mbox_addr |= (mh.value << TXDMA_MBH_ADDR_SHIFT);
+
+		break;
+
+	case OP_SET:
+		ml.bits.ldw.mbaddr = ((*mbox_addr & TXDMA_MBL_MASK) >>
+			TXDMA_MBL_SHIFT);
+		TXDMA_REG_WRITE64(handle, TXDMA_MBL_REG, channel, ml.value);
+		mh.bits.ldw.mbaddr = ((*mbox_addr >> TXDMA_MBH_ADDR_SHIFT) &
+			TXDMA_MBH_MASK);
+		TXDMA_REG_WRITE64(handle, TXDMA_MBH_REG, channel, mh.value);
+
+		break;
+
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_txdma_mbox_config"
+				    " Invalid Input: mbox <0x%x>",
+				    op_mode));
+		return (NPI_FAILURE | NPI_TXDMA_OPCODE_INVALID(channel));
+	}
+
+	return (status);
+
+}
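An illustrative round trip (editorial) showing the mailbox address being split across the TXDMA_MBL/TXDMA_MBH registers on OP_SET and reassembled on OP_GET; `mbox_ioaddr` is an assumed DMA address:

	uint64_t addr = mbox_ioaddr;
	uint64_t readback;

	(void) npi_txdma_mbox_config(handle, OP_SET, chan, &addr);
	(void) npi_txdma_mbox_config(handle, OP_GET, chan, &readback);
	/* readback is (MBH << TXDMA_MBH_ADDR_SHIFT) | MBL, i.e. the address
	 * truncated to the bits the hardware stores. */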
+
+/*
+ * npi_txdma_desc_gather_set():
+ *	This function is called to set up a transmit descriptor entry.
+ *
+ * Parameters:
+ *	handle		- NPI handle (register pointer is the
+ *			  descriptor address in memory).
+ *	desc_p		- pointer to a descriptor
+ *	gather_index	- gather entry index (0 to 15)
+ *	mark		- mark bit (only valid if it is the first gather).
+ *	ngathers	- number of gather pointers to set to the first gather.
+ *	dma_ioaddr	- starting dma address of an IO buffer to write.
+ *			  (SAD)
+ *	transfer_len	- transfer len.
+ * Return:
+ *	NPI_SUCCESS		- If set is complete successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE		-
+ *		NPI_TXDMA_OPCODE_INVALID	-
+ *		NPI_TXDMA_CHANNEL_INVALID	-
+ *		NPI_TXDMA_XFER_LEN_INVALID	-
+ */
+npi_status_t
+npi_txdma_desc_gather_set(npi_handle_t handle,
+		p_tx_desc_t desc_p, uint8_t gather_index,
+		boolean_t mark, uint8_t ngathers,
+		uint64_t dma_ioaddr, uint32_t transfer_len)
+{
+	int		status;
+
+	status = NPI_TXDMA_GATHER_INDEX(gather_index);
+	if (status) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_txdma_desc_gather_set"
+				    " Invalid Input: gather_index <0x%x>",
+				    gather_index));
+		return (status);
+	}
+
+	if (transfer_len > TX_MAX_TRANSFER_LENGTH) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_txdma_desc_gather_set"
+				    " Invalid Input: tr_len <0x%x>",
+				    transfer_len));
+		return (NPI_FAILURE | NPI_TXDMA_XFER_LEN_INVALID);
+	}
+
+	if (gather_index == 0) {
+		desc_p->bits.hdw.sop = 1;
+		desc_p->bits.hdw.mark = mark;
+		desc_p->bits.hdw.num_ptr = ngathers;
+		NPI_DEBUG_MSG((handle.function, NPI_TDC_CTL,
+			"npi_txdma_gather_set: SOP len %d (%d)",
+			desc_p->bits.hdw.tr_len, transfer_len));
+	}
+
+	desc_p->bits.hdw.tr_len = transfer_len;
+	desc_p->bits.hdw.sad = dma_ioaddr >> 32;
+	desc_p->bits.ldw.sad = dma_ioaddr & 0xffffffff;
+
+	NPI_DEBUG_MSG((handle.function, NPI_TDC_CTL,
+		"npi_txdma_gather_set: xfer len %d to set (%d)",
+		desc_p->bits.hdw.tr_len, transfer_len));
+
+	NXGE_MEM_PIO_WRITE64(handle, desc_p->value);
+
+	return (status);
+}
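A sketch (editorial) of programming a two-buffer packet with this routine; the handle's register pointer must reference the descriptor memory, and `ring`, `buf0_*`/`buf1_*` are assumed driver state:

	/* First gather carries SOP, the mark bit and the gather count. */
	(void) npi_txdma_desc_gather_set(handle, &ring[i], 0,
		B_TRUE, 2, buf0_ioaddr, buf0_len);
	/* Subsequent gathers only need the buffer address and length. */
	(void) npi_txdma_desc_gather_set(handle, &ring[i + 1], 1,
		B_FALSE, 2, buf1_ioaddr, buf1_len);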
+
+/*
+ * npi_txdma_desc_gather_sop_set():
+ *	This function is called to set up the first gather entry.
+ *
+ * Parameters:
+ *	handle		- NPI handle (register pointer is the
+ *			  descriptor address in memory).
+ *	desc_p		- pointer to a descriptor
+ *	mark_mode	- mark bit (only valid for the first gather entry).
+ *	ngathers	- number of gather pointers set in the first gather entry.
+ * Return:
+ *	NPI_SUCCESS		- If set is complete successfully.
+ *
+ *	Error:
+ */
+npi_status_t
+npi_txdma_desc_gather_sop_set(npi_handle_t handle,
+		p_tx_desc_t desc_p,
+		boolean_t mark_mode,
+		uint8_t ngathers)
+{
+	int		status = NPI_SUCCESS;
+
+	desc_p->bits.hdw.sop = 1;
+	desc_p->bits.hdw.mark = mark_mode;
+	desc_p->bits.hdw.num_ptr = ngathers;
+
+	NXGE_MEM_PIO_WRITE64(handle, desc_p->value);
+
+	return (status);
+}
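Editorial note (not part of the changeset): npi_txdma_desc_gather_sop_set_1() below behaves like npi_txdma_desc_gather_sop_set() above, except that it also adds `extra' bytes to the descriptor's current transfer length before writing the descriptor out.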
+npi_status_t
+npi_txdma_desc_gather_sop_set_1(npi_handle_t handle,
+		p_tx_desc_t desc_p,
+		boolean_t mark_mode,
+		uint8_t ngathers,
+		uint32_t extra)
+{
+	int		status = NPI_SUCCESS;
+
+	desc_p->bits.hdw.sop = 1;
+	desc_p->bits.hdw.mark = mark_mode;
+	desc_p->bits.hdw.num_ptr = ngathers;
+	desc_p->bits.hdw.tr_len += extra;
+
+	NXGE_MEM_PIO_WRITE64(handle, desc_p->value);
+
+	return (status);
+}
+
+npi_status_t
+npi_txdma_desc_set_xfer_len(npi_handle_t handle,
+		p_tx_desc_t desc_p,
+		uint32_t transfer_len)
+{
+	int		status = NPI_SUCCESS;
+
+	desc_p->bits.hdw.tr_len = transfer_len;
+
+	NPI_DEBUG_MSG((handle.function, NPI_TDC_CTL,
+		"npi_set_xfer_len: len %d (%d)",
+		desc_p->bits.hdw.tr_len, transfer_len));
+
+	NXGE_MEM_PIO_WRITE64(handle, desc_p->value);
+
+	return (status);
+}
+
+npi_status_t
+npi_txdma_desc_set_zero(npi_handle_t handle, uint16_t entries)
+{
+	uint32_t	offset;
+	int		i;
+
+	/*
+	 * Assume no wrap-around; zero `entries' consecutive descriptors.
+	 */
+	offset = 0;
+	for (i = 0; i < entries; i++) {
+		NXGE_REG_WR64(handle, offset, 0);
+		offset += TXDMA_DESC_SIZE;
+	}
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_txdma_desc_mem_get(npi_handle_t handle, uint16_t index,
+		p_tx_desc_t desc_p)
+{
+	int		status = NPI_SUCCESS;
+
+	npi_txdma_dump_desc_one(handle, desc_p, index);
+
+	return (status);
+
+}
+
+/*
+ * npi_txdma_desc_kick_reg_set():
+ *	This function is called to kick the transmit channel to start transmission.
+ *
+ * Parameters:
+ *	handle		- NPI handle (virtualization flag must be defined).
+ *	channel		- logical TXDMA channel from 0 to 23.
+ *			  (If virtualization flag is not set, then
+ *			   logical channel is the same as the hardware
+ *			   channel number).
+ *	tail_index	- index into the transmit descriptor
+ *	wrap		- toggle bit to indicate if the tail index is
+ *			  wrapped around.
+ *
+ * Return:
+ *	NPI_SUCCESS		- If set is complete successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE		-
+ *		NPI_TXDMA_CHANNEL_INVALID	-
+ */
+npi_status_t
+npi_txdma_desc_kick_reg_set(npi_handle_t handle, uint8_t channel,
+		uint16_t tail_index, boolean_t wrap)
+{
+	int			status = NPI_SUCCESS;
+	tx_ring_kick_t		kick;
+
+	ASSERT(TXDMA_CHANNEL_VALID(channel));
+	if (!TXDMA_CHANNEL_VALID(channel)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_txdma_desc_kick_reg_set"
+				    " Invalid Input: channel <0x%x>",
+				    channel));
+		return (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(channel));
+	}
+
+	NPI_DEBUG_MSG((handle.function, NPI_TDC_CTL,
+		" npi_txdma_desc_kick_reg_set: "
+		" KICKING channel %d",
+		channel));
+
+	/* Toggle the wrap around bit */
+	kick.value = 0;
+	kick.bits.ldw.wrap = wrap;
+	kick.bits.ldw.tail = tail_index;
+
+	/* Kick start the Transmit kick register */
+	TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, kick.value);
+
+	return (status);
+}
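A sketch (editorial) of the usual kick sequence after queueing descriptors; `tail`, `wrap`, `ndescs_queued` and `ring_size` are assumed per-ring driver state, with 0 < ndescs_queued < ring_size:

	uint16_t new_tail = (uint16_t)((tail + ndescs_queued) % ring_size);

	if (new_tail <= tail)		/* the tail index wrapped past the ring end */
		wrap = (wrap == B_TRUE) ? B_FALSE : B_TRUE;
	(void) npi_txdma_desc_kick_reg_set(handle, chan, new_tail, wrap);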
+
+/*
+ * npi_txdma_desc_kick_reg_get():
+ *	This function is called to read the transmit kick register
+ *	(the current tail index and wrap bit).
+ *
+ * Parameters:
+ *	handle		- NPI handle (virtualization flag must be defined).
+ *	channel		- logical TXDMA channel from 0 to 23.
+ *			  (If virtualization flag is not set, then
+ *			   logical channel is the same as the hardware
+ *			   channel number).
+ *	kick_p		- pointer to store the kick register contents
+ *			  (tail index and wrap bit).
+ *
+ * Return:
+ *	NPI_SUCCESS		- If get is complete successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE		-
+ *		NPI_TXDMA_CHANNEL_INVALID	-
+ */
+npi_status_t
+npi_txdma_desc_kick_reg_get(npi_handle_t handle, uint8_t channel,
+		p_tx_ring_kick_t kick_p)
+{
+	int		status = NPI_SUCCESS;
+
+	ASSERT(TXDMA_CHANNEL_VALID(channel));
+	if (!TXDMA_CHANNEL_VALID(channel)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_txdma_desc_kick_reg_get"
+				    " Invalid Input: channel <0x%x>",
+				    channel));
+		return (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(channel));
+	}
+
+	TXDMA_REG_READ64(handle, TX_RING_KICK_REG, channel, &kick_p->value);
+
+	return (status);
+}
+
+/*
+ * npi_txdma_ring_head_get():
+ *	This function is called to get the transmit ring head index.
+ *
+ * Parameters:
+ *	handle		- NPI handle (virtualization flag must be defined).
+ *	channel		- logical TXDMA channel from 0 to 23.
+ *			  (If virtualization flag is not set, then
+ *			   logical channel is the same as the hardware
+ *			   channel number).
+ *	hdl_p		- pointer to the hardware defined transmit
+ *			  ring head data (head index and wrap bit).
+ *
+ * Return:
+ *	NPI_SUCCESS		- If get is complete successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE		-
+ *		NPI_TXDMA_CHANNEL_INVALID	-
+ */
+npi_status_t
+npi_txdma_ring_head_get(npi_handle_t handle, uint8_t channel,
+		p_tx_ring_hdl_t hdl_p)
+{
+	int		status = NPI_SUCCESS;
+
+	ASSERT(TXDMA_CHANNEL_VALID(channel));
+	if (!TXDMA_CHANNEL_VALID(channel)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_txdma_ring_head_get"
+				    " Invalid Input: channel <0x%x>",
+				    channel));
+		return (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(channel));
+	}
+
+	TXDMA_REG_READ64(handle, TX_RING_HDL_REG, channel, &hdl_p->value);
+
+	return (status);
+}
+
+/*ARGSUSED*/
+npi_status_t
+npi_txdma_channel_mbox_get(npi_handle_t handle, uint8_t channel,
+		p_txdma_mailbox_t mbox_p)
+{
+	int		status = NPI_SUCCESS;
+
+	return (status);
+
+}
+
+npi_status_t
+npi_txdma_channel_pre_state_get(npi_handle_t handle, uint8_t channel,
+		p_tx_dma_pre_st_t prep)
+{
+	int		status = NPI_SUCCESS;
+
+	ASSERT(TXDMA_CHANNEL_VALID(channel));
+	if (!TXDMA_CHANNEL_VALID(channel)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_txdma_channel_pre_state_get"
+				    " Invalid Input: channel <0x%x>",
+				    channel));
+		return (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(channel));
+	}
+
+	TXDMA_REG_READ64(handle, TX_DMA_PRE_ST_REG, channel, &prep->value);
+
+	return (status);
+}
+
+npi_status_t
+npi_txdma_ring_error_get(npi_handle_t handle, uint8_t channel,
+		p_txdma_ring_errlog_t ring_errlog_p)
+{
+	tx_rng_err_logh_t	logh;
+	tx_rng_err_logl_t	logl;
+	int			status = NPI_SUCCESS;
+
+	ASSERT(TXDMA_CHANNEL_VALID(channel));
+	if (!TXDMA_CHANNEL_VALID(channel)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_txdma_ring_error_get"
+				    " Invalid Input: channel <0x%x>",
+				    channel));
+		return (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(channel));
+	}
+
+	logh.value = 0;
+	TXDMA_REG_READ64(handle, TX_RNG_ERR_LOGH_REG, channel, &logh.value);
+	TXDMA_REG_READ64(handle, TX_RNG_ERR_LOGL_REG, channel, &logl.value);
+	ring_errlog_p->logh.bits.ldw.err = logh.bits.ldw.err;
+	ring_errlog_p->logh.bits.ldw.merr = logh.bits.ldw.merr;
+	ring_errlog_p->logh.bits.ldw.errcode = logh.bits.ldw.errcode;
+	ring_errlog_p->logh.bits.ldw.err_addr = logh.bits.ldw.err_addr;
+	ring_errlog_p->logl.bits.ldw.err_addr = logl.bits.ldw.err_addr;
+
+	return (status);
+}
+
+npi_status_t
+npi_txdma_inj_par_error_clear(npi_handle_t handle)
+{
+	NXGE_REG_WR64(handle, TDMC_INJ_PAR_ERR_REG, 0);
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_txdma_inj_par_error_set(npi_handle_t handle, uint32_t err_bits)
+{
+	tdmc_inj_par_err_t	inj;
+
+	inj.value = 0;
+	inj.bits.ldw.inject_parity_error = (err_bits & TDMC_INJ_PAR_ERR_MASK);
+	NXGE_REG_WR64(handle, TDMC_INJ_PAR_ERR_REG, inj.value);
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_txdma_inj_par_error_update(npi_handle_t handle, uint32_t err_bits)
+{
+	tdmc_inj_par_err_t	inj;
+
+	inj.value = 0;
+	NXGE_REG_RD64(handle, TDMC_INJ_PAR_ERR_REG, &inj.value);
+	inj.value |= (err_bits & TDMC_INJ_PAR_ERR_MASK);
+	NXGE_REG_WR64(handle, TDMC_INJ_PAR_ERR_REG, inj.value);
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_txdma_inj_par_error_get(npi_handle_t handle, uint32_t *err_bits)
+{
+	tdmc_inj_par_err_t	inj;
+
+	inj.value = 0;
+	NXGE_REG_RD64(handle, TDMC_INJ_PAR_ERR_REG, &inj.value);
+	*err_bits = (inj.value & TDMC_INJ_PAR_ERR_MASK);
+
+	return (NPI_SUCCESS);
+}
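These injection helpers are normally driven from fault-injection test paths. A short sketch (editorial; the bit value 0x1 is an arbitrary example within TDMC_INJ_PAR_ERR_MASK):

	uint32_t armed;

	(void) npi_txdma_inj_par_error_set(handle, 0x1);	/* arm one parity-error bit */
	(void) npi_txdma_inj_par_error_get(handle, &armed);	/* read back what is armed */
	(void) npi_txdma_inj_par_error_clear(handle);		/* disarm injection */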
+
+npi_status_t
+npi_txdma_dbg_sel_set(npi_handle_t handle, uint8_t dbg_sel)
+{
+	tdmc_dbg_sel_t		dbg;
+
+	dbg.value = 0;
+	dbg.bits.ldw.dbg_sel = (dbg_sel & TDMC_DBG_SEL_MASK);
+
+	NXGE_REG_WR64(handle, TDMC_DBG_SEL_REG, dbg.value);
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_txdma_training_vector_set(npi_handle_t handle, uint32_t training_vector)
+{
+	tdmc_training_t		vec;
+
+	vec.value = 0;
+	vec.bits.ldw.vec = training_vector;
+
+	NXGE_REG_WR64(handle, TDMC_TRAINING_REG, vec.value);
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_txdma_dump_desc_one(npi_handle_t handle, p_tx_desc_t desc_p,
+ *	int desc_index)
+ *
+ *	Dumps the contents of one transmit descriptor.
+ *
+ * Parameters:
+ *	handle		- NPI handle (register pointer is the
+ *			  descriptor address in memory).
+ *	desc_p		- pointer to place the descriptor contents
+ *	desc_index	- descriptor index
+ *
+ */
+/*ARGSUSED*/
+void
+npi_txdma_dump_desc_one(npi_handle_t handle, p_tx_desc_t desc_p, int desc_index)
+{
+
+	tx_desc_t 		desc, *desp;
+#ifdef NXGE_DEBUG
+	uint64_t		sad;
+	int			xfer_len;
+#endif
+
+	NPI_DEBUG_MSG((handle.function, NPI_TDC_CTL,
+		"\n==> npi_txdma_dump_desc_one: dump "
+		" desc_p $%p descriptor entry %d\n",
+		desc_p, desc_index));
+	desc.value = 0;
+	desp = ((desc_p != NULL) ? desc_p : (p_tx_desc_t)&desc);
+	desp->value = NXGE_MEM_PIO_READ64(handle);
+#ifdef NXGE_DEBUG
+	sad = (desp->value & TX_PKT_DESC_SAD_MASK);
+	xfer_len = ((desp->value & TX_PKT_DESC_TR_LEN_MASK) >>
+			TX_PKT_DESC_TR_LEN_SHIFT);
+#endif
+	NPI_DEBUG_MSG((handle.function, NPI_TDC_CTL, "\n\t: value 0x%llx\n"
+		"\t\tsad $%p\ttr_len %d len %d\tnptrs %d\tmark %d sop %d\n",
+		desp->value,
+		sad,
+		desp->bits.hdw.tr_len,
+		xfer_len,
+		desp->bits.hdw.num_ptr,
+		desp->bits.hdw.mark,
+		desp->bits.hdw.sop));
+
+	NPI_DEBUG_MSG((handle.function, NPI_TDC_CTL,
+			    "\n<== npi_txdma_dump_desc_one: Done \n"));
+
+}
+
+/*ARGSUSED*/
+void
+npi_txdma_dump_hdr(npi_handle_t handle, p_tx_pkt_header_t hdrp)
+{
+	NPI_DEBUG_MSG((handle.function, NPI_TDC_CTL,
+				    "\n==> npi_txdma_dump_hdr: dump\n"));
+	NPI_DEBUG_MSG((handle.function, NPI_TDC_CTL,
+				    "\n\t: value 0x%llx\n"
+		"\t\tpkttype 0x%x\tip_ver %d\tllc %d\tvlan %d \tihl %d\n"
+		"\t\tl3start %d\tl4start %d\tl4stuff %d\n"
+		"\t\txferlen %d\tpad %d\n",
+		hdrp->value,
+		hdrp->bits.hdw.cksum_en_pkt_type,
+		hdrp->bits.hdw.ip_ver,
+		hdrp->bits.hdw.llc,
+		hdrp->bits.hdw.vlan,
+		hdrp->bits.hdw.ihl,
+		hdrp->bits.hdw.l3start,
+		hdrp->bits.hdw.l4start,
+		hdrp->bits.hdw.l4stuff,
+		hdrp->bits.ldw.tot_xfer_len,
+		hdrp->bits.ldw.pad));
+
+	NPI_DEBUG_MSG((handle.function, NPI_TDC_CTL,
+			    "\n<== npi_txdma_dump_hdr: Done \n"));
+}
+
+npi_status_t
+npi_txdma_inj_int_error_set(npi_handle_t handle, uint8_t channel,
+	p_tdmc_intr_dbg_t erp)
+{
+	int		status = NPI_SUCCESS;
+
+	ASSERT(TXDMA_CHANNEL_VALID(channel));
+	if (!TXDMA_CHANNEL_VALID(channel)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			" npi_txdma_inj_int_error_set"
+			" Invalid Input: channel <0x%x>",
+					    channel));
+		return (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(channel));
+	}
+
+	TXDMA_REG_WRITE64(handle, TDMC_INTR_DBG_REG, channel, erp->value);
+
+	return (status);
+}
+
+/*
+ * Static functions start here.
+ */
+static npi_status_t
+npi_txdma_control_reset_wait(npi_handle_t handle, uint8_t channel)
+{
+
+	tx_cs_t		txcs;
+	int		loop = 0;
+
+	do {
+		NXGE_DELAY(TXDMA_WAIT_MSEC);
+		TXDMA_REG_READ64(handle, TX_CS_REG, channel, &txcs.value);
+		if (!txcs.bits.ldw.rst) {
+			return (NPI_SUCCESS);
+		}
+		loop++;
+	} while (loop < TXDMA_WAIT_LOOP);
+
+	if (loop == TXDMA_WAIT_LOOP) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			    "npi_txdma_control_reset_wait: RST bit not "
+			    "cleared to 0 txcs.bits 0x%llx", txcs.value));
+		return (NPI_FAILURE | NPI_TXDMA_RESET_FAILED);
+	}
+	return (NPI_SUCCESS);
+}
+
+static npi_status_t
+npi_txdma_control_stop_wait(npi_handle_t handle, uint8_t channel)
+{
+	tx_cs_t		txcs;
+	int		loop = 0;
+
+	do {
+		NXGE_DELAY(TXDMA_WAIT_MSEC);
+		TXDMA_REG_READ64(handle, TX_CS_REG, channel, &txcs.value);
+		if (txcs.bits.ldw.sng_state) {
+			return (NPI_SUCCESS);
+		}
+		loop++;
+	} while (loop < TXDMA_WAIT_LOOP);
+
+	if (loop == TXDMA_WAIT_LOOP) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			    "npi_txdma_control_stop_wait: SNG_STATE not "
+			    "set to 1 txcs.bits 0x%llx", txcs.value));
+		return (NPI_FAILURE | NPI_TXDMA_STOP_FAILED);
+	}
+
+	return (NPI_SUCCESS);
+}
+
+static npi_status_t
+npi_txdma_control_resume_wait(npi_handle_t handle, uint8_t channel)
+{
+	tx_cs_t		txcs;
+	int		loop = 0;
+
+	do {
+		NXGE_DELAY(TXDMA_WAIT_MSEC);
+		TXDMA_REG_READ64(handle, TX_CS_REG, channel, &txcs.value);
+		if (!txcs.bits.ldw.sng_state) {
+			return (NPI_SUCCESS);
+		}
+		loop++;
+	} while (loop < TXDMA_WAIT_LOOP);
+
+	if (loop == TXDMA_WAIT_LOOP) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			    "npi_txdma_control_resume_wait: sng_state not "
+			    "set to 0 txcs.bits 0x%llx", txcs.value));
+		return (NPI_FAILURE | NPI_TXDMA_RESUME_FAILED);
+	}
+
+	return (NPI_SUCCESS);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nxge/npi/npi_txdma.h	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,290 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _NPI_TXDMA_H
+#define	_NPI_TXDMA_H
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#include <npi.h>
+#include <nxge_txdma_hw.h>
+
+#define	DMA_LOG_PAGE_FN_VALIDATE(cn, pn, fn, status)	\
+{									\
+	status = NPI_SUCCESS;						\
+	if (!TXDMA_CHANNEL_VALID(cn)) {					\
+		status = (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(cn));	\
+	} else if (!TXDMA_PAGE_VALID(pn)) {			\
+		status =  (NPI_FAILURE | NPI_TXDMA_PAGE_INVALID(pn));	\
+	} else if (!TXDMA_FUNC_VALID(fn)) {			\
+		status =  (NPI_FAILURE | NPI_TXDMA_FUNC_INVALID(fn));	\
+	} \
+}
+
+#define	DMA_LOG_PAGE_VALIDATE(cn, pn, status)	\
+{									\
+	status = NPI_SUCCESS;						\
+	if (!TXDMA_CHANNEL_VALID(cn)) {					\
+		status = (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(cn));	\
+	} else if (!TXDMA_PAGE_VALID(pn)) {			\
+		status =  (NPI_FAILURE | NPI_TXDMA_PAGE_INVALID(pn));	\
+	} \
+}
+
+typedef	enum _txdma_cs_cntl_e {
+	TXDMA_INIT_RESET	= 0x1,
+	TXDMA_INIT_START	= 0x2,
+	TXDMA_START		= 0x3,
+	TXDMA_RESET		= 0x4,
+	TXDMA_STOP		= 0x5,
+	TXDMA_RESUME		= 0x6,
+	TXDMA_CLEAR_MMK		= 0x7,
+	TXDMA_MBOX_ENABLE	= 0x8
+} txdma_cs_cntl_t;
+
+typedef	enum _txdma_log_cfg_e {
+	TXDMA_LOG_PAGE_MASK	= 0x01,
+	TXDMA_LOG_PAGE_VALUE	= 0x02,
+	TXDMA_LOG_PAGE_RELOC	= 0x04,
+	TXDMA_LOG_PAGE_VALID	= 0x08,
+	TXDMA_LOG_PAGE_ALL	= (TXDMA_LOG_PAGE_MASK | TXDMA_LOG_PAGE_VALUE |
+				TXDMA_LOG_PAGE_RELOC | TXDMA_LOG_PAGE_VALID)
+} txdma_log_cfg_t;
+
+typedef	enum _txdma_ent_msk_cfg_e {
+	CFG_TXDMA_PKT_PRT_MASK		= TX_ENT_MSK_PKT_PRT_ERR_MASK,
+	CFG_TXDMA_CONF_PART_MASK	= TX_ENT_MSK_CONF_PART_ERR_MASK,
+	CFG_TXDMA_NACK_PKT_RD_MASK	= TX_ENT_MSK_NACK_PKT_RD_MASK,
+	CFG_TXDMA_NACK_PREF_MASK	= TX_ENT_MSK_NACK_PREF_MASK,
+	CFG_TXDMA_PREF_BUF_ECC_ERR_MASK	= TX_ENT_MSK_PREF_BUF_ECC_ERR_MASK,
+	CFG_TXDMA_TX_RING_OFLOW_MASK	= TX_ENT_MSK_TX_RING_OFLOW_MASK,
+	CFG_TXDMA_PKT_SIZE_ERR_MASK	= TX_ENT_MSK_PKT_SIZE_ERR_MASK,
+	CFG_TXDMA_MBOX_ERR_MASK		= TX_ENT_MSK_MBOX_ERR_MASK,
+	CFG_TXDMA_MK_MASK		= TX_ENT_MSK_MK_MASK,
+	CFG_TXDMA_MASK_ALL		= (TX_ENT_MSK_PKT_PRT_ERR_MASK |
+					TX_ENT_MSK_CONF_PART_ERR_MASK |
+					TX_ENT_MSK_NACK_PKT_RD_MASK |
+					TX_ENT_MSK_NACK_PREF_MASK |
+					TX_ENT_MSK_PREF_BUF_ECC_ERR_MASK |
+					TX_ENT_MSK_TX_RING_OFLOW_MASK |
+					TX_ENT_MSK_PKT_SIZE_ERR_MASK |
+					TX_ENT_MSK_MBOX_ERR_MASK |
+					TX_ENT_MSK_MK_MASK)
+} txdma_ent_msk_cfg_t;
+
+
+typedef	struct _txdma_ring_errlog {
+	tx_rng_err_logl_t	logl;
+	tx_rng_err_logh_t	logh;
+} txdma_ring_errlog_t, *p_txdma_ring_errlog_t;
+
+/*
+ * Register offset (0x200 bytes for each channel) for logical pages registers.
+ */
+#define	NXGE_TXLOG_OFFSET(x, channel) (x + TX_LOG_DMA_OFFSET(channel))
+
+/*
+ * Register offset (0x200 bytes for each channel) for transmit ring registers.
+ * (Ring configuration, kick register, event mask, control and status,
+ *  mailbox, prefetch, ring errors).
+ */
+#define	NXGE_TXDMA_OFFSET(x, v, channel) (x + \
+		(!v ? DMC_OFFSET(channel) : TDMC_PIOVADDR_OFFSET(channel)))
+/*
+ * Register offset (0x8 bytes for each port) for transmit mapping registers.
+ */
+#define	NXGE_TXDMA_MAP_OFFSET(x, port) (x + TX_DMA_MAP_PORT_OFFSET(port))
+
+/*
+ * Register offset (0x10 bytes for each channel) for transmit DRR and ring
+ * usage registers.
+ */
+#define	NXGE_TXDMA_DRR_OFFSET(x, channel) (x + \
+			TXDMA_DRR_RNG_USE_OFFSET(channel))
+
+/*
+ * PIO macros to read and write the transmit registers.
+ */
+#define	TX_LOG_REG_READ64(handle, reg, channel, val_p)	\
+	NXGE_REG_RD64(handle, NXGE_TXLOG_OFFSET(reg, channel), val_p)
+
+#define	TX_LOG_REG_WRITE64(handle, reg, channel, data)	\
+	NXGE_REG_WR64(handle, NXGE_TXLOG_OFFSET(reg, channel), data)
+
+#define	TXDMA_REG_READ64(handle, reg, channel, val_p)	\
+		NXGE_REG_RD64(handle, \
+		(NXGE_TXDMA_OFFSET(reg, handle.is_vraddr, channel)), val_p)
+
+#define	TXDMA_REG_WRITE64(handle, reg, channel, data)	\
+		NXGE_REG_WR64(handle, \
+		NXGE_TXDMA_OFFSET(reg, handle.is_vraddr, channel), data)
+
+#define	TX_DRR_RNGUSE_REG_READ64(handle, reg, channel, val_p)	\
+	NXGE_REG_RD64(handle, (NXGE_TXDMA_DRR_OFFSET(reg, channel)), val_p)
+
+#define	TX_DRR_RNGUSE_REG_WRITE64(handle, reg, channel, data)	\
+	NXGE_REG_WR64(handle, NXGE_TXDMA_DRR_OFFSET(reg, channel), data)
+
+/*
+ * Transmit Descriptor Definitions.
+ */
+#define	TXDMA_DESC_SIZE			(sizeof (tx_desc_t))
+
+#define	NPI_TXDMA_GATHER_INDEX(index)	\
+	(((index) <= TX_MAX_GATHER_POINTERS) ? NPI_SUCCESS : \
+				(NPI_TXDMA_GATHER_INVALID))
+
+/*
+ * Transmit NPI error codes
+ */
+#define	TXDMA_ER_ST			(TXDMA_BLK_ID << NPI_BLOCK_ID_SHIFT)
+#define	TXDMA_ID_SHIFT(n)		(n << NPI_PORT_CHAN_SHIFT)
+
+#define	TXDMA_HW_STOP_FAILED		(NPI_BK_HW_ER_START | 0x1)
+#define	TXDMA_HW_RESUME_FAILED		(NPI_BK_HW_ER_START | 0x2)
+
+#define	TXDMA_GATHER_INVALID		(NPI_BK_ERROR_START | 0x1)
+#define	TXDMA_XFER_LEN_INVALID		(NPI_BK_ERROR_START | 0x2)
+
+#define	NPI_TXDMA_OPCODE_INVALID(n)	(TXDMA_ID_SHIFT(n) |	\
+					TXDMA_ER_ST | OPCODE_INVALID)
+
+#define	NPI_TXDMA_FUNC_INVALID(n)	(TXDMA_ID_SHIFT(n) |	\
+					TXDMA_ER_ST | PORT_INVALID)
+#define	NPI_TXDMA_CHANNEL_INVALID(n)	(TXDMA_ID_SHIFT(n) |	\
+					TXDMA_ER_ST | CHANNEL_INVALID)
+
+#define	NPI_TXDMA_PAGE_INVALID(n)	(TXDMA_ID_SHIFT(n) |	\
+					TXDMA_ER_ST | LOGICAL_PAGE_INVALID)
+
+#define	NPI_TXDMA_REGISTER_INVALID	(TXDMA_ER_ST | REGISTER_INVALID)
+#define	NPI_TXDMA_COUNTER_INVALID	(TXDMA_ER_ST | COUNTER_INVALID)
+#define	NPI_TXDMA_CONFIG_INVALID	(TXDMA_ER_ST | CONFIG_INVALID)
+
+
+#define	NPI_TXDMA_GATHER_INVALID	(TXDMA_ER_ST | TXDMA_GATHER_INVALID)
+#define	NPI_TXDMA_XFER_LEN_INVALID	(TXDMA_ER_ST | TXDMA_XFER_LEN_INVALID)
+
+#define	NPI_TXDMA_RESET_FAILED		(TXDMA_ER_ST | RESET_FAILED)
+#define	NPI_TXDMA_STOP_FAILED		(TXDMA_ER_ST | TXDMA_HW_STOP_FAILED)
+#define	NPI_TXDMA_RESUME_FAILED		(TXDMA_ER_ST | TXDMA_HW_RESUME_FAILED)
+
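As a reference point (editorial), the routines in npi_txdma.c build their failure values from the macros above, so a single npi_status_t carries the block id, the offending channel and the error code:

	npi_status_t st = NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(5);
	/* st == NPI_FAILURE | (5 << NPI_PORT_CHAN_SHIFT) | TXDMA_ER_ST | CHANNEL_INVALID */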
+/*
+ * Transmit DMA Channel NPI Prototypes.
+ */
+npi_status_t npi_txdma_mode32_set(npi_handle_t, boolean_t);
+npi_status_t npi_txdma_log_page_set(npi_handle_t, uint8_t,
+		p_dma_log_page_t);
+npi_status_t npi_txdma_log_page_get(npi_handle_t, uint8_t,
+		p_dma_log_page_t);
+npi_status_t npi_txdma_log_page_handle_set(npi_handle_t, uint8_t,
+		p_log_page_hdl_t);
+npi_status_t npi_txdma_log_page_config(npi_handle_t, io_op_t,
+		txdma_log_cfg_t, uint8_t, p_dma_log_page_t);
+npi_status_t npi_txdma_log_page_vld_config(npi_handle_t, io_op_t,
+		uint8_t, p_log_page_vld_t);
+npi_status_t npi_txdma_drr_weight_set(npi_handle_t, uint8_t,
+		uint32_t);
+npi_status_t npi_txdma_channel_reset(npi_handle_t, uint8_t);
+npi_status_t npi_txdma_channel_init_enable(npi_handle_t,
+		uint8_t);
+npi_status_t npi_txdma_channel_enable(npi_handle_t, uint8_t);
+npi_status_t npi_txdma_channel_disable(npi_handle_t, uint8_t);
+npi_status_t npi_txdma_channel_resume(npi_handle_t, uint8_t);
+npi_status_t npi_txdma_channel_mmk_clear(npi_handle_t, uint8_t);
+npi_status_t npi_txdma_channel_mbox_enable(npi_handle_t, uint8_t);
+npi_status_t npi_txdma_channel_control(npi_handle_t,
+		txdma_cs_cntl_t, uint8_t);
+npi_status_t npi_txdma_control_status(npi_handle_t, io_op_t,
+		uint8_t, p_tx_cs_t);
+
+npi_status_t npi_txdma_event_mask(npi_handle_t, io_op_t,
+		uint8_t, p_tx_dma_ent_msk_t);
+npi_status_t npi_txdma_event_mask_config(npi_handle_t, io_op_t,
+		uint8_t, txdma_ent_msk_cfg_t *);
+npi_status_t npi_txdma_event_mask_mk_out(npi_handle_t, uint8_t);
+npi_status_t npi_txdma_event_mask_mk_in(npi_handle_t, uint8_t);
+
+npi_status_t npi_txdma_ring_addr_set(npi_handle_t, uint8_t,
+		uint64_t, uint32_t);
+npi_status_t npi_txdma_ring_config(npi_handle_t, io_op_t,
+		uint8_t, uint64_t *);
+npi_status_t npi_txdma_mbox_config(npi_handle_t, io_op_t,
+		uint8_t, uint64_t *);
+npi_status_t npi_txdma_desc_gather_set(npi_handle_t,
+		p_tx_desc_t, uint8_t,
+		boolean_t, uint8_t,
+		uint64_t, uint32_t);
+
+npi_status_t npi_txdma_desc_gather_sop_set(npi_handle_t,
+		p_tx_desc_t, boolean_t, uint8_t);
+
+npi_status_t npi_txdma_desc_gather_sop_set_1(npi_handle_t,
+		p_tx_desc_t, boolean_t, uint8_t,
+		uint32_t);
+
+npi_status_t npi_txdma_desc_set_xfer_len(npi_handle_t,
+		p_tx_desc_t, uint32_t);
+
+npi_status_t npi_txdma_desc_set_zero(npi_handle_t, uint16_t);
+npi_status_t npi_txdma_desc_mem_get(npi_handle_t, uint16_t,
+		p_tx_desc_t);
+npi_status_t npi_txdma_desc_kick_reg_set(npi_handle_t, uint8_t,
+		uint16_t, boolean_t);
+npi_status_t npi_txdma_desc_kick_reg_get(npi_handle_t, uint8_t,
+		p_tx_ring_kick_t);
+npi_status_t npi_txdma_ring_head_get(npi_handle_t, uint8_t,
+		p_tx_ring_hdl_t);
+npi_status_t npi_txdma_channel_mbox_get(npi_handle_t, uint8_t,
+		p_txdma_mailbox_t);
+npi_status_t npi_txdma_channel_pre_state_get(npi_handle_t,
+		uint8_t, p_tx_dma_pre_st_t);
+npi_status_t npi_txdma_ring_error_get(npi_handle_t,
+		uint8_t, p_txdma_ring_errlog_t);
+npi_status_t npi_txdma_inj_par_error_clear(npi_handle_t);
+npi_status_t npi_txdma_inj_par_error_set(npi_handle_t,
+		uint32_t);
+npi_status_t npi_txdma_inj_par_error_update(npi_handle_t,
+		uint32_t);
+npi_status_t npi_txdma_inj_par_error_get(npi_handle_t,
+		uint32_t *);
+npi_status_t npi_txdma_dbg_sel_set(npi_handle_t, uint8_t);
+npi_status_t npi_txdma_training_vector_set(npi_handle_t,
+		uint32_t);
+void npi_txdma_dump_desc_one(npi_handle_t, p_tx_desc_t,
+	int);
+npi_status_t npi_txdma_dump_tdc_regs(npi_handle_t, uint8_t);
+npi_status_t npi_txdma_dump_fzc_regs(npi_handle_t);
+npi_status_t npi_txdma_inj_int_error_set(npi_handle_t, uint8_t,
+	p_tdmc_intr_dbg_t);
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _NPI_TXDMA_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nxge/npi/npi_vir.c	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,1538 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#include <npi_vir.h>
+
+/* One register only */
+uint64_t pio_offset[] = {
+	DEV_FUNC_SR_REG
+};
+
+const char *pio_name[] = {
+	"DEV_FUNC_SR_REG",
+};
+
+/* Two registers */
+uint64_t fzc_pio_offset[] = {
+	MULTI_PART_CTL_REG,
+	LDGITMRES_REG
+};
+
+const char *fzc_pio_name[] = {
+	"MULTI_PART_CTL_REG",
+	"LDGITMRES_REG"
+};
+
+/* 64 sets */
+uint64_t fzc_pio_dma_bind_offset[] = {
+	DMA_BIND_REG
+};
+
+const char *fzc_pio_dma_bind_name[] = {
+	"DMA_BIND_REG",
+};
+
+/* 69 logical devices */
+uint64_t fzc_pio_ldgnum_offset[] = {
+	LDG_NUM_REG
+};
+
+const char *fzc_pio_ldgnum_name[] = {
+	"LDG_NUM_REG",
+};
+
+/* PIO_LDSV, 64 sets by 8192 bytes */
+uint64_t pio_ldsv_offset[] = {
+	LDSV0_REG,
+	LDSV1_REG,
+	LDSV2_REG,
+	LDGIMGN_REG
+};
+const char *pio_ldsv_name[] = {
+	"LDSV0_REG",
+	"LDSV1_REG",
+	"LDSV2_REG",
+	"LDGIMGN_REG"
+};
+
+/* PIO_IMASK0: 64 by 8192 */
+uint64_t pio_imask0_offset[] = {
+	LD_IM0_REG,
+};
+
+const char *pio_imask0_name[] = {
+	"LD_IM0_REG",
+};
+
+/* PIO_IMASK1: 5 by 8192 */
+uint64_t pio_imask1_offset[] = {
+	LD_IM1_REG
+};
+
+const char *pio_imask1_name[] = {
+	"LD_IM1_REG"
+};
+
+/* SID: 64 by 8 */
+uint64_t fzc_pio_sid_offset[] = {
+	SID_REG
+};
+
+const char *fzc_pio_sid_name[] = {
+	"SID_REG"
+};
+
+npi_status_t
+npi_vir_dump_pio_fzc_regs_one(npi_handle_t handle)
+{
+	uint64_t value;
+	int num_regs, i;
+
+	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+		"\nPIO FZC Common Register Dump\n"));
+
+	num_regs = sizeof (pio_offset) / sizeof (uint64_t);
+	for (i = 0; i < num_regs; i++) {
+		value = 0;
+		NXGE_REG_RD64(handle, pio_offset[i], &value);
+		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL, "0x%08llx "
+			"%s\t 0x%08llx \n",
+			pio_offset[i],
+			pio_name[i], value));
+	}
+
+	num_regs = sizeof (fzc_pio_offset) / sizeof (uint64_t);
+	for (i = 0; i < num_regs; i++) {
+		NXGE_REG_RD64(handle, fzc_pio_offset[i], &value);
+		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL, "0x%08llx "
+			"%s\t 0x%08llx \n",
+			fzc_pio_offset[i],
+			fzc_pio_name[i], value));
+	}
+
+	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+		"\n PIO FZC Register Dump Done \n"));
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_vir_dump_ldgnum(npi_handle_t handle)
+{
+	uint64_t value = 0, offset = 0;
+	int num_regs, i, ldv;
+
+	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+		"\nFZC PIO LDG Number Register Dump\n"));
+
+	num_regs = sizeof (fzc_pio_ldgnum_offset) / sizeof (uint64_t);
+	for (ldv = 0; ldv < NXGE_INT_MAX_LDS; ldv++) {
+		for (i = 0; i < num_regs; i++) {
+			value = 0;
+			offset = fzc_pio_ldgnum_offset[i] + 8 * ldv;
+			NXGE_REG_RD64(handle, offset, &value);
+			NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+				"Logical Device %d: 0x%08llx "
+				"%s\t 0x%08llx \n",
+				ldv, offset,
+				fzc_pio_ldgnum_name[i], value));
+		}
+	}
+
+	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+		"\n FZC PIO LDG Register Dump Done \n"));
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_vir_dump_ldsv(npi_handle_t handle)
+{
+	uint64_t value, offset;
+	int num_regs, i, ldg;
+
+	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+		"\nLD Device State Vector Register Dump\n"));
+
+	num_regs = sizeof (pio_ldsv_offset) / sizeof (uint64_t);
+	for (ldg = 0; ldg < NXGE_INT_MAX_LDGS; ldg++) {
+		for (i = 0; i < num_regs; i++) {
+			value = 0;
+			offset = pio_ldsv_offset[i] + 8192 * ldg;
+			NXGE_REG_RD64(handle, offset, &value);
+			NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+				    "LDG State: group %d: 0x%08llx "
+				    "%s\t 0x%08llx \n",
+				ldg, offset,
+				pio_ldsv_name[i], value));
+		}
+	}
+
+	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+		"\n FZC PIO LDG Register Dump Done \n"));
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_vir_dump_imask0(npi_handle_t handle)
+{
+	uint64_t value, offset;
+	int num_regs, i, ldv;
+
+	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+		"\nLD Interrupt Mask Register Dump\n"));
+
+	num_regs = sizeof (pio_imask0_offset) / sizeof (uint64_t);
+	for (ldv = 0; ldv < 64; ldv++) {
+		for (i = 0; i < num_regs; i++) {
+			value = 0;
+			offset = pio_imask0_offset[i] + 8192 * ldv;
+			NXGE_REG_RD64(handle, offset,
+				&value);
+			NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+				"LD Interrupt Mask %d: 0x%08llx "
+				"%s\t 0x%08llx \n",
+				ldv, offset,
+				pio_imask0_name[i], value));
+		}
+	}
+	num_regs = sizeof (pio_imask1_offset) / sizeof (uint64_t);
+	for (ldv = 64; ldv < 69; ldv++) {
+		for (i = 0; i < num_regs; i++) {
+			value = 0;
+			offset = pio_imask1_offset[i] + 8192 * (ldv - 64);
+			NXGE_REG_RD64(handle, offset,
+				&value);
+			NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+				"LD Interrupt Mask %d: 0x%08llx "
+				"%s\t 0x%08llx \n",
+				ldv, offset,
+				pio_imask1_name[i], value));
+		}
+	}
+
+	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+		"\n FZC PIO Logical Device Group Register Dump Done \n"));
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_vir_dump_sid(npi_handle_t handle)
+{
+	uint64_t value, offset;
+	int num_regs, i, ldg;
+
+	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+		"\nSystem Interrupt Data Register Dump\n"));
+
+	num_regs = sizeof (fzc_pio_sid_offset) / sizeof (uint64_t);
+	for (ldg = 0; ldg < NXGE_INT_MAX_LDGS; ldg++) {
+		for (i = 0; i < num_regs; i++) {
+			value = 0;
+			offset = fzc_pio_sid_offset[i] + 8 * ldg;
+			NXGE_REG_RD64(handle, offset,
+				&value);
+			NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+				"SID for group %d: 0x%08llx "
+				"%s\t 0x%08llx \n",
+				ldg, offset,
+				fzc_pio_sid_name[i], value));
+		}
+	}
+
+	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
+		"\n FZC PIO SID Register Dump Done \n"));
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_dev_func_sr_init():
+ *	This function is called to initialize the device function
+ *	shared register (set the software implementation lock
+ *	state to FREE).
+ * Parameters:
+ *	handle		- NPI handle
+ * Return:
+ *	NPI_SUCCESS	- If initialization is complete successfully.
+ *			  (set sr bits to free).
+ *	Error:
+ *	NPI_FAILURE
+ *		VIR_TAS_BUSY
+ */
+
+npi_status_t
+npi_dev_func_sr_init(npi_handle_t handle)
+{
+	dev_func_sr_t		sr;
+	int			status = NPI_SUCCESS;
+
+	NXGE_REG_RD64(handle, DEV_FUNC_SR_REG, &sr.value);
+	if (!sr.bits.ldw.tas) {
+		/*
+		 * After read, this bit is set to 1 by hardware.
+		 * We own it if the tas bit reads as 0.
+		 * Set the lock state to free if it is in reset state.
+		 */
+		if (!sr.bits.ldw.sr) {
+			/* reset state */
+			sr.bits.ldw.sr |= NPI_DEV_SR_LOCK_ST_FREE;
+			NXGE_REG_WR64(handle, DEV_FUNC_SR_REG, sr.value);
+			sr.bits.ldw.tas = 0;
+			NXGE_REG_WR64(handle, DEV_FUNC_SR_REG, sr.value);
+		}
+
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			" npi_dev_func_sr_init"
+			" sr <0x%x>",
+			sr.bits.ldw.sr));
+	} else {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_dev_func_sr_init"
+				    " tas busy <0x%x>",
+				    sr.bits.ldw.sr));
+		status = NPI_VIR_TAS_BUSY(sr.bits.ldw.funcid);
+	}
+
+	return (status);
+}
+
+/*
+ * npi_dev_func_sr_lock_enter():
+ *	This function is called to lock the function shared register
+ *	by setting the lock state to busy.
+ * Parameters:
+ *	handle		- NPI handle
+ * Return:
+ *	NPI_SUCCESS	- If the function id can own the lock.
+ *
+ *	Error:
+ *	NPI_FAILURE
+ *		VIR_SR_RESET
+ *		VIR_SR_BUSY
+ *		VIR_SR_INVALID
+ *		VIR_TAS_BUSY
+ */
+
+npi_status_t
+npi_dev_func_sr_lock_enter(npi_handle_t handle)
+{
+	dev_func_sr_t		sr;
+	int			status = NPI_SUCCESS;
+	uint32_t		state;
+
+	NXGE_REG_RD64(handle, DEV_FUNC_SR_REG, &sr.value);
+	if (!sr.bits.ldw.tas) {
+		/*
+		 * tas bit will be set to 1 by hardware.
+		 * reset tas bit when we unlock the sr.
+		 */
+		state = sr.bits.ldw.sr & NPI_DEV_SR_LOCK_ST_MASK;
+		switch (state) {
+		case NPI_DEV_SR_LOCK_ST_FREE:
+			/*
+			 * set it to busy and our function id.
+			 */
+			sr.bits.ldw.sr |= (NPI_DEV_SR_LOCK_ST_BUSY |
+						(sr.bits.ldw.funcid <<
+						NPI_DEV_SR_LOCK_FID_SHIFT));
+			NXGE_REG_WR64(handle, DEV_FUNC_SR_REG, sr.value);
+			break;
+
+		case NPI_DEV_SR_LOCK_ST_RESET:
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_dev_func_sr_lock_enter"
+					    " reset state <0x%x>",
+					    sr.bits.ldw.sr));
+			status = NPI_VIR_SR_RESET(sr.bits.ldw.funcid);
+			break;
+
+		case NPI_DEV_SR_LOCK_ST_BUSY:
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_dev_func_sr_lock_enter"
+					    " busy <0x%x>",
+					    sr.bits.ldw.sr));
+			status = NPI_VIR_SR_BUSY(sr.bits.ldw.funcid);
+			break;
+
+		default:
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_dev_func_sr_lock_enter"
+					    " invalid state <0x%x>",
+					    sr.bits.ldw.sr));
+			status = NPI_VIR_SR_INVALID(sr.bits.ldw.funcid);
+			break;
+		}
+	} else {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_dev_func_sr_lock_enter"
+				    " tas busy <0x%x>", sr.bits.ldw.sr));
+		status = NPI_VIR_TAS_BUSY(sr.bits.ldw.funcid);
+	}
+
+	return (status);
+}
+
+/*
+ * npi_dev_func_sr_lock_free():
+ *	This function is called to free the function shared register
+ *	by setting the lock state to free.
+ * Parameters:
+ *	handle		- NPI handle
+ * Return:
+ *	NPI_SUCCESS	- If the function id can free the lock.
+ *
+ *	Error:
+ *	NPI_FAILURE
+ *		VIR_SR_NOTOWNER
+ *		VIR_TAS_NOTREAD
+ */
+
+npi_status_t
+npi_dev_func_sr_lock_free(npi_handle_t handle)
+{
+	dev_func_sr_t		sr;
+	int			status = NPI_SUCCESS;
+
+	NXGE_REG_RD64(handle, DEV_FUNC_SR_REG, &sr.value);
+	if (sr.bits.ldw.tas) {
+		if (sr.bits.ldw.funcid == NPI_GET_LOCK_OWNER(sr.bits.ldw.sr)) {
+			sr.bits.ldw.sr &= NPI_DEV_SR_IMPL_ST_MASK;
+			sr.bits.ldw.sr |= NPI_DEV_SR_LOCK_ST_FREE;
+			sr.bits.ldw.tas = 0;
+			NXGE_REG_WR64(handle, DEV_FUNC_SR_REG, sr.value);
+		} else {
+			NPI_DEBUG_MSG((handle.function, NPI_VIR_CTL,
+					    " npi_dev_func_sr_lock_free"
+					    " not owner <0x%x>",
+					    sr.bits.ldw.sr));
+			status = NPI_VIR_SR_NOTOWNER(sr.bits.ldw.funcid);
+		}
+	} else {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_dev_func_sr_lock_free"
+				    " invalid tas state <0x%x>",
+				    sr.bits.ldw.tas));
+		status = NPI_VIR_TAS_NOTREAD(sr.bits.ldw.funcid);
+	}
+
+	return (status);
+}
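A sketch (editorial) of the lock protocol these two routines implement for a caller that needs to update the shared register directly (npi_dev_func_sr_sr_get_set_clear() below wraps the same sequence):

	if (npi_dev_func_sr_lock_enter(handle) == NPI_SUCCESS) {
		/* ... read or modify the shared state owned by this function ... */
		(void) npi_dev_func_sr_lock_free(handle);
	}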
+
+/*
+ * npi_dev_func_sr_funcid_get():
+ *	This function is called to get the caller's function ID.
+ *	(based on address bits [25:26] on read access).
+ *	(After read, the TAS bit is always set to 1. Software needs
+ *	to write 0 to clear.) This function will write 0 to clear
+ *	the TAS bit if we own it.
+ * Parameters:
+ *	handle		- NPI handle
+ *	funcid_p	- pointer to store the function id.
+ * Return:
+ *	NPI_SUCCESS	- If get function id is complete successfully.
+ *
+ *	Error:
+ */
+
+npi_status_t
+npi_dev_func_sr_funcid_get(npi_handle_t handle, uint8_t *funcid_p)
+{
+	dev_func_sr_t		sr;
+
+	NXGE_REG_RD64(handle, DEV_FUNC_SR_REG, &sr.value);
+	*funcid_p = NXGE_VAL(DEV_FUNC_SR_FUNCID, sr.value);
+	if (!sr.bits.ldw.tas) {
+		/*
+		 * After read, this bit is set to 1 by hardware.
+		 * We own it if the tas bit reads as 0.
+		 */
+		sr.bits.ldw.tas = 0;
+		NXGE_REG_WR64(handle, DEV_FUNC_SR_REG, sr.value);
+	}
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_dev_func_sr_sr_raw_get():
+ *	This function is called to get the shared register value.
+ *	(After read, the TAS bit is always set to 1. Software needs
+ *	to write 0 to clear if we own it.)
+ *
+ * Parameters:
+ *	handle		- NPI handle
+ *	sr_p		- pointer to store the shared value of this register.
+ *
+ * Return:
+ *	NPI_SUCCESS		- If shared value get is complete successfully.
+ *
+ *	Error:
+ */
+npi_status_t
+npi_dev_func_sr_sr_raw_get(npi_handle_t handle, uint16_t *sr_p)
+{
+	dev_func_sr_t		sr;
+
+	NXGE_REG_RD64(handle, DEV_FUNC_SR_REG, &sr.value);
+	*sr_p = NXGE_VAL(DEV_FUNC_SR_FUNCID, sr.value);
+	if (!sr.bits.ldw.tas) {
+		/*
+		 * After read, this bit is set to 1 by hardware.
+		 * We own it if the tas bit reads as 0.
+		 */
+		sr.bits.ldw.tas = 0;
+		NXGE_REG_WR64(handle, DEV_FUNC_SR_REG, sr.value);
+	}
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_dev_func_sr_sr_get():
+ *	This function is called to get the shared register value.
+ *	(After read, the TAS bit is always set to 1. Software needs
+ *	to write 0 to clear if we own it.)
+ *
+ * Parameters:
+ *	handle	- NPI handle
+ *	sr_p	- pointer to store the shared value of this register
+ *		  (only the non-lock, non-function-id portion of the
+ *		  register is returned).
+ *
+ *
+ * Return:
+ *	NPI_SUCCESS		- If shared value get is complete successfully.
+ *
+ *	Error:
+ */
+
+npi_status_t
+npi_dev_func_sr_sr_get(npi_handle_t handle, uint16_t *sr_p)
+{
+	dev_func_sr_t		sr;
+	uint16_t sr_impl = 0;
+
+	NXGE_REG_RD64(handle, DEV_FUNC_SR_REG, &sr.value);
+	sr_impl = NXGE_VAL(DEV_FUNC_SR_FUNCID, sr.value);
+	*sr_p =  (sr_impl << NPI_DEV_SR_IMPL_ST_SHIFT);
+	if (!sr.bits.ldw.tas) {
+		/*
+		 * After read, this bit is set to 1 by hardware.
+		 * We own it if the tas bit reads as 0.
+		 */
+		sr.bits.ldw.tas = 0;
+		NXGE_REG_WR64(handle, DEV_FUNC_SR_REG, sr.value);
+	}
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_dev_func_sr_sr_get_set_clear():
+ *	This function is called to set the shared register value.
+ *	(Shared register must be read first. If tas bit is 0, then
+ *	it implies that the software can proceed to set). After
+ *	setting, tas bit will be cleared.
+ * Parameters:
+ *	handle		- NPI handle
+ *	impl_sr		- shared value to set (only the 8 bit
+ *			  implementation specific state info).
+ *
+ * Return:
+ *	NPI_SUCCESS		- If shared value is set successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE
+ *		VIR_TAS_BUSY
+ */
+
+npi_status_t
+npi_dev_func_sr_sr_get_set_clear(npi_handle_t handle, uint16_t impl_sr)
+{
+	dev_func_sr_t		sr;
+	int			status;
+
+	status = npi_dev_func_sr_lock_enter(handle);
+	if (status != NPI_SUCCESS) {
+		NPI_DEBUG_MSG((handle.function, NPI_VIR_CTL,
+				    " npi_dev_func_sr_sr_get_set_clear"
+				    " unable to acquire lock:"
+				    " status <0x%x>", status));
+		return (status);
+	}
+
+	NXGE_REG_RD64(handle, DEV_FUNC_SR_REG, &sr.value);
+	sr.bits.ldw.sr |= (impl_sr << NPI_DEV_SR_IMPL_ST_SHIFT);
+	NXGE_REG_WR64(handle, DEV_FUNC_SR_REG, sr.value);
+
+	return (npi_dev_func_sr_lock_free(handle));
+}
+
+/*
+ * npi_dev_func_sr_sr_set_only():
+ *	This function is called to only set the shared register value.
+ * Parameters:
+ *	handle		- NPI handle
+ *	impl_sr		- shared value to set.
+ *
+ * Return:
+ *	NPI_SUCCESS		- If shared value is set successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE
+ *		VIR_TAS_BUSY
+ */
+
+npi_status_t
+npi_dev_func_sr_sr_set_only(npi_handle_t handle, uint16_t impl_sr)
+{
+	int		status = NPI_SUCCESS;
+	dev_func_sr_t	sr;
+
+	NXGE_REG_RD64(handle, DEV_FUNC_SR_REG, &sr.value);
+	/* must be the owner */
+	if (sr.bits.ldw.funcid == NPI_GET_LOCK_OWNER(sr.bits.ldw.sr)) {
+		sr.bits.ldw.sr |= (impl_sr << NPI_DEV_SR_IMPL_ST_SHIFT);
+		NXGE_REG_WR64(handle, DEV_FUNC_SR_REG, sr.value);
+	} else {
+		NPI_DEBUG_MSG((handle.function, NPI_VIR_CTL,
+				    " npi_dev_func_sr_sr_set_only"
+				    " not owner <0x%x>",
+				    sr.bits.ldw.sr));
+		status = NPI_VIR_SR_NOTOWNER(sr.bits.ldw.funcid);
+	}
+
+	return (status);
+}
+
+/*
+ * npi_dev_func_sr_busy():
+ *	This function is called to see if we can own the device.
+ *	It will not reset the tas bit.
+ * Parameters:
+ *	handle		- NPI handle
+ *	busy_p		- pointer to store busy flag.
+ *				(B_TRUE: device is in use, B_FALSE: free).
+ * Return:
+ *	NPI_SUCCESS		- If tas bit is read successfully.
+ *	Error:
+ */
+
+npi_status_t
+npi_dev_func_sr_busy(npi_handle_t handle, boolean_t *busy_p)
+{
+	dev_func_sr_t	sr;
+
+	NXGE_REG_RD64(handle, DEV_FUNC_SR_REG, &sr.value);
+	if (!sr.bits.ldw.tas) {
+		sr.bits.ldw.tas = 0;
+		NXGE_REG_WR64(handle, DEV_FUNC_SR_REG, sr.value);
+		*busy_p = B_FALSE;
+	} else {
+		/* Other function already owns it */
+		*busy_p = B_TRUE;
+	}
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_dev_func_sr_tas_get():
+ *	This function is called to get the tas bit
+ *	(after read, this bit is always set to 1, software write 0
+ *	 to clear it).
+ *
+ * Parameters:
+ *	handle		- NPI handle
+ *	tas_p		- pointer to store the tas value
+ *
+ * Return:
+ *	NPI_SUCCESS		- If tas value get is complete successfully.
+ *	Error:
+ */
+
+npi_status_t
+npi_dev_func_sr_tas_get(npi_handle_t handle, uint8_t *tas_p)
+{
+	dev_func_sr_t		sr;
+
+	NXGE_REG_RD64(handle, DEV_FUNC_SR_REG, &sr.value);
+	*tas_p = sr.bits.ldw.tas;
+	if (!sr.bits.ldw.tas) {
+		sr.bits.ldw.tas = 0;
+		NXGE_REG_WR64(handle, DEV_FUNC_SR_REG, sr.value);
+
+	}
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_fzc_mpc_set():
+ *	This function is called to enable the write access
+ *	to FZC region to function zero.
+ * Parameters:
+ *	handle		- NPI handle
+ * Return:
+ *	NPI_SUCCESS	-
+ *	Error:
+ */
+
+npi_status_t
+npi_fzc_mpc_set(npi_handle_t handle, boolean_t mpc)
+{
+	multi_part_ctl_t	mp;
+
+	mp.value = 0;
+	if (mpc) {
+		mp.bits.ldw.mpc = 1;
+	}
+	NXGE_REG_WR64(handle, MULTI_PART_CTL_REG, mp.value);
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_fzc_mpc_get():
+ *	This function is called to get the access mode.
+ * Parameters:
+ *	handle		- NPI handle
+ * Return:
+ *	NPI_SUCCESS	-
+ *
+ */
+
+npi_status_t
+npi_fzc_mpc_get(npi_handle_t handle, boolean_t *mpc_p)
+{
+	multi_part_ctl_t	mpc;
+
+	mpc.value = 0;
+	NXGE_REG_RD64(handle, MULTI_PART_CTL_REG, &mpc.value);
+	*mpc_p = mpc.bits.ldw.mpc;
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_fzc_dma_bind_set():
+ *	This function is called to set DMA binding register.
+ * Parameters:
+ *	handle		- NPI handle
+ *	dma_bind	- NPI defined data structure that
+ *			  contains the tx/rx channel binding info.
+ *			  to set.
+ * Return:
+ *	NPI_SUCCESS	-
+ *	Error:
+ *	NPI_FAILURE
+ *
+ */
+
+npi_status_t
+npi_fzc_dma_bind_set(npi_handle_t handle, fzc_dma_bind_t dma_bind)
+{
+	dma_bind_t	bind;
+	int		status;
+	uint8_t		fn, region, id, tn, rn;
+
+	fn = dma_bind.function_id;
+	region = dma_bind.sub_vir_region;
+	id = dma_bind.vir_index;
+	tn = dma_bind.tx_channel;
+	rn = dma_bind.rx_channel;
+
+	DMA_BIND_VADDR_VALIDATE(fn, region, id, status);
+	if (status) {
+		return (status);
+	}
+
+	if (dma_bind.tx_bind) {
+		DMA_BIND_TX_VALIDATE(tn, status);
+		if (status) {
+			return (status);
+		}
+	}
+
+	if (dma_bind.rx_bind) {
+		DMA_BIND_RX_VALIDATE(rn, status);
+		if (status) {
+			return (status);
+		}
+	}
+
+	bind.value = 0;
+	if (dma_bind.tx_bind) {
+		bind.bits.ldw.tx_bind = 1;
+		bind.bits.ldw.tx = tn;
+	}
+	if (dma_bind.rx_bind) {
+		bind.bits.ldw.rx_bind = 1;
+		bind.bits.ldw.rx = rn;
+	}
+
+	NXGE_REG_WR64(handle, DMA_BIND_REG +
+		DMA_BIND_REG_OFFSET(fn, rn, id), bind.value);
+
+	return (status);
+}
+
+/*
+ * npi_fzc_ldg_num_set():
+ *	This function is called to set up a logical group number that
+ *	a logical device belongs to.
+ * Parameters:
+ *	handle		- NPI handle
+ *	ld		- logical device number (0 - 68)
+ *	ldg		- logical device group number (0 - 63)
+ * Return:
+ *	NPI_SUCCESS	-
+ *	Error:
+ *	NPI_FAILURE
+ *
+ */
+
+npi_status_t
+npi_fzc_ldg_num_set(npi_handle_t handle, uint8_t ld, uint8_t ldg)
+{
+	ldg_num_t	gnum;
+
+	ASSERT(LD_VALID(ld));
+	if (!LD_VALID(ld)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_fzc_ldg_num_set"
+				    " ld <0x%x>", ld));
+		return (NPI_FAILURE | NPI_VIR_LD_INVALID(ld));
+	}
+
+	ASSERT(LDG_VALID(ldg));
+	if (!LDG_VALID(ldg)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_fzc_ldg_num_set"
+				    " ldg <0x%x>", ldg));
+		return (NPI_FAILURE | NPI_VIR_LDG_INVALID(ldg));
+	}
+
+	gnum.value = 0;
+	gnum.bits.ldw.num = ldg;
+
+	NXGE_REG_WR64(handle, LDG_NUM_REG + LD_NUM_OFFSET(ld),
+		gnum.value);
+
+	return (NPI_SUCCESS);
+}
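A sketch (editorial) of grouping two logical devices into one group so they share a single interrupt; the device numbers used here are assumed:

	uint8_t ldg = 1;			/* target group */
	uint8_t ld_a = 0, ld_b = 1;		/* assumed logical device numbers */

	(void) npi_fzc_ldg_num_set(handle, ld_a, ldg);
	(void) npi_fzc_ldg_num_set(handle, ld_b, ldg);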
+
+/*
+ * npi_fzc_ldg_num_get():
+ *	This function is called to get the logical device group that
+ *	a logical device belongs to.
+ * Parameters:
+ *	handle		- NPI handle
+ *	ld		- logical device number (0 - 68)
+ *	*ldg_p		- pointer to store its group number.
+ * Return:
+ *	NPI_SUCCESS	-
+ *	Error:
+ *	NPI_FAILURE
+ */
+
+npi_status_t
+npi_fzc_ldg_num_get(npi_handle_t handle, uint8_t ld, uint8_t *ldg_p)
+{
+	uint64_t val;
+
+	ASSERT(LD_VALID(ld));
+	if (!LD_VALID(ld)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_fzc_ldg_num_get"
+				    " Invalid Input:"
+				    " ld <0x%x>", ld));
+		return (NPI_FAILURE | NPI_VIR_LD_INVALID(ld));
+	}
+
+	NXGE_REG_RD64(handle, LDG_NUM_REG + LD_NUM_OFFSET(ld), &val);
+
+	*ldg_p = (uint8_t)(val & LDG_NUM_NUM_MASK);
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_ldsv_ldfs_get():
+ *	This function is called to get device state vectors.
+ * Parameters:
+ *	handle		- NPI handle
+ *	ldg		- logical device group (0 - 63)
+ *	vector0_p/1_p/2_p - pointers to store logical device state vectors 0, 1 and 2.
+ * Return:
+ *	NPI_SUCCESS	-
+ *	Error:
+ *	NPI_FAILURE
+ */
+
+npi_status_t
+npi_ldsv_ldfs_get(npi_handle_t handle, uint8_t ldg, uint64_t *vector0_p,
+	uint64_t *vector1_p, uint64_t *vector2_p)
+{
+	int	status;
+
+	if ((status = npi_ldsv_get(handle, ldg, VECTOR0, vector0_p))) {
+		return (status);
+	}
+	if ((status = npi_ldsv_get(handle, ldg, VECTOR1, vector1_p))) {
+		return (status);
+	}
+	if ((status = npi_ldsv_get(handle, ldg, VECTOR2, vector2_p))) {
+		return (status);
+	}
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_ldsv_get():
+ *	This function is called to get device state vectors.
+ * Parameters:
+ *	handle		- NPI handle
+ *	ldg		- logical device group (0 - 63)
+ *	vector		- vector type (0, 1 or 2)
+ *	ldf_p		- pointer to store the state vector value.
+ * Return:
+ *	NPI_SUCCESS	-
+ *	Error:
+ *	NPI_FAILURE
+ */
+
+npi_status_t
+npi_ldsv_get(npi_handle_t handle, uint8_t ldg, ldsv_type_t vector,
+	uint64_t *ldf_p)
+{
+	uint64_t		offset;
+
+	ASSERT(LDG_VALID(ldg));
+	if (!LDG_VALID(ldg)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_ldsv_get"
+				    " Invalid Input "
+				    " ldg <0x%x>", ldg));
+		return (NPI_FAILURE | NPI_VIR_LDG_INVALID(ldg));
+	}
+
+	switch (vector) {
+	case VECTOR0:
+		offset = LDSV0_REG + LDSV_OFFSET(ldg);
+		break;
+
+	case VECTOR1:
+		offset = LDSV1_REG + LDSV_OFFSET(ldg);
+		break;
+
+	case VECTOR2:
+		offset = LDSV2_REG + LDSV_OFFSET(ldg);
+		break;
+
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_ldsv_get"
+				    " Invalid Input: "
+				    " ldsv type <0x%x>", vector));
+		return (NPI_FAILURE | NPI_VIR_LDSV_INVALID(vector));
+	}
+
+	NXGE_REG_RD64(handle, offset, ldf_p);
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_ldsv_ld_get():
+ *	This function is called to get the flag bit value of a device.
+ * Parameters:
+ *	handle		- NPI handle
+ *	ldg		- logical device group (0 - 63)
+ *	ld		- logical device (0 - 68)
+ *	ldf_type	- either LDF0 (0) or LDF1 (1)
+ *	vector		- vector type (0, 1 or 2)
+ *	*ldf_p		- pointer to store its flag bits.
+ * Return:
+ *	NPI_SUCCESS	-
+ *	Error:
+ *	NPI_FAILURE
+ */
+
+npi_status_t
+npi_ldsv_ld_get(npi_handle_t handle, uint8_t ldg, uint8_t ld,
+	ldsv_type_t vector, ldf_type_t ldf_type, boolean_t *flag_p)
+{
+	uint64_t		sv;
+	uint64_t		offset;
+
+	ASSERT(LDG_VALID(ldg));
+	if (!LDG_VALID(ldg)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_ldsv_ld_get"
+				    " Invalid Input: "
+				    " ldg <0x%x>", ldg));
+		return (NPI_FAILURE | NPI_VIR_LDG_INVALID(ldg));
+	}
+	ASSERT((LD_VALID(ld)) &&	\
+		((vector != VECTOR2) || (ld >= NXGE_MAC_LD_START)));
+	if (!LD_VALID(ld)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_ldsv_ld_get Invalid Input: "
+				    " ld <0x%x>", ld));
+		return (NPI_FAILURE | NPI_VIR_LD_INVALID(ld));
+	} else if (vector == VECTOR2 && ld < NXGE_MAC_LD_START) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_ldsv_ld_get Invalid Input:"
+				    " ld-vector2 <0x%x>", ld));
+		return (NPI_FAILURE | NPI_VIR_LD_INVALID(ld));
+	}
+
+	switch (vector) {
+	case VECTOR0:
+		offset = LDSV0_REG + LDSV_OFFSET(ldg);
+		break;
+
+	case VECTOR1:
+		offset = LDSV1_REG + LDSV_OFFSET(ldg);
+		break;
+
+	case VECTOR2:
+		offset = LDSV2_REG + LDSV_OFFSET(ldg);
+		break;
+
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			" npi_ldsv_ld_get Invalid Input:"
+			" ldsv type <0x%x>", vector));
+		return (NPI_FAILURE | NPI_VIR_LDSV_INVALID(vector));
+	}
+
+	NXGE_REG_RD64(handle, offset, &sv);
+	if (vector != VECTOR2) {
+		*flag_p = ((sv >> ld) & LDSV_MASK_ALL);
+	} else {
+		if (ldf_type) {
+			*flag_p = (((sv >> LDSV2_LDF1_SHIFT) >>
+				(ld - NXGE_MAC_LD_START)) & LDSV_MASK_ALL);
+		} else {
+			*flag_p = (((sv >> LDSV2_LDF0_SHIFT) >>
+				(ld - NXGE_MAC_LD_START)) & LDSV_MASK_ALL);
+		}
+	}
+
+	return (NPI_SUCCESS);
+}
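A sketch (editorial) of polling one device's LDF0 flag; devices at or above NXGE_MAC_LD_START are reported through state vector 2, all others through vector 0:

	boolean_t pending;

	(void) npi_ldsv_ld_get(handle, ldg, ld,
		(ld >= NXGE_MAC_LD_START) ? VECTOR2 : VECTOR0, LDF0, &pending);
	if (pending) {
		/* ... service the logical device ... */
	}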
+
+/*
+ * npi_ldsv_ld_ldf0_get():
+ *	This function is called to get the ldf0 bit value of a device.
+ * Parameters:
+ *	handle		- NPI handle
+ *	ldg		- logical device group (0 - 63)
+ *	ld		- logical device (0 - 68)
+ *	*ldf_p		- pointer to store its flag bits.
+ * Return:
+ *	NPI_SUCCESS	-
+ *	Error:
+ *	NPI_FAILURE
+ */
+
+npi_status_t
+npi_ldsv_ld_ldf0_get(npi_handle_t handle, uint8_t ldg, uint8_t ld,
+	boolean_t *flag_p)
+{
+	ldsv_type_t vector;
+
+	if (ld >= NXGE_MAC_LD_START) {
+		vector = VECTOR2;
+	} else {
+		/* LDF0 flags for devices below NXGE_MAC_LD_START are in vector 0 */
+		vector = VECTOR0;
+	}
+
+	return (npi_ldsv_ld_get(handle, ldg, ld, vector, LDF0, flag_p));
+}
+
+/*
+ * npi_ldsv_ld_ldf1_get():
+ *	This function is called to get the ldf1 bit value of a device.
+ * Parameters:
+ *	handle		- NPI handle
+ *	ldg		- logical device group (0 - 63)
+ *	ld		- logical device (0 - 68)
+ *	*ldf_p		- pointer to store its flag bits.
+ * Return:
+ *	NPI_SUCCESS	-
+ *	Error:
+ *	NPI_FAILURE
+ */
+
+npi_status_t
+npi_ldsv_ld_ldf1_get(npi_handle_t handle, uint8_t ldg, uint8_t ld,
+		boolean_t *flag_p)
+{
+	ldsv_type_t vector;
+
+	if (ld >= NXGE_MAC_LD_START) {
+		vector = VECTOR2;
+	} else {
+		vector = VECTOR1;
+	}
+
+	return (npi_ldsv_ld_get(handle, ldg, ld, vector, LDF1, flag_p));
+}
+
+/*
+ * npi_intr_mask_set():
+ *	This function is called to select the mask bits for both ldf0 and ldf1.
+ * Parameters:
+ *	handle		- NPI handle
+ *	ld		- logical device (0 - 68)
+ *	ldf_mask	- mask value to set (both ldf0 and ldf1).
+ * Return:
+ *	NPI_SUCCESS	-
+ *	Error:
+ *	NPI_FAILURE
+ */
+
+npi_status_t
+npi_intr_mask_set(npi_handle_t handle, uint8_t ld, uint8_t ldf_mask)
+{
+	uint64_t		offset;
+
+	ASSERT(LD_VALID(ld));
+	if (!LD_VALID(ld)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			    " npi_intr_mask_set ld <0x%x>", ld));
+		return (NPI_FAILURE | NPI_VIR_LD_INVALID(ld));
+	}
+
+	ldf_mask &= LD_IM0_MASK;
+	offset = LDSV_OFFSET_MASK(ld);
+
+	NPI_DEBUG_MSG((handle.function, NPI_VIR_CTL,
+		"npi_intr_mask_set: ld %d "
+		" offset 0x%0llx "
+		" mask 0x%x",
+		ld, offset, ldf_mask));
+
+	NXGE_REG_WR64(handle, offset, (uint64_t)ldf_mask);
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_intr_mask_get():
+ *	This function is called to get the mask bits.
+ * Parameters:
+ *	handle		- NPI handle
+ *	ld		- logical device (0 - 68)
+ *	ldf_mask	- pointer to store mask bits info.
+ * Return:
+ *	NPI_SUCCESS	-
+ *	Error:
+ *	NPI_FAILURE
+ */
+npi_status_t
+npi_intr_mask_get(npi_handle_t handle, uint8_t ld, uint8_t *ldf_mask_p)
+{
+	uint64_t		offset;
+	uint64_t		val;
+
+	ASSERT(LD_VALID(ld));
+	if (!LD_VALID(ld)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+			    " npi_intr_mask_get ld <0x%x>", ld));
+		return (NPI_FAILURE | NPI_VIR_LD_INVALID(ld));
+	}
+
+	offset = LDSV_OFFSET_MASK(ld);
+
+	NXGE_REG_RD64(handle, offset, &val);
+
+	*ldf_mask_p = (uint8_t)(val & LD_IM_MASK);
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_intr_ldg_mgmt_set():
+ *	This function is called to set interrupt timer and arm bit.
+ * Parameters:
+ *	handle		- NPI handle
+ *	ldg		- logical device group (0 - 63)
+ *	arm		- B_TRUE (arm) B_FALSE (disable)
+ * Return:
+ *	NPI_SUCCESS	-
+ *	Error:
+ *	NPI_FAILURE
+ */
+
+npi_status_t
+npi_intr_ldg_mgmt_set(npi_handle_t handle, uint8_t ldg, boolean_t arm,
+			uint8_t timer)
+{
+	ldgimgm_t		mgm;
+	uint64_t		val;
+
+	ASSERT((LDG_VALID(ldg)) && (LD_INTTIMER_VALID(timer)));
+	if (!LDG_VALID(ldg)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_intr_ldg_mgmt_set"
+				    " Invalid Input: "
+				    " ldg <0x%x>", ldg));
+		return (NPI_FAILURE | NPI_VIR_LDG_INVALID(ldg));
+	}
+	if (!LD_INTTIMER_VALID(timer)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_intr_ldg_mgmt_set Invalid Input"
+				    " timer <0x%x>", timer));
+		return (NPI_FAILURE | NPI_VIR_INTM_TM_INVALID(ldg));
+	}
+
+	if (arm) {
+		mgm.value = 0;
+		mgm.bits.ldw.arm = 1;
+	} else {
+		NXGE_REG_RD64(handle, LDGIMGN_REG + LDSV_OFFSET(ldg), &val);
+		mgm.value = val & LDGIMGM_ARM_MASK;
+	}
+
+	mgm.bits.ldw.timer = timer;
+	NXGE_REG_WR64(handle, LDGIMGN_REG + LDSV_OFFSET(ldg),
+		mgm.value);
+
+	NPI_DEBUG_MSG((handle.function, NPI_VIR_CTL,
+		" npi_intr_ldg_mgmt_set: ldg %d"
+		" reg offset 0x%x",
+		ldg, LDGIMGN_REG + LDSV_OFFSET(ldg)));
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_intr_ldg_mgmt_timer_get():
+ *	This function is called to get the timer counter
+ * Parameters:
+ *	handle		- NPI handle
+ *	ldg		- logical device group (0 - 63)
+ *	timer_p		- pointer to store the timer counter.
+ * Return:
+ *	NPI_SUCCESS	-
+ *	Error:
+ *	NPI_FAILURE
+ */
+
+npi_status_t
+npi_intr_ldg_mgmt_timer_get(npi_handle_t handle, uint8_t ldg, uint8_t *timer_p)
+{
+	uint64_t val;
+
+	ASSERT(LDG_VALID(ldg));
+	if (!LDG_VALID(ldg)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_intr_ldg_mgmt_timer_get"
+				    " Invalid Input: ldg <0x%x>", ldg));
+		return (NPI_FAILURE | NPI_VIR_LDG_INVALID(ldg));
+	}
+
+	NXGE_REG_RD64(handle, LDGIMGN_REG + LDSV_OFFSET(ldg), &val);
+
+	*timer_p = (uint8_t)(val & LDGIMGM_TIMER_MASK);
+
+	NPI_DEBUG_MSG((handle.function, NPI_VIR_CTL,
+		" npi_intr_ldg_mgmt_timer_get: ldg %d"
+		" reg offset 0x%x",
+		ldg, LDGIMGN_REG + LDSV_OFFSET(ldg)));
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_intr_ldg_mgmt_arm():
+ *	This function is called to arm the group.
+ * Parameters:
+ *	handle		- NPI handle
+ *	ldg		- logical device group (0 - 63)
+ * Return:
+ *	NPI_SUCCESS	-
+ *	Error:
+ *	NPI_FAILURE
+ */
+
+npi_status_t
+npi_intr_ldg_mgmt_arm(npi_handle_t handle, uint8_t ldg)
+{
+	ldgimgm_t		mgm;
+
+	ASSERT(LDG_VALID(ldg));
+	if (!LDG_VALID(ldg)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_intr_ldg_mgmt_arm"
+				    " Invalid Input: ldg <0x%x>",
+				    ldg));
+		return (NPI_FAILURE | NPI_VIR_LDG_INVALID(ldg));
+	}
+
+	NXGE_REG_RD64(handle, (LDGIMGN_REG + LDSV_OFFSET(ldg)), &mgm.value);
+	mgm.bits.ldw.arm = 1;
+
+	NXGE_REG_WR64(handle, LDGIMGN_REG + LDSV_OFFSET(ldg),
+			mgm.value);
+	NPI_DEBUG_MSG((handle.function, NPI_VIR_CTL,
+		" npi_intr_ldg_mgmt_arm: ldg %d"
+		" reg offset 0x%x",
+		ldg, LDGIMGN_REG + LDSV_OFFSET(ldg)));
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_fzc_ldg_timer_res_set():
+ *	This function is called to set the timer resolution.
+ * Parameters:
+ *	handle		- NPI handle
+ *	res		- timer resolution (# of system clocks)
+ * Return:
+ *	NPI_SUCCESS	-
+ *	Error:
+ *	NPI_FAILURE
+ */
+
+npi_status_t
+npi_fzc_ldg_timer_res_set(npi_handle_t handle, uint32_t res)
+{
+	ASSERT(res <= LDGTITMRES_RES_MASK);
+	if (res > LDGTITMRES_RES_MASK) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_fzc_ldg_timer_res_set"
+				    " Invalid Input: res <0x%x>",
+				    res));
+		return (NPI_FAILURE | NPI_VIR_TM_RES_INVALID);
+	}
+
+	NXGE_REG_WR64(handle, LDGITMRES_REG, (res & LDGTITMRES_RES_MASK));
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_fzc_ldg_timer_res_get():
+ *	This function is called to get the timer resolution.
+ * Parameters:
+ *	handle		- NPI handle
+ *	res_p		- pointer to store the timer resolution.
+ * Return:
+ *	NPI_SUCCESS	-
+ *	Error:
+ *	NPI_FAILURE
+ */
+
+npi_status_t
+npi_fzc_ldg_timer_res_get(npi_handle_t handle, uint8_t *res_p)
+{
+	uint64_t val;
+
+	NXGE_REG_RD64(handle, LDGITMRES_REG, &val);
+
+	*res_p = (uint8_t)(val & LDGIMGM_TIMER_MASK);
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_fzc_sid_set():
+ *	This function is called to set the system interrupt data.
+ * Parameters:
+ *	handle		- NPI handle
+ *	ldg		- logical group (0 - 63)
+ *	sid		- NPI defined data to set
+ * Return:
+ *	NPI_SUCCESS	-
+ *	Error:
+ *	NPI_FAILURE
+ */
+
+npi_status_t
+npi_fzc_sid_set(npi_handle_t handle, fzc_sid_t sid)
+{
+	sid_t		sd;
+
+	ASSERT(LDG_VALID(sid.ldg));
+	if (!LDG_VALID(sid.ldg)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_fzc_sid_set"
+				    " Invalid Input: ldg <0x%x>",
+				    sid.ldg));
+		return (NPI_FAILURE | NPI_VIR_LDG_INVALID(sid.ldg));
+	}
+	if (!sid.niu) {
+		ASSERT(FUNC_VALID(sid.func));
+		if (!FUNC_VALID(sid.func)) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_fzc_sid_set"
+					    " Invalid Input: func <0x%x>",
+					    sid.func));
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				"invalid FUNC: npi_fzc_sid_set(%d)", sid.func));
+			return (NPI_FAILURE | NPI_VIR_FUNC_INVALID(sid.func));
+		}
+
+		ASSERT(SID_VECTOR_VALID(sid.vector));
+		if (!SID_VECTOR_VALID(sid.vector)) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_fzc_sid_set"
+					    " Invalid Input: vector <0x%x>",
+					    sid.vector));
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " invalid VECTOR: npi_fzc_sid_set(%d)",
+				    sid.vector));
+			return (NPI_FAILURE |
+				NPI_VIR_SID_VEC_INVALID(sid.vector));
+		}
+	}
+	sd.value = 0;
+	if (!sid.niu) {
+		sd.bits.ldw.data = ((sid.func << SID_DATA_FUNCNUM_SHIFT) |
+				(sid.vector & SID_DATA_INTNUM_MASK));
+	}
+
+	NPI_DEBUG_MSG((handle.function, NPI_VIR_CTL,
+	    " npi_fzc_sid_set: group %d 0x%llx", sid.ldg, sd.value));
+
+	NXGE_REG_WR64(handle,  SID_REG + LDG_SID_OFFSET(sid.ldg), sd.value);
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_fzc_sid_get():
+ *	This function is called to get the system interrupt data.
+ * Parameters:
+ *	handle		- NPI handle
+ *	ldg		- logical group (0 - 63)
+ *	sid_p		- NPI defined data to get
+ * Return:
+ *	NPI_SUCCESS	-
+ *	Error:
+ *	NPI_FAILURE
+ */
+
+npi_status_t
+npi_fzc_sid_get(npi_handle_t handle, p_fzc_sid_t sid_p)
+{
+	sid_t		sd;
+
+	ASSERT(LDG_VALID(sid_p->ldg));
+	if (!LDG_VALID(sid_p->ldg)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_fzc_sid_get"
+				    " Invalid Input: ldg <0x%x>",
+				    sid_p->ldg));
+		return (NPI_FAILURE | NPI_VIR_LDG_INVALID(sid_p->ldg));
+	}
+	NXGE_REG_RD64(handle, (SID_REG + LDG_SID_OFFSET(sid_p->ldg)),
+		&sd.value);
+	if (!sid_p->niu) {
+		sid_p->func = ((sd.bits.ldw.data & SID_DATA_FUNCNUM_MASK) >>
+			SID_DATA_FUNCNUM_SHIFT);
+		sid_p->vector = ((sd.bits.ldw.data & SID_DATA_INTNUM_MASK) >>
+			SID_DATA_INTNUM_SHIFT);
+	} else {
+		sid_p->vector = (sd.value & SID_DATA_MASK);
+	}
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_fzc_sys_err_mask_set():
+ *	This function is called to mask/unmask the device error mask bits.
+ *
+ * Parameters:
+ *	handle		- NPI handle
+ *	mask		- set bit mapped mask
+ * Return:
+ *	NPI_SUCCESS	-
+ *	Error:
+ *	NPI_FAILURE
+ */
+
+npi_status_t
+npi_fzc_sys_err_mask_set(npi_handle_t handle, uint64_t mask)
+{
+	NXGE_REG_WR64(handle,  SYS_ERR_MASK_REG, mask);
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_fzc_sys_err_stat_get():
+ *	This function is called to get the system error stats.
+ *
+ * Parameters:
+ *	handle		- NPI handle
+ *	err_stat	- sys_err_stat structure to hold stats.
+ * Return:
+ *	NPI_SUCCESS	-
+ *	Error:
+ *	NPI_FAILURE
+ */
+
+npi_status_t
+npi_fzc_sys_err_stat_get(npi_handle_t handle, p_sys_err_stat_t statp)
+{
+	NXGE_REG_RD64(handle,  SYS_ERR_STAT_REG, &statp->value);
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_fzc_rst_ctl_get(npi_handle_t handle, p_rst_ctl_t rstp)
+{
+	NXGE_REG_RD64(handle, RST_CTL_REG, &rstp->value);
+
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_fzc_rst_ctl_reset_mac():
+ *	This function is called to set the MAC reset bit of the given port
+ *	in the reset control register.
+ * Parameters:
+ *	handle		- NPI handle
+ *	port		- port number
+ * Return:
+ *	NPI_SUCCESS	-
+ *
+ */
+
+npi_status_t
+npi_fzc_rst_ctl_reset_mac(npi_handle_t handle, uint8_t port)
+{
+	rst_ctl_t 		rst;
+
+	rst.value = 0;
+	NXGE_REG_RD64(handle, RST_CTL_REG, &rst.value);
+	rst.value |= (1 << (RST_CTL_MAC_RST0_SHIFT + port));
+	NXGE_REG_WR64(handle, RST_CTL_REG, rst.value);
+
+	return (NPI_SUCCESS);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nxge/npi/npi_vir.h	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,690 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _NPI_VIR_H
+#define	_NPI_VIR_H
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#include <npi.h>
+#include <nxge_hw.h>
+
+/*
+ * Virtualization and Logical devices NPI error codes
+ */
+#define	FUNCID_INVALID		PORT_INVALID
+#define	VIR_ERR_ST		(VIR_BLK_ID << NPI_BLOCK_ID_SHIFT)
+#define	VIR_ID_SHIFT(n)		(n << NPI_PORT_CHAN_SHIFT)
+
+#define	VIR_HW_BUSY		(NPI_BK_HW_ERROR_START | 0x1)
+
+#define	VIR_TAS_BUSY		(NPI_BK_ERROR_START | 0x1)
+#define	VIR_TAS_NOTREAD	(NPI_BK_ERROR_START | 0x2)
+
+#define	VIR_SR_RESET		(NPI_BK_ERROR_START | 0x3)
+#define	VIR_SR_FREE		(NPI_BK_ERROR_START | 0x4)
+#define	VIR_SR_BUSY		(NPI_BK_ERROR_START | 0x5)
+#define	VIR_SR_INVALID		(NPI_BK_ERROR_START | 0x6)
+#define	VIR_SR_NOTOWNER	(NPI_BK_ERROR_START | 0x7)
+#define	VIR_SR_INITIALIZED	(NPI_BK_ERROR_START | 0x8)
+
+#define	VIR_MPC_DENY		(NPI_BK_ERROR_START | 0x10)
+
+#define	VIR_BD_FUNC_INVALID	(NPI_BK_ERROR_START | 0x20)
+#define	VIR_BD_REG_INVALID	(NPI_BK_ERROR_START | 0x21)
+#define	VIR_BD_ID_INVALID	(NPI_BK_ERROR_START | 0x22)
+#define	VIR_BD_TXDMA_INVALID	(NPI_BK_ERROR_START | 0x23)
+#define	VIR_BD_RXDMA_INVALID	(NPI_BK_ERROR_START | 0x24)
+
+#define	VIR_LD_INVALID		(NPI_BK_ERROR_START | 0x30)
+#define	VIR_LDG_INVALID		(NPI_BK_ERROR_START | 0x31)
+#define	VIR_LDSV_INVALID	(NPI_BK_ERROR_START | 0x32)
+
+#define	VIR_INTM_TM_INVALID	(NPI_BK_ERROR_START | 0x33)
+#define	VIR_TM_RES_INVALID	(NPI_BK_ERROR_START | 0x34)
+#define	VIR_SID_VEC_INVALID	(NPI_BK_ERROR_START | 0x35)
+
+#define	NPI_VIR_OCODE_INVALID(n) (VIR_ID_SHIFT(n) | VIR_ERR_ST | OPCODE_INVALID)
+#define	NPI_VIR_FUNC_INVALID(n)	 (VIR_ID_SHIFT(n) | VIR_ERR_ST | FUNCID_INVALID)
+#define	NPI_VIR_CN_INVALID(n)	(VIR_ID_SHIFT(n) | VIR_ERR_ST | CHANNEL_INVALID)
+
+/*
+ * Errors codes of shared register functions.
+ */
+#define	NPI_VIR_TAS_BUSY(n)	(VIR_ID_SHIFT(n) | VIR_ERR_ST | VIR_TAS_BUSY)
+#define	NPI_VIR_TAS_NOTREAD(n)	(VIR_ID_SHIFT(n) | VIR_ERR_ST | VIR_TAS_NOTREAD)
+#define	NPI_VIR_SR_RESET(n)	(VIR_ID_SHIFT(n) | VIR_ERR_ST | VIR_SR_RESET)
+#define	NPI_VIR_SR_FREE(n)	(VIR_ID_SHIFT(n) | VIR_ERR_ST | VIR_SR_FREE)
+#define	NPI_VIR_SR_BUSY(n)	(VIR_ID_SHIFT(n) | VIR_ERR_ST | VIR_SR_BUSY)
+#define	NPI_VIR_SR_INVALID(n)	(VIR_ID_SHIFT(n) | VIR_ERR_ST | VIR_SR_INVALID)
+#define	NPI_VIR_SR_NOTOWNER(n)	(VIR_ID_SHIFT(n) | VIR_ERR_ST | VIR_SR_NOTOWNER)
+#define	NPI_VIR_SR_INITIALIZED(n) (VIR_ID_SHIFT(n) | \
+					VIR_ERR_ST | VIR_SR_INITIALIZED)
+
+/*
+ * Error codes of multi-partition control register functions.
+ */
+#define	NPI_VIR_MPC_DENY	(VIR_ERR_ST | VIR_MPC_DENY)
+
+/*
+ * Error codes of DMA binding functions.
+ */
+#define	NPI_VIR_BD_FUNC_INVALID(n)	(VIR_ID_SHIFT(n) | \
+					VIR_ERR_ST | VIR_BD_FUNC_INVALID)
+#define	NPI_VIR_BD_REG_INVALID(n)	(VIR_ID_SHIFT(n) | \
+					VIR_ERR_ST | VIR_BD_REG_INVALID)
+#define	NPI_VIR_BD_ID_INVALID(n)	(VIR_ID_SHIFT(n) | \
+					VIR_ERR_ST | VIR_BD_ID_INVALID)
+#define	NPI_VIR_BD_TXDMA_INVALID(n)	(VIR_ID_SHIFT(n) | \
+					VIR_ERR_ST | VIR_BD_TXDMA_INVALID)
+#define	NPI_VIR_BD_RXDMA_INVALID(n)	(VIR_ID_SHIFT(n) | \
+					VIR_ERR_ST | VIR_BD_RXDMA_INVALID)
+
+/*
+ * Error codes of logical devices and groups functions.
+ */
+#define	NPI_VIR_LD_INVALID(n) 	(VIR_ID_SHIFT(n) | VIR_ERR_ST | VIR_LD_INVALID)
+#define	NPI_VIR_LDG_INVALID(n)	(VIR_ID_SHIFT(n) | VIR_ERR_ST | VIR_LDG_INVALID)
+#define	NPI_VIR_LDSV_INVALID(n) (VIR_ID_SHIFT(n) | \
+					VIR_ERR_ST | VIR_LDSV_INVALID)
+#define	NPI_VIR_INTM_TM_INVALID(n)	(VIR_ID_SHIFT(n) | \
+					VIR_ERR_ST | VIR_INTM_TM_INVALID)
+#define	NPI_VIR_TM_RES_INVALID		(VIR_ERR_ST | VIR_TM_RES_INVALID)
+#define	NPI_VIR_SID_VEC_INVALID(n)	(VIR_ID_SHIFT(n) | \
+						VIR_ERR_ST | VIR_SID_VEC_INVALID)
+
+/*
+ * Bit definitions [15:0] of the shared register
+ * used by the driver as locking mechanism.
+ *	[1:0]		lock state (RESET, FREE, BUSY)
+ *	[3:2]		function ID (owner)
+ *	[11:4]		Implementation specific states
+ *	[15:12]  	Individual function state
+ */
+#define	NPI_DEV_SR_LOCK_ST_RESET	0
+#define	NPI_DEV_SR_LOCK_ST_FREE		1
+#define	NPI_DEV_SR_LOCK_ST_BUSY		2
+
+#define	NPI_DEV_SR_LOCK_ST_SHIFT	0
+#define	NPI_DEV_SR_LOCK_ST_MASK		0x03
+#define	NPI_DEV_SR_LOCK_FID_SHIFT	2
+#define	NPI_DEV_SR_LOCK_FID_MASK	0x0C
+
+#define	NPI_DEV_SR_IMPL_ST_SHIFT	4
+#define	NPI_DEV_SR_IMPL_ST_MASK	0xfff0
+
+#define	NPI_GET_LOCK_OWNER(sr)		((sr & NPI_DEV_SR_LOCK_FID_MASK) \
+						>> NPI_DEV_SR_LOCK_FID_SHIFT)
+#define	NPI_GET_LOCK_ST(sr)		(sr & NPI_DEV_SR_LOCK_ST_MASK)
+#define	NPI_GET_LOCK_IMPL_ST(sr)	((sr & NPI_DEV_SR_IMPL_ST_MASK) \
+						>> NPI_DEV_SR_IMPL_ST_SHIFT)
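+
+/*
+ * Worked example (for illustration only): a shared-register value of
+ * 0x0016 decodes with the macros above as
+ *	NPI_GET_LOCK_ST(0x0016)      = 0x0016 & 0x03          = 2 (BUSY)
+ *	NPI_GET_LOCK_OWNER(0x0016)   = (0x0016 & 0x0C) >> 2   = 1 (function 1)
+ *	NPI_GET_LOCK_IMPL_ST(0x0016) = (0x0016 & 0xfff0) >> 4 = 1
+ */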
+
+/*
+ * DMA channel binding definitions.
+ */
+#define	DMA_BIND_VADDR_VALIDATE(fn, rn, id, status)			\
+{									\
+	status = NPI_SUCCESS;						\
+	if (!TXDMA_FUNC_VALID(fn)) {					\
+		status = (NPI_FAILURE | NPI_VIR_BD_FUNC_INVALID(fn));	\
+	} else if (!SUBREGION_VALID(rn)) {				\
+		status = (NPI_FAILURE | NPI_VIR_BD_REG_INVALID(rn));	\
+	} else if (!VIR_PAGE_INDEX_VALID(id)) {				\
+		status = (NPI_FAILURE | NPI_VIR_BD_ID_INVALID(id));	\
+	}								\
+}
+
+#define	DMA_BIND_TX_VALIDATE(n, status)					\
+{									\
+	status = NPI_SUCCESS;						\
+	if (!TXDMA_CHANNEL_VALID(n)) {					\
+		status = (NPI_FAILURE | NPI_VIR_BD_TXDMA_INVALID(n));	\
+	}								\
+}
+
+#define	DMA_BIND_RX_VALIDATE(n, status)					\
+{									\
+	status = NPI_SUCCESS;						\
+	if (!VRXDMA_CHANNEL_VALID(n)) {					\
+		status = (NPI_FAILURE | NPI_VIR_BD_RXDMA_INVALID(n));	\
+	}								\
+}
+
+#define	DMA_BIND_STEP			8
+#define	DMA_BIND_REG_OFFSET(fn, rn, id)	(DMA_BIND_STEP * \
+					(fn * 2 * VIR_PAGE_INDEX_MAX + \
+					rn * VIR_PAGE_INDEX_MAX) + id)
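+
+/*
+ * Worked example (for illustration only), assuming VIR_PAGE_INDEX_MAX is 8
+ * (vir_index runs 0 - 7 in fzc_dma_bind_t below):
+ *	DMA_BIND_REG_OFFSET(1, 0, 3) = 8 * (1 * 2 * 8 + 0 * 8) + 3 = 131
+ */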
+
+/*
+ * NPI defined data structure to program the DMA binding register.
+ */
+typedef struct _fzc_dma_bind {
+	uint8_t		function_id;	/* 0 to 3 */
+	uint8_t		sub_vir_region;	/* 0 or 1 */
+	uint8_t		vir_index;	/* 0 to 7 */
+	boolean_t	tx_bind;	/* set 1 to bind */
+	uint8_t		tx_channel;	/* hardware channel number (0 - 23) */
+	boolean_t	rx_bind;	/* set 1 to bind */
+	uint8_t		rx_channel;	/* hardware channel number (0 - 15) */
+} fzc_dma_bind_t, *p_fzc_dma_bind;
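+
+/*
+ * Illustrative sketch only: binding TX channel 4 and RX channel 2 to
+ * virtual page 0 of sub-region 0 for function 0.  All values, and the
+ * handle, are placeholders.
+ *
+ *	fzc_dma_bind_t	bind;
+ *
+ *	bind.function_id = 0;
+ *	bind.sub_vir_region = 0;
+ *	bind.vir_index = 0;
+ *	bind.tx_bind = B_TRUE;
+ *	bind.tx_channel = 4;
+ *	bind.rx_bind = B_TRUE;
+ *	bind.rx_channel = 2;
+ *	(void) npi_fzc_dma_bind_set(handle, bind);
+ */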
+
+/*
+ * Logical device definitions.
+ */
+#define	LD_NUM_STEP		8
+#define	LD_NUM_OFFSET(ld)	(ld * LDG_NUM_STEP)
+#define	LDG_NUM_STEP		8
+#define	LDG_NUM_OFFSET(ldg)	(ldg * LDG_NUM_STEP)
+#define	LDGNUM_OFFSET(ldg)	(ldg * LDG_NUM_STEP)
+#define	LDSV_STEP		8192
+#define	LDSVG_OFFSET(ldg)	(ldg * LDSV_STEP)
+#define	LDSV_OFFSET(ldv)	(ldv * LDSV_STEP)
+
+#define	LDSV_OFFSET_MASK(ld)			\
+	((ld < NXGE_MAC_LD_START) ?		\
+	(LD_IM0_REG + LDSV_OFFSET(ld)) :	\
+	(LD_IM1_REG + LDSV_OFFSET(ld - NXGE_MAC_LD_START)))
+
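+/*
+ * Worked example (for illustration only), assuming NXGE_MAC_LD_START is 64
+ * (it is defined outside this file): LDSV_OFFSET_MASK(2) resolves to
+ * LD_IM0_REG + 2 * 8192, while LDSV_OFFSET_MASK(65) resolves to
+ * LD_IM1_REG + 1 * 8192.
+ */
+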
+#define	LDG_SID_STEP		8
+#define	LDG_SID_OFFSET(ldg)	(ldg * LDG_SID_STEP)
+
+typedef enum {
+	LDF0,
+	LDF1
+} ldf_type_t;
+
+typedef enum {
+	VECTOR0,
+	VECTOR1,
+	VECTOR2
+} ldsv_type_t;
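+
+/*
+ * Note (summarizing their use in npi_vir.c): VECTOR0, VECTOR1 and VECTOR2
+ * select the LDSV0, LDSV1 and LDSV2 registers of a group.  LDSV0/LDSV1
+ * carry one flag bit per logical device below NXGE_MAC_LD_START, while
+ * LDSV2 carries both the LDF0 (low half) and LDF1 (high half) flags of
+ * the devices at and above NXGE_MAC_LD_START.
+ */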
+
+/*
+ * Definitions for the system interrupt data.
+ */
+typedef struct _fzc_sid {
+	boolean_t	niu;
+	uint8_t		ldg;
+	uint8_t		func;
+	uint8_t		vector;
+} fzc_sid_t, *p_fzc_sid_t;
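+
+/*
+ * Illustrative sketch only: programming the system interrupt data of
+ * logical group 5 for function 0 (sid.niu set to B_FALSE so that func and
+ * vector are encoded).  All values, and the handle, are placeholders.
+ *
+ *	fzc_sid_t	sid;
+ *
+ *	sid.niu = B_FALSE;
+ *	sid.ldg = 5;
+ *	sid.func = 0;
+ *	sid.vector = 5;
+ *	(void) npi_fzc_sid_set(handle, sid);
+ */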
+
+/*
+ * Virtualization and Interrupt Prototypes.
+ */
+/*
+ * npi_dev_func_sr_init():
+ *	This function is called to initialize the device function
+ *	shared register (set the software implementation lock
+ *	state to FREE).
+ * Parameters:
+ *	handle		- NPI handle
+ * Return:
+ *	NPI_SUCCESS	- If initialization is complete successfully.
+ *			  (set sr bits to free).
+ *	Error:
+ *	NPI_FAILURE
+ *		VIR_TAS_BUSY
+ */
+npi_status_t npi_dev_func_sr_init(npi_handle_t);
+
+/*
+ * npi_dev_func_sr_lock_enter():
+ *	This function is called to lock the function shared register
+ *	by setting the lock state to busy.
+ * Parameters:
+ *	handle		- NPI handle
+ * Return:
+ *	NPI_SUCCESS	- If the function id can own the lock.
+ *
+ *	Error:
+ *	NPI_FAILURE
+ *		VIR_SR_RESET
+ *		VIR_SR_BUSY
+ *		VIR_SR_INVALID
+ *		VIR_TAS_BUSY
+ */
+npi_status_t npi_dev_func_sr_lock_enter(npi_handle_t);
+
+/*
+ * npi_dev_func_sr_lock_free():
+ *	This function is called to free the function shared register
+ *	by setting the lock state to free.
+ * Parameters:
+ *	handle		- NPI handle
+ * Return:
+ *	NPI_SUCCESS	- If the function id can free the lock.
+ *
+ *	Error:
+ *	NPI_FAILURE
+ *		VIR_SR_NOTOWNER
+ *		VIR_TAS_NOTREAD
+ */
+npi_status_t npi_dev_func_sr_lock_free(npi_handle_t);
+
+/*
+ * npi_dev_func_sr_funcid_get():
+ *	This function is called to get the caller's function ID.
+ *	(based on address bits [25:26] on read access).
+ *	(After read, the TAS bit is always set to 1. Software needs
+ *	to write 0 to clear.) This function will write 0 to clear
+ *	the TAS bit if we own it.
+ * Parameters:
+ *	handle		- NPI handle
+ *	funcid_p	- pointer to store the function id.
+ * Return:
+ *	NPI_SUCCESS	- If get function id is complete successfully.
+ *
+ *	Error:
+ */
+npi_status_t npi_dev_func_sr_funcid_get(npi_handle_t, uint8_t *);
+
+/*
+ * npi_dev_func_sr_sr_raw_get():
+ *	This function is called to get the shared register value.
+ *	(After read, the TAS bit is always set to 1. Software needs
+ *	to write 0 to clear if we own it.)
+ *
+ * Parameters:
+ *	handle		- NPI handle
+ *	sr_p		- pointer to store the shared value of this register.
+ *
+ * Return:
+ *	NPI_SUCCESS		- If shared value get is complete successfully.
+ *
+ *	Error:
+ */
+npi_status_t npi_dev_func_sr_sr_raw_get(npi_handle_t, uint16_t *);
+
+/*
+ * npi_dev_func_sr_sr_get():
+ *	This function is called to get the shared register value.
+ *	(After read, the TAS bit is always set to 1. Software needs
+ *	to write 0 to clear if we own it.)
+ *
+ * Parameters:
+ *	handle		- NPI handle
+ *	sr_p		- pointer to store the shared value of this register.
+ *			  (Only the non-lock, non-function-id portion
+ *			  of the register is returned.)
+ *
+ *
+ * Return:
+ *	NPI_SUCCESS		- If shared value get is complete successfully.
+ *
+ *	Error:
+ */
+
+npi_status_t npi_dev_func_sr_sr_get(npi_handle_t, uint16_t *);
+
+/*
+ * npi_dev_func_sr_sr_get_set_clear():
+ *	This function is called to set the shared register value.
+ *	(Shared register must be read first. If tas bit is 0, then
+ *	it implies that the software can proceed to set). After
+ *	setting, tas bit will be cleared.
+ * Parameters:
+ *	handle		- NPI handle
+ *	impl_sr		- shared value to set (only the 8 bit
+ *			  implementation specific state info).
+ *
+ * Return:
+ *	NPI_SUCCESS		- If shared value is set successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE
+ *		VIR_TAS_BUSY
+ */
+npi_status_t npi_dev_func_sr_sr_get_set_clear(npi_handle_t,
+					    uint16_t);
+
+/*
+ * npi_dev_func_sr_sr_set_only():
+ *	This function is called to only set the shared register value.
+ * Parameters:
+ *	handle		- NPI handle
+ *	impl_sr		- shared value to set.
+ *
+ * Return:
+ *	NPI_SUCCESS		- If shared value is set successfully.
+ *
+ *	Error:
+ *	NPI_FAILURE
+ *		VIR_TAS_BUSY
+ */
+npi_status_t npi_dev_func_sr_sr_set_only(npi_handle_t, uint16_t);
+
+/*
+ * npi_dev_func_sr_busy():
+ *	This function is called to see if we can own the device.
+ *	It will not reset the tas bit.
+ * Parameters:
+ *	handle		- NPI handle
+ *	busy_p		- pointer to store busy flag.
+ *				(B_TRUE: device is in use, B_FALSE: free).
+ * Return:
+ *	NPI_SUCCESS		- If tas bit is read successfully.
+ *	Error:
+ */
+npi_status_t npi_dev_func_sr_busy(npi_handle_t, boolean_t *);
+
+/*
+ * npi_dev_func_sr_tas_get():
+ *	This function is called to get the tas bit
+ *	(after read, this bit is always set to 1, software write 0
+ *	 to clear it).
+ *
+ * Parameters:
+ *	handle		- NPI handle
+ *	tas_p		- pointer to store the tas value
+ *
+ * Return:
+ *	NPI_SUCCESS		- If tas value get is complete successfully.
+ *	Error:
+ */
+npi_status_t npi_dev_func_sr_tas_get(npi_handle_t, uint8_t *);
+
+/*
+ * npi_fzc_mpc_set():
+ *	This function is called to enable the write access
+ *	to FZC region to function zero.
+ * Parameters:
+ *	handle		- NPI handle
+ * Return:
+ *	NPI_SUCCESS	-
+ *	Error:
+ */
+npi_status_t npi_fzc_mpc_set(npi_handle_t, boolean_t);
+
+/*
+ * npi_fzc_mpc_get():
+ *	This function is called to get the access mode.
+ * Parameters:
+ *	handle		- NPI handle
+ * Return:
+ *	NPI_SUCCESS	-
+ *
+ */
+npi_status_t npi_fzc_mpc_get(npi_handle_t, boolean_t *);
+
+/*
+ * npi_fzc_dma_bind_set():
+ *	This function is called to set DMA binding register.
+ * Parameters:
+ *	handle		- NPI handle
+ *	dma_bind	- NPI defined data structure that
+ *			  contains the tx/rx channel binding info.
+ *			  to set.
+ * Return:
+ *	NPI_SUCCESS	-
+ *	Error:
+ *	NPI_FAILURE
+ *
+ */
+npi_status_t npi_fzc_dma_bind_set(npi_handle_t, fzc_dma_bind_t);
+
+/*
+ * npi_fzc_ldg_num_set():
+ *	This function is called to set up a logical group number that
+ *	a logical device belongs to.
+ * Parameters:
+ *	handle		- NPI handle
+ *	ld		- logical device number (0 - 68)
+ *	ldg		- logical device group number (0 - 63)
+ * Return:
+ *	NPI_SUCCESS	-
+ *	Error:
+ *	NPI_FAILURE
+ *
+ */
+npi_status_t npi_fzc_ldg_num_set(npi_handle_t, uint8_t, uint8_t);
+
+/*
+ * npi_fzc_ldg_num_get():
+ *	This function is called to get the logical device group that
+ *	a logical device belongs to.
+ * Parameters:
+ *	handle		- NPI handle
+ *	ld		- logical device number (0 - 68)
+ *	*ldg_p		- pointer to store its group number.
+ * Return:
+ *	NPI_SUCCESS	-
+ *	Error:
+ *	NPI_FAILURE
+ */
+npi_status_t npi_fzc_ldg_num_get(npi_handle_t, uint8_t,
+		uint8_t *);
+
+npi_status_t npi_ldsv_ldfs_get(npi_handle_t, uint8_t,
+		uint64_t *, uint64_t *, uint64_t *);
+/*
+ * npi_ldsv_get():
+ *	This function is called to get device state vectors.
+ * Parameters:
+ *	handle		- NPI handle
+ *	ldg		- logical device group (0 - 63)
+ *	ldf_type	- either LDF0 (0) or LDF1 (1)
+ *	vector		- vector type (0, 1 or 2)
+ *	*ldf_p		- pointer to store its flag bits.
+ * Return:
+ *	NPI_SUCCESS	-
+ *	Error:
+ *	NPI_FAILURE
+ */
+npi_status_t npi_ldsv_get(npi_handle_t, uint8_t, ldsv_type_t,
+		uint64_t *);
+
+/*
+ * npi_ldsv_ld_get():
+ *	This function is called to get the flag bit value of a device.
+ * Parameters:
+ *	handle		- NPI handle
+ *	ldg		- logical device group (0 - 63)
+ *	ld		- logical device (0 - 68)
+ *	ldf_type	- either LDF0 (0) or LDF1 (1)
+ *	vector		- vector type (0, 1 or 2)
+ *	*ldf_p		- pointer to store its flag bits.
+ * Return:
+ *	NPI_SUCCESS	-
+ *	Error:
+ *	NPI_FAILURE
+ */
+npi_status_t npi_ldsv_ld_get(npi_handle_t, uint8_t, uint8_t,
+		ldsv_type_t, ldf_type_t, boolean_t *);
+/*
+ * npi_ldsv_ld_ldf0_get():
+ *	This function is called to get the ldf0 bit value of a device.
+ * Parameters:
+ *	handle		- NPI handle
+ *	ldg		- logical device group (0 - 63)
+ *	ld		- logical device (0 - 68)
+ *	*ldf_p		- pointer to store its flag bits.
+ * Return:
+ *	NPI_SUCCESS	-
+ *	Error:
+ *	NPI_FAILURE
+ */
+npi_status_t npi_ldsv_ld_ldf0_get(npi_handle_t, uint8_t, uint8_t,
+		boolean_t *);
+
+/*
+ * npi_ldsv_ld_ldf1_get():
+ *	This function is called to get the ldf1 bit value of a device.
+ * Parameters:
+ *	handle		- NPI handle
+ *	ldg		- logical device group (0 - 63)
+ *	ld		- logical device (0 - 68)
+ *	*ldf_p		- pointer to store its flag bits.
+ * Return:
+ *	NPI_SUCCESS	-
+ *	Error:
+ *	NPI_FAILURE
+ */
+npi_status_t npi_ldsv_ld_ldf1_get(npi_handle_t, uint8_t, uint8_t,
+		boolean_t *);
+/*
+ * npi_intr_mask_set():
+ *	This function is called to select the mask bits for both ldf0 and ldf1.
+ * Parameters:
+ *	handle		- NPI handle
+ *	ld		- logical device (0 - 68)
+ *	ldf_mask	- mask value to set (both ldf0 and ldf1).
+ * Return:
+ *	NPI_SUCCESS	-
+ *	Error:
+ *	NPI_FAILURE
+ */
+npi_status_t npi_intr_mask_set(npi_handle_t, uint8_t,
+			uint8_t);
+
+/*
+ * npi_intr_mask_get():
+ *	This function is called to get the mask bits.
+ * Parameters:
+ *	handle		- NPI handle
+ *	ld		- logical device (0 - 68)
+ *	ldf_mask	- pointer to store mask bits info.
+ * Return:
+ *	NPI_SUCCESS	-
+ *	Error:
+ *	NPI_FAILURE
+ */
+npi_status_t npi_intr_mask_get(npi_handle_t, uint8_t,
+			uint8_t *);
+
+/*
+ * npi_intr_ldg_mgmt_set():
+ *	This function is called to set interrupt timer and arm bit.
+ * Parameters:
+ *	handle		- NPI handle
+ *	ldg		- logical device group (0 - 63)
+ *	arm		- B_TRUE (arm) B_FALSE (disable)
+ * Return:
+ *	NPI_SUCCESS	-
+ *	Error:
+ *	NPI_FAILURE
+ */
+npi_status_t npi_intr_ldg_mgmt_set(npi_handle_t, uint8_t,
+			boolean_t, uint8_t);
+
+
+/*
+ * npi_intr_ldg_mgmt_timer_get():
+ *	This function is called to get the timer counter
+ * Parameters:
+ *	handle		- NPI handle
+ *	ldg		- logical device group (0 - 63)
+ *	timer_p		- pointer to store the timer counter.
+ * Return:
+ *	NPI_SUCCESS	-
+ *	Error:
+ *	NPI_FAILURE
+ */
+npi_status_t npi_intr_ldg_mgmt_timer_get(npi_handle_t, uint8_t,
+		uint8_t *);
+
+/*
+ * npi_intr_ldg_mgmt_arm():
+ *	This function is called to arm the group.
+ * Parameters:
+ *	handle		- NPI handle
+ *	ldg		- logical device group (0 - 63)
+ * Return:
+ *	NPI_SUCCESS	-
+ *	Error:
+ *	NPI_FAILURE
+ */
+npi_status_t npi_intr_ldg_mgmt_arm(npi_handle_t, uint8_t);
+
+/*
+ * npi_fzc_ldg_timer_res_set():
+ *	This function is called to set the timer resolution.
+ * Parameters:
+ *	handle		- NPI handle
+ *	res		- timer resolution (# of system clocks)
+ * Return:
+ *	NPI_SUCCESS	-
+ *	Error:
+ *	NPI_FAILURE
+ */
+npi_status_t npi_fzc_ldg_timer_res_set(npi_handle_t, uint32_t);
+
+/*
+ * npi_fzc_ldg_timer_res_get():
+ *	This function is called to get the timer resolution.
+ * Parameters:
+ *	handle		- NPI handle
+ *	res_p		- pointer to store the timer resolution.
+ * Return:
+ *	NPI_SUCCESS	-
+ *	Error:
+ *	NPI_FAILURE
+ */
+npi_status_t npi_fzc_ldg_timer_res_get(npi_handle_t, uint8_t *);
+
+/*
+ * npi_fzc_sid_set():
+ *	This function is called to set the system interrupt data.
+ * Parameters:
+ *	handle		- NPI handle
+ *	ldg		- logical group (0 - 63)
+ *	sid		- NPI defined data to set
+ * Return:
+ *	NPI_SUCCESS	-
+ *	Error:
+ *	NPI_FAILURE
+ */
+npi_status_t npi_fzc_sid_set(npi_handle_t, fzc_sid_t);
+
+/*
+ * npi_fzc_sid_get():
+ *	This function is called to get the system interrupt data.
+ * Parameters:
+ *	handle		- NPI handle
+ *	ldg		- logical group (0 - 63)
+ *	sid_p		- NPI defined data to get
+ * Return:
+ *	NPI_SUCCESS	-
+ *	Error:
+ *	NPI_FAILURE
+ */
+npi_status_t npi_fzc_sid_get(npi_handle_t, p_fzc_sid_t);
+npi_status_t npi_fzc_sys_err_mask_set(npi_handle_t, uint64_t);
+npi_status_t npi_fzc_sys_err_stat_get(npi_handle_t,
+						p_sys_err_stat_t);
+npi_status_t npi_vir_dump_pio_fzc_regs_one(npi_handle_t);
+npi_status_t npi_vir_dump_ldgnum(npi_handle_t);
+npi_status_t npi_vir_dump_ldsv(npi_handle_t);
+npi_status_t npi_vir_dump_imask0(npi_handle_t);
+npi_status_t npi_vir_dump_sid(npi_handle_t);
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _NPI_VIR_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nxge/npi/npi_zcp.c	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,757 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#include <npi_zcp.h>
+
+static int zcp_mem_read(npi_handle_t, uint16_t, uint8_t,
+	uint16_t, zcp_ram_unit_t *);
+static int zcp_mem_write(npi_handle_t, uint16_t, uint8_t,
+	uint32_t, uint16_t, zcp_ram_unit_t *);
+
+npi_status_t
+npi_zcp_config(npi_handle_t handle, config_op_t op, zcp_config_t config)
+{
+	uint64_t val = 0;
+
+	switch (op) {
+	case ENABLE:
+	case DISABLE:
+		if ((config == 0) || (config & ~CFG_ZCP_ALL) != 0) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_zcp_config"
+					    " Invalid Input: config <0x%x>",
+					    config));
+			return (NPI_FAILURE | NPI_ZCP_CONFIG_INVALID);
+		}
+
+		NXGE_REG_RD64(handle, ZCP_CONFIG_REG, &val);
+		if (op == ENABLE) {
+			if (config & CFG_ZCP)
+				val |= ZC_ENABLE;
+			if (config & CFG_ZCP_ECC_CHK)
+				val &= ~ECC_CHK_DIS;
+			if (config & CFG_ZCP_PAR_CHK)
+				val &= ~PAR_CHK_DIS;
+			if (config & CFG_ZCP_BUF_RESP)
+				val &= ~DIS_BUFF_RN;
+			if (config & CFG_ZCP_BUF_REQ)
+				val &= ~DIS_BUFF_RQ_IF;
+		} else {
+			if (config & CFG_ZCP)
+				val &= ~ZC_ENABLE;
+			if (config & CFG_ZCP_ECC_CHK)
+				val |= ECC_CHK_DIS;
+			if (config & CFG_ZCP_PAR_CHK)
+				val |= PAR_CHK_DIS;
+			if (config & CFG_ZCP_BUF_RESP)
+				val |= DIS_BUFF_RN;
+			if (config & CFG_ZCP_BUF_REQ)
+				val |= DIS_BUFF_RQ_IF;
+		}
+		NXGE_REG_WR64(handle, ZCP_CONFIG_REG, val);
+
+		break;
+	case INIT:
+		NXGE_REG_RD64(handle, ZCP_CONFIG_REG, &val);
+		val &= ((ZCP_DEBUG_SEL_MASK) | (RDMA_TH_MASK));
+		if (config & CFG_ZCP)
+			val |= ZC_ENABLE;
+		else
+			val &= ~ZC_ENABLE;
+		if (config & CFG_ZCP_ECC_CHK)
+			val &= ~ECC_CHK_DIS;
+		else
+			val |= ECC_CHK_DIS;
+		if (config & CFG_ZCP_PAR_CHK)
+			val &= ~PAR_CHK_DIS;
+		else
+			val |= PAR_CHK_DIS;
+		if (config & CFG_ZCP_BUF_RESP)
+			val &= ~DIS_BUFF_RN;
+		else
+			val |= DIS_BUFF_RN;
+		if (config & CFG_ZCP_BUF_REQ)
+			val &= ~DIS_BUFF_RQ_IF;
+		else
+			val |= DIS_BUFF_RQ_IF;
+		NXGE_REG_WR64(handle, ZCP_CONFIG_REG, val);
+
+		break;
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_zcp_config"
+					    " Invalid Input: op <0x%x>",
+					    op));
+		return (NPI_FAILURE | NPI_ZCP_OPCODE_INVALID);
+	}
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_zcp_iconfig(npi_handle_t handle, config_op_t op, zcp_iconfig_t iconfig)
+{
+	uint64_t val = 0;
+
+	switch (op) {
+	case ENABLE:
+	case DISABLE:
+		if ((iconfig == 0) || (iconfig & ~ICFG_ZCP_ALL) != 0) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_zcp_iconfig"
+					    " Invalid Input: iconfig <0x%x>",
+					    iconfig));
+			return (NPI_FAILURE | NPI_ZCP_CONFIG_INVALID);
+		}
+
+		NXGE_REG_RD64(handle, ZCP_INT_MASK_REG, &val);
+		if (op == ENABLE)
+			val |= iconfig;
+		else
+			val &= ~iconfig;
+		NXGE_REG_WR64(handle, ZCP_INT_MASK_REG, val);
+
+		break;
+
+	case INIT:
+		if ((iconfig & ~ICFG_ZCP_ALL) != 0) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_zcp_iconfig"
+					    " Invalid Input: iconfig <0x%x>",
+					    iconfig));
+			return (NPI_FAILURE | NPI_ZCP_CONFIG_INVALID);
+		}
+		val = (uint64_t)iconfig;
+		NXGE_REG_WR64(handle, ZCP_INT_MASK_REG, val);
+
+		break;
+	default:
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_zcp_iconfig"
+				    " Invalid Input: op <0x%x>",
+				    op));
+		return (NPI_FAILURE | NPI_ZCP_OPCODE_INVALID);
+	}
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_zcp_get_istatus(npi_handle_t handle, zcp_iconfig_t *istatus)
+{
+	uint64_t val;
+
+	NXGE_REG_RD64(handle, ZCP_INT_STAT_REG, &val);
+	*istatus = (uint32_t)val;
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_zcp_clear_istatus(npi_handle_t handle)
+{
+	uint64_t val;
+
+	val = (uint64_t)0xffff;
+	NXGE_REG_WR64(handle, ZCP_INT_STAT_REG, val);
+	return (NPI_SUCCESS);
+}
+
+
+npi_status_t
+npi_zcp_set_dma_thresh(npi_handle_t handle, uint16_t dma_thres)
+{
+	uint64_t val = 0;
+
+	if ((dma_thres & ~RDMA_TH_BITS) != 0) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_zcp_set_dma_thresh"
+				    " Invalid Input: dma_thres <0x%x>",
+				    dma_thres));
+		return (NPI_FAILURE | NPI_ZCP_DMA_THRES_INVALID);
+	}
+
+	NXGE_REG_RD64(handle, ZCP_CONFIG_REG, &val);
+
+	val &= ~RDMA_TH_MASK;
+	val |= (dma_thres << RDMA_TH_SHIFT);
+
+	NXGE_REG_WR64(handle, ZCP_CONFIG_REG, val);
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_zcp_set_bam_region(npi_handle_t handle, zcp_buf_region_t region,
+			zcp_bam_region_reg_t *region_attr)
+{
+
+	ASSERT(IS_VALID_BAM_REGION(region));
+	if (!IS_VALID_BAM_REGION(region)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_zcp_set_bam_region"
+				    " Invalid Input: region <0x%x>",
+				    region));
+		return (NPI_FAILURE | NPI_ZCP_BAM_REGION_INVALID);
+	}
+
+	switch (region) {
+	case BAM_4BUF:
+		NXGE_REG_WR64(handle, ZCP_BAM4_RE_CTL_REG, region_attr->value);
+		break;
+	case BAM_8BUF:
+		NXGE_REG_WR64(handle, ZCP_BAM8_RE_CTL_REG, region_attr->value);
+		break;
+	case BAM_16BUF:
+		NXGE_REG_WR64(handle, ZCP_BAM16_RE_CTL_REG, region_attr->value);
+		break;
+	case BAM_32BUF:
+		NXGE_REG_WR64(handle, ZCP_BAM32_RE_CTL_REG, region_attr->value);
+		break;
+	}
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_zcp_set_dst_region(npi_handle_t handle, zcp_buf_region_t region,
+				uint16_t row_idx)
+{
+	uint64_t val = 0;
+
+	ASSERT(IS_VALID_BAM_REGION(region));
+	if (!IS_VALID_BAM_REGION(region)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_zcp_set_dst_region"
+				    " Invalid Input: region <0x%x>",
+				    region));
+		return (NPI_FAILURE | NPI_ZCP_BAM_REGION_INVALID);
+	}
+
+	if ((row_idx & ~0x3FF) != 0) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_zcp_set_dst_region"
+				    " Invalid Input: row_idx", row_idx));
+		return (NPI_FAILURE | NPI_ZCP_ROW_INDEX_INVALID);
+	}
+
+	val = (uint64_t)row_idx;
+
+	switch (region) {
+	case BAM_4BUF:
+		NXGE_REG_WR64(handle, ZCP_DST4_RE_CTL_REG, val);
+		break;
+	case BAM_8BUF:
+		NXGE_REG_WR64(handle, ZCP_DST8_RE_CTL_REG, val);
+		break;
+	case BAM_16BUF:
+		NXGE_REG_WR64(handle, ZCP_DST16_RE_CTL_REG, val);
+		break;
+	case BAM_32BUF:
+		NXGE_REG_WR64(handle, ZCP_DST32_RE_CTL_REG, val);
+		break;
+	}
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_zcp_tt_static_entry(npi_handle_t handle, io_op_t op, uint16_t flow_id,
+			tte_sflow_attr_mask_t mask, tte_sflow_attr_t *sflow)
+{
+	uint32_t		byte_en = 0;
+	tte_sflow_attr_t	val;
+
+	if ((op != OP_SET) && (op != OP_GET)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_zcp_tt_static_entry"
+				    " Invalid Input: op <0x%x>",
+				    op));
+		return (NPI_FAILURE | NPI_ZCP_OPCODE_INVALID);
+	}
+
+	if ((mask & TTE_SFLOW_ATTR_ALL) == 0) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_zcp_tt_static_entry"
+				    " Invalid Input: mask <0x%x>",
+				    mask));
+		return (NPI_FAILURE | NPI_ZCP_SFLOW_ATTR_INVALID);
+	}
+
+	if ((flow_id & ~0x0FFF) != 0) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_zcp_tt_static_entry"
+				    " Invalid Input: flow_id<0x%x>",
+				    flow_id));
+		return (NPI_FAILURE | NPI_ZCP_FLOW_ID_INVALID);
+	}
+
+	if (zcp_mem_read(handle, flow_id, ZCP_RAM_SEL_TT_STATIC, NULL,
+			(zcp_ram_unit_t *)&val) != 0) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_zcp_tt_static_entry"
+				    " HW Error: ZCP_RAM_ACC <0x%x>",
+				    NULL));
+		return (NPI_FAILURE | NPI_ZCP_MEM_READ_FAILED);
+	}
+
+	if (op == OP_SET) {
+		if (mask & TTE_RDC_TBL_OFF) {
+			val.qw0.bits.ldw.rdc_tbl_offset =
+					sflow->qw0.bits.ldw.rdc_tbl_offset;
+			byte_en |= TTE_RDC_TBL_SFLOW_BITS_EN;
+		}
+		if (mask & TTE_BUF_SIZE) {
+			val.qw0.bits.ldw.buf_size =
+					sflow->qw0.bits.ldw.buf_size;
+			byte_en |= TTE_BUF_SIZE_BITS_EN;
+		}
+		if (mask & TTE_NUM_BUF) {
+			val.qw0.bits.ldw.num_buf = sflow->qw0.bits.ldw.num_buf;
+			byte_en |= TTE_NUM_BUF_BITS_EN;
+		}
+		if (mask & TTE_ULP_END) {
+			val.qw0.bits.ldw.ulp_end = sflow->qw0.bits.ldw.ulp_end;
+			byte_en |=  TTE_ULP_END_BITS_EN;
+		}
+		if (mask & TTE_ULP_END) {
+			val.qw1.bits.ldw.ulp_end = sflow->qw1.bits.ldw.ulp_end;
+			byte_en |= TTE_ULP_END_BITS_EN;
+		}
+		if (mask & TTE_ULP_END_EN) {
+			val.qw1.bits.ldw.ulp_end_en =
+				sflow->qw1.bits.ldw.ulp_end_en;
+			byte_en |= TTE_ULP_END_EN_BITS_EN;
+		}
+		if (mask & TTE_UNMAP_ALL_EN) {
+			val.qw1.bits.ldw.unmap_all_en =
+					sflow->qw1.bits.ldw.unmap_all_en;
+			byte_en |= TTE_UNMAP_ALL_EN;
+		}
+		if (mask & TTE_TMODE) {
+			val.qw1.bits.ldw.tmode = sflow->qw1.bits.ldw.tmode;
+			byte_en |= TTE_TMODE_BITS_EN;
+		}
+		if (mask & TTE_SKIP) {
+			val.qw1.bits.ldw.skip = sflow->qw1.bits.ldw.skip;
+			byte_en |= TTE_SKIP_BITS_EN;
+		}
+		if (mask & TTE_HBM_RING_BASE_ADDR) {
+			val.qw1.bits.ldw.ring_base =
+					sflow->qw1.bits.ldw.ring_base;
+			byte_en |= TTE_RING_BASE_ADDR_BITS_EN;
+		}
+		if (mask & TTE_HBM_RING_BASE_ADDR) {
+			val.qw2.bits.ldw.ring_base =
+					sflow->qw2.bits.ldw.ring_base;
+			byte_en |= TTE_RING_BASE_ADDR_BITS_EN;
+		}
+		if (mask & TTE_HBM_RING_SIZE) {
+			val.qw2.bits.ldw.ring_size =
+					sflow->qw2.bits.ldw.ring_size;
+			byte_en |= TTE_RING_SIZE_BITS_EN;
+		}
+		if (mask & TTE_HBM_BUSY) {
+			val.qw2.bits.ldw.busy = sflow->qw2.bits.ldw.busy;
+			byte_en |= TTE_BUSY_BITS_EN;
+		}
+		if (mask & TTE_HBM_TOQ) {
+			val.qw3.bits.ldw.toq = sflow->qw3.bits.ldw.toq;
+			byte_en |= TTE_TOQ_BITS_EN;
+		}
+
+		if (zcp_mem_write(handle, flow_id, ZCP_RAM_SEL_TT_STATIC,
+					byte_en, NULL,
+					(zcp_ram_unit_t *)&val) != 0) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_zcp_tt_static_entry"
+					    " HW Error: ZCP_RAM_ACC <0x%x>",
+					    NULL));
+			return (NPI_FAILURE | NPI_ZCP_MEM_WRITE_FAILED);
+		}
+	} else {
+		sflow->qw0.value = val.qw0.value;
+		sflow->qw1.value = val.qw1.value;
+		sflow->qw2.value = val.qw2.value;
+		sflow->qw3.value = val.qw3.value;
+		sflow->qw4.value = val.qw4.value;
+	}
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_zcp_tt_dynamic_entry(npi_handle_t handle, io_op_t op, uint16_t flow_id,
+			tte_dflow_attr_mask_t mask, tte_dflow_attr_t *dflow)
+{
+	uint32_t		byte_en = 0;
+	tte_dflow_attr_t	val;
+
+	if ((op != OP_SET) && (op != OP_GET)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_zcp_tt_dynamic_entry"
+				    " Invalid Input: op <0x%x>", op));
+		return (NPI_FAILURE | NPI_ZCP_OPCODE_INVALID);
+	}
+
+	if ((mask & TTE_DFLOW_ATTR_ALL) == 0) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_zcp_tt_dynamic_entry"
+				    " Invalid Input: mask <0x%x>",
+				    mask));
+		return (NPI_FAILURE | NPI_ZCP_DFLOW_ATTR_INVALID);
+	}
+
+	if ((flow_id & ~0x0FFF) != 0) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_zcp_tt_dynamic_entry"
+				    " Invalid Input: flow_id <0x%x>",
+				    flow_id));
+		return (NPI_FAILURE | NPI_ZCP_FLOW_ID_INVALID);
+	}
+
+	if (zcp_mem_read(handle, flow_id, ZCP_RAM_SEL_TT_DYNAMIC, NULL,
+			(zcp_ram_unit_t *)&val) != 0) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_zcp_tt_dynamic_entry"
+				    " HW Error: ZCP_RAM_ACC <0x%x>",
+				    NULL));
+		return (NPI_FAILURE | NPI_ZCP_MEM_READ_FAILED);
+	}
+
+	if (op == OP_SET) {
+
+		/* Get data read */
+		if (mask & TTE_MAPPED_IN) {
+			val.qw0.bits.ldw.mapped_in =
+					dflow->qw0.bits.ldw.mapped_in;
+			byte_en |= TTE_MAPPED_IN_BITS_EN;
+		}
+		if (mask & TTE_ANCHOR_SEQ) {
+			val.qw1.bits.ldw.anchor_seq =
+					dflow->qw1.bits.ldw.anchor_seq;
+			byte_en |= TTE_ANCHOR_SEQ_BITS_EN;
+		}
+		if (mask & TTE_ANCHOR_OFFSET) {
+			val.qw2.bits.ldw.anchor_offset =
+					dflow->qw2.bits.ldw.anchor_offset;
+			byte_en |= TTE_ANCHOR_OFFSET_BITS_EN;
+		}
+		if (mask & TTE_ANCHOR_BUFFER) {
+			val.qw2.bits.ldw.anchor_buf =
+					dflow->qw2.bits.ldw.anchor_buf;
+			byte_en |= TTE_ANCHOR_BUFFER_BITS_EN;
+		}
+		if (mask & TTE_ANCHOR_BUF_FLAG) {
+			val.qw2.bits.ldw.anchor_buf_flag =
+					dflow->qw2.bits.ldw.anchor_buf_flag;
+			byte_en |= TTE_ANCHOR_BUF_FLAG_BITS_EN;
+		}
+		if (mask & TTE_UNMAP_ON_LEFT) {
+			val.qw2.bits.ldw.unmap_on_left =
+					dflow->qw2.bits.ldw.unmap_on_left;
+			byte_en |= TTE_UNMAP_ON_LEFT_BITS_EN;
+		}
+		if (mask & TTE_ULP_END_REACHED) {
+			val.qw2.bits.ldw.ulp_end_reached =
+					dflow->qw2.bits.ldw.ulp_end_reached;
+			byte_en |= TTE_ULP_END_REACHED_BITS_EN;
+		}
+		if (mask & TTE_ERR_STAT) {
+			val.qw3.bits.ldw.err_stat =
+					dflow->qw3.bits.ldw.err_stat;
+			byte_en |= TTE_ERR_STAT_BITS_EN;
+		}
+		if (mask & TTE_HBM_WR_PTR) {
+			val.qw3.bits.ldw.wr_ptr = dflow->qw3.bits.ldw.wr_ptr;
+			byte_en |= TTE_WR_PTR_BITS_EN;
+		}
+		if (mask & TTE_HBM_HOQ) {
+			val.qw3.bits.ldw.hoq = dflow->qw3.bits.ldw.hoq;
+			byte_en |= TTE_HOQ_BITS_EN;
+		}
+		if (mask & TTE_HBM_PREFETCH_ON) {
+			val.qw3.bits.ldw.prefetch_on =
+					dflow->qw3.bits.ldw.prefetch_on;
+			byte_en |= TTE_PREFETCH_ON_BITS_EN;
+		}
+
+		if (zcp_mem_write(handle, flow_id, ZCP_RAM_SEL_TT_DYNAMIC,
+					byte_en, NULL,
+					(zcp_ram_unit_t *)&val) != 0) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_zcp_tt_dynamic_entry"
+					    " HW Error: ZCP_RAM_ACC <0x%x>",
+					    NULL));
+			return (NPI_FAILURE | NPI_ZCP_MEM_WRITE_FAILED);
+		}
+	} else {
+		dflow->qw0.value = val.qw0.value;
+		dflow->qw1.value = val.qw1.value;
+		dflow->qw2.value = val.qw2.value;
+		dflow->qw3.value = val.qw3.value;
+		dflow->qw4.value = val.qw4.value;
+	}
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_zcp_tt_bam_entry(npi_handle_t handle, io_op_t op, uint16_t flow_id,
+			uint8_t bankn, uint8_t word_en, zcp_ram_unit_t *data)
+{
+	zcp_ram_unit_t val;
+
+	if ((op != OP_SET) && (op != OP_GET)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_zcp_tt_bam_entry"
+				    " Invalid Input: op <0x%x>", op));
+		return (NPI_FAILURE | NPI_ZCP_OPCODE_INVALID);
+	}
+
+	if ((flow_id & ~0x0FFF) != 0) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_zcp_tt_dynamic_entry"
+				    " Invalid Input: flow_id <0x%x>",
+				    flow_id));
+		return (NPI_FAILURE | NPI_ZCP_FLOW_ID_INVALID);
+	}
+
+	if (bankn >= MAX_BAM_BANKS) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_zcp_tt_bam_entry"
+				    " Invalid Input: bankn <0x%x>",
+				    bankn));
+		return (NPI_FAILURE | NPI_ZCP_BAM_BANK_INVALID);
+	}
+
+	if ((word_en & ~0xF) != 0) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_zcp_tt_bam_entry"
+				    " Invalid Input: word_en <0x%x>",
+				    word_en));
+		return (NPI_FAILURE | NPI_ZCP_BAM_WORD_EN_INVALID);
+	}
+
+	if (zcp_mem_read(handle, flow_id, ZCP_RAM_SEL_BAM0 + bankn, NULL,
+				(zcp_ram_unit_t *)&val) != 0) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_zcp_tt_bam_entry"
+				    " HW Error: ZCP_RAM_ACC <0x%x>",
+				    NULL));
+		return (NPI_FAILURE | NPI_ZCP_MEM_READ_FAILED);
+	}
+
+	if (op == OP_SET) {
+		if (zcp_mem_write(handle, flow_id, ZCP_RAM_SEL_BAM0 + bankn,
+					word_en, NULL,
+					data) != 0) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_zcp_tt_bam_entry"
+					    " HW Error: ZCP_RAM_ACC <0x%x>",
+					    NULL));
+			return (NPI_FAILURE | NPI_ZCP_MEM_WRITE_FAILED);
+		}
+	} else {
+		data->w0 = val.w0;
+		data->w1 = val.w1;
+		data->w2 = val.w2;
+		data->w3 = val.w3;
+	}
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_zcp_tt_cfifo_entry(npi_handle_t handle, io_op_t op, uint8_t portn,
+			uint16_t entryn, zcp_ram_unit_t *data)
+{
+	if ((op != OP_SET) && (op != OP_GET)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_zcp_tt_cfifo_entry"
+				    " Invalid Input: op <0x%x>", op));
+		return (NPI_FAILURE | NPI_ZCP_OPCODE_INVALID);
+	}
+
+	if (portn > 3) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_zcp_tt_cfifo_entry"
+				    " Invalid Input: portn <%d>", portn));
+		return (NPI_FAILURE | NPI_ZCP_PORT_INVALID(portn));
+	}
+
+	if (op == OP_SET) {
+		if (zcp_mem_write(handle, NULL, ZCP_RAM_SEL_CFIFO0 + portn,
+					0x1ffff, entryn, data) != 0) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_zcp_tt_cfifo_entry"
+					    " HW Error: ZCP_RAM_ACC <0x%x>",
+					    NULL));
+			return (NPI_FAILURE | NPI_ZCP_MEM_WRITE_FAILED);
+		}
+	} else {
+		if (zcp_mem_read(handle, NULL, ZCP_RAM_SEL_CFIFO0 + portn,
+					entryn, data) != 0) {
+			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+					    " npi_zcp_tt_cfifo_entry"
+					    " HW Error: ZCP_RAM_ACC  <0x%x>",
+					NULL));
+			return (NPI_FAILURE | NPI_ZCP_MEM_READ_FAILED);
+		}
+	}
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_zcp_rest_cfifo_port(npi_handle_t handle, uint8_t port)
+{
+	uint64_t offset = ZCP_RESET_CFIFO_REG;
+	zcp_reset_cfifo_t cfifo_reg;
+	NXGE_REG_RD64(handle, offset, &cfifo_reg.value);
+	cfifo_reg.value &= ZCP_RESET_CFIFO_MASK;
+
+	switch (port) {
+		case 0:
+			cfifo_reg.bits.ldw.reset_cfifo0 = 1;
+			NXGE_REG_WR64(handle, offset, cfifo_reg.value);
+			cfifo_reg.bits.ldw.reset_cfifo0 = 0;
+
+			break;
+		case 1:
+			cfifo_reg.bits.ldw.reset_cfifo1 = 1;
+			NXGE_REG_WR64(handle, offset, cfifo_reg.value);
+			cfifo_reg.bits.ldw.reset_cfifo1 = 0;
+			break;
+		case 2:
+			cfifo_reg.bits.ldw.reset_cfifo2 = 1;
+			NXGE_REG_WR64(handle, offset, cfifo_reg.value);
+			cfifo_reg.bits.ldw.reset_cfifo2 = 0;
+			break;
+		case 3:
+			cfifo_reg.bits.ldw.reset_cfifo3 = 1;
+			NXGE_REG_WR64(handle, offset, cfifo_reg.value);
+			cfifo_reg.bits.ldw.reset_cfifo3 = 0;
+			break;
+		default:
+			break;
+	}
+
+	NXGE_DELAY(ZCP_CFIFIO_RESET_WAIT);
+	NXGE_REG_WR64(handle, offset, cfifo_reg.value);
+
+	return (NPI_SUCCESS);
+}
+
+npi_status_t
+npi_zcp_rest_cfifo_all(npi_handle_t handle)
+{
+	uint64_t offset = ZCP_RESET_CFIFO_REG;
+	zcp_reset_cfifo_t cfifo_reg;
+
+	cfifo_reg.value = ZCP_RESET_CFIFO_MASK;
+	NXGE_REG_WR64(handle, offset, cfifo_reg.value);
+	cfifo_reg.value = 0;
+	NXGE_DELAY(ZCP_CFIFIO_RESET_WAIT);
+	NXGE_REG_WR64(handle, offset, cfifo_reg.value);
+	return (NPI_SUCCESS);
+}
+
+static int
+zcp_mem_read(npi_handle_t handle, uint16_t flow_id, uint8_t ram_sel,
+		uint16_t cfifo_entryn, zcp_ram_unit_t *val)
+{
+	zcp_ram_access_t ram_ctl;
+
+	ram_ctl.value = 0;
+	ram_ctl.bits.ldw.ram_sel = ram_sel;
+	ram_ctl.bits.ldw.zcfid = flow_id;
+	ram_ctl.bits.ldw.rdwr = ZCP_RAM_RD;
+	ram_ctl.bits.ldw.cfifo = cfifo_entryn;
+
+	/* Wait for RAM ready to be read */
+	ZCP_WAIT_RAM_READY(handle, ram_ctl.value);
+	if (ram_ctl.bits.ldw.busy != 0) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+				    " npi_zcp_tt_static_entry"
+				    " HW Error: ZCP_RAM_ACC <0x%x>",
+				    ram_ctl.value));
+		return (-1);
+	}
+
+	/* Read from RAM */
+	NXGE_REG_WR64(handle, ZCP_RAM_ACC_REG, ram_ctl.value);
+
+	/* Wait for RAM read done */
+	ZCP_WAIT_RAM_READY(handle, ram_ctl.value);
+	if (ram_ctl.bits.ldw.busy != 0)
+		return (-1);
+
+	/* Get data */
+	NXGE_REG_RD64(handle, ZCP_RAM_DATA0_REG, &val->w0);
+	NXGE_REG_RD64(handle, ZCP_RAM_DATA1_REG, &val->w1);
+	NXGE_REG_RD64(handle, ZCP_RAM_DATA2_REG, &val->w2);
+	NXGE_REG_RD64(handle, ZCP_RAM_DATA3_REG, &val->w3);
+	NXGE_REG_RD64(handle, ZCP_RAM_DATA4_REG, &val->w4);
+
+	return (0);
+}
+
+static int
+zcp_mem_write(npi_handle_t handle, uint16_t flow_id, uint8_t ram_sel,
+		uint32_t byte_en, uint16_t cfifo_entryn, zcp_ram_unit_t *val)
+{
+	zcp_ram_access_t	ram_ctl;
+	zcp_ram_benable_t	ram_en;
+
+	ram_ctl.value = 0;
+	ram_ctl.bits.ldw.ram_sel = ram_sel;
+	ram_ctl.bits.ldw.zcfid = flow_id;
+	ram_ctl.bits.ldw.rdwr = ZCP_RAM_WR;
+	ram_en.value = 0;
+	ram_en.bits.ldw.be = byte_en;
+	ram_ctl.bits.ldw.cfifo = cfifo_entryn;
+
+	/* Setup data */
+	NXGE_REG_WR64(handle, ZCP_RAM_DATA0_REG, val->w0);
+	NXGE_REG_WR64(handle, ZCP_RAM_DATA1_REG, val->w1);
+	NXGE_REG_WR64(handle, ZCP_RAM_DATA2_REG, val->w2);
+	NXGE_REG_WR64(handle, ZCP_RAM_DATA3_REG, val->w3);
+	NXGE_REG_WR64(handle, ZCP_RAM_DATA4_REG, val->w4);
+
+	/* Set byte mask */
+	NXGE_REG_WR64(handle, ZCP_RAM_BE_REG, ram_en.value);
+
+	/* Write to RAM */
+	NXGE_REG_WR64(handle, ZCP_RAM_ACC_REG, ram_ctl.value);
+
+	/* Wait for RAM write complete */
+	ZCP_WAIT_RAM_READY(handle, ram_ctl.value);
+	if (ram_ctl.bits.ldw.busy != 0)
+		return (-1);
+
+	return (0);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nxge/npi/npi_zcp.h	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,187 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _NPI_ZCP_H
+#define	_NPI_ZCP_H
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#include <npi.h>
+#include <nxge_zcp_hw.h>
+
+typedef	enum zcp_buf_region_e {
+	BAM_4BUF			= 1,
+	BAM_8BUF			= 2,
+	BAM_16BUF			= 3,
+	BAM_32BUF			= 4
+} zcp_buf_region_t;
+
+typedef enum zcp_config_e {
+	CFG_ZCP				= 0x01,
+	CFG_ZCP_ECC_CHK			= 0x02,
+	CFG_ZCP_PAR_CHK			= 0x04,
+	CFG_ZCP_BUF_RESP		= 0x08,
+	CFG_ZCP_BUF_REQ			= 0x10,
+	CFG_ZCP_ALL			= 0x1F
+} zcp_config_t;
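+
+/*
+ * Illustrative sketch only: a caller could initialize the ZCP with all
+ * features on, or later disable parity checking alone, e.g.
+ *
+ *	(void) npi_zcp_config(handle, INIT, CFG_ZCP_ALL);
+ *	(void) npi_zcp_config(handle, DISABLE, CFG_ZCP_PAR_CHK);
+ */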
+
+typedef enum zcp_iconfig_e {
+	ICFG_ZCP_RRFIFO_UNDERRUN	= RRFIFO_UNDERRUN,
+	ICFG_ZCP_RRFIFO_OVERRUN		= RRFIFO_OVERRUN,
+	ICFG_ZCP_RSPFIFO_UNCORR_ERR	= RSPFIFO_UNCORR_ERR,
+	ICFG_ZCP_BUFFER_OVERFLOW	= BUFFER_OVERFLOW,
+	ICFG_ZCP_STAT_TBL_PERR		= STAT_TBL_PERR,
+	ICFG_ZCP_DYN_TBL_PERR		= BUF_DYN_TBL_PERR,
+	ICFG_ZCP_BUF_TBL_PERR		= BUF_TBL_PERR,
+	ICFG_ZCP_TT_PROGRAM_ERR		= TT_PROGRAM_ERR,
+	ICFG_ZCP_RSP_TT_INDEX_ERR	= RSP_TT_INDEX_ERR,
+	ICFG_ZCP_SLV_TT_INDEX_ERR	= SLV_TT_INDEX_ERR,
+	ICFG_ZCP_TT_INDEX_ERR		= ZCP_TT_INDEX_ERR,
+	ICFG_ZCP_CFIFO_ECC3		= CFIFO_ECC3,
+	ICFG_ZCP_CFIFO_ECC2		= CFIFO_ECC2,
+	ICFG_ZCP_CFIFO_ECC1		= CFIFO_ECC1,
+	ICFG_ZCP_CFIFO_ECC0		= CFIFO_ECC0,
+	ICFG_ZCP_ALL			= (RRFIFO_UNDERRUN | RRFIFO_OVERRUN |
+				RSPFIFO_UNCORR_ERR | STAT_TBL_PERR |
+				BUF_DYN_TBL_PERR | BUF_TBL_PERR |
+				TT_PROGRAM_ERR | RSP_TT_INDEX_ERR |
+				SLV_TT_INDEX_ERR | ZCP_TT_INDEX_ERR |
+				CFIFO_ECC3 | CFIFO_ECC2 |  CFIFO_ECC1 |
+				CFIFO_ECC0 | BUFFER_OVERFLOW)
+} zcp_iconfig_t;
+
+typedef enum tte_sflow_attr_mask_e {
+	TTE_RDC_TBL_OFF			= 0x0001,
+	TTE_BUF_SIZE			= 0x0002,
+	TTE_NUM_BUF			= 0x0004,
+	TTE_ULP_END			= 0x0008,
+	TTE_ULP_END_EN			= 0x0010,
+	TTE_UNMAP_ALL_EN		= 0x0020,
+	TTE_TMODE			= 0x0040,
+	TTE_SKIP			= 0x0080,
+	TTE_HBM_RING_BASE_ADDR		= 0x0100,
+	TTE_HBM_RING_SIZE		= 0x0200,
+	TTE_HBM_BUSY			= 0x0400,
+	TTE_HBM_TOQ			= 0x0800,
+	TTE_SFLOW_ATTR_ALL		= 0x0FFF
+} tte_sflow_attr_mask_t;
+
+typedef	enum tte_dflow_attr_mask_e {
+	TTE_MAPPED_IN			= 0x0001,
+	TTE_ANCHOR_SEQ			= 0x0002,
+	TTE_ANCHOR_OFFSET		= 0x0004,
+	TTE_ANCHOR_BUFFER		= 0x0008,
+	TTE_ANCHOR_BUF_FLAG		= 0x0010,
+	TTE_UNMAP_ON_LEFT		= 0x0020,
+	TTE_ULP_END_REACHED		= 0x0040,
+	TTE_ERR_STAT			= 0x0080,
+	TTE_HBM_WR_PTR			= 0x0100,
+	TTE_HBM_HOQ			= 0x0200,
+	TTE_HBM_PREFETCH_ON		= 0x0400,
+	TTE_DFLOW_ATTR_ALL		= 0x07FF
+} tte_dflow_attr_mask_t;
+
+#define	IS_VALID_BAM_REGION(region)\
+		((region == BAM_4BUF) || (region == BAM_8BUF) ||\
+		(region == BAM_16BUF) || (region == BAM_32BUF))
+
+#define	ZCP_WAIT_RAM_READY(handle, val) {\
+	uint32_t cnt = MAX_PIO_RETRIES;\
+	do {\
+		NXGE_REG_RD64(handle, ZCP_RAM_ACC_REG, &val);\
+		cnt--;\
+	} while ((ram_ctl.bits.ldw.busy != 0) && (cnt > 0));\
+}
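+
+/*
+ * Note: ZCP_WAIT_RAM_READY() reads ZCP_RAM_ACC_REG into 'val' but tests the
+ * busy bit through a local named 'ram_ctl', so callers are expected to
+ * declare a zcp_ram_access_t named ram_ctl and pass ram_ctl.value as the
+ * 'val' argument (as zcp_mem_read()/zcp_mem_write() in npi_zcp.c do).
+ */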
+
+#define	ZCP_DMA_THRES_INVALID		0x10
+#define	ZCP_BAM_REGION_INVALID		0x11
+#define	ZCP_ROW_INDEX_INVALID		0x12
+#define	ZCP_SFLOW_ATTR_INVALID		0x13
+#define	ZCP_DFLOW_ATTR_INVALID		0x14
+#define	ZCP_FLOW_ID_INVALID		0x15
+#define	ZCP_BAM_BANK_INVALID		0x16
+#define	ZCP_BAM_WORD_EN_INVALID		0x17
+
+#define	NPI_ZCP_OPCODE_INVALID		((ZCP_BLK_ID << 8) | OPCODE_INVALID)
+#define	NPI_ZCP_CONFIG_INVALID		((ZCP_BLK_ID << 8) | CONFIG_INVALID)
+#define	NPI_ZCP_DMA_THRES_INVALID	((ZCP_BLK_ID << 8) |\
+					ZCP_DMA_THRES_INVALID)
+#define	NPI_ZCP_BAM_REGION_INVALID	((ZCP_BLK_ID << 8) |\
+					ZCP_BAM_REGION_INVALID)
+#define	NPI_ZCP_ROW_INDEX_INVALID	((ZCP_BLK_ID << 8) |\
+					ZCP_ROW_INDEX_INVALID)
+#define	NPI_ZCP_SFLOW_ATTR_INVALID	((ZCP_BLK_ID << 8) |\
+					ZCP_SFLOW_ATTR_INVALID)
+#define	NPI_ZCP_DFLOW_ATTR_INVALID	((ZCP_BLK_ID << 8) |\
+					ZCP_DFLOW_ATTR_INVALID)
+#define	NPI_ZCP_FLOW_ID_INVALID		((ZCP_BLK_ID << 8) |\
+					ZCP_FLOW_ID_INVALID)
+#define	NPI_ZCP_MEM_WRITE_FAILED	((ZCP_BLK_ID << 8) | WRITE_FAILED)
+#define	NPI_ZCP_MEM_READ_FAILED		((ZCP_BLK_ID << 8) | READ_FAILED)
+#define	NPI_ZCP_BAM_BANK_INVALID	((ZCP_BLK_ID << 8) |\
+					(ZCP_BAM_BANK_INVALID))
+#define	NPI_ZCP_BAM_WORD_EN_INVALID	((ZCP_BLK_ID << 8) |\
+					(ZCP_BAM_WORD_EN_INVALID))
+#define	NPI_ZCP_PORT_INVALID(portn)	((ZCP_BLK_ID << 8) | PORT_INVALID |\
+					(portn << 12))
+
+/* ZCP HW NPI Prototypes */
+npi_status_t npi_zcp_config(npi_handle_t, config_op_t,
+				zcp_config_t);
+npi_status_t npi_zcp_iconfig(npi_handle_t, config_op_t,
+				zcp_iconfig_t);
+npi_status_t npi_zcp_get_istatus(npi_handle_t, zcp_iconfig_t *);
+npi_status_t npi_zcp_clear_istatus(npi_handle_t);
+npi_status_t npi_zcp_set_dma_thresh(npi_handle_t, uint16_t);
+npi_status_t npi_zcp_set_bam_region(npi_handle_t,
+				zcp_buf_region_t,
+				zcp_bam_region_reg_t *);
+npi_status_t npi_zcp_set_sdt_region(npi_handle_t,
+				zcp_buf_region_t, uint16_t);
+npi_status_t npi_zcp_tt_static_entry(npi_handle_t, io_op_t,
+				uint16_t, tte_sflow_attr_mask_t,
+				tte_sflow_attr_t *);
+npi_status_t npi_zcp_tt_dynamic_entry(npi_handle_t, io_op_t,
+				uint16_t, tte_dflow_attr_mask_t,
+				tte_dflow_attr_t *);
+npi_status_t npi_zcp_tt_bam_entry(npi_handle_t, io_op_t,
+				uint16_t, uint8_t,
+				uint8_t, zcp_ram_unit_t *);
+npi_status_t npi_zcp_tt_cfifo_entry(npi_handle_t, io_op_t,
+				uint8_t, uint16_t,
+				zcp_ram_unit_t *);
+
+npi_status_t npi_zcp_rest_cfifo_port(npi_handle_t, uint8_t);
+npi_status_t npi_zcp_rest_cfifo_all(npi_handle_t);
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _NPI_ZCP_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nxge/nxge.conf	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,157 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#########################################################################
+#
+# Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+# Use is subject to license terms.
+#
+#ident	"%Z%%M%	%I%	%E% SMI"
+#
+#
+# driver.conf file for Sun NIU 10Gb/1Gb Ethernet Driver (nxge)
+#
+#
+#---------------Link Configuration ----------------------
+#	The link parameters depend on the type of the card
+#	and the port.
+#	10 gigabit related parameters (e.g. adv_10gfdx_cap)
+#	apply only to 10 gigabit ports.
+#	Half duplex is not supported on any NIU card.
+#
+# 	adv-autoneg-cap
+#		Advertise auto-negotiation capability.
+#		default is 1
+# adv-autoneg-cap = 1;
+#
+#	adv_10gfdx_cap
+#		Advertise 10gbps Full duplex  capability.
+#		default is 1
+# adv_10gfdx_cap = 1;
+#
+#	adv_1000fdx_cap
+#		Advertise 1gbps Full duplex  capability.
+#		default is 1
+# adv_1000fdx_cap = 1;
+#
+#	adv_100fdx_cap
+#		Advertise 100mbps Full duplex  capability.
+#		default is 1
+# adv_100fdx_cap = 1;
+#
+#	adv_10fdx_cap
+#		Advertise 10mbps Full duplex  capability.
+#		default is 1
+# adv_10fdx_cap = 1;
+#
+#	adv_asmpause_cap
+#		Advertise Asymmetric pause capability.
+#		default is 0
+# adv_asmpause_cap = 0;
+#
+#	adv_pause_cap
+#		Advertise pause capability.
+#		default is 1
+# adv_pause_cap = 1;
+#
+#
+#------- Jumbo frame support ---------------------------------
+# To enable jumbo support for all nxge interfaces,
+# accept_jumbo = 1;
+#
+# To disable jumbo support for all nxge interfaces,
+# accept_jumbo = 0;
+#
+# Default is 0.  See the example at the end of this file for 
+# enabling or disabling jumbo for a particular nxge interface.
+#
+#
+#------- Receive DMA Configuration ----------------------------
+#
+#  rxdma-intr-time
+#	Interrupt after this number of NIU hardware ticks has
+#	elapsed since the last packet was received.
+#	A value of zero means no time blanking (Default = 8).
+#
+# rxdma-intr-pkts
+#	Interrupt after this number of packets have arrived since
+#	the last packet was serviced. A value of zero indicates
+#	no packet blanking (Default = 20).
+#
+# Default Interrupt Blanking parameters.
+#
+# rxdma-intr-time = 8;
+# rxdma-intr-pkts = 20;
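+#
+# For example (illustrative values only), the blanking could be reduced
+# to favor lower latency over interrupt rate:
+#
+# rxdma-intr-time = 1;
+# rxdma-intr-pkts = 4;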
+#
+#
+#------- Classification and Load Distribution Configuration ------
+#
+# class-opt-****-***
+# 	These variables define how each IP class is configured.
+#	Configuration options range from whether TCAM lookup
+#	is enabled to flow hash generation.
+# 	These parameters also control how the flow template is
+#	constructed and how packets are distributed within RDC
+#	groups.
+#
+#	supported classes:
+#	class-opt-ipv4-tcp class-opt-ipv4-udp class-opt-ipv4-sctp
+#	class-opt-ipv4-ah class-opt-ipv6-tcp class-opt-ipv6-udp
+#	class-opt-ipv6-sctp class-opt-ipv6-ah
+#	
+#	Configuration bits (the following bits are decoded
+#	by the driver in hex format).
+#
+# 	0010:		use MAC Port (for flow key)
+#	0020:		use L2DA (for flow key)
+#	0040:		use VLAN (for flow key)
+#	0080:		use proto (for flow key)
+#	0100:		use IP src addr (for flow key)
+#	0200:		use IP dest addr (for flow key)
+#	0400:		use Src Port (for flow key)
+#	0800:		use Dest Port (for flow key)	
+#
+# class-opt-ipv4-tcp = fe0;
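+#
+# For illustration, the value fe0 above is just the OR of the flow key
+# bits listed in the table:
+#	0800 (Dest Port) + 0400 (Src Port) + 0200 (IP dest addr) +
+#	0100 (IP src addr) + 0080 (proto) + 0040 (VLAN) + 0020 (L2DA) = 0fe0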
+#
+# ------- How to set parameters for a particular interface --------
+# The example below shows how to locate the device path and set a 
+# parameter for a particular nxge interface. (Using jumbo support as
+# an example)
+#
+# Use the following command to find out the device paths for nxge,
+#       more /etc/path_to_inst | grep nxge
+#
+# For example, if you see,
+#       "/pci@7c0/pci@0/pci@8/network@0" 0 "nxge"
+#       "/pci@7c0/pci@0/pci@8/network@0,1" 1 "nxge"
+#       "/pci@7c0/pci@0/pci@8/network@0,2" 2 "nxge"
+#       "/pci@7c0/pci@0/pci@8/network@0,3" 3 "nxge"
+#
+# then you can enable jumbo for ports 0 and 1 and disable jumbo for ports 2
+# and 3 as follows,
+#
+# name = "pciex108e,abcd" parent = "/pci@7c0/pci@0/pci@8" unit-address = "0"
+# accept_jumbo = 1;
+# name = "pciex108e,abcd" parent = "/pci@7c0/pci@0/pci@8" unit-address = "0,1"
+# accept_jumbo = 1;
+# name = "pciex108e,abcd" parent = "/pci@7c0/pci@0/pci@8" unit-address = "0,2"
+# accept_jumbo = 0;
+# name = "pciex108e,abcd" parent = "/pci@7c0/pci@0/pci@8" unit-address = "0,3"
+# accept_jumbo = 0;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nxge/nxge_classify.c	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,239 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#include <npi_fflp.h>
+#include <nxge_defs.h>
+#include <nxge_fflp.h>
+#include <nxge_flow.h>
+#include <nxge_impl.h>
+#include <nxge_common.h>
+
+/*
+ * Globals: tunable parameters (/etc/system or adb)
+ *
+ */
+int nxge_tcam_class_enable = 0;
+int nxge_tcam_lookup_enable = 0;
+int nxge_flow_dist_enable = NXGE_CLASS_FLOW_USE_DST_PORT |
+	NXGE_CLASS_FLOW_USE_SRC_PORT | NXGE_CLASS_FLOW_USE_IPDST |
+	NXGE_CLASS_FLOW_USE_IPSRC | NXGE_CLASS_FLOW_USE_PROTO |
+	NXGE_CLASS_FLOW_USE_PORTNUM;
+
+/*
+ * Bit mapped
+ * 0x80000000:      Drop
+ * 0x0000:      NO TCAM Lookup Needed
+ * 0x0001:      TCAM Lookup Needed with Dest Addr (IPv6)
+ * 0x0003:      TCAM Lookup Needed with SRC Addr (IPv6)
+ * 0x0010:      use MAC Port
+ * 0x0020:      use L2DA
+ * 0x0040:      use VLAN
+ * 0x0080:      use proto
+ * 0x0100:      use IP src addr
+ * 0x0200:      use IP dest addr
+ * 0x0400:      use Src Port
+ * 0x0800:      use Dest Port
+ * 0x0fff:      enable all options for IPv6 (with src addr)
+ * 0x0ffd:      enable all options for IPv6 (with dest addr)
+ * 0x0fff:      enable all options for IPv4
+ * 0x0ffd:      enable all options for IPv4
+ *
+ */
+
+/*
+ * The default is to distribute as a function of:
+ * protocol
+ * ip src address
+ * ip dest address
+ * src port
+ * dest port
+ *
+ * 0x0f80
+ *
+ */
+
+int nxge_tcp4_class = NXGE_CLASS_FLOW_USE_DST_PORT |
+	NXGE_CLASS_FLOW_USE_SRC_PORT | NXGE_CLASS_FLOW_USE_IPDST |
+	NXGE_CLASS_FLOW_USE_IPSRC | NXGE_CLASS_FLOW_USE_PROTO |
+	NXGE_CLASS_FLOW_USE_PORTNUM;
+
+int nxge_udp4_class = NXGE_CLASS_FLOW_USE_DST_PORT |
+	NXGE_CLASS_FLOW_USE_SRC_PORT | NXGE_CLASS_FLOW_USE_IPDST |
+	NXGE_CLASS_FLOW_USE_IPSRC | NXGE_CLASS_FLOW_USE_PROTO |
+	NXGE_CLASS_FLOW_USE_PORTNUM;
+
+int nxge_ah4_class = NXGE_CLASS_FLOW_USE_DST_PORT |
+	NXGE_CLASS_FLOW_USE_SRC_PORT | NXGE_CLASS_FLOW_USE_IPDST |
+	NXGE_CLASS_FLOW_USE_IPSRC | NXGE_CLASS_FLOW_USE_PROTO |
+	NXGE_CLASS_FLOW_USE_PORTNUM;
+int nxge_sctp4_class = NXGE_CLASS_FLOW_USE_DST_PORT |
+	NXGE_CLASS_FLOW_USE_SRC_PORT | NXGE_CLASS_FLOW_USE_IPDST |
+	NXGE_CLASS_FLOW_USE_IPSRC | NXGE_CLASS_FLOW_USE_PROTO |
+	NXGE_CLASS_FLOW_USE_PORTNUM;
+
+int nxge_tcp6_class = NXGE_CLASS_FLOW_USE_DST_PORT |
+	NXGE_CLASS_FLOW_USE_SRC_PORT | NXGE_CLASS_FLOW_USE_IPDST |
+	NXGE_CLASS_FLOW_USE_IPSRC | NXGE_CLASS_FLOW_USE_PROTO |
+	NXGE_CLASS_FLOW_USE_PORTNUM;
+
+int nxge_udp6_class = NXGE_CLASS_FLOW_USE_DST_PORT |
+	NXGE_CLASS_FLOW_USE_SRC_PORT | NXGE_CLASS_FLOW_USE_IPDST |
+	NXGE_CLASS_FLOW_USE_IPSRC | NXGE_CLASS_FLOW_USE_PROTO |
+	NXGE_CLASS_FLOW_USE_PORTNUM;
+
+int nxge_ah6_class = NXGE_CLASS_FLOW_USE_DST_PORT |
+	NXGE_CLASS_FLOW_USE_SRC_PORT | NXGE_CLASS_FLOW_USE_IPDST |
+	NXGE_CLASS_FLOW_USE_IPSRC | NXGE_CLASS_FLOW_USE_PROTO |
+	NXGE_CLASS_FLOW_USE_PORTNUM;
+
+int nxge_sctp6_class = NXGE_CLASS_FLOW_USE_DST_PORT |
+	NXGE_CLASS_FLOW_USE_SRC_PORT | NXGE_CLASS_FLOW_USE_IPDST |
+	NXGE_CLASS_FLOW_USE_IPSRC | NXGE_CLASS_FLOW_USE_PROTO |
+	NXGE_CLASS_FLOW_USE_PORTNUM;
+
+uint32_t nxge_fflp_init_h1 = 0xffffffff;
+uint32_t nxge_fflp_init_h2 = 0xffff;
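+
+/*
+ * Usage sketch (illustrative, not part of the driver): since the class
+ * variables above are plain globals, they can be overridden at boot time
+ * with the usual /etc/system module-tunable syntax, for example:
+ *
+ *	set nxge:nxge_tcam_lookup_enable = 1
+ *	set nxge:nxge_tcp4_class = 0xfe0
+ *
+ * The values shown here are examples only.
+ */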
+
+uint64_t class_quick_config_distribute[NXGE_CLASS_CONFIG_PARAMS] = {
+	0xffffffffULL,		/* h1_init */
+	0xffffULL,		/* h2_init */
+	0x0,			/* cfg_ether_usr1 */
+	0x0,			/* cfg_ether_usr2 */
+	0x0,			/* cfg_ip_usr4 */
+	0x0,			/* cfg_ip_usr5 */
+	0x0,			/* cfg_ip_usr6 */
+	0x0,			/* cfg_ip_usr7 */
+	0x0,			/* opt_ip_usr4 */
+	0x0,			/* opt_ip_usr5 */
+	0x0,			/* opt_ip_usr6 */
+	0x0,			/* opt_ip_usr7 */
+	NXGE_CLASS_FLOW_GEN_SERVER,	/* opt_ipv4_tcp */
+	NXGE_CLASS_FLOW_GEN_SERVER,	/* opt_ipv4_udp */
+	NXGE_CLASS_FLOW_GEN_SERVER,	/* opt_ipv4_ah */
+	NXGE_CLASS_FLOW_GEN_SERVER,	/* opt_ipv4_sctp */
+	NXGE_CLASS_FLOW_GEN_SERVER,	/* opt_ipv6_tcp */
+	NXGE_CLASS_FLOW_GEN_SERVER,	/* opt_ipv6_udp */
+	NXGE_CLASS_FLOW_GEN_SERVER,	/* opt_ipv6_ah */
+	NXGE_CLASS_FLOW_GEN_SERVER	/* opt_ipv6_sctp */
+};
+
+uint64_t class_quick_config_web_server[NXGE_CLASS_CONFIG_PARAMS] = {
+	0xffffffffULL,		/* h1_init */
+	0xffffULL,		/* h2_init */
+	0x0,			/* cfg_ether_usr1 */
+	0x0,			/* cfg_ether_usr2 */
+	0x0,			/* cfg_ip_usr4 */
+	0x0,			/* cfg_ip_usr5 */
+	0x0,			/* cfg_ip_usr6 */
+	0x0,			/* cfg_ip_usr7 */
+	0x0,			/* opt_ip_usr4 */
+	0x0,			/* opt_ip_usr5 */
+	0x0,			/* opt_ip_usr6 */
+	0x0,			/* opt_ip_usr7 */
+	NXGE_CLASS_FLOW_WEB_SERVER,	/* opt_ipv4_tcp */
+	NXGE_CLASS_FLOW_GEN_SERVER,	/* opt_ipv4_udp */
+	NXGE_CLASS_FLOW_GEN_SERVER,	/* opt_ipv4_ah */
+	NXGE_CLASS_FLOW_GEN_SERVER,	/* opt_ipv4_sctp */
+	NXGE_CLASS_FLOW_GEN_SERVER,	/* opt_ipv6_tcp */
+	NXGE_CLASS_FLOW_GEN_SERVER,	/* opt_ipv6_udp */
+	NXGE_CLASS_FLOW_GEN_SERVER,	/* opt_ipv6_ah */
+	NXGE_CLASS_FLOW_GEN_SERVER	/* opt_ipv6_sctp */
+};
+
+nxge_status_t
+nxge_classify_init(p_nxge_t nxgep)
+{
+	nxge_status_t status = NXGE_OK;
+
+	status = nxge_classify_init_sw(nxgep);
+	if (status != NXGE_OK)
+		return (status);
+	status = nxge_set_hw_classify_config(nxgep);
+	if (status != NXGE_OK)
+		return (status);
+
+	status = nxge_classify_init_hw(nxgep);
+	if (status != NXGE_OK)
+		return (status);
+
+	return (NXGE_OK);
+}
+
+nxge_status_t
+nxge_classify_uninit(p_nxge_t nxgep)
+{
+	nxge_status_t status = NXGE_OK;
+
+	status = nxge_classify_exit_sw(nxgep);
+	if (status != NXGE_OK) {
+		return (status);
+	}
+	return (NXGE_OK);
+}
+
+/* ARGSUSED */
+uint64_t
+nxge_classify_get_cfg_value(p_nxge_t nxgep, uint8_t cfg_type, uint8_t cfg_param)
+{
+	uint64_t cfg_value;
+
+	if (cfg_param >= NXGE_CLASS_CONFIG_PARAMS)
+		return (-1);
+	switch (cfg_type) {
+	case CFG_L3_WEB:
+		cfg_value = class_quick_config_web_server[cfg_param];
+		break;
+	case CFG_L3_DISTRIBUTE:
+	default:
+		cfg_value = class_quick_config_distribute[cfg_param];
+		break;
+	}
+	return (cfg_value);
+}
+
+nxge_status_t
+nxge_set_hw_classify_config(p_nxge_t nxgep)
+{
+	p_nxge_dma_pt_cfg_t p_all_cfgp;
+	p_nxge_hw_pt_cfg_t p_cfgp;
+
+	NXGE_DEBUG_MSG((nxgep, OBP_CTL, "==> nxge_set_hw_classify_config"));
+
+	/* Get mac rdc table info from HW/Prom/.conf etc ...... */
+	/* for now, get it from dma configs */
+	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
+	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
+
+	/*
+	 * nxge_classify_init needs to be called first.
+	 */
+	nxgep->class_config.mac_rdcgrp = p_cfgp->def_mac_rxdma_grpid;
+	nxgep->class_config.mcast_rdcgrp = p_cfgp->def_mac_rxdma_grpid;
+	NXGE_DEBUG_MSG((nxgep, OBP_CTL, "<== nxge_set_hw_classify_config"));
+
+	return (NXGE_OK);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nxge/nxge_espc.c	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,218 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#include <nxge_impl.h>
+#include <nxge_mac.h>
+#include <npi_espc.h>
+#include <nxge_espc.h>
+
+static void
+nxge_espc_get_next_mac_addr(uint8_t *, uint8_t, struct ether_addr *);
+
+static void
+nxge_espc_get_next_mac_addr(uint8_t *st_mac, uint8_t nxt_cnt,
+			    struct ether_addr *final_mac)
+{
+	uint64_t	mac[ETHERADDRL];
+	uint64_t	mac_addr = 0;
+	int		i, j;
+
+	for (i = ETHERADDRL - 1, j = 0; j < ETHERADDRL; i--, j++) {
+		mac[j] = st_mac[i];
+		mac_addr |= (mac[j] << (j*8));
+	}
+
+	mac_addr += nxt_cnt;
+
+	final_mac->ether_addr_octet[0] = (mac_addr & 0xff0000000000) >> 40;
+	final_mac->ether_addr_octet[1] = (mac_addr & 0xff00000000) >> 32;
+	final_mac->ether_addr_octet[2] = (mac_addr & 0xff000000) >> 24;
+	final_mac->ether_addr_octet[3] = (mac_addr & 0xff0000) >> 16;
+	final_mac->ether_addr_octet[4] = (mac_addr & 0xff00) >> 8;
+	final_mac->ether_addr_octet[5] = (mac_addr & 0xff);
+}
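+
+/*
+ * Example of the derivation above (addresses are illustrative only): with
+ * a base SEEPROM address of 0:14:4f:0:0:10 and nxt_cnt (the port number)
+ * equal to 2, the 48-bit value 0x00144f000010 + 2 is unpacked back into
+ * the per-port factory address 0:14:4f:0:0:12.
+ */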
+
+nxge_status_t
+nxge_espc_mac_addrs_get(p_nxge_t nxgep)
+{
+	nxge_status_t	status = NXGE_OK;
+	npi_status_t	npi_status = NPI_SUCCESS;
+	uint8_t		port_num = nxgep->mac.portnum;
+	npi_handle_t	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	uint8_t		mac_addr[ETHERADDRL];
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+			    "==> nxge_espc_mac_addr_get, port[%d]",
+			    port_num));
+
+	npi_status = npi_espc_mac_addr_get(handle, mac_addr);
+	if (npi_status != NPI_SUCCESS) {
+		status = (NXGE_ERROR | npi_status);
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				    "nxge_espc_mac_addr_get, port[%d] failed",
+				    port_num));
+		goto exit;
+	}
+
+	nxge_espc_get_next_mac_addr(mac_addr, port_num, &nxgep->factaddr);
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+		"Got MAC Addr: %2x:%2x:%2x:%2x:%2x:%2x\n",
+		mac_addr[0], mac_addr[1],
+		mac_addr[2], mac_addr[3],
+		mac_addr[4], mac_addr[5]));
+
+exit:
+	NXGE_DEBUG_MSG((nxgep, CFG_CTL, "<== nxge_espc_mac_addr_get, "
+			"status [0x%x]", status));
+
+	return (status);
+}
+
+nxge_status_t
+nxge_espc_num_macs_get(p_nxge_t nxgep, uint8_t *nmacs)
+{
+	nxge_status_t   status = NXGE_OK;
+	npi_status_t    npi_status = NPI_SUCCESS;
+	npi_handle_t    handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	NXGE_DEBUG_MSG((nxgep, CFG_CTL, "==> nxge_espc_num_macs_get"));
+
+	npi_status = npi_espc_num_macs_get(handle, nmacs);
+	if (npi_status != NPI_SUCCESS) {
+		status = (NXGE_ERROR | npi_status);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, CFG_CTL, "<== nxge_espc_num_macs_get, "
+		"status [0x%x]", status));
+
+	return (status);
+}
+
+nxge_status_t
+nxge_espc_num_ports_get(p_nxge_t nxgep)
+{
+	nxge_status_t	status = NXGE_OK;
+	npi_status_t	npi_status = NPI_SUCCESS;
+	npi_handle_t	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	uint8_t		nports = 0;
+	NXGE_DEBUG_MSG((nxgep, CFG_CTL, "==> nxge_espc_num_ports_get"));
+
+	npi_status = npi_espc_num_ports_get(handle, &nports);
+	if (npi_status != NPI_SUCCESS) {
+		status = (NXGE_ERROR | npi_status);
+	}
+	nxgep->nports = nports;
+	NXGE_DEBUG_MSG((nxgep, CFG_CTL, " nxge_espc_num_ports_get "
+			"ports [0x%x]", nports));
+
+	NXGE_DEBUG_MSG((nxgep, CFG_CTL, "<== nxge_espc_num_ports_get, "
+			"status [0x%x]", status));
+
+	return (status);
+}
+
+nxge_status_t
+nxge_espc_phy_type_get(p_nxge_t nxgep)
+{
+	nxge_status_t	status = NXGE_OK;
+	npi_status_t	npi_status = NPI_SUCCESS;
+	npi_handle_t	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	uint8_t		port_num = nxgep->mac.portnum;
+	uint8_t		phy_type;
+
+	NXGE_DEBUG_MSG((nxgep, CFG_CTL, "==> nxge_espc_phy_type_get, port[%d]",
+			port_num));
+
+	npi_status = npi_espc_port_phy_type_get(handle, &phy_type,
+						port_num);
+	if (npi_status != NPI_SUCCESS) {
+		status = (NXGE_ERROR | npi_status);
+		goto exit;
+	}
+
+	switch (phy_type) {
+	case ESC_PHY_10G_FIBER:
+		nxgep->mac.portmode = PORT_10G_FIBER;
+		nxgep->statsp->mac_stats.xcvr_inuse = XPCS_XCVR;
+		cmn_err(CE_NOTE, "!SPROM Read phy type 10G Fiber \n");
+		break;
+	case ESC_PHY_10G_COPPER:
+		nxgep->mac.portmode = PORT_10G_COPPER;
+		nxgep->statsp->mac_stats.xcvr_inuse = XPCS_XCVR;
+		cmn_err(CE_NOTE, "!SPROM Read phy type 10G Copper \n");
+
+		break;
+	case ESC_PHY_1G_FIBER:
+		nxgep->mac.portmode = PORT_1G_FIBER;
+		nxgep->statsp->mac_stats.xcvr_inuse = PCS_XCVR;
+		cmn_err(CE_NOTE, "!SPROM Read phy type 1G Fiber \n");
+
+		break;
+	case ESC_PHY_1G_COPPER:
+		nxgep->mac.portmode = PORT_1G_COPPER;
+		nxgep->statsp->mac_stats.xcvr_inuse = INT_MII_XCVR;
+		cmn_err(CE_NOTE, "!SPROM Read phy type 1G Copper \n");
+
+		break;
+	case ESC_PHY_NONE:
+		status = NXGE_ERROR;
+		NXGE_DEBUG_MSG((nxgep, CFG_CTL, "nxge_espc_phy_type_get:"
+				"No phy type set"));
+		break;
+	default:
+		status = NXGE_ERROR;
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_espc_phy_type_get: "
+				"Unknown phy type [%d]", phy_type));
+		break;
+	}
+
+exit:
+
+	NXGE_DEBUG_MSG((nxgep, CFG_CTL, "<== nxge_espc_phy_type_get, "
+			"status [0x%x]", status));
+
+	return (status);
+}
+
+nxge_status_t
+nxge_espc_max_frame_sz_get(p_nxge_t nxgep)
+{
+	nxge_status_t	status = NXGE_OK;
+	npi_status_t	npi_status = NPI_SUCCESS;
+	npi_handle_t	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+
+	NXGE_DEBUG_MSG((nxgep, CFG_CTL, "==> nxge_espc_max_frame_sz_get"));
+
+	npi_status = npi_espc_max_frame_get(handle, &nxgep->mac.maxframesize);
+	if (npi_status != NPI_SUCCESS) {
+		status = (NXGE_ERROR | npi_status);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, CFG_CTL, " nxge_espc_max_frame_sz_get, "
+			    "status [0x%x]", status));
+
+	return (status);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nxge/nxge_fflp.c	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,2060 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#include <npi_fflp.h>
+#include <npi_mac.h>
+#include <nxge_defs.h>
+#include <nxge_flow.h>
+#include <nxge_fflp.h>
+#include <nxge_impl.h>
+#include <nxge_fflp_hash.h>
+#include <nxge_common.h>
+
+
+/*
+ * Function prototypes
+ */
+static nxge_status_t nxge_fflp_vlan_tbl_clear_all(p_nxge_t);
+static nxge_status_t nxge_fflp_tcam_invalidate_all(p_nxge_t);
+static nxge_status_t nxge_fflp_tcam_init(p_nxge_t);
+static nxge_status_t nxge_fflp_fcram_invalidate_all(p_nxge_t);
+static nxge_status_t nxge_fflp_fcram_init(p_nxge_t);
+static int nxge_flow_need_hash_lookup(p_nxge_t, flow_resource_t *);
+static void nxge_fill_tcam_entry_tcp(p_nxge_t, flow_spec_t *, tcam_entry_t *);
+static void nxge_fill_tcam_entry_udp(p_nxge_t, flow_spec_t *, tcam_entry_t *);
+static void nxge_fill_tcam_entry_sctp(p_nxge_t, flow_spec_t *, tcam_entry_t *);
+static void nxge_fill_tcam_entry_tcp_ipv6(p_nxge_t, flow_spec_t *,
+	tcam_entry_t *);
+static void nxge_fill_tcam_entry_udp_ipv6(p_nxge_t, flow_spec_t *,
+	tcam_entry_t *);
+static void nxge_fill_tcam_entry_sctp_ipv6(p_nxge_t, flow_spec_t *,
+	tcam_entry_t *);
+static uint8_t nxge_get_rdc_offset(p_nxge_t, uint8_t, intptr_t);
+static uint8_t nxge_get_rdc_group(p_nxge_t, uint8_t, intptr_t);
+static tcam_location_t nxge_get_tcam_location(p_nxge_t, uint8_t);
+
+/*
+ * functions used outside this file
+ */
+nxge_status_t nxge_fflp_config_vlan_table(p_nxge_t, uint16_t);
+nxge_status_t nxge_fflp_ip_class_config_all(p_nxge_t);
+nxge_status_t nxge_add_flow(p_nxge_t, flow_resource_t *);
+static nxge_status_t nxge_tcam_handle_ip_fragment(p_nxge_t);
+nxge_status_t nxge_add_tcam_entry(p_nxge_t, flow_resource_t *);
+nxge_status_t nxge_add_fcram_entry(p_nxge_t, flow_resource_t *);
+nxge_status_t nxge_flow_get_hash(p_nxge_t, flow_resource_t *,
+	uint32_t *, uint16_t *);
+
+nxge_status_t
+nxge_tcam_dump_entry(p_nxge_t nxgep, uint32_t location)
+{
+	tcam_entry_t tcam_rdptr;
+	uint64_t asc_ram = 0;
+	npi_handle_t handle;
+	npi_status_t status;
+
+	handle = nxgep->npi_reg_handle;
+
+	bzero((char *)&tcam_rdptr, sizeof (struct tcam_entry));
+	status = npi_fflp_tcam_entry_read(handle, (tcam_location_t)location,
+		(struct tcam_entry *)&tcam_rdptr);
+	if (status & NPI_FAILURE) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			" nxge_tcam_dump_entry:"
+			"  tcam read failed at location %d ", location));
+		return (NXGE_ERROR);
+	}
+	status = npi_fflp_tcam_asc_ram_entry_read(handle,
+		(tcam_location_t)location, &asc_ram);
+
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "location %x\n"
+		" key:  %llx %llx %llx %llx \n"
+		" mask: %llx %llx %llx %llx \n"
+		" ASC RAM %llx \n", location,
+		tcam_rdptr.key0, tcam_rdptr.key1,
+		tcam_rdptr.key2, tcam_rdptr.key3,
+		tcam_rdptr.mask0, tcam_rdptr.mask1,
+		tcam_rdptr.mask2, tcam_rdptr.mask3, asc_ram));
+	return (NXGE_OK);
+}
+
+void
+nxge_get_tcam(p_nxge_t nxgep, p_mblk_t mp)
+{
+	uint32_t tcam_loc;
+	int *lptr;
+	int location;
+
+	uint32_t start_location = 0;
+	uint32_t stop_location = nxgep->classifier.tcam_size;
+	lptr = (int *)mp->b_rptr;
+	location = *lptr;
+
+	if ((location >= nxgep->classifier.tcam_size) || (location < -1)) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_tcam_dump: Invalid location %d \n", location));
+		return;
+	}
+	if (location == -1) {
+		start_location = 0;
+		stop_location = nxgep->classifier.tcam_size;
+	} else {
+		start_location = location;
+		stop_location = location + 1;
+	}
+	for (tcam_loc = start_location; tcam_loc < stop_location; tcam_loc++)
+		(void) nxge_tcam_dump_entry(nxgep, tcam_loc);
+}
+
+/*
+ * nxge_fflp_vlan_tbl_clear_all
+ * clears (invalidates) all the VLAN RDC table entries.
+ * INPUT
+ * nxge    soft state data structure
+ * Return
+ *      NXGE_OK
+ *      NXGE_ERROR
+ *
+ */
+
+static nxge_status_t
+nxge_fflp_vlan_tbl_clear_all(p_nxge_t nxgep)
+{
+	vlan_id_t vlan_id;
+	npi_handle_t handle;
+	npi_status_t rs = NPI_SUCCESS;
+	vlan_id_t start = 0, stop = NXGE_MAX_VLANS;
+
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_vlan_tbl_clear_all "));
+	handle = nxgep->npi_reg_handle;
+	for (vlan_id = start; vlan_id < stop; vlan_id++) {
+		rs = npi_fflp_cfg_vlan_table_clear(handle, vlan_id);
+		if (rs != NPI_SUCCESS) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"VLAN Table invalidate failed for vlan id %d ",
+				vlan_id));
+			return (NXGE_ERROR | rs);
+		}
+	}
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_vlan_tbl_clear_all "));
+	return (NXGE_OK);
+}
+
+/*
+ * The following functions are used by other modules to initialize
+ * the fflp module.
+ * These functions are the basic API used to initialize
+ * the fflp sub-blocks (tcam, fcram etc.).
+ *
+ * The TCAM search feature is disabled by default.
+ */
+
+static nxge_status_t
+nxge_fflp_tcam_init(p_nxge_t nxgep)
+{
+	uint8_t access_ratio;
+	tcam_class_t class;
+	npi_status_t rs = NPI_SUCCESS;
+	npi_handle_t handle;
+
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_tcam_init"));
+	handle = nxgep->npi_reg_handle;
+
+	rs = npi_fflp_cfg_tcam_disable(handle);
+	if (rs != NPI_SUCCESS) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "failed TCAM Disable\n"));
+		return (NXGE_ERROR | rs);
+	}
+
+	access_ratio = nxgep->param_arr[param_tcam_access_ratio].value;
+	rs = npi_fflp_cfg_tcam_access(handle, access_ratio);
+	if (rs != NPI_SUCCESS) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"failed TCAM Access cfg\n"));
+		return (NXGE_ERROR | rs);
+	}
+
+	/* disable configurable classes */
+	/* disable the configurable ethernet classes; */
+	for (class = TCAM_CLASS_ETYPE_1;
+		class <= TCAM_CLASS_ETYPE_2; class++) {
+		rs = npi_fflp_cfg_enet_usr_cls_disable(handle, class);
+		if (rs != NPI_SUCCESS) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"TCAM USR Ether Class config failed."));
+			return (NXGE_ERROR | rs);
+		}
+	}
+
+	/* disable the configurable ip classes; */
+	for (class = TCAM_CLASS_IP_USER_4;
+		class <= TCAM_CLASS_IP_USER_7; class++) {
+		rs = npi_fflp_cfg_ip_usr_cls_disable(handle, class);
+		if (rs != NPI_SUCCESS) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"TCAM USR IP Class cnfg failed."));
+			return (NXGE_ERROR | rs);
+		}
+	}
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_tcam_init"));
+	return (NXGE_OK);
+}
+
+/*
+ * nxge_fflp_tcam_invalidate_all
+ * invalidates all the tcam entries.
+ * INPUT
+ * nxge    soft state data structure
+ * Return
+ *      NXGE_OK
+ *      NXGE_ERROR
+ *
+ */
+
+
+static nxge_status_t
+nxge_fflp_tcam_invalidate_all(p_nxge_t nxgep)
+{
+	uint16_t location;
+	npi_status_t rs = NPI_SUCCESS;
+	npi_handle_t handle;
+	uint16_t start = 0, stop = nxgep->classifier.tcam_size;
+	p_nxge_hw_list_t hw_p;
+
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
+		"==> nxge_fflp_tcam_invalidate_all"));
+	handle = nxgep->npi_reg_handle;
+	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			" nxge_fflp_tcam_invalidate_all:"
+			" common hardware not set"));
+		return (NXGE_ERROR);
+	}
+	MUTEX_ENTER(&hw_p->nxge_tcam_lock);
+	for (location = start; location < stop; location++) {
+		rs = npi_fflp_tcam_entry_invalidate(handle, location);
+		if (rs != NPI_SUCCESS) {
+			MUTEX_EXIT(&hw_p->nxge_tcam_lock);
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"TCAM invalidate failed at loc %d ", location));
+			return (NXGE_ERROR | rs);
+		}
+	}
+	MUTEX_EXIT(&hw_p->nxge_tcam_lock);
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
+			"<== nxge_fflp_tcam_invalidate_all"));
+	return (NXGE_OK);
+}
+
+/*
+ * nxge_fflp_fcram_invalidate_all
+ * invalidates all the FCRAM entries.
+ * INPUT
+ * nxge    soft state data structure
+ * Return
+ *      NXGE_OK
+ *      NXGE_ERROR
+ *
+ */
+
+static nxge_status_t
+nxge_fflp_fcram_invalidate_all(p_nxge_t nxgep)
+{
+	npi_handle_t handle;
+	npi_status_t rs = NPI_SUCCESS;
+	part_id_t pid = 0;
+	uint8_t base_mask, base_reloc;
+	fcram_entry_t fc;
+	uint32_t location;
+	uint32_t increment, last_location;
+
+	/*
+	 * (1) configure and enable partition 0 with no relocation
+	 * (2) Assume the FCRAM is used as IPv4 exact match entry cells
+	 * (3) Invalidate these cells by clearing the valid bit in
+	 * the subareas 0 and 4
+	 * (4) disable the partition
+	 *
+	 */
+
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_fcram_invalidate_all"));
+
+	base_mask = base_reloc = 0x0;
+	handle = nxgep->npi_reg_handle;
+	rs = npi_fflp_cfg_fcram_partition(handle, pid, base_mask, base_reloc);
+
+	if (rs != NPI_SUCCESS) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "failed partition cfg\n"));
+		return (NXGE_ERROR | rs);
+	}
+	rs = npi_fflp_cfg_fcram_partition_disable(handle, pid);
+
+	if (rs != NPI_SUCCESS) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"failed partition disable\n"));
+		return (NXGE_ERROR | rs);
+	}
+	fc.dreg[0].value = 0;
+	fc.hash_hdr_valid = 0;
+	fc.hash_hdr_ext = 1;	/* specify as IPV4 exact match entry */
+	increment = sizeof (hash_ipv4_t);
+	last_location = FCRAM_SIZE * 0x40;
+
+	for (location = 0; location < last_location; location += increment) {
+		rs = npi_fflp_fcram_subarea_write(handle, pid,
+			location,
+			fc.value[0]);
+		if (rs != NPI_SUCCESS) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+					"failed write"
+					"at location %x ",
+					location));
+			return (NXGE_ERROR | rs);
+		}
+	}
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_fcram_invalidate_all"));
+	return (NXGE_OK);
+}
+
+static nxge_status_t
+nxge_fflp_fcram_init(p_nxge_t nxgep)
+{
+	fflp_fcram_output_drive_t strength;
+	fflp_fcram_qs_t qs;
+	npi_status_t rs = NPI_SUCCESS;
+	uint8_t access_ratio;
+	int partition;
+	npi_handle_t handle;
+	uint32_t min_time, max_time, sys_time;
+
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_fcram_init"));
+
+	/*
+	 * Recommended values are needed.
+	 */
+	min_time = FCRAM_REFRESH_DEFAULT_MIN_TIME;
+	max_time = FCRAM_REFRESH_DEFAULT_MAX_TIME;
+	sys_time = FCRAM_REFRESH_DEFAULT_SYS_TIME;
+
+	handle = nxgep->npi_reg_handle;
+	strength = FCRAM_OUTDR_NORMAL;
+	qs = FCRAM_QS_MODE_QS;
+	rs = npi_fflp_cfg_fcram_reset(handle, strength, qs);
+	if (rs != NPI_SUCCESS) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "failed FCRAM Reset. "));
+		return (NXGE_ERROR | rs);
+	}
+
+	access_ratio = nxgep->param_arr[param_fcram_access_ratio].value;
+	rs = npi_fflp_cfg_fcram_access(handle, access_ratio);
+	if (rs != NPI_SUCCESS) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "failed FCRAM Access ratio"
+			"configuration \n"));
+		return (NXGE_ERROR | rs);
+	}
+	rs = npi_fflp_cfg_fcram_refresh_time(handle, min_time,
+		max_time, sys_time);
+	if (rs != NPI_SUCCESS) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"failed FCRAM refresh cfg"));
+		return (NXGE_ERROR);
+	}
+
+	/* disable all the partitions until explicitly enabled */
+	for (partition = 0; partition < FFLP_FCRAM_MAX_PARTITION; partition++) {
+		rs = npi_fflp_cfg_fcram_partition_disable(handle, partition);
+		if (rs != NPI_SUCCESS) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"failed FCRAM partition"
+				" disable for partition %d ", partition));
+			return (NXGE_ERROR | rs);
+		}
+	}
+
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_fcram_init"));
+	return (NXGE_OK);
+}
+
+nxge_status_t
+nxge_logical_mac_assign_rdc_table(p_nxge_t nxgep, uint8_t alt_mac)
+{
+	npi_status_t rs = NPI_SUCCESS;
+	hostinfo_t mac_rdc;
+	npi_handle_t handle;
+	p_nxge_class_pt_cfg_t p_class_cfgp;
+
+	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
+	if (p_class_cfgp->mac_host_info[alt_mac].flag == 0) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			" nxge_logical_mac_assign_rdc_table"
+			" unconfigured alt MAC addr %d ", alt_mac));
+		return (NXGE_ERROR);
+	}
+	handle = nxgep->npi_reg_handle;
+	mac_rdc.value = 0;
+	mac_rdc.bits.w0.rdc_tbl_num =
+		p_class_cfgp->mac_host_info[alt_mac].rdctbl;
+	mac_rdc.bits.w0.mac_pref = p_class_cfgp->mac_host_info[alt_mac].mpr_npr;
+
+	rs = npi_mac_hostinfo_entry(handle, OP_SET,
+		nxgep->function_num, alt_mac, &mac_rdc);
+
+	if (rs != NPI_SUCCESS) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"failed Assign RDC table"));
+		return (NXGE_ERROR | rs);
+	}
+	return (NXGE_OK);
+}
+
+nxge_status_t
+nxge_main_mac_assign_rdc_table(p_nxge_t nxgep)
+{
+	npi_status_t rs = NPI_SUCCESS;
+	hostinfo_t mac_rdc;
+	npi_handle_t handle;
+
+	handle = nxgep->npi_reg_handle;
+	mac_rdc.value = 0;
+	mac_rdc.bits.w0.rdc_tbl_num = nxgep->class_config.mac_rdcgrp;
+	mac_rdc.bits.w0.mac_pref = 1;
+	switch (nxgep->function_num) {
+	case 0:
+	case 1:
+		rs = npi_mac_hostinfo_entry(handle, OP_SET,
+			nxgep->function_num, XMAC_UNIQUE_HOST_INFO_ENTRY,
+			&mac_rdc);
+		break;
+	case 2:
+	case 3:
+		rs = npi_mac_hostinfo_entry(handle, OP_SET,
+			nxgep->function_num, BMAC_UNIQUE_HOST_INFO_ENTRY,
+			&mac_rdc);
+		break;
+	default:
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"failed Assign RDC table (invalid function #)"));
+		return (NXGE_ERROR);
+	}
+
+	if (rs != NPI_SUCCESS) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"failed Assign RDC table"));
+		return (NXGE_ERROR | rs);
+	}
+	return (NXGE_OK);
+}
+
+/*
+ * Initialize hostinfo registers for alternate MAC addresses and
+ * multicast MAC address.
+ */
+nxge_status_t
+nxge_alt_mcast_mac_assign_rdc_table(p_nxge_t nxgep)
+{
+	npi_status_t rs = NPI_SUCCESS;
+	hostinfo_t mac_rdc;
+	npi_handle_t handle;
+	int i;
+
+	handle = nxgep->npi_reg_handle;
+	mac_rdc.value = 0;
+	mac_rdc.bits.w0.rdc_tbl_num = nxgep->class_config.mcast_rdcgrp;
+	mac_rdc.bits.w0.mac_pref = 1;
+	switch (nxgep->function_num) {
+	case 0:
+	case 1:
+		/*
+		 * Tests indicate that it is OK not to re-initialize the
+		 * hostinfo registers for the XMAC's alternate MAC
+		 * addresses. But that is necessary for BMAC (case 2
+		 * and case 3 below)
+		 */
+		rs = npi_mac_hostinfo_entry(handle, OP_SET,
+			nxgep->function_num,
+			XMAC_MULTI_HOST_INFO_ENTRY, &mac_rdc);
+		break;
+	case 2:
+	case 3:
+		for (i = 1; i <= BMAC_MAX_ALT_ADDR_ENTRY; i++)
+			rs |= npi_mac_hostinfo_entry(handle, OP_SET,
+			nxgep->function_num, i, &mac_rdc);
+
+		rs |= npi_mac_hostinfo_entry(handle, OP_SET,
+			nxgep->function_num,
+			BMAC_MULTI_HOST_INFO_ENTRY, &mac_rdc);
+		break;
+	default:
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"failed Assign RDC table (invalid function #)"));
+		return (NXGE_ERROR);
+	}
+
+	if (rs != NPI_SUCCESS) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"failed Assign RDC table"));
+		return (NXGE_ERROR | rs);
+	}
+	return (NXGE_OK);
+}
+
+nxge_status_t
+nxge_fflp_init_hostinfo(p_nxge_t nxgep)
+{
+	nxge_status_t status = NXGE_OK;
+
+	status = nxge_alt_mcast_mac_assign_rdc_table(nxgep);
+	status |= nxge_main_mac_assign_rdc_table(nxgep);
+	return (status);
+}
+
+nxge_status_t
+nxge_fflp_hw_reset(p_nxge_t nxgep)
+{
+	npi_handle_t handle;
+	npi_status_t rs = NPI_SUCCESS;
+	nxge_status_t status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_hw_reset"));
+
+	if (nxgep->niu_type == NEPTUNE) {
+		status = nxge_fflp_fcram_init(nxgep);
+		if (status != NXGE_OK) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				" failed FCRAM init. "));
+			return (status);
+		}
+	}
+
+	status = nxge_fflp_tcam_init(nxgep);
+	if (status != NXGE_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"failed TCAM init."));
+		return (status);
+	}
+
+	handle = nxgep->npi_reg_handle;
+	rs = npi_fflp_cfg_llcsnap_enable(handle);
+	if (rs != NPI_SUCCESS) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"failed LLCSNAP enable. "));
+		return (NXGE_ERROR | rs);
+	}
+
+	rs = npi_fflp_cfg_cam_errorcheck_disable(handle);
+	if (rs != NPI_SUCCESS) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"failed CAM Error Check disable. "));
+		return (NXGE_ERROR | rs);
+	}
+
+	/* init the hash generators */
+	rs = npi_fflp_cfg_hash_h1poly(handle, 0);
+	if (rs != NPI_SUCCESS) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"failed H1 Poly Init. "));
+		return (NXGE_ERROR | rs);
+	}
+
+	rs = npi_fflp_cfg_hash_h2poly(handle, 0);
+	if (rs != NPI_SUCCESS) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"failed H2 Poly Init. "));
+		return (NXGE_ERROR | rs);
+	}
+
+	/* invalidate TCAM entries */
+	status = nxge_fflp_tcam_invalidate_all(nxgep);
+	if (status != NXGE_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"failed TCAM Entry Invalidate. "));
+		return (status);
+	}
+
+	/* invalidate FCRAM entries */
+	if (nxgep->niu_type == NEPTUNE) {
+		status = nxge_fflp_fcram_invalidate_all(nxgep);
+		if (status != NXGE_OK) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+					"failed FCRAM Entry Invalidate."));
+			return (status);
+		}
+	}
+
+	/* invalidate VLAN RDC tables */
+	status = nxge_fflp_vlan_tbl_clear_all(nxgep);
+	if (status != NXGE_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"failed VLAN Table Invalidate. "));
+		return (status);
+	}
+	nxgep->classifier.state |= NXGE_FFLP_HW_RESET;
+
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_hw_reset"));
+	return (NXGE_OK);
+}
+
+nxge_status_t
+nxge_cfg_ip_cls_flow_key(p_nxge_t nxgep, tcam_class_t l3_class,
+	uint32_t class_config)
+{
+	flow_key_cfg_t fcfg;
+	npi_handle_t handle;
+	npi_status_t rs = NPI_SUCCESS;
+
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_cfg_ip_cls_flow_key"));
+	handle = nxgep->npi_reg_handle;
+	bzero(&fcfg, sizeof (flow_key_cfg_t));
+
+	if (class_config & NXGE_CLASS_FLOW_USE_PROTO)
+		fcfg.use_proto = 1;
+	if (class_config & NXGE_CLASS_FLOW_USE_DST_PORT)
+		fcfg.use_dport = 1;
+	if (class_config & NXGE_CLASS_FLOW_USE_SRC_PORT)
+		fcfg.use_sport = 1;
+	if (class_config & NXGE_CLASS_FLOW_USE_IPDST)
+		fcfg.use_daddr = 1;
+	if (class_config & NXGE_CLASS_FLOW_USE_IPSRC)
+		fcfg.use_saddr = 1;
+	if (class_config & NXGE_CLASS_FLOW_USE_VLAN)
+		fcfg.use_vlan = 1;
+	if (class_config & NXGE_CLASS_FLOW_USE_L2DA)
+		fcfg.use_l2da = 1;
+	if (class_config & NXGE_CLASS_FLOW_USE_PORTNUM)
+		fcfg.use_portnum = 1;
+	fcfg.ip_opts_exist = 0;
+
+	rs = npi_fflp_cfg_ip_cls_flow_key(handle, l3_class, &fcfg);
+	if (rs & NPI_FFLP_ERROR) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, " nxge_cfg_ip_cls_flow_key"
+			" opt %x for class %d failed ",
+			class_config, l3_class));
+		return (NXGE_ERROR | rs);
+	}
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " <== nxge_cfg_ip_cls_flow_key"));
+	return (NXGE_OK);
+}
+
+nxge_status_t
+nxge_cfg_ip_cls_flow_key_get(p_nxge_t nxgep, tcam_class_t l3_class,
+	uint32_t *class_config)
+{
+	flow_key_cfg_t fcfg;
+	npi_handle_t handle;
+	npi_status_t rs = NPI_SUCCESS;
+	uint32_t ccfg = 0;
+
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_cfg_ip_cls_flow_key_get"));
+	handle = nxgep->npi_reg_handle;
+	bzero(&fcfg, sizeof (flow_key_cfg_t));
+
+	rs = npi_fflp_cfg_ip_cls_flow_key_get(handle, l3_class, &fcfg);
+	if (rs & NPI_FFLP_ERROR) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, " nxge_cfg_ip_cls_flow_key"
+				" opt %x for class %d failed ",
+				class_config, l3_class));
+		return (NXGE_ERROR | rs);
+	}
+
+	if (fcfg.use_proto)
+		ccfg |= NXGE_CLASS_FLOW_USE_PROTO;
+	if (fcfg.use_dport)
+		ccfg |= NXGE_CLASS_FLOW_USE_DST_PORT;
+	if (fcfg.use_sport)
+		ccfg |= NXGE_CLASS_FLOW_USE_SRC_PORT;
+	if (fcfg.use_daddr)
+		ccfg |= NXGE_CLASS_FLOW_USE_IPDST;
+	if (fcfg.use_saddr)
+		ccfg |= NXGE_CLASS_FLOW_USE_IPSRC;
+	if (fcfg.use_vlan)
+		ccfg |= NXGE_CLASS_FLOW_USE_VLAN;
+	if (fcfg.use_l2da)
+		ccfg |= NXGE_CLASS_FLOW_USE_L2DA;
+	if (fcfg.use_portnum)
+		ccfg |= NXGE_CLASS_FLOW_USE_PORTNUM;
+
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
+		" nxge_cfg_ip_cls_flow_key_get %x", ccfg));
+	*class_config = ccfg;
+
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
+		" <== nxge_cfg_ip_cls_flow_key_get"));
+	return (NXGE_OK);
+}
+
+static nxge_status_t
+nxge_cfg_tcam_ip_class_get(p_nxge_t nxgep, tcam_class_t class,
+	uint32_t *class_config)
+{
+	npi_status_t rs = NPI_SUCCESS;
+	tcam_key_cfg_t cfg;
+	npi_handle_t handle;
+	uint32_t ccfg = 0;
+
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_cfg_tcam_ip_class"));
+
+	bzero(&cfg, sizeof (tcam_key_cfg_t));
+	handle = nxgep->npi_reg_handle;
+
+	rs = npi_fflp_cfg_ip_cls_tcam_key_get(handle, class, &cfg);
+	if (rs & NPI_FFLP_ERROR) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, " nxge_cfg_tcam_ip_class"
+			" opt %x for class %d failed ",
+			class_config, class));
+		return (NXGE_ERROR | rs);
+	}
+	if (cfg.discard)
+		ccfg |= NXGE_CLASS_DISCARD;
+	if (cfg.lookup_enable)
+		ccfg |= NXGE_CLASS_TCAM_LOOKUP;
+	if (cfg.use_ip_daddr)
+		ccfg |= NXGE_CLASS_TCAM_USE_SRC_ADDR;
+	*class_config = ccfg;
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
+			" ==> nxge_cfg_tcam_ip_class %x", ccfg));
+	return (NXGE_OK);
+}
+
+static nxge_status_t
+nxge_cfg_tcam_ip_class(p_nxge_t nxgep, tcam_class_t class,
+	uint32_t class_config)
+{
+	npi_status_t rs = NPI_SUCCESS;
+	tcam_key_cfg_t cfg;
+	npi_handle_t handle;
+	p_nxge_class_pt_cfg_t p_class_cfgp;
+
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_cfg_tcam_ip_class"));
+
+	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
+	p_class_cfgp->class_cfg[class] = class_config;
+
+	bzero(&cfg, sizeof (tcam_key_cfg_t));
+	handle = nxgep->npi_reg_handle;
+	cfg.discard = 0;
+	cfg.lookup_enable = 0;
+	cfg.use_ip_daddr = 0;
+	if (class_config & NXGE_CLASS_DISCARD)
+		cfg.discard = 1;
+	if (class_config & NXGE_CLASS_TCAM_LOOKUP)
+		cfg.lookup_enable = 1;
+	if (class_config & NXGE_CLASS_TCAM_USE_SRC_ADDR)
+		cfg.use_ip_daddr = 1;
+
+	rs = npi_fflp_cfg_ip_cls_tcam_key(handle, class, &cfg);
+	if (rs & NPI_FFLP_ERROR) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, " nxge_cfg_tcam_ip_class"
+			" opt %x for class %d failed ",
+			class_config, class));
+		return (NXGE_ERROR | rs);
+	}
+	return (NXGE_OK);
+}
+
+nxge_status_t
+nxge_fflp_set_hash1(p_nxge_t nxgep, uint32_t h1)
+{
+	npi_status_t rs = NPI_SUCCESS;
+	npi_handle_t handle;
+	p_nxge_class_pt_cfg_t p_class_cfgp;
+
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_init_h1"));
+	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
+	p_class_cfgp->init_h1 = h1;
+	handle = nxgep->npi_reg_handle;
+	rs = npi_fflp_cfg_hash_h1poly(handle, h1);
+	if (rs & NPI_FFLP_ERROR) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			" nxge_fflp_init_h1 %x failed ", h1));
+		return (NXGE_ERROR | rs);
+	}
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " <== nxge_fflp_init_h1"));
+	return (NXGE_OK);
+}
+
+nxge_status_t
+nxge_fflp_set_hash2(p_nxge_t nxgep, uint16_t h2)
+{
+	npi_status_t rs = NPI_SUCCESS;
+	npi_handle_t handle;
+	p_nxge_class_pt_cfg_t p_class_cfgp;
+
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_init_h2"));
+	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
+	p_class_cfgp->init_h2 = h2;
+
+	handle = nxgep->npi_reg_handle;
+	rs = npi_fflp_cfg_hash_h2poly(handle, h2);
+	if (rs & NPI_FFLP_ERROR) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			" nxge_fflp_init_h2 %x failed ", h2));
+		return (NXGE_ERROR | rs);
+	}
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " <== nxge_fflp_init_h2"));
+	return (NXGE_OK);
+}
+
+nxge_status_t
+nxge_classify_init_sw(p_nxge_t nxgep)
+{
+	int alloc_size;
+	nxge_classify_t *classify_ptr;
+
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_classify_init_sw"));
+	classify_ptr = &nxgep->classifier;
+
+	if (classify_ptr->state & NXGE_FFLP_SW_INIT) {
+		NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
+			"nxge_classify_init_sw already init"));
+		return (NXGE_OK);
+	}
+	/* Init SW structures */
+	classify_ptr->tcam_size = TCAM_NIU_TCAM_MAX_ENTRY;
+
+	/* init data structures, based on HW type */
+	if (nxgep->niu_type == NEPTUNE) {
+		classify_ptr->tcam_size = TCAM_NXGE_TCAM_MAX_ENTRY;
+		/*
+		 * check if fcram based classification is required and init the
+		 * flow storage
+		 */
+	}
+	alloc_size = sizeof (tcam_flow_spec_t) * classify_ptr->tcam_size;
+	classify_ptr->tcam_entries = KMEM_ZALLOC(alloc_size, KM_SLEEP);
+
+	/* Init defaults */
+	/*
+	 * add hacks required for HW shortcomings for example, code to handle
+	 * fragmented packets
+	 */
+	nxge_init_h1_table();
+	nxge_crc_ccitt_init();
+	nxgep->classifier.tcam_location = nxgep->function_num;
+	nxgep->classifier.fragment_bug = 1;
+	classify_ptr->state |= NXGE_FFLP_SW_INIT;
+
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_classify_init_sw"));
+	return (NXGE_OK);
+}
+
+nxge_status_t
+nxge_classify_exit_sw(p_nxge_t nxgep)
+{
+	int alloc_size;
+	nxge_classify_t *classify_ptr;
+	int fsize;
+
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_classify_exit_sw"));
+	classify_ptr = &nxgep->classifier;
+
+	fsize = sizeof (tcam_flow_spec_t);
+	if (classify_ptr->tcam_entries) {
+		alloc_size = fsize * classify_ptr->tcam_size;
+		KMEM_FREE((void *) classify_ptr->tcam_entries, alloc_size);
+	}
+	nxgep->classifier.state = NULL;
+
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_classify_exit_sw"));
+	return (NXGE_OK);
+}
+
+/*
+ * Figures out the location where the TCAM entry is
+ * to be inserted.
+ *
+ * The current implementation is just a placeholder and
+ * returns the next tcam location.
+ * A real location-determining algorithm would consider
+ * the priority, partition etc. before deciding at which
+ * location to insert the entry.
+ *
+ */
+
+/* ARGSUSED */
+static tcam_location_t
+nxge_get_tcam_location(p_nxge_t nxgep, uint8_t class)
+{
+	tcam_location_t location;
+
+	location = nxgep->classifier.tcam_location;
+	nxgep->classifier.tcam_location = (location + nxgep->nports) %
+		nxgep->classifier.tcam_size;
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
+		"nxge_get_tcam_location: location %d next %d \n",
+		location, nxgep->classifier.tcam_location));
+	return (location);
+}
+
+/*
+ * Figures out the RDC Group for the entry
+ *
+ * The current implementation is just a placeholder and
+ * returns the port's default (starting) RDC group.
+ * A real algorithm would consider the partition etc.
+ * before deciding which RDC group to use.
+ *
+ */
+
+/* ARGSUSED */
+static uint8_t
+nxge_get_rdc_group(p_nxge_t nxgep, uint8_t class, intptr_t cookie)
+{
+	int use_port_rdc_grp = 0;
+	uint8_t rdc_grp = 0;
+	p_nxge_dma_pt_cfg_t p_dma_cfgp;
+	p_nxge_hw_pt_cfg_t p_cfgp;
+	p_nxge_rdc_grp_t rdc_grp_p;
+
+	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
+	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
+	rdc_grp_p = &p_dma_cfgp->rdc_grps[use_port_rdc_grp];
+	rdc_grp = p_cfgp->start_rdc_grpid;
+
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+		"nxge_get_rdc_group: grp 0x%x real_grp %x grpp $%p\n",
+		cookie, rdc_grp, rdc_grp_p));
+	return (rdc_grp);
+}
+
+/* ARGSUSED */
+static uint8_t
+nxge_get_rdc_offset(p_nxge_t nxgep, uint8_t class, intptr_t cookie)
+{
+	return ((uint8_t)cookie);
+}
+
+/* ARGSUSED */
+static void
+nxge_fill_tcam_entry_udp(p_nxge_t nxgep, flow_spec_t *flow_spec,
+	tcam_entry_t *tcam_ptr)
+{
+	udpip4_spec_t *fspec_key;
+	udpip4_spec_t *fspec_mask;
+
+	fspec_key = (udpip4_spec_t *)&flow_spec->uh.udpip4spec;
+	fspec_mask = (udpip4_spec_t *)&flow_spec->um.udpip4spec;
+	TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_key, fspec_key->ip4dst);
+	TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_mask, fspec_mask->ip4dst);
+	TCAM_IPV4_ADDR(tcam_ptr->ip4_src_key, fspec_key->ip4src);
+	TCAM_IPV4_ADDR(tcam_ptr->ip4_src_mask, fspec_mask->ip4src);
+	TCAM_IP_PORTS(tcam_ptr->ip4_port_key,
+		fspec_key->pdst, fspec_key->psrc);
+	TCAM_IP_PORTS(tcam_ptr->ip4_port_mask,
+		fspec_mask->pdst, fspec_mask->psrc);
+	TCAM_IP_CLASS(tcam_ptr->ip4_class_key,
+		tcam_ptr->ip4_class_mask,
+		TCAM_CLASS_UDP_IPV4);
+	TCAM_IP_PROTO(tcam_ptr->ip4_proto_key,
+		tcam_ptr->ip4_proto_mask,
+		IPPROTO_UDP);
+}
+
+static void
+nxge_fill_tcam_entry_udp_ipv6(p_nxge_t nxgep, flow_spec_t *flow_spec,
+	tcam_entry_t *tcam_ptr)
+{
+	udpip6_spec_t *fspec_key;
+	udpip6_spec_t *fspec_mask;
+	p_nxge_class_pt_cfg_t p_class_cfgp;
+
+	fspec_key = (udpip6_spec_t *)&flow_spec->uh.udpip6spec;
+	fspec_mask = (udpip6_spec_t *)&flow_spec->um.udpip6spec;
+	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
+	if (p_class_cfgp->class_cfg[TCAM_CLASS_UDP_IPV6] &
+			NXGE_CLASS_TCAM_USE_SRC_ADDR) {
+		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6src);
+		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6src);
+	} else {
+		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6dst);
+		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6dst);
+	}
+
+	TCAM_IP_CLASS(tcam_ptr->ip6_class_key,
+		tcam_ptr->ip6_class_mask, TCAM_CLASS_UDP_IPV6);
+	TCAM_IP_PROTO(tcam_ptr->ip6_nxt_hdr_key,
+		tcam_ptr->ip6_nxt_hdr_mask, IPPROTO_UDP);
+	TCAM_IP_PORTS(tcam_ptr->ip6_port_key,
+		fspec_key->pdst, fspec_key->psrc);
+	TCAM_IP_PORTS(tcam_ptr->ip6_port_mask,
+		fspec_mask->pdst, fspec_mask->psrc);
+}
+
+/* ARGSUSED */
+static void
+nxge_fill_tcam_entry_tcp(p_nxge_t nxgep, flow_spec_t *flow_spec,
+	tcam_entry_t *tcam_ptr)
+{
+	tcpip4_spec_t *fspec_key;
+	tcpip4_spec_t *fspec_mask;
+
+	fspec_key = (tcpip4_spec_t *)&flow_spec->uh.tcpip4spec;
+	fspec_mask = (tcpip4_spec_t *)&flow_spec->um.tcpip4spec;
+
+	TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_key, fspec_key->ip4dst);
+	TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_mask, fspec_mask->ip4dst);
+	TCAM_IPV4_ADDR(tcam_ptr->ip4_src_key, fspec_key->ip4src);
+	TCAM_IPV4_ADDR(tcam_ptr->ip4_src_mask, fspec_mask->ip4src);
+	TCAM_IP_PORTS(tcam_ptr->ip4_port_key,
+		fspec_key->pdst, fspec_key->psrc);
+	TCAM_IP_PORTS(tcam_ptr->ip4_port_mask,
+		fspec_mask->pdst, fspec_mask->psrc);
+	TCAM_IP_CLASS(tcam_ptr->ip4_class_key,
+		tcam_ptr->ip4_class_mask, TCAM_CLASS_TCP_IPV4);
+	TCAM_IP_PROTO(tcam_ptr->ip4_proto_key,
+		tcam_ptr->ip4_proto_mask, IPPROTO_TCP);
+}
+
+/* ARGSUSED */
+static void
+nxge_fill_tcam_entry_sctp(p_nxge_t nxgep, flow_spec_t *flow_spec,
+	tcam_entry_t *tcam_ptr)
+{
+	tcpip4_spec_t *fspec_key;
+	tcpip4_spec_t *fspec_mask;
+
+	fspec_key = (tcpip4_spec_t *)&flow_spec->uh.tcpip4spec;
+	fspec_mask = (tcpip4_spec_t *)&flow_spec->um.tcpip4spec;
+
+	TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_key, fspec_key->ip4dst);
+	TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_mask, fspec_mask->ip4dst);
+	TCAM_IPV4_ADDR(tcam_ptr->ip4_src_key, fspec_key->ip4src);
+	TCAM_IPV4_ADDR(tcam_ptr->ip4_src_mask, fspec_mask->ip4src);
+	TCAM_IP_CLASS(tcam_ptr->ip4_class_key,
+		tcam_ptr->ip4_class_mask, TCAM_CLASS_SCTP_IPV4);
+	TCAM_IP_PROTO(tcam_ptr->ip4_proto_key,
+		tcam_ptr->ip4_proto_mask, IPPROTO_SCTP);
+	TCAM_IP_PORTS(tcam_ptr->ip4_port_key,
+		fspec_key->pdst, fspec_key->psrc);
+	TCAM_IP_PORTS(tcam_ptr->ip4_port_mask,
+		fspec_mask->pdst, fspec_mask->psrc);
+}
+
+static void
+nxge_fill_tcam_entry_tcp_ipv6(p_nxge_t nxgep, flow_spec_t *flow_spec,
+	tcam_entry_t *tcam_ptr)
+{
+	tcpip6_spec_t *fspec_key;
+	tcpip6_spec_t *fspec_mask;
+	p_nxge_class_pt_cfg_t p_class_cfgp;
+
+	fspec_key = (tcpip6_spec_t *)&flow_spec->uh.tcpip6spec;
+	fspec_mask = (tcpip6_spec_t *)&flow_spec->um.tcpip6spec;
+
+	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
+	if (p_class_cfgp->class_cfg[TCAM_CLASS_UDP_IPV6] &
+			NXGE_CLASS_TCAM_USE_SRC_ADDR) {
+		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6src);
+		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6src);
+	} else {
+		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6dst);
+		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6dst);
+	}
+
+	TCAM_IP_CLASS(tcam_ptr->ip6_class_key,
+		tcam_ptr->ip6_class_mask, TCAM_CLASS_TCP_IPV6);
+	TCAM_IP_PROTO(tcam_ptr->ip6_nxt_hdr_key,
+		tcam_ptr->ip6_nxt_hdr_mask, IPPROTO_TCP);
+	TCAM_IP_PORTS(tcam_ptr->ip6_port_key,
+		fspec_key->pdst, fspec_key->psrc);
+	TCAM_IP_PORTS(tcam_ptr->ip6_port_mask,
+		fspec_mask->pdst, fspec_mask->psrc);
+}
+
+static void
+nxge_fill_tcam_entry_sctp_ipv6(p_nxge_t nxgep, flow_spec_t *flow_spec,
+	tcam_entry_t *tcam_ptr)
+{
+	tcpip6_spec_t *fspec_key;
+	tcpip6_spec_t *fspec_mask;
+	p_nxge_class_pt_cfg_t p_class_cfgp;
+
+	fspec_key = (tcpip6_spec_t *)&flow_spec->uh.tcpip6spec;
+	fspec_mask = (tcpip6_spec_t *)&flow_spec->um.tcpip6spec;
+	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
+
+	if (p_class_cfgp->class_cfg[TCAM_CLASS_UDP_IPV6] &
+			NXGE_CLASS_TCAM_USE_SRC_ADDR) {
+		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6src);
+		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6src);
+	} else {
+		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6dst);
+		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6dst);
+	}
+
+	TCAM_IP_CLASS(tcam_ptr->ip6_class_key,
+		tcam_ptr->ip6_class_mask, TCAM_CLASS_SCTP_IPV6);
+	TCAM_IP_PROTO(tcam_ptr->ip6_nxt_hdr_key,
+		tcam_ptr->ip6_nxt_hdr_mask, IPPROTO_SCTP);
+	TCAM_IP_PORTS(tcam_ptr->ip6_port_key,
+		fspec_key->pdst, fspec_key->psrc);
+	TCAM_IP_PORTS(tcam_ptr->ip6_port_mask,
+		fspec_mask->pdst, fspec_mask->psrc);
+}
+
+nxge_status_t
+nxge_flow_get_hash(p_nxge_t nxgep, flow_resource_t *flow_res,
+	uint32_t *H1, uint16_t *H2)
+{
+	flow_spec_t *flow_spec;
+	uint32_t class_cfg;
+	flow_template_t ft;
+	p_nxge_class_pt_cfg_t p_class_cfgp;
+
+	int ft_size = sizeof (flow_template_t);
+
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_flow_get_hash"));
+
+	flow_spec = (flow_spec_t *)&flow_res->flow_spec;
+	bzero((char *)&ft, ft_size);
+	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
+
+	switch (flow_spec->flow_type) {
+	case FSPEC_TCPIP4:
+		class_cfg = p_class_cfgp->class_cfg[TCAM_CLASS_TCP_IPV4];
+		if (class_cfg & NXGE_CLASS_FLOW_USE_PROTO)
+			ft.ip_proto = IPPROTO_TCP;
+		if (class_cfg & NXGE_CLASS_FLOW_USE_IPSRC)
+			ft.ip4_saddr = flow_res->flow_spec.uh.tcpip4spec.ip4src;
+		if (class_cfg & NXGE_CLASS_FLOW_USE_IPDST)
+			ft.ip4_daddr = flow_res->flow_spec.uh.tcpip4spec.ip4dst;
+		if (class_cfg & NXGE_CLASS_FLOW_USE_SRC_PORT)
+			ft.ip_src_port = flow_res->flow_spec.uh.tcpip4spec.psrc;
+		if (class_cfg & NXGE_CLASS_FLOW_USE_DST_PORT)
+			ft.ip_dst_port = flow_res->flow_spec.uh.tcpip4spec.pdst;
+		break;
+
+	case FSPEC_UDPIP4:
+		class_cfg = p_class_cfgp->class_cfg[TCAM_CLASS_UDP_IPV4];
+		if (class_cfg & NXGE_CLASS_FLOW_USE_PROTO)
+			ft.ip_proto = IPPROTO_UDP;
+		if (class_cfg & NXGE_CLASS_FLOW_USE_IPSRC)
+			ft.ip4_saddr = flow_res->flow_spec.uh.udpip4spec.ip4src;
+		if (class_cfg & NXGE_CLASS_FLOW_USE_IPDST)
+			ft.ip4_daddr = flow_res->flow_spec.uh.udpip4spec.ip4dst;
+		if (class_cfg & NXGE_CLASS_FLOW_USE_SRC_PORT)
+			ft.ip_src_port = flow_res->flow_spec.uh.udpip4spec.psrc;
+		if (class_cfg & NXGE_CLASS_FLOW_USE_DST_PORT)
+			ft.ip_dst_port = flow_res->flow_spec.uh.udpip4spec.pdst;
+		break;
+
+	default:
+		return (NXGE_ERROR);
+	}
+
+	*H1 = nxge_compute_h1(p_class_cfgp->init_h1,
+		(uint32_t *)&ft, ft_size) & 0xfffff;
+	*H2 = nxge_compute_h2(p_class_cfgp->init_h2,
+		(uint8_t *)&ft, ft_size);
+
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_flow_get_hash"));
+	return (NXGE_OK);
+}
+
+nxge_status_t
+nxge_add_fcram_entry(p_nxge_t nxgep, flow_resource_t *flow_res)
+{
+	uint32_t H1;
+	uint16_t H2;
+	nxge_status_t status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_add_fcram_entry"));
+	status = nxge_flow_get_hash(nxgep, flow_res, &H1, &H2);
+	if (status != NXGE_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			" nxge_add_fcram_entry failed "));
+		return (status);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_add_fcram_entry"));
+	return (NXGE_OK);
+}
+
+/*
+ * Already decided this flow goes into the tcam
+ */
+
+nxge_status_t
+nxge_add_tcam_entry(p_nxge_t nxgep, flow_resource_t *flow_res)
+{
+	npi_handle_t handle;
+	intptr_t channel_cookie;
+	intptr_t flow_cookie;
+	flow_spec_t *flow_spec;
+	npi_status_t rs = NPI_SUCCESS;
+	tcam_entry_t tcam_ptr;
+	tcam_location_t location = 0;
+	uint8_t offset, rdc_grp;
+	p_nxge_hw_list_t hw_p;
+
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_add_tcam_entry"));
+	handle = nxgep->npi_reg_handle;
+
+	bzero((void *)&tcam_ptr, sizeof (tcam_entry_t));
+	flow_spec = (flow_spec_t *)&flow_res->flow_spec;
+	flow_cookie = flow_res->flow_cookie;
+	channel_cookie = flow_res->channel_cookie;
+
+	switch (flow_spec->flow_type) {
+	case FSPEC_TCPIP4:
+		nxge_fill_tcam_entry_tcp(nxgep, flow_spec, &tcam_ptr);
+		location = nxge_get_tcam_location(nxgep,
+			TCAM_CLASS_TCP_IPV4);
+		rdc_grp = nxge_get_rdc_group(nxgep, TCAM_CLASS_TCP_IPV4,
+			flow_cookie);
+		offset = nxge_get_rdc_offset(nxgep, TCAM_CLASS_TCP_IPV4,
+			channel_cookie);
+		break;
+
+	case FSPEC_UDPIP4:
+		nxge_fill_tcam_entry_udp(nxgep, flow_spec, &tcam_ptr);
+		location = nxge_get_tcam_location(nxgep,
+			TCAM_CLASS_UDP_IPV4);
+		rdc_grp = nxge_get_rdc_group(nxgep,
+			TCAM_CLASS_UDP_IPV4,
+			flow_cookie);
+		offset = nxge_get_rdc_offset(nxgep,
+			TCAM_CLASS_UDP_IPV4,
+			channel_cookie);
+		break;
+
+	case FSPEC_TCPIP6:
+		nxge_fill_tcam_entry_tcp_ipv6(nxgep,
+			flow_spec, &tcam_ptr);
+		location = nxge_get_tcam_location(nxgep,
+			TCAM_CLASS_TCP_IPV6);
+		rdc_grp = nxge_get_rdc_group(nxgep, TCAM_CLASS_TCP_IPV6,
+			flow_cookie);
+		offset = nxge_get_rdc_offset(nxgep, TCAM_CLASS_TCP_IPV6,
+			channel_cookie);
+		break;
+
+	case FSPEC_UDPIP6:
+		nxge_fill_tcam_entry_udp_ipv6(nxgep,
+			flow_spec, &tcam_ptr);
+		location = nxge_get_tcam_location(nxgep,
+			TCAM_CLASS_UDP_IPV6);
+		rdc_grp = nxge_get_rdc_group(nxgep,
+			TCAM_CLASS_UDP_IPV6,
+			flow_cookie);
+		offset = nxge_get_rdc_offset(nxgep,
+			TCAM_CLASS_UDP_IPV6,
+			channel_cookie);
+		break;
+
+	case FSPEC_SCTPIP4:
+		nxge_fill_tcam_entry_sctp(nxgep, flow_spec, &tcam_ptr);
+		location = nxge_get_tcam_location(nxgep,
+			TCAM_CLASS_SCTP_IPV4);
+		rdc_grp = nxge_get_rdc_group(nxgep,
+			TCAM_CLASS_SCTP_IPV4,
+			flow_cookie);
+		offset = nxge_get_rdc_offset(nxgep,
+			TCAM_CLASS_SCTP_IPV4,
+			channel_cookie);
+		break;
+
+	case FSPEC_SCTPIP6:
+		nxge_fill_tcam_entry_sctp_ipv6(nxgep,
+			flow_spec, &tcam_ptr);
+		location = nxge_get_tcam_location(nxgep,
+			TCAM_CLASS_SCTP_IPV6);
+		rdc_grp = nxge_get_rdc_group(nxgep,
+			TCAM_CLASS_SCTP_IPV6,
+			flow_cookie);
+		offset = nxge_get_rdc_offset(nxgep,
+			TCAM_CLASS_SCTP_IPV6,
+			channel_cookie);
+		break;
+
+	default:
+		return (NXGE_OK);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
+		" nxge_add_tcam_entry write"
+		" for location %d offset %d", location, offset));
+
+	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			" nxge_add_tcam_entry: common hardware not set",
+			nxgep->niu_type));
+		return (NXGE_ERROR);
+	}
+
+	MUTEX_ENTER(&hw_p->nxge_tcam_lock);
+	rs = npi_fflp_tcam_entry_write(handle, location, &tcam_ptr);
+
+	if (rs & NPI_FFLP_ERROR) {
+		MUTEX_EXIT(&hw_p->nxge_tcam_lock);
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			" nxge_add_tcam_entry write"
+			" failed for location %d", location));
+		return (NXGE_ERROR | rs);
+	}
+
+	tcam_ptr.match_action.value = 0;
+	tcam_ptr.match_action.bits.ldw.rdctbl = rdc_grp;
+	tcam_ptr.match_action.bits.ldw.offset = offset;
+	tcam_ptr.match_action.bits.ldw.tres =
+		TRES_TERM_OVRD_L2RDC;
+	if (channel_cookie == -1)
+		tcam_ptr.match_action.bits.ldw.disc = 1;
+	rs = npi_fflp_tcam_asc_ram_entry_write(handle,
+		location, tcam_ptr.match_action.value);
+	if (rs & NPI_FFLP_ERROR) {
+		MUTEX_EXIT(&hw_p->nxge_tcam_lock);
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			" nxge_add_tcam_entry write"
+			" failed for ASC RAM location %d", location));
+		return (NXGE_ERROR | rs);
+	}
+	bcopy((void *) &tcam_ptr,
+		(void *) &nxgep->classifier.tcam_entries[location].tce,
+		sizeof (tcam_entry_t));
+
+	MUTEX_EXIT(&hw_p->nxge_tcam_lock);
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_add_tcam_entry"));
+	return (NXGE_OK);
+}
+
+static nxge_status_t
+nxge_tcam_handle_ip_fragment(p_nxge_t nxgep)
+{
+	tcam_entry_t tcam_ptr;
+	tcam_location_t location;
+	uint8_t class;
+	uint32_t class_config;
+	npi_handle_t handle;
+	npi_status_t rs = NPI_SUCCESS;
+	p_nxge_hw_list_t hw_p;
+	nxge_status_t status = NXGE_OK;
+
+	handle = nxgep->npi_reg_handle;
+	class = 0;
+	bzero((void *)&tcam_ptr, sizeof (tcam_entry_t));
+	tcam_ptr.ip4_noport_key = 1;
+	tcam_ptr.ip4_noport_mask = 1;
+	location = nxgep->function_num;
+	nxgep->classifier.fragment_bug_location = location;
+
+	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			" nxge_tcam_handle_ip_fragment:"
+			" common hardware not set",
+			nxgep->niu_type));
+		return (NXGE_ERROR);
+	}
+	MUTEX_ENTER(&hw_p->nxge_tcam_lock);
+	rs = npi_fflp_tcam_entry_write(handle,
+		location, &tcam_ptr);
+
+	if (rs & NPI_FFLP_ERROR) {
+		MUTEX_EXIT(&hw_p->nxge_tcam_lock);
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			" nxge_tcam_handle_ip_fragment "
+			" tcam_entry write"
+			" failed for location %d", location));
+		return (NXGE_ERROR);
+	}
+	tcam_ptr.match_action.bits.ldw.rdctbl = nxgep->class_config.mac_rdcgrp;
+	tcam_ptr.match_action.bits.ldw.offset = 0;	/* use the default */
+	tcam_ptr.match_action.bits.ldw.tres =
+		TRES_TERM_USE_OFFSET;
+	rs = npi_fflp_tcam_asc_ram_entry_write(handle,
+		location, tcam_ptr.match_action.value);
+
+	if (rs & NPI_FFLP_ERROR) {
+		MUTEX_EXIT(&hw_p->nxge_tcam_lock);
+		NXGE_DEBUG_MSG((nxgep,
+			FFLP_CTL,
+			" nxge_tcam_handle_ip_fragment "
+			" tcam_entry write"
+			" failed for ASC RAM location %d", location));
+		return (NXGE_ERROR);
+	}
+	bcopy((void *) &tcam_ptr,
+		(void *) &nxgep->classifier.tcam_entries[location].tce,
+		sizeof (tcam_entry_t));
+	for (class = TCAM_CLASS_TCP_IPV4;
+		class <= TCAM_CLASS_SCTP_IPV6; class++) {
+		class_config = nxgep->class_config.class_cfg[class];
+		class_config |= NXGE_CLASS_TCAM_LOOKUP;
+		status = nxge_fflp_ip_class_config(nxgep, class, class_config);
+
+		if (status & NPI_FFLP_ERROR) {
+			MUTEX_EXIT(&hw_p->nxge_tcam_lock);
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"nxge_tcam_handle_ip_fragment "
+				"nxge_fflp_ip_class_config failed "
+				" class %d config %x ", class, class_config));
+			return (NXGE_ERROR);
+		}
+	}
+
+	rs = npi_fflp_cfg_tcam_enable(handle);
+	if (rs & NPI_FFLP_ERROR) {
+		MUTEX_EXIT(&hw_p->nxge_tcam_lock);
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_tcam_handle_ip_fragment "
+			" nxge_fflp_config_tcam_enable failed"));
+		return (NXGE_ERROR);
+	}
+	MUTEX_EXIT(&hw_p->nxge_tcam_lock);
+	return (NXGE_OK);
+}
+
+/* ARGSUSED */
+static int
+nxge_flow_need_hash_lookup(p_nxge_t nxgep, flow_resource_t *flow_res)
+{
+	return (0);
+}
+
+nxge_status_t
+nxge_add_flow(p_nxge_t nxgep, flow_resource_t *flow_res)
+{
+
+	int insert_hash = 0;
+	nxge_status_t status = NXGE_OK;
+
+	if (nxgep->niu_type == NEPTUNE) {
+		/* determine whether to do TCAM or Hash flow */
+		insert_hash = nxge_flow_need_hash_lookup(nxgep, flow_res);
+	}
+	if (insert_hash) {
+		status = nxge_add_fcram_entry(nxgep, flow_res);
+	} else {
+		status = nxge_add_tcam_entry(nxgep, flow_res);
+	}
+	return (status);
+}
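For illustration, a minimal caller-side sketch of nxge_add_flow() follows. The flow_spec field names mirror the accesses made in nxge_flow_get_hash() and nxge_add_tcam_entry() above; the wrapper name, the address/port constants, and the cookie values are hypothetical. Note that nxge_flow_need_hash_lookup() currently always returns 0, so every flow is programmed into the TCAM rather than the FCRAM hash table.

	/*
	 * Hypothetical caller-side sketch only: steer one TCP/IPv4
	 * connection to the RDC group/channel named by the cookies.
	 */
	static nxge_status_t
	example_steer_tcp_flow(p_nxge_t nxgep)
	{
		flow_resource_t fr;

		bzero((void *)&fr, sizeof (fr));
		fr.flow_spec.flow_type = FSPEC_TCPIP4;
		fr.flow_spec.uh.tcpip4spec.ip4src = 0xc0a80101;	/* 192.168.1.1 */
		fr.flow_spec.uh.tcpip4spec.ip4dst = 0xc0a80102;	/* 192.168.1.2 */
		fr.flow_spec.uh.tcpip4spec.psrc = 1024;
		fr.flow_spec.uh.tcpip4spec.pdst = 80;
		fr.flow_cookie = 0;		/* selects the RDC group */
		fr.channel_cookie = 0;		/* RDC offset; -1 means discard */

		return (nxge_add_flow(nxgep, &fr));
	}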
+
+void
+nxge_put_tcam(p_nxge_t nxgep, p_mblk_t mp)
+{
+	flow_resource_t *fs;
+
+	fs = (flow_resource_t *)mp->b_rptr;
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+		"nxge_put_tcam addr fs $%p  type %x offset %x",
+		fs, fs->flow_spec.flow_type, fs->channel_cookie));
+	(void) nxge_add_tcam_entry(nxgep, fs);
+}
+
+nxge_status_t
+nxge_fflp_config_tcam_enable(p_nxge_t nxgep)
+{
+	npi_handle_t handle = nxgep->npi_reg_handle;
+	npi_status_t rs = NPI_SUCCESS;
+
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_config_tcam_enable"));
+	rs = npi_fflp_cfg_tcam_enable(handle);
+	if (rs & NPI_FFLP_ERROR) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			" nxge_fflp_config_tcam_enable failed"));
+		return (NXGE_ERROR | rs);
+	}
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " <== nxge_fflp_config_tcam_enable"));
+	return (NXGE_OK);
+}
+
+nxge_status_t
+nxge_fflp_config_tcam_disable(p_nxge_t nxgep)
+{
+	npi_handle_t handle = nxgep->npi_reg_handle;
+	npi_status_t rs = NPI_SUCCESS;
+
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
+		" ==> nxge_fflp_config_tcam_disable"));
+	rs = npi_fflp_cfg_tcam_disable(handle);
+	if (rs & NPI_FFLP_ERROR) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				" nxge_fflp_config_tcam_disable failed"));
+		return (NXGE_ERROR | rs);
+	}
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
+		" <== nxge_fflp_config_tcam_disable"));
+	return (NXGE_OK);
+}
+
+nxge_status_t
+nxge_fflp_config_hash_lookup_enable(p_nxge_t nxgep)
+{
+	npi_handle_t handle = nxgep->npi_reg_handle;
+	npi_status_t rs = NPI_SUCCESS;
+	p_nxge_dma_pt_cfg_t p_dma_cfgp;
+	p_nxge_hw_pt_cfg_t p_cfgp;
+	uint8_t partition;
+
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
+		" ==> nxge_fflp_config_hash_lookup_enable"));
+	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
+	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
+
+	for (partition = p_cfgp->start_rdc_grpid;
+		partition < p_cfgp->max_rdc_grpids; partition++) {
+		rs = npi_fflp_cfg_fcram_partition_enable(handle, partition);
+		if (rs != NPI_SUCCESS) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				" nxge_fflp_config_hash_lookup_enable"
+				"failed FCRAM partition"
+				" enable for partition %d ", partition));
+			return (NXGE_ERROR | rs);
+		}
+	}
+
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
+		" <== nxge_fflp_config_hash_lookup_enable"));
+	return (NXGE_OK);
+}
+
+nxge_status_t
+nxge_fflp_config_hash_lookup_disable(p_nxge_t nxgep)
+{
+	npi_handle_t handle = nxgep->npi_reg_handle;
+	npi_status_t rs = NPI_SUCCESS;
+	p_nxge_dma_pt_cfg_t p_dma_cfgp;
+	p_nxge_hw_pt_cfg_t p_cfgp;
+	uint8_t partition;
+
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
+		" ==> nxge_fflp_config_hash_lookup_disable"));
+	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
+	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
+
+	for (partition = p_cfgp->start_rdc_grpid;
+		partition < p_cfgp->max_rdc_grpids; partition++) {
+		rs = npi_fflp_cfg_fcram_partition_disable(handle,
+			partition);
+		if (rs != NPI_SUCCESS) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				" nxge_fflp_config_hash_lookup_disable"
+				" failed FCRAM partition"
+				" disable for partition %d ", partition));
+			return (NXGE_ERROR | rs);
+		}
+	}
+
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
+		" <== nxge_fflp_config_hash_lookup_disable"));
+	return (NXGE_OK);
+}
+
+nxge_status_t
+nxge_fflp_config_llc_snap_enable(p_nxge_t nxgep)
+{
+	npi_handle_t handle = nxgep->npi_reg_handle;
+	npi_status_t rs = NPI_SUCCESS;
+
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
+		" ==> nxge_fflp_config_llc_snap_enable"));
+	rs = npi_fflp_cfg_llcsnap_enable(handle);
+	if (rs & NPI_FFLP_ERROR) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			" nxge_fflp_config_llc_snap_enable failed"));
+		return (NXGE_ERROR | rs);
+	}
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
+		" <== nxge_fflp_config_llc_snap_enable"));
+	return (NXGE_OK);
+}
+
+nxge_status_t
+nxge_fflp_config_llc_snap_disable(p_nxge_t nxgep)
+{
+	npi_handle_t handle = nxgep->npi_reg_handle;
+	npi_status_t rs = NPI_SUCCESS;
+
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
+		" ==> nxge_fflp_config_llc_snap_disable"));
+	rs = npi_fflp_cfg_llcsnap_disable(handle);
+	if (rs & NPI_FFLP_ERROR) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			" nxge_fflp_config_llc_snap_disable failed"));
+		return (NXGE_ERROR | rs);
+	}
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
+		" <== nxge_fflp_config_llc_snap_disable"));
+	return (NXGE_OK);
+}
+
+nxge_status_t
+nxge_fflp_ip_usr_class_config(p_nxge_t nxgep, tcam_class_t class,
+	uint32_t config)
+{
+	npi_status_t rs = NPI_SUCCESS;
+	npi_handle_t handle = nxgep->npi_reg_handle;
+	uint8_t tos, tos_mask, proto, ver = 0;
+	uint8_t class_enable = 0;
+
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_ip_usr_class_config"));
+
+	tos = (config & NXGE_CLASS_CFG_IP_TOS_MASK) >>
+		NXGE_CLASS_CFG_IP_TOS_SHIFT;
+	tos_mask = (config & NXGE_CLASS_CFG_IP_TOS_MASK_MASK) >>
+		NXGE_CLASS_CFG_IP_TOS_MASK_SHIFT;
+	proto = (config & NXGE_CLASS_CFG_IP_PROTO_MASK) >>
+		NXGE_CLASS_CFG_IP_PROTO_SHIFT;
+	if (config & NXGE_CLASS_CFG_IP_IPV6_MASK)
+		ver = 1;
+	if (config & NXGE_CLASS_CFG_IP_ENABLE_MASK)
+		class_enable = 1;
+	rs = npi_fflp_cfg_ip_usr_cls_set(handle, class, tos, tos_mask,
+		proto, ver);
+	if (rs & NPI_FFLP_ERROR) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			" nxge_fflp_ip_usr_class_config"
+			" for class %d failed ", class));
+		return (NXGE_ERROR | rs);
+	}
+	if (class_enable)
+		rs = npi_fflp_cfg_ip_usr_cls_enable(handle, class);
+	else
+		rs = npi_fflp_cfg_ip_usr_cls_disable(handle, class);
+
+	if (rs & NPI_FFLP_ERROR) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			" nxge_fflp_ip_usr_class_config"
+			" TCAM enable/disable for class %d failed ", class));
+		return (NXGE_ERROR | rs);
+	}
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_ip_usr_class_config"));
+	return (NXGE_OK);
+}
+
+nxge_status_t
+nxge_fflp_ip_class_config(p_nxge_t nxgep, tcam_class_t class, uint32_t config)
+{
+	uint32_t class_config;
+	nxge_status_t t_status = NXGE_OK;
+	nxge_status_t f_status = NXGE_OK;
+	p_nxge_class_pt_cfg_t p_class_cfgp;
+
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_ip_class_config"));
+
+	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
+	class_config = p_class_cfgp->class_cfg[class];
+
+	if (class_config != config) {
+		p_class_cfgp->class_cfg[class] = config;
+		class_config = config;
+	}
+
+	t_status = nxge_cfg_tcam_ip_class(nxgep, class, class_config);
+	f_status = nxge_cfg_ip_cls_flow_key(nxgep, class, class_config);
+
+	if (t_status & NPI_FFLP_ERROR) {
+		NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
+			" nxge_fflp_ip_class_config %x"
+			" for class %d tcam failed", config, class));
+		return (t_status);
+	}
+	if (f_status & NPI_FFLP_ERROR) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			" nxge_fflp_ip_class_config %x"
+			" for class %d flow key failed", config, class));
+		return (f_status);
+	}
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_ip_class_config"));
+	return (NXGE_OK);
+}
+
+nxge_status_t
+nxge_fflp_ip_class_config_get(p_nxge_t nxgep, tcam_class_t class,
+	uint32_t *config)
+{
+	uint32_t t_class_config, f_class_config;
+	int t_status = NXGE_OK;
+	int f_status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_ip_class_config"));
+
+	t_class_config = f_class_config = 0;
+	t_status = nxge_cfg_tcam_ip_class_get(nxgep, class, &t_class_config);
+	f_status = nxge_cfg_ip_cls_flow_key_get(nxgep, class, &f_class_config);
+
+	if (t_status & NPI_FFLP_ERROR) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			" nxge_fflp_ip_class_config_get  "
+			" for class %d tcam failed", class));
+		return (t_status);
+	}
+
+	if (f_status & NPI_FFLP_ERROR) {
+		NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
+			" nxge_fflp_ip_class_config_get  "
+			" for class %d flow key failed", class));
+		return (f_status);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
+		" nxge_fflp_ip_class_config tcam %x flow %x",
+		t_class_config, f_class_config));
+
+	*config = t_class_config | f_class_config;
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_ip_class_config_get"));
+	return (NXGE_OK);
+}
+
+nxge_status_t
+nxge_fflp_ip_class_config_all(p_nxge_t nxgep)
+{
+	uint32_t class_config;
+	tcam_class_t class;
+
+#ifdef	NXGE_DEBUG
+	int status = NXGE_OK;
+#endif
+
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_ip_class_config"));
+	for (class = TCAM_CLASS_TCP_IPV4;
+		class <= TCAM_CLASS_SCTP_IPV6; class++) {
+		class_config = nxgep->class_config.class_cfg[class];
+#ifndef	NXGE_DEBUG
+		(void) nxge_fflp_ip_class_config(nxgep, class, class_config);
+#else
+		status = nxge_fflp_ip_class_config(nxgep, class, class_config);
+		if (status & NPI_FFLP_ERROR) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"nxge_fflp_ip_class_config failed "
+				" class %d config %x ",
+				class, class_config));
+		}
+#endif
+	}
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_ip_class_config"));
+	return (NXGE_OK);
+}
+
+nxge_status_t
+nxge_fflp_config_vlan_table(p_nxge_t nxgep, uint16_t vlan_id)
+{
+	uint8_t port, rdc_grp;
+	npi_handle_t handle;
+	npi_status_t rs = NPI_SUCCESS;
+	uint8_t priority = 1;
+	p_nxge_mv_cfg_t vlan_table;
+	p_nxge_class_pt_cfg_t p_class_cfgp;
+	p_nxge_hw_list_t hw_p;
+
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_config_vlan_table"));
+	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
+	handle = nxgep->npi_reg_handle;
+	vlan_table = p_class_cfgp->vlan_tbl;
+	port = nxgep->function_num;
+
+	if (vlan_table[vlan_id].flag == 0) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			" nxge_fflp_config_vlan_table"
+			" vlan id is not configured %d", vlan_id));
+		return (NXGE_ERROR);
+	}
+
+	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			" nxge_fflp_config_vlan_table:"
+			" common hardware not set", nxgep->niu_type));
+		return (NXGE_ERROR);
+	}
+	MUTEX_ENTER(&hw_p->nxge_vlan_lock);
+	rdc_grp = vlan_table[vlan_id].rdctbl;
+	rs = npi_fflp_cfg_enet_vlan_table_assoc(handle,
+		port, vlan_id,
+		rdc_grp, priority);
+
+	MUTEX_EXIT(&hw_p->nxge_vlan_lock);
+	if (rs & NPI_FFLP_ERROR) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_fflp_config_vlan_table failed "
+			" Port %d vlan_id %d rdc_grp %d",
+			port, vlan_id, rdc_grp));
+		return (NXGE_ERROR | rs);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_config_vlan_table"));
+	return (NXGE_OK);
+}
+
+nxge_status_t
+nxge_fflp_update_hw(p_nxge_t nxgep)
+{
+	nxge_status_t status = NXGE_OK;
+	p_nxge_param_t pa;
+	uint64_t cfgd_vlans;
+	uint64_t *val_ptr;
+	int i;
+	int num_macs;
+	uint8_t alt_mac;
+	nxge_param_map_t *p_map;
+	p_nxge_mv_cfg_t vlan_table;
+	p_nxge_class_pt_cfg_t p_class_cfgp;
+	p_nxge_dma_pt_cfg_t p_all_cfgp;
+	p_nxge_hw_pt_cfg_t p_cfgp;
+
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_update_hw"));
+
+	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
+	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
+	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
+
+	status = nxge_fflp_set_hash1(nxgep, p_class_cfgp->init_h1);
+	if (status != NXGE_OK) {
+		NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
+			"nxge_fflp_set_hash1 Failed"));
+		return (NXGE_ERROR);
+	}
+
+	status = nxge_fflp_set_hash2(nxgep, p_class_cfgp->init_h2);
+	if (status != NXGE_OK) {
+		NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
+			"nxge_fflp_set_hash2 Failed"));
+		return (NXGE_ERROR);
+	}
+	vlan_table = p_class_cfgp->vlan_tbl;
+
+	/* configure vlan tables */
+	pa = (p_nxge_param_t)&nxgep->param_arr[param_vlan_2rdc_grp];
+	val_ptr = (uint64_t *)pa->value;
+	cfgd_vlans = ((pa->type & NXGE_PARAM_ARRAY_CNT_MASK) >>
+		NXGE_PARAM_ARRAY_CNT_SHIFT);
+
+	for (i = 0; i < cfgd_vlans; i++) {
+		p_map = (nxge_param_map_t *)&val_ptr[i];
+		if (vlan_table[p_map->param_id].flag) {
+			status = nxge_fflp_config_vlan_table(nxgep,
+				p_map->param_id);
+			if (status != NXGE_OK) {
+				NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
+					"nxge_fflp_config_vlan_table Failed"));
+				return (NXGE_ERROR);
+			}
+		}
+	}
+
+	/* config MAC addresses */
+	num_macs = p_cfgp->max_macs;
+	pa = (p_nxge_param_t)&nxgep->param_arr[param_mac_2rdc_grp];
+	val_ptr = (uint64_t *)pa->value;
+
+	for (alt_mac = 0; alt_mac < num_macs; alt_mac++) {
+		if (p_class_cfgp->mac_host_info[alt_mac].flag) {
+			status = nxge_logical_mac_assign_rdc_table(nxgep,
+				alt_mac);
+			if (status != NXGE_OK) {
+				NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
+					"nxge_logical_mac_assign_rdc_table"
+					" Failed"));
+				return (NXGE_ERROR);
+			}
+		}
+	}
+
+	/* Config Hash values */
+	/* config classes */
+	status = nxge_fflp_ip_class_config_all(nxgep);
+	if (status != NXGE_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_fflp_ip_class_config_all Failed"));
+		return (NXGE_ERROR);
+	}
+	return (NXGE_OK);
+}
+
+nxge_status_t
+nxge_classify_init_hw(p_nxge_t nxgep)
+{
+	nxge_status_t status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_classify_init_hw"));
+
+	if (nxgep->classifier.state & NXGE_FFLP_HW_INIT) {
+		NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
+			"nxge_classify_init_hw already init"));
+		return (NXGE_OK);
+	}
+
+	/* Now do a real configuration */
+	status = nxge_fflp_update_hw(nxgep);
+	if (status != NXGE_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_fflp_update_hw failed"));
+		return (NXGE_ERROR);
+	}
+
+	/* Init RDC tables? Who should do that, rxdma or fflp? */
+	/* attach rdc table to the MAC port. */
+	status = nxge_main_mac_assign_rdc_table(nxgep);
+	if (status != NXGE_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"nxge_main_mac_assign_rdc_table failed"));
+		return (NXGE_ERROR);
+	}
+
+	status = nxge_alt_mcast_mac_assign_rdc_table(nxgep);
+	if (status != NXGE_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_multicast_mac_assign_rdc_table failed"));
+		return (NXGE_ERROR);
+	}
+
+	status = nxge_tcam_handle_ip_fragment(nxgep);
+	if (status != NXGE_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_tcam_handle_ip_fragment failed"));
+		return (NXGE_ERROR);
+	}
+
+	nxgep->classifier.state |= NXGE_FFLP_HW_INIT;
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_classify_init_hw"));
+	return (NXGE_OK);
+}
+
+nxge_status_t
+nxge_fflp_handle_sys_errors(p_nxge_t nxgep)
+{
+	npi_handle_t handle;
+	p_nxge_fflp_stats_t statsp;
+	uint8_t portn, rdc_grp;
+	p_nxge_dma_pt_cfg_t p_dma_cfgp;
+	p_nxge_hw_pt_cfg_t p_cfgp;
+	vlan_par_err_t vlan_err;
+	tcam_err_t tcam_err;
+	hash_lookup_err_log1_t fcram1_err;
+	hash_lookup_err_log2_t fcram2_err;
+	hash_tbl_data_log_t fcram_err;
+
+	handle = nxgep->npi_handle;
+	statsp = (p_nxge_fflp_stats_t)&nxgep->statsp->fflp_stats;
+	portn = nxgep->mac.portnum;
+
+	/*
+	 * Read the FFLP error registers to figure out what the error is.
+	 */
+	npi_fflp_vlan_error_get(handle, &vlan_err);
+	npi_fflp_tcam_error_get(handle, &tcam_err);
+
+	if (vlan_err.bits.ldw.m_err || vlan_err.bits.ldw.err) {
+		NXGE_ERROR_MSG((nxgep, FFLP_CTL,
+			" vlan table parity error on port %d"
+			" addr: 0x%x data: 0x%x",
+			portn, vlan_err.bits.ldw.addr,
+			vlan_err.bits.ldw.data));
+		statsp->vlan_parity_err++;
+
+		if (vlan_err.bits.ldw.m_err) {
+			NXGE_ERROR_MSG((nxgep, FFLP_CTL,
+				" vlan table multiple errors on port %d",
+				portn));
+		}
+		statsp->errlog.vlan = (uint32_t)vlan_err.value;
+		NXGE_FM_REPORT_ERROR(nxgep, NULL, NULL,
+			NXGE_FM_EREPORT_FFLP_VLAN_PAR_ERR);
+		npi_fflp_vlan_error_clear(handle);
+	}
+
+	if (tcam_err.bits.ldw.err) {
+		if (tcam_err.bits.ldw.p_ecc != 0) {
+			NXGE_ERROR_MSG((nxgep, FFLP_CTL,
+				" TCAM ECC error on port %d"
+				" TCAM entry: 0x%x syndrome: 0x%x",
+				portn, tcam_err.bits.ldw.addr,
+				tcam_err.bits.ldw.syndrome));
+			statsp->tcam_ecc_err++;
+		} else {
+			NXGE_ERROR_MSG((nxgep, FFLP_CTL,
+				" TCAM Parity error on port %d"
+				" addr: 0x%x parity value: 0x%x",
+				portn, tcam_err.bits.ldw.addr,
+				tcam_err.bits.ldw.syndrome));
+			statsp->tcam_parity_err++;
+		}
+
+		if (tcam_err.bits.ldw.mult) {
+			NXGE_ERROR_MSG((nxgep, FFLP_CTL,
+				" TCAM Multiple errors on port %d", portn));
+		} else {
+			NXGE_ERROR_MSG((nxgep, FFLP_CTL,
+					" TCAM PIO error on port %d",
+					portn));
+		}
+
+		statsp->errlog.tcam = (uint32_t)tcam_err.value;
+		NXGE_FM_REPORT_ERROR(nxgep, NULL, NULL,
+			NXGE_FM_EREPORT_FFLP_TCAM_ERR);
+		npi_fflp_tcam_error_clear(handle);
+	}
+
+	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
+	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
+
+	for (rdc_grp = p_cfgp->start_rdc_grpid;
+		rdc_grp < p_cfgp->max_rdc_grpids; rdc_grp++) {
+		npi_fflp_fcram_error_get(handle, &fcram_err, rdc_grp);
+		if (fcram_err.bits.ldw.pio_err) {
+			NXGE_ERROR_MSG((nxgep, FFLP_CTL,
+				" FCRAM PIO ECC error on port %d"
+				" rdc group: %d Hash Table addr: 0x%x"
+				" syndrome: 0x%x",
+				portn, rdc_grp,
+				fcram_err.bits.ldw.fcram_addr,
+				fcram_err.bits.ldw.syndrome));
+			statsp->hash_pio_err[rdc_grp]++;
+			statsp->errlog.hash_pio[rdc_grp] =
+				(uint32_t)fcram_err.value;
+			NXGE_FM_REPORT_ERROR(nxgep, NULL, NULL,
+				NXGE_FM_EREPORT_FFLP_HASHT_DATA_ERR);
+			npi_fflp_fcram_error_clear(handle, rdc_grp);
+		}
+	}
+
+	npi_fflp_fcram_error_log1_get(handle, &fcram1_err);
+	if (fcram1_err.bits.ldw.ecc_err) {
+		char *multi_str = "";
+		char *multi_bit_str = "";
+
+		npi_fflp_fcram_error_log2_get(handle, &fcram2_err);
+		if (fcram1_err.bits.ldw.mult_lk) {
+			multi_str = "multiple";
+		}
+		if (fcram1_err.bits.ldw.mult_bit) {
+			multi_bit_str = "multiple bits";
+		}
+		NXGE_ERROR_MSG((nxgep, FFLP_CTL,
+			" FCRAM %s lookup %s ECC error on port %d"
+			" H1: 0x%x Subarea: 0x%x Syndrome: 0x%x",
+			multi_str, multi_bit_str, portn,
+			fcram2_err.bits.ldw.h1,
+			fcram2_err.bits.ldw.subarea,
+			fcram2_err.bits.ldw.syndrome));
+		NXGE_FM_REPORT_ERROR(nxgep, NULL, NULL,
+			NXGE_FM_EREPORT_FFLP_HASHT_LOOKUP_ERR);
+		statsp->errlog.hash_lookup2 = (uint32_t)fcram2_err.value;
+	}
+	statsp->errlog.hash_lookup1 = (uint32_t)fcram1_err.value;
+	return (NXGE_OK);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nxge/nxge_fflp_hash.c	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,375 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#include <sys/types.h>
+#include <nxge_fflp_hash.h>
+
+static void nxge_crc32c_word(uint32_t *crcptr, const uint32_t *buf, int len);
+
+/*
+ * The crc32c algorithms are taken from the sctp_crc32 implementation
+ * in common/inet/sctp_crc32.{c,h}.
+ */
+
+/*
+ * Fast CRC32C calculation algorithm.  The basic idea is to process the
+ * data four bytes (one word) at a time, using four lookup tables.  The
+ * standard algorithm in RFC 3309 uses one table.
+ */
+
+/*
+ * SCTP uses reflected/reverse polynomial CRC32 with generating
+ * polynomial 0x1EDC6F41L
+ */
+#define	SCTP_POLY 0x1EDC6F41L
+
+/* CRC-CCITT Polynomial */
+#define	CRC_CCITT_POLY 0x1021
+
+/* The four CRC32c tables. */
+static uint32_t crc32c_tab[4][256];
+
+/* The four CRC-CCITT tables. */
+static uint16_t crc_ccitt_tab[4][256];
+
+/* the four tables for H1 Computation */
+static uint32_t h1table[4][256];
+
+#define	CRC_32C_POLY 0x1EDC6F41L
+
+#define	COMPUTE_H1_BYTE(crc, data) \
+	(crc = (crc<<8)^h1table[0][((crc >> 24) ^data) & 0xff])
+
+static uint32_t
+reflect_32(uint32_t b)
+{
+	int i;
+	uint32_t rw = 0;
+
+	for (i = 0; i < 32; i++) {
+		if (b & 1) {
+			rw |= 1U << (31 - i);
+		}
+		b >>= 1;
+	}
+	return (rw);
+}
+
+static uint32_t
+flip32(uint32_t w)
+{
+	return (((w >> 24) | ((w >> 8) & 0xff00) |
+		((w << 8) & 0xff0000) | (w << 24)));
+}
+
+/*
+ * reference crc-ccitt implementation
+ */
+
+uint16_t
+crc_ccitt(uint16_t crcin, uint8_t data)
+{
+	uint16_t mcrc, crc = 0, bits = 0;
+
+	mcrc = (((crcin >> 8) ^ data) & 0xff) << 8;
+	for (bits = 0; bits < 8; bits++) {
+		crc = ((crc ^ mcrc) & 0x8000) ?
+			(crc << 1) ^ CRC_CCITT_POLY :
+			crc << 1;
+		mcrc <<= 1;
+	}
+	return ((crcin << 8) ^ crc);
+}
+
+/*
+ * Initialize the crc32c tables.
+ */
+
+void
+nxge_crc32c_init(void)
+{
+	uint32_t index, bit, byte, crc;
+
+	for (index = 0; index < 256; index++) {
+		crc = reflect_32(index);
+		for (byte = 0; byte < 4; byte++) {
+			for (bit = 0; bit < 8; bit++) {
+				crc = (crc & 0x80000000) ?
+					(crc << 1) ^ SCTP_POLY : crc << 1;
+			}
+#ifdef _BIG_ENDIAN
+			crc32c_tab[3 - byte][index] = flip32(reflect_32(crc));
+#else
+			crc32c_tab[byte][index] = reflect_32(crc);
+#endif
+		}
+	}
+}
+
+/*
+ * Initialize the crc-ccitt tables.
+ */
+
+void
+nxge_crc_ccitt_init(void)
+{
+	uint16_t crc;
+	uint16_t index, bit, byte;
+
+	for (index = 0; index < 256; index++) {
+		crc = index << 8;
+		for (byte = 0; byte < 4; byte++) {
+			for (bit = 0; bit < 8; bit++) {
+				crc = (crc & 0x8000) ?
+					(crc << 1) ^ CRC_CCITT_POLY : crc << 1;
+			}
+#ifdef _BIG_ENDIAN
+			crc_ccitt_tab[3 - byte][index] = crc;
+#else
+			crc_ccitt_tab[byte][index] = crc;
+#endif
+		}
+	}
+}
+
+/*
+ * Look up the crc32c for a byte stream.
+ */
+
+static void
+nxge_crc32c_byte(uint32_t *crcptr, const uint8_t *buf, int len)
+{
+	uint32_t crc;
+	int i;
+
+	crc = *crcptr;
+	for (i = 0; i < len; i++) {
+#ifdef _BIG_ENDIAN
+		crc = (crc << 8) ^ crc32c_tab[3][buf[i] ^ (crc >> 24)];
+#else
+		crc = (crc >> 8) ^ crc32c_tab[0][buf[i] ^ (crc & 0xff)];
+#endif
+	}
+	*crcptr = crc;
+}
+
+/*
+ * Look up the crc-ccitt for a byte stream.
+ */
+
+static void
+nxge_crc_ccitt_byte(uint16_t *crcptr, const uint8_t *buf, int len)
+{
+	uint16_t crc;
+	int i;
+
+	crc = *crcptr;
+	for (i = 0; i < len; i++) {
+
+#ifdef _BIG_ENDIAN
+		crc = (crc << 8) ^ crc_ccitt_tab[3][buf[i] ^ (crc >> 8)];
+#else
+		crc = (crc << 8) ^ crc_ccitt_tab[0][buf[i] ^ (crc >> 8)];
+#endif
+	}
+	*crcptr = crc;
+}
+
+/*
+ * Look up the crc32c for a 32-bit word stream.
+ * The lookup is done for the 4 bytes in parallel
+ * using the tables computed earlier.
+ */
+
+static void
+nxge_crc32c_word(uint32_t *crcptr, const uint32_t *buf, int len)
+{
+	uint32_t w, crc;
+	int i;
+
+	crc = *crcptr;
+	for (i = 0; i < len; i++) {
+		w = crc ^ buf[i];
+		crc = crc32c_tab[0][w >> 24] ^
+			crc32c_tab[1][(w >> 16) & 0xff] ^
+			crc32c_tab[2][(w >> 8) & 0xff] ^
+			crc32c_tab[3][w & 0xff];
+	}
+	*crcptr = crc;
+}
+
+/*
+ * Look up the crc-ccitt for a stream of bytes.
+ *
+ * Since the parallel lookup version doesn't work yet,
+ * use the byte stream version (look up the crc one byte
+ * at a time).
+ */
+
+uint16_t
+nxge_crc_ccitt(uint16_t crc16, const uint8_t *buf, int len)
+{
+	nxge_crc_ccitt_byte(&crc16, buf, len);
+	return (crc16);
+}
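A brief usage sketch of the table-driven CCITT path follows; the 0xffff seed is an assumption, since the driver's actual initial value (init_h2 in the class configuration) is established elsewhere in nxge_fflp.c, and the helper name is hypothetical.

	/*
	 * Hypothetical usage sketch; nxge_crc_ccitt_init() is normally
	 * called once at attach time, before any lookups are made.
	 */
	static uint16_t
	example_crc_ccitt_of_template(const uint8_t *buf, int len)
	{
		nxge_crc_ccitt_init();
		return (nxge_crc_ccitt(0xffff, buf, len));	/* assumed seed */
	}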
+
+/*
+ * Look up the crc32c for a stream of bytes.
+ *
+ * Tries to look up the CRC on 4-byte words.
+ * If the buffer is not 4-byte aligned, first compute
+ * with byte lookups until aligned, then compute the crc
+ * for each 4-byte word. If there are bytes left at the end
+ * of the buffer, perform byte lookups for the remaining bytes.
+ */
+
+uint32_t
+nxge_crc32c(uint32_t crc32, const uint8_t *buf, int len)
+{
+	int rem;
+
+	rem = (4 - (uintptr_t)buf) & 3;
+	if (rem != 0) {
+		if (len < rem) {
+			rem = len;
+		}
+		nxge_crc32c_byte(&crc32, buf, rem);
+		buf = buf + rem;
+		len = len - rem;
+	}
+	if (len > 3) {
+		nxge_crc32c_word(&crc32, (const uint32_t *) buf, len / 4);
+	}
+	rem = len & 3;
+	if (rem != 0) {
+		nxge_crc32c_byte(&crc32, buf + len - rem, rem);
+	}
+	return (crc32);
+}
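A similar sketch for the CRC32C path is shown below; the 0xffffffff seed follows the common CRC32C convention and is an assumption here, and the helper name is hypothetical. Since the CRC depends only on the byte sequence, the alignment handling above only changes which table lookups are used, not the result.

	/*
	 * Hypothetical usage sketch; nxge_crc32c_init() builds the four
	 * lookup tables and is normally called once at attach time.
	 */
	static uint32_t
	example_crc32c_of_template(const uint8_t *buf, int len)
	{
		nxge_crc32c_init();
		return (nxge_crc32c(0xffffffffU, buf, len));	/* assumed seed */
	}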
+
+void
+nxge_init_h1_table(void)
+{
+	uint32_t crc, bit, byte, index;
+
+	for (index = 0; index < 256; index++) {
+		crc = index << 24;
+		for (byte = 0; byte < 4; byte++) {
+			for (bit = 0; bit < 8; bit++) {
+				crc = ((crc & 0x80000000)) ?
+					(crc << 1) ^ CRC_32C_POLY : crc << 1;
+			}
+			h1table[byte][index] = crc;
+		}
+	}
+}
+
+/*
+ * Reference Neptune H1 computation function.
+ *
+ * It is a slightly modified CRC-32C implementation.
+ */
+
+uint32_t
+nxge_compute_h1_serial(uint32_t init_value, uint32_t *flow, uint32_t len)
+{
+	int bit, byte;
+	uint32_t crc_h1 = init_value;
+	uint8_t *buf;
+
+	buf = (uint8_t *)flow;
+	for (byte = 0; byte < len; byte++) {
+		for (bit = 0; bit < 8; bit++) {
+			crc_h1 = (((crc_h1 >> 24) & 0x80) ^
+				((buf[byte] << bit) & 0x80)) ?
+				(crc_h1 << 1) ^ CRC_32C_POLY : crc_h1 << 1;
+		}
+	}
+
+	return (crc_h1);
+}
+
+/*
+ * Table-based implementation.
+ * Uses four tables in parallel,
+ * one for each byte of a 32-bit word.
+ *
+ * This is the default H1 computation function.
+ */
+
+uint32_t
+nxge_compute_h1_table4(uint32_t crcin, uint32_t *flow, uint32_t length)
+{
+	uint32_t w, fw, i, crch1 = crcin;
+	uint32_t *buf;
+
+	buf = (uint32_t *)flow;
+
+	for (i = 0; i < length / 4; i++) {
+#ifdef _BIG_ENDIAN
+		fw = buf[i];
+#else
+		fw = flip32(buf[i]);
+		fw = buf[i];
+#endif
+		w = crch1 ^ fw;
+		crch1 = h1table[3][w >> 24] ^ h1table[2][(w >> 16) & 0xff] ^
+			h1table[1][(w >> 8) & 0xff] ^ h1table[0][w & 0xff];
+	}
+	return (crch1);
+}
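For reference, a hedged sketch of computing the 20-bit H1 FCRAM index with this default table-driven function is shown below. The 0xfffff mask mirrors nxge_flow_get_hash() in nxge_fflp.c; the init value, the template buffer, and the helper name are assumptions supplied by the caller, and the length is assumed to be a multiple of four since only whole words are consumed here.

	/*
	 * Hypothetical usage sketch; nxge_init_h1_table() is normally
	 * called once at attach time.
	 */
	static uint32_t
	example_h1_index(uint32_t init_h1, uint32_t *flow_words, uint32_t nbytes)
	{
		nxge_init_h1_table();
		return (nxge_compute_h1_table4(init_h1, flow_words, nbytes) &
			0xfffff);
	}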
+
+/*
+ * Table-based implementation.
+ * Uses a single table and computes H1 one byte
+ * at a time.
+ */
+
+uint32_t
+nxge_compute_h1_table1(uint32_t crcin, uint32_t *flow, uint32_t length)
+{
+
+	uint32_t i, crch1 = crcin, tmp;
+	uint8_t *buf;
+
+	buf = (uint8_t *)flow;
+
+	tmp = crcin;
+	for (i = 0; i < length; i++) {
+		crch1 = COMPUTE_H1_BYTE(tmp, buf[i]);
+		tmp = crch1;
+	}
+
+	return (crch1);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nxge/nxge_fm.c	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,966 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#include <sys/nxge/nxge_impl.h>
+#include <sys/ddifm.h>
+#include <sys/fm/protocol.h>
+#include <sys/fm/util.h>
+#include <sys/fm/io/ddi.h>
+
+static nxge_fm_ereport_attr_t
+*nxge_fm_get_ereport_attr(nxge_fm_ereport_id_t);
+
+nxge_fm_ereport_attr_t	nxge_fm_ereport_pcs[] = {
+	{NXGE_FM_EREPORT_XPCS_LINK_DOWN,	"10g_link_down",
+						DDI_FM_DEVICE_INVAL_STATE,
+						DDI_SERVICE_LOST},
+	{NXGE_FM_EREPORT_XPCS_TX_LINK_FAULT,	"10g_tx_link_fault",
+						DDI_FM_DEVICE_INVAL_STATE,
+						DDI_SERVICE_DEGRADED},
+	{NXGE_FM_EREPORT_XPCS_RX_LINK_FAULT,	"10g_rx_link_fault",
+						DDI_FM_DEVICE_INVAL_STATE,
+						DDI_SERVICE_DEGRADED},
+	{NXGE_FM_EREPORT_PCS_LINK_DOWN,		"1g_link_down",
+						DDI_FM_DEVICE_INVAL_STATE,
+						DDI_SERVICE_LOST},
+	{NXGE_FM_EREPORT_PCS_REMOTE_FAULT,	"1g_remote_fault",
+						DDI_FM_DEVICE_INVAL_STATE,
+						DDI_SERVICE_DEGRADED},
+};
+
+nxge_fm_ereport_attr_t	nxge_fm_ereport_mif[] = {
+	{NXGE_FM_EREPORT_MIF_ACCESS_FAIL,	"transceiver_access_fail"}
+};
+
+nxge_fm_ereport_attr_t nxge_fm_ereport_fflp[] = {
+	{NXGE_FM_EREPORT_FFLP_TCAM_ERR,		"classifier_tcam_err",
+						DDI_FM_DEVICE_INTERN_UNCORR,
+						DDI_SERVICE_UNAFFECTED},
+	{NXGE_FM_EREPORT_FFLP_VLAN_PAR_ERR,	"classifier_vlan_par_err",
+						DDI_FM_DEVICE_INTERN_UNCORR,
+						DDI_SERVICE_UNAFFECTED},
+	{NXGE_FM_EREPORT_FFLP_HASHT_DATA_ERR,	"classifier_hasht_data_err",
+						DDI_FM_DEVICE_INTERN_UNCORR,
+						DDI_SERVICE_UNAFFECTED},
+	{NXGE_FM_EREPORT_FFLP_HASHT_LOOKUP_ERR,	"classifier_hasht_lookup_err",
+						DDI_FM_DEVICE_INTERN_UNCORR,
+						DDI_SERVICE_UNAFFECTED},
+	{NXGE_FM_EREPORT_FFLP_ACCESS_FAIL,	"classifier_access_fail",
+						DDI_FM_DEVICE_NO_RESPONSE,
+						DDI_SERVICE_DEGRADED}
+};
+
+nxge_fm_ereport_attr_t nxge_fm_ereport_ipp[] = {
+	{NXGE_FM_EREPORT_IPP_EOP_MISS,		"rx_eop_miss",
+						DDI_FM_DEVICE_INVAL_STATE,
+						DDI_SERVICE_LOST},
+	{NXGE_FM_EREPORT_IPP_SOP_MISS,		"rx_sop_miss",
+						DDI_FM_DEVICE_INVAL_STATE,
+						DDI_SERVICE_LOST},
+	{NXGE_FM_EREPORT_IPP_DFIFO_UE,		"rx_dfifo_ucorr_err",
+						DDI_FM_DEVICE_INTERN_UNCORR,
+						DDI_SERVICE_LOST},
+	{NXGE_FM_EREPORT_IPP_DFIFO_CE,		"rx_dfifo_corr_err",
+						DDI_FM_DEVICE_INTERN_CORR,
+						DDI_SERVICE_UNAFFECTED},
+	{NXGE_FM_EREPORT_IPP_PFIFO_PERR,	"rx_dfifo_parity_err",
+						DDI_FM_DEVICE_INTERN_UNCORR,
+						DDI_SERVICE_LOST},
+	{NXGE_FM_EREPORT_IPP_ECC_ERR_MAX,	"rx_ecc_err_max",
+						DDI_FM_DEVICE_INTERN_UNCORR,
+						DDI_SERVICE_UNAFFECTED},
+	{NXGE_FM_EREPORT_IPP_PFIFO_OVER,	"rx_pfifo_overflow",
+						DDI_FM_DEVICE_INVAL_STATE,
+						DDI_SERVICE_LOST},
+	{NXGE_FM_EREPORT_IPP_PFIFO_UND,		"rx_pfifo_underrun",
+						DDI_FM_DEVICE_INVAL_STATE,
+						DDI_SERVICE_LOST},
+	{NXGE_FM_EREPORT_IPP_BAD_CS_MX,		"rx_bad_cksum_max",
+						DDI_FM_DEVICE_INTERN_UNCORR,
+						DDI_SERVICE_UNAFFECTED},
+	{NXGE_FM_EREPORT_IPP_PKT_DIS_MX,	"rx_pkt_discard_max",
+						DDI_FM_DEVICE_INTERN_UNCORR,
+						DDI_SERVICE_UNAFFECTED},
+	{NXGE_FM_EREPORT_IPP_RESET_FAIL,	"rx_reset_fail",
+						DDI_FM_DEVICE_NO_RESPONSE,
+						DDI_SERVICE_LOST}
+};
+
+nxge_fm_ereport_attr_t nxge_fm_ereport_rdmc[] = {
+	{NXGE_FM_EREPORT_RDMC_DCF_ERR,		"rxdma_dcf_err",
+						DDI_FM_DEVICE_INTERN_UNCORR,
+						DDI_SERVICE_LOST},
+	{NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR,	"rxdma_rcr_ack_err",
+						DDI_FM_DEVICE_INTERN_UNCORR,
+						DDI_SERVICE_DEGRADED},
+	{NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR,	"rxdma_dc_fifo_err",
+						DDI_FM_DEVICE_INTERN_UNCORR,
+						DDI_SERVICE_LOST},
+	{NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR,	"rxdma_rcr_sha_par_err",
+						DDI_FM_DEVICE_INTERN_UNCORR,
+						DDI_SERVICE_DEGRADED},
+	{NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR,	"rxdma_rbr_pre_par_err",
+						DDI_FM_DEVICE_INTERN_UNCORR,
+						DDI_SERVICE_DEGRADED},
+	{NXGE_FM_EREPORT_RDMC_RBR_TMOUT,	"rxdma_rbr_tmout",
+						DDI_FM_DEVICE_NO_RESPONSE,
+						DDI_SERVICE_DEGRADED},
+	{NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR,	"rxdma_rsp_cnt_err",
+						DDI_FM_DEVICE_INVAL_STATE,
+						DDI_SERVICE_DEGRADED},
+	{NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS,	"rxdma_byte_en_bus",
+						DDI_FM_DEVICE_INVAL_STATE,
+						DDI_SERVICE_DEGRADED},
+	{NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR,	"rxdma_rsp_dat_err",
+						DDI_FM_DEVICE_INTERN_UNCORR,
+						DDI_SERVICE_DEGRADED},
+	{NXGE_FM_EREPORT_RDMC_ID_MISMATCH,	"rxdma_id_mismatch",
+						DDI_FM_DEVICE_INVAL_STATE,
+						DDI_SERVICE_LOST},
+	{NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR,	"rxdma_zcp_eop_err",
+						DDI_FM_DEVICE_INVAL_STATE,
+						DDI_SERVICE_LOST},
+	{NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR,	"rxdma_ipp_eop_err",
+						DDI_FM_DEVICE_INVAL_STATE,
+						DDI_SERVICE_LOST},
+	{NXGE_FM_EREPORT_RDMC_COMPLETION_ERR,	"rxdma_completion_err",
+						DDI_FM_DEVICE_INTERN_UNCORR,
+						DDI_SERVICE_UNAFFECTED},
+	{NXGE_FM_EREPORT_RDMC_CONFIG_ERR,	"rxdma_config_err",
+						DDI_FM_DEVICE_INVAL_STATE,
+						DDI_SERVICE_DEGRADED},
+	{NXGE_FM_EREPORT_RDMC_RCRINCON,		"rxdma_rcrincon",
+						DDI_FM_DEVICE_INVAL_STATE,
+						DDI_SERVICE_DEGRADED},
+	{NXGE_FM_EREPORT_RDMC_RCRFULL,		"rxdma_rcrfull",
+						DDI_FM_DEVICE_INVAL_STATE,
+						DDI_SERVICE_DEGRADED},
+	{NXGE_FM_EREPORT_RDMC_RBRFULL,		"rxdma_rbrfull",
+						DDI_FM_DEVICE_INVAL_STATE,
+						DDI_SERVICE_DEGRADED},
+	{NXGE_FM_EREPORT_RDMC_RBRLOGPAGE,	"rxdma_rbrlogpage",
+						DDI_FM_DEVICE_INVAL_STATE,
+						DDI_SERVICE_DEGRADED},
+	{NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE,	"rxdma_cfiglogpage",
+						DDI_FM_DEVICE_INVAL_STATE,
+						DDI_SERVICE_DEGRADED}
+};
+
+nxge_fm_ereport_attr_t nxge_fm_ereport_zcp[] = {
+	{NXGE_FM_EREPORT_ZCP_RRFIFO_UNDERRUN,	"rxzcopy_rrfifo_underrun",
+						DDI_FM_DEVICE_INVAL_STATE,
+						DDI_SERVICE_UNAFFECTED},
+	{NXGE_FM_EREPORT_ZCP_RSPFIFO_UNCORR_ERR,
+						"rxzcopy_rspfifo_uncorr_err",
+						DDI_FM_DEVICE_INTERN_UNCORR,
+						DDI_SERVICE_UNAFFECTED},
+	{NXGE_FM_EREPORT_ZCP_STAT_TBL_PERR,	"rxzcopy_stat_tbl_perr",
+						DDI_FM_DEVICE_INTERN_UNCORR,
+						DDI_SERVICE_UNAFFECTED},
+	{NXGE_FM_EREPORT_ZCP_DYN_TBL_PERR,	"rxzcopy_dyn_tbl_perr",
+						DDI_FM_DEVICE_INTERN_UNCORR,
+						DDI_SERVICE_UNAFFECTED},
+	{NXGE_FM_EREPORT_ZCP_BUF_TBL_PERR,	"rxzcopy_buf_tbl_perr",
+						DDI_FM_DEVICE_INTERN_UNCORR,
+						DDI_SERVICE_UNAFFECTED},
+	{NXGE_FM_EREPORT_ZCP_CFIFO_ECC,		"rxzcopy_cfifo_ecc",
+						DDI_FM_DEVICE_INTERN_CORR,
+						DDI_SERVICE_UNAFFECTED},
+	{NXGE_FM_EREPORT_ZCP_RRFIFO_OVERRUN,	"rxzcopy_rrfifo_overrun",
+						DDI_FM_DEVICE_INVAL_STATE,
+						DDI_SERVICE_UNAFFECTED},
+	{NXGE_FM_EREPORT_ZCP_BUFFER_OVERFLOW,	"rxzcopy_buffer_overflow",
+						DDI_FM_DEVICE_INVAL_STATE,
+						DDI_SERVICE_LOST},
+	{NXGE_FM_EREPORT_ZCP_TT_PROGRAM_ERR,	"rxzcopy_tt_program_err",
+						DDI_FM_DEVICE_INVAL_STATE,
+						DDI_SERVICE_UNAFFECTED},
+	{NXGE_FM_EREPORT_ZCP_RSP_TT_INDEX_ERR,	"rxzcopy_rsp_tt_index_err",
+						DDI_FM_DEVICE_INVAL_STATE,
+						DDI_SERVICE_UNAFFECTED},
+	{NXGE_FM_EREPORT_ZCP_SLV_TT_INDEX_ERR,	"rxzcopy_slv_tt_index_err",
+						DDI_FM_DEVICE_INVAL_STATE,
+						DDI_SERVICE_UNAFFECTED},
+	{NXGE_FM_EREPORT_ZCP_TT_INDEX_ERR,	"rxzcopy_tt_index_err",
+						DDI_FM_DEVICE_INVAL_STATE,
+						DDI_SERVICE_UNAFFECTED},
+	{NXGE_FM_EREPORT_ZCP_ACCESS_FAIL,	"rxzcopy_access_fail",
+						DDI_FM_DEVICE_NO_RESPONSE,
+						DDI_SERVICE_LOST},
+};
+
+nxge_fm_ereport_attr_t nxge_fm_ereport_rxmac[] = {
+	{NXGE_FM_EREPORT_RXMAC_UNDERFLOW,	"rxmac_underflow",
+						DDI_FM_DEVICE_INVAL_STATE,
+						DDI_SERVICE_UNAFFECTED},
+	{NXGE_FM_EREPORT_RXMAC_CRC_ERRCNT_EXP,	"rxmac_crc_errcnt_exp",
+						DDI_FM_DEVICE_INTERN_UNCORR,
+						DDI_SERVICE_UNAFFECTED},
+	{NXGE_FM_EREPORT_RXMAC_LENGTH_ERRCNT_EXP,
+						"rxmac_length_errcnt_exp",
+						DDI_FM_DEVICE_INTERN_UNCORR,
+						DDI_SERVICE_UNAFFECTED},
+	{NXGE_FM_EREPORT_RXMAC_VIOL_ERRCNT_EXP,	"rxmac_viol_errcnt_exp",
+						DDI_FM_DEVICE_INTERN_UNCORR,
+						DDI_SERVICE_UNAFFECTED},
+	{NXGE_FM_EREPORT_RXMAC_RXFRAG_CNT_EXP,	"rxmac_rxfrag_cnt_exp",
+						DDI_FM_DEVICE_INTERN_UNCORR,
+						DDI_SERVICE_UNAFFECTED},
+	{NXGE_FM_EREPORT_RXMAC_ALIGN_ECNT_EXP,	"rxmac_align_ecnt_exp",
+						DDI_FM_DEVICE_INTERN_UNCORR,
+						DDI_SERVICE_UNAFFECTED},
+	{NXGE_FM_EREPORT_RXMAC_LINKFAULT_CNT_EXP,
+						"rxmac_linkfault_cnt_exp",
+						DDI_FM_DEVICE_INTERN_UNCORR,
+						DDI_SERVICE_UNAFFECTED},
+	{NXGE_FM_EREPORT_RXMAC_RESET_FAIL,	"rxmac_reset_fail",
+						DDI_FM_DEVICE_NO_RESPONSE,
+						DDI_SERVICE_UNAFFECTED},
+};
+
+nxge_fm_ereport_attr_t nxge_fm_ereport_tdmc[] = {
+	{NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR,	"txdma_pref_buf_par_err",
+						DDI_FM_DEVICE_INTERN_UNCORR,
+						DDI_SERVICE_DEGRADED},
+	{NXGE_FM_EREPORT_TDMC_MBOX_ERR,		"txdma_mbox_err",
+						DDI_FM_DEVICE_NO_RESPONSE,
+						DDI_SERVICE_DEGRADED},
+	{NXGE_FM_EREPORT_TDMC_NACK_PREF,	"txdma_nack_pref",
+						DDI_FM_DEVICE_NO_RESPONSE,
+						DDI_SERVICE_DEGRADED},
+	{NXGE_FM_EREPORT_TDMC_NACK_PKT_RD,	"txdma_nack_pkt_rd",
+						DDI_FM_DEVICE_NO_RESPONSE,
+						DDI_SERVICE_DEGRADED},
+	{NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR,	"txdma_pkt_size_err",
+						DDI_FM_DEVICE_INVAL_STATE,
+						DDI_SERVICE_DEGRADED},
+	{NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW,	"txdma_tx_ring_oflow",
+						DDI_FM_DEVICE_INVAL_STATE,
+						DDI_SERVICE_DEGRADED},
+	{NXGE_FM_EREPORT_TDMC_CONF_PART_ERR,	"txdma_conf_part_err",
+						DDI_FM_DEVICE_INVAL_STATE,
+						DDI_SERVICE_DEGRADED},
+	{NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR,	"txdma_pkt_prt_err",
+						DDI_FM_DEVICE_INTERN_UNCORR,
+						DDI_SERVICE_DEGRADED},
+	{NXGE_FM_EREPORT_TDMC_RESET_FAIL,	"txdma_reset_fail",
+						DDI_FM_DEVICE_NO_RESPONSE,
+						DDI_SERVICE_LOST},
+};
+
+nxge_fm_ereport_attr_t nxge_fm_ereport_txc[] = {
+	{NXGE_FM_EREPORT_TXC_RO_CORRECT_ERR,	"tx_ro_correct_err",
+						DDI_FM_DEVICE_INTERN_CORR,
+						DDI_SERVICE_UNAFFECTED},
+	{NXGE_FM_EREPORT_TXC_RO_UNCORRECT_ERR,	"tx_ro_uncorrect_err",
+						DDI_FM_DEVICE_INTERN_UNCORR,
+						DDI_SERVICE_UNAFFECTED},
+	{NXGE_FM_EREPORT_TXC_SF_CORRECT_ERR,	"tx_sf_correct_err",
+						DDI_FM_DEVICE_INTERN_CORR,
+						DDI_SERVICE_UNAFFECTED},
+	{NXGE_FM_EREPORT_TXC_SF_UNCORRECT_ERR,	"tx_sf_uncorrect_err",
+						DDI_FM_DEVICE_INTERN_UNCORR,
+						DDI_SERVICE_UNAFFECTED},
+	{NXGE_FM_EREPORT_TXC_ASSY_DEAD,		"tx_assembly_uncorrect_err",
+						DDI_FM_DEVICE_INTERN_UNCORR,
+						DDI_SERVICE_UNAFFECTED},
+	{NXGE_FM_EREPORT_TXC_REORDER_ERR,	"tx_reorder_err",
+						DDI_FM_DEVICE_INVAL_STATE,
+						DDI_SERVICE_LOST},
+};
+
+nxge_fm_ereport_attr_t nxge_fm_ereport_txmac[] = {
+	{NXGE_FM_EREPORT_TXMAC_UNDERFLOW,	"txmac_underflow",
+						DDI_FM_DEVICE_INVAL_STATE,
+						DDI_SERVICE_UNAFFECTED},
+	{NXGE_FM_EREPORT_TXMAC_OVERFLOW,	"txmac_overflow",
+						DDI_FM_DEVICE_INVAL_STATE,
+						DDI_SERVICE_UNAFFECTED},
+	{NXGE_FM_EREPORT_TXMAC_TXFIFO_XFR_ERR,	"txmac_txfifo_xfr_err",
+						DDI_FM_DEVICE_INVAL_STATE,
+						DDI_SERVICE_UNAFFECTED},
+	{NXGE_FM_EREPORT_TXMAC_MAX_PKT_ERR,	"txmac_max_pkt_err",
+						DDI_FM_DEVICE_INTERN_UNCORR,
+						DDI_SERVICE_UNAFFECTED},
+	{NXGE_FM_EREPORT_TXMAC_RESET_FAIL,	"txmac_reset_fail",
+						DDI_FM_DEVICE_NO_RESPONSE,
+						DDI_SERVICE_UNAFFECTED},
+};
+
+nxge_fm_ereport_attr_t nxge_fm_ereport_espc[] = {
+	{NXGE_FM_EREPORT_ESPC_ACCESS_FAIL,	"eprom_access_fail",
+						DDI_FM_DEVICE_NO_RESPONSE,
+						DDI_SERVICE_LOST},
+};
+
+nxge_fm_ereport_attr_t nxge_fm_ereport_sw[] = {
+	{NXGE_FM_EREPORT_SW_INVALID_PORT_NUM,	"invalid_port_num",
+						DDI_FM_DEVICE_INVAL_STATE,
+						DDI_SERVICE_LOST},
+	{NXGE_FM_EREPORT_SW_INVALID_CHAN_NUM,	"invalid_chan_num",
+						DDI_FM_DEVICE_INVAL_STATE,
+						DDI_SERVICE_LOST},
+	{NXGE_FM_EREPORT_SW_INVALID_PARAM,	"invalid_param",
+						DDI_FM_DEVICE_INVAL_STATE,
+						DDI_SERVICE_LOST},
+};
+
+void
+nxge_fm_init(p_nxge_t nxgep, ddi_device_acc_attr_t *reg_attr,
+		ddi_device_acc_attr_t *desc_attr, ddi_dma_attr_t *dma_attr)
+{
+	ddi_iblock_cookie_t iblk;
+
+	nxgep->fm_capabilities = ddi_prop_get_int(DDI_DEV_T_ANY, nxgep->dip,
+			DDI_PROP_DONTPASS, "fm-capable", 1);
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+		"FM capable = %d\n", nxgep->fm_capabilities));
+
+	/* Only register with IO Fault Services if we have some capability */
+	if (nxgep->fm_capabilities) {
+		reg_attr->devacc_attr_access = DDI_FLAGERR_ACC;
+		desc_attr->devacc_attr_access = DDI_FLAGERR_ACC;
+		dma_attr->dma_attr_flags = DDI_DMA_FLAGERR;
+
+		/* Register capabilities with IO Fault Services */
+		ddi_fm_init(nxgep->dip, &nxgep->fm_capabilities, &iblk);
+
+		/*
+		 * Initialize pci ereport capabilities if ereport capable
+		 */
+		if (DDI_FM_EREPORT_CAP(nxgep->fm_capabilities) ||
+		    DDI_FM_ERRCB_CAP(nxgep->fm_capabilities))
+			pci_ereport_setup(nxgep->dip);
+	} else {
+		/*
+		 * These fields have to be reset to their non-FMA defaults
+		 * if there are no FMA capabilities at runtime.
+		 */
+		reg_attr->devacc_attr_access = DDI_DEFAULT_ACC;
+		desc_attr->devacc_attr_access = DDI_DEFAULT_ACC;
+		dma_attr->dma_attr_flags = 0;
+	}
+}
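A hedged sketch of how attach-time code might call nxge_fm_init() follows; the attribute initializers use generic DDI defaults and stand in for the driver's real definitions elsewhere, and the wrapper name is hypothetical.

	/*
	 * Hypothetical attach-time usage; nxge_fm_init() upgrades the
	 * access and DMA attributes to the FLAGERR variants only when
	 * the "fm-capable" property enables FMA.
	 */
	static void
	example_fm_setup(p_nxge_t nxgep)
	{
		ddi_device_acc_attr_t reg_attr = {
			DDI_DEVICE_ATTR_V0,
			DDI_STRUCTURE_LE_ACC,
			DDI_STRICTORDER_ACC
		};
		ddi_device_acc_attr_t desc_attr = {
			DDI_DEVICE_ATTR_V0,
			DDI_STRUCTURE_LE_ACC,
			DDI_STRICTORDER_ACC
		};
		ddi_dma_attr_t dma_attr;

		bzero((void *)&dma_attr, sizeof (dma_attr));
		dma_attr.dma_attr_version = DMA_ATTR_V0;

		nxge_fm_init(nxgep, &reg_attr, &desc_attr, &dma_attr);
	}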
+
+void
+nxge_fm_fini(p_nxge_t nxgep)
+{
+	/* Only unregister FMA capabilities if we registered some */
+	if (nxgep->fm_capabilities) {
+
+		/*
+		 * Release any resources allocated by pci_ereport_setup()
+		 */
+		if (DDI_FM_EREPORT_CAP(nxgep->fm_capabilities) ||
+		    DDI_FM_ERRCB_CAP(nxgep->fm_capabilities))
+			pci_ereport_teardown(nxgep->dip);
+
+		/*
+		 * Un-register error callback if error callback capable
+		 */
+		if (DDI_FM_ERRCB_CAP(nxgep->fm_capabilities))
+			ddi_fm_handler_unregister(nxgep->dip);
+
+		/* Unregister from IO Fault Services */
+		ddi_fm_fini(nxgep->dip);
+	}
+}
+
+void
+nxge_fm_npi_error_handler(p_nxge_t nxgep, npi_status_t status)
+{
+	uint8_t			block_id;
+	uint8_t			error_type;
+	nxge_fm_ereport_id_t	fm_ereport_id = NXGE_FM_EREPORT_UNKNOWN;
+	nxge_fm_ereport_attr_t	*fm_ereport_attr;
+	char			*class_name;
+	uint64_t		ena;
+	uint8_t			portn = 0;
+	uint8_t			chan = 0;
+	boolean_t		is_port;
+	boolean_t		is_chan;
+
+	if (status == NPI_SUCCESS)
+		return;
+
+	block_id = (status >> NPI_BLOCK_ID_SHIFT) & 0xF;
+	error_type = status & 0xFF;
+	is_port = (status & IS_PORT)? B_TRUE: B_FALSE;
+	is_chan = (status & IS_CHAN)? B_TRUE: B_FALSE;
+
+	if (is_port)
+		portn = (status >> NPI_PORT_CHAN_SHIFT) & 0xF;
+	else if (is_chan)
+		chan = (status >> NPI_PORT_CHAN_SHIFT) & 0xF;
+
+	/* Map error type into FM ereport id */
+
+	/* Handle all software errors */
+
+	if (((error_type >= COMMON_SW_ERR_START) &&
+				(error_type <= COMMON_SW_ERR_END)) ||
+		((error_type >= BLK_SPEC_SW_ERR_START) &&
+				(error_type <= BLK_SPEC_SW_ERR_END))) {
+		switch (error_type) {
+		case PORT_INVALID:
+			fm_ereport_id = NXGE_FM_EREPORT_SW_INVALID_PORT_NUM;
+			break;
+		case CHANNEL_INVALID:
+			fm_ereport_id = NXGE_FM_EREPORT_SW_INVALID_CHAN_NUM;
+			break;
+		default:
+			fm_ereport_id = NXGE_FM_EREPORT_SW_INVALID_PARAM;
+		}
+	} else if (((error_type >= COMMON_HW_ERR_START) &&
+				(error_type <= COMMON_HW_ERR_END)) ||
+		((error_type >= BLK_SPEC_HW_ERR_START) &&
+				(error_type <= BLK_SPEC_HW_ERR_END))) {
+		/* Handle hardware errors */
+		switch (error_type) {
+		case RESET_FAILED:
+			switch (block_id) {
+			case TXMAC_BLK_ID:
+				fm_ereport_id =
+					NXGE_FM_EREPORT_TXMAC_RESET_FAIL;
+				break;
+			case RXMAC_BLK_ID:
+				fm_ereport_id =
+					NXGE_FM_EREPORT_RXMAC_RESET_FAIL;
+				break;
+			case IPP_BLK_ID:
+				fm_ereport_id = NXGE_FM_EREPORT_IPP_RESET_FAIL;
+				break;
+			case TXDMA_BLK_ID:
+				fm_ereport_id = NXGE_FM_EREPORT_TDMC_RESET_FAIL;
+				break;
+			default:
+				fm_ereport_id = NXGE_FM_EREPORT_UNKNOWN;
+			}
+			break;
+		case WRITE_FAILED:
+		case READ_FAILED:
+			switch (block_id) {
+			case MIF_BLK_ID:
+				fm_ereport_id = NXGE_FM_EREPORT_MIF_ACCESS_FAIL;
+				break;
+			case ZCP_BLK_ID:
+				fm_ereport_id = NXGE_FM_EREPORT_ZCP_ACCESS_FAIL;
+				break;
+			case ESPC_BLK_ID:
+				fm_ereport_id =
+					NXGE_FM_EREPORT_ESPC_ACCESS_FAIL;
+				break;
+			case FFLP_BLK_ID:
+				fm_ereport_id =
+					NXGE_FM_EREPORT_FFLP_ACCESS_FAIL;
+				break;
+			default:
+				fm_ereport_id = NXGE_FM_EREPORT_UNKNOWN;
+			}
+			break;
+		case TXDMA_HW_STOP_FAILED:
+		case TXDMA_HW_RESUME_FAILED:
+			fm_ereport_id = NXGE_FM_EREPORT_TDMC_RESET_FAIL;
+			break;
+		}
+	}
+
+	fm_ereport_attr = nxge_fm_get_ereport_attr(fm_ereport_id);
+	if (fm_ereport_attr == NULL)
+		return;
+	class_name = fm_ereport_attr->eclass;
+
+	ena = fm_ena_generate(0, FM_ENA_FMT1);
+
+	if ((is_port == B_FALSE) && (is_chan == B_FALSE)) {
+		ddi_fm_ereport_post(nxgep->dip, class_name, ena,
+			DDI_NOSLEEP,
+			FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
+			NULL);
+	} else if ((is_port == B_TRUE) && (is_chan == B_FALSE)) {
+		ddi_fm_ereport_post(nxgep->dip, class_name, ena, DDI_NOSLEEP,
+			FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
+			ERNAME_ERR_PORTN, DATA_TYPE_UINT8, portn,
+			NULL);
+	} else if ((is_port == B_FALSE) && (is_chan == B_TRUE)) {
+		ddi_fm_ereport_post(nxgep->dip, class_name, ena, DDI_NOSLEEP,
+			FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
+			ERNAME_ERR_DCHAN, DATA_TYPE_UINT8, chan,
+			NULL);
+	} else if ((is_port == B_TRUE) && (is_chan == B_TRUE)) {
+		ddi_fm_ereport_post(nxgep->dip, class_name, ena, DDI_NOSLEEP,
+			FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
+			ERNAME_ERR_PORTN, DATA_TYPE_UINT8, portn,
+			ERNAME_ERR_DCHAN, DATA_TYPE_UINT8, chan,
+			NULL);
+	}
+}
+
+static nxge_fm_ereport_attr_t *
+nxge_fm_get_ereport_attr(nxge_fm_ereport_id_t ereport_id)
+{
+	nxge_fm_ereport_attr_t *attr;
+	uint8_t	blk_id = ((ereport_id >> EREPORT_FM_ID_SHIFT) &
+							EREPORT_FM_ID_MASK);
+	uint8_t index = (ereport_id & EREPORT_INDEX_MASK);
+
+	switch (blk_id) {
+	case FM_SW_ID:
+		attr = &nxge_fm_ereport_sw[index];
+		break;
+	case FM_PCS_ID:
+		attr = &nxge_fm_ereport_pcs[index];
+		break;
+	case FM_TXMAC_ID:
+		attr = &nxge_fm_ereport_txmac[index];
+		break;
+	case FM_RXMAC_ID:
+		attr = &nxge_fm_ereport_rxmac[index];
+		break;
+	case FM_MIF_ID:
+		attr = &nxge_fm_ereport_mif[index];
+		break;
+	case FM_FFLP_ID:
+		attr = &nxge_fm_ereport_fflp[index];
+		break;
+	case FM_ZCP_ID:
+		attr = &nxge_fm_ereport_zcp[index];
+		break;
+	case FM_RXDMA_ID:
+		attr = &nxge_fm_ereport_rdmc[index];
+		break;
+	case FM_TXDMA_ID:
+		attr = &nxge_fm_ereport_tdmc[index];
+		break;
+	case FM_IPP_ID:
+		attr = &nxge_fm_ereport_ipp[index];
+		break;
+	case FM_TXC_ID:
+		attr = &nxge_fm_ereport_txc[index];
+		break;
+	case FM_ESPC_ID:
+		attr = &nxge_fm_ereport_espc[index];
+		break;
+	default:
+		attr = NULL;
+	}
+
+	return (attr);
+}
+
+static void
+nxge_fm_ereport(p_nxge_t nxgep, uint8_t err_portn, uint8_t err_chan,
+					nxge_fm_ereport_attr_t *ereport)
+{
+	uint64_t		ena;
+	char			eclass[FM_MAX_CLASS];
+	char			*err_str;
+	p_nxge_stats_t		statsp;
+
+	(void) snprintf(eclass, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE,
+			ereport->eclass);
+	err_str = ereport->str;
+	ena = fm_ena_generate(0, FM_ENA_FMT1);
+	statsp = nxgep->statsp;
+
+	if (DDI_FM_EREPORT_CAP(nxgep->fm_capabilities)) {
+		switch (ereport->index) {
+		case NXGE_FM_EREPORT_XPCS_LINK_DOWN:
+		case NXGE_FM_EREPORT_XPCS_TX_LINK_FAULT:
+		case NXGE_FM_EREPORT_XPCS_RX_LINK_FAULT:
+		case NXGE_FM_EREPORT_PCS_LINK_DOWN:
+		case NXGE_FM_EREPORT_PCS_REMOTE_FAULT:
+			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
+				DDI_NOSLEEP,
+				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
+				ERNAME_ERR_PORTN, DATA_TYPE_UINT8, err_portn,
+				NULL);
+			break;
+		case NXGE_FM_EREPORT_IPP_EOP_MISS:
+		case NXGE_FM_EREPORT_IPP_SOP_MISS:
+			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
+				DDI_NOSLEEP,
+				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
+				ERNAME_ERR_PORTN, DATA_TYPE_UINT8, err_portn,
+				ERNAME_DFIFO_RD_PTR, DATA_TYPE_UINT16,
+					statsp->ipp_stats.errlog.dfifo_rd_ptr,
+				ERNAME_IPP_STATE_MACH, DATA_TYPE_UINT32,
+					statsp->ipp_stats.errlog.state_mach,
+				NULL);
+			break;
+		case NXGE_FM_EREPORT_IPP_DFIFO_UE:
+			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
+				DDI_NOSLEEP,
+				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
+				ERNAME_ERR_PORTN, DATA_TYPE_UINT8, err_portn,
+				ERNAME_DFIFO_ENTRY, DATA_TYPE_UINT16,
+				nxgep->ipp.status.bits.w0.dfifo_ecc_err_idx,
+				ERNAME_DFIFO_SYNDROME, DATA_TYPE_UINT16,
+					statsp->ipp_stats.errlog.ecc_syndrome,
+				NULL);
+			break;
+		case NXGE_FM_EREPORT_IPP_PFIFO_PERR:
+			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
+				DDI_NOSLEEP,
+				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
+				ERNAME_ERR_PORTN, DATA_TYPE_UINT8, err_portn,
+				ERNAME_PFIFO_ENTRY, DATA_TYPE_UINT8,
+				nxgep->ipp.status.bits.w0.pre_fifo_perr_idx,
+				NULL);
+			break;
+		case NXGE_FM_EREPORT_IPP_DFIFO_CE:
+		case NXGE_FM_EREPORT_IPP_ECC_ERR_MAX:
+			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
+				DDI_NOSLEEP,
+				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
+				ERNAME_ERR_PORTN, DATA_TYPE_UINT8, err_portn,
+				NULL);
+			break;
+		case NXGE_FM_EREPORT_IPP_PFIFO_OVER:
+		case NXGE_FM_EREPORT_IPP_PFIFO_UND:
+			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
+				DDI_NOSLEEP,
+				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
+				ERNAME_ERR_PORTN, DATA_TYPE_UINT8, err_portn,
+				ERNAME_IPP_STATE_MACH, DATA_TYPE_UINT32,
+					statsp->ipp_stats.errlog.state_mach,
+				NULL);
+			break;
+		case NXGE_FM_EREPORT_IPP_BAD_CS_MX:
+		case NXGE_FM_EREPORT_IPP_PKT_DIS_MX:
+			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
+				DDI_NOSLEEP,
+				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
+				ERNAME_ERR_PORTN, DATA_TYPE_UINT8, err_portn,
+				NULL);
+			break;
+		case NXGE_FM_EREPORT_FFLP_TCAM_ERR:
+			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
+				DDI_NOSLEEP,
+				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
+				ERNAME_TCAM_ERR_LOG, DATA_TYPE_UINT32,
+					statsp->fflp_stats.errlog.tcam,
+				NULL);
+			break;
+		case NXGE_FM_EREPORT_FFLP_VLAN_PAR_ERR:
+			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
+				DDI_NOSLEEP,
+				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
+				ERNAME_VLANTAB_ERR_LOG, DATA_TYPE_UINT32,
+					statsp->fflp_stats.errlog.vlan,
+				NULL);
+			break;
+		case NXGE_FM_EREPORT_FFLP_HASHT_DATA_ERR:
+		{
+			int rdc_grp;
+			hash_tbl_data_log_t hash_log;
+
+			for (rdc_grp = 0; rdc_grp < MAX_PARTITION; rdc_grp++) {
+				hash_log.value = nxgep->classifier.fflp_stats->
+						errlog.hash_pio[rdc_grp];
+				if (hash_log.bits.ldw.pio_err) {
+					ddi_fm_ereport_post(nxgep->dip, eclass,
+						ena, DDI_NOSLEEP,
+						FM_VERSION, DATA_TYPE_UINT8,
+						FM_EREPORT_VERS0,
+						ERNAME_HASHTAB_ERR_LOG,
+						DATA_TYPE_UINT32,
+						nxgep->classifier.fflp_stats->
+						errlog.hash_pio[rdc_grp], NULL);
+				}
+			}
+		}
+			break;
+		case NXGE_FM_EREPORT_FFLP_HASHT_LOOKUP_ERR:
+			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
+				DDI_NOSLEEP,
+				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
+				ERNAME_HASHT_LOOKUP_ERR_LOG0, DATA_TYPE_UINT32,
+					statsp->fflp_stats.errlog.hash_lookup1,
+				ERNAME_HASHT_LOOKUP_ERR_LOG1, DATA_TYPE_UINT32,
+					statsp->fflp_stats.errlog.hash_lookup2,
+				NULL);
+			break;
+		case NXGE_FM_EREPORT_RDMC_DCF_ERR:
+		case NXGE_FM_EREPORT_RDMC_RBR_TMOUT:
+		case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR:
+		case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS:
+		case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR:
+		case NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR:
+		case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR:
+		case NXGE_FM_EREPORT_RDMC_CONFIG_ERR:
+		case NXGE_FM_EREPORT_RDMC_RCRINCON:
+		case NXGE_FM_EREPORT_RDMC_RCRFULL:
+		case NXGE_FM_EREPORT_RDMC_RBRFULL:
+		case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE:
+		case NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE:
+		case NXGE_FM_EREPORT_RDMC_ID_MISMATCH:
+		case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR:
+		case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR:
+			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
+				DDI_NOSLEEP,
+				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
+				ERNAME_ERR_PORTN, DATA_TYPE_UINT8, err_portn,
+				ERNAME_ERR_DCHAN, DATA_TYPE_UINT8, err_chan,
+				NULL);
+			break;
+		case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR:
+		case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR:
+			{
+			uint32_t err_log;
+			if (ereport->index == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR)
+				err_log = (uint32_t)statsp->
+				rdc_stats[err_chan].errlog.pre_par.value;
+			else
+				err_log = (uint32_t)statsp->
+				rdc_stats[err_chan].errlog.sha_par.value;
+			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
+				DDI_NOSLEEP,
+				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
+				ERNAME_ERR_PORTN, DATA_TYPE_UINT8, err_portn,
+				ERNAME_ERR_DCHAN, DATA_TYPE_UINT8, err_chan,
+				ERNAME_RDMC_PAR_ERR_LOG, DATA_TYPE_UINT8,
+				err_log, NULL);
+			}
+			break;
+		case NXGE_FM_EREPORT_RDMC_COMPLETION_ERR:
+			{
+			uint8_t err_type;
+			err_type = statsp->
+				rdc_stats[err_chan].errlog.compl_err_type;
+			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
+				DDI_NOSLEEP,
+				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
+				ERNAME_ERR_PORTN, DATA_TYPE_UINT8, err_portn,
+				ERNAME_ERR_DCHAN, DATA_TYPE_UINT8, err_chan,
+				ERNAME_RDC_ERR_TYPE, DATA_TYPE_UINT8,
+				err_type, NULL);
+			}
+			break;
+
+		case NXGE_FM_EREPORT_ZCP_RRFIFO_UNDERRUN:
+		case NXGE_FM_EREPORT_ZCP_RRFIFO_OVERRUN:
+		case NXGE_FM_EREPORT_ZCP_BUFFER_OVERFLOW:
+			{
+			uint32_t sm;
+			sm = statsp->
+				zcp_stats.errlog.state_mach.bits.ldw.state;
+			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
+				DDI_NOSLEEP,
+				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
+				sm, DATA_TYPE_UINT32,
+				NULL);
+			break;
+			}
+		case NXGE_FM_EREPORT_ZCP_CFIFO_ECC:
+			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
+				DDI_NOSLEEP,
+				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
+				ERNAME_ERR_PORTN, DATA_TYPE_UINT8,
+				err_portn,
+				NULL);
+			break;
+		case NXGE_FM_EREPORT_ZCP_RSPFIFO_UNCORR_ERR:
+		case NXGE_FM_EREPORT_ZCP_STAT_TBL_PERR:
+		case NXGE_FM_EREPORT_ZCP_DYN_TBL_PERR:
+		case NXGE_FM_EREPORT_ZCP_BUF_TBL_PERR:
+		case NXGE_FM_EREPORT_ZCP_TT_PROGRAM_ERR:
+		case NXGE_FM_EREPORT_ZCP_RSP_TT_INDEX_ERR:
+		case NXGE_FM_EREPORT_ZCP_SLV_TT_INDEX_ERR:
+		case NXGE_FM_EREPORT_ZCP_TT_INDEX_ERR:
+		case NXGE_FM_EREPORT_RXMAC_UNDERFLOW:
+		case NXGE_FM_EREPORT_RXMAC_CRC_ERRCNT_EXP:
+		case NXGE_FM_EREPORT_RXMAC_LENGTH_ERRCNT_EXP:
+		case NXGE_FM_EREPORT_RXMAC_VIOL_ERRCNT_EXP:
+		case NXGE_FM_EREPORT_RXMAC_RXFRAG_CNT_EXP:
+		case NXGE_FM_EREPORT_RXMAC_LINKFAULT_CNT_EXP:
+		case NXGE_FM_EREPORT_RXMAC_ALIGN_ECNT_EXP:
+			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
+				DDI_NOSLEEP,
+				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
+				ERNAME_ERR_PORTN, DATA_TYPE_UINT8, err_portn,
+				NULL);
+			break;
+		case NXGE_FM_EREPORT_TDMC_MBOX_ERR:
+		case NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW:
+			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
+				DDI_NOSLEEP,
+				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
+				ERNAME_ERR_PORTN, DATA_TYPE_UINT8, err_portn,
+				ERNAME_ERR_DCHAN, DATA_TYPE_UINT8, err_chan,
+				NULL);
+			break;
+		case NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR:
+		case NXGE_FM_EREPORT_TDMC_NACK_PREF:
+		case NXGE_FM_EREPORT_TDMC_NACK_PKT_RD:
+		case NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR:
+		case NXGE_FM_EREPORT_TDMC_CONF_PART_ERR:
+		case NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR:
+			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
+				DDI_NOSLEEP,
+				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
+				ERNAME_ERR_PORTN, DATA_TYPE_UINT8, err_portn,
+				ERNAME_ERR_DCHAN, DATA_TYPE_UINT8, err_chan,
+				ERNAME_TDMC_ERR_LOG1, DATA_TYPE_UINT32,
+					statsp->
+					tdc_stats[err_chan].errlog.logl.value,
+				ERNAME_TDMC_ERR_LOG1, DATA_TYPE_UINT32,
+					statsp->
+					tdc_stats[err_chan].errlog.logh.value,
+				NULL);
+			break;
+		case NXGE_FM_EREPORT_TXC_RO_CORRECT_ERR:
+		case NXGE_FM_EREPORT_TXC_RO_UNCORRECT_ERR:
+			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
+				DDI_NOSLEEP,
+				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
+				ERNAME_ERR_PORTN, DATA_TYPE_UINT8, err_portn,
+				ERNAME_TXC_ROECC_ADDR, DATA_TYPE_UINT16,
+					statsp->txc_stats.errlog.ro_st.roecc.
+					bits.ldw.ecc_address,
+				ERNAME_TXC_ROECC_DATA0, DATA_TYPE_UINT32,
+					statsp->txc_stats.errlog.ro_st.d0.
+					bits.ldw.ro_ecc_data0,
+				ERNAME_TXC_ROECC_DATA1, DATA_TYPE_UINT32,
+					statsp->txc_stats.errlog.ro_st.d1.
+					bits.ldw.ro_ecc_data1,
+				ERNAME_TXC_ROECC_DATA2, DATA_TYPE_UINT32,
+					statsp->txc_stats.errlog.ro_st.d2.
+					bits.ldw.ro_ecc_data2,
+				ERNAME_TXC_ROECC_DATA3, DATA_TYPE_UINT32,
+					statsp->txc_stats.errlog.ro_st.d3.
+					bits.ldw.ro_ecc_data3,
+				ERNAME_TXC_ROECC_DATA4, DATA_TYPE_UINT32,
+					statsp->txc_stats.errlog.ro_st.d4.
+					bits.ldw.ro_ecc_data4,
+				NULL);
+			break;
+		case NXGE_FM_EREPORT_TXC_REORDER_ERR:
+			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
+				DDI_NOSLEEP,
+				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
+				ERNAME_DETAILED_ERR_TYPE, DATA_TYPE_STRING,
+					err_str,
+				ERNAME_ERR_PORTN, DATA_TYPE_UINT8, err_portn,
+				ERNAME_TXC_RO_STATE0, DATA_TYPE_UINT32,
+					(uint32_t)statsp->
+					txc_stats.errlog.ro_st.st0.value,
+				ERNAME_TXC_RO_STATE1, DATA_TYPE_UINT32,
+					(uint32_t)statsp->
+					txc_stats.errlog.ro_st.st1.value,
+				ERNAME_TXC_RO_STATE2, DATA_TYPE_UINT32,
+					(uint32_t)statsp->
+					txc_stats.errlog.ro_st.st2.value,
+				ERNAME_TXC_RO_STATE3, DATA_TYPE_UINT32,
+					(uint32_t)statsp->
+					txc_stats.errlog.ro_st.st3.value,
+				ERNAME_TXC_RO_STATE_CTL, DATA_TYPE_UINT32,
+					(uint32_t)statsp->
+					txc_stats.errlog.ro_st.ctl.value,
+				ERNAME_TXC_RO_TIDS, DATA_TYPE_UINT32,
+					(uint32_t)statsp->
+					txc_stats.errlog.ro_st.tids.value,
+				NULL);
+			break;
+		case NXGE_FM_EREPORT_TXC_SF_CORRECT_ERR:
+		case NXGE_FM_EREPORT_TXC_SF_UNCORRECT_ERR:
+			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
+				DDI_NOSLEEP,
+				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
+				ERNAME_ERR_PORTN, DATA_TYPE_UINT8, err_portn,
+				ERNAME_TXC_SFECC_ADDR, DATA_TYPE_UINT32,
+					statsp->txc_stats.errlog.sf_st.sfecc.
+					bits.ldw.ecc_address,
+				ERNAME_TXC_SFECC_DATA0, DATA_TYPE_UINT32,
+					statsp->txc_stats.errlog.sf_st.d0.
+					bits.ldw.sf_ecc_data0,
+				ERNAME_TXC_SFECC_DATA0, DATA_TYPE_UINT32,
+					statsp->txc_stats.errlog.sf_st.d1.
+					bits.ldw.sf_ecc_data1,
+				ERNAME_TXC_SFECC_DATA0, DATA_TYPE_UINT32,
+					statsp->txc_stats.errlog.sf_st.d2.
+					bits.ldw.sf_ecc_data2,
+				ERNAME_TXC_SFECC_DATA0, DATA_TYPE_UINT32,
+					statsp->txc_stats.errlog.sf_st.d3.
+					bits.ldw.sf_ecc_data3,
+				ERNAME_TXC_SFECC_DATA0, DATA_TYPE_UINT32,
+					statsp->txc_stats.errlog.sf_st.d4.
+					bits.ldw.sf_ecc_data4,
+				NULL);
+			break;
+		case NXGE_FM_EREPORT_TXMAC_UNDERFLOW:
+		case NXGE_FM_EREPORT_TXMAC_OVERFLOW:
+		case NXGE_FM_EREPORT_TXMAC_TXFIFO_XFR_ERR:
+		case NXGE_FM_EREPORT_TXMAC_MAX_PKT_ERR:
+		case NXGE_FM_EREPORT_SW_INVALID_PORT_NUM:
+		case NXGE_FM_EREPORT_SW_INVALID_CHAN_NUM:
+		case NXGE_FM_EREPORT_SW_INVALID_PARAM:
+			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
+				DDI_NOSLEEP,
+				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
+				ERNAME_ERR_PORTN, DATA_TYPE_UINT8, err_portn,
+				NULL);
+			break;
+		}
+
+	}
+}
+
+void
+nxge_fm_report_error(p_nxge_t nxgep, uint8_t err_portn, uint8_t err_chan,
+					nxge_fm_ereport_id_t fm_ereport_id)
+{
+	nxge_fm_ereport_attr_t	*fm_ereport_attr;
+
+	fm_ereport_attr = nxge_fm_get_ereport_attr(fm_ereport_id);
+
+	if (fm_ereport_attr != NULL) {
+		nxge_fm_ereport(nxgep, err_portn, err_chan, fm_ereport_attr);
+		ddi_fm_service_impact(nxgep->dip, fm_ereport_attr->impact);
+	}
+}
+
+int
+fm_check_acc_handle(ddi_acc_handle_t handle)
+{
+	ddi_fm_error_t err;
+
+	ddi_fm_acc_err_get(handle, &err, DDI_FME_VERSION);
+#ifndef	NXGE_FM_S10
+	ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
+#endif
+	return (err.fme_status);
+}
+
+int
+fm_check_dma_handle(ddi_dma_handle_t handle)
+{
+	ddi_fm_error_t err;
+
+	ddi_fm_dma_err_get(handle, &err, DDI_FME_VERSION);
+	return (err.fme_status);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nxge/nxge_fzc.c	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,1039 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#include	<nxge_impl.h>
+#include	<npi_mac.h>
+#include	<npi_rxdma.h>
+
+#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
+static int	nxge_herr2kerr(uint64_t);
+#endif
+
+/*
+ * The following interfaces are controlled by the
+ * function control registers. Some global registers
+ * are to be initialized by only one of the 2/4 functions.
+ * Use the test and set register.
+ */
+/*ARGSUSED*/
+nxge_status_t
+nxge_test_and_set(p_nxge_t nxgep, uint8_t tas)
+{
+	npi_handle_t		handle;
+	npi_status_t		rs = NPI_SUCCESS;
+
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	if ((rs = npi_dev_func_sr_sr_get_set_clear(handle, tas))
+			!= NPI_SUCCESS) {
+		return (NXGE_ERROR | rs);
+	}
+
+	return (NXGE_OK);
+}
+
+nxge_status_t
+nxge_set_fzc_multi_part_ctl(p_nxge_t nxgep, boolean_t mpc)
+{
+	npi_handle_t		handle;
+	npi_status_t		rs = NPI_SUCCESS;
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_set_fzc_multi_part_ctl"));
+
+	/*
+	 * In multi-partitioning, the partition manager
+	 * who owns function zero should set this multi-partition
+	 * control bit.
+	 */
+	if (nxgep->use_partition && nxgep->function_num) {
+		return (NXGE_ERROR);
+	}
+
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	if ((rs = npi_fzc_mpc_set(handle, mpc)) != NPI_SUCCESS) {
+		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+			"<== nxge_set_fzc_multi_part_ctl"));
+		return (NXGE_ERROR | rs);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_set_fzc_multi_part_ctl"));
+
+	return (NXGE_OK);
+}
+
+nxge_status_t
+nxge_get_fzc_multi_part_ctl(p_nxge_t nxgep, boolean_t *mpc_p)
+{
+	npi_handle_t		handle;
+	npi_status_t		rs = NPI_SUCCESS;
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_get_fzc_multi_part_ctl"));
+
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	if ((rs = npi_fzc_mpc_get(handle, mpc_p)) != NPI_SUCCESS) {
+		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+			"<== nxge_get_fzc_multi_part_ctl"));
+		return (NXGE_ERROR | rs);
+	}
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_get_fzc_multi_part_ctl"));
+
+	return (NXGE_OK);
+}
+
+/*
+ * System interrupt registers that are under function zero
+ * management.
+ */
+nxge_status_t
+nxge_fzc_intr_init(p_nxge_t nxgep)
+{
+	nxge_status_t	status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_fzc_intr_init"));
+
+	/* Configure the initial timer resolution */
+	if ((status = nxge_fzc_intr_tmres_set(nxgep)) != NXGE_OK) {
+		return (status);
+	}
+
+	switch (nxgep->niu_type) {
+	case NEPTUNE:
+	case NEPTUNE_2:
+		/*
+		 * Set up the logical device group's logical devices that
+		 * the group owns.
+		 */
+		if ((status = nxge_fzc_intr_ldg_num_set(nxgep))
+				!= NXGE_OK) {
+			break;
+		}
+
+		/* Configure the system interrupt data */
+		if ((status = nxge_fzc_intr_sid_set(nxgep)) != NXGE_OK) {
+			break;
+		}
+
+		break;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_fzc_intr_init"));
+
+	return (status);
+}
+
+nxge_status_t
+nxge_fzc_intr_ldg_num_set(p_nxge_t nxgep)
+{
+	p_nxge_ldg_t	ldgp;
+	p_nxge_ldv_t	ldvp;
+	npi_handle_t	handle;
+	int		i, j;
+	npi_status_t	rs = NPI_SUCCESS;
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_fzc_intr_ldg_num_set"));
+
+	if (nxgep->ldgvp == NULL) {
+		return (NXGE_ERROR);
+	}
+
+	ldgp = nxgep->ldgvp->ldgp;
+	ldvp = nxgep->ldgvp->ldvp;
+	if (ldgp == NULL || ldvp == NULL) {
+		return (NXGE_ERROR);
+	}
+
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+
+	for (i = 0; i < nxgep->ldgvp->ldg_intrs; i++, ldgp++) {
+		NXGE_DEBUG_MSG((nxgep, INT_CTL,
+			"==> nxge_fzc_intr_ldg_num_set "
+			"<== nxge_f(Neptune): # ldv %d "
+			"in group %d", ldgp->nldvs, ldgp->ldg));
+
+		for (j = 0; j < ldgp->nldvs; j++, ldvp++) {
+			rs = npi_fzc_ldg_num_set(handle, ldvp->ldv,
+				ldvp->ldg_assigned);
+			if (rs != NPI_SUCCESS) {
+				NXGE_DEBUG_MSG((nxgep, INT_CTL,
+					"<== nxge_fzc_intr_ldg_num_set failed "
+					" rs 0x%x ldv %d ldg %d",
+					rs, ldvp->ldv, ldvp->ldg_assigned));
+				return (NXGE_ERROR | rs);
+			}
+			NXGE_DEBUG_MSG((nxgep, INT_CTL,
+				"<== nxge_fzc_intr_ldg_num_set OK "
+				" ldv %d ldg %d",
+				ldvp->ldv, ldvp->ldg_assigned));
+		}
+	}
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_fzc_intr_ldg_num_set"));
+
+	return (NXGE_OK);
+}
+
+nxge_status_t
+nxge_fzc_intr_tmres_set(p_nxge_t nxgep)
+{
+	npi_handle_t	handle;
+	npi_status_t	rs = NPI_SUCCESS;
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_fzc_intr_tmres_set"));
+	if (nxgep->ldgvp == NULL) {
+		return (NXGE_ERROR);
+	}
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	if ((rs = npi_fzc_ldg_timer_res_set(handle, nxgep->ldgvp->tmres))) {
+		return (NXGE_ERROR | rs);
+	}
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_fzc_intr_tmres_set"));
+
+	return (NXGE_OK);
+}
+
+nxge_status_t
+nxge_fzc_intr_sid_set(p_nxge_t nxgep)
+{
+	npi_handle_t	handle;
+	p_nxge_ldg_t	ldgp;
+	fzc_sid_t	sid;
+	int		i;
+	npi_status_t	rs = NPI_SUCCESS;
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_fzc_intr_sid_set"));
+	if (nxgep->ldgvp == NULL) {
+		NXGE_DEBUG_MSG((nxgep, INT_CTL,
+			"<== nxge_fzc_intr_sid_set: no ldg"));
+		return (NXGE_ERROR);
+	}
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	ldgp = nxgep->ldgvp->ldgp;
+	NXGE_DEBUG_MSG((nxgep, INT_CTL,
+		"==> nxge_fzc_intr_sid_set: #int %d", nxgep->ldgvp->ldg_intrs));
+	for (i = 0; i < nxgep->ldgvp->ldg_intrs; i++, ldgp++) {
+		sid.ldg = ldgp->ldg;
+		sid.niu = B_FALSE;
+		sid.func = ldgp->func;
+		sid.vector = ldgp->vector;
+		NXGE_DEBUG_MSG((nxgep, INT_CTL,
+			"==> nxge_fzc_intr_sid_set(%d): func %d group %d "
+			"vector %d",
+			i, sid.func, sid.ldg, sid.vector));
+		rs = npi_fzc_sid_set(handle, sid);
+		if (rs != NPI_SUCCESS) {
+			NXGE_DEBUG_MSG((nxgep, INT_CTL,
+				"<== nxge_fzc_intr_sid_set:failed 0x%x",
+				rs));
+			return (NXGE_ERROR | rs);
+		}
+	}
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_fzc_intr_sid_set"));
+
+	return (NXGE_OK);
+
+}
+
+/*
+ * Receive DMA registers that are under function zero
+ * management.
+ */
+/*ARGSUSED*/
+nxge_status_t
+nxge_init_fzc_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
+	p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
+{
+	nxge_status_t	status = NXGE_OK;
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_init_fzc_rxdma_channel"));
+
+	switch (nxgep->niu_type) {
+	case NEPTUNE:
+	case NEPTUNE_2:
+	default:
+		/* Initialize the RXDMA logical pages */
+		status = nxge_init_fzc_rxdma_channel_pages(nxgep, channel,
+			rbr_p);
+		if (status != NXGE_OK) {
+			return (status);
+		}
+
+		break;
+
+#ifndef	NIU_HV_WORKAROUND
+	case N2_NIU:
+#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
+		NXGE_DEBUG_MSG((nxgep, RX_CTL,
+			"==> nxge_init_fzc_rxdma_channel: N2_NIU - call HV "
+			"set up logical pages"));
+		/* Initialize the RXDMA logical pages */
+		status = nxge_init_hv_fzc_rxdma_channel_pages(nxgep, channel,
+			rbr_p);
+		if (status != NXGE_OK) {
+			return (status);
+		}
+#endif
+		break;
+#else
+	case N2_NIU:
+		NXGE_DEBUG_MSG((nxgep, RX_CTL,
+			"==> nxge_init_fzc_rxdma_channel: N2_NIU - NEED to "
+			"set up logical pages"));
+		/* Initialize the RXDMA logical pages */
+		status = nxge_init_fzc_rxdma_channel_pages(nxgep, channel,
+			rbr_p);
+		if (status != NXGE_OK) {
+			return (status);
+		}
+
+		break;
+#endif
+	}
+
+	/* Configure RED parameters */
+	status = nxge_init_fzc_rxdma_channel_red(nxgep, channel, rcr_p);
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_init_fzc_rxdma_channel"));
+	return (status);
+}
+
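+/*
+ * Program the two logical pages (valid bit, value, mask and relocation)
+ * and the logical page handle for one RXDMA channel, using the values
+ * saved in the RBR ring structure.
+ */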
+/*ARGSUSED*/
+nxge_status_t
+nxge_init_fzc_rxdma_channel_pages(p_nxge_t nxgep,
+		uint16_t channel, p_rx_rbr_ring_t rbrp)
+{
+	npi_handle_t		handle;
+	dma_log_page_t		cfg;
+	npi_status_t		rs = NPI_SUCCESS;
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+		"==> nxge_init_fzc_rxdma_channel_pages"));
+
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	/*
+	 * Initialize logical page 1.
+	 */
+	cfg.func_num = nxgep->function_num;
+	cfg.page_num = 0;
+	cfg.valid = rbrp->page_valid.bits.ldw.page0;
+	cfg.value = rbrp->page_value_1.value;
+	cfg.mask = rbrp->page_mask_1.value;
+	cfg.reloc = rbrp->page_reloc_1.value;
+	rs = npi_rxdma_cfg_logical_page(handle, channel,
+			(p_dma_log_page_t)&cfg);
+	if (rs != NPI_SUCCESS) {
+		return (NXGE_ERROR | rs);
+	}
+
+	/*
+	 * Initialize logical page 2.
+	 */
+	cfg.page_num = 1;
+	cfg.valid = rbrp->page_valid.bits.ldw.page1;
+	cfg.value = rbrp->page_value_2.value;
+	cfg.mask = rbrp->page_mask_2.value;
+	cfg.reloc = rbrp->page_reloc_2.value;
+
+	rs = npi_rxdma_cfg_logical_page(handle, channel, &cfg);
+	if (rs != NPI_SUCCESS) {
+		return (NXGE_ERROR | rs);
+	}
+
+	/* Initialize the page handle */
+	rs = npi_rxdma_cfg_logical_page_handle(handle, channel,
+			rbrp->page_hdl.bits.ldw.handle);
+
+	if (rs != NPI_SUCCESS) {
+		return (NXGE_ERROR | rs);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+		"<== nxge_init_fzc_rxdma_channel_pages"));
+
+	return (NXGE_OK);
+}
+
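+/*
+ * Configure the WRED (random early discard) parameters for one RXDMA
+ * channel; the discard thresholds are derived from the size of the
+ * receive completion ring.
+ */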
+/*ARGSUSED*/
+nxge_status_t
+nxge_init_fzc_rxdma_channel_red(p_nxge_t nxgep,
+	uint16_t channel, p_rx_rcr_ring_t rcr_p)
+{
+	npi_handle_t		handle;
+	rdc_red_para_t		red;
+	npi_status_t		rs = NPI_SUCCESS;
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_init_fzc_rxdma_channel_red"));
+
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	red.value = 0;
+	red.bits.ldw.win = RXDMA_RED_WINDOW_DEFAULT;
+	red.bits.ldw.thre = (rcr_p->comp_size - RXDMA_RED_LESS_ENTRIES);
+	red.bits.ldw.win_syn = RXDMA_RED_WINDOW_DEFAULT;
+	red.bits.ldw.thre_sync = (rcr_p->comp_size - RXDMA_RED_LESS_ENTRIES);
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+		"==> nxge_init_fzc_rxdma_channel_red(thre_sync %d(%x))",
+		red.bits.ldw.thre_sync,
+		red.bits.ldw.thre_sync));
+
+	rs = npi_rxdma_cfg_wred_param(handle, channel, &red);
+	if (rs != NPI_SUCCESS) {
+		return (NXGE_ERROR | rs);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+		"<== nxge_init_fzc_rxdma_channel_red"));
+
+	return (NXGE_OK);
+}
+
+/*ARGSUSED*/
+nxge_status_t
+nxge_init_fzc_txdma_channel(p_nxge_t nxgep, uint16_t channel,
+	p_tx_ring_t tx_ring_p, p_tx_mbox_t mbox_p)
+{
+	nxge_status_t	status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+		"==> nxge_init_fzc_txdma_channel"));
+
+	switch (nxgep->niu_type) {
+	case NEPTUNE:
+	case NEPTUNE_2:
+	default:
+		/* Initialize the TXDMA logical pages */
+		(void) nxge_init_fzc_txdma_channel_pages(nxgep, channel,
+			tx_ring_p);
+		break;
+
+#ifndef	NIU_HV_WORKAROUND
+	case N2_NIU:
+#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
+		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+			"==> nxge_init_fzc_txdma_channel "
+			"N2_NIU: call HV to set up txdma logical pages"));
+		status = nxge_init_hv_fzc_txdma_channel_pages(nxgep, channel,
+			tx_ring_p);
+		if (status != NXGE_OK) {
+			return (status);
+		}
+#endif
+		break;
+#else
+	case N2_NIU:
+		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+			"==> nxge_init_fzc_txdma_channel "
+			"N2_NIU: NEED to set up txdma logical pages"));
+		/* Initialize the TXDMA logical pages */
+		(void) nxge_init_fzc_txdma_channel_pages(nxgep, channel,
+			tx_ring_p);
+		break;
+#endif
+	}
+
+	/*
+	 * Configure Transmit DRR Weight parameters
+	 * (It actually programs the TXC max burst register).
+	 */
+	(void) nxge_init_fzc_txdma_channel_drr(nxgep, channel, tx_ring_p);
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+		"<== nxge_init_fzc_txdma_channel"));
+	return (status);
+}
+
+nxge_status_t
+nxge_init_fzc_common(p_nxge_t nxgep)
+{
+	nxge_status_t	status = NXGE_OK;
+
+	(void) nxge_init_fzc_rx_common(nxgep);
+
+	return (status);
+}
+
+nxge_status_t
+nxge_init_fzc_rx_common(p_nxge_t nxgep)
+{
+	npi_handle_t	handle;
+	npi_status_t	rs = NPI_SUCCESS;
+	nxge_status_t	status = NXGE_OK;
+	clock_t		lbolt;
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_init_fzc_rx_common"));
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	if (!handle.regp) {
+		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+			"==> nxge_init_fzc_rx_common null ptr"));
+		return (NXGE_ERROR);
+	}
+
+	/*
+	 * Configure the RXDMA clock divider.
+	 * This sets the granularity counter, which is based on
+	 * the hardware system clock (i.e. 300 MHz), giving a
+	 * tick of roughly 3.3 nanoseconds.
+	 * Set the clock divider counter to 1000 to get
+	 * (approximately) microsecond granularity.
+	 * For example, a 3 microsecond timeout is programmed
+	 * as a value of 1.
+	 */
+	rs = npi_rxdma_cfg_clock_div_set(handle, RXDMA_CK_DIV_DEFAULT);
+	if (rs != NPI_SUCCESS)
+		return (NXGE_ERROR | rs);
+
+#if defined(__i386)
+	rs = npi_rxdma_cfg_32bitmode_enable(handle);
+	if (rs != NPI_SUCCESS)
+		return (NXGE_ERROR | rs);
+	rs = npi_txdma_mode32_set(handle, B_TRUE);
+	if (rs != NPI_SUCCESS)
+		return (NXGE_ERROR | rs);
+#endif
+
+	/*
+	 * Enable WRED and program an initial value.
+	 * Use time to set the initial random number.
+	 */
+	(void) drv_getparm(LBOLT, &lbolt);
+	rs = npi_rxdma_cfg_red_rand_init(handle, (uint16_t)lbolt);
+	if (rs != NPI_SUCCESS)
+		return (NXGE_ERROR | rs);
+
+	/* Initialize the RDC tables for each group */
+	status = nxge_init_fzc_rdc_tbl(nxgep);
+
+
+	/* Ethernet Timeout Counter (?) */
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+		"<== nxge_init_fzc_rx_common:status 0x%08x", status));
+
+	return (status);
+}
+
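+/*
+ * Program one RDC table per configured RDC group, starting at this
+ * partition's first RDC group id.
+ */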
+nxge_status_t
+nxge_init_fzc_rdc_tbl(p_nxge_t nxgep)
+{
+	npi_handle_t		handle;
+	p_nxge_dma_pt_cfg_t	p_dma_cfgp;
+	p_nxge_hw_pt_cfg_t	p_cfgp;
+	p_nxge_rdc_grp_t	rdc_grp_p;
+	uint8_t 		grp_tbl_id;
+	int			ngrps;
+	int			i;
+	npi_status_t		rs = NPI_SUCCESS;
+	nxge_status_t		status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_init_fzc_rdc_tbl"));
+
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
+	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
+
+	grp_tbl_id = p_cfgp->start_rdc_grpid;
+	rdc_grp_p = &p_dma_cfgp->rdc_grps[0];
+	ngrps = p_cfgp->max_rdc_grpids;
+	for (i = 0; i < ngrps; i++, rdc_grp_p++) {
+		rs = npi_rxdma_cfg_rdc_table(handle, grp_tbl_id++,
+			rdc_grp_p->rdc);
+		if (rs != NPI_SUCCESS) {
+			status = NXGE_ERROR | rs;
+			break;
+		}
+	}
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_init_fzc_rdc_tbl"));
+	return (status);
+}
+
+nxge_status_t
+nxge_init_fzc_rxdma_port(p_nxge_t nxgep)
+{
+	npi_handle_t		handle;
+	p_nxge_dma_pt_cfg_t	p_all_cfgp;
+	p_nxge_hw_pt_cfg_t	p_cfgp;
+	hostinfo_t 		hostinfo;
+	int			i;
+	npi_status_t		rs = NPI_SUCCESS;
+	p_nxge_class_pt_cfg_t 	p_class_cfgp;
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_init_fzc_rxdma_port"));
+
+	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
+	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	/*
+	 * Initialize the port scheduler DRR weight.
+	 * npi_rxdma_cfg_port_ddr_weight();
+	 */
+
+	if (nxgep->niu_type == NEPTUNE) {
+		if ((nxgep->mac.portmode == PORT_1G_COPPER) ||
+			(nxgep->mac.portmode == PORT_1G_FIBER)) {
+			rs = npi_rxdma_cfg_port_ddr_weight(handle,
+							    nxgep->function_num,
+							    NXGE_RX_DRR_WT_1G);
+			if (rs != NPI_SUCCESS) {
+				return (NXGE_ERROR | rs);
+			}
+		}
+	}
+
+	/* Program the default RDC of a port */
+	rs = npi_rxdma_cfg_default_port_rdc(handle, nxgep->function_num,
+			p_cfgp->def_rdc);
+	if (rs != NPI_SUCCESS) {
+		return (NXGE_ERROR | rs);
+	}
+
+	/*
+	 * Configure the MAC host info table with RDC tables
+	 */
+	hostinfo.value = 0;
+	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
+	for (i = 0; i < p_cfgp->max_macs; i++) {
+		hostinfo.bits.w0.rdc_tbl_num = p_cfgp->start_rdc_grpid;
+		hostinfo.bits.w0.mac_pref = p_cfgp->mac_pref;
+		if (p_class_cfgp->mac_host_info[i].flag) {
+			hostinfo.bits.w0.rdc_tbl_num =
+				p_class_cfgp->mac_host_info[i].rdctbl;
+			hostinfo.bits.w0.mac_pref =
+				p_class_cfgp->mac_host_info[i].mpr_npr;
+		}
+
+		rs = npi_mac_hostinfo_entry(handle, OP_SET,
+				nxgep->function_num, i, &hostinfo);
+		if (rs != NPI_SUCCESS)
+			return (NXGE_ERROR | rs);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+		"<== nxge_init_fzc_rxdma_port rs 0x%08x", rs));
+
+	return (NXGE_OK);
+
+}
+
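+/*
+ * Set the default receive DMA channel for the given port.
+ */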
+nxge_status_t
+nxge_fzc_dmc_def_port_rdc(p_nxge_t nxgep, uint8_t port, uint16_t rdc)
+{
+	npi_status_t rs = NPI_SUCCESS;
+	rs = npi_rxdma_cfg_default_port_rdc(nxgep->npi_reg_handle,
+				    port, rdc);
+	if (rs & NPI_FAILURE)
+		return (NXGE_ERROR | rs);
+	return (NXGE_OK);
+}
+
+nxge_status_t
+nxge_init_fzc_txdma_channel_pages(p_nxge_t nxgep, uint16_t channel,
+	p_tx_ring_t tx_ring_p)
+{
+	npi_handle_t		handle;
+	dma_log_page_t		cfg;
+	npi_status_t		rs = NPI_SUCCESS;
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+		"==> nxge_init_fzc_txdma_channel_pages"));
+
+#ifndef	NIU_HV_WORKAROUND
+	if (nxgep->niu_type == N2_NIU) {
+		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+			"<== nxge_init_fzc_txdma_channel_pages: "
+			"N2_NIU: no need to set txdma logical pages"));
+		return (NXGE_OK);
+	}
+#else
+	if (nxgep->niu_type == N2_NIU) {
+		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+			"<== nxge_init_fzc_txdma_channel_pages: "
+			"N2_NIU: NEED to set txdma logical pages"));
+	}
+#endif
+
+	/*
+	 * Initialize logical page 1.
+	 */
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	cfg.func_num = nxgep->function_num;
+	cfg.page_num = 0;
+	cfg.valid = tx_ring_p->page_valid.bits.ldw.page0;
+	cfg.value = tx_ring_p->page_value_1.value;
+	cfg.mask = tx_ring_p->page_mask_1.value;
+	cfg.reloc = tx_ring_p->page_reloc_1.value;
+
+	rs = npi_txdma_log_page_set(handle, channel,
+		(p_dma_log_page_t)&cfg);
+	if (rs != NPI_SUCCESS) {
+		return (NXGE_ERROR | rs);
+	}
+
+	/*
+	 * Initialize logical page 2.
+	 */
+	cfg.page_num = 1;
+	cfg.valid = tx_ring_p->page_valid.bits.ldw.page1;
+	cfg.value = tx_ring_p->page_value_2.value;
+	cfg.mask = tx_ring_p->page_mask_2.value;
+	cfg.reloc = tx_ring_p->page_reloc_2.value;
+
+	rs = npi_txdma_log_page_set(handle, channel, &cfg);
+	if (rs != NPI_SUCCESS) {
+		return (NXGE_ERROR | rs);
+	}
+
+	/* Initialize the page handle */
+	rs = npi_txdma_log_page_handle_set(handle, channel,
+			&tx_ring_p->page_hdl);
+
+	if (rs == NPI_SUCCESS) {
+		return (NXGE_OK);
+	} else {
+		return (NXGE_ERROR | rs);
+	}
+}
+
+
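+/*
+ * Program the TXC DMA maximum burst size (the DRR weight) for one
+ * transmit channel.
+ */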
+nxge_status_t
+nxge_init_fzc_txdma_channel_drr(p_nxge_t nxgep, uint16_t channel,
+	p_tx_ring_t tx_ring_p)
+{
+	npi_status_t	rs = NPI_SUCCESS;
+	npi_handle_t	handle;
+
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	rs = npi_txc_dma_max_burst_set(handle, channel,
+			tx_ring_p->max_burst.value);
+	if (rs == NPI_SUCCESS) {
+		return (NXGE_OK);
+	} else {
+		return (NXGE_ERROR | rs);
+	}
+}
+
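+/*
+ * Program the function-zero system error interrupt mask.
+ */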
+nxge_status_t
+nxge_fzc_sys_err_mask_set(p_nxge_t nxgep, uint64_t mask)
+{
+	npi_status_t	rs = NPI_SUCCESS;
+	npi_handle_t	handle;
+
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	rs = npi_fzc_sys_err_mask_set(handle, mask);
+	if (rs == NPI_SUCCESS) {
+		return (NXGE_OK);
+	} else {
+		return (NXGE_ERROR | rs);
+	}
+}
+
+#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
+nxge_status_t
+nxge_init_hv_fzc_txdma_channel_pages(p_nxge_t nxgep, uint16_t channel,
+	p_tx_ring_t tx_ring_p)
+{
+	int			err;
+	uint64_t		hverr;
+#ifdef	DEBUG
+	uint64_t		ra, size;
+#endif
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL,
+		"==> nxge_init_hv_fzc_txdma_channel_pages"));
+
+	if (tx_ring_p->hv_set) {
+		return (NXGE_OK);
+	}
+
+	/*
+	 * Initialize logical page 1 for data buffers.
+	 */
+	hverr = hv_niu_tx_logical_page_conf((uint64_t)channel,
+			(uint64_t)0,
+			tx_ring_p->hv_tx_buf_base_ioaddr_pp,
+			tx_ring_p->hv_tx_buf_ioaddr_size);
+
+	err = (nxge_status_t)nxge_herr2kerr(hverr);
+	if (err != 0) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"<== nxge_init_hv_fzc_txdma_channel_pages: channel %d "
+			"error status 0x%x "
+			"(page 0 data buf) hverr 0x%llx "
+			"ioaddr_pp $%p "
+			"size 0x%llx ",
+			channel,
+			err,
+			hverr,
+			tx_ring_p->hv_tx_buf_base_ioaddr_pp,
+			tx_ring_p->hv_tx_buf_ioaddr_size));
+		return (NXGE_ERROR | err);
+	}
+
+#ifdef	DEBUG
+	ra = size = 0;
+	hverr = hv_niu_tx_logical_page_info((uint64_t)channel,
+			(uint64_t)0,
+			&ra,
+			&size);
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL,
+		"==> nxge_init_hv_fzc_txdma_channel_pages: channel %d "
+		"ok status 0x%x "
+		"(page 0 data buf) hverr 0x%llx "
+		"set ioaddr_pp $%p "
+		"set size 0x%llx "
+		"get ra ioaddr_pp $%p "
+		"get size 0x%llx ",
+		channel,
+		err,
+		hverr,
+		tx_ring_p->hv_tx_buf_base_ioaddr_pp,
+		tx_ring_p->hv_tx_buf_ioaddr_size,
+		ra,
+		size));
+#endif
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL,
+		"==> nxge_init_hv_fzc_txdma_channel_pages: channel %d "
+		"(page 0 data buf) hverr 0x%llx "
+		"ioaddr_pp $%p "
+		"size 0x%llx ",
+		channel,
+		hverr,
+		tx_ring_p->hv_tx_buf_base_ioaddr_pp,
+		tx_ring_p->hv_tx_buf_ioaddr_size));
+
+	/*
+	 * Initialize logical page 2 for control buffers.
+	 */
+	hverr = hv_niu_tx_logical_page_conf((uint64_t)channel,
+			(uint64_t)1,
+			tx_ring_p->hv_tx_cntl_base_ioaddr_pp,
+			tx_ring_p->hv_tx_cntl_ioaddr_size);
+
+	err = (nxge_status_t)nxge_herr2kerr(hverr);
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL,
+		"==> nxge_init_hv_fzc_txdma_channel_pages: channel %d "
+		"ok status 0x%x "
+		"(page 1 cntl buf) hverr 0x%llx "
+		"ioaddr_pp $%p "
+		"size 0x%llx ",
+		channel,
+		err,
+		hverr,
+		tx_ring_p->hv_tx_cntl_base_ioaddr_pp,
+		tx_ring_p->hv_tx_cntl_ioaddr_size));
+
+	if (err != 0) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"<== nxge_init_hv_fzc_txdma_channel_pages: channel %d "
+			"error status 0x%x "
+			"(page 1 cntl buf) hverr 0x%llx "
+			"ioaddr_pp $%p "
+			"size 0x%llx ",
+			channel,
+			err,
+			hverr,
+			tx_ring_p->hv_tx_cntl_base_ioaddr_pp,
+			tx_ring_p->hv_tx_cntl_ioaddr_size));
+		return (NXGE_ERROR | err);
+	}
+
+#ifdef	DEBUG
+	ra = size = 0;
+	hverr = hv_niu_tx_logical_page_info((uint64_t)channel,
+			(uint64_t)1,
+			&ra,
+			&size);
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL,
+		"==> nxge_init_hv_fzc_txdma_channel_pages: channel %d "
+		"(page 1 cntl buf) hverr 0x%llx "
+		"set ioaddr_pp $%p "
+		"set size 0x%llx "
+		"get ra ioaddr_pp $%p "
+		"get size 0x%llx ",
+		channel,
+		hverr,
+		tx_ring_p->hv_tx_cntl_base_ioaddr_pp,
+		tx_ring_p->hv_tx_cntl_ioaddr_size,
+		ra,
+		size));
+#endif
+
+	tx_ring_p->hv_set = B_TRUE;
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL,
+		"<== nxge_init_hv_fzc_txdma_channel_pages"));
+
+	return (NXGE_OK);
+}
+
+/*ARGSUSED*/
+nxge_status_t
+nxge_init_hv_fzc_rxdma_channel_pages(p_nxge_t nxgep,
+		uint16_t channel, p_rx_rbr_ring_t rbrp)
+{
+	int			err;
+	uint64_t		hverr;
+#ifdef	DEBUG
+	uint64_t		ra, size;
+#endif
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL,
+		"==> nxge_init_hv_fzc_rxdma_channel_pages"));
+
+	if (rbrp->hv_set) {
+		return (NXGE_OK);
+	}
+
+	/* Initialize data buffers for page 0 */
+	hverr = hv_niu_rx_logical_page_conf((uint64_t)channel,
+			(uint64_t)0,
+			rbrp->hv_rx_buf_base_ioaddr_pp,
+			rbrp->hv_rx_buf_ioaddr_size);
+	err = (nxge_status_t)nxge_herr2kerr(hverr);
+	if (err != 0) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"<== nxge_init_hv_fzc_rxdma_channel_pages: channel %d "
+			"error status 0x%x "
+			"(page 0 data buf) hverr 0x%llx "
+			"ioaddr_pp $%p "
+			"size 0x%llx ",
+			channel,
+			err,
+			hverr,
+			rbrp->hv_rx_buf_base_ioaddr_pp,
+			rbrp->hv_rx_buf_ioaddr_size));
+
+		return (NXGE_ERROR | err);
+	}
+
+#ifdef	DEBUG
+	ra = size = 0;
+	(void) hv_niu_rx_logical_page_info((uint64_t)channel,
+			(uint64_t)0,
+			&ra,
+			&size);
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL,
+		"==> nxge_init_hv_fzc_rxdma_channel_pages: channel %d "
+		"ok status 0x%x "
+		"(page 0 data buf) hverr 0x%llx "
+		"set databuf ioaddr_pp $%p "
+		"set databuf size 0x%llx "
+		"get databuf ra ioaddr_pp %p "
+		"get databuf size 0x%llx",
+		channel,
+		err,
+		hverr,
+		rbrp->hv_rx_buf_base_ioaddr_pp,
+		rbrp->hv_rx_buf_ioaddr_size,
+		ra,
+		size));
+#endif
+
+	/* Initialize control buffers for logical page 1.  */
+	hverr = hv_niu_rx_logical_page_conf((uint64_t)channel,
+			(uint64_t)1,
+			rbrp->hv_rx_cntl_base_ioaddr_pp,
+			rbrp->hv_rx_cntl_ioaddr_size);
+
+	err = (nxge_status_t)nxge_herr2kerr(hverr);
+	if (err != 0) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"<== nxge_init_hv_fzc_rxdma_channel_pages: channel %d "
+			"error status 0x%x "
+			"(page 1 cntl buf) hverr 0x%llx "
+			"ioaddr_pp $%p "
+			"size 0x%llx ",
+			channel,
+			err,
+			hverr,
+			rbrp->hv_rx_buf_base_ioaddr_pp,
+			rbrp->hv_rx_buf_ioaddr_size));
+
+		return (NXGE_ERROR | err);
+	}
+
+#ifdef	DEBUG
+	ra = size = 0;
+	(void) hv_niu_rx_logical_page_info((uint64_t)channel,
+			(uint64_t)1,
+			&ra,
+			&size);
+
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL,
+		"==> nxge_init_hv_fzc_rxdma_channel_pages: channel %d "
+		"ok status 0x%x "
+		"(page 1 cntl buf) hverr 0x%llx "
+		"set cntl ioaddr_pp $%p "
+		"set cntl size 0x%llx "
+		"get cntl ioaddr_pp $%p "
+		"get cntl size 0x%llx ",
+		channel,
+		err,
+		hverr,
+		rbrp->hv_rx_cntl_base_ioaddr_pp,
+		rbrp->hv_rx_cntl_ioaddr_size,
+		ra,
+		size));
+#endif
+
+	rbrp->hv_set = B_FALSE;
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL,
+		"<== nxge_init_hv_fzc_rxdma_channel_pages"));
+
+	return (NXGE_OK);
+}
+
+/*
+ * Map hypervisor error code to errno. Only
+ * H_ENORADDR, H_EBADALIGN and H_EINVAL are meaningful
+ * for niu driver. Any other error codes are mapped to EINVAL.
+ */
+static int
+nxge_herr2kerr(uint64_t hv_errcode)
+{
+	int	s_errcode;
+
+	switch (hv_errcode) {
+	case H_ENORADDR:
+	case H_EBADALIGN:
+		s_errcode = EFAULT;
+		break;
+	case H_EOK:
+		s_errcode = 0;
+		break;
+	default:
+		s_errcode = EINVAL;
+		break;
+	}
+	return (s_errcode);
+}
+
+#endif	/* sun4v and NIU_LP_WORKAROUND */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nxge/nxge_hcall.s	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,114 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+/*
+ * Hypervisor calls made by the niu leaf driver.
+ */
+
+#include <sys/asm_linkage.h>
+#include <sys/hypervisor_api.h>
+#include <sys/nxge/nxge_impl.h>
+
+#if defined(lint) || defined(__lint)
+
+/*ARGSUSED*/
+uint64_t
+hv_niu_rx_logical_page_conf(uint64_t chidx, uint64_t pgidx,
+	uint64_t raddr, uint64_t size)
+{ return (0); }
+
+/*ARGSUSED*/
+uint64_t
+hv_niu_rx_logical_page_info(uint64_t chidx, uint64_t pgidx,
+	uint64_t *raddr, uint64_t *size)
+{ return (0); }
+
+/*ARGSUSED*/
+uint64_t
+hv_niu_tx_logical_page_conf(uint64_t chidx, uint64_t pgidx,
+	uint64_t raddr, uint64_t size)
+{ return (0); }
+
+/*ARGSUSED*/
+uint64_t
+hv_niu_tx_logical_page_info(uint64_t chidx, uint64_t pgidx,
+	uint64_t *raddr, uint64_t *size)
+{ return (0); }
+
+#else	/* lint || __lint */
+
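+	/*
+	 * Each stub below loads the hypervisor fast-trap function number
+	 * into %o5 and issues the FAST_TRAP; the hypervisor status comes
+	 * back in %o0.  For the *_info calls the returned real address and
+	 * size come back in %o1/%o2 and are stored through the caller's
+	 * pointers, which are saved in %g1/%g2 before the trap.
+	 */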
+	/*
+	 * hv_niu_rx_logical_page_conf(uint64_t chidx, uint64_t pgidx,
+	 *	uint64_t raddr, uint64_t size)
+	 */
+	ENTRY(hv_niu_rx_logical_page_conf)
+	mov	N2NIU_RX_LP_CONF, %o5
+	ta	FAST_TRAP
+	retl
+	nop
+	SET_SIZE(hv_niu_rx_logical_page_conf)
+
+	/*
+	 * hv_niu_rx_logical_page_info(uint64_t chidx, uint64_t pgidx,
+	 *	uint64_t *raddr, uint64_t *size)
+	 */
+	ENTRY(hv_niu_rx_logical_page_info)
+	mov	%o2, %g1
+	mov	%o3, %g2
+	mov	N2NIU_RX_LP_INFO, %o5
+	ta	FAST_TRAP
+	stx	%o1, [%g1]
+	retl
+	stx	%o2, [%g2]
+	SET_SIZE(hv_niu_rx_logical_page_info)
+
+	/*
+	 * hv_niu_tx_logical_page_conf(uint64_t chidx, uint64_t pgidx,
+	 *	uint64_t raddr, uint64_t size)
+	 */
+	ENTRY(hv_niu_tx_logical_page_conf)
+	mov	N2NIU_TX_LP_CONF, %o5
+	ta	FAST_TRAP
+	retl
+	nop
+	SET_SIZE(hv_niu_tx_logical_page_conf)
+
+	/*
+	 * hv_niu_tx_logical_page_info(uint64_t chidx, uint64_t pgidx,
+	 *	uint64_t *raddr, uint64_t *size)
+	 */
+	ENTRY(hv_niu_tx_logical_page_info)
+	mov	%o2, %g1
+	mov	%o3, %g2
+	mov	N2NIU_TX_LP_INFO, %o5
+	ta	FAST_TRAP
+	stx	%o1, [%g1]
+	retl
+	stx	%o2, [%g2]
+	SET_SIZE(hv_niu_tx_logical_page_info)
+
+#endif	/* lint || __lint */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nxge/nxge_hw.c	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,1021 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#include <sys/nxge/nxge_impl.h>
+
+/*
+ * Tunable Receive Completion Ring Configuration B parameters.
+ */
+uint16_t nxge_rx_pkt_thres;	/* 16 bits */
+uint8_t nxge_rx_pkt_timeout;	/* 6 bits based on DMA clock divider */
+
+lb_property_t lb_normal = {normal, "normal", nxge_lb_normal};
+lb_property_t lb_external10g = {external, "external10g", nxge_lb_ext10g};
+lb_property_t lb_external1000 = {external, "external1000", nxge_lb_ext1000};
+lb_property_t lb_external100 = {external, "external100", nxge_lb_ext100};
+lb_property_t lb_external10 = {external, "external10", nxge_lb_ext10};
+lb_property_t lb_phy10g = {internal, "phy10g", nxge_lb_phy10g};
+lb_property_t lb_phy1000 = {internal, "phy1000", nxge_lb_phy1000};
+lb_property_t lb_phy = {internal, "phy", nxge_lb_phy};
+lb_property_t lb_serdes10g = {internal, "serdes10g", nxge_lb_serdes10g};
+lb_property_t lb_serdes1000 = {internal, "serdes", nxge_lb_serdes1000};
+lb_property_t lb_mac10g = {internal, "mac10g", nxge_lb_mac10g};
+lb_property_t lb_mac1000 = {internal, "mac1000", nxge_lb_mac1000};
+lb_property_t lb_mac = {internal, "mac10/100", nxge_lb_mac};
+
+uint32_t nxge_lb_dbg = 1;
+void nxge_get_mii(p_nxge_t nxgep, p_mblk_t mp);
+void nxge_put_mii(p_nxge_t nxgep, p_mblk_t mp);
+
+extern uint32_t nxge_rx_mode;
+extern uint32_t nxge_jumbo_mtu;
+extern boolean_t nxge_jumbo_enable;
+
+static void
+nxge_rtrace_ioctl(p_nxge_t, queue_t *, mblk_t *, struct iocblk *);
+
+/* ARGSUSED */
+void
+nxge_global_reset(p_nxge_t nxgep)
+{
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_global_reset"));
+
+	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
+	(void) nxge_intr_hw_disable(nxgep);
+
+	if ((nxgep->suspended) ||
+			((nxgep->statsp->port_stats.lb_mode ==
+			nxge_lb_phy1000) ||
+			(nxgep->statsp->port_stats.lb_mode ==
+			nxge_lb_phy10g) ||
+			(nxgep->statsp->port_stats.lb_mode ==
+			nxge_lb_serdes1000) ||
+			(nxgep->statsp->port_stats.lb_mode ==
+			nxge_lb_serdes10g))) {
+		(void) nxge_link_init(nxgep);
+	}
+	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
+	(void) nxge_mac_init(nxgep);
+	(void) nxge_intr_hw_enable(nxgep);
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_global_reset"));
+}
+
+/* ARGSUSED */
+void
+nxge_hw_id_init(p_nxge_t nxgep)
+{
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_id_init"));
+	/*
+	 * Set up the initial hardware parameters, such as the MAC MTU size.
+	 */
+	nxgep->mac.is_jumbo = B_FALSE;
+	nxgep->mac.maxframesize = NXGE_MTU_DEFAULT_MAX;	/* 1522 */
+	if (nxgep->param_arr[param_accept_jumbo].value || nxge_jumbo_enable) {
+		nxgep->mac.maxframesize = (uint16_t)nxge_jumbo_mtu;
+		nxgep->mac.is_jumbo = B_TRUE;
+	}
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+		"==> nxge_hw_id_init: maxframesize %d",
+		nxgep->mac.maxframesize));
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_id_init"));
+}
+
+/* ARGSUSED */
+void
+nxge_hw_init_niu_common(p_nxge_t nxgep)
+{
+	p_nxge_hw_list_t hw_p;
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_init_niu_common"));
+
+	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
+		return;
+	}
+	MUTEX_ENTER(&hw_p->nxge_cfg_lock);
+	if (hw_p->flags & COMMON_INIT_DONE) {
+		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
+			"nxge_hw_init_niu_common"
+			" already done for dip $%p function %d exiting",
+			hw_p->parent_devp, nxgep->function_num));
+		MUTEX_EXIT(&hw_p->nxge_cfg_lock);
+		return;
+	}
+
+	hw_p->flags = COMMON_INIT_START;
+	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "nxge_hw_init_niu_common"
+		" Started for device id %x with function %d",
+		hw_p->parent_devp, nxgep->function_num));
+
+	/* per neptune common block init */
+	(void) nxge_fflp_hw_reset(nxgep);
+
+	hw_p->flags = COMMON_INIT_DONE;
+	MUTEX_EXIT(&hw_p->nxge_cfg_lock);
+
+	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "nxge_hw_init_niu_common"
+		" Done for device id %x with function %d",
+		hw_p->parent_devp, nxgep->function_num));
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_init_niu_common"));
+}
+
+/* ARGSUSED */
+uint_t
+nxge_intr(void *arg1, void *arg2)
+{
+	p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
+	p_nxge_t nxgep = (p_nxge_t)arg2;
+	uint_t serviced = DDI_INTR_UNCLAIMED;
+	uint8_t ldv;
+	npi_handle_t handle;
+	p_nxge_ldgv_t ldgvp;
+	p_nxge_ldg_t ldgp, t_ldgp;
+	p_nxge_ldv_t t_ldvp;
+	uint64_t vector0 = 0, vector1 = 0, vector2 = 0;
+	int i, j, nldvs, nintrs = 1;
+	npi_status_t rs = NPI_SUCCESS;
+
+	/* DDI interface returns second arg as NULL (n2 niumx driver) !!! */
+	if (arg2 == NULL || (void *) ldvp->nxgep != arg2) {
+		nxgep = ldvp->nxgep;
+	}
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr"));
+
+	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
+		NXGE_ERROR_MSG((nxgep, INT_CTL,
+			"<== nxge_intr: not initialized 0x%x", serviced));
+		return (serviced);
+	}
+
+	ldgvp = nxgep->ldgvp;
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: ldgvp $%p", ldgvp));
+	if (ldvp == NULL && ldgvp) {
+		t_ldvp = ldvp = ldgvp->ldvp;
+	}
+	if (ldvp) {
+		ldgp = t_ldgp = ldvp->ldgp;
+	}
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: "
+		"ldgvp $%p ldvp $%p ldgp $%p", ldgvp, ldvp, ldgp));
+	if (ldgvp == NULL || ldvp == NULL || ldgp == NULL) {
+		NXGE_ERROR_MSG((nxgep, INT_CTL, "==> nxge_intr: "
+			"ldgvp $%p ldvp $%p ldgp $%p", ldgvp, ldvp, ldgp));
+		NXGE_ERROR_MSG((nxgep, INT_CTL, "<== nxge_intr: not ready"));
+		return (DDI_INTR_UNCLAIMED);
+	}
+	/*
+	 * This interrupt handler will have to go through all the logical
+	 * devices to find out which logical device interrupts us and then call
+	 * its handler to process the events.
+	 */
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	t_ldgp = ldgp;
+	t_ldvp = ldgp->ldvp;
+
+	nldvs = ldgp->nldvs;
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: #ldvs %d #intrs %d",
+			nldvs, ldgvp->ldg_intrs));
+
+	serviced = DDI_INTR_CLAIMED;
+	for (i = 0; i < nintrs; i++, t_ldgp++) {
+		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr(%d): #ldvs %d "
+				" #intrs %d", i, nldvs, nintrs));
+		/* Get this group's flag bits.  */
+		t_ldgp->interrupted = B_FALSE;
+		rs = npi_ldsv_ldfs_get(handle, t_ldgp->ldg,
+			&vector0, &vector1, &vector2);
+		if (rs) {
+			continue;
+		}
+		if (!vector0 && !vector1 && !vector2) {
+			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: "
+				"no interrupts on group %d", t_ldgp->ldg));
+			continue;
+		}
+		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: "
+			"vector0 0x%llx vector1 0x%llx vector2 0x%llx",
+			vector0, vector1, vector2));
+		t_ldgp->interrupted = B_TRUE;
+		nldvs = t_ldgp->nldvs;
+		for (j = 0; j < nldvs; j++, t_ldvp++) {
+			/*
+			 * Call device's handler if flag bits are on.
+			 */
+			ldv = t_ldvp->ldv;
+			if (((ldv < NXGE_MAC_LD_START) &&
+					(LDV_ON(ldv, vector0) |
+					(LDV_ON(ldv, vector1)))) ||
+					(ldv >= NXGE_MAC_LD_START &&
+					((LDV2_ON_1(ldv, vector2)) ||
+					(LDV2_ON_2(ldv, vector2))))) {
+				(void) (t_ldvp->ldv_intr_handler)(
+					(caddr_t)t_ldvp, arg2);
+				NXGE_DEBUG_MSG((nxgep, INT_CTL,
+					"==> nxge_intr: "
+					"calling device %d #ldvs %d #intrs %d",
+					j, nldvs, nintrs));
+			}
+		}
+	}
+
+	t_ldgp = ldgp;
+	for (i = 0; i < nintrs; i++, t_ldgp++) {
+		/* rearm group interrupts */
+		if (t_ldgp->interrupted) {
+			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: arm "
+				"group %d", t_ldgp->ldg));
+			(void) npi_intr_ldg_mgmt_set(handle, t_ldgp->ldg,
+				t_ldgp->arm, t_ldgp->ldg_timer);
+		}
+	}
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intr: serviced 0x%x",
+		serviced));
+	return (serviced);
+}
+
+/* ARGSUSED */
+uint_t
+nxge_syserr_intr(void *arg1, void *arg2)
+{
+	p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
+	p_nxge_t nxgep = (p_nxge_t)arg2;
+	p_nxge_ldg_t ldgp = NULL;
+	npi_handle_t handle;
+	sys_err_stat_t estat;
+	uint_t serviced = DDI_INTR_UNCLAIMED;
+
+	if (arg1 == NULL && arg2 == NULL) {
+		return (serviced);
+	}
+	if (arg2 == NULL || ((ldvp != NULL && (void *) ldvp->nxgep != arg2))) {
+		if (ldvp != NULL) {
+			nxgep = ldvp->nxgep;
+		}
+	}
+	NXGE_DEBUG_MSG((nxgep, SYSERR_CTL,
+		"==> nxge_syserr_intr: arg2 $%p arg1 $%p", nxgep, ldvp));
+	if (ldvp != NULL && ldvp->use_timer == B_FALSE) {
+		ldgp = ldvp->ldgp;
+		if (ldgp == NULL) {
+			NXGE_ERROR_MSG((nxgep, SYSERR_CTL,
+				"<== nxge_syserrintr(no logical group): "
+				"arg2 $%p arg1 $%p", nxgep, ldvp));
+			return (DDI_INTR_UNCLAIMED);
+		}
+		/*
+		 * Get the logical device state if the function uses interrupt.
+		 */
+	}
+
+	/* This interrupt handler is for system error interrupts.  */
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	estat.value = 0;
+	(void) npi_fzc_sys_err_stat_get(handle, &estat);
+	NXGE_DEBUG_MSG((nxgep, SYSERR_CTL,
+		"==> nxge_syserr_intr: device error 0x%016llx", estat.value));
+
+	if (estat.bits.ldw.smx) {
+		/* SMX */
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_syserr_intr: device error - SMX"));
+	} else if (estat.bits.ldw.mac) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_syserr_intr: device error - MAC"));
+		/*
+		 * There is nothing to be done here.  All MAC errors are
+		 * reported through the per-port MAC interrupt.  MIF is the
+		 * only MAC sub-block that can report status here; its status
+		 * is ignored here and is checked by the per-port timer
+		 * instead.
+		 */
+	} else if (estat.bits.ldw.ipp) {
+		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_syserr_intr: device error - IPP"));
+		(void) nxge_ipp_handle_sys_errors(nxgep);
+	} else if (estat.bits.ldw.zcp) {
+		/* ZCP */
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_syserr_intr: device error - ZCP"));
+		(void) nxge_zcp_handle_sys_errors(nxgep);
+	} else if (estat.bits.ldw.tdmc) {
+		/* TDMC */
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_syserr_intr: device error - TDMC"));
+		/*
+		 * There are no TDMC system errors defined in the PRM. All TDMC
+		 * channel specific errors are reported on a per channel basis.
+		 */
+	} else if (estat.bits.ldw.rdmc) {
+		/* RDMC */
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_syserr_intr: device error - RDMC"));
+		(void) nxge_rxdma_handle_sys_errors(nxgep);
+	} else if (estat.bits.ldw.txc) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_syserr_intr: device error - TXC"));
+		(void) nxge_txc_handle_sys_errors(nxgep);
+	} else if ((nxgep->niu_type != N2_NIU) && estat.bits.ldw.peu) {
+		/* PCI-E */
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_syserr_intr: device error - PCI-E"));
+	} else if (estat.bits.ldw.meta1) {
+		/* META1 */
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_syserr_intr: device error - META1"));
+	} else if (estat.bits.ldw.meta2) {
+		/* META2 */
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_syserr_intr: device error - META2"));
+	} else if (estat.bits.ldw.fflp) {
+		/* FFLP */
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_syserr_intr: device error - FFLP"));
+		(void) nxge_fflp_handle_sys_errors(nxgep);
+	}
+	serviced = DDI_INTR_CLAIMED;
+
+	if (ldgp != NULL && ldvp != NULL && ldgp->nldvs == 1 &&
+		!ldvp->use_timer) {
+		(void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg,
+			B_TRUE, ldgp->ldg_timer);
+	}
+	NXGE_DEBUG_MSG((nxgep, SYSERR_CTL, "<== nxge_syserr_intr"));
+	return (serviced);
+}
+
+/* ARGSUSED */
+void
+nxge_intr_hw_enable(p_nxge_t nxgep)
+{
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr_hw_enable"));
+	(void) nxge_intr_mask_mgmt_set(nxgep, B_TRUE);
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intr_hw_enable"));
+}
+
+/* ARGSUSED */
+void
+nxge_intr_hw_disable(p_nxge_t nxgep)
+{
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr_hw_disable"));
+	(void) nxge_intr_mask_mgmt_set(nxgep, B_FALSE);
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intr_hw_disable"));
+}
+
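+/*
+ * Interrupt blanking: program the RCR threshold (packet count) and the
+ * RCR timeout (in ticks) for every receive DMA channel owned by this
+ * instance.
+ */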
+/* ARGSUSED */
+void
+nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count)
+{
+	p_nxge_t nxgep = (p_nxge_t)arg;
+	uint8_t channel;
+	npi_handle_t handle;
+	p_nxge_ldgv_t ldgvp;
+	p_nxge_ldv_t ldvp;
+	int i;
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_hw_blank"));
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+
+	if ((ldgvp = nxgep->ldgvp) == NULL) {
+		NXGE_ERROR_MSG((nxgep, INT_CTL,
+			"<== nxge_rx_hw_blank (not enabled)"));
+		return;
+	}
+	ldvp = nxgep->ldgvp->ldvp;
+	if (ldvp == NULL) {
+		return;
+	}
+	for (i = 0; i < ldgvp->nldvs; i++, ldvp++) {
+		if (ldvp->is_rxdma) {
+			channel = ldvp->channel;
+			(void) npi_rxdma_cfg_rdc_rcr_threshold(handle,
+				channel, count);
+			(void) npi_rxdma_cfg_rdc_rcr_timeout(handle,
+				channel, ticks);
+		}
+	}
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_rx_hw_blank"));
+}
+
+/* ARGSUSED */
+void
+nxge_hw_stop(p_nxge_t nxgep)
+{
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_stop"));
+
+	(void) nxge_tx_mac_disable(nxgep);
+	(void) nxge_rx_mac_disable(nxgep);
+	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
+	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_stop"));
+}
+
+/* ARGSUSED */
+void
+nxge_hw_ioctl(p_nxge_t nxgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
+{
+	int cmd;
+
+	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_hw_ioctl"));
+
+	if (nxgep == NULL) {
+		miocnak(wq, mp, 0, EINVAL);
+		return;
+	}
+	iocp->ioc_error = 0;
+	cmd = iocp->ioc_cmd;
+
+	switch (cmd) {
+	default:
+		miocnak(wq, mp, 0, EINVAL);
+		return;
+
+	case NXGE_GET_MII:
+		nxge_get_mii(nxgep, mp->b_cont);
+		miocack(wq, mp, sizeof (uint16_t), 0);
+		break;
+
+	case NXGE_PUT_MII:
+		nxge_put_mii(nxgep, mp->b_cont);
+		miocack(wq, mp, 0, 0);
+		break;
+
+	case NXGE_GET64:
+		nxge_get64(nxgep, mp->b_cont);
+		miocack(wq, mp, sizeof (uint32_t), 0);
+		break;
+
+	case NXGE_PUT64:
+		nxge_put64(nxgep, mp->b_cont);
+		miocack(wq, mp, 0, 0);
+		break;
+
+	case NXGE_PUT_TCAM:
+		nxge_put_tcam(nxgep, mp->b_cont);
+		miocack(wq, mp, 0, 0);
+		break;
+
+	case NXGE_GET_TCAM:
+		nxge_get_tcam(nxgep, mp->b_cont);
+		miocack(wq, mp, 0, 0);
+		break;
+
+	case NXGE_TX_REGS_DUMP:
+		nxge_txdma_regs_dump_channels(nxgep);
+		miocack(wq, mp, 0, 0);
+		break;
+	case NXGE_RX_REGS_DUMP:
+		nxge_rxdma_regs_dump_channels(nxgep);
+		miocack(wq, mp, 0, 0);
+		break;
+	case NXGE_VIR_INT_REGS_DUMP:
+	case NXGE_INT_REGS_DUMP:
+		nxge_virint_regs_dump(nxgep);
+		miocack(wq, mp, 0, 0);
+		break;
+	case NXGE_RTRACE:
+		nxge_rtrace_ioctl(nxgep, wq, mp, iocp);
+		break;
+	}
+}
+
+/* ARGSUSED */
+void
+nxge_loopback_ioctl(p_nxge_t nxgep, queue_t *wq, mblk_t *mp,
+	struct iocblk *iocp)
+{
+	p_lb_property_t lb_props;
+
+	size_t size;
+	int i;
+
+	if (mp->b_cont == NULL) {
+		miocnak(wq, mp, 0, EINVAL);
+		return;
+	}
+	switch (iocp->ioc_cmd) {
+	case LB_GET_MODE:
+		NXGE_DEBUG_MSG((nxgep, IOC_CTL, "NXGE_GET_LB_MODE command"));
+		if (nxgep != NULL) {
+			*(lb_info_sz_t *)mp->b_cont->b_rptr =
+				nxgep->statsp->port_stats.lb_mode;
+			miocack(wq, mp, sizeof (nxge_lb_t), 0);
+		} else
+			miocnak(wq, mp, 0, EINVAL);
+		break;
+	case LB_SET_MODE:
+		NXGE_DEBUG_MSG((nxgep, IOC_CTL, "NXGE_SET_LB_MODE command"));
+		if (iocp->ioc_count != sizeof (uint32_t)) {
+			miocack(wq, mp, 0, 0);
+			break;
+		}
+		if ((nxgep != NULL) && nxge_set_lb(nxgep, wq, mp->b_cont)) {
+			miocack(wq, mp, 0, 0);
+		} else {
+			miocnak(wq, mp, 0, EPROTO);
+		}
+		break;
+	case LB_GET_INFO_SIZE:
+		NXGE_DEBUG_MSG((nxgep, IOC_CTL, "LB_GET_INFO_SIZE command"));
+		if (nxgep != NULL) {
+			size = sizeof (lb_normal);
+			if (nxgep->statsp->mac_stats.cap_10gfdx) {
+				size += sizeof (lb_external10g);
+				size += sizeof (lb_phy10g);
+				size += sizeof (lb_serdes10g);
+				size += sizeof (lb_mac10g);
+			}
+			if (nxgep->statsp->mac_stats.cap_1000fdx) {
+				size += sizeof (lb_external1000);
+				size += sizeof (lb_mac1000);
+				if (nxgep->mac.portmode == PORT_1G_COPPER)
+					size += sizeof (lb_phy1000);
+			}
+			if (nxgep->statsp->mac_stats.cap_100fdx)
+				size += sizeof (lb_external100);
+			if (nxgep->statsp->mac_stats.cap_10fdx)
+				size += sizeof (lb_external10);
+			else if (nxgep->mac.portmode == PORT_1G_FIBER)
+				size += sizeof (lb_serdes1000);
+			*(lb_info_sz_t *)mp->b_cont->b_rptr = size;
+
+			NXGE_DEBUG_MSG((nxgep, IOC_CTL,
+				"NXGE_GET_LB_INFO command: size %d", size));
+			miocack(wq, mp, sizeof (lb_info_sz_t), 0);
+		} else
+			miocnak(wq, mp, 0, EINVAL);
+		break;
+
+	case LB_GET_INFO:
+		NXGE_DEBUG_MSG((nxgep, IOC_CTL, "NXGE_GET_LB_INFO command"));
+		if (nxgep != NULL) {
+			size = sizeof (lb_normal);
+			if (nxgep->statsp->mac_stats.cap_10gfdx) {
+				size += sizeof (lb_external10g);
+				size += sizeof (lb_phy10g);
+				size += sizeof (lb_serdes10g);
+				size += sizeof (lb_mac10g);
+			}
+			if (nxgep->statsp->mac_stats.cap_1000fdx) {
+				size += sizeof (lb_external1000);
+				size += sizeof (lb_mac1000);
+				if (nxgep->mac.portmode == PORT_1G_COPPER)
+					size += sizeof (lb_phy1000);
+			}
+			if (nxgep->statsp->mac_stats.cap_100fdx)
+				size += sizeof (lb_external100);
+			if (nxgep->statsp->mac_stats.cap_10fdx)
+				size += sizeof (lb_external10);
+			else if (nxgep->mac.portmode == PORT_1G_FIBER)
+				size += sizeof (lb_serdes1000);
+
+			NXGE_DEBUG_MSG((nxgep, IOC_CTL,
+				"NXGE_GET_LB_INFO command: size %d", size));
+			if (size == iocp->ioc_count) {
+				i = 0;
+				lb_props = (p_lb_property_t)mp->b_cont->b_rptr;
+				lb_props[i++] = lb_normal;
+				if (nxgep->statsp->mac_stats.cap_10gfdx) {
+					lb_props[i++] = lb_mac10g;
+					lb_props[i++] = lb_serdes10g;
+					lb_props[i++] = lb_phy10g;
+					lb_props[i++] = lb_external10g;
+				}
+				if (nxgep->statsp->mac_stats.cap_1000fdx)
+					lb_props[i++] = lb_external1000;
+				if (nxgep->statsp->mac_stats.cap_100fdx)
+					lb_props[i++] = lb_external100;
+				if (nxgep->statsp->mac_stats.cap_10fdx)
+					lb_props[i++] = lb_external10;
+				if (nxgep->statsp->mac_stats.cap_1000fdx)
+					lb_props[i++] = lb_mac1000;
+				if (nxgep->mac.portmode == PORT_1G_COPPER) {
+					if (nxgep->statsp->mac_stats.
+						cap_1000fdx)
+						lb_props[i++] = lb_phy1000;
+				} else if (nxgep->mac.portmode ==
+					PORT_1G_FIBER)
+					lb_props[i++] = lb_serdes1000;
+				miocack(wq, mp, size, 0);
+			} else
+				miocnak(wq, mp, 0, EINVAL);
+		} else {
+			miocnak(wq, mp, 0, EINVAL);
+			cmn_err(CE_NOTE,
+				"!nxge_loopback_ioctl: invalid command 0x%x",
+				iocp->ioc_cmd);
+		}
+		break;
+	}
+}
+
+/*
+ * DMA channel interfaces to access various channel specific
+ * hardware functions.
+ */
+/* ARGSUSED */
+void
+nxge_rxdma_channel_put64(nxge_os_acc_handle_t handle, void *reg_addrp,
+	uint32_t reg_base, uint16_t channel, uint64_t reg_data)
+{
+	uint64_t reg_offset;
+
+	NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_channel_put64"));
+
+	/*
+	 * Channel is assumed to be from 0 to the maximum DMA channel #. If we
+	 * use the virtual DMA CSR address space from the config space (in PCI
+	 * case), then the following code needs to use a different offset
+	 * computation macro.
+	 */
+	reg_offset = reg_base + DMC_OFFSET(channel);
+	NXGE_PIO_WRITE64(handle, reg_addrp, reg_offset, reg_data);
+
+	NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_channel_put64"));
+}
+
+/* ARGSUSED */
+uint64_t
+nxge_rxdma_channel_get64(nxge_os_acc_handle_t handle, void *reg_addrp,
+	uint32_t reg_base, uint16_t channel)
+{
+	uint64_t reg_offset;
+
+	NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_channel_get64"));
+
+	/*
+	 * Channel is assumed to be from 0 to the maximum DMA channel #. If we
+	 * use the virtual DMA CSR address space from the config space (in PCI
+	 * case), then the following code needs to use a different offset
+	 * computation macro.
+	 */
+	reg_offset = reg_base + DMC_OFFSET(channel);
+
+	NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_channel_get64"));
+
+	return (NXGE_PIO_READ64(handle, reg_addrp, reg_offset));
+}
+
+/* ARGSUSED */
+void
+nxge_get32(p_nxge_t nxgep, p_mblk_t mp)
+{
+	nxge_os_acc_handle_t nxge_regh;
+
+	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "nxge_get32"));
+	nxge_regh = nxgep->dev_regs->nxge_regh;
+
+	*(uint32_t *)mp->b_rptr = NXGE_PIO_READ32(nxge_regh,
+		nxgep->dev_regs->nxge_regp, *(uint32_t *)mp->b_rptr);
+
+	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "value = 0x%08X",
+		*(uint32_t *)mp->b_rptr));
+	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "nxge_get32"));
+}
+
+/* ARGSUSED */
+void
+nxge_put32(p_nxge_t nxgep, p_mblk_t mp)
+{
+	nxge_os_acc_handle_t nxge_regh;
+	uint32_t *buf;
+	uint8_t *reg;
+
+	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "nxge_put32"));
+	nxge_regh = nxgep->dev_regs->nxge_regh;
+
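+	/*
+	 * Diagnostic register poke: buf[0] carries the register offset
+	 * relative to the mapped register base, buf[1] the 32-bit value
+	 * to write.
+	 */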
+	buf = (uint32_t *)mp->b_rptr;
+	reg = (uint8_t *)(nxgep->dev_regs->nxge_regp) + buf[0];
+	NXGE_DEBUG_MSG((nxgep, IOC_CTL,
+		"reg = 0x%016llX index = 0x%08X value = 0x%08X",
+		reg, buf[0], buf[1]));
+	NXGE_PIO_WRITE32(nxge_regh, (uint32_t *)reg, 0, buf[1]);
+	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "nxge_put32"));
+}
+
+/*ARGSUSED*/
+boolean_t
+nxge_set_lb(p_nxge_t nxgep, queue_t *wq, p_mblk_t mp)
+{
+	boolean_t status = B_TRUE;
+	uint32_t lb_mode;
+	lb_property_t *lb_info;
+
+	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "<== nxge_set_lb"));
+	lb_mode = nxgep->statsp->port_stats.lb_mode;
+	if (lb_mode == *(uint32_t *)mp->b_rptr) {
+		cmn_err(CE_NOTE,
+			"!nxge%d: Loopback mode already set (lb_mode %d).\n",
+			nxgep->instance, lb_mode);
+		status = B_FALSE;
+		goto nxge_set_lb_exit;
+	}
+	lb_mode = *(uint32_t *)mp->b_rptr;
+	lb_info = NULL;
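+	/*
+	 * Map the requested mode onto its lb_property descriptor, rejecting
+	 * modes the port does not support (speed capability or port type).
+	 */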
+	if (lb_mode == lb_normal.value)
+		lb_info = &lb_normal;
+	else if ((lb_mode == lb_external10g.value) &&
+		(nxgep->statsp->mac_stats.cap_10gfdx))
+		lb_info = &lb_external10g;
+	else if ((lb_mode == lb_external1000.value) &&
+		(nxgep->statsp->mac_stats.cap_1000fdx))
+		lb_info = &lb_external1000;
+	else if ((lb_mode == lb_external100.value) &&
+		(nxgep->statsp->mac_stats.cap_100fdx))
+		lb_info = &lb_external100;
+	else if ((lb_mode == lb_external10.value) &&
+		(nxgep->statsp->mac_stats.cap_10fdx))
+		lb_info = &lb_external10;
+	else if ((lb_mode == lb_phy10g.value) &&
+			((nxgep->mac.portmode == PORT_10G_COPPER) ||
+			(nxgep->mac.portmode == PORT_10G_FIBER)))
+		lb_info = &lb_phy10g;
+	else if ((lb_mode == lb_phy1000.value) &&
+		(nxgep->mac.portmode == PORT_1G_COPPER))
+		lb_info = &lb_phy1000;
+	else if ((lb_mode == lb_phy.value) &&
+		(nxgep->mac.portmode == PORT_1G_COPPER))
+		lb_info = &lb_phy;
+	else if ((lb_mode == lb_serdes10g.value) &&
+			((nxgep->mac.portmode == PORT_10G_FIBER) ||
+			(nxgep->mac.portmode == PORT_10G_COPPER)))
+		lb_info = &lb_serdes10g;
+	else if ((lb_mode == lb_serdes1000.value) &&
+		(nxgep->mac.portmode == PORT_1G_FIBER))
+		lb_info = &lb_serdes1000;
+	else if (lb_mode == lb_mac10g.value)
+		lb_info = &lb_mac10g;
+	else if (lb_mode == lb_mac1000.value)
+		lb_info = &lb_mac1000;
+	else if (lb_mode == lb_mac.value)
+		lb_info = &lb_mac;
+	else {
+		cmn_err(CE_NOTE,
+			"!nxge%d: Loopback mode not supported(mode %d).\n",
+			nxgep->instance, lb_mode);
+		status = B_FALSE;
+		goto nxge_set_lb_exit;
+	}
+
+	if (lb_mode == nxge_lb_normal) {
+		if (nxge_lb_dbg) {
+			cmn_err(CE_NOTE,
+				"!nxge%d: Returning to normal operation",
+				nxgep->instance);
+		}
+		nxge_set_lb_normal(nxgep);
+		goto nxge_set_lb_exit;
+	}
+	nxgep->statsp->port_stats.lb_mode = lb_mode;
+
+	if (nxge_lb_dbg)
+		cmn_err(CE_NOTE,
+			"!nxge%d: Adapter now in %s loopback mode",
+			nxgep->instance, lb_info->key);
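+	/*
+	 * Disable autonegotiation and advertise only the speed implied by
+	 * the selected loopback mode.
+	 */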
+	nxgep->param_arr[param_autoneg].value = 0;
+	nxgep->param_arr[param_anar_10gfdx].value =
+		(nxgep->statsp->port_stats.lb_mode == nxge_lb_ext10g) ||
+		(nxgep->statsp->port_stats.lb_mode == nxge_lb_mac10g) ||
+		(nxgep->statsp->port_stats.lb_mode == nxge_lb_phy10g) ||
+		(nxgep->statsp->port_stats.lb_mode == nxge_lb_serdes10g);
+	nxgep->param_arr[param_anar_10ghdx].value = 0;
+	nxgep->param_arr[param_anar_1000fdx].value =
+		(nxgep->statsp->port_stats.lb_mode == nxge_lb_ext1000) ||
+		(nxgep->statsp->port_stats.lb_mode == nxge_lb_mac1000) ||
+		(nxgep->statsp->port_stats.lb_mode == nxge_lb_phy1000) ||
+		(nxgep->statsp->port_stats.lb_mode == nxge_lb_serdes1000);
+	nxgep->param_arr[param_anar_1000hdx].value = 0;
+	nxgep->param_arr[param_anar_100fdx].value =
+		(nxgep->statsp->port_stats.lb_mode == nxge_lb_phy) ||
+		(nxgep->statsp->port_stats.lb_mode == nxge_lb_mac) ||
+		(nxgep->statsp->port_stats.lb_mode == nxge_lb_ext100);
+	nxgep->param_arr[param_anar_100hdx].value = 0;
+	nxgep->param_arr[param_anar_10fdx].value =
+		(nxgep->statsp->port_stats.lb_mode == nxge_lb_mac) ||
+		(nxgep->statsp->port_stats.lb_mode == nxge_lb_ext10);
+	if (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext1000) {
+		nxgep->param_arr[param_master_cfg_enable].value = 1;
+		nxgep->param_arr[param_master_cfg_value].value = 1;
+	}
+	if ((nxgep->statsp->port_stats.lb_mode == nxge_lb_ext10g) ||
+		(nxgep->statsp->port_stats.lb_mode == nxge_lb_ext1000) ||
+		(nxgep->statsp->port_stats.lb_mode == nxge_lb_ext100) ||
+		(nxgep->statsp->port_stats.lb_mode == nxge_lb_ext10) ||
+		(nxgep->statsp->port_stats.lb_mode == nxge_lb_phy10g) ||
+		(nxgep->statsp->port_stats.lb_mode == nxge_lb_phy1000) ||
+		(nxgep->statsp->port_stats.lb_mode == nxge_lb_phy)) {
+
+		(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
+		(void) nxge_xcvr_find(nxgep);
+		(void) nxge_link_init(nxgep);
+		(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
+	}
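+	/*
+	 * For internal loopback modes there is no external link, so force
+	 * the reported link state (speed, full duplex, link up) to match
+	 * the selected mode.
+	 */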
+	if (lb_info->lb_type == internal) {
+		if ((nxgep->statsp->port_stats.lb_mode == nxge_lb_mac10g) ||
+				(nxgep->statsp->port_stats.lb_mode ==
+				nxge_lb_phy10g) ||
+				(nxgep->statsp->port_stats.lb_mode ==
+				nxge_lb_serdes10g)) {
+			nxgep->statsp->mac_stats.link_speed = 10000;
+		} else if ((nxgep->statsp->port_stats.lb_mode
+				== nxge_lb_mac1000) ||
+				(nxgep->statsp->port_stats.lb_mode ==
+				nxge_lb_phy1000) ||
+				(nxgep->statsp->port_stats.lb_mode ==
+				nxge_lb_serdes1000)) {
+			nxgep->statsp->mac_stats.link_speed = 1000;
+		} else {
+			nxgep->statsp->mac_stats.link_speed = 100;
+		}
+		nxgep->statsp->mac_stats.link_duplex = 2;
+		nxgep->statsp->mac_stats.link_up = 1;
+	}
+	nxge_global_reset(nxgep);
+
+nxge_set_lb_exit:
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+		"<== nxge_set_lb status = 0x%08x", status));
+	return (status);
+}
+
+/* ARGSUSED */
+void
+nxge_set_lb_normal(p_nxge_t nxgep)
+{
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_set_lb_normal"));
+	nxgep->statsp->port_stats.lb_mode = nxge_lb_normal;
+	nxgep->param_arr[param_autoneg].value =
+		nxgep->param_arr[param_autoneg].old_value;
+	nxgep->param_arr[param_anar_1000fdx].value =
+		nxgep->param_arr[param_anar_1000fdx].old_value;
+	nxgep->param_arr[param_anar_1000hdx].value =
+		nxgep->param_arr[param_anar_1000hdx].old_value;
+	nxgep->param_arr[param_anar_100fdx].value =
+		nxgep->param_arr[param_anar_100fdx].old_value;
+	nxgep->param_arr[param_anar_100hdx].value =
+		nxgep->param_arr[param_anar_100hdx].old_value;
+	nxgep->param_arr[param_anar_10fdx].value =
+		nxgep->param_arr[param_anar_10fdx].old_value;
+	nxgep->param_arr[param_master_cfg_enable].value =
+		nxgep->param_arr[param_master_cfg_enable].old_value;
+	nxgep->param_arr[param_master_cfg_value].value =
+		nxgep->param_arr[param_master_cfg_value].old_value;
+
+	nxge_global_reset(nxgep);
+
+	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
+	(void) nxge_xcvr_find(nxgep);
+	(void) nxge_link_init(nxgep);
+	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_set_lb_normal"));
+}
+
+/* ARGSUSED */
+void
+nxge_get_mii(p_nxge_t nxgep, p_mblk_t mp)
+{
+	uint16_t reg;
+
+	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_get_mii"));
+
+	reg = *(uint16_t *)mp->b_rptr;
+	(void) nxge_mii_read(nxgep, nxgep->statsp->mac_stats.xcvr_portn, reg,
+		(uint16_t *)mp->b_rptr);
+	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "reg = 0x%08X value = 0x%04X",
+		reg, *(uint16_t *)mp->b_rptr));
+	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "<== nxge_get_mii"));
+}
+
+/* ARGSUSED */
+void
+nxge_put_mii(p_nxge_t nxgep, p_mblk_t mp)
+{
+	uint16_t *buf;
+	uint8_t reg;
+
+	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_put_mii"));
+	buf = (uint16_t *)mp->b_rptr;
+	reg = (uint8_t)buf[0];
+	NXGE_DEBUG_MSG((nxgep, IOC_CTL,
+		"reg = 0x%08X index = 0x%08X value = 0x%08X",
+		reg, buf[0], buf[1]));
+	(void) nxge_mii_write(nxgep, nxgep->statsp->mac_stats.xcvr_portn,
+		reg, buf[1]);
+	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "<== nxge_put_mii"));
+}
+
+/* ARGSUSED */
+void
+nxge_check_hw_state(p_nxge_t nxgep)
+{
+	p_nxge_ldgv_t ldgvp;
+	p_nxge_ldv_t t_ldvp;
+
+	NXGE_DEBUG_MSG((nxgep, SYSERR_CTL, "==> nxge_check_hw_state"));
+
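+	/*
+	 * Periodic hardware health check; re-armed at the bottom of this
+	 * routine via nxge_start_timer as long as the hardware stays
+	 * initialized and the system-error interrupt uses the timer.
+	 */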
+	MUTEX_ENTER(nxgep->genlock);
+	nxgep->nxge_timerid = 0;
+	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
+		goto nxge_check_hw_state_exit;
+	}
+	nxge_check_tx_hang(nxgep);
+
+	ldgvp = nxgep->ldgvp;
+	if (ldgvp == NULL || (ldgvp->ldvp_syserr == NULL)) {
+		NXGE_ERROR_MSG((nxgep, SYSERR_CTL, "<== nxge_check_hw_state: "
+				"NULL ldgvp (interrupt not ready)."));
+		goto nxge_check_hw_state_exit;
+	}
+	t_ldvp = ldgvp->ldvp_syserr;
+	if (!t_ldvp->use_timer) {
+		NXGE_DEBUG_MSG((nxgep, SYSERR_CTL, "<== nxge_check_hw_state: "
+				"ldgvp $%p t_ldvp $%p use_timer flag %d",
+				ldgvp, t_ldvp, t_ldvp->use_timer));
+		goto nxge_check_hw_state_exit;
+	}
+	if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"port%d Bad register acc handle", nxgep->mac.portnum));
+	}
+	(void) nxge_syserr_intr((void *) t_ldvp, (void *) nxgep);
+
+	nxgep->nxge_timerid = nxge_start_timer(nxgep, nxge_check_hw_state,
+		NXGE_CHECK_TIMER);
+
+nxge_check_hw_state_exit:
+	MUTEX_EXIT(nxgep->genlock);
+	NXGE_DEBUG_MSG((nxgep, SYSERR_CTL, "<== nxge_check_hw_state"));
+}
+
+/*ARGSUSED*/
+static void
+nxge_rtrace_ioctl(p_nxge_t nxgep, queue_t *wq, mblk_t *mp,
+	struct iocblk *iocp)
+{
+	ssize_t size;
+	rtrace_t *rtp;
+	mblk_t *nmp;
+	uint32_t i, j;
+	uint32_t start_blk;
+	uint32_t base_entry;
+	uint32_t num_entries;
+
+	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_rtrace_ioctl"));
+
+	size = 1024;
+	if (mp->b_cont == NULL || MBLKL(mp->b_cont) < size) {
+		NXGE_DEBUG_MSG((nxgep, STR_CTL,
+				"malformed M_IOCTL MBLKL = %d size = %d",
+				MBLKL(mp->b_cont), size));
+		miocnak(wq, mp, 0, EINVAL);
+		return;
+	}
+	nmp = mp->b_cont;
+	rtp = (rtrace_t *)nmp->b_rptr;
+	start_blk = rtp->next_idx;
+	num_entries = rtp->last_idx;
+	base_entry = start_blk * MAX_RTRACE_IOC_ENTRIES;
+
+	NXGE_DEBUG_MSG((nxgep, STR_CTL, "start_blk = %d\n", start_blk));
+	NXGE_DEBUG_MSG((nxgep, STR_CTL, "num_entries = %d\n", num_entries));
+	NXGE_DEBUG_MSG((nxgep, STR_CTL, "base_entry = %d\n", base_entry));
+
+	rtp->next_idx = npi_rtracebuf.next_idx;
+	rtp->last_idx = npi_rtracebuf.last_idx;
+	rtp->wrapped = npi_rtracebuf.wrapped;
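+	/*
+	 * Copy the requested window of register-trace entries from the
+	 * global npi_rtracebuf into the caller's buffer and report the
+	 * current buffer indices back.
+	 */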
+	for (i = 0, j = base_entry; i < num_entries; i++, j++) {
+		rtp->buf[i].ctl_addr = npi_rtracebuf.buf[j].ctl_addr;
+		rtp->buf[i].val_l32 = npi_rtracebuf.buf[j].val_l32;
+		rtp->buf[i].val_h32 = npi_rtracebuf.buf[j].val_h32;
+	}
+
+	nmp->b_wptr = nmp->b_rptr + size;
+	NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_rtrace_ioctl"));
+	miocack(wq, mp, (int)size, 0);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nxge/nxge_ipp.c	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,675 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#include <nxge_impl.h>
+#include <nxge_ipp.h>
+
+#define	NXGE_IPP_FIFO_SYNC_TRY_COUNT 100
+
+/* ARGSUSED */
+nxge_status_t
+nxge_ipp_init(p_nxge_t nxgep)
+{
+	uint8_t portn;
+	uint32_t config;
+	npi_handle_t handle;
+	uint32_t pkt_size;
+	ipp_status_t istatus;
+	npi_status_t rs = NPI_SUCCESS;
+	uint64_t val;
+	uint32_t d0, d1, d2, d3, d4;
+	int i;
+	uint32_t dfifo_entries;
+
+	handle = nxgep->npi_handle;
+	portn = NXGE_GET_PORT_NUM(nxgep->function_num);
+
+	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "==> nxge_ipp_init: port%d", portn));
+
+	/* Initialize ECC and parity in SRAM of DFIFO and PFIFO */
+	if ((nxgep->niu_type == NEPTUNE) || (nxgep->niu_type == NEPTUNE_2)) {
+		if (portn < 2)
+			dfifo_entries = IPP_P0_P1_DFIFO_ENTRIES;
+		else
+			dfifo_entries = IPP_P2_P3_DFIFO_ENTRIES;
+	} else if (nxgep->niu_type == N2_NIU) {
+		dfifo_entries = IPP_NIU_DFIFO_ENTRIES;
+	} else
+		goto fail;
+
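+	/*
+	 * Writing and then reading back every DFIFO entry leaves the SRAM
+	 * with known contents and, presumably, freshly generated ECC and
+	 * parity bits, per the intent stated above.
+	 */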
+	for (i = 0; i < dfifo_entries; i++) {
+		if ((rs = npi_ipp_write_dfifo(handle,
+				portn, i, 0, 0, 0, 0, 0)) != NPI_SUCCESS)
+			goto fail;
+		if ((rs = npi_ipp_read_dfifo(handle, portn,
+				i, &d0, &d1, &d2, &d3, &d4)) != NPI_SUCCESS)
+			goto fail;
+	}
+
+	/* Clear PFIFO DFIFO status bits */
+	if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS)
+		goto fail;
+	if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS)
+		goto fail;
+
+	/*
+	 * Soft reset to bring the FIFO pointers back to their initial
+	 * position.
+	 */
+	if ((rs = npi_ipp_reset(handle, portn)) != NPI_SUCCESS)
+		goto fail;
+
+	/* Clean up ECC counter */
+	IPP_REG_RD(nxgep->npi_handle, portn, IPP_ECC_ERR_COUNTER_REG, &val);
+	IPP_REG_RD(nxgep->npi_handle, portn, IPP_TCP_CKSUM_ERR_CNT_REG, &val);
+	IPP_REG_RD(nxgep->npi_handle, portn, IPP_DISCARD_PKT_CNT_REG, &val);
+
+	if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS)
+		goto fail;
+
+	/* Configure IPP port */
+	if ((rs = npi_ipp_iconfig(handle, INIT, portn, ICFG_IPP_ALL))
+			!= NPI_SUCCESS)
+		goto fail;
+	nxgep->ipp.iconfig = ICFG_IPP_ALL;
+
+	config = CFG_IPP | CFG_IPP_DFIFO_ECC_CORRECT | CFG_IPP_DROP_BAD_CRC |
+		CFG_IPP_TCP_UDP_CKSUM;
+	if ((rs = npi_ipp_config(handle, INIT, portn, config)) != NPI_SUCCESS)
+		goto fail;
+	nxgep->ipp.config = config;
+
+	/* Set max packet size */
+	pkt_size = IPP_MAX_PKT_SIZE;
+	if ((rs = npi_ipp_set_max_pktsize(handle, portn,
+			IPP_MAX_PKT_SIZE)) != NPI_SUCCESS)
+		goto fail;
+	nxgep->ipp.max_pkt_size = pkt_size;
+
+	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "<== nxge_ipp_init: port%d", portn));
+
+	return (NXGE_OK);
+fail:
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_ipp_init: Fail to initialize IPP Port #%d\n",
+			portn));
+	return (NXGE_ERROR | rs);
+}
+
+/* ARGSUSED */
+nxge_status_t
+nxge_ipp_disable(p_nxge_t nxgep)
+{
+	uint8_t portn;
+	uint32_t config;
+	npi_handle_t handle;
+	npi_status_t rs = NPI_SUCCESS;
+	uint16_t wr_ptr, rd_ptr;
+	uint32_t try_count;
+
+	handle = nxgep->npi_handle;
+	portn = NXGE_GET_PORT_NUM(nxgep->function_num);
+
+	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "==> nxge_ipp_disable: port%d", portn));
+	(void) nxge_rx_mac_disable(nxgep);
+
+	/*
+	 * Wait until the IPP DFIFO read and write pointers are equal
+	 */
+	(void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
+	(void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
+	try_count = NXGE_IPP_FIFO_SYNC_TRY_COUNT;
+
+	while ((try_count > 0) && (rd_ptr != wr_ptr)) {
+		(void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
+		(void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
+		try_count--;
+	}
+
+	if (try_count == 0) {
+		if ((rd_ptr != 0) && (wr_ptr != 1)) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				" nxge_ipp_disable: port%d failed"
+				" rd_fifo != wr_fifo", portn));
+			goto fail;
+		}
+	}
+	/* disable the IPP */
+	config = nxgep->ipp.config;
+	if ((rs = npi_ipp_config(handle, DISABLE,
+			portn, config)) != NPI_SUCCESS)
+		goto fail;
+
+	/* IPP soft reset */
+	if ((rs = npi_ipp_reset(handle, portn)) != NPI_SUCCESS)
+		goto fail;
+
+	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "<== nxge_ipp_disable: port%d", portn));
+	return (NXGE_OK);
+fail:
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+		"nxge_ipp_disable: Fail to disable IPP Port #%d\n", portn));
+	return (NXGE_ERROR | rs);
+}
+
+/* ARGSUSED */
+nxge_status_t
+nxge_ipp_reset(p_nxge_t nxgep)
+{
+	uint8_t portn;
+	uint32_t config;
+	npi_handle_t handle;
+	npi_status_t rs = NPI_SUCCESS;
+	uint16_t wr_ptr, rd_ptr;
+	uint32_t try_count;
+
+	handle = nxgep->npi_handle;
+	portn = NXGE_GET_PORT_NUM(nxgep->function_num);
+
+	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "==> nxge_ipp_reset: port%d", portn));
+
+	/* disable the IPP */
+	config = nxgep->ipp.config;
+	if ((rs = npi_ipp_config(handle, DISABLE,
+			portn, config)) != NPI_SUCCESS)
+		goto fail;
+
+	/*
+	 * Wait until the IPP DFIFO read and write pointers are equal
+	 */
+	(void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
+	(void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
+	try_count = NXGE_IPP_FIFO_SYNC_TRY_COUNT;
+
+	while ((try_count > 0) && (rd_ptr != wr_ptr)) {
+		(void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
+		(void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
+		try_count--;
+	}
+
+	if (try_count == 0) {
+		if ((rd_ptr != 0) && (wr_ptr != 1)) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				" nxge_ipp_disable: port%d failed"
+				" rd_fifo != wr_fifo", portn));
+			goto fail;
+		}
+	}
+
+	/* IPP soft reset */
+	if ((rs = npi_ipp_reset(handle, portn)) != NPI_SUCCESS) {
+		goto fail;
+	}
+
+	/* to reset control FIFO */
+	if ((rs = npi_zcp_rest_cfifo_port(handle, portn)) != NPI_SUCCESS)
+		goto fail;
+
+	/*
+	 * Making sure that error source is cleared if this is an injected
+	 * error.
+	 */
+	IPP_REG_WR(handle, portn, IPP_ECC_CTRL_REG, 0);
+
+	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "<== nxge_ipp_reset: port%d", portn));
+	return (NXGE_OK);
+fail:
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_ipp_init: Fail to Reset IPP Port #%d\n",
+			portn));
+	return (NXGE_ERROR | rs);
+}
+
+/* ARGSUSED */
+nxge_status_t
+nxge_ipp_enable(p_nxge_t nxgep)
+{
+	uint8_t portn;
+	uint32_t config;
+	npi_handle_t handle;
+	uint32_t pkt_size;
+	npi_status_t rs = NPI_SUCCESS;
+
+	handle = nxgep->npi_handle;
+	portn = NXGE_GET_PORT_NUM(nxgep->function_num);
+
+	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "==> nxge_ipp_enable: port%d", portn));
+
+	config = CFG_IPP | CFG_IPP_DFIFO_ECC_CORRECT | CFG_IPP_DROP_BAD_CRC |
+		CFG_IPP_TCP_UDP_CKSUM;
+	if ((rs = npi_ipp_config(handle, INIT, portn, config)) != NPI_SUCCESS)
+		goto fail;
+	nxgep->ipp.config = config;
+
+	/* Set max packet size */
+	pkt_size = IPP_MAX_PKT_SIZE;
+	if ((rs = npi_ipp_set_max_pktsize(handle, portn,
+			IPP_MAX_PKT_SIZE)) != NPI_SUCCESS)
+		goto fail;
+	nxgep->ipp.max_pkt_size = pkt_size;
+
+	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "<== nxge_ipp_enable: port%d", portn));
+	return (NXGE_OK);
+fail:
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+		"nxge_ipp_init: Fail to Enable IPP Port #%d\n", portn));
+	return (NXGE_ERROR | rs);
+}
+
+/* ARGSUSED */
+nxge_status_t
+nxge_ipp_handle_sys_errors(p_nxge_t nxgep)
+{
+	npi_handle_t handle;
+	npi_status_t rs = NPI_SUCCESS;
+	p_nxge_ipp_stats_t statsp;
+	ipp_status_t istatus;
+	uint8_t portn;
+	p_ipp_errlog_t errlogp;
+	boolean_t rxport_fatal = B_FALSE;
+	nxge_status_t status = NXGE_OK;
+
+	handle = nxgep->npi_handle;
+	statsp = (p_nxge_ipp_stats_t)&nxgep->statsp->ipp_stats;
+	portn = nxgep->mac.portnum;
+
+	errlogp = (p_ipp_errlog_t)&statsp->errlog;
+
+	if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS)
+		return (NXGE_ERROR | rs);
+
+	if (istatus.value == 0) {
+		/*
+		 * The error is not initiated from this port, so just exit.
+		 */
+		return (NXGE_OK);
+	}
+
+	if (istatus.bits.w0.dfifo_missed_sop) {
+		statsp->sop_miss++;
+		if ((rs = npi_ipp_get_dfifo_eopm_rdptr(handle, portn,
+					&errlogp->dfifo_rd_ptr)) != NPI_SUCCESS)
+			return (NXGE_ERROR | rs);
+		if ((rs = npi_ipp_get_state_mach(handle, portn,
+				&errlogp->state_mach)) != NPI_SUCCESS)
+			return (NXGE_ERROR | rs);
+		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+			NXGE_FM_EREPORT_IPP_SOP_MISS);
+		if (statsp->sop_miss < IPP_MAX_ERR_SHOW)
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"nxge_ipp_err_evnts: fatal error: sop_miss\n"));
+		rxport_fatal = B_TRUE;
+	}
+	if (istatus.bits.w0.dfifo_missed_eop) {
+		statsp->eop_miss++;
+		if ((rs = npi_ipp_get_dfifo_eopm_rdptr(handle, portn,
+				&errlogp->dfifo_rd_ptr)) != NPI_SUCCESS)
+			return (NXGE_ERROR | rs);
+		if ((rs = npi_ipp_get_state_mach(handle, portn,
+				&errlogp->state_mach)) != NPI_SUCCESS)
+			return (NXGE_ERROR | rs);
+		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+			NXGE_FM_EREPORT_IPP_EOP_MISS);
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_ipp_err_evnts: fatal error: eop_miss\n"));
+		rxport_fatal = B_TRUE;
+	}
+	if (istatus.bits.w0.dfifo_uncorr_ecc_err) {
+		boolean_t ue_ecc_valid;
+
+		if ((status = nxge_ipp_eccue_valid_check(nxgep,
+				&ue_ecc_valid)) != NXGE_OK)
+			return (status);
+
+		if (ue_ecc_valid) {
+			statsp->dfifo_ue++;
+			if ((rs = npi_ipp_get_ecc_syndrome(handle, portn,
+					&errlogp->ecc_syndrome)) != NPI_SUCCESS)
+				return (NXGE_ERROR | rs);
+			NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+				NXGE_FM_EREPORT_IPP_DFIFO_UE);
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"nxge_ipp_err_evnts: fatal error: dfifo_ue\n"));
+			rxport_fatal = B_TRUE;
+		}
+	}
+	if (istatus.bits.w0.pre_fifo_perr) {
+		statsp->pfifo_perr++;
+		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+			NXGE_FM_EREPORT_IPP_PFIFO_PERR);
+		if (statsp->pfifo_perr < IPP_MAX_ERR_SHOW)
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"nxge_ipp_err_evnts: "
+				"fatal error: pre_pifo_perr\n"));
+		rxport_fatal = B_TRUE;
+	}
+	if (istatus.bits.w0.pre_fifo_overrun) {
+		statsp->pfifo_over++;
+		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+			NXGE_FM_EREPORT_IPP_PFIFO_OVER);
+		if (statsp->pfifo_over < IPP_MAX_ERR_SHOW)
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"nxge_ipp_err_evnts: "
+				"fatal error: pfifo_over\n"));
+		rxport_fatal = B_TRUE;
+	}
+	if (istatus.bits.w0.pre_fifo_underrun) {
+		statsp->pfifo_und++;
+		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+			NXGE_FM_EREPORT_IPP_PFIFO_UND);
+		if (statsp->pfifo_und < IPP_MAX_ERR_SHOW)
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"nxge_ipp_err_evnts: "
+				"fatal error: pfifo_und\n"));
+		rxport_fatal = B_TRUE;
+	}
+	if (istatus.bits.w0.bad_cksum_cnt_ovfl) {
+		statsp->bad_cs_cnt += IPP_BAD_CS_CNT_MASK;
+		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+			NXGE_FM_EREPORT_IPP_BAD_CS_MX);
+		if (statsp->bad_cs_cnt < (IPP_MAX_ERR_SHOW *
+				IPP_BAD_CS_CNT_MASK))
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"nxge_ipp_err_evnts: bad_cs_max\n"));
+	}
+	if (istatus.bits.w0.pkt_discard_cnt_ovfl) {
+		statsp->pkt_dis_cnt += IPP_PKT_DIS_CNT_MASK;
+		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+			NXGE_FM_EREPORT_IPP_PKT_DIS_MX);
+		if (statsp->pkt_dis_cnt < (IPP_MAX_ERR_SHOW *
+				IPP_PKT_DIS_CNT_MASK))
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"nxge_ipp_err_evnts: pkt_dis_max\n"));
+	}
+
+	/*
+	 * Making sure that error source is cleared if this is an injected
+	 * error.
+	 */
+	IPP_REG_WR(handle, portn, IPP_ECC_CTRL_REG, 0);
+
+	if (rxport_fatal) {
+		NXGE_DEBUG_MSG((nxgep, IPP_CTL,
+			" nxge_ipp_handle_sys_errors:"
+			" fatal Error on  Port #%d\n", portn));
+		status = nxge_ipp_fatal_err_recover(nxgep);
+		if (status == NXGE_OK) {
+			FM_SERVICE_RESTORED(nxgep);
+		}
+	}
+	return (status);
+}
+
+/* ARGSUSED */
+void
+nxge_ipp_inject_err(p_nxge_t nxgep, uint32_t err_id)
+{
+	ipp_status_t ipps;
+	ipp_ecc_ctrl_t ecc_ctrl;
+	uint8_t portn = nxgep->mac.portnum;
+
+	switch (err_id) {
+	case NXGE_FM_EREPORT_IPP_DFIFO_UE:
+		ecc_ctrl.value = 0;
+		ecc_ctrl.bits.w0.cor_dbl = 1;
+		ecc_ctrl.bits.w0.cor_1 = 1;
+		ecc_ctrl.bits.w0.cor_lst = 1;
+		cmn_err(CE_NOTE, "!Write 0x%llx to IPP_ECC_CTRL_REG\n",
+			(unsigned long long) ecc_ctrl.value);
+		IPP_REG_WR(nxgep->npi_handle, portn, IPP_ECC_CTRL_REG,
+			ecc_ctrl.value);
+		break;
+
+	case NXGE_FM_EREPORT_IPP_DFIFO_CE:
+		ecc_ctrl.value = 0;
+		ecc_ctrl.bits.w0.cor_sng = 1;
+		ecc_ctrl.bits.w0.cor_1 = 1;
+		ecc_ctrl.bits.w0.cor_snd = 1;
+		cmn_err(CE_NOTE, "!Write 0x%llx to IPP_ECC_CTRL_REG\n",
+			(unsigned long long) ecc_ctrl.value);
+		IPP_REG_WR(nxgep->npi_handle, portn, IPP_ECC_CTRL_REG,
+			ecc_ctrl.value);
+		break;
+
+	case NXGE_FM_EREPORT_IPP_EOP_MISS:
+	case NXGE_FM_EREPORT_IPP_SOP_MISS:
+	case NXGE_FM_EREPORT_IPP_PFIFO_PERR:
+	case NXGE_FM_EREPORT_IPP_ECC_ERR_MAX:
+	case NXGE_FM_EREPORT_IPP_PFIFO_OVER:
+	case NXGE_FM_EREPORT_IPP_PFIFO_UND:
+	case NXGE_FM_EREPORT_IPP_BAD_CS_MX:
+	case NXGE_FM_EREPORT_IPP_PKT_DIS_MX:
+	case NXGE_FM_EREPORT_IPP_RESET_FAIL:
+		IPP_REG_RD(nxgep->npi_handle, portn, IPP_INT_STATUS_REG,
+			&ipps.value);
+		if (err_id == NXGE_FM_EREPORT_IPP_EOP_MISS)
+			ipps.bits.w0.dfifo_missed_eop = 1;
+		else if (err_id == NXGE_FM_EREPORT_IPP_SOP_MISS)
+			ipps.bits.w0.dfifo_missed_sop = 1;
+		else if (err_id == NXGE_FM_EREPORT_IPP_DFIFO_UE)
+			ipps.bits.w0.dfifo_uncorr_ecc_err = 1;
+		else if (err_id == NXGE_FM_EREPORT_IPP_DFIFO_CE)
+			ipps.bits.w0.dfifo_corr_ecc_err = 1;
+		else if (err_id == NXGE_FM_EREPORT_IPP_PFIFO_PERR)
+			ipps.bits.w0.pre_fifo_perr = 1;
+		else if (err_id == NXGE_FM_EREPORT_IPP_ECC_ERR_MAX)
+			ipps.bits.w0.ecc_err_cnt_ovfl = 1;
+		else if (err_id == NXGE_FM_EREPORT_IPP_PFIFO_OVER)
+			ipps.bits.w0.pre_fifo_overrun = 1;
+		else if (err_id == NXGE_FM_EREPORT_IPP_PFIFO_UND)
+			ipps.bits.w0.pre_fifo_underrun = 1;
+		else if (err_id == NXGE_FM_EREPORT_IPP_BAD_CS_MX)
+			ipps.bits.w0.bad_cksum_cnt_ovfl = 1;
+		else if (err_id == NXGE_FM_EREPORT_IPP_PKT_DIS_MX)
+			ipps.bits.w0.pkt_discard_cnt_ovfl = 1;
+		cmn_err(CE_NOTE, "!Write 0x%llx to IPP_INT_STATUS_REG\n",
+			(unsigned long long) ipps.value);
+		IPP_REG_WR(nxgep->npi_handle, portn, IPP_INT_STATUS_REG,
+			ipps.value);
+		break;
+	}
+}
+
+/* ARGSUSED */
+nxge_status_t
+nxge_ipp_fatal_err_recover(p_nxge_t nxgep)
+{
+	npi_handle_t handle;
+	npi_status_t rs = NPI_SUCCESS;
+	nxge_status_t status = NXGE_OK;
+	uint8_t portn;
+	uint16_t wr_ptr;
+	uint16_t rd_ptr;
+	uint32_t try_count;
+	uint32_t dfifo_entries;
+	ipp_status_t istatus;
+	uint32_t d0, d1, d2, d3, d4;
+	int i;
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_ipp_fatal_err_recover"));
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+		"Recovering from RxPort error..."));
+
+	handle = nxgep->npi_handle;
+	portn = nxgep->mac.portnum;
+
+	/*
+	 * Making sure that error source is cleared if this is an injected
+	 * error.
+	 */
+	IPP_REG_WR(handle, portn, IPP_ECC_CTRL_REG, 0);
+
+	/* Disable RxMAC */
+	if (nxge_rx_mac_disable(nxgep) != NXGE_OK)
+		goto fail;
+
+	/* When recovering from IPP, RxDMA channel resets are not necessary */
+	/* Reset ZCP CFIFO */
+	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "port%d Reset ZCP CFIFO...", portn));
+	if ((rs = npi_zcp_rest_cfifo_port(handle, portn)) != NPI_SUCCESS)
+		goto fail;
+
+	/*
+	 * Wait until the IPP DFIFO read and write pointers are equal
+	 */
+	(void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
+	(void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
+	try_count = 512;
+
+	while ((try_count > 0) && (rd_ptr != wr_ptr)) {
+		(void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
+		(void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
+		try_count--;
+	}
+
+	if (try_count == 0) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			" nxge_ipp_reset: port%d IPP stalled..."
+			" rd_fifo_ptr = 0x%x wr_fifo_ptr = 0x%x",
+			portn, rd_ptr, wr_ptr));
+		/*
+		 * This means the fatal error occurred on the first line of the
+		 * fifo. In this case, just reset the IPP without draining the
+		 * PFIFO.
+		 */
+	}
+
+	if ((nxgep->niu_type == NEPTUNE) || (nxgep->niu_type == NEPTUNE_2)) {
+		if (portn < 2)
+			dfifo_entries = IPP_P0_P1_DFIFO_ENTRIES;
+		else
+			dfifo_entries = IPP_P2_P3_DFIFO_ENTRIES;
+	} else if (nxgep->niu_type == N2_NIU) {
+		dfifo_entries = IPP_NIU_DFIFO_ENTRIES;
+	} else
+		goto fail;
+
+	/* Clean up DFIFO SRAM entries */
+	for (i = 0; i < dfifo_entries; i++) {
+		if ((rs = npi_ipp_write_dfifo(handle, portn,
+				i, 0, 0, 0, 0, 0)) != NPI_SUCCESS)
+			goto fail;
+		if ((rs = npi_ipp_read_dfifo(handle, portn, i,
+				&d0, &d1, &d2, &d3, &d4)) != NPI_SUCCESS)
+			goto fail;
+	}
+
+	/* Clear PFIFO DFIFO status bits */
+	if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS)
+		goto fail;
+	if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS)
+		goto fail;
+
+	/* Reset IPP */
+	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "port%d Reset IPP...", portn));
+	if ((rs = npi_ipp_reset(handle, portn)) != NPI_SUCCESS)
+		goto fail;
+
+	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "port%d Reset RxMAC...", portn));
+	if (nxge_rx_mac_reset(nxgep) != NXGE_OK)
+		goto fail;
+
+	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "port%d Initialize RxMAC...", portn));
+	if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK)
+		goto fail;
+
+	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "port%d Enable RxMAC...", portn));
+	if (nxge_rx_mac_enable(nxgep) != NXGE_OK)
+		goto fail;
+
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+		"Recovery Sucessful, RxPort Restored"));
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_ipp_fatal_err_recover"));
+
+	return (NXGE_OK);
+fail:
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
+	return (status | rs);
+}
+
+/* ARGSUSED */
+nxge_status_t
+nxge_ipp_eccue_valid_check(p_nxge_t nxgep, boolean_t *valid)
+{
+	npi_handle_t handle;
+	npi_status_t rs = NPI_SUCCESS;
+	uint8_t portn;
+	uint16_t rd_ptr;
+	uint16_t wr_ptr;
+	uint16_t curr_rd_ptr;
+	uint16_t curr_wr_ptr;
+	uint32_t stall_cnt;
+	uint32_t d0, d1, d2, d3, d4;
+
+	handle = nxgep->npi_handle;
+	portn = nxgep->mac.portnum;
+	*valid = B_TRUE;
+
+	if ((rs = npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr))
+		!= NPI_SUCCESS)
+		goto fail;
+	if ((rs = npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr))
+		!= NPI_SUCCESS)
+		goto fail;
+
+	if (rd_ptr == wr_ptr) {
+		cmn_err(CE_NOTE,
+			"nxge_ipp_eccue_valid_check: rd_ptr = %d wr_ptr = %d\n",
+			rd_ptr, wr_ptr);
+		*valid = B_FALSE;	/* IPP not stuck */
+	} else {
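+		/*
+		 * Poll the DFIFO pointers; if they stay frozen for 16
+		 * consecutive reads the FIFO is considered stalled and the
+		 * uncorrectable ECC error is treated as genuine.
+		 */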
+		stall_cnt = 0;
+		while (stall_cnt < 16) {
+			if ((rs = npi_ipp_get_dfifo_rd_ptr(handle,
+					portn, &curr_rd_ptr)) != NPI_SUCCESS)
+				goto fail;
+			if ((rs = npi_ipp_get_dfifo_wr_ptr(handle,
+					portn, &curr_wr_ptr)) != NPI_SUCCESS)
+				goto fail;
+
+			if ((rd_ptr == curr_rd_ptr) && (wr_ptr == curr_wr_ptr))
+				stall_cnt++;
+			else {
+				*valid = B_FALSE;
+				break;
+			}
+		}
+
+		if (*valid) {
+			/* further check to see if ECC UE is valid */
+			if ((rs = npi_ipp_read_dfifo(handle, portn,
+					rd_ptr, &d0, &d1, &d2, &d3,
+					&d4)) != NPI_SUCCESS)
+				goto fail;
+			if ((d4 & 0x1) == 0)	/* Not the 1st line */
+				*valid = B_FALSE;
+		}
+	}
+	return (NXGE_OK);
+fail:
+	return (NXGE_ERROR | rs);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nxge/nxge_kstats.c	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,2345 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#include <sys/nxge/nxge_impl.h>
+#include <inet/mi.h>
+
+#define	RDC_NAME_FORMAT1	"RDC Channel"
+#define	TDC_NAME_FORMAT1	"TDC Channel"
+#define	CH_NAME_FORMAT		" %d Stats"
+#define	TDC_NAME_FORMAT		"TDC Channel %d Stats"
+#define	RDC_NAME_FORMAT		"RDC Channel %d Stats"
+
+void nxge_mac_init_kstats(p_nxge_t, struct kstat *);
+void nxge_xmac_init_kstats(struct kstat *);
+void nxge_bmac_init_kstats(struct kstat *);
+
+/* ARGSUSED */
+void
+nxge_init_statsp(p_nxge_t nxgep)
+{
+	size_t stats_size;
+
+	NXGE_DEBUG_MSG((nxgep, KST_CTL, "==> nxge_init_statsp"));
+
+	stats_size = sizeof (nxge_stats_t);
+	nxgep->statsp = KMEM_ZALLOC(stats_size, KM_SLEEP);
+	nxgep->statsp->stats_size = stats_size;
+
+	NXGE_DEBUG_MSG((nxgep, KST_CTL, " <== nxge_init_statsp"));
+}
+
+typedef struct {
+	uint8_t index;
+	uint8_t type;
+	char *name;
+} nxge_kstat_index_t;
+
+typedef enum {
+	RDC_STAT_PACKETS = 0,
+	RDC_STAT_BYTES,
+	RDC_STAT_ERRORS,
+	RDC_STAT_DCF_ERR,
+	RDC_STAT_RCR_ACK_ERR,
+	RDC_STAT_RCR_DC_FIFOFLOW_ERR,
+	RDC_STAT_RCR_SHA_PAR_ERR,
+	RDC_STAT_RBR_PRE_PAR_ERR,
+	RDC_STAT_WRED_DROP,
+	RDC_STAT_RBR_PRE_EMTY,
+	RDC_STAT_RCR_SHADOW_FULL,
+	RDC_STAT_RBR_TMOUT,
+	RDC_STAT_RSP_CNT_ERR,
+	RDC_STAT_BYTE_EN_BUS,
+	RDC_STAT_RSP_DAT_ERR,
+	RDC_STAT_COMPL_L2_ERR,
+	RDC_STAT_COMPL_L4_CKSUM_ERR,
+	RDC_STAT_COMPL_ZCP_SOFT_ERR,
+	RDC_STAT_COMPL_FFLP_SOFT_ERR,
+	RDC_STAT_CONFIG_ERR,
+	RDC_STAT_RCRINCON,
+	RDC_STAT_RCRFULL,
+	RDC_STAT_RBR_EMPTY,
+	RDC_STAT_RBR_FULL,
+	RDC_STAT_RBRLOGPAGE,
+	RDC_STAT_CFIGLOGPAGE,
+	RDC_STAT_PORT_DROP_PKT,
+	RDC_STAT_RCRTO,
+	RDC_STAT_RCRTHRES,
+	RDC_STAT_MEX,
+	RDC_STAT_ID_MIS,
+	RDC_STAT_ZCP_EOP,
+	RDC_STAT_IPP_EOP,
+	RDC_STAT_END
+} nxge_rdc_stat_index_t;
+
+nxge_kstat_index_t nxge_rdc_stats[] = {
+	{RDC_STAT_PACKETS, KSTAT_DATA_UINT64, "rdc_packets"},
+	{RDC_STAT_BYTES, KSTAT_DATA_UINT64, "rdc_bytes"},
+	{RDC_STAT_ERRORS, KSTAT_DATA_ULONG, "rdc_errors"},
+	{RDC_STAT_DCF_ERR, KSTAT_DATA_ULONG, "rdc_dcf_err"},
+	{RDC_STAT_RCR_ACK_ERR, KSTAT_DATA_ULONG, "rdc_rcr_ack_err"},
+	{RDC_STAT_RCR_DC_FIFOFLOW_ERR, KSTAT_DATA_ULONG, "rdc_dc_fifoflow_err"},
+	{RDC_STAT_RCR_SHA_PAR_ERR, KSTAT_DATA_ULONG, "rdc_rcr_sha_par_err"},
+	{RDC_STAT_RBR_PRE_PAR_ERR, KSTAT_DATA_ULONG, "rdc_rbr_pre_par_err"},
+	{RDC_STAT_WRED_DROP, KSTAT_DATA_ULONG, "rdc_wred_drop"},
+	{RDC_STAT_RBR_PRE_EMTY, KSTAT_DATA_ULONG, "rdc_rbr_pre_empty"},
+	{RDC_STAT_RCR_SHADOW_FULL, KSTAT_DATA_ULONG, "rdc_rcr_shadow_full"},
+	{RDC_STAT_RBR_TMOUT, KSTAT_DATA_ULONG, "rdc_rbr_tmout"},
+	{RDC_STAT_RSP_CNT_ERR, KSTAT_DATA_ULONG, "rdc_rsp_cnt_err"},
+	{RDC_STAT_BYTE_EN_BUS, KSTAT_DATA_ULONG, "rdc_byte_en_bus"},
+	{RDC_STAT_RSP_DAT_ERR, KSTAT_DATA_ULONG, "rdc_rsp_dat_err"},
+	{RDC_STAT_COMPL_L2_ERR, KSTAT_DATA_ULONG, "rdc_compl_l2_err"},
+	{RDC_STAT_COMPL_L4_CKSUM_ERR, KSTAT_DATA_ULONG, "rdc_compl_l4_cksum"},
+	{RDC_STAT_COMPL_ZCP_SOFT_ERR, KSTAT_DATA_ULONG,
+		"rdc_compl_zcp_soft_err"},
+	{RDC_STAT_COMPL_FFLP_SOFT_ERR, KSTAT_DATA_ULONG,
+		"rdc_compl_fflp_soft_err"},
+	{RDC_STAT_CONFIG_ERR, KSTAT_DATA_ULONG, "rdc_config_err"},
+	{RDC_STAT_RCRINCON, KSTAT_DATA_ULONG, "rdc_rcrincon"},
+	{RDC_STAT_RCRFULL, KSTAT_DATA_ULONG, "rdc_rcrfull"},
+	{RDC_STAT_RBR_EMPTY, KSTAT_DATA_ULONG, "rdc_rbr_empty"},
+	{RDC_STAT_RBR_FULL, KSTAT_DATA_ULONG, "rdc_rbrfull"},
+	{RDC_STAT_RBRLOGPAGE, KSTAT_DATA_ULONG, "rdc_rbrlogpage"},
+	{RDC_STAT_CFIGLOGPAGE, KSTAT_DATA_ULONG, "rdc_cfiglogpage"},
+	{RDC_STAT_PORT_DROP_PKT, KSTAT_DATA_ULONG, "rdc_port_drop_pkt"},
+	{RDC_STAT_RCRTO, KSTAT_DATA_ULONG, "rdc_rcrto"},
+	{RDC_STAT_RCRTHRES, KSTAT_DATA_ULONG, "rdc_rcrthres"},
+	{RDC_STAT_MEX, KSTAT_DATA_ULONG, "rdc_mex"},
+	{RDC_STAT_ID_MIS, KSTAT_DATA_ULONG, "rdc_id_mismatch"},
+	{RDC_STAT_ZCP_EOP, KSTAT_DATA_ULONG, "rdc_zcp_eop"},
+	{RDC_STAT_IPP_EOP, KSTAT_DATA_ULONG, "rdc_ipp_eop"},
+	{RDC_STAT_END, NULL, NULL}
+};
+
+typedef enum {
+	RDC_SYS_STAT_PRE_PAR_ERR = 0,
+	RDC_SYS_STAT_SHA_PAR_ERR,
+	RDC_SYS_STAT_ID_MISMATCH,
+	RDC_SYS_STAT_IPP_EOP_ERR,
+	RDC_SYS_STAT_ZCP_EOP_ERR,
+	RDC_SYS_STAT_END
+} nxge_rdc_sys_stat_idx_t;
+
+nxge_kstat_index_t nxge_rdc_sys_stats[] = {
+	{RDC_SYS_STAT_PRE_PAR_ERR, KSTAT_DATA_UINT64, "rdc_pre_par_err"},
+	{RDC_SYS_STAT_SHA_PAR_ERR, KSTAT_DATA_UINT64, "rdc_sha_par_err"},
+	{RDC_SYS_STAT_ID_MISMATCH, KSTAT_DATA_UINT64, "rdc_stat_id_mismatch"},
+	{RDC_SYS_STAT_IPP_EOP_ERR, KSTAT_DATA_UINT64, "rdc_ipp_eop_err"},
+	{RDC_SYS_STAT_ZCP_EOP_ERR, KSTAT_DATA_UINT64, "rdc_zcp_eop_err"},
+	{RDC_SYS_STAT_END, NULL, NULL}
+};
+
+typedef enum {
+	TDC_STAT_PACKETS = 0,
+	TDC_STAT_BYTES,
+	TDC_STAT_ERRORS,
+	TDC_STAT_TX_INITS,
+	TDC_STAT_TX_NO_BUF,
+	TDC_STAT_MBOX_ERR,
+	TDC_STAT_PKT_SIZE_ERR,
+	TDC_STAT_TX_RING_OFLOW,
+	TDC_STAT_PREF_BUF_ECC_ERR,
+	TDC_STAT_NACK_PREF,
+	TDC_STAT_NACK_PKT_RD,
+	TDC_STAT_CONF_PART_ERR,
+	TDC_STAT_PKT_PRT_ERR,
+	TDC_STAT_RESET_FAIL,
+	TDC_STAT_TX_STARTS,
+	TDC_STAT_TX_NOCANPUT,
+	TDC_STAT_TX_MSGDUP_FAIL,
+	TDC_STAT_TX_ALLOCB_FAIL,
+	TDC_STAT_TX_NO_DESC,
+	TDC_STAT_TX_DMA_BIND_FAIL,
+	TDC_STAT_TX_UFLOW,
+	TDC_STAT_TX_HDR_PKTS,
+	TDC_STAT_TX_DDI_PKTS,
+	TDC_STAT_TX_DVMA_PKTS,
+	TDC_STAT_TX_MAX_PEND,
+	TDC_STAT_END
+} nxge_tdc_stats_index_t;
+
+nxge_kstat_index_t nxge_tdc_stats[] = {
+	{TDC_STAT_PACKETS, KSTAT_DATA_UINT64, "tdc_packets"},
+	{TDC_STAT_BYTES, KSTAT_DATA_UINT64, "tdc_bytes"},
+	{TDC_STAT_ERRORS, KSTAT_DATA_UINT64, "tdc_errors"},
+	{TDC_STAT_TX_INITS, KSTAT_DATA_ULONG, "tdc_tx_inits"},
+	{TDC_STAT_TX_NO_BUF, KSTAT_DATA_ULONG, "tdc_tx_no_buf"},
+	{TDC_STAT_MBOX_ERR, KSTAT_DATA_ULONG, "tdc_mbox_err"},
+	{TDC_STAT_PKT_SIZE_ERR, KSTAT_DATA_ULONG, "tdc_pkt_size_err"},
+	{TDC_STAT_TX_RING_OFLOW,
+		KSTAT_DATA_ULONG, "tdc_tx_ring_oflow"},
+	{TDC_STAT_PREF_BUF_ECC_ERR,
+		KSTAT_DATA_ULONG, "tdc_pref_buf_err_err"},
+	{TDC_STAT_NACK_PREF, KSTAT_DATA_ULONG, "tdc_nack_pref"},
+	{TDC_STAT_NACK_PKT_RD, KSTAT_DATA_ULONG, "tdc_nack_pkt_rd"},
+	{TDC_STAT_CONF_PART_ERR,
+		KSTAT_DATA_ULONG, "tdc_conf_part_err"},
+	{TDC_STAT_PKT_PRT_ERR, KSTAT_DATA_ULONG, "tdc_pkt_prt_err"},
+	{TDC_STAT_RESET_FAIL, KSTAT_DATA_ULONG, "tdc_reset_fail"},
+	{TDC_STAT_TX_STARTS, KSTAT_DATA_ULONG, "tdc_tx_starts"},
+	{TDC_STAT_TX_NOCANPUT, KSTAT_DATA_ULONG, "tdc_tx_nocanput"},
+	{TDC_STAT_TX_MSGDUP_FAIL, KSTAT_DATA_ULONG, "tdc_tx_msgdup_fail"},
+	{TDC_STAT_TX_ALLOCB_FAIL, KSTAT_DATA_ULONG, "tdc_tx_allocb_fail"},
+	{TDC_STAT_TX_NO_DESC, KSTAT_DATA_ULONG, "tdc_tx_no_desc"},
+	{TDC_STAT_TX_DMA_BIND_FAIL, KSTAT_DATA_ULONG, "tdc_tx_dma_bind_fail"},
+	{TDC_STAT_TX_UFLOW, KSTAT_DATA_ULONG, "tdc_tx_uflow"},
+	{TDC_STAT_TX_HDR_PKTS, KSTAT_DATA_ULONG, "tdc_tx_hdr_pkts"},
+	{TDC_STAT_TX_DDI_PKTS, KSTAT_DATA_ULONG, "tdc_tx_ddi_pkts"},
+	{TDC_STAT_TX_DVMA_PKTS, KSTAT_DATA_ULONG, "tdc_tx_dvma_pkts"},
+	{TDC_STAT_TX_MAX_PEND, KSTAT_DATA_ULONG, "tdc_tx_max_pend"},
+	{TDC_STAT_END, NULL, NULL}
+};
+
+/* IPP Statistics definitions */
+typedef enum {
+	IPP_STAT_EOP_MISS = 0,
+	IPP_STAT_SOP_MISS,
+	IPP_STAT_DFIFO_UE,
+	IPP_STAT_ECC_ERR,
+	IPP_STAT_PFIFO_OVER,
+	IPP_STAT_PFIFO_UND,
+	IPP_STAT_BAD_CS,
+	IPP_STAT_BAD_DIS,
+	IPP_STAT_CS_FAIL,
+	IPP_STAT_END
+} nxge_ipp_stat_index_t;
+
+nxge_kstat_index_t nxge_ipp_stats[] = {
+	{IPP_STAT_EOP_MISS, KSTAT_DATA_ULONG, "rxipp_eop_miss"},
+	{IPP_STAT_SOP_MISS, KSTAT_DATA_ULONG, "rxipp_sop_miss"},
+	{IPP_STAT_DFIFO_UE, KSTAT_DATA_ULONG, "rxipp_dfifo_ue"},
+	{IPP_STAT_ECC_ERR, KSTAT_DATA_ULONG, "rxipp_ecc_err"},
+	{IPP_STAT_PFIFO_OVER, KSTAT_DATA_ULONG, "rxipp_pfifo_over"},
+	{IPP_STAT_PFIFO_UND, KSTAT_DATA_ULONG, "rxipp_pfifo_und"},
+	{IPP_STAT_BAD_CS, KSTAT_DATA_ULONG, "rxipp_bad_cs"},
+	{IPP_STAT_BAD_DIS, KSTAT_DATA_ULONG, "rxipp_bad_dis"},
+	{IPP_STAT_CS_FAIL, KSTAT_DATA_ULONG, "rxipp_cs_fail"},
+	{IPP_STAT_END, NULL, NULL}
+};
+
+/* TXC Statistics definitions */
+typedef enum {
+	TXC_STAT_PKT_STUFFED = 0,
+	TXC_STAT_PKT_XMIT,
+	TXC_STAT_RO_CORRECT_ERR,
+	TXC_STAT_RO_UNCORRECT_ERR,
+	TXC_STAT_SF_CORRECT_ERR,
+	TXC_STAT_SF_UNCORRECT_ERR,
+	TXC_STAT_ADDRESS_FAILED,
+	TXC_STAT_DMA_FAILED,
+	TXC_STAT_LENGTH_FAILED,
+	TXC_STAT_PKT_ASSY_DEAD,
+	TXC_STAT_REORDER_ERR,
+	TXC_STAT_END
+} nxge_txc_stat_index_t;
+
+nxge_kstat_index_t nxge_txc_stats[] = {
+	{TXC_STAT_PKT_STUFFED, KSTAT_DATA_ULONG, "txc_pkt_stuffed"},
+	{TXC_STAT_PKT_XMIT, KSTAT_DATA_ULONG, "txc_pkt_xmit"},
+	{TXC_STAT_RO_CORRECT_ERR, KSTAT_DATA_ULONG, "txc_ro_correct_err"},
+	{TXC_STAT_RO_UNCORRECT_ERR, KSTAT_DATA_ULONG, "txc_ro_uncorrect_err"},
+	{TXC_STAT_SF_CORRECT_ERR, KSTAT_DATA_ULONG, "txc_sf_correct_err"},
+	{TXC_STAT_SF_UNCORRECT_ERR, KSTAT_DATA_ULONG, "txc_sf_uncorrect_err"},
+	{TXC_STAT_ADDRESS_FAILED, KSTAT_DATA_ULONG, "txc_address_failed"},
+	{TXC_STAT_DMA_FAILED, KSTAT_DATA_ULONG, "txc_dma_failed"},
+	{TXC_STAT_LENGTH_FAILED, KSTAT_DATA_ULONG, "txc_length_failed"},
+	{TXC_STAT_PKT_ASSY_DEAD, KSTAT_DATA_ULONG, "txc_pkt_assy_dead"},
+	{TXC_STAT_REORDER_ERR, KSTAT_DATA_ULONG, "txc_reorder_err"},
+	{TXC_STAT_END, NULL, NULL}
+};
+
+typedef enum {
+	XMAC_STAT_TX_FRAME_CNT = 0,
+	XMAC_STAT_TX_UNDERFLOW_ERR,
+	XMAC_STAT_TX_MAXPKTSIZE_ERR,
+	XMAC_STAT_TX_OVERFLOW_ERR,
+	XMAC_STAT_TX_FIFO_XFR_ERR,
+	XMAC_STAT_TX_BYTE_CNT,
+	XMAC_STAT_RX_FRAME_CNT,
+	XMAC_STAT_RX_UNDERFLOW_ERR,
+	XMAC_STAT_RX_OVERFLOW_ERR,
+	XMAC_STAT_RX_CRC_ERR_CNT,
+	XMAC_STAT_RX_LEN_ERR_CNT,
+	XMAC_STAT_RX_VIOL_ERR_CNT,
+	XMAC_STAT_RX_BYTE_CNT,
+	XMAC_STAT_RX_HIST1_CNT,
+	XMAC_STAT_RX_HIST2_CNT,
+	XMAC_STAT_RX_HIST3_CNT,
+	XMAC_STAT_RX_HIST4_CNT,
+	XMAC_STAT_RX_HIST5_CNT,
+	XMAC_STAT_RX_HIST6_CNT,
+	XMAC_STAT_RX_HIST7_CNT,
+	XMAC_STAT_RX_BROADCAST_CNT,
+	XMAC_STAT_RX_MULT_CNT,
+	XMAC_STAT_RX_FRAG_CNT,
+	XMAC_STAT_RX_FRAME_ALIGN_ERR_CNT,
+	XMAC_STAT_RX_LINKFAULT_ERR_CNT,
+	XMAC_STAT_RX_REMOTEFAULT_ERR,
+	XMAC_STAT_RX_LOCALFAULT_ERR,
+	XMAC_STAT_RX_PAUSE_CNT,
+	XMAC_STAT_TX_PAUSE_STATE,
+	XMAC_STAT_TX_NOPAUSE_STATE,
+	XMAC_STAT_XPCS_DESKEW_ERR_CNT,
+#ifdef	NXGE_DEBUG_SYMBOL_ERR
+	XMAC_STAT_XPCS_SYMBOL_L0_ERR_CNT,
+	XMAC_STAT_XPCS_SYMBOL_L1_ERR_CNT,
+	XMAC_STAT_XPCS_SYMBOL_L2_ERR_CNT,
+	XMAC_STAT_XPCS_SYMBOL_L3_ERR_CNT,
+#endif
+	XMAC_STAT_END
+} nxge_xmac_stat_index_t;
+
+nxge_kstat_index_t nxge_xmac_stats[] = {
+	{XMAC_STAT_TX_FRAME_CNT, KSTAT_DATA_ULONG, "txmac_frame_cnt"},
+	{XMAC_STAT_TX_UNDERFLOW_ERR, KSTAT_DATA_ULONG, "tmac_underflow_err"},
+	{XMAC_STAT_TX_MAXPKTSIZE_ERR, KSTAT_DATA_ULONG, "txmac_maxpktsize_err"},
+	{XMAC_STAT_TX_OVERFLOW_ERR, KSTAT_DATA_ULONG, "txmac_overflow_err"},
+	{XMAC_STAT_TX_FIFO_XFR_ERR, KSTAT_DATA_ULONG, "txmac_fifo_xfr_err"},
+	{XMAC_STAT_TX_BYTE_CNT, KSTAT_DATA_ULONG, "txmac_byte_cnt"},
+	{XMAC_STAT_RX_FRAME_CNT, KSTAT_DATA_ULONG, "rxmac_frame_cnt"},
+	{XMAC_STAT_RX_UNDERFLOW_ERR, KSTAT_DATA_ULONG, "rxmac_underflow_err"},
+	{XMAC_STAT_RX_OVERFLOW_ERR, KSTAT_DATA_ULONG, "rxmac_overflow_err"},
+	{XMAC_STAT_RX_CRC_ERR_CNT, KSTAT_DATA_ULONG, "rxmac_crc_err"},
+	{XMAC_STAT_RX_LEN_ERR_CNT, KSTAT_DATA_ULONG, "rxmac_length_err"},
+	{XMAC_STAT_RX_VIOL_ERR_CNT, KSTAT_DATA_ULONG, "rxmac_code_violations"},
+	{XMAC_STAT_RX_BYTE_CNT, KSTAT_DATA_ULONG, "rxmac_byte_cnt"},
+	{XMAC_STAT_RX_HIST1_CNT, KSTAT_DATA_ULONG, "rxmac_64_cnt"},
+	{XMAC_STAT_RX_HIST2_CNT, KSTAT_DATA_ULONG, "rxmac_65_127_cnt"},
+	{XMAC_STAT_RX_HIST3_CNT, KSTAT_DATA_ULONG, "rxmac_128_255_cnt"},
+	{XMAC_STAT_RX_HIST4_CNT, KSTAT_DATA_ULONG, "rxmac_256_511_cnt"},
+	{XMAC_STAT_RX_HIST5_CNT, KSTAT_DATA_ULONG, "rxmac_512_1023_cnt"},
+	{XMAC_STAT_RX_HIST6_CNT, KSTAT_DATA_ULONG, "rxmac_1024_1522_cnt"},
+	{XMAC_STAT_RX_HIST7_CNT, KSTAT_DATA_ULONG, "rxmac_jumbo_cnt"},
+	{XMAC_STAT_RX_BROADCAST_CNT, KSTAT_DATA_ULONG, "rxmac_broadcast_cnt"},
+	{XMAC_STAT_RX_MULT_CNT, KSTAT_DATA_ULONG, "rxmac_multicast_cnt"},
+	{XMAC_STAT_RX_FRAG_CNT, KSTAT_DATA_ULONG, "rxmac_fragment_cnt"},
+	{XMAC_STAT_RX_FRAME_ALIGN_ERR_CNT,
+		KSTAT_DATA_ULONG, "rxmac_alignment_err"},
+	{XMAC_STAT_RX_LINKFAULT_ERR_CNT,
+		KSTAT_DATA_ULONG, "rxmac_linkfault_errs"},
+	{XMAC_STAT_RX_REMOTEFAULT_ERR,
+		KSTAT_DATA_ULONG, "rxmac_remote_faults"},
+	{XMAC_STAT_RX_LOCALFAULT_ERR,
+		KSTAT_DATA_ULONG, "rxmac_local_faults"},
+	{XMAC_STAT_RX_PAUSE_CNT, KSTAT_DATA_ULONG, "rxmac_pause_cnt"},
+	{XMAC_STAT_TX_PAUSE_STATE, KSTAT_DATA_ULONG, "txmac_pause_state"},
+	{XMAC_STAT_TX_NOPAUSE_STATE, KSTAT_DATA_ULONG, "txmac_nopause_state"},
+	{XMAC_STAT_XPCS_DESKEW_ERR_CNT,
+		KSTAT_DATA_ULONG, "xpcs_deskew_err_cnt"},
+#ifdef	NXGE_DEBUG_SYMBOL_ERR
+	{XMAC_STAT_XPCS_SYMBOL_L0_ERR_CNT,
+		KSTAT_DATA_ULONG, "xpcs_ln0_symbol_err_cnt"},
+	{XMAC_STAT_XPCS_SYMBOL_L1_ERR_CNT,
+		KSTAT_DATA_ULONG, "xpcs_ln1_symbol_err_cnt"},
+	{XMAC_STAT_XPCS_SYMBOL_L2_ERR_CNT,
+		KSTAT_DATA_ULONG, "xpcs_ln2_symbol_err_cnt"},
+	{XMAC_STAT_XPCS_SYMBOL_L3_ERR_CNT,
+		KSTAT_DATA_ULONG, "xpcs_ln3_symbol_err_cnt"},
+#endif
+	{XMAC_STAT_END, NULL, NULL}
+};
+
+typedef enum {
+	BMAC_STAT_TX_FRAME_CNT = 0,
+	BMAC_STAT_TX_UNDERRUN_ERR,
+	BMAC_STAT_TX_MAX_PKT_ERR,
+	BMAC_STAT_TX_BYTE_CNT,
+	BMAC_STAT_RX_FRAME_CNT,
+	BMAC_STAT_RX_BYTE_CNT,
+	BMAC_STAT_RX_OVERFLOW_ERR,
+	BMAC_STAT_RX_ALIGN_ERR_CNT,
+	BMAC_STAT_RX_CRC_ERR_CNT,
+	BMAC_STAT_RX_LEN_ERR_CNT,
+	BMAC_STAT_RX_VIOL_ERR_CNT,
+	BMAC_STAT_RX_PAUSE_CNT,
+	BMAC_STAT_RX_PAUSE_STATE,
+	BMAC_STAT_RX_NOPAUSE_STATE,
+	BMAC_STAT_END
+} nxge_bmac_stat_index_t;
+
+nxge_kstat_index_t nxge_bmac_stats[] = {
+	{BMAC_STAT_TX_FRAME_CNT, KSTAT_DATA_ULONG, "txmac_frame_cnt"},
+	{BMAC_STAT_TX_UNDERRUN_ERR, KSTAT_DATA_ULONG, "txmac_underrun_err"},
+	{BMAC_STAT_TX_MAX_PKT_ERR, KSTAT_DATA_ULONG, "txmac_max_pkt_err"},
+	{BMAC_STAT_TX_BYTE_CNT, KSTAT_DATA_ULONG, "txmac_byte_cnt"},
+	{BMAC_STAT_RX_FRAME_CNT, KSTAT_DATA_ULONG, "rxmac_frame_cnt"},
+	{BMAC_STAT_RX_BYTE_CNT, KSTAT_DATA_ULONG, "rxmac_byte_cnt"},
+	{BMAC_STAT_RX_OVERFLOW_ERR, KSTAT_DATA_ULONG, "rxmac_overflow_err"},
+	{BMAC_STAT_RX_ALIGN_ERR_CNT, KSTAT_DATA_ULONG, "rxmac_align_err_cnt"},
+	{BMAC_STAT_RX_CRC_ERR_CNT, KSTAT_DATA_ULONG, "rxmac_crc_err_cnt"},
+	{BMAC_STAT_RX_LEN_ERR_CNT, KSTAT_DATA_ULONG, "rxmac_len_err_cnt"},
+	{BMAC_STAT_RX_VIOL_ERR_CNT, KSTAT_DATA_ULONG, "rxmac_viol_err_cnt"},
+	{BMAC_STAT_RX_PAUSE_CNT, KSTAT_DATA_ULONG, "rxmac_pause_cnt"},
+	{BMAC_STAT_RX_PAUSE_STATE, KSTAT_DATA_ULONG, "txmac_pause_state"},
+	{BMAC_STAT_RX_NOPAUSE_STATE, KSTAT_DATA_ULONG, "tx_nopause_state"},
+	{BMAC_STAT_END, NULL, NULL}
+};
+
+typedef enum {
+	ZCP_STAT_ERRORS,
+	ZCP_STAT_INITS,
+	ZCP_STAT_RRFIFO_UNDERRUN,
+	ZCP_STAT_RRFIFO_OVERRUN,
+	ZCP_STAT_RSPFIFO_UNCORR_ERR,
+	ZCP_STAT_BUFFER_OVERFLOW,
+	ZCP_STAT_STAT_TBL_PERR,
+	ZCP_STAT_DYN_TBL_PERR,
+	ZCP_STAT_BUF_TBL_PERR,
+	ZCP_STAT_TT_PROGRAM_ERR,
+	ZCP_STAT_RSP_TT_INDEX_ERR,
+	ZCP_STAT_SLV_TT_INDEX_ERR,
+	ZCP_STAT_ZCP_TT_INDEX_ERR,
+	ZCP_STAT_ZCP_ACCESS_FAIL,
+	ZCP_CFIFO_ECC,
+	ZCP_STAT_END
+} nxge_zcp_stat_index_t;
+
+nxge_kstat_index_t nxge_zcp_stats[] = {
+	{ZCP_STAT_ERRORS, KSTAT_DATA_ULONG, "zcp_erros"},
+	{ZCP_STAT_INITS, KSTAT_DATA_ULONG, "zcp_inits"},
+	{ZCP_STAT_RRFIFO_UNDERRUN, KSTAT_DATA_ULONG, "zcp_rrfifo_underrun"},
+	{ZCP_STAT_RRFIFO_OVERRUN, KSTAT_DATA_ULONG, "zcp_rrfifo_overrun"},
+	{ZCP_STAT_RSPFIFO_UNCORR_ERR, KSTAT_DATA_ULONG,
+	"zcp_rspfifo_uncorr_err"},
+	{ZCP_STAT_BUFFER_OVERFLOW, KSTAT_DATA_ULONG, "zcp_buffer_overflow"},
+	{ZCP_STAT_STAT_TBL_PERR, KSTAT_DATA_ULONG, "zcp_stat_tbl_perr"},
+	{ZCP_STAT_DYN_TBL_PERR, KSTAT_DATA_ULONG, "zcp_dyn_tbl_perr"},
+	{ZCP_STAT_BUF_TBL_PERR, KSTAT_DATA_ULONG, "zcp_buf_tbl_perr"},
+	{ZCP_STAT_TT_PROGRAM_ERR, KSTAT_DATA_ULONG, "zcp_tt_program_err"},
+	{ZCP_STAT_RSP_TT_INDEX_ERR, KSTAT_DATA_ULONG, "zcp_rsp_tt_index_err"},
+	{ZCP_STAT_SLV_TT_INDEX_ERR, KSTAT_DATA_ULONG, "zcp_slv_tt_index_err"},
+	{ZCP_STAT_ZCP_TT_INDEX_ERR, KSTAT_DATA_ULONG, "zcp_zcp_tt_index_err"},
+	{ZCP_STAT_ZCP_ACCESS_FAIL, KSTAT_DATA_ULONG, "zcp_access_fail"},
+	{ZCP_STAT_ZCP_ACCESS_FAIL, KSTAT_DATA_ULONG, "zcp_cfifo_ecc"},
+	{ZCP_STAT_END, NULL, NULL}
+};
+
+typedef enum {
+	FFLP_STAT_TCAM_PERR,
+	FFLP_STAT_TCAM_ECC_ERR,
+	FFLP_STAT_VLAN_PERR,
+	FFLP_STAT_HASH_LOOKUP_ERR,
+	FFLP_STAT_HASH_P0_PIO_ERR,
+	FFLP_STAT_HASH_P1_PIO_ERR,
+	FFLP_STAT_HASH_P2_PIO_ERR,
+	FFLP_STAT_HASH_P3_PIO_ERR,
+	FFLP_STAT_HASH_P4_PIO_ERR,
+	FFLP_STAT_HASH_P5_PIO_ERR,
+	FFLP_STAT_HASH_P6_PIO_ERR,
+	FFLP_STAT_HASH_P7_PIO_ERR,
+	FFLP_STAT_END
+} nxge_fflp_stat_index_t;
+
+nxge_kstat_index_t nxge_fflp_stats[] = {
+	{FFLP_STAT_TCAM_PERR, KSTAT_DATA_ULONG, "fflp_tcam_perr"},
+	{FFLP_STAT_TCAM_ECC_ERR, KSTAT_DATA_ULONG, "fflp_tcam_ecc_err"},
+	{FFLP_STAT_VLAN_PERR, KSTAT_DATA_ULONG, "fflp_vlan_perr"},
+	{FFLP_STAT_HASH_LOOKUP_ERR, KSTAT_DATA_ULONG, "fflp_hash_lookup_err"},
+	{FFLP_STAT_HASH_P0_PIO_ERR, KSTAT_DATA_ULONG, "fflp_hash_p0_pio_err"},
+	{FFLP_STAT_HASH_P1_PIO_ERR, KSTAT_DATA_ULONG, "fflp_hash_p1_pio_err"},
+	{FFLP_STAT_HASH_P2_PIO_ERR, KSTAT_DATA_ULONG, "fflp_hash_p2_pio_err"},
+	{FFLP_STAT_HASH_P3_PIO_ERR, KSTAT_DATA_ULONG, "fflp_hash_p3_pio_err"},
+	{FFLP_STAT_HASH_P4_PIO_ERR, KSTAT_DATA_ULONG, "fflp_hash_p4_pio_err"},
+	{FFLP_STAT_HASH_P5_PIO_ERR, KSTAT_DATA_ULONG, "fflp_hash_p5_pio_err"},
+	{FFLP_STAT_HASH_P6_PIO_ERR, KSTAT_DATA_ULONG, "fflp_hash_p6_pio_err"},
+	{FFLP_STAT_HASH_P7_PIO_ERR, KSTAT_DATA_ULONG, "fflp_hash_p7_pio_err"},
+	{FFLP_STAT_END, NULL, NULL}
+};
+
+typedef enum {
+	MMAC_MAX_ADDR,
+	MMAC_AVAIL_ADDR,
+	MMAC_ADDR_POOL1,
+	MMAC_ADDR_POOL2,
+	MMAC_ADDR_POOL3,
+	MMAC_ADDR_POOL4,
+	MMAC_ADDR_POOL5,
+	MMAC_ADDR_POOL6,
+	MMAC_ADDR_POOL7,
+	MMAC_ADDR_POOL8,
+	MMAC_ADDR_POOL9,
+	MMAC_ADDR_POOL10,
+	MMAC_ADDR_POOL11,
+	MMAC_ADDR_POOL12,
+	MMAC_ADDR_POOL13,
+	MMAC_ADDR_POOL14,
+	MMAC_ADDR_POOL15,
+	MMAC_ADDR_POOL16,
+	MMAC_STATS_END
+} nxge_mmac_stat_index_t;
+
+nxge_kstat_index_t nxge_mmac_stats[] = {
+	{MMAC_MAX_ADDR, KSTAT_DATA_UINT64, "max_mmac_addr"},
+	{MMAC_AVAIL_ADDR, KSTAT_DATA_UINT64, "avail_mmac_addr"},
+	{MMAC_ADDR_POOL1, KSTAT_DATA_UINT64, "mmac_addr_1"},
+	{MMAC_ADDR_POOL2, KSTAT_DATA_UINT64, "mmac_addr_2"},
+	{MMAC_ADDR_POOL3, KSTAT_DATA_UINT64, "mmac_addr_3"},
+	{MMAC_ADDR_POOL4, KSTAT_DATA_UINT64, "mmac_addr_4"},
+	{MMAC_ADDR_POOL5, KSTAT_DATA_UINT64, "mmac_addr_5"},
+	{MMAC_ADDR_POOL6, KSTAT_DATA_UINT64, "mmac_addr_6"},
+	{MMAC_ADDR_POOL7, KSTAT_DATA_UINT64, "mmac_addr_7"},
+	{MMAC_ADDR_POOL8, KSTAT_DATA_UINT64, "mmac_addr_8"},
+	{MMAC_ADDR_POOL9, KSTAT_DATA_UINT64, "mmac_addr_9"},
+	{MMAC_ADDR_POOL10, KSTAT_DATA_UINT64, "mmac_addr_10"},
+	{MMAC_ADDR_POOL11, KSTAT_DATA_UINT64, "mmac_addr_11"},
+	{MMAC_ADDR_POOL12, KSTAT_DATA_UINT64, "mmac_addr_12"},
+	{MMAC_ADDR_POOL13, KSTAT_DATA_UINT64, "mmac_addr_13"},
+	{MMAC_ADDR_POOL14, KSTAT_DATA_UINT64, "mmac_addr_14"},
+	{MMAC_ADDR_POOL15, KSTAT_DATA_UINT64, "mmac_addr_15"},
+	{MMAC_ADDR_POOL16, KSTAT_DATA_UINT64, "mmac_addr_16"},
+	{MMAC_STATS_END, NULL, NULL},
+};
+
+/* ARGSUSED */
+int
+nxge_tdc_stat_update(kstat_t *ksp, int rw)
+{
+	p_nxge_t nxgep;
+	p_nxge_tdc_kstat_t tdc_kstatsp;
+	p_nxge_tx_ring_stats_t statsp;
+	int channel;
+	char *ch_name, *end;
+
+	nxgep = (p_nxge_t)ksp->ks_private;
+	if (nxgep == NULL)
+		return (-1);
+
+	NXGE_DEBUG_MSG((nxgep, KST_CTL, "==> nxge_rxstat_update"));
+
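+	/*
+	 * The channel number is embedded in the kstat name ("TDC Channel
+	 * <n> Stats"); skip the fixed prefix and parse it out.
+	 */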
+	ch_name = ksp->ks_name;
+	ch_name += strlen(TDC_NAME_FORMAT1);
+	channel = mi_strtol(ch_name, &end, 10);
+
+	tdc_kstatsp = (p_nxge_tdc_kstat_t)ksp->ks_data;
+	statsp = (p_nxge_tx_ring_stats_t)&nxgep->statsp->tdc_stats[channel];
+
+	NXGE_DEBUG_MSG((nxgep, KST_CTL,
+		"nxge_tdc_stat_update data $%p statsp $%p channel %d",
+		ksp->ks_data, statsp, channel));
+
+	if (rw == KSTAT_WRITE) {
+		statsp->opackets = tdc_kstatsp->opackets.value.ull;
+		statsp->obytes = tdc_kstatsp->obytes.value.ull;
+		statsp->oerrors = tdc_kstatsp->oerrors.value.ull;
+		statsp->mbox_err = tdc_kstatsp->mbox_err.value.ul;
+		statsp->pkt_size_err = tdc_kstatsp->pkt_size_err.value.ul;
+		statsp->tx_ring_oflow = tdc_kstatsp->tx_ring_oflow.value.ul;
+		statsp->pre_buf_par_err =
+			tdc_kstatsp->pref_buf_ecc_err.value.ul;
+		statsp->nack_pref = tdc_kstatsp->nack_pref.value.ul;
+		statsp->nack_pkt_rd = tdc_kstatsp->nack_pkt_rd.value.ul;
+		statsp->conf_part_err = tdc_kstatsp->conf_part_err.value.ul;
+		statsp->pkt_part_err = tdc_kstatsp->pkt_prt_err.value.ul;
+	} else {
+		tdc_kstatsp->opackets.value.ull = statsp->opackets;
+		tdc_kstatsp->obytes.value.ull = statsp->obytes;
+		tdc_kstatsp->oerrors.value.ull = statsp->oerrors;
+		tdc_kstatsp->tx_hdr_pkts.value.ull = statsp->tx_hdr_pkts;
+		tdc_kstatsp->tx_ddi_pkts.value.ull = statsp->tx_ddi_pkts;
+		tdc_kstatsp->tx_dvma_pkts.value.ull = statsp->tx_dvma_pkts;
+		tdc_kstatsp->tx_max_pend.value.ull = statsp->tx_max_pend;
+		tdc_kstatsp->mbox_err.value.ul = statsp->mbox_err;
+		tdc_kstatsp->pkt_size_err.value.ul = statsp->pkt_size_err;
+		tdc_kstatsp->tx_ring_oflow.value.ul = statsp->tx_ring_oflow;
+		tdc_kstatsp->pref_buf_ecc_err.value.ul =
+			statsp->pre_buf_par_err;
+		tdc_kstatsp->nack_pref.value.ul = statsp->nack_pref;
+		tdc_kstatsp->nack_pkt_rd.value.ul = statsp->nack_pkt_rd;
+		tdc_kstatsp->conf_part_err.value.ul = statsp->conf_part_err;
+		tdc_kstatsp->pkt_prt_err.value.ul = statsp->pkt_part_err;
+		tdc_kstatsp->tx_starts.value.ul = statsp->tx_starts;
+		tdc_kstatsp->tx_nocanput.value.ul = statsp->tx_nocanput;
+		tdc_kstatsp->tx_msgdup_fail.value.ul = statsp->tx_msgdup_fail;
+		tdc_kstatsp->tx_allocb_fail.value.ul = statsp->tx_allocb_fail;
+		tdc_kstatsp->tx_no_desc.value.ul = statsp->tx_no_desc;
+		tdc_kstatsp->tx_dma_bind_fail.value.ul =
+			statsp->tx_dma_bind_fail;
+	}
+	NXGE_DEBUG_MSG((nxgep, KST_CTL, " <== nxge_tdc_stat_update"));
+	return (0);
+}
+
+/* ARGSUSED */
+int
+nxge_rdc_stat_update(kstat_t *ksp, int rw)
+{
+	p_nxge_t nxgep;
+	p_nxge_rdc_kstat_t rdc_kstatsp;
+	p_nxge_rx_ring_stats_t statsp;
+	int channel;
+	char *ch_name, *end;
+
+	nxgep = (p_nxge_t)ksp->ks_private;
+	if (nxgep == NULL)
+		return (-1);
+
+	NXGE_DEBUG_MSG((nxgep, KST_CTL, "==> nxge_rdc_stat_update"));
+
+	ch_name = ksp->ks_name;
+	ch_name += strlen(RDC_NAME_FORMAT1);
+	channel = mi_strtol(ch_name, &end, 10);
+
+	rdc_kstatsp = (p_nxge_rdc_kstat_t)ksp->ks_data;
+	statsp = (p_nxge_rx_ring_stats_t)&nxgep->statsp->rdc_stats[channel];
+
+	NXGE_DEBUG_MSG((nxgep, KST_CTL,
+		"nxge_rdc_stat_update $%p statsp $%p channel %d",
+		ksp->ks_data, statsp, channel));
+
+	if (rw == KSTAT_WRITE) {
+		statsp->dcf_err = rdc_kstatsp->dcf_err.value.ul;
+		statsp->rcr_ack_err = rdc_kstatsp->rcr_ack_err.value.ul;
+		statsp->dc_fifo_err = rdc_kstatsp->dc_fifoflow_err.value.ul;
+		statsp->rcr_sha_par = rdc_kstatsp->rcr_sha_par_err.value.ul;
+		statsp->rbr_pre_par = rdc_kstatsp->rbr_pre_par_err.value.ul;
+		statsp->wred_drop = rdc_kstatsp->wred_drop.value.ul;
+		statsp->rbr_pre_empty = rdc_kstatsp->rbr_pre_emty.value.ul;
+		statsp->rcr_shadow_full = rdc_kstatsp->rcr_shadow_full.value.ul;
+		statsp->rx_rbr_tmout = rdc_kstatsp->rbr_tmout.value.ul;
+		statsp->rsp_cnt_err = rdc_kstatsp->rsp_cnt_err.value.ul;
+		statsp->byte_en_bus = rdc_kstatsp->byte_en_bus.value.ul;
+		statsp->rsp_dat_err = rdc_kstatsp->rsp_dat_err.value.ul;
+		statsp->l2_err = rdc_kstatsp->compl_l2_err.value.ul;
+		statsp->l4_cksum_err = rdc_kstatsp->compl_l4_cksum_err.value.ul;
+		statsp->fflp_soft_err =
+			rdc_kstatsp->compl_fflp_soft_err.value.ul;
+		statsp->zcp_soft_err = rdc_kstatsp->compl_zcp_soft_err.value.ul;
+		statsp->config_err = rdc_kstatsp->config_err.value.ul;
+		statsp->rcrincon = rdc_kstatsp->rcrincon.value.ul;
+		statsp->rcrfull = rdc_kstatsp->rcrfull.value.ul;
+		statsp->rbr_empty = rdc_kstatsp->rbr_empty.value.ul;
+		statsp->rbrfull = rdc_kstatsp->rbrfull.value.ul;
+		statsp->rbrlogpage = rdc_kstatsp->rbrlogpage.value.ul;
+		statsp->cfiglogpage = rdc_kstatsp->cfiglogpage.value.ul;
+	} else {
+		rdc_kstatsp->ipackets.value.ull = statsp->ipackets;
+		rdc_kstatsp->rbytes.value.ull = statsp->ibytes;
+		rdc_kstatsp->errors.value.ul = statsp->ierrors;
+		rdc_kstatsp->dcf_err.value.ul = statsp->dcf_err;
+		rdc_kstatsp->rcr_ack_err.value.ul = statsp->rcr_ack_err;
+		rdc_kstatsp->dc_fifoflow_err.value.ul = statsp->dc_fifo_err;
+		rdc_kstatsp->rcr_sha_par_err.value.ul = statsp->rcr_sha_par;
+		rdc_kstatsp->rbr_pre_par_err.value.ul = statsp->rbr_pre_par;
+		rdc_kstatsp->wred_drop.value.ul = statsp->wred_drop;
+		rdc_kstatsp->port_drop_pkt.value.ul = statsp->port_drop_pkt;
+		rdc_kstatsp->rbr_pre_emty.value.ul = statsp->rbr_pre_empty;
+		rdc_kstatsp->rcr_shadow_full.value.ul = statsp->rcr_shadow_full;
+		rdc_kstatsp->rbr_tmout.value.ul = statsp->rx_rbr_tmout;
+		rdc_kstatsp->rsp_cnt_err.value.ul = statsp->rsp_cnt_err;
+		rdc_kstatsp->byte_en_bus.value.ul = statsp->byte_en_bus;
+		rdc_kstatsp->rsp_dat_err.value.ul = statsp->rsp_dat_err;
+		rdc_kstatsp->compl_l2_err.value.ul = statsp->l2_err;
+		rdc_kstatsp->compl_l4_cksum_err.value.ul = statsp->l4_cksum_err;
+		rdc_kstatsp->compl_fflp_soft_err.value.ul =
+			statsp->fflp_soft_err;
+		rdc_kstatsp->compl_zcp_soft_err.value.ul = statsp->zcp_soft_err;
+		rdc_kstatsp->config_err.value.ul = statsp->config_err;
+		rdc_kstatsp->rcrincon.value.ul = statsp->rcrincon;
+		rdc_kstatsp->rcrfull.value.ul = statsp->rcrfull;
+		rdc_kstatsp->rbr_empty.value.ul = statsp->rbr_empty;
+		rdc_kstatsp->rbrfull.value.ul = statsp->rbrfull;
+		rdc_kstatsp->rbrlogpage.value.ul = statsp->rbrlogpage;
+		rdc_kstatsp->cfiglogpage.value.ul = statsp->cfiglogpage;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, KST_CTL, " <== nxge_rdc_stat_update"));
+	return (0);
+}
+
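+/*
+ * Update the per-port RDC system kstats from the soft statistics,
+ * or vice versa on KSTAT_WRITE.
+ */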
+/* ARGSUSED */
+int
+nxge_rdc_sys_stat_update(kstat_t *ksp, int rw)
+{
+	p_nxge_t nxgep;
+	p_nxge_rdc_sys_kstat_t rdc_sys_kstatsp;
+	p_nxge_rdc_sys_stats_t statsp;
+
+	nxgep = (p_nxge_t)ksp->ks_private;
+	if (nxgep == NULL)
+		return (-1);
+
+	NXGE_DEBUG_MSG((nxgep, KST_CTL, "==> nxge_rdc_sys_stat_update"));
+
+	rdc_sys_kstatsp = (p_nxge_rdc_sys_kstat_t)ksp->ks_data;
+	statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;
+
+	NXGE_DEBUG_MSG((nxgep, KST_CTL, "nxge_rdc_sys_stat_update %llx",
+		ksp->ks_data));
+
+	if (rw == KSTAT_WRITE) {
+		statsp->id_mismatch = rdc_sys_kstatsp->id_mismatch.value.ul;
+		statsp->ipp_eop_err = rdc_sys_kstatsp->ipp_eop_err.value.ul;
+		statsp->zcp_eop_err = rdc_sys_kstatsp->zcp_eop_err.value.ul;
+	} else {
+		rdc_sys_kstatsp->id_mismatch.value.ul = statsp->id_mismatch;
+		rdc_sys_kstatsp->ipp_eop_err.value.ul = statsp->ipp_eop_err;
+		rdc_sys_kstatsp->zcp_eop_err.value.ul = statsp->zcp_eop_err;
+	}
+	NXGE_DEBUG_MSG((nxgep, KST_CTL, " <== nxge_rdc_sys_stat_update"));
+	return (0);
+}
+
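+/*
+ * Update the transmit controller (TXC) kstats from the soft
+ * statistics, or vice versa on KSTAT_WRITE.
+ */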
+/* ARGSUSED */
+static int
+nxge_txc_stat_update(kstat_t *ksp, int rw)
+{
+	p_nxge_t nxgep;
+	p_nxge_txc_kstat_t txc_kstatsp;
+	p_nxge_txc_stats_t statsp;
+
+	nxgep = (p_nxge_t)ksp->ks_private;
+
+	if (nxgep == NULL)
+		return (-1);
+
+	NXGE_DEBUG_MSG((nxgep, KST_CTL, "==> nxge_txc_stat_update"));
+
+	txc_kstatsp = (p_nxge_txc_kstat_t)ksp->ks_data;
+	statsp = (p_nxge_txc_stats_t)&nxgep->statsp->txc_stats;
+
+	if (rw == KSTAT_WRITE) {
+		statsp->pkt_stuffed = txc_kstatsp->pkt_stuffed.value.ul;
+		statsp->pkt_xmit = txc_kstatsp->pkt_xmit.value.ul;
+		statsp->ro_correct_err = txc_kstatsp->ro_correct_err.value.ul;
+		statsp->ro_uncorrect_err =
+			txc_kstatsp->ro_uncorrect_err.value.ul;
+		statsp->sf_correct_err = txc_kstatsp->sf_correct_err.value.ul;
+		statsp->sf_uncorrect_err =
+			txc_kstatsp->sf_uncorrect_err.value.ul;
+		statsp->address_failed = txc_kstatsp->address_failed.value.ul;
+		statsp->dma_failed = txc_kstatsp->dma_failed.value.ul;
+		statsp->length_failed = txc_kstatsp->length_failed.value.ul;
+		statsp->pkt_assy_dead = txc_kstatsp->pkt_assy_dead.value.ul;
+		statsp->reorder_err = txc_kstatsp->reorder_err.value.ul;
+	} else {
+		txc_kstatsp->pkt_stuffed.value.ul = statsp->pkt_stuffed;
+		txc_kstatsp->pkt_xmit.value.ul = statsp->pkt_xmit;
+		txc_kstatsp->ro_correct_err.value.ul = statsp->ro_correct_err;
+		txc_kstatsp->ro_uncorrect_err.value.ul =
+			statsp->ro_uncorrect_err;
+		txc_kstatsp->sf_correct_err.value.ul = statsp->sf_correct_err;
+		txc_kstatsp->sf_uncorrect_err.value.ul =
+			statsp->sf_uncorrect_err;
+		txc_kstatsp->address_failed.value.ul = statsp->address_failed;
+		txc_kstatsp->dma_failed.value.ul = statsp->dma_failed;
+		txc_kstatsp->length_failed.value.ul = statsp->length_failed;
+		txc_kstatsp->pkt_assy_dead.value.ul = statsp->pkt_assy_dead;
+		txc_kstatsp->reorder_err.value.ul = statsp->reorder_err;
+	}
+	NXGE_DEBUG_MSG((nxgep, KST_CTL, "<== nxge_txc_stat_update"));
+	return (0);
+}
+
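+/*
+ * Update the input packet processor (IPP) kstats from the soft
+ * statistics, or vice versa on KSTAT_WRITE.
+ */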
+/* ARGSUSED */
+int
+nxge_ipp_stat_update(kstat_t *ksp, int rw)
+{
+	p_nxge_t nxgep;
+	p_nxge_ipp_kstat_t ipp_kstatsp;
+	p_nxge_ipp_stats_t statsp;
+
+	nxgep = (p_nxge_t)ksp->ks_private;
+	if (nxgep == NULL)
+		return (-1);
+
+	NXGE_DEBUG_MSG((nxgep, KST_CTL, "==> nxge_ipp_stat_update"));
+
+	ipp_kstatsp = (p_nxge_ipp_kstat_t)ksp->ks_data;
+	statsp = (p_nxge_ipp_stats_t)&nxgep->statsp->ipp_stats;
+
+	if (rw == KSTAT_WRITE) {
+		statsp->eop_miss = ipp_kstatsp->eop_miss.value.ul;
+		statsp->sop_miss = ipp_kstatsp->sop_miss.value.ul;
+		statsp->dfifo_ue = ipp_kstatsp->dfifo_ue.value.ul;
+		statsp->ecc_err_cnt = ipp_kstatsp->ecc_err_cnt.value.ul;
+		statsp->pfifo_over = ipp_kstatsp->pfifo_over.value.ul;
+		statsp->pfifo_und = ipp_kstatsp->pfifo_und.value.ul;
+		statsp->bad_cs_cnt = ipp_kstatsp->bad_cs_cnt.value.ul;
+		statsp->pkt_dis_cnt = ipp_kstatsp->pkt_dis_cnt.value.ul;
+		statsp->bad_cs_cnt = ipp_kstatsp->cs_fail.value.ul;
+	} else {
+		ipp_kstatsp->eop_miss.value.ul = statsp->eop_miss;
+		ipp_kstatsp->sop_miss.value.ul = statsp->sop_miss;
+		ipp_kstatsp->dfifo_ue.value.ul = statsp->dfifo_ue;
+		ipp_kstatsp->ecc_err_cnt.value.ul = statsp->ecc_err_cnt;
+		ipp_kstatsp->pfifo_over.value.ul = statsp->pfifo_over;
+		ipp_kstatsp->pfifo_und.value.ul = statsp->pfifo_und;
+		ipp_kstatsp->bad_cs_cnt.value.ul = statsp->bad_cs_cnt;
+		ipp_kstatsp->pkt_dis_cnt.value.ul = statsp->pkt_dis_cnt;
+		ipp_kstatsp->cs_fail.value.ul = statsp->bad_cs_cnt;
+	}
+	NXGE_DEBUG_MSG((nxgep, KST_CTL, "<== nxge_ipp_stat_update"));
+	return (0);
+}
+
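+/*
+ * Update the 10G MAC (XMAC) kstats from the soft statistics, or
+ * vice versa on KSTAT_WRITE.
+ */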
+/* ARGSUSED */
+int
+nxge_xmac_stat_update(kstat_t *ksp, int rw)
+{
+	p_nxge_t nxgep;
+	p_nxge_xmac_kstat_t xmac_kstatsp;
+	p_nxge_xmac_stats_t statsp;
+
+	nxgep = (p_nxge_t)ksp->ks_private;
+	if (nxgep == NULL)
+		return (-1);
+
+	NXGE_DEBUG_MSG((nxgep, KST_CTL, "==> nxge_xmac_stat_update"));
+
+	xmac_kstatsp = (p_nxge_xmac_kstat_t)ksp->ks_data;
+	statsp = (p_nxge_xmac_stats_t)&nxgep->statsp->xmac_stats;
+
+	if (rw == KSTAT_WRITE) {
+		statsp->tx_frame_cnt = xmac_kstatsp->tx_frame_cnt.value.ul;
+		statsp->tx_underflow_err =
+			xmac_kstatsp->tx_underflow_err.value.ul;
+		statsp->tx_maxpktsize_err =
+			xmac_kstatsp->tx_maxpktsize_err.value.ul;
+		statsp->tx_overflow_err =
+			xmac_kstatsp->tx_overflow_err.value.ul;
+		statsp->tx_fifo_xfr_err =
+			xmac_kstatsp->tx_fifo_xfr_err.value.ul;
+		statsp->tx_byte_cnt = xmac_kstatsp->tx_byte_cnt.value.ul;
+		statsp->rx_underflow_err =
+			xmac_kstatsp->rx_underflow_err.value.ul;
+		statsp->rx_overflow_err =
+			xmac_kstatsp->rx_overflow_err.value.ul;
+		statsp->rx_crc_err_cnt = xmac_kstatsp->rx_crc_err_cnt.value.ul;
+		statsp->rx_len_err_cnt = xmac_kstatsp->rx_len_err_cnt.value.ul;
+		statsp->rx_viol_err_cnt =
+			xmac_kstatsp->rx_viol_err_cnt.value.ul;
+		statsp->rx_byte_cnt = xmac_kstatsp->rx_byte_cnt.value.ul;
+		statsp->rx_hist1_cnt = xmac_kstatsp->rx_hist1_cnt.value.ul;
+		statsp->rx_hist2_cnt = xmac_kstatsp->rx_hist2_cnt.value.ul;
+		statsp->rx_hist3_cnt = xmac_kstatsp->rx_hist3_cnt.value.ul;
+		statsp->rx_hist4_cnt = xmac_kstatsp->rx_hist4_cnt.value.ul;
+		statsp->rx_hist5_cnt = xmac_kstatsp->rx_hist5_cnt.value.ul;
+		statsp->rx_hist6_cnt = xmac_kstatsp->rx_hist6_cnt.value.ul;
+		statsp->rx_mult_cnt = xmac_kstatsp->rx_mult_cnt.value.ul;
+		statsp->rx_frag_cnt = xmac_kstatsp->rx_frag_cnt.value.ul;
+		statsp->rx_frame_align_err_cnt =
+			xmac_kstatsp->rx_frame_align_err_cnt.value.ul;
+		statsp->rx_linkfault_err_cnt =
+			xmac_kstatsp->rx_linkfault_err_cnt.value.ul;
+		statsp->rx_localfault_err =
+			xmac_kstatsp->rx_local_fault_err_cnt.value.ul;
+		statsp->rx_remotefault_err =
+			xmac_kstatsp->rx_remote_fault_err_cnt.value.ul;
+		statsp->xpcs_deskew_err_cnt =
+			xmac_kstatsp->xpcs_deskew_err_cnt.value.ul;
+#ifdef	NXGE_DEBUG_SYMBOL_ERR
+		statsp->xpcs_ln0_symbol_err_cnt =
+			xmac_kstatsp->xpcs_ln0_symbol_err_cnt.value.ul;
+		statsp->xpcs_ln1_symbol_err_cnt =
+			xmac_kstatsp->xpcs_ln1_symbol_err_cnt.value.ul;
+		statsp->xpcs_ln2_symbol_err_cnt =
+			xmac_kstatsp->xpcs_ln2_symbol_err_cnt.value.ul;
+		statsp->xpcs_ln3_symbol_err_cnt =
+			xmac_kstatsp->xpcs_ln3_symbol_err_cnt.value.ul;
+#endif
+	} else {
+		xmac_kstatsp->tx_frame_cnt.value.ul = statsp->tx_frame_cnt;
+		xmac_kstatsp->tx_underflow_err.value.ul =
+			statsp->tx_underflow_err;
+		xmac_kstatsp->tx_maxpktsize_err.value.ul =
+			statsp->tx_maxpktsize_err;
+		xmac_kstatsp->tx_overflow_err.value.ul =
+			statsp->tx_overflow_err;
+		xmac_kstatsp->tx_fifo_xfr_err.value.ul =
+			statsp->tx_fifo_xfr_err;
+		xmac_kstatsp->tx_byte_cnt.value.ul = statsp->tx_byte_cnt;
+		xmac_kstatsp->rx_underflow_err.value.ul =
+			statsp->rx_underflow_err;
+		xmac_kstatsp->rx_overflow_err.value.ul =
+			statsp->rx_overflow_err;
+		xmac_kstatsp->rx_crc_err_cnt.value.ul = statsp->rx_crc_err_cnt;
+		xmac_kstatsp->rx_len_err_cnt.value.ul = statsp->rx_len_err_cnt;
+		xmac_kstatsp->rx_viol_err_cnt.value.ul =
+			statsp->rx_viol_err_cnt;
+		xmac_kstatsp->rx_byte_cnt.value.ul = statsp->rx_byte_cnt;
+		xmac_kstatsp->rx_hist1_cnt.value.ul = statsp->rx_hist1_cnt;
+		xmac_kstatsp->rx_hist2_cnt.value.ul = statsp->rx_hist2_cnt;
+		xmac_kstatsp->rx_hist3_cnt.value.ul = statsp->rx_hist3_cnt;
+		xmac_kstatsp->rx_hist4_cnt.value.ul = statsp->rx_hist4_cnt;
+		xmac_kstatsp->rx_hist5_cnt.value.ul = statsp->rx_hist5_cnt;
+		xmac_kstatsp->rx_hist6_cnt.value.ul = statsp->rx_hist6_cnt;
+		xmac_kstatsp->rx_mult_cnt.value.ul = statsp->rx_mult_cnt;
+		xmac_kstatsp->rx_frag_cnt.value.ul = statsp->rx_frag_cnt;
+		xmac_kstatsp->rx_frame_align_err_cnt.value.ul =
+			statsp->rx_frame_align_err_cnt;
+		xmac_kstatsp->rx_linkfault_err_cnt.value.ul =
+			statsp->rx_linkfault_err_cnt;
+		xmac_kstatsp->rx_local_fault_err_cnt.value.ul =
+			statsp->rx_localfault_err;
+		xmac_kstatsp->rx_remote_fault_err_cnt.value.ul =
+			statsp->rx_remotefault_err;
+		xmac_kstatsp->xpcs_deskew_err_cnt.value.ul =
+			statsp->xpcs_deskew_err_cnt;
+#ifdef	NXGE_DEBUG_SYMBOL_ERR
+		xmac_kstatsp->xpcs_ln0_symbol_err_cnt.value.ul =
+			statsp->xpcs_ln0_symbol_err_cnt;
+		xmac_kstatsp->xpcs_ln1_symbol_err_cnt.value.ul =
+			statsp->xpcs_ln1_symbol_err_cnt;
+		xmac_kstatsp->xpcs_ln2_symbol_err_cnt.value.ul =
+			statsp->xpcs_ln2_symbol_err_cnt;
+		xmac_kstatsp->xpcs_ln3_symbol_err_cnt.value.ul =
+			statsp->xpcs_ln3_symbol_err_cnt;
+#endif
+	}
+	NXGE_DEBUG_MSG((nxgep, KST_CTL, "<== nxge_xmac_stat_update"));
+	return (0);
+}
+
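+/*
+ * Update the 1G MAC (BMAC) kstats from the soft statistics, or
+ * vice versa on KSTAT_WRITE.
+ */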
+/* ARGSUSED */
+int
+nxge_bmac_stat_update(kstat_t *ksp, int rw)
+{
+	p_nxge_t nxgep;
+	p_nxge_bmac_kstat_t bmac_kstatsp;
+	p_nxge_bmac_stats_t statsp;
+
+	nxgep = (p_nxge_t)ksp->ks_private;
+	if (nxgep == NULL)
+		return (-1);
+
+	NXGE_DEBUG_MSG((nxgep, KST_CTL, "==> nxge_bmac_stat_update"));
+
+	bmac_kstatsp = (p_nxge_bmac_kstat_t)ksp->ks_data;
+	statsp = (p_nxge_bmac_stats_t)&nxgep->statsp->bmac_stats;
+
+	if (rw == KSTAT_WRITE) {
+		statsp->tx_frame_cnt = bmac_kstatsp->tx_frame_cnt.value.ul;
+		statsp->tx_underrun_err =
+			bmac_kstatsp->tx_underrun_err.value.ul;
+		statsp->tx_max_pkt_err = bmac_kstatsp->tx_max_pkt_err.value.ul;
+		statsp->tx_byte_cnt = bmac_kstatsp->tx_byte_cnt.value.ul;
+		statsp->rx_frame_cnt = bmac_kstatsp->rx_frame_cnt.value.ul;
+		statsp->rx_byte_cnt = bmac_kstatsp->rx_byte_cnt.value.ul;
+		statsp->rx_overflow_err =
+			bmac_kstatsp->rx_overflow_err.value.ul;
+		statsp->rx_align_err_cnt =
+			bmac_kstatsp->rx_align_err_cnt.value.ul;
+		statsp->rx_crc_err_cnt = bmac_kstatsp->rx_crc_err_cnt.value.ul;
+		statsp->rx_len_err_cnt = bmac_kstatsp->rx_len_err_cnt.value.ul;
+		statsp->rx_viol_err_cnt =
+			bmac_kstatsp->rx_viol_err_cnt.value.ul;
+	} else {
+		bmac_kstatsp->tx_frame_cnt.value.ul = statsp->tx_frame_cnt;
+		bmac_kstatsp->tx_underrun_err.value.ul =
+			statsp->tx_underrun_err;
+		bmac_kstatsp->tx_max_pkt_err.value.ul = statsp->tx_max_pkt_err;
+		bmac_kstatsp->tx_byte_cnt.value.ul = statsp->tx_byte_cnt;
+		bmac_kstatsp->rx_frame_cnt.value.ul = statsp->rx_frame_cnt;
+		bmac_kstatsp->rx_byte_cnt.value.ul = statsp->rx_byte_cnt;
+		bmac_kstatsp->rx_overflow_err.value.ul =
+			statsp->rx_overflow_err;
+		bmac_kstatsp->rx_align_err_cnt.value.ul =
+			statsp->rx_align_err_cnt;
+		bmac_kstatsp->rx_crc_err_cnt.value.ul = statsp->rx_crc_err_cnt;
+		bmac_kstatsp->rx_len_err_cnt.value.ul = statsp->rx_len_err_cnt;
+		bmac_kstatsp->rx_viol_err_cnt.value.ul =
+			statsp->rx_viol_err_cnt;
+	}
+	NXGE_DEBUG_MSG((nxgep, KST_CTL, "<== nxge_bmac_stat_update"));
+	return (0);
+}
+
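+/*
+ * Update the zero copy (ZCP) kstats from the soft statistics, or
+ * vice versa on KSTAT_WRITE.
+ */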
+/* ARGSUSED */
+int
+nxge_zcp_stat_update(kstat_t *ksp, int rw)
+{
+	p_nxge_t nxgep;
+	p_nxge_zcp_kstat_t zcp_kstatsp;
+	p_nxge_zcp_stats_t statsp;
+
+	nxgep = (p_nxge_t)ksp->ks_private;
+	if (nxgep == NULL)
+		return (-1);
+
+	NXGE_DEBUG_MSG((nxgep, KST_CTL, "==> nxge_zcp_stat_update"));
+
+	zcp_kstatsp = (p_nxge_zcp_kstat_t)ksp->ks_data;
+	statsp = (p_nxge_zcp_stats_t)&nxgep->statsp->zcp_stats;
+
+	if (rw == KSTAT_WRITE) {
+		statsp->rrfifo_underrun = zcp_kstatsp->rrfifo_underrun.value.ul;
+		statsp->rrfifo_overrun = zcp_kstatsp->rrfifo_overrun.value.ul;
+		statsp->rspfifo_uncorr_err =
+			zcp_kstatsp->rspfifo_uncorr_err.value.ul;
+		statsp->buffer_overflow = zcp_kstatsp->buffer_overflow.value.ul;
+		statsp->stat_tbl_perr = zcp_kstatsp->stat_tbl_perr.value.ul;
+		statsp->dyn_tbl_perr = zcp_kstatsp->dyn_tbl_perr.value.ul;
+		statsp->buf_tbl_perr = zcp_kstatsp->buf_tbl_perr.value.ul;
+		statsp->tt_program_err = zcp_kstatsp->tt_program_err.value.ul;
+		statsp->rsp_tt_index_err =
+			zcp_kstatsp->rsp_tt_index_err.value.ul;
+		statsp->slv_tt_index_err =
+			zcp_kstatsp->slv_tt_index_err.value.ul;
+		statsp->zcp_tt_index_err =
+			zcp_kstatsp->zcp_tt_index_err.value.ul;
+		statsp->cfifo_ecc = zcp_kstatsp->cfifo_ecc.value.ul;
+	} else {
+		zcp_kstatsp->rrfifo_underrun.value.ul = statsp->rrfifo_underrun;
+		zcp_kstatsp->rrfifo_overrun.value.ul = statsp->rrfifo_overrun;
+		zcp_kstatsp->rspfifo_uncorr_err.value.ul =
+			statsp->rspfifo_uncorr_err;
+		zcp_kstatsp->buffer_overflow.value.ul =
+			statsp->buffer_overflow;
+		zcp_kstatsp->stat_tbl_perr.value.ul = statsp->stat_tbl_perr;
+		zcp_kstatsp->dyn_tbl_perr.value.ul = statsp->dyn_tbl_perr;
+		zcp_kstatsp->buf_tbl_perr.value.ul = statsp->buf_tbl_perr;
+		zcp_kstatsp->tt_program_err.value.ul = statsp->tt_program_err;
+		zcp_kstatsp->rsp_tt_index_err.value.ul =
+			statsp->rsp_tt_index_err;
+		zcp_kstatsp->slv_tt_index_err.value.ul =
+			statsp->slv_tt_index_err;
+		zcp_kstatsp->zcp_tt_index_err.value.ul =
+			statsp->zcp_tt_index_err;
+		zcp_kstatsp->cfifo_ecc.value.ul = statsp->cfifo_ecc;
+	}
+	NXGE_DEBUG_MSG((nxgep, KST_CTL, "<== nxge_zcp_stat_update"));
+	return (0);
+}
+
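+/*
+ * Update the flow classifier (FFLP) kstats, including the
+ * per-partition hash table error counters, from the soft statistics,
+ * or vice versa on KSTAT_WRITE.
+ */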
+/* ARGSUSED */
+int
+nxge_fflp_stat_update(kstat_t *ksp, int rw)
+{
+	p_nxge_t nxgep;
+	p_nxge_fflp_kstat_t fflp_kstatsp;
+	p_nxge_fflp_stats_t statsp;
+	int ldc_grp;
+
+	nxgep = (p_nxge_t)ksp->ks_private;
+	if (nxgep == NULL)
+		return (-1);
+
+	NXGE_DEBUG_MSG((nxgep, KST_CTL, "==> nxge_fflp_stat_update"));
+
+	fflp_kstatsp = (p_nxge_fflp_kstat_t)ksp->ks_data;
+	statsp = (p_nxge_fflp_stats_t)&nxgep->statsp->fflp_stats;
+
+	if (rw == KSTAT_WRITE) {
+		statsp->tcam_parity_err = fflp_kstatsp->fflp_tcam_perr.value.ul;
+		statsp->tcam_ecc_err = fflp_kstatsp->fflp_tcam_ecc_err.value.ul;
+		statsp->vlan_parity_err = fflp_kstatsp->fflp_vlan_perr.value.ul;
+		statsp->hash_lookup_err =
+			fflp_kstatsp->fflp_hasht_lookup_err.value.ul;
+		for (ldc_grp = 0; ldc_grp < MAX_PARTITION; ldc_grp++) {
+			statsp->hash_pio_err[ldc_grp] =
+				fflp_kstatsp->fflp_hasht_data_err[ldc_grp].
+				value.ul;
+		}
+	} else {
+		fflp_kstatsp->fflp_tcam_perr.value.ul =
+			statsp->tcam_parity_err;
+		fflp_kstatsp->fflp_tcam_ecc_err.value.ul = statsp->tcam_ecc_err;
+		fflp_kstatsp->fflp_vlan_perr.value.ul = statsp->vlan_parity_err;
+		fflp_kstatsp->fflp_hasht_lookup_err.value.ul =
+			statsp->hash_lookup_err;
+		for (ldc_grp = 0; ldc_grp < MAX_PARTITION; ldc_grp++) {
+			fflp_kstatsp->fflp_hasht_data_err[ldc_grp].value.ul =
+				statsp->hash_pio_err[ldc_grp];
+		}
+	}
+	NXGE_DEBUG_MSG((nxgep, KST_CTL, "<== nxge_fflp_stat_update"));
+	return (0);
+}
+
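+/*
+ * Pack the six octets of an Ethernet address into a uint64_t, with
+ * octet 0 in the least significant byte.
+ */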
+/* ARGSUSED */
+static uint64_t
+nxge_mac_octet_to_u64(struct ether_addr addr)
+{
+	int i;
+	uint64_t addr64 = 0;
+
+	for (i = ETHERADDRL - 1; i >= 0; i--) {
+		addr64 <<= 8;
+		addr64 |= addr.ether_addr_octet[i];
+	}
+	return (addr64);
+}
+
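+/*
+ * Export the alternate (multiple) MAC address pool through kstats.
+ * These statistics are read-only; write attempts are rejected with
+ * a warning.
+ */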
+/* ARGSUSED */
+int
+nxge_mmac_stat_update(kstat_t *ksp, int rw)
+{
+	p_nxge_t nxgep;
+	p_nxge_mmac_kstat_t mmac_kstatsp;
+	p_nxge_mmac_stats_t statsp;
+
+	nxgep = (p_nxge_t)ksp->ks_private;
+	if (nxgep == NULL)
+		return (-1);
+
+	NXGE_DEBUG_MSG((nxgep, KST_CTL, "==> nxge_mmac_stat_update"));
+
+	mmac_kstatsp = (p_nxge_mmac_kstat_t)ksp->ks_data;
+	statsp = (p_nxge_mmac_stats_t)&nxgep->statsp->mmac_stats;
+
+	if (rw == KSTAT_WRITE) {
+		cmn_err(CE_WARN, "Cannot write mmac stats");
+	} else {
+		mmac_kstatsp->mmac_max_addr_cnt.value.ul =
+			statsp->mmac_max_cnt;
+		mmac_kstatsp->mmac_avail_addr_cnt.value.ul =
+			statsp->mmac_avail_cnt;
+		mmac_kstatsp->mmac_addr1.value.ul =
+			nxge_mac_octet_to_u64(statsp->mmac_avail_pool[0]);
+		mmac_kstatsp->mmac_addr2.value.ul =
+			nxge_mac_octet_to_u64(statsp->mmac_avail_pool[1]);
+		mmac_kstatsp->mmac_addr3.value.ul =
+			nxge_mac_octet_to_u64(statsp->mmac_avail_pool[2]);
+		mmac_kstatsp->mmac_addr4.value.ul =
+			nxge_mac_octet_to_u64(statsp->mmac_avail_pool[3]);
+		mmac_kstatsp->mmac_addr5.value.ul =
+			nxge_mac_octet_to_u64(statsp->mmac_avail_pool[4]);
+		mmac_kstatsp->mmac_addr6.value.ul =
+			nxge_mac_octet_to_u64(statsp->mmac_avail_pool[5]);
+		mmac_kstatsp->mmac_addr7.value.ul =
+			nxge_mac_octet_to_u64(statsp->mmac_avail_pool[6]);
+		mmac_kstatsp->mmac_addr8.value.ul =
+			nxge_mac_octet_to_u64(statsp->mmac_avail_pool[7]);
+		mmac_kstatsp->mmac_addr9.value.ul =
+			nxge_mac_octet_to_u64(statsp->mmac_avail_pool[8]);
+		mmac_kstatsp->mmac_addr10.value.ul =
+			nxge_mac_octet_to_u64(statsp->mmac_avail_pool[9]);
+		mmac_kstatsp->mmac_addr11.value.ul =
+			nxge_mac_octet_to_u64(statsp->mmac_avail_pool[10]);
+		mmac_kstatsp->mmac_addr12.value.ul =
+			nxge_mac_octet_to_u64(statsp->mmac_avail_pool[11]);
+		mmac_kstatsp->mmac_addr13.value.ul =
+			nxge_mac_octet_to_u64(statsp->mmac_avail_pool[12]);
+		mmac_kstatsp->mmac_addr14.value.ul =
+			nxge_mac_octet_to_u64(statsp->mmac_avail_pool[13]);
+		mmac_kstatsp->mmac_addr15.value.ul =
+			nxge_mac_octet_to_u64(statsp->mmac_avail_pool[14]);
+		mmac_kstatsp->mmac_addr16.value.ul =
+			nxge_mac_octet_to_u64(statsp->mmac_avail_pool[15]);
+	}
+	NXGE_DEBUG_MSG((nxgep, KST_CTL, "<== nxge_mmac_stat_update"));
+	return (0);
+}
+
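+/*
+ * Create, initialize and install a named kstat for this instance from
+ * a NULL-terminated table of kstat names and types.
+ */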
+/* ARGSUSED */
+static kstat_t *
+nxge_setup_local_kstat(p_nxge_t nxgep, int instance, char *name,
+	const nxge_kstat_index_t *ksip, size_t count,
+	int (*update) (kstat_t *, int))
+{
+	kstat_t *ksp;
+	kstat_named_t *knp;
+	int i;
+
+	ksp = kstat_create(NXGE_DRIVER_NAME, instance, name, "net",
+		KSTAT_TYPE_NAMED, count, 0);
+	if (ksp == NULL)
+		return (NULL);
+
+	ksp->ks_private = (void *)nxgep;
+	ksp->ks_update = update;
+	knp = ksp->ks_data;
+
+	for (i = 0; ksip[i].name != NULL; i++) {
+		kstat_named_init(&knp[i], ksip[i].name, ksip[i].type);
+	}
+
+	kstat_install(ksp);
+	return (ksp);
+}
+
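+/*
+ * Create all of the kstats for this instance: the per-channel RDC and
+ * TDC statistics, the RDC system, IPP, TXC, ZCP, FFLP and MMAC
+ * statistics, and the port statistics kstat (which also carries the
+ * XMAC or BMAC counters, depending on the port type).
+ */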
+/* ARGSUSED */
+void
+nxge_setup_kstats(p_nxge_t nxgep)
+{
+	struct kstat *ksp;
+	p_nxge_port_kstat_t nxgekp;
+	size_t nxge_kstat_sz;
+	char stat_name[64];
+	char mmac_name[64];
+	int i;
+
+	NXGE_DEBUG_MSG((nxgep, KST_CTL, "==> nxge_setup_kstats"));
+
+
+	/* Setup RDC statistics */
+	for (i = 0; i < nxgep->nrdc; i++) {
+		(void) sprintf(stat_name, "%s" CH_NAME_FORMAT,
+			RDC_NAME_FORMAT1, i);
+		nxgep->statsp->rdc_ksp[i] = nxge_setup_local_kstat(nxgep,
+			nxgep->instance, stat_name,
+			&nxge_rdc_stats[0], RDC_STAT_END, nxge_rdc_stat_update);
+#ifdef	NXGE_DEBUG_ERROR
+		if (nxgep->statsp->rdc_ksp[i] == NULL)
+			NXGE_DEBUG_MSG((nxgep, KST_CTL,
+				"kstat_create failed for rdc channel %d", i));
+#endif
+	}
+
+	/* Setup RDC System statistics */
+	nxgep->statsp->rdc_sys_ksp = nxge_setup_local_kstat(nxgep,
+		nxgep->instance,
+		"RDC System Stats",
+		&nxge_rdc_sys_stats[0],
+		RDC_SYS_STAT_END,
+		nxge_rdc_sys_stat_update);
+
+	/* Setup IPP statistics */
+	nxgep->statsp->ipp_ksp = nxge_setup_local_kstat(nxgep,
+		nxgep->instance,
+		"IPP Stats",
+		&nxge_ipp_stats[0],
+		IPP_STAT_END,
+		nxge_ipp_stat_update);
+#ifdef	NXGE_DEBUG_ERROR
+	if (nxgep->statsp->ipp_ksp == NULL)
+		NXGE_DEBUG_MSG((nxgep, KST_CTL, "kstat_create failed for ipp"));
+#endif
+
+	/* Setup TDC statistics */
+	for (i = 0; i < nxgep->ntdc; i++) {
+		(void) sprintf(stat_name, "%s" CH_NAME_FORMAT,
+			TDC_NAME_FORMAT1, i);
+		nxgep->statsp->tdc_ksp[i] = nxge_setup_local_kstat(nxgep,
+			nxgep->instance,
+			stat_name,
+			&nxge_tdc_stats[0],
+			TDC_STAT_END,
+			nxge_tdc_stat_update);
+#ifdef	NXGE_DEBUG_ERROR
+		if (nxgep->statsp->tdc_ksp[i] == NULL) {
+			NXGE_DEBUG_MSG((nxgep, KST_CTL,
+				"kstat_create failed for tdc channel %d", i));
+		}
+#endif
+	}
+
+	/* Setup TXC statistics */
+	nxgep->statsp->txc_ksp = nxge_setup_local_kstat(nxgep,
+		nxgep->instance, "TXC Stats", &nxge_txc_stats[0],
+		TXC_STAT_END, nxge_txc_stat_update);
+#ifdef	NXGE_DEBUG_ERROR
+	if (nxgep->statsp->txc_ksp == NULL)
+		NXGE_DEBUG_MSG((nxgep, KST_CTL, "kstat_create failed for txc"));
+#endif
+
+	/* Setup ZCP statistics */
+	nxgep->statsp->zcp_ksp = nxge_setup_local_kstat(nxgep,
+		nxgep->instance, "ZCP Stats", &nxge_zcp_stats[0],
+		ZCP_STAT_END, nxge_zcp_stat_update);
+#ifdef	NXGE_DEBUG_ERROR
+	if (nxgep->statsp->zcp_ksp == NULL)
+		NXGE_DEBUG_MSG((nxgep, KST_CTL, "kstat_create failed for zcp"));
+#endif
+
+	/* Setup FFLP statistics */
+	nxgep->statsp->fflp_ksp[0] = nxge_setup_local_kstat(nxgep,
+		nxgep->instance, "FFLP Stats", &nxge_fflp_stats[0],
+		FFLP_STAT_END, nxge_fflp_stat_update);
+
+#ifdef	NXGE_DEBUG_ERROR
+	if (nxgep->statsp->fflp_ksp == NULL)
+		NXGE_DEBUG_MSG((nxgep, KST_CTL,
+			"kstat_create failed for fflp"));
+#endif
+
+	(void) sprintf(mmac_name, "MMAC Stats%d", nxgep->instance);
+	nxgep->statsp->mmac_ksp = nxge_setup_local_kstat(nxgep,
+		nxgep->instance, mmac_name, &nxge_mmac_stats[0],
+		MMAC_STATS_END, nxge_mmac_stat_update);
+
+	nxge_kstat_sz = sizeof (nxge_port_kstat_t) +
+		sizeof (nxge_mac_kstat_t) - sizeof (kstat_named_t);
+
+	if ((ksp = kstat_create(NXGE_DRIVER_NAME, nxgep->instance,
+			"Port Stats", "net", KSTAT_TYPE_NAMED,
+			nxge_kstat_sz / sizeof (kstat_named_t), 0)) == NULL) {
+		NXGE_DEBUG_MSG((nxgep, KST_CTL, "kstat_create failed"));
+		NXGE_DEBUG_MSG((nxgep, KST_CTL, "<== nxge_setup_kstats"));
+		return;
+	}
+
+	/*
+	 * Initialize the port kstat names.
+	 */
+	nxgekp = (p_nxge_port_kstat_t)ksp->ks_data;
+
+	/*
+	 * Transceiver state information.
+	 */
+	kstat_named_init(&nxgekp->xcvr_inits, "xcvr_inits",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->xcvr_inuse, "xcvr_inuse",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->xcvr_addr, "xcvr_addr",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->xcvr_id, "xcvr_id",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->cap_autoneg, "cap_autoneg",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->cap_10gfdx, "cap_10gfdx",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->cap_10ghdx, "cap_10ghdx",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->cap_1000fdx, "cap_1000fdx",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->cap_1000hdx, "cap_1000hdx",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->cap_100T4, "cap_100T4",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->cap_100fdx, "cap_100fdx",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->cap_100hdx, "cap_100hdx",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->cap_10fdx, "cap_10fdx",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->cap_10hdx, "cap_10hdx",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->cap_asmpause, "cap_asmpause",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->cap_pause, "cap_pause",
+		KSTAT_DATA_ULONG);
+
+	/*
+	 * Link partner capabilities.
+	 */
+	kstat_named_init(&nxgekp->lp_cap_autoneg, "lp_cap_autoneg",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->lp_cap_10gfdx, "lp_cap_10gfdx",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->lp_cap_10ghdx, "lp_cap_10ghdx",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->lp_cap_1000fdx, "lp_cap_1000fdx",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->lp_cap_1000hdx, "lp_cap_1000hdx",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->lp_cap_100T4, "lp_cap_100T4",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->lp_cap_100fdx, "lp_cap_100fdx",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->lp_cap_100hdx, "lp_cap_100hdx",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->lp_cap_10fdx, "lp_cap_10fdx",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->lp_cap_10hdx, "lp_cap_10hdx",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->lp_cap_asmpause, "lp_cap_asmpause",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->lp_cap_pause, "lp_cap_pause",
+		KSTAT_DATA_ULONG);
+	/*
+	 * Shared link setup.
+	 */
+	kstat_named_init(&nxgekp->link_T4, "link_T4",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->link_speed, "link_speed",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->link_duplex, "link_duplex",
+		KSTAT_DATA_CHAR);
+	kstat_named_init(&nxgekp->link_asmpause, "link_asmpause",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->link_pause, "link_pause",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->link_up, "link_up",
+		KSTAT_DATA_ULONG);
+
+	/*
+	 * Let the user know the MTU currently in use by the physical MAC
+	 * port.
+	 */
+	kstat_named_init(&nxgekp->mac_mtu, "mac_mtu",
+		KSTAT_DATA_ULONG);
+
+	/*
+	 * Loopback statistics.
+	 */
+	kstat_named_init(&nxgekp->lb_mode, "lb_mode",
+		KSTAT_DATA_ULONG);
+
+	/*
+	 * This tells the user whether the driver is in QOS mode or not.
+	 */
+	kstat_named_init(&nxgekp->qos_mode, "qos_mode",
+		KSTAT_DATA_ULONG);
+
+	/*
+	 * This tells whether the instance is trunked or not.
+	 */
+	kstat_named_init(&nxgekp->trunk_mode, "trunk_mode",
+		KSTAT_DATA_ULONG);
+
+#if defined MULTI_DATA_TX || defined MULTI_DATA_TXV2
+	kstat_named_init(&nxgekp->mdt_reqs, "mdt_reqs",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->mdt_hdr_bufs, "mdt_hdr_bufs",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->mdt_pld_bufs, "mdt_pld_bufs",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->mdt_pkts, "mdt_pkts",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->mdt_hdrs, "mdt_hdrs",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->mdt_plds, "mdt_plds",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->mdt_hdr_bind_fail, "mdt_hdr_bind_fail",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->mdt_pld_bind_fail, "mdt_pld_bind_fail",
+		KSTAT_DATA_ULONG);
+#endif
+#ifdef ACCEPT_JUMBO
+	kstat_named_init(&nxgekp->tx_jumbo_pkts, "tx_jumbo_pkts",
+		KSTAT_DATA_ULONG);
+#endif
+
+	/*
+	 * Rx Statistics.
+	 */
+#ifdef ACCEPT_JUMBO
+	kstat_named_init(&nxgekp->rx_jumbo_pkts, "rx_jumbo_pkts",
+		KSTAT_DATA_ULONG);
+#endif
+	/* General MAC statistics */
+	kstat_named_init(&nxgekp->ifspeed, "ifspeed",
+		KSTAT_DATA_UINT64);
+	kstat_named_init(&nxgekp->promisc, "promisc",
+		KSTAT_DATA_CHAR);
+	kstat_named_init(&nxgekp->rev_id, "rev_id",
+		KSTAT_DATA_ULONG);
+
+	ksp->ks_update = nxge_port_kstat_update;
+	ksp->ks_private = (void *) nxgep;
+	if (nxgep->mac.porttype == PORT_TYPE_XMAC)
+		nxge_xmac_init_kstats(ksp);
+	else
+		nxge_bmac_init_kstats(ksp);
+	kstat_install(ksp);
+	nxgep->statsp->port_ksp = ksp;
+	NXGE_DEBUG_MSG((nxgep, KST_CTL, "<== nxge_setup_kstats"));
+}
+
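+/*
+ * Initialize the named kstats for the 10G MAC (XMAC) portion of the
+ * port statistics kstat.
+ */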
+/* ARGSUSED */
+void
+nxge_xmac_init_kstats(struct kstat *ksp)
+{
+	p_nxge_xmac_kstat_t nxgekp;
+
+	nxgekp = (p_nxge_xmac_kstat_t)ksp->ks_data;
+
+	/*
+	 * Transmit MAC statistics.
+	 */
+	kstat_named_init(&nxgekp->tx_frame_cnt, "txmac_frame_cnt",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->tx_underflow_err, "txmac_underflow_err",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->tx_overflow_err, "txmac_overflow_err",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->tx_maxpktsize_err, "txmac_maxpktsize_err",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->tx_fifo_xfr_err, "txmac_fifo_xfr_err",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->tx_byte_cnt, "txmac_byte_cnt",
+		KSTAT_DATA_ULONG);
+
+	/* Receive MAC statistics */
+	kstat_named_init(&nxgekp->rx_overflow_err, "rxmac_overflow_err",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->rx_underflow_err, "rxmac_underflow_err",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->rx_crc_err_cnt, "rxmac_crc_err",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->rx_len_err_cnt, "rxmac_length_err",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->rx_viol_err_cnt, "rxmac_code_violations",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->rx_byte_cnt, "rxmac_byte_cnt",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->rx_frame_align_err_cnt,
+		"rxmac_alignment_err",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->rx_hist1_cnt, "rxmac_64_cnt",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->rx_hist2_cnt, "rxmac_65_127_cnt",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->rx_hist3_cnt, "rxmac_128_255_cnt",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->rx_hist4_cnt, "rxmac_256_511_cnt",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->rx_hist5_cnt, "rxmac_512_1023_cnt",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->rx_hist6_cnt, "rxmac_1024_1522_cnt",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->rx_broadcast_cnt, "rxmac_broadcast_cnt",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->rx_mult_cnt, "rxmac_multicast_cnt",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->rx_frag_cnt, "rxmac_fragment_cnt",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->rx_linkfault_err_cnt, "rxmac_linkfault_errs",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->rx_remote_fault_err_cnt,
+		"rxmac_remote_faults",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->rx_local_fault_err_cnt, "rxmac_local_faults",
+		KSTAT_DATA_ULONG);
+
+	/* XPCS statistics */
+
+	kstat_named_init(&nxgekp->xpcs_deskew_err_cnt, "xpcs_deskew_err_cnt",
+		KSTAT_DATA_ULONG);
+#ifdef	NXGE_DEBUG_SYMBOL_ERR
+	kstat_named_init(&nxgekp->xpcs_ln0_symbol_err_cnt,
+		"xpcs_ln0_symbol_err_cnt",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->xpcs_ln1_symbol_err_cnt,
+		"xpcs_ln1_symbol_err_cnt",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->xpcs_ln2_symbol_err_cnt,
+		"xpcs_ln2_symbol_err_cnt",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->xpcs_ln3_symbol_err_cnt,
+		"xpcs_ln3_symbol_err_cnt",
+		KSTAT_DATA_ULONG);
+#endif
+}
+
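+/*
+ * Initialize the named kstats for the 1G MAC (BMAC) portion of the
+ * port statistics kstat.
+ */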
+/* ARGSUSED */
+void
+nxge_bmac_init_kstats(struct kstat *ksp)
+{
+	p_nxge_bmac_kstat_t nxgekp;
+
+	nxgekp = (p_nxge_bmac_kstat_t)ksp->ks_data;
+
+	/*
+	 * Transmit MAC statistics.
+	 */
+	kstat_named_init(&nxgekp->tx_frame_cnt, "txmac_frame_cnt",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->tx_underrun_err, "txmac_underflow_err",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->tx_max_pkt_err, "txmac_maxpktsize_err",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->tx_byte_cnt, "txmac_byte_cnt",
+		KSTAT_DATA_ULONG);
+
+	/* Receive MAC statistics */
+	kstat_named_init(&nxgekp->rx_overflow_err, "rxmac_overflow_err",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->rx_crc_err_cnt, "rxmac_crc_err",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->rx_len_err_cnt, "rxmac_length_err",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->rx_viol_err_cnt, "rxmac_code_violations",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->rx_byte_cnt, "rxmac_byte_cnt",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->rx_align_err_cnt, "rxmac_alignment_err",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->rx_frame_cnt, "rxmac_frame_cnt",
+		KSTAT_DATA_ULONG);
+}
+
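+/*
+ * Initialize the common MAC named kstats, plus the XMAC- or
+ * BMAC-specific counters depending on the port type.
+ */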
+/* ARGSUSED */
+void
+nxge_mac_init_kstats(p_nxge_t nxgep, struct kstat *ksp)
+{
+	p_nxge_mac_kstat_t nxgekp;
+
+	nxgekp = (p_nxge_mac_kstat_t)ksp->ks_data;
+
+	/*
+	 * Transmit MAC statistics.
+	 */
+	kstat_named_init(&nxgekp->tx_frame_cnt, "txmac_frame_cnt",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->tx_underflow_err, "txmac_underflow_err",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->tx_overflow_err, "txmac_overflow_err",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->tx_maxpktsize_err, "txmac_maxpktsize_err",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->tx_fifo_xfr_err, "txmac_fifo_xfr_err",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->tx_byte_cnt, "txmac_byte_cnt",
+		KSTAT_DATA_ULONG);
+
+	/*
+	 * Receive MAC statistics
+	 */
+	kstat_named_init(&nxgekp->rx_overflow_err, "rxmac_overflow_err",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->rx_underflow_err, "rxmac_underflow_err",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->rx_crc_err_cnt, "rxmac_crc_err",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->rx_len_err_cnt, "rxmac_length_err",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->rx_viol_err_cnt, "rxmac_code_violations",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->rx_byte_cnt, "rxmac_byte_cnt",
+		KSTAT_DATA_ULONG);
+	kstat_named_init(&nxgekp->rx_frame_align_err_cnt,
+		"rxmac_alignment_err",
+		KSTAT_DATA_ULONG);
+	if (nxgep->mac.porttype == PORT_TYPE_XMAC) {
+		kstat_named_init(&nxgekp->rx_hist1_cnt, "rxmac_64_cnt",
+			KSTAT_DATA_ULONG);
+		kstat_named_init(&nxgekp->rx_hist2_cnt, "rxmac_65_127_cnt",
+			KSTAT_DATA_ULONG);
+		kstat_named_init(&nxgekp->rx_hist3_cnt, "rxmac_128_255_cnt",
+			KSTAT_DATA_ULONG);
+		kstat_named_init(&nxgekp->rx_hist4_cnt, "rxmac_256_511_cnt",
+			KSTAT_DATA_ULONG);
+		kstat_named_init(&nxgekp->rx_hist5_cnt, "rxmac_512_1023_cnt",
+			KSTAT_DATA_ULONG);
+		kstat_named_init(&nxgekp->rx_hist6_cnt, "rxmac_1024_1522_cnt",
+			KSTAT_DATA_ULONG);
+		kstat_named_init(&nxgekp->rx_broadcast_cnt,
+			"rxmac_broadcast_cnt",
+			KSTAT_DATA_ULONG);
+		kstat_named_init(&nxgekp->rx_mult_cnt, "rxmac_multicast_cnt",
+			KSTAT_DATA_ULONG);
+		kstat_named_init(&nxgekp->rx_frag_cnt, "rxmac_fragment_cnt",
+			KSTAT_DATA_ULONG);
+		kstat_named_init(&nxgekp->rx_linkfault_err_cnt,
+			"rxmac_linkfault_errs",
+			KSTAT_DATA_ULONG);
+		kstat_named_init(&nxgekp->rx_remote_fault_err_cnt,
+			"rxmac_remote_faults",
+			KSTAT_DATA_ULONG);
+		kstat_named_init(&nxgekp->rx_local_fault_err_cnt,
+			"rxmac_local_faults",
+			KSTAT_DATA_ULONG);
+	} else if (nxgep->mac.porttype == PORT_TYPE_BMAC) {
+		kstat_named_init(&nxgekp->rx_frame_cnt, "rxmac_frame_cnt",
+			KSTAT_DATA_ULONG);
+	}
+}
+
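+/*
+ * Delete all of the kstats created by nxge_setup_kstats() and free
+ * the statistics area.
+ */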
+/* ARGSUSED */
+void
+nxge_destroy_kstats(p_nxge_t nxgep)
+{
+	int channel;
+	p_nxge_dma_pt_cfg_t p_dma_cfgp;
+	p_nxge_hw_pt_cfg_t p_cfgp;
+
+	NXGE_DEBUG_MSG((nxgep, KST_CTL, "==> nxge_destroy_kstats"));
+
+	if (nxgep->statsp == NULL)
+		return;
+	if (nxgep->statsp->ksp)
+		kstat_delete(nxgep->statsp->ksp);
+
+	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
+	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
+
+	for (channel = 0; channel < p_cfgp->max_rdcs; channel++) {
+		if (nxgep->statsp->rdc_ksp[channel])
+			kstat_delete(nxgep->statsp->rdc_ksp[channel]);
+	}
+
+	for (channel = 0; channel < p_cfgp->max_tdcs; channel++) {
+		if (nxgep->statsp->tdc_ksp[channel])
+			kstat_delete(nxgep->statsp->tdc_ksp[channel]);
+	}
+
+	if (nxgep->statsp->rdc_sys_ksp)
+		kstat_delete(nxgep->statsp->rdc_sys_ksp);
+	if (nxgep->statsp->fflp_ksp[0])
+		kstat_delete(nxgep->statsp->fflp_ksp[0]);
+	if (nxgep->statsp->ipp_ksp)
+		kstat_delete(nxgep->statsp->ipp_ksp);
+	if (nxgep->statsp->txc_ksp)
+		kstat_delete(nxgep->statsp->txc_ksp);
+	if (nxgep->statsp->mac_ksp)
+		kstat_delete(nxgep->statsp->mac_ksp);
+	if (nxgep->statsp->zcp_ksp)
+		kstat_delete(nxgep->statsp->zcp_ksp);
+	if (nxgep->statsp->port_ksp)
+		kstat_delete(nxgep->statsp->port_ksp);
+	if (nxgep->statsp->mmac_ksp)
+		kstat_delete(nxgep->statsp->mmac_ksp);
+	if (nxgep->statsp)
+		KMEM_FREE(nxgep->statsp, nxgep->statsp->stats_size);
+
+	NXGE_DEBUG_MSG((nxgep, KST_CTL, "<== nxge_destroy_kstats"));
+}
+
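+/*
+ * Update the port statistics kstat.  The hardware counters are saved
+ * first, then the port, transceiver and link statistics are copied in
+ * the direction indicated by rw.
+ */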
+/* ARGSUSED */
+int
+nxge_port_kstat_update(kstat_t *ksp, int rw)
+{
+	p_nxge_t nxgep;
+	p_nxge_stats_t statsp;
+	p_nxge_port_kstat_t nxgekp;
+
+	nxgep = (p_nxge_t)ksp->ks_private;
+	if (nxgep == NULL)
+		return (-1);
+
+	NXGE_DEBUG_MSG((nxgep, KST_CTL, "==> nxge_port_kstat_update"));
+	statsp = (p_nxge_stats_t)nxgep->statsp;
+	nxgekp = (p_nxge_port_kstat_t)ksp->ks_data;
+	nxge_save_cntrs(nxgep);
+
+	if (rw == KSTAT_WRITE) {
+		/*
+		 * Transceiver state information.
+		 */
+		statsp->mac_stats.xcvr_inits = nxgekp->xcvr_inits.value.ul;
+
+		/*
+		 * Tx Statistics.
+		 */
+#if defined MULTI_DATA_TX || defined MULTI_DATA_TXV2
+		statsp->port_stats.mdt_reqs = nxgekp->mdt_reqs.value.ul;
+		statsp->port_stats.mdt_hdr_bufs = nxgekp->mdt_hdr_bufs.value.ul;
+		statsp->port_stats.mdt_pld_bufs = nxgekp->mdt_pld_bufs.value.ul;
+		statsp->port_stats.mdt_pkts = nxgekp->mdt_pkts.value.ul;
+		statsp->port_stats.mdt_hdrs = nxgekp->mdt_hdrs.value.ul;
+		statsp->port_stats.mdt_plds = nxgekp->mdt_plds.value.ul;
+		statsp->port_stats.mdt_hdr_bind_fail =
+			nxgekp->mdt_hdr_bind_fail.value.ul;
+		statsp->port_stats.mdt_pld_bind_fail =
+			nxgekp->mdt_pld_bind_fail.value.ul;
+#endif
+#ifdef ACCEPT_JUMBO
+		statsp->port_stats.tx_jumbo_pkts =
+			nxgekp->tx_jumbo_pkts.value.ul;
+#endif
+		/*
+		 * Rx Statistics.
+		 */
+#ifdef ACCEPT_JUMBO
+		statsp->port_stats.rx_jumbo_pkts =
+			nxgekp->rx_jumbo_pkts.value.ul;
+#endif
+		(void) nxge_xmac_stat_update(ksp, KSTAT_WRITE);
+		return (0);
+	} else {
+		if (nxgep->filter.all_phys_cnt)
+			(void) strcpy(nxgekp->promisc.value.c, "phys");
+		else if (nxgep->filter.all_multicast_cnt)
+			(void) strcpy(nxgekp->promisc.value.c, "multi");
+		else
+			(void) strcpy(nxgekp->promisc.value.c, "off");
+		nxgekp->ifspeed.value.ul =
+			statsp->mac_stats.link_speed * 1000000ULL;
+		nxgekp->rev_id.value.ul = statsp->mac_stats.rev_id;
+
+		/*
+		 * Transceiver state information.
+		 */
+		nxgekp->xcvr_inits.value.ul = statsp->mac_stats.xcvr_inits;
+		nxgekp->xcvr_inuse.value.ul = statsp->mac_stats.xcvr_inuse;
+		nxgekp->xcvr_addr.value.ul = statsp->mac_stats.xcvr_portn;
+		nxgekp->xcvr_id.value.ul = statsp->mac_stats.xcvr_id;
+		nxgekp->cap_autoneg.value.ul = statsp->mac_stats.cap_autoneg;
+		nxgekp->cap_10gfdx.value.ul = statsp->mac_stats.cap_10gfdx;
+		nxgekp->cap_10ghdx.value.ul = statsp->mac_stats.cap_10ghdx;
+		nxgekp->cap_1000fdx.value.ul = statsp->mac_stats.cap_1000fdx;
+		nxgekp->cap_1000hdx.value.ul = statsp->mac_stats.cap_1000hdx;
+		nxgekp->cap_100T4.value.ul = statsp->mac_stats.cap_100T4;
+		nxgekp->cap_100fdx.value.ul = statsp->mac_stats.cap_100fdx;
+		nxgekp->cap_100hdx.value.ul = statsp->mac_stats.cap_100hdx;
+		nxgekp->cap_10fdx.value.ul = statsp->mac_stats.cap_10fdx;
+		nxgekp->cap_10hdx.value.ul = statsp->mac_stats.cap_10hdx;
+		nxgekp->cap_asmpause.value.ul =
+			statsp->mac_stats.cap_asmpause;
+		nxgekp->cap_pause.value.ul = statsp->mac_stats.cap_pause;
+
+		/*
+		 * Link partner capabilities.
+		 */
+		nxgekp->lp_cap_autoneg.value.ul =
+			statsp->mac_stats.lp_cap_autoneg;
+		nxgekp->lp_cap_10gfdx.value.ul =
+			statsp->mac_stats.lp_cap_10gfdx;
+		nxgekp->lp_cap_10ghdx.value.ul =
+			statsp->mac_stats.lp_cap_10ghdx;
+		nxgekp->lp_cap_1000fdx.value.ul =
+			statsp->mac_stats.lp_cap_1000fdx;
+		nxgekp->lp_cap_1000hdx.value.ul =
+			statsp->mac_stats.lp_cap_1000hdx;
+		nxgekp->lp_cap_100T4.value.ul =
+			statsp->mac_stats.lp_cap_100T4;
+		nxgekp->lp_cap_100fdx.value.ul =
+			statsp->mac_stats.lp_cap_100fdx;
+		nxgekp->lp_cap_100hdx.value.ul =
+			statsp->mac_stats.lp_cap_100hdx;
+		nxgekp->lp_cap_10fdx.value.ul =
+			statsp->mac_stats.lp_cap_10fdx;
+		nxgekp->lp_cap_10hdx.value.ul =
+			statsp->mac_stats.lp_cap_10hdx;
+		nxgekp->lp_cap_asmpause.value.ul =
+			statsp->mac_stats.lp_cap_asmpause;
+		nxgekp->lp_cap_pause.value.ul =
+			statsp->mac_stats.lp_cap_pause;
+
+		/*
+		 * Physical link statistics.
+		 */
+		nxgekp->link_T4.value.ul = statsp->mac_stats.link_T4;
+		nxgekp->link_speed.value.ul = statsp->mac_stats.link_speed;
+		if (statsp->mac_stats.link_duplex == 2)
+			(void) strcpy(nxgekp->link_duplex.value.c, "full");
+		else if (statsp->mac_stats.link_duplex == 1)
+			(void) strcpy(nxgekp->link_duplex.value.c, "half");
+		else
+			(void) strcpy(nxgekp->link_duplex.value.c, "unknown");
+		nxgekp->link_asmpause.value.ul =
+			statsp->mac_stats.link_asmpause;
+		nxgekp->link_pause.value.ul = statsp->mac_stats.link_pause;
+		nxgekp->link_up.value.ul = statsp->mac_stats.link_up;
+
+		/*
+		 * Lets the user know the MTU currently in use by the physical
+		 * MAC port.
+		 */
+		nxgekp->mac_mtu.value.ul = statsp->mac_stats.mac_mtu;
+
+		/*
+		 * Loopback statistics.
+		 */
+		nxgekp->lb_mode.value.ul = statsp->port_stats.lb_mode;
+
+		/*
+		 * This tells the user whether the driver is in QOS mode or
+		 * not.
+		 */
+		nxgekp->qos_mode.value.ul = statsp->port_stats.qos_mode;
+
+		/*
+		 * This tells whether the instance is trunked or not.
+		 */
+		nxgekp->trunk_mode.value.ul = statsp->port_stats.trunk_mode;
+
+#if defined MULTI_DATA_TX || defined MULTI_DATA_TXV2
+		nxgekp->mdt_reqs.value.ul = statsp->port_stats.mdt_reqs;
+		nxgekp->mdt_hdr_bufs.value.ul =
+			statsp->port_stats.mdt_hdr_bufs;
+		nxgekp->mdt_pld_bufs.value.ul =
+			statsp->port_stats.mdt_pld_bufs;
+		nxgekp->mdt_pkts.value.ul = statsp->port_stats.mdt_pkts;
+		nxgekp->mdt_hdrs.value.ul = statsp->port_stats.mdt_hdrs;
+		nxgekp->mdt_plds.value.ul = statsp->port_stats.mdt_plds;
+		nxgekp->mdt_hdr_bind_fail.value.ul =
+			statsp->port_stats.mdt_hdr_bind_fail;
+		nxgekp->mdt_pld_bind_fail.value.ul =
+			statsp->port_stats.mdt_pld_bind_fail;
+#endif
+#ifdef ACCEPT_JUMBO
+		nxgekp->tx_jumbo_pkts.value.ul =
+			statsp->port_stats.tx_jumbo_pkts;
+#endif
+#ifdef TX_MBLK_DEST
+		nxgekp->tx_1_desc.value.ul = statsp->port_stats.tx_1_desc;
+		nxgekp->tx_2_desc.value.ul = statsp->port_stats.tx_2_desc;
+		nxgekp->tx_3_desc.value.ul = statsp->port_stats.tx_3_desc;
+		nxgekp->tx_4_desc.value.ul = statsp->port_stats.tx_4_desc;
+		nxgekp->tx_5_desc.value.ul = statsp->port_stats.tx_5_desc;
+		nxgekp->tx_6_desc.value.ul = statsp->port_stats.tx_6_desc;
+		nxgekp->tx_7_desc.value.ul = statsp->port_stats.tx_7_desc;
+		nxgekp->tx_8_desc.value.ul = statsp->port_stats.tx_8_desc;
+		nxgekp->tx_max_desc.value.ul =
+			statsp->port_stats.tx_max_desc;
+#endif
+		/*
+		 * Rx Statistics.
+		 */
+#ifdef ACCEPT_JUMBO
+		nxgekp->rx_jumbo_pkts.value.ul =
+			statsp->port_stats.rx_jumbo_pkts;
+#endif
+		(void) nxge_xmac_stat_update(ksp, KSTAT_READ);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, KST_CTL, "<== nxge_port_kstat_update"));
+	return (0);
+}
+
+/*
+ * Read the hardware MAC and IPP counters and accumulate them into
+ * the soft statistics.
+ */
+/* ARGSUSED */
+void
+nxge_save_cntrs(p_nxge_t nxgep)
+{
+	p_nxge_stats_t statsp;
+	uint64_t val;
+	npi_handle_t handle;
+	uint8_t portn;
+	uint8_t cnt8;
+	uint16_t cnt16;
+	uint32_t cnt32;
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_save_cntrs"));
+
+	statsp = (p_nxge_stats_t)nxgep->statsp;
+	handle = nxgep->npi_handle;
+	portn = nxgep->mac.portnum;
+
+	MUTEX_ENTER(&nxgep->ouraddr_lock);
+
+	if (nxgep->mac.porttype == PORT_TYPE_XMAC) {
+		/*
+		 * Transmit MAC statistics.
+		 */
+		XMAC_REG_RD(handle, portn, XTXMAC_FRM_CNT_REG, &val);
+		statsp->xmac_stats.tx_frame_cnt += (val & XTXMAC_FRM_CNT_MASK);
+		XMAC_REG_RD(handle, portn, XTXMAC_BYTE_CNT_REG, &val);
+		statsp->xmac_stats.tx_byte_cnt += (val & XTXMAC_BYTE_CNT_MASK);
+		/*
+		 * Receive XMAC statistics.
+		 */
+		XMAC_REG_RD(handle, portn, XRXMAC_CRC_ER_CNT_REG, &val);
+		statsp->xmac_stats.rx_crc_err_cnt +=
+			(val & XRXMAC_CRC_ER_CNT_MASK);
+		XMAC_REG_RD(handle, portn, XRXMAC_MPSZER_CNT_REG, &val);
+		statsp->xmac_stats.rx_len_err_cnt +=
+			(val & XRXMAC_MPSZER_CNT_MASK);
+		XMAC_REG_RD(handle, portn, XRXMAC_CD_VIO_CNT_REG, &val);
+		statsp->xmac_stats.rx_viol_err_cnt +=
+			(val & XRXMAC_CD_VIO_CNT_MASK);
+		XMAC_REG_RD(handle, portn, XRXMAC_BT_CNT_REG, &val);
+		statsp->xmac_stats.rx_byte_cnt += (val & XRXMAC_BT_CNT_MASK);
+		XMAC_REG_RD(handle, portn, XRXMAC_HIST_CNT1_REG, &val);
+		statsp->xmac_stats.rx_hist1_cnt +=
+			(val & XRXMAC_HIST_CNT1_MASK);
+		XMAC_REG_RD(handle, portn, XRXMAC_HIST_CNT2_REG, &val);
+		statsp->xmac_stats.rx_hist2_cnt +=
+			(val & XRXMAC_HIST_CNT2_MASK);
+		XMAC_REG_RD(handle, portn, XRXMAC_HIST_CNT3_REG, &val);
+		statsp->xmac_stats.rx_hist3_cnt +=
+			(val & XRXMAC_HIST_CNT3_MASK);
+		XMAC_REG_RD(handle, portn, XRXMAC_HIST_CNT4_REG, &val);
+		statsp->xmac_stats.rx_hist4_cnt +=
+			(val & XRXMAC_HIST_CNT4_MASK);
+		XMAC_REG_RD(handle, portn, XRXMAC_HIST_CNT5_REG, &val);
+		statsp->xmac_stats.rx_hist5_cnt +=
+			(val & XRXMAC_HIST_CNT5_MASK);
+		XMAC_REG_RD(handle, portn, XRXMAC_HIST_CNT6_REG, &val);
+		statsp->xmac_stats.rx_hist6_cnt +=
+			(val & XRXMAC_HIST_CNT6_MASK);
+		XMAC_REG_RD(handle, portn, XRXMAC_BC_FRM_CNT_REG, &val);
+		statsp->xmac_stats.rx_broadcast_cnt +=
+			(val & XRXMAC_BC_FRM_CNT_MASK);
+		XMAC_REG_RD(handle, portn, XRXMAC_MC_FRM_CNT_REG, &val);
+		statsp->xmac_stats.rx_mult_cnt +=
+			(val & XRXMAC_MC_FRM_CNT_MASK);
+		XMAC_REG_RD(handle, portn, XRXMAC_FRAG_CNT_REG, &val);
+		statsp->xmac_stats.rx_frag_cnt += (val & XRXMAC_FRAG_CNT_MASK);
+		XMAC_REG_RD(handle, portn, XRXMAC_AL_ER_CNT_REG, &val);
+		statsp->xmac_stats.rx_frame_align_err_cnt +=
+			(val & XRXMAC_AL_ER_CNT_MASK);
+		XMAC_REG_RD(handle, portn, XMAC_LINK_FLT_CNT_REG, &val);
+		statsp->xmac_stats.rx_linkfault_err_cnt +=
+			(val & XMAC_LINK_FLT_CNT_MASK);
+		(void) npi_xmac_xpcs_read(handle, portn,
+			XPCS_REG_DESCWERR_COUNTER, &cnt32);
+		statsp->xmac_stats.xpcs_deskew_err_cnt +=
+			(cnt32 & XMAC_XPCS_DESKEW_ERR_CNT_MASK);
+#ifdef	NXGE_DEBUG_SYMBOL_ERR
+		(void) npi_xmac_xpcs_read(handle, portn,
+			XPCS_REG_SYMBOL_ERR_L0_1_COUNTER, &cnt32);
+		statsp->xmac_stats.xpcs_ln0_symbol_err_cnt +=
+			(cnt32 & XMAC_XPCS_SYM_ERR_CNT_L0_MASK);
+		statsp->xmac_stats.xpcs_ln1_symbol_err_cnt +=
+			((cnt32 & XMAC_XPCS_SYM_ERR_CNT_L1_MASK) >>
+			XMAC_XPCS_SYM_ERR_CNT_L1_SHIFT);
+		(void) npi_xmac_xpcs_read(handle, portn,
+			XPCS_REG_SYMBOL_ERR_L2_3_COUNTER, &cnt32);
+		statsp->xmac_stats.xpcs_ln2_symbol_err_cnt +=
+			(cnt32 & XMAC_XPCS_SYM_ERR_CNT_L2_MASK);
+		statsp->xmac_stats.xpcs_ln3_symbol_err_cnt +=
+			((cnt32 & XMAC_XPCS_SYM_ERR_CNT_L3_MASK) >>
+			XMAC_XPCS_SYM_ERR_CNT_L3_SHIFT);
+#endif
+	} else if (nxgep->mac.porttype == PORT_TYPE_BMAC) {
+		/*
+		 * Transmit MAC statistics.
+		 */
+		BMAC_REG_RD(handle, portn, BTXMAC_FRM_CNT_REG, &val);
+		statsp->bmac_stats.tx_frame_cnt += (val & BTXMAC_FRM_CNT_MASK);
+		BMAC_REG_RD(handle, portn, BTXMAC_BYTE_CNT_REG, &val);
+		statsp->bmac_stats.tx_byte_cnt += (val & BTXMAC_BYTE_CNT_MASK);
+
+		/*
+		 * Receive MAC statistics.
+		 */
+		BMAC_REG_RD(handle, portn, RXMAC_FRM_CNT_REG, &val);
+		statsp->bmac_stats.rx_frame_cnt += (val & RXMAC_FRM_CNT_MASK);
+		BMAC_REG_RD(handle, portn, BRXMAC_BYTE_CNT_REG, &val);
+		statsp->bmac_stats.rx_byte_cnt += (val & BRXMAC_BYTE_CNT_MASK);
+		BMAC_REG_RD(handle, portn, BMAC_AL_ER_CNT_REG, &val);
+		statsp->bmac_stats.rx_align_err_cnt +=
+			(val & BMAC_AL_ER_CNT_MASK);
+		BMAC_REG_RD(handle, portn, MAC_LEN_ER_CNT_REG, &val);
+		statsp->bmac_stats.rx_len_err_cnt +=
+			(val & MAC_LEN_ER_CNT_MASK);
+		BMAC_REG_RD(handle, portn, BMAC_CRC_ER_CNT_REG, &val);
+		statsp->bmac_stats.rx_crc_err_cnt +=
+			(val & BMAC_CRC_ER_CNT_MASK);
+		BMAC_REG_RD(handle, portn, BMAC_CD_VIO_CNT_REG, &val);
+		statsp->bmac_stats.rx_viol_err_cnt +=
+			(val & BMAC_CD_VIO_CNT_MASK);
+	}
+	/* Update IPP counters */
+	(void) npi_ipp_get_ecc_err_count(handle, portn, &cnt8);
+	statsp->ipp_stats.ecc_err_cnt += cnt8;
+	(void) npi_ipp_get_pkt_dis_count(handle, portn, &cnt16);
+	statsp->ipp_stats.pkt_dis_cnt += cnt16;
+	(void) npi_ipp_get_cs_err_count(handle, portn, &cnt16);
+	statsp->ipp_stats.bad_cs_cnt += cnt16;
+
+	MUTEX_EXIT(&nxgep->ouraddr_lock);
+
+nxge_save_cntrs_exit:
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_save_cntrs"));
+}
+
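+/*
+ * MAC layer get-statistic entry point: return the requested MAC or
+ * Ethernet statistic, summing the per-channel RDC/TDC counters where
+ * needed.
+ */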
+/* ARGSUSED */
+int
+nxge_m_stat(void *arg, uint_t stat, uint64_t *value)
+{
+	p_nxge_t nxgep = (p_nxge_t)arg;
+	p_nxge_stats_t statsp;
+	uint64_t val = 0;
+	int channel;
+
+	NXGE_DEBUG_MSG((nxgep, KST_CTL, "==> nxge_m_stat"));
+	statsp = (p_nxge_stats_t)nxgep->statsp;
+
+	switch (stat) {
+	case MAC_STAT_IFSPEED:
+		val = statsp->mac_stats.link_speed * 1000000ull;
+		break;
+
+	case MAC_STAT_MULTIRCV:
+		val = statsp->port_stats.multircv;
+		break;
+
+	case MAC_STAT_BRDCSTRCV:
+		val = statsp->port_stats.brdcstrcv;
+		break;
+
+	case MAC_STAT_MULTIXMT:
+		val = statsp->port_stats.multixmt;
+		break;
+
+	case MAC_STAT_BRDCSTXMT:
+		val = statsp->port_stats.brdcstxmt;
+		break;
+
+	case MAC_STAT_NORCVBUF:
+		val = statsp->port_stats.norcvbuf;
+		break;
+
+	case MAC_STAT_IERRORS:
+	case ETHER_STAT_MACRCV_ERRORS:
+		val = 0;
+		for (channel = 0; channel < nxgep->nrdc; channel++) {
+			val += statsp->rdc_stats[channel].ierrors;
+		}
+		break;
+
+	case MAC_STAT_NOXMTBUF:
+		val = statsp->port_stats.noxmtbuf;
+		break;
+
+	case MAC_STAT_OERRORS:
+		for (channel = 0; channel < nxgep->ntdc; channel++) {
+			val += statsp->tdc_stats[channel].oerrors;
+		}
+
+		break;
+
+	case MAC_STAT_COLLISIONS:
+		val = 0;
+		break;
+
+	case MAC_STAT_RBYTES:
+		for (channel = 0; channel < nxgep->nrdc; channel++) {
+			val += statsp->rdc_stats[channel].ibytes;
+		}
+		break;
+
+	case MAC_STAT_IPACKETS:
+		for (channel = 0; channel < nxgep->nrdc; channel++) {
+			val += statsp->rdc_stats[channel].ipackets;
+		}
+		break;
+
+	case MAC_STAT_OBYTES:
+		for (channel = 0; channel < nxgep->ntdc; channel++) {
+			val += statsp->tdc_stats[channel].obytes;
+		}
+		break;
+
+	case MAC_STAT_OPACKETS:
+		for (channel = 0; channel < nxgep->ntdc; channel++) {
+			val += statsp->tdc_stats[channel].opackets;
+		}
+		break;
+	case MAC_STAT_LINK_STATE:
+		val = statsp->mac_stats.link_duplex;
+		break;
+	case MAC_STAT_LINK_UP:
+		val = statsp->mac_stats.link_up;
+		break;
+	case MAC_STAT_PROMISC:
+		val = statsp->mac_stats.promisc;
+		break;
+	case ETHER_STAT_SQE_ERRORS:
+		val = 0;
+		break;
+
+	case ETHER_STAT_ALIGN_ERRORS:
+		if (nxgep->mac.porttype == PORT_TYPE_XMAC)
+			val = statsp->xmac_stats.rx_frame_align_err_cnt;
+		else if (nxgep->mac.porttype == PORT_TYPE_BMAC)
+			val = statsp->bmac_stats.rx_align_err_cnt;
+		else
+			val = 0;
+		break;
+
+	case ETHER_STAT_FCS_ERRORS:
+		if (nxgep->mac.porttype == PORT_TYPE_XMAC)
+			val = statsp->xmac_stats.rx_crc_err_cnt;
+		else if (nxgep->mac.porttype == PORT_TYPE_BMAC)
+			val = statsp->bmac_stats.rx_crc_err_cnt;
+		else
+			val = 0;
+		break;
+
+	case ETHER_STAT_FIRST_COLLISIONS:
+		val = 0;
+		break;
+
+	case ETHER_STAT_MULTI_COLLISIONS:
+		val = 0;
+		break;
+
+	case ETHER_STAT_TX_LATE_COLLISIONS:
+		val = 0;
+		break;
+
+	case ETHER_STAT_EX_COLLISIONS:
+		val = 0;
+		break;
+
+	case ETHER_STAT_DEFER_XMTS:
+		val = 0;
+		break;
+
+	case ETHER_STAT_MACXMT_ERRORS:
+		if (nxgep->mac.porttype == PORT_TYPE_XMAC) {
+			val = statsp->xmac_stats.tx_underflow_err +
+				statsp->xmac_stats.tx_maxpktsize_err +
+				statsp->xmac_stats.tx_overflow_err +
+				statsp->xmac_stats.tx_fifo_xfr_err;
+		} else {
+			val = statsp->bmac_stats.tx_underrun_err +
+				statsp->bmac_stats.tx_max_pkt_err;
+		}
+		break;
+
+	case ETHER_STAT_CARRIER_ERRORS:
+		if (nxgep->mac.porttype == PORT_TYPE_XMAC) {
+			val = statsp->xmac_stats.rx_linkfault_err_cnt;
+		} else {
+			val = statsp->mac_stats.xcvr_inits +
+				statsp->mac_stats.serdes_inits;
+		}
+		break;
+
+	case ETHER_STAT_TOOLONG_ERRORS:
+		if (nxgep->mac.porttype == PORT_TYPE_XMAC) {
+			val = statsp->xmac_stats.tx_maxpktsize_err +
+				statsp->xmac_stats.rx_len_err_cnt;
+
+		} else {
+			val = statsp->bmac_stats.rx_len_err_cnt +
+				statsp->bmac_stats.tx_max_pkt_err;
+		}
+		break;
+
+
+	case ETHER_STAT_XCVR_ADDR:
+		val = statsp->mac_stats.xcvr_portn;
+		break;
+	case ETHER_STAT_XCVR_ID:
+		val = statsp->mac_stats.xcvr_id;
+		break;
+
+	case ETHER_STAT_XCVR_INUSE:
+		val = statsp->mac_stats.xcvr_inuse;
+		break;
+
+	case ETHER_STAT_CAP_1000FDX:
+		val = statsp->mac_stats.cap_1000fdx;
+		break;
+
+	case ETHER_STAT_CAP_1000HDX:
+		val = statsp->mac_stats.cap_1000hdx;
+		break;
+
+	case ETHER_STAT_CAP_100FDX:
+		val = statsp->mac_stats.cap_100fdx;
+		break;
+
+	case ETHER_STAT_CAP_100HDX:
+		val = statsp->mac_stats.cap_100hdx;
+		break;
+
+	case ETHER_STAT_CAP_10FDX:
+		val = statsp->mac_stats.cap_10fdx;
+		break;
+
+	case ETHER_STAT_CAP_10HDX:
+		val = statsp->mac_stats.cap_10hdx;
+		break;
+
+	case ETHER_STAT_CAP_ASMPAUSE:
+		val = 1;
+		break;
+
+	case ETHER_STAT_CAP_PAUSE:
+		val = statsp->mac_stats.cap_pause;
+		break;
+
+	case ETHER_STAT_CAP_AUTONEG:
+		val = statsp->mac_stats.cap_autoneg;
+		break;
+
+	case ETHER_STAT_ADV_CAP_1000FDX:
+		val = statsp->mac_stats.adv_cap_1000fdx;
+		break;
+
+	case ETHER_STAT_ADV_CAP_1000HDX:
+		val = statsp->mac_stats.adv_cap_1000hdx;
+		break;
+
+	case ETHER_STAT_ADV_CAP_100FDX:
+		val = statsp->mac_stats.adv_cap_100fdx;
+		break;
+
+	case ETHER_STAT_ADV_CAP_100HDX:
+		val = statsp->mac_stats.adv_cap_100hdx;
+		break;
+
+	case ETHER_STAT_ADV_CAP_10FDX:
+		val = statsp->mac_stats.adv_cap_10fdx;
+		break;
+
+	case ETHER_STAT_ADV_CAP_10HDX:
+		val = statsp->mac_stats.adv_cap_10hdx;
+		break;
+
+	case ETHER_STAT_ADV_CAP_ASMPAUSE:
+		val = statsp->mac_stats.adv_cap_asmpause;
+		break;
+
+	case ETHER_STAT_ADV_CAP_PAUSE:
+		val = statsp->mac_stats.adv_cap_pause;
+		break;
+
+	case ETHER_STAT_ADV_CAP_AUTONEG:
+		val = statsp->mac_stats.adv_cap_autoneg;
+		break;
+
+	case ETHER_STAT_LP_CAP_1000FDX:
+		val = statsp->mac_stats.lp_cap_1000fdx;
+		break;
+
+	case ETHER_STAT_LP_CAP_1000HDX:
+		val = statsp->mac_stats.lp_cap_1000hdx;
+		break;
+
+	case ETHER_STAT_LP_CAP_100FDX:
+		val = statsp->mac_stats.lp_cap_100fdx;
+		break;
+
+	case ETHER_STAT_LP_CAP_100HDX:
+		val = statsp->mac_stats.lp_cap_100hdx;
+		break;
+
+	case ETHER_STAT_LP_CAP_10FDX:
+		val = statsp->mac_stats.lp_cap_10fdx;
+		break;
+
+	case ETHER_STAT_LP_CAP_10HDX:
+		val = statsp->mac_stats.lp_cap_10hdx;
+		break;
+
+	case ETHER_STAT_LP_CAP_ASMPAUSE:
+		val = statsp->mac_stats.lp_cap_asmpause;
+		break;
+
+	case ETHER_STAT_LP_CAP_PAUSE:
+		val = statsp->mac_stats.lp_cap_pause;
+		break;
+
+	case ETHER_STAT_LP_CAP_AUTONEG:
+		val = statsp->mac_stats.lp_cap_autoneg;
+		break;
+
+	case ETHER_STAT_LINK_ASMPAUSE:
+		val = statsp->mac_stats.link_asmpause;
+		break;
+
+	case ETHER_STAT_LINK_PAUSE:
+		val = statsp->mac_stats.link_pause;
+		break;
+
+	case ETHER_STAT_LINK_AUTONEG:
+		val = statsp->mac_stats.cap_autoneg;
+		break;
+
+	case ETHER_STAT_LINK_DUPLEX:
+		val = statsp->mac_stats.link_duplex;
+		break;
+
+	default:
+		/*
+		 * Shouldn't reach here...
+		 */
+#ifdef NXGE_DEBUG
+		NXGE_ERROR_MSG((nxgep, KST_CTL,
+			"nxge_m_stat: unrecognized parameter value = 0x%x",
+			stat));
+#endif
+
+		return (ENOTSUP);
+	}
+	*value = val;
+	return (0);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nxge/nxge_mac.c	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,3325 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#include <sys/nxge/nxge_impl.h>
+#include <sys/nxge/nxge_mac.h>
+
+extern uint32_t nxge_no_link_notify;
+extern uint32_t nxge_no_msg;
+extern uint32_t nxge_lb_dbg;
+extern nxge_os_mutex_t	nxge_mdio_lock;
+extern nxge_os_mutex_t	nxge_mii_lock;
+extern boolean_t nxge_jumbo_enable;
+
+/*
+ * Ethernet broadcast address definition.
+ */
+static ether_addr_st etherbroadcastaddr =
+				{{0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
+
+nxge_status_t nxge_mac_init(p_nxge_t);
+
+/* Initialize the entire MAC and physical layer */
+
+nxge_status_t
+nxge_mac_init(p_nxge_t nxgep)
+{
+	uint8_t			portn;
+	nxge_status_t		status = NXGE_OK;
+
+	portn = NXGE_GET_PORT_NUM(nxgep->function_num);
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_mac_init: port<%d>", portn));
+
+	nxgep->mac.portnum = portn;
+	nxgep->mac.porttype = PORT_TYPE_XMAC;
+
+	if ((portn == BMAC_PORT_0) || (portn == BMAC_PORT_1))
+		nxgep->mac.porttype = PORT_TYPE_BMAC;
+
+	/* Initialize XIF to configure a network mode */
+	if ((status = nxge_xif_init(nxgep)) != NXGE_OK) {
+		goto fail;
+	}
+
+	if ((status = nxge_pcs_init(nxgep)) != NXGE_OK) {
+		goto fail;
+	}
+
+	/* Initialize TX and RX MACs */
+	/*
+	 * Always perform XIF init first, before TX and RX MAC init
+	 */
+	if ((status = nxge_tx_mac_reset(nxgep)) != NXGE_OK)
+		goto fail;
+
+	if ((status = nxge_tx_mac_init(nxgep)) != NXGE_OK)
+		goto fail;
+
+	if ((status = nxge_rx_mac_reset(nxgep)) != NXGE_OK)
+		goto fail;
+
+	if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK)
+		goto fail;
+
+	if ((status = nxge_tx_mac_enable(nxgep)) != NXGE_OK)
+		goto fail;
+
+	if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK)
+		goto fail;
+
+	nxgep->statsp->mac_stats.mac_mtu = nxgep->mac.maxframesize;
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_mac_init: port<%d>", portn));
+
+	return (NXGE_OK);
+fail:
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+			"nxge_mac_init: failed to initialize MAC port<%d>",
+			portn));
+	return (status);
+}
+
+/* Initialize the Ethernet Link */
+
+nxge_status_t
+nxge_link_init(p_nxge_t nxgep)
+{
+	nxge_status_t		status = NXGE_OK;
+#ifdef	NXGE_DEBUG
+	uint8_t			portn;
+
+	portn = nxgep->mac.portnum;
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_link_init: port<%d>", portn));
+#endif
+
+	if (nxgep->niu_type == N2_NIU) {
+		/* Workaround to get link up in both NIU ports */
+		if ((status = nxge_xcvr_init(nxgep)) != NXGE_OK)
+			goto fail;
+	}
+	NXGE_DELAY(200000);
+	/* Initialize internal serdes */
+	if ((status = nxge_serdes_init(nxgep)) != NXGE_OK)
+		goto fail;
+	NXGE_DELAY(200000);
+	if ((status = nxge_xcvr_init(nxgep)) != NXGE_OK)
+		goto fail;
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_link_init: port<%d>", portn));
+
+	return (NXGE_OK);
+
+fail:
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+		"nxge_link_init: "
+		"failed to initialize Ethernet link on port<%d>",
+		portn));
+
+	return (status);
+}
+
+
+/* Initialize the XIF sub-block within the MAC */
+
+nxge_status_t
+nxge_xif_init(p_nxge_t nxgep)
+{
+	uint32_t		xif_cfg = 0;
+	npi_attr_t		ap;
+	uint8_t			portn;
+	nxge_port_t		portt;
+	nxge_port_mode_t	portmode;
+	p_nxge_stats_t		statsp;
+	npi_status_t		rs = NPI_SUCCESS;
+	npi_handle_t		handle;
+
+	portn = NXGE_GET_PORT_NUM(nxgep->function_num);
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_xif_init: port<%d>", portn));
+
+	handle = nxgep->npi_handle;
+	portmode = nxgep->mac.portmode;
+	portt = nxgep->mac.porttype;
+	statsp = nxgep->statsp;
+
+	if (portt == PORT_TYPE_XMAC) {
+
+		/* Setup XIF Configuration for XMAC */
+
+		if ((portmode == PORT_10G_FIBER) ||
+					(portmode == PORT_10G_COPPER))
+			xif_cfg |= CFG_XMAC_XIF_LFS;
+
+		if (portmode == PORT_1G_COPPER) {
+			xif_cfg |= CFG_XMAC_XIF_1G_PCS_BYPASS;
+		}
+
+		/* Set MAC Internal Loopback if necessary */
+		if (statsp->port_stats.lb_mode == nxge_lb_mac1000)
+			xif_cfg |= CFG_XMAC_XIF_LOOPBACK;
+
+		if (statsp->mac_stats.link_speed == 100)
+			xif_cfg |= CFG_XMAC_XIF_SEL_CLK_25MHZ;
+
+		xif_cfg |= CFG_XMAC_XIF_TX_OUTPUT;
+
+		if (portmode == PORT_10G_FIBER) {
+			if (statsp->mac_stats.link_up) {
+				xif_cfg |= CFG_XMAC_XIF_LED_POLARITY;
+			} else {
+				xif_cfg |= CFG_XMAC_XIF_LED_FORCE;
+			}
+		}
+
+		rs = npi_xmac_xif_config(handle, INIT, portn, xif_cfg);
+		if (rs != NPI_SUCCESS)
+			goto fail;
+
+		nxgep->mac.xif_config = xif_cfg;
+
+		/* Set Port Mode */
+		if ((portmode == PORT_10G_FIBER) ||
+					(portmode == PORT_10G_COPPER)) {
+			SET_MAC_ATTR1(handle, ap, portn, MAC_PORT_MODE,
+						MAC_XGMII_MODE, rs);
+			if (rs != NPI_SUCCESS)
+				goto fail;
+			if (statsp->mac_stats.link_up) {
+				if (nxge_10g_link_led_on(nxgep) != NXGE_OK)
+					goto fail;
+			} else {
+				if (nxge_10g_link_led_off(nxgep) != NXGE_OK)
+					goto fail;
+			}
+		} else if ((portmode == PORT_1G_FIBER) ||
+						(portmode == PORT_1G_COPPER)) {
+			if (statsp->mac_stats.link_speed == 1000) {
+				SET_MAC_ATTR1(handle, ap, portn, MAC_PORT_MODE,
+							MAC_GMII_MODE, rs);
+			} else {
+				SET_MAC_ATTR1(handle, ap, portn, MAC_PORT_MODE,
+							MAC_MII_MODE, rs);
+			}
+			if (rs != NPI_SUCCESS)
+				goto fail;
+		} else {
+			NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+					"nxge_xif_init: Unknown port mode (%d)"
+					" for port<%d>", portmode, portn));
+			goto fail;
+		}
+
+	} else if (portt == PORT_TYPE_BMAC) {
+
+		/* Setup XIF Configuration for BMAC */
+
+		if (portmode == PORT_1G_COPPER) {
+			if (statsp->mac_stats.link_speed == 100)
+				xif_cfg |= CFG_BMAC_XIF_SEL_CLK_25MHZ;
+		}
+
+		if (statsp->port_stats.lb_mode == nxge_lb_mac1000)
+			xif_cfg |= CFG_BMAC_XIF_LOOPBACK;
+
+		if (statsp->mac_stats.link_speed == 1000)
+			xif_cfg |= CFG_BMAC_XIF_GMII_MODE;
+
+		xif_cfg |= CFG_BMAC_XIF_TX_OUTPUT;
+
+		rs = npi_bmac_xif_config(handle, INIT, portn, xif_cfg);
+		if (rs != NPI_SUCCESS)
+			goto fail;
+		nxgep->mac.xif_config = xif_cfg;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_xif_init: port<%d>", portn));
+	return (NXGE_OK);
+fail:
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+			"nxge_xif_init: Failed to initialize XIF port<%d>",
+			portn));
+	return (NXGE_ERROR | rs);
+}
+
+/* Initialize the PCS sub-block in the MAC */
+
+nxge_status_t
+nxge_pcs_init(p_nxge_t nxgep)
+{
+	pcs_cfg_t		pcs_cfg;
+	uint32_t		val;
+	uint8_t			portn;
+	nxge_port_mode_t	portmode;
+	npi_handle_t		handle;
+	p_nxge_stats_t		statsp;
+	npi_status_t		rs = NPI_SUCCESS;
+
+	handle = nxgep->npi_handle;
+	portmode = nxgep->mac.portmode;
+	portn = nxgep->mac.portnum;
+	statsp = nxgep->statsp;
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_pcs_init: port<%d>", portn));
+
+	if (portmode == PORT_1G_FIBER) {
+		/* Initialize port's PCS */
+		pcs_cfg.value = 0;
+		pcs_cfg.bits.w0.enable = 1;
+		pcs_cfg.bits.w0.mask = 1;
+		PCS_REG_WR(handle, portn, PCS_CONFIG_REG, pcs_cfg.value);
+		PCS_REG_WR(handle, portn, PCS_DATAPATH_MODE_REG, 0);
+		if ((rs = npi_mac_pcs_reset(handle, portn)) != NPI_SUCCESS)
+			goto fail;
+
+	} else if ((portmode == PORT_10G_FIBER) ||
+						(portmode == PORT_10G_COPPER)) {
+		/* Use internal XPCS, bypass 1G PCS */
+		XMAC_REG_RD(handle, portn, XMAC_CONFIG_REG, &val);
+		val &= ~XMAC_XIF_XPCS_BYPASS;
+		XMAC_REG_WR(handle, portn, XMAC_CONFIG_REG, val);
+
+		if ((rs = npi_xmac_xpcs_reset(handle, portn)) != NPI_SUCCESS)
+			goto fail;
+
+		/* Set XPCS Internal Loopback if necessary */
+		if ((rs = npi_xmac_xpcs_read(handle, portn,
+						XPCS_REG_CONTROL1, &val))
+						!= NPI_SUCCESS)
+			goto fail;
+		if ((statsp->port_stats.lb_mode == nxge_lb_mac10g) ||
+			(statsp->port_stats.lb_mode == nxge_lb_mac1000))
+			val |= XPCS_CTRL1_LOOPBK;
+		else
+			val &= ~XPCS_CTRL1_LOOPBK;
+		if ((rs = npi_xmac_xpcs_write(handle, portn,
+						XPCS_REG_CONTROL1, val))
+						!= NPI_SUCCESS)
+			goto fail;
+
+		/* Clear descw errors */
+		if ((rs = npi_xmac_xpcs_write(handle, portn,
+						XPCS_REG_DESCWERR_COUNTER, 0))
+						!= NPI_SUCCESS)
+			goto fail;
+		/* Clear symbol errors */
+		if ((rs = npi_xmac_xpcs_read(handle, portn,
+					XPCS_REG_SYMBOL_ERR_L0_1_COUNTER, &val))
+					!= NPI_SUCCESS)
+			goto fail;
+		if ((rs = npi_xmac_xpcs_read(handle, portn,
+					XPCS_REG_SYMBOL_ERR_L2_3_COUNTER, &val))
+					!= NPI_SUCCESS)
+			goto fail;
+
+	} else if (portmode == PORT_1G_COPPER) {
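+		/*
+		 * For 1G copper, select the MII datapath mode before
+		 * resetting the PCS.
+		 */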
+		if (portn < 4) {
+			PCS_REG_WR(handle, portn, PCS_DATAPATH_MODE_REG,
+					PCS_DATAPATH_MODE_MII);
+		}
+		if ((rs = npi_mac_pcs_reset(handle, portn)) != NPI_SUCCESS)
+			goto fail;
+
+	} else {
+		goto fail;
+	}
+pass:
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_pcs_init: port<%d>", portn));
+	return (NXGE_OK);
+fail:
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+			"nxge_pcs_init: Failed to initialize PCS port<%d>",
+			portn));
+	return (NXGE_ERROR | rs);
+}
+
+/* Initialize the Internal Serdes */
+
+nxge_status_t
+nxge_serdes_init(p_nxge_t nxgep)
+{
+	p_nxge_stats_t		statsp;
+#ifdef	NXGE_DEBUG
+	uint8_t			portn;
+#endif
+	nxge_status_t		status = NXGE_OK;
+
+#ifdef	NXGE_DEBUG
+	portn = nxgep->mac.portnum;
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+		"==> nxge_serdes_init port<%d>", portn));
+#endif
+
+	statsp = nxgep->statsp;
+
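+	/*
+	 * The N2 NIU uses the TI Hedwig serdes; standalone Neptune uses
+	 * its own internal serdes (see the routines below).
+	 */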
+	if (nxgep->niu_type == N2_NIU) {
+		if (nxge_n2_serdes_init(nxgep) != NXGE_OK)
+			goto fail;
+	} else if ((nxgep->niu_type == NEPTUNE) ||
+				(nxgep->niu_type == NEPTUNE_2)) {
+		if ((status = nxge_neptune_serdes_init(nxgep))
+							!= NXGE_OK)
+			goto fail;
+	} else {
+		goto fail;
+	}
+
+	statsp->mac_stats.serdes_inits++;
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_serdes_init port<%d>",
+			portn));
+
+	return (NXGE_OK);
+
+fail:
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+		"nxge_serdes_init: Failed to initialize serdes for port<%d>",
+			portn));
+
+	return (status);
+}
+
+/* Initialize the TI Hedwig Internal Serdes (N2-NIU only) */
+
+nxge_status_t
+nxge_n2_serdes_init(p_nxge_t nxgep)
+{
+	uint8_t portn;
+	int chan;
+	esr_ti_cfgpll_l_t pll_cfg_l;
+	esr_ti_cfgrx_l_t rx_cfg_l;
+	esr_ti_cfgrx_h_t rx_cfg_h;
+	esr_ti_cfgtx_l_t tx_cfg_l;
+	esr_ti_cfgtx_h_t tx_cfg_h;
+	esr_ti_testcfg_t test_cfg;
+	nxge_status_t status = NXGE_OK;
+
+	portn = nxgep->mac.portnum;
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_n2_serdes_init port<%d>",
+			portn));
+
+	tx_cfg_l.value = 0;
+	tx_cfg_h.value = 0;
+	rx_cfg_l.value = 0;
+	rx_cfg_h.value = 0;
+	pll_cfg_l.value = 0;
+	test_cfg.value = 0;
+
+	if (nxgep->mac.portmode == PORT_10G_FIBER) {
+		/* 0x0E01 */
+		tx_cfg_l.bits.entx = 1;
+		tx_cfg_l.bits.swing = CFGTX_SWING_1375MV;
+
+		/* 0x9101 */
+		rx_cfg_l.bits.enrx = 1;
+		rx_cfg_l.bits.term = CFGRX_TERM_0P8VDDT;
+		rx_cfg_l.bits.align = CFGRX_ALIGN_EN;
+		rx_cfg_l.bits.los = CFGRX_LOS_LOTHRES;
+
+		/* 0x0008 */
+		rx_cfg_h.bits.eq = CFGRX_EQ_ADAPTIVE_LP_ADAPTIVE_ZF;
+
+		/* Set loopback mode if necessary */
+		if (nxgep->statsp->port_stats.lb_mode == nxge_lb_serdes10g) {
+			tx_cfg_l.bits.entest = 1;
+			rx_cfg_l.bits.entest = 1;
+			test_cfg.bits.loopback = TESTCFG_INNER_CML_DIS_LOOPBACK;
+			if ((status = nxge_mdio_write(nxgep, portn,
+				ESR_N2_DEV_ADDR,
+				ESR_N2_TEST_CFG_REG, test_cfg.value))
+				!= NXGE_OK)
+			goto fail;
+		}
+
+		/* Use default PLL value */
+
+	} else if (nxgep->mac.portmode == PORT_1G_FIBER) {
+
+		/* 0x0E21 */
+		tx_cfg_l.bits.entx = 1;
+		tx_cfg_l.bits.rate = CFGTX_RATE_HALF;
+		tx_cfg_l.bits.swing = CFGTX_SWING_1375MV;
+
+		/* 0x9121 */
+		rx_cfg_l.bits.enrx = 1;
+		rx_cfg_l.bits.rate = CFGRX_RATE_HALF;
+		rx_cfg_l.bits.term = CFGRX_TERM_0P8VDDT;
+		rx_cfg_l.bits.align = CFGRX_ALIGN_EN;
+		rx_cfg_l.bits.los = CFGRX_LOS_LOTHRES;
+
+		/* 0x8 */
+		rx_cfg_h.bits.eq = CFGRX_EQ_ADAPTIVE_LP_ADAPTIVE_ZF;
+
+		/* MPY = 0x100 */
+		pll_cfg_l.bits.mpy = CFGPLL_MPY_8X;
+
+		/* Set PLL */
+		pll_cfg_l.bits.enpll = 1;
+		if ((status = nxge_mdio_write(nxgep, portn, ESR_N2_DEV_ADDR,
+				ESR_N2_PLL_CFG_L_REG, pll_cfg_l.value))
+				!= NXGE_OK)
+			goto fail;
+	} else {
+		goto fail;
+	}
+
+	/*   MIF_REG_WR(handle, MIF_MASK_REG, ~mask); */
+
+	NXGE_DELAY(20);
+
+	/* init TX channels */
+	for (chan = 0; chan < 4; chan++) {
+		if ((status = nxge_mdio_write(nxgep, portn, ESR_N2_DEV_ADDR,
+				ESR_N2_TX_CFG_L_REG_ADDR(chan), tx_cfg_l.value))
+				!= NXGE_OK)
+			goto fail;
+
+		if ((status = nxge_mdio_write(nxgep, portn, ESR_N2_DEV_ADDR,
+				ESR_N2_TX_CFG_H_REG_ADDR(chan), tx_cfg_h.value))
+				!= NXGE_OK)
+			goto fail;
+	}
+
+	/* init RX channels */
+	for (chan = 0; chan < 4; chan++) {
+		if ((status = nxge_mdio_write(nxgep, portn, ESR_N2_DEV_ADDR,
+				ESR_N2_RX_CFG_L_REG_ADDR(chan), rx_cfg_l.value))
+				!= NXGE_OK)
+			goto fail;
+
+		if ((status = nxge_mdio_write(nxgep, portn, ESR_N2_DEV_ADDR,
+				ESR_N2_RX_CFG_H_REG_ADDR(chan), rx_cfg_h.value))
+				!= NXGE_OK)
+			goto fail;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_n2_serdes_init port<%d>",
+			portn));
+
+	return (NXGE_OK);
+fail:
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+	"nxge_n2_serdes_init: Failed to initialize N2 serdes for port<%d>",
+				portn));
+
+	return (status);
+}
+
+/* Initialize Neptune Internal Serdes (Neptune only) */
+
+nxge_status_t
+nxge_neptune_serdes_init(p_nxge_t nxgep)
+{
+	npi_handle_t		handle;
+	uint8_t			portn;
+	nxge_port_mode_t	portmode;
+	int			chan;
+	sr_rx_tx_ctrl_l_t	rx_tx_ctrl_l;
+	sr_rx_tx_ctrl_h_t	rx_tx_ctrl_h;
+	sr_glue_ctrl0_l_t	glue_ctrl0_l;
+	sr_glue_ctrl0_h_t	glue_ctrl0_h;
+	uint64_t		val;
+	uint16_t		val16l;
+	uint16_t		val16h;
+	nxge_status_t		status = NXGE_OK;
+
+	portn = nxgep->mac.portnum;
+
+	if ((portn != 0) && (portn != 1))
+		return (NXGE_OK);
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_neptune_serdes_init port<%d>",
+			portn));
+
+	handle = nxgep->npi_handle;
+	portmode = nxgep->mac.portmode;
+
+	if ((portmode == PORT_10G_FIBER) || (portmode == PORT_10G_COPPER)) {
+
+		switch (portn) {
+		case 0:
+			ESR_REG_WR(handle, ESR_0_CONTROL_REG,
+				ESR_CTL_EN_SYNCDET_0 | ESR_CTL_EN_SYNCDET_1 |
+				ESR_CTL_EN_SYNCDET_2 | ESR_CTL_EN_SYNCDET_3 |
+				(0x5 << ESR_CTL_OUT_EMPH_0_SHIFT) |
+				(0x5 << ESR_CTL_OUT_EMPH_1_SHIFT) |
+				(0x5 << ESR_CTL_OUT_EMPH_2_SHIFT) |
+				(0x5 << ESR_CTL_OUT_EMPH_3_SHIFT) |
+				(0x1 << ESR_CTL_LOSADJ_0_SHIFT) |
+				(0x1 << ESR_CTL_LOSADJ_1_SHIFT) |
+				(0x1 << ESR_CTL_LOSADJ_2_SHIFT) |
+				(0x1 << ESR_CTL_LOSADJ_3_SHIFT));
+
+				/* Set Serdes0 Internal Loopback if necessary */
+				if (nxgep->statsp->port_stats.lb_mode ==
+							nxge_lb_serdes10g) {
+					ESR_REG_WR(handle,
+						ESR_0_TEST_CONFIG_REG,
+						ESR_PAD_LOOPBACK_CH3 |
+						ESR_PAD_LOOPBACK_CH2 |
+						ESR_PAD_LOOPBACK_CH1 |
+						ESR_PAD_LOOPBACK_CH0);
+				} else {
+					ESR_REG_WR(handle,
+						ESR_0_TEST_CONFIG_REG, 0);
+				}
+			break;
+		case 1:
+			ESR_REG_WR(handle, ESR_1_CONTROL_REG,
+				ESR_CTL_EN_SYNCDET_0 | ESR_CTL_EN_SYNCDET_1 |
+				ESR_CTL_EN_SYNCDET_2 | ESR_CTL_EN_SYNCDET_3 |
+				(0x5 << ESR_CTL_OUT_EMPH_0_SHIFT) |
+				(0x5 << ESR_CTL_OUT_EMPH_1_SHIFT) |
+				(0x5 << ESR_CTL_OUT_EMPH_2_SHIFT) |
+				(0x5 << ESR_CTL_OUT_EMPH_3_SHIFT) |
+				(0x1 << ESR_CTL_LOSADJ_0_SHIFT) |
+				(0x1 << ESR_CTL_LOSADJ_1_SHIFT) |
+				(0x1 << ESR_CTL_LOSADJ_2_SHIFT) |
+				(0x1 << ESR_CTL_LOSADJ_3_SHIFT));
+
+				/* Set Serdes1 Internal Loopback if necessary */
+				if (nxgep->statsp->port_stats.lb_mode ==
+							nxge_lb_serdes10g) {
+					ESR_REG_WR(handle,
+						ESR_1_TEST_CONFIG_REG,
+						ESR_PAD_LOOPBACK_CH3 |
+						ESR_PAD_LOOPBACK_CH2 |
+						ESR_PAD_LOOPBACK_CH1 |
+						ESR_PAD_LOOPBACK_CH0);
+				} else {
+					ESR_REG_WR(handle,
+						ESR_1_TEST_CONFIG_REG, 0);
+				}
+			break;
+		default:
+			/* Nothing to do here */
+			goto done;
+		}
+
+		/* init TX RX channels */
+		for (chan = 0; chan < 4; chan++) {
+			if ((status = nxge_mdio_read(nxgep, portn,
+					ESR_NEPTUNE_DEV_ADDR,
+					ESR_NEP_RX_TX_CONTROL_L_ADDR(chan),
+					&rx_tx_ctrl_l.value)) != NXGE_OK)
+				goto fail;
+			if ((status = nxge_mdio_read(nxgep, portn,
+					ESR_NEPTUNE_DEV_ADDR,
+					ESR_NEP_RX_TX_CONTROL_H_ADDR(chan),
+					&rx_tx_ctrl_h.value)) != NXGE_OK)
+				goto fail;
+			if ((status = nxge_mdio_read(nxgep, portn,
+					ESR_NEPTUNE_DEV_ADDR,
+					ESR_NEP_GLUE_CONTROL0_L_ADDR(chan),
+					&glue_ctrl0_l.value)) != NXGE_OK)
+				goto fail;
+			if ((status = nxge_mdio_read(nxgep, portn,
+					ESR_NEPTUNE_DEV_ADDR,
+					ESR_NEP_GLUE_CONTROL0_H_ADDR(chan),
+					&glue_ctrl0_h.value)) != NXGE_OK)
+				goto fail;
+			rx_tx_ctrl_l.bits.enstretch = 1;
+			rx_tx_ctrl_h.bits.vmuxlo = 2;
+			rx_tx_ctrl_h.bits.vpulselo = 2;
+			glue_ctrl0_l.bits.rxlosenable = 1;
+			glue_ctrl0_l.bits.samplerate = 0xF;
+			glue_ctrl0_l.bits.thresholdcount = 0xFF;
+			glue_ctrl0_h.bits.bitlocktime = BITLOCKTIME_300_CYCLES;
+			if ((status = nxge_mdio_write(nxgep, portn,
+					ESR_NEPTUNE_DEV_ADDR,
+					ESR_NEP_RX_TX_CONTROL_L_ADDR(chan),
+					rx_tx_ctrl_l.value)) != NXGE_OK)
+				goto fail;
+			if ((status = nxge_mdio_write(nxgep, portn,
+					ESR_NEPTUNE_DEV_ADDR,
+					ESR_NEP_RX_TX_CONTROL_H_ADDR(chan),
+					rx_tx_ctrl_h.value)) != NXGE_OK)
+				goto fail;
+			if ((status = nxge_mdio_write(nxgep, portn,
+					ESR_NEPTUNE_DEV_ADDR,
+					ESR_NEP_GLUE_CONTROL0_L_ADDR(chan),
+					glue_ctrl0_l.value)) != NXGE_OK)
+				goto fail;
+			if ((status = nxge_mdio_write(nxgep, portn,
+					ESR_NEPTUNE_DEV_ADDR,
+					ESR_NEP_GLUE_CONTROL0_H_ADDR(chan),
+					glue_ctrl0_h.value)) != NXGE_OK)
+				goto fail;
+		}
+
+		/* Apply Tx core reset */
+		if ((status = nxge_mdio_write(nxgep, portn,
+					ESR_NEPTUNE_DEV_ADDR,
+					ESR_NEP_RX_TX_RESET_CONTROL_L_ADDR(),
+					(uint16_t)0)) != NXGE_OK)
+			goto fail;
+
+		if ((status = nxge_mdio_write(nxgep, portn,
+					ESR_NEPTUNE_DEV_ADDR,
+					ESR_NEP_RX_TX_RESET_CONTROL_H_ADDR(),
+					(uint16_t)0xffff)) != NXGE_OK)
+			goto fail;
+
+		NXGE_DELAY(200);
+
+		/* Apply Rx core reset */
+		if ((status = nxge_mdio_write(nxgep, portn,
+					ESR_NEPTUNE_DEV_ADDR,
+					ESR_NEP_RX_TX_RESET_CONTROL_L_ADDR(),
+					(uint16_t)0xffff)) != NXGE_OK)
+			goto fail;
+
+		NXGE_DELAY(200);
+		if ((status = nxge_mdio_write(nxgep, portn,
+					ESR_NEPTUNE_DEV_ADDR,
+					ESR_NEP_RX_TX_RESET_CONTROL_H_ADDR(),
+					(uint16_t)0)) != NXGE_OK)
+			goto fail;
+
+		NXGE_DELAY(200);
+		if ((status = nxge_mdio_read(nxgep, portn,
+					ESR_NEPTUNE_DEV_ADDR,
+					ESR_NEP_RX_TX_RESET_CONTROL_L_ADDR(),
+					&val16l)) != NXGE_OK)
+			goto fail;
+		if ((status = nxge_mdio_read(nxgep, portn,
+					ESR_NEPTUNE_DEV_ADDR,
+					ESR_NEP_RX_TX_RESET_CONTROL_H_ADDR(),
+					&val16h)) != NXGE_OK)
+			goto fail;
+		if ((val16l != 0) || (val16h != 0)) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+					"Failed to reset port<%d> XAUI Serdes",
+					portn));
+		}
+
+		ESR_REG_RD(handle, ESR_INTERNAL_SIGNALS_REG, &val);
+
+		if (portn == 0) {
+			if ((val & ESR_SIG_P0_BITS_MASK) !=
+				(ESR_SIG_SERDES_RDY0_P0 | ESR_SIG_DETECT0_P0 |
+					ESR_SIG_XSERDES_RDY_P0 |
+					ESR_SIG_XDETECT_P0_CH3 |
+					ESR_SIG_XDETECT_P0_CH2 |
+					ESR_SIG_XDETECT_P0_CH1 |
+					ESR_SIG_XDETECT_P0_CH0)) {
+				goto fail;
+			}
+		} else if (portn == 1) {
+			if ((val & ESR_SIG_P1_BITS_MASK) !=
+				(ESR_SIG_SERDES_RDY0_P1 | ESR_SIG_DETECT0_P1 |
+					ESR_SIG_XSERDES_RDY_P1 |
+					ESR_SIG_XDETECT_P1_CH3 |
+					ESR_SIG_XDETECT_P1_CH2 |
+					ESR_SIG_XDETECT_P1_CH1 |
+					ESR_SIG_XDETECT_P1_CH0)) {
+				goto fail;
+			}
+		}
+
+	} else if (portmode == PORT_1G_FIBER) {
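+		/*
+		 * Program the shared PLL config for 1G fiber: clear the
+		 * FBDIV_2 setting and run this port's lane at half rate.
+		 */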
+		ESR_REG_RD(handle, ESR_1_PLL_CONFIG_REG, &val);
+		val &= ~ESR_PLL_CFG_FBDIV_2;
+		switch (portn) {
+		case 0:
+			val |= ESR_PLL_CFG_HALF_RATE_0;
+			break;
+		case 1:
+			val |= ESR_PLL_CFG_HALF_RATE_1;
+			break;
+		case 2:
+			val |= ESR_PLL_CFG_HALF_RATE_2;
+			break;
+		case 3:
+			val |= ESR_PLL_CFG_HALF_RATE_3;
+			break;
+		default:
+			goto fail;
+		}
+
+		ESR_REG_WR(handle, ESR_1_PLL_CONFIG_REG, val);
+	}
+
+done:
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_neptune_serdes_init port<%d>",
+			portn));
+	return (NXGE_OK);
+fail:
+	NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"nxge_neptune_serdes_init: "
+			"Failed to initialize Neptune serdes for port<%d>",
+			portn));
+
+	return (status);
+}
+
+/* Look for transceiver type */
+
+nxge_status_t
+nxge_xcvr_find(p_nxge_t nxgep)
+{
+	uint8_t		portn;
+
+	portn = nxgep->mac.portnum;
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_xcvr_find: port<%d>", portn));
+
+	if (nxge_get_xcvr_type(nxgep) != NXGE_OK)
+		return (NXGE_ERROR);
+
+	nxgep->mac.linkchkmode = LINKCHK_TIMER;
+	if (nxgep->mac.portmode == PORT_10G_FIBER) {
+		nxgep->statsp->mac_stats.xcvr_inuse = PCS_XCVR;
+		if ((nxgep->niu_type == NEPTUNE) ||
+			(nxgep->niu_type == NEPTUNE_2)) {
+			nxgep->statsp->mac_stats.xcvr_portn =
+					BCM8704_NEPTUNE_PORT_ADDR_BASE + portn;
+		} else if (nxgep->niu_type == N2_NIU) {
+			nxgep->statsp->mac_stats.xcvr_portn =
+					BCM8704_N2_PORT_ADDR_BASE + portn;
+		} else
+			return (NXGE_ERROR);
+	} else if (nxgep->mac.portmode == PORT_1G_COPPER) {
+		nxgep->statsp->mac_stats.xcvr_inuse = INT_MII_XCVR;
+		/*
+		 * For Atlas, the Xcvr port numbers are swapped relative to
+		 * the Ethernet port numbers. This is done for better signal
+		 * integrity in routing.
+		 */
+
+		switch (portn) {
+		case 0:
+			nxgep->statsp->mac_stats.xcvr_portn =
+					BCM5464_NEPTUNE_PORT_ADDR_BASE + 3;
+			break;
+		case 1:
+			nxgep->statsp->mac_stats.xcvr_portn =
+					BCM5464_NEPTUNE_PORT_ADDR_BASE + 2;
+			break;
+		case 2:
+			nxgep->statsp->mac_stats.xcvr_portn =
+					BCM5464_NEPTUNE_PORT_ADDR_BASE + 1;
+			break;
+		case 3:
+			nxgep->statsp->mac_stats.xcvr_portn =
+					BCM5464_NEPTUNE_PORT_ADDR_BASE;
+			break;
+		default:
+			return (NXGE_ERROR);
+		}
+	} else if (nxgep->mac.portmode == PORT_1G_FIBER) {
+		nxgep->statsp->mac_stats.xcvr_inuse = PCS_XCVR;
+		nxgep->statsp->mac_stats.xcvr_portn = portn;
+	} else {
+		return (NXGE_ERROR);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_xcvr_find: xcvr_inuse = %d",
+					nxgep->statsp->mac_stats.xcvr_inuse));
+	return (NXGE_OK);
+}
+
+/* Initialize transceiver */
+
+nxge_status_t
+nxge_xcvr_init(p_nxge_t nxgep)
+{
+	p_nxge_param_t		param_arr;
+	p_nxge_stats_t		statsp;
+	uint16_t		val;
+#ifdef	NXGE_DEBUG
+	uint8_t			portn;
+	uint16_t		val1;
+#endif
+	uint8_t			phy_port_addr;
+	pmd_tx_control_t	tx_ctl;
+	control_t		ctl;
+	phyxs_control_t		phyxs_ctl;
+	pcs_control_t		pcs_ctl;
+	uint32_t		delay = 0;
+	optics_dcntr_t		op_ctr;
+	nxge_status_t		status = NXGE_OK;
+#ifdef	NXGE_DEBUG
+	portn = nxgep->mac.portnum;
+#endif
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_xcvr_init: port<%d>", portn));
+
+	param_arr = nxgep->param_arr;
+	statsp = nxgep->statsp;
+
+	/*
+	 * Initialize the xcvr statistics.
+	 */
+	statsp->mac_stats.cap_autoneg = 0;
+	statsp->mac_stats.cap_100T4 = 0;
+	statsp->mac_stats.cap_100fdx = 0;
+	statsp->mac_stats.cap_100hdx = 0;
+	statsp->mac_stats.cap_10fdx = 0;
+	statsp->mac_stats.cap_10hdx = 0;
+	statsp->mac_stats.cap_asmpause = 0;
+	statsp->mac_stats.cap_pause = 0;
+	statsp->mac_stats.cap_1000fdx = 0;
+	statsp->mac_stats.cap_1000hdx = 0;
+	statsp->mac_stats.cap_10gfdx = 0;
+	statsp->mac_stats.cap_10ghdx = 0;
+
+	/*
+	 * Initialize the link statistics.
+	 */
+	statsp->mac_stats.link_T4 = 0;
+	statsp->mac_stats.link_asmpause = 0;
+	statsp->mac_stats.link_pause = 0;
+
+	phy_port_addr = nxgep->statsp->mac_stats.xcvr_portn;
+
+	switch (nxgep->mac.portmode) {
+	case PORT_10G_FIBER:
+		/* Disable Link LEDs */
+		if (nxge_10g_link_led_off(nxgep) != NXGE_OK)
+			goto fail;
+
+		/* Set Clause 45 */
+		npi_mac_mif_set_indirect_mode(nxgep->npi_handle, B_TRUE);
+
+		/* Reset the transceiver */
+		if ((status = nxge_mdio_read(nxgep,
+				phy_port_addr,
+				BCM8704_PHYXS_ADDR,
+				BCM8704_PHYXS_CONTROL_REG,
+				&phyxs_ctl.value)) != NXGE_OK)
+			goto fail;
+
+		phyxs_ctl.bits.reset = 1;
+		if ((status = nxge_mdio_write(nxgep,
+				phy_port_addr,
+				BCM8704_PHYXS_ADDR,
+				BCM8704_PHYXS_CONTROL_REG,
+				phyxs_ctl.value)) != NXGE_OK)
+			goto fail;
+
+		do {
+			drv_usecwait(500);
+			if ((status = nxge_mdio_read(nxgep,
+					phy_port_addr,
+					BCM8704_PHYXS_ADDR,
+					BCM8704_PHYXS_CONTROL_REG,
+					&phyxs_ctl.value)) != NXGE_OK)
+				goto fail;
+			delay++;
+		} while ((phyxs_ctl.bits.reset) && (delay < 100));
+		if (delay == 100) {
+			NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+				"nxge_xcvr_init: "
+				"failed to reset Transceiver on port<%d>",
+				portn));
+			status = NXGE_ERROR;
+			goto fail;
+		}
+
+		/* Set to 0x7FBF */
+		ctl.value = 0;
+		ctl.bits.res1 = 0x3F;
+		ctl.bits.optxon_lvl = 1;
+		ctl.bits.oprxflt_lvl = 1;
+		ctl.bits.optrxlos_lvl = 1;
+		ctl.bits.optxflt_lvl = 1;
+		ctl.bits.opprflt_lvl = 1;
+		ctl.bits.obtmpflt_lvl = 1;
+		ctl.bits.opbiasflt_lvl = 1;
+		ctl.bits.optxrst_lvl = 1;
+		if ((status = nxge_mdio_write(nxgep,
+				phy_port_addr,
+				BCM8704_USER_DEV3_ADDR,
+				BCM8704_USER_CONTROL_REG, ctl.value))
+				!= NXGE_OK)
+			goto fail;
+
+		/* Set to 0x164 */
+		tx_ctl.value = 0;
+		tx_ctl.bits.tsck_lpwren = 1;
+		tx_ctl.bits.tx_dac_txck = 0x2;
+		tx_ctl.bits.tx_dac_txd = 0x1;
+		tx_ctl.bits.xfp_clken = 1;
+		if ((status = nxge_mdio_write(nxgep,
+				phy_port_addr,
+				BCM8704_USER_DEV3_ADDR,
+				BCM8704_USER_PMD_TX_CONTROL_REG, tx_ctl.value))
+				!= NXGE_OK)
+			goto fail;
+		/*
+		 * According to Broadcom's instructions, software needs to
+		 * read these registers back twice after they are written.
+		 */
+		if ((status = nxge_mdio_read(nxgep,
+				phy_port_addr,
+				BCM8704_USER_DEV3_ADDR,
+				BCM8704_USER_CONTROL_REG, &val))
+				!= NXGE_OK)
+			goto fail;
+
+		if ((status = nxge_mdio_read(nxgep,
+				phy_port_addr,
+				BCM8704_USER_DEV3_ADDR,
+				BCM8704_USER_CONTROL_REG, &val))
+				!= NXGE_OK)
+			goto fail;
+
+		if ((status = nxge_mdio_read(nxgep,
+				phy_port_addr,
+				BCM8704_USER_DEV3_ADDR,
+				BCM8704_USER_PMD_TX_CONTROL_REG, &val))
+				!= NXGE_OK)
+			goto fail;
+
+		if ((status = nxge_mdio_read(nxgep,
+				phy_port_addr,
+				BCM8704_USER_DEV3_ADDR,
+				BCM8704_USER_PMD_TX_CONTROL_REG, &val))
+				!= NXGE_OK)
+			goto fail;
+
+
+		/* Enable Tx and Rx LEDs to be driven by traffic */
+		if ((status = nxge_mdio_read(nxgep,
+					phy_port_addr,
+					BCM8704_USER_DEV3_ADDR,
+					BCM8704_USER_OPTICS_DIGITAL_CTRL_REG,
+					&op_ctr.value)) != NXGE_OK)
+			goto fail;
+		op_ctr.bits.gpio_sel = 0x3;
+		if ((status = nxge_mdio_write(nxgep,
+					phy_port_addr,
+					BCM8704_USER_DEV3_ADDR,
+					BCM8704_USER_OPTICS_DIGITAL_CTRL_REG,
+					op_ctr.value)) != NXGE_OK)
+			goto fail;
+
+		NXGE_DELAY(1000000);
+
+		/* Set BCM8704 Internal Loopback mode if necessary */
+		if ((status = nxge_mdio_read(nxgep,
+					phy_port_addr,
+					BCM8704_PCS_DEV_ADDR,
+					BCM8704_PCS_CONTROL_REG,
+					&pcs_ctl.value)) != NXGE_OK)
+			goto fail;
+		if (nxgep->statsp->port_stats.lb_mode == nxge_lb_phy10g)
+			pcs_ctl.bits.loopback = 1;
+		else
+			pcs_ctl.bits.loopback = 0;
+		if ((status = nxge_mdio_write(nxgep,
+					phy_port_addr,
+					BCM8704_PCS_DEV_ADDR,
+					BCM8704_PCS_CONTROL_REG,
+					pcs_ctl.value)) != NXGE_OK)
+			goto fail;
+
+		status = nxge_mdio_read(nxgep, phy_port_addr,
+				0x1, 0xA, &val);
+		if (status != NXGE_OK)
+			goto fail;
+		NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+				"BCM8704 port<%d> Dev 1 Reg 0xA = 0x%x\n",
+				portn, val));
+		status = nxge_mdio_read(nxgep, phy_port_addr, 0x3, 0x20, &val);
+		if (status != NXGE_OK)
+			goto fail;
+		NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+				"BCM8704 port<%d> Dev 3 Reg 0x20 = 0x%x\n",
+				portn, val));
+		status = nxge_mdio_read(nxgep, phy_port_addr, 0x4, 0x18, &val);
+		if (status != NXGE_OK)
+			goto fail;
+		NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+				"BCM8704 port<%d> Dev 4 Reg 0x18 = 0x%x\n",
+				portn, val));
+
+#ifdef	NXGE_DEBUG
+		/* Diagnose link issue if link is not up */
+		status = nxge_mdio_read(nxgep, phy_port_addr,
+					BCM8704_USER_DEV3_ADDR,
+					BCM8704_USER_ANALOG_STATUS0_REG,
+					&val);
+		if (status != NXGE_OK)
+			goto fail;
+
+		status = nxge_mdio_read(nxgep, phy_port_addr,
+					BCM8704_USER_DEV3_ADDR,
+					BCM8704_USER_ANALOG_STATUS0_REG,
+					&val);
+		if (status != NXGE_OK)
+			goto fail;
+
+		status = nxge_mdio_read(nxgep, phy_port_addr,
+					BCM8704_USER_DEV3_ADDR,
+					BCM8704_USER_TX_ALARM_STATUS_REG,
+					&val1);
+		if (status != NXGE_OK)
+			goto fail;
+
+		status = nxge_mdio_read(nxgep, phy_port_addr,
+					BCM8704_USER_DEV3_ADDR,
+					BCM8704_USER_TX_ALARM_STATUS_REG,
+					&val1);
+		if (status != NXGE_OK)
+			goto fail;
+
+		if (val != 0x3FC) {
+			if ((val == 0x43BC) && (val1 != 0)) {
+				NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+					"Cable not connected to peer or bad"
+					" cable on port<%d>\n", portn));
+			} else if (val == 0x639C) {
+				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+					"Optical module (XFP) is bad or absent"
+					" on port<%d>\n", portn));
+			}
+		}
+#endif
+
+		statsp->mac_stats.cap_10gfdx = 1;
+		statsp->mac_stats.lp_cap_10gfdx = 1;
+		break;
+	case PORT_10G_COPPER:
+		break;
+	case PORT_1G_FIBER:
+	case PORT_1G_COPPER:
+		/* Set Clause 22 */
+		npi_mac_mif_set_indirect_mode(nxgep->npi_handle, B_FALSE);
+
+		/* Set capability flags */
+		statsp->mac_stats.cap_1000fdx =
+					param_arr[param_anar_1000fdx].value;
+		statsp->mac_stats.cap_100fdx =
+					param_arr[param_anar_100fdx].value;
+		statsp->mac_stats.cap_10fdx = param_arr[param_anar_10fdx].value;
+
+		if ((status = nxge_mii_xcvr_init(nxgep)) != NXGE_OK)
+			goto fail;
+		break;
+	default:
+		goto fail;
+	}
+
+	statsp->mac_stats.xcvr_inits++;
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_xcvr_init: port<%d>", portn));
+	return (NXGE_OK);
+
+fail:
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+		"nxge_xcvr_init: failed to initialize transceiver for port<%d>",
+		portn));
+	return (status);
+}
+
+
+/* Initialize the TxMAC sub-block */
+
+nxge_status_t
+nxge_tx_mac_init(p_nxge_t nxgep)
+{
+	npi_attr_t		ap;
+	uint8_t			portn;
+	nxge_port_mode_t	portmode;
+	nxge_port_t		portt;
+	npi_handle_t		handle;
+	npi_status_t		rs = NPI_SUCCESS;
+
+	portn = NXGE_GET_PORT_NUM(nxgep->function_num);
+	portt    = nxgep->mac.porttype;
+	handle   = nxgep->npi_handle;
+	portmode = nxgep->mac.portmode;
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_tx_mac_init: port<%d>",
+			portn));
+
+	/* Set Max and Min Frame Size */
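+	/*
+	 * 0x2400 (9216 bytes) accommodates jumbo frames; otherwise limit
+	 * frames to 0x5EE (1518) + 4 bytes to allow for a VLAN tag.
+	 */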
+	if (nxgep->param_arr[param_accept_jumbo].value || nxge_jumbo_enable) {
+		SET_MAC_ATTR2(handle, ap, portn,
+		    MAC_PORT_FRAME_SIZE, 64, 0x2400, rs);
+	} else {
+		SET_MAC_ATTR2(handle, ap, portn,
+		    MAC_PORT_FRAME_SIZE, 64, 0x5EE + 4, rs);
+	}
+
+	if (rs != NPI_SUCCESS)
+		goto fail;
+	if (nxgep->param_arr[param_accept_jumbo].value ||
+		nxgep->mac.is_jumbo == B_TRUE)
+		nxgep->mac.maxframesize = 0x2400;
+	else
+		nxgep->mac.maxframesize = 0x5EE + 4;
+	nxgep->mac.minframesize = 64;
+
+	if (portt == PORT_TYPE_XMAC) {
+		if ((rs = npi_xmac_tx_iconfig(handle, INIT, portn,
+				0)) != NPI_SUCCESS)
+			goto fail;
+		nxgep->mac.tx_iconfig = NXGE_XMAC_TX_INTRS;
+		if ((portmode == PORT_10G_FIBER) ||
+					(portmode == PORT_10G_COPPER)) {
+			SET_MAC_ATTR1(handle, ap, portn, XMAC_10G_PORT_IPG,
+					XGMII_IPG_12_15, rs);
+			if (rs != NPI_SUCCESS)
+				goto fail;
+			nxgep->mac.ipg[0] = XGMII_IPG_12_15;
+		} else {
+			SET_MAC_ATTR1(handle, ap, portn, XMAC_PORT_IPG,
+					MII_GMII_IPG_12, rs);
+			if (rs != NPI_SUCCESS)
+				goto fail;
+			nxgep->mac.ipg[0] = MII_GMII_IPG_12;
+		}
+		if ((rs = npi_xmac_tx_config(handle, INIT, portn,
+				CFG_XMAC_TX_CRC | CFG_XMAC_TX)) != NPI_SUCCESS)
+			goto fail;
+		nxgep->mac.tx_config = CFG_XMAC_TX_CRC | CFG_XMAC_TX;
+		nxgep->mac.maxburstsize = 0;	/* not programmable */
+		nxgep->mac.ctrltype = 0;	/* not programmable */
+		nxgep->mac.pa_size = 0;		/* not programmable */
+
+		if ((rs = npi_xmac_zap_tx_counters(handle, portn))
+							!= NPI_SUCCESS)
+			goto fail;
+
+	} else {
+		if ((rs = npi_bmac_tx_iconfig(handle, INIT, portn,
+				0)) != NPI_SUCCESS)
+			goto fail;
+		nxgep->mac.tx_iconfig = NXGE_BMAC_TX_INTRS;
+
+		SET_MAC_ATTR1(handle, ap, portn, BMAC_PORT_CTRL_TYPE, 0x8808,
+				rs);
+		if (rs != NPI_SUCCESS)
+			goto fail;
+		nxgep->mac.ctrltype = 0x8808;
+
+		SET_MAC_ATTR1(handle, ap, portn, BMAC_PORT_PA_SIZE, 0x7, rs);
+		if (rs != NPI_SUCCESS)
+			goto fail;
+		nxgep->mac.pa_size = 0x7;
+
+		if ((rs = npi_bmac_tx_config(handle, INIT, portn,
+				CFG_BMAC_TX_CRC | CFG_BMAC_TX)) != NPI_SUCCESS)
+			goto fail;
+		nxgep->mac.tx_config = CFG_BMAC_TX_CRC | CFG_BMAC_TX;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_tx_mac_init: port<%d>",
+			portn));
+
+	return (NXGE_OK);
+fail:
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+		"nxge_tx_mac_init: failed to initialize port<%d> TXMAC",
+					portn));
+
+	return (NXGE_ERROR | rs);
+}
+
+/* Initialize the RxMAC sub-block */
+
+nxge_status_t
+nxge_rx_mac_init(p_nxge_t nxgep)
+{
+	npi_attr_t		ap;
+	uint32_t		i;
+	uint16_t		hashtab_e;
+	p_hash_filter_t		hash_filter;
+	nxge_port_t		portt;
+	uint8_t			portn;
+	npi_handle_t		handle;
+	npi_status_t		rs = NPI_SUCCESS;
+	uint16_t 		*addr16p;
+	uint16_t 		addr0, addr1, addr2;
+	xmac_rx_config_t	xconfig;
+	bmac_rx_config_t	bconfig;
+
+	portn = NXGE_GET_PORT_NUM(nxgep->function_num);
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_rx_mac_init: port<%d>\n",
+			portn));
+	handle = nxgep->npi_handle;
+	portt = nxgep->mac.porttype;
+
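+	/*
+	 * Program the station MAC address as three host-order 16-bit
+	 * words; addr2 carries the most significant octets.
+	 */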
+	addr16p = (uint16_t *)nxgep->ouraddr.ether_addr_octet;
+	addr0 = ntohs(addr16p[2]);
+	addr1 = ntohs(addr16p[1]);
+	addr2 = ntohs(addr16p[0]);
+	SET_MAC_ATTR3(handle, ap, portn, MAC_PORT_ADDR, addr0, addr1, addr2,
+		rs);
+
+	if (rs != NPI_SUCCESS)
+		goto fail;
+	SET_MAC_ATTR3(handle, ap, portn, MAC_PORT_ADDR_FILTER, 0, 0, 0, rs);
+	if (rs != NPI_SUCCESS)
+		goto fail;
+	SET_MAC_ATTR2(handle, ap, portn, MAC_PORT_ADDR_FILTER_MASK, 0, 0, rs);
+	if (rs != NPI_SUCCESS)
+		goto fail;
+
+	/*
+	 * Load the multicast hash filter bits.
+	 */
+	hash_filter = nxgep->hash_filter;
+	for (i = 0; i < MAC_MAX_HASH_ENTRY; i++) {
+		if (hash_filter != NULL) {
+			hashtab_e = (uint16_t)hash_filter->hash_filter_regs[
+				(NMCFILTER_REGS - 1) - i];
+		} else {
+			hashtab_e = 0;
+		}
+
+		if ((rs = npi_mac_hashtab_entry(handle, OP_SET, portn, i,
+					(uint16_t *)&hashtab_e)) != NPI_SUCCESS)
+			goto fail;
+	}
+
+	if (portt == PORT_TYPE_XMAC) {
+		if ((rs = npi_xmac_rx_iconfig(handle, INIT, portn,
+				0)) != NPI_SUCCESS)
+			goto fail;
+		nxgep->mac.rx_iconfig = NXGE_XMAC_RX_INTRS;
+
+		(void) nxge_fflp_init_hostinfo(nxgep);
+
+		xconfig = (CFG_XMAC_RX_ERRCHK | CFG_XMAC_RX_CRC_CHK |
+			CFG_XMAC_RX | CFG_XMAC_RX_CODE_VIO_CHK) &
+			~CFG_XMAC_RX_STRIP_CRC;
+
+		if (nxgep->filter.all_phys_cnt != 0)
+			xconfig |= CFG_XMAC_RX_PROMISCUOUS;
+
+		if (nxgep->filter.all_multicast_cnt != 0)
+			xconfig |= CFG_XMAC_RX_PROMISCUOUSGROUP;
+
+		xconfig |= CFG_XMAC_RX_HASH_FILTER;
+
+		if ((rs = npi_xmac_rx_config(handle, INIT, portn,
+					xconfig)) != NPI_SUCCESS)
+			goto fail;
+		nxgep->mac.rx_config = xconfig;
+
+		/* Comparison of mac unique address is always enabled on XMAC */
+
+		if ((rs = npi_xmac_zap_rx_counters(handle, portn))
+							!= NPI_SUCCESS)
+			goto fail;
+	} else {
+		(void) nxge_fflp_init_hostinfo(nxgep);
+
+		if (npi_bmac_rx_iconfig(nxgep->npi_handle, INIT, portn,
+					0) != NPI_SUCCESS)
+			goto fail;
+		nxgep->mac.rx_iconfig = NXGE_BMAC_RX_INTRS;
+
+		bconfig = (CFG_BMAC_RX_DISCARD_ON_ERR | CFG_BMAC_RX) &
+			~CFG_BMAC_RX_STRIP_CRC;
+
+		if (nxgep->filter.all_phys_cnt != 0)
+			bconfig |= CFG_BMAC_RX_PROMISCUOUS;
+
+		if (nxgep->filter.all_multicast_cnt != 0)
+			bconfig |= CFG_BMAC_RX_PROMISCUOUSGROUP;
+
+		bconfig |= CFG_BMAC_RX_HASH_FILTER;
+		if ((rs = npi_bmac_rx_config(handle, INIT, portn,
+					bconfig)) != NPI_SUCCESS)
+			goto fail;
+		nxgep->mac.rx_config = bconfig;
+
+		/* Always enable comparison of mac unique address */
+		if ((rs = npi_mac_altaddr_enable(handle, portn, 0))
+					!= NPI_SUCCESS)
+			goto fail;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_rx_mac_init: port<%d>\n",
+			portn));
+
+	return (NXGE_OK);
+
+fail:
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+		"nxge_rx_mac_init: Failed to Initialize port<%d> RxMAC",
+				portn));
+
+	return (NXGE_ERROR | rs);
+}
+
+/* Enable TXMAC */
+
+nxge_status_t
+nxge_tx_mac_enable(p_nxge_t nxgep)
+{
+	npi_handle_t	handle;
+	npi_status_t	rs = NPI_SUCCESS;
+	nxge_status_t	status = NXGE_OK;
+
+	handle = nxgep->npi_handle;
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_tx_mac_enable: port<%d>",
+			nxgep->mac.portnum));
+
+	if ((status = nxge_tx_mac_init(nxgep)) != NXGE_OK)
+		goto fail;
+
+	/* based on speed */
+	nxgep->msg_min = ETHERMIN;
+
+	if (nxgep->mac.porttype == PORT_TYPE_XMAC) {
+		if ((rs = npi_xmac_tx_config(handle, ENABLE, nxgep->mac.portnum,
+						CFG_XMAC_TX)) != NPI_SUCCESS)
+			goto fail;
+	} else {
+		if ((rs = npi_bmac_tx_config(handle, ENABLE, nxgep->mac.portnum,
+						CFG_BMAC_TX)) != NPI_SUCCESS)
+			goto fail;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_tx_mac_enable: port<%d>",
+			nxgep->mac.portnum));
+
+	return (NXGE_OK);
+fail:
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_tx_mac_enable: Failed to enable port<%d> TxMAC",
+			nxgep->mac.portnum));
+	if (rs != NPI_SUCCESS)
+		return (NXGE_ERROR | rs);
+	else
+		return (status);
+}
+
+/* Disable TXMAC */
+
+nxge_status_t
+nxge_tx_mac_disable(p_nxge_t nxgep)
+{
+	npi_handle_t	handle;
+	npi_status_t	rs = NPI_SUCCESS;
+
+	handle = nxgep->npi_handle;
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_tx_mac_disable: port<%d>",
+			nxgep->mac.portnum));
+
+	if (nxgep->mac.porttype == PORT_TYPE_XMAC) {
+		if ((rs = npi_xmac_tx_config(handle, DISABLE,
+			nxgep->mac.portnum, CFG_XMAC_TX)) != NPI_SUCCESS)
+			goto fail;
+	} else {
+		if ((rs = npi_bmac_tx_config(handle, DISABLE,
+			nxgep->mac.portnum, CFG_BMAC_TX)) != NPI_SUCCESS)
+			goto fail;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_tx_mac_disable: port<%d>",
+			nxgep->mac.portnum));
+	return (NXGE_OK);
+fail:
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_tx_mac_disable: Failed to disable port<%d> TxMAC",
+			nxgep->mac.portnum));
+	return (NXGE_ERROR | rs);
+}
+
+/* Enable RXMAC */
+
+nxge_status_t
+nxge_rx_mac_enable(p_nxge_t nxgep)
+{
+	npi_handle_t	handle;
+	uint8_t 	portn;
+	npi_status_t	rs = NPI_SUCCESS;
+	nxge_status_t	status = NXGE_OK;
+
+	handle = nxgep->npi_handle;
+	portn = nxgep->mac.portnum;
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_rx_mac_enable: port<%d>",
+			portn));
+
+	if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK)
+		goto fail;
+
+	if (nxgep->mac.porttype == PORT_TYPE_XMAC) {
+		if ((rs = npi_xmac_rx_config(handle, ENABLE, portn,
+						CFG_XMAC_RX)) != NPI_SUCCESS)
+			goto fail;
+	} else {
+		if ((rs = npi_bmac_rx_config(handle, ENABLE, portn,
+						CFG_BMAC_RX)) != NPI_SUCCESS)
+			goto fail;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_rx_mac_enable: port<%d>",
+			portn));
+
+	return (NXGE_OK);
+fail:
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_rx_mac_enable: Failed to enable port<%d> RxMAC",
+			portn));
+
+	if (rs != NPI_SUCCESS)
+		return (NXGE_ERROR | rs);
+	else
+		return (status);
+}
+
+/* Disable RXMAC */
+
+nxge_status_t
+nxge_rx_mac_disable(p_nxge_t nxgep)
+{
+	npi_handle_t	handle;
+	uint8_t		portn;
+	npi_status_t	rs = NPI_SUCCESS;
+
+	handle = nxgep->npi_handle;
+	portn = nxgep->mac.portnum;
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_rx_mac_disable: port<%d>",
+			portn));
+
+	if (nxgep->mac.porttype == PORT_TYPE_XMAC) {
+		if ((rs = npi_xmac_rx_config(handle, DISABLE, portn,
+						CFG_XMAC_RX)) != NPI_SUCCESS)
+			goto fail;
+	} else {
+		if ((rs = npi_bmac_rx_config(handle, DISABLE, portn,
+						CFG_BMAC_RX)) != NPI_SUCCESS)
+			goto fail;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_rx_mac_disable: port<%d>",
+			portn));
+	return (NXGE_OK);
+fail:
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_rx_mac_disable: "
+			"Failed to disable port<%d> RxMAC",
+			portn));
+
+	return (NXGE_ERROR | rs);
+}
+
+/* Reset TXMAC */
+
+nxge_status_t
+nxge_tx_mac_reset(p_nxge_t nxgep)
+{
+	npi_handle_t	handle;
+	uint8_t		portn;
+	npi_status_t	rs = NPI_SUCCESS;
+
+	handle = nxgep->npi_handle;
+	portn = nxgep->mac.portnum;
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_tx_mac_reset: port<%d>",
+			portn));
+
+	if (nxgep->mac.porttype == PORT_TYPE_XMAC) {
+		if ((rs = npi_xmac_reset(handle, portn, XTX_MAC_RESET_ALL))
+		    != NPI_SUCCESS)
+			goto fail;
+	} else {
+		if ((rs = npi_bmac_reset(handle, portn, TX_MAC_RESET))
+					!= NPI_SUCCESS)
+			goto fail;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_tx_mac_reset: port<%d>",
+			portn));
+
+	return (NXGE_OK);
+fail:
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_tx_mac_reset: Failed to Reset TxMAC port<%d>",
+			portn));
+
+	return (NXGE_ERROR | rs);
+}
+
+/* Reset RXMAC */
+
+nxge_status_t
+nxge_rx_mac_reset(p_nxge_t nxgep)
+{
+	npi_handle_t	handle;
+	uint8_t		portn;
+	npi_status_t	rs = NPI_SUCCESS;
+
+	handle = nxgep->npi_handle;
+	portn = nxgep->mac.portnum;
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_rx_mac_reset: port<%d>",
+			portn));
+
+	if (nxgep->mac.porttype == PORT_TYPE_XMAC) {
+		if ((rs = npi_xmac_reset(handle, portn, XRX_MAC_RESET_ALL))
+		    != NPI_SUCCESS)
+		goto fail;
+	} else {
+		if ((rs = npi_bmac_reset(handle, portn, RX_MAC_RESET))
+					!= NPI_SUCCESS)
+		goto fail;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_rx_mac_reset: port<%d>",
+			portn));
+
+	return (NXGE_OK);
+fail:
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_rx_mac_reset: Failed to Reset RxMAC port<%d>",
+			portn));
+	return (NXGE_ERROR | rs);
+}
+
+
+/* Enable/Disable MII Link Status change interrupt */
+
+nxge_status_t
+nxge_link_intr(p_nxge_t nxgep, link_intr_enable_t enable)
+{
+	uint8_t			portn;
+	nxge_port_mode_t	portmode;
+	npi_status_t		rs = NPI_SUCCESS;
+
+	portn = nxgep->mac.portnum;
+	portmode = nxgep->mac.portmode;
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_link_intr: port<%d>", portn));
+
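+	/*
+	 * The link-status interrupt source depends on the port mode:
+	 * the XPCS for 10G fiber, the PCS for 1G fiber, and the MIF
+	 * (watching the BMSR link status bit) for 1G copper.
+	 */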
+	if (enable == LINK_INTR_START) {
+		if (portmode == PORT_10G_FIBER) {
+			if ((rs = npi_xmac_xpcs_link_intr_enable(
+						nxgep->npi_handle,
+						portn)) != NPI_SUCCESS)
+				goto fail;
+		} else if (portmode == PORT_1G_FIBER) {
+			if ((rs = npi_mac_pcs_link_intr_enable(
+						nxgep->npi_handle,
+						portn)) != NPI_SUCCESS)
+				goto fail;
+		} else if (portmode == PORT_1G_COPPER) {
+			if ((rs = npi_mac_mif_link_intr_enable(
+				nxgep->npi_handle,
+				portn, MII_BMSR, BMSR_LSTATUS)) != NPI_SUCCESS)
+				goto fail;
+		} else
+			goto fail;
+	} else if (enable == LINK_INTR_STOP) {
+		if (portmode == PORT_10G_FIBER) {
+			if ((rs = npi_xmac_xpcs_link_intr_disable(
+						nxgep->npi_handle,
+						portn)) != NPI_SUCCESS)
+				goto fail;
+		} else  if (portmode == PORT_1G_FIBER) {
+			if ((rs = npi_mac_pcs_link_intr_disable(
+						nxgep->npi_handle,
+						portn)) != NPI_SUCCESS)
+				goto fail;
+		} else if (portmode == PORT_1G_COPPER) {
+			if ((rs = npi_mac_mif_link_intr_disable(
+						nxgep->npi_handle,
+						portn)) != NPI_SUCCESS)
+				goto fail;
+		} else
+			goto fail;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_link_intr: port<%d>", portn));
+
+	return (NXGE_OK);
+fail:
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_link_intr: Failed to set port<%d> mif intr mode",
+			portn));
+
+	return (NXGE_ERROR | rs);
+}
+
+/* Initialize 1G Fiber / Copper transceiver using Clause 22 */
+
+nxge_status_t
+nxge_mii_xcvr_init(p_nxge_t nxgep)
+{
+	p_nxge_param_t	param_arr;
+	p_nxge_stats_t	statsp;
+	uint8_t		xcvr_portn;
+	p_mii_regs_t	mii_regs;
+	mii_bmcr_t	bmcr;
+	mii_bmsr_t	bmsr;
+	mii_anar_t	anar;
+	mii_gcr_t	gcr;
+	mii_esr_t	esr;
+	mii_aux_ctl_t	bcm5464r_aux;
+	int		status = NXGE_OK;
+
+	uint_t delay;
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_mii_xcvr_init"));
+
+	param_arr = nxgep->param_arr;
+	statsp = nxgep->statsp;
+	xcvr_portn = statsp->mac_stats.xcvr_portn;
+
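+	/*
+	 * mii_regs is never dereferenced; the NULL-based pointer is used
+	 * only so that &mii_regs->reg yields each MII register offset.
+	 */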
+	mii_regs = NULL;
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+		"nxge_param_autoneg = 0x%02x", param_arr[param_autoneg].value));
+
+	/*
+	 * Reset the transceiver.
+	 */
+	delay = 0;
+	bmcr.value = 0;
+	bmcr.bits.reset = 1;
+	if ((status = nxge_mii_write(nxgep, xcvr_portn,
+		(uint8_t)(uint64_t)&mii_regs->bmcr, bmcr.value)) != NXGE_OK)
+		goto fail;
+	do {
+		drv_usecwait(500);
+		if ((status = nxge_mii_read(nxgep, xcvr_portn,
+			(uint8_t)(uint64_t)&mii_regs->bmcr, &bmcr.value))
+				!= NXGE_OK)
+			goto fail;
+		delay++;
+	} while ((bmcr.bits.reset) && (delay < 1000));
+	if (delay == 1000) {
+		NXGE_DEBUG_MSG((nxgep, MAC_CTL, "Xcvr reset failed."));
+		goto fail;
+	}
+
+	if ((status = nxge_mii_read(nxgep, xcvr_portn,
+			(uint8_t)(uint64_t)(&mii_regs->bmsr),
+			&bmsr.value)) != NXGE_OK)
+		goto fail;
+
+	param_arr[param_autoneg].value &= bmsr.bits.auto_neg_able;
+	param_arr[param_anar_100T4].value &= bmsr.bits.link_100T4;
+	param_arr[param_anar_100fdx].value &= bmsr.bits.link_100fdx;
+	param_arr[param_anar_100hdx].value = 0;
+	param_arr[param_anar_10fdx].value &= bmsr.bits.link_10fdx;
+	param_arr[param_anar_10hdx].value = 0;
+
+	/*
+	 * Initialize the xcvr statistics.
+	 */
+	statsp->mac_stats.cap_autoneg = bmsr.bits.auto_neg_able;
+	statsp->mac_stats.cap_100T4 = bmsr.bits.link_100T4;
+	statsp->mac_stats.cap_100fdx = bmsr.bits.link_100fdx;
+	statsp->mac_stats.cap_100hdx = 0;
+	statsp->mac_stats.cap_10fdx = bmsr.bits.link_10fdx;
+	statsp->mac_stats.cap_10hdx = 0;
+	statsp->mac_stats.cap_asmpause = param_arr[param_anar_asmpause].value;
+	statsp->mac_stats.cap_pause = param_arr[param_anar_pause].value;
+
+	/*
+	 * Initialize the xcvr advertised capability statistics.
+	 */
+	statsp->mac_stats.adv_cap_autoneg = param_arr[param_autoneg].value;
+	statsp->mac_stats.adv_cap_1000fdx = param_arr[param_anar_1000fdx].value;
+	statsp->mac_stats.adv_cap_1000hdx = param_arr[param_anar_1000hdx].value;
+	statsp->mac_stats.adv_cap_100T4 = param_arr[param_anar_100T4].value;
+	statsp->mac_stats.adv_cap_100fdx = param_arr[param_anar_100fdx].value;
+	statsp->mac_stats.adv_cap_100hdx = param_arr[param_anar_100hdx].value;
+	statsp->mac_stats.adv_cap_10fdx = param_arr[param_anar_10fdx].value;
+	statsp->mac_stats.adv_cap_10hdx = param_arr[param_anar_10hdx].value;
+	statsp->mac_stats.adv_cap_asmpause =
+					param_arr[param_anar_asmpause].value;
+	statsp->mac_stats.adv_cap_pause = param_arr[param_anar_pause].value;
+
+
+	/*
+	 * Check for extended status just in case we are
+	 * running a Gigabit PHY.
+	 */
+	if (bmsr.bits.extend_status) {
+		if ((status = nxge_mii_read(nxgep, xcvr_portn,
+			(uint8_t)(uint64_t)(&mii_regs->esr), &esr.value))
+				!= NXGE_OK)
+			goto fail;
+		param_arr[param_anar_1000fdx].value &=
+					esr.bits.link_1000fdx;
+		param_arr[param_anar_1000hdx].value = 0;
+
+		statsp->mac_stats.cap_1000fdx =
+			(esr.bits.link_1000Xfdx ||
+				esr.bits.link_1000fdx);
+		statsp->mac_stats.cap_1000hdx = 0;
+	} else {
+		param_arr[param_anar_1000fdx].value = 0;
+		param_arr[param_anar_1000hdx].value = 0;
+	}
+
+	/*
+	 * Initialize 1G Statistics once the capability is established.
+	 */
+	statsp->mac_stats.adv_cap_1000fdx = param_arr[param_anar_1000fdx].value;
+	statsp->mac_stats.adv_cap_1000hdx = param_arr[param_anar_1000hdx].value;
+
+	/*
+	 * Initialize the link statistics.
+	 */
+	statsp->mac_stats.link_T4 = 0;
+	statsp->mac_stats.link_asmpause = 0;
+	statsp->mac_stats.link_pause = 0;
+	statsp->mac_stats.link_speed = 0;
+	statsp->mac_stats.link_duplex = 0;
+	statsp->mac_stats.link_up = 0;
+
+	/*
+	 * Switch off Auto-negotiation, 100M and full duplex.
+	 */
+	bmcr.value = 0;
+	if ((status = nxge_mii_write(nxgep, xcvr_portn,
+		(uint8_t)(uint64_t)(&mii_regs->bmcr), bmcr.value)) != NXGE_OK)
+		goto fail;
+
+	if ((statsp->port_stats.lb_mode == nxge_lb_phy) ||
+			(statsp->port_stats.lb_mode == nxge_lb_phy1000)) {
+		bmcr.bits.loopback = 1;
+		bmcr.bits.enable_autoneg = 0;
+		if (statsp->port_stats.lb_mode == nxge_lb_phy1000)
+			bmcr.bits.speed_1000_sel = 1;
+		bmcr.bits.duplex_mode = 1;
+		param_arr[param_autoneg].value = 0;
+	} else {
+		bmcr.bits.loopback = 0;
+	}
+
+	if ((statsp->port_stats.lb_mode == nxge_lb_ext1000) ||
+		(statsp->port_stats.lb_mode == nxge_lb_ext100) ||
+		(statsp->port_stats.lb_mode == nxge_lb_ext10)) {
+		param_arr[param_autoneg].value = 0;
+		bcm5464r_aux.value = 0;
+		bcm5464r_aux.bits.ext_lb = 1;
+		bcm5464r_aux.bits.write_1 = 1;
+		if ((status = nxge_mii_write(nxgep, xcvr_portn,
+				BCM5464R_AUX_CTL, bcm5464r_aux.value))
+				!= NXGE_OK)
+			goto fail;
+	}
+
+	if (param_arr[param_autoneg].value) {
+		NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+				"Restarting Auto-negotiation."));
+		/*
+		 * Setup our Auto-negotiation advertisement register.
+		 */
+		anar.value = 0;
+		anar.bits.selector = 1;
+		anar.bits.cap_100T4 = param_arr[param_anar_100T4].value;
+		anar.bits.cap_100fdx = param_arr[param_anar_100fdx].value;
+		anar.bits.cap_100hdx = param_arr[param_anar_100hdx].value;
+		anar.bits.cap_10fdx = param_arr[param_anar_10fdx].value;
+		anar.bits.cap_10hdx = param_arr[param_anar_10hdx].value;
+		anar.bits.cap_asmpause = 0;
+		anar.bits.cap_pause = 0;
+		if (param_arr[param_anar_1000fdx].value ||
+			param_arr[param_anar_100fdx].value ||
+			param_arr[param_anar_10fdx].value) {
+			anar.bits.cap_asmpause = statsp->mac_stats.cap_asmpause;
+			anar.bits.cap_pause = statsp->mac_stats.cap_pause;
+		}
+
+		if ((status = nxge_mii_write(nxgep, xcvr_portn,
+			(uint8_t)(uint64_t)(&mii_regs->anar), anar.value))
+				!= NXGE_OK)
+			goto fail;
+		if (bmsr.bits.extend_status) {
+			gcr.value = 0;
+			gcr.bits.ms_mode_en =
+				param_arr[param_master_cfg_enable].value;
+			gcr.bits.master =
+				param_arr[param_master_cfg_value].value;
+			gcr.bits.link_1000fdx =
+				param_arr[param_anar_1000fdx].value;
+			gcr.bits.link_1000hdx =
+				param_arr[param_anar_1000hdx].value;
+			if ((status = nxge_mii_write(nxgep, xcvr_portn,
+				(uint8_t)(uint64_t)(&mii_regs->gcr), gcr.value))
+				!= NXGE_OK)
+				goto fail;
+		}
+
+		bmcr.bits.enable_autoneg = 1;
+		bmcr.bits.restart_autoneg = 1;
+
+	} else {
+		NXGE_DEBUG_MSG((nxgep, MAC_CTL, "Going into forced mode."));
+		bmcr.bits.speed_1000_sel =
+			param_arr[param_anar_1000fdx].value |
+				param_arr[param_anar_1000hdx].value;
+		bmcr.bits.speed_sel = (~bmcr.bits.speed_1000_sel) &
+			(param_arr[param_anar_100fdx].value |
+				param_arr[param_anar_100hdx].value);
+		if (bmcr.bits.speed_1000_sel) {
+			statsp->mac_stats.link_speed = 1000;
+			gcr.value = 0;
+			gcr.bits.ms_mode_en =
+				param_arr[param_master_cfg_enable].value;
+			gcr.bits.master =
+				param_arr[param_master_cfg_value].value;
+			if ((status = nxge_mii_write(nxgep, xcvr_portn,
+				(uint8_t)(uint64_t)(&mii_regs->gcr),
+				gcr.value))
+				!= NXGE_OK)
+				goto fail;
+			if (param_arr[param_anar_1000fdx].value) {
+				bmcr.bits.duplex_mode = 1;
+				statsp->mac_stats.link_duplex = 2;
+			} else
+				statsp->mac_stats.link_duplex = 1;
+		} else if (bmcr.bits.speed_sel) {
+			statsp->mac_stats.link_speed = 100;
+			if (param_arr[param_anar_100fdx].value) {
+				bmcr.bits.duplex_mode = 1;
+				statsp->mac_stats.link_duplex = 2;
+			} else
+				statsp->mac_stats.link_duplex = 1;
+		} else {
+			statsp->mac_stats.link_speed = 10;
+			if (param_arr[param_anar_10fdx].value) {
+				bmcr.bits.duplex_mode = 1;
+				statsp->mac_stats.link_duplex = 2;
+			} else
+				statsp->mac_stats.link_duplex = 1;
+		}
+		if (statsp->mac_stats.link_duplex != 1) {
+			statsp->mac_stats.link_asmpause =
+						statsp->mac_stats.cap_asmpause;
+			statsp->mac_stats.link_pause =
+						statsp->mac_stats.cap_pause;
+		}
+
+		if ((statsp->port_stats.lb_mode == nxge_lb_ext1000) ||
+			(statsp->port_stats.lb_mode == nxge_lb_ext100) ||
+			(statsp->port_stats.lb_mode == nxge_lb_ext10)) {
+			if (statsp->port_stats.lb_mode == nxge_lb_ext1000) {
+				/* BCM5464R 1000mbps external loopback mode */
+				gcr.value = 0;
+				gcr.bits.ms_mode_en = 1;
+				gcr.bits.master = 1;
+				if ((status = nxge_mii_write(nxgep, xcvr_portn,
+					(uint8_t)(uint64_t)(&mii_regs->gcr),
+					gcr.value))
+					!= NXGE_OK)
+					goto fail;
+				bmcr.value = 0;
+				bmcr.bits.speed_1000_sel = 1;
+				statsp->mac_stats.link_speed = 1000;
+			} else if (statsp->port_stats.lb_mode
+			    == nxge_lb_ext100) {
+				/* BCM5464R 100mbps external loopback mode */
+				bmcr.value = 0;
+				bmcr.bits.speed_sel = 1;
+				bmcr.bits.duplex_mode = 1;
+				statsp->mac_stats.link_speed = 100;
+			} else if (statsp->port_stats.lb_mode
+			    == nxge_lb_ext10) {
+				/* BCM5464R 10mbps external loopback mode */
+				bmcr.value = 0;
+				bmcr.bits.duplex_mode = 1;
+				statsp->mac_stats.link_speed = 10;
+			}
+		}
+	}
+
+	if ((status = nxge_mii_write(nxgep, xcvr_portn,
+			(uint8_t)(uint64_t)(&mii_regs->bmcr),
+			bmcr.value)) != NXGE_OK)
+		goto fail;
+
+	if ((status = nxge_mii_read(nxgep, xcvr_portn,
+		(uint8_t)(uint64_t)(&mii_regs->bmcr), &bmcr.value)) != NXGE_OK)
+		goto fail;
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "bmcr = 0x%04X", bmcr.value));
+
+	/*
+	 * Initialize the xcvr status kept in the context structure.
+	 */
+	nxgep->soft_bmsr.value = 0;
+
+	if ((status = nxge_mii_read(nxgep, xcvr_portn,
+		(uint8_t)(uint64_t)(&mii_regs->bmsr),
+			&nxgep->bmsr.value)) != NXGE_OK)
+		goto fail;
+
+	statsp->mac_stats.xcvr_inits++;
+	nxgep->bmsr.value = 0;
+
+fail:
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+			"<== nxge_mii_xcvr_init status 0x%x", status));
+	return (status);
+}
+
+/* Read from a MII compliant register */
+
+nxge_status_t
+nxge_mii_read(p_nxge_t nxgep, uint8_t xcvr_portn, uint8_t xcvr_reg,
+		uint16_t *value)
+{
+	npi_status_t rs = NPI_SUCCESS;
+
+	NXGE_DEBUG_MSG((nxgep, MIF_CTL, "==> nxge_mii_read: xcvr_port<%d>"
+			"xcvr_reg<%d>", xcvr_portn, xcvr_reg));
+
+	MUTEX_ENTER(&nxge_mii_lock);
+
+	if (nxgep->mac.portmode == PORT_1G_COPPER) {
+		if ((rs = npi_mac_mif_mii_read(nxgep->npi_handle,
+				xcvr_portn, xcvr_reg, value)) != NPI_SUCCESS)
+			goto fail;
+	} else if (nxgep->mac.portmode == PORT_1G_FIBER) {
+		if ((rs = npi_mac_pcs_mii_read(nxgep->npi_handle,
+				xcvr_portn, xcvr_reg, value)) != NPI_SUCCESS)
+			goto fail;
+	} else
+		goto fail;
+
+	MUTEX_EXIT(&nxge_mii_lock);
+
+	NXGE_DEBUG_MSG((nxgep, MIF_CTL, "<== nxge_mii_read: xcvr_port<%d>"
+			"xcvr_reg<%d> value=0x%x",
+			xcvr_portn, xcvr_reg, *value));
+	return (NXGE_OK);
+fail:
+	MUTEX_EXIT(&nxge_mii_lock);
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_mii_read: Failed to read mii on xcvr %d",
+			xcvr_portn));
+
+	return (NXGE_ERROR | rs);
+}
+
+/* Write to a MII compliant Register */
+
+nxge_status_t
+nxge_mii_write(p_nxge_t nxgep, uint8_t xcvr_portn, uint8_t xcvr_reg,
+		uint16_t value)
+{
+	npi_status_t rs = NPI_SUCCESS;
+
+	NXGE_DEBUG_MSG((nxgep, MIF_CTL, "==> nxge_mii_write: xcvr_port<%d>"
+			"xcvr_reg<%d> value=0x%x", xcvr_portn, xcvr_reg,
+			value));
+
+	MUTEX_ENTER(&nxge_mii_lock);
+
+	if (nxgep->mac.portmode == PORT_1G_COPPER) {
+		if ((rs = npi_mac_mif_mii_write(nxgep->npi_handle,
+				xcvr_portn, xcvr_reg, value)) != NPI_SUCCESS)
+			goto fail;
+	} else if (nxgep->mac.portmode == PORT_1G_FIBER) {
+		if ((rs = npi_mac_pcs_mii_write(nxgep->npi_handle,
+				xcvr_portn, xcvr_reg, value)) != NPI_SUCCESS)
+			goto fail;
+	} else
+		goto fail;
+
+	MUTEX_EXIT(&nxge_mii_lock);
+
+	NXGE_DEBUG_MSG((nxgep, MIF_CTL, "<== nxge_mii_write: xcvr_port<%d>"
+			"xcvr_reg<%d>", xcvr_portn, xcvr_reg));
+	return (NXGE_OK);
+fail:
+	MUTEX_EXIT(&nxge_mii_lock);
+
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_mii_write: Failed to write mii on xcvr %d",
+			xcvr_portn));
+
+	return (NXGE_ERROR | rs);
+}
+
+/* Perform read from Clause45 serdes / transceiver device */
+
+nxge_status_t
+nxge_mdio_read(p_nxge_t nxgep, uint8_t xcvr_portn, uint8_t device,
+		uint16_t xcvr_reg, uint16_t *value)
+{
+	npi_status_t rs = NPI_SUCCESS;
+
+	NXGE_DEBUG_MSG((nxgep, MIF_CTL, "==> nxge_mdio_read: xcvr_port<%d>",
+			xcvr_portn));
+
+	MUTEX_ENTER(&nxge_mdio_lock);
+
+	if ((rs = npi_mac_mif_mdio_read(nxgep->npi_handle,
+			xcvr_portn, device, xcvr_reg, value)) != NPI_SUCCESS)
+		goto fail;
+
+	MUTEX_EXIT(&nxge_mdio_lock);
+
+	NXGE_DEBUG_MSG((nxgep, MIF_CTL, "<== nxge_mdio_read: xcvr_port<%d>",
+			xcvr_portn));
+	return (NXGE_OK);
+fail:
+	MUTEX_EXIT(&nxge_mdio_lock);
+
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_mdio_read: Failed to read mdio on xcvr %d",
+			xcvr_portn));
+
+	return (NXGE_ERROR | rs);
+}
+
+/* Perform write to Clause45 serdes / transceiver device */
+
+nxge_status_t
+nxge_mdio_write(p_nxge_t nxgep, uint8_t xcvr_portn, uint8_t device,
+		uint16_t xcvr_reg, uint16_t value)
+{
+	npi_status_t rs = NPI_SUCCESS;
+
+	NXGE_DEBUG_MSG((nxgep, MIF_CTL, "==> nxge_mdio_write: xcvr_port<%d>",
+			xcvr_portn));
+
+	MUTEX_ENTER(&nxge_mdio_lock);
+
+	if ((rs = npi_mac_mif_mdio_write(nxgep->npi_handle,
+			xcvr_portn, device, xcvr_reg, value)) != NPI_SUCCESS)
+		goto fail;
+
+	MUTEX_EXIT(&nxge_mdio_lock);
+
+	NXGE_DEBUG_MSG((nxgep, MIF_CTL, "<== nxge_mdio_write: xcvr_port<%d>",
+			xcvr_portn));
+	return (NXGE_OK);
+fail:
+	MUTEX_EXIT(&nxge_mdio_lock);
+
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_mdio_write: Failed to write mdio on xcvr %d",
+			xcvr_portn));
+
+	return (NXGE_ERROR | rs);
+}
+
+
+/* Check MII to see if there is any link status change */
+
+nxge_status_t
+nxge_mii_check(p_nxge_t nxgep, mii_bmsr_t bmsr, mii_bmsr_t bmsr_ints,
+		nxge_link_state_t *link_up)
+{
+	p_nxge_param_t	param_arr;
+	p_nxge_stats_t	statsp;
+	p_mii_regs_t	mii_regs;
+	p_mii_bmsr_t	soft_bmsr;
+	mii_anar_t	anar;
+	mii_anlpar_t	anlpar;
+	mii_anar_t	an_common;
+	mii_aner_t	aner;
+	mii_gsr_t	gsr;
+	nxge_status_t	status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_mii_check"));
+
+	mii_regs = NULL;
+	param_arr = nxgep->param_arr;
+	statsp = nxgep->statsp;
+	soft_bmsr = &nxgep->soft_bmsr;
+	*link_up = LINK_NO_CHANGE;
+
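+	/*
+	 * bmsr holds the current BMSR contents; bmsr_ints flags the bits
+	 * that changed and triggered this check.
+	 */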
+	if (bmsr_ints.bits.link_status) {
+		if (bmsr.bits.link_status) {
+			soft_bmsr->bits.link_status = 1;
+		} else {
+			statsp->mac_stats.link_up = 0;
+			soft_bmsr->bits.link_status = 0;
+			NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+					"Link down (cable problem)"));
+			*link_up = LINK_IS_DOWN;
+		}
+	}
+
+	if (param_arr[param_autoneg].value) {
+		if (bmsr_ints.bits.auto_neg_complete) {
+			if (bmsr.bits.auto_neg_complete)
+				soft_bmsr->bits.auto_neg_complete = 1;
+			else
+				soft_bmsr->bits.auto_neg_complete = 0;
+		}
+		if (soft_bmsr->bits.link_status == 0) {
+			statsp->mac_stats.link_T4 = 0;
+			statsp->mac_stats.link_speed = 0;
+			statsp->mac_stats.link_duplex = 0;
+			statsp->mac_stats.link_asmpause = 0;
+			statsp->mac_stats.link_pause = 0;
+			statsp->mac_stats.lp_cap_autoneg = 0;
+			statsp->mac_stats.lp_cap_100T4 = 0;
+			statsp->mac_stats.lp_cap_1000fdx = 0;
+			statsp->mac_stats.lp_cap_1000hdx = 0;
+			statsp->mac_stats.lp_cap_100fdx = 0;
+			statsp->mac_stats.lp_cap_100hdx = 0;
+			statsp->mac_stats.lp_cap_10fdx = 0;
+			statsp->mac_stats.lp_cap_10hdx = 0;
+			statsp->mac_stats.lp_cap_10gfdx = 0;
+			statsp->mac_stats.lp_cap_10ghdx = 0;
+			statsp->mac_stats.lp_cap_asmpause = 0;
+			statsp->mac_stats.lp_cap_pause = 0;
+		}
+	} else
+		soft_bmsr->bits.auto_neg_complete = 1;
+
+	if ((bmsr_ints.bits.link_status ||
+		bmsr_ints.bits.auto_neg_complete) &&
+		soft_bmsr->bits.link_status &&
+		soft_bmsr->bits.auto_neg_complete) {
+		statsp->mac_stats.link_up = 1;
+		if (param_arr[param_autoneg].value) {
+			if ((status = nxge_mii_read(nxgep,
+				statsp->mac_stats.xcvr_portn,
+				(uint8_t)(uint64_t)(&mii_regs->anar),
+					&anar.value)) != NXGE_OK)
+				goto fail;
+			if ((status = nxge_mii_read(nxgep,
+				statsp->mac_stats.xcvr_portn,
+				(uint8_t)(uint64_t)(&mii_regs->anlpar),
+					&anlpar.value)) != NXGE_OK)
+				goto fail;
+			if ((status = nxge_mii_read(nxgep,
+				statsp->mac_stats.xcvr_portn,
+				(uint8_t)(uint64_t)(&mii_regs->aner),
+					&aner.value)) != NXGE_OK)
+				goto fail;
+			statsp->mac_stats.lp_cap_autoneg = aner.bits.lp_an_able;
+			statsp->mac_stats.lp_cap_100T4 = anlpar.bits.cap_100T4;
+			statsp->mac_stats.lp_cap_100fdx =
+							anlpar.bits.cap_100fdx;
+			statsp->mac_stats.lp_cap_100hdx =
+							anlpar.bits.cap_100hdx;
+			statsp->mac_stats.lp_cap_10fdx = anlpar.bits.cap_10fdx;
+			statsp->mac_stats.lp_cap_10hdx = anlpar.bits.cap_10hdx;
+			statsp->mac_stats.lp_cap_asmpause =
+						anlpar.bits.cap_asmpause;
+			statsp->mac_stats.lp_cap_pause = anlpar.bits.cap_pause;
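+			/*
+			 * The common abilities are the intersection of what
+			 * we advertise (ANAR) and what the link partner
+			 * advertises (ANLPAR); they resolve speed and duplex.
+			 */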
+			an_common.value = anar.value & anlpar.value;
+			if (param_arr[param_anar_1000fdx].value ||
+				param_arr[param_anar_1000hdx].value) {
+				if ((status = nxge_mii_read(nxgep,
+					statsp->mac_stats.xcvr_portn,
+					(uint8_t)(uint64_t)(&mii_regs->gsr),
+						&gsr.value))
+						!= NXGE_OK)
+					goto fail;
+				statsp->mac_stats.lp_cap_1000fdx =
+					gsr.bits.link_1000fdx;
+				statsp->mac_stats.lp_cap_1000hdx =
+					gsr.bits.link_1000hdx;
+				if (param_arr[param_anar_1000fdx].value &&
+					gsr.bits.link_1000fdx) {
+					statsp->mac_stats.link_speed = 1000;
+					statsp->mac_stats.link_duplex = 2;
+				} else if (
+					param_arr[param_anar_1000hdx].value &&
+						gsr.bits.link_1000hdx) {
+					statsp->mac_stats.link_speed = 1000;
+					statsp->mac_stats.link_duplex = 1;
+				}
+			}
+			if ((an_common.value != 0) &&
+					!(statsp->mac_stats.link_speed)) {
+				if (an_common.bits.cap_100T4) {
+					statsp->mac_stats.link_T4 = 1;
+					statsp->mac_stats.link_speed = 100;
+					statsp->mac_stats.link_duplex = 1;
+				} else if (an_common.bits.cap_100fdx) {
+					statsp->mac_stats.link_speed = 100;
+					statsp->mac_stats.link_duplex = 2;
+				} else if (an_common.bits.cap_100hdx) {
+					statsp->mac_stats.link_speed = 100;
+					statsp->mac_stats.link_duplex = 1;
+				} else if (an_common.bits.cap_10fdx) {
+					statsp->mac_stats.link_speed = 10;
+					statsp->mac_stats.link_duplex = 2;
+				} else if (an_common.bits.cap_10hdx) {
+					statsp->mac_stats.link_speed = 10;
+					statsp->mac_stats.link_duplex = 1;
+				} else {
+					goto fail;
+				}
+			}
+			if (statsp->mac_stats.link_duplex != 1) {
+				statsp->mac_stats.link_asmpause =
+					an_common.bits.cap_asmpause;
+				if (statsp->mac_stats.link_asmpause) {
+					if ((statsp->mac_stats.cap_pause == 0) &&
+					    (statsp->mac_stats.lp_cap_pause == 1))
+						statsp->mac_stats.link_pause = 0;
+					else
+						statsp->mac_stats.link_pause = 1;
+				} else {
+					statsp->mac_stats.link_pause =
+						an_common.bits.cap_pause;
+				}
+			}
+		}
+		*link_up = LINK_IS_UP;
+	}
+
+	if (nxgep->link_notify) {
+		*link_up = ((statsp->mac_stats.link_up) ? LINK_IS_UP :
+				LINK_IS_DOWN);
+		nxgep->link_notify = B_FALSE;
+	}
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_mii_check"));
+	return (NXGE_OK);
+fail:
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_mii_check: Unable to check MII"));
+	return (status);
+}
+
+/* Add a multicast address entry into the HW hash table */
+
+nxge_status_t
+nxge_add_mcast_addr(p_nxge_t nxgep, struct ether_addr *addrp)
+{
+	uint32_t mchash;
+	p_hash_filter_t hash_filter;
+	uint16_t hash_bit;
+	boolean_t rx_init = B_FALSE;
+	uint_t j;
+	nxge_status_t status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_add_mcast_addr"));
+
+	RW_ENTER_WRITER(&nxgep->filter_lock);
+	mchash = crc32_mchash(addrp);
+	if (nxgep->hash_filter == NULL) {
+		NXGE_DEBUG_MSG((NULL, STR_CTL,
+			"Allocating hash filter storage."));
+		nxgep->hash_filter = KMEM_ZALLOC(sizeof (hash_filter_t),
+					KM_SLEEP);
+	}
+	hash_filter = nxgep->hash_filter;
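+	/*
+	 * The CRC-based hash selects one bit in the hash filter register
+	 * array; the per-bit reference count tracks how many multicast
+	 * addresses map to that bit so it is only cleared when the last
+	 * address using it is removed.
+	 */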
+	j = mchash / HASH_REG_WIDTH;
+	hash_bit = (1 << (mchash % HASH_REG_WIDTH));
+	hash_filter->hash_filter_regs[j] |= hash_bit;
+	hash_filter->hash_bit_ref_cnt[mchash]++;
+	if (hash_filter->hash_bit_ref_cnt[mchash] == 1) {
+		hash_filter->hash_ref_cnt++;
+		rx_init = B_TRUE;
+	}
+	if (rx_init) {
+		if ((status = nxge_rx_mac_disable(nxgep)) != NXGE_OK)
+			goto fail;
+		if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK)
+			goto fail;
+	}
+
+	RW_EXIT(&nxgep->filter_lock);
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_add_mcast_addr"));
+
+	return (NXGE_OK);
+fail:
+	RW_EXIT(&nxgep->filter_lock);
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_add_mcast_addr: "
+					"Unable to add multicast address"));
+	return (status);
+}
+
+/* Remove a multicast address entry from the HW hash table */
+
+nxge_status_t
+nxge_del_mcast_addr(p_nxge_t nxgep, struct ether_addr *addrp)
+{
+	uint32_t mchash;
+	p_hash_filter_t hash_filter;
+	uint16_t hash_bit;
+	boolean_t rx_init = B_FALSE;
+	uint_t j;
+	nxge_status_t status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_del_mcast_addr"));
+	RW_ENTER_WRITER(&nxgep->filter_lock);
+	mchash = crc32_mchash(addrp);
+	if (nxgep->hash_filter == NULL) {
+		NXGE_DEBUG_MSG((NULL, STR_CTL,
+			"Hash filter already de-allocated."));
+		RW_EXIT(&nxgep->filter_lock);
+		NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_del_mcast_addr"));
+		return (NXGE_OK);
+	}
+	hash_filter = nxgep->hash_filter;
+	hash_filter->hash_bit_ref_cnt[mchash]--;
+	if (hash_filter->hash_bit_ref_cnt[mchash] == 0) {
+		j = mchash / HASH_REG_WIDTH;
+		hash_bit = (1 << (mchash % HASH_REG_WIDTH));
+		hash_filter->hash_filter_regs[j] &= ~hash_bit;
+		hash_filter->hash_ref_cnt--;
+		rx_init = B_TRUE;
+	}
+	if (hash_filter->hash_ref_cnt == 0) {
+		NXGE_DEBUG_MSG((NULL, STR_CTL,
+			"De-allocating hash filter storage."));
+		KMEM_FREE(hash_filter, sizeof (hash_filter_t));
+		nxgep->hash_filter = NULL;
+	}
+
+	if (rx_init) {
+		if ((status = nxge_rx_mac_disable(nxgep)) != NXGE_OK)
+			goto fail;
+		if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK)
+			goto fail;
+	}
+	RW_EXIT(&nxgep->filter_lock);
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_del_mcast_addr"));
+
+	return (NXGE_OK);
+fail:
+	RW_EXIT(&nxgep->filter_lock);
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_del_mcast_addr: "
+			"Unable to remove multicast address"));
+
+	return (status);
+}
+
+/* Set MAC address into MAC address HW registers */
+
+nxge_status_t
+nxge_set_mac_addr(p_nxge_t nxgep, struct ether_addr *addrp)
+{
+	nxge_status_t status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_set_mac_addr"));
+
+	MUTEX_ENTER(&nxgep->ouraddr_lock);
+	/*
+	 * Exit if the address is the same as ouraddr, or is a multicast
+	 * or broadcast address.
+	 */
+	if (((addrp->ether_addr_octet[0] & 01) == 1) ||
+		(ether_cmp(addrp, &etherbroadcastaddr) == 0) ||
+		(ether_cmp(addrp, &nxgep->ouraddr) == 0)) {
+		goto nxge_set_mac_addr_exit;
+	}
+	nxgep->ouraddr = *addrp;
+	/*
+	 * Set new interface local address and re-init device.
+	 * This is destructive to any other streams attached
+	 * to this device.
+	 */
+	RW_ENTER_WRITER(&nxgep->filter_lock);
+	if ((status = nxge_rx_mac_disable(nxgep)) != NXGE_OK)
+		goto fail;
+	if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK)
+		goto fail;
+
+	RW_EXIT(&nxgep->filter_lock);
+	MUTEX_EXIT(&nxgep->ouraddr_lock);
+	goto nxge_set_mac_addr_end;
+nxge_set_mac_addr_exit:
+	MUTEX_EXIT(&nxgep->ouraddr_lock);
+nxge_set_mac_addr_end:
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_set_mac_addr"));
+
+	return (NXGE_OK);
+fail:
+	MUTEX_EXIT(&nxgep->ouraddr_lock);
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_set_mac_addr: "
+			"Unable to set mac address"));
+	return (status);
+}
+
+/* Check status of MII (MIF or PCS) link */
+
+nxge_status_t
+nxge_check_mii_link(p_nxge_t nxgep)
+{
+	mii_bmsr_t bmsr_ints, bmsr_data;
+	mii_anlpar_t anlpar;
+	mii_gsr_t gsr;
+	p_mii_regs_t mii_regs;
+	nxge_status_t status = NXGE_OK;
+	uint8_t portn;
+	nxge_link_state_t link_up = LINK_NO_CHANGE;
+
+	portn = nxgep->mac.portnum;
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_check_mii_link port<%d>",
+				portn));
+
+	mii_regs = NULL;
+
+	RW_ENTER_WRITER(&nxgep->filter_lock);
+
+	if (nxgep->statsp->port_stats.lb_mode > nxge_lb_ext10)
+		goto nxge_check_mii_link_exit;
+
+	if ((status = nxge_mii_read(nxgep, nxgep->statsp->mac_stats.xcvr_portn,
+		(uint8_t)(uint64_t)(&mii_regs->bmsr),
+		&bmsr_data.value)) != NXGE_OK)
+		goto fail;
+
+	if (nxgep->param_arr[param_autoneg].value) {
+		if ((status = nxge_mii_read(nxgep,
+			nxgep->statsp->mac_stats.xcvr_portn,
+			(uint8_t)(uint64_t)(&mii_regs->gsr),
+			&gsr.value)) != NXGE_OK)
+			goto fail;
+		if ((status = nxge_mii_read(nxgep,
+			nxgep->statsp->mac_stats.xcvr_portn,
+			(uint8_t)(uint64_t)(&mii_regs->anlpar),
+			&anlpar.value)) != NXGE_OK)
+			goto fail;
+		if (nxgep->statsp->mac_stats.link_up &&
+			((nxgep->statsp->mac_stats.lp_cap_1000fdx ^
+				gsr.bits.link_1000fdx) ||
+			(nxgep->statsp->mac_stats.lp_cap_1000hdx ^
+				gsr.bits.link_1000hdx) ||
+			(nxgep->statsp->mac_stats.lp_cap_100T4 ^
+				anlpar.bits.cap_100T4) ||
+			(nxgep->statsp->mac_stats.lp_cap_100fdx ^
+				anlpar.bits.cap_100fdx) ||
+			(nxgep->statsp->mac_stats.lp_cap_100hdx ^
+				anlpar.bits.cap_100hdx) ||
+			(nxgep->statsp->mac_stats.lp_cap_10fdx ^
+				anlpar.bits.cap_10fdx) ||
+			(nxgep->statsp->mac_stats.lp_cap_10hdx ^
+				anlpar.bits.cap_10hdx))) {
+			bmsr_data.bits.link_status = 0;
+		}
+	}
+
+	/* Workaround for link down issue */
+	if (bmsr_data.value == 0) {
+		cmn_err(CE_NOTE, "!LINK DEBUG: Read zero bmsr\n");
+		goto nxge_check_mii_link_exit;
+	}
+
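+	/*
+	 * XOR the BMSR just read with the previously saved value to find
+	 * the bits that changed, then hand both to nxge_mii_check().
+	 */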
+	bmsr_ints.value = nxgep->bmsr.value ^ bmsr_data.value;
+	nxgep->bmsr.value = bmsr_data.value;
+	if ((status = nxge_mii_check(nxgep, bmsr_data, bmsr_ints, &link_up))
+			!= NXGE_OK)
+		goto fail;
+
+nxge_check_mii_link_exit:
+	RW_EXIT(&nxgep->filter_lock);
+	if (link_up == LINK_IS_UP) {
+		nxge_link_is_up(nxgep);
+	} else if (link_up == LINK_IS_DOWN) {
+		nxge_link_is_down(nxgep);
+	}
+
+	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_check_mii_link port<%d>",
+				portn));
+	return (NXGE_OK);
+
+fail:
+	RW_EXIT(&nxgep->filter_lock);
+
+	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_check_mii_link: Failed to check link port<%d>",
+			portn));
+	return (status);
+}
+
+
+/*ARGSUSED*/
+nxge_status_t
+nxge_check_10g_link(p_nxge_t nxgep)
+{
+	uint8_t		portn;
+
+	nxge_status_t	status = NXGE_OK;
+	boolean_t	link_up;
+
+	portn = nxgep->mac.portnum;
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_check_10g_link port<%d>",
+				portn));
+
+	status = nxge_check_bcm8704_link(nxgep, &link_up);
+
+	if (status != NXGE_OK)
+		goto fail;
+
+	if (link_up) {
+		if (nxgep->link_notify ||
+			nxgep->statsp->mac_stats.link_up == 0) {
+			if (nxge_10g_link_led_on(nxgep) != NXGE_OK)
+				goto fail;
+			nxgep->statsp->mac_stats.link_up = 1;
+			nxgep->statsp->mac_stats.link_speed = 10000;
+			nxgep->statsp->mac_stats.link_duplex = 2;
+
+			nxge_link_is_up(nxgep);
+			nxgep->link_notify = B_FALSE;
+		}
+	} else {
+		if (nxgep->link_notify ||
+			nxgep->statsp->mac_stats.link_up == 1) {
+			if (nxge_10g_link_led_off(nxgep) != NXGE_OK)
+				goto fail;
+			NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+					"Link down cable problem"));
+			nxgep->statsp->mac_stats.link_up = 0;
+			nxgep->statsp->mac_stats.link_speed = 0;
+			nxgep->statsp->mac_stats.link_duplex = 0;
+
+			nxge_link_is_down(nxgep);
+			nxgep->link_notify = B_FALSE;
+		}
+	}
+
+	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_check_10g_link port<%d>",
+				portn));
+	return (NXGE_OK);
+
+fail:
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_check_10g_link: Failed to check link port<%d>",
+			portn));
+	return (status);
+}
+
+
+/* Declare link down */
+
+void
+nxge_link_is_down(p_nxge_t nxgep)
+{
+	p_nxge_stats_t statsp;
+	char link_stat_msg[64];
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_link_is_down"));
+
+	statsp = nxgep->statsp;
+	(void) sprintf(link_stat_msg, "xcvr addr:0x%02x - link down",
+			statsp->mac_stats.xcvr_portn);
+
+	mac_link_update(nxgep->mach, LINK_STATE_DOWN);
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_link_is_down"));
+}
+
+/* Declare link up */
+
+void
+nxge_link_is_up(p_nxge_t nxgep)
+{
+	p_nxge_stats_t statsp;
+	char link_stat_msg[64];
+	uint32_t val;
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_link_is_up"));
+
+	statsp = nxgep->statsp;
+	(void) sprintf(link_stat_msg, "xcvr addr:0x%02x - link up %d Mbps ",
+				statsp->mac_stats.xcvr_portn,
+				statsp->mac_stats.link_speed);
+
+	if (statsp->mac_stats.link_T4)
+		(void) strcat(link_stat_msg, "T4");
+	else if (statsp->mac_stats.link_duplex == 2)
+		(void) strcat(link_stat_msg, "full duplex");
+	else
+		(void) strcat(link_stat_msg, "half duplex");
+
+	(void) nxge_xif_init(nxgep);
+
+	/* Clean up symbol errors incurred during link transition */
+	if (nxgep->mac.portmode == PORT_10G_FIBER) {
+		(void) npi_xmac_xpcs_read(nxgep->npi_handle, nxgep->mac.portnum,
+					XPCS_REG_SYMBOL_ERR_L0_1_COUNTER, &val);
+		(void) npi_xmac_xpcs_read(nxgep->npi_handle, nxgep->mac.portnum,
+					XPCS_REG_SYMBOL_ERR_L2_3_COUNTER, &val);
+	}
+
+	mac_link_update(nxgep->mach, LINK_STATE_UP);
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_link_is_up"));
+}
+
+/*
+ * Calculate the bit in the multicast address filter
+ * that selects the given address.
+ * Note: For GEM, the last 8 bits are used.
+ */
+uint32_t
+crc32_mchash(p_ether_addr_t addr)
+{
+	uint8_t *cp;
+	uint32_t crc;
+	uint32_t c;
+	int byte;
+	int bit;
+
+	cp = (uint8_t *)addr;
+	crc = (uint32_t)0xffffffff;
+	for (byte = 0; byte < 6; byte++) {
+		c = (uint32_t)cp[byte];
+		for (bit = 0; bit < 8; bit++) {
+			if ((c & 0x1) ^ (crc & 0x1))
+				crc = (crc >> 1)^0xedb88320;
+			else
+				crc = (crc >> 1);
+			c >>= 1;
+		}
+	}
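+	/* Return the HASH_BITS most significant bits of the inverted CRC */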
+	return ((~crc) >> (32 - HASH_BITS));
+}
+
+/* Reset serdes */
+
+nxge_status_t
+nxge_serdes_reset(p_nxge_t nxgep)
+{
+	npi_handle_t		handle;
+
+	handle = nxgep->npi_handle;
+
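+	/*
+	 * Assert both ESR reset bits, give the reset time to settle,
+	 * then clear the ESR config register.
+	 */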
+	ESR_REG_WR(handle, ESR_RESET_REG, ESR_RESET_0 | ESR_RESET_1);
+	drv_usecwait(500);
+	ESR_REG_WR(handle, ESR_CONFIG_REG, 0);
+
+	return (NXGE_OK);
+}
+
+/* Monitor link status using interrupt or polling */
+
+nxge_status_t
+nxge_link_monitor(p_nxge_t nxgep, link_mon_enable_t enable)
+{
+	nxge_status_t status = NXGE_OK;
+
+	/*
+	 * Skip the link check for 10G fiber ports other than port 0 or 1;
+	 * only the first two ports use the XMAC.
+	 */
+	if ((nxgep->mac.portmode == PORT_10G_FIBER) && (nxgep->mac.portnum > 1))
+		return (NXGE_OK);
+
+	if (nxgep->statsp == NULL) {
+		/* stats has not been allocated. */
+		return (NXGE_OK);
+	}
+	/* Don't check the link while the port is in a loopback mode */
+	if (nxgep->statsp->port_stats.lb_mode != nxge_lb_normal)
+		return (NXGE_OK);
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+			"==> nxge_link_monitor port<%d> enable=%d",
+			nxgep->mac.portnum, enable));
+	if (enable == LINK_MONITOR_START) {
+		if (nxgep->mac.linkchkmode == LINKCHK_INTR) {
+			if ((status = nxge_link_intr(nxgep, LINK_INTR_START))
+							!= NXGE_OK)
+				goto fail;
+		} else {
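+			/*
+			 * Polling mode: arm a one second timeout; the link
+			 * check routine restarts the monitor when it runs,
+			 * so the link is effectively polled once a second.
+			 */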
+			switch (nxgep->mac.portmode) {
+			case PORT_10G_FIBER:
+				nxgep->nxge_link_poll_timerid = timeout(
+						(fptrv_t)nxge_check_10g_link,
+						nxgep,
+						drv_usectohz(1000 * 1000));
+			break;
+
+			case PORT_1G_COPPER:
+			case PORT_1G_FIBER:
+				nxgep->nxge_link_poll_timerid = timeout(
+						(fptrv_t)nxge_check_mii_link,
+						nxgep,
+						drv_usectohz(1000 * 1000));
+			break;
+			default:
+				;
+			}
+		}
+	} else {
+		if (nxgep->mac.linkchkmode == LINKCHK_INTR) {
+			if ((status = nxge_link_intr(nxgep, LINK_INTR_STOP))
+							!= NXGE_OK)
+				goto fail;
+		} else {
+			if (nxgep->nxge_link_poll_timerid != 0) {
+				(void) untimeout(nxgep->nxge_link_poll_timerid);
+				nxgep->nxge_link_poll_timerid = 0;
+			}
+		}
+	}
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+			"<== nxge_link_monitor port<%d> enable=%d",
+			nxgep->mac.portnum, enable));
+	return (NXGE_OK);
+fail:
+	return (status);
+}
+
+/* Set promiscuous mode */
+
+nxge_status_t
+nxge_set_promisc(p_nxge_t nxgep, boolean_t on)
+{
+	nxge_status_t status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+		"==> nxge_set_promisc: on %d", on));
+
+	nxgep->filter.all_phys_cnt = ((on) ? 1 : 0);
+
+	RW_ENTER_WRITER(&nxgep->filter_lock);
+
+	if ((status = nxge_rx_mac_disable(nxgep)) != NXGE_OK) {
+		goto fail;
+	}
+	if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) {
+		goto fail;
+	}
+
+	RW_EXIT(&nxgep->filter_lock);
+
+	if (on)
+		nxgep->statsp->mac_stats.promisc = B_TRUE;
+	else
+		nxgep->statsp->mac_stats.promisc = B_FALSE;
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_set_promisc"));
+
+	return (NXGE_OK);
+fail:
+	RW_EXIT(&nxgep->filter_lock);
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_set_promisc: "
+			"Unable to set promisc (%d)", on));
+
+	return (status);
+}
+
+/*ARGSUSED*/
+uint_t
+nxge_mif_intr(void *arg1, void *arg2)
+{
+#ifdef	NXGE_DEBUG
+	p_nxge_t		nxgep = (p_nxge_t)arg2;
+#endif
+#ifdef	NXGE_MIF
+	p_nxge_ldv_t		ldvp = (p_nxge_ldv_t)arg1;
+	uint32_t		status;
+	npi_handle_t		handle;
+	uint8_t			portn;
+	p_nxge_stats_t		statsp;
+#endif
+
+#ifdef	NXGE_MIF
+	if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
+		nxgep = ldvp->nxgep;
+	}
+#endif
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_mif_intr"));
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_mif_intr"));
+	return (DDI_INTR_CLAIMED);
+
+mif_intr_fail:
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_mif_intr"));
+	return (DDI_INTR_UNCLAIMED);
+}
+
+/*ARGSUSED*/
+uint_t
+nxge_mac_intr(void *arg1, void *arg2)
+{
+	p_nxge_t		nxgep = (p_nxge_t)arg2;
+	p_nxge_ldv_t		ldvp = (p_nxge_ldv_t)arg1;
+	p_nxge_ldg_t		ldgp;
+	uint32_t		status;
+	npi_handle_t		handle;
+	uint8_t			portn;
+	p_nxge_stats_t		statsp;
+	npi_status_t		rs = NPI_SUCCESS;
+
+	if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
+		nxgep = ldvp->nxgep;
+	}
+
+	ldgp = ldvp->ldgp;
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_mac_intr: "
+		"group %d", ldgp->ldg));
+
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	/*
+	 * This interrupt handler is for a specific
+	 * mac port.
+	 */
+	statsp = (p_nxge_stats_t)nxgep->statsp;
+	portn = nxgep->mac.portnum;
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL,
+		"==> nxge_mac_intr: reading mac stats: port<%d>", portn));
+
+	if (nxgep->mac.porttype == PORT_TYPE_XMAC) {
+		rs = npi_xmac_tx_get_istatus(handle, portn,
+					(xmac_tx_iconfig_t *)&status);
+		if (rs != NPI_SUCCESS)
+			goto npi_fail;
+		if (status & ICFG_XMAC_TX_ALL) {
+			if (status & ICFG_XMAC_TX_UNDERRUN) {
+				statsp->xmac_stats.tx_underflow_err++;
+				NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+					NXGE_FM_EREPORT_TXMAC_UNDERFLOW);
+			}
+			if (status & ICFG_XMAC_TX_MAX_PACKET_ERR) {
+				statsp->xmac_stats.tx_maxpktsize_err++;
+				NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+					NXGE_FM_EREPORT_TXMAC_MAX_PKT_ERR);
+			}
+			if (status & ICFG_XMAC_TX_OVERFLOW) {
+				statsp->xmac_stats.tx_overflow_err++;
+				NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+					NXGE_FM_EREPORT_TXMAC_OVERFLOW);
+			}
+			if (status & ICFG_XMAC_TX_FIFO_XFR_ERR) {
+				statsp->xmac_stats.tx_fifo_xfr_err++;
+				NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+					NXGE_FM_EREPORT_TXMAC_TXFIFO_XFR_ERR);
+			}
+			if (status & ICFG_XMAC_TX_BYTE_CNT_EXP) {
+				statsp->xmac_stats.tx_byte_cnt +=
+							XTXMAC_BYTE_CNT_MASK;
+			}
+			if (status & ICFG_XMAC_TX_FRAME_CNT_EXP) {
+				statsp->xmac_stats.tx_frame_cnt +=
+							XTXMAC_FRM_CNT_MASK;
+			}
+		}
+
+		rs = npi_xmac_rx_get_istatus(handle, portn,
+					(xmac_rx_iconfig_t *)&status);
+		if (rs != NPI_SUCCESS)
+			goto npi_fail;
+		if (status & ICFG_XMAC_RX_ALL) {
+			if (status & ICFG_XMAC_RX_OVERFLOW)
+				statsp->xmac_stats.rx_overflow_err++;
+			if (status & ICFG_XMAC_RX_UNDERFLOW) {
+				statsp->xmac_stats.rx_underflow_err++;
+				NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+					NXGE_FM_EREPORT_RXMAC_UNDERFLOW);
+			}
+			if (status & ICFG_XMAC_RX_CRC_ERR_CNT_EXP) {
+				statsp->xmac_stats.rx_crc_err_cnt +=
+							XRXMAC_CRC_ER_CNT_MASK;
+				NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+					NXGE_FM_EREPORT_RXMAC_CRC_ERRCNT_EXP);
+			}
+			if (status & ICFG_XMAC_RX_LEN_ERR_CNT_EXP) {
+				statsp->xmac_stats.rx_len_err_cnt +=
+							MAC_LEN_ER_CNT_MASK;
+				NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+				NXGE_FM_EREPORT_RXMAC_LENGTH_ERRCNT_EXP);
+			}
+			if (status & ICFG_XMAC_RX_VIOL_ERR_CNT_EXP) {
+				statsp->xmac_stats.rx_viol_err_cnt +=
+							XRXMAC_CD_VIO_CNT_MASK;
+				NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+					NXGE_FM_EREPORT_RXMAC_VIOL_ERRCNT_EXP);
+			}
+			if (status & ICFG_XMAC_RX_OCT_CNT_EXP) {
+				statsp->xmac_stats.rx_byte_cnt +=
+							XRXMAC_BT_CNT_MASK;
+			}
+			if (status & ICFG_XMAC_RX_HST_CNT1_EXP) {
+				statsp->xmac_stats.rx_hist1_cnt +=
+							XRXMAC_HIST_CNT1_MASK;
+			}
+			if (status & ICFG_XMAC_RX_HST_CNT2_EXP) {
+				statsp->xmac_stats.rx_hist2_cnt +=
+							XRXMAC_HIST_CNT2_MASK;
+			}
+			if (status & ICFG_XMAC_RX_HST_CNT3_EXP) {
+				statsp->xmac_stats.rx_hist3_cnt +=
+							XRXMAC_HIST_CNT3_MASK;
+			}
+			if (status & ICFG_XMAC_RX_HST_CNT4_EXP) {
+				statsp->xmac_stats.rx_hist4_cnt +=
+							XRXMAC_HIST_CNT4_MASK;
+			}
+			if (status & ICFG_XMAC_RX_HST_CNT5_EXP) {
+				statsp->xmac_stats.rx_hist5_cnt +=
+							XRXMAC_HIST_CNT5_MASK;
+			}
+			if (status & ICFG_XMAC_RX_HST_CNT6_EXP) {
+				statsp->xmac_stats.rx_hist6_cnt +=
+							XRXMAC_HIST_CNT6_MASK;
+			}
+			if (status & ICFG_XMAC_RX_BCAST_CNT_EXP) {
+				statsp->xmac_stats.rx_broadcast_cnt +=
+							XRXMAC_BC_FRM_CNT_MASK;
+			}
+			if (status & ICFG_XMAC_RX_MCAST_CNT_EXP) {
+				statsp->xmac_stats.rx_mult_cnt +=
+							XRXMAC_MC_FRM_CNT_MASK;
+			}
+			if (status & ICFG_XMAC_RX_FRAG_CNT_EXP) {
+				statsp->xmac_stats.rx_frag_cnt +=
+							XRXMAC_FRAG_CNT_MASK;
+				NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+					NXGE_FM_EREPORT_RXMAC_RXFRAG_CNT_EXP);
+			}
+			if (status & ICFG_XMAC_RX_ALIGNERR_CNT_EXP) {
+				statsp->xmac_stats.rx_frame_align_err_cnt +=
+							XRXMAC_AL_ER_CNT_MASK;
+				NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+					NXGE_FM_EREPORT_RXMAC_ALIGN_ECNT_EXP);
+			}
+			if (status & ICFG_XMAC_RX_LINK_FLT_CNT_EXP) {
+				statsp->xmac_stats.rx_linkfault_err_cnt +=
+							XMAC_LINK_FLT_CNT_MASK;
+				NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+				NXGE_FM_EREPORT_RXMAC_LINKFAULT_CNT_EXP);
+			}
+			if (status & ICFG_XMAC_RX_REMOTE_FLT_DET) {
+				statsp->xmac_stats.rx_remotefault_err++;
+			}
+			if (status & ICFG_XMAC_RX_LOCAL_FLT_DET) {
+				statsp->xmac_stats.rx_localfault_err++;
+			}
+		}
+
+		rs = npi_xmac_ctl_get_istatus(handle, portn,
+						(xmac_ctl_iconfig_t *)&status);
+		if (rs != NPI_SUCCESS)
+			goto npi_fail;
+		if (status & ICFG_XMAC_CTRL_ALL) {
+			if (status & ICFG_XMAC_CTRL_PAUSE_RCVD)
+				statsp->xmac_stats.rx_pause_cnt++;
+			if (status & ICFG_XMAC_CTRL_PAUSE_STATE)
+				statsp->xmac_stats.tx_pause_state++;
+			if (status & ICFG_XMAC_CTRL_NOPAUSE_STATE)
+				statsp->xmac_stats.tx_nopause_state++;
+		}
+	} else if (nxgep->mac.porttype == PORT_TYPE_BMAC) {
+		rs = npi_bmac_tx_get_istatus(handle, portn,
+						(bmac_tx_iconfig_t *)&status);
+		if (rs != NPI_SUCCESS)
+			goto npi_fail;
+		if (status & ICFG_BMAC_TX_ALL) {
+			if (status & ICFG_BMAC_TX_UNDERFLOW) {
+				statsp->bmac_stats.tx_underrun_err++;
+				NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+					NXGE_FM_EREPORT_TXMAC_UNDERFLOW);
+			}
+			if (status & ICFG_BMAC_TX_MAXPKTSZ_ERR) {
+				statsp->bmac_stats.tx_max_pkt_err++;
+				NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+					NXGE_FM_EREPORT_TXMAC_MAX_PKT_ERR);
+			}
+			if (status & ICFG_BMAC_TX_BYTE_CNT_EXP) {
+				statsp->bmac_stats.tx_byte_cnt +=
+							BTXMAC_BYTE_CNT_MASK;
+			}
+			if (status & ICFG_BMAC_TX_FRAME_CNT_EXP) {
+				statsp->bmac_stats.tx_frame_cnt +=
+							BTXMAC_FRM_CNT_MASK;
+			}
+		}
+
+		rs = npi_bmac_rx_get_istatus(handle, portn,
+						(bmac_rx_iconfig_t *)&status);
+		if (rs != NPI_SUCCESS)
+			goto npi_fail;
+		if (status & ICFG_BMAC_RX_ALL) {
+			if (status & ICFG_BMAC_RX_OVERFLOW) {
+				statsp->bmac_stats.rx_overflow_err++;
+			}
+			if (status & ICFG_BMAC_RX_FRAME_CNT_EXP) {
+				statsp->bmac_stats.rx_frame_cnt +=
+							RXMAC_FRM_CNT_MASK;
+			}
+			if (status & ICFG_BMAC_RX_CRC_ERR_CNT_EXP) {
+				statsp->bmac_stats.rx_crc_err_cnt +=
+							BMAC_CRC_ER_CNT_MASK;
+				NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+					NXGE_FM_EREPORT_RXMAC_CRC_ERRCNT_EXP);
+			}
+			if (status & ICFG_BMAC_RX_LEN_ERR_CNT_EXP) {
+				statsp->bmac_stats.rx_len_err_cnt +=
+							MAC_LEN_ER_CNT_MASK;
+				NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+				NXGE_FM_EREPORT_RXMAC_LENGTH_ERRCNT_EXP);
+			}
+			if (status & ICFG_BMAC_RX_VIOL_ERR_CNT_EXP) {
+				statsp->bmac_stats.rx_viol_err_cnt +=
+							BMAC_CD_VIO_CNT_MASK;
+				NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+					NXGE_FM_EREPORT_RXMAC_VIOL_ERRCNT_EXP);
+			}
+			if (status & ICFG_BMAC_RX_BYTE_CNT_EXP) {
+				statsp->bmac_stats.rx_byte_cnt +=
+							BRXMAC_BYTE_CNT_MASK;
+			}
+			if (status & ICFG_BMAC_RX_ALIGNERR_CNT_EXP) {
+				statsp->bmac_stats.rx_align_err_cnt +=
+							BMAC_AL_ER_CNT_MASK;
+				NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+					NXGE_FM_EREPORT_RXMAC_ALIGN_ECNT_EXP);
+			}
+		}
+
+		rs = npi_bmac_ctl_get_istatus(handle, portn,
+						(bmac_ctl_iconfig_t *)&status);
+		if (rs != NPI_SUCCESS)
+			goto npi_fail;
+		if (status & ICFG_BMAC_CTL_ALL) {
+			if (status & ICFG_BMAC_CTL_RCVPAUSE)
+				statsp->bmac_stats.rx_pause_cnt++;
+			if (status & ICFG_BMAC_CTL_INPAUSE_ST)
+				statsp->bmac_stats.tx_pause_state++;
+			if (status & ICFG_BMAC_CTL_INNOTPAUSE_ST)
+				statsp->bmac_stats.tx_nopause_state++;
+		}
+	}
+
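+	/*
+	 * If this MAC is the only logical device in the group, re-arm
+	 * the logical device group so further interrupts can be posted.
+	 */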
+	if (ldgp->nldvs == 1) {
+		(void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg,
+			B_TRUE, ldgp->ldg_timer);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_mac_intr"));
+	return (DDI_INTR_CLAIMED);
+
+npi_fail:
+	NXGE_ERROR_MSG((nxgep, INT_CTL, "<== nxge_mac_intr"));
+	return (DDI_INTR_UNCLAIMED);
+}
+
+nxge_status_t
+nxge_check_bcm8704_link(p_nxge_t nxgep, boolean_t *link_up)
+{
+	uint8_t		phy_port_addr;
+	nxge_status_t	status = NXGE_OK;
+	boolean_t	rx_sig_ok;
+	boolean_t	pcs_blk_lock;
+	boolean_t	link_align;
+	uint16_t	val1, val2, val3;
+#ifdef	NXGE_DEBUG_SYMBOL_ERR
+	uint16_t	val_debug;
+	uint16_t	val;
+#endif
+
+	phy_port_addr = nxgep->statsp->mac_stats.xcvr_portn;
+
+#ifdef	NXGE_DEBUG_SYMBOL_ERR
+	/* Check Device 3 Register Device 3 0xC809 */
+	(void) nxge_mdio_read(nxgep, phy_port_addr, 0x3, 0xC809, &val_debug);
+	if ((val_debug & ~0x200) != 0) {
+		cmn_err(CE_NOTE, "!Port%d BCM8704 Dev3 Reg 0xc809 = 0x%x\n",
+				nxgep->mac.portnum, val_debug);
+		(void) nxge_mdio_read(nxgep, phy_port_addr, 0x4, 0x18,
+				&val_debug);
+		cmn_err(CE_NOTE, "!Port%d BCM8704 Dev4 Reg 0x18 = 0x%x\n",
+				nxgep->mac.portnum, val_debug);
+	}
+
+	(void) npi_xmac_xpcs_read(nxgep->npi_handle, nxgep->mac.portnum,
+					XPCS_REG_DESCWERR_COUNTER, &val);
+	if (val != 0)
+		cmn_err(CE_NOTE, "!XPCS DESCWERR = 0x%x\n", val);
+
+	(void) npi_xmac_xpcs_read(nxgep->npi_handle, nxgep->mac.portnum,
+					XPCS_REG_SYMBOL_ERR_L0_1_COUNTER, &val);
+	if (val != 0)
+		cmn_err(CE_NOTE, "!XPCS SYMBOL_ERR_L0_1 = 0x%x\n", val);
+
+	(void) npi_xmac_xpcs_read(nxgep->npi_handle, nxgep->mac.portnum,
+					XPCS_REG_SYMBOL_ERR_L2_3_COUNTER, &val);
+	if (val != 0)
+		cmn_err(CE_NOTE, "!XPCS SYMBOL_ERR_L2_3 = 0x%x\n", val);
+#endif
+
+	/* Check from BCM8704 if 10G link is up or down */
+
+	/* Check Device 1 Register 0xA bit0 */
+	status = nxge_mdio_read(nxgep, phy_port_addr,
+			BCM8704_PMA_PMD_DEV_ADDR,
+			BCM8704_PMD_RECEIVE_SIG_DETECT,
+			&val1);
+	if (status != NXGE_OK)
+		goto fail;
+	rx_sig_ok = ((val1 & GLOB_PMD_RX_SIG_OK) ? B_TRUE : B_FALSE);
+
+	/* Check Device 3 Register 0x20 bit0 */
+	if ((status = nxge_mdio_read(nxgep, phy_port_addr,
+			BCM8704_PCS_DEV_ADDR,
+			BCM8704_10GBASE_R_PCS_STATUS_REG,
+			&val2)) != NXGE_OK)
+		goto fail;
+	pcs_blk_lock = ((val2 & PCS_10GBASE_R_PCS_BLK_LOCK) ? B_TRUE : B_FALSE);
+
+	/* Check Device 4 Register 0x18 bit12 */
+	status = nxge_mdio_read(nxgep, phy_port_addr,
+			BCM8704_PHYXS_ADDR,
+			BCM8704_PHYXS_XGXS_LANE_STATUS_REG,
+			&val3);
+	if (status != NXGE_OK)
+		goto fail;
+	link_align = (val3 == (XGXS_LANE_ALIGN_STATUS | XGXS_LANE3_SYNC |
+				XGXS_LANE2_SYNC | XGXS_LANE1_SYNC |
+				XGXS_LANE0_SYNC | 0x400)) ? B_TRUE : B_FALSE;
+
+#ifdef	NXGE_DEBUG_ALIGN_ERR
+	/* Temp workaround for link down issue */
+	if (pcs_blk_lock == B_FALSE) {
+		if (val2 != 0x4) {
+			pcs_blk_lock = B_TRUE;
+			cmn_err(CE_NOTE,
+				"!LINK DEBUG: port%d PHY Dev3 "
+				"Reg 0x20 = 0x%x\n",
+				nxgep->mac.portnum, val2);
+		}
+	}
+
+	if (link_align == B_FALSE) {
+		if (val3 != 0x140f) {
+			link_align = B_TRUE;
+			cmn_err(CE_NOTE,
+				"!LINK DEBUG: port%d PHY Dev4 "
+				"Reg 0x18 = 0x%x\n",
+				nxgep->mac.portnum, val3);
+		}
+	}
+
+	if (rx_sig_ok == B_FALSE) {
+		if ((val2 == 0) || (val3 == 0)) {
+			rx_sig_ok = B_TRUE;
+			cmn_err(CE_NOTE,
+				"!LINK DEBUG: port %d Dev3 or Dev4 read zero\n",
+				nxgep->mac.portnum);
+		}
+	}
+#endif
+
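+	/*
+	 * The link is reported up only when the PMD sees a receive signal,
+	 * the 10GBASE-R PCS block is locked, and all four XGXS lanes are
+	 * aligned and in sync.
+	 */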
+	*link_up = ((rx_sig_ok == B_TRUE) && (pcs_blk_lock == B_TRUE) &&
+			(link_align == B_TRUE)) ? B_TRUE : B_FALSE;
+
+	return (NXGE_OK);
+fail:
+	return (status);
+}
+
+
+nxge_status_t
+nxge_get_xcvr_type(p_nxge_t nxgep)
+{
+	nxge_status_t status = NXGE_OK;
+
+#if defined(_BIG_ENDIAN)
+	char *prop_val;
+
+	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, nxgep->dip, 0,
+		"phy-type", &prop_val) == DDI_PROP_SUCCESS) {
+		if (strcmp("xgf", prop_val) == 0) {
+			nxgep->statsp->mac_stats.xcvr_inuse = XPCS_XCVR;
+			nxgep->mac.portmode = PORT_10G_FIBER;
+			NXGE_DEBUG_MSG((nxgep, MAC_CTL, "10G Fiber Xcvr"));
+		} else if (strcmp("mif", prop_val)	== 0) {
+			nxgep->statsp->mac_stats.xcvr_inuse = INT_MII_XCVR;
+			nxgep->mac.portmode = PORT_1G_COPPER;
+			NXGE_DEBUG_MSG((nxgep, MAC_CTL, "1G Copper Xcvr"));
+		} else if (strcmp("pcs", prop_val) == 0) {
+			nxgep->statsp->mac_stats.xcvr_inuse = PCS_XCVR;
+			nxgep->mac.portmode = PORT_1G_FIBER;
+			NXGE_DEBUG_MSG((nxgep, MAC_CTL, "1G Fiber Xcvr"));
+		} else if (strcmp("xgc", prop_val) == 0) {
+			nxgep->statsp->mac_stats.xcvr_inuse = XPCS_XCVR;
+			nxgep->mac.portmode = PORT_10G_COPPER;
+			NXGE_DEBUG_MSG((nxgep, MAC_CTL, "10G Copper Xcvr"));
+		} else {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+					    "Unknown phy-type: %s",
+					    prop_val));
+			ddi_prop_free(prop_val);
+			return (NXGE_ERROR);
+		}
+		status = NXGE_OK;
+		(void) ddi_prop_update_string(DDI_DEV_T_NONE, nxgep->dip,
+						    "phy-type", prop_val);
+		ddi_prop_free(prop_val);
+	} else {
+		/*
+		 * This should really be an error. But for now default
+		 * this to 10G fiber.
+		 */
+		if (nxgep->niu_type == N2_NIU) {
+			nxgep->statsp->mac_stats.xcvr_inuse = XPCS_XCVR;
+			nxgep->mac.portmode = PORT_10G_FIBER;
+			NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+					    "Cannot find phy-type: "
+					    " Default to 10G Fiber Xcvr"));
+			status = NXGE_OK;
+		} else {
+			NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+					    "Cannot get phy-type"));
+			return (NXGE_ERROR);
+		}
+	}
+#else
+	status = nxge_espc_phy_type_get(nxgep);
+#endif
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_get_xcvr_type"));
+	return (status);
+}
+
+nxge_status_t
+nxge_10g_link_led_on(p_nxge_t nxgep)
+{
+	if (npi_xmac_xif_led(nxgep->npi_handle, nxgep->mac.portnum, B_TRUE)
+							!= NPI_SUCCESS)
+		return (NXGE_ERROR);
+	else
+		return (NXGE_OK);
+}
+
+nxge_status_t
+nxge_10g_link_led_off(p_nxge_t nxgep)
+{
+	if (npi_xmac_xif_led(nxgep->npi_handle, nxgep->mac.portnum, B_FALSE)
+							!= NPI_SUCCESS)
+		return (NXGE_ERROR);
+	else
+		return (NXGE_OK);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nxge/nxge_main.c	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,4752 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+/*
+ * SunOS MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver.
+ */
+#include	<sys/nxge/nxge_impl.h>
+#include	<sys/pcie.h>
+
+uint32_t 	nxge_use_partition = 0;		/* debug partition flag */
+uint32_t 	nxge_dma_obp_props_only = 1;	/* use obp published props */
+uint32_t 	nxge_use_rdc_intr = 1;		/* debug to assign rdc intr */
+/*
+ * Until MSI-X is supported, assume MSI; set to 2 for MSI-X.
+ */
+uint32_t	nxge_msi_enable = 1;		/* debug: turn msi off */
+
+/*
+ * Globals: tunable parameters (/etc/system or adb)
+ *
+ */
+uint32_t 	nxge_rbr_size = NXGE_RBR_RBB_DEFAULT;
+uint32_t 	nxge_rbr_spare_size = 0;
+uint32_t 	nxge_rcr_size = NXGE_RCR_DEFAULT;
+uint32_t 	nxge_tx_ring_size = NXGE_TX_RING_DEFAULT;
+uint32_t 	nxge_no_msg = 0;		/* control message display */
+uint32_t 	nxge_no_link_notify = 0;	/* control DL_NOTIFY */
+uint32_t 	nxge_bcopy_thresh = TX_BCOPY_MAX;
+uint32_t 	nxge_dvma_thresh = TX_FASTDVMA_MIN;
+uint32_t 	nxge_dma_stream_thresh = TX_STREAM_MIN;
+uint32_t	nxge_jumbo_mtu	= TX_JUMBO_MTU;
+boolean_t	nxge_jumbo_enable = B_FALSE;
+uint16_t	nxge_rcr_timeout = NXGE_RDC_RCR_TIMEOUT;
+uint16_t	nxge_rcr_threshold = NXGE_RDC_RCR_THRESHOLD;
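+
+/*
+ * For example (values are illustrative only), these can be overridden
+ * from /etc/system:
+ *	set nxge:nxge_rbr_size = 2048
+ *	set nxge:nxge_jumbo_enable = 1
+ */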
+
+/*
+ * Debugging flags:
+ *		nxge_no_tx_lb : transmit load balancing
+ *		nxge_tx_lb_policy: 0 - TCP port (default)
+ *				   3 - DEST MAC
+ */
+uint32_t 	nxge_no_tx_lb = 0;
+uint32_t 	nxge_tx_lb_policy = NXGE_TX_LB_TCPUDP;
+
+/*
+ * Tunable to limit the amount of time spent in the
+ * ISR doing Rx processing.
+ */
+uint32_t nxge_max_rx_pkts = 1024;
+
+/*
+ * Tunables to manage the receive buffer blocks.
+ *
+ * nxge_rx_threshold_hi: copy all buffers.
+ * nxge_rx_bcopy_size_type: receive buffer block size type.
+ * nxge_rx_threshold_lo: copy only up to tunable block size type.
+ */
+nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6;
+nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
+nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3;
+
+rtrace_t npi_rtracebuf;
+
+#if	defined(sun4v)
+/*
+ * Hypervisor N2/NIU services information.
+ */
+static hsvc_info_t niu_hsvc = {
+	HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER,
+	NIU_MINOR_VER, "nxge"
+};
+#endif
+
+/*
+ * Function Prototypes
+ */
+static int nxge_attach(dev_info_t *, ddi_attach_cmd_t);
+static int nxge_detach(dev_info_t *, ddi_detach_cmd_t);
+static void nxge_unattach(p_nxge_t);
+
+#if NXGE_PROPERTY
+static void nxge_remove_hard_properties(p_nxge_t);
+#endif
+
+static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t);
+
+static nxge_status_t nxge_setup_mutexes(p_nxge_t);
+static void nxge_destroy_mutexes(p_nxge_t);
+
+static nxge_status_t nxge_map_regs(p_nxge_t nxgep);
+static void nxge_unmap_regs(p_nxge_t nxgep);
+#ifdef	NXGE_DEBUG
+static void nxge_test_map_regs(p_nxge_t nxgep);
+#endif
+
+static nxge_status_t nxge_add_intrs(p_nxge_t nxgep);
+static nxge_status_t nxge_add_soft_intrs(p_nxge_t nxgep);
+static void nxge_remove_intrs(p_nxge_t nxgep);
+static void nxge_remove_soft_intrs(p_nxge_t nxgep);
+
+static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep);
+static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t);
+static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t);
+static void nxge_intrs_enable(p_nxge_t nxgep);
+static void nxge_intrs_disable(p_nxge_t nxgep);
+
+static void nxge_suspend(p_nxge_t);
+static nxge_status_t nxge_resume(p_nxge_t);
+
+static nxge_status_t nxge_setup_dev(p_nxge_t);
+static void nxge_destroy_dev(p_nxge_t);
+
+static nxge_status_t nxge_alloc_mem_pool(p_nxge_t);
+static void nxge_free_mem_pool(p_nxge_t);
+
+static nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
+static void nxge_free_rx_mem_pool(p_nxge_t);
+
+static nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
+static void nxge_free_tx_mem_pool(p_nxge_t);
+
+static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t,
+	struct ddi_dma_attr *,
+	size_t, ddi_device_acc_attr_t *, uint_t,
+	p_nxge_dma_common_t);
+
+static void nxge_dma_mem_free(p_nxge_dma_common_t);
+
+static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t,
+	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
+static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);
+
+static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t,
+	p_nxge_dma_common_t *, size_t);
+static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);
+
+static nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t,
+	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
+static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);
+
+static nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t,
+	p_nxge_dma_common_t *,
+	size_t);
+static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);
+
+static int nxge_init_common_dev(p_nxge_t);
+static void nxge_uninit_common_dev(p_nxge_t);
+
+/*
+ * The next declarations are for the GLDv3 interface.
+ */
+static int nxge_m_start(void *);
+static void nxge_m_stop(void *);
+static int nxge_m_unicst(void *, const uint8_t *);
+static int nxge_m_multicst(void *, boolean_t, const uint8_t *);
+static int nxge_m_promisc(void *, boolean_t);
+static void nxge_m_ioctl(void *, queue_t *, mblk_t *);
+static void nxge_m_resources(void *);
+mblk_t *nxge_m_tx(void *arg, mblk_t *);
+static nxge_status_t nxge_mac_register(p_nxge_t);
+static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr,
+	mac_addr_slot_t slot);
+static void nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot,
+	boolean_t factory);
+static int nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr);
+static int nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr);
+static int nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot);
+static int nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr);
+static int nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr);
+
+#define	NXGE_NEPTUNE_MAGIC	0x4E584745UL
+#define	MAX_DUMP_SZ 256
+
+#define	NXGE_M_CALLBACK_FLAGS	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB)
+
+static	boolean_t	nxge_m_getcapab(void *, mac_capab_t, void *);
+static mac_callbacks_t nxge_m_callbacks = {
+	NXGE_M_CALLBACK_FLAGS,
+	nxge_m_stat,
+	nxge_m_start,
+	nxge_m_stop,
+	nxge_m_promisc,
+	nxge_m_multicst,
+	nxge_m_unicst,
+	nxge_m_tx,
+	nxge_m_resources,
+	nxge_m_ioctl,
+	nxge_m_getcapab
+};
+
+void
+nxge_err_inject(p_nxge_t, queue_t *, mblk_t *);
+
+/*
+ * These global variables control the message
+ * output.
+ */
+out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG;
+uint64_t nxge_debug_level = 0;
+
+/*
+ * This list contains the instance structures for the Neptune
+ * devices present in the system. The lock exists to guarantee
+ * mutually exclusive access to the list.
+ */
+void 			*nxge_list = NULL;
+
+void			*nxge_hw_list = NULL;
+nxge_os_mutex_t 	nxge_common_lock;
+
+nxge_os_mutex_t		nxge_mii_lock;
+static uint32_t		nxge_mii_lock_init = 0;
+nxge_os_mutex_t		nxge_mdio_lock;
+static uint32_t		nxge_mdio_lock_init = 0;
+
+extern uint64_t 	npi_debug_level;
+
+extern nxge_status_t	nxge_ldgv_init(p_nxge_t, int *, int *);
+extern nxge_status_t	nxge_ldgv_init_n2(p_nxge_t, int *, int *);
+extern nxge_status_t	nxge_ldgv_uninit(p_nxge_t);
+extern nxge_status_t	nxge_intr_ldgv_init(p_nxge_t);
+extern void		nxge_fm_init(p_nxge_t,
+					ddi_device_acc_attr_t *,
+					ddi_device_acc_attr_t *,
+					ddi_dma_attr_t *);
+extern void		nxge_fm_fini(p_nxge_t);
+extern npi_status_t	npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);
+
+/*
+ * Count used to maintain the number of buffers being used
+ * by Neptune instances and loaned up to the upper layers.
+ */
+uint32_t nxge_mblks_pending = 0;
+
+/*
+ * Device register access attributes for PIO.
+ */
+static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = {
+	DDI_DEVICE_ATTR_V0,
+	DDI_STRUCTURE_LE_ACC,
+	DDI_STRICTORDER_ACC,
+};
+
+/*
+ * Device descriptor access attributes for DMA.
+ */
+static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = {
+	DDI_DEVICE_ATTR_V0,
+	DDI_STRUCTURE_LE_ACC,
+	DDI_STRICTORDER_ACC
+};
+
+/*
+ * Device buffer access attributes for DMA.
+ */
+static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = {
+	DDI_DEVICE_ATTR_V0,
+	DDI_STRUCTURE_BE_ACC,
+	DDI_STRICTORDER_ACC
+};
+
+ddi_dma_attr_t nxge_desc_dma_attr = {
+	DMA_ATTR_V0,		/* version number. */
+	0,			/* low address */
+	0xffffffffffffffff,	/* high address */
+	0xffffffffffffffff,	/* address counter max */
+#ifndef NIU_PA_WORKAROUND
+	0x100000,		/* alignment */
+#else
+	0x2000,
+#endif
+	0xfc00fc,		/* dlim_burstsizes */
+	0x1,			/* minimum transfer size */
+	0xffffffffffffffff,	/* maximum transfer size */
+	0xffffffffffffffff,	/* maximum segment size */
+	1,			/* scatter/gather list length */
+	(unsigned int) 1,	/* granularity */
+	0			/* attribute flags */
+};
+
+ddi_dma_attr_t nxge_tx_dma_attr = {
+	DMA_ATTR_V0,		/* version number. */
+	0,			/* low address */
+	0xffffffffffffffff,	/* high address */
+	0xffffffffffffffff,	/* address counter max */
+#if defined(_BIG_ENDIAN)
+	0x2000,			/* alignment */
+#else
+	0x1000,			/* alignment */
+#endif
+	0xfc00fc,		/* dlim_burstsizes */
+	0x1,			/* minimum transfer size */
+	0xffffffffffffffff,	/* maximum transfer size */
+	0xffffffffffffffff,	/* maximum segment size */
+	5,			/* scatter/gather list length */
+	(unsigned int) 1,	/* granularity */
+	0			/* attribute flags */
+};
+
+ddi_dma_attr_t nxge_rx_dma_attr = {
+	DMA_ATTR_V0,		/* version number. */
+	0,			/* low address */
+	0xffffffffffffffff,	/* high address */
+	0xffffffffffffffff,	/* address counter max */
+	0x2000,			/* alignment */
+	0xfc00fc,		/* dlim_burstsizes */
+	0x1,			/* minimum transfer size */
+	0xffffffffffffffff,	/* maximum transfer size */
+	0xffffffffffffffff,	/* maximum segment size */
+	1,			/* scatter/gather list length */
+	(unsigned int) 1,	/* granularity */
+	0			/* attribute flags */
+};
+
+ddi_dma_lim_t nxge_dma_limits = {
+	(uint_t)0,		/* dlim_addr_lo */
+	(uint_t)0xffffffff,	/* dlim_addr_hi */
+	(uint_t)0xffffffff,	/* dlim_cntr_max */
+	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
+	0x1,			/* dlim_minxfer */
+	1024			/* dlim_speed */
+};
+
+dma_method_t nxge_force_dma = DVMA;
+
+/*
+ * DMA chunk sizes.
+ *
+ * Try to allocate the largest possible size
+ * so that fewer DMA chunks need to be managed.
+ */
+#ifdef NIU_PA_WORKAROUND
+size_t alloc_sizes [] = {0x2000};
+#else
+size_t alloc_sizes [] = {0x1000, 0x2000, 0x4000, 0x8000,
+		0x10000, 0x20000, 0x40000, 0x80000,
+		0x100000, 0x200000, 0x400000, 0x800000, 0x1000000};
+#endif
+
+/*
+ * nxge_attach - device attach/resume entry point (attach(9E)).
+ */
+
+static int
+nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
+{
+	p_nxge_t	nxgep = NULL;
+	int		instance;
+	int		status = DDI_SUCCESS;
+	nxge_status_t	nxge_status = NXGE_OK;
+	uint8_t		portn;
+	nxge_mmac_t	*mmac_info;
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach"));
+
+	/*
+	 * Get the device instance since we'll need to setup
+	 * or retrieve a soft state for this instance.
+	 */
+	instance = ddi_get_instance(dip);
+
+	switch (cmd) {
+	case DDI_ATTACH:
+		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH"));
+		break;
+
+	case DDI_RESUME:
+		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME"));
+		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
+		if (nxgep == NULL) {
+			status = DDI_FAILURE;
+			break;
+		}
+		if (nxgep->dip != dip) {
+			status = DDI_FAILURE;
+			break;
+		}
+		if (nxgep->suspended == DDI_PM_SUSPEND) {
+			status = ddi_dev_is_needed(nxgep->dip, 0, 1);
+		} else {
+			nxge_status = nxge_resume(nxgep);
+		}
+		goto nxge_attach_exit;
+
+	case DDI_PM_RESUME:
+		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME"));
+		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
+		if (nxgep == NULL) {
+			status = DDI_FAILURE;
+			break;
+		}
+		if (nxgep->dip != dip) {
+			status = DDI_FAILURE;
+			break;
+		}
+		nxge_status = nxge_resume(nxgep);
+		goto nxge_attach_exit;
+
+	default:
+		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown"));
+		status = DDI_FAILURE;
+		goto nxge_attach_exit;
+	}
+
+
+	if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) {
+		status = DDI_FAILURE;
+		goto nxge_attach_exit;
+	}
+
+	nxgep = ddi_get_soft_state(nxge_list, instance);
+	if (nxgep == NULL) {
+		goto nxge_attach_fail;
+	}
+
+	nxgep->drv_state = 0;
+	nxgep->dip = dip;
+	nxgep->instance = instance;
+	nxgep->p_dip = ddi_get_parent(dip);
+	nxgep->nxge_debug_level = nxge_debug_level;
+	npi_debug_level = nxge_debug_level;
+
+	nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr, &nxge_dev_desc_dma_acc_attr,
+				&nxge_rx_dma_attr);
+
+	status = nxge_map_regs(nxgep);
+	if (status != NXGE_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed"));
+		goto nxge_attach_fail;
+	}
+
+	status = nxge_init_common_dev(nxgep);
+	if (status != NXGE_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_init_common_dev failed"));
+		goto nxge_attach_fail;
+	}
+
+	portn = NXGE_GET_PORT_NUM(nxgep->function_num);
+	nxgep->mac.portnum = portn;
+	if ((portn == 0) || (portn == 1))
+		nxgep->mac.porttype = PORT_TYPE_XMAC;
+	else
+		nxgep->mac.porttype = PORT_TYPE_BMAC;
+	/*
+	 * Neptune has 4 ports; the first 2 ports use the XMAC (10G MAC)
+	 * internally, and the other 2 ports use the BMAC (1G "Big" MAC).
+	 * The two types of MACs have different characteristics.
+	 */
+	mmac_info = &nxgep->nxge_mmac_info;
+	if (nxgep->function_num < 2) {
+		mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY;
+		mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY;
+	} else {
+		mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY;
+		mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY;
+	}
+	/*
+	 * Set up the ndd parameters for this instance.
+	 */
+	nxge_init_param(nxgep);
+
+	/*
+	 * Setup Register Tracing Buffer.
+	 */
+	npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf);
+
+	/* init stats ptr */
+	nxge_init_statsp(nxgep);
+	status = nxge_get_xcvr_type(nxgep);
+
+	if (status != NXGE_OK) {
+		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_attach: "
+				    " Couldn't determine card type"
+				    " .... exit "));
+		goto nxge_attach_fail;
+	}
+
+	if ((nxgep->niu_type == NEPTUNE) &&
+		(nxgep->mac.portmode == PORT_10G_FIBER)) {
+		nxgep->niu_type = NEPTUNE_2;
+	}
+
+	status = nxge_get_config_properties(nxgep);
+
+	if (status != NXGE_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "get_hw create failed"));
+		goto nxge_attach_fail;
+	}
+
+	nxge_get_xcvr_properties(nxgep);
+
+	/*
+	 * Setup the Kstats for the driver.
+	 */
+	nxge_setup_kstats(nxgep);
+
+	nxge_setup_param(nxgep);
+
+	status = nxge_setup_system_dma_pages(nxgep);
+	if (status != NXGE_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed"));
+		goto nxge_attach_fail;
+	}
+
+#if	defined(sun4v)
+	if (nxgep->niu_type == N2_NIU) {
+		nxgep->niu_hsvc_available = B_FALSE;
+		bcopy(&niu_hsvc, &nxgep->niu_hsvc, sizeof (hsvc_info_t));
+		if ((status =
+			hsvc_register(&nxgep->niu_hsvc,
+					&nxgep->niu_min_ver)) != 0) {
+				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+					"nxge_attach: "
+					"%s: cannot negotiate "
+					"hypervisor services "
+					"revision %d "
+					"group: 0x%lx "
+					"major: 0x%lx minor: 0x%lx "
+					"errno: %d",
+					niu_hsvc.hsvc_modname,
+					niu_hsvc.hsvc_rev,
+					niu_hsvc.hsvc_group,
+					niu_hsvc.hsvc_major,
+					niu_hsvc.hsvc_minor,
+					status));
+				status = DDI_FAILURE;
+				goto nxge_attach_fail;
+		}
+
+		nxgep->niu_hsvc_available = B_TRUE;
+		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+			"NIU Hypervisor service enabled"));
+	}
+#endif
+
+	nxge_hw_id_init(nxgep);
+	nxge_hw_init_niu_common(nxgep);
+
+	status = nxge_setup_mutexes(nxgep);
+	if (status != NXGE_OK) {
+		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed"));
+		goto nxge_attach_fail;
+	}
+
+	status = nxge_setup_dev(nxgep);
+	if (status != DDI_SUCCESS) {
+		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed"));
+		goto nxge_attach_fail;
+	}
+
+	status = nxge_add_intrs(nxgep);
+	if (status != DDI_SUCCESS) {
+		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed"));
+		goto nxge_attach_fail;
+	}
+	status = nxge_add_soft_intrs(nxgep);
+	if (status != DDI_SUCCESS) {
+		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "add_soft_intr failed"));
+		goto nxge_attach_fail;
+	}
+
+	/*
+	 * Enable interrupts.
+	 */
+	nxge_intrs_enable(nxgep);
+
+	if ((status = nxge_mac_register(nxgep)) != DDI_SUCCESS) {
+		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+			"unable to register to mac layer (%d)", status));
+		goto nxge_attach_fail;
+	}
+
+	mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN);
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "registered to mac (instance %d)",
+		instance));
+
+	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
+
+	goto nxge_attach_exit;
+
+nxge_attach_fail:
+	nxge_unattach(nxgep);
+	if (nxge_status != NXGE_OK)
+		nxge_status = (NXGE_ERROR | NXGE_DDI_FAILED);
+	nxgep = NULL;
+
+nxge_attach_exit:
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x",
+		status));
+
+	return (status);
+}
+
+static int
+nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
+{
+	int 		status = DDI_SUCCESS;
+	int 		instance;
+	p_nxge_t 	nxgep = NULL;
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach"));
+	instance = ddi_get_instance(dip);
+	nxgep = ddi_get_soft_state(nxge_list, instance);
+	if (nxgep == NULL) {
+		status = DDI_FAILURE;
+		goto nxge_detach_exit;
+	}
+
+	switch (cmd) {
+	case DDI_DETACH:
+		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH"));
+		break;
+
+	case DDI_PM_SUSPEND:
+		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
+		nxgep->suspended = DDI_PM_SUSPEND;
+		nxge_suspend(nxgep);
+		break;
+
+	case DDI_SUSPEND:
+		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND"));
+		if (nxgep->suspended != DDI_PM_SUSPEND) {
+			nxgep->suspended = DDI_SUSPEND;
+			nxge_suspend(nxgep);
+		}
+		break;
+
+	default:
+		status = DDI_FAILURE;
+	}
+
+	if (cmd != DDI_DETACH)
+		goto nxge_detach_exit;
+
+	/*
+	 * Stop the xcvr polling.
+	 */
+	nxgep->suspended = cmd;
+
+	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
+
+	if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"<== nxge_detach status = 0x%08X", status));
+		return (DDI_FAILURE);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+		"<== nxge_detach (mac_unregister) status = 0x%08X", status));
+
+	nxge_unattach(nxgep);
+	nxgep = NULL;
+
+nxge_detach_exit:
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X",
+		status));
+
+	return (status);
+}
+
+static void
+nxge_unattach(p_nxge_t nxgep)
+{
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach"));
+
+	if (nxgep == NULL || nxgep->dev_regs == NULL) {
+		return;
+	}
+
+	if (nxgep->nxge_hw_p) {
+		nxge_uninit_common_dev(nxgep);
+		nxgep->nxge_hw_p = NULL;
+	}
+
+	if (nxgep->nxge_timerid) {
+		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
+		nxgep->nxge_timerid = 0;
+	}
+
+#if	defined(sun4v)
+	if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) {
+		(void) hsvc_unregister(&nxgep->niu_hsvc);
+		nxgep->niu_hsvc_available = B_FALSE;
+	}
+#endif
+	/*
+	 * Stop any further interrupts.
+	 */
+	nxge_remove_intrs(nxgep);
+
+	/* remove soft interrupts */
+	nxge_remove_soft_intrs(nxgep);
+
+	/*
+	 * Stop the device and free resources.
+	 */
+	nxge_destroy_dev(nxgep);
+
+	/*
+	 * Tear down the ndd parameters setup.
+	 */
+	nxge_destroy_param(nxgep);
+
+	/*
+	 * Tear down the kstat setup.
+	 */
+	nxge_destroy_kstats(nxgep);
+
+	/*
+	 * Destroy all mutexes.
+	 */
+	nxge_destroy_mutexes(nxgep);
+
+	/*
+	 * Remove the list of ndd parameters which
+	 * were setup during attach.
+	 */
+	if (nxgep->dip) {
+		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
+				    " nxge_unattach: remove all properties"));
+
+		(void) ddi_prop_remove_all(nxgep->dip);
+	}
+
+#if NXGE_PROPERTY
+	nxge_remove_hard_properties(nxgep);
+#endif
+
+	/*
+	 * Unmap the register setup.
+	 */
+	nxge_unmap_regs(nxgep);
+
+	nxge_fm_fini(nxgep);
+
+	ddi_soft_state_free(nxge_list, nxgep->instance);
+
+	NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach"));
+}
+
+static char n2_siu_name[] = "niu";
+
+static nxge_status_t
+nxge_map_regs(p_nxge_t nxgep)
+{
+	int		ddi_status = DDI_SUCCESS;
+	p_dev_regs_t 	dev_regs;
+	char		buf[MAXPATHLEN + 1];
+	char 		*devname;
+#ifdef	NXGE_DEBUG
+	char 		*sysname;
+#endif
+	off_t		regsize;
+	nxge_status_t	status = NXGE_OK;
+#if !defined(_BIG_ENDIAN)
+	off_t pci_offset;
+	uint16_t pcie_devctl;
+#endif
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs"));
+	nxgep->dev_regs = NULL;
+	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
+	dev_regs->nxge_regh = NULL;
+	dev_regs->nxge_pciregh = NULL;
+	dev_regs->nxge_msix_regh = NULL;
+	dev_regs->nxge_vir_regh = NULL;
+	dev_regs->nxge_vir2_regh = NULL;
+	nxgep->niu_type = NEPTUNE;
+
+	devname = ddi_pathname(nxgep->dip, buf);
+	ASSERT(strlen(devname) > 0);
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+		"nxge_map_regs: pathname devname %s", devname));
+
+	if (strstr(devname, n2_siu_name)) {
+		/* N2/NIU */
+		nxgep->niu_type = N2_NIU;
+		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+			"nxge_map_regs: N2/NIU devname %s", devname));
+		/* get function number */
+		nxgep->function_num =
+			(devname[strlen(devname) -1] == '1' ? 1 : 0);
+		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+			"nxge_map_regs: N2/NIU function number %d",
+			nxgep->function_num));
+	} else {
+		int		*prop_val;
+		uint_t 		prop_len;
+		uint8_t 	func_num;
+
+		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip,
+				0, "reg",
+				&prop_val, &prop_len) != DDI_PROP_SUCCESS) {
+			NXGE_DEBUG_MSG((nxgep, VPD_CTL,
+				"Reg property not found"));
+			ddi_status = DDI_FAILURE;
+			goto nxge_map_regs_fail0;
+
+		} else {
+			func_num = (prop_val[0] >> 8) & 0x7;
+			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+				"Reg property found: fun # %d",
+				func_num));
+			nxgep->function_num = func_num;
+			ddi_prop_free(prop_val);
+		}
+	}
+
+	switch (nxgep->niu_type) {
+	case NEPTUNE:
+	case NEPTUNE_2:
+	default:
+		(void) ddi_dev_regsize(nxgep->dip, 0, &regsize);
+		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+			"nxge_map_regs: pci config size 0x%x", regsize));
+
+		ddi_status = ddi_regs_map_setup(nxgep->dip, 0,
+			(caddr_t *)&(dev_regs->nxge_pciregp), 0, 0,
+			&nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh);
+		if (ddi_status != DDI_SUCCESS) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"ddi_map_regs, nxge bus config regs failed"));
+			goto nxge_map_regs_fail0;
+		}
+		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+			"nxge_map_reg: PCI config addr 0x%0llx "
+			" handle 0x%0llx", dev_regs->nxge_pciregp,
+			dev_regs->nxge_pciregh));
+		/*
+		 * IMPORTANT: workaround for a bit-swapping bug in the
+		 * hardware which ends up with no-snoop = yes, resulting
+		 * in DMA not being synched properly.
+		 */
+#if !defined(_BIG_ENDIAN)
+		/* workarounds for x86 systems */
+		pci_offset = 0x80 + PCIE_DEVCTL;
+		pcie_devctl = 0x0;
+		pcie_devctl &= PCIE_DEVCTL_ENABLE_NO_SNOOP;
+		pcie_devctl |= PCIE_DEVCTL_RO_EN;
+		pci_config_put16(dev_regs->nxge_pciregh, pci_offset,
+				    pcie_devctl);
+#endif
+
+		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
+		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+			"nxge_map_regs: pio size 0x%x", regsize));
+		/* set up the device mapped register */
+		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
+			(caddr_t *)&(dev_regs->nxge_regp), 0, 0,
+			&nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
+		if (ddi_status != DDI_SUCCESS) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"ddi_map_regs for Neptune global reg failed"));
+			goto nxge_map_regs_fail1;
+		}
+
+		/* set up the msi/msi-x mapped register */
+		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
+		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+			"nxge_map_regs: msix size 0x%x", regsize));
+		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
+			(caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0,
+			&nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh);
+		if (ddi_status != DDI_SUCCESS) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"ddi_map_regs for msi reg failed"));
+			goto nxge_map_regs_fail2;
+		}
+
+		/* set up the vio region mapped register */
+		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
+		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+			"nxge_map_regs: vio size 0x%x", regsize));
+		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
+			(caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
+			&nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);
+
+		if (ddi_status != DDI_SUCCESS) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"ddi_map_regs for nxge vio reg failed"));
+			goto nxge_map_regs_fail3;
+		}
+		nxgep->dev_regs = dev_regs;
+
+		NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh);
+		NPI_PCI_ADD_HANDLE_SET(nxgep,
+			(npi_reg_ptr_t)dev_regs->nxge_pciregp);
+		NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh);
+		NPI_MSI_ADD_HANDLE_SET(nxgep,
+			(npi_reg_ptr_t)dev_regs->nxge_msix_regp);
+
+		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
+		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);
+
+		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
+		NPI_REG_ADD_HANDLE_SET(nxgep,
+			(npi_reg_ptr_t)dev_regs->nxge_regp);
+
+		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
+		NPI_VREG_ADD_HANDLE_SET(nxgep,
+			(npi_reg_ptr_t)dev_regs->nxge_vir_regp);
+
+		break;
+
+	case N2_NIU:
+		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU"));
+		/*
+		 * Set up the device mapped register (FWARC 2006/556)
+		 * (changed back to 1: reg starts at 1!)
+		 */
+		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
+		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+			"nxge_map_regs: dev size 0x%x", regsize));
+		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
+				(caddr_t *)&(dev_regs->nxge_regp), 0, 0,
+				&nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
+
+		if (ddi_status != DDI_SUCCESS) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"ddi_map_regs for N2/NIU, global reg failed "));
+			goto nxge_map_regs_fail1;
+		}
+
+		/* set up the vio region mapped register */
+		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
+		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+			"nxge_map_regs: vio (1) size 0x%x", regsize));
+		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
+			(caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
+			&nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);
+
+		if (ddi_status != DDI_SUCCESS) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"ddi_map_regs for nxge vio reg failed"));
+			goto nxge_map_regs_fail2;
+		}
+		/* set up the vio region mapped register */
+		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
+		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+			"nxge_map_regs: vio (3) size 0x%x", regsize));
+		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
+			(caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0,
+			&nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh);
+
+		if (ddi_status != DDI_SUCCESS) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"ddi_map_regs for nxge vio2 reg failed"));
+			goto nxge_map_regs_fail3;
+		}
+		nxgep->dev_regs = dev_regs;
+
+		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
+		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);
+
+		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
+		NPI_REG_ADD_HANDLE_SET(nxgep,
+			(npi_reg_ptr_t)dev_regs->nxge_regp);
+
+		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
+		NPI_VREG_ADD_HANDLE_SET(nxgep,
+			(npi_reg_ptr_t)dev_regs->nxge_vir_regp);
+
+		NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh);
+		NPI_V2REG_ADD_HANDLE_SET(nxgep,
+			(npi_reg_ptr_t)dev_regs->nxge_vir2_regp);
+
+		break;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx "
+		" handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh));
+
+	goto nxge_map_regs_exit;
+nxge_map_regs_fail3:
+	if (dev_regs->nxge_msix_regh) {
+		ddi_regs_map_free(&dev_regs->nxge_msix_regh);
+	}
+	if (dev_regs->nxge_vir_regh) {
+		ddi_regs_map_free(&dev_regs->nxge_vir_regh);
+	}
+nxge_map_regs_fail2:
+	if (dev_regs->nxge_regh) {
+		ddi_regs_map_free(&dev_regs->nxge_regh);
+	}
+nxge_map_regs_fail1:
+	if (dev_regs->nxge_pciregh) {
+		ddi_regs_map_free(&dev_regs->nxge_pciregh);
+	}
+nxge_map_regs_fail0:
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory"));
+	kmem_free(dev_regs, sizeof (dev_regs_t));
+
+nxge_map_regs_exit:
+	if (ddi_status != DDI_SUCCESS)
+		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs"));
+	return (status);
+}
+
+static void
+nxge_unmap_regs(p_nxge_t nxgep)
+{
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs"));
+	if (nxgep->dev_regs) {
+		if (nxgep->dev_regs->nxge_pciregh) {
+			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+				"==> nxge_unmap_regs: bus"));
+			ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh);
+			nxgep->dev_regs->nxge_pciregh = NULL;
+		}
+		if (nxgep->dev_regs->nxge_regh) {
+			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+				"==> nxge_unmap_regs: device registers"));
+			ddi_regs_map_free(&nxgep->dev_regs->nxge_regh);
+			nxgep->dev_regs->nxge_regh = NULL;
+		}
+		if (nxgep->dev_regs->nxge_msix_regh) {
+			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+				"==> nxge_unmap_regs: device interrupts"));
+			ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh);
+			nxgep->dev_regs->nxge_msix_regh = NULL;
+		}
+		if (nxgep->dev_regs->nxge_vir_regh) {
+			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+				"==> nxge_unmap_regs: vio region"));
+			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh);
+			nxgep->dev_regs->nxge_vir_regh = NULL;
+		}
+		if (nxgep->dev_regs->nxge_vir2_regh) {
+			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+				"==> nxge_unmap_regs: vio2 region"));
+			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh);
+			nxgep->dev_regs->nxge_vir2_regh = NULL;
+		}
+
+		kmem_free(nxgep->dev_regs, sizeof (dev_regs_t));
+		nxgep->dev_regs = NULL;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs"));
+}
+
+static nxge_status_t
+nxge_setup_mutexes(p_nxge_t nxgep)
+{
+	int ddi_status = DDI_SUCCESS;
+	nxge_status_t status = NXGE_OK;
+	nxge_classify_t *classify_ptr;
+	int partition;
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes"));
+
+	/*
+	 * Get the interrupt cookie so that the mutexes can be
+	 * initialized.
+	 */
+	ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0,
+					&nxgep->interrupt_cookie);
+	if (ddi_status != DDI_SUCCESS) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"<== nxge_setup_mutexes: failed 0x%x", ddi_status));
+		goto nxge_setup_mutexes_exit;
+	}
+
+	/* Initialize global mutex */
+
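+	/*
+	 * The MDIO and MII locks are shared by all nxge instances;
+	 * nxge_mdio_lock_init and nxge_mii_lock_init reference-count
+	 * them so that only the first attach initializes each lock
+	 * and only the last detach destroys it.
+	 */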
+	if (nxge_mdio_lock_init == 0) {
+		MUTEX_INIT(&nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL);
+	}
+	atomic_add_32(&nxge_mdio_lock_init, 1);
+
+	if (nxge_mii_lock_init == 0) {
+		MUTEX_INIT(&nxge_mii_lock, NULL, MUTEX_DRIVER, NULL);
+	}
+	atomic_add_32(&nxge_mii_lock_init, 1);
+
+	nxgep->drv_state |= STATE_MDIO_LOCK_INIT;
+	nxgep->drv_state |= STATE_MII_LOCK_INIT;
+
+	/*
+	 * Initialize mutexes for this device.
+	 */
+	MUTEX_INIT(nxgep->genlock, NULL,
+		MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
+	MUTEX_INIT(&nxgep->ouraddr_lock, NULL,
+		MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
+	MUTEX_INIT(&nxgep->mif_lock, NULL,
+		MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
+	RW_INIT(&nxgep->filter_lock, NULL,
+		RW_DRIVER, (void *)nxgep->interrupt_cookie);
+
+	classify_ptr = &nxgep->classifier;
+	/*
+	 * FFLP mutexes are never used in interrupt context
+	 * because FFLP operations can take a very long time to
+	 * complete and so are not suitable to invoke from
+	 * interrupt handlers.
+	 */
+	MUTEX_INIT(&classify_ptr->tcam_lock, NULL,
+			    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
+	if (nxgep->niu_type == NEPTUNE) {
+		MUTEX_INIT(&classify_ptr->fcram_lock, NULL,
+			    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
+		for (partition = 0; partition < MAX_PARTITION; partition++) {
+			MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL,
+			    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
+		}
+	}
+
+nxge_setup_mutexes_exit:
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+			"<== nxge_setup_mutexes status = %x", status));
+
+	if (ddi_status != DDI_SUCCESS)
+		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
+
+	return (status);
+}
+
+static void
+nxge_destroy_mutexes(p_nxge_t nxgep)
+{
+	int partition;
+	nxge_classify_t *classify_ptr;
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes"));
+	RW_DESTROY(&nxgep->filter_lock);
+	MUTEX_DESTROY(&nxgep->mif_lock);
+	MUTEX_DESTROY(&nxgep->ouraddr_lock);
+	MUTEX_DESTROY(nxgep->genlock);
+
+	classify_ptr = &nxgep->classifier;
+	MUTEX_DESTROY(&classify_ptr->tcam_lock);
+
+	/* Destroy classification mutexes, based on HW type */
+	if (nxgep->niu_type == NEPTUNE) {
+		MUTEX_DESTROY(&classify_ptr->fcram_lock);
+		for (partition = 0; partition < MAX_PARTITION; partition++) {
+			MUTEX_DESTROY(&classify_ptr->hash_lock[partition]);
+		}
+	}
+	if (nxgep->drv_state & STATE_MDIO_LOCK_INIT) {
+		if (nxge_mdio_lock_init == 1) {
+			MUTEX_DESTROY(&nxge_mdio_lock);
+		}
+		atomic_add_32(&nxge_mdio_lock_init, -1);
+	}
+	if (nxgep->drv_state & STATE_MII_LOCK_INIT) {
+		if (nxge_mii_lock_init == 1) {
+			MUTEX_DESTROY(&nxge_mii_lock);
+		}
+		atomic_add_32(&nxge_mii_lock_init, -1);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes"));
+}
+
+nxge_status_t
+nxge_init(p_nxge_t nxgep)
+{
+	nxge_status_t	status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init"));
+
+	if (nxgep->drv_state & STATE_HW_INITIALIZED) {
+		return (status);
+	}
+
+	/*
+	 * Allocate system memory for the receive/transmit buffer blocks
+	 * and receive/transmit descriptor rings.
+	 */
+	status = nxge_alloc_mem_pool(nxgep);
+	if (status != NXGE_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n"));
+		goto nxge_init_fail1;
+	}
+
+	/*
+	 * Initialize and enable TXC registers
+	 * (Globally enable TX controller,
+	 *  enable a port, configure dma channel bitmap,
+	 *  configure the max burst size).
+	 */
+	status = nxge_txc_init(nxgep);
+	if (status != NXGE_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txc failed\n"));
+		goto nxge_init_fail2;
+	}
+
+	/*
+	 * Initialize and enable TXDMA channels.
+	 */
+	status = nxge_init_txdma_channels(nxgep);
+	if (status != NXGE_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n"));
+		goto nxge_init_fail3;
+	}
+
+	/*
+	 * Initialize and enable RXDMA channels.
+	 */
+	status = nxge_init_rxdma_channels(nxgep);
+	if (status != NXGE_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n"));
+		goto nxge_init_fail4;
+	}
+
+	/*
+	 * Initialize TCAM and FCRAM (Neptune).
+	 */
+	status = nxge_classify_init(nxgep);
+	if (status != NXGE_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init classify failed\n"));
+		goto nxge_init_fail5;
+	}
+
+	/*
+	 * Initialize ZCP
+	 */
+	status = nxge_zcp_init(nxgep);
+	if (status != NXGE_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n"));
+		goto nxge_init_fail5;
+	}
+
+	/*
+	 * Initialize IPP.
+	 */
+	status = nxge_ipp_init(nxgep);
+	if (status != NXGE_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n"));
+		goto nxge_init_fail5;
+	}
+
+	/*
+	 * Initialize the MAC block.
+	 */
+	status = nxge_mac_init(nxgep);
+	if (status != NXGE_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n"));
+		goto nxge_init_fail5;
+	}
+
+	nxge_intrs_enable(nxgep);
+
+	/*
+	 * Enable hardware interrupts.
+	 */
+	nxge_intr_hw_enable(nxgep);
+	nxgep->drv_state |= STATE_HW_INITIALIZED;
+
+	goto nxge_init_exit;
+
+nxge_init_fail5:
+	nxge_uninit_rxdma_channels(nxgep);
+nxge_init_fail4:
+	nxge_uninit_txdma_channels(nxgep);
+nxge_init_fail3:
+	(void) nxge_txc_uninit(nxgep);
+nxge_init_fail2:
+	nxge_free_mem_pool(nxgep);
+nxge_init_fail1:
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+		"<== nxge_init status (failed) = 0x%08x", status));
+	return (status);
+
+nxge_init_exit:
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x",
+		status));
+	return (status);
+}
+
+
+timeout_id_t
+nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec)
+{
+	if ((nxgep->suspended == 0) ||
+			(nxgep->suspended == DDI_RESUME)) {
+		return (timeout(func, (caddr_t)nxgep,
+			drv_usectohz(1000 * msec)));
+	}
+	return (NULL);
+}
+
+/*ARGSUSED*/
+void
+nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid)
+{
+	if (timerid) {
+		(void) untimeout(timerid);
+	}
+}
+
+void
+nxge_uninit(p_nxge_t nxgep)
+{
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit"));
+
+	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
+		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+			"==> nxge_uninit: not initialized"));
+		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+			"<== nxge_uninit"));
+		return;
+	}
+
+	/* stop timer */
+	if (nxgep->nxge_timerid) {
+		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
+		nxgep->nxge_timerid = 0;
+	}
+
+	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
+	(void) nxge_intr_hw_disable(nxgep);
+
+	/*
+	 * Reset the receive MAC side.
+	 */
+	(void) nxge_rx_mac_disable(nxgep);
+
+	/* Disable and soft reset the IPP */
+	(void) nxge_ipp_disable(nxgep);
+
+	/* Free classification resources */
+	(void) nxge_classify_uninit(nxgep);
+
+	/*
+	 * Reset the transmit/receive DMA side.
+	 */
+	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
+	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);
+
+	nxge_uninit_txdma_channels(nxgep);
+	nxge_uninit_rxdma_channels(nxgep);
+
+	/*
+	 * Reset the transmit MAC side.
+	 */
+	(void) nxge_tx_mac_disable(nxgep);
+
+	nxge_free_mem_pool(nxgep);
+
+	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
+
+	nxgep->drv_state &= ~STATE_HW_INITIALIZED;
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: "
+		"nxge_mblks_pending %d", nxge_mblks_pending));
+}
+
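+/*
+ * Register peek/poke helpers (presumably driven from the driver's
+ * ioctl/debug path): the mblk carries the 64-bit register offset and,
+ * for nxge_put64, a second 64-bit word holding the value to write.
+ */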
+void
+nxge_get64(p_nxge_t nxgep, p_mblk_t mp)
+{
+	uint64_t	reg;
+	uint64_t	regdata;
+	int		i, retry;
+
+	bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
+	regdata = 0;
+	retry = 1;
+
+	for (i = 0; i < retry; i++) {
+		NXGE_REG_RD64(nxgep->npi_handle, reg, &regdata);
+	}
+	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
+}
+
+void
+nxge_put64(p_nxge_t nxgep, p_mblk_t mp)
+{
+	uint64_t	reg;
+	uint64_t	buf[2];
+
+	bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
+	reg = buf[0];
+
+	NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]);
+}
+
+
+nxge_os_mutex_t nxgedebuglock;
+int nxge_debug_init = 0;
+
+/*ARGSUSED*/
+/*VARARGS*/
+void
+nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...)
+{
+	char msg_buffer[1048];
+	char prefix_buffer[32];
+	int instance;
+	uint64_t debug_level;
+	int cmn_level = CE_CONT;
+	va_list ap;
+
+	debug_level = (nxgep == NULL) ? nxge_debug_level :
+		nxgep->nxge_debug_level;
+
+	if ((level & debug_level) ||
+		(level == NXGE_NOTE) ||
+		(level == NXGE_ERR_CTL)) {
+		/* do the msg processing */
+		if (nxge_debug_init == 0) {
+			MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL);
+			nxge_debug_init = 1;
+		}
+
+		MUTEX_ENTER(&nxgedebuglock);
+
+		if ((level & NXGE_NOTE)) {
+			cmn_level = CE_NOTE;
+		}
+
+		if (level & NXGE_ERR_CTL) {
+			cmn_level = CE_WARN;
+		}
+
+		va_start(ap, fmt);
+		(void) vsprintf(msg_buffer, fmt, ap);
+		va_end(ap);
+		if (nxgep == NULL) {
+			instance = -1;
+			(void) sprintf(prefix_buffer, "%s :", "nxge");
+		} else {
+			instance = nxgep->instance;
+			(void) sprintf(prefix_buffer,
+				"%s%d :", "nxge", instance);
+		}
+
+		MUTEX_EXIT(&nxgedebuglock);
+		cmn_err(cmn_level, "!%s %s\n",
+				prefix_buffer, msg_buffer);
+
+	}
+}
+
+char *
+nxge_dump_packet(char *addr, int size)
+{
+	uchar_t *ap = (uchar_t *)addr;
+	int i;
+	static char etherbuf[1024];
+	char *cp = etherbuf;
+	char digits[] = "0123456789abcdef";
+
+	if (!size)
+		size = 60;
+
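+	/*
+	 * Format the buffer as colon-separated hex bytes; for buffers
+	 * larger than MAX_DUMP_SZ, dump only the first and last
+	 * MAX_DUMP_SZ/2 bytes with a run of dots in between.
+	 */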
+	if (size > MAX_DUMP_SZ) {
+		/* Dump the leading bytes */
+		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
+			if (*ap > 0x0f)
+				*cp++ = digits[*ap >> 4];
+			*cp++ = digits[*ap++ & 0xf];
+			*cp++ = ':';
+		}
+		for (i = 0; i < 20; i++)
+			*cp++ = '.';
+		/* Dump the last MAX_DUMP_SZ/2 bytes */
+		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ/2));
+		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
+			if (*ap > 0x0f)
+				*cp++ = digits[*ap >> 4];
+			*cp++ = digits[*ap++ & 0xf];
+			*cp++ = ':';
+		}
+	} else {
+		for (i = 0; i < size; i++) {
+			if (*ap > 0x0f)
+				*cp++ = digits[*ap >> 4];
+			*cp++ = digits[*ap++ & 0xf];
+			*cp++ = ':';
+		}
+	}
+	*--cp = 0;
+	return (etherbuf);
+}
+
+#ifdef	NXGE_DEBUG
+static void
+nxge_test_map_regs(p_nxge_t nxgep)
+{
+	ddi_acc_handle_t cfg_handle;
+	p_pci_cfg_t	cfg_ptr;
+	ddi_acc_handle_t dev_handle;
+	char		*dev_ptr;
+	ddi_acc_handle_t pci_config_handle;
+	uint32_t	regval;
+	int		i;
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs"));
+
+	dev_handle = nxgep->dev_regs->nxge_regh;
+	dev_ptr = (char *)nxgep->dev_regs->nxge_regp;
+
+	if (nxgep->niu_type == NEPTUNE) {
+		cfg_handle = nxgep->dev_regs->nxge_pciregh;
+		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
+
+		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+			"Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr));
+		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+			"Neptune PCI cfg_ptr vendor id ptr 0x%llx",
+			&cfg_ptr->vendorid));
+		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+			"\tvendorid 0x%x devid 0x%x",
+			NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0),
+			NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid,    0)));
+		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+			"PCI BAR: base 0x%x base14 0x%x base 18 0x%x "
+			"bar1c 0x%x",
+			NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base,   0),
+			NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0),
+			NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0),
+			NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0)));
+		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+			"\nNeptune PCI BAR: base20 0x%x base24 0x%x "
+			"base 28 0x%x bar2c 0x%x\n",
+			NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0),
+			NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0),
+			NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0),
+			NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0)));
+		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+			"\nNeptune PCI BAR: base30 0x%x\n",
+			NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0)));
+
+		cfg_handle = nxgep->dev_regs->nxge_pciregh;
+		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
+		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+			"first  0x%llx second 0x%llx third 0x%llx "
+			"last 0x%llx ",
+			NXGE_PIO_READ64(dev_handle,
+				    (uint64_t *)(dev_ptr + 0),  0),
+			NXGE_PIO_READ64(dev_handle,
+				    (uint64_t *)(dev_ptr + 8),  0),
+			NXGE_PIO_READ64(dev_handle,
+				    (uint64_t *)(dev_ptr + 16), 0),
+			NXGE_PIO_READ64(dev_handle,
+				    (uint64_t *)(dev_ptr + 24), 0)));
+	}
+}
+
+#endif
+
+static void
+nxge_suspend(p_nxge_t nxgep)
+{
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend"));
+
+	nxge_intrs_disable(nxgep);
+	nxge_destroy_dev(nxgep);
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend"));
+}
+
+static nxge_status_t
+nxge_resume(p_nxge_t nxgep)
+{
+	nxge_status_t status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume"));
+	nxgep->suspended = DDI_RESUME;
+
+	nxge_global_reset(nxgep);
+	nxgep->suspended = 0;
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+			"<== nxge_resume status = 0x%x", status));
+	return (status);
+}
+
+static nxge_status_t
+nxge_setup_dev(p_nxge_t nxgep)
+{
+	nxge_status_t	status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d",
+			nxgep->mac.portnum));
+
+	status = nxge_xcvr_find(nxgep);
+	if (status != NXGE_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			    " nxge_setup_dev status "
+			    " (xcvr find 0x%08x)", status));
+		goto nxge_setup_dev_exit;
+	}
+
+	status = nxge_link_init(nxgep);
+
+	if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"port%d Bad register acc handle", nxgep->mac.portnum));
+		status = NXGE_ERROR;
+	}
+
+	if (status != NXGE_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			    " nxge_setup_dev status "
+			    "(xcvr init 0x%08x)", status));
+		goto nxge_setup_dev_exit;
+	}
+
+nxge_setup_dev_exit:
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+		"<== nxge_setup_dev port %d status = 0x%08x",
+		nxgep->mac.portnum, status));
+
+	return (status);
+}
+
+static void
+nxge_destroy_dev(p_nxge_t nxgep)
+{
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev"));
+
+	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
+
+	(void) nxge_hw_stop(nxgep);
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev"));
+}
+
+static nxge_status_t
+nxge_setup_system_dma_pages(p_nxge_t nxgep)
+{
+	int 			ddi_status = DDI_SUCCESS;
+	uint_t 			count;
+	ddi_dma_cookie_t 	cookie;
+	uint_t 			iommu_pagesize;
+	nxge_status_t		status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages"));
+	nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1);
+	if (nxgep->niu_type != N2_NIU) {
+		iommu_pagesize = dvma_pagesize(nxgep->dip);
+		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+			" nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
+			" default_block_size %d iommu_pagesize %d",
+			nxgep->sys_page_sz,
+			ddi_ptob(nxgep->dip, (ulong_t)1),
+			nxgep->rx_default_block_size,
+			iommu_pagesize));
+
+		if (iommu_pagesize != 0) {
+			if (nxgep->sys_page_sz == iommu_pagesize) {
+				if (iommu_pagesize > 0x4000)
+					nxgep->sys_page_sz = 0x4000;
+			} else {
+				if (nxgep->sys_page_sz > iommu_pagesize)
+					nxgep->sys_page_sz = iommu_pagesize;
+			}
+		}
+	}
+	nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+		"==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
+		"default_block_size %d page mask %d",
+		nxgep->sys_page_sz,
+		ddi_ptob(nxgep->dip, (ulong_t)1),
+		nxgep->rx_default_block_size,
+		nxgep->sys_page_mask));
+
+
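+	/*
+	 * Map the chosen system page size to the receive block size and
+	 * the RBR block-size code programmed into the RXDMA hardware;
+	 * unrecognized page sizes fall back to 4K.
+	 */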
+	switch (nxgep->sys_page_sz) {
+	default:
+		nxgep->sys_page_sz = 0x1000;
+		nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
+		nxgep->rx_default_block_size = 0x1000;
+		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
+		break;
+	case 0x1000:
+		nxgep->rx_default_block_size = 0x1000;
+		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
+		break;
+	case 0x2000:
+		nxgep->rx_default_block_size = 0x2000;
+		nxgep->rx_bksize_code = RBR_BKSIZE_8K;
+		break;
+	case 0x4000:
+		nxgep->rx_default_block_size = 0x4000;
+		nxgep->rx_bksize_code = RBR_BKSIZE_16K;
+		break;
+	case 0x8000:
+		nxgep->rx_default_block_size = 0x8000;
+		nxgep->rx_bksize_code = RBR_BKSIZE_32K;
+		break;
+	}
+
+#ifndef USE_RX_BIG_BUF
+	nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz;
+#else
+		nxgep->rx_default_block_size = 0x2000;
+		nxgep->rx_bksize_code = RBR_BKSIZE_8K;
+#endif
+	/*
+	 * Get the system DMA burst size.
+	 */
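+	/*
+	 * Bind a throwaway handle to its own storage purely so that
+	 * ddi_dma_burstsizes() can report the burst sizes the system
+	 * supports; the binding is released again right afterwards.
+	 */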
+	ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
+			DDI_DMA_DONTWAIT, 0,
+			&nxgep->dmasparehandle);
+	if (ddi_status != DDI_SUCCESS) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"ddi_dma_alloc_handle: failed "
+			" status 0x%x", ddi_status));
+		goto nxge_get_soft_properties_exit;
+	}
+
+	ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL,
+				(caddr_t)nxgep->dmasparehandle,
+				sizeof (nxgep->dmasparehandle),
+				DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
+				DDI_DMA_DONTWAIT, 0,
+				&cookie, &count);
+	if (ddi_status != DDI_DMA_MAPPED) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"Binding spare handle to find system"
+			" burstsize failed."));
+		ddi_status = DDI_FAILURE;
+		goto nxge_get_soft_properties_fail1;
+	}
+
+	nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle);
+	(void) ddi_dma_unbind_handle(nxgep->dmasparehandle);
+
+nxge_get_soft_properties_fail1:
+	ddi_dma_free_handle(&nxgep->dmasparehandle);
+
+nxge_get_soft_properties_exit:
+
+	if (ddi_status != DDI_SUCCESS)
+		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+		"<== nxge_setup_system_dma_pages status = 0x%08x", status));
+	return (status);
+}
+
+static nxge_status_t
+nxge_alloc_mem_pool(p_nxge_t nxgep)
+{
+	nxge_status_t	status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool"));
+
+	status = nxge_alloc_rx_mem_pool(nxgep);
+	if (status != NXGE_OK) {
+		return (NXGE_ERROR);
+	}
+
+	status = nxge_alloc_tx_mem_pool(nxgep);
+	if (status != NXGE_OK) {
+		nxge_free_rx_mem_pool(nxgep);
+		return (NXGE_ERROR);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool"));
+	return (NXGE_OK);
+}
+
+static void
+nxge_free_mem_pool(p_nxge_t nxgep)
+{
+	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool"));
+
+	nxge_free_rx_mem_pool(nxgep);
+	nxge_free_tx_mem_pool(nxgep);
+
+	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool"));
+}
+
+static nxge_status_t
+nxge_alloc_rx_mem_pool(p_nxge_t nxgep)
+{
+	int			i, j;
+	uint32_t		ndmas, st_rdc;
+	p_nxge_dma_pt_cfg_t	p_all_cfgp;
+	p_nxge_hw_pt_cfg_t	p_cfgp;
+	p_nxge_dma_pool_t	dma_poolp;
+	p_nxge_dma_common_t	*dma_buf_p;
+	p_nxge_dma_pool_t	dma_cntl_poolp;
+	p_nxge_dma_common_t	*dma_cntl_p;
+	size_t			rx_buf_alloc_size;
+	size_t			rx_cntl_alloc_size;
+	uint32_t 		*num_chunks; /* per dma */
+	nxge_status_t		status = NXGE_OK;
+
+	uint32_t		nxge_port_rbr_size;
+	uint32_t		nxge_port_rbr_spare_size;
+	uint32_t		nxge_port_rcr_size;
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool"));
+
+	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
+	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
+	st_rdc = p_cfgp->start_rdc;
+	ndmas = p_cfgp->max_rdcs;
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+		" nxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas));
+
+	/*
+	 * Allocate memory for each receive DMA channel.
+	 */
+	dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
+			KM_SLEEP);
+	dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
+			sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);
+
+	dma_cntl_poolp = (p_nxge_dma_pool_t)
+				KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
+	dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
+			sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);
+
+	num_chunks = (uint32_t *)KMEM_ZALLOC(
+			sizeof (uint32_t) * ndmas, KM_SLEEP);
+
+	/*
+	 * Assume that each DMA channel will be configured with default
+	 * block size.
+	 * rbr block counts are mod of batch count (16).
+	 */
+	nxge_port_rbr_size = p_all_cfgp->rbr_size;
+	nxge_port_rcr_size = p_all_cfgp->rcr_size;
+
+	if (!nxge_port_rbr_size) {
+		nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT;
+	}
+	if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) {
+		nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH *
+			(nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1));
+	}
+
+	p_all_cfgp->rbr_size = nxge_port_rbr_size;
+	nxge_port_rbr_spare_size = nxge_rbr_spare_size;
+
+	if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) {
+		nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH *
+			(nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1));
+	}
+
+	/*
+	 * N2/NIU has limitations on the descriptor sizes: contiguous
+	 * memory allocations for data buffers are limited to 4M
+	 * (contig_mem_alloc), and control buffers must be little
+	 * endian (allocated with the ddi/dki mem alloc functions).
+	 */
+#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
+	if (nxgep->niu_type == N2_NIU) {
+		nxge_port_rbr_spare_size = 0;
+		if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) ||
+				(!ISP2(nxge_port_rbr_size))) {
+			nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX;
+		}
+		if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) ||
+				(!ISP2(nxge_port_rcr_size))) {
+			nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX;
+		}
+	}
+#endif
+
+	rx_buf_alloc_size = (nxgep->rx_default_block_size *
+		(nxge_port_rbr_size + nxge_port_rbr_spare_size));
+
+	/*
+	 * Addresses of receive block ring, receive completion ring and the
+	 * mailbox must be all cache-aligned (64 bytes).
+	 */
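+	/*
+	 * The control area holds one rx_desc_t per RBR entry (including
+	 * spares), one rcr_entry_t per RCR entry, and the RXDMA mailbox.
+	 */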
+	rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size;
+	rx_cntl_alloc_size *= (sizeof (rx_desc_t));
+	rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size);
+	rx_cntl_alloc_size += sizeof (rxdma_mailbox_t);
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: "
+		"nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d "
+		"nxge_port_rcr_size = %d "
+		"rx_cntl_alloc_size = %d",
+		nxge_port_rbr_size, nxge_port_rbr_spare_size,
+		nxge_port_rcr_size,
+		rx_cntl_alloc_size));
+
+#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
+	if (nxgep->niu_type == N2_NIU) {
+		if (!ISP2(rx_buf_alloc_size)) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"==> nxge_alloc_rx_mem_pool: "
+				" must be power of 2"));
+			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
+			goto nxge_alloc_rx_mem_pool_exit;
+		}
+
+		if (rx_buf_alloc_size > (1 << 22)) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"==> nxge_alloc_rx_mem_pool: "
+				" limit size to 4M"));
+			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
+			goto nxge_alloc_rx_mem_pool_exit;
+		}
+
+		if (rx_cntl_alloc_size < 0x2000) {
+			rx_cntl_alloc_size = 0x2000;
+		}
+	}
+#endif
+	nxgep->nxge_port_rbr_size = nxge_port_rbr_size;
+	nxgep->nxge_port_rcr_size = nxge_port_rcr_size;
+
+	/*
+	 * Allocate memory for receive buffers and descriptor rings.
+	 * Replace allocation functions with interface functions provided
+	 * by the partition manager when it is available.
+	 */
+	/*
+	 * Allocate memory for the receive buffer blocks.
+	 */
+	for (i = 0; i < ndmas; i++) {
+		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+			" nxge_alloc_rx_mem_pool to alloc mem: "
+			" dma %d dma_buf_p %llx &dma_buf_p %llx",
+			i, dma_buf_p[i], &dma_buf_p[i]));
+		num_chunks[i] = 0;
+		status = nxge_alloc_rx_buf_dma(nxgep, st_rdc, &dma_buf_p[i],
+				rx_buf_alloc_size,
+				nxgep->rx_default_block_size, &num_chunks[i]);
+		if (status != NXGE_OK) {
+			break;
+		}
+		st_rdc++;
+		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+			" nxge_alloc_rx_mem_pool DONE  alloc mem: "
+			"dma %d dma_buf_p %llx &dma_buf_p %llx", i,
+			dma_buf_p[i], &dma_buf_p[i]));
+	}
+	if (i < ndmas) {
+		goto nxge_alloc_rx_mem_fail1;
+		/* set up the second vio region mapped register */
+	/*
+	 * Allocate memory for descriptor rings and mailbox.
+	 */
+	st_rdc = p_cfgp->start_rdc;
+	for (j = 0; j < ndmas; j++) {
+		status = nxge_alloc_rx_cntl_dma(nxgep, st_rdc, &dma_cntl_p[j],
+					rx_cntl_alloc_size);
+		if (status != NXGE_OK) {
+			break;
+		}
+		st_rdc++;
+	}
+	if (j < ndmas) {
+		goto nxge_alloc_rx_mem_fail2;
+	}
+
+	dma_poolp->ndmas = ndmas;
+	dma_poolp->num_chunks = num_chunks;
+	dma_poolp->buf_allocated = B_TRUE;
+	nxgep->rx_buf_pool_p = dma_poolp;
+	dma_poolp->dma_buf_pool_p = dma_buf_p;
+
+	dma_cntl_poolp->ndmas = ndmas;
+	dma_cntl_poolp->buf_allocated = B_TRUE;
+	nxgep->rx_cntl_pool_p = dma_cntl_poolp;
+	dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
+
+	goto nxge_alloc_rx_mem_pool_exit;
+
+nxge_alloc_rx_mem_fail2:
+	/* Free control buffers */
+	j--;
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+		"==> nxge_alloc_rx_mem_pool: freeing control bufs (%d)", j));
+	for (; j >= 0; j--) {
+		nxge_free_rx_cntl_dma(nxgep,
+			(p_nxge_dma_common_t)dma_cntl_p[j]);
+		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+			"==> nxge_alloc_rx_mem_pool: control bufs freed (%d)",
+			j));
+	}
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+		"==> nxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
+
+nxge_alloc_rx_mem_fail1:
+	/* Free data buffers */
+	i--;
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+		"==> nxge_alloc_rx_mem_pool: freeing data bufs (%d)", i));
+	for (; i >= 0; i--) {
+		nxge_free_rx_buf_dma(nxgep, (p_nxge_dma_common_t)dma_buf_p[i],
+			num_chunks[i]);
+	}
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+		"==> nxge_alloc_rx_mem_pool: data bufs freed (%d)", i));
+
+	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
+	KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t));
+	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t));
+	KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t));
+	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t));
+
+nxge_alloc_rx_mem_pool_exit:
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+		"<== nxge_alloc_rx_mem_pool:status 0x%08x", status));
+
+	return (status);
+}
+
+static void
+nxge_free_rx_mem_pool(p_nxge_t nxgep)
+{
+	uint32_t		i, ndmas;
+	p_nxge_dma_pool_t	dma_poolp;
+	p_nxge_dma_common_t	*dma_buf_p;
+	p_nxge_dma_pool_t	dma_cntl_poolp;
+	p_nxge_dma_common_t	*dma_cntl_p;
+	uint32_t 		*num_chunks;
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool"));
+
+	dma_poolp = nxgep->rx_buf_pool_p;
+	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
+		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+			"<== nxge_free_rx_mem_pool "
+			"(null rx buf pool or buf not allocated)"));
+		return;
+	}
+
+	dma_cntl_poolp = nxgep->rx_cntl_pool_p;
+	if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
+		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+			"<== nxge_free_rx_mem_pool "
+			"(null rx cntl buf pool or cntl buf not allocated)"));
+		return;
+	}
+
+	dma_buf_p = dma_poolp->dma_buf_pool_p;
+	num_chunks = dma_poolp->num_chunks;
+
+	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
+	ndmas = dma_cntl_poolp->ndmas;
+
+	for (i = 0; i < ndmas; i++) {
+		nxge_free_rx_buf_dma(nxgep, dma_buf_p[i], num_chunks[i]);
+	}
+
+	for (i = 0; i < ndmas; i++) {
+		nxge_free_rx_cntl_dma(nxgep, dma_cntl_p[i]);
+	}
+
+	for (i = 0; i < ndmas; i++) {
+		KMEM_FREE(dma_buf_p[i],
+			sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
+		KMEM_FREE(dma_cntl_p[i], sizeof (nxge_dma_common_t));
+	}
+
+	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
+	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t));
+	KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t));
+	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t));
+	KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t));
+
+	nxgep->rx_buf_pool_p = NULL;
+	nxgep->rx_cntl_pool_p = NULL;
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool"));
+}
+
+
+static nxge_status_t
+nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel,
+	p_nxge_dma_common_t *dmap,
+	size_t alloc_size, size_t block_size, uint32_t *num_chunks)
+{
+	p_nxge_dma_common_t 	rx_dmap;
+	nxge_status_t		status = NXGE_OK;
+	size_t			total_alloc_size;
+	size_t			allocated = 0;
+	int			i, size_index, array_size;
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma"));
+
+	rx_dmap = (p_nxge_dma_common_t)
+			KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK,
+			KM_SLEEP);
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+		" alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ",
+		dma_channel, alloc_size, block_size, dmap));
+
+	total_alloc_size = alloc_size;
+
+#if defined(RX_USE_RECLAIM_POST)
+	total_alloc_size = alloc_size + alloc_size/4;
+#endif
+
+	i = 0;
+	size_index = 0;
+	array_size =  sizeof (alloc_sizes)/sizeof (size_t);
+	while ((size_index < array_size) &&
+			(alloc_sizes[size_index] < alloc_size))
+			size_index++;
+	if (size_index >= array_size) {
+		size_index = array_size - 1;
+	}
+
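+	/*
+	 * Allocate the buffer area in up to NXGE_DMA_BLOCK chunks,
+	 * starting with the smallest alloc_sizes[] entry that covers the
+	 * request (or the largest entry if none does); when an allocation
+	 * fails, step down to the next smaller size and keep going until
+	 * the requested total has been satisfied.
+	 */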
+	while ((allocated < total_alloc_size) &&
+			(size_index >= 0) && (i < NXGE_DMA_BLOCK)) {
+		rx_dmap[i].dma_chunk_index = i;
+		rx_dmap[i].block_size = block_size;
+		rx_dmap[i].alength = alloc_sizes[size_index];
+		rx_dmap[i].orig_alength = rx_dmap[i].alength;
+		rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
+		rx_dmap[i].dma_channel = dma_channel;
+		rx_dmap[i].contig_alloc_type = B_FALSE;
+
+		/*
+		 * N2/NIU: data buffers must be contiguous as the driver
+		 *	   needs to call Hypervisor api to set up
+		 *	   logical pages.
+		 */
+		if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) {
+			rx_dmap[i].contig_alloc_type = B_TRUE;
+		}
+
+		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+			"alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
+			"i %d nblocks %d alength %d",
+			dma_channel, i, &rx_dmap[i], block_size,
+			i, rx_dmap[i].nblocks,
+			rx_dmap[i].alength));
+		status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
+			&nxge_rx_dma_attr,
+			rx_dmap[i].alength,
+			&nxge_dev_buf_dma_acc_attr,
+			DDI_DMA_READ | DDI_DMA_STREAMING,
+			(p_nxge_dma_common_t)(&rx_dmap[i]));
+		if (status != NXGE_OK) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				" nxge_alloc_rx_buf_dma: Alloc Failed "));
+			size_index--;
+		} else {
+			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+				" alloc_rx_buf_dma allocated rdc %d "
+				"chunk %d size %x dvma %x bufp %llx ",
+				dma_channel, i, rx_dmap[i].alength,
+				rx_dmap[i].ioaddr_pp, &rx_dmap[i]));
+			i++;
+			allocated += alloc_sizes[size_index];
+		}
+	}
+
+
+	if (allocated < total_alloc_size) {
+		goto nxge_alloc_rx_mem_fail1;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+		" alloc_rx_buf_dma rdc %d allocated %d chunks",
+		dma_channel, i));
+	*num_chunks = i;
+	*dmap = rx_dmap;
+
+	goto nxge_alloc_rx_mem_exit;
+
+nxge_alloc_rx_mem_fail1:
+	KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
+
+nxge_alloc_rx_mem_exit:
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+		"<== nxge_alloc_rx_buf_dma status 0x%08x", status));
+
+	return (status);
+}
+
+/*ARGSUSED*/
+static void
+nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap,
+    uint32_t num_chunks)
+{
+	int		i;
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+		"==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks));
+
+	for (i = 0; i < num_chunks; i++) {
+		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+			"==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx",
+				i, dmap));
+		nxge_dma_mem_free(dmap++);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_buf_dma"));
+}
+
+/*ARGSUSED*/
+static nxge_status_t
+nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel,
+    p_nxge_dma_common_t *dmap, size_t size)
+{
+	p_nxge_dma_common_t 	rx_dmap;
+	nxge_status_t		status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma"));
+
+	rx_dmap = (p_nxge_dma_common_t)
+			KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);
+
+	rx_dmap->contig_alloc_type = B_FALSE;
+
+	status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
+			&nxge_desc_dma_attr,
+			size,
+			&nxge_dev_desc_dma_acc_attr,
+			DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
+			rx_dmap);
+	if (status != NXGE_OK) {
+		goto nxge_alloc_rx_cntl_dma_fail1;
+	}
+
+	*dmap = rx_dmap;
+	goto nxge_alloc_rx_cntl_dma_exit;
+
+nxge_alloc_rx_cntl_dma_fail1:
+	KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t));
+
+nxge_alloc_rx_cntl_dma_exit:
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+		"<== nxge_alloc_rx_cntl_dma status 0x%08x", status));
+
+	return (status);
+}
+
+/*ARGSUSED*/
+static void
+nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
+{
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma"));
+
+	nxge_dma_mem_free(dmap);
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma"));
+}
+
+static nxge_status_t
+nxge_alloc_tx_mem_pool(p_nxge_t nxgep)
+{
+	nxge_status_t		status = NXGE_OK;
+	int			i, j;
+	uint32_t		ndmas, st_tdc;
+	p_nxge_dma_pt_cfg_t	p_all_cfgp;
+	p_nxge_hw_pt_cfg_t	p_cfgp;
+	p_nxge_dma_pool_t	dma_poolp;
+	p_nxge_dma_common_t	*dma_buf_p;
+	p_nxge_dma_pool_t	dma_cntl_poolp;
+	p_nxge_dma_common_t	*dma_cntl_p;
+	size_t			tx_buf_alloc_size;
+	size_t			tx_cntl_alloc_size;
+	uint32_t		*num_chunks; /* per dma */
+
+	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool"));
+
+	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
+	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
+	st_tdc = p_cfgp->start_tdc;
+	ndmas = p_cfgp->max_tdcs;
+
+	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool: "
+		"p_cfgp 0x%016llx start_tdc %d ndmas %d nxgep->max_tdcs %d",
+		p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, nxgep->max_tdcs));
+	/*
+	 * Allocate memory for each transmit DMA channel.
+	 */
+	dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
+			KM_SLEEP);
+	dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
+			sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);
+
+	dma_cntl_poolp = (p_nxge_dma_pool_t)
+			KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
+	dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
+			sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);
+
+#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
+	/*
+	 * N2/NIU has limitations on the descriptor sizes: contiguous
+	 * memory allocations for data buffers are limited to 4M
+	 * (contig_mem_alloc), and control buffers must be little
+	 * endian (allocated with the ddi/dki mem alloc functions).
+	 * The transmit ring is limited to 8K (including the mailbox).
+	 */
+	if (nxgep->niu_type == N2_NIU) {
+		if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) ||
+			(!ISP2(nxge_tx_ring_size))) {
+			nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX;
+		}
+	}
+#endif
+
+	nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size;
+
+	/*
+	 * Assume that each DMA channel will be configured with the default
+	 * transmit buffer size for copying transmit data.
+	 * (Packets with payloads over this limit will not be
+	 *  copied.)
+	 */
+	tx_buf_alloc_size = (nxge_bcopy_thresh * nxge_tx_ring_size);
+
+	/*
+	 * Addresses of transmit descriptor ring and the
+	 * mailbox must be all cache-aligned (64 bytes).
+	 */
+	tx_cntl_alloc_size = nxge_tx_ring_size;
+	tx_cntl_alloc_size *= (sizeof (tx_desc_t));
+	tx_cntl_alloc_size += sizeof (txdma_mailbox_t);
+
+#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
+	if (nxgep->niu_type == N2_NIU) {
+		if (!ISP2(tx_buf_alloc_size)) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"==> nxge_alloc_tx_mem_pool: "
+				" must be power of 2"));
+			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
+			goto nxge_alloc_tx_mem_pool_exit;
+		}
+
+		if (tx_buf_alloc_size > (1 << 22)) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"==> nxge_alloc_tx_mem_pool: "
+				" limit size to 4M"));
+			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
+			goto nxge_alloc_tx_mem_pool_exit;
+		}
+
+		if (tx_cntl_alloc_size < 0x2000) {
+			tx_cntl_alloc_size = 0x2000;
+		}
+	}
+#endif
+
+	num_chunks = (uint32_t *)KMEM_ZALLOC(
+			sizeof (uint32_t) * ndmas, KM_SLEEP);
+
+	/*
+	 * Allocate memory for transmit buffers and descriptor rings.
+	 * Replace allocation functions with interface functions provided
+	 * by the partition manager when it is available.
+	 *
+	 * Allocate memory for the transmit buffer pool.
+	 */
+	for (i = 0; i < ndmas; i++) {
+		num_chunks[i] = 0;
+		status = nxge_alloc_tx_buf_dma(nxgep, st_tdc, &dma_buf_p[i],
+					tx_buf_alloc_size,
+					nxge_bcopy_thresh, &num_chunks[i]);
+		if (status != NXGE_OK) {
+			break;
+		}
+		st_tdc++;
+	}
+	if (i < ndmas) {
+		goto nxge_alloc_tx_mem_pool_fail1;
+	}
+
+	st_tdc = p_cfgp->start_tdc;
+	/*
+	 * Allocate memory for descriptor rings and mailbox.
+	 */
+	for (j = 0; j < ndmas; j++) {
+		status = nxge_alloc_tx_cntl_dma(nxgep, st_tdc, &dma_cntl_p[j],
+					tx_cntl_alloc_size);
+		if (status != NXGE_OK) {
+			break;
+		}
+		st_tdc++;
+	}
+	if (j < ndmas) {
+		goto nxge_alloc_tx_mem_pool_fail2;
+	}
+
+	dma_poolp->ndmas = ndmas;
+	dma_poolp->num_chunks = num_chunks;
+	dma_poolp->buf_allocated = B_TRUE;
+	dma_poolp->dma_buf_pool_p = dma_buf_p;
+	nxgep->tx_buf_pool_p = dma_poolp;
+
+	dma_cntl_poolp->ndmas = ndmas;
+	dma_cntl_poolp->buf_allocated = B_TRUE;
+	dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
+	nxgep->tx_cntl_pool_p = dma_cntl_poolp;
+
+	NXGE_DEBUG_MSG((nxgep, MEM_CTL,
+		"==> nxge_alloc_tx_mem_pool: start_tdc %d "
+		"ndmas %d poolp->ndmas %d",
+		st_tdc, ndmas, dma_poolp->ndmas));
+
+	goto nxge_alloc_tx_mem_pool_exit;
+
+nxge_alloc_tx_mem_pool_fail2:
+	/* Free control buffers */
+	j--;
+	for (; j >= 0; j--) {
+		nxge_free_tx_cntl_dma(nxgep,
+			(p_nxge_dma_common_t)dma_cntl_p[j]);
+	}
+
+nxge_alloc_tx_mem_pool_fail1:
+	/* Free data buffers */
+	i--;
+	for (; i >= 0; i--) {
+		nxge_free_tx_buf_dma(nxgep, (p_nxge_dma_common_t)dma_buf_p[i],
+			num_chunks[i]);
+	}
+
+	KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t));
+	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t));
+	KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t));
+	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t));
+	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
+
+nxge_alloc_tx_mem_pool_exit:
+	NXGE_DEBUG_MSG((nxgep, MEM_CTL,
+		"<== nxge_alloc_tx_mem_pool:status 0x%08x", status));
+
+	return (status);
+}
+
+static nxge_status_t
+nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel,
+    p_nxge_dma_common_t *dmap, size_t alloc_size,
+    size_t block_size, uint32_t *num_chunks)
+{
+	p_nxge_dma_common_t 	tx_dmap;
+	nxge_status_t		status = NXGE_OK;
+	size_t			total_alloc_size;
+	size_t			allocated = 0;
+	int			i, size_index, array_size;
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma"));
+
+	tx_dmap = (p_nxge_dma_common_t)
+		KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK,
+			KM_SLEEP);
+
+	total_alloc_size = alloc_size;
+	i = 0;
+	size_index = 0;
+	array_size =  sizeof (alloc_sizes) /  sizeof (size_t);
+	while ((size_index < array_size) &&
+		(alloc_sizes[size_index] < alloc_size))
+		size_index++;
+	if (size_index >= array_size) {
+		size_index = array_size - 1;
+	}
+
+	while ((allocated < total_alloc_size) &&
+			(size_index >= 0) && (i < NXGE_DMA_BLOCK)) {
+
+		tx_dmap[i].dma_chunk_index = i;
+		tx_dmap[i].block_size = block_size;
+		tx_dmap[i].alength = alloc_sizes[size_index];
+		tx_dmap[i].orig_alength = tx_dmap[i].alength;
+		tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
+		tx_dmap[i].dma_channel = dma_channel;
+		tx_dmap[i].contig_alloc_type = B_FALSE;
+
+		/*
+		 * N2/NIU: data buffers must be contiguous as the driver
+		 *	   needs to call Hypervisor api to set up
+		 *	   logical pages.
+		 */
+		if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) {
+			tx_dmap[i].contig_alloc_type = B_TRUE;
+		}
+
+		status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
+			&nxge_tx_dma_attr,
+			tx_dmap[i].alength,
+			&nxge_dev_buf_dma_acc_attr,
+			DDI_DMA_WRITE | DDI_DMA_STREAMING,
+			(p_nxge_dma_common_t)(&tx_dmap[i]));
+		if (status != NXGE_OK) {
+			size_index--;
+		} else {
+			i++;
+			allocated += alloc_sizes[size_index];
+		}
+	}
+
+	if (allocated < total_alloc_size) {
+		goto nxge_alloc_tx_mem_fail1;
+	}
+
+	*num_chunks = i;
+	*dmap = tx_dmap;
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+		"==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d",
+		*dmap, i));
+	goto nxge_alloc_tx_mem_exit;
+
+nxge_alloc_tx_mem_fail1:
+	KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
+
+nxge_alloc_tx_mem_exit:
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+		"<== nxge_alloc_tx_buf_dma status 0x%08x", status));
+
+	return (status);
+}
+
+/*ARGSUSED*/
+static void
+nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap,
+    uint32_t num_chunks)
+{
+	int		i;
+
+	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma"));
+
+	for (i = 0; i < num_chunks; i++) {
+		nxge_dma_mem_free(dmap++);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma"));
+}
+
+/*ARGSUSED*/
+static nxge_status_t
+nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel,
+    p_nxge_dma_common_t *dmap, size_t size)
+{
+	p_nxge_dma_common_t 	tx_dmap;
+	nxge_status_t		status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma"));
+	tx_dmap = (p_nxge_dma_common_t)
+			KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);
+
+	tx_dmap->contig_alloc_type = B_FALSE;
+
+	status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
+			&nxge_desc_dma_attr,
+			size,
+			&nxge_dev_desc_dma_acc_attr,
+			DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
+			tx_dmap);
+	if (status != NXGE_OK) {
+		goto nxge_alloc_tx_cntl_dma_fail1;
+	}
+
+	*dmap = tx_dmap;
+	goto nxge_alloc_tx_cntl_dma_exit;
+
+nxge_alloc_tx_cntl_dma_fail1:
+	KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t));
+
+nxge_alloc_tx_cntl_dma_exit:
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+		"<== nxge_alloc_tx_cntl_dma status 0x%08x", status));
+
+	return (status);
+}
+
+/*ARGSUSED*/
+static void
+nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
+{
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma"));
+
+	nxge_dma_mem_free(dmap);
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma"));
+}
+
+static void
+nxge_free_tx_mem_pool(p_nxge_t nxgep)
+{
+	uint32_t		i, ndmas;
+	p_nxge_dma_pool_t	dma_poolp;
+	p_nxge_dma_common_t	*dma_buf_p;
+	p_nxge_dma_pool_t	dma_cntl_poolp;
+	p_nxge_dma_common_t	*dma_cntl_p;
+	uint32_t 		*num_chunks;
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_free_tx_mem_pool"));
+
+	dma_poolp = nxgep->tx_buf_pool_p;
+	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
+		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+			"<== nxge_free_tx_mem_pool "
+			"(null tx buf pool or buf not allocated)"));
+		return;
+	}
+
+	dma_cntl_poolp = nxgep->tx_cntl_pool_p;
+	if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
+		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+			"<== nxge_free_tx_mem_pool "
+			"(null tx cntl buf pool or cntl buf not allocated)"));
+		return;
+	}
+
+	dma_buf_p = dma_poolp->dma_buf_pool_p;
+	num_chunks = dma_poolp->num_chunks;
+
+	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
+	ndmas = dma_cntl_poolp->ndmas;
+
+	for (i = 0; i < ndmas; i++) {
+		nxge_free_tx_buf_dma(nxgep, dma_buf_p[i], num_chunks[i]);
+	}
+
+	for (i = 0; i < ndmas; i++) {
+		nxge_free_tx_cntl_dma(nxgep, dma_cntl_p[i]);
+	}
+
+	for (i = 0; i < ndmas; i++) {
+		KMEM_FREE(dma_buf_p[i],
+			sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
+		KMEM_FREE(dma_cntl_p[i], sizeof (nxge_dma_common_t));
+	}
+
+	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
+	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t));
+	KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t));
+	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t));
+	KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t));
+
+	nxgep->tx_buf_pool_p = NULL;
+	nxgep->tx_cntl_pool_p = NULL;
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_free_tx_mem_pool"));
+}
+
+/*ARGSUSED*/
+static nxge_status_t
+nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method,
+	struct ddi_dma_attr *dma_attrp,
+	size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags,
+	p_nxge_dma_common_t dma_p)
+{
+	caddr_t 		kaddrp;
+	int			ddi_status = DDI_SUCCESS;
+	boolean_t		contig_alloc_type;
+
+	contig_alloc_type = dma_p->contig_alloc_type;
+
+	if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) {
+		/*
+		 * contig_alloc_type (contiguous memory allocation) is
+		 * only allowed for N2/NIU.
+		 */
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_dma_mem_alloc: alloc type not allowed (%d)",
+			dma_p->contig_alloc_type));
+		return (NXGE_ERROR | NXGE_DDI_FAILED);
+	}
+
+	dma_p->dma_handle = NULL;
+	dma_p->acc_handle = NULL;
+	dma_p->kaddrp = dma_p->last_kaddrp = NULL;
+	dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL;
+	ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp,
+		DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
+	if (ddi_status != DDI_SUCCESS) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
+		return (NXGE_ERROR | NXGE_DDI_FAILED);
+	}
+
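+	/*
+	 * Two allocation paths: the normal case (B_FALSE) uses
+	 * ddi_dma_mem_alloc(); the N2/NIU case (B_TRUE, sun4v only) uses
+	 * contig_mem_alloc() so that the buffer is physically contiguous
+	 * for the hypervisor logical-page setup.
+	 */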
+	switch (contig_alloc_type) {
+	case B_FALSE:
+		ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length,
+			acc_attr_p,
+			xfer_flags,
+			DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
+			&dma_p->acc_handle);
+		if (ddi_status != DDI_SUCCESS) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"nxge_dma_mem_alloc:ddi_dma_mem_alloc failed"));
+			ddi_dma_free_handle(&dma_p->dma_handle);
+			dma_p->dma_handle = NULL;
+			return (NXGE_ERROR | NXGE_DDI_FAILED);
+		}
+		if (dma_p->alength < length) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"nxge_dma_mem_alloc:ddi_dma_mem_alloc "
+				"< length."));
+			ddi_dma_mem_free(&dma_p->acc_handle);
+			ddi_dma_free_handle(&dma_p->dma_handle);
+			dma_p->acc_handle = NULL;
+			dma_p->dma_handle = NULL;
+			return (NXGE_ERROR);
+		}
+
+		ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
+			kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
+			&dma_p->dma_cookie, &dma_p->ncookies);
+		if (ddi_status != DDI_DMA_MAPPED) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"nxge_dma_mem_alloc:ddi_dma_addr_bind failed "
+				"(status 0x%x ncookies %d.)", ddi_status,
+				dma_p->ncookies));
+			if (dma_p->acc_handle) {
+				ddi_dma_mem_free(&dma_p->acc_handle);
+				dma_p->acc_handle = NULL;
+			}
+			ddi_dma_free_handle(&dma_p->dma_handle);
+			dma_p->dma_handle = NULL;
+			return (NXGE_ERROR | NXGE_DDI_FAILED);
+		}
+
+		if (dma_p->ncookies != 1) {
+			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+				"nxge_dma_mem_alloc:ddi_dma_addr_bind "
+				"> 1 cookie "
+				"(status 0x%x ncookies %d.)", ddi_status,
+				dma_p->ncookies));
+			if (dma_p->acc_handle) {
+				ddi_dma_mem_free(&dma_p->acc_handle);
+				dma_p->acc_handle = NULL;
+			}
+			ddi_dma_free_handle(&dma_p->dma_handle);
+			dma_p->dma_handle = NULL;
+			return (NXGE_ERROR);
+		}
+		break;
+
+#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
+	case B_TRUE:
+		kaddrp = (caddr_t)contig_mem_alloc(length);
+		if (kaddrp == NULL) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"nxge_dma_mem_alloc:contig_mem_alloc failed."));
+			ddi_dma_free_handle(&dma_p->dma_handle);
+			return (NXGE_ERROR | NXGE_DDI_FAILED);
+		}
+
+		dma_p->alength = length;
+		ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
+			kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
+			&dma_p->dma_cookie, &dma_p->ncookies);
+		if (ddi_status != DDI_DMA_MAPPED) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"nxge_dma_mem_alloc:ddi_dma_addr_bind failed "
+				"(status 0x%x ncookies %d.)", ddi_status,
+				dma_p->ncookies));
+
+			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+				"==> nxge_dma_mem_alloc: (not mapped)"
+				"length %lu (0x%x) "
+				"free contig kaddrp $%p "
+				"va_to_pa $%p",
+				length, length,
+				kaddrp,
+				va_to_pa(kaddrp)));
+
+
+			contig_mem_free((void *)kaddrp, length);
+			ddi_dma_free_handle(&dma_p->dma_handle);
+
+			dma_p->dma_handle = NULL;
+			dma_p->acc_handle = NULL;
+			dma_p->alength = 0;
+			dma_p->kaddrp = NULL;
+
+			return (NXGE_ERROR | NXGE_DDI_FAILED);
+		}
+
+		if (dma_p->ncookies != 1 ||
+			(dma_p->dma_cookie.dmac_laddress == NULL)) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"nxge_dma_mem_alloc:ddi_dma_addr_bind > 1 "
+				"cookie or "
+				"dmac_laddress is NULL $%p size %d "
+				" (status 0x%x ncookies %d.)",
+				dma_p->dma_cookie.dmac_laddress,
+				dma_p->dma_cookie.dmac_size,
+				ddi_status,
+				dma_p->ncookies));
+
+			contig_mem_free((void *)kaddrp, length);
+			ddi_dma_free_handle(&dma_p->dma_handle);
+
+			dma_p->alength = 0;
+			dma_p->dma_handle = NULL;
+			dma_p->acc_handle = NULL;
+			dma_p->kaddrp = NULL;
+
+			return (NXGE_ERROR | NXGE_DDI_FAILED);
+		}
+		break;
+
+#else
+	case B_TRUE:
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_dma_mem_alloc: invalid alloc type for !sun4v"));
+		return (NXGE_ERROR | NXGE_DDI_FAILED);
+#endif
+	}
+
+	dma_p->kaddrp = kaddrp;
+	dma_p->last_kaddrp = (unsigned char *)kaddrp +
+			dma_p->alength - RXBUF_64B_ALIGNED;
+	dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress;
+	dma_p->last_ioaddr_pp =
+		(unsigned char *)dma_p->dma_cookie.dmac_laddress +
+				dma_p->alength - RXBUF_64B_ALIGNED;
+
+	NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);
+
+#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
+	dma_p->orig_ioaddr_pp =
+		(unsigned char *)dma_p->dma_cookie.dmac_laddress;
+	dma_p->orig_alength = length;
+	dma_p->orig_kaddrp = kaddrp;
+	dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp);
+#endif
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: "
+		"dma buffer allocated: dma_p $%p "
+		"return dmac_ladress from cookie $%p cookie dmac_size %d "
+		"dma_p->ioaddr_p $%p "
+		"dma_p->orig_ioaddr_p $%p "
+		"orig_vatopa $%p "
+		"alength %d (0x%x) "
+		"kaddrp $%p "
+		"length %d (0x%x)",
+		dma_p,
+		dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size,
+		dma_p->ioaddr_pp,
+		dma_p->orig_ioaddr_pp,
+		dma_p->orig_vatopa,
+		dma_p->alength, dma_p->alength,
+		kaddrp,
+		length, length));
+
+	return (NXGE_OK);
+}
+
+static void
+nxge_dma_mem_free(p_nxge_dma_common_t dma_p)
+{
+	if (dma_p->dma_handle != NULL) {
+		if (dma_p->ncookies) {
+			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
+			dma_p->ncookies = 0;
+		}
+		ddi_dma_free_handle(&dma_p->dma_handle);
+		dma_p->dma_handle = NULL;
+	}
+
+	if (dma_p->acc_handle != NULL) {
+		ddi_dma_mem_free(&dma_p->acc_handle);
+		dma_p->acc_handle = NULL;
+		NPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
+	}
+
+#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
+	if (dma_p->contig_alloc_type &&
+			dma_p->orig_kaddrp && dma_p->orig_alength) {
+		NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: "
+			"kaddrp $%p (orig_kaddrp $%p) "
+			"mem type %d "
+			"orig_alength %d "
+			"alength 0x%x (%d)",
+			dma_p->kaddrp,
+			dma_p->orig_kaddrp,
+			dma_p->contig_alloc_type,
+			dma_p->orig_alength,
+			dma_p->alength, dma_p->alength));
+
+		contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength);
+		dma_p->orig_alength = 0;
+		dma_p->orig_kaddrp = NULL;
+		dma_p->contig_alloc_type = B_FALSE;
+	}
+#endif
+	dma_p->kaddrp = NULL;
+	dma_p->alength = 0;
+}
+
+/*
+ *	nxge_m_start() -- start transmitting and receiving.
+ *
+ *	This function is called by the MAC layer when the first
+ *	stream is opened to prepare the hardware for sending
+ *	and receiving packets.
+ */
+static int
+nxge_m_start(void *arg)
+{
+	p_nxge_t 	nxgep = (p_nxge_t)arg;
+
+	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start"));
+
+	MUTEX_ENTER(nxgep->genlock);
+	if (nxge_init(nxgep) != NXGE_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"<== nxge_m_start: initialization failed"));
+		MUTEX_EXIT(nxgep->genlock);
+		return (EIO);
+	}
+
+	if (nxgep->nxge_mac_state == NXGE_MAC_STARTED)
+		goto nxge_m_start_exit;
+	/*
+	 * Start timer to check the system error and tx hangs
+	 */
+	nxgep->nxge_timerid = nxge_start_timer(nxgep, nxge_check_hw_state,
+		NXGE_CHECK_TIMER);
+
+	nxgep->link_notify = B_TRUE;
+
+	nxgep->nxge_mac_state = NXGE_MAC_STARTED;
+
+nxge_m_start_exit:
+	MUTEX_EXIT(nxgep->genlock);
+	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start"));
+	return (0);
+}
+
+/*
+ *	nxge_m_stop(): stop transmitting and receiving.
+ */
+static void
+nxge_m_stop(void *arg)
+{
+	p_nxge_t 	nxgep = (p_nxge_t)arg;
+
+	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop"));
+
+	if (nxgep->nxge_timerid) {
+		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
+		nxgep->nxge_timerid = 0;
+	}
+
+	MUTEX_ENTER(nxgep->genlock);
+	nxge_uninit(nxgep);
+
+	nxgep->nxge_mac_state = NXGE_MAC_STOPPED;
+
+	MUTEX_EXIT(nxgep->genlock);
+
+	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop"));
+}
+
+static int
+nxge_m_unicst(void *arg, const uint8_t *macaddr)
+{
+	p_nxge_t 	nxgep = (p_nxge_t)arg;
+	struct 		ether_addr addrp;
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_m_unicst"));
+
+	bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL);
+	if (nxge_set_mac_addr(nxgep, &addrp)) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"<== nxge_m_unicst: set unicast failed"));
+		return (EINVAL);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_unicst"));
+
+	return (0);
+}
+
+static int
+nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
+{
+	p_nxge_t 	nxgep = (p_nxge_t)arg;
+	struct 		ether_addr addrp;
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+		"==> nxge_m_multicst: add %d", add));
+
+	bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
+	if (add) {
+		if (nxge_add_mcast_addr(nxgep, &addrp)) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"<== nxge_m_multicst: add multicast failed"));
+			return (EINVAL);
+		}
+	} else {
+		if (nxge_del_mcast_addr(nxgep, &addrp)) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"<== nxge_m_multicst: del multicast failed"));
+			return (EINVAL);
+		}
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst"));
+
+	return (0);
+}
+
+static int
+nxge_m_promisc(void *arg, boolean_t on)
+{
+	p_nxge_t 	nxgep = (p_nxge_t)arg;
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+		"==> nxge_m_promisc: on %d", on));
+
+	if (nxge_set_promisc(nxgep, on)) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"<== nxge_m_promisc: set promisc failed"));
+		return (EINVAL);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+		"<== nxge_m_promisc: on %d", on));
+
+	return (0);
+}
+
+static void
+nxge_m_ioctl(void *arg,  queue_t *wq, mblk_t *mp)
+{
+	p_nxge_t 	nxgep = (p_nxge_t)arg;
+	struct 		iocblk *iocp = (struct iocblk *)mp->b_rptr;
+	boolean_t 	need_privilege;
+	int 		err;
+	int 		cmd;
+
+	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl"));
+
+	iocp->ioc_error = 0;
+	need_privilege = B_TRUE;
+	cmd = iocp->ioc_cmd;
+	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd));
+	switch (cmd) {
+	default:
+		miocnak(wq, mp, 0, EINVAL);
+		NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid"));
+		return;
+
+	case LB_GET_INFO_SIZE:
+	case LB_GET_INFO:
+	case LB_GET_MODE:
+		need_privilege = B_FALSE;
+		break;
+	case LB_SET_MODE:
+		break;
+
+	case ND_GET:
+		need_privilege = B_FALSE;
+		break;
+	case ND_SET:
+		break;
+
+	case NXGE_GET_MII:
+	case NXGE_PUT_MII:
+	case NXGE_GET64:
+	case NXGE_PUT64:
+	case NXGE_GET_TX_RING_SZ:
+	case NXGE_GET_TX_DESC:
+	case NXGE_TX_SIDE_RESET:
+	case NXGE_RX_SIDE_RESET:
+	case NXGE_GLOBAL_RESET:
+	case NXGE_RESET_MAC:
+	case NXGE_TX_REGS_DUMP:
+	case NXGE_RX_REGS_DUMP:
+	case NXGE_INT_REGS_DUMP:
+	case NXGE_VIR_INT_REGS_DUMP:
+	case NXGE_PUT_TCAM:
+	case NXGE_GET_TCAM:
+	case NXGE_RTRACE:
+	case NXGE_RDUMP:
+
+		need_privilege = B_FALSE;
+		break;
+	case NXGE_INJECT_ERR:
+		cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n");
+		nxge_err_inject(nxgep, wq, mp);
+		break;
+	}
+
+	if (need_privilege) {
+		if (secpolicy_net_config != NULL)
+			err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
+		else
+			err = drv_priv(iocp->ioc_cr);
+		if (err != 0) {
+			miocnak(wq, mp, 0, err);
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"<== nxge_m_ioctl: no priv"));
+			return;
+		}
+	}
+
+	switch (cmd) {
+	case ND_GET:
+		NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "ND_GET command"));
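+		/* FALLTHROUGH */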
+	case ND_SET:
+		NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "ND_SET command"));
+		nxge_param_ioctl(nxgep, wq, mp, iocp);
+		break;
+
+	case LB_GET_MODE:
+	case LB_SET_MODE:
+	case LB_GET_INFO_SIZE:
+	case LB_GET_INFO:
+		nxge_loopback_ioctl(nxgep, wq, mp, iocp);
+		break;
+
+	case NXGE_GET_MII:
+	case NXGE_PUT_MII:
+	case NXGE_PUT_TCAM:
+	case NXGE_GET_TCAM:
+	case NXGE_GET64:
+	case NXGE_PUT64:
+	case NXGE_GET_TX_RING_SZ:
+	case NXGE_GET_TX_DESC:
+	case NXGE_TX_SIDE_RESET:
+	case NXGE_RX_SIDE_RESET:
+	case NXGE_GLOBAL_RESET:
+	case NXGE_RESET_MAC:
+	case NXGE_TX_REGS_DUMP:
+	case NXGE_RX_REGS_DUMP:
+	case NXGE_INT_REGS_DUMP:
+	case NXGE_VIR_INT_REGS_DUMP:
+		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
+			"==> nxge_m_ioctl: cmd 0x%x", cmd));
+		nxge_hw_ioctl(nxgep, wq, mp, iocp);
+		break;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl"));
+}
+
+extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count);
+
+static void
+nxge_m_resources(void *arg)
+{
+	p_nxge_t		nxgep = arg;
+	mac_rx_fifo_t 		mrf;
+	p_rx_rcr_rings_t	rcr_rings;
+	p_rx_rcr_ring_t		*rcr_p;
+	uint32_t		i, ndmas;
+	nxge_status_t		status;
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_m_resources"));
+
+	MUTEX_ENTER(nxgep->genlock);
+
+	/*
+	 * CR 6492541 Check to see if the drv_state has been initialized;
+	 * if not, call nxge_init().
+	 */
+	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
+		status = nxge_init(nxgep);
+		if (status != NXGE_OK)
+			goto nxge_m_resources_exit;
+	}
+
+	mrf.mrf_type = MAC_RX_FIFO;
+	mrf.mrf_blank = nxge_rx_hw_blank;
+	mrf.mrf_arg = (void *)nxgep;
+
+	mrf.mrf_normal_blank_time = 128;
+	mrf.mrf_normal_pkt_count = 8;
+	rcr_rings = nxgep->rx_rcr_rings;
+	rcr_p = rcr_rings->rcr_rings;
+	ndmas = rcr_rings->ndmas;
+
+	/*
+	 * Export our receive resources to the MAC layer.
+	 */
+	for (i = 0; i < ndmas; i++) {
+		((p_rx_rcr_ring_t)rcr_p[i])->rcr_mac_handle =
+				mac_resource_add(nxgep->mach,
+				    (mac_resource_t *)&mrf);
+
+		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
+			"==> nxge_m_resources: vdma %d dma %d "
+			"rcrptr 0x%016llx mac_handle 0x%016llx",
+			i, ((p_rx_rcr_ring_t)rcr_p[i])->rdc,
+			rcr_p[i],
+			((p_rx_rcr_ring_t)rcr_p[i])->rcr_mac_handle));
+	}
+
+nxge_m_resources_exit:
+	MUTEX_EXIT(nxgep->genlock);
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_m_resources"));
+}
+
+static void
+nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot, boolean_t factory)
+{
+	p_nxge_mmac_stats_t mmac_stats;
+	int i;
+	nxge_mmac_t *mmac_info;
+
+	mmac_info = &nxgep->nxge_mmac_info;
+
+	mmac_stats = &nxgep->statsp->mmac_stats;
+	mmac_stats->mmac_max_cnt = mmac_info->num_mmac;
+	mmac_stats->mmac_avail_cnt = mmac_info->naddrfree;
+
+	for (i = 0; i < ETHERADDRL; i++) {
+		if (factory) {
+			mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
+			= mmac_info->factory_mac_pool[slot][(ETHERADDRL-1) - i];
+		} else {
+			mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
+			= mmac_info->mac_pool[slot].addr[(ETHERADDRL - 1) - i];
+		}
+	}
+}
+
+/*
+ * nxge_altmac_set() -- Set an alternate MAC address
+ */
+static int
+nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, mac_addr_slot_t slot)
+{
+	uint8_t addrn;
+	uint8_t portn;
+	npi_mac_addr_t altmac;
+
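+	/*
+	 * Pack the 6-byte MAC address into three 16-bit words for the
+	 * alternate MAC address entry: maddr[0] (the most significant
+	 * octet) ends up in the high byte of w2 and maddr[5] in the
+	 * low byte of w0.
+	 */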
+	altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff);
+	altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff);
+	altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff);
+
+	portn = nxgep->mac.portnum;
+	addrn = (uint8_t)slot - 1;
+
+	if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET, portn,
+		addrn, &altmac) != NPI_SUCCESS)
+		return (EIO);
+	/*
+	 * Enable comparison with the alternate MAC address.
+	 * While the first alternate addr is enabled by bit 1 of register
+	 * BMAC_ALTAD_CMPEN, it is enabled by bit 0 of register
+	 * XMAC_ADDR_CMPEN, so slot needs to be converted to addrn
+	 * accordingly before calling npi_mac_altaddr_enable.
+	 */
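+	/*
+	 * For example, slot 1 (the first alternate address) maps to
+	 * addrn 0 on the XMAC ports and to addrn 1 on the BMAC ports.
+	 */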
+	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
+		addrn = (uint8_t)slot - 1;
+	else
+		addrn = (uint8_t)slot;
+
+	if (npi_mac_altaddr_enable(nxgep->npi_handle, portn, addrn)
+		!= NPI_SUCCESS)
+		return (EIO);
+
+	return (0);
+}
+
+/*
+ * nxge_m_mmac_add() - find an unused address slot, set the address
+ * value to the one specified, enable the port to start filtering on
+ * the new MAC address.  Returns 0 on success.
+ */
+static int
+nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr)
+{
+	p_nxge_t nxgep = arg;
+	mac_addr_slot_t slot;
+	nxge_mmac_t *mmac_info;
+	int err;
+	nxge_status_t status;
+
+	mutex_enter(nxgep->genlock);
+
+	/*
+	 * Make sure that nxge is initialized, if _start() has
+	 * not been called.
+	 */
+	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
+		status = nxge_init(nxgep);
+		if (status != NXGE_OK) {
+			mutex_exit(nxgep->genlock);
+			return (ENXIO);
+		}
+	}
+
+	mmac_info = &nxgep->nxge_mmac_info;
+	if (mmac_info->naddrfree == 0) {
+		mutex_exit(nxgep->genlock);
+		return (ENOSPC);
+	}
+	if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr,
+		maddr->mma_addrlen)) {
+		mutex_exit(nxgep->genlock);
+		return (EINVAL);
+	}
+	/*
+	 * 	Search for the first available slot. Because naddrfree
+	 * is not zero, we are guaranteed to find one.
+	 * 	Slot 0 is for unique (primary) MAC. The first alternate
+	 * MAC slot is slot 1.
+	 *	Each of the first two ports of Neptune has 16 alternate
+	 * MAC slots but only the first 7 (of 15) slots have assigned factory
+	 * MAC addresses. We first search among the slots without bundled
+	 * factory MACs. If we fail to find one in that range, then we
+	 * search the slots with bundled factory MACs.  A factory MAC
+	 * will be wasted while the slot is used with a user MAC address.
+	 * But the slot could be used by factory MAC again after calling
+	 * nxge_m_mmac_remove and nxge_m_mmac_reserve.
+	 */
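+	/*
+	 * Slot layout, as used by the search below:
+	 *   slot 0                             primary MAC (not in this pool)
+	 *   slots 1..num_factory_mmac          alternates with factory MACs
+	 *   slots num_factory_mmac+1..num_mmac alternates without factory MACs
+	 * The non-factory range is searched first, then the factory range.
+	 */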
+	if (mmac_info->num_factory_mmac < mmac_info->num_mmac) {
+		for (slot = mmac_info->num_factory_mmac + 1;
+			slot <= mmac_info->num_mmac; slot++) {
+			if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
+				break;
+		}
+		if (slot > mmac_info->num_mmac) {
+			for (slot = 1; slot <= mmac_info->num_factory_mmac;
+				slot++) {
+				if (!(mmac_info->mac_pool[slot].flags
+					& MMAC_SLOT_USED))
+					break;
+			}
+		}
+	} else {
+		for (slot = 1; slot <= mmac_info->num_mmac; slot++) {
+			if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
+				break;
+		}
+	}
+	ASSERT(slot <= mmac_info->num_mmac);
+	if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot)) != 0) {
+		mutex_exit(nxgep->genlock);
+		return (err);
+	}
+	bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, ETHERADDRL);
+	mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED;
+	mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
+	mmac_info->naddrfree--;
+	nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
+
+	maddr->mma_slot = slot;
+
+	mutex_exit(nxgep->genlock);
+	return (0);
+}
+
+/*
+ * This function reserves an unused slot and programs the slot and the HW
+ * with a factory mac address.
+ */
+static int
+nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr)
+{
+	p_nxge_t nxgep = arg;
+	mac_addr_slot_t slot;
+	nxge_mmac_t *mmac_info;
+	int err;
+	nxge_status_t status;
+
+	mutex_enter(nxgep->genlock);
+
+	/*
+	 * Make sure that nxge is initialized, if _start() has
+	 * not been called.
+	 */
+	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
+		status = nxge_init(nxgep);
+		if (status != NXGE_OK) {
+			mutex_exit(nxgep->genlock);
+			return (ENXIO);
+		}
+	}
+
+	mmac_info = &nxgep->nxge_mmac_info;
+	if (mmac_info->naddrfree == 0) {
+		mutex_exit(nxgep->genlock);
+		return (ENOSPC);
+	}
+
+	slot = maddr->mma_slot;
+	if (slot == -1) {  /* -1: Take the first available slot */
+		for (slot = 1; slot <= mmac_info->num_factory_mmac; slot++) {
+			if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
+				break;
+		}
+		if (slot > mmac_info->num_factory_mmac) {
+			mutex_exit(nxgep->genlock);
+			return (ENOSPC);
+		}
+	}
+	if (slot < 1 || slot > mmac_info->num_factory_mmac) {
+		/*
+		 * Do not support factory MAC at a slot greater than
+		 * num_factory_mmac even when there are available factory
+		 * MAC addresses because the alternate MACs are bundled with
+		 * slot[1] through slot[num_factory_mmac]
+		 */
+		mutex_exit(nxgep->genlock);
+		return (EINVAL);
+	}
+	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
+		mutex_exit(nxgep->genlock);
+		return (EBUSY);
+	}
+	/* Verify the address to be reserved */
+	if (!mac_unicst_verify(nxgep->mach,
+		mmac_info->factory_mac_pool[slot], ETHERADDRL)) {
+		mutex_exit(nxgep->genlock);
+		return (EINVAL);
+	}
+	if (err = nxge_altmac_set(nxgep,
+		mmac_info->factory_mac_pool[slot], slot)) {
+		mutex_exit(nxgep->genlock);
+		return (err);
+	}
+	bcopy(mmac_info->factory_mac_pool[slot], maddr->mma_addr, ETHERADDRL);
+	mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED | MMAC_VENDOR_ADDR;
+	mmac_info->naddrfree--;
+
+	nxge_mmac_kstat_update(nxgep, slot, B_TRUE);
+	mutex_exit(nxgep->genlock);
+
+	/* Pass info back to the caller */
+	maddr->mma_slot = slot;
+	maddr->mma_addrlen = ETHERADDRL;
+	maddr->mma_flags = MMAC_SLOT_USED | MMAC_VENDOR_ADDR;
+
+	return (0);
+}
+
+/*
+ * Remove the specified mac address and update the HW not to filter
+ * the mac address anymore.
+ */
+static int
+nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot)
+{
+	p_nxge_t nxgep = arg;
+	nxge_mmac_t *mmac_info;
+	uint8_t addrn;
+	uint8_t portn;
+	int err = 0;
+	nxge_status_t status;
+
+	mutex_enter(nxgep->genlock);
+
+	/*
+	 * Make sure that nxge is initialized, if _start() has
+	 * not been called.
+	 */
+	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
+		status = nxge_init(nxgep);
+		if (status != NXGE_OK) {
+			mutex_exit(nxgep->genlock);
+			return (ENXIO);
+		}
+	}
+
+	mmac_info = &nxgep->nxge_mmac_info;
+	if (slot < 1 || slot > mmac_info->num_mmac) {
+		mutex_exit(nxgep->genlock);
+		return (EINVAL);
+	}
+
+	portn = nxgep->mac.portnum;
+	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
+		addrn = (uint8_t)slot - 1;
+	else
+		addrn = (uint8_t)slot;
+
+	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
+		if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn)
+				== NPI_SUCCESS) {
+			mmac_info->naddrfree++;
+			mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED;
+			/*
+			 * Regardless if the MAC we just stopped filtering
+			 * is a user addr or a factory addr, we must set
+			 * the MMAC_VENDOR_ADDR flag if this slot has an
+			 * associated factory MAC to indicate that a factory
+			 * MAC is available.
+			 */
+			if (slot <= mmac_info->num_factory_mmac) {
+				mmac_info->mac_pool[slot].flags
+					|= MMAC_VENDOR_ADDR;
+			}
+			/*
+			 * Clear mac_pool[slot].addr so that kstat shows 0
+			 * alternate MAC address if the slot is not used.
+			 * (But nxge_m_mmac_get returns the factory MAC even
+			 * when the slot is not used!)
+			 */
+			bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL);
+			nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
+		} else {
+			err = EIO;
+		}
+	} else {
+		err = EINVAL;
+	}
+
+	mutex_exit(nxgep->genlock);
+	return (err);
+}
+
+
+/*
+ * Modify a mac address added by nxge_m_mmac_add or nxge_m_mmac_reserve().
+ */
+static int
+nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr)
+{
+	p_nxge_t nxgep = arg;
+	mac_addr_slot_t slot;
+	nxge_mmac_t *mmac_info;
+	int err = 0;
+	nxge_status_t status;
+
+	if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr,
+			maddr->mma_addrlen))
+		return (EINVAL);
+
+	slot = maddr->mma_slot;
+
+	mutex_enter(nxgep->genlock);
+
+	/*
+	 * Make sure that nxge is initialized, if _start() has
+	 * not been called.
+	 */
+	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
+		status = nxge_init(nxgep);
+		if (status != NXGE_OK) {
+			mutex_exit(nxgep->genlock);
+			return (ENXIO);
+		}
+	}
+
+	mmac_info = &nxgep->nxge_mmac_info;
+	if (slot < 1 || slot > mmac_info->num_mmac) {
+		mutex_exit(nxgep->genlock);
+		return (EINVAL);
+	}
+	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
+		if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot))
+			== 0) {
+			bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr,
+				ETHERADDRL);
+			/*
+			 * Assume that the MAC passed down from the caller
+			 * is not a factory MAC address (The user should
+			 * call mmac_remove followed by mmac_reserve if
+			 * he wants to use the factory MAC for this slot).
+			 */
+			mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
+			nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
+		}
+	} else {
+		err = EINVAL;
+	}
+	mutex_exit(nxgep->genlock);
+	return (err);
+}
+
+/*
+ * nxge_m_mmac_get() - Get the MAC address and other information
+ * related to the slot.  mma_flags should be set to 0 in the call.
+ * Note: although kstat shows MAC address as zero when a slot is
+ * not used, Crossbow expects nxge_m_mmac_get to copy factory MAC
+ * to the caller as long as the slot is not using a user MAC address.
+ * The following table shows the rules,
+ *
+ *				   USED    VENDOR    mma_addr
+ * ------------------------------------------------------------
+ * (1) Slot uses a user MAC:        yes      no     user MAC
+ * (2) Slot uses a factory MAC:     yes      yes    factory MAC
+ * (3) Slot is not used but is
+ *     factory MAC capable:         no       yes    factory MAC
+ * (4) Slot is not used and is
+ *     not factory MAC capable:     no       no        0
+ * ------------------------------------------------------------
+ */
+static int
+nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr)
+{
+	nxge_t *nxgep = arg;
+	mac_addr_slot_t slot;
+	nxge_mmac_t *mmac_info;
+	nxge_status_t status;
+
+	slot = maddr->mma_slot;
+
+	mutex_enter(nxgep->genlock);
+
+	/*
+	 * Make sure that nxge is initialized, if _start() has
+	 * not been called.
+	 */
+	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
+		status = nxge_init(nxgep);
+		if (status != NXGE_OK) {
+			mutex_exit(nxgep->genlock);
+			return (ENXIO);
+		}
+	}
+
+	mmac_info = &nxgep->nxge_mmac_info;
+
+	if (slot < 1 || slot > mmac_info->num_mmac) {
+		mutex_exit(nxgep->genlock);
+		return (EINVAL);
+	}
+	maddr->mma_flags = 0;
+	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)
+		maddr->mma_flags |= MMAC_SLOT_USED;
+
+	if (mmac_info->mac_pool[slot].flags & MMAC_VENDOR_ADDR) {
+		maddr->mma_flags |= MMAC_VENDOR_ADDR;
+		bcopy(mmac_info->factory_mac_pool[slot],
+			maddr->mma_addr, ETHERADDRL);
+		maddr->mma_addrlen = ETHERADDRL;
+	} else {
+		if (maddr->mma_flags & MMAC_SLOT_USED) {
+			bcopy(mmac_info->mac_pool[slot].addr,
+				maddr->mma_addr, ETHERADDRL);
+			maddr->mma_addrlen = ETHERADDRL;
+		} else {
+			bzero(maddr->mma_addr, ETHERADDRL);
+			maddr->mma_addrlen = 0;
+		}
+	}
+	mutex_exit(nxgep->genlock);
+	return (0);
+}
+
+
+static boolean_t
+nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
+{
+	nxge_t *nxgep = arg;
+	uint32_t *txflags = cap_data;
+	multiaddress_capab_t *mmacp = cap_data;
+
+	switch (cap) {
+	case MAC_CAPAB_HCKSUM:
+		*txflags = HCKSUM_INET_PARTIAL;
+		break;
+	case MAC_CAPAB_POLL:
+		/*
+		 * There's nothing for us to fill in; simply returning
+		 * B_TRUE to state that we support polling is sufficient.
+		 */
+		break;
+
+	case MAC_CAPAB_MULTIADDRESS:
+		mutex_enter(nxgep->genlock);
+
+		mmacp->maddr_naddr = nxgep->nxge_mmac_info.num_mmac;
+		mmacp->maddr_naddrfree = nxgep->nxge_mmac_info.naddrfree;
+		mmacp->maddr_flag = 0; /* 0 is required by PSARC2006/265 */
+		/*
+		 * maddr_handle is driver's private data, passed back to
+		 * entry point functions as arg.
+		 */
+		mmacp->maddr_handle	= nxgep;
+		mmacp->maddr_add	= nxge_m_mmac_add;
+		mmacp->maddr_remove	= nxge_m_mmac_remove;
+		mmacp->maddr_modify	= nxge_m_mmac_modify;
+		mmacp->maddr_get	= nxge_m_mmac_get;
+		mmacp->maddr_reserve	= nxge_m_mmac_reserve;
+
+		mutex_exit(nxgep->genlock);
+		break;
+	default:
+		return (B_FALSE);
+	}
+	return (B_TRUE);
+}
+
+/*
+ * Module loading and removing entry points.
+ */
+
+static	struct cb_ops 	nxge_cb_ops = {
+	nodev,			/* cb_open */
+	nodev,			/* cb_close */
+	nodev,			/* cb_strategy */
+	nodev,			/* cb_print */
+	nodev,			/* cb_dump */
+	nodev,			/* cb_read */
+	nodev,			/* cb_write */
+	nodev,			/* cb_ioctl */
+	nodev,			/* cb_devmap */
+	nodev,			/* cb_mmap */
+	nodev,			/* cb_segmap */
+	nochpoll,		/* cb_chpoll */
+	ddi_prop_op,		/* cb_prop_op */
+	NULL,			/* cb_str */
+	D_MP, 			/* cb_flag */
+	CB_REV,			/* rev */
+	nodev,			/* int (*cb_aread)() */
+	nodev			/* int (*cb_awrite)() */
+};
+
+static struct dev_ops nxge_dev_ops = {
+	DEVO_REV,		/* devo_rev */
+	0,			/* devo_refcnt */
+	nulldev,		/* devo_getinfo */
+	nulldev,		/* devo_identify */
+	nulldev,		/* devo_probe */
+	nxge_attach,		/* devo_attach */
+	nxge_detach,		/* devo_detach */
+	nodev,			/* devo_reset */
+	&nxge_cb_ops,		/* devo_cb_ops */
+	(struct bus_ops *)NULL, /* devo_bus_ops	*/
+	ddi_power		/* devo_power */
+};
+
+extern	struct	mod_ops	mod_driverops;
+
+#define	NXGE_DESC_VER		"Sun NIU 10Gb Ethernet %I%"
+
+/*
+ * Module linkage information for the kernel.
+ */
+static struct modldrv 	nxge_modldrv = {
+	&mod_driverops,
+	NXGE_DESC_VER,
+	&nxge_dev_ops
+};
+
+static struct modlinkage modlinkage = {
+	MODREV_1, (void *) &nxge_modldrv, NULL
+};
+
+int
+_init(void)
+{
+	int		status;
+
+	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init"));
+	mac_init_ops(&nxge_dev_ops, "nxge");
+	status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0);
+	if (status != 0) {
+		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
+			"failed to init device soft state"));
+		goto _init_exit;
+	}
+
+	status = mod_install(&modlinkage);
+	if (status != 0) {
+		ddi_soft_state_fini(&nxge_list);
+		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed"));
+		goto _init_exit;
+	}
+
+	MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL);
+
+_init_exit:
+	NXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status));
+
+	return (status);
+}
+
+int
+_fini(void)
+{
+	int		status;
+
+	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini"));
+
+	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove"));
+
+	if (nxge_mblks_pending)
+		return (EBUSY);
+
+	status = mod_remove(&modlinkage);
+	if (status != DDI_SUCCESS) {
+		NXGE_DEBUG_MSG((NULL, MOD_CTL,
+			    "Module removal failed 0x%08x",
+			    status));
+		goto _fini_exit;
+	}
+
+	mac_fini_ops(&nxge_dev_ops);
+
+	ddi_soft_state_fini(&nxge_list);
+
+	MUTEX_DESTROY(&nxge_common_lock);
+_fini_exit:
+	NXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status));
+
+	return (status);
+}
+
+int
+_info(struct modinfo *modinfop)
+{
+	int		status;
+
+	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info"));
+	status = mod_info(&modlinkage, modinfop);
+	NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status));
+
+	return (status);
+}
+
+/*ARGSUSED*/
+static nxge_status_t
+nxge_add_intrs(p_nxge_t nxgep)
+{
+
+	int		intr_types;
+	int		type = 0;
+	int		ddi_status = DDI_SUCCESS;
+	nxge_status_t	status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs"));
+
+	nxgep->nxge_intr_type.intr_registered = B_FALSE;
+	nxgep->nxge_intr_type.intr_enabled = B_FALSE;
+	nxgep->nxge_intr_type.msi_intx_cnt = 0;
+	nxgep->nxge_intr_type.intr_added = 0;
+	nxgep->nxge_intr_type.niu_msi_enable = B_FALSE;
+	nxgep->nxge_intr_type.intr_type = 0;
+
+	if (nxgep->niu_type == N2_NIU) {
+		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
+	} else if (nxge_msi_enable) {
+		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
+	}
+
+	/* Get the supported interrupt types */
+	if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types))
+			!= DDI_SUCCESS) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: "
+			"ddi_intr_get_supported_types failed: status 0x%08x",
+			ddi_status));
+		return (NXGE_ERROR | NXGE_DDI_FAILED);
+	}
+	nxgep->nxge_intr_type.intr_types = intr_types;
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
+		"ddi_intr_get_supported_types: 0x%08x", intr_types));
+
+	/*
+	 * Solaris MSI-X is not supported yet. Use MSI for now.
+	 * nxge_msi_enable (1):
+	 *	1 - MSI		2 - MSI-X	others - FIXED
+	 */
+	switch (nxge_msi_enable) {
+	default:
+		type = DDI_INTR_TYPE_FIXED;
+		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
+			"use fixed (intx emulation) type %08x",
+			type));
+		break;
+
+	case 2:
+		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
+			"ddi_intr_get_supported_types: 0x%08x", intr_types));
+		if (intr_types & DDI_INTR_TYPE_MSIX) {
+			type = DDI_INTR_TYPE_MSIX;
+			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
+				"ddi_intr_get_supported_types: MSIX 0x%08x",
+				type));
+		} else if (intr_types & DDI_INTR_TYPE_MSI) {
+			type = DDI_INTR_TYPE_MSI;
+			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
+				"ddi_intr_get_supported_types: MSI 0x%08x",
+				type));
+		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
+			type = DDI_INTR_TYPE_FIXED;
+			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
+				"ddi_intr_get_supported_types: FIXED 0x%08x",
+				type));
+		}
+		break;
+
+	case 1:
+		if (intr_types & DDI_INTR_TYPE_MSI) {
+			type = DDI_INTR_TYPE_MSI;
+			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
+				"ddi_intr_get_supported_types: MSI 0x%08x",
+				type));
+		} else if (intr_types & DDI_INTR_TYPE_MSIX) {
+			type = DDI_INTR_TYPE_MSIX;
+			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
+				"ddi_intr_get_supported_types: MSIX 0x%08x",
+				type));
+		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
+			type = DDI_INTR_TYPE_FIXED;
+			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
+				"ddi_intr_get_supported_types: FIXED 0x%08x",
+				type));
+		}
+	}
+
+	nxgep->nxge_intr_type.intr_type = type;
+	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
+		type == DDI_INTR_TYPE_FIXED) &&
+			nxgep->nxge_intr_type.niu_msi_enable) {
+		if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				    " nxge_add_intrs: "
+				    " nxge_add_intrs_adv failed: status 0x%08x",
+				    status));
+			return (status);
+		} else {
+			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
+			"interrupts registered : type %d", type));
+			nxgep->nxge_intr_type.intr_registered = B_TRUE;
+
+			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+				"\nAdded advanced nxge add_intr_adv "
+					"intr type 0x%x\n", type));
+
+			return (status);
+		}
+	}
+
+	if (!nxgep->nxge_intr_type.intr_registered) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: "
+			"failed to register interrupts"));
+		return (NXGE_ERROR | NXGE_DDI_FAILED);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs"));
+	return (status);
+}
+
+/*ARGSUSED*/
+static nxge_status_t
+nxge_add_soft_intrs(p_nxge_t nxgep)
+{
+
+	int		ddi_status = DDI_SUCCESS;
+	nxge_status_t	status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_soft_intrs"));
+
+	nxgep->resched_id = NULL;
+	nxgep->resched_running = B_FALSE;
+	ddi_status = ddi_add_softintr(nxgep->dip, DDI_SOFTINT_LOW,
+			&nxgep->resched_id,
+		NULL, NULL, nxge_reschedule, (caddr_t)nxgep);
+	if (ddi_status != DDI_SUCCESS) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_soft_intrs: "
+			"ddi_add_softintr failed: status 0x%08x",
+			ddi_status));
+		return (NXGE_ERROR | NXGE_DDI_FAILED);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_soft_intrs"));
+
+	return (status);
+}
+
+static nxge_status_t
+nxge_add_intrs_adv(p_nxge_t nxgep)
+{
+	int		intr_type;
+	p_nxge_intr_t	intrp;
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv"));
+
+	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
+	intr_type = intrp->intr_type;
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x",
+		intr_type));
+
+	switch (intr_type) {
+	case DDI_INTR_TYPE_MSI: /* 0x2 */
+	case DDI_INTR_TYPE_MSIX: /* 0x4 */
+		return (nxge_add_intrs_adv_type(nxgep, intr_type));
+
+	case DDI_INTR_TYPE_FIXED: /* 0x1 */
+		return (nxge_add_intrs_adv_type_fix(nxgep, intr_type));
+
+	default:
+		return (NXGE_ERROR);
+	}
+}
+
+
+/*ARGSUSED*/
+static nxge_status_t
+nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type)
+{
+	dev_info_t		*dip = nxgep->dip;
+	p_nxge_ldg_t		ldgp;
+	p_nxge_intr_t		intrp;
+	uint_t			*inthandler;
+	void			*arg1, *arg2;
+	int			behavior;
+	int			nintrs, navail;
+	int			nactual, nrequired;
+	int			inum = 0;
+	int			x, y;
+	int			ddi_status = DDI_SUCCESS;
+	nxge_status_t		status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type"));
+	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
+	intrp->start_inum = 0;
+
+	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
+	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"ddi_intr_get_nintrs() failed, status: 0x%x, "
+			    "nintrs: %d", ddi_status, nintrs));
+		return (NXGE_ERROR | NXGE_DDI_FAILED);
+	}
+
+	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
+	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"ddi_intr_get_navail() failed, status: 0x%x, "
+			    "navail: %d", ddi_status, navail));
+		return (NXGE_ERROR | NXGE_DDI_FAILED);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL,
+		"ddi_intr_get_navail() returned: nintrs %d, navail %d",
+		    nintrs, navail));
+
+	if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
+		/* MSI must be power of 2 */
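+		/*
+		 * Round navail down to the largest of 16, 8, 4, 2 or 1
+		 * whose bit is set, e.g. navail 13 becomes 8.
+		 */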
+		if ((navail & 16) == 16) {
+			navail = 16;
+		} else if ((navail & 8) == 8) {
+			navail = 8;
+		} else if ((navail & 4) == 4) {
+			navail = 4;
+		} else if ((navail & 2) == 2) {
+			navail = 2;
+		} else {
+			navail = 1;
+		}
+		NXGE_DEBUG_MSG((nxgep, INT_CTL,
+			"ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
+			"navail %d", nintrs, navail));
+	}
+
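+	/*
+	 * DDI_INTR_ALLOC_STRICT requests exactly navail vectors (used for
+	 * fixed interrupts); DDI_INTR_ALLOC_NORMAL lets ddi_intr_alloc()
+	 * return fewer than requested for MSI/MSI-X.
+	 */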
+	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
+			DDI_INTR_ALLOC_NORMAL);
+	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
+	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
+	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
+		    navail, &nactual, behavior);
+	if (ddi_status != DDI_SUCCESS || nactual == 0) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				    " ddi_intr_alloc() failed: %d",
+				    ddi_status));
+		kmem_free(intrp->htable, intrp->intr_size);
+		return (NXGE_ERROR | NXGE_DDI_FAILED);
+	}
+
+	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
+			(uint_t *)&intrp->pri)) != DDI_SUCCESS) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				    " ddi_intr_get_pri() failed: %d",
+				    ddi_status));
+		/* Free already allocated interrupts */
+		for (y = 0; y < nactual; y++) {
+			(void) ddi_intr_free(intrp->htable[y]);
+		}
+
+		kmem_free(intrp->htable, intrp->intr_size);
+		return (NXGE_ERROR | NXGE_DDI_FAILED);
+	}
+
+	nrequired = 0;
+	switch (nxgep->niu_type) {
+	case NEPTUNE:
+	case NEPTUNE_2:
+	default:
+		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
+		break;
+
+	case N2_NIU:
+		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
+		break;
+	}
+
+	if (status != NXGE_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_add_intrs_adv_type:nxge_ldgv_init "
+			"failed: 0x%x", status));
+		/* Free already allocated interrupts */
+		for (y = 0; y < nactual; y++) {
+			(void) ddi_intr_free(intrp->htable[y]);
+		}
+
+		kmem_free(intrp->htable, intrp->intr_size);
+		return (status);
+	}
+
+	ldgp = nxgep->ldgvp->ldgp;
+	for (x = 0; x < nrequired; x++, ldgp++) {
+		ldgp->vector = (uint8_t)x;
+		ldgp->intdata = SID_DATA(ldgp->func, x);
+		arg1 = ldgp->ldvp;
+		arg2 = nxgep;
+		if (ldgp->nldvs == 1) {
+			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
+			NXGE_DEBUG_MSG((nxgep, INT_CTL,
+				"nxge_add_intrs_adv_type: "
+				"arg1 0x%x arg2 0x%x: "
+				"1-1 int handler (entry %d intdata 0x%x)\n",
+				arg1, arg2,
+				x, ldgp->intdata));
+		} else if (ldgp->nldvs > 1) {
+			inthandler = (uint_t *)ldgp->sys_intr_handler;
+			NXGE_DEBUG_MSG((nxgep, INT_CTL,
+				"nxge_add_intrs_adv_type: "
+				"arg1 0x%x arg2 0x%x: "
+				"nldevs %d int handler "
+				"(entry %d intdata 0x%x)\n",
+				arg1, arg2,
+				ldgp->nldvs, x, ldgp->intdata));
+		}
+
+		NXGE_DEBUG_MSG((nxgep, INT_CTL,
+			"==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
+			"htable 0x%llx", x, intrp->htable[x]));
+
+		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
+			(ddi_intr_handler_t *)inthandler, arg1, arg2))
+				!= DDI_SUCCESS) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"==> nxge_add_intrs_adv_type: failed #%d "
+				"status 0x%x", x, ddi_status));
+			for (y = 0; y < intrp->intr_added; y++) {
+				(void) ddi_intr_remove_handler(
+						intrp->htable[y]);
+			}
+			/* Free already allocated intr */
+			for (y = 0; y < nactual; y++) {
+				(void) ddi_intr_free(intrp->htable[y]);
+			}
+			kmem_free(intrp->htable, intrp->intr_size);
+
+			(void) nxge_ldgv_uninit(nxgep);
+
+			return (NXGE_ERROR | NXGE_DDI_FAILED);
+		}
+		intrp->intr_added++;
+	}
+
+	intrp->msi_intx_cnt = nactual;
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+		"Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
+		navail, nactual,
+		intrp->msi_intx_cnt,
+		intrp->intr_added));
+
+	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
+
+	(void) nxge_intr_ldgv_init(nxgep);
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type"));
+
+	return (status);
+}
+
+/*ARGSUSED*/
+static nxge_status_t
+nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type)
+{
+	dev_info_t		*dip = nxgep->dip;
+	p_nxge_ldg_t		ldgp;
+	p_nxge_intr_t		intrp;
+	uint_t			*inthandler;
+	void			*arg1, *arg2;
+	int			behavior;
+	int			nintrs, navail;
+	int			nactual, nrequired;
+	int			inum = 0;
+	int			x, y;
+	int			ddi_status = DDI_SUCCESS;
+	nxge_status_t		status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix"));
+	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
+	intrp->start_inum = 0;
+
+	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
+	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
+		NXGE_DEBUG_MSG((nxgep, INT_CTL,
+			"ddi_intr_get_nintrs() failed, status: 0x%x, "
+			    "nintrs: %d", ddi_status, nintrs));
+		return (NXGE_ERROR | NXGE_DDI_FAILED);
+	}
+
+	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
+	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"ddi_intr_get_navail() failed, status: 0x%x, "
+			    "navail: %d", ddi_status, navail));
+		return (NXGE_ERROR | NXGE_DDI_FAILED);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL,
+		"ddi_intr_get_navail() returned: nintrs %d, navail %d",
+		    nintrs, navail));
+
+	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
+			DDI_INTR_ALLOC_NORMAL);
+	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
+	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
+	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
+		    navail, &nactual, behavior);
+	if (ddi_status != DDI_SUCCESS || nactual == 0) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			    " ddi_intr_alloc() failed: %d",
+			    ddi_status));
+		kmem_free(intrp->htable, intrp->intr_size);
+		return (NXGE_ERROR | NXGE_DDI_FAILED);
+	}
+
+	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
+			(uint_t *)&intrp->pri)) != DDI_SUCCESS) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				    " ddi_intr_get_pri() failed: %d",
+				    ddi_status));
+		/* Free already allocated interrupts */
+		for (y = 0; y < nactual; y++) {
+			(void) ddi_intr_free(intrp->htable[y]);
+		}
+
+		kmem_free(intrp->htable, intrp->intr_size);
+		return (NXGE_ERROR | NXGE_DDI_FAILED);
+	}
+
+	nrequired = 0;
+	switch (nxgep->niu_type) {
+	case NEPTUNE:
+	case NEPTUNE_2:
+	default:
+		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
+		break;
+
+	case N2_NIU:
+		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
+		break;
+	}
+
+	if (status != NXGE_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_add_intrs_adv_type_fix:nxge_ldgv_init "
+			"failed: 0x%x", status));
+		/* Free already allocated interrupts */
+		for (y = 0; y < nactual; y++) {
+			(void) ddi_intr_free(intrp->htable[y]);
+		}
+
+		kmem_free(intrp->htable, intrp->intr_size);
+		return (status);
+	}
+
+	ldgp = nxgep->ldgvp->ldgp;
+	for (x = 0; x < nrequired; x++, ldgp++) {
+		ldgp->vector = (uint8_t)x;
+		if (nxgep->niu_type != N2_NIU) {
+			ldgp->intdata = SID_DATA(ldgp->func, x);
+		}
+
+		arg1 = ldgp->ldvp;
+		arg2 = nxgep;
+		if (ldgp->nldvs == 1) {
+			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
+			NXGE_DEBUG_MSG((nxgep, INT_CTL,
+				"nxge_add_intrs_adv_type_fix: "
+				"1-1 int handler(%d) ldg %d ldv %d "
+				"arg1 $%p arg2 $%p\n",
+				x, ldgp->ldg, ldgp->ldvp->ldv,
+				arg1, arg2));
+		} else if (ldgp->nldvs > 1) {
+			inthandler = (uint_t *)ldgp->sys_intr_handler;
+			NXGE_DEBUG_MSG((nxgep, INT_CTL,
+				"nxge_add_intrs_adv_type_fix: "
+				"shared ldv %d int handler(%d) ldv %d ldg %d"
+				"arg1 0x%016llx arg2 0x%016llx\n",
+				x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
+				arg1, arg2));
+		}
+
+		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
+			(ddi_intr_handler_t *)inthandler, arg1, arg2))
+				!= DDI_SUCCESS) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"==> nxge_add_intrs_adv_type_fix: failed #%d "
+				"status 0x%x", x, ddi_status));
+			for (y = 0; y < intrp->intr_added; y++) {
+				(void) ddi_intr_remove_handler(
+						intrp->htable[y]);
+			}
+			for (y = 0; y < nactual; y++) {
+				(void) ddi_intr_free(intrp->htable[y]);
+			}
+			/* Free already allocated intr */
+			kmem_free(intrp->htable, intrp->intr_size);
+
+			(void) nxge_ldgv_uninit(nxgep);
+
+			return (NXGE_ERROR | NXGE_DDI_FAILED);
+		}
+		intrp->intr_added++;
+	}
+
+	intrp->msi_intx_cnt = nactual;
+
+	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
+
+	status = nxge_intr_ldgv_init(nxgep);
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix"));
+
+	return (status);
+}
+
+static void
+nxge_remove_intrs(p_nxge_t nxgep)
+{
+	int		i, inum;
+	p_nxge_intr_t	intrp;
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs"));
+	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
+	if (!intrp->intr_registered) {
+		NXGE_DEBUG_MSG((nxgep, INT_CTL,
+			"<== nxge_remove_intrs: interrupts not registered"));
+		return;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced"));
+
+	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
+		(void) ddi_intr_block_disable(intrp->htable,
+			intrp->intr_added);
+	} else {
+		for (i = 0; i < intrp->intr_added; i++) {
+			(void) ddi_intr_disable(intrp->htable[i]);
+		}
+	}
+
+	for (inum = 0; inum < intrp->intr_added; inum++) {
+		if (intrp->htable[inum]) {
+			(void) ddi_intr_remove_handler(intrp->htable[inum]);
+		}
+	}
+
+	for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
+		if (intrp->htable[inum]) {
+			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+				"nxge_remove_intrs: ddi_intr_free inum %d "
+				"msi_intx_cnt %d intr_added %d",
+				inum,
+				intrp->msi_intx_cnt,
+				intrp->intr_added));
+
+			(void) ddi_intr_free(intrp->htable[inum]);
+		}
+	}
+
+	kmem_free(intrp->htable, intrp->intr_size);
+	intrp->intr_registered = B_FALSE;
+	intrp->intr_enabled = B_FALSE;
+	intrp->msi_intx_cnt = 0;
+	intrp->intr_added = 0;
+
+	(void) nxge_ldgv_uninit(nxgep);
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs"));
+}
+
+/*ARGSUSED*/
+static void
+nxge_remove_soft_intrs(p_nxge_t nxgep)
+{
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_soft_intrs"));
+	if (nxgep->resched_id) {
+		ddi_remove_softintr(nxgep->resched_id);
+		NXGE_DEBUG_MSG((nxgep, INT_CTL,
+			"==> nxge_remove_soft_intrs: removed"));
+		nxgep->resched_id = NULL;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_soft_intrs"));
+}
+
+/*ARGSUSED*/
+static void
+nxge_intrs_enable(p_nxge_t nxgep)
+{
+	p_nxge_intr_t	intrp;
+	int		i;
+	int		status;
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable"));
+
+	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
+
+	if (!intrp->intr_registered) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: "
+			"interrupts are not registered"));
+		return;
+	}
+
+	if (intrp->intr_enabled) {
+		NXGE_DEBUG_MSG((nxgep, INT_CTL,
+			"<== nxge_intrs_enable: already enabled"));
+		return;
+	}
+
+	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
+		status = ddi_intr_block_enable(intrp->htable,
+			intrp->intr_added);
+		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
+			"block enable - status 0x%x total inums #%d\n",
+			status, intrp->intr_added));
+	} else {
+		for (i = 0; i < intrp->intr_added; i++) {
+			status = ddi_intr_enable(intrp->htable[i]);
+			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
+				"ddi_intr_enable:enable - status 0x%x "
+				"total inums %d enable inum #%d\n",
+				status, intrp->intr_added, i));
+			if (status == DDI_SUCCESS) {
+				intrp->intr_enabled = B_TRUE;
+			}
+		}
+	}
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable"));
+}
+
+/*ARGSUSED*/
+static void
+nxge_intrs_disable(p_nxge_t nxgep)
+{
+	p_nxge_intr_t	intrp;
+	int		i;
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable"));
+
+	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
+
+	if (!intrp->intr_registered) {
+		NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: "
+			"interrupts are not registered"));
+		return;
+	}
+
+	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
+		(void) ddi_intr_block_disable(intrp->htable,
+			intrp->intr_added);
+	} else {
+		for (i = 0; i < intrp->intr_added; i++) {
+			(void) ddi_intr_disable(intrp->htable[i]);
+		}
+	}
+
+	intrp->intr_enabled = B_FALSE;
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable"));
+}
+
+static nxge_status_t
+nxge_mac_register(p_nxge_t nxgep)
+{
+	mac_register_t *macp;
+	int		status;
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register"));
+
+	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
+		return (NXGE_ERROR);
+
+	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
+	macp->m_driver = nxgep;
+	macp->m_dip = nxgep->dip;
+	macp->m_src_addr = nxgep->ouraddr.ether_addr_octet;
+	macp->m_callbacks = &nxge_m_callbacks;
+	macp->m_min_sdu = 0;
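+	/*
+	 * Maximum SDU is the hardware frame size less the Ethernet
+	 * header, the FCS and (presumably) a 4-byte VLAN tag allowance.
+	 */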
+	macp->m_max_sdu = nxgep->mac.maxframesize -
+		sizeof (struct ether_header) - ETHERFCSL - 4;
+
+	status = mac_register(macp, &nxgep->mach);
+	mac_free(macp);
+
+	if (status != 0) {
+		cmn_err(CE_WARN,
+			"!nxge_mac_register failed (status %d instance %d)",
+			status, nxgep->instance);
+		return (NXGE_ERROR);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success "
+		"(instance %d)", nxgep->instance));
+
+	return (NXGE_OK);
+}
+
+void
+nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp)
+{
+	ssize_t		size;
+	mblk_t		*nmp;
+	uint8_t		blk_id;
+	uint8_t		chan;
+	uint32_t	err_id;
+	err_inject_t	*eip;
+
+	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject"));
+
+	size = 1024;
+	nmp = mp->b_cont;
+	eip = (err_inject_t *)nmp->b_rptr;
+	blk_id = eip->blk_id;
+	err_id = eip->err_id;
+	chan = eip->chan;
+	cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id);
+	cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id);
+	cmn_err(CE_NOTE, "!chan = 0x%x\n", chan);
+	switch (blk_id) {
+	case MAC_BLK_ID:
+		break;
+	case TXMAC_BLK_ID:
+		break;
+	case RXMAC_BLK_ID:
+		break;
+	case MIF_BLK_ID:
+		break;
+	case IPP_BLK_ID:
+		nxge_ipp_inject_err(nxgep, err_id);
+		break;
+	case TXC_BLK_ID:
+		nxge_txc_inject_err(nxgep, err_id);
+		break;
+	case TXDMA_BLK_ID:
+		nxge_txdma_inject_err(nxgep, err_id, chan);
+		break;
+	case RXDMA_BLK_ID:
+		nxge_rxdma_inject_err(nxgep, err_id, chan);
+		break;
+	case ZCP_BLK_ID:
+		nxge_zcp_inject_err(nxgep, err_id);
+		break;
+	case ESPC_BLK_ID:
+		break;
+	case FFLP_BLK_ID:
+		break;
+	case PHY_BLK_ID:
+		break;
+	case ETHER_SERDES_BLK_ID:
+		break;
+	case PCIE_SERDES_BLK_ID:
+		break;
+	case VIR_BLK_ID:
+		break;
+	}
+
+	nmp->b_wptr = nmp->b_rptr + size;
+	NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject"));
+
+	miocack(wq, mp, (int)size, 0);
+}
+
+static int
+nxge_init_common_dev(p_nxge_t nxgep)
+{
+	p_nxge_hw_list_t	hw_p;
+	dev_info_t 		*p_dip;
+
+	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device"));
+
+	p_dip = nxgep->p_dip;
+	MUTEX_ENTER(&nxge_common_lock);
+	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
+		"==> nxge_init_common_dev:func # %d",
+			nxgep->function_num));
+	/*
+	 * Loop through existing per neptune hardware list.
+	 */
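+	/*
+	 * All instances that share a parent dip (the functions of one
+	 * Neptune device) share a single nxge_hw_list_t entry; ndevs
+	 * counts how many instances are currently attached to it.
+	 */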
+	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
+		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
+			"==> nxge_init_common_device:func # %d "
+			"hw_p $%p parent dip $%p",
+			nxgep->function_num,
+			hw_p,
+			p_dip));
+		if (hw_p->parent_devp == p_dip) {
+			nxgep->nxge_hw_p = hw_p;
+			hw_p->ndevs++;
+			hw_p->nxge_p[nxgep->function_num] = nxgep;
+			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
+				"==> nxge_init_common_device:func # %d "
+				"hw_p $%p parent dip $%p "
+				"ndevs %d (found)",
+				nxgep->function_num,
+				hw_p,
+				p_dip,
+				hw_p->ndevs));
+			break;
+		}
+	}
+
+	if (hw_p == NULL) {
+		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
+			"==> nxge_init_common_device:func # %d "
+			"parent dip $%p (new)",
+			nxgep->function_num,
+			p_dip));
+		hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP);
+		hw_p->parent_devp = p_dip;
+		hw_p->magic = NXGE_NEPTUNE_MAGIC;
+		nxgep->nxge_hw_p = hw_p;
+		hw_p->ndevs++;
+		hw_p->nxge_p[nxgep->function_num] = nxgep;
+		hw_p->next = nxge_hw_list;
+
+		MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
+		MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
+		MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
+		MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL);
+		MUTEX_INIT(&hw_p->nxge_mii_lock, NULL, MUTEX_DRIVER, NULL);
+
+		nxge_hw_list = hw_p;
+	}
+
+	MUTEX_EXIT(&nxge_common_lock);
+	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
+		"==> nxge_init_common_device (nxge_hw_list) $%p",
+		nxge_hw_list));
+	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device"));
+
+	return (NXGE_OK);
+}
+
+static void
+nxge_uninit_common_dev(p_nxge_t nxgep)
+{
+	p_nxge_hw_list_t	hw_p, h_hw_p;
+	dev_info_t 		*p_dip;
+
+	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device"));
+	if (nxgep->nxge_hw_p == NULL) {
+		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
+			"<== nxge_uninit_common_device (no common)"));
+		return;
+	}
+
+	MUTEX_ENTER(&nxge_common_lock);
+	h_hw_p = nxge_hw_list;
+	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
+		p_dip = hw_p->parent_devp;
+		if (nxgep->nxge_hw_p == hw_p &&
+			p_dip == nxgep->p_dip &&
+			nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC &&
+			hw_p->magic == NXGE_NEPTUNE_MAGIC) {
+
+			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
+				"==> nxge_uninit_common_device:func # %d "
+				"hw_p $%p parent dip $%p "
+				"ndevs %d (found)",
+				nxgep->function_num,
+				hw_p,
+				p_dip,
+				hw_p->ndevs));
+
+			nxgep->nxge_hw_p = NULL;
+			if (hw_p->ndevs) {
+				hw_p->ndevs--;
+			}
+			hw_p->nxge_p[nxgep->function_num] = NULL;
+			if (!hw_p->ndevs) {
+				MUTEX_DESTROY(&hw_p->nxge_vlan_lock);
+				MUTEX_DESTROY(&hw_p->nxge_tcam_lock);
+				MUTEX_DESTROY(&hw_p->nxge_cfg_lock);
+				MUTEX_DESTROY(&hw_p->nxge_mdio_lock);
+				MUTEX_DESTROY(&hw_p->nxge_mii_lock);
+				NXGE_DEBUG_MSG((nxgep, MOD_CTL,
+					"==> nxge_uninit_common_device: "
+					"func # %d "
+					"hw_p $%p parent dip $%p "
+					"ndevs %d (last)",
+					nxgep->function_num,
+					hw_p,
+					p_dip,
+					hw_p->ndevs));
+
+				if (hw_p == nxge_hw_list) {
+					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
+						"==> nxge_uninit_common_device:"
+						"remove head func # %d "
+						"hw_p $%p parent dip $%p "
+						"ndevs %d (head)",
+						nxgep->function_num,
+						hw_p,
+						p_dip,
+						hw_p->ndevs));
+					nxge_hw_list = hw_p->next;
+				} else {
+					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
+						"==> nxge_uninit_common_device:"
+						"remove middle func # %d "
+						"hw_p $%p parent dip $%p "
+						"ndevs %d (middle)",
+						nxgep->function_num,
+						hw_p,
+						p_dip,
+						hw_p->ndevs));
+					h_hw_p->next = hw_p->next;
+				}
+
+				KMEM_FREE(hw_p, sizeof (nxge_hw_list_t));
+			}
+			break;
+		} else {
+			h_hw_p = hw_p;
+		}
+	}
+
+	MUTEX_EXIT(&nxge_common_lock);
+	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
+		"==> nxge_uninit_common_device (nxge_hw_list) $%p",
+		nxge_hw_list));
+
+	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_uninit_common_device"));
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nxge/nxge_ndd.c	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,2547 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#include <sys/nxge/nxge_impl.h>
+#include <inet/common.h>
+#include <inet/mi.h>
+#include <inet/nd.h>
+
+extern uint64_t npi_debug_level;
+
+#define	NXGE_PARAM_MAC_RW \
+	NXGE_PARAM_RW | NXGE_PARAM_MAC | \
+	NXGE_PARAM_NDD_WR_OK | NXGE_PARAM_READ_PROP
+
+#define	NXGE_PARAM_MAC_DONT_SHOW \
+	NXGE_PARAM_RW | NXGE_PARAM_MAC | NXGE_PARAM_DONT_SHOW
+
+#define	NXGE_PARAM_RXDMA_RW \
+	NXGE_PARAM_RWP | NXGE_PARAM_RXDMA | NXGE_PARAM_NDD_WR_OK | \
+	NXGE_PARAM_READ_PROP
+
+#define	NXGE_PARAM_RXDMA_RWC \
+	NXGE_PARAM_RWP | NXGE_PARAM_RXDMA | NXGE_PARAM_INIT_ONLY | \
+	NXGE_PARAM_READ_PROP
+
+#define	NXGE_PARAM_L2CLASS_CFG \
+	NXGE_PARAM_RW | NXGE_PARAM_PROP_ARR32 | NXGE_PARAM_READ_PROP | \
+	NXGE_PARAM_NDD_WR_OK
+
+#define	NXGE_PARAM_CLASS_RWS \
+	NXGE_PARAM_RWS |  NXGE_PARAM_READ_PROP
+
+#define	NXGE_PARAM_ARRAY_INIT_SIZE	0x20ULL
+
+#define	SET_RX_INTR_TIME_DISABLE 0
+#define	SET_RX_INTR_TIME_ENABLE 1
+#define	SET_RX_INTR_PKTS 2
+
+#define	BASE_ANY	0
+#define	BASE_BINARY 	2
+#define	BASE_HEX	16
+#define	BASE_DECIMAL	10
+#define	ALL_FF_64	0xFFFFFFFFFFFFFFFFULL
+#define	ALL_FF_32	0xFFFFFFFFUL
+
+#define	NXGE_NDD_INFODUMP_BUFF_SIZE	2048 /* is 2k enough? */
+#define	NXGE_NDD_INFODUMP_BUFF_8K	8192
+#define	NXGE_NDD_INFODUMP_BUFF_16K	0x4000
+#define	NXGE_NDD_INFODUMP_BUFF_64K	0x10000
+
+#define	PARAM_OUTOF_RANGE(vptr, eptr, rval, pa)	\
+	((vptr == eptr) || (rval < pa->minimum) || (rval > pa->maximum))
+
+#define	ADVANCE_PRINT_BUFFER(pmp, plen, rlen) { \
+	((mblk_t *)pmp)->b_wptr += plen; \
+	rlen -= plen; \
+}
+
+static int nxge_param_rx_intr_pkts(p_nxge_t, queue_t *,
+	mblk_t *, char *, caddr_t);
+static int nxge_param_rx_intr_time(p_nxge_t, queue_t *,
+	mblk_t *, char *, caddr_t);
+static int nxge_param_set_mac(p_nxge_t, queue_t *,
+	mblk_t *, char *, caddr_t);
+static int nxge_param_set_port_rdc(p_nxge_t, queue_t *,
+	mblk_t *, char *, caddr_t);
+static int nxge_param_set_grp_rdc(p_nxge_t, queue_t *,
+	mblk_t *, char *, caddr_t);
+static int nxge_param_set_ether_usr(p_nxge_t,
+	queue_t *, mblk_t *, char *, caddr_t);
+static int nxge_param_set_ip_usr(p_nxge_t,
+	queue_t *, mblk_t *, char *, caddr_t);
+static int nxge_param_set_ip_opt(p_nxge_t,
+	queue_t *, mblk_t *, char *, caddr_t);
+static int nxge_param_set_vlan_rdcgrp(p_nxge_t,
+	queue_t *, mblk_t *, char *, caddr_t);
+static int nxge_param_set_mac_rdcgrp(p_nxge_t,
+	queue_t *, mblk_t *, char *, caddr_t);
+static int nxge_param_fflp_hash_init(p_nxge_t,
+	queue_t *, mblk_t *, char *, caddr_t);
+static int nxge_param_llc_snap_enable(p_nxge_t, queue_t *,
+	mblk_t *, char *, caddr_t);
+static int nxge_param_hash_lookup_enable(p_nxge_t, queue_t *,
+	mblk_t *, char *, caddr_t);
+static int nxge_param_tcam_enable(p_nxge_t, queue_t *,
+	mblk_t *, char *, caddr_t);
+static int nxge_param_get_rxdma_info(p_nxge_t, queue_t *q,
+	p_mblk_t, caddr_t);
+static int nxge_param_get_txdma_info(p_nxge_t, queue_t *q,
+	p_mblk_t, caddr_t);
+static int nxge_param_get_vlan_rdcgrp(p_nxge_t, queue_t *,
+	p_mblk_t, caddr_t);
+static int nxge_param_get_mac_rdcgrp(p_nxge_t, queue_t *,
+	p_mblk_t, caddr_t);
+static int nxge_param_get_rxdma_rdcgrp_info(p_nxge_t, queue_t *,
+	p_mblk_t, caddr_t);
+static int nxge_param_get_ip_opt(p_nxge_t, queue_t *, mblk_t *, caddr_t);
+static int nxge_param_get_mac(p_nxge_t, queue_t *q, p_mblk_t, caddr_t);
+static int nxge_param_get_debug_flag(p_nxge_t, queue_t *, p_mblk_t, caddr_t);
+static int nxge_param_set_nxge_debug_flag(p_nxge_t, queue_t *, mblk_t *,
+	char *, caddr_t);
+static int nxge_param_set_npi_debug_flag(p_nxge_t,
+	queue_t *, mblk_t *, char *, caddr_t);
+static int nxge_param_dump_rdc(p_nxge_t, queue_t *q, p_mblk_t, caddr_t);
+static int nxge_param_dump_tdc(p_nxge_t, queue_t *q, p_mblk_t, caddr_t);
+static int nxge_param_dump_mac_regs(p_nxge_t, queue_t *, p_mblk_t, caddr_t);
+static int nxge_param_dump_ipp_regs(p_nxge_t, queue_t *, p_mblk_t, caddr_t);
+static int nxge_param_dump_fflp_regs(p_nxge_t, queue_t *, p_mblk_t, caddr_t);
+static int nxge_param_dump_vlan_table(p_nxge_t, queue_t *, p_mblk_t, caddr_t);
+static int nxge_param_dump_rdc_table(p_nxge_t, queue_t *, p_mblk_t, caddr_t);
+static int nxge_param_dump_ptrs(p_nxge_t, queue_t *, p_mblk_t, caddr_t);
+static boolean_t nxge_param_link_update(p_nxge_t);
+
+/*
+ * Global array of Neptune changeable parameters.
+ * This array is initialized to correspond to the default
+ * Neptune 4 port configuration. This array is copied
+ * into each port's parameter structure and modified per
+ * fcode and nxge.conf configuration. Later, the parameters are
+ * exported to ndd for display and run-time configuration (at
+ * least some of them).
+ *
+ */
+
+static nxge_param_t	nxge_param_arr[] = {
+	/*
+	 * get, set, flags, min, max, value, old, hw-name, conf-name
+	 */
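+	/*
+	 * For example, the "accept-jumbo" entry below reads: get/set via
+	 * nxge_param_get_mac/nxge_param_set_mac, NXGE_PARAM_MAC_RW flags,
+	 * min 0, max 1, value 0, old 0, fcode property "accept-jumbo",
+	 * ndd name "accept_jumbo".
+	 */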
+	{ nxge_param_get_generic, NULL, NXGE_PARAM_READ,
+		0, 999, 1000, 0, "instance", "instance"},
+
+	{ nxge_param_get_generic, NULL, NXGE_PARAM_READ,
+		0, 999, 1000, 0, "main-instance", "main_instance"},
+
+	{ nxge_param_get_generic, NULL, NXGE_PARAM_READ,
+		0, 3, 0, 0, "function-number", "function_number"},
+
+	/* Partition Id */
+	{ nxge_param_get_generic, NULL, NXGE_PARAM_READ,
+		0, 8, 0, 0, "partition-id", "partition_id"},
+
+	/* Read Write Permission Mode */
+	{ nxge_param_get_generic, NULL, NXGE_PARAM_READ | NXGE_PARAM_DONT_SHOW,
+		0, 2, 0, 0, "read-write-mode", "read_write_mode"},
+
+	/* hw cfg types */
+	/* control the DMA config of Neptune/NIU */
+	{ nxge_param_get_generic, NULL, NXGE_PARAM_READ,
+		CFG_DEFAULT, CFG_CUSTOM, CFG_DEFAULT, CFG_DEFAULT,
+		"niu-cfg-type", "niu_cfg_type"},
+
+	/* control the TXDMA config of the Port controlled by tx-quick-cfg */
+	{ nxge_param_get_generic, NULL, NXGE_PARAM_READ,
+		CFG_DEFAULT, CFG_CUSTOM, CFG_NOT_SPECIFIED, CFG_DEFAULT,
+		"tx-qcfg-type", "tx_qcfg_type"},
+
+	/* control the RXDMA config of the Port controlled by rx-quick-cfg */
+	{ nxge_param_get_generic, NULL, NXGE_PARAM_READ,
+		CFG_DEFAULT, CFG_CUSTOM, CFG_NOT_SPECIFIED, CFG_DEFAULT,
+		"rx-qcfg-type", "rx_qcfg_type"},
+
+	{ nxge_param_get_mac, nxge_param_set_mac,
+		NXGE_PARAM_RW  | NXGE_PARAM_DONT_SHOW,
+		0, 1, 0, 0, "master-cfg-enable", "master_cfg_enable"},
+
+	{ nxge_param_get_mac, nxge_param_set_mac,
+		NXGE_PARAM_RW | NXGE_PARAM_DONT_SHOW,
+		0, 1, 0, 0, "master-cfg-value", "master_cfg_value"},
+
+	{ nxge_param_get_mac, nxge_param_set_mac, NXGE_PARAM_MAC_RW,
+		0, 1, 1, 1, "adv-autoneg-cap", "adv_autoneg_cap"},
+
+	{ nxge_param_get_mac, nxge_param_set_mac, NXGE_PARAM_MAC_RW,
+		0, 1, 1, 1, "adv-10gfdx-cap", "adv_10gfdx_cap"},
+
+	{ nxge_param_get_mac, nxge_param_set_mac, NXGE_PARAM_MAC_DONT_SHOW,
+		0, 1, 0, 0, "adv-10ghdx-cap", "adv_10ghdx_cap"},
+
+	{ nxge_param_get_mac, nxge_param_set_mac, NXGE_PARAM_MAC_RW,
+		0, 1, 1, 1, "adv-1000fdx-cap", "adv_1000fdx_cap"},
+
+	{ nxge_param_get_mac, nxge_param_set_mac, NXGE_PARAM_MAC_DONT_SHOW,
+		0, 1, 0, 0, "adv-1000hdx-cap",	"adv_1000hdx_cap"},
+
+	{ nxge_param_get_mac, nxge_param_set_mac, NXGE_PARAM_MAC_DONT_SHOW,
+		0, 1, 0, 0, "adv-100T4-cap", "adv_100T4_cap"},
+
+	{ nxge_param_get_mac, nxge_param_set_mac, NXGE_PARAM_MAC_RW,
+		0, 1, 1, 1, "adv-100fdx-cap", "adv_100fdx_cap"},
+
+	{ nxge_param_get_mac, nxge_param_set_mac, NXGE_PARAM_MAC_DONT_SHOW,
+		0, 1, 0, 0, "adv-100hdx-cap", "adv_100hdx_cap"},
+
+	{ nxge_param_get_mac, nxge_param_set_mac, NXGE_PARAM_MAC_RW,
+		0, 1, 1, 1, "adv-10fdx-cap", "adv_10fdx_cap"},
+
+	{ nxge_param_get_mac, nxge_param_set_mac, NXGE_PARAM_MAC_DONT_SHOW,
+		0, 1, 0, 0, "adv-10hdx-cap", "adv_10hdx_cap"},
+
+	{ nxge_param_get_mac, nxge_param_set_mac, NXGE_PARAM_MAC_RW,
+		0, 1, 0, 0, "adv-asmpause-cap",	"adv_asmpause_cap"},
+
+	{ nxge_param_get_mac, nxge_param_set_mac, NXGE_PARAM_MAC_RW,
+		0, 1, 0, 0, "adv-pause-cap", "adv_pause_cap"},
+
+	{ nxge_param_get_mac, nxge_param_set_mac, NXGE_PARAM_MAC_RW,
+		0, 1, 0, 0, "use-int-xcvr", "use_int_xcvr"},
+
+	{ nxge_param_get_mac, nxge_param_set_mac, NXGE_PARAM_MAC_RW,
+		0, 1, 1, 1, "enable-ipg0", "enable_ipg0"},
+
+	{ nxge_param_get_mac, nxge_param_set_mac, NXGE_PARAM_MAC_RW,
+		0, 255,	8, 8, "ipg0", "ipg0"},
+
+	{ nxge_param_get_mac, nxge_param_set_mac, NXGE_PARAM_MAC_RW,
+		0, 255,	8, 8, "ipg1", "ipg1"},
+
+	{ nxge_param_get_mac, nxge_param_set_mac, NXGE_PARAM_MAC_RW,
+		0, 255,	4, 4, "ipg2", "ipg2"},
+
+	{ nxge_param_get_mac, nxge_param_set_mac, NXGE_PARAM_MAC_RW,
+		0, 1, 0, 0, "accept-jumbo", "accept_jumbo"},
+
+	/* Transmit DMA channels */
+	{ nxge_param_get_generic, NULL, NXGE_PARAM_READ | NXGE_PARAM_READ_PROP,
+		0, 3, 0, 0, "tx-dma-weight", "tx_dma_weight"},
+
+	{ nxge_param_get_generic, NULL, NXGE_PARAM_READ | NXGE_PARAM_READ_PROP,
+		0, 31, 0, 0, "tx-dma-channels-begin", "tx_dma_channels_begin"},
+
+	{ nxge_param_get_generic, NULL, NXGE_PARAM_READ | NXGE_PARAM_READ_PROP,
+		0, 32, 0, 0, "tx-dma-channels", "tx_dma_channels"},
+	{ nxge_param_get_txdma_info, NULL,
+		NXGE_PARAM_READ | NXGE_PARAM_READ_PROP,
+		0, 32, 0, 0, "tx-dma-info", "tx_dma_info"},
+
+	/* Receive DMA channels */
+	{ nxge_param_get_generic, NULL,
+		NXGE_PARAM_READ | NXGE_PARAM_READ_PROP,
+		0, 31, 0, 0, "rx-dma-channels-begin", "rx_dma_channels_begin"},
+
+	{ nxge_param_get_generic, NULL, NXGE_PARAM_READ | NXGE_PARAM_READ_PROP,
+		0, 32, 0, 0, "rx-dma-channels",	"rx_dma_channels"},
+
+	{ nxge_param_get_generic, NULL, NXGE_PARAM_READ | NXGE_PARAM_READ_PROP,
+		0, 65535, PT_DRR_WT_DEFAULT_10G, 0,
+		"rx-drr-weight", "rx_drr_weight"},
+
+	{ nxge_param_get_generic, NULL, NXGE_PARAM_READ | NXGE_PARAM_READ_PROP,
+		0, 1, 1, 0, "rx-full-header", "rx_full_header"},
+
+	{ nxge_param_get_rxdma_info, NULL, NXGE_PARAM_READ,
+		0, 32, 0, 0, "rx-dma-info", "rx_dma_info"},
+
+	{ nxge_param_get_rxdma_info, NULL,
+		NXGE_PARAM_READ | NXGE_PARAM_DONT_SHOW,
+		NXGE_RBR_RBB_MIN, NXGE_RBR_RBB_MAX, NXGE_RBR_RBB_DEFAULT, 0,
+		"rx-rbr-size", "rx_rbr_size"},
+
+	{ nxge_param_get_rxdma_info, NULL,
+		NXGE_PARAM_READ | NXGE_PARAM_DONT_SHOW,
+		NXGE_RCR_MIN, NXGE_RCR_MAX, NXGE_RCR_DEFAULT, 0,
+		"rx-rcr-size", "rx_rcr_size"},
+
+	{ nxge_param_get_generic, nxge_param_set_port_rdc, NXGE_PARAM_RXDMA_RW,
+		0, 15, 0, 0, "default-port-rdc", "default_port_rdc"},
+
+	{ nxge_param_get_generic, nxge_param_rx_intr_time, NXGE_PARAM_RXDMA_RW,
+		NXGE_RDC_RCR_TIMEOUT_MIN, NXGE_RDC_RCR_TIMEOUT_MAX,
+		RXDMA_RCR_TO_DEFAULT, 0, "rxdma-intr-time", "rxdma_intr_time"},
+
+	{ nxge_param_get_generic, nxge_param_rx_intr_pkts, NXGE_PARAM_RXDMA_RW,
+		NXGE_RDC_RCR_THRESHOLD_MIN, NXGE_RDC_RCR_THRESHOLD_MAX,
+		RXDMA_RCR_PTHRES_DEFAULT, 0,
+		"rxdma-intr-pkts", "rxdma_intr_pkts"},
+
+	{ nxge_param_get_generic, NULL, NXGE_PARAM_READ_PROP,
+		0, 8, 0, 0, "rx-rdc-grps-begin", "rx_rdc_grps_begin"},
+
+	{ nxge_param_get_generic, NULL, NXGE_PARAM_READ_PROP,
+		0, 8, 0, 0, "rx-rdc-grps", "rx_rdc_grps"},
+
+	{ nxge_param_get_generic, nxge_param_set_grp_rdc, NXGE_PARAM_RXDMA_RW,
+		0, 15, 0, 0, "default-grp0-rdc", "default_grp0_rdc"},
+
+	{ nxge_param_get_generic, nxge_param_set_grp_rdc, NXGE_PARAM_RXDMA_RW,
+		0, 15,	2, 0, "default-grp1-rdc", "default_grp1_rdc"},
+
+	{ nxge_param_get_generic, nxge_param_set_grp_rdc, NXGE_PARAM_RXDMA_RW,
+		0, 15, 4, 0, "default-grp2-rdc", "default_grp2_rdc"},
+
+	{ nxge_param_get_generic, nxge_param_set_grp_rdc, NXGE_PARAM_RXDMA_RW,
+		0, 15, 6, 0, "default-grp3-rdc", "default_grp3_rdc"},
+
+	{ nxge_param_get_generic, nxge_param_set_grp_rdc, NXGE_PARAM_RXDMA_RW,
+		0, 15, 8, 0, "default-grp4-rdc", "default_grp4_rdc"},
+
+	{ nxge_param_get_generic, nxge_param_set_grp_rdc, NXGE_PARAM_RXDMA_RW,
+		0, 15, 10, 0, "default-grp5-rdc", "default_grp5_rdc"},
+
+	{ nxge_param_get_generic, nxge_param_set_grp_rdc, NXGE_PARAM_RXDMA_RW,
+		0, 15, 12, 0, "default-grp6-rdc", "default_grp6_rdc"},
+
+	{ nxge_param_get_generic, nxge_param_set_grp_rdc, NXGE_PARAM_RXDMA_RW,
+		0, 15, 14, 0, "default-grp7-rdc", "default_grp7_rdc"},
+
+	{ nxge_param_get_rxdma_rdcgrp_info, NULL,
+		NXGE_PARAM_READ | NXGE_PARAM_CMPLX,
+		0, 8, 0, 0, "rdc-groups-info", "rdc_groups_info"},
+
+	/* Logical device groups */
+	{ nxge_param_get_generic, NULL, NXGE_PARAM_READ,
+		0, 63, 0, 0, "start-ldg", "start_ldg"},
+
+	{ nxge_param_get_generic, NULL, NXGE_PARAM_READ,
+		0, 64, 0, 0, "max-ldg", "max_ldg" },
+
+	/* MAC table information */
+	{ nxge_param_get_mac_rdcgrp, nxge_param_set_mac_rdcgrp,
+		NXGE_PARAM_L2CLASS_CFG,
+		0, 31, 0, 0, "mac-2rdc-grp", "mac_2rdc_grp"},
+
+	/* VLAN table information */
+	{ nxge_param_get_vlan_rdcgrp, nxge_param_set_vlan_rdcgrp,
+		NXGE_PARAM_L2CLASS_CFG,
+		0, 31, 0, 0, "vlan-2rdc-grp", "vlan_2rdc_grp"},
+
+	{ nxge_param_get_generic, NULL,
+		NXGE_PARAM_READ_PROP | NXGE_PARAM_READ | NXGE_PARAM_PROP_ARR32,
+		0, 0x0ffff, 0x0ffff, 0, "fcram-part-cfg", "fcram_part_cfg"},
+
+	{ nxge_param_get_generic, NULL, NXGE_PARAM_CLASS_RWS,
+		0, 0x10, 0xa, 0, "fcram-access-ratio", "fcram_access_ratio"},
+
+	{ nxge_param_get_generic, NULL, NXGE_PARAM_CLASS_RWS,
+		0, 0x10, 0xa, 0, "tcam-access-ratio", "tcam_access_ratio"},
+
+	{ nxge_param_get_generic, nxge_param_tcam_enable,
+		NXGE_PARAM_CLASS_RWS,
+		0, 0x1, 0x0, 0, "tcam-enable", "tcam_enable"},
+
+	{ nxge_param_get_generic, nxge_param_hash_lookup_enable,
+		NXGE_PARAM_CLASS_RWS,
+		0, 0x01, 0x0, 0, "hash-lookup-enable", "hash_lookup_enable"},
+
+	{ nxge_param_get_generic, nxge_param_llc_snap_enable,
+		NXGE_PARAM_CLASS_RWS,
+		0, 0x01, 0x01, 0, "llc-snap-enable", "llc_snap_enable"},
+
+	{ nxge_param_get_generic, nxge_param_fflp_hash_init,
+		NXGE_PARAM_CLASS_RWS,
+		0, ALL_FF_32, ALL_FF_32, 0, "h1-init-value", "h1_init_value"},
+
+	{ nxge_param_get_generic,	nxge_param_fflp_hash_init,
+		NXGE_PARAM_CLASS_RWS,
+		0, 0x0ffff, 0x0ffff, 0, "h2-init-value", "h2_init_value"},
+
+	{ nxge_param_get_generic, nxge_param_set_ether_usr,
+		NXGE_PARAM_CLASS_RWS | NXGE_PARAM_DONT_SHOW,
+		0, ALL_FF_32, 0x0, 0,
+		"class-cfg-ether-usr1", "class_cfg_ether_usr1"},
+
+	{ nxge_param_get_generic, nxge_param_set_ether_usr,
+		NXGE_PARAM_CLASS_RWS | NXGE_PARAM_DONT_SHOW,
+		0, ALL_FF_32, 0x0, 0,
+		"class-cfg-ether-usr2", "class_cfg_ether_usr2"},
+
+	{ nxge_param_get_generic, nxge_param_set_ip_usr,
+		NXGE_PARAM_CLASS_RWS | NXGE_PARAM_DONT_SHOW,
+		0, ALL_FF_32, 0x0, 0,
+		"class-cfg-ip-usr4", "class_cfg_ip_usr4"},
+
+	{ nxge_param_get_generic, nxge_param_set_ip_usr,
+		NXGE_PARAM_CLASS_RWS | NXGE_PARAM_DONT_SHOW,
+		0, ALL_FF_32, 0x0, 0,
+		"class-cfg-ip-usr5", "class_cfg_ip_usr5"},
+
+	{ nxge_param_get_generic, nxge_param_set_ip_usr,
+		NXGE_PARAM_CLASS_RWS | NXGE_PARAM_DONT_SHOW,
+		0, ALL_FF_32, 0x0, 0,
+		"class-cfg-ip-usr6", "class_cfg_ip_usr6"},
+
+	{ nxge_param_get_generic, nxge_param_set_ip_usr,
+		NXGE_PARAM_CLASS_RWS | NXGE_PARAM_DONT_SHOW,
+		0, ALL_FF_32, 0x0, 0,
+		"class-cfg-ip-usr7", "class_cfg_ip_usr7"},
+
+	{ nxge_param_get_ip_opt, nxge_param_set_ip_opt,
+		NXGE_PARAM_CLASS_RWS | NXGE_PARAM_DONT_SHOW,
+		0, ALL_FF_32, 0x0, 0,
+		"class-opt-ip-usr4", "class_opt_ip_usr4"},
+
+	{ nxge_param_get_ip_opt, nxge_param_set_ip_opt,
+		NXGE_PARAM_CLASS_RWS | NXGE_PARAM_DONT_SHOW,
+		0, ALL_FF_32, 0x0, 0,
+		"class-opt-ip-usr5", "class_opt_ip_usr5"},
+
+	{ nxge_param_get_ip_opt, nxge_param_set_ip_opt,
+		NXGE_PARAM_CLASS_RWS | NXGE_PARAM_DONT_SHOW,
+		0, ALL_FF_32, 0x0, 0,
+		"class-opt-ip-usr6", "class_opt_ip_usr6"},
+
+	{ nxge_param_get_ip_opt, nxge_param_set_ip_opt,
+		NXGE_PARAM_CLASS_RWS | NXGE_PARAM_DONT_SHOW,
+		0, ALL_FF_32, 0x0, 0,
+		"class-opt-ip-usr7", "class_opt_ip_usr7"},
+
+	{ nxge_param_get_ip_opt, nxge_param_set_ip_opt,
+		NXGE_PARAM_CLASS_RWS,
+		0, ALL_FF_32, NXGE_CLASS_FLOW_GEN_SERVER, 0,
+		"class-opt-ipv4-tcp", "class_opt_ipv4_tcp"},
+
+	{ nxge_param_get_ip_opt, nxge_param_set_ip_opt,
+		NXGE_PARAM_CLASS_RWS,
+		0, ALL_FF_32, NXGE_CLASS_FLOW_GEN_SERVER, 0,
+		"class-opt-ipv4-udp", "class_opt_ipv4_udp"},
+
+	{ nxge_param_get_ip_opt, nxge_param_set_ip_opt,
+		NXGE_PARAM_CLASS_RWS,
+		0, ALL_FF_32, NXGE_CLASS_FLOW_GEN_SERVER, 0,
+		"class-opt-ipv4-ah", "class_opt_ipv4_ah"},
+
+	{ nxge_param_get_ip_opt, nxge_param_set_ip_opt,
+		NXGE_PARAM_CLASS_RWS,
+		0, ALL_FF_32, NXGE_CLASS_FLOW_GEN_SERVER, 0,
+		"class-opt-ipv4-sctp", "class_opt_ipv4_sctp"},
+
+	{ nxge_param_get_ip_opt, nxge_param_set_ip_opt, NXGE_PARAM_CLASS_RWS,
+		0, ALL_FF_32, NXGE_CLASS_FLOW_GEN_SERVER, 0,
+		"class-opt-ipv6-tcp", "class_opt_ipv6_tcp"},
+
+	{ nxge_param_get_ip_opt, nxge_param_set_ip_opt, NXGE_PARAM_CLASS_RWS,
+		0, ALL_FF_32, NXGE_CLASS_FLOW_GEN_SERVER, 0,
+		"class-opt-ipv6-udp", "class_opt_ipv6_udp"},
+
+	{ nxge_param_get_ip_opt, nxge_param_set_ip_opt, NXGE_PARAM_CLASS_RWS,
+		0, ALL_FF_32, NXGE_CLASS_FLOW_GEN_SERVER, 0,
+		"class-opt-ipv6-ah", "class_opt_ipv6_ah"},
+
+	{ nxge_param_get_ip_opt, nxge_param_set_ip_opt, NXGE_PARAM_CLASS_RWS,
+		0, ALL_FF_32, NXGE_CLASS_FLOW_GEN_SERVER, 0,
+		"class-opt-ipv6-sctp",	"class_opt_ipv6_sctp"},
+
+	{ nxge_param_get_debug_flag, nxge_param_set_nxge_debug_flag,
+		NXGE_PARAM_RW,
+		0ULL, ALL_FF_64, 0ULL, 0ULL,
+		"nxge-debug-flag", "nxge_debug_flag"},
+
+	{ nxge_param_get_debug_flag, nxge_param_set_npi_debug_flag,
+		NXGE_PARAM_RW,
+		0ULL, ALL_FF_64, 0ULL, 0ULL,
+		"npi-debug-flag", "npi_debug_flag"},
+
+	{ nxge_param_dump_tdc, NULL, NXGE_PARAM_READ,
+		0, 0x0fffffff, 0x0fffffff, 0, "dump-tdc", "dump_tdc"},
+
+	{ nxge_param_dump_rdc, NULL, NXGE_PARAM_READ,
+		0, 0x0fffffff, 0x0fffffff, 0, "dump-rdc", "dump_rdc"},
+
+	{ nxge_param_dump_mac_regs, NULL, NXGE_PARAM_READ,
+		0, 0x0fffffff, 0x0fffffff, 0, "dump-mac-regs", "dump_mac_regs"},
+
+	{ nxge_param_dump_ipp_regs, NULL, NXGE_PARAM_READ,
+		0, 0x0fffffff, 0x0fffffff, 0, "dump-ipp-regs", "dump_ipp_regs"},
+
+	{ nxge_param_dump_fflp_regs, NULL, NXGE_PARAM_READ,
+		0, 0x0fffffff, 0x0fffffff, 0,
+		"dump-fflp-regs", "dump_fflp_regs"},
+
+	{ nxge_param_dump_vlan_table, NULL, NXGE_PARAM_READ,
+		0, 0x0fffffff, 0x0fffffff, 0,
+		"dump-vlan-table", "dump_vlan_table"},
+
+	{ nxge_param_dump_rdc_table, NULL, NXGE_PARAM_READ,
+		0, 0x0fffffff, 0x0fffffff, 0,
+		"dump-rdc-table", "dump_rdc_table"},
+
+	{ nxge_param_dump_ptrs,	NULL, NXGE_PARAM_READ,
+		0, 0x0fffffff, 0x0fffffff, 0, "dump-ptrs", "dump_ptrs"},
+
+	{  NULL, NULL, NXGE_PARAM_READ | NXGE_PARAM_DONT_SHOW,
+		0, 0x0fffffff, 0x0fffffff, 0, "end", "end"},
+};
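+
+/*
+ * Illustrative note (not part of the changeset): an entry flagged
+ * NXGE_PARAM_RW above is visible through ndd(1M) under its conf-name,
+ * so a hypothetical session on instance 0 might look like
+ *
+ *	ndd -get /dev/nxge0 rxdma_intr_pkts
+ *	ndd -set /dev/nxge0 rxdma_intr_pkts 32
+ *
+ * where the new value must fall within the entry's [min, max] range.
+ * The hw-name column (hyphenated) is matched against fcode/OBP
+ * properties instead.
+ */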
+
+extern void 		*nxge_list;
+
+void
+nxge_get_param_soft_properties(p_nxge_t nxgep)
+{
+
+	p_nxge_param_t 		param_arr;
+	uint_t 			prop_len;
+	int 			i, j;
+	uint32_t		param_count;
+	uint32_t		*int_prop_val;
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, " ==> nxge_get_param_soft_properties"));
+
+	param_arr = nxgep->param_arr;
+	param_count = nxgep->param_count;
+	for (i = 0; i < param_count; i++) {
+		if ((param_arr[i].type & NXGE_PARAM_READ_PROP) == 0)
+			continue;
+		if ((param_arr[i].type & NXGE_PARAM_PROP_STR))
+			continue;
+		if ((param_arr[i].type & NXGE_PARAM_PROP_ARR32) ||
+				(param_arr[i].type & NXGE_PARAM_PROP_ARR64)) {
+			if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
+					nxgep->dip, 0, param_arr[i].fcode_name,
+					(int **)&int_prop_val,
+					(uint_t *)&prop_len)
+					== DDI_PROP_SUCCESS) {
+				uint32_t *cfg_value;
+				uint64_t prop_count;
+
+				if (prop_len > NXGE_PARAM_ARRAY_INIT_SIZE)
+					prop_len = NXGE_PARAM_ARRAY_INIT_SIZE;
+				cfg_value = (uint32_t *)param_arr[i].value;
+				for (j = 0; j < prop_len; j++) {
+					cfg_value[j] = int_prop_val[j];
+				}
+				prop_count = prop_len;
+				param_arr[i].type |=
+				    (prop_count << NXGE_PARAM_ARRAY_CNT_SHIFT);
+				ddi_prop_free(int_prop_val);
+			}
+			continue;
+		}
+
+		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip, 0,
+				param_arr[i].fcode_name,
+				(int **)&int_prop_val,
+				&prop_len) == DDI_PROP_SUCCESS) {
+			if ((*int_prop_val >= param_arr[i].minimum) &&
+					(*int_prop_val <= param_arr[i].maximum))
+				param_arr[i].value = *int_prop_val;
+#ifdef NXGE_DEBUG_ERROR
+			else {
+				NXGE_DEBUG_MSG((nxgep, OBP_CTL,
+					"nxge%d: 'prom' file parameter error\n",
+					nxgep->instance));
+				NXGE_DEBUG_MSG((nxgep, OBP_CTL,
+					"Parameter keyword '%s'"
+					" is outside valid range\n",
+					param_arr[i].name));
+			}
+#endif
+			ddi_prop_free(int_prop_val);
+		}
+
+		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip, 0,
+				param_arr[i].name,
+				(int **)&int_prop_val,
+				&prop_len) == DDI_PROP_SUCCESS) {
+			if ((*int_prop_val >= param_arr[i].minimum) &&
+				(*int_prop_val <= param_arr[i].maximum))
+				param_arr[i].value = *int_prop_val;
+#ifdef NXGE_DEBUG_ERROR
+			else {
+				NXGE_DEBUG_MSG((nxgep, OBP_CTL,
+					"nxge%d: 'conf' file parameter error\n",
+					nxgep->instance));
+				NXGE_DEBUG_MSG((nxgep, OBP_CTL,
+					"Parameter keyword '%s'"
+					" is outside valid range\n",
+					param_arr[i].name));
+			}
+#endif
+			ddi_prop_free(int_prop_val);
+		}
+	}
+}
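+
+/*
+ * Illustrative note (not part of the changeset): the lookups above take
+ * properties either from fcode/OBP (the hyphenated hw-names) or from
+ * nxge.conf (the underscored conf-names). A hypothetical nxge.conf
+ * fragment overriding two NXGE_PARAM_READ_PROP entries might read
+ *
+ *	rx_drr_weight = 4;
+ *	rx_full_header = 0;
+ *
+ * A scalar value outside the [minimum, maximum] range of the matching
+ * nxge_param_arr[] entry is ignored and the compiled-in default kept.
+ */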
+
+static int
+nxge_private_param_register(p_nxge_t nxgep, p_nxge_param_t param_arr)
+{
+	int status = B_TRUE;
+	int channel;
+	uint8_t grp;
+	char *prop_name;
+	char *end;
+	uint32_t name_chars;
+
+	NXGE_DEBUG_MSG((nxgep, NDD2_CTL,
+		"nxge_private_param_register %s", param_arr->name));
+
+	if ((param_arr->type & NXGE_PARAM_PRIV) != NXGE_PARAM_PRIV)
+		return (B_TRUE);
+
+	prop_name =  param_arr->name;
+	if (param_arr->type & NXGE_PARAM_RXDMA) {
+		if (strncmp("rxdma_intr", prop_name, 10) == 0)
+			return (B_TRUE);
+		name_chars = strlen("default_grp");
+		if (strncmp("default_grp", prop_name, name_chars) == 0) {
+			prop_name += name_chars;
+			grp = mi_strtol(prop_name, &end, 10);
+				/* now check if this rdcgrp is in config */
+			return (nxge_check_rdcgrp_port_member(nxgep, grp));
+		}
+		name_chars = strlen(prop_name);
+		if (strncmp("default_port_rdc", prop_name, name_chars) == 0) {
+			return (B_TRUE);
+		}
+		return (B_FALSE);
+	}
+
+	if (param_arr->type & NXGE_PARAM_TXDMA) {
+		name_chars = strlen("txdma");
+		if (strncmp("txdma", prop_name, name_chars) == 0) {
+			prop_name += name_chars;
+			channel = mi_strtol(prop_name, &end, 10);
+				/* now check if this rdc is in config */
+			NXGE_DEBUG_MSG((nxgep, NDD2_CTL,
+					    " nxge_private_param_register: %d",
+					    channel));
+			return (nxge_check_txdma_port_member(nxgep, channel));
+		}
+		return (B_FALSE);
+	}
+
+	status = B_FALSE;
+	NXGE_DEBUG_MSG((nxgep, NDD2_CTL, "<== nxge_private_param_register"));
+
+	return (status);
+}
+
+void
+nxge_setup_param(p_nxge_t nxgep)
+{
+	p_nxge_param_t param_arr;
+	int i;
+	pfi_t set_pfi;
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_setup_param"));
+
+	/*
+	 * Make sure the param_instance is set to a valid device instance.
+	 */
+	if (nxge_param_arr[param_instance].value == 1000)
+		nxge_param_arr[param_instance].value = nxgep->instance;
+
+	param_arr = nxgep->param_arr;
+	param_arr[param_instance].value = nxgep->instance;
+	param_arr[param_function_number].value = nxgep->function_num;
+
+	for (i = 0; i < nxgep->param_count; i++) {
+		if ((param_arr[i].type & NXGE_PARAM_PRIV) &&
+				(nxge_private_param_register(nxgep,
+				&param_arr[i]) == B_FALSE)) {
+			param_arr[i].setf = NULL;
+			param_arr[i].getf = NULL;
+		}
+
+		if (param_arr[i].type & NXGE_PARAM_CMPLX)
+			param_arr[i].setf = NULL;
+
+		if (param_arr[i].type & NXGE_PARAM_DONT_SHOW) {
+			param_arr[i].setf = NULL;
+			param_arr[i].getf = NULL;
+		}
+
+		set_pfi = (pfi_t)param_arr[i].setf;
+
+		if ((set_pfi) && (param_arr[i].type & NXGE_PARAM_INIT_ONLY)) {
+			set_pfi = NULL;
+		}
+
+		if (!nxge_nd_load(&nxgep->param_list, param_arr[i].name,
+				(pfi_t)param_arr[i].getf, set_pfi,
+				(caddr_t)&param_arr[i])) {
+			(void) nxge_nd_free(&nxgep->param_list);
+			break;
+		}
+	}
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_setup_param"));
+}
+
+void
+nxge_init_param(p_nxge_t nxgep)
+{
+	p_nxge_param_t param_arr;
+	int i, alloc_size;
+	uint64_t alloc_count;
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_init_param"));
+	/*
+	 * Make sure the param_instance is set to a valid device instance.
+	 */
+	if (nxge_param_arr[param_instance].value == 1000)
+		nxge_param_arr[param_instance].value = nxgep->instance;
+
+	param_arr = nxgep->param_arr;
+	if (param_arr == NULL) {
+		param_arr = (p_nxge_param_t)
+			KMEM_ZALLOC(sizeof (nxge_param_arr), KM_SLEEP);
+	}
+
+	for (i = 0; i < sizeof (nxge_param_arr)/sizeof (nxge_param_t); i++) {
+		param_arr[i] = nxge_param_arr[i];
+		if ((param_arr[i].type & NXGE_PARAM_PROP_ARR32) ||
+			(param_arr[i].type & NXGE_PARAM_PROP_ARR64)) {
+			alloc_count = NXGE_PARAM_ARRAY_INIT_SIZE;
+			alloc_size = alloc_count * sizeof (uint64_t);
+			param_arr[i].value =
+			    (uint64_t)KMEM_ZALLOC(alloc_size, KM_SLEEP);
+			param_arr[i].old_value =
+				    (uint64_t)KMEM_ZALLOC(alloc_size, KM_SLEEP);
+			param_arr[i].type |=
+				(alloc_count << NXGE_PARAM_ARRAY_ALLOC_SHIFT);
+		}
+	}
+
+	nxgep->param_arr = param_arr;
+	nxgep->param_count = sizeof (nxge_param_arr)/sizeof (nxge_param_t);
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init_param: count %d",
+		nxgep->param_count));
+}
+
+void
+nxge_destroy_param(p_nxge_t nxgep)
+{
+	int i;
+	uint64_t free_size, free_count;
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_param"));
+
+	/*
+	 * If the global param_instance currently refers to this device
+	 * instance, hand it off to another attached instance, if any.
+	 */
+	if (nxge_param_arr[param_instance].value == nxgep->instance) {
+		for (i = 0; i <= nxge_param_arr[param_instance].maximum; i++) {
+			if ((ddi_get_soft_state(nxge_list, i) != NULL) &&
+				(i != nxgep->instance))
+				break;
+		}
+		nxge_param_arr[param_instance].value = i;
+	}
+
+	if (nxgep->param_list)
+		nxge_nd_free(&nxgep->param_list);
+	for (i = 0; i < nxgep->param_count; i++)
+		if ((nxgep->param_arr[i].type & NXGE_PARAM_PROP_ARR32) ||
+			(nxgep->param_arr[i].type & NXGE_PARAM_PROP_ARR64)) {
+			free_count = ((nxgep->param_arr[i].type &
+					    NXGE_PARAM_ARRAY_ALLOC_MASK) >>
+					    NXGE_PARAM_ARRAY_ALLOC_SHIFT);
+			free_count = NXGE_PARAM_ARRAY_INIT_SIZE;
+			free_size = sizeof (uint64_t) * free_count;
+			KMEM_FREE((void *)nxgep->param_arr[i].value, free_size);
+			KMEM_FREE((void *)nxgep->param_arr[i].old_value,
+				free_size);
+		}
+
+	KMEM_FREE(nxgep->param_arr, sizeof (nxge_param_arr));
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_param"));
+}
+
+/*
+ * Extracts the value of the requested parameter from the nxge
+ * parameter array and prints it.  cp points to the parameter entry.
+ */
+
+/* ARGSUSED */
+int
+nxge_param_get_generic(p_nxge_t nxgep, queue_t *q, p_mblk_t mp, caddr_t cp)
+{
+	p_nxge_param_t pa = (p_nxge_param_t)cp;
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL,
+		"==> nxge_param_get_generic name %s ", pa->name));
+
+	if (pa->value > 0xffffffff)
+		(void) mi_mpprintf(mp, "%x%x",
+			(int)(pa->value >> 32), (int)(pa->value & 0xffffffff));
+	else
+		(void) mi_mpprintf(mp, "%x", (int)pa->value);
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_get_generic"));
+	return (0);
+}
+
+/* ARGSUSED */
+static int
+nxge_param_get_mac(p_nxge_t nxgep, queue_t *q, p_mblk_t mp, caddr_t cp)
+{
+	p_nxge_param_t pa = (p_nxge_param_t)cp;
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_get_mac"));
+
+	(void) mi_mpprintf(mp, "%d", (uint32_t)pa->value);
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_get_mac"));
+	return (0);
+}
+
+/* ARGSUSED */
+int
+nxge_param_get_txdma_info(p_nxge_t nxgep, queue_t *q, p_mblk_t mp, caddr_t cp)
+{
+
+	uint_t	print_len, buf_len;
+	p_mblk_t np;
+	int tdc;
+
+	int buff_alloc_size = NXGE_NDD_INFODUMP_BUFF_SIZE;
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_get_txdma_info"));
+
+	(void) mi_mpprintf(mp, "TXDMA Information for Port\t %d \n",
+		nxgep->function_num);
+
+
+	if ((np = allocb(buff_alloc_size, BPRI_HI)) == NULL) {
+		(void) mi_mpprintf(mp, "%s\n", "out of buffer");
+		return (0);
+	}
+
+	buf_len = buff_alloc_size;
+	mp->b_cont = np;
+
+	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
+		"Total TDCs\t %d\n", nxgep->ntdc);
+
+	((mblk_t *)np)->b_wptr += print_len;
+	buf_len -= print_len;
+	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
+		"TDC\t HW TDC\t\n");
+	((mblk_t *)np)->b_wptr += print_len;
+
+	buf_len -= print_len;
+	for (tdc = 0; tdc < nxgep->ntdc; tdc++) {
+		print_len = snprintf((char *)((mblk_t *)np)->b_wptr,
+					    buf_len, "%d\t %d\n",
+					    tdc, nxgep->tdc[tdc]);
+		((mblk_t *)np)->b_wptr += print_len;
+		buf_len -= print_len;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_get_txdma_info"));
+	return (0);
+}
+
+/* ARGSUSED */
+int
+nxge_param_get_rxdma_info(p_nxge_t nxgep, queue_t *q, p_mblk_t mp, caddr_t cp)
+{
+	uint_t			print_len, buf_len;
+	p_mblk_t		np;
+	int			rdc;
+	p_nxge_dma_pt_cfg_t	p_dma_cfgp;
+	p_nxge_hw_pt_cfg_t	p_cfgp;
+	int			buff_alloc_size = NXGE_NDD_INFODUMP_BUFF_SIZE;
+	p_rx_rcr_rings_t 	rx_rcr_rings;
+	p_rx_rcr_ring_t		*rcr_rings;
+	p_rx_rbr_rings_t 	rx_rbr_rings;
+	p_rx_rbr_ring_t		*rbr_rings;
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_get_rxdma_info"));
+
+	(void) mi_mpprintf(mp, "RXDMA Information for Port\t %d \n",
+		nxgep->function_num);
+
+	if ((np = allocb(buff_alloc_size, BPRI_HI)) == NULL) {
+		/* The following may work even if we cannot get a large buf. */
+		(void) mi_mpprintf(mp, "%s\n", "out of buffer");
+		return (0);
+	}
+
+	buf_len = buff_alloc_size;
+	mp->b_cont = np;
+	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
+	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
+
+	rx_rcr_rings = nxgep->rx_rcr_rings;
+	rcr_rings = rx_rcr_rings->rcr_rings;
+	rx_rbr_rings = nxgep->rx_rbr_rings;
+	rbr_rings = rx_rbr_rings->rbr_rings;
+
+	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
+		"Total RDCs\t %d\n", p_cfgp->max_rdcs);
+
+	((mblk_t *)np)->b_wptr += print_len;
+	buf_len -= print_len;
+	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
+		"RDC\t HW RDC\t Timeout\t Packets RBR ptr \t"
+		"chunks\t RCR ptr\n");
+
+	((mblk_t *)np)->b_wptr += print_len;
+	buf_len -= print_len;
+	for (rdc = 0; rdc < p_cfgp->max_rdcs; rdc++) {
+		print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
+			" %d\t  %d\t   %x\t\t %x\t $%p\t 0x%x\t $%p\n",
+			rdc, nxgep->rdc[rdc],
+			p_dma_cfgp->rcr_timeout[rdc],
+			p_dma_cfgp->rcr_threshold[rdc],
+			rbr_rings[rdc],
+			rbr_rings[rdc]->num_blocks, rcr_rings[rdc]);
+		((mblk_t *)np)->b_wptr += print_len;
+		buf_len -= print_len;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_get_rxdma_info"));
+	return (0);
+}
+
+/* ARGSUSED */
+int
+nxge_param_get_rxdma_rdcgrp_info(p_nxge_t nxgep, queue_t *q,
+	p_mblk_t mp, caddr_t cp)
+{
+	uint_t			print_len, buf_len;
+	p_mblk_t		np;
+	int			offset, rdc, i, rdc_grp;
+	p_nxge_rdc_grp_t	rdc_grp_p;
+	p_nxge_dma_pt_cfg_t	p_dma_cfgp;
+	p_nxge_hw_pt_cfg_t	p_cfgp;
+
+	int buff_alloc_size = NXGE_NDD_INFODUMP_BUFF_SIZE;
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL,
+		"==> nxge_param_get_rxdma_rdcgrp_info"));
+
+	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
+	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
+
+	(void) mi_mpprintf(mp, "RXDMA RDC Group Information for Port\t %d \n",
+		nxgep->function_num);
+
+	rdc_grp = p_cfgp->start_rdc_grpid;
+	if ((np = allocb(buff_alloc_size, BPRI_HI)) == NULL) {
+		/* The following may work even if we cannot get a large buf. */
+		(void) mi_mpprintf(mp, "%s\n", "out of buffer");
+		return (0);
+	}
+
+	buf_len = buff_alloc_size;
+	mp->b_cont = np;
+	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
+		"Total RDC Groups\t %d \n"
+		"start RDC group\t %d\n",
+		p_cfgp->max_rdc_grpids,
+		p_cfgp->start_rdc_grpid);
+
+	((mblk_t *)np)->b_wptr += print_len;
+	buf_len -= print_len;
+
+	for (i = 0, rdc_grp = p_cfgp->start_rdc_grpid;
+	    rdc_grp < (p_cfgp->max_rdc_grpids + p_cfgp->start_rdc_grpid);
+	    rdc_grp++, i++) {
+		rdc_grp_p = &p_dma_cfgp->rdc_grps[i];
+		print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
+			"\nRDC Group Info for Group [%d] %d\n"
+			"RDC Count %d\tstart RDC %d\n"
+			"RDC Group Population Information"
+			" (offsets 0 - 15)\n",
+			i, rdc_grp, rdc_grp_p->max_rdcs,
+			rdc_grp_p->start_rdc);
+
+		((mblk_t *)np)->b_wptr += print_len;
+		buf_len -= print_len;
+		print_len = snprintf((char *)((mblk_t *)np)->b_wptr,
+			buf_len, "\n");
+		((mblk_t *)np)->b_wptr += print_len;
+		buf_len -= print_len;
+
+		for (rdc = 0; rdc < rdc_grp_p->max_rdcs; rdc++) {
+			print_len = snprintf((char *)((mblk_t *)np)->b_wptr,
+				buf_len, "[%d]=%d ", rdc,
+				rdc_grp_p->start_rdc + rdc);
+			((mblk_t *)np)->b_wptr += print_len;
+			buf_len -= print_len;
+		}
+		print_len = snprintf((char *)((mblk_t *)np)->b_wptr,
+					    buf_len, "\n");
+		((mblk_t *)np)->b_wptr += print_len;
+		buf_len -= print_len;
+
+		for (offset = 0; offset < 16; offset++) {
+			print_len = snprintf((char *)((mblk_t *)np)->b_wptr,
+				buf_len, " %2d ",
+				rdc_grp_p->rdc[offset]);
+			((mblk_t *)np)->b_wptr += print_len;
+			buf_len -= print_len;
+		}
+		print_len = snprintf((char *)((mblk_t *)np)->b_wptr,
+			buf_len, "\n");
+		((mblk_t *)np)->b_wptr += print_len;
+		buf_len -= print_len;
+	}
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL,
+		"<== nxge_param_get_rxdma_rdcgrp_info"));
+	return (0);
+}
+
+int
+nxge_mk_mblk_tail_space(p_mblk_t mp, p_mblk_t *nmp, size_t size)
+{
+	p_mblk_t tmp;
+
+	tmp = mp;
+	while (tmp->b_cont)
+		tmp = tmp->b_cont;
+	if ((tmp->b_wptr + size) >= tmp->b_datap->db_lim) {
+		tmp->b_cont = allocb(1024, BPRI_HI);
+		tmp = tmp->b_cont;
+		if (!tmp)
+			return (ENOMEM);
+	}
+
+	*nmp = tmp;
+	return (0);
+}
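+
+/*
+ * Illustrative note (not part of the changeset): callers that append
+ * formatted text to an ndd reply would typically do something like
+ *
+ *	if (nxge_mk_mblk_tail_space(mp, &nmp, len) != 0)
+ *		return (ENOMEM);
+ *	(void) snprintf((char *)nmp->b_wptr, len, ...);
+ *	nmp->b_wptr += ...;
+ *
+ * where 'len' is a hypothetical bound on the text to be written; the
+ * helper walks to the last mblk and, if fewer than 'len' bytes of tail
+ * room remain, chains on a fresh 1024-byte block.
+ */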
+
+/*
+ * Sets an nxge parameter (registered with nxge_nd_load()) to the
+ * value supplied through ndd.
+ */
+
+/* ARGSUSED */
+int
+nxge_param_set_generic(p_nxge_t nxgep, queue_t *q, mblk_t *mp,
+			    char *value, caddr_t cp)
+{
+	char *end;
+	uint32_t new_value;
+	p_nxge_param_t pa = (p_nxge_param_t)cp;
+
+	NXGE_DEBUG_MSG((nxgep, IOC_CTL, " ==> nxge_param_set_generic"));
+	new_value = (uint32_t)mi_strtol(value, &end, 10);
+	if (end == value || new_value < pa->minimum ||
+		new_value > pa->maximum) {
+			return (EINVAL);
+	}
+	pa->value = new_value;
+	NXGE_DEBUG_MSG((nxgep, IOC_CTL, " <== nxge_param_set_generic"));
+	return (0);
+}
+
+/*
+ * ndd set handler for the instance parameter; the instance is fixed
+ * at attach time, so this routine is intentionally a no-op.
+ */
+
+/* ARGSUSED */
+int
+nxge_param_set_instance(p_nxge_t nxgep, queue_t *q, mblk_t *mp,
+	char *value, caddr_t cp)
+{
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, " ==> nxge_param_set_instance"));
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, " <== nxge_param_set_instance"));
+	return (0);
+}
+
+/*
+ * Sets a MAC parameter (registered with nxge_nd_load()) to the value
+ * supplied through ndd and triggers a link update.
+ */
+
+/* ARGSUSED */
+int
+nxge_param_set_mac(p_nxge_t nxgep, queue_t *q, mblk_t *mp,
+	char *value, caddr_t cp)
+{
+	char		*end;
+	uint32_t	new_value;
+	int		status = 0;
+	p_nxge_param_t	pa = (p_nxge_param_t)cp;
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_set_mac"));
+	new_value = (uint32_t)mi_strtol(value, &end, BASE_DECIMAL);
+	if (PARAM_OUTOF_RANGE(value, end, new_value, pa)) {
+		return (EINVAL);
+	}
+
+	if (pa->value != new_value) {
+		pa->old_value = pa->value;
+		pa->value = new_value;
+	}
+
+	if (!nxge_param_link_update(nxgep)) {
+		NXGE_DEBUG_MSG((nxgep, NDD_CTL,
+				    " false ret from nxge_param_link_update"));
+		status = EINVAL;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_set_mac"));
+	return (status);
+}
+
+/* ARGSUSED */
+static int
+nxge_param_rx_intr_pkts(p_nxge_t nxgep, queue_t *q, mblk_t *mp,
+	char *value, caddr_t cp)
+{
+	char		*end;
+	uint32_t	cfg_value;
+	p_nxge_param_t	pa = (p_nxge_param_t)cp;
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_rx_intr_pkts"));
+
+	cfg_value = (uint32_t)mi_strtol(value, &end, BASE_ANY);
+
+	if ((cfg_value > NXGE_RDC_RCR_THRESHOLD_MAX) ||
+		(cfg_value < NXGE_RDC_RCR_THRESHOLD_MIN)) {
+		return (EINVAL);
+	}
+
+	if ((pa->value != cfg_value)) {
+		pa->old_value = pa->value;
+		pa->value = cfg_value;
+		nxgep->intr_threshold = pa->value;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_rx_intr_pkts"));
+	return (0);
+}
+
+/* ARGSUSED */
+static int
+nxge_param_rx_intr_time(p_nxge_t nxgep, queue_t *q, mblk_t *mp,
+	char *value, caddr_t cp)
+{
+	char		*end;
+	uint32_t	cfg_value;
+	p_nxge_param_t	pa = (p_nxge_param_t)cp;
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_rx_intr_time"));
+
+	cfg_value = (uint32_t)mi_strtol(value, &end, BASE_ANY);
+
+	if ((cfg_value > NXGE_RDC_RCR_TIMEOUT_MAX) ||
+		(cfg_value < NXGE_RDC_RCR_TIMEOUT_MIN)) {
+		return (EINVAL);
+	}
+
+	if ((pa->value != cfg_value)) {
+		pa->old_value = pa->value;
+		pa->value = cfg_value;
+		nxgep->intr_timeout = pa->value;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_rx_intr_time"));
+	return (0);
+}
+
+/* ARGSUSED */
+static int
+nxge_param_set_mac_rdcgrp(p_nxge_t nxgep, queue_t *q,
+	mblk_t *mp, char *value, caddr_t cp)
+{
+	char			 *end;
+	uint32_t		status = 0, cfg_value;
+	p_nxge_param_t		pa = (p_nxge_param_t)cp;
+	uint32_t		cfg_it = B_FALSE;
+	p_nxge_dma_pt_cfg_t	p_dma_cfgp;
+	p_nxge_hw_pt_cfg_t	p_cfgp;
+	uint32_t		*val_ptr, *old_val_ptr;
+	nxge_param_map_t	*mac_map;
+	p_nxge_class_pt_cfg_t	p_class_cfgp;
+	nxge_mv_cfg_t		*mac_host_info;
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_set_mac_rdcgrp "));
+
+	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
+	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
+	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
+	mac_host_info = (nxge_mv_cfg_t	*)&p_class_cfgp->mac_host_info[0];
+	cfg_value = (uint32_t)mi_strtol(value, &end, BASE_HEX);
+
+	/*
+	 * now do decoding
+	 */
+	mac_map = (nxge_param_map_t *)&cfg_value;
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, " cfg_value %x id %x map_to %x",
+		cfg_value, mac_map->param_id, mac_map->map_to));
+
+	if ((mac_map->param_id < p_cfgp->max_macs) &&
+			(mac_map->map_to < (p_cfgp->max_rdc_grpids +
+			p_cfgp->start_rdc_grpid)) && (mac_map->map_to >=
+			p_cfgp->start_rdc_grpid)) {
+		NXGE_DEBUG_MSG((nxgep, NDD_CTL,
+			" nxge_param_set_mac_rdcgrp mapping"
+			" id %d grp %d", mac_map->param_id, mac_map->map_to));
+		val_ptr = (uint32_t *)pa->value;
+		old_val_ptr = (uint32_t *)pa->old_value;
+		if (val_ptr[mac_map->param_id] != cfg_value) {
+			old_val_ptr[mac_map->param_id] =
+				    val_ptr[mac_map->param_id];
+			val_ptr[mac_map->param_id] = cfg_value;
+			mac_host_info[mac_map->param_id].mpr_npr =
+				    mac_map->pref;
+			mac_host_info[mac_map->param_id].flag = 1;
+			mac_host_info[mac_map->param_id].rdctbl =
+				    mac_map->map_to;
+			cfg_it = B_TRUE;
+		}
+	} else {
+		return (EINVAL);
+	}
+
+	if (cfg_it == B_TRUE) {
+		status = nxge_logical_mac_assign_rdc_table(nxgep,
+						    (uint8_t)mac_map->param_id);
+		if (status != NXGE_OK)
+			return (EINVAL);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_set_mac_rdcgrp"));
+	return (0);
+}
+
+/* ARGSUSED */
+static int
+nxge_param_set_vlan_rdcgrp(p_nxge_t nxgep, queue_t *q,
+	mblk_t	*mp, char *value, caddr_t cp)
+{
+	char			*end;
+	uint32_t		status = 0, cfg_value;
+	p_nxge_param_t		pa = (p_nxge_param_t)cp;
+	uint32_t		cfg_it = B_FALSE;
+	p_nxge_dma_pt_cfg_t	p_dma_cfgp;
+	p_nxge_hw_pt_cfg_t	p_cfgp;
+	uint32_t		*val_ptr, *old_val_ptr;
+	nxge_param_map_t	*vmap, *old_map;
+	p_nxge_class_pt_cfg_t	p_class_cfgp;
+	uint64_t		cfgd_vlans;
+	int			i, inc = 0, cfg_position;
+	nxge_mv_cfg_t		*vlan_tbl;
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_set_vlan_rdcgrp "));
+
+	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
+	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
+	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
+	vlan_tbl = (nxge_mv_cfg_t *)&p_class_cfgp->vlan_tbl[0];
+
+	cfg_value = (uint32_t)mi_strtol(value, &end, BASE_HEX);
+
+	/* now do decoding */
+	cfgd_vlans = ((pa->type &  NXGE_PARAM_ARRAY_CNT_MASK) >>
+		NXGE_PARAM_ARRAY_CNT_SHIFT);
+
+	if (cfgd_vlans == NXGE_PARAM_ARRAY_INIT_SIZE) {
+		/*
+		 * For now, we process only up to
+		 * NXGE_PARAM_ARRAY_INIT_SIZE parameters.
+		 * In the future, we may want to expand
+		 * the storage array and continue.
+		 */
+		return (EINVAL);
+	}
+
+	vmap = (nxge_param_map_t *)&cfg_value;
+	if ((vmap->param_id) &&
+		(vmap->param_id < NXGE_MAX_VLANS) &&
+		(vmap->map_to < p_cfgp->max_rdc_grpids)) {
+		NXGE_DEBUG_MSG((nxgep, NDD_CTL,
+			"nxge_param_set_vlan_rdcgrp mapping"
+			" id %d grp %d",
+			vmap->param_id, vmap->map_to));
+		val_ptr = (uint32_t *)pa->value;
+		old_val_ptr = (uint32_t *)pa->old_value;
+
+		/* search to see if this vlan id is already configured */
+		for (i = 0; i < cfgd_vlans; i++) {
+			old_map = (nxge_param_map_t *)&val_ptr[i];
+			if ((old_map->param_id == 0) ||
+				(vmap->param_id == old_map->param_id) ||
+				(vlan_tbl[vmap->param_id].flag)) {
+				cfg_position = i;
+				break;
+			}
+		}
+
+		if (cfgd_vlans == 0) {
+			cfg_position = 0;
+			inc++;
+		}
+
+		if (i == cfgd_vlans) {
+			cfg_position = i;
+			inc++;
+		}
+
+		NXGE_DEBUG_MSG((nxgep, NDD2_CTL,
+			"set_vlan_rdcgrp mapping"
+			" i %d cfgd_vlans %llx position %d ",
+			i, cfgd_vlans, cfg_position));
+		if (val_ptr[cfg_position] != cfg_value) {
+			old_val_ptr[cfg_position] = val_ptr[cfg_position];
+			val_ptr[cfg_position] = cfg_value;
+			vlan_tbl[vmap->param_id].mpr_npr = vmap->pref;
+			vlan_tbl[vmap->param_id].flag = 1;
+			vlan_tbl[vmap->param_id].rdctbl =
+			    vmap->map_to + p_cfgp->start_rdc_grpid;
+			cfg_it = B_TRUE;
+			if (inc) {
+				cfgd_vlans++;
+				pa->type &= ~NXGE_PARAM_ARRAY_CNT_MASK;
+				pa->type |= (cfgd_vlans <<
+						    NXGE_PARAM_ARRAY_CNT_SHIFT);
+
+			}
+			NXGE_DEBUG_MSG((nxgep, NDD2_CTL,
+				"after: param_set_vlan_rdcgrp "
+				" cfg_vlans %llx position %d \n",
+				cfgd_vlans, cfg_position));
+		}
+	} else {
+		return (EINVAL);
+	}
+
+	if (cfg_it == B_TRUE) {
+		status = nxge_fflp_config_vlan_table(nxgep,
+			(uint16_t)vmap->param_id);
+		if (status != NXGE_OK)
+			return (EINVAL);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_set_vlan_rdcgrp"));
+	return (0);
+}
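+
+/*
+ * Illustrative note (not part of the changeset): for both mac-2rdc-grp
+ * and vlan-2rdc-grp the ascii hex value written through ndd is
+ * reinterpreted as an nxge_param_map_t: param_id selects the MAC entry
+ * or VLAN ID, map_to names the target RDC group, and pref carries the
+ * MAC/VLAN preference. Once the decoded mapping passes the range
+ * checks, nxge_logical_mac_assign_rdc_table() or
+ * nxge_fflp_config_vlan_table() programs the hardware.
+ */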
+
+/* ARGSUSED */
+static int
+nxge_param_get_vlan_rdcgrp(p_nxge_t nxgep, queue_t *q,
+	mblk_t *mp, caddr_t cp)
+{
+
+	uint_t 			print_len, buf_len;
+	p_mblk_t		np;
+	int			i;
+	uint32_t		*val_ptr;
+	nxge_param_map_t	*vmap;
+	p_nxge_param_t		pa = (p_nxge_param_t)cp;
+	p_nxge_class_pt_cfg_t 	p_class_cfgp;
+	p_nxge_dma_pt_cfg_t	p_dma_cfgp;
+	p_nxge_hw_pt_cfg_t	p_cfgp;
+	uint64_t		cfgd_vlans = 0;
+	nxge_mv_cfg_t		*vlan_tbl;
+	int			buff_alloc_size =
+					NXGE_NDD_INFODUMP_BUFF_SIZE * 32;
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_set_vlan_rdcgrp "));
+	(void) mi_mpprintf(mp, "VLAN RDC Mapping Information for Port\t %d \n",
+		nxgep->function_num);
+
+	if ((np = allocb(buff_alloc_size, BPRI_HI)) == NULL) {
+		(void) mi_mpprintf(mp, "%s\n", "out of buffer");
+		return (0);
+	}
+
+	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
+	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
+
+	buf_len = buff_alloc_size;
+	mp->b_cont = np;
+	cfgd_vlans = (pa->type &  NXGE_PARAM_ARRAY_CNT_MASK) >>
+		NXGE_PARAM_ARRAY_CNT_SHIFT;
+
+	i = (int)cfgd_vlans;
+	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
+	vlan_tbl = (nxge_mv_cfg_t *)&p_class_cfgp->vlan_tbl[0];
+	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
+		"Configured VLANs %d\n"
+		"VLAN ID\t RDC GRP (Actual/Port)\t"
+		" Prefernce\n", i);
+	((mblk_t *)np)->b_wptr += print_len;
+	buf_len -= print_len;
+
+	val_ptr = (uint32_t *)pa->value;
+
+	for (i = 0; i < cfgd_vlans; i++) {
+		vmap = (nxge_param_map_t *)&val_ptr[i];
+		if (p_class_cfgp->vlan_tbl[vmap->param_id].flag) {
+			print_len = snprintf((char *)((mblk_t *)np)->b_wptr,
+				buf_len,
+				"  %d\t\t %d/%d\t\t %d\n",
+				vmap->param_id,
+				vlan_tbl[vmap->param_id].rdctbl,
+				vlan_tbl[vmap->param_id].rdctbl -
+				p_cfgp->start_rdc_grpid,
+				vlan_tbl[vmap->param_id].mpr_npr);
+			((mblk_t *)np)->b_wptr += print_len;
+			buf_len -= print_len;
+		}
+	}
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_get_vlan_rdcgrp"));
+	return (0);
+}
+
+/* ARGSUSED */
+static int
+nxge_param_get_mac_rdcgrp(p_nxge_t nxgep, queue_t *q,
+	mblk_t *mp, caddr_t cp)
+{
+	uint_t			print_len, buf_len;
+	p_mblk_t		np;
+	int			i;
+	p_nxge_class_pt_cfg_t 	p_class_cfgp;
+	p_nxge_dma_pt_cfg_t	p_dma_cfgp;
+	p_nxge_hw_pt_cfg_t	p_cfgp;
+	nxge_mv_cfg_t		*mac_host_info;
+
+	int buff_alloc_size = NXGE_NDD_INFODUMP_BUFF_SIZE * 32;
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_get_mac_rdcgrp "));
+	(void) mi_mpprintf(mp,
+		"MAC ADDR RDC Mapping Information for Port\t %d\n",
+		nxgep->function_num);
+
+	if ((np = allocb(buff_alloc_size, BPRI_HI)) == NULL) {
+		(void) mi_mpprintf(mp, "%s\n", "out of buffer");
+		return (0);
+	}
+
+	buf_len = buff_alloc_size;
+	mp->b_cont = np;
+	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
+	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
+	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
+	mac_host_info = (nxge_mv_cfg_t	*)&p_class_cfgp->mac_host_info[0];
+	print_len = snprintf((char *)np->b_wptr, buf_len,
+		"MAC ID\t RDC GRP (Actual/Port)\t"
+		" Prefernce\n");
+	((mblk_t *)np)->b_wptr += print_len;
+	buf_len -= print_len;
+	for (i = 0; i < p_cfgp->max_macs; i++) {
+		if (mac_host_info[i].flag) {
+			print_len = snprintf((char *)((mblk_t *)np)->b_wptr,
+				buf_len,
+				"   %d\t  %d/%d\t\t %d\n",
+				i, mac_host_info[i].rdctbl,
+				mac_host_info[i].rdctbl -
+				p_cfgp->start_rdc_grpid,
+				mac_host_info[i].mpr_npr);
+			((mblk_t *)np)->b_wptr += print_len;
+			buf_len -= print_len;
+		}
+	}
+	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
+		"Done Info Dumping \n");
+	((mblk_t *)np)->b_wptr += print_len;
+	buf_len -= print_len;
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_get_macrdcgrp"));
+	return (0);
+}
+
+/* ARGSUSED */
+static int
+nxge_param_tcam_enable(p_nxge_t nxgep, queue_t *q,
+	mblk_t *mp, char *value, caddr_t cp)
+{
+	uint32_t	status = 0, cfg_value;
+	p_nxge_param_t	pa = (p_nxge_param_t)cp;
+	uint32_t	cfg_it = B_FALSE;
+	char		*end;
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_tcam_enable"));
+
+	cfg_value = (uint32_t)mi_strtol(value, &end, BASE_BINARY);
+	if (pa->value != cfg_value) {
+		pa->old_value = pa->value;
+		pa->value = cfg_value;
+		cfg_it = B_TRUE;
+	}
+
+	if (cfg_it == B_TRUE) {
+		if (pa->value)
+			status = nxge_fflp_config_tcam_enable(nxgep);
+		else
+			status = nxge_fflp_config_tcam_disable(nxgep);
+		if (status != NXGE_OK)
+			return (EINVAL);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, " <== nxge_param_tcam_enable"));
+	return (0);
+}
+
+/* ARGSUSED */
+static int
+nxge_param_hash_lookup_enable(p_nxge_t nxgep, queue_t *q,
+	mblk_t *mp, char *value, caddr_t cp)
+{
+	uint32_t	status = 0, cfg_value;
+	p_nxge_param_t	pa = (p_nxge_param_t)cp;
+	uint32_t	cfg_it = B_FALSE;
+	char		*end;
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_hash_lookup_enable"));
+
+	cfg_value = (uint32_t)mi_strtol(value, &end, BASE_BINARY);
+	if (pa->value != cfg_value) {
+		pa->old_value = pa->value;
+		pa->value = cfg_value;
+		cfg_it = B_TRUE;
+	}
+
+	if (cfg_it == B_TRUE) {
+		if (pa->value)
+			status = nxge_fflp_config_hash_lookup_enable(nxgep);
+		else
+			status = nxge_fflp_config_hash_lookup_disable(nxgep);
+		if (status != NXGE_OK)
+			return (EINVAL);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, " <== nxge_param_hash_lookup_enable"));
+	return (0);
+}
+
+/* ARGSUSED */
+static int
+nxge_param_llc_snap_enable(p_nxge_t nxgep, queue_t *q,
+	mblk_t *mp, char *value, caddr_t cp)
+{
+	char		*end;
+	uint32_t	status = 0, cfg_value;
+	p_nxge_param_t	pa = (p_nxge_param_t)cp;
+	uint32_t	cfg_it = B_FALSE;
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_llc_snap_enable"));
+
+	cfg_value = (uint32_t)mi_strtol(value, &end, BASE_BINARY);
+	if (pa->value != cfg_value) {
+		pa->old_value = pa->value;
+		pa->value = cfg_value;
+		cfg_it = B_TRUE;
+	}
+
+	if (cfg_it == B_TRUE) {
+		if (pa->value)
+			status = nxge_fflp_config_tcam_enable(nxgep);
+		else
+			status = nxge_fflp_config_tcam_disable(nxgep);
+		if (status != NXGE_OK)
+			return (EINVAL);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, " <== nxge_param_llc_snap_enable"));
+	return (0);
+}
+
+/* ARGSUSED */
+static int
+nxge_param_set_ether_usr(p_nxge_t nxgep, queue_t *q,
+	mblk_t	*mp, char *value, caddr_t cp)
+{
+	char		*end;
+	uint8_t		ether_class;
+	uint32_t	status = 0, cfg_value;
+	p_nxge_param_t	pa = (p_nxge_param_t)cp;
+	uint8_t		cfg_it = B_FALSE;
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_set_ether_usr"));
+
+	cfg_value = (uint32_t)mi_strtol(value, &end, BASE_HEX);
+	if (PARAM_OUTOF_RANGE(value, end, cfg_value, pa)) {
+		return (EINVAL);
+	}
+
+	if (pa->value != cfg_value) {
+		pa->old_value = pa->value;
+		pa->value = cfg_value;
+		cfg_it = B_TRUE;
+	}
+
+	/* do the actual hw setup  */
+	if (cfg_it == B_TRUE) {
+		ether_class = mi_strtol(pa->name, &end, 10);
+#ifdef lint
+		ether_class = ether_class;
+#endif
+		NXGE_DEBUG_MSG((nxgep, NDD_CTL, " nxge_param_set_ether_usr"));
+	}
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_set_ether_usr"));
+	return (status);
+}
+
+/* ARGSUSED */
+static int
+nxge_param_set_ip_usr(p_nxge_t nxgep, queue_t *q,
+	mblk_t *mp, char *value, caddr_t cp)
+{
+	char		*end;
+	tcam_class_t	class;
+	uint32_t	status, cfg_value;
+	p_nxge_param_t	pa = (p_nxge_param_t)cp;
+	uint32_t	cfg_it = B_FALSE;
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_set_ip_usr"));
+
+	cfg_value = (uint32_t)mi_strtol(value, &end, BASE_HEX);
+	if (PARAM_OUTOF_RANGE(value, end, cfg_value, pa)) {
+		return (EINVAL);
+	}
+
+	if (pa->value != cfg_value) {
+		pa->old_value = pa->value;
+		pa->value = cfg_value;
+		cfg_it = B_TRUE;
+	}
+
+	/* do the actual hw setup with cfg_value. */
+	if (cfg_it == B_TRUE) {
+		class = mi_strtol(pa->name, &end, 10);
+		status = nxge_fflp_ip_usr_class_config(nxgep, class, pa->value);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_set_ip_usr"));
+	return (status);
+}
+
+/* ARGSUSED */
+static int
+nxge_class_name_2value(p_nxge_t nxgep, char *name)
+{
+	int		i;
+	int		class_instance = param_class_opt_ip_usr4;
+	p_nxge_param_t	param_arr;
+
+	param_arr = nxgep->param_arr;
+	for (i = TCAM_CLASS_IP_USER_4; i <= TCAM_CLASS_SCTP_IPV6; i++) {
+		if (strcmp(param_arr[class_instance].name, name) == 0)
+			return (i);
+		class_instance++;
+	}
+	return (-1);
+}
+
+/* ARGSUSED */
+static int
+nxge_param_set_ip_opt(p_nxge_t nxgep, queue_t *q,
+	mblk_t *mp, char *value, caddr_t cp)
+{
+	char		*end;
+	uint32_t	status, cfg_value;
+	p_nxge_param_t	pa = (p_nxge_param_t)cp;
+	tcam_class_t	class;
+	uint32_t	cfg_it = B_FALSE;
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_set_ip_opt"));
+
+	cfg_value = (uint32_t)mi_strtol(value, &end, BASE_HEX);
+	if (PARAM_OUTOF_RANGE(value, end, cfg_value, pa)) {
+		return (EINVAL);
+	}
+
+	if (pa->value != cfg_value) {
+		pa->old_value = pa->value;
+		pa->value = cfg_value;
+		cfg_it = B_TRUE;
+	}
+
+	if (cfg_it == B_TRUE) {
+		/* do the actual hw setup  */
+		class = nxge_class_name_2value(nxgep, pa->name);
+		if (class == -1)
+			return (EINVAL);
+
+		status = nxge_fflp_ip_class_config(nxgep, class, pa->value);
+		if (status != NXGE_OK)
+			return (EINVAL);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_set_ip_opt"));
+	return (0);
+}
+
+/* ARGSUSED */
+static int
+nxge_param_get_ip_opt(p_nxge_t nxgep, queue_t *q,
+	mblk_t *mp, caddr_t cp)
+{
+	uint32_t status, cfg_value;
+	p_nxge_param_t pa = (p_nxge_param_t)cp;
+	tcam_class_t class;
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_get_ip_opt"));
+
+	/* do the actual hw setup  */
+	class = nxge_class_name_2value(nxgep, pa->name);
+	if (class == -1)
+		return (EINVAL);
+
+	cfg_value = 0;
+	status = nxge_fflp_ip_class_config_get(nxgep, class, &cfg_value);
+	if (status != NXGE_OK)
+		return (EINVAL);
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL,
+		"nxge_param_get_ip_opt_get %x ", cfg_value));
+
+	pa->value = cfg_value;
+	(void) mi_mpprintf(mp, "%x", cfg_value);
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_get_ip_opt status "));
+	return (0);
+}
+
+/* ARGSUSED */
+static int
+nxge_param_fflp_hash_init(p_nxge_t nxgep, queue_t *q,
+	mblk_t *mp, char *value, caddr_t cp)
+{
+	char		*end;
+	uint32_t	status, cfg_value;
+	p_nxge_param_t	pa = (p_nxge_param_t)cp;
+	tcam_class_t	class;
+	uint32_t	cfg_it = B_FALSE;
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_fflp_hash_init"));
+
+	cfg_value = (uint32_t)mi_strtol(value, &end, BASE_HEX);
+	if (PARAM_OUTOF_RANGE(value, end, cfg_value, pa)) {
+		return (EINVAL);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL,
+		"nxge_param_fflp_hash_init value %x", cfg_value));
+
+	if (pa->value != cfg_value) {
+		pa->old_value = pa->value;
+		pa->value = cfg_value;
+		cfg_it = B_TRUE;
+	}
+
+	if (cfg_it == B_TRUE) {
+		char *h_name;
+
+		/* do the actual hw setup */
+		h_name = pa->name;
+		h_name++;
+		class = mi_strtol(h_name, &end, 10);
+		switch (class) {
+		case 1:
+			status = nxge_fflp_set_hash1(nxgep,
+				(uint32_t)pa->value);
+			break;
+		case 2:
+			status = nxge_fflp_set_hash2(nxgep,
+				(uint16_t)pa->value);
+			break;
+		default:
+			NXGE_DEBUG_MSG((nxgep, NDD_CTL,
+				" nxge_param_fflp_hash_init"
+				" %s Wrong hash var %d",
+				pa->name, class));
+			return (EINVAL);
+		}
+		if (status != NXGE_OK)
+			return (EINVAL);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, " <== nxge_param_fflp_hash_init"));
+	return (0);
+}
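+
+/*
+ * Illustrative note (not part of the changeset): the trailing digit of
+ * the parameter name selects the hash register, so hypothetical ndd
+ * invocations might be
+ *
+ *	ndd -set /dev/nxge0 h1_init_value ffffffff
+ *	ndd -set /dev/nxge0 h2_init_value ffff
+ *
+ * which route through nxge_fflp_set_hash1() and nxge_fflp_set_hash2()
+ * respectively.
+ */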
+
+/* ARGSUSED */
+static int
+nxge_param_set_grp_rdc(p_nxge_t nxgep, queue_t *q,
+	mblk_t *mp, char *value, caddr_t cp)
+{
+	char			*end;
+	uint32_t		status = 0, cfg_value;
+	p_nxge_param_t		pa = (p_nxge_param_t)cp;
+	uint32_t		cfg_it = B_FALSE;
+	int			rdc_grp;
+	uint8_t			real_rdc;
+	p_nxge_dma_pt_cfg_t	p_dma_cfgp;
+	p_nxge_hw_pt_cfg_t	p_cfgp;
+	p_nxge_rdc_grp_t	rdc_grp_p;
+
+	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
+	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_set_grp_rdc"));
+
+	cfg_value = (uint32_t)mi_strtol(value, &end, BASE_ANY);
+	if (PARAM_OUTOF_RANGE(value, end, cfg_value, pa)) {
+		return (EINVAL);
+	}
+
+	if (cfg_value >= p_cfgp->max_rdcs) {
+		return (EINVAL);
+	}
+
+	if (pa->value != cfg_value) {
+		pa->old_value = pa->value;
+		pa->value = cfg_value;
+		cfg_it = B_TRUE;
+	}
+
+	if (cfg_it == B_TRUE) {
+		char *grp_name;
+		grp_name = pa->name;
+		grp_name += strlen("default-grp");
+		rdc_grp = mi_strtol(grp_name, &end, 10);
+		rdc_grp_p = &p_dma_cfgp->rdc_grps[rdc_grp];
+		real_rdc = rdc_grp_p->start_rdc + cfg_value;
+		if (nxge_check_rxdma_rdcgrp_member(nxgep, rdc_grp,
+				cfg_value) == B_FALSE) {
+			pa->value = pa->old_value;
+			NXGE_DEBUG_MSG((nxgep, NDD_CTL,
+				" nxge_param_set_grp_rdc"
+				" %d read %d actual %d out of range",
+				rdc_grp, cfg_value, real_rdc));
+			return (EINVAL);
+		}
+		status = nxge_rxdma_cfg_rdcgrp_default_rdc(nxgep, rdc_grp,
+							    real_rdc);
+		if (status != NXGE_OK)
+			return (EINVAL);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_set_grp_rdc"));
+	return (0);
+}
+
+/* ARGSUSED */
+static int
+nxge_param_set_port_rdc(p_nxge_t nxgep, queue_t *q,
+	mblk_t *mp, char *value, caddr_t cp)
+{
+	char		*end;
+	uint32_t	status = B_TRUE, cfg_value;
+	p_nxge_param_t	pa = (p_nxge_param_t)cp;
+	uint32_t	cfg_it = B_FALSE;
+
+	p_nxge_dma_pt_cfg_t	p_dma_cfgp;
+	p_nxge_hw_pt_cfg_t	p_cfgp;
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_set_port_rdc"));
+	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
+	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
+
+	cfg_value = (uint32_t)mi_strtol(value, &end, BASE_ANY);
+	if (PARAM_OUTOF_RANGE(value, end, cfg_value, pa)) {
+		return (EINVAL);
+	}
+
+	if (pa->value != cfg_value) {
+		if (cfg_value >= p_cfgp->max_rdcs)
+			return (EINVAL);
+		pa->old_value = pa->value;
+		pa->value = cfg_value;
+		cfg_it = B_TRUE;
+	}
+
+	if (cfg_it == B_TRUE) {
+		status = nxge_rxdma_cfg_port_default_rdc(nxgep,
+			nxgep->function_num,
+			nxgep->rdc[cfg_value]);
+		if (status != NXGE_OK)
+			return (EINVAL);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_set_port_rdc"));
+	return (0);
+}
+
+/* ARGSUSED */
+static int
+nxge_param_set_nxge_debug_flag(p_nxge_t nxgep, queue_t *q,
+	mblk_t *mp, char *value, caddr_t cp)
+{
+	char *end;
+	uint32_t status = 0;
+	uint64_t cfg_value = 0;
+	p_nxge_param_t pa = (p_nxge_param_t)cp;
+	uint32_t cfg_it = B_FALSE;
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_set_nxge_debug_flag"));
+	cfg_value = mi_strtol(value, &end, BASE_HEX);
+
+	if (PARAM_OUTOF_RANGE(value, end, cfg_value, pa)) {
+		NXGE_DEBUG_MSG((nxgep, NDD_CTL,
+			" nxge_param_set_nxge_debug_flag"
+			" out of range %llx", cfg_value));
+		return (EINVAL);
+	}
+	if (pa->value != cfg_value) {
+		pa->old_value = pa->value;
+		pa->value = cfg_value;
+		cfg_it = B_TRUE;
+	}
+
+	if (cfg_it == B_TRUE) {
+		nxgep->nxge_debug_level = pa->value;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_set_nxge_debug_flag"));
+	return (status);
+}
+
+/* ARGSUSED */
+static int
+nxge_param_get_debug_flag(p_nxge_t nxgep, queue_t *q, p_mblk_t mp, caddr_t cp)
+{
+	int		status = 0;
+	p_nxge_param_t	pa = (p_nxge_param_t)cp;
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_get_debug_flag"));
+
+	if (pa->value > 0xffffffff)
+		(void) mi_mpprintf(mp, "%x%x",  (int)(pa->value >> 32),
+			(int)(pa->value & 0xffffffff));
+	else
+		(void) mi_mpprintf(mp, "%x", (int)pa->value);
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_get_debug_flag"));
+	return (status);
+}
+
+/* ARGSUSED */
+static int
+nxge_param_set_npi_debug_flag(p_nxge_t nxgep, queue_t *q,
+	mblk_t *mp, char *value, caddr_t cp)
+{
+	char		*end;
+	uint32_t	status = 0;
+	uint64_t	 cfg_value = 0;
+	p_nxge_param_t	pa;
+	uint32_t	cfg_it = B_FALSE;
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_set_npi_debug_flag"));
+	cfg_value = mi_strtol(value, &end, BASE_HEX);
+	pa = (p_nxge_param_t)cp;
+	if (PARAM_OUTOF_RANGE(value, end, cfg_value, pa)) {
+		NXGE_DEBUG_MSG((nxgep, NDD_CTL, " nxge_param_set_npi_debug_flag"
+				    " outof range %llx", cfg_value));
+		return (EINVAL);
+	}
+	if (pa->value != cfg_value) {
+		pa->old_value = pa->value;
+		pa->value = cfg_value;
+		cfg_it = B_TRUE;
+	}
+
+	if (cfg_it == B_TRUE) {
+		npi_debug_level = pa->value;
+	}
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_set_debug_flag"));
+	return (status);
+}
+
+/* ARGSUSED */
+static int
+nxge_param_dump_rdc(p_nxge_t nxgep, queue_t *q, p_mblk_t mp, caddr_t cp)
+{
+	uint_t rdc;
+
+	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_param_dump_rdc"));
+
+	(void) npi_rxdma_dump_fzc_regs(NXGE_DEV_NPI_HANDLE(nxgep));
+	for (rdc = 0; rdc < nxgep->nrdc; rdc++)
+		(void) nxge_dump_rxdma_channel(nxgep, nxgep->rdc[rdc]);
+
+	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "<== nxge_param_dump_rdc"));
+	return (0);
+}
+
+/* ARGSUSED */
+static int
+nxge_param_dump_tdc(p_nxge_t nxgep, queue_t *q, p_mblk_t mp, caddr_t cp)
+{
+	uint_t	tdc;
+
+	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_param_dump_tdc"));
+
+	for (tdc = 0; tdc < nxgep->ntdc; tdc++)
+		(void) nxge_txdma_regs_dump(nxgep, nxgep->tdc[tdc]);
+
+	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "<== nxge_param_dump_tdc"));
+	return (0);
+}
+
+/* ARGSUSED */
+static int
+nxge_param_dump_fflp_regs(p_nxge_t nxgep, queue_t *q, p_mblk_t mp, caddr_t cp)
+{
+	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_param_dump_fflp_regs"));
+
+	(void) npi_fflp_dump_regs(NXGE_DEV_NPI_HANDLE(nxgep));
+
+	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "<== nxge_param_dump_fflp_regs"));
+	return (0);
+}
+
+/* ARGSUSED */
+static int
+nxge_param_dump_mac_regs(p_nxge_t nxgep, queue_t *q, p_mblk_t mp, caddr_t cp)
+{
+	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_param_dump_mac_regs"));
+
+	(void) npi_mac_dump_regs(NXGE_DEV_NPI_HANDLE(nxgep),
+		nxgep->function_num);
+
+	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "<== nxge_param_dump_mac_regs"));
+	return (0);
+}
+
+/* ARGSUSED */
+static int
+nxge_param_dump_ipp_regs(p_nxge_t nxgep, queue_t *q, p_mblk_t mp, caddr_t cp)
+{
+	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_param_dump_ipp_regs"));
+
+	(void) npi_ipp_dump_regs(NXGE_DEV_NPI_HANDLE(nxgep),
+		nxgep->function_num);
+	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "<== nxge_param_dump_ipp_regs"));
+	return (0);
+}
+
+/* ARGSUSED */
+static int
+nxge_param_dump_vlan_table(p_nxge_t nxgep, queue_t *q, p_mblk_t mp, caddr_t cp)
+{
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_dump_vlan_table"));
+
+	(void) npi_fflp_vlan_tbl_dump(NXGE_DEV_NPI_HANDLE(nxgep));
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_dump_vlan_table"));
+	return (0);
+}
+
+/* ARGSUSED */
+static int
+nxge_param_dump_rdc_table(p_nxge_t nxgep, queue_t *q, p_mblk_t mp, caddr_t cp)
+{
+	uint8_t	table;
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_dump_rdc_table"));
+	for (table = 0; table < NXGE_MAX_RDC_GROUPS; table++) {
+		(void) npi_rxdma_dump_rdc_table(NXGE_DEV_NPI_HANDLE(nxgep),
+					    table);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_dump_rdc_table"));
+	return (0);
+}
+
+typedef struct block_info {
+	char		*name;
+	uint32_t	offset;
+} block_info_t;
+
+block_info_t reg_block[] = {
+	{"PIO",		PIO},
+	{"FZC_PIO",	FZC_PIO},
+	{"FZC_XMAC",	FZC_MAC},
+	{"FZC_IPP",	FZC_IPP},
+	{"FFLP",	FFLP},
+	{"FZC_FFLP",	FZC_FFLP},
+	{"PIO_VADDR",	PIO_VADDR},
+	{"ZCP",	ZCP},
+	{"FZC_ZCP",	FZC_ZCP},
+	{"DMC",	DMC},
+	{"FZC_DMC",	FZC_DMC},
+	{"TXC",	TXC},
+	{"FZC_TXC",	FZC_TXC},
+	{"PIO_LDSV",	PIO_LDSV},
+	{"PIO_LDGIM",	PIO_LDGIM},
+	{"PIO_IMASK0",	PIO_IMASK0},
+	{"PIO_IMASK1",	PIO_IMASK1},
+	{"FZC_PROM",	FZC_PROM},
+	{"END",	ALL_FF_32},
+};
+
+/* ARGSUSED */
+static int
+nxge_param_dump_ptrs(p_nxge_t nxgep, queue_t *q, p_mblk_t mp, caddr_t cp)
+{
+	uint_t			print_len, buf_len;
+	p_mblk_t		np;
+	int			rdc, tdc, block;
+	uint64_t		base;
+	p_nxge_dma_pt_cfg_t	p_dma_cfgp;
+	p_nxge_hw_pt_cfg_t	p_cfgp;
+	int			buff_alloc_size = NXGE_NDD_INFODUMP_BUFF_8K;
+	p_tx_ring_t 		*tx_rings;
+	p_rx_rcr_rings_t 	rx_rcr_rings;
+	p_rx_rcr_ring_t		*rcr_rings;
+	p_rx_rbr_rings_t 	rx_rbr_rings;
+	p_rx_rbr_ring_t		*rbr_rings;
+
+	NXGE_DEBUG_MSG((nxgep, IOC_CTL,
+		"==> nxge_param_dump_ptrs"));
+
+	(void) mi_mpprintf(mp, "ptr information for Port\t %d \n",
+		nxgep->function_num);
+
+	if ((np = allocb(buff_alloc_size, BPRI_HI)) == NULL) {
+		/* The following may work even if we cannot get a large buf. */
+		(void) mi_mpprintf(mp, "%s\n", "out of buffer");
+		return (0);
+	}
+
+	buf_len = buff_alloc_size;
+	mp->b_cont = np;
+	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
+	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
+
+	rx_rcr_rings = nxgep->rx_rcr_rings;
+	rcr_rings = rx_rcr_rings->rcr_rings;
+	rx_rbr_rings = nxgep->rx_rbr_rings;
+	rbr_rings = rx_rbr_rings->rbr_rings;
+	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
+		"nxgep (nxge_t) $%p\n"
+		"dev_regs (dev_regs_t) $%p\n",
+		nxgep, nxgep->dev_regs);
+
+	ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
+
+	/* do register pointers */
+	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
+		"reg base (npi_reg_ptr_t) $%p\t "
+		"pci reg (npi_reg_ptr_t) $%p\n",
+		nxgep->dev_regs->nxge_regp,
+		nxgep->dev_regs->nxge_pciregp);
+
+	ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
+
+	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
+		"\nBlock \t Offset \n");
+
+	ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
+	block = 0;
+	base = (uint64_t)nxgep->dev_regs->nxge_regp;
+	while (reg_block[block].offset != ALL_FF_32) {
+		print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
+			"%9s\t 0x%llx\n",
+			reg_block[block].name,
+			(unsigned long long)(reg_block[block].offset + base));
+		ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
+		block++;
+	}
+
+	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
+		"\nRDC\t rcrp (rx_rcr_ring_t)\t "
+		"rbrp (rx_rbr_ring_t)\n");
+
+	ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
+
+	for (rdc = 0; rdc < p_cfgp->max_rdcs; rdc++) {
+		print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
+			" %d\t  $%p\t\t   $%p\n",
+			rdc, rcr_rings[rdc],
+			rbr_rings[rdc]);
+		ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
+	}
+
+	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
+			    "\nTDC\t tdcp (tx_ring_t)\n");
+
+	ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
+	tx_rings = nxgep->tx_rings->rings;
+	for (tdc = 0; tdc < p_cfgp->max_tdcs; tdc++) {
+		print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
+			" %d\t  $%p\n", tdc, tx_rings[tdc]);
+		ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
+	}
+
+	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len, "\n\n");
+
+	ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
+	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "<== nxge_param_dump_ptrs"));
+	return (0);
+}
+
+/*
+ * Load 'name' into the named dispatch table pointed to by 'pparam'.
+ * 'pparam' should be the address of a char pointer cell.  If the table
+ * does not exist (*pparam == 0), a new table is allocated and 'pparam'
+ * is stuffed.  If there is not enough space in the table for a new
+ * entry, more space is allocated.
+ */
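+/*
+ * A typical registration looks roughly like the sketch below; the parameter
+ * name, routines and data pointer shown are illustrative placeholders, not
+ * actual entries from the driver's parameter array:
+ *
+ *	(void) nxge_nd_load(&nxgep->param_list, "some_param",
+ *	    some_get_routine, some_set_routine, (caddr_t)some_param_data);
+ *
+ * Passing nxge_set_default (or NULL) as the set routine leaves the entry
+ * read only as far as ndd is concerned.
+ */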
+/* ARGSUSED */
+boolean_t
+nxge_nd_load(caddr_t *pparam, char *name,
+	pfi_t get_pfi, pfi_t set_pfi, caddr_t data)
+{
+	ND	*nd;
+	NDE	*nde;
+
+	NXGE_DEBUG_MSG((NULL, NDD2_CTL, " ==> nxge_nd_load"));
+	if (!pparam)
+		return (B_FALSE);
+
+	if ((nd = (ND *)*pparam) == NULL) {
+		if ((nd = (ND *)KMEM_ZALLOC(sizeof (ND), KM_NOSLEEP)) == NULL)
+			return (B_FALSE);
+		*pparam = (caddr_t)nd;
+	}
+
+	if (nd->nd_tbl) {
+		for (nde = nd->nd_tbl; nde->nde_name; nde++) {
+			if (strcmp(name, nde->nde_name) == 0)
+				goto fill_it;
+		}
+	}
+
+	if (nd->nd_free_count <= 1) {
+		if ((nde = (NDE *)KMEM_ZALLOC(nd->nd_size +
+					NDE_ALLOC_SIZE, KM_NOSLEEP)) == NULL)
+			return (B_FALSE);
+		nd->nd_free_count += NDE_ALLOC_COUNT;
+		if (nd->nd_tbl) {
+			bcopy((char *)nd->nd_tbl, (char *)nde, nd->nd_size);
+			KMEM_FREE((char *)nd->nd_tbl, nd->nd_size);
+		} else {
+			nd->nd_free_count--;
+			nde->nde_name = "?";
+			nde->nde_get_pfi = nxge_nd_get_names;
+			nde->nde_set_pfi = nxge_set_default;
+		}
+		nde->nde_data = (caddr_t)nd;
+		nd->nd_tbl = nde;
+		nd->nd_size += NDE_ALLOC_SIZE;
+	}
+	for (nde = nd->nd_tbl; nde->nde_name; nde++)
+		noop;
+	nd->nd_free_count--;
+fill_it:
+	nde->nde_name = name;
+	nde->nde_get_pfi = get_pfi;
+	nde->nde_set_pfi = set_pfi;
+	nde->nde_data = data;
+	NXGE_DEBUG_MSG((NULL, NDD2_CTL, " <== nxge_nd_load"));
+
+	return (B_TRUE);
+}
+
+/*
+ * Free the table pointed to by 'pparam'
+ */
+void
+nxge_nd_free(caddr_t *pparam)
+{
+	ND *nd;
+
+	if ((nd = (ND *)*pparam) != NULL) {
+		if (nd->nd_tbl)
+			KMEM_FREE((char *)nd->nd_tbl, nd->nd_size);
+		KMEM_FREE((char *)nd, sizeof (ND));
+		*pparam = nil(caddr_t);
+	}
+}
+
+int
+nxge_nd_getset(p_nxge_t nxgep, queue_t *q, caddr_t param, p_mblk_t mp)
+{
+	int		err;
+	IOCP		iocp;
+	p_mblk_t	mp1, mp2;
+	ND		*nd;
+	NDE		*nde;
+	char		*valp;
+	size_t		avail;
+
+	if (!param) {
+		return (B_FALSE);
+	}
+
+	nd = (ND *)param;
+	iocp = (IOCP)mp->b_rptr;
+	if ((iocp->ioc_count == 0) || !(mp1 = mp->b_cont)) {
+		mp->b_datap->db_type = M_IOCACK;
+		iocp->ioc_count = 0;
+		iocp->ioc_error = EINVAL;
+		return (B_FALSE);
+	}
+
+	/*
+	 * NOTE - logic throughout nd_xxx assumes single data block for ioctl.
+	 *	However, existing code sends in some big buffers.
+	 */
+	avail = iocp->ioc_count;
+	if (mp1->b_cont) {
+		freemsg(mp1->b_cont);
+		mp1->b_cont = NULL;
+	}
+
+	mp1->b_datap->db_lim[-1] = '\0';	/* Force null termination */
+	for (valp = (char *)mp1->b_rptr; *valp != '\0'; valp++) {
+		if (*valp == '-')
+			*valp = '_';
+	}
+
+	valp = (char *)mp1->b_rptr;
+
+	for (nde = nd->nd_tbl; /* */; nde++) {
+		if (!nde->nde_name)
+			return (B_FALSE);
+		if (strcmp(nde->nde_name, valp) == 0)
+			break;
+	}
+	err = EINVAL;
+	while (*valp++)
+		noop;
+	if (!*valp || valp >= (char *)mp1->b_wptr)
+		valp = nilp(char);
+	switch (iocp->ioc_cmd) {
+	case ND_GET:
+		/*
+		 * (temporary) hack: "*valp" is size of user buffer for
+		 * copyout. If result of action routine is too big, free
+		 * excess and return ioc_rval as buffer size needed.
+		 * Return as many mblocks as will fit, free the rest.  For
+		 * backward compatibility, assume size of original ioctl
+		 * buffer if "*valp" bad or not given.
+		 */
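+		/*
+		 * For example (sizes illustrative): a get request carries
+		 * "<param-name>\0" followed by the user buffer size as a
+		 * decimal string such as "16384"; "avail" then becomes
+		 * 16384 and any reply longer than that is trimmed below.
+		 */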
+		if (valp)
+			avail = mi_strtol(valp, (char **)0, 10);
+		/*
+		 * We overwrite the name/value with the reply data
+		 */
+		mp2 = mp1;
+		while (mp2) {
+			mp2->b_wptr = mp2->b_rptr;
+			mp2 = mp2->b_cont;
+		}
+
+		err = (*nde->nde_get_pfi)(nxgep, q, mp1, nde->nde_data);
+
+		if (!err) {
+			size_t	size_out = 0;
+			ssize_t	excess;
+
+			iocp->ioc_rval = 0;
+
+			/* Tack on the null */
+			err = nxge_mk_mblk_tail_space(mp1, &mp2, 1);
+			if (!err) {
+				*mp2->b_wptr++ = '\0';
+				size_out = msgdsize(mp1);
+				excess = size_out - avail;
+				if (excess > 0) {
+					iocp->ioc_rval = (int)size_out;
+					size_out -= excess;
+					(void) adjmsg(mp1, -(excess + 1));
+					err = nxge_mk_mblk_tail_space(
+							mp1, &mp2, 1);
+					if (!err)
+						*mp2->b_wptr++ = '\0';
+					else
+						size_out = 0;
+				}
+			} else
+				size_out = 0;
+			iocp->ioc_count = size_out;
+		}
+		break;
+
+	case ND_SET:
+		if (valp) {
+			if (nde->nde_set_pfi) {
+				err = (*nde->nde_set_pfi)(nxgep, q, mp1, valp,
+							    nde->nde_data);
+				iocp->ioc_count = 0;
+				freemsg(mp1);
+				mp->b_cont = NULL;
+			}
+		}
+		break;
+
+	default:
+		break;
+	}
+	iocp->ioc_error = err;
+	mp->b_datap->db_type = M_IOCACK;
+	return (B_TRUE);
+}
+
+/* ARGSUSED */
+int
+nxge_nd_get_names(p_nxge_t nxgep, queue_t *q, p_mblk_t mp, caddr_t param)
+{
+	ND		*nd;
+	NDE		*nde;
+	char		*rwtag;
+	boolean_t	get_ok, set_ok;
+	size_t		param_len;
+	int		status = 0;
+
+	nd = (ND *)param;
+	if (!nd)
+		return (ENOENT);
+
+	for (nde = nd->nd_tbl; nde->nde_name; nde++) {
+		get_ok = (nde->nde_get_pfi != nxge_get_default) &&
+				(nde->nde_get_pfi != NULL);
+		set_ok = (nde->nde_set_pfi != nxge_set_default) &&
+				(nde->nde_set_pfi != NULL);
+		if (get_ok) {
+			if (set_ok)
+				rwtag = "read and write";
+			else
+				rwtag = "read only";
+		} else if (set_ok)
+			rwtag = "write only";
+		else {
+			continue;
+		}
+		param_len = strlen(rwtag);
+		param_len += strlen(nde->nde_name);
+		param_len += 4;
+
+		(void) mi_mpprintf(mp, "%s (%s)", nde->nde_name, rwtag);
+	}
+	return (status);
+}
+
+/* ARGSUSED */
+int
+nxge_get_default(p_nxge_t nxgep, queue_t *q, p_mblk_t mp, caddr_t data)
+{
+	return (EACCES);
+}
+
+/* ARGSUSED */
+int
+nxge_set_default(p_nxge_t nxgep, queue_t *q, p_mblk_t mp, char *value,
+	caddr_t data)
+{
+	return (EACCES);
+}
+
+void
+nxge_param_ioctl(p_nxge_t nxgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
+{
+	int		cmd;
+	int		status = B_FALSE;
+
+	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_param_ioctl"));
+	cmd = iocp->ioc_cmd;
+
+	switch (cmd) {
+	default:
+		NXGE_DEBUG_MSG((nxgep, IOC_CTL,
+			"nxge_param_ioctl: bad cmd 0x%0x", cmd));
+		break;
+
+	case ND_GET:
+	case ND_SET:
+		NXGE_DEBUG_MSG((nxgep, IOC_CTL,
+			"nxge_param_ioctl: cmd 0x%0x", cmd));
+		if (!nxge_nd_getset(nxgep, wq, nxgep->param_list, mp)) {
+			NXGE_DEBUG_MSG((nxgep, IOC_CTL,
+				"false ret from nxge_nd_getset"));
+			break;
+		}
+		status = B_TRUE;
+		break;
+	}
+
+	if (status) {
+		qreply(wq, mp);
+	} else {
+		miocnak(wq, mp, 0, EINVAL);
+	}
+	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "<== nxge_param_ioctl"));
+}
+
+/* ARGSUSED */
+static boolean_t
+nxge_param_link_update(p_nxge_t nxgep)
+{
+	p_nxge_param_t 		param_arr;
+	nxge_param_index_t 	i;
+	boolean_t 		update_xcvr;
+	boolean_t 		update_dev;
+	int 			instance;
+	boolean_t 		status = B_TRUE;
+
+	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_param_link_update"));
+
+	param_arr = nxgep->param_arr;
+	instance = nxgep->instance;
+	update_xcvr = B_FALSE;
+	for (i = param_anar_1000fdx; i < param_anar_asmpause; i++) {
+		update_xcvr |= param_arr[i].value;
+	}
+
+	if (update_xcvr) {
+		update_xcvr = B_FALSE;
+		for (i = param_autoneg; i < param_enable_ipg0; i++) {
+			update_xcvr |=
+				(param_arr[i].value != param_arr[i].old_value);
+			param_arr[i].old_value = param_arr[i].value;
+		}
+		if (update_xcvr) {
+			RW_ENTER_WRITER(&nxgep->filter_lock);
+			(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
+			(void) nxge_link_init(nxgep);
+			(void) nxge_mac_init(nxgep);
+			(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
+			RW_EXIT(&nxgep->filter_lock);
+		}
+	} else {
+		cmn_err(CE_WARN, " Last setting will leave nxge%d with "
+				"no link capabilities.", instance);
+		cmn_err(CE_WARN, " Restoring previous setting.");
+		for (i = param_anar_1000fdx; i < param_anar_asmpause; i++)
+			param_arr[i].value = param_arr[i].old_value;
+	}
+
+	update_dev = B_FALSE;
+
+	if (update_dev) {
+		RW_ENTER_WRITER(&nxgep->filter_lock);
+		(void) nxge_rx_mac_disable(nxgep);
+		(void) nxge_tx_mac_disable(nxgep);
+		(void) nxge_tx_mac_enable(nxgep);
+		(void) nxge_rx_mac_enable(nxgep);
+		RW_EXIT(&nxgep->filter_lock);
+	}
+
+nxge_param_hw_update_exit:
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+			"<== nxge_param_link_update status = 0x%08x", status));
+	return (status);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nxge/nxge_rxdma.c	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,4538 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#include <sys/nxge/nxge_impl.h>
+#include <sys/nxge/nxge_rxdma.h>
+
+#define	NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp)	\
+	(rdcgrp + nxgep->pt_config.hw_config.start_rdc_grpid)
+#define	NXGE_ACTUAL_RDC(nxgep, rdc)	\
+	(rdc + nxgep->pt_config.hw_config.start_rdc)
+
+/*
+ * Globals: tunable parameters (/etc/system or adb)
+ *
+ */
+extern uint32_t nxge_rbr_size;
+extern uint32_t nxge_rcr_size;
+extern uint32_t	nxge_rbr_spare_size;
+
+extern uint32_t nxge_mblks_pending;
+
+/*
+ * Tunable to reduce the amount of time spent in the
+ * ISR doing Rx Processing.
+ */
+extern uint32_t nxge_max_rx_pkts;
+boolean_t nxge_jumbo_enable;
+
+/*
+ * Tunables to manage the receive buffer blocks.
+ *
+ * nxge_rx_threshold_hi: copy all buffers.
+ * nxge_rx_buf_size_type: receive buffer block size type.
+ * nxge_rx_threshold_lo: copy only up to tunable block size type.
+ */
+extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi;
+extern nxge_rxbuf_type_t nxge_rx_buf_size_type;
+extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo;
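+/*
+ * For example, such tunables can be set from /etc/system (values shown are
+ * illustrative only, not recommended defaults):
+ *
+ *	set nxge:nxge_rbr_size = 2048
+ *	set nxge:nxge_max_rx_pkts = 256
+ */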
+
+static nxge_status_t nxge_map_rxdma(p_nxge_t);
+static void nxge_unmap_rxdma(p_nxge_t);
+
+static nxge_status_t nxge_rxdma_hw_start_common(p_nxge_t);
+static void nxge_rxdma_hw_stop_common(p_nxge_t);
+
+static nxge_status_t nxge_rxdma_hw_start(p_nxge_t);
+static void nxge_rxdma_hw_stop(p_nxge_t);
+
+static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t,
+    p_nxge_dma_common_t *,  p_rx_rbr_ring_t *,
+    uint32_t,
+    p_nxge_dma_common_t *, p_rx_rcr_ring_t *,
+    p_rx_mbox_t *);
+static void nxge_unmap_rxdma_channel(p_nxge_t, uint16_t,
+    p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);
+
+static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t,
+    uint16_t,
+    p_nxge_dma_common_t *, p_rx_rbr_ring_t *,
+    p_rx_rcr_ring_t *, p_rx_mbox_t *);
+static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t,
+    p_rx_rcr_ring_t, p_rx_mbox_t);
+
+static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t,
+    uint16_t,
+    p_nxge_dma_common_t *,
+    p_rx_rbr_ring_t *, uint32_t);
+static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t,
+    p_rx_rbr_ring_t);
+
+static nxge_status_t nxge_rxdma_start_channel(p_nxge_t, uint16_t,
+    p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);
+static nxge_status_t nxge_rxdma_stop_channel(p_nxge_t, uint16_t);
+
+mblk_t *
+nxge_rx_pkts(p_nxge_t, uint_t, p_nxge_ldv_t,
+    p_rx_rcr_ring_t *, rx_dma_ctl_stat_t);
+
+static void nxge_receive_packet(p_nxge_t,
+	p_rx_rcr_ring_t,
+	p_rcr_entry_t,
+	boolean_t *,
+	mblk_t **, mblk_t **);
+
+nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t);
+
+static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t);
+static void nxge_freeb(p_rx_msg_t);
+static void nxge_rx_pkts_vring(p_nxge_t, uint_t,
+    p_nxge_ldv_t, rx_dma_ctl_stat_t);
+static nxge_status_t nxge_rx_err_evnts(p_nxge_t, uint_t,
+				p_nxge_ldv_t, rx_dma_ctl_stat_t);
+
+static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t,
+				uint32_t, uint32_t);
+
+static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t,
+    p_rx_rbr_ring_t);
+
+
+static nxge_status_t
+nxge_rxdma_fatal_err_recover(p_nxge_t, uint16_t);
+
+nxge_status_t
+nxge_rx_port_fatal_err_recover(p_nxge_t);
+
+static uint16_t
+nxge_get_pktbuf_size(p_nxge_t nxgep, int bufsz_type, rbr_cfig_b_t rbr_cfgb);
+
+nxge_status_t
+nxge_init_rxdma_channels(p_nxge_t nxgep)
+{
+	nxge_status_t	status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels"));
+
+	status = nxge_map_rxdma(nxgep);
+	if (status != NXGE_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"<== nxge_init_rxdma: status 0x%x", status));
+		return (status);
+	}
+
+	status = nxge_rxdma_hw_start_common(nxgep);
+	if (status != NXGE_OK) {
+		nxge_unmap_rxdma(nxgep);
+		return (status);
+	}
+
+	status = nxge_rxdma_hw_start(nxgep);
+	if (status != NXGE_OK) {
+		nxge_unmap_rxdma(nxgep);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+		"<== nxge_init_rxdma_channels: status 0x%x", status));
+
+	return (status);
+}
+
+void
+nxge_uninit_rxdma_channels(p_nxge_t nxgep)
+{
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channels"));
+
+	nxge_rxdma_hw_stop(nxgep);
+	nxge_rxdma_hw_stop_common(nxgep);
+	nxge_unmap_rxdma(nxgep);
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+		"<== nxge_uninit_rxdma_channels"));
+}
+
+nxge_status_t
+nxge_reset_rxdma_channel(p_nxge_t nxgep, uint16_t channel)
+{
+	npi_handle_t		handle;
+	npi_status_t		rs = NPI_SUCCESS;
+	nxge_status_t		status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_reset_rxdma_channel"));
+
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	rs = npi_rxdma_cfg_rdc_reset(handle, channel);
+
+	if (rs != NPI_SUCCESS) {
+		status = NXGE_ERROR | rs;
+	}
+
+	return (status);
+}
+
+void
+nxge_rxdma_regs_dump_channels(p_nxge_t nxgep)
+{
+	int			i, ndmas;
+	uint16_t		channel;
+	p_rx_rbr_rings_t 	rx_rbr_rings;
+	p_rx_rbr_ring_t		*rbr_rings;
+	npi_handle_t		handle;
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_regs_dump_channels"));
+
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	(void) npi_rxdma_dump_fzc_regs(handle);
+
+	rx_rbr_rings = nxgep->rx_rbr_rings;
+	if (rx_rbr_rings == NULL) {
+		NXGE_DEBUG_MSG((nxgep, RX_CTL,
+			"<== nxge_rxdma_regs_dump_channels: "
+			"NULL ring pointer"));
+		return;
+	}
+	if (rx_rbr_rings->rbr_rings == NULL) {
+		NXGE_DEBUG_MSG((nxgep, RX_CTL,
+			"<== nxge_rxdma_regs_dump_channels: "
+			" NULL rbr rings pointer"));
+		return;
+	}
+
+	ndmas = rx_rbr_rings->ndmas;
+	if (!ndmas) {
+		NXGE_DEBUG_MSG((nxgep, RX_CTL,
+			"<== nxge_rxdma_regs_dump_channels: no channel"));
+		return;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+		"==> nxge_rxdma_regs_dump_channels (ndmas %d)", ndmas));
+
+	rbr_rings = rx_rbr_rings->rbr_rings;
+	for (i = 0; i < ndmas; i++) {
+		if (rbr_rings == NULL || rbr_rings[i] == NULL) {
+			continue;
+		}
+		channel = rbr_rings[i]->rdc;
+		(void) nxge_dump_rxdma_channel(nxgep, channel);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_regs_dump"));
+
+}
+
+nxge_status_t
+nxge_dump_rxdma_channel(p_nxge_t nxgep, uint8_t channel)
+{
+	npi_handle_t		handle;
+	npi_status_t		rs = NPI_SUCCESS;
+	nxge_status_t		status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_dump_rxdma_channel"));
+
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	rs = npi_rxdma_dump_rdc_regs(handle, channel);
+
+	if (rs != NPI_SUCCESS) {
+		status = NXGE_ERROR | rs;
+	}
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dump_rxdma_channel"));
+	return (status);
+}
+
+nxge_status_t
+nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
+    p_rx_dma_ent_msk_t mask_p)
+{
+	npi_handle_t		handle;
+	npi_status_t		rs = NPI_SUCCESS;
+	nxge_status_t		status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+		"==> nxge_init_rxdma_channel_event_mask"));
+
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	rs = npi_rxdma_event_mask(handle, OP_SET, channel, mask_p);
+	if (rs != NPI_SUCCESS) {
+		status = NXGE_ERROR | rs;
+	}
+
+	return (status);
+}
+
+nxge_status_t
+nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
+    p_rx_dma_ctl_stat_t cs_p)
+{
+	npi_handle_t		handle;
+	npi_status_t		rs = NPI_SUCCESS;
+	nxge_status_t		status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+		"==> nxge_init_rxdma_channel_cntl_stat"));
+
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	rs = npi_rxdma_control_status(handle, OP_SET, channel, cs_p);
+
+	if (rs != NPI_SUCCESS) {
+		status = NXGE_ERROR | rs;
+	}
+
+	return (status);
+}
+
+nxge_status_t
+nxge_rxdma_cfg_rdcgrp_default_rdc(p_nxge_t nxgep, uint8_t rdcgrp,
+				    uint8_t rdc)
+{
+	npi_handle_t		handle;
+	npi_status_t		rs = NPI_SUCCESS;
+	p_nxge_dma_pt_cfg_t	p_dma_cfgp;
+	p_nxge_rdc_grp_t	rdc_grp_p;
+	uint8_t actual_rdcgrp, actual_rdc;
+
+	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
+			    " ==> nxge_rxdma_cfg_rdcgrp_default_rdc"));
+	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
+
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+
+	rdc_grp_p = &p_dma_cfgp->rdc_grps[rdcgrp];
+	rdc_grp_p->rdc[0] = rdc;
+
+	actual_rdcgrp = NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp);
+	actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc);
+
+	rs = npi_rxdma_cfg_rdc_table_default_rdc(handle, actual_rdcgrp,
+							    actual_rdc);
+
+	if (rs != NPI_SUCCESS) {
+		return (NXGE_ERROR | rs);
+	}
+	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
+			    " <== nxge_rxdma_cfg_rdcgrp_default_rdc"));
+	return (NXGE_OK);
+}
+
+nxge_status_t
+nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep, uint8_t port, uint8_t rdc)
+{
+	npi_handle_t		handle;
+
+	uint8_t actual_rdc;
+	npi_status_t		rs = NPI_SUCCESS;
+
+	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
+			    " ==> nxge_rxdma_cfg_port_default_rdc"));
+
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc);
+	rs = npi_rxdma_cfg_default_port_rdc(handle, port, actual_rdc);
+
+
+	if (rs != NPI_SUCCESS) {
+		return (NXGE_ERROR | rs);
+	}
+	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
+			    " <== nxge_rxdma_cfg_port_default_rdc"));
+
+	return (NXGE_OK);
+}
+
+nxge_status_t
+nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep, uint8_t channel,
+				    uint16_t pkts)
+{
+	npi_status_t	rs = NPI_SUCCESS;
+	npi_handle_t	handle;
+	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
+			    " ==> nxge_rxdma_cfg_rcr_threshold"));
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+
+	rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, pkts);
+
+	if (rs != NPI_SUCCESS) {
+		return (NXGE_ERROR | rs);
+	}
+	NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_threshold"));
+	return (NXGE_OK);
+}
+
+nxge_status_t
+nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep, uint8_t channel,
+			    uint16_t tout, uint8_t enable)
+{
+	npi_status_t	rs = NPI_SUCCESS;
+	npi_handle_t	handle;
+	NXGE_DEBUG_MSG((nxgep, RX2_CTL, " ==> nxge_rxdma_cfg_rcr_timeout"));
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	if (enable == 0) {
+		rs = npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, channel);
+	} else {
+		rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
+							    tout);
+	}
+
+	if (rs != NPI_SUCCESS) {
+		return (NXGE_ERROR | rs);
+	}
+	NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_timeout"));
+	return (NXGE_OK);
+}
+
+nxge_status_t
+nxge_enable_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
+    p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
+{
+	npi_handle_t		handle;
+	rdc_desc_cfg_t 		rdc_desc;
+	p_rcrcfig_b_t		cfgb_p;
+	npi_status_t		rs = NPI_SUCCESS;
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel"));
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	/*
+	 * Use configuration data composed at init time.
+	 * Write to hardware the receive ring configurations.
+	 */
+	rdc_desc.mbox_enable = 1;
+	rdc_desc.mbox_addr = mbox_p->mbox_addr;
+	NXGE_DEBUG_MSG((nxgep, RX_CTL,
+		"==> nxge_enable_rxdma_channel: mboxp $%p($%p)",
+		mbox_p->mbox_addr, rdc_desc.mbox_addr));
+
+	rdc_desc.rbr_len = rbr_p->rbb_max;
+	rdc_desc.rbr_addr = rbr_p->rbr_addr;
+
+	switch (nxgep->rx_bksize_code) {
+	case RBR_BKSIZE_4K:
+		rdc_desc.page_size = SIZE_4KB;
+		break;
+	case RBR_BKSIZE_8K:
+		rdc_desc.page_size = SIZE_8KB;
+		break;
+	case RBR_BKSIZE_16K:
+		rdc_desc.page_size = SIZE_16KB;
+		break;
+	case RBR_BKSIZE_32K:
+		rdc_desc.page_size = SIZE_32KB;
+		break;
+	}
+
+	rdc_desc.size0 = rbr_p->npi_pkt_buf_size0;
+	rdc_desc.valid0 = 1;
+
+	rdc_desc.size1 = rbr_p->npi_pkt_buf_size1;
+	rdc_desc.valid1 = 1;
+
+	rdc_desc.size2 = rbr_p->npi_pkt_buf_size2;
+	rdc_desc.valid2 = 1;
+
+	rdc_desc.full_hdr = rcr_p->full_hdr_flag;
+	rdc_desc.offset = rcr_p->sw_priv_hdr_len;
+
+	rdc_desc.rcr_len = rcr_p->comp_size;
+	rdc_desc.rcr_addr = rcr_p->rcr_addr;
+
+	cfgb_p = &(rcr_p->rcr_cfgb);
+	rdc_desc.rcr_threshold = cfgb_p->bits.ldw.pthres;
+	rdc_desc.rcr_timeout = cfgb_p->bits.ldw.timeout;
+	rdc_desc.rcr_timeout_enable = cfgb_p->bits.ldw.entout;
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: "
+		"rbr_len qlen %d pagesize code %d rcr_len %d",
+		rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len));
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: "
+		"size 0 %d size 1 %d size 2 %d",
+		rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1,
+		rbr_p->npi_pkt_buf_size2));
+
+	rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc);
+	if (rs != NPI_SUCCESS) {
+		return (NXGE_ERROR | rs);
+	}
+
+	/*
+	 * Enable the timeout and threshold.
+	 */
+	rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel,
+			rdc_desc.rcr_threshold);
+	if (rs != NPI_SUCCESS) {
+		return (NXGE_ERROR | rs);
+	}
+
+	rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
+			rdc_desc.rcr_timeout);
+	if (rs != NPI_SUCCESS) {
+		return (NXGE_ERROR | rs);
+	}
+
+	/* Enable the DMA */
+	rs = npi_rxdma_cfg_rdc_enable(handle, channel);
+	if (rs != NPI_SUCCESS) {
+		return (NXGE_ERROR | rs);
+	}
+
+	/* Kick the DMA engine. */
+	npi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max);
+	/* Clear the rbr empty bit */
+	(void) npi_rxdma_channel_rbr_empty_clear(handle, channel);
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_enable_rxdma_channel"));
+
+	return (NXGE_OK);
+}
+
+nxge_status_t
+nxge_disable_rxdma_channel(p_nxge_t nxgep, uint16_t channel)
+{
+	npi_handle_t		handle;
+	npi_status_t		rs = NPI_SUCCESS;
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_disable_rxdma_channel"));
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+
+	/* disable the DMA */
+	rs = npi_rxdma_cfg_rdc_disable(handle, channel);
+	if (rs != NPI_SUCCESS) {
+		NXGE_DEBUG_MSG((nxgep, RX_CTL,
+			"<== nxge_disable_rxdma_channel:failed (0x%x)",
+			rs));
+		return (NXGE_ERROR | rs);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_disable_rxdma_channel"));
+	return (NXGE_OK);
+}
+
+nxge_status_t
+nxge_rxdma_channel_rcrflush(p_nxge_t nxgep, uint8_t channel)
+{
+	npi_handle_t		handle;
+	nxge_status_t		status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+		"==> nxge_rxdma_channel_rcrflush"));
+
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	npi_rxdma_rdc_rcr_flush(handle, channel);
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+		"<== nxge_rxdma_channel_rcrflush"));
+	return (status);
+
+}
+
+#define	MID_INDEX(l, r) ((r + l + 1) >> 1)
+
+#define	TO_LEFT -1
+#define	TO_RIGHT 1
+#define	BOTH_RIGHT (TO_RIGHT + TO_RIGHT)
+#define	BOTH_LEFT (TO_LEFT + TO_LEFT)
+#define	IN_MIDDLE (TO_RIGHT + TO_LEFT)
+#define	NO_HINT 0xffffffff
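+/*
+ * The binary search below probes bufinfo[anchor_index] and classifies the
+ * target address against that chunk: base_side + end_side is IN_MIDDLE (0)
+ * when dvma_addr <= addr < dvma_addr + chunk_size, BOTH_RIGHT (2) when the
+ * address lies above the chunk, and BOTH_LEFT (-2) when it lies below it.
+ * For example, with l_index = 0 and r_index = 7 the first probe is at
+ * MID_INDEX(7, 0) = (7 + 0 + 1) >> 1 = 4.
+ */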
+
+/*ARGSUSED*/
+nxge_status_t
+nxge_rxbuf_pp_to_vp(p_nxge_t nxgep, p_rx_rbr_ring_t rbr_p,
+	uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp,
+	uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index)
+{
+	int			bufsize;
+	uint64_t		pktbuf_pp;
+	uint64_t 		dvma_addr;
+	rxring_info_t 		*ring_info;
+	int 			base_side, end_side;
+	int 			r_index, l_index, anchor_index;
+	int 			found, search_done;
+	uint32_t offset, chunk_size, block_size, page_size_mask;
+	uint32_t chunk_index, block_index, total_index;
+	int 			max_iterations, iteration;
+	rxbuf_index_info_t 	*bufinfo;
+
+	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_rxbuf_pp_to_vp"));
+
+	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
+		"==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d",
+		pkt_buf_addr_pp,
+		pktbufsz_type));
+
+	pktbuf_pp = (uint64_t)pkt_buf_addr_pp;
+
+	switch (pktbufsz_type) {
+	case 0:
+		bufsize = rbr_p->pkt_buf_size0;
+		break;
+	case 1:
+		bufsize = rbr_p->pkt_buf_size1;
+		break;
+	case 2:
+		bufsize = rbr_p->pkt_buf_size2;
+		break;
+	case RCR_SINGLE_BLOCK:
+		bufsize = 0;
+		anchor_index = 0;
+		break;
+	default:
+		return (NXGE_ERROR);
+	}
+
+	if (rbr_p->num_blocks == 1) {
+		anchor_index = 0;
+		ring_info = rbr_p->ring_info;
+		bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
+		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
+			"==> nxge_rxbuf_pp_to_vp: (found, 1 block) "
+			"buf_pp $%p btype %d anchor_index %d "
+			"bufinfo $%p",
+			pkt_buf_addr_pp,
+			pktbufsz_type,
+			anchor_index,
+			bufinfo));
+
+		goto found_index;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
+		"==> nxge_rxbuf_pp_to_vp: "
+		"buf_pp $%p btype %d  anchor_index %d",
+		pkt_buf_addr_pp,
+		pktbufsz_type,
+		anchor_index));
+
+	ring_info = rbr_p->ring_info;
+	found = B_FALSE;
+	bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
+	iteration = 0;
+	max_iterations = ring_info->max_iterations;
+		/*
+		 * First check if this block has been seen
+		 * recently. This is indicated by a hint which
+		 * is initialized when the first buffer of the block
+		 * is seen. The hint is reset when the last buffer of
+		 * the block has been processed.
+		 * As three block sizes are supported, three hints
+		 * are kept. The idea behind the hints is that once
+		 * the hardware uses a block for a buffer of that
+		 * size, it will use it exclusively for that size
+		 * until it is exhausted. It is assumed that a single
+		 * block is used for a given buffer size at any
+		 * given time.
+		 */
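+		/*
+		 * Example (illustrative numbers): if a previous packet of
+		 * this size came from a 1 MB chunk starting at dvma address
+		 * 0x80000000, the hint resolves a later address such as
+		 * 0x80004000 with a single range check instead of the
+		 * binary search further below.
+		 */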
+	if (ring_info->hint[pktbufsz_type] != NO_HINT) {
+		anchor_index = ring_info->hint[pktbufsz_type];
+		dvma_addr =  bufinfo[anchor_index].dvma_addr;
+		chunk_size = bufinfo[anchor_index].buf_size;
+		if ((pktbuf_pp >= dvma_addr) &&
+			(pktbuf_pp < (dvma_addr + chunk_size))) {
+			found = B_TRUE;
+				/*
+				 * Check if this is the last buffer in the
+				 * block.  If so, reset the hint for this size.
+				 */
+
+			if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size))
+				ring_info->hint[pktbufsz_type] = NO_HINT;
+		}
+	}
+
+	if (found == B_FALSE) {
+		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
+			"==> nxge_rxbuf_pp_to_vp: (!found)"
+			"buf_pp $%p btype %d anchor_index %d",
+			pkt_buf_addr_pp,
+			pktbufsz_type,
+			anchor_index));
+
+			/*
+			 * This is the first buffer of a block of this
+			 * size, so the whole information array has to
+			 * be searched.
+			 * The search is a binary search; it assumes that
+			 * the information is already sorted in
+			 * increasing order:
+			 * info[0] < info[1] < info[2]  .... < info[n-1]
+			 * where n is the size of the information array
+			 */
+		r_index = rbr_p->num_blocks - 1;
+		l_index = 0;
+		search_done = B_FALSE;
+		anchor_index = MID_INDEX(r_index, l_index);
+		while (search_done == B_FALSE) {
+			if ((r_index == l_index) ||
+				(iteration >= max_iterations))
+				search_done = B_TRUE;
+			end_side = TO_RIGHT; /* to the right */
+			base_side = TO_LEFT; /* to the left */
+			/* read the DVMA address information and sort it */
+			dvma_addr =  bufinfo[anchor_index].dvma_addr;
+			chunk_size = bufinfo[anchor_index].buf_size;
+			NXGE_DEBUG_MSG((nxgep, RX2_CTL,
+				"==> nxge_rxbuf_pp_to_vp: (searching)"
+				"buf_pp $%p btype %d "
+				"anchor_index %d chunk_size %d dvmaaddr $%p",
+				pkt_buf_addr_pp,
+				pktbufsz_type,
+				anchor_index,
+				chunk_size,
+				dvma_addr));
+
+			if (pktbuf_pp >= dvma_addr)
+				base_side = TO_RIGHT; /* to the right */
+			if (pktbuf_pp < (dvma_addr + chunk_size))
+				end_side = TO_LEFT; /* to the left */
+
+			switch (base_side + end_side) {
+				case IN_MIDDLE:
+					/* found */
+					found = B_TRUE;
+					search_done = B_TRUE;
+					if ((pktbuf_pp + bufsize) <
+						(dvma_addr + chunk_size))
+						ring_info->hint[pktbufsz_type] =
+						bufinfo[anchor_index].buf_index;
+					break;
+				case BOTH_RIGHT:
+						/* not found: go to the right */
+					l_index = anchor_index + 1;
+					anchor_index =
+						MID_INDEX(r_index, l_index);
+					break;
+
+				case  BOTH_LEFT:
+						/* not found: go to the left */
+					r_index = anchor_index - 1;
+					anchor_index = MID_INDEX(r_index,
+						l_index);
+					break;
+				default: /* should not come here */
+					return (NXGE_ERROR);
+			}
+			iteration++;
+		}
+
+		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
+			"==> nxge_rxbuf_pp_to_vp: (search done)"
+			"buf_pp $%p btype %d anchor_index %d",
+			pkt_buf_addr_pp,
+			pktbufsz_type,
+			anchor_index));
+	}
+
+	if (found == B_FALSE) {
+		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
+			"==> nxge_rxbuf_pp_to_vp: (search failed)"
+			"buf_pp $%p btype %d anchor_index %d",
+			pkt_buf_addr_pp,
+			pktbufsz_type,
+			anchor_index));
+		return (NXGE_ERROR);
+	}
+
+found_index:
+	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
+		"==> nxge_rxbuf_pp_to_vp: (FOUND1)"
+		"buf_pp $%p btype %d bufsize %d anchor_index %d",
+		pkt_buf_addr_pp,
+		pktbufsz_type,
+		bufsize,
+		anchor_index));
+
+	/* index of the first block in this chunk */
+	chunk_index = bufinfo[anchor_index].start_index;
+	dvma_addr =  bufinfo[anchor_index].dvma_addr;
+	page_size_mask = ring_info->block_size_mask;
+
+	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
+		"==> nxge_rxbuf_pp_to_vp: (FOUND3), get chunk)"
+		"buf_pp $%p btype %d bufsize %d "
+		"anchor_index %d chunk_index %d dvma $%p",
+		pkt_buf_addr_pp,
+		pktbufsz_type,
+		bufsize,
+		anchor_index,
+		chunk_index,
+		dvma_addr));
+
+	offset = pktbuf_pp - dvma_addr; /* offset within the chunk */
+	block_size = rbr_p->block_size; /* System  block(page) size */
+
+	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
+		"==> nxge_rxbuf_pp_to_vp: (FOUND4), get chunk)"
+		"buf_pp $%p btype %d bufsize %d "
+		"anchor_index %d chunk_index %d dvma $%p "
+		"offset %d block_size %d",
+		pkt_buf_addr_pp,
+		pktbufsz_type,
+		bufsize,
+		anchor_index,
+		chunk_index,
+		dvma_addr,
+		offset,
+		block_size));
+
+	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> getting total index"));
+
+	block_index = (offset / block_size); /* index within chunk */
+	total_index = chunk_index + block_index;
+
+
+	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
+		"==> nxge_rxbuf_pp_to_vp: "
+		"total_index %d dvma_addr $%p "
+		"offset %d block_size %d "
+		"block_index %d ",
+		total_index, dvma_addr,
+		offset, block_size,
+		block_index));
+
+	*pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr
+				+ offset);
+
+	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
+		"==> nxge_rxbuf_pp_to_vp: "
+		"total_index %d dvma_addr $%p "
+		"offset %d block_size %d "
+		"block_index %d "
+		"*pkt_buf_addr_p $%p",
+		total_index, dvma_addr,
+		offset, block_size,
+		block_index,
+		*pkt_buf_addr_p));
+
+
+	*msg_index = total_index;
+	*bufoffset =  (offset & page_size_mask);
+
+	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
+		"==> nxge_rxbuf_pp_to_vp: get msg index: "
+		"msg_index %d bufoffset_index %d",
+		*msg_index,
+		*bufoffset));
+
+	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rxbuf_pp_to_vp"));
+
+	return (NXGE_OK);
+}
+
+/*
+ * used by quick sort (qsort) function
+ * to perform comparison
+ */
+static int
+nxge_sort_compare(const void *p1, const void *p2)
+{
+
+	rxbuf_index_info_t *a, *b;
+
+	a = (rxbuf_index_info_t *)p1;
+	b = (rxbuf_index_info_t *)p2;
+
+	if (a->dvma_addr > b->dvma_addr)
+		return (1);
+	if (a->dvma_addr < b->dvma_addr)
+		return (-1);
+	return (0);
+}
+
+
+
+/*
+ * grabbed this sort implementation from common/syscall/avl.c
+ *
+ */
+/*
+ * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified.
+ * v = Ptr to array/vector of objs
+ * n = # objs in the array
+ * s = size of each obj (must be a multiple of the word size)
+ * f = ptr to function to compare two objs
+ *	returns (-1 = less than, 0 = equal, 1 = greater than)
+ */
+void
+nxge_ksort(caddr_t v, int n, int s, int (*f)())
+{
+	int g, i, j, ii;
+	unsigned int *p1, *p2;
+	unsigned int tmp;
+
+	/* No work to do */
+	if (v == NULL || n <= 1)
+		return;
+	/* Sanity check on arguments */
+	ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0);
+	ASSERT(s > 0);
+
+	for (g = n / 2; g > 0; g /= 2) {
+		for (i = g; i < n; i++) {
+			for (j = i - g; j >= 0 &&
+				(*f)(v + j * s, v + (j + g) * s) == 1;
+					j -= g) {
+				p1 = (unsigned *)(v + j * s);
+				p2 = (unsigned *)(v + (j + g) * s);
+				for (ii = 0; ii < s / 4; ii++) {
+					tmp = *p1;
+					*p1++ = *p2;
+					*p2++ = tmp;
+				}
+			}
+		}
+	}
+}
+
+/*
+ * Initialize data structures required for rxdma
+ * buffer dvma->vmem address lookup
+ */
+/*ARGSUSED*/
+static nxge_status_t
+nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp)
+{
+
+	int index;
+	rxring_info_t *ring_info;
+	int max_iteration = 0, max_index = 0;
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_rxbuf_index_info_init"));
+
+	ring_info = rbrp->ring_info;
+	ring_info->hint[0] = NO_HINT;
+	ring_info->hint[1] = NO_HINT;
+	ring_info->hint[2] = NO_HINT;
+	max_index = rbrp->num_blocks;
+
+		/* read the DVMA address information and sort it */
+		/* do init of the information array */
+
+
+	NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
+		" nxge_rxbuf_index_info_init Sort ptrs"));
+
+		/* sort the array */
+	nxge_ksort((void *)ring_info->buffer, max_index,
+		sizeof (rxbuf_index_info_t), nxge_sort_compare);
+
+
+
+	for (index = 0; index < max_index; index++) {
+		NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
+			" nxge_rxbuf_index_info_init: sorted chunk %d "
+			" ioaddr $%p kaddr $%p size %x",
+			index, ring_info->buffer[index].dvma_addr,
+			ring_info->buffer[index].kaddr,
+			ring_info->buffer[index].buf_size));
+	}
+
+	max_iteration = 0;
+	while (max_index >= (1ULL << max_iteration))
+		max_iteration++;
+	ring_info->max_iterations = max_iteration + 1;
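+	/*
+	 * For example, with max_index = 8 the loop above stops once
+	 * (1 << 4) = 16 exceeds 8, so max_iteration is 4 and max_iterations
+	 * becomes 5, enough probes to cover a binary search over 8 sorted
+	 * chunk entries.
+	 */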
+	NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
+		" nxge_rxbuf_index_info_init Find max iter %d",
+					ring_info->max_iterations));
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxbuf_index_info_init"));
+	return (NXGE_OK);
+}
+
+/* ARGSUSED */
+void
+nxge_dump_rcr_entry(p_nxge_t nxgep, p_rcr_entry_t entry_p)
+{
+#ifdef	NXGE_DEBUG
+
+	uint32_t bptr;
+	uint64_t pp;
+
+	bptr = entry_p->bits.hdw.pkt_buf_addr;
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL,
+		"\trcr entry $%p "
+		"\trcr entry 0x%0llx "
+		"\trcr entry 0x%08x "
+		"\trcr entry 0x%08x "
+		"\tvalue 0x%0llx\n"
+		"\tmulti = %d\n"
+		"\tpkt_type = 0x%x\n"
+		"\tzero_copy = %d\n"
+		"\tnoport = %d\n"
+		"\tpromis = %d\n"
+		"\terror = 0x%04x\n"
+		"\tdcf_err = 0x%01x\n"
+		"\tl2_len = %d\n"
+		"\tpktbufsize = %d\n"
+		"\tpkt_buf_addr = $%p\n"
+		"\tpkt_buf_addr (<< 6) = $%p\n",
+		entry_p,
+		*(int64_t *)entry_p,
+		*(int32_t *)entry_p,
+		*(int32_t *)((char *)entry_p + 32),
+		entry_p->value,
+		entry_p->bits.hdw.multi,
+		entry_p->bits.hdw.pkt_type,
+		entry_p->bits.hdw.zero_copy,
+		entry_p->bits.hdw.noport,
+		entry_p->bits.hdw.promis,
+		entry_p->bits.hdw.error,
+		entry_p->bits.hdw.dcf_err,
+		entry_p->bits.hdw.l2_len,
+		entry_p->bits.hdw.pktbufsz,
+		bptr,
+		entry_p->bits.ldw.pkt_buf_addr));
+
+	pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) <<
+		RCR_PKT_BUF_ADDR_SHIFT;
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "rcr pp 0x%llx l2 len %d",
+		pp, (*(int64_t *)entry_p >> 40) & 0x3fff));
+#endif
+}
+
+void
+nxge_rxdma_regs_dump(p_nxge_t nxgep, int rdc)
+{
+	npi_handle_t		handle;
+	rbr_stat_t 		rbr_stat;
+	addr44_t 		hd_addr;
+	addr44_t 		tail_addr;
+	uint16_t 		qlen;
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL,
+		"==> nxge_rxdma_regs_dump: rdc channel %d", rdc));
+
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+
+	/* RBR head */
+	hd_addr.addr = 0;
+	(void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &hd_addr);
+	printf("nxge_rxdma_regs_dump: got hdptr $%p \n",
+		(void *)hd_addr.addr);
+
+	/* RBR stats */
+	(void) npi_rxdma_rdc_rbr_stat_get(handle, rdc, &rbr_stat);
+	printf("nxge_rxdma_regs_dump: rbr len %d \n", rbr_stat.bits.ldw.qlen);
+
+	/* RCR tail */
+	tail_addr.addr = 0;
+	(void) npi_rxdma_rdc_rcr_tail_get(handle, rdc, &tail_addr);
+	printf("nxge_rxdma_regs_dump: got tail ptr $%p \n",
+		(void *)tail_addr.addr);
+
+	/* RCR qlen */
+	(void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen);
+	printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen);
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL,
+		"<== nxge_rxdma_regs_dump: rdc channel %d", rdc));
+}
+
+void
+nxge_rxdma_stop(p_nxge_t nxgep)
+{
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop"));
+
+	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
+	(void) nxge_rx_mac_disable(nxgep);
+	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop"));
+}
+
+void
+nxge_rxdma_stop_reinit(p_nxge_t nxgep)
+{
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_reinit"));
+
+	(void) nxge_rxdma_stop(nxgep);
+	(void) nxge_uninit_rxdma_channels(nxgep);
+	(void) nxge_init_rxdma_channels(nxgep);
+
+#ifndef	AXIS_DEBUG_LB
+	(void) nxge_xcvr_init(nxgep);
+	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
+#endif
+	(void) nxge_rx_mac_enable(nxgep);
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_reinit"));
+}
+
+nxge_status_t
+nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable)
+{
+	int			i, ndmas;
+	uint16_t		channel;
+	p_rx_rbr_rings_t 	rx_rbr_rings;
+	p_rx_rbr_ring_t		*rbr_rings;
+	npi_handle_t		handle;
+	npi_status_t		rs = NPI_SUCCESS;
+	nxge_status_t		status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+		"==> nxge_rxdma_hw_mode: mode %d", enable));
+
+	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
+		NXGE_DEBUG_MSG((nxgep, RX_CTL,
+			"<== nxge_rxdma_mode: not initialized"));
+		return (NXGE_ERROR);
+	}
+
+	rx_rbr_rings = nxgep->rx_rbr_rings;
+	if (rx_rbr_rings == NULL) {
+		NXGE_DEBUG_MSG((nxgep, RX_CTL,
+			"<== nxge_rxdma_mode: NULL ring pointer"));
+		return (NXGE_ERROR);
+	}
+	if (rx_rbr_rings->rbr_rings == NULL) {
+		NXGE_DEBUG_MSG((nxgep, RX_CTL,
+			"<== nxge_rxdma_mode: NULL rbr rings pointer"));
+		return (NXGE_ERROR);
+	}
+
+	ndmas = rx_rbr_rings->ndmas;
+	if (!ndmas) {
+		NXGE_DEBUG_MSG((nxgep, RX_CTL,
+			"<== nxge_rxdma_mode: no channel"));
+		return (NXGE_ERROR);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+		"==> nxge_rxdma_mode (ndmas %d)", ndmas));
+
+	rbr_rings = rx_rbr_rings->rbr_rings;
+
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	for (i = 0; i < ndmas; i++) {
+		if (rbr_rings == NULL || rbr_rings[i] == NULL) {
+			continue;
+		}
+		channel = rbr_rings[i]->rdc;
+		if (enable) {
+			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+				"==> nxge_rxdma_hw_mode: channel %d (enable)",
+				channel));
+			rs = npi_rxdma_cfg_rdc_enable(handle, channel);
+		} else {
+			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+				"==> nxge_rxdma_hw_mode: channel %d (disable)",
+				channel));
+			rs = npi_rxdma_cfg_rdc_disable(handle, channel);
+		}
+	}
+
+	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+		"<== nxge_rxdma_hw_mode: status 0x%x", status));
+
+	return (status);
+}
+
+void
+nxge_rxdma_enable_channel(p_nxge_t nxgep, uint16_t channel)
+{
+	npi_handle_t		handle;
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+		"==> nxge_rxdma_enable_channel: channel %d", channel));
+
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	(void) npi_rxdma_cfg_rdc_enable(handle, channel);
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_enable_channel"));
+}
+
+void
+nxge_rxdma_disable_channel(p_nxge_t nxgep, uint16_t channel)
+{
+	npi_handle_t		handle;
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+		"==> nxge_rxdma_disable_channel: channel %d", channel));
+
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	(void) npi_rxdma_cfg_rdc_disable(handle, channel);
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_disable_channel"));
+}
+
+void
+nxge_hw_start_rx(p_nxge_t nxgep)
+{
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_rx"));
+
+	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
+	(void) nxge_rx_mac_enable(nxgep);
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_rx"));
+}
+
+/*ARGSUSED*/
+void
+nxge_fixup_rxdma_rings(p_nxge_t nxgep)
+{
+	int			i, ndmas;
+	uint16_t		rdc;
+	p_rx_rbr_rings_t 	rx_rbr_rings;
+	p_rx_rbr_ring_t		*rbr_rings;
+	p_rx_rcr_rings_t 	rx_rcr_rings;
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_fixup_rxdma_rings"));
+
+	rx_rbr_rings = nxgep->rx_rbr_rings;
+	if (rx_rbr_rings == NULL) {
+		NXGE_DEBUG_MSG((nxgep, RX_CTL,
+			"<== nxge_fixup_rxdma_rings: NULL ring pointer"));
+		return;
+	}
+	ndmas = rx_rbr_rings->ndmas;
+	if (!ndmas) {
+		NXGE_DEBUG_MSG((nxgep, RX_CTL,
+			"<== nxge_fixup_rxdma_rings: no channel"));
+		return;
+	}
+
+	rx_rcr_rings = nxgep->rx_rcr_rings;
+	if (rx_rcr_rings == NULL) {
+		NXGE_DEBUG_MSG((nxgep, RX_CTL,
+			"<== nxge_fixup_rxdma_rings: NULL ring pointer"));
+		return;
+	}
+	NXGE_DEBUG_MSG((nxgep, RX_CTL,
+		"==> nxge_fixup_rxdma_rings (ndmas %d)", ndmas));
+
+	nxge_rxdma_hw_stop(nxgep);
+
+	rbr_rings = rx_rbr_rings->rbr_rings;
+	for (i = 0; i < ndmas; i++) {
+		rdc = rbr_rings[i]->rdc;
+		NXGE_DEBUG_MSG((nxgep, RX_CTL,
+			"==> nxge_fixup_rxdma_rings: channel %d "
+			"ring $%p", rdc, rbr_rings[i]));
+		(void) nxge_rxdma_fixup_channel(nxgep, rdc, i);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings"));
+}
+
+void
+nxge_rxdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
+{
+	int		i;
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fix_channel"));
+	i = nxge_rxdma_get_ring_index(nxgep, channel);
+	if (i < 0) {
+		NXGE_DEBUG_MSG((nxgep, RX_CTL,
+			"<== nxge_rxdma_fix_channel: no entry found"));
+		return;
+	}
+
+	nxge_rxdma_fixup_channel(nxgep, channel, i);
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fix_channel"));
+}
+
+void
+nxge_rxdma_fixup_channel(p_nxge_t nxgep, uint16_t channel, int entry)
+{
+	int			ndmas;
+	p_rx_rbr_rings_t 	rx_rbr_rings;
+	p_rx_rbr_ring_t		*rbr_rings;
+	p_rx_rcr_rings_t 	rx_rcr_rings;
+	p_rx_rcr_ring_t		*rcr_rings;
+	p_rx_mbox_areas_t 	rx_mbox_areas_p;
+	p_rx_mbox_t		*rx_mbox_p;
+	p_nxge_dma_pool_t	dma_buf_poolp;
+	p_nxge_dma_pool_t	dma_cntl_poolp;
+	p_rx_rbr_ring_t 	rbrp;
+	p_rx_rcr_ring_t 	rcrp;
+	p_rx_mbox_t 		mboxp;
+	p_nxge_dma_common_t 	dmap;
+	nxge_status_t		status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fixup_channel"));
+
+	(void) nxge_rxdma_stop_channel(nxgep, channel);
+
+	dma_buf_poolp = nxgep->rx_buf_pool_p;
+	dma_cntl_poolp = nxgep->rx_cntl_pool_p;
+
+	if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
+		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+			"<== nxge_rxdma_fixup_channel: buf not allocated"));
+		return;
+	}
+
+	ndmas = dma_buf_poolp->ndmas;
+	if (!ndmas) {
+		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+			"<== nxge_rxdma_fixup_channel: no dma allocated"));
+		return;
+	}
+
+	rx_rbr_rings = nxgep->rx_rbr_rings;
+	rx_rcr_rings = nxgep->rx_rcr_rings;
+	rbr_rings = rx_rbr_rings->rbr_rings;
+	rcr_rings = rx_rcr_rings->rcr_rings;
+	rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
+	rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
+
+	/* Reinitialize the receive block and completion rings */
+	rbrp = (p_rx_rbr_ring_t)rbr_rings[entry];
+	rcrp = (p_rx_rcr_ring_t)rcr_rings[entry];
+	mboxp = (p_rx_mbox_t)rx_mbox_p[entry];
+
+
+	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
+	rbrp->rbr_rd_index = 0;
+	rcrp->comp_rd_index = 0;
+	rcrp->comp_wt_index = 0;
+
+	dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
+	bzero((caddr_t)dmap->kaddrp, dmap->alength);
+
+	status = nxge_rxdma_start_channel(nxgep, channel,
+			rbrp, rcrp, mboxp);
+	if (status != NXGE_OK) {
+		goto nxge_rxdma_fixup_channel_fail;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fixup_channel"));
+	return;
+
+nxge_rxdma_fixup_channel_fail:
+	NXGE_DEBUG_MSG((nxgep, RX_CTL,
+		"==> nxge_rxdma_fixup_channel: failed (0x%08x)", status));
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fixup_channel"));
+}
+
+int
+nxge_rxdma_get_ring_index(p_nxge_t nxgep, uint16_t channel)
+{
+	int			i, ndmas;
+	uint16_t		rdc;
+	p_rx_rbr_rings_t 	rx_rbr_rings;
+	p_rx_rbr_ring_t		*rbr_rings;
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL,
+		"==> nxge_rxdma_get_ring_index: channel %d", channel));
+
+	rx_rbr_rings = nxgep->rx_rbr_rings;
+	if (rx_rbr_rings == NULL) {
+		NXGE_DEBUG_MSG((nxgep, RX_CTL,
+			"<== nxge_rxdma_get_ring_index: NULL ring pointer"));
+		return (-1);
+	}
+	ndmas = rx_rbr_rings->ndmas;
+	if (!ndmas) {
+		NXGE_DEBUG_MSG((nxgep, RX_CTL,
+			"<== nxge_rxdma_get_ring_index: no channel"));
+		return (-1);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL,
+		"==> nxge_rxdma_get_ring_index (ndmas %d)", ndmas));
+
+	rbr_rings = rx_rbr_rings->rbr_rings;
+	for (i = 0; i < ndmas; i++) {
+		rdc = rbr_rings[i]->rdc;
+		if (channel == rdc) {
+			NXGE_DEBUG_MSG((nxgep, RX_CTL,
+				"==> nxge_rxdma_get_ring_index: "
+				"channel %d (index %d) "
+				"ring $%p", channel, i,
+				rbr_rings[i]));
+			return (i);
+		}
+	}
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL,
+		"<== nxge_rxdma_get_ring_index: not found"));
+
+	return (-1);
+}
+
+p_rx_rbr_ring_t
+nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel)
+{
+	int			i, ndmas;
+	uint16_t		rdc;
+	p_rx_rbr_rings_t 	rx_rbr_rings;
+	p_rx_rbr_ring_t		*rbr_rings;
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL,
+		"==> nxge_rxdma_get_rbr_ring: channel %d", channel));
+
+	rx_rbr_rings = nxgep->rx_rbr_rings;
+	if (rx_rbr_rings == NULL) {
+		NXGE_DEBUG_MSG((nxgep, RX_CTL,
+			"<== nxge_rxdma_get_rbr_ring: NULL ring pointer"));
+		return (NULL);
+	}
+	ndmas = rx_rbr_rings->ndmas;
+	if (!ndmas) {
+		NXGE_DEBUG_MSG((nxgep, RX_CTL,
+			"<== nxge_rxdma_get_rbr_ring: no channel"));
+		return (NULL);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL,
+		"==> nxge_rxdma_get_ring (ndmas %d)", ndmas));
+
+	rbr_rings = rx_rbr_rings->rbr_rings;
+	for (i = 0; i < ndmas; i++) {
+		rdc = rbr_rings[i]->rdc;
+		if (channel == rdc) {
+			NXGE_DEBUG_MSG((nxgep, RX_CTL,
+				"==> nxge_rxdma_get_rbr_ring: channel %d "
+				"ring $%p", channel, rbr_rings[i]));
+			return (rbr_rings[i]);
+		}
+	}
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL,
+		"<== nxge_rxdma_get_rbr_ring: not found"));
+
+	return (NULL);
+}
+
+p_rx_rcr_ring_t
+nxge_rxdma_get_rcr_ring(p_nxge_t nxgep, uint16_t channel)
+{
+	int			i, ndmas;
+	uint16_t		rdc;
+	p_rx_rcr_rings_t 	rx_rcr_rings;
+	p_rx_rcr_ring_t		*rcr_rings;
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL,
+		"==> nxge_rxdma_get_rcr_ring: channel %d", channel));
+
+	rx_rcr_rings = nxgep->rx_rcr_rings;
+	if (rx_rcr_rings == NULL) {
+		NXGE_DEBUG_MSG((nxgep, RX_CTL,
+			"<== nxge_rxdma_get_rcr_ring: NULL ring pointer"));
+		return (NULL);
+	}
+	ndmas = rx_rcr_rings->ndmas;
+	if (!ndmas) {
+		NXGE_DEBUG_MSG((nxgep, RX_CTL,
+			"<== nxge_rxdma_get_rcr_ring: no channel"));
+		return (NULL);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL,
+		"==> nxge_rxdma_get_rcr_ring (ndmas %d)", ndmas));
+
+	rcr_rings = rx_rcr_rings->rcr_rings;
+	for (i = 0; i < ndmas; i++) {
+		rdc = rcr_rings[i]->rdc;
+		if (channel == rdc) {
+			NXGE_DEBUG_MSG((nxgep, RX_CTL,
+				"==> nxge_rxdma_get_rcr_ring: channel %d "
+				"ring $%p", channel, rcr_rings[i]));
+			return (rcr_rings[i]);
+		}
+	}
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL,
+		"<== nxge_rxdma_get_rcr_ring: not found"));
+
+	return (NULL);
+}
+
+/*
+ * Static functions start here.
+ */
+
+static p_rx_msg_t
+nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p)
+{
+	p_rx_msg_t nxge_mp 		= NULL;
+	p_nxge_dma_common_t		dmamsg_p;
+	uchar_t 			*buffer;
+
+	nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP);
+	if (nxge_mp == NULL) {
+		NXGE_DEBUG_MSG((NULL, MEM_CTL,
+			"Allocation of a rx msg failed."));
+		goto nxge_allocb_exit;
+	}
+
+	nxge_mp->use_buf_pool = B_FALSE;
+	if (dmabuf_p) {
+		nxge_mp->use_buf_pool = B_TRUE;
+		dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma;
+		*dmamsg_p = *dmabuf_p;
+		dmamsg_p->nblocks = 1;
+		dmamsg_p->block_size = size;
+		dmamsg_p->alength = size;
+		buffer = (uchar_t *)dmabuf_p->kaddrp;
+
+		dmabuf_p->kaddrp = (void *)
+				((char *)dmabuf_p->kaddrp + size);
+		dmabuf_p->ioaddr_pp = (void *)
+				((char *)dmabuf_p->ioaddr_pp + size);
+		dmabuf_p->alength -= size;
+		dmabuf_p->offset += size;
+		dmabuf_p->dma_cookie.dmac_laddress += size;
+		dmabuf_p->dma_cookie.dmac_size -= size;
+
+	} else {
+		buffer = KMEM_ALLOC(size, KM_NOSLEEP);
+		if (buffer == NULL) {
+			NXGE_DEBUG_MSG((NULL, MEM_CTL,
+				"Allocation of a receive page failed."));
+			goto nxge_allocb_fail1;
+		}
+	}
+
+	nxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &nxge_mp->freeb);
+	if (nxge_mp->rx_mblk_p == NULL) {
+		NXGE_DEBUG_MSG((NULL, MEM_CTL, "desballoc failed."));
+		goto nxge_allocb_fail2;
+	}
+
+	nxge_mp->buffer = buffer;
+	nxge_mp->block_size = size;
+	nxge_mp->freeb.free_func = (void (*)())nxge_freeb;
+	nxge_mp->freeb.free_arg = (caddr_t)nxge_mp;
+	nxge_mp->ref_cnt = 1;
+	nxge_mp->free = B_TRUE;
+	nxge_mp->rx_use_bcopy = B_FALSE;
+
+	atomic_inc_32(&nxge_mblks_pending);
+
+	goto nxge_allocb_exit;
+
+nxge_allocb_fail2:
+	if (!nxge_mp->use_buf_pool) {
+		KMEM_FREE(buffer, size);
+	}
+
+nxge_allocb_fail1:
+	KMEM_FREE(nxge_mp, sizeof (rx_msg_t));
+	nxge_mp = NULL;
+
+nxge_allocb_exit:
+	return (nxge_mp);
+}
+
+p_mblk_t
+nxge_dupb(p_rx_msg_t nxge_mp, uint_t offset, size_t size)
+{
+	p_mblk_t mp;
+
+	NXGE_DEBUG_MSG((NULL, MEM_CTL, "==> nxge_dupb"));
+	NXGE_DEBUG_MSG((NULL, MEM_CTL, "nxge_mp = $%p "
+		"offset = 0x%08X "
+		"size = 0x%08X",
+		nxge_mp, offset, size));
+
+	mp = desballoc(&nxge_mp->buffer[offset], size,
+				0, &nxge_mp->freeb);
+	if (mp == NULL) {
+		NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed"));
+		goto nxge_dupb_exit;
+	}
+	atomic_inc_32(&nxge_mp->ref_cnt);
+	atomic_inc_32(&nxge_mblks_pending);
+
+
+nxge_dupb_exit:
+	NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p",
+		nxge_mp));
+	return (mp);
+}
+
+p_mblk_t
+nxge_dupb_bcopy(p_rx_msg_t nxge_mp, uint_t offset, size_t size)
+{
+	p_mblk_t mp;
+	uchar_t *dp;
+
+	mp = allocb(size + NXGE_RXBUF_EXTRA, 0);
+	if (mp == NULL) {
+		NXGE_DEBUG_MSG((NULL, RX_CTL, "allocb failed"));
+		goto nxge_dupb_bcopy_exit;
+	}
+	dp = mp->b_rptr = mp->b_rptr + NXGE_RXBUF_EXTRA;
+	bcopy((void *)&nxge_mp->buffer[offset], dp, size);
+	mp->b_wptr = dp + size;
+
+nxge_dupb_bcopy_exit:
+	NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p",
+		nxge_mp));
+	return (mp);
+}
+
+void nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p,
+	p_rx_msg_t rx_msg_p);
+
+void
+nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p)
+{
+
+	npi_handle_t		handle;
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_post_page"));
+
+	/* Reuse this buffer */
+	rx_msg_p->free = B_FALSE;
+	rx_msg_p->cur_usage_cnt = 0;
+	rx_msg_p->max_usage_cnt = 0;
+	rx_msg_p->pkt_buf_size = 0;
+
+	if (rx_rbr_p->rbr_use_bcopy) {
+		rx_msg_p->rx_use_bcopy = B_FALSE;
+		atomic_dec_32(&rx_rbr_p->rbr_consumed);
+	}
+
+	/*
+	 * Get the rbr header pointer and its offset index.
+	 */
+	MUTEX_ENTER(&rx_rbr_p->post_lock);
+
+
+	rx_rbr_p->rbr_wr_index =  ((rx_rbr_p->rbr_wr_index + 1) &
+					    rx_rbr_p->rbr_wrap_mask);
+	rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr;
+	MUTEX_EXIT(&rx_rbr_p->post_lock);
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	npi_rxdma_rdc_rbr_kick(handle, rx_rbr_p->rdc, 1);
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL,
+		"<== nxge_post_page (channel %d post_next_index %d)",
+		rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index));
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_post_page"));
+}
+
+void
+nxge_freeb(p_rx_msg_t rx_msg_p)
+{
+	size_t size;
+	uchar_t *buffer = NULL;
+	int ref_cnt;
+
+	NXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> nxge_freeb"));
+	NXGE_DEBUG_MSG((NULL, MEM2_CTL,
+		"nxge_freeb:rx_msg_p = $%p (block pending %d)",
+		rx_msg_p, nxge_mblks_pending));
+
+
+	ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1);
+	atomic_dec_32(&nxge_mblks_pending);
+	if (!ref_cnt) {
+		buffer = rx_msg_p->buffer;
+		size = rx_msg_p->block_size;
+		NXGE_DEBUG_MSG((NULL, MEM2_CTL, "nxge_freeb: "
+			"will free: rx_msg_p = $%p (block pending %d)",
+			rx_msg_p, nxge_mblks_pending));
+
+		if (!rx_msg_p->use_buf_pool) {
+			KMEM_FREE(buffer, size);
+		}
+
+		KMEM_FREE(rx_msg_p, sizeof (rx_msg_t));
+		return;
+	}
+
+	/*
+	 * Repost buffer.
+	 */
+	if ((ref_cnt == 1) && (rx_msg_p->free == B_TRUE)) {
+		NXGE_DEBUG_MSG((NULL, RX_CTL,
+		    "nxge_freeb: post page $%p:", rx_msg_p));
+		nxge_post_page(rx_msg_p->nxgep, rx_msg_p->rx_rbr_p,
+		    rx_msg_p);
+	}
+
+	NXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== nxge_freeb"));
+}
+
+uint_t
+nxge_rx_intr(void *arg1, void *arg2)
+{
+	p_nxge_ldv_t		ldvp = (p_nxge_ldv_t)arg1;
+	p_nxge_t		nxgep = (p_nxge_t)arg2;
+	p_nxge_ldg_t		ldgp;
+	uint8_t			channel;
+	npi_handle_t		handle;
+	rx_dma_ctl_stat_t	cs;
+
+#ifdef	NXGE_DEBUG
+	rxdma_cfig1_t		cfg;
+#endif
+	uint_t 			serviced = DDI_INTR_UNCLAIMED;
+
+	if (ldvp == NULL) {
+		NXGE_DEBUG_MSG((NULL, INT_CTL,
+			"<== nxge_rx_intr: arg2 $%p arg1 $%p",
+			nxgep, ldvp));
+
+		return (DDI_INTR_CLAIMED);
+	}
+
+	if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
+		nxgep = ldvp->nxgep;
+	}
+	NXGE_DEBUG_MSG((nxgep, RX_CTL,
+		"==> nxge_rx_intr: arg2 $%p arg1 $%p",
+		nxgep, ldvp));
+
+	/*
+	 * This interrupt handler is for a specific
+	 * receive dma channel.
+	 */
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	/*
+	 * Get the control and status for this channel.
+	 */
+	channel = ldvp->channel;
+	ldgp = ldvp->ldgp;
+	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value);
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d "
+		"cs 0x%016llx rcrto 0x%x rcrthres %x",
+		channel,
+		cs.value,
+		cs.bits.hdw.rcrto,
+		cs.bits.hdw.rcrthres));
+
+	nxge_rx_pkts_vring(nxgep, ldvp->vdma_index, ldvp, cs);
+	serviced = DDI_INTR_CLAIMED;
+
+	/* error events. */
+	if (cs.value & RX_DMA_CTL_STAT_ERROR) {
+		(void) nxge_rx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs);
+	}
+
+nxge_intr_exit:
+
+
+	/*
+	 * Enable the mailbox update interrupt if we want
+	 * to use mailbox. We probably don't need to use
+	 * mailbox as it only saves us one pio read.
+	 * Also write 1 to rcrthres and rcrto to clear
+	 * these two edge triggered bits.
+	 */
+
+	cs.value &= RX_DMA_CTL_STAT_WR1C;
+	cs.bits.hdw.mex = 1;
+	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
+			cs.value);
+
+	/*
+	 * Rearm this logical group if this is a single device
+	 * group.
+	 */
+	if (ldgp->nldvs == 1) {
+		ldgimgm_t		mgm;
+		mgm.value = 0;
+		mgm.bits.ldw.arm = 1;
+		mgm.bits.ldw.timer = ldgp->ldg_timer;
+		NXGE_REG_WR64(handle,
+			    LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
+			    mgm.value);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: serviced %d",
+		serviced));
+	return (serviced);
+}
+
+/*
+ * Process the packets received in the specified logical device
+ * and pass up a chain of message blocks to the upper layer.
+ */
+static void
+nxge_rx_pkts_vring(p_nxge_t nxgep, uint_t vindex, p_nxge_ldv_t ldvp,
+				    rx_dma_ctl_stat_t cs)
+{
+	p_mblk_t		mp;
+	p_rx_rcr_ring_t		rcrp;
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring"));
+	if ((mp = nxge_rx_pkts(nxgep, vindex, ldvp, &rcrp, cs)) == NULL) {
+		NXGE_DEBUG_MSG((nxgep, RX_CTL,
+			"<== nxge_rx_pkts_vring: no mp"));
+		return;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring: $%p",
+		mp));
+
+#ifdef  NXGE_DEBUG
+		NXGE_DEBUG_MSG((nxgep, RX_CTL,
+			"==> nxge_rx_pkts_vring:calling mac_rx "
+			"LEN %d mp $%p mp->b_cont $%p mp->b_next $%p rcrp $%p "
+			"mac_handle $%p",
+			mp->b_wptr - mp->b_rptr,
+			mp, mp->b_cont, mp->b_next,
+			rcrp, rcrp->rcr_mac_handle));
+
+		NXGE_DEBUG_MSG((nxgep, RX_CTL,
+			"==> nxge_rx_pkts_vring: dump packets "
+			"(mp $%p b_rptr $%p b_wptr $%p):\n %s",
+			mp,
+			mp->b_rptr,
+			mp->b_wptr,
+			nxge_dump_packet((char *)mp->b_rptr,
+			mp->b_wptr - mp->b_rptr)));
+		if (mp->b_cont) {
+			NXGE_DEBUG_MSG((nxgep, RX_CTL,
+				"==> nxge_rx_pkts_vring: dump b_cont packets "
+				"(mp->b_cont $%p b_rptr $%p b_wptr $%p):\n %s",
+				mp->b_cont,
+				mp->b_cont->b_rptr,
+				mp->b_cont->b_wptr,
+				nxge_dump_packet((char *)mp->b_cont->b_rptr,
+				mp->b_cont->b_wptr - mp->b_cont->b_rptr)));
+		}
+		if (mp->b_next) {
+			NXGE_DEBUG_MSG((nxgep, RX_CTL,
+				"==> nxge_rx_pkts_vring: dump next packets "
+				"(b_rptr $%p): %s",
+				mp->b_next->b_rptr,
+				nxge_dump_packet((char *)mp->b_next->b_rptr,
+				mp->b_next->b_wptr - mp->b_next->b_rptr)));
+		}
+#endif
+
+	mac_rx(nxgep->mach, rcrp->rcr_mac_handle, mp);
+}
+
+
+/*
+ * This routine is the main packet receive processing function.
+ * It gets the packet type, error code, and buffer related
+ * information from the receive completion entry.
+ * The number of completion entries to process is based on the number
+ * of packets queued by the hardware, a hardware maintained tail pointer
+ * and a configurable receive packet count.
+ *
+ * A chain of message blocks will be created as a result of processing
+ * the completion entries. This chain of message blocks will be returned and
+ * a hardware control status register will be updated with the number of
+ * packets that were removed from the hardware queue.
+ *
+ */
+mblk_t *
+nxge_rx_pkts(p_nxge_t nxgep, uint_t vindex, p_nxge_ldv_t ldvp,
+    p_rx_rcr_ring_t *rcrp, rx_dma_ctl_stat_t cs)
+{
+	npi_handle_t		handle;
+	uint8_t			channel;
+	p_rx_rcr_rings_t	rx_rcr_rings;
+	p_rx_rcr_ring_t		rcr_p;
+	uint32_t		comp_rd_index;
+	p_rcr_entry_t		rcr_desc_rd_head_p;
+	p_rcr_entry_t		rcr_desc_rd_head_pp;
+	p_mblk_t		nmp, mp_cont, head_mp, *tail_mp;
+	uint16_t		qlen, nrcr_read, npkt_read;
+	uint32_t		qlen_hw;
+	boolean_t		multi;
+	rcrcfig_b_t		rcr_cfg_b;
+#if defined(_BIG_ENDIAN)
+	npi_status_t		rs = NPI_SUCCESS;
+#endif
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:vindex %d "
+		"channel %d", vindex, ldvp->channel));
+
+	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
+		return (NULL);
+	}
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	rx_rcr_rings = nxgep->rx_rcr_rings;
+	rcr_p = rx_rcr_rings->rcr_rings[vindex];
+	channel = rcr_p->rdc;
+	if (channel != ldvp->channel) {
+		NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:index %d "
+			"channel %d, and rcr channel %d not matched.",
+			vindex, ldvp->channel, channel));
+		return (NULL);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL,
+		"==> nxge_rx_pkts: START: rcr channel %d "
+		"head_p $%p head_pp $%p  index %d ",
+		channel, rcr_p->rcr_desc_rd_head_p,
+		rcr_p->rcr_desc_rd_head_pp,
+		rcr_p->comp_rd_index));
+
+
+#if !defined(_BIG_ENDIAN)
+	qlen = RXDMA_REG_READ32(handle, RCRSTAT_A_REG, channel) & 0xffff;
+#else
+	rs = npi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen);
+	if (rs != NPI_SUCCESS) {
+		NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:index %d "
+		"channel %d, get qlen failed 0x%08x",
+		vindex, ldvp->channel, rs));
+		return (NULL);
+	}
+#endif
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d "
+		"qlen %d", channel, qlen));
+
+	if (!qlen) {
+		NXGE_DEBUG_MSG((nxgep, RX_CTL,
+			"==> nxge_rx_pkts:rcr channel %d "
+			"qlen %d (no pkts)", channel, qlen));
+
+		return (NULL);
+	}
+
+	comp_rd_index = rcr_p->comp_rd_index;
+
+	rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p;
+	rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp;
+	nrcr_read = npkt_read = 0;
+
+	/*
+	 * Number of packets queued
+	 * (a jumbo or multi-segment packet is counted as only one
+	 *  packet, although it may take up more than one completion entry).
+	 */
+	qlen_hw = (qlen < nxge_max_rx_pkts) ?
+		qlen : nxge_max_rx_pkts;
+	head_mp = NULL;
+	tail_mp = &head_mp;
+	nmp = mp_cont = NULL;
+	multi = B_FALSE;
+
+	while (qlen_hw) {
+
+#ifdef NXGE_DEBUG
+		nxge_dump_rcr_entry(nxgep, rcr_desc_rd_head_p);
+#endif
+		/*
+		 * Process one completion ring entry.
+		 */
+		nxge_receive_packet(nxgep,
+			rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont);
+
+		/*
+		 * Message chaining: a frame that fits in one buffer is
+		 * appended to the chain via b_next.  A multi-segment frame
+		 * links its continuation buffers through b_cont and the
+		 * chain then resumes at the first segment's b_next.
+		 */
+		if (nmp) {
+			nmp->b_next = NULL;
+			if (!multi && !mp_cont) { /* frame fits a partition */
+				*tail_mp = nmp;
+				tail_mp = &nmp->b_next;
+				nmp = NULL;
+			} else if (multi && !mp_cont) { /* first segment */
+				*tail_mp = nmp;
+				tail_mp = &nmp->b_cont;
+			} else if (multi && mp_cont) {	/* mid of multi segs */
+				*tail_mp = mp_cont;
+				tail_mp = &mp_cont->b_cont;
+			} else if (!multi && mp_cont) { /* last segment */
+				*tail_mp = mp_cont;
+				tail_mp = &nmp->b_next;
+				nmp = NULL;
+			}
+		}
+		NXGE_DEBUG_MSG((nxgep, RX_CTL,
+			"==> nxge_rx_pkts: loop: rcr channel %d "
+			"before updating: multi %d "
+			"nrcr_read %d "
+			"npk read %d "
+			"head_pp $%p  index %d ",
+			channel,
+			multi,
+			nrcr_read, npkt_read, rcr_desc_rd_head_pp,
+			comp_rd_index));
+
+		if (!multi) {
+			qlen_hw--;
+			npkt_read++;
+		}
+
+		/*
+		 * Update the next read entry.
+		 */
+		comp_rd_index = NEXT_ENTRY(comp_rd_index,
+					rcr_p->comp_wrap_mask);
+
+		rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p,
+				rcr_p->rcr_desc_first_p,
+				rcr_p->rcr_desc_last_p);
+
+		nrcr_read++;
+
+		NXGE_DEBUG_MSG((nxgep, RX_CTL,
+			"<== nxge_rx_pkts: (SAM, process one packet) "
+			"nrcr_read %d",
+			nrcr_read));
+		NXGE_DEBUG_MSG((nxgep, RX_CTL,
+			"==> nxge_rx_pkts: loop: rcr channel %d "
+			"multi %d "
+			"nrcr_read %d "
+			"npk read %d "
+			"head_pp $%p  index %d ",
+			channel,
+			multi,
+			nrcr_read, npkt_read, rcr_desc_rd_head_pp,
+			comp_rd_index));
+
+	}
+
+	rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp;
+	rcr_p->comp_rd_index = comp_rd_index;
+	rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p;
+
+	if ((nxgep->intr_timeout != rcr_p->intr_timeout) ||
+		(nxgep->intr_threshold != rcr_p->intr_threshold)) {
+		rcr_p->intr_timeout = nxgep->intr_timeout;
+		rcr_p->intr_threshold = nxgep->intr_threshold;
+		rcr_cfg_b.value = 0x0ULL;
+		if (rcr_p->intr_timeout)
+			rcr_cfg_b.bits.ldw.entout = 1;
+		rcr_cfg_b.bits.ldw.timeout = rcr_p->intr_timeout;
+		rcr_cfg_b.bits.ldw.pthres = rcr_p->intr_threshold;
+		RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG,
+				    channel, rcr_cfg_b.value);
+	}
+
+	cs.bits.ldw.pktread = npkt_read;
+	cs.bits.ldw.ptrread = nrcr_read;
+	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG,
+			    channel, cs.value);
+	NXGE_DEBUG_MSG((nxgep, RX_CTL,
+		"==> nxge_rx_pkts: EXIT: rcr channel %d "
+		"head_pp $%p  index %016llx ",
+		channel,
+		rcr_p->rcr_desc_rd_head_pp,
+		rcr_p->comp_rd_index));
+	/*
+	 * Update RCR buffer pointer read and number of packets
+	 * read.
+	 */
+
+	*rcrp = rcr_p;
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_pkts"));
+	return (head_mp);
+}
+
+void
+nxge_receive_packet(p_nxge_t nxgep,
+    p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p,
+    boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont)
+{
+	p_mblk_t		nmp = NULL;
+	uint64_t		multi;
+	uint64_t		dcf_err;
+	uint8_t			channel;
+
+	boolean_t		first_entry = B_TRUE;
+	boolean_t		is_tcp_udp = B_FALSE;
+	boolean_t		buffer_free = B_FALSE;
+	boolean_t		error_send_up = B_FALSE;
+	uint8_t			error_type;
+	uint16_t		l2_len;
+	uint16_t		skip_len;
+	uint8_t			pktbufsz_type;
+	uint16_t		pktbufsz;
+	uint64_t		rcr_entry;
+	uint64_t		*pkt_buf_addr_pp;
+	uint64_t		*pkt_buf_addr_p;
+	uint32_t		buf_offset;
+	uint32_t		bsize;
+	uint32_t		error_disp_cnt;
+	uint32_t		msg_index;
+	p_rx_rbr_ring_t		rx_rbr_p;
+	p_rx_msg_t 		*rx_msg_ring_p;
+	p_rx_msg_t		rx_msg_p;
+	uint16_t		sw_offset_bytes = 0, hdr_size = 0;
+	nxge_status_t		status = NXGE_OK;
+	boolean_t		is_valid = B_FALSE;
+	p_nxge_rx_ring_stats_t	rdc_stats;
+	uint32_t		bytes_read;
+	uint64_t		pkt_type;
+	uint64_t		frag;
+#ifdef	NXGE_DEBUG
+	int			dump_len;
+#endif
+	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_receive_packet"));
+	first_entry = (*mp == NULL) ? B_TRUE : B_FALSE;
+
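+	/*
+	 * Each completion ring entry is a single 64-bit word.  The multi,
+	 * DCF error, packet type, error code, fragment, L2 length, buffer
+	 * size type and packet buffer address fields are extracted below
+	 * with the RCR_* masks and shifts.
+	 */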
+	rcr_entry = *((uint64_t *)rcr_desc_rd_head_p);
+
+	multi = (rcr_entry & RCR_MULTI_MASK);
+	dcf_err = (rcr_entry & RCR_DCF_ERROR_MASK);
+	pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK);
+
+	error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT);
+	frag = (rcr_entry & RCR_FRAG_MASK);
+
+	l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT);
+
+	pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >>
+				RCR_PKTBUFSZ_SHIFT);
+
+	pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) <<
+			RCR_PKT_BUF_ADDR_SHIFT);
+
+	channel = rcr_p->rdc;
+
+	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
+		"==> nxge_receive_packet: entryp $%p entry 0x%0llx "
+		"pkt_buf_addr_pp $%p l2_len %d multi 0x%llx "
+		"error_type 0x%x pkt_type 0x%x  "
+		"pktbufsz_type %d ",
+		rcr_desc_rd_head_p,
+		rcr_entry, pkt_buf_addr_pp, l2_len,
+		multi,
+		error_type,
+		pkt_type,
+		pktbufsz_type));
+
+	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
+		"==> nxge_receive_packet: entryp $%p entry 0x%0llx "
+		"pkt_buf_addr_pp $%p l2_len %d multi 0x%llx "
+		"error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p,
+		rcr_entry, pkt_buf_addr_pp, l2_len,
+		multi,
+		error_type,
+		pkt_type));
+
+	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
+		"==> (rbr) nxge_receive_packet: entry 0x%0llx "
+		"full pkt_buf_addr_pp $%p l2_len %d",
+		rcr_entry, pkt_buf_addr_pp, l2_len));
+
+	/* get the stats ptr */
+	rdc_stats = rcr_p->rdc_stats;
+
+	if (!l2_len) {
+
+		NXGE_DEBUG_MSG((nxgep, RX_CTL,
+			"<== nxge_receive_packet: failed: l2 length is 0."));
+		return;
+	}
+
+	/* shift 6 bits to get the full io address */
+	pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp <<
+				RCR_PKT_BUF_ADDR_SHIFT_FULL);
+	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
+		"==> (rbr) nxge_receive_packet: entry 0x%0llx "
+		"full pkt_buf_addr_pp $%p l2_len %d",
+		rcr_entry, pkt_buf_addr_pp, l2_len));
+
+	rx_rbr_p = rcr_p->rx_rbr_p;
+	rx_msg_ring_p = rx_rbr_p->rx_msg_ring;
+
+	if (first_entry) {
+		hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL :
+			RXDMA_HDR_SIZE_DEFAULT);
+
+		NXGE_DEBUG_MSG((nxgep, RX_CTL,
+			"==> nxge_receive_packet: first entry 0x%016llx "
+			"pkt_buf_addr_pp $%p l2_len %d hdr %d",
+			rcr_entry, pkt_buf_addr_pp, l2_len,
+			hdr_size));
+	}
+
+	MUTEX_ENTER(&rcr_p->lock);
+	MUTEX_ENTER(&rx_rbr_p->lock);
+
+	bytes_read = rcr_p->rcvd_pkt_bytes;
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL,
+		"==> (rbr 1) nxge_receive_packet: entry 0x%0llx "
+		"full pkt_buf_addr_pp $%p l2_len %d",
+		rcr_entry, pkt_buf_addr_pp, l2_len));
+
+	/*
+	 * Packet buffer address in the completion entry points
+	 * to the starting buffer address (offset 0).
+	 * Use the starting buffer address to locate the corresponding
+	 * kernel address.
+	 */
+	status = nxge_rxbuf_pp_to_vp(nxgep, rx_rbr_p,
+			pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p,
+			&buf_offset,
+			&msg_index);
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL,
+		"==> (rbr 2) nxge_receive_packet: entry 0x%0llx "
+		"full pkt_buf_addr_pp $%p l2_len %d",
+		rcr_entry, pkt_buf_addr_pp, l2_len));
+
+	if (status != NXGE_OK) {
+		MUTEX_EXIT(&rx_rbr_p->lock);
+		MUTEX_EXIT(&rcr_p->lock);
+		NXGE_DEBUG_MSG((nxgep, RX_CTL,
+			"<== nxge_receive_packet: found vaddr failed %d",
+				status));
+		return;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
+		"==> (rbr 3) nxge_receive_packet: entry 0x%0llx "
+		"full pkt_buf_addr_pp $%p l2_len %d",
+		rcr_entry, pkt_buf_addr_pp, l2_len));
+
+	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
+		"==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx "
+		"full pkt_buf_addr_pp $%p l2_len %d",
+		msg_index, rcr_entry, pkt_buf_addr_pp, l2_len));
+
+	rx_msg_p = rx_msg_ring_p[msg_index];
+
+	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
+		"==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx "
+		"full pkt_buf_addr_pp $%p l2_len %d",
+		msg_index, rcr_entry, pkt_buf_addr_pp, l2_len));
+
+	switch (pktbufsz_type) {
+	case RCR_PKTBUFSZ_0:
+		bsize = rx_rbr_p->pkt_buf_size0_bytes;
+		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
+			"==> nxge_receive_packet: 0 buf %d", bsize));
+		break;
+	case RCR_PKTBUFSZ_1:
+		bsize = rx_rbr_p->pkt_buf_size1_bytes;
+		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
+			"==> nxge_receive_packet: 1 buf %d", bsize));
+		break;
+	case RCR_PKTBUFSZ_2:
+		bsize = rx_rbr_p->pkt_buf_size2_bytes;
+		NXGE_DEBUG_MSG((nxgep, RX_CTL,
+			"==> nxge_receive_packet: 2 buf %d", bsize));
+		break;
+	case RCR_SINGLE_BLOCK:
+		bsize = rx_msg_p->block_size;
+		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
+			"==> nxge_receive_packet: single %d", bsize));
+
+		break;
+	default:
+		MUTEX_EXIT(&rx_rbr_p->lock);
+		MUTEX_EXIT(&rcr_p->lock);
+		return;
+	}
+
+	DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma,
+		(buf_offset + sw_offset_bytes),
+		(hdr_size + l2_len),
+		DDI_DMA_SYNC_FORCPU);
+
+	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
+		"==> nxge_receive_packet: after first dump:usage count"));
+
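+	/*
+	 * First use of this buffer block: decide whether to bcopy received
+	 * packets into fresh mblks or to loan the DMA buffer up to the
+	 * stack.  Below the low threshold the buffer is loaned up; between
+	 * the thresholds bcopy is used only when the packet buffer size
+	 * class does not exceed the configured bcopy class; at or above
+	 * the high threshold every packet is copied.
+	 */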
+	if (rx_msg_p->cur_usage_cnt == 0) {
+		if (rx_rbr_p->rbr_use_bcopy) {
+			atomic_inc_32(&rx_rbr_p->rbr_consumed);
+			if (rx_rbr_p->rbr_consumed <
+					rx_rbr_p->rbr_threshold_hi) {
+				if (rx_rbr_p->rbr_threshold_lo == 0 ||
+					((rx_rbr_p->rbr_consumed >=
+						rx_rbr_p->rbr_threshold_lo) &&
+						(rx_rbr_p->rbr_bufsize_type >=
+							pktbufsz_type))) {
+					rx_msg_p->rx_use_bcopy = B_TRUE;
+				}
+			} else {
+				rx_msg_p->rx_use_bcopy = B_TRUE;
+			}
+		}
+		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
+			"==> nxge_receive_packet: buf %d (new block) ",
+			bsize));
+
+		rx_msg_p->pkt_buf_size_code = pktbufsz_type;
+		rx_msg_p->pkt_buf_size = bsize;
+		rx_msg_p->cur_usage_cnt = 1;
+		if (pktbufsz_type == RCR_SINGLE_BLOCK) {
+			NXGE_DEBUG_MSG((nxgep, RX2_CTL,
+				"==> nxge_receive_packet: buf %d "
+				"(single block) ",
+				bsize));
+			/*
+			 * Buffer can be reused once the free function
+			 * is called.
+			 */
+			rx_msg_p->max_usage_cnt = 1;
+			buffer_free = B_TRUE;
+		} else {
+			rx_msg_p->max_usage_cnt = rx_msg_p->block_size/bsize;
+			if (rx_msg_p->max_usage_cnt == 1) {
+				buffer_free = B_TRUE;
+			}
+		}
+	} else {
+		rx_msg_p->cur_usage_cnt++;
+		if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) {
+			buffer_free = B_TRUE;
+		}
+	}
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL,
+	    "msgbuf index = %d l2len %d bytes usage %d max_usage %d ",
+		msg_index, l2_len,
+		rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt));
+
+	if ((error_type) || (dcf_err)) {
+		rdc_stats->ierrors++;
+		if (dcf_err) {
+			rdc_stats->dcf_err++;
+#ifdef	NXGE_DEBUG
+			if (rdc_stats->dcf_err == 1) {
+				NXGE_DEBUG_MSG((nxgep, RX_CTL,
+				"nxge_receive_packet: channel %d dcf_err rcr"
+				" 0x%llx", channel, rcr_entry));
+			}
+#endif
+			NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL,
+					NXGE_FM_EREPORT_RDMC_DCF_ERR);
+		} else {
+				/* Update error stats */
+			error_disp_cnt = NXGE_ERROR_SHOW_MAX;
+			rdc_stats->errlog.compl_err_type = error_type;
+			NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL,
+				    NXGE_FM_EREPORT_RDMC_COMPLETION_ERR);
+
+			switch (error_type) {
+				case RCR_L2_ERROR:
+					rdc_stats->l2_err++;
+					if (rdc_stats->l2_err <
+						error_disp_cnt)
+						NXGE_ERROR_MSG((nxgep,
+						NXGE_ERR_CTL,
+						" nxge_receive_packet:"
+						" channel %d RCR L2_ERROR",
+						channel));
+					break;
+				case RCR_L4_CSUM_ERROR:
+					error_send_up = B_TRUE;
+					rdc_stats->l4_cksum_err++;
+					if (rdc_stats->l4_cksum_err <
+						error_disp_cnt)
+						NXGE_ERROR_MSG((nxgep,
+						NXGE_ERR_CTL,
+							" nxge_receive_packet:"
+							" channel %d"
+							" RCR L4_CSUM_ERROR",
+							channel));
+					break;
+				case RCR_FFLP_SOFT_ERROR:
+					error_send_up = B_TRUE;
+					rdc_stats->fflp_soft_err++;
+					if (rdc_stats->fflp_soft_err <
+						error_disp_cnt)
+						NXGE_ERROR_MSG((nxgep,
+							NXGE_ERR_CTL,
+							" nxge_receive_packet:"
+							" channel %d"
+							" RCR FFLP_SOFT_ERROR",
+							channel));
+					break;
+				case RCR_ZCP_SOFT_ERROR:
+					error_send_up = B_TRUE;
+					rdc_stats->zcp_soft_err++;
+					if (rdc_stats->zcp_soft_err <
+						error_disp_cnt)
+						NXGE_ERROR_MSG((nxgep,
+							NXGE_ERR_CTL,
+							" nxge_receive_packet:"
+							" Channel %d"
+							" RCR ZCP_SOFT_ERROR",
+							channel));
+					break;
+				default:
+					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+							" nxge_receive_packet:"
+							" Channel %d"
+							" RCR entry 0x%llx"
+							" error 0x%x",
+							channel, rcr_entry,
+							error_type));
+					break;
+			}
+		}
+
+		/*
+		 * Update and repost buffer block if max usage
+		 * count is reached.
+		 */
+		if (error_send_up == B_FALSE) {
+			if (buffer_free == B_TRUE) {
+				rx_msg_p->free = B_TRUE;
+			}
+
+			atomic_inc_32(&rx_msg_p->ref_cnt);
+			MUTEX_EXIT(&rx_rbr_p->lock);
+			MUTEX_EXIT(&rcr_p->lock);
+			nxge_freeb(rx_msg_p);
+			return;
+		}
+	}
+
+	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
+		"==> nxge_receive_packet: DMA sync second "));
+
+	skip_len = sw_offset_bytes + hdr_size;
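+	/*
+	 * Either loan the receive buffer up to the stack as-is (nxge_dupb),
+	 * or bcopy just the payload, past the software offset and packet
+	 * header, into a newly allocated mblk (nxge_dupb_bcopy).
+	 */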
+	if (!rx_msg_p->rx_use_bcopy) {
+		nmp = nxge_dupb(rx_msg_p, buf_offset, bsize);
+	} else {
+		nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len, l2_len);
+		NXGE_DEBUG_MSG((nxgep, RX_CTL,
+			"==> nxge_receive_packet: use bcopy "
+			"rbr consumed %d "
+			"pktbufsz_type %d "
+			"offset %d "
+			"hdr_size %d l2_len %d "
+			"nmp->b_rptr $%p",
+			rx_rbr_p->rbr_consumed,
+			pktbufsz_type,
+			buf_offset, hdr_size, l2_len,
+			nmp->b_rptr));
+	}
+	if (nmp != NULL) {
+		pktbufsz = nxge_get_pktbuf_size(nxgep, pktbufsz_type,
+			rx_rbr_p->rbr_cfgb);
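+		/*
+		 * For loaned-up (non-bcopy) buffers, adjust b_rptr/b_wptr
+		 * in place: the first entry skips the receive header and
+		 * each entry is clamped to the packet buffer size, while
+		 * bytes_read accumulates how much of l2_len has been
+		 * consumed so the final segment gets only the remainder.
+		 */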
+		if (!rx_msg_p->rx_use_bcopy) {
+			if (first_entry) {
+				bytes_read = 0;
+				nmp->b_rptr = &nmp->b_rptr[skip_len];
+				if (l2_len > pktbufsz - skip_len)
+					nmp->b_wptr = &nmp->b_rptr[pktbufsz
+						- skip_len];
+				else
+					nmp->b_wptr = &nmp->b_rptr[l2_len];
+			} else {
+				if (l2_len - bytes_read > pktbufsz)
+					nmp->b_wptr = &nmp->b_rptr[pktbufsz];
+				else
+					nmp->b_wptr =
+					    &nmp->b_rptr[l2_len - bytes_read];
+			}
+			bytes_read += nmp->b_wptr - nmp->b_rptr;
+			NXGE_DEBUG_MSG((nxgep, RX_CTL,
+				"==> nxge_receive_packet after dupb: "
+				"rbr consumed %d "
+				"pktbufsz_type %d "
+				"nmp $%p rptr $%p wptr $%p "
+				"buf_offset %d bsize %d l2_len %d skip_len %d",
+				rx_rbr_p->rbr_consumed,
+				pktbufsz_type,
+				nmp, nmp->b_rptr, nmp->b_wptr,
+				buf_offset, bsize, l2_len, skip_len));
+		}
+	} else {
+		cmn_err(CE_WARN, "!nxge_receive_packet: "
+			"update stats (error)");
+	}
+	if (buffer_free == B_TRUE) {
+		rx_msg_p->free = B_TRUE;
+	}
+
+	/*
+	 * ERROR, FRAG and PKT_TYPE are only reported
+	 * in the first entry.
+	 * If a packet is not fragmented and no error bit is set, then
+	 * L4 checksum is OK.
+	 */
+	is_valid = (nmp != NULL);
+	rdc_stats->ibytes += l2_len;
+	rdc_stats->ipackets++;
+	MUTEX_EXIT(&rx_rbr_p->lock);
+	MUTEX_EXIT(&rcr_p->lock);
+
+	if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) {
+		atomic_inc_32(&rx_msg_p->ref_cnt);
+		nxge_freeb(rx_msg_p);
+	}
+
+	if (is_valid) {
+		nmp->b_cont = NULL;
+		if (first_entry) {
+			*mp = nmp;
+			*mp_cont = NULL;
+		} else
+			*mp_cont = nmp;
+	}
+
+	/*
+	 * Update stats and hardware checksumming.
+	 */
+	if (is_valid && !multi) {
+
+		is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP ||
+				pkt_type == RCR_PKT_IS_UDP) ?
+					B_TRUE: B_FALSE);
+
+		NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_receive_packet: "
+			"is_valid 0x%x multi 0x%llx pkt %d frag %d error %d",
+			is_valid, multi, is_tcp_udp, frag, error_type));
+
+		if (is_tcp_udp && !frag && !error_type) {
+			(void) hcksum_assoc(nmp, NULL, NULL, 0, 0, 0, 0,
+				HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0);
+			NXGE_DEBUG_MSG((nxgep, RX_CTL,
+				"==> nxge_receive_packet: Full tcp/udp cksum "
+				"is_valid 0x%x multi 0x%llx pkt %d frag %d "
+				"error %d",
+				is_valid, multi, is_tcp_udp, frag, error_type));
+		}
+	}
+
+	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
+		"==> nxge_receive_packet: *mp 0x%016llx", *mp));
+
+	*multi_p = (multi == RCR_MULTI_MASK);
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: "
+		"multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx",
+		*multi_p, nmp, *mp, *mp_cont));
+}
+
+/*ARGSUSED*/
+static nxge_status_t
+nxge_rx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp,
+						rx_dma_ctl_stat_t cs)
+{
+	p_nxge_rx_ring_stats_t	rdc_stats;
+	npi_handle_t		handle;
+	npi_status_t		rs;
+	boolean_t		rxchan_fatal = B_FALSE;
+	boolean_t		rxport_fatal = B_FALSE;
+	uint8_t			channel;
+	uint8_t			portn;
+	nxge_status_t		status = NXGE_OK;
+	uint32_t		error_disp_cnt = NXGE_ERROR_SHOW_MAX;
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_err_evnts"));
+
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	channel = ldvp->channel;
+	portn = nxgep->mac.portnum;
+	rdc_stats = &nxgep->statsp->rdc_stats[ldvp->vdma_index];
+
+	if (cs.bits.hdw.rbr_tmout) {
+		rdc_stats->rx_rbr_tmout++;
+		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
+					NXGE_FM_EREPORT_RDMC_RBR_TMOUT);
+		rxchan_fatal = B_TRUE;
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_rx_err_evnts: rx_rbr_timeout"));
+	}
+	if (cs.bits.hdw.rsp_cnt_err) {
+		rdc_stats->rsp_cnt_err++;
+		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
+					NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR);
+		rxchan_fatal = B_TRUE;
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_rx_err_evnts(channel %d): "
+			"rsp_cnt_err", channel));
+	}
+	if (cs.bits.hdw.byte_en_bus) {
+		rdc_stats->byte_en_bus++;
+		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
+					NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS);
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_rx_err_evnts(channel %d): "
+			"fatal error: byte_en_bus", channel));
+		rxchan_fatal = B_TRUE;
+	}
+	if (cs.bits.hdw.rsp_dat_err) {
+		rdc_stats->rsp_dat_err++;
+		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
+					NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR);
+		rxchan_fatal = B_TRUE;
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_rx_err_evnts(channel %d): "
+			"fatal error: rsp_dat_err", channel));
+	}
+	if (cs.bits.hdw.rcr_ack_err) {
+		rdc_stats->rcr_ack_err++;
+		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
+					NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR);
+		rxchan_fatal = B_TRUE;
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_rx_err_evnts(channel %d): "
+			"fatal error: rcr_ack_err", channel));
+	}
+	if (cs.bits.hdw.dc_fifo_err) {
+		rdc_stats->dc_fifo_err++;
+		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
+					NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR);
+		/* This error is fatal to the port, not to the channel. */
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_rx_err_evnts(channel %d): "
+			"dc_fifo_err", channel));
+		rxport_fatal = B_TRUE;
+	}
+	if ((cs.bits.hdw.rcr_sha_par) || (cs.bits.hdw.rbr_pre_par)) {
+		if ((rs = npi_rxdma_ring_perr_stat_get(handle,
+				&rdc_stats->errlog.pre_par,
+				&rdc_stats->errlog.sha_par))
+				!= NPI_SUCCESS) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"==> nxge_rx_err_evnts(channel %d): "
+				"rcr_sha_par: get perr", channel));
+			return (NXGE_ERROR | rs);
+		}
+		if (cs.bits.hdw.rcr_sha_par) {
+			rdc_stats->rcr_sha_par++;
+			NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
+					NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR);
+			rxchan_fatal = B_TRUE;
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"==> nxge_rx_err_evnts(channel %d): "
+				"fatal error: rcr_sha_par", channel));
+		}
+		if (cs.bits.hdw.rbr_pre_par) {
+			rdc_stats->rbr_pre_par++;
+			NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
+					NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR);
+			rxchan_fatal = B_TRUE;
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"==> nxge_rx_err_evnts(channel %d): "
+				"fatal error: rbr_pre_par", channel));
+		}
+	}
+	if (cs.bits.hdw.port_drop_pkt) {
+		rdc_stats->port_drop_pkt++;
+		if (rdc_stats->port_drop_pkt < error_disp_cnt)
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"==> nxge_rx_err_evnts (channel %d): "
+				"port_drop_pkt", channel));
+	}
+	if (cs.bits.hdw.wred_drop) {
+		rdc_stats->wred_drop++;
+		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_rx_err_evnts(channel %d): "
+			"wred_drop", channel));
+	}
+	if (cs.bits.hdw.rbr_pre_empty) {
+		rdc_stats->rbr_pre_empty++;
+		if (rdc_stats->rbr_pre_empty < error_disp_cnt)
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"==> nxge_rx_err_evnts(channel %d): "
+				"rbr_pre_empty", channel));
+	}
+	if (cs.bits.hdw.rcr_shadow_full) {
+		rdc_stats->rcr_shadow_full++;
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_rx_err_evnts(channel %d): "
+			"rcr_shadow_full", channel));
+	}
+	if (cs.bits.hdw.config_err) {
+		rdc_stats->config_err++;
+		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
+					NXGE_FM_EREPORT_RDMC_CONFIG_ERR);
+		rxchan_fatal = B_TRUE;
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_rx_err_evnts(channel %d): "
+			"config error", channel));
+	}
+	if (cs.bits.hdw.rcrincon) {
+		rdc_stats->rcrincon++;
+		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
+					NXGE_FM_EREPORT_RDMC_RCRINCON);
+		rxchan_fatal = B_TRUE;
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_rx_err_evnts(channel %d): "
+			"fatal error: rcrincon error", channel));
+	}
+	if (cs.bits.hdw.rcrfull) {
+		rdc_stats->rcrfull++;
+		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
+					NXGE_FM_EREPORT_RDMC_RCRFULL);
+		rxchan_fatal = B_TRUE;
+		if (rdc_stats->rcrfull < error_disp_cnt)
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"==> nxge_rx_err_evnts(channel %d): "
+				"fatal error: rcrfull error", channel));
+	}
+	if (cs.bits.hdw.rbr_empty) {
+		rdc_stats->rbr_empty++;
+		if (rdc_stats->rbr_empty < error_disp_cnt)
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"==> nxge_rx_err_evnts(channel %d): "
+				"rbr empty error", channel));
+	}
+	if (cs.bits.hdw.rbrfull) {
+		rdc_stats->rbrfull++;
+		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
+					NXGE_FM_EREPORT_RDMC_RBRFULL);
+		rxchan_fatal = B_TRUE;
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_rx_err_evnts(channel %d): "
+			"fatal error: rbr_full error", channel));
+	}
+	if (cs.bits.hdw.rbrlogpage) {
+		rdc_stats->rbrlogpage++;
+		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
+					NXGE_FM_EREPORT_RDMC_RBRLOGPAGE);
+		rxchan_fatal = B_TRUE;
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_rx_err_evnts(channel %d): "
+			"fatal error: rbr logical page error", channel));
+	}
+	if (cs.bits.hdw.cfiglogpage) {
+		rdc_stats->cfiglogpage++;
+		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
+					NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE);
+		rxchan_fatal = B_TRUE;
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_rx_err_evnts(channel %d): "
+			"fatal error: cfig logical page error", channel));
+	}
+
+	if (rxport_fatal)  {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				" nxge_rx_err_evnts: "
+				" fatal error on Port #%d\n",
+				portn));
+		status = nxge_ipp_fatal_err_recover(nxgep);
+		if (status == NXGE_OK) {
+			FM_SERVICE_RESTORED(nxgep);
+		}
+	}
+
+	if (rxchan_fatal) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				" nxge_rx_err_evnts: "
+				" fatal error on Channel #%d\n",
+				channel));
+		status = nxge_rxdma_fatal_err_recover(nxgep, channel);
+		if (status == NXGE_OK) {
+			FM_SERVICE_RESTORED(nxgep);
+		}
+	}
+
+	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rx_err_evnts"));
+
+	return (status);
+}
+
+static nxge_status_t
+nxge_map_rxdma(p_nxge_t nxgep)
+{
+	int			i, ndmas;
+	uint16_t		channel;
+	p_rx_rbr_rings_t 	rx_rbr_rings;
+	p_rx_rbr_ring_t		*rbr_rings;
+	p_rx_rcr_rings_t 	rx_rcr_rings;
+	p_rx_rcr_ring_t		*rcr_rings;
+	p_rx_mbox_areas_t 	rx_mbox_areas_p;
+	p_rx_mbox_t		*rx_mbox_p;
+	p_nxge_dma_pool_t	dma_buf_poolp;
+	p_nxge_dma_pool_t	dma_cntl_poolp;
+	p_nxge_dma_common_t	*dma_buf_p;
+	p_nxge_dma_common_t	*dma_cntl_p;
+	uint32_t		*num_chunks;
+	nxge_status_t		status = NXGE_OK;
+#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
+	p_nxge_dma_common_t	t_dma_buf_p;
+	p_nxge_dma_common_t	t_dma_cntl_p;
+#endif
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma"));
+
+	dma_buf_poolp = nxgep->rx_buf_pool_p;
+	dma_cntl_poolp = nxgep->rx_cntl_pool_p;
+
+	if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"<== nxge_map_rxdma: buf not allocated"));
+		return (NXGE_ERROR);
+	}
+
+	ndmas = dma_buf_poolp->ndmas;
+	if (!ndmas) {
+		NXGE_DEBUG_MSG((nxgep, RX_CTL,
+			"<== nxge_map_rxdma: no dma allocated"));
+		return (NXGE_ERROR);
+	}
+
+	num_chunks = dma_buf_poolp->num_chunks;
+	dma_buf_p = dma_buf_poolp->dma_buf_pool_p;
+	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
+
+	rx_rbr_rings = (p_rx_rbr_rings_t)
+		KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP);
+	rbr_rings = (p_rx_rbr_ring_t *)
+		KMEM_ZALLOC(sizeof (p_rx_rbr_ring_t) * ndmas, KM_SLEEP);
+	rx_rcr_rings = (p_rx_rcr_rings_t)
+		KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP);
+	rcr_rings = (p_rx_rcr_ring_t *)
+		KMEM_ZALLOC(sizeof (p_rx_rcr_ring_t) * ndmas, KM_SLEEP);
+	rx_mbox_areas_p = (p_rx_mbox_areas_t)
+		KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP);
+	rx_mbox_p = (p_rx_mbox_t *)
+		KMEM_ZALLOC(sizeof (p_rx_mbox_t) * ndmas, KM_SLEEP);
+
+	/*
+	 * Timeout should be set based on the system clock divider.
+	 * The following timeout value of 1 assumes that the
+	 * granularity (1000) is 3 microseconds running at 300MHz.
+	 */
+
+	nxgep->intr_threshold = RXDMA_RCR_PTHRES_DEFAULT;
+	nxgep->intr_timeout = RXDMA_RCR_TO_DEFAULT;
+
+	/*
+	 * Map descriptors from the buffer pools for each DMA channel.
+	 */
+	for (i = 0; i < ndmas; i++) {
+		/*
+		 * Set up and prepare buffer blocks, descriptors
+		 * and mailbox.
+		 */
+		channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel;
+		status = nxge_map_rxdma_channel(nxgep, channel,
+				(p_nxge_dma_common_t *)&dma_buf_p[i],
+				(p_rx_rbr_ring_t *)&rbr_rings[i],
+				num_chunks[i],
+				(p_nxge_dma_common_t *)&dma_cntl_p[i],
+				(p_rx_rcr_ring_t *)&rcr_rings[i],
+				(p_rx_mbox_t *)&rx_mbox_p[i]);
+		if (status != NXGE_OK) {
+			goto nxge_map_rxdma_fail1;
+		}
+		rbr_rings[i]->index = (uint16_t)i;
+		rcr_rings[i]->index = (uint16_t)i;
+		rcr_rings[i]->rdc_stats = &nxgep->statsp->rdc_stats[i];
+
+#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
+		if (nxgep->niu_type == N2_NIU && NXGE_DMA_BLOCK == 1) {
+			rbr_rings[i]->hv_set = B_FALSE;
+			t_dma_buf_p = (p_nxge_dma_common_t)dma_buf_p[i];
+			t_dma_cntl_p =
+				(p_nxge_dma_common_t)dma_cntl_p[i];
+
+			rbr_rings[i]->hv_rx_buf_base_ioaddr_pp =
+				(uint64_t)t_dma_buf_p->orig_ioaddr_pp;
+			rbr_rings[i]->hv_rx_buf_ioaddr_size =
+				(uint64_t)t_dma_buf_p->orig_alength;
+			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+				"==> nxge_map_rxdma_channel: "
+				"channel %d "
+				"data buf base io $%p ($%p) "
+				"size 0x%llx (%d 0x%x)",
+				channel,
+				rbr_rings[i]->hv_rx_buf_base_ioaddr_pp,
+				t_dma_cntl_p->ioaddr_pp,
+				rbr_rings[i]->hv_rx_buf_ioaddr_size,
+				t_dma_buf_p->orig_alength,
+				t_dma_buf_p->orig_alength));
+
+			rbr_rings[i]->hv_rx_cntl_base_ioaddr_pp =
+				(uint64_t)t_dma_cntl_p->orig_ioaddr_pp;
+			rbr_rings[i]->hv_rx_cntl_ioaddr_size =
+				(uint64_t)t_dma_cntl_p->orig_alength;
+			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+				"==> nxge_map_rxdma_channel: "
+				"channel %d "
+				"cntl base io $%p ($%p) "
+				"size 0x%llx (%d 0x%x)",
+				channel,
+				rbr_rings[i]->hv_rx_cntl_base_ioaddr_pp,
+				t_dma_cntl_p->ioaddr_pp,
+				rbr_rings[i]->hv_rx_cntl_ioaddr_size,
+				t_dma_cntl_p->orig_alength,
+				t_dma_cntl_p->orig_alength));
+		}
+
+#endif	/* sun4v and NIU_LP_WORKAROUND */
+	}
+
+	rx_rbr_rings->ndmas = rx_rcr_rings->ndmas = ndmas;
+	rx_rbr_rings->rbr_rings = rbr_rings;
+	nxgep->rx_rbr_rings = rx_rbr_rings;
+	rx_rcr_rings->rcr_rings = rcr_rings;
+	nxgep->rx_rcr_rings = rx_rcr_rings;
+
+	rx_mbox_areas_p->rxmbox_areas = rx_mbox_p;
+	nxgep->rx_mbox_areas_p = rx_mbox_areas_p;
+
+	goto nxge_map_rxdma_exit;
+
+nxge_map_rxdma_fail1:
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+		"==> nxge_map_rxdma: unmap rbr,rcr "
+		"(status 0x%x channel %d i %d)",
+		status, channel, i));
+	for (; i >= 0; i--) {
+		channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel;
+		nxge_unmap_rxdma_channel(nxgep, channel,
+			rbr_rings[i],
+			rcr_rings[i],
+			rx_mbox_p[i]);
+	}
+
+	KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas);
+	KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t));
+	KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas);
+	KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t));
+	KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas);
+	KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t));
+
+nxge_map_rxdma_exit:
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+		"<== nxge_map_rxdma: "
+		"(status 0x%x channel %d)",
+		status, channel));
+
+	return (status);
+}
+
+static void
+nxge_unmap_rxdma(p_nxge_t nxgep)
+{
+	int			i, ndmas;
+	uint16_t		channel;
+	p_rx_rbr_rings_t 	rx_rbr_rings;
+	p_rx_rbr_ring_t		*rbr_rings;
+	p_rx_rcr_rings_t 	rx_rcr_rings;
+	p_rx_rcr_ring_t		*rcr_rings;
+	p_rx_mbox_areas_t 	rx_mbox_areas_p;
+	p_rx_mbox_t		*rx_mbox_p;
+	p_nxge_dma_pool_t	dma_buf_poolp;
+	p_nxge_dma_pool_t	dma_cntl_poolp;
+	p_nxge_dma_common_t	*dma_buf_p;
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_unmap_rxdma"));
+
+	dma_buf_poolp = nxgep->rx_buf_pool_p;
+	dma_cntl_poolp = nxgep->rx_cntl_pool_p;
+
+	if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"<== nxge_unmap_rxdma: NULL buf pointers"));
+		return;
+	}
+
+	rx_rbr_rings = nxgep->rx_rbr_rings;
+	rx_rcr_rings = nxgep->rx_rcr_rings;
+	if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"<== nxge_unmap_rxdma: NULL ring pointers"));
+		return;
+	}
+	ndmas = rx_rbr_rings->ndmas;
+	if (!ndmas) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"<== nxge_unmap_rxdma: no channel"));
+		return;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+		"==> nxge_unmap_rxdma (ndmas %d)", ndmas));
+	rbr_rings = rx_rbr_rings->rbr_rings;
+	rcr_rings = rx_rcr_rings->rcr_rings;
+	rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
+	rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
+	dma_buf_p = dma_buf_poolp->dma_buf_pool_p;
+
+	for (i = 0; i < ndmas; i++) {
+		channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel;
+		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+			"==> nxge_unmap_rxdma (ndmas %d) channel %d",
+				ndmas, channel));
+		(void) nxge_unmap_rxdma_channel(nxgep, channel,
+				(p_rx_rbr_ring_t)rbr_rings[i],
+				(p_rx_rcr_ring_t)rcr_rings[i],
+				(p_rx_mbox_t)rx_mbox_p[i]);
+	}
+
+	KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t));
+	KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas);
+	KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t));
+	KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas);
+	KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t));
+	KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas);
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+		"<== nxge_unmap_rxdma"));
+}
+
+nxge_status_t
+nxge_map_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
+    p_nxge_dma_common_t *dma_buf_p,  p_rx_rbr_ring_t *rbr_p,
+    uint32_t num_chunks,
+    p_nxge_dma_common_t *dma_cntl_p, p_rx_rcr_ring_t *rcr_p,
+    p_rx_mbox_t *rx_mbox_p)
+{
+	int	status = NXGE_OK;
+
+	/*
+	 * Set up and prepare buffer blocks, descriptors
+	 * and mailbox.
+	 */
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+		"==> nxge_map_rxdma_channel (channel %d)", channel));
+	/*
+	 * Receive buffer blocks
+	 */
+	status = nxge_map_rxdma_channel_buf_ring(nxgep, channel,
+			dma_buf_p, rbr_p, num_chunks);
+	if (status != NXGE_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_map_rxdma_channel (channel %d): "
+			"map buffer failed 0x%x", channel, status));
+		goto nxge_map_rxdma_channel_exit;
+	}
+
+	/*
+	 * Receive block ring, completion ring and mailbox.
+	 */
+	status = nxge_map_rxdma_channel_cfg_ring(nxgep, channel,
+			dma_cntl_p, rbr_p, rcr_p, rx_mbox_p);
+	if (status != NXGE_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_map_rxdma_channel (channel %d): "
+			"map config failed 0x%x", channel, status));
+		goto nxge_map_rxdma_channel_fail2;
+	}
+
+	goto nxge_map_rxdma_channel_exit;
+
+nxge_map_rxdma_channel_fail3:
+	/* Free rcr and mailbox. */
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+		"==> nxge_map_rxdma_channel: free rbr/rcr "
+		"(status 0x%x channel %d)",
+		status, channel));
+	nxge_unmap_rxdma_channel_cfg_ring(nxgep,
+		*rcr_p, *rx_mbox_p);
+
+nxge_map_rxdma_channel_fail2:
+	/* Free buffer blocks */
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+		"==> nxge_map_rxdma_channel: free rx buffers"
+		"(nxgep 0x%x status 0x%x channel %d)",
+		nxgep, status, channel));
+	nxge_unmap_rxdma_channel_buf_ring(nxgep, *rbr_p);
+
+nxge_map_rxdma_channel_exit:
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+		"<== nxge_map_rxdma_channel: "
+		"(nxgep 0x%x status 0x%x channel %d)",
+		nxgep, status, channel));
+
+	return (status);
+}
+
+/*ARGSUSED*/
+static void
+nxge_unmap_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
+    p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
+{
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+		"==> nxge_unmap_rxdma_channel (channel %d)", channel));
+
+	/*
+	 * unmap receive block ring, completion ring and mailbox.
+	 */
+	(void) nxge_unmap_rxdma_channel_cfg_ring(nxgep,
+			rcr_p, rx_mbox_p);
+
+	/* unmap buffer blocks */
+	(void) nxge_unmap_rxdma_channel_buf_ring(nxgep, rbr_p);
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma_channel"));
+}
+
+/*ARGSUSED*/
+static nxge_status_t
+nxge_map_rxdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel,
+    p_nxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p,
+    p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p)
+{
+	p_rx_rbr_ring_t 	rbrp;
+	p_rx_rcr_ring_t 	rcrp;
+	p_rx_mbox_t 		mboxp;
+	p_nxge_dma_common_t 	cntl_dmap;
+	p_nxge_dma_common_t 	dmap;
+	p_rx_msg_t 		*rx_msg_ring;
+	p_rx_msg_t 		rx_msg_p;
+	p_rbr_cfig_a_t		rcfga_p;
+	p_rbr_cfig_b_t		rcfgb_p;
+	p_rcrcfig_a_t		cfga_p;
+	p_rcrcfig_b_t		cfgb_p;
+	p_rxdma_cfig1_t		cfig1_p;
+	p_rxdma_cfig2_t		cfig2_p;
+	p_rbr_kick_t		kick_p;
+	uint32_t		dmaaddrp;
+	uint32_t		*rbr_vaddrp;
+	uint32_t		bkaddr;
+	nxge_status_t		status = NXGE_OK;
+	int			i;
+	uint32_t 		nxge_port_rcr_size;
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+		"==> nxge_map_rxdma_channel_cfg_ring"));
+
+	cntl_dmap = *dma_cntl_p;
+
+	/* Map in the receive block ring */
+	rbrp = *rbr_p;
+	dmap = (p_nxge_dma_common_t)&rbrp->rbr_desc;
+	nxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4);
+	/*
+	 * Zero out buffer block ring descriptors.
+	 */
+	bzero((caddr_t)dmap->kaddrp, dmap->alength);
+
+	rcfga_p = &(rbrp->rbr_cfga);
+	rcfgb_p = &(rbrp->rbr_cfgb);
+	kick_p = &(rbrp->rbr_kick);
+	rcfga_p->value = 0;
+	rcfgb_p->value = 0;
+	kick_p->value = 0;
+	rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress;
+	rcfga_p->value = (rbrp->rbr_addr &
+				(RBR_CFIG_A_STDADDR_MASK |
+				RBR_CFIG_A_STDADDR_BASE_MASK));
+	rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT);
+
+	rcfgb_p->bits.ldw.bufsz0 = rbrp->pkt_buf_size0;
+	rcfgb_p->bits.ldw.vld0 = 1;
+	rcfgb_p->bits.ldw.bufsz1 = rbrp->pkt_buf_size1;
+	rcfgb_p->bits.ldw.vld1 = 1;
+	rcfgb_p->bits.ldw.bufsz2 = rbrp->pkt_buf_size2;
+	rcfgb_p->bits.ldw.vld2 = 1;
+	rcfgb_p->bits.ldw.bksize = nxgep->rx_bksize_code;
+
+	/*
+	 * For each buffer block, enter receive block address to the ring.
+	 */
+	rbr_vaddrp = (uint32_t *)dmap->kaddrp;
+	rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp;
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+		"==> nxge_map_rxdma_channel_cfg_ring: channel %d "
+		"rbr_vaddrp $%p", dma_channel, rbr_vaddrp));
+
+	rx_msg_ring = rbrp->rx_msg_ring;
+	for (i = 0; i < rbrp->tnblocks; i++) {
+		rx_msg_p = rx_msg_ring[i];
+		rx_msg_p->nxgep = nxgep;
+		rx_msg_p->rx_rbr_p = rbrp;
+		bkaddr = (uint32_t)
+			((rx_msg_p->buf_dma.dma_cookie.dmac_laddress
+				>> RBR_BKADDR_SHIFT));
+		rx_msg_p->free = B_FALSE;
+		rx_msg_p->max_usage_cnt = 0xbaddcafe;
+
+		*rbr_vaddrp++ = bkaddr;
+	}
+
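+	/*
+	 * All rbb_max buffers have just been posted: the kick shadow holds
+	 * the count of blocks added, the software write index points at the
+	 * last posted entry and the read index starts at zero.
+	 */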
+	kick_p->bits.ldw.bkadd = rbrp->rbb_max;
+	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
+
+	rbrp->rbr_rd_index = 0;
+
+	rbrp->rbr_consumed = 0;
+	rbrp->rbr_use_bcopy = B_TRUE;
+	rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0;
+	/*
+	 * Do bcopy on packets greater than bcopy size once
+	 * the lo threshold is reached.
+	 * This lo threshold should be less than the hi threshold.
+	 *
+	 * Do bcopy on every packet once the hi threshold is reached.
+	 */
+	if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) {
+		/* default it to use hi */
+		nxge_rx_threshold_lo = nxge_rx_threshold_hi;
+	}
+
+	if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) {
+		nxge_rx_buf_size_type = NXGE_RBR_TYPE2;
+	}
+	rbrp->rbr_bufsize_type = nxge_rx_buf_size_type;
+
+	switch (nxge_rx_threshold_hi) {
+	default:
+	case	NXGE_RX_COPY_NONE:
+		/* Do not do bcopy at all */
+		rbrp->rbr_use_bcopy = B_FALSE;
+		rbrp->rbr_threshold_hi = rbrp->rbb_max;
+		break;
+
+	case NXGE_RX_COPY_1:
+	case NXGE_RX_COPY_2:
+	case NXGE_RX_COPY_3:
+	case NXGE_RX_COPY_4:
+	case NXGE_RX_COPY_5:
+	case NXGE_RX_COPY_6:
+	case NXGE_RX_COPY_7:
+		rbrp->rbr_threshold_hi =
+			rbrp->rbb_max *
+			(nxge_rx_threshold_hi)/NXGE_RX_BCOPY_SCALE;
+		break;
+
+	case NXGE_RX_COPY_ALL:
+		rbrp->rbr_threshold_hi = 0;
+		break;
+	}
+
+	switch (nxge_rx_threshold_lo) {
+	default:
+	case	NXGE_RX_COPY_NONE:
+		/* Do not do bcopy at all */
+		if (rbrp->rbr_use_bcopy) {
+			rbrp->rbr_use_bcopy = B_FALSE;
+		}
+		rbrp->rbr_threshold_lo = rbrp->rbb_max;
+		break;
+
+	case NXGE_RX_COPY_1:
+	case NXGE_RX_COPY_2:
+	case NXGE_RX_COPY_3:
+	case NXGE_RX_COPY_4:
+	case NXGE_RX_COPY_5:
+	case NXGE_RX_COPY_6:
+	case NXGE_RX_COPY_7:
+		rbrp->rbr_threshold_lo =
+			rbrp->rbb_max *
+			(nxge_rx_threshold_lo)/NXGE_RX_BCOPY_SCALE;
+		break;
+
+	case NXGE_RX_COPY_ALL:
+		rbrp->rbr_threshold_lo = 0;
+		break;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL,
+		"nxge_map_rxdma_channel_cfg_ring: channel %d "
+		"rbb_max %d "
+		"rbrp->rbr_bufsize_type %d "
+		"rbb_threshold_hi %d "
+		"rbb_threshold_lo %d",
+		dma_channel,
+		rbrp->rbb_max,
+		rbrp->rbr_bufsize_type,
+		rbrp->rbr_threshold_hi,
+		rbrp->rbr_threshold_lo));
+
+	rbrp->page_valid.value = 0;
+	rbrp->page_mask_1.value = rbrp->page_mask_2.value = 0;
+	rbrp->page_value_1.value = rbrp->page_value_2.value = 0;
+	rbrp->page_reloc_1.value = rbrp->page_reloc_2.value = 0;
+	rbrp->page_hdl.value = 0;
+
+	rbrp->page_valid.bits.ldw.page0 = 1;
+	rbrp->page_valid.bits.ldw.page1 = 1;
+
+	/* Map in the receive completion ring */
+	rcrp = (p_rx_rcr_ring_t)
+		KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP);
+	rcrp->rdc = dma_channel;
+
+	nxge_port_rcr_size = nxgep->nxge_port_rcr_size;
+	rcrp->comp_size = nxge_port_rcr_size;
+	rcrp->comp_wrap_mask = nxge_port_rcr_size - 1;
+
+	rcrp->max_receive_pkts = nxge_max_rx_pkts;
+
+	dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
+	nxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size,
+			sizeof (rcr_entry_t));
+	rcrp->comp_rd_index = 0;
+	rcrp->comp_wt_index = 0;
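+	/*
+	 * head_p tracks the kernel virtual address of the next completion
+	 * entry to read and head_pp the corresponding I/O (DMA) address;
+	 * last_p/last_pp mark the final entry so the read pointers can wrap.
+	 */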
+	rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
+		(p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
+	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
+		(p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
+
+	rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
+			(nxge_port_rcr_size - 1);
+	rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
+			(nxge_port_rcr_size - 1);
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+		"==> nxge_map_rxdma_channel_cfg_ring: "
+		"channel %d "
+		"rbr_vaddrp $%p "
+		"rcr_desc_rd_head_p $%p "
+		"rcr_desc_rd_head_pp $%p "
+		"rcr_desc_rd_last_p $%p "
+		"rcr_desc_rd_last_pp $%p ",
+		dma_channel,
+		rbr_vaddrp,
+		rcrp->rcr_desc_rd_head_p,
+		rcrp->rcr_desc_rd_head_pp,
+		rcrp->rcr_desc_last_p,
+		rcrp->rcr_desc_last_pp));
+
+	/*
+	 * Zero out the receive completion ring descriptors.
+	 */
+	bzero((caddr_t)dmap->kaddrp, dmap->alength);
+	rcrp->intr_timeout = nxgep->intr_timeout;
+	rcrp->intr_threshold = nxgep->intr_threshold;
+	rcrp->full_hdr_flag = B_FALSE;
+	rcrp->sw_priv_hdr_len = 0;
+
+	cfga_p = &(rcrp->rcr_cfga);
+	cfgb_p = &(rcrp->rcr_cfgb);
+	cfga_p->value = 0;
+	cfgb_p->value = 0;
+	rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress;
+	cfga_p->value = (rcrp->rcr_addr &
+			    (RCRCFIG_A_STADDR_MASK |
+			    RCRCFIG_A_STADDR_BASE_MASK));
+
+	cfga_p->value |= ((uint64_t)rcrp->comp_size <<
+				RCRCFIG_A_LEN_SHIF);
+
+	/*
+	 * Timeout should be set based on the system clock divider.
+	 * The following timeout value of 1 assumes that the
+	 * granularity (1000) is 3 microseconds running at 300MHz.
+	 */
+	cfgb_p->bits.ldw.pthres = rcrp->intr_threshold;
+	cfgb_p->bits.ldw.timeout = rcrp->intr_timeout;
+	cfgb_p->bits.ldw.entout = 1;
+
+	/* Map in the mailbox */
+	mboxp = (p_rx_mbox_t)
+			KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP);
+	dmap = (p_nxge_dma_common_t)&mboxp->rx_mbox;
+	nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t));
+	cfig1_p = (p_rxdma_cfig1_t)&mboxp->rx_cfg1;
+	cfig2_p = (p_rxdma_cfig2_t)&mboxp->rx_cfg2;
+	cfig1_p->value = cfig2_p->value = 0;
+
+	mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress;
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+		"==> nxge_map_rxdma_channel_cfg_ring: "
+		"channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx",
+		dma_channel, cfig1_p->value, cfig2_p->value,
+		mboxp->mbox_addr));
+
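+	/*
+	 * Split the mailbox DMA address across the two config registers:
+	 * the 12 bits above bit 31 are programmed into CFIG1 (mbaddr_h) and
+	 * the masked, shifted low-order bits into CFIG2 (mbaddr).
+	 */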
+	dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress >> 32
+			& 0xfff);
+	cfig1_p->bits.ldw.mbaddr_h = dmaaddrp;
+
+
+	dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff);
+	dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress &
+				RXDMA_CFIG2_MBADDR_L_MASK);
+
+	cfig2_p->bits.ldw.mbaddr = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT);
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+		"==> nxge_map_rxdma_channel_cfg_ring: "
+		"channel %d damaddrp $%p "
+		"cfg1 0x%016llx cfig2 0x%016llx",
+		dma_channel, dmaaddrp,
+		cfig1_p->value, cfig2_p->value));
+
+	cfig2_p->bits.ldw.full_hdr = rcrp->full_hdr_flag;
+	cfig2_p->bits.ldw.offset = rcrp->sw_priv_hdr_len;
+
+	rbrp->rx_rcr_p = rcrp;
+	rcrp->rx_rbr_p = rbrp;
+	*rcr_p = rcrp;
+	*rx_mbox_p = mboxp;
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+		"<== nxge_map_rxdma_channel_cfg_ring status 0x%08x", status));
+
+	return (status);
+}
+
+/*ARGSUSED*/
+static void
+nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t nxgep,
+    p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
+{
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+		"==> nxge_unmap_rxdma_channel_cfg_ring: channel %d",
+		rcr_p->rdc));
+
+	KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t));
+	KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t));
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+		"<== nxge_unmap_rxdma_channel_cfg_ring"));
+}
+
+static nxge_status_t
+nxge_map_rxdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel,
+    p_nxge_dma_common_t *dma_buf_p,
+    p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks)
+{
+	p_rx_rbr_ring_t 	rbrp;
+	p_nxge_dma_common_t 	dma_bufp, tmp_bufp;
+	p_rx_msg_t 		*rx_msg_ring;
+	p_rx_msg_t 		rx_msg_p;
+	p_mblk_t 		mblk_p;
+
+	rxring_info_t *ring_info;
+	nxge_status_t		status = NXGE_OK;
+	int			i, j, index;
+	uint32_t		size, bsize, nblocks, nmsgs;
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+		"==> nxge_map_rxdma_channel_buf_ring: channel %d",
+		channel));
+
+	dma_bufp = tmp_bufp = *dma_buf_p;
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+		" nxge_map_rxdma_channel_buf_ring: channel %d to map %d "
+		"chunks bufp 0x%016llx",
+		channel, num_chunks, dma_bufp));
+
+	nmsgs = 0;
+	for (i = 0; i < num_chunks; i++, tmp_bufp++) {
+		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+			"==> nxge_map_rxdma_channel_buf_ring: channel %d "
+			"bufp 0x%016llx nblocks %d nmsgs %d",
+			channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
+		nmsgs += tmp_bufp->nblocks;
+	}
+	if (!nmsgs) {
+		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+			"<== nxge_map_rxdma_channel_buf_ring: channel %d "
+			"no msg blocks",
+			channel));
+		status = NXGE_ERROR;
+		goto nxge_map_rxdma_channel_buf_ring_exit;
+	}
+
+	rbrp = (p_rx_rbr_ring_t)
+		KMEM_ZALLOC(sizeof (rx_rbr_ring_t), KM_SLEEP);
+
+	size = nmsgs * sizeof (p_rx_msg_t);
+	rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
+	ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t),
+		KM_SLEEP);
+
+	MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER,
+				(void *)nxgep->interrupt_cookie);
+	MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER,
+				(void *)nxgep->interrupt_cookie);
+	rbrp->rdc = channel;
+	rbrp->num_blocks = num_chunks;
+	rbrp->tnblocks = nmsgs;
+	rbrp->rbb_max = nmsgs;
+	rbrp->rbr_max_size = nmsgs;
+	rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1);
+
+	/*
+	 * Buffer sizes suggested by NIU architect.
+	 * 256, 1K and 2K.
+	 */
+
+	rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B;
+	rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES;
+	rbrp->npi_pkt_buf_size0 = SIZE_256B;
+
+	rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K;
+	rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES;
+	rbrp->npi_pkt_buf_size1 = SIZE_1KB;
+
+	rbrp->block_size = nxgep->rx_default_block_size;
+
+	if (!nxge_jumbo_enable && !nxgep->param_arr[param_accept_jumbo].value) {
+		rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K;
+		rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES;
+		rbrp->npi_pkt_buf_size2 = SIZE_2KB;
+	} else {
+		if (rbrp->block_size >= 0x2000) {
+			rbrp->pkt_buf_size2 = RBR_BUFSZ2_8K;
+			rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_8K_BYTES;
+			rbrp->npi_pkt_buf_size2 = SIZE_8KB;
+		} else {
+			rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K;
+			rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES;
+			rbrp->npi_pkt_buf_size2 = SIZE_4KB;
+		}
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+		"==> nxge_map_rxdma_channel_buf_ring: channel %d "
+		"actual rbr max %d rbb_max %d nmsgs %d "
+		"rbrp->block_size %d default_block_size %d "
+		"(config nxge_rbr_size %d nxge_rbr_spare_size %d)",
+		channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs,
+		rbrp->block_size, nxgep->rx_default_block_size,
+		nxge_rbr_size, nxge_rbr_spare_size));
+
+	/* Map in buffers from the buffer pool.  */
+	index = 0;
+	for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) {
+		bsize = dma_bufp->block_size;
+		nblocks = dma_bufp->nblocks;
+		ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp;
+		ring_info->buffer[i].buf_index = i;
+		ring_info->buffer[i].buf_size = dma_bufp->alength;
+		ring_info->buffer[i].start_index = index;
+		ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp;
+
+		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+			" nxge_map_rxdma_channel_buf_ring: map channel %d "
+			"chunk %d"
+			" nblocks %d chunk_size %x block_size 0x%x "
+			"dma_bufp $%p", channel, i,
+			dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize,
+			dma_bufp));
+
+		for (j = 0; j < nblocks; j++) {
+			if ((rx_msg_p = nxge_allocb(bsize, BPRI_LO,
+					dma_bufp)) == NULL) {
+				NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+					"allocb failed"));
+				break;
+			}
+			rx_msg_ring[index] = rx_msg_p;
+			rx_msg_p->block_index = index;
+			rx_msg_p->shifted_addr = (uint32_t)
+				((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >>
+					    RBR_BKADDR_SHIFT));
+
+			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+				"index %d j %d rx_msg_p $%p",
+				index, j, rx_msg_p));
+
+			mblk_p = rx_msg_p->rx_mblk_p;
+			mblk_p->b_wptr = mblk_p->b_rptr + bsize;
+			index++;
+			rx_msg_p->buf_dma.dma_channel = channel;
+		}
+	}
+	if (i < rbrp->num_blocks) {
+		goto nxge_map_rxdma_channel_buf_ring_fail1;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+		"nxge_map_rxdma_channel_buf_ring: done buf init "
+			"channel %d msg block entries %d",
+			channel, index));
+	ring_info->block_size_mask = bsize - 1;
+	rbrp->rx_msg_ring = rx_msg_ring;
+	rbrp->dma_bufp = dma_buf_p;
+	rbrp->ring_info = ring_info;
+
+	status = nxge_rxbuf_index_info_init(nxgep, rbrp);
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+		" nxge_map_rxdma_channel_buf_ring: "
+		"channel %d done buf info init", channel));
+
+	*rbr_p = rbrp;
+	goto nxge_map_rxdma_channel_buf_ring_exit;
+
+nxge_map_rxdma_channel_buf_ring_fail1:
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+		" nxge_map_rxdma_channel_buf_ring: failed channel %d "
+		"(status 0x%x)",
+		channel, status));
+
+	index--;
+	for (; index >= 0; index--) {
+		rx_msg_p = rx_msg_ring[index];
+		if (rx_msg_p != NULL) {
+			freeb(rx_msg_p->rx_mblk_p);
+			rx_msg_ring[index] = NULL;
+		}
+	}
+nxge_map_rxdma_channel_buf_ring_fail:
+	MUTEX_DESTROY(&rbrp->post_lock);
+	MUTEX_DESTROY(&rbrp->lock);
+	KMEM_FREE(ring_info, sizeof (rxring_info_t));
+	KMEM_FREE(rx_msg_ring, size);
+	KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t));
+
+nxge_map_rxdma_channel_buf_ring_exit:
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+		"<== nxge_map_rxdma_channel_buf_ring status 0x%08x", status));
+
+	return (status);
+}
+
+/*ARGSUSED*/
+static void
+nxge_unmap_rxdma_channel_buf_ring(p_nxge_t nxgep,
+    p_rx_rbr_ring_t rbr_p)
+{
+	p_rx_msg_t 		*rx_msg_ring;
+	p_rx_msg_t 		rx_msg_p;
+	rxring_info_t 		*ring_info;
+	int			i;
+	uint32_t		size;
+#ifdef	NXGE_DEBUG
+	int			num_chunks;
+#endif
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+		"==> nxge_unmap_rxdma_channel_buf_ring"));
+	if (rbr_p == NULL) {
+		NXGE_DEBUG_MSG((nxgep, RX_CTL,
+			"<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp"));
+		return;
+	}
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+		"==> nxge_unmap_rxdma_channel_buf_ring: channel %d",
+		rbr_p->rdc));
+
+	rx_msg_ring = rbr_p->rx_msg_ring;
+	ring_info = rbr_p->ring_info;
+
+	if (rx_msg_ring == NULL || ring_info == NULL) {
+		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+			"<== nxge_unmap_rxdma_channel_buf_ring: "
+			"rx_msg_ring $%p ring_info $%p",
+			rx_msg_ring, ring_info));
+		return;
+	}
+
+#ifdef	NXGE_DEBUG
+	num_chunks = rbr_p->num_blocks;
+#endif
+	size = rbr_p->tnblocks * sizeof (p_rx_msg_t);
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+		" nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d "
+		"tnblocks %d (max %d) size ptrs %d ",
+		rbr_p->rdc, num_chunks,
+		rbr_p->tnblocks, rbr_p->rbr_max_size, size));
+
+	for (i = 0; i < rbr_p->tnblocks; i++) {
+		rx_msg_p = rx_msg_ring[i];
+		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+			" nxge_unmap_rxdma_channel_buf_ring: "
+			"rx_msg_p $%p",
+			rx_msg_p));
+		if (rx_msg_p != NULL) {
+			freeb(rx_msg_p->rx_mblk_p);
+			rx_msg_ring[i] = NULL;
+		}
+	}
+
+	MUTEX_DESTROY(&rbr_p->post_lock);
+	MUTEX_DESTROY(&rbr_p->lock);
+	KMEM_FREE(ring_info, sizeof (rxring_info_t));
+	KMEM_FREE(rx_msg_ring, size);
+	KMEM_FREE(rbr_p, sizeof (rx_rbr_ring_t));
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+		"<== nxge_unmap_rxdma_channel_buf_ring"));
+}
+
+static nxge_status_t
+nxge_rxdma_hw_start_common(p_nxge_t nxgep)
+{
+	nxge_status_t		status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common"));
+
+	/*
+	 * Load the sharable parameters by writing to the
+	 * function zero control registers. These FZC registers
+	 * should be initialized only once for the entire chip.
+	 */
+	(void) nxge_init_fzc_rx_common(nxgep);
+
+	/*
+	 * Initialize the RXDMA port specific FZC control configurations.
+	 * These FZC registers pertain to each port.
+	 */
+	(void) nxge_init_fzc_rxdma_port(nxgep);
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_start_common"));
+
+	return (status);
+}
+
+/*ARGSUSED*/
+static void
+nxge_rxdma_hw_stop_common(p_nxge_t nxgep)
+{
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop_common"));
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop_common"));
+}
+
+static nxge_status_t
+nxge_rxdma_hw_start(p_nxge_t nxgep)
+{
+	int			i, ndmas;
+	uint16_t		channel;
+	p_rx_rbr_rings_t 	rx_rbr_rings;
+	p_rx_rbr_ring_t		*rbr_rings;
+	p_rx_rcr_rings_t 	rx_rcr_rings;
+	p_rx_rcr_ring_t		*rcr_rings;
+	p_rx_mbox_areas_t 	rx_mbox_areas_p;
+	p_rx_mbox_t		*rx_mbox_p;
+	nxge_status_t		status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start"));
+
+	rx_rbr_rings = nxgep->rx_rbr_rings;
+	rx_rcr_rings = nxgep->rx_rcr_rings;
+	if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
+		NXGE_DEBUG_MSG((nxgep, RX_CTL,
+			"<== nxge_rxdma_hw_start: NULL ring pointers"));
+		return (NXGE_ERROR);
+	}
+	ndmas = rx_rbr_rings->ndmas;
+	if (ndmas == 0) {
+		NXGE_DEBUG_MSG((nxgep, RX_CTL,
+			"<== nxge_rxdma_hw_start: no dma channel allocated"));
+		return (NXGE_ERROR);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+		"==> nxge_rxdma_hw_start (ndmas %d)", ndmas));
+
+	rbr_rings = rx_rbr_rings->rbr_rings;
+	rcr_rings = rx_rcr_rings->rcr_rings;
+	rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
+	if (rx_mbox_areas_p) {
+		rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
+	}
+
+	for (i = 0; i < ndmas; i++) {
+		channel = rbr_rings[i]->rdc;
+		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+			"==> nxge_rxdma_hw_start (ndmas %d) channel %d",
+				ndmas, channel));
+		status = nxge_rxdma_start_channel(nxgep, channel,
+				(p_rx_rbr_ring_t)rbr_rings[i],
+				(p_rx_rcr_ring_t)rcr_rings[i],
+				(p_rx_mbox_t)rx_mbox_p[i]);
+		if (status != NXGE_OK) {
+			goto nxge_rxdma_hw_start_fail1;
+		}
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start: "
+		"rx_rbr_rings 0x%016llx rings 0x%016llx",
+		rx_rbr_rings, rx_rcr_rings));
+
+	goto nxge_rxdma_hw_start_exit;
+
+nxge_rxdma_hw_start_fail1:
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+		"==> nxge_rxdma_hw_start: disable "
+		"(status 0x%x channel %d i %d)", status, channel, i));
+	for (; i >= 0; i--) {
+		channel = rbr_rings[i]->rdc;
+		(void) nxge_rxdma_stop_channel(nxgep, channel);
+	}
+
+nxge_rxdma_hw_start_exit:
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+		"<== nxge_rxdma_hw_start: (status 0x%x)", status));
+
+	return (status);
+}
+
+static void
+nxge_rxdma_hw_stop(p_nxge_t nxgep)
+{
+	int			i, ndmas;
+	uint16_t		channel;
+	p_rx_rbr_rings_t 	rx_rbr_rings;
+	p_rx_rbr_ring_t		*rbr_rings;
+	p_rx_rcr_rings_t 	rx_rcr_rings;
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop"));
+
+	rx_rbr_rings = nxgep->rx_rbr_rings;
+	rx_rcr_rings = nxgep->rx_rcr_rings;
+	if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
+		NXGE_DEBUG_MSG((nxgep, RX_CTL,
+			"<== nxge_rxdma_hw_stop: NULL ring pointers"));
+		return;
+	}
+	ndmas = rx_rbr_rings->ndmas;
+	if (!ndmas) {
+		NXGE_DEBUG_MSG((nxgep, RX_CTL,
+			"<== nxge_rxdma_hw_stop: no dma channel allocated"));
+		return;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+		"==> nxge_rxdma_hw_stop (ndmas %d)", ndmas));
+
+	rbr_rings = rx_rbr_rings->rbr_rings;
+
+	for (i = 0; i < ndmas; i++) {
+		channel = rbr_rings[i]->rdc;
+		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+			"==> nxge_rxdma_hw_stop (ndmas %d) channel %d",
+				ndmas, channel));
+		(void) nxge_rxdma_stop_channel(nxgep, channel);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop: "
+		"rx_rbr_rings 0x%016llx rings 0x%016llx",
+		rx_rbr_rings, rx_rcr_rings));
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop"));
+}
+
+
+static nxge_status_t
+nxge_rxdma_start_channel(p_nxge_t nxgep, uint16_t channel,
+    p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
+
+{
+	npi_handle_t		handle;
+	npi_status_t		rs = NPI_SUCCESS;
+	rx_dma_ctl_stat_t	cs;
+	rx_dma_ent_msk_t	ent_mask;
+	nxge_status_t		status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel"));
+
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "nxge_rxdma_start_channel: "
+		"npi handle addr $%p acc $%p",
+		nxgep->npi_handle.regp, nxgep->npi_handle.regh));
+
+	/* Reset RXDMA channel */
+	rs = npi_rxdma_cfg_rdc_reset(handle, channel);
+	if (rs != NPI_SUCCESS) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_rxdma_start_channel: "
+			"reset rxdma failed (0x%08x channel %d)",
+			rs, channel));
+		return (NXGE_ERROR | rs);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+		"==> nxge_rxdma_start_channel: reset done: channel %d",
+		channel));
+
+	/*
+	 * Initialize the RXDMA channel-specific FZC control
+	 * configurations.  These FZC registers pertain to each
+	 * RX channel (logical pages).
+	 */
+	status = nxge_init_fzc_rxdma_channel(nxgep,
+			channel, rbr_p, rcr_p, mbox_p);
+	if (status != NXGE_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_rxdma_start_channel: "
+			"init fzc rxdma failed (0x%08x channel %d)",
+			status, channel));
+		return (status);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+		"==> nxge_rxdma_start_channel: fzc done"));
+
+	/*
+	 * Zero out the shadow and prefetch ram.
+	 */
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
+		"ram done"));
+
+	/* Set up the interrupt event masks. */
+	ent_mask.value = 0;
+	ent_mask.value |= RX_DMA_ENT_MSK_RBREMPTY_MASK;
+	rs = npi_rxdma_event_mask(handle, OP_SET, channel,
+			&ent_mask);
+	if (rs != NPI_SUCCESS) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_rxdma_start_channel: "
+			"init rxdma event masks failed (0x%08x channel %d)",
+			rs, channel));
+		return (NXGE_ERROR | rs);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
+		"event done: channel %d (mask 0x%016llx)",
+		channel, ent_mask.value));
+
+	/* Initialize the receive DMA control and status register */
+	cs.value = 0;
+	cs.bits.hdw.mex = 1;
+	cs.bits.hdw.rcrthres = 1;
+	cs.bits.hdw.rcrto = 1;
+	cs.bits.hdw.rbr_empty = 1;
+	status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs);
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
+		"channel %d rx_dma_cntl_stat 0x%0016llx", channel, cs.value));
+	if (status != NXGE_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_rxdma_start_channel: "
+			"init rxdma control register failed (0x%08x channel %d)",
+			status, channel));
+		return (status);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
+		"control done - channel %d cs 0x%016llx", channel, cs.value));
+
+	/*
+	 * Load the RXDMA descriptors, buffers, and mailbox,
+	 * then initialize and enable this receive DMA channel.
+	 */
+	status = nxge_enable_rxdma_channel(nxgep,
+			channel, rbr_p, rcr_p, mbox_p);
+
+	if (status != NXGE_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			    " nxge_rxdma_start_channel: "
+			    " enable rxdma failed (0x%08x channel %d)",
+			    status, channel));
+		return (status);
+	}
+
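+	/*
+	 * With the channel enabled, mask only the WRED-drop and
+	 * port-drop-packet events; all other RXDMA events remain
+	 * unmasked.
+	 */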
+	ent_mask.value = 0;
+	ent_mask.value |= (RX_DMA_ENT_MSK_WRED_DROP_MASK |
+				RX_DMA_ENT_MSK_PTDROP_PKT_MASK);
+	rs = npi_rxdma_event_mask(handle, OP_SET, channel,
+			&ent_mask);
+	if (rs != NPI_SUCCESS) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_rxdma_start_channel: "
+			"set rxdma event masks failed (0x%08x channel %d)",
+			rs, channel));
+		return (NXGE_ERROR | rs);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
+		"final event mask set - channel %d (mask 0x%016llx)",
+		channel, ent_mask.value));
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
+		"==> nxge_rxdma_start_channel: enable done"));
+
+	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_start_channel"));
+
+	return (NXGE_OK);
+}
+
+static nxge_status_t
+nxge_rxdma_stop_channel(p_nxge_t nxgep, uint16_t channel)
+{
+	npi_handle_t		handle;
+	npi_status_t		rs = NPI_SUCCESS;
+	rx_dma_ctl_stat_t	cs;
+	rx_dma_ent_msk_t	ent_mask;
+	nxge_status_t		status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel"));
+
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "nxge_rxdma_stop_channel: "
+		"npi handle addr $%p acc $%p",
+		nxgep->npi_handle.regp, nxgep->npi_handle.regh));
+
+	/* Reset RXDMA channel */
+	rs = npi_rxdma_cfg_rdc_reset(handle, channel);
+	if (rs != NPI_SUCCESS) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			    " nxge_rxdma_stop_channel: "
+			    " reset rxdma failed (0x%08x channel %d)",
+			    rs, channel));
+		return (NXGE_ERROR | rs);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL,
+		"==> nxge_rxdma_stop_channel: reset done"));
+
+	/* Set up the interrupt event masks. */
+	ent_mask.value = RX_DMA_ENT_MSK_ALL;
+	rs = npi_rxdma_event_mask(handle, OP_SET, channel,
+			&ent_mask);
+	if (rs != NPI_SUCCESS) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			    "==> nxge_rxdma_stop_channel: "
+			    "set rxdma event masks failed (0x%08x channel %d)",
+			    rs, channel));
+		return (NXGE_ERROR | rs);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL,
+		"==> nxge_rxdma_stop_channel: event done"));
+
+	/* Initialize the receive DMA control and status register */
+	cs.value = 0;
+	status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel,
+			&cs);
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel: control "
+		" to default (all 0s) 0x%08x", cs.value));
+	if (status != NXGE_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			    " nxge_rxdma_stop_channel: init rxdma"
+			    " control register failed (0x%08x channel %d)",
+			status, channel));
+		return (status);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL,
+		"==> nxge_rxdma_stop_channel: control done"));
+
+	/* disable dma channel */
+	status = nxge_disable_rxdma_channel(nxgep, channel);
+
+	if (status != NXGE_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			    " nxge_rxdma_stop_channel: "
+			    " disable rxdma failed (0x%08x channel %d)",
+			    status, channel));
+		return (status);
+	}
+
+	NXGE_DEBUG_MSG((nxgep,
+		RX_CTL, "==> nxge_rxdma_stop_channel: disable done"));
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_channel"));
+
+	return (NXGE_OK);
+}
+
+nxge_status_t
+nxge_rxdma_handle_sys_errors(p_nxge_t nxgep)
+{
+	npi_handle_t		handle;
+	p_nxge_rdc_sys_stats_t	statsp;
+	rx_ctl_dat_fifo_stat_t	stat;
+	uint32_t		zcp_err_status;
+	uint32_t		ipp_err_status;
+	nxge_status_t		status = NXGE_OK;
+	npi_status_t		rs = NPI_SUCCESS;
+	boolean_t		my_err = B_FALSE;
+
+	handle = nxgep->npi_handle;
+	statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;
+
+	rs = npi_rxdma_rxctl_fifo_error_intr_get(handle, &stat);
+
+	if (rs != NPI_SUCCESS)
+		return (NXGE_ERROR | rs);
+
+	if (stat.bits.ldw.id_mismatch) {
+		statsp->id_mismatch++;
+		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL,
+					NXGE_FM_EREPORT_RDMC_ID_MISMATCH);
+		/* Global fatal error encountered */
+	}
+
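+	/*
+	 * The ZCP and IPP EOP error bits are reported per port;
+	 * claim the error only if the bit for this port is set.
+	 */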
+	if ((stat.bits.ldw.zcp_eop_err) || (stat.bits.ldw.ipp_eop_err)) {
+		switch (nxgep->mac.portnum) {
+		case 0:
+			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT0) ||
+				(stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT0)) {
+				my_err = B_TRUE;
+				zcp_err_status = stat.bits.ldw.zcp_eop_err;
+				ipp_err_status = stat.bits.ldw.ipp_eop_err;
+			}
+			break;
+		case 1:
+			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT1) ||
+				(stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT1)) {
+				my_err = B_TRUE;
+				zcp_err_status = stat.bits.ldw.zcp_eop_err;
+				ipp_err_status = stat.bits.ldw.ipp_eop_err;
+			}
+			break;
+		case 2:
+			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT2) ||
+				(stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT2)) {
+				my_err = B_TRUE;
+				zcp_err_status = stat.bits.ldw.zcp_eop_err;
+				ipp_err_status = stat.bits.ldw.ipp_eop_err;
+			}
+			break;
+		case 3:
+			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT3) ||
+				(stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT3)) {
+				my_err = B_TRUE;
+				zcp_err_status = stat.bits.ldw.zcp_eop_err;
+				ipp_err_status = stat.bits.ldw.ipp_eop_err;
+			}
+			break;
+		default:
+			return (NXGE_ERROR);
+		}
+	}
+
+	if (my_err) {
+		status = nxge_rxdma_handle_port_errors(nxgep, ipp_err_status,
+							zcp_err_status);
+		if (status != NXGE_OK)
+			return (status);
+	}
+
+	return (NXGE_OK);
+}
+
+static nxge_status_t
+nxge_rxdma_handle_port_errors(p_nxge_t nxgep, uint32_t ipp_status,
+							uint32_t zcp_status)
+{
+	boolean_t		rxport_fatal = B_FALSE;
+	p_nxge_rdc_sys_stats_t	statsp;
+	nxge_status_t		status = NXGE_OK;
+	uint8_t			portn;
+
+	portn = nxgep->mac.portnum;
+	statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;
+
+	if (ipp_status & (0x1 << portn)) {
+		statsp->ipp_eop_err++;
+		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+					NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR);
+		rxport_fatal = B_TRUE;
+	}
+
+	if (zcp_status & (0x1 << portn)) {
+		statsp->zcp_eop_err++;
+		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+					NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR);
+		rxport_fatal = B_TRUE;
+	}
+
+	if (rxport_fatal) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			    " nxge_rxdma_handle_port_error: "
+			    " fatal error on Port #%d\n",
+				portn));
+		status = nxge_rx_port_fatal_err_recover(nxgep);
+		if (status == NXGE_OK) {
+			FM_SERVICE_RESTORED(nxgep);
+		}
+	}
+
+	return (status);
+}
+
+static nxge_status_t
+nxge_rxdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel)
+{
+	npi_handle_t		handle;
+	npi_status_t		rs = NPI_SUCCESS;
+	nxge_status_t		status = NXGE_OK;
+	p_rx_rbr_ring_t		rbrp;
+	p_rx_rcr_ring_t		rcrp;
+	p_rx_mbox_t		mboxp;
+	rx_dma_ent_msk_t	ent_mask;
+	p_nxge_dma_common_t	dmap;
+	int			ring_idx;
+	uint32_t		ref_cnt;
+	p_rx_msg_t		rx_msg_p;
+	int			i;
+	uint32_t		nxge_port_rcr_size;
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fatal_err_recover"));
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"Recovering from RxDMAChannel#%d error...", channel));
+
+	/*
+	 * Stop the DMA channel and wait for the stop-done bit.
+	 * If the stop-done bit does not get set, report an error.
+	 */
+
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Rx DMA stop..."));
+
+	ring_idx = nxge_rxdma_get_ring_index(nxgep, channel);
+	rbrp = (p_rx_rbr_ring_t)nxgep->rx_rbr_rings->rbr_rings[ring_idx];
+	rcrp = (p_rx_rcr_ring_t)nxgep->rx_rcr_rings->rcr_rings[ring_idx];
+
+	MUTEX_ENTER(&rcrp->lock);
+	MUTEX_ENTER(&rbrp->lock);
+	MUTEX_ENTER(&rbrp->post_lock);
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA channel..."));
+
+	rs = npi_rxdma_cfg_rdc_disable(handle, channel);
+	if (rs != NPI_SUCCESS) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_disable_rxdma_channel:failed"));
+		goto fail;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA interrupt..."));
+
+	/* Disable interrupt */
+	ent_mask.value = RX_DMA_ENT_MSK_ALL;
+	rs = npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
+	if (rs != NPI_SUCCESS) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"nxge_rxdma_stop_channel: "
+				"set rxdma event masks failed (channel %d)",
+				channel));
+	}
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel reset..."));
+
+	/* Reset RXDMA channel */
+	rs = npi_rxdma_cfg_rdc_reset(handle, channel);
+	if (rs != NPI_SUCCESS) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_rxdma_fatal_err_recover: "
+				" reset rxdma failed (channel %d)", channel));
+		goto fail;
+	}
+
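+	/*
+	 * Reset the software state of the RBR and RCR rings to
+	 * their post-initialization values before restarting the
+	 * channel.
+	 */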
+	nxge_port_rcr_size = nxgep->nxge_port_rcr_size;
+
+	mboxp = (p_rx_mbox_t)
+		nxgep->rx_mbox_areas_p->rxmbox_areas[ring_idx];
+
+	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
+	rbrp->rbr_rd_index = 0;
+
+	rcrp->comp_rd_index = 0;
+	rcrp->comp_wt_index = 0;
+	rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
+		(p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
+	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
+		(p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
+
+	rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
+		(nxge_port_rcr_size - 1);
+	rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
+		(nxge_port_rcr_size - 1);
+
+	dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
+	bzero((caddr_t)dmap->kaddrp, dmap->alength);
+
+	cmn_err(CE_NOTE, "!rbr entries = %d\n", rbrp->rbr_max_size);
+
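+	/*
+	 * Walk the receive buffer ring.  Buffers that are still
+	 * referenced but fully consumed (cur_usage_cnt has reached
+	 * max_usage_cnt) are marked free for re-posting; partially
+	 * consumed buffers are only reported.
+	 */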
+	for (i = 0; i < rbrp->rbr_max_size; i++) {
+		rx_msg_p = rbrp->rx_msg_ring[i];
+		ref_cnt = rx_msg_p->ref_cnt;
+		if (ref_cnt != 1) {
+			if (rx_msg_p->cur_usage_cnt !=
+					rx_msg_p->max_usage_cnt) {
+				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+						"buf[%d]: cur_usage_cnt = %d "
+						"max_usage_cnt = %d\n", i,
+						rx_msg_p->cur_usage_cnt,
+						rx_msg_p->max_usage_cnt));
+			} else {
+				/* Buffer can be re-posted */
+				rx_msg_p->free = B_TRUE;
+				rx_msg_p->cur_usage_cnt = 0;
+				rx_msg_p->max_usage_cnt = 0xbaddcafe;
+				rx_msg_p->pkt_buf_size = 0;
+			}
+		}
+	}
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel re-start..."));
+
+	status = nxge_rxdma_start_channel(nxgep, channel, rbrp, rcrp, mboxp);
+	if (status != NXGE_OK) {
+		goto fail;
+	}
+
+	MUTEX_EXIT(&rbrp->post_lock);
+	MUTEX_EXIT(&rbrp->lock);
+	MUTEX_EXIT(&rcrp->lock);
+
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"Recovery Successful, RxDMAChannel#%d Restored",
+			channel));
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fatal_err_recover"));
+
+	return (NXGE_OK);
+fail:
+	MUTEX_EXIT(&rbrp->post_lock);
+	MUTEX_EXIT(&rbrp->lock);
+	MUTEX_EXIT(&rcrp->lock);
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
+
+	return (NXGE_ERROR | rs);
+}
+
+nxge_status_t
+nxge_rx_port_fatal_err_recover(p_nxge_t nxgep)
+{
+	nxge_status_t		status = NXGE_OK;
+	p_nxge_dma_common_t	*dma_buf_p;
+	uint16_t		channel;
+	int			ndmas;
+	int			i;
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_port_fatal_err_recover"));
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"Recovering from RxPort error..."));
+	/* Disable RxMAC */
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxMAC...\n"));
+	if (nxge_rx_mac_disable(nxgep) != NXGE_OK)
+		goto fail;
+
+	NXGE_DELAY(1000);
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Stop all RxDMA channels..."));
+
+	ndmas = nxgep->rx_buf_pool_p->ndmas;
+	dma_buf_p = nxgep->rx_buf_pool_p->dma_buf_pool_p;
+
+	for (i = 0; i < ndmas; i++) {
+		channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel;
+		if (nxge_rxdma_fatal_err_recover(nxgep, channel) != NXGE_OK) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+					"Could not recover channel %d",
+					channel));
+		}
+	}
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset IPP..."));
+
+	/* Reset IPP */
+	if (nxge_ipp_reset(nxgep) != NXGE_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_rx_port_fatal_err_recover: "
+			"Failed to reset IPP"));
+		goto fail;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset RxMAC..."));
+
+	/* Reset RxMAC */
+	if (nxge_rx_mac_reset(nxgep) != NXGE_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_rx_port_fatal_err_recover: "
+			"Failed to reset RxMAC"));
+		goto fail;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize IPP..."));
+
+	/* Re-Initialize IPP */
+	if (nxge_ipp_init(nxgep) != NXGE_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_rx_port_fatal_err_recover: "
+			"Failed to init IPP"));
+		goto fail;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize RxMAC..."));
+
+	/* Re-Initialize RxMAC */
+	if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_rx_port_fatal_err_recover: "
+			"Failed to init RxMAC"));
+		goto fail;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-enable RxMAC..."));
+
+	/* Re-enable RxMAC */
+	if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_rx_port_fatal_err_recover: "
+			"Failed to enable RxMAC"));
+		goto fail;
+	}
+
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"Recovery Successful, RxPort Restored"));
+
+	return (NXGE_OK);
+fail:
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
+	return (status);
+}
+
+void
+nxge_rxdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan)
+{
+	rx_dma_ctl_stat_t	cs;
+	rx_ctl_dat_fifo_stat_t	cdfs;
+
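+	/*
+	 * Error injection: raise the requested error bit through the
+	 * corresponding debug register so the normal error handling
+	 * path can observe it.
+	 */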
+	switch (err_id) {
+	case NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR:
+	case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR:
+	case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR:
+	case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR:
+	case NXGE_FM_EREPORT_RDMC_RBR_TMOUT:
+	case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR:
+	case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS:
+	case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR:
+	case NXGE_FM_EREPORT_RDMC_RCRINCON:
+	case NXGE_FM_EREPORT_RDMC_RCRFULL:
+	case NXGE_FM_EREPORT_RDMC_RBRFULL:
+	case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE:
+	case NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE:
+	case NXGE_FM_EREPORT_RDMC_CONFIG_ERR:
+		RXDMA_REG_READ64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG,
+			chan, &cs.value);
+		if (err_id == NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR)
+			cs.bits.hdw.rcr_ack_err = 1;
+		else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR)
+			cs.bits.hdw.dc_fifo_err = 1;
+		else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR)
+			cs.bits.hdw.rcr_sha_par = 1;
+		else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR)
+			cs.bits.hdw.rbr_pre_par = 1;
+		else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT)
+			cs.bits.hdw.rbr_tmout = 1;
+		else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR)
+			cs.bits.hdw.rsp_cnt_err = 1;
+		else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS)
+			cs.bits.hdw.byte_en_bus = 1;
+		else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR)
+			cs.bits.hdw.rsp_dat_err = 1;
+		else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR)
+			cs.bits.hdw.config_err = 1;
+		else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON)
+			cs.bits.hdw.rcrincon = 1;
+		else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL)
+			cs.bits.hdw.rcrfull = 1;
+		else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL)
+			cs.bits.hdw.rbrfull = 1;
+		else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE)
+			cs.bits.hdw.rbrlogpage = 1;
+		else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE)
+			cs.bits.hdw.cfiglogpage = 1;
+		cmn_err(CE_NOTE, "!Write 0x%lx to RX_DMA_CTL_STAT_DBG_REG\n",
+				cs.value);
+		RXDMA_REG_WRITE64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG,
+			chan, cs.value);
+		break;
+	case NXGE_FM_EREPORT_RDMC_ID_MISMATCH:
+	case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR:
+	case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR:
+		cdfs.value = 0;
+		if (err_id ==  NXGE_FM_EREPORT_RDMC_ID_MISMATCH)
+			cdfs.bits.ldw.id_mismatch = (1 << nxgep->mac.portnum);
+		else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR)
+			cdfs.bits.ldw.zcp_eop_err = (1 << nxgep->mac.portnum);
+		else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR)
+			cdfs.bits.ldw.ipp_eop_err = (1 << nxgep->mac.portnum);
+		cmn_err(CE_NOTE,
+			"!Write 0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n",
+			cdfs.value);
+		RXDMA_REG_WRITE64(nxgep->npi_handle,
+			RX_CTL_DAT_FIFO_STAT_DBG_REG, chan, cdfs.value);
+		break;
+	case NXGE_FM_EREPORT_RDMC_DCF_ERR:
+		break;
+	case NXGE_FM_EREPORT_RDMC_COMPLETION_ERR:
+		break;
+	}
+}
+
+
+static uint16_t
+nxge_get_pktbuf_size(p_nxge_t nxgep, int bufsz_type, rbr_cfig_b_t rbr_cfgb)
+{
+	uint16_t sz = RBR_BKSIZE_8K_BYTES;
+
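+	/*
+	 * Translate the RCR packet-buffer-size code into a byte count
+	 * using the buffer sizes programmed in the RBR configuration-B
+	 * register.
+	 */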
+	switch (bufsz_type) {
+	case RCR_PKTBUFSZ_0:
+		switch (rbr_cfgb.bits.ldw.bufsz0) {
+		case RBR_BUFSZ0_256B:
+			sz = RBR_BUFSZ0_256_BYTES;
+			break;
+		case RBR_BUFSZ0_512B:
+			sz = RBR_BUFSZ0_512B_BYTES;
+			break;
+		case RBR_BUFSZ0_1K:
+			sz = RBR_BUFSZ0_1K_BYTES;
+			break;
+		case RBR_BUFSZ0_2K:
+			sz = RBR_BUFSZ0_2K_BYTES;
+			break;
+		default:
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_get_pktbuf_size: bad bufsz0"));
+			break;
+		}
+		break;
+	case RCR_PKTBUFSZ_1:
+		switch (rbr_cfgb.bits.ldw.bufsz1) {
+		case RBR_BUFSZ1_1K:
+			sz = RBR_BUFSZ1_1K_BYTES;
+			break;
+		case RBR_BUFSZ1_2K:
+			sz = RBR_BUFSZ1_2K_BYTES;
+			break;
+		case RBR_BUFSZ1_4K:
+			sz = RBR_BUFSZ1_4K_BYTES;
+			break;
+		case RBR_BUFSZ1_8K:
+			sz = RBR_BUFSZ1_8K_BYTES;
+			break;
+		default:
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_get_pktbuf_size: bad bufsz1"));
+			break;
+		}
+		break;
+	case RCR_PKTBUFSZ_2:
+		switch (rbr_cfgb.bits.ldw.bufsz2) {
+		case RBR_BUFSZ2_2K:
+			sz = RBR_BUFSZ2_2K_BYTES;
+			break;
+		case RBR_BUFSZ2_4K:
+			sz = RBR_BUFSZ2_4K_BYTES;
+			break;
+		case RBR_BUFSZ2_8K:
+			sz = RBR_BUFSZ2_8K_BYTES;
+			break;
+		case RBR_BUFSZ2_16K:
+			sz = RBR_BUFSZ2_16K_BYTES;
+			break;
+		default:
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_get_pktbuf_size: bad bufsz2"));
+			break;
+		}
+		break;
+	case RCR_SINGLE_BLOCK:
+		switch (rbr_cfgb.bits.ldw.bksize) {
+		case BKSIZE_4K:
+			sz = RBR_BKSIZE_4K_BYTES;
+			break;
+		case BKSIZE_8K:
+			sz = RBR_BKSIZE_8K_BYTES;
+			break;
+		case BKSIZE_16K:
+			sz = RBR_BKSIZE_16K_BYTES;
+			break;
+		case BKSIZE_32K:
+			sz = RBR_BKSIZE_32K_BYTES;
+			break;
+		default:
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_get_pktbuf_size: bad bksize"));
+			break;
+		}
+		break;
+	default:
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+		"nxge_get_pktbuf_size: bad bufsz_type"));
+		break;
+	}
+	return (sz);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nxge/nxge_send.c	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,1035 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#include <sys/nxge/nxge_impl.h>
+
+extern uint32_t		nxge_reclaim_pending;
+extern uint32_t 	nxge_bcopy_thresh;
+extern uint32_t 	nxge_dvma_thresh;
+extern uint32_t 	nxge_dma_stream_thresh;
+extern uint32_t		nxge_tx_minfree;
+extern uint32_t		nxge_tx_intr_thres;
+extern uint32_t		nxge_tx_max_gathers;
+extern uint32_t		nxge_tx_tiny_pack;
+extern uint32_t		nxge_tx_use_bcopy;
+extern uint32_t		nxge_tx_lb_policy;
+extern uint32_t		nxge_no_tx_lb;
+
+typedef struct _mac_tx_hint {
+	uint16_t	sap;
+	uint16_t	vid;
+	void		*hash;
+} mac_tx_hint_t, *p_mac_tx_hint_t;
+
+int nxge_tx_lb_ring_1(p_mblk_t, uint32_t, p_mac_tx_hint_t);
+
+int
+nxge_start(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, p_mblk_t mp)
+{
+	int 			status = 0;
+	p_tx_desc_t 		tx_desc_ring_vp;
+	npi_handle_t		npi_desc_handle;
+	nxge_os_dma_handle_t 	tx_desc_dma_handle;
+	p_tx_desc_t 		tx_desc_p;
+	p_tx_msg_t 		tx_msg_ring;
+	p_tx_msg_t 		tx_msg_p;
+	tx_desc_t		tx_desc, *tmp_desc_p;
+	tx_desc_t		sop_tx_desc, *sop_tx_desc_p;
+	p_tx_pkt_header_t	hdrp;
+	p_tx_pkt_hdr_all_t	pkthdrp;
+	uint8_t			npads = 0;
+	uint64_t 		dma_ioaddr;
+	uint32_t		dma_flags;
+	int			last_bidx;
+	uint8_t 		*b_rptr;
+	caddr_t 		kaddr;
+	uint32_t		nmblks;
+	uint32_t		ngathers;
+	uint32_t		clen;
+	int 			len;
+	uint32_t		pkt_len, pack_len, min_len;
+	uint32_t		bcopy_thresh;
+	int 			i, cur_index, sop_index;
+	uint16_t		tail_index;
+	boolean_t		tail_wrap = B_FALSE;
+	nxge_dma_common_t	desc_area;
+	nxge_os_dma_handle_t 	dma_handle;
+	ddi_dma_cookie_t 	dma_cookie;
+	npi_handle_t		npi_handle;
+	p_mblk_t 		nmp;
+	p_mblk_t		t_mp;
+	uint32_t 		ncookies;
+	boolean_t 		good_packet;
+	boolean_t 		mark_mode = B_FALSE;
+	p_nxge_stats_t 		statsp;
+	p_nxge_tx_ring_stats_t tdc_stats;
+	t_uscalar_t 		start_offset = 0;
+	t_uscalar_t 		stuff_offset = 0;
+	t_uscalar_t 		end_offset = 0;
+	t_uscalar_t 		value = 0;
+	t_uscalar_t 		cksum_flags = 0;
+	boolean_t		cksum_on = B_FALSE;
+	uint32_t		boff = 0;
+	uint64_t		tot_xfer_len = 0, tmp_len = 0;
+	boolean_t		header_set = B_FALSE;
+#ifdef NXGE_DEBUG
+	p_tx_desc_t 		tx_desc_ring_pp;
+	p_tx_desc_t 		tx_desc_pp;
+	tx_desc_t		*save_desc_p;
+	int			dump_len;
+	int			sad_len;
+	uint64_t		sad;
+	int			xfer_len;
+	uint32_t		msgsize;
+#endif
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL,
+		"==> nxge_start: tx dma channel %d", tx_ring_p->tdc));
+	NXGE_DEBUG_MSG((nxgep, TX_CTL,
+		"==> nxge_start: Starting tdc %d desc pending %d",
+		tx_ring_p->tdc, tx_ring_p->descs_pending));
+
+	statsp = nxgep->statsp;
+
+	if (nxgep->statsp->port_stats.lb_mode == nxge_lb_normal) {
+		if (!statsp->mac_stats.link_up) {
+			freemsg(mp);
+			NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start: "
+				"link not up"));
+			goto nxge_start_fail1;
+		}
+	}
+
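+	/*
+	 * Retrieve the hardware checksum offsets for this packet and
+	 * bias them by the Ethernet (or VLAN) header length so that
+	 * they are relative to the start of the frame.
+	 */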
+	hcksum_retrieve(mp, NULL, NULL, &start_offset,
+		&stuff_offset, &end_offset, &value, &cksum_flags);
+	if (!NXGE_IS_VLAN_PACKET(mp->b_rptr)) {
+		start_offset += sizeof (ether_header_t);
+		stuff_offset += sizeof (ether_header_t);
+	} else {
+		start_offset += sizeof (struct ether_vlan_header);
+		stuff_offset += sizeof (struct ether_vlan_header);
+	}
+
+	if (cksum_flags & HCK_PARTIALCKSUM) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"==> nxge_start: cksum_flags 0x%x (partial checksum) ",
+			cksum_flags));
+		cksum_on = B_TRUE;
+	}
+
+#ifdef	NXGE_DEBUG
+	if (tx_ring_p->descs_pending) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start: "
+			"desc pending %d ", tx_ring_p->descs_pending));
+	}
+
+	dump_len = (int)(MBLKL(mp));
+	dump_len = (dump_len > 128) ? 128: dump_len;
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL,
+		"==> nxge_start: tdc %d: dumping ...: b_rptr $%p "
+		"(Before header reserve: ORIGINAL LEN %d)",
+		tx_ring_p->tdc,
+		mp->b_rptr,
+		dump_len));
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start: dump packets "
+		"(IP ORIGINAL b_rptr $%p): %s", mp->b_rptr,
+		nxge_dump_packet((char *)mp->b_rptr, dump_len)));
+#endif
+
+	MUTEX_ENTER(&tx_ring_p->lock);
+	tdc_stats = tx_ring_p->tdc_stats;
+	mark_mode = (tx_ring_p->descs_pending &&
+		((tx_ring_p->tx_ring_size - tx_ring_p->descs_pending)
+		< nxge_tx_minfree));
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL,
+		"TX Descriptor ring is channel %d mark mode %d",
+		tx_ring_p->tdc, mark_mode));
+
+	if (!nxge_txdma_reclaim(nxgep, tx_ring_p, nxge_tx_minfree)) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"TX Descriptor ring is full: channel %d",
+			tx_ring_p->tdc));
+		cas32((uint32_t *)&tx_ring_p->queueing, 0, 1);
+		tdc_stats->tx_no_desc++;
+		MUTEX_EXIT(&tx_ring_p->lock);
+		if (nxgep->resched_needed && !nxgep->resched_running) {
+			nxgep->resched_running = B_TRUE;
+			ddi_trigger_softintr(nxgep->resched_id);
+		}
+		status = 1;
+		goto nxge_start_fail1;
+	}
+
+	nmp = mp;
+	i = sop_index = tx_ring_p->wr_index;
+	nmblks = 0;
+	ngathers = 0;
+	pkt_len = 0;
+	pack_len = 0;
+	clen = 0;
+	last_bidx = -1;
+	good_packet = B_TRUE;
+
+	desc_area = tx_ring_p->tdc_desc;
+	npi_handle = desc_area.npi_handle;
+	npi_desc_handle.regh = (nxge_os_acc_handle_t)
+			DMA_COMMON_ACC_HANDLE(desc_area);
+	tx_desc_ring_vp = (p_tx_desc_t)DMA_COMMON_VPTR(desc_area);
+#ifdef	NXGE_DEBUG
+	tx_desc_ring_pp = (p_tx_desc_t)DMA_COMMON_IOADDR(desc_area);
+#endif
+	tx_desc_dma_handle = (nxge_os_dma_handle_t)
+			DMA_COMMON_HANDLE(desc_area);
+	tx_msg_ring = tx_ring_p->tx_msg_ring;
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start: wr_index %d i %d",
+		sop_index, i));
+
+#ifdef	NXGE_DEBUG
+	msgsize = msgdsize(nmp);
+	NXGE_DEBUG_MSG((nxgep, TX_CTL,
+		"==> nxge_start(1): wr_index %d i %d msgdsize %d",
+		sop_index, i, msgsize));
+#endif
+	/*
+	 * The first 16 bytes of the premapped buffer are reserved
+	 * for header. No padding will be used.
+	 */
+	pkt_len = pack_len = boff = TX_PKT_HEADER_SIZE;
+	if (nxge_tx_use_bcopy) {
+		bcopy_thresh = (nxge_bcopy_thresh - TX_PKT_HEADER_SIZE);
+	} else {
+		bcopy_thresh = (TX_BCOPY_SIZE - TX_PKT_HEADER_SIZE);
+	}
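+	/*
+	 * Walk the mblk chain and build one transmit descriptor per
+	 * fragment.  Fragments below the bcopy threshold are copied
+	 * into the premapped buffer; larger fragments are DMA-bound
+	 * directly.
+	 */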
+	while (nmp) {
+		good_packet = B_TRUE;
+		b_rptr = nmp->b_rptr;
+		len = MBLKL(nmp);
+		if (len <= 0) {
+			nmp = nmp->b_cont;
+			continue;
+		}
+		nmblks++;
+
+		NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start(1): nmblks %d "
+			"len %d pkt_len %d pack_len %d",
+			nmblks, len, pkt_len, pack_len));
+		/*
+		 * Hardware limits the transfer length to 4K for NIU and
+		 * 4076 (TX_MAX_TRANSFER_LENGTH) for Neptune. But we just
+		 * use TX_MAX_TRANSFER_LENGTH as the limit for both.
+		 * If len is longer than the limit, then we break nmp into
+		 * two chunks: Make the first chunk equal to the limit and
+		 * the second chunk for the remaining data. If the second
+		 * chunk is still larger than the limit, then it will be
+		 * broken into two in the next pass.
+		 */
+		if (len > TX_MAX_TRANSFER_LENGTH - TX_PKT_HEADER_SIZE) {
+			t_mp = dupb(nmp);
+			nmp->b_wptr = nmp->b_rptr +
+				(TX_MAX_TRANSFER_LENGTH - TX_PKT_HEADER_SIZE);
+			t_mp->b_rptr = nmp->b_wptr;
+			t_mp->b_cont = nmp->b_cont;
+			nmp->b_cont = t_mp;
+			len = MBLKL(nmp);
+		}
+
+		tx_desc.value = 0;
+		tx_desc_p = &tx_desc_ring_vp[i];
+#ifdef	NXGE_DEBUG
+		tx_desc_pp = &tx_desc_ring_pp[i];
+#endif
+		tx_msg_p = &tx_msg_ring[i];
+		npi_desc_handle.regp = (uint64_t)tx_desc_p;
+		if (!header_set &&
+			((!nxge_tx_use_bcopy && (len > TX_BCOPY_SIZE)) ||
+				(len >= bcopy_thresh))) {
+			header_set = B_TRUE;
+			bcopy_thresh += TX_PKT_HEADER_SIZE;
+			boff = 0;
+			pack_len = 0;
+			kaddr = (caddr_t)DMA_COMMON_VPTR(tx_msg_p->buf_dma);
+			hdrp = (p_tx_pkt_header_t)kaddr;
+			clen = pkt_len;
+			dma_handle = tx_msg_p->buf_dma_handle;
+			dma_ioaddr = DMA_COMMON_IOADDR(tx_msg_p->buf_dma);
+			(void) ddi_dma_sync(dma_handle,
+				i * nxge_bcopy_thresh, nxge_bcopy_thresh,
+				DDI_DMA_SYNC_FORDEV);
+
+			tx_msg_p->flags.dma_type = USE_BCOPY;
+			goto nxge_start_control_header_only;
+		}
+
+		pkt_len += len;
+		pack_len += len;
+
+		NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start(3): "
+			"desc entry %d "
+			"DESC IOADDR $%p "
+			"desc_vp $%p tx_desc_p $%p "
+			"desc_pp $%p tx_desc_pp $%p "
+			"len %d pkt_len %d pack_len %d",
+			i,
+			DMA_COMMON_IOADDR(desc_area),
+			tx_desc_ring_vp, tx_desc_p,
+			tx_desc_ring_pp, tx_desc_pp,
+			len, pkt_len, pack_len));
+
+		if (len < bcopy_thresh) {
+			NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start(4): "
+				"USE BCOPY: "));
+			if (nxge_tx_tiny_pack) {
+				uint32_t blst =
+					TXDMA_DESC_NEXT_INDEX(i, -1,
+						tx_ring_p->tx_wrap_mask);
+				NXGE_DEBUG_MSG((nxgep, TX_CTL,
+					"==> nxge_start(5): pack"));
+				if ((pack_len <= bcopy_thresh) &&
+					(last_bidx == blst)) {
+					NXGE_DEBUG_MSG((nxgep, TX_CTL,
+						"==> nxge_start: pack(6) "
+						"(pkt_len %d pack_len %d)",
+						pkt_len, pack_len));
+					i = blst;
+					tx_desc_p = &tx_desc_ring_vp[i];
+#ifdef	NXGE_DEBUG
+					tx_desc_pp = &tx_desc_ring_pp[i];
+#endif
+					tx_msg_p = &tx_msg_ring[i];
+					boff = pack_len - len;
+					ngathers--;
+				} else if (pack_len > bcopy_thresh &&
+					header_set) {
+					pack_len = len;
+					boff = 0;
+					bcopy_thresh = nxge_bcopy_thresh;
+					NXGE_DEBUG_MSG((nxgep, TX_CTL,
+						"==> nxge_start(7): > max NEW "
+						"bcopy thresh %d "
+						"pkt_len %d pack_len %d(next)",
+						bcopy_thresh,
+						pkt_len, pack_len));
+				}
+				last_bidx = i;
+			}
+			kaddr = (caddr_t)DMA_COMMON_VPTR(tx_msg_p->buf_dma);
+			if ((boff == TX_PKT_HEADER_SIZE) && (nmblks == 1)) {
+				hdrp = (p_tx_pkt_header_t)kaddr;
+				header_set = B_TRUE;
+				NXGE_DEBUG_MSG((nxgep, TX_CTL,
+					"==> nxge_start(7_x2): "
+					"pkt_len %d pack_len %d (new hdrp $%p)",
+					pkt_len, pack_len, hdrp));
+			}
+			tx_msg_p->flags.dma_type = USE_BCOPY;
+			kaddr += boff;
+			NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start(8): "
+				"USE BCOPY: before bcopy "
+				"DESC IOADDR $%p entry %d "
+				"bcopy packets %d "
+				"bcopy kaddr $%p "
+				"bcopy ioaddr (SAD) $%p "
+				"bcopy clen %d "
+				"bcopy boff %d",
+				DMA_COMMON_IOADDR(desc_area), i,
+				tdc_stats->tx_hdr_pkts,
+				kaddr,
+				dma_ioaddr,
+				clen,
+				boff));
+			NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start: "
+				"1USE BCOPY: "));
+			NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start: "
+				"2USE BCOPY: "));
+			NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start: "
+				"last USE BCOPY: copy from b_rptr $%p "
+				"to KADDR $%p (len %d offset %d",
+				b_rptr, kaddr, len, boff));
+
+			bcopy(b_rptr, kaddr, len);
+
+#ifdef	NXGE_DEBUG
+			dump_len = (len > 128) ? 128: len;
+			NXGE_DEBUG_MSG((nxgep, TX_CTL,
+				"==> nxge_start: dump packets "
+				"(After BCOPY len %d)"
+				"(b_rptr $%p): %s", len, nmp->b_rptr,
+				nxge_dump_packet((char *)nmp->b_rptr,
+				dump_len)));
+#endif
+
+			dma_handle = tx_msg_p->buf_dma_handle;
+			dma_ioaddr = DMA_COMMON_IOADDR(tx_msg_p->buf_dma);
+			(void) ddi_dma_sync(dma_handle,
+				i * nxge_bcopy_thresh, nxge_bcopy_thresh,
+					DDI_DMA_SYNC_FORDEV);
+			clen = len + boff;
+			tdc_stats->tx_hdr_pkts++;
+			NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start(9): "
+				"USE BCOPY: "
+				"DESC IOADDR $%p entry %d "
+				"bcopy packets %d "
+				"bcopy kaddr $%p "
+				"bcopy ioaddr (SAD) $%p "
+				"bcopy clen %d "
+				"bcopy boff %d",
+				DMA_COMMON_IOADDR(desc_area),
+				i,
+				tdc_stats->tx_hdr_pkts,
+				kaddr,
+				dma_ioaddr,
+				clen,
+				boff));
+		} else {
+			NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start(12): "
+				"USE DVMA: len %d", len));
+			tx_msg_p->flags.dma_type = USE_DMA;
+			dma_flags = DDI_DMA_WRITE;
+			if (len < nxge_dma_stream_thresh) {
+				dma_flags |= DDI_DMA_CONSISTENT;
+			} else {
+				dma_flags |= DDI_DMA_STREAMING;
+			}
+
+			dma_handle = tx_msg_p->dma_handle;
+			status = ddi_dma_addr_bind_handle(dma_handle, NULL,
+				(caddr_t)b_rptr, len, dma_flags,
+				DDI_DMA_DONTWAIT, NULL,
+				&dma_cookie, &ncookies);
+			if (status == DDI_DMA_MAPPED) {
+				dma_ioaddr = dma_cookie.dmac_laddress;
+				len = (int)dma_cookie.dmac_size;
+				clen = (uint32_t)dma_cookie.dmac_size;
+				NXGE_DEBUG_MSG((nxgep, TX_CTL,
+					"==> nxge_start(12_1): "
+					"USE DVMA: len %d clen %d "
+					"ngathers %d",
+					len, clen,
+					ngathers));
+
+				npi_desc_handle.regp = (uint64_t)tx_desc_p;
+				while (ncookies > 1) {
+					ngathers++;
+					/*
+					 * Each additional cookie gets its
+					 * own descriptor entry; the SOP bit
+					 * and its related fields are not
+					 * set for these entries.
+					 */
+
+					(void) npi_txdma_desc_gather_set(
+						npi_desc_handle,
+						&tx_desc,
+						(ngathers -1),
+						mark_mode,
+						ngathers,
+						dma_ioaddr,
+						clen);
+
+					tx_msg_p->tx_msg_size = clen;
+					NXGE_DEBUG_MSG((nxgep, TX_CTL,
+						"==> nxge_start:  DMA "
+						"ncookie %d "
+						"ngathers %d "
+						"dma_ioaddr $%p len %d"
+						"desc $%p descp $%p (%d)",
+						ncookies,
+						ngathers,
+						dma_ioaddr, clen,
+						*tx_desc_p, tx_desc_p, i));
+
+					ddi_dma_nextcookie(dma_handle,
+							&dma_cookie);
+					dma_ioaddr =
+						dma_cookie.dmac_laddress;
+
+					len = (int)dma_cookie.dmac_size;
+					clen = (uint32_t)dma_cookie.dmac_size;
+					NXGE_DEBUG_MSG((nxgep, TX_CTL,
+						"==> nxge_start(12_2): "
+						"USE DVMA: len %d clen %d ",
+						len, clen));
+
+					i = TXDMA_DESC_NEXT_INDEX(i, 1,
+						tx_ring_p->tx_wrap_mask);
+					tx_desc_p = &tx_desc_ring_vp[i];
+
+					npi_desc_handle.regp =
+						(uint64_t)tx_desc_p;
+					tx_msg_p = &tx_msg_ring[i];
+					tx_msg_p->flags.dma_type = USE_NONE;
+					tx_desc.value = 0;
+
+					ncookies--;
+				}
+				tdc_stats->tx_ddi_pkts++;
+				NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start:"
+					"DMA: ddi packets %d",
+					tdc_stats->tx_ddi_pkts));
+			} else {
+				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				    "dma mapping failed for %d "
+				    "bytes addr $%p flags %x (%d)",
+				    len, b_rptr, status, status));
+				good_packet = B_FALSE;
+				tdc_stats->tx_dma_bind_fail++;
+				tx_msg_p->flags.dma_type = USE_NONE;
+				goto nxge_start_fail2;
+			}
+		} /* ddi dvma */
+
+		nmp = nmp->b_cont;
+nxge_start_control_header_only:
+		npi_desc_handle.regp = (uint64_t)tx_desc_p;
+		ngathers++;
+
+		if (ngathers == 1) {
+#ifdef	NXGE_DEBUG
+			save_desc_p = &sop_tx_desc;
+#endif
+			sop_tx_desc_p = &sop_tx_desc;
+			sop_tx_desc_p->value = 0;
+			sop_tx_desc_p->bits.hdw.tr_len = clen;
+			sop_tx_desc_p->bits.hdw.sad = dma_ioaddr >> 32;
+			sop_tx_desc_p->bits.ldw.sad = dma_ioaddr & 0xffffffff;
+		} else {
+#ifdef	NXGE_DEBUG
+			save_desc_p = &tx_desc;
+#endif
+			tmp_desc_p = &tx_desc;
+			tmp_desc_p->value = 0;
+			tmp_desc_p->bits.hdw.tr_len = clen;
+			tmp_desc_p->bits.hdw.sad = dma_ioaddr >> 32;
+			tmp_desc_p->bits.ldw.sad = dma_ioaddr & 0xffffffff;
+
+			tx_desc_p->value = tmp_desc_p->value;
+		}
+
+		NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start(13): "
+			"Desc_entry %d ngathers %d "
+			"desc_vp $%p tx_desc_p $%p "
+			"len %d clen %d pkt_len %d pack_len %d nmblks %d "
+			"dma_ioaddr (SAD) $%p mark %d",
+			i, ngathers,
+			tx_desc_ring_vp, tx_desc_p,
+			len, clen, pkt_len, pack_len, nmblks,
+			dma_ioaddr, mark_mode));
+
+#ifdef NXGE_DEBUG
+		npi_desc_handle.nxgep = nxgep;
+		npi_desc_handle.function.function = nxgep->function_num;
+		npi_desc_handle.function.instance = nxgep->instance;
+		sad = (save_desc_p->value & TX_PKT_DESC_SAD_MASK);
+		xfer_len = ((save_desc_p->value & TX_PKT_DESC_TR_LEN_MASK) >>
+			TX_PKT_DESC_TR_LEN_SHIFT);
+
+
+		NXGE_DEBUG_MSG((nxgep, TX_CTL, "\n\t: value 0x%llx\n"
+			"\t\tsad $%p\ttr_len %d len %d\tnptrs %d\t"
+			"mark %d sop %d\n",
+			save_desc_p->value,
+			sad,
+			save_desc_p->bits.hdw.tr_len,
+			xfer_len,
+			save_desc_p->bits.hdw.num_ptr,
+			save_desc_p->bits.hdw.mark,
+			save_desc_p->bits.hdw.sop));
+
+		npi_txdma_dump_desc_one(npi_desc_handle, NULL, i);
+#endif
+
+		tx_msg_p->tx_msg_size = clen;
+		i = TXDMA_DESC_NEXT_INDEX(i, 1, tx_ring_p->tx_wrap_mask);
+		if (ngathers > nxge_tx_max_gathers) {
+			good_packet = B_FALSE;
+			hcksum_retrieve(mp, NULL, NULL, &start_offset,
+				&stuff_offset, &end_offset, &value,
+				&cksum_flags);
+
+			NXGE_DEBUG_MSG((NULL, TX_CTL,
+				"==> nxge_start(14): pull msg - "
+				"len %d pkt_len %d ngathers %d",
+				len, pkt_len, ngathers));
+			/* Pull all message blocks from b_cont */
+			if ((msgpullup(mp, -1)) == NULL) {
+				goto nxge_start_fail2;
+			}
+			goto nxge_start_fail2;
+		}
+	} /* while (nmp) */
+
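+	/*
+	 * All fragments have descriptors.  Go back to the SOP entry,
+	 * build the 16-byte packet header, and fill in the total
+	 * transfer length and checksum offsets.
+	 */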
+	tx_msg_p->tx_message = mp;
+	tx_desc_p = &tx_desc_ring_vp[sop_index];
+	npi_desc_handle.regp = (uint64_t)tx_desc_p;
+
+	pkthdrp = (p_tx_pkt_hdr_all_t)hdrp;
+	pkthdrp->reserved = 0;
+	hdrp->value = 0;
+	(void) nxge_fill_tx_hdr(mp, B_FALSE, cksum_on,
+		(pkt_len - TX_PKT_HEADER_SIZE), npads, pkthdrp);
+
+	if (pkt_len > NXGE_MTU_DEFAULT_MAX) {
+		tdc_stats->tx_jumbo_pkts++;
+	}
+
+	min_len = (nxgep->msg_min + TX_PKT_HEADER_SIZE + (npads * 2));
+	if (pkt_len < min_len) {
+		/* Assume we use bcopy to premapped buffers */
+		kaddr = (caddr_t)DMA_COMMON_VPTR(tx_msg_p->buf_dma);
+		NXGE_DEBUG_MSG((NULL, TX_CTL,
+			"==> nxge_start(14-1): < (msg_min + 16)"
+			"len %d pkt_len %d min_len %d bzero %d ngathers %d",
+			len, pkt_len, min_len, (min_len - pkt_len), ngathers));
+		bzero((kaddr + pkt_len), (min_len - pkt_len));
+		pkt_len = tx_msg_p->tx_msg_size = min_len;
+
+		sop_tx_desc_p->bits.hdw.tr_len = min_len;
+
+		NXGE_MEM_PIO_WRITE64(npi_desc_handle, sop_tx_desc_p->value);
+		tx_desc_p->value = sop_tx_desc_p->value;
+
+		NXGE_DEBUG_MSG((NULL, TX_CTL,
+			"==> nxge_start(14-2): < msg_min - "
+			"len %d pkt_len %d min_len %d ngathers %d",
+			len, pkt_len, min_len, ngathers));
+	}
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start: cksum_flags 0x%x ",
+		cksum_flags));
+	if (cksum_flags & HCK_PARTIALCKSUM) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"==> nxge_start: cksum_flags 0x%x (partial checksum) ",
+			cksum_flags));
+		cksum_on = B_TRUE;
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"==> nxge_start: from IP cksum_flags 0x%x "
+			"(partial checksum) "
+			"start_offset %d stuff_offset %d",
+			cksum_flags, start_offset, stuff_offset));
+		tmp_len = (uint64_t)(start_offset >> 1);
+		hdrp->value |= (tmp_len << TX_PKT_HEADER_L4START_SHIFT);
+		tmp_len = (uint64_t)(stuff_offset >> 1);
+		hdrp->value |= (tmp_len << TX_PKT_HEADER_L4STUFF_SHIFT);
+
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"==> nxge_start: from IP cksum_flags 0x%x "
+			"(partial checksum) "
+			"after SHIFT start_offset %d stuff_offset %d",
+			cksum_flags, start_offset, stuff_offset));
+	}
+	{
+		uint64_t	tmp_len;
+
+		/* pkt_len already includes 16 + paddings!! */
+		/* Update the control header length */
+		tot_xfer_len = (pkt_len - TX_PKT_HEADER_SIZE);
+		tmp_len = hdrp->value |
+			(tot_xfer_len << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT);
+
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"==> nxge_start(15_x1): setting SOP "
+			"tot_xfer_len 0x%llx (%d) pkt_len %d tmp_len "
+			"0x%llx hdrp->value 0x%llx",
+			tot_xfer_len, tot_xfer_len, pkt_len,
+			tmp_len, hdrp->value));
+#if defined(_BIG_ENDIAN)
+		hdrp->value = ddi_swap64(tmp_len);
+#else
+		hdrp->value = tmp_len;
+#endif
+		NXGE_DEBUG_MSG((nxgep,
+			TX_CTL, "==> nxge_start(15_x2): setting SOP "
+			"after SWAP: tot_xfer_len 0x%llx pkt_len %d "
+			"tmp_len 0x%llx hdrp->value 0x%llx",
+			tot_xfer_len, pkt_len,
+			tmp_len, hdrp->value));
+	}
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start(15): setting SOP "
+		"wr_index %d "
+		"tot_xfer_len (%d) pkt_len %d npads %d",
+		sop_index,
+		tot_xfer_len, pkt_len,
+		npads));
+
+	sop_tx_desc_p->bits.hdw.sop = 1;
+	sop_tx_desc_p->bits.hdw.mark = mark_mode;
+	sop_tx_desc_p->bits.hdw.num_ptr = ngathers;
+
+	NXGE_MEM_PIO_WRITE64(npi_desc_handle, sop_tx_desc_p->value);
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start(16): set SOP done"));
+
+#ifdef NXGE_DEBUG
+	npi_desc_handle.nxgep = nxgep;
+	npi_desc_handle.function.function = nxgep->function_num;
+	npi_desc_handle.function.instance = nxgep->instance;
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "\n\t: value 0x%llx\n"
+		"\t\tsad $%p\ttr_len %d len %d\tnptrs %d\tmark %d sop %d\n",
+		save_desc_p->value,
+		sad,
+		save_desc_p->bits.hdw.tr_len,
+		xfer_len,
+		save_desc_p->bits.hdw.num_ptr,
+		save_desc_p->bits.hdw.mark,
+		save_desc_p->bits.hdw.sop));
+	(void) npi_txdma_dump_desc_one(npi_desc_handle, NULL, sop_index);
+
+	dump_len = (pkt_len > 128) ? 128: pkt_len;
+	NXGE_DEBUG_MSG((nxgep, TX_CTL,
+		"==> nxge_start: dump packets(17) (after sop set, len "
+		" (len/dump_len/pkt_len/tot_xfer_len) %d/%d/%d/%d):\n"
+		"ptr $%p: %s", len, dump_len, pkt_len, tot_xfer_len,
+		(char *)hdrp,
+		nxge_dump_packet((char *)hdrp, dump_len)));
+	NXGE_DEBUG_MSG((nxgep, TX_CTL,
+		"==> nxge_start(18): TX desc sync: sop_index %d",
+			sop_index));
+#endif
+
+	if ((ngathers == 1) || tx_ring_p->wr_index < i) {
+		(void) ddi_dma_sync(tx_desc_dma_handle,
+			sop_index * sizeof (tx_desc_t),
+			ngathers * sizeof (tx_desc_t),
+			DDI_DMA_SYNC_FORDEV);
+
+		NXGE_DEBUG_MSG((nxgep, TX_CTL, "nxge_start(19): sync 1 "
+			"cs_off = 0x%02X cs_s_off = 0x%02X "
+			"pkt_len %d ngathers %d sop_index %d\n",
+			stuff_offset, start_offset,
+			pkt_len, ngathers, sop_index));
+	} else { /* more than one descriptor and wrap around */
+		uint32_t nsdescs = tx_ring_p->tx_ring_size - sop_index;
+		(void) ddi_dma_sync(tx_desc_dma_handle,
+			sop_index * sizeof (tx_desc_t),
+			nsdescs * sizeof (tx_desc_t),
+			DDI_DMA_SYNC_FORDEV);
+		NXGE_DEBUG_MSG((nxgep, TX_CTL, "nxge_start(20): sync 1 "
+			"cs_off = 0x%02X cs_s_off = 0x%02X "
+			"pkt_len %d ngathers %d sop_index %d\n",
+			stuff_offset, start_offset,
+				pkt_len, ngathers, sop_index));
+
+		(void) ddi_dma_sync(tx_desc_dma_handle,
+			0,
+			(ngathers - nsdescs) * sizeof (tx_desc_t),
+			DDI_DMA_SYNC_FORDEV);
+		NXGE_DEBUG_MSG((nxgep, TX_CTL, "nxge_start(21): sync 2 "
+			"cs_off = 0x%02X cs_s_off = 0x%02X "
+			"pkt_len %d ngathers %d sop_index %d\n",
+			stuff_offset, start_offset,
+			pkt_len, ngathers, sop_index));
+	}
+
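+	/*
+	 * Record the new write index and toggle the wrap bit if the
+	 * index wrapped around the end of the ring.
+	 */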
+	tail_index = tx_ring_p->wr_index;
+	tail_wrap = tx_ring_p->wr_index_wrap;
+
+	tx_ring_p->wr_index = i;
+	if (tx_ring_p->wr_index <= tail_index) {
+		tx_ring_p->wr_index_wrap = ((tail_wrap == B_TRUE) ?
+						B_FALSE : B_TRUE);
+	}
+
+	tx_ring_p->descs_pending += ngathers;
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start: TX kick: "
+		"channel %d wr_index %d wrap %d ngathers %d desc_pend %d",
+		tx_ring_p->tdc,
+		tx_ring_p->wr_index,
+		tx_ring_p->wr_index_wrap,
+		ngathers,
+		tx_ring_p->descs_pending));
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start: TX KICKING: "));
+
+	{
+		tx_ring_kick_t		kick;
+
+		kick.value = 0;
+		kick.bits.ldw.wrap = tx_ring_p->wr_index_wrap;
+		kick.bits.ldw.tail = (uint16_t)tx_ring_p->wr_index;
+
+		/* Kick start the Transmit kick register */
+		TXDMA_REG_WRITE64(NXGE_DEV_NPI_HANDLE(nxgep),
+			TX_RING_KICK_REG,
+			(uint8_t)tx_ring_p->tdc,
+			kick.value);
+	}
+
+	tdc_stats->tx_starts++;
+
+	MUTEX_EXIT(&tx_ring_p->lock);
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_start"));
+
+	return (status);
+
+nxge_start_fail2:
+	if (good_packet == B_FALSE) {
+		cur_index = sop_index;
+		NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start: clean up"));
+		for (i = 0; i < ngathers; i++) {
+			tx_desc_p = &tx_desc_ring_vp[cur_index];
+			npi_handle.regp = (uint64_t)tx_desc_p;
+			tx_msg_p = &tx_msg_ring[cur_index];
+			(void) npi_txdma_desc_set_zero(npi_handle, 1);
+			if (tx_msg_p->flags.dma_type == USE_DVMA) {
+				NXGE_DEBUG_MSG((nxgep, TX_CTL,
+					"tx_desc_p = %X index = %d",
+					tx_desc_p, tx_ring_p->rd_index));
+				(void) dvma_unload(
+						tx_msg_p->dvma_handle,
+						0, -1);
+				tx_msg_p->dvma_handle = NULL;
+				if (tx_ring_p->dvma_wr_index ==
+					tx_ring_p->dvma_wrap_mask)
+					tx_ring_p->dvma_wr_index = 0;
+				else
+					tx_ring_p->dvma_wr_index++;
+				tx_ring_p->dvma_pending--;
+			} else if (tx_msg_p->flags.dma_type ==
+					USE_DMA) {
+				if (ddi_dma_unbind_handle(
+					tx_msg_p->dma_handle))
+					cmn_err(CE_WARN, "!nxge_start: "
+						"ddi_dma_unbind_handle failed");
+			}
+			tx_msg_p->flags.dma_type = USE_NONE;
+			cur_index = TXDMA_DESC_NEXT_INDEX(cur_index, 1,
+				tx_ring_p->tx_wrap_mask);
+
+		}
+
+		nxgep->resched_needed = B_TRUE;
+	}
+
+	MUTEX_EXIT(&tx_ring_p->lock);
+
+nxge_start_fail1:
+	/* Add FMA to check the access handle nxge_hregh */
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_start"));
+
+	return (status);
+}
+
+boolean_t
+nxge_send(p_nxge_t nxgep, mblk_t *mp, p_mac_tx_hint_t hp)
+{
+	p_tx_ring_t 		*tx_rings;
+	uint8_t			ring_index;
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_send"));
+
+	ASSERT(mp->b_next == NULL);
+
+	ring_index = nxge_tx_lb_ring_1(mp, nxgep->max_tdcs, hp);
+	tx_rings = nxgep->tx_rings->rings;
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_send: tx_rings $%p",
+		tx_rings));
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_send: max_tdcs %d "
+		"ring_index %d", nxgep->max_tdcs, ring_index));
+
+	if (nxge_start(nxgep, tx_rings[ring_index], mp)) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_send: failed "
+			"ring index %d", ring_index));
+		return (B_FALSE);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_send: ring index %d",
+		ring_index));
+
+	return (B_TRUE);
+}
+
+/*
+ * nxge_m_tx() - send a chain of packets
+ */
+mblk_t *
+nxge_m_tx(void *arg, mblk_t *mp)
+{
+	p_nxge_t 		nxgep = (p_nxge_t)arg;
+	mblk_t 			*next;
+	mac_tx_hint_t		hint;
+
+	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
+		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+			"==> nxge_m_tx: hardware not initialized"));
+		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+			"<== nxge_m_tx"));
+		return (mp);
+	}
+
+	hint.hash =  NULL;
+	hint.vid =  0;
+	hint.sap =  0;
+
+	while (mp != NULL) {
+		next = mp->b_next;
+		mp->b_next = NULL;
+
+		/*
+		 * Until Nemo TX resources are available, the MAC
+		 * driver does the load balancing, based on the TCP
+		 * port or the CPU.  For debugging, the policy is a
+		 * system-configurable parameter.
+		 */
+		if (!nxge_send(nxgep, mp, &hint)) {
+			mp->b_next = next;
+			break;
+		}
+
+		mp = next;
+	}
+
+	return (mp);
+}
+
+int
+nxge_tx_lb_ring_1(p_mblk_t mp, uint32_t maxtdcs, p_mac_tx_hint_t hp)
+{
+	uint8_t 		ring_index = 0;
+	uint8_t 		*tcp_port;
+	p_mblk_t 		nmp;
+	size_t 			mblk_len;
+	size_t 			iph_len;
+	size_t 			hdrs_size;
+	uint8_t			hdrs_buf[sizeof (struct  ether_header) +
+					IP_MAX_HDR_LENGTH + sizeof (uint32_t)];
+				/*
+				 * Space for the Ethernet header, the
+				 * maximum IP header, and the first 4
+				 * bytes (the ports) of the TCP/UDP header.
+				 */
+
+	boolean_t		qos = B_FALSE;
+
+	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_lb_ring"));
+
+	if (hp->vid) {
+		qos = B_TRUE;
+	}
+	switch (nxge_tx_lb_policy) {
+	case NXGE_TX_LB_TCPUDP: /* default IPv4 TCP/UDP */
+	default:
+		tcp_port = mp->b_rptr;
+		if (!nxge_no_tx_lb && !qos &&
+			(ntohs(((p_ether_header_t)tcp_port)->ether_type)
+				== ETHERTYPE_IP)) {
+			nmp = mp;
+			mblk_len = MBLKL(nmp);
+			tcp_port = NULL;
+			if (mblk_len > sizeof (struct ether_header) +
+					sizeof (uint8_t)) {
+				tcp_port = nmp->b_rptr +
+					sizeof (struct ether_header);
+				mblk_len -= sizeof (struct ether_header);
+				iph_len = ((*tcp_port) & 0x0f) << 2;
+				if (mblk_len > (iph_len + sizeof (uint32_t))) {
+					tcp_port = nmp->b_rptr;
+				} else {
+					tcp_port = NULL;
+				}
+			}
+			if (tcp_port == NULL) {
+				hdrs_size = 0;
+				((p_ether_header_t)hdrs_buf)->ether_type = 0;
+				while ((nmp) && (hdrs_size <
+						sizeof (hdrs_buf))) {
+					mblk_len = MBLKL(nmp);
+					if (mblk_len >=
+						(sizeof (hdrs_buf) - hdrs_size))
+						mblk_len = sizeof (hdrs_buf) -
+							hdrs_size;
+					bcopy(nmp->b_rptr,
+						&hdrs_buf[hdrs_size], mblk_len);
+					hdrs_size += mblk_len;
+					nmp = nmp->b_cont;
+				}
+				tcp_port = hdrs_buf;
+			}
+			tcp_port += sizeof (ether_header_t);
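+			/*
+			 * tcp_port now points at the IP header.  Bytes 6-7
+			 * hold the MF flag and fragment offset; only an
+			 * unfragmented packet (both zero) is hashed on the
+			 * low bytes of the TCP/UDP source and destination
+			 * ports.  Everything else hashes on the last byte
+			 * of the destination IP address.
+			 */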
+			if (!(tcp_port[6] & 0x3f) && !(tcp_port[7] & 0xff)) {
+				if ((tcp_port[9] == IPPROTO_TCP) ||
+						(tcp_port[9] == IPPROTO_UDP)) {
+					tcp_port += ((*tcp_port) & 0x0f) << 2;
+					ring_index =
+						((tcp_port[1] ^ tcp_port[3])
+						% maxtdcs);
+				} else {
+					ring_index = tcp_port[19] % maxtdcs;
+				}
+			} else { /* fragmented packet */
+				ring_index = tcp_port[19] % maxtdcs;
+			}
+		} else {
+			ring_index = mp->b_band % maxtdcs;
+		}
+		break;
+
+	case NXGE_TX_LB_HASH:
+		if (hp->hash) {
+			ring_index = ((uint64_t)(hp->hash) % maxtdcs);
+		} else {
+			ring_index = mp->b_band % maxtdcs;
+		}
+		break;
+
+	case NXGE_TX_LB_DEST_MAC: /* Use destination MAC address */
+		tcp_port = mp->b_rptr;
+		ring_index = tcp_port[5] % maxtdcs;
+		break;
+	}
+
+	NXGE_DEBUG_MSG((NULL, TX_CTL, "<== nxge_tx_lb_ring"));
+
+	return (ring_index);
+}
+
+uint_t
+nxge_reschedule(caddr_t arg)
+{
+	p_nxge_t nxgep;
+
+	nxgep = (p_nxge_t)arg;
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reschedule"));
+
+	if (nxgep->nxge_mac_state == NXGE_MAC_STARTED &&
+			nxgep->resched_needed) {
+		mac_tx_update(nxgep->mach);
+		nxgep->resched_needed = B_FALSE;
+		nxgep->resched_running = B_FALSE;
+	}
+
+	NXGE_DEBUG_MSG((NULL, TX_CTL, "<== nxge_reschedule"));
+	return (DDI_INTR_CLAIMED);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nxge/nxge_txc.c	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,420 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#include <sys/nxge/nxge_impl.h>
+#include <sys/nxge/nxge_txc.h>
+
+static nxge_status_t
+nxge_txc_handle_port_errors(p_nxge_t, uint32_t);
+static void
+nxge_txc_inject_port_err(uint8_t, txc_int_stat_dbg_t *,
+			uint8_t istats);
+extern nxge_status_t nxge_tx_port_fatal_err_recover(p_nxge_t);
+
+nxge_status_t
+nxge_txc_init(p_nxge_t nxgep)
+{
+	uint8_t			port;
+	npi_handle_t		handle;
+	npi_status_t		rs = NPI_SUCCESS;
+
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	port = NXGE_GET_PORT_NUM(nxgep->function_num);
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txc_init: portn %d", port));
+
+	/*
+	 * Enable the TXC controller.
+	 */
+	if ((rs = npi_txc_global_enable(handle)) != NPI_SUCCESS) {
+		goto fail;
+	}
+
+	/* Enable this port within the TXC. */
+	if ((rs = npi_txc_port_enable(handle, port)) != NPI_SUCCESS) {
+		goto fail;
+	}
+
+	/* Bind DMA channels to this port. */
+	if ((rs = npi_txc_port_dma_enable(handle, port,
+			TXDMA_PORT_BITMAP(nxgep))) != NPI_SUCCESS) {
+		goto fail;
+	}
+
+	/* Unmask all TXC interrupts */
+	npi_txc_global_imask_set(handle, port, 0);
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txc_init: portn %d", port));
+
+	return (NXGE_OK);
+fail:
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_txc_init: Failed to initialize txc on port %d",
+			port));
+
+	return (NXGE_ERROR | rs);
+}
+
+nxge_status_t
+nxge_txc_uninit(p_nxge_t nxgep)
+{
+	uint8_t			port;
+	npi_handle_t		handle;
+	npi_status_t		rs = NPI_SUCCESS;
+
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	port = NXGE_GET_PORT_NUM(nxgep->function_num);
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txc_uninit: portn %d", port));
+
+	/*
+	 * disable the TXC controller.
+	 */
+	if ((rs = npi_txc_global_disable(handle)) != NPI_SUCCESS) {
+		goto fail;
+	}
+
+	/* disable this port within the TXC. */
+	if ((rs = npi_txc_port_disable(handle, port)) != NPI_SUCCESS) {
+		goto fail;
+	}
+
+	/* unbind DMA channels to this port. */
+	if ((rs = npi_txc_port_dma_enable(handle, port, 0)) != NPI_SUCCESS) {
+		goto fail;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txc_uninit: portn %d", port));
+
+	return (NXGE_OK);
+fail:
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_txc_uninit: Failed to uninitialize txc on port %d",
+			port));
+
+	return (NXGE_ERROR | rs);
+}
+
+void
+nxge_txc_regs_dump(p_nxge_t nxgep)
+{
+	uint32_t		cnt1, cnt2;
+	npi_handle_t		handle;
+	txc_control_t		control;
+	uint32_t		bitmap = 0;
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "\nTXC dump: func # %d:\n",
+		nxgep->function_num));
+
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+
+	(void) npi_txc_control(handle, OP_GET, &control);
+	(void) npi_txc_port_dma_list_get(handle, nxgep->function_num, &bitmap);
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "\n\tTXC port control 0x%0llx",
+		(long long)control.value));
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "\n\tTXC port bitmap 0x%x", bitmap));
+
+	(void) npi_txc_pkt_xmt_to_mac_get(handle, nxgep->function_num,
+	    &cnt1, &cnt2);
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "\n\tTXC bytes to MAC %d "
+		"packets to MAC %d",
+		cnt1, cnt2));
+
+	(void) npi_txc_pkt_stuffed_get(handle, nxgep->function_num,
+					    &cnt1, &cnt2);
+	NXGE_DEBUG_MSG((nxgep, TX_CTL,
+		"\n\tTXC assembled packets %d reorder packets %d",
+		cnt1 & 0xffff, cnt2 & 0xffff));
+
+	(void) npi_txc_reorder_get(handle, nxgep->function_num, &cnt1);
+	NXGE_DEBUG_MSG((nxgep, TX_CTL,
+		"\n\tTXC reorder resource %d", cnt1 & 0xff));
+}
+
+nxge_status_t
+nxge_txc_handle_sys_errors(p_nxge_t nxgep)
+{
+	npi_handle_t		handle;
+	txc_int_stat_t		istatus;
+	uint32_t		err_status;
+	uint8_t			err_portn;
+	boolean_t		my_err = B_FALSE;
+	nxge_status_t		status = NXGE_OK;
+
+	handle = nxgep->npi_handle;
+	npi_txc_global_istatus_get(handle, (txc_int_stat_t *)&istatus.value);
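+	/*
+	 * The TXC global interrupt status register carries one
+	 * status field per port; pick out the field for this port.
+	 */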
+	switch (nxgep->mac.portnum) {
+	case 0:
+		if (istatus.bits.ldw.port0_int_status) {
+			my_err = B_TRUE;
+			err_portn = 0;
+			err_status = istatus.bits.ldw.port0_int_status;
+		}
+		break;
+	case 1:
+		if (istatus.bits.ldw.port1_int_status) {
+			my_err = B_TRUE;
+			err_portn = 1;
+			err_status = istatus.bits.ldw.port1_int_status;
+		}
+		break;
+	case 2:
+		if (istatus.bits.ldw.port2_int_status) {
+			my_err = B_TRUE;
+			err_portn = 2;
+			err_status = istatus.bits.ldw.port2_int_status;
+		}
+		break;
+	case 3:
+		if (istatus.bits.ldw.port3_int_status) {
+			my_err = B_TRUE;
+			err_portn = 3;
+			err_status = istatus.bits.ldw.port3_int_status;
+		}
+		break;
+	default:
+		return (NXGE_ERROR);
+	}
+	if (my_err) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+		    " nxge_txc_handle_sys_errors: errored port %d",
+		    err_portn));
+		status = nxge_txc_handle_port_errors(nxgep, err_status);
+	}
+
+	return (status);
+}
+
+static nxge_status_t
+nxge_txc_handle_port_errors(p_nxge_t nxgep, uint32_t err_status)
+{
+	npi_handle_t		handle;
+	npi_status_t		rs = NPI_SUCCESS;
+	p_nxge_txc_stats_t	statsp;
+	txc_int_stat_t		istatus;
+	boolean_t		txport_fatal = B_FALSE;
+	uint8_t			portn;
+	nxge_status_t		status = NXGE_OK;
+
+	handle = nxgep->npi_handle;
+	statsp = (p_nxge_txc_stats_t)&nxgep->statsp->txc_stats;
+	portn = nxgep->mac.portnum;
+	istatus.value = 0;
+
+	if ((err_status & TXC_INT_STAT_RO_CORR_ERR) ||
+			(err_status & TXC_INT_STAT_RO_UNCORR_ERR) ||
+			(err_status & TXC_INT_STAT_REORDER_ERR)) {
+		if ((rs = npi_txc_ro_states_get(handle, portn,
+				&statsp->errlog.ro_st)) != NPI_SUCCESS) {
+			return (NXGE_ERROR | rs);
+		}
+
+		if (err_status & TXC_INT_STAT_RO_CORR_ERR) {
+			statsp->ro_correct_err++;
+			NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+					NXGE_FM_EREPORT_TXC_RO_CORRECT_ERR);
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"nxge_txc_err_evnts: "
+				"RO FIFO correctable error"));
+		}
+		if (err_status & TXC_INT_STAT_RO_UNCORR_ERR) {
+			statsp->ro_uncorrect_err++;
+			NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+					NXGE_FM_EREPORT_TXC_RO_UNCORRECT_ERR);
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"nxge_txc_err_evnts: "
+				"RO FIFO uncorrectable error"));
+		}
+		if (err_status & TXC_INT_STAT_REORDER_ERR) {
+			statsp->reorder_err++;
+			NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+					NXGE_FM_EREPORT_TXC_REORDER_ERR);
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"nxge_txc_err_evnts: "
+				"fatal error: Reorder error"));
+			txport_fatal = B_TRUE;
+		}
+
+		if ((err_status & TXC_INT_STAT_RO_CORR_ERR) ||
+			(err_status & TXC_INT_STAT_RO_UNCORR_ERR)) {
+
+			if ((rs = npi_txc_ro_ecc_state_clr(handle, portn))
+							!= NPI_SUCCESS)
+				return (NXGE_ERROR | rs);
+			/*
+			 * Making sure that error source is cleared if this is
+			 * an injected error.
+			 */
+			TXC_FZC_CNTL_REG_WRITE64(handle, TXC_ROECC_CTL_REG,
+								portn, 0);
+		}
+	}
+
+	if ((err_status & TXC_INT_STAT_SF_CORR_ERR) ||
+			(err_status & TXC_INT_STAT_SF_UNCORR_ERR)) {
+		if ((rs = npi_txc_sf_states_get(handle, portn,
+				&statsp->errlog.sf_st)) != NPI_SUCCESS) {
+			return (NXGE_ERROR | rs);
+		}
+		if (err_status & TXC_INT_STAT_SF_CORR_ERR) {
+			statsp->sf_correct_err++;
+			NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+					NXGE_FM_EREPORT_TXC_SF_CORRECT_ERR);
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"nxge_txc_err_evnts: "
+				"SF FIFO correctable error"));
+		}
+		if (err_status & TXC_INT_STAT_SF_UNCORR_ERR) {
+			statsp->sf_uncorrect_err++;
+			NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+					NXGE_FM_EREPORT_TXC_SF_UNCORRECT_ERR);
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"nxge_txc_err_evnts: "
+				"SF FIFO uncorrectable error"));
+		}
+		if ((rs = npi_txc_sf_ecc_state_clr(handle, portn))
+							!= NPI_SUCCESS)
+			return (NXGE_ERROR | rs);
+		/*
+		 * Making sure that error source is cleared if this is
+		 * an injected error.
+		 */
+		TXC_FZC_CNTL_REG_WRITE64(handle, TXC_SFECC_CTL_REG, portn, 0);
+	}
+
+	/* Clear corresponding errors */
+	switch (portn) {
+	case 0:
+		istatus.bits.ldw.port0_int_status = err_status;
+		break;
+	case 1:
+		istatus.bits.ldw.port1_int_status = err_status;
+		break;
+	case 2:
+		istatus.bits.ldw.port2_int_status = err_status;
+		break;
+	case 3:
+		istatus.bits.ldw.port3_int_status = err_status;
+		break;
+	default:
+		return (NXGE_ERROR);
+	}
+
+	npi_txc_global_istatus_clear(handle, istatus.value);
+
+	if (txport_fatal) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				" nxge_txc_handle_port_errors:"
+				" fatal Error on Port#%d\n",
+				portn));
+		status = nxge_tx_port_fatal_err_recover(nxgep);
+		if (status == NXGE_OK) {
+			FM_SERVICE_RESTORED(nxgep);
+		}
+	}
+
+	return (status);
+}
+
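+/*
+ * nxge_txc_inject_err
+ * Error injection helper (fault testing): program the RO/SF ECC control
+ * registers to force single- or double-bit ECC errors, or set the reorder
+ * error bit in the interrupt status debug register, so that the error
+ * handling paths above can be exercised.
+ */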
+void
+nxge_txc_inject_err(p_nxge_t nxgep, uint32_t err_id)
+{
+	txc_int_stat_dbg_t	txcs;
+	txc_roecc_ctl_t		ro_ecc_ctl;
+	txc_sfecc_ctl_t		sf_ecc_ctl;
+	uint8_t			portn = nxgep->mac.portnum;
+
+	cmn_err(CE_NOTE, "!TXC error Inject\n");
+	switch (err_id) {
+	case NXGE_FM_EREPORT_TXC_RO_CORRECT_ERR:
+	case NXGE_FM_EREPORT_TXC_RO_UNCORRECT_ERR:
+		ro_ecc_ctl.value = 0;
+		ro_ecc_ctl.bits.ldw.all_pkts = 1;
+		ro_ecc_ctl.bits.ldw.second_line_pkt = 1;
+		if (err_id == NXGE_FM_EREPORT_TXC_RO_CORRECT_ERR)
+			ro_ecc_ctl.bits.ldw.single_bit_err = 1;
+		else
+			ro_ecc_ctl.bits.ldw.double_bit_err = 1;
+		cmn_err(CE_NOTE, "!Write 0x%lx to TXC_ROECC_CTL_REG\n",
+					ro_ecc_ctl.value);
+		TXC_FZC_CNTL_REG_WRITE64(nxgep->npi_handle, TXC_ROECC_CTL_REG,
+					portn, ro_ecc_ctl.value);
+		break;
+	case NXGE_FM_EREPORT_TXC_SF_CORRECT_ERR:
+	case NXGE_FM_EREPORT_TXC_SF_UNCORRECT_ERR:
+		sf_ecc_ctl.value = 0;
+		sf_ecc_ctl.bits.ldw.all_pkts = 1;
+		sf_ecc_ctl.bits.ldw.second_line_pkt = 1;
+		if (err_id == NXGE_FM_EREPORT_TXC_SF_CORRECT_ERR)
+			sf_ecc_ctl.bits.ldw.single_bit_err = 1;
+		else
+			sf_ecc_ctl.bits.ldw.double_bit_err = 1;
+		cmn_err(CE_NOTE, "!Write 0x%lx to TXC_SFECC_CTL_REG\n",
+					sf_ecc_ctl.value);
+		TXC_FZC_CNTL_REG_WRITE64(nxgep->npi_handle, TXC_SFECC_CTL_REG,
+					portn, sf_ecc_ctl.value);
+		break;
+	case NXGE_FM_EREPORT_TXC_REORDER_ERR:
+		NXGE_REG_RD64(nxgep->npi_handle, TXC_INT_STAT_DBG_REG,
+					&txcs.value);
+		nxge_txc_inject_port_err(portn, &txcs,
+						TXC_INT_STAT_REORDER_ERR);
+		cmn_err(CE_NOTE, "!Write 0x%lx to TXC_INT_STAT_DBG_REG\n",
+					txcs.value);
+		NXGE_REG_WR64(nxgep->npi_handle, TXC_INT_STAT_DBG_REG,
+					txcs.value);
+		break;
+	default:
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"nxge_txc_inject_err: Unknown err_id"));
+	}
+}
+
+static void
+nxge_txc_inject_port_err(uint8_t portn, txc_int_stat_dbg_t *txcs,
+				uint8_t istats)
+{
+	switch (portn) {
+	case 0:
+		txcs->bits.ldw.port0_int_status |= istats;
+		break;
+	case 1:
+		txcs->bits.ldw.port1_int_status |= istats;
+		break;
+	case 2:
+		txcs->bits.ldw.port2_int_status |= istats;
+		break;
+	case 3:
+		txcs->bits.ldw.port3_int_status |= istats;
+		break;
+	default:
+		;
+	}
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nxge/nxge_txdma.c	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,3263 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#include <sys/nxge/nxge_impl.h>
+#include <sys/nxge/nxge_txdma.h>
+#include <sys/llc1.h>
+
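+/*
+ * Transmit tunables.  These module-global defaults control the descriptor
+ * reclaim threshold, the minimum number of free descriptors kept per ring,
+ * the maximum number of gather pointers per packet, and whether small
+ * fragments are copied (bcopy) rather than DMA-bound.
+ */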
+uint32_t 	nxge_reclaim_pending = TXDMA_RECLAIM_PENDING_DEFAULT;
+uint32_t	nxge_tx_minfree = 32;
+uint32_t	nxge_tx_intr_thres = 0;
+uint32_t	nxge_tx_max_gathers = TX_MAX_GATHER_POINTERS;
+uint32_t	nxge_tx_tiny_pack = 1;
+uint32_t	nxge_tx_use_bcopy = 1;
+
+extern uint32_t 	nxge_tx_ring_size;
+extern uint32_t 	nxge_bcopy_thresh;
+extern uint32_t 	nxge_dvma_thresh;
+extern uint32_t 	nxge_dma_stream_thresh;
+extern dma_method_t 	nxge_force_dma;
+
+/* Device register access attributes for PIO.  */
+extern ddi_device_acc_attr_t nxge_dev_reg_acc_attr;
+/* Device descriptor access attributes for DMA.  */
+extern ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr;
+/* Device buffer access attributes for DMA.  */
+extern ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr;
+extern ddi_dma_attr_t nxge_desc_dma_attr;
+extern ddi_dma_attr_t nxge_tx_dma_attr;
+
+static nxge_status_t nxge_map_txdma(p_nxge_t);
+static void nxge_unmap_txdma(p_nxge_t);
+
+static nxge_status_t nxge_txdma_hw_start(p_nxge_t);
+static void nxge_txdma_hw_stop(p_nxge_t);
+
+static nxge_status_t nxge_map_txdma_channel(p_nxge_t, uint16_t,
+	p_nxge_dma_common_t *, p_tx_ring_t *,
+	uint32_t, p_nxge_dma_common_t *,
+	p_tx_mbox_t *);
+static void nxge_unmap_txdma_channel(p_nxge_t, uint16_t,
+	p_tx_ring_t, p_tx_mbox_t);
+
+static nxge_status_t nxge_map_txdma_channel_buf_ring(p_nxge_t, uint16_t,
+	p_nxge_dma_common_t *, p_tx_ring_t *, uint32_t);
+static void nxge_unmap_txdma_channel_buf_ring(p_nxge_t, p_tx_ring_t);
+
+static void nxge_map_txdma_channel_cfg_ring(p_nxge_t, uint16_t,
+	p_nxge_dma_common_t *, p_tx_ring_t,
+	p_tx_mbox_t *);
+static void nxge_unmap_txdma_channel_cfg_ring(p_nxge_t,
+	p_tx_ring_t, p_tx_mbox_t);
+
+static nxge_status_t nxge_txdma_start_channel(p_nxge_t, uint16_t,
+    p_tx_ring_t, p_tx_mbox_t);
+static nxge_status_t nxge_txdma_stop_channel(p_nxge_t, uint16_t,
+	p_tx_ring_t, p_tx_mbox_t);
+
+static p_tx_ring_t nxge_txdma_get_ring(p_nxge_t, uint16_t);
+static nxge_status_t nxge_tx_err_evnts(p_nxge_t, uint_t,
+	p_nxge_ldv_t, tx_cs_t);
+static p_tx_mbox_t nxge_txdma_get_mbox(p_nxge_t, uint16_t);
+static nxge_status_t nxge_txdma_fatal_err_recover(p_nxge_t,
+	uint16_t, p_tx_ring_t);
+
+nxge_status_t
+nxge_init_txdma_channels(p_nxge_t nxgep)
+{
+	nxge_status_t		status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_init_txdma_channels"));
+
+	status = nxge_map_txdma(nxgep);
+	if (status != NXGE_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"<== nxge_init_txdma_channels: status 0x%x", status));
+		return (status);
+	}
+
+	status = nxge_txdma_hw_start(nxgep);
+	if (status != NXGE_OK) {
+		nxge_unmap_txdma(nxgep);
+		return (status);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+		"<== nxge_init_txdma_channels: status 0x%x", status));
+
+	return (NXGE_OK);
+}
+
+void
+nxge_uninit_txdma_channels(p_nxge_t nxgep)
+{
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_uninit_txdma_channels"));
+
+	nxge_txdma_hw_stop(nxgep);
+	nxge_unmap_txdma(nxgep);
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+		"<== nxge_uninit_txdma_channels"));
+}
+
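+/*
+ * nxge_setup_dma_common
+ * Carve a region of (entries * size) bytes out of the DMA area described by
+ * src_p for use by dest_p, then advance src_p (kernel address, DMA cookie
+ * address and remaining length) past the carved region so that subsequent
+ * carve-outs come from the remainder.
+ */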
+void
+nxge_setup_dma_common(p_nxge_dma_common_t dest_p, p_nxge_dma_common_t src_p,
+	uint32_t entries, uint32_t size)
+{
+	size_t		tsize;
+	*dest_p = *src_p;
+	tsize = size * entries;
+	dest_p->alength = tsize;
+	dest_p->nblocks = entries;
+	dest_p->block_size = size;
+	dest_p->offset += tsize;
+
+	src_p->kaddrp = (caddr_t)dest_p->kaddrp + tsize;
+	src_p->alength -= tsize;
+	src_p->dma_cookie.dmac_laddress += tsize;
+	src_p->dma_cookie.dmac_size -= tsize;
+}
+
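+/*
+ * nxge_reset_txdma_channel
+ * Reset one transmit DMA channel: perform a full channel reset when the RST
+ * bit is set in reg_data, otherwise reset through the channel control
+ * operation.  The kick (tail) register is always cleared afterwards since
+ * the hardware does not reset it.
+ */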
+nxge_status_t
+nxge_reset_txdma_channel(p_nxge_t nxgep, uint16_t channel, uint64_t reg_data)
+{
+	npi_status_t		rs = NPI_SUCCESS;
+	nxge_status_t		status = NXGE_OK;
+	npi_handle_t		handle;
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, " ==> nxge_reset_txdma_channel"));
+
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	if ((reg_data & TX_CS_RST_MASK) == TX_CS_RST_MASK) {
+		rs = npi_txdma_channel_reset(handle, channel);
+	} else {
+		rs = npi_txdma_channel_control(handle, TXDMA_RESET,
+				channel);
+	}
+
+	if (rs != NPI_SUCCESS) {
+		status = NXGE_ERROR | rs;
+	}
+
+	/*
+	 * Reset the tail (kick) register to 0.
+	 * (Hardware will not reset it; a Tx overflow fatal
+	 * error results if the tail is not set to 0 after reset.)
+	 */
+	TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, " <== nxge_reset_txdma_channel"));
+	return (status);
+}
+
+nxge_status_t
+nxge_init_txdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
+		p_tx_dma_ent_msk_t mask_p)
+{
+	npi_handle_t		handle;
+	npi_status_t		rs = NPI_SUCCESS;
+	nxge_status_t		status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+		"<== nxge_init_txdma_channel_event_mask"));
+
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	rs = npi_txdma_event_mask(handle, OP_SET, channel, mask_p);
+	if (rs != NPI_SUCCESS) {
+		status = NXGE_ERROR | rs;
+	}
+
+	return (status);
+}
+
+nxge_status_t
+nxge_init_txdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
+	uint64_t reg_data)
+{
+	npi_handle_t		handle;
+	npi_status_t		rs = NPI_SUCCESS;
+	nxge_status_t		status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+		"<== nxge_init_txdma_channel_cntl_stat"));
+
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	rs = npi_txdma_control_status(handle, OP_SET, channel,
+			(p_tx_cs_t)&reg_data);
+
+	if (rs != NPI_SUCCESS) {
+		status = NXGE_ERROR | rs;
+	}
+
+	return (status);
+}
+
+nxge_status_t
+nxge_enable_txdma_channel(p_nxge_t nxgep,
+	uint16_t channel, p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p)
+{
+	npi_handle_t		handle;
+	npi_status_t		rs = NPI_SUCCESS;
+	nxge_status_t		status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_enable_txdma_channel"));
+
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	/*
+	 * Use configuration data composed at init time.
+	 * Write to hardware the transmit ring configurations.
+	 */
+	rs = npi_txdma_ring_config(handle, OP_SET, channel,
+			(uint64_t *)&(tx_desc_p->tx_ring_cfig.value));
+
+	if (rs != NPI_SUCCESS) {
+		return (NXGE_ERROR | rs);
+	}
+
+	/* Write to hardware the mailbox */
+	rs = npi_txdma_mbox_config(handle, OP_SET, channel,
+		(uint64_t *)&mbox_p->tx_mbox.dma_cookie.dmac_laddress);
+
+	if (rs != NPI_SUCCESS) {
+		return (NXGE_ERROR | rs);
+	}
+
+	/* Start the DMA engine. */
+	rs = npi_txdma_channel_init_enable(handle, channel);
+
+	if (rs != NPI_SUCCESS) {
+		return (NXGE_ERROR | rs);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_enable_txdma_channel"));
+
+	return (status);
+}
+
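+/*
+ * nxge_fill_tx_hdr
+ * Build the internal transmit packet header for an outgoing frame: record
+ * either the total transfer length or the pad count, flag LLC and VLAN
+ * encapsulation, record the L3 start offset and IP header length for
+ * IPv4/IPv6 packets, and set the packet type so the hardware computes the
+ * L4 checksum for TCP or UDP when requested.
+ */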
+void
+nxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len,
+		boolean_t l4_cksum, int pkt_len, uint8_t npads,
+		p_tx_pkt_hdr_all_t pkthdrp)
+{
+	p_tx_pkt_header_t	hdrp;
+	p_mblk_t 		nmp;
+	uint64_t		tmp;
+	size_t 			mblk_len;
+	size_t 			iph_len;
+	size_t 			hdrs_size;
+	uint8_t			hdrs_buf[sizeof (struct ether_header) +
+					64 + sizeof (uint32_t)];
+	uint8_t 		*ip_buf;
+	uint16_t		eth_type;
+	uint8_t			ipproto;
+	boolean_t		is_vlan = B_FALSE;
+	size_t			eth_hdr_size;
+
+	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: mp $%p", mp));
+
+	/*
+	 * Caller should zero out the headers first.
+	 */
+	hdrp = (p_tx_pkt_header_t)&pkthdrp->pkthdr;
+
+	if (fill_len) {
+		NXGE_DEBUG_MSG((NULL, TX_CTL,
+			"==> nxge_fill_tx_hdr: pkt_len %d "
+			"npads %d", pkt_len, npads));
+		tmp = (uint64_t)pkt_len;
+		hdrp->value |= (tmp << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT);
+		goto fill_tx_header_done;
+	}
+
+	tmp = (uint64_t)npads;
+	hdrp->value |= (tmp << TX_PKT_HEADER_PAD_SHIFT);
+
+	/*
+	 * mp is the original data packet (does not include the
+	 * Neptune transmit header).
+	 */
+	nmp = mp;
+	mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr;
+	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: "
+		"mp $%p b_rptr $%p len %d",
+		mp, nmp->b_rptr, mblk_len));
+	ip_buf = NULL;
+	bcopy(nmp->b_rptr, &hdrs_buf[0], sizeof (struct ether_vlan_header));
+	eth_type = ntohs(((p_ether_header_t)hdrs_buf)->ether_type);
+	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: (value 0x%llx) "
+		"ether type 0x%x", hdrp->value, eth_type));
+
+	if (eth_type < ETHERMTU) {
+		tmp = 1ull;
+		hdrp->value |= (tmp << TX_PKT_HEADER_LLC_SHIFT);
+		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: LLC "
+			"value 0x%llx", hdrp->value));
+		if (*(hdrs_buf + sizeof (struct ether_header))
+				== LLC_SNAP_SAP) {
+			eth_type = ntohs(*((uint16_t *)(hdrs_buf +
+					sizeof (struct ether_header) + 6)));
+			NXGE_DEBUG_MSG((NULL, TX_CTL,
+				"==> nxge_tx_pkt_hdr_init: LLC ether type 0x%x",
+				eth_type));
+		} else {
+			goto fill_tx_header_done;
+		}
+	} else if (eth_type == VLAN_ETHERTYPE) {
+		tmp = 1ull;
+		hdrp->value |= (tmp << TX_PKT_HEADER_VLAN__SHIFT);
+
+		eth_type = ntohs(((struct ether_vlan_header *)
+			hdrs_buf)->ether_type);
+		is_vlan = B_TRUE;
+		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: VLAN "
+			"value 0x%llx", hdrp->value));
+	}
+
+	if (!is_vlan) {
+		eth_hdr_size = sizeof (struct ether_header);
+	} else {
+		eth_hdr_size = sizeof (struct ether_vlan_header);
+	}
+
+	switch (eth_type) {
+	case ETHERTYPE_IP:
+		if (mblk_len > eth_hdr_size + sizeof (uint8_t)) {
+			ip_buf = nmp->b_rptr + eth_hdr_size;
+			mblk_len -= eth_hdr_size;
+			iph_len = ((*ip_buf) & 0x0f);
+			if (mblk_len > (iph_len + sizeof (uint32_t))) {
+				ip_buf = nmp->b_rptr;
+				ip_buf += eth_hdr_size;
+			} else {
+				ip_buf = NULL;
+			}
+
+		}
+		if (ip_buf == NULL) {
+			hdrs_size = 0;
+			((p_ether_header_t)hdrs_buf)->ether_type = 0;
+			while ((nmp) && (hdrs_size <
+					sizeof (hdrs_buf))) {
+				mblk_len = (size_t)nmp->b_wptr -
+					(size_t)nmp->b_rptr;
+				if (mblk_len >=
+					(sizeof (hdrs_buf) - hdrs_size))
+					mblk_len = sizeof (hdrs_buf) -
+						hdrs_size;
+				bcopy(nmp->b_rptr,
+					&hdrs_buf[hdrs_size], mblk_len);
+				hdrs_size += mblk_len;
+				nmp = nmp->b_cont;
+			}
+			ip_buf = hdrs_buf;
+			ip_buf += eth_hdr_size;
+			iph_len = ((*ip_buf) & 0x0f);
+		}
+
+		ipproto = ip_buf[9];
+
+		tmp = (uint64_t)iph_len;
+		hdrp->value |= (tmp << TX_PKT_HEADER_IHL_SHIFT);
+		tmp = (uint64_t)(eth_hdr_size >> 1);
+		hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);
+
+		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv4 "
+			" iph_len %d l3start %d eth_hdr_size %d proto 0x%x "
+			"tmp 0x%x",
+			iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
+			ipproto, tmp));
+		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IP "
+			"value 0x%llx", hdrp->value));
+
+		break;
+
+	case ETHERTYPE_IPV6:
+		hdrs_size = 0;
+		((p_ether_header_t)hdrs_buf)->ether_type = 0;
+		while ((nmp) && (hdrs_size <
+				sizeof (hdrs_buf))) {
+			mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr;
+			if (mblk_len >=
+				(sizeof (hdrs_buf) - hdrs_size))
+				mblk_len = sizeof (hdrs_buf) -
+					hdrs_size;
+			bcopy(nmp->b_rptr,
+				&hdrs_buf[hdrs_size], mblk_len);
+			hdrs_size += mblk_len;
+			nmp = nmp->b_cont;
+		}
+		ip_buf = hdrs_buf;
+		ip_buf += eth_hdr_size;
+
+		tmp = 1ull;
+		hdrp->value |= (tmp << TX_PKT_HEADER_IP_VER_SHIFT);
+
+		tmp = (eth_hdr_size >> 1);
+		hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);
+
+		/* byte 6 is the next header protocol */
+		ipproto = ip_buf[6];
+
+		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv6 "
+			" iph_len %d l3start %d eth_hdr_size %d proto 0x%x",
+			iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
+			ipproto));
+		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IPv6 "
+			"value 0x%llx", hdrp->value));
+
+		break;
+
+	default:
+		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: non-IP"));
+		goto fill_tx_header_done;
+	}
+
+	switch (ipproto) {
+	case IPPROTO_TCP:
+		NXGE_DEBUG_MSG((NULL, TX_CTL,
+			"==> nxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum));
+		if (l4_cksum) {
+			tmp = 1ull;
+			hdrp->value |= (tmp << TX_PKT_HEADER_PKT_TYPE_SHIFT);
+			NXGE_DEBUG_MSG((NULL, TX_CTL,
+				"==> nxge_tx_pkt_hdr_init: TCP CKSUM "
+				"value 0x%llx", hdrp->value));
+		}
+
+		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: TCP "
+			"value 0x%llx", hdrp->value));
+		break;
+
+	case IPPROTO_UDP:
+		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: UDP"));
+		if (l4_cksum) {
+			tmp = 0x2ull;
+			hdrp->value |= (tmp << TX_PKT_HEADER_PKT_TYPE_SHIFT);
+		}
+		NXGE_DEBUG_MSG((NULL, TX_CTL,
+			"==> nxge_tx_pkt_hdr_init: UDP "
+			"value 0x%llx", hdrp->value));
+		break;
+
+	default:
+		goto fill_tx_header_done;
+	}
+
+fill_tx_header_done:
+	NXGE_DEBUG_MSG((NULL, TX_CTL,
+		"==> nxge_fill_tx_hdr: pkt_len %d  "
+		"npads %d value 0x%llx", pkt_len, npads, hdrp->value));
+
+	NXGE_DEBUG_MSG((NULL, TX_CTL, "<== nxge_fill_tx_hdr"));
+}
+
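+/*
+ * nxge_tx_pkt_header_reserve
+ * Allocate a new message block, link the original packet behind it and
+ * reserve TX_PKT_HEADER_SIZE bytes at the end of the new block for the
+ * transmit packet header.  Returns NULL if the allocation fails.
+ */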
+/*ARGSUSED*/
+p_mblk_t
+nxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads)
+{
+	p_mblk_t 		newmp = NULL;
+
+	if ((newmp = allocb(TX_PKT_HEADER_SIZE, BPRI_MED)) == NULL) {
+		NXGE_DEBUG_MSG((NULL, TX_CTL,
+			"<== nxge_tx_pkt_header_reserve: allocb failed"));
+		return (NULL);
+	}
+
+	NXGE_DEBUG_MSG((NULL, TX_CTL,
+		"==> nxge_tx_pkt_header_reserve: get new mp"));
+	DB_TYPE(newmp) = M_DATA;
+	newmp->b_rptr = newmp->b_wptr = DB_LIM(newmp);
+	linkb(newmp, mp);
+	newmp->b_rptr -= TX_PKT_HEADER_SIZE;
+
+	NXGE_DEBUG_MSG((NULL, TX_CTL, "==>nxge_tx_pkt_header_reserve: "
+		"b_rptr $%p b_wptr $%p",
+		newmp->b_rptr, newmp->b_wptr));
+
+	NXGE_DEBUG_MSG((NULL, TX_CTL,
+		"<== nxge_tx_pkt_header_reserve: use new mp"));
+
+	return (newmp);
+}
+
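+/*
+ * nxge_tx_pkt_nmblocks
+ * Walk the mblk chain and estimate how many transmit descriptors (gather
+ * pointers) the packet needs.  Fragments below the bcopy threshold are
+ * coalesced into one descriptor, fragments larger than the 4K hardware
+ * transfer limit are split with dupb(), and the chain is pulled up with
+ * msgpullup() if it would exceed the hardware limit of 15 gather pointers.
+ * Returns 0 on allocation failure.
+ */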
+int
+nxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p)
+{
+	uint_t 			nmblks;
+	ssize_t			len;
+	uint_t 			pkt_len;
+	p_mblk_t 		nmp, bmp, tmp;
+	uint8_t 		*b_wptr;
+
+	NXGE_DEBUG_MSG((NULL, TX_CTL,
+		"==> nxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p "
+		"len %d", mp, mp->b_rptr, mp->b_wptr, MBLKL(mp)));
+
+	nmp = mp;
+	bmp = mp;
+	nmblks = 0;
+	pkt_len = 0;
+	*tot_xfer_len_p = 0;
+
+	while (nmp) {
+		len = MBLKL(nmp);
+		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
+			"len %d pkt_len %d nmblks %d tot_xfer_len %d",
+			len, pkt_len, nmblks,
+			*tot_xfer_len_p));
+
+		if (len <= 0) {
+			bmp = nmp;
+			nmp = nmp->b_cont;
+			NXGE_DEBUG_MSG((NULL, TX_CTL,
+				"==> nxge_tx_pkt_nmblocks: "
+				"len (0) pkt_len %d nmblks %d",
+				pkt_len, nmblks));
+			continue;
+		}
+
+		*tot_xfer_len_p += len;
+		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
+			"len %d pkt_len %d nmblks %d tot_xfer_len %d",
+			len, pkt_len, nmblks,
+			*tot_xfer_len_p));
+
+		if (len < nxge_bcopy_thresh) {
+			NXGE_DEBUG_MSG((NULL, TX_CTL,
+				"==> nxge_tx_pkt_nmblocks: "
+				"len %d (< thresh) pkt_len %d nmblks %d",
+				len, pkt_len, nmblks));
+			if (pkt_len == 0)
+				nmblks++;
+			pkt_len += len;
+			if (pkt_len >= nxge_bcopy_thresh) {
+				pkt_len = 0;
+				len = 0;
+				nmp = bmp;
+			}
+		} else {
+			NXGE_DEBUG_MSG((NULL, TX_CTL,
+				"==> nxge_tx_pkt_nmblocks: "
+				"len %d (> thresh) pkt_len %d nmblks %d",
+				len, pkt_len, nmblks));
+			pkt_len = 0;
+			nmblks++;
+			/*
+			 * Hardware limits the transfer length to 4K.
+			 * If len is more than 4K, we need to break
+			 * it up to at most 2 more blocks.
+			 */
+			if (len > TX_MAX_TRANSFER_LENGTH) {
+				uint32_t	nsegs;
+
+				nsegs = 1;
+				if (len % (TX_MAX_TRANSFER_LENGTH * 2)) {
+					++nsegs;
+				}
+				NXGE_DEBUG_MSG((NULL, TX_CTL,
+					"==> nxge_tx_pkt_nmblocks: "
+					"len %d pkt_len %d nmblks %d nsegs %d",
+					len, pkt_len, nmblks, nsegs));
+				do {
+					b_wptr = nmp->b_rptr +
+						TX_MAX_TRANSFER_LENGTH;
+					nmp->b_wptr = b_wptr;
+					if ((tmp = dupb(nmp)) == NULL) {
+						return (0);
+					}
+					tmp->b_rptr = b_wptr;
+					tmp->b_wptr = nmp->b_wptr;
+					tmp->b_cont = nmp->b_cont;
+					nmp->b_cont = tmp;
+					nmblks++;
+					if (--nsegs) {
+						nmp = tmp;
+					}
+				} while (nsegs);
+				nmp = tmp;
+			}
+		}
+
+		/*
+		 * Hardware limits the transmit gather pointers to 15.
+		 */
+		if (nmp->b_cont && (nmblks + TX_GATHER_POINTERS_THRESHOLD) >
+				TX_MAX_GATHER_POINTERS) {
+			NXGE_DEBUG_MSG((NULL, TX_CTL,
+				"==> nxge_tx_pkt_nmblocks: pull msg - "
+				"len %d pkt_len %d nmblks %d",
+				len, pkt_len, nmblks));
+			/* Pull all message blocks from b_cont */
+			if ((tmp = msgpullup(nmp->b_cont, -1)) == NULL) {
+				return (0);
+			}
+			freemsg(nmp->b_cont);
+			nmp->b_cont = tmp;
+			pkt_len = 0;
+		}
+		bmp = nmp;
+		nmp = nmp->b_cont;
+	}
+
+	NXGE_DEBUG_MSG((NULL, TX_CTL,
+		"<== nxge_tx_pkt_nmblocks: rptr $%p wptr $%p "
+		"nmblks %d len %d tot_xfer_len %d",
+		mp->b_rptr, mp->b_wptr, nmblks,
+		MBLKL(mp), *tot_xfer_len_p));
+
+	return (nmblks);
+}
+
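+/*
+ * nxge_txdma_reclaim
+ * Reclaim transmit descriptors that the hardware has completed.  Read the
+ * hardware-maintained head and wrap bit, walk from the software read index
+ * up to the head, unbind the DMA or DVMA handle for each descriptor, free
+ * the associated message and update statistics.  Returns B_TRUE when the
+ * ring has room for at least nmblks more descriptors, B_FALSE otherwise.
+ */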
+boolean_t
+nxge_txdma_reclaim(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, int nmblks)
+{
+	boolean_t 		status = B_TRUE;
+	p_nxge_dma_common_t	tx_desc_dma_p;
+	nxge_dma_common_t	desc_area;
+	p_tx_desc_t 		tx_desc_ring_vp;
+	p_tx_desc_t 		tx_desc_p;
+	p_tx_desc_t 		tx_desc_pp;
+	tx_desc_t 		r_tx_desc;
+	p_tx_msg_t 		tx_msg_ring;
+	p_tx_msg_t 		tx_msg_p;
+	npi_handle_t		handle;
+	tx_ring_hdl_t		tx_head;
+	uint32_t 		pkt_len;
+	uint_t			tx_rd_index;
+	uint16_t		head_index, tail_index;
+	uint8_t			tdc;
+	boolean_t		head_wrap, tail_wrap;
+	p_nxge_tx_ring_stats_t tdc_stats;
+	int			rc;
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_reclaim"));
+
+	status = ((tx_ring_p->descs_pending < nxge_reclaim_pending) &&
+			(nmblks != 0));
+	NXGE_DEBUG_MSG((nxgep, TX_CTL,
+		"==> nxge_txdma_reclaim: pending %d  reclaim %d nmblks %d",
+			tx_ring_p->descs_pending, nxge_reclaim_pending,
+			nmblks));
+	if (!status) {
+		tx_desc_dma_p = &tx_ring_p->tdc_desc;
+		desc_area = tx_ring_p->tdc_desc;
+		handle = NXGE_DEV_NPI_HANDLE(nxgep);
+		tx_desc_ring_vp = tx_desc_dma_p->kaddrp;
+		tx_desc_ring_vp =
+			(p_tx_desc_t)DMA_COMMON_VPTR(desc_area);
+		tx_rd_index = tx_ring_p->rd_index;
+		tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
+		tx_msg_ring = tx_ring_p->tx_msg_ring;
+		tx_msg_p = &tx_msg_ring[tx_rd_index];
+		tdc = tx_ring_p->tdc;
+		tdc_stats = tx_ring_p->tdc_stats;
+		if (tx_ring_p->descs_pending > tdc_stats->tx_max_pend) {
+			tdc_stats->tx_max_pend = tx_ring_p->descs_pending;
+		}
+
+		tail_index = tx_ring_p->wr_index;
+		tail_wrap = tx_ring_p->wr_index_wrap;
+
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"==> nxge_txdma_reclaim: tdc %d tx_rd_index %d "
+			"tail_index %d tail_wrap %d "
+			"tx_desc_p $%p ($%p) ",
+			tdc, tx_rd_index, tail_index, tail_wrap,
+			tx_desc_p, (*(uint64_t *)tx_desc_p)));
+		/*
+		 * Read the hardware maintained transmit head
+		 * and wrap around bit.
+		 */
+		TXDMA_REG_READ64(handle, TX_RING_HDL_REG, tdc, &tx_head.value);
+		head_index =  tx_head.bits.ldw.head;
+		head_wrap = tx_head.bits.ldw.wrap;
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"==> nxge_txdma_reclaim: "
+			"tx_rd_index %d tail %d tail_wrap %d "
+			"head %d wrap %d",
+			tx_rd_index, tail_index, tail_wrap,
+			head_index, head_wrap));
+
+		if (head_index == tail_index) {
+			if (TXDMA_RING_EMPTY(head_index, head_wrap,
+					tail_index, tail_wrap) &&
+					(head_index == tx_rd_index)) {
+				NXGE_DEBUG_MSG((nxgep, TX_CTL,
+					"==> nxge_txdma_reclaim: EMPTY"));
+				return (B_TRUE);
+			}
+
+			NXGE_DEBUG_MSG((nxgep, TX_CTL,
+				"==> nxge_txdma_reclaim: Checking "
+					"if ring full"));
+			if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
+					tail_wrap)) {
+				NXGE_DEBUG_MSG((nxgep, TX_CTL,
+					"==> nxge_txdma_reclaim: full"));
+				return (B_FALSE);
+			}
+		}
+
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"==> nxge_txdma_reclaim: tx_rd_index and head_index"));
+
+		tx_desc_pp = &r_tx_desc;
+		while ((tx_rd_index != head_index) &&
+			(tx_ring_p->descs_pending != 0)) {
+
+			NXGE_DEBUG_MSG((nxgep, TX_CTL,
+				"==> nxge_txdma_reclaim: Checking if pending"));
+
+			NXGE_DEBUG_MSG((nxgep, TX_CTL,
+				"==> nxge_txdma_reclaim: "
+				"descs_pending %d ",
+				tx_ring_p->descs_pending));
+
+			NXGE_DEBUG_MSG((nxgep, TX_CTL,
+				"==> nxge_txdma_reclaim: "
+				"(tx_rd_index %d head_index %d "
+				"(tx_desc_p $%p)",
+				tx_rd_index, head_index,
+				tx_desc_p));
+
+			tx_desc_pp->value = tx_desc_p->value;
+			NXGE_DEBUG_MSG((nxgep, TX_CTL,
+				"==> nxge_txdma_reclaim: "
+				"(tx_rd_index %d head_index %d "
+				"tx_desc_p $%p (desc value 0x%llx) ",
+				tx_rd_index, head_index,
+				tx_desc_pp, (*(uint64_t *)tx_desc_pp)));
+
+			NXGE_DEBUG_MSG((nxgep, TX_CTL,
+				"==> nxge_txdma_reclaim: dump desc:"));
+
+			pkt_len = tx_desc_pp->bits.hdw.tr_len;
+			tdc_stats->obytes += pkt_len;
+			tdc_stats->opackets += tx_desc_pp->bits.hdw.sop;
+			NXGE_DEBUG_MSG((nxgep, TX_CTL,
+				"==> nxge_txdma_reclaim: pkt_len %d "
+				"tdc channel %d opackets %d",
+				pkt_len,
+				tdc,
+				tdc_stats->opackets));
+
+			if (tx_msg_p->flags.dma_type == USE_DVMA) {
+				NXGE_DEBUG_MSG((nxgep, TX_CTL,
+					"tx_desc_p = $%p "
+					"tx_desc_pp = $%p "
+					"index = %d",
+					tx_desc_p,
+					tx_desc_pp,
+					tx_ring_p->rd_index));
+				(void) dvma_unload(tx_msg_p->dvma_handle,
+					0, -1);
+				tx_msg_p->dvma_handle = NULL;
+				if (tx_ring_p->dvma_wr_index ==
+					tx_ring_p->dvma_wrap_mask) {
+					tx_ring_p->dvma_wr_index = 0;
+				} else {
+					tx_ring_p->dvma_wr_index++;
+				}
+				tx_ring_p->dvma_pending--;
+			} else if (tx_msg_p->flags.dma_type ==
+					USE_DMA) {
+				NXGE_DEBUG_MSG((nxgep, TX_CTL,
+					"==> nxge_txdma_reclaim: "
+					"USE DMA"));
+				if (rc = ddi_dma_unbind_handle
+					(tx_msg_p->dma_handle)) {
+					cmn_err(CE_WARN, "!nxge_reclaim: "
+						"ddi_dma_unbind_handle "
+						"failed. status %d", rc);
+				}
+			}
+			NXGE_DEBUG_MSG((nxgep, TX_CTL,
+				"==> nxge_txdma_reclaim: count packets"));
+			/*
+			 * count a chained packet only once.
+			 */
+			if (tx_msg_p->tx_message != NULL) {
+				freemsg(tx_msg_p->tx_message);
+				tx_msg_p->tx_message = NULL;
+			}
+
+			tx_msg_p->flags.dma_type = USE_NONE;
+			tx_rd_index = tx_ring_p->rd_index;
+			tx_rd_index = (tx_rd_index + 1) &
+					tx_ring_p->tx_wrap_mask;
+			tx_ring_p->rd_index = tx_rd_index;
+			tx_ring_p->descs_pending--;
+			tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
+			tx_msg_p = &tx_msg_ring[tx_rd_index];
+		}
+
+		status = (nmblks <= (tx_ring_p->tx_ring_size -
+				tx_ring_p->descs_pending -
+				TX_FULL_MARK));
+		if (status) {
+			cas32((uint32_t *)&tx_ring_p->queueing, 1, 0);
+		}
+	} else {
+		status = (nmblks <=
+			(tx_ring_p->tx_ring_size -
+				tx_ring_p->descs_pending -
+				TX_FULL_MARK));
+	}
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL,
+		"<== nxge_txdma_reclaim status = 0x%08x", status));
+
+	return (status);
+}
+
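+/*
+ * nxge_tx_intr
+ * Interrupt handler for a single transmit DMA channel.  Read the channel
+ * control/status register, reclaim completed descriptors and notify the MAC
+ * layer when the marked (mk) bit is set, process any error events, and
+ * re-arm the logical device group if this channel is its only member.
+ */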
+uint_t
+nxge_tx_intr(void *arg1, void *arg2)
+{
+	p_nxge_ldv_t		ldvp = (p_nxge_ldv_t)arg1;
+	p_nxge_t		nxgep = (p_nxge_t)arg2;
+	p_nxge_ldg_t		ldgp;
+	uint8_t			channel;
+	uint32_t		vindex;
+	npi_handle_t		handle;
+	tx_cs_t			cs;
+	p_tx_ring_t 		*tx_rings;
+	p_tx_ring_t 		tx_ring_p;
+	npi_status_t		rs = NPI_SUCCESS;
+	uint_t 			serviced = DDI_INTR_UNCLAIMED;
+	nxge_status_t 		status = NXGE_OK;
+
+	if (ldvp == NULL) {
+		NXGE_DEBUG_MSG((NULL, INT_CTL,
+			"<== nxge_tx_intr: nxgep $%p ldvp $%p",
+			nxgep, ldvp));
+		return (DDI_INTR_UNCLAIMED);
+	}
+
+	if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
+		nxgep = ldvp->nxgep;
+	}
+	NXGE_DEBUG_MSG((nxgep, INT_CTL,
+		"==> nxge_tx_intr: nxgep(arg2) $%p ldvp(arg1) $%p",
+		nxgep, ldvp));
+	/*
+	 * This interrupt handler is for a specific
+	 * transmit dma channel.
+	 */
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	/* Get the control and status for this channel. */
+	channel = ldvp->channel;
+	ldgp = ldvp->ldgp;
+	NXGE_DEBUG_MSG((nxgep, INT_CTL,
+		"==> nxge_tx_intr: nxgep $%p ldvp (ldvp) $%p "
+		"channel %d",
+		nxgep, ldvp, channel));
+
+	rs = npi_txdma_control_status(handle, OP_GET, channel, &cs);
+	vindex = ldvp->vdma_index;
+	NXGE_DEBUG_MSG((nxgep, INT_CTL,
+		"==> nxge_tx_intr:channel %d ring index %d status 0x%08x",
+		channel, vindex, rs));
+	if (!rs && cs.bits.ldw.mk) {
+		NXGE_DEBUG_MSG((nxgep, INT_CTL,
+			"==> nxge_tx_intr:channel %d ring index %d "
+			"status 0x%08x (mk bit set)",
+			channel, vindex, rs));
+		tx_rings = nxgep->tx_rings->rings;
+		tx_ring_p = tx_rings[vindex];
+		NXGE_DEBUG_MSG((nxgep, INT_CTL,
+			"==> nxge_tx_intr:channel %d ring index %d "
+			"status 0x%08x (mk bit set, calling reclaim)",
+			channel, vindex, rs));
+
+		MUTEX_ENTER(&tx_ring_p->lock);
+		(void) nxge_txdma_reclaim(nxgep, tx_rings[vindex], 0);
+		MUTEX_EXIT(&tx_ring_p->lock);
+		mac_tx_update(nxgep->mach);
+	}
+
+	/*
+	 * Process other transmit control and status.
+	 * Check the ldv state.
+	 */
+	status = nxge_tx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs);
+	/*
+	 * Rearm this logical group if this is a single device
+	 * group.
+	 */
+	if (ldgp->nldvs == 1) {
+		NXGE_DEBUG_MSG((nxgep, INT_CTL,
+			"==> nxge_tx_intr: rearm"));
+		if (status == NXGE_OK) {
+			(void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg,
+				B_TRUE, ldgp->ldg_timer);
+		}
+	}
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_tx_intr"));
+	serviced = DDI_INTR_CLAIMED;
+	return (serviced);
+}
+
+void
+nxge_txdma_stop(p_nxge_t nxgep)
+{
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop"));
+
+	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop"));
+}
+
+void
+nxge_txdma_stop_start(p_nxge_t nxgep)
+{
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_start"));
+
+	(void) nxge_txdma_stop(nxgep);
+
+	(void) nxge_fixup_txdma_rings(nxgep);
+	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
+	(void) nxge_tx_mac_enable(nxgep);
+	(void) nxge_txdma_hw_kick(nxgep);
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_start"));
+}
+
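+/*
+ * nxge_txdma_hw_mode
+ * Enable or disable every transmit DMA channel owned by this instance.  If
+ * a disable does not complete, an interrupt error (nack_pref) is injected
+ * and the disable retried so that the channel stops.
+ */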
+nxge_status_t
+nxge_txdma_hw_mode(p_nxge_t nxgep, boolean_t enable)
+{
+	int			i, ndmas;
+	uint16_t		channel;
+	p_tx_rings_t 		tx_rings;
+	p_tx_ring_t 		*tx_desc_rings;
+	npi_handle_t		handle;
+	npi_status_t		rs = NPI_SUCCESS;
+	nxge_status_t		status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+		"==> nxge_txdma_hw_mode: enable mode %d", enable));
+
+	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_txdma_mode: not initialized"));
+		return (NXGE_ERROR);
+	}
+
+	tx_rings = nxgep->tx_rings;
+	if (tx_rings == NULL) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_txdma_hw_mode: NULL global ring pointer"));
+		return (NXGE_ERROR);
+	}
+
+	tx_desc_rings = tx_rings->rings;
+	if (tx_desc_rings == NULL) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_txdma_hw_mode: NULL rings pointer"));
+		return (NXGE_ERROR);
+	}
+
+	ndmas = tx_rings->ndmas;
+	if (!ndmas) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"<== nxge_txdma_hw_mode: no dma channel allocated"));
+		return (NXGE_ERROR);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_mode: "
+		"tx_rings $%p tx_desc_rings $%p ndmas %d",
+		tx_rings, tx_desc_rings, ndmas));
+
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	for (i = 0; i < ndmas; i++) {
+		if (tx_desc_rings[i] == NULL) {
+			continue;
+		}
+		channel = tx_desc_rings[i]->tdc;
+		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+			"==> nxge_txdma_hw_mode: channel %d", channel));
+		if (enable) {
+			rs = npi_txdma_channel_enable(handle, channel);
+			NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+				"==> nxge_txdma_hw_mode: channel %d (enable) "
+				"rs 0x%x", channel, rs));
+		} else {
+			/*
+			 * Stop the dma channel and waits for the stop done.
+			 * If the stop done bit is not set, then force
+			 * an error so TXC will stop.
+			 * All channels bound to this port need to be stopped
+			 * and reset after injecting an interrupt error.
+			 */
+			rs = npi_txdma_channel_disable(handle, channel);
+			NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+				"==> nxge_txdma_hw_mode: channel %d (disable) "
+				"rs 0x%x", channel, rs));
+			{
+				tdmc_intr_dbg_t		intr_dbg;
+
+				if (rs != NPI_SUCCESS) {
+					/* Inject any error */
+					intr_dbg.value = 0;
+					intr_dbg.bits.ldw.nack_pref = 1;
+					NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+						"==> nxge_txdma_hw_mode: "
+						"channel %d (stop failed 0x%x) "
+						"(inject err)", channel, rs));
+					(void) npi_txdma_inj_int_error_set(
+						handle, channel, &intr_dbg);
+					rs = npi_txdma_channel_disable(handle,
+						channel);
+					NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+						"==> nxge_txdma_hw_mode: "
+						"channel %d (stop again 0x%x) "
+						"(after inject err)",
+						rs, channel));
+				}
+			}
+		}
+	}
+
+	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+		"<== nxge_txdma_hw_mode: status 0x%x", status));
+
+	return (status);
+}
+
+void
+nxge_txdma_enable_channel(p_nxge_t nxgep, uint16_t channel)
+{
+	npi_handle_t		handle;
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+		"==> nxge_txdma_enable_channel: channel %d", channel));
+
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	/* enable the transmit dma channels */
+	(void) npi_txdma_channel_enable(handle, channel);
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_txdma_enable_channel"));
+}
+
+void
+nxge_txdma_disable_channel(p_nxge_t nxgep, uint16_t channel)
+{
+	npi_handle_t		handle;
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+		"==> nxge_txdma_disable_channel: channel %d", channel));
+
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	/* stop the transmit dma channels */
+	(void) npi_txdma_channel_disable(handle, channel);
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_disable_channel"));
+}
+
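+/*
+ * nxge_txdma_stop_inj_err
+ * Stop one transmit DMA channel.  If the stop-done bit is not set after the
+ * first disable, inject an interrupt error (nack_pref) and retry the disable
+ * so the channel is forced to stop.
+ */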
+int
+nxge_txdma_stop_inj_err(p_nxge_t nxgep, int channel)
+{
+	npi_handle_t		handle;
+	tdmc_intr_dbg_t		intr_dbg;
+	int			status;
+	npi_status_t		rs = NPI_SUCCESS;
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_inj_err"));
+	/*
+	 * Stop the dma channel waits for the stop done.
+	 * If the stop done bit is not set, then create
+	 * an error.
+	 */
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	rs = npi_txdma_channel_disable(handle, channel);
+	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
+	if (status == NXGE_OK) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_txdma_stop_inj_err (channel %d): "
+			"stopped OK", channel));
+		return (status);
+	}
+
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+		"==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) "
+		"injecting error", channel, rs));
+	/* Inject any error */
+	intr_dbg.value = 0;
+	intr_dbg.bits.ldw.nack_pref = 1;
+	(void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);
+
+	/* Stop done bit will be set as a result of error injection */
+	rs = npi_txdma_channel_disable(handle, channel);
+	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
+	if (!(rs & NPI_TXDMA_STOP_FAILED)) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_txdma_stop_inj_err (channel %d): "
+			"stopped OK ", channel));
+		return (status);
+	}
+
+#if	defined(NXGE_DEBUG)
+	nxge_txdma_regs_dump_channels(nxgep);
+#endif
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+		"==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) "
+		" (injected error but still not stopped)", channel, rs));
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_inj_err"));
+	return (status);
+}
+
+void
+nxge_hw_start_tx(p_nxge_t nxgep)
+{
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_tx"));
+
+	(void) nxge_txdma_hw_start(nxgep);
+	(void) nxge_tx_mac_enable(nxgep);
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_tx"));
+}
+
+/*ARGSUSED*/
+void
+nxge_fixup_txdma_rings(p_nxge_t nxgep)
+{
+	int			index, ndmas;
+	uint16_t		channel;
+	p_tx_rings_t 		tx_rings;
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_txdma_rings"));
+
+	/*
+	 * For each transmit channel, reclaim each descriptor and
+	 * free buffers.
+	 */
+	tx_rings = nxgep->tx_rings;
+	if (tx_rings == NULL) {
+		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+			"<== nxge_fixup_txdma_rings: NULL ring pointer"));
+		return;
+	}
+
+	ndmas = tx_rings->ndmas;
+	if (!ndmas) {
+		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+			"<== nxge_fixup_txdma_rings: no channel allocated"));
+		return;
+	}
+
+	if (tx_rings->rings == NULL) {
+		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+			"<== nxge_fixup_txdma_rings: NULL rings pointer"));
+		return;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_fixup_txdma_rings: "
+		"tx_rings $%p tx_desc_rings $%p ndmas %d",
+		tx_rings, tx_rings->rings, ndmas));
+
+	for (index = 0; index < ndmas; index++) {
+		channel = tx_rings->rings[index]->tdc;
+		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+			"==> nxge_fixup_txdma_rings: channel %d", channel));
+
+		nxge_txdma_fixup_channel(nxgep, tx_rings->rings[index],
+			channel);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_txdma_rings"));
+}
+
+/*ARGSUSED*/
+void
+nxge_txdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
+{
+	p_tx_ring_t	ring_p;
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_channel"));
+	ring_p = nxge_txdma_get_ring(nxgep, channel);
+	if (ring_p == NULL) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
+		return;
+	}
+
+	if (ring_p->tdc != channel) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_txdma_fix_channel: channel not matched "
+			"ring tdc %d passed channel %d",
+			ring_p->tdc, channel));
+		return;
+	}
+
+	nxge_txdma_fixup_channel(nxgep, ring_p, channel);
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
+}
+
+/*ARGSUSED*/
+void
+nxge_txdma_fixup_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
+{
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_channel"));
+
+	if (ring_p == NULL) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_txdma_fixup_channel: NULL ring pointer"));
+		return;
+	}
+
+	if (ring_p->tdc != channel) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_txdma_fixup_channel: channel not matched "
+			"ring tdc %d passed channel %d",
+			ring_p->tdc, channel));
+		return;
+	}
+
+	MUTEX_ENTER(&ring_p->lock);
+	(void) nxge_txdma_reclaim(nxgep, ring_p, 0);
+	ring_p->rd_index = 0;
+	ring_p->wr_index = 0;
+	ring_p->ring_head.value = 0;
+	ring_p->ring_kick_tail.value = 0;
+	ring_p->descs_pending = 0;
+	MUTEX_EXIT(&ring_p->lock);
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_channel"));
+}
+
+/*ARGSUSED*/
+void
+nxge_txdma_hw_kick(p_nxge_t nxgep)
+{
+	int			index, ndmas;
+	uint16_t		channel;
+	p_tx_rings_t 		tx_rings;
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick"));
+
+	tx_rings = nxgep->tx_rings;
+	if (tx_rings == NULL) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_txdma_hw_kick: NULL ring pointer"));
+		return;
+	}
+
+	ndmas = tx_rings->ndmas;
+	if (!ndmas) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_txdma_hw_kick: no channel allocated"));
+		return;
+	}
+
+	if (tx_rings->rings == NULL) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_txdma_hw_kick: NULL rings pointer"));
+		return;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_kick: "
+		"tx_rings $%p tx_desc_rings $%p ndmas %d",
+		tx_rings, tx_rings->rings, ndmas));
+
+	for (index = 0; index < ndmas; index++) {
+		channel = tx_rings->rings[index]->tdc;
+		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+			"==> nxge_txdma_hw_kick: channel %d", channel));
+		nxge_txdma_hw_kick_channel(nxgep, tx_rings->rings[index],
+			channel);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick"));
+}
+
+/*ARGSUSED*/
+void
+nxge_txdma_kick_channel(p_nxge_t nxgep, uint16_t channel)
+{
+	p_tx_ring_t	ring_p;
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_kick_channel"));
+
+	ring_p = nxge_txdma_get_ring(nxgep, channel);
+	if (ring_p == NULL) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_txdma_kick_channel: NULL ring pointer"));
+		return;
+	}
+
+	if (ring_p->tdc != channel) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_txdma_kick_channel: channel not matched "
+			"ring tdc %d passed channel %d",
+			ring_p->tdc, channel));
+		return;
+	}
+
+	nxge_txdma_hw_kick_channel(nxgep, ring_p, channel);
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_kick_channel"));
+}
+
+/*ARGSUSED*/
+void
+nxge_txdma_hw_kick_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
+{
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick_channel"));
+
+	if (ring_p == NULL) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_txdma_hw_kick_channel: NULL ring pointer"));
+		return;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick_channel"));
+}
+
+/*ARGSUSED*/
+void
+nxge_check_tx_hang(p_nxge_t nxgep)
+{
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_check_tx_hang"));
+
+	/*
+	 * Needs inputs from hardware for regs:
+	 *	head index had not moved since last timeout.
+	 *	packets not transmitted or stuffed registers.
+	 */
+	if (nxge_txdma_hung(nxgep)) {
+		nxge_fixup_hung_txdma_rings(nxgep);
+	}
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_check_tx_hang"));
+}
+
+int
+nxge_txdma_hung(p_nxge_t nxgep)
+{
+	int			index, ndmas;
+	uint16_t		channel;
+	p_tx_rings_t 		tx_rings;
+	p_tx_ring_t 		tx_ring_p;
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hung"));
+	tx_rings = nxgep->tx_rings;
+	if (tx_rings == NULL) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_txdma_hung: NULL ring pointer"));
+		return (B_FALSE);
+	}
+
+	ndmas = tx_rings->ndmas;
+	if (!ndmas) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_txdma_hung: no channel "
+			"allocated"));
+		return (B_FALSE);
+	}
+
+	if (tx_rings->rings == NULL) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_txdma_hung: NULL rings pointer"));
+		return (B_FALSE);
+	}
+
+	for (index = 0; index < ndmas; index++) {
+		channel = tx_rings->rings[index]->tdc;
+		tx_ring_p = tx_rings->rings[index];
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"==> nxge_txdma_hung: channel %d", channel));
+		if (nxge_txdma_channel_hung(nxgep, tx_ring_p, channel)) {
+			return (B_TRUE);
+		}
+	}
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hung"));
+
+	return (B_FALSE);
+}
+
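+/*
+ * nxge_txdma_channel_hung
+ * Decide whether a transmit channel appears hung: reclaim completed
+ * descriptors, then compare the hardware head/wrap against the software
+ * tail and read index.  A ring that is still full after the reclaim pass is
+ * reported as hung.
+ */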
+int
+nxge_txdma_channel_hung(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, uint16_t channel)
+{
+	uint16_t		head_index, tail_index;
+	boolean_t		head_wrap, tail_wrap;
+	npi_handle_t		handle;
+	tx_ring_hdl_t		tx_head;
+	uint_t			tx_rd_index;
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_channel_hung"));
+
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	NXGE_DEBUG_MSG((nxgep, TX_CTL,
+		"==> nxge_txdma_channel_hung: channel %d", channel));
+	MUTEX_ENTER(&tx_ring_p->lock);
+	(void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
+
+	tail_index = tx_ring_p->wr_index;
+	tail_wrap = tx_ring_p->wr_index_wrap;
+	tx_rd_index = tx_ring_p->rd_index;
+	MUTEX_EXIT(&tx_ring_p->lock);
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL,
+		"==> nxge_txdma_channel_hung: tdc %d tx_rd_index %d "
+		"tail_index %d tail_wrap %d ",
+		channel, tx_rd_index, tail_index, tail_wrap));
+	/*
+	 * Read the hardware maintained transmit head
+	 * and wrap around bit.
+	 */
+	(void) npi_txdma_ring_head_get(handle, channel, &tx_head);
+	head_index =  tx_head.bits.ldw.head;
+	head_wrap = tx_head.bits.ldw.wrap;
+	NXGE_DEBUG_MSG((nxgep, TX_CTL,
+		"==> nxge_txdma_channel_hung: "
+		"tx_rd_index %d tail %d tail_wrap %d "
+		"head %d wrap %d",
+		tx_rd_index, tail_index, tail_wrap,
+		head_index, head_wrap));
+
+	if (TXDMA_RING_EMPTY(head_index, head_wrap,
+			tail_index, tail_wrap) &&
+			(head_index == tx_rd_index)) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"==> nxge_txdma_channel_hung: EMPTY"));
+		return (B_FALSE);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL,
+		"==> nxge_txdma_channel_hung: Checking if ring full"));
+	if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
+			tail_wrap)) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"==> nxge_txdma_channel_hung: full"));
+		return (B_TRUE);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_channel_hung"));
+
+	return (B_FALSE);
+}
+
+/*ARGSUSED*/
+void
+nxge_fixup_hung_txdma_rings(p_nxge_t nxgep)
+{
+	int			index, ndmas;
+	uint16_t		channel;
+	p_tx_rings_t 		tx_rings;
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_hung_txdma_rings"));
+	tx_rings = nxgep->tx_rings;
+	if (tx_rings == NULL) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_fixup_hung_txdma_rings: NULL ring pointer"));
+		return;
+	}
+
+	ndmas = tx_rings->ndmas;
+	if (!ndmas) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_fixup_hung_txdma_rings: no channel "
+			"allocated"));
+		return;
+	}
+
+	if (tx_rings->rings == NULL) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_fixup_hung_txdma_rings: NULL rings pointer"));
+		return;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_hung_txdma_rings: "
+		"tx_rings $%p tx_desc_rings $%p ndmas %d",
+		tx_rings, tx_rings->rings, ndmas));
+
+	for (index = 0; index < ndmas; index++) {
+		channel = tx_rings->rings[index]->tdc;
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"==> nxge_fixup_hung_txdma_rings: channel %d",
+			channel));
+
+		nxge_txdma_fixup_hung_channel(nxgep, tx_rings->rings[index],
+			channel);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_hung_txdma_rings"));
+}
+
+/*ARGSUSED*/
+void
+nxge_txdma_fix_hung_channel(p_nxge_t nxgep, uint16_t channel)
+{
+	p_tx_ring_t	ring_p;
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_hung_channel"));
+	ring_p = nxge_txdma_get_ring(nxgep, channel);
+	if (ring_p == NULL) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_txdma_fix_hung_channel"));
+		return;
+	}
+
+	if (ring_p->tdc != channel) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_txdma_fix_hung_channel: channel not matched "
+			"ring tdc %d passed channel %d",
+			ring_p->tdc, channel));
+		return;
+	}
+
+	nxge_txdma_fixup_channel(nxgep, ring_p, channel);
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_hung_channel"));
+}
+
+/*ARGSUSED*/
+void
+nxge_txdma_fixup_hung_channel(p_nxge_t nxgep, p_tx_ring_t ring_p,
+	uint16_t channel)
+{
+	npi_handle_t		handle;
+	tdmc_intr_dbg_t		intr_dbg;
+	int			status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_hung_channel"));
+
+	if (ring_p == NULL) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_txdma_fixup_channel: NULL ring pointer"));
+		return;
+	}
+
+	if (ring_p->tdc != channel) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_txdma_fixup_hung_channel: channel "
+			"not matched "
+			"ring tdc %d passed channel %d",
+			ring_p->tdc, channel));
+		return;
+	}
+
+	/* Reclaim descriptors */
+	MUTEX_ENTER(&ring_p->lock);
+	(void) nxge_txdma_reclaim(nxgep, ring_p, 0);
+	MUTEX_EXIT(&ring_p->lock);
+
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	/*
+	 * Stop the dma channel waits for the stop done.
+	 * If the stop done bit is not set, then force
+	 * an error.
+	 */
+	status = npi_txdma_channel_disable(handle, channel);
+	if (!(status & NPI_TXDMA_STOP_FAILED)) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_txdma_fixup_hung_channel: stopped OK "
+			"ring tdc %d passed channel %d",
+			ring_p->tdc, channel));
+		return;
+	}
+
+	/* Inject any error */
+	intr_dbg.value = 0;
+	intr_dbg.bits.ldw.nack_pref = 1;
+	(void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);
+
+	/* Stop done bit will be set as a result of error injection */
+	status = npi_txdma_channel_disable(handle, channel);
+	if (!(status & NPI_TXDMA_STOP_FAILED)) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_txdma_fixup_hung_channel: stopped again "
+			"ring tdc %d passed channel %d",
+			ring_p->tdc, channel));
+		return;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL,
+		"<== nxge_txdma_fixup_hung_channel: stop done still not set!! "
+		"ring tdc %d passed channel %d",
+		ring_p->tdc, channel));
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_hung_channel"));
+}
+
+/*ARGSUSED*/
+void
+nxge_reclaim_rings(p_nxge_t nxgep)
+{
+	int			index, ndmas;
+	uint16_t		channel;
+	p_tx_rings_t 		tx_rings;
+	p_tx_ring_t 		tx_ring_p;
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reclaim_ring"));
+	tx_rings = nxgep->tx_rings;
+	if (tx_rings == NULL) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_reclaim_rings: NULL ring pointer"));
+		return;
+	}
+
+	ndmas = tx_rings->ndmas;
+	if (!ndmas) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_reclaim_rings: no channel "
+			"allocated"));
+		return;
+	}
+
+	if (tx_rings->rings == NULL) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_reclaim_rings: NULL rings pointer"));
+		return;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reclaim_rings: "
+		"tx_rings $%p tx_desc_rings $%p ndmas %d",
+		tx_rings, tx_rings->rings, ndmas));
+
+	for (index = 0; index < ndmas; index++) {
+		channel = tx_rings->rings[index]->tdc;
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"==> nxge_reclaim_rings: channel %d",
+			channel));
+		tx_ring_p = tx_rings->rings[index];
+		MUTEX_ENTER(&tx_ring_p->lock);
+		(void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
+		MUTEX_EXIT(&tx_ring_p->lock);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_reclaim_rings"));
+}
+
+void
+nxge_txdma_regs_dump_channels(p_nxge_t nxgep)
+{
+	int			index, ndmas;
+	uint16_t		channel;
+	p_tx_rings_t 		tx_rings;
+	npi_handle_t		handle;
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_regs_dump_channels"));
+
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	(void) npi_txdma_dump_fzc_regs(handle);
+
+	tx_rings = nxgep->tx_rings;
+	if (tx_rings == NULL) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_txdma_regs_dump_channels: NULL ring"));
+		return;
+	}
+
+	ndmas = tx_rings->ndmas;
+	if (!ndmas) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_txdma_regs_dump_channels: "
+			"no channel allocated"));
+		return;
+	}
+
+	if (tx_rings->rings == NULL) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_txdma_regs_dump_channels: NULL rings"));
+		return;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_regs_dump_channels: "
+		"tx_rings $%p tx_desc_rings $%p ndmas %d",
+		tx_rings, tx_rings->rings, ndmas));
+
+	for (index = 0; index < ndmas; index++) {
+		channel = tx_rings->rings[index]->tdc;
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"==> nxge_txdma_regs_dump_channels: channel %d",
+			channel));
+		(void) npi_txdma_dump_tdc_regs(handle, channel);
+	}
+
+	/* Dump TXC registers */
+	(void) npi_txc_dump_fzc_regs(handle);
+	(void) npi_txc_dump_port_fzc_regs(handle, nxgep->function_num);
+
+	for (index = 0; index < ndmas; index++) {
+		channel = tx_rings->rings[index]->tdc;
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"==> nxge_txdma_regs_dump_channels: channel %d",
+			channel));
+		(void) npi_txc_dump_tdc_fzc_regs(handle, channel);
+	}
+
+	for (index = 0; index < ndmas; index++) {
+		channel = tx_rings->rings[index]->tdc;
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"==> nxge_txdma_regs_dump_channels: channel %d",
+			channel));
+		nxge_txdma_regs_dump(nxgep, channel);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_regs_dump_channels"));
+
+}
+
+void
+nxge_txdma_regs_dump(p_nxge_t nxgep, int channel)
+{
+	npi_handle_t		handle;
+	tx_ring_hdl_t 		hdl;
+	tx_ring_kick_t 		kick;
+	tx_cs_t 		cs;
+	txc_control_t		control;
+	uint32_t		bitmap = 0;
+	uint32_t		burst = 0;
+	uint32_t		bytes = 0;
+	dma_log_page_t		cfg;
+
+	printf("\n\tfunc # %d tdc %d ",
+		nxgep->function_num, channel);
+	cfg.page_num = 0;
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	(void) npi_txdma_log_page_get(handle, channel, &cfg);
+	printf("\n\tlog page func %d valid page 0 %d",
+		cfg.func_num, cfg.valid);
+	cfg.page_num = 1;
+	(void) npi_txdma_log_page_get(handle, channel, &cfg);
+	printf("\n\tlog page func %d valid page 1 %d",
+		cfg.func_num, cfg.valid);
+
+	(void) npi_txdma_ring_head_get(handle, channel, &hdl);
+	(void) npi_txdma_desc_kick_reg_get(handle, channel, &kick);
+	printf("\n\thead value is 0x%0llx",
+		(long long)hdl.value);
+	printf("\n\thead index %d", hdl.bits.ldw.head);
+	printf("\n\tkick value is 0x%0llx",
+		(long long)kick.value);
+	printf("\n\ttail index %d\n", kick.bits.ldw.tail);
+
+	(void) npi_txdma_control_status(handle, OP_GET, channel, &cs);
+	printf("\n\tControl status is 0x%0llx", (long long)cs.value);
+	printf("\n\tControl status RST state %d", cs.bits.ldw.rst);
+
+	(void) npi_txc_control(handle, OP_GET, &control);
+	(void) npi_txc_port_dma_list_get(handle, nxgep->function_num, &bitmap);
+	(void) npi_txc_dma_max_burst(handle, OP_GET, channel, &burst);
+	(void) npi_txc_dma_bytes_transmitted(handle, channel, &bytes);
+
+	printf("\n\tTXC port control 0x%0llx",
+		(long long)control.value);
+	printf("\n\tTXC port bitmap 0x%x", bitmap);
+	printf("\n\tTXC max burst %d", burst);
+	printf("\n\tTXC bytes xmt %d\n", bytes);
+
+	{
+		ipp_status_t status;
+
+		(void) npi_ipp_get_status(handle, nxgep->function_num, &status);
+		printf("\n\tIPP status 0x%llx\n",
+			(unsigned long long)status.value);
+	}
+}
+
+/*
+ * Static functions start here.
+ */
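+
+/*
+ * nxge_map_txdma
+ * Map the pre-allocated transmit buffer and control pools into per-channel
+ * descriptor rings and mailboxes.  On sun4v NIU (NIU_LP_WORKAROUND), the
+ * original buffer and control addresses and lengths are also recorded for
+ * the hypervisor logical-page binding.  On failure, channels mapped so far
+ * are unmapped again before returning.
+ */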
+static nxge_status_t
+nxge_map_txdma(p_nxge_t nxgep)
+{
+	int			i, ndmas;
+	uint16_t		channel;
+	p_tx_rings_t 		tx_rings;
+	p_tx_ring_t 		*tx_desc_rings;
+	p_tx_mbox_areas_t 	tx_mbox_areas_p;
+	p_tx_mbox_t		*tx_mbox_p;
+	p_nxge_dma_pool_t	dma_buf_poolp;
+	p_nxge_dma_pool_t	dma_cntl_poolp;
+	p_nxge_dma_common_t	*dma_buf_p;
+	p_nxge_dma_common_t	*dma_cntl_p;
+	nxge_status_t		status = NXGE_OK;
+#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
+	p_nxge_dma_common_t	t_dma_buf_p;
+	p_nxge_dma_common_t	t_dma_cntl_p;
+#endif
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma"));
+
+	dma_buf_poolp = nxgep->tx_buf_pool_p;
+	dma_cntl_poolp = nxgep->tx_cntl_pool_p;
+
+	if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"==> nxge_map_txdma: buf not allocated"));
+		return (NXGE_ERROR);
+	}
+
+	ndmas = dma_buf_poolp->ndmas;
+	if (!ndmas) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_map_txdma: no dma allocated"));
+		return (NXGE_ERROR);
+	}
+
+	dma_buf_p = dma_buf_poolp->dma_buf_pool_p;
+	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
+
+	tx_rings = (p_tx_rings_t)
+			KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP);
+	tx_desc_rings = (p_tx_ring_t *)KMEM_ZALLOC(
+			sizeof (p_tx_ring_t) * ndmas, KM_SLEEP);
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: "
+		"tx_rings $%p tx_desc_rings $%p",
+		tx_rings, tx_desc_rings));
+
+	tx_mbox_areas_p = (p_tx_mbox_areas_t)
+			KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP);
+	tx_mbox_p = (p_tx_mbox_t *)KMEM_ZALLOC(
+			sizeof (p_tx_mbox_t) * ndmas, KM_SLEEP);
+
+	/*
+	 * Map descriptors from the buffer pools for each dma channel.
+	 */
+	for (i = 0; i < ndmas; i++) {
+		/*
+		 * Set up and prepare buffer blocks, descriptors
+		 * and mailbox.
+		 */
+		channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel;
+		status = nxge_map_txdma_channel(nxgep, channel,
+				(p_nxge_dma_common_t *)&dma_buf_p[i],
+				(p_tx_ring_t *)&tx_desc_rings[i],
+				dma_buf_poolp->num_chunks[i],
+				(p_nxge_dma_common_t *)&dma_cntl_p[i],
+				(p_tx_mbox_t *)&tx_mbox_p[i]);
+		if (status != NXGE_OK) {
+			goto nxge_map_txdma_fail1;
+		}
+		tx_desc_rings[i]->index = (uint16_t)i;
+		tx_desc_rings[i]->tdc_stats = &nxgep->statsp->tdc_stats[i];
+
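+		/*
+		 * For the on-chip N2 NIU on sun4v, save the original
+		 * (pre-mapping) I/O addresses and sizes of the buffer and
+		 * control areas in the ring structure; hv_set is cleared so
+		 * that the hypervisor logical-page mapping can be programmed
+		 * later (NIU_LP_WORKAROUND).
+		 */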
+#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
+		if (nxgep->niu_type == N2_NIU && NXGE_DMA_BLOCK == 1) {
+			tx_desc_rings[i]->hv_set = B_FALSE;
+			t_dma_buf_p = (p_nxge_dma_common_t)dma_buf_p[i];
+			t_dma_cntl_p = (p_nxge_dma_common_t)dma_cntl_p[i];
+
+			tx_desc_rings[i]->hv_tx_buf_base_ioaddr_pp =
+				(uint64_t)t_dma_buf_p->orig_ioaddr_pp;
+			tx_desc_rings[i]->hv_tx_buf_ioaddr_size =
+				(uint64_t)t_dma_buf_p->orig_alength;
+
+			NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+				"==> nxge_map_txdma_channel: "
+				"hv data buf base io $%p "
+				"size 0x%llx (%d) "
+				"buf base io $%p "
+				"orig vatopa base io $%p "
+				"orig_len 0x%llx (%d)",
+				tx_desc_rings[i]->hv_tx_buf_base_ioaddr_pp,
+				tx_desc_rings[i]->hv_tx_buf_ioaddr_size,
+				tx_desc_rings[i]->hv_tx_buf_ioaddr_size,
+				t_dma_buf_p->ioaddr_pp,
+				t_dma_buf_p->orig_vatopa,
+				t_dma_buf_p->orig_alength,
+				t_dma_buf_p->orig_alength));
+
+			tx_desc_rings[i]->hv_tx_cntl_base_ioaddr_pp =
+				(uint64_t)t_dma_cntl_p->orig_ioaddr_pp;
+			tx_desc_rings[i]->hv_tx_cntl_ioaddr_size =
+				(uint64_t)t_dma_cntl_p->orig_alength;
+
+			NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+				"==> nxge_map_txdma_channel: "
+				"hv cntl base io $%p "
+				"orig ioaddr_pp ($%p) "
+				"orig vatopa ($%p) "
+				"size 0x%llx (%d 0x%x)",
+				tx_desc_rings[i]->hv_tx_cntl_base_ioaddr_pp,
+				t_dma_cntl_p->orig_ioaddr_pp,
+				t_dma_cntl_p->orig_vatopa,
+				tx_desc_rings[i]->hv_tx_cntl_ioaddr_size,
+				t_dma_cntl_p->orig_alength,
+				t_dma_cntl_p->orig_alength));
+		}
+#endif
+	}
+
+	tx_rings->ndmas = ndmas;
+	tx_rings->rings = tx_desc_rings;
+	nxgep->tx_rings = tx_rings;
+	tx_mbox_areas_p->txmbox_areas_p = tx_mbox_p;
+	nxgep->tx_mbox_areas_p = tx_mbox_areas_p;
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: "
+		"tx_rings $%p rings $%p",
+		nxgep->tx_rings, nxgep->tx_rings->rings));
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: "
+		"tx_rings $%p tx_desc_rings $%p",
+		nxgep->tx_rings, tx_desc_rings));
+
+	goto nxge_map_txdma_exit;
+
+nxge_map_txdma_fail1:
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+		"==> nxge_map_txdma: uninit tx desc "
+		"(status 0x%x channel %d i %d)",
+		status, channel, i));
+	i--;
+	for (; i >= 0; i--) {
+		channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel;
+		nxge_unmap_txdma_channel(nxgep, channel,
+			tx_desc_rings[i],
+			tx_mbox_p[i]);
+	}
+
+	KMEM_FREE(tx_desc_rings, sizeof (p_tx_ring_t) * ndmas);
+	KMEM_FREE(tx_rings, sizeof (tx_rings_t));
+	KMEM_FREE(tx_mbox_p, sizeof (p_tx_mbox_t) * ndmas);
+	KMEM_FREE(tx_mbox_areas_p, sizeof (tx_mbox_areas_t));
+
+nxge_map_txdma_exit:
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+		"==> nxge_map_txdma: "
+		"(status 0x%x channel %d)",
+		status, channel));
+
+	return (status);
+}
+
+static void
+nxge_unmap_txdma(p_nxge_t nxgep)
+{
+	int			i, ndmas;
+	uint8_t			channel;
+	p_tx_rings_t 		tx_rings;
+	p_tx_ring_t 		*tx_desc_rings;
+	p_tx_mbox_areas_t 	tx_mbox_areas_p;
+	p_tx_mbox_t		*tx_mbox_p;
+	p_nxge_dma_pool_t	dma_buf_poolp;
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_unmap_txdma"));
+
+	dma_buf_poolp = nxgep->tx_buf_pool_p;
+	if (!dma_buf_poolp->buf_allocated) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"==> nxge_unmap_txdma: buf not allocated"));
+		return;
+	}
+
+	ndmas = dma_buf_poolp->ndmas;
+	if (!ndmas) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_unmap_txdma: no dma allocated"));
+		return;
+	}
+
+	tx_rings = nxgep->tx_rings;
+	if (tx_rings == NULL) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_unmap_txdma: NULL ring pointer"));
+		return;
+	}
+
+	tx_desc_rings = tx_rings->rings;
+	if (tx_desc_rings == NULL) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_unmap_txdma: NULL ring pointers"));
+		return;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_unmap_txdma: "
+		"tx_rings $%p tx_desc_rings $%p ndmas %d",
+		tx_rings, tx_desc_rings, ndmas));
+
+	tx_mbox_areas_p = nxgep->tx_mbox_areas_p;
+	tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
+
+	for (i = 0; i < ndmas; i++) {
+		channel = tx_desc_rings[i]->tdc;
+		(void) nxge_unmap_txdma_channel(nxgep, channel,
+				(p_tx_ring_t)tx_desc_rings[i],
+				(p_tx_mbox_t)tx_mbox_p[i]);
+	}
+
+	KMEM_FREE(tx_desc_rings, sizeof (p_tx_ring_t) * ndmas);
+	KMEM_FREE(tx_rings, sizeof (tx_rings_t));
+	KMEM_FREE(tx_mbox_p, sizeof (p_tx_mbox_t) * ndmas);
+	KMEM_FREE(tx_mbox_areas_p, sizeof (tx_mbox_areas_t));
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+		"<== nxge_unmap_txdma"));
+}
+
+static nxge_status_t
+nxge_map_txdma_channel(p_nxge_t nxgep, uint16_t channel,
+	p_nxge_dma_common_t *dma_buf_p,
+	p_tx_ring_t *tx_desc_p,
+	uint32_t num_chunks,
+	p_nxge_dma_common_t *dma_cntl_p,
+	p_tx_mbox_t *tx_mbox_p)
+{
+	int	status = NXGE_OK;
+
+	/*
+	 * Set up and prepare buffer blocks, descriptors
+	 * and mailbox.
+	 */
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+		"==> nxge_map_txdma_channel (channel %d)", channel));
+	/*
+	 * Transmit buffer blocks
+	 */
+	status = nxge_map_txdma_channel_buf_ring(nxgep, channel,
+			dma_buf_p, tx_desc_p, num_chunks);
+	if (status != NXGE_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_map_txdma_channel (channel %d): "
+			"map buffer failed 0x%x", channel, status));
+		goto nxge_map_txdma_channel_exit;
+	}
+
+	/*
+	 * Transmit block ring, and mailbox.
+	 */
+	nxge_map_txdma_channel_cfg_ring(nxgep, channel, dma_cntl_p, *tx_desc_p,
+					tx_mbox_p);
+
+	goto nxge_map_txdma_channel_exit;
+
+nxge_map_txdma_channel_fail1:
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+		"==> nxge_map_txdma_channel: unmap buf"
+		"(status 0x%x channel %d)",
+		status, channel));
+	nxge_unmap_txdma_channel_buf_ring(nxgep, *tx_desc_p);
+
+nxge_map_txdma_channel_exit:
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+		"<== nxge_map_txdma_channel: "
+		"(status 0x%x channel %d)",
+		status, channel));
+
+	return (status);
+}
+
+/*ARGSUSED*/
+static void
+nxge_unmap_txdma_channel(p_nxge_t nxgep, uint16_t channel,
+	p_tx_ring_t tx_ring_p,
+	p_tx_mbox_t tx_mbox_p)
+{
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+		"==> nxge_unmap_txdma_channel (channel %d)", channel));
+	/*
+	 * unmap tx block ring, and mailbox.
+	 */
+	(void) nxge_unmap_txdma_channel_cfg_ring(nxgep,
+			tx_ring_p, tx_mbox_p);
+
+	/* unmap buffer blocks */
+	(void) nxge_unmap_txdma_channel_buf_ring(nxgep, tx_ring_p);
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_unmap_txdma_channel"));
+}
+
+/*ARGSUSED*/
+static void
+nxge_map_txdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel,
+	p_nxge_dma_common_t *dma_cntl_p,
+	p_tx_ring_t tx_ring_p,
+	p_tx_mbox_t *tx_mbox_p)
+{
+	p_tx_mbox_t 		mboxp;
+	p_nxge_dma_common_t 	cntl_dmap;
+	p_nxge_dma_common_t 	dmap;
+	p_tx_rng_cfig_t		tx_ring_cfig_p;
+	p_tx_ring_kick_t	tx_ring_kick_p;
+	p_tx_cs_t		tx_cs_p;
+	p_tx_dma_ent_msk_t	tx_evmask_p;
+	p_txdma_mbh_t		mboxh_p;
+	p_txdma_mbl_t		mboxl_p;
+	uint64_t		tx_desc_len;
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+		"==> nxge_map_txdma_channel_cfg_ring"));
+
+	cntl_dmap = *dma_cntl_p;
+
+	dmap = (p_nxge_dma_common_t)&tx_ring_p->tdc_desc;
+	nxge_setup_dma_common(dmap, cntl_dmap, tx_ring_p->tx_ring_size,
+			sizeof (tx_desc_t));
+	/*
+	 * Zero out transmit ring descriptors.
+	 */
+	bzero((caddr_t)dmap->kaddrp, dmap->alength);
+	tx_ring_cfig_p = &(tx_ring_p->tx_ring_cfig);
+	tx_ring_kick_p = &(tx_ring_p->tx_ring_kick);
+	tx_cs_p = &(tx_ring_p->tx_cs);
+	tx_evmask_p = &(tx_ring_p->tx_evmask);
+	tx_ring_cfig_p->value = 0;
+	tx_ring_kick_p->value = 0;
+	tx_cs_p->value = 0;
+	tx_evmask_p->value = 0;
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+		"==> nxge_map_txdma_channel_cfg_ring: channel %d des $%p",
+		dma_channel,
+		dmap->dma_cookie.dmac_laddress));
+
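+	/*
+	 * Build the ring configuration value: the DMA base address of the
+	 * descriptor ring is combined with the ring length (the shift by 3
+	 * suggests the length field is in units of 8 descriptors).
+	 */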
+	tx_ring_cfig_p->value = 0;
+	tx_desc_len = (uint64_t)(tx_ring_p->tx_ring_size >> 3);
+	tx_ring_cfig_p->value =
+		(dmap->dma_cookie.dmac_laddress & TX_RNG_CFIG_ADDR_MASK) |
+		(tx_desc_len << TX_RNG_CFIG_LEN_SHIFT);
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+		"==> nxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx",
+		dma_channel,
+		tx_ring_cfig_p->value));
+
+	tx_cs_p->bits.ldw.rst = 1;
+
+	/* Map in mailbox */
+	mboxp = (p_tx_mbox_t)
+		KMEM_ZALLOC(sizeof (tx_mbox_t), KM_SLEEP);
+	dmap = (p_nxge_dma_common_t)&mboxp->tx_mbox;
+	nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (txdma_mailbox_t));
+	mboxh_p = (p_txdma_mbh_t)&tx_ring_p->tx_mbox_mbh;
+	mboxl_p = (p_txdma_mbl_t)&tx_ring_p->tx_mbox_mbl;
+	mboxh_p->value = mboxl_p->value = 0;
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+		"==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
+		dmap->dma_cookie.dmac_laddress));
+
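+	/*
+	 * Split the 64-bit mailbox DMA address across the two mailbox
+	 * registers: the high-order bits go into the MBH register and the
+	 * low-order bits into the MBL register.
+	 */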
+	mboxh_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress >>
+				TXDMA_MBH_ADDR_SHIFT) & TXDMA_MBH_MASK);
+
+	mboxl_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress &
+				TXDMA_MBL_MASK) >> TXDMA_MBL_SHIFT);
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+		"==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
+		dmap->dma_cookie.dmac_laddress));
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+		"==> nxge_map_txdma_channel_cfg_ring: hmbox $%p "
+		"mbox $%p",
+		mboxh_p->bits.ldw.mbaddr, mboxl_p->bits.ldw.mbaddr));
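+	/*
+	 * Set the per-channel logical page state to its defaults: both
+	 * logical pages are marked valid, while the masks, values,
+	 * relocation registers and handle are left at zero.
+	 */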
+	tx_ring_p->page_valid.value = 0;
+	tx_ring_p->page_mask_1.value = tx_ring_p->page_mask_2.value = 0;
+	tx_ring_p->page_value_1.value = tx_ring_p->page_value_2.value = 0;
+	tx_ring_p->page_reloc_1.value = tx_ring_p->page_reloc_2.value = 0;
+	tx_ring_p->page_hdl.value = 0;
+
+	tx_ring_p->page_valid.bits.ldw.page0 = 1;
+	tx_ring_p->page_valid.bits.ldw.page1 = 1;
+
+	tx_ring_p->max_burst.value = 0;
+	tx_ring_p->max_burst.bits.ldw.dma_max_burst = TXC_DMA_MAX_BURST_DEFAULT;
+
+	*tx_mbox_p = mboxp;
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+				"<== nxge_map_txdma_channel_cfg_ring"));
+}
+
+/*ARGSUSED*/
+static void
+nxge_unmap_txdma_channel_cfg_ring(p_nxge_t nxgep,
+	p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
+{
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+		"==> nxge_unmap_txdma_channel_cfg_ring: channel %d",
+		tx_ring_p->tdc));
+
+	KMEM_FREE(tx_mbox_p, sizeof (tx_mbox_t));
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+		"<== nxge_unmap_txdma_channel_cfg_ring"));
+}
+
+static nxge_status_t
+nxge_map_txdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel,
+	p_nxge_dma_common_t *dma_buf_p,
+	p_tx_ring_t *tx_desc_p, uint32_t num_chunks)
+{
+	p_nxge_dma_common_t 	dma_bufp, tmp_bufp;
+	p_nxge_dma_common_t 	dmap;
+	nxge_os_dma_handle_t	tx_buf_dma_handle;
+	p_tx_ring_t 		tx_ring_p;
+	p_tx_msg_t 		tx_msg_ring;
+	nxge_status_t		status = NXGE_OK;
+	int			ddi_status = DDI_SUCCESS;
+	int			i, j, index;
+	uint32_t		size, bsize;
+	uint32_t 		nblocks, nmsgs;
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+		"==> nxge_map_txdma_channel_buf_ring"));
+
+	dma_bufp = tmp_bufp = *dma_buf_p;
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+		" nxge_map_txdma_channel_buf_ring: channel %d to map %d "
+		"chunks bufp $%p",
+		channel, num_chunks, dma_bufp));
+
+	nmsgs = 0;
+	for (i = 0; i < num_chunks; i++, tmp_bufp++) {
+		nmsgs += tmp_bufp->nblocks;
+		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+			"==> nxge_map_txdma_channel_buf_ring: channel %d "
+			"bufp $%p nblocks %d nmsgs %d",
+			channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
+	}
+	if (!nmsgs) {
+		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+			"<== nxge_map_txdma_channel_buf_ring: channel %d "
+			"no msg blocks",
+			channel));
+		status = NXGE_ERROR;
+		goto nxge_map_txdma_channel_buf_ring_exit;
+	}
+
+	tx_ring_p = (p_tx_ring_t)
+		KMEM_ZALLOC(sizeof (tx_ring_t), KM_SLEEP);
+	MUTEX_INIT(&tx_ring_p->lock, NULL, MUTEX_DRIVER,
+		(void *)nxgep->interrupt_cookie);
+	/*
+	 * Allocate the transmit message ring and DMA handles for
+	 * packets that are not copied into the premapped buffers.
+	 */
+	size = nmsgs * sizeof (tx_msg_t);
+	tx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
+	for (i = 0; i < nmsgs; i++) {
+		ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
+				DDI_DMA_DONTWAIT, 0,
+				&tx_msg_ring[i].dma_handle);
+		if (ddi_status != DDI_SUCCESS) {
+			status |= NXGE_DDI_FAILED;
+			break;
+		}
+	}
+	if (i < nmsgs) {
+		NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "Allocate handles failed."));
+		goto nxge_map_txdma_channel_buf_ring_fail1;
+	}
+
+	tx_ring_p->tdc = channel;
+	tx_ring_p->tx_msg_ring = tx_msg_ring;
+	tx_ring_p->tx_ring_size = nmsgs;
+	tx_ring_p->num_chunks = num_chunks;
+	if (!nxge_tx_intr_thres) {
+		nxge_tx_intr_thres = tx_ring_p->tx_ring_size/4;
+	}
+	tx_ring_p->tx_wrap_mask = tx_ring_p->tx_ring_size - 1;
+	tx_ring_p->rd_index = 0;
+	tx_ring_p->wr_index = 0;
+	tx_ring_p->ring_head.value = 0;
+	tx_ring_p->ring_kick_tail.value = 0;
+	tx_ring_p->descs_pending = 0;
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+		"==> nxge_map_txdma_channel_buf_ring: channel %d "
+		"actual tx desc max %d nmsgs %d "
+		"(config nxge_tx_ring_size %d)",
+		channel, tx_ring_p->tx_ring_size, nmsgs,
+		nxge_tx_ring_size));
+
+	/*
+	 * Map in buffers from the buffer pool.
+	 */
+	index = 0;
+	bsize = dma_bufp->block_size;
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel_buf_ring: "
+		"dma_bufp $%p tx_rng_p $%p "
+		"tx_msg_rng_p $%p bsize %d",
+		dma_bufp, tx_ring_p, tx_msg_ring, bsize));
+
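+	/*
+	 * Walk each DMA buffer chunk block by block and set up one DMA
+	 * common area per block for the corresponding tx message entry.
+	 */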
+	tx_buf_dma_handle = dma_bufp->dma_handle;
+	for (i = 0; i < num_chunks; i++, dma_bufp++) {
+		bsize = dma_bufp->block_size;
+		nblocks = dma_bufp->nblocks;
+		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+			"==> nxge_map_txdma_channel_buf_ring: dma chunk %d "
+			"size %d dma_bufp $%p",
+			i, sizeof (nxge_dma_common_t), dma_bufp));
+
+		for (j = 0; j < nblocks; j++) {
+			tx_msg_ring[index].buf_dma_handle = tx_buf_dma_handle;
+			dmap = &tx_msg_ring[index++].buf_dma;
+#ifdef TX_MEM_DEBUG
+			NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+				"==> nxge_map_txdma_channel_buf_ring: j %d"
+				"dmap $%p", i, dmap));
+#endif
+			nxge_setup_dma_common(dmap, dma_bufp, 1,
+				bsize);
+		}
+	}
+
+	if (i < num_chunks) {
+		goto nxge_map_txdma_channel_buf_ring_fail1;
+	}
+
+	*tx_desc_p = tx_ring_p;
+
+	goto nxge_map_txdma_channel_buf_ring_exit;
+
+nxge_map_txdma_channel_buf_ring_fail1:
+	/* Free whichever DMA handles were successfully allocated. */
+	for (index = 0; index < nmsgs; index++) {
+		if (tx_msg_ring[index].dma_handle != NULL) {
+			ddi_dma_free_handle(&tx_msg_ring[index].dma_handle);
+		}
+	}
+	MUTEX_DESTROY(&tx_ring_p->lock);
+	KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size);
+	KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
+
+nxge_map_txdma_channel_buf_ring_exit:
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+		"<== nxge_map_txdma_channel_buf_ring status 0x%x", status));
+
+	return (status);
+}
+
+/*ARGSUSED*/
+static void
+nxge_unmap_txdma_channel_buf_ring(p_nxge_t nxgep, p_tx_ring_t tx_ring_p)
+{
+	p_tx_msg_t 		tx_msg_ring;
+	p_tx_msg_t 		tx_msg_p;
+	int			i;
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+		"==> nxge_unmap_txdma_channel_buf_ring"));
+	if (tx_ring_p == NULL) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_unmap_txdma_channel_buf_ring: NULL ringp"));
+		return;
+	}
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+		"==> nxge_unmap_txdma_channel_buf_ring: channel %d",
+		tx_ring_p->tdc));
+
+	tx_msg_ring = tx_ring_p->tx_msg_ring;
+	for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
+		tx_msg_p = &tx_msg_ring[i];
+		if (tx_msg_p->flags.dma_type == USE_DVMA) {
+			NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+				"entry = %d",
+				i));
+			(void) dvma_unload(tx_msg_p->dvma_handle,
+				0, -1);
+			tx_msg_p->dvma_handle = NULL;
+			if (tx_ring_p->dvma_wr_index ==
+				tx_ring_p->dvma_wrap_mask) {
+				tx_ring_p->dvma_wr_index = 0;
+			} else {
+				tx_ring_p->dvma_wr_index++;
+			}
+			tx_ring_p->dvma_pending--;
+		} else if (tx_msg_p->flags.dma_type ==
+				USE_DMA) {
+			if (ddi_dma_unbind_handle
+				(tx_msg_p->dma_handle)) {
+				cmn_err(CE_WARN, "!nxge_unmap_tx_buf_ring: "
+					"ddi_dma_unbind_handle "
+					"failed.");
+			}
+		}
+
+		if (tx_msg_p->tx_message != NULL) {
+			freemsg(tx_msg_p->tx_message);
+			tx_msg_p->tx_message = NULL;
+		}
+	}
+
+	for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
+		if (tx_msg_ring[i].dma_handle != NULL) {
+			ddi_dma_free_handle(&tx_msg_ring[i].dma_handle);
+		}
+	}
+
+	MUTEX_DESTROY(&tx_ring_p->lock);
+	KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size);
+	KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+		"<== nxge_unmap_txdma_channel_buf_ring"));
+}
+
+static nxge_status_t
+nxge_txdma_hw_start(p_nxge_t nxgep)
+{
+	int			i, ndmas;
+	uint16_t		channel;
+	p_tx_rings_t 		tx_rings;
+	p_tx_ring_t 		*tx_desc_rings;
+	p_tx_mbox_areas_t 	tx_mbox_areas_p;
+	p_tx_mbox_t		*tx_mbox_p;
+	nxge_status_t		status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start"));
+
+	tx_rings = nxgep->tx_rings;
+	if (tx_rings == NULL) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_txdma_hw_start: NULL ring pointer"));
+		return (NXGE_ERROR);
+	}
+	tx_desc_rings = tx_rings->rings;
+	if (tx_desc_rings == NULL) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_txdma_hw_start: NULL ring pointers"));
+		return (NXGE_ERROR);
+	}
+
+	ndmas = tx_rings->ndmas;
+	if (!ndmas) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_txdma_hw_start: no dma channel allocated"));
+		return (NXGE_ERROR);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
+		"tx_rings $%p tx_desc_rings $%p ndmas %d",
+		tx_rings, tx_desc_rings, ndmas));
+
+	tx_mbox_areas_p = nxgep->tx_mbox_areas_p;
+	tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
+
+	for (i = 0; i < ndmas; i++) {
+		channel = tx_desc_rings[i]->tdc;
+		status = nxge_txdma_start_channel(nxgep, channel,
+				(p_tx_ring_t)tx_desc_rings[i],
+				(p_tx_mbox_t)tx_mbox_p[i]);
+		if (status != NXGE_OK) {
+			goto nxge_txdma_hw_start_fail1;
+		}
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
+		"tx_rings $%p rings $%p",
+		nxgep->tx_rings, nxgep->tx_rings->rings));
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
+		"tx_rings $%p tx_desc_rings $%p",
+		nxgep->tx_rings, tx_desc_rings));
+
+	goto nxge_txdma_hw_start_exit;
+
+nxge_txdma_hw_start_fail1:
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+		"==> nxge_txdma_hw_start: disable "
+		"(status 0x%x channel %d i %d)", status, channel, i));
+	for (; i >= 0; i--) {
+		channel = tx_desc_rings[i]->tdc;
+		(void) nxge_txdma_stop_channel(nxgep, channel,
+			(p_tx_ring_t)tx_desc_rings[i],
+			(p_tx_mbox_t)tx_mbox_p[i]);
+	}
+
+nxge_txdma_hw_start_exit:
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+		"==> nxge_txdma_hw_start: (status 0x%x)", status));
+
+	return (status);
+}
+
+static void
+nxge_txdma_hw_stop(p_nxge_t nxgep)
+{
+	int			i, ndmas;
+	uint16_t		channel;
+	p_tx_rings_t 		tx_rings;
+	p_tx_ring_t 		*tx_desc_rings;
+	p_tx_mbox_areas_t 	tx_mbox_areas_p;
+	p_tx_mbox_t		*tx_mbox_p;
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_stop"));
+
+	tx_rings = nxgep->tx_rings;
+	if (tx_rings == NULL) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_txdma_hw_stop: NULL ring pointer"));
+		return;
+	}
+	tx_desc_rings = tx_rings->rings;
+	if (tx_desc_rings == NULL) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_txdma_hw_stop: NULL ring pointers"));
+		return;
+	}
+
+	ndmas = tx_rings->ndmas;
+	if (!ndmas) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_txdma_hw_stop: no dma channel allocated"));
+		return;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_stop: "
+		"tx_rings $%p tx_desc_rings $%p",
+		tx_rings, tx_desc_rings));
+
+	tx_mbox_areas_p = nxgep->tx_mbox_areas_p;
+	tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
+
+	for (i = 0; i < ndmas; i++) {
+		channel = tx_desc_rings[i]->tdc;
+		(void) nxge_txdma_stop_channel(nxgep, channel,
+				(p_tx_ring_t)tx_desc_rings[i],
+				(p_tx_mbox_t)tx_mbox_p[i]);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_stop: "
+		"tx_rings $%p tx_desc_rings $%p",
+		tx_rings, tx_desc_rings));
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_hw_stop"));
+}
+
+static nxge_status_t
+nxge_txdma_start_channel(p_nxge_t nxgep, uint16_t channel,
+    p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
+
+{
+	nxge_status_t		status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+		"==> nxge_txdma_start_channel (channel %d)", channel));
+	/*
+	 * TXDMA/TXC must be in stopped state.
+	 */
+	(void) nxge_txdma_stop_inj_err(nxgep, channel);
+
+	/*
+	 * Reset TXDMA channel
+	 */
+	tx_ring_p->tx_cs.value = 0;
+	tx_ring_p->tx_cs.bits.ldw.rst = 1;
+	status = nxge_reset_txdma_channel(nxgep, channel,
+			tx_ring_p->tx_cs.value);
+	if (status != NXGE_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_txdma_start_channel (channel %d)"
+			" reset channel failed 0x%x", channel, status));
+		goto nxge_txdma_start_channel_exit;
+	}
+
+	/*
+	 * Initialize the TXDMA channel specific FZC control
+	 * configurations. These FZC registers are pertaining
+	 * to each TX channel (i.e. logical pages).
+	 */
+	status = nxge_init_fzc_txdma_channel(nxgep, channel,
+			tx_ring_p, tx_mbox_p);
+	if (status != NXGE_OK) {
+		goto nxge_txdma_start_channel_exit;
+	}
+
+	/*
+	 * Initialize the event masks.
+	 */
+	tx_ring_p->tx_evmask.value = 0;
+	status = nxge_init_txdma_channel_event_mask(nxgep,
+			channel, &tx_ring_p->tx_evmask);
+	if (status != NXGE_OK) {
+		goto nxge_txdma_start_channel_exit;
+	}
+
+	/*
+	 * Load TXDMA descriptors, buffers, mailbox,
+	 * initialise the DMA channels and
+	 * enable each DMA channel.
+	 */
+	status = nxge_enable_txdma_channel(nxgep, channel,
+			tx_ring_p, tx_mbox_p);
+	if (status != NXGE_OK) {
+		goto nxge_txdma_start_channel_exit;
+	}
+
+nxge_txdma_start_channel_exit:
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_start_channel"));
+
+	return (status);
+}
+
+/*ARGSUSED*/
+static nxge_status_t
+nxge_txdma_stop_channel(p_nxge_t nxgep, uint16_t channel,
+	p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
+{
+	int		status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+		"==> nxge_txdma_stop_channel: channel %d", channel));
+
+	/*
+	 * Stop (disable) the TXDMA channel and TXC.  (If the stop bit is
+	 * set and the STOP_N_GO bit is not set, the TXDMA reset state
+	 * will not be set when the TXDMA channel is reset.)
+	 */
+	(void) nxge_txdma_stop_inj_err(nxgep, channel);
+
+	/*
+	 * Reset TXDMA channel
+	 */
+	tx_ring_p->tx_cs.value = 0;
+	tx_ring_p->tx_cs.bits.ldw.rst = 1;
+	status = nxge_reset_txdma_channel(nxgep, channel,
+			tx_ring_p->tx_cs.value);
+	if (status != NXGE_OK) {
+		goto nxge_txdma_stop_channel_exit;
+	}
+
+#ifdef HARDWARE_REQUIRED
+	/* Set up the interrupt event masks. */
+	tx_ring_p->tx_evmask.value = 0;
+	status = nxge_init_txdma_channel_event_mask(nxgep,
+			channel, &tx_ring_p->tx_evmask);
+	if (status != NXGE_OK) {
+		goto nxge_txdma_stop_channel_exit;
+	}
+
+	/* Initialize the DMA control and status register */
+	tx_ring_p->tx_cs.value = TX_ENT_MSK_MK_ALL;
+	status = nxge_init_txdma_channel_cntl_stat(nxgep, channel,
+			tx_ring_p->tx_cs.value);
+	if (status != NXGE_OK) {
+		goto nxge_txdma_stop_channel_exit;
+	}
+
+	/* Disable channel */
+	status = nxge_disable_txdma_channel(nxgep, channel,
+			tx_ring_p, tx_mbox_p);
+	if (status != NXGE_OK) {
+		goto nxge_txdma_stop_channel_exit;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+		"==> nxge_txdma_stop_channel: event done"));
+
+#endif
+
+nxge_txdma_stop_channel_exit:
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_stop_channel"));
+	return (status);
+}
+
+static p_tx_ring_t
+nxge_txdma_get_ring(p_nxge_t nxgep, uint16_t channel)
+{
+	int			index, ndmas;
+	uint16_t		tdc;
+	p_tx_rings_t 		tx_rings;
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_ring"));
+
+	tx_rings = nxgep->tx_rings;
+	if (tx_rings == NULL) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_txdma_get_ring: NULL ring pointer"));
+		return (NULL);
+	}
+
+	ndmas = tx_rings->ndmas;
+	if (!ndmas) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_txdma_get_ring: no channel allocated"));
+		return (NULL);
+	}
+
+	if (tx_rings->rings == NULL) {
+		NXGE_DEBUG_MSG((nxgep, TX_CTL,
+			"<== nxge_txdma_get_ring: NULL rings pointer"));
+		return (NULL);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_get_ring: "
+		"tx_rings $%p tx_desc_rings $%p ndmas %d",
+		tx_rings, tx_rings->rings, ndmas));
+
+	for (index = 0; index < ndmas; index++) {
+		tdc = tx_rings->rings[index]->tdc;
+		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+			"==> nxge_fixup_txdma_rings: channel %d", tdc));
+		if (channel == tdc) {
+			NXGE_DEBUG_MSG((nxgep, TX_CTL,
+				"<== nxge_txdma_get_ring: tdc %d "
+				"ring $%p",
+				tdc, tx_rings->rings[index]));
+			return (p_tx_ring_t)(tx_rings->rings[index]);
+		}
+	}
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_ring"));
+	return (NULL);
+}
+
+static p_tx_mbox_t
+nxge_txdma_get_mbox(p_nxge_t nxgep, uint16_t channel)
+{
+	int			index, tdc, ndmas;
+	p_tx_rings_t 		tx_rings;
+	p_tx_mbox_areas_t 	tx_mbox_areas_p;
+	p_tx_mbox_t		*tx_mbox_p;
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_mbox"));
+
+	tx_rings = nxgep->tx_rings;
+	if (tx_rings == NULL) {
+		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+			"<== nxge_txdma_get_mbox: NULL ring pointer"));
+		return (NULL);
+	}
+
+	tx_mbox_areas_p = nxgep->tx_mbox_areas_p;
+	if (tx_mbox_areas_p == NULL) {
+		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+			"<== nxge_txdma_get_mbox: NULL mbox pointer"));
+		return (NULL);
+	}
+
+	tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
+
+	ndmas = tx_rings->ndmas;
+	if (!ndmas) {
+		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+			"<== nxge_txdma_get_mbox: no channel allocated"));
+		return (NULL);
+	}
+
+	if (tx_rings->rings == NULL) {
+		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+			"<== nxge_txdma_get_mbox: NULL rings pointer"));
+		return (NULL);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_get_mbox: "
+		"tx_rings $%p tx_desc_rings $%p ndmas %d",
+		tx_rings, tx_rings->rings, ndmas));
+
+	for (index = 0; index < ndmas; index++) {
+		tdc = tx_rings->rings[index]->tdc;
+		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
+			"==> nxge_txdma_get_mbox: channel %d", tdc));
+		if (channel == tdc) {
+			NXGE_DEBUG_MSG((nxgep, TX_CTL,
+				"<== nxge_txdma_get_mbox: tdc %d "
+				"ring $%p",
+				tdc, tx_rings->rings[index]));
+			return (p_tx_mbox_t)(tx_mbox_p[index]);
+		}
+	}
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_mbox"));
+	return (NULL);
+}
+
+/*ARGSUSED*/
+static nxge_status_t
+nxge_tx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp, tx_cs_t cs)
+{
+	npi_handle_t		handle;
+	npi_status_t		rs;
+	uint8_t			channel;
+	p_tx_ring_t 		*tx_rings;
+	p_tx_ring_t 		tx_ring_p;
+	p_nxge_tx_ring_stats_t	tdc_stats;
+	boolean_t		txchan_fatal = B_FALSE;
+	nxge_status_t		status = NXGE_OK;
+	tdmc_inj_par_err_t	par_err;
+	uint32_t		value;
+
+	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_tx_err_evnts"));
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	channel = ldvp->channel;
+
+	tx_rings = nxgep->tx_rings->rings;
+	tx_ring_p = tx_rings[index];
+	tdc_stats = tx_ring_p->tdc_stats;
+	if ((cs.bits.ldw.pkt_size_err) || (cs.bits.ldw.pref_buf_par_err) ||
+		(cs.bits.ldw.nack_pref) || (cs.bits.ldw.nack_pkt_rd) ||
+		(cs.bits.ldw.conf_part_err) || (cs.bits.ldw.pkt_prt_err)) {
+		if ((rs = npi_txdma_ring_error_get(handle, channel,
+					&tdc_stats->errlog)) != NPI_SUCCESS)
+			return (NXGE_ERROR | rs);
+	}
+
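+	/*
+	 * For each error bit set in the control/status word, bump the
+	 * per-channel statistic, post an FMA ereport and, for fatal
+	 * conditions, flag the channel so it is reset and recovered below.
+	 */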
+	if (cs.bits.ldw.mbox_err) {
+		tdc_stats->mbox_err++;
+		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
+					NXGE_FM_EREPORT_TDMC_MBOX_ERR);
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_tx_err_evnts(channel %d): "
+			"fatal error: mailbox", channel));
+		txchan_fatal = B_TRUE;
+	}
+	if (cs.bits.ldw.pkt_size_err) {
+		tdc_stats->pkt_size_err++;
+		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
+					NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR);
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_tx_err_evnts(channel %d): "
+			"fatal error: pkt_size_err", channel));
+		txchan_fatal = B_TRUE;
+	}
+	if (cs.bits.ldw.tx_ring_oflow) {
+		tdc_stats->tx_ring_oflow++;
+		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
+					NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW);
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_tx_err_evnts(channel %d): "
+			"fatal error: tx_ring_oflow", channel));
+		txchan_fatal = B_TRUE;
+	}
+	if (cs.bits.ldw.pref_buf_par_err) {
+		tdc_stats->pre_buf_par_err++;
+		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
+					NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR);
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_tx_err_evnts(channel %d): "
+			"fatal error: pre_buf_par_err", channel));
+		/* Clear error injection source for parity error */
+		(void) npi_txdma_inj_par_error_get(handle, &value);
+		par_err.value = value;
+		par_err.bits.ldw.inject_parity_error &= ~(1 << channel);
+		(void) npi_txdma_inj_par_error_set(handle, par_err.value);
+		txchan_fatal = B_TRUE;
+	}
+	if (cs.bits.ldw.nack_pref) {
+		tdc_stats->nack_pref++;
+		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
+					NXGE_FM_EREPORT_TDMC_NACK_PREF);
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_tx_err_evnts(channel %d): "
+			"fatal error: nack_pref", channel));
+		txchan_fatal = B_TRUE;
+	}
+	if (cs.bits.ldw.nack_pkt_rd) {
+		tdc_stats->nack_pkt_rd++;
+		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
+					NXGE_FM_EREPORT_TDMC_NACK_PKT_RD);
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_tx_err_evnts(channel %d): "
+			"fatal error: nack_pkt_rd", channel));
+		txchan_fatal = B_TRUE;
+	}
+	if (cs.bits.ldw.conf_part_err) {
+		tdc_stats->conf_part_err++;
+		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
+					NXGE_FM_EREPORT_TDMC_CONF_PART_ERR);
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_tx_err_evnts(channel %d): "
+			"fatal error: config_partition_err", channel));
+		txchan_fatal = B_TRUE;
+	}
+	if (cs.bits.ldw.pkt_prt_err) {
+		tdc_stats->pkt_part_err++;
+		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
+					NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR);
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_tx_err_evnts(channel %d): "
+			"fatal error: pkt_prt_err", channel));
+		txchan_fatal = B_TRUE;
+	}
+
+	/* Clear error injection source in case this is an injected error */
+	TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, channel, 0);
+
+	if (txchan_fatal) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			" nxge_tx_err_evnts: "
+			" fatal error on channel %d cs 0x%llx\n",
+			channel, cs.value));
+		status = nxge_txdma_fatal_err_recover(nxgep, channel,
+								tx_ring_p);
+		if (status == NXGE_OK) {
+			FM_SERVICE_RESTORED(nxgep);
+		}
+	}
+
+	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_tx_err_evnts"));
+
+	return (status);
+}
+
+static nxge_status_t
+nxge_txdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel,
+						p_tx_ring_t tx_ring_p)
+{
+	npi_handle_t	handle;
+	npi_status_t	rs = NPI_SUCCESS;
+	p_tx_mbox_t	tx_mbox_p;
+	nxge_status_t	status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fatal_err_recover"));
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"Recovering from TxDMAChannel#%d error...", channel));
+
+	/*
+	 * Stop the DMA channel and wait for the stop-done bit.
+	 * If the stop-done bit is not set, report an error.
+	 */
+
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel stop..."));
+	MUTEX_ENTER(&tx_ring_p->lock);
+	rs = npi_txdma_channel_control(handle, TXDMA_STOP, channel);
+	if (rs != NPI_SUCCESS) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_txdma_fatal_err_recover (channel %d): "
+			"stop failed ", channel));
+		goto fail;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reclaim..."));
+	(void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
+
+	/*
+	 * Reset TXDMA channel
+	 */
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reset..."));
+	if ((rs = npi_txdma_channel_control(handle, TXDMA_RESET, channel)) !=
+						NPI_SUCCESS) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_txdma_fatal_err_recover (channel %d)"
+			" reset channel failed 0x%x", channel, rs));
+		goto fail;
+	}
+
+	/*
+	 * Reset the tail (kick) register to 0.
+	 * (Hardware will not reset it.  A Tx overflow fatal
+	 * error occurs if the tail is not set to 0 after reset.)
+	 */
+	TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);
+
+	/* Restart TXDMA channel */
+
+	/*
+	 * Initialize the TXDMA channel specific FZC control
+	 * configurations. These FZC registers are pertaining
+	 * to each TX channel (i.e. logical pages).
+	 */
+	tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel);
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel restart..."));
+	status = nxge_init_fzc_txdma_channel(nxgep, channel,
+						tx_ring_p, tx_mbox_p);
+	if (status != NXGE_OK)
+		goto fail;
+
+	/*
+	 * Initialize the event masks.
+	 */
+	tx_ring_p->tx_evmask.value = 0;
+	status = nxge_init_txdma_channel_event_mask(nxgep, channel,
+							&tx_ring_p->tx_evmask);
+	if (status != NXGE_OK)
+		goto fail;
+
+	tx_ring_p->wr_index_wrap = B_FALSE;
+	tx_ring_p->wr_index = 0;
+	tx_ring_p->rd_index = 0;
+
+	/*
+	 * Load TXDMA descriptors, buffers, mailbox,
+	 * initialise the DMA channels and
+	 * enable each DMA channel.
+	 */
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel enable..."));
+	status = nxge_enable_txdma_channel(nxgep, channel,
+						tx_ring_p, tx_mbox_p);
+	MUTEX_EXIT(&tx_ring_p->lock);
+	if (status != NXGE_OK)
+		goto fail;
+
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"Recovery Successful, TxDMAChannel#%d Restored",
+			channel));
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fatal_err_recover"));
+
+	return (NXGE_OK);
+
+fail:
+	MUTEX_EXIT(&tx_ring_p->lock);
+	NXGE_DEBUG_MSG((nxgep, TX_CTL,
+		"nxge_txdma_fatal_err_recover (channel %d): "
+		"failed to recover this txdma channel", channel));
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
+
+	return (status);
+}
+
+nxge_status_t
+nxge_tx_port_fatal_err_recover(p_nxge_t nxgep)
+{
+	npi_handle_t	handle;
+	npi_status_t	rs = NPI_SUCCESS;
+	nxge_status_t	status = NXGE_OK;
+	p_tx_ring_t 	*tx_desc_rings;
+	p_tx_rings_t	tx_rings;
+	p_tx_ring_t	tx_ring_p;
+	p_tx_mbox_t	tx_mbox_p;
+	int		i, ndmas;
+	uint16_t	channel;
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_tx_port_fatal_err_recover"));
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"Recovering from TxPort error..."));
+
+	/*
+	 * Stop the DMA channels and wait for the stop-done bit.
+	 * If the stop-done bit is not set, report an error.
+	 */
+
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxPort stop all DMA channels..."));
+
+	tx_rings = nxgep->tx_rings;
+	tx_desc_rings = tx_rings->rings;
+	ndmas = tx_rings->ndmas;
+
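+	/*
+	 * Grab every ring lock up front so no transmits proceed while all
+	 * of the port's channels are stopped, reclaimed, reset and
+	 * re-enabled.
+	 */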
+	for (i = 0; i < ndmas; i++) {
+		if (tx_desc_rings[i] == NULL) {
+			continue;
+		}
+		tx_ring_p = tx_rings->rings[i];
+		MUTEX_ENTER(&tx_ring_p->lock);
+	}
+
+	for (i = 0; i < ndmas; i++) {
+		if (tx_desc_rings[i] == NULL) {
+			continue;
+		}
+		channel = tx_desc_rings[i]->tdc;
+		tx_ring_p = tx_rings->rings[i];
+		rs = npi_txdma_channel_control(handle, TXDMA_STOP, channel);
+		if (rs != NPI_SUCCESS) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_txdma_fatal_err_recover (channel %d): "
+			"stop failed ", channel));
+			goto fail;
+		}
+	}
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxPort reclaim all DMA channels..."));
+
+	for (i = 0; i < ndmas; i++) {
+		if (tx_desc_rings[i] == NULL) {
+			continue;
+		}
+		tx_ring_p = tx_rings->rings[i];
+		(void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
+	}
+
+	/*
+	 * Reset TXDMA channel
+	 */
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxPort reset all DMA channels..."));
+
+	for (i = 0; i < ndmas; i++) {
+		if (tx_desc_rings[i] == NULL) {
+			continue;
+		}
+		channel = tx_desc_rings[i]->tdc;
+		tx_ring_p = tx_rings->rings[i];
+		if ((rs = npi_txdma_channel_control(handle, TXDMA_RESET,
+				channel)) != NPI_SUCCESS) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"==> nxge_txdma_fatal_err_recover (channel %d)"
+				" reset channel failed 0x%x", channel, rs));
+			goto fail;
+		}
+
+		/*
+		 * Reset the tail (kick) register to 0.
+		 * (Hardware will not reset it.  A Tx overflow fatal
+		 * error occurs if the tail is not set to 0 after reset.)
+		 */
+
+		TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);
+
+	}
+
+	/*
+	 * Initialize the TXDMA channel specific FZC control
+	 * configurations. These FZC registers are pertaining
+	 * to each TX channel (i.e. logical pages).
+	 */
+
+	/* Restart TXDMA channels */
+
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxPort re-start all DMA channels..."));
+
+	for (i = 0; i < ndmas; i++) {
+		if (tx_desc_rings[i] == NULL) {
+			continue;
+		}
+		channel = tx_desc_rings[i]->tdc;
+		tx_ring_p = tx_rings->rings[i];
+		tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel);
+		status = nxge_init_fzc_txdma_channel(nxgep, channel,
+						tx_ring_p, tx_mbox_p);
+		tx_ring_p->tx_evmask.value = 0;
+		/*
+		 * Initialize the event masks.
+		 */
+		status = nxge_init_txdma_channel_event_mask(nxgep, channel,
+							&tx_ring_p->tx_evmask);
+
+		tx_ring_p->wr_index_wrap = B_FALSE;
+		tx_ring_p->wr_index = 0;
+		tx_ring_p->rd_index = 0;
+
+		if (status != NXGE_OK)
+			goto fail;
+	}
+
+	/*
+	 * Load TXDMA descriptors, buffers, mailbox,
+	 * initialise the DMA channels and
+	 * enable each DMA channel.
+	 */
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxPort re-enable all DMA channels..."));
+
+	for (i = 0; i < ndmas; i++) {
+		if (tx_desc_rings[i] == NULL) {
+			continue;
+		}
+		channel = tx_desc_rings[i]->tdc;
+		tx_ring_p = tx_rings->rings[i];
+		tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel);
+		status = nxge_enable_txdma_channel(nxgep, channel,
+						tx_ring_p, tx_mbox_p);
+		if (status != NXGE_OK)
+			goto fail;
+	}
+
+	for (i = 0; i < ndmas; i++) {
+		if (tx_desc_rings[i] == NULL) {
+			continue;
+		}
+		tx_ring_p = tx_rings->rings[i];
+		MUTEX_EXIT(&tx_ring_p->lock);
+	}
+
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"Recovery Successful, TxPort Restored"));
+	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover"));
+
+	return (NXGE_OK);
+
+fail:
+	for (i = 0; i < ndmas; i++) {
+		if (tx_desc_rings[i] == NULL) {
+			continue;
+		}
+		tx_ring_p = tx_rings->rings[i];
+		MUTEX_EXIT(&tx_ring_p->lock);
+	}
+
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
+	NXGE_DEBUG_MSG((nxgep, TX_CTL,
+		"nxge_txdma_fatal_err_recover (channel %d): "
+		"failed to recover this txdma channel"));
+
+	return (status);
+}
+
+void
+nxge_txdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan)
+{
+	tdmc_intr_dbg_t		tdi;
+	tdmc_inj_par_err_t	par_err;
+	uint32_t		value;
+	npi_handle_t		handle;
+
+	switch (err_id) {
+
+	case NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR:
+		handle = NXGE_DEV_NPI_HANDLE(nxgep);
+		/* Clear error injection source for parity error */
+		(void) npi_txdma_inj_par_error_get(handle, &value);
+		par_err.value = value;
+		par_err.bits.ldw.inject_parity_error &= ~(1 << chan);
+		(void) npi_txdma_inj_par_error_set(handle, par_err.value);
+
+		/* Now inject a parity error on this channel. */
+		(void) npi_txdma_inj_par_error_get(handle, &value);
+		par_err.value = value;
+		par_err.bits.ldw.inject_parity_error |= (1 << chan);
+		cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INJ_PAR_ERR_REG\n",
+				(unsigned long long)par_err.value);
+		(void) npi_txdma_inj_par_error_set(handle, par_err.value);
+		break;
+
+	case NXGE_FM_EREPORT_TDMC_MBOX_ERR:
+	case NXGE_FM_EREPORT_TDMC_NACK_PREF:
+	case NXGE_FM_EREPORT_TDMC_NACK_PKT_RD:
+	case NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR:
+	case NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW:
+	case NXGE_FM_EREPORT_TDMC_CONF_PART_ERR:
+	case NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR:
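+		/*
+		 * Read-modify-write the TDMC interrupt debug register;
+		 * setting a bit is expected to force the corresponding
+		 * error condition on this channel for fault injection.
+		 */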
+		TXDMA_REG_READ64(nxgep->npi_handle, TDMC_INTR_DBG_REG,
+			chan, &tdi.value);
+		if (err_id == NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR)
+			tdi.bits.ldw.pref_buf_par_err = 1;
+		else if (err_id == NXGE_FM_EREPORT_TDMC_MBOX_ERR)
+			tdi.bits.ldw.mbox_err = 1;
+		else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PREF)
+			tdi.bits.ldw.nack_pref = 1;
+		else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PKT_RD)
+			tdi.bits.ldw.nack_pkt_rd = 1;
+		else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR)
+			tdi.bits.ldw.pkt_size_err = 1;
+		else if (err_id == NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW)
+			tdi.bits.ldw.tx_ring_oflow = 1;
+		else if (err_id == NXGE_FM_EREPORT_TDMC_CONF_PART_ERR)
+			tdi.bits.ldw.conf_part_err = 1;
+		else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR)
+			tdi.bits.ldw.pkt_part_err = 1;
+		cmn_err(CE_NOTE, "!Write 0x%lx to TDMC_INTR_DBG_REG\n",
+				tdi.value);
+		TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG,
+			chan, tdi.value);
+
+		break;
+	}
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nxge/nxge_virtual.c	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,3650 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#include <sys/nxge/nxge_impl.h>
+#include <sys/nxge/nxge_mac.h>
+
+static void nxge_get_niu_property(dev_info_t *, niu_type_t *);
+static nxge_status_t nxge_get_mac_addr_properties(p_nxge_t);
+static nxge_status_t nxge_use_cfg_n2niu_properties(p_nxge_t);
+static void nxge_use_cfg_neptune_properties(p_nxge_t);
+static void nxge_use_cfg_dma_config(p_nxge_t);
+static void nxge_use_cfg_vlan_class_config(p_nxge_t);
+static void nxge_use_cfg_mac_class_config(p_nxge_t);
+static void nxge_use_cfg_class_config(p_nxge_t);
+static void nxge_use_cfg_link_cfg(p_nxge_t);
+static void nxge_set_hw_dma_config(p_nxge_t);
+static void nxge_set_hw_vlan_class_config(p_nxge_t);
+static void nxge_set_hw_mac_class_config(p_nxge_t);
+static void nxge_set_hw_class_config(p_nxge_t);
+static nxge_status_t nxge_use_default_dma_config_n2(p_nxge_t);
+static void nxge_ldgv_setup(p_nxge_ldg_t *, p_nxge_ldv_t *, uint8_t,
+	uint8_t, int *);
+static void nxge_init_mmac(p_nxge_t);
+
+uint32_t nxge_use_hw_property = 1;
+uint32_t nxge_groups_per_port = 2;
+
+extern uint32_t nxge_use_partition;
+extern uint32_t nxge_dma_obp_props_only;
+
+extern uint16_t nxge_rcr_timeout;
+extern uint16_t nxge_rcr_threshold;
+
+extern uint_t nxge_rx_intr(void *, void *);
+extern uint_t nxge_tx_intr(void *, void *);
+extern uint_t nxge_mif_intr(void *, void *);
+extern uint_t nxge_mac_intr(void *, void *);
+extern uint_t nxge_syserr_intr(void *, void *);
+extern void *nxge_list;
+
+#define	NXGE_SHARED_REG_SW_SIM
+
+#ifdef NXGE_SHARED_REG_SW_SIM
+uint64_t global_dev_ctrl = 0;
+#endif
+
+#define	MAX_SIBLINGS	NXGE_MAX_PORTS
+
+extern uint32_t nxge_rbr_size;
+extern uint32_t nxge_rcr_size;
+extern uint32_t nxge_tx_ring_size;
+extern uint32_t nxge_rbr_spare_size;
+
+extern npi_status_t npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);
+
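+/*
+ * Default per-port DMA channel and RDC group allocations for 2-port and
+ * 4-port configurations; these tables back the "fair", "equal" and
+ * "classify" configuration tokens below.
+ */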
+static uint8_t p2_tx_fair[2] = {12, 12};
+static uint8_t p2_tx_equal[2] = {12, 12};
+static uint8_t p4_tx_fair[4] = {6, 6, 6, 6};
+static uint8_t p4_tx_equal[4] = {6, 6, 6, 6};
+static uint8_t p2_rx_fair[2] = {8, 8};
+static uint8_t p2_rx_equal[2] = {8, 8};
+
+static uint8_t p4_rx_fair[4] = {4, 4, 4, 4};
+static uint8_t p4_rx_equal[4] = {4, 4, 4, 4};
+
+static uint8_t p2_rdcgrp_fair[2] = {4, 4};
+static uint8_t p2_rdcgrp_equal[2] = {4, 4};
+static uint8_t p4_rdcgrp_fair[4] = {2, 2, 1, 1};
+static uint8_t p4_rdcgrp_equal[4] = {2, 2, 2, 2};
+static uint8_t p2_rdcgrp_cls[2] = {1, 1};
+static uint8_t p4_rdcgrp_cls[4] = {1, 1, 1, 1};
+
+typedef enum {
+	DEFAULT = 0,
+	EQUAL,
+	FAIR,
+	CUSTOM,
+	CLASSIFY,
+	L2_CLASSIFY,
+	L3_DISTRIBUTE,
+	L3_CLASSIFY,
+	L3_TCAM,
+	CONFIG_TOKEN_NONE
+} config_token_t;
+
+static char *token_names[] = {
+	"default",
+	"equal",
+	"fair",
+	"custom",
+	"classify",
+	"l2_classify",
+	"l3_distribute",
+	"l3_classify",
+	"l3_tcam",
+	"none",
+};
+
+void nxge_virint_regs_dump(p_nxge_t nxgep);
+
+void
+nxge_virint_regs_dump(p_nxge_t nxgep)
+{
+	npi_handle_t handle;
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_virint_regs_dump"));
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	(void) npi_vir_dump_pio_fzc_regs_one(handle);
+	(void) npi_vir_dump_ldgnum(handle);
+	(void) npi_vir_dump_ldsv(handle);
+	(void) npi_vir_dump_imask0(handle);
+	(void) npi_vir_dump_sid(handle);
+	(void) npi_mac_dump_regs(handle, nxgep->function_num);
+	(void) npi_ipp_dump_regs(handle, nxgep->function_num);
+	(void) npi_fflp_dump_regs(handle);
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_virint_regs_dump"));
+}
+
+/*
+ * For now the DMA configurations are hard coded and only one
+ * partition is assumed.
+ *
+ * OBP passes this partition's Neptune configuration to fcode,
+ * which creates the corresponding device properties.
+ *
+ * Since Neptune (PCI-E) and the NIU (Niagara-2) have different
+ * bus interfaces, the driver needs to know which bus it is
+ * connected to.  Ravinder suggested creating a device property
+ * for this.  In a partitioning environment we cannot use a
+ * .conf file (this needs to be checked); if the .conf file
+ * changes, the system must be rebooted.
+ *
+ * The following function assumes that its properties are
+ * retrieved from a virtualized nexus driver.
+ */
+
+nxge_status_t
+nxge_cntlops(dev_info_t *dip, nxge_ctl_enum_t ctlop, void *arg, void *result)
+{
+	nxge_status_t status = NXGE_OK;
+	int instance;
+	p_nxge_t nxgep;
+
+#ifndef NXGE_SHARED_REG_SW_SIM
+	npi_handle_t handle;
+	uint16_t sr16, cr16;
+#endif
+	instance = ddi_get_instance(dip);
+	NXGE_DEBUG_MSG((NULL, VIR_CTL, "Instance %d ", instance));
+
+	if (nxge_list == NULL) {
+		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
+				"nxge_cntlops: nxge_list null"));
+		return (NXGE_ERROR);
+	}
+	nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
+	if (nxgep == NULL) {
+		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
+				"nxge_cntlops: nxgep null"));
+		return (NXGE_ERROR);
+	}
+#ifndef NXGE_SHARED_REG_SW_SIM
+	handle = nxgep->npi_reg_handle;
+#endif
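+	/*
+	 * Each control operation either manipulates the software-simulated
+	 * shared register (NXGE_SHARED_REG_SW_SIM) or accesses the hardware
+	 * device function shared register, in most cases under the
+	 * shared-register lock.
+	 */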
+	switch (ctlop) {
+	case NXGE_CTLOPS_NIUTYPE:
+		nxge_get_niu_property(dip, (niu_type_t *)result);
+		return (status);
+
+	case NXGE_CTLOPS_GET_SHARED_REG:
+#ifdef NXGE_SHARED_REG_SW_SIM
+		*(uint64_t *)result = global_dev_ctrl;
+		return (0);
+#else
+		status = npi_dev_func_sr_sr_get(handle, &sr16);
+		*(uint16_t *)result = sr16;
+		NXGE_DEBUG_MSG((NULL, VIR_CTL,
+			"nxge_cntlops: NXGE_CTLOPS_GET_SHARED_REG"));
+		return (0);
+#endif
+
+	case NXGE_CTLOPS_SET_SHARED_REG_LOCK:
+#ifdef NXGE_SHARED_REG_SW_SIM
+		global_dev_ctrl = *(uint64_t *)arg;
+		return (0);
+#else
+		status = NPI_FAILURE;
+		while (status != NPI_SUCCESS)
+			status = npi_dev_func_sr_lock_enter(handle);
+
+		sr16 = *(uint16_t *)arg;
+		status = npi_dev_func_sr_sr_set_only(handle, &sr16);
+		status = npi_dev_func_sr_lock_free(handle);
+		NXGE_DEBUG_MSG((NULL, VIR_CTL,
+			"nxge_cntlops: NXGE_CTLOPS_SET_SHARED_REG"));
+		return (0);
+#endif
+
+	case NXGE_CTLOPS_UPDATE_SHARED_REG:
+#ifdef NXGE_SHARED_REG_SW_SIM
+		global_dev_ctrl |= *(uint64_t *)arg;
+		return (0);
+#else
+		status = NPI_FAILURE;
+		while (status != NPI_SUCCESS)
+			status = npi_dev_func_sr_lock_enter(handle);
+		status = npi_dev_func_sr_sr_get(handle, &sr16);
+		sr16 |= *(uint16_t *)arg;
+		status = npi_dev_func_sr_sr_set_only(handle, &sr16);
+		status = npi_dev_func_sr_lock_free(handle);
+		NXGE_DEBUG_MSG((NULL, VIR_CTL,
+			"nxge_cntlops: NXGE_CTLOPS_SET_SHARED_REG"));
+		return (0);
+#endif
+
+	case NXGE_CTLOPS_CLEAR_BIT_SHARED_REG_UL:
+#ifdef NXGE_SHARED_REG_SW_SIM
+		global_dev_ctrl |= *(uint64_t *)arg;
+		return (0);
+#else
+		status = npi_dev_func_sr_sr_get(handle, &sr16);
+		cr16 = *(uint16_t *)arg;
+		sr16 &= ~cr16;
+		status = npi_dev_func_sr_sr_set_only(handle, &sr16);
+		NXGE_DEBUG_MSG((NULL, VIR_CTL,
+			"nxge_cntlops: NXGE_CTLOPS_SET_SHARED_REG"));
+		return (0);
+#endif
+
+	case NXGE_CTLOPS_CLEAR_BIT_SHARED_REG:
+#ifdef NXGE_SHARED_REG_SW_SIM
+		global_dev_ctrl |= *(uint64_t *)arg;
+		return (0);
+#else
+		status = NPI_FAILURE;
+		while (status != NPI_SUCCESS)
+			status = npi_dev_func_sr_lock_enter(handle);
+		status = npi_dev_func_sr_sr_get(handle, &sr16);
+		cr16 = *(uint16_t *)arg;
+		sr16 &= ~cr16;
+		status = npi_dev_func_sr_sr_set_only(handle, &sr16);
+		status = npi_dev_func_sr_lock_free(handle);
+		NXGE_DEBUG_MSG((NULL, VIR_CTL,
+			"nxge_cntlops: NXGE_CTLOPS_SET_SHARED_REG"));
+		return (0);
+#endif
+
+	case NXGE_CTLOPS_GET_LOCK_BLOCK:
+#ifdef NXGE_SHARED_REG_SW_SIM
+		global_dev_ctrl |= *(uint64_t *)arg;
+		return (0);
+#else
+		status = NPI_FAILURE;
+		while (status != NPI_SUCCESS)
+			status = npi_dev_func_sr_lock_enter(handle);
+		NXGE_DEBUG_MSG((NULL, VIR_CTL,
+			"nxge_cntlops: NXGE_CTLOPS_GET_LOCK_BLOCK"));
+		return (0);
+#endif
+	case NXGE_CTLOPS_GET_LOCK_TRY:
+#ifdef NXGE_SHARED_REG_SW_SIM
+		global_dev_ctrl |= *(uint64_t *)arg;
+		return (0);
+#else
+		status = npi_dev_func_sr_lock_enter(handle);
+		NXGE_DEBUG_MSG((NULL, VIR_CTL,
+			"nxge_cntlops: NXGE_CTLOPS_GET_LOCK_TRY"));
+		if (status == NPI_SUCCESS)
+			return (NXGE_OK);
+		else
+			return (NXGE_ERROR);
+#endif
+	case NXGE_CTLOPS_FREE_LOCK:
+#ifdef NXGE_SHARED_REG_SW_SIM
+		global_dev_ctrl |= *(uint64_t *)arg;
+		return (0);
+#else
+		status = npi_dev_func_sr_lock_free(handle);
+		NXGE_DEBUG_MSG((NULL, VIR_CTL,
+			"nxge_cntlops: NXGE_CTLOPS_GET_LOCK_FREE"));
+		if (status == NPI_SUCCESS)
+			return (NXGE_OK);
+		else
+			return (NXGE_ERROR);
+#endif
+
+	default:
+		status = NXGE_ERROR;
+	}
+
+	return (status);
+}
+
+void
+nxge_common_lock_get(p_nxge_t nxgep)
+{
+	uint32_t status = NPI_FAILURE;
+	npi_handle_t handle;
+
+#if	defined(NXGE_SHARED_REG_SW_SIM)
+	return;
+#endif
+	handle = nxgep->npi_reg_handle;
+	while (status != NPI_SUCCESS)
+		status = npi_dev_func_sr_lock_enter(handle);
+}
+
+void
+nxge_common_lock_free(p_nxge_t nxgep)
+{
+	npi_handle_t handle;
+
+#if	defined(NXGE_SHARED_REG_SW_SIM)
+	return;
+#endif
+	handle = nxgep->npi_reg_handle;
+	(void) npi_dev_func_sr_lock_free(handle);
+}
+
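+/*
+ * Determine whether this device is the on-chip N2 NIU or a Neptune
+ * (PCI-E) adapter from the "niu-type" device property, defaulting to
+ * Neptune when the property is absent.
+ */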
+static void
+nxge_get_niu_property(dev_info_t *dip, niu_type_t *niu_type)
+{
+	uchar_t *prop_val;
+	uint_t prop_len;
+
+	*niu_type = NEPTUNE;
+	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip, 0,
+			"niu-type", (uchar_t **)&prop_val,
+			&prop_len) == DDI_PROP_SUCCESS) {
+		if (strncmp("niu", (caddr_t)prop_val, (size_t)prop_len) == 0) {
+			*niu_type = N2_NIU;
+		}
+		ddi_prop_free(prop_val);
+	}
+}
+
+static config_token_t
+nxge_get_config_token(char *prop)
+{
+	config_token_t token = DEFAULT;
+
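+	/*
+	 * Match on the first four characters, which are unique among the
+	 * token names in token_names[].
+	 */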
+	while (token < CONFIG_TOKEN_NONE) {
+		if (strncmp(prop, token_names[token], 4) == 0)
+			break;
+		token++;
+	}
+	return (token);
+}
+
+/* per port */
+
+static nxge_status_t
+nxge_update_rxdma_grp_properties(p_nxge_t nxgep, config_token_t token,
+	dev_info_t *s_dip[])
+{
+	nxge_status_t status = NXGE_OK;
+	int ddi_status;
+	int num_ports = nxgep->nports;
+	int port, bits, j;
+	uint8_t start_grp = 0, num_grps = 0;
+	p_nxge_param_t param_arr;
+	uint32_t grp_bitmap[MAX_SIBLINGS];
+	int custom_start_grp[MAX_SIBLINGS];
+	int custom_num_grp[MAX_SIBLINGS];
+	uint8_t bad_config = B_FALSE;
+	char *start_prop, *num_prop, *cfg_prop;
+
+	start_grp = 0;
+	param_arr = nxgep->param_arr;
+	start_prop = param_arr[param_rdc_grps_start].fcode_name;
+	num_prop = param_arr[param_rx_rdc_grps].fcode_name;
+
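+	/*
+	 * The configuration token selects how RDC groups are divided among
+	 * the ports: "fair", "equal" and "classify" use the static tables
+	 * above, while "custom" reads per-port properties and falls back to
+	 * a default allocation if the values are invalid or overlap.
+	 */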
+	switch (token) {
+	case FAIR:
+		cfg_prop = "fair";
+		for (port = 0; port < num_ports; port++) {
+			custom_num_grp[port] =
+				(num_ports == 4) ?
+				p4_rdcgrp_fair[port] :
+				p2_rdcgrp_fair[port];
+			custom_start_grp[port] = start_grp;
+			start_grp += custom_num_grp[port];
+		}
+		break;
+
+	case EQUAL:
+		cfg_prop = "equal";
+		for (port = 0; port < num_ports; port++) {
+			custom_num_grp[port] =
+				(num_ports == 4) ?
+				p4_rdcgrp_equal[port] :
+				p2_rdcgrp_equal[port];
+			custom_start_grp[port] = start_grp;
+			start_grp += custom_num_grp[port];
+		}
+		break;
+
+
+	case CLASSIFY:
+		cfg_prop = "classify";
+		for (port = 0; port < num_ports; port++) {
+			custom_num_grp[port] = (num_ports == 4) ?
+				p4_rdcgrp_cls[port] : p2_rdcgrp_cls[port];
+			custom_start_grp[port] = start_grp;
+			start_grp += custom_num_grp[port];
+		}
+		break;
+
+	case CUSTOM:
+		cfg_prop = "custom";
+		/* See if it is good config */
+		num_grps = 0;
+		for (port = 0; port < num_ports; port++) {
+			custom_start_grp[port] =
+				ddi_prop_get_int(DDI_DEV_T_NONE, s_dip[port],
+				DDI_PROP_DONTPASS, start_prop, -1);
+			if ((custom_start_grp[port] == -1) ||
+				(custom_start_grp[port] >=
+					NXGE_MAX_RDC_GRPS)) {
+				bad_config = B_TRUE;
+				break;
+			}
+			custom_num_grp[port] = ddi_prop_get_int(
+				DDI_DEV_T_NONE,
+				s_dip[port],
+				DDI_PROP_DONTPASS,
+				num_prop, -1);
+
+			if ((custom_num_grp[port] == -1) ||
+				(custom_num_grp[port] >
+					NXGE_MAX_RDC_GRPS) ||
+				((custom_num_grp[port] +
+						custom_start_grp[port]) >=
+					NXGE_MAX_RDC_GRPS)) {
+				bad_config = B_TRUE;
+				break;
+			}
+			num_grps += custom_num_grp[port];
+			if (num_grps > NXGE_MAX_RDC_GRPS) {
+				bad_config = B_TRUE;
+				break;
+			}
+			grp_bitmap[port] = 0;
+			for (bits = 0;
+				bits < custom_num_grp[port];
+				bits++) {
+				grp_bitmap[port] |=
+					(1 << (bits + custom_start_grp[port]));
+			}
+
+		}
+
+		if (bad_config == B_FALSE) {
+			/* check for overlap */
+			for (port = 0; port < num_ports - 1; port++) {
+				for (j = port + 1; j < num_ports; j++) {
+					if (grp_bitmap[port] &
+						grp_bitmap[j]) {
+						bad_config = B_TRUE;
+						break;
+					}
+				}
+				if (bad_config == B_TRUE)
+					break;
+			}
+		}
+		if (bad_config == B_TRUE) {
+			/* use default config */
+			for (port = 0; port < num_ports; port++) {
+				custom_num_grp[port] =
+					(num_ports == 4) ?
+					p4_rdcgrp_fair[port] :
+					p2_rdcgrp_fair[port];
+				custom_start_grp[port] = start_grp;
+				start_grp += custom_num_grp[port];
+			}
+		}
+		break;
+
+	default:
+		/* use default config */
+		cfg_prop = "fair";
+		for (port = 0; port < num_ports; port++) {
+			custom_num_grp[port] = (num_ports == 4) ?
+				p4_rdcgrp_fair[port] : p2_rdcgrp_fair[port];
+			custom_start_grp[port] = start_grp;
+			start_grp += custom_num_grp[port];
+		}
+		break;
+	}
+
+	/* Now Update the rx properties */
+	for (port = 0; port < num_ports; port++) {
+		ddi_status = ddi_prop_update_string(DDI_DEV_T_NONE, s_dip[port],
+			"rxdma-grp-cfg", cfg_prop);
+		if (ddi_status != DDI_PROP_SUCCESS) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+					" property %s not updating",
+					cfg_prop));
+			status |= NXGE_DDI_FAILED;
+		}
+		ddi_status = ddi_prop_update_int(DDI_DEV_T_NONE, s_dip[port],
+			num_prop, custom_num_grp[port]);
+
+		if (ddi_status != DDI_PROP_SUCCESS) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+					" property %s not updating",
+					num_prop));
+			status |= NXGE_DDI_FAILED;
+		}
+		ddi_status = ddi_prop_update_int(DDI_DEV_T_NONE, s_dip[port],
+			start_prop, custom_start_grp[port]);
+
+		if (ddi_status != DDI_PROP_SUCCESS) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+					" property %s not updating",
+					start_prop));
+			status |= NXGE_DDI_FAILED;
+		}
+	}
+	if (status & NXGE_DDI_FAILED)
+		status |= NXGE_ERROR;
+
+	return (status);
+}
+
+static nxge_status_t
+nxge_update_rxdma_properties(p_nxge_t nxgep, config_token_t token,
+	dev_info_t *s_dip[])
+{
+	nxge_status_t status = NXGE_OK;
+	int ddi_status;
+	int num_ports = nxgep->nports;
+	int port, bits, j;
+	uint8_t start_rdc = 0, num_rdc = 0;
+	p_nxge_param_t param_arr;
+	uint32_t rdc_bitmap[MAX_SIBLINGS];
+	int custom_start_rdc[MAX_SIBLINGS];
+	int custom_num_rdc[MAX_SIBLINGS];
+	uint8_t bad_config = B_FALSE;
+	int *prop_val;
+	uint_t prop_len;
+	char *start_rdc_prop, *num_rdc_prop, *cfg_prop;
+
+	start_rdc = 0;
+	param_arr = nxgep->param_arr;
+	start_rdc_prop = param_arr[param_rxdma_channels_begin].fcode_name;
+	num_rdc_prop = param_arr[param_rxdma_channels].fcode_name;
+
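+	/*
+	 * Same scheme for the RDC channels: static tables for "fair" and
+	 * "equal", per-port properties for "custom", with a fallback to the
+	 * fair allocation on a bad or overlapping configuration.
+	 */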
+	switch (token) {
+	case FAIR:
+		cfg_prop = "fair";
+		for (port = 0; port < num_ports; port++) {
+			custom_num_rdc[port] = (num_ports == 4) ?
+				p4_rx_fair[port] : p2_rx_fair[port];
+			custom_start_rdc[port] = start_rdc;
+			start_rdc += custom_num_rdc[port];
+		}
+		break;
+
+	case EQUAL:
+		cfg_prop = "equal";
+		for (port = 0; port < num_ports; port++) {
+			custom_num_rdc[port] = (num_ports == 4) ?
+				p4_rx_equal[port] :
+				p2_rx_equal[port];
+			custom_start_rdc[port] = start_rdc;
+			start_rdc += custom_num_rdc[port];
+		}
+		break;
+
+	case CUSTOM:
+		cfg_prop = "custom";
+		/* See if it is good config */
+		num_rdc = 0;
+		for (port = 0; port < num_ports; port++) {
+			ddi_status = ddi_prop_lookup_int_array(
+				DDI_DEV_T_ANY,
+				s_dip[port], 0,
+				start_rdc_prop,
+				&prop_val,
+				&prop_len);
+			if (ddi_status == DDI_SUCCESS)
+				custom_start_rdc[port] = *prop_val;
+			else {
+				NXGE_DEBUG_MSG((nxgep, CFG_CTL,
+						" %s custom start port %d"
+						" read failed ",
+						" rxdma-cfg", port));
+				bad_config = B_TRUE;
+				status |= NXGE_DDI_FAILED;
+			}
+			if ((custom_start_rdc[port] == -1) ||
+				(custom_start_rdc[port] >=
+					NXGE_MAX_RDCS)) {
+				NXGE_DEBUG_MSG((nxgep, CFG_CTL,
+						" %s custom start %d"
+						" out of range %x ",
+						" rxdma-cfg",
+						port,
+						custom_start_rdc[port]));
+				bad_config = B_TRUE;
+				break;
+			}
+			ddi_status = ddi_prop_lookup_int_array(
+				DDI_DEV_T_ANY,
+				s_dip[port],
+				0,
+				num_rdc_prop,
+				&prop_val,
+				&prop_len);
+
+			if (ddi_status == DDI_SUCCESS)
+				custom_num_rdc[port] = *prop_val;
+			else {
+				NXGE_DEBUG_MSG((nxgep, CFG_CTL,
+					" %s custom num port %d"
+					" read failed ",
+					"rxdma-cfg", port));
+				bad_config = B_TRUE;
+				status |= NXGE_DDI_FAILED;
+			}
+
+			if ((custom_num_rdc[port] == -1) ||
+					(custom_num_rdc[port] >
+						NXGE_MAX_RDCS) ||
+					((custom_num_rdc[port] +
+						custom_start_rdc[port]) >
+					NXGE_MAX_RDCS)) {
+				NXGE_DEBUG_MSG((nxgep, CFG_CTL,
+					" %s custom num %d"
+					" out of range %x ",
+					" rxdma-cfg",
+					port, custom_num_rdc[port]));
+				bad_config = B_TRUE;
+				break;
+			}
+			num_rdc += custom_num_rdc[port];
+			if (num_rdc > NXGE_MAX_RDCS) {
+				bad_config = B_TRUE;
+				break;
+			}
+			rdc_bitmap[port] = 0;
+			for (bits = 0;
+				bits < custom_num_rdc[port]; bits++) {
+				rdc_bitmap[port] |=
+					(1 << (bits + custom_start_rdc[port]));
+			}
+		}
+
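+		/*
+		 * Each port's custom RDC range was recorded above as a bitmap
+		 * of channel numbers; if the bitmaps of any two ports share a
+		 * bit, their assignments overlap and the whole custom
+		 * configuration is rejected below.
+		 */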
+		if (bad_config == B_FALSE) {
+			/* check for overlap */
+			for (port = 0; port < num_ports - 1; port++) {
+				for (j = port + 1; j < num_ports; j++) {
+					if (rdc_bitmap[port] &
+						rdc_bitmap[j]) {
+						NXGE_DEBUG_MSG((nxgep,
+							CFG_CTL,
+							" rxdma-cfg"
+							" property custom"
+							" bit overlap"
+							" %d %d ",
+							port, j));
+						bad_config = B_TRUE;
+						break;
+					}
+				}
+				if (bad_config == B_TRUE)
+					break;
+			}
+		}
+		if (bad_config == B_TRUE) {
+			/* use default config */
+			NXGE_DEBUG_MSG((nxgep, CFG_CTL,
+				" rxdma-cfg property:"
+				" bad custom config:"
+				" use default"));
+			for (port = 0; port < num_ports; port++) {
+				custom_num_rdc[port] =
+					(num_ports == 4) ?
+					p4_rx_fair[port] :
+					p2_rx_fair[port];
+				custom_start_rdc[port] = start_rdc;
+				start_rdc += custom_num_rdc[port];
+			}
+		}
+		break;
+
+	default:
+		/* use default config */
+		cfg_prop = "fair";
+		for (port = 0; port < num_ports; port++) {
+			custom_num_rdc[port] = (num_ports == 4) ?
+				p4_rx_fair[port] : p2_rx_fair[port];
+			custom_start_rdc[port] = start_rdc;
+			start_rdc += custom_num_rdc[port];
+		}
+		break;
+	}
+
+	/* Now Update the rx properties */
+	for (port = 0; port < num_ports; port++) {
+		NXGE_DEBUG_MSG((nxgep, CFG_CTL,
+			" update property rxdma-cfg with %s ", cfg_prop));
+		ddi_status = ddi_prop_update_string(DDI_DEV_T_NONE, s_dip[port],
+			"rxdma-cfg", cfg_prop);
+		if (ddi_status != DDI_PROP_SUCCESS) {
+			NXGE_DEBUG_MSG((nxgep, CFG_CTL,
+				" property rxdma-cfg is not updating to %s",
+				cfg_prop));
+			status |= NXGE_DDI_FAILED;
+		}
+		NXGE_DEBUG_MSG((nxgep, CFG_CTL, " update property %s with %d ",
+			num_rdc_prop, custom_num_rdc[port]));
+
+		ddi_status = ddi_prop_update_int(DDI_DEV_T_NONE, s_dip[port],
+			num_rdc_prop, custom_num_rdc[port]);
+
+		if (ddi_status != DDI_PROP_SUCCESS) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				" property %s not updating with %d",
+				num_rdc_prop, custom_num_rdc[port]));
+			status |= NXGE_DDI_FAILED;
+		}
+		NXGE_DEBUG_MSG((nxgep, CFG_CTL, " update property %s with %d ",
+			start_rdc_prop, custom_start_rdc[port]));
+		ddi_status = ddi_prop_update_int(DDI_DEV_T_NONE, s_dip[port],
+			start_rdc_prop, custom_start_rdc[port]);
+
+		if (ddi_status != DDI_PROP_SUCCESS) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				" property %s not updating with %d ",
+				start_rdc_prop, custom_start_rdc[port]));
+			status |= NXGE_DDI_FAILED;
+		}
+	}
+	if (status & NXGE_DDI_FAILED)
+		status |= NXGE_ERROR;
+	return (status);
+}
+
+static nxge_status_t
+nxge_update_txdma_properties(p_nxge_t nxgep, config_token_t token,
+	dev_info_t *s_dip[])
+{
+	nxge_status_t status = NXGE_OK;
+	int ddi_status = DDI_SUCCESS;
+	int num_ports = nxgep->nports;
+	int port, bits, j;
+	uint8_t start_tdc = 0, num_tdc = 0;
+	p_nxge_param_t param_arr;
+	uint32_t tdc_bitmap[MAX_SIBLINGS];
+	int custom_start_tdc[MAX_SIBLINGS];
+	int custom_num_tdc[MAX_SIBLINGS];
+	uint8_t bad_config = B_FALSE;
+	int *prop_val;
+	uint_t prop_len;
+	char *start_tdc_prop, *num_tdc_prop, *cfg_prop;
+
+	start_tdc = 0;
+	param_arr = nxgep->param_arr;
+	start_tdc_prop = param_arr[param_txdma_channels_begin].fcode_name;
+	num_tdc_prop = param_arr[param_txdma_channels].fcode_name;
+
+	switch (token) {
+	case FAIR:
+		cfg_prop = "fair";
+		for (port = 0; port < num_ports; port++) {
+			custom_num_tdc[port] = (num_ports == 4) ?
+				p4_tx_fair[port] : p2_tx_fair[port];
+			custom_start_tdc[port] = start_tdc;
+			start_tdc += custom_num_tdc[port];
+		}
+		break;
+
+	case EQUAL:
+		cfg_prop = "equal";
+		for (port = 0; port < num_ports; port++) {
+			custom_num_tdc[port] = (num_ports == 4) ?
+				p4_tx_equal[port] : p2_tx_equal[port];
+			custom_start_tdc[port] = start_tdc;
+			start_tdc += custom_num_tdc[port];
+		}
+		break;
+
+	case CUSTOM:
+		cfg_prop = "custom";
+		/* See if it is good config */
+		num_tdc = 0;
+		for (port = 0; port < num_ports; port++) {
+			ddi_status = ddi_prop_lookup_int_array(
+				DDI_DEV_T_ANY, s_dip[port], 0, start_tdc_prop,
+				&prop_val, &prop_len);
+			if (ddi_status == DDI_SUCCESS)
+				custom_start_tdc[port] = *prop_val;
+			else {
+				NXGE_DEBUG_MSG((nxgep, CFG_CTL,
+					" %s custom start port %d"
+					" read failed ", " txdma-cfg", port));
+				bad_config = B_TRUE;
+				status |= NXGE_DDI_FAILED;
+			}
+
+			if ((custom_start_tdc[port] == -1) ||
+					(custom_start_tdc[port] >=
+					NXGE_MAX_TDCS)) {
+				NXGE_DEBUG_MSG((nxgep, CFG_CTL,
+					" %s custom start %d"
+					" out of range %x ", " txdma-cfg",
+					port, custom_start_tdc[port]));
+				bad_config = B_TRUE;
+				break;
+			}
+
+			ddi_status = ddi_prop_lookup_int_array(
+				DDI_DEV_T_ANY, s_dip[port], 0, num_tdc_prop,
+				&prop_val, &prop_len);
+			if (ddi_status == DDI_SUCCESS)
+				custom_num_tdc[port] = *prop_val;
+			else {
+				NXGE_DEBUG_MSG((nxgep, CFG_CTL,
+					" %s custom num port %d"
+					" read failed ", " txdma-cfg", port));
+				bad_config = B_TRUE;
+				status |= NXGE_DDI_FAILED;
+			}
+
+			if ((custom_num_tdc[port] == -1) ||
+					(custom_num_tdc[port] >
+						NXGE_MAX_TDCS) ||
+					((custom_num_tdc[port] +
+						custom_start_tdc[port]) >
+					NXGE_MAX_TDCS)) {
+				NXGE_DEBUG_MSG((nxgep, CFG_CTL,
+					" %s custom num %d"
+					" out of range %x ", " txdma-cfg",
+					port, custom_num_tdc[port]));
+				bad_config = B_TRUE;
+				break;
+			}
+			num_tdc += custom_num_tdc[port];
+			if (num_tdc > NXGE_MAX_TDCS) {
+				bad_config = B_TRUE;
+				break;
+			}
+			tdc_bitmap[port] = 0;
+			for (bits = 0;
+				bits < custom_num_tdc[port]; bits++) {
+				tdc_bitmap[port] |=
+					(1 <<
+					(bits + custom_start_tdc[port]));
+			}
+
+		}
+
+		if (bad_config == B_FALSE) {
+			/* check for overlap */
+			for (port = 0; port < num_ports - 1; port++) {
+				for (j = port + 1; j < num_ports; j++) {
+					if (tdc_bitmap[port] &
+						tdc_bitmap[j]) {
+						NXGE_DEBUG_MSG((nxgep, CFG_CTL,
+							" txdma-cfg"
+							" property custom"
+							" bit overlap"
+							" %d %d ",
+							port, j));
+						bad_config = B_TRUE;
+						break;
+					}
+				}
+				if (bad_config == B_TRUE)
+					break;
+			}
+		}
+		if (bad_config == B_TRUE) {
+			/* use default config */
+			NXGE_DEBUG_MSG((nxgep, CFG_CTL,
+				" txdma-cfg property:"
+				" bad custom config:" " use default"));
+
+			for (port = 0; port < num_ports; port++) {
+				custom_num_tdc[port] = (num_ports == 4) ?
+					p4_tx_fair[port] : p2_tx_fair[port];
+				custom_start_tdc[port] = start_tdc;
+				start_tdc += custom_num_tdc[port];
+			}
+		}
+		break;
+
+	default:
+		/* use default config */
+		cfg_prop = "fair";
+		for (port = 0; port < num_ports; port++) {
+			custom_num_tdc[port] = (num_ports == 4) ?
+				p4_tx_fair[port] : p2_tx_fair[port];
+			custom_start_tdc[port] = start_tdc;
+			start_tdc += custom_num_tdc[port];
+		}
+		break;
+	}
+
+	/* Now Update the tx properties */
+	for (port = 0; port < num_ports; port++) {
+		NXGE_DEBUG_MSG((nxgep, CFG_CTL,
+			" update property txdma-cfg with %s ", cfg_prop));
+		ddi_status = ddi_prop_update_string(DDI_DEV_T_NONE, s_dip[port],
+			"txdma-cfg", cfg_prop);
+		if (ddi_status != DDI_PROP_SUCCESS) {
+			NXGE_DEBUG_MSG((nxgep, CFG_CTL,
+				" property txdma-cfg is not updating to %s",
+				cfg_prop));
+			status |= NXGE_DDI_FAILED;
+		}
+		NXGE_DEBUG_MSG((nxgep, CFG_CTL, " update property %s with %d ",
+			num_tdc_prop, custom_num_tdc[port]));
+
+		ddi_status = ddi_prop_update_int(DDI_DEV_T_NONE, s_dip[port],
+			num_tdc_prop, custom_num_tdc[port]);
+
+		if (ddi_status != DDI_PROP_SUCCESS) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				" property %s not updating with %d",
+				num_tdc_prop,
+				custom_num_tdc[port]));
+			status |= NXGE_DDI_FAILED;
+		}
+
+		NXGE_DEBUG_MSG((nxgep, CFG_CTL, " update property %s with %d ",
+			start_tdc_prop, custom_start_tdc[port]));
+
+		ddi_status = ddi_prop_update_int(DDI_DEV_T_NONE, s_dip[port],
+			start_tdc_prop, custom_start_tdc[port]);
+		if (ddi_status != DDI_PROP_SUCCESS) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				" property %s not updating with %d ",
+				start_tdc_prop, custom_start_tdc[port]));
+			status |= NXGE_DDI_FAILED;
+		}
+	}
+	if (status & NXGE_DDI_FAILED)
+		status |= NXGE_ERROR;
+	return (status);
+}
+
+static nxge_status_t
+nxge_update_cfg_properties(p_nxge_t nxgep, uint32_t flags,
+	config_token_t token, dev_info_t *s_dip[])
+{
+	nxge_status_t status = NXGE_OK;
+
+	switch (flags) {
+	case COMMON_TXDMA_CFG:
+		if (nxge_dma_obp_props_only == 0)
+			status = nxge_update_txdma_properties(nxgep,
+				token, s_dip);
+		break;
+	case COMMON_RXDMA_CFG:
+		if (nxge_dma_obp_props_only == 0)
+			status = nxge_update_rxdma_properties(nxgep,
+				token, s_dip);
+
+		break;
+	case COMMON_RXDMA_GRP_CFG:
+		status = nxge_update_rxdma_grp_properties(nxgep,
+			token, s_dip);
+		break;
+	default:
+		return (NXGE_ERROR);
+	}
+	return (status);
+}
+
+/*
+ * Verify consistency of the shared configuration properties.
+ * (This may require publishing the properties on all the ports.)
+ *
+ * What if the properties are published on the function 0 device only?
+ *
+ * rxdma-cfg, txdma-cfg and rxdma-grp-cfg are required.
+ * What about the class configs?
+ *
+ * If consistent, update the property on all the siblings and set
+ * a flag in a shared hardware register.  The rest of the siblings
+ * check the flag; if the flag is set, they use the updated property
+ * without doing any validation.
+ */
+
+nxge_status_t
+nxge_cfg_verify_set_classify_prop(p_nxge_t nxgep, char *prop,
+	uint64_t known_cfg, uint32_t override, dev_info_t *c_dip[])
+{
+	nxge_status_t status = NXGE_OK;
+	int ddi_status = DDI_SUCCESS;
+	int i = 0, found = 0, update_prop = B_TRUE;
+	int *cfg_val;
+	uint_t new_value, cfg_value[MAX_SIBLINGS];
+	uint_t prop_len;
+	uint_t known_cfg_value;
+
+	known_cfg_value = (uint_t)known_cfg;
+
+	if (override == B_TRUE) {
+		new_value = known_cfg_value;
+		for (i = 0; i < nxgep->nports; i++) {
+			ddi_status = ddi_prop_update_int(DDI_DEV_T_NONE,
+				c_dip[i], prop, new_value);
+#ifdef NXGE_DEBUG_ERROR
+			if (ddi_status != DDI_PROP_SUCCESS)
+				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+					" property %s failed update ", prop));
+#endif
+		}
+		if (ddi_status != DDI_PROP_SUCCESS)
+			return (NXGE_ERROR | NXGE_DDI_FAILED);
+	}
+	for (i = 0; i < nxgep->nports; i++) {
+		cfg_value[i] = known_cfg_value;
+		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, c_dip[i], 0,
+				prop, &cfg_val,
+				&prop_len) == DDI_PROP_SUCCESS) {
+			cfg_value[i] = *cfg_val;
+			ddi_prop_free(cfg_val);
+			found++;
+		}
+	}
+
+	if (found != i) {
+		NXGE_DEBUG_MSG((nxgep, CFG_CTL,
+			" property %s not specified on all ports", prop));
+		if (found == 0) {
+			/* not specified: Use default */
+			NXGE_DEBUG_MSG((nxgep, CFG_CTL,
+				" property %s not specified on any port:"
+				" Using default", prop));
+			new_value = known_cfg_value;
+		} else {
+			/* specified on some */
+			NXGE_DEBUG_MSG((nxgep, CFG_CTL,
+				" property %s not specified"
+				" on some ports: Using default", prop));
+			/* ? use p0 value instead ? */
+			new_value = known_cfg_value;
+		}
+	} else {
+		/* found on all devices: check type and consistency */
+		for (i = 1; i < found; i++) {
+			if (cfg_value[i] != cfg_value[i - 1]) {
+				NXGE_DEBUG_MSG((nxgep, CFG_CTL,
+					" property %s inconsistent:"
+					" Using default", prop));
+				new_value = known_cfg_value;
+				break;
+			}
+			/*
+			 * Found on all the ports and consistent. Nothing to
+			 * do.
+			 */
+			update_prop = B_FALSE;
+		}
+	}
+
+	if (update_prop == B_TRUE) {
+		for (i = 0; i < nxgep->nports; i++) {
+			ddi_status = ddi_prop_update_int(DDI_DEV_T_NONE,
+				c_dip[i], prop, new_value);
+#ifdef NXGE_DEBUG_ERROR
+			if (ddi_status != DDI_SUCCESS)
+				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+					" property %s not updating with %d"
+					" Using default",
+					prop, new_value));
+#endif
+			if (ddi_status != DDI_PROP_SUCCESS)
+				status |= NXGE_DDI_FAILED;
+		}
+	}
+	if (status & NXGE_DDI_FAILED)
+		status |= NXGE_ERROR;
+
+	return (status);
+}
+
+static uint64_t
+nxge_class_get_known_cfg(p_nxge_t nxgep, int class_prop, int rx_quick_cfg)
+{
+	int start_prop;
+	uint64_t cfg_value;
+	p_nxge_param_t param_arr;
+
+	param_arr = nxgep->param_arr;
+	cfg_value = param_arr[class_prop].value;
+	start_prop = param_h1_init_value;
+
+	/* update the properties per quick config */
+	switch (rx_quick_cfg) {
+	case CFG_L3_WEB:
+	case CFG_L3_DISTRIBUTE:
+		cfg_value = nxge_classify_get_cfg_value(nxgep,
+			rx_quick_cfg, class_prop - start_prop);
+		break;
+	default:
+		cfg_value = param_arr[class_prop].value;
+		break;
+	}
+	return (cfg_value);
+}
+
+static nxge_status_t
+nxge_cfg_verify_set_classify(p_nxge_t nxgep, dev_info_t *c_dip[])
+{
+	nxge_status_t status = NXGE_OK;
+	int rx_quick_cfg, class_prop, start_prop, end_prop;
+	char *prop_name;
+	int override = B_TRUE;
+	uint64_t cfg_value;
+	p_nxge_param_t param_arr;
+
+	param_arr = nxgep->param_arr;
+	rx_quick_cfg = param_arr[param_rx_quick_cfg].value;
+	start_prop = param_h1_init_value;
+	end_prop = param_class_opt_ipv6_sctp;
+
+	/* update the properties per quick config */
+	if (rx_quick_cfg == CFG_NOT_SPECIFIED)
+		override = B_FALSE;
+
+	/*
+	 * These parameters affect the classification outcome.
+	 * They are used to configure the flow key and the TCAM key
+	 * for each of the IP classes.  Also included are the H1 and H2
+	 * initial values, which affect the distribution as well as the
+	 * final hash value (and hence the offset into the RDC table and
+	 * the FCRAM bucket location).
+	 */
+	for (class_prop = start_prop; class_prop <= end_prop; class_prop++) {
+		prop_name = param_arr[class_prop].fcode_name;
+		cfg_value = nxge_class_get_known_cfg(nxgep,
+			class_prop, rx_quick_cfg);
+		status = nxge_cfg_verify_set_classify_prop(nxgep, prop_name,
+			cfg_value, override, c_dip);
+	}
+
+	/*
+	 * These properties do not affect the actual classification outcome;
+	 * they are used to enable/disable or tune the FFLP hardware:
+	 *
+	 * fcram_access_ratio, tcam_access_ratio, tcam_enable, llc_snap_enable
+	 */
+	override = B_FALSE;
+	for (class_prop = param_fcram_access_ratio;
+			class_prop <= param_llc_snap_enable; class_prop++) {
+		prop_name = param_arr[class_prop].fcode_name;
+		cfg_value = param_arr[class_prop].value;
+		status = nxge_cfg_verify_set_classify_prop(nxgep, prop_name,
+			cfg_value, override, c_dip);
+	}
+
+	return (status);
+}
+
+nxge_status_t
+nxge_cfg_verify_set(p_nxge_t nxgep, uint32_t flag)
+{
+	nxge_status_t status = NXGE_OK;
+	int i = 0, found = 0;
+	int num_siblings;
+	dev_info_t *c_dip[MAX_SIBLINGS + 1];
+	char *prop_val[MAX_SIBLINGS];
+	config_token_t c_token[MAX_SIBLINGS];
+	char *prop;
+
+	if (nxge_dma_obp_props_only)
+		return (NXGE_OK);
+
+	num_siblings = 0;
+	c_dip[num_siblings] = ddi_get_child(nxgep->p_dip);
+	while (c_dip[num_siblings]) {
+		c_dip[num_siblings + 1] =
+			ddi_get_next_sibling(c_dip[num_siblings]);
+		num_siblings++;
+	}
+
+	switch (flag) {
+	case COMMON_TXDMA_CFG:
+		prop = "txdma-cfg";
+		break;
+	case COMMON_RXDMA_CFG:
+		prop = "rxdma-cfg";
+		break;
+	case COMMON_RXDMA_GRP_CFG:
+		prop = "rxdma-grp-cfg";
+		break;
+	case COMMON_CLASS_CFG:
+		status = nxge_cfg_verify_set_classify(nxgep, c_dip);
+		return (status);
+	default:
+		return (NXGE_ERROR);
+	}
+
+	i = 0;
+	while (i < num_siblings) {
+		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, c_dip[i], 0, prop,
+				(char **)&prop_val[i]) == DDI_PROP_SUCCESS) {
+			c_token[i] = nxge_get_config_token(prop_val[i]);
+			ddi_prop_free(prop_val[i]);
+			found++;
+		} else
+			c_token[i] = CONFIG_TOKEN_NONE;
+		i++;
+	}
+
+	if (found != i) {
+		if (found == 0) {
+			/* not specified: Use default */
+			NXGE_DEBUG_MSG((nxgep, CFG_CTL,
+				" property %s not specified on any port:"
+					" Using default", prop));
+
+			status = nxge_update_cfg_properties(nxgep,
+				flag, FAIR, c_dip);
+			return (status);
+		} else {
+			/*
+			 * If the convention is to use the function 0 device,
+			 * then populate the other devices with this
+			 * configuration.
+			 *
+			 * The other alternative is to use the default config.
+			 */
+			/* not specified: Use default */
+			NXGE_DEBUG_MSG((nxgep, CFG_CTL,
+				" property %s not specified on some ports:"
+				" Using default", prop));
+			status = nxge_update_cfg_properties(nxgep,
+				flag, FAIR, c_dip);
+			return (status);
+		}
+	}
+
+	/* found on all devices: check type and consistency */
+	for (i = 1; i < found; i++) {
+		if (c_token[i] != c_token[i - 1]) {
+			NXGE_DEBUG_MSG((nxgep, CFG_CTL,
+				" property %s inconsistent:"
+				" Using default", prop));
+			status = nxge_update_cfg_properties(nxgep,
+				flag, FAIR, c_dip);
+			return (status);
+		}
+	}
+
+	/*
+	 * Found on all the ports: check whether it is a custom
+	 * configuration and, if so, verify its consistency.
+	 *
+	 * Finally, create the soft properties.
+	 */
+	status = nxge_update_cfg_properties(nxgep, flag, c_token[0], c_dip);
+	return (status);
+}
+
+nxge_status_t
+nxge_cfg_verify_set_quick_config(p_nxge_t nxgep)
+{
+	nxge_status_t status = NXGE_OK;
+	int ddi_status = DDI_SUCCESS;
+	char *prop_val;
+	char *rx_prop;
+	char *prop;
+	uint32_t cfg_value = CFG_NOT_SPECIFIED;
+	p_nxge_param_t param_arr;
+
+	param_arr = nxgep->param_arr;
+	rx_prop = param_arr[param_rx_quick_cfg].fcode_name;
+
+	prop = "rx-quick-cfg";
+
+	/*
+	 * Valid values are:
+	 *
+	 * "web-server", "generic-server", "l3-classify", "flow-classify"
+	 */
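+	/*
+	 * Illustrative only: with the standard driver.conf string syntax,
+	 * the quick configuration could be requested with an entry such as
+	 *
+	 *	rx-quick-cfg="web-server";
+	 *
+	 * Only "web-server" and "generic-server" are handled by the code
+	 * below; the remaining values fall back to the distribute config.
+	 */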
+	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, nxgep->dip, 0,
+			prop, (char **)&prop_val) != DDI_PROP_SUCCESS) {
+		NXGE_DEBUG_MSG((nxgep, VPD_CTL,
+			" property %s not specified: using default ", prop));
+		cfg_value = CFG_NOT_SPECIFIED;
+	} else {
+		cfg_value = CFG_L3_DISTRIBUTE;
+		if (strncmp("web-server", (caddr_t)prop_val, 8) == 0) {
+			cfg_value = CFG_L3_WEB;
+			NXGE_DEBUG_MSG((nxgep, CFG_CTL,
+				" %s: web server ", prop));
+		}
+		if (strncmp("generic-server", (caddr_t)prop_val, 8) == 0) {
+			cfg_value = CFG_L3_DISTRIBUTE;
+			NXGE_DEBUG_MSG((nxgep, CFG_CTL,
+				" %s: distribute ", prop));
+		}
+		/* more */
+		ddi_prop_free(prop_val);
+	}
+
+	ddi_status = ddi_prop_update_int(DDI_DEV_T_NONE, nxgep->dip,
+		rx_prop, cfg_value);
+	if (ddi_status != DDI_PROP_SUCCESS)
+		status |= NXGE_DDI_FAILED;
+
+	/* now handle specified cases: */
+	if (status & NXGE_DDI_FAILED)
+		status |= NXGE_ERROR;
+	return (status);
+}
+
+static void
+nxge_use_cfg_link_cfg(p_nxge_t nxgep)
+{
+	int *prop_val;
+	uint_t prop_len;
+	dev_info_t *dip;
+	int speed;
+	int duplex;
+	int adv_autoneg_cap;
+	int adv_10gfdx_cap;
+	int adv_10ghdx_cap;
+	int adv_1000fdx_cap;
+	int adv_1000hdx_cap;
+	int adv_100fdx_cap;
+	int adv_100hdx_cap;
+	int adv_10fdx_cap;
+	int adv_10hdx_cap;
+	int status = DDI_SUCCESS;
+
+	dip = nxgep->dip;
+
+	/*
+	 * first find out the card type and the supported link speeds and
+	 * features
+	 */
+	/* add code for card type */
+	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, "adv-autoneg-cap",
+			&prop_val, &prop_len) == DDI_PROP_SUCCESS) {
+		ddi_prop_free(prop_val);
+		return;
+	}
+
+	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, "adv-10gfdx-cap",
+			&prop_val, &prop_len) == DDI_PROP_SUCCESS) {
+		ddi_prop_free(prop_val);
+		return;
+	}
+
+	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, "adv-1000hdx-cap",
+			&prop_val, &prop_len) == DDI_PROP_SUCCESS) {
+		ddi_prop_free(prop_val);
+		return;
+	}
+
+	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, "adv-1000fdx-cap",
+			&prop_val, &prop_len) == DDI_PROP_SUCCESS) {
+		ddi_prop_free(prop_val);
+		return;
+	}
+
+	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, "adv-100fdx-cap",
+			&prop_val, &prop_len) == DDI_PROP_SUCCESS) {
+		ddi_prop_free(prop_val);
+		return;
+	}
+
+	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, "adv-100hdx-cap",
+			&prop_val, &prop_len) == DDI_PROP_SUCCESS) {
+		ddi_prop_free(prop_val);
+		return;
+	}
+
+	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, "adv-10fdx-cap",
+			&prop_val, &prop_len) == DDI_PROP_SUCCESS) {
+		ddi_prop_free(prop_val);
+		return;
+	}
+
+	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, "adv-10hdx-cap",
+			&prop_val, &prop_len) == DDI_PROP_SUCCESS) {
+		ddi_prop_free(prop_val);
+		return;
+	}
+
+	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip, 0, "speed",
+			(uchar_t **)&prop_val, &prop_len) == DDI_PROP_SUCCESS) {
+		if (strncmp("10000", (caddr_t)prop_val,
+				(size_t)prop_len) == 0) {
+			speed = 10000;
+		} else if (strncmp("1000", (caddr_t)prop_val,
+				(size_t)prop_len) == 0) {
+			speed = 1000;
+		} else if (strncmp("100", (caddr_t)prop_val,
+				(size_t)prop_len) == 0) {
+			speed = 100;
+		} else if (strncmp("10", (caddr_t)prop_val,
+				(size_t)prop_len) == 0) {
+			speed = 10;
+		} else if (strncmp("auto", (caddr_t)prop_val,
+				(size_t)prop_len) == 0) {
+			speed = 0;
+		} else {
+			NXGE_ERROR_MSG((nxgep, NXGE_NOTE,
+				"speed property is invalid, reverting to auto"));
+			speed = 0;
+		}
+		ddi_prop_free(prop_val);
+	} else
+		speed = 0;
+
+	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip, 0, "duplex",
+			(uchar_t **)&prop_val, &prop_len) == DDI_PROP_SUCCESS) {
+		if (strncmp("full", (caddr_t)prop_val,
+				(size_t)prop_len) == 0) {
+			duplex = 2;
+		} else if (strncmp("half", (caddr_t)prop_val,
+				(size_t)prop_len) == 0) {
+			duplex = 1;
+		} else if (strncmp("auto", (caddr_t)prop_val,
+				(size_t)prop_len) == 0) {
+			duplex = 0;
+		} else {
+			NXGE_ERROR_MSG((nxgep, NXGE_NOTE,
+				"duplex property is invalid,"
+				" reverting to auto"));
+			duplex = 0;
+		}
+		ddi_prop_free(prop_val);
+	} else
+		duplex = 0;
+
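+	/*
+	 * Derive the advertised capability properties from the speed and
+	 * duplex settings read above.  Autonegotiation is advertised
+	 * whenever either speed or duplex is left at "auto"; otherwise the
+	 * capability bits are derived from the forced speed and duplex.
+	 */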
+	adv_autoneg_cap = (speed == 0) || (duplex == 0);
+	if (adv_autoneg_cap == 0) {
+		adv_10gfdx_cap = ((speed == 10000) && (duplex == 2));
+		adv_10ghdx_cap = adv_10gfdx_cap;
+		adv_10ghdx_cap |= ((speed == 10000) && (duplex == 1));
+		adv_1000fdx_cap = adv_10ghdx_cap;
+		adv_1000fdx_cap |= ((speed == 1000) && (duplex == 2));
+		adv_1000hdx_cap = adv_1000fdx_cap;
+		adv_1000hdx_cap |= ((speed == 1000) && (duplex == 1));
+		adv_100fdx_cap = adv_1000hdx_cap;
+		adv_100fdx_cap |= ((speed == 100) && (duplex == 2));
+		adv_100hdx_cap = adv_100fdx_cap;
+		adv_100hdx_cap |= ((speed == 100) && (duplex == 1));
+		adv_10fdx_cap = adv_100hdx_cap;
+		adv_10fdx_cap |= ((speed == 10) && (duplex == 2));
+		adv_10hdx_cap = adv_10fdx_cap;
+		adv_10hdx_cap |= ((speed == 10) && (duplex == 1));
+	} else if (speed == 0) {
+		adv_10gfdx_cap = (duplex == 2);
+		adv_10ghdx_cap = (duplex == 1);
+		adv_1000fdx_cap = (duplex == 2);
+		adv_1000hdx_cap = (duplex == 1);
+		adv_100fdx_cap = (duplex == 2);
+		adv_100hdx_cap = (duplex == 1);
+		adv_10fdx_cap = (duplex == 2);
+		adv_10hdx_cap = (duplex == 1);
+	}
+	if (duplex == 0) {
+		adv_10gfdx_cap = (speed == 0);
+		adv_10gfdx_cap |= (speed == 10000);
+		adv_10ghdx_cap = adv_10gfdx_cap;
+		adv_10ghdx_cap |= (speed == 10000);
+		adv_1000fdx_cap = adv_10ghdx_cap;
+		adv_1000fdx_cap |= (speed == 1000);
+		adv_1000hdx_cap = adv_1000fdx_cap;
+		adv_1000hdx_cap |= (speed == 1000);
+		adv_100fdx_cap = adv_1000hdx_cap;
+		adv_100fdx_cap |= (speed == 100);
+		adv_100hdx_cap = adv_100fdx_cap;
+		adv_100hdx_cap |= (speed == 100);
+		adv_10fdx_cap = adv_100hdx_cap;
+		adv_10fdx_cap |= (speed == 10);
+		adv_10hdx_cap = adv_10fdx_cap;
+		adv_10hdx_cap |= (speed == 10);
+	}
+	status = ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
+		"adv-autoneg-cap", &adv_autoneg_cap, 1);
+	if (status)
+		return;
+
+	status = ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
+		"adv-10gfdx-cap", &adv_10gfdx_cap, 1);
+	if (status)
+		goto nxge_map_myargs_to_gmii_fail1;
+
+	status = ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
+		"adv-10ghdx-cap", &adv_10ghdx_cap, 1);
+	if (status)
+		goto nxge_map_myargs_to_gmii_fail2;
+
+	status = ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
+		"adv-1000fdx-cap", &adv_1000fdx_cap, 1);
+	if (status)
+		goto nxge_map_myargs_to_gmii_fail3;
+
+	status = ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
+		"adv-1000hdx-cap", &adv_1000hdx_cap, 1);
+	if (status)
+		goto nxge_map_myargs_to_gmii_fail4;
+
+	status = ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
+		"adv-100fdx-cap", &adv_100fdx_cap, 1);
+	if (status)
+		goto nxge_map_myargs_to_gmii_fail5;
+
+	status = ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
+		"adv-100hdx-cap", &adv_100hdx_cap, 1);
+	if (status)
+		goto nxge_map_myargs_to_gmii_fail6;
+
+	status = ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
+		"adv-10fdx-cap", &adv_10fdx_cap, 1);
+	if (status)
+		goto nxge_map_myargs_to_gmii_fail7;
+
+	status = ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
+		"adv-10hdx-cap", &adv_10hdx_cap, 1);
+	if (status)
+		goto nxge_map_myargs_to_gmii_fail8;
+
+	return;
+
+nxge_map_myargs_to_gmii_fail9:
+	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "adv-10hdx-cap");
+
+nxge_map_myargs_to_gmii_fail8:
+	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "adv-10fdx-cap");
+
+nxge_map_myargs_to_gmii_fail7:
+	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "adv-100hdx-cap");
+
+nxge_map_myargs_to_gmii_fail6:
+	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "adv-100fdx-cap");
+
+nxge_map_myargs_to_gmii_fail5:
+	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "adv-1000hdx-cap");
+
+nxge_map_myargs_to_gmii_fail4:
+	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "adv-1000fdx-cap");
+
+nxge_map_myargs_to_gmii_fail3:
+	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "adv-10ghdx-cap");
+
+nxge_map_myargs_to_gmii_fail2:
+	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "adv-10gfdx-cap");
+
+nxge_map_myargs_to_gmii_fail1:
+	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "adv-autoneg-cap");
+}
+
+nxge_status_t
+nxge_get_config_properties(p_nxge_t nxgep)
+{
+	nxge_status_t status = NXGE_OK;
+	p_nxge_hw_list_t hw_p;
+	uint_t prop_len;
+	uchar_t *prop_val8;
+
+	NXGE_DEBUG_MSG((nxgep, VPD_CTL, " ==> nxge_get_config_properties"));
+
+	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			" nxge_get_config_properties:"
+			" common hardware not set (niu type 0x%x)",
+			nxgep->niu_type));
+		return (NXGE_ERROR);
+	}
+
+	/*
+	 * Get info on how many ports Neptune card has.
+	 */
+	switch (nxgep->niu_type) {
+	case N2_NIU:
+		nxgep->nports = 2;
+		nxgep->classifier.tcam_size = TCAM_NIU_TCAM_MAX_ENTRY;
+		if (nxgep->function_num > 1) {
+			return (NXGE_ERROR);
+		}
+		break;
+	case NEPTUNE_2:
+		if (nxgep->function_num > 1)
+			return (NXGE_ERROR);
+
+		/* Set Board Version Number */
+		nxgep->board_ver = 0;
+		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY,
+				nxgep->dip, 0, "board-model", &prop_val8,
+				&prop_len) == DDI_PROP_SUCCESS) {
+			if (prop_len > 10) {
+				if ((prop_val8[9] == '0') &&
+						(prop_val8[10] == '4'))
+					nxgep->board_ver = 4;
+			}
+			ddi_prop_free(prop_val8);
+		}
+		status = nxge_espc_num_ports_get(nxgep);
+		if (status != NXGE_OK)
+			return (NXGE_ERROR);
+
+		nxgep->classifier.tcam_size = TCAM_NXGE_TCAM_MAX_ENTRY;
+		break;
+
+	case NEPTUNE:
+	default:
+		status = nxge_espc_num_ports_get(nxgep);
+		if (status != NXGE_OK)
+			return (NXGE_ERROR);
+		nxgep->classifier.tcam_size = TCAM_NXGE_TCAM_MAX_ENTRY;
+		break;
+	}
+
+	status = nxge_get_mac_addr_properties(nxgep);
+	if (status != NXGE_OK)
+		return (NXGE_ERROR);
+
+	/*
+	 * Read the configuration type.  If none is specified, use the
+	 * default.  Config types:
+	 *
+	 * equal: (default) DMA channels, RDC groups, TCAM and FCRAM are
+	 * shared equally across all the ports.
+	 *
+	 * fair: DMA channels, RDC groups, TCAM and FCRAM are shared in
+	 * proportion to the port speed.
+	 *
+	 * custom: the DMA channel, RDC group, TCAM and FCRAM partitioning
+	 * is specified in nxge.conf.  Each parameter needs to be read and
+	 * set up in the nxge structures.
+	 */
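+	/*
+	 * Illustrative only: a fair sharing request could be expressed in
+	 * nxge.conf with entries such as
+	 *
+	 *	rxdma-cfg="fair";
+	 *	txdma-cfg="fair";
+	 *	rxdma-grp-cfg="fair";
+	 */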
+	switch (nxgep->niu_type) {
+	case N2_NIU:
+		NXGE_DEBUG_MSG((nxgep, VPD_CTL,
+			" ==> nxge_get_config_properties: N2"));
+		MUTEX_ENTER(&hw_p->nxge_cfg_lock);
+		if ((hw_p->flags & COMMON_CFG_VALID) !=
+			COMMON_CFG_VALID) {
+			status = nxge_cfg_verify_set(nxgep,
+				COMMON_RXDMA_GRP_CFG);
+			status = nxge_cfg_verify_set(nxgep,
+				COMMON_CLASS_CFG);
+			hw_p->flags |= COMMON_CFG_VALID;
+		}
+		MUTEX_EXIT(&hw_p->nxge_cfg_lock);
+		status = nxge_use_cfg_n2niu_properties(nxgep);
+		break;
+
+	case NEPTUNE:
+		NXGE_DEBUG_MSG((nxgep, VPD_CTL,
+			" ==> nxge_get_config_properties: Neptune"));
+		status = nxge_cfg_verify_set_quick_config(nxgep);
+		MUTEX_ENTER(&hw_p->nxge_cfg_lock);
+		if ((hw_p->flags & COMMON_CFG_VALID) !=
+			COMMON_CFG_VALID) {
+			status = nxge_cfg_verify_set(nxgep,
+				COMMON_TXDMA_CFG);
+			status = nxge_cfg_verify_set(nxgep,
+				COMMON_RXDMA_CFG);
+			status = nxge_cfg_verify_set(nxgep,
+				COMMON_RXDMA_GRP_CFG);
+			status = nxge_cfg_verify_set(nxgep,
+				COMMON_CLASS_CFG);
+			hw_p->flags |= COMMON_CFG_VALID;
+		}
+		MUTEX_EXIT(&hw_p->nxge_cfg_lock);
+		nxge_use_cfg_neptune_properties(nxgep);
+		status = NXGE_OK;
+		break;
+
+	case NEPTUNE_2:
+		NXGE_DEBUG_MSG((nxgep, VPD_CTL,
+			" ==> nxge_get_config_properties: Neptune-2"));
+		if (nxgep->function_num > 1)
+			return (NXGE_ERROR);
+		status = nxge_cfg_verify_set_quick_config(nxgep);
+		MUTEX_ENTER(&hw_p->nxge_cfg_lock);
+
+		if ((hw_p->flags & COMMON_CFG_VALID) !=
+			COMMON_CFG_VALID) {
+			status = nxge_cfg_verify_set(nxgep,
+				COMMON_TXDMA_CFG);
+			status = nxge_cfg_verify_set(nxgep,
+				COMMON_RXDMA_CFG);
+			status = nxge_cfg_verify_set(nxgep,
+				COMMON_RXDMA_GRP_CFG);
+			status = nxge_cfg_verify_set(nxgep,
+				COMMON_CLASS_CFG);
+			hw_p->flags |= COMMON_CFG_VALID;
+		}
+		MUTEX_EXIT(&hw_p->nxge_cfg_lock);
+
+		nxge_use_cfg_neptune_properties(nxgep);
+		status = NXGE_OK;
+		break;
+
+	default:
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			" nxge_get_config_properties:"
+			" unknown NIU type %x", nxgep->niu_type));
+		return (NXGE_ERROR);
+	}
+
+	NXGE_DEBUG_MSG((nxgep, VPD_CTL, " <== nxge_get_config_properties"));
+	return (status);
+}
+
+static nxge_status_t
+nxge_use_cfg_n2niu_properties(p_nxge_t nxgep)
+{
+	nxge_status_t status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, CFG_CTL, " ==> nxge_use_cfg_n2niu_properties"));
+
+	status = nxge_use_default_dma_config_n2(nxgep);
+	if (status != NXGE_OK) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			" ==> nxge_use_cfg_n2niu_properties (err 0x%x)",
+			status));
+		return (status | NXGE_ERROR);
+	}
+
+	(void) nxge_use_cfg_vlan_class_config(nxgep);
+	(void) nxge_use_cfg_mac_class_config(nxgep);
+	(void) nxge_use_cfg_class_config(nxgep);
+	(void) nxge_use_cfg_link_cfg(nxgep);
+
+	/*
+	 * Read in the hardware (fcode) properties. Use the ndd array to read
+	 * each property.
+	 */
+	(void) nxge_get_param_soft_properties(nxgep);
+	NXGE_DEBUG_MSG((nxgep, CFG_CTL, " <== nxge_use_cfg_n2niu_properties"));
+
+	return (status);
+}
+
+static void
+nxge_use_cfg_neptune_properties(p_nxge_t nxgep)
+{
+	NXGE_DEBUG_MSG((nxgep, CFG_CTL, "==> nxge_use_cfg_neptune_properties"));
+
+	(void) nxge_use_cfg_dma_config(nxgep);
+	(void) nxge_use_cfg_vlan_class_config(nxgep);
+	(void) nxge_use_cfg_mac_class_config(nxgep);
+	(void) nxge_use_cfg_class_config(nxgep);
+	(void) nxge_use_cfg_link_cfg(nxgep);
+
+	/*
+	 * Read in the hardware (fcode) properties. Use the ndd array to read
+	 * each property.
+	 */
+	(void) nxge_get_param_soft_properties(nxgep);
+	NXGE_DEBUG_MSG((nxgep, CFG_CTL, "<== nxge_use_cfg_neptune_properties"));
+}
+
+/*
+ * FWARC 2006/556
+ */
+
+static nxge_status_t
+nxge_use_default_dma_config_n2(p_nxge_t nxgep)
+{
+	int ndmas;
+	int nrxgp;
+	uint8_t func;
+	p_nxge_dma_pt_cfg_t p_dma_cfgp;
+	p_nxge_hw_pt_cfg_t p_cfgp;
+	int *prop_val;
+	uint_t prop_len;
+	int i;
+	nxge_status_t status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, OBP_CTL, "==> nxge_use_default_dma_config_n2"));
+
+	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
+	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
+
+	func = nxgep->function_num;
+	p_cfgp->function_number = func;
+	ndmas = NXGE_TDMA_PER_NIU_PORT;
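+	/*
+	 * The "tx-dma-channels" (and, below, "rx-dma-channels") property is
+	 * read as a two-cell integer array: cell 0 is the first channel
+	 * assigned to this function, cell 1 is the number of channels.
+	 */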
+	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip, 0,
+			"tx-dma-channels", (int **)&prop_val,
+			&prop_len) == DDI_PROP_SUCCESS) {
+		p_cfgp->start_tdc = prop_val[0];
+		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
+			"==> nxge_use_default_dma_config_n2: tdc starts %d "
+			"(#%d)", p_cfgp->start_tdc, prop_len));
+
+		ndmas = prop_val[1];
+		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
+			"==> nxge_use_default_dma_config_n2: #tdc %d (#%d)",
+			ndmas, prop_len));
+		ddi_prop_free(prop_val);
+	} else {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_use_default_dma_config_n2: "
+			"get tx-dma-channels failed"));
+		return (NXGE_DDI_FAILED);
+	}
+
+	p_cfgp->max_tdcs = nxgep->max_tdcs = ndmas;
+	nxgep->tdc_mask = (ndmas - 1);
+
+	NXGE_DEBUG_MSG((nxgep, OBP_CTL, "==> nxge_use_default_dma_config_n2: "
+		"p_cfgp 0x%llx max_tdcs %d nxgep->max_tdcs %d start %d",
+		p_cfgp, p_cfgp->max_tdcs, nxgep->max_tdcs, p_cfgp->start_tdc));
+
+	/* Receive DMA */
+	ndmas = NXGE_RDMA_PER_NIU_PORT;
+	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip, 0,
+			"rx-dma-channels", (int **)&prop_val,
+			&prop_len) == DDI_PROP_SUCCESS) {
+		p_cfgp->start_rdc = prop_val[0];
+		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
+			"==> nxge_use_default_dma_config_n2(obp): rdc start %d"
+			" (#%d)", p_cfgp->start_rdc, prop_len));
+		ndmas = prop_val[1];
+		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
+			"==> nxge_use_default_dma_config_n2(obp):#rdc %d (#%d)",
+			ndmas, prop_len));
+		ddi_prop_free(prop_val);
+	} else {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_use_default_dma_config_n2: "
+			"get rx-dma-channels failed"));
+		return (NXGE_DDI_FAILED);
+	}
+
+	p_cfgp->max_rdcs = nxgep->max_rdcs = ndmas;
+	nxgep->rdc_mask = (ndmas - 1);
+
+	/* Hypervisor: rdc # and group # use the same # !! */
+	p_cfgp->max_grpids = p_cfgp->max_rdcs + p_cfgp->max_tdcs;
+	p_cfgp->start_grpid = 0;
+	p_cfgp->mif_ldvid = p_cfgp->mac_ldvid = p_cfgp->ser_ldvid = 0;
+
+	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip, 0,
+			"interrupts", (int **)&prop_val,
+			&prop_len) == DDI_PROP_SUCCESS) {
+		/*
+		 * For each assigned device, the content of each "interrupts"
+		 * property entry is its logical device group.
+		 *
+		 * The "interrupts" property entries are assigned in the
+		 * following order:
+		 *
+		 *	MAC
+		 *	MIF (if configured)
+		 *	SYSTEM ERROR (if configured)
+		 *	first receive channel ... last receive channel
+		 *	first transmit channel ... last transmit channel
+		 *
+		 * prop_len should cover at least one MAC plus the total
+		 * number of rx and tx channels.  Function 0 owns MIF and
+		 * SYSTEM ERROR.
+		 */
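+		/*
+		 * Illustrative only: for a function 0 that owns the MAC, MIF
+		 * and SYSTEM ERROR devices plus, say, 8 receive and 8
+		 * transmit channels, the "interrupts" property would carry
+		 * 3 + 8 + 8 = 19 entries.
+		 */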
+		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
+			"==> nxge_use_default_dma_config_n2(obp): "
+			"# interrupts %d", prop_len));
+
+		switch (func) {
+		case 0:
+			p_cfgp->ldg_chn_start = 3;
+			p_cfgp->mac_ldvid = NXGE_MAC_LD_PORT0;
+			p_cfgp->mif_ldvid = NXGE_MIF_LD;
+			p_cfgp->ser_ldvid = NXGE_SYS_ERROR_LD;
+
+			break;
+		case 1:
+			p_cfgp->ldg_chn_start = 1;
+			p_cfgp->mac_ldvid = NXGE_MAC_LD_PORT1;
+
+			break;
+		default:
+			status = NXGE_DDI_FAILED;
+			break;
+		}
+
+		if (status != NXGE_OK)
+			return (status);
+
+		for (i = 0; i < prop_len; i++) {
+			p_cfgp->ldg[i] = prop_val[i];
+			NXGE_DEBUG_MSG((nxgep, OBP_CTL,
+				"==> nxge_use_default_dma_config_n2(obp): "
+				"interrupt #%d, ldg %d",
+				i, p_cfgp->ldg[i]));
+		}
+
+		p_cfgp->max_grpids = prop_len;
+		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
+			"==> nxge_use_default_dma_config_n2(obp): %d "
+			"(#%d) maxgrpids %d channel starts %d",
+			p_cfgp->mac_ldvid, i, p_cfgp->max_grpids,
+			p_cfgp->ldg_chn_start));
+		ddi_prop_free(prop_val);
+	} else {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_use_default_dma_config_n2: "
+			"get interrupts failed"));
+		return (NXGE_DDI_FAILED);
+	}
+
+	p_cfgp->max_ldgs = p_cfgp->max_grpids;
+	NXGE_DEBUG_MSG((nxgep, OBP_CTL,
+		"==> nxge_use_default_dma_config_n2: "
+		"p_cfgp 0x%llx max_rdcs %d nxgep->max_rdcs %d max_grpids %d"
+		"start_grpid %d macid %d mifid %d serrid %d",
+		p_cfgp, p_cfgp->max_rdcs, nxgep->max_rdcs, p_cfgp->max_grpids,
+		p_cfgp->start_grpid,
+		p_cfgp->mac_ldvid, p_cfgp->mif_ldvid, p_cfgp->ser_ldvid));
+
+	NXGE_DEBUG_MSG((nxgep, OBP_CTL, "==> nxge_use_default_dma_config_n2: "
+		"p_cfgp p%p start_ldg %d nxgep->max_ldgs %d",
+		p_cfgp, p_cfgp->start_ldg, p_cfgp->max_ldgs));
+
+	/*
+	 * RDC groups and the beginning RDC group assigned to this function.
+	 */
+	nrxgp = 2;
+	p_cfgp->max_rdc_grpids = nrxgp;
+	p_cfgp->start_rdc_grpid = (nxgep->function_num * nrxgp);
+
+	status = ddi_prop_update_int(DDI_DEV_T_NONE, nxgep->dip,
+		"rx-rdc-grps", nrxgp);
+	if (status) {
+		return (NXGE_DDI_FAILED);
+	}
+	status = ddi_prop_update_int(DDI_DEV_T_NONE, nxgep->dip,
+		"rx-rdc-grps-begin", p_cfgp->start_rdc_grpid);
+	if (status) {
+		(void) ddi_prop_remove(DDI_DEV_T_NONE, nxgep->dip,
+			"rx-rdc-grps");
+		return (NXGE_DDI_FAILED);
+	}
+	NXGE_DEBUG_MSG((nxgep, OBP_CTL, "==> nxge_use_default_dma_config_n2: "
+		"p_cfgp $%p # rdc groups %d start rdc group id %d",
+		p_cfgp, p_cfgp->max_rdc_grpids,
+		p_cfgp->start_rdc_grpid));
+
+	nxge_set_hw_dma_config(nxgep);
+	NXGE_DEBUG_MSG((nxgep, OBP_CTL, "<== nxge_use_default_dma_config_n2"));
+	return (status);
+}
+
+static void
+nxge_use_cfg_dma_config(p_nxge_t nxgep)
+{
+	int tx_ndmas, rx_ndmas, nrxgp;
+	p_nxge_dma_pt_cfg_t p_dma_cfgp;
+	p_nxge_hw_pt_cfg_t p_cfgp;
+	dev_info_t *dip;
+	p_nxge_param_t param_arr;
+	char *prop;
+	int *prop_val;
+	uint_t prop_len;
+
+	NXGE_DEBUG_MSG((nxgep, CFG_CTL, " ==> nxge_use_cfg_dma_config"));
+	param_arr = nxgep->param_arr;
+
+	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
+	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
+	dip = nxgep->dip;
+	p_cfgp->function_number = nxgep->function_num;
+	prop = param_arr[param_txdma_channels_begin].fcode_name;
+
+	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, prop,
+			&prop_val, &prop_len) == DDI_PROP_SUCCESS) {
+		p_cfgp->start_tdc = *prop_val;
+		ddi_prop_free(prop_val);
+	} else {
+		if (nxgep->nports == 2) {
+			tx_ndmas = (nxgep->function_num * p2_tx_equal[0]);
+		} else {
+			tx_ndmas = (nxgep->function_num * p4_tx_equal[0]);
+		}
+		(void) ddi_prop_update_int(DDI_DEV_T_NONE, nxgep->dip,
+			prop, tx_ndmas);
+		p_cfgp->start_tdc = tx_ndmas;
+	}
+
+	prop = param_arr[param_txdma_channels].fcode_name;
+	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, prop,
+			&prop_val, &prop_len) == DDI_PROP_SUCCESS) {
+		tx_ndmas = *prop_val;
+		ddi_prop_free(prop_val);
+	} else {
+		if (nxgep->nports == 2) {
+			tx_ndmas = p2_tx_equal[0];
+		} else {
+			tx_ndmas = p4_tx_equal[0];
+		}
+		(void) ddi_prop_update_int(DDI_DEV_T_NONE, nxgep->dip,
+			prop, tx_ndmas);
+	}
+
+	p_cfgp->max_tdcs = nxgep->max_tdcs = tx_ndmas;
+	nxgep->tdc_mask = (tx_ndmas - 1);
+	NXGE_DEBUG_MSG((nxgep, CFG_CTL, "==> nxge_use_cfg_dma_config: "
+		"p_cfgp 0x%llx max_tdcs %d nxgep->max_tdcs %d",
+		p_cfgp, p_cfgp->max_tdcs, nxgep->max_tdcs));
+
+	prop = param_arr[param_rxdma_channels_begin].fcode_name;
+
+	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, prop,
+			&prop_val, &prop_len) == DDI_PROP_SUCCESS) {
+		p_cfgp->start_rdc = *prop_val;
+		ddi_prop_free(prop_val);
+	} else {
+		if (nxgep->nports == 2) {
+			rx_ndmas = (nxgep->function_num * p2_rx_equal[0]);
+		} else {
+			rx_ndmas = (nxgep->function_num * p4_rx_equal[0]);
+		}
+		(void) ddi_prop_update_int(DDI_DEV_T_NONE, nxgep->dip,
+			prop, rx_ndmas);
+		p_cfgp->start_rdc = rx_ndmas;
+	}
+
+	prop = param_arr[param_rxdma_channels].fcode_name;
+
+	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, prop,
+			&prop_val, &prop_len) == DDI_PROP_SUCCESS) {
+		rx_ndmas = *prop_val;
+		ddi_prop_free(prop_val);
+	} else {
+		if (nxgep->nports == 2) {
+			rx_ndmas = p2_rx_equal[0];
+		} else {
+			rx_ndmas = p4_rx_equal[0];
+		}
+		(void) ddi_prop_update_int(DDI_DEV_T_NONE, nxgep->dip,
+			prop, rx_ndmas);
+	}
+
+	p_cfgp->max_rdcs = nxgep->max_rdcs = rx_ndmas;
+
+	prop = param_arr[param_rdc_grps_start].fcode_name;
+	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, prop,
+			&prop_val, &prop_len) == DDI_PROP_SUCCESS) {
+		p_cfgp->start_rdc_grpid = *prop_val;
+		ddi_prop_free(prop_val);
+		NXGE_DEBUG_MSG((nxgep, CFG_CTL,
+			"==> nxge_use_default_dma_config: "
+			"use property start_rdc_grpid %d ",
+			p_cfgp->start_rdc_grpid));
+	} else {
+		p_cfgp->start_rdc_grpid = nxgep->function_num;
+		(void) ddi_prop_update_int(DDI_DEV_T_NONE, nxgep->dip,
+			prop, p_cfgp->start_rdc_grpid);
+
+		NXGE_DEBUG_MSG((nxgep, CFG_CTL,
+			"==> nxge_use_default_dma_config: "
+			"use default "
+			"start_rdc_grpid %d (same as function #)",
+			p_cfgp->start_rdc_grpid));
+	}
+
+	prop = param_arr[param_rx_rdc_grps].fcode_name;
+	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, prop,
+			&prop_val, &prop_len) == DDI_PROP_SUCCESS) {
+		nrxgp = *prop_val;
+		ddi_prop_free(prop_val);
+	} else {
+		nrxgp = 1;
+		(void) ddi_prop_update_int(DDI_DEV_T_NONE, nxgep->dip,
+			prop, nrxgp);
+		NXGE_DEBUG_MSG((nxgep, CFG_CTL,
+			"==> nxge_use_default_dma_config: "
+			"num_rdc_grpid not found: use def:# of "
+			"rdc groups %d\n", nrxgp));
+	}
+
+	p_cfgp->max_rdc_grpids = nrxgp;
+
+	/*
+	 * 2/4 ports have the same hard-wired logical groups assigned.
+	 */
+	p_cfgp->start_ldg = nxgep->function_num * NXGE_LDGRP_PER_4PORTS;
+	p_cfgp->max_ldgs = NXGE_LDGRP_PER_4PORTS;
+
+	NXGE_DEBUG_MSG((nxgep, CFG_CTL, "==> nxge_use_default_dma_config: "
+		"p_cfgp 0x%llx max_rdcs %d nxgep->max_rdcs %d max_grpids %d"
+		"start_grpid %d",
+		p_cfgp, p_cfgp->max_rdcs, nxgep->max_rdcs, p_cfgp->max_grpids,
+		p_cfgp->start_grpid));
+
+	NXGE_DEBUG_MSG((nxgep, CFG_CTL, "==> nxge_use_cfg_dma_config: "
+		"p_cfgp 0x%016llx start_ldg %d nxgep->max_ldgs %d "
+		"start_rdc_grpid %d",
+		p_cfgp, p_cfgp->start_ldg, p_cfgp->max_ldgs,
+		p_cfgp->start_rdc_grpid));
+
+	prop = param_arr[param_rxdma_intr_time].fcode_name;
+
+	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, prop,
+			&prop_val, &prop_len) == DDI_PROP_SUCCESS) {
+		if ((prop_len > 0) && (prop_len <= p_cfgp->max_rdcs)) {
+			(void) ddi_prop_update_int_array(DDI_DEV_T_NONE,
+				nxgep->dip, prop, prop_val, prop_len);
+		}
+		ddi_prop_free(prop_val);
+	}
+	prop = param_arr[param_rxdma_intr_pkts].fcode_name;
+
+	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, prop,
+			&prop_val, &prop_len) == DDI_PROP_SUCCESS) {
+		if ((prop_len > 0) && (prop_len <= p_cfgp->max_rdcs)) {
+			(void) ddi_prop_update_int_array(DDI_DEV_T_NONE,
+				nxgep->dip, prop, prop_val, prop_len);
+		}
+		ddi_prop_free(prop_val);
+	}
+	nxge_set_hw_dma_config(nxgep);
+
+	NXGE_DEBUG_MSG((nxgep, CFG_CTL, "<== nxge_use_cfg_dma_config"));
+}
+
+static void
+nxge_use_cfg_vlan_class_config(p_nxge_t nxgep)
+{
+	uint_t vlan_cnt;
+	int *vlan_cfg_val;
+	int status;
+	p_nxge_param_t param_arr;
+	char *prop;
+
+	NXGE_DEBUG_MSG((nxgep, CFG_CTL, " ==> nxge_use_cfg_vlan_config"));
+	param_arr = nxgep->param_arr;
+	prop = param_arr[param_vlan_2rdc_grp].fcode_name;
+
+	status = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip, 0, prop,
+		&vlan_cfg_val, &vlan_cnt);
+	if (status == DDI_PROP_SUCCESS) {
+		status = ddi_prop_update_int_array(DDI_DEV_T_NONE,
+			nxgep->dip, prop, vlan_cfg_val, vlan_cnt);
+		ddi_prop_free(vlan_cfg_val);
+	}
+	nxge_set_hw_vlan_class_config(nxgep);
+	NXGE_DEBUG_MSG((nxgep, CFG_CTL, " <== nxge_use_cfg_vlan_config"));
+}
+
+static void
+nxge_use_cfg_mac_class_config(p_nxge_t nxgep)
+{
+	p_nxge_dma_pt_cfg_t p_dma_cfgp;
+	p_nxge_hw_pt_cfg_t p_cfgp;
+	uint_t mac_cnt;
+	int *mac_cfg_val;
+	int status;
+	p_nxge_param_t param_arr;
+	char *prop;
+
+	NXGE_DEBUG_MSG((nxgep, CFG_CTL, "==> nxge_use_cfg_mac_class_config"));
+	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
+	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
+	p_cfgp->start_mac_entry = 0;
+	param_arr = nxgep->param_arr;
+	prop = param_arr[param_mac_2rdc_grp].fcode_name;
+
+	switch (nxgep->function_num) {
+	case 0:
+	case 1:
+		/* 10G ports */
+		p_cfgp->max_macs = NXGE_MAX_MACS_XMACS;
+		break;
+	case 2:
+	case 3:
+		/* 1G ports */
+	default:
+		p_cfgp->max_macs = NXGE_MAX_MACS_BMACS;
+		break;
+	}
+
+	p_cfgp->mac_pref = 1;
+	p_cfgp->def_mac_rxdma_grpid = p_cfgp->start_rdc_grpid;
+
+	NXGE_DEBUG_MSG((nxgep, OBP_CTL,
+		"== nxge_use_cfg_mac_class_config: "
+		" mac_pref bit set def_mac_rxdma_grpid %d",
+		p_cfgp->def_mac_rxdma_grpid));
+
+	status = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip, 0, prop,
+		&mac_cfg_val, &mac_cnt);
+	if (status == DDI_PROP_SUCCESS) {
+		if (mac_cnt <= p_cfgp->max_macs)
+			status = ddi_prop_update_int_array(DDI_DEV_T_NONE,
+				nxgep->dip, prop, mac_cfg_val, mac_cnt);
+		ddi_prop_free(mac_cfg_val);
+	}
+	nxge_set_hw_mac_class_config(nxgep);
+	NXGE_DEBUG_MSG((nxgep, CFG_CTL, " <== nxge_use_cfg_mac_class_config"));
+}
+
+static void
+nxge_use_cfg_class_config(p_nxge_t nxgep)
+{
+	nxge_set_hw_class_config(nxgep);
+}
+
+static void
+nxge_set_rdc_intr_property(p_nxge_t nxgep)
+{
+	int i;
+	p_nxge_dma_pt_cfg_t p_dma_cfgp;
+
+	NXGE_DEBUG_MSG((nxgep, CFG_CTL, " ==> nxge_set_rdc_intr_property"));
+	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
+
+	for (i = 0; i < NXGE_MAX_RDCS; i++) {
+		p_dma_cfgp->rcr_timeout[i] = nxge_rcr_timeout;
+		p_dma_cfgp->rcr_threshold[i] = nxge_rcr_threshold;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, CFG_CTL, " <== nxge_set_rdc_intr_property"));
+}
+
+static void
+nxge_set_hw_dma_config(p_nxge_t nxgep)
+{
+	int i, j, rdc, ndmas, ngrps, bitmap, end, st_rdc;
+	int32_t status;
+	uint8_t rdcs_per_grp;
+	p_nxge_dma_pt_cfg_t p_dma_cfgp;
+	p_nxge_hw_pt_cfg_t p_cfgp;
+	p_nxge_rdc_grp_t rdc_grp_p;
+	int rdcgrp_cfg = CFG_NOT_SPECIFIED, rx_quick_cfg;
+	char *prop, *prop_val;
+	p_nxge_param_t param_arr;
+	config_token_t token;
+
+	NXGE_DEBUG_MSG((nxgep, CFG_CTL, "==> nxge_set_hw_dma_config"));
+
+	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
+	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
+	rdc_grp_p = p_dma_cfgp->rdc_grps;
+
+	/* Transmit DMA Channels */
+	bitmap = 0;
+	end = p_cfgp->start_tdc + p_cfgp->max_tdcs;
+	nxgep->ntdc = p_cfgp->max_tdcs;
+	p_dma_cfgp->tx_dma_map = 0;
+	for (i = p_cfgp->start_tdc; i < end; i++) {
+		bitmap |= (1 << i);
+		nxgep->tdc[i - p_cfgp->start_tdc] = (uint8_t)i;
+	}
+
+	p_dma_cfgp->tx_dma_map = bitmap;
+	param_arr = nxgep->param_arr;
+
+	/* Assume RDCs are evenly distributed */
+	rx_quick_cfg = param_arr[param_rx_quick_cfg].value;
+	switch (rx_quick_cfg) {
+	case CFG_NOT_SPECIFIED:
+		prop = "rxdma-grp-cfg";
+		status = ddi_prop_lookup_string(DDI_DEV_T_ANY,
+			nxgep->dip, 0, prop, (char **)&prop_val);
+		if (status != DDI_PROP_SUCCESS) {
+			NXGE_DEBUG_MSG((nxgep, CFG_CTL,
+				" property %s not found", prop));
+			rdcgrp_cfg = CFG_L3_DISTRIBUTE;
+		} else {
+			token = nxge_get_config_token(prop_val);
+			switch (token) {
+			case L2_CLASSIFY:
+				break;
+			case CLASSIFY:
+			case L3_CLASSIFY:
+			case L3_DISTRIBUTE:
+			case L3_TCAM:
+				rdcgrp_cfg = CFG_L3_DISTRIBUTE;
+				break;
+			default:
+				rdcgrp_cfg = CFG_L3_DISTRIBUTE;
+				break;
+			}
+			ddi_prop_free(prop_val);
+		}
+		break;
+	case CFG_L3_WEB:
+	case CFG_L3_DISTRIBUTE:
+	case CFG_L2_CLASSIFY:
+	case CFG_L3_TCAM:
+		rdcgrp_cfg = rx_quick_cfg;
+		break;
+	default:
+		rdcgrp_cfg = CFG_L3_DISTRIBUTE;
+		break;
+	}
+
+	/* Receive DMA Channels */
+	st_rdc = p_cfgp->start_rdc;
+	nxgep->nrdc = p_cfgp->max_rdcs;
+
+	for (i = 0; i < p_cfgp->max_rdcs; i++) {
+		nxgep->rdc[i] = i + p_cfgp->start_rdc;
+	}
+
+	switch (rdcgrp_cfg) {
+	case CFG_L3_DISTRIBUTE:
+	case CFG_L3_WEB:
+	case CFG_L3_TCAM:
+		ndmas = p_cfgp->max_rdcs;
+		ngrps = 1;
+		rdcs_per_grp = ndmas / ngrps;
+		break;
+	case CFG_L2_CLASSIFY:
+		ndmas = p_cfgp->max_rdcs / 2;
+		if (p_cfgp->max_rdcs < 2)
+			ndmas = 1;
+		ngrps = 1;
+		rdcs_per_grp = ndmas / ngrps;
+		break;
+	default:
+		ngrps = p_cfgp->max_rdc_grpids;
+		ndmas = p_cfgp->max_rdcs;
+		rdcs_per_grp = ndmas / ngrps;
+		break;
+	}
+
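+	/*
+	 * Populate each RDC group table: the group's channels are written
+	 * round-robin until all NXGE_MAX_RDCS table entries are filled, and
+	 * the first channel becomes the group's default RDC.
+	 */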
+	for (i = 0; i < ngrps; i++) {
+		rdc_grp_p = &p_dma_cfgp->rdc_grps[i];
+		rdc_grp_p->start_rdc = st_rdc + i * rdcs_per_grp;
+		rdc_grp_p->max_rdcs = rdcs_per_grp;
+
+		/* default to: 0, 1, 2, 3, ...., 0, 1, 2, 3.... */
+		rdc_grp_p->config_method = RDC_TABLE_ENTRY_METHOD_SEQ;
+		rdc = rdc_grp_p->start_rdc;
+		for (j = 0; j < NXGE_MAX_RDCS; j++) {
+			rdc_grp_p->rdc[j] = rdc++;
+			if (rdc == (rdc_grp_p->start_rdc + rdcs_per_grp)) {
+				rdc = rdc_grp_p->start_rdc;
+			}
+		}
+		rdc_grp_p->def_rdc = rdc_grp_p->rdc[0];
+		rdc_grp_p->flag = 1;	/* configured */
+	}
+
+	/* default RDC */
+	p_cfgp->def_rdc = p_cfgp->start_rdc;
+	nxgep->def_rdc = p_cfgp->start_rdc;
+
+	/* full 18 byte header ? */
+	p_dma_cfgp->rcr_full_header = NXGE_RCR_FULL_HEADER;
+	p_dma_cfgp->rx_drr_weight = PT_DRR_WT_DEFAULT_10G;
+	if (nxgep->function_num > 1)
+		p_dma_cfgp->rx_drr_weight = PT_DRR_WT_DEFAULT_1G;
+	p_dma_cfgp->rbr_size = nxge_rbr_size;
+	p_dma_cfgp->rcr_size = nxge_rcr_size;
+
+	nxge_set_rdc_intr_property(nxgep);
+	NXGE_DEBUG_MSG((nxgep, CFG_CTL, " <== nxge_set_hw_dma_config"));
+}
+
+boolean_t
+nxge_check_rxdma_port_member(p_nxge_t nxgep, uint8_t rdc)
+{
+	p_nxge_dma_pt_cfg_t p_dma_cfgp;
+	p_nxge_hw_pt_cfg_t p_cfgp;
+	int status = B_FALSE;
+
+	NXGE_DEBUG_MSG((nxgep, CFG2_CTL, "==> nxge_check_rxdma_port_member"));
+
+	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
+	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
+
+	/* Receive DMA Channels */
+	if (rdc < p_cfgp->max_rdcs)
+		status = B_TRUE;
+	NXGE_DEBUG_MSG((nxgep, CFG2_CTL, " <== nxge_check_rxdma_port_member"));
+	return (status);
+}
+
+boolean_t
+nxge_check_txdma_port_member(p_nxge_t nxgep, uint8_t tdc)
+{
+	p_nxge_dma_pt_cfg_t p_dma_cfgp;
+	p_nxge_hw_pt_cfg_t p_cfgp;
+	int status = B_FALSE;
+
+	NXGE_DEBUG_MSG((nxgep, CFG2_CTL, "==> nxge_check_txdma_port_member"));
+
+	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
+	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
+
+	/* Transmit DMA Channels */
+	if (tdc < p_cfgp->max_tdcs)
+		status = B_TRUE;
+	NXGE_DEBUG_MSG((nxgep, CFG2_CTL, " <== nxge_check_txdma_port_member"));
+	return (status);
+}
+
+boolean_t
+nxge_check_rxdma_rdcgrp_member(p_nxge_t nxgep, uint8_t rdc_grp, uint8_t rdc)
+{
+	p_nxge_dma_pt_cfg_t p_dma_cfgp;
+	int status = B_TRUE;
+	p_nxge_rdc_grp_t rdc_grp_p;
+
+	NXGE_DEBUG_MSG((nxgep, CFG2_CTL,
+		" ==> nxge_check_rxdma_rdcgrp_member"));
+	NXGE_DEBUG_MSG((nxgep, CFG2_CTL, "  nxge_check_rxdma_rdcgrp_member"
+		" rdc  %d group %d", rdc, rdc_grp));
+	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
+
+	rdc_grp_p = &p_dma_cfgp->rdc_grps[rdc_grp];
+	NXGE_DEBUG_MSG((nxgep, CFG2_CTL, "  max  %d ", rdc_grp_p->max_rdcs));
+	if (rdc >= rdc_grp_p->max_rdcs) {
+		status = B_FALSE;
+	}
+	NXGE_DEBUG_MSG((nxgep, CFG2_CTL,
+		" <== nxge_check_rxdma_rdcgrp_member"));
+	return (status);
+}
+
+boolean_t
+nxge_check_rdcgrp_port_member(p_nxge_t nxgep, uint8_t rdc_grp)
+{
+	p_nxge_dma_pt_cfg_t p_dma_cfgp;
+	p_nxge_hw_pt_cfg_t p_cfgp;
+	int status = B_TRUE;
+
+	NXGE_DEBUG_MSG((nxgep, CFG2_CTL, "==> nxge_check_rdcgrp_port_member"));
+
+	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
+	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
+
+	if (rdc_grp >= p_cfgp->max_rdc_grpids)
+		status = B_FALSE;
+	NXGE_DEBUG_MSG((nxgep, CFG2_CTL, " <== nxge_check_rdcgrp_port_member"));
+	return (status);
+}
+
+static void
+nxge_set_hw_vlan_class_config(p_nxge_t nxgep)
+{
+	int i;
+	p_nxge_dma_pt_cfg_t p_dma_cfgp;
+	p_nxge_hw_pt_cfg_t p_cfgp;
+	p_nxge_param_t param_arr;
+	uint_t vlan_cnt;
+	int *vlan_cfg_val;
+	nxge_param_map_t *vmap;
+	char *prop;
+	p_nxge_class_pt_cfg_t p_class_cfgp;
+	uint32_t good_cfg[32];
+	int good_count = 0;
+	nxge_mv_cfg_t *vlan_tbl;
+
+	NXGE_DEBUG_MSG((nxgep, CFG_CTL, " ==> nxge_set_hw_vlan_config"));
+	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
+	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
+	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
+
+	param_arr = nxgep->param_arr;
+	prop = param_arr[param_vlan_2rdc_grp].fcode_name;
+
+	/*
+	 * By default, VLAN to RDC group mapping is disabled.  The HW or
+	 * .conf properties need to be read to find out whether mapping is
+	 * required.
+	 *
+	 * Format: a uint32_t array, each entry specifying a VLAN id and
+	 * its mapping:
+	 *
+	 *	bit[30]     = add
+	 *	bit[29]     = remove
+	 *	bit[28]     = preference
+	 *	bits[23-16] = rdcgrp
+	 *	bits[15-0]  = VLAN ID
+	 */
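+	/*
+	 * Illustrative only: to add VLAN ID 100 to RDC group 1 with the
+	 * preference bit set, an entry would be encoded as
+	 *
+	 *	(1 << 30) | (1 << 28) | (1 << 16) | 100  ==  0x50010064
+	 */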
+
+	for (i = 0; i < NXGE_MAX_VLANS; i++) {
+		p_class_cfgp->vlan_tbl[i].flag = 0;
+	}
+
+	vlan_tbl = (nxge_mv_cfg_t *)&p_class_cfgp->vlan_tbl[0];
+	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip, 0, prop,
+			&vlan_cfg_val, &vlan_cnt) == DDI_PROP_SUCCESS) {
+		for (i = 0; i < vlan_cnt; i++) {
+			vmap = (nxge_param_map_t *)&vlan_cfg_val[i];
+			if ((vmap->param_id) &&
+					(vmap->param_id < NXGE_MAX_VLANS) &&
+					(vmap->map_to <
+						p_cfgp->max_rdc_grpids) &&
+					(vmap->map_to >= (uint8_t)0)) {
+				NXGE_DEBUG_MSG((nxgep, CFG2_CTL,
+					" nxge_vlan_config mapping"
+					" id %d grp %d",
+					vmap->param_id, vmap->map_to));
+				good_cfg[good_count] = vlan_cfg_val[i];
+				if (vlan_tbl[vmap->param_id].flag == 0)
+					good_count++;
+				vlan_tbl[vmap->param_id].flag = 1;
+				vlan_tbl[vmap->param_id].rdctbl =
+					vmap->map_to + p_cfgp->start_rdc_grpid;
+				vlan_tbl[vmap->param_id].mpr_npr = vmap->pref;
+			}
+		}
+		ddi_prop_free(vlan_cfg_val);
+		if (good_count != vlan_cnt) {
+			(void) ddi_prop_update_int_array(DDI_DEV_T_NONE,
+				nxgep->dip, prop, (int *)good_cfg, good_count);
+		}
+	}
+	NXGE_DEBUG_MSG((nxgep, CFG_CTL, "<== nxge_set_hw_vlan_config"));
+}
+
+static void
+nxge_set_hw_mac_class_config(p_nxge_t nxgep)
+{
+	int i;
+	p_nxge_dma_pt_cfg_t p_dma_cfgp;
+	p_nxge_hw_pt_cfg_t p_cfgp;
+	p_nxge_param_t param_arr;
+	uint_t mac_cnt;
+	int *mac_cfg_val;
+	nxge_param_map_t *mac_map;
+	char *prop;
+	p_nxge_class_pt_cfg_t p_class_cfgp;
+	int good_count = 0;
+	int good_cfg[NXGE_MAX_MACS];
+	nxge_mv_cfg_t *mac_host_info;
+
+	NXGE_DEBUG_MSG((nxgep, CFG_CTL, "==> nxge_set_hw_mac_config"));
+
+	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
+	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
+	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
+	mac_host_info = (nxge_mv_cfg_t *)&p_class_cfgp->mac_host_info[0];
+
+	param_arr = nxgep->param_arr;
+	prop = param_arr[param_mac_2rdc_grp].fcode_name;
+
+	for (i = 0; i < NXGE_MAX_MACS; i++) {
+		p_class_cfgp->mac_host_info[i].flag = 0;
+	}
+
+	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip, 0, prop,
+			&mac_cfg_val, &mac_cnt) == DDI_PROP_SUCCESS) {
+		for (i = 0; i < mac_cnt; i++) {
+			mac_map = (nxge_param_map_t *)&mac_cfg_val[i];
+			if ((mac_map->param_id < p_cfgp->max_macs) &&
+					(mac_map->map_to <
+						p_cfgp->max_rdc_grpids) &&
+					(mac_map->map_to >= (uint8_t)0)) {
+				NXGE_DEBUG_MSG((nxgep, CFG2_CTL,
+					" nxge_mac_config mapping"
+					" id %d grp %d",
+					mac_map->param_id, mac_map->map_to));
+				mac_host_info[mac_map->param_id].mpr_npr =
+					mac_map->pref;
+				mac_host_info[mac_map->param_id].rdctbl =
+					mac_map->map_to +
+					p_cfgp->start_rdc_grpid;
+				good_cfg[good_count] = mac_cfg_val[i];
+				if (mac_host_info[mac_map->param_id].flag == 0)
+					good_count++;
+				mac_host_info[mac_map->param_id].flag = 1;
+			}
+		}
+		ddi_prop_free(mac_cfg_val);
+		if (good_count != mac_cnt) {
+			(void) ddi_prop_update_int_array(DDI_DEV_T_NONE,
+				nxgep->dip, prop, good_cfg, good_count);
+		}
+	}
+	NXGE_DEBUG_MSG((nxgep, CFG_CTL, "<== nxge_set_hw_mac_config"));
+}
+
+static void
+nxge_set_hw_class_config(p_nxge_t nxgep)
+{
+	int i;
+	p_nxge_param_t param_arr;
+	int *int_prop_val;
+	uint32_t cfg_value;
+	char *prop;
+	p_nxge_class_pt_cfg_t p_class_cfgp;
+	int start_prop, end_prop;
+	uint_t prop_cnt;
+
+	NXGE_DEBUG_MSG((nxgep, CFG_CTL, " ==> nxge_set_hw_class_config"));
+
+	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
+	param_arr = nxgep->param_arr;
+	start_prop = param_class_opt_ip_usr4;
+	end_prop = param_class_opt_ipv6_sctp;
+
+	for (i = start_prop; i <= end_prop; i++) {
+		prop = param_arr[i].fcode_name;
+		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip,
+				0, prop, &int_prop_val,
+				&prop_cnt) == DDI_PROP_SUCCESS) {
+			cfg_value = (uint32_t)*int_prop_val;
+			ddi_prop_free(int_prop_val);
+		} else {
+			cfg_value = (uint32_t)param_arr[i].value;
+		}
+		p_class_cfgp->class_cfg[i - start_prop] = cfg_value;
+	}
+
+	prop = param_arr[param_h1_init_value].fcode_name;
+
+	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip, 0, prop,
+			&int_prop_val, &prop_cnt) == DDI_PROP_SUCCESS) {
+		cfg_value = (uint32_t)*int_prop_val;
+		ddi_prop_free(int_prop_val);
+	} else {
+		cfg_value = (uint32_t)param_arr[param_h1_init_value].value;
+	}
+
+	p_class_cfgp->init_h1 = (uint32_t)cfg_value;
+	prop = param_arr[param_h2_init_value].fcode_name;
+
+	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip, 0, prop,
+			&int_prop_val, &prop_cnt) == DDI_PROP_SUCCESS) {
+		cfg_value = (uint32_t)*int_prop_val;
+		ddi_prop_free(int_prop_val);
+	} else {
+		cfg_value = (uint32_t)param_arr[param_h2_init_value].value;
+	}
+
+	p_class_cfgp->init_h2 = (uint16_t)cfg_value;
+	NXGE_DEBUG_MSG((nxgep, CFG_CTL, " <== nxge_set_hw_class_config"));
+}
+
+nxge_status_t
+nxge_ldgv_init_n2(p_nxge_t nxgep, int *navail_p, int *nrequired_p)
+{
+	int i, maxldvs, maxldgs, start, end, nldvs;
+	int ldv, endldg;
+	uint8_t func;
+	uint8_t channel;
+	uint8_t chn_start;
+	boolean_t own_sys_err = B_FALSE, own_fzc = B_FALSE;
+	p_nxge_dma_pt_cfg_t p_dma_cfgp;
+	p_nxge_hw_pt_cfg_t p_cfgp;
+	p_nxge_ldgv_t ldgvp;
+	p_nxge_ldg_t ldgp, ptr;
+	p_nxge_ldv_t ldvp;
+	nxge_status_t status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_ldgv_init_n2"));
+	if (!*navail_p) {
+		*nrequired_p = 0;
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"<== nxge_ldgv_init_n2: no avail"));
+		return (NXGE_ERROR);
+	}
+	/*
+	 * N2/NIU: one logical device owns one logical group, and each
+	 * device/group will be assigned one vector by the Hypervisor.
+	 */
+	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
+	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
+	maxldgs = p_cfgp->max_ldgs;
+	if (!maxldgs) {
+		/* No devices configured. */
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_ldgv_init_n2: "
+			"no logical groups configured."));
+		return (NXGE_ERROR);
+	} else {
+		maxldvs = maxldgs + 1;
+	}
+
+	/*
+	 * If function zero instance, it needs to handle the system and MIF
+	 * error interrupts. MIF interrupt may not be needed for N2/NIU.
+	 */
+	func = nxgep->function_num;
+	if (func == 0) {
+		own_sys_err = B_TRUE;
+		if (!p_cfgp->ser_ldvid) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"nxge_ldgv_init_n2: func 0, ERR ID not set!"));
+		}
+		/* MIF interrupt */
+		if (!p_cfgp->mif_ldvid) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"nxge_ldgv_init_n2: func 0, MIF ID not set!"));
+		}
+	}
+
+	/*
+	 * Assume a single partition; each function owns the MAC.
+	 */
+	if (!nxge_use_partition)
+		own_fzc = B_TRUE;
+
+	ldgvp = nxgep->ldgvp;
+	if (ldgvp == NULL) {
+		ldgvp = KMEM_ZALLOC(sizeof (nxge_ldgv_t), KM_SLEEP);
+		nxgep->ldgvp = ldgvp;
+		ldgvp->maxldgs = (uint8_t)maxldgs;
+		ldgvp->maxldvs = (uint8_t)maxldvs;
+		ldgp = ldgvp->ldgp = KMEM_ZALLOC(sizeof (nxge_ldg_t) * maxldgs,
+			KM_SLEEP);
+		ldvp = ldgvp->ldvp = KMEM_ZALLOC(sizeof (nxge_ldv_t) * maxldvs,
+			KM_SLEEP);
+	} else {
+		ldgp = ldgvp->ldgp;
+		ldvp = ldgvp->ldvp;
+	}
+
+	ldgvp->ndma_ldvs = p_cfgp->max_tdcs + p_cfgp->max_rdcs;
+	ldgvp->tmres = NXGE_TIMER_RESO;
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL,
+		"==> nxge_ldgv_init_n2: maxldvs %d maxldgs %d",
+		maxldvs, maxldgs));
+
+	/* logical start_ldg is ldv */
+	ptr = ldgp;
+	for (i = 0; i < maxldgs; i++) {
+		ptr->func = func;
+		ptr->arm = B_TRUE;
+		ptr->vldg_index = (uint8_t)i;
+		ptr->ldg_timer = NXGE_TIMER_LDG;
+		ptr->ldg = p_cfgp->ldg[i];
+		ptr->sys_intr_handler = nxge_intr;
+		ptr->nldvs = 0;
+		ptr->ldvp = NULL;
+		ptr->nxgep = nxgep;
+		NXGE_DEBUG_MSG((nxgep, INT_CTL,
+			"==> nxge_ldgv_init_n2: maxldvs %d maxldgs %d "
+			"ldg %d ldgptr $%p",
+			maxldvs, maxldgs, ptr->ldg, ptr));
+		ptr++;
+	}
+
+	endldg = NXGE_INT_MAX_LDG;
+	nldvs = 0;
+	ldgvp->nldvs = 0;
+	ldgp->ldvp = NULL;
+	*nrequired_p = 0;
+
+	/*
+	 * The logical device group table is organized in the following order
+	 * (same as what the interrupt property has).  Function 0 owns MAC,
+	 * MIF, error, rx and tx.  Function 1 owns MAC, rx and tx.
+	 */
+
+	if (own_fzc && p_cfgp->mac_ldvid) {
+		/* Each function should own MAC interrupt */
+		ldv = p_cfgp->mac_ldvid;
+		ldvp->ldv = (uint8_t)ldv;
+		ldvp->is_mac = B_TRUE;
+		ldvp->ldv_intr_handler = nxge_mac_intr;
+		ldvp->ldv_ldf_masks = 0;
+		ldvp->nxgep = nxgep;
+		NXGE_DEBUG_MSG((nxgep, INT_CTL,
+			"==> nxge_ldgv_init_n2(mac): maxldvs %d ldv %d "
+			"ldg %d ldgptr $%p ldvptr $%p",
+			maxldvs, ldv, ldgp->ldg, ldgp, ldvp));
+		nxge_ldgv_setup(&ldgp, &ldvp, ldv, endldg, nrequired_p);
+		nldvs++;
+	}
+
+	if (own_fzc && p_cfgp->mif_ldvid) {
+		ldv = p_cfgp->mif_ldvid;
+		ldvp->ldv = (uint8_t)ldv;
+		ldvp->is_mif = B_TRUE;
+		ldvp->ldv_intr_handler = nxge_mif_intr;
+		ldvp->ldv_ldf_masks = 0;
+		ldvp->nxgep = nxgep;
+		NXGE_DEBUG_MSG((nxgep, INT_CTL,
+			"==> nxge_ldgv_init_n2(mif): maxldvs %d ldv %d "
+			"ldg %d ldgptr $%p ldvptr $%p",
+			maxldvs, ldv, ldgp->ldg, ldgp, ldvp));
+		nxge_ldgv_setup(&ldgp, &ldvp, ldv, endldg, nrequired_p);
+		nldvs++;
+	}
+
+	ldv = NXGE_SYS_ERROR_LD;
+	ldvp->use_timer = B_TRUE;
+	if (own_sys_err && p_cfgp->ser_ldvid) {
+		ldv = p_cfgp->ser_ldvid;
+		/*
+		 * Unmask the system interrupt states.
+		 */
+		(void) nxge_fzc_sys_err_mask_set(nxgep, SYS_ERR_SMX_MASK |
+			SYS_ERR_IPP_MASK | SYS_ERR_TXC_MASK |
+			SYS_ERR_ZCP_MASK);
+	}
+	ldvp->ldv = (uint8_t)ldv;
+	ldvp->is_syserr = B_TRUE;
+	ldvp->ldv_intr_handler = nxge_syserr_intr;
+	ldvp->ldv_ldf_masks = 0;
+	ldvp->nxgep = nxgep;
+	ldgvp->ldvp_syserr = ldvp;
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL,
+		"==> nxge_ldgv_init_n2(syserr): maxldvs %d ldv %d "
+		"ldg %d ldgptr $%p ldvptr $%p",
+		maxldvs, ldv, ldgp->ldg, ldgp, ldvp));
+
+	if (own_sys_err && p_cfgp->ser_ldvid) {
+		(void) nxge_ldgv_setup(&ldgp, &ldvp, ldv, endldg, nrequired_p);
+	} else {
+		ldvp++;
+	}
+
+	nldvs++;
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_ldgv_init_n2: "
+		"(before rx) func %d nldvs %d navail %d nrequired %d",
+		func, nldvs, *navail_p, *nrequired_p));
+
+	/*
+	 * Receive DMA channels.
+	 */
+	channel = p_cfgp->start_rdc;
+	start = p_cfgp->start_rdc + NXGE_RDMA_LD_START;
+	end = start + p_cfgp->max_rdcs;
+	chn_start = p_cfgp->ldg_chn_start;
+	/*
+	 * Start with RDC to configure logical devices for each group.
+	 */
+	for (i = 0, ldv = start; ldv < end; i++, ldv++, chn_start++) {
+		ldvp->is_rxdma = B_TRUE;
+		ldvp->ldv = (uint8_t)ldv;
+		ldvp->channel = channel++;
+		ldvp->vdma_index = (uint8_t)i;
+		ldvp->ldv_intr_handler = nxge_rx_intr;
+		ldvp->ldv_ldf_masks = 0;
+		ldvp->nxgep = nxgep;
+		ldgp->ldg = p_cfgp->ldg[chn_start];
+
+		NXGE_DEBUG_MSG((nxgep, INT_CTL,
+			"==> nxge_ldgv_init_n2(rx%d): maxldvs %d ldv %d "
+			"ldg %d ldgptr 0x%016llx ldvptr 0x%016llx",
+			i, maxldvs, ldv, ldgp->ldg, ldgp, ldvp));
+		nxge_ldgv_setup(&ldgp, &ldvp, ldv, endldg, nrequired_p);
+		nldvs++;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_ldgv_init_n2: "
+		"func %d nldvs %d navail %d nrequired %d",
+		func, nldvs, *navail_p, *nrequired_p));
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_ldgv_init_n2: "
+		"func %d nldvs %d navail %d nrequired %d ldgp 0x%llx "
+		"ldvp 0x%llx",
+		func, nldvs, *navail_p, *nrequired_p, ldgp, ldvp));
+	/*
+	 * Transmit DMA channels.
+	 */
+	channel = p_cfgp->start_tdc;
+	start = p_cfgp->start_tdc + NXGE_TDMA_LD_START;
+	end = start + p_cfgp->max_tdcs;
+	for (i = 0, ldv = start; ldv < end; i++, ldv++, chn_start++) {
+		ldvp->is_txdma = B_TRUE;
+		ldvp->ldv = (uint8_t)ldv;
+		ldvp->channel = channel++;
+		ldvp->vdma_index = (uint8_t)i;
+		ldvp->ldv_intr_handler = nxge_tx_intr;
+		ldvp->ldv_ldf_masks = 0;
+		ldgp->ldg = p_cfgp->ldg[chn_start];
+		ldvp->nxgep = nxgep;
+		NXGE_DEBUG_MSG((nxgep, INT_CTL,
+			"==> nxge_ldgv_init_n2(tx%d): maxldvs %d ldv %d "
+			"ldg %d ldgptr 0x%016llx ldvptr 0x%016llx",
+			i, maxldvs, ldv, ldgp->ldg, ldgp, ldvp));
+		nxge_ldgv_setup(&ldgp, &ldvp, ldv, endldg, nrequired_p);
+		nldvs++;
+	}
+
+	ldgvp->ldg_intrs = *nrequired_p;
+	ldgvp->nldvs = (uint8_t)nldvs;
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_ldgv_init_n2: "
+		"func %d nldvs %d maxgrps %d navail %d nrequired %d",
+		func, nldvs, maxldgs, *navail_p, *nrequired_p));
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_ldgv_init_n2"));
+	return (status);
+}
+
+/*
+ * Interrupt-related interface functions.
+ */
+
+nxge_status_t
+nxge_ldgv_init(p_nxge_t nxgep, int *navail_p, int *nrequired_p)
+{
+	int i, maxldvs, maxldgs, start, end, nldvs;
+	int ldv, ldg, endldg, ngrps;
+	uint8_t func;
+	uint8_t channel;
+	boolean_t own_sys_err = B_FALSE, own_fzc = B_FALSE;
+	p_nxge_dma_pt_cfg_t p_dma_cfgp;
+	p_nxge_hw_pt_cfg_t p_cfgp;
+	p_nxge_ldgv_t ldgvp;
+	p_nxge_ldg_t ldgp, ptr;
+	p_nxge_ldv_t ldvp;
+	nxge_status_t status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_ldgv_init"));
+	if (!*navail_p) {
+		*nrequired_p = 0;
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"<== nxge_ldgv_init:no avail"));
+		return (NXGE_ERROR);
+	}
+	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
+	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
+
+	nldvs = p_cfgp->max_tdcs + p_cfgp->max_rdcs;
+
+	/*
+	 * If function zero instance, it needs to handle the system error
+	 * interrupts.
+	 */
+	func = nxgep->function_num;
+	if (func == 0) {
+		nldvs++;
+		own_sys_err = B_TRUE;
+	} else {
+		/* use timer */
+		nldvs++;
+	}
+
+	/*
+	 * Assume a single partition; each function owns the MAC.
+	 */
+	if (!nxge_use_partition) {
+		/* mac */
+		nldvs++;
+		/* MIF */
+		nldvs++;
+		own_fzc = B_TRUE;
+	}
+	maxldvs = nldvs;
+	maxldgs = p_cfgp->max_ldgs;
+	if (!maxldvs || !maxldgs) {
+		/* No devices configured. */
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_ldgv_init: "
+			"no logical devices or groups configured."));
+		return (NXGE_ERROR);
+	}
+	ldgvp = nxgep->ldgvp;
+	if (ldgvp == NULL) {
+		ldgvp = KMEM_ZALLOC(sizeof (nxge_ldgv_t), KM_SLEEP);
+		nxgep->ldgvp = ldgvp;
+		ldgvp->maxldgs = (uint8_t)maxldgs;
+		ldgvp->maxldvs = (uint8_t)maxldvs;
+		ldgp = ldgvp->ldgp = KMEM_ZALLOC(sizeof (nxge_ldg_t) * maxldgs,
+			KM_SLEEP);
+		ldvp = ldgvp->ldvp = KMEM_ZALLOC(sizeof (nxge_ldv_t) * maxldvs,
+			KM_SLEEP);
+	}
+	ldgvp->ndma_ldvs = p_cfgp->max_tdcs + p_cfgp->max_rdcs;
+	ldgvp->tmres = NXGE_TIMER_RESO;
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL,
+		"==> nxge_ldgv_init: maxldvs %d maxldgs %d nldvs %d",
+		maxldvs, maxldgs, nldvs));
+	ldg = p_cfgp->start_ldg;
+	ptr = ldgp;
+	for (i = 0; i < maxldgs; i++) {
+		ptr->func = func;
+		ptr->arm = B_TRUE;
+		ptr->vldg_index = (uint8_t)i;
+		ptr->ldg_timer = NXGE_TIMER_LDG;
+		ptr->ldg = ldg++;
+		ptr->sys_intr_handler = nxge_intr;
+		ptr->nldvs = 0;
+		ptr->nxgep = nxgep;
+		NXGE_DEBUG_MSG((nxgep, INT_CTL,
+			"==> nxge_ldgv_init: maxldvs %d maxldgs %d ldg %d",
+			maxldvs, maxldgs, ptr->ldg));
+		ptr++;
+	}
+
+	ldg = p_cfgp->start_ldg;
+	if (maxldgs > *navail_p) {
+		ngrps = *navail_p;
+	} else {
+		ngrps = maxldgs;
+	}
+	endldg = ldg + ngrps;
+
+	/*
+	 * Receive DMA channels.
+	 */
+	channel = p_cfgp->start_rdc;
+	start = p_cfgp->start_rdc + NXGE_RDMA_LD_START;
+	end = start + p_cfgp->max_rdcs;
+	nldvs = 0;
+	ldgvp->nldvs = 0;
+	ldgp->ldvp = NULL;
+	*nrequired_p = 0;
+
+	/*
+	 * Start with RDC to configure logical devices for each group.
+	 */
+	for (i = 0, ldv = start; ldv < end; i++, ldv++) {
+		ldvp->is_rxdma = B_TRUE;
+		ldvp->ldv = (uint8_t)ldv;
+		/* If channels are non-sequential, change the following code */
+		ldvp->channel = channel++;
+		ldvp->vdma_index = (uint8_t)i;
+		ldvp->ldv_intr_handler = nxge_rx_intr;
+		ldvp->ldv_ldf_masks = 0;
+		ldvp->use_timer = B_FALSE;
+		ldvp->nxgep = nxgep;
+		nxge_ldgv_setup(&ldgp, &ldvp, ldv, endldg, nrequired_p);
+		nldvs++;
+	}
+
+	/*
+	 * Transmit DMA channels.
+	 */
+	channel = p_cfgp->start_tdc;
+	start = p_cfgp->start_tdc + NXGE_TDMA_LD_START;
+	end = start + p_cfgp->max_tdcs;
+	for (i = 0, ldv = start; ldv < end; i++, ldv++) {
+		ldvp->is_txdma = B_TRUE;
+		ldvp->ldv = (uint8_t)ldv;
+		ldvp->channel = channel++;
+		ldvp->vdma_index = (uint8_t)i;
+		ldvp->ldv_intr_handler = nxge_tx_intr;
+		ldvp->ldv_ldf_masks = 0;
+		ldvp->use_timer = B_FALSE;
+		ldvp->nxgep = nxgep;
+		nxge_ldgv_setup(&ldgp, &ldvp, ldv, endldg, nrequired_p);
+		nldvs++;
+	}
+
+	if (own_fzc) {
+		ldv = NXGE_MIF_LD;
+		ldvp->ldv = (uint8_t)ldv;
+		ldvp->is_mif = B_TRUE;
+		ldvp->ldv_intr_handler = nxge_mif_intr;
+		ldvp->ldv_ldf_masks = 0;
+		ldvp->use_timer = B_FALSE;
+		ldvp->nxgep = nxgep;
+		nxge_ldgv_setup(&ldgp, &ldvp, ldv, endldg, nrequired_p);
+		nldvs++;
+	}
+	/*
+	 * MAC port (function zero control)
+	 */
+	if (own_fzc) {
+		ldvp->is_mac = B_TRUE;
+		ldvp->ldv_intr_handler = nxge_mac_intr;
+		ldvp->ldv_ldf_masks = 0;
+		ldv = func + NXGE_MAC_LD_START;
+		ldvp->ldv = (uint8_t)ldv;
+		ldvp->use_timer = B_FALSE;
+		ldvp->nxgep = nxgep;
+		nxge_ldgv_setup(&ldgp, &ldvp, ldv, endldg, nrequired_p);
+		nldvs++;
+	}
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_ldgv_init: "
+		"func %d nldvs %d navail %d nrequired %d",
+		func, nldvs, *navail_p, *nrequired_p));
+	/*
+	 * Function 0 owns system error interrupts.
+	 */
+	ldvp->use_timer = B_TRUE;
+	if (own_sys_err) {
+		ldv = NXGE_SYS_ERROR_LD;
+		ldvp->ldv = (uint8_t)ldv;
+		ldvp->is_syserr = B_TRUE;
+		ldvp->ldv_intr_handler = nxge_syserr_intr;
+		ldvp->ldv_ldf_masks = 0;
+		ldvp->nxgep = nxgep;
+		ldgvp->ldvp_syserr = ldvp;
+		/*
+		 * Unmask the system interrupt states.
+		 */
+		(void) nxge_fzc_sys_err_mask_set(nxgep, SYS_ERR_SMX_MASK |
+			SYS_ERR_IPP_MASK | SYS_ERR_TXC_MASK |
+			SYS_ERR_ZCP_MASK);
+
+		(void) nxge_ldgv_setup(&ldgp, &ldvp, ldv, endldg, nrequired_p);
+		nldvs++;
+	} else {
+		ldv = NXGE_SYS_ERROR_LD;
+		ldvp->ldv = (uint8_t)ldv;
+		ldvp->is_syserr = B_TRUE;
+		ldvp->ldv_intr_handler = nxge_syserr_intr;
+		ldvp->nxgep = nxgep;
+		ldvp->ldv_ldf_masks = 0;
+		ldgvp->ldvp_syserr = ldvp;
+	}
+
+	ldgvp->ldg_intrs = *nrequired_p;
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_ldgv_init: "
+		"func %d nldvs %d navail %d nrequired %d",
+		func, nldvs, *navail_p, *nrequired_p));
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_ldgv_init"));
+	return (status);
+}
+
+nxge_status_t
+nxge_ldgv_uninit(p_nxge_t nxgep)
+{
+	p_nxge_ldgv_t ldgvp;
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_ldgv_uninit"));
+	ldgvp = nxgep->ldgvp;
+	if (ldgvp == NULL) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_ldgv_uninit: "
+				"no logical group configured."));
+		return (NXGE_OK);
+	}
+	if (ldgvp->ldgp) {
+		KMEM_FREE(ldgvp->ldgp, sizeof (nxge_ldg_t) * ldgvp->maxldgs);
+	}
+	if (ldgvp->ldvp) {
+		KMEM_FREE(ldgvp->ldvp, sizeof (nxge_ldv_t) * ldgvp->maxldvs);
+	}
+	KMEM_FREE(ldgvp, sizeof (nxge_ldgv_t));
+	nxgep->ldgvp = NULL;
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_ldgv_uninit"));
+	return (NXGE_OK);
+}
+
+nxge_status_t
+nxge_intr_ldgv_init(p_nxge_t nxgep)
+{
+	nxge_status_t status = NXGE_OK;
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr_ldgv_init"));
+	/*
+	 * Configure the logical device group numbers, state vectors and
+	 * interrupt masks for each logical device.
+	 */
+	status = nxge_fzc_intr_init(nxgep);
+
+	/*
+	 * Configure logical device masks and timers.
+	 */
+	status = nxge_intr_mask_mgmt(nxgep);
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intr_ldgv_init"));
+	return (status);
+}
+
+nxge_status_t
+nxge_intr_mask_mgmt(p_nxge_t nxgep)
+{
+	p_nxge_ldgv_t ldgvp;
+	p_nxge_ldg_t ldgp;
+	p_nxge_ldv_t ldvp;
+	npi_handle_t handle;
+	int i, j;
+	npi_status_t rs = NPI_SUCCESS;
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr_mask_mgmt"));
+
+	if ((ldgvp = nxgep->ldgvp) == NULL) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"<== nxge_intr_mask_mgmt: Null ldgvp"));
+		return (NXGE_ERROR);
+	}
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	ldgp = ldgvp->ldgp;
+	ldvp = ldgvp->ldvp;
+	if (ldgp == NULL || ldvp == NULL) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"<== nxge_intr_mask_mgmt: Null ldgp or ldvp"));
+		return (NXGE_ERROR);
+	}
+	NXGE_DEBUG_MSG((nxgep, INT_CTL,
+		"==> nxge_intr_mask_mgmt: # of intrs %d ", ldgvp->ldg_intrs));
+	/* Initialize masks. */
+	if (nxgep->niu_type != N2_NIU) {
+		NXGE_DEBUG_MSG((nxgep, INT_CTL,
+			"==> nxge_intr_mask_mgmt(Neptune): # intrs %d ",
+			ldgvp->ldg_intrs));
+		for (i = 0; i < ldgvp->ldg_intrs; i++, ldgp++) {
+			NXGE_DEBUG_MSG((nxgep, INT_CTL,
+				"==> nxge_intr_mask_mgmt(Neptune): # ldv %d "
+				"in group %d", ldgp->nldvs, ldgp->ldg));
+			for (j = 0; j < ldgp->nldvs; j++, ldvp++) {
+				NXGE_DEBUG_MSG((nxgep, INT_CTL,
+					"==> nxge_intr_mask_mgmt: set ldv # %d "
+					"for ldg %d", ldvp->ldv, ldgp->ldg));
+				rs = npi_intr_mask_set(handle, ldvp->ldv,
+					ldvp->ldv_ldf_masks);
+				if (rs != NPI_SUCCESS) {
+					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+						"<== nxge_intr_mask_mgmt: "
+						"set mask failed "
+						" rs 0x%x ldv %d mask 0x%x",
+						rs, ldvp->ldv,
+						ldvp->ldv_ldf_masks));
+					return (NXGE_ERROR | rs);
+				}
+				NXGE_DEBUG_MSG((nxgep, INT_CTL,
+					"==> nxge_intr_mask_mgmt: "
+					"set mask OK "
+					" rs 0x%x ldv %d mask 0x%x",
+					rs, ldvp->ldv,
+					ldvp->ldv_ldf_masks));
+			}
+		}
+	}
+	ldgp = ldgvp->ldgp;
+	/* Configure timer and arm bit */
+	for (i = 0; i < nxgep->ldgvp->ldg_intrs; i++, ldgp++) {
+		rs = npi_intr_ldg_mgmt_set(handle, ldgp->ldg,
+			ldgp->arm, ldgp->ldg_timer);
+		if (rs != NPI_SUCCESS) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"<== nxge_intr_mask_mgmt: "
+				"set timer failed "
+				" rs 0x%x dg %d timer 0x%x",
+				rs, ldgp->ldg, ldgp->ldg_timer));
+			return (NXGE_ERROR | rs);
+		}
+		NXGE_DEBUG_MSG((nxgep, INT_CTL,
+			"==> nxge_intr_mask_mgmt: "
+			"set timer OK "
+			" rs 0x%x ldg %d timer 0x%x",
+			rs, ldgp->ldg, ldgp->ldg_timer));
+	}
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intr_mask_mgmt"));
+	return (NXGE_OK);
+}
+
+nxge_status_t
+nxge_intr_mask_mgmt_set(p_nxge_t nxgep, boolean_t on)
+{
+	p_nxge_ldgv_t ldgvp;
+	p_nxge_ldg_t ldgp;
+	p_nxge_ldv_t ldvp;
+	npi_handle_t handle;
+	int i, j;
+	npi_status_t rs = NPI_SUCCESS;
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL,
+		"==> nxge_intr_mask_mgmt_set (%d)", on));
+
+	if (nxgep->niu_type == N2_NIU) {
+		NXGE_DEBUG_MSG((nxgep, INT_CTL,
+			"<== nxge_intr_mask_mgmt_set (%d) not set (N2/NIU)",
+			on));
+		return (NXGE_ERROR);
+	}
+
+	if ((ldgvp = nxgep->ldgvp) == NULL) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"==> nxge_intr_mask_mgmt_set: Null ldgvp"));
+		return (NXGE_ERROR);
+	}
+
+	handle = NXGE_DEV_NPI_HANDLE(nxgep);
+	ldgp = ldgvp->ldgp;
+	ldvp = ldgvp->ldvp;
+	if (ldgp == NULL || ldvp == NULL) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"<== nxge_intr_mask_mgmt_set: Null ldgp or ldvp"));
+		return (NXGE_ERROR);
+	}
+	/* set masks. */
+	for (i = 0; i < ldgvp->ldg_intrs; i++, ldgp++) {
+		NXGE_DEBUG_MSG((nxgep, INT_CTL,
+			"==> nxge_intr_mask_mgmt_set: flag %d ldg %d"
+			"set mask nldvs %d", on, ldgp->ldg, ldgp->nldvs));
+		for (j = 0; j < ldgp->nldvs; j++, ldvp++) {
+			NXGE_DEBUG_MSG((nxgep, INT_CTL,
+				"==> nxge_intr_mask_mgmt_set: "
+				"for %d %d flag %d", i, j, on));
+			if (on) {
+				ldvp->ldv_ldf_masks = 0;
+				NXGE_DEBUG_MSG((nxgep, INT_CTL,
+					"==> nxge_intr_mask_mgmt_set: "
+					"ON mask off"));
+			} else if (!on) {
+				ldvp->ldv_ldf_masks = (uint8_t)LD_IM1_MASK;
+				NXGE_DEBUG_MSG((nxgep, INT_CTL,
+					"==> nxge_intr_mask_mgmt_set:mask on"));
+			}
+			rs = npi_intr_mask_set(handle, ldvp->ldv,
+				ldvp->ldv_ldf_masks);
+			if (rs != NPI_SUCCESS) {
+				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+					"==> nxge_intr_mask_mgmt_set: "
+					"set mask failed "
+					" rs 0x%x ldv %d mask 0x%x",
+					rs, ldvp->ldv, ldvp->ldv_ldf_masks));
+				return (NXGE_ERROR | rs);
+			}
+			NXGE_DEBUG_MSG((nxgep, INT_CTL,
+				"==> nxge_intr_mask_mgmt_set: flag %d"
+				"set mask OK "
+				" ldv %d mask 0x%x",
+				on, ldvp->ldv, ldvp->ldv_ldf_masks));
+		}
+	}
+
+	ldgp = ldgvp->ldgp;
+	/* set the arm bit */
+	for (i = 0; i < nxgep->ldgvp->ldg_intrs; i++, ldgp++) {
+		if (on && !ldgp->arm) {
+			ldgp->arm = B_TRUE;
+		} else if (!on && ldgp->arm) {
+			ldgp->arm = B_FALSE;
+		}
+		rs = npi_intr_ldg_mgmt_set(handle, ldgp->ldg,
+			ldgp->arm, ldgp->ldg_timer);
+		if (rs != NPI_SUCCESS) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"<== nxge_intr_mask_mgmt_set: "
+				"set timer failed "
+				" rs 0x%x ldg %d timer 0x%x",
+				rs, ldgp->ldg, ldgp->ldg_timer));
+			return (NXGE_ERROR | rs);
+		}
+		NXGE_DEBUG_MSG((nxgep, INT_CTL,
+			"==> nxge_intr_mask_mgmt_set: OK (flag %d) "
+			"set timer "
+			" ldg %d timer 0x%x",
+			on, ldgp->ldg, ldgp->ldg_timer));
+	}
+
+	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intr_mask_mgmt_set"));
+	return (NXGE_OK);
+}
+
+static nxge_status_t
+nxge_get_mac_addr_properties(p_nxge_t nxgep)
+{
+	uchar_t *prop_val;
+	uint_t prop_len;
+	uint_t i;
+	uint8_t func_num;
+	uint8_t total_factory_macs;
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_get_mac_addr_properties "));
+
+#if defined(_BIG_ENDIAN)
+	/*
+	 * Get the ethernet address.
+	 */
+	(void) localetheraddr((struct ether_addr *)NULL, &nxgep->ouraddr);
+
+	/*
+	 * Check if it is an adapter with its own local MAC address. If it is
+	 * present, override the system MAC address.
+	 */
+	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, nxgep->dip, 0,
+			"local-mac-address", &prop_val,
+			&prop_len) == DDI_PROP_SUCCESS) {
+		if (prop_len == ETHERADDRL) {
+			nxgep->factaddr = *(p_ether_addr_t)prop_val;
+			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Local mac address = "
+				"%02x:%02x:%02x:%02x:%02x:%02x",
+				prop_val[0], prop_val[1], prop_val[2],
+				prop_val[3], prop_val[4], prop_val[5]));
+		}
+		ddi_prop_free(prop_val);
+	}
+	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, nxgep->dip, 0,
+			"local-mac-address?", &prop_val,
+			&prop_len) == DDI_PROP_SUCCESS) {
+		if (strncmp("true", (caddr_t)prop_val, (size_t)prop_len) == 0) {
+			nxgep->ouraddr = nxgep->factaddr;
+			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+				"Using local MAC address"));
+		}
+		ddi_prop_free(prop_val);
+	} else {
+		nxgep->ouraddr = nxgep->factaddr;
+	}
+#else
+	(void) nxge_espc_mac_addrs_get(nxgep);
+	nxgep->ouraddr = nxgep->factaddr;
+#endif
+
+	func_num = nxgep->function_num;
+
+	/*
+	 * total_factory_macs is the total number of MACs the factory assigned
+	 * to the whole Neptune device.  NIU does not need this parameter
+	 * because it derives the number of factory MACs for each port from
+	 * the device properties.
+	 */
+	if (nxgep->niu_type == NEPTUNE || nxgep->niu_type == NEPTUNE_2) {
+		if (nxge_espc_num_macs_get(nxgep, &total_factory_macs)
+			== NXGE_OK) {
+			nxgep->nxge_mmac_info.total_factory_macs
+				= total_factory_macs;
+		} else {
+			NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
+				"nxge_espc_num_macs_get: espc access failed"));
+			return (NXGE_ERROR);
+		}
+	}
+
+	/*
+	 * Note: the mac-addresses property of the N2/NIU is the list of MAC
+	 * addresses for a port. The #mac-addresses value stored in Neptune's
+	 * SEEPROM is the total number of MAC addresses allocated for a board.
+	 */
+	if (nxgep->niu_type == N2_NIU) {
+		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, nxgep->dip, 0,
+				"mac-addresses", &prop_val, &prop_len) ==
+			DDI_PROP_SUCCESS) {
+			/*
+			 * XAUI may have up to 18 MACs, more than the XMAC can
+			 * use (1 unique MAC plus 16 alternate MACs)
+			 */
+			nxgep->nxge_mmac_info.num_factory_mmac
+			    = prop_len / ETHERADDRL - 1;
+			if (nxgep->nxge_mmac_info.num_factory_mmac >
+				XMAC_MAX_ALT_ADDR_ENTRY) {
+				nxgep->nxge_mmac_info.num_factory_mmac =
+					XMAC_MAX_ALT_ADDR_ENTRY;
+			}
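+			/*
+			 * Example: a "mac-addresses" property with 18 entries
+			 * (prop_len of 108 bytes) yields 17 here, which is
+			 * then clamped to XMAC_MAX_ALT_ADDR_ENTRY (16
+			 * alternates, per the comment above).
+			 */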
+			ddi_prop_free(prop_val);
+		}
+	} else {
+		/*
+		 * total_factory_macs = 32
+		 * num_factory_mmac = (32 >> (nports/2)) - 1
+		 * So if nports = 4, then num_factory_mmac =  7
+		 *    if nports = 2, then num_factory_mmac = 15
+		 */
+		nxgep->nxge_mmac_info.num_factory_mmac
+			= ((nxgep->nxge_mmac_info.total_factory_macs >>
+			(nxgep->nports >> 1))) - 1;
+	}
+	for (i = 0; i <= nxgep->nxge_mmac_info.num_mmac; i++) {
+		(void) npi_mac_altaddr_disable(nxgep->npi_handle,
+			NXGE_GET_PORT_NUM(func_num), i);
+	}
+
+	(void) nxge_init_mmac(nxgep);
+	return (NXGE_OK);
+}
+
+void
+nxge_get_xcvr_properties(p_nxge_t nxgep)
+{
+	uchar_t *prop_val;
+	uint_t prop_len;
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_get_xcvr_properties"));
+
+	/*
+	 * Read the type of physical layer interface being used.
+	 */
+	nxgep->statsp->mac_stats.xcvr_inuse = INT_MII_XCVR;
+	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, nxgep->dip, 0,
+			"phy-type", &prop_val, &prop_len) == DDI_PROP_SUCCESS) {
+		if (strncmp("pcs", (caddr_t)prop_val,
+				(size_t)prop_len) == 0) {
+			nxgep->statsp->mac_stats.xcvr_inuse = PCS_XCVR;
+		} else {
+			nxgep->statsp->mac_stats.xcvr_inuse = INT_MII_XCVR;
+		}
+		ddi_prop_free(prop_val);
+	} else if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, nxgep->dip, 0,
+			"phy-interface", &prop_val,
+			&prop_len) == DDI_PROP_SUCCESS) {
+		if (strncmp("pcs", (caddr_t)prop_val, (size_t)prop_len) == 0) {
+			nxgep->statsp->mac_stats.xcvr_inuse = PCS_XCVR;
+		} else {
+			nxgep->statsp->mac_stats.xcvr_inuse = INT_MII_XCVR;
+		}
+		ddi_prop_free(prop_val);
+	}
+}
+
+/*
+ * Static functions start here.
+ */
+
+static void
+nxge_ldgv_setup(p_nxge_ldg_t *ldgp, p_nxge_ldv_t *ldvp, uint8_t ldv,
+	uint8_t endldg, int *ngrps)
+{
+	NXGE_DEBUG_MSG((NULL, INT_CTL, "==> nxge_ldgv_setup"));
+	/* Assign the group number for each device. */
+	(*ldvp)->ldg_assigned = (*ldgp)->ldg;
+	(*ldvp)->ldgp = *ldgp;
+	(*ldvp)->ldv = ldv;
+
+	NXGE_DEBUG_MSG((NULL, INT_CTL, "==> nxge_ldgv_setup: "
+		"ldv %d endldg %d ldg %d, ldvp $%p",
+		ldv, endldg, (*ldgp)->ldg, (*ldgp)->ldvp));
+
+	(*ldgp)->nldvs++;
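+	/*
+	 * Each logical device normally gets its own group. Once the last
+	 * available group (endldg - 1) is reached, the remaining devices
+	 * are all packed into that last group and only the device pointer
+	 * advances.
+	 */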
+	if ((*ldgp)->ldg == (endldg - 1)) {
+		if ((*ldgp)->ldvp == NULL) {
+			(*ldgp)->ldvp = *ldvp;
+			*ngrps += 1;
+			NXGE_DEBUG_MSG((NULL, INT_CTL,
+				"==> nxge_ldgv_setup: ngrps %d", *ngrps));
+		}
+		NXGE_DEBUG_MSG((NULL, INT_CTL,
+			"==> nxge_ldgv_setup: ldvp $%p ngrps %d",
+			*ldvp, *ngrps));
+		++*ldvp;
+	} else {
+		(*ldgp)->ldvp = *ldvp;
+		*ngrps += 1;
+		NXGE_DEBUG_MSG((NULL, INT_CTL, "==> nxge_ldgv_setup(done): "
+			"ldv %d endldg %d ldg %d, ldvp $%p",
+			ldv, endldg, (*ldgp)->ldg, (*ldgp)->ldvp));
+		++*ldvp;
+		++*ldgp;
+		NXGE_DEBUG_MSG((NULL, INT_CTL,
+			"==> nxge_ldgv_setup: new ngrps %d", *ngrps));
+	}
+
+	NXGE_DEBUG_MSG((NULL, INT_CTL, "==> nxge_ldgv_setup: "
+		"ldv %d ldvp $%p endldg %d ngrps %d",
+		ldv, ldvp, endldg, *ngrps));
+
+	NXGE_DEBUG_MSG((NULL, INT_CTL, "<== nxge_ldgv_setup"));
+}
+
+/*
+ * Note: This function assumes the following distribution of MAC
+ * addresses among the 4 ports in Neptune:
+ *
+ *      -------------
+ *    0|            |0 - local-mac-address for fn 0
+ *      -------------
+ *    1|            |1 - local-mac-address for fn 1
+ *      -------------
+ *    2|            |2 - local-mac-address for fn 2
+ *      -------------
+ *    3|            |3 - local-mac-address for fn 3
+ *      -------------
+ *     |            |4 - Start of alt. mac addr. for fn 0
+ *     |            |
+ *     |            |
+ *     |            |10
+ *     --------------
+ *     |            |11 - Start of alt. mac addr. for fn 1
+ *     |            |
+ *     |            |
+ *     |            |17
+ *     --------------
+ *     |            |18 - Start of alt. mac addr. for fn 2
+ *     |            |
+ *     |            |
+ *     |            |24
+ *     --------------
+ *     |            |25 - Start of alt. mac addr. for fn 3
+ *     |            |
+ *     |            |
+ *     |            |31
+ *     --------------
+ *
+ * For N2/NIU, the MAC addresses come from the XAUI card.
+ */
+
+static void
+nxge_init_mmac(p_nxge_t nxgep)
+{
+	int slot;
+	uint8_t func_num;
+	uint16_t *base_mmac_addr;
+	uint32_t alt_mac_ls4b;
+	uint16_t *mmac_addr;
+	uint32_t base_mac_ls4b; /* least significant 4 bytes */
+	nxge_mmac_t *mmac_info;
+	npi_mac_addr_t mac_addr;
+
+	func_num = nxgep->function_num;
+	base_mmac_addr = (uint16_t *)&nxgep->factaddr;
+	mmac_info = (nxge_mmac_t *)&nxgep->nxge_mmac_info;
+
+	base_mac_ls4b = ((uint32_t)base_mmac_addr[1]) << 16 |
+		base_mmac_addr[2];
+
+	if (nxgep->niu_type == N2_NIU) {
+		alt_mac_ls4b = base_mac_ls4b + 1; /* ls4b of 1st altmac */
+	} else {			/* Neptune */
+		alt_mac_ls4b = base_mac_ls4b + (nxgep->nports - func_num)
+			+ (func_num * (mmac_info->num_factory_mmac));
+	}
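+	/*
+	 * Example (4-port Neptune, num_factory_mmac = 7): for function 2,
+	 * alt_mac_ls4b = base + (4 - 2) + (2 * 7) = base + 16. Since this
+	 * function's base is already 2 above function 0's, its alternate
+	 * addresses start 18 entries above function 0's base, matching the
+	 * layout in the block comment above.
+	 */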
+
+	/* Set flags for unique MAC */
+	mmac_info->mac_pool[0].flags |= MMAC_SLOT_USED | MMAC_VENDOR_ADDR;
+
+	/* Clear flags of all alternate MAC slots */
+	for (slot = 1; slot <= mmac_info->num_mmac; slot++) {
+		if (slot <= mmac_info->num_factory_mmac)
+			mmac_info->mac_pool[slot].flags = MMAC_VENDOR_ADDR;
+		else
+			mmac_info->mac_pool[slot].flags = 0;
+	}
+
+	/* Generate and store factory alternate MACs */
+	for (slot = 1; slot <= mmac_info->num_factory_mmac; slot++) {
+		mmac_addr = (uint16_t *)&mmac_info->factory_mac_pool[slot];
+		mmac_addr[0] = base_mmac_addr[0];
+		mac_addr.w2 = mmac_addr[0];
+
+		mmac_addr[1] = (alt_mac_ls4b >> 16) & 0x0FFFF;
+		mac_addr.w1 = mmac_addr[1];
+
+		mmac_addr[2] = alt_mac_ls4b & 0x0FFFF;
+		mac_addr.w0 = mmac_addr[2];
+		/*
+		 * slot minus 1 because npi_mac_altaddr_entry expects 0
+		 * for the first alternate mac address.
+		 */
+		(void) npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET,
+			NXGE_GET_PORT_NUM(func_num), slot - 1, &mac_addr);
+
+		alt_mac_ls4b++;
+	}
+	/* Initialize the first two parameters for mmac kstat */
+	nxgep->statsp->mmac_stats.mmac_max_cnt = mmac_info->num_mmac;
+	nxgep->statsp->mmac_stats.mmac_avail_cnt = mmac_info->num_mmac;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nxge/nxge_zcp.c	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,473 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#include <nxge_impl.h>
+#include <nxge_zcp.h>
+#include <nxge_ipp.h>
+
+nxge_status_t
+nxge_zcp_init(p_nxge_t nxgep)
+{
+	uint8_t portn;
+	npi_handle_t handle;
+	zcp_iconfig_t istatus;
+	npi_status_t rs = NPI_SUCCESS;
+	int i;
+	zcp_ram_unit_t w_data;
+	zcp_ram_unit_t r_data;
+	uint32_t cfifo_depth;
+
+	handle = nxgep->npi_handle;
+	portn = NXGE_GET_PORT_NUM(nxgep->function_num);
+
+	if ((nxgep->niu_type == NEPTUNE) || (nxgep->niu_type == NEPTUNE_2)) {
+		if (portn < 2)
+			cfifo_depth = ZCP_P0_P1_CFIFO_DEPTH;
+		else
+			cfifo_depth = ZCP_P2_P3_CFIFO_DEPTH;
+	} else if (nxgep->niu_type == N2_NIU)
+		cfifo_depth = ZCP_NIU_CFIFO_DEPTH;
+
+	/* Clean up CFIFO */
+	w_data.w0 = 0;
+	w_data.w1 = 0;
+	w_data.w2 = 0;
+	w_data.w3 = 0;
+	w_data.w4 = 0;
+
+	for (i = 0; i < cfifo_depth; i++) {
+		if (npi_zcp_tt_cfifo_entry(handle, OP_SET,
+				portn, i, &w_data) != NPI_SUCCESS)
+			goto fail;
+		if (npi_zcp_tt_cfifo_entry(handle, OP_GET,
+				portn, i, &r_data) != NPI_SUCCESS)
+			goto fail;
+	}
+
+	if (npi_zcp_rest_cfifo_port(handle, portn) != NPI_SUCCESS)
+		goto fail;
+
+	/*
+	 * Making sure that error source is cleared if this is an injected
+	 * error.
+	 */
+	switch (portn) {
+	case 0:
+		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT0_REG, 0);
+		break;
+	case 1:
+		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT1_REG, 0);
+		break;
+	case 2:
+		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT2_REG, 0);
+		break;
+	case 3:
+		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT3_REG, 0);
+		break;
+	}
+
+	if ((rs = npi_zcp_clear_istatus(handle)) != NPI_SUCCESS)
+		return (NXGE_ERROR | rs);
+	if ((rs = npi_zcp_get_istatus(handle, &istatus)) != NPI_SUCCESS)
+		return (NXGE_ERROR | rs);
+	if ((rs = npi_zcp_iconfig(handle, INIT, ICFG_ZCP_ALL)) != NPI_SUCCESS)
+		goto fail;
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_zcp_init: port%d", portn));
+	return (NXGE_OK);
+
+fail:
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+		"nxge_zcp_init: Failed to initialize ZCP Port #%d\n", portn));
+	return (NXGE_ERROR | rs);
+}
+
+nxge_status_t
+nxge_zcp_handle_sys_errors(p_nxge_t nxgep)
+{
+	npi_handle_t handle;
+	npi_status_t rs = NPI_SUCCESS;
+	p_nxge_zcp_stats_t statsp;
+	uint8_t portn;
+	zcp_iconfig_t istatus;
+	boolean_t rxport_fatal = B_FALSE;
+	nxge_status_t status = NXGE_OK;
+
+	handle = nxgep->npi_handle;
+	statsp = (p_nxge_zcp_stats_t)&nxgep->statsp->zcp_stats;
+	portn = nxgep->mac.portnum;
+
+	if ((rs = npi_zcp_get_istatus(handle, &istatus)) != NPI_SUCCESS)
+		return (NXGE_ERROR | rs);
+
+	if (istatus & ICFG_ZCP_RRFIFO_UNDERRUN) {
+		statsp->rrfifo_underrun++;
+		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+			NXGE_FM_EREPORT_ZCP_RRFIFO_UNDERRUN);
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_zcp_err_evnts: rrfifo_underrun"));
+	}
+
+	if (istatus & ICFG_ZCP_RRFIFO_OVERRUN) {
+		statsp->rrfifo_overrun++;
+		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+			NXGE_FM_EREPORT_ZCP_RRFIFO_OVERRUN);
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_zcp_err_evnts: buf_rrfifo_overrun"));
+	}
+
+	if (istatus & ICFG_ZCP_RSPFIFO_UNCORR_ERR) {
+		statsp->rspfifo_uncorr_err++;
+		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+			NXGE_FM_EREPORT_ZCP_RSPFIFO_UNCORR_ERR);
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_zcp_err_evnts: rspfifo_uncorr_err"));
+	}
+
+	if (istatus & ICFG_ZCP_BUFFER_OVERFLOW) {
+		statsp->buffer_overflow++;
+		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+			NXGE_FM_EREPORT_ZCP_BUFFER_OVERFLOW);
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_zcp_err_evnts: buffer_overflow"));
+		rxport_fatal = B_TRUE;
+	}
+
+	if (istatus & ICFG_ZCP_STAT_TBL_PERR) {
+		statsp->stat_tbl_perr++;
+		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+			NXGE_FM_EREPORT_ZCP_STAT_TBL_PERR);
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_zcp_err_evnts: stat_tbl_perr"));
+	}
+
+	if (istatus & ICFG_ZCP_DYN_TBL_PERR) {
+		statsp->dyn_tbl_perr++;
+		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+			NXGE_FM_EREPORT_ZCP_DYN_TBL_PERR);
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_zcp_err_evnts: dyn_tbl_perr"));
+	}
+
+	if (istatus & ICFG_ZCP_BUF_TBL_PERR) {
+		statsp->buf_tbl_perr++;
+		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+			NXGE_FM_EREPORT_ZCP_BUF_TBL_PERR);
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_zcp_err_evnts: buf_tbl_perr"));
+	}
+
+	if (istatus & ICFG_ZCP_TT_PROGRAM_ERR) {
+		statsp->tt_program_err++;
+		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+			NXGE_FM_EREPORT_ZCP_TT_PROGRAM_ERR);
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_zcp_err_evnts: tt_program_err"));
+	}
+
+	if (istatus & ICFG_ZCP_RSP_TT_INDEX_ERR) {
+		statsp->rsp_tt_index_err++;
+		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+			NXGE_FM_EREPORT_ZCP_RSP_TT_INDEX_ERR);
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_zcp_err_evnts: rsp_tt_index_err"));
+	}
+
+	if (istatus & ICFG_ZCP_SLV_TT_INDEX_ERR) {
+		statsp->slv_tt_index_err++;
+		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+			NXGE_FM_EREPORT_ZCP_SLV_TT_INDEX_ERR);
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_zcp_err_evnts: slv_tt_index_err"));
+	}
+
+	if (istatus & ICFG_ZCP_TT_INDEX_ERR) {
+		statsp->zcp_tt_index_err++;
+		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+			NXGE_FM_EREPORT_ZCP_TT_INDEX_ERR);
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			"nxge_zcp_err_evnts: tt_index_err"));
+	}
+
+	if (((portn == 0) && (istatus & ICFG_ZCP_CFIFO_ECC0)) ||
+			((portn == 1) && (istatus & ICFG_ZCP_CFIFO_ECC1)) ||
+			((portn == 2) && (istatus & ICFG_ZCP_CFIFO_ECC2)) ||
+			((portn == 3) && (istatus & ICFG_ZCP_CFIFO_ECC3))) {
+		boolean_t ue_ecc_valid;
+
+		if ((status = nxge_ipp_eccue_valid_check(nxgep,
+				&ue_ecc_valid)) != NXGE_OK)
+			return (status);
+
+		if (ue_ecc_valid) {
+			statsp->cfifo_ecc++;
+			NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
+				NXGE_FM_EREPORT_ZCP_CFIFO_ECC);
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				"nxge_zcp_err_evnts: port%d buf_cfifo_ecc",
+				portn));
+			rxport_fatal = B_TRUE;
+		}
+	}
+
+	/*
+	 * Making sure that error source is cleared if this is an injected
+	 * error.
+	 */
+	switch (portn) {
+	case 0:
+		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT0_REG, 0);
+		break;
+	case 1:
+		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT1_REG, 0);
+		break;
+	case 2:
+		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT2_REG, 0);
+		break;
+	case 3:
+		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT3_REG, 0);
+		break;
+	}
+
+	(void) npi_zcp_clear_istatus(handle);
+
+	if (rxport_fatal) {
+		NXGE_DEBUG_MSG((nxgep, IPP_CTL,
+			" nxge_zcp_handle_sys_errors:"
+			" fatal error on Port #%d\n", portn));
+		status = nxge_zcp_fatal_err_recover(nxgep);
+		if (status == NXGE_OK) {
+			FM_SERVICE_RESTORED(nxgep);
+		}
+	}
+	return (status);
+}
+
+void
+nxge_zcp_inject_err(p_nxge_t nxgep, uint32_t err_id)
+{
+	zcp_int_stat_reg_t zcps;
+	uint8_t portn = nxgep->mac.portnum;
+	zcp_ecc_ctrl_t ecc_ctrl;
+
+	switch (err_id) {
+	case NXGE_FM_EREPORT_ZCP_CFIFO_ECC:
+		ecc_ctrl.value = 0;
+		ecc_ctrl.bits.w0.cor_dbl = 1;
+		ecc_ctrl.bits.w0.cor_lst = 1;
+		ecc_ctrl.bits.w0.cor_all = 0;
+		switch (portn) {
+		case 0:
+			cmn_err(CE_NOTE,
+				"!Write 0x%llx to port%d ZCP_CFIFO_ECC_PORT\n",
+				(unsigned long long) ecc_ctrl.value, portn);
+			NXGE_REG_WR64(nxgep->npi_handle,
+				ZCP_CFIFO_ECC_PORT0_REG,
+				ecc_ctrl.value);
+			break;
+		case 1:
+			cmn_err(CE_NOTE,
+				"!Write 0x%llx to port%d ZCP_CFIFO_ECC_PORT\n",
+				(unsigned long long) ecc_ctrl.value, portn);
+			NXGE_REG_WR64(nxgep->npi_handle,
+				ZCP_CFIFO_ECC_PORT1_REG,
+				ecc_ctrl.value);
+			break;
+		case 2:
+			cmn_err(CE_NOTE,
+				"!Write 0x%llx to port%d ZCP_CFIFO_ECC_PORT\n",
+				(unsigned long long) ecc_ctrl.value, portn);
+			NXGE_REG_WR64(nxgep->npi_handle,
+				ZCP_CFIFO_ECC_PORT2_REG,
+				ecc_ctrl.value);
+			break;
+		case 3:
+			cmn_err(CE_NOTE,
+				"!Write 0x%llx to port%d ZCP_CFIFO_ECC_PORT\n",
+				(unsigned long long) ecc_ctrl.value, portn);
+			NXGE_REG_WR64(nxgep->npi_handle,
+				ZCP_CFIFO_ECC_PORT3_REG,
+				ecc_ctrl.value);
+			break;
+		}
+		break;
+
+	case NXGE_FM_EREPORT_ZCP_RRFIFO_UNDERRUN:
+	case NXGE_FM_EREPORT_ZCP_RSPFIFO_UNCORR_ERR:
+	case NXGE_FM_EREPORT_ZCP_STAT_TBL_PERR:
+	case NXGE_FM_EREPORT_ZCP_DYN_TBL_PERR:
+	case NXGE_FM_EREPORT_ZCP_BUF_TBL_PERR:
+	case NXGE_FM_EREPORT_ZCP_RRFIFO_OVERRUN:
+	case NXGE_FM_EREPORT_ZCP_BUFFER_OVERFLOW:
+	case NXGE_FM_EREPORT_ZCP_TT_PROGRAM_ERR:
+	case NXGE_FM_EREPORT_ZCP_RSP_TT_INDEX_ERR:
+	case NXGE_FM_EREPORT_ZCP_SLV_TT_INDEX_ERR:
+	case NXGE_FM_EREPORT_ZCP_TT_INDEX_ERR:
+		NXGE_REG_RD64(nxgep->npi_handle, ZCP_INT_STAT_TEST_REG,
+			&zcps.value);
+		if (err_id == NXGE_FM_EREPORT_ZCP_RRFIFO_UNDERRUN)
+			zcps.bits.ldw.rrfifo_urun = 1;
+		if (err_id == NXGE_FM_EREPORT_ZCP_RSPFIFO_UNCORR_ERR)
+			zcps.bits.ldw.rspfifo_uc_err = 1;
+		if (err_id == NXGE_FM_EREPORT_ZCP_STAT_TBL_PERR)
+			zcps.bits.ldw.stat_tbl_perr = 1;
+		if (err_id == NXGE_FM_EREPORT_ZCP_DYN_TBL_PERR)
+			zcps.bits.ldw.dyn_tbl_perr = 1;
+		if (err_id == NXGE_FM_EREPORT_ZCP_BUF_TBL_PERR)
+			zcps.bits.ldw.buf_tbl_perr = 1;
+		if (err_id == NXGE_FM_EREPORT_ZCP_CFIFO_ECC) {
+			switch (portn) {
+			case 0:
+				zcps.bits.ldw.cfifo_ecc0 = 1;
+				break;
+			case 1:
+				zcps.bits.ldw.cfifo_ecc1 = 1;
+				break;
+			case 2:
+				zcps.bits.ldw.cfifo_ecc2 = 1;
+				break;
+			case 3:
+				zcps.bits.ldw.cfifo_ecc3 = 1;
+				break;
+			}
+		}
+
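+		/*
+		 * FALLTHROUGH: the register write in the default case below
+		 * also applies the bits set for these error ids.
+		 */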
+	default:
+		if (err_id == NXGE_FM_EREPORT_ZCP_RRFIFO_OVERRUN)
+			zcps.bits.ldw.rrfifo_orun = 1;
+		if (err_id == NXGE_FM_EREPORT_ZCP_BUFFER_OVERFLOW)
+			zcps.bits.ldw.buf_overflow = 1;
+		if (err_id == NXGE_FM_EREPORT_ZCP_TT_PROGRAM_ERR)
+			zcps.bits.ldw.tt_tbl_perr = 1;
+		if (err_id == NXGE_FM_EREPORT_ZCP_RSP_TT_INDEX_ERR)
+			zcps.bits.ldw.rsp_tt_index_err = 1;
+		if (err_id == NXGE_FM_EREPORT_ZCP_SLV_TT_INDEX_ERR)
+			zcps.bits.ldw.slv_tt_index_err = 1;
+		if (err_id == NXGE_FM_EREPORT_ZCP_TT_INDEX_ERR)
+			zcps.bits.ldw.zcp_tt_index_err = 1;
+		cmn_err(CE_NOTE, "!Write 0x%lx to ZCP_INT_STAT_TEST_REG\n",
+			zcps.value);
+		NXGE_REG_WR64(nxgep->npi_handle, ZCP_INT_STAT_TEST_REG,
+			zcps.value);
+		break;
+	}
+}
+
+nxge_status_t
+nxge_zcp_fatal_err_recover(p_nxge_t nxgep)
+{
+	npi_handle_t handle;
+	npi_status_t rs = NPI_SUCCESS;
+	nxge_status_t status = NXGE_OK;
+	uint8_t portn;
+	zcp_ram_unit_t w_data;
+	zcp_ram_unit_t r_data;
+	uint32_t cfifo_depth;
+	int i;
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_zcp_fatal_err_recover"));
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+		"Recovering from RxPort error..."));
+
+	handle = nxgep->npi_handle;
+	portn = nxgep->mac.portnum;
+
+	/* Disable RxMAC */
+	if (nxge_rx_mac_disable(nxgep) != NXGE_OK)
+		goto fail;
+
+	/* Make sure source is clear if this is an injected error */
+	switch (portn) {
+	case 0:
+		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT0_REG, 0);
+		break;
+	case 1:
+		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT1_REG, 0);
+		break;
+	case 2:
+		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT2_REG, 0);
+		break;
+	case 3:
+		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT3_REG, 0);
+		break;
+	}
+
+	/* Clear up CFIFO */
+	if ((nxgep->niu_type == NEPTUNE) || (nxgep->niu_type == NEPTUNE_2)) {
+		if (portn < 2)
+			cfifo_depth = ZCP_P0_P1_CFIFO_DEPTH;
+		else
+			cfifo_depth = ZCP_P2_P3_CFIFO_DEPTH;
+	} else if (nxgep->niu_type == N2_NIU)
+		cfifo_depth = ZCP_NIU_CFIFO_DEPTH;
+
+	w_data.w0 = 0;
+	w_data.w1 = 0;
+	w_data.w2 = 0;
+	w_data.w3 = 0;
+	w_data.w4 = 0;
+
+	for (i = 0; i < cfifo_depth; i++) {
+		if (npi_zcp_tt_cfifo_entry(handle, OP_SET,
+				portn, i, &w_data) != NPI_SUCCESS)
+			goto fail;
+		if (npi_zcp_tt_cfifo_entry(handle, OP_GET,
+				portn, i, &r_data) != NPI_SUCCESS)
+			goto fail;
+	}
+
+	/* When recovering from ZCP, RxDMA channel resets are not necessary */
+	/* Reset ZCP CFIFO */
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "port%d Reset ZCP CFIFO...", portn));
+	if ((rs = npi_zcp_rest_cfifo_port(handle, portn)) != NPI_SUCCESS)
+		goto fail;
+
+	/* Reset IPP */
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "port%d Reset IPP...", portn));
+	if ((rs = npi_ipp_reset(handle, portn)) != NPI_SUCCESS)
+		goto fail;
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "port%d Reset RxMAC...", portn));
+	if (nxge_rx_mac_reset(nxgep) != NXGE_OK)
+		goto fail;
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "port%d Initialize RxMAC...", portn));
+	if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK)
+		goto fail;
+
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "port%d Enable RxMAC...", portn));
+	if (nxge_rx_mac_enable(nxgep) != NXGE_OK)
+		goto fail;
+
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+		"Recovery Successful, RxPort Restored"));
+	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_zcp_fatal_err_recover"));
+	return (NXGE_OK);
+fail:
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
+	return (status | rs);
+}
--- a/usr/src/uts/common/sys/Makefile	Mon Mar 19 18:02:35 2007 -0700
+++ b/usr/src/uts/common/sys/Makefile	Mon Mar 19 19:37:22 2007 -0700
@@ -940,6 +940,29 @@
 	i2omstr.h	\
 	i2outil.h
 
+NXGEHDRS=			\
+	nxge.h			\
+	nxge_common.h		\
+	nxge_common_impl.h	\
+	nxge_defs.h		\
+	nxge_hw.h		\
+	nxge_impl.h		\
+	nxge_ipp.h		\
+	nxge_ipp_hw.h		\
+	nxge_mac.h		\
+	nxge_mac_hw.h		\
+	nxge_fflp.h		\
+	nxge_fflp_hw.h		\
+	nxge_mii.h		\
+	nxge_rxdma.h		\
+	nxge_rxdma_hw.h		\
+	nxge_txc.h		\
+	nxge_txc_hw.h		\
+	nxge_txdma.h		\
+	nxge_txdma_hw.h		\
+	nxge_virtual.h		\
+	nxge_espc.h
+
 include Makefile.syshdrs
 
 dcam/%.check:	dcam/%.h
@@ -987,7 +1010,8 @@
 	$(USBHDRS:%.h=usb/%.check)                      \
 	$(I1394HDRS:%.h=1394/%.check)			\
 	$(RSMHDRS:%.h=rsm/%.check)			\
-	$(TSOLHDRS:%.h=tsol/%.check)
+	$(TSOLHDRS:%.h=tsol/%.check)			\
+	$(NXGEHDRS:%.h=nxge/%.check)
 
 
 .KEEP_STATE:
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/sys/nxge/nxge.h	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,1044 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef	_SYS_NXGE_NXGE_H
+#define	_SYS_NXGE_NXGE_H
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#if defined(_KERNEL) || defined(COSIM)
+#include <nxge_mac.h>
+#include <nxge_ipp.h>
+#include <nxge_fflp.h>
+#endif
+
+/*
+ * NXGE diagnostics IOCTLS.
+ */
+#define	NXGE_IOC		((((('N' << 8) + 'X') << 8) + 'G') << 8)
+
+#define	NXGE_GET64		(NXGE_IOC|1)
+#define	NXGE_PUT64		(NXGE_IOC|2)
+#define	NXGE_GET_TX_RING_SZ	(NXGE_IOC|3)
+#define	NXGE_GET_TX_DESC	(NXGE_IOC|4)
+#define	NXGE_GLOBAL_RESET	(NXGE_IOC|5)
+#define	NXGE_TX_SIDE_RESET	(NXGE_IOC|6)
+#define	NXGE_RX_SIDE_RESET	(NXGE_IOC|7)
+#define	NXGE_RESET_MAC		(NXGE_IOC|8)
+
+#define	NXGE_GET_MII		(NXGE_IOC|11)
+#define	NXGE_PUT_MII		(NXGE_IOC|12)
+#define	NXGE_RTRACE		(NXGE_IOC|13)
+#define	NXGE_RTRACE_TEST	(NXGE_IOC|20)
+#define	NXGE_TX_REGS_DUMP	(NXGE_IOC|21)
+#define	NXGE_RX_REGS_DUMP	(NXGE_IOC|22)
+#define	NXGE_INT_REGS_DUMP	(NXGE_IOC|23)
+#define	NXGE_VIR_REGS_DUMP	(NXGE_IOC|24)
+#define	NXGE_VIR_INT_REGS_DUMP	(NXGE_IOC|25)
+#define	NXGE_RDUMP		(NXGE_IOC|26)
+#define	NXGE_RDC_GRPS_DUMP	(NXGE_IOC|27)
+#define	NXGE_PIO_TEST		(NXGE_IOC|28)
+
+#define	NXGE_GET_TCAM		(NXGE_IOC|29)
+#define	NXGE_PUT_TCAM		(NXGE_IOC|30)
+#define	NXGE_INJECT_ERR		(NXGE_IOC|40)
+
+#if (defined(SOLARIS) && defined(_KERNEL)) || defined(COSIM)
+#define	NXGE_OK			0
+#define	NXGE_ERROR		0x40000000
+#define	NXGE_DDI_FAILED		0x20000000
+#define	NXGE_GET_PORT_NUM(n)	n
+
+/*
+ * Definitions for module_info.
+ */
+#define	NXGE_IDNUM		(0)			/* module ID number */
+#define	NXGE_DRIVER_NAME	"nxge"			/* module name */
+
+#define	NXGE_MINPSZ		(0)			/* min packet size */
+#define	NXGE_MAXPSZ		(ETHERMTU)		/* max packet size */
+#define	NXGE_HIWAT		(2048 * NXGE_MAXPSZ)	/* hi-water mark */
+#define	NXGE_LOWAT		(1)			/* lo-water mark */
+#define	NXGE_HIWAT_MAX		(192000 * NXGE_MAXPSZ)
+#define	NXGE_HIWAT_MIN		(2 * NXGE_MAXPSZ)
+#define	NXGE_LOWAT_MAX		(192000 * NXGE_MAXPSZ)
+#define	NXGE_LOWAT_MIN		(1)
+
+#ifndef	D_HOTPLUG
+#define	D_HOTPLUG		0x00
+#endif
+
+#define	INIT_BUCKET_SIZE	16	/* Initial Hash Bucket Size */
+
+#define	NXGE_CHECK_TIMER	(5000)
+
+typedef enum {
+	param_instance,
+	param_main_instance,
+	param_function_number,
+	param_partition_id,
+	param_read_write_mode,
+	param_niu_cfg_type,
+	param_tx_quick_cfg,
+	param_rx_quick_cfg,
+	param_master_cfg_enable,
+	param_master_cfg_value,
+
+	param_autoneg,
+	param_anar_10gfdx,
+	param_anar_10ghdx,
+	param_anar_1000fdx,
+	param_anar_1000hdx,
+	param_anar_100T4,
+	param_anar_100fdx,
+	param_anar_100hdx,
+	param_anar_10fdx,
+	param_anar_10hdx,
+
+	param_anar_asmpause,
+	param_anar_pause,
+	param_use_int_xcvr,
+	param_enable_ipg0,
+	param_ipg0,
+	param_ipg1,
+	param_ipg2,
+	param_accept_jumbo,
+	param_txdma_weight,
+	param_txdma_channels_begin,
+
+	param_txdma_channels,
+	param_txdma_info,
+	param_rxdma_channels_begin,
+	param_rxdma_channels,
+	param_rxdma_drr_weight,
+	param_rxdma_full_header,
+	param_rxdma_info,
+	param_rxdma_rbr_size,
+	param_rxdma_rcr_size,
+	param_default_port_rdc,
+	param_rxdma_intr_time,
+	param_rxdma_intr_pkts,
+
+	param_rdc_grps_start,
+	param_rx_rdc_grps,
+	param_default_grp0_rdc,
+	param_default_grp1_rdc,
+	param_default_grp2_rdc,
+	param_default_grp3_rdc,
+	param_default_grp4_rdc,
+	param_default_grp5_rdc,
+	param_default_grp6_rdc,
+	param_default_grp7_rdc,
+
+	param_info_rdc_groups,
+	param_start_ldg,
+	param_max_ldg,
+	param_mac_2rdc_grp,
+	param_vlan_2rdc_grp,
+	param_fcram_part_cfg,
+	param_fcram_access_ratio,
+	param_tcam_access_ratio,
+	param_tcam_enable,
+	param_hash_lookup_enable,
+	param_llc_snap_enable,
+
+	param_h1_init_value,
+	param_h2_init_value,
+	param_class_cfg_ether_usr1,
+	param_class_cfg_ether_usr2,
+	param_class_cfg_ip_usr4,
+	param_class_cfg_ip_usr5,
+	param_class_cfg_ip_usr6,
+	param_class_cfg_ip_usr7,
+	param_class_opt_ip_usr4,
+	param_class_opt_ip_usr5,
+	param_class_opt_ip_usr6,
+	param_class_opt_ip_usr7,
+	param_class_opt_ipv4_tcp,
+	param_class_opt_ipv4_udp,
+	param_class_opt_ipv4_ah,
+	param_class_opt_ipv4_sctp,
+	param_class_opt_ipv6_tcp,
+	param_class_opt_ipv6_udp,
+	param_class_opt_ipv6_ah,
+	param_class_opt_ipv6_sctp,
+	param_nxge_debug_flag,
+	param_npi_debug_flag,
+	param_dump_rdc,
+	param_dump_tdc,
+	param_dump_mac_regs,
+	param_dump_ipp_regs,
+	param_dump_fflp_regs,
+	param_dump_vlan_table,
+	param_dump_rdc_table,
+	param_dump_ptrs,
+	param_end
+} nxge_param_index_t;
+
+
+/*
+ * Named Dispatch Parameter Management Structure
+ */
+typedef	int (*nxge_ndgetf_t)(p_nxge_t, queue_t *, MBLKP, caddr_t, cred_t *);
+typedef	int (*nxge_ndsetf_t)(p_nxge_t, queue_t *,
+	    MBLKP, char *, caddr_t, cred_t *);
+
+#define	NXGE_PARAM_READ			0x00000001ULL
+#define	NXGE_PARAM_WRITE		0x00000002ULL
+#define	NXGE_PARAM_SHARED		0x00000004ULL
+#define	NXGE_PARAM_PRIV			0x00000008ULL
+#define	NXGE_PARAM_RW			NXGE_PARAM_READ | NXGE_PARAM_WRITE
+#define	NXGE_PARAM_RWS			NXGE_PARAM_RW | NXGE_PARAM_SHARED
+#define	NXGE_PARAM_RWP			NXGE_PARAM_RW | NXGE_PARAM_PRIV
+
+#define	NXGE_PARAM_RXDMA		0x00000010ULL
+#define	NXGE_PARAM_TXDMA		0x00000020ULL
+#define	NXGE_PARAM_CLASS_GEN	0x00000040ULL
+#define	NXGE_PARAM_MAC			0x00000080ULL
+#define	NXGE_PARAM_CLASS_BIN	NXGE_PARAM_CLASS_GEN | NXGE_PARAM_BASE_BIN
+#define	NXGE_PARAM_CLASS_HEX	NXGE_PARAM_CLASS_GEN | NXGE_PARAM_BASE_HEX
+#define	NXGE_PARAM_CLASS		NXGE_PARAM_CLASS_HEX
+
+#define	NXGE_PARAM_CMPLX		0x00010000ULL
+#define	NXGE_PARAM_NDD_WR_OK		0x00020000ULL
+#define	NXGE_PARAM_INIT_ONLY		0x00040000ULL
+#define	NXGE_PARAM_INIT_CONFIG		0x00080000ULL
+
+#define	NXGE_PARAM_READ_PROP		0x00100000ULL
+#define	NXGE_PARAM_PROP_ARR32		0x00200000ULL
+#define	NXGE_PARAM_PROP_ARR64		0x00400000ULL
+#define	NXGE_PARAM_PROP_STR		0x00800000ULL
+
+#define	NXGE_PARAM_BASE_DEC		0x00000000ULL
+#define	NXGE_PARAM_BASE_BIN		0x10000000ULL
+#define	NXGE_PARAM_BASE_HEX		0x20000000ULL
+#define	NXGE_PARAM_BASE_STR		0x40000000ULL
+#define	NXGE_PARAM_DONT_SHOW		0x80000000ULL
+
+#define	NXGE_PARAM_ARRAY_CNT_MASK	0x0000ffff00000000ULL
+#define	NXGE_PARAM_ARRAY_CNT_SHIFT	32ULL
+#define	NXGE_PARAM_ARRAY_ALLOC_MASK	0xffff000000000000ULL
+#define	NXGE_PARAM_ARRAY_ALLOC_SHIFT	48ULL
+
+typedef struct _nxge_param_t {
+	int (*getf)();
+	int (*setf)();   /* null for read only */
+	uint64_t type;  /* R/W/ Common/Port/ .... */
+	uint64_t minimum;
+	uint64_t maximum;
+	uint64_t value;	/* for array params, pointer to value array */
+	uint64_t old_value; /* for array params, pointer to old_value array */
+	char   *fcode_name;
+	char   *name;
+} nxge_param_t, *p_nxge_param_t;
+
+
+
+typedef enum {
+	nxge_lb_normal,
+	nxge_lb_ext10g,
+	nxge_lb_ext1000,
+	nxge_lb_ext100,
+	nxge_lb_ext10,
+	nxge_lb_phy10g,
+	nxge_lb_phy1000,
+	nxge_lb_phy,
+	nxge_lb_serdes10g,
+	nxge_lb_serdes1000,
+	nxge_lb_serdes,
+	nxge_lb_mac10g,
+	nxge_lb_mac1000,
+	nxge_lb_mac
+} nxge_lb_t;
+
+enum nxge_mac_state {
+	NXGE_MAC_STOPPED = 0,
+	NXGE_MAC_STARTED
+};
+
+/*
+ * Private DLPI full dlsap address format.
+ */
+typedef struct _nxge_dladdr_t {
+	ether_addr_st dl_phys;
+	uint16_t dl_sap;
+} nxge_dladdr_t, *p_nxge_dladdr_t;
+
+typedef struct _mc_addr_t {
+	ether_addr_st multcast_addr;
+	uint_t mc_addr_cnt;
+} mc_addr_t, *p_mc_addr_t;
+
+typedef struct _mc_bucket_t {
+	p_mc_addr_t addr_list;
+	uint_t list_size;
+} mc_bucket_t, *p_mc_bucket_t;
+
+typedef struct _mc_table_t {
+	p_mc_bucket_t bucket_list;
+	uint_t buckets_used;
+} mc_table_t, *p_mc_table_t;
+
+typedef struct _filter_t {
+	uint32_t all_phys_cnt;
+	uint32_t all_multicast_cnt;
+	uint32_t all_sap_cnt;
+} filter_t, *p_filter_t;
+
+#if defined(_KERNEL) || defined(COSIM)
+
+
+typedef struct _nxge_port_stats_t {
+	/*
+	 *  Overall structure size
+	 */
+	size_t			stats_size;
+
+	/*
+	 * Link Input/Output stats
+	 */
+	uint64_t		ipackets;
+	uint64_t		ierrors;
+	uint64_t		opackets;
+	uint64_t		oerrors;
+	uint64_t		collisions;
+
+	/*
+	 * MIB II variables
+	 */
+	uint64_t		rbytes;    /* # bytes received */
+	uint64_t		obytes;    /* # bytes transmitted */
+	uint32_t		multircv;  /* # multicast packets received */
+	uint32_t		multixmt;  /* # multicast packets for xmit */
+	uint32_t		brdcstrcv; /* # broadcast packets received */
+	uint32_t		brdcstxmt; /* # broadcast packets for xmit */
+	uint32_t		norcvbuf;  /* # rcv packets discarded */
+	uint32_t		noxmtbuf;  /* # xmit packets discarded */
+
+	/*
+	 * Lets the user know the MTU currently in use by
+	 * the physical MAC port.
+	 */
+	nxge_lb_t		lb_mode;
+	uint32_t		qos_mode;
+	uint32_t		trunk_mode;
+	uint32_t		poll_mode;
+
+	/*
+	 * Tx Statistics.
+	 */
+	uint32_t		tx_inits;
+	uint32_t		tx_starts;
+	uint32_t		tx_nocanput;
+	uint32_t		tx_msgdup_fail;
+	uint32_t		tx_allocb_fail;
+	uint32_t		tx_no_desc;
+	uint32_t		tx_dma_bind_fail;
+	uint32_t		tx_uflo;
+	uint32_t		tx_hdr_pkts;
+	uint32_t		tx_ddi_pkts;
+	uint32_t		tx_dvma_pkts;
+
+	uint32_t		tx_max_pend;
+
+	/*
+	 * Rx Statistics.
+	 */
+	uint32_t		rx_inits;
+	uint32_t		rx_hdr_pkts;
+	uint32_t		rx_mtu_pkts;
+	uint32_t		rx_split_pkts;
+	uint32_t		rx_no_buf;
+	uint32_t		rx_no_comp_wb;
+	uint32_t		rx_ov_flow;
+	uint32_t		rx_len_mm;
+	uint32_t		rx_tag_err;
+	uint32_t		rx_nocanput;
+	uint32_t		rx_msgdup_fail;
+	uint32_t		rx_allocb_fail;
+
+	/*
+	 * Receive buffer management statistics.
+	 */
+	uint32_t		rx_new_pages;
+	uint32_t		rx_new_hdr_pgs;
+	uint32_t		rx_new_mtu_pgs;
+	uint32_t		rx_new_nxt_pgs;
+	uint32_t		rx_reused_pgs;
+	uint32_t		rx_hdr_drops;
+	uint32_t		rx_mtu_drops;
+	uint32_t		rx_nxt_drops;
+
+	/*
+	 * Receive flow statistics
+	 */
+	uint32_t		rx_rel_flow;
+	uint32_t		rx_rel_bit;
+
+	uint32_t		rx_pkts_dropped;
+
+	/*
+	 * PCI-E Bus Statistics.
+	 */
+	uint32_t		pci_bus_speed;
+	uint32_t		pci_err;
+	uint32_t		pci_rta_err;
+	uint32_t		pci_rma_err;
+	uint32_t		pci_parity_err;
+	uint32_t		pci_bad_ack_err;
+	uint32_t		pci_drto_err;
+	uint32_t		pci_dmawz_err;
+	uint32_t		pci_dmarz_err;
+
+	uint32_t		rx_taskq_waits;
+
+	uint32_t		tx_jumbo_pkts;
+
+	/*
+	 * Some statistics added to support bringup, these
+	 * should be removed.
+	 */
+	uint32_t		user_defined;
+} nxge_port_stats_t, *p_nxge_port_stats_t;
+
+
+typedef struct _nxge_stats_t {
+	/*
+	 *  Overall structure size
+	 */
+	size_t			stats_size;
+
+	kstat_t			*ksp;
+	kstat_t			*rdc_ksp[NXGE_MAX_RDCS];
+	kstat_t			*tdc_ksp[NXGE_MAX_TDCS];
+	kstat_t			*rdc_sys_ksp;
+	kstat_t			*fflp_ksp[1];
+	kstat_t			*ipp_ksp;
+	kstat_t			*txc_ksp;
+	kstat_t			*mac_ksp;
+	kstat_t			*zcp_ksp;
+	kstat_t			*port_ksp;
+	kstat_t			*mmac_ksp;
+
+	nxge_mac_stats_t	mac_stats;	/* Common MAC Statistics */
+	nxge_xmac_stats_t	xmac_stats;	/* XMAC Statistics */
+	nxge_bmac_stats_t	bmac_stats;	/* BMAC Statistics */
+
+	nxge_rx_ring_stats_t	rx_stats;	/* per port RX stats */
+	nxge_ipp_stats_t	ipp_stats;	/* per port IPP stats */
+	nxge_zcp_stats_t	zcp_stats;	/* per port ZCP stats */
+	nxge_rx_ring_stats_t	rdc_stats[NXGE_MAX_RDCS]; /* per rdc stats */
+	nxge_rdc_sys_stats_t	rdc_sys_stats;	/* per port RDC stats */
+
+	nxge_tx_ring_stats_t	tx_stats;	/* per port TX stats */
+	nxge_txc_stats_t	txc_stats;	/* per port TXC stats */
+	nxge_tx_ring_stats_t	tdc_stats[NXGE_MAX_TDCS]; /* per tdc stats */
+	nxge_fflp_stats_t	fflp_stats;	/* fflp stats */
+	nxge_port_stats_t	port_stats;	/* per port stats */
+	nxge_mmac_stats_t	mmac_stats;	/* Multi mac. stats */
+
+} nxge_stats_t, *p_nxge_stats_t;
+
+
+
+typedef struct _nxge_intr_t {
+	boolean_t		intr_registered; /* interrupts are registered */
+	boolean_t		intr_enabled; 	/* interrupts are enabled */
+	boolean_t		niu_msi_enable;	/* debug or configurable? */
+	uint8_t			nldevs;		/* # of logical devices */
+	int			intr_types;	/* interrupt types supported */
+	int			intr_type;	/* interrupt type to add */
+	int			max_int_cnt;	/* max MSIX/INT HW supports */
+	int			start_inum;	/* start inum (in sequence?) */
+	int			msi_intx_cnt;	/* # msi/intx ints returned */
+	int			intr_added;	/* # ints actually needed */
+	int			intr_cap;	/* interrupt capabilities */
+	size_t			intr_size;	/* size of array to allocate */
+	ddi_intr_handle_t 	*htable;	/* For array of interrupts */
+	/* Add interrupt number for each interrupt vector */
+	int			pri;
+} nxge_intr_t, *p_nxge_intr_t;
+
+typedef struct _nxge_ldgv_t {
+	uint8_t			ndma_ldvs;
+	uint8_t			nldvs;
+	uint8_t			start_ldg;
+	uint8_t			start_ldg_tx;
+	uint8_t			start_ldg_rx;
+	uint8_t			maxldgs;
+	uint8_t			maxldvs;
+	uint8_t			ldg_intrs;
+	boolean_t		own_sys_err;
+	boolean_t		own_max_ldv;
+	uint32_t		tmres;
+	p_nxge_ldg_t		ldgp;
+	p_nxge_ldv_t		ldvp;
+	p_nxge_ldv_t		ldvp_syserr;
+} nxge_ldgv_t, *p_nxge_ldgv_t;
+
+/*
+ * Neptune Device instance state information.
+ *
+ * Each instance is dynamically allocated on first attach.
+ */
+struct _nxge_t {
+	dev_info_t		*dip;		/* device instance */
+	dev_info_t		*p_dip;		/* Parent's device instance */
+	int			instance;	/* instance number */
+	int			function_num;	/* device function number */
+	int			nports;		/* # of ports on this device */
+	int			board_ver;	/* Board Version */
+	int			partition_id;	/* partition ID */
+	int			use_partition;	/* partition is enabled */
+	uint32_t		drv_state;	/* driver state bit flags */
+	uint64_t		nxge_debug_level; /* driver debug bit flags */
+	kmutex_t		genlock[1];
+	enum nxge_mac_state	nxge_mac_state;
+	ddi_softintr_t		resched_id;	/* reschedule callback	*/
+	boolean_t		resched_needed;
+	boolean_t		resched_running;
+
+	p_dev_regs_t		dev_regs;
+	npi_handle_t		npi_handle;
+	npi_handle_t		npi_pci_handle;
+	npi_handle_t		npi_reg_handle;
+	npi_handle_t		npi_msi_handle;
+	npi_handle_t		npi_vreg_handle;
+	npi_handle_t		npi_v2reg_handle;
+
+	nxge_mac_t		mac;
+	nxge_ipp_t		ipp;
+	nxge_txc_t		txc;
+	nxge_classify_t		classifier;
+
+	mac_handle_t		mach;	/* mac module handle */
+	p_nxge_stats_t		statsp;
+	uint32_t		param_count;
+	p_nxge_param_t		param_arr;
+	nxge_hw_list_t		*nxge_hw_p; 	/* per Neptune hw list	*/
+	niu_type_t		niu_type;
+	boolean_t		os_addr_mode32;	/* set to 1 for 32 bit mode */
+	uint8_t			nrdc;
+	uint8_t			def_rdc;
+	uint8_t			rdc[NXGE_MAX_RDCS];
+	uint8_t			ntdc;
+	uint8_t			tdc[NXGE_MAX_TDCS];
+
+	nxge_intr_t		nxge_intr_type;
+	nxge_dma_pt_cfg_t 	pt_config;
+	nxge_class_pt_cfg_t 	class_config;
+
+	/* Logical device and group data structures. */
+	p_nxge_ldgv_t		ldgvp;
+
+	caddr_t			param_list;	/* Parameter list */
+
+	ether_addr_st		factaddr;	/* factory mac address	    */
+	ether_addr_st		ouraddr;	/* individual address	    */
+	kmutex_t		ouraddr_lock;	/* lock to protect ouraddr */
+
+	ddi_iblock_cookie_t	interrupt_cookie;
+
+	/*
+	 * Blocks of memory may be pre-allocated by the
+	 * partition manager or the driver. They may include
+	 * blocks for configuration and buffers. The idea is
+	 * to preallocate big blocks of contiguous areas in
+	 * system memory (i.e. with IOMMU). These blocks then
+	 * will be broken up to a fixed number of blocks with
+	 * each block having the same block size (4K, 8K, 16K or
+	 * 32K) in the case of buffer blocks. For systems that
+	 * do not support DVMA, more than one big block will be
+	 * allocated.
+	 */
+	uint32_t		rx_default_block_size;
+	nxge_rx_block_size_t	rx_bksize_code;
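+	/*
+	 * Illustrative sketch of the scheme above (sizes assumed, not
+	 * mandated by the hardware): a single 256 KB contiguous
+	 * allocation carved with an 8 KB block size yields
+	 * 256 KB / 8 KB = 32 receive buffer blocks, each posted
+	 * individually to the RBR.  On systems without DVMA the same
+	 * 32 blocks may instead come from several smaller contiguous
+	 * allocations.
+	 */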
+
+	p_nxge_dma_pool_t	rx_buf_pool_p;
+	p_nxge_dma_pool_t	rx_cntl_pool_p;
+
+	p_nxge_dma_pool_t	tx_buf_pool_p;
+	p_nxge_dma_pool_t	tx_cntl_pool_p;
+
+	/* Receive buffer block ring and completion ring. */
+	p_rx_rbr_rings_t 	rx_rbr_rings;
+	p_rx_rcr_rings_t 	rx_rcr_rings;
+	p_rx_mbox_areas_t 	rx_mbox_areas_p;
+
+	p_rx_tx_params_t	rx_params;
+	uint32_t		start_rdc;
+	uint32_t		max_rdcs;
+	uint32_t		rdc_mask;
+
+	/* Transmit descriptors rings */
+	p_tx_rings_t 		tx_rings;
+	p_tx_mbox_areas_t	tx_mbox_areas_p;
+
+	uint32_t		start_tdc;
+	uint32_t		max_tdcs;
+	uint32_t		tdc_mask;
+
+	p_rx_tx_params_t	tx_params;
+
+	ddi_dma_handle_t 	dmasparehandle;
+
+	ulong_t 		sys_page_sz;
+	ulong_t 		sys_page_mask;
+	int 			suspended;
+
+	mii_bmsr_t 		bmsr;		/* xcvr status at last poll. */
+	mii_bmsr_t 		soft_bmsr;	/* xcvr status kept by SW. */
+
+	kmutex_t 		mif_lock;	/* Lock to protect the list. */
+
+	void 			(*mii_read)();
+	void 			(*mii_write)();
+	void 			(*mii_poll)();
+	filter_t 		filter;		/* Current instance filter */
+	p_hash_filter_t 	hash_filter;	/* Multicast hash filter. */
+	krwlock_t		filter_lock;	/* Lock to protect filters. */
+
+	ulong_t 		sys_burst_sz;
+
+	uint8_t 		cache_line;
+
+	timeout_id_t 		nxge_link_poll_timerid;
+	timeout_id_t 		nxge_timerid;
+
+	uint_t 			need_periodic_reclaim;
+	timeout_id_t 		reclaim_timer;
+
+	uint8_t 		msg_min;
+	uint8_t 		crc_size;
+
+	boolean_t 		hard_props_read;
+
+	boolean_t 		nxge_htraffic;
+	uint32_t 		nxge_ncpus;
+	uint32_t 		nxge_cpumask;
+	uint16_t 		intr_timeout;
+	uint16_t 		intr_threshold;
+	uchar_t 		nxge_rxmode;
+	uint32_t 		active_threads;
+
+	rtrace_t		rtrace;
+	int			fm_capabilities; /* FMA capabilities */
+
+	uint32_t 		nxge_port_rbr_size;
+	uint32_t 		nxge_port_rcr_size;
+	uint32_t 		nxge_port_tx_ring_size;
+	nxge_mmac_t		nxge_mmac_info;
+#if	defined(sun4v)
+	boolean_t		niu_hsvc_available;
+	hsvc_info_t		niu_hsvc;
+	uint64_t		niu_min_ver;
+#endif
+	boolean_t		link_notify;
+};
+
+/*
+ * Driver state flags.
+ */
+#define	STATE_REGS_MAPPED	0x000000001	/* device registers mapped */
+#define	STATE_KSTATS_SETUP	0x000000002	/* kstats allocated	*/
+#define	STATE_NODE_CREATED	0x000000004	/* device node created	*/
+#define	STATE_HW_CONFIG_CREATED	0x000000008	/* hardware properties	*/
+#define	STATE_HW_INITIALIZED	0x000000010	/* hardware initialized	*/
+#define	STATE_MDIO_LOCK_INIT	0x000000020	/* mdio lock initialized */
+#define	STATE_MII_LOCK_INIT	0x000000040	/* mii lock initialized */
+
+#define	STOP_POLL_THRESH 	9
+#define	START_POLL_THRESH	2
+
+typedef struct _nxge_port_kstat_t {
+	/*
+	 * Transceiver state information.
+	 */
+	kstat_named_t	xcvr_inits;
+	kstat_named_t	xcvr_inuse;
+	kstat_named_t	xcvr_addr;
+	kstat_named_t	xcvr_id;
+	kstat_named_t	cap_autoneg;
+	kstat_named_t	cap_10gfdx;
+	kstat_named_t	cap_10ghdx;
+	kstat_named_t	cap_1000fdx;
+	kstat_named_t	cap_1000hdx;
+	kstat_named_t	cap_100T4;
+	kstat_named_t	cap_100fdx;
+	kstat_named_t	cap_100hdx;
+	kstat_named_t	cap_10fdx;
+	kstat_named_t	cap_10hdx;
+	kstat_named_t	cap_asmpause;
+	kstat_named_t	cap_pause;
+
+	/*
+	 * Link partner capabilities.
+	 */
+	kstat_named_t	lp_cap_autoneg;
+	kstat_named_t	lp_cap_10gfdx;
+	kstat_named_t	lp_cap_10ghdx;
+	kstat_named_t	lp_cap_1000fdx;
+	kstat_named_t	lp_cap_1000hdx;
+	kstat_named_t	lp_cap_100T4;
+	kstat_named_t	lp_cap_100fdx;
+	kstat_named_t	lp_cap_100hdx;
+	kstat_named_t	lp_cap_10fdx;
+	kstat_named_t	lp_cap_10hdx;
+	kstat_named_t	lp_cap_asmpause;
+	kstat_named_t	lp_cap_pause;
+
+	/*
+	 * Shared link setup.
+	 */
+	kstat_named_t	link_T4;
+	kstat_named_t	link_speed;
+	kstat_named_t	link_duplex;
+	kstat_named_t	link_asmpause;
+	kstat_named_t	link_pause;
+	kstat_named_t	link_up;
+
+	/*
+	 * Lets the user know the MTU currently in use by
+	 * the physical MAC port.
+	 */
+	kstat_named_t	mac_mtu;
+	kstat_named_t	lb_mode;
+	kstat_named_t	qos_mode;
+	kstat_named_t	trunk_mode;
+
+	/*
+	 * Misc MAC statistics.
+	 */
+	kstat_named_t	ifspeed;
+	kstat_named_t	promisc;
+	kstat_named_t	rev_id;
+
+	/*
+	 * Some statistics added to support bringup; these
+	 * should be removed.
+	 */
+	kstat_named_t	user_defined;
+} nxge_port_kstat_t, *p_nxge_port_kstat_t;
+
+typedef struct _nxge_rdc_kstat {
+	/*
+	 * Receive DMA channel statistics.
+	 */
+	kstat_named_t	ipackets;
+	kstat_named_t	rbytes;
+	kstat_named_t	errors;
+	kstat_named_t	dcf_err;
+	kstat_named_t	rcr_ack_err;
+
+	kstat_named_t	dc_fifoflow_err;
+	kstat_named_t	rcr_sha_par_err;
+	kstat_named_t	rbr_pre_par_err;
+	kstat_named_t	wred_drop;
+	kstat_named_t	rbr_pre_emty;
+
+	kstat_named_t	rcr_shadow_full;
+	kstat_named_t	rbr_tmout;
+	kstat_named_t	rsp_cnt_err;
+	kstat_named_t	byte_en_bus;
+	kstat_named_t	rsp_dat_err;
+
+	kstat_named_t	compl_l2_err;
+	kstat_named_t	compl_l4_cksum_err;
+	kstat_named_t	compl_zcp_soft_err;
+	kstat_named_t	compl_fflp_soft_err;
+	kstat_named_t	config_err;
+
+	kstat_named_t	rcrincon;
+	kstat_named_t	rcrfull;
+	kstat_named_t	rbr_empty;
+	kstat_named_t	rbrfull;
+	kstat_named_t	rbrlogpage;
+
+	kstat_named_t	cfiglogpage;
+	kstat_named_t	port_drop_pkt;
+	kstat_named_t	rcr_to;
+	kstat_named_t	rcr_thresh;
+	kstat_named_t	rcr_mex;
+	kstat_named_t	id_mismatch;
+	kstat_named_t	zcp_eop_err;
+	kstat_named_t	ipp_eop_err;
+} nxge_rdc_kstat_t, *p_nxge_rdc_kstat_t;
+
+typedef struct _nxge_rdc_sys_kstat {
+	/*
+	 * Receive DMA system statistics.
+	 */
+	kstat_named_t	pre_par;
+	kstat_named_t	sha_par;
+	kstat_named_t	id_mismatch;
+	kstat_named_t	ipp_eop_err;
+	kstat_named_t	zcp_eop_err;
+} nxge_rdc_sys_kstat_t, *p_nxge_rdc_sys_kstat_t;
+
+typedef	struct _nxge_tdc_kstat {
+	/*
+	 * Transmit DMA channel statistics.
+	 */
+	kstat_named_t	opackets;
+	kstat_named_t	obytes;
+	kstat_named_t	oerrors;
+	kstat_named_t	tx_inits;
+	kstat_named_t	tx_no_buf;
+
+	kstat_named_t	mbox_err;
+	kstat_named_t	pkt_size_err;
+	kstat_named_t	tx_ring_oflow;
+	kstat_named_t	pref_buf_ecc_err;
+	kstat_named_t	nack_pref;
+	kstat_named_t	nack_pkt_rd;
+	kstat_named_t	conf_part_err;
+	kstat_named_t	pkt_prt_err;
+	kstat_named_t	reset_fail;
+/* these used to be in the common (per port) counter */
+
+	kstat_named_t	tx_starts;
+	kstat_named_t	tx_nocanput;
+	kstat_named_t	tx_msgdup_fail;
+	kstat_named_t	tx_allocb_fail;
+	kstat_named_t	tx_no_desc;
+	kstat_named_t	tx_dma_bind_fail;
+	kstat_named_t	tx_uflo;
+	kstat_named_t	tx_hdr_pkts;
+	kstat_named_t	tx_ddi_pkts;
+	kstat_named_t	tx_dvma_pkts;
+	kstat_named_t	tx_max_pend;
+} nxge_tdc_kstat_t, *p_nxge_tdc_kstat_t;
+
+typedef	struct _nxge_txc_kstat {
+	/*
+	 * Transmit port TXC block statistics.
+	 */
+	kstat_named_t	pkt_stuffed;
+	kstat_named_t	pkt_xmit;
+	kstat_named_t	ro_correct_err;
+	kstat_named_t	ro_uncorrect_err;
+	kstat_named_t	sf_correct_err;
+	kstat_named_t	sf_uncorrect_err;
+	kstat_named_t	address_failed;
+	kstat_named_t	dma_failed;
+	kstat_named_t	length_failed;
+	kstat_named_t	pkt_assy_dead;
+	kstat_named_t	reorder_err;
+} nxge_txc_kstat_t, *p_nxge_txc_kstat_t;
+
+typedef struct _nxge_ipp_kstat {
+	/*
+	 * Receive port IPP block statistics.
+	 */
+	kstat_named_t	eop_miss;
+	kstat_named_t	sop_miss;
+	kstat_named_t	dfifo_ue;
+	kstat_named_t	ecc_err_cnt;
+	kstat_named_t	dfifo_perr;
+	kstat_named_t	pfifo_over;
+	kstat_named_t	pfifo_und;
+	kstat_named_t	bad_cs_cnt;
+	kstat_named_t	pkt_dis_cnt;
+	kstat_named_t	cs_fail;
+} nxge_ipp_kstat_t, *p_nxge_ipp_kstat_t;
+
+typedef	struct _nxge_zcp_kstat {
+	/*
+	 * ZCP statistics.
+	 */
+	kstat_named_t	errors;
+	kstat_named_t	inits;
+	kstat_named_t	rrfifo_underrun;
+	kstat_named_t	rrfifo_overrun;
+	kstat_named_t	rspfifo_uncorr_err;
+	kstat_named_t	buffer_overflow;
+	kstat_named_t	stat_tbl_perr;
+	kstat_named_t	dyn_tbl_perr;
+	kstat_named_t	buf_tbl_perr;
+	kstat_named_t	tt_program_err;
+	kstat_named_t	rsp_tt_index_err;
+	kstat_named_t	slv_tt_index_err;
+	kstat_named_t	zcp_tt_index_err;
+	kstat_named_t	access_fail;
+	kstat_named_t	cfifo_ecc;
+} nxge_zcp_kstat_t, *p_nxge_zcp_kstat_t;
+
+typedef	struct _nxge_mac_kstat {
+	/*
+	 * Transmit MAC statistics.
+	 */
+	kstat_named_t	tx_frame_cnt;
+	kstat_named_t	tx_underflow_err;
+	kstat_named_t	tx_overflow_err;
+	kstat_named_t	tx_maxpktsize_err;
+	kstat_named_t	tx_fifo_xfr_err;
+	kstat_named_t	tx_byte_cnt;
+
+	/*
+	 * Receive MAC statistics.
+	 */
+	kstat_named_t	rx_frame_cnt;
+	kstat_named_t	rx_underflow_err;
+	kstat_named_t	rx_overflow_err;
+	kstat_named_t	rx_len_err_cnt;
+	kstat_named_t	rx_crc_err_cnt;
+	kstat_named_t	rx_viol_err_cnt;
+	kstat_named_t	rx_byte_cnt;
+	kstat_named_t	rx_hist1_cnt;
+	kstat_named_t	rx_hist2_cnt;
+	kstat_named_t	rx_hist3_cnt;
+	kstat_named_t	rx_hist4_cnt;
+	kstat_named_t	rx_hist5_cnt;
+	kstat_named_t	rx_hist6_cnt;
+	kstat_named_t	rx_broadcast_cnt;
+	kstat_named_t	rx_mult_cnt;
+	kstat_named_t	rx_frag_cnt;
+	kstat_named_t	rx_frame_align_err_cnt;
+	kstat_named_t	rx_linkfault_err_cnt;
+	kstat_named_t	rx_local_fault_err_cnt;
+	kstat_named_t	rx_remote_fault_err_cnt;
+} nxge_mac_kstat_t, *p_nxge_mac_kstat_t;
+
+typedef	struct _nxge_xmac_kstat {
+	/*
+	 * XMAC statistics.
+	 */
+	kstat_named_t	tx_frame_cnt;
+	kstat_named_t	tx_underflow_err;
+	kstat_named_t	tx_maxpktsize_err;
+	kstat_named_t	tx_overflow_err;
+	kstat_named_t	tx_fifo_xfr_err;
+	kstat_named_t	tx_byte_cnt;
+	kstat_named_t	rx_frame_cnt;
+	kstat_named_t	rx_underflow_err;
+	kstat_named_t	rx_overflow_err;
+	kstat_named_t	rx_crc_err_cnt;
+	kstat_named_t	rx_len_err_cnt;
+	kstat_named_t	rx_viol_err_cnt;
+	kstat_named_t	rx_byte_cnt;
+	kstat_named_t	rx_hist1_cnt;
+	kstat_named_t	rx_hist2_cnt;
+	kstat_named_t	rx_hist3_cnt;
+	kstat_named_t	rx_hist4_cnt;
+	kstat_named_t	rx_hist5_cnt;
+	kstat_named_t	rx_hist6_cnt;
+	kstat_named_t	rx_hist7_cnt;
+	kstat_named_t	rx_broadcast_cnt;
+	kstat_named_t	rx_mult_cnt;
+	kstat_named_t	rx_frag_cnt;
+	kstat_named_t	rx_frame_align_err_cnt;
+	kstat_named_t	rx_linkfault_err_cnt;
+	kstat_named_t	rx_remote_fault_err_cnt;
+	kstat_named_t	rx_local_fault_err_cnt;
+	kstat_named_t	rx_pause_cnt;
+	kstat_named_t	xpcs_deskew_err_cnt;
+	kstat_named_t	xpcs_ln0_symbol_err_cnt;
+	kstat_named_t	xpcs_ln1_symbol_err_cnt;
+	kstat_named_t	xpcs_ln2_symbol_err_cnt;
+	kstat_named_t	xpcs_ln3_symbol_err_cnt;
+} nxge_xmac_kstat_t, *p_nxge_xmac_kstat_t;
+
+typedef	struct _nxge_bmac_kstat {
+	/*
+	 * BMAC statistics.
+	 */
+	kstat_named_t tx_frame_cnt;
+	kstat_named_t tx_underrun_err;
+	kstat_named_t tx_max_pkt_err;
+	kstat_named_t tx_byte_cnt;
+	kstat_named_t rx_frame_cnt;
+	kstat_named_t rx_byte_cnt;
+	kstat_named_t rx_overflow_err;
+	kstat_named_t rx_align_err_cnt;
+	kstat_named_t rx_crc_err_cnt;
+	kstat_named_t rx_len_err_cnt;
+	kstat_named_t rx_viol_err_cnt;
+	kstat_named_t rx_pause_cnt;
+	kstat_named_t tx_pause_state;
+	kstat_named_t tx_nopause_state;
+} nxge_bmac_kstat_t, *p_nxge_bmac_kstat_t;
+
+
+typedef struct _nxge_fflp_kstat {
+	/*
+	 * FFLP statistics.
+	 */
+
+	kstat_named_t	fflp_tcam_ecc_err;
+	kstat_named_t	fflp_tcam_perr;
+	kstat_named_t	fflp_vlan_perr;
+	kstat_named_t	fflp_hasht_lookup_err;
+	kstat_named_t	fflp_access_fail;
+	kstat_named_t	fflp_hasht_data_err[MAX_PARTITION];
+} nxge_fflp_kstat_t, *p_nxge_fflp_kstat_t;
+
+typedef struct _nxge_mmac_kstat {
+	kstat_named_t	mmac_max_addr_cnt;
+	kstat_named_t	mmac_avail_addr_cnt;
+	kstat_named_t	mmac_addr1;
+	kstat_named_t	mmac_addr2;
+	kstat_named_t	mmac_addr3;
+	kstat_named_t	mmac_addr4;
+	kstat_named_t	mmac_addr5;
+	kstat_named_t	mmac_addr6;
+	kstat_named_t	mmac_addr7;
+	kstat_named_t	mmac_addr8;
+	kstat_named_t	mmac_addr9;
+	kstat_named_t	mmac_addr10;
+	kstat_named_t	mmac_addr11;
+	kstat_named_t	mmac_addr12;
+	kstat_named_t	mmac_addr13;
+	kstat_named_t	mmac_addr14;
+	kstat_named_t	mmac_addr15;
+	kstat_named_t	mmac_addr16;
+} nxge_mmac_kstat_t, *p_nxge_mmac_kstat_t;
+
+#endif	/* _KERNEL || COSIM */
+
+/*
+ * Prototype definitions.
+ */
+nxge_status_t nxge_init(p_nxge_t);
+void nxge_uninit(p_nxge_t);
+void nxge_get64(p_nxge_t, p_mblk_t);
+void nxge_put64(p_nxge_t, p_mblk_t);
+void nxge_pio_loop(p_nxge_t, p_mblk_t);
+
+#ifndef COSIM
+typedef	void	(*fptrv_t)();
+timeout_id_t nxge_start_timer(p_nxge_t, fptrv_t, int);
+void nxge_stop_timer(p_nxge_t, timeout_id_t);
+#endif
+#endif
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _SYS_NXGE_NXGE_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/sys/nxge/nxge_common.h	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,487 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef	_SYS_NXGE_NXGE_COMMON_H
+#define	_SYS_NXGE_NXGE_COMMON_H
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#define	NXGE_DMA_START			B_TRUE
+#define	NXGE_DMA_STOP			B_FALSE
+
+/*
+ * Default DMA configurations.
+ */
+#define	NXGE_RDMA_PER_NIU_PORT		(NXGE_MAX_RDCS/NXGE_PORTS_NIU)
+#define	NXGE_TDMA_PER_NIU_PORT		(NXGE_MAX_TDCS_NIU/NXGE_PORTS_NIU)
+#define	NXGE_RDMA_PER_NEP_PORT		(NXGE_MAX_RDCS/NXGE_PORTS_NEPTUNE)
+#define	NXGE_TDMA_PER_NEP_PORT		(NXGE_MAX_TDCS/NXGE_PORTS_NEPTUNE)
+#define	NXGE_RDCGRP_PER_NIU_PORT	(NXGE_MAX_RDC_GROUPS/NXGE_PORTS_NIU)
+#define	NXGE_RDCGRP_PER_NEP_PORT	(NXGE_MAX_RDC_GROUPS/NXGE_PORTS_NEPTUNE)
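+/*
+ * Worked example with the defaults from nxge_defs.h (NXGE_MAX_RDCS = 16,
+ * NXGE_PORTS_NIU = 2, NXGE_PORTS_NEPTUNE = 4): each NIU port gets
+ * 16 / 2 = 8 receive DMA channels while each Neptune port gets
+ * 16 / 4 = 4; the TDC and RDC-group macros divide up their own maxima
+ * the same way.
+ */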
+
+#define	NXGE_TIMER_RESO			2
+
+#define	NXGE_TIMER_LDG			2
+
+/*
+ * Receive and Transmit DMA definitions
+ */
+#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
+/*
+ * N2/NIU: Maximum descriptors if we need to call
+ *	   Hypervisor to set up the logical pages
+ *	   and the driver must use contiguous memory.
+ */
+#define	NXGE_NIU_MAX_ENTRY		(1 << 9) /* 512 */
+#define	NXGE_NIU_CONTIG_RBR_MAX		(NXGE_NIU_MAX_ENTRY)
+#define	NXGE_NIU_CONTIG_RCR_MAX		(NXGE_NIU_MAX_ENTRY)
+#define	NXGE_NIU_CONTIG_TX_MAX		(NXGE_NIU_MAX_ENTRY)
+#endif
+
+#ifdef	_DMA_USES_VIRTADDR
+#ifdef	NIU_PA_WORKAROUND
+#define	NXGE_DMA_BLOCK		(16 * 64 * 4)
+#else
+#define	NXGE_DMA_BLOCK		1
+#endif
+#else
+#define	NXGE_DMA_BLOCK		(64 * 64)
+#endif
+
+#define	NXGE_RBR_RBB_MIN	(128)
+#define	NXGE_RBR_RBB_MAX	(64 * 128 -1)
+
+#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
+#define	NXGE_RBR_RBB_DEFAULT	512
+#define	NXGE_RBR_SPARE		0
+#else
+#define	NXGE_RBR_RBB_DEFAULT	(64 * 16) /* x86 */
+#define	NXGE_RBR_SPARE		0
+#endif
+
+
+#define	NXGE_RCR_MIN		(NXGE_RBR_RBB_MIN * 2)
+
+#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
+#define	NXGE_RCR_MAX		(NXGE_NIU_CONTIG_RCR_MAX)
+#define	NXGE_RCR_DEFAULT	(512)
+#define	NXGE_TX_RING_DEFAULT	(512)
+#else
+#ifndef	NIU_PA_WORKAROUND
+#define	NXGE_RCR_MAX		(65355) /* MAX hardware supported */
+#if defined(_BIG_ENDIAN)
+#define	NXGE_RCR_DEFAULT	(NXGE_RBR_RBB_DEFAULT * 8)
+#else
+#ifdef USE_RX_BIG_BUF
+#define	NXGE_RCR_DEFAULT	(NXGE_RBR_RBB_DEFAULT * 8)
+#else
+#define	NXGE_RCR_DEFAULT	(NXGE_RBR_RBB_DEFAULT * 4)
+#endif
+#endif
+#define	NXGE_TX_RING_DEFAULT	(1024)
+#define	NXGE_TX_RING_MAX	(64 * 128 - 1)
+#else
+#define	NXGE_RCR_DEFAULT	(512)
+#define	NXGE_TX_RING_DEFAULT	(512)
+#define	NXGE_RCR_MAX		(1024)
+#define	NXGE_TX_RING_MAX	(1024)
+#endif
+#endif
+
+#define	NXGE_TX_RECLAIM 	32
+
+/* per receive DMA channel configuration data structure */
+typedef struct  nxge_rdc_cfg {
+	uint32_t	flag;		/* 0: not configured, 1: configured */
+	struct nxge_hw_list *nxge_hw_p;
+	uint32_t	partition_id;
+	uint32_t	port;		/* function number */
+	uint32_t	rx_group_id;
+
+	/* Partitioning, DMC function zero. */
+	uint32_t	rx_log_page_vld_page0;	/* TRUE or FALSE */
+	uint32_t	rx_log_page_vld_page1;	/* TRUE or FALSE */
+	uint64_t	rx_log_mask1;
+	uint64_t	rx_log_value1;
+	uint64_t	rx_log_mask2;
+	uint64_t	rx_log_value2;
+	uint64_t	rx_log_page_relo1;
+	uint64_t	rx_log_page_relo2;
+	uint64_t	rx_log_page_hdl;
+
+	/* WRED parameters, DMC function zero */
+	uint32_t	red_enable;
+
+	uint32_t	thre_syn;
+	uint32_t	win_syn;
+	uint32_t	threshold;
+	uint32_t	win_non_syn;
+
+	/* RXDMA configuration, DMC */
+	char		*rdc_mbaddr_p;	/* mailbox address */
+	uint32_t	min_flag;	/* TRUE for 18 bytes header */
+
+	/* Software Reserved Packet Buffer Offset, DMC */
+	uint32_t	sw_offset;
+
+	/* RBR Configuration A */
+	uint64_t	rbr_staddr;	/* starting address of RBR */
+	uint32_t	rbr_nblks;	/* # of RBR entries */
+	uint32_t	rbr_len;	/* # of RBR entries in 64B lines */
+
+	/* RBR Configuration B */
+	uint32_t	bksize;		/* Block size is fixed. */
+#define	RBR_BKSIZE_4K			0
+#define	RBR_BKSIZE_4K_BYTES		(4 * 1024)
+#define	RBR_BKSIZE_8K			1
+#define	RBR_BKSIZE_8K_BYTES		(8 * 1024)
+#define	RBR_BKSIZE_16K			2
+#define	RBR_BKSIZE_16K_BYTES		(16 * 1024)
+#define	RBR_BKSIZE_32K			3
+#define	RBR_BKSIZE_32K_BYTES		(32 * 1024)
+
+	uint32_t	bufsz2;
+#define	RBR_BUFSZ2_2K			0
+#define	RBR_BUFSZ2_2K_BYTES		(2 * 1024)
+#define	RBR_BUFSZ2_4K			1
+#define	RBR_BUFSZ2_4K_BYTES		(4 * 1024)
+#define	RBR_BUFSZ2_8K			2
+#define	RBR_BUFSZ2_8K_BYTES		(8 * 1024)
+#define	RBR_BUFSZ2_16K			3
+#define	RBR_BUFSZ2_16K_BYTES		(16 * 1024)
+
+	uint32_t	bufsz1;
+#define	RBR_BUFSZ1_1K			0
+#define	RBR_BUFSZ1_1K_BYTES		1024
+#define	RBR_BUFSZ1_2K			1
+#define	RBR_BUFSZ1_2K_BYTES		(2 * 1024)
+#define	RBR_BUFSZ1_4K			2
+#define	RBR_BUFSZ1_4K_BYTES		(4 * 1024)
+#define	RBR_BUFSZ1_8K			3
+#define	RBR_BUFSZ1_8K_BYTES		(8 * 1024)
+
+	uint32_t	bufsz0;
+#define	RBR_BUFSZ0_256B			0
+#define	RBR_BUFSZ0_256_BYTES		256
+#define	RBR_BUFSZ0_512B			1
+#define	RBR_BUFSZ0_512B_BYTES		512
+#define	RBR_BUFSZ0_1K			2
+#define	RBR_BUFSZ0_1K_BYTES		(1024)
+#define	RBR_BUFSZ0_2K			3
+#define	RBR_BUFSZ0_2K_BYTES		(2 * 1024)
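+/*
+ * Illustrative combination (values assumed): with bksize = RBR_BKSIZE_8K
+ * each receive buffer block is 8 KB, so one block can be carved into two
+ * RBR_BUFSZ2_4K buffers, four RBR_BUFSZ1_2K buffers or sixteen
+ * RBR_BUFSZ0_512B buffers, depending on the packet size being serviced.
+ */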
+
+	/* Receive buffers added by the software */
+	uint32_t	bkadd;		/* maximum size is 1 million */
+
+	/* Receive Completion Ring Configuration A */
+	uint32_t	rcr_len;	/* # of 64B blocks, each RCR is 8B */
+	uint64_t	rcr_staddr;
+
+	/* Receive Completion Ring Configuration B */
+	uint32_t	pthres;		/* packet threshold */
+	uint32_t	entout;		/* enable timeout */
+	uint32_t	timeout;	/* timeout value */
+
+	/* Logical Device Group Number */
+	uint16_t	rx_ldg;
+	uint16_t	rx_ld_state_flags;
+
+	/* Receive DMA Channel Event Mask */
+	uint64_t	rx_dma_ent_mask;
+
+	/* 32 bit (set to 1) or 64 bit (set to 0) addressing mode */
+	uint32_t	rx_addr_md;
+} nxge_rdc_cfg_t, *p_nxge_rdc_cfg_t;
+
+/*
+ * Per Transmit DMA Channel Configuration Data Structure (32 TDC)
+ */
+typedef struct  nxge_tdc_cfg {
+	uint32_t	flag;		/* 0: not configured 1: configured */
+	struct nxge_hw_list *nxge_hw_p;
+	uint32_t	partition_id;
+	uint32_t	port; 		/* function number */
+	/* partitioning, DMC function zero (All 0s for non-partitioning) */
+	uint32_t	tx_log_page_vld_page0;	/* TRUE or FALSE */
+	uint32_t	tx_log_page_vld_page1;	/* TRUE or FALSE */
+	uint64_t	tx_log_mask1;
+	uint64_t	tx_log_value1;
+	uint64_t	tx_log_mask2;
+	uint64_t	tx_log_value2;
+	uint64_t	tx_log_page_relo1;
+	uint64_t	tx_log_page_relo2;
+	uint64_t	tx_log_page_hdl;
+
+	/* Transmit Ring Configuration */
+	uint64_t	tx_staddr;
+	uint64_t	tx_rng_len;	/* in 64 B Blocks */
+#define	TX_MAX_BUF_SIZE			4096
+
+	/* TXDMA configuration, DMC */
+	char		*tdc_mbaddr_p;	/* mailbox address */
+
+	/* Logical Device Group Number */
+	uint16_t	tx_ldg;
+	uint16_t	tx_ld_state_flags;
+
+	/* TXDMA event flags */
+	uint64_t	tx_event_mask;
+
+	/* Transmit threshold before reclamation */
+	uint32_t	tx_rng_threshold;
+#define	TX_RING_THRESHOLD		(TX_DEFAULT_MAX_GPS/4)
+#define	TX_RING_JUMBO_THRESHOLD		(TX_DEFAULT_JUMBO_MAX_GPS/4)
+
+	/* For reclaim: a wrap-around counter (packets transmitted) */
+	uint32_t	tx_pkt_cnt;
+	/* last packet with the mark bit set */
+	uint32_t	tx_lastmark;
+} nxge_tdc_cfg_t, *p_nxge_tdc_cfg_t;
+
+#define	RDC_TABLE_ENTRY_METHOD_SEQ	0
+#define	RDC_TABLE_ENTRY_METHOD_REP	1
+
+/* per receive DMA channel table group data structure */
+typedef struct nxge_rdc_grp {
+	uint32_t	flag;		/* 0:not configured 1: configured */
+	uint8_t	port;
+	uint8_t	partition_id;
+	uint8_t	rx_group_id;
+	uint8_t	start_rdc;	/* assume assigned in sequence	*/
+	uint8_t	max_rdcs;
+	uint8_t	def_rdc;
+	uint8_t		rdc[NXGE_MAX_RDCS];
+	uint16_t	config_method;
+} nxge_rdc_grp_t, *p_nxge_rdc_grp_t;
+
+/* Common RDC and TDC configuration of DMC */
+typedef struct _nxge_dma_common_cfg_t {
+	uint16_t	rdc_red_ran_init; /* RED initial seed value */
+
+	/* Transmit Ring */
+} nxge_dma_common_cfg_t, *p_nxge_dma_common_cfg_t;
+
+/*
+ * VLAN and MAC table configurations:
+ *  Each VLAN ID should belong to at most one RDC group.
+ *  Each port could own multiple RDC groups.
+ *  Each MAC should belong to one RDC group.
+ */
+typedef struct nxge_mv_cfg {
+	uint8_t		flag;			/* 0: unconfigured, 1: configured */
+	uint8_t		rdctbl;			/* RDC channel table group */
+	uint8_t		mpr_npr;		/* MAC and VLAN preference */
+	uint8_t		odd_parity;
+} nxge_mv_cfg_t, *p_nxge_mv_cfg_t;
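+/*
+ * Illustrative sketch (index and field values assumed, not from the
+ * hardware spec): steering VLAN ID 100 to RDC table group 2 through the
+ * per-port classification configuration (nxge_class_pt_cfg_t below):
+ *
+ *	nxge_mv_cfg_t *vp = &class_cfgp->vlan_tbl[100];
+ *	vp->flag = 1;			configured
+ *	vp->rdctbl = 2;			RDC channel table group
+ *	vp->mpr_npr = 1;		preference bit (meaning assumed)
+ */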
+
+typedef struct nxge_param_map {
+#if defined(_BIG_ENDIAN)
+	uint32_t		rsrvd2:2;	/* [30:31] rsrvd */
+	uint32_t		remove:1;	/* [29] Remove */
+	uint32_t		pref:1;		/* [28] preference */
+	uint32_t		rsrv:4;		/* [27:24] reserved */
+	uint32_t		map_to:8;	/* [23:16] map to resource */
+	uint32_t		param_id:16;	/* [15:0] Param ID */
+#else
+	uint32_t		param_id:16;	/* [15:0] Param ID */
+	uint32_t		map_to:8;	/* [23:16] map to resource */
+	uint32_t		rsrv:4;		/* [27:24] reserved */
+	uint32_t		pref:1;		/* [28] preference */
+	uint32_t		remove:1;	/* [29] Remove */
+	uint32_t		rsrvd2:2;	/* [30:31] rsrvd */
+#endif
+} nxge_param_map_t, *p_nxge_param_map_t;
+
+typedef struct nxge_rcr_param {
+#if defined(_BIG_ENDIAN)
+	uint32_t		rsrvd2:2;	/* [30:31] rsrvd */
+	uint32_t		remove:1;	/* [29] Remove */
+	uint32_t		rsrv:5;		/* [28:24] reserved */
+	uint32_t		rdc:8;		/* [23:16] rdc # */
+	uint32_t		cfg_val:16;	/* [15:0] interrupt parameter */
+#else
+	uint32_t		cfg_val:16;	/* [15:0] interrupt parameter */
+	uint32_t		rdc:8;		/* [23:16] rdc # */
+	uint32_t		rsrv:5;		/* [28:24] reserved */
+	uint32_t		remove:1;	/* [29] Remove */
+	uint32_t		rsrvd2:2;	/* [30:31] rsrvd */
+#endif
+} nxge_rcr_param_t, *p_nxge_rcr_param_t;
+
+/* Needs to have entries in the ndd table */
+/*
+ * Hardware properties created by fcode.
+ * In order for those properties to be visible to the user
+ * command ndd, we need to add the following properties
+ * to the ndd-defined parameter array and data structures.
+ *
+ * Use default static configuration for x86.
+ */
+typedef struct nxge_hw_pt_cfg {
+	uint32_t	partition_id;	 /* partition Id		*/
+	uint32_t	read_write_mode; /* read write permission mode	*/
+	uint32_t	function_number; /* function number		*/
+	uint32_t	start_tdc;	 /* start TDC (0 - 31)		*/
+	uint32_t	max_tdcs;	 /* max TDC in sequence		*/
+	uint32_t	start_rdc;	 /* start RDC (0 - 31)		*/
+	uint32_t	max_rdcs;	 /* max rdc in sequence		*/
+	uint32_t	ninterrupts;	/* obp interrupts(mac/mif/syserr) */
+	uint32_t	mac_ldvid;
+	uint32_t	mif_ldvid;
+	uint32_t	ser_ldvid;
+	uint32_t	def_rdc;	 /* default RDC			*/
+	uint32_t	drr_wt;		 /* port DRR weight		*/
+	uint32_t	rx_full_header;	 /* select the header flag	*/
+	uint32_t	start_grpid;	 /* starting group ID		*/
+	uint32_t	max_grpids;	 /* max group ID		*/
+	uint32_t	start_rdc_grpid; /* starting RDC group ID	*/
+	uint32_t	max_rdc_grpids;	 /* max RDC group ID		*/
+	uint32_t	start_ldg;	 /* starting logical group # 	*/
+	uint32_t	max_ldgs;	 /* max logical device group	*/
+	uint32_t	max_ldvs;	 /* max logical devices		*/
+	uint32_t	start_mac_entry; /* where to put the first mac	*/
+	uint32_t	max_macs;	 /* the max mac entry allowed	*/
+	uint32_t	mac_pref;	 /* preference over VLAN	*/
+	uint32_t	def_mac_rxdma_grpid; /* default RDC group ID	*/
+	uint32_t	start_vlan;	 /* starting VLAN ID		*/
+	uint32_t	max_vlans;	 /* max VLAN ID			*/
+	uint32_t	vlan_pref;	 /* preference over MAC		*/
+	uint32_t	def_vlan_rxdma_grpid; /* default RDC group Id	*/
+
+	/* Expand if we have more hardware or default configurations    */
+	uint16_t	ldg[NXGE_INT_MAX_LDG];
+	uint16_t	ldg_chn_start;
+} nxge_hw_pt_cfg_t, *p_nxge_hw_pt_cfg_t;
+
+
+/* per port configuration */
+typedef struct nxge_dma_pt_cfg {
+	uint8_t		mac_port;	/* MAC port (function)		*/
+	nxge_hw_pt_cfg_t hw_config;	/* hardware configuration 	*/
+
+	uint32_t alloc_buf_size;
+	uint32_t rbr_size;
+	uint32_t rcr_size;
+
+	/*
+	 * Configuration for hardware initialization based on the
+	 * hardware properties or the default properties.
+	 */
+	uint32_t	tx_dma_map;	/* Transmit DMA channel bit map */
+
+	/* Receive DMA channel */
+	nxge_rdc_grp_t	rdc_grps[NXGE_MAX_RDC_GROUPS];
+
+	uint16_t	rcr_timeout[NXGE_MAX_RDCS];
+	uint16_t	rcr_threshold[NXGE_MAX_RDCS];
+	uint8_t	rcr_full_header;
+	uint16_t	rx_drr_weight;
+
+	/* Add more stuff later */
+} nxge_dma_pt_cfg_t, *p_nxge_dma_pt_cfg_t;
+
+/* classification configuration */
+typedef struct nxge_class_pt_cfg {
+
+	/* MAC table */
+	nxge_mv_cfg_t	mac_host_info[NXGE_MAX_MACS];
+
+	/* VLAN table */
+	nxge_mv_cfg_t	vlan_tbl[NXGE_MAX_VLANS];
+	/* class config value */
+	uint32_t	init_h1;
+	uint16_t	init_h2;
+	uint8_t mcast_rdcgrp;
+	uint8_t mac_rdcgrp;
+	uint32_t	class_cfg[TCAM_CLASS_MAX];
+} nxge_class_pt_cfg_t, *p_nxge_class_pt_cfg_t;
+
+/* per Neptune sharable resources among ports */
+typedef struct nxge_common {
+	uint32_t		partition_id;
+	boolean_t		mode32;
+	/* DMA Channels: RDC and TDC */
+	nxge_rdc_cfg_t		rdc_config[NXGE_MAX_RDCS];
+	nxge_tdc_cfg_t		tdc_config[NXGE_MAX_TDCS];
+	nxge_dma_common_cfg_t	dma_common_config;
+
+	uint32_t		timer_res;
+	boolean_t		ld_sys_error_set;
+	uint8_t			sys_error_owner;
+
+	/* Layer 2/3/4 */
+	uint16_t		class2_etype;
+	uint16_t		class3_etype;
+
+	/* FCRAM (hashing) */
+	uint32_t		hash1_initval;
+	uint32_t		hash2_initval;
+} nxge_common_t, *p_nxge_common_t;
+
+/*
+ * Partition (logical domain) configuration per Neptune/NIU.
+ */
+typedef struct nxge_part_cfg {
+	uint32_t	rdc_grpbits;	/* RDC group bit masks */
+	uint32_t	tdc_bitmap;	/* bounded TDC */
+	nxge_dma_pt_cfg_t pt_config[NXGE_MAX_PORTS];
+
+	/* Flow Classification Partition (flow partition select register) */
+	uint8_t		hash_lookup;	/* external lookup is available */
+	uint8_t		base_mask;	/* select bits in base_h1 to replace */
+					/* bits [19:15] in Hash 1. */
+	uint8_t		base_h1;	/* value to replace Hash 1 [19:15]. */
+
+	/* Add more here */
+	uint32_t	attributes;	/* permission and attribute bits */
+#define	FZC_SERVICE_ENTITY		0x01
+#define	FZC_READ_WRITE			0x02
+#define	FZC_READ_ONLY			0x04
+} nxge_part_cfg_t, *p_nxge_part_cfg_t;
+
+typedef struct nxge_hw_list {
+	struct nxge_hw_list 	*next;
+	nxge_os_mutex_t 	nxge_cfg_lock;
+	nxge_os_mutex_t 	nxge_tcam_lock;
+	nxge_os_mutex_t 	nxge_vlan_lock;
+	nxge_os_mutex_t 	nxge_mdio_lock;
+	nxge_os_mutex_t 	nxge_mii_lock;
+
+	nxge_dev_info_t		*parent_devp;
+	struct _nxge_t		*nxge_p[NXGE_MAX_PORTS];
+	uint32_t		ndevs;
+	uint32_t 		flags;
+	uint32_t 		magic;
+} nxge_hw_list_t, *p_nxge_hw_list_t;
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _SYS_NXGE_NXGE_COMMON_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/sys/nxge/nxge_common_impl.h	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,384 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef	_SYS_NXGE_NXGE_COMMON_IMPL_H
+#define	_SYS_NXGE_NXGE_COMMON_IMPL_H
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#define	NPI_REGH(npi_handle)		(npi_handle.regh)
+#define	NPI_REGP(npi_handle)		(npi_handle.regp)
+
+#if defined(NXGE_DEBUG_DMA) || defined(NXGE_DEBUG_TXC)
+#define	__NXGE_STATIC
+#define	__NXGE_INLINE
+#else
+#define	__NXGE_STATIC			static
+#define	__NXGE_INLINE			inline
+#endif
+
+#ifdef	AXIS_DEBUG
+#define	AXIS_WAIT			(100000)
+#define	AXIS_LONG_WAIT			(100000)
+#define	AXIS_WAIT_W			(80000)
+#define	AXIS_WAIT_R			(100000)
+#define	AXIS_WAIT_LOOP			(4000)
+#define	AXIS_WAIT_PER_LOOP		(AXIS_WAIT_R/AXIS_WAIT_LOOP)
+#endif
+
+#define	NO_DEBUG	0x0000000000000000ULL
+#define	MDT_CTL		0x0000000000000001ULL
+#define	RX_CTL		0x0000000000000002ULL
+#define	TX_CTL		0x0000000000000004ULL
+#define	OBP_CTL		0x0000000000000008ULL
+
+#define	VPD_CTL		0x0000000000000010ULL
+#define	DDI_CTL		0x0000000000000020ULL
+#define	MEM_CTL		0x0000000000000040ULL
+#define	SAP_CTL		0x0000000000000080ULL
+
+#define	IOC_CTL		0x0000000000000100ULL
+#define	MOD_CTL		0x0000000000000200ULL
+#define	DMA_CTL		0x0000000000000400ULL
+#define	STR_CTL		0x0000000000000800ULL
+
+#define	INT_CTL		0x0000000000001000ULL
+#define	SYSERR_CTL	0x0000000000002000ULL
+#define	KST_CTL		0x0000000000004000ULL
+#define	PCS_CTL		0x0000000000008000ULL
+
+#define	MII_CTL		0x0000000000010000ULL
+#define	MIF_CTL		0x0000000000020000ULL
+#define	FCRAM_CTL	0x0000000000040000ULL
+#define	MAC_CTL		0x0000000000080000ULL
+
+#define	IPP_CTL		0x0000000000100000ULL
+#define	DMA2_CTL	0x0000000000200000ULL
+#define	RX2_CTL		0x0000000000400000ULL
+#define	TX2_CTL		0x0000000000800000ULL
+
+#define	MEM2_CTL	0x0000000001000000ULL
+#define	MEM3_CTL	0x0000000002000000ULL
+#define	NXGE_CTL	0x0000000004000000ULL
+#define	NDD_CTL		0x0000000008000000ULL
+#define	NDD2_CTL	0x0000000010000000ULL
+
+#define	TCAM_CTL	0x0000000020000000ULL
+#define	CFG_CTL		0x0000000040000000ULL
+#define	CFG2_CTL	0x0000000080000000ULL
+
+#define	FFLP_CTL	(TCAM_CTL | FCRAM_CTL)
+
+#define	VIR_CTL		0x0000000100000000ULL
+#define	VIR2_CTL	0x0000000200000000ULL
+
+#define	NXGE_NOTE	0x0000001000000000ULL
+#define	NXGE_ERR_CTL	0x0000002000000000ULL
+
+#define	DUMP_ALWAYS	0x2000000000000000ULL
+
+/* NPI Debug and Error defines */
+#define	NPI_RDC_CTL	0x0000000000000001ULL
+#define	NPI_TDC_CTL	0x0000000000000002ULL
+#define	NPI_TXC_CTL	0x0000000000000004ULL
+#define	NPI_IPP_CTL	0x0000000000000008ULL
+
+#define	NPI_XPCS_CTL	0x0000000000000010ULL
+#define	NPI_PCS_CTL	0x0000000000000020ULL
+#define	NPI_ESR_CTL	0x0000000000000040ULL
+#define	NPI_BMAC_CTL	0x0000000000000080ULL
+#define	NPI_XMAC_CTL	0x0000000000000100ULL
+#define	NPI_MAC_CTL	(NPI_BMAC_CTL | NPI_XMAC_CTL)
+
+#define	NPI_ZCP_CTL	0x0000000000000200ULL
+#define	NPI_TCAM_CTL	0x0000000000000400ULL
+#define	NPI_FCRAM_CTL	0x0000000000000800ULL
+#define	NPI_FFLP_CTL	(NPI_TCAM_CTL | NPI_FCRAM_CTL)
+
+#define	NPI_VIR_CTL	0x0000000000001000ULL
+#define	NPI_PIO_CTL	0x0000000000002000ULL
+#define	NPI_VIO_CTL	0x0000000000004000ULL
+
+#define	NPI_REG_CTL	0x0000000040000000ULL
+#define	NPI_CTL		0x0000000080000000ULL
+#define	NPI_ERR_CTL	0x0000000080000000ULL
+
+#if defined(SOLARIS) && defined(_KERNEL)
+
+#include <sys/types.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/dditypes.h>
+#include <sys/ethernet.h>
+
+#ifdef NXGE_DEBUG
+#define	NXGE_DEBUG_MSG(params) nxge_debug_msg params
+#else
+#define	NXGE_DEBUG_MSG(params)
+#endif
+
+#if 1
+#define	NXGE_ERROR_MSG(params)	nxge_debug_msg params
+#define	NXGE_WARN_MSG(params)	nxge_debug_msg params
+#else
+#define	NXGE_ERROR_MSG(params)
+#define	NXGE_WARN_MSG(params)
+#endif
+
+typedef kmutex_t			nxge_os_mutex_t;
+typedef	krwlock_t			nxge_os_rwlock_t;
+
+typedef	dev_info_t			nxge_dev_info_t;
+typedef	ddi_iblock_cookie_t 		nxge_intr_cookie_t;
+
+typedef ddi_acc_handle_t		nxge_os_acc_handle_t;
+typedef	nxge_os_acc_handle_t		npi_reg_handle_t;
+typedef	uint64_t			npi_reg_ptr_t;
+
+typedef ddi_dma_handle_t		nxge_os_dma_handle_t;
+typedef struct _nxge_dma_common_t	nxge_os_dma_common_t;
+typedef struct _nxge_block_mv_t		nxge_os_block_mv_t;
+typedef frtn_t				nxge_os_frtn_t;
+
+#define	NXGE_MUTEX_DRIVER		MUTEX_DRIVER
+#define	MUTEX_INIT(lock, name, type, arg) \
+	mutex_init(lock, name, type, arg)
+#define	MUTEX_ENTER(lock)		mutex_enter(lock)
+#define	MUTEX_TRY_ENTER(lock)		mutex_tryenter(lock)
+#define	MUTEX_EXIT(lock)		mutex_exit(lock)
+#define	MUTEX_DESTROY(lock)		mutex_destroy(lock)
+
+#define	RW_INIT(lock, name, type, arg)	rw_init(lock, name, type, arg)
+#define	RW_ENTER_WRITER(lock)		rw_enter(lock, RW_WRITER)
+#define	RW_ENTER_READER(lock)		rw_enter(lock, RW_READER)
+#define	RW_TRY_ENTER(lock, type)	rw_tryenter(lock, type)
+#define	RW_EXIT(lock)			rw_exit(lock)
+#define	RW_DESTROY(lock)		rw_destroy(lock)
+#define	KMEM_ALLOC(size, flag)		kmem_alloc(size, flag)
+#define	KMEM_ZALLOC(size, flag)		kmem_zalloc(size, flag)
+#define	KMEM_FREE(buf, size)		kmem_free(buf, size)
+
+#define	NXGE_DELAY(microseconds)	 (drv_usecwait(microseconds))
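+/*
+ * Illustrative use of the lock wrappers above (sketch only; the lock
+ * name is taken from nxge_hw_list_t in nxge_common.h):
+ *
+ *	MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, NXGE_MUTEX_DRIVER, NULL);
+ *	MUTEX_ENTER(&hw_p->nxge_cfg_lock);
+ *	...update the shared per-Neptune configuration...
+ *	MUTEX_EXIT(&hw_p->nxge_cfg_lock);
+ *	MUTEX_DESTROY(&hw_p->nxge_cfg_lock);
+ */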
+
+#define	NXGE_PIO_READ8(handle, devaddr, offset) \
+	(ddi_get8(handle, (uint8_t *)((caddr_t)devaddr + offset)))
+
+#define	NXGE_PIO_READ16(handle, devaddr, offset) \
+	(ddi_get16(handle, (uint16_t *)((caddr_t)devaddr + offset)))
+
+#define	NXGE_PIO_READ32(handle, devaddr, offset) \
+	(ddi_get32(handle, (uint32_t *)((caddr_t)devaddr + offset)))
+
+#define	NXGE_PIO_READ64(handle, devaddr, offset) \
+	(ddi_get64(handle, (uint64_t *)((caddr_t)devaddr + offset)))
+
+#define	NXGE_PIO_WRITE8(handle, devaddr, offset, data) \
+	(ddi_put8(handle, (uint8_t *)((caddr_t)devaddr + offset), data))
+
+#define	NXGE_PIO_WRITE16(handle, devaddr, offset, data) \
+	(ddi_put16(handle, (uint16_t *)((caddr_t)devaddr + offset), data))
+
+#define	NXGE_PIO_WRITE32(handle, devaddr, offset, data)	\
+	(ddi_put32(handle, (uint32_t *)((caddr_t)devaddr + offset), data))
+
+#define	NXGE_PIO_WRITE64(handle, devaddr, offset, data) \
+	(ddi_put64(handle, (uint64_t *)((caddr_t)devaddr + offset), data))
+
+#define	NXGE_NPI_PIO_READ8(npi_handle, offset) \
+	(ddi_get8(NPI_REGH(npi_handle),	\
+	(uint8_t *)(NPI_REGP(npi_handle) + offset)))
+
+#define	NXGE_NPI_PIO_READ16(npi_handle, offset) \
+	(ddi_get16(NPI_REGH(npi_handle), \
+	(uint16_t *)(NPI_REGP(npi_handle) + offset)))
+
+#define	NXGE_NPI_PIO_READ32(npi_handle, offset) \
+	(ddi_get32(NPI_REGH(npi_handle), \
+	(uint32_t *)(NPI_REGP(npi_handle) + offset)))
+
+#define	NXGE_NPI_PIO_READ64(npi_handle, offset)		\
+	(ddi_get64(NPI_REGH(npi_handle),		\
+	(uint64_t *)(NPI_REGP(npi_handle) + offset)))
+
+#define	NXGE_NPI_PIO_WRITE8(npi_handle, offset, data)	\
+	(ddi_put8(NPI_REGH(npi_handle),			\
+	(uint8_t *)(NPI_REGP(npi_handle) + offset), data))
+
+#define	NXGE_NPI_PIO_WRITE16(npi_handle, offset, data)	\
+	(ddi_put16(NPI_REGH(npi_handle),		\
+	(uint16_t *)(NPI_REGP(npi_handle) + offset), data))
+
+#define	NXGE_NPI_PIO_WRITE32(npi_handle, offset, data)	\
+	(ddi_put32(NPI_REGH(npi_handle),		\
+	(uint32_t *)(NPI_REGP(npi_handle) + offset), data))
+
+#define	NXGE_NPI_PIO_WRITE64(npi_handle, offset, data)	\
+	(ddi_put64(NPI_REGH(npi_handle),		\
+	(uint64_t *)(NPI_REGP(npi_handle) + offset), data))
+
+#define	NXGE_MEM_PIO_READ8(npi_handle)		\
+	(ddi_get8(NPI_REGH(npi_handle), (uint8_t *)NPI_REGP(npi_handle)))
+
+#define	NXGE_MEM_PIO_READ16(npi_handle)		\
+	(ddi_get16(NPI_REGH(npi_handle), (uint16_t *)NPI_REGP(npi_handle)))
+
+#define	NXGE_MEM_PIO_READ32(npi_handle)		\
+	(ddi_get32(NPI_REGH(npi_handle), (uint32_t *)NPI_REGP(npi_handle)))
+
+#define	NXGE_MEM_PIO_READ64(npi_handle)		\
+	(ddi_get64(NPI_REGH(npi_handle), (uint64_t *)NPI_REGP(npi_handle)))
+
+#define	NXGE_MEM_PIO_WRITE8(npi_handle, data)	\
+	(ddi_put8(NPI_REGH(npi_handle), (uint8_t *)NPI_REGP(npi_handle), data))
+
+#define	NXGE_MEM_PIO_WRITE16(npi_handle, data)	\
+		(ddi_put16(NPI_REGH(npi_handle),	\
+		(uint16_t *)NPI_REGP(npi_handle), data))
+
+#define	NXGE_MEM_PIO_WRITE32(npi_handle, data)	\
+		(ddi_put32(NPI_REGH(npi_handle),	\
+		(uint32_t *)NPI_REGP(npi_handle), data))
+
+#define	NXGE_MEM_PIO_WRITE64(npi_handle, data)	\
+		(ddi_put64(NPI_REGH(npi_handle),	\
+		(uint64_t *)NPI_REGP(npi_handle), data))
+
+#define	SERVICE_LOST		DDI_SERVICE_LOST
+#define	SERVICE_DEGRADED	DDI_SERVICE_DEGRADED
+#define	SERVICE_UNAFFECTED	DDI_SERVICE_UNAFFECTED
+#define	SERVICE_RESTORED	DDI_SERVICE_RESTORED
+
+#define	DATAPATH_FAULT		DDI_DATAPATH_FAULT
+#define	DEVICE_FAULT		DDI_DEVICE_FAULT
+#define	EXTERNAL_FAULT		DDI_EXTERNAL_FAULT
+
+#define	NOTE_LINK_UP		DL_NOTE_LINK_UP
+#define	NOTE_LINK_DOWN		DL_NOTE_LINK_DOWN
+#define	NOTE_SPEED		DL_NOTE_SPEED
+#define	NOTE_PHYS_ADDR		DL_NOTE_PHYS_ADDR
+#define	NOTE_AGGR_AVAIL		DL_NOTE_AGGR_AVAIL
+#define	NOTE_AGGR_UNAVAIL	DL_NOTE_AGGR_UNAVAIL
+
+#define	FM_REPORT_FAULT(nxgep, impact, location, msg)\
+		ddi_dev_report_fault(nxgep->dip, impact, location, msg)
+#define	FM_CHECK_DEV_HANDLE(nxgep)\
+		ddi_check_acc_handle(nxgep->dev_regs->nxge_regh)
+#define	FM_GET_DEVSTATE(nxgep)\
+		ddi_get_devstate(nxgep->dip)
+#define	FM_SERVICE_RESTORED(nxgep)\
+		ddi_fm_service_impact(nxgep->dip, DDI_SERVICE_RESTORED)
+#define	NXGE_FM_REPORT_ERROR(nxgep, portn, chan, ereport_id)\
+		nxge_fm_report_error(nxgep, portn, chan, ereport_id)
+#define	FM_CHECK_ACC_HANDLE(nxgep, handle)\
+		fm_check_acc_handle(handle)
+#define	FM_CHECK_DMA_HANDLE(nxgep, handle)\
+		fm_check_dma_handle(handle)
+
+#endif
+
+#if defined(REG_TRACE)
+#define	NXGE_REG_RD64(handle, offset, val_p) {\
+	*(val_p) = NXGE_NPI_PIO_READ64(handle, offset);\
+	npi_rtrace_update(handle, B_FALSE, &npi_rtracebuf, (uint32_t)offset, \
+			(uint64_t)(*(val_p)));\
+}
+#elif defined(REG_SHOW)
+	/*
+	 * Send 0xbadbad to tell rt_show_reg that we do not have
+	 * a valid RTBUF index to pass
+	 */
+#define	NXGE_REG_RD64(handle, offset, val_p) {\
+	*(val_p) = NXGE_NPI_PIO_READ64(handle, offset);\
+	rt_show_reg(0xbadbad, B_FALSE, (uint32_t)offset, (uint64_t)(*(val_p)));\
+}
+#elif defined(AXIS_DEBUG) && !defined(LEGION)
+#define	NXGE_REG_RD64(handle, offset, val_p) {\
+	int	n;				\
+	for (n = 0; n < AXIS_WAIT_LOOP; n++) {	\
+		*(val_p) = 0;		\
+		*(val_p) = NXGE_NPI_PIO_READ64(handle, offset);\
+		if (*(val_p) != (~0)) { \
+			break; \
+		}	\
+		drv_usecwait(AXIS_WAIT_PER_LOOP); \
+		if (n < 20) { \
+			cmn_err(CE_WARN, "NXGE_REG_RD64: loop %d " \
+			"REG 0x%x(0x%llx)", \
+			n, offset, *val_p);\
+		}	\
+	} \
+	if (n >= AXIS_WAIT_LOOP) {	\
+		cmn_err(CE_WARN, "(FATAL)NXGE_REG_RD64 on offset 0x%x " \
+			"with -1!!!", offset); \
+	}	\
+}
+#else
+
+#define	NXGE_REG_RD64(handle, offset, val_p) {\
+	*(val_p) = NXGE_NPI_PIO_READ64(handle, offset);\
+}
+#endif
+
+/*
+ * In COSIM mode, polling for the completion of a Clause 45 MDIO frame
+ * operation can loop for a very long time.  Displaying one rtrace line
+ * per poll would clutter the screen, so this macro reads the register
+ * without generating an rtrace entry.
+ */
+#define	NXGE_REG_RD64_NO_SHOW(handle, offset, val_p) {\
+	*(val_p) = NXGE_NPI_PIO_READ64(handle, offset);\
+}
+
+
+#if defined(REG_TRACE)
+#define	NXGE_REG_WR64(handle, offset, val) {\
+	NXGE_NPI_PIO_WRITE64(handle, (offset), (val));\
+	npi_rtrace_update(handle, B_TRUE, &npi_rtracebuf, (uint32_t)offset,\
+				(uint64_t)(val));\
+}
+#elif defined(REG_SHOW)
+/*
+ * Send 0xbadbad to tell rt_show_reg that we do not have
+ * a valid RTBUF index to pass
+ */
+#define	NXGE_REG_WR64(handle, offset, val) {\
+	NXGE_NPI_PIO_WRITE64(handle, offset, (val));\
+	rt_show_reg(0xbadbad, B_TRUE, (uint32_t)offset, (uint64_t)(val));\
+}
+#else
+#define	NXGE_REG_WR64(handle, offset, val) {\
+	NXGE_NPI_PIO_WRITE64(handle, (offset), (val));\
+}
+#endif
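+/*
+ * Illustrative read-modify-write using the wrappers above (sketch only;
+ * "offset" stands for any register offset from nxge_defs.h):
+ *
+ *	uint64_t	val;
+ *
+ *	NXGE_REG_RD64(nxgep->npi_handle, offset, &val);
+ *	NXGE_REG_WR64(nxgep->npi_handle, offset, val | 0x1);
+ */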
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _SYS_NXGE_NXGE_COMMON_IMPL_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/sys/nxge/nxge_defs.h	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,465 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef	_SYS_NXGE_NXGE_DEFS_H
+#define	_SYS_NXGE_NXGE_DEFS_H
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+/*
+ * Block Address Assignment (24-bit base address)
+ * (bits [23:20]: block	 [19]: set to 1 for FZC	)
+ */
+#define	PIO			0x000000
+#define	FZC_PIO			0x080000
+#define	RESERVED_1		0x100000
+#define	FZC_MAC			0x180000
+#define	RESERVED_2		0x200000
+#define	FZC_IPP			0x280000
+#define	FFLP			0x300000
+#define	FZC_FFLP		0x380000
+#define	PIO_VADDR		0x400000
+#define	RESERVED_3		0x480000
+#define	ZCP			0x500000
+#define	FZC_ZCP			0x580000
+#define	DMC			0x600000
+#define	FZC_DMC			0x680000
+#define	TXC			0x700000
+#define	FZC_TXC			0x780000
+#define	PIO_LDSV		0x800000
+#define	RESERVED_4		0x880000
+#define	PIO_LDGIM		0x900000
+#define	RESERVED_5		0x980000
+#define	PIO_IMASK0		0xa00000
+#define	RESERVED_6		0xa80000
+#define	PIO_IMASK1		0xb00000
+#define	RESERVED_7_START	0xb80000
+#define	RESERVED_7_END		0xc00000
+#define	FZC_PROM		0xc80000
+#define	RESERVED_8		0xd00000
+#define	FZC_PIM			0xd80000
+#define	RESERVED_9_START 	0xe00000
+#define	RESERVED_9_END 		0xf80000
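+/*
+ * Example of the encoding above: FZC_MAC (0x180000) has block bits
+ * [23:20] = 0x1 with bit [19] set, i.e. the FZC region of the MAC block.
+ * Individual registers are offsets from the block base, e.g. LDGITMRES
+ * below is FZC_PIO + 0x8 = 0x080008.
+ */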
+
+/* PIO		(0x000000) */
+
+
+/* FZC_PIO	(0x080000) */
+#define	LDGITMRES		(FZC_PIO + 0x00008)	/* timer resolution */
+#define	SID			(FZC_PIO + 0x10200)	/* 64 LDG, INT data */
+#define	LDG_NUM			(FZC_PIO + 0x20000)	/* 69 LDs */
+
+
+
+/* FZC_IPP 	(0x280000) */
+
+
+/* FFLP		(0x300000), Header Parser */
+
+/* PIO_VADDR	(0x400000), PIO Virtual DMA Address */
+/* ?? how to access DMA via PIO_VADDR? */
+#define	VADDR			(PIO_VADDR + 0x00000) /* ?? not for driver */
+
+
+/* ZCP		(0x500000), Neptune Only */
+
+
+/* FZC_ZCP	(0x580000), Neptune Only */
+
+
+/* DMC 		(0x600000), register offset (32 DMA channels) */
+
+/* Transmit Ring Register Offset (32 Channels) */
+#define	TX_RNG_CFIG		(DMC + 0x40000)
+#define	TX_RING_HDH		(DMC + 0x40008)
+#define	TX_RING_HDL		(DMC + 0x40010)
+#define	TX_RING_KICK		(DMC + 0x40018)
+/* Transmit Operations (32 Channels) */
+#define	TX_ENT_MSK		(DMC + 0x40020)
+#define	TX_CS			(DMC + 0x40028)
+#define	TXDMA_MBH		(DMC + 0x40030)
+#define	TXDMA_MBL		(DMC + 0x40038)
+#define	TX_DMA_PRE_ST		(DMC + 0x40040)
+#define	TX_RNG_ERR_LOGH		(DMC + 0x40048)
+#define	TX_RNG_ERR_LOGL		(DMC + 0x40050)
+#if OLD
+#define	SH_TX_RNG_ERR_LOGH	(DMC + 0x40058)
+#define	SH_TX_RNG_ERR_LOGL	(DMC + 0x40060)
+#endif
+
+/* FZC_DMC RED Initial Random Value register offset (global) */
+#define	RED_RAN_INIT		(FZC_DMC + 0x00068)
+
+#define	RX_ADDR_MD		(FZC_DMC + 0x00070)
+
+/* FZC_DMC Ethernet Timeout Counter register offset (global) */
+#define	EING_TIMEOUT		(FZC_DMC + 0x00078)
+
+/* RDC Table */
+#define	RDC_TBL			(FZC_DMC + 0x10000)	/* 256 * 8 */
+
+/* FZC_DMC partitioning support register offset (32 channels) */
+
+#define	TX_LOG_PAGE_VLD		(FZC_DMC + 0x40000)
+#define	TX_LOG_MASK1		(FZC_DMC + 0x40008)
+#define	TX_LOG_VAL1		(FZC_DMC + 0x40010)
+#define	TX_LOG_MASK2		(FZC_DMC + 0x40018)
+#define	TX_LOG_VAL2		(FZC_DMC + 0x40020)
+#define	TX_LOG_PAGE_RELO1	(FZC_DMC + 0x40028)
+#define	TX_LOG_PAGE_RELO2	(FZC_DMC + 0x40030)
+#define	TX_LOG_PAGE_HDL		(FZC_DMC + 0x40038)
+
+#define	TX_ADDR_MOD		(FZC_DMC + 0x41000) /* only one? */
+
+
+/* FZC_DMC RED Parameters register offset (32 channels) */
+#define	RDC_RED_PARA1		(FZC_DMC + 0x30000)
+#define	RDC_RED_PARA2		(FZC_DMC + 0x30008)
+/* FZC_DMC RED Discard Count Register offset (32 channels) */
+#define	RED_DIS_CNT		(FZC_DMC + 0x30010)
+
+#if OLD /* This has been moved to TXC */
+/* Transmit Ring Scheduler (per port) */
+#define	TX_DMA_MAP0		(FZC_DMC + 0x50000)
+#define	TX_DMA_MAP1		(FZC_DMC + 0x50008)
+#define	TX_DMA_MAP2		(FZC_DMC + 0x50010)
+#define	TX_DMA_MAP3		(FZC_DMC + 0x50018)
+#endif
+
+/* Transmit Ring Scheduler: DRR Weight (32 Channels) */
+#define	DRR_WT			(FZC_DMC + 0x51000)
+#if OLD
+#define	TXRNG_USE		(FZC_DMC + 0x51008)
+#endif
+
+/* TXC		(0x700000)??	*/
+
+
+/* FZC_TXC	(0x780000)??	*/
+
+
+/*
+ * PIO_LDSV	(0x800000)
+ * Logical Device State Vector 0, 1, 2.
+ * (69 logical devices, 8192 apart, partitioning control)
+ */
+#define	LDSV0			(PIO_LDSV + 0x00000)	/* RO (64 - 69) */
+#define	LDSV1			(PIO_LDSV + 0x00008)	/* RO (32 - 63) */
+#define	LDSV2			(PIO_LDSV + 0x00010)	/* RO ( 0 - 31) */
+
+/*
+ * PIO_LDGIM	(0x900000)
+ * Logical Device Group Interrupt Management (64 groups).
+ * (count 64, step 8192)
+ */
+#define	LDGIMGN			(PIO_LDGIM + 0x00000)	/* RW */
+
+/*
+ * PIO_IMASK0	(0xa00000)
+ *
+ * Logical Device Masks 0, 1.
+ * (64 logical devices, 8192 apart, partitioning control)
+ */
+#define	LD_IM0			(PIO_IMASK0 + 0x00000)	/* RW ( 0 - 63) */
+
+/*
+ * PIO_IMASK1	(0xb00000)
+ *
+ * Logical Device Masks 0, 1.
+ * (5 logical devices, 8192 apart, partitioning control)
+ */
+#define	LD_IM1			(PIO_IMASK1 + 0x00000)	/* RW (64 - 69) */
+
+
+/* DMC/TMC CSR size */
+#define	DMA_CSR_SIZE		512
+#define	DMA_CSR_MIN_PAGE_SIZE	1024
+
+/*
+ * Define the Default RBR, RCR
+ */
+#define	RBR_DEFAULT_MAX_BLKS	4096	/* each entry (16 blockaddr/64B) */
+#define	RBR_NBLK_PER_LINE	16	/* 16 block addresses per 64 B line */
+#define	RBR_DEFAULT_MAX_LEN	(RBR_DEFAULT_MAX_BLKS)
+#define	RBR_DEFAULT_MIN_LEN	1
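+/*
+ * Worked example: with RBR_DEFAULT_MAX_BLKS = 4096 block addresses and
+ * RBR_NBLK_PER_LINE = 16 addresses per 64 B cache line, a full default
+ * RBR occupies 4096 / 16 = 256 cache lines of descriptor memory.
+ */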
+
+#define	SW_OFFSET_NO_OFFSET		0
+#define	SW_OFFSET_64			1	/* 64 bytes */
+#define	SW_OFFSET_128			2	/* 128 bytes */
+#define	SW_OFFSET_INVALID		3
+
+/*
+ * RBR block descriptor is 32 bits (address bits [43:12]).
+ */
+#define	RBR_BKADDR_SHIFT	12
+
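+/*
+ * Illustrative sketch: a buffer block at DMA address 0x123456000 would
+ * be posted on the RBR as (0x123456000 >> RBR_BKADDR_SHIFT) = 0x123456,
+ * i.e. only address bits [43:12] are kept in the 32-bit descriptor.
+ */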
+
+#define	RCR_DEFAULT_MAX_BLKS	4096	/* each entry (8 blockaddr/64B) */
+#define	RCR_NBLK_PER_LINE	8	/* 8 block addresses per 64 B line */
+#define	RCR_DEFAULT_MAX_LEN	(RCR_DEFAULT_MAX_BLKS)
+#define	RCR_DEFAULT_MIN_LEN	1
+
+/*  DMA Channels.  */
+#define	NXGE_MAX_DMCS		(NXGE_MAX_RDCS + NXGE_MAX_TDCS)
+#define	NXGE_MAX_RDCS		16
+#define	NXGE_MAX_TDCS		24
+#define	NXGE_MAX_TDCS_NIU	16
+/*
+ * original mapping from Hypervisor
+ */
+#ifdef	ORIGINAL
+#define	NXGE_N2_RXDMA_START_LDG	0
+#define	NXGE_N2_TXDMA_START_LDG	16
+#define	NXGE_N2_MIF_LDG		32
+#define	NXGE_N2_MAC_0_LDG	33
+#define	NXGE_N2_MAC_1_LDG	34
+#define	NXGE_N2_SYS_ERROR_LDG	35
+#endif
+
+#define	NXGE_N2_RXDMA_START_LDG	19
+#define	NXGE_N2_TXDMA_START_LDG	27
+#define	NXGE_N2_MIF_LDG		17
+#define	NXGE_N2_MAC_0_LDG	16
+#define	NXGE_N2_MAC_1_LDG	35
+#define	NXGE_N2_SYS_ERROR_LDG	18
+#define	NXGE_N2_LDG_GAP		17
+
+#define	NXGE_MAX_RDC_GRPS	8
+
+/*
+ * Max. ports per Neptune and NIU
+ */
+#define	NXGE_MAX_PORTS			4
+#define	NXGE_PORTS_NEPTUNE		4
+#define	NXGE_PORTS_NIU			2
+
+/* Max. RDC table groups */
+#define	NXGE_MAX_RDC_GROUPS		8
+#define	NXGE_MAX_RDCS			16
+#define	NXGE_MAX_DMAS			32
+
+
+#define	NXGE_MAX_MACS_XMACS		16
+#define	NXGE_MAX_MACS_BMACS		8
+#define	NXGE_MAX_MACS			(NXGE_MAX_PORTS * NXGE_MAX_MACS_XMACS)
+
+#define	NXGE_MAX_VLANS			4096
+#define	VLAN_ETHERTYPE			(0x8100)
+
+
+/* Scaling factor for RBR (receive block ring) */
+#define	RBR_SCALE_1		0
+#define	RBR_SCALE_2		1
+#define	RBR_SCALE_3		2
+#define	RBR_SCALE_4		3
+#define	RBR_SCALE_5		4
+#define	RBR_SCALE_6		5
+#define	RBR_SCALE_7		6
+#define	RBR_SCALE_8		7
+
+
+#define	MAX_PORTS_PER_NXGE	4
+#define	MAX_MACS		32
+
+#define	TX_GATHER_POINTER_SZ	8
+#define	TX_GP_PER_BLOCK		8
+#define	TX_DEFAULT_MAX_GPS	1024	/* Max. # of gather pointers */
+#define	TX_DEFAULT_JUMBO_MAX_GPS 4096	/* Max. # of gather pointers */
+#define	TX_DEFAULT_MAX_LEN	(TX_DEFAULT_MAX_GPS/TX_GP_PER_BLOCK)
+#define	TX_DEFAULT_JUMBO_MAX_LEN (TX_DEFAULT_JUMBO_MAX_GPS/TX_GP_PER_BLOCK)
+
+#define	TX_RING_THRESHOLD		(TX_DEFAULT_MAX_GPS/4)
+#define	TX_RING_JUMBO_THRESHOLD		(TX_DEFAULT_JUMBO_MAX_GPS/4)
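+/*
+ * Worked example: TX_DEFAULT_MAX_GPS = 1024 gather pointers at
+ * TX_GP_PER_BLOCK = 8 pointers per 64 B block gives
+ * TX_DEFAULT_MAX_LEN = 1024 / 8 = 128 blocks; the reclaim threshold is
+ * TX_RING_THRESHOLD = 1024 / 4 = 256 outstanding gather pointers.
+ */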
+
+#define	TRANSMIT_HEADER_SIZE		16	/* 16 B frame header */
+
+#define	TX_DESC_SAD_SHIFT	0
+#define	TX_DESC_SAD_MASK	0x00000FFFFFFFFFFFULL	/* start address */
+#define	TX_DESC_TR_LEN_SHIFT	44
+#define	TX_DESC_TR_LEN_MASK	0x00FFF00000000000ULL	/* Transfer Length */
+#define	TX_DESC_NUM_PTR_SHIFT	58
+#define	TX_DESC_NUM_PTR_MASK	0x2C00000000000000ULL	/* gather pointers */
+#define	TX_DESC_MASK_SHIFT	62
+#define	TX_DESC_MASK_MASK	0x4000000000000000ULL	/* Mark bit */
+#define	TX_DESC_SOP_SHIF	63
+#define	TX_DESC_NUM_MASK	0x8000000000000000ULL	/* Start of packet */
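+/*
+ * Illustrative sketch (values assumed): a descriptor for a 256 byte
+ * transfer from DMA address "sad" could be composed as
+ *
+ *	desc = (sad & TX_DESC_SAD_MASK) |
+ *	    (((uint64_t)256 << TX_DESC_TR_LEN_SHIFT) & TX_DESC_TR_LEN_MASK);
+ *
+ * with the gather pointer count, mark and SOP bits OR-ed in from the
+ * defines above as needed.
+ */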
+
+#define	TCAM_FLOW_KEY_MAX_CLASS		12
+#define	TCAM_L3_MAX_USER_CLASS		4
+#define	TCAM_NIU_TCAM_MAX_ENTRY		128
+#define	TCAM_NXGE_TCAM_MAX_ENTRY	256
+
+
+
+/* TCAM entry formats */
+#define	TCAM_IPV4_5TUPLE_FORMAT	0x00
+#define	TCAM_IPV6_5TUPLE_FORMAT	0x01
+#define	TCAM_ETHERTYPE_FORMAT	0x02
+
+
+/* TCAM */
+#define	TCAM_SELECT_IPV6	0x01
+#define	TCAM_LOOKUP		0x04
+#define	TCAM_DISCARD		0x08
+
+/* FLOW Key */
+#define	FLOW_L4_1_34_BYTES	0x10
+#define	FLOW_L4_1_78_BYTES	0x11
+#define	FLOW_L4_0_12_BYTES	(0x10 << 2)
+#define	FLOW_L4_0_56_BYTES	(0x11 << 2)
+#define	FLOW_PROTO_NEXT		0x10
+#define	FLOW_IPDA		0x20
+#define	FLOW_IPSA		0x40
+#define	FLOW_VLAN		0x80
+#define	FLOW_L2DA		0x100
+#define	FLOW_PORT		0x200
+
+/* TCAM */
+#define	MAX_EFRAME	11
+
+#define	TCAM_USE_L2RDC_FLOW_LOOKUP	0x00
+#define	TCAM_USE_OFFSET_DONE		0x01
+#define	TCAM_OVERRIDE_L2_FLOW_LOOKUP	0x02
+#define	TCAM_OVERRIDE_L2_USE_OFFSET	0x03
+
+/*
+ * FCRAM (Hashing):
+ *	1. IPv4 exact match
+ *	2. IPv6 exact match
+ *	3. IPv4 Optimistic match
+ *	4. IPv6 Optimistic match
+ *
+ */
+#define	FCRAM_IPV4_EXT_MATCH	0x00
+#define	FCRAM_IPV6_EXT_MATCH	0x01
+#define	FCRAM_IPV4_OPTI_MATCH	0x02
+#define	FCRAM_IPV6_OPTI_MATCH	0x03
+
+
+#define	NXGE_HASH_MAX_ENTRY	256
+
+
+#define	MAC_ADDR_LENGTH		6
+
+/* convert values */
+#define	NXGE_BASE(x, y)		(((y) << (x ## _SHIFT)) & (x ## _MASK))
+#define	NXGE_VAL(x, y)		(((y) & (x ## _MASK)) >> (x ## _SHIFT))
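+/*
+ * Example: NXGE_BASE(TX_DESC_TR_LEN, len) expands to
+ * (((len) << TX_DESC_TR_LEN_SHIFT) & TX_DESC_TR_LEN_MASK), placing the
+ * transfer length into its field; NXGE_VAL(TX_DESC_TR_LEN, desc)
+ * extracts it back out.
+ */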
+
+/*
+ * Locate the DMA channel start offset (PIO_VADDR)
+ * (DMA virtual address space of the PIO block)
+ */
+#define	TDMC_PIOVADDR_OFFSET(channel)	(2 * DMA_CSR_SIZE * channel)
+#define	RDMC_PIOVADDR_OFFSET(channel)	(TDMC_OFFSET(channel) + DMA_CSR_SIZE)
+
+/*
+ * PIO access using the DMC block directly (DMC)
+ */
+#define	DMC_OFFSET(channel)	(DMA_CSR_SIZE * channel)
+#define	TDMC_OFFSET(channel)	(TX_RNG_CFIG + DMA_CSR_SIZE * channel)
+
+/*
+ * Number of logical pages.
+ */
+#define	NXGE_MAX_LOGICAL_PAGES		2
+
+#ifdef	SOLARIS
+#ifndef	i386
+#define	_BIT_FIELDS_BIG_ENDIAN		_BIT_FIELDS_HTOL
+#else
+#define	_BIT_FIELDS_LITTLE_ENDIAN	_BIT_FIELDS_LTOH
+#endif
+#else
+#define	_BIT_FIELDS_LITTLE_ENDIAN	_LITTLE_ENDIAN_BITFIELD
+#endif
+
+#ifdef COSIM
+#define	MAX_PIO_RETRIES		3200
+#else
+#define	MAX_PIO_RETRIES		32
+#endif
+
+#define	IS_PORT_NUM_VALID(portn)\
+	(portn < 4)
+
+/*
+ * The following macros expect unsigned input values.
+ */
+#define	TXDMA_CHANNEL_VALID(cn)		(cn < NXGE_MAX_TDCS)
+#define	TXDMA_PAGE_VALID(pn)		(pn < NXGE_MAX_LOGICAL_PAGES)
+#define	TXDMA_FUNC_VALID(fn)		(fn < MAX_PORTS_PER_NXGE)
+#define	FUNC_VALID(n)			(n < MAX_PORTS_PER_NXGE)
+
+/*
+ * DMA channel binding definitions.
+ */
+#define	VIR_PAGE_INDEX_MAX		8
+#define	VIR_SUB_REGIONS			2
+#define	VIR_DMA_BIND			1
+
+#define	SUBREGION_VALID(n)		(n < VIR_SUB_REGIONS)
+#define	VIR_PAGE_INDEX_VALID(n)		(n < VIR_PAGE_INDEX_MAX)
+#define	VRXDMA_CHANNEL_VALID(n)		(n < NXGE_MAX_RDCS)
+
+/*
+ * Logical device definitions.
+ */
+#define	NXGE_INT_MAX_LD		69
+#define	NXGE_INT_MAX_LDG	64
+
+#define	NXGE_RDMA_LD_START	 0
+#define	NXGE_TDMA_LD_START	32
+#define	NXGE_MIF_LD		63
+#define	NXGE_MAC_LD_PORT0	64
+#define	NXGE_MAC_LD_PORT1	65
+#define	NXGE_MAC_LD_PORT2	66
+#define	NXGE_MAC_LD_PORT3	67
+#define	NXGE_SYS_ERROR_LD	68
+
+#define	LDG_VALID(n)			(n < NXGE_INT_MAX_LDG)
+#define	LD_VALID(n)			(n < NXGE_INT_MAX_LD)
+#define	LD_RXDMA_LD_VALID(n)		(n < NXGE_MAX_RDCS)
+#define	LD_TXDMA_LD_VALID(n)		(n >= NXGE_MAX_RDCS && \
+					((n - NXGE_MAX_RDCS) < NXGE_MAX_TDCS))
+#define	LD_MAC_VALID(n)			(IS_PORT_NUM_VALID(n))
+
+#define	LD_TIMER_MAX			0x3f
+#define	LD_INTTIMER_VALID(n)		(n <= LD_TIMER_MAX)
+
+/* System Interrupt Data */
+#define	SID_VECTOR_MAX			0x1f
+#define	SID_VECTOR_VALID(n)		(n <= SID_VECTOR_MAX)
+
+#define	NXGE_COMPILE_32
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _SYS_NXGE_NXGE_DEFS_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/sys/nxge/nxge_espc.h	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,236 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef	_SYS_NXGE_NXGE_ESPC_H
+#define	_SYS_NXGE_NXGE_ESPC_H
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#include <nxge_espc_hw.h>
+
+#define	ESPC_MAC_ADDR_0		ESPC_NCR_REGN(0)
+#define	ESPC_MAC_ADDR_1		ESPC_NCR_REGN(1)
+#define	ESPC_NUM_PORTS_MACS	ESPC_NCR_REGN(2)
+#define	ESPC_MOD_STR_LEN	ESPC_NCR_REGN(4)
+#define	ESPC_MOD_STR_1		ESPC_NCR_REGN(5)
+#define	ESPC_MOD_STR_2		ESPC_NCR_REGN(6)
+#define	ESPC_MOD_STR_3		ESPC_NCR_REGN(7)
+#define	ESPC_MOD_STR_4		ESPC_NCR_REGN(8)
+#define	ESPC_MOD_STR_5		ESPC_NCR_REGN(9)
+#define	ESPC_MOD_STR_6		ESPC_NCR_REGN(10)
+#define	ESPC_MOD_STR_7		ESPC_NCR_REGN(11)
+#define	ESPC_MOD_STR_8		ESPC_NCR_REGN(12)
+#define	ESPC_BD_MOD_STR_LEN	ESPC_NCR_REGN(13)
+#define	ESPC_BD_MOD_STR_1	ESPC_NCR_REGN(14)
+#define	ESPC_BD_MOD_STR_2	ESPC_NCR_REGN(15)
+#define	ESPC_BD_MOD_STR_3	ESPC_NCR_REGN(16)
+#define	ESPC_BD_MOD_STR_4	ESPC_NCR_REGN(17)
+#define	ESPC_PHY_TYPE		ESPC_NCR_REGN(18)
+#define	ESPC_MAX_FM_SZ		ESPC_NCR_REGN(19)
+#define	ESPC_INTR_NUM		ESPC_NCR_REGN(20)
+#define	ESPC_VER_IMGSZ		ESPC_NCR_REGN(21)
+#define	ESPC_CHKSUM		ESPC_NCR_REGN(22)
+
+#define	NUM_PORTS_MASK		0xff
+#define	NUM_MAC_ADDRS_MASK	0xff0000
+#define	NUM_MAC_ADDRS_SHIFT	16
+#define	MOD_STR_LEN_MASK	0xffff
+#define	BD_MOD_STR_LEN_MASK	0xffff
+#define	MAX_FM_SZ_MASK		0xffff
+#define	VER_NUM_MASK		0xffff
+#define	IMG_SZ_MASK		0xffff0000
+#define	IMG_SZ_SHIFT		16
+#define	CHKSUM_MASK		0xff
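+/*
+ * Illustrative sketch (editor's addition, not part of the original header):
+ * decoding the ESPC_NUM_PORTS_MACS register value with the masks above.
+ * The guard macro and function name are hypothetical and never defined;
+ * the driver's own decode logic lives elsewhere.
+ */
+#ifdef NXGE_ESPC_EXAMPLE_SKETCH
+static void
+espc_decode_ports_macs(uint64_t reg_val, uint8_t *nports, uint8_t *nmacs)
+{
+	/* low byte holds the port count */
+	*nports = (uint8_t)(reg_val & NUM_PORTS_MASK);
+	/* bits 23:16 hold the number of MAC addresses */
+	*nmacs = (uint8_t)((reg_val & NUM_MAC_ADDRS_MASK) >>
+	    NUM_MAC_ADDRS_SHIFT);
+}
+#endif	/* NXGE_ESPC_EXAMPLE_SKETCH */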
+
+/* 0 <= n < 8 */
+#define	ESPC_MOD_STR(n)		(ESPC_MOD_STR_1 + n*8)
+#define	MAX_MOD_STR_LEN		32
+
+/* 0 <= n < 4 */
+#define	ESPC_BD_MOD_STR(n)	(ESPC_BD_MOD_STR_1 + n*8)
+#define	MAX_BD_MOD_STR_LEN	16
+
+#define	ESC_PHY_10G_FIBER	0x0
+#define	ESC_PHY_10G_COPPER	0x1
+#define	ESC_PHY_1G_FIBER	0x2
+#define	ESC_PHY_1G_COPPER	0x3
+#define	ESC_PHY_NONE		0xf
+
+#define	ESC_IMG_CHKSUM_VAL	0xab
+
+typedef union _mac_addr_0_t {
+	uint64_t value;
+
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t msw;	/* Most significant word */
+		uint32_t lsw;	/* Least significant word */
+#elif defined(_LITTLE_ENDIAN)
+		uint32_t lsw;	/* Least significant word */
+		uint32_t msw;	/* Most significant word */
+#endif
+	} val;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t	w1;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint32_t byte3		: 8;
+		uint32_t byte2		: 8;
+		uint32_t byte1		: 8;
+		uint32_t byte0		: 8;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint32_t byte0		: 8;
+		uint32_t byte1		: 8;
+		uint32_t byte2		: 8;
+		uint32_t byte3		: 8;
+#endif
+		} w0;
+
+#if defined(_LITTLE_ENDIAN)
+		uint32_t	w1;
+#endif
+	} bits;
+} mac_addr_0_t;
+
+typedef union _mac_addr_1_t {
+	uint64_t value;
+
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t msw;	/* Most significant word */
+		uint32_t lsw;	/* Least significant word */
+#elif defined(_LITTLE_ENDIAN)
+		uint32_t lsw;	/* Least significant word */
+		uint32_t msw;	/* Most significant word */
+#endif
+	} val;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t	w1;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint32_t res		: 16;
+		uint32_t byte5		: 8;
+		uint32_t byte4		: 8;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint32_t byte4		: 8;
+		uint32_t byte5		: 8;
+		uint32_t res		: 16;
+#endif
+		} w0;
+
+#if defined(_LITTLE_ENDIAN)
+		uint32_t	w1;
+#endif
+	} bits;
+} mac_addr_1_t;
+
+
+typedef union _phy_type_t {
+	uint64_t value;
+
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t msw;	/* Most significant word */
+		uint32_t lsw;	/* Least significant word */
+#elif defined(_LITTLE_ENDIAN)
+		uint32_t lsw;	/* Least significant word */
+		uint32_t msw;	/* Most significant word */
+#endif
+	} val;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t	w1;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint32_t pt0_phy_type	: 8;
+		uint32_t pt1_phy_type	: 8;
+		uint32_t pt2_phy_type	: 8;
+		uint32_t pt3_phy_type	: 8;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint32_t pt3_phy_type	: 8;
+		uint32_t pt2_phy_type	: 8;
+		uint32_t pt1_phy_type	: 8;
+		uint32_t pt0_phy_type	: 8;
+#endif
+		} w0;
+
+#if defined(_LITTLE_ENDIAN)
+		uint32_t	w1;
+#endif
+	} bits;
+} phy_type_t;
+
+
+typedef union _intr_num_t {
+	uint64_t value;
+
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t msw;	/* Most significant word */
+		uint32_t lsw;	/* Least significant word */
+#elif defined(_LITTLE_ENDIAN)
+		uint32_t lsw;	/* Least significant word */
+		uint32_t msw;	/* Most significant word */
+#endif
+	} val;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t	w1;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint32_t pt0_intr_num	: 8;
+		uint32_t pt1_intr_num	: 8;
+		uint32_t pt2_intr_num	: 8;
+		uint32_t pt3_intr_num	: 8;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint32_t pt3_intr_num	: 8;
+		uint32_t pt2_intr_num	: 8;
+		uint32_t pt1_intr_num	: 8;
+		uint32_t pt0_intr_num	: 8;
+#endif
+		} w0;
+
+#if defined(_LITTLE_ENDIAN)
+		uint32_t	w1;
+#endif
+	} bits;
+} intr_num_t;
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif	/* _SYS_NXGE_NXGE_ESPC_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/sys/nxge/nxge_espc_hw.h	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,64 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef	_SYS_NXGE_NXGE_ESPC_HW_H
+#define	_SYS_NXGE_NXGE_ESPC_HW_H
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#include <nxge_defs.h>
+
+/* EPC / SPC Registers offsets */
+#define	ESPC_PIO_EN_REG		0x040000
+#define	ESPC_PIO_EN_MASK	0x0000000000000001ULL
+#define	ESPC_PIO_STATUS_REG	0x040008
+
+/* EPC Status Register */
+#define	EPC_READ_INITIATE	(1ULL << 31)
+#define	EPC_READ_COMPLETE	(1 << 30)
+#define	EPC_WRITE_INITIATE	(1 << 29)
+#define	EPC_WRITE_COMPLETE	(1 << 28)
+#define	EPC_EEPROM_ADDR_BITS	0x3FFFF
+#define	EPC_EEPROM_ADDR_SHIFT	8
+#define	EPC_EEPROM_ADDR_MASK	(EPC_EEPROM_ADDR_BITS << EPC_EEPROM_ADDR_SHIFT)
+#define	EPC_EEPROM_DATA_MASK	0xFF
+
+#define	EPC_RW_WAIT		10	/* TBD */
+
+#define	ESPC_NCR_REG		0x040020   /* Count 128, step 8 */
+#define	ESPC_REG_ADDR(reg)	(FZC_PROM + (reg))
+
+#define	ESPC_NCR_REGN(n)	((ESPC_REG_ADDR(ESPC_NCR_REG)) + n*8)
+#define	ESPC_NCR_VAL_MASK	0x00000000FFFFFFFFULL
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif	/* _SYS_NXGE_NXGE_ESPC_HW_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/sys/nxge/nxge_fflp.h	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,233 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef	_SYS_NXGE_NXGE_FFLP_H
+#define	_SYS_NXGE_NXGE_FFLP_H
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#include <npi_fflp.h>
+
+#define	MAX_PARTITION 8
+
+typedef	struct _fflp_errlog {
+	uint32_t		vlan;
+	uint32_t		tcam;
+	uint32_t		hash_pio[MAX_PARTITION];
+	uint32_t		hash_lookup1;
+	uint32_t		hash_lookup2;
+} fflp_errlog_t, *p_fflp_errlog_t;
+
+typedef struct _fflp_stats {
+	uint32_t 		tcam_entries;
+	uint32_t 		fcram_entries;
+	uint32_t 		tcam_parity_err;
+	uint32_t 		tcam_ecc_err;
+	uint32_t 		vlan_parity_err;
+	uint32_t 		hash_lookup_err;
+	uint32_t 		hash_pio_err[MAX_PARTITION];
+	fflp_errlog_t		errlog;
+} nxge_fflp_stats_t, *p_nxge_fflp_stats_t;
+
+/*
+ * The FCRAM (hash table) consists of 1M cells,
+ * each 64 bytes wide. Each cell can hold one of:
+ * 2 IPv4 exact match entries (32 bytes each)
+ * 1 IPv6 exact match entry (56 bytes) plus
+ *    1 optimistic match entry (8 bytes)
+ * 8 optimistic match entries (8 bytes each)
+ * In the case of an IPv4 exact match, either half of the cell
+ * (the first or the second 32 bytes) can instead be used
+ * to hold 4 optimistic matches.
+ */
+
+#define	FCRAM_CELL_EMPTY	0x00
+#define	FCRAM_CELL_IPV4_IPV4	0x01
+#define	FCRAM_CELL_IPV4_OPT	0x02
+#define	FCRAM_CELL_OPT_IPV4	0x04
+#define	FCRAM_CELL_IPV6_OPT	0x08
+#define	FCRAM_CELL_OPT_OPT	0x10
+
+
+#define	FCRAM_SUBAREA0_OCCUPIED	0x01
+#define	FCRAM_SUBAREA1_OCCUPIED	0x02
+#define	FCRAM_SUBAREA2_OCCUPIED	0x04
+#define	FCRAM_SUBAREA3_OCCUPIED	0x08
+
+#define	FCRAM_SUBAREA4_OCCUPIED	0x10
+#define	FCRAM_SUBAREA5_OCCUPIED	0x20
+#define	FCRAM_SUBAREA6_OCCUPIED	0x40
+#define	FCRAM_SUBAREA7_OCCUPIED	0x80
+
+#define	FCRAM_IPV4_SUBAREA0_OCCUPIED \
+	(FCRAM_SUBAREA0_OCCUPIED | FCRAM_SUBAREA1_OCCUPIED | \
+	FCRAM_SUBAREA2_OCCUPIED | FCRAM_SUBAREA3_OCCUPIED)
+
+#define	FCRAM_IPV4_SUBAREA4_OCCUPIED \
+	(FCRAM_SUBAREA4_OCCUPIED | FCRAM_SUBAREA5_OCCUPIED | \
+	FCRAM_SUBAREA6_OCCUPIED | FCRAM_SUBAREA7_OCCUPIED)
+
+
+#define	FCRAM_IPV6_SUBAREA0_OCCUPIED \
+	(FCRAM_SUBAREA0_OCCUPIED | FCRAM_SUBAREA1_OCCUPIED | \
+	FCRAM_SUBAREA2_OCCUPIED | FCRAM_SUBAREA3_OCCUPIED | \
+	FCRAM_SUBAREA4_OCCUPIED | FCRAM_SUBAREA5_OCCUPIED | \
+	FCRAM_SUBAREA6_OCCUPIED)
+
+	/*
+	 * The current occupancy state of each FCRAM cell is
+	 * described by the fcram_cell_t data structure.
+	 * The "type" field denotes the type of entry (or combination)
+	 * the cell holds (FCRAM_CELL_EMPTY ... FCRAM_CELL_OPT_OPT).
+	 * The "occupied" field indicates which of the individual 8-byte
+	 * subareas within the cell are occupied.
+	 */
+
+typedef struct _fcram_cell {
+	uint32_t 		type:8;
+	uint32_t 		occupied:8;
+	uint32_t 		shadow_loc:16;
+} fcram_cell_t, *p_fcram_cell_t;
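+/*
+ * Illustrative sketch (editor's addition, not part of the original header):
+ * how a cell holding two IPv4 exact match entries might be tracked with
+ * the definitions above.  The guard macro and function name are
+ * hypothetical and never defined.
+ */
+#ifdef NXGE_FFLP_EXAMPLE_SKETCH
+static void
+example_mark_cell_ipv4_pair(fcram_cell_t *cell)
+{
+	/* both 32-byte halves hold an IPv4 exact match entry */
+	cell->type = FCRAM_CELL_IPV4_IPV4;
+	cell->occupied = FCRAM_IPV4_SUBAREA0_OCCUPIED |
+	    FCRAM_IPV4_SUBAREA4_OCCUPIED;
+}
+#endif	/* NXGE_FFLP_EXAMPLE_SKETCH */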
+
+typedef struct _fcram_parition {
+	uint8_t 		id;
+	uint8_t 		base;
+	uint8_t 		mask;
+	uint8_t 		reloc;
+	uint32_t 		flags;
+#define	HASH_PARTITION_ENABLED 1
+	uint32_t 		offset;
+	uint32_t 		size;
+} fcram_parition_t, *p_fcram_partition_t;
+
+
+typedef struct _tcam_flow_spec {
+	tcam_entry_t tce;
+	uint64_t flags;
+	uint64_t user_info;
+} tcam_flow_spec_t, *p_tcam_flow_spec_t;
+
+
+/*
+ * Used for configuration.
+ * ndd as well as nxge.conf use the following definitions
+ */
+
+#define	NXGE_CLASS_CONFIG_PARAMS	20
+/* Used for ip class flow key and tcam key config */
+
+#define	NXGE_CLASS_TCAM_LOOKUP		0x0001
+#define	NXGE_CLASS_TCAM_USE_SRC_ADDR	0x0002
+#define	NXGE_CLASS_FLOW_USE_PORTNUM	0x0010
+#define	NXGE_CLASS_FLOW_USE_L2DA	0x0020
+#define	NXGE_CLASS_FLOW_USE_VLAN	0x0040
+#define	NXGE_CLASS_FLOW_USE_PROTO	0x0080
+#define	NXGE_CLASS_FLOW_USE_IPSRC	0x0100
+#define	NXGE_CLASS_FLOW_USE_IPDST	0x0200
+#define	NXGE_CLASS_FLOW_USE_SRC_PORT	0x0400
+#define	NXGE_CLASS_FLOW_USE_DST_PORT	0x0800
+#define	NXGE_CLASS_DISCARD		0x80000000
+
+/* these are used for quick configs */
+#define	NXGE_CLASS_FLOW_WEB_SERVER	(NXGE_CLASS_FLOW_USE_IPSRC | \
+					NXGE_CLASS_FLOW_USE_SRC_PORT)
+
+#define	NXGE_CLASS_FLOW_GEN_SERVER	(NXGE_CLASS_FLOW_USE_IPSRC | \
+					NXGE_CLASS_FLOW_USE_IPDST | \
+					NXGE_CLASS_FLOW_USE_SRC_PORT |	\
+					NXGE_CLASS_FLOW_USE_DST_PORT | \
+					NXGE_CLASS_FLOW_USE_PROTO | \
+					NXGE_CLASS_FLOW_USE_L2DA | \
+					NXGE_CLASS_FLOW_USE_VLAN)
+
+/*
+ * Used for configuring the user-programmable classes.
+ */
+
+
+/* Ethernet Classes */
+#define	NXGE_CLASS_CFG_ETHER_TYPE_MASK		0x0000FFFF
+#define	NXGE_CLASS_CFG_ETHER_ENABLE_MASK	0x40000000
+
+/* IP Classes */
+#define	NXGE_CLASS_CFG_IP_TOS_MASK		0x000000FF
+#define	NXGE_CLASS_CFG_IP_TOS_SHIFT		0
+#define	NXGE_CLASS_CFG_IP_TOS_MASK_MASK		0x0000FF00
+#define	NXGE_CLASS_CFG_IP_TOS_MASK_SHIFT	8
+#define	NXGE_CLASS_CFG_IP_PROTO_MASK		0x00FFFF00
+#define	NXGE_CLASS_CFG_IP_PROTO_SHIFT		16
+
+#define	NXGE_CLASS_CFG_IP_IPV6_MASK		0x01000000
+#define	NXGE_CLASS_CFG_IP_PARAM_MASK	(NXGE_CLASS_CFG_IP_TOS_MASK | \
+					NXGE_CLASS_CFG_IP_TOS_MASK_MASK | \
+					NXGE_CLASS_CFG_IP_PROTO_MASK | \
+					NXGE_CLASS_CFG_IP_IPV6_MASK)
+
+#define	NXGE_CLASS_CFG_IP_ENABLE_MASK		0x40000000
+
+typedef struct _vlan_rdcgrp_map {
+	uint32_t		rsrvd:8;
+	uint32_t		vid:16;
+	uint32_t		rdc_grp:8;
+}	vlan_rdcgrp_map_t, *p_vlan_rdcgrp_map_t;
+
+#define	NXGE_INIT_VLAN_RDCG_TBL	32
+
+typedef struct _nxge_classify {
+	nxge_os_mutex_t 	tcam_lock;
+	nxge_os_mutex_t		fcram_lock;
+	nxge_os_mutex_t		hash_lock[MAX_PARTITION];
+	uint32_t 		tcam_size;
+	uint32_t 		state;
+#define	NXGE_FFLP_HW_RESET	0x1
+#define	NXGE_FFLP_HW_INIT	0x2
+#define	NXGE_FFLP_SW_INIT	0x4
+#define	NXGE_FFLP_FCRAM_PART	0x80000000
+	p_nxge_fflp_stats_t	fflp_stats;
+
+	tcam_flow_spec_t    *tcam_entries;
+	uint8_t		    tcam_location;
+#define	NXGE_FLOW_NO_SUPPORT  0x0
+#define	NXGE_FLOW_USE_TCAM    0x1
+#define	NXGE_FLOW_USE_FCRAM   0x2
+#define	NXGE_FLOW_USE_TCAM_FCRAM   0x3
+
+#define	NXGE_FLOW_COMPUTE_H1   0x10
+#define	NXGE_FLOW_COMPUTE_H2   0x20
+	uint8_t	fragment_bug;
+	uint8_t	fragment_bug_location;
+	fcram_cell_t		*hash_table; /* allocated for Neptune only */
+	fcram_parition_t    partition[MAX_PARTITION];
+} nxge_classify_t, *p_nxge_classify_t;
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _SYS_NXGE_NXGE_FFLP_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/sys/nxge/nxge_fflp_hash.h	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,58 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_NXGE_NXGE_CRC_H
+#define	_SYS_NXGE_NXGE_CRC_H
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void nxge_crc32c_init(void);
+uint32_t nxge_crc32c(uint32_t, const uint8_t *, int);
+
+void nxge_crc_ccitt_init(void);
+uint16_t nxge_crc_ccitt(uint16_t, const uint8_t *, int);
+
+uint32_t nxge_compute_h1_table1(uint32_t, uint32_t *, uint32_t);
+uint32_t nxge_compute_h1_table4(uint32_t, uint32_t *, uint32_t);
+uint32_t nxge_compute_h1_serial(uint32_t crcin, uint32_t *, uint32_t);
+
+#define	nxge_compute_h2(cin, flow, len)			\
+	nxge_crc_ccitt(cin, flow, len)
+
+void nxge_init_h1_table(void);
+
+#define	nxge_compute_h1(cin, flow, len)			\
+	nxge_compute_h1_table4(cin, flow, len)
+
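+/*
+ * Illustrative sketch (editor's addition, not part of the original header):
+ * one possible calling sequence for the H1/H2 helpers declared above.
+ * The guard macro, function name, seed values and the split into word/byte
+ * views of the flow key are assumptions for illustration only.
+ */
+#ifdef NXGE_FFLP_HASH_EXAMPLE_SKETCH
+static void
+example_flow_hash(uint32_t *flow_words, const uint8_t *flow_bytes, int len)
+{
+	uint32_t h1;
+	uint16_t h2;
+
+	/* one-time table setup */
+	nxge_init_h1_table();
+	nxge_crc_ccitt_init();
+
+	/* compute H1 over the word view and H2 over the byte view */
+	h1 = nxge_compute_h1(0xffffffffU, flow_words, (uint32_t)len);
+	h2 = nxge_compute_h2(0xffff, flow_bytes, len);
+	(void) h1;
+	(void) h2;
+}
+#endif	/* NXGE_FFLP_HASH_EXAMPLE_SKETCH */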
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_NXGE_NXGE_CRC_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/sys/nxge/nxge_fflp_hw.h	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,1664 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef	_SYS_NXGE_NXGE_FFLP_HW_H
+#define	_SYS_NXGE_NXGE_FFLP_HW_H
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#include <nxge_defs.h>
+
+
+/* FZC_FFLP Offsets */
+#define	    FFLP_ENET_VLAN_TBL_REG	(FZC_FFLP + 0x00000)
+
+	/* defines for FFLP_ENET_VLAN_TBL */
+
+#define	ENET_VLAN_TBL_VLANRDCTBLN0_MASK 	0x0000000000000003ULL
+#define	ENET_VLAN_TBL_VLANRDCTBLN0_SHIFT 	0
+#define	ENET_VLAN_TBL_VPR0_MASK			0x0000000000000008ULL
+#define	ENET_VLAN_TBL_VPR0_SHIFT		3
+
+#define	ENET_VLAN_TBL_VLANRDCTBLN1_MASK 	0x0000000000000030ULL
+#define	ENET_VLAN_TBL_VLANRDCTBLN1_SHIFT	4
+#define	ENET_VLAN_TBL_VPR1_MASK			0x0000000000000080ULL
+#define	ENET_VLAN_TBL_VPR1_SHIFT		7
+
+#define	ENET_VLAN_TBL_VLANRDCTBLN2_MASK 	0x0000000000000300ULL
+#define	ENET_VLAN_TBL_VLANRDCTBLN2_SHIFT 	8
+#define	ENET_VLAN_TBL_VPR2_MASK			0x0000000000000800ULL
+#define	ENET_VLAN_TBL_VPR2_SHIFT		11
+
+#define	ENET_VLAN_TBL_VLANRDCTBLN3_MASK 	0x0000000000003000ULL
+#define	ENET_VLAN_TBL_VLANRDCTBLN3_SHIFT 	12
+#define	ENET_VLAN_TBL_VPR3_MASK			0x0000000000008000ULL
+#define	ENET_VLAN_TBL_VPR3_SHIFT		15
+
+#define	ENET_VLAN_TBL_PARITY0_MASK		0x0000000000010000ULL
+#define	ENET_VLAN_TBL_PARITY0_SHIFT		16
+#define	ENET_VLAN_TBL_PARITY1_MASK		0x0000000000020000ULL
+#define	ENET_VLAN_TBL_PARITY1_SHIFT		17
+
+
+
+typedef union _fflp_enet_vlan_tbl_t {
+    uint64_t value;
+    struct {
+#if	defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#ifdef _BIT_FIELDS_HTOL
+			uint32_t rsrvd:14;
+			uint32_t parity1:1;
+			uint32_t parity0:1;
+			uint32_t vpr3:1;
+			uint32_t vlanrdctbln3:3;
+			uint32_t vpr2:1;
+			uint32_t vlanrdctbln2:3;
+			uint32_t vpr1:1;
+			uint32_t vlanrdctbln1:3;
+			uint32_t vpr0:1;
+			uint32_t vlanrdctbln0:3;
+#else
+			uint32_t vlanrdctbln0:3;
+			uint32_t vpr0:1;
+			uint32_t vlanrdctbln1:3;
+			uint32_t vpr1:1;
+			uint32_t vlanrdctbln2:3;
+			uint32_t vpr2:1;
+			uint32_t vlanrdctbln3:3;
+			uint32_t vpr3:1;
+			uint32_t parity0:1;
+			uint32_t parity1:1;
+			uint32_t rsrvd:14;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} fflp_enet_vlan_tbl_t, *p_fflp_enet_vlan_tbl_t;
+
+
+#define	FFLP_TCAM_CLS_BASE_OFFSET (FZC_FFLP + 0x20000)
+#define	FFLP_L2_CLS_ENET1_REG	  (FZC_FFLP + 0x20000)
+#define	FFLP_L2_CLS_ENET2_REG	  (FZC_FFLP + 0x20008)
+
+
+
+typedef union _tcam_class_prg_ether_t {
+#define	TCAM_ENET_USR_CLASS_ENABLE   0x1
+#define	TCAM_ENET_USR_CLASS_DISABLE  0x0
+
+    uint64_t value;
+    struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#ifdef _BIT_FIELDS_HTOL
+			uint32_t rsrvd:15;
+			uint32_t valid:1;
+			uint32_t etype:16;
+#else
+			uint32_t etype:16;
+			uint32_t valid:1;
+			uint32_t rsrvd:15;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} tcam_class_prg_ether_t, *p_tcam_class_prg_ether_t;
+
+
+#define		FFLP_L3_CLS_IP_U4_REG	(FZC_FFLP + 0x20010)
+#define		FFLP_L3_CLS_IP_U5_REG	(FZC_FFLP + 0x20018)
+#define		FFLP_L3_CLS_IP_U6_REG	(FZC_FFLP + 0x20020)
+#define		FFLP_L3_CLS_IP_U7_REG	(FZC_FFLP + 0x20028)
+
+typedef union _tcam_class_prg_ip_t {
+#define	TCAM_IP_USR_CLASS_ENABLE   0x1
+#define	TCAM_IP_USR_CLASS_DISABLE  0x0
+
+    uint64_t value;
+    struct {
+#if	defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#ifdef _BIT_FIELDS_HTOL
+			uint32_t rsrvd:6;
+			uint32_t valid:1;
+			uint32_t ipver:1;
+			uint32_t pid:8;
+			uint32_t tosmask:8;
+			uint32_t tos:8;
+#else
+			uint32_t tos:8;
+			uint32_t tosmask:8;
+			uint32_t pid:8;
+			uint32_t ipver:1;
+			uint32_t valid:1;
+			uint32_t rsrvd:6;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} tcam_class_prg_ip_t, *p_tcam_class_prg_ip_t;
+/* define the classes which use the above structure */
+
+typedef enum fflp_tcam_class {
+    TCAM_CLASS_INVALID = 0,
+    TCAM_CLASS_DUMMY = 1,
+    TCAM_CLASS_ETYPE_1 = 2,
+    TCAM_CLASS_ETYPE_2,
+    TCAM_CLASS_IP_USER_4,
+    TCAM_CLASS_IP_USER_5,
+    TCAM_CLASS_IP_USER_6,
+    TCAM_CLASS_IP_USER_7,
+    TCAM_CLASS_TCP_IPV4,
+    TCAM_CLASS_UDP_IPV4,
+    TCAM_CLASS_AH_ESP_IPV4,
+    TCAM_CLASS_SCTP_IPV4,
+    TCAM_CLASS_TCP_IPV6,
+    TCAM_CLASS_UDP_IPV6,
+    TCAM_CLASS_AH_ESP_IPV6,
+    TCAM_CLASS_SCTP_IPV6,
+    TCAM_CLASS_ARP,
+    TCAM_CLASS_RARP,
+    TCAM_CLASS_DUMMY_12,
+    TCAM_CLASS_DUMMY_13,
+    TCAM_CLASS_DUMMY_14,
+    TCAM_CLASS_DUMMY_15,
+    TCAM_CLASS_MAX
+} tcam_class_t;
+
+
+
+/*
+ * Specify how to build TCAM key for L3
+ * IP Classes. Both User configured and
+ * hardwired IP services are included.
+ * These are the supported 12 classes.
+ */
+
+#define		FFLP_TCAM_KEY_BASE_OFFSET	(FZC_FFLP + 0x20030)
+#define		FFLP_TCAM_KEY_IP_USR4_REG		(FZC_FFLP + 0x20030)
+#define		FFLP_TCAM_KEY_IP_USR5_REG		(FZC_FFLP + 0x20038)
+#define		FFLP_TCAM_KEY_IP_USR6_REG		(FZC_FFLP + 0x20040)
+#define		FFLP_TCAM_KEY_IP_USR7_REG		(FZC_FFLP + 0x20048)
+#define		FFLP_TCAM_KEY_IP4_TCP_REG		(FZC_FFLP + 0x20050)
+#define		FFLP_TCAM_KEY_IP4_UDP_REG		(FZC_FFLP + 0x20058)
+#define		FFLP_TCAM_KEY_IP4_AH_ESP_REG	(FZC_FFLP + 0x20060)
+#define		FFLP_TCAM_KEY_IP4_SCTP_REG		(FZC_FFLP + 0x20068)
+#define		FFLP_TCAM_KEY_IP6_TCP_REG		(FZC_FFLP + 0x20070)
+#define		FFLP_TCAM_KEY_IP6_UDP_REG		(FZC_FFLP + 0x20078)
+#define		FFLP_TCAM_KEY_IP6_AH_ESP_REG	(FZC_FFLP + 0x20080)
+#define		FFLP_TCAM_KEY_IP6_SCTP_REG		(FZC_FFLP + 0x20088)
+
+
+typedef union _tcam_class_key_ip_t {
+    uint64_t value;
+    struct {
+#if	defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#ifdef _BIT_FIELDS_HTOL
+			uint32_t rsrvd2:28;
+			uint32_t discard:1;
+			uint32_t tsel:1;
+			uint32_t rsrvd:1;
+			uint32_t ipaddr:1;
+#else
+			uint32_t ipaddr:1;
+			uint32_t rsrvd:1;
+			uint32_t tsel:1;
+			uint32_t discard:1;
+			uint32_t rsrvd2:28;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} tcam_class_key_ip_t, *p_tcam_class_key_ip_t;
+
+
+
+#define	FFLP_TCAM_KEY_0_REG			(FZC_FFLP + 0x20090)
+#define	FFLP_TCAM_KEY_1_REG		(FZC_FFLP + 0x20098)
+#define	FFLP_TCAM_KEY_2_REG		(FZC_FFLP + 0x200A0)
+#define	FFLP_TCAM_KEY_3_REG	(FZC_FFLP + 0x200A8)
+#define	FFLP_TCAM_MASK_0_REG	(FZC_FFLP + 0x200B0)
+#define	FFLP_TCAM_MASK_1_REG	(FZC_FFLP + 0x200B8)
+#define	FFLP_TCAM_MASK_2_REG	(FZC_FFLP + 0x200C0)
+#define	FFLP_TCAM_MASK_3_REG	(FZC_FFLP + 0x200C8)
+
+#define		FFLP_TCAM_CTL_REG		(FZC_FFLP + 0x200D0)
+
+/* bit defines for FFLP_TCAM_CTL register */
+#define	   TCAM_CTL_TCAM_WR		  0x0ULL
+#define	   TCAM_CTL_TCAM_RD		  0x040000ULL
+#define	   TCAM_CTL_TCAM_CMP		  0x080000ULL
+#define	   TCAM_CTL_RAM_WR		  0x100000ULL
+#define	   TCAM_CTL_RAM_RD		  0x140000ULL
+#define	   TCAM_CTL_RWC_STAT		  0x0020000ULL
+#define	   TCAM_CTL_RWC_MATCH		  0x0010000ULL
+
+
+typedef union _tcam_ctl_t {
+#define	TCAM_CTL_RWC_TCAM_WR	0x0
+#define	TCAM_CTL_RWC_TCAM_RD	0x1
+#define	TCAM_CTL_RWC_TCAM_CMP	0x2
+#define	TCAM_CTL_RWC_RAM_WR	0x4
+#define	TCAM_CTL_RWC_RAM_RD	0x5
+#define	TCAM_CTL_RWC_RWC_STAT	0x1
+#define	TCAM_CTL_RWC_RWC_MATCH	0x1
+
+	uint64_t value;
+	struct {
+#if	defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#ifdef _BIT_FIELDS_HTOL
+			uint32_t rsrvd2:11;
+			uint32_t rwc:3;
+			uint32_t stat:1;
+			uint32_t match:1;
+			uint32_t rsrvd:6;
+			uint32_t location:10;
+#else
+			uint32_t location:10;
+			uint32_t rsrvd:6;
+			uint32_t match:1;
+			uint32_t stat:1;
+			uint32_t rwc:3;
+			uint32_t rsrvd2:11;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} tcam_ctl_t, *p_tcam_ctl_t;
+
+
+
+/* Bit defines for TCAM ASC RAM */
+
+
+typedef union _tcam_res_t {
+	uint64_t value;
+	struct {
+#if	defined(_BIG_ENDIAN)
+		struct {
+			uint32_t rsrvd:22;
+			uint32_t syndrome:10;
+		} hdw;
+#endif
+		struct {
+#ifdef _BIT_FIELDS_HTOL
+			uint32_t syndrome:6;
+			uint32_t zfid:12;
+			uint32_t v4_ecc_ck:1;
+			uint32_t disc:1;
+			uint32_t tres:2;
+			uint32_t rdctbl:3;
+			uint32_t offset:5;
+			uint32_t zfld:1;
+			uint32_t age:1;
+#else
+			uint32_t age:1;
+			uint32_t zfld:1;
+			uint32_t offset:5;
+			uint32_t rdctbl:3;
+			uint32_t tres:2;
+			uint32_t disc:1;
+			uint32_t v4_ecc_ck:1;
+			uint32_t zfid:12;
+			uint32_t syndrome:6;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		struct {
+			uint32_t syndrome:10;
+			uint32_t rsrvd:22;
+		} hdw;
+#endif
+	} bits;
+} tcam_res_t, *p_tcam_res_t;
+
+
+
+#define	TCAM_ASC_DATA_AGE		0x0000000000000001ULL
+#define	TCAM_ASC_DATA_AGE_SHIFT		0x0
+#define	TCAM_ASC_DATA_ZFVLD		0x0000000000000002ULL
+#define	TCAM_ASC_DATA_ZFVLD_SHIFT	1
+
+#define	TCAM_ASC_DATA_OFFSET_MASK	0x000000000000007CULL
+#define	TCAM_ASC_DATA_OFFSET_SHIFT	2
+
+#define	TCAM_ASC_DATA_RDCTBL_MASK	0x0000000000000380ULL
+#define	TCAM_ASC_DATA_RDCTBL_SHIFT	7
+#define	TCAM_ASC_DATA_TRES_MASK		0x0000000000000C00ULL
+#define	TRES_CONT_USE_L2RDC		0x00
+#define	TRES_TERM_USE_OFFSET		0x01
+#define	TRES_CONT_OVRD_L2RDC		0x02
+#define	TRES_TERM_OVRD_L2RDC		0x03
+
+#define	TCAM_ASC_DATA_TRES_SHIFT	10
+#define	TCAM_TRES_CONT_USE_L2RDC	\
+		(0x0000000000000000ULL << TCAM_ASC_DATA_TRES_SHIFT)
+#define	TCAM_TRES_TERM_USE_OFFSET	\
+		(0x0000000000000001ULL << TCAM_ASC_DATA_TRES_SHIFT)
+#define	TCAM_TRES_CONT_OVRD_L2RDC	\
+		(0x0000000000000002ULL << TCAM_ASC_DATA_TRES_SHIFT)
+#define	TCAM_TRES_TERM_OVRD_L2RDC	\
+		(0x0000000000000003ULL << TCAM_ASC_DATA_TRES_SHIFT)
+
+#define	TCAM_ASC_DATA_DISC_MASK		0x0000000000001000ULL
+#define	TCAM_ASC_DATA_DISC_SHIFT	12
+#define	TCAM_ASC_DATA_V4_ECC_OK_MASK    0x0000000000002000ULL
+#define	TCAM_ASC_DATA_V4_ECC_OK_SHIFT	13
+#define	TCAM_ASC_DATA_V4_ECC_OK		\
+		(0x0000000000000001ULL << TCAM_ASC_DATA_V4_ECC_OK_SHIFT)
+
+#define	TCAM_ASC_DATA_ZFID_MASK		0x0000000003FFC000ULL
+#define	TCAM_ASC_DATA_ZFID_SHIFT	14
+#define	TCAM_ASC_DATA_ZFID(value)	\
+		((value & TCAM_ASC_DATA_ZFID_MASK) >> TCAM_ASC_DATA_ZFID_SHIFT)
+
+#define	TCAM_ASC_DATA_SYNDR_MASK	0x000003FFFC000000ULL
+#define	TCAM_ASC_DATA_SYNDR_SHIFT	26
+#define	TCAM_ASC_DATA_SYNDR(value)  \
+	((value & TCAM_ASC_DATA_SYNDR_MASK) >> TCAM_ASC_DATA_SYNDR_SHIFT)
+
+
+	/* error registers */
+
+#define	FFLP_VLAN_PAR_ERR_REG		(FZC_FFLP + 0x08000)
+
+typedef union _vlan_par_err_t {
+    uint64_t value;
+    struct {
+#if	defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#ifdef _BIT_FIELDS_HTOL
+			uint32_t err:1;
+			uint32_t m_err:1;
+			uint32_t addr:12;
+			uint32_t data:18;
+#else
+			uint32_t data:18;
+			uint32_t addr:12;
+			uint32_t m_err:1;
+			uint32_t err:1;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} vlan_par_err_t, *p_vlan_par_err_t;
+
+
+#define		FFLP_TCAM_ERR_REG		(FZC_FFLP + 0x200D8)
+
+typedef union _tcam_err_t {
+    uint64_t value;
+    struct {
+#if	defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#ifdef _BIT_FIELDS_HTOL
+			uint32_t err:1;
+			uint32_t p_ecc:1;
+			uint32_t mult:1;
+			uint32_t rsrvd:5;
+			uint32_t addr:8;
+			uint32_t syndrome:16;
+#else
+			uint32_t syndrome:16;
+			uint32_t addr:8;
+			uint32_t rsrvd:5;
+			uint32_t mult:1;
+			uint32_t p_ecc:1;
+			uint32_t err:1;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} tcam_err_t, *p_tcam_err_t;
+
+
+#define		TCAM_ERR_SYNDROME_MASK		0x000000000000FFFFULL
+#define		TCAM_ERR_MULT_SHIFT		29
+#define		TCAM_ERR_MULT			0x0000000020000000ULL
+#define		TCAM_ERR_P_ECC			0x0000000040000000ULL
+#define		TCAM_ERR_ERR			0x0000000080000000ULL
+
+#define		HASH_LKUP_ERR_LOG1_REG		(FZC_FFLP + 0x200E0)
+#define		HASH_LKUP_ERR_LOG2_REG		(FZC_FFLP + 0x200E8)
+
+
+
+typedef union _hash_lookup_err_log1_t {
+    uint64_t value;
+    struct {
+#if	defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#ifdef _BIT_FIELDS_HTOL
+			uint32_t rsrvd:28;
+			uint32_t ecc_err:1;
+			uint32_t mult_lk:1;
+			uint32_t cu:1;
+			uint32_t mult_bit:1;
+#else
+			uint32_t mult_bit:1;
+			uint32_t cu:1;
+			uint32_t mult_lk:1;
+			uint32_t ecc_err:1;
+			uint32_t rsrvd:28;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} hash_lookup_err_log1_t, *p_hash_lookup_err_log1_t;
+
+
+
+typedef union _hash_lookup_err_log2_t {
+    uint64_t value;
+    struct {
+#if	defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#ifdef _BIT_FIELDS_HTOL
+			uint32_t rsrvd:1;
+			uint32_t h1:20;
+			uint32_t subarea:3;
+			uint32_t syndrome:8;
+#else
+			uint32_t syndrome:8;
+			uint32_t subarea:3;
+			uint32_t h1:20;
+			uint32_t rsrvd:1;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} hash_lookup_err_log2_t, *p_hash_lookup_err_log2_t;
+
+
+
+#define		FFLP_FCRAM_ERR_TST0_REG	(FZC_FFLP + 0x20128)
+
+typedef union _fcram_err_tst0_t {
+    uint64_t value;
+    struct {
+#if	defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#ifdef _BIT_FIELDS_HTOL
+			uint32_t rsrvd:24;
+			uint32_t syndrome_mask:8;
+#else
+			uint32_t syndrome_mask:8;
+			uint32_t rsrvd:24;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} fcram_err_tst0_t, *p_fcram_err_tst0_t;
+
+
+#define		FFLP_FCRAM_ERR_TST1_REG	(FZC_FFLP + 0x20130)
+#define		FFLP_FCRAM_ERR_TST2_REG	(FZC_FFLP + 0x20138)
+
+typedef union _fcram_err_tst_t {
+    uint64_t value;
+    struct {
+#if	defined(_BIG_ENDIAN)
+		struct {
+			uint32_t dat;
+		} hdw;
+#endif
+		struct {
+			uint32_t dat;
+		} ldw;
+#ifndef _BIG_ENDIAN
+		struct {
+			uint32_t dat;
+		} hdw;
+#endif
+	} bits;
+} fcram_err_tst1_t, *p_fcram_err_tst1_t,
+	fcram_err_tst2_t, *p_fcram_err_tst2_t,
+	fcram_err_data_t, *p_fcram_err_data_t;
+
+
+
+#define		FFLP_ERR_MSK_REG	(FZC_FFLP + 0x20140)
+
+typedef union _fflp_err_mask_t {
+    uint64_t value;
+    struct {
+#if	defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#ifdef _BIT_FIELDS_HTOL
+			uint32_t rsrvd:21;
+			uint32_t hash_tbl_dat:8;
+			uint32_t hash_tbl_lkup:1;
+			uint32_t tcam:1;
+			uint32_t vlan:1;
+#else
+			uint32_t vlan:1;
+			uint32_t tcam:1;
+			uint32_t hash_tbl_lkup:1;
+			uint32_t hash_tbl_dat:8;
+			uint32_t rsrvd:21;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} fflp_err_mask_t, *p_fflp_err_mask_t;
+
+#define	FFLP_ERR_VLAN_MASK 0x00000001ULL
+#define	FFLP_ERR_VLAN 0x00000001ULL
+#define	FFLP_ERR_VLAN_SHIFT 0x0
+
+#define	FFLP_ERR_TCAM_MASK 0x00000002ULL
+#define	FFLP_ERR_TCAM 0x00000001ULL
+#define	FFLP_ERR_TCAM_SHIFT 0x1
+
+#define	FFLP_ERR_HASH_TBL_LKUP_MASK 0x00000004ULL
+#define	FFLP_ERR_HASH_TBL_LKUP 0x00000001ULL
+#define	FFLP_ERR_HASH_TBL_LKUP_SHIFT 0x2
+
+#define	FFLP_ERR_HASH_TBL_DAT_MASK 0x00000007F8ULL
+#define	FFLP_ERR_HASH_TBL_DAT 0x0000000FFULL
+#define	FFLP_ERR_HASH_TBL_DAT_SHIFT 0x3
+
+#define	FFLP_ERR_MASK_ALL (FFLP_ERR_VLAN_MASK | FFLP_ERR_TCAM_MASK | \
+			    FFLP_ERR_HASH_TBL_LKUP_MASK | \
+			    FFLP_ERR_HASH_TBL_DAT_MASK)
+
+
+#define		FFLP_CFG_1_REG	(FZC_FFLP + 0x20100)
+
+typedef union _fflp_cfg_1_t {
+    uint64_t value;
+    struct {
+#if	defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#ifdef _BIT_FIELDS_HTOL
+			uint32_t rsrvd:5;
+			uint32_t tcam_disable:1;
+			uint32_t pio_dbg_sel:3;
+			uint32_t pio_fio_rst:1;
+			uint32_t pio_fio_lat:2;
+			uint32_t camlatency:4;
+			uint32_t camratio:4;
+			uint32_t fcramratio:4;
+			uint32_t fcramoutdr:4;
+			uint32_t fcramqs:1;
+			uint32_t errordis:1;
+			uint32_t fflpinitdone:1;
+			uint32_t llcsnap:1;
+#else
+			uint32_t llcsnap:1;
+			uint32_t fflpinitdone:1;
+			uint32_t errordis:1;
+			uint32_t fcramqs:1;
+			uint32_t fcramoutdr:4;
+			uint32_t fcramratio:4;
+			uint32_t camratio:4;
+			uint32_t camlatency:4;
+			uint32_t pio_fio_lat:2;
+			uint32_t pio_fio_rst:1;
+			uint32_t pio_dbg_sel:3;
+			uint32_t tcam_disable:1;
+			uint32_t rsrvd:5;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} fflp_cfg_1_t, *p_fflp_cfg_1_t;
+
+
+typedef	enum fflp_fcram_output_drive {
+    FCRAM_OUTDR_NORMAL	= 0x0,
+    FCRAM_OUTDR_STRONG	= 0x5,
+    FCRAM_OUTDR_WEAK	= 0xa
+} fflp_fcram_output_drive_t;
+
+
+typedef	enum fflp_fcram_qs {
+    FCRAM_QS_MODE_QS	= 0x0,
+    FCRAM_QS_MODE_FREE	= 0x1
+} fflp_fcram_qs_t;
+
+#define		FCRAM_PIO_HIGH_PRI	0xf
+#define		FCRAM_PIO_MED_PRI	0xa
+#define		FCRAM_LOOKUP_HIGH_PRI	0x0
+#define		FCRAM_IO_DEFAULT_PRI	FCRAM_PIO_MED_PRI
+
+#define		TCAM_PIO_HIGH_PRI	0xf
+#define		TCAM_PIO_MED_PRI	0xa
+#define		TCAM_LOOKUP_HIGH_PRI	0x0
+#define		TCAM_IO_DEFAULT_PRI	TCAM_PIO_MED_PRI
+
+#define		TCAM_DEFAULT_LATENCY	0x4
+
+
+#define		FFLP_DBG_TRAIN_VCT_REG	(FZC_FFLP + 0x20148)
+
+typedef union _fflp_dbg_train_vct_t {
+    uint64_t value;
+    struct {
+#if	defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#ifdef _BIT_FIELDS_HTOL
+			uint32_t vector;
+#else
+			uint32_t vector;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} fflp_dbg_train_vct_t, *p_fflp_dbg_train_vct_t;
+
+
+
+#define		FFLP_TCP_CFLAG_MSK_REG	(FZC_FFLP + 0x20108)
+
+typedef union _tcp_cflag_mask_t {
+    uint64_t value;
+    struct {
+#if	defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#ifdef _BIT_FIELDS_HTOL
+			uint32_t rsrvd:20;
+			uint32_t mask:12;
+#else
+			uint32_t mask:12;
+			uint32_t rsrvd:20;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} tcp_cflag_mask_t, *p_tcp_cflag_mask_t;
+
+
+
+#define		FFLP_FCRAM_REF_TMR_REG		(FZC_FFLP + 0x20110)
+
+
+typedef union _fcram_ref_tmr_t {
+#define		FCRAM_REFRESH_DEFAULT_MAX_TIME	0x200
+#define		FCRAM_REFRESH_DEFAULT_MIN_TIME	0x200
+#define		FCRAM_REFRESH_DEFAULT_SYS_TIME	0x200
+#define		FCRAM_REFRESH_MAX_TICK		39 /* usecs */
+#define		FCRAM_REFRESH_MIN_TICK		400 /* nsecs */
+
+    uint64_t value;
+    struct {
+#if	defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#ifdef _BIT_FIELDS_HTOL
+			uint32_t max:16;
+			uint32_t min:16;
+#else
+			uint32_t min:16;
+			uint32_t max:16;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} fcram_ref_tmr_t, *p_fcram_ref_tmr_t;
+
+
+
+
+#define		FFLP_FCRAM_FIO_ADDR_REG	(FZC_FFLP + 0x20118)
+
+typedef union _fcram_fio_addr_t {
+    uint64_t value;
+    struct {
+#if	defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#ifdef _BIT_FIELDS_HTOL
+			uint32_t rsrvd:22;
+			uint32_t addr:10;
+#else
+			uint32_t addr:10;
+			uint32_t rsrvd:22;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} fcram_fio_addr_t, *p_fcram_fio_addr_t;
+
+
+#define		FFLP_FCRAM_FIO_DAT_REG	(FZC_FFLP + 0x20120)
+
+typedef union _fcram_fio_dat_t {
+    uint64_t value;
+    struct {
+#if	defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#ifdef _BIT_FIELDS_HTOL
+			uint32_t rsrvd:22;
+			uint32_t addr:10;
+#else
+			uint32_t addr:10;
+			uint32_t rsrvd:22;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} fcram_fio_dat_t, *p_fcram_fio_dat_t;
+
+
+#define	FFLP_FCRAM_PHY_RD_LAT_REG	(FZC_FFLP + 0x20150)
+
+typedef union _fcram_phy_rd_lat_t {
+	uint64_t value;
+	struct {
+#if	defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#ifdef _BIT_FIELDS_HTOL
+			uint32_t rsrvd:24;
+			uint32_t lat:8;
+#else
+			uint32_t lat:8;
+			uint32_t rsrvd:24;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} fcram_phy_rd_lat_t, *p_fcram_phy_rd_lat_t;
+
+
+/*
+ * Specify how to build a flow key for IP
+ * classes, both programmable and hardwired
+ */
+#define		FFLP_FLOW_KEY_BASE_OFFSET		(FZC_FFLP + 0x40000)
+#define		FFLP_FLOW_KEY_IP_USR4_REG		(FZC_FFLP + 0x40000)
+#define		FFLP_FLOW_KEY_IP_USR5_REG		(FZC_FFLP + 0x40008)
+#define		FFLP_FLOW_KEY_IP_USR6_REG		(FZC_FFLP + 0x40010)
+#define		FFLP_FLOW_KEY_IP_USR7_REG		(FZC_FFLP + 0x40018)
+#define		FFLP_FLOW_KEY_IP4_TCP_REG		(FZC_FFLP + 0x40020)
+#define		FFLP_FLOW_KEY_IP4_UDP_REG		(FZC_FFLP + 0x40028)
+#define		FFLP_FLOW_KEY_IP4_AH_ESP_REG	(FZC_FFLP + 0x40030)
+#define		FFLP_FLOW_KEY_IP4_SCTP_REG		(FZC_FFLP + 0x40038)
+#define		FFLP_FLOW_KEY_IP6_TCP_REG		(FZC_FFLP + 0x40040)
+#define		FFLP_FLOW_KEY_IP6_UDP_REG		(FZC_FFLP + 0x40048)
+#define		FFLP_FLOW_KEY_IP6_AH_ESP_REG	(FZC_FFLP + 0x40050)
+#define		FFLP_FLOW_KEY_IP6_SCTP_REG		(FZC_FFLP + 0x40058)
+
+typedef union _flow_class_key_ip_t {
+    uint64_t value;
+    struct {
+#if	defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#ifdef _BIT_FIELDS_HTOL
+			uint32_t rsrvd2:22;
+			uint32_t port:1;
+			uint32_t l2da:1;
+			uint32_t vlan:1;
+			uint32_t ipsa:1;
+			uint32_t ipda:1;
+			uint32_t proto:1;
+			uint32_t l4_0:2;
+			uint32_t l4_1:2;
+#else
+			uint32_t l4_1:2;
+			uint32_t l4_0:2;
+			uint32_t proto:1;
+			uint32_t ipda:1;
+			uint32_t ipsa:1;
+			uint32_t vlan:1;
+			uint32_t l2da:1;
+			uint32_t port:1;
+			uint32_t rsrvd2:22;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} flow_class_key_ip_t, *p_flow_class_key_ip_t;
+
+
+#define		FFLP_H1POLY_REG		(FZC_FFLP + 0x40060)
+
+
+typedef union _hash_h1poly_t {
+    uint64_t value;
+    struct {
+#if	defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+			uint32_t init_value;
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} hash_h1poly_t, *p_hash_h1poly_t;
+
+#define		FFLP_H2POLY_REG		(FZC_FFLP + 0x40068)
+
+typedef union _hash_h2poly_t {
+    uint64_t value;
+    struct {
+#if	defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#ifdef _BIT_FIELDS_HTOL
+			uint32_t rsrvd:16;
+			uint32_t init_value:16;
+#else
+			uint32_t init_value:16;
+			uint32_t rsrvd:16;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} hash_h2poly_t, *p_hash_h2poly_t;
+
+#define		FFLP_FLW_PRT_SEL_REG		(FZC_FFLP + 0x40070)
+
+
+typedef union _flow_prt_sel_t {
+#define		FFLP_FCRAM_MAX_PARTITION	8
+    uint64_t value;
+    struct {
+#if	defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#ifdef _BIT_FIELDS_HTOL
+			uint32_t rsrvd3:15;
+			uint32_t ext:1;
+			uint32_t rsrvd2:3;
+			uint32_t mask:5;
+			uint32_t rsrvd:3;
+			uint32_t base:5;
+#else
+			uint32_t base:5;
+			uint32_t rsrvd:3;
+			uint32_t mask:5;
+			uint32_t rsrvd2:3;
+			uint32_t ext:1;
+			uint32_t rsrvd3:15;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} flow_prt_sel_t, *p_flow_prt_sel_t;
+
+
+
+/* FFLP Offsets */
+
+
+#define		FFLP_HASH_TBL_ADDR_REG		(FFLP + 0x00000)
+
+typedef union _hash_tbl_addr_t {
+    uint64_t value;
+    struct {
+#if	defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#ifdef _BIT_FIELDS_HTOL
+			uint32_t rsrvd:8;
+			uint32_t autoinc:1;
+			uint32_t addr:23;
+#else
+			uint32_t addr:23;
+			uint32_t autoinc:1;
+			uint32_t rsrvd:8;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} hash_tbl_addr_t, *p_hash_tbl_addr_t;
+
+
+#define		FFLP_HASH_TBL_DATA_REG		(FFLP + 0x00008)
+
+typedef union _hash_tbl_data_t {
+    uint64_t value;
+    struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+		uint32_t ldw;
+#else
+		uint32_t ldw;
+		uint32_t hdw;
+#endif
+	} bits;
+} hash_tbl_data_t, *p_hash_tbl_data_t;
+
+
+#define		FFLP_HASH_TBL_DATA_LOG_REG		(FFLP + 0x00010)
+
+
+typedef union _hash_tbl_data_log_t {
+    uint64_t value;
+    struct {
+#if	defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#ifdef _BIT_FIELDS_HTOL
+			uint32_t pio_err:1;
+			uint32_t fcram_addr:23;
+			uint32_t syndrome:8;
+#else
+			uint32_t syndrome:8;
+			uint32_t fcram_addr:23;
+			uint32_t pio_err:1;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} hash_tbl_data_log_t, *p_hash_tbl_data_log_t;
+
+
+
+#define	REG_PIO_WRITE64(handle, offset, value) \
+		NXGE_REG_WR64((handle), (offset), (value))
+#define	REG_PIO_READ64(handle, offset, val_p) \
+		NXGE_REG_RD64((handle), (offset), (val_p))
+
+
+#define	WRITE_TCAM_REG_CTL(handle, ctl) \
+		REG_PIO_WRITE64(handle, FFLP_TCAM_CTL_REG, ctl)
+
+#define	READ_TCAM_REG_CTL(handle, val_p) \
+		REG_PIO_READ64(handle, FFLP_TCAM_CTL_REG, val_p)
+
+
+#define	WRITE_TCAM_REG_KEY0(handle, key)	\
+		REG_PIO_WRITE64(handle,  FFLP_TCAM_KEY_0_REG, key)
+#define	WRITE_TCAM_REG_KEY1(handle, key) \
+		REG_PIO_WRITE64(handle,  FFLP_TCAM_KEY_1_REG, key)
+#define	WRITE_TCAM_REG_KEY2(handle, key) \
+		REG_PIO_WRITE64(handle,  FFLP_TCAM_KEY_2_REG, key)
+#define	WRITE_TCAM_REG_KEY3(handle, key) \
+		REG_PIO_WRITE64(handle,  FFLP_TCAM_KEY_3_REG, key)
+#define	WRITE_TCAM_REG_MASK0(handle, mask)   \
+		REG_PIO_WRITE64(handle,  FFLP_TCAM_MASK_0_REG, mask)
+#define	WRITE_TCAM_REG_MASK1(handle, mask)   \
+		REG_PIO_WRITE64(handle,  FFLP_TCAM_MASK_1_REG, mask)
+#define	WRITE_TCAM_REG_MASK2(handle, mask)   \
+		REG_PIO_WRITE64(handle,  FFLP_TCAM_MASK_2_REG, mask)
+#define	WRITE_TCAM_REG_MASK3(handle, mask)   \
+		REG_PIO_WRITE64(handle,  FFLP_TCAM_MASK_3_REG, mask)
+
+#define	READ_TCAM_REG_KEY0(handle, val_p)	\
+		REG_PIO_READ64(handle,  FFLP_TCAM_KEY_0_REG, val_p)
+#define	READ_TCAM_REG_KEY1(handle, val_p)	\
+		REG_PIO_READ64(handle,  FFLP_TCAM_KEY_1_REG, val_p)
+#define	READ_TCAM_REG_KEY2(handle, val_p)	\
+		REG_PIO_READ64(handle,  FFLP_TCAM_KEY_2_REG, val_p)
+#define	READ_TCAM_REG_KEY3(handle, val_p)	\
+		REG_PIO_READ64(handle,  FFLP_TCAM_KEY_3_REG, val_p)
+#define	READ_TCAM_REG_MASK0(handle, val_p)	\
+		REG_PIO_READ64(handle,  FFLP_TCAM_MASK_0_REG, val_p)
+#define	READ_TCAM_REG_MASK1(handle, val_p)	\
+		REG_PIO_READ64(handle,  FFLP_TCAM_MASK_1_REG, val_p)
+#define	READ_TCAM_REG_MASK2(handle, val_p)	\
+		REG_PIO_READ64(handle,  FFLP_TCAM_MASK_2_REG, val_p)
+#define	READ_TCAM_REG_MASK3(handle, val_p)	\
+		REG_PIO_READ64(handle,  FFLP_TCAM_MASK_3_REG, val_p)
+
+
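+/*
+ * Illustrative sketch (editor's addition, not part of the original header):
+ * one plausible PIO sequence for loading a TCAM entry with the access
+ * macros above.  The guard macro, function name and npi_handle_t usage are
+ * assumptions; the stat/match polling done by the real driver is omitted.
+ */
+#ifdef NXGE_TCAM_EXAMPLE_SKETCH
+static void
+example_tcam_entry_write(npi_handle_t handle, uint32_t location,
+	uint64_t key[4], uint64_t mask[4])
+{
+	tcam_ctl_t tctl;
+
+	/* stage the 256-bit key and mask */
+	WRITE_TCAM_REG_KEY0(handle, key[0]);
+	WRITE_TCAM_REG_KEY1(handle, key[1]);
+	WRITE_TCAM_REG_KEY2(handle, key[2]);
+	WRITE_TCAM_REG_KEY3(handle, key[3]);
+	WRITE_TCAM_REG_MASK0(handle, mask[0]);
+	WRITE_TCAM_REG_MASK1(handle, mask[1]);
+	WRITE_TCAM_REG_MASK2(handle, mask[2]);
+	WRITE_TCAM_REG_MASK3(handle, mask[3]);
+
+	/* kick off the TCAM write to the selected location */
+	tctl.value = 0;
+	tctl.bits.ldw.location = location;
+	tctl.bits.ldw.rwc = TCAM_CTL_RWC_TCAM_WR;
+	WRITE_TCAM_REG_CTL(handle, tctl.value);
+}
+#endif	/* NXGE_TCAM_EXAMPLE_SKETCH */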
+
+
+typedef struct tcam_ipv4 {
+#if defined(_BIG_ENDIAN)
+	uint32_t	reserved6;		/* 255 : 224 */
+	uint32_t	reserved5 : 24;		/* 223 : 200 */
+	uint32_t	cls_code : 5;		/* 199 : 195 */
+	uint32_t	reserved4 : 3;		/* 194 : 192 */
+	uint32_t	l2rd_tbl_num : 5;	/* 191: 187  */
+	uint32_t	noport : 1;		/* 186 */
+	uint32_t	reserved3 : 26;		/* 185: 160  */
+	uint32_t	reserved2;		/* 159: 128  */
+	uint32_t	reserved : 16;		/* 127 : 112 */
+	uint32_t	tos : 8;		/* 111 : 104 */
+	uint32_t	proto : 8;		/* 103 : 96  */
+	uint32_t	l4_port_spi;		/* 95 : 64   */
+	uint32_t	ip_src;			/* 63 : 32   */
+	uint32_t	ip_dest;		/* 31 : 0    */
+#else
+	uint32_t	ip_dest;		/* 31 : 0    */
+	uint32_t	ip_src;			/* 63 : 32   */
+	uint32_t	l4_port_spi;		/* 95 : 64   */
+	uint32_t	proto : 8;		/* 103 : 96  */
+	uint32_t	tos : 8;		/* 111 : 104 */
+	uint32_t	reserved : 16;		/* 127 : 112 */
+	uint32_t	reserved2;		/* 159: 128  */
+	uint32_t	reserved3 : 26;		/* 185: 160  */
+	uint32_t	noport : 1;		/* 186	*/
+	uint32_t	l2rd_tbl_num : 5;	/* 191: 187  */
+	uint32_t	reserved4 : 3;		/* 194 : 192 */
+	uint32_t	cls_code : 5;		/* 199 : 195 */
+	uint32_t	reserved5 : 24;		/* 223 : 200 */
+	uint32_t	reserved6;		/* 255 : 224 */
+#endif
+} tcam_ipv4_t;
+
+
+
+typedef struct tcam_reg {
+#if defined(_BIG_ENDIAN)
+    uint64_t		reg0;
+    uint64_t		reg1;
+    uint64_t		reg2;
+    uint64_t		reg3;
+#else
+    uint64_t		reg3;
+    uint64_t		reg2;
+    uint64_t		reg1;
+    uint64_t		reg0;
+#endif
+} tcam_reg_t;
+
+
+typedef struct tcam_ether {
+#if defined(_BIG_ENDIAN)
+	uint8_t		reserved3[7];		/* 255 : 200 */
+	uint8_t		cls_code : 5;		/* 199 : 195 */
+	uint8_t		reserved2 : 3;		/* 194 : 192 */
+	uint8_t		ethframe[11];		/* 191 : 104 */
+	uint8_t		reserved[13];		/* 103 : 0   */
+#else
+	uint8_t		reserved[13];		/* 103 : 0   */
+	uint8_t		ethframe[11];		/* 191 : 104 */
+	uint8_t		reserved2 : 3;		/* 194 : 192 */
+	uint8_t		cls_code : 5;		/* 199 : 195 */
+	uint8_t		reserved3[7];		/* 255 : 200 */
+#endif
+} tcam_ether_t;
+
+
+typedef struct tcam_ipv6 {
+#if defined(_BIG_ENDIAN)
+	uint32_t	reserved4;		/* 255 : 224 */
+	uint32_t	reserved3 : 24;		/* 223 : 200 */
+	uint32_t	cls_code : 5;		/* 199 : 195 */
+	uint32_t	reserved2 : 3;		/* 194 : 192 */
+	uint32_t	l2rd_tbl_num : 5;	/* 191: 187  */
+	uint32_t	noport : 1;		/* 186  */
+	uint32_t	reserved : 10;		/* 185 : 176 */
+	uint32_t	tos : 8;		/* 175 : 168 */
+	uint32_t	nxt_hdr : 8;		/* 167 : 160 */
+	uint32_t	l4_port_spi;		/* 159 : 128 */
+	uint32_t	ip_addr[4];		/* 127 : 0   */
+#else
+	uint32_t	ip_addr[4];		/* 127 : 0   */
+	uint32_t	l4_port_spi;		/* 159 : 128 */
+	uint32_t	nxt_hdr : 8;		/* 167 : 160 */
+	uint32_t	tos : 8;		/* 175 : 168 */
+	uint32_t	reserved : 10;		/* 185 : 176 */
+	uint32_t	noport : 1;		/* 186 */
+	uint32_t	l2rd_tbl_num : 5;	/* 191: 187  */
+	uint32_t	reserved2 : 3;		/* 194 : 192 */
+	uint32_t	cls_code : 5;		/* 199 : 195 */
+	uint32_t	reserved3 : 24;		/* 223 : 200 */
+	uint32_t	reserved4;		/* 255 : 224 */
+#endif
+} tcam_ipv6_t;
+
+
+typedef struct tcam_entry {
+    union  _tcam_entry {
+	tcam_reg_t	   regs_e;
+	tcam_ether_t	   ether_e;
+	tcam_ipv4_t	   ipv4_e;
+	tcam_ipv6_t	   ipv6_e;
+	} key, mask;
+	tcam_res_t	match_action;
+} tcam_entry_t;
+
+
+#define		key_reg0		key.regs_e.reg0
+#define		key_reg1		key.regs_e.reg1
+#define		key_reg2		key.regs_e.reg2
+#define		key_reg3		key.regs_e.reg3
+#define		mask_reg0		mask.regs_e.reg0
+#define		mask_reg1		mask.regs_e.reg1
+#define		mask_reg2		mask.regs_e.reg2
+#define		mask_reg3		mask.regs_e.reg3
+
+
+#define		key0			key.regs_e.reg0
+#define		key1			key.regs_e.reg1
+#define		key2			key.regs_e.reg2
+#define		key3			key.regs_e.reg3
+#define		mask0			mask.regs_e.reg0
+#define		mask1			mask.regs_e.reg1
+#define		mask2			mask.regs_e.reg2
+#define		mask3			mask.regs_e.reg3
+
+
+#define		ip4_src_key		key.ipv4_e.ip_src
+#define		ip4_dest_key		key.ipv4_e.ip_dest
+#define		ip4_proto_key		key.ipv4_e.proto
+#define		ip4_port_key		key.ipv4_e.l4_port_spi
+#define		ip4_tos_key		key.ipv4_e.tos
+#define		ip4_noport_key		key.ipv4_e.noport
+#define		ip4_nrdc_key		key.ipv4_e.l2rd_tbl_num
+#define		ip4_class_key		key.ipv4_e.cls_code
+
+#define		ip4_src_mask		mask.ipv4_e.ip_src
+#define		ip4_dest_mask		mask.ipv4_e.ip_dest
+#define		ip4_proto_mask		mask.ipv4_e.proto
+#define		ip4_port_mask		mask.ipv4_e.l4_port_spi
+#define		ip4_tos_mask		mask.ipv4_e.tos
+#define		ip4_nrdc_mask		mask.ipv4_e.l2rd_tbl_num
+#define		ip4_noport_mask		mask.ipv4_e.noport
+#define		ip4_class_mask		mask.ipv4_e.cls_code
+
+
+#define		ip6_ip_addr_key		key.ipv6_e.ip_addr
+#define		ip6_port_key		key.ipv6_e.l4_port_spi
+#define		ip6_nxt_hdr_key		key.ipv6_e.nxt_hdr
+#define		ip6_tos_key		key.ipv6_e.tos
+#define		ip6_nrdc_key		key.ipv6_e.l2rd_tbl_num
+#define		ip6_noport_key		key.ipv6_e.noport
+#define		ip6_class_key		key.ipv6_e.cls_code
+
+
+#define		ip6_ip_addr_mask	mask.ipv6_e.ip_addr
+#define		ip6_port_mask		mask.ipv6_e.l4_port_spi
+#define		ip6_nxt_hdr_mask	mask.ipv6_e.nxt_hdr
+#define		ip6_tos_mask		mask.ipv6_e.tos
+#define		ip6_nrdc_mask		mask.ipv6_e.l2rd_tbl_num
+#define		ip6_noport_mask		mask.ipv6_e.noport
+#define		ip6_class_mask		mask.ipv6_e.cls_code
+
+#define		ether_class_key		key.ether_e.cls_code
+#define		ether_ethframe_key	key.ether_e.ethframe
+#define		ether_class_mask	mask.ether_e.cls_code
+#define		ether_ethframe_mask	mask.ether_e.ethframe
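+/*
+ * Illustrative sketch (editor's addition, not part of the original header):
+ * populating an IPv4 TCAM key/mask pair through the accessor shorthands
+ * above.  The guard macro, function name and all field values are
+ * placeholders, not recommended settings.
+ */
+#ifdef NXGE_TCAM_EXAMPLE_SKETCH
+static void
+example_tcam_ipv4_fill(tcam_entry_t *tep)
+{
+	tep->ip4_class_key = TCAM_CLASS_TCP_IPV4;
+	tep->ip4_class_mask = 0x1f;		/* compare all class bits */
+	tep->ip4_src_key = 0xc0a80001;		/* 192.168.0.1 */
+	tep->ip4_src_mask = 0xffffffff;
+	tep->ip4_dest_key = 0xc0a80002;		/* 192.168.0.2 */
+	tep->ip4_dest_mask = 0xffffffff;
+	tep->ip4_proto_key = 6;			/* TCP */
+	tep->ip4_proto_mask = 0xff;
+	tep->ip4_port_key = 80 << 16;		/* L4 port pair / SPI word */
+	tep->ip4_port_mask = 0xffff0000;
+}
+#endif	/* NXGE_TCAM_EXAMPLE_SKETCH */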
+
+
+/*
+ * Flow template structure.
+ * The flow header is passed through the hash function,
+ * which generates the H1 (and the H2) hash value.
+ * Hash computation starts at the 22 zero bits.
+ *
+ * Since this structure uses the IP address fields,
+ * /usr/include/netinet/in.h has to be included
+ * before this header file.
+ * Need to move these includes to impl files ...
+ */
+
+#if defined(SOLARIS) || defined(COSIM)
+#include <netinet/in.h>
+#endif
+
+typedef union flow_template {
+
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t l4_0:16;  /* src port */
+		uint32_t l4_1:16;  /* dest Port */
+
+		uint32_t pid:8;
+		uint32_t port:2;
+		uint32_t zeros:22; /* 0 */
+
+		union {
+			struct {
+				struct in6_addr daddr;
+				struct in6_addr saddr;
+			} ip6_addr;
+
+			struct  {
+				uint32_t rsrvd1;
+				struct in_addr daddr;
+				uint32_t rsrvd2[3];
+				struct in_addr saddr;
+				uint32_t rsrvd5[2];
+			} ip4_addr;
+		} ipaddr;
+
+		union {
+			uint64_t l2_info;
+			struct {
+				uint32_t vlan_valid : 4;
+				uint32_t l2da_1 : 28;
+				uint32_t l2da_0 : 20;
+				uint32_t vlanid : 12;
+
+			}l2_bits;
+		}l2;
+#else
+
+		uint32_t l4_1:16;  /* dest Port */
+		uint32_t l4_0:16;  /* src port */
+
+		uint32_t zeros:22; /* 0 */
+		uint32_t port:2;
+		uint32_t pid:8;
+
+		union {
+			struct {
+				struct in6_addr daddr;
+				struct in6_addr saddr;
+			} ip6_addr;
+
+			struct  {
+				uint32_t rsrvd1;
+				struct in_addr daddr;
+				uint32_t rsrvd2[3];
+				struct in_addr saddr;
+				uint32_t rsrvd5[2];
+			} ip4_addr;
+		} ipaddr;
+
+		union {
+			uint64_t l2_info;
+			struct {
+
+				uint32_t l2da_1 : 28;
+				uint32_t vlan_valid : 4;
+
+				uint32_t vlanid : 12;
+				uint32_t l2da_0 : 20;
+			}l2_bits;
+		}l2;
+#endif
+	} bits;
+
+} flow_template_t;
+
+
+
+#define	ip4_saddr bits.ipaddr.ip4_addr.saddr.s_addr
+#define	ip4_daddr bits.ipaddr.ip4_addr.daddr.s_addr
+
+#define	ip_src_port  bits.l4_0
+#define	ip_dst_port  bits.l4_1
+#define	ip_proto  bits.pid
+
+#define	ip6_saddr bits.ipaddr.ip6_addr.saddr
+#define	ip6_daddr bits.ipaddr.ip6_addr.daddr
+
+
+
+
+typedef struct _flow_key_cfg_t {
+    uint32_t rsrvd:23;
+    uint32_t use_portnum:1;
+    uint32_t use_l2da:1;
+    uint32_t use_vlan:1;
+    uint32_t use_saddr:1;
+    uint32_t use_daddr:1;
+    uint32_t use_sport:1;
+    uint32_t use_dport:1;
+    uint32_t use_proto:1;
+    uint32_t ip_opts_exist:1;
+} flow_key_cfg_t;
+
+
+typedef struct _tcam_key_cfg_t {
+    uint32_t rsrvd:28;
+    uint32_t use_ip_daddr:1;
+    uint32_t use_ip_saddr:1;
+    uint32_t lookup_enable:1;
+    uint32_t discard:1;
+} tcam_key_cfg_t;
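+/*
+ * Illustrative sketch (editor's addition, not part of the original header):
+ * a 5-tuple flow key selection expressed with flow_key_cfg_t.  The guard
+ * macro and function name are hypothetical and never defined.
+ */
+#ifdef NXGE_FFLP_EXAMPLE_SKETCH
+static flow_key_cfg_t
+example_5tuple_flow_key_cfg(void)
+{
+	flow_key_cfg_t cfg = { 0 };
+
+	cfg.use_proto = 1;
+	cfg.use_saddr = 1;
+	cfg.use_daddr = 1;
+	cfg.use_sport = 1;
+	cfg.use_dport = 1;
+	return (cfg);
+}
+#endif	/* NXGE_FFLP_EXAMPLE_SKETCH */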
+
+
+
+/*
+ * FCRAM Entry Formats
+ *
+ * For ip6 and ip4 entries, the layout of the first 64 bits is identical.
+ * An optimistic entry has only a 64 bit layout.
+ * The first three bits, fmt, ext and valid, are the same
+ * across all the entries.
+ */
+
+typedef union hash_optim {
+    uint64_t value;
+    struct _bits {
+#if defined(_BIG_ENDIAN)
+		uint32_t	fmt : 1;	/* 63  set to zero */
+		uint32_t	ext : 1;	/* 62  set to zero */
+		uint32_t	valid : 1;	/* 61 */
+		uint32_t	rdc_offset : 5;	/* 60 : 56 */
+		uint32_t	h2 : 16;	/* 55 : 40 */
+		uint32_t	rsrvd : 8;	/* 39 : 32 */
+		uint32_t	usr_info;	/* 31 : 0   */
+#else
+		uint32_t	usr_info;	/* 31 : 0   */
+		uint32_t	rsrvd : 8;	/* 39 : 32  */
+		uint32_t	h2 : 16;	/* 55 : 40  */
+		uint32_t	rdc_offset : 5;	/* 60 : 56  */
+		uint32_t	valid : 1;	/* 61 */
+		uint32_t	ext : 1;	/* 62  set to zero */
+		uint32_t	fmt : 1;	/* 63  set to zero */
+#endif
+	} bits;
+} hash_optim_t;
+
+
+typedef    union _hash_hdr {
+    uint64_t value;
+    struct _exact_hdr {
+#if defined(_BIG_ENDIAN)
+		uint32_t	fmt : 1;	/* 63  1 for ipv6, 0 for ipv4 */
+		uint32_t	ext : 1;	/* 62  set to 1 */
+		uint32_t	valid : 1;	/* 61 */
+		uint32_t	rsrvd : 1;	/* 60 */
+		uint32_t	l2da_1 : 28;	/* 59 : 32 */
+		uint32_t	l2da_0 : 20;	/* 31 : 12 */
+		uint32_t	vlan : 12;	/* 12 : 0   */
+#else
+		uint32_t	vlan : 12;	/* 12 : 0   */
+		uint32_t	l2da_0 : 20;	/* 31 : 12 */
+		uint32_t	l2da_1 : 28;	/* 59 : 32 */
+		uint32_t	rsrvd : 1;	/* 60 */
+		uint32_t	valid : 1;	/* 61 */
+		uint32_t	ext : 1;	/* 62  set to 1 */
+		uint32_t	fmt : 1;	/* 63  1 for ipv6, 0 for ipv4 */
+#endif
+	} exact_hdr;
+    hash_optim_t optim_hdr;
+} hash_hdr_t;
+
+
+
+typedef    union _hash_ports {
+    uint64_t value;
+    struct _ports_bits {
+#if defined(_BIG_ENDIAN)
+		uint32_t	ip_dport : 16;	/* 63 : 48 */
+		uint32_t	ip_sport : 16;	/* 47 : 32 */
+		uint32_t	proto : 8;	/* 31 : 24 */
+		uint32_t	port : 2;	/* 23 : 22 */
+		uint32_t	rsrvd : 22;	/* 21 : 0   */
+#else
+		uint32_t	rsrvd : 22;	/* 21 : 0   */
+		uint32_t	port : 2;	/* 23 : 22 */
+		uint32_t	proto : 8;	/* 31 : 24 */
+		uint32_t	ip_sport : 16;	/* 47 : 32 */
+		uint32_t	ip_dport : 16;	/* 63 : 48 */
+#endif
+	} ports_bits;
+} hash_ports_t;
+
+
+
+typedef    union _hash_match_action {
+    uint64_t value;
+    struct _action_bits {
+#if defined(_BIG_ENDIAN)
+		uint32_t	rsrvd2 : 3;	/* 63 : 61  */
+		uint32_t	rdc_offset : 5;	/* 60 : 56 */
+		uint32_t	zfvld : 1;	/* 55 */
+		uint32_t	rsrvd : 3;	/* 54 : 52   */
+		uint32_t	zfid : 12;	/* 51 : 40 */
+		uint32_t	_rsrvd : 8;	/* 39 : 32 */
+		uint32_t	usr_info;	/* 31 : 0   */
+#else
+		uint32_t	usr_info;	/* 31 : 0   */
+		uint32_t	_rsrvd : 8;	/* 39 : 32  */
+		uint32_t	zfid : 12;	/* 51 : 40 */
+		uint32_t	rsrvd : 3;	/* 54 : 52   */
+		uint32_t	zfvld : 1;	/* 55 */
+		uint32_t	rdc_offset : 5;	/* 60 : 56 */
+		uint32_t	rsrvd2 : 3;	/* 63 : 61  */
+#endif
+	} action_bits;
+} hash_match_action_t;
+
+
+typedef    struct _ipaddr6 {
+    struct in6_addr	 saddr;
+    struct in6_addr	 daddr;
+} ip6_addr_t;
+
+
+typedef    struct   _ipaddr4   {
+#if defined(_BIG_ENDIAN)
+    struct in_addr	saddr;
+    struct in_addr	daddr;
+#else
+    struct in_addr	daddr;
+    struct in_addr	saddr;
+#endif
+} ip4_addr_t;
+
+
+	/* ipv4 has 32 byte layout */
+
+typedef struct hash_ipv4 {
+    hash_hdr_t		 hdr;
+    ip4_addr_t		 ip_addr;
+    hash_ports_t	 proto_ports;
+    hash_match_action_t	 action;
+} hash_ipv4_t;
+
+
+	/* ipv6 has 56 byte layout */
+typedef struct hash_ipv6 {
+    hash_hdr_t		  hdr;
+    ip6_addr_t		  ip_addr;
+    hash_ports_t	  proto_ports;
+    hash_match_action_t	  action;
+} hash_ipv6_t;
+
+
+
+typedef union fcram_entry {
+    uint64_t		  value[8];
+    hash_tbl_data_t	  dreg[8];
+    hash_ipv6_t		  ipv6_entry;
+    hash_ipv4_t		  ipv4_entry;
+    hash_optim_t	  optim_entry;
+} fcram_entry_t;
+
+
+
+#define	hash_hdr_fmt	ipv4_entry.hdr.exact_hdr.fmt
+#define	hash_hdr_ext	ipv4_entry.hdr.exact_hdr.ext
+#define	hash_hdr_valid	ipv4_entry.hdr.exact_hdr.valid
+
+#define	HASH_ENTRY_EXACT(fc)	\
+	(fc->ipv4_entry.hdr.exact_hdr.ext == 1)
+#define	HASH_ENTRY_OPTIM(fc)	\
+	((fc->ipv4_entry.hdr.exact_hdr.ext == 0) && \
+	(fc->ipv6_entry.hdr.exact_hdr.fmt == 0))
+#define	HASH_ENTRY_EXACT_IP6(fc) \
+	((fc->ipv6_entry.hdr.exact_hdr.fmt == 1) && \
+	(fc->ipv4_entry.hdr.exact_hdr.ext == 1))
+
+#define	HASH_ENTRY_EXACT_IP4(fc) \
+	((fc->ipv6_entry.hdr.exact_hdr.fmt == 0) && \
+	(fc->ipv4_entry.hdr.exact_hdr.ext == 1))
+
+#define	HASH_ENTRY_TYPE(fc)	\
+	(fc->ipv4_entry.hdr.exact_hdr.ext | \
+	(fc->ipv4_entry.hdr.exact_hdr.fmt << 1))
+
+
+
+typedef enum fcram_entry_format {
+	FCRAM_ENTRY_OPTIM = 0x0,
+	FCRAM_ENTRY_EX_IP4 = 0x2,
+	FCRAM_ENTRY_EX_IP6 = 0x3,
+	FCRAM_ENTRY_UNKOWN = 0x1
+} fcram_entry_format_t;
+
+
+#define		HASH_ENTRY_TYPE_OPTIM		FCRAM_ENTRY_OPTIM
+#define		HASH_ENTRY_TYPE_OPTIM_IP4	FCRAM_ENTRY_OPTIM
+#define		HASH_ENTRY_TYPE_EX_IP4		FCRAM_ENTRY_EX_IP4
+#define		HASH_ENTRY_TYPE_EX_IP6		FCRAM_ENTRY_EX_IP6
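+/*
+ * Illustrative sketch (editor's addition, not part of the original header):
+ * dispatching on an FCRAM entry's layout with the predicates above.  The
+ * guard macro and function name are hypothetical and never defined.
+ */
+#ifdef NXGE_FCRAM_EXAMPLE_SKETCH
+static fcram_entry_format_t
+example_fcram_entry_kind(fcram_entry_t *fc)
+{
+	if (HASH_ENTRY_EXACT_IP4(fc))
+		return (FCRAM_ENTRY_EX_IP4);
+	if (HASH_ENTRY_EXACT_IP6(fc))
+		return (FCRAM_ENTRY_EX_IP6);
+	if (HASH_ENTRY_OPTIM(fc))
+		return (FCRAM_ENTRY_OPTIM);
+	return (FCRAM_ENTRY_UNKOWN);
+}
+#endif	/* NXGE_FCRAM_EXAMPLE_SKETCH */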
+
+
+
+
+	/* error xxx formats */
+
+
+typedef struct _hash_lookup_err_log {
+    uint32_t rsrvd:28;
+    uint32_t lookup_err:1;
+    uint32_t ecc_err:1;
+    uint32_t uncor_err:1;
+    uint32_t multi_lkup:1;
+    uint32_t multi_bit:1;
+    uint32_t subarea:3;
+    uint32_t syndrome:8;
+    uint32_t h1:20;
+} hash_lookup_err_log_t, *p_hash_lookup_err_log_t;
+
+
+
+typedef struct _hash_pio_err_log {
+    uint32_t rsrvd:32;
+    uint32_t pio_err:1;
+    uint32_t syndrome:8;
+    uint32_t addr:23;
+} hash_pio_err_log_t, *p_hash_pio_err_log_t;
+
+
+
+typedef struct _tcam_err_log {
+    uint32_t rsrvd:2;
+    uint32_t tcam_err:1;
+    uint32_t parity_err:1;
+    uint32_t ecc_err:1;
+    uint32_t multi_lkup:1;
+    uint32_t location:8;
+    uint32_t syndrome:16;
+} tcam_err_log_t, *p_tcam_err_log_t;
+
+
+typedef struct _vlan_tbl_err_log {
+    uint32_t rsrvd:32;
+    uint32_t err:1;
+    uint32_t multi:1;
+    uint32_t addr:12;
+    uint32_t data:18;
+} vlan_tbl_err_log_t, *p_vlan_tbl_err_log_t;
+
+
+#define		NEPTUNE_TCAM_SIZE		0x100
+#define		NIU_TCAM_SIZE			0x80
+#define		FCRAM_SIZE			0x100000
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _SYS_NXGE_NXGE_FFLP_HW_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/sys/nxge/nxge_flow.h	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,186 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef	_SYS_NXGE_NXGE_FLOW_H
+#define	_SYS_NXGE_NXGE_FLOW_H
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#if defined(SOLARIS) && defined(_KERNEL)
+#include <netinet/in.h>
+#define	S6_addr32	_S6_un._S6_u32
+#endif
+
+typedef struct tcpip4_spec_s {
+	in_addr_t  ip4src;
+	in_addr_t  ip4dst;
+	in_port_t  psrc;
+	in_port_t  pdst;
+} tcpip4_spec_t;
+
+typedef struct tcpip6_spec_s {
+	struct in6_addr ip6src;
+	struct in6_addr ip6dst;
+	in_port_t  psrc;
+	in_port_t  pdst;
+} tcpip6_spec_t;
+
+typedef struct udpip4_spec_s {
+	in_addr_t  ip4src;
+	in_addr_t  ip4dst;
+	in_port_t  psrc;
+	in_port_t  pdst;
+} udpip4_spec_t;
+
+typedef struct udpip6_spec_s {
+	struct in6_addr ip6src;
+	struct in6_addr ip6dst;
+	in_port_t  psrc;
+	in_port_t  pdst;
+} udpip6_spec_t;
+
+typedef struct ahip4_spec_s {
+	in_addr_t  ip4src;
+	in_addr_t  ip4dst;
+	uint32_t   spi;
+} ahip4_spec_t;
+
+typedef struct ahip6_spec_s {
+	struct in6_addr ip6src;
+	struct in6_addr ip6dst;
+	uint32_t   spi;
+} ahip6_spec_t;
+
+typedef ahip4_spec_t espip4_spec_t;
+typedef ahip6_spec_t espip6_spec_t;
+
+typedef struct rawip4_spec_s {
+	struct in6_addr ip4src;
+	struct in6_addr ip4dst;
+	uint8_t    hdata[64];
+} rawip4_spec_t;
+
+typedef struct rawip6_spec_s {
+	struct in6_addr ip6src;
+	struct in6_addr ip6dst;
+	uint8_t    hdata[64];
+} rawip6_spec_t;
+
+
+typedef struct ether_spec_s {
+	uint16_t   ether_type;
+	uint8_t    frame_size;
+	uint8_t    eframe[16];
+} ether_spec_t;
+
+
+typedef struct ip_user_spec_s {
+	uint8_t    id;
+	uint8_t    ip_ver;
+	uint8_t    proto;
+	uint8_t    tos_mask;
+	uint8_t    tos;
+} ip_user_spec_t;
+
+
+
+typedef ether_spec_t arpip_spec_t;
+typedef ether_spec_t ether_user_spec_t;
+
+
+typedef struct flow_spec_s {
+	uint32_t  flow_type;
+	union {
+		tcpip4_spec_t tcpip4spec;
+		tcpip6_spec_t tcpip6spec;
+		udpip4_spec_t udpip4spec;
+		udpip6_spec_t udpip6spec;
+		arpip_spec_t  arpipspec;
+		ahip4_spec_t  ahip4spec;
+		ahip6_spec_t  ahip6spec;
+		espip4_spec_t espip4spec;
+		espip6_spec_t espip6spec;
+		rawip4_spec_t rawip4spec;
+		rawip6_spec_t rawip6spec;
+		ether_spec_t  etherspec;
+		ip_user_spec_t  ip_usr_spec;
+		uint8_t		hdata[64];
+	} uh, um; /* entry, mask */
+} flow_spec_t;
+
+#define	FSPEC_TCPIP4	0x1	/* TCP/IPv4 Flow */
+#define	FSPEC_TCPIP6	0x2	/* TCP/IPv6 */
+#define	FSPEC_UDPIP4	0x3	/* UDP/IPv4 */
+#define	FSPEC_UDPIP6	0x4	/* UDP/IPv6 */
+#define	FSPEC_ARPIP	0x5	/* ARP/IPv4 */
+#define	FSPEC_AHIP4	0x6	/* AH/IP4   */
+#define	FSPEC_AHIP6	0x7	/* AH/IP6   */
+#define	FSPEC_ESPIP4	0x8	/* ESP/IP4  */
+#define	FSPEC_ESPIP6	0x9	/* ESP/IP6  */
+#define	FSPEC_SCTPIP4	0xA	/* SCTP/IPv4  */
+#define	FSPEC_SCTPIP6	0xB	/* SCTP/IPv6  */
+#define	FSPEC_RAW4	0xC	/* RAW/IP4  */
+#define	FSPEC_RAW6	0xD	/* RAW/IP6  */
+#define	FSPEC_ETHER	0xE	/* ETHER Programmable  */
+#define	FSPEC_IP_USR	0xF	/* IP Programmable  */
+#define	FSPEC_HDATA	0x10	/* Pkt Headers eth-da,sa,etype,ip,tcp(Bitmap) */
+
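+/*
+ * Illustrative example only (not part of the driver API): filling in an
+ * entry/mask pair for a TCP/IPv4 flow.  "fs" and "dst_ip" are
+ * hypothetical names.
+ *
+ *	flow_spec_t fs;
+ *	fs.flow_type = FSPEC_TCPIP4;
+ *	fs.uh.tcpip4spec.ip4dst = dst_ip;	entry: destination to match
+ *	fs.um.tcpip4spec.ip4dst = 0xffffffff;	mask: all 32 bits significant
+ */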
+
+#define	TCAM_IPV6_ADDR(m32, ip6addr) {		\
+		m32[0] = ip6addr.S6_addr32[0]; \
+		m32[1] = ip6addr.S6_addr32[1]; \
+		m32[2] = ip6addr.S6_addr32[2]; \
+		m32[3] = ip6addr.S6_addr32[3]; \
+	}
+
+
+#define	TCAM_IPV4_ADDR(m32, ip4addr) (m32 = ip4addr)
+#define	TCAM_IP_PORTS(port32, dp, sp)	  (port32 = dp | (sp << 16))
+#define	TCAM_IP_CLASS(key, mask, class)	  {		\
+		key = class; \
+		mask = 0x1f; \
+	}
+
+#define	TCAM_IP_PROTO(key, mask, proto) {		\
+		key = proto; \
+		mask = 0xff; \
+	}
+
+
+typedef struct flow_resource_s {
+	uint64_t channel_cookie;
+	uint64_t flow_cookie;
+	flow_spec_t flow_spec;
+} flow_resource_t;
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _SYS_NXGE_NXGE_FLOW_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/sys/nxge/nxge_fm.h	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,249 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef	_SYS_NXGE_NXGE_FM_H
+#define	_SYS_NXGE_NXGE_FM_H
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#include <sys/ddi.h>
+
+#define	ERNAME_DETAILED_ERR_TYPE	"detailed error type"
+#define	ERNAME_ERR_PORTN		"port number"
+#define	ERNAME_ERR_DCHAN		"dma channel number"
+#define	ERNAME_TCAM_ERR_LOG		"tcam error log"
+#define	ERNAME_VLANTAB_ERR_LOG		"vlan table error log"
+#define	ERNAME_HASHTAB_ERR_LOG		"hash table error log"
+#define	ERNAME_HASHT_LOOKUP_ERR_LOG0	"hash table lookup error log0"
+#define	ERNAME_HASHT_LOOKUP_ERR_LOG1	"hash table lookup error log1"
+#define	ERNAME_RDMC_PAR_ERR_LOG		"rdmc parity error log"
+#define	ERNAME_DFIFO_RD_PTR		"dfifo read pointer"
+#define	ERNAME_IPP_STATE_MACH		"ipp state machine"
+#define	ERNAME_DFIFO_ENTRY		"dfifo entry"
+#define	ERNAME_DFIFO_SYNDROME		"dfifo syndrome"
+#define	ERNAME_PFIFO_ENTRY		"pfifo entry"
+#define	ERNAME_ZCP_STATE_MACH		"zcp state machine"
+#define	ERNAME_CFIFO_PORT_NUM		"cfifo port number"
+#define	ERNAME_RDC_ERR_TYPE		"completion error type"
+#define	ERNAME_TDMC_ERR_LOG0		"tdmc error log0"
+#define	ERNAME_TDMC_ERR_LOG1		"tdmc error log1"
+#define	ERNAME_TXC_ROECC_ADDR		"txc reorder FIFO ECC error address"
+#define	ERNAME_TXC_ROECC_DATA0		"txc reorder FIFO data0"
+#define	ERNAME_TXC_ROECC_DATA1		"txc reorder FIFO data1"
+#define	ERNAME_TXC_ROECC_DATA2		"txc reorder FIFO data2"
+#define	ERNAME_TXC_ROECC_DATA3		"txc reorder FIFO data3"
+#define	ERNAME_TXC_ROECC_DATA4		"txc reorder FIFO data4"
+#define	ERNAME_TXC_RO_STATE0		"txc reorder FIFO error state0 " \
+					"(duplicate TID)"
+#define	ERNAME_TXC_RO_STATE1		"txc reorder FIFO error state1 " \
+					"(uninitialized TID)"
+#define	ERNAME_TXC_RO_STATE2		"txc reorder FIFO error state2 " \
+					"(timed out TIDs)"
+#define	ERNAME_TXC_RO_STATE3		"txc reorder FIFO error state3"
+#define	ERNAME_TXC_RO_STATE_CTL		"txc reorder FIFO error control"
+#define	ERNAME_TXC_RO_TIDS		"txc reorder tids"
+#define	ERNAME_TXC_SFECC_ADDR		"txc store forward FIFO ECC error "\
+					"address"
+#define	ERNAME_TXC_SFECC_DATA0		"txc store forward FIFO data0"
+#define	ERNAME_TXC_SFECC_DATA1		"txc store forward FIFO data1"
+#define	ERNAME_TXC_SFECC_DATA2		"txc store forward FIFO data2"
+#define	ERNAME_TXC_SFECC_DATA3		"txc store forward FIFO data3"
+#define	ERNAME_TXC_SFECC_DATA4		"txc store forward FIFO data4"
+
+#define	EREPORT_FM_ID_SHIFT		16
+#define	EREPORT_FM_ID_MASK		0xFF
+#define	EREPORT_INDEX_MASK		0xFF
+#define	NXGE_FM_EREPORT_UNKNOWN		0
+
+#define	FM_SW_ID			0xFF
+#define	FM_PCS_ID			MAC_BLK_ID
+#define	FM_TXMAC_ID			TXMAC_BLK_ID
+#define	FM_RXMAC_ID			RXMAC_BLK_ID
+#define	FM_MIF_ID			MIF_BLK_ID
+#define	FM_IPP_ID			IPP_BLK_ID
+#define	FM_TXC_ID			TXC_BLK_ID
+#define	FM_TXDMA_ID			TXDMA_BLK_ID
+#define	FM_RXDMA_ID			RXDMA_BLK_ID
+#define	FM_ZCP_ID			ZCP_BLK_ID
+#define	FM_ESPC_ID			ESPC_BLK_ID
+#define	FM_FFLP_ID			FFLP_BLK_ID
+#define	FM_PCIE_ID			PCIE_BLK_ID
+#define	FM_ETHER_SERDES_ID		ETHER_SERDES_BLK_ID
+#define	FM_PCIE_SERDES_ID		PCIE_SERDES_BLK_ID
+#define	FM_VIR_ID			VIR_BLK_ID
+
+typedef	uint32_t nxge_fm_ereport_id_t;
+
+typedef	struct _nxge_fm_ereport_attr {
+	uint32_t		index;
+	char			*str;
+	char			*eclass;
+	ddi_fault_impact_t	impact;
+} nxge_fm_ereport_attr_t;
+
+/* General MAC ereports */
+typedef	enum {
+	NXGE_FM_EREPORT_XPCS_LINK_DOWN = (FM_PCS_ID << EREPORT_FM_ID_SHIFT),
+	NXGE_FM_EREPORT_XPCS_TX_LINK_FAULT,
+	NXGE_FM_EREPORT_XPCS_RX_LINK_FAULT,
+	NXGE_FM_EREPORT_PCS_LINK_DOWN,
+	NXGE_FM_EREPORT_PCS_REMOTE_FAULT
+} nxge_fm_ereport_pcs_t;
+
+/* MIF ereports */
+typedef	enum {
+	NXGE_FM_EREPORT_MIF_ACCESS_FAIL = (FM_MIF_ID << EREPORT_FM_ID_SHIFT)
+} nxge_fm_ereport_mif_t;
+
+/* FFLP ereports */
+typedef	enum {
+	NXGE_FM_EREPORT_FFLP_TCAM_ERR = (FM_FFLP_ID << EREPORT_FM_ID_SHIFT),
+	NXGE_FM_EREPORT_FFLP_VLAN_PAR_ERR,
+	NXGE_FM_EREPORT_FFLP_HASHT_DATA_ERR,
+	NXGE_FM_EREPORT_FFLP_HASHT_LOOKUP_ERR,
+	NXGE_FM_EREPORT_FFLP_ACCESS_FAIL
+} nxge_fm_ereport_fflp_t;
+
+/* IPP ereports */
+typedef	enum {
+	NXGE_FM_EREPORT_IPP_EOP_MISS = (FM_IPP_ID << EREPORT_FM_ID_SHIFT),
+	NXGE_FM_EREPORT_IPP_SOP_MISS,
+	NXGE_FM_EREPORT_IPP_DFIFO_UE,
+	NXGE_FM_EREPORT_IPP_DFIFO_CE,
+	NXGE_FM_EREPORT_IPP_PFIFO_PERR,
+	NXGE_FM_EREPORT_IPP_ECC_ERR_MAX,
+	NXGE_FM_EREPORT_IPP_PFIFO_OVER,
+	NXGE_FM_EREPORT_IPP_PFIFO_UND,
+	NXGE_FM_EREPORT_IPP_BAD_CS_MX,
+	NXGE_FM_EREPORT_IPP_PKT_DIS_MX,
+	NXGE_FM_EREPORT_IPP_RESET_FAIL
+} nxge_fm_ereport_ipp_t;
+
+/* RDMC ereports */
+typedef	enum {
+	NXGE_FM_EREPORT_RDMC_DCF_ERR = (FM_RXDMA_ID << EREPORT_FM_ID_SHIFT),
+	NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR,
+	NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR,
+	NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR,
+	NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR,
+	NXGE_FM_EREPORT_RDMC_RBR_TMOUT,
+	NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR,
+	NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS,
+	NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR,
+	NXGE_FM_EREPORT_RDMC_ID_MISMATCH,
+	NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR,
+	NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR,
+	NXGE_FM_EREPORT_RDMC_COMPLETION_ERR,
+	NXGE_FM_EREPORT_RDMC_CONFIG_ERR,
+	NXGE_FM_EREPORT_RDMC_RCRINCON,
+	NXGE_FM_EREPORT_RDMC_RCRFULL,
+	NXGE_FM_EREPORT_RDMC_RBRFULL,
+	NXGE_FM_EREPORT_RDMC_RBRLOGPAGE,
+	NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE
+} nxge_fm_ereport_rdmc_t;
+
+/* ZCP ereports */
+typedef	enum {
+	NXGE_FM_EREPORT_ZCP_RRFIFO_UNDERRUN =
+					(FM_ZCP_ID << EREPORT_FM_ID_SHIFT),
+	NXGE_FM_EREPORT_ZCP_RSPFIFO_UNCORR_ERR,
+	NXGE_FM_EREPORT_ZCP_STAT_TBL_PERR,
+	NXGE_FM_EREPORT_ZCP_DYN_TBL_PERR,
+	NXGE_FM_EREPORT_ZCP_BUF_TBL_PERR,
+	NXGE_FM_EREPORT_ZCP_CFIFO_ECC,
+	NXGE_FM_EREPORT_ZCP_RRFIFO_OVERRUN,
+	NXGE_FM_EREPORT_ZCP_BUFFER_OVERFLOW,
+	NXGE_FM_EREPORT_ZCP_TT_PROGRAM_ERR,
+	NXGE_FM_EREPORT_ZCP_RSP_TT_INDEX_ERR,
+	NXGE_FM_EREPORT_ZCP_SLV_TT_INDEX_ERR,
+	NXGE_FM_EREPORT_ZCP_TT_INDEX_ERR,
+	NXGE_FM_EREPORT_ZCP_ACCESS_FAIL
+} nxge_fm_ereport_zcp_t;
+
+typedef enum {
+	NXGE_FM_EREPORT_RXMAC_UNDERFLOW = (FM_RXMAC_ID << EREPORT_FM_ID_SHIFT),
+	NXGE_FM_EREPORT_RXMAC_CRC_ERRCNT_EXP,
+	NXGE_FM_EREPORT_RXMAC_LENGTH_ERRCNT_EXP,
+	NXGE_FM_EREPORT_RXMAC_VIOL_ERRCNT_EXP,
+	NXGE_FM_EREPORT_RXMAC_RXFRAG_CNT_EXP,
+	NXGE_FM_EREPORT_RXMAC_ALIGN_ECNT_EXP,
+	NXGE_FM_EREPORT_RXMAC_LINKFAULT_CNT_EXP,
+	NXGE_FM_EREPORT_RXMAC_RESET_FAIL
+} nxge_fm_ereport_rxmac_t;
+
+typedef	enum {
+	NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR =
+				(FM_TXDMA_ID << EREPORT_FM_ID_SHIFT),
+	NXGE_FM_EREPORT_TDMC_MBOX_ERR,
+	NXGE_FM_EREPORT_TDMC_NACK_PREF,
+	NXGE_FM_EREPORT_TDMC_NACK_PKT_RD,
+	NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR,
+	NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW,
+	NXGE_FM_EREPORT_TDMC_CONF_PART_ERR,
+	NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR,
+	NXGE_FM_EREPORT_TDMC_RESET_FAIL
+} nxge_fm_ereport_attr_tdmc_t;
+
+typedef	enum {
+	NXGE_FM_EREPORT_TXC_RO_CORRECT_ERR =
+				(FM_TXC_ID << EREPORT_FM_ID_SHIFT),
+	NXGE_FM_EREPORT_TXC_RO_UNCORRECT_ERR,
+	NXGE_FM_EREPORT_TXC_SF_CORRECT_ERR,
+	NXGE_FM_EREPORT_TXC_SF_UNCORRECT_ERR,
+	NXGE_FM_EREPORT_TXC_ASSY_DEAD,
+	NXGE_FM_EREPORT_TXC_REORDER_ERR
+} nxge_fm_ereport_attr_txc_t;
+
+typedef	enum {
+	NXGE_FM_EREPORT_TXMAC_UNDERFLOW =
+				(FM_TXMAC_ID << EREPORT_FM_ID_SHIFT),
+	NXGE_FM_EREPORT_TXMAC_OVERFLOW,
+	NXGE_FM_EREPORT_TXMAC_TXFIFO_XFR_ERR,
+	NXGE_FM_EREPORT_TXMAC_MAX_PKT_ERR,
+	NXGE_FM_EREPORT_TXMAC_RESET_FAIL
+} nxge_fm_ereport_attr_txmac_t;
+
+typedef	enum {
+	NXGE_FM_EREPORT_ESPC_ACCESS_FAIL = (FM_ESPC_ID << EREPORT_FM_ID_SHIFT)
+} nxge_fm_ereport_espc_t;
+
+typedef	enum {
+	NXGE_FM_EREPORT_SW_INVALID_PORT_NUM = (FM_SW_ID << EREPORT_FM_ID_SHIFT),
+	NXGE_FM_EREPORT_SW_INVALID_CHAN_NUM,
+	NXGE_FM_EREPORT_SW_INVALID_PARAM
+} nxge_fm_ereport_sw_t;
+
+#define	NXGE_FM_EREPORT_UNKNOWN			0
+#define	NXGE_FM_EREPORT_UNKNOWN_NAME		""
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _SYS_NXGE_NXGE_FM_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/sys/nxge/nxge_fzc.h	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,93 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef	_SYS_NXGE_NXGE_FZC_H
+#define	_SYS_NXGE_NXGE_FZC_H
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#include <npi_vir.h>
+
+nxge_status_t nxge_fzc_intr_init(p_nxge_t);
+nxge_status_t nxge_fzc_intr_ldg_num_set(p_nxge_t);
+nxge_status_t nxge_fzc_intr_tmres_set(p_nxge_t);
+nxge_status_t nxge_fzc_intr_sid_set(p_nxge_t);
+
+nxge_status_t nxge_fzc_dmc_rx_log_page_vld(p_nxge_t, uint16_t,
+	uint32_t, boolean_t);
+nxge_status_t nxge_fzc_dmc_rx_log_page_mask(p_nxge_t, uint16_t,
+	uint32_t, uint32_t, uint32_t);
+
+void nxge_init_fzc_txdma_channels(p_nxge_t);
+
+nxge_status_t nxge_init_fzc_txdma_channel(p_nxge_t, uint16_t,
+	p_tx_ring_t, p_tx_mbox_t);
+nxge_status_t nxge_init_fzc_txdma_port(p_nxge_t);
+
+nxge_status_t nxge_init_fzc_rxdma_channel(p_nxge_t, uint16_t,
+	p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);
+
+nxge_status_t nxge_init_fzc_rdc_tbl(p_nxge_t);
+nxge_status_t nxge_init_fzc_rx_common(p_nxge_t);
+nxge_status_t nxge_init_fzc_rxdma_port(p_nxge_t);
+
+nxge_status_t nxge_init_fzc_rxdma_channel_pages(p_nxge_t,
+	uint16_t, p_rx_rbr_ring_t);
+
+nxge_status_t nxge_init_fzc_rxdma_channel_red(p_nxge_t,
+	uint16_t, p_rx_rcr_ring_t);
+
+nxge_status_t nxge_init_fzc_rxdma_channel_clrlog(p_nxge_t,
+	uint16_t, p_rx_rbr_ring_t);
+
+nxge_status_t nxge_init_fzc_txdma_channel_pages(p_nxge_t,
+	uint16_t, p_tx_ring_t);
+
+nxge_status_t nxge_init_fzc_txdma_channel_drr(p_nxge_t, uint16_t,
+	p_tx_ring_t);
+
+nxge_status_t nxge_init_fzc_txdma_port(p_nxge_t);
+
+void nxge_init_fzc_ldg_num(p_nxge_t);
+void nxge_init_fzc_sys_int_data(p_nxge_t);
+void nxge_init_fzc_ldg_int_timer(p_nxge_t);
+nxge_status_t nxge_fzc_sys_err_mask_set(p_nxge_t, uint64_t);
+
+#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
+nxge_status_t nxge_init_hv_fzc_rxdma_channel_pages(p_nxge_t,
+	uint16_t, p_rx_rbr_ring_t);
+nxge_status_t nxge_init_hv_fzc_txdma_channel_pages(p_nxge_t,
+	uint16_t, p_tx_ring_t);
+#endif
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _SYS_NXGE_NXGE_FZC_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/sys/nxge/nxge_hw.h	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,1057 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef	_SYS_NXGE_NXGE_HW_H
+#define	_SYS_NXGE_NXGE_HW_H
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#if	!defined(_BIG_ENDIAN) && !defined(_LITTLE_ENDIAN) && \
+		!defined(__BIG_ENDIAN) && !defined(__LITTLE_ENDIAN)
+#error	Host endianness not defined
+#endif
+
+#if	!defined(_BIT_FIELDS_HTOL) && !defined(_BIT_FIELDS_LTOH) && \
+		!defined(__BIT_FIELDS_HTOL) && !defined(__BIT_FIELDS_LTOH)
+#error	Bit ordering not defined
+#endif
+
+#include <nxge_fflp_hw.h>
+#include <nxge_ipp_hw.h>
+#include <nxge_mac_hw.h>
+#include <nxge_rxdma_hw.h>
+#include <nxge_txc_hw.h>
+#include <nxge_txdma_hw.h>
+#include <nxge_zcp_hw.h>
+#include <nxge_espc_hw.h>
+#include <nxge_n2_esr_hw.h>
+#include <nxge_sr_hw.h>
+#include <nxge_phy_hw.h>
+
+
+/* Modes of NXGE core */
+typedef	enum nxge_mode_e {
+	NXGE_MODE_NE		= 1,
+	NXGE_MODE_N2		= 2
+} nxge_mode_t;
+
+/*
+ * Function Control Register
+ * (Bit 31 is reset to 0; once it reads back as 0, the register is free
+ * to use.  Once done with it, bits 0:15 can be used to store SW status.)
+ */
+#define	DEV_FUNC_SR_REG			(PIO + 0x10000)
+#define	DEV_FUNC_SR_SR_SHIFT		0
+#define	DEV_FUNC_SR_SR_MASK		0x000000000000FFFFULL
+#define	DEV_FUNC_SR_FUNCID_SHIFT	16
+#define	DEV_FUNC_SR_FUNCID_MASK		0x0000000000030000ULL
+#define	DEV_FUNC_SR_TAS_SHIFT		31
+#define	DEV_FUNC_SR_TAS_MASK		0x0000000080000000ULL
+
+typedef union _dev_func_sr_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t tas:1;
+			uint32_t res2:13;
+			uint32_t funcid:2;
+			uint32_t sr:16;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t sr:16;
+			uint32_t funcid:2;
+			uint32_t res2:13;
+			uint32_t tas:1;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} dev_func_sr_t, *p_dev_func_sr_t;
+
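+/*
+ * Illustrative use of the shared register (a sketch only; rd64() stands
+ * in for whatever register read routine the driver actually uses):
+ *
+ *	dev_func_sr_t sr;
+ *	sr.value = rd64(DEV_FUNC_SR_REG);
+ *	if (sr.bits.ldw.tas == 0)
+ *		this function may use bits 15:0 to store SW status
+ */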
+
+/*
+ * Multi Partition Control Register (partition manager)
+ */
+#define	MULTI_PART_CTL_REG	(FZC_PIO + 0x00000)
+#define	MULTI_PART_CTL_MPC	0x0000000000000001ULL
+
+typedef union _multi_part_ctl_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res1:31;
+			uint32_t mpc:1;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t mpc:1;
+			uint32_t res1:31;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} multi_part_ctl_t, *p_multi_part_ctl_t;
+
+/*
+ * Virtual DMA CSR Address (partition manager)
+ */
+#define	VADDR_REG		(PIO_VADDR + 0x00000)
+
+/*
+ * DMA Channel Binding Register (partition manager)
+ */
+#define	DMA_BIND_REG		(FZC_PIO + 0x10000)
+#define	DMA_BIND_RX_SHIFT	0
+#define	DMA_BIND_RX_MASK	0x000000000000001FULL
+#define	DMA_BIND_RX_BIND_SHIFT	5
+#define	DMA_BIND_RX_BIND_SET	0x0000000000000020ULL
+#define	DMA_BIND_RX_BIND_MASK	0x0000000000000020ULL
+#define	DMA_BIND_TX_SHIFT	8
+#define	DMA_BIND_TX_MASK	0x0000000000001f00ULL
+#define	DMA_BIND_TX_BIND_SHIFT	13
+#define	DMA_BIND_TX_BIND_SET	0x0000000000002000ULL
+#define	DMA_BIND_TX_BIND_MASK	0x0000000000002000ULL
+
+typedef union _dma_bind_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res1_1:16;
+			uint32_t tx_bind:1;
+			uint32_t tx:5;
+			uint32_t res2:2;
+			uint32_t rx_bind:1;
+			uint32_t rx:5;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t rx:5;
+			uint32_t rx_bind:1;
+			uint32_t res2:2;
+			uint32_t tx:5;
+			uint32_t tx_bind:1;
+			uint32_t res1_1:16;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+}  dma_bind_t, *p_dma_bind_t;
+
+/*
+ * System interrupts:
+ *	Logical device and group definitions.
+ */
+#define	NXGE_INT_MAX_LDS		69
+#define	NXGE_INT_MAX_LDGS		64
+#define	NXGE_LDGRP_PER_NIU_PORT		(NXGE_INT_MAX_LDGS/2)
+#define	NXGE_LDGRP_PER_NEP_PORT		(NXGE_INT_MAX_LDGS/4)
+#define	NXGE_LDGRP_PER_2PORTS		(NXGE_INT_MAX_LDGS/2)
+#define	NXGE_LDGRP_PER_4PORTS		(NXGE_INT_MAX_LDGS/4)
+
+#define	NXGE_RDMA_LD_START		0
+#define	NXGE_TDMA_LD_START		32
+#define	NXGE_MIF_LD			63
+#define	NXGE_MAC_LD_START		64
+#define	NXGE_MAC_LD_PORT0		64
+#define	NXGE_MAC_LD_PORT1		65
+#define	NXGE_MAC_LD_PORT2		66
+#define	NXGE_MAC_LD_PORT3		67
+#define	NXGE_SYS_ERROR_LD		68
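+/*
+ * For illustration (assuming channels map linearly onto the ranges
+ * above): RX DMA channel 5 -> LD 5, TX DMA channel 5 -> LD 37
+ * (NXGE_TDMA_LD_START + 5), MAC port 1 -> LD 65, MIF -> LD 63.
+ */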
+
+/*
+ * Logical Device Group Number
+ */
+#define	LDG_NUM_REG		(FZC_PIO + 0x20000)
+#define	LDG_NUM_NUM_SHIFT	0
+#define	LDG_NUM_NUM_MASK	0x000000000000001FULL
+
+typedef union _ldg_num_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res1_1:26;
+			uint32_t num:6;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t num:6;
+			uint32_t res1_1:26;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} ldg_num_t, *p_ldg_num_t;
+
+/*
+ * Logical Device State Vector
+ */
+#define	LDSV0_REG		(PIO_LDSV + 0x00000)
+#define	LDSV0_LDF_SHIFT		0
+#define	LDSV0_LDF_MASK		0x00000000000003FFULL
+#define	LDG_NUM_NUM_MASK	0x000000000000001FULL
+#define	LDSV_MASK_ALL		0x0000000000000001ULL
+
+/*
+ * Logical Device State Vector 1
+ */
+#define	LDSV1_REG		(PIO_LDSV + 0x00008)
+
+/*
+ * Logical Device State Vector 2
+ */
+#define	LDSV2_REG		(PIO_LDSV + 0x00010)
+
+/* For Logical Device State Vector 0 and 1 */
+typedef union _ldsv_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		uint32_t ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} ldsv_t, *p_ldsv_t;
+
+#define	LDSV2_LDF0_SHIFT		0
+#define	LDSV2_LDF0_MASK			0x000000000000001FULL
+#define	LDSV2_LDF1_SHIFT		5
+#define	LDSV2_LDF1_MASK			0x00000000000001E0ULL
+
+typedef union _ldsv2_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res1_1:22;
+			uint32_t ldf1:5;
+			uint32_t ldf0:5;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t ldf0:5;
+			uint32_t ldf1:5;
+			uint32_t res1_1:22;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} ldsv2_t, *p_ldsv2_t;
+
+/*
+ * Logical Device Interrupt Mask 0
+ */
+#define	LD_IM0_REG		(PIO_IMASK0 + 0x00000)
+#define	LD_IM0_SHIFT		0
+#define	LD_IM0_MASK		0x0000000000000003ULL
+#define	LD_IM_MASK		0x0000000000000003ULL
+
+/*
+ * Logical Device Interrupt Mask 1
+ */
+#define	LD_IM1_REG		(PIO_IMASK1 + 0x00000)
+#define	LD_IM1_SHIFT		0
+#define	LD_IM1_MASK		0x0000000000000003ULL
+
+/* For Logical Device Interrupt Mask 0 and 1 */
+typedef union _ld_im_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res1_1:30;
+			uint32_t ldf_mask:2;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t ldf_mask:2;
+			uint32_t res1_1:30;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} ld_im_t, *p_ld_im_t;
+
+/*
+ * Logical Device Group Interrupt Management
+ */
+#define	LDGIMGN_REG		(PIO_LDSV + 0x00018)
+#define	LDGIMGN_TIMER_SHIFT	0
+#define	LDGIMGM_TIMER_MASK	0x000000000000003FULL
+#define	LDGIMGN_ARM_SHIFT	31
+#define	LDGIMGM_ARM		0x0000000080000000ULL
+#define	LDGIMGM_ARM_MASK	0x0000000080000000ULL
+
+typedef union _ldgimgm_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint32_t arm:1;
+		uint32_t res2:25;
+		uint32_t timer:6;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint32_t timer:6;
+		uint32_t res2:25;
+		uint32_t arm:1;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} ldgimgm_t, *p_ldgimgm_t;
+
+/*
+ * Logical Device Group Interrupt Timer Resolution
+ */
+#define	LDGITMRES_REG		(FZC_PIO + 0x00008)
+#define	LDGTITMRES_RES_SHIFT	0			/* bits 19:0 */
+#define	LDGTITMRES_RES_MASK	0x00000000000FFFFFULL
+typedef union _ldgitmres_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint32_t res1_1:12;
+		uint32_t res:20;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint32_t res:20;
+		uint32_t res1_1:12;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} ldgitmres_t, *p_ldgitmres_t;
+
+/*
+ * System Interrupt Data
+ */
+#define	SID_REG			(FZC_PIO + 0x10200)
+#define	SID_DATA_SHIFT		0			/* bits 6:0 */
+#define	SID_DATA_MASK		0x000000000000007FULL
+#define	SID_DATA_INTNUM_SHIFT	0			/* bits 4:0 */
+#define	SID_DATA_INTNUM_MASK	0x000000000000001FULL
+#define	SID_DATA_FUNCNUM_SHIFT	5			/* bits 6:5 */
+#define	SID_DATA_FUNCNUM_MASK	0x0000000000000060ULL
+#define	SID_PCI_FUNCTION_SHIFT	(1 << 5)
+#define	SID_N2_INDEX		(1 << 6)
+
+#define	SID_DATA(f, v)		((f << SID_DATA_FUNCNUM_SHIFT) |	\
+				((v << SID_DATA_SHIFT) & SID_DATA_INTNUM_MASK))
+
+#define	SID_DATA_N2(v)		(v | SID_N2_INDEX)
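+/*
+ * For example (illustrative values only), SID_DATA(2, 5) yields
+ * (2 << 5) | 5 = 0x45, and SID_DATA_N2(5) yields 5 | 0x40 = 0x45.
+ */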
+
+typedef union _sid_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint32_t res1_1:25;
+		uint32_t data:7;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint32_t data:7;
+		uint32_t res1_1:25;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} sid_t, *p_sid_t;
+
+/*
+ * Reset Control
+ */
+#define	RST_CTL_REG		(FZC_PIO + 0x00038)
+#define	RST_CTL_MAC_RST3	0x0000000000400000ULL
+#define	RST_CTL_MAC_RST3_SHIFT	22
+#define	RST_CTL_MAC_RST2	0x0000000000200000ULL
+#define	RST_CTL_MAC_RST2_SHIFT	21
+#define	RST_CTL_MAC_RST1	0x0000000000100000ULL
+#define	RST_CTL_MAC_RST1_SHIFT	20
+#define	RST_CTL_MAC_RST0	0x0000000000080000ULL
+#define	RST_CTL_MAC_RST0_SHIFT	19
+#define	RST_CTL_EN_ACK_TO	0x0000000000000800ULL
+#define	RST_CTL_EN_ACK_TO_SHIFT	11
+#define	RST_CTL_ACK_TO_MASK	0x00000000000007FEULL
+#define	RST_CTL_ACK_TO_SHIFT	1
+
+
+typedef union _rst_ctl_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint32_t res1:9;
+		uint32_t mac_rst3:1;
+		uint32_t mac_rst2:1;
+		uint32_t mac_rst1:1;
+		uint32_t mac_rst0:1;
+		uint32_t res2:7;
+		uint32_t ack_to_en:1;
+		uint32_t ack_to_val:10;
+		uint32_t res3:1;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint32_t res3:1;
+		uint32_t ack_to_val:10;
+		uint32_t ack_to_en:1;
+		uint32_t res2:7;
+		uint32_t mac_rst0:1;
+		uint32_t mac_rst1:1;
+		uint32_t mac_rst2:1;
+		uint32_t mac_rst3:1;
+		uint32_t res1:9;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} rst_ctl_t, *p_rst_ctl_t;
+
+/*
+ * System Error Mask
+ */
+#define	SYS_ERR_MASK_REG	(FZC_PIO + 0x00090)
+
+/*
+ * System Error Status
+ */
+#define	SYS_ERR_STAT_REG	(FZC_PIO + 0x00098)
+
+
+#define	SYS_ERR_META2_MASK	0x0000000000000400ULL
+#define	SYS_ERR_META2_SHIFT	10
+#define	SYS_ERR_META1_MASK	0x0000000000000200ULL
+#define	SYS_ERR_META1_SHIFT	9
+#define	SYS_ERR_PEU_MASK	0x0000000000000100ULL
+#define	SYS_ERR_PEU_SHIFT	8
+#define	SYS_ERR_TXC_MASK	0x0000000000000080ULL
+#define	SYS_ERR_TXC_SHIFT	7
+#define	SYS_ERR_RDMC_MASK	0x0000000000000040ULL
+#define	SYS_ERR_RDMC_SHIFT	6
+#define	SYS_ERR_TDMC_MASK	0x0000000000000020ULL
+#define	SYS_ERR_TDMC_SHIFT	5
+#define	SYS_ERR_ZCP_MASK	0x0000000000000010ULL
+#define	SYS_ERR_ZCP_SHIFT	4
+#define	SYS_ERR_FFLP_MASK	0x0000000000000008ULL
+#define	SYS_ERR_FFLP_SHIFT	3
+#define	SYS_ERR_IPP_MASK	0x0000000000000004ULL
+#define	SYS_ERR_IPP_SHIFT	2
+#define	SYS_ERR_MAC_MASK	0x0000000000000002ULL
+#define	SYS_ERR_MAC_SHIFT	1
+#define	SYS_ERR_SMX_MASK	0x0000000000000001ULL
+#define	SYS_ERR_SMX_SHIFT	0
+#define	SYS_ERR_MASK_ALL	(SYS_ERR_SMX_MASK | SYS_ERR_MAC_MASK | \
+				SYS_ERR_IPP_MASK | SYS_ERR_FFLP_MASK | \
+				SYS_ERR_ZCP_MASK | SYS_ERR_TDMC_MASK | \
+				SYS_ERR_RDMC_MASK | SYS_ERR_TXC_MASK | \
+				SYS_ERR_PEU_MASK | SYS_ERR_META1_MASK | \
+				SYS_ERR_META2_MASK)
+
+
+typedef union _sys_err_mask_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint32_t res:21;
+		uint32_t meta2:1;
+		uint32_t meta1:1;
+		uint32_t peu:1;
+		uint32_t txc:1;
+		uint32_t rdmc:1;
+		uint32_t tdmc:1;
+		uint32_t zcp:1;
+		uint32_t fflp:1;
+		uint32_t ipp:1;
+		uint32_t mac:1;
+		uint32_t smx:1;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint32_t smx:1;
+		uint32_t mac:1;
+		uint32_t ipp:1;
+		uint32_t fflp:1;
+		uint32_t zcp:1;
+		uint32_t tdmc:1;
+		uint32_t rdmc:1;
+		uint32_t txc:1;
+		uint32_t peu:1;
+		uint32_t meta1:1;
+		uint32_t meta2:1;
+		uint32_t res:21;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} sys_err_mask_t, sys_err_stat_t, *p_sys_err_mask_t, *p_sys_err_stat_t;
+
+
+/*
+ * Meta Arbiter Dirty Transaction ID Control
+ */
+
+#define	DIRTY_TID_CTL_REG		(FZC_PIO + 0x0010)
+#define	DIRTY_TID_CTL_WR_THRES_MASK	0x00000000003F0000ULL
+#define	DIRTY_TID_CTL_WR_THRES_SHIFT    16
+#define	DIRTY_TID_CTL_RD_THRES_MASK	0x00000000000003F0ULL
+#define	DIRTY_TID_CTL_RD_THRES_SHIFT	4
+#define	DIRTY_TID_CTL_DTID_CLR		0x0000000000000002ULL
+#define	DIRTY_TID_CTL_DTID_CLR_SHIFT	1
+#define	DIRTY_TID_CTL_DTID_EN		0x0000000000000001ULL
+#define	DIRTY_TID_CTL_DTID_EN_SHIFT	0
+
+typedef union _dty_tid_ctl_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint32_t res1:10;
+		uint32_t np_wr_thres_val:6;
+		uint32_t res2:6;
+		uint32_t np_rd_thres_val:6;
+		uint32_t res3:2;
+		uint32_t dty_tid_clr:1;
+		uint32_t dty_tid_en:1;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint32_t dty_tid_en:1;
+		uint32_t dty_tid_clr:1;
+		uint32_t res3:2;
+		uint32_t np_rd_thres_val:6;
+		uint32_t res2:6;
+		uint32_t np_wr_thres_val:6;
+		uint32_t res1:10;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} dty_tid_ctl_t, *p_dty_tid_ctl_t;
+
+
+/*
+ * Meta Arbiter Dirty Transaction ID Status
+ */
+#define	DIRTY_TID_STAT_REG			(FZC_PIO + 0x0018)
+#define	DIRTY_TID_STAT_WR_TID_DTY_CNT_MASK	0x0000000000003F00ULL
+#define	DIRTY_TID_STAT_WR_TID_DTY_CNT_SHIFT	8
+#define	DIRTY_TID_STAT_RD_TID_DTY_CNT_MASK	0x000000000000003FULL
+#define	DIRTY_TID_STAT_RD_TID_DTY_CNT_SHIFT	0
+
+typedef union _dty_tid_stat_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint32_t res1:18;
+		uint32_t wr_tid_dirty_cnt:6;
+		uint32_t res2:2;
+		uint32_t rd_tid_dirty_cnt:6;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint32_t rd_tid_dirty_cnt:6;
+		uint32_t res2:2;
+		uint32_t wr_tid_dirty_cnt:6;
+		uint32_t res1:18;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} dty_tid_stat_t, *p_dty_tid_stat_t;
+
+
+/*
+ * SMX Registers
+ */
+#define	SMX_CFIG_DAT_REG		(FZC_PIO + 0x00040)
+#define	SMX_CFIG_DAT_RAS_DET_EN_MASK	0x0000000080000000ULL
+#define	SMX_CFIG_DAT_RAS_DET_EN_SHIFT	31
+#define	SMX_CFIG_DAT_RAS_INJ_EN_MASK	0x0000000040000000ULL
+#define	SMX_CFIG_DAT_RAS_INJ_EN_SHIFT	30
+#define	SMX_CFIG_DAT_TRANS_TO_MASK	0x000000000FFFFFFFULL
+#define	SMX_CFIG_DAT_TRANS_TO_SHIFT	0
+
+typedef union _smx_cfg_dat_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint32_t res_err_det:1;
+		uint32_t ras_err_inj_en:1;
+		uint32_t res:2;
+		uint32_t trans_to_val:28;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint32_t trans_to_val:28;
+		uint32_t res:2;
+		uint32_t ras_err_inj_en:1;
+		uint32_t res_err_det:1;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} smx_cfg_dat_t, *p_smx_cfg_dat_t;
+
+
+#define	SMX_INT_STAT_REG	(FZC_PIO + 0x00048)
+#define	SMX_INT_STAT_SM_MASK	0x00000000FFFFFFC0ULL
+#define	SMX_INT_STAT_SM_SHIFT	6
+
+typedef union _smx_int_stat_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint32_t st_mc_stat:26;
+		uint32_t res:6;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint32_t res:6;
+		uint32_t st_mc_stat:26;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} smx_int_stat_t, *p_smx_int_stat_t;
+
+
+#define		SMX_CTL_REG	(FZC_PIO + 0x00050)
+
+typedef union _smx_ctl_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint32_t res1:21;
+		uint32_t resp_err_inj:3;
+		uint32_t res2:1;
+		uint32_t xtb_err_inj:3;
+		uint32_t res3:1;
+		uint32_t dbg_sel:3;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint32_t dbg_sel:3;
+		uint32_t res3:1;
+		uint32_t xtb_err_inj:3;
+		uint32_t res2:1;
+		uint32_t resp_err_inj:3;
+		uint32_t res1:21;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} smx_ctl_t, *p_smx_ctl_t;
+
+
+#define	SMX_DBG_VEC_REG	(FZC_PIO + 0x00058)
+
+typedef union _smx_dbg_vec_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+		uint32_t dbg_tng_vec;
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} smx_dbg_vec_t, *p_smx_dbg_vec_t;
+
+
+/*
+ * Debug registers
+ */
+
+#define	PIO_DBG_SEL_REG	(FZC_PIO + 0x00060)
+
+typedef union _pio_dbg_sel_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+		uint32_t sel;
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} pio_dbg_sel_t, *p_pio_dbg_sel_t;
+
+
+#define	PIO_TRAIN_VEC_REG	(FZC_PIO + 0x00068)
+
+typedef union _pio_tng_vec_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+		uint32_t training_vec;
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} pio_tng_vec_t, *p_pio_tng_vec_t;
+
+#define	PIO_ARB_CTL_REG	(FZC_PIO + 0x00070)
+
+typedef union _pio_arb_ctl_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+		uint32_t ctl;
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} pio_arb_ctl_t, *p_pio_arb_ctl_t;
+
+#define	PIO_ARB_DBG_VEC_REG	(FZC_PIO + 0x00078)
+
+typedef union _pio_arb_dbg_vec_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+		uint32_t dbg_vector;
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} pio_arb_dbg_vec_t, *p_pio_arb_dbg_vec_t;
+
+
+/*
+ * GPIO Registers
+ */
+
+#define	GPIO_EN_REG	(FZC_PIO + 0x00028)
+#define	GPIO_EN_ENABLE_MASK	 0x000000000000FFFFULL
+#define	GPIO_EN_ENABLE_SHIFT	 0
+typedef union _gpio_en_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint32_t res:16;
+		uint32_t enable:16;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint32_t enable:16;
+		uint32_t res:16;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} gpio_en_t, *p_gpio_en_t;
+
+#define	GPIO_DATA_IN_REG	(FZC_PIO + 0x00030)
+#define	GPIO_DATA_IN_MASK	0x000000000000FFFFULL
+#define	GPIO_DATA_IN_SHIFT	0
+typedef union _gpio_data_in_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint32_t res:16;
+		uint32_t data_in:16;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint32_t data_in:16;
+		uint32_t res:16;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} gpio_data_in_t, *p_gpio_data_in_t;
+
+
+/*
+ * PCI Express Interface Module (PIM) registers
+ */
+#define	PIM_CONTROL_REG	(FZC_PIM + 0x0)
+#define	PIM_CONTROL_DBG_SEL_MASK 0x000000000000000FULL
+#define	PIM_CONTROL_DBG_SEL_SHIFT	0
+typedef union _pim_ctl_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint32_t res:28;
+		uint32_t dbg_sel:4;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint32_t dbg_sel:4;
+		uint32_t res:28;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} pim_ctl_t, *p_pim_ctl_t;
+
+#define	PIM_DBG_TRAINING_VEC_REG	(FZC_PIM + 0x00008)
+#define	PIM_DBG_TRAINING_VEC_MASK	0x00000000FFFFFFFFULL
+
+#define	PIM_INTR_STATUS_REG		(FZC_PIM + 0x00010)
+#define	PIM_INTR_STATUS_MASK		0x00000000FFFFFFFFULL
+
+#define	PIM_INTERNAL_STATUS_REG		(FZC_PIM + 0x00018)
+#define	PIM_INTERNAL_STATUS_MASK	0x00000000FFFFFFFFULL
+
+#define	PIM_INTR_MASK_REG		(FZC_PIM + 0x00020)
+#define	PIM_INTR_MASK_MASK		0x00000000FFFFFFFFULL
+
+/*
+ * Partitioning Logical pages Definition registers.
+ * (used by both receive and transmit DMA channels)
+ */
+
+/* Logical page definitions */
+typedef union _log_page_vld_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res1_1:28;
+			uint32_t func:2;
+			uint32_t page1:1;
+			uint32_t page0:1;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t page0:1;
+			uint32_t page1:1;
+			uint32_t func:2;
+			uint32_t res1_1:28;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} log_page_vld_t, *p_log_page_vld_t;
+
+
+#define	DMA_LOG_PAGE_MASK_SHIFT		0
+#define	DMA_LOG_PAGE_MASK_MASK		0x00000000ffffffffULL
+
+/* Receive Logical Page Mask */
+typedef union _log_page_mask_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t mask:32;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t mask:32;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} log_page_mask_t, *p_log_page_mask_t;
+
+
+/* Receive Logical Page Value */
+#define	DMA_LOG_PAGE_VALUE_SHIFT	0
+#define	DMA_LOG_PAGE_VALUE_MASK		0x00000000ffffffffULL
+
+/* Receive Logical Page Value */
+typedef union _log_page_value_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t value:32;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t value:32;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} log_page_value_t, *p_log_page_value_t;
+
+/* Receive Logical Page Relocation */
+#define	DMA_LOG_PAGE_RELO_SHIFT		0			/* bits 31:0 */
+#define	DMA_LOG_PAGE_RELO_MASK		0x00000000ffffffffULL
+
+/* Receive Logical Page Relocation */
+typedef union _log_page_relo_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t relo:32;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t relo:32;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} log_page_relo_t, *p_log_page_relo_t;
+
+
+/* Receive Logical Page Handle */
+#define	DMA_LOG_PAGE_HANDLE_SHIFT	0			/* bits 19:0 */
+#define	DMA_LOG_PAGE_HANDLE_MASK	0x00000000ffffffffULL
+
+/* Receive Logical Page Handle */
+typedef union _log_page_hdl_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res1_1:12;
+			uint32_t handle:20;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t handle:20;
+			uint32_t res1_1:12;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} log_page_hdl_t, *p_log_page_hdl_t;
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _SYS_NXGE_NXGE_HW_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/sys/nxge/nxge_impl.h	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,878 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef	_SYS_NXGE_NXGE_IMPL_H
+#define	_SYS_NXGE_NXGE_IMPL_H
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+/*
+ * NIU HV API version definitions.
+ */
+#define	NIU_MAJOR_VER		1
+#define	NIU_MINOR_VER		1
+
+/*
+ * NIU HV API v1.0 definitions
+ */
+#define	N2NIU_RX_LP_CONF		0x142
+#define	N2NIU_RX_LP_INFO		0x143
+#define	N2NIU_TX_LP_CONF		0x144
+#define	N2NIU_TX_LP_INFO		0x145
+
+#ifndef _ASM
+
+#include	<sys/types.h>
+#include	<sys/byteorder.h>
+#include	<sys/debug.h>
+#include	<sys/stropts.h>
+#include	<sys/stream.h>
+#include	<sys/strlog.h>
+#ifndef	COSIM
+#include	<sys/strsubr.h>
+#endif
+#include	<sys/cmn_err.h>
+#include	<sys/vtrace.h>
+#include	<sys/kmem.h>
+#include	<sys/ddi.h>
+#include	<sys/sunddi.h>
+#include	<sys/strsun.h>
+#include	<sys/stat.h>
+#include	<sys/cpu.h>
+#include	<sys/kstat.h>
+#include	<inet/common.h>
+#include	<inet/ip.h>
+#include	<sys/dlpi.h>
+#include	<inet/nd.h>
+#include	<netinet/in.h>
+#include	<sys/ethernet.h>
+#include	<sys/vlan.h>
+#include	<sys/pci.h>
+#include	<sys/taskq.h>
+#include	<sys/atomic.h>
+
+#include 	<sys/nxge/nxge_defs.h>
+#include 	<sys/nxge/nxge_hw.h>
+#include 	<sys/nxge/nxge_mac.h>
+#include	<sys/nxge/nxge_mii.h>
+#include	<sys/nxge/nxge_fm.h>
+#if !defined(IODIAG)
+#include	<sys/netlb.h>
+#endif
+
+#include	<sys/ddi_intr.h>
+
+#if	defined(_KERNEL)
+#include 	<sys/mac.h>
+#include	<sys/mac_impl.h>
+#include	<sys/mac_ether.h>
+#endif
+
+#if	defined(sun4v)
+#include	<sys/hypervisor_api.h>
+#include 	<sys/machsystm.h>
+#include 	<sys/hsvc.h>
+#endif
+
+/*
+ * Handy macros (taken from bge driver)
+ */
+#define	RBR_SIZE			4
+#define	DMA_COMMON_CHANNEL(area)	((area.dma_channel))
+#define	DMA_COMMON_VPTR(area)		((area.kaddrp))
+#define	DMA_COMMON_VPTR_INDEX(area, index)	\
+					(((char *)(area.kaddrp)) + \
+					(index * RBR_SIZE))
+#define	DMA_COMMON_HANDLE(area)		((area.dma_handle))
+#define	DMA_COMMON_ACC_HANDLE(area)	((area.acc_handle))
+#define	DMA_COMMON_IOADDR(area)		((area.dma_cookie.dmac_laddress))
+#define	DMA_COMMON_IOADDR_INDEX(area, index)	\
+					((area.dma_cookie.dmac_laddress) + \
+						(index * RBR_SIZE))
+
+#define	DMA_NPI_HANDLE(area)		((area.npi_handle))
+
+#define	DMA_COMMON_SYNC(area, flag)	((void) ddi_dma_sync((area).dma_handle,\
+						(area).offset, (area).alength, \
+						(flag)))
+#define	DMA_COMMON_SYNC_OFFSET(area, bufoffset, len, flag)	\
+					((void) ddi_dma_sync((area).dma_handle,\
+					(area.offset + bufoffset), len, \
+					(flag)))
+
+#define	DMA_COMMON_SYNC_RBR_DESC(area, index, flag)	\
+				((void) ddi_dma_sync((area).dma_handle,\
+				(index * RBR_SIZE), RBR_SIZE,	\
+				(flag)))
+
+#define	DMA_COMMON_SYNC_RBR_DESC_MULTI(area, index, count, flag)	\
+			((void) ddi_dma_sync((area).dma_handle,\
+			(index * RBR_SIZE), count * RBR_SIZE,	\
+				(flag)))
+#define	DMA_COMMON_SYNC_ENTRY(area, index, flag)	\
+				((void) ddi_dma_sync((area).dma_handle,\
+				(index * (area).block_size),	\
+				(area).block_size, \
+				(flag)))
+
+#define	NEXT_ENTRY(index, wrap)		((index + 1) & wrap)
+#define	NEXT_ENTRY_PTR(ptr, first, last)	\
+					((ptr == last) ? first : (ptr + 1))
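+/*
+ * Illustrative use of the sync macros ("desc_area" is a hypothetical
+ * nxge_dma_common_t holding a descriptor ring):
+ *
+ *	DMA_COMMON_SYNC(desc_area, DDI_DMA_SYNC_FORKERNEL);
+ *	before the CPU reads descriptors written by the device
+ */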
+
+/*
+ * NPI related macros
+ */
+#define	NXGE_DEV_NPI_HANDLE(nxgep)	(nxgep->npi_handle)
+
+#define	NPI_PCI_ACC_HANDLE_SET(nxgep, ah) (nxgep->npi_pci_handle.regh = ah)
+#define	NPI_PCI_ADD_HANDLE_SET(nxgep, ap) (nxgep->npi_pci_handle.regp = ap)
+
+#define	NPI_ACC_HANDLE_SET(nxgep, ah)	(nxgep->npi_handle.regh = ah)
+#define	NPI_ADD_HANDLE_SET(nxgep, ap)	\
+		nxgep->npi_handle.is_vraddr = B_FALSE;	\
+		nxgep->npi_handle.function.instance = nxgep->instance;   \
+		nxgep->npi_handle.function.function = nxgep->function_num;   \
+		nxgep->npi_handle.nxgep = (void *) nxgep;   \
+		nxgep->npi_handle.regp = ap;
+
+#define	NPI_REG_ACC_HANDLE_SET(nxgep, ah) (nxgep->npi_reg_handle.regh = ah)
+#define	NPI_REG_ADD_HANDLE_SET(nxgep, ap)	\
+		nxgep->npi_reg_handle.is_vraddr = B_FALSE;	\
+		nxgep->npi_handle.function.instance = nxgep->instance;   \
+		nxgep->npi_handle.function.function = nxgep->function_num;   \
+		nxgep->npi_reg_handle.nxgep = (void *) nxgep;   \
+		nxgep->npi_reg_handle.regp = ap;
+
+#define	NPI_MSI_ACC_HANDLE_SET(nxgep, ah) (nxgep->npi_msi_handle.regh = ah)
+#define	NPI_MSI_ADD_HANDLE_SET(nxgep, ap) (nxgep->npi_msi_handle.regp = ap)
+
+#define	NPI_VREG_ACC_HANDLE_SET(nxgep, ah) (nxgep->npi_vreg_handle.regh = ah)
+#define	NPI_VREG_ADD_HANDLE_SET(nxgep, ap)	\
+		nxgep->npi_vreg_handle.is_vraddr = B_TRUE; \
+		nxgep->npi_handle.function.instance = nxgep->instance;   \
+		nxgep->npi_handle.function.function = nxgep->function_num;   \
+		nxgep->npi_vreg_handle.nxgep = (void *) nxgep;   \
+		nxgep->npi_vreg_handle.regp = ap;
+
+#define	NPI_V2REG_ACC_HANDLE_SET(nxgep, ah) (nxgep->npi_v2reg_handle.regh = ah)
+#define	NPI_V2REG_ADD_HANDLE_SET(nxgep, ap)	\
+		nxgep->npi_v2reg_handle.is_vraddr = B_TRUE; \
+		nxgep->npi_handle.function.instance = nxgep->instance;   \
+		nxgep->npi_handle.function.function = nxgep->function_num;   \
+		nxgep->npi_v2reg_handle.nxgep = (void *) nxgep;   \
+		nxgep->npi_v2reg_handle.regp = ap;
+
+#define	NPI_PCI_ACC_HANDLE_GET(nxgep) (nxgep->npi_pci_handle.regh)
+#define	NPI_PCI_ADD_HANDLE_GET(nxgep) (nxgep->npi_pci_handle.regp)
+#define	NPI_ACC_HANDLE_GET(nxgep) (nxgep->npi_handle.regh)
+#define	NPI_ADD_HANDLE_GET(nxgep) (nxgep->npi_handle.regp)
+#define	NPI_REG_ACC_HANDLE_GET(nxgep) (nxgep->npi_reg_handle.regh)
+#define	NPI_REG_ADD_HANDLE_GET(nxgep) (nxgep->npi_reg_handle.regp)
+#define	NPI_MSI_ACC_HANDLE_GET(nxgep) (nxgep->npi_msi_handle.regh)
+#define	NPI_MSI_ADD_HANDLE_GET(nxgep) (nxgep->npi_msi_handle.regp)
+#define	NPI_VREG_ACC_HANDLE_GET(nxgep) (nxgep->npi_vreg_handle.regh)
+#define	NPI_VREG_ADD_HANDLE_GET(nxgep) (nxgep->npi_vreg_handle.regp)
+#define	NPI_V2REG_ACC_HANDLE_GET(nxgep) (nxgep->npi_v2reg_handle.regh)
+#define	NPI_V2REG_ADD_HANDLE_GET(nxgep) (nxgep->npi_v2reg_handle.regp)
+
+#define	NPI_DMA_ACC_HANDLE_SET(dmap, ah) (dmap->npi_handle.regh = ah)
+#define	NPI_DMA_ACC_HANDLE_GET(dmap) 	(dmap->npi_handle.regh)
+
+/*
+ * DMA handles.
+ */
+#define	NXGE_DESC_D_HANDLE_GET(desc)	(desc.dma_handle)
+#define	NXGE_DESC_D_IOADD_GET(desc)	(desc.dma_cookie.dmac_laddress)
+#define	NXGE_DMA_IOADD_GET(dma_cookie) (dma_cookie.dmac_laddress)
+#define	NXGE_DMA_AREA_IOADD_GET(dma_area) (dma_area.dma_cookie.dmac_laddress)
+
+#define	LDV_ON(ldv, vector)	((vector >> ldv) & 0x1)
+#define	LDV2_ON_1(ldv, vector)	((vector >> (ldv - 64)) & 0x1)
+#define	LDV2_ON_2(ldv, vector)	(((vector >> 5) >> (ldv - 64)) & 0x1)
+
+typedef uint32_t		nxge_status_t;
+
+typedef enum  {
+	IDLE,
+	PROGRESS,
+	CONFIGURED
+} dev_func_shared_t;
+
+typedef enum  {
+	DVMA,
+	DMA,
+	SDMA
+} dma_method_t;
+
+typedef enum  {
+	BKSIZE_4K,
+	BKSIZE_8K,
+	BKSIZE_16K,
+	BKSIZE_32K
+} nxge_rx_block_size_t;
+
+#ifdef TX_ONE_BUF
+#define	TX_BCOPY_MAX 1514
+#else
+#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
+#define	TX_BCOPY_MAX	4096
+#define	TX_BCOPY_SIZE	4096
+#else
+#define	TX_BCOPY_MAX	2048
+#define	TX_BCOPY_SIZE	2048
+#endif
+#endif
+
+#define	TX_STREAM_MIN 512
+#define	TX_FASTDVMA_MIN 1024
+
+#define	NXGE_ERROR_SHOW_MAX	0
+
+/*
+ * Defaults
+ */
+#define	NXGE_RDC_RCR_THRESHOLD		8
+#define	NXGE_RDC_RCR_TIMEOUT		16
+
+#define	NXGE_RDC_RCR_THRESHOLD_MAX	1024
+#define	NXGE_RDC_RCR_TIMEOUT_MAX	64
+#define	NXGE_RDC_RCR_THRESHOLD_MIN	1
+#define	NXGE_RDC_RCR_TIMEOUT_MIN	1
+#define	NXGE_RCR_FULL_HEADER		1
+
+#define	NXGE_IS_VLAN_PACKET(ptr)				\
+	((((struct ether_vlan_header *)ptr)->ether_tpid) ==	\
+	htons(VLAN_ETHERTYPE))
+
+typedef enum {
+	NONE,
+	SMALL,
+	MEDIUM,
+	LARGE
+} dma_size_t;
+
+typedef enum {
+	USE_NONE,
+	USE_BCOPY,
+	USE_DVMA,
+	USE_DMA,
+	USE_SDMA
+} dma_type_t;
+
+typedef enum {
+	NOT_IN_USE,
+	HDR_BUF,
+	MTU_BUF,
+	RE_ASSEMBLY_BUF,
+	FREE_BUF
+} rx_page_state_t;
+
+struct _nxge_block_mv_t {
+	uint32_t msg_type;
+	dma_type_t dma_type;
+};
+
+typedef struct _nxge_block_mv_t nxge_block_mv_t, *p_nxge_block_mv_t;
+
+typedef enum {
+	NEPTUNE,	/* 4 ports */
+	NEPTUNE_2,	/* 2 ports */
+	N2_NIU		/* N2/NIU 2 ports */
+} niu_type_t;
+
+typedef enum {
+	CFG_DEFAULT = 0,	/* default cfg */
+	CFG_EQUAL,	/* Equal */
+	CFG_FAIR,	/* Fair */
+	CFG_CLASSIFY,
+	CFG_L2_CLASSIFY,
+	CFG_L3_CLASSIFY,
+	CFG_L3_DISTRIBUTE,
+	CFG_L3_WEB,
+	CFG_L3_TCAM,
+	CFG_NOT_SPECIFIED,
+	CFG_CUSTOM	/* Custom */
+} cfg_type_t;
+
+typedef enum {
+	NO_MSG = 0x0,		/* No message output or storage. */
+	CONSOLE = 0x1,		/* Messages go to the console. */
+	BUFFER = 0x2,		/* Messages go to the system buffer. */
+	CON_BUF = 0x3,		/* Messages go to the console and */
+				/* the system buffer. */
+	VERBOSE = 0x4		/* Messages go out only in VERBOSE mode. */
+} out_msg_t, *p_out_msg_t;
+
+typedef enum {
+	DBG_NO_MSG = 0x0,	/* No message output or storage. */
+	DBG_CONSOLE = 0x1,	/* Messages go to the console. */
+	DBG_BUFFER = 0x2,	/* Messages go to the system buffer. */
+	DBG_CON_BUF = 0x3,	/* Messages go to the console and */
+				/* the system buffer. */
+	STR_LOG = 4		/* Messages sent to streams logging driver. */
+} out_dbgmsg_t, *p_out_dbgmsg_t;
+
+
+
+#if defined(_KERNEL) || defined(COSIM)
+
+typedef struct ether_addr ether_addr_st, *p_ether_addr_t;
+typedef struct ether_header ether_header_t, *p_ether_header_t;
+typedef queue_t *p_queue_t;
+
+#if !defined(IODIAG)
+typedef mblk_t *p_mblk_t;
+#endif
+
+/*
+ * Common DMA data elements.
+ */
+struct _nxge_dma_common_t {
+	uint16_t		dma_channel;
+	void			*kaddrp;
+	void			*first_kaddrp;
+	void			*last_kaddrp;
+	void			*ioaddr_pp;
+	void			*first_ioaddr_pp;
+	void			*last_ioaddr_pp;
+	ddi_dma_cookie_t 	dma_cookie;
+	uint32_t		ncookies;
+
+	nxge_block_mv_t		msg_dma_flags;
+	ddi_dma_handle_t	dma_handle;
+	nxge_os_acc_handle_t	acc_handle;
+	npi_handle_t		npi_handle;
+
+	size_t			block_size;
+	uint32_t		nblocks;
+	size_t			alength;
+	uint_t			offset;
+	uint_t			dma_chunk_index;
+	void			*orig_ioaddr_pp;
+	uint64_t		orig_vatopa;
+	void			*orig_kaddrp;
+	size_t			orig_alength;
+	boolean_t		contig_alloc_type;
+};
+
+typedef struct _nxge_t nxge_t, *p_nxge_t;
+typedef struct _nxge_dma_common_t nxge_dma_common_t, *p_nxge_dma_common_t;
+
+typedef struct _nxge_dma_pool_t {
+	p_nxge_dma_common_t	*dma_buf_pool_p;
+	uint32_t		ndmas;
+	uint32_t		*num_chunks;
+	boolean_t		buf_allocated;
+} nxge_dma_pool_t, *p_nxge_dma_pool_t;
+
+/*
+ * Each logical device (69):
+ *	- LDG #
+ *	- flag bits
+ *	- masks.
+ *	- interrupt handler function.
+ *
+ * Generic system interrupt handler with two arguments:
+ *	(nxge_sys_intr_t)
+ *	Per device instance data structure
+ *	Logical group data structure.
+ *
+ * Logical device interrupt handler with two arguments:
+ *	(nxge_ldv_intr_t)
+ *	Per device instance data structure
+ *	Logical device number
+ */
+typedef struct	_nxge_ldg_t nxge_ldg_t, *p_nxge_ldg_t;
+typedef struct	_nxge_ldv_t nxge_ldv_t, *p_nxge_ldv_t;
+typedef uint_t	(*nxge_sys_intr_t)(void *arg1, void *arg2);
+typedef uint_t	(*nxge_ldv_intr_t)(void *arg1, void *arg2);
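+/*
+ * Sketch of a logical device interrupt handler matching the typedefs
+ * above (names are illustrative only, not the driver's actual handlers):
+ *
+ *	static uint_t
+ *	example_ldv_intr(void *arg1, void *arg2)
+ *	{
+ *		service the logical device identified by the arguments
+ *		return (DDI_INTR_CLAIMED);
+ *	}
+ */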
+
+/*
+ * Each logical device Group (64) needs to have the following
+ * configurations:
+ *	- timer counter (6 bits)
+ *	- timer resolution (20 bits, number of system clocks)
+ *	- system data (7 bits)
+ */
+struct _nxge_ldg_t {
+	uint8_t			ldg;		/* logical group number */
+	uint8_t			vldg_index;
+	boolean_t		arm;
+	boolean_t		interrupted;
+	uint16_t		ldg_timer;	/* counter */
+	uint8_t			func;
+	uint8_t			vector;
+	uint8_t			intdata;
+	uint8_t			nldvs;
+	p_nxge_ldv_t		ldvp;
+	nxge_sys_intr_t		sys_intr_handler;
+	uint_t			(*ih_cb_func)(caddr_t, caddr_t);
+	p_nxge_t		nxgep;
+};
+
+struct _nxge_ldv_t {
+	uint8_t			ldg_assigned;
+	uint8_t			ldv;
+	boolean_t		is_rxdma;
+	boolean_t		is_txdma;
+	boolean_t		is_mif;
+	boolean_t		is_mac;
+	boolean_t		is_syserr;
+	boolean_t		use_timer;
+	uint8_t			channel;
+	uint8_t			vdma_index;
+	uint8_t			func;
+	p_nxge_ldg_t		ldgp;
+	uint8_t			ldv_flags;
+	boolean_t		is_leve;
+	boolean_t		is_edge;
+	uint8_t			ldv_ldf_masks;
+	nxge_ldv_intr_t		ldv_intr_handler;
+	uint_t			(*ih_cb_func)(caddr_t, caddr_t);
+	p_nxge_t		nxgep;
+};
+#endif
+
+typedef struct _nxge_logical_page_t {
+	uint16_t		dma;
+	uint16_t		page;
+	boolean_t		valid;
+	uint64_t		mask;
+	uint64_t		value;
+	uint64_t		reloc;
+	uint32_t		handle;
+} nxge_logical_page_t, *p_nxge_logical_page_t;
+
+/*
+ * (Internal) return values from ioctl subroutines.
+ */
+enum nxge_ioc_reply {
+	IOC_INVAL = -1,				/* bad, NAK with EINVAL	*/
+	IOC_DONE,				/* OK, reply sent	*/
+	IOC_ACK,				/* OK, just send ACK	*/
+	IOC_REPLY,				/* OK, just send reply	*/
+	IOC_RESTART_ACK,			/* OK, restart & ACK	*/
+	IOC_RESTART_REPLY			/* OK, restart & reply	*/
+};
+
+typedef struct _pci_cfg_t {
+	uint16_t vendorid;
+	uint16_t devid;
+	uint16_t command;
+	uint16_t status;
+	uint8_t  revid;
+	uint8_t  res0;
+	uint16_t junk1;
+	uint8_t  cache_line;
+	uint8_t  latency;
+	uint8_t  header;
+	uint8_t  bist;
+	uint32_t base;
+	uint32_t base14;
+	uint32_t base18;
+	uint32_t base1c;
+	uint32_t base20;
+	uint32_t base24;
+	uint32_t base28;
+	uint32_t base2c;
+	uint32_t base30;
+	uint32_t res1[2];
+	uint8_t int_line;
+	uint8_t int_pin;
+	uint8_t	min_gnt;
+	uint8_t max_lat;
+} pci_cfg_t, *p_pci_cfg_t;
+
+#if defined(_KERNEL) || defined(COSIM)
+
+typedef struct _dev_regs_t {
+	nxge_os_acc_handle_t	nxge_pciregh;	/* PCI config DDI IO handle */
+	p_pci_cfg_t		nxge_pciregp;	/* mapped PCI registers */
+
+	nxge_os_acc_handle_t	nxge_regh;	/* device DDI IO (BAR 0) */
+	void			*nxge_regp;	/* mapped device registers */
+
+	nxge_os_acc_handle_t	nxge_msix_regh;	/* MSI/X DDI handle (BAR 2) */
+	void 			*nxge_msix_regp; /* MSI/X register */
+
+	nxge_os_acc_handle_t	nxge_vir_regh;	/* virtualization (BAR 4) */
+	unsigned char		*nxge_vir_regp;	/* virtualization register */
+
+	nxge_os_acc_handle_t	nxge_vir2_regh;	/* second virtualization */
+	unsigned char		*nxge_vir2_regp; /* second virtualization */
+
+	nxge_os_acc_handle_t	nxge_romh;	/* fcode rom handle */
+	unsigned char		*nxge_romp;	/* fcode pointer */
+} dev_regs_t, *p_dev_regs_t;
+
+
+typedef struct _nxge_mac_addr_t {
+	ether_addr_t	addr;
+	uint_t		flags;
+} nxge_mac_addr_t;
+
+/*
+ * The hardware supports 1 unique MAC and 16 alternate MACs (num_mmac)
+ * for each XMAC port and supports 1 unique MAC and 7 alternate MACs
+ * for each BMAC port.  The number of MACs assigned by the factory is
+ * different and is as follows:
+ * 	BMAC port:		   num_factory_mmac = num_mmac = 7
+ *	XMAC port on a 2-port NIC: num_factory_mmac = num_mmac - 1 = 15
+ *	XMAC port on a 4-port NIC: num_factory_mmac = 7
+ * So num_factory_mmac is never larger than num_mmac.  nxge_m_mmac_add
+ * uses num_mmac and nxge_m_mmac_reserve uses num_factory_mmac.
+ *
+ * total_factory_macs is the total number of factory MACs, including
+ * the unique MAC, assigned to a Neptune-based NIC card; it is 32.
+ */
+typedef struct _nxge_mmac_t {
+	uint8_t		total_factory_macs;
+	uint8_t		num_mmac;
+	uint8_t		num_factory_mmac;
+	nxge_mac_addr_t	mac_pool[XMAC_MAX_ADDR_ENTRY];
+	ether_addr_t	factory_mac_pool[XMAC_MAX_ADDR_ENTRY];
+	uint8_t		naddrfree;  /* number of alt mac addr available */
+} nxge_mmac_t;
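+
+/*
+ * Illustrative sketch (not part of this changeset): one way a caller
+ * could scan an nxge_mmac_t for a free alternate-MAC slot before
+ * programming it into the hardware.  The helper name and the
+ * EXAMPLE_SLOT_USED flag are hypothetical; only the structure fields
+ * come from this header.
+ *
+ *	#define	EXAMPLE_SLOT_USED	0x1	// hypothetical flags bit
+ *
+ *	static int
+ *	example_find_free_mmac_slot(nxge_mmac_t *mmac)
+ *	{
+ *		int slot;
+ *
+ *		if (mmac->naddrfree == 0)
+ *			return (-1);		// no alternate address left
+ *		for (slot = 0; slot < mmac->num_mmac; slot++) {
+ *			if (!(mmac->mac_pool[slot].flags & EXAMPLE_SLOT_USED))
+ *				return (slot);	// first unused pool entry
+ *		}
+ *		return (-1);
+ *	}
+ */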
+
+/*
+ * mmac stats structure
+ */
+typedef struct _nxge_mmac_stats_t {
+	uint8_t mmac_max_cnt;
+	uint8_t	mmac_avail_cnt;
+	struct ether_addr mmac_avail_pool[16];
+} nxge_mmac_stats_t, *p_nxge_mmac_stats_t;
+
+#define	NXGE_MAX_MMAC_ADDRS	32
+#define	NXGE_NUM_MMAC_ADDRS	8
+#define	NXGE_NUM_OF_PORTS	4
+
+#endif
+
+#include 	<sys/nxge/nxge_common_impl.h>
+#include 	<sys/nxge/nxge_common.h>
+#include	<sys/nxge/nxge_txc.h>
+#include	<sys/nxge/nxge_rxdma.h>
+#include	<sys/nxge/nxge_txdma.h>
+#include	<sys/nxge/nxge_fflp.h>
+#include	<sys/nxge/nxge_ipp.h>
+#include	<sys/nxge/nxge_zcp.h>
+#include	<sys/nxge/nxge_fzc.h>
+#include	<sys/nxge/nxge_flow.h>
+#include	<sys/nxge/nxge_virtual.h>
+
+#include 	<sys/nxge/nxge.h>
+
+#include	<sys/modctl.h>
+#include	<sys/pattr.h>
+
+#include	<npi_vir.h>
+
+/*
+ * Reconfiguring the network devices requires the net_config privilege
+ * in Solaris 10+.  Prior to this, root privilege is required.  So that
+ * the same driver binary can run on both S10+ and earlier releases, the
+ * decision as to which check to use is made at runtime.  These
+ * declarations allow for either (or both) to exist; a usage sketch
+ * follows the declarations below.
+ */
+extern int secpolicy_net_config(const cred_t *, boolean_t);
+extern int drv_priv(cred_t *);
+extern void nxge_fm_report_error(p_nxge_t, uint8_t,
+			uint8_t, nxge_fm_ereport_id_t);
+extern int fm_check_acc_handle(ddi_acc_handle_t);
+extern int fm_check_dma_handle(ddi_dma_handle_t);
+
+#pragma weak    secpolicy_net_config
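+
+/*
+ * Illustrative sketch (not part of this changeset): with the weak
+ * declaration above, a driver can test at runtime whether the running
+ * kernel resolves secpolicy_net_config() and fall back to drv_priv()
+ * on older releases.  The helper name is hypothetical.
+ *
+ *	static int
+ *	example_check_priv(cred_t *credp)
+ *	{
+ *		if (secpolicy_net_config != NULL)	// S10+ kernel
+ *			return (secpolicy_net_config(credp, B_FALSE));
+ *		return (drv_priv(credp));		// pre-S10 fallback
+ *	}
+ */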
+
+/* nxge_classify.c */
+nxge_status_t nxge_classify_init(p_nxge_t);
+nxge_status_t nxge_classify_uninit(p_nxge_t);
+nxge_status_t nxge_set_hw_classify_config(p_nxge_t);
+nxge_status_t nxge_classify_exit_sw(p_nxge_t);
+
+/* nxge_fflp.c */
+void nxge_put_tcam(p_nxge_t, p_mblk_t);
+void nxge_get_tcam(p_nxge_t, p_mblk_t);
+nxge_status_t nxge_classify_init_hw(p_nxge_t);
+nxge_status_t nxge_classify_init_sw(p_nxge_t);
+nxge_status_t nxge_fflp_ip_class_config_all(p_nxge_t);
+nxge_status_t nxge_fflp_ip_class_config(p_nxge_t, tcam_class_t,
+				    uint32_t);
+
+nxge_status_t nxge_fflp_ip_class_config_get(p_nxge_t,
+				    tcam_class_t,
+				    uint32_t *);
+
+nxge_status_t nxge_cfg_ip_cls_flow_key(p_nxge_t, tcam_class_t,
+				    uint32_t);
+
+nxge_status_t nxge_fflp_ip_usr_class_config(p_nxge_t, tcam_class_t,
+				    uint32_t);
+
+uint64_t nxge_classify_get_cfg_value(p_nxge_t, uint8_t, uint8_t);
+nxge_status_t nxge_add_flow(p_nxge_t, flow_resource_t *);
+nxge_status_t nxge_fflp_config_tcam_enable(p_nxge_t);
+nxge_status_t nxge_fflp_config_tcam_disable(p_nxge_t);
+
+nxge_status_t nxge_fflp_config_hash_lookup_enable(p_nxge_t);
+nxge_status_t nxge_fflp_config_hash_lookup_disable(p_nxge_t);
+
+nxge_status_t nxge_fflp_config_llc_snap_enable(p_nxge_t);
+nxge_status_t nxge_fflp_config_llc_snap_disable(p_nxge_t);
+
+nxge_status_t nxge_logical_mac_assign_rdc_table(p_nxge_t, uint8_t);
+nxge_status_t nxge_fflp_config_vlan_table(p_nxge_t, uint16_t);
+
+nxge_status_t nxge_fflp_set_hash1(p_nxge_t, uint32_t);
+
+nxge_status_t nxge_fflp_set_hash2(p_nxge_t, uint16_t);
+
+nxge_status_t nxge_fflp_init_hostinfo(p_nxge_t);
+
+void nxge_handle_tcam_fragment_bug(p_nxge_t);
+nxge_status_t nxge_fflp_hw_reset(p_nxge_t);
+nxge_status_t nxge_fflp_handle_sys_errors(p_nxge_t);
+nxge_status_t nxge_zcp_handle_sys_errors(p_nxge_t);
+
+/* nxge_kstats.c */
+void nxge_init_statsp(p_nxge_t);
+void nxge_setup_kstats(p_nxge_t);
+void nxge_destroy_kstats(p_nxge_t);
+int nxge_port_kstat_update(kstat_t *, int);
+void nxge_save_cntrs(p_nxge_t);
+
+int nxge_m_stat(void *arg, uint_t, uint64_t *);
+
+/* nxge_hw.c */
+void
+nxge_hw_ioctl(p_nxge_t, queue_t *, mblk_t *, struct iocblk *);
+void nxge_loopback_ioctl(p_nxge_t, queue_t *, mblk_t *, struct iocblk *);
+void nxge_global_reset(p_nxge_t);
+uint_t nxge_intr(void *, void *);
+void nxge_intr_enable(p_nxge_t);
+void nxge_intr_disable(p_nxge_t);
+void nxge_hw_blank(void *arg, time_t, uint_t);
+void nxge_hw_id_init(p_nxge_t);
+void nxge_hw_init_niu_common(p_nxge_t);
+void nxge_intr_hw_enable(p_nxge_t);
+void nxge_intr_hw_disable(p_nxge_t);
+void nxge_hw_stop(p_nxge_t);
+void nxge_global_reset(p_nxge_t);
+void nxge_check_hw_state(p_nxge_t);
+
+void nxge_rxdma_channel_put64(nxge_os_acc_handle_t,
+	void *, uint32_t, uint16_t,
+	uint64_t);
+uint64_t nxge_rxdma_channel_get64(nxge_os_acc_handle_t, void *,
+	uint32_t, uint16_t);
+
+
+void nxge_get32(p_nxge_t, p_mblk_t);
+void nxge_put32(p_nxge_t, p_mblk_t);
+
+void nxge_hw_set_mac_modes(p_nxge_t);
+
+/* nxge_send.c. */
+uint_t nxge_reschedule(caddr_t);
+
+/* nxge_rxdma.c */
+nxge_status_t nxge_rxdma_cfg_rdcgrp_default_rdc(p_nxge_t,
+					    uint8_t, uint8_t);
+
+nxge_status_t nxge_rxdma_cfg_port_default_rdc(p_nxge_t,
+				    uint8_t, uint8_t);
+nxge_status_t nxge_rxdma_cfg_rcr_threshold(p_nxge_t, uint8_t,
+				    uint16_t);
+nxge_status_t nxge_rxdma_cfg_rcr_timeout(p_nxge_t, uint8_t,
+				    uint16_t, uint8_t);
+
+/* nxge_ndd.c */
+void nxge_get_param_soft_properties(p_nxge_t);
+void nxge_copy_hw_default_to_param(p_nxge_t);
+void nxge_copy_param_hw_to_config(p_nxge_t);
+void nxge_setup_param(p_nxge_t);
+void nxge_init_param(p_nxge_t);
+void nxge_destroy_param(p_nxge_t);
+boolean_t nxge_check_rxdma_rdcgrp_member(p_nxge_t, uint8_t, uint8_t);
+boolean_t nxge_check_rxdma_port_member(p_nxge_t, uint8_t);
+boolean_t nxge_check_rdcgrp_port_member(p_nxge_t, uint8_t);
+
+boolean_t nxge_check_txdma_port_member(p_nxge_t, uint8_t);
+
+int nxge_param_get_generic(p_nxge_t, queue_t *, mblk_t *, caddr_t);
+int nxge_param_set_generic(p_nxge_t, queue_t *, mblk_t *, char *, caddr_t);
+int nxge_get_default(p_nxge_t, queue_t *, p_mblk_t, caddr_t);
+int nxge_set_default(p_nxge_t, queue_t *, p_mblk_t, char *, caddr_t);
+int nxge_nd_get_names(p_nxge_t, queue_t *, p_mblk_t, caddr_t);
+int nxge_mk_mblk_tail_space(p_mblk_t, p_mblk_t *, size_t);
+long nxge_strtol(char *, char **, int);
+boolean_t nxge_param_get_instance(queue_t *, mblk_t *);
+void nxge_param_ioctl(p_nxge_t, queue_t *, mblk_t *, struct iocblk *);
+boolean_t nxge_nd_load(caddr_t *, char *, pfi_t, pfi_t, caddr_t);
+void nxge_nd_free(caddr_t *);
+int nxge_nd_getset(p_nxge_t, queue_t *, caddr_t, p_mblk_t);
+
+void nxge_set_lb_normal(p_nxge_t);
+boolean_t nxge_set_lb(p_nxge_t, queue_t *, p_mblk_t);
+
+/* nxge_virtual.c */
+nxge_status_t nxge_cntlops(dev_info_t *, nxge_ctl_enum_t, void *, void *);
+void nxge_common_lock_get(p_nxge_t);
+void nxge_common_lock_free(p_nxge_t);
+
+nxge_status_t nxge_get_config_properties(p_nxge_t);
+void nxge_get_xcvr_properties(p_nxge_t);
+void nxge_init_vlan_config(p_nxge_t);
+void nxge_init_mac_config(p_nxge_t);
+
+
+void nxge_init_logical_devs(p_nxge_t);
+int nxge_init_ldg_intrs(p_nxge_t);
+
+void nxge_set_ldgimgmt(p_nxge_t, uint32_t, boolean_t,
+	uint32_t);
+
+void nxge_init_fzc_txdma_channels(p_nxge_t);
+
+nxge_status_t nxge_init_fzc_txdma_channel(p_nxge_t, uint16_t,
+	p_tx_ring_t, p_tx_mbox_t);
+nxge_status_t nxge_init_fzc_txdma_port(p_nxge_t);
+
+nxge_status_t nxge_init_fzc_rxdma_channel(p_nxge_t, uint16_t,
+	p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);
+
+nxge_status_t nxge_init_fzc_rdc_tbl(p_nxge_t);
+nxge_status_t nxge_init_fzc_rx_common(p_nxge_t);
+nxge_status_t nxge_init_fzc_rxdma_port(p_nxge_t);
+
+nxge_status_t nxge_init_fzc_rxdma_channel_pages(p_nxge_t,
+	uint16_t, p_rx_rbr_ring_t);
+nxge_status_t nxge_init_fzc_rxdma_channel_red(p_nxge_t,
+	uint16_t, p_rx_rcr_ring_t);
+
+nxge_status_t nxge_init_fzc_rxdma_channel_clrlog(p_nxge_t,
+	uint16_t, p_rx_rbr_ring_t);
+
+
+nxge_status_t nxge_init_fzc_txdma_channel_pages(p_nxge_t,
+	uint16_t, p_tx_ring_t);
+
+nxge_status_t nxge_init_fzc_txdma_channel_drr(p_nxge_t, uint16_t,
+	p_tx_ring_t);
+
+nxge_status_t nxge_init_fzc_txdma_port(p_nxge_t);
+
+void nxge_init_fzc_ldg_num(p_nxge_t);
+void nxge_init_fzc_sys_int_data(p_nxge_t);
+void nxge_init_fzc_ldg_int_timer(p_nxge_t);
+nxge_status_t nxge_intr_mask_mgmt_set(p_nxge_t, boolean_t on);
+
+/* MAC functions */
+nxge_status_t nxge_mac_init(p_nxge_t);
+nxge_status_t nxge_link_init(p_nxge_t);
+nxge_status_t nxge_xif_init(p_nxge_t);
+nxge_status_t nxge_pcs_init(p_nxge_t);
+nxge_status_t nxge_serdes_init(p_nxge_t);
+nxge_status_t nxge_n2_serdes_init(p_nxge_t);
+nxge_status_t nxge_neptune_serdes_init(p_nxge_t);
+nxge_status_t nxge_xcvr_find(p_nxge_t);
+nxge_status_t nxge_get_xcvr_type(p_nxge_t);
+nxge_status_t nxge_xcvr_init(p_nxge_t);
+nxge_status_t nxge_tx_mac_init(p_nxge_t);
+nxge_status_t nxge_rx_mac_init(p_nxge_t);
+nxge_status_t nxge_tx_mac_enable(p_nxge_t);
+nxge_status_t nxge_tx_mac_disable(p_nxge_t);
+nxge_status_t nxge_rx_mac_enable(p_nxge_t);
+nxge_status_t nxge_rx_mac_disable(p_nxge_t);
+nxge_status_t nxge_tx_mac_reset(p_nxge_t);
+nxge_status_t nxge_rx_mac_reset(p_nxge_t);
+nxge_status_t nxge_link_intr(p_nxge_t, link_intr_enable_t);
+nxge_status_t nxge_mii_xcvr_init(p_nxge_t);
+nxge_status_t nxge_mii_read(p_nxge_t, uint8_t,
+			uint8_t, uint16_t *);
+nxge_status_t nxge_mii_write(p_nxge_t, uint8_t,
+			uint8_t, uint16_t);
+nxge_status_t nxge_mdio_read(p_nxge_t, uint8_t, uint8_t,
+			uint16_t, uint16_t *);
+nxge_status_t nxge_mdio_write(p_nxge_t, uint8_t,
+			uint8_t, uint16_t, uint16_t);
+nxge_status_t nxge_mii_check(p_nxge_t, mii_bmsr_t,
+			mii_bmsr_t, nxge_link_state_t *);
+nxge_status_t nxge_add_mcast_addr(p_nxge_t, struct ether_addr *);
+nxge_status_t nxge_del_mcast_addr(p_nxge_t, struct ether_addr *);
+nxge_status_t nxge_set_mac_addr(p_nxge_t, struct ether_addr *);
+nxge_status_t nxge_check_mii_link(p_nxge_t);
+nxge_status_t nxge_check_10g_link(p_nxge_t);
+nxge_status_t nxge_check_serdes_link(p_nxge_t);
+nxge_status_t nxge_check_bcm8704_link(p_nxge_t, boolean_t *);
+void nxge_link_is_down(p_nxge_t);
+void nxge_link_is_up(p_nxge_t);
+nxge_status_t nxge_link_monitor(p_nxge_t, link_mon_enable_t);
+uint32_t crc32_mchash(p_ether_addr_t);
+nxge_status_t nxge_set_promisc(p_nxge_t, boolean_t);
+nxge_status_t nxge_mac_handle_sys_errors(p_nxge_t);
+nxge_status_t nxge_10g_link_led_on(p_nxge_t);
+nxge_status_t nxge_10g_link_led_off(p_nxge_t);
+
+/* espc (sprom) prototypes */
+nxge_status_t nxge_espc_mac_addrs_get(p_nxge_t);
+nxge_status_t nxge_espc_num_macs_get(p_nxge_t, uint8_t *);
+nxge_status_t nxge_espc_num_ports_get(p_nxge_t);
+nxge_status_t nxge_espc_phy_type_get(p_nxge_t);
+
+
+void nxge_debug_msg(p_nxge_t, uint64_t, char *, ...);
+
+uint64_t hv_niu_rx_logical_page_conf(uint64_t, uint64_t,
+	uint64_t, uint64_t);
+#pragma weak	hv_niu_rx_logical_page_conf
+
+uint64_t hv_niu_rx_logical_page_info(uint64_t, uint64_t,
+	uint64_t *, uint64_t *);
+#pragma weak	hv_niu_rx_logical_page_info
+
+uint64_t hv_niu_tx_logical_page_conf(uint64_t, uint64_t,
+	uint64_t, uint64_t);
+#pragma weak	hv_niu_tx_logical_page_conf
+
+uint64_t hv_niu_tx_logical_page_info(uint64_t, uint64_t,
+	uint64_t *, uint64_t *);
+#pragma weak	hv_niu_tx_logical_page_info
+
+#ifdef NXGE_DEBUG
+char *nxge_dump_packet(char *, int);
+#endif
+
+#endif	/* !_ASM */
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _SYS_NXGE_NXGE_IMPL_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/sys/nxge/nxge_ipp.h	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,84 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_NXGE_NXGE_IPP_H
+#define	_SYS_NXGE_NXGE_IPP_H
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#include <nxge_ipp_hw.h>
+#include <npi_ipp.h>
+
+#define	IPP_MAX_PKT_SIZE	0x1FFFF
+#define	IPP_MAX_ERR_SHOW	10
+
+typedef	struct _ipp_errlog {
+	boolean_t		multiple_err;
+	uint16_t		dfifo_rd_ptr;
+	uint32_t		state_mach;
+	uint16_t		ecc_syndrome;
+} ipp_errlog_t, *p_ipp_errlog_t;
+
+typedef struct _nxge_ipp_stats {
+	uint32_t 		errors;
+	uint32_t 		inits;
+	uint32_t 		sop_miss;
+	uint32_t 		eop_miss;
+	uint32_t 		dfifo_ue;
+	uint32_t 		ecc_err_cnt;
+	uint32_t 		pfifo_perr;
+	uint32_t 		pfifo_over;
+	uint32_t 		pfifo_und;
+	uint32_t 		bad_cs_cnt;
+	uint32_t 		pkt_dis_cnt;
+	ipp_errlog_t		errlog;
+} nxge_ipp_stats_t, *p_nxge_ipp_stats_t;
+
+typedef	struct _nxge_ipp {
+	uint32_t		config;
+	uint32_t		iconfig;
+	ipp_status_t		status;
+	uint32_t		max_pkt_size;
+	nxge_ipp_stats_t	*stat;
+} nxge_ipp_t;
+
+/* IPP prototypes */
+nxge_status_t nxge_ipp_reset(p_nxge_t);
+nxge_status_t nxge_ipp_init(p_nxge_t);
+nxge_status_t nxge_ipp_disable(p_nxge_t);
+nxge_status_t nxge_ipp_handle_sys_errors(p_nxge_t);
+nxge_status_t nxge_ipp_fatal_err_recover(p_nxge_t);
+nxge_status_t nxge_ipp_eccue_valid_check(p_nxge_t, boolean_t *);
+void nxge_ipp_inject_err(p_nxge_t, uint32_t);
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _SYS_NXGE_NXGE_IPP_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/sys/nxge/nxge_ipp_hw.h	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,251 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_NXGE_NXGE_IPP_HW_H
+#define	_SYS_NXGE_NXGE_IPP_HW_H
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#include <nxge_defs.h>
+
+/* IPP Registers */
+#define	IPP_CONFIG_REG				0x000
+#define	IPP_DISCARD_PKT_CNT_REG			0x020
+#define	IPP_TCP_CKSUM_ERR_CNT_REG		0x028
+#define	IPP_ECC_ERR_COUNTER_REG			0x030
+#define	IPP_INT_STATUS_REG			0x040
+#define	IPP_INT_MASK_REG			0x048
+
+#define	IPP_PFIFO_RD_DATA0_REG			0x060
+#define	IPP_PFIFO_RD_DATA1_REG			0x068
+#define	IPP_PFIFO_RD_DATA2_REG			0x070
+#define	IPP_PFIFO_RD_DATA3_REG			0x078
+#define	IPP_PFIFO_RD_DATA4_REG			0x080
+#define	IPP_PFIFO_WR_DATA0_REG			0x088
+#define	IPP_PFIFO_WR_DATA1_REG			0x090
+#define	IPP_PFIFO_WR_DATA2_REG			0x098
+#define	IPP_PFIFO_WR_DATA3_REG			0x0a0
+#define	IPP_PFIFO_WR_DATA4_REG			0x0a8
+#define	IPP_PFIFO_RD_PTR_REG			0x0b0
+#define	IPP_PFIFO_WR_PTR_REG			0x0b8
+#define	IPP_DFIFO_RD_DATA0_REG			0x0c0
+#define	IPP_DFIFO_RD_DATA1_REG			0x0c8
+#define	IPP_DFIFO_RD_DATA2_REG			0x0d0
+#define	IPP_DFIFO_RD_DATA3_REG			0x0d8
+#define	IPP_DFIFO_RD_DATA4_REG			0x0e0
+#define	IPP_DFIFO_WR_DATA0_REG			0x0e8
+#define	IPP_DFIFO_WR_DATA1_REG			0x0f0
+#define	IPP_DFIFO_WR_DATA2_REG			0x0f8
+#define	IPP_DFIFO_WR_DATA3_REG			0x100
+#define	IPP_DFIFO_WR_DATA4_REG			0x108
+#define	IPP_DFIFO_RD_PTR_REG			0x110
+#define	IPP_DFIFO_WR_PTR_REG			0x118
+#define	IPP_STATE_MACHINE_REG			0x120
+#define	IPP_CKSUM_STATUS_REG			0x128
+#define	IPP_FFLP_CKSUM_INFO_REG			0x130
+#define	IPP_DEBUG_SELECT_REG			0x138
+#define	IPP_DFIFO_ECC_SYNDROME_REG		0x140
+#define	IPP_DFIFO_EOPM_RD_PTR_REG		0x148
+#define	IPP_ECC_CTRL_REG			0x150
+
+#define	IPP_PORT_OFFSET				0x4000
+#define	IPP_PORT0_OFFSET			0
+#define	IPP_PORT1_OFFSET			0x8000
+#define	IPP_PORT2_OFFSET			0x4000
+#define	IPP_PORT3_OFFSET			0xc000
+#define	IPP_REG_ADDR(port_num, reg)\
+	((port_num == 0) ? FZC_IPP + reg : \
+	FZC_IPP + reg + (((port_num % 2) * IPP_PORT_OFFSET) + \
+	((port_num / 3) * IPP_PORT_OFFSET) + IPP_PORT_OFFSET))
+#define	IPP_PORT_ADDR(port_num)\
+	((port_num == 0) ? FZC_IPP: \
+	FZC_IPP + (((port_num % 2) * IPP_PORT_OFFSET) + \
+	((port_num / 3) * IPP_PORT_OFFSET) + IPP_PORT_OFFSET))
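+
+/*
+ * Worked expansion of the macros above, for reference:
+ *	IPP_PORT_ADDR(0) = FZC_IPP + 0x0000	(IPP_PORT0_OFFSET)
+ *	IPP_PORT_ADDR(1) = FZC_IPP + 0x8000	(IPP_PORT1_OFFSET)
+ *	IPP_PORT_ADDR(2) = FZC_IPP + 0x4000	(IPP_PORT2_OFFSET)
+ *	IPP_PORT_ADDR(3) = FZC_IPP + 0xc000	(IPP_PORT3_OFFSET)
+ * IPP_REG_ADDR() simply adds the per-register offset, e.g.
+ *	IPP_REG_ADDR(2, IPP_INT_STATUS_REG) = FZC_IPP + 0x4000 + 0x040
+ */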
+
+/* IPP Configuration Register */
+
+#define	IPP_SOFT_RESET				(1ULL << 31)
+#define	IPP_IP_MAX_PKT_BYTES_SHIFT		8
+#define	IPP_IP_MAX_PKT_BYTES_MASK		0x1FFFF
+#define	IPP_FFLP_CKSUM_INFO_PIO_WR_EN		(1 << 7)
+#define	IPP_PRE_FIFO_PIO_WR_EN			(1 << 6)
+#define	IPP_DFIFO_PIO_WR_EN			(1 << 5)
+#define	IPP_TCP_UDP_CKSUM_EN			(1 << 4)
+#define	IPP_DROP_BAD_CRC_EN			(1 << 3)
+#define	IPP_DFIFO_ECC_CORRECT_EN		(1 << 2)
+#define	IPP_EN					(1 << 0)
+
+/* IPP Interrupt Status Registers */
+
+#define	IPP_DFIFO_MISSED_SOP			(1ULL << 31)
+#define	IPP_DFIFO_MISSED_EOP			(1 << 30)
+#define	IPP_DFIFO_ECC_UNCORR_ERR_MASK		0x3
+#define	IPP_DFIFO_ECC_UNCORR_ERR_SHIFT		28
+#define	IPP_DFIFO_ECC_CORR_ERR_MASK		0x3
+#define	IPP_DFIFO_ECC_CORR_ERR_SHIFT		26
+#define	IPP_DFIFO_ECC_ERR_MASK			0x3
+#define	IPP_DFIFO_ECC_ERR_SHIFT			24
+#define	IPP_DFIFO_NO_ECC_ERR			(1 << 23)
+#define	IPP_DFIFO_ECC_ERR_ENTRY_INDEX_MASK	0x7FF
+#define	IPP_DFIFO_ECC_ERR_ENTRY_INDEX_SHIFT	12
+#define	IPP_PRE_FIFO_PERR			(1 << 11)
+#define	IPP_ECC_ERR_CNT_MAX			(1 << 10)
+#define	IPP_PRE_FIFO_PERR_ENTRY_INDEX_MASK	0x3F
+#define	IPP_PRE_FIFO_PERR_ENTRY_INDEX_SHIFT	4
+#define	IPP_PRE_FIFO_OVERRUN			(1 << 3)
+#define	IPP_PRE_FIFO_UNDERRUN			(1 << 2)
+#define	IPP_BAD_TCPIP_CHKSUM_CNT_MAX		(1 << 1)
+#define	IPP_PKT_DISCARD_CNT_MAX			(1 << 0)
+
+#define	IPP_P0_P1_DFIFO_ENTRIES			2048
+#define	IPP_P2_P3_DFIFO_ENTRIES			1024
+#define	IPP_NIU_DFIFO_ENTRIES			1024
+
+typedef	union _ipp_status {
+	uint64_t value;
+
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t	w1;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint32_t dfifo_missed_sop	: 1;
+		uint32_t dfifo_missed_eop	: 1;
+		uint32_t dfifo_uncorr_ecc_err	: 2;
+		uint32_t dfifo_corr_ecc_err	: 2;
+		uint32_t dfifo_ecc_err		: 2;
+		uint32_t dfifo_no_ecc_err	: 1;
+		uint32_t dfifo_ecc_err_idx	: 11;
+		uint32_t pre_fifo_perr		: 1;
+		uint32_t ecc_err_cnt_ovfl	: 1;
+		uint32_t pre_fifo_perr_idx	: 6;
+		uint32_t pre_fifo_overrun	: 1;
+		uint32_t pre_fifo_underrun	: 1;
+		uint32_t bad_cksum_cnt_ovfl	: 1;
+		uint32_t pkt_discard_cnt_ovfl	: 1;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint32_t pkt_discard_cnt_ovfl	: 1;
+		uint32_t bad_cksum_cnt_ovfl	: 1;
+		uint32_t pre_fifo_underrun	: 1;
+		uint32_t pre_fifo_overrun	: 1;
+		uint32_t pre_fifo_perr_idx	: 6;
+		uint32_t ecc_err_cnt_ovfl	: 1;
+		uint32_t pre_fifo_perr		: 1;
+		uint32_t dfifo_ecc_err_idx	: 11;
+		uint32_t dfifo_no_ecc_err	: 1;
+		uint32_t dfifo_ecc_err		: 2;
+		uint32_t dfifo_corr_ecc_err	: 2;
+		uint32_t dfifo_uncorr_ecc_err	: 2;
+		uint32_t dfifo_missed_eop	: 1;
+		uint32_t dfifo_missed_sop	: 1;
+#else
+#error	one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
+#endif
+	} w0;
+
+#if !defined(_BIG_ENDIAN)
+		uint32_t	w1;
+#endif
+	} bits;
+} ipp_status_t;
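+
+/*
+ * Illustrative sketch (not part of this changeset): the union above
+ * overlays the 64-bit register image, so a raw read of
+ * IPP_INT_STATUS_REG can be inspected through the named bit fields.
+ * The helper name is hypothetical.
+ *
+ *	static boolean_t
+ *	example_ipp_fifo_trouble(uint64_t raw_status)
+ *	{
+ *		ipp_status_t istatus;
+ *
+ *		istatus.value = raw_status;	// overlay the register image
+ *		return ((istatus.bits.w0.pre_fifo_overrun ||
+ *		    istatus.bits.w0.pre_fifo_underrun) ? B_TRUE : B_FALSE);
+ *	}
+ */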
+
+typedef	union _ipp_ecc_ctrl {
+	uint64_t value;
+
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t	w1;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint32_t dis_dbl	: 1;
+		uint32_t res3		: 13;
+		uint32_t cor_dbl	: 1;
+		uint32_t cor_sng	: 1;
+		uint32_t rsvd		: 5;
+		uint32_t cor_all	: 1;
+		uint32_t res2		: 1;
+		uint32_t cor_1		: 1;
+		uint32_t res1		: 5;
+		uint32_t cor_lst	: 1;
+		uint32_t cor_snd	: 1;
+		uint32_t cor_fst	: 1;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint32_t cor_fst	: 1;
+		uint32_t cor_snd	: 1;
+		uint32_t cor_lst	: 1;
+		uint32_t res1		: 5;
+		uint32_t cor_1		: 1;
+		uint32_t res2		: 1;
+		uint32_t cor_all	: 1;
+		uint32_t rsvd		: 5;
+		uint32_t cor_sng	: 1;
+		uint32_t cor_dbl	: 1;
+		uint32_t res3		: 13;
+		uint32_t dis_dbl	: 1;
+#else
+#error	one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
+#endif
+	} w0;
+
+#if !defined(_BIG_ENDIAN)
+		uint32_t	w1;
+#endif
+	} bits;
+} ipp_ecc_ctrl_t;
+
+
+/* IPP Interrupt Mask Registers */
+
+#define	IPP_ECC_ERR_CNT_MAX_INTR_DIS		(1 << 7)
+#define	IPP_DFIFO_MISSING_EOP_SOP_INTR_DIS	(1 << 6)
+#define	IPP_DFIFO_ECC_UNCORR_ERR_INTR_DIS	(1 << 5)
+#define	IPP_PRE_FIFO_PERR_INTR_DIS		(1 << 4)
+#define	IPP_PRE_FIFO_OVERRUN_INTR_DIS		(1 << 3)
+#define	IPP_PRE_FIFO_UNDERRUN_INTR_DIS		(1 << 2)
+#define	IPP_BAD_TCPIP_CKSUM_CNT_INTR_DIS	(1 << 1)
+#define	IPP_PKT_DISCARD_CNT_INTR_DIS		(1 << 0)
+
+#define	IPP_RESET_WAIT				10
+
+/* DFIFO RD/WR pointers mask */
+
+#define	IPP_XMAC_DFIFO_PTR_MASK			0x7FF
+#define	IPP_BMAC_DFIFO_PTR_MASK			0x3FF
+
+#define	IPP_ECC_CNT_MASK			0xFF
+#define	IPP_BAD_CS_CNT_MASK			0x3FFF
+#define	IPP_PKT_DIS_CNT_MASK			0x3FFF
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _SYS_NXGE_NXGE_IPP_HW_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/sys/nxge/nxge_mac.h	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,245 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef	_SYS_NXGE_NXGE_MAC_H
+#define	_SYS_NXGE_NXGE_MAC_H
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#include <nxge_mac_hw.h>
+#include <npi_mac.h>
+
+#define	NXGE_MTU_DEFAULT_MAX	1522	/* 0x5f2 */
+
+#define	NXGE_XMAC_TX_INTRS	(ICFG_XMAC_TX_ALL & \
+					~(ICFG_XMAC_TX_FRAME_XMIT |\
+					ICFG_XMAC_TX_BYTE_CNT_EXP |\
+					ICFG_XMAC_TX_FRAME_CNT_EXP))
+#define	NXGE_XMAC_RX_INTRS	(ICFG_XMAC_RX_ALL & \
+					~(ICFG_XMAC_RX_FRAME_RCVD |\
+					ICFG_XMAC_RX_OCT_CNT_EXP |\
+					ICFG_XMAC_RX_HST_CNT1_EXP |\
+					ICFG_XMAC_RX_HST_CNT2_EXP |\
+					ICFG_XMAC_RX_HST_CNT3_EXP |\
+					ICFG_XMAC_RX_HST_CNT4_EXP |\
+					ICFG_XMAC_RX_HST_CNT5_EXP |\
+					ICFG_XMAC_RX_HST_CNT6_EXP |\
+					ICFG_XMAC_RX_BCAST_CNT_EXP |\
+					ICFG_XMAC_RX_MCAST_CNT_EXP |\
+					ICFG_XMAC_RX_HST_CNT7_EXP))
+#define	NXGE_BMAC_TX_INTRS	(ICFG_BMAC_TX_ALL & \
+					~(ICFG_BMAC_TX_FRAME_SENT |\
+					ICFG_BMAC_TX_BYTE_CNT_EXP |\
+					ICFG_BMAC_TX_FRAME_CNT_EXP))
+#define	NXGE_BMAC_RX_INTRS	(ICFG_BMAC_RX_ALL & \
+					~(ICFG_BMAC_RX_FRAME_RCVD |\
+					ICFG_BMAC_RX_FRAME_CNT_EXP |\
+					ICFG_BMAC_RX_BYTE_CNT_EXP))
+
+typedef enum  {
+	LINK_NO_CHANGE,
+	LINK_IS_UP,
+	LINK_IS_DOWN
+} nxge_link_state_t;
+
+/* Common MAC statistics */
+
+typedef	struct _nxge_mac_stats {
+	/*
+	 * MTU size
+	 */
+	uint32_t	mac_mtu;
+	uint16_t	rev_id;
+
+	/*
+	 * Transceiver state information.
+	 */
+	uint32_t	xcvr_inits;
+	xcvr_inuse_t	xcvr_inuse;
+	uint32_t	xcvr_portn;
+	uint32_t	xcvr_id;
+	uint32_t	serdes_inits;
+	uint32_t	serdes_portn;
+	uint32_t	cap_autoneg;
+	uint32_t	cap_10gfdx;
+	uint32_t	cap_10ghdx;
+	uint32_t	cap_1000fdx;
+	uint32_t	cap_1000hdx;
+	uint32_t	cap_100T4;
+	uint32_t	cap_100fdx;
+	uint32_t	cap_100hdx;
+	uint32_t	cap_10fdx;
+	uint32_t	cap_10hdx;
+	uint32_t	cap_asmpause;
+	uint32_t	cap_pause;
+
+	/*
+	 * Advertised capabilities.
+	 */
+	uint32_t	adv_cap_autoneg;
+	uint32_t	adv_cap_10gfdx;
+	uint32_t	adv_cap_10ghdx;
+	uint32_t	adv_cap_1000fdx;
+	uint32_t	adv_cap_1000hdx;
+	uint32_t	adv_cap_100T4;
+	uint32_t	adv_cap_100fdx;
+	uint32_t	adv_cap_100hdx;
+	uint32_t	adv_cap_10fdx;
+	uint32_t	adv_cap_10hdx;
+	uint32_t	adv_cap_asmpause;
+	uint32_t	adv_cap_pause;
+
+	/*
+	 * Link partner capabilities.
+	 */
+	uint32_t	lp_cap_autoneg;
+	uint32_t	lp_cap_10gfdx;
+	uint32_t	lp_cap_10ghdx;
+	uint32_t	lp_cap_1000fdx;
+	uint32_t	lp_cap_1000hdx;
+	uint32_t	lp_cap_100T4;
+	uint32_t	lp_cap_100fdx;
+	uint32_t	lp_cap_100hdx;
+	uint32_t	lp_cap_10fdx;
+	uint32_t	lp_cap_10hdx;
+	uint32_t	lp_cap_asmpause;
+	uint32_t	lp_cap_pause;
+
+	/*
+	 * Physical link statistics.
+	 */
+	uint32_t	link_T4;
+	uint32_t	link_speed;
+	uint32_t	link_duplex;
+	uint32_t	link_asmpause;
+	uint32_t	link_pause;
+	uint32_t	link_up;
+
+	/* Promiscuous mode */
+	boolean_t	promisc;
+} nxge_mac_stats_t;
+
+/* XMAC Statistics */
+
+typedef	struct _nxge_xmac_stats {
+	uint32_t tx_frame_cnt;
+	uint32_t tx_underflow_err;
+	uint32_t tx_maxpktsize_err;
+	uint32_t tx_overflow_err;
+	uint32_t tx_fifo_xfr_err;
+	uint64_t tx_byte_cnt;
+	uint32_t rx_frame_cnt;
+	uint32_t rx_underflow_err;
+	uint32_t rx_overflow_err;
+	uint32_t rx_crc_err_cnt;
+	uint32_t rx_len_err_cnt;
+	uint32_t rx_viol_err_cnt;
+	uint64_t rx_byte_cnt;
+	uint64_t rx_hist1_cnt;
+	uint64_t rx_hist2_cnt;
+	uint64_t rx_hist3_cnt;
+	uint64_t rx_hist4_cnt;
+	uint64_t rx_hist5_cnt;
+	uint64_t rx_hist6_cnt;
+	uint64_t rx_hist7_cnt;
+	uint64_t rx_broadcast_cnt;
+	uint64_t rx_mult_cnt;
+	uint32_t rx_frag_cnt;
+	uint32_t rx_frame_align_err_cnt;
+	uint32_t rx_linkfault_err_cnt;
+	uint32_t rx_remotefault_err;
+	uint32_t rx_localfault_err;
+	uint32_t rx_pause_cnt;
+	uint32_t tx_pause_state;
+	uint32_t tx_nopause_state;
+	uint32_t xpcs_deskew_err_cnt;
+	uint32_t xpcs_ln0_symbol_err_cnt;
+	uint32_t xpcs_ln1_symbol_err_cnt;
+	uint32_t xpcs_ln2_symbol_err_cnt;
+	uint32_t xpcs_ln3_symbol_err_cnt;
+} nxge_xmac_stats_t, *p_nxge_xmac_stats_t;
+
+/* BMAC Statistics */
+
+typedef	struct _nxge_bmac_stats {
+	uint64_t tx_frame_cnt;
+	uint32_t tx_underrun_err;
+	uint32_t tx_max_pkt_err;
+	uint64_t tx_byte_cnt;
+	uint64_t rx_frame_cnt;
+	uint64_t rx_byte_cnt;
+	uint32_t rx_overflow_err;
+	uint32_t rx_align_err_cnt;
+	uint32_t rx_crc_err_cnt;
+	uint32_t rx_len_err_cnt;
+	uint32_t rx_viol_err_cnt;
+	uint32_t rx_pause_cnt;
+	uint32_t tx_pause_state;
+	uint32_t tx_nopause_state;
+} nxge_bmac_stats_t, *p_nxge_bmac_stats_t;
+
+typedef struct _hash_filter_t {
+	uint_t hash_ref_cnt;
+	uint16_t hash_filter_regs[NMCFILTER_REGS];
+	uint32_t hash_bit_ref_cnt[NMCFILTER_BITS];
+} hash_filter_t, *p_hash_filter_t;
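+
+/*
+ * Illustrative sketch (not part of this changeset): folding a
+ * multicast address into the reference-counted filter above.
+ * crc32_mchash() (declared elsewhere in the driver) is assumed to
+ * return a bit number in [0, NMCFILTER_BITS) for an ethernet address
+ * pointer; the helper name is hypothetical.
+ *
+ *	static void
+ *	example_hash_filter_add(hash_filter_t *hp, struct ether_addr *ep)
+ *	{
+ *		uint32_t bitnum = crc32_mchash(ep);	// 0 .. NMCFILTER_BITS-1
+ *		uint_t reg = bitnum / HASH_REG_WIDTH;	// 16-bit register index
+ *		uint_t bit = bitnum % HASH_REG_WIDTH;	// bit within register
+ *
+ *		if (hp->hash_bit_ref_cnt[bitnum]++ == 0)
+ *			hp->hash_filter_regs[reg] |= (1 << bit);
+ *		hp->hash_ref_cnt++;
+ *	}
+ */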
+
+typedef	struct _nxge_mac {
+	uint8_t			portnum;
+	nxge_port_t		porttype;
+	nxge_port_mode_t	portmode;
+	nxge_linkchk_mode_t	linkchkmode;
+	boolean_t		is_jumbo;
+	uint32_t		tx_config;
+	uint32_t		rx_config;
+	uint32_t		xif_config;
+	uint32_t		tx_iconfig;
+	uint32_t		rx_iconfig;
+	uint32_t		ctl_iconfig;
+	uint16_t		minframesize;
+	uint16_t		maxframesize;
+	uint16_t		maxburstsize;
+	uint16_t		ctrltype;
+	uint16_t		pa_size;
+	uint8_t			ipg[3];
+	struct ether_addr	mac_addr;
+	struct ether_addr	alt_mac_addr[MAC_MAX_ALT_ADDR_ENTRY];
+	struct ether_addr	mac_addr_filter;
+	uint16_t		hashtab[MAC_MAX_HASH_ENTRY];
+	hostinfo_t		hostinfo[MAC_MAX_HOST_INFO_ENTRY];
+	nxge_mac_stats_t	*mac_stats;
+	nxge_xmac_stats_t	*xmac_stats;
+	nxge_bmac_stats_t	*bmac_stats;
+} nxge_mac_t;
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _SYS_NXGE_NXGE_MAC_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/sys/nxge/nxge_mac_hw.h	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,2410 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef	_SYS_MAC_NXGE_MAC_HW_H
+#define	_SYS_MAC_NXGE_MAC_HW_H
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#include <nxge_defs.h>
+
+/* -------------------------- From May's template --------------------------- */
+
+#define	NXGE_1GETHERMIN			255
+#define	NXGE_ETHERMIN			97
+#define	NXGE_MAX_HEADER			250
+
+/* Hardware reset */
+typedef enum  {
+	NXGE_TX_DISABLE,			/* Disable Tx side */
+	NXGE_RX_DISABLE,			/* Disable Rx side */
+	NXGE_CHIP_RESET				/* Full chip reset */
+} nxge_reset_t;
+
+#define	NXGE_DELAY_AFTER_TXRX		10000	/* 10ms after idling rx/tx */
+#define	NXGE_DELAY_AFTER_RESET		1000	/* 1ms after the reset */
+#define	NXGE_DELAY_AFTER_EE_RESET	10000	/* 10ms after EEPROM reset */
+#define	NXGE_DELAY_AFTER_LINK_RESET	13	/* 13 us after link reset */
+#define	NXGE_LINK_RESETS		8	/* Max PHY resets to wait for */
+						/* linkup */
+
+#define	FILTER_M_CTL 			0xDCEF1
+#define	HASH_BITS			8
+#define	NMCFILTER_BITS			(1 << HASH_BITS)
+#define	HASH_REG_WIDTH			16
+#define	BROADCAST_HASH_WORD		0x0f
+#define	BROADCAST_HASH_BIT		0x8000
+#define	NMCFILTER_REGS			(NMCFILTER_BITS / HASH_REG_WIDTH)
+					/* Number of multicast filter regs */
+
+/* -------------------------------------------------------------------------- */
+
+#define	XMAC_PORT_0			0
+#define	XMAC_PORT_1			1
+#define	BMAC_PORT_0			2
+#define	BMAC_PORT_1			3
+
+#define	MAC_RESET_WAIT			10	/* usecs */
+
+#define	MAC_ADDR_REG_MASK		0xFFFF
+
+/* Network Modes */
+
+typedef enum nxge_network_mode {
+	NET_2_10GE_FIBER = 1,
+	NET_2_10GE_COPPER,
+	NET_1_10GE_FIBER_3_1GE_COPPER,
+	NET_1_10GE_COPPER_3_1GE_COPPER,
+	NET_1_10GE_FIBER_3_1GE_FIBER,
+	NET_1_10GE_COPPER_3_1GE_FIBER,
+	NET_2_1GE_FIBER_2_1GE_COPPER,
+	NET_QGE_FIBER,
+	NET_QGE_COPPER
+} nxge_network_mode_t;
+
+typedef	enum nxge_port {
+	PORT_TYPE_XMAC = 1,
+	PORT_TYPE_BMAC
+} nxge_port_t;
+
+typedef	enum nxge_port_mode {
+	PORT_1G_COPPER = 1,
+	PORT_1G_FIBER,
+	PORT_10G_COPPER,
+	PORT_10G_FIBER
+} nxge_port_mode_t;
+
+typedef	enum nxge_linkchk_mode {
+	LINKCHK_INTR = 1,
+	LINKCHK_TIMER
+} nxge_linkchk_mode_t;
+
+typedef enum {
+	LINK_INTR_STOP,
+	LINK_INTR_START
+} link_intr_enable_t, *link_intr_enable_pt;
+
+typedef	enum {
+	LINK_MONITOR_STOP,
+	LINK_MONITOR_START
+} link_mon_enable_t, *link_mon_enable_pt;
+
+typedef enum {
+	NO_XCVR,
+	INT_MII_XCVR,
+	EXT_MII_XCVR,
+	PCS_XCVR,
+	XPCS_XCVR
+} xcvr_inuse_t;
+
+/* macros for port offset calculations */
+
+#define	PORT_1_OFFSET			0x6000
+#define	PORT_GT_1_OFFSET		0x4000
+
+/* XMAC address macros */
+
+#define	XMAC_ADDR_OFFSET_0		0
+#define	XMAC_ADDR_OFFSET_1		0x6000
+
+#define	XMAC_ADDR_OFFSET(port_num)\
+	(XMAC_ADDR_OFFSET_0 + ((port_num) * PORT_1_OFFSET))
+
+#define	XMAC_REG_ADDR(port_num, reg)\
+	(FZC_MAC + (XMAC_ADDR_OFFSET(port_num)) + (reg))
+
+#define	XMAC_PORT_ADDR(port_num)\
+	(FZC_MAC + XMAC_ADDR_OFFSET(port_num))
+
+/* BMAC address macros */
+
+#define	BMAC_ADDR_OFFSET_2		0x0C000
+#define	BMAC_ADDR_OFFSET_3		0x10000
+
+#define	BMAC_ADDR_OFFSET(port_num)\
+	(BMAC_ADDR_OFFSET_2 + (((port_num) - 2) * PORT_GT_1_OFFSET))
+
+#define	BMAC_REG_ADDR(port_num, reg)\
+	(FZC_MAC + (BMAC_ADDR_OFFSET(port_num)) + (reg))
+
+#define	BMAC_PORT_ADDR(port_num)\
+	(FZC_MAC + BMAC_ADDR_OFFSET(port_num))
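+
+/*
+ * Worked expansion of the port address macros above, for reference:
+ *	XMAC_PORT_ADDR(0) = FZC_MAC + 0x00000	(XMAC port 0)
+ *	XMAC_PORT_ADDR(1) = FZC_MAC + 0x06000	(XMAC port 1)
+ *	BMAC_PORT_ADDR(2) = FZC_MAC + 0x0C000	(BMAC port 2)
+ *	BMAC_PORT_ADDR(3) = FZC_MAC + 0x10000	(BMAC port 3)
+ * The *_REG_ADDR() forms add the per-register offset, e.g.
+ *	XMAC_REG_ADDR(1, XMAC_CONFIG_REG) = FZC_MAC + 0x6000 + 0x060
+ * The PCS and XPCS macros below follow the same pattern.
+ */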
+
+/* PCS address macros */
+
+#define	PCS_ADDR_OFFSET_0		0x04000
+#define	PCS_ADDR_OFFSET_1		0x0A000
+#define	PCS_ADDR_OFFSET_2		0x0E000
+#define	PCS_ADDR_OFFSET_3		0x12000
+
+#define	PCS_ADDR_OFFSET(port_num)\
+	((port_num <= 1) ? \
+	(PCS_ADDR_OFFSET_0 + (port_num) * PORT_1_OFFSET) : \
+	(PCS_ADDR_OFFSET_2 + (((port_num) - 2) * PORT_GT_1_OFFSET)))
+
+#define	PCS_REG_ADDR(port_num, reg)\
+	(FZC_MAC + (PCS_ADDR_OFFSET((port_num)) + (reg)))
+
+#define	PCS_PORT_ADDR(port_num)\
+	(FZC_MAC + (PCS_ADDR_OFFSET(port_num)))
+
+/* XPCS address macros */
+
+#define	XPCS_ADDR_OFFSET_0		0x02000
+#define	XPCS_ADDR_OFFSET_1		0x08000
+#define	XPCS_ADDR_OFFSET(port_num)\
+	(XPCS_ADDR_OFFSET_0 + ((port_num) * PORT_1_OFFSET))
+
+#define	XPCS_ADDR(port_num, reg)\
+	(FZC_MAC + (XPCS_ADDR_OFFSET((port_num)) + (reg)))
+
+#define	XPCS_PORT_ADDR(port_num)\
+	(FZC_MAC + (XPCS_ADDR_OFFSET(port_num)))
+
+/* ESR address macro */
+#define	ESR_ADDR_OFFSET		0x14000
+#define	ESR_ADDR(reg)\
+	(FZC_MAC + (ESR_ADDR_OFFSET) + (reg))
+
+/* MIF address macros */
+#define	MIF_ADDR_OFFSET		0x16000
+#define	MIF_ADDR(reg)\
+	(FZC_MAC + (MIF_ADDR_OFFSET) + (reg))
+
+/* BMAC registers offset */
+#define	BTXMAC_SW_RST_REG		0x000	/* TX MAC software reset */
+#define	BRXMAC_SW_RST_REG		0x008	/* RX MAC software reset */
+#define	MAC_SEND_PAUSE_REG		0x010	/* send pause command */
+#define	BTXMAC_STATUS_REG		0x020	/* TX MAC status */
+#define	BRXMAC_STATUS_REG		0x028	/* RX MAC status */
+#define	BMAC_CTRL_STAT_REG		0x030	/* MAC control status */
+#define	BTXMAC_STAT_MSK_REG		0x040	/* TX MAC mask */
+#define	BRXMAC_STAT_MSK_REG		0x048	/* RX MAC mask */
+#define	BMAC_C_S_MSK_REG		0x050	/* MAC control mask */
+#define	TXMAC_CONFIG_REG		0x060	/* TX MAC config */
+/* cfg register bitmap */
+
+typedef union _btxmac_config_t {
+	uint64_t value;
+
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t msw;	/* Most significant word */
+		uint32_t lsw;	/* Least significant word */
+#elif defined(_LITTLE_ENDIAN)
+		uint32_t lsw;	/* Least significant word */
+		uint32_t msw;	/* Most significant word */
+#endif
+	} val;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t	w1;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t rsrvd	: 22;
+			uint32_t hdx_ctrl2	: 1;
+			uint32_t no_fcs	: 1;
+			uint32_t hdx_ctrl	: 7;
+			uint32_t txmac_enable	: 1;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t txmac_enable	: 1;
+			uint32_t hdx_ctrl	: 7;
+			uint32_t no_fcs	: 1;
+			uint32_t hdx_ctrl2	: 1;
+			uint32_t rsrvd	: 22;
+#endif
+		} w0;
+
+#if defined(_LITTLE_ENDIAN)
+		uint32_t	w1;
+#endif
+	} bits;
+} btxmac_config_t, *p_btxmac_config_t;
+
+#define	RXMAC_CONFIG_REG		0x068	/* RX MAC config */
+
+typedef union _brxmac_config_t {
+	uint64_t value;
+
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t msw;	/* Most significant word */
+		uint32_t lsw;	/* Least significant word */
+#elif defined(_LITTLE_ENDIAN)
+		uint32_t lsw;	/* Least significant word */
+		uint32_t msw;	/* Most significant word */
+#endif
+	} val;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t	w1;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t rsrvd	: 20;
+			uint32_t mac_reg_sw_test : 2;
+			uint32_t mac2ipp_pkt_cnt_en : 1;
+			uint32_t rx_crs_extend_en : 1;
+			uint32_t error_chk_dis	: 1;
+			uint32_t addr_filter_en	: 1;
+			uint32_t hash_filter_en	: 1;
+			uint32_t promiscuous_group	: 1;
+			uint32_t promiscuous	: 1;
+			uint32_t strip_fcs	: 1;
+			uint32_t strip_pad	: 1;
+			uint32_t rxmac_enable	: 1;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t rxmac_enable	: 1;
+			uint32_t strip_pad	: 1;
+			uint32_t strip_fcs	: 1;
+			uint32_t promiscuous	: 1;
+			uint32_t promiscuous_group	: 1;
+			uint32_t hash_filter_en	: 1;
+			uint32_t addr_filter_en	: 1;
+			uint32_t error_chk_dis	: 1;
+			uint32_t rx_crs_extend_en : 1;
+			uint32_t mac2ipp_pkt_cnt_en : 1;
+			uint32_t mac_reg_sw_test : 2;
+			uint32_t rsrvd	: 20;
+#endif
+		} w0;
+
+#if defined(_LITTLE_ENDIAN)
+		uint32_t	w1;
+#endif
+	} bits;
+} brxmac_config_t, *p_brxmac_config_t;
+
+#define	MAC_CTRL_CONFIG_REG		0x070	/* MAC control config */
+#define	MAC_XIF_CONFIG_REG		0x078	/* XIF config */
+
+typedef union _bxif_config_t {
+	uint64_t value;
+
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t msw;	/* Most significant word */
+		uint32_t lsw;	/* Least significant word */
+#elif defined(_LITTLE_ENDIAN)
+		uint32_t lsw;	/* Least significant word */
+		uint32_t msw;	/* Most significant word */
+#endif
+	} val;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t	w1;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t rsrvd2		: 24;
+			uint32_t sel_clk_25mhz	: 1;
+			uint32_t led_polarity	: 1;
+			uint32_t force_led_on	: 1;
+			uint32_t used		: 1;
+			uint32_t gmii_mode	: 1;
+			uint32_t rsrvd		: 1;
+			uint32_t loopback	: 1;
+			uint32_t tx_output_en	: 1;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t tx_output_en	: 1;
+			uint32_t loopback	: 1;
+			uint32_t rsrvd		: 1;
+			uint32_t gmii_mode	: 1;
+			uint32_t used		: 1;
+			uint32_t force_led_on	: 1;
+			uint32_t led_polarity	: 1;
+			uint32_t sel_clk_25mhz	: 1;
+			uint32_t rsrvd2		: 24;
+#endif
+		} w0;
+
+#if defined(_LITTLE_ENDIAN)
+		uint32_t	w1;
+#endif
+	} bits;
+} bxif_config_t, *p_bxif_config_t;
+
+#define	BMAC_MIN_REG			0x0a0	/* min frame size */
+#define	BMAC_MAX_REG			0x0a8	/* max frame size reg */
+#define	MAC_PA_SIZE_REG			0x0b0	/* num of preamble bytes */
+#define	MAC_CTRL_TYPE_REG		0x0c8	/* type field of MAC ctrl */
+#define	BMAC_ADDR0_REG			0x100	/* MAC unique ad0 reg (HI 0) */
+#define	BMAC_ADDR1_REG			0x108	/* MAC unique ad1 reg */
+#define	BMAC_ADDR2_REG			0x110	/* MAC unique ad2 reg */
+#define	BMAC_ADDR3_REG			0x118	/* MAC alt ad0 reg (HI 1) */
+#define	BMAC_ADDR4_REG			0x120	/* MAC alt ad0 reg */
+#define	BMAC_ADDR5_REG			0x128	/* MAC alt ad0 reg */
+#define	BMAC_ADDR6_REG			0x130	/* MAC alt ad1 reg (HI 2) */
+#define	BMAC_ADDR7_REG			0x138	/* MAC alt ad1 reg */
+#define	BMAC_ADDR8_REG			0x140	/* MAC alt ad1 reg */
+#define	BMAC_ADDR9_REG			0x148	/* MAC alt ad2 reg (HI 3) */
+#define	BMAC_ADDR10_REG			0x150	/* MAC alt ad2 reg */
+#define	BMAC_ADDR11_REG			0x158	/* MAC alt ad2 reg */
+#define	BMAC_ADDR12_REG			0x160	/* MAC alt ad3 reg (HI 4) */
+#define	BMAC_ADDR13_REG			0x168	/* MAC alt ad3 reg */
+#define	BMAC_ADDR14_REG			0x170	/* MAC alt ad3 reg */
+#define	BMAC_ADDR15_REG			0x178	/* MAC alt ad4 reg (HI 5) */
+#define	BMAC_ADDR16_REG			0x180	/* MAC alt ad4 reg */
+#define	BMAC_ADDR17_REG			0x188	/* MAC alt ad4 reg */
+#define	BMAC_ADDR18_REG			0x190	/* MAC alt ad5 reg (HI 6) */
+#define	BMAC_ADDR19_REG			0x198	/* MAC alt ad5 reg */
+#define	BMAC_ADDR20_REG			0x1a0	/* MAC alt ad5 reg */
+#define	BMAC_ADDR21_REG			0x1a8	/* MAC alt ad6 reg (HI 7) */
+#define	BMAC_ADDR22_REG			0x1b0	/* MAC alt ad6 reg */
+#define	BMAC_ADDR23_REG			0x1b8	/* MAC alt ad6 reg */
+#define	MAC_FC_ADDR0_REG		0x268	/* FC frame addr0 (HI 0, p3) */
+#define	MAC_FC_ADDR1_REG		0x270	/* FC frame addr1 */
+#define	MAC_FC_ADDR2_REG		0x278	/* FC frame addr2 */
+#define	MAC_ADDR_FILT0_REG		0x298	/* bits [47:32] (HI 0, p2) */
+#define	MAC_ADDR_FILT1_REG		0x2a0	/* bits [31:16] */
+#define	MAC_ADDR_FILT2_REG		0x2a8	/* bits [15:0]  */
+#define	MAC_ADDR_FILT12_MASK_REG 	0x2b0	/* addr filter 2 & 1 mask */
+#define	MAC_ADDR_FILT00_MASK_REG	0x2b8	/* addr filter 0 mask */
+#define	MAC_HASH_TBL0_REG		0x2c0	/* hash table 0 reg */
+#define	MAC_HASH_TBL1_REG		0x2c8	/* hash table 1 reg */
+#define	MAC_HASH_TBL2_REG		0x2d0	/* hash table 2 reg */
+#define	MAC_HASH_TBL3_REG		0x2d8	/* hash table 3 reg */
+#define	MAC_HASH_TBL4_REG		0x2e0	/* hash table 4 reg */
+#define	MAC_HASH_TBL5_REG		0x2e8	/* hash table 5 reg */
+#define	MAC_HASH_TBL6_REG		0x2f0	/* hash table 6 reg */
+#define	MAC_HASH_TBL7_REG		0x2f8	/* hash table 7 reg */
+#define	MAC_HASH_TBL8_REG		0x300	/* hash table 8 reg */
+#define	MAC_HASH_TBL9_REG		0x308	/* hash table 9 reg */
+#define	MAC_HASH_TBL10_REG		0x310	/* hash table 10 reg */
+#define	MAC_HASH_TBL11_REG		0x318	/* hash table 11 reg */
+#define	MAC_HASH_TBL12_REG		0x320	/* hash table 12 reg */
+#define	MAC_HASH_TBL13_REG		0x328	/* hash table 13 reg */
+#define	MAC_HASH_TBL14_REG		0x330	/* hash table 14 reg */
+#define	MAC_HASH_TBL15_REG		0x338	/* hash table 15 reg */
+#define	RXMAC_FRM_CNT_REG		0x370	/* receive frame counter */
+#define	MAC_LEN_ER_CNT_REG		0x378	/* length error counter */
+#define	BMAC_AL_ER_CNT_REG		0x380	/* alignment error counter */
+#define	BMAC_CRC_ER_CNT_REG		0x388	/* FCS error counter */
+#define	BMAC_CD_VIO_CNT_REG		0x390	/* RX code violation err */
+#define	BMAC_SM_REG			0x3a0	/* (ro) state machine reg */
+#define	BMAC_ALTAD_CMPEN_REG		0x3f8	/* Alt addr compare enable */
+#define	BMAC_HOST_INF0_REG		0x400	/* Host info */
+						/* (own da, add filter, fc) */
+#define	BMAC_HOST_INF1_REG		0x408	/* Host info (alt ad 0) */
+#define	BMAC_HOST_INF2_REG		0x410	/* Host info (alt ad 1) */
+#define	BMAC_HOST_INF3_REG		0x418	/* Host info (alt ad 2) */
+#define	BMAC_HOST_INF4_REG		0x420	/* Host info (alt ad 3) */
+#define	BMAC_HOST_INF5_REG		0x428	/* Host info (alt ad 4) */
+#define	BMAC_HOST_INF6_REG		0x430	/* Host info (alt ad 5) */
+#define	BMAC_HOST_INF7_REG		0x438	/* Host info (alt ad 6) */
+#define	BMAC_HOST_INF8_REG		0x440	/* Host info (hash hit, miss) */
+#define	BTXMAC_BYTE_CNT_REG		0x448	/* Tx byte count */
+#define	BTXMAC_FRM_CNT_REG		0x450	/* frame count */
+#define	BRXMAC_BYTE_CNT_REG		0x458	/* Rx byte count */
+/* x ranges from 0 to 6 (BMAC_MAX_ALT_ADDR_ENTRY - 1) */
+#define	BMAC_ALT_ADDR0N_REG_ADDR(x)	(BMAC_ADDR3_REG + (x) * 24)
+#define	BMAC_ALT_ADDR1N_REG_ADDR(x)	(BMAC_ADDR3_REG + 8 + (x) * 24)
+#define	BMAC_ALT_ADDR2N_REG_ADDR(x)	(BMAC_ADDR3_REG + 0x10 + (x) * 24)
+#define	BMAC_HASH_TBLN_REG_ADDR(x)	(MAC_HASH_TBL0_REG + (x) * 8)
+#define	BMAC_HOST_INFN_REG_ADDR(x)	(BMAC_HOST_INF0_REG + (x) * 8)
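+
+/*
+ * Worked expansion of the indexed BMAC macros above, for reference:
+ *	BMAC_ALT_ADDR0N_REG_ADDR(0) = 0x118	(BMAC_ADDR3_REG,  alt addr 0)
+ *	BMAC_ALT_ADDR0N_REG_ADDR(6) = 0x1a8	(BMAC_ADDR21_REG, alt addr 6)
+ *	BMAC_HASH_TBLN_REG_ADDR(15) = 0x338	(MAC_HASH_TBL15_REG)
+ *	BMAC_HOST_INFN_REG_ADDR(8)  = 0x440	(BMAC_HOST_INF8_REG)
+ */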
+
+/* XMAC registers offset */
+#define	XTXMAC_SW_RST_REG		0x000	/* XTX MAC soft reset */
+#define	XRXMAC_SW_RST_REG		0x008	/* XRX MAC soft reset */
+#define	XTXMAC_STATUS_REG		0x020	/* XTX MAC status */
+#define	XRXMAC_STATUS_REG		0x028	/* XRX MAC status */
+#define	XMAC_CTRL_STAT_REG		0x030	/* Control / Status */
+#define	XTXMAC_STAT_MSK_REG		0x040	/* XTX MAC Status mask */
+#define	XRXMAC_STAT_MSK_REG		0x048	/* XRX MAC Status mask */
+#define	XMAC_C_S_MSK_REG		0x050	/* Control / Status mask */
+#define	XMAC_CONFIG_REG			0x060	/* Configuration */
+
+/* xmac config bit fields */
+typedef union _xmac_cfg_t {
+	uint64_t value;
+
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t msw;	/* Most significant word */
+		uint32_t lsw;	/* Least significant word */
+#elif defined(_LITTLE_ENDIAN)
+		uint32_t lsw;	/* Least significant word */
+		uint32_t msw;	/* Most significant word */
+#endif
+	} val;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t	w1;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint32_t sel_clk_25mhz : 1;
+		uint32_t pcs_bypass	: 1;
+		uint32_t xpcs_bypass	: 1;
+		uint32_t mii_gmii_mode	: 2;
+		uint32_t lfs_disable	: 1;
+		uint32_t loopback	: 1;
+		uint32_t tx_output_en	: 1;
+		uint32_t sel_por_clk_src : 1;
+		uint32_t led_polarity	: 1;
+		uint32_t force_led_on	: 1;
+		uint32_t pass_fctl_frames : 1;
+		uint32_t recv_pause_en	: 1;
+		uint32_t mac2ipp_pkt_cnt_en : 1;
+		uint32_t strip_crc	: 1;
+		uint32_t addr_filter_en	: 1;
+		uint32_t hash_filter_en	: 1;
+		uint32_t code_viol_chk_dis	: 1;
+		uint32_t reserved_mcast	: 1;
+		uint32_t rx_crc_chk_dis	: 1;
+		uint32_t error_chk_dis	: 1;
+		uint32_t promisc_grp	: 1;
+		uint32_t promiscuous	: 1;
+		uint32_t rx_mac_enable	: 1;
+		uint32_t warning_msg_en	: 1;
+		uint32_t used		: 3;
+		uint32_t always_no_crc	: 1;
+		uint32_t var_min_ipg_en	: 1;
+		uint32_t strech_mode	: 1;
+		uint32_t tx_enable	: 1;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint32_t tx_enable	: 1;
+		uint32_t strech_mode	: 1;
+		uint32_t var_min_ipg_en	: 1;
+		uint32_t always_no_crc	: 1;
+		uint32_t used		: 3;
+		uint32_t warning_msg_en	: 1;
+		uint32_t rx_mac_enable	: 1;
+		uint32_t promiscuous	: 1;
+		uint32_t promisc_grp	: 1;
+		uint32_t error_chk_dis	: 1;
+		uint32_t rx_crc_chk_dis	: 1;
+		uint32_t reserved_mcast	: 1;
+		uint32_t code_viol_chk_dis	: 1;
+		uint32_t hash_filter_en	: 1;
+		uint32_t addr_filter_en	: 1;
+		uint32_t strip_crc	: 1;
+		uint32_t mac2ipp_pkt_cnt_en : 1;
+		uint32_t recv_pause_en	: 1;
+		uint32_t pass_fctl_frames : 1;
+		uint32_t force_led_on	: 1;
+		uint32_t led_polarity	: 1;
+		uint32_t sel_por_clk_src : 1;
+		uint32_t tx_output_en	: 1;
+		uint32_t loopback	: 1;
+		uint32_t lfs_disable	: 1;
+		uint32_t mii_gmii_mode	: 2;
+		uint32_t xpcs_bypass	: 1;
+		uint32_t pcs_bypass	: 1;
+		uint32_t sel_clk_25mhz : 1;
+#endif
+		} w0;
+
+#if defined(_LITTLE_ENDIAN)
+		uint32_t	w1;
+#endif
+	} bits;
+} xmac_cfg_t, *p_xmac_cfg_t;
+
+#define	XMAC_IPG_REG			0x080	/* Inter-Packet-Gap */
+#define	XMAC_MIN_REG			0x088	/* min frame size register */
+#define	XMAC_MAX_REG			0x090	/* max frame/burst size */
+#define	XMAC_ADDR0_REG			0x0a0	/* [47:32] of MAC addr (HI17) */
+#define	XMAC_ADDR1_REG			0x0a8	/* [31:16] of MAC addr */
+#define	XMAC_ADDR2_REG			0x0b0	/* [15:0] of MAC addr */
+#define	XRXMAC_BT_CNT_REG		0x100	/* bytes received / 8 */
+#define	XRXMAC_BC_FRM_CNT_REG		0x108	/* good BC frames received */
+#define	XRXMAC_MC_FRM_CNT_REG		0x110	/* good MC frames received */
+#define	XRXMAC_FRAG_CNT_REG		0x118	/* frag frames rejected */
+#define	XRXMAC_HIST_CNT1_REG		0x120	/* 64 bytes frames */
+#define	XRXMAC_HIST_CNT2_REG		0x128	/* 65-127 bytes frames */
+#define	XRXMAC_HIST_CNT3_REG		0x130	/* 128-255 bytes frames */
+#define	XRXMAC_HIST_CNT4_REG		0x138	/* 256-511 bytes frames */
+#define	XRXMAC_HIST_CNT5_REG		0x140	/* 512-1023 bytes frames */
+#define	XRXMAC_HIST_CNT6_REG		0x148	/* 1024-1522 bytes frames */
+#define	XRXMAC_MPSZER_CNT_REG		0x150	/* frames > maxframesize */
+#define	XRXMAC_CRC_ER_CNT_REG		0x158	/* frames failed CRC */
+#define	XRXMAC_CD_VIO_CNT_REG		0x160	/* frames with code vio */
+#define	XRXMAC_AL_ER_CNT_REG		0x168	/* frames with align error */
+#define	XTXMAC_FRM_CNT_REG		0x170	/* tx frames */
+#define	XTXMAC_BYTE_CNT_REG		0x178	/* tx bytes / 8 */
+#define	XMAC_LINK_FLT_CNT_REG		0x180	/* link faults */
+#define	XRXMAC_HIST_CNT7_REG		0x188	/* MAC2IPP/>1523 bytes frames */
+#define	XMAC_SM_REG			0x1a8	/* State machine */
+#define	XMAC_INTERN1_REG		0x1b0	/* internal signals for diag */
+#define	XMAC_INTERN2_REG		0x1b8	/* internal signals for diag */
+#define	XMAC_ADDR_CMPEN_REG		0x208	/* alt MAC addr check */
+#define	XMAC_ADDR3_REG			0x218	/* alt MAC addr 0 (HI 0) */
+#define	XMAC_ADDR4_REG			0x220	/* alt MAC addr 0 */
+#define	XMAC_ADDR5_REG			0x228	/* alt MAC addr 0 */
+#define	XMAC_ADDR6_REG			0x230	/* alt MAC addr 1 (HI 1) */
+#define	XMAC_ADDR7_REG			0x238	/* alt MAC addr 1 */
+#define	XMAC_ADDR8_REG			0x240	/* alt MAC addr 1 */
+#define	XMAC_ADDR9_REG			0x248	/* alt MAC addr 2 (HI 2) */
+#define	XMAC_ADDR10_REG			0x250	/* alt MAC addr 2 */
+#define	XMAC_ADDR11_REG			0x258	/* alt MAC addr 2 */
+#define	XMAC_ADDR12_REG			0x260	/* alt MAC addr 3 (HI 3) */
+#define	XMAC_ADDR13_REG			0x268	/* alt MAC addr 3 */
+#define	XMAC_ADDR14_REG			0x270	/* alt MAC addr 3 */
+#define	XMAC_ADDR15_REG			0x278	/* alt MAC addr 4 (HI 4) */
+#define	XMAC_ADDR16_REG			0x280	/* alt MAC addr 4 */
+#define	XMAC_ADDR17_REG			0x288	/* alt MAC addr 4 */
+#define	XMAC_ADDR18_REG			0x290	/* alt MAC addr 5 (HI 5) */
+#define	XMAC_ADDR19_REG			0x298	/* alt MAC addr 5 */
+#define	XMAC_ADDR20_REG			0x2a0	/* alt MAC addr 5 */
+#define	XMAC_ADDR21_REG			0x2a8	/* alt MAC addr 6 (HI 6) */
+#define	XMAC_ADDR22_REG			0x2b0	/* alt MAC addr 6 */
+#define	XMAC_ADDR23_REG			0x2b8	/* alt MAC addr 6 */
+#define	XMAC_ADDR24_REG			0x2c0	/* alt MAC addr 7 (HI 7) */
+#define	XMAC_ADDR25_REG			0x2c8	/* alt MAC addr 7 */
+#define	XMAC_ADDR26_REG			0x2d0	/* alt MAC addr 7 */
+#define	XMAC_ADDR27_REG			0x2d8	/* alt MAC addr 8 (HI 8) */
+#define	XMAC_ADDR28_REG			0x2e0	/* alt MAC addr 8 */
+#define	XMAC_ADDR29_REG			0x2e8	/* alt MAC addr 8 */
+#define	XMAC_ADDR30_REG			0x2f0	/* alt MAC addr 9 (HI 9) */
+#define	XMAC_ADDR31_REG			0x2f8	/* alt MAC addr 9 */
+#define	XMAC_ADDR32_REG			0x300	/* alt MAC addr 9 */
+#define	XMAC_ADDR33_REG			0x308	/* alt MAC addr 10 (HI 10) */
+#define	XMAC_ADDR34_REG			0x310	/* alt MAC addr 10 */
+#define	XMAC_ADDR35_REG			0x318	/* alt MAC addr 10 */
+#define	XMAC_ADDR36_REG			0x320	/* alt MAC addr 11 (HI 11) */
+#define	XMAC_ADDR37_REG			0x328	/* alt MAC addr 11 */
+#define	XMAC_ADDR38_REG			0x330	/* alt MAC addr 11 */
+#define	XMAC_ADDR39_REG			0x338	/* alt MAC addr 12 (HI 12) */
+#define	XMAC_ADDR40_REG			0x340	/* alt MAC addr 12 */
+#define	XMAC_ADDR41_REG			0x348	/* alt MAC addr 12 */
+#define	XMAC_ADDR42_REG			0x350	/* alt MAC addr 13 (HI 13) */
+#define	XMAC_ADDR43_REG			0x358	/* alt MAC addr 13 */
+#define	XMAC_ADDR44_REG			0x360	/* alt MAC addr 13 */
+#define	XMAC_ADDR45_REG			0x368	/* alt MAC addr 14 (HI 14) */
+#define	XMAC_ADDR46_REG			0x370	/* alt MAC addr 14 */
+#define	XMAC_ADDR47_REG			0x378	/* alt MAC addr 14 */
+#define	XMAC_ADDR48_REG			0x380	/* alt MAC addr 15 (HI 15) */
+#define	XMAC_ADDR49_REG			0x388	/* alt MAC addr 15 */
+#define	XMAC_ADDR50_REG			0x390	/* alt MAC addr 15 */
+#define	XMAC_ADDR_FILT0_REG		0x818	/* [47:32] addr filter (HI18) */
+#define	XMAC_ADDR_FILT1_REG		0x820	/* [31:16] of addr filter */
+#define	XMAC_ADDR_FILT2_REG		0x828	/* [15:0] of addr filter */
+#define	XMAC_ADDR_FILT12_MASK_REG 	0x830	/* addr filter 2 & 1 mask */
+#define	XMAC_ADDR_FILT0_MASK_REG	0x838	/* addr filter 0 mask */
+#define	XMAC_HASH_TBL0_REG		0x840	/* hash table 0 reg */
+#define	XMAC_HASH_TBL1_REG		0x848	/* hash table 1 reg */
+#define	XMAC_HASH_TBL2_REG		0x850	/* hash table 2 reg */
+#define	XMAC_HASH_TBL3_REG		0x858	/* hash table 3 reg */
+#define	XMAC_HASH_TBL4_REG		0x860	/* hash table 4 reg */
+#define	XMAC_HASH_TBL5_REG		0x868	/* hash table 5 reg */
+#define	XMAC_HASH_TBL6_REG		0x870	/* hash table 6 reg */
+#define	XMAC_HASH_TBL7_REG		0x878	/* hash table 7 reg */
+#define	XMAC_HASH_TBL8_REG		0x880	/* hash table 8 reg */
+#define	XMAC_HASH_TBL9_REG		0x888	/* hash table 9 reg */
+#define	XMAC_HASH_TBL10_REG		0x890	/* hash table 10 reg */
+#define	XMAC_HASH_TBL11_REG		0x898	/* hash table 11 reg */
+#define	XMAC_HASH_TBL12_REG		0x8a0	/* hash table 12 reg */
+#define	XMAC_HASH_TBL13_REG		0x8a8	/* hash table 13 reg */
+#define	XMAC_HASH_TBL14_REG		0x8b0	/* hash table 14 reg */
+#define	XMAC_HASH_TBL15_REG		0x8b8	/* hash table 15 reg */
+#define	XMAC_HOST_INF0_REG		0x900	/* Host info 0 (alt ad 0) */
+#define	XMAC_HOST_INF1_REG		0x908	/* Host info 1 (alt ad 1) */
+#define	XMAC_HOST_INF2_REG		0x910	/* Host info 2 (alt ad 2) */
+#define	XMAC_HOST_INF3_REG		0x918	/* Host info 3 (alt ad 3) */
+#define	XMAC_HOST_INF4_REG		0x920	/* Host info 4 (alt ad 4) */
+#define	XMAC_HOST_INF5_REG		0x928	/* Host info 5 (alt ad 5) */
+#define	XMAC_HOST_INF6_REG		0x930	/* Host info 6 (alt ad 6) */
+#define	XMAC_HOST_INF7_REG		0x938	/* Host info 7 (alt ad 7) */
+#define	XMAC_HOST_INF8_REG		0x940	/* Host info 8 (alt ad 8) */
+#define	XMAC_HOST_INF9_REG		0x948	/* Host info 9 (alt ad 9) */
+#define	XMAC_HOST_INF10_REG		0x950	/* Host info 10 (alt ad 10) */
+#define	XMAC_HOST_INF11_REG		0x958	/* Host info 11 (alt ad 11) */
+#define	XMAC_HOST_INF12_REG		0x960	/* Host info 12 (alt ad 12) */
+#define	XMAC_HOST_INF13_REG		0x968	/* Host info 13 (alt ad 13) */
+#define	XMAC_HOST_INF14_REG		0x970	/* Host info 14 (alt ad 14) */
+#define	XMAC_HOST_INF15_REG		0x978	/* Host info 15 (alt ad 15) */
+#define	XMAC_HOST_INF16_REG		0x980	/* Host info 16 (hash hit) */
+#define	XMAC_HOST_INF17_REG		0x988	/* Host info 17 (own da) */
+#define	XMAC_HOST_INF18_REG		0x990	/* Host info 18 (filter hit) */
+#define	XMAC_HOST_INF19_REG		0x998	/* Host info 19 (fc hit) */
+#define	XMAC_PA_DATA0_REG		0xb80	/* preamble [31:0] */
+#define	XMAC_PA_DATA1_REG		0xb88	/* preamble [63:32] */
+#define	XMAC_DEBUG_SEL_REG		0xb90	/* debug select */
+#define	XMAC_TRAINING_VECT_REG		0xb98	/* training vector */
+/* x ranges from 0 to 15 (XMAC_MAX_ALT_ADDR_ENTRY - 1) */
+#define	XMAC_ALT_ADDR0N_REG_ADDR(x)	(XMAC_ADDR3_REG + (x) * 24)
+#define	XMAC_ALT_ADDR1N_REG_ADDR(x)	(XMAC_ADDR3_REG + 8 + (x) * 24)
+#define	XMAC_ALT_ADDR2N_REG_ADDR(x)	(XMAC_ADDR3_REG + 16 + (x) * 24)
+#define	XMAC_HASH_TBLN_REG_ADDR(x)	(XMAC_HASH_TBL0_REG + (x) * 8)
+#define	XMAC_HOST_INFN_REG_ADDR(x)	(XMAC_HOST_INF0_REG + (x) * 8)
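+
+/*
+ * Worked expansion of the indexed XMAC macros above, for reference:
+ *	XMAC_ALT_ADDR0N_REG_ADDR(0)  = 0x218	(XMAC_ADDR3_REG,  alt addr 0)
+ *	XMAC_ALT_ADDR0N_REG_ADDR(15) = 0x380	(XMAC_ADDR48_REG, alt addr 15)
+ *	XMAC_HASH_TBLN_REG_ADDR(15)  = 0x8b8	(XMAC_HASH_TBL15_REG)
+ *	XMAC_HOST_INFN_REG_ADDR(19)  = 0x998	(XMAC_HOST_INF19_REG)
+ */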
+
+/* MIF registers offset */
+#define	MIF_BB_MDC_REG			0	   /* MIF bit-bang clock */
+#define	MIF_BB_MDO_REG			0x008	   /* MIF bit-bang data */
+#define	MIF_BB_MDO_EN_REG		0x010	   /* MIF bit-bang output en */
+#define	MIF_OUTPUT_FRAME_REG		0x018	   /* MIF frame/output reg */
+#define	MIF_CONFIG_REG			0x020	   /* MIF config reg */
+#define	MIF_POLL_STATUS_REG		0x028	   /* MIF poll status reg */
+#define	MIF_POLL_MASK_REG		0x030	   /* MIF poll mask reg */
+#define	MIF_STATE_MACHINE_REG		0x038	   /* MIF state machine reg */
+#define	MIF_STATUS_REG			0x040	   /* MIF status reg */
+#define	MIF_MASK_REG			0x048	   /* MIF mask reg */
+
+
+/* PCS registers offset */
+#define	PCS_MII_CTRL_REG		0	   /* PCS MII control reg */
+#define	PCS_MII_STATUS_REG		0x008	   /* PCS MII status reg */
+#define	PCS_MII_ADVERT_REG		0x010	   /* PCS MII advertisement */
+#define	PCS_MII_LPA_REG			0x018	   /* link partner ability */
+#define	PCS_CONFIG_REG			0x020	   /* PCS config reg */
+#define	PCS_STATE_MACHINE_REG		0x028	   /* PCS state machine */
+#define	PCS_INTR_STATUS_REG		0x030	/* PCS interrupt status */
+#define	PCS_DATAPATH_MODE_REG		0x0a0	   /* datapath mode reg */
+#define	PCS_PACKET_COUNT_REG		0x0c0	   /* PCS packet counter */
+
+#define	XPCS_CTRL_1_REG			0	/* Control */
+#define	XPCS_STATUS_1_REG		0x008
+#define	XPCS_DEV_ID_REG			0x010	/* 32bits IEEE manufacture ID */
+#define	XPCS_SPEED_ABILITY_REG		0x018
+#define	XPCS_DEV_IN_PKG_REG		0x020
+#define	XPCS_CTRL_2_REG			0x028
+#define	XPCS_STATUS_2_REG		0x030
+#define	XPCS_PKG_ID_REG			0x038	/* Package ID */
+#define	XPCS_STATUS_REG			0x040
+#define	XPCS_TEST_CTRL_REG		0x048
+#define	XPCS_CFG_VENDOR_1_REG		0x050
+#define	XPCS_DIAG_VENDOR_2_REG		0x058
+#define	XPCS_MASK_1_REG			0x060
+#define	XPCS_PKT_CNTR_REG		0x068
+#define	XPCS_TX_STATE_MC_REG		0x070
+#define	XPCS_DESKEW_ERR_CNTR_REG	0x078
+#define	XPCS_SYM_ERR_CNTR_L0_L1_REG	0x080
+#define	XPCS_SYM_ERR_CNTR_L2_L3_REG	0x088
+#define	XPCS_TRAINING_VECTOR_REG	0x090
+
+/* ESR registers offset */
+#define	ESR_RESET_REG			0
+#define	ESR_CONFIG_REG			0x008
+#define	ESR_0_PLL_CONFIG_REG		0x010
+#define	ESR_0_CONTROL_REG		0x018
+#define	ESR_0_TEST_CONFIG_REG		0x020
+#define	ESR_1_PLL_CONFIG_REG		0x028
+#define	ESR_1_CONTROL_REG		0x030
+#define	ESR_1_TEST_CONFIG_REG		0x038
+#define	ESR_ENET_RGMII_CFG_REG		0x040
+#define	ESR_INTERNAL_SIGNALS_REG	0x800
+#define	ESR_DEBUG_SEL_REG		0x808
+
+
+/* Reset Register */
+#define	MAC_SEND_PAUSE_TIME_MASK	0x0000FFFF /* value of pause time */
+#define	MAC_SEND_PAUSE_SEND		0x00010000 /* send pause flow ctrl */
+
+/* Tx MAC Status Register */
+#define	MAC_TX_FRAME_XMIT		0x00000001 /* successful tx frame */
+#define	MAC_TX_UNDERRUN			0x00000002 /* starvation in xmit */
+#define	MAC_TX_MAX_PACKET_ERR		0x00000004 /* TX frame exceeds max */
+#define	MAC_TX_BYTE_CNT_EXP		0x00000400 /* TX byte cnt overflow */
+#define	MAC_TX_FRAME_CNT_EXP		0x00000800 /* Tx frame cnt overflow */
+
+/* Rx MAC Status Register */
+#define	MAC_RX_FRAME_RECV		0x00000001 /* successful rx frame */
+#define	MAC_RX_OVERFLOW			0x00000002 /* RX FIFO overflow */
+#define	MAC_RX_FRAME_COUNT		0x00000004 /* rx frame cnt rollover */
+#define	MAC_RX_ALIGN_ERR		0x00000008 /* alignment err rollover */
+#define	MAC_RX_CRC_ERR			0x00000010 /* crc error cnt rollover */
+#define	MAC_RX_LEN_ERR			0x00000020 /* length err cnt rollover */
+#define	MAC_RX_VIOL_ERR			0x00000040 /* code vio err rollover */
+#define	MAC_RX_BYTE_CNT_EXP		0x00000080 /* RX MAC byte rollover */
+
+/* MAC Control Status Register */
+#define	MAC_CTRL_PAUSE_RECEIVED		0x00000001 /* successful pause frame */
+#define	MAC_CTRL_PAUSE_STATE		0x00000002 /* notpause-->pause */
+#define	MAC_CTRL_NOPAUSE_STATE		0x00000004 /* pause-->notpause */
+#define	MAC_CTRL_PAUSE_TIME_MASK	0xFFFF0000 /* value of pause time */
+#define	MAC_CTRL_PAUSE_TIME_SHIFT	16
+
+/* Tx MAC Configuration Register */
+#define	MAC_TX_CFG_TXMAC_ENABLE		0x00000001 /* enable TX MAC. */
+#define	MAC_TX_CFG_NO_FCS		0x00000100 /* TX not generate CRC */
+
+/* Rx MAC Configuration Register */
+#define	MAC_RX_CFG_RXMAC_ENABLE		0x00000001 /* enable RX MAC */
+#define	MAC_RX_CFG_STRIP_PAD		0x00000002 /* not supported, set to 0 */
+#define	MAC_RX_CFG_STRIP_FCS		0x00000004 /* strip last 4bytes (CRC) */
+#define	MAC_RX_CFG_PROMISC		0x00000008 /* promisc mode enable */
+#define	MAC_RX_CFG_PROMISC_GROUP  	0x00000010 /* accept all MC frames */
+#define	MAC_RX_CFG_HASH_FILTER_EN	0x00000020 /* use hash table */
+#define	MAC_RX_CFG_ADDR_FILTER_EN    	0x00000040 /* use address filter */
+#define	MAC_RX_CFG_DISABLE_DISCARD	0x00000080 /* do not set abort bit */
+#define	MAC_RX_MAC2IPP_PKT_CNT_EN	0x00000200 /* rx pkt cnt -> BMAC-IPP */
+#define	MAC_RX_MAC_REG_RW_TEST_MASK	0x00000c00 /* BMAC reg RW test */
+#define	MAC_RX_MAC_REG_RW_TEST_SHIFT	10
+
+/* MAC Control Configuration Register */
+#define	MAC_CTRL_CFG_SEND_PAUSE_EN	0x00000001 /* send pause flow ctrl */
+#define	MAC_CTRL_CFG_RECV_PAUSE_EN	0x00000002 /* receive pause flow ctrl */
+#define	MAC_CTRL_CFG_PASS_CTRL		0x00000004 /* accept MAC ctrl pkts */
+
+/* MAC XIF Configuration Register */
+#define	MAC_XIF_TX_OUTPUT_EN		0x00000001 /* enable Tx output driver */
+#define	MAC_XIF_MII_INT_LOOPBACK	0x00000002 /* loopback GMII xmit data */
+#define	MAC_XIF_GMII_MODE		0x00000008 /* operates with GMII clks */
+#define	MAC_XIF_LINK_LED		0x00000020 /* LINKLED# active (low) */
+#define	MAC_XIF_LED_POLARITY		0x00000040 /* LED polarity */
+#define	MAC_XIF_SEL_CLK_25MHZ		0x00000080 /* Select 10/100Mbps */
+
+/* MAC IPG Registers */
+#define	BMAC_MIN_FRAME_MASK		0x3FF	   /* 10-bit reg */
+
+/* MAC Max Frame Size Register */
+#define	BMAC_MAX_BURST_MASK    		0x3FFF0000 /* max burst size [30:16] */
+#define	BMAC_MAX_BURST_SHIFT   		16
+#define	BMAC_MAX_FRAME_MASK    		0x00007FFF /* max frame size [14:0] */
+#define	BMAC_MAX_FRAME_SHIFT   		0
+
+/* MAC Preamble size register */
+#define	BMAC_PA_SIZE_MASK		0x000003FF
+	/* # of preamble bytes TxMAC sends at the beginning of each frame */
+
+/*
+ * mac address registers:
+ *	register	contains			comparison
+ *	--------	--------			----------
+ *	0		16 MSB of primary MAC addr	[47:32] of DA field
+ *	1		16 middle bits ""		[31:16] of DA field
+ *	2		16 LSB ""			[15:0] of DA field
+ *	3*x		16 MSB of alt MAC addr 1-7	[47:32] of DA field
+ *	4*x		16 middle bits ""		[31:16]
+ *	5*x		16 LSB ""			[15:0]
+ *	42		16 MSB of MAC CTRL addr		[47:32] of DA.
+ *	43		16 middle bits ""		[31:16]
+ *	44		16 LSB ""			[15:0]
+ *	MAC CTRL addr must be the reserved multicast addr for MAC CTRL frames.
+ *	if there is a match, MAC will set the bit for alternative address
+ *	filter pass [15]
+ *
+ *	here is the map of registers given MAC address notation: a:b:c:d:e:f
+ *			ab		cd		ef
+ *	primary addr	reg 2		reg 1		reg 0
+ *	alt addr 1	reg 5		reg 4		reg 3
+ *	alt addr x	reg 5*x		reg 4*x		reg 3*x
+ *	|		|		|		|
+ *	|		|		|		|
+ *	alt addr 7	reg 23		reg 22		reg 21
+ *	ctrl addr	reg 44		reg 43		reg 42
+ */
+
+#define	BMAC_ALT_ADDR_BASE		0x118
+#define	BMAC_MAX_ALT_ADDR_ENTRY		7	   /* 7 alternate MAC addr */
+#define	BMAC_MAX_ADDR_ENTRY		(BMAC_MAX_ALT_ADDR_ENTRY + 1)
+
+/* hash table registers */
+#define	MAC_MAX_HASH_ENTRY		16
+
+/* 27-bit register has the current state for key state machines in the MAC */
+#define	MAC_SM_RLM_MASK			0x07800000
+#define	MAC_SM_RLM_SHIFT		23
+#define	MAC_SM_RX_FC_MASK		0x00700000
+#define	MAC_SM_RX_FC_SHIFT		20
+#define	MAC_SM_TLM_MASK			0x000F0000
+#define	MAC_SM_TLM_SHIFT		16
+#define	MAC_SM_ENCAP_SM_MASK		0x0000F000
+#define	MAC_SM_ENCAP_SM_SHIFT		12
+#define	MAC_SM_TX_REQ_MASK		0x00000C00
+#define	MAC_SM_TX_REQ_SHIFT		10
+#define	MAC_SM_TX_FC_MASK		0x000003C0
+#define	MAC_SM_TX_FC_SHIFT		6
+#define	MAC_SM_FIFO_WRITE_SEL_MASK	0x00000038
+#define	MAC_SM_FIFO_WRITE_SEL_SHIFT	3
+#define	MAC_SM_TX_FIFO_EMPTY_MASK	0x00000007
+#define	MAC_SM_TX_FIFO_EMPTY_SHIFT	0
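+
+/*
+ * Example (illustrative only, not part of the original header): fields of
+ * the state machine register are extracted with the same mask-and-shift
+ * pattern used throughout this file, e.g. the receive link management
+ * state:
+ *
+ *	uint32_t rlm_state = (sm_reg & MAC_SM_RLM_MASK) >> MAC_SM_RLM_SHIFT;
+ *
+ * where sm_reg is a raw 32-bit value read from the register.
+ */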
+
+#define	BMAC_ADDR0_CMPEN		0x00000001
+#define	BMAC_ADDRN_CMPEN(x)		(BMAC_ADDR0_CMPEN << (x))
+
+/* MAC Host Info Table Registers */
+#define	BMAC_MAX_HOST_INFO_ENTRY	9 	/* 9 host entries */
+
+/*
+ * ********************* XMAC registers *********************************
+ */
+
+/* Reset Register */
+#define	XTXMAC_SOFT_RST			0x00000001 /* XTX MAC software reset */
+#define	XTXMAC_REG_RST			0x00000002 /* XTX MAC registers reset */
+#define	XRXMAC_SOFT_RST			0x00000001 /* XRX MAC software reset */
+#define	XRXMAC_REG_RST			0x00000002 /* XRX MAC registers reset */
+
+/* XTX MAC Status Register */
+#define	XMAC_TX_FRAME_XMIT		0x00000001 /* successful tx frame */
+#define	XMAC_TX_UNDERRUN		0x00000002 /* starvation in xmit */
+#define	XMAC_TX_MAX_PACKET_ERR		0x00000004 /* XTX frame exceeds max */
+#define	XMAC_TX_OVERFLOW		0x00000008 /* XTX byte cnt overflow */
+#define	XMAC_TX_FIFO_XFR_ERR		0x00000010 /* xtlm state mach error */
+#define	XMAC_TX_BYTE_CNT_EXP		0x00000400 /* XTX byte cnt overflow */
+#define	XMAC_TX_FRAME_CNT_EXP		0x00000800 /* XTX frame cnt overflow */
+
+/* XRX MAC Status Register */
+#define	XMAC_RX_FRAME_RCVD		0x00000001 /* successful rx frame */
+#define	XMAC_RX_OVERFLOW		0x00000002 /* RX FIFO overflow */
+#define	XMAC_RX_UNDERFLOW		0x00000004 /* RX FIFO underrun */
+#define	XMAC_RX_CRC_ERR_CNT_EXP		0x00000008 /* crc error cnt rollover */
+#define	XMAC_RX_LEN_ERR_CNT_EXP		0x00000010 /* length err cnt rollover */
+#define	XMAC_RX_VIOL_ERR_CNT_EXP	0x00000020 /* code vio err rollover */
+#define	XMAC_RX_OCT_CNT_EXP		0x00000040 /* XRX MAC byte rollover */
+#define	XMAC_RX_HST_CNT1_EXP		0x00000080 /* XRX MAC hist1 rollover */
+#define	XMAC_RX_HST_CNT2_EXP		0x00000100 /* XRX MAC hist2 rollover */
+#define	XMAC_RX_HST_CNT3_EXP		0x00000200 /* XRX MAC hist3 rollover */
+#define	XMAC_RX_HST_CNT4_EXP		0x00000400 /* XRX MAC hist4 rollover */
+#define	XMAC_RX_HST_CNT5_EXP		0x00000800 /* XRX MAC hist5 rollover */
+#define	XMAC_RX_HST_CNT6_EXP		0x00001000 /* XRX MAC hist6 rollover */
+#define	XMAC_RX_BCAST_CNT_EXP		0x00002000 /* XRX BC cnt rollover */
+#define	XMAC_RX_MCAST_CNT_EXP		0x00004000 /* XRX MC cnt rollover */
+#define	XMAC_RX_FRAG_CNT_EXP		0x00008000 /* fragment cnt rollover */
+#define	XMAC_RX_ALIGNERR_CNT_EXP	0x00010000 /* framealign err rollover */
+#define	XMAC_RX_LINK_FLT_CNT_EXP	0x00020000 /* link fault cnt rollover */
+#define	XMAC_RX_REMOTE_FLT_DET		0x00040000 /* Remote Fault detected */
+#define	XMAC_RX_LOCAL_FLT_DET		0x00080000 /* Local Fault detected */
+#define	XMAC_RX_HST_CNT7_EXP		0x00100000 /* XRX MAC hist7 rollover */
+
+
+#define	XMAC_CTRL_PAUSE_RCVD		0x00000001 /* successful pause frame */
+#define	XMAC_CTRL_PAUSE_STATE		0x00000002 /* notpause-->pause */
+#define	XMAC_CTRL_NOPAUSE_STATE		0x00000004 /* pause-->notpause */
+#define	XMAC_CTRL_PAUSE_TIME_MASK	0xFFFF0000 /* value of pause time */
+#define	XMAC_CTRL_PAUSE_TIME_SHIFT	16
+
+/* XMAC Configuration Register */
+#define	XMAC_CONFIG_TX_BIT_MASK		0x000000ff /* bits [7:0] */
+#define	XMAC_CONFIG_RX_BIT_MASK		0x001fff00 /* bits [20:8] */
+#define	XMAC_CONFIG_XIF_BIT_MASK	0xffe00000 /* bits [31:21] */
+
+/* XTX MAC config bits */
+#define	XMAC_TX_CFG_TX_ENABLE		0x00000001 /* enable XTX MAC */
+#define	XMAC_TX_CFG_STRETCH_MD		0x00000002 /* WAN application */
+#define	XMAC_TX_CFG_VAR_MIN_IPG_EN	0x00000004 /* Transmit pkts < minpsz */
+#define	XMAC_TX_CFG_ALWAYS_NO_CRC	0x00000008 /* No CRC generated */
+
+#define	XMAC_WARNING_MSG_ENABLE		0x00000080 /* Sim warning msg enable */
+
+/* XRX MAC config bits */
+#define	XMAC_RX_CFG_RX_ENABLE		0x00000100 /* enable XRX MAC */
+#define	XMAC_RX_CFG_PROMISC		0x00000200 /* promisc mode enable */
+#define	XMAC_RX_CFG_PROMISC_GROUP  	0x00000400 /* accept all MC frames */
+#define	XMAC_RX_CFG_ERR_CHK_DISABLE	0x00000800 /* do not set abort bit */
+#define	XMAC_RX_CFG_CRC_CHK_DISABLE	0x00001000 /* disable CRC logic */
+#define	XMAC_RX_CFG_RESERVED_MCAST	0x00002000 /* reserved MCaddr compare */
+#define	XMAC_RX_CFG_CD_VIO_CHK		0x00004000 /* rx code violation chk */
+#define	XMAC_RX_CFG_HASH_FILTER_EN	0x00008000 /* use hash table */
+#define	XMAC_RX_CFG_ADDR_FILTER_EN	0x00010000 /* use alt addr filter */
+#define	XMAC_RX_CFG_STRIP_CRC		0x00020000 /* strip last 4bytes (CRC) */
+#define	XMAC_RX_MAC2IPP_PKT_CNT_EN	0x00040000 /* histo_cntr7 cnt mode */
+#define	XMAC_RX_CFG_RX_PAUSE_EN		0x00080000 /* receive pause flow ctrl */
+#define	XMAC_RX_CFG_PASS_FLOW_CTRL	0x00100000 /* accept MAC ctrl pkts */
+
+
+/* MAC transceiver (XIF) configuration registers */
+
+#define	XMAC_XIF_FORCE_LED_ON		0x00200000 /* Force Link LED on */
+#define	XMAC_XIF_LED_POLARITY		0x00400000 /* LED polarity */
+#define	XMAC_XIF_SEL_POR_CLK_SRC	0x00800000 /* Select POR clk src */
+#define	XMAC_XIF_TX_OUTPUT_EN		0x01000000 /* enable MII/GMII modes */
+#define	XMAC_XIF_LOOPBACK		0x02000000 /* loopback xmac xgmii tx */
+#define	XMAC_XIF_LFS_DISABLE		0x04000000 /* disable link fault sig */
+#define	XMAC_XIF_MII_MODE_MASK		0x18000000 /* MII/GMII/XGMII mode */
+#define	XMAC_XIF_MII_MODE_SHIFT		27
+#define	XMAC_XIF_XGMII_MODE		0x00
+#define	XMAC_XIF_GMII_MODE		0x01
+#define	XMAC_XIF_MII_MODE		0x02
+#define	XMAC_XIF_ILLEGAL_MODE		0x03
+#define	XMAC_XIF_XPCS_BYPASS		0x20000000 /* use external xpcs */
+#define	XMAC_XIF_1G_PCS_BYPASS		0x40000000 /* use external pcs */
+#define	XMAC_XIF_SEL_CLK_25MHZ		0x80000000 /* 25Mhz clk for 100mbps */
+
+/* IPG register */
+#define	XMAC_IPG_VALUE_MASK		0x00000007 /* IPG in XGMII mode */
+#define	XMAC_IPG_VALUE_SHIFT		0
+#define	XMAC_IPG_VALUE1_MASK		0x0000ff00 /* IPG in GMII/MII mode */
+#define	XMAC_IPG_VALUE1_SHIFT		8
+#define	XMAC_IPG_STRETCH_RATIO_MASK	0x001f0000
+#define	XMAC_IPG_STRETCH_RATIO_SHIFT	16
+#define	XMAC_IPG_STRETCH_CONST_MASK	0x00e00000
+#define	XMAC_IPG_STRETCH_CONST_SHIFT	21
+
+#define	IPG_12_15_BYTE			3
+#define	IPG_16_19_BYTE			4
+#define	IPG_20_23_BYTE			5
+#define	IPG1_12_BYTES			10
+#define	IPG1_13_BYTES			11
+#define	IPG1_14_BYTES			12
+#define	IPG1_15_BYTES			13
+#define	IPG1_16_BYTES			14
+
+
+#define	XMAC_MIN_TX_FRM_SZ_MASK		0x3ff	   /* Min tx frame size */
+#define	XMAC_MIN_TX_FRM_SZ_SHIFT	0
+#define	XMAC_SLOT_TIME_MASK		0x0003fc00 /* slot time */
+#define	XMAC_SLOT_TIME_SHIFT		10
+#define	XMAC_MIN_RX_FRM_SZ_MASK		0x3ff00000 /* Min rx frame size */
+#define	XMAC_MIN_RX_FRM_SZ_SHIFT	20
+#define	XMAC_MAX_FRM_SZ_MASK		0x00003fff /* max tx frame size */
+
+/* State Machine Register */
+#define	XMAC_SM_TX_LNK_MGMT_MASK	0x00000007
+#define	XMAC_SM_TX_LNK_MGMT_SHIFT	0
+#define	XMAC_SM_SOP_DETECT		0x00000008
+#define	XMAC_SM_LNK_FLT_SIG_MASK	0x00000030
+#define	XMAC_SM_LNK_FLT_SIG_SHIFT	4
+#define	XMAC_SM_MII_GMII_MD_RX_LNK	0x00000040
+#define	XMAC_SM_XGMII_MD_RX_LNK		0x00000080
+#define	XMAC_SM_XGMII_ONLY_VAL_SIG	0x00000100
+#define	XMAC_SM_ALT_ADR_N_HSH_FN_SIG	0x00000200
+#define	XMAC_SM_RXMAC_IPP_STAT_MASK	0x00001c00
+#define	XMAC_SM_RXMAC_IPP_STAT_SHIFT	10
+#define	XMAC_SM_RXFIFO_WPTR_CLK_MASK	0x007c0000
+#define	XMAC_SM_RXFIFO_WPTR_CLK_SHIFT	18
+#define	XMAC_SM_RXFIFO_RPTR_CLK_MASK	0x0F800000
+#define	XMAC_SM_RXFIFO_RPTR_CLK_SHIFT	23
+#define	XMAC_SM_TXFIFO_FULL_CLK		0x10000000
+#define	XMAC_SM_TXFIFO_EMPTY_CLK	0x20000000
+#define	XMAC_SM_RXFIFO_FULL_CLK		0x40000000
+#define	XMAC_SM_RXFIFO_EMPTY_CLK	0x80000000
+
+/* Internal Signals 1 Register */
+#define	XMAC_IS1_OPP_TXMAC_STAT_MASK	0x0000000F
+#define	XMAC_IS1_OPP_TXMAC_STAT_SHIFT	0
+#define	XMAC_IS1_OPP_TXMAC_ABORT	0x00000010
+#define	XMAC_IS1_OPP_TXMAC_TAG 		0x00000020
+#define	XMAC_IS1_OPP_TXMAC_ACK		0x00000040
+#define	XMAC_IS1_TXMAC_OPP_REQ		0x00000080
+#define	XMAC_IS1_RXMAC_IPP_STAT_MASK	0x0FFFFF00
+#define	XMAC_IS1_RXMAC_IPP_STAT_SHIFT	8
+#define	XMAC_IS1_RXMAC_IPP_CTRL		0x10000000
+#define	XMAC_IS1_RXMAC_IPP_TAG		0x20000000
+#define	XMAC_IS1_IPP_RXMAC_REQ		0x40000000
+#define	XMAC_IS1_RXMAC_IPP_ACK		0x80000000
+
+/* Internal Signals 2 Register */
+#define	XMAC_IS2_TX_HB_TIMER_MASK	0x0000000F
+#define	XMAC_IS2_TX_HB_TIMER_SHIFT	0
+#define	XMAC_IS2_RX_HB_TIMER_MASK	0x000000F0
+#define	XMAC_IS2_RX_HB_TIMER_SHIFT	4
+#define	XMAC_IS2_XPCS_RXC_MASK		0x0000FF00
+#define	XMAC_IS2_XPCS_RXC_SHIFT		8
+#define	XMAC_IS2_XPCS_TXC_MASK		0x00FF0000
+#define	XMAC_IS2_XPCS_TXC_SHIFT		16
+#define	XMAC_IS2_LOCAL_FLT_OC_SYNC	0x01000000
+#define	XMAC_IS2_RMT_FLT_OC_SYNC	0x02000000
+
+/* Register size masking */
+
+#define	XTXMAC_FRM_CNT_MASK		0xFFFFFFFF
+#define	XTXMAC_BYTE_CNT_MASK		0xFFFFFFFF
+#define	XRXMAC_CRC_ER_CNT_MASK		0x000000FF
+#define	XRXMAC_MPSZER_CNT_MASK		0x000000FF
+#define	XRXMAC_CD_VIO_CNT_MASK		0x000000FF
+#define	XRXMAC_BT_CNT_MASK		0xFFFFFFFF
+#define	XRXMAC_HIST_CNT1_MASK		0x001FFFFF
+#define	XRXMAC_HIST_CNT2_MASK		0x001FFFFF
+#define	XRXMAC_HIST_CNT3_MASK		0x000FFFFF
+#define	XRXMAC_HIST_CNT4_MASK		0x0007FFFF
+#define	XRXMAC_HIST_CNT5_MASK		0x0003FFFF
+#define	XRXMAC_HIST_CNT6_MASK		0x0001FFFF
+#define	XRXMAC_BC_FRM_CNT_MASK		0x001FFFFF
+#define	XRXMAC_MC_FRM_CNT_MASK		0x001FFFFF
+#define	XRXMAC_FRAG_CNT_MASK		0x001FFFFF
+#define	XRXMAC_AL_ER_CNT_MASK		0x000000FF
+#define	XMAC_LINK_FLT_CNT_MASK		0x000000FF
+#define	BTXMAC_FRM_CNT_MASK		0x001FFFFF
+#define	BTXMAC_BYTE_CNT_MASK		0x07FFFFFF
+#define	RXMAC_FRM_CNT_MASK		0x0000FFFF
+#define	BRXMAC_BYTE_CNT_MASK		0x07FFFFFF
+#define	BMAC_AL_ER_CNT_MASK		0x0000FFFF
+#define	MAC_LEN_ER_CNT_MASK		0x0000FFFF
+#define	BMAC_CRC_ER_CNT_MASK		0x0000FFFF
+#define	BMAC_CD_VIO_CNT_MASK		0x0000FFFF
+#define	XMAC_XPCS_DESKEW_ERR_CNT_MASK	0x000000FF
+#define	XMAC_XPCS_SYM_ERR_CNT_L0_MASK	0x0000FFFF
+#define	XMAC_XPCS_SYM_ERR_CNT_L1_MASK	0xFFFF0000
+#define	XMAC_XPCS_SYM_ERR_CNT_L1_SHIFT	16
+#define	XMAC_XPCS_SYM_ERR_CNT_L2_MASK	0x0000FFFF
+#define	XMAC_XPCS_SYM_ERR_CNT_L3_MASK	0xFFFF0000
+#define	XMAC_XPCS_SYM_ERR_CNT_L3_SHIFT	16
+
+/* Alternate MAC address registers */
+#define	XMAC_MAX_ALT_ADDR_ENTRY		16	   /* 16 alternate MAC addrs */
+#define	XMAC_MAX_ADDR_ENTRY		(XMAC_MAX_ALT_ADDR_ENTRY + 1)
+
+/* Max / Min parameters for Neptune MAC */
+
+#define	MAC_MAX_ALT_ADDR_ENTRY		XMAC_MAX_ALT_ADDR_ENTRY
+#define	MAC_MAX_HOST_INFO_ENTRY		XMAC_MAX_HOST_INFO_ENTRY
+
+/* HostInfo entry for the unique MAC address */
+#define	XMAC_UNIQUE_HOST_INFO_ENTRY	17
+#define	BMAC_UNIQUE_HOST_INFO_ENTRY	0
+
+/* HostInfo entry for the multicast address */
+#define	XMAC_MULTI_HOST_INFO_ENTRY	16
+#define	BMAC_MULTI_HOST_INFO_ENTRY	8
+
+/* XMAC Host Info Register */
+typedef union hostinfo {
+
+	uint64_t value;
+
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t msw;	/* Most significant word */
+		uint32_t lsw;	/* Least significant word */
+#elif defined(_LITTLE_ENDIAN)
+		uint32_t lsw;	/* Least significant word */
+		uint32_t msw;	/* Most significant word */
+#endif
+	} val;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t	w1;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint32_t reserved2	: 23;
+		uint32_t mac_pref	: 1;
+		uint32_t reserved1	: 5;
+		uint32_t rdc_tbl_num	: 3;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint32_t rdc_tbl_num	: 3;
+		uint32_t reserved1	: 5;
+		uint32_t mac_pref	: 1;
+		uint32_t reserved2	: 23;
+#endif
+		} w0;
+
+#if defined(_LITTLE_ENDIAN)
+		uint32_t	w1;
+#endif
+	} bits;
+
+} hostinfo_t;
+
+typedef union hostinfo *hostinfo_pt;
+
+#define	XMAC_HI_RDC_TBL_NUM_MASK	0x00000007
+#define	XMAC_HI_MAC_PREF		0x00000100
+
+#define	XMAC_MAX_HOST_INFO_ENTRY	20	   /* 20 host entries */
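+
+/*
+ * Example (illustrative only, not part of the original header): composing
+ * a Host Info entry that steers matching frames to RDC table 2 with the
+ * MAC-preference bit set.  The surrounding register programming sequence
+ * is omitted.
+ *
+ *	hostinfo_t hi;
+ *
+ *	hi.value = 0;
+ *	hi.bits.w0.rdc_tbl_num = 2;
+ *	hi.bits.w0.mac_pref = 1;
+ *
+ * which is equivalent to (2 & XMAC_HI_RDC_TBL_NUM_MASK) | XMAC_HI_MAC_PREF.
+ */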
+
+/*
+ * ******************** MIF registers *********************************
+ */
+
+/*
+ * 32-bit register serves as an instruction register when the MIF is
+ * programmed in frame mode. load this register w/ a valid instruction
+ * (as per IEEE 802.3u MII spec). poll this register to check for instruction
+ * execution completion. during a read operation, this register will also
+ * contain the 16-bit data returned by the transceiver. unless specified
+ * otherwise, fields are considered "don't care" when polling for
+ * completion.
+ */
+
+#define	MIF_FRAME_START_MASK		0xC0000000 /* start of frame mask */
+#define	MIF_FRAME_ST_22			0x40000000 /* STart of frame, Cl 22 */
+#define	MIF_FRAME_ST_45			0x00000000 /* STart of frame, Cl 45 */
+#define	MIF_FRAME_OPCODE_MASK		0x30000000 /* opcode */
+#define	MIF_FRAME_OP_READ_22		0x20000000 /* read OPcode, Cl 22 */
+#define	MIF_FRAME_OP_WRITE_22		0x10000000 /* write OPcode, Cl 22 */
+#define	MIF_FRAME_OP_ADDR_45		0x00000000 /* addr of reg to access */
+#define	MIF_FRAME_OP_READ_45		0x30000000 /* read OPcode, Cl 45 */
+#define	MIF_FRAME_OP_WRITE_45		0x10000000 /* write OPcode, Cl 45 */
+#define	MIF_FRAME_OP_P_R_I_A_45		0x10000000 /* post-read-inc-addr */
+#define	MIF_FRAME_PHY_ADDR_MASK		0x0F800000 /* phy address mask */
+#define	MIF_FRAME_PHY_ADDR_SHIFT	23
+#define	MIF_FRAME_REG_ADDR_MASK		0x007C0000 /* reg addr in Cl 22 */
+						/* dev addr in Cl 45 */
+#define	MIF_FRAME_REG_ADDR_SHIFT	18
+#define	MIF_FRAME_TURN_AROUND_MSB	0x00020000 /* turn around, MSB. */
+#define	MIF_FRAME_TURN_AROUND_LSB	0x00010000 /* turn around, LSB. */
+#define	MIF_FRAME_DATA_MASK		0x0000FFFF /* instruction payload */
+
+/* Clause 45 frame field values */
+#define	FRAME45_ST		0
+#define	FRAME45_OP_ADDR		0
+#define	FRAME45_OP_WRITE	1
+#define	FRAME45_OP_READ_INC	2
+#define	FRAME45_OP_READ		3
+
+typedef union _mif_frame_t {
+
+	uint64_t value;
+
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t msw;	/* Most significant word */
+		uint32_t lsw;	/* Least significant word */
+#elif defined(_LITTLE_ENDIAN)
+		uint32_t lsw;	/* Least significant word */
+		uint32_t msw;	/* Most significant word */
+#endif
+	} val;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t	w1;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint32_t st		: 2;
+		uint32_t op		: 2;
+		uint32_t phyad		: 5;
+		uint32_t regad		: 5;
+		uint32_t ta_msb		: 1;
+		uint32_t ta_lsb		: 1;
+		uint32_t data		: 16;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint32_t data		: 16;
+		uint32_t ta_lsb		: 1;
+		uint32_t ta_msb		: 1;
+		uint32_t regad		: 5;
+		uint32_t phyad		: 5;
+		uint32_t op		: 2;
+		uint32_t st		: 2;
+#endif
+		} w0;
+
+#if defined(_LITTLE_ENDIAN)
+		uint32_t	w1;
+#endif
+	} bits;
+} mif_frame_t;
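+
+/*
+ * Example (illustrative only, not part of the original header): building a
+ * Clause 22 read instruction for PHY address 1, register 0 with the
+ * mif_frame_t union.  The frame is then written to MIF_OUTPUT_FRAME_REG;
+ * once the operation completes, the 16-bit result is returned in the data
+ * field.  The PHY and register addresses here are arbitrary.
+ *
+ *	mif_frame_t frame;
+ *
+ *	frame.value = 0;
+ *	frame.bits.w0.st = 0x1;		(start of frame, Clause 22)
+ *	frame.bits.w0.op = 0x2;		(read opcode, Clause 22)
+ *	frame.bits.w0.phyad = 1;	(PHY address)
+ *	frame.bits.w0.regad = 0;	(register address)
+ *	frame.bits.w0.ta_msb = 1;	(turn-around)
+ */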
+
+#define	MIF_CFG_POLL_EN			0x00000008 /* enable polling */
+#define	MIF_CFG_BB_MODE			0x00000010 /* bit-bang mode */
+#define	MIF_CFG_POLL_REG_MASK		0x000003E0 /* reg addr to be polled */
+#define	MIF_CFG_POLL_REG_SHIFT		5
+#define	MIF_CFG_POLL_PHY_MASK		0x00007C00 /* XCVR addr to be polled */
+#define	MIF_CFG_POLL_PHY_SHIFT		10
+#define	MIF_CFG_INDIRECT_MODE		0x00008000
+					/* used to decide if Cl 22 */
+					/* or Cl 45 frame is */
+					/* constructed. */
+					/* 1 = Clause 45,ST = '00' */
+					/* 0 = Clause 22,ST = '01' */
+#define	MIF_CFG_ATCE_GE_EN	0x00010000 /* Enable ATCA gigabit mode */
+
+typedef union _mif_cfg_t {
+
+	uint64_t value;
+
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t msw;	/* Most significant word */
+		uint32_t lsw;	/* Least significant word */
+
+#elif defined(_LITTLE_ENDIAN)
+		uint32_t lsw;	/* Least significant word */
+		uint32_t msw;	/* Most significant word */
+#endif
+	} val;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t	w1;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint32_t res2		: 15;
+		uint32_t atca_ge	: 1;
+		uint32_t indirect_md	: 1;
+		uint32_t phy_addr	: 5;
+		uint32_t reg_addr	: 5;
+		uint32_t bb_mode	: 1;
+		uint32_t poll_en	: 1;
+		uint32_t res1		: 2;
+		uint32_t res		: 1;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint32_t res		: 1;
+		uint32_t res1		: 2;
+		uint32_t poll_en	: 1;
+		uint32_t bb_mode	: 1;
+		uint32_t reg_addr	: 5;
+		uint32_t phy_addr	: 5;
+		uint32_t indirect_md	: 1;
+		uint32_t atca_ge	: 1;
+		uint32_t res2		: 15;
+#endif
+		} w0;
+
+#if defined(_LITTLE_ENDIAN)
+		uint32_t	w1;
+#endif
+	} bits;
+
+} mif_cfg_t;
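+
+/*
+ * Example (illustrative only, not part of the original header): selecting
+ * Clause 45 (indirect) frame construction and polling of an arbitrary
+ * PHY/register pair through mif_cfg_t.
+ *
+ *	mif_cfg_t cfg;
+ *
+ *	cfg.value = 0;
+ *	cfg.bits.w0.indirect_md = 1;	(build Clause 45 frames, ST = 00)
+ *	cfg.bits.w0.phy_addr = 8;	(XCVR address to poll)
+ *	cfg.bits.w0.reg_addr = 1;	(register address to poll)
+ *	cfg.bits.w0.poll_en = 1;
+ */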
+
+#define	MIF_POLL_STATUS_DATA_MASK	0xffff0000
+#define	MIF_POLL_STATUS_STAT_MASK	0x0000ffff
+
+typedef union _mif_poll_stat_t {
+	uint64_t value;
+
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t msw;	/* Most significant word */
+		uint32_t lsw;	/* Least significant word */
+#elif defined(_LITTLE_ENDIAN)
+		uint32_t lsw;	/* Least significant word */
+		uint32_t msw;	/* Most significant word */
+#endif
+	} val;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t	w1;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t data;
+		uint16_t status;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t status;
+		uint16_t data;
+#endif
+		} w0;
+
+#if defined(_LITTLE_ENDIAN)
+		uint32_t	w1;
+#endif
+	} bits;
+} mif_poll_stat_t;
+
+
+#define	MIF_POLL_MASK_MASK	0x0000ffff
+
+typedef union _mif_poll_mask_t {
+	uint64_t value;
+
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t msw;	/* Most significant word */
+		uint32_t lsw;	/* Least significant word */
+#elif defined(_LITTLE_ENDIAN)
+		uint32_t lsw;	/* Least significant word */
+		uint32_t msw;	/* Most significant word */
+#endif
+	} val;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t	w1;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t rsvd;
+		uint16_t mask;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t mask;
+		uint16_t rsvd;
+#endif
+		} w0;
+
+#if defined(_LITTLE_ENDIAN)
+		uint32_t	w1;
+#endif
+	} bits;
+} mif_poll_mask_t;
+
+#define	MIF_STATUS_INIT_DONE_MASK	0x00000001
+#define	MIF_STATUS_XGE_ERR0_MASK	0x00000002
+#define	MIF_STATUS_XGE_ERR1_MASK	0x00000004
+#define	MIF_STATUS_PEU_ERR_MASK		0x00000008
+#define	MIF_STATUS_EXT_PHY_INTR0_MASK	0x00000010
+#define	MIF_STATUS_EXT_PHY_INTR1_MASK	0x00000020
+
+typedef union _mif_stat_t {
+	uint64_t value;
+
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t msw;	/* Most significant word */
+		uint32_t lsw;	/* Least significant word */
+#elif defined(_LITTLE_ENDIAN)
+		uint32_t lsw;	/* Least significant word */
+		uint32_t msw;	/* Most significant word */
+#endif
+	} val;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t	w1;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint32_t rsvd:26;
+		uint32_t ext_phy_intr_flag1:1;
+		uint32_t ext_phy_intr_flag0:1;
+		uint32_t peu_err:1;
+		uint32_t xge_err1:1;
+		uint32_t xge_err0:1;
+		uint32_t mif_init_done_stat:1;
+
+#elif defined(_BIT_FIELDS_LTOH)
+		uint32_t mif_init_done_stat:1;
+		uint32_t xge_err0:1;
+		uint32_t xge_err1:1;
+		uint32_t ext_phy_intr_flag0:1;
+		uint32_t ext_phy_intr_flag1:1;
+		uint32_t rsvd:26;
+#endif
+		} w0;
+
+#if defined(_LITTLE_ENDIAN)
+		uint32_t	w1;
+#endif
+	} bits;
+} mif_stat_t;
+
+/* MIF State Machine Register */
+
+#define	MIF_SM_EXECUTION_MASK		0x0000003f /* execution state */
+#define	MIF_SM_EXECUTION_SHIFT		0
+#define	MIF_SM_CONTROL_MASK		0x000001c0 /* control state */
+#define	MIF_SM_CONTROL_MASK_SHIFT	6
+#define	MIF_SM_MDI			0x00000200
+#define	MIF_SM_MDO			0x00000400
+#define	MIF_SM_MDO_EN			0x00000800
+#define	MIF_SM_MDC			0x00001000
+#define	MIF_SM_MDI_0			0x00002000
+#define	MIF_SM_MDI_1			0x00004000
+#define	MIF_SM_MDI_2			0x00008000
+#define	MIF_SM_PORT_ADDR_MASK		0x001f0000
+#define	MIF_SM_PORT_ADDR_SHIFT		16
+#define	MIF_SM_INT_SIG_MASK		0xffe00000
+#define	MIF_SM_INT_SIG_SHIFT		21
+
+
+/*
+ * ******************** PCS registers *********************************
+ */
+
+/* PCS Registers */
+#define	PCS_MII_CTRL_1000_SEL		0x0040	   /* reads 1. ignored on wr */
+#define	PCS_MII_CTRL_COLLISION_TEST	0x0080	   /* COL signal */
+#define	PCS_MII_CTRL_DUPLEX		0x0100	   /* forced 0x0. */
+#define	PCS_MII_RESTART_AUTONEG		0x0200	   /* self clearing. */
+#define	PCS_MII_ISOLATE			0x0400	   /* read 0. ignored on wr */
+#define	PCS_MII_POWER_DOWN		0x0800	   /* read 0. ignored on wr */
+#define	PCS_MII_AUTONEG_EN		0x1000	   /* autonegotiation */
+#define	PCS_MII_10_100_SEL		0x2000	   /* read 0. ignored on wr */
+#define	PCS_MII_RESET			0x8000	   /* reset PCS. */
+
+typedef union _pcs_ctrl_t {
+	uint64_t value;
+
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t msw;	/* Most significant word */
+		uint32_t lsw;	/* Least significant word */
+#elif defined(_LITTLE_ENDIAN)
+		uint32_t lsw;	/* Least significant word */
+		uint32_t msw;	/* Most significant word */
+#endif
+	} val;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t	w1;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res0		: 16;
+			uint32_t reset		: 1;
+			uint32_t res1		: 1;
+			uint32_t sel_10_100	: 1;
+			uint32_t an_enable	: 1;
+			uint32_t pwr_down	: 1;
+			uint32_t isolate	: 1;
+			uint32_t restart_an	: 1;
+			uint32_t duplex		: 1;
+			uint32_t col_test	: 1;
+			uint32_t sel_1000	: 1;
+			uint32_t res2		: 6;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t res2		: 6;
+			uint32_t sel_1000	: 1;
+			uint32_t col_test	: 1;
+			uint32_t duplex		: 1;
+			uint32_t restart_an	: 1;
+			uint32_t isolate	: 1;
+			uint32_t pwr_down	: 1;
+			uint32_t an_enable	: 1;
+			uint32_t sel_10_100	: 1;
+			uint32_t res1		: 1;
+			uint32_t reset		: 1;
+			uint32_t res0		: 16;
+#endif
+		} w0;
+
+#if defined(_LITTLE_ENDIAN)
+		uint32_t	w1;
+#endif
+	} bits;
+} pcs_ctrl_t;
+
+#define	PCS_MII_STATUS_EXTEND_CAP	0x0001	   /* reads 0 */
+#define	PCS_MII_STATUS_JABBER_DETECT	0x0002	   /* reads 0 */
+#define	PCS_MII_STATUS_LINK_STATUS	0x0004	   /* link status */
+#define	PCS_MII_STATUS_AUTONEG_ABLE	0x0008	   /* reads 1 */
+#define	PCS_MII_STATUS_REMOTE_FAULT	0x0010	   /* remote fault detected */
+#define	PCS_MII_STATUS_AUTONEG_COMP	0x0020	   /* auto-neg completed */
+#define	PCS_MII_STATUS_EXTEND_STATUS	0x0100	   /* 1000 Base-X PHY */
+
+typedef union _pcs_stat_t {
+	uint64_t value;
+
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t msw;	/* Most significant word */
+		uint32_t lsw;	/* Least significant word */
+#elif defined(_LITTLE_ENDIAN)
+		uint32_t lsw;	/* Least significant word */
+		uint32_t msw;	/* Most significant word */
+#endif
+	} val;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t	w1;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint32_t res0		: 23;
+		uint32_t ext_stat	: 1;
+		uint32_t res1		: 2;
+		uint32_t an_complete	: 1;
+		uint32_t remote_fault	: 1;
+		uint32_t an_able	: 1;
+		uint32_t link_stat	: 1;
+		uint32_t jabber_detect	: 1;
+		uint32_t ext_cap	: 1;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint32_t ext_cap	: 1;
+		uint32_t jabber_detect	: 1;
+		uint32_t link_stat	: 1;
+		uint32_t an_able	: 1;
+		uint32_t remote_fault	: 1;
+		uint32_t an_complete	: 1;
+		uint32_t res1		: 2;
+		uint32_t ext_stat	: 1;
+		uint32_t res0		: 23;
+#endif
+		} w0;
+
+#if defined(_LITTLE_ENDIAN)
+		uint32_t	w1;
+#endif
+	} bits;
+} pcs_stat_t;
+
+#define	PCS_MII_ADVERT_FD		0x0020	   /* advertise full duplex */
+#define	PCS_MII_ADVERT_HD		0x0040	   /* advertise half-duplex */
+#define	PCS_MII_ADVERT_SYM_PAUSE	0x0080	   /* advertise PAUSE sym */
+#define	PCS_MII_ADVERT_ASYM_PAUSE	0x0100	   /* advertises PAUSE asym */
+#define	PCS_MII_ADVERT_RF_MASK		0x3000	   /* remote fault */
+#define	PCS_MII_ADVERT_RF_SHIFT		12
+#define	PCS_MII_ADVERT_ACK		0x4000	   /* (ro) */
+#define	PCS_MII_ADVERT_NEXT_PAGE	0x8000	   /* (ro) forced 0x0 */
+
+#define	PCS_MII_LPA_FD			PCS_MII_ADVERT_FD
+#define	PCS_MII_LPA_HD			PCS_MII_ADVERT_HD
+#define	PCS_MII_LPA_SYM_PAUSE		PCS_MII_ADVERT_SYM_PAUSE
+#define	PCS_MII_LPA_ASYM_PAUSE		PCS_MII_ADVERT_ASYM_PAUSE
+#define	PCS_MII_LPA_RF_MASK		PCS_MII_ADVERT_RF_MASK
+#define	PCS_MII_LPA_RF_SHIFT		PCS_MII_ADVERT_RF_SHIFT
+#define	PCS_MII_LPA_ACK			PCS_MII_ADVERT_ACK
+#define	PCS_MII_LPA_NEXT_PAGE		PCS_MII_ADVERT_NEXT_PAGE
+
+typedef union _pcs_anar_t {
+	uint64_t value;
+
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t msw;	/* Most significant word */
+		uint32_t lsw;	/* Least significant word */
+#elif defined(_LITTLE_ENDIAN)
+		uint32_t lsw;	/* Least significant word */
+		uint32_t msw;	/* Most significant word */
+#endif
+	} val;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t	w1;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint32_t res0		: 16;
+		uint32_t next_page	: 1;
+		uint32_t ack		: 1;
+		uint32_t remote_fault	: 2;
+		uint32_t res1		: 3;
+		uint32_t asm_pause	: 1;
+		uint32_t pause		: 1;
+		uint32_t half_duplex	: 1;
+		uint32_t full_duplex	: 1;
+		uint32_t res2		: 5;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint32_t res2		: 5;
+		uint32_t full_duplex	: 1;
+		uint32_t half_duplex	: 1;
+		uint32_t pause		: 1;
+		uint32_t asm_pause	: 1;
+		uint32_t res1		: 3;
+		uint32_t remote_fault	: 2;
+		uint32_t ack		: 1;
+		uint32_t next_page	: 1;
+		uint32_t res0		: 16;
+#endif
+		} w0;
+
+#if defined(_LITTLE_ENDIAN)
+		uint32_t	w1;
+#endif
+	} bits;
+} pcs_anar_t, *p_pcs_anar_t;
+
+#define	PCS_CFG_EN			0x0001	   /* enable PCS. */
+#define	PCS_CFG_SD_OVERRIDE		0x0002
+#define	PCS_CFG_SD_ACTIVE_LOW		0x0004	   /* sig detect active low */
+#define	PCS_CFG_JITTER_STUDY_MASK	0x0018	   /* jitter measurements */
+#define	PCS_CFG_JITTER_STUDY_SHIFT	4
+#define	PCS_CFG_10MS_TIMER_OVERRIDE	0x0020	   /* shortens autoneg timer */
+#define	PCS_CFG_MASK			0x0040	   /* PCS global mask bit */
+
+typedef union _pcs_cfg_t {
+	uint64_t value;
+
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t msw;	/* Most significant word */
+		uint32_t lsw;	/* Least significant word */
+#elif defined(_LITTLE_ENDIAN)
+		uint32_t lsw;	/* Least significant word */
+		uint32_t msw;	/* Most significant word */
+#endif
+	} val;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t	w1;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint32_t res0			: 25;
+		uint32_t mask			: 1;
+		uint32_t override_10ms_timer	: 1;
+		uint32_t jitter_study		: 2;
+		uint32_t sig_det_a_low		: 1;
+		uint32_t sig_det_override	: 1;
+		uint32_t enable			: 1;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint32_t enable			: 1;
+		uint32_t sig_det_override	: 1;
+		uint32_t sig_det_a_low		: 1;
+		uint32_t jitter_study		: 2;
+		uint32_t override_10ms_timer	: 1;
+		uint32_t mask			: 1;
+		uint32_t res0			: 25;
+#endif
+		} w0;
+
+#if defined(_LITTLE_ENDIAN)
+		uint32_t	w1;
+#endif
+	} bits;
+} pcs_cfg_t, *p_pcs_cfg_t;
+
+
+/* used for diagnostic purposes. bits 20-22 autoclear on read */
+#define	PCS_SM_TX_STATE_MASK		0x0000000F /* Tx idle state mask */
+#define	PCS_SM_TX_STATE_SHIFT		0
+#define	PCS_SM_RX_STATE_MASK		0x000000F0 /* Rx idle state mask */
+#define	PCS_SM_RX_STATE_SHIFT		4
+#define	PCS_SM_WORD_SYNC_STATE_MASK	0x00000700 /* loss of sync state mask */
+#define	PCS_SM_WORD_SYNC_STATE_SHIFT	8
+#define	PCS_SM_SEQ_DETECT_STATE_MASK	0x00001800 /* sequence detect */
+#define	PCS_SM_SEQ_DETECT_STATE_SHIFT	11
+#define	PCS_SM_LINK_STATE_MASK		0x0001E000 /* link state */
+#define	PCS_SM_LINK_STATE_SHIFT		13
+#define	PCS_SM_LOSS_LINK_C		0x00100000 /* loss of link */
+#define	PCS_SM_LOSS_LINK_SYNC		0x00200000 /* loss of sync */
+#define	PCS_SM_LOSS_SIGNAL_DETECT	0x00400000 /* signal detect fail */
+#define	PCS_SM_NO_LINK_BREAKLINK	0x01000000 /* receipt of breaklink */
+#define	PCS_SM_NO_LINK_SERDES		0x02000000 /* serdes initializing */
+#define	PCS_SM_NO_LINK_C		0x04000000 /* C codes not stable */
+#define	PCS_SM_NO_LINK_SYNC		0x08000000 /* word sync not achieved */
+#define	PCS_SM_NO_LINK_WAIT_C		0x10000000 /* waiting for C codes */
+#define	PCS_SM_NO_LINK_NO_IDLE		0x20000000 /* partner sends C codes */
+
+typedef union _pcs_stat_mc_t {
+	uint64_t value;
+
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t msw;	/* Most significant word */
+		uint32_t lsw;	/* Least significant word */
+#elif defined(_LITTLE_ENDIAN)
+		uint32_t lsw;	/* Least significant word */
+		uint32_t msw;	/* Most significant word */
+#endif
+	} val;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t	w1;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint32_t res2		: 2;
+		uint32_t lnk_dwn_ni	: 1;
+		uint32_t lnk_dwn_wc	: 1;
+		uint32_t lnk_dwn_ls	: 1;
+		uint32_t lnk_dwn_nc	: 1;
+		uint32_t lnk_dwn_ser	: 1;
+		uint32_t lnk_loss_bc	: 1;
+		uint32_t res1		: 1;
+		uint32_t loss_sd	: 1;
+		uint32_t lnk_loss_sync	: 1;
+		uint32_t lnk_loss_c	: 1;
+		uint32_t res0		: 3;
+		uint32_t link_cfg_stat	: 4;
+		uint32_t seq_detc_stat	: 2;
+		uint32_t word_sync	: 3;
+		uint32_t rx_ctrl	: 4;
+		uint32_t tx_ctrl	: 4;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint32_t tx_ctrl	: 4;
+		uint32_t rx_ctrl	: 4;
+		uint32_t word_sync	: 3;
+		uint32_t seq_detc_stat	: 2;
+		uint32_t link_cfg_stat	: 4;
+		uint32_t res0		: 3;
+		uint32_t lnk_loss_c	: 1;
+		uint32_t lnk_loss_sync	: 1;
+		uint32_t loss_sd	: 1;
+		uint32_t res1		: 1;
+		uint32_t lnk_loss_bc	: 1;
+		uint32_t lnk_dwn_ser	: 1;
+		uint32_t lnk_dwn_nc	: 1;
+		uint32_t lnk_dwn_ls	: 1;
+		uint32_t lnk_dwn_wc	: 1;
+		uint32_t lnk_dwn_ni	: 1;
+		uint32_t res2		: 2;
+#endif
+		} w0;
+
+#if defined(_LITTLE_ENDIAN)
+		uint32_t	w1;
+#endif
+	} bits;
+} pcs_stat_mc_t, *p_pcs_stat_mc_t;
+
+#define	PCS_INTR_STATUS_LINK_CHANGE	0x04	/* link status has changed */
+
+/*
+ * control which network interface is used. no more than one bit should
+ * be set.
+ */
+#define	PCS_DATAPATH_MODE_PCS		0	   /* Internal PCS is used */
+#define	PCS_DATAPATH_MODE_MII		0x00000002 /* GMII/RGMII is selected. */
+
+#define	PCS_PACKET_COUNT_TX_MASK	0x000007FF /* pkts xmitted by PCS */
+#define	PCS_PACKET_COUNT_RX_MASK	0x07FF0000 /* pkts recvd by PCS */
+#define	PCS_PACKET_COUNT_RX_SHIFT	16
+
+/*
+ * ******************** XPCS registers *********************************
+ */
+
+/* XPCS Base 10G Control1 Register */
+#define	XPCS_CTRL1_RST			0x8000 /* Self clearing reset. */
+#define	XPCS_CTRL1_LOOPBK		0x4000 /* xpcs Loopback */
+#define	XPCS_CTRL1_SPEED_SEL_3		0x2000 /* 1 indicates 10G speed */
+#define	XPCS_CTRL1_LOW_PWR		0x0800 /* low power mode. */
+#define	XPCS_CTRL1_SPEED_SEL_1		0x0040 /* 1 indicates 10G speed */
+#define	XPCS_CTRL1_SPEED_SEL_0_MASK	0x003c /* 0 indicates 10G speed. */
+#define	XPCS_CTRL1_SPEED_SEL_0_SHIFT	2
+
+
+
+typedef union _xpcs_ctrl1_t {
+	uint64_t value;
+
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t msw;	/* Most significant word */
+		uint32_t lsw;	/* Least significant word */
+#elif defined(_LITTLE_ENDIAN)
+		uint32_t lsw;	/* Least significant word */
+		uint32_t msw;	/* Most significant word */
+#endif
+	} val;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t	w1;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint32_t res3		: 16;
+		uint32_t reset		: 1;
+		uint32_t csr_lb		: 1;
+		uint32_t csr_speed_sel3	: 1;
+		uint32_t res2		: 1;
+		uint32_t csr_low_pwr	: 1;
+		uint32_t res1		: 4;
+		uint32_t csr_speed_sel1	: 1;
+		uint32_t csr_speed_sel0	: 4;
+		uint32_t res0		: 2;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint32_t res0		: 2;
+		uint32_t csr_speed_sel0	: 4;
+		uint32_t csr_speed_sel1	: 1;
+		uint32_t res1		: 4;
+		uint32_t csr_low_pwr	: 1;
+		uint32_t res2		: 1;
+		uint32_t csr_speed_sel3	: 1;
+		uint32_t csr_lb		: 1;
+		uint32_t reset		: 1;
+		uint32_t res3		: 16;
+#endif
+		} w0;
+
+#if defined(_LITTLE_ENDIAN)
+		uint32_t	w1;
+#endif
+	} bits;
+} xpcs_ctrl1_t;
+
+
+/* XPCS Base 10G Status1 Register (Read Only) */
+#define	XPCS_STATUS1_FAULT		0x0080
+#define	XPCS_STATUS1_RX_LINK_STATUS_UP	0x0004 /* Link status interrupt */
+#define	XPCS_STATUS1_LOW_POWER_ABILITY	0x0002 /* low power mode */
+
+
+typedef	union _xpcs_stat1_t {
+	uint64_t value;
+
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t msw;	/* Most significant word */
+		uint32_t lsw;	/* Least significant word */
+#elif defined(_LITTLE_ENDIAN)
+		uint32_t lsw;	/* Least significant word */
+		uint32_t msw;	/* Most significant word */
+#endif
+	} val;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t	w1;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint32_t res4			: 16;
+		uint32_t res3			: 8;
+		uint32_t csr_fault		: 1;
+		uint32_t res1			: 4;
+		uint32_t csr_rx_link_stat	: 1;
+		uint32_t csr_low_pwr_ability	: 1;
+		uint32_t res0			: 1;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint32_t res0			: 1;
+		uint32_t csr_low_pwr_ability	: 1;
+		uint32_t csr_rx_link_stat	: 1;
+		uint32_t res1			: 4;
+		uint32_t csr_fault		: 1;
+		uint32_t res3			: 8;
+		uint32_t res4			: 16;
+#endif
+		} w0;
+
+#if defined(_LITTLE_ENDIAN)
+		uint32_t	w1;
+#endif
+	} bits;
+} xpcs_stat1_t;
+
+
+/* XPCS Base Speed Ability Register. Indicates 10G capability */
+#define	XPCS_SPEED_ABILITY_10_GIG	0x0001
+
+
+typedef	union _xpcs_speed_ab_t {
+	uint64_t value;
+
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t msw;	/* Most significant word */
+		uint32_t lsw;	/* Least significant word */
+#elif defined(_LITTLE_ENDIAN)
+		uint32_t lsw;	/* Least significant word */
+		uint32_t msw;	/* Most significant word */
+#endif
+	} val;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t	w1;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint32_t res1		: 16;
+		uint32_t res0		: 15;
+		uint32_t csr_10gig	: 1;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint32_t csr_10gig	: 1;
+		uint32_t res0		: 15;
+		uint32_t res1		: 16;
+#endif
+		} w0;
+
+#if defined(_LITTLE_ENDIAN)
+		uint32_t	w1;
+#endif
+	} bits;
+} xpcs_speed_ab_t;
+
+
+/* XPCS Base 10G Devices in Package Register */
+#define	XPCS_DEV_IN_PKG_CSR_VENDOR2	0x80000000
+#define	XPCS_DEV_IN_PKG_CSR_VENDOR1	0x40000000
+#define	XPCS_DEV_IN_PKG_DTE_XS		0x00000020
+#define	XPCS_DEV_IN_PKG_PHY_XS		0x00000010
+#define	XPCS_DEV_IN_PKG_PCS		0x00000008
+#define	XPCS_DEV_IN_PKG_WIS		0x00000004
+#define	XPCS_DEV_IN_PKG_PMD_PMA		0x00000002
+#define	XPCS_DEV_IN_PKG_CLS_22_REG	0x00000001
+
+
+
+typedef	union _xpcs_dev_in_pkg_t {
+	uint64_t value;
+
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t msw;	/* Most significant word */
+		uint32_t lsw;	/* Least significant word */
+#elif defined(_LITTLE_ENDIAN)
+		uint32_t lsw;	/* Least significant word */
+		uint32_t msw;	/* Most significant word */
+#endif
+	} val;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t	w1;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint32_t csr_vendor2	: 1;
+		uint32_t csr_vendor1	: 1;
+		uint32_t res1		: 14;
+		uint32_t res0		: 10;
+		uint32_t dte_xs		: 1;
+		uint32_t phy_xs		: 1;
+		uint32_t pcs		: 1;
+		uint32_t wis		: 1;
+		uint32_t pmd_pma	: 1;
+		uint32_t clause_22_reg	: 1;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint32_t clause_22_reg	: 1;
+		uint32_t pmd_pma	: 1;
+		uint32_t wis		: 1;
+		uint32_t pcs		: 1;
+		uint32_t phy_xs		: 1;
+		uint32_t dte_xs		: 1;
+		uint32_t res0		: 10;
+		uint32_t res1		: 14;
+		uint32_t csr_vendor1	: 1;
+		uint32_t csr_vendor2	: 1;
+#endif
+		} w0;
+
+#if defined(_LITTLE_ENDIAN)
+		uint32_t	w1;
+#endif
+	} bits;
+} xpcs_dev_in_pkg_t;
+
+
+/* XPCS Base 10G Control2 Register */
+#define	XPCS_PSC_SEL_MASK		0x0003
+#define	PSC_SEL_10G_BASE_X_PCS		0x0001
+
+
+typedef	union _xpcs_ctrl2_t {
+	uint64_t value;
+
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t msw;	/* Most significant word */
+		uint32_t lsw;	/* Least significant word */
+#elif defined(_LITTLE_ENDIAN)
+		uint32_t lsw;	/* Least significant word */
+		uint32_t msw;	/* Most significant word */
+#endif
+	} val;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t	w1;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint32_t res1		: 16;
+		uint32_t res0		: 14;
+		uint32_t csr_psc_sel	: 2;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint32_t csr_psc_sel	: 2;
+		uint32_t res0		: 14;
+		uint32_t res1		: 16;
+#endif
+		} w0;
+
+#if defined(_LITTLE_ENDIAN)
+		uint32_t	w1;
+#endif
+	} bits;
+} xpcs_ctrl2_t;
+
+
+/* XPCS Base10G Status2 Register */
+#define	XPCS_STATUS2_DEV_PRESENT_MASK	0xc000	/* device present [15:14] */
+#define	XPCS_STATUS2_TX_FAULT		0x0800	/* Fault on tx path */
+#define	XPCS_STATUS2_RX_FAULT		0x0400	/* Fault on rx path */
+#define	XPCS_STATUS2_TEN_GBASE_W	0x0004	/* 10G-Base-W */
+#define	XPCS_STATUS2_TEN_GBASE_X	0x0002	/* 10G-Base-X */
+#define	XPCS_STATUS2_TEN_GBASE_R	0x0001	/* 10G-Base-R */
+
+typedef	union _xpcs_stat2_t {
+	uint64_t value;
+
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t msw;	/* Most significant word */
+		uint32_t lsw;	/* Least significant word */
+#elif defined(_LITTLE_ENDIAN)
+		uint32_t lsw;	/* Least significant word */
+		uint32_t msw;	/* Most significant word */
+#endif
+	} val;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t	w1;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint32_t res2		: 16;
+		uint32_t csr_dev_pres	: 2;
+		uint32_t res1		: 2;
+		uint32_t csr_tx_fault	: 1;
+		uint32_t csr_rx_fault	: 1;
+		uint32_t res0		: 7;
+		uint32_t ten_gbase_w	: 1;
+		uint32_t ten_gbase_x	: 1;
+		uint32_t ten_gbase_r	: 1;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint32_t ten_gbase_r	: 1;
+		uint32_t ten_gbase_x	: 1;
+		uint32_t ten_gbase_w	: 1;
+		uint32_t res0		: 7;
+		uint32_t csr_rx_fault	: 1;
+		uint32_t csr_tx_fault	: 1;
+		uint32_t res1		: 2;
+		uint32_t csr_dev_pres	: 2;
+		uint32_t res2		: 16;
+#endif
+		} w0;
+
+#if defined(_LITTLE_ENDIAN)
+		uint32_t	w1;
+#endif
+	} bits;
+} xpcs_stat2_t;
+
+
+
+/* XPCS Base10G Status Register */
+#define	XPCS_STATUS_LANE_ALIGN		0x1000 /* 10GBaseX PCS rx lanes align */
+#define	XPCS_STATUS_PATTERN_TEST_ABLE	0x0800 /* able to generate patterns. */
+#define	XPCS_STATUS_LANE3_SYNC		0x0008 /* Lane 3 is synchronized */
+#define	XPCS_STATUS_LANE2_SYNC		0x0004 /* Lane 2 is synchronized */
+#define	XPCS_STATUS_LANE1_SYNC		0x0002 /* Lane 1 is synchronized */
+#define	XPCS_STATUS_LANE0_SYNC		0x0001 /* Lane 0 is synchronized */
+
+typedef	union _xpcs_stat_t {
+	uint64_t value;
+
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t msw;	/* Most significant word */
+		uint32_t lsw;	/* Least significant word */
+#elif defined(_LITTLE_ENDIAN)
+		uint32_t lsw;	/* Least significant word */
+		uint32_t msw;	/* Most significant word */
+#endif
+	} val;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t	w1;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint32_t res2			: 16;
+		uint32_t res1			: 3;
+		uint32_t csr_lane_align		: 1;
+		uint32_t csr_pattern_test_able	: 1;
+		uint32_t res0			: 7;
+		uint32_t csr_lane3_sync		: 1;
+		uint32_t csr_lane2_sync		: 1;
+		uint32_t csr_lane1_sync		: 1;
+		uint32_t csr_lane0_sync		: 1;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint32_t csr_lane0_sync		: 1;
+		uint32_t csr_lane1_sync		: 1;
+		uint32_t csr_lane2_sync		: 1;
+		uint32_t csr_lane3_sync		: 1;
+		uint32_t res0			: 7;
+		uint32_t csr_pattern_test_able	: 1;
+		uint32_t csr_lane_align		: 1;
+		uint32_t res1			: 3;
+		uint32_t res2			: 16;
+#endif
+		} w0;
+
+#if defined(_LITTLE_ENDIAN)
+		uint32_t	w1;
+#endif
+	} bits;
+} xpcs_stat_t;
+
+/* XPCS Base10G Test Control Register */
+#define	XPCS_TEST_CTRL_TX_TEST_ENABLE		0x0004
+#define	XPCS_TEST_CTRL_TEST_PATTERN_SEL_MASK	0x0003
+#define	TEST_PATTERN_HIGH_FREQ			0
+#define	TEST_PATTERN_LOW_FREQ			1
+#define	TEST_PATTERN_MIXED_FREQ			2
+
+typedef	union _xpcs_test_ctl_t {
+	uint64_t value;
+
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t msw;	/* Most significant word */
+		uint32_t lsw;	/* Least significant word */
+#elif defined(_LITTLE_ENDIAN)
+		uint32_t lsw;	/* Least significant word */
+		uint32_t msw;	/* Most significant word */
+#endif
+	} val;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t	w1;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint32_t res1			: 16;
+		uint32_t res0			: 13;
+		uint32_t csr_tx_test_en		: 1;
+		uint32_t csr_test_pat_sel	: 2;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint32_t csr_test_pat_sel	: 2;
+		uint32_t csr_tx_test_en		: 1;
+		uint32_t res0			: 13;
+		uint32_t res1			: 16;
+#endif
+		} w0;
+
+#if defined(_LITTLE_ENDIAN)
+		uint32_t	w1;
+#endif
+	} bits;
+} xpcs_test_ctl_t;
+
+/* XPCS Base10G Diagnostic Register */
+#define	XPCS_DIAG_EB_ALIGN_ERR3		0x40
+#define	XPCS_DIAG_EB_ALIGN_ERR2		0x20
+#define	XPCS_DIAG_EB_ALIGN_ERR1		0x10
+#define	XPCS_DIAG_EB_DESKEW_OK		0x08
+#define	XPCS_DIAG_EB_ALIGN_DET3		0x04
+#define	XPCS_DIAG_EB_ALIGN_DET2		0x02
+#define	XPCS_DIAG_EB_ALIGN_DET1		0x01
+#define	XPCS_DIAG_EB_DESKEW_LOSS	0
+
+#define	XPCS_DIAG_SYNC_3_INVALID	0x8
+#define	XPCS_DIAG_SYNC_2_INVALID	0x4
+#define	XPCS_DIAG_SYNC_1_INVALID	0x2
+#define	XPCS_DIAG_SYNC_IN_SYNC		0x1
+#define	XPCS_DIAG_SYNC_LOSS_SYNC	0
+
+#define	XPCS_RX_SM_RECEIVE_STATE	1
+#define	XPCS_RX_SM_FAULT_STATE		0
+
+typedef	union _xpcs_diag_t {
+	uint64_t value;
+
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t msw;	/* Most significant word */
+		uint32_t lsw;	/* Least significant word */
+#elif defined(_LITTLE_ENDIAN)
+		uint32_t lsw;	/* Least significant word */
+		uint32_t msw;	/* Most significant word */
+#endif
+	} val;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t	w1;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint32_t res1			: 7;
+		uint32_t sync_sm_lane3		: 4;
+		uint32_t sync_sm_lane2		: 4;
+		uint32_t sync_sm_lane1		: 4;
+		uint32_t sync_sm_lane0		: 4;
+		uint32_t elastic_buffer_sm	: 8;
+		uint32_t receive_sm		: 1;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint32_t receive_sm		: 1;
+		uint32_t elastic_buffer_sm	: 8;
+		uint32_t sync_sm_lane0		: 4;
+		uint32_t sync_sm_lane1		: 4;
+		uint32_t sync_sm_lane2		: 4;
+		uint32_t sync_sm_lane3		: 4;
+		uint32_t res1			: 7;
+#endif
+		} w0;
+
+#if defined(_LITTLE_ENDIAN)
+		uint32_t	w1;
+#endif
+	} bits;
+} xpcs_diag_t;
+
+/* XPCS Base10G Tx State Machine Register */
+#define	XPCS_TX_SM_SEND_UNDERRUN	0x9
+#define	XPCS_TX_SM_SEND_RANDOM_Q	0x8
+#define	XPCS_TX_SM_SEND_RANDOM_K	0x7
+#define	XPCS_TX_SM_SEND_RANDOM_A	0x6
+#define	XPCS_TX_SM_SEND_RANDOM_R	0x5
+#define	XPCS_TX_SM_SEND_Q		0x4
+#define	XPCS_TX_SM_SEND_K		0x3
+#define	XPCS_TX_SM_SEND_A		0x2
+#define	XPCS_TX_SM_SEND_SDP		0x1
+#define	XPCS_TX_SM_SEND_DATA		0
+
+/* XPCS Base10G Configuration Register */
+#define	XPCS_CFG_VENDOR_DBG_SEL_MASK	0x78
+#define	XPCS_CFG_VENDOR_DBG_SEL_SHIFT	3
+#define	XPCS_CFG_BYPASS_SIG_DETECT	0x0004
+#define	XPCS_CFG_ENABLE_TX_BUFFERS	0x0002
+#define	XPCS_CFG_XPCS_ENABLE		0x0001
+
+typedef	union _xpcs_config_t {
+	uint64_t value;
+
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t msw;	/* Most significant word */
+		uint32_t lsw;	/* Least significant word */
+#elif defined(_LITTLE_ENDIAN)
+		uint32_t lsw;	/* Least significant word */
+		uint32_t msw;	/* Most significant word */
+#endif
+	} val;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t	w1;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint32_t res1			: 16;
+		uint32_t res0			: 9;
+		uint32_t csr_vendor_dbg_sel	: 4;
+		uint32_t csr_bypass_sig_detect	: 1;
+		uint32_t csr_en_tx_buf		: 1;
+		uint32_t csr_xpcs_en		: 1;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint32_t csr_xpcs_en		: 1;
+		uint32_t csr_en_tx_buf		: 1;
+		uint32_t csr_bypass_sig_detect	: 1;
+		uint32_t csr_vendor_dbg_sel	: 4;
+		uint32_t res0			: 9;
+		uint32_t res1			: 16;
+#endif
+		} w0;
+
+#if defined(_LITTLE_ENDIAN)
+		uint32_t	w1;
+#endif
+	} bits;
+} xpcs_config_t;
+
+
+
+/* XPCS Base10G Mask1 Register */
+#define	XPCS_MASK1_FAULT_MASK		0x0080	/* mask fault interrupt. */
+#define	XPCS_MASK1_RX_LINK_STATUS_MASK	0x0040	/* mask linkstat interrupt */
+
+/* XPCS Base10G Packet Counter */
+#define	XPCS_PKT_CNTR_TX_PKT_CNT_MASK	0xffff0000
+#define	XPCS_PKT_CNTR_TX_PKT_CNT_SHIFT	16
+#define	XPCS_PKT_CNTR_RX_PKT_CNT_MASK	0x0000ffff
+#define	XPCS_PKT_CNTR_RX_PKT_CNT_SHIFT	0
+
+/* XPCS Base10G TX State Machine status register */
+#define	XPCS_TX_STATE_MC_TX_STATE_MASK	0x0f
+#define	XPCS_DESKEW_ERR_CNTR_MASK	0xff
+
+/* XPCS Base10G Lane symbol error counters */
+#define	XPCS_SYM_ERR_CNT_L1_MASK  0xffff0000
+#define	XPCS_SYM_ERR_CNT_L0_MASK  0x0000ffff
+#define	XPCS_SYM_ERR_CNT_L3_MASK  0xffff0000
+#define	XPCS_SYM_ERR_CNT_L2_MASK  0x0000ffff
+
+#define	XPCS_SYM_ERR_CNT_MULTIPLIER	16
+
+/* ESR Reset Register */
+#define	ESR_RESET_1			2
+#define	ESR_RESET_0			1
+
+/* ESR Configuration Register */
+#define	ESR_BLUNT_END_LOOPBACK		2
+#define	ESR_FORCE_SERDES_SERDES_RDY	1
+
+/* ESR Neptune Serdes PLL Configuration */
+#define	ESR_PLL_CFG_FBDIV_0		0x1
+#define	ESR_PLL_CFG_FBDIV_1		0x2
+#define	ESR_PLL_CFG_FBDIV_2		0x4
+#define	ESR_PLL_CFG_HALF_RATE_0		0x8
+#define	ESR_PLL_CFG_HALF_RATE_1		0x10
+#define	ESR_PLL_CFG_HALF_RATE_2		0x20
+#define	ESR_PLL_CFG_HALF_RATE_3		0x40
+
+/* ESR Neptune Serdes Control Register */
+#define	ESR_CTL_EN_SYNCDET_0		0x00000001
+#define	ESR_CTL_EN_SYNCDET_1		0x00000002
+#define	ESR_CTL_EN_SYNCDET_2		0x00000004
+#define	ESR_CTL_EN_SYNCDET_3		0x00000008
+#define	ESR_CTL_OUT_EMPH_0_MASK		0x00000070
+#define	ESR_CTL_OUT_EMPH_0_SHIFT	4
+#define	ESR_CTL_OUT_EMPH_1_MASK		0x00000380
+#define	ESR_CTL_OUT_EMPH_1_SHIFT	7
+#define	ESR_CTL_OUT_EMPH_2_MASK		0x00001c00
+#define	ESR_CTL_OUT_EMPH_2_SHIFT	10
+#define	ESR_CTL_OUT_EMPH_3_MASK		0x0000e000
+#define	ESR_CTL_OUT_EMPH_3_SHIFT	13
+#define	ESR_CTL_LOSADJ_0_MASK		0x00070000
+#define	ESR_CTL_LOSADJ_0_SHIFT		16
+#define	ESR_CTL_LOSADJ_1_MASK		0x00380000
+#define	ESR_CTL_LOSADJ_1_SHIFT		19
+#define	ESR_CTL_LOSADJ_2_MASK		0x01c00000
+#define	ESR_CTL_LOSADJ_2_SHIFT		22
+#define	ESR_CTL_LOSADJ_3_MASK		0x0e000000
+#define	ESR_CTL_LOSADJ_3_SHIFT		25
+#define	ESR_CTL_RXITERM_0		0x10000000
+#define	ESR_CTL_RXITERM_1		0x20000000
+#define	ESR_CTL_RXITERM_2		0x40000000
+#define	ESR_CTL_RXITERM_3		0x80000000
+
+/* ESR Neptune Serdes Test Configuration Register */
+#define	ESR_TSTCFG_LBTEST_MD_0_MASK	0x00000003
+#define	ESR_TSTCFG_LBTEST_MD_0_SHIFT	0
+#define	ESR_TSTCFG_LBTEST_MD_1_MASK	0x0000000c
+#define	ESR_TSTCFG_LBTEST_MD_1_SHIFT	2
+#define	ESR_TSTCFG_LBTEST_MD_2_MASK	0x00000030
+#define	ESR_TSTCFG_LBTEST_MD_2_SHIFT	4
+#define	ESR_TSTCFG_LBTEST_MD_3_MASK	0x000000c0
+#define	ESR_TSTCFG_LBTEST_MD_3_SHIFT	6
+
+/* ESR Neptune Ethernet RGMII Configuration Register */
+#define	ESR_RGMII_PT0_IN_USE		0x00000001
+#define	ESR_RGMII_PT1_IN_USE		0x00000002
+#define	ESR_RGMII_PT2_IN_USE		0x00000004
+#define	ESR_RGMII_PT3_IN_USE		0x00000008
+#define	ESR_RGMII_REG_RW_TEST		0x00000010
+
+/* ESR Internal Signals Observation Register */
+#define	ESR_SIG_MASK			0xFFFFFFFF
+#define	ESR_SIG_P0_BITS_MASK		0x33E0000F
+#define	ESR_SIG_P1_BITS_MASK		0x0C1F00F0
+#define	ESR_SIG_SERDES_RDY0_P0		0x20000000
+#define	ESR_SIG_DETECT0_P0		0x10000000
+#define	ESR_SIG_SERDES_RDY0_P1		0x08000000
+#define	ESR_SIG_DETECT0_P1		0x04000000
+#define	ESR_SIG_XSERDES_RDY_P0		0x02000000
+#define	ESR_SIG_XDETECT_P0_CH3		0x01000000
+#define	ESR_SIG_XDETECT_P0_CH2		0x00800000
+#define	ESR_SIG_XDETECT_P0_CH1		0x00400000
+#define	ESR_SIG_XDETECT_P0_CH0		0x00200000
+#define	ESR_SIG_XSERDES_RDY_P1		0x00100000
+#define	ESR_SIG_XDETECT_P1_CH3		0x00080000
+#define	ESR_SIG_XDETECT_P1_CH2		0x00040000
+#define	ESR_SIG_XDETECT_P1_CH1		0x00020000
+#define	ESR_SIG_XDETECT_P1_CH0		0x00010000
+#define	ESR_SIG_LOS_P1_CH3		0x00000080
+#define	ESR_SIG_LOS_P1_CH2		0x00000040
+#define	ESR_SIG_LOS_P1_CH1		0x00000020
+#define	ESR_SIG_LOS_P1_CH0		0x00000010
+#define	ESR_SIG_LOS_P0_CH3		0x00000008
+#define	ESR_SIG_LOS_P0_CH2		0x00000004
+#define	ESR_SIG_LOS_P0_CH1		0x00000002
+#define	ESR_SIG_LOS_P0_CH0		0x00000001
+
+/* ESR Debug Selection Register */
+#define	ESR_DEBUG_SEL_MASK		0x0000003f
+
+/* ESR Test Configuration Register */
+#define	ESR_NO_LOOPBACK_CH3		(0x0 << 6)
+#define	ESR_EWRAP_CH3			(0x1 << 6)
+#define	ESR_PAD_LOOPBACK_CH3		(0x2 << 6)
+#define	ESR_REVLOOPBACK_CH3		(0x3 << 6)
+#define	ESR_NO_LOOPBACK_CH2		(0x0 << 4)
+#define	ESR_EWRAP_CH2			(0x1 << 4)
+#define	ESR_PAD_LOOPBACK_CH2		(0x2 << 4)
+#define	ESR_REVLOOPBACK_CH2		(0x3 << 4)
+#define	ESR_NO_LOOPBACK_CH1		(0x0 << 2)
+#define	ESR_EWRAP_CH1			(0x1 << 2)
+#define	ESR_PAD_LOOPBACK_CH1		(0x2 << 2)
+#define	ESR_REVLOOPBACK_CH1		(0x3 << 2)
+#define	ESR_NO_LOOPBACK_CH0		0x0
+#define	ESR_EWRAP_CH0			0x1
+#define	ESR_PAD_LOOPBACK_CH0		0x2
+#define	ESR_REVLOOPBACK_CH0		0x3
+
+/* convert values */
+#define	NXGE_BASE(x, y)	\
+	(((y) << (x ## _SHIFT)) & (x ## _MASK))
+
+#define	NXGE_VAL_GET(fieldname, regval)		\
+	(((regval) & (fieldname ## _MASK)) >> (fieldname ## _SHIFT))
+
+#define	NXGE_VAL_SET(fieldname, regval, val)		\
+{							\
+	(regval) &= ~(fieldname ## _MASK);		\
+	(regval) |= ((val) << (fieldname ## _SHIFT));	\
+}
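+
+/*
+ * Example (illustrative only, not part of the original header): the
+ * argument to the macros above is the common prefix of a _MASK/_SHIFT
+ * pair, e.g. XMAC_SLOT_TIME:
+ *
+ *	slot = NXGE_VAL_GET(XMAC_SLOT_TIME, regval);
+ *	NXGE_VAL_SET(XMAC_SLOT_TIME, regval, 0x40);
+ *	regval = NXGE_BASE(XMAC_SLOT_TIME, 0x40);
+ */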
+
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _SYS_MAC_NXGE_MAC_HW_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/sys/nxge/nxge_mii.h	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,454 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_NXGE_NXGE_MII_H_
+#define	_SYS_NXGE_NXGE_MII_H_
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Configuration Register space.
+ */
+
+#define	MII_BMCR		0
+#define	MII_BMSR		1
+#define	MII_IDR1		2
+#define	MII_IDR2		3
+#define	MII_ANAR		4
+#define	MII_ANLPAR		5
+#define	MII_ANER		6
+#define	MII_NPTXR		7
+#define	MII_LPRXNPR		8
+#define	MII_GCR			9
+#define	MII_GSR			10
+#define	MII_RES0		11
+#define	MII_RES1		12
+#define	MII_RES2		13
+#define	MII_RES3		14
+#define	MII_ESR			15
+
+#define	NXGE_MAX_MII_REGS	32
+
+/*
+ * Configuration Register space.
+ */
+typedef struct _mii_regs {
+	uchar_t bmcr;		/* Basic mode control register */
+	uchar_t bmsr;		/* Basic mode status register */
+	uchar_t idr1;		/* Phy identifier register 1 */
+	uchar_t idr2;		/* Phy identifier register 2 */
+	uchar_t anar;		/* Auto-Negotiation advertisement register */
+	uchar_t anlpar;		/* Auto-Negotiation link Partner ability reg */
+	uchar_t aner;		/* Auto-Negotiation expansion register */
+	uchar_t nptxr;		/* Next page transmit register */
+	uchar_t lprxnpr;	/* Link partner received next page register */
+	uchar_t gcr;		/* Gigabit basic mode control register. */
+	uchar_t gsr;		/* Gigabit basic mode status register */
+	uchar_t mii_res1[4];	/* For future use by MII working group */
+	uchar_t esr;		/* Extended status register. */
+	uchar_t vendor_res[16];	/* For future use by Phy Vendors */
+} mii_regs_t, *p_mii_regs_t;
+
+/*
+ * MII Register 0: Basic mode control register.
+ */
+#define	BMCR_RES		0x003f  /* Unused... */
+#define	BMCR_SSEL_MSB		0x0040  /* Used to manually select speed */
+					/* (with bit 13) when auto-neg */
+					/* disabled */
+#define	BMCR_COL_TEST		0x0080  /* Collision test */
+#define	BMCR_DPLX_MD		0x0100  /* Full duplex */
+#define	BMCR_RESTART_AN		0x0200  /* Auto negotiation restart */
+#define	BMCR_ISOLATE		0x0400	/* Disconnect BCM5464R from MII */
+#define	BMCR_PDOWN		0x0800	/* Powerdown the BCM5464R */
+#define	BMCR_ANENABLE		0x1000	/* Enable auto negotiation */
+#define	BMCR_SSEL_LSB		0x2000  /* Used to manually select speed */
+					/* (with bit 6) when auto-neg */
+					/* disabled */
+#define	BMCR_LOOPBACK		0x4000	/* TXD loopback bits */
+#define	BMCR_RESET		0x8000	/* Reset the BCM5464R */
+
+typedef union _mii_bmcr {
+	uint16_t value;
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t reset:1;
+		uint16_t loopback:1;
+		uint16_t speed_sel:1;
+		uint16_t enable_autoneg:1;
+		uint16_t power_down:1;
+		uint16_t isolate:1;
+		uint16_t restart_autoneg:1;
+		uint16_t duplex_mode:1;
+		uint16_t col_test:1;
+		uint16_t speed_1000_sel:1;
+		uint16_t res1:6;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t res1:6;
+		uint16_t speed_1000_sel:1;
+		uint16_t col_test:1;
+		uint16_t duplex_mode:1;
+		uint16_t restart_autoneg:1;
+		uint16_t isolate:1;
+		uint16_t power_down:1;
+		uint16_t enable_autoneg:1;
+		uint16_t speed_sel:1;
+		uint16_t loopback:1;
+		uint16_t reset:1;
+#endif
+	} bits;
+} mii_bmcr_t, *p_mii_bmcr_t;
+
+/*
+ * MII Register 1:  Basic mode status register.
+ */
+#define	BMSR_ERCAP		0x0001  /* Ext-reg capability */
+#define	BMSR_JCD		0x0002  /* Jabber detected */
+#define	BMSR_LSTATUS		0x0004  /* Link status */
+#define	BMSR_ANEGCAPABLE	0x0008  /* Able to do auto-negotiation */
+#define	BMSR_RFAULT		0x0010  /* Remote fault detected */
+#define	BMSR_ANEGCOMPLETE	0x0020  /* Auto-negotiation complete */
+#define	BMSR_MF_PRE_SUP		0x0040  /* Preamble for MIF frame suppressed, */
+					/* always 1 for BCM5464R */
+#define	BMSR_RESV		0x0080  /* Unused... */
+#define	BMSR_ESTAT		0x0100  /* Contains IEEE extended status reg */
+#define	BMSR_100BASE2HALF	0x0200  /* Can do 100BASE-T2, half-duplex */
+#define	BMSR_100BASE2FULL	0x0400  /* Can do 100BASE-T2, full-duplex */
+#define	BMSR_10HALF		0x0800  /* Can do 10mbps, half-duplex */
+#define	BMSR_10FULL		0x1000  /* Can do 10mbps, full-duplex */
+#define	BMSR_100HALF		0x2000  /* Can do 100mbps, half-duplex */
+#define	BMSR_100FULL		0x4000  /* Can do 100mbps, full-duplex */
+#define	BMSR_100BASE4		0x8000  /* Can do 100mbps, 4k packets */
+
+typedef union _mii_bmsr {
+	uint16_t value;
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t link_100T4:1;
+		uint16_t link_100fdx:1;
+		uint16_t link_100hdx:1;
+		uint16_t link_10fdx:1;
+		uint16_t link_10hdx:1;
+		uint16_t res2:2;
+		uint16_t extend_status:1;
+		uint16_t res1:1;
+		uint16_t preamble_supress:1;
+		uint16_t auto_neg_complete:1;
+		uint16_t remote_fault:1;
+		uint16_t auto_neg_able:1;
+		uint16_t link_status:1;
+		uint16_t jabber_detect:1;
+		uint16_t ext_cap:1;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t ext_cap:1;
+		uint16_t jabber_detect:1;
+		uint16_t link_status:1;
+		uint16_t auto_neg_able:1;
+		uint16_t remote_fault:1;
+		uint16_t auto_neg_complete:1;
+		uint16_t preamble_supress:1;
+		uint16_t res1:1;
+		uint16_t extend_status:1;
+		uint16_t res2:2;
+		uint16_t link_10hdx:1;
+		uint16_t link_10fdx:1;
+		uint16_t link_100hdx:1;
+		uint16_t link_100fdx:1;
+		uint16_t link_100T4:1;
+#endif
+	} bits;
+} mii_bmsr_t, *p_mii_bmsr_t;
+
+/*
+ * MII Register 2: Physical Identifier 1.
+ */
+/* contains BCM OUI bits [3:18] */
+typedef union _mii_idr1 {
+	uint16_t value;
+	struct {
+		uint16_t ieee_address:16;
+	} bits;
+} mii_idr1_t, *p_mii_idr1_t;
+
+/*
+ * MII Register 3: Physical Identifier 2.
+ */
+typedef union _mii_idr2 {
+	uint16_t value;
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t ieee_address:6;
+		uint16_t model_no:6;
+		uint16_t rev_no:4;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t rev_no:4;
+		uint16_t model_no:6;
+		uint16_t ieee_address:6;
+#endif
+	} bits;
+} mii_idr2_t, *p_mii_idr2_t;
+
+/*
+ * MII Register 4: Auto-negotiation advertisement register.
+ */
+#define	ADVERTISE_SLCT		0x001f  /* Selector bits for proto, 0x01 */
+					/* indicates IEEE 802.3 CSMA/CD phy */
+#define	ADVERTISE_CSMA		0x0001  /* Only selector supported */
+#define	ADVERTISE_10HALF	0x0020  /* Try for 10mbps half-duplex  */
+#define	ADVERTISE_10FULL	0x0040  /* Try for 10mbps full-duplex  */
+#define	ADVERTISE_100HALF	0x0080  /* Try for 100mbps half-duplex */
+#define	ADVERTISE_100FULL	0x0100  /* Try for 100mbps full-duplex */
+#define	ADVERTISE_100BASE4	0x0200  /* Try for 100mbps 4k packets. set to */
+					/* 0, BCM5464R not 100BASE-T4 capable */
+#define	ADVERTISE_RES1		0x0400  /* Unused... */
+#define	ADVERTISE_ASM_PAUS	0x0800  /* advertise asymmetric pause */
+#define	ADVERTISE_PAUS		0x1000  /* can do full dplx pause */
+#define	ADVERTISE_RFAULT	0x2000  /* Say we can detect faults */
+#define	ADVERTISE_RES0		0x4000  /* Unused... */
+#define	ADVERTISE_NPAGE		0x8000  /* Next page bit */
+
+#define	ADVERTISE_FULL (ADVERTISE_100FULL | ADVERTISE_10FULL | \
+			ADVERTISE_CSMA)
+#define	ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \
+			ADVERTISE_100HALF | ADVERTISE_100FULL)
+
+typedef union _mii_anar {
+	uint16_t value;
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t np_indication:1;
+		uint16_t acknowledge:1;
+		uint16_t remote_fault:1;
+		uint16_t res1:1;
+		uint16_t cap_asmpause:1;
+		uint16_t cap_pause:1;
+		uint16_t cap_100T4:1;
+		uint16_t cap_100fdx:1;
+		uint16_t cap_100hdx:1;
+		uint16_t cap_10fdx:1;
+		uint16_t cap_10hdx:1;
+		uint16_t selector:5;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t selector:5;
+		uint16_t cap_10hdx:1;
+		uint16_t cap_10fdx:1;
+		uint16_t cap_100hdx:1;
+		uint16_t cap_100fdx:1;
+		uint16_t cap_100T4:1;
+		uint16_t cap_pause:1;
+		uint16_t cap_asmpause:1;
+		uint16_t res1:1;
+		uint16_t remote_fault:1;
+		uint16_t acknowledge:1;
+		uint16_t np_indication:1;
+#endif
+	} bits;
+} mii_anar_t, *p_mii_anar_t;
+
+/*
+ * MII Register 5: Auto-negotiation link partner ability register.
+ */
+#define	LPA_SLCT		0x001f  /* Same as advertise selector */
+#define	LPA_10HALF		0x0020  /* Can do 10mbps half-duplex */
+#define	LPA_10FULL		0x0040  /* Can do 10mbps full-duplex */
+#define	LPA_100HALF		0x0080  /* Can do 100mbps half-duplex */
+#define	LPA_100FULL		0x0100  /* Can do 100mbps full-duplex */
+#define	LPA_100BASE4		0x0200  /* Can do 100mbps 4k packets */
+#define	LPA_RES1		0x0400  /* Unused... */
+#define	LPA_ASM_PAUS		0x0800  /* advertise asymmetric pause */
+#define	LPA__PAUS		0x1000  /* can do full dplx pause */
+#define	LPA_RFAULT		0x2000	/* Link partner faulted */
+#define	LPA_LPACK		0x4000	/* Link partner acked us */
+#define	LPA_NPAGE		0x8000	/* Next page bit */
+
+#define	LPA_DUPLEX		(LPA_10FULL | LPA_100FULL)
+#define	LPA_100			(LPA_100FULL | LPA_100HALF | LPA_100BASE4)
+
+typedef mii_anar_t mii_anlpar_t, *pmii_anlpar_t;
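
Because ANAR and ANLPAR share one layout, the usual resolution step is to
AND the local advertisement with the partner's and pick the highest common
ability.  A hedged sketch of that standard MII practice (not code from this
changeset; the function name is illustrative):

	static void
	mii_resolve(uint16_t anar_val, uint16_t anlpar_val,
	    int *speed, int *full_duplex)
	{
		uint16_t common = anar_val & anlpar_val;

		if (common & LPA_100FULL) {
			*speed = 100; *full_duplex = 1;
		} else if (common & LPA_100HALF) {
			*speed = 100; *full_duplex = 0;
		} else if (common & LPA_10FULL) {
			*speed = 10; *full_duplex = 1;
		} else {
			*speed = 10; *full_duplex = 0;
		}
	}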
+
+/*
+ * MII Register 6: Auto-negotiation expansion register.
+ */
+#define	EXPANSION_LP_AN_ABLE	0x0001	/* Link partner has auto-neg cap */
+#define	EXPANSION_PG_RX		0x0002	/* Got new RX page code word */
+#define	EXPANSION_NP_ABLE	0x0004	/* This enables npage words */
+#define	EXPANSION_LPNP_ABLE	0x0008	/* Link partner supports npage */
+#define	EXPANSION_MFAULTS	0x0010	/* Multiple link faults detected */
+#define	EXPANSION_RESV		0xffe0	/* Unused... */
+
+typedef union _mii_aner {
+	uint16_t value;
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t res:11;
+		uint16_t mlf:1;
+		uint16_t lp_np_able:1;
+		uint16_t np_able:1;
+		uint16_t page_rx:1;
+		uint16_t lp_an_able:1;
+#else
+		uint16_t lp_an_able:1;
+		uint16_t page_rx:1;
+		uint16_t np_able:1;
+		uint16_t lp_np_able:1;
+		uint16_t mlf:1;
+		uint16_t res:11;
+#endif
+	} bits;
+} mii_aner_t, *p_mii_aner_t;
+
+/*
+ * MII Register 7: Next page transmit register.
+ */
+typedef	union _mii_nptxr {
+	uint16_t value;
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t np:1;
+		uint16_t res:1;
+		uint16_t msgp:1;
+		uint16_t ack2:1;
+		uint16_t toggle:1;
+		uint16_t res1:11;
+#else
+		uint16_t res1:11;
+		uint16_t toggle:1;
+		uint16_t ack2:1;
+		uint16_t msgp:1;
+		uint16_t res:1;
+		uint16_t np:1;
+#endif
+	} bits;
+} mii_nptxr_t, *p_mii_nptxr_t;
+
+/*
+ * MII Register 8: Link partner received next page register.
+ */
+typedef union _mii_lprxnpr {
+	uint16_t value;
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t np:1;
+		uint16_t ack:1;
+		uint16_t msgp:1;
+		uint16_t ack2:1;
+		uint16_t toggle:1;
+		uint16_t mcf:11;
+#else
+		uint16_t mcf:11;
+		uint16_t toggle:1;
+		uint16_t ack2:1;
+		uint16_t msgp:1;
+		uint16_t ack:1;
+		uint16_t np:1;
+#endif
+	} bits;
+} mii_lprxnpr_t, *p_mii_lprxnpr_t;
+
+/*
+ * MII Register 9: 1000BaseT control register.
+ */
+typedef union _mii_gcr {
+	uint16_t value;
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t test_mode:3;
+		uint16_t ms_mode_en:1;
+		uint16_t master:1;
+		uint16_t dte_or_repeater:1;
+		uint16_t link_1000fdx:1;
+		uint16_t link_1000hdx:1;
+		uint16_t res:8;
+#else
+		uint16_t res:8;
+		uint16_t link_1000hdx:1;
+		uint16_t link_1000fdx:1;
+		uint16_t dte_or_repeater:1;
+		uint16_t master:1;
+		uint16_t ms_mode_en:1;
+		uint16_t test_mode:3;
+#endif
+	} bits;
+} mii_gcr_t, *p_mii_gcr_t;
+
+/*
+ * MII Register 10: 1000BaseT status register.
+ */
+typedef union _mii_gsr {
+	uint16_t value;
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t ms_config_fault:1;
+		uint16_t ms_resolve:1;
+		uint16_t local_rx_status:1;
+		uint16_t remote_rx_status:1;
+		uint16_t link_1000fdx:1;
+		uint16_t link_1000hdx:1;
+		uint16_t res:2;
+		uint16_t idle_err_cnt:8;
+#else
+		uint16_t idle_err_cnt:8;
+		uint16_t res:2;
+		uint16_t link_1000hdx:1;
+		uint16_t link_1000fdx:1;
+		uint16_t remote_rx_status:1;
+		uint16_t local_rx_status:1;
+		uint16_t ms_resolve:1;
+		uint16_t ms_config_fault:1;
+#endif
+	} bits;
+} mii_gsr_t, *p_mii_gsr_t;
+
+/*
+ * MII Register 15: Extended status register.
+ */
+typedef union _mii_esr {
+	uint16_t value;
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t link_1000Xfdx:1;
+		uint16_t link_1000Xhdx:1;
+		uint16_t link_1000fdx:1;
+		uint16_t link_1000hdx:1;
+		uint16_t res:12;
+#else
+		uint16_t res:12;
+		uint16_t link_1000hdx:1;
+		uint16_t link_1000fdx:1;
+		uint16_t link_1000Xhdx:1;
+		uint16_t link_1000Xfdx:1;
+#endif
+	} bits;
+} mii_esr_t, *p_mii_esr_t;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_NXGE_NXGE_MII_H_ */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/sys/nxge/nxge_n2_esr_hw.h	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,363 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_NXGE_NXGE_N2_ESR_HW_H
+#define	_SYS_NXGE_NXGE_N2_ESR_HW_H
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#define	ESR_N2_DEV_ADDR		0x1E
+#define	ESR_N2_BASE		0x8000
+
+/*
+ * Definitions for TI WIZ6C2xxN2x0 Macro Family.
+ */
+
+/* Register Blocks base address */
+
+#define	ESR_N2_PLL_REG_OFFSET		0
+#define	ESR_N2_TEST_REG_OFFSET		0x004
+#define	ESR_N2_TX_REG_OFFSET		0x100
+#define	ESR_N2_TX_0_REG_OFFSET		0x100
+#define	ESR_N2_TX_1_REG_OFFSET		0x104
+#define	ESR_N2_TX_2_REG_OFFSET		0x108
+#define	ESR_N2_TX_3_REG_OFFSET		0x10c
+#define	ESR_N2_TX_4_REG_OFFSET		0x110
+#define	ESR_N2_TX_5_REG_OFFSET		0x114
+#define	ESR_N2_TX_6_REG_OFFSET		0x118
+#define	ESR_N2_TX_7_REG_OFFSET		0x11c
+#define	ESR_N2_RX_REG_OFFSET		0x120
+#define	ESR_N2_RX_0_REG_OFFSET		0x120
+#define	RXBUF_START_ADDR(a, index, bsize)	((a) + ((index) * (bsize)))
+#define	RXBUF_OFFSET_FROM_START(a, start)	((start) - (a))
+#define	ESR_N2_RX_3_REG_OFFSET		0x12c
+#define	ESR_N2_RX_4_REG_OFFSET		0x130
+#define	ESR_N2_RX_5_REG_OFFSET		0x134
+#define	ESR_N2_RX_6_REG_OFFSET		0x138
+#define	ESR_N2_RX_7_REG_OFFSET		0x13c
+#define	ESR_N2_P1_REG_OFFSET		0x400
+
+/* Register address */
+
+#define	ESR_N2_PLL_CFG_REG	(ESR_N2_BASE + ESR_N2_PLL_REG_OFFSET)
+#define	ESR_N2_PLL_CFG_L_REG	(ESR_N2_BASE + ESR_N2_PLL_REG_OFFSET)
+#define	ESR_N2_PLL_CFG_H_REG	(ESR_N2_BASE + ESR_N2_PLL_REG_OFFSET + 1)
+#define	ESR_N2_PLL_STS_REG	(ESR_N2_BASE + ESR_N2_PLL_REG_OFFSET + 2)
+#define	ESR_N2_PLL_STS_L_REG	(ESR_N2_BASE + ESR_N2_PLL_REG_OFFSET + 2)
+#define	ESR_N2_PLL_STS_H_REG	(ESR_N2_BASE + ESR_N2_PLL_REG_OFFSET + 3)
+#define	ESR_N2_TEST_CFG_REG	(ESR_N2_BASE + ESR_N2_TEST_REG_OFFSET)
+#define	ESR_N2_TEST_CFG_L_REG	(ESR_N2_BASE + ESR_N2_TEST_REG_OFFSET)
+#define	ESR_N2_TEST_CFG_H_REG	(ESR_N2_BASE + ESR_N2_TEST_REG_OFFSET + 1)
+
+#define	ESR_N2_TX_CFG_REG_ADDR(chan)	(ESR_N2_BASE + ESR_N2_TX_REG_OFFSET +\
+					(chan * 4))
+#define	ESR_N2_TX_CFG_L_REG_ADDR(chan)	(ESR_N2_BASE + ESR_N2_TX_REG_OFFSET +\
+					(chan * 4))
+#define	ESR_N2_TX_CFG_H_REG_ADDR(chan)	(ESR_N2_BASE + ESR_N2_TX_REG_OFFSET +\
+					(chan * 4) + 1)
+#define	ESR_N2_TX_STS_REG_ADDR(chan)	(ESR_N2_BASE + ESR_N2_TX_REG_OFFSET +\
+					(chan * 4) + 2)
+#define	ESR_N2_TX_STS_L_REG_ADDR(chan)	(ESR_N2_BASE + ESR_N2_TX_REG_OFFSET +\
+					(chan * 4) + 2)
+#define	ESR_N2_TX_STS_H_REG_ADDR(chan)	(ESR_N2_BASE + ESR_N2_TX_REG_OFFSET +\
+					(chan * 4) + 3)
+#define	ESR_N2_RX_CFG_REG_ADDR(chan)	(ESR_N2_BASE + ESR_N2_RX_REG_OFFSET +\
+					(chan * 4))
+#define	ESR_N2_RX_CFG_L_REG_ADDR(chan)	(ESR_N2_BASE + ESR_N2_RX_REG_OFFSET +\
+					(chan * 4))
+#define	ESR_N2_RX_CFG_H_REG_ADDR(chan)	(ESR_N2_BASE + ESR_N2_RX_REG_OFFSET +\
+					(chan * 4) + 1)
+#define	ESR_N2_RX_STS_REG_ADDR(chan)	(ESR_N2_BASE + ESR_N2_RX_REG_OFFSET +\
+					(chan * 4) + 2)
+#define	ESR_N2_RX_STS_L_REG_ADDR(chan)	(ESR_N2_BASE + ESR_N2_RX_REG_OFFSET +\
+					(chan * 4) + 2)
+#define	ESR_N2_RX_STS_H_REG_ADDR(chan)	(ESR_N2_BASE + ESR_N2_RX_REG_OFFSET +\
+					(chan * 4) + 3)
+
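For illustration, the per-lane spacing these macros encode is four 16-bit
registers per lane (cfg low/high, then status low/high).  A small sanity
check of that arithmetic (sketch only; the function name is illustrative):

	#include <assert.h>

	static void
	esr_n2_lane_addr_check(void)
	{
		assert(ESR_N2_RX_CFG_L_REG_ADDR(2) == 0x8128);	/* 0x8000 + 0x120 + 8 */
		assert(ESR_N2_RX_STS_H_REG_ADDR(2) == 0x812b);	/* ... + 3 */
		assert(ESR_N2_TX_CFG_L_REG_ADDR(3) == 0x810c);	/* 0x8000 + 0x100 + 12 */
	}
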
+/* PLL Configuration Low 16-bit word */
+typedef	union _esr_ti_cfgpll_l {
+	uint16_t value;
+
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t res2		: 6;
+		uint16_t lb			: 2;
+		uint16_t res1		: 3;
+		uint16_t mpy		: 4;
+		uint16_t enpll		: 1;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t enpll		: 1;
+		uint16_t mpy		: 4;
+		uint16_t res1		: 3;
+		uint16_t lb			: 2;
+		uint16_t res2		: 6;
+#endif
+	} bits;
+} esr_ti_cfgpll_l_t;
+
+/* PLL Configurations */
+#define	CFGPLL_LB_FREQ_DEP_BANDWIDTH	0
+#define	CFGPLL_LB_LOW_BANDWIDTH		0x2
+#define	CFGPLL_LB_HIGH_BANDWIDTH	0x3
+#define	CFGPLL_MPY_4X			0
+#define	CFGPLL_MPY_5X			0x1
+#define	CFGPLL_MPY_6X			0x2
+#define	CFGPLL_MPY_8X			0x4
+#define	CFGPLL_MPY_10X			0x5
+#define	CFGPLL_MPY_12X			0x6
+#define	CFGPLL_MPY_12P5X		0x7
+
+/* Rx Configuration Low 16-bit word */
+
+typedef	union _esr_ti_cfgrx_l {
+	uint16_t value;
+
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t los		: 2;
+		uint16_t align		: 2;
+		uint16_t res		: 1;
+		uint16_t term		: 3;
+		uint16_t invpair	: 1;
+		uint16_t rate		: 2;
+		uint16_t buswidth	: 3;
+		uint16_t entest		: 1;
+		uint16_t enrx		: 1;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t enrx		: 1;
+		uint16_t entest		: 1;
+		uint16_t buswidth	: 3;
+		uint16_t rate		: 2;
+		uint16_t invpair	: 1;
+		uint16_t term		: 3;
+		uint16_t res		: 1;
+		uint16_t align		: 2;
+		uint16_t los		: 2;
+#endif
+	} bits;
+} esr_ti_cfgrx_l_t;
+
+/* Rx Configuration High 16-bit word */
+
+typedef	union _esr_ti_cfgrx_h {
+	uint16_t value;
+
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t res2		: 6;
+		uint16_t bsinrxn	: 1;
+		uint16_t bsinrxp	: 1;
+		uint16_t res1		: 1;
+		uint16_t eq		: 4;
+		uint16_t cdr		: 3;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t cdr		: 3;
+		uint16_t eq		: 4;
+		uint16_t res1		: 1;
+		uint16_t bsinrxp	: 1;
+		uint16_t bsinrxn	: 1;
+		uint16_t res2		: 6;
+#endif
+	} bits;
+} esr_ti_cfgrx_h_t;
+
+/* Receive Configurations */
+#define	CFGRX_BUSWIDTH_10BIT			0
+#define	CFGRX_BUSWIDTH_8BIT			1
+#define	CFGRX_RATE_FULL				0
+#define	CFGRX_RATE_HALF				1
+#define	CFGRX_RATE_QUAD				2
+#define	CFGRX_TERM_VDDT				0
+#define	CFGRX_TERM_0P8VDDT			1
+#define	CFGRX_TERM_FLOAT			3
+#define	CFGRX_ALIGN_DIS				0
+#define	CFGRX_ALIGN_EN				1
+#define	CFGRX_ALIGN_JOG				2
+#define	CFGRX_LOS_DIS				0
+#define	CFGRX_LOS_HITHRES			1
+#define	CFGRX_LOS_LOTHRES			2
+#define	CFGRX_CDR_1ST_ORDER			0
+#define	CFGRX_CDR_2ND_ORDER_HP			1
+#define	CFGRX_CDR_2ND_ORDER_MP			2
+#define	CFGRX_CDR_2ND_ORDER_LP			3
+#define	CFGRX_CDR_1ST_ORDER_FAST_LOCK		4
+#define	CFGRX_CDR_2ND_ORDER_HP_FAST_LOCK	5
+#define	CFGRX_CDR_2ND_ORDER_MP_FAST_LOCK	6
+#define	CFGRX_CDR_2ND_ORDER_LP_FAST_LOCK	7
+#define	CFGRX_EQ_MAX_LF				0
+#define	CFGRX_EQ_ADAPTIVE_LP_ADAPTIVE_ZF	0x1
+#define	CFGRX_EQ_ADAPTIVE_LF_1084MHZ_ZF		0x8
+#define	CFGRX_EQ_ADAPTIVE_LF_805MHZ_ZF		0x9
+#define	CFGRX_EQ_ADAPTIVE_LP_573MHZ_ZF		0xA
+#define	CFGRX_EQ_ADAPTIVE_LP_402MHZ_ZF		0xB
+#define	CFGRX_EQ_ADAPTIVE_LP_304MHZ_ZF		0xC
+#define	CFGRX_EQ_ADAPTIVE_LP_216MHZ_ZF		0xD
+#define	CFGRX_EQ_ADAPTIVE_LP_156MHZ_ZF		0xE
+#define	CFGRX_EQ_ADAPTIVE_LP_135HZ_ZF		0xF
+
+/* Rx Status Low 16-bit word */
+
+typedef	union _esr_ti_stsrx_l {
+	uint16_t value;
+
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t res		: 10;
+		uint16_t bsrxn		: 1;
+		uint16_t bsrxp		: 1;
+		uint16_t losdtct	: 1;
+		uint16_t oddcg		: 1;
+		uint16_t sync		: 1;
+		uint16_t testfail	: 1;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t testfail	: 1;
+		uint16_t sync		: 1;
+		uint16_t oddcg		: 1;
+		uint16_t losdtct	: 1;
+		uint16_t bsrxp		: 1;
+		uint16_t bsrxn		: 1;
+		uint16_t res		: 10;
+#endif
+	} bits;
+} esr_ti_stsrx_l_t;
+
+/* Tx Configuration Low 16-bit word */
+
+typedef	union _esr_ti_cfgtx_l {
+	uint16_t value;
+
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t de		: 4;
+		uint16_t swing		: 3;
+		uint16_t cm		: 1;
+		uint16_t invpair	: 1;
+		uint16_t rate		: 2;
+		uint16_t buswwidth	: 3;
+		uint16_t entest		: 1;
+		uint16_t entx		: 1;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t entx		: 1;
+		uint16_t entest		: 1;
+		uint16_t buswwidth	: 3;
+		uint16_t rate		: 2;
+		uint16_t invpair	: 1;
+		uint16_t cm		: 1;
+		uint16_t swing		: 3;
+		uint16_t de		: 4;
+#endif
+	} bits;
+} esr_ti_cfgtx_l_t;
+
+/* Tx Configuration High 16-bit word */
+
+typedef	union _esr_ti_cfgtx_h {
+	uint16_t value;
+
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t res		: 14;
+		uint16_t bstx		: 1;
+		uint16_t enftp		: 1;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t enftp		: 1;
+		uint16_t bstx		: 1;
+		uint16_t res		: 14;
+#endif
+	} bits;
+} esr_ti_cfgtx_h_t;
+
+/* Transmit Configurations */
+#define	CFGTX_BUSWIDTH_10BIT		0
+#define	CFGTX_BUSWIDTH_8BIT		1
+#define	CFGTX_RATE_FULL			0
+#define	CFGTX_RATE_HALF			1
+#define	CFGTX_RATE_QUAD			2
+#define	CFGTX_SWING_125MV		0
+#define	CFGTX_SWING_250MV		1
+#define	CFGTX_SWING_500MV		2
+#define	CFGTX_SWING_625MV		3
+#define	CFGTX_SWING_750MV		4
+#define	CFGTX_SWING_1000MV		5
+#define	CFGTX_SWING_1250MV		6
+#define	CFGTX_SWING_1375MV		7
+#define	CFGTX_DE_0			0
+#define	CFGTX_DE_4P76			1
+#define	CFGTX_DE_9P52			2
+#define	CFGTX_DE_14P28			3
+#define	CFGTX_DE_19P04			4
+#define	CFGTX_DE_23P8			5
+#define	CFGTX_DE_28P56			6
+#define	CFGTX_DE_33P32			7
+
+/* Test Configuration */
+
+typedef	union _esr_ti_testcfg {
+	uint16_t value;
+
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t res1		: 1;
+		uint16_t invpat		: 1;
+		uint16_t rate		: 2;
+		uint16_t res		: 1;
+		uint16_t enbspls	: 1;
+		uint16_t enbsrx		: 1;
+		uint16_t enbstx		: 1;
+		uint16_t loopback	: 2;
+		uint16_t clkbyp		: 2;
+		uint16_t enrxpatt	: 1;
+		uint16_t entxpatt	: 1;
+		uint16_t testpatt	: 2;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t testpatt	: 2;
+		uint16_t entxpatt	: 1;
+		uint16_t enrxpatt	: 1;
+		uint16_t clkbyp		: 2;
+		uint16_t loopback	: 2;
+		uint16_t enbstx		: 1;
+		uint16_t enbsrx		: 1;
+		uint16_t enbspls	: 1;
+		uint16_t res		: 1;
+		uint16_t rate		: 2;
+		uint16_t invpat		: 1;
+		uint16_t res1		: 1;
+#endif
+	} bits;
+} esr_ti_testcfg_t;
+
+#define	TESTCFG_PAD_LOOPBACK		0x1
+#define	TESTCFG_INNER_CML_DIS_LOOPBACK	0x2
+#define	TESTCFG_INNER_CML_EN_LOOOPBACK	0x3
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _SYS_NXGE_NXGE_N2_ESR_HW_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/sys/nxge/nxge_phy_hw.h	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,633 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef	_SYS_NXGE_NXGE_PHY_HW_H
+#define	_SYS_NXGE_NXGE_PHY_HW_H
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#include <nxge_defs.h>
+
+#define	BCM5464_NEPTUNE_PORT_ADDR_BASE		10
+#define	BCM8704_NEPTUNE_PORT_ADDR_BASE		8
+#define	BCM8704_N2_PORT_ADDR_BASE		16
+#define	BCM8704_PMA_PMD_DEV_ADDR		1
+#define	BCM8704_PCS_DEV_ADDR			3
+#define	BCM8704_USER_DEV3_ADDR			3
+#define	BCM8704_PHYXS_ADDR			4
+#define	BCM8704_USER_DEV4_ADDR			4
+
+/* Definitions for BCM 5464R PHY chip */
+
+#define	BCM5464R_PHY_ECR	16
+#define	BCM5464R_PHY_ESR	17
+#define	BCM5464R_RXERR_CNT	18
+#define	BCM5464R_FALSECS_CNT	19
+#define	BCM5464R_RX_NOTOK_CNT	20
+#define	BCM5464R_ER_DATA	21
+#define	BCM5464R_RES		22
+#define	BCM5464R_ER_ACC		23
+#define	BCM5464R_AUX_CTL	24
+#define	BCM5464R_AUX_S		25
+#define	BCM5464R_INTR_S		26
+#define	BCM5464R_INTR_M		27
+#define	BCM5464R_MISC		28
+#define	BCM5464R_MISC1		29
+#define	BCM5464R_TESTR1		30
+
+#define	PHY_BCM_5464R_OUI	0x001018
+#define	PHY_BCM_5464R_MODEL	0x0B
+
+/*
+ * MII Register 16:  PHY Extended Control Register
+ */
+
+typedef	union _mii_phy_ecr_t {
+	uint16_t value;
+	struct {
+#ifdef _BIT_FIELDS_HTOL
+		uint16_t mac_phy_if_mode	: 1;
+		uint16_t dis_automdicross	: 1;
+		uint16_t tx_dis			: 1;
+		uint16_t intr_dis		: 1;
+		uint16_t force_intr		: 1;
+		uint16_t bypass_encdec		: 1;
+		uint16_t bypass_scrdes		: 1;
+		uint16_t bypass_mlt3		: 1;
+		uint16_t bypass_rx_sym		: 1;
+		uint16_t reset_scr		: 1;
+		uint16_t en_led_traffic		: 1;
+		uint16_t force_leds_on		: 1;
+		uint16_t force_leds_off		: 1;
+		uint16_t res			: 2;
+		uint16_t gmii_fifo_elas		: 1;
+#else
+		uint16_t gmii_fifo_elas		: 1;
+		uint16_t res			: 2;
+		uint16_t force_leds_off		: 1;
+		uint16_t force_leds_on		: 1;
+		uint16_t en_led_traffic		: 1;
+		uint16_t reset_scr		: 1;
+		uint16_t bypass_rx_sym		: 1;
+		uint16_t bypass_mlt3		: 1;
+		uint16_t bypass_scrdes		: 1;
+		uint16_t bypass_encdec		: 1;
+		uint16_t force_intr		: 1;
+		uint16_t intr_dis		: 1;
+		uint16_t tx_dis			: 1;
+		uint16_t dis_automdicross	: 1;
+		uint16_t mac_phy_if_mode	: 1;
+#endif
+	} bits;
+} mii_phy_ecr_t, *p_mii_phy_ecr_t;
+
+/*
+ * MII Register 17:  PHY Extended Status Register
+ */
+typedef	union _mii_phy_esr_t {
+	uint16_t value;
+	struct {
+#ifdef _BIT_FIELDS_HTOL
+		uint16_t anbpsfm		: 1;
+		uint16_t wsdwngr		: 1;
+		uint16_t mdi_crst		: 1;
+		uint16_t intr_s			: 1;
+		uint16_t rmt_rx_s		: 1;
+		uint16_t loc_rx_s		: 1;
+		uint16_t locked			: 1;
+		uint16_t link_s			: 1;
+		uint16_t crc_err		: 1;
+		uint16_t cext_err		: 1;
+		uint16_t bad_ssd		: 1;
+		uint16_t bad_esd		: 1;
+		uint16_t rx_err			: 1;
+		uint16_t tx_err			: 1;
+		uint16_t lock_err		: 1;
+		uint16_t mlt3_cerr		: 1;
+#else
+		uint16_t mlt3_cerr		: 1;
+		uint16_t lock_err		: 1;
+		uint16_t tx_err			: 1;
+		uint16_t rx_err			: 1;
+		uint16_t bad_esd		: 1;
+		uint16_t bad_ssd		: 1;
+		uint16_t cext_err		: 1;
+		uint16_t crc_err		: 1;
+		uint16_t link_s			: 1;
+		uint16_t locked			: 1;
+		uint16_t loc_rx_s		: 1;
+		uint16_t rmt_rx_s		: 1;
+		uint16_t intr_s			: 1;
+		uint16_t mdi_crst		: 1;
+		uint16_t wsdwngr		: 1;
+		uint16_t anbpsfm		: 1;
+#endif
+	} bits;
+} mii_phy_esr_t, *p_mii_phy_esr_t;
+
+/*
+ * MII Register 18:  Receive Error Counter Register
+ */
+typedef	union _mii_rxerr_cnt_t {
+	uint16_t value;
+	struct {
+		uint16_t rx_err_cnt		: 16;
+	} bits;
+} mii_rxerr_cnt_t, *p_mii_rxerr_cnt_t;
+
+/*
+ * MII Register 19:  False Carrier Sense Counter Register
+ */
+typedef	union _mii_falsecs_cnt_t {
+	uint16_t value;
+	struct {
+#ifdef _BIT_FIELDS_HTOL
+		uint16_t res			: 8;
+		uint16_t false_cs_cnt		: 8;
+#else
+		uint16_t false_cs_cnt		: 8;
+		uint16_t res			: 8;
+#endif
+	} bits;
+} mii_falsecs_cnt_t, *p_mii_falsecs_cnt_t;
+
+/*
+ * MII Register 20:  Receiver NOT_OK Counter Register
+ */
+typedef	union _mii_rx_notok_cnt_t {
+	uint16_t value;
+	struct {
+#ifdef _BIT_FIELDS_HTOL
+		uint16_t l_rx_notok_cnt		: 8;
+		uint16_t r_rx_notok_cnt		: 8;
+#else
+		uint16_t r_rx_notok_cnt		: 8;
+		uint16_t l_rx_notok_cnt		: 8;
+#endif
+	} bits;
+} mii_rx_notok_cnt_t, *p_mii_rx_notok_t;
+
+/*
+ * MII Register 21:  Expansion Register Data Register
+ */
+typedef	union _mii_er_data_t {
+	uint16_t value;
+	struct {
+		uint16_t reg_data;
+	} bits;
+} mii_er_data_t, *p_mii_er_data_t;
+
+/*
+ * MII Register 23:  Expansion Register Access Register
+ */
+typedef	union _mii_er_acc_t {
+	struct {
+#ifdef _BIT_FIELDS_HTOL
+		uint16_t res			: 4;
+		uint16_t er_sel			: 4;
+		uint16_t er_acc			: 8;
+#else
+		uint16_t er_acc			: 8;
+		uint16_t er_sel			: 4;
+		uint16_t res			: 4;
+#endif
+	} bits;
+} mii_er_acc_t, *p_mii_er_acc_t;
+
+#define	EXP_RXTX_PKT_CNT		0x0
+#define	EXP_INTR_STAT			0x1
+#define	MULTICOL_LED_SEL		0x4
+#define	MULTICOL_LED_FLASH_RATE_CTL	0x5
+#define	MULTICOL_LED_BLINK_CTL		0x6
+#define	CABLE_DIAG_CTL			0x10
+#define	CABLE_DIAG_RES			0x11
+#define	CABLE_DIAG_LEN_CH_2_1		0x12
+#define	CABLE_DIAG_LEN_CH_4_3		0x13
+
+/*
+ * MII Register 24:  Auxiliary Control Register
+ */
+typedef	union _mii_aux_ctl_t {
+	uint16_t value;
+	struct {
+#ifdef _BIT_FIELDS_HTOL
+		uint16_t ext_lb			: 1;
+		uint16_t ext_pkt_len		: 1;
+		uint16_t edge_rate_ctl_1000	: 2;
+		uint16_t res			: 1;
+		uint16_t write_1		: 1;
+		uint16_t res1			: 2;
+		uint16_t dis_partial_resp	: 1;
+		uint16_t res2			: 1;
+		uint16_t edge_rate_ctl_100	: 2;
+		uint16_t diag_mode		: 1;
+		uint16_t shadow_reg_sel		: 3;
+#else
+		uint16_t shadow_reg_sel		: 3;
+		uint16_t diag_mode		: 1;
+		uint16_t edge_rate_ctl_100	: 2;
+		uint16_t res2			: 1;
+		uint16_t dis_partial_resp	: 1;
+		uint16_t res1			: 2;
+		uint16_t write_1		: 1;
+		uint16_t res			: 1;
+		uint16_t edge_rate_ctl_1000	: 2;
+		uint16_t ext_pkt_len		: 1;
+		uint16_t ext_lb			: 1;
+#endif
+	} bits;
+} mii_aux_ctl_t, *p_mii_aux_ctl_t;
+
+#define	AUX_REG				0x0
+#define	AUX_10BASET			0x1
+#define	AUX_PWR_CTL			0x2
+#define	AUX_MISC_TEST			0x4
+#define	AUX_MISC_CTL			0x7
+
+/*
+ * MII Register 25:  Auxiliary Status Summary Register
+ */
+typedef	union _mii_aux_s_t {
+	uint16_t value;
+	struct {
+#ifdef _BIT_FIELDS_HTOL
+		uint16_t an_complete		: 1;
+		uint16_t an_complete_ack	: 1;
+		uint16_t an_ack_detect		: 1;
+		uint16_t an_ability_detect	: 1;
+		uint16_t an_np_wait		: 1;
+		uint16_t an_hcd			: 3;
+		uint16_t pd_fault		: 1;
+		uint16_t rmt_fault		: 1;
+		uint16_t an_page_rx		: 1;
+		uint16_t lp_an_ability		: 1;
+		uint16_t lp_np_ability		: 1;
+		uint16_t link_s			: 1;
+		uint16_t pause_res_rx_dir	: 1;
+		uint16_t pause_res_tx_dir	: 1;
+#else
+		uint16_t pause_res_tx_dir	: 1;
+		uint16_t pause_res_rx_dir	: 1;
+		uint16_t link_s			: 1;
+		uint16_t lp_np_ability		: 1;
+		uint16_t lp_an_ability		: 1;
+		uint16_t an_page_rx		: 1;
+		uint16_t rmt_fault		: 1;
+		uint16_t pd_fault		: 1;
+		uint16_t an_hcd			: 3;
+		uint16_t an_np_wait		: 1;
+		uint16_t an_ability_detect	: 1;
+		uint16_t an_ack_detect		: 1;
+		uint16_t an_complete_ack	: 1;
+		uint16_t an_complete		: 1;
+#endif
+	} bits;
+} mii_aux_s_t, *p_mii_aux_s_t;
+
+/*
+ * MII Register 26, 27:  Interrupt Status and Mask Registers
+ */
+typedef	union _mii_intr_t {
+	uint16_t value;
+	struct {
+#ifdef _BIT_FIELDS_HTOL
+		uint16_t res			: 1;
+		uint16_t illegal_pair_swap	: 1;
+		uint16_t mdix_status_change	: 1;
+		uint16_t exceed_hicnt_thres	: 1;
+		uint16_t exceed_locnt_thres	: 1;
+		uint16_t an_page_rx		: 1;
+		uint16_t hcd_nolink		: 1;
+		uint16_t no_hcd			: 1;
+		uint16_t neg_unsupported_hcd	: 1;
+		uint16_t scr_sync_err		: 1;
+		uint16_t rmt_rx_status_change	: 1;
+		uint16_t loc_rx_status_change	: 1;
+		uint16_t duplex_mode_change	: 1;
+		uint16_t link_speed_change	: 1;
+		uint16_t link_status_change	: 1;
+		uint16_t crc_err		: 1;
+#else
+		uint16_t crc_err		: 1;
+		uint16_t link_status_change	: 1;
+		uint16_t link_speed_change	: 1;
+		uint16_t duplex_mode_change	: 1;
+		uint16_t loc_rx_status_change	: 1;
+		uint16_t rmt_rx_status_change	: 1;
+		uint16_t scr_sync_err		: 1;
+		uint16_t neg_unsupported_hcd	: 1;
+		uint16_t no_hcd			: 1;
+		uint16_t hcd_nolink		: 1;
+		uint16_t an_page_rx		: 1;
+		uint16_t exceed_locnt_thres	: 1;
+		uint16_t exceed_hicnt_thres	: 1;
+		uint16_t mdix_status_change	: 1;
+		uint16_t illegal_pair_swap	: 1;
+		uint16_t res			: 1;
+#endif
+	} bits;
+} mii_intr_t, *p_mii_intr_t;
+
+/*
+ * MII Register 28:  Register 1C Access Register
+ */
+typedef	union _mii_misc_t {
+	uint16_t value;
+	struct {
+#ifdef _BIT_FIELDS_HTOL
+		uint16_t w_en			: 1;
+		uint16_t shadow_reg_sel		: 5;
+		uint16_t data			: 10;
+#else
+		uint16_t data			: 10;
+		uint16_t shadow_reg_sel		: 5;
+		uint16_t w_en			: 1;
+#endif
+	} bits;
+} mii_misc_t, *p_mii_misc_t;
+
+#define	LINK_LED_MODE			0x2
+#define	CLK_ALIGN_CTL			0x3
+#define	WIRE_SP_RETRY			0x4
+#define	CLK125				0x5
+#define	LED_STATUS			0x8
+#define	LED_CONTROL			0x9
+#define	AUTO_PWR_DOWN			0xA
+#define	LED_SEL1			0xD
+#define	LED_SEL2			0xE
+
+/*
+ * MII Register 29:  Master/Slave Seed / HCD Status Register
+ */
+
+typedef	union _mii_misc1_t {
+	uint16_t value;
+	struct {
+#ifdef _BIT_FIELDS_HTOL
+		uint16_t en_shadow_reg		: 1;
+		uint16_t data			: 15;
+#else
+		uint16_t data			: 15;
+		uint16_t en_shadow_reg		: 1;
+#endif
+	} bits;
+} mii_misc1_t, *p_mii_misc1_t;
+
+/*
+ * MII Register 30:  Test Register 1
+ */
+
+typedef	union _mii_test1_t {
+	uint16_t value;
+	struct {
+#ifdef _BIT_FIELDS_HTOL
+		uint16_t crc_err_cnt_sel	: 1;
+		uint16_t res			: 7;
+		uint16_t manual_swap_mdi_st	: 1;
+		uint16_t res1			: 7;
+#else
+		uint16_t res1			: 7;
+		uint16_t manual_swap_mdi_st	: 1;
+		uint16_t res			: 7;
+		uint16_t crc_err_cnt_sel	: 1;
+#endif
+	} bits;
+} mii_test1_t, *p_mii_test1_t;
+
+
+/* Definitions of BCM8704 */
+
+#define	BCM8704_PMD_CONTROL_REG			0
+#define	BCM8704_PMD_STATUS_REG			0x1
+#define	BCM8704_PMD_ID_0_REG			0x2
+#define	BCM8704_PMD_ID_1_REG			0x3
+#define	BCM8704_PMD_SPEED_ABIL_REG		0x4
+#define	BCM8704_PMD_DEV_IN_PKG1_REG		0x5
+#define	BCM8704_PMD_DEV_IN_PKG2_REG		0x6
+#define	BCM8704_PMD_CONTROL2_REG		0x7
+#define	BCM8704_PMD_STATUS2_REG			0x8
+#define	BCM8704_PMD_TRANSMIT_DIS_REG		0x9
+#define	BCM8704_PMD_RECEIVE_SIG_DETECT		0xa
+#define	BCM8704_PMD_ORG_UNIQUE_ID_0_REG		0xe
+#define	BCM8704_PMD_ORG_UNIQUE_ID_1_REG		0xf
+#define	BCM8704_PCS_CONTROL_REG			0
+#define	BCM8704_PCS_STATUS1_REG			0x1
+#define	BCM8704_PCS_ID_0_REG			0x2
+#define	BCM8704_PCS_ID_1_REG			0x3
+#define	BCM8704_PCS_SPEED_ABILITY_REG		0x4
+#define	BCM8704_PCS_DEV_IN_PKG1_REG		0x5
+#define	BCM8704_PCS_DEV_IN_PKG2_REG		0x6
+#define	BCM8704_PCS_CONTROL2_REG		0x7
+#define	BCM8704_PCS_STATUS2_REG			0x8
+#define	BCM8704_PCS_ORG_UNIQUE_ID_0_REG		0xe
+#define	BCM8704_PCS_ORG_UNIQUE_ID_1_REG		0xf
+#define	BCM8704_PCS_STATUS_REG			0x18
+#define	BCM8704_10GBASE_R_PCS_STATUS_REG	0x20
+#define	BCM8704_10GBASE_R_PCS_STATUS2_REG	0x21
+#define	BCM8704_PHYXS_CONTROL_REG		0
+#define	BCM8704_PHYXS_STATUS_REG		0x1
+#define	BCM8704_PHY_ID_0_REG			0x2
+#define	BCM8704_PHY_ID_1_REG			0x3
+#define	BCM8704_PHYXS_SPEED_ABILITY_REG		0x4
+#define	BCM8704_PHYXS_DEV_IN_PKG2_REG		0x5
+#define	BCM8704_PHYXS_DEV_IN_PKG1_REG		0x6
+#define	BCM8704_PHYXS_STATUS2_REG		0x8
+#define	BCM8704_PHYXS_ORG_UNIQUE_ID_0_REG	0xe
+#define	BCM8704_PHYXS_ORG_UNIQUE_ID_1_REG	0xf
+#define	BCM8704_PHYXS_XGXS_LANE_STATUS_REG	0x18
+#define	BCM8704_PHYXS_XGXS_TEST_CONTROL_REG	0x19
+#define	BCM8704_USER_CONTROL_REG		0xC800
+#define	BCM8704_USER_ANALOG_CLK_REG		0xC801
+#define	BCM8704_USER_PMD_RX_CONTROL_REG		0xC802
+#define	BCM8704_USER_PMD_TX_CONTROL_REG		0xC803
+#define	BCM8704_USER_ANALOG_STATUS0_REG		0xC804
+#define	BCM8704_USER_OPTICS_DIGITAL_CTRL_REG	0xC808
+#define	BCM8704_USER_RX2_CONTROL1_REG		0x80C6
+#define	BCM8704_USER_RX1_CONTROL1_REG		0x80D6
+#define	BCM8704_USER_RX0_CONTROL1_REG		0x80E6
+#define	BCM8704_USER_TX_ALARM_STATUS_REG	0x9004
+
+/* Rx Channel Control1 Register bits */
+#define	BCM8704_RXPOL_FLIP			0x20
+
+typedef	union _phyxs_control {
+	uint16_t value;
+	struct {
+#ifdef _BIT_FIELDS_HTOL
+		uint16_t reset			: 1;
+		uint16_t loopback		: 1;
+		uint16_t speed_sel2		: 1;
+		uint16_t res2			: 1;
+		uint16_t low_power		: 1;
+		uint16_t res1			: 4;
+		uint16_t speed_sel1		: 1;
+		uint16_t speed_sel0		: 4;
+		uint16_t res0			: 2;
+#else
+		uint16_t res0			: 2;
+		uint16_t speed_sel0		: 4;
+		uint16_t speed_sel1		: 1;
+		uint16_t res1			: 4;
+		uint16_t low_power		: 1;
+		uint16_t res2			: 1;
+		uint16_t speed_sel2		: 1;
+		uint16_t loopback		: 1;
+		uint16_t reset			: 1;
+#endif
+	} bits;
+} phyxs_control_t, *p_phyxs_control_t, pcs_control_t, *p_pcs_control_t;
+
+
+/* User Control Register (Dev=3 Addr=0xC800) */
+
+typedef	union _control {
+	uint16_t value;
+	struct {
+#ifdef _BIT_FIELDS_HTOL
+		uint16_t optxenb_lvl		: 1;
+		uint16_t optxrst_lvl		: 1;
+		uint16_t opbiasflt_lvl		: 1;
+		uint16_t obtmpflt_lvl		: 1;
+		uint16_t opprflt_lvl		: 1;
+		uint16_t optxflt_lvl		: 1;
+		uint16_t optrxlos_lvl		: 1;
+		uint16_t oprxflt_lvl		: 1;
+		uint16_t optxon_lvl		: 1;
+		uint16_t res1			: 7;
+#else
+		uint16_t res1			: 7;
+		uint16_t optxon_lvl		: 1;
+		uint16_t oprxflt_lvl		: 1;
+		uint16_t optrxlos_lvl		: 1;
+		uint16_t optxflt_lvl		: 1;
+		uint16_t opprflt_lvl		: 1;
+		uint16_t obtmpflt_lvl		: 1;
+		uint16_t opbiasflt_lvl		: 1;
+		uint16_t optxrst_lvl		: 1;
+		uint16_t optxenb_lvl		: 1;
+#endif
+	} bits;
+} control_t, *p_control_t;
+
+typedef	union _pmd_tx_control {
+	uint16_t value;
+	struct {
+#ifdef _BIT_FIELDS_HTOL
+		uint16_t res1			: 7;
+		uint16_t xfp_clken		: 1;
+		uint16_t tx_dac_txd		: 2;
+		uint16_t tx_dac_txck		: 2;
+		uint16_t tsd_lpwren		: 1;
+		uint16_t tsck_lpwren		: 1;
+		uint16_t cmu_lpwren		: 1;
+		uint16_t sfiforst		: 1;
+#else
+		uint16_t sfiforst		: 1;
+		uint16_t cmu_lpwren		: 1;
+		uint16_t tsck_lpwren		: 1;
+		uint16_t tsd_lpwren		: 1;
+		uint16_t tx_dac_txck		: 2;
+		uint16_t tx_dac_txd		: 2;
+		uint16_t xfp_clken		: 1;
+		uint16_t res1			: 7;
+#endif
+	} bits;
+} pmd_tx_control_t, *p_pmd_tx_control_t;
+
+
+/* PMD/Optics Digital Control Register (Dev=3 Addr=0xc808) */
+
+typedef	union _optics_dcntr {
+	uint16_t value;
+	struct {
+#ifdef _BIT_FIELDS_HTOL
+		uint16_t fault_mode		: 1;
+		uint16_t tx_pwrdown		: 1;
+		uint16_t rx_pwrdown		: 1;
+		uint16_t ext_flt_en		: 1;
+		uint16_t opt_rst		: 1;
+		uint16_t pcs_tx_inv_b		: 1;
+		uint16_t pcs_rx_inv		: 1;
+		uint16_t res3			: 2;
+		uint16_t gpio_sel		: 2;
+		uint16_t res2			: 1;
+		uint16_t lpbk_err_dis		: 1;
+		uint16_t res1			: 2;
+		uint16_t txonoff_pwdwn_dis	: 1;
+#else
+		uint16_t txonoff_pwdwn_dis	: 1;
+		uint16_t res1			: 2;
+		uint16_t lpbk_err_dis		: 1;
+		uint16_t res2			: 1;
+		uint16_t gpio_sel		: 2;
+		uint16_t res3			: 2;
+		uint16_t pcs_rx_inv		: 1;
+		uint16_t pcs_tx_inv_b		: 1;
+		uint16_t opt_rst		: 1;
+		uint16_t ext_flt_en		: 1;
+		uint16_t rx_pwrdown		: 1;
+		uint16_t tx_pwrdown		: 1;
+		uint16_t fault_mode		: 1;
+#endif
+	} bits;
+} optics_dcntr_t, *p_optics_dcntr_t;
+
+/* PMD Receive Signal Detect Register (Dev = 1 Register Address = 0x000A) */
+
+#define	PMD_RX_SIG_DET3			0x10
+#define	PMD_RX_SIG_DET2			0x08
+#define	PMD_RX_SIG_DET1			0x04
+#define	PMD_RX_SIG_DET0			0x02
+#define	GLOB_PMD_RX_SIG_OK		0x01
+
+/* 10GBase-R PCS Status Register (Dev = 3, Register Address = 0x0020) */
+
+#define	PCS_10GBASE_RX_LINK_STATUS	0x1000
+#define	PCS_PRBS31_ABLE			0x0004
+#define	PCS_10GBASE_R_HI_BER		0x0002
+#define	PCS_10GBASE_R_PCS_BLK_LOCK	0x0001
+
+/* XGXS Lane Status Register (Dev = 4, Register Address = 0x0018) */
+
+#define	XGXS_LANE_ALIGN_STATUS		0x1000
+#define	XGXS_PATTERN_TEST_ABILITY	0x0800
+#define	XGXS_LANE3_SYNC			0x0008
+#define	XGXS_LANE2_SYNC			0x0004
+#define	XGXS_LANE1_SYNC			0x0002
+#define	XGXS_LANE0_SYNC			0x0001
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _SYS_NXGE_NXGE_PHY_HW_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/sys/nxge/nxge_rxdma.h	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,465 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef	_SYS_NXGE_NXGE_RXDMA_H
+#define	_SYS_NXGE_NXGE_RXDMA_H
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#include <sys/nxge/nxge_rxdma_hw.h>
+#include <npi_rxdma.h>
+
+#define	RXDMA_CK_DIV_DEFAULT		7500 	/* 25 usec */
+/*
+ * Per the hardware RDC designer, use 8 cache lines during Atlas bringup.
+ */
+#define	RXDMA_RED_LESS_BYTES		(8 * 64) /* 8 cache line */
+#define	RXDMA_RED_LESS_ENTRIES		(RXDMA_RED_LESS_BYTES/8)
+#define	RXDMA_RED_WINDOW_DEFAULT	0
+#define	RXDMA_RED_THRES_DEFAULT		0
+
+#define	RXDMA_RCR_PTHRES_DEFAULT	0x20
+#define	RXDMA_RCR_TO_DEFAULT		0x8
+
+/*
+ * Hardware workaround: post (kick) receive buffers in batches of 16 (was 8).
+ */
+#define	NXGE_RXDMA_POST_BATCH		16
+
+#define	RXBUF_START_ADDR(a, index, bsize)	((a & (index * bsize))
+#define	RXBUF_OFFSET_FROM_START(a, start)	(start - a)
+#define	RXBUF_64B_ALIGNED		64
+
+#define	NXGE_RXBUF_EXTRA		34
+/*
+ * Receive buffer thresholds and buffer types
+ */
+#define	NXGE_RX_BCOPY_SCALE	8	/* use 1/8 as lowest granularity */
+typedef enum  {
+	NXGE_RX_COPY_ALL = 0,		/* do bcopy on every packet	 */
+	NXGE_RX_COPY_1,			/* bcopy on 1/8 of buffer posted */
+	NXGE_RX_COPY_2,			/* bcopy on 2/8 of buffer posted */
+	NXGE_RX_COPY_3,			/* bcopy on 3/8 of buffer posted */
+	NXGE_RX_COPY_4,			/* bcopy on 4/8 of buffer posted */
+	NXGE_RX_COPY_5,			/* bcopy on 5/8 of buffer posted */
+	NXGE_RX_COPY_6,			/* bcopy on 6/8 of buffer posted */
+	NXGE_RX_COPY_7,			/* bcopy on 7/8 of buffer posted */
+	NXGE_RX_COPY_NONE		/* don't do bcopy at all	 */
+} nxge_rxbuf_threshold_t;
+
+typedef enum  {
+	NXGE_RBR_TYPE0 = RCR_PKTBUFSZ_0,  /* bcopy buffer size 0 (small) */
+	NXGE_RBR_TYPE1 = RCR_PKTBUFSZ_1,  /* bcopy buffer size 1 (medium) */
+	NXGE_RBR_TYPE2 = RCR_PKTBUFSZ_2	  /* bcopy buffer size 2 (large) */
+} nxge_rxbuf_type_t;
+
+typedef	struct _rdc_errlog {
+	rdmc_par_err_log_t	pre_par;
+	rdmc_par_err_log_t	sha_par;
+	uint8_t			compl_err_type;
+} rdc_errlog_t;
+
+/*
+ * Receive  Statistics.
+ */
+typedef struct _nxge_rx_ring_stats_t {
+	uint64_t	ipackets;
+	uint64_t	ibytes;
+	uint32_t	ierrors;
+	uint32_t	multircv;
+	uint32_t	brdcstrcv;
+	uint32_t	norcvbuf;
+
+	uint32_t	rx_inits;
+	uint32_t	rx_jumbo_pkts;
+	uint32_t	rx_multi_pkts;
+	uint32_t	rx_mtu_pkts;
+	uint32_t	rx_no_buf;
+
+	/*
+	 * Receive buffer management statistics.
+	 */
+	uint32_t	rx_new_pages;
+	uint32_t	rx_new_mtu_pgs;
+	uint32_t	rx_new_nxt_pgs;
+	uint32_t	rx_reused_pgs;
+	uint32_t	rx_mtu_drops;
+	uint32_t	rx_nxt_drops;
+
+	/*
+	 * Error event stats.
+	 */
+	uint32_t	rx_rbr_tmout;
+	uint32_t	l2_err;
+	uint32_t	l4_cksum_err;
+	uint32_t	fflp_soft_err;
+	uint32_t	zcp_soft_err;
+	uint32_t	dcf_err;
+	uint32_t 	rbr_tmout;
+	uint32_t 	rsp_cnt_err;
+	uint32_t 	byte_en_err;
+	uint32_t 	byte_en_bus;
+	uint32_t 	rsp_dat_err;
+	uint32_t 	rcr_ack_err;
+	uint32_t 	dc_fifo_err;
+	uint32_t 	rcr_sha_par;
+	uint32_t 	rbr_pre_par;
+	uint32_t 	port_drop_pkt;
+	uint32_t 	wred_drop;
+	uint32_t 	rbr_pre_empty;
+	uint32_t 	rcr_shadow_full;
+	uint32_t 	config_err;
+	uint32_t 	rcrincon;
+	uint32_t 	rcrfull;
+	uint32_t 	rbr_empty;
+	uint32_t 	rbrfull;
+	uint32_t 	rbrlogpage;
+	uint32_t 	cfiglogpage;
+	uint32_t 	rcrto;
+	uint32_t 	rcrthres;
+	uint32_t 	mex;
+	rdc_errlog_t	errlog;
+} nxge_rx_ring_stats_t, *p_nxge_rx_ring_stats_t;
+
+typedef struct _nxge_rdc_sys_stats {
+	uint32_t	pre_par;
+	uint32_t	sha_par;
+	uint32_t	id_mismatch;
+	uint32_t	ipp_eop_err;
+	uint32_t	zcp_eop_err;
+} nxge_rdc_sys_stats_t, *p_nxge_rdc_sys_stats_t;
+
+/*
+ * Software reserved buffer offset
+ */
+typedef struct _nxge_rxbuf_off_hdr_t {
+	uint32_t		index;
+} nxge_rxbuf_off_hdr_t, *p_nxge_rxbuf_off_hdr_t;
+
+/*
+ * Definitions for each receive buffer block.
+ */
+typedef struct _nxge_rbb_t {
+	nxge_os_dma_common_t	dma_buf_info;
+	uint8_t			rbr_page_num;
+	uint32_t		block_size;
+	uint16_t		dma_channel;
+	uint32_t		bytes_received;
+	uint32_t		ref_cnt;
+	uint_t			pkt_buf_size;
+	uint_t			max_pkt_bufs;
+	uint32_t		cur_usage_cnt;
+} nxge_rbb_t, *p_nxge_rbb_t;
+
+
+typedef struct _rx_tx_param_t {
+	nxge_logical_page_t logical_pages[NXGE_MAX_LOGICAL_PAGES];
+} rx_tx_param_t, *p_rx_tx_param_t;
+
+typedef struct _rx_tx_params {
+	struct _tx_param_t 	*tx_param_p;
+} rx_tx_params_t, *p_rx_tx_params_t;
+
+
+typedef struct _rx_msg_t {
+	nxge_os_dma_common_t	buf_dma;
+	nxge_os_mutex_t 	lock;
+	struct _nxge_t		*nxgep;
+	struct _rx_rbr_ring_t	*rx_rbr_p;
+	boolean_t 		spare_in_use;
+	boolean_t 		free;
+	uint32_t 		ref_cnt;
+#ifdef RXBUFF_USE_SEPARATE_UP_CNTR
+	uint32_t 		pass_up_cnt;
+	boolean_t 		release;
+#endif
+	nxge_os_frtn_t 		freeb;
+	size_t 			bytes_arrived;
+	size_t 			bytes_expected;
+	size_t 			block_size;
+	uint32_t		block_index;
+	uint32_t 		pkt_buf_size;
+	uint32_t 		pkt_buf_size_code;
+	uint32_t 		max_pkt_bufs;
+	uint32_t		cur_usage_cnt;
+	uint32_t		max_usage_cnt;
+	uchar_t			*buffer;
+	uint32_t 		pri;
+	uint32_t 		shifted_addr;
+	boolean_t		use_buf_pool;
+	p_mblk_t 		rx_mblk_p;
+	boolean_t		rx_use_bcopy;
+} rx_msg_t, *p_rx_msg_t;
+
+typedef struct _rx_dma_handle_t {
+	nxge_os_dma_handle_t	dma_handle;	/* DMA handle	*/
+	nxge_os_acc_handle_t	acc_handle;	/* DMA memory handle */
+	npi_handle_t		npi_handle;
+} rx_dma_handle_t, *p_rx_dma_handle_t;
+
+#define	RXCOMP_HIST_ELEMENTS 100000
+
+typedef struct _nxge_rxcomphist_t {
+	uint_t 			comp_cnt;
+	uint64_t 		rx_comp_entry;
+} nxge_rxcomphist_t, *p_nxge_rxcomphist_t;
+
+/* Receive Completion Ring */
+typedef struct _rx_rcr_ring_t {
+	nxge_os_dma_common_t	rcr_desc;
+	uint8_t			rcr_page_num;
+	uint8_t			rcr_buf_page_num;
+
+	struct _nxge_t		*nxgep;
+
+	p_nxge_rx_ring_stats_t	rdc_stats;
+
+	rcrcfig_a_t		rcr_cfga;
+	rcrcfig_b_t		rcr_cfgb;
+	boolean_t		cfg_set;
+
+	nxge_os_mutex_t 	lock;
+	uint16_t		index;
+	uint16_t		rdc;
+	uint16_t		rdc_grp_id;
+	uint16_t		ldg_group_id;
+	boolean_t		full_hdr_flag;	 /* 1: 18 bytes header */
+	uint16_t		sw_priv_hdr_len; /* 0 - 192 bytes (SW) */
+	uint32_t 		comp_size;	 /* # of RCR entries */
+	uint64_t		rcr_addr;
+	uint_t 			comp_wrap_mask;
+	uint_t 			comp_rd_index;
+	uint_t 			comp_wt_index;
+
+	p_rcr_entry_t		rcr_desc_first_p;
+	p_rcr_entry_t		rcr_desc_first_pp;
+	p_rcr_entry_t		rcr_desc_last_p;
+	p_rcr_entry_t		rcr_desc_last_pp;
+
+	p_rcr_entry_t		rcr_desc_rd_head_p;	/* software next read */
+	p_rcr_entry_t		rcr_desc_rd_head_pp;
+
+	p_rcr_entry_t		rcr_desc_wt_tail_p;	/* hardware write */
+	p_rcr_entry_t		rcr_desc_wt_tail_pp;
+
+	uint64_t		rcr_tail_pp;
+	uint64_t		rcr_head_pp;
+	struct _rx_rbr_ring_t	*rx_rbr_p;
+	uint32_t		intr_timeout;
+	uint32_t		intr_threshold;
+	uint64_t		max_receive_pkts;
+	p_mblk_t		rx_first_mp;
+	mac_resource_handle_t	rcr_mac_handle;
+	uint32_t		rcvd_pkt_bytes; /* Received bytes of a packet */
+} rx_rcr_ring_t, *p_rx_rcr_ring_t;
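
The comp_rd_index/comp_wt_index/comp_wrap_mask trio implies the usual
power-of-two ring indexing; a minimal sketch of the assumed arithmetic
(assumption for illustration only, not code from this changeset):

	static void
	rcr_advance_rd_index(rx_rcr_ring_t *rcrp)
	{
		/* Masked increment of the software read index. */
		rcrp->comp_rd_index = (rcrp->comp_rd_index + 1) &
		    rcrp->comp_wrap_mask;
	}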
+
+
+
+/* Buffer index information */
+typedef struct _rxbuf_index_info_t {
+	uint32_t buf_index;
+	uint32_t start_index;
+	uint32_t buf_size;
+	uint64_t dvma_addr;
+	uint64_t kaddr;
+} rxbuf_index_info_t, *p_rxbuf_index_info_t;
+
+/* Receive buffer ring information */
+
+typedef struct _rxring_info_t {
+	uint32_t hint[3];
+	uint32_t block_size_mask;
+	uint16_t max_iterations;
+	rxbuf_index_info_t buffer[NXGE_DMA_BLOCK];
+} rxring_info_t, *p_rxring_info_t;
+
+
+/* Receive Buffer Block Ring */
+typedef struct _rx_rbr_ring_t {
+	nxge_os_dma_common_t	rbr_desc;
+	p_rx_msg_t 		*rx_msg_ring;
+	p_nxge_dma_common_t 	*dma_bufp;
+	rbr_cfig_a_t		rbr_cfga;
+	rbr_cfig_b_t		rbr_cfgb;
+	rbr_kick_t		rbr_kick;
+	log_page_vld_t		page_valid;
+	log_page_mask_t		page_mask_1;
+	log_page_mask_t		page_mask_2;
+	log_page_value_t	page_value_1;
+	log_page_value_t	page_value_2;
+	log_page_relo_t		page_reloc_1;
+	log_page_relo_t		page_reloc_2;
+	log_page_hdl_t		page_hdl;
+
+	boolean_t		cfg_set;
+
+	nxge_os_mutex_t		lock;
+	nxge_os_mutex_t		post_lock;
+	uint16_t		index;
+	struct _nxge_t		*nxgep;
+	uint16_t		rdc;
+	uint16_t		rdc_grp_id;
+	uint_t 			rbr_max_size;
+	uint64_t		rbr_addr;
+	uint_t 			rbr_wrap_mask;
+	uint_t 			rbb_max;
+	uint_t 			rbb_added;
+	uint_t			block_size;
+	uint_t			num_blocks;
+	uint_t			tnblocks;
+	uint_t			pkt_buf_size0;
+	uint_t			pkt_buf_size0_bytes;
+	uint_t			npi_pkt_buf_size0;
+	uint_t			pkt_buf_size1;
+	uint_t			pkt_buf_size1_bytes;
+	uint_t			npi_pkt_buf_size1;
+	uint_t			pkt_buf_size2;
+	uint_t			pkt_buf_size2_bytes;
+	uint_t			npi_pkt_buf_size2;
+
+	uint64_t		rbr_head_pp;
+	uint64_t		rbr_tail_pp;
+	uint32_t		*rbr_desc_vp;
+
+	p_rx_rcr_ring_t		rx_rcr_p;
+
+	rx_dma_ent_msk_t	rx_dma_ent_mask;
+
+	rbr_hdh_t		rbr_head;
+	rbr_hdl_t		rbr_tail;
+	uint_t 			rbr_wr_index;
+	uint_t 			rbr_rd_index;
+	uint_t 			rbr_hw_head_index;
+	uint64_t 		rbr_hw_head_ptr;
+
+	/* may not be needed */
+	p_nxge_rbb_t		rbb_p;
+
+	rxring_info_t  *ring_info;
+#ifdef RX_USE_RECLAIM_POST
+	uint32_t hw_freed;
+	uint32_t sw_freed;
+	uint32_t msg_rd_index;
+	uint32_t msg_cnt;
+#endif
+#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
+	uint64_t		hv_rx_buf_base_ioaddr_pp;
+	uint64_t		hv_rx_buf_ioaddr_size;
+	uint64_t		hv_rx_cntl_base_ioaddr_pp;
+	uint64_t		hv_rx_cntl_ioaddr_size;
+	boolean_t		hv_set;
+#endif
+	uint_t 			rbr_consumed;
+	uint_t 			rbr_threshold_hi;
+	uint_t 			rbr_threshold_lo;
+	nxge_rxbuf_type_t	rbr_bufsize_type;
+	boolean_t		rbr_use_bcopy;
+} rx_rbr_ring_t, *p_rx_rbr_ring_t;
+
+/* Receive Mailbox */
+typedef struct _rx_mbox_t {
+	nxge_os_dma_common_t	rx_mbox;
+	rxdma_cfig1_t		rx_cfg1;
+	rxdma_cfig2_t		rx_cfg2;
+	uint64_t		mbox_addr;
+	boolean_t		cfg_set;
+
+	nxge_os_mutex_t 	lock;
+	uint16_t		index;
+	struct _nxge_t		*nxgep;
+	uint16_t		rdc;
+} rx_mbox_t, *p_rx_mbox_t;
+
+
+typedef struct _rx_rbr_rings_t {
+	p_rx_rbr_ring_t 	*rbr_rings;
+	uint32_t			ndmas;
+	boolean_t		rxbuf_allocated;
+} rx_rbr_rings_t, *p_rx_rbr_rings_t;
+
+typedef struct _rx_rcr_rings_t {
+	p_rx_rcr_ring_t 	*rcr_rings;
+	uint32_t			ndmas;
+	boolean_t		cntl_buf_allocated;
+} rx_rcr_rings_t, *p_rx_rcr_rings_t;
+
+typedef struct _rx_mbox_areas_t {
+	p_rx_mbox_t 		*rxmbox_areas;
+	uint32_t			ndmas;
+	boolean_t		mbox_allocated;
+} rx_mbox_areas_t, *p_rx_mbox_areas_t;
+
+/*
+ * Global register definitions per chip; they are initialized
+ * using the function zero control registers.
+ */
+
+typedef struct _rxdma_globals {
+	boolean_t		mode32;
+	uint16_t		rxdma_ck_div_cnt;
+	uint16_t		rxdma_red_ran_init;
+	uint32_t		rxdma_eing_timeout;
+} rxdma_globals_t, *p_rxdma_globals;
+
+
+/*
+ * Receive DMA Prototypes.
+ */
+nxge_status_t nxge_init_rxdma_channel_rcrflush(p_nxge_t, uint8_t);
+nxge_status_t nxge_init_rxdma_channels(p_nxge_t);
+void nxge_uninit_rxdma_channels(p_nxge_t);
+nxge_status_t nxge_reset_rxdma_channel(p_nxge_t, uint16_t);
+nxge_status_t nxge_init_rxdma_channel_cntl_stat(p_nxge_t,
+	uint16_t, p_rx_dma_ctl_stat_t);
+nxge_status_t nxge_enable_rxdma_channel(p_nxge_t,
+	uint16_t, p_rx_rbr_ring_t, p_rx_rcr_ring_t,
+	p_rx_mbox_t);
+nxge_status_t nxge_init_rxdma_channel_event_mask(p_nxge_t,
+		uint16_t, p_rx_dma_ent_msk_t);
+
+nxge_status_t nxge_rxdma_hw_mode(p_nxge_t, boolean_t);
+void nxge_hw_start_rx(p_nxge_t);
+void nxge_fixup_rxdma_rings(p_nxge_t);
+nxge_status_t nxge_dump_rxdma_channel(p_nxge_t, uint8_t);
+
+void nxge_rxdma_fix_channel(p_nxge_t, uint16_t);
+void nxge_rxdma_fixup_channel(p_nxge_t, uint16_t, int);
+int nxge_rxdma_get_ring_index(p_nxge_t, uint16_t);
+
+void nxge_rxdma_regs_dump_channels(p_nxge_t);
+nxge_status_t nxge_rxdma_handle_sys_errors(p_nxge_t);
+void nxge_rxdma_inject_err(p_nxge_t, uint32_t, uint8_t);
+
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _SYS_NXGE_NXGE_RXDMA_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/sys/nxge/nxge_rxdma_hw.h	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,1899 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef	_SYS_NXGE_NXGE_RXDMA_HW_H
+#define	_SYS_NXGE_NXGE_RXDMA_HW_H
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#include <nxge_defs.h>
+#include <nxge_hw.h>
+
+/*
+ * NIU: Receive DMA Channels
+ */
+/* Receive DMA Clock Divider */
+#define	RX_DMA_CK_DIV_REG	(FZC_DMC + 0x00000)
+#define	RX_DMA_CK_DIV_SHIFT	0			/* bits 15:0 */
+#define	RX_DMA_CK_DIV_MASK	0x000000000000FFFFULL
+
+typedef union _rx_dma_ck_div_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res1_1:16;
+			uint32_t cnt:16;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t cnt:16;
+			uint32_t res1_1:16;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} rx_dma_ck_div_t, *p_rx_dma_ck_div_t;
+
+
+/*
+ * Default Port Receive DMA Channel (RDC)
+ */
+#define	DEF_PT_RDC_REG(port)	(FZC_DMC + 0x00008 * (port + 1))
+#define	DEF_PT0_RDC_REG		(FZC_DMC + 0x00008)
+#define	DEF_PT1_RDC_REG		(FZC_DMC + 0x00010)
+#define	DEF_PT2_RDC_REG		(FZC_DMC + 0x00018)
+#define	DEF_PT3_RDC_REG		(FZC_DMC + 0x00020)
+#define	DEF_PT_RDC_SHIFT	0			/* bits 4:0 */
+#define	DEF_PT_RDC_MASK		0x000000000000001FULL
+
+
+#define	RDC_TBL_REG		(FZC_ZCP + 0x10000)
+#define	RDC_TBL_SHIFT		0			/* bits 4:0 */
+#define	RDC_TBL_MASK		0x000000000000001FULL
+
+/* For the default port RDC and RDC table */
+typedef union _def_pt_rdc_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res1_1:27;
+			uint32_t rdc:5;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t rdc:5;
+			uint32_t res1_1:27;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} def_pt_rdc_t, *p_def_pt_rdc_t;
+
+typedef union _rdc_tbl_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res1_1:28;
+			uint32_t rdc:4;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t rdc:4;
+			uint32_t res1_1:28;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} rdc_tbl_t, *p_rdc_tbl_t;
+
+/*
+ * RDC: 32 bit Addressing mode
+ */
+#define	RX_ADDR_MD_REG		(FZC_DMC + 0x00070)
+#define	RX_ADDR_MD_SHIFT	0			/* bits 0:0 */
+#define	RX_ADDR_MD_SET_32	0x0000000000000001ULL	/* 1 to select 32 bit */
+#define	RX_ADDR_MD_MASK		0x0000000000000001ULL
+
+typedef union _rx_addr_md_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res1_1:28;
+			uint32_t dbg_pt_mux_sel:2;
+			uint32_t ram_acc:1;
+			uint32_t mode32:1;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t mode32:1;
+			uint32_t ram_acc:1;
+			uint32_t dbg_pt_mux_sel:2;
+			uint32_t res1_1:28;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} rx_addr_md_t, *p_rx_addr_md_t;
+
+/*
+ * RDC: Port Scheduler
+ */
+
+#define	PT_DRR_WT_REG(portnm)		((FZC_DMC + 0x00028) + (portnm * 8))
+#define	PT_DRR_WT0_REG		(FZC_DMC + 0x00028)
+#define	PT_DRR_WT1_REG		(FZC_DMC + 0x00030)
+#define	PT_DRR_WT2_REG		(FZC_DMC + 0x00038)
+#define	PT_DRR_WT3_REG		(FZC_DMC + 0x00040)
+#define	PT_DRR_WT_SHIFT		0
+#define	PT_DRR_WT_MASK		0x000000000000FFFFULL	/* bits 15:0 */
+#define	PT_DRR_WT_DEFAULT_10G	0x0400
+#define	PT_DRR_WT_DEFAULT_1G	0x0066
+typedef union _pt_drr_wt_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res1_1:16;
+			uint32_t wt:16;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t wt:16;
+			uint32_t res1_1:16;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} pt_drr_wt_t, *p_pt_drr_wt_t;
+
+#define	NXGE_RX_DRR_WT_10G	0x400
+#define	NXGE_RX_DRR_WT_1G	0x066
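
(For scale: these defaults encode roughly the 10:1 bandwidth ratio between
a 10 Gb/s and a 1 Gb/s port, since 0x400 = 1024, 0x066 = 102, and
1024 / 102 is approximately 10.)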
+
+/* Port FIFO Usage */
+#define	PT_USE_REG(portnum)		((FZC_DMC + 0x00048) + (portnum * 8))
+#define	PT_USE0_REG		(FZC_DMC + 0x00048)
+#define	PT_USE1_REG		(FZC_DMC + 0x00050)
+#define	PT_USE2_REG		(FZC_DMC + 0x00058)
+#define	PT_USE3_REG		(FZC_DMC + 0x00060)
+#define	PT_USE_SHIFT		0			/* bits 19:0 */
+#define	PT_USE_MASK		0x00000000000FFFFFULL
+
+typedef union _pt_use_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res1_1:12;
+			uint32_t cnt:20;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t cnt:20;
+			uint32_t res1_1:12;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} pt_use_t, *p_pt_use_t;
+
+/*
+ * RDC: Partitioning Support
+ *	(Each of the following registers is for each RDC)
+ * Please refer to nxge_hw.h for the common logical
+ * page configuration register definitions.
+ */
+#define	RX_LOG_REG_SIZE			0x40
+#define	RX_LOG_DMA_OFFSET(channel)	(channel * RX_LOG_REG_SIZE)
+
+#define	RX_LOG_PAGE_VLD_REG	(FZC_DMC + 0x20000)
+#define	RX_LOG_PAGE_MASK1_REG	(FZC_DMC + 0x20008)
+#define	RX_LOG_PAGE_VAL1_REG	(FZC_DMC + 0x20010)
+#define	RX_LOG_PAGE_MASK2_REG	(FZC_DMC + 0x20018)
+#define	RX_LOG_PAGE_VAL2_REG	(FZC_DMC + 0x20020)
+#define	RX_LOG_PAGE_RELO1_REG	(FZC_DMC + 0x20028)
+#define	RX_LOG_PAGE_RELO2_REG	(FZC_DMC + 0x20030)
+#define	RX_LOG_PAGE_HDL_REG	(FZC_DMC + 0x20038)
+
+/* RX and TX have the same definitions */
+#define	RX_LOG_PAGE1_VLD_SHIFT	1			/* bit 1 */
+#define	RX_LOG_PAGE0_VLD_SHIFT	0			/* bit 0 */
+#define	RX_LOG_PAGE1_VLD	0x0000000000000002ULL
+#define	RX_LOG_PAGE0_VLD	0x0000000000000001ULL
+#define	RX_LOG_PAGE1_VLD_MASK	0x0000000000000002ULL
+#define	RX_LOG_PAGE0_VLD_MASK	0x0000000000000001ULL
+#define	RX_LOG_FUNC_VLD_SHIFT	2			/* bit 3:2 */
+#define	RX_LOG_FUNC_VLD_MASK	0x000000000000000CULL
+
+#define	LOG_PAGE_ADDR_SHIFT	12	/* bits[43:12] --> bits[31:0] */
+
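A sketch of the address packing the shift above implies (per the comment,
bits [43:12] of the DMA address land in bits [31:0] of the logical page
mask/value registers; the helper name is illustrative):

	static uint32_t
	rx_log_page_field(uint64_t dma_addr)
	{
		return ((uint32_t)(dma_addr >> LOG_PAGE_ADDR_SHIFT));
	}
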
+/* RDC: Weighted Random Early Discard */
+#define	RED_RAN_INIT_REG	(FZC_DMC + 0x00068)
+
+#define	RED_RAN_INIT_SHIFT	0			/* bits 15:0 */
+#define	RED_RAN_INIT_MASK	0x000000000000ffffULL
+
+/* Weighted Random */
+typedef union _red_ran_init_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res1_1:15;
+			uint32_t enable:1;
+			uint32_t init:16;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t init:16;
+			uint32_t enable:1;
+			uint32_t res1_1:15;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} red_ran_init_t, *p_red_ran_init_t;
+
+/*
+ * Buffer block descriptor
+ */
+typedef struct _rx_desc_t {
+	uint32_t	block_addr;
+} rx_desc_t, *p_rx_desc_t;
+
+/*
+ * RDC: RED Parameter
+ *	(Each DMC has one RED register)
+ */
+#define	RDC_RED_CHANNEL_SIZE		(0x40)
+#define	RDC_RED_CHANNEL_OFFSET(channel)	(channel * RDC_RED_CHANNEL_SIZE)
+
+#define	RDC_RED_PARA_REG		(FZC_DMC + 0x30000)
+#define	RDC_RED_RDC_PARA_REG(rdc)	\
+	(RDC_RED_PARA_REG + (rdc * RDC_RED_CHANNEL_SIZE))
+
+/* the layout of this register is  rx_disc_cnt_t */
+#define	RDC_RED_DISC_CNT_REG		(FZC_DMC + 0x30008)
+#define	RDC_RED_RDC_DISC_REG(rdc)	\
+	(RDC_RED_DISC_CNT_REG + (rdc * RDC_RED_CHANNEL_SIZE))
+
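A small check of the per-RDC spacing these macros encode (sketch only,
assumes <assert.h>; the function name is illustrative): each RDC's RED
registers sit RDC_RED_CHANNEL_SIZE (0x40) bytes past the previous channel's.

	static void
	rdc_red_addr_check(void)
	{
		assert((RDC_RED_RDC_PARA_REG(1) - RDC_RED_PARA_REG) == 0x40);
		assert((RDC_RED_RDC_DISC_REG(2) - RDC_RED_DISC_CNT_REG) == 0x80);
	}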
+
+#define	RDC_RED_PARA1_RBR_SCL_SHIFT	0			/* bits 2:0 */
+#define	RDC_RED_PARA1_RBR_SCL_MASK	0x0000000000000007ULL
+#define	RDC_RED_PARA1_ENB_SHIFT		3			/* bit 3 */
+#define	RDC_RED_PARA1_ENB		0x0000000000000008ULL
+#define	RDC_RED_PARA1_ENB_MASK		0x0000000000000008ULL
+
+#define	RDC_RED_PARA_WIN_SHIFT		0			/* bits 3:0 */
+#define	RDC_RED_PARA_WIN_MASK		0x000000000000000fULL
+#define	RDC_RED_PARA_THRE_SHIFT		4			/* bits 15:4 */
+#define	RDC_RED_PARA_THRE_MASK		0x000000000000fff0ULL
+#define	RDC_RED_PARA_WIN_SYN_SHIFT	16			/* bits 19:16 */
+#define	RDC_RED_PARA_WIN_SYN_MASK	0x00000000000f0000ULL
+#define	RDC_RED_PARA_THRE_SYN_SHIFT	20			/* bits 31:20 */
+#define	RDC_RED_PARA_THRE_SYN_MASK	0x00000000fff00000ULL
+
+/* RDC:  RED parameters  */
+typedef union _rdc_red_para_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint32_t thre_sync:12;
+		uint32_t win_syn:4;
+		uint32_t thre:12;
+		uint32_t win:4;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint32_t win:4;
+		uint32_t thre:12;
+		uint32_t win_syn:4;
+		uint32_t thre_sync:12;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} rdc_red_para_t, *p_rdc_red_para_t;
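+
+/*
+ * Illustrative sketch: building a per-channel RED parameter value
+ * through the union above.  Field meanings follow the field names;
+ * the result would be written to RDC_RED_RDC_PARA_REG(rdc), and the
+ * register access itself (npi layer) is not shown here.
+ */
+static uint64_t
+rdc_red_para_build(uint32_t win, uint32_t thre,
+	uint32_t win_syn, uint32_t thre_sync)
+{
+	rdc_red_para_t red;
+
+	red.value = 0;
+	red.bits.ldw.win = win;			/* 4-bit window field */
+	red.bits.ldw.thre = thre;		/* 12-bit threshold field */
+	red.bits.ldw.win_syn = win_syn;
+	red.bits.ldw.thre_sync = thre_sync;
+	return (red.value);
+}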
+
+/*
+ * RDC: Receive DMA Datapath Configuration
+ *	The following register definitions are for
+ *	each DMA channel. Each DMA CSR is 512 bytes
+ *	(0x200).
+ */
+#define	RXDMA_CFIG1_REG			(DMC + 0x00000)
+#define	RXDMA_CFIG2_REG			(DMC + 0x00008)
+
+#define	RXDMA_CFIG1_MBADDR_H_SHIFT	0			/* bits 11:0 */
+#define	RXDMA_CFIG1_MBADDR_H_MASK	0x0000000000000fffULL
+#define	RXDMA_CFIG1_RST_SHIFT		30			/* bit 30 */
+#define	RXDMA_CFIG1_RST			0x0000000040000000ULL
+#define	RXDMA_CFIG1_RST_MASK		0x0000000040000000ULL
+#define	RXDMA_CFIG1_EN_SHIFT		31
+#define	RXDMA_CFIG1_EN			0x0000000080000000ULL
+#define	RXDMA_CFIG1_EN_MASK		0x0000000080000000ULL
+
+typedef union _rxdma_cfig1_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t en:1;
+			uint32_t rst:1;
+			uint32_t qst:1;
+			uint32_t res2:17;
+			uint32_t mbaddr_h:12;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t mbaddr_h:12;
+			uint32_t res2:17;
+			uint32_t qst:1;
+			uint32_t rst:1;
+			uint32_t en:1;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} rxdma_cfig1_t, *p_rxdma_cfig1_t;
+
+#define	RXDMA_HDR_SIZE_DEFAULT		2
+#define	RXDMA_HDR_SIZE_FULL		18
+
+#define	RXDMA_CFIG2_FULL_HDR_SHIFT	0		/* bit 0: 1 = full header */
+#define	RXDMA_CFIG2_FULL_HDR		0x0000000000000001ULL
+#define	RXDMA_CFIG2_FULL_HDR_MASK	0x0000000000000001ULL
+#define	RXDMA_CFIG2_OFFSET_SHIFT		1		/* bit 3:1 */
+#define	RXDMA_CFIG2_OFFSET_MASK		0x000000000000000eULL
+#define	RXDMA_CFIG2_MBADDR_L_SHIFT	6			/* bit 31:6 */
+#define	RXDMA_CFIG2_MBADDR_L_MASK	0x00000000ffffffc0ULL
+
+typedef union _rxdma_cfig2_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t mbaddr:26;
+			uint32_t res2:3;
+			uint32_t offset:2;
+			uint32_t full_hdr:1;
+
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t full_hdr:1;
+			uint32_t offset:2;
+			uint32_t res2:3;
+			uint32_t mbaddr:26;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} rxdma_cfig2_t, *p_rxdma_cfig2_t;
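+
+/*
+ * Illustrative sketch: the mailbox address is presumably split with
+ * its upper bits in CFIG1 (mbaddr_h) and bits 31:6 in CFIG2 (mbaddr),
+ * i.e. the mailbox must be 64-byte aligned.  This split is inferred
+ * from the field widths above; the driver proper programs these
+ * registers through the npi layer.
+ */
+static void
+rxdma_cfig_set_mbox(uint64_t mbox_pa,
+	rxdma_cfig1_t *cfg1, rxdma_cfig2_t *cfg2)
+{
+	cfg1->bits.ldw.mbaddr_h = (uint32_t)(mbox_pa >> 32) & 0xfff;
+	cfg2->bits.ldw.mbaddr =
+	    (uint32_t)((mbox_pa & RXDMA_CFIG2_MBADDR_L_MASK) >>
+	    RXDMA_CFIG2_MBADDR_L_SHIFT);
+}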
+
+/*
+ * RDC: Receive Block Ring Configuration
+ *	The following register definitions are for
+ *	each DMA channel.
+ */
+#define	RBR_CFIG_A_REG			(DMC + 0x00010)
+#define	RBR_CFIG_B_REG			(DMC + 0x00018)
+#define	RBR_KICK_REG			(DMC + 0x00020)
+#define	RBR_STAT_REG			(DMC + 0x00028)
+#define	RBR_HDH_REG			(DMC + 0x00030)
+#define	RBR_HDL_REG			(DMC + 0x00038)
+
+#define	RBR_CFIG_A_STADDR_SHIFT		6			/* bits 17:6 */
+#define	RBR_CFIG_A_STDADDR_MASK		0x000000000003ffc0ULL
+#define	RBR_CFIG_A_STADDR_BASE_SHIFT	18			/* bits 43:18 */
+#define	RBR_CFIG_A_STDADDR_BASE_MASK	0x00000ffffffc0000ULL
+#define	RBR_CFIG_A_LEN_SHIFT		48			/* bits 63:48 */
+#define	RBR_CFIG_A_LEN_MASK		0xFFFF000000000000ULL
+
+typedef union _rbr_cfig_a_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t len:16;
+			uint32_t res1:4;
+			uint32_t staddr_base:12;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t staddr_base:12;
+			uint32_t res1:4;
+			uint32_t len:16;
+#endif
+		} hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t staddr_base:14;
+			uint32_t staddr:12;
+			uint32_t res2:6;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t res2:6;
+			uint32_t staddr:12;
+			uint32_t staddr_base:14;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t len:16;
+			uint32_t res1:4;
+			uint32_t staddr_base:12;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t staddr_base:12;
+			uint32_t res1:4;
+			uint32_t len:16;
+#endif
+		} hdw;
+#endif
+	} bits;
+} rbr_cfig_a_t, *p_rbr_cfig_a_t;
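+
+/*
+ * Illustrative sketch: building the RBR_CFIG_A value.  The ring base
+ * address occupies bits 43:6 (so the ring is presumably 64-byte
+ * aligned) and the ring length sits in bits 63:48; this is inferred
+ * from the masks above, not a definitive recipe.
+ */
+static uint64_t
+rbr_cfig_a_build(uint64_t rbr_pa, uint16_t nentries)
+{
+	uint64_t value;
+
+	value = rbr_pa &
+	    (RBR_CFIG_A_STDADDR_MASK | RBR_CFIG_A_STDADDR_BASE_MASK);
+	value |= ((uint64_t)nentries << RBR_CFIG_A_LEN_SHIFT) &
+	    RBR_CFIG_A_LEN_MASK;
+	return (value);
+}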
+
+
+#define	RBR_CFIG_B_BUFSZ0_SHIFT		0			/* bit 1:0 */
+#define	RBR_CFIG_B_BUFSZ0_MASK		0x0000000000000003ULL
+#define	RBR_CFIG_B_VLD0_SHIFT		7			/* bit 7 */
+#define	RBR_CFIG_B_VLD0			0x0000000000000080ULL
+#define	RBR_CFIG_B_VLD0_MASK		0x0000000000000080ULL
+#define	RBR_CFIG_B_BUFSZ1_SHIFT		8			/* bit 9:8 */
+#define	RBR_CFIG_B_BUFSZ1_MASK		0x0000000000000300ULL
+#define	RBR_CFIG_B_VLD1_SHIFT		15			/* bit 15 */
+#define	RBR_CFIG_B_VLD1			0x0000000000008000ULL
+#define	RBR_CFIG_B_VLD1_MASK		0x0000000000008000ULL
+#define	RBR_CFIG_B_BUFSZ2_SHIFT		16			/* bit 17:16 */
+#define	RBR_CFIG_B_BUFSZ2_MASK		0x0000000000030000ULL
+#define	RBR_CFIG_B_VLD2_SHIFT		23			/* bit 23 */
+#define	RBR_CFIG_B_VLD2			0x0000000000800000ULL
+#define	RBR_CFIG_B_BKSIZE_SHIFT		24			/* bit 25:24 */
+#define	RBR_CFIG_B_BKSIZE_MASK		0x0000000003000000ULL
+
+
+typedef union _rbr_cfig_b_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res1_1:6;
+			uint32_t bksize:2;
+			uint32_t vld2:1;
+			uint32_t res2:5;
+			uint32_t bufsz2:2;
+			uint32_t vld1:1;
+			uint32_t res3:5;
+			uint32_t bufsz1:2;
+			uint32_t vld0:1;
+			uint32_t res4:5;
+			uint32_t bufsz0:2;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t bufsz0:2;
+			uint32_t res4:5;
+			uint32_t vld0:1;
+			uint32_t bufsz1:2;
+			uint32_t res3:5;
+			uint32_t vld1:1;
+			uint32_t bufsz2:2;
+			uint32_t res2:5;
+			uint32_t vld2:1;
+			uint32_t bksize:2;
+			uint32_t res1_1:6;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} rbr_cfig_b_t, *p_rbr_cfig_b_t;
+
+
+#define	RBR_KICK_SHIFT			0			/* bit 15:0 */
+#define	RBR_KICK_MASK			0x000000000000ffffULL
+
+
+typedef union _rbr_kick_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res1_1:16;
+			uint32_t bkadd:16;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t bkadd:16;
+			uint32_t res1_1:16;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} rbr_kick_t, *p_rbr_kick_t;
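+
+/*
+ * Illustrative sketch: software presumably posts newly added receive
+ * buffer blocks by placing their count in the bkadd field and writing
+ * the result to RBR_KICK_REG; the register write path itself is not
+ * shown here.
+ */
+static uint64_t
+rbr_kick_build(uint16_t nblocks)
+{
+	rbr_kick_t kick;
+
+	kick.value = 0;
+	kick.bits.ldw.bkadd = nblocks;
+	return (kick.value);
+}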
+
+#define	RBR_STAT_QLEN_SHIFT		0		/* bit 15:0 */
+#define	RBR_STAT_QLEN_MASK		0x000000000000ffffULL
+#define	RBR_STAT_OFLOW_SHIFT		16		/* bit 16 */
+#define	RBR_STAT_OFLOW			0x0000000000010000ULL
+#define	RBR_STAT_OFLOW_MASK		0x0000000000010000ULL
+
+typedef union _rbr_stat_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res1_1:15;
+			uint32_t oflow:1;
+			uint32_t qlen:16;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t qlen:16;
+			uint32_t oflow:1;
+			uint32_t res1_1:15;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} rbr_stat_t, *p_rbr_stat_t;
+
+
+#define	RBR_HDH_HEAD_H_SHIFT		0			/* bit 11:0 */
+#define	RBR_HDH_HEAD_H_MASK		0x0000000000000fffULL
+typedef union _rbr_hdh_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res1_1:20;
+			uint32_t head_h:12;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t head_h:12;
+			uint32_t res1_1:20;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} rbr_hdh_t, *p_rbr_hdh_t;
+
+#define	RBR_HDL_HEAD_L_SHIFT		2			/* bit 31:2 */
+#define	RBR_HDL_HEAD_L_MASK		0x00000000FFFFFFFCULL
+
+typedef union _rbr_hdl_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t head_l:30;
+			uint32_t res2:2;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t res2:2;
+			uint32_t head_l:30;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} rbr_hdl_t, *p_rbr_hdl_t;
+
+/*
+ * Receive Completion Ring (RCR)
+ */
+#define	RCR_PKT_BUF_ADDR_SHIFT		0			/* bit 37:0 */
+#define	RCR_PKT_BUF_ADDR_SHIFT_FULL	6	/* full buffer address */
+#define	RCR_PKT_BUF_ADDR_MASK		0x0000003FFFFFFFFFULL
+#define	RCR_PKTBUFSZ_SHIFT		38			/* bit 39:38 */
+#define	RCR_PKTBUFSZ_MASK		0x000000C000000000ULL
+#define	RCR_L2_LEN_SHIFT		40			/* bit 53:40 */
+#define	RCR_L2_LEN_MASK			0x003fff0000000000ULL
+#define	RCR_DCF_ERROR_SHIFT		54			/* bit 54 */
+#define	RCR_DCF_ERROR_MASK		0x0040000000000000ULL
+#define	RCR_ERROR_SHIFT			55			/* bit 57:55 */
+#define	RCR_ERROR_MASK			0x0380000000000000ULL
+#define	RCR_PROMIS_SHIFT		58			/* bit 58 */
+#define	RCR_PROMIS_MASK			0x0400000000000000ULL
+#define	RCR_FRAG_SHIFT			59			/* bit 59 */
+#define	RCR_FRAG_MASK			0x0800000000000000ULL
+#define	RCR_ZERO_COPY_SHIFT		60			/* bit 60 */
+#define	RCR_ZERO_COPY_MASK		0x1000000000000000ULL
+#define	RCR_PKT_TYPE_SHIFT		61			/* bit 62:61 */
+#define	RCR_PKT_TYPE_MASK		0x6000000000000000ULL
+#define	RCR_MULTI_SHIFT			63			/* bit 63 */
+#define	RCR_MULTI_MASK			0x8000000000000000ULL
+
+#define	RCR_PKTBUFSZ_0			0x00
+#define	RCR_PKTBUFSZ_1			0x01
+#define	RCR_PKTBUFSZ_2			0x02
+#define	RCR_SINGLE_BLOCK		0x03
+
+#define	RCR_NO_ERROR			0x0
+#define	RCR_L2_ERROR			0x1
+#define	RCR_L4_CSUM_ERROR		0x3
+#define	RCR_FFLP_SOFT_ERROR		0x4
+#define	RCR_ZCP_SOFT_ERROR		0x5
+#define	RCR_ERROR_RESERVE		0x6
+#define	RCR_ERROR_RESERVE_END	0x7
+
+#define	RCR_PKT_TYPE_UDP		0x1
+#define	RCR_PKT_TYPE_TCP		0x2
+#define	RCR_PKT_TYPE_SCTP		0x3
+#define	RCR_PKT_TYPE_OTHERS		0x0
+#define	RCR_PKT_IS_TCP			0x2000000000000000ULL
+#define	RCR_PKT_IS_UDP			0x4000000000000000ULL
+#define	RCR_PKT_IS_SCTP			0x6000000000000000ULL
+
+
+typedef union _rcr_entry_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t multi:1;
+			uint32_t pkt_type:2;
+			uint32_t zero_copy:1;
+			uint32_t noport:1;
+			uint32_t promis:1;
+			uint32_t error:3;
+			uint32_t dcf_err:1;
+			uint32_t l2_len:14;
+			uint32_t pktbufsz:2;
+			uint32_t pkt_buf_addr:6;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t pkt_buf_addr:6;
+			uint32_t pktbufsz:2;
+			uint32_t l2_len:14;
+			uint32_t dcf_err:1;
+			uint32_t error:3;
+			uint32_t promis:1;
+			uint32_t noport:1;
+			uint32_t zero_copy:1;
+			uint32_t pkt_type:2;
+			uint32_t multi:1;
+#endif
+		} hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t pkt_buf_addr:32;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t pkt_buf_addr:32;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t multi:1;
+			uint32_t pkt_type:2;
+			uint32_t zero_copy:1;
+			uint32_t noport:1;
+			uint32_t promis:1;
+			uint32_t error:3;
+			uint32_t dcf_err:1;
+			uint32_t l2_len:14;
+			uint32_t pktbufsz:2;
+			uint32_t pkt_buf_addr:6;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t pkt_buf_addr:6;
+			uint32_t pktbufsz:2;
+			uint32_t l2_len:14;
+			uint32_t dcf_err:1;
+			uint32_t error:3;
+			uint32_t promis:1;
+			uint32_t noport:1;
+			uint32_t zero_copy:1;
+			uint32_t pkt_type:2;
+			uint32_t multi:1;
+#endif
+		} hdw;
+#endif
+	} bits;
+} rcr_entry_t, *p_rcr_entry_t;
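+
+/*
+ * Illustrative sketch: extracting the commonly used fields of a
+ * completion-ring entry.  The stored buffer address is re-expanded by
+ * RCR_PKT_BUF_ADDR_SHIFT_FULL to recover the full byte address.
+ */
+static void
+rcr_entry_decode(uint64_t entry, uint64_t *bufaddr,
+	uint32_t *l2len, uint32_t *pkttype)
+{
+	*bufaddr = (entry & RCR_PKT_BUF_ADDR_MASK) <<
+	    RCR_PKT_BUF_ADDR_SHIFT_FULL;
+	*l2len = (uint32_t)((entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT);
+	*pkttype = (uint32_t)((entry & RCR_PKT_TYPE_MASK) >>
+	    RCR_PKT_TYPE_SHIFT);
+}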
+
+/*
+ * Receive Completion Ring Configuration.
+ * (for each DMA channel)
+ */
+#define	RCRCFIG_A_REG			(DMC + 0x00040)
+#define	RCRCFIG_B_REG			(DMC + 0x00048)
+#define	RCRSTAT_A_REG			(DMC + 0x00050)
+#define	RCRSTAT_B_REG			(DMC + 0x00058)
+#define	RCRSTAT_C_REG			(DMC + 0x00060)
+#define	RX_DMA_ENT_MSK_REG		(DMC + 0x00068)
+#define	RX_DMA_CTL_STAT_REG		(DMC + 0x00070)
+#define	RCR_FLSH_REG			(DMC + 0x00078)
+#if OLD
+#define	RX_DMA_LOGA_REG			(DMC + 0x00080)
+#define	RX_DMA_LOGB_REG			(DMC + 0x00088)
+#endif
+#define	RX_DMA_CTL_STAT_DBG_REG		(DMC + 0x00098)
+
+/* (DMC + 0x00040) */
+#define	RCRCFIG_A_STADDR_SHIFT		6	/* bit 18:6 */
+#define	RCRCFIG_A_STADDR_MASK		0x000000000007FFC0ULL
+#define	RCRCFIG_A_STADDR_BASE_SHIF	19	/* bit 43:19 */
+#define	RCRCFIG_A_STADDR_BASE_MASK	0x00000FFFFFF80000ULL
+#define	RCRCFIG_A_LEN_SHIF		48	/* bit 63:48 */
+#define	RCRCFIG_A_LEN__MASK		0xFFFF000000000000ULL
+
+/* (DMC + 0x00048) */
+#define	RCRCFIG_B_TIMEOUT_SHIFT		0		/* bit 5:0 */
+#define	RCRCFIG_B_TIMEOUT_MASK		0x000000000000003FULL
+#define	RCRCFIG_B_ENTOUT_SHIFT		15		/* bit  15 */
+#define	RCRCFIG_B_TIMEOUT		0x0000000000008000ULL
+#define	RCRCFIG_B_PTHRES_SHIFT		16		/* bit 31:16 */
+#define	RCRCFIG_B_PTHRES_MASK		0x00000000FFFF0000ULL
+
+/* (DMC + 0x00050) */
+#define	RCRSTAT_A_QLEN_SHIFT		0		/* bit 15:0 */
+#define	RCRSTAT_A_QLEN_MASK		0x000000000000FFFFULL
+#define	RCRSTAT_A_PKT_OFL_SHIFT		16		/* bit 16 */
+#define	RCRSTAT_A_PKT_OFL_MASK		0x0000000000010000ULL
+#define	RCRSTAT_A_ENT_OFL_SHIFT		17		/* bit 17 */
+#define	RCRSTAT_A_ENT_QFL_MASK		0x0000000000020000ULL
+
+#define	RCRSTAT_C_TLPTR_H_SHIFT		0		/* bit 11:0 */
+#define	RCRSTAT_C_TLPTR_H_MASK		0x0000000000000FFFULL
+
+#define	RCRSTAT_D_TLPTR_L_SHIFT		3		/* bit 31:3 */
+#define	RCRSTAT_D_TLPTR_L_MASK		0x00000000FFFFFFF8ULL
+
+/* Receive DMA Interrupt Behavior: Event Mask  (DMC + 0x00068) */
+#define	RX_DMA_ENT_MSK_CFIGLOGPGE_SHIFT	0		/* bit 0: 0 to flag */
+#define	RX_DMA_ENT_MSK_CFIGLOGPGE_MASK	0x0000000000000001ULL
+#define	RX_DMA_ENT_MSK_RBRLOGPGE_SHIFT	1		/* bit 1: 0 to flag */
+#define	RX_DMA_ENT_MSK_RBRLOGPGE_MASK	0x0000000000000002ULL
+#define	RX_DMA_ENT_MSK_RBRFULL_SHIFT	2		/* bit 2: 0 to flag */
+#define	RX_DMA_ENT_MSK_RBRFULL_MASK	0x0000000000000004ULL
+#define	RX_DMA_ENT_MSK_RBREMPTY_SHIFT	3		/* bit 3: 0 to flag */
+#define	RX_DMA_ENT_MSK_RBREMPTY_MASK	0x0000000000000008ULL
+#define	RX_DMA_ENT_MSK_RCRFULL_SHIFT	4		/* bit 4: 0 to flag */
+#define	RX_DMA_ENT_MSK_RCRFULL_MASK	0x0000000000000010ULL
+#define	RX_DMA_ENT_MSK_RCRINCON_SHIFT	5		/* bit 5: 0 to flag */
+#define	RX_DMA_ENT_MSK_RCRINCON_MASK	0x0000000000000020ULL
+#define	RX_DMA_ENT_MSK_CONFIG_ERR_SHIFT	6		/* bit 6: 0 to flag */
+#define	RX_DMA_ENT_MSK_CONFIG_ERR_MASK	0x0000000000000040ULL
+#define	RX_DMA_ENT_MSK_RCRSH_FULL_SHIFT	7		/* bit 7: 0 to flag */
+#define	RX_DMA_ENT_MSK_RCRSH_FULL_MASK	0x0000000000000080ULL
+#define	RX_DMA_ENT_MSK_RBR_PRE_EMPTY_SHIFT	8	/* bit 8: 0 to flag */
+#define	RX_DMA_ENT_MSK_RBR_PRE_EMPTY_MASK	0x0000000000000100ULL
+#define	RX_DMA_ENT_MSK_WRED_DROP_SHIFT	9		/* bit 9: 0 to flag */
+#define	RX_DMA_ENT_MSK_WRED_DROP_MASK	0x0000000000000200ULL
+#define	RX_DMA_ENT_MSK_PTDROP_PKT_SHIFT	10		/* bit 10: 0 to flag */
+#define	RX_DMA_ENT_MSK_PTDROP_PKT_MASK	0x0000000000000400ULL
+#define	RX_DMA_ENT_MSK_RBR_PRE_PAR_SHIFT	11	/* bit 11: 0 to flag */
+#define	RX_DMA_ENT_MSK_RBR_PRE_PAR_MASK	0x0000000000000800ULL
+#define	RX_DMA_ENT_MSK_RCR_SHA_PAR_SHIFT	12	/* bit 12: 0 to flag */
+#define	RX_DMA_ENT_MSK_RCR_SHA_PAR_MASK	0x0000000000001000ULL
+#define	RX_DMA_ENT_MSK_RCRTO_SHIFT	13		/* bit 13: 0 to flag */
+#define	RX_DMA_ENT_MSK_RCRTO_MASK	0x0000000000002000ULL
+#define	RX_DMA_ENT_MSK_THRES_SHIFT	14		/* bit 14: 0 to flag */
+#define	RX_DMA_ENT_MSK_THRES_MASK	0x0000000000004000ULL
+#define	RX_DMA_ENT_MSK_DC_FIFO_ERR_SHIFT	16	/* bit 16: 0 to flag */
+#define	RX_DMA_ENT_MSK_DC_FIFO_ERR_MASK	0x0000000000010000ULL
+#define	RX_DMA_ENT_MSK_RCR_ACK_ERR_SHIFT	17	/* bit 17: 0 to flag */
+#define	RX_DMA_ENT_MSK_RCR_ACK_ERR_MASK	0x0000000000020000ULL
+#define	RX_DMA_ENT_MSK_RSP_DAT_ERR_SHIFT	18	/* bit 18: 0 to flag */
+#define	RX_DMA_ENT_MSK_RSP_DAT_ERR_MASK	0x0000000000040000ULL
+#define	RX_DMA_ENT_MSK_BYTE_EN_BUS_SHIFT	19	/* bit 19: 0 to flag */
+#define	RX_DMA_ENT_MSK_BYTE_EN_BUS_MASK	0x0000000000080000ULL
+#define	RX_DMA_ENT_MSK_RSP_CNT_ERR_SHIFT	20	/* bit 20: 0 to flag */
+#define	RX_DMA_ENT_MSK_RSP_CNT_ERR_MASK	0x0000000000100000ULL
+#define	RX_DMA_ENT_MSK_RBR_TMOUT_SHIFT	21		/* bit 21: 0 to flag */
+#define	RX_DMA_ENT_MSK_RBR_TMOUT_MASK	0x0000000000200000ULL
+#define	RX_DMA_ENT_MSK_ALL	(RX_DMA_ENT_MSK_CFIGLOGPGE_MASK |	\
+				RX_DMA_ENT_MSK_RBRLOGPGE_MASK |	\
+				RX_DMA_ENT_MSK_RBRFULL_MASK |		\
+				RX_DMA_ENT_MSK_RBREMPTY_MASK |		\
+				RX_DMA_ENT_MSK_RCRFULL_MASK |		\
+				RX_DMA_ENT_MSK_RCRINCON_MASK |		\
+				RX_DMA_ENT_MSK_CONFIG_ERR_MASK |	\
+				RX_DMA_ENT_MSK_RCRSH_FULL_MASK |	\
+				RX_DMA_ENT_MSK_RBR_PRE_EMPTY_MASK |	\
+				RX_DMA_ENT_MSK_WRED_DROP_MASK |	\
+				RX_DMA_ENT_MSK_PTDROP_PKT_MASK |	\
+				RX_DMA_ENT_MSK_RBR_PRE_PAR_MASK |	\
+				RX_DMA_ENT_MSK_RCR_SHA_PAR_MASK |	\
+				RX_DMA_ENT_MSK_RCRTO_MASK |		\
+				RX_DMA_ENT_MSK_THRES_MASK |		\
+				RX_DMA_ENT_MSK_DC_FIFO_ERR_MASK |	\
+				RX_DMA_ENT_MSK_RCR_ACK_ERR_MASK |	\
+				RX_DMA_ENT_MSK_RSP_DAT_ERR_MASK |	\
+				RX_DMA_ENT_MSK_BYTE_EN_BUS_MASK |	\
+				RX_DMA_ENT_MSK_RSP_CNT_ERR_MASK |	\
+				RX_DMA_ENT_MSK_RBR_TMOUT_MASK)
+
+/* Receive DMA Control and Status  (DMC + 0x00070) */
+#define	RX_DMA_CTL_STAT_PKTREAD_SHIFT	0	/* WO, bit 15:0 */
+#define	RX_DMA_CTL_STAT_PKTREAD_MASK	0x000000000000ffffULL
+#define	RX_DMA_CTL_STAT_PTRREAD_SHIFT	16	/* WO, bit 31:16 */
+#define	RX_DMA_CTL_STAT_PTRREAD_MASK	0x00000000FFFF0000ULL
+#define	RX_DMA_CTL_STAT_CFIGLOGPG_SHIFT 32	/* RO, bit 32 */
+#define	RX_DMA_CTL_STAT_CFIGLOGPG	0x0000000100000000ULL
+#define	RX_DMA_CTL_STAT_CFIGLOGPG_MASK	0x0000000100000000ULL
+#define	RX_DMA_CTL_STAT_RBRLOGPG_SHIFT	33	/* RO, bit 33 */
+#define	RX_DMA_CTL_STAT_RBRLOGPG	0x0000000200000000ULL
+#define	RX_DMA_CTL_STAT_RBRLOGPG_MASK	0x0000000200000000ULL
+#define	RX_DMA_CTL_STAT_RBRFULL_SHIFT	34	/* RO, bit 34 */
+#define	RX_DMA_CTL_STAT_RBRFULL		0x0000000400000000ULL
+#define	RX_DMA_CTL_STAT_RBRFULL_MASK	0x0000000400000000ULL
+#define	RX_DMA_CTL_STAT_RBREMPTY_SHIFT	35	/* RW1C, bit 35 */
+#define	RX_DMA_CTL_STAT_RBREMPTY	0x0000000800000000ULL
+#define	RX_DMA_CTL_STAT_RBREMPTY_MASK	0x0000000800000000ULL
+#define	RX_DMA_CTL_STAT_RCRFULL_SHIFT	36	/* RW1C, bit 36 */
+#define	RX_DMA_CTL_STAT_RCRFULL		0x0000001000000000ULL
+#define	RX_DMA_CTL_STAT_RCRFULL_MASK	0x0000001000000000ULL
+#define	RX_DMA_CTL_STAT_RCRINCON_SHIFT	37	/* RO, bit 37 */
+#define	RX_DMA_CTL_STAT_RCRINCON	0x0000002000000000ULL
+#define	RX_DMA_CTL_STAT_RCRINCON_MASK	0x0000002000000000ULL
+#define	RX_DMA_CTL_STAT_CONFIG_ERR_SHIFT 38	/* RO, bit 38 */
+#define	RX_DMA_CTL_STAT_CONFIG_ERR	0x0000004000000000ULL
+#define	RX_DMA_CTL_STAT_CONFIG_ERR_MASK	0x0000004000000000ULL
+#define	RX_DMA_CTL_STAT_RCR_SHDW_FULL_SHIFT 39	/* RO, bit 39 */
+#define	RX_DMA_CTL_STAT_RCR_SHDW_FULL 0x0000008000000000ULL
+#define	RX_DMA_CTL_STAT_RCR_SHDW_FULL_MASK 0x0000008000000000ULL
+#define	RX_DMA_CTL_STAT_RBR_PRE_EMTY_SHIFT 40	/* RO, bit 40 */
+#define	RX_DMA_CTL_STAT_RBR_PRE_EMTY 0x0000010000000000ULL
+#define	RX_DMA_CTL_STAT_RBR_PRE_EMTY_MASK  0x0000010000000000ULL
+#define	RX_DMA_CTL_STAT_WRED_DROP_SHIFT 41	/* RO, bit 41 */
+#define	RX_DMA_CTL_STAT_WRED_DROP 0x0000020000000000ULL
+#define	RX_DMA_CTL_STAT_WRED_DROP_MASK  0x0000020000000000ULL
+#define	RX_DMA_CTL_STAT_PORT_DROP_PKT_SHIFT 42	/* RO, bit 42 */
+#define	RX_DMA_CTL_STAT_PORT_DROP_PKT 0x0000040000000000ULL
+#define	RX_DMA_CTL_STAT_PORT_DROP_PKT_MASK  0x0000040000000000ULL
+#define	RX_DMA_CTL_STAT_RBR_PRE_PAR_SHIFT 43	/* RO, bit 43 */
+#define	RX_DMA_CTL_STAT_RBR_PRE_PAR 0x0000080000000000ULL
+#define	RX_DMA_CTL_STAT_RBR_PRE_PAR_MASK  0x0000080000000000ULL
+#define	RX_DMA_CTL_STAT_RCR_SHA_PAR_SHIFT 44	/* RO, bit 44 */
+#define	RX_DMA_CTL_STAT_RCR_SHA_PAR 0x0000100000000000ULL
+#define	RX_DMA_CTL_STAT_RCR_SHA_PAR_MASK  0x0000100000000000ULL
+#define	RX_DMA_CTL_STAT_RCRTO_SHIFT	45	/* RW1C, bit 45 */
+#define	RX_DMA_CTL_STAT_RCRTO		0x0000200000000000ULL
+#define	RX_DMA_CTL_STAT_RCRTO_MASK	0x0000200000000000ULL
+#define	RX_DMA_CTL_STAT_RCRTHRES_SHIFT	46	/* RO, bit 46 */
+#define	RX_DMA_CTL_STAT_RCRTHRES	0x0000400000000000ULL
+#define	RX_DMA_CTL_STAT_RCRTHRES_MASK	0x0000400000000000ULL
+#define	RX_DMA_CTL_STAT_MEX_SHIFT	47	/* RW, bit 47 */
+#define	RX_DMA_CTL_STAT_MEX		0x0000800000000000ULL
+#define	RX_DMA_CTL_STAT_MEX_MASK	0x0000800000000000ULL
+#define	RX_DMA_CTL_STAT_DC_FIFO_ERR_SHIFT	48	/* RW1C, bit 48 */
+#define	RX_DMA_CTL_STAT_DC_FIFO_ERR		0x0001000000000000ULL
+#define	RX_DMA_CTL_STAT_DC_FIFO_ERR_MASK	0x0001000000000000ULL
+#define	RX_DMA_CTL_STAT_RCR_ACK_ERR_SHIFT	49	/* RO, bit 49 */
+#define	RX_DMA_CTL_STAT_RCR_ACK_ERR		0x0002000000000000ULL
+#define	RX_DMA_CTL_STAT_RCR_ACK_ERR_MASK	0x0002000000000000ULL
+#define	RX_DMA_CTL_STAT_RSP_DAT_ERR_SHIFT	50	/* RO, bit 50 */
+#define	RX_DMA_CTL_STAT_RSP_DAT_ERR		0x0004000000000000ULL
+#define	RX_DMA_CTL_STAT_RSP_DAT_ERR_MASK	0x0004000000000000ULL
+
+#define	RX_DMA_CTL_STAT_BYTE_EN_BUS_SHIFT	51	/* RO, bit 51 */
+#define	RX_DMA_CTL_STAT_BYTE_EN_BUS		0x0008000000000000ULL
+#define	RX_DMA_CTL_STAT_BYTE_EN_BUS_MASK	0x0008000000000000ULL
+
+#define	RX_DMA_CTL_STAT_RSP_CNT_ERR_SHIFT	52	/* RO, bit 52 */
+#define	RX_DMA_CTL_STAT_RSP_CNT_ERR		0x0010000000000000ULL
+#define	RX_DMA_CTL_STAT_RSP_CNT_ERR_MASK	0x0010000000000000ULL
+
+#define	RX_DMA_CTL_STAT_RBR_TMOUT_SHIFT	53	/* RO, bit 53 */
+#define	RX_DMA_CTL_STAT_RBR_TMOUT		0x0020000000000000ULL
+#define	RX_DMA_CTL_STAT_RBR_TMOUT_MASK	0x0020000000000000ULL
+#define	RX_DMA_CTRL_STAT_ENT_MASK_SHIFT 32
+#define	RX_DMA_CTL_STAT_ERROR 			(RX_DMA_ENT_MSK_ALL << \
+						RX_DMA_CTRL_STAT_ENT_MASK_SHIFT)
+
+/* the following are write 1 to clear bits */
+#define	RX_DMA_CTL_STAT_WR1C	(RX_DMA_CTL_STAT_RBREMPTY | \
+				RX_DMA_CTL_STAT_RCR_SHDW_FULL | \
+				RX_DMA_CTL_STAT_RBR_PRE_EMTY | \
+				RX_DMA_CTL_STAT_WRED_DROP | \
+				RX_DMA_CTL_STAT_PORT_DROP_PKT | \
+				RX_DMA_CTL_STAT_RCRTO | \
+				RX_DMA_CTL_STAT_RCRTHRES | \
+				RX_DMA_CTL_STAT_DC_FIFO_ERR)
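+
+/*
+ * Illustrative sketch: acknowledging the control/status register.
+ * The RW1C bits are cleared by writing back the ones that were read
+ * as set, and the write-only pktread/ptrread fields report how many
+ * RCR packets and entries software has consumed.  The actual register
+ * access (npi layer) is not shown here.
+ */
+static uint64_t
+rx_dma_ctl_stat_ack(uint64_t cs, uint16_t npkts, uint16_t nptrs)
+{
+	uint64_t wr;
+
+	wr = cs & (RX_DMA_CTL_STAT_WR1C);
+	wr |= ((uint64_t)npkts << RX_DMA_CTL_STAT_PKTREAD_SHIFT) &
+	    RX_DMA_CTL_STAT_PKTREAD_MASK;
+	wr |= ((uint64_t)nptrs << RX_DMA_CTL_STAT_PTRREAD_SHIFT) &
+	    RX_DMA_CTL_STAT_PTRREAD_MASK;
+	return (wr);
+}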
+
+/* Receive DMA Interrupt Behavior: Force an update to RCR (DMC + 0x00078) */
+#define	RCR_FLSH_SHIFT			0	/* RW, bit 0:0 */
+#define	RCR_FLSH_SET			0x0000000000000001ULL
+#define	RCR_FLSH_MASK			0x0000000000000001ULL
+
+/* Receive DMA Interrupt Behavior: the first error log (DMC + 0x00080) */
+#define	RX_DMA_LOGA_ADDR_SHIFT		0	/* RO, bit 11:0 */
+#define	RX_DMA_LOGA_ADDR		0x0000000000000FFFULL
+#define	RX_DMA_LOGA_ADDR_MASK		0x0000000000000FFFULL
+#define	RX_DMA_LOGA_TYPE_SHIFT		28	/* RO, bit 30:28 */
+#define	RX_DMA_LOGA_TYPE		0x0000000070000000ULL
+#define	RX_DMA_LOGA_TYPE_MASK		0x0000000070000000ULL
+#define	RX_DMA_LOGA_MULTI_SHIFT		31	/* RO, bit 31 */
+#define	RX_DMA_LOGA_MULTI		0x0000000080000000ULL
+#define	RX_DMA_LOGA_MULTI_MASK		0x0000000080000000ULL
+
+/* Receive DMA Interrupt Behavior: the first error log, low address (DMC + 0x00088) */
+#define	RX_DMA_LOGA_ADDR_L_SHIFT	0	/* RO, bit 31:0 */
+#define	RX_DMA_LOGA_ADDRL_L		0x00000000FFFFFFFFULL
+#define	RX_DMA_LOGA_ADDR_LMASK		0x00000000FFFFFFFFULL
+
+typedef union _rcrcfig_a_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t len:16;
+			uint32_t res1:4;
+			uint32_t staddr_base:12;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t staddr_base:12;
+			uint32_t res1:4;
+			uint32_t len:16;
+#endif
+		} hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t staddr_base:13;
+			uint32_t staddr:13;
+			uint32_t res2:6;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t res2:6;
+			uint32_t staddr:13;
+			uint32_t staddr_base:13;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t len:16;
+			uint32_t res1:4;
+			uint32_t staddr_base:12;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t staddr_base:12;
+			uint32_t res1:4;
+			uint32_t len:16;
+#endif
+		} hdw;
+#endif
+	} bits;
+} rcrcfig_a_t, *p_rcrcfig_a_t;
+
+
+typedef union _rcrcfig_b_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t pthres:16;
+			uint32_t entout:1;
+			uint32_t res1:9;
+			uint32_t timeout:6;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t timeout:6;
+			uint32_t res1:9;
+			uint32_t entout:1;
+			uint32_t pthres:16;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} rcrcfig_b_t, *p_rcrcfig_b_t;
+
+
+typedef union _rcrstat_a_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res1:16;
+			uint32_t qlen:16;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t qlen:16;
+			uint32_t res1:16;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} rcrstat_a_t, *p_rcrstat_a_t;
+
+
+typedef union _rcrstat_b_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res1:20;
+			uint32_t tlptr_h:12;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t tlptr_h:12;
+			uint32_t res1:20;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} rcrstat_b_t, *p_rcrstat_b_t;
+
+
+typedef union _rcrstat_c_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t tlptr_l:29;
+			uint32_t res1:3;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t res1:3;
+			uint32_t tlptr_l:29;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} rcrstat_c_t, *p_rcrstat_c_t;
+
+
+/* Receive DMA Event Mask */
+typedef union _rx_dma_ent_msk_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t rsrvd2:10;
+			uint32_t rbr_tmout:1;
+			uint32_t rsp_cnt_err:1;
+			uint32_t byte_en_bus:1;
+			uint32_t rsp_dat_err:1;
+			uint32_t rcr_ack_err:1;
+			uint32_t dc_fifo_err:1;
+			uint32_t rsrvd:1;
+			uint32_t rcrthres:1;
+			uint32_t rcrto:1;
+			uint32_t rcr_sha_par:1;
+			uint32_t rbr_pre_par:1;
+			uint32_t port_drop_pkt:1;
+			uint32_t wred_drop:1;
+			uint32_t rbr_pre_empty:1;
+			uint32_t rcr_shadow_full:1;
+			uint32_t config_err:1;
+			uint32_t rcrincon:1;
+			uint32_t rcrfull:1;
+			uint32_t rbr_empty:1;
+			uint32_t rbrfull:1;
+			uint32_t rbrlogpage:1;
+			uint32_t cfiglogpage:1;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t cfiglogpage:1;
+			uint32_t rbrlogpage:1;
+			uint32_t rbrfull:1;
+			uint32_t rbr_empty:1;
+			uint32_t rcrfull:1;
+			uint32_t rcrincon:1;
+			uint32_t config_err:1;
+			uint32_t rcr_shadow_full:1;
+			uint32_t rbr_pre_empty:1;
+			uint32_t wred_drop:1;
+			uint32_t port_drop_pkt:1;
+			uint32_t rbr_pre_par:1;
+			uint32_t rcr_sha_par:1;
+			uint32_t rcrto:1;
+			uint32_t rcrthres:1;
+			uint32_t rsrvd:1;
+			uint32_t dc_fifo_err:1;
+			uint32_t rcr_ack_err:1;
+			uint32_t rsp_dat_err:1;
+			uint32_t byte_en_bus:1;
+			uint32_t rsp_cnt_err:1;
+			uint32_t rbr_tmout:1;
+			uint32_t rsrvd2:10;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} rx_dma_ent_msk_t, *p_rx_dma_ent_msk_t;
+
+
+/* Receive DMA Control and Status */
+typedef union _rx_dma_ctl_stat_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t rsrvd:10;
+			uint32_t rbr_tmout:1;
+			uint32_t rsp_cnt_err:1;
+			uint32_t byte_en_bus:1;
+			uint32_t rsp_dat_err:1;
+			uint32_t rcr_ack_err:1;
+			uint32_t dc_fifo_err:1;
+			uint32_t mex:1;
+			uint32_t rcrthres:1;
+			uint32_t rcrto:1;
+			uint32_t rcr_sha_par:1;
+			uint32_t rbr_pre_par:1;
+			uint32_t port_drop_pkt:1;
+			uint32_t wred_drop:1;
+			uint32_t rbr_pre_empty:1;
+			uint32_t rcr_shadow_full:1;
+			uint32_t config_err:1;
+			uint32_t rcrincon:1;
+			uint32_t rcrfull:1;
+			uint32_t rbr_empty:1;
+			uint32_t rbrfull:1;
+			uint32_t rbrlogpage:1;
+			uint32_t cfiglogpage:1;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t cfiglogpage:1;
+			uint32_t rbrlogpage:1;
+			uint32_t rbrfull:1;
+			uint32_t rbr_empty:1;
+			uint32_t rcrfull:1;
+			uint32_t rcrincon:1;
+			uint32_t config_err:1;
+			uint32_t rcr_shadow_full:1;
+			uint32_t rbr_pre_empty:1;
+			uint32_t wred_drop:1;
+			uint32_t port_drop_pkt:1;
+			uint32_t rbr_pre_par:1;
+			uint32_t rcr_sha_par:1;
+			uint32_t rcrto:1;
+			uint32_t rcrthres:1;
+			uint32_t mex:1;
+			uint32_t dc_fifo_err:1;
+			uint32_t rcr_ack_err:1;
+			uint32_t rsp_dat_err:1;
+			uint32_t byte_en_bus:1;
+			uint32_t rsp_cnt_err:1;
+			uint32_t rbr_tmout:1;
+			uint32_t rsrvd:10;
+#endif
+		} hdw;
+
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t ptrread:16;
+			uint32_t pktread:16;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t pktread:16;
+			uint32_t ptrread:16;
+
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t rsrvd:10;
+			uint32_t rbr_tmout:1;
+			uint32_t rsp_cnt_err:1;
+			uint32_t byte_en_bus:1;
+			uint32_t rsp_dat_err:1;
+			uint32_t rcr_ack_err:1;
+			uint32_t dc_fifo_err:1;
+			uint32_t mex:1;
+			uint32_t rcrthres:1;
+			uint32_t rcrto:1;
+			uint32_t rcr_sha_par:1;
+			uint32_t rbr_pre_par:1;
+			uint32_t port_drop_pkt:1;
+			uint32_t wred_drop:1;
+			uint32_t rbr_pre_empty:1;
+			uint32_t rcr_shadow_full:1;
+			uint32_t config_err:1;
+			uint32_t rcrincon:1;
+			uint32_t rcrfull:1;
+			uint32_t rbr_empty:1;
+			uint32_t rbrfull:1;
+			uint32_t rbrlogpage:1;
+			uint32_t cfiglogpage:1;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t cfiglogpage:1;
+			uint32_t rbrlogpage:1;
+			uint32_t rbrfull:1;
+			uint32_t rbr_empty:1;
+			uint32_t rcrfull:1;
+			uint32_t rcrincon:1;
+			uint32_t config_err:1;
+			uint32_t rcr_shadow_full:1;
+			uint32_t rbr_pre_empty:1;
+			uint32_t wred_drop:1;
+			uint32_t port_drop_pkt:1;
+			uint32_t rbr_pre_par:1;
+			uint32_t rcr_sha_par:1;
+			uint32_t rcrto:1;
+			uint32_t rcrthres:1;
+			uint32_t mex:1;
+			uint32_t dc_fifo_err:1;
+			uint32_t rcr_ack_err:1;
+			uint32_t rsp_dat_err:1;
+			uint32_t byte_en_bus:1;
+			uint32_t rsp_cnt_err:1;
+			uint32_t rbr_tmout:1;
+			uint32_t rsrvd:10;
+#endif
+		} hdw;
+#endif
+	} bits;
+} rx_dma_ctl_stat_t, *p_rx_dma_ctl_stat_t;
+
+typedef union _rcr_flsh_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res1_1:31;
+			uint32_t flsh:1;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t flsh:1;
+			uint32_t res1_1:31;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} rcr_flsh_t, *p_rcr_flsh_t;
+
+
+typedef union _rx_dma_loga_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t multi:1;
+			uint32_t type:3;
+			uint32_t res1:16;
+			uint32_t addr:12;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t addr:12;
+			uint32_t res1:16;
+			uint32_t type:3;
+			uint32_t multi:1;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} rx_dma_loga_t, *p_rx_dma_loga_t;
+
+
+typedef union _rx_dma_logb_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t addr_l:32;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t addr_l:32;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} rx_dma_logb_t, *p_rx_dma_logb_t;
+
+
+#define	RX_DMA_MAILBOX_BYTE_LENGTH	64
+#define	RX_DMA_MBOX_UNUSED_1		8
+#define	RX_DMA_MBOX_UNUSED_2		16
+
+typedef struct _rxdma_mailbox_t {
+	rx_dma_ctl_stat_t	rxdma_ctl_stat;		/* 8 bytes */
+	rbr_stat_t		rbr_stat;		/* 8 bytes */
+	uint32_t		rbr_hdl;		/* 4 bytes (31:0) */
+	uint32_t		rbr_hdh;		/* 4 bytes (31:0) */
+	uint32_t		resv_1[RX_DMA_MBOX_UNUSED_1];
+	uint32_t		rcrstat_c;		/* 4 bytes (31:0) */
+	uint32_t		rcrstat_b;		/* 4 bytes (31:0) */
+	rcrstat_a_t		rcrstat_a;		/* 8 bytes */
+	uint32_t		resv_2[RX_DMA_MBOX_UNUSED_2];
+} rxdma_mailbox_t, *p_rxdma_mailbox_t;
+
+
+
+typedef union _rx_disc_cnt_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res_1:15;
+			uint32_t oflow:1;
+			uint32_t count:16;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t count:16;
+			uint32_t oflow:1;
+			uint32_t res_1:15;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} rx_disc_cnt_t, *p_rx_disc_cnt_t;
+
+#define	RXMISC_DISCARD_REG		(DMC + 0x00090)
+
+#if OLD
+/*
+ * RBR Empty: If the RBR is empty or the prefetch buffer is empty,
+ * packets will be discarded (Each RBR has one).
+ * (16 channels, 0x200)
+ */
+#define	RDC_PRE_EMPTY_REG		(DMC + 0x000B0)
+#define	RDC_PRE_EMPTY_OFFSET(channel)	(RDC_PRE_EMPTY_REG + \
+						(DMC_OFFSET(channel)))
+
+typedef union _rdc_pre_empty_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res_1:15;
+			uint32_t oflow:1;
+			uint32_t count:16;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t count:16;
+			uint32_t oflow:1;
+			uint32_t res_1:15;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} rdc_pre_empty_t, *p_rdc_pre_empty_t;
+#endif
+
+
+#define	FZC_DMC_REG_SIZE		0x20
+#define	FZC_DMC_OFFSET(channel)		(FZC_DMC_REG_SIZE * channel)
+
+/* WRED discard count register (16, 0x40) */
+#define	RED_DIS_CNT_REG			(FZC_DMC + 0x30008)
+#define	RED_DMC_OFFSET(channel)		(0x40 * channel)
+#define	RDC_DIS_CNT_OFFSET(rdc)	(RED_DIS_CNT_REG + RED_DMC_OFFSET(rdc))
+
+typedef union _red_disc_cnt_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res_1:15;
+			uint32_t oflow:1;
+			uint32_t count:16;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t count:16;
+			uint32_t oflow:1;
+			uint32_t res_1:15;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} red_disc_cnt_t, *p_red_disc_cnt_t;
+
+
+#define	RDMC_PRE_PAR_ERR_REG			(FZC_DMC + 0x00078)
+#define	RDMC_SHA_PAR_ERR_REG			(FZC_DMC + 0x00080)
+
+typedef union _rdmc_par_err_log {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res_1:16;
+			uint32_t err:1;
+			uint32_t merr:1;
+			uint32_t res:6;
+			uint32_t addr:8;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t addr:8;
+			uint32_t res:6;
+			uint32_t merr:1;
+			uint32_t err:1;
+			uint32_t res_1:16;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} rdmc_par_err_log_t, *p_rdmc_par_err_log_t;
+
+
+/* Used for accessing RDMC Memory */
+#define	RDMC_MEM_ADDR_REG			(FZC_DMC + 0x00088)
+
+
+typedef union _rdmc_mem_addr {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+
+#define	RDMC_MEM_ADDR_PREFETCH 0
+#define	RDMC_MEM_ADDR_SHADOW 1
+
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res_1:23;
+			uint32_t pre_shad:1;
+			uint32_t addr:8;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t addr:8;
+			uint32_t pre_shad:1;
+			uint32_t res_1:23;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} rdmc_mem_addr_t, *p_rdmc_mem_addr_t;
+
+
+#define	RDMC_MEM_DATA0_REG			(FZC_DMC + 0x00090)
+#define	RDMC_MEM_DATA1_REG			(FZC_DMC + 0x00098)
+#define	RDMC_MEM_DATA2_REG			(FZC_DMC + 0x000A0)
+#define	RDMC_MEM_DATA3_REG			(FZC_DMC + 0x000A8)
+#define	RDMC_MEM_DATA4_REG			(FZC_DMC + 0x000B0)
+
+typedef union _rdmc_mem_data {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t data;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t data;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} rdmc_mem_data_t, *p_rdmc_mem_data_t;
+
+
+typedef union _rdmc_mem_access {
+#define	RDMC_MEM_READ 1
+#define	RDMC_MEM_WRITE 2
+	uint32_t data[5];
+	uint8_t addr;
+	uint8_t location;
+} rdmc_mem_access_t, *p_rdmc_mem_access_t;
+
+
+#define	RX_CTL_DAT_FIFO_STAT_REG			(FZC_DMC + 0x000B8)
+#define	RX_CTL_DAT_FIFO_MASK_REG			(FZC_DMC + 0x000C0)
+#define	RX_CTL_DAT_FIFO_STAT_DBG_REG		(FZC_DMC + 0x000D0)
+
+typedef union _rx_ctl_dat_fifo {
+#define	FIFO_EOP_PORT0 0x1
+#define	FIFO_EOP_PORT1 0x2
+#define	FIFO_EOP_PORT2 0x4
+#define	FIFO_EOP_PORT3 0x8
+#define	FIFO_EOP_ALL 0xF
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res_1:23;
+			uint32_t id_mismatch:1;
+			uint32_t zcp_eop_err:4;
+			uint32_t ipp_eop_err:4;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t ipp_eop_err:4;
+			uint32_t zcp_eop_err:4;
+			uint32_t id_mismatch:1;
+			uint32_t res_1:23;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} rx_ctl_dat_fifo_mask_t, rx_ctl_dat_fifo_stat_t,
+	rx_ctl_dat_fifo_stat_dbg_t, *p_rx_ctl_dat_fifo_t;
+
+
+
+#define	RDMC_TRAINING_VECTOR_REG		(FZC_DMC + 0x000C8)
+
+typedef union _rx_training_vect {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+			uint32_t tv;
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} rx_training_vect_t, *p_rx_training_vect_t;
+
+#define	RXCTL_IPP_EOP_ERR_MASK	0x0000000FULL
+#define	RXCTL_IPP_EOP_ERR_SHIFT	0x0
+#define	RXCTL_ZCP_EOP_ERR_MASK	0x000000F0ULL
+#define	RXCTL_ZCP_EOP_ERR_SHIFT	0x4
+#define	RXCTL_ID_MISMATCH_MASK	0x00000100ULL
+#define	RXCTL_ID_MISMATCH_SHIFT	0x8
+
+
+/*
+ * Receive Packet Header Format
+ * The packet header is prepended to the received packet.
+ * The minimum size is 2 bytes and the maximum size is 18 bytes.
+ */
+/*
+ * Packet header format 0 (2 bytes).
+ */
+typedef union _rx_pkt_hdr0_t {
+	uint16_t value;
+	struct {
+#if	defined(_BIT_FIELDS_HTOL)
+		uint16_t inputport:2;
+		uint16_t maccheck:1;
+		uint16_t class:5;
+		uint16_t vlan:1;
+		uint16_t llcsnap:1;
+		uint16_t noport:1;
+		uint16_t badip:1;
+		uint16_t tcamhit:1;
+		uint16_t tres:2;
+		uint16_t tzfvld:1;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t tzfvld:1;
+		uint16_t tres:2;
+		uint16_t tcamhit:1;
+		uint16_t badip:1;
+		uint16_t noport:1;
+		uint16_t llcsnap:1;
+		uint16_t vlan:1;
+		uint16_t class:5;
+		uint16_t maccheck:1;
+		uint16_t inputport:2;
+#endif
+	} bits;
+} rx_pkt_hdr0_t, *p_rx_pkt_hdr0_t;
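+
+/*
+ * Illustrative sketch: once the 2-byte header has been placed in the
+ * union (endianness handling is omitted here), the classification
+ * status bits can be examined through the bit fields; the field
+ * meanings follow the field names.
+ */
+static int
+rx_pkt_hdr0_drop_hint(rx_pkt_hdr0_t hdr0)
+{
+	/* e.g. a frame with a bad IP header or no destination port */
+	return (hdr0.bits.badip || hdr0.bits.noport);
+}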
+
+
+/*
+ * Packet header format 1.
+ */
+typedef union _rx_pkt_hdr1_b0_t {
+	uint8_t value;
+	struct  {
+#if	defined(_BIT_FIELDS_HTOL)
+		uint8_t hwrsvd:8;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint8_t hwrsvd:8;
+#endif
+	} bits;
+} rx_pkt_hdr1_b0_t, *p_rx_pkt_hdr1_b0_t;
+
+typedef union _rx_pkt_hdr1_b1_t {
+	uint8_t value;
+	struct  {
+#if	defined(_BIT_FIELDS_HTOL)
+		uint8_t tcammatch:8;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint8_t tcammatch:8;
+#endif
+	} bits;
+} rx_pkt_hdr1_b1_t, *p_rx_pkt_hdr1_b1_t;
+
+typedef union _rx_pkt_hdr1_b2_t {
+	uint8_t value;
+	struct  {
+#if	defined(_BIT_FIELDS_HTOL)
+		uint8_t resv:2;
+		uint8_t hashhit:1;
+		uint8_t exact:1;
+		uint8_t hzfvld:1;
+		uint8_t hashidx:3;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint8_t hashidx:3;
+		uint8_t hzfvld:1;
+		uint8_t exact:1;
+		uint8_t hashhit:1;
+		uint8_t resv:2;
+#endif
+	} bits;
+} rx_pkt_hdr1_b2_t, *p_rx_pkt_hdr1_b2_t;
+
+typedef union _rx_pkt_hdr1_b3_t {
+	uint8_t value;
+	struct  {
+#if	defined(_BIT_FIELDS_HTOL)
+		uint8_t zc_resv:8;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint8_t zc_resv:8;
+#endif
+	} bits;
+} rx_pkt_hdr1_b3_t, *p_rx_pkt_hdr1_b3_t;
+
+typedef union _rx_pkt_hdr1_b4_t {
+	uint8_t value;
+	struct  {
+#if	defined(_BIT_FIELDS_HTOL)
+		uint8_t resv:4;
+		uint8_t zflowid:4;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint8_t zflowid:4;
+		uint8_t resv:4;
+#endif
+	} bits;
+} rx_pkt_hdr1_b4_t, *p_rx_pkt_hdr1_b4_t;
+
+typedef union _rx_pkt_hdr1_b5_t {
+	uint8_t value;
+	struct  {
+#if	defined(_BIT_FIELDS_HTOL)
+		uint8_t zflowid:8;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint8_t zflowid:8;
+#endif
+	} bits;
+} rx_pkt_hdr1_b5_t, *p_rx_pkt_hdr1_b5_t;
+
+typedef union _rx_pkt_hdr1_b6_t {
+	uint8_t value;
+	struct  {
+#if	defined(_BIT_FIELDS_HTOL)
+		uint8_t hashval2:8;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint8_t hashval2:8;
+#endif
+	} bits;
+} rx_pkt_hdr1_b6_t, *p_rx_pkt_hdr1_b6_t;
+
+typedef union _rx_pkt_hdr1_b7_t {
+	uint8_t value;
+	struct  {
+#if	defined(_BIT_FIELDS_HTOL)
+		uint8_t hashval2:8;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint8_t hashval2:8;
+#endif
+	} bits;
+} rx_pkt_hdr1_b7_t, *p_rx_pkt_hdr1_b7_t;
+
+typedef union _rx_pkt_hdr1_b8_t {
+	uint8_t value;
+	struct  {
+#if defined(_BIT_FIELDS_HTOL)
+		uint8_t resv:4;
+		uint8_t h1:4;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint8_t h1:4;
+		uint8_t resv:4;
+#endif
+	} bits;
+} rx_pkt_hdr1_b8_t, *p_rx_pkt_hdr1_b8_t;
+
+typedef union _rx_pkt_hdr1_b9_t {
+	uint8_t value;
+	struct  {
+#if defined(_BIT_FIELDS_HTOL)
+		uint8_t h1:8;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint8_t h1:8;
+#endif
+	} bits;
+} rx_pkt_hdr1_b9_t, *p_rx_pkt_hdr1_b9_t;
+
+typedef union _rx_pkt_hdr1_b10_t {
+	uint8_t value;
+	struct  {
+#if defined(_BIT_FIELDS_HTOL)
+		uint8_t resv:4;
+		uint8_t h1:4;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint8_t h1:4;
+		uint8_t resv:4;
+#endif
+	} bits;
+} rx_pkt_hdr1_b10_t, *p_rx_pkt_hdr1_b10_t;
+
+typedef union _rx_pkt_hdr1_b11_b12_t {
+	uint16_t value;
+	struct {
+#if	defined(_BIT_FIELDS_HTOL)
+		uint16_t h1_1:8;
+		uint16_t h1_2:8;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t h1_2:8;
+		uint16_t h1_1:8;
+#endif
+	} bits;
+} rx_pkt_hdr1_b11_b12_t, *p_rx_pkt_hdr1_b11_b12_t;
+
+typedef union _rx_pkt_hdr1_b13_t {
+	uint8_t value;
+	struct  {
+#if defined(_BIT_FIELDS_HTOL)
+		uint8_t usr_data:8;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint8_t usr_data:8;
+#endif
+	} bits;
+} rx_pkt_hdr1_b13_t, *p_rx_pkt_hdr1_b13_t;
+
+typedef union _rx_pkt_hdr1_b14_b17_t {
+	uint32_t value;
+	struct  {
+#if defined(_BIT_FIELDS_HTOL)
+		uint32_t usr_data_1:8;
+		uint32_t usr_data_2:8;
+		uint32_t usr_data_3:8;
+		uint32_t usr_data_4:8;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint32_t usr_data_4:8;
+		uint32_t usr_data_3:8;
+		uint32_t usr_data_2:8;
+		uint32_t usr_data_1:8;
+#endif
+	} bits;
+} rx_pkt_hdr1_b14_b17_t, *p_rx_pkt_hdr1_b14_b17_t;
+
+/* Receive packet header 1 format (18 bytes) */
+typedef struct _rx_pkt_hdr_t {
+	rx_pkt_hdr1_b0_t		rx_hdr1_b0;
+	rx_pkt_hdr1_b1_t		rx_hdr1_b1;
+	rx_pkt_hdr1_b2_t		rx_hdr1_b2;
+	rx_pkt_hdr1_b3_t		rx_hdr1_b3;
+	rx_pkt_hdr1_b4_t		rx_hdr1_b4;
+	rx_pkt_hdr1_b5_t		rx_hdr1_b5;
+	rx_pkt_hdr1_b6_t		rx_hdr1_b6;
+	rx_pkt_hdr1_b7_t		rx_hdr1_b7;
+	rx_pkt_hdr1_b8_t		rx_hdr1_b8;
+	rx_pkt_hdr1_b9_t		rx_hdr1_b9;
+	rx_pkt_hdr1_b10_t		rx_hdr1_b10;
+	rx_pkt_hdr1_b11_b12_t		rx_hdr1_b11_b12;
+	rx_pkt_hdr1_b13_t		rx_hdr1_b13;
+	rx_pkt_hdr1_b14_b17_t		rx_hdr1_b14_b17;
+} rx_pkt_hdr1_t, *p_rx_pkt_hdr1_t;
+
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _SYS_NXGE_NXGE_RXDMA_HW_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/sys/nxge/nxge_sr_hw.h	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,793 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef	_SYS_NXGE_NXGE_SR_HW_H
+#define	_SYS_NXGE_NXGE_SR_HW_H
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#define	ESR_NEPTUNE_DEV_ADDR	0x1E
+#define	ESR_NEPTUNE_BASE	0
+#define	ESR_PORT_ADDR_BASE	0
+#define	PCISR_DEV_ADDR		0x1E
+#define	PCISR_BASE		0
+#define	PCISR_PORT_ADDR_BASE	2
+
+#define	PB	0
+
+#define	SR_RX_TX_COMMON_CONTROL	PB + 0x000
+#define	SR_RX_TX_RESET_CONTROL	PB + 0x004
+#define	SR_RX_POWER_CONTROL	PB + 0x008
+#define	SR_TX_POWER_CONTROL	PB + 0x00C
+#define	SR_MISC_POWER_CONTROL	PB + 0x010
+#define	SR_RX_TX_CONTROL_A	PB + 0x100
+#define	SR_RX_TX_TUNING_A	PB + 0x104
+#define	SR_RX_SYNCCHAR_A	PB + 0x108
+#define	SR_RX_TX_TEST_A		PB + 0x10C
+#define	SR_GLUE_CONTROL0_A	PB + 0x110
+#define	SR_GLUE_CONTROL1_A	PB + 0x114
+#define	SR_RX_TX_CONTROL_B	PB + 0x120
+#define	SR_RX_TX_TUNING_B	PB + 0x124
+#define	SR_RX_SYNCCHAR_B	PB + 0x128
+#define	SR_RX_TX_TEST_B		PB + 0x12C
+#define	SR_GLUE_CONTROL0_B	PB + 0x130
+#define	SR_GLUE_CONTROL1_B	PB + 0x134
+#define	SR_RX_TX_CONTROL_C	PB + 0x140
+#define	SR_RX_TX_TUNING_C	PB + 0x144
+#define	SR_RX_SYNCCHAR_C	PB + 0x148
+#define	SR_RX_TX_TEST_C		PB + 0x14C
+#define	SR_GLUE_CONTROL0_C	PB + 0x150
+#define	SR_GLUE_CONTROL1_C	PB + 0x154
+#define	SR_RX_TX_CONTROL_D	PB + 0x160
+#define	SR_RX_TX_TUNING_D	PB + 0x164
+#define	SR_RX_SYNCCHAR_D	PB + 0x168
+#define	SR_RX_TX_TEST_D		PB + 0x16C
+#define	SR_GLUE_CONTROL0_D	PB + 0x170
+#define	SR_GLUE_CONTROL1_D	PB + 0x174
+#define	SR_RX_TX_TUNING_1_A	PB + 0x184
+#define	SR_RX_TX_TUNING_1_B	PB + 0x1A4
+#define	SR_RX_TX_TUNING_1_C	PB + 0x1C4
+#define	SR_RX_TX_TUNING_1_D	PB + 0x1E4
+#define	SR_RX_TX_TUNING_2_A	PB + 0x204
+#define	SR_RX_TX_TUNING_2_B	PB + 0x224
+#define	SR_RX_TX_TUNING_2_C	PB + 0x244
+#define	SR_RX_TX_TUNING_2_D	PB + 0x264
+#define	SR_RX_TX_TUNING_3_A	PB + 0x284
+#define	SR_RX_TX_TUNING_3_B	PB + 0x2A4
+#define	SR_RX_TX_TUNING_3_C	PB + 0x2C4
+#define	SR_RX_TX_TUNING_3_D	PB + 0x2E4
+
+/*
+ * Shift right by 1 because the PRM requires that all serdes register
+ * addresses be divided by 2
+ */
+#define	ESR_NEP_RX_TX_COMMON_CONTROL_L_ADDR()	(ESR_NEPTUNE_BASE +\
+						(SR_RX_TX_COMMON_CONTROL >> 1))
+#define	ESR_NEP_RX_TX_COMMON_CONTROL_H_ADDR()	(ESR_NEPTUNE_BASE +\
+						(SR_RX_TX_COMMON_CONTROL >> 1)\
+						+ 1)
+#define	ESR_NEP_RX_TX_RESET_CONTROL_L_ADDR()	(ESR_NEPTUNE_BASE +\
+						(SR_RX_TX_RESET_CONTROL >> 1))
+#define	ESR_NEP_RX_TX_RESET_CONTROL_H_ADDR()	(ESR_NEPTUNE_BASE +\
+						(SR_RX_TX_RESET_CONTROL >> 1)\
+						+ 1)
+#define	ESR_NEP_RX_POWER_CONTROL_L_ADDR()	(ESR_NEPTUNE_BASE +\
+						(SR_RX_POWER_CONTROL >> 1))
+#define	ESR_NEP_RX_POWER_CONTROL_H_ADDR()	(ESR_NEPTUNE_BASE +\
+						(SR_RX_POWER_CONTROL >> 1) + 1)
+#define	ESR_NEP_TX_POWER_CONTROL_L_ADDR()	(ESR_NEPTUNE_BASE +\
+						(SR_TX_POWER_CONTROL >> 1))
+#define	ESR_NEP_TX_POWER_CONTROL_H_ADDR()	(ESR_NEPTUNE_BASE +\
+						(SR_TX_POWER_CONTROL >> 1) + 1)
+#define	ESR_NEP_MISC_POWER_CONTROL_L_ADDR()	(ESR_NEPTUNE_BASE +\
+						(SR_MISC_POWER_CONTROL >> 1))
+#define	ESR_NEP_MISC_POWER_CONTROL_H_ADDR()	(ESR_NEPTUNE_BASE +\
+						(SR_MISC_POWER_CONTROL >> 1)\
+						+ 1)
+#define	ESR_NEP_RX_TX_CONTROL_L_ADDR(chan)	((ESR_NEPTUNE_BASE +\
+						SR_RX_TX_CONTROL_A +\
+						(chan * 0x20)) >> 1)
+#define	ESR_NEP_RX_TX_CONTROL_H_ADDR(chan)	((ESR_NEPTUNE_BASE +\
+						SR_RX_TX_CONTROL_A +\
+						(chan * 0x20)) >> 1) + 1
+#define	ESR_NEP_RX_TX_TUNING_L_ADDR(chan)	((ESR_NEPTUNE_BASE +\
+						SR_RX_TX_TUNING_A +\
+						(chan * 0x20)) >> 1)
+#define	ESR_NEP_RX_TX_TUNING_H_ADDR(chan)	((ESR_NEPTUNE_BASE +\
+						SR_RX_TX_TUNING_A +\
+						(chan * 0x20)) >> 1) + 1
+#define	ESR_NEP_RX_TX_SYNCCHAR_L_ADDR(chan)	((ESR_NEPTUNE_BASE +\
+						SR_RX_SYNCCHAR_A +\
+						(chan * 0x20)) >> 1)
+#define	ESR_NEP_RX_TX_SYNCCHAR_H_ADDR(chan)	((ESR_NEPTUNE_BASE +\
+						SR_RX_SYNCCHAR_A +\
+						(chan * 0x20)) >> 1) + 1
+#define	ESR_NEP_RX_TX_TEST_L_ADDR(chan)		((ESR_NEPTUNE_BASE +\
+						SR_RX_TX_TEST_A +\
+						(chan * 0x20)) >> 1)
+#define	ESR_NEP_RX_TX_TEST_H_ADDR(chan)		((ESR_NEPTUNE_BASE +\
+						SR_RX_TX_TEST_A +\
+						(chan * 0x20)) >> 1) + 1
+#define	ESR_NEP_GLUE_CONTROL0_L_ADDR(chan)	((ESR_NEPTUNE_BASE +\
+						SR_GLUE_CONTROL0_A +\
+						(chan * 0x20)) >> 1)
+#define	ESR_NEP_GLUE_CONTROL0_H_ADDR(chan)	((ESR_NEPTUNE_BASE +\
+						SR_GLUE_CONTROL0_A +\
+						(chan * 0x20)) >> 1) + 1
+#define	ESR_NEP_GLUE_CONTROL1_L_ADDR(chan)	((ESR_NEPTUNE_BASE +\
+						SR_GLUE_CONTROL1_A +\
+						(chan * 0x20)) >> 1)
+#define	ESR_NEP_GLUE_CONTROL1_H_ADDR(chan)	((ESR_NEPTUNE_BASE +\
+						SR_GLUE_CONTROL1_A +\
+						(chan * 0x20)) >> 1) + 1
+#define	ESR_NEP_RX_TX_TUNING_1_L_ADDR(chan)	((ESR_NEPTUNE_BASE +\
+						SR_RX_TX_TUNING_1_A +\
+						(chan * 0x20)) >> 1)
+#define	ESR_NEP_RX_TX_TUNING_1_H_ADDR(chan)	((ESR_NEPTUNE_BASE +\
+						SR_RX_TX_TUNING_1_A +\
+						(chan * 0x20)) >> 1) + 1
+#define	ESR_NEP_RX_TX_TUNING_2_L_ADDR(chan)	((ESR_NEPTUNE_BASE +\
+						SR_RX_TX_TUNING_2_A +\
+						(chan * 0x20)) >> 1)
+#define	ESR_NEP_RX_TX_TUNING_2_H_ADDR(chan)	((ESR_NEPTUNE_BASE +\
+						SR_RX_TX_TUNING_2_A +\
+						(chan * 0x20)) >> 1) + 1
+#define	ESR_NEP_RX_TX_TUNING_3_L_ADDR(chan)	((ESR_NEPTUNE_BASE +\
+						SR_RX_TX_TUNING_3_A +\
+						(chan * 0x20)) >> 1)
+#define	ESR_NEP_RX_TX_TUNING_3_H_ADDR(chan)	((ESR_NEPTUNE_BASE +\
+						SR_RX_TX_TUNING_3_A +\
+						(chan * 0x20)) >> 1) + 1
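+
+/*
+ * Illustrative worked example: lane B (chan = 1) of the RX/TX control
+ * register sits at byte offset 0x120, so the macros above yield
+ * (0 + 0x100 + 1 * 0x20) >> 1 = 0x90 for the low half and 0x91 for
+ * the high half of the register.
+ */
+static uint16_t
+esr_rx_tx_control_l_addr(int lane)
+{
+	return ((uint16_t)ESR_NEP_RX_TX_CONTROL_L_ADDR(lane));
+}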
+
+typedef	union _sr_rx_tx_common_ctrl_l {
+	uint16_t value;
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t res3		: 3;
+		uint16_t refclkr_freq	: 5;
+		uint16_t res4		: 8;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t res4		: 8;
+		uint16_t refclkr_freq	: 5;
+		uint16_t res3		: 3;
+#else
+#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
+#endif
+	} bits;
+} sr_rx_tx_common_ctrl_l;
+
+typedef	union _sr_rx_tx_common_ctrl_h {
+	uint16_t value;
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t res1		: 5;
+		uint16_t tdmaster	: 3;
+		uint16_t tp		: 2;
+		uint16_t tz		: 2;
+		uint16_t res2		: 2;
+		uint16_t revlbrefsel	: 2;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t revlbrefsel	: 2;
+		uint16_t res2		: 2;
+		uint16_t tz		: 2;
+		uint16_t tp		: 2;
+		uint16_t tdmaster	: 3;
+		uint16_t res1		: 5;
+#else
+#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
+#endif
+	} bits;
+} sr_rx_tx_common_ctrl_h;
+
+
+/* RX TX Common Control Register field values */
+
+#define	TDMASTER_LANE_A		0
+#define	TDMASTER_LANE_B		1
+#define	TDMASTER_LANE_C		2
+#define	TDMASTER_LANE_D		3
+
+#define	REVLBREFSEL_GBT_RBC_A_O		0
+#define	REVLBREFSEL_GBT_RBC_B_O		1
+#define	REVLBREFSEL_GBT_RBC_C_O		2
+#define	REVLBREFSEL_GBT_RBC_D_O		3
+
+#define	REFCLKR_FREQ_SIM		0
+#define	REFCLKR_FREQ_53_125		0x1
+#define	REFCLKR_FREQ_62_5		0x3
+#define	REFCLKR_FREQ_70_83		0x4
+#define	REFCLKR_FREQ_75			0x5
+#define	REFCLKR_FREQ_78_125		0x6
+#define	REFCLKR_FREQ_79_6875		0x7
+#define	REFCLKR_FREQ_83_33		0x8
+#define	REFCLKR_FREQ_85			0x9
+#define	REFCLKR_FREQ_100		0xA
+#define	REFCLKR_FREQ_104_17		0xB
+#define	REFCLKR_FREQ_106_25		0xC
+#define	REFCLKR_FREQ_120		0xF
+#define	REFCLKR_FREQ_125		0x10
+#define	REFCLKR_FREQ_127_5		0x11
+#define	REFCLKR_FREQ_141_67		0x13
+#define	REFCLKR_FREQ_150		0x15
+#define	REFCLKR_FREQ_156_25		0x16
+#define	REFCLKR_FREQ_159_375		0x17
+#define	REFCLKR_FREQ_170		0x19
+#define	REFCLKR_FREQ_212_5		0x1E
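+
+/*
+ * Illustrative sketch: selecting a 156.25 MHz reference clock through
+ * the low half of the common control register.  Only the value is
+ * built here; the serdes write itself would go through the driver's
+ * ESR access routines.
+ */
+static uint16_t
+sr_common_ctrl_l_refclk_156_25(void)
+{
+	sr_rx_tx_common_ctrl_l ctrl;
+
+	ctrl.value = 0;
+	ctrl.bits.refclkr_freq = REFCLKR_FREQ_156_25;
+	return (ctrl.value);
+}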
+
+typedef	union _sr_rx_tx_reset_ctrl_l {
+	uint16_t value;
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t rxreset_0a	: 1;
+		uint16_t rxreset_0b	: 1;
+		uint16_t rxreset_0c	: 1;
+		uint16_t rxreset_0d	: 1;
+		uint16_t rxreset_1a	: 1;
+		uint16_t rxreset_1b	: 1;
+		uint16_t rxreset_1c	: 1;
+		uint16_t rxreset_1d	: 1;
+		uint16_t rxreset_2a	: 1;
+		uint16_t rxreset_2b	: 1;
+		uint16_t rxreset_2c	: 1;
+		uint16_t rxreset_2d	: 1;
+		uint16_t rxreset_3a	: 1;
+		uint16_t rxreset_3b	: 1;
+		uint16_t rxreset_3c	: 1;
+		uint16_t rxreset_3d	: 1;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t rxreset_3d	: 1;
+		uint16_t rxreset_3c	: 1;
+		uint16_t rxreset_3b	: 1;
+		uint16_t rxreset_3a	: 1;
+		uint16_t rxreset_2d	: 1;
+		uint16_t rxreset_2c	: 1;
+		uint16_t rxreset_2b	: 1;
+		uint16_t rxreset_2a	: 1;
+		uint16_t rxreset_1d	: 1;
+		uint16_t rxreset_1c	: 1;
+		uint16_t rxreset_1b	: 1;
+		uint16_t rxreset_1a	: 1;
+		uint16_t rxreset_0d	: 1;
+		uint16_t rxreset_0c	: 1;
+		uint16_t rxreset_0b	: 1;
+		uint16_t rxreset_0a	: 1;
+#else
+#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
+#endif
+	} bits;
+} sr_rx_tx_reset_ctrl_l;
+
+
+typedef	union _sr_rx_tx_reset_ctrl_h {
+	uint16_t value;
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t txreset_0a	: 1;
+		uint16_t txreset_0b	: 1;
+		uint16_t txreset_0c	: 1;
+		uint16_t txreset_0d	: 1;
+		uint16_t txreset_1a	: 1;
+		uint16_t txreset_1b	: 1;
+		uint16_t txreset_1c	: 1;
+		uint16_t txreset_1d	: 1;
+		uint16_t txreset_2a	: 1;
+		uint16_t txreset_2b	: 1;
+		uint16_t txreset_2c	: 1;
+		uint16_t txreset_2d	: 1;
+		uint16_t txreset_3a	: 1;
+		uint16_t txreset_3b	: 1;
+		uint16_t txreset_3c	: 1;
+		uint16_t txreset_3d	: 1;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t txreset_3d	: 1;
+		uint16_t txreset_3c	: 1;
+		uint16_t txreset_3b	: 1;
+		uint16_t txreset_3a	: 1;
+		uint16_t txreset_2d	: 1;
+		uint16_t txreset_2c	: 1;
+		uint16_t txreset_2b	: 1;
+		uint16_t txreset_2a	: 1;
+		uint16_t txreset_1d	: 1;
+		uint16_t txreset_1c	: 1;
+		uint16_t txreset_1b	: 1;
+		uint16_t txreset_1a	: 1;
+		uint16_t txreset_0d	: 1;
+		uint16_t txreset_0c	: 1;
+		uint16_t txreset_0b	: 1;
+		uint16_t txreset_0a	: 1;
+#else
+#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
+#endif
+	} bits;
+} sr_rx_tx_reset_ctrl_h;
+
+typedef	union _sr_rx_power_ctrl_l {
+	uint16_t value;
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t pdrxlos_0a	: 1;
+		uint16_t pdrxlos_0b	: 1;
+		uint16_t pdrxlos_0c	: 1;
+		uint16_t pdrxlos_0d	: 1;
+		uint16_t pdrxlos_1a	: 1;
+		uint16_t pdrxlos_1b	: 1;
+		uint16_t pdrxlos_1c	: 1;
+		uint16_t pdrxlos_1d	: 1;
+		uint16_t pdrxlos_2a	: 1;
+		uint16_t pdrxlos_2b	: 1;
+		uint16_t pdrxlos_2c	: 1;
+		uint16_t pdrxlos_2d	: 1;
+		uint16_t pdrxlos_3a	: 1;
+		uint16_t pdrxlos_3b	: 1;
+		uint16_t pdrxlos_3c	: 1;
+		uint16_t pdrxlos_3d	: 1;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t pdrxlos_3d	: 1;
+		uint16_t pdrxlos_3c	: 1;
+		uint16_t pdrxlos_3b	: 1;
+		uint16_t pdrxlos_3a	: 1;
+		uint16_t pdrxlos_2d	: 1;
+		uint16_t pdrxlos_2c	: 1;
+		uint16_t pdrxlos_2b	: 1;
+		uint16_t pdrxlos_2a	: 1;
+		uint16_t pdrxlos_1d	: 1;
+		uint16_t pdrxlos_1c	: 1;
+		uint16_t pdrxlos_1b	: 1;
+		uint16_t pdrxlos_1a	: 1;
+		uint16_t pdrxlos_0d	: 1;
+		uint16_t pdrxlos_0c	: 1;
+		uint16_t pdrxlos_0b	: 1;
+		uint16_t pdrxlos_0a	: 1;
+#else
+#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
+#endif
+	} bits;
+} sr_rx_power_ctrl_l_t;
+
+
+typedef	union _sr_rx_power_ctrl_h {
+	uint16_t value;
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t pdownr_0a	: 1;
+		uint16_t pdownr_0b	: 1;
+		uint16_t pdownr_0c	: 1;
+		uint16_t pdownr_0d	: 1;
+		uint16_t pdownr_1a	: 1;
+		uint16_t pdownr_1b	: 1;
+		uint16_t pdownr_1c	: 1;
+		uint16_t pdownr_1d	: 1;
+		uint16_t pdownr_2a	: 1;
+		uint16_t pdownr_2b	: 1;
+		uint16_t pdownr_2c	: 1;
+		uint16_t pdownr_2d	: 1;
+		uint16_t pdownr_3a	: 1;
+		uint16_t pdownr_3b	: 1;
+		uint16_t pdownr_3c	: 1;
+		uint16_t pdownr_3d	: 1;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t pdownr_3d	: 1;
+		uint16_t pdownr_3c	: 1;
+		uint16_t pdownr_3b	: 1;
+		uint16_t pdownr_3a	: 1;
+		uint16_t pdownr_2d	: 1;
+		uint16_t pdownr_2c	: 1;
+		uint16_t pdownr_2b	: 1;
+		uint16_t pdownr_2a	: 1;
+		uint16_t pdownr_1d	: 1;
+		uint16_t pdownr_1c	: 1;
+		uint16_t pdownr_1b	: 1;
+		uint16_t pdownr_1a	: 1;
+		uint16_t pdownr_0d	: 1;
+		uint16_t pdownr_0c	: 1;
+		uint16_t pdownr_0b	: 1;
+		uint16_t pdownr_0a	: 1;
+#else
+#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
+#endif
+	} bits;
+} sr_rx_power_ctrl_h_t;
+
+typedef	union _sr_tx_power_ctrl_l {
+	uint16_t value;
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t res1		: 8;
+		uint16_t pdownppll0	: 1;
+		uint16_t pdownppll1	: 1;
+		uint16_t pdownppll2	: 1;
+		uint16_t pdownppll3	: 1;
+		uint16_t res2		: 4;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t res2		: 4;
+		uint16_t pdownppll3	: 1;
+		uint16_t pdownppll2	: 1;
+		uint16_t pdownppll1	: 1;
+		uint16_t pdownppll0	: 1;
+		uint16_t res1		: 8;
+#else
+#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
+#endif
+	} bits;
+} sr_tx_power_ctrl_l_t;
+
+typedef	union _sr_tx_power_ctrl_h {
+	uint16_t value;
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t pdownt_0a	: 1;
+		uint16_t pdownt_0b	: 1;
+		uint16_t pdownt_0c	: 1;
+		uint16_t pdownt_0d	: 1;
+		uint16_t pdownt_1a	: 1;
+		uint16_t pdownt_1b	: 1;
+		uint16_t pdownt_1c	: 1;
+		uint16_t pdownt_1d	: 1;
+		uint16_t pdownt_2a	: 1;
+		uint16_t pdownt_2b	: 1;
+		uint16_t pdownt_2c	: 1;
+		uint16_t pdownt_2d	: 1;
+		uint16_t pdownt_3a	: 1;
+		uint16_t pdownt_3b	: 1;
+		uint16_t pdownt_3c	: 1;
+		uint16_t pdownt_3d	: 1;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t pdownt_3d	: 1;
+		uint16_t pdownt_3c	: 1;
+		uint16_t pdownt_3b	: 1;
+		uint16_t pdownt_3a	: 1;
+		uint16_t pdownt_2d	: 1;
+		uint16_t pdownt_2c	: 1;
+		uint16_t pdownt_2b	: 1;
+		uint16_t pdownt_2a	: 1;
+		uint16_t pdownt_1d	: 1;
+		uint16_t pdownt_1c	: 1;
+		uint16_t pdownt_1b	: 1;
+		uint16_t pdownt_1a	: 1;
+		uint16_t pdownt_0d	: 1;
+		uint16_t pdownt_0c	: 1;
+		uint16_t pdownt_0b	: 1;
+		uint16_t pdownt_0a	: 1;
+#else
+#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
+#endif
+	} bits;
+} sr_tx_power_ctrl_h_t;
+
+typedef	union _sr_misc_power_ctrl_l {
+	uint16_t value;
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t res1		: 3;
+		uint16_t pdrtrim	: 1;
+		uint16_t pdownpecl0	: 1;
+		uint16_t pdownpecl1	: 1;
+		uint16_t pdownpecl2	: 1;
+		uint16_t pdownpecl3	: 1;
+		uint16_t pdownppll0	: 1;
+		uint16_t pdownppll1	: 1;
+		uint16_t pdownppll2	: 1;
+		uint16_t pdownppll3	: 1;
+		uint16_t res2		: 4;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t res2		: 4;
+		uint16_t pdownppll3	: 1;
+		uint16_t pdownppll2	: 1;
+		uint16_t pdownppll1	: 1;
+		uint16_t pdownppll0	: 1;
+		uint16_t pdownpecl3	: 1;
+		uint16_t pdownpecl2	: 1;
+		uint16_t pdownpecl1	: 1;
+		uint16_t pdownpecl0	: 1;
+		uint16_t pdrtrim	: 1;
+		uint16_t res1		: 3;
+#else
+#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
+#endif
+	} bits;
+} sr_misc_power_ctrl_l_t;
+
+typedef	union _misc_power_ctrl_h {
+	uint16_t value;
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t pdclkout0	: 1;
+		uint16_t pdclkout1	: 1;
+		uint16_t pdclkout2	: 1;
+		uint16_t pdclkout3	: 1;
+		uint16_t res1		: 12;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t res1		: 12;
+		uint16_t pdclkout3	: 1;
+		uint16_t pdclkout2	: 1;
+		uint16_t pdclkout1	: 1;
+		uint16_t pdclkout0	: 1;
+#else
+#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
+#endif
+	} bits;
+} misc_power_ctrl_h_t;
+
+typedef	union _sr_rx_tx_ctrl_l {
+	uint16_t value;
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t res1		: 2;
+		uint16_t rxpreswin	: 2;
+		uint16_t res2		: 1;
+		uint16_t risefall	: 3;
+		uint16_t res3		: 7;
+		uint16_t enstretch	: 1;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t enstretch	: 1;
+		uint16_t res3		: 7;
+		uint16_t risefall	: 3;
+		uint16_t res2		: 1;
+		uint16_t rxpreswin	: 2;
+		uint16_t res1		: 2;
+#else
+#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
+#endif
+	} bits;
+} sr_rx_tx_ctrl_l_t;
+
+typedef	union _sr_rx_tx_ctrl_h {
+	uint16_t value;
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t biascntl	: 1;
+		uint16_t res1		: 5;
+		uint16_t tdenfifo	: 1;
+		uint16_t tdws20		: 1;
+		uint16_t vmuxlo		: 2;
+		uint16_t vpulselo	: 2;
+		uint16_t res2		: 4;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t res2		: 4;
+		uint16_t vpulselo	: 2;
+		uint16_t vmuxlo		: 2;
+		uint16_t tdws20		: 1;
+		uint16_t tdenfifo	: 1;
+		uint16_t res1		: 5;
+		uint16_t biascntl	: 1;
+#else
+#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
+#endif
+	} bits;
+} sr_rx_tx_ctrl_h_t;
+
+#define	RXPRESWIN_52US_300BITTIMES	0
+#define	RXPRESWIN_53US_300BITTIMES	1
+#define	RXPRESWIN_54US_300BITTIMES	2
+#define	RXPRESWIN_55US_300BITTIMES	3
+
+typedef	union _sr_rx_tx_tuning_l {
+	uint16_t value;
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t rxeq		: 4;
+		uint16_t res1		: 12;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t res1		: 12;
+		uint16_t rxeq		: 4;
+#else
+#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
+#endif
+	} bits;
+} sr_rx_tx_tuning_l_t;
+
+typedef	union _sr_rx_tx_tuning_h {
+	uint16_t value;
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t res1		: 8;
+		uint16_t rp		: 2;
+		uint16_t rz		: 2;
+		uint16_t vtxlo		: 4;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t vtxlo		: 4;
+		uint16_t rz		: 2;
+		uint16_t rp		: 2;
+		uint16_t res1		: 8;
+#else
+#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
+#endif
+	} bits;
+} sr_rx_tx_tuning_h_t;
+
+typedef	union _sr_rx_syncchar_l {
+	uint16_t value;
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t syncchar_0_3	: 4;
+		uint16_t res1		: 2;
+		uint16_t syncmask	: 10;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t syncmask	: 10;
+		uint16_t res1		: 2;
+		uint16_t syncchar_0_3	: 4;
+#else
+#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
+#endif
+	} bits;
+} sr_rx_syncchar_l_t;
+
+typedef	union _sr_rx_syncchar_h {
+	uint16_t value;
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t res1		: 1;
+		uint16_t syncpol	: 1;
+		uint16_t res2		: 8;
+		uint16_t syncchar_4_10	: 6;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t syncchar_4_10	: 6;
+		uint16_t res2		: 8;
+		uint16_t syncpol	: 1;
+		uint16_t res1		: 1;
+#else
+#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
+#endif
+	} bits;
+} sr_rx_syncchar_h_t;
+
+typedef	union _sr_rx_tx_test_l {
+	uint16_t value;
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t res1		: 15;
+		uint16_t ref50		: 1;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t ref50		: 1;
+		uint16_t res1		: 15;
+#else
+#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
+#endif
+	} bits;
+} sr_rx_tx_test_l_t;
+
+typedef	union _sr_rx_tx_test_h {
+	uint16_t value;
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t res1		: 5;
+		uint16_t selftest	: 3;
+		uint16_t res2		: 8;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t res2		: 8;
+		uint16_t selftest	: 3;
+		uint16_t res1		: 5;
+#else
+#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
+#endif
+	} bits;
+} sr_rx_tx_test_h_t;
+
+typedef	union _sr_glue_ctrl0_l {
+	uint16_t value;
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t rxlos_test	: 1;
+		uint16_t res1		: 1;
+		uint16_t rxlosenable	: 1;
+		uint16_t fastresync	: 1;
+		uint16_t samplerate	: 4;
+		uint16_t thresholdcount	: 8;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t thresholdcount	: 8;
+		uint16_t samplerate	: 4;
+		uint16_t fastresync	: 1;
+		uint16_t rxlosenable	: 1;
+		uint16_t res1		: 1;
+		uint16_t rxlos_test	: 1;
+#else
+#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
+#endif
+	} bits;
+} sr_glue_ctrl0_l_t;
+
+typedef	union _sr_glue_ctrl0_h {
+	uint16_t value;
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t res1		: 5;
+		uint16_t bitlocktime	: 3;
+		uint16_t res2		: 8;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t res2		: 8;
+		uint16_t bitlocktime	: 3;
+		uint16_t res1		: 5;
+#else
+#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
+#endif
+	} bits;
+} sr_glue_ctrl0_h_t;
+
+#define	BITLOCKTIME_64_CYCLES		0
+#define	BITLOCKTIME_128_CYCLES		1
+#define	BITLOCKTIME_256_CYCLES		2
+#define	BITLOCKTIME_300_CYCLES		3
+#define	BITLOCKTIME_384_CYCLES		4
+#define	BITLOCKTIME_512_CYCLES		5
+#define	BITLOCKTIME_1024_CYCLES		6
+#define	BITLOCKTIME_2048_CYCLES		7
+
+typedef	union _sr_glue_ctrl1_l {
+	uint16_t value;
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t res1		: 14;
+		uint16_t inittime	: 2;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t inittime	: 2;
+		uint16_t res1		: 14;
+#else
+#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
+#endif
+	} bits;
+} sr_glue_ctrl1_l_t;
+
+typedef	union glue_ctrl1_h {
+	uint16_t value;
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t termr_cfg	: 2;
+		uint16_t termt_cfg	: 2;
+		uint16_t rtrimen	: 2;
+		uint16_t res1		: 10;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t res1		: 10;
+		uint16_t rtrimen	: 2;
+		uint16_t termt_cfg	: 2;
+		uint16_t termr_cfg	: 2;
+#else
+#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
+#endif
+	} bits;
+} glue_ctrl1_h_t;
+
+#define	TERM_CFG_67OHM		0
+#define	TERM_CFG_72OHM		1
+#define	TERM_CFG_80OHM		2
+#define	TERM_CFG_87OHM		3
+#define	TERM_CFG_46OHM		4
+#define	TERM_CFG_48OHM		5
+#define	TERM_CFG_52OHM		6
+#define	TERM_CFG_55OHM		7
+
+#define	INITTIME_60US		0
+#define	INITTIME_120US		1
+#define	INITTIME_240US		2
+#define	INITTIME_480US		3
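+
+/*
+ * Encoding sketch (illustrative only; the register read/write steps in
+ * angle brackets are placeholders, not part of this interface): the
+ * termination and timing fields take the symbolic values above, e.g.
+ * programming 80 ohm receive termination through the glue control 1
+ * (high) register:
+ *
+ *	glue_ctrl1_h_t gc;
+ *
+ *	gc.value = <16-bit image read from the glue control 1 high register>;
+ *	gc.bits.termr_cfg = TERM_CFG_80OHM;
+ *	<write gc.value back>
+ */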
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _SYS_NXGE_NXGE_SR_HW_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/sys/nxge/nxge_txc.h	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,83 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef	_SYS_NXGE_NXGE_TXC_H
+#define	_SYS_NXGE_NXGE_TXC_H
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#include <sys/nxge/nxge_txc_hw.h>
+#include <npi_txc.h>
+
+/* Suggested by hardware team 7/19/2006 */
+#define	TXC_DMA_MAX_BURST_DEFAULT	1530	/* Max burst used by DRR */
+
+typedef	struct _txc_errlog {
+	txc_ro_states_t		ro_st;
+	txc_sf_states_t		sf_st;
+} txc_errlog_t;
+
+typedef struct _nxge_txc_stats {
+	uint32_t		pkt_stuffed;
+	uint32_t		pkt_xmit;
+	uint32_t		ro_correct_err;
+	uint32_t		ro_uncorrect_err;
+	uint32_t		sf_correct_err;
+	uint32_t		sf_uncorrect_err;
+	uint32_t		address_failed;
+	uint32_t		dma_failed;
+	uint32_t		length_failed;
+	uint32_t		pkt_assy_dead;
+	uint32_t		reorder_err;
+	txc_errlog_t		errlog;
+} nxge_txc_stats_t, *p_nxge_txc_stats_t;
+
+typedef struct _nxge_txc {
+	uint32_t		dma_max_burst;
+	uint32_t		dma_length;
+	uint32_t		training;
+	uint8_t			debug_select;
+	uint64_t		control_status;
+	uint64_t		port_dma_list;
+	nxge_txc_stats_t	*txc_stats;
+} nxge_txc_t, *p_nxge_txc_t;
+
+/*
+ * Transmit Controller (TXC) prototypes.
+ */
+nxge_status_t nxge_txc_init(p_nxge_t);
+nxge_status_t nxge_txc_uninit(p_nxge_t);
+nxge_status_t nxge_txc_handle_sys_errors(p_nxge_t);
+void nxge_txc_inject_err(p_nxge_t, uint32_t);
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _SYS_NXGE_NXGE_TXC_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/sys/nxge/nxge_txc_hw.h	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,1270 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef	_SYS_NXGE_NXGE_TXC_HW_H
+#define	_SYS_NXGE_NXGE_TXC_HW_H
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#include <nxge_defs.h>
+
+/* Transmit Ring Scheduler Registers */
+#define	TXC_PORT_DMA_ENABLE_REG		(FZC_TXC + 0x20028)
+#define	TXC_PORT_DMA_LIST		0	/* RW bit 23:0 */
+#define	TXC_DMA_DMA_LIST_MASK		0x0000000000FFFFFFULL
+#define	TXC_DMA_DMA_LIST_MASK_N2	0x000000000000FFFFULL
+
+typedef union _txc_port_enable_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res:8;
+			uint32_t port_dma_list:24;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t port_dma_list:24;
+			uint32_t res:8;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txc_port_enable_t, *p_txc_port_enable_t;
+
+typedef union _txc_port_enable_n2_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res:16;
+			uint32_t port_dma_list:16;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t port_dma_list:16;
+			uint32_t res:16;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txc_port_enable_n2_t, *p_txc_port_enable_n2_t;
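+
+/*
+ * Usage sketch (illustrative only; the register read/write steps in
+ * angle brackets are placeholders, not part of this interface): the
+ * value/bits overlay lets the per-port DMA list be updated by field
+ * regardless of host bit-field ordering, e.g.
+ *
+ *	txc_port_enable_t enable;
+ *
+ *	enable.value = <64-bit image read from TXC_PORT_DMA_ENABLE_REG>;
+ *	enable.bits.ldw.port_dma_list |= (1 << channel);
+ *	<write enable.value back to TXC_PORT_DMA_ENABLE_REG>
+ */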
+
+/* Transmit Controller - Registers */
+#define	TXC_FZC_OFFSET			0x1000
+#define	TXC_FZC_PORT_OFFSET(port)	(port * TXC_FZC_OFFSET)
+#define	TXC_FZC_CHANNEL_OFFSET(channel)	(channel * TXC_FZC_OFFSET)
+#define	TXC_FZC_REG_CN_OFFSET(x, cn)	(x + TXC_FZC_CHANNEL_OFFSET(cn))
+
+#define	TXC_FZC_CONTROL_OFFSET		0x100
+#define	TXC_FZC_CNTL_PORT_OFFSET(port)	(port * TXC_FZC_CONTROL_OFFSET)
+#define	TXC_FZC_REG_PT_OFFSET(x, pt)	(x + TXC_FZC_CNTL_PORT_OFFSET(pt))
+
+#define	TXC_DMA_MAX_BURST_REG		(FZC_TXC + 0x00000)
+#define	TXC_DMA_MAX_BURST_SHIFT		0	/* RW bit 19:0 */
+#define	TXC_DMA_MAX_BURST_MASK		0x00000000000FFFFFULL
+
+#define	TXC_MAX_BURST_OFFSET(channel)	(TXC_DMA_MAX_BURST_REG + \
+					(channel * TXC_FZC_OFFSET))
+
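+/*
+ * Offset arithmetic example (derived from the macros above): the
+ * per-channel copy of the DMA max burst register for channel 2 is at
+ *
+ *	TXC_MAX_BURST_OFFSET(2) = (FZC_TXC + 0x00000) + (2 * 0x1000)
+ *				= FZC_TXC + 0x2000
+ */
+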
+typedef union _txc_dma_max_burst_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res:12;
+			uint32_t dma_max_burst:20;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t dma_max_burst:20;
+			uint32_t res:12;
+
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txc_dma_max_burst_t, *p_txc_dma_max_burst_t;
+
+/* DRR Performance Monitoring Register */
+#define	TXC_DMA_MAX_LENGTH_REG		(FZC_TXC + 0x00008)
+#define	TXC_DMA_MAX_LENGTH_SHIFT	0	/* RW bit 27:0 */
+#define	TXC_DMA_MAX_LENGTH_MASK		0x000000000FFFFFFFULL
+
+#define	TXC_DMA_MAX_LEN_OFFSET(channel)	(TXC_DMA_MAX_LENGTH_REG + \
+					(channel * TXC_FZC_OFFSET))
+
+typedef union _txc_dma_max_length_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res:4;
+			uint32_t dma_length:28;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t dma_length:28;
+			uint32_t res:4;
+
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txc_dma_max_length_t, *p_txc_dma_max_length_t;
+
+
+#define	TXC_CONTROL_REG			(FZC_TXC + 0x20000)
+#define	TXC_DMA_LENGTH_SHIFT		0	/* RW bit 27:0 */
+#define	TXC_DMA_LENGTH_MASK		0x000000000FFFFFFFULL
+
+typedef union _txc_control_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res:27;
+			uint32_t txc_enabled:1;
+			uint32_t port3_enabled:1;
+			uint32_t port2_enabled:1;
+			uint32_t port1_enabled:1;
+			uint32_t port0_enabled:1;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t port0_enabled:1;
+			uint32_t port1_enabled:1;
+			uint32_t port2_enabled:1;
+			uint32_t port3_enabled:1;
+			uint32_t txc_enabled:1;
+			uint32_t res:27;
+
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txc_control_t, *p_txc_control_t;
+
+typedef union _txc_control_n2_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res:27;
+			uint32_t txc_enabled:1;
+			uint32_t res1:2;
+			uint32_t port1_enabled:1;
+			uint32_t port0_enabled:1;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t port0_enabled:1;
+			uint32_t port1_enabled:1;
+			uint32_t res1:2;
+			uint32_t txc_enabled:1;
+			uint32_t res:27;
+
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txc_control_n2_t, *p_txc_control_n2_t;
+
+
+#define	TXC_TRAINING_REG		(FZC_TXC + 0x20008)
+#define	TXC_TRAINING_VECTOR		0	/* RW bit 31:0 */
+#define	TXC_TRAINING_VECTOR_MASK	0x00000000FFFFFFFFULL
+
+typedef union _txc_training_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t txc_training_vector:32;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t txc_training_vector:32;
+
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txc_training_t, *p_txc_training_t;
+
+
+#define	TXC_DEBUG_SELECT_REG		(FZC_TXC + 0x20010)
+#define	TXC_DEBUG_SELECT_SHIFT		0	/* WO bit 5:0 */
+#define	TXC_DEBUG_SELECT_MASK		0x000000000000003FULL
+
+typedef union _txc_debug_select_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res:26;
+			uint32_t debug_select:6;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t debug_select:6;
+			uint32_t res:26;
+
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txc_debug_select_t, *p_txc_debug_select_t;
+
+
+#define	TXC_MAX_REORDER_REG		(FZC_TXC + 0x20018)
+#define	TXC_MAX_REORDER_MASK_2		(0xf)
+#define	TXC_MAX_REORDER_MASK_4		(0x7)
+#define	TXC_MAX_REORDER_SHIFT_BITS	8
+#define	TXC_MAX_REORDER_SHIFT(port)	(port * (TXC_MAX_REORDER_SHIFT_BITS))
+
+typedef union _txc_max_reorder_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t resv3:4;
+			uint32_t port3:4;
+			uint32_t resv2:4;
+			uint32_t port2:4;
+			uint32_t resv1:4;
+			uint32_t port1:4;
+			uint32_t resv0:4;
+			uint32_t port0:4;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t port0:4;
+			uint32_t resv0:4;
+			uint32_t port1:4;
+			uint32_t resv1:4;
+			uint32_t port2:4;
+			uint32_t resv2:4;
+			uint32_t port3:4;
+			uint32_t resv3:4;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txc_max_reorder_t, *p_txc_max_reorder_t;
+
+
+#define	TXC_PORT_CTL_REG		(FZC_TXC + 0x20020)	/* RO */
+#define	TXC_PORT_CTL_OFFSET(port)	(TXC_PORT_CTL_REG + \
+					(port * TXC_FZC_CONTROL_OFFSET))
+#define	TXC_PORT_CNTL_CLEAR		0x1
+
+typedef union _txc_port_ctl_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t rsvd:31;
+			uint32_t clr_all_stat:1;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t clr_all_stat:1;
+			uint32_t rsvd:31;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txc_port_ctl_t, *p_txc_port_ctl_t;
+
+#define	TXC_PKT_STUFFED_REG		(FZC_TXC + 0x20030)
+#define	TXC_PKT_STUFF_PKTASY_SHIFT	0	/* RW bit 15:0 */
+#define	TXC_PKT_STUFF_PKTASY_MASK	0x000000000000FFFFULL
+#define	TXC_PKT_STUFF_REORDER_SHIFT	16	/* RW bit 31:16 */
+#define	TXC_PKT_STUFF_REORDER_MASK	0x00000000FFFF0000ULL
+
+typedef union _txc_pkt_stuffed_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t pkt_pro_reorder:16;
+			uint32_t pkt_proc_pktasy:16;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t pkt_proc_pktasy:16;
+			uint32_t pkt_pro_reorder:16;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txc_pkt_stuffed_t, *p_txc_pkt_stuffed_t;
+
+
+#define	TXC_PKT_XMIT_REG		(FZC_TXC + 0x20038)
+#define	TXC_PKTS_XMIT_SHIFT		0	/* RW bit 15:0 */
+#define	TXC_PKTS_XMIT_MASK		0x000000000000FFFFULL
+#define	TXC_BYTES_XMIT_SHIFT		16	/* RW bit 31:16 */
+#define	TXC_BYTES_XMIT_MASK		0x00000000FFFF0000ULL
+
+typedef union _txc_pkt_xmit_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t bytes_transmitted:16;
+			uint32_t pkts_transmitted:16;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t pkts_transmitted:16;
+			uint32_t bytes_transmitted:16;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txc_pkt_xmit, *p_txc_pkt_xmit;
+
+
+/* count 4 step 0x00100 */
+#define	TXC_ROECC_CTL_REG		(FZC_TXC + 0x20040)
+#define	TXC_ROECC_CTL_OFFSET(port)	(TXC_ROECC_CTL_REG + \
+					(TXC_FZC_CNTL_PORT_OFFSET(port)))
+
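+/*
+ * Offset arithmetic example (derived from the macros above, matching the
+ * "count 4 step 0x00100" note): the port 1 copy of the RO ECC control
+ * register is at
+ *
+ *	TXC_ROECC_CTL_OFFSET(1) = (FZC_TXC + 0x20040) + (1 * 0x100)
+ *				= FZC_TXC + 0x20140
+ */
+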
+typedef union _txc_roecc_ctl_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t disable_ue_error:1;
+			uint32_t rsvd:13;
+			uint32_t double_bit_err:1;
+			uint32_t single_bit_err:1;
+			uint32_t rsvd_2:5;
+			uint32_t all_pkts:1;
+			uint32_t alternate_pkts:1;
+			uint32_t one_pkt:1;
+			uint32_t rsvd_3:5;
+			uint32_t last_line_pkt:1;
+			uint32_t second_line_pkt:1;
+			uint32_t firstd_line_pkt:1;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t firstd_line_pkt:1;
+			uint32_t second_line_pkt:1;
+			uint32_t last_line_pkt:1;
+			uint32_t rsvd_3:5;
+			uint32_t one_pkt:1;
+			uint32_t alternate_pkts:1;
+			uint32_t all_pkts:1;
+			uint32_t rsvd_2:5;
+			uint32_t single_bit_err:1;
+			uint32_t double_bit_err:1;
+			uint32_t rsvd:13;
+			uint32_t disable_ue_error:1;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txc_roecc_ctl_t, *p_txc_roecc_ctl_t;
+
+
+#define	TXC_ROECC_ST_REG		(FZC_TXC + 0x20048)
+
+#define	TXC_ROECC_ST_OFFSET(port)	(TXC_ROECC_ST_REG + \
+					(TXC_FZC_CNTL_PORT_OFFSET(port)))
+
+typedef union _txc_roecc_st_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t clr_st:1;
+			uint32_t res:13;
+			uint32_t correct_error:1;
+			uint32_t uncorrect_error:1;
+			uint32_t rsvd:6;
+			uint32_t ecc_address:10;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t ecc_address:10;
+			uint32_t rsvd:6;
+			uint32_t uncorrect_error:1;
+			uint32_t correct_error:1;
+			uint32_t res:13;
+			uint32_t clr_st:1;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txc_roecc_st_t, *p_txc_roecc_st_t;
+
+
+#define	TXC_RO_DATA0_REG		(FZC_TXC + 0x20050)
+#define	TXC_RO_DATA0_OFFSET(port)	(TXC_RO_DATA0_REG + \
+					(TXC_FZC_CNTL_PORT_OFFSET(port)))
+
+typedef union _txc_ro_data0_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t ro_ecc_data0:32;	/* ro_ecc_data[31:0] */
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t ro_ecc_data0:32;	/* ro_ecc_data[31:0] */
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txc_ro_data0_t, *p_txc_ro_data0_t;
+
+#define	TXC_RO_DATA1_REG		(FZC_TXC + 0x20058)
+#define	TXC_RO_DATA1_OFFSET(port)	(TXC_RO_DATA1_REG + \
+					(TXC_FZC_CNTL_PORT_OFFSET(port)))
+
+typedef union _txc_ro_data1_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t ro_ecc_data1:32;	/* ro_ecc_data[63:32] */
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t ro_ecc_data1:32;	/* ro_ecc_data[63:32] */
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txc_ro_data1_t, *p_txc_ro_data1_t;
+
+
+#define	TXC_RO_DATA2_REG		(FZC_TXC + 0x20060)
+
+#define	TXC_RO_DATA2_OFFSET(port)	(TXC_RO_DATA2_REG + \
+					(TXC_FZC_CNTL_PORT_OFFSET(port)))
+
+typedef union _txc_ro_data2_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t ro_ecc_data2:32;	/* ro_ecc_data[95:64] */
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t ro_ecc_data2:32;	/* ro_ecc_data[95:64] */
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txc_ro_data2_t, *p_txc_ro_data2_t;
+
+#define	TXC_RO_DATA3_REG		(FZC_TXC + 0x20068)
+#define	TXC_RO_DATA3_OFFSET(port)	(TXC_RO_DATA3_REG + \
+					(TXC_FZC_CNTL_PORT_OFFSET(port)))
+
+typedef union _txc_ro_data3_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t ro_ecc_data3:32; /* ro_ecc_data[127:96] */
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t ro_ecc_data3:32; /* ro_ecc_data[127:96] */
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txc_ro_data3_t, *p_txc_ro_data3_t;
+
+#define	TXC_RO_DATA4_REG		(FZC_TXC + 0x20070)
+#define	TXC_RO_DATA4_OFFSET(port)	(TXC_RO_DATA4_REG + \
+					(TXC_FZC_CNTL_PORT_OFFSET(port)))
+
+typedef union _txc_ro_data4_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t ro_ecc_data4:32; /* ro_ecc_data[151:128] */
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t ro_ecc_data4:32; /* ro_ecc_data[151:128] */
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txc_ro_data4_t, *p_txc_ro_data4_t;
+
+/* count 4 step 0x00100 */
+#define	TXC_SFECC_CTL_REG		(FZC_TXC + 0x20078)
+#define	TXC_SFECC_CTL_OFFSET(port)	(TXC_SFECC_CTL_REG + \
+					(TXC_FZC_CNTL_PORT_OFFSET(port)))
+
+typedef union _txc_sfecc_ctl_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t disable_ue_error:1;
+			uint32_t rsvd:13;
+			uint32_t double_bit_err:1;
+			uint32_t single_bit_err:1;
+			uint32_t rsvd_2:5;
+			uint32_t all_pkts:1;
+			uint32_t alternate_pkts:1;
+			uint32_t one_pkt:1;
+			uint32_t rsvd_3:5;
+			uint32_t last_line_pkt:1;
+			uint32_t second_line_pkt:1;
+			uint32_t firstd_line_pkt:1;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t firstd_line_pkt:1;
+			uint32_t second_line_pkt:1;
+			uint32_t last_line_pkt:1;
+			uint32_t rsvd_3:5;
+			uint32_t one_pkt:1;
+			uint32_t alternate_pkts:1;
+			uint32_t all_pkts:1;
+			uint32_t rsvd_2:5;
+			uint32_t single_bit_err:1;
+			uint32_t double_bit_err:1;
+			uint32_t rsvd:13;
+			uint32_t disable_ue_error:1;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txc_sfecc_ctl_t, *p_txc_sfecc_ctl_t;
+
+#define	TXC_SFECC_ST_REG		(FZC_TXC + 0x20080)
+#define	TXC_SFECC_ST_OFFSET(port)	(TXC_SFECC_ST_REG + \
+					(TXC_FZC_CNTL_PORT_OFFSET(port)))
+
+typedef union _txc_sfecc_st_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t clr_st:1;
+			uint32_t res:13;
+			uint32_t correct_error:1;
+			uint32_t uncorrect_error:1;
+			uint32_t rsvd:6;
+			uint32_t ecc_address:10;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t ecc_address:10;
+			uint32_t rsvd:6;
+			uint32_t uncorrect_error:1;
+			uint32_t correct_error:1;
+			uint32_t res:13;
+			uint32_t clr_st:1;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txc_sfecc_st_t, *p_txc_sfecc_st_t;
+
+#define	TXC_SF_DATA0_REG		(FZC_TXC + 0x20088)
+#define	TXC_SF_DATA0_OFFSET(port)	(TXC_SF_DATA0_REG + \
+					(TXC_FZC_CNTL_PORT_OFFSET(port)))
+
+typedef union _txc_sf_data0_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t sf_ecc_data0:32;	/* sf_ecc_data[31:0] */
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t sf_ecc_data0:32;	/* sf_ecc_data[31:0] */
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txc_sf_data0_t, *p_txc_sf_data0_t;
+
+#define	TXC_SF_DATA1_REG		(FZC_TXC + 0x20090)
+#define	TXC_SF_DATA1_OFFSET(port)	(TXC_SF_DATA1_REG + \
+					(TXC_FZC_CNTL_PORT_OFFSET(port)))
+
+typedef union _txc_sf_data1_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t sf_ecc_data1:32;	/* sf_ecc_data[63:32] */
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t sf_ecc_data1:32;	/* sf_ecc_data[63:32] */
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txc_sf_data1_t, *p_txc_sf_data1_t;
+
+
+#define	TXC_SF_DATA2_REG		(FZC_TXC + 0x20098)
+#define	TXC_SF_DATA2_OFFSET(port)	(TXC_SF_DATA2_REG + \
+					(TXC_FZC_CNTL_PORT_OFFSET(port)))
+
+typedef union _txc_sf_data2_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t sf_ecc_data2:32;	/* sf_ecc_data[95:64] */
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t sf_ecc_data2:32;	/* sf_ecc_data[95:64] */
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txc_sf_data2_t, *p_txc_sf_data2_t;
+
+#define	TXC_SF_DATA3_REG		(FZC_TXC + 0x200A0)
+#define	TXC_SF_DATA3_OFFSET(port)	(TXC_SF_DATA3_REG + \
+					(TXC_FZC_CNTL_PORT_OFFSET(port)))
+
+typedef union _txc_sf_data3_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t sf_ecc_data3:32; /* sf_ecc_data[127:96] */
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t sf_ecc_data3:32; /* sf_ecc_data[127:96] */
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txc_sf_data3_t, *p_txc_sf_data3_t;
+
+#define	TXC_SF_DATA4_REG		(FZC_TXC + 0x200A8)
+#define	TXC_SF_DATA4_OFFSET(port)	(TXC_SF_DATA4_REG + \
+					(TXC_FZC_CNTL_PORT_OFFSET(port)))
+
+typedef union _txc_sf_data4_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t sf_ecc_data4:32; /* sf_ecc_data[151:128] */
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t sf_ecc_data4:32; /* sf_ecc_data[151:128] */
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txc_sf_data4_t, *p_txc_sf_data4_t;
+
+#define	TXC_RO_TIDS_REG			(FZC_TXC + 0x200B0)
+#define	TXC_RO_TIDS_OFFSET(port)	(TXC_RO_TIDS_REG + \
+					(TXC_FZC_CNTL_PORT_OFFSET(port)))
+#define	TXC_RO_TIDS_MASK		0x00000000FFFFFFFFULL
+
+typedef union _txc_ro_tids_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t tids_in_use:32;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t tids_in_use:32;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txc_ro_tids_t, *p_txc_ro_tids_t;
+
+#define	TXC_RO_STATE0_REG		(FZC_TXC + 0x200B8)
+#define	TXC_RO_STATE0_OFFSET(port)	(TXC_RO_STATE0_REG + \
+					(TXC_FZC_CNTL_PORT_OFFSET(port)))
+#define	TXC_RO_STATE0_MASK		0x00000000FFFFFFFFULL
+
+typedef union _txc_ro_state0_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t duplicate_tid:32;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t duplicate_tid:32;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txc_ro_state0_t, *p_txc_ro_state0_t;
+
+#define	TXC_RO_STATE1_REG		(FZC_TXC + 0x200C0)
+#define	TXC_RO_STATE1_OFFSET(port)	(TXC_RO_STATE1_REG + \
+					(TXC_FZC_CNTL_PORT_OFFSET(port)))
+#define	TXC_RO_STATE1_MASK		0x00000000FFFFFFFFULL
+
+typedef union _txc_ro_state1_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t unused_tid:32;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t unused_tid:32;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txc_ro_state1_t, *p_txc_ro_state1_t;
+
+#define	TXC_RO_STATE2_REG		(FZC_TXC + 0x200C8)
+#define	TXC_RO_STATE2_OFFSET(port)	(TXC_RO_STATE2_REG + \
+					(TXC_FZC_CNTL_PORT_OFFSET(port)))
+#define	TXC_RO_STATE2_MASK		0x00000000FFFFFFFFULL
+
+typedef union _txc_ro_state2_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t transaction_timeout:32;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t transaction_timeout:32;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txc_ro_state2_t, *p_txc_ro_state2_t;
+
+#define	TXC_RO_STATE3_REG		(FZC_TXC + 0x200D0)
+#define	TXC_RO_STATE3_OFFSET(port)	(TXC_RO_STATE3_REG + \
+					(TXC_FZC_CNTL_PORT_OFFSET(port)))
+
+typedef union _txc_ro_state3_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t enable_spacefilled_watermark:1;
+			uint32_t ro_spacefilled_watermask:10;
+			uint32_t ro_fifo_spaceavailable:10;
+			uint32_t rsv:2;
+			uint32_t enable_ro_watermark:1;
+			uint32_t highest_reorder_used:4;
+			uint32_t num_reorder_used:4;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t num_reorder_used:4;
+			uint32_t highest_reorder_used:4;
+			uint32_t enable_ro_watermark:1;
+			uint32_t rsv:2;
+			uint32_t ro_fifo_spaceavailable:10;
+			uint32_t ro_spacefilled_watermask:10;
+			uint32_t enable_spacefilled_watermark:1;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txc_ro_state3_t, *p_txc_ro_state3_t;
+
+#define	TXC_RO_CTL_REG			(FZC_TXC + 0x200D8)
+#define	TXC_RO_CTL_OFFSET(port)		(TXC_RO_CTL_REG + \
+					(TXC_FZC_CNTL_PORT_OFFSET(port)))
+
+typedef union _txc_ro_ctl_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t clr_fail_state:1;
+			uint32_t rsvd3:3;
+			uint32_t ro_addr1:4;
+			uint32_t rsvd2:1;
+			uint32_t address_failed:1;
+			uint32_t dma_failed:1;
+			uint32_t length_failed:1;
+			uint32_t rsv:1;
+			uint32_t capture_address_fail:1;
+			uint32_t capture_dma_fail:1;
+			uint32_t capture_length_fail:1;
+			uint32_t rsvd:8;
+			uint32_t ro_state_rd_done:1;
+			uint32_t ro_state_wr_done:1;
+			uint32_t ro_state_rd:1;
+			uint32_t ro_state_wr:1;
+			uint32_t ro_state_addr:4;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t ro_state_addr:4;
+			uint32_t ro_state_wr:1;
+			uint32_t ro_state_rd:1;
+			uint32_t ro_state_wr_done:1;
+			uint32_t ro_state_rd_done:1;
+			uint32_t rsvd:8;
+			uint32_t capture_length_fail:1;
+			uint32_t capture_dma_fail:1;
+			uint32_t capture_address_fail:1;
+			uint32_t rsv:1;
+			uint32_t length_failed:1;
+			uint32_t dma_failed:1;
+			uint32_t address_failed:1;
+			uint32_t rsvd2:1;
+			uint32_t ro_addr1:4;
+			uint32_t rsvd3:3;
+			uint32_t clr_fail_state:1;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txc_ro_ctl_t, *p_txc_ro_ctl_t;
+
+
+#define	TXC_RO_ST_DATA0_REG		(FZC_TXC + 0x200E0)
+#define	TXC_RO_ST_DATA0_OFFSET(port)	(TXC_RO_ST_DATA0_REG + \
+					(TXC_FZC_CNTL_PORT_OFFSET(port)))
+#define	TXC_RO_ST_DATA0_MASK		0x00000000FFFFFFFFULL
+
+typedef union _txc_ro_st_data0_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t ro_st_dat0:32;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t ro_st_dat0:32;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txc_ro_st_data0_t, *p_txc_ro_st_data0_t;
+
+
+#define	TXC_RO_ST_DATA1_REG		(FZC_TXC + 0x200E8)
+#define	TXC_RO_ST_DATA1_OFFSET(port)	(TXC_RO_ST_DATA1_REG + \
+					(TXC_FZC_CNTL_PORT_OFFSET(port)))
+#define	TXC_RO_ST_DATA1_MASK		0x00000000FFFFFFFFULL
+
+typedef union _txc_ro_st_data1_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t ro_st_dat1:32;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t ro_st_dat1:32;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txc_ro_st_data1_t, *p_txc_ro_st_data1_t;
+
+
+#define	TXC_RO_ST_DATA2_REG		(FZC_TXC + 0x200F0)
+#define	TXC_RO_ST_DATA2_OFFSET(port)	(TXC_RO_ST_DATA2_REG + \
+					(TXC_FZC_CNTL_PORT_OFFSET(port)))
+#define	TXC_RO_ST_DATA2_MASK		0x00000000FFFFFFFFULL
+
+typedef union _txc_ro_st_data2_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t ro_st_dat2:32;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t ro_st_dat2:32;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txc_ro_st_data2_t, *p_txc_ro_st_data2_t;
+
+#define	TXC_RO_ST_DATA3_REG		(FZC_TXC + 0x200F8)
+#define	TXC_RO_ST_DATA3_OFFSET(port)	(TXC_RO_ST_DATA3_REG + \
+					(TXC_FZC_CNTL_PORT_OFFSET(port)))
+#define	TXC_RO_ST_DATA3_MASK		0x00000000FFFFFFFFULL
+
+typedef union _txc_ro_st_data3_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t ro_st_dat3:32;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t ro_st_dat3:32;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txc_ro_st_data3_t, *p_txc_ro_st_data3_t;
+
+#define	TXC_PORT_PACKET_REQ_REG		(FZC_TXC + 0x20100)
+#define	TXC_PORT_PACKET_REQ_OFFSET(port) (TXC_PORT_PACKET_REQ_REG + \
+					(TXC_FZC_CNTL_PORT_OFFSET(port)))
+#define	TXC_PORT_PACKET_REQ_MASK	0x00000000FFFFFFFFULL
+
+typedef union _txc_port_packet_req_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t gather_req:4;
+			uint32_t packet_eq:12;
+			uint32_t pkterr_abort:16;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t pkterr_abort:16;
+			uint32_t packet_eq:12;
+			uint32_t gather_req:4;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txc_port_packet_req_t, *p_txc_port_packet_req_t;
+
+/* Reorder error bits in interrupt registers  */
+#define	TXC_INT_STAT_SF_CORR_ERR	0x01
+#define	TXC_INT_STAT_SF_UNCORR_ERR	0x02
+#define	TXC_INT_STAT_RO_CORR_ERR	0x04
+#define	TXC_INT_STAT_RO_UNCORR_ERR	0x08
+#define	TXC_INT_STAT_REORDER_ERR	0x10
+#define	TXC_INT_STAT_PKTASSYDEAD	0x20
+
+#define	TXC_INT_STAT_DBG_REG		(FZC_TXC + 0x20420)
+#define	TXC_INT_STAT_DBG_MASK		0x00000000FFFFFFFFULL
+
+typedef union _txc_int_stat_dbg_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t rsvd3:2;
+			uint32_t port3_int_status:6;
+			uint32_t rsvd2:2;
+			uint32_t port2_int_status:6;
+			uint32_t rsvd1:2;
+			uint32_t port1_int_status:6;
+			uint32_t rsvd:2;
+			uint32_t port0_int_status:6;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t port0_int_status:6;
+			uint32_t rsvd:2;
+			uint32_t port1_int_status:6;
+			uint32_t rsvd1:2;
+			uint32_t port2_int_status:6;
+			uint32_t rsvd2:2;
+			uint32_t port3_int_status:6;
+			uint32_t rsvd3:2;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txc_int_stat_dbg_t, *p_txc_int_stat_dbg_t;
+
+
+#define	TXC_INT_STAT_REG		(FZC_TXC + 0x20428)
+#define	TXC_INT_STAT_MASK		0x00000000FFFFFFFFULL
+
+typedef union _txc_int_stat_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t rsvd3:2;
+			uint32_t port3_int_status:6;
+			uint32_t rsvd2:2;
+			uint32_t port2_int_status:6;
+			uint32_t rsvd1:2;
+			uint32_t port1_int_status:6;
+			uint32_t rsvd:2;
+			uint32_t port0_int_status:6;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t port0_int_status:6;
+			uint32_t rsvd:2;
+			uint32_t port1_int_status:6;
+			uint32_t rsvd1:2;
+			uint32_t port2_int_status:6;
+			uint32_t rsvd2:2;
+			uint32_t port3_int_status:6;
+			uint32_t rsvd3:2;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txc_int_stat_t, *p_txc_int_stat_t;
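+
+/*
+ * Decoding sketch (illustrative only; the register read in angle
+ * brackets is a placeholder): each 6-bit portN_int_status field holds
+ * the TXC_INT_STAT_* flags defined above, e.g. a reorder FIFO
+ * uncorrectable ECC error on port 0 can be tested as
+ *
+ *	txc_int_stat_t cs;
+ *
+ *	cs.value = <64-bit image read from TXC_INT_STAT_REG>;
+ *	if (cs.bits.ldw.port0_int_status & TXC_INT_STAT_RO_UNCORR_ERR)
+ *		handle the error;
+ */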
+
+#define	TXC_INT_MASK_REG		(FZC_TXC + 0x20430)
+#define	TXC_INT_MASK_MASK		0x00000000FFFFFFFFULL
+
+typedef union _txc_int_mask_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t rsvd3:2;
+			uint32_t port3_int_mask:6;
+			uint32_t rsvd2:2;
+			uint32_t port2_int_mask:6;
+			uint32_t rsvd1:2;
+			uint32_t port1_int_mask:6;
+			uint32_t rsvd:2;
+			uint32_t port0_int_mask:6;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t port0_int_mask:6;
+			uint32_t rsvd:2;
+			uint32_t port1_int_mask:6;
+			uint32_t rsvd1:2;
+			uint32_t port2_int_mask:6;
+			uint32_t rsvd2:2;
+			uint32_t port3_int_mask:6;
+			uint32_t rsvd3:2;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txc_int_mask_t, *p_txc_int_mask_t;
+
+/* 2 ports */
+typedef union _txc_int_mask_n2_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t rsvd1:18;
+			uint32_t port1_int_mask:6;
+			uint32_t rsvd:2;
+			uint32_t port0_int_mask:6;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t port0_int_mask:6;
+			uint32_t rsvd:2;
+			uint32_t port1_int_mask:6;
+			uint32_t rsvd1:18;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txc_int_mask_n2_t, *p_txc_int_mask_n2_t;
+
+typedef	struct _txc_ro_states {
+	txc_roecc_st_t		roecc;
+	txc_ro_data0_t		d0;
+	txc_ro_data1_t		d1;
+	txc_ro_data2_t		d2;
+	txc_ro_data3_t		d3;
+	txc_ro_data4_t		d4;
+	txc_ro_tids_t		tids;
+	txc_ro_state0_t		st0;
+	txc_ro_state1_t		st1;
+	txc_ro_state2_t		st2;
+	txc_ro_state3_t		st3;
+	txc_ro_ctl_t		ctl;
+} txc_ro_states_t, *p_txc_ro_states_t;
+
+typedef	struct _txc_sf_states {
+	txc_sfecc_st_t		sfecc;
+	txc_sf_data0_t		d0;
+	txc_sf_data1_t		d1;
+	txc_sf_data2_t		d2;
+	txc_sf_data3_t		d3;
+	txc_sf_data4_t		d4;
+} txc_sf_states_t, *p_txc_sf_states_t;
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _SYS_NXGE_NXGE_TXC_HW_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/sys/nxge/nxge_txdma.h	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,304 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef	_SYS_NXGE_NXGE_TXDMA_H
+#define	_SYS_NXGE_NXGE_TXDMA_H
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#include <sys/nxge/nxge_txdma_hw.h>
+#include <npi_txdma.h>
+
+#define	TXDMA_PORT_BITMAP(nxgep)		(nxgep->pt_config.tx_dma_map)
+
+#define	TXDMA_RECLAIM_PENDING_DEFAULT		64
+#define	TX_FULL_MARK				3
+
+/*
+ * Transmit load balancing definitions.
+ */
+#define	NXGE_TX_LB_TCPUDP			0	/* default policy */
+#define	NXGE_TX_LB_HASH				1	/* from the hint data */
+#define	NXGE_TX_LB_DEST_MAC			2	/* Dest. MAC */
+
+/*
+ * Descriptor ring empty:
+ *		(1) head index is equal to tail index.
+ *		(2) wrapped around bits are the same.
+ * Descriptor ring full:
+ *		(1) head index is equal to tail index.
+ *		(2) wrapped around bits are different.
+ *
+ */
+#define	TXDMA_RING_EMPTY(head, head_wrap, tail, tail_wrap)	\
+	((head == tail && head_wrap == tail_wrap) ? B_TRUE : B_FALSE)
+
+#define	TXDMA_RING_FULL(head, head_wrap, tail, tail_wrap)	\
+	((head == tail && head_wrap != tail_wrap) ? B_TRUE : B_FALSE)
+
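+/*
+ * Worked example: with head == tail the wrap bits disambiguate the two
+ * cases described above, so
+ *
+ *	TXDMA_RING_EMPTY(0, 0, 0, 0)	evaluates to B_TRUE
+ *	TXDMA_RING_FULL(0, 1, 0, 0)	evaluates to B_TRUE
+ *	TXDMA_RING_FULL(0, 0, 0, 0)	evaluates to B_FALSE
+ */
+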
+#define	TXDMA_DESC_NEXT_INDEX(index, entries, wrap_mask) \
+			((index + entries) & wrap_mask)
+
+#define	TXDMA_DRR_WEIGHT_DEFAULT	0x001f
+
+typedef struct _tx_msg_t {
+	nxge_os_block_mv_t 	flags;		/* DMA, BCOPY, DVMA (?) */
+	nxge_os_dma_common_t	buf_dma;	/* premapped buffer blocks */
+	nxge_os_dma_handle_t	buf_dma_handle; /* premapped buffer handle */
+	nxge_os_dma_handle_t 	dma_handle;	/* DMA handle for normal send */
+	nxge_os_dma_handle_t 	dvma_handle;	/* Fast DVMA  handle */
+
+	p_mblk_t 		tx_message;
+	uint32_t 		tx_msg_size;
+	size_t			bytes_used;
+	int			head;
+	int			tail;
+} tx_msg_t, *p_tx_msg_t;
+
+/*
+ * TX  Statistics.
+ */
+typedef struct _nxge_tx_ring_stats_t {
+	uint64_t	opackets;
+	uint64_t	obytes;
+	uint64_t	oerrors;
+
+	uint32_t	tx_inits;
+	uint32_t	tx_no_buf;
+
+	uint32_t		mbox_err;
+	uint32_t		pkt_size_err;
+	uint32_t 		tx_ring_oflow;
+	uint32_t 		pre_buf_par_err;
+	uint32_t 		nack_pref;
+	uint32_t 		nack_pkt_rd;
+	uint32_t 		conf_part_err;
+	uint32_t 		pkt_part_err;
+	uint32_t		tx_starts;
+	uint32_t		tx_nocanput;
+	uint32_t		tx_msgdup_fail;
+	uint32_t		tx_allocb_fail;
+	uint32_t		tx_no_desc;
+	uint32_t		tx_dma_bind_fail;
+	uint32_t		tx_uflo;
+
+	uint32_t		tx_hdr_pkts;
+	uint32_t		tx_ddi_pkts;
+	uint32_t		tx_dvma_pkts;
+
+	uint32_t		tx_max_pend;
+	uint32_t		tx_jumbo_pkts;
+
+	txdma_ring_errlog_t	errlog;
+} nxge_tx_ring_stats_t, *p_nxge_tx_ring_stats_t;
+
+typedef struct _tx_ring_t {
+	nxge_os_dma_common_t	tdc_desc;
+	struct _nxge_t		*nxgep;
+	p_tx_msg_t 		tx_msg_ring;
+	uint32_t		tnblocks;
+	tx_rng_cfig_t		tx_ring_cfig;
+	tx_ring_hdl_t		tx_ring_hdl;
+	tx_ring_kick_t		tx_ring_kick;
+	tx_cs_t			tx_cs;
+	tx_dma_ent_msk_t	tx_evmask;
+	txdma_mbh_t		tx_mbox_mbh;
+	txdma_mbl_t		tx_mbox_mbl;
+	log_page_vld_t		page_valid;
+	log_page_mask_t		page_mask_1;
+	log_page_mask_t		page_mask_2;
+	log_page_value_t	page_value_1;
+	log_page_value_t	page_value_2;
+	log_page_relo_t		page_reloc_1;
+	log_page_relo_t		page_reloc_2;
+	log_page_hdl_t		page_hdl;
+	txc_dma_max_burst_t	max_burst;
+	boolean_t		cfg_set;
+	uint32_t		tx_ring_state;
+
+	nxge_os_mutex_t		lock;
+	uint16_t 		index;
+	uint16_t		tdc;
+	struct nxge_tdc_cfg	*tdc_p;
+	uint_t 			tx_ring_size;
+	uint32_t 		num_chunks;
+
+	uint_t 			tx_wrap_mask;
+	uint_t 			rd_index;
+	uint_t 			wr_index;
+	boolean_t		wr_index_wrap;
+	uint_t 			head_index;
+	boolean_t		head_wrap;
+	tx_ring_hdl_t		ring_head;
+	tx_ring_kick_t		ring_kick_tail;
+	txdma_mailbox_t		tx_mbox;
+
+	uint_t 			descs_pending;
+	boolean_t 		queueing;
+
+	nxge_os_mutex_t		sq_lock;
+
+	p_mblk_t 		head;
+	p_mblk_t 		tail;
+
+	uint16_t		ldg_group_id;
+	p_nxge_tx_ring_stats_t tdc_stats;
+
+	nxge_os_mutex_t 	dvma_lock;
+	uint_t 			dvma_wr_index;
+	uint_t 			dvma_rd_index;
+	uint_t 			dvma_pending;
+	uint_t 			dvma_available;
+	uint_t 			dvma_wrap_mask;
+
+	nxge_os_dma_handle_t 	*dvma_ring;
+
+#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
+	uint64_t		hv_tx_buf_base_ioaddr_pp;
+	uint64_t		hv_tx_buf_ioaddr_size;
+	uint64_t		hv_tx_cntl_base_ioaddr_pp;
+	uint64_t		hv_tx_cntl_ioaddr_size;
+	boolean_t		hv_set;
+#endif
+} tx_ring_t, *p_tx_ring_t;
+
+
+/* Transmit Mailbox */
+typedef struct _tx_mbox_t {
+	nxge_os_mutex_t 	lock;
+	uint16_t		index;
+	struct _nxge_t		*nxgep;
+	uint16_t		tdc;
+	nxge_os_dma_common_t	tx_mbox;
+	txdma_mbl_t		tx_mbox_l;
+	txdma_mbh_t		tx_mbox_h;
+} tx_mbox_t, *p_tx_mbox_t;
+
+typedef struct _tx_rings_t {
+	p_tx_ring_t 		*rings;
+	boolean_t		txdesc_allocated;
+	uint32_t		ndmas;
+	nxge_os_dma_common_t	tdc_dma;
+	nxge_os_dma_common_t	tdc_mbox;
+} tx_rings_t, *p_tx_rings_t;
+
+
+#if defined(_KERNEL) || (defined(COSIM) && !defined(IODIAG))
+
+typedef struct _tx_buf_rings_t {
+	struct _tx_buf_ring_t 	*txbuf_rings;
+	boolean_t		txbuf_allocated;
+} tx_buf_rings_t, *p_tx_buf_rings_t;
+
+#endif
+
+typedef struct _tx_mbox_areas_t {
+	p_tx_mbox_t 		*txmbox_areas_p;
+	boolean_t		txmbox_allocated;
+} tx_mbox_areas_t, *p_tx_mbox_areas_t;
+
+typedef struct _tx_param_t {
+	nxge_logical_page_t tx_logical_pages[NXGE_MAX_LOGICAL_PAGES];
+} tx_param_t, *p_tx_param_t;
+
+typedef struct _tx_params {
+	struct _tx_param_t 	*tx_param_p;
+} tx_params_t, *p_tx_params_t;
+
+/*
+ * Global per-chip register definitions; they are initialized
+ * through the function-zero control registers.
+ */
+typedef struct _txdma_globals {
+	boolean_t		mode32;
+} txdma_globals_t, *p_txdma_globals;
+
+
+#if	defined(SOLARIS) && (defined(_KERNEL) || \
+	(defined(COSIM) && !defined(IODIAG)))
+
+/*
+ * Transmit prototypes.
+ */
+nxge_status_t nxge_init_txdma_channels(p_nxge_t);
+void nxge_uninit_txdma_channels(p_nxge_t);
+void nxge_setup_dma_common(p_nxge_dma_common_t, p_nxge_dma_common_t,
+		uint32_t, uint32_t);
+nxge_status_t nxge_reset_txdma_channel(p_nxge_t, uint16_t,
+	uint64_t);
+nxge_status_t nxge_init_txdma_channel_event_mask(p_nxge_t,
+	uint16_t, p_tx_dma_ent_msk_t);
+nxge_status_t nxge_init_txdma_channel_cntl_stat(p_nxge_t,
+	uint16_t, uint64_t);
+nxge_status_t nxge_enable_txdma_channel(p_nxge_t, uint16_t,
+	p_tx_ring_t, p_tx_mbox_t);
+
+p_mblk_t nxge_tx_pkt_header_reserve(p_mblk_t, uint8_t *);
+int nxge_tx_pkt_nmblocks(p_mblk_t, int *);
+boolean_t nxge_txdma_reclaim(p_nxge_t, p_tx_ring_t, int);
+
+void nxge_fill_tx_hdr(p_mblk_t, boolean_t, boolean_t,
+	int, uint8_t, p_tx_pkt_hdr_all_t);
+
+nxge_status_t nxge_txdma_hw_mode(p_nxge_t, boolean_t);
+void nxge_hw_start_tx(p_nxge_t);
+void nxge_txdma_stop(p_nxge_t);
+void nxge_txdma_stop_start(p_nxge_t);
+void nxge_fixup_txdma_rings(p_nxge_t);
+void nxge_txdma_hw_kick(p_nxge_t);
+void nxge_txdma_fix_channel(p_nxge_t, uint16_t);
+void nxge_txdma_fixup_channel(p_nxge_t, p_tx_ring_t,
+	uint16_t);
+void nxge_txdma_hw_kick_channel(p_nxge_t, p_tx_ring_t,
+	uint16_t);
+
+void nxge_txdma_regs_dump(p_nxge_t, int);
+void nxge_txdma_regs_dump_channels(p_nxge_t);
+
+void nxge_check_tx_hang(p_nxge_t);
+void nxge_fixup_hung_txdma_rings(p_nxge_t);
+void nxge_txdma_fix_hung_channel(p_nxge_t, uint16_t);
+void nxge_txdma_fixup_hung_channel(p_nxge_t, p_tx_ring_t,
+	uint16_t);
+
+void nxge_reclaim_rings(p_nxge_t);
+int nxge_txdma_channel_hung(p_nxge_t,
+	p_tx_ring_t tx_ring_p, uint16_t);
+int nxge_txdma_hung(p_nxge_t);
+int nxge_txdma_stop_inj_err(p_nxge_t, int);
+void nxge_txdma_inject_err(p_nxge_t, uint32_t, uint8_t);
+
+#endif
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _SYS_NXGE_NXGE_TXDMA_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/sys/nxge/nxge_txdma_hw.h	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,1031 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef	_SYS_NXGE_NXGE_TXDMA_HW_H
+#define	_SYS_NXGE_NXGE_TXDMA_HW_H
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#include <nxge_defs.h>
+#include <nxge_hw.h>
+
+#if !defined(_BIG_ENDIAN)
+#define	SWAP(X)	(X)
+#else
+#define	SWAP(X)   \
+	((((X) >> 32) & 0x00000000ffffffff) | \
+	(((X) << 32) & 0xffffffff00000000))
+#endif
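+
+/*
+ * Example: on a big-endian host SWAP() exchanges the two 32-bit halves
+ * of a 64-bit image, e.g. SWAP(0x0123456789abcdefULL) yields
+ * 0x89abcdef01234567ULL; on a little-endian host it is the identity.
+ */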
+
+/*
+ * Partitioning Support: same as those defined for the RX
+ */
+/*
+ * TDC: Partitioning Support
+ *	(each of the following registers is replicated per TDC)
+ */
+#define	TX_LOG_REG_SIZE			512
+#define	TX_LOG_DMA_OFFSET(channel)	(channel * TX_LOG_REG_SIZE)
+
+#define	TX_LOG_PAGE_VLD_REG		(FZC_DMC + 0x40000)
+#define	TX_LOG_PAGE_MASK1_REG		(FZC_DMC + 0x40008)
+#define	TX_LOG_PAGE_VAL1_REG		(FZC_DMC + 0x40010)
+#define	TX_LOG_PAGE_MASK2_REG		(FZC_DMC + 0x40018)
+#define	TX_LOG_PAGE_VAL2_REG		(FZC_DMC + 0x40020)
+#define	TX_LOG_PAGE_RELO1_REG		(FZC_DMC + 0x40028)
+#define	TX_LOG_PAGE_RELO2_REG		(FZC_DMC + 0x40030)
+#define	TX_LOG_PAGE_HDL_REG		(FZC_DMC + 0x40038)
+
+/* Transmit Addressing Mode: Set to 1 to select 32-bit addressing mode */
+#define	TX_ADDR_MD_REG			(FZC_DMC + 0x45000)
+
+#define	TX_ADDR_MD_SHIFT	0			/* bits 0:0 */
+#define	TX_ADDR_MD_SET_32	0x0000000000000001ULL	/* 1 to select 32 bit */
+#define	TX_ADDR_MD_MASK		0x0000000000000001ULL
+
+typedef union _tx_addr_md_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res1_1:31;
+			uint32_t mode32:1;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t mode32:1;
+			uint32_t res1_1:31;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} tx_addr_md_t, *p_tx_addr_md_t;
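+
+/*
+ * Usage sketch (illustrative only): selecting 32-bit transmit addressing
+ * amounts to writing TX_ADDR_MD_SET_32 to TX_ADDR_MD_REG; through the
+ * overlay this is
+ *
+ *	tx_addr_md_t md;
+ *
+ *	md.value = 0;
+ *	md.bits.ldw.mode32 = 1;		same image as TX_ADDR_MD_SET_32
+ */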
+
+/* Transmit Packet Descriptor Structure */
+#define	TX_PKT_DESC_SAD_SHIFT		0		/* bits 43:0 */
+#define	TX_PKT_DESC_SAD_MASK		0x00000FFFFFFFFFFFULL
+#define	TX_PKT_DESC_TR_LEN_SHIFT	44		/* bits 56:44 */
+#define	TX_PKT_DESC_TR_LEN_MASK		0x01FFF00000000000ULL
+#define	TX_PKT_DESC_NUM_PTR_SHIFT	58		/* bits 61:58 */
+#define	TX_PKT_DESC_NUM_PTR_MASK	0x3C00000000000000ULL
+#define	TX_PKT_DESC_MARK_SHIFT		62		/* bit 62 */
+#define	TX_PKT_DESC_MARK		0x4000000000000000ULL
+#define	TX_PKT_DESC_MARK_MASK		0x4000000000000000ULL
+#define	TX_PKT_DESC_SOP_SHIFT		63		/* bit 63 */
+#define	TX_PKT_DESC_SOP			0x8000000000000000ULL
+#define	TX_PKT_DESC_SOP_MASK		0x8000000000000000ULL
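+
+/*
+ * Descriptor composition sketch (illustrative only; "sad" stands for a
+ * 44-bit buffer address supplied by the caller): a start-of-packet
+ * descriptor with one gather pointer and a 1514-byte transfer can be
+ * assembled from the fields above as
+ *
+ *	uint64_t desc;
+ *
+ *	desc = (sad & TX_PKT_DESC_SAD_MASK) |
+ *	    (((uint64_t)1514 << TX_PKT_DESC_TR_LEN_SHIFT) &
+ *	    TX_PKT_DESC_TR_LEN_MASK) |
+ *	    (((uint64_t)1 << TX_PKT_DESC_NUM_PTR_SHIFT) &
+ *	    TX_PKT_DESC_NUM_PTR_MASK) |
+ *	    TX_PKT_DESC_SOP;
+ */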
+
+typedef union _tx_desc_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t sop:1;
+			uint32_t mark:1;
+			uint32_t num_ptr:4;
+			uint32_t res1:1;
+			uint32_t tr_len:13;
+			uint32_t sad:12;
+
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t sad:12;
+			uint32_t tr_len:13;
+			uint32_t res1:1;
+			uint32_t num_ptr:4;
+			uint32_t mark:1;
+			uint32_t sop:1;
+
+#endif
+		} hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t sad:32;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t sad:32;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		struct {
+
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t sop:1;
+			uint32_t mark:1;
+			uint32_t num_ptr:4;
+			uint32_t res1:1;
+			uint32_t tr_len:13;
+			uint32_t sad:12;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t sad:12;
+			uint32_t tr_len:13;
+			uint32_t res1:1;
+			uint32_t num_ptr:4;
+			uint32_t mark:1;
+			uint32_t sop:1;
+#endif
+		} hdw;
+#endif
+	} bits;
+} tx_desc_t, *p_tx_desc_t;
+
+
+/* Transmit Ring Configuration (24 Channels) */
+#define	TX_RNG_CFIG_REG			(DMC + 0x40000)
+#if OLD
+#define	TX_RING_HDH_REG			(DMC + 0x40008)
+#endif
+#define	TX_RING_HDL_REG			(DMC + 0x40010)
+#define	TX_RING_KICK_REG		(DMC + 0x40018)
+#define	TX_ENT_MSK_REG			(DMC + 0x40020)
+#define	TX_CS_REG			(DMC + 0x40028)
+#define	TXDMA_MBH_REG			(DMC + 0x40030)
+#define	TXDMA_MBL_REG			(DMC + 0x40038)
+#define	TX_DMA_PRE_ST_REG		(DMC + 0x40040)
+#define	TX_RNG_ERR_LOGH_REG		(DMC + 0x40048)
+#define	TX_RNG_ERR_LOGL_REG		(DMC + 0x40050)
+#define	TDMC_INTR_DBG_REG		(DMC + 0x40060)
+#define	TX_CS_DBG_REG			(DMC + 0x40068)
+
+/* Transmit Ring Configuration */
+#define	TX_RNG_CFIG_STADDR_SHIFT	6			/* bits 18:6 */
+#define	TX_RNG_CFIG_STADDR_MASK		0x000000000007FFC0ULL
+#define	TX_RNG_CFIG_ADDR_MASK		0x00000FFFFFFFFFC0ULL
+#define	TX_RNG_CFIG_STADDR_BASE_SHIFT	19			/* bits 43:19 */
+#define	TX_RNG_CFIG_STADDR_BASE_MASK	0x00000FFFFFF80000ULL
+#define	TX_RNG_CFIG_LEN_SHIFT		48			/* bits 60:48 */
+#define	TX_RNG_CFIG_LEN_MASK		0xFFF8000000000000ULL
+
+#define	TX_RNG_HEAD_TAIL_SHIFT		3
+#define	TX_RNG_HEAD_TAIL_WRAP_SHIFT	19
+
+typedef union _tx_rng_cfig_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res2:3;
+			uint32_t len:13;
+			uint32_t res1:4;
+			uint32_t staddr_base:12;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t staddr_base:12;
+			uint32_t res1:4;
+			uint32_t len:13;
+			uint32_t res2:3;
+#endif
+		} hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t staddr_base:13;
+			uint32_t staddr:13;
+			uint32_t res2:6;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t res2:6;
+			uint32_t staddr:13;
+			uint32_t staddr_base:13;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res2:3;
+			uint32_t len:13;
+			uint32_t res1:4;
+			uint32_t staddr_base:12;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t staddr_base:12;
+			uint32_t res1:4;
+			uint32_t len:13;
+			uint32_t res2:3;
+#endif
+		} hdw;
+#endif
+	} bits;
+} tx_rng_cfig_t, *p_tx_rng_cfig_t;
+
+/* Transmit Ring Head Low */
+#define	TX_RING_HDL_SHIFT		3			/* bit 31:3 */
+#define	TX_RING_HDL_MASK		0x00000000FFFFFFF8ULL
+
+typedef union _tx_ring_hdl_t {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res0:12;
+			uint32_t wrap:1;
+			uint32_t head:16;
+			uint32_t res2:3;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t res2:3;
+			uint32_t head:16;
+			uint32_t wrap:1;
+			uint32_t res0:12;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} tx_ring_hdl_t, *p_tx_ring_hdl_t;
+
+/* Transmit Ring Kick */
+#define	TX_RING_KICK_TAIL_SHIFT		3			/* bit 43:3 */
+#define	TX_RING_KICK_TAIL_MASK		0x00000FFFFFFFFFF8ULL
+
+typedef union _tx_ring_kick_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res0:12;
+			uint32_t wrap:1;
+			uint32_t tail:16;
+			uint32_t res2:3;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t res2:3;
+			uint32_t tail:16;
+			uint32_t wrap:1;
+			uint32_t res0:12;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} tx_ring_kick_t, *p_tx_ring_kick_t;
+
+/* Transmit Event Mask (DMC + 0x40020) */
+#define	TX_ENT_MSK_PKT_PRT_ERR_SHIFT		0	/* bit 0: 0 to flag */
+#define	TX_ENT_MSK_PKT_PRT_ERR_MASK		0x0000000000000001ULL
+#define	TX_ENT_MSK_CONF_PART_ERR_SHIFT		1	/* bit 1: 0 to flag */
+#define	TX_ENT_MSK_CONF_PART_ERR_MASK		0x0000000000000002ULL
+#define	TX_ENT_MSK_NACK_PKT_RD_SHIFT		2	/* bit 2: 0 to flag */
+#define	TX_ENT_MSK_NACK_PKT_RD_MASK		0x0000000000000004ULL
+#define	TX_ENT_MSK_NACK_PREF_SHIFT		3	/* bit 3: 0 to flag */
+#define	TX_ENT_MSK_NACK_PREF_MASK		0x0000000000000008ULL
+#define	TX_ENT_MSK_PREF_BUF_ECC_ERR_SHIFT	4	/* bit 4: 0 to flag */
+#define	TX_ENT_MSK_PREF_BUF_ECC_ERR_MASK	0x0000000000000010ULL
+#define	TX_ENT_MSK_TX_RING_OFLOW_SHIFT		5	/* bit 5: 0 to flag */
+#define	TX_ENT_MSK_TX_RING_OFLOW_MASK		0x0000000000000020ULL
+#define	TX_ENT_MSK_PKT_SIZE_ERR_SHIFT		6	/* bit 6: 0 to flag */
+#define	TX_ENT_MSK_PKT_SIZE_ERR_MASK		0x0000000000000040ULL
+#define	TX_ENT_MSK_MBOX_ERR_SHIFT		7	/* bit 7: 0 to flag */
+#define	TX_ENT_MSK_MBOX_ERR_MASK		0x0000000000000080ULL
+#define	TX_ENT_MSK_MK_SHIFT			15	/* bit 15: 0 to flag */
+#define	TX_ENT_MSK_MK_MASK			0x0000000000008000ULL
+#define	TX_ENT_MSK_MK_ALL		(TX_ENT_MSK_PKT_PRT_ERR_MASK | \
+					TX_ENT_MSK_CONF_PART_ERR_MASK |	\
+					TX_ENT_MSK_NACK_PKT_RD_MASK |	\
+					TX_ENT_MSK_NACK_PREF_MASK |	\
+					TX_ENT_MSK_PREF_BUF_ECC_ERR_MASK | \
+					TX_ENT_MSK_TX_RING_OFLOW_MASK |	\
+					TX_ENT_MSK_PKT_SIZE_ERR_MASK | \
+					TX_ENT_MSK_MBOX_ERR_MASK | \
+					TX_ENT_MSK_MK_MASK)
+
+
+typedef union _tx_dma_ent_msk_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res1_1:16;
+			uint32_t mk:1;
+			uint32_t res2:7;
+			uint32_t mbox_err:1;
+			uint32_t pkt_size_err:1;
+			uint32_t tx_ring_oflow:1;
+			uint32_t pref_buf_ecc_err:1;
+			uint32_t nack_pref:1;
+			uint32_t nack_pkt_rd:1;
+			uint32_t conf_part_err:1;
+			uint32_t pkt_prt_err:1;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t pkt_prt_err:1;
+			uint32_t conf_part_err:1;
+			uint32_t nack_pkt_rd:1;
+			uint32_t nack_pref:1;
+			uint32_t pref_buf_ecc_err:1;
+			uint32_t tx_ring_oflow:1;
+			uint32_t pkt_size_err:1;
+			uint32_t mbox_err:1;
+			uint32_t res2:7;
+			uint32_t mk:1;
+			uint32_t res1_1:16;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} tx_dma_ent_msk_t, *p_tx_dma_ent_msk_t;
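+/*
+ * Illustrative sketch: per the "0 to flag" comments above, the natural
+ * reading is that a set bit suppresses the corresponding event, so
+ * writing TX_ENT_MSK_MK_ALL masks every TX DMA event for a channel and
+ * writing 0 re-arms them all.
+ *
+ *	tx_dma_ent_msk_t msk;
+ *
+ *	msk.value = TX_ENT_MSK_MK_ALL;		(mask everything)
+ *	msk.value = 0;				(unmask everything)
+ */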
+
+
+/* Transmit Control and Status  (DMC + 0x40028) */
+#define	TX_CS_PKT_PRT_ERR_SHIFT			0	/* RO, bit 0 */
+#define	TX_CS_PKT_PRT_ERR_MASK			0x0000000000000001ULL
+#define	TX_CS_CONF_PART_ERR_SHIF		1	/* RO, bit 1 */
+#define	TX_CS_CONF_PART_ERR_MASK		0x0000000000000002ULL
+#define	TX_CS_NACK_PKT_RD_SHIFT			2	/* RO, bit 2 */
+#define	TX_CS_NACK_PKT_RD_MASK			0x0000000000000004ULL
+#define	TX_CS_PREF_SHIFT			3	/* RO, bit 3 */
+#define	TX_CS_PREF_MASK				0x0000000000000008ULL
+#define	TX_CS_PREF_BUF_PAR_ERR_SHIFT		4	/* RO, bit 4 */
+#define	TX_CS_PREF_BUF_PAR_ERR_MASK		0x0000000000000010ULL
+#define	TX_CS_RING_OFLOW_SHIFT			5	/* RO, bit 5 */
+#define	TX_CS_RING_OFLOW_MASK			0x0000000000000020ULL
+#define	TX_CS_PKT_SIZE_ERR_SHIFT		6	/* RW, bit 6 */
+#define	TX_CS_PKT_SIZE_ERR_MASK			0x0000000000000040ULL
+#define	TX_CS_MMK_SHIFT				14	/* RC, bit 14 */
+#define	TX_CS_MMK_MASK				0x0000000000004000ULL
+#define	TX_CS_MK_SHIFT				15	/* RCW1C, bit 15 */
+#define	TX_CS_MK_MASK				0x0000000000008000ULL
+#define	TX_CS_SNG_SHIFT				27	/* RO, bit 27 */
+#define	TX_CS_SNG_MASK				0x0000000008000000ULL
+#define	TX_CS_STOP_N_GO_SHIFT			28	/* RW, bit 28 */
+#define	TX_CS_STOP_N_GO_MASK			0x0000000010000000ULL
+#define	TX_CS_MB_SHIFT				29	/* RO, bit 29 */
+#define	TX_CS_MB_MASK				0x0000000020000000ULL
+#define	TX_CS_RST_STATE_SHIFT			30	/* Rw, bit 30 */
+#define	TX_CS_RST_STATE_MASK			0x0000000040000000ULL
+#define	TX_CS_RST_SHIFT				31	/* Rw, bit 31 */
+#define	TX_CS_RST_MASK				0x0000000080000000ULL
+#define	TX_CS_LASTMASK_SHIFT			32	/* RW, bit 43:32 */
+#define	TX_CS_LASTMARK_MASK			0x00000FFF00000000ULL
+#define	TX_CS_PKT_CNT_SHIFT			48	/* RW, bit 59:48 */
+#define	TX_CS_PKT_CNT_MASK			0x0FFF000000000000ULL
+
+/* Transmit Control and Status */
+typedef union _tx_cs_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res1:4;
+			uint32_t pkt_cnt:12;
+			uint32_t res2:4;
+			uint32_t lastmark:12;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t lastmark:12;
+			uint32_t res2:4;
+			uint32_t pkt_cnt:12;
+			uint32_t res1:4;
+#endif
+		} hdw;
+
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t rst:1;
+			uint32_t rst_state:1;
+			uint32_t mb:1;
+			uint32_t stop_n_go:1;
+			uint32_t sng_state:1;
+			uint32_t res1:11;
+			uint32_t mk:1;
+			uint32_t mmk:1;
+			uint32_t res2:6;
+			uint32_t mbox_err:1;
+			uint32_t pkt_size_err:1;
+			uint32_t tx_ring_oflow:1;
+			uint32_t pref_buf_par_err:1;
+			uint32_t nack_pref:1;
+			uint32_t nack_pkt_rd:1;
+			uint32_t conf_part_err:1;
+			uint32_t pkt_prt_err:1;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t pkt_prt_err:1;
+			uint32_t conf_part_err:1;
+			uint32_t nack_pkt_rd:1;
+			uint32_t nack_pref:1;
+			uint32_t pref_buf_par_err:1;
+			uint32_t tx_ring_oflow:1;
+			uint32_t pkt_size_err:1;
+			uint32_t mbox_err:1;
+			uint32_t res2:6;
+			uint32_t mmk:1;
+			uint32_t mk:1;
+			uint32_t res1:11;
+			uint32_t sng_state:1;
+			uint32_t stop_n_go:1;
+			uint32_t mb:1;
+			uint32_t rst_state:1;
+			uint32_t rst:1;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res1:4;
+			uint32_t pkt_cnt:12;
+			uint32_t res2:4;
+			uint32_t lastmark:12;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t lastmark:12;
+			uint32_t res2:4;
+			uint32_t pkt_cnt:12;
+			uint32_t res1:4;
+#endif
+	} hdw;
+
+#endif
+	} bits;
+} tx_cs_t, *p_tx_cs_t;
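+/*
+ * Illustrative sketch: a service routine would read the control/status
+ * word once and then look at the mark and packet-count fields.  "cs" and
+ * "npkts" are hypothetical locals, with cs holding the value read from
+ * TX_CS_REG.
+ *
+ *	tx_cs_t cs;
+ *
+ *	cs.value = <64-bit read of TX_CS_REG>;
+ *	if (cs.bits.ldw.mk)
+ *		npkts = cs.bits.hdw.pkt_cnt;
+ */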
+
+/* Transmit Mailbox High (DMC + 0x40030) */
+#define	TXDMA_MBH_SHIFT			0	/* bit 11:0 */
+#define	TXDMA_MBH_ADDR_SHIFT		32	/* bit 43:32 */
+#define	TXDMA_MBH_MASK			0x0000000000000FFFULL
+
+typedef union _txdma_mbh_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res1_1:20;
+			uint32_t mbaddr:12;
+
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t mbaddr:12;
+			uint32_t res1_1:20;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txdma_mbh_t, *p_txdma_mbh_t;
+
+
+/* Transmit Mailbox Low (DMC + 0x40038) */
+#define	TXDMA_MBL_SHIFT			6	/* bit 31:6 */
+#define	TXDMA_MBL_MASK			0x00000000FFFFFFC0ULL
+
+typedef union _txdma_mbl_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t mbaddr:26;
+			uint32_t res2:6;
+
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t res2:6;
+			uint32_t mbaddr:26;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txdma_mbl_t, *p_txdma_mbl_t;
+
+/* Transmit Prefetch State High (DMC + 0x40040) */
+#define	TX_DMA_PREF_ST_SHIFT		0	/* bit 5:0 */
+#define	TX_DMA_PREF_ST_MASK		0x000000000000003FULL
+
+typedef union _tx_dma_pre_st_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res1_1:13;
+			uint32_t shadow_hd:19;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t shadow_hd:19;
+			uint32_t res1_1:13;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} tx_dma_pre_st_t, *p_tx_dma_pre_st_t;
+
+/* Transmit Ring Error Log High (DMC + 0x40048) */
+#define	TX_RNG_ERR_LOGH_ERR_ADDR_SHIFT		0	/* RO bit 11:0 */
+#define	TX_RNG_ERR_LOGH_ERR_ADDR_MASK		0x0000000000000FFFULL
+#define	TX_RNG_ERR_LOGH_ADDR_SHIFT		32
+#define	TX_RNG_ERR_LOGH_ERRCODE_SHIFT		26	/* RO bit 29:26 */
+#define	TX_RNG_ERR_LOGH_ERRCODE_MASK		0x000000003C000000ULL
+#define	TX_RNG_ERR_LOGH_MERR_SHIFT		30	/* RO bit 30 */
+#define	TX_RNG_ERR_LOGH_MERR_MASK		0x0000000040000000ULL
+#define	TX_RNG_ERR_LOGH_ERR_SHIFT		31	/* RO bit 31 */
+#define	TX_RNG_ERR_LOGH_ERR_MASK		0x0000000080000000ULL
+
+/* Transmit Ring Error codes */
+#define	TXDMA_RING_PKT_PRT_ERR			0
+#define	TXDMA_RING_CONF_PART_ERR		0x01
+#define	TXDMA_RING_NACK_PKT_ERR			0x02
+#define	TXDMA_RING_NACK_PREF_ERR		0x03
+#define	TXDMA_RING_PREF_BUF_PAR_ERR		0x04
+#define	TXDMA_RING_TX_RING_OFLOW_ERR		0x05
+#define	TXDMA_RING_PKT_SIZE_ERR			0x06
+
+typedef union _tx_rng_err_logh_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t err:1;
+			uint32_t merr:1;
+			uint32_t errcode:4;
+			uint32_t res2:14;
+			uint32_t err_addr:12;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t err_addr:12;
+			uint32_t res2:14;
+			uint32_t errcode:4;
+			uint32_t merr:1;
+			uint32_t err:1;
+
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} tx_rng_err_logh_t, *p_tx_rng_err_logh_t;
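+/*
+ * Illustrative sketch: decoding the high word of the error log with the
+ * error codes above.  "logh" is a hypothetical local holding the value
+ * read from TX_RNG_ERR_LOGH_REG.
+ *
+ *	tx_rng_err_logh_t logh;
+ *
+ *	logh.value = <64-bit read of TX_RNG_ERR_LOGH_REG>;
+ *	if (logh.bits.ldw.err &&
+ *	    logh.bits.ldw.errcode == TXDMA_RING_PREF_BUF_PAR_ERR)
+ *		(report a parity error at logh.bits.ldw.err_addr)
+ */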
+
+
+/* Transmit Ring Error Log Low (DMC + 0x40050) */
+#define	TX_RNG_ERR_LOGL_ERR_ADDR_SHIFT		0	/* RO bit 31:0 */
+#define	TX_RNG_ERR_LOGL_ERR_ADDR_MASK		0x00000000FFFFFFFFULL
+
+typedef union _tx_rng_err_logl_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t err_addr:32;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t err_addr:32;
+
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} tx_rng_err_logl_t, *p_tx_rng_err_logl_t;
+
+/*
+ * TDMC_INTR_DBG_REG (DMC + 0x40060)
+ */
+typedef union _tdmc_intr_dbg_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res:16;
+			uint32_t mk:1;
+			uint32_t rsvd:7;
+			uint32_t mbox_err:1;
+			uint32_t pkt_size_err:1;
+			uint32_t tx_ring_oflow:1;
+			uint32_t pref_buf_par_err:1;
+			uint32_t nack_pref:1;
+			uint32_t nack_pkt_rd:1;
+			uint32_t conf_part_err:1;
+			uint32_t pkt_part_err:1;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t pkt_part_err:1;
+			uint32_t conf_part_err:1;
+			uint32_t nack_pkt_rd:1;
+			uint32_t nack_pref:1;
+			uint32_t pref_buf_par_err:1;
+			uint32_t tx_ring_oflow:1;
+			uint32_t pkt_size_err:1;
+			uint32_t mbox_err:1;
+			uint32_t rsvd:7;
+			uint32_t mk:1;
+			uint32_t res:16;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} tdmc_intr_dbg_t, *p_tdmc_intr_dbg_t;
+
+
+/*
+ * TX_CS_DBG (DMC + 0x40068)
+ */
+typedef union _tx_cs_dbg_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res1:4;
+			uint32_t pkt_cnt:12;
+			uint32_t res2:16;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t res2:16;
+			uint32_t pkt_cnt:12;
+			uint32_t res1:4;
+#endif
+		} hdw;
+
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t rsvd:32;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t rsvd:32;
+
+#endif
+		} ldw;
+
+#ifndef _BIG_ENDIAN
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res1:4;
+			uint32_t pkt_cnt:12;
+			uint32_t res2:16;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t res2:16;
+			uint32_t pkt_cnt:12;
+			uint32_t res1:4;
+#endif
+	} hdw;
+
+#endif
+	} bits;
+} tx_cs_dbg_t, *p_tx_cs_dbg_t;
+
+#define	TXDMA_MAILBOX_BYTE_LENGTH		64
+#define	TXDMA_MAILBOX_UNUSED			24
+
+typedef struct _txdma_mailbox_t {
+	tx_cs_t			tx_cs;				/* 8 bytes */
+	tx_dma_pre_st_t		tx_dma_pre_st;			/* 8 bytes */
+	tx_ring_hdl_t		tx_ring_hdl;			/* 8 bytes */
+	tx_ring_kick_t		tx_ring_kick;			/* 8 bytes */
+	uint32_t		tx_rng_err_logh;		/* 4 bytes */
+	uint32_t		tx_rng_err_logl;		/* 4 bytes */
+	uint32_t		resv[TXDMA_MAILBOX_UNUSED];
+} txdma_mailbox_t, *p_txdma_mailbox_t;
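+/*
+ * Illustrative note (an assumption about the usual mailbox usage, not a
+ * statement from the PRM): the structure above describes the host-memory
+ * area the TXDMA updates, so a consumer can read the shadowed status
+ * without a PIO access.  "mbox" and "head" are hypothetical locals.
+ *
+ *	p_txdma_mailbox_t mbox;
+ *
+ *	if (mbox->tx_cs.bits.ldw.mk)
+ *		head = mbox->tx_ring_hdl.bits.ldw.head;
+ */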
+
+#if OLD
+/* Transmit Ring Scheduler (per port) */
+#define	TX_DMA_MAP_OFFSET(port)		(port * 8 + TX_DMA_MAP_REG)
+#define	TX_DMA_MAP_PORT_OFFSET(port)	(port * 8)
+#define	TX_DMA_MAP_REG			(FZC_DMC + 0x50000)
+#define	TX_DMA_MAP0_REG			(FZC_DMC + 0x50000)
+#define	TX_DMA_MAP1_REG			(FZC_DMC + 0x50008)
+#define	TX_DMA_MAP2_REG			(FZC_DMC + 0x50010)
+#define	TX_DMA_MAP3_REG			(FZC_DMC + 0x50018)
+
+#define	TX_DMA_MAP_SHIFT		0	/* RO bit 31:0 */
+#define	TX_DMA_MAPMASK			0x00000000FFFFFFFFULL
+
+typedef union _tx_dma_map_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t bind:32;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t bind:32;
+
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} tx_dma_map_t, *p_tx_dma_map_t;
+#endif
+
+#if OLD
+/* Transmit Ring Scheduler: DRR Weight (32 Channels) */
+#define	DRR_WT_REG			(FZC_DMC + 0x51000)
+#define	DRR_WT_SHIFT			0	/* RO bit 19:0 */
+#define	DRR_WT_MASK			0x00000000000FFFFFULL
+
+#define	TXDMA_DRR_RNG_USE_OFFSET(channel)	(channel * 16)
+
+typedef union _drr_wt_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res1_1:12;
+			uint32_t wt:20;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t wt:20;
+			uint32_t res1_1:12;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} drr_wt_t, *p_drr_wt_t;
+#endif
+
+#if OLD
+
+/* Performance Monitoring (32 Channels) */
+#define	TXRNG_USE_REG			(FZC_DMC + 0x51008)
+#define	TXRNG_USE_CNT_SHIFT		0	/* RO bit 26:0 */
+#define	TXRNG_USE_CNT_MASK		0x0000000007FFFFFFULL
+#define	TXRNG_USE_OFLOW_SHIFT		27	/* RO bit 27 */
+#define	TXRNG_USE_OFLOW_MASK		0x0000000008000000ULL
+
+typedef union _txrng_use_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t res1_1:4;
+			uint32_t oflow:1;
+			uint32_t cnt:27;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t cnt:27;
+			uint32_t oflow:1;
+			uint32_t res1_1:4;
+
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} txrng_use_t, *p_txrng_use_t;
+
+#endif
+
+/*
+ * Internal Transmit Packet Format (16 bytes)
+ */
+#define	TX_PKT_HEADER_SIZE			16
+#define	TX_MAX_GATHER_POINTERS			15
+#define	TX_GATHER_POINTERS_THRESHOLD		8
+/*
+ * There are bugs in the hardware
+ * and the max transfer length is changed from 4096 to 4076.
+ *
+ * Jumbo MTU is changed from 9500 to 9216.
+ */
+#define	TX_MAX_TRANSFER_LENGTH			4076
+#define	TX_JUMBO_MTU				9216
+
+#define	TX_PKT_HEADER_PAD_SHIFT			0	/* bit 2:0 */
+#define	TX_PKT_HEADER_PAD_MASK			0x0000000000000007ULL
+#define	TX_PKT_HEADER_TOT_XFER_LEN_SHIFT	16	/* bit 29:16 */
+#define	TX_PKT_HEADER_TOT_XFER_LEN_MASK		0x000000000000FFF8ULL
+#define	TX_PKT_HEADER_L4STUFF_SHIFT		32	/* bit 37:32 */
+#define	TX_PKT_HEADER_L4STUFF_MASK		0x0000003F00000000ULL
+#define	TX_PKT_HEADER_L4START_SHIFT		40	/* bit 45:40 */
+#define	TX_PKT_HEADER_L4START_MASK		0x00003F0000000000ULL
+#define	TX_PKT_HEADER_L3START_SHIFT		48	/* bit 51:48 */
+#define	TX_PKT_HEADER_IHL_SHIFT			52	/* bit 55:52 */
+#define	TX_PKT_HEADER_VLAN__SHIFT		56	/* bit 56 */
+#define	TX_PKT_HEADER_TCP_UDP_CRC32C_SHIFT	57	/* bit 57 */
+#define	TX_PKT_HEADER_LLC_SHIFT			57	/* bit 57 */
+#define	TX_PKT_HEADER_TCP_UDP_CRC32C_SET	0x0200000000000000ULL
+#define	TX_PKT_HEADER_TCP_UDP_CRC32C_MASK	0x0200000000000000ULL
+#define	TX_PKT_HEADER_L4_PROTO_OP_SHIFT		2	/* bit 59:58 */
+#define	TX_PKT_HEADER_L4_PROTO_OP_MASK		0x0C00000000000000ULL
+#define	TX_PKT_HEADER_V4_HDR_CS_SHIFT		60	/* bit 60 */
+#define	TX_PKT_HEADER_V4_HDR_CS_SET		0x1000000000000000ULL
+#define	TX_PKT_HEADER_V4_HDR_CS_MASK		0x1000000000000000ULL
+#define	TX_PKT_HEADER_IP_VER_SHIFT		61	/* bit 61 */
+#define	TX_PKT_HEADER_IP_VER_MASK		0x2000000000000000ULL
+#define	TX_PKT_HEADER_PKT_TYPE_SHIFT		62	/* bit 62 */
+#define	TX_PKT_HEADER_PKT_TYPE_MASK		0x4000000000000000ULL
+
+/* L4 Protocol Operations */
+#define	TX_PKT_L4_PROTO_OP_NOP			0x00
+#define	TX_PKT_L4_PROTO_OP_FULL_L4_CSUM		0x01
+#define	TX_PKT_L4_PROTO_OP_L4_PAYLOAD_CSUM	0x02
+#define	TX_PKT_L4_PROTO_OP_SCTP_CRC32		0x04
+
+/* Transmit Packet Types */
+#define	TX_PKT_PKT_TYPE_NOP			0x00
+#define	TX_PKT_PKT_TYPE_TCP			0x01
+#define	TX_PKT_PKT_TYPE_UDP			0x02
+#define	TX_PKT_PKT_TYPE_SCTP			0x03
+
+typedef union _tx_pkt_header_t {
+	uint64_t value;
+	struct {
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t pad:3;
+			uint32_t resv2:13;
+			uint32_t tot_xfer_len:14;
+			uint32_t resv1:2;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t pad:3;
+			uint32_t resv2:13;
+			uint32_t tot_xfer_len:14;
+			uint32_t resv1:2;
+#endif
+		} ldw;
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t l4stuff:6;
+			uint32_t resv3:2;
+			uint32_t l4start:6;
+			uint32_t resv2:2;
+			uint32_t l3start:4;
+			uint32_t ihl:4;
+			uint32_t vlan:1;
+			uint32_t llc:1;
+			uint32_t res1:3;
+			uint32_t ip_ver:1;
+			uint32_t cksum_en_pkt_type:2;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t l4stuff:6;
+			uint32_t resv3:2;
+			uint32_t l4start:6;
+			uint32_t resv2:2;
+			uint32_t l3start:4;
+			uint32_t ihl:4;
+			uint32_t vlan:1;
+			uint32_t llc:1;
+			uint32_t res1:3;
+			uint32_t ip_ver:1;
+			uint32_t cksum_en_pkt_type:2;
+#endif
+		} hdw;
+	} bits;
+} tx_pkt_header_t, *p_tx_pkt_header_t;
+
+typedef struct _tx_pkt_hdr_all_t {
+	tx_pkt_header_t		pkthdr;
+	uint64_t		reserved;
+} tx_pkt_hdr_all_t, *p_tx_pkt_hdr_all_t;
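+/*
+ * Illustrative sketch: filling the internal header for a TCP frame with
+ * L4 checksum offload.  "hdrp", "pktlen", "l3off" and "l4off" are
+ * hypothetical locals, and the exact offset units are as defined by the
+ * PRM (not restated here).
+ *
+ *	p_tx_pkt_header_t hdrp;
+ *
+ *	hdrp->value = 0;
+ *	hdrp->bits.ldw.tot_xfer_len = pktlen;
+ *	hdrp->bits.hdw.l3start = l3off;
+ *	hdrp->bits.hdw.l4start = l4off;
+ *	hdrp->bits.hdw.cksum_en_pkt_type = TX_PKT_PKT_TYPE_TCP;
+ */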
+
+/* Debug only registers */
+#define	TDMC_INJ_PAR_ERR_REG		(FZC_DMC + 0x45040)
+#define	TDMC_INJ_PAR_ERR_MASK		0x0000000000FFFFFFULL
+#define	TDMC_INJ_PAR_ERR_MASK_N2	0x000000000000FFFFULL
+
+typedef union _tdmc_inj_par_err_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t rsvc:8;
+			uint32_t inject_parity_error:24;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t inject_parity_error:24;
+			uint32_t rsvc:8;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} tdmc_inj_par_err_t, *p_tdmc_inj_par_err_t;
+
+typedef union _tdmc_inj_par_err_n2_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t rsvc:16;
+			uint32_t inject_parity_error:16;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t inject_parity_error:16;
+			uint32_t rsvc:16;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} tdmc_inj_par_err_n2_t, *p_tdmc_inj_par_err_n2_t;
+
+#define	TDMC_DBG_SEL_REG		(FZC_DMC + 0x45080)
+#define	TDMC_DBG_SEL_MASK		0x000000000000003FULL
+
+typedef union _tdmc_dbg_sel_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t rsvc:26;
+			uint32_t dbg_sel:6;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t dbg_sel:6;
+			uint32_t rsvc:26;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} tdmc_dbg_sel_t, *p_tdmc_dbg_sel_t;
+
+#define	TDMC_TRAINING_REG		(FZC_DMC + 0x45088)
+#define	TDMC_TRAINING_MASK		0x00000000FFFFFFFFULL
+
+typedef union _tdmc_training_t {
+	uint64_t value;
+	struct {
+#ifdef	_BIG_ENDIAN
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t vec:32;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t vec:32;
+#endif
+		} ldw;
+#ifndef _BIG_ENDIAN
+		uint32_t hdw;
+#endif
+	} bits;
+} tdmc_training_t, *p_tdmc_training_t;
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _SYS_NXGE_NXGE_TXDMA_HW_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/sys/nxge/nxge_virtual.h	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,80 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef	_SYS_NXGE_NXGE_VIRTUAL_H
+#define	_SYS_NXGE_NXGE_VIRTUAL_H
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+/*
+ * Neptune Virtualization Control Operations
+ */
+typedef enum {
+	NXGE_CTLOPS_NIUTYPE,
+	NXGE_CTLOPS_GET_ATTRIBUTES,
+	NXGE_CTLOPS_GET_HWPROPERTIES,
+	NXGE_CTLOPS_SET_HWPROPERTIES,
+	NXGE_CTLOPS_GET_SHARED_REG,
+	NXGE_CTLOPS_SET_SHARED_REG,
+	NXGE_CTLOPS_UPDATE_SHARED_REG,
+	NXGE_CTLOPS_GET_LOCK_BLOCK,
+	NXGE_CTLOPS_GET_LOCK_TRY,
+	NXGE_CTLOPS_FREE_LOCK,
+	NXGE_CTLOPS_SET_SHARED_REG_LOCK,
+	NXGE_CTLOPS_CLEAR_BIT_SHARED_REG,
+	NXGE_CTLOPS_CLEAR_BIT_SHARED_REG_UL,
+	NXGE_CTLOPS_END
+} nxge_ctl_enum_t;
+
+/* 12 bits are available */
+#define	COMMON_CFG_VALID	0x01
+#define	COMMON_CFG_BUSY	0x02
+#define	COMMON_INIT_START	0x04
+#define	COMMON_INIT_DONE	0x08
+#define	COMMON_TCAM_BUSY	0x10
+#define	COMMON_VLAN_BUSY	0x20
+
+#define	NXGE_SR_FUNC_BUSY_SHIFT	0x8
+#define	NXGE_SR_FUNC_BUSY_MASK	0xf00
+
+
+#define	COMMON_TXDMA_CFG	1
+#define	COMMON_RXDMA_CFG	2
+#define	COMMON_RXDMA_GRP_CFG	4
+#define	COMMON_CLASS_CFG	8
+#define	COMMON_QUICK_CFG	0x10
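+/*
+ * Illustrative sketch: the values above are bit flags intended to be
+ * OR-ed into a shared status word; a hypothetical consumer might test
+ * and mark it like this ("shared" is not a symbol defined here).
+ *
+ *	if ((shared & COMMON_CFG_VALID) && !(shared & COMMON_CFG_BUSY))
+ *		shared |= COMMON_TCAM_BUSY;
+ */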
+
+nxge_status_t nxge_intr_mask_mgmt(p_nxge_t nxgep);
+void nxge_virint_regs_dump(p_nxge_t nxgep);
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _SYS_NXGE_NXGE_VIRTUAL_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/sys/nxge/nxge_zcp.h	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,75 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef	_SYS_NXGE_NXGE_ZCP_H
+#define	_SYS_NXGE_NXGE_ZCP_H
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#include <nxge_zcp_hw.h>
+#include <npi_zcp.h>
+
+typedef	struct _zcp_errlog {
+	zcp_state_machine_t	state_mach;
+} zcp_errlog_t, *p_zcp_errlog_t;
+
+typedef struct _nxge_zcp_stats_t {
+	uint32_t 		errors;
+	uint32_t 		inits;
+	uint32_t 		rrfifo_underrun;
+	uint32_t 		rrfifo_overrun;
+	uint32_t 		rspfifo_uncorr_err;
+	uint32_t 		buffer_overflow;
+	uint32_t 		stat_tbl_perr;
+	uint32_t 		dyn_tbl_perr;
+	uint32_t 		buf_tbl_perr;
+	uint32_t 		tt_program_err;
+	uint32_t 		rsp_tt_index_err;
+	uint32_t 		slv_tt_index_err;
+	uint32_t 		zcp_tt_index_err;
+	uint32_t 		zcp_access_fail;
+	uint32_t 		cfifo_ecc;
+	zcp_errlog_t		errlog;
+} nxge_zcp_stats_t, *p_nxge_zcp_stats_t;
+
+typedef	struct _nxge_zcp {
+	uint32_t		config;
+	uint32_t		iconfig;
+	nxge_zcp_stats_t	*stat;
+} nxge_zcp_t;
+
+nxge_status_t nxge_zcp_init(p_nxge_t nxgep);
+void nxge_zcp_inject_err(p_nxge_t nxgep, uint32_t);
+nxge_status_t nxge_zcp_fatal_err_recover(p_nxge_t nxgep);
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _SYS_NXGE_NXGE_ZCP_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/sys/nxge/nxge_zcp_hw.h	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,771 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef	_SYS_NXGE_NXGE_ZCP_HW_H
+#define	_SYS_NXGE_NXGE_ZCP_HW_H
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#include <nxge_defs.h>
+
+/*
+ * Neptune Zerocopy Hardware definitions
+ * Updated to reflect PRM-0.8.
+ */
+
+#define	ZCP_CONFIG_REG		(FZC_ZCP + 0x00000)
+#define	ZCP_INT_STAT_REG	(FZC_ZCP + 0x00008)
+#define	ZCP_INT_STAT_TEST_REG	(FZC_ZCP + 0x00108)
+#define	ZCP_INT_MASK_REG	(FZC_ZCP + 0x00010)
+
+#define	ZCP_BAM4_RE_CTL_REG 	(FZC_ZCP + 0x00018)
+#define	ZCP_BAM8_RE_CTL_REG 	(FZC_ZCP + 0x00020)
+#define	ZCP_BAM16_RE_CTL_REG 	(FZC_ZCP + 0x00028)
+#define	ZCP_BAM32_RE_CTL_REG 	(FZC_ZCP + 0x00030)
+
+#define	ZCP_DST4_RE_CTL_REG 	(FZC_ZCP + 0x00038)
+#define	ZCP_DST8_RE_CTL_REG 	(FZC_ZCP + 0x00040)
+#define	ZCP_DST16_RE_CTL_REG 	(FZC_ZCP + 0x00048)
+#define	ZCP_DST32_RE_CTL_REG 	(FZC_ZCP + 0x00050)
+
+#define	ZCP_RAM_DATA_REG	(FZC_ZCP + 0x00058)
+#define	ZCP_RAM_DATA0_REG	(FZC_ZCP + 0x00058)
+#define	ZCP_RAM_DATA1_REG	(FZC_ZCP + 0x00060)
+#define	ZCP_RAM_DATA2_REG	(FZC_ZCP + 0x00068)
+#define	ZCP_RAM_DATA3_REG	(FZC_ZCP + 0x00070)
+#define	ZCP_RAM_DATA4_REG	(FZC_ZCP + 0x00078)
+#define	ZCP_RAM_BE_REG		(FZC_ZCP + 0x00080)
+#define	ZCP_RAM_ACC_REG		(FZC_ZCP + 0x00088)
+
+#define	ZCP_TRAINING_VECTOR_REG	(FZC_ZCP + 0x000C0)
+#define	ZCP_STATE_MACHINE_REG	(FZC_ZCP + 0x000C8)
+#define	ZCP_CHK_BIT_DATA_REG	(FZC_ZCP + 0x00090)
+#define	ZCP_RESET_CFIFO_REG	(FZC_ZCP + 0x00098)
+#define	ZCP_RESET_CFIFO_MASK	0x0F
+
+#define	ZCP_CFIFIO_RESET_WAIT		10
+#define	ZCP_P0_P1_CFIFO_DEPTH		2048
+#define	ZCP_P2_P3_CFIFO_DEPTH		1024
+#define	ZCP_NIU_CFIFO_DEPTH		1024
+
+typedef union _zcp_reset_cfifo {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t rsrvd:28;
+			uint32_t reset_cfifo3:1;
+			uint32_t reset_cfifo2:1;
+			uint32_t reset_cfifo1:1;
+			uint32_t reset_cfifo0:1;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t reset_cfifo0:1;
+			uint32_t reset_cfifo1:1;
+			uint32_t reset_cfifo2:1;
+			uint32_t reset_cfifo3:1;
+			uint32_t rsrvd:28;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} zcp_reset_cfifo_t, *p_zcp_reset_cfifo_t;
+
+#define	ZCP_CFIFO_ECC_PORT0_REG	(FZC_ZCP + 0x000A0)
+#define	ZCP_CFIFO_ECC_PORT1_REG	(FZC_ZCP + 0x000A8)
+#define	ZCP_CFIFO_ECC_PORT2_REG	(FZC_ZCP + 0x000B0)
+#define	ZCP_CFIFO_ECC_PORT3_REG	(FZC_ZCP + 0x000B8)
+
+/* NOTE: Same as RX_LOG_PAGE_HDL */
+#define	ZCP_PAGE_HDL_REG	(FZC_DMC + 0x20038)
+
+/* Data Structures */
+
+typedef union zcp_config_reg_u {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t rsvd:7;
+			uint32_t mode_32_bit:1;
+			uint32_t debug_sel:8;
+			uint32_t rdma_th:11;
+			uint32_t ecc_chk_dis:1;
+			uint32_t par_chk_dis:1;
+			uint32_t dis_buf_rn:1;
+			uint32_t dis_buf_rq_if:1;
+			uint32_t zc_enable:1;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t zc_enable:1;
+			uint32_t dis_buf_rq_if:1;
+			uint32_t dis_buf_rn:1;
+			uint32_t par_chk_dis:1;
+			uint32_t ecc_chk_dis:1;
+			uint32_t rdma_th:11;
+			uint32_t debug_sel:8;
+			uint32_t mode_32_bit:1;
+			uint32_t rsvd:7;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} zcp_config_reg_t, *zcp_config_reg_pt;
+
+#define	ZCP_DEBUG_SEL_BITS	0xFF
+#define	ZCP_DEBUG_SEL_SHIFT	16
+#define	ZCP_DEBUG_SEL_MASK	(ZCP_DEBUG_SEL_BITS << ZCP_DEBUG_SEL_SHIFT)
+#define	RDMA_TH_BITS		0x7FF
+#define	RDMA_TH_SHIFT		5
+#define	RDMA_TH_MASK		(RDMA_TH_BITS << RDMA_TH_SHIFT)
+#define	ECC_CHK_DIS		(1 << 4)
+#define	PAR_CHK_DIS		(1 << 3)
+#define	DIS_BUFF_RN		(1 << 2)
+#define	DIS_BUFF_RQ_IF		(1 << 1)
+#define	ZC_ENABLE		(1 << 0)
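+/*
+ * Illustrative sketch: building a configuration value from the bit
+ * macros above.  "rdma_threshold" is a hypothetical local and the
+ * result is what would be written to ZCP_CONFIG_REG.
+ *
+ *	uint64_t cfg;
+ *
+ *	cfg = ZC_ENABLE |
+ *	    (((uint64_t)rdma_threshold & RDMA_TH_BITS) << RDMA_TH_SHIFT);
+ */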
+
+typedef union zcp_int_stat_reg_u {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t rsvd:16;
+			uint32_t rrfifo_urun:1;
+			uint32_t rrfifo_orun:1;
+			uint32_t rsvd1:1;
+			uint32_t rspfifo_uc_err:1;
+			uint32_t buf_overflow:1;
+			uint32_t stat_tbl_perr:1;
+			uint32_t dyn_tbl_perr:1;
+			uint32_t buf_tbl_perr:1;
+			uint32_t tt_tbl_perr:1;
+			uint32_t rsp_tt_index_err:1;
+			uint32_t slv_tt_index_err:1;
+			uint32_t zcp_tt_index_err:1;
+			uint32_t cfifo_ecc3:1;
+			uint32_t cfifo_ecc2:1;
+			uint32_t cfifo_ecc1:1;
+			uint32_t cfifo_ecc0:1;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t cfifo_ecc0:1;
+			uint32_t cfifo_ecc1:1;
+			uint32_t cfifo_ecc2:1;
+			uint32_t cfifo_ecc3:1;
+			uint32_t zcp_tt_index_err:1;
+			uint32_t slv_tt_index_err:1;
+			uint32_t rsp_tt_index_err:1;
+			uint32_t tt_tbl_perr:1;
+			uint32_t buf_tbl_perr:1;
+			uint32_t dyn_tbl_perr:1;
+			uint32_t stat_tbl_perr:1;
+			uint32_t buf_overflow:1;
+			uint32_t rspfifo_uc_err:1;
+			uint32_t rsvd1:1;
+			uint32_t rrfifo_orun:1;
+			uint32_t rrfifo_urun:1;
+			uint32_t rsvd:16;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} zcp_int_stat_reg_t, *zcp_int_stat_reg_pt, zcp_int_mask_reg_t,
+	*zcp_int_mask_reg_pt;
+
+#define	RRFIFO_UNDERRUN		(1 << 15)
+#define	RRFIFO_OVERRUN		(1 << 14)
+#define	RSPFIFO_UNCORR_ERR	(1 << 12)
+#define	BUFFER_OVERFLOW		(1 << 11)
+#define	STAT_TBL_PERR		(1 << 10)
+#define	BUF_DYN_TBL_PERR	(1 << 9)
+#define	BUF_TBL_PERR		(1 << 8)
+#define	TT_PROGRAM_ERR		(1 << 7)
+#define	RSP_TT_INDEX_ERR	(1 << 6)
+#define	SLV_TT_INDEX_ERR	(1 << 5)
+#define	ZCP_TT_INDEX_ERR	(1 << 4)
+#define	CFIFO_ECC3		(1 << 3)
+#define	CFIFO_ECC0		(1 << 0)
+#define	CFIFO_ECC2		(1 << 2)
+#define	CFIFO_ECC1		(1 << 1)
+
+typedef union zcp_bam_region_reg_u {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t loj:1;
+			uint32_t range_chk_en:1;
+			uint32_t last_zcfid:10;
+			uint32_t first_zcfid:10;
+			uint32_t offset:10;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t offset:10;
+			uint32_t first_zcfid:10;
+			uint32_t last_zcfid:10;
+			uint32_t range_chk_en:1;
+			uint32_t loj:1;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} zcp_bam_region_reg_t, *zcp_bam_region_reg_pt;
+
+typedef union zcp_dst_region_reg_u {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t rsvd:22;
+			uint32_t ds_offset:10;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t ds_offset:10;
+			uint32_t rsvd:22;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} zcp_dst_region_reg_t, *zcp_dst_region_reg_pt;
+
+typedef	enum tbuf_size_e {
+	TBUF_4K		= 0,
+	TBUF_8K,
+	TBUF_16K,
+	TBUF_32K,
+	TBUF_64K,
+	TBUF_128K,
+	TBUF_256K,
+	TBUF_512K,
+	TBUF_1M,
+	TBUF_2M,
+	TBUF_4M,
+	TBUF_8M
+} tbuf_size_t;
+
+typedef	enum tbuf_num_e {
+	TBUF_NUM_4	= 0,
+	TBUF_NUM_8,
+	TBUF_NUM_16,
+	TBUF_NUM_32
+} tbuf_num_t;
+
+typedef	enum tmode_e {
+	TMODE_BASIC		= 0,
+	TMODE_AUTO_UNMAP	= 1,
+	TMODE_AUTO_ADV		= 3
+} tmode_t;
+
+typedef	struct tte_sflow_attr_s {
+	union {
+		uint64_t value;
+		struct {
+#if defined(_BIG_ENDIAN)
+			uint32_t hdw;
+#endif
+			struct {
+#if defined(_BIT_FIELDS_HTOL)
+				uint32_t ulp_end:18;
+				uint32_t num_buf:2;
+				uint32_t buf_size:4;
+				uint32_t rdc_tbl_offset:8;
+#elif defined(_BIT_FIELDS_LTOH)
+				uint32_t rdc_tbl_offset:8;
+				uint32_t buf_size:4;
+				uint32_t num_buf:2;
+				uint32_t ulp_end:18;
+#endif
+			} ldw;
+#if !defined(_BIG_ENDIAN)
+			uint32_t hdw;
+#endif
+		} bits;
+	} qw0;
+
+	union {
+		uint64_t value;
+		struct {
+#if defined(_BIG_ENDIAN)
+			uint32_t hdw;
+#endif
+			struct {
+#if defined(_BIT_FIELDS_HTOL)
+				uint32_t ring_base:12;
+				uint32_t skip:1;
+				uint32_t rsvd:1;
+				uint32_t tmode:2;
+				uint32_t unmap_all_en:1;
+				uint32_t ulp_end_en:1;
+				uint32_t ulp_end:14;
+#elif defined(_BIT_FIELDS_LTOH)
+				uint32_t ulp_end:14;
+				uint32_t ulp_end_en:1;
+				uint32_t unmap_all_en:1;
+				uint32_t tmode:2;
+				uint32_t rsvd:1;
+				uint32_t skip:1;
+				uint32_t ring_base:12;
+#endif
+			} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		} bits;
+	} qw1;
+
+	union {
+		uint64_t value;
+		struct {
+#if defined(_BIG_ENDIAN)
+			uint32_t hdw;
+#endif
+			struct {
+#if defined(_BIT_FIELDS_HTOL)
+				uint32_t busy:1;
+				uint32_t ring_size:4;
+				uint32_t ring_base:27;
+#elif defined(_BIT_FIELDS_LTOH)
+				uint32_t ring_base:27;
+				uint32_t ring_size:4;
+				uint32_t busy:1;
+#endif
+			} ldw;
+#if !defined(_BIG_ENDIAN)
+			uint32_t hdw;
+#endif
+		} bits;
+	} qw2;
+
+	union {
+		uint64_t value;
+		struct {
+#if defined(_BIG_ENDIAN)
+			uint32_t hdw;
+#endif
+			struct {
+#if defined(_BIT_FIELDS_HTOL)
+				uint32_t rsvd:16;
+				uint32_t toq:16;
+#elif defined(_BIT_FIELDS_LTOH)
+				uint32_t toq:16;
+				uint32_t rsvd:16;
+#endif
+			} ldw;
+#if !defined(_BIG_ENDIAN)
+			uint32_t hdw;
+#endif
+		} bits;
+	} qw3;
+
+	union {
+		uint64_t value;
+		struct {
+#if defined(_BIG_ENDIAN)
+			uint32_t hdw;
+#endif
+			struct {
+#if defined(_BIT_FIELDS_HTOL)
+				uint32_t rsvd:28;
+				uint32_t dat4:4;
+#elif defined(_BIT_FIELDS_LTOH)
+				uint32_t dat4:4;
+				uint32_t rsvd:28;
+#endif
+			} ldw;
+#if !defined(_BIG_ENDIAN)
+			uint32_t hdw;
+#endif
+		} bits;
+	} qw4;
+
+} tte_sflow_attr_t, *tte_sflow_attr_pt;
+
+#define	TTE_RDC_TBL_SFLOW_BITS_EN	0x0001
+#define	TTE_BUF_SIZE_BITS_EN		0x0002
+#define	TTE_NUM_BUF_BITS_EN		0x0002
+#define	TTE_ULP_END_BITS_EN		0x003E
+#define	TTE_ULP_END_EN_BITS_EN		0x0020
+#define	TTE_UNMAP_ALL_BITS_EN		0x0020
+#define	TTE_TMODE_BITS_EN		0x0040
+#define	TTE_SKIP_BITS_EN		0x0040
+#define	TTE_RING_BASE_ADDR_BITS_EN	0x0FC0
+#define	TTE_RING_SIZE_BITS_EN		0x0800
+#define	TTE_BUSY_BITS_EN		0x0800
+#define	TTE_TOQ_BITS_EN			0x3000
+
+#define	TTE_MAPPED_IN_BITS_EN		0x0000F
+#define	TTE_ANCHOR_SEQ_BITS_EN		0x000F0
+#define	TTE_ANCHOR_OFFSET_BITS_EN	0x00700
+#define	TTE_ANCHOR_BUFFER_BITS_EN	0x00800
+#define	TTE_ANCHOR_BUF_FLAG_BITS_EN	0x00800
+#define	TTE_UNMAP_ON_LEFT_BITS_EN	0x00800
+#define	TTE_ULP_END_REACHED_BITS_EN	0x00800
+#define	TTE_ERR_STAT_BITS_EN		0x01000
+#define	TTE_WR_PTR_BITS_EN		0x01000
+#define	TTE_HOQ_BITS_EN			0x0E000
+#define	TTE_PREFETCH_ON_BITS_EN		0x08000
+
+typedef	enum tring_size_e {
+	TRING_SIZE_8		= 0,
+	TRING_SIZE_16,
+	TRING_SIZE_32,
+	TRING_SIZE_64,
+	TRING_SIZE_128,
+	TRING_SIZE_256,
+	TRING_SIZE_512,
+	TRING_SIZE_1K,
+	TRING_SIZE_2K,
+	TRING_SIZE_4K,
+	TRING_SIZE_8K,
+	TRING_SIZE_16K,
+	TRING_SIZE_32K
+} tring_size_t;
+
+typedef struct tte_dflow_attr_s {
+	union {
+		uint64_t value;
+		struct {
+#if defined(_BIG_ENDIAN)
+			uint32_t hdw;
+#endif
+			struct {
+#if defined(_BIT_FIELDS_HTOL)
+				uint32_t mapped_in;
+#elif defined(_BIT_FIELDS_LTOH)
+				uint32_t mapped_in;
+#endif
+			} ldw;
+#if !defined(_BIG_ENDIAN)
+			uint32_t hdw;
+#endif
+		} bits;
+	} qw0;
+
+	union {
+		uint64_t value;
+		struct {
+#if defined(_BIG_ENDIAN)
+			uint32_t hdw;
+#endif
+			struct {
+#if defined(_BIT_FIELDS_HTOL)
+				uint32_t anchor_seq;
+#elif defined(_BIT_FIELDS_LTOH)
+				uint32_t anchor_seq;
+#endif
+			} ldw;
+#if !defined(_BIG_ENDIAN)
+			uint32_t hdw;
+#endif
+		} bits;
+	} qw1;
+
+	union {
+		uint64_t value;
+		struct {
+#if defined(_BIG_ENDIAN)
+			uint32_t hdw;
+#endif
+			struct {
+#if defined(_BIT_FIELDS_HTOL)
+				uint32_t ulp_end_reached:1;
+				uint32_t unmap_on_left:1;
+				uint32_t anchor_buf_flag:1;
+				uint32_t anchor_buf:5;
+				uint32_t anchor_offset:24;
+#elif defined(_BIT_FIELDS_LTOH)
+				uint32_t anchor_offset:24;
+				uint32_t anchor_buf:5;
+				uint32_t anchor_buf_flag:1;
+				uint32_t unmap_on_left:1;
+				uint32_t ulp_end_reached:1;
+#endif
+			} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		} bits;
+	} qw2;
+
+	union {
+		uint64_t value;
+		struct {
+#if defined(_BIG_ENDIAN)
+			uint32_t hdw;
+#endif
+			struct {
+#if defined(_BIT_FIELDS_HTOL)
+				uint32_t rsvd1:1;
+				uint32_t prefetch_on:1;
+				uint32_t hoq:16;
+				uint32_t rsvd:6;
+				uint32_t wr_ptr:6;
+				uint32_t err_stat:2;
+#elif defined(_BIT_FIELDS_LTOH)
+				uint32_t err_stat:2;
+				uint32_t wr_ptr:6;
+				uint32_t rsvd:6;
+				uint32_t hoq:16;
+				uint32_t prefetch_on:1;
+				uint32_t rsvd1:1;
+#endif
+			} ldw;
+#if !defined(_BIG_ENDIAN)
+			uint32_t hdw;
+#endif
+		} bits;
+	} qw3;
+
+	union {
+		uint64_t value;
+		struct {
+#if defined(_BIG_ENDIAN)
+			uint32_t hdw;
+#endif
+			struct {
+#if defined(_BIT_FIELDS_HTOL)
+				uint32_t rsvd:28;
+				uint32_t dat4:4;
+#elif defined(_BIT_FIELDS_LTOH)
+				uint32_t dat4:4;
+				uint32_t rsvd:28;
+#endif
+			} ldw;
+#if !defined(_BIG_ENDIAN)
+			uint32_t hdw;
+#endif
+		} bits;
+	} qw4;
+
+} tte_dflow_attr_t, *tte_dflow_attr_pt;
+
+#define	MAX_BAM_BANKS	8
+
+typedef	struct zcp_ram_unit_s {
+	uint32_t	w0;
+	uint32_t	w1;
+	uint32_t	w2;
+	uint32_t	w3;
+	uint32_t	w4;
+} zcp_ram_unit_t;
+
+typedef	enum dmaw_type_e {
+	DMAW_NO_CROSS_BUF	= 0,
+	DMAW_IP_CROSS_BUF_2,
+	DMAW_IP_CROSS_BUF_3,
+	DMAW_IP_CROSS_BUF_4
+} dmaw_type_t;
+
+typedef union zcp_ram_data_u {
+	tte_sflow_attr_t sentry;
+	tte_dflow_attr_t dentry;
+} zcp_ram_data_t, *zcp_ram_data_pt;
+
+typedef union zcp_ram_access_u {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t busy:1;
+			uint32_t rdwr:1;
+			uint32_t rsvd:1;
+			uint32_t zcfid:12;
+			uint32_t ram_sel:5;
+			uint32_t cfifo:12;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t cfifo:12;
+			uint32_t ram_sel:5;
+			uint32_t zcfid:12;
+			uint32_t rsvd:1;
+			uint32_t rdwr:1;
+			uint32_t busy:1;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} zcp_ram_access_t, *zcp_ram_access_pt;
+
+#define	ZCP_RAM_WR		0
+#define	ZCP_RAM_RD		1
+#define	ZCP_RAM_SEL_BAM0	0
+#define	ZCP_RAM_SEL_BAM1	0x1
+#define	ZCP_RAM_SEL_BAM2	0x2
+#define	ZCP_RAM_SEL_BAM3	0x3
+#define	ZCP_RAM_SEL_BAM4	0x4
+#define	ZCP_RAM_SEL_BAM5	0x5
+#define	ZCP_RAM_SEL_BAM6	0x6
+#define	ZCP_RAM_SEL_BAM7	0x7
+#define	ZCP_RAM_SEL_TT_STATIC	0x8
+#define	ZCP_RAM_SEL_TT_DYNAMIC	0x9
+#define	ZCP_RAM_SEL_CFIFO0	0x10
+#define	ZCP_RAM_SEL_CFIFO1	0x11
+#define	ZCP_RAM_SEL_CFIFO2	0x12
+#define	ZCP_RAM_SEL_CFIFO3	0x13
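+/*
+ * Illustrative sketch: composing an indirect RAM access command with the
+ * select codes above.  "acc" and "flow_id" are hypothetical locals; the
+ * value would be written to ZCP_RAM_ACC_REG and the busy bit polled
+ * until the hardware clears it.
+ *
+ *	zcp_ram_access_t acc;
+ *
+ *	acc.value = 0;
+ *	acc.bits.ldw.rdwr = ZCP_RAM_RD;
+ *	acc.bits.ldw.ram_sel = ZCP_RAM_SEL_TT_STATIC;
+ *	acc.bits.ldw.zcfid = flow_id;
+ */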
+
+typedef union zcp_ram_benable_u {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t rsvd:15;
+			uint32_t be:17;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t be:17;
+			uint32_t rsvd:15;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} zcp_ram_benable_t, *zcp_ram_benable_pt;
+
+typedef union zcp_training_vector_u {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t train_vec;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t train_vec;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} zcp_training_vector_t, *zcp_training_vector_pt;
+
+typedef union zcp_state_machine_u {
+	uint64_t value;
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+			uint32_t state;
+#elif defined(_BIT_FIELDS_LTOH)
+			uint32_t state;
+#endif
+		} ldw;
+#if !defined(_BIG_ENDIAN)
+		uint32_t hdw;
+#endif
+	} bits;
+} zcp_state_machine_t, *zcp_state_machine_pt;
+
+typedef	struct zcp_hdr_s {
+	uint16_t	zflowid;
+	uint16_t	tcp_hdr_len;
+	uint16_t	tcp_payld_len;
+	uint16_t	head_of_que;
+	uint32_t	first_b_offset;
+	boolean_t	reach_buf_end;
+	dmaw_type_t	dmaw_type;
+	uint8_t		win_buf_offset;
+} zcp_hdr_t;
+
+typedef	union _zcp_ecc_ctrl {
+	uint64_t value;
+
+	struct {
+#if defined(_BIG_ENDIAN)
+		uint32_t	w1;
+#endif
+		struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint32_t dis_dbl	: 1;
+		uint32_t res3		: 13;
+		uint32_t cor_dbl	: 1;
+		uint32_t cor_sng	: 1;
+		uint32_t res2		: 5;
+		uint32_t cor_all	: 1;
+		uint32_t res1		: 7;
+		uint32_t cor_lst	: 1;
+		uint32_t cor_snd	: 1;
+		uint32_t cor_fst	: 1;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint32_t cor_fst	: 1;
+		uint32_t cor_snd	: 1;
+		uint32_t cor_lst	: 1;
+		uint32_t res1		: 7;
+		uint32_t cor_all	: 1;
+		uint32_t res2		: 5;
+		uint32_t cor_sng	: 1;
+		uint32_t cor_dbl	: 1;
+		uint32_t res3		: 13;
+		uint32_t dis_dbl	: 1;
+#else
+#error	one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
+#endif
+	} w0;
+
+#if !defined(_BIG_ENDIAN)
+		uint32_t	w1;
+#endif
+	} bits;
+} zcp_ecc_ctrl_t;
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _SYS_NXGE_NXGE_ZCP_HW_H */
--- a/usr/src/uts/i86pc/Makefile	Mon Mar 19 18:02:35 2007 -0700
+++ b/usr/src/uts/i86pc/Makefile	Mon Mar 19 19:37:22 2007 -0700
@@ -42,9 +42,10 @@
 
 INTEL_LINTS	 = genunix
 
+LINT_PARALLEL_KMODS	= $(PARALLEL_KMODS:nxge=)
 LINT_LIBS	 = $(LINT_LIB) \
 		   $(GENUNIX_KMODS:%=$(LINT_LIB_DIR)/llib-l%.ln) \
-		   $(PARALLEL_KMODS:%=$(LINT_LIB_DIR)/llib-l%.ln) \
+		   $(LINT_PARALLEL_KMODS:%=$(LINT_LIB_DIR)/llib-l%.ln) \
 		   $(CLOSED_KMODS:%=$(LINT_LIB_DIR)/llib-l%.ln) \
 		   $(INTEL_LINTS:%=$(INTEL_LIB_DIR)/llib-l%.ln)
 
--- a/usr/src/uts/i86pc/Makefile.i86pc.shared	Mon Mar 19 18:02:35 2007 -0700
+++ b/usr/src/uts/i86pc/Makefile.i86pc.shared	Mon Mar 19 19:37:22 2007 -0700
@@ -246,6 +246,7 @@
 DRV_KMODS	+= cpc
 DRV_KMODS	+= pci
 DRV_KMODS	+= npe
+DRV_KMODS	+= nxge
 DRV_KMODS	+= pci-ide
 DRV_KMODS	+= xsvc
 DRV_KMODS	+= mc-amd
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/i86pc/nxge/Makefile	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,125 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+# uts/i86pc/nxge/Makefile
+#
+# Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+# Use is subject to license terms.
+#
+#
+# ident	"%Z%%M%	%I%	%E% SMI"
+#
+#	This makefile drives the production of the Sun NIU
+#	10G/1G Ethernet leaf driver kernel module.
+#
+#
+#	Path to the base of the uts directory tree (usually /usr/src/uts).
+#
+UTSBASE	= ../..
+
+#
+#	Define the module and object file sets.
+#
+MODULE		= nxge
+NXGE_OBJECTS =	$(NXGE_OBJS) $(NXGE_NPI_OBJS)
+OBJECTS		=  $(NXGE_OBJECTS:%=$(OBJS_DIR)/%)
+LINTS		= $(NXGE_OBJECTS:%.o=$(LINTS_DIR)/%.ln)
+ROOTMODULE	= $(ROOT_PSM_DRV_DIR)/$(MODULE)
+CONF_SRCDIR	= $(UTSBASE)/common/io/nxge
+
+#
+#	Include common rules.
+#
+include $(UTSBASE)/i86pc/Makefile.i86pc
+
+#
+#	Override defaults to build a unique, local modstubs.o.
+#
+MODSTUBS_DIR	= $(OBJS_DIR)
+
+CLEANFILES	+= $(MODSTUBS_O)
+
+#
+#	Define targets
+#
+ALL_TARGET	= $(BINARY)
+LINT_TARGET	= $(MODULE).lint
+INSTALL_TARGET	= $(BINARY) $(ROOTMODULE) $(ROOT_CONFFILE)
+
+#
+#
+# Turn on doubleword alignment for 64 bit registers
+#
+CFLAGS	+= -dalign
+#
+# Include nxge specific header files
+#
+INC_PATH	+= -I$(UTSBASE)/common
+INC_PATH	+= -I$(UTSBASE)/common/io/nxge/npi
+INC_PATH	+= -I$(UTSBASE)/common/sys/nxge
+#
+#
+# lint pass one enforcement
+#
+CFLAGS += -DSOLARIS
+#
+# Only build 64-bit version.
+ALL_BUILDS      = $(ALL_BUILDS64)
+DEF_BUILDS      = $(DEF_BUILDS64)
+CLEANLINTFILES  += $(LINT64_FILES)
+#
+LINTFLAGS += -DSOLARIS
+#
+# STREAMS, DDI API limitations and other ON header file definitions such as ethernet.h
+# force us to turn off these lint checks.
+#
+LINTTAGS	+= -erroff=E_BAD_PTR_CAST_ALIGN
+LINTTAGS	+= -erroff=E_PTRDIFF_OVERFLOW
+LINTTAGS	+= -erroff=E_FALSE_LOGICAL_EXPR
+#
+#	Driver depends on mac & IP
+#
+LDFLAGS		+= -dy -N misc/mac -N drv/ip
+
+#
+#	Default build targets.
+#
+.KEEP_STATE:
+
+def:		$(DEF_DEPS)
+
+all:		$(ALL_DEPS)
+
+clean:		$(CLEAN_DEPS)
+
+clobber:	$(CLOBBER_DEPS)
+
+lint:		$(LINT_DEPS)
+
+modlintlib:	$(MODLINTLIB_DEPS)
+
+clean.lint:	$(CLEAN_LINT_DEPS)
+
+install:	$(INSTALL_DEPS)
+
+#
+#	Include common targets.
+#
+include $(UTSBASE)/i86pc/Makefile.targ
--- a/usr/src/uts/sun4u/Makefile.sun4u.shared	Mon Mar 19 18:02:35 2007 -0700
+++ b/usr/src/uts/sun4u/Makefile.sun4u.shared	Mon Mar 19 19:37:22 2007 -0700
@@ -403,6 +403,7 @@
 DRV_KMODS	+= rmclomv
 DRV_KMODS	+= wrsmd
 DRV_KMODS	+= sf
+DRV_KMODS	+= nxge
 
 $(CLOSED_BUILD)CLOSED_DRV_KMODS	+= ctsmc
 $(CLOSED_BUILD)CLOSED_DRV_KMODS	+= i2bsc 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/sun4u/nxge/Makefile	Mon Mar 19 19:37:22 2007 -0700
@@ -0,0 +1,119 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+# uts/sun4u/nxge/Makefile
+#
+# Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+# Use is subject to license terms.
+#
+#
+# ident	"%Z%%M%	%I%	%E% SMI"
+#
+#	This makefile drives the production of the Sun
+#	10G/1G Ethernet leaf driver kernel module.
+#
+#
+#	Path to the base of the uts directory tree (usually /usr/src/uts).
+#
+UTSBASE	= ../..
+
+#
+#	Define the module and object file sets.
+#
+MODULE		= nxge
+NXGE_OBJECTS =	$(NXGE_OBJS) $(NXGE_NPI_OBJS)
+OBJECTS		=  $(NXGE_OBJECTS:%=$(OBJS_DIR)/%)
+LINTS		= $(NXGE_OBJECTS:%.o=$(LINTS_DIR)/%.ln)
+ROOTMODULE	= $(ROOT_PSM_DRV_DIR)/$(MODULE)
+CONF_SRCDIR	= $(UTSBASE)/common/io/nxge
+
+#
+#	Include common rules.
+#
+include $(UTSBASE)/sun4u/Makefile.sun4u
+
+#
+#	Override defaults to build a unique, local modstubs.o.
+#
+MODSTUBS_DIR	= $(OBJS_DIR)
+
+CLEANFILES	+= $(MODSTUBS_O)
+
+#
+#	Define targets
+#
+ALL_TARGET	= $(BINARY)
+LINT_TARGET	= $(MODULE).lint
+INSTALL_TARGET	= $(BINARY) $(ROOTMODULE) $(ROOT_CONFFILE)
+
+#
+#
+# Turn on doubleword alignment for 64 bit registers
+#
+CFLAGS	+= -dalign
+#
+# Include nxge specific header files
+#
+INC_PATH	+= -I$(UTSBASE)/common
+INC_PATH	+= -I$(UTSBASE)/common/io/nxge/npi
+INC_PATH	+= -I$(UTSBASE)/common/sys/nxge
+#
+#
+# lint pass one enforcement
+#
+CFLAGS += -DSOLARIS
+#
+LINTFLAGS += -DSOLARIS
+#
+# STREAMS, DDI API limitations and other ON header file definitions such as ethernet.h
+# force us to turn off these lint checks.
+#
+LINTTAGS	+= -erroff=E_BAD_PTR_CAST_ALIGN
+LINTTAGS	+= -erroff=E_PTRDIFF_OVERFLOW
+#
+#	Driver depends on mac & IP
+#
+LDFLAGS		+= -dy -N misc/mac -N drv/ip
+
+#
+#	Default build targets.
+#
+.KEEP_STATE:
+
+def:		$(DEF_DEPS)
+
+all:		$(ALL_DEPS)
+
+clean:		$(CLEAN_DEPS)
+
+clobber:	$(CLOBBER_DEPS)
+
+lint:		$(LINT_DEPS)
+
+modlintlib:	$(MODLINTLIB_DEPS)
+
+clean.lint:	$(CLEAN_LINT_DEPS)
+
+install:	$(INSTALL_DEPS)
+
+#
+#	Include common targets.
+#
+include $(UTSBASE)/sun4u/Makefile.targ
--- a/usr/src/uts/sun4v/Makefile.files	Mon Mar 19 18:02:35 2007 -0700
+++ b/usr/src/uts/sun4v/Makefile.files	Mon Mar 19 19:37:22 2007 -0700
@@ -207,20 +207,3 @@
 
 ARCFOUR_OBJS	+= arcfour.o arcfour_crypt.o
 
-#
-#	N2/NIU 10G driver module
-#
-NXGE_OBJS =	nxge_mac.o nxge_ipp.o nxge_rxdma.o 		\
-		nxge_txdma.o nxge_txc.o	nxge_main.o		\
-		nxge_hw.o nxge_fzc.o nxge_virtual.o		\
-		nxge_send.o nxge_classify.o nxge_fflp.o		\
-		nxge_fflp_hash.o nxge_ndd.o nxge_kstats.o	\
-		nxge_zcp.o nxge_fm.o nxge_espc.o nxge_hcall.o
-
-NXGE_NPI_OBJS =	\
-		npi.o npi_mac.o	npi_ipp.o			\
-		npi_txdma.o npi_rxdma.o	npi_txc.o		\
-		npi_zcp.o npi_espc.o npi_fflp.o			\
-		npi_vir.o
-
-#
--- a/usr/src/uts/sun4v/Makefile.rules	Mon Mar 19 18:02:35 2007 -0700
+++ b/usr/src/uts/sun4v/Makefile.rules	Mon Mar 19 19:37:22 2007 -0700
@@ -20,7 +20,7 @@
 #
 
 #
-# Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+# Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 # Use is subject to license terms.
 #
 # ident	"%Z%%M%	%I%	%E% SMI"
@@ -73,17 +73,6 @@
 $(OBJS_DIR)/%.o:		$(UTSBASE)/sun4v/io/px/%.s
 	$(COMPILE.s) -o $@ $<
 
-$(OBJS_DIR)/%.o:		$(UTSBASE)/sun4v/io/nxge/%.c
-	$(COMPILE.c) -o $@ $<
-	$(CTFCONVERT_O)
-
-$(OBJS_DIR)/%.o:		$(UTSBASE)/sun4v/io/nxge/%.s
-	$(COMPILE.s) -o $@ $<
-
-$(OBJS_DIR)/%.o:		$(UTSBASE)/sun4v/io/nxge/npi/%.c
-	$(COMPILE.c) -o $@ $<
-	$(CTFCONVERT_O)
-
 $(OBJS_DIR)/%.o:		$(UTSBASE)/sun4v/io/fpc/%.c
 	$(COMPILE.c) -o $@ $<
 	$(CTFCONVERT_O)
@@ -171,15 +160,6 @@
 $(LINTS_DIR)/%.ln:		$(UTSBASE)/sun4v/io/niumx/%.c
 	@($(LHEAD) $(LINT.c) $< $(LTAIL))
 
-$(LINTS_DIR)/%.ln:		$(UTSBASE)/sun4v/io/nxge/%.c
-	@($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln:		$(UTSBASE)/sun4v/io/nxge/%.s
-	@($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln:		$(UTSBASE)/sun4v/io/nxge/npi/%.c
-	@($(LHEAD) $(LINT.c) $< $(LTAIL))
-
 $(LINTS_DIR)/%.ln:		$(UTSBASE)/sun4v/io/fpc/%.c
 	@($(LHEAD) $(LINT.c) $< $(LTAIL))
 
--- a/usr/src/uts/sun4v/io/nxge/npi/npi.c	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,107 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#include <npi.h>
-#include <sys/nxge/nxge_impl.h>
-
-nxge_os_mutex_t npidebuglock;
-int npi_debug_init = 0;
-uint64_t npi_debug_level = 0;
-
-void
-npi_debug_msg(npi_handle_function_t function, uint64_t level, char *fmt, ...)
-{
-	char msg_buffer[1024];
-	char prefix_buffer[32];
-	int cmn_level = CE_CONT;
-	va_list ap;
-
-	if ((level & npi_debug_level) ||
-		(level & NPI_REG_CTL) ||
-		(level & NPI_ERR_CTL)) {
-
-		if (npi_debug_init == 0) {
-			MUTEX_INIT(&npidebuglock, NULL, MUTEX_DRIVER, NULL);
-			npi_debug_init = 1;
-		}
-
-		MUTEX_ENTER(&npidebuglock);
-
-		if (level & NPI_ERR_CTL) {
-			cmn_level = CE_WARN;
-		}
-
-		va_start(ap, fmt);
-		(void) vsprintf(msg_buffer, fmt, ap);
-		va_end(ap);
-
-		(void) sprintf(prefix_buffer, "%s%d(%d):", "npi",
-				function.instance, function.function);
-
-		MUTEX_EXIT(&npidebuglock);
-		cmn_err(cmn_level, "!%s %s\n", prefix_buffer, msg_buffer);
-	}
-}
-
-void
-npi_rtrace_buf_init(rtrace_t *rt)
-{
-	int i;
-
-	rt->next_idx = 0;
-	rt->last_idx = MAX_RTRACE_ENTRIES - 1;
-	rt->wrapped = B_FALSE;
-	for (i = 0; i < MAX_RTRACE_ENTRIES; i++) {
-		rt->buf[i].ctl_addr = TRACE_CTL_INVALID;
-		rt->buf[i].val_l32 = 0;
-		rt->buf[i].val_h32 = 0;
-	}
-}
-
-void
-npi_rtrace_update(npi_handle_t handle, boolean_t wr, rtrace_t *rt,
-		    uint32_t addr, uint64_t val)
-{
-	int idx;
-	idx = rt->next_idx;
-	if (wr == B_TRUE)
-		rt->buf[idx].ctl_addr = (addr & TRACE_ADDR_MASK)
-						| TRACE_CTL_WR;
-	else
-		rt->buf[idx].ctl_addr = (addr & TRACE_ADDR_MASK);
-	rt->buf[idx].ctl_addr |= (((handle.function.function
-				<< TRACE_FUNC_SHIFT) & TRACE_FUNC_MASK) |
-				((handle.function.instance
-				<< TRACE_INST_SHIFT) & TRACE_INST_MASK));
-	rt->buf[idx].val_l32 = val & 0xFFFFFFFF;
-	rt->buf[idx].val_h32 = (val >> 32) & 0xFFFFFFFF;
-	rt->next_idx++;
-	if (rt->next_idx > rt->last_idx) {
-		rt->next_idx = 0;
-		rt->wrapped = B_TRUE;
-	}
-}
--- a/usr/src/uts/sun4v/io/nxge/npi/npi.h	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,247 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef _NPI_H
-#define	_NPI_H
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#ifdef	__cplusplus
-extern "C" {
-#endif
-
-#include <nxge_common_impl.h>
-
-typedef	uint32_t			npi_status_t;
-
-/* Common Block ID */
-
-#define	MAC_BLK_ID			0x1
-#define	TXMAC_BLK_ID			0x2
-#define	RXMAC_BLK_ID			0x3
-#define	MIF_BLK_ID			0x4
-#define	IPP_BLK_ID			0x5
-#define	TXC_BLK_ID			0x6
-#define	TXDMA_BLK_ID			0x7
-#define	RXDMA_BLK_ID			0x8
-#define	ZCP_BLK_ID			0x9
-#define	ESPC_BLK_ID			0xa
-#define	FFLP_BLK_ID			0xb
-#define	PHY_BLK_ID			0xc
-#define	ETHER_SERDES_BLK_ID		0xd
-#define	PCIE_SERDES_BLK_ID		0xe
-#define	VIR_BLK_ID			0xf
-
-/* Common HW error code */
-/* HW unable to exit from reset state. */
-#define	RESET_FAILED			0x81
-
-/* Write operation failed on indirect write. */
-#define	WRITE_FAILED			0x82
-/* Read operation failed on indirect read.	 */
-#define	READ_FAILED			0x83
-
-/* Error code boundary */
-
-#define	COMMON_SW_ERR_START		0x40
-#define	COMMON_SW_ERR_END		0x4f
-#define	BLK_SPEC_SW_ERR_START		0x50
-#define	BLK_SPEC_SW_ERR_END		0x7f
-#define	COMMON_HW_ERR_START		0x80
-#define	COMMON_HW_ERR_END		0x8f
-#define	BLK_SPEC_HW_ERR_START		0x90
-#define	BLK_SPEC_HW_ERR_END		0xbf
-
-#define	IS_PORT				0x00100000
-#define	IS_CHAN				0x00200000
-
-/* Common SW errors code */
-
-#define	PORT_INVALID			0x41	/* Invalid port number */
-#define	CHANNEL_INVALID			0x42	/* Invalid dma channel number */
-#define	OPCODE_INVALID			0x43	/* Invalid opcode */
-#define	REGISTER_INVALID		0x44	/* Invalid register number */
-#define	COUNTER_INVALID			0x45	/* Invalid counter number */
-#define	CONFIG_INVALID			0x46	/* Invalid config input */
-#define	LOGICAL_PAGE_INVALID		0x47	/* Invalid logical page # */
-#define	VLAN_INVALID			0x48	/* Invalid Vlan ID */
-#define	RDC_TAB_INVALID			0x49	/* Invalid RDC Group Number */
-#define	LOCATION_INVALID		0x4a	/* Invalid Entry Location */
-
-#define	NPI_SUCCESS			0		/* Operation succeed */
-#define	NPI_FAILURE			0x80000000	/* Operation failed */
-
-#define	NPI_CNT_CLR_VAL			0
-
-/*
- * Block identifier starts at bit 8.
- */
-#define	NPI_BLOCK_ID_SHIFT		8
-
-/*
- * Port, channel and misc. information starts at bit 12.
- */
-#define	NPI_PORT_CHAN_SHIFT			12
-
-/*
- * Software Block specific error codes start at 0x50.
- */
-#define	NPI_BK_ERROR_START		0x50
-
-/*
- * Hardware block specific error codes start at 0x90.
- */
-#define	NPI_BK_HW_ER_START		0x90
-
-/* Structures for register tracing */
-
-typedef struct _rt_buf {
-	uint32_t	ctl_addr;
-	uint32_t	val_l32;
-	uint32_t	val_h32;
-} rt_buf_t;
-
-/*
- * Control Address field format
- *
- * Bit 0 - 23: Address
- * Bit 24 - 25: Function Number
- * Bit 26 - 29: Instance Number
- * Bit 30: Read/Write Direction bit
- * Bit 31: Invalid bit
- */
-
-#define	MAX_RTRACE_ENTRIES	1024
-#define	MAX_RTRACE_IOC_ENTRIES	64
-#define	TRACE_ADDR_MASK		0x00FFFFFF
-#define	TRACE_FUNC_MASK		0x03000000
-#define	TRACE_INST_MASK		0x3C000000
-#define	TRACE_CTL_WR		0x40000000
-#define	TRACE_CTL_INVALID	0x80000000
-#define	TRACE_FUNC_SHIFT	24
-#define	TRACE_INST_SHIFT	26
-#define	MSG_BUF_SIZE		1024
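
The comment above gives the layout of the ctl_addr word recorded in each rt_buf_t trace entry; the TRACE_* masks and shifts encode it. A minimal illustrative sketch (hypothetical helper name, not part of the driver) of packing that word:

/* Editorial sketch only: pack an rt_buf_t ctl_addr word per the layout above. */
static uint32_t
example_pack_ctl_addr(uint32_t addr, uint32_t func, uint32_t inst,
	boolean_t is_write)
{
	uint32_t ctl = addr & TRACE_ADDR_MASK;

	ctl |= (func << TRACE_FUNC_SHIFT) & TRACE_FUNC_MASK;
	ctl |= (inst << TRACE_INST_SHIFT) & TRACE_INST_MASK;
	if (is_write)
		ctl |= TRACE_CTL_WR;
	return (ctl);
}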
-
-
-typedef struct _rtrace {
-	uint16_t	next_idx;
-	uint16_t	last_idx;
-	boolean_t	wrapped;
-	rt_buf_t	buf[MAX_RTRACE_ENTRIES];
-} rtrace_t;
-
-typedef struct _err_inject {
-	uint8_t		blk_id;
-	uint8_t		chan;
-	uint32_t	err_id;
-	uint32_t	control;
-} err_inject_t;
-
-/* Configuration options */
-typedef enum config_op {
-	DISABLE = 0,
-	ENABLE,
-	INIT
-} config_op_t;
-
-/* I/O options */
-typedef enum io_op {
-	OP_SET = 0,
-	OP_GET,
-	OP_UPDATE,
-	OP_CLEAR
-} io_op_t;
-
-/* Counter options */
-typedef enum counter_op {
-	SNAP_STICKY = 0,
-	SNAP_ACCUMULATE,
-	CLEAR
-} counter_op_t;
-
-/* NPI attribute */
-typedef struct _npi_attr_t {
-	uint32_t type;
-	uint32_t idata[16];
-	uint32_t odata[16];
-} npi_attr_t;
-
-/* NPI Handle */
-typedef	struct	_npi_handle_function {
-	uint16_t		instance;
-	uint16_t		function;
-} npi_handle_function_t;
-
-/* NPI Handle */
-typedef	struct	_npi_handle {
-	npi_reg_handle_t	regh;
-	npi_reg_ptr_t		regp;
-	boolean_t		is_vraddr; /* virtualization region address */
-	npi_handle_function_t	function;
-	void * nxgep;
-} npi_handle_t;
-
-/* NPI Counter */
-typedef struct _npi_counter_t {
-	uint32_t id;
-	char *name;
-	uint32_t val;
-} npi_counter_t;
-
-/*
- * Common definitions for NPI RXDMA and TXDMA functions.
- */
-typedef struct _dma_log_page {
-	uint8_t			page_num;
-	boolean_t		valid;
-	uint8_t			func_num;
-	uint64_t		mask;
-	uint64_t		value;
-	uint64_t		reloc;
-} dma_log_page_t, *p_dma_log_page_t;
-
-extern	rtrace_t npi_rtracebuf;
-void npi_rtrace_buf_init(rtrace_t *);
-void npi_rtrace_update(npi_handle_t, boolean_t, rtrace_t *,
-			uint32_t, uint64_t);
-
-void npi_debug_msg(npi_handle_function_t, uint64_t,
-	char *, ...);
-
-#ifdef	NPI_DEBUG
-#define	NPI_DEBUG_MSG(params) npi_debug_msg params
-#else
-#define	NPI_DEBUG_MSG(params)
-#endif
-
-#define	NPI_ERROR_MSG(params) npi_debug_msg params
-#define	NPI_REG_DUMP_MSG(params) npi_debug_msg params
-
-#ifdef	__cplusplus
-}
-#endif
-
-#endif	/* _NPI_H */
--- a/usr/src/uts/sun4v/io/nxge/npi/npi_espc.c	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,352 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#include <npi_espc.h>
-#include <nxge_espc.h>
-
-npi_status_t
-npi_espc_pio_enable(npi_handle_t handle)
-{
-	NXGE_REG_WR64(handle, ESPC_REG_ADDR(ESPC_PIO_EN_REG), 0x1);
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_espc_pio_disable(npi_handle_t handle)
-{
-	NXGE_REG_WR64(handle, ESPC_PIO_EN_REG, 0);
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_espc_eeprom_entry(npi_handle_t handle, io_op_t op, uint32_t addr,
-			uint8_t *data)
-{
-	uint64_t val = 0;
-
-	if ((addr & ~EPC_EEPROM_ADDR_BITS) != 0) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			" npi_espc_eeprom_entry"
-			" Invalid input addr <0x%x>\n",
-			addr));
-		return (NPI_FAILURE | NPI_ESPC_EEPROM_ADDR_INVALID);
-	}
-
-	switch (op) {
-	case OP_SET:
-		val = EPC_WRITE_INITIATE | (addr << EPC_EEPROM_ADDR_SHIFT) |
-			*data;
-		NXGE_REG_WR64(handle, ESPC_REG_ADDR(ESPC_PIO_STATUS_REG), val);
-		EPC_WAIT_RW_COMP(handle, &val, EPC_WRITE_COMPLETE);
-		if ((val & EPC_WRITE_COMPLETE) == 0) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				" npi_espc_eeprom_entry"
-				" HW Error: EEPROM_WR <0x%x>\n",
-				val));
-			return (NPI_FAILURE | NPI_ESPC_EEPROM_WRITE_FAILED);
-		}
-		break;
-	case OP_GET:
-		val = EPC_READ_INITIATE | (addr << EPC_EEPROM_ADDR_SHIFT);
-		NXGE_REG_WR64(handle, ESPC_REG_ADDR(ESPC_PIO_STATUS_REG), val);
-		EPC_WAIT_RW_COMP(handle, &val, EPC_READ_COMPLETE);
-		if ((val & EPC_READ_COMPLETE) == 0) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				" npi_espc_eeprom_entry"
-				" HW Error: EEPROM_RD <0x%x>",
-				val));
-			return (NPI_FAILURE | NPI_ESPC_EEPROM_READ_FAILED);
-		}
-		NXGE_REG_RD64(handle, ESPC_REG_ADDR(ESPC_PIO_STATUS_REG), &val);
-		*data = val & EPC_EEPROM_DATA_MASK;
-		break;
-	default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			" npi_espc_eeprom_entry"
-			" Invalid opcode, addr <0x%x>\n", addr));
-		return (NPI_FAILURE | NPI_ESPC_OPCODE_INVALID);
-	}
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_espc_mac_addr_get(npi_handle_t handle, uint8_t *data)
-{
-	mac_addr_0_t mac0;
-	mac_addr_1_t mac1;
-
-	NXGE_REG_RD64(handle, ESPC_MAC_ADDR_0, &mac0.value);
-	data[0] = mac0.bits.w0.byte0;
-	data[1] = mac0.bits.w0.byte1;
-	data[2] = mac0.bits.w0.byte2;
-	data[3] = mac0.bits.w0.byte3;
-
-	NXGE_REG_RD64(handle, ESPC_MAC_ADDR_1, &mac1.value);
-	data[4] = mac1.bits.w0.byte4;
-	data[5] = mac1.bits.w0.byte5;
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_espc_num_ports_get(npi_handle_t handle, uint8_t *data)
-{
-	uint64_t val = 0;
-
-	NXGE_REG_RD64(handle, ESPC_NUM_PORTS_MACS, &val);
-	val &= NUM_PORTS_MASK;
-	*data = (uint8_t)val;
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_espc_num_macs_get(npi_handle_t handle, uint8_t *data)
-{
-	uint64_t val = 0;
-
-	NXGE_REG_RD64(handle, ESPC_NUM_PORTS_MACS, &val);
-	val &= NUM_MAC_ADDRS_MASK;
-	val = (val >> NUM_MAC_ADDRS_SHIFT);
-	*data = (uint8_t)val;
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_espc_model_str_get(npi_handle_t handle, char *data)
-{
-	uint64_t val = 0;
-	uint16_t str_len;
-	int i, j;
-
-	NXGE_REG_RD64(handle, ESPC_MOD_STR_LEN, &val);
-	val &= MOD_STR_LEN_MASK;
-	str_len = (uint8_t)val;
-
-	if (str_len > MAX_MOD_STR_LEN) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				" npi_espc_model_str_get"
-				" Model string length %d exceeds max %d\n",
-				str_len, MAX_MOD_STR_LEN));
-		return (NPI_FAILURE | NPI_ESPC_STR_LEN_INVALID);
-	}
-
-	/*
-	 * Might have to reverse the order depending on how the string
-	 * is written.
-	 */
-	for (i = 0, j = 0; i < str_len; j++) {
-		NXGE_REG_RD64(handle, ESPC_MOD_STR(j), &val);
-		data[i++] = ((char *)&val)[3];
-		data[i++] = ((char *)&val)[2];
-		data[i++] = ((char *)&val)[1];
-		data[i++] = ((char *)&val)[0];
-	}
-
-	data[str_len] = '\0';
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_espc_bd_model_str_get(npi_handle_t handle, char *data)
-{
-	uint64_t val = 0;
-	uint16_t str_len;
-	int i, j;
-
-	NXGE_REG_RD64(handle, ESPC_BD_MOD_STR_LEN, &val);
-	val &= BD_MOD_STR_LEN_MASK;
-	str_len = (uint8_t)val;
-
-	if (str_len > MAX_BD_MOD_STR_LEN) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				" npi_espc_bd_model_str_get"
-				" Board Model string length %d "
-				"exceeds max %d\n",
-				str_len, MAX_BD_MOD_STR_LEN));
-		return (NPI_FAILURE | NPI_ESPC_STR_LEN_INVALID);
-	}
-
-	/*
-	 * Might have to reverse the order depending on how the string
-	 * is written.
-	 */
-	for (i = 0, j = 0; i < str_len; j++) {
-		NXGE_REG_RD64(handle, ESPC_BD_MOD_STR(j), &val);
-		data[i++] = ((char *)&val)[3];
-		data[i++] = ((char *)&val)[2];
-		data[i++] = ((char *)&val)[1];
-		data[i++] = ((char *)&val)[0];
-	}
-
-	data[str_len] = '\0';
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_espc_phy_type_get(npi_handle_t handle, uint8_t *data)
-{
-	phy_type_t	phy;
-
-	NXGE_REG_RD64(handle, ESPC_PHY_TYPE, &phy.value);
-	data[0] = phy.bits.w0.pt0_phy_type;
-	data[1] = phy.bits.w0.pt1_phy_type;
-	data[2] = phy.bits.w0.pt2_phy_type;
-	data[3] = phy.bits.w0.pt3_phy_type;
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_espc_port_phy_type_get(npi_handle_t handle, uint8_t *data, uint8_t portn)
-{
-	phy_type_t	phy;
-
-	ASSERT(IS_PORT_NUM_VALID(portn));
-
-	NXGE_REG_RD64(handle, ESPC_PHY_TYPE, &phy.value);
-	switch (portn) {
-	case 0:
-		*data = phy.bits.w0.pt0_phy_type;
-		break;
-	case 1:
-		*data = phy.bits.w0.pt1_phy_type;
-		break;
-	case 2:
-		*data = phy.bits.w0.pt2_phy_type;
-		break;
-	case 3:
-		*data = phy.bits.w0.pt3_phy_type;
-		break;
-	default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				" npi_espc_port_phy_type_get"
-				" Invalid Input: portn <%d>",
-				portn));
-		return (NPI_FAILURE | NPI_ESPC_PORT_INVALID);
-	}
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_espc_max_frame_get(npi_handle_t handle, uint16_t *data)
-{
-	uint64_t val = 0;
-
-	NXGE_REG_RD64(handle, ESPC_MAX_FM_SZ, &val);
-	val &= MAX_FM_SZ_MASK;
-	*data = (uint16_t)val;
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_espc_version_get(npi_handle_t handle, uint16_t *data)
-{
-	uint64_t val = 0;
-
-	NXGE_REG_RD64(handle, ESPC_VER_IMGSZ, &val);
-	val &= VER_NUM_MASK;
-	*data = (uint16_t)val;
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_espc_img_sz_get(npi_handle_t handle, uint16_t *data)
-{
-	uint64_t val = 0;
-
-	NXGE_REG_RD64(handle, ESPC_VER_IMGSZ, &val);
-	val &= IMG_SZ_MASK;
-	val = val >> IMG_SZ_SHIFT;
-	*data = (uint16_t)val;
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_espc_chksum_get(npi_handle_t handle, uint8_t *data)
-{
-	uint64_t val = 0;
-
-	NXGE_REG_RD64(handle, ESPC_CHKSUM, &val);
-	val &= CHKSUM_MASK;
-	*data = (uint8_t)val;
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_espc_intr_num_get(npi_handle_t handle, uint8_t *data)
-{
-	intr_num_t	intr;
-
-	NXGE_REG_RD64(handle, ESPC_INTR_NUM, &intr.value);
-	data[0] = intr.bits.w0.pt0_intr_num;
-	data[1] = intr.bits.w0.pt1_intr_num;
-	data[2] = intr.bits.w0.pt2_intr_num;
-	data[3] = intr.bits.w0.pt3_intr_num;
-
-	return (NPI_SUCCESS);
-}
-
-void
-npi_espc_dump(npi_handle_t handle)
-{
-	int i;
-	uint64_t val = 0;
-
-	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-				    "Dumping SEEPROM registers directly:\n\n"));
-
-	for (i = 0; i < 23; i++) {
-		NXGE_REG_RD64(handle, ESPC_NCR_REGN(i), &val);
-		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-					    "reg[%d]      0x%llx\n",
-					    i, val & 0xffffffff));
-	}
-
-	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL, "\n\n"));
-}
-
-uint32_t
-npi_espc_reg_get(npi_handle_t handle, int reg_idx)
-{
-	uint64_t val = 0;
-	uint32_t reg_val = 0;
-
-	NXGE_REG_RD64(handle, ESPC_NCR_REGN(reg_idx), &val);
-	reg_val = val & 0xffffffff;
-
-	return (reg_val);
-}
--- a/usr/src/uts/sun4v/io/nxge/npi/npi_espc.h	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,87 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef _NPI_ESPC_H
-#define	_NPI_ESPC_H
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#ifdef	__cplusplus
-extern "C" {
-#endif
-
-#include <npi.h>
-#include <nxge_espc_hw.h>
-
-#define	EPC_WAIT_RW_COMP(handle, val_p, comp_bit) {\
-	uint32_t cnt = MAX_PIO_RETRIES;\
-	do {\
-		NXGE_DELAY(EPC_RW_WAIT);\
-		NXGE_REG_RD64(handle, ESPC_REG_ADDR(ESPC_PIO_STATUS_REG),\
-				val_p); cnt--;\
-	} while (((*(val_p) & comp_bit) == 0) && (cnt > 0));\
-}
-
-/* ESPC specific errors */
-
-#define	ESPC_EEPROM_ADDR_INVALID	0x51
-#define	ESPC_STR_LEN_INVALID		0x91
-
-/* ESPC error return macros */
-
-#define	NPI_ESPC_EEPROM_ADDR_INVALID	((ESPC_BLK_ID << 8) |\
-					ESPC_EEPROM_ADDR_INVALID)
-#define	NPI_ESPC_EEPROM_WRITE_FAILED	((ESPC_BLK_ID << 8) | WRITE_FAILED)
-#define	NPI_ESPC_EEPROM_READ_FAILED	((ESPC_BLK_ID << 8) | READ_FAILED)
-#define	NPI_ESPC_OPCODE_INVALID		((ESPC_BLK_ID << 8) | OPCODE_INVALID)
-#define	NPI_ESPC_STR_LEN_INVALID	((ESPC_BLK_ID << 8) |\
-					ESPC_STR_LEN_INVALID)
-#define	NPI_ESPC_PORT_INVALID		((ESPC_BLK_ID << 8) | PORT_INVALID)
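
EPC_WAIT_RW_COMP above polls ESPC_PIO_STATUS_REG until the given completion bit rises or the retry count runs out; the caller supplies a local 64-bit status word and re-checks the bit afterwards, as npi_espc_eeprom_entry() does. A minimal illustrative sketch (hypothetical function name, not part of the driver; EPC_READ_COMPLETE is assumed to come from the included hardware header, as used by npi_espc.c) of that pattern for a read:

/* Editorial sketch only: wait for EEPROM read completion and report failure. */
static npi_status_t
example_wait_eeprom_read(npi_handle_t handle)
{
	uint64_t val = 0;

	EPC_WAIT_RW_COMP(handle, &val, EPC_READ_COMPLETE);
	if ((val & EPC_READ_COMPLETE) == 0)
		return (NPI_FAILURE | NPI_ESPC_EEPROM_READ_FAILED);
	return (NPI_SUCCESS);
}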
-
-npi_status_t npi_espc_pio_enable(npi_handle_t);
-npi_status_t npi_espc_pio_disable(npi_handle_t);
-npi_status_t npi_espc_eeprom_entry(npi_handle_t, io_op_t,
-				uint32_t, uint8_t *);
-npi_status_t npi_espc_mac_addr_get(npi_handle_t, uint8_t *);
-npi_status_t npi_espc_num_ports_get(npi_handle_t, uint8_t *);
-npi_status_t npi_espc_num_macs_get(npi_handle_t, uint8_t *);
-npi_status_t npi_espc_model_str_get(npi_handle_t, char *);
-npi_status_t npi_espc_bd_model_str_get(npi_handle_t, char *);
-npi_status_t npi_espc_phy_type_get(npi_handle_t, uint8_t *);
-npi_status_t npi_espc_port_phy_type_get(npi_handle_t, uint8_t *,
-				uint8_t);
-npi_status_t npi_espc_max_frame_get(npi_handle_t, uint16_t *);
-npi_status_t npi_espc_version_get(npi_handle_t, uint16_t *);
-npi_status_t npi_espc_img_sz_get(npi_handle_t, uint16_t *);
-npi_status_t npi_espc_chksum_get(npi_handle_t, uint8_t *);
-npi_status_t npi_espc_intr_num_get(npi_handle_t, uint8_t *);
-uint32_t npi_espc_reg_get(npi_handle_t, int);
-void npi_espc_dump(npi_handle_t);
-
-#ifdef	__cplusplus
-}
-#endif
-
-#endif	/* _NPI_ESPC_H */
--- a/usr/src/uts/sun4v/io/nxge/npi/npi_fflp.c	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,2720 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#include <npi_fflp.h>
-#include <nxge_common.h>
-
-/* macros to compute class configuration register offsets */
-
-#define	  GET_TCAM_CLASS_OFFSET(cls) \
-	(FFLP_TCAM_CLS_BASE_OFFSET + (cls - 2) * 8)
-#define	  GET_TCAM_KEY_OFFSET(cls) \
-	(FFLP_TCAM_KEY_BASE_OFFSET + (cls - 4) * 8)
-#define	  GET_FLOW_KEY_OFFSET(cls) \
-	(FFLP_FLOW_KEY_BASE_OFFSET + (cls - 4) * 8)
-
-#define	  HASHTBL_PART_REG_STEP 8192
-#define	  HASHTBL_PART_REG_VIR_OFFSET 0x2100
-#define	  HASHTBL_PART_REG_VIR_STEP 0x4000
-#define	  GET_HASHTBL_PART_OFFSET_NVIR(partid, reg)	\
-	((partid  * HASHTBL_PART_REG_STEP) + reg)
-
-#define	  GET_HASHTBL_PART_OFFSET(handle, partid, reg)	\
-	    (handle.is_vraddr ?					\
-	    (((partid & 0x1) * HASHTBL_PART_REG_VIR_STEP) +	\
-	    (reg & 0x8) + (HASHTBL_PART_REG_VIR_OFFSET)) :	\
-	    (partid * HASHTBL_PART_REG_STEP) + reg)
-
-#define	 FFLP_PART_OFFSET(partid, reg) ((partid  * 8) + reg)
-#define	 FFLP_VLAN_OFFSET(vid, reg) ((vid  * 8) + reg)
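
As a worked example of the offset arithmetic above (the numbers follow directly from the macros): GET_HASHTBL_PART_OFFSET_NVIR(3, FFLP_HASH_TBL_ADDR_REG) resolves to FFLP_HASH_TBL_ADDR_REG + 3 * 8192, and FFLP_VLAN_OFFSET(100, FFLP_ENET_VLAN_TBL_REG) resolves to FFLP_ENET_VLAN_TBL_REG + 800.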
-
-#define	 TCAM_COMPLETION_TRY_COUNT 10
-#define	 BIT_ENABLE	0x1
-#define	 BIT_DISABLE	0x0
-
-#define	 FCRAM_PARTITION_VALID(partid) \
-	((partid < NXGE_MAX_RDC_GRPS))
-#define	FFLP_VLAN_VALID(vid) \
-	((vid > 0) && (vid < NXGE_MAX_VLANS))
-#define	FFLP_PORT_VALID(port) \
-	((port < MAX_PORTS_PER_NXGE))
-#define	FFLP_RDC_TABLE_VALID(table) \
-	((table < NXGE_MAX_RDC_GRPS))
-#define	TCAM_L3_USR_CLASS_VALID(class) \
-	((class >= TCAM_CLASS_IP_USER_4) && (class <= TCAM_CLASS_IP_USER_7))
-#define	TCAM_L2_USR_CLASS_VALID(class) \
-	((class == TCAM_CLASS_ETYPE_1) || (class == TCAM_CLASS_ETYPE_2))
-#define	TCAM_L3_CLASS_VALID(class) \
-	((class >= TCAM_CLASS_IP_USER_4) && (class <= TCAM_CLASS_SCTP_IPV6))
-#define	TCAM_CLASS_VALID(class) \
-	((class >= TCAM_CLASS_ETYPE_1) && (class <= TCAM_CLASS_RARP))
-
-
-uint64_t fflp_fzc_offset[] = {
-	FFLP_ENET_VLAN_TBL_REG, FFLP_L2_CLS_ENET1_REG, FFLP_L2_CLS_ENET2_REG,
-	FFLP_TCAM_KEY_IP_USR4_REG, FFLP_TCAM_KEY_IP_USR5_REG,
-	FFLP_TCAM_KEY_IP_USR6_REG, FFLP_TCAM_KEY_IP_USR7_REG,
-	FFLP_TCAM_KEY_IP4_TCP_REG, FFLP_TCAM_KEY_IP4_UDP_REG,
-	FFLP_TCAM_KEY_IP4_AH_ESP_REG, FFLP_TCAM_KEY_IP4_SCTP_REG,
-	FFLP_TCAM_KEY_IP6_TCP_REG, FFLP_TCAM_KEY_IP6_UDP_REG,
-	FFLP_TCAM_KEY_IP6_AH_ESP_REG, FFLP_TCAM_KEY_IP6_SCTP_REG,
-	FFLP_TCAM_KEY_0_REG, FFLP_TCAM_KEY_1_REG, FFLP_TCAM_KEY_2_REG,
-	FFLP_TCAM_KEY_3_REG, FFLP_TCAM_MASK_0_REG, FFLP_TCAM_MASK_1_REG,
-	FFLP_TCAM_MASK_2_REG, FFLP_TCAM_MASK_3_REG, FFLP_TCAM_CTL_REG,
-	FFLP_VLAN_PAR_ERR_REG, FFLP_TCAM_ERR_REG, HASH_LKUP_ERR_LOG1_REG,
-	HASH_LKUP_ERR_LOG2_REG, FFLP_FCRAM_ERR_TST0_REG,
-	FFLP_FCRAM_ERR_TST1_REG, FFLP_FCRAM_ERR_TST2_REG, FFLP_ERR_MSK_REG,
-	FFLP_CFG_1_REG, FFLP_DBG_TRAIN_VCT_REG, FFLP_TCP_CFLAG_MSK_REG,
-	FFLP_FCRAM_REF_TMR_REG,  FFLP_FLOW_KEY_IP_USR4_REG,
-	FFLP_FLOW_KEY_IP_USR5_REG, FFLP_FLOW_KEY_IP_USR6_REG,
-	FFLP_FLOW_KEY_IP_USR7_REG, FFLP_FLOW_KEY_IP4_TCP_REG,
-	FFLP_FLOW_KEY_IP4_UDP_REG, FFLP_FLOW_KEY_IP4_AH_ESP_REG,
-	FFLP_FLOW_KEY_IP4_SCTP_REG, FFLP_FLOW_KEY_IP6_TCP_REG,
-	FFLP_FLOW_KEY_IP6_UDP_REG, FFLP_FLOW_KEY_IP6_AH_ESP_REG,
-	FFLP_FLOW_KEY_IP6_SCTP_REG, FFLP_H1POLY_REG, FFLP_H2POLY_REG,
-	FFLP_FLW_PRT_SEL_REG
-};
-
-const char *fflp_fzc_name[] = {
-	"FFLP_ENET_VLAN_TBL_REG", "FFLP_L2_CLS_ENET1_REG",
-	"FFLP_L2_CLS_ENET2_REG", "FFLP_TCAM_KEY_IP_USR4_REG",
-	"FFLP_TCAM_KEY_IP_USR5_REG", "FFLP_TCAM_KEY_IP_USR6_REG",
-	"FFLP_TCAM_KEY_IP_USR7_REG", "FFLP_TCAM_KEY_IP4_TCP_REG",
-	"FFLP_TCAM_KEY_IP4_UDP_REG", "FFLP_TCAM_KEY_IP4_AH_ESP_REG",
-	"FFLP_TCAM_KEY_IP4_SCTP_REG", "FFLP_TCAM_KEY_IP6_TCP_REG",
-	"FFLP_TCAM_KEY_IP6_UDP_REG", "FFLP_TCAM_KEY_IP6_AH_ESP_REG",
-	"FFLP_TCAM_KEY_IP6_SCTP_REG", "FFLP_TCAM_KEY_0_REG",
-	"FFLP_TCAM_KEY_1_REG", "FFLP_TCAM_KEY_2_REG", "FFLP_TCAM_KEY_3_REG",
-	"FFLP_TCAM_MASK_0_REG", "FFLP_TCAM_MASK_1_REG", "FFLP_TCAM_MASK_2_REG",
-	"FFLP_TCAM_MASK_3_REG", "FFLP_TCAM_CTL_REG", "FFLP_VLAN_PAR_ERR_REG",
-	"FFLP_TCAM_ERR_REG", "HASH_LKUP_ERR_LOG1_REG",
-	"HASH_LKUP_ERR_LOG2_REG", "FFLP_FCRAM_ERR_TST0_REG",
-	"FFLP_FCRAM_ERR_TST1_REG", "FFLP_FCRAM_ERR_TST2_REG",
-	"FFLP_ERR_MSK_REG", "FFLP_CFG_1_REG", "FFLP_DBG_TRAIN_VCT_REG",
-	"FFLP_TCP_CFLAG_MSK_REG", "FFLP_FCRAM_REF_TMR_REG",
-	"FFLP_FLOW_KEY_IP_USR4_REG", "FFLP_FLOW_KEY_IP_USR5_REG",
-	"FFLP_FLOW_KEY_IP_USR6_REG", "FFLP_FLOW_KEY_IP_USR7_REG",
-	"FFLP_FLOW_KEY_IP4_TCP_REG", "FFLP_FLOW_KEY_IP4_UDP_REG",
-	"FFLP_FLOW_KEY_IP4_AH_ESP_REG", "FFLP_FLOW_KEY_IP4_SCTP_REG",
-	"FFLP_FLOW_KEY_IP6_TCP_REG", "FFLP_FLOW_KEY_IP6_UDP_REG",
-	"FFLP_FLOW_KEY_IP6_AH_ESP_REG",
-	"FFLP_FLOW_KEY_IP6_SCTP_REG", "FFLP_H1POLY_REG", "FFLP_H2POLY_REG",
-	"FFLP_FLW_PRT_SEL_REG"
-};
-
-uint64_t fflp_reg_offset[] = {
-	FFLP_HASH_TBL_ADDR_REG, FFLP_HASH_TBL_DATA_REG,
-	FFLP_HASH_TBL_DATA_LOG_REG
-};
-
-const char *fflp_reg_name[] = {
-	"FFLP_HASH_TBL_ADDR_REG", "FFLP_HASH_TBL_DATA_REG",
-	"FFLP_HASH_TBL_DATA_LOG_REG"
-};
-
-
-
-
-npi_status_t
-npi_fflp_dump_regs(npi_handle_t handle)
-{
-
-	uint64_t value;
-	int num_regs, i;
-
-	num_regs = sizeof (fflp_fzc_offset) / sizeof (uint64_t);
-	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-		"\nFFLP_FZC Register Dump \n"));
-	for (i = 0; i < num_regs; i++) {
-		REG_PIO_READ64(handle, fflp_fzc_offset[i], &value);
-		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-			" %8llx %s\t %8llx \n",
-			fflp_fzc_offset[i], fflp_fzc_name[i], value));
-
-	}
-
-	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-					    "\nFFLP Register Dump\n"));
-	num_regs = sizeof (fflp_reg_offset) / sizeof (uint64_t);
-
-	for (i = 0; i < num_regs; i++) {
-		REG_PIO_READ64(handle, fflp_reg_offset[i], &value);
-		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-			" %8llx %s\t %8llx \n",
-			fflp_reg_offset[i], fflp_reg_name[i], value));
-
-	}
-
-	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-					    "\n FFLP Register Dump done\n"));
-
-	return (NPI_SUCCESS);
-}
-
-void
-npi_fflp_vlan_tbl_dump(npi_handle_t handle)
-{
-	uint64_t offset;
-	vlan_id_t vlan_id;
-	uint64_t value;
-	vlan_id_t start = 0, stop = NXGE_MAX_VLANS;
-
-	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-		"\nVlan Table Dump \n"));
-
-	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-		"VID\t Offset\t Value\n"));
-
-	for (vlan_id = start; vlan_id < stop; vlan_id++) {
-		offset = FFLP_VLAN_OFFSET(vlan_id, FFLP_ENET_VLAN_TBL_REG);
-		REG_PIO_READ64(handle, offset, &value);
-		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-			    "%x\t %llx\t %llx\n", vlan_id, offset, value));
-	}
-
-}
-
-static uint64_t
-npi_fflp_tcam_check_completion(npi_handle_t handle, tcam_op_t op_type);
-
-/*
- * npi_fflp_tcam_check_completion()
- * Returns TCAM completion status.
- *
- * Input:
- *           op_type :        Read, Write, Compare
- *           handle  :        OS specific handle
- *
- * Output:
- *        For Read and Write operations:
- *        0                        Successful
- *        NPI_FFLP_TCAM_HW_ERROR   Fail/timeout
- *
- *       For Compare operations (debug only )
- *        TCAM_REG_CTL read value    on success
- *                     value contains match location
- *        NPI_TCAM_COMP_NO_MATCH          no match
- *
- */
-static uint64_t
-npi_fflp_tcam_check_completion(npi_handle_t handle, tcam_op_t op_type)
-{
-
-	uint32_t try_counter, tcam_delay = 10;
-	tcam_ctl_t tctl;
-
-	try_counter = TCAM_COMPLETION_TRY_COUNT;
-
-	switch (op_type) {
-	case TCAM_RWC_STAT:
-
-		READ_TCAM_REG_CTL(handle, &tctl.value);
-		while ((try_counter) &&
-				(tctl.bits.ldw.stat != TCAM_CTL_RWC_RWC_STAT)) {
-			try_counter--;
-			NXGE_DELAY(tcam_delay);
-			READ_TCAM_REG_CTL(handle, &tctl.value);
-		}
-
-		if (!try_counter) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " TCAM RWC_STAT operation"
-					    " failed to complete \n"));
-			return (NPI_FFLP_TCAM_HW_ERROR);
-		}
-
-		tctl.value = 0;
-		break;
-
-	case TCAM_RWC_MATCH:
-		READ_TCAM_REG_CTL(handle, &tctl.value);
-
-		while ((try_counter) &&
-			(tctl.bits.ldw.match != TCAM_CTL_RWC_RWC_MATCH)) {
-			try_counter--;
-			NXGE_DELAY(tcam_delay);
-			READ_TCAM_REG_CTL(handle, &tctl.value);
-		}
-
-		if (!try_counter) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " TCAM Match operation"
-				    " failed to find match \n"));
-			tctl.value = NPI_TCAM_COMP_NO_MATCH;
-		}
-
-
-		break;
-
-	default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-		" Invalid TCAM completion Request \n"));
-		return (NPI_FFLP_ERROR |
-		    NPI_TCAM_ERROR | OPCODE_INVALID);
-	}
-
-	return (tctl.value);
-}
-
-/*
- * npi_fflp_tcam_entry_invalidate()
- *
- * invalidates entry at tcam location
- *
- * Input
- * handle  :        OS specific handle
- * location	:	TCAM location
- *
- * Return
- *   NPI_SUCCESS
- *   NPI_FFLP_TCAM_HW_ERROR
- *
- */
-npi_status_t
-npi_fflp_tcam_entry_invalidate(npi_handle_t handle, tcam_location_t location)
-{
-
-	tcam_ctl_t tctl, tctl_stat;
-
-/*
- * Need to write zero to class field.
- * Class field is bits [195:191].
- * This corresponds to TCAM key 0 register
- *
- */
-
-
-	WRITE_TCAM_REG_MASK0(handle, 0xffULL);
-	WRITE_TCAM_REG_KEY0(handle, 0x0ULL);
-	tctl.value = 0;
-	tctl.bits.ldw.location = location;
-	tctl.bits.ldw.rwc = TCAM_CTL_RWC_TCAM_WR;
-
-	WRITE_TCAM_REG_CTL(handle, tctl.value);
-
-	tctl_stat.value = npi_fflp_tcam_check_completion(handle, TCAM_RWC_STAT);
-
-	if (tctl_stat.value & NPI_FAILURE)
-		return (NPI_FFLP_TCAM_HW_ERROR);
-
-	return (NPI_SUCCESS);
-
-}
-
-/*
- * npi_fflp_tcam_entry_match()
- *
- * lookup a tcam entry in the TCAM
- *
- * Input
- * handle  :        OS specific handle
- * tcam_ptr   :     TCAM entry ptr
- *
- * Return
- *
- *	 NPI_FAILURE | NPI_XX_ERROR:	     Operational Error (HW etc ...)
- *	 NPI_TCAM_NO_MATCH:		     no match
- *	 0 - TCAM_SIZE:			     matching entry location (if match)
- */
-int
-npi_fflp_tcam_entry_match(npi_handle_t handle,  tcam_entry_t *tcam_ptr)
-{
-
-	uint64_t tcam_stat = 0;
-	tcam_ctl_t tctl, tctl_stat;
-
-	WRITE_TCAM_REG_MASK0(handle, tcam_ptr->mask0);
-	WRITE_TCAM_REG_MASK1(handle, tcam_ptr->mask1);
-	WRITE_TCAM_REG_MASK2(handle, tcam_ptr->mask2);
-	WRITE_TCAM_REG_MASK3(handle, tcam_ptr->mask3);
-
-	WRITE_TCAM_REG_KEY0(handle, tcam_ptr->key0);
-	WRITE_TCAM_REG_KEY1(handle, tcam_ptr->key1);
-	WRITE_TCAM_REG_KEY2(handle, tcam_ptr->key2);
-	WRITE_TCAM_REG_KEY3(handle, tcam_ptr->key3);
-
-	tctl.value = 0;
-	tctl.bits.ldw.rwc = TCAM_CTL_RWC_TCAM_CMP;
-
-	WRITE_TCAM_REG_CTL(handle, tctl.value);
-
-	tcam_stat = npi_fflp_tcam_check_completion(handle, TCAM_RWC_STAT);
-	if (tcam_stat & NPI_FAILURE) {
-		return ((uint32_t)tcam_stat);
-	}
-
-	tctl_stat.value = npi_fflp_tcam_check_completion(handle,
-				TCAM_RWC_MATCH);
-
-	if (tctl_stat.bits.ldw.match == TCAM_CTL_RWC_RWC_MATCH) {
-		return (uint32_t)(tctl_stat.bits.ldw.location);
-	}
-
-	return ((uint32_t)tctl_stat.value);
-
-}
-
-/*
- * npi_fflp_tcam_entry_read ()
- *
- * Reads a tcam entry from the TCAM location, location
- *
- * Input:
- * handle  :        OS specific handle
- * location  :		TCAM location
- * tcam_ptr  :		TCAM entry pointer
- *
- * Return:
- * NPI_SUCCESS
- * NPI_FFLP_TCAM_RD_ERROR
- *
- */
-npi_status_t
-npi_fflp_tcam_entry_read(npi_handle_t handle,
-						    tcam_location_t location,
-						    struct tcam_entry *tcam_ptr)
-{
-
-	uint64_t tcam_stat;
-	tcam_ctl_t tctl;
-
-	tctl.value = 0;
-	tctl.bits.ldw.location = location;
-	tctl.bits.ldw.rwc = TCAM_CTL_RWC_TCAM_RD;
-
-	WRITE_TCAM_REG_CTL(handle, tctl.value);
-
-	tcam_stat = npi_fflp_tcam_check_completion(handle, TCAM_RWC_STAT);
-
-	if (tcam_stat & NPI_FAILURE) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-		    "TCAM read failed loc %d \n", location));
-		return (NPI_FFLP_TCAM_RD_ERROR);
-	}
-
-	READ_TCAM_REG_MASK0(handle, &tcam_ptr->mask0);
-	READ_TCAM_REG_MASK1(handle, &tcam_ptr->mask1);
-	READ_TCAM_REG_MASK2(handle, &tcam_ptr->mask2);
-	READ_TCAM_REG_MASK3(handle, &tcam_ptr->mask3);
-
-	READ_TCAM_REG_KEY0(handle, &tcam_ptr->key0);
-	READ_TCAM_REG_KEY1(handle, &tcam_ptr->key1);
-	READ_TCAM_REG_KEY2(handle, &tcam_ptr->key2);
-	READ_TCAM_REG_KEY3(handle, &tcam_ptr->key3);
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_fflp_tcam_entry_write()
- *
- * writes a tcam entry to the TCAM location, location
- *
- * Input:
- * handle  :        OS specific handle
- * location :	TCAM location
- * tcam_ptr :	TCAM entry pointer
- *
- * Return:
- * NPI_SUCCESS
- * NPI_FFLP_TCAM_WR_ERROR
- *
- */
-npi_status_t
-npi_fflp_tcam_entry_write(npi_handle_t handle,
-			    tcam_location_t location,
-			    tcam_entry_t *tcam_ptr)
-{
-
-	uint64_t tcam_stat;
-
-	tcam_ctl_t tctl;
-
-	WRITE_TCAM_REG_MASK0(handle, tcam_ptr->mask0);
-	WRITE_TCAM_REG_MASK1(handle, tcam_ptr->mask1);
-	WRITE_TCAM_REG_MASK2(handle, tcam_ptr->mask2);
-	WRITE_TCAM_REG_MASK3(handle, tcam_ptr->mask3);
-
-	WRITE_TCAM_REG_KEY0(handle, tcam_ptr->key0);
-	WRITE_TCAM_REG_KEY1(handle, tcam_ptr->key1);
-	WRITE_TCAM_REG_KEY2(handle, tcam_ptr->key2);
-	WRITE_TCAM_REG_KEY3(handle, tcam_ptr->key3);
-
-	NPI_DEBUG_MSG((handle.function, NPI_FFLP_CTL,
-			    " tcam write: location %x\n"
-			    " key:  %llx %llx %llx %llx \n"
-			    " mask: %llx %llx %llx %llx \n",
-			    location, tcam_ptr->key0, tcam_ptr->key1,
-			    tcam_ptr->key2, tcam_ptr->key3,
-			    tcam_ptr->mask0, tcam_ptr->mask1,
-			    tcam_ptr->mask2, tcam_ptr->mask3));
-	tctl.value = 0;
-	tctl.bits.ldw.location = location;
-	tctl.bits.ldw.rwc = TCAM_CTL_RWC_TCAM_WR;
-	NPI_DEBUG_MSG((handle.function, NPI_FFLP_CTL,
-			    " tcam write: ctl value %llx \n", tctl.value));
-	WRITE_TCAM_REG_CTL(handle, tctl.value);
-
-	tcam_stat = npi_fflp_tcam_check_completion(handle, TCAM_RWC_STAT);
-
-	if (tcam_stat & NPI_FAILURE) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-		    "TCAM Write failed loc %d \n", location));
-		return (NPI_FFLP_TCAM_WR_ERROR);
-	}
-
-	return (NPI_SUCCESS);
-}
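
A minimal illustrative usage sketch (hypothetical function name and placeholder key/mask values, not part of the driver) pairing the write above with npi_fflp_tcam_entry_read() to verify an entry:

/* Editorial sketch only: write a TCAM entry, then read it back. */
static npi_status_t
example_tcam_write_verify(npi_handle_t handle, tcam_location_t location)
{
	tcam_entry_t entry, readback;
	npi_status_t rs;

	entry.key0 = 0x1ULL;		/* placeholder key/mask values */
	entry.key1 = entry.key2 = entry.key3 = 0ULL;
	entry.mask0 = 0xffULL;
	entry.mask1 = entry.mask2 = entry.mask3 = 0ULL;

	rs = npi_fflp_tcam_entry_write(handle, location, &entry);
	if (rs != NPI_SUCCESS)
		return (rs);
	return (npi_fflp_tcam_entry_read(handle, location, &readback));
}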
-
-/*
- * npi_fflp_tcam_asc_ram_entry_write()
- *
- * writes a tcam associatedRAM at the TCAM location, location
- *
- * Input:
- * handle  :        OS specific handle
- * location :	tcam associatedRAM location
- * ram_data :	Value to write
- *
- * Return:
- * NPI_SUCCESS
- * NPI_FFLP_ASC_RAM_WR_ERROR
- *
- */
-npi_status_t
-npi_fflp_tcam_asc_ram_entry_write(npi_handle_t handle,
-				    tcam_location_t location,
-				    uint64_t ram_data)
-{
-
-	uint64_t tcam_stat = 0;
-	tcam_ctl_t tctl;
-
-
-	WRITE_TCAM_REG_KEY1(handle, ram_data);
-
-	tctl.value = 0;
-	tctl.bits.ldw.location = location;
-	tctl.bits.ldw.rwc = TCAM_CTL_RWC_RAM_WR;
-
-	NPI_DEBUG_MSG((handle.function, NPI_FFLP_CTL,
-		    " tcam ascr write: location %x data %llx ctl value %llx \n",
-		    location, ram_data, tctl.value));
-	WRITE_TCAM_REG_CTL(handle, tctl.value);
-	tcam_stat = npi_fflp_tcam_check_completion(handle, TCAM_RWC_STAT);
-
-	if (tcam_stat & NPI_FAILURE) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-		    "TCAM RAM write failed loc %d \n", location));
-		return (NPI_FFLP_ASC_RAM_WR_ERROR);
-	}
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_fflp_tcam_asc_ram_entry_read()
- *
- * reads a tcam associatedRAM content at the TCAM location, location
- *
- * Input:
- * handle  :        OS specific handle
- * location :	tcam associatedRAM location
- * ram_data :	ptr to return contents
- *
- * Return:
- * NPI_SUCCESS
- * NPI_FFLP_ASC_RAM_RD_ERROR
- *
- */
-npi_status_t
-npi_fflp_tcam_asc_ram_entry_read(npi_handle_t handle,
-				    tcam_location_t location,
-				    uint64_t *ram_data)
-{
-
-	uint64_t tcam_stat;
-	tcam_ctl_t tctl;
-
-
-	tctl.value = 0;
-	tctl.bits.ldw.location = location;
-	tctl.bits.ldw.rwc = TCAM_CTL_RWC_RAM_RD;
-
-	WRITE_TCAM_REG_CTL(handle, tctl.value);
-
-	tcam_stat = npi_fflp_tcam_check_completion(handle, TCAM_RWC_STAT);
-
-	if (tcam_stat & NPI_FAILURE) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-		    "TCAM RAM read failed loc %d \n", location));
-		return (NPI_FFLP_ASC_RAM_RD_ERROR);
-	}
-
-	READ_TCAM_REG_KEY1(handle, ram_data);
-
-	return (NPI_SUCCESS);
-}
-
-/* FFLP FCRAM Related functions */
-/* The following are FCRAM datapath functions */
-
-/*
- * npi_fflp_fcram_entry_write ()
- * Populates an FCRAM entry
- * Inputs:
- *         handle:	opaque handle interpreted by the underlying OS
- *	   partid:	Partition ID
- *	   location:	Index to the FCRAM.
- *			 Corresponds to last 20 bits of H1 value
- *	   fcram_ptr:	Pointer to the FCRAM contents to be used for writing
- *	   format:	Entry Format. Determines the size of the write.
- *			      FCRAM_ENTRY_OPTIM:   8 bytes (a 64 bit write)
- *			      FCRAM_ENTRY_EX_IP4:  32 bytes (4 X 64 bit write)
- *			      FCRAM_ENTRY_EX_IP6:  56 bytes (7 X 64 bit write)
- *
- * Outputs:
- *         NPI success/failure status code
- */
-npi_status_t
-npi_fflp_fcram_entry_write(npi_handle_t handle, part_id_t partid,
-			    uint32_t location, fcram_entry_t *fcram_ptr,
-			    fcram_entry_format_t format)
-
-{
-
-	int num_subareas = 0;
-	uint64_t addr_reg, data_reg;
-	int subarea;
-	int autoinc;
-	hash_tbl_addr_t addr;
-	switch (format) {
-	case FCRAM_ENTRY_OPTIM:
-		if (location % 8) {
-			/* need to be 8 byte aligned */
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			    " FCRAM_ENTRY_OPTIM Write:"
-			    " unaligned location %llx \n",
-			    location));
-			return (NPI_FFLP_FCRAM_LOC_INVALID);
-		}
-		num_subareas = 1;
-		autoinc = 0;
-		break;
-
-	case FCRAM_ENTRY_EX_IP4:
-		if (location % 32) {
-			/* need to be 32 byte aligned */
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			    " FCRAM_ENTRY_EX_IP4 Write:"
-			    " unaligned location %llx \n",
-			    location));
-			return (NPI_FFLP_FCRAM_LOC_INVALID);
-		}
-		num_subareas = 4;
-		autoinc = 1;
-		break;
-
-	case FCRAM_ENTRY_EX_IP6:
-		if (location % 64) {
-			/* need to be 64 byte aligned */
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			    " FCRAM_ENTRY_EX_IP6 Write:"
-			    " unaligned location %llx \n",
-			    location));
-			return (NPI_FFLP_FCRAM_LOC_INVALID);
-		}
-		num_subareas = 7;
-		autoinc = 1;
-		break;
-
-	default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-		    " fcram_entry_write:"
-		    " unknown format param location %llx\n",
-		    location));
-		return (NPI_FFLP_ERROR | NPI_FCRAM_ERROR | OPCODE_INVALID);
-	}
-
-	addr.value = 0;
-	addr.bits.ldw.autoinc = autoinc;
-	addr.bits.ldw.addr = location;
-	addr_reg = GET_HASHTBL_PART_OFFSET(handle, partid,
-					    FFLP_HASH_TBL_ADDR_REG);
-	data_reg = GET_HASHTBL_PART_OFFSET(handle, partid,
-					    FFLP_HASH_TBL_DATA_REG);
-/* write to addr reg */
-	REG_PIO_WRITE64(handle, addr_reg, addr.value);
-/* write data to the data register */
-
-	for (subarea = 0; subarea < num_subareas; subarea++) {
-		REG_PIO_WRITE64(handle, data_reg, fcram_ptr->value[subarea]);
-	}
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_fflp_fcram_entry_read ()
- * Reads an FCRAM entry
- * Inputs:
- *         handle:	opaque handle interpreted by the underlying OS
- *	   partid:	Partition ID
- *	   location:	Index to the FCRAM.
- *                  Corresponds to last 20 bits of H1 value
- *
- *	   fcram_ptr:	Pointer to the FCRAM contents to be updated
- *	   format:	Entry Format. Determines the size of the read.
- *			      FCRAM_ENTRY_OPTIM:   8 bytes (a 64 bit read)
- *			      FCRAM_ENTRY_EX_IP4:  32 bytes (4 X 64 bit read )
- *			      FCRAM_ENTRY_EX_IP6:  56 bytes (7 X 64 bit read )
- * Return:
- * NPI Success/Failure status code
- *
- */
-npi_status_t
-npi_fflp_fcram_entry_read(npi_handle_t handle,  part_id_t partid,
-			    uint32_t location, fcram_entry_t *fcram_ptr,
-			    fcram_entry_format_t format)
-{
-
-	int num_subareas = 0;
-	uint64_t addr_reg, data_reg;
-	int subarea, autoinc;
-	hash_tbl_addr_t addr;
-	switch (format) {
-		case FCRAM_ENTRY_OPTIM:
-			if (location % 8) {
-				/* need to be 8 byte aligned */
-				NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " FCRAM_ENTRY_OPTIM Read:"
-				    " unaligned location %llx \n",
-				    location));
-				return (NPI_FFLP_FCRAM_LOC_INVALID);
-			}
-			num_subareas = 1;
-			autoinc = 0;
-			break;
-		case FCRAM_ENTRY_EX_IP4:
-			if (location % 32) {
-				/* need to be 32 byte aligned */
-				NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " FCRAM_ENTRY_EX_IP4 READ:"
-				    " unaligned location %llx \n",
-				    location));
-				return (NPI_FFLP_FCRAM_LOC_INVALID);
-			}
-			num_subareas = 4;
-			autoinc = 1;
-			break;
-		case FCRAM_ENTRY_EX_IP6:
-			if (location % 64) {
-				/* need to be 64 byte aligned */
-				NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " FCRAM_ENTRY_EX_IP6 READ:"
-				    " unaligned location %llx \n",
-				    location));
-				return (NPI_FFLP_FCRAM_LOC_INVALID);
-			}
-			num_subareas = 7;
-			autoinc = 1;
-			break;
-		default:
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			    " fcram_entry_read:"
-			    " unknown format param location %llx\n",
-			    location));
-			return (NPI_FFLP_SW_PARAM_ERROR);
-	}
-
-	addr.value = 0;
-	addr.bits.ldw.autoinc = autoinc;
-	addr.bits.ldw.addr = location;
-	addr_reg = GET_HASHTBL_PART_OFFSET(handle, partid,
-			FFLP_HASH_TBL_ADDR_REG);
-	data_reg = GET_HASHTBL_PART_OFFSET(handle, partid,
-			FFLP_HASH_TBL_DATA_REG);
-/* write to addr reg */
-	REG_PIO_WRITE64(handle, addr_reg, addr.value);
-/* read data from the data register */
-	for (subarea = 0; subarea < num_subareas; subarea++) {
-		REG_PIO_READ64(handle, data_reg, &fcram_ptr->value[subarea]);
-	}
-
-
-	return (NPI_SUCCESS);
-
-}
-
-/*
- * npi_fflp_fcram_entry_invalidate ()
- * Invalidate FCRAM entry at the given location
- * Inputs:
- *	handle:		opaque handle interpreted by the underlying OS
- *	partid:		Partition ID
- *	location:	location of the FCRAM/hash entry.
- *
- * Return:
- * NPI Success/Failure status code
- */
-npi_status_t
-npi_fflp_fcram_entry_invalidate(npi_handle_t handle, part_id_t partid,
-				    uint32_t location)
-{
-
-	hash_tbl_addr_t addr;
-	uint64_t addr_reg, data_reg;
-	hash_hdr_t	   hdr;
-
-
-	if (location % 8) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			" FCRAM_ENTRY_Invalidate:"
-			" unaligned location %llx \n",
-			location));
-			/* need to be 8 byte aligned */
-		return (NPI_FFLP_FCRAM_LOC_INVALID);
-	}
-
-	addr.value = 0;
-	addr.bits.ldw.addr = location;
-	addr_reg = GET_HASHTBL_PART_OFFSET(handle, partid,
-			FFLP_HASH_TBL_ADDR_REG);
-	data_reg = GET_HASHTBL_PART_OFFSET(handle, partid,
-			FFLP_HASH_TBL_DATA_REG);
-
-/* write to addr reg */
-	REG_PIO_WRITE64(handle, addr_reg, addr.value);
-
-	REG_PIO_READ64(handle, data_reg, &hdr.value);
-	hdr.exact_hdr.valid = 0;
-	REG_PIO_WRITE64(handle, data_reg, hdr.value);
-
-	return (NPI_SUCCESS);
-
-}
-
-/*
- * npi_fflp_fcram_write_subarea ()
- * Writes to FCRAM entry subarea i.e the 8 bytes within the 64 bytes
- * pointed by the  last 20 bits of  H1. Effectively, this accesses
- * specific 8 bytes within the hash table bucket.
- *
- *  H1-->  |-----------------|
- *	   |	subarea 0    |
- *	   |_________________|
- *	   | Subarea 1	     |
- *	   |_________________|
- *	   | .......	     |
- *	   |_________________|
- *	   | Subarea 7       |
- *	   |_________________|
- *
- * Inputs:
- *         handle:	opaque handle interpreted by the underlying OS
- *	   partid:	Partition ID
- *	   location:	location of the subarea. It is derived from:
- *			Bucket = [19:15][14:0]       (20 bits of H1)
- *			location = (Bucket << 3 ) + subarea * 8
- *				 = [22:18][17:3] || subarea * 8
- *	   data:	Data
- *
- * Return:
- * NPI Success/Failure status code
- */
-npi_status_t
-npi_fflp_fcram_subarea_write(npi_handle_t handle, part_id_t partid,
-			    uint32_t location, uint64_t data)
-{
-
-	hash_tbl_addr_t addr;
-	uint64_t addr_reg, data_reg;
-
-
-	if (location % 8) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			" fcram_subarea_write:"
-			" unaligned location %llx \n",
-			location));
-			/* need to be 8 byte aligned */
-		return (NPI_FFLP_FCRAM_LOC_INVALID);
-	}
-
-	addr.value = 0;
-	addr.bits.ldw.addr = location;
-	addr_reg = GET_HASHTBL_PART_OFFSET(handle, partid,
-			FFLP_HASH_TBL_ADDR_REG);
-	data_reg = GET_HASHTBL_PART_OFFSET(handle, partid,
-			FFLP_HASH_TBL_DATA_REG);
-
-/* write to addr reg */
-	REG_PIO_WRITE64(handle, addr_reg, addr.value);
-	REG_PIO_WRITE64(handle, data_reg, data);
-
-	return (NPI_SUCCESS);
-
-}
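
Worked example of the location formula in the block comment above: for hash bucket 5, subarea 2, location = (5 << 3) + 2 * 8 = 56, which satisfies the 8-byte alignment check in this function.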
-
-/*
- * npi_fflp_fcram_subarea_read ()
- * Reads an FCRAM entry subarea i.e the 8 bytes within the 64 bytes
- * pointed by  the last 20 bits of  H1. Effectively, this accesses
- * specific 8 bytes within the hash table bucket.
- *
- *  H1-->  |-----------------|
- *	   |	subarea 0    |
- *	   |_________________|
- *	   | Subarea 1	     |
- *	   |_________________|
- *	   | .......	     |
- *	   |_________________|
- *	   | Subarea 7       |
- *	   |_________________|
- *
- * Inputs:
- *         handle:	opaque handle interpreted by the underlying OS
- *	   partid:	Partition ID
- *	   location:	location of the subarea. It is derived from:
- *			Bucket = [19:15][14:0]       (20 bits of H1)
- *			location = (Bucket << 3 ) + subarea * 8
- *				 = [22:18][17:3] || subarea * 8
- *	   data:	ptr to write subarea contents to.
- *
- * Return:
- * NPI Success/Failure status code
- */
-npi_status_t
-npi_fflp_fcram_subarea_read(npi_handle_t handle, part_id_t partid,
-			    uint32_t location, uint64_t *data)
-
-{
-
-	hash_tbl_addr_t addr;
-	uint64_t addr_reg, data_reg;
-
-	if (location % 8) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " fcram_subarea_read:"
-				    " unaligned location %llx \n",
-				    location));
-			/* need to be 8 byte aligned */
-		return (NPI_FFLP_FCRAM_LOC_INVALID);
-	}
-
-	addr.value = 0;
-	addr.bits.ldw.addr = location;
-	addr_reg = GET_HASHTBL_PART_OFFSET(handle, partid,
-						    FFLP_HASH_TBL_ADDR_REG);
-	data_reg = GET_HASHTBL_PART_OFFSET(handle, partid,
-						    FFLP_HASH_TBL_DATA_REG);
-
-/* write to addr reg */
-	REG_PIO_WRITE64(handle, addr_reg, addr.value);
-	REG_PIO_READ64(handle, data_reg, data);
-
-	return (NPI_SUCCESS);
-
-}
-
-/*
- * The following are zero function fflp configuration functions.
- */
-
-/*
- * npi_fflp_fcram_config_partition()
- * Partitions and configures the FCRAM
- */
-npi_status_t
-npi_fflp_cfg_fcram_partition(npi_handle_t handle, part_id_t partid,
-				    uint8_t base_mask, uint8_t base_reloc)
-
-{
-/*
- * Assumes that the base mask and relocation are computed somewhere
- * and kept in the state data structure. Alternatively, one can pass
- * a partition size and a starting address and let this routine compute
- * the mask and reloc values.
- */
-
-	flow_prt_sel_t sel;
-	uint64_t offset;
-
-	ASSERT(FCRAM_PARTITION_VALID(partid));
-	if (!FCRAM_PARTITION_VALID(partid)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_fflp_cfg_fcram_partition:"
-				    " Invalid Partition %d \n",
-				    partid));
-		return (NPI_FFLP_FCRAM_PART_INVALID);
-	}
-
-	offset = FFLP_PART_OFFSET(partid, FFLP_FLW_PRT_SEL_REG);
-	sel.value = 0;
-	sel.bits.ldw.mask = base_mask;
-	sel.bits.ldw.base = base_reloc;
-	sel.bits.ldw.ext = BIT_DISABLE; /* disable */
-	REG_PIO_WRITE64(handle, offset, sel.value);
-	return (NPI_SUCCESS);
-
-}
-
-/*
- * npi_fflp_fcram_partition_enable
- * Enable previously configured FCRAM partition
- *
- * Input
- *         handle:	opaque handle interpreted by the underlying OS
- *         partid:	 partition ID, Corresponds to the RDC table
- *
- * Return
- *      0			Successful
- *      Non zero  error code    Enable failed, and reason.
- *
- */
-npi_status_t
-npi_fflp_cfg_fcram_partition_enable  (npi_handle_t handle, part_id_t partid)
-
-{
-
-	flow_prt_sel_t sel;
-	uint64_t offset;
-
-	ASSERT(FCRAM_PARTITION_VALID(partid));
-	if (!FCRAM_PARTITION_VALID(partid)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " fcram_partition enable:"
-				    " Invalid Partition %d \n",
-				    partid));
-		return (NPI_FFLP_FCRAM_PART_INVALID);
-	}
-
-	offset = FFLP_PART_OFFSET(partid, FFLP_FLW_PRT_SEL_REG);
-
-	REG_PIO_READ64(handle, offset, &sel.value);
-	sel.bits.ldw.ext = BIT_ENABLE; /* enable */
-	REG_PIO_WRITE64(handle, offset, sel.value);
-
-	return (NPI_SUCCESS);
-
-}
-
-/*
- * npi_fflp_fcram_partition_disable
- * Disable previously configured FCRAM partition
- *
- * Input
- *         handle:	opaque handle interpreted by the underlying OS
- *         partid:	partition ID, Corresponds to the RDC table
- *
- * Return:
- * NPI Success/Failure status code
- */
-npi_status_t
-npi_fflp_cfg_fcram_partition_disable(npi_handle_t handle, part_id_t partid)
-
-{
-
-	flow_prt_sel_t sel;
-	uint64_t offset;
-
-	ASSERT(FCRAM_PARTITION_VALID(partid));
-	if (!FCRAM_PARTITION_VALID(partid)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " fcram_partition disable:"
-				    " Invalid Partition %d \n",
-				    partid));
-		return (NPI_FFLP_FCRAM_PART_INVALID);
-	}
-	offset = FFLP_PART_OFFSET(partid, FFLP_FLW_PRT_SEL_REG);
-	REG_PIO_READ64(handle, offset, &sel.value);
-	sel.bits.ldw.ext = BIT_DISABLE; /* disable */
-	REG_PIO_WRITE64(handle, offset, sel.value);
-	return (NPI_SUCCESS);
-}
-
-/*
- *  npi_fflp_cam_errorcheck_disable
- *  Disables FCRAM and TCAM error checking
- */
-npi_status_t
-npi_fflp_cfg_cam_errorcheck_disable(npi_handle_t handle)
-
-{
-
-	fflp_cfg_1_t fflp_cfg;
-	uint64_t offset;
-	offset = FFLP_CFG_1_REG;
-
-	REG_PIO_READ64(handle, offset, &fflp_cfg.value);
-
-	fflp_cfg.bits.ldw.errordis = BIT_ENABLE;
-	REG_PIO_WRITE64(handle, offset, fflp_cfg.value);
-
-	return (NPI_SUCCESS);
-
-}
-
-/*
- *  npi_fflp_cam_errorcheck_enable
- *  Enables FCRAM and TCAM error checking
- */
-npi_status_t
-npi_fflp_cfg_cam_errorcheck_enable(npi_handle_t handle)
-
-{
-	fflp_cfg_1_t fflp_cfg;
-	uint64_t offset;
-	offset = FFLP_CFG_1_REG;
-
-	REG_PIO_READ64(handle, offset, &fflp_cfg.value);
-
-	fflp_cfg.bits.ldw.errordis = BIT_DISABLE;
-	REG_PIO_WRITE64(handle, offset, fflp_cfg.value);
-
-	return (NPI_SUCCESS);
-
-}
-
-/*
- *  npi_fflp_cam_llcsnap_enable
- *  Enables input parser llcsnap recognition
- */
-npi_status_t
-npi_fflp_cfg_llcsnap_enable(npi_handle_t handle)
-
-{
-
-	fflp_cfg_1_t fflp_cfg;
-	uint64_t offset;
-	offset = FFLP_CFG_1_REG;
-
-	REG_PIO_READ64(handle, offset, &fflp_cfg.value);
-
-	fflp_cfg.bits.ldw.llcsnap = BIT_ENABLE;
-	REG_PIO_WRITE64(handle, offset, fflp_cfg.value);
-
-	return (NPI_SUCCESS);
-
-}
-
-/*
- *  npi_fflp_cam_llcsnap_disable
- *  Disables input parser llcsnap recognition
- */
-npi_status_t
-npi_fflp_cfg_llcsnap_disable(npi_handle_t handle)
-
-{
-
-
-	fflp_cfg_1_t fflp_cfg;
-	uint64_t offset;
-	offset = FFLP_CFG_1_REG;
-
-	REG_PIO_READ64(handle, offset, &fflp_cfg.value);
-
-	fflp_cfg.bits.ldw.llcsnap = BIT_DISABLE;
-	REG_PIO_WRITE64(handle, offset, fflp_cfg.value);
-
-	return (NPI_SUCCESS);
-
-}
-
-/*
- * npi_fflp_config_fcram_refresh
- * Set FCRAM min and max refresh time.
- *
- * Input
- *      handle			opaque handle interpreted by the underlying OS
- *	min_time		Minimum Refresh time count
- *	max_time		maximum Refresh Time count
- *	sys_time		System Clock rate
- *
- *	The counters are 16 bit counters. The maximum refresh time is
- *      3.9us/clock cycle. The minimum is 400ns/clock cycle.
- *	It is not clear whether the cycle here is the FCRAM clock cycle.
- *	If it is, the sys_time parameter is not needed, as there won't
- *	be configuration variation due to the system clock rate.
- *
- * Return:
- * NPI Success/Failure status code
- */
-npi_status_t
-npi_fflp_cfg_fcram_refresh_time(npi_handle_t handle, uint32_t min_time,
-				    uint32_t max_time, uint32_t sys_time)
-
-{
-
-	uint64_t offset;
-	fcram_ref_tmr_t refresh_timer_reg;
-	uint16_t max, min;
-
-	offset = FFLP_FCRAM_REF_TMR_REG;
-/* need to figure out how to derive the numbers */
-	max = max_time * sys_time;
-	min = min_time * sys_time;
-/* for now, just set with #def values */
-
-	max = FCRAM_REFRESH_DEFAULT_MAX_TIME;
-	min = FCRAM_REFRESH_DEFAULT_MIN_TIME;
-	REG_PIO_READ64(handle, offset, &refresh_timer_reg.value);
-	refresh_timer_reg.bits.ldw.min = min;
-	refresh_timer_reg.bits.ldw.max = max;
-	REG_PIO_WRITE64(handle, offset, refresh_timer_reg.value);
-	return (NPI_SUCCESS);
-}
-
-/*
- *  npi_fflp_hash_lookup_err_report
- *  Reports hash table (fcram) lookup errors
- *
- *  Input
- *      handle			opaque handle interpreted by the underlying OS
- *      err_stat		Pointer to return Error bits
- *
- *
- * Return:
- * NPI success/failure status code
- */
-npi_status_t
-npi_fflp_fcram_get_lookup_err_log(npi_handle_t handle,
-				    hash_lookup_err_log_t *err_stat)
-
-{
-
-	hash_lookup_err_log1_t err_log1;
-	hash_lookup_err_log2_t err_log2;
-	uint64_t  err_log1_offset, err_log2_offset;
-	err_log1.value = 0;
-	err_log2.value = 0;
-
-	err_log1_offset = HASH_LKUP_ERR_LOG1_REG;
-	err_log2_offset = HASH_LKUP_ERR_LOG2_REG;
-
-	REG_PIO_READ64(handle, err_log1_offset, &err_log1.value);
-	REG_PIO_READ64(handle, err_log2_offset, &err_log2.value);
-
-	if (err_log1.value) {
-/* nonzero means there are some errors */
-		err_stat->lookup_err = BIT_ENABLE;
-		err_stat->syndrome = err_log2.bits.ldw.syndrome;
-		err_stat->subarea = err_log2.bits.ldw.subarea;
-		err_stat->h1 = err_log2.bits.ldw.h1;
-		err_stat->multi_bit = err_log1.bits.ldw.mult_bit;
-		err_stat->multi_lkup = err_log1.bits.ldw.mult_lk;
-		err_stat->ecc_err = err_log1.bits.ldw.ecc_err;
-		err_stat->uncor_err = err_log1.bits.ldw.cu;
-	} else {
-		err_stat->lookup_err = BIT_DISABLE;
-	}
-
-	return (NPI_SUCCESS);
-
-}
-
-/*
- * npi_fflp_fcram_get_pio_err_log
- * Reports hash table PIO read errors for the given partition.
- * by default, it clears the error bit which was set by the HW.
- *
- * Input
- *      handle:		opaque handle interpreted by the underlying OS
- *	partid:		partition ID
- *      err_stat	Pointer to return Error bits
- *
- * Return
- *	NPI success/failure status code
- */
-npi_status_t
-npi_fflp_fcram_get_pio_err_log(npi_handle_t handle, part_id_t partid,
-				    hash_pio_err_log_t *err_stat)
-{
-
-	hash_tbl_data_log_t err_log;
-	uint64_t offset;
-
-	ASSERT(FCRAM_PARTITION_VALID(partid));
-	if (!FCRAM_PARTITION_VALID(partid)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			" fcram_get_pio_err_log:"
-			" Invalid Partition %d \n",
-			partid));
-		return (NPI_FFLP_FCRAM_PART_INVALID);
-	}
-
-	offset = GET_HASHTBL_PART_OFFSET_NVIR(partid,
-			FFLP_HASH_TBL_DATA_LOG_REG);
-
-	REG_PIO_READ64(handle, offset, &err_log.value);
-
-	if (err_log.bits.ldw.pio_err == BIT_ENABLE) {
-/* nonzero means there are some errors */
-		err_stat->pio_err = BIT_ENABLE;
-		err_stat->syndrome = err_log.bits.ldw.syndrome;
-		err_stat->addr = err_log.bits.ldw.fcram_addr;
-		err_log.value = 0;
-		REG_PIO_WRITE64(handle, offset, err_log.value);
-	} else {
-		err_stat->pio_err = BIT_DISABLE;
-	}
-
-	return (NPI_SUCCESS);
-
-}
-
-/*
- * npi_fflp_fcram_clr_pio_err_log
- * Clears FCRAM PIO error status for the partition.
- * If there are FCRAM errors, as indicated by the err bit set by the HW,
- * then the SW will clear them by clearing the bit.
- *
- * Input
- *      handle:		opaque handle interpreted by the underlying OS
- *	partid:		partition ID
- *
- *
- * Return
- *	NPI success/failure status code
- */
-npi_status_t
-npi_fflp_fcram_clr_pio_err_log(npi_handle_t handle, part_id_t partid)
-{
-	uint64_t offset;
-
-	hash_tbl_data_log_t err_log;
-
-	ASSERT(FCRAM_PARTITION_VALID(partid));
-	if (!FCRAM_PARTITION_VALID(partid)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			" fcram_clr_pio_err_log:"
-			" Invalid Partition %d \n",
-			partid));
-
-		return (NPI_FFLP_FCRAM_PART_INVALID);
-	}
-
-	offset = GET_HASHTBL_PART_OFFSET_NVIR(partid,
-			FFLP_HASH_TBL_DATA_LOG_REG);
-
-	err_log.value = 0;
-	REG_PIO_WRITE64(handle, offset, err_log.value);
-
-
-	return (NPI_SUCCESS);
-
-}
-
-/*
- * npi_fflp_tcam_get_err_log
- * Reports TCAM PIO read and lookup errors.
- * If there are TCAM errors as indicated by err bit set by HW,
- *  then the SW will clear it by clearing the bit.
- *
- * Input
- *      handle:		opaque handle interpreted by the underlying OS
- *	err_stat:	 structure to report various TCAM errors.
- *                       will be updated if there are TCAM errors.
- *
- *
- * Return
- *	NPI_SUCCESS	Success
- *
- *
- */
-npi_status_t
-npi_fflp_tcam_get_err_log(npi_handle_t handle, tcam_err_log_t *err_stat)
-{
-	tcam_err_t err_log;
-	uint64_t offset;
-
-	offset = FFLP_TCAM_ERR_REG;
-	err_log.value = 0;
-
-	REG_PIO_READ64(handle, offset, &err_log.value);
-
-	if (err_log.bits.ldw.err == BIT_ENABLE) {
-/* non-zero means err */
-		err_stat->tcam_err = BIT_ENABLE;
-		if (err_log.bits.ldw.p_ecc) {
-			err_stat->parity_err = 0;
-			err_stat->ecc_err = 1;
-		} else {
-			err_stat->parity_err = 1;
-			err_stat->ecc_err = 0;
-
-		}
-		err_stat->syndrome = err_log.bits.ldw.syndrome;
-		err_stat->location = err_log.bits.ldw.addr;
-
-
-		err_stat->multi_lkup = err_log.bits.ldw.mult;
-			/* now clear the error */
-		err_log.value = 0;
-		REG_PIO_WRITE64(handle, offset, err_log.value);
-
-	} else {
-		err_stat->tcam_err = 0;
-	}
-	return (NPI_SUCCESS);
-
-}
-
-/*
- * npi_fflp_tcam_clr_err_log
- * Clears TCAM PIO read and lookup error status.
- * If there are TCAM errors as indicated by err bit set by HW,
- *  then the SW will clear it by clearing the bit.
- *
- * Input
- *         handle:	opaque handle interpreted by the underlying OS
- *
- *
- * Return
- *	NPI_SUCCESS	Success
- *
- *
- */
-npi_status_t
-npi_fflp_tcam_clr_err_log(npi_handle_t handle)
-{
-	tcam_err_t err_log;
-	uint64_t offset;
-
-	offset = FFLP_TCAM_ERR_REG;
-	err_log.value = 0;
-	REG_PIO_WRITE64(handle, offset, err_log.value);
-
-	return (NPI_SUCCESS);
-
-}
-
-/*
- * npi_fflp_fcram_err_synd_test
- * Tests the FCRAM error detection logic.
- * The error detection logic for the syndrome is tested.
- * tst0->synd (8bits) are set to select the syndrome bits
- * to be XOR'ed
- *
- * Input
- *      handle:	opaque handle interpreted by the underlying OS
- *	syndrome_bits:	 Syndrome bits to select bits to be xor'ed
- *
- *
- * Return
- *	NPI_SUCCESS	Success
- *
- *
- */
-npi_status_t
-npi_fflp_fcram_err_synd_test(npi_handle_t handle, uint8_t syndrome_bits)
-{
-
-	uint64_t t0_offset;
-	fcram_err_tst0_t tst0;
-	t0_offset = FFLP_FCRAM_ERR_TST0_REG;
-
-	tst0.value = 0;
-	tst0.bits.ldw.syndrome_mask = syndrome_bits;
-
-	REG_PIO_WRITE64(handle, t0_offset, tst0.value);
-
-	return (NPI_SUCCESS);
-
-}
-
-/*
- * npi_fflp_fcram_err_data_test
- * Tests the FCRAM error detection logic.
- * The error detection logic for the datapath is tested.
- * bits [63:0] are set to select the data bits to be xor'ed
- *
- * Input
- *      handle:	opaque handle interpreted by the underlying OS
- *	data:	 data bits to select bits to be xor'ed
- *
- *
- * Return
- *	NPI_SUCCESS	Success
- *
- *
- */
-npi_status_t
-npi_fflp_fcram_err_data_test(npi_handle_t handle, fcram_err_data_t *data)
-{
-
-	uint64_t t1_offset, t2_offset;
-	fcram_err_tst1_t tst1; /* for data bits [31:0] */
-	fcram_err_tst2_t tst2; /* for data bits [63:32] */
-
-	t1_offset = FFLP_FCRAM_ERR_TST1_REG;
-	t2_offset = FFLP_FCRAM_ERR_TST2_REG;
-	tst1.value = 0;
-	tst2.value = 0;
-	tst1.bits.ldw.dat = data->bits.ldw.dat;
-	tst2.bits.ldw.dat = data->bits.hdw.dat;
-
-	REG_PIO_WRITE64(handle, t1_offset, tst1.value);
-	REG_PIO_WRITE64(handle, t2_offset, tst2.value);
-
-	return (NPI_SUCCESS);
-
-}
-
-/*
- * npi_fflp_cfg_enet_vlan_table_assoc
- * associates port vlan id to rdc table.
- *
- * Input
- *     handle			opaque handle interpreted by the underlying OS
- *     mac_portn		port number
- *     vlan_id			VLAN ID
- *     rdc_table		RDC Table #
- *     priority			priority
- *
- * Output
- *
- *	NPI success/failure status code
- *
- */
-npi_status_t
-npi_fflp_cfg_enet_vlan_table_assoc(npi_handle_t handle, uint8_t mac_portn,
-				    vlan_id_t vlan_id, uint8_t rdc_table,
-				    uint8_t priority)
-{
-
-	fflp_enet_vlan_tbl_t cfg;
-	uint64_t offset;
-	uint8_t vlan_parity[8] = {0, 1, 1, 2, 1, 2, 2, 3};
-	uint8_t parity_bit;
-
-	ASSERT(FFLP_VLAN_VALID(vlan_id));
-	if (!FFLP_VLAN_VALID(vlan_id)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			" fflp_cfg_enet_vlan_table:"
-			" Invalid vlan ID %d \n",
-			vlan_id));
-		return (NPI_FFLP_VLAN_INVALID);
-	}
-
-	ASSERT(FFLP_PORT_VALID(mac_portn));
-	if (!FFLP_PORT_VALID(mac_portn)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			" fflp_cfg_enet_vlan_table:"
-			" Invalid port num %d \n",
-			mac_portn));
-		return (NPI_FFLP_PORT_INVALID);
-	}
-
-	ASSERT(FFLP_RDC_TABLE_VALID(rdc_table));
-	if (!FFLP_RDC_TABLE_VALID(rdc_table)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			" fflp_cfg_enet_vlan_table:"
-			" Invalid RDC Table %d \n",
-			rdc_table));
-		return (NPI_FFLP_RDC_TABLE_INVALID);
-	}
-
-	offset = FFLP_VLAN_OFFSET(vlan_id, FFLP_ENET_VLAN_TBL_REG);
-	REG_PIO_READ64(handle, offset, &cfg.value);
-
-	switch (mac_portn) {
-		case 0:
-			cfg.bits.ldw.vlanrdctbln0 = rdc_table;
-			if (priority)
-				cfg.bits.ldw.vpr0 = BIT_ENABLE;
-			else
-				cfg.bits.ldw.vpr0 = BIT_DISABLE;
-				/* set the parity bits */
-			parity_bit = vlan_parity[cfg.bits.ldw.vlanrdctbln0] +
-				vlan_parity[cfg.bits.ldw.vlanrdctbln1] +
-				cfg.bits.ldw.vpr0 + cfg.bits.ldw.vpr1;
-			cfg.bits.ldw.parity0 = parity_bit & 0x1;
-			break;
-		case 1:
-			cfg.bits.ldw.vlanrdctbln1 = rdc_table;
-			if (priority)
-				cfg.bits.ldw.vpr1 = BIT_ENABLE;
-			else
-				cfg.bits.ldw.vpr1 = BIT_DISABLE;
-				/* set the parity bits */
-			parity_bit = vlan_parity[cfg.bits.ldw.vlanrdctbln0] +
-				vlan_parity[cfg.bits.ldw.vlanrdctbln1] +
-				cfg.bits.ldw.vpr0 + cfg.bits.ldw.vpr1;
-				cfg.bits.ldw.parity0 = parity_bit & 0x1;
-
-			break;
-		case 2:
-			cfg.bits.ldw.vlanrdctbln2 = rdc_table;
-			if (priority)
-				cfg.bits.ldw.vpr2 = BIT_ENABLE;
-			else
-				cfg.bits.ldw.vpr2 = BIT_DISABLE;
-				/* set the parity bits */
-			parity_bit = vlan_parity[cfg.bits.ldw.vlanrdctbln2] +
-				vlan_parity[cfg.bits.ldw.vlanrdctbln3] +
-				cfg.bits.ldw.vpr2 + cfg.bits.ldw.vpr3;
-			cfg.bits.ldw.parity1 = parity_bit & 0x1;
-
-			break;
-		case 3:
-			cfg.bits.ldw.vlanrdctbln3 = rdc_table;
-			if (priority)
-				cfg.bits.ldw.vpr3 = BIT_ENABLE;
-			else
-				cfg.bits.ldw.vpr3 = BIT_DISABLE;
-			/* set the parity bits */
-			parity_bit = vlan_parity[cfg.bits.ldw.vlanrdctbln2] +
-				vlan_parity[cfg.bits.ldw.vlanrdctbln3] +
-				cfg.bits.ldw.vpr2 + cfg.bits.ldw.vpr3;
-			cfg.bits.ldw.parity1 = parity_bit & 0x1;
-			break;
-		default:
-			return (NPI_FFLP_SW_PARAM_ERROR);
-	}
-
-	REG_PIO_WRITE64(handle, offset, cfg.value);
-	return (NPI_SUCCESS);
-}
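-
-/*
- * Usage sketch (illustrative only; not part of the original source, and the
- * handle and numeric arguments are hypothetical):
- * bind VLAN 100 on MAC port 0 to RDC table 1, with VLAN classification
- * taking priority over L2DA classification.
- *
- *	npi_status_t rs;
- *
- *	rs = npi_fflp_cfg_enet_vlan_table_assoc(handle, 0, 100, 1, 1);
- *	if (rs != NPI_SUCCESS)
- *		return (rs);
- *
- * Each 64-bit VLAN table entry carries the RDC table number and priority
- * bit for all four ports, so the function above read-modify-writes the
- * entry and recomputes even parity (parity0 covers ports 0/1, parity1
- * covers ports 2/3).
- */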
-
-/*
- * npi_fflp_cfg_enet_vlan_table_set_pri
- * Sets the VLAN-based classification priority with respect to L2DA
- * classification.
- *
- * Input
- *     handle		opaque handle interpreted by the underlying OS
- *     mac_portn	port number
- *     vlan_id		VLAN ID
- *     priority 	priority
- *			1: vlan classification has higher priority
- *			0: l2da classification has higher priority
- *
- * Output
- *
- *	NPI success/failure status code
- */
-npi_status_t
-npi_fflp_cfg_enet_vlan_table_set_pri(npi_handle_t handle, uint8_t mac_portn,
-				    vlan_id_t vlan_id, uint8_t priority)
-{
-
-	fflp_enet_vlan_tbl_t cfg;
-	uint64_t offset;
-	uint64_t old_value;
-
-	ASSERT(FFLP_VLAN_VALID(vlan_id));
-	if (!FFLP_VLAN_VALID(vlan_id)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			" enet_vlan_table set pri:"
-			" Invalid vlan ID %d \n",
-			vlan_id));
-		return (NPI_FFLP_VLAN_INVALID);
-	}
-
-	ASSERT(FFLP_PORT_VALID(mac_portn));
-	if (!FFLP_PORT_VALID(mac_portn)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			" enet_vlan_table set pri:"
-			" Invalid port num %d \n",
-			mac_portn));
-		return (NPI_FFLP_PORT_INVALID);
-	}
-
-
-	offset = FFLP_ENET_VLAN_TBL_REG + (vlan_id  << 3);
-	REG_PIO_READ64(handle, offset, &cfg.value);
-	old_value = cfg.value;
-	switch (mac_portn) {
-		case 0:
-			if (priority)
-				cfg.bits.ldw.vpr0 = BIT_ENABLE;
-			else
-				cfg.bits.ldw.vpr0 = BIT_DISABLE;
-			break;
-		case 1:
-			if (priority)
-				cfg.bits.ldw.vpr1 = BIT_ENABLE;
-			else
-				cfg.bits.ldw.vpr1 = BIT_DISABLE;
-			break;
-		case 2:
-			if (priority)
-				cfg.bits.ldw.vpr2 = BIT_ENABLE;
-			else
-				cfg.bits.ldw.vpr2 = BIT_DISABLE;
-			break;
-		case 3:
-			if (priority)
-				cfg.bits.ldw.vpr3 = BIT_ENABLE;
-			else
-				cfg.bits.ldw.vpr3 = BIT_DISABLE;
-			break;
-		default:
-			return (NPI_FFLP_SW_PARAM_ERROR);
-	}
-	if (old_value != cfg.value) {
-		if (mac_portn > 1)
-			cfg.bits.ldw.parity1++;
-		else
-			cfg.bits.ldw.parity0++;
-
-		REG_PIO_WRITE64(handle, offset, cfg.value);
-	}
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_fflp_cfg_vlan_table_clear
- * Clears the VLAN RDC table entry for the given VLAN ID.
- *
- * Input
- *     handle		opaque handle interpreted by the underlying OS
- *     vlan_id		VLAN ID
- *
- * Output
- *
- *	NPI success/failure status code
- *
- */
-npi_status_t
-npi_fflp_cfg_vlan_table_clear(npi_handle_t handle, vlan_id_t vlan_id)
-{
-
-	uint64_t offset;
-	uint64_t clear = 0ULL;
-	vlan_id_t start_vlan = 0;
-
-	if ((vlan_id < start_vlan) || (vlan_id >= NXGE_MAX_VLANS)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			" enet_vlan_table clear:"
-			" Invalid vlan ID %d \n",
-			vlan_id));
-		return (NPI_FFLP_VLAN_INVALID);
-	}
-
-
-	offset = FFLP_VLAN_OFFSET(vlan_id, FFLP_ENET_VLAN_TBL_REG);
-
-	REG_PIO_WRITE64(handle, offset, clear);
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_fflp_vlan_tbl_get_err_log
- * Reports VLAN table errors.
- * If there are VLAN table errors, as indicated by the err bit set by HW,
- * then the SW will clear it by clearing the bit.
- *
- * Input
- *      handle:		opaque handle interpreted by the underlying OS
- *	err_stat:	 structure to report various VLAN table errors.
- *                       will be updated if there are errors.
- *
- *
- * Return
- *	NPI_SUCCESS	Success
- *
- *
- */
-npi_status_t
-npi_fflp_vlan_tbl_get_err_log(npi_handle_t handle, vlan_tbl_err_log_t *err_stat)
-{
-	vlan_par_err_t err_log;
-	uint64_t offset;
-
-
-	offset = FFLP_VLAN_PAR_ERR_REG;
-	err_log.value = 0;
-
-	REG_PIO_READ64(handle, offset, &err_log.value);
-
-	if (err_log.bits.ldw.err == BIT_ENABLE) {
-		/* non-zero means err */
-		err_stat->err = BIT_ENABLE;
-		err_stat->multi = err_log.bits.ldw.m_err;
-		err_stat->addr = err_log.bits.ldw.addr;
-		err_stat->data = err_log.bits.ldw.data;
-		/* now clear the error */
-		err_log.value = 0;
-		REG_PIO_WRITE64(handle, offset, err_log.value);
-
-	} else {
-		err_stat->err = 0;
-	}
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_fflp_vlan_tbl_clr_err_log
- * Clears VLAN table PIO error status.
- * If there are VLAN table errors, as indicated by the err bit set by HW,
- * then the SW will clear it by clearing the bit.
- *
- * Input
- *         handle:	opaque handle interpreted by the underlying OS
- *
- *
- * Return
- *	NPI_SUCCESS	Success
- *
- *
- */
-npi_status_t
-npi_fflp_vlan_tbl_clr_err_log(npi_handle_t handle)
-{
-	vlan_par_err_t err_log;
-	uint64_t offset;
-
-	offset = FFLP_VLAN_PAR_ERR_REG;
-	err_log.value = 0;
-
-	REG_PIO_WRITE64(handle, offset, err_log.value);
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_fflp_cfg_enet_usr_cls_set()
- * Configures a user configurable Ethernet class.
- *
- * Input
- *      handle:		opaque handle interpreted by the underlying OS
- *      class:       Ethernet class
- *		     (TCAM_CLASS_ETYPE or  TCAM_CLASS_ETYPE_2)
- *      enet_type:   16 bit Ethernet Type value, corresponding to bytes
- *                        [13:14] of the frame.
- *
- *  By default, the class is disabled until explicitly enabled.
- *
- * Return
- * NPI success/failure status code
- */
-npi_status_t
-npi_fflp_cfg_enet_usr_cls_set(npi_handle_t handle,
-			    tcam_class_t class, uint16_t enet_type)
-{
-	uint64_t offset;
-	tcam_class_prg_ether_t cls_cfg;
-	cls_cfg.value = 0x0;
-
-	/* check if the class is valid */
-	ASSERT(TCAM_L2_USR_CLASS_VALID(class));
-	if (!TCAM_L2_USR_CLASS_VALID(class)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			" npi_fflp_cfg_enet_usr_cls_set:"
-			" Invalid class %d \n",
-			class));
-		return (NPI_FFLP_TCAM_CLASS_INVALID);
-	}
-	offset = GET_TCAM_CLASS_OFFSET(class);
-
-/*
- * etype check code
- *
- * if (check_fail)
- *  return (NPI_FAILURE | NPI_SW_ERROR);
- */
-
-	cls_cfg.bits.ldw.etype = enet_type;
-	cls_cfg.bits.ldw.valid = BIT_DISABLE;
-	REG_PIO_WRITE64(handle, offset, cls_cfg.value);
-	return (NPI_SUCCESS);
-}
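-
-/*
- * Usage sketch (illustrative only; not part of the original source, and the
- * handle and EtherType value are hypothetical):
- * program a user configurable Ethernet class and then enable it, since
- * npi_fflp_cfg_enet_usr_cls_set() leaves the class disabled by default.
- *
- *	npi_status_t rs;
- *
- *	rs = npi_fflp_cfg_enet_usr_cls_set(handle, TCAM_CLASS_ETYPE, 0x88f7);
- *	if (rs == NPI_SUCCESS)
- *		rs = npi_fflp_cfg_enet_usr_cls_enable(handle,
- *		    TCAM_CLASS_ETYPE);
- */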
-
-/*
- * npi_fflp_cfg_enet_usr_cls_enable()
- * Enables a previously configured TCAM user configurable Ethernet class.
- *
- * Input
- *      handle:	opaque handle interpreted by the underlying OS
- *      class:       Ethernet class
- *		     (TCAM_CLASS_ETYPE or  TCAM_CLASS_ETYPE_2)
- *
- * Return
- * NPI success/failure status code
- */
-npi_status_t
-npi_fflp_cfg_enet_usr_cls_enable(npi_handle_t handle, tcam_class_t class)
-{
-	uint64_t offset;
-	tcam_class_prg_ether_t cls_cfg;
-
-	ASSERT(TCAM_L2_USR_CLASS_VALID(class));
-	if (!TCAM_L2_USR_CLASS_VALID(class)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			" npi_fflp_cfg_enet_usr_cls_enable:"
-			" Invalid class %d \n",
-			class));
-		return (NPI_FFLP_TCAM_CLASS_INVALID);
-	}
-
-	offset = GET_TCAM_CLASS_OFFSET(class);
-
-	REG_PIO_READ64(handle, offset, &cls_cfg.value);
-	cls_cfg.bits.ldw.valid = BIT_ENABLE;
-	REG_PIO_WRITE64(handle, offset, cls_cfg.value);
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_fflp_cfg_enet_usr_cls_disable()
- * Disables previously configured TCAM user configurable Ethernet classes.
- *
- * Input
- *      handle:	opaque handle interpreted by the underlying OS
- *      class:       Ethernet class
- *		     (TCAM_CLASS_ETYPE or  TCAM_CLASS_ETYPE_2)
- *
- * Return
- * NPI success/failure status code
- */
-npi_status_t
-npi_fflp_cfg_enet_usr_cls_disable(npi_handle_t handle, tcam_class_t class)
-{
-	uint64_t offset;
-	tcam_class_prg_ether_t cls_cfg;
-
-	ASSERT(TCAM_L2_USR_CLASS_VALID(class));
-	if (!TCAM_L2_USR_CLASS_VALID(class)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			" npi_fflp_cfg_enet_usr_cls_disable:"
-			" Invalid class %d \n",
-			class));
-		return (NPI_FFLP_TCAM_CLASS_INVALID);
-	}
-
-	offset = GET_TCAM_CLASS_OFFSET(class);
-
-	REG_PIO_READ64(handle, offset, &cls_cfg.value);
-	cls_cfg.bits.ldw.valid = BIT_DISABLE;
-
-	REG_PIO_WRITE64(handle, offset, cls_cfg.value);
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_fflp_cfg_ip_usr_cls_set()
- * Configures a TCAM user configurable IP class.
- *
- * Input
- *      handle:		opaque handle interpreted by the underlying OS
- *      class:       IP class
- *		     (TCAM_CLASS_IP_USER_4 <= class <= TCAM_CLASS_IP_USER_7)
- *      tos:         IP TOS bits
- *      tos_mask:    IP TOS bits mask; only bits with the mask set are used
- *      proto:       IP protocol
- *      ver:         IP version
- * By default, the class is disabled until explicitly enabled.
- *
- * Return
- * NPI success/failure status code
- */
-npi_status_t
-npi_fflp_cfg_ip_usr_cls_set(npi_handle_t handle, tcam_class_t class,
-			    uint8_t tos, uint8_t tos_mask,
-			    uint8_t proto, uint8_t ver)
-{
-	uint64_t offset;
-	tcam_class_prg_ip_t ip_cls_cfg;
-
-	ASSERT(TCAM_L3_USR_CLASS_VALID(class));
-	if (!TCAM_L3_USR_CLASS_VALID(class)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			" npi_fflp_cfg_ip_usr_cls_set:"
-			" Invalid class %d \n",
-			class));
-		return (NPI_FFLP_TCAM_CLASS_INVALID);
-	}
-
-	offset = GET_TCAM_CLASS_OFFSET(class);
-
-	ip_cls_cfg.value = 0;
-	ip_cls_cfg.bits.ldw.pid = proto;
-	ip_cls_cfg.bits.ldw.ipver = ver;
-	ip_cls_cfg.bits.ldw.tos = tos;
-	ip_cls_cfg.bits.ldw.tosmask = tos_mask;
-	ip_cls_cfg.bits.ldw.valid = 0;
-	REG_PIO_WRITE64(handle, offset, ip_cls_cfg.value);
-	return (NPI_SUCCESS);
-
-}
-
-/*
- * npi_fflp_cfg_ip_usr_cls_enable()
- * Enables a previously configured TCAM user configurable IP class.
- *
- * Input
- *      handle:	opaque handle interpreted by the underlying OS
- *      class:       IP class
- *		     (TCAM_CLASS_IP_USER_4 <= class <= TCAM_CLASS_IP_USER_7)
- *
- * Return
- * NPI success/failure status code
- */
-npi_status_t
-npi_fflp_cfg_ip_usr_cls_enable(npi_handle_t handle, tcam_class_t class)
-{
-	uint64_t offset;
-	tcam_class_prg_ip_t ip_cls_cfg;
-
-	ASSERT(TCAM_L3_USR_CLASS_VALID(class));
-	if (!TCAM_L3_USR_CLASS_VALID(class)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			" npi_fflp_cfg_ip_usr_cls_enable:"
-			" Invalid class %d \n",
-			class));
-		return (NPI_FFLP_TCAM_CLASS_INVALID);
-	}
-
-	offset = GET_TCAM_CLASS_OFFSET(class);
-	REG_PIO_READ64(handle, offset, &ip_cls_cfg.value);
-	ip_cls_cfg.bits.ldw.valid = 1;
-
-	REG_PIO_WRITE64(handle, offset, ip_cls_cfg.value);
-	return (NPI_SUCCESS);
-
-}
-
-/*
- * npi_fflp_cfg_ip_usr_cls_disable()
- * Disables previously configured TCAM user configurable IP classes.
- *
- * Input
- *      handle:	opaque handle interpreted by the underlying OS
- *      class:       IP class
- *		     (TCAM_CLASS_IP_USER_4 <= class <= TCAM_CLASS_IP_USER_7)
- *
- * Return
- * NPI success/failure status code
- */
-npi_status_t
-npi_fflp_cfg_ip_usr_cls_disable(npi_handle_t handle, tcam_class_t class)
-{
-	uint64_t offset;
-	tcam_class_prg_ip_t ip_cls_cfg;
-
-	ASSERT(TCAM_L3_USR_CLASS_VALID(class));
-	if (!TCAM_L3_USR_CLASS_VALID(class)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			" npi_fflp_cfg_ip_usr_cls_disable:"
-			" Invalid class %d \n",
-			class));
-		return (NPI_FFLP_TCAM_CLASS_INVALID);
-	}
-
-	offset = GET_TCAM_CLASS_OFFSET(class);
-
-	REG_PIO_READ64(handle, offset, &ip_cls_cfg.value);
-	ip_cls_cfg.bits.ldw.valid = 0;
-
-	REG_PIO_WRITE64(handle, offset, ip_cls_cfg.value);
-	return (NPI_SUCCESS);
-
-}
-
-/*
- * npi_fflp_cfg_ip_cls_tcam_key ()
- *
- * Configures the TCAM key generation for the IP classes
- *
- * Input
- *      handle:	opaque handle interpreted by the underlying OS
- *      l3_class:        IP class to configure key generation
- *      cfg:             Configuration bits:
- *                   discard:      Discard all frames of this class
- *                   use_ip_saddr: use ip src address (for ipv6)
- *                   use_ip_daddr: use ip dest address (for ipv6)
- *                   lookup_enable: Enable Lookup
- *
- *
- * Return
- * NPI success/failure status code
- */
-npi_status_t
-npi_fflp_cfg_ip_cls_tcam_key(npi_handle_t handle,
-			    tcam_class_t l3_class, tcam_key_cfg_t *cfg)
-{
-	uint64_t offset;
-	tcam_class_key_ip_t tcam_cls_cfg;
-
-	ASSERT(TCAM_L3_CLASS_VALID(l3_class));
-	if (!(TCAM_L3_CLASS_VALID(l3_class))) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			" npi_fflp_cfg_ip_cls_tcam_key:"
-			" Invalid class %d \n",
-			l3_class));
-		return (NPI_FFLP_TCAM_CLASS_INVALID);
-	}
-
-	if ((cfg->use_ip_daddr) &&
-		(cfg->use_ip_saddr == cfg->use_ip_daddr)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			    " npi_fflp_cfg_ip_cls_tcam_key:"
-			    " Invalid configuration %x for class %d \n",
-			    *cfg, l3_class));
-		return (NPI_FFLP_SW_PARAM_ERROR);
-	}
-
-
-	offset = GET_TCAM_KEY_OFFSET(l3_class);
-	tcam_cls_cfg.value = 0;
-
-	if (cfg->discard) {
-		tcam_cls_cfg.bits.ldw.discard = 1;
-	}
-
-	if (cfg->use_ip_saddr) {
-		tcam_cls_cfg.bits.ldw.ipaddr = 1;
-	}
-
-	if (cfg->use_ip_daddr) {
-		tcam_cls_cfg.bits.ldw.ipaddr = 0;
-	}
-
-	if (cfg->lookup_enable) {
-		tcam_cls_cfg.bits.ldw.tsel = 1;
-	}
-
-	REG_PIO_WRITE64(handle, offset, tcam_cls_cfg.value);
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_fflp_cfg_ip_cls_flow_key ()
- *
- * Configures the flow key generation for the IP classes
- * Flow key is used to generate the H1 hash function value
- * The fields used for the generation are configured using this
- * NPI function.
- *
- * Input
- *      handle:	opaque handle interpreted by the underlying OS
- *      l3_class:        IP class to configure flow key generation
- *      cfg:             Configuration bits:
- *                   use_proto:     Use IP proto field
- *                   use_dport:     use l4 destination port
- *                   use_sport:     use l4 source port
- *                   ip_opts_exist: IP Options Present
- *                   use_daddr:     use ip dest address
- *                   use_saddr:     use ip source address
- *                   use_vlan:      use VLAN ID
- *                   use_l2da:      use L2 Dest MAC Address
- *                   use_portnum:   use L2 virtual port number
- *
- *
- * Return
- * NPI success/failure status code
- */
-npi_status_t
-npi_fflp_cfg_ip_cls_flow_key(npi_handle_t handle, tcam_class_t l3_class,
-							    flow_key_cfg_t *cfg)
-{
-	uint64_t offset;
-	flow_class_key_ip_t flow_cfg_reg;
-
-	ASSERT(TCAM_L3_CLASS_VALID(l3_class));
-	if (!(TCAM_L3_CLASS_VALID(l3_class))) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			" npi_fflp_cfg_ip_cls_flow_key:"
-			" Invalid class %d \n",
-			l3_class));
-		return (NPI_FFLP_TCAM_CLASS_INVALID);
-	}
-
-
-	offset = GET_FLOW_KEY_OFFSET(l3_class);
-	flow_cfg_reg.value = 0; /* default */
-
-	if (cfg->use_proto) {
-		flow_cfg_reg.bits.ldw.proto = 1;
-	}
-
-	if (cfg->use_dport) {
-		flow_cfg_reg.bits.ldw.l4_1 = 2;
-		if (cfg->ip_opts_exist)
-			flow_cfg_reg.bits.ldw.l4_1 = 3;
-	}
-
-	if (cfg->use_sport) {
-		flow_cfg_reg.bits.ldw.l4_0 = 2;
-		if (cfg->ip_opts_exist)
-			flow_cfg_reg.bits.ldw.l4_0 = 3;
-	}
-
-	if (cfg->use_daddr) {
-		flow_cfg_reg.bits.ldw.ipda = BIT_ENABLE;
-	}
-
-	if (cfg->use_saddr) {
-		flow_cfg_reg.bits.ldw.ipsa = BIT_ENABLE;
-	}
-
-	if (cfg->use_vlan) {
-		flow_cfg_reg.bits.ldw.vlan = BIT_ENABLE;
-	}
-
-	if (cfg->use_l2da) {
-		flow_cfg_reg.bits.ldw.l2da = BIT_ENABLE;
-	}
-
-	if (cfg->use_portnum) {
-		flow_cfg_reg.bits.ldw.port = BIT_ENABLE;
-	}
-
-	REG_PIO_WRITE64(handle, offset, flow_cfg_reg.value);
-	return (NPI_SUCCESS);
-
-}
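-
-/*
- * Usage sketch (illustrative only; not part of the original source, and the
- * handle and the TCAM_CLASS_TCP_IPV4 class name are assumptions):
- * build a flow_key_cfg_t that hashes the 5-tuple (protocol, source and
- * destination IP address, source and destination L4 port) for one class.
- *
- *	flow_key_cfg_t fl_cfg;
- *	npi_status_t rs;
- *
- *	bzero(&fl_cfg, sizeof (fl_cfg));
- *	fl_cfg.use_proto = 1;
- *	fl_cfg.use_saddr = 1;
- *	fl_cfg.use_daddr = 1;
- *	fl_cfg.use_sport = 1;
- *	fl_cfg.use_dport = 1;
- *	rs = npi_fflp_cfg_ip_cls_flow_key(handle, TCAM_CLASS_TCP_IPV4,
- *	    &fl_cfg);
- *
- * Any class accepted by TCAM_L3_CLASS_VALID() may be used in place of
- * TCAM_CLASS_TCP_IPV4.
- */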
-
-npi_status_t
-npi_fflp_cfg_ip_cls_flow_key_get(npi_handle_t handle,
-				    tcam_class_t l3_class,
-				    flow_key_cfg_t *cfg)
-{
-	uint64_t offset;
-	flow_class_key_ip_t flow_cfg_reg;
-
-	ASSERT(TCAM_L3_CLASS_VALID(l3_class));
-	if (!(TCAM_L3_CLASS_VALID(l3_class))) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_fflp_cfg_ip_cls_flow_key:"
-				    " Invalid class %d \n",
-				    l3_class));
-		return (NPI_FFLP_TCAM_CLASS_INVALID);
-	}
-
-	offset = GET_FLOW_KEY_OFFSET(l3_class);
-
-	cfg->use_proto = 0;
-	cfg->use_dport = 0;
-	cfg->use_sport = 0;
-	cfg->ip_opts_exist = 0;
-	cfg->use_daddr = 0;
-	cfg->use_saddr = 0;
-	cfg->use_vlan = 0;
-	cfg->use_l2da = 0;
-	cfg->use_portnum  = 0;
-
-	REG_PIO_READ64(handle, offset, &flow_cfg_reg.value);
-
-	if (flow_cfg_reg.bits.ldw.proto) {
-		cfg->use_proto = 1;
-	}
-
-	if (flow_cfg_reg.bits.ldw.l4_1 == 2) {
-		cfg->use_dport = 1;
-	}
-
-	if (flow_cfg_reg.bits.ldw.l4_1 == 3) {
-		cfg->use_dport = 1;
-		cfg->ip_opts_exist = 1;
-	}
-
-	if (flow_cfg_reg.bits.ldw.l4_0 == 2) {
-		cfg->use_sport = 1;
-	}
-
-	if (flow_cfg_reg.bits.ldw.l4_0 == 3) {
-		cfg->use_sport = 1;
-		cfg->ip_opts_exist = 1;
-	}
-
-	if (flow_cfg_reg.bits.ldw.ipda) {
-		cfg->use_daddr = 1;
-	}
-
-	if (flow_cfg_reg.bits.ldw.ipsa) {
-		cfg->use_saddr = 1;
-	}
-
-	if (flow_cfg_reg.bits.ldw.vlan) {
-		cfg->use_vlan = 1;
-	}
-
-	if (flow_cfg_reg.bits.ldw.l2da) {
-		cfg->use_l2da = 1;
-	}
-
-	if (flow_cfg_reg.bits.ldw.port) {
-		cfg->use_portnum = 1;
-	}
-
-	NPI_DEBUG_MSG((handle.function, NPI_FFLP_CTL,
-			    " npi_fflp_cfg_ip_cls_flow_key_get %llx \n",
-			    flow_cfg_reg.value));
-
-	return (NPI_SUCCESS);
-
-}
-
-npi_status_t
-npi_fflp_cfg_ip_cls_tcam_key_get(npi_handle_t handle,
-			    tcam_class_t l3_class, tcam_key_cfg_t *cfg)
-{
-	uint64_t offset;
-	tcam_class_key_ip_t tcam_cls_cfg;
-
-	ASSERT(TCAM_L3_CLASS_VALID(l3_class));
-	if (!(TCAM_L3_CLASS_VALID(l3_class))) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_fflp_cfg_ip_cls_tcam_key_get:"
-				    " Invalid class %d \n",
-				    l3_class));
-		return (NPI_FFLP_TCAM_CLASS_INVALID);
-	}
-
-
-	offset = GET_TCAM_KEY_OFFSET(l3_class);
-
-	REG_PIO_READ64(handle, offset, &tcam_cls_cfg.value);
-
-	cfg->discard = 0;
-	cfg->use_ip_saddr = 0;
-	cfg->use_ip_daddr = 1;
-	cfg->lookup_enable = 0;
-
-	if (tcam_cls_cfg.bits.ldw.discard)
-			cfg->discard = 1;
-
-	if (tcam_cls_cfg.bits.ldw.ipaddr) {
-		cfg->use_ip_saddr = 1;
-		cfg->use_ip_daddr = 0;
-	}
-
-	if (tcam_cls_cfg.bits.ldw.tsel) {
-		cfg->lookup_enable	= 1;
-	}
-
-	NPI_DEBUG_MSG((handle.function, NPI_CTL,
-				    " npi_fflp_cfg_ip_cls_tcam_key_get %llx \n",
-				    tcam_cls_cfg.value));
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_fflp_cfg_fcram_access ()
- *
- * Sets the ratio between the FCRAM pio and lookup access
- * Input:
- * handle:	opaque handle interpreted by the underlying OS
- * access_ratio: 0  Lookup has the highest priority
- *		 15 PIO has maximum possible priority
- *
- * Return
- * NPI success/failure status code
- */
-npi_status_t
-npi_fflp_cfg_fcram_access(npi_handle_t handle, uint8_t access_ratio)
-{
-
-	fflp_cfg_1_t fflp_cfg;
-	uint64_t offset;
-	offset = FFLP_CFG_1_REG;
-
-	if (access_ratio > 0xf) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			" npi_fflp_cfg_fcram_access:"
-			" Invalid access ratio %d \n",
-			access_ratio));
-		return (NPI_FFLP_ERROR | NPI_FFLP_SW_PARAM_ERROR);
-	}
-
-	REG_PIO_READ64(handle, offset, &fflp_cfg.value);
-	fflp_cfg.bits.ldw.fflpinitdone = 0;
-	fflp_cfg.bits.ldw.fcramratio = access_ratio;
-	REG_PIO_WRITE64(handle, offset, fflp_cfg.value);
-	REG_PIO_READ64(handle, offset, &fflp_cfg.value);
-	fflp_cfg.bits.ldw.fflpinitdone = 1;
-	REG_PIO_WRITE64(handle, offset, fflp_cfg.value);
-	return (NPI_SUCCESS);
-
-}
-
-/*
- * npi_fflp_cfg_tcam_access ()
- *
- * Sets the ratio between the TCAM pio and lookup access
- * Input:
- * handle:	opaque handle interpreted by the underlying OS
- * access_ratio: 0  Lookup has the highest priority
- *		 15 PIO has maximum possible priority
- * Return
- * NPI success/failure status code
- */
-npi_status_t
-npi_fflp_cfg_tcam_access(npi_handle_t handle, uint8_t access_ratio)
-{
-	fflp_cfg_1_t fflp_cfg;
-	uint64_t offset;
-	offset = FFLP_CFG_1_REG;
-
-	if (access_ratio > 0xf) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			" npi_fflp_cfg_tcam_access:"
-			" Invalid access ratio %d \n",
-			access_ratio));
-		return (NPI_FFLP_ERROR | NPI_FFLP_SW_PARAM_ERROR);
-	}
-
-	REG_PIO_READ64(handle, offset, &fflp_cfg.value);
-	fflp_cfg.bits.ldw.fflpinitdone = 0;
-	fflp_cfg.bits.ldw.camratio = access_ratio;
-
-	/* the CAM latency is fixed, so set it here as well */
-	fflp_cfg.bits.ldw.camlatency = TCAM_DEFAULT_LATENCY;
-	REG_PIO_WRITE64(handle, offset, fflp_cfg.value);
-	REG_PIO_READ64(handle, offset, &fflp_cfg.value);
-	fflp_cfg.bits.ldw.fflpinitdone = 1;
-	REG_PIO_WRITE64(handle, offset, fflp_cfg.value);
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_fflp_cfg_hash_h1poly()
- * Initializes the H1 hash generation logic.
- *
- * Input
- *      handle:	opaque handle interpreted by the underlying OS
- *      init_value:       The initial value (seed)
- *
- * Return
- * NPI success/failure status code
- */
-npi_status_t
-npi_fflp_cfg_hash_h1poly(npi_handle_t handle, uint32_t init_value)
-{
-
-
-	hash_h1poly_t h1_cfg;
-	uint64_t offset;
-	offset = FFLP_H1POLY_REG;
-
-	h1_cfg.value = 0;
-	h1_cfg.bits.ldw.init_value = init_value;
-
-	REG_PIO_WRITE64(handle, offset, h1_cfg.value);
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_fflp_cfg_hash_h2poly()
- * Initializes the H2 hash generation logic.
- *
- * Input
- *      handle:	opaque handle interpreted by the underlying OS
- *      init_value:       The initial value (seed)
- *
- * Return
- * NPI_SUCCESS
- *
- */
-npi_status_t
-npi_fflp_cfg_hash_h2poly(npi_handle_t handle, uint16_t init_value)
-{
-
-
-	hash_h2poly_t h2_cfg;
-	uint64_t offset;
-	offset = FFLP_H2POLY_REG;
-
-	h2_cfg.value = 0;
-	h2_cfg.bits.ldw.init_value = init_value;
-
-	REG_PIO_WRITE64(handle, offset, h2_cfg.value);
-	return (NPI_SUCCESS);
-
-
-}
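-
-/*
- * Usage sketch (illustrative only; not part of the original source, and the
- * handle and seed values are hypothetical):
- * seed both hash polynomials before programming the flow keys.
- *
- *	(void) npi_fflp_cfg_hash_h1poly(handle, 0x1234abcd);
- *	(void) npi_fflp_cfg_hash_h2poly(handle, 0x8888);
- *
- * The H1 seed is 32 bits and the H2 seed is 16 bits (see the prototypes);
- * per the FCRAM comments in this driver, the last 20 bits of the H1 value
- * index the FCRAM hash table.
- */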
-
-/*
- *  npi_fflp_cfg_fcram_reset
- *  Initializes the FCRAM reset sequence.
- *
- *  Input
- *      handle:		opaque handle interpreted by the underlying OS
- *	strength:		FCRAM Drive strength
- *				   strong, weak or normal
- *				   HW recommended value:
- *	qs:			FCRAM QS mode selection
- *				   qs mode or free running
- *				   HW recommended value is:
- *
- * Return:
- * NPI success/failure status code
- */
-
-npi_status_t
-npi_fflp_cfg_fcram_reset(npi_handle_t handle,
-	fflp_fcram_output_drive_t strength, fflp_fcram_qs_t qs)
-{
-	fflp_cfg_1_t fflp_cfg;
-	uint64_t offset;
-	offset = FFLP_CFG_1_REG;
-
-	/* These bits have to be configured before FCRAM reset is issued */
-	fflp_cfg.value = 0;
-	fflp_cfg.bits.ldw.pio_fio_rst = 1;
-	REG_PIO_WRITE64(handle, offset, fflp_cfg.value);
-
-	NXGE_DELAY(5); /* TODO: What is the correct delay? */
-
-	fflp_cfg.bits.ldw.pio_fio_rst = 0;
-	REG_PIO_WRITE64(handle, offset, fflp_cfg.value);
-	fflp_cfg.bits.ldw.fcramqs = qs;
-	fflp_cfg.bits.ldw.fcramoutdr = strength;
-	fflp_cfg.bits.ldw.fflpinitdone = 1;
-	REG_PIO_WRITE64(handle, offset, fflp_cfg.value);
-
-	return (NPI_SUCCESS);
-}
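-
-/*
- * Usage sketch (illustrative only; not part of the original source, and the
- * strength/qs values are hypothetical members of fflp_fcram_output_drive_t
- * and fflp_fcram_qs_t):
- * a possible bring-up ordering using helpers defined elsewhere in this file.
- *
- *	(void) npi_fflp_cfg_fcram_reset(handle, strength, qs);
- *	(void) npi_fflp_cfg_fcram_access(handle, 0);
- *	(void) npi_fflp_cfg_tcam_access(handle, 0);
- *	(void) npi_fflp_cfg_init_done(handle);
- *
- * An access ratio of 0 gives lookups the highest priority over PIO, per
- * the comments on the two access functions above.
- */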
-
-npi_status_t
-npi_fflp_cfg_init_done(npi_handle_t handle)
-
-{
-
-	fflp_cfg_1_t fflp_cfg;
-	uint64_t offset;
-	offset = FFLP_CFG_1_REG;
-
-	REG_PIO_READ64(handle, offset, &fflp_cfg.value);
-	fflp_cfg.bits.ldw.fflpinitdone = 1;
-	REG_PIO_WRITE64(handle, offset, fflp_cfg.value);
-	return (NPI_SUCCESS);
-
-}
-
-npi_status_t
-npi_fflp_cfg_init_start(npi_handle_t handle)
-
-{
-
-	fflp_cfg_1_t fflp_cfg;
-	uint64_t offset;
-	offset = FFLP_CFG_1_REG;
-
-	REG_PIO_READ64(handle, offset, &fflp_cfg.value);
-	fflp_cfg.bits.ldw.fflpinitdone = 0;
-	REG_PIO_WRITE64(handle, offset, fflp_cfg.value);
-	return (NPI_SUCCESS);
-
-}
-
-/*
- * Enables the TCAM search function.
- *
- */
-npi_status_t
-npi_fflp_cfg_tcam_enable(npi_handle_t handle)
-
-{
-
-	fflp_cfg_1_t fflp_cfg;
-	uint64_t offset;
-	offset = FFLP_CFG_1_REG;
-	REG_PIO_READ64(handle, offset, &fflp_cfg.value);
-	fflp_cfg.bits.ldw.tcam_disable = 0;
-	REG_PIO_WRITE64(handle, offset, fflp_cfg.value);
-	return (NPI_SUCCESS);
-
-}
-
-/*
- * Disables the TCAM search function.
- * While the TCAM is disabled, all TCAM lookups return NO_MATCH.
- *
- */
-npi_status_t
-npi_fflp_cfg_tcam_disable(npi_handle_t handle)
-
-{
-
-	fflp_cfg_1_t fflp_cfg;
-	uint64_t offset;
-	offset = FFLP_CFG_1_REG;
-	REG_PIO_READ64(handle, offset, &fflp_cfg.value);
-	fflp_cfg.bits.ldw.tcam_disable = 1;
-	REG_PIO_WRITE64(handle, offset, fflp_cfg.value);
-	return (NPI_SUCCESS);
-
-}
-
-/*
- * npi_fflp_event_mask_config():
- *	This function is called to operate on the FFLP event mask
- *	register, which is used for generating interrupts and
- *	updating the status register.
- */
-npi_status_t
-npi_fflp_event_mask_config(npi_handle_t handle, io_op_t op_mode,
-		fflp_event_mask_cfg_t *mask_cfgp)
-{
-	npi_status_t	status = NPI_SUCCESS;
-	fflp_err_mask_t mask_reg;
-
-	switch (op_mode) {
-	case OP_GET:
-
-		REG_PIO_READ64(handle, FFLP_ERR_MSK_REG, &mask_reg.value);
-		*mask_cfgp = mask_reg.value & FFLP_ERR_MASK_ALL;
-		break;
-
-	case OP_SET:
-		mask_reg.value = (~(*mask_cfgp) & FFLP_ERR_MASK_ALL);
-		REG_PIO_WRITE64(handle, FFLP_ERR_MSK_REG, mask_reg.value);
-		break;
-
-	case OP_UPDATE:
-		REG_PIO_READ64(handle, FFLP_ERR_MSK_REG, &mask_reg.value);
-		mask_reg.value |=  (~(*mask_cfgp) & FFLP_ERR_MASK_ALL);
-		REG_PIO_WRITE64(handle, FFLP_ERR_MSK_REG, mask_reg.value);
-		break;
-
-	case OP_CLEAR:
-		mask_reg.value = FFLP_ERR_MASK_ALL;
-		REG_PIO_WRITE64(handle, FFLP_ERR_MSK_REG, mask_reg.value);
-		break;
-	default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-		    " npi_fflp_event_mask_config"
-		    " eventmask <0x%x>", op_mode));
-		return (NPI_FFLP_ERROR | NPI_FFLP_SW_PARAM_ERROR);
-	}
-
-	return (status);
-}
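-
-/*
- * Usage sketch (illustrative only; not part of the original source, and the
- * handle is hypothetical):
- * enable every FFLP error source, then read the mask back.
- *
- *	fflp_event_mask_cfg_t mask = CFG_FFLP_MASK_ALL;
- *	npi_status_t rs;
- *
- *	rs = npi_fflp_event_mask_config(handle, OP_SET, &mask);
- *	if (rs == NPI_SUCCESS)
- *		rs = npi_fflp_event_mask_config(handle, OP_GET, &mask);
- *
- * As the OP_SET case above shows, the caller's bits are inverted before
- * being written (~mask & FFLP_ERR_MASK_ALL).
- */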
-
-/*
- * Read vlan error bits
- */
-void
-npi_fflp_vlan_error_get(npi_handle_t handle, p_vlan_par_err_t p_err)
-{
-	REG_PIO_READ64(handle, FFLP_VLAN_PAR_ERR_REG, &p_err->value);
-}
-
-/*
- * clear vlan error bits
- */
-void
-npi_fflp_vlan_error_clear(npi_handle_t handle)
-{
-	vlan_par_err_t p_err;
-	p_err.value  = 0;
-	p_err.bits.ldw.m_err = 0;
-	p_err.bits.ldw.err = 0;
-	REG_PIO_WRITE64(handle, FFLP_VLAN_PAR_ERR_REG, p_err.value);
-
-}
-
-/*
- * Read TCAM error bits
- */
-void
-npi_fflp_tcam_error_get(npi_handle_t handle, p_tcam_err_t p_err)
-{
-	REG_PIO_READ64(handle, FFLP_TCAM_ERR_REG, &p_err->value);
-}
-
-/*
- * clear TCAM error bits
- */
-void
-npi_fflp_tcam_error_clear(npi_handle_t handle)
-{
-	tcam_err_t p_err;
-
-	p_err.value  = 0;
-	p_err.bits.ldw.p_ecc = 0;
-	p_err.bits.ldw.mult = 0;
-	p_err.bits.ldw.err = 0;
-	REG_PIO_WRITE64(handle, FFLP_TCAM_ERR_REG, p_err.value);
-
-}
-
-/*
- * Read FCRAM error bits
- */
-void
-npi_fflp_fcram_error_get(npi_handle_t handle,
-	p_hash_tbl_data_log_t p_err, uint8_t partition)
-{
-	uint64_t offset;
-
-	offset = FFLP_HASH_TBL_DATA_LOG_REG + partition * 8192;
-	REG_PIO_READ64(handle, offset, &p_err->value);
-}
-
-/*
- * clear FCRAM error bits
- */
-void
-npi_fflp_fcram_error_clear(npi_handle_t handle, uint8_t partition)
-{
-	hash_tbl_data_log_t p_err;
-	uint64_t offset;
-
-	p_err.value  = 0;
-	p_err.bits.ldw.pio_err = 0;
-	offset = FFLP_HASH_TBL_DATA_LOG_REG + partition * 8192;
-
-	REG_PIO_WRITE64(handle, offset, p_err.value);
-
-}
-
-/*
- * Read FCRAM lookup error log1 bits
- */
-void
-npi_fflp_fcram_error_log1_get(npi_handle_t handle,
-			    p_hash_lookup_err_log1_t log1)
-{
-	REG_PIO_READ64(handle, HASH_LKUP_ERR_LOG1_REG,
-				    &log1->value);
-}
-
-/*
- * Read FCRAM lookup error log2 bits
- */
-void
-npi_fflp_fcram_error_log2_get(npi_handle_t handle,
-		    p_hash_lookup_err_log2_t log2)
-{
-	REG_PIO_READ64(handle, HASH_LKUP_ERR_LOG2_REG,
-			    &log2->value);
-}
--- a/usr/src/uts/sun4v/io/nxge/npi/npi_fflp.h	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1187 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef _NPI_FFLP_H
-#define	_NPI_FFLP_H
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#ifdef	__cplusplus
-extern "C" {
-#endif
-
-
-#include <npi.h>
-#include <nxge_fflp_hw.h>
-#include <nxge_fflp.h>
-
-
-typedef uint8_t part_id_t;
-typedef uint8_t tcam_location_t;
-typedef uint16_t vlan_id_t;
-
-typedef	enum _tcam_op {
-	TCAM_RWC_STAT	= 0x1,
-	TCAM_RWC_MATCH	= 0x2
-} tcam_op_t;
-
-
-#define	NPI_TCAM_COMP_NO_MATCH	0x8000000000000ULL
-
-/*
- * NPI FFLP ERROR Codes
- */
-
-#define	NPI_FFLP_BLK_CODE	(FFLP_BLK_ID << 8)
-#define	NPI_FFLP_ERROR		(NPI_FAILURE | NPI_FFLP_BLK_CODE)
-#define	NPI_TCAM_ERROR		0x10
-#define	NPI_FCRAM_ERROR		0x20
-#define	NPI_GEN_FFLP		0x30
-#define	NPI_FFLP_SW_PARAM_ERROR	0x40
-#define	NPI_FFLP_HW_ERROR	0x80
-
-
-#define	NPI_FFLP_RESET_ERROR	(NPI_FFLP_ERROR | NPI_GEN_FFLP | RESET_FAILED)
-#define	NPI_FFLP_RDC_TABLE_INVALID	(NPI_FFLP_ERROR | RDC_TAB_INVALID)
-#define	NPI_FFLP_VLAN_INVALID		(NPI_FFLP_ERROR | VLAN_INVALID)
-#define	NPI_FFLP_PORT_INVALID		(NPI_FFLP_ERROR | PORT_INVALID)
-#define	NPI_FFLP_TCAM_RD_ERROR		\
-	(NPI_FFLP_ERROR | NPI_TCAM_ERROR | READ_FAILED)
-#define	NPI_FFLP_TCAM_WR_ERROR		\
-	(NPI_FFLP_ERROR | NPI_TCAM_ERROR | WRITE_FAILED)
-#define	NPI_FFLP_TCAM_LOC_INVALID	\
-	(NPI_FFLP_ERROR | NPI_TCAM_ERROR | LOCATION_INVALID)
-#define	NPI_FFLP_ASC_RAM_RD_ERROR	\
-	(NPI_FFLP_ERROR | NPI_TCAM_ERROR | READ_FAILED)
-#define	NPI_FFLP_ASC_RAM_WR_ERROR	\
-	(NPI_FFLP_ERROR | NPI_TCAM_ERROR | WRITE_FAILED)
-#define	NPI_FFLP_FCRAM_READ_ERROR	\
-	(NPI_FFLP_ERROR | NPI_FCRAM_ERROR | READ_FAILED)
-#define	NPI_FFLP_FCRAM_WR_ERROR		\
-	(NPI_FFLP_ERROR | NPI_FCRAM_ERROR | WRITE_FAILED)
-#define	NPI_FFLP_FCRAM_PART_INVALID	\
-	(NPI_FFLP_ERROR | NPI_FCRAM_ERROR | RDC_TAB_INVALID)
-#define	NPI_FFLP_FCRAM_LOC_INVALID	\
-	(NPI_FFLP_ERROR | NPI_FCRAM_ERROR | LOCATION_INVALID)
-
-#define	TCAM_CLASS_INVALID		\
-	(NPI_FFLP_SW_PARAM_ERROR | 0xb)
-/* have only 0xc, 0xd, 0xe and 0xf left for sw error codes */
-#define	NPI_FFLP_TCAM_CLASS_INVALID	\
-	(NPI_FFLP_ERROR | NPI_TCAM_ERROR | TCAM_CLASS_INVALID)
-#define	NPI_FFLP_TCAM_HW_ERROR		\
-	(NPI_FFLP_ERROR | NPI_FFLP_HW_ERROR | NPI_TCAM_ERROR)
-#define	NPI_FFLP_FCRAM_HW_ERROR		\
-	(NPI_FFLP_ERROR | NPI_FFLP_HW_ERROR | NPI_FCRAM_ERROR)
-
-
-/*
- * FFLP NPI defined event masks (mapped to the hardware defined masks).
- */
-typedef	enum _fflp_event_mask_cfg_e {
-	CFG_FFLP_ENT_MSK_VLAN_MASK = FFLP_ERR_VLAN_MASK,
-	CFG_FFLP_ENT_MSK_TCAM_MASK = FFLP_ERR_TCAM_MASK,
-	CFG_FFLP_ENT_MSK_HASH_TBL_LKUP_MASK = FFLP_ERR_HASH_TBL_LKUP_MASK,
-	CFG_FFLP_ENT_MSK_HASH_TBL_DAT_MASK = FFLP_ERR_HASH_TBL_DAT_MASK,
-
-	CFG_FFLP_MASK_ALL	= (FFLP_ERR_VLAN_MASK | FFLP_ERR_TCAM_MASK |
-						FFLP_ERR_HASH_TBL_LKUP_MASK |
-						FFLP_ERR_HASH_TBL_DAT_MASK)
-} fflp_event_mask_cfg_t;
-
-
-/* FFLP FCRAM Related Functions */
-/* The following are FCRAM datapath functions */
-
-/*
- * npi_fflp_fcram_entry_write ()
- * Populates an FCRAM entry
- * Inputs:
- *         handle:	opaque handle interpreted by the underlying OS
- *	   partid:	Partition ID
- *	   location:	Index to the FCRAM.
- *			Corresponds to last 20 bits of H1 value
- *	   fcram_ptr:	Pointer to the FCRAM contents to be used for writing
- *	   format:	Entry Format. Determines the size of the write.
- *			      FCRAM_ENTRY_OPTIM:   8 bytes (a 64 bit write)
- *			      FCRAM_ENTRY_EX_IP4:  32 bytes (4 X 64 bit write)
- *			      FCRAM_ENTRY_EX_IP6:  56 bytes (7 X 64 bit write)
- *
- * Return:
- * NPI_SUCCESS
- * NPI_FAILURE
- * NPI_HW_ERR
- * NPI_SW_ERR
- *
- */
-
-npi_status_t npi_fflp_fcram_entry_write(npi_handle_t, part_id_t,
-			    uint32_t, fcram_entry_t *,
-			    fcram_entry_format_t);
-
-/*
- * npi_fflp_fcram_entry_read ()
- * Reads an FCRAM entry
- * Inputs:
- *         handle:	opaque handle interpreted by the underlying OS
- *	   partid:	Partition ID
- *	   location:	Index to the FCRAM.
- *			Corresponds to last 20 bits of H1 value
- *	   fcram_ptr:	Pointer to the FCRAM contents to be updated
- *	   format:	Entry Format. Determines the size of the read.
- *			      FCRAM_ENTRY_OPTIM:   8 bytes (a 64 bit read)
- *			      FCRAM_ENTRY_EX_IP4:  32 bytes (4 X 64 bit read )
- *			      FCRAM_ENTRY_EX_IP6:  56 bytes (7 X 64 bit read )
- *
- * Return:
- * NPI_SUCCESS
- * NPI_FAILURE
- * NPI_HW_ERR
- * NPI_SW_ERR
- */
-
-npi_status_t npi_fflp_fcram_entry_read(npi_handle_t,  part_id_t,
-				    uint32_t, fcram_entry_t *,
-				    fcram_entry_format_t);
-
-/*
- * npi_fflp_fcram_entry_invalidate ()
- * Invalidate FCRAM entry at the given location
- * Inputs:
- *	handle:		opaque handle interpreted by the underlying OS
- *	partid:		Partition ID
- *	location:	location of the FCRAM/hash entry.
- *
- * Return:
- * NPI_SUCCESS
- * NPI_FAILURE
- * NPI_HW_ERR
- * NPI_SW_ERR
- *
- */
-
-npi_status_t
-npi_fflp_fcram_entry_invalidate(npi_handle_t, part_id_t,
-				    uint32_t);
-
-/*
- * npi_fflp_fcram_subarea_write ()
- * Writes to FCRAM entry subarea i.e the 8 bytes within the 64 bytes pointed by
- * last 20 bits of  H1. Effectively, this accesses specific 8 bytes within the
- * hash table bucket.
- *
- *  H1-->  |-----------------|
- *	   |	subarea 0    |
- *	   |_________________|
- *	   | Subarea 1	     |
- *	   |_________________|
- *	   | .......	     |
- *	   |_________________|
- *	   | Subarea 7       |
- *	   |_________________|
- *
- * Inputs:
- *         handle:	opaque handle interpreted by the underlying OS
- *	   partid:	Partition ID
- *	   location:	location of the subarea. It is derived from:
- *			Bucket = [19:15][14:0]       (20 bits of H1)
- *			location = (Bucket << 3 ) + subarea * 8
- *				 = [22:18][17:3] || subarea * 8
- *	   data:	Data
- * Return:
- * NPI_SUCCESS
- * NPI_FAILURE
- * NPI_HW_ERR
- * NPI_SW_ERR
- *
- */
-
-
-npi_status_t npi_fflp_fcram_subarea_write(npi_handle_t, part_id_t,
-				    uint32_t, uint64_t);
-/*
- * npi_fflp_fcram_subarea_read ()
- * Reads an FCRAM entry subarea i.e the 8 bytes within the 64 bytes pointed by
- * last 20 bits of  H1. Effectively, this accesses specific 8 bytes within the
- * hash table bucket.
- *
- *  H1-->  |-----------------|
- *	   |	subarea 0    |
- *	   |_________________|
- *	   | Subarea 1	     |
- *	   |_________________|
- *	   | .......	     |
- *	   |_________________|
- *	   | Subarea 7       |
- *	   |_________________|
- *
- * Inputs:
- *         handle:	opaque handle interpreted by the underlying OS
- *	   partid:	Partition ID
- *	   location:	location of the subarea. It is derived from:
- *			Bucket = [19:15][14:0]       (20 bits of H1)
- *			location = (Bucket << 3 ) + subarea * 8
- *				 = [22:18][17:3] || subarea * 8
- *	   data:	ptr to write subarea contents to.
- *
- * Return:
- * NPI_SUCCESS
- * NPI_FAILURE
- * NPI_HW_ERR
- * NPI_SW_ERR
- *
- */
-
-npi_status_t npi_fflp_fcram_subarea_read  (npi_handle_t,
-			part_id_t, uint32_t, uint64_t *);
-
-
-/* The following are function zero (FZC) fflp configuration functions */
-/*
- * npi_fflp_cfg_fcram_partition()
- * Partitions and configures the FCRAM
- *
- * Input
- *     partid			partition ID
- *				Corresponds to the RDC table
- *     part_size		Size of the partition
- *
- * Return:
- * NPI_SUCCESS
- * NPI_FAILURE
- * NPI_HW_ERR
- * NPI_SW_ERR
- *
- */
-npi_status_t npi_fflp_cfg_fcram_partition(npi_handle_t, part_id_t,
-				uint8_t, uint8_t);
-
-/*
- * npi_fflp_cfg_fcram_partition_enable
- * Enables a previously configured FCRAM partition
- *
- * Input
- *     partid			partition ID
- *				Corresponds to the RDC table
- *
- * Return
- *      0			Successful
- *      Non zero  error code    Enable failed, and reason.
- *
- */
-npi_status_t npi_fflp_cfg_fcram_partition_enable(npi_handle_t,
-				part_id_t);
-
-/*
- * npi_fflp_cfg_fcram_partition_disable
- * Disables a previously configured FCRAM partition
- *
- * Input
- *     partid			partition ID
- *				Corresponds to the RDC table
- *
- * Return:
- * NPI_SUCCESS
- * NPI_FAILURE
- * NPI_HW_ERR
- * NPI_SW_ERR
- *
- */
-
-npi_status_t npi_fflp_cfg_fcram_partition_disable(npi_handle_t,
-				part_id_t);
-
-
-/*
- *  npi_fflp_cfg_fcram_reset
- *  Initializes the FCRAM reset sequence (including FFLP).
- *
- *  Input
- *	strength:		FCRAM Drive strength
- *				   strong, weak or normal
- *				   HW recommended value:
- *	qs:			FCRAM QS mode selection
- *				   qs mode or free running
- *				   HW recommended value is:
- *
- * Return:
- * NPI_SUCCESS
- * NPI_FAILURE
- * NPI_HW_ERR
- * NPI_SW_ERR
- *
- */
-
-npi_status_t npi_fflp_cfg_fcram_reset(npi_handle_t,
-				    fflp_fcram_output_drive_t,
-				    fflp_fcram_qs_t);
-
-
-
-/*
- *  npi_fflp_cfg_tcam_reset
- *  Initializes the FFLP reset sequence
- * Doesn't configure the FCRAM params.
- *
- *  Input
- *
- * Return:
- * NPI_SUCCESS
- * NPI_FAILURE
- * NPI_HW_ERR
- * NPI_SW_ERR
- *
- */
-
-npi_status_t npi_fflp_cfg_tcam_reset(npi_handle_t);
-
-/*
- *  npi_fflp_cfg_tcam_enable
- *  Enables the TCAM function
- *
- *  Input
- *
- * Return:
- * NPI_SUCCESS
- * NPI_FAILURE
- * NPI_HW_ERR
- * NPI_SW_ERR
- *
- */
-
-npi_status_t npi_fflp_cfg_tcam_enable(npi_handle_t);
-
-/*
- *  npi_fflp_cfg_tcam_disable
- *  Disables the TCAM function
- *
- *  Input
- *
- * Return:
- * NPI_SUCCESS
- * NPI_FAILURE
- * NPI_HW_ERR
- * NPI_SW_ERR
- *
- */
-
-npi_status_t npi_fflp_cfg_tcam_disable(npi_handle_t);
-
-
-/*
- *  npi_fflp_cfg_cam_errorcheck_disable
- *  Disables FCRAM and TCAM error checking
- *
- *  Input
- *
- *
- * Return:
- * NPI_SUCCESS
- * NPI_FAILURE
- * NPI_HW_ERR
- * NPI_SW_ERR
- *
- */
-
-npi_status_t npi_fflp_cfg_cam_errorcheck_disable(npi_handle_t);
-
-/*
- *  npi_fflp_cfg_cam_errorcheck_enable
- *  Enables FCRAM and TCAM error checking
- *
- *  Input
- *
- *
- *  Return
- *      0			Successful
- *      Non zero  error code    Enable failed, and reason.
- *
- */
-npi_status_t npi_fflp_cfg_cam_errorcheck_enable(npi_handle_t);
-
-
-/*
- *  npi_fflp_cfg_llcsnap_enable
- *  Enables input parser llcsnap recognition
- *
- *  Input
- *
- *
- * Return:
- * NPI_SUCCESS
- * NPI_FAILURE
- * NPI_HW_ERR
- * NPI_SW_ERR
- *
- *
- */
-npi_status_t npi_fflp_cfg_llcsnap_enable(npi_handle_t);
-
-/*
- *  npi_fflp_cfg_llcsnap_disable
- *  Disables input parser llcsnap recognition
- *
- *  Input
- *
- *
- * Return:
- * NPI_SUCCESS
- * NPI_FAILURE
- * NPI_HW_ERR
- * NPI_SW_ERR
- *
- *
- */
-
-npi_status_t npi_fflp_cfg_llcsnap_disable(npi_handle_t);
-
-/*
- * npi_fflp_cfg_fcram_refresh_time
- * Sets the FCRAM min and max refresh times.
- *
- * Input
- *	min_time		Minimum Refresh time count
- *	max_time		maximum Refresh Time count
- *	sys_time		System Clock rate
- *
- *	The counters are 16 bit counters. The maximum refresh time is
- *      3.9us/clock cycle. The minimum is 400ns/clock cycle.
- *	Is the clock cycle the FCRAM clock cycle?
- *	If it is, then the sys_time parameter is not needed, as there
- *      won't be any configuration variation due to the system clock
- *      cycle.
- *
- * Return:
- * NPI_SUCCESS
- * NPI_FAILURE
- * NPI_HW_ERR
- * NPI_SW_ERR
- *
- */
-
-npi_status_t npi_fflp_cfg_fcram_refresh_time(npi_handle_t,
-		uint32_t, uint32_t, uint32_t);
-
-
-/*
- * npi_fflp_cfg_fcram_access ()
- *
- * Sets the ratio between the FCRAM pio and lookup access
- * Input:
- * access_ratio: 0  Lookup has the highest priority
- *		 15 PIO has maximum possible priority
- *
- */
-
-npi_status_t npi_fflp_cfg_fcram_access(npi_handle_t,
-					uint8_t);
-
-
-/*
- * npi_fflp_cfg_tcam_access ()
- *
- * Sets the ratio between the TCAM pio and lookup access
- * Input:
- * access_ratio: 0  Lookup has the highest priority
- *		 15 PIO has maximum possible priority
- *
- */
-
-npi_status_t npi_fflp_cfg_tcam_access(npi_handle_t, uint8_t);
-
-
-/*
- *  npi_fflp_fcram_get_lookup_err_log
- *  Reports hash table (FCRAM) lookup errors
- *
- *  Input
- *      status			Pointer to return Error bits
- *
- *
- * Return:
- * NPI_SUCCESS
- * NPI_FAILURE
- * NPI_HW_ERR
- * NPI_SW_ERR
- *
- */
-
-npi_status_t npi_fflp_fcram_get_lookup_err_log(npi_handle_t,
-				    hash_lookup_err_log_t *);
-
-
-
-/*
- * npi_fflp_fcram_get_pio_err_log
- * Reports hash table PIO read errors.
- *
- * Input
- *	partid:		partition ID
- *      err_stat	pointer to return Error bits
- *
- * Return:
- * NPI_SUCCESS
- * NPI_FAILURE
- * NPI_HW_ERR
- * NPI_SW_ERR
- *
- */
-npi_status_t npi_fflp_fcram_get_pio_err_log(npi_handle_t,
-				part_id_t, hash_pio_err_log_t *);
-
-
-/*
- * npi_fflp_fcram_clr_pio_err_log
- * Clears FCRAM PIO error status for the partition.
- * If there are FCRAM errors, as indicated by the err bit set by HW,
- * then the SW will clear it by clearing the bit.
- *
- * Input
- *	partid:		partition ID
- *
- *
- * Return
- *	NPI_SUCCESS	Success
- *
- *
- */
-
-npi_status_t npi_fflp_fcram_clr_pio_err_log(npi_handle_t,
-						part_id_t);
-
-
-
-/*
- * npi_fflp_fcram_err_data_test
- * Tests the FCRAM error detection logic.
- * The error detection logic for the datapath is tested.
- * Bits [63:0] select the data bits to be XOR'ed.
- *
- * Input
- *	data:	 bit mask selecting the data bits to be XOR'ed
- *
- *
- * Return
- *	NPI_SUCCESS	Success
- *
- *
- */
-npi_status_t npi_fflp_fcram_err_data_test(npi_handle_t, fcram_err_data_t *);
-
-
-/*
- * npi_fflp_fcram_err_synd_test
- * Tests the FCRAM error detection logic.
- * The error detection logic for the syndrome is tested.
- * tst0->synd (8bits) are set to select the syndrome bits
- * to be XOR'ed
- *
- * Input
- *	syndrome_bits:	 Syndrome bits to select bits to be xor'ed
- *
- *
- * Return
- *	NPI_SUCCESS	Success
- *
- *
- */
-npi_status_t npi_fflp_fcram_err_synd_test(npi_handle_t, uint8_t);
-
-
-/*
- * npi_fflp_cfg_vlan_table_clear
- * Clears the vlan RDC table
- *
- * Input
- *     vlan_id		VLAN ID
- *
- * Output
- *
- *	NPI_SUCCESS			Successful
- *
- */
-
-npi_status_t npi_fflp_cfg_vlan_table_clear(npi_handle_t, vlan_id_t);
-
-/*
- * npi_fflp_cfg_enet_vlan_table_assoc
- * Associates a port VLAN ID with an RDC table and sets the priority
- * with respect to the L2DA RDC table.
- *
- * Input
- *     mac_portn		port number
- *     vlan_id			VLAN ID
- *     rdc_table		RDC Table #
- *     priority			priority
- *				1: vlan classification has higher priority
- *				0: l2da classification has higher priority
- *
- * Return:
- * NPI_SUCCESS
- * NPI_FAILURE
- * NPI_HW_ERR
- * NPI_SW_ERR
- *
- */
-
-npi_status_t npi_fflp_cfg_enet_vlan_table_assoc(npi_handle_t,
-				    uint8_t, vlan_id_t,
-				    uint8_t, uint8_t);
-
-
-/*
- * npi_fflp_cfg_enet_vlan_table_set_pri
- * Sets the VLAN-based classification priority with respect to
- * L2DA classification.
- *
- * Input
- *     mac_portn	port number
- *     vlan_id		VLAN ID
- *     priority 	priority
- *			1: vlan classification has higher priority
- *			0: l2da classification has higher priority
- *
- * Return:
- * NPI_SUCCESS
- * NPI_FAILURE
- * NPI_HW_ERR
- * NPI_SW_ERR
- *
- */
-
-npi_status_t npi_fflp_cfg_enet_vlan_table_set_pri(npi_handle_t,
-				    uint8_t, vlan_id_t,
-				    uint8_t);
-
-/*
- * npi_fflp_cfg_enet_usr_cls_set()
- * Configures a user configurable Ethernet class.
- *
- * Input
- *      class:       Ethernet Class
- *		     class (TCAM_CLASS_ETYPE or  TCAM_CLASS_ETYPE_2)
- *      enet_type:   16 bit Ethernet Type value, corresponding to bytes
- *                        [13:14] of the frame.
- *
- *  By default, the class is disabled until explicitly enabled.
- *
- * Return
- * NPI_SUCCESS
- * NPI_FAILURE
- * NPI_HW_ERR
- * NPI_SW_ERR
- *
- *
- *
- */
-
-npi_status_t npi_fflp_cfg_enet_usr_cls_set(npi_handle_t,
-				    tcam_class_t, uint16_t);
-
-/*
- * npi_fflp_cfg_enet_usr_cls_enable()
- * Enables a previously configured TCAM user configurable Ethernet class.
- *
- * Input
- *      class:       Ethernet class
- *		     (TCAM_CLASS_ETYPE or  TCAM_CLASS_ETYPE_2)
- *
- * Return
- * NPI_SUCCESS
- * NPI_FAILURE
- * NPI_HW_ERR
- * NPI_SW_ERR
- *
- */
-
-npi_status_t npi_fflp_cfg_enet_usr_cls_enable(npi_handle_t, tcam_class_t);
-
-/*
- * npi_fflp_cfg_enet_usr_cls_disable()
- * Disables previously configured TCAM user configurable Ethernet classes.
- *
- * Input
- *      class:       Ethernet Class
- *		     class = (TCAM_CLASS_ETYPE or  TCAM_CLASS_ETYPE_2)
- *
- * Return
- * NPI_SUCCESS
- * NPI_FAILURE
- * NPI_HW_ERR
- * NPI_SW_ERR
- *
- */
-
-
-npi_status_t npi_fflp_cfg_enet_usr_cls_disable(npi_handle_t, tcam_class_t);
-
-
-/*
- * npi_fflp_cfg_ip_usr_cls_set()
- * Configures the TCAM user configurable IP classes.
- *
- * Input
- *      class:       IP Class
- *		     (TCAM_CLASS_IP_USER_4 <= class <= TCAM_CLASS_IP_USER_7)
- *      tos:         IP TOS bits
- *      tos_mask:    IP TOS bits mask. bits with mask bits set will be used
- *      proto:       IP Proto
- *      ver:         IP Version
- * By default, the class is disabled until explicitly enabled.
- *
- * Return
- * NPI_SUCCESS
- * NPI_FAILURE
- * NPI_HW_ERR
- * NPI_SW_ERR
- *
- */
-
-npi_status_t npi_fflp_cfg_ip_usr_cls_set(npi_handle_t,
-					tcam_class_t,
-					uint8_t, uint8_t,
-					uint8_t, uint8_t);
-
-/*
- * npi_fflp_cfg_ip_usr_cls_enable()
- * Enables a previously configured TCAM user configurable IP class.
- *
- * Input
- *      class:       IP Class
- *		     (TCAM_CLASS_IP_USER_4 <= class <= TCAM_CLASS_IP_USER_7)
- *
- * Return
- * NPI_SUCCESS
- * NPI_FAILURE
- * NPI_HW_ERR
- * NPI_SW_ERR
- *
- */
-
-npi_status_t npi_fflp_cfg_ip_usr_cls_enable(npi_handle_t, tcam_class_t);
-
-/*
- * npi_fflp_cfg_ip_usr_cls_disable()
- * Disables previously configured TCAM user configurable IP classes.
- *
- * Input
- *      class:       IP Class
- *		     (TCAM_CLASS_IP_USER_4 <= class <= TCAM_CLASS_IP_USER_7)
- *
- * Return
- * NPI_SUCCESS
- * NPI_FAILURE
- * NPI_HW_ERR
- * NPI_SW_ERR
- *
- */
-
-
-npi_status_t npi_fflp_cfg_ip_usr_cls_disable(npi_handle_t, tcam_class_t);
-
-
-/*
- * npi_fflp_cfg_ip_cls_tcam_key ()
- *
- * Configures the TCAM key generation for the IP classes
- *
- * Input
- *      l3_class:        IP class to configure key generation
- *      cfg:             Configuration bits:
- *                   discard:      Discard all frames of this class
- *                   use_ip_saddr: use ip src address (for ipv6)
- *                   use_ip_daddr: use ip dest address (for ipv6)
- *                   lookup_enable: Enable Lookup
- *
- *
- * Return
- * NPI_SUCCESS
- * NPI_HW_ERR
- * NPI_SW_ERR
- *
- */
-
-
-npi_status_t npi_fflp_cfg_ip_cls_tcam_key(npi_handle_t,
-				    tcam_class_t, tcam_key_cfg_t *);
-
-/*
- * npi_fflp_cfg_ip_cls_flow_key ()
- *
- * Configures the flow key generation for the IP classes
- * Flow key is used to generate the H1 hash function value
- * The fields used for the generation are configured using this
- * NPI function.
- *
- * Input
- *      l3_class:        IP class to configure flow key generation
- *      cfg:             Configuration bits:
- *                   use_proto:     Use IP proto field
- *                   use_dport:     use l4 destination port
- *                   use_sport:     use l4 source port
- *                   ip_opts_exist: IP Options Present
- *                   use_daddr:     use ip dest address
- *                   use_saddr:     use ip source address
- *                   use_vlan:      use VLAN ID
- *                   use_l2da:      use L2 Dest MAC Address
- *                   use_portnum:   use L2 virtual port number
- *
- *
- * Return
- * NPI_SUCCESS
- * NPI_HW_ERR
- * NPI_SW_ERR
- *
- */
-
-npi_status_t npi_fflp_cfg_ip_cls_flow_key(npi_handle_t,
-			    tcam_class_t, flow_key_cfg_t *);
-
-
-
-npi_status_t npi_fflp_cfg_ip_cls_flow_key_get(npi_handle_t,
-				    tcam_class_t,
-				    flow_key_cfg_t *);
-
-
-npi_status_t npi_fflp_cfg_ip_cls_tcam_key_get(npi_handle_t,
-				    tcam_class_t, tcam_key_cfg_t *);
-/*
- * npi_fflp_cfg_hash_h1poly()
- * Initializes the H1 hash generation logic.
- *
- * Input
- *      init_value:       The initial value (seed)
- *
- * Return
- * NPI_SUCCESS
- * NPI_FAILURE
- * NPI_HW_ERR
- * NPI_SW_ERR
- *
- */
-
-npi_status_t npi_fflp_cfg_hash_h1poly(npi_handle_t, uint32_t);
-
-
-
-/*
- * npi_fflp_cfg_hash_h2poly()
- * Initializes the H2 hash generation logic.
- *
- * Input
- *      init_value:       The initial value (seed)
- *
- * Return
- * NPI_SUCCESS
- * NPI_FAILURE
- * NPI_HW_ERR
- * NPI_SW_ERR
- *
- */
-
-npi_status_t npi_fflp_cfg_hash_h2poly(npi_handle_t, uint16_t);
-
-
-/*
- * Reset the fflp block (actually the FCRAM)
- * Waits until reset is completed
- *
- * input
- * strength	fcram output drive strength: weak, normal or strong
- * qs		qs mode. Normal or free running
- *
- * return value
- *	  NPI_SUCCESS
- *	  NPI_SW_ERR
- *	  NPI_HW_ERR
- */
-
-npi_status_t npi_fflp_fcram_reset(npi_handle_t,
-			    fflp_fcram_output_drive_t,
-			    fflp_fcram_qs_t);
-
-
-/* FFLP TCAM Related Functions */
-
-
-/*
- * npi_fflp_tcam_entry_match()
- *
- * Tests for TCAM match of the tcam entry
- *
- * Input
- * tcam_ptr
- *
- * Return
- *   NPI_SUCCESS
- *   NPI_SW_ERR
- *   NPI_HW_ERR
- *
- */
-
-int npi_fflp_tcam_entry_match(npi_handle_t, tcam_entry_t *);
-
-/*
- * npi_fflp_tcam_entry_write()
- *
- * Writes a TCAM entry at the given TCAM location.
- *
- * Input
- * location
- * tcam_ptr
- *
- * Return
- *   NPI_SUCCESS
- *   NPI_SW_ERR
- *   NPI_HW_ERR
- *
- */
-
-npi_status_t npi_fflp_tcam_entry_write(npi_handle_t,
-				tcam_location_t,
-				tcam_entry_t *);
-
-/*
- * npi_fflp_tcam_entry_read ()
- *
- * Reads a TCAM entry from the given TCAM location.
- *
- * Input:
- * location
- * tcam_ptr
- *
- * Return:
- * NPI_SUCCESS
- * NPI_HW_ERR
- * NPI_SW_ERR
- *
- */
-
-
-npi_status_t npi_fflp_tcam_entry_read(npi_handle_t,
-					tcam_location_t,
-					tcam_entry_t *);
-
-/*
- * npi_fflp_tcam_entry_invalidate()
- *
- * invalidates entry at tcam location
- *
- * Input
- * location
- *
- * Return
- *   NPI_SUCCESS
- *   NPI_SW_ERR
- *   NPI_HW_ERR
- *
- */
-
-npi_status_t npi_fflp_tcam_entry_invalidate(npi_handle_t,
-				    tcam_location_t);
-
-
-/*
- * npi_fflp_tcam_asc_ram_entry_write()
- *
- * Writes a TCAM associated RAM entry at the given TCAM location.
- *
- * Input:
- * location	TCAM associated RAM location
- * ram_data	Value to write
- *
- * Return:
- * NPI_SUCCESS
- * NPI_HW_ERR
- * NPI_SW_ERR
- *
- */
-
-npi_status_t npi_fflp_tcam_asc_ram_entry_write(npi_handle_t,
-				    tcam_location_t,
-				    uint64_t);
-
-
-/*
- * npi_fflp_tcam_asc_ram_entry_read()
- *
- * Reads the TCAM associated RAM contents at the given TCAM location.
- *
- * Input:
- * location	TCAM associated RAM location
- * ram_data	ptr to return contents
- *
- * Return:
- * NPI_SUCCESS
- * NPI_HW_ERR
- * NPI_SW_ERR
- *
- */
-
-npi_status_t npi_fflp_tcam_asc_ram_entry_read(npi_handle_t,
-				    tcam_location_t,
-				    uint64_t *);
-
-/*
- * npi_fflp_tcam_get_err_log
- * Reports TCAM PIO read and lookup errors.
- * If there are TCAM errors as indicated by err bit set by HW,
- *  then the SW will clear it by clearing the bit.
- *
- * Input
- *	err_stat:	 structure to report various TCAM errors.
- *                       will be updated if there are TCAM errors.
- *
- *
- * Return
- *	NPI_SUCCESS	Success
- *
- *
- */
-npi_status_t npi_fflp_tcam_get_err_log(npi_handle_t, tcam_err_log_t *);
-
-
-
-/*
- * npi_fflp_tcam_clr_err_log
- * Clears TCAM PIO read and lookup error status.
- * If there are TCAM errors as indicated by err bit set by HW,
- *  then the SW will clear it by clearing the bit.
- *
- * Input
- *	handle:		opaque handle interpreted by the underlying OS
- *
- *
- * Return
- *	NPI_SUCCESS	Success
- *
- *
- */
-
-npi_status_t npi_fflp_tcam_clr_err_log(npi_handle_t);
-
-
-
-
-
-/*
- * npi_fflp_vlan_tbl_clr_err_log
- * Clears VLAN Table PIO  error status.
- * If there are VLAN Table errors as indicated by err bit set by HW,
- *  then the SW will clear it by clearing the bit.
- *
- * Input
- *	handle:		opaque handle interpreted by the underlying OS
- *
- *
- * Return
- *	NPI_SUCCESS	Success
- *
- *
- */
-
-npi_status_t npi_fflp_vlan_tbl_clr_err_log(npi_handle_t);
-
-
-/*
- * npi_fflp_vlan_tbl_get_err_log
- * Reports VLAN Table  errors.
- * If there are VLAN Table errors as indicated by err bit set by HW,
- *  then the SW will clear it by clearing the bit.
- *
- * Input
- *	err_stat:	 structure to report various VLAN table errors.
- *                       will be updated if there are errors.
- *
- *
- * Return
- *	NPI_SUCCESS	Success
- *
- *
- */
-npi_status_t npi_fflp_vlan_tbl_get_err_log(npi_handle_t,
-				    vlan_tbl_err_log_t *);
-
-
-
-
-/*
- * npi_fflp_event_mask_config():
- *	This function is called to operate on the FFLP event mask
- *	register, which is used for generating interrupts and
- *	updating the status register.
- *
- * Parameters:
- *	handle		- NPI handle
- *	op_mode		- OP_GET: get hardware event mask
- *			  OP_SET: set hardware interrupt event masks
- *	mask_cfgp	- pointer to NPI defined event mask
- *			  enum data type.
- * Return:
- *	NPI_SUCCESS		- If set is complete successfully.
- *
- *	Error:
- *	NPI_FAILURE		-
- *	NPI_FFLP_ERROR | NPI_FFLP_SW_PARAM_ERROR
- *
- */
-npi_status_t
-npi_fflp_event_mask_config(npi_handle_t, io_op_t,
-			    fflp_event_mask_cfg_t *);
-
-npi_status_t npi_fflp_dump_regs(npi_handle_t);
-
-
-/* Error status read and clear functions */
-
-void	npi_fflp_vlan_error_get(npi_handle_t,
-				    p_vlan_par_err_t);
-void	npi_fflp_vlan_error_clear(npi_handle_t);
-void	npi_fflp_tcam_error_get(npi_handle_t,
-				    p_tcam_err_t);
-void	npi_fflp_tcam_error_clear(npi_handle_t);
-
-void	npi_fflp_fcram_error_get(npi_handle_t,
-				    p_hash_tbl_data_log_t,
-				    uint8_t);
-void npi_fflp_fcram_error_clear(npi_handle_t, uint8_t);
-
-void npi_fflp_fcram_error_log1_get(npi_handle_t,
-				    p_hash_lookup_err_log1_t);
-
-void npi_fflp_fcram_error_log2_get(npi_handle_t,
-			    p_hash_lookup_err_log2_t);
-
-void npi_fflp_vlan_tbl_dump(npi_handle_t);
-
-#ifdef	__cplusplus
-}
-#endif
-
-#endif	/* _NPI_FFLP_H */
--- a/usr/src/uts/sun4v/io/nxge/npi/npi_ipp.c	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,565 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#include <npi_ipp.h>
-
-uint64_t ipp_fzc_offset[] = {
-		IPP_CONFIG_REG,
-		IPP_DISCARD_PKT_CNT_REG,
-		IPP_TCP_CKSUM_ERR_CNT_REG,
-		IPP_ECC_ERR_COUNTER_REG,
-		IPP_INT_STATUS_REG,
-		IPP_INT_MASK_REG,
-		IPP_PFIFO_RD_DATA0_REG,
-		IPP_PFIFO_RD_DATA1_REG,
-		IPP_PFIFO_RD_DATA2_REG,
-		IPP_PFIFO_RD_DATA3_REG,
-		IPP_PFIFO_RD_DATA4_REG,
-		IPP_PFIFO_WR_DATA0_REG,
-		IPP_PFIFO_WR_DATA1_REG,
-		IPP_PFIFO_WR_DATA2_REG,
-		IPP_PFIFO_WR_DATA3_REG,
-		IPP_PFIFO_WR_DATA4_REG,
-		IPP_PFIFO_RD_PTR_REG,
-		IPP_PFIFO_WR_PTR_REG,
-		IPP_DFIFO_RD_DATA0_REG,
-		IPP_DFIFO_RD_DATA1_REG,
-		IPP_DFIFO_RD_DATA2_REG,
-		IPP_DFIFO_RD_DATA3_REG,
-		IPP_DFIFO_RD_DATA4_REG,
-		IPP_DFIFO_WR_DATA0_REG,
-		IPP_DFIFO_WR_DATA1_REG,
-		IPP_DFIFO_WR_DATA2_REG,
-		IPP_DFIFO_WR_DATA3_REG,
-		IPP_DFIFO_WR_DATA4_REG,
-		IPP_DFIFO_RD_PTR_REG,
-		IPP_DFIFO_WR_PTR_REG,
-		IPP_STATE_MACHINE_REG,
-		IPP_CKSUM_STATUS_REG,
-		IPP_FFLP_CKSUM_INFO_REG,
-		IPP_DEBUG_SELECT_REG,
-		IPP_DFIFO_ECC_SYNDROME_REG,
-		IPP_DFIFO_EOPM_RD_PTR_REG,
-		IPP_ECC_CTRL_REG
-};
-
-const char *ipp_fzc_name[] = {
-		"IPP_CONFIG_REG",
-		"IPP_DISCARD_PKT_CNT_REG",
-		"IPP_TCP_CKSUM_ERR_CNT_REG",
-		"IPP_ECC_ERR_COUNTER_REG",
-		"IPP_INT_STATUS_REG",
-		"IPP_INT_MASK_REG",
-		"IPP_PFIFO_RD_DATA0_REG",
-		"IPP_PFIFO_RD_DATA1_REG",
-		"IPP_PFIFO_RD_DATA2_REG",
-		"IPP_PFIFO_RD_DATA3_REG",
-		"IPP_PFIFO_RD_DATA4_REG",
-		"IPP_PFIFO_WR_DATA0_REG",
-		"IPP_PFIFO_WR_DATA1_REG",
-		"IPP_PFIFO_WR_DATA2_REG",
-		"IPP_PFIFO_WR_DATA3_REG",
-		"IPP_PFIFO_WR_DATA4_REG",
-		"IPP_PFIFO_RD_PTR_REG",
-		"IPP_PFIFO_WR_PTR_REG",
-		"IPP_DFIFO_RD_DATA0_REG",
-		"IPP_DFIFO_RD_DATA1_REG",
-		"IPP_DFIFO_RD_DATA2_REG",
-		"IPP_DFIFO_RD_DATA3_REG",
-		"IPP_DFIFO_RD_DATA4_REG",
-		"IPP_DFIFO_WR_DATA0_REG",
-		"IPP_DFIFO_WR_DATA1_REG",
-		"IPP_DFIFO_WR_DATA2_REG",
-		"IPP_DFIFO_WR_DATA3_REG",
-		"IPP_DFIFO_WR_DATA4_REG",
-		"IPP_DFIFO_RD_PTR_REG",
-		"IPP_DFIFO_WR_PTR_REG",
-		"IPP_STATE_MACHINE_REG",
-		"IPP_CKSUM_STATUS_REG",
-		"IPP_FFLP_CKSUM_INFO_REG",
-		"IPP_DEBUG_SELECT_REG",
-		"IPP_DFIFO_ECC_SYNDROME_REG",
-		"IPP_DFIFO_EOPM_RD_PTR_REG",
-		"IPP_ECC_CTRL_REG",
-};
-
-npi_status_t
-npi_ipp_dump_regs(npi_handle_t handle, uint8_t port)
-{
-	uint64_t		value, offset;
-	int 			num_regs, i;
-
-	ASSERT(IS_PORT_NUM_VALID(port));
-
-	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-		"\nIPP PORT Register Dump for port %d\n", port));
-
-	num_regs = sizeof (ipp_fzc_offset) / sizeof (uint64_t);
-	for (i = 0; i < num_regs; i++) {
-		offset = IPP_REG_ADDR(port, ipp_fzc_offset[i]);
-		NXGE_REG_RD64(handle, offset, &value);
-		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL, "0x%08llx "
-			"%s\t 0x%08llx \n",
-			offset, ipp_fzc_name[i], value));
-	}
-
-	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-		"\n IPP FZC Register Dump for port %d done\n", port));
-
-	return (NPI_SUCCESS);
-}
-
-void
-npi_ipp_read_regs(npi_handle_t handle, uint8_t port)
-{
-	uint64_t		value, offset;
-	int 			num_regs, i;
-
-	ASSERT(IS_PORT_NUM_VALID(port));
-
-	NPI_DEBUG_MSG((handle.function, NPI_IPP_CTL,
-		"\nIPP PORT Register read (to clear) for port %d\n", port));
-
-	num_regs = sizeof (ipp_fzc_offset) / sizeof (uint64_t);
-	for (i = 0; i < num_regs; i++) {
-		offset = IPP_REG_ADDR(port, ipp_fzc_offset[i]);
-		NXGE_REG_RD64(handle, offset, &value);
-	}
-
-}
-
-/*
- * IPP Reset Routine
- */
-npi_status_t
-npi_ipp_reset(npi_handle_t handle, uint8_t portn)
-{
-	uint64_t val = 0;
-	uint32_t cnt = MAX_PIO_RETRIES;
-
-	ASSERT(IS_PORT_NUM_VALID(portn));
-
-	IPP_REG_RD(handle, portn, IPP_CONFIG_REG, &val);
-	val |= IPP_SOFT_RESET;
-	IPP_REG_WR(handle, portn, IPP_CONFIG_REG, val);
-
-	do {
-		NXGE_DELAY(IPP_RESET_WAIT);
-		IPP_REG_RD(handle, portn, IPP_CONFIG_REG, &val);
-		cnt--;
-	} while (((val & IPP_SOFT_RESET) != 0) && (cnt > 0));
-
-	if (cnt == 0) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_ipp_reset"
-				    " HW Error: IPP_RESET  <0x%x>", val));
-		return (NPI_FAILURE | NPI_IPP_RESET_FAILED(portn));
-	}
-
-	return (NPI_SUCCESS);
-}
-
-
-/*
- * IPP Configuration Routine
- */
-npi_status_t
-npi_ipp_config(npi_handle_t handle, config_op_t op, uint8_t portn,
-		ipp_config_t config)
-{
-	uint64_t val = 0;
-
-	ASSERT(IS_PORT_NUM_VALID(portn));
-
-	switch (op) {
-
-	case ENABLE:
-	case DISABLE:
-		if ((config == 0) || ((config & ~CFG_IPP_ALL) != 0)) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				" npi_ipp_config"
-				" Invalid Input config <0x%x>",
-				config));
-			return (NPI_FAILURE | NPI_IPP_CONFIG_INVALID(portn));
-		}
-
-		IPP_REG_RD(handle, portn, IPP_CONFIG_REG, &val);
-
-		if (op == ENABLE)
-			val |= config;
-		else
-			val &= ~config;
-		break;
-
-	case INIT:
-		if ((config & ~CFG_IPP_ALL) != 0) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				" npi_ipp_config"
-				" Invalid Input config <0x%x>",
-				config));
-			return (NPI_FAILURE | NPI_IPP_CONFIG_INVALID(portn));
-		}
-		IPP_REG_RD(handle, portn, IPP_CONFIG_REG, &val);
-
-
-		val &= (IPP_IP_MAX_PKT_BYTES_MASK);
-		val |= config;
-		break;
-
-	default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_ipp_config"
-				    " Invalid Input op <0x%x>", op));
-		return (NPI_FAILURE | NPI_IPP_OPCODE_INVALID(portn));
-	}
-
-	IPP_REG_WR(handle, portn, IPP_CONFIG_REG, val);
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_ipp_set_max_pktsize(npi_handle_t handle, uint8_t portn, uint32_t bytes)
-{
-	uint64_t val = 0;
-
-	ASSERT(IS_PORT_NUM_VALID(portn));
-
-	if (bytes > IPP_IP_MAX_PKT_BYTES_MASK) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			" npi_ipp_set_max_pktsize"
-			" Invalid Input Max bytes <0x%x>",
-			bytes));
-		return (NPI_FAILURE | NPI_IPP_MAX_PKT_BYTES_INVALID(portn));
-	}
-
-	IPP_REG_RD(handle, portn, IPP_CONFIG_REG, &val);
-	val &= ~(IPP_IP_MAX_PKT_BYTES_MASK << IPP_IP_MAX_PKT_BYTES_SHIFT);
-
-	val |= (bytes << IPP_IP_MAX_PKT_BYTES_SHIFT);
-	IPP_REG_WR(handle, portn, IPP_CONFIG_REG, val);
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * IPP Interrupt Configuration Routine
- */
-npi_status_t
-npi_ipp_iconfig(npi_handle_t handle, config_op_t op, uint8_t portn,
-		ipp_iconfig_t iconfig)
-{
-	uint64_t val = 0;
-
-	ASSERT(IS_PORT_NUM_VALID(portn));
-
-	switch (op) {
-	case ENABLE:
-	case DISABLE:
-
-		if ((iconfig == 0) || ((iconfig & ~ICFG_IPP_ALL) != 0)) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				" npi_ipp_iconfig"
-				" Invalid Input iconfig <0x%x>",
-				iconfig));
-			return (NPI_FAILURE | NPI_IPP_CONFIG_INVALID(portn));
-		}
-
-		IPP_REG_RD(handle, portn, IPP_INT_MASK_REG, &val);
-		if (op == ENABLE)
-			val &= ~iconfig;
-		else
-			val |= iconfig;
-		IPP_REG_WR(handle, portn, IPP_INT_MASK_REG, val);
-
-		break;
-	case INIT:
-
-		if ((iconfig & ~ICFG_IPP_ALL) != 0) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				" npi_ipp_iconfig"
-				" Invalid Input iconfig <0x%x>",
-				iconfig));
-			return (NPI_FAILURE | NPI_IPP_CONFIG_INVALID(portn));
-		}
-		IPP_REG_WR(handle, portn, IPP_INT_MASK_REG, ~iconfig);
-
-		break;
-	default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			" npi_ipp_iconfig"
-			" Invalid Input iconfig <0x%x>",
-			iconfig));
-		return (NPI_FAILURE | NPI_IPP_OPCODE_INVALID(portn));
-	}
-
-	return (NPI_SUCCESS);
-}
-
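A minimal bring-up sketch combining the IPP routines above, assuming a valid handle and port number; the configuration bits and the 0x2400 maximum packet size are illustrative only, and ENABLE/INIT are the config_op_t opcodes already used throughout this file:

	ipp_status_t istat;

	/* Reset the block, then enable it with TCP/UDP checksum offload. */
	if (npi_ipp_reset(handle, portn) != NPI_SUCCESS)
		return (NPI_FAILURE);
	(void) npi_ipp_config(handle, INIT, portn, CFG_IPP | CFG_IPP_TCP_UDP_CKSUM);
	(void) npi_ipp_set_max_pktsize(handle, portn, 0x2400);

	/* Note the inverted sense: ENABLE clears bits in IPP_INT_MASK_REG. */
	(void) npi_ipp_iconfig(handle, ENABLE, portn, ICFG_IPP_ALL);
	(void) npi_ipp_get_status(handle, portn, &istat);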
-npi_status_t
-npi_ipp_get_status(npi_handle_t handle, uint8_t portn, ipp_status_t *status)
-{
-	uint64_t val;
-
-	ASSERT(IS_PORT_NUM_VALID(portn));
-
-	IPP_REG_RD(handle, portn, IPP_INT_STATUS_REG, &val);
-
-	status->value = val;
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_ipp_get_pfifo_rd_ptr(npi_handle_t handle, uint8_t portn, uint16_t *rd_ptr)
-{
-	uint64_t value;
-
-	ASSERT(IS_PORT_NUM_VALID(portn));
-
-	IPP_REG_RD(handle, portn, IPP_PFIFO_RD_PTR_REG, &value);
-	*rd_ptr = value & 0xfff;
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_ipp_get_pfifo_wr_ptr(npi_handle_t handle, uint8_t portn, uint16_t *wr_ptr)
-{
-	uint64_t value;
-
-	ASSERT(IS_PORT_NUM_VALID(portn));
-
-	IPP_REG_RD(handle, portn, IPP_PFIFO_WR_PTR_REG, &value);
-	*wr_ptr = value & 0xfff;
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_ipp_get_dfifo_rd_ptr(npi_handle_t handle, uint8_t portn, uint16_t *rd_ptr)
-{
-	uint64_t value;
-
-	ASSERT(IS_PORT_NUM_VALID(portn));
-
-	IPP_REG_RD(handle, portn, IPP_DFIFO_RD_PTR_REG, &value);
-	*rd_ptr = (uint16_t)(value & ((portn < 2) ? IPP_XMAC_DFIFO_PTR_MASK :
-					IPP_BMAC_DFIFO_PTR_MASK));
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_ipp_get_dfifo_wr_ptr(npi_handle_t handle, uint8_t portn, uint16_t *wr_ptr)
-{
-	uint64_t value;
-
-	ASSERT(IS_PORT_NUM_VALID(portn));
-
-	IPP_REG_RD(handle, portn, IPP_DFIFO_WR_PTR_REG, &value);
-	*wr_ptr = (uint16_t)(value & ((portn < 2) ? IPP_XMAC_DFIFO_PTR_MASK :
-					IPP_BMAC_DFIFO_PTR_MASK));
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_ipp_write_pfifo(npi_handle_t handle, uint8_t portn, uint8_t addr,
-		uint32_t d0, uint32_t d1, uint32_t d2, uint32_t d3, uint32_t d4)
-{
-	uint64_t val;
-
-	ASSERT(IS_PORT_NUM_VALID(portn));
-
-	if (addr >= 64) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			" npi_ipp_write_pfifo"
-			" Invalid PFIFO address <0x%x>", addr));
-		return (NPI_FAILURE | NPI_IPP_FIFO_ADDR_INVALID(portn));
-	}
-
-	IPP_REG_RD(handle, portn, IPP_CONFIG_REG, &val);
-	val |= IPP_PRE_FIFO_PIO_WR_EN;
-	IPP_REG_WR(handle, portn, IPP_CONFIG_REG, val);
-
-	IPP_REG_WR(handle, portn, IPP_PFIFO_WR_PTR_REG, addr);
-	IPP_REG_WR(handle, portn, IPP_PFIFO_WR_DATA0_REG, d0);
-	IPP_REG_WR(handle, portn, IPP_PFIFO_WR_DATA1_REG, d1);
-	IPP_REG_WR(handle, portn, IPP_PFIFO_WR_DATA2_REG, d2);
-	IPP_REG_WR(handle, portn, IPP_PFIFO_WR_DATA3_REG, d3);
-	IPP_REG_WR(handle, portn, IPP_PFIFO_WR_DATA4_REG, d4);
-
-	val &= ~IPP_PRE_FIFO_PIO_WR_EN;
-	IPP_REG_WR(handle, portn, IPP_CONFIG_REG, val);
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_ipp_read_pfifo(npi_handle_t handle, uint8_t portn, uint8_t addr,
-		uint32_t *d0, uint32_t *d1, uint32_t *d2, uint32_t *d3,
-		uint32_t *d4)
-{
-	ASSERT(IS_PORT_NUM_VALID(portn));
-
-	if (addr >= 64) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			" npi_ipp_read_pfifo"
-			" Invalid PFIFO address <0x%x>", addr));
-		return (NPI_FAILURE | NPI_IPP_FIFO_ADDR_INVALID(portn));
-	}
-
-	IPP_REG_WR(handle, portn, IPP_PFIFO_RD_PTR_REG, addr);
-	IPP_REG_RD(handle, portn, IPP_PFIFO_RD_DATA0_REG, d0);
-	IPP_REG_RD(handle, portn, IPP_PFIFO_RD_DATA1_REG, d1);
-	IPP_REG_RD(handle, portn, IPP_PFIFO_RD_DATA2_REG, d2);
-	IPP_REG_RD(handle, portn, IPP_PFIFO_RD_DATA3_REG, d3);
-	IPP_REG_RD(handle, portn, IPP_PFIFO_RD_DATA4_REG, d4);
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_ipp_write_dfifo(npi_handle_t handle, uint8_t portn, uint16_t addr,
-		uint32_t d0, uint32_t d1, uint32_t d2, uint32_t d3, uint32_t d4)
-{
-	uint64_t val;
-
-	ASSERT(IS_PORT_NUM_VALID(portn));
-
-	if (addr >= 2048) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			" npi_ipp_write_dfifo"
-			" Invalid DFIFO address <0x%x>", addr));
-		return (NPI_FAILURE | NPI_IPP_FIFO_ADDR_INVALID(portn));
-	}
-
-	IPP_REG_RD(handle, portn, IPP_CONFIG_REG, &val);
-	val |= IPP_DFIFO_PIO_WR_EN;
-	IPP_REG_WR(handle, portn, IPP_CONFIG_REG, val);
-
-	IPP_REG_WR(handle, portn, IPP_DFIFO_WR_PTR_REG, addr);
-	IPP_REG_WR(handle, portn, IPP_DFIFO_WR_DATA0_REG, d0);
-	IPP_REG_WR(handle, portn, IPP_DFIFO_WR_DATA1_REG, d1);
-	IPP_REG_WR(handle, portn, IPP_DFIFO_WR_DATA2_REG, d2);
-	IPP_REG_WR(handle, portn, IPP_DFIFO_WR_DATA3_REG, d3);
-	IPP_REG_WR(handle, portn, IPP_DFIFO_WR_DATA4_REG, d4);
-
-	val &= ~IPP_DFIFO_PIO_WR_EN;
-	IPP_REG_WR(handle, portn, IPP_CONFIG_REG, val);
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_ipp_read_dfifo(npi_handle_t handle, uint8_t portn, uint16_t addr,
-		uint32_t *d0, uint32_t *d1, uint32_t *d2, uint32_t *d3,
-		uint32_t *d4)
-{
-	ASSERT(IS_PORT_NUM_VALID(portn));
-
-	if (addr >= 2048) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			" npi_ipp_read_dfifo"
-			" Invalid DFIFO address <0x%x>", addr));
-		return (NPI_FAILURE | NPI_IPP_FIFO_ADDR_INVALID(portn));
-	}
-
-	IPP_REG_WR(handle, portn, IPP_DFIFO_RD_PTR_REG, addr);
-	IPP_REG_RD(handle, portn, IPP_DFIFO_RD_DATA0_REG, d0);
-	IPP_REG_RD(handle, portn, IPP_DFIFO_RD_DATA1_REG, d1);
-	IPP_REG_RD(handle, portn, IPP_DFIFO_RD_DATA2_REG, d2);
-	IPP_REG_RD(handle, portn, IPP_DFIFO_RD_DATA3_REG, d3);
-	IPP_REG_RD(handle, portn, IPP_DFIFO_RD_DATA4_REG, d4);
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_ipp_get_ecc_syndrome(npi_handle_t handle, uint8_t portn, uint16_t *syndrome)
-{
-	uint64_t val;
-
-	ASSERT(IS_PORT_NUM_VALID(portn));
-
-	IPP_REG_RD(handle, portn, IPP_DFIFO_ECC_SYNDROME_REG, &val);
-
-	*syndrome = (uint16_t)val;
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_ipp_get_dfifo_eopm_rdptr(npi_handle_t handle, uint8_t portn,
-							uint16_t *rdptr)
-{
-	uint64_t val;
-
-	ASSERT(IS_PORT_NUM_VALID(portn));
-
-	IPP_REG_RD(handle, portn, IPP_DFIFO_EOPM_RD_PTR_REG, &val);
-
-	*rdptr = (uint16_t)val;
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_ipp_get_state_mach(npi_handle_t handle, uint8_t portn, uint32_t *sm)
-{
-	uint64_t val;
-
-	ASSERT(IS_PORT_NUM_VALID(portn));
-
-	IPP_REG_RD(handle, portn, IPP_STATE_MACHINE_REG, &val);
-
-	*sm = (uint32_t)val;
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_ipp_get_ecc_err_count(npi_handle_t handle, uint8_t portn, uint8_t *err_cnt)
-{
-	ASSERT(IS_PORT_NUM_VALID(portn));
-
-	IPP_REG_RD(handle, portn, IPP_ECC_ERR_COUNTER_REG, err_cnt);
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_ipp_get_pkt_dis_count(npi_handle_t handle, uint8_t portn, uint16_t *dis_cnt)
-{
-	ASSERT(IS_PORT_NUM_VALID(portn));
-
-	IPP_REG_RD(handle, portn, IPP_DISCARD_PKT_CNT_REG, dis_cnt);
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_ipp_get_cs_err_count(npi_handle_t handle, uint8_t portn, uint16_t *err_cnt)
-{
-	ASSERT(IS_PORT_NUM_VALID(portn));
-
-	IPP_REG_RD(handle, portn, IPP_TCP_CKSUM_ERR_CNT_REG, err_cnt);
-
-	return (NPI_SUCCESS);
-}
--- a/usr/src/uts/sun4v/io/nxge/npi/npi_ipp.h	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,188 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef _NPI_IPP_H
-#define	_NPI_IPP_H
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#ifdef	__cplusplus
-extern "C" {
-#endif
-
-#include <npi.h>
-#include <nxge_ipp_hw.h>
-
-/* IBTP IPP Configuration */
-
-typedef enum ipp_config_e {
-	CFG_IPP =			IPP_EN,
-	CFG_IPP_DFIFO_ECC_CORRECT =	IPP_DFIFO_ECC_CORRECT_EN,
-	CFG_IPP_DROP_BAD_CRC =		IPP_DROP_BAD_CRC_EN,
-	CFG_IPP_TCP_UDP_CKSUM =		IPP_TCP_UDP_CKSUM_EN,
-	CFG_IPP_DFIFO_PIO_WR =		IPP_DFIFO_PIO_WR_EN,
-	CFG_IPP_PRE_FIFO_PIO_WR =	IPP_PRE_FIFO_PIO_WR_EN,
-	CFG_IPP_FFLP_CKSUM_INFO_PIO_WR = IPP_FFLP_CKSUM_INFO_PIO_WR_EN,
-	CFG_IPP_ALL =			(IPP_EN | IPP_DFIFO_ECC_CORRECT_EN |
-			IPP_DROP_BAD_CRC_EN | IPP_TCP_UDP_CKSUM_EN |
-			IPP_DFIFO_PIO_WR_EN | IPP_PRE_FIFO_PIO_WR_EN)
-} ipp_config_t;
-
-typedef enum ipp_iconfig_e {
-	ICFG_IPP_PKT_DISCARD_OVFL =	IPP_PKT_DISCARD_CNT_INTR_DIS,
-	ICFG_IPP_BAD_TCPIP_CKSUM_OVFL =	IPP_BAD_TCPIP_CKSUM_CNT_INTR_DIS,
-	ICFG_IPP_PRE_FIFO_UNDERRUN =	IPP_PRE_FIFO_UNDERRUN_INTR_DIS,
-	ICFG_IPP_PRE_FIFO_OVERRUN =	IPP_PRE_FIFO_OVERRUN_INTR_DIS,
-	ICFG_IPP_PRE_FIFO_PERR =	IPP_PRE_FIFO_PERR_INTR_DIS,
-	ICFG_IPP_DFIFO_ECC_UNCORR_ERR =	IPP_DFIFO_ECC_UNCORR_ERR_INTR_DIS,
-	ICFG_IPP_DFIFO_MISSING_EOP_SOP = IPP_DFIFO_MISSING_EOP_SOP_INTR_DIS,
-	ICFG_IPP_ECC_ERR_OVFL =		IPP_ECC_ERR_CNT_MAX_INTR_DIS,
-	ICFG_IPP_ALL =			(IPP_PKT_DISCARD_CNT_INTR_DIS |
-			IPP_BAD_TCPIP_CKSUM_CNT_INTR_DIS |
-			IPP_PRE_FIFO_UNDERRUN_INTR_DIS |
-			IPP_PRE_FIFO_OVERRUN_INTR_DIS |
-			IPP_PRE_FIFO_PERR_INTR_DIS |
-			IPP_DFIFO_ECC_UNCORR_ERR_INTR_DIS |
-			IPP_DFIFO_MISSING_EOP_SOP_INTR_DIS |
-			IPP_ECC_ERR_CNT_MAX_INTR_DIS)
-} ipp_iconfig_t;
-
-typedef enum ipp_counter_e {
-	CNT_IPP_DISCARD_PKT		= 0x00000001,
-	CNT_IPP_TCP_CKSUM_ERR		= 0x00000002,
-	CNT_IPP_ECC_ERR			= 0x00000004,
-	CNT_IPP_ALL			= 0x00000007
-} ipp_counter_t;
-
-
-typedef enum ipp_port_cnt_idx_e {
-	HWCI_IPP_PKT_DISCARD = 0,
-	HWCI_IPP_TCP_CKSUM_ERR,
-	HWCI_IPP_ECC_ERR,
-	CI_IPP_MISSING_EOP_SOP,
-	CI_IPP_UNCORR_ERR,
-	CI_IPP_PERR,
-	CI_IPP_FIFO_OVERRUN,
-	CI_IPP_FIFO_UNDERRUN,
-	CI_IPP_PORT_CNT_ARR_SIZE
-} ipp_port_cnt_idx_t;
-
-/* IPP specific errors */
-
-#define	IPP_MAX_PKT_BYTES_INVALID	0x50
-#define	IPP_FIFO_ADDR_INVALID		0x51
-
-/* IPP error return macros */
-
-#define	NPI_IPP_PORT_INVALID(portn)\
-		((IPP_BLK_ID << NPI_BLOCK_ID_SHIFT) | PORT_INVALID |\
-				IS_PORT | (portn << NPI_PORT_CHAN_SHIFT))
-#define	NPI_IPP_OPCODE_INVALID(portn)\
-		((IPP_BLK_ID << NPI_BLOCK_ID_SHIFT) | OPCODE_INVALID |\
-				IS_PORT | (portn << NPI_PORT_CHAN_SHIFT))
-#define	NPI_IPP_CONFIG_INVALID(portn)\
-		((IPP_BLK_ID << NPI_BLOCK_ID_SHIFT) | CONFIG_INVALID |\
-				IS_PORT | (portn << NPI_PORT_CHAN_SHIFT))
-#define	NPI_IPP_MAX_PKT_BYTES_INVALID(portn)\
-		((IPP_BLK_ID << NPI_BLOCK_ID_SHIFT) |\
-		IPP_MAX_PKT_BYTES_INVALID |\
-				IS_PORT | (portn << NPI_PORT_CHAN_SHIFT))
-#define	NPI_IPP_COUNTER_INVALID(portn)\
-		((IPP_BLK_ID << NPI_BLOCK_ID_SHIFT) | COUNTER_INVALID |\
-				IS_PORT | (portn << NPI_PORT_CHAN_SHIFT))
-#define	NPI_IPP_RESET_FAILED(portn)\
-		((IPP_BLK_ID << NPI_BLOCK_ID_SHIFT) | RESET_FAILED |\
-				IS_PORT | (portn << NPI_PORT_CHAN_SHIFT))
-#define	NPI_IPP_FIFO_ADDR_INVALID(portn)\
-		((IPP_BLK_ID << NPI_BLOCK_ID_SHIFT) |\
-		IPP_FIFO_ADDR_INVALID |\
-				IS_PORT | (portn << NPI_PORT_CHAN_SHIFT))
-
-#define	IPP_REG_RD(handle, portn, reg, val) {\
-	NXGE_REG_RD64(handle, IPP_REG_ADDR(portn, reg), val);\
-}
-
-#define	IPP_REG_WR(handle, portn, reg, val) {\
-	NXGE_REG_WR64(handle, IPP_REG_ADDR(portn, reg), val);\
-}
-
-/* IPP NPI function prototypes */
-npi_status_t npi_ipp_get_pfifo_rd_ptr(npi_handle_t, uint8_t,
-			    uint16_t *);
-
-npi_status_t npi_ipp_get_pfifo_wr_ptr(npi_handle_t, uint8_t,
-			    uint16_t *);
-
-npi_status_t npi_ipp_write_pfifo(npi_handle_t, uint8_t,
-			uint8_t, uint32_t, uint32_t, uint32_t,
-			uint32_t, uint32_t);
-
-npi_status_t npi_ipp_read_pfifo(npi_handle_t, uint8_t,
-			uint8_t, uint32_t *, uint32_t *, uint32_t *,
-			uint32_t *, uint32_t *);
-
-npi_status_t npi_ipp_write_dfifo(npi_handle_t, uint8_t,
-			uint16_t, uint32_t, uint32_t, uint32_t,
-			uint32_t, uint32_t);
-
-npi_status_t npi_ipp_read_dfifo(npi_handle_t, uint8_t,
-			uint16_t, uint32_t *, uint32_t *, uint32_t *,
-			uint32_t *, uint32_t *);
-
-npi_status_t npi_ipp_reset(npi_handle_t, uint8_t);
-npi_status_t npi_ipp_config(npi_handle_t, config_op_t, uint8_t,
-			ipp_config_t);
-npi_status_t npi_ipp_set_max_pktsize(npi_handle_t, uint8_t,
-			uint32_t);
-npi_status_t npi_ipp_iconfig(npi_handle_t, config_op_t, uint8_t,
-			ipp_iconfig_t);
-npi_status_t npi_ipp_get_status(npi_handle_t, uint8_t,
-			ipp_status_t *);
-npi_status_t npi_ipp_counters(npi_handle_t, counter_op_t,
-			ipp_counter_t, uint8_t, npi_counter_t *);
-npi_status_t npi_ipp_get_ecc_syndrome(npi_handle_t, uint8_t,
-			uint16_t *);
-npi_status_t npi_ipp_get_dfifo_eopm_rdptr(npi_handle_t, uint8_t,
-			uint16_t *);
-npi_status_t npi_ipp_get_state_mach(npi_handle_t, uint8_t,
-			uint32_t *);
-npi_status_t npi_ipp_get_dfifo_rd_ptr(npi_handle_t, uint8_t,
-			uint16_t *);
-npi_status_t npi_ipp_get_dfifo_wr_ptr(npi_handle_t, uint8_t,
-			uint16_t *);
-npi_status_t npi_ipp_get_ecc_err_count(npi_handle_t, uint8_t,
-			uint8_t *);
-npi_status_t npi_ipp_get_pkt_dis_count(npi_handle_t, uint8_t,
-			uint16_t *);
-npi_status_t npi_ipp_get_cs_err_count(npi_handle_t, uint8_t,
-			uint16_t *);
-npi_status_t npi_ipp_dump_regs(npi_handle_t, uint8_t);
-void npi_ipp_read_regs(npi_handle_t, uint8_t);
-
-#ifdef	__cplusplus
-}
-#endif
-
-#endif	/* _NPI_IPP_H */
--- a/usr/src/uts/sun4v/io/nxge/npi/npi_mac.c	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,3515 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#include <npi_mac.h>
-
-#define	MIF_DELAY	500
-
-#define	MAX_FRAME_SZ1	0x5EE
-#define	MAX_FRAME_SZ2	0x5F6
-#define	MAX_FRAME_SZ3	0x7D6
-#define	MAX_FRAME_SZ4	0x232E
-#define	MAX_FRAME_SZ5	0x2406
-
-#define	XMAC_WAIT_REG(handle, portn, reg, val) {\
-	uint32_t cnt = MAX_PIO_RETRIES;\
-	do {\
-		NXGE_DELAY(MAC_RESET_WAIT);\
-		XMAC_REG_RD(handle, portn, reg, &val);\
-		cnt--;\
-	} while (((val & 0x3) != 0) && (cnt > 0));\
-}
-
-#define	BMAC_WAIT_REG(handle, portn, reg, val) {\
-	uint32_t cnt = MAX_PIO_RETRIES;\
-	do {\
-		NXGE_DELAY(MAC_RESET_WAIT);\
-		BMAC_REG_RD(handle, portn, reg, &val);\
-		cnt--;\
-	} while (((val & 0x3) != 0) && (cnt > 0));\
-}
-
-#define	MIF_WAIT_REG(handle, m_frame, t_delay, interval, max_delay) {	  \
-	do {								  \
-		NXGE_DELAY(interval);					  \
-		MIF_REG_RD(handle, MIF_OUTPUT_FRAME_REG, &m_frame.value); \
-		t_delay++;						  \
-	} while ((m_frame.bits.w0.ta_lsb == 0) && t_delay < max_delay);	  \
-}
-
-uint64_t xmac_offset[] = {
-	XTXMAC_SW_RST_REG,
-	XRXMAC_SW_RST_REG,
-	XTXMAC_STATUS_REG,
-	XRXMAC_STATUS_REG,
-	XMAC_CTRL_STAT_REG,
-	XTXMAC_STAT_MSK_REG,
-	XRXMAC_STAT_MSK_REG,
-	XMAC_C_S_MSK_REG,
-	XMAC_CONFIG_REG,
-	XMAC_IPG_REG,
-	XMAC_MIN_REG,
-	XMAC_MAX_REG,
-	XMAC_ADDR0_REG,
-	XMAC_ADDR1_REG,
-	XMAC_ADDR2_REG,
-	XRXMAC_BT_CNT_REG,
-	XRXMAC_BC_FRM_CNT_REG,
-	XRXMAC_MC_FRM_CNT_REG,
-	XRXMAC_FRAG_CNT_REG,
-	XRXMAC_HIST_CNT1_REG,
-	XRXMAC_HIST_CNT2_REG,
-	XRXMAC_HIST_CNT3_REG,
-	XRXMAC_HIST_CNT4_REG,
-	XRXMAC_HIST_CNT5_REG,
-	XRXMAC_HIST_CNT6_REG,
-	XRXMAC_MPSZER_CNT_REG,
-	XRXMAC_CRC_ER_CNT_REG,
-	XRXMAC_CD_VIO_CNT_REG,
-	XRXMAC_AL_ER_CNT_REG,
-	XTXMAC_FRM_CNT_REG,
-	XTXMAC_BYTE_CNT_REG,
-	XMAC_LINK_FLT_CNT_REG,
-	XRXMAC_HIST_CNT7_REG,
-	XMAC_SM_REG,
-	XMAC_INTERN1_REG,
-	XMAC_ADDR_CMPEN_REG,
-	XMAC_ADDR3_REG,
-	XMAC_ADDR4_REG,
-	XMAC_ADDR5_REG,
-	XMAC_ADDR6_REG,
-	XMAC_ADDR7_REG,
-	XMAC_ADDR8_REG,
-	XMAC_ADDR9_REG,
-	XMAC_ADDR10_REG,
-	XMAC_ADDR11_REG,
-	XMAC_ADDR12_REG,
-	XMAC_ADDR13_REG,
-	XMAC_ADDR14_REG,
-	XMAC_ADDR15_REG,
-	XMAC_ADDR16_REG,
-	XMAC_ADDR17_REG,
-	XMAC_ADDR18_REG,
-	XMAC_ADDR19_REG,
-	XMAC_ADDR20_REG,
-	XMAC_ADDR21_REG,
-	XMAC_ADDR22_REG,
-	XMAC_ADDR23_REG,
-	XMAC_ADDR24_REG,
-	XMAC_ADDR25_REG,
-	XMAC_ADDR26_REG,
-	XMAC_ADDR27_REG,
-	XMAC_ADDR28_REG,
-	XMAC_ADDR29_REG,
-	XMAC_ADDR30_REG,
-	XMAC_ADDR31_REG,
-	XMAC_ADDR32_REG,
-	XMAC_ADDR33_REG,
-	XMAC_ADDR34_REG,
-	XMAC_ADDR35_REG,
-	XMAC_ADDR36_REG,
-	XMAC_ADDR37_REG,
-	XMAC_ADDR38_REG,
-	XMAC_ADDR39_REG,
-	XMAC_ADDR40_REG,
-	XMAC_ADDR41_REG,
-	XMAC_ADDR42_REG,
-	XMAC_ADDR43_REG,
-	XMAC_ADDR44_REG,
-	XMAC_ADDR45_REG,
-	XMAC_ADDR46_REG,
-	XMAC_ADDR47_REG,
-	XMAC_ADDR48_REG,
-	XMAC_ADDR49_REG,
-	XMAC_ADDR50_REG,
-	XMAC_ADDR_FILT0_REG,
-	XMAC_ADDR_FILT1_REG,
-	XMAC_ADDR_FILT2_REG,
-	XMAC_ADDR_FILT12_MASK_REG,
-	XMAC_ADDR_FILT0_MASK_REG,
-	XMAC_HASH_TBL0_REG,
-	XMAC_HASH_TBL1_REG,
-	XMAC_HASH_TBL2_REG,
-	XMAC_HASH_TBL3_REG,
-	XMAC_HASH_TBL4_REG,
-	XMAC_HASH_TBL5_REG,
-	XMAC_HASH_TBL6_REG,
-	XMAC_HASH_TBL7_REG,
-	XMAC_HASH_TBL8_REG,
-	XMAC_HASH_TBL9_REG,
-	XMAC_HASH_TBL10_REG,
-	XMAC_HASH_TBL11_REG,
-	XMAC_HASH_TBL12_REG,
-	XMAC_HASH_TBL13_REG,
-	XMAC_HASH_TBL14_REG,
-	XMAC_HASH_TBL15_REG,
-	XMAC_HOST_INF0_REG,
-	XMAC_HOST_INF1_REG,
-	XMAC_HOST_INF2_REG,
-	XMAC_HOST_INF3_REG,
-	XMAC_HOST_INF4_REG,
-	XMAC_HOST_INF5_REG,
-	XMAC_HOST_INF6_REG,
-	XMAC_HOST_INF7_REG,
-	XMAC_HOST_INF8_REG,
-	XMAC_HOST_INF9_REG,
-	XMAC_HOST_INF10_REG,
-	XMAC_HOST_INF11_REG,
-	XMAC_HOST_INF12_REG,
-	XMAC_HOST_INF13_REG,
-	XMAC_HOST_INF14_REG,
-	XMAC_HOST_INF15_REG,
-	XMAC_HOST_INF16_REG,
-	XMAC_HOST_INF17_REG,
-	XMAC_HOST_INF18_REG,
-	XMAC_HOST_INF19_REG,
-	XMAC_PA_DATA0_REG,
-	XMAC_PA_DATA1_REG,
-	XMAC_DEBUG_SEL_REG,
-	XMAC_TRAINING_VECT_REG,
-};
-
-const char *xmac_name[] = {
-	"XTXMAC_SW_RST_REG",
-	"XRXMAC_SW_RST_REG",
-	"XTXMAC_STATUS_REG",
-	"XRXMAC_STATUS_REG",
-	"XMAC_CTRL_STAT_REG",
-	"XTXMAC_STAT_MSK_REG",
-	"XRXMAC_STAT_MSK_REG",
-	"XMAC_C_S_MSK_REG",
-	"XMAC_CONFIG_REG",
-	"XMAC_IPG_REG",
-	"XMAC_MIN_REG",
-	"XMAC_MAX_REG",
-	"XMAC_ADDR0_REG",
-	"XMAC_ADDR1_REG",
-	"XMAC_ADDR2_REG",
-	"XRXMAC_BT_CNT_REG",
-	"XRXMAC_BC_FRM_CNT_REG",
-	"XRXMAC_MC_FRM_CNT_REG",
-	"XRXMAC_FRAG_CNT_REG",
-	"XRXMAC_HIST_CNT1_REG",
-	"XRXMAC_HIST_CNT2_REG",
-	"XRXMAC_HIST_CNT3_REG",
-	"XRXMAC_HIST_CNT4_REG",
-	"XRXMAC_HIST_CNT5_REG",
-	"XRXMAC_HIST_CNT6_REG",
-	"XRXMAC_MPSZER_CNT_REG",
-	"XRXMAC_CRC_ER_CNT_REG",
-	"XRXMAC_CD_VIO_CNT_REG",
-	"XRXMAC_AL_ER_CNT_REG",
-	"XTXMAC_FRM_CNT_REG",
-	"XTXMAC_BYTE_CNT_REG",
-	"XMAC_LINK_FLT_CNT_REG",
-	"XRXMAC_HIST_CNT7_REG",
-	"XMAC_SM_REG",
-	"XMAC_INTERN1_REG",
-	"XMAC_ADDR_CMPEN_REG",
-	"XMAC_ADDR3_REG",
-	"XMAC_ADDR4_REG",
-	"XMAC_ADDR5_REG",
-	"XMAC_ADDR6_REG",
-	"XMAC_ADDR7_REG",
-	"XMAC_ADDR8_REG",
-	"XMAC_ADDR9_REG",
-	"XMAC_ADDR10_REG",
-	"XMAC_ADDR11_REG",
-	"XMAC_ADDR12_REG",
-	"XMAC_ADDR13_REG",
-	"XMAC_ADDR14_REG",
-	"XMAC_ADDR15_REG",
-	"XMAC_ADDR16_REG",
-	"XMAC_ADDR17_REG",
-	"XMAC_ADDR18_REG",
-	"XMAC_ADDR19_REG",
-	"XMAC_ADDR20_REG",
-	"XMAC_ADDR21_REG",
-	"XMAC_ADDR22_REG",
-	"XMAC_ADDR23_REG",
-	"XMAC_ADDR24_REG",
-	"XMAC_ADDR25_REG",
-	"XMAC_ADDR26_REG",
-	"XMAC_ADDR27_REG",
-	"XMAC_ADDR28_REG",
-	"XMAC_ADDR29_REG",
-	"XMAC_ADDR30_REG",
-	"XMAC_ADDR31_REG",
-	"XMAC_ADDR32_REG",
-	"XMAC_ADDR33_REG",
-	"XMAC_ADDR34_REG",
-	"XMAC_ADDR35_REG",
-	"XMAC_ADDR36_REG",
-	"XMAC_ADDR37_REG",
-	"XMAC_ADDR38_REG",
-	"XMAC_ADDR39_REG",
-	"XMAC_ADDR40_REG",
-	"XMAC_ADDR41_REG",
-	"XMAC_ADDR42_REG",
-	"XMAC_ADDR43_REG",
-	"XMAC_ADDR44_REG",
-	"XMAC_ADDR45_REG",
-	"XMAC_ADDR46_REG",
-	"XMAC_ADDR47_REG",
-	"XMAC_ADDR48_REG",
-	"XMAC_ADDR49_REG",
-	"XMAC_ADDR50_REG",
-	"XMAC_ADDR_FILT0_REG",
-	"XMAC_ADDR_FILT1_REG",
-	"XMAC_ADDR_FILT2_REG",
-	"XMAC_ADDR_FILT12_MASK_REG",
-	"XMAC_ADDR_FILT0_MASK_REG",
-	"XMAC_HASH_TBL0_REG",
-	"XMAC_HASH_TBL1_REG",
-	"XMAC_HASH_TBL2_REG",
-	"XMAC_HASH_TBL3_REG",
-	"XMAC_HASH_TBL4_REG",
-	"XMAC_HASH_TBL5_REG",
-	"XMAC_HASH_TBL6_REG",
-	"XMAC_HASH_TBL7_REG",
-	"XMAC_HASH_TBL8_REG",
-	"XMAC_HASH_TBL9_REG",
-	"XMAC_HASH_TBL10_REG",
-	"XMAC_HASH_TBL11_REG",
-	"XMAC_HASH_TBL12_REG",
-	"XMAC_HASH_TBL13_REG",
-	"XMAC_HASH_TBL14_REG",
-	"XMAC_HASH_TBL15_REG",
-	"XMAC_HOST_INF0_REG",
-	"XMAC_HOST_INF1_REG",
-	"XMAC_HOST_INF2_REG",
-	"XMAC_HOST_INF3_REG",
-	"XMAC_HOST_INF4_REG",
-	"XMAC_HOST_INF5_REG",
-	"XMAC_HOST_INF6_REG",
-	"XMAC_HOST_INF7_REG",
-	"XMAC_HOST_INF8_REG",
-	"XMAC_HOST_INF9_REG",
-	"XMAC_HOST_INF10_REG",
-	"XMAC_HOST_INF11_REG",
-	"XMAC_HOST_INF12_REG",
-	"XMAC_HOST_INF13_REG",
-	"XMAC_HOST_INF14_REG",
-	"XMAC_HOST_INF15_REG",
-	"XMAC_HOST_INF16_REG",
-	"XMAC_HOST_INF17_REG",
-	"XMAC_HOST_INF18_REG",
-	"XMAC_HOST_INF19_REG",
-	"XMAC_PA_DATA0_REG",
-	"XMAC_PA_DATA1_REG",
-	"XMAC_DEBUG_SEL_REG",
-	"XMAC_TRAINING_VECT_REG",
-};
-
-uint64_t bmac_offset[] = {
-	BTXMAC_SW_RST_REG,
-	BRXMAC_SW_RST_REG,
-	MAC_SEND_PAUSE_REG,
-	BTXMAC_STATUS_REG,
-	BRXMAC_STATUS_REG,
-	BMAC_CTRL_STAT_REG,
-	BTXMAC_STAT_MSK_REG,
-	BRXMAC_STAT_MSK_REG,
-	BMAC_C_S_MSK_REG,
-	TXMAC_CONFIG_REG,
-	RXMAC_CONFIG_REG,
-	MAC_CTRL_CONFIG_REG,
-	MAC_XIF_CONFIG_REG,
-	BMAC_MIN_REG,
-	BMAC_MAX_REG,
-	MAC_PA_SIZE_REG,
-	MAC_CTRL_TYPE_REG,
-	BMAC_ADDR0_REG,
-	BMAC_ADDR1_REG,
-	BMAC_ADDR2_REG,
-	BMAC_ADDR3_REG,
-	BMAC_ADDR4_REG,
-	BMAC_ADDR5_REG,
-	BMAC_ADDR6_REG,
-	BMAC_ADDR7_REG,
-	BMAC_ADDR8_REG,
-	BMAC_ADDR9_REG,
-	BMAC_ADDR10_REG,
-	BMAC_ADDR11_REG,
-	BMAC_ADDR12_REG,
-	BMAC_ADDR13_REG,
-	BMAC_ADDR14_REG,
-	BMAC_ADDR15_REG,
-	BMAC_ADDR16_REG,
-	BMAC_ADDR17_REG,
-	BMAC_ADDR18_REG,
-	BMAC_ADDR19_REG,
-	BMAC_ADDR20_REG,
-	BMAC_ADDR21_REG,
-	BMAC_ADDR22_REG,
-	BMAC_ADDR23_REG,
-	MAC_FC_ADDR0_REG,
-	MAC_FC_ADDR1_REG,
-	MAC_FC_ADDR2_REG,
-	MAC_ADDR_FILT0_REG,
-	MAC_ADDR_FILT1_REG,
-	MAC_ADDR_FILT2_REG,
-	MAC_ADDR_FILT12_MASK_REG,
-	MAC_ADDR_FILT00_MASK_REG,
-	MAC_HASH_TBL0_REG,
-	MAC_HASH_TBL1_REG,
-	MAC_HASH_TBL2_REG,
-	MAC_HASH_TBL3_REG,
-	MAC_HASH_TBL4_REG,
-	MAC_HASH_TBL5_REG,
-	MAC_HASH_TBL6_REG,
-	MAC_HASH_TBL7_REG,
-	MAC_HASH_TBL8_REG,
-	MAC_HASH_TBL9_REG,
-	MAC_HASH_TBL10_REG,
-	MAC_HASH_TBL11_REG,
-	MAC_HASH_TBL12_REG,
-	MAC_HASH_TBL13_REG,
-	MAC_HASH_TBL14_REG,
-	MAC_HASH_TBL15_REG,
-	RXMAC_FRM_CNT_REG,
-	MAC_LEN_ER_CNT_REG,
-	BMAC_AL_ER_CNT_REG,
-	BMAC_CRC_ER_CNT_REG,
-	BMAC_CD_VIO_CNT_REG,
-	BMAC_SM_REG,
-	BMAC_ALTAD_CMPEN_REG,
-	BMAC_HOST_INF0_REG,
-	BMAC_HOST_INF1_REG,
-	BMAC_HOST_INF2_REG,
-	BMAC_HOST_INF3_REG,
-	BMAC_HOST_INF4_REG,
-	BMAC_HOST_INF5_REG,
-	BMAC_HOST_INF6_REG,
-	BMAC_HOST_INF7_REG,
-	BMAC_HOST_INF8_REG,
-	BTXMAC_BYTE_CNT_REG,
-	BTXMAC_FRM_CNT_REG,
-	BRXMAC_BYTE_CNT_REG,
-};
-
-const char *bmac_name[] = {
-	"BTXMAC_SW_RST_REG",
-	"BRXMAC_SW_RST_REG",
-	"MAC_SEND_PAUSE_REG",
-	"BTXMAC_STATUS_REG",
-	"BRXMAC_STATUS_REG",
-	"BMAC_CTRL_STAT_REG",
-	"BTXMAC_STAT_MSK_REG",
-	"BRXMAC_STAT_MSK_REG",
-	"BMAC_C_S_MSK_REG",
-	"TXMAC_CONFIG_REG",
-	"RXMAC_CONFIG_REG",
-	"MAC_CTRL_CONFIG_REG",
-	"MAC_XIF_CONFIG_REG",
-	"BMAC_MIN_REG",
-	"BMAC_MAX_REG",
-	"MAC_PA_SIZE_REG",
-	"MAC_CTRL_TYPE_REG",
-	"BMAC_ADDR0_REG",
-	"BMAC_ADDR1_REG",
-	"BMAC_ADDR2_REG",
-	"BMAC_ADDR3_REG",
-	"BMAC_ADDR4_REG",
-	"BMAC_ADDR5_REG",
-	"BMAC_ADDR6_REG",
-	"BMAC_ADDR7_REG",
-	"BMAC_ADDR8_REG",
-	"BMAC_ADDR9_REG",
-	"BMAC_ADDR10_REG",
-	"BMAC_ADDR11_REG",
-	"BMAC_ADDR12_REG",
-	"BMAC_ADDR13_REG",
-	"BMAC_ADDR14_REG",
-	"BMAC_ADDR15_REG",
-	"BMAC_ADDR16_REG",
-	"BMAC_ADDR17_REG",
-	"BMAC_ADDR18_REG",
-	"BMAC_ADDR19_REG",
-	"BMAC_ADDR20_REG",
-	"BMAC_ADDR21_REG",
-	"BMAC_ADDR22_REG",
-	"BMAC_ADDR23_REG",
-	"MAC_FC_ADDR0_REG",
-	"MAC_FC_ADDR1_REG",
-	"MAC_FC_ADDR2_REG",
-	"MAC_ADDR_FILT0_REG",
-	"MAC_ADDR_FILT1_REG",
-	"MAC_ADDR_FILT2_REG",
-	"MAC_ADDR_FILT12_MASK_REG",
-	"MAC_ADDR_FILT00_MASK_REG",
-	"MAC_HASH_TBL0_REG",
-	"MAC_HASH_TBL1_REG",
-	"MAC_HASH_TBL2_REG",
-	"MAC_HASH_TBL3_REG",
-	"MAC_HASH_TBL4_REG",
-	"MAC_HASH_TBL5_REG",
-	"MAC_HASH_TBL6_REG",
-	"MAC_HASH_TBL7_REG",
-	"MAC_HASH_TBL8_REG",
-	"MAC_HASH_TBL9_REG",
-	"MAC_HASH_TBL10_REG",
-	"MAC_HASH_TBL11_REG",
-	"MAC_HASH_TBL12_REG",
-	"MAC_HASH_TBL13_REG",
-	"MAC_HASH_TBL14_REG",
-	"MAC_HASH_TBL15_REG",
-	"RXMAC_FRM_CNT_REG",
-	"MAC_LEN_ER_CNT_REG",
-	"BMAC_AL_ER_CNT_REG",
-	"BMAC_CRC_ER_CNT_REG",
-	"BMAC_CD_VIO_CNT_REG",
-	"BMAC_SM_REG",
-	"BMAC_ALTAD_CMPEN_REG",
-	"BMAC_HOST_INF0_REG",
-	"BMAC_HOST_INF1_REG",
-	"BMAC_HOST_INF2_REG",
-	"BMAC_HOST_INF3_REG",
-	"BMAC_HOST_INF4_REG",
-	"BMAC_HOST_INF5_REG",
-	"BMAC_HOST_INF6_REG",
-	"BMAC_HOST_INF7_REG",
-	"BMAC_HOST_INF8_REG",
-	"BTXMAC_BYTE_CNT_REG",
-	"BTXMAC_FRM_CNT_REG",
-	"BRXMAC_BYTE_CNT_REG",
-};
-
-npi_status_t
-npi_mac_dump_regs(npi_handle_t handle, uint8_t port)
-{
-
-	uint64_t value;
-	int num_regs, i;
-
-	ASSERT(IS_PORT_NUM_VALID(port));
-
-	switch (port) {
-	case 0:
-	case 1:
-		num_regs = sizeof (xmac_offset) / sizeof (uint64_t);
-		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-				    "\nXMAC Register Dump for port %d\n",
-				    port));
-		for (i = 0; i < num_regs; i++) {
-			XMAC_REG_RD(handle, port, xmac_offset[i], &value);
-			NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-				"%08llx %s\t %08llx \n",
-				(XMAC_REG_ADDR((port), (xmac_offset[i]))),
-				xmac_name[i], value));
-		}
-
-		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-			    "\n XMAC Register Dump for port %d done\n",
-			    port));
-		break;
-
-	case 2:
-	case 3:
-		num_regs = sizeof (bmac_offset) / sizeof (uint64_t);
-		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-				    "\nBMAC Register Dump for port %d\n",
-				    port));
-		for (i = 0; i < num_regs; i++) {
-			BMAC_REG_RD(handle, port, bmac_offset[i], &value);
-			NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-				"%08llx %s\t %08llx \n",
-				(BMAC_REG_ADDR((port), (bmac_offset[i]))),
-				bmac_name[i], value));
-		}
-
-		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-			    "\n BMAC Register Dump for port %d done\n",
-			    port));
-		break;
-	}
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_mac_pcs_link_intr_enable(npi_handle_t handle, uint8_t portn)
-{
-	pcs_cfg_t pcs_cfg;
-
-	ASSERT(IS_PORT_NUM_VALID(portn));
-
-	PCS_REG_RD(handle, portn, PCS_CONFIG_REG, &pcs_cfg.value);
-	pcs_cfg.bits.w0.mask = 0;
-	PCS_REG_WR(handle, portn, PCS_CONFIG_REG, pcs_cfg.value);
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_mac_pcs_link_intr_disable(npi_handle_t handle, uint8_t portn)
-{
-	pcs_cfg_t pcs_cfg;
-
-	ASSERT(IS_PORT_NUM_VALID(portn));
-
-	PCS_REG_RD(handle, portn, PCS_CONFIG_REG, &pcs_cfg.val.lsw);
-	pcs_cfg.bits.w0.mask = 1;
-	PCS_REG_WR(handle, portn, PCS_CONFIG_REG, pcs_cfg.val.lsw);
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_xmac_xpcs_link_intr_enable(npi_handle_t handle, uint8_t portn)
-{
-	xpcs_stat1_t xpcs_mask1;
-
-	ASSERT(IS_XMAC_PORT_NUM_VALID(portn));
-
-	XPCS_REG_RD(handle, portn, XPCS_MASK_1_REG, &xpcs_mask1.val.lsw);
-	xpcs_mask1.bits.w0.csr_rx_link_stat = 1;
-	XPCS_REG_WR(handle, portn, XPCS_MASK_1_REG, xpcs_mask1.val.lsw);
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_xmac_xpcs_link_intr_disable(npi_handle_t handle, uint8_t portn)
-{
-	xpcs_stat1_t xpcs_mask1;
-
-	ASSERT(IS_XMAC_PORT_NUM_VALID(portn));
-
-	XPCS_REG_RD(handle, portn, XPCS_MASK_1_REG, &xpcs_mask1.val.lsw);
-	xpcs_mask1.bits.w0.csr_rx_link_stat = 0;
-	XPCS_REG_WR(handle, portn, XPCS_MASK_1_REG, xpcs_mask1.val.lsw);
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_mac_mif_link_intr_disable(npi_handle_t handle, uint8_t portn)
-{
-	mif_cfg_t mif_cfg;
-
-	ASSERT(IS_PORT_NUM_VALID(portn));
-
-	MIF_REG_RD(handle, MIF_CONFIG_REG, &mif_cfg.val.lsw);
-
-	mif_cfg.bits.w0.phy_addr = portn;
-	mif_cfg.bits.w0.poll_en = 0;
-
-	MIF_REG_WR(handle, MIF_CONFIG_REG, mif_cfg.val.lsw);
-
-	NXGE_DELAY(20);
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_mac_hashtab_entry(npi_handle_t handle, io_op_t op, uint8_t portn,
-			uint8_t entryn, uint16_t *data)
-{
-	uint64_t val;
-
-	ASSERT((op == OP_GET) || (op == OP_SET));
-	ASSERT(IS_PORT_NUM_VALID(portn));
-
-	ASSERT(entryn < MAC_MAX_HASH_ENTRY);
-	if (entryn >= MAC_MAX_HASH_ENTRY) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_mac_hashtab_entry"
-				    " Invalid Input: entryn <0x%x>",
-				    entryn));
-		return (NPI_FAILURE | NPI_MAC_HASHTAB_ENTRY_INVALID(portn));
-	}
-
-	if (op == OP_SET) {
-		val = *data;
-		if ((portn == XMAC_PORT_0) || (portn == XMAC_PORT_1)) {
-			XMAC_REG_WR(handle, portn,
-					XMAC_HASH_TBLN_REG_ADDR(entryn), val);
-		} else {
-			BMAC_REG_WR(handle, portn,
-					BMAC_HASH_TBLN_REG_ADDR(entryn), val);
-		}
-	} else {
-		if ((portn == XMAC_PORT_0) || (portn == XMAC_PORT_1)) {
-			XMAC_REG_RD(handle, portn,
-					XMAC_HASH_TBLN_REG_ADDR(entryn), &val);
-		} else {
-			BMAC_REG_RD(handle, portn,
-					BMAC_HASH_TBLN_REG_ADDR(entryn), &val);
-		}
-		*data = val & 0xFFFF;
-	}
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_mac_hostinfo_entry(npi_handle_t handle, io_op_t op, uint8_t portn,
-				uint8_t entryn, hostinfo_t *hostinfo)
-{
-	ASSERT((op == OP_GET) || (op == OP_SET));
-	ASSERT(IS_PORT_NUM_VALID(portn));
-
-	if ((portn == XMAC_PORT_0) || (portn == XMAC_PORT_1)) {
-		ASSERT(entryn < XMAC_MAX_HOST_INFO_ENTRY);
-		if (entryn >= XMAC_MAX_HOST_INFO_ENTRY) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_mac_hostinfo_entry"
-					    " Invalid Input: entryn <0x%x>",
-					    entryn));
-			return (NPI_FAILURE |
-				NPI_MAC_HOSTINFO_ENTRY_INVALID(portn));
-		}
-	} else {
-		ASSERT(entryn < BMAC_MAX_HOST_INFO_ENTRY);
-		if (entryn >= BMAC_MAX_HOST_INFO_ENTRY) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_mac_hostinfo_entry"
-					    " Invalid Input: entryn <0x%x>",
-					    entryn));
-			return (NPI_FAILURE |
-				NPI_MAC_HOSTINFO_ENTRY_INVALID(portn));
-		}
-	}
-
-	if (op == OP_SET) {
-		if ((portn == XMAC_PORT_0) || (portn == XMAC_PORT_1)) {
-			XMAC_REG_WR(handle, portn,
-					XMAC_HOST_INFN_REG_ADDR(entryn),
-					hostinfo->value);
-		} else {
-			BMAC_REG_WR(handle, portn,
-					BMAC_HOST_INFN_REG_ADDR(entryn),
-					hostinfo->value);
-		}
-	} else {
-		if ((portn == XMAC_PORT_0) || (portn == XMAC_PORT_1)) {
-			XMAC_REG_RD(handle, portn,
-					XMAC_HOST_INFN_REG_ADDR(entryn),
-					&hostinfo->value);
-		} else {
-			BMAC_REG_RD(handle, portn,
-					BMAC_HOST_INFN_REG_ADDR(entryn),
-					&hostinfo->value);
-		}
-	}
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_mac_altaddr_enable(npi_handle_t handle, uint8_t portn, uint8_t addrn)
-{
-	uint64_t val;
-
-	ASSERT(IS_PORT_NUM_VALID(portn));
-
-	if ((portn == XMAC_PORT_0) || (portn == XMAC_PORT_1)) {
-		ASSERT(addrn <= XMAC_MAX_ALT_ADDR_ENTRY);
-		if (addrn > XMAC_MAX_ALT_ADDR_ENTRY) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_mac_altaddr_enable"
-					    " Invalid Input: addrn <0x%x>",
-					    addrn));
-			return (NPI_FAILURE |
-				NPI_MAC_ALT_ADDR_ENTRY_INVALID(portn));
-		}
-		XMAC_REG_RD(handle, portn, XMAC_ADDR_CMPEN_REG, &val);
-		val |= (1 << addrn);
-		XMAC_REG_WR(handle, portn, XMAC_ADDR_CMPEN_REG, val);
-	} else {
-		ASSERT(addrn <= BMAC_MAX_ALT_ADDR_ENTRY);
-		if (addrn > BMAC_MAX_ALT_ADDR_ENTRY) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_mac_altaddr_enable"
-					    " Invalid Input: addrn <0x%x>",
-					    addrn));
-			return (NPI_FAILURE |
-				NPI_MAC_ALT_ADDR_ENTRY_INVALID(portn));
-		}
-		BMAC_REG_RD(handle, portn, BMAC_ALTAD_CMPEN_REG, &val);
-		val |= (1 << addrn);
-		BMAC_REG_WR(handle, portn, BMAC_ALTAD_CMPEN_REG, val);
-	}
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * While all bits of XMAC_ADDR_CMPEN_REG are for alternate MAC addresses,
- * bit0 of BMAC_ALTAD_CMPEN_REG is for the unique MAC address.
- */
-npi_status_t
-npi_mac_altaddr_disable(npi_handle_t handle, uint8_t portn, uint8_t addrn)
-{
-	uint64_t val;
-
-	ASSERT(IS_PORT_NUM_VALID(portn));
-
-	if ((portn == XMAC_PORT_0) || (portn == XMAC_PORT_1)) {
-		ASSERT(addrn <= XMAC_MAX_ALT_ADDR_ENTRY);
-		if (addrn > XMAC_MAX_ALT_ADDR_ENTRY) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					" npi_mac_altaddr_disable"
-					" Invalid Input: addrn <0x%x>",
-					addrn));
-			return (NPI_FAILURE |
-				NPI_MAC_ALT_ADDR_ENTRY_INVALID(portn));
-		}
-		XMAC_REG_RD(handle, portn, XMAC_ADDR_CMPEN_REG, &val);
-		val &= ~(1 << addrn);
-		XMAC_REG_WR(handle, portn, XMAC_ADDR_CMPEN_REG, val);
-	} else {
-		ASSERT(addrn <= BMAC_MAX_ALT_ADDR_ENTRY);
-		if (addrn > BMAC_MAX_ALT_ADDR_ENTRY) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					" npi_mac_altaddr_disable"
-					" Invalid Input: addrn <0x%x>",
-				    addrn));
-			return (NPI_FAILURE |
-				NPI_MAC_ALT_ADDR_ENTRY_INVALID(portn));
-		}
-		BMAC_REG_RD(handle, portn, BMAC_ALTAD_CMPEN_REG, &val);
-		val &= ~(1 << addrn);
-		BMAC_REG_WR(handle, portn, BMAC_ALTAD_CMPEN_REG, val);
-	}
-
-	return (NPI_SUCCESS);
-}
-
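Given the note above that bit 0 of BMAC_ALTAD_CMPEN_REG compares the station's unique address while every XMAC_ADDR_CMPEN_REG bit maps to an alternate address, a caller on a BMAC port would typically program alternate entries starting at slot 1. A minimal sketch, assuming a valid handle and port; the address value and the w0/w1/w2 word ordering are illustrative assumptions:

	npi_mac_addr_t alt;
	uint8_t entryn = 1;	/* slot 0 is the unique-address compare bit on BMAC */

	/* Program the alternate address as three 16-bit words, then enable it. */
	alt.w0 = 0x0014;
	alt.w1 = 0x4fa8;
	alt.w2 = 0x1234;
	if (npi_mac_altaddr_entry(handle, OP_SET, portn, entryn, &alt) != NPI_SUCCESS)
		return (NPI_FAILURE);
	(void) npi_mac_altaddr_enable(handle, portn, entryn);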
-npi_status_t
-npi_mac_altaddr_entry(npi_handle_t handle, io_op_t op, uint8_t portn,
-			uint8_t entryn, npi_mac_addr_t *data)
-{
-	uint64_t val0, val1, val2;
-
-	ASSERT(IS_PORT_NUM_VALID(portn));
-	ASSERT((op == OP_GET) || (op == OP_SET));
-
-	if ((portn == XMAC_PORT_0) || (portn == XMAC_PORT_1)) {
-		ASSERT(entryn <= XMAC_MAX_ALT_ADDR_ENTRY);
-		if (entryn > XMAC_MAX_ALT_ADDR_ENTRY) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_mac_altaddr_entry"
-					    " Invalid Input: entryn <0x%x>",
-					    entryn));
-			return (NPI_FAILURE |
-				NPI_MAC_ALT_ADDR_ENTRY_INVALID(portn));
-		}
-		if (op == OP_SET) {
-			val0 = data->w0;
-			val1 = data->w1;
-			val2 = data->w2;
-			XMAC_REG_WR(handle, portn,
-				XMAC_ALT_ADDR0N_REG_ADDR(entryn), val0);
-			XMAC_REG_WR(handle, portn,
-				XMAC_ALT_ADDR1N_REG_ADDR(entryn), val1);
-			XMAC_REG_WR(handle, portn,
-				XMAC_ALT_ADDR2N_REG_ADDR(entryn), val2);
-		} else {
-			XMAC_REG_RD(handle, portn,
-				XMAC_ALT_ADDR0N_REG_ADDR(entryn), &val0);
-			XMAC_REG_RD(handle, portn,
-				XMAC_ALT_ADDR1N_REG_ADDR(entryn), &val1);
-			XMAC_REG_RD(handle, portn,
-				XMAC_ALT_ADDR2N_REG_ADDR(entryn), &val2);
-			data->w0 = val0 & 0xFFFF;
-			data->w1 = val1 & 0xFFFF;
-			data->w2 = val2 & 0xFFFF;
-		}
-	} else {
-		ASSERT(entryn <= BMAC_MAX_ALT_ADDR_ENTRY);
-		if (entryn > BMAC_MAX_ALT_ADDR_ENTRY) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_mac_altaddr_entry"
-					    " Invalid Input: entryn <0x%x>",
-					    entryn));
-			return (NPI_FAILURE |
-				NPI_MAC_ALT_ADDR_ENTRY_INVALID(portn));
-		}
-		if (op == OP_SET) {
-			val0 = data->w0;
-			val1 = data->w1;
-			val2 = data->w2;
-			BMAC_REG_WR(handle, portn,
-				BMAC_ALT_ADDR0N_REG_ADDR(entryn), val0);
-			BMAC_REG_WR(handle, portn,
-				BMAC_ALT_ADDR1N_REG_ADDR(entryn), val1);
-			BMAC_REG_WR(handle, portn,
-				BMAC_ALT_ADDR2N_REG_ADDR(entryn), val2);
-		} else {
-			BMAC_REG_RD(handle, portn,
-				BMAC_ALT_ADDR0N_REG_ADDR(entryn), &val0);
-			BMAC_REG_RD(handle, portn,
-				BMAC_ALT_ADDR1N_REG_ADDR(entryn), &val1);
-			BMAC_REG_RD(handle, portn,
-				BMAC_ALT_ADDR2N_REG_ADDR(entryn), &val2);
-			data->w0 = val0 & 0xFFFF;
-			data->w1 = val1 & 0xFFFF;
-			data->w2 = val2 & 0xFFFF;
-		}
-	}
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_mac_port_attr(npi_handle_t handle, io_op_t op, uint8_t portn,
-			npi_attr_t *attrp)
-{
-	uint64_t val = 0;
-	uint32_t attr;
-
-	ASSERT(IS_PORT_NUM_VALID(portn));
-	ASSERT((op == OP_GET) || (op == OP_SET));
-
-	switch (attrp->type) {
-	case MAC_PORT_MODE:
-		switch (portn) {
-		case XMAC_PORT_0:
-		case XMAC_PORT_1:
-			if (op == OP_SET) {
-				attr = attrp->idata[0];
-				ASSERT((attr == MAC_MII_MODE) ||	\
-					(attr == MAC_GMII_MODE) ||	\
-					(attr == MAC_XGMII_MODE));
-				if ((attr != MAC_MII_MODE) &&
-					(attr != MAC_GMII_MODE) &&
-					(attr != MAC_XGMII_MODE)) {
-					NPI_ERROR_MSG((handle.function,
-						    NPI_ERR_CTL,
-						    " npi_mac_port_attr"
-						    " Invalid Input:"
-						    " MAC_PORT_MODE <0x%x>",
-						    attr));
-					return (NPI_FAILURE |
-					NPI_MAC_PORT_ATTR_INVALID(portn));
-				}
-				XMAC_REG_RD(handle, portn, XMAC_CONFIG_REG,
-						&val);
-				val &= ~XMAC_XIF_MII_MODE_MASK;
-				switch (attr) {
-				case MAC_MII_MODE:
-					val |= (XMAC_XIF_MII_MODE <<
-						XMAC_XIF_MII_MODE_SHIFT);
-					break;
-				case MAC_GMII_MODE:
-					val |= (XMAC_XIF_GMII_MODE <<
-						XMAC_XIF_MII_MODE_SHIFT);
-					break;
-				case MAC_XGMII_MODE:
-					val |= (XMAC_XIF_XGMII_MODE <<
-						XMAC_XIF_MII_MODE_SHIFT);
-					break;
-				default:
-					return (NPI_FAILURE |
-					NPI_MAC_PORT_ATTR_INVALID(portn));
-				}
-				XMAC_REG_WR(handle, portn, XMAC_CONFIG_REG,
-						val);
-			} else {
-				XMAC_REG_RD(handle, portn, XMAC_CONFIG_REG,
-						&val);
-				val &= XMAC_XIF_MII_MODE_MASK;
-				attr = val >> XMAC_XIF_MII_MODE_SHIFT;
-				attrp->odata[0] = attr;
-			}
-			break;
-		case BMAC_PORT_0:
-		case BMAC_PORT_1:
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_mac_port_attr"
-					    " Invalid Input:"
-					    " MAC_PORT_MODE <0x%x>",
-					    attrp->type));
-			return (NPI_FAILURE |
-				NPI_MAC_PORT_ATTR_INVALID(portn));
-		default:
-			return (NPI_FAILURE | NPI_MAC_PORT_INVALID(portn));
-		}
-		break;
-
-	case MAC_PORT_FRAME_SIZE: {
-		uint32_t min_fsize;
-		uint32_t max_fsize;
-
-		switch (portn) {
-		case XMAC_PORT_0:
-		case XMAC_PORT_1:
-			if (op == OP_SET) {
-				min_fsize = attrp->idata[0];
-				max_fsize = attrp->idata[1];
-				ASSERT((min_fsize &	\
-					~XMAC_MIN_TX_FRM_SZ_MASK) == 0);
-				if ((min_fsize & ~XMAC_MIN_TX_FRM_SZ_MASK)
-						!= 0) {
-					NPI_ERROR_MSG((handle.function,
-						    NPI_ERR_CTL,
-						    " npi_mac_port_attr"
-						    " MAC_PORT_FRAME_SIZE:"
-						    " Invalid Input:"
-						    " xmac_min_fsize <0x%x>",
-						    min_fsize));
-					return (NPI_FAILURE |
-					NPI_MAC_PORT_ATTR_INVALID(portn));
-				}
-				ASSERT((max_fsize &	\
-					~XMAC_MAX_FRM_SZ_MASK) == 0);
-				if ((max_fsize & ~XMAC_MAX_FRM_SZ_MASK)
-						!= 0) {
-					NPI_ERROR_MSG((handle.function,
-						    NPI_ERR_CTL,
-						    " npi_mac_port_attr"
-						    " MAC_PORT_FRAME_SIZE:"
-						    " Invalid Input:"
-						    " xmac_max_fsize <0x%x>",
-						    max_fsize));
-					return (NPI_FAILURE |
-					NPI_MAC_PORT_ATTR_INVALID(portn));
-				}
-				XMAC_REG_RD(handle, portn, XMAC_MIN_REG, &val);
-				val &= ~(XMAC_MIN_TX_FRM_SZ_MASK |
-					XMAC_MIN_RX_FRM_SZ_MASK);
-				val |= (min_fsize << XMAC_MIN_TX_FRM_SZ_SHIFT);
-				val |= (min_fsize << XMAC_MIN_RX_FRM_SZ_SHIFT);
-				XMAC_REG_WR(handle, portn, XMAC_MIN_REG, val);
-				XMAC_REG_WR(handle, portn, XMAC_MAX_REG,
-						max_fsize);
-			} else {
-				XMAC_REG_RD(handle, portn, XMAC_MIN_REG, &val);
-				min_fsize = (val & XMAC_MIN_TX_FRM_SZ_MASK)
-						>> XMAC_MIN_TX_FRM_SZ_SHIFT;
-				XMAC_REG_RD(handle, portn, XMAC_MAX_REG, &val);
-				attrp->odata[0] = min_fsize;
-				attrp->odata[1] = val & XMAC_MAX_FRM_SZ_MASK;
-			}
-			break;
-		case BMAC_PORT_0:
-		case BMAC_PORT_1:
-			if (op == OP_SET) {
-				min_fsize = attrp->idata[0];
-				max_fsize = attrp->idata[1];
-				ASSERT((min_fsize & ~BMAC_MIN_FRAME_MASK) == 0);
-				if ((min_fsize & ~BMAC_MIN_FRAME_MASK)
-						!= 0) {
-					NPI_ERROR_MSG((handle.function,
-						    NPI_ERR_CTL,
-						    " npi_mac_port_attr"
-						    " MAC_FRAME_SIZE:"
-						    " Invalid Input:"
-						    " bmac_min_fsize <0x%x>",
-						    min_fsize));
-					return (NPI_FAILURE |
-					NPI_MAC_PORT_ATTR_INVALID(portn));
-				}
-				ASSERT((max_fsize & ~BMAC_MAX_FRAME_MASK) == 0);
-				if ((max_fsize & ~BMAC_MAX_FRAME_MASK)
-						!= 0) {
-					NPI_ERROR_MSG((handle.function,
-						    NPI_ERR_CTL,
-						    " npi_mac_port_attr"
-						    " MAC_FRAME_SIZE:"
-						    " Invalid Input:"
-						    " bmac_max_fsize <0x%x>",
-						    max_fsize));
-					return (NPI_FAILURE |
-					NPI_MAC_PORT_ATTR_INVALID(portn));
-				}
-				BMAC_REG_RD(handle, portn, BMAC_MAX_REG, &val);
-				val &= ~BMAC_MAX_FRAME_MASK;
-				if (max_fsize <= MAX_FRAME_SZ1)
-					val |= MAX_FRAME_SZ1;
-				else if ((max_fsize > MAX_FRAME_SZ1) &&
-					(max_fsize <= MAX_FRAME_SZ2))
-					val |= MAX_FRAME_SZ2;
-				else if ((max_fsize > MAX_FRAME_SZ2) &&
-					(max_fsize <= MAX_FRAME_SZ3))
-					val |= MAX_FRAME_SZ3;
-				else if ((max_fsize > MAX_FRAME_SZ3) &&
-					(max_fsize <= MAX_FRAME_SZ4))
-					val |= MAX_FRAME_SZ4;
-				else if ((max_fsize > MAX_FRAME_SZ4) &&
-					(max_fsize <= MAX_FRAME_SZ5))
-					val |= MAX_FRAME_SZ5;
-				BMAC_REG_WR(handle, portn, BMAC_MAX_REG, val);
-				BMAC_REG_WR(handle, portn, BMAC_MIN_REG,
-						min_fsize);
-			} else {
-				BMAC_REG_RD(handle, portn, BMAC_MIN_REG, &val);
-				min_fsize = val & BMAC_MIN_FRAME_MASK;
-				BMAC_REG_RD(handle, portn, BMAC_MAX_REG, &val);
-				max_fsize = val & BMAC_MAX_FRAME_MASK;
-				attrp->odata[0] = min_fsize;
-				attrp->odata[1] = max_fsize;
-			}
-			break;
-		default:
-			return (NPI_FAILURE | NPI_MAC_PORT_INVALID(portn));
-		}
-	}	break;
-
-	case BMAC_PORT_MAX_BURST_SIZE: {
-		uint32_t burst_size;
-		switch (portn) {
-		case XMAC_PORT_0:
-		case XMAC_PORT_1:
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_mac_port_attr"
-					    " BMAC_PORT_MAX_BURST_SIZE:"
-					    " Invalid Input: portn <%d>",
-					    portn));
-			return (NPI_FAILURE | NPI_MAC_PORT_ATTR_INVALID(portn));
-		case BMAC_PORT_0:
-		case BMAC_PORT_1:
-			/* NOTE: Not used in Full duplex mode */
-			if (op == OP_SET) {
-				burst_size = attrp->idata[0];
-				ASSERT((burst_size & ~0x7FFF) == 0);
-				if ((burst_size & ~0x7FFF) != 0) {
-					NPI_ERROR_MSG((handle.function,
-						    NPI_ERR_CTL,
-						    " npi_mac_port_attr"
-						    " BMAC_MAX_BURST_SIZE:"
-						    " Invalid Input:"
-						    " burst_size <0x%x>",
-						    burst_size));
-					return (NPI_FAILURE |
-					NPI_MAC_PORT_ATTR_INVALID(portn));
-				}
-				BMAC_REG_RD(handle, portn, BMAC_MAX_REG, &val);
-				val &= ~BMAC_MAX_BURST_MASK;
-				val |= (burst_size << BMAC_MAX_BURST_SHIFT);
-				BMAC_REG_WR(handle, portn, BMAC_MAX_REG, val);
-			} else {
-				BMAC_REG_RD(handle, portn, BMAC_MAX_REG, &val);
-				burst_size = (val & BMAC_MAX_BURST_MASK)
-						>> BMAC_MAX_BURST_SHIFT;
-				attrp->odata[0] = burst_size;
-			}
-			break;
-		default:
-			return (NPI_FAILURE | NPI_MAC_PORT_INVALID(portn));
-		}
-	}	break;
-
-	case BMAC_PORT_PA_SIZE: {
-		uint32_t pa_size;
-		switch (portn) {
-		case XMAC_PORT_0:
-		case XMAC_PORT_1:
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_mac_port_attr"
-					    " BMAC_PORT_PA_SIZE:"
-					    " Invalid Input: portn <%d>",
-					    portn));
-			return (NPI_FAILURE | NPI_MAC_PORT_ATTR_INVALID(portn));
-		case BMAC_PORT_0:
-		case BMAC_PORT_1:
-			if (op == OP_SET) {
-				pa_size = attrp->idata[0];
-				ASSERT((pa_size & ~0x3FF) == 0);
-				if ((pa_size & ~0x3FF) != 0) {
-					NPI_ERROR_MSG((handle.function,
-					    NPI_ERR_CTL,
-					    " npi_mac_port_attr"
-					    " BMAC_PORT_PA_SIZE:"
-					    " Invalid Input: pa_size <0x%x>",
-					    pa_size));
-
-					return (NPI_FAILURE |
-					NPI_MAC_PORT_ATTR_INVALID(portn));
-				}
-				BMAC_REG_RD(handle, portn, MAC_PA_SIZE_REG,
-					    &val);
-				val &= ~BMAC_PA_SIZE_MASK;
-				val |= (pa_size << 0);
-				BMAC_REG_WR(handle, portn, MAC_PA_SIZE_REG,
-					    val);
-			} else {
-				BMAC_REG_RD(handle, portn, MAC_PA_SIZE_REG,
-					    &val);
-				pa_size = (val & BMAC_PA_SIZE_MASK) >> 0;
-				attrp->odata[0] = pa_size;
-			}
-			break;
-		default:
-			return (NPI_FAILURE | NPI_MAC_PORT_INVALID(portn));
-		}
-	}	break;
-
-	case BMAC_PORT_CTRL_TYPE: {
-		uint32_t ctrl_type;
-		switch (portn) {
-		case XMAC_PORT_0:
-		case XMAC_PORT_1:
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_mac_port_attr"
-					    " BMAC_PORT_CTRL_TYPE:"
-					    " Invalid Input: portn <%d>",
-					    portn));
-			return (NPI_FAILURE | NPI_MAC_PORT_ATTR_INVALID(portn));
-		case BMAC_PORT_0:
-		case BMAC_PORT_1:
-			if (op == OP_SET) {
-				ctrl_type = attrp->idata[0];
-				ASSERT((ctrl_type & ~0xFFFF) == 0);
-				if ((ctrl_type & ~0xFFFF) != 0) {
-					NPI_ERROR_MSG((handle.function,
-						    NPI_ERR_CTL,
-						    " npi_mac_port_attr"
-						    " BMAC_PORT_CTRL_TYPE:"
-						    " Invalid Input:"
-						    " ctrl_type <0x%x>",
-						    ctrl_type));
-					return (NPI_FAILURE |
-					NPI_MAC_PORT_ATTR_INVALID(portn));
-				}
-				BMAC_REG_WR(handle, portn, MAC_CTRL_TYPE_REG,
-						ctrl_type);
-			} else {
-				BMAC_REG_RD(handle, portn, MAC_CTRL_TYPE_REG,
-						&val);
-				ctrl_type = (val & 0xFFFF);
-				attrp->odata[0] = ctrl_type;
-			}
-			break;
-		default:
-			return (NPI_FAILURE | NPI_MAC_PORT_INVALID(portn));
-		}
-	}	break;
-
-	case XMAC_10G_PORT_IPG:
-		{
-		uint32_t	ipg0;
-
-		switch (portn) {
-		case XMAC_PORT_0:
-		case XMAC_PORT_1:
-			if (op == OP_SET) {
-				ipg0 = attrp->idata[0];
-				ASSERT((ipg0 == XGMII_IPG_12_15) ||	\
-					(ipg0 == XGMII_IPG_16_19) ||	\
-					(ipg0 == XGMII_IPG_20_23));
-				if ((ipg0 != XGMII_IPG_12_15) &&
-					(ipg0 != XGMII_IPG_16_19) &&
-					(ipg0 != XGMII_IPG_20_23)) {
-					NPI_ERROR_MSG((handle.function,
-						    NPI_ERR_CTL,
-						    " npi_mac_port_attr"
-						    " MAC_10G_PORT_IPG:"
-						    " Invalid Input:"
-						    " xgmii_ipg <0x%x>",
-						    ipg0));
-					return (NPI_FAILURE |
-					NPI_MAC_PORT_ATTR_INVALID(portn));
-				}
-
-				XMAC_REG_RD(handle, portn, XMAC_IPG_REG, &val);
-				val &= ~(XMAC_IPG_VALUE_MASK |
-					XMAC_IPG_VALUE1_MASK);
-
-				switch (ipg0) {
-				case XGMII_IPG_12_15:
-					val |= (IPG_12_15_BYTE <<
-						XMAC_IPG_VALUE_SHIFT);
-					break;
-				case XGMII_IPG_16_19:
-					val |= (IPG_16_19_BYTE <<
-						XMAC_IPG_VALUE_SHIFT);
-					break;
-				case XGMII_IPG_20_23:
-					val |= (IPG_20_23_BYTE <<
-						XMAC_IPG_VALUE_SHIFT);
-					break;
-				default:
-					return (NPI_FAILURE |
-					NPI_MAC_PORT_ATTR_INVALID(portn));
-				}
-				XMAC_REG_WR(handle, portn, XMAC_IPG_REG, val);
-			} else {
-				XMAC_REG_RD(handle, portn, XMAC_IPG_REG, &val);
-				ipg0 = (val & XMAC_IPG_VALUE_MASK) >>
-					XMAC_IPG_VALUE_SHIFT;
-				switch (ipg0) {
-				case IPG_12_15_BYTE:
-					attrp->odata[0] = XGMII_IPG_12_15;
-					break;
-				case IPG_16_19_BYTE:
-					attrp->odata[0] = XGMII_IPG_16_19;
-					break;
-				case IPG_20_23_BYTE:
-					attrp->odata[0] = XGMII_IPG_20_23;
-					break;
-				default:
-					return (NPI_FAILURE |
-					NPI_MAC_PORT_ATTR_INVALID(portn));
-				}
-			}
-			break;
-		case BMAC_PORT_0:
-		case BMAC_PORT_1:
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					" npi_mac_port_attr" " MAC_10G_PORT_IPG:"
-					"  Invalid Input: portn <%d>",
-					portn));
-		default:
-			return (NPI_FAILURE | NPI_MAC_PORT_INVALID(portn));
-		}
-		break;
-	}
-
-	case XMAC_PORT_IPG:
-		{
-		uint32_t	ipg1;
-		switch (portn) {
-		case XMAC_PORT_0:
-		case XMAC_PORT_1:
-			if (op == OP_SET) {
-				ipg1 = attrp->idata[0];
-				ASSERT((ipg1 == MII_GMII_IPG_12) ||	\
-					(ipg1 == MII_GMII_IPG_13) ||	\
-					(ipg1 == MII_GMII_IPG_14) ||	\
-					(ipg1 == MII_GMII_IPG_15) ||	\
-					(ipg1 == MII_GMII_IPG_16));
-				if ((ipg1 != MII_GMII_IPG_12) &&
-					(ipg1 != MII_GMII_IPG_13) &&
-					(ipg1 != MII_GMII_IPG_14) &&
-					(ipg1 != MII_GMII_IPG_15) &&
-					(ipg1 != MII_GMII_IPG_16)) {
-					NPI_ERROR_MSG((handle.function,
-						    NPI_ERR_CTL,
-						    " npi_mac_port_attr"
-						    " XMAC_PORT_IPG:"
-						    " Invalid Input:"
-						    " mii_gmii_ipg <0x%x>",
-						    ipg1));
-					return (NPI_FAILURE |
-					NPI_MAC_PORT_ATTR_INVALID(portn));
-				}
-
-				XMAC_REG_RD(handle, portn, XMAC_IPG_REG, &val);
-				val &= ~(XMAC_IPG_VALUE_MASK |
-					XMAC_IPG_VALUE1_MASK);
-
-				switch (ipg1) {
-				case MII_GMII_IPG_12:
-					val |= (IPG1_12_BYTES <<
-						XMAC_IPG_VALUE1_SHIFT);
-					break;
-				case MII_GMII_IPG_13:
-					val |= (IPG1_13_BYTES <<
-						XMAC_IPG_VALUE1_SHIFT);
-					break;
-				case MII_GMII_IPG_14:
-					val |= (IPG1_14_BYTES <<
-						XMAC_IPG_VALUE1_SHIFT);
-					break;
-				case MII_GMII_IPG_15:
-					val |= (IPG1_15_BYTES <<
-						XMAC_IPG_VALUE1_SHIFT);
-					break;
-				case MII_GMII_IPG_16:
-					val |= (IPG1_16_BYTES <<
-						XMAC_IPG_VALUE1_SHIFT);
-					break;
-				default:
-					return (NPI_FAILURE |
-					NPI_MAC_PORT_ATTR_INVALID(portn));
-				}
-				XMAC_REG_WR(handle, portn, XMAC_IPG_REG, val);
-			} else {
-				XMAC_REG_RD(handle, portn, XMAC_IPG_REG, &val);
-				ipg1 = (val & XMAC_IPG_VALUE1_MASK) >>
-					XMAC_IPG_VALUE1_SHIFT;
-				switch (ipg1) {
-				case IPG1_12_BYTES:
-					attrp->odata[1] = MII_GMII_IPG_12;
-					break;
-				case IPG1_13_BYTES:
-					attrp->odata[1] = MII_GMII_IPG_13;
-					break;
-				case IPG1_14_BYTES:
-					attrp->odata[1] = MII_GMII_IPG_14;
-					break;
-				case IPG1_15_BYTES:
-					attrp->odata[1] = MII_GMII_IPG_15;
-					break;
-				case IPG1_16_BYTES:
-					attrp->odata[1] = MII_GMII_IPG_16;
-					break;
-				default:
-					return (NPI_FAILURE |
-					NPI_MAC_PORT_ATTR_INVALID(portn));
-				}
-			}
-			break;
-		case BMAC_PORT_0:
-		case BMAC_PORT_1:
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_mac_port_attr"
-					    " MAC_PORT_IPG:"
-					    " Invalid Input: portn <%d>",
-					    portn));
-		default:
-			return (NPI_FAILURE | NPI_MAC_PORT_INVALID(portn));
-		}
-		break;
-	}
-
-	case MAC_PORT_ADDR: {
-		uint32_t addr0;
-		uint32_t addr1;
-		uint32_t addr2;
-
-		switch (portn) {
-		case XMAC_PORT_0:
-		case XMAC_PORT_1:
-			if (op == OP_SET) {
-				addr0 = attrp->idata[0];
-				addr1 = attrp->idata[1];
-				addr2 = attrp->idata[2];
-				ASSERT((addr0 & ~0xFFFF) == 0);
-				if ((addr0 & ~0xFFFF) != 0) {
-					NPI_ERROR_MSG((handle.function,
-						    NPI_ERR_CTL,
-						    " npi_mac_port_attr"
-						    " MAC_PORT_ADDR:"
-						    " Invalid Input:"
-						    " addr0 <0x%x>", addr0));
-
-					return (NPI_FAILURE |
-					NPI_MAC_PORT_ATTR_INVALID(portn));
-				}
-				ASSERT((addr1 & ~0xFFFF) == 0);
-				if ((addr1 & ~0xFFFF) != 0) {
-					NPI_ERROR_MSG((handle.function,
-						    NPI_ERR_CTL,
-						    " npi_mac_port_attr"
-						    " MAC_PORT_ADDR:"
-						    " Invalid Input:"
-						    " addr1 <0x%x>", addr1));
-					return (NPI_FAILURE |
-					NPI_MAC_PORT_ATTR_INVALID(portn));
-				}
-				ASSERT((addr2 & ~0xFFFF) == 0);
-				if ((addr2 & ~0xFFFF) != 0) {
-					NPI_ERROR_MSG((handle.function,
-						    NPI_ERR_CTL,
-						    " npi_mac_port_attr"
-						    " MAC_PORT_ADDR:"
-						    " Invalid Input:"
-						    " addr2 <0x%x>",
-						    addr2));
-
-					return (NPI_FAILURE |
-					NPI_MAC_PORT_ATTR_INVALID(portn));
-				}
-				XMAC_REG_WR(handle, portn, XMAC_ADDR0_REG,
-						addr0);
-				XMAC_REG_WR(handle, portn, XMAC_ADDR1_REG,
-						addr1);
-				XMAC_REG_WR(handle, portn, XMAC_ADDR2_REG,
-						addr2);
-			} else {
-				XMAC_REG_RD(handle, portn, XMAC_ADDR0_REG,
-						&addr0);
-				XMAC_REG_RD(handle, portn, XMAC_ADDR1_REG,
-						&addr1);
-				XMAC_REG_RD(handle, portn, XMAC_ADDR2_REG,
-						&addr2);
-				attrp->odata[0] = addr0 & MAC_ADDR_REG_MASK;
-				attrp->odata[1] = addr1 & MAC_ADDR_REG_MASK;
-				attrp->odata[2] = addr2 & MAC_ADDR_REG_MASK;
-			}
-			break;
-		case BMAC_PORT_0:
-		case BMAC_PORT_1:
-			if (op == OP_SET) {
-				addr0 = attrp->idata[0];
-				addr1 = attrp->idata[1];
-				addr2 = attrp->idata[2];
-				ASSERT((addr0 & ~0xFFFF) == 0);
-				if ((addr0 & ~0xFFFF) != 0) {
-					NPI_ERROR_MSG((handle.function,
-						    NPI_ERR_CTL,
-						    " npi_mac_port_attr"
-						    " MAC_PORT_ADDR:"
-						    " Invalid Input:"
-						    " addr0 <0x%x>",
-						    addr0));
-					return (NPI_FAILURE |
-					NPI_MAC_PORT_ATTR_INVALID(portn));
-				}
-				ASSERT((addr1 & ~0xFFFF) == 0);
-				if ((addr1 & ~0xFFFF) != 0) {
-					NPI_ERROR_MSG((handle.function,
-						    NPI_ERR_CTL,
-						    " npi_mac_port_attr"
-						    " MAC_PORT_ADDR:"
-						    " Invalid Input:"
-						    " addr1 <0x%x>",
-						    addr1));
-					return (NPI_FAILURE |
-					NPI_MAC_PORT_ATTR_INVALID(portn));
-				}
-				ASSERT((addr2 & ~0xFFFF) == 0);
-				if ((addr2 & ~0xFFFF) != 0) {
-					NPI_ERROR_MSG((handle.function,
-						    NPI_ERR_CTL,
-						    " npi_mac_port_attr"
-						    " MAC_PORT_ADDR:"
-						    " Invalid Input:"
-						    " addr2 <0x%x>",
-						    addr2));
-					return (NPI_FAILURE |
-					NPI_MAC_PORT_ATTR_INVALID(portn));
-				}
-				BMAC_REG_WR(handle, portn, BMAC_ADDR0_REG,
-						addr0);
-				BMAC_REG_WR(handle, portn, BMAC_ADDR1_REG,
-						addr1);
-				BMAC_REG_WR(handle, portn, BMAC_ADDR2_REG,
-						addr2);
-			} else {
-				BMAC_REG_RD(handle, portn, BMAC_ADDR0_REG,
-						&addr0);
-				BMAC_REG_RD(handle, portn, BMAC_ADDR1_REG,
-						&addr1);
-				BMAC_REG_RD(handle, portn, BMAC_ADDR2_REG,
-						&addr2);
-				attrp->odata[0] = addr0 & MAC_ADDR_REG_MASK;
-				attrp->odata[1] = addr1 & MAC_ADDR_REG_MASK;
-				attrp->odata[2] = addr2 & MAC_ADDR_REG_MASK;
-			}
-			break;
-		default:
-			return (NPI_FAILURE | NPI_MAC_PORT_INVALID(portn));
-		}
-	}	break;
-
-	case MAC_PORT_ADDR_FILTER: {
-		uint32_t addr0;
-		uint32_t addr1;
-		uint32_t addr2;
-
-		switch (portn) {
-		case XMAC_PORT_0:
-		case XMAC_PORT_1:
-			if (op == OP_SET) {
-				addr0 = attrp->idata[0];
-				addr1 = attrp->idata[1];
-				addr2 = attrp->idata[2];
-				ASSERT((addr0 & ~0xFFFF) == 0);
-				if ((addr0 & ~0xFFFF) != 0) {
-					NPI_ERROR_MSG((handle.function,
-						    NPI_ERR_CTL,
-						    " npi_mac_port_attr"
-						    " MAC_PORT_ADDR_FILTER:"
-						    " Invalid Input:"
-						    " addr0 <0x%x>",
-						    addr0));
-					return (NPI_FAILURE |
-					NPI_MAC_PORT_ATTR_INVALID(portn));
-				}
-				ASSERT((addr1 & ~0xFFFF) == 0);
-				if ((addr1 & ~0xFFFF) != 0) {
-					NPI_ERROR_MSG((handle.function,
-						    NPI_ERR_CTL,
-						    " npi_mac_port_attr"
-						    " MAC_PORT_ADDR_FILTER:"
-						    " Invalid Input:"
-						    " addr1 <0x%x>",
-						    addr1));
-					return (NPI_FAILURE |
-					NPI_MAC_PORT_ATTR_INVALID(portn));
-				}
-				ASSERT((addr2 & ~0xFFFF) == 0);
-				if ((addr2 & ~0xFFFF) != 0) {
-					NPI_ERROR_MSG((handle.function,
-						    NPI_ERR_CTL,
-						    " npi_mac_port_attr"
-						    " MAC_PORT_ADDR_FILTER:"
-						    " Invalid Input:"
-						    " addr2 <0x%x>",
-						    addr2));
-					return (NPI_FAILURE |
-					NPI_MAC_PORT_ATTR_INVALID(portn));
-				}
-				XMAC_REG_WR(handle, portn,
-						XMAC_ADDR_FILT0_REG, addr0);
-				XMAC_REG_WR(handle, portn,
-						XMAC_ADDR_FILT1_REG, addr1);
-				XMAC_REG_WR(handle, portn,
-						XMAC_ADDR_FILT2_REG, addr2);
-			} else {
-				XMAC_REG_RD(handle, portn,
-						XMAC_ADDR_FILT0_REG, &addr0);
-				XMAC_REG_RD(handle, portn,
-						XMAC_ADDR_FILT1_REG, &addr1);
-				XMAC_REG_RD(handle, portn,
-						XMAC_ADDR_FILT2_REG, &addr2);
-				attrp->odata[0] = addr0 & MAC_ADDR_REG_MASK;
-				attrp->odata[1] = addr1 & MAC_ADDR_REG_MASK;
-				attrp->odata[2] = addr2 & MAC_ADDR_REG_MASK;
-			}
-			break;
-		case BMAC_PORT_0:
-		case BMAC_PORT_1:
-			if (op == OP_SET) {
-				addr0 = attrp->idata[0];
-				addr1 = attrp->idata[1];
-				addr2 = attrp->idata[2];
-				ASSERT((addr0 & ~0xFFFF) == 0);
-				if ((addr0 & ~0xFFFF) != 0) {
-					NPI_ERROR_MSG((handle.function,
-						    NPI_ERR_CTL,
-						    " npi_mac_port_attr"
-						    " MAC_PORT_ADDR_FILTER:"
-						    " Invalid Input:"
-						    " addr0 <0x%x>",
-						    addr0));
-					return (NPI_FAILURE |
-					NPI_MAC_PORT_ATTR_INVALID(portn));
-				}
-				ASSERT((addr1 & ~0xFFFF) == 0);
-				if ((addr1 & ~0xFFFF) != 0) {
-					NPI_ERROR_MSG((handle.function,
-						    NPI_ERR_CTL,
-						    " npi_mac_port_attr"
-						    " MAC_PORT_ADDR_FILTER:"
-						    " Invalid Input:"
-						    " addr1 <0x%x>",
-						    addr1));
-					return (NPI_FAILURE |
-					NPI_MAC_PORT_ATTR_INVALID(portn));
-				}
-				ASSERT((addr2 & ~0xFFFF) == 0);
-				if ((addr2 & ~0xFFFF) != 0) {
-					NPI_ERROR_MSG((handle.function,
-						    NPI_ERR_CTL,
-						    " npi_mac_port_attr"
-						    " MAC_PORT_ADDR_FILTER:"
-						    " Invalid Input:"
-						    " addr2 <0x%x>",
-						    addr2));
-					return (NPI_FAILURE |
-					NPI_MAC_PORT_ATTR_INVALID(portn));
-				}
-				BMAC_REG_WR(handle, portn, MAC_ADDR_FILT0_REG,
-						addr0);
-				BMAC_REG_WR(handle, portn, MAC_ADDR_FILT1_REG,
-						addr1);
-				BMAC_REG_WR(handle, portn, MAC_ADDR_FILT2_REG,
-						addr2);
-			} else {
-				BMAC_REG_RD(handle, portn, MAC_ADDR_FILT0_REG,
-						&addr0);
-				BMAC_REG_RD(handle, portn, MAC_ADDR_FILT1_REG,
-						&addr1);
-				BMAC_REG_RD(handle, portn, MAC_ADDR_FILT2_REG,
-						&addr2);
-				attrp->odata[0] = addr0 & MAC_ADDR_REG_MASK;
-				attrp->odata[1] = addr1 & MAC_ADDR_REG_MASK;
-				attrp->odata[2] = addr2 & MAC_ADDR_REG_MASK;
-			}
-			break;
-		default:
-			return (NPI_FAILURE | NPI_MAC_PORT_INVALID(portn));
-		}
-	}	break;
-
-	case MAC_PORT_ADDR_FILTER_MASK: {
-		uint32_t mask_1_2;
-		uint32_t mask_0;
-
-		switch (portn) {
-		case XMAC_PORT_0:
-		case XMAC_PORT_1:
-			if (op == OP_SET) {
-				mask_0 = attrp->idata[0];
-				mask_1_2 = attrp->idata[1];
-				ASSERT((mask_0 & ~0xFFFF) == 0);
-				if ((mask_0 & ~0xFFFF) != 0) {
-					NPI_ERROR_MSG((handle.function,
-						    NPI_ERR_CTL,
-						    " npi_mac_port_attr"
-						    " MAC_ADDR_FILTER_MASK:"
-						    " Invalid Input:"
-						    " mask_0 <0x%x>",
-						    mask_0));
-					return (NPI_FAILURE |
-					NPI_MAC_PORT_ATTR_INVALID(portn));
-				}
-				ASSERT((mask_1_2 & ~0xFF) == 0);
-				if ((mask_1_2 & ~0xFF) != 0) {
-					NPI_ERROR_MSG((handle.function,
-						    NPI_ERR_CTL,
-						    " npi_mac_port_attr"
-						    " MAC_ADDR_FILTER_MASK:"
-						    " Invalid Input:"
-						    " mask_1_2 <0x%x>",
-						    mask_1_2));
-					return (NPI_FAILURE |
-					NPI_MAC_PORT_ATTR_INVALID(portn));
-				}
-				XMAC_REG_WR(handle, portn,
-					XMAC_ADDR_FILT0_MASK_REG, mask_0);
-				XMAC_REG_WR(handle, portn,
-					XMAC_ADDR_FILT12_MASK_REG, mask_1_2);
-			} else {
-				XMAC_REG_RD(handle, portn,
-					XMAC_ADDR_FILT0_MASK_REG, &mask_0);
-				XMAC_REG_RD(handle, portn,
-					XMAC_ADDR_FILT12_MASK_REG, &mask_1_2);
-				attrp->odata[0] = mask_0 & 0xFFFF;
-				attrp->odata[1] = mask_1_2 & 0xFF;
-			}
-			break;
-		case BMAC_PORT_0:
-		case BMAC_PORT_1:
-			if (op == OP_SET) {
-				mask_0 = attrp->idata[0];
-				mask_1_2 = attrp->idata[1];
-				BMAC_REG_WR(handle, portn,
-					MAC_ADDR_FILT00_MASK_REG, mask_0);
-				BMAC_REG_WR(handle, portn,
-					MAC_ADDR_FILT12_MASK_REG, mask_1_2);
-			} else {
-				BMAC_REG_RD(handle, portn,
-					MAC_ADDR_FILT00_MASK_REG, &mask_0);
-				BMAC_REG_RD(handle, portn,
-					MAC_ADDR_FILT12_MASK_REG, &mask_1_2);
-				attrp->odata[0] = mask_0;
-				attrp->odata[1] = mask_1_2;
-			}
-			break;
-		default:
-			return (NPI_FAILURE | NPI_MAC_PORT_INVALID(portn));
-		}
-	}	break;
-
-	default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_mac_port_attr"
-				    " Invalid Input:"
-				    " attr <0x%x>", attrp->type));
-		return (NPI_FAILURE | NPI_MAC_PORT_ATTR_INVALID(portn));
-	}
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_xmac_reset(npi_handle_t handle, uint8_t portn, npi_mac_reset_t mode)
-{
-	uint64_t val;
-	boolean_t txmac = B_FALSE;
-
-	ASSERT(IS_XMAC_PORT_NUM_VALID(portn));
-
-	switch (mode) {
-	case XTX_MAC_REG_RESET:
-		XMAC_REG_WR(handle, portn, XTXMAC_SW_RST_REG, XTXMAC_REG_RST);
-		XMAC_WAIT_REG(handle, portn, XTXMAC_SW_RST_REG, val);
-		txmac = B_TRUE;
-		break;
-	case XRX_MAC_REG_RESET:
-		XMAC_REG_WR(handle, portn, XRXMAC_SW_RST_REG, XRXMAC_REG_RST);
-		XMAC_WAIT_REG(handle, portn, XRXMAC_SW_RST_REG, val);
-		break;
-	case XTX_MAC_LOGIC_RESET:
-		XMAC_REG_WR(handle, portn, XTXMAC_SW_RST_REG, XTXMAC_SOFT_RST);
-		XMAC_WAIT_REG(handle, portn, XTXMAC_SW_RST_REG, val);
-		txmac = B_TRUE;
-		break;
-	case XRX_MAC_LOGIC_RESET:
-		XMAC_REG_WR(handle, portn, XRXMAC_SW_RST_REG, XRXMAC_SOFT_RST);
-		XMAC_WAIT_REG(handle, portn, XRXMAC_SW_RST_REG, val);
-		break;
-	case XTX_MAC_RESET_ALL:
-		XMAC_REG_WR(handle, portn, XTXMAC_SW_RST_REG,
-					XTXMAC_SOFT_RST | XTXMAC_REG_RST);
-		XMAC_WAIT_REG(handle, portn, XTXMAC_SW_RST_REG, val);
-		txmac = B_TRUE;
-		break;
-	case XRX_MAC_RESET_ALL:
-		XMAC_REG_WR(handle, portn, XRXMAC_SW_RST_REG,
-					XRXMAC_SOFT_RST | XRXMAC_REG_RST);
-		XMAC_WAIT_REG(handle, portn, XRXMAC_SW_RST_REG, val);
-		break;
-	default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_xmac_reset"
-				    " Invalid Input: mode <0x%x>",
-				    mode));
-		return (NPI_FAILURE | NPI_MAC_RESET_MODE_INVALID(portn));
-	}
-
-	if (val != 0) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_xmac_reset"
-				    " HW ERROR: MAC_RESET failed <0x%x>",
-				    val));
-
-		if (txmac)
-			return (NPI_FAILURE | NPI_TXMAC_RESET_FAILED(portn));
-		else
-			return (NPI_FAILURE | NPI_RXMAC_RESET_FAILED(portn));
-	}
-
-	return (NPI_SUCCESS);
-}
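
A minimal caller-side sketch (hypothetical helper; handle and portn would normally come from the driver soft state) showing how both halves of an XMAC could be reset with the RESET_ALL modes handled above:

	static npi_status_t
	example_xmac_full_reset(npi_handle_t handle, uint8_t portn)
	{
		npi_status_t rs;

		/* Reset TX-side registers and logic. */
		rs = npi_xmac_reset(handle, portn, XTX_MAC_RESET_ALL);
		if (rs != NPI_SUCCESS)
			return (rs);

		/* Then reset the RX side the same way. */
		return (npi_xmac_reset(handle, portn, XRX_MAC_RESET_ALL));
	}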
-
-npi_status_t
-npi_xmac_xif_config(npi_handle_t handle, config_op_t op, uint8_t portn,
-			xmac_xif_config_t config)
-{
-	uint64_t val = 0;
-
-	ASSERT(IS_XMAC_PORT_NUM_VALID(portn));
-
-	switch (op) {
-	case ENABLE:
-	case DISABLE:
-		ASSERT((config != 0) && ((config & ~CFG_XMAC_XIF_ALL) == 0));
-		if ((config == 0) || (config & ~CFG_XMAC_XIF_ALL) != 0) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_xmac_xif_config"
-					    " Invalid Input:"
-					    " config <0x%x>", config));
-			return (NPI_FAILURE | NPI_MAC_OPCODE_INVALID(portn));
-		}
-		if (op == ENABLE) {
-			XMAC_REG_RD(handle, portn, XMAC_CONFIG_REG, &val);
-			if (config & CFG_XMAC_XIF_LED_FORCE)
-				val |= XMAC_XIF_FORCE_LED_ON;
-			if (config & CFG_XMAC_XIF_LED_POLARITY)
-				val |= XMAC_XIF_LED_POLARITY;
-			if (config & CFG_XMAC_XIF_SEL_POR_CLK_SRC)
-				val |= XMAC_XIF_SEL_POR_CLK_SRC;
-			if (config & CFG_XMAC_XIF_TX_OUTPUT)
-				val |= XMAC_XIF_TX_OUTPUT_EN;
-
-			if (config & CFG_XMAC_XIF_LOOPBACK) {
-				val &= ~XMAC_XIF_SEL_POR_CLK_SRC;
-				val |= XMAC_XIF_LOOPBACK;
-			}
-
-			if (config & CFG_XMAC_XIF_LFS)
-				val &= ~XMAC_XIF_LFS_DISABLE;
-			if (config & CFG_XMAC_XIF_XPCS_BYPASS)
-				val |= XMAC_XIF_XPCS_BYPASS;
-			if (config & CFG_XMAC_XIF_1G_PCS_BYPASS)
-				val |= XMAC_XIF_1G_PCS_BYPASS;
-			if (config & CFG_XMAC_XIF_SEL_CLK_25MHZ)
-				val |= XMAC_XIF_SEL_CLK_25MHZ;
-			XMAC_REG_WR(handle, portn, XMAC_CONFIG_REG, val);
-
-		} else {
-			XMAC_REG_RD(handle, portn, XMAC_CONFIG_REG, &val);
-			if (config & CFG_XMAC_XIF_LED_FORCE)
-				val &= ~XMAC_XIF_FORCE_LED_ON;
-			if (config & CFG_XMAC_XIF_LED_POLARITY)
-				val &= ~XMAC_XIF_LED_POLARITY;
-			if (config & CFG_XMAC_XIF_SEL_POR_CLK_SRC)
-				val &= ~XMAC_XIF_SEL_POR_CLK_SRC;
-			if (config & CFG_XMAC_XIF_TX_OUTPUT)
-				val &= ~XMAC_XIF_TX_OUTPUT_EN;
-			if (config & CFG_XMAC_XIF_LOOPBACK)
-				val &= ~XMAC_XIF_LOOPBACK;
-			if (config & CFG_XMAC_XIF_LFS)
-				val |= XMAC_XIF_LFS_DISABLE;
-			if (config & CFG_XMAC_XIF_XPCS_BYPASS)
-				val &= ~XMAC_XIF_XPCS_BYPASS;
-			if (config & CFG_XMAC_XIF_1G_PCS_BYPASS)
-				val &= ~XMAC_XIF_1G_PCS_BYPASS;
-			if (config & CFG_XMAC_XIF_SEL_CLK_25MHZ)
-				val &= ~XMAC_XIF_SEL_CLK_25MHZ;
-			XMAC_REG_WR(handle, portn, XMAC_CONFIG_REG, val);
-		}
-		break;
-	case INIT:
-		ASSERT((config & ~CFG_XMAC_XIF_ALL) == 0);
-		if ((config & ~CFG_XMAC_XIF_ALL) != 0) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_xmac_xif_config"
-					    " Invalid Input: config <0x%x>",
-					    config));
-			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
-		}
-		XMAC_REG_RD(handle, portn, XMAC_CONFIG_REG, &val);
-
-		if (config & CFG_XMAC_XIF_LED_FORCE)
-			val |= XMAC_XIF_FORCE_LED_ON;
-		else
-			val &= ~XMAC_XIF_FORCE_LED_ON;
-		if (config & CFG_XMAC_XIF_LED_POLARITY)
-			val |= XMAC_XIF_LED_POLARITY;
-		else
-			val &= ~XMAC_XIF_LED_POLARITY;
-		if (config & CFG_XMAC_XIF_SEL_POR_CLK_SRC)
-			val |= XMAC_XIF_SEL_POR_CLK_SRC;
-		else
-			val &= ~XMAC_XIF_SEL_POR_CLK_SRC;
-		if (config & CFG_XMAC_XIF_TX_OUTPUT)
-			val |= XMAC_XIF_TX_OUTPUT_EN;
-		else
-			val &= ~XMAC_XIF_TX_OUTPUT_EN;
-
-		if (config & CFG_XMAC_XIF_LOOPBACK) {
-			val &= ~XMAC_XIF_SEL_POR_CLK_SRC;
-			val |= XMAC_XIF_LOOPBACK;
-#ifdef	AXIS_DEBUG_LB
-			val |= XMAC_RX_MAC2IPP_PKT_CNT_EN;
-#endif
-		} else {
-			val &= ~XMAC_XIF_LOOPBACK;
-		}
-
-		if (config & CFG_XMAC_XIF_LFS)
-			val &= ~XMAC_XIF_LFS_DISABLE;
-		else
-			val |= XMAC_XIF_LFS_DISABLE;
-		if (config & CFG_XMAC_XIF_XPCS_BYPASS)
-			val |= XMAC_XIF_XPCS_BYPASS;
-		else
-			val &= ~XMAC_XIF_XPCS_BYPASS;
-		if (config & CFG_XMAC_XIF_1G_PCS_BYPASS)
-			val |= XMAC_XIF_1G_PCS_BYPASS;
-		else
-			val &= ~XMAC_XIF_1G_PCS_BYPASS;
-		if (config & CFG_XMAC_XIF_SEL_CLK_25MHZ)
-			val |= XMAC_XIF_SEL_CLK_25MHZ;
-		else
-			val &= ~XMAC_XIF_SEL_CLK_25MHZ;
-		XMAC_REG_WR(handle, portn, XMAC_CONFIG_REG, val);
-
-		break;
-	default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_xmac_xif_config"
-				    " Invalid Input: op <0x%x>", op));
-		return (NPI_FAILURE | NPI_MAC_OPCODE_INVALID(portn));
-	}
-
-	return (NPI_SUCCESS);
-}
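
A hedged usage sketch for the ENABLE path above; the flag combination is illustrative only, and handle/portn are assumed to come from the caller:

	/*
	 * OR the loopback and TX-output bits into XMAC_CONFIG_REG via the
	 * ENABLE branch of npi_xmac_xif_config().
	 */
	(void) npi_xmac_xif_config(handle, ENABLE, portn,
	    CFG_XMAC_XIF_LOOPBACK | CFG_XMAC_XIF_TX_OUTPUT);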
-
-npi_status_t
-npi_xmac_tx_config(npi_handle_t handle, config_op_t op, uint8_t portn,
-			xmac_tx_config_t config)
-{
-	uint64_t val = 0;
-
-	ASSERT(IS_XMAC_PORT_NUM_VALID(portn));
-
-	switch (op) {
-	case ENABLE:
-	case DISABLE:
-		ASSERT((config != 0) && ((config & ~CFG_XMAC_TX_ALL) == 0));
-		if ((config == 0) || (config & ~CFG_XMAC_TX_ALL) != 0) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_xmac_tx_config"
-				    " Invalid Input: config <0x%x>",
-				    config));
-			return (NPI_FAILURE | NPI_MAC_OPCODE_INVALID(portn));
-		}
-		if (op == ENABLE) {
-			XMAC_REG_RD(handle, portn, XMAC_CONFIG_REG, &val);
-			if (config & CFG_XMAC_TX)
-				val |= XMAC_TX_CFG_TX_ENABLE;
-			if (config & CFG_XMAC_TX_STRETCH_MODE)
-				val |= XMAC_TX_CFG_STRETCH_MD;
-			if (config & CFG_XMAC_VAR_IPG)
-				val |= XMAC_TX_CFG_VAR_MIN_IPG_EN;
-			if (config & CFG_XMAC_TX_CRC)
-				val &= ~XMAC_TX_CFG_ALWAYS_NO_CRC;
-			XMAC_REG_WR(handle, portn, XMAC_CONFIG_REG, val);
-		} else {
-			XMAC_REG_RD(handle, portn, XMAC_CONFIG_REG, &val);
-			if (config & CFG_XMAC_TX)
-				val &= ~XMAC_TX_CFG_TX_ENABLE;
-			if (config & CFG_XMAC_TX_STRETCH_MODE)
-				val &= ~XMAC_TX_CFG_STRETCH_MD;
-			if (config & CFG_XMAC_VAR_IPG)
-				val &= ~XMAC_TX_CFG_VAR_MIN_IPG_EN;
-			if (config & CFG_XMAC_TX_CRC)
-				val |= XMAC_TX_CFG_ALWAYS_NO_CRC;
-			XMAC_REG_WR(handle, portn, XMAC_CONFIG_REG, val);
-		}
-		break;
-	case INIT:
-		ASSERT((config & ~CFG_XMAC_TX_ALL) == 0);
-		if ((config & ~CFG_XMAC_TX_ALL) != 0) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_xmac_tx_config"
-					    " Invalid Input: config <0x%x>",
-					    config));
-			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
-		}
-		XMAC_REG_RD(handle, portn, XMAC_CONFIG_REG, &val);
-		if (config & CFG_XMAC_TX)
-			val |= XMAC_TX_CFG_TX_ENABLE;
-		else
-			val &= ~XMAC_TX_CFG_TX_ENABLE;
-		if (config & CFG_XMAC_TX_STRETCH_MODE)
-			val |= XMAC_TX_CFG_STRETCH_MD;
-		else
-			val &= ~XMAC_TX_CFG_STRETCH_MD;
-		if (config & CFG_XMAC_VAR_IPG)
-			val |= XMAC_TX_CFG_VAR_MIN_IPG_EN;
-		else
-			val &= ~XMAC_TX_CFG_VAR_MIN_IPG_EN;
-		if (config & CFG_XMAC_TX_CRC)
-			val &= ~XMAC_TX_CFG_ALWAYS_NO_CRC;
-		else
-			val |= XMAC_TX_CFG_ALWAYS_NO_CRC;
-
-		XMAC_REG_WR(handle, portn, XMAC_CONFIG_REG, val);
-		break;
-	default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_xmac_tx_config"
-				    " Invalid Input: op <0x%x>",
-				    op));
-		return (NPI_FAILURE | NPI_MAC_OPCODE_INVALID(portn));
-	}
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_xmac_rx_config(npi_handle_t handle, config_op_t op, uint8_t portn,
-			xmac_rx_config_t config)
-{
-	uint64_t val = 0;
-
-	ASSERT(IS_XMAC_PORT_NUM_VALID(portn));
-
-	switch (op) {
-	case ENABLE:
-	case DISABLE:
-		ASSERT((config != 0) && ((config & ~CFG_XMAC_RX_ALL) == 0));
-		if ((config == 0) || (config & ~CFG_XMAC_RX_ALL) != 0) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_xmac_rx_config"
-					    " Invalid Input: config <0x%x>",
-					    config));
-			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
-		}
-		if (op == ENABLE) {
-			XMAC_REG_RD(handle, portn, XMAC_CONFIG_REG, &val);
-			if (config & CFG_XMAC_RX)
-				val |= XMAC_RX_CFG_RX_ENABLE;
-			if (config & CFG_XMAC_RX_PROMISCUOUS)
-				val |= XMAC_RX_CFG_PROMISC;
-			if (config & CFG_XMAC_RX_PROMISCUOUSGROUP)
-				val |= XMAC_RX_CFG_PROMISC_GROUP;
-			if (config & CFG_XMAC_RX_ERRCHK)
-				val &= ~XMAC_RX_CFG_ERR_CHK_DISABLE;
-			if (config & CFG_XMAC_RX_CRC_CHK)
-				val &= ~XMAC_RX_CFG_CRC_CHK_DISABLE;
-			if (config & CFG_XMAC_RX_RESV_MULTICAST)
-				val |= XMAC_RX_CFG_RESERVED_MCAST;
-			if (config & CFG_XMAC_RX_CODE_VIO_CHK)
-				val &= ~XMAC_RX_CFG_CD_VIO_CHK;
-			if (config & CFG_XMAC_RX_HASH_FILTER)
-				val |= XMAC_RX_CFG_HASH_FILTER_EN;
-			if (config & CFG_XMAC_RX_ADDR_FILTER)
-				val |= XMAC_RX_CFG_ADDR_FILTER_EN;
-			if (config & CFG_XMAC_RX_STRIP_CRC)
-				val |= XMAC_RX_CFG_STRIP_CRC;
-			if (config & CFG_XMAC_RX_PAUSE)
-				val |= XMAC_RX_CFG_RX_PAUSE_EN;
-			if (config & CFG_XMAC_RX_PASS_FC_FRAME)
-				val |= XMAC_RX_CFG_PASS_FLOW_CTRL;
-			XMAC_REG_WR(handle, portn, XMAC_CONFIG_REG, val);
-		} else {
-			XMAC_REG_RD(handle, portn, XMAC_CONFIG_REG, &val);
-			if (config & CFG_XMAC_RX)
-				val &= ~XMAC_RX_CFG_RX_ENABLE;
-			if (config & CFG_XMAC_RX_PROMISCUOUS)
-				val &= ~XMAC_RX_CFG_PROMISC;
-			if (config & CFG_XMAC_RX_PROMISCUOUSGROUP)
-				val &= ~XMAC_RX_CFG_PROMISC_GROUP;
-			if (config & CFG_XMAC_RX_ERRCHK)
-				val |= XMAC_RX_CFG_ERR_CHK_DISABLE;
-			if (config & CFG_XMAC_RX_CRC_CHK)
-				val |= XMAC_RX_CFG_CRC_CHK_DISABLE;
-			if (config & CFG_XMAC_RX_RESV_MULTICAST)
-				val &= ~XMAC_RX_CFG_RESERVED_MCAST;
-			if (config & CFG_XMAC_RX_CODE_VIO_CHK)
-				val |= XMAC_RX_CFG_CD_VIO_CHK;
-			if (config & CFG_XMAC_RX_HASH_FILTER)
-				val &= ~XMAC_RX_CFG_HASH_FILTER_EN;
-			if (config & CFG_XMAC_RX_ADDR_FILTER)
-				val &= ~XMAC_RX_CFG_ADDR_FILTER_EN;
-			if (config & CFG_XMAC_RX_STRIP_CRC)
-				val &= ~XMAC_RX_CFG_STRIP_CRC;
-			if (config & CFG_XMAC_RX_PAUSE)
-				val &= ~XMAC_RX_CFG_RX_PAUSE_EN;
-			if (config & CFG_XMAC_RX_PASS_FC_FRAME)
-				val &= ~XMAC_RX_CFG_PASS_FLOW_CTRL;
-			XMAC_REG_WR(handle, portn, XMAC_CONFIG_REG, val);
-		}
-		break;
-	case INIT:
-		ASSERT((config & ~CFG_XMAC_RX_ALL) == 0);
-		if ((config & ~CFG_XMAC_RX_ALL) != 0) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_xmac_rx_config"
-					    " Invalid Input: config <0x%x>",
-					    config));
-			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
-		}
-		XMAC_REG_RD(handle, portn, XMAC_CONFIG_REG, &val);
-		if (config & CFG_XMAC_RX)
-			val |= XMAC_RX_CFG_RX_ENABLE;
-		else
-			val &= ~XMAC_RX_CFG_RX_ENABLE;
-		if (config & CFG_XMAC_RX_PROMISCUOUS)
-			val |= XMAC_RX_CFG_PROMISC;
-		else
-			val &= ~XMAC_RX_CFG_PROMISC;
-		if (config & CFG_XMAC_RX_PROMISCUOUSGROUP)
-			val |= XMAC_RX_CFG_PROMISC_GROUP;
-		else
-			val &= ~XMAC_RX_CFG_PROMISC_GROUP;
-		if (config & CFG_XMAC_RX_ERRCHK)
-			val &= ~XMAC_RX_CFG_ERR_CHK_DISABLE;
-		else
-			val |= XMAC_RX_CFG_ERR_CHK_DISABLE;
-		if (config & CFG_XMAC_RX_CRC_CHK)
-			val &= ~XMAC_RX_CFG_CRC_CHK_DISABLE;
-		else
-			val |= XMAC_RX_CFG_CRC_CHK_DISABLE;
-		if (config & CFG_XMAC_RX_RESV_MULTICAST)
-			val |= XMAC_RX_CFG_RESERVED_MCAST;
-		else
-			val &= ~XMAC_RX_CFG_RESERVED_MCAST;
-		if (config & CFG_XMAC_RX_CODE_VIO_CHK)
-			val &= ~XMAC_RX_CFG_CD_VIO_CHK;
-		else
-			val |= XMAC_RX_CFG_CD_VIO_CHK;
-		if (config & CFG_XMAC_RX_HASH_FILTER)
-			val |= XMAC_RX_CFG_HASH_FILTER_EN;
-		else
-			val &= ~XMAC_RX_CFG_HASH_FILTER_EN;
-		if (config & CFG_XMAC_RX_ADDR_FILTER)
-			val |= XMAC_RX_CFG_ADDR_FILTER_EN;
-		else
-			val &= ~XMAC_RX_CFG_ADDR_FILTER_EN;
-		if (config & CFG_XMAC_RX_PAUSE)
-			val |= XMAC_RX_CFG_RX_PAUSE_EN;
-		else
-			val &= ~XMAC_RX_CFG_RX_PAUSE_EN;
-		if (config & CFG_XMAC_RX_STRIP_CRC)
-			val |= XMAC_RX_CFG_STRIP_CRC;
-		else
-			val &= ~XMAC_RX_CFG_STRIP_CRC;
-		if (config & CFG_XMAC_RX_PASS_FC_FRAME)
-			val |= XMAC_RX_CFG_PASS_FLOW_CTRL;
-		else
-			val &= ~XMAC_RX_CFG_PASS_FLOW_CTRL;
-
-		XMAC_REG_WR(handle, portn, XMAC_CONFIG_REG, val);
-		break;
-	default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_xmac_rx_config"
-					    " Invalid Input: op <0x%x>", op));
-		return (NPI_FAILURE | NPI_MAC_OPCODE_INVALID(portn));
-	}
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_xmac_tx_iconfig(npi_handle_t handle, config_op_t op, uint8_t portn,
-		    xmac_tx_iconfig_t iconfig)
-{
-	uint64_t val = 0;
-
-	ASSERT(IS_XMAC_PORT_NUM_VALID(portn));
-
-	switch (op) {
-	case ENABLE:
-	case DISABLE:
-		ASSERT((iconfig != 0) && ((iconfig & ~ICFG_XMAC_TX_ALL) == 0));
-		if ((iconfig == 0) || (iconfig & ~ICFG_XMAC_TX_ALL) != 0) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_xmac_tx_iconfig"
-				    " Invalid Input: iconfig <0x%x>",
-				    iconfig));
-			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
-		}
-		XMAC_REG_RD(handle, portn, XTXMAC_STAT_MSK_REG, &val);
-		if (op == ENABLE)
-			val &= ~iconfig;
-		else
-			val |= iconfig;
-		XMAC_REG_WR(handle, portn, XTXMAC_STAT_MSK_REG, val);
-
-		break;
-	case INIT:
-		ASSERT((iconfig & ~ICFG_XMAC_TX_ALL) == 0);
-		if ((iconfig & ~ICFG_XMAC_TX_ALL) != 0) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_xmac_tx_iconfig"
-				    " Invalid Input: iconfig <0x%x>",
-				    iconfig));
-			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
-		}
-		XMAC_REG_WR(handle, portn, XTXMAC_STAT_MSK_REG, ~iconfig);
-
-		break;
-	default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_xmac_tx_iconfig"
-				    " Invalid Input: iconfig <0x%x>",
-				    iconfig));
-		return (NPI_FAILURE | NPI_MAC_OPCODE_INVALID(portn));
-	}
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_xmac_rx_iconfig(npi_handle_t handle, config_op_t op, uint8_t portn,
-		    xmac_rx_iconfig_t iconfig)
-{
-	uint64_t val = 0;
-
-	ASSERT(IS_XMAC_PORT_NUM_VALID(portn));
-
-	switch (op) {
-	case ENABLE:
-	case DISABLE:
-		ASSERT((iconfig != 0) && ((iconfig & ~ICFG_XMAC_RX_ALL) == 0));
-		if ((iconfig == 0) || (iconfig & ~ICFG_XMAC_RX_ALL) != 0) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_xmac_rx_iconfig"
-					    " Invalid Input: iconfig <0x%x>",
-					    iconfig));
-			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
-		}
-		XMAC_REG_RD(handle, portn, XRXMAC_STAT_MSK_REG, &val);
-		if (op == ENABLE)
-			val &= ~iconfig;
-		else
-			val |= iconfig;
-		XMAC_REG_WR(handle, portn, XRXMAC_STAT_MSK_REG, val);
-
-		break;
-	case INIT:
-		ASSERT((iconfig & ~ICFG_XMAC_RX_ALL) == 0);
-		if ((iconfig & ~ICFG_XMAC_RX_ALL) != 0) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_xmac_rx_iconfig"
-					    " Invalid Input: iconfig <0x%x>",
-					    iconfig));
-			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
-		}
-		XMAC_REG_WR(handle, portn, XRXMAC_STAT_MSK_REG, ~iconfig);
-
-		break;
-	default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_xmac_rx_iconfig"
-				    " Invalid Input: iconfig <0x%x>",
-				    iconfig));
-		return (NPI_FAILURE | NPI_MAC_OPCODE_INVALID(portn));
-	}
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_xmac_ctl_iconfig(npi_handle_t handle, config_op_t op, uint8_t portn,
-			xmac_ctl_iconfig_t iconfig)
-{
-	uint64_t val = 0;
-
-	ASSERT(IS_XMAC_PORT_NUM_VALID(portn));
-
-	switch (op) {
-	case ENABLE:
-	case DISABLE:
-		ASSERT((iconfig != 0) &&	\
-			((iconfig & ~ICFG_XMAC_CTRL_ALL) == 0));
-		if ((iconfig == 0) || (iconfig & ~ICFG_XMAC_CTRL_ALL) != 0) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_xmac_ctl_iconfig"
-					    " Invalid Input: iconfig <0x%x>",
-					    iconfig));
-			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
-		}
-		XMAC_REG_RD(handle, portn, XMAC_C_S_MSK_REG, &val);
-		if (op == ENABLE)
-			val &= ~iconfig;
-		else
-			val |= iconfig;
-		XMAC_REG_WR(handle, portn, XMAC_C_S_MSK_REG, val);
-
-		break;
-	case INIT:
-		ASSERT((iconfig & ~ICFG_XMAC_CTRL_ALL) == 0);
-		if ((iconfig & ~ICFG_XMAC_CTRL_ALL) != 0) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_xmac_ctl_iconfig"
-					    " Invalid Input: iconfig <0x%x>",
-					    iconfig));
-			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
-		}
-		XMAC_REG_WR(handle, portn, XMAC_C_S_MSK_REG, ~iconfig);
-
-		break;
-	default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_xmac_ctl_iconfig"
-					    " Invalid Input: iconfig <0x%x>",
-					    iconfig));
-		return (NPI_FAILURE | NPI_MAC_OPCODE_INVALID(portn));
-	}
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_xmac_tx_get_istatus(npi_handle_t handle, uint8_t portn,
-			xmac_tx_iconfig_t *istatus)
-{
-	uint64_t val;
-
-	ASSERT(IS_XMAC_PORT_NUM_VALID(portn));
-
-	XMAC_REG_RD(handle, portn, XTXMAC_STATUS_REG, &val);
-	*istatus = (uint32_t)val;
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_xmac_rx_get_istatus(npi_handle_t handle, uint8_t portn,
-			xmac_rx_iconfig_t *istatus)
-{
-	uint64_t val;
-
-	ASSERT(IS_XMAC_PORT_NUM_VALID(portn));
-
-	XMAC_REG_RD(handle, portn, XRXMAC_STATUS_REG, &val);
-	*istatus = (uint32_t)val;
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_xmac_ctl_get_istatus(npi_handle_t handle, uint8_t portn,
-			xmac_ctl_iconfig_t *istatus)
-{
-	uint64_t val;
-
-	ASSERT(IS_XMAC_PORT_NUM_VALID(portn));
-
-	XMAC_REG_RD(handle, portn, XMAC_CTRL_STAT_REG, &val);
-	*istatus = (uint32_t)val;
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_xmac_xpcs_reset(npi_handle_t handle, uint8_t portn)
-{
-	uint64_t val;
-
-	ASSERT(IS_XMAC_PORT_NUM_VALID(portn));
-
-	XPCS_REG_RD(handle, portn, XPCS_CTRL_1_REG, &val);
-	val |= XPCS_CTRL1_RST;
-	XPCS_REG_WR(handle, portn, XPCS_CTRL_1_REG, val);
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_xmac_xpcs_enable(npi_handle_t handle, uint8_t portn)
-{
-	uint64_t val;
-
-	ASSERT(IS_XMAC_PORT_NUM_VALID(portn));
-
-	XPCS_REG_RD(handle, portn, XPCS_CFG_VENDOR_1_REG, &val);
-	val |= XPCS_CFG_XPCS_ENABLE;
-	XPCS_REG_WR(handle, portn, XPCS_CFG_VENDOR_1_REG, val);
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_xmac_xpcs_disable(npi_handle_t handle, uint8_t portn)
-{
-	uint64_t val;
-
-	ASSERT(IS_XMAC_PORT_NUM_VALID(portn));
-
-	XPCS_REG_RD(handle, portn, XPCS_CFG_VENDOR_1_REG, &val);
-	val &= ~XPCS_CFG_XPCS_ENABLE;
-	XPCS_REG_WR(handle, portn, XPCS_CFG_VENDOR_1_REG, val);
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_xmac_xpcs_read(npi_handle_t handle, uint8_t portn, uint8_t xpcs_reg,
-			uint32_t *value)
-{
-	uint32_t reg;
-	uint64_t val;
-
-	ASSERT(IS_XMAC_PORT_NUM_VALID(portn));
-
-	switch (xpcs_reg) {
-	case XPCS_REG_CONTROL1:
-		reg = XPCS_CTRL_1_REG;
-		break;
-	case XPCS_REG_STATUS1:
-		reg = XPCS_STATUS_1_REG;
-		break;
-	case XPCS_REG_DEVICE_ID:
-		reg = XPCS_DEV_ID_REG;
-		break;
-	case XPCS_REG_SPEED_ABILITY:
-		reg = XPCS_SPEED_ABILITY_REG;
-		break;
-	case XPCS_REG_DEVICE_IN_PKG:
-		reg = XPCS_DEV_IN_PKG_REG;
-		break;
-	case XPCS_REG_CONTROL2:
-		reg = XPCS_CTRL_2_REG;
-		break;
-	case XPCS_REG_STATUS2:
-		reg = XPCS_STATUS_2_REG;
-		break;
-	case XPCS_REG_PKG_ID:
-		reg = XPCS_PKG_ID_REG;
-		break;
-	case XPCS_REG_STATUS:
-		reg = XPCS_STATUS_REG;
-		break;
-	case XPCS_REG_TEST_CONTROL:
-		reg = XPCS_TEST_CTRL_REG;
-		break;
-	case XPCS_REG_CONFIG_VENDOR1:
-		reg = XPCS_CFG_VENDOR_1_REG;
-		break;
-	case XPCS_REG_DIAG_VENDOR2:
-		reg = XPCS_DIAG_VENDOR_2_REG;
-		break;
-	case XPCS_REG_MASK1:
-		reg = XPCS_MASK_1_REG;
-		break;
-	case XPCS_REG_PACKET_COUNTER:
-		reg = XPCS_PKT_CNTR_REG;
-		break;
-	case XPCS_REG_TX_STATEMACHINE:
-		reg = XPCS_TX_STATE_MC_REG;
-		break;
-	case XPCS_REG_DESCWERR_COUNTER:
-		reg = XPCS_DESKEW_ERR_CNTR_REG;
-		break;
-	case XPCS_REG_SYMBOL_ERR_L0_1_COUNTER:
-		reg = XPCS_SYM_ERR_CNTR_L0_L1_REG;
-		break;
-	case XPCS_REG_SYMBOL_ERR_L2_3_COUNTER:
-		reg = XPCS_SYM_ERR_CNTR_L2_L3_REG;
-		break;
-	case XPCS_REG_TRAINING_VECTOR:
-		reg = XPCS_TRAINING_VECTOR_REG;
-		break;
-	default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_xmac_xpcs_read"
-				    " Invalid Input: xpcs_reg <0x%x>",
-				    xpcs_reg));
-		return (NPI_FAILURE | NPI_MAC_REG_INVALID(portn));
-	}
-	XPCS_REG_RD(handle, portn, reg, &val);
-	*value = val & 0xFFFFFFFF;
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_xmac_xpcs_write(npi_handle_t handle, uint8_t portn, uint8_t xpcs_reg,
-			uint32_t value)
-{
-	uint32_t reg;
-	uint64_t val;
-
-	ASSERT(IS_XMAC_PORT_NUM_VALID(portn));
-
-	switch (xpcs_reg) {
-	case XPCS_REG_CONTROL1:
-		reg = XPCS_CTRL_1_REG;
-		break;
-	case XPCS_REG_TEST_CONTROL:
-		reg = XPCS_TEST_CTRL_REG;
-		break;
-	case XPCS_REG_CONFIG_VENDOR1:
-		reg = XPCS_CFG_VENDOR_1_REG;
-		break;
-	case XPCS_REG_DIAG_VENDOR2:
-		reg = XPCS_DIAG_VENDOR_2_REG;
-		break;
-	case XPCS_REG_MASK1:
-		reg = XPCS_MASK_1_REG;
-		break;
-	case XPCS_REG_PACKET_COUNTER:
-		reg = XPCS_PKT_CNTR_REG;
-		break;
-	case XPCS_REG_DESCWERR_COUNTER:
-		reg = XPCS_DESKEW_ERR_CNTR_REG;
-		break;
-	case XPCS_REG_TRAINING_VECTOR:
-		reg = XPCS_TRAINING_VECTOR_REG;
-		break;
-	default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_xmac_xpcs_write"
-				    " Invalid Input: xpcs_reg <0x%x>",
-				    xpcs_reg));
-		return (NPI_FAILURE | NPI_MAC_PCS_REG_INVALID(portn));
-	}
-	val = value;
-
-	XPCS_REG_WR(handle, portn, reg, val);
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_bmac_reset(npi_handle_t handle, uint8_t portn, npi_mac_reset_t mode)
-{
-	uint64_t val = 0;
-	boolean_t txmac = B_FALSE;
-
-	ASSERT(IS_BMAC_PORT_NUM_VALID(portn));
-
-	switch (mode) {
-	case TX_MAC_RESET:
-		BMAC_REG_WR(handle, portn, BTXMAC_SW_RST_REG, 0x1);
-		BMAC_WAIT_REG(handle, portn, BTXMAC_SW_RST_REG, val);
-		txmac = B_TRUE;
-		break;
-	case RX_MAC_RESET:
-		BMAC_REG_WR(handle, portn, BRXMAC_SW_RST_REG, 0x1);
-		BMAC_WAIT_REG(handle, portn, BRXMAC_SW_RST_REG, val);
-		break;
-	default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_bmac_reset"
-				    " Invalid Input: mode <0x%x>",
-				    mode));
-		return (NPI_FAILURE | NPI_MAC_RESET_MODE_INVALID(portn));
-	}
-
-	if (val != 0) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_bmac_reset"
-				    " BMAC_RESET HW Error: ret <0x%x>",
-				    val));
-		if (txmac)
-			return (NPI_FAILURE | NPI_TXMAC_RESET_FAILED(portn));
-		else
-			return (NPI_FAILURE | NPI_RXMAC_RESET_FAILED(portn));
-	}
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_mac_pcs_reset(npi_handle_t handle, uint8_t portn)
-{
-	/* Reset the PCS via the MII control register reset bit. */
-	uint64_t val = 0;
-
-	ASSERT(IS_PORT_NUM_VALID(portn));
-
-	PCS_REG_RD(handle, portn, PCS_MII_CTRL_REG, &val);
-	val |= PCS_MII_RESET;
-	PCS_REG_WR(handle, portn, PCS_MII_CTRL_REG, val);
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_mac_get_link_status(npi_handle_t handle, uint8_t portn,
-			boolean_t *link_up)
-{
-	uint64_t val;
-
-	ASSERT(IS_PORT_NUM_VALID(portn));
-
-	PCS_REG_RD(handle, portn, PCS_MII_STATUS_REG, &val);
-
-	if (val & PCS_MII_STATUS_LINK_STATUS) {
-		*link_up = B_TRUE;
-	} else {
-		*link_up = B_FALSE;
-	}
-
-	return (NPI_SUCCESS);
-}
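
A short sketch, assuming an initialized handle and a valid port number, of how a caller might poll the PCS link state reported above:

	boolean_t link_up = B_FALSE;

	/* Reads PCS_MII_STATUS_REG and reports the link-status bit. */
	(void) npi_mac_get_link_status(handle, portn, &link_up);
	if (link_up) {
		/* Link is up; the caller could now enable the MAC. */
	}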
-
-npi_status_t
-npi_bmac_tx_config(npi_handle_t handle, config_op_t op, uint8_t portn,
-			bmac_tx_config_t config)
-{
-	uint64_t val = 0;
-
-	ASSERT(IS_BMAC_PORT_NUM_VALID(portn));
-
-	switch (op) {
-	case ENABLE:
-	case DISABLE:
-		ASSERT((config != 0) && ((config & ~CFG_BMAC_TX_ALL) == 0));
-		if ((config == 0) || (config & ~CFG_BMAC_TX_ALL) != 0) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_bmac_tx_config"
-					    " Invalid Input: config <0x%x>",
-					    config));
-			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
-		}
-		if (op == ENABLE) {
-			BMAC_REG_RD(handle, portn, TXMAC_CONFIG_REG, &val);
-			if (config & CFG_BMAC_TX)
-				val |= MAC_TX_CFG_TXMAC_ENABLE;
-			if (config & CFG_BMAC_TX_CRC)
-				val &= ~MAC_TX_CFG_NO_FCS;
-			BMAC_REG_WR(handle, portn, TXMAC_CONFIG_REG, val);
-		} else {
-			BMAC_REG_RD(handle, portn, TXMAC_CONFIG_REG, &val);
-			if (config & CFG_BMAC_TX)
-				val &= ~MAC_TX_CFG_TXMAC_ENABLE;
-			if (config & CFG_BMAC_TX_CRC)
-				val |= MAC_TX_CFG_NO_FCS;
-			BMAC_REG_WR(handle, portn, TXMAC_CONFIG_REG, val);
-		}
-		break;
-	case INIT:
-		ASSERT((config & ~CFG_BMAC_TX_ALL) == 0);
-		if ((config & ~CFG_BMAC_TX_ALL) != 0) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_bmac_tx_config"
-					    " Invalid Input: config <0x%x>",
-					    config));
-			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
-		}
-		BMAC_REG_RD(handle, portn, TXMAC_CONFIG_REG, &val);
-		if (config & CFG_BMAC_TX)
-			val |= MAC_TX_CFG_TXMAC_ENABLE;
-		else
-			val &= ~MAC_TX_CFG_TXMAC_ENABLE;
-		if (config & CFG_BMAC_TX_CRC)
-			val &= ~MAC_TX_CFG_NO_FCS;
-		else
-			val |= MAC_TX_CFG_NO_FCS;
-		BMAC_REG_WR(handle, portn, TXMAC_CONFIG_REG, val);
-		break;
-	default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_bmac_tx_config"
-				    " Invalid Input: op <0x%x>",
-				    op));
-		return (NPI_FAILURE | NPI_MAC_OPCODE_INVALID(portn));
-	}
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_bmac_rx_config(npi_handle_t handle, config_op_t op, uint8_t portn,
-			bmac_rx_config_t config)
-{
-	uint64_t val = 0;
-
-	ASSERT(IS_BMAC_PORT_NUM_VALID(portn));
-
-	switch (op) {
-	case ENABLE:
-	case DISABLE:
-		ASSERT((config != 0) && ((config & ~CFG_BMAC_RX_ALL) == 0));
-		if ((config == 0) || (config & ~CFG_BMAC_RX_ALL) != 0) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_bmac_rx_config"
-					    " Invalid Input: config <0x%x>",
-					    config));
-			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
-		}
-		if (op == ENABLE) {
-			BMAC_REG_RD(handle, portn, RXMAC_CONFIG_REG, &val);
-			if (config & CFG_BMAC_RX)
-				val |= MAC_RX_CFG_RXMAC_ENABLE;
-			if (config & CFG_BMAC_RX_STRIP_PAD)
-				val |= MAC_RX_CFG_STRIP_PAD;
-			if (config & CFG_BMAC_RX_STRIP_CRC)
-				val |= MAC_RX_CFG_STRIP_FCS;
-			if (config & CFG_BMAC_RX_PROMISCUOUS)
-				val |= MAC_RX_CFG_PROMISC;
-			if (config & CFG_BMAC_RX_PROMISCUOUSGROUP)
-				val |= MAC_RX_CFG_PROMISC_GROUP;
-			if (config & CFG_BMAC_RX_HASH_FILTER)
-				val |= MAC_RX_CFG_HASH_FILTER_EN;
-			if (config & CFG_BMAC_RX_ADDR_FILTER)
-				val |= MAC_RX_CFG_ADDR_FILTER_EN;
-			if (config & CFG_BMAC_RX_DISCARD_ON_ERR)
-				val &= ~MAC_RX_CFG_DISABLE_DISCARD;
-			BMAC_REG_WR(handle, portn, RXMAC_CONFIG_REG, val);
-		} else {
-			BMAC_REG_RD(handle, portn, RXMAC_CONFIG_REG, &val);
-			if (config & CFG_BMAC_RX)
-				val &= ~MAC_RX_CFG_RXMAC_ENABLE;
-			if (config & CFG_BMAC_RX_STRIP_PAD)
-				val &= ~MAC_RX_CFG_STRIP_PAD;
-			if (config & CFG_BMAC_RX_STRIP_CRC)
-				val &= ~MAC_RX_CFG_STRIP_FCS;
-			if (config & CFG_BMAC_RX_PROMISCUOUS)
-				val &= ~MAC_RX_CFG_PROMISC;
-			if (config & CFG_BMAC_RX_PROMISCUOUSGROUP)
-				val &= ~MAC_RX_CFG_PROMISC_GROUP;
-			if (config & CFG_BMAC_RX_HASH_FILTER)
-				val &= ~MAC_RX_CFG_HASH_FILTER_EN;
-			if (config & CFG_BMAC_RX_ADDR_FILTER)
-				val &= ~MAC_RX_CFG_ADDR_FILTER_EN;
-			if (config & CFG_BMAC_RX_DISCARD_ON_ERR)
-				val |= MAC_RX_CFG_DISABLE_DISCARD;
-			BMAC_REG_WR(handle, portn, RXMAC_CONFIG_REG, val);
-		}
-		break;
-	case INIT:
-		ASSERT((config & ~CFG_BMAC_RX_ALL) == 0);
-		if ((config & ~CFG_BMAC_RX_ALL) != 0) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_bmac_rx_config"
-					    " Invalid Input: config <0x%x>",
-					    config));
-			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
-		}
-		BMAC_REG_RD(handle, portn, RXMAC_CONFIG_REG, &val);
-		if (config & CFG_BMAC_RX)
-			val |= MAC_RX_CFG_RXMAC_ENABLE;
-		else
-			val &= ~MAC_RX_CFG_RXMAC_ENABLE;
-		if (config & CFG_BMAC_RX_STRIP_PAD)
-			val |= MAC_RX_CFG_STRIP_PAD;
-		else
-			val &= ~MAC_RX_CFG_STRIP_PAD;
-		if (config & CFG_BMAC_RX_STRIP_CRC)
-			val |= MAC_RX_CFG_STRIP_FCS;
-		else
-			val &= ~MAC_RX_CFG_STRIP_FCS;
-		if (config & CFG_BMAC_RX_PROMISCUOUS)
-			val |= MAC_RX_CFG_PROMISC;
-		else
-			val &= ~MAC_RX_CFG_PROMISC;
-		if (config & CFG_BMAC_RX_PROMISCUOUSGROUP)
-			val |= MAC_RX_CFG_PROMISC_GROUP;
-		else
-			val &= ~MAC_RX_CFG_PROMISC_GROUP;
-		if (config & CFG_BMAC_RX_HASH_FILTER)
-			val |= MAC_RX_CFG_HASH_FILTER_EN;
-		else
-			val &= ~MAC_RX_CFG_HASH_FILTER_EN;
-		if (config & CFG_BMAC_RX_ADDR_FILTER)
-			val |= MAC_RX_CFG_ADDR_FILTER_EN;
-		else
-			val &= ~MAC_RX_CFG_ADDR_FILTER_EN;
-		if (config & CFG_BMAC_RX_DISCARD_ON_ERR)
-			val &= ~MAC_RX_CFG_DISABLE_DISCARD;
-		else
-			val |= MAC_RX_CFG_DISABLE_DISCARD;
-
-		BMAC_REG_WR(handle, portn, RXMAC_CONFIG_REG, val);
-		break;
-	default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_bmac_rx_config"
-					    " Invalid Input: op <0x%x>", op));
-		return (NPI_FAILURE | NPI_MAC_OPCODE_INVALID(portn));
-	}
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_bmac_rx_iconfig(npi_handle_t handle, config_op_t op, uint8_t portn,
-		    bmac_rx_iconfig_t iconfig)
-{
-	uint64_t val = 0;
-
-	ASSERT(IS_BMAC_PORT_NUM_VALID(portn));
-
-	switch (op) {
-	case ENABLE:
-	case DISABLE:
-		ASSERT((iconfig != 0) && ((iconfig & ~ICFG_BMAC_RX_ALL) == 0));
-		if ((iconfig == 0) || (iconfig & ~ICFG_BMAC_RX_ALL) != 0) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_bmac_rx_iconfig"
-					    " Invalid Input: iconfig <0x%x>",
-					    iconfig));
-			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
-		}
-		BMAC_REG_RD(handle, portn, BRXMAC_STAT_MSK_REG, &val);
-		if (op == ENABLE)
-			val &= ~iconfig;
-		else
-			val |= iconfig;
-		BMAC_REG_WR(handle, portn, BRXMAC_STAT_MSK_REG, val);
-
-		break;
-	case INIT:
-		ASSERT((iconfig & ~ICFG_BMAC_RX_ALL) == 0);
-		if ((iconfig & ~ICFG_BMAC_RX_ALL) != 0) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_bmac_rx_iconfig"
-					    " Invalid Input: iconfig <0x%x>",
-					    iconfig));
-			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
-		}
-		BMAC_REG_WR(handle, portn, BRXMAC_STAT_MSK_REG, ~iconfig);
-
-		break;
-	default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_bmac_rx_iconfig"
-				    " Invalid Input: iconfig <0x%x>",
-				    iconfig));
-		return (NPI_FAILURE | NPI_MAC_OPCODE_INVALID(portn));
-	}
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_bmac_xif_config(npi_handle_t handle, config_op_t op, uint8_t portn,
-		    bmac_xif_config_t config)
-{
-	uint64_t val = 0;
-
-	ASSERT(IS_BMAC_PORT_NUM_VALID(portn));
-
-	switch (op) {
-	case ENABLE:
-	case DISABLE:
-		ASSERT((config != 0) && ((config & ~CFG_BMAC_XIF_ALL) == 0));
-		if ((config == 0) || (config & ~CFG_BMAC_XIF_ALL) != 0) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_bmac_xif_config"
-					    " Invalid Input: config <0x%x>",
-					    config));
-			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
-		}
-		if (op == ENABLE) {
-			BMAC_REG_RD(handle, portn, MAC_XIF_CONFIG_REG, &val);
-			if (config & CFG_BMAC_XIF_TX_OUTPUT)
-				val |= MAC_XIF_TX_OUTPUT_EN;
-			if (config & CFG_BMAC_XIF_LOOPBACK)
-				val |= MAC_XIF_MII_INT_LOOPBACK;
-			if (config & CFG_BMAC_XIF_GMII_MODE)
-				val |= MAC_XIF_GMII_MODE;
-			if (config & CFG_BMAC_XIF_LINKLED)
-				val |= MAC_XIF_LINK_LED;
-			if (config & CFG_BMAC_XIF_LED_POLARITY)
-				val |= MAC_XIF_LED_POLARITY;
-			if (config & CFG_BMAC_XIF_SEL_CLK_25MHZ)
-				val |= MAC_XIF_SEL_CLK_25MHZ;
-			BMAC_REG_WR(handle, portn, MAC_XIF_CONFIG_REG, val);
-		} else {
-			BMAC_REG_RD(handle, portn, MAC_XIF_CONFIG_REG, &val);
-			if (config & CFG_BMAC_XIF_TX_OUTPUT)
-				val &= ~MAC_XIF_TX_OUTPUT_EN;
-			if (config & CFG_BMAC_XIF_LOOPBACK)
-				val &= ~MAC_XIF_MII_INT_LOOPBACK;
-			if (config & CFG_BMAC_XIF_GMII_MODE)
-				val &= ~MAC_XIF_GMII_MODE;
-			if (config & CFG_BMAC_XIF_LINKLED)
-				val &= ~MAC_XIF_LINK_LED;
-			if (config & CFG_BMAC_XIF_LED_POLARITY)
-				val &= ~MAC_XIF_LED_POLARITY;
-			if (config & CFG_BMAC_XIF_SEL_CLK_25MHZ)
-				val &= ~MAC_XIF_SEL_CLK_25MHZ;
-			BMAC_REG_WR(handle, portn, MAC_XIF_CONFIG_REG, val);
-		}
-		break;
-	case INIT:
-		ASSERT((config & ~CFG_BMAC_XIF_ALL) == 0);
-		if ((config & ~CFG_BMAC_XIF_ALL) != 0) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_bmac_xif_config"
-					    " Invalid Input: config <0x%x>",
-					    config));
-			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
-		}
-		BMAC_REG_RD(handle, portn, MAC_XIF_CONFIG_REG, &val);
-		if (config & CFG_BMAC_XIF_TX_OUTPUT)
-			val |= MAC_XIF_TX_OUTPUT_EN;
-		else
-			val &= ~MAC_XIF_TX_OUTPUT_EN;
-		if (config & CFG_BMAC_XIF_LOOPBACK)
-			val |= MAC_XIF_MII_INT_LOOPBACK;
-		else
-			val &= ~MAC_XIF_MII_INT_LOOPBACK;
-		if (config & CFG_BMAC_XIF_GMII_MODE)
-			val |= MAC_XIF_GMII_MODE;
-		else
-			val &= ~MAC_XIF_GMII_MODE;
-		if (config & CFG_BMAC_XIF_LINKLED)
-			val |= MAC_XIF_LINK_LED;
-		else
-			val &= ~MAC_XIF_LINK_LED;
-		if (config & CFG_BMAC_XIF_LED_POLARITY)
-			val |= MAC_XIF_LED_POLARITY;
-		else
-			val &= ~MAC_XIF_LED_POLARITY;
-		if (config & CFG_BMAC_XIF_SEL_CLK_25MHZ)
-			val |= MAC_XIF_SEL_CLK_25MHZ;
-		else
-			val &= ~MAC_XIF_SEL_CLK_25MHZ;
-		BMAC_REG_WR(handle, portn, MAC_XIF_CONFIG_REG, val);
-		break;
-	default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_bmac_xif_config"
-				    " Invalid Input: op <0x%x>",
-				    op));
-		return (NPI_FAILURE | NPI_MAC_OPCODE_INVALID(portn));
-	}
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_bmac_tx_iconfig(npi_handle_t handle, config_op_t op, uint8_t portn,
-		    bmac_tx_iconfig_t iconfig)
-{
-	uint64_t val = 0;
-
-	ASSERT(IS_BMAC_PORT_NUM_VALID(portn));
-
-	switch (op) {
-	case ENABLE:
-	case DISABLE:
-		ASSERT((iconfig != 0) && ((iconfig & ~ICFG_XMAC_TX_ALL) == 0));
-		if ((iconfig == 0) || (iconfig & ~ICFG_XMAC_TX_ALL) != 0) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_bmac_tx_iconfig"
-					    " Invalid Input: iconfig <0x%x>",
-					    iconfig));
-			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
-		}
-		BMAC_REG_RD(handle, portn, BTXMAC_STAT_MSK_REG, &val);
-		if (op == ENABLE)
-			val &= ~iconfig;
-		else
-			val |= iconfig;
-		BMAC_REG_WR(handle, portn, BTXMAC_STAT_MSK_REG, val);
-
-		break;
-	case INIT:
-		ASSERT((iconfig & ~ICFG_XMAC_TX_ALL) == 0);
-		if ((iconfig & ~ICFG_XMAC_TX_ALL) != 0) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_bmac_tx_iconfig"
-					    " Invalid Input: iconfig <0x%x>",
-					    iconfig));
-			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
-		}
-		BMAC_REG_WR(handle, portn, BTXMAC_STAT_MSK_REG, ~iconfig);
-
-		break;
-	default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_bmac_tx_iconfig"
-				    " Invalid Input: iconfig <0x%x>",
-				    iconfig));
-		return (NPI_FAILURE | NPI_MAC_OPCODE_INVALID(portn));
-	}
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_bmac_ctl_iconfig(npi_handle_t handle, config_op_t op, uint8_t portn,
-			bmac_ctl_iconfig_t iconfig)
-{
-	uint64_t val = 0;
-
-	ASSERT(IS_BMAC_PORT_NUM_VALID(portn));
-
-	switch (op) {
-	case ENABLE:
-	case DISABLE:
-		ASSERT((iconfig != 0) && ((iconfig & ~ICFG_BMAC_CTL_ALL) == 0));
-		if ((iconfig == 0) || (iconfig & ~ICFG_BMAC_CTL_ALL) != 0) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_bmac_ctl_iconfig"
-					    " Invalid Input: iconfig <0x%x>",
-					    iconfig));
-			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
-		}
-		BMAC_REG_RD(handle, portn, BMAC_C_S_MSK_REG, &val);
-		if (op == ENABLE)
-			val &= ~iconfig;
-		else
-			val |= iconfig;
-		BMAC_REG_WR(handle, portn, BMAC_C_S_MSK_REG, val);
-
-		break;
-	case INIT:
-		ASSERT((iconfig & ~ICFG_BMAC_CTL_ALL) == 0);
-		if ((iconfig & ~ICFG_BMAC_CTL_ALL) != 0) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_bmac_ctl_iconfig"
-					    " Invalid Input: iconfig <0x%x>",
-					    iconfig));
-			return (NPI_FAILURE | NPI_MAC_CONFIG_INVALID(portn));
-		}
-		BMAC_REG_WR(handle, portn, BMAC_C_S_MSK_REG, ~iconfig);
-
-		break;
-	default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_bmac_ctl_iconfig"
-				    " Invalid Input: iconfig <0x%x>",
-				    iconfig));
-		return (NPI_FAILURE | NPI_MAC_OPCODE_INVALID(portn));
-	}
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_bmac_tx_get_istatus(npi_handle_t handle, uint8_t portn,
-			bmac_tx_iconfig_t *istatus)
-{
-	uint64_t val = 0;
-
-	ASSERT(IS_BMAC_PORT_NUM_VALID(portn));
-
-	BMAC_REG_RD(handle, portn, BTXMAC_STATUS_REG, &val);
-	*istatus = (uint32_t)val;
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_bmac_rx_get_istatus(npi_handle_t handle, uint8_t portn,
-			bmac_rx_iconfig_t *istatus)
-{
-	uint64_t val = 0;
-
-	ASSERT(IS_BMAC_PORT_NUM_VALID(portn));
-
-	BMAC_REG_RD(handle, portn, BRXMAC_STATUS_REG, &val);
-	*istatus = (uint32_t)val;
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_bmac_ctl_get_istatus(npi_handle_t handle, uint8_t portn,
-				bmac_ctl_iconfig_t *istatus)
-{
-	uint64_t val = 0;
-
-	ASSERT(IS_BMAC_PORT_NUM_VALID(portn));
-
-	BMAC_REG_RD(handle, portn, BMAC_CTRL_STAT_REG, &val);
-	*istatus = (uint32_t)val;
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_mac_mif_mdio_read(npi_handle_t handle, uint8_t portn, uint8_t device,
-			uint16_t xcvr_reg, uint16_t *value)
-{
-	mif_frame_t frame;
-	uint_t delay;
-
-	frame.value = 0;
-	frame.bits.w0.st = FRAME45_ST;		/* Clause 45	*/
-	frame.bits.w0.op = FRAME45_OP_ADDR;	/* Select address	*/
-	frame.bits.w0.phyad = portn;		/* Port number	*/
-	frame.bits.w0.regad = device;		/* Device number	*/
-	frame.bits.w0.ta_msb = 1;
-	frame.bits.w0.ta_lsb = 0;
-	frame.bits.w0.data = xcvr_reg;	/* register address */
-
-	NPI_DEBUG_MSG((handle.function, MIF_CTL,
-		"mdio read port %d addr val=0x%x\n", portn, frame.value));
-
-	MIF_REG_WR(handle, MIF_OUTPUT_FRAME_REG, frame.value);
-
-	delay = 0;
-	MIF_WAIT_REG(handle, frame, delay, MIF_DELAY, MIF_DELAY);
-
-	NPI_DEBUG_MSG((handle.function, MIF_CTL,
-		"mdio read port %d addr poll=0x%x\n", portn, frame.value));
-
-	if (delay == MIF_DELAY) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					"mdio read no response1\n"));
-	}
-
-	frame.bits.w0.st = FRAME45_ST; /* Clause 45 */
-	frame.bits.w0.op = FRAME45_OP_READ; /* Read */
-	frame.bits.w0.phyad = portn; /* Port Number */
-	frame.bits.w0.regad = device; /* Device Number */
-	frame.bits.w0.ta_msb = 1;
-	frame.bits.w0.ta_lsb = 0;
-
-	NPI_DEBUG_MSG((handle.function, MIF_CTL,
-		"mdio read port %d data frame=0x%x\n", portn, frame.value));
-
-	MIF_REG_WR(handle, MIF_OUTPUT_FRAME_REG, frame.value);
-
-	delay = 0;
-	MIF_WAIT_REG(handle, frame, delay, MIF_DELAY, MIF_DELAY);
-
-	NPI_DEBUG_MSG((handle.function, MIF_CTL,
-		"mdio read port %d data poll=0x%x\n", portn, frame.value));
-
-	*value = frame.bits.w0.data;
-	NPI_DEBUG_MSG((handle.function, MIF_CTL,
-		"mdio read port=%d val=0x%x\n", portn, *value));
-
-	if (delay == MIF_DELAY) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			"mdio read no response2\n"));
-	}
-
-	return (NPI_SUCCESS);
-}
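
A hedged sketch of the Clause 45 access above; the MMD/device and register numbers are purely illustrative, not values taken from this changeset:

	uint16_t phy_val;

	/*
	 * Drives the two-frame Clause 45 sequence implemented above: an
	 * address frame selecting the register, then a read frame that
	 * returns the data.
	 */
	(void) npi_mac_mif_mdio_read(handle, portn,
	    1 /* example device (MMD) */, 0 /* example register */, &phy_val);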
-
-npi_status_t
-npi_mac_mif_mii_read(npi_handle_t handle, uint8_t portn, uint8_t xcvr_reg,
-			uint16_t *value)
-{
-	mif_frame_t frame;
-	uint_t delay;
-
-	frame.bits.w0.st = 0x1; /* Clause 22 */
-	frame.bits.w0.op = 0x2;
-	frame.bits.w0.phyad = portn;
-	frame.bits.w0.regad = xcvr_reg;
-	frame.bits.w0.ta_msb = 1;
-	frame.bits.w0.ta_lsb = 0;
-	MIF_REG_WR(handle, MIF_OUTPUT_FRAME_REG, frame.value);
-
-	delay = 0;
-	MIF_WAIT_REG(handle, frame, delay, MIF_DELAY, MAX_PIO_RETRIES);
-
-	if (delay == MAX_PIO_RETRIES)
-		return (NPI_FAILURE | NPI_MAC_MII_READ_FAILED(portn));
-
-	*value = frame.bits.w0.data;
-	NPI_DEBUG_MSG((handle.function, MIF_CTL,
-			"mif mii read port %d reg=0x%x frame=0x%x\n", portn,
-			xcvr_reg, frame.bits.w0.data));
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_mac_mif_mdio_write(npi_handle_t handle, uint8_t portn, uint8_t device,
-			uint16_t xcvr_reg, uint16_t value)
-{
-	mif_frame_t frame;
-	uint_t delay;
-
-	frame.value = 0;
-	frame.bits.w0.st = FRAME45_ST; /* Clause 45 */
-	frame.bits.w0.op = FRAME45_OP_ADDR; /* Select Address */
-	frame.bits.w0.phyad = portn; /* Port Number */
-	frame.bits.w0.regad = device; /* Device Number */
-	frame.bits.w0.ta_msb = 1;
-	frame.bits.w0.ta_lsb = 0;
-	frame.bits.w0.data = xcvr_reg;	/* register address */
-
-	MIF_REG_WR(handle, MIF_OUTPUT_FRAME_REG, frame.value);
-
-	NPI_DEBUG_MSG((handle.function, MIF_CTL,
-		"mdio write port %d addr val=0x%x\n", portn, frame.value));
-
-	delay = 0;
-	MIF_WAIT_REG(handle, frame, delay, MIF_DELAY, MIF_DELAY);
-
-	NPI_DEBUG_MSG((handle.function, MIF_CTL,
-		"mdio write port %d addr poll=0x%x\n", portn, frame.value));
-
-	if (delay == MIF_DELAY) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				"mdio write no response1\n"));
-	}
-
-	frame.bits.w0.st = FRAME45_ST; /* Clause 45 */
-	frame.bits.w0.op = FRAME45_OP_WRITE; /* Write */
-	frame.bits.w0.phyad = portn; /* Port number   */
-	frame.bits.w0.regad = device; /* Device number */
-	frame.bits.w0.ta_msb = 1;
-	frame.bits.w0.ta_lsb = 0;
-	frame.bits.w0.data = value;
-	MIF_REG_WR(handle, MIF_OUTPUT_FRAME_REG, frame.value);
-
-	NPI_DEBUG_MSG((handle.function, MIF_CTL,
-		"mdio write port %d data val=0x%x\n", portn, frame.value));
-
-	delay = 0;
-	MIF_WAIT_REG(handle, frame, delay, MIF_DELAY, MIF_DELAY);
-
-	NPI_DEBUG_MSG((handle.function, MIF_CTL,
-		"mdio write port %d data poll=0x%x\n", portn, frame.value));
-
-	if (delay == MIF_DELAY) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				"mdio write no response2\n"));
-	}
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_mac_mif_mii_write(npi_handle_t handle, uint8_t portn, uint8_t xcvr_reg,
-			uint16_t value)
-{
-	mif_frame_t frame;
-	uint_t delay;
-
-	frame.bits.w0.st = 0x1; /* Clause 22 */
-	frame.bits.w0.op = 0x1;
-	frame.bits.w0.phyad = portn;
-	frame.bits.w0.regad = xcvr_reg;
-	frame.bits.w0.ta_msb = 1;
-	frame.bits.w0.ta_lsb = 0;
-	frame.bits.w0.data = value;
-	MIF_REG_WR(handle, MIF_OUTPUT_FRAME_REG, frame.value);
-
-	delay = 0;
-	MIF_WAIT_REG(handle, frame, delay, MIF_DELAY, MAX_PIO_RETRIES);
-
-	NPI_DEBUG_MSG((handle.function, MIF_CTL,
-			"mif mii write port %d reg=0x%x frame=0x%x\n", portn,
-			xcvr_reg, frame.value));
-
-	if (delay == MAX_PIO_RETRIES)
-		return (NPI_FAILURE | NPI_MAC_MII_WRITE_FAILED(portn));
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_mac_pcs_mii_read(npi_handle_t handle, uint8_t portn, uint8_t xcvr_reg,
-			uint16_t *value)
-{
-	pcs_anar_t pcs_anar;
-	pcs_anar_t pcs_anlpar;
-	pcs_stat_t pcs_stat;
-	pcs_stat_mc_t pcs_stat_mc;
-	mii_anar_t anar;
-	mii_anar_t anlpar;
-	mii_aner_t aner;
-	mii_esr_t esr;
-	mii_gsr_t gsr;
-	uint64_t val = 0;
-
-	ASSERT(IS_PORT_NUM_VALID(portn));
-
-	switch (xcvr_reg) {
-	case MII_BMCR:
-		PCS_REG_RD(handle, portn, PCS_MII_CTRL_REG, &val);
-		*value = (uint16_t)val;
-		break;
-	case MII_BMSR:
-		PCS_REG_RD(handle, portn, PCS_MII_STATUS_REG, &val);
-		pcs_stat.value = val;
-		PCS_REG_RD(handle, portn, PCS_STATE_MACHINE_REG, &val);
-		pcs_stat_mc.value = val;
-		if ((pcs_stat_mc.bits.w0.link_cfg_stat == 0xB) &&
-			(pcs_stat_mc.bits.w0.word_sync != 0)) {
-			pcs_stat.bits.w0.link_stat = 1;
-		} else if (pcs_stat_mc.bits.w0.link_cfg_stat != 0xB) {
-			pcs_stat.bits.w0.link_stat = 0;
-		}
-		*value = (uint16_t)pcs_stat.value;
-		break;
-	case MII_ESR:
-		PCS_REG_RD(handle, portn, PCS_MII_ADVERT_REG, &val);
-		pcs_anar.value = (uint16_t)val;
-		esr.value = 0;
-		esr.bits.link_1000fdx = pcs_anar.bits.w0.full_duplex;
-		esr.bits.link_1000hdx = pcs_anar.bits.w0.half_duplex;
-		*value = esr.value;
-		break;
-	case MII_ANAR:
-		PCS_REG_RD(handle, portn, PCS_MII_ADVERT_REG, &val);
-		pcs_anar.value = (uint16_t)val;
-		anar.value = 0;
-		anar.bits.cap_pause = pcs_anar.bits.w0.pause;
-		anar.bits.cap_asmpause = pcs_anar.bits.w0.asm_pause;
-		*value = anar.value;
-		break;
-	case MII_ANLPAR:
-		PCS_REG_RD(handle, portn, PCS_MII_LPA_REG, &val);
-		pcs_anlpar.value = (uint16_t)val;
-		anlpar.bits.cap_pause = pcs_anlpar.bits.w0.pause;
-		anlpar.bits.cap_asmpause = pcs_anlpar.bits.w0.asm_pause;
-		*value = anlpar.value;
-		break;
-	case MII_ANER:
-		PCS_REG_RD(handle, portn, PCS_MII_ADVERT_REG, &val);
-		pcs_anar.value = (uint16_t)val;
-		aner.value = 0;
-		aner.bits.lp_an_able = pcs_anar.bits.w0.full_duplex |
-						pcs_anar.bits.w0.half_duplex;
-		*value = aner.value;
-		break;
-	case MII_GSR:
-		PCS_REG_RD(handle, portn, PCS_MII_LPA_REG, &val);
-		pcs_anar.value = (uint16_t)val;
-		gsr.value = 0;
-		gsr.bits.link_1000fdx = pcs_anar.bits.w0.full_duplex;
-		gsr.bits.link_1000hdx = pcs_anar.bits.w0.half_duplex;
-		*value = gsr.value;
-		break;
-	default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_mac_pcs_mii_read"
-				    " Invalid Input: xcvr_reg <0x%x>",
-				    xcvr_reg));
-		return (NPI_FAILURE | NPI_MAC_REG_INVALID(portn));
-	}
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_mac_pcs_mii_write(npi_handle_t handle, uint8_t portn, uint8_t xcvr_reg,
-			uint16_t value)
-{
-	pcs_anar_t pcs_anar;
-	mii_anar_t anar;
-	mii_gcr_t gcr;
-	uint64_t val;
-
-	ASSERT(IS_PORT_NUM_VALID(portn));
-
-	switch (xcvr_reg) {
-	case MII_BMCR:
-		val = (uint16_t)value;
-		PCS_REG_WR(handle, portn, PCS_MII_CTRL_REG, val);
-		break;
-	case MII_ANAR:
-		PCS_REG_RD(handle, portn, PCS_MII_ADVERT_REG, &val);
-		pcs_anar.value = (uint16_t)val;
-		anar.value = value;
-		pcs_anar.bits.w0.asm_pause = anar.bits.cap_asmpause;
-		pcs_anar.bits.w0.pause = anar.bits.cap_pause;
-		val = pcs_anar.value;
-		PCS_REG_WR(handle, portn, PCS_MII_ADVERT_REG, val);
-		break;
-	case MII_GCR:
-		PCS_REG_RD(handle, portn, PCS_MII_ADVERT_REG, &val);
-		pcs_anar.value = (uint16_t)val;
-		gcr.value = value;
-		pcs_anar.bits.w0.full_duplex = gcr.bits.link_1000fdx;
-		pcs_anar.bits.w0.half_duplex = gcr.bits.link_1000hdx;
-		val = pcs_anar.value;
-		PCS_REG_WR(handle, portn, PCS_MII_ADVERT_REG, val);
-		break;
-	default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_mac_pcs_mii_write"
-				    " Invalid Input: xcvr_reg <0x%x>",
-				    xcvr_reg));
-		return (NPI_FAILURE | NPI_MAC_REG_INVALID(portn));
-	}
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_mac_mif_link_intr_enable(npi_handle_t handle, uint8_t portn,
-				uint8_t xcvr_reg, uint16_t mask)
-{
-	mif_cfg_t mif_cfg;
-
-	ASSERT(IS_PORT_NUM_VALID(portn));
-
-	ASSERT(xcvr_reg <= NXGE_MAX_MII_REGS);
-	if (xcvr_reg > NXGE_MAX_MII_REGS) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_mac_mif_link_intr_enable"
-				    " Invalid Input: xcvr_reg <0x%x>",
-				    xcvr_reg));
-		return (NPI_FAILURE | NPI_MAC_REG_INVALID(portn));
-	}
-
-	MIF_REG_RD(handle, MIF_CONFIG_REG, &mif_cfg.value);
-
-	mif_cfg.bits.w0.phy_addr = portn;		/* Port number */
-	mif_cfg.bits.w0.reg_addr = xcvr_reg;		/* Register address */
-	mif_cfg.bits.w0.indirect_md = 0; 		/* Clause 22 */
-	mif_cfg.bits.w0.poll_en = 1;
-
-	MIF_REG_WR(handle, MIF_MASK_REG, ~mask);
-	MIF_REG_WR(handle, MIF_CONFIG_REG, mif_cfg.value);
-
-	NXGE_DELAY(20);
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_mac_mif_mdio_link_intr_enable(npi_handle_t handle, uint8_t portn,
-			uint8_t device, uint16_t xcvr_reg, uint16_t mask)
-{
-	mif_cfg_t mif_cfg;
-	mif_frame_t frame;
-	uint_t delay;
-
-	ASSERT(IS_PORT_NUM_VALID(portn));
-
-	frame.bits.w0.st = 0;		/* Clause 45 */
-	frame.bits.w0.op = 0;		/* Select address */
-	frame.bits.w0.phyad = portn;	/* Port number */
-	frame.bits.w0.regad = device;	/* Device number */
-	frame.bits.w0.ta_msb = 1;
-	frame.bits.w0.ta_lsb = 0;
-	frame.bits.w0.data = xcvr_reg;	/* register address */
-
-	MIF_REG_WR(handle, MIF_OUTPUT_FRAME_REG, frame.value);
-
-	delay = 0;
-	MIF_WAIT_REG(handle, frame, delay, MIF_DELAY, MAX_PIO_RETRIES);
-	if (delay == MAX_PIO_RETRIES)
-		return (NPI_FAILURE);
-
-	MIF_REG_RD(handle, MIF_CONFIG_REG, &mif_cfg.value);
-
-	mif_cfg.bits.w0.phy_addr = portn;		/* Port number */
-	mif_cfg.bits.w0.reg_addr = device;		/* Device number (Clause 45) */
-	mif_cfg.bits.w0.indirect_md = 1; 		/* Clause 45 */
-	mif_cfg.bits.w0.poll_en = 1;
-
-	MIF_REG_WR(handle, MIF_MASK_REG, ~mask);
-	MIF_REG_WR(handle, MIF_CONFIG_REG, mif_cfg.value);
-
-	NXGE_DELAY(20);
-
-	return (NPI_SUCCESS);
-}
-
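For orientation, here is a minimal sketch of how a caller might arm MIF link polling on a Clause 45 PHY through the routine above; the MMD number, register address, and status-bit mask are placeholders chosen for illustration, not values taken from the driver.

static npi_status_t
example_arm_mdio_link_poll(npi_handle_t handle)
{
	uint8_t  portn = 0;		/* port / PHY address */
	uint8_t  device = 1;		/* hypothetical MMD (PMA/PMD) */
	uint16_t status_reg = 1;	/* hypothetical status register */
	uint16_t link_mask = 0x0004;	/* hypothetical link-status bit */

	return (npi_mac_mif_mdio_link_intr_enable(handle, portn,
	    device, status_reg, link_mask));
}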
-void
-npi_mac_mif_set_indirect_mode(npi_handle_t handle, boolean_t on_off)
-{
-	mif_cfg_t mif_cfg;
-
-	MIF_REG_RD(handle, MIF_CONFIG_REG, &mif_cfg.value);
-	mif_cfg.bits.w0.indirect_md = on_off;
-	MIF_REG_WR(handle, MIF_CONFIG_REG, mif_cfg.value);
-}
-
-npi_status_t
-npi_bmac_send_pause(npi_handle_t handle, uint8_t portn, uint16_t pause_time)
-{
-	uint64_t val;
-
-	ASSERT(IS_BMAC_PORT_NUM_VALID(portn));
-
-	val = MAC_SEND_PAUSE_SEND | pause_time;
-	BMAC_REG_WR(handle, portn, MAC_SEND_PAUSE_REG, val);
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_xmac_xif_led(npi_handle_t handle, uint8_t portn, boolean_t on_off)
-{
-	uint64_t val = 0;
-
-	ASSERT(IS_XMAC_PORT_NUM_VALID(portn));
-
-	XMAC_REG_RD(handle, portn, XMAC_CONFIG_REG, &val);
-
-	if (on_off) {
-		val |= XMAC_XIF_LED_POLARITY;
-		val &= ~XMAC_XIF_FORCE_LED_ON;
-	} else {
-		val &= ~XMAC_XIF_LED_POLARITY;
-		val |= XMAC_XIF_FORCE_LED_ON;
-	}
-
-	XMAC_REG_WR(handle, portn, XMAC_CONFIG_REG, val);
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_xmac_zap_tx_counters(npi_handle_t handle, uint8_t portn)
-{
-	ASSERT(IS_XMAC_PORT_NUM_VALID(portn));
-
-	XMAC_REG_WR(handle, portn, XTXMAC_FRM_CNT_REG, 0);
-	XMAC_REG_WR(handle, portn, XTXMAC_BYTE_CNT_REG, 0);
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_xmac_zap_rx_counters(npi_handle_t handle, uint8_t portn)
-{
-	ASSERT(IS_XMAC_PORT_NUM_VALID(portn));
-
-	XMAC_REG_WR(handle, portn, XRXMAC_BT_CNT_REG, 0);
-	XMAC_REG_WR(handle, portn, XRXMAC_BC_FRM_CNT_REG, 0);
-	XMAC_REG_WR(handle, portn, XRXMAC_MC_FRM_CNT_REG, 0);
-	XMAC_REG_WR(handle, portn, XRXMAC_FRAG_CNT_REG, 0);
-	XMAC_REG_WR(handle, portn, XRXMAC_HIST_CNT1_REG, 0);
-	XMAC_REG_WR(handle, portn, XRXMAC_HIST_CNT2_REG, 0);
-	XMAC_REG_WR(handle, portn, XRXMAC_HIST_CNT3_REG, 0);
-	XMAC_REG_WR(handle, portn, XRXMAC_HIST_CNT4_REG, 0);
-	XMAC_REG_WR(handle, portn, XRXMAC_HIST_CNT5_REG, 0);
-	XMAC_REG_WR(handle, portn, XRXMAC_HIST_CNT6_REG, 0);
-	XMAC_REG_WR(handle, portn, XRXMAC_MPSZER_CNT_REG, 0);
-	XMAC_REG_WR(handle, portn, XRXMAC_CRC_ER_CNT_REG, 0);
-	XMAC_REG_WR(handle, portn, XRXMAC_CD_VIO_CNT_REG, 0);
-	XMAC_REG_WR(handle, portn, XRXMAC_AL_ER_CNT_REG, 0);
-	XMAC_REG_WR(handle, portn, XMAC_LINK_FLT_CNT_REG, 0);
-
-	return (NPI_SUCCESS);
-}
--- a/usr/src/uts/sun4v/io/nxge/npi/npi_mac.h	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,573 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef _NPI_MAC_H
-#define	_NPI_MAC_H
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#ifdef	__cplusplus
-extern "C" {
-#endif
-
-#include <npi.h>
-#include <nxge_mac_hw.h>
-#include <nxge_mii.h>
-
-typedef struct _npi_mac_addr {
-	uint16_t	w0;
-	uint16_t	w1;
-	uint16_t	w2;
-} npi_mac_addr_t;
-
-typedef enum npi_mac_attr {
-	MAC_PORT_MODE = 0,
-	MAC_PORT_FRAME_SIZE,
-	MAC_PORT_ADDR,
-	MAC_PORT_ADDR_FILTER,
-	MAC_PORT_ADDR_FILTER_MASK,
-	XMAC_PORT_IPG,
-	XMAC_10G_PORT_IPG,
-	BMAC_PORT_MAX_BURST_SIZE,
-	BMAC_PORT_PA_SIZE,
-	BMAC_PORT_CTRL_TYPE
-} npi_mac_attr_t;
-
-/* MAC Mode options */
-
-typedef enum npi_mac_mode_e {
-	MAC_MII_MODE = 0,
-	MAC_GMII_MODE,
-	MAC_XGMII_MODE
-} npi_mac_mode_t;
-
-typedef enum npi_mac_reset_e {
-	TX_MAC_RESET = 1,
-	RX_MAC_RESET,
-	XTX_MAC_REG_RESET,
-	XRX_MAC_REG_RESET,
-	XTX_MAC_LOGIC_RESET,
-	XRX_MAC_LOGIC_RESET,
-	XTX_MAC_RESET_ALL,
-	XRX_MAC_RESET_ALL,
-	BMAC_RESET_ALL,
-	XMAC_RESET_ALL
-} npi_mac_reset_t;
-
-typedef enum xmac_tx_iconfig_e {
-	ICFG_XMAC_TX_FRAME_XMIT 	= XMAC_TX_FRAME_XMIT,
-	ICFG_XMAC_TX_UNDERRUN		= XMAC_TX_UNDERRUN,
-	ICFG_XMAC_TX_MAX_PACKET_ERR	= XMAC_TX_MAX_PACKET_ERR,
-	ICFG_XMAC_TX_OVERFLOW		= XMAC_TX_OVERFLOW,
-	ICFG_XMAC_TX_FIFO_XFR_ERR	= XMAC_TX_FIFO_XFR_ERR,
-	ICFG_XMAC_TX_BYTE_CNT_EXP	= XMAC_TX_BYTE_CNT_EXP,
-	ICFG_XMAC_TX_FRAME_CNT_EXP	= XMAC_TX_FRAME_CNT_EXP,
-	ICFG_XMAC_TX_ALL = (XMAC_TX_FRAME_XMIT | XMAC_TX_UNDERRUN |
-				XMAC_TX_MAX_PACKET_ERR | XMAC_TX_OVERFLOW |
-				XMAC_TX_FIFO_XFR_ERR |  XMAC_TX_BYTE_CNT_EXP |
-				XMAC_TX_FRAME_CNT_EXP)
-} xmac_tx_iconfig_t;
-
-typedef enum xmac_rx_iconfig_e {
-	ICFG_XMAC_RX_FRAME_RCVD		= XMAC_RX_FRAME_RCVD,
-	ICFG_XMAC_RX_OVERFLOW		= XMAC_RX_OVERFLOW,
-	ICFG_XMAC_RX_UNDERFLOW		= XMAC_RX_UNDERFLOW,
-	ICFG_XMAC_RX_CRC_ERR_CNT_EXP	= XMAC_RX_CRC_ERR_CNT_EXP,
-	ICFG_XMAC_RX_LEN_ERR_CNT_EXP	= XMAC_RX_LEN_ERR_CNT_EXP,
-	ICFG_XMAC_RX_VIOL_ERR_CNT_EXP	= XMAC_RX_VIOL_ERR_CNT_EXP,
-	ICFG_XMAC_RX_OCT_CNT_EXP	= XMAC_RX_OCT_CNT_EXP,
-	ICFG_XMAC_RX_HST_CNT1_EXP	= XMAC_RX_HST_CNT1_EXP,
-	ICFG_XMAC_RX_HST_CNT2_EXP	= XMAC_RX_HST_CNT2_EXP,
-	ICFG_XMAC_RX_HST_CNT3_EXP	= XMAC_RX_HST_CNT3_EXP,
-	ICFG_XMAC_RX_HST_CNT4_EXP	= XMAC_RX_HST_CNT4_EXP,
-	ICFG_XMAC_RX_HST_CNT5_EXP	= XMAC_RX_HST_CNT5_EXP,
-	ICFG_XMAC_RX_HST_CNT6_EXP	= XMAC_RX_HST_CNT6_EXP,
-	ICFG_XMAC_RX_BCAST_CNT_EXP	= XMAC_RX_BCAST_CNT_EXP,
-	ICFG_XMAC_RX_MCAST_CNT_EXP	= XMAC_RX_MCAST_CNT_EXP,
-	ICFG_XMAC_RX_FRAG_CNT_EXP	= XMAC_RX_FRAG_CNT_EXP,
-	ICFG_XMAC_RX_ALIGNERR_CNT_EXP	= XMAC_RX_ALIGNERR_CNT_EXP,
-	ICFG_XMAC_RX_LINK_FLT_CNT_EXP	= XMAC_RX_LINK_FLT_CNT_EXP,
-	ICFG_XMAC_RX_HST_CNT7_EXP	= XMAC_RX_HST_CNT7_EXP,
-	ICFG_XMAC_RX_REMOTE_FLT_DET	= XMAC_RX_REMOTE_FLT_DET,
-	ICFG_XMAC_RX_LOCAL_FLT_DET	= XMAC_RX_LOCAL_FLT_DET,
-	ICFG_XMAC_RX_ALL = (XMAC_RX_FRAME_RCVD | XMAC_RX_OVERFLOW |
-				XMAC_RX_UNDERFLOW | XMAC_RX_CRC_ERR_CNT_EXP |
-				XMAC_RX_LEN_ERR_CNT_EXP |
-				XMAC_RX_VIOL_ERR_CNT_EXP |
-				XMAC_RX_OCT_CNT_EXP | XMAC_RX_HST_CNT1_EXP |
-				XMAC_RX_HST_CNT2_EXP | XMAC_RX_HST_CNT3_EXP |
-				XMAC_RX_HST_CNT4_EXP | XMAC_RX_HST_CNT5_EXP |
-				XMAC_RX_HST_CNT6_EXP | XMAC_RX_BCAST_CNT_EXP |
-				XMAC_RX_MCAST_CNT_EXP | XMAC_RX_FRAG_CNT_EXP |
-				XMAC_RX_ALIGNERR_CNT_EXP |
-				XMAC_RX_LINK_FLT_CNT_EXP |
-				XMAC_RX_HST_CNT7_EXP |
-				XMAC_RX_REMOTE_FLT_DET | XMAC_RX_LOCAL_FLT_DET)
-} xmac_rx_iconfig_t;
-
-typedef enum xmac_ctl_iconfig_e {
-	ICFG_XMAC_CTRL_PAUSE_RCVD	= XMAC_CTRL_PAUSE_RCVD,
-	ICFG_XMAC_CTRL_PAUSE_STATE	= XMAC_CTRL_PAUSE_STATE,
-	ICFG_XMAC_CTRL_NOPAUSE_STATE	= XMAC_CTRL_NOPAUSE_STATE,
-	ICFG_XMAC_CTRL_ALL = (XMAC_CTRL_PAUSE_RCVD | XMAC_CTRL_PAUSE_STATE |
-				XMAC_CTRL_NOPAUSE_STATE)
-} xmac_ctl_iconfig_t;
-
-
-typedef enum bmac_tx_iconfig_e {
-	ICFG_BMAC_TX_FRAME_SENT 	= MAC_TX_FRAME_XMIT,
-	ICFG_BMAC_TX_UNDERFLOW		= MAC_TX_UNDERRUN,
-	ICFG_BMAC_TX_MAXPKTSZ_ERR	= MAC_TX_MAX_PACKET_ERR,
-	ICFG_BMAC_TX_BYTE_CNT_EXP	= MAC_TX_BYTE_CNT_EXP,
-	ICFG_BMAC_TX_FRAME_CNT_EXP	= MAC_TX_FRAME_CNT_EXP,
-	ICFG_BMAC_TX_ALL = (MAC_TX_FRAME_XMIT | MAC_TX_UNDERRUN |
-				MAC_TX_MAX_PACKET_ERR | MAC_TX_BYTE_CNT_EXP |
-				MAC_TX_FRAME_CNT_EXP)
-} bmac_tx_iconfig_t;
-
-typedef enum bmac_rx_iconfig_e {
-	ICFG_BMAC_RX_FRAME_RCVD		= MAC_RX_FRAME_RECV,
-	ICFG_BMAC_RX_OVERFLOW		= MAC_RX_OVERFLOW,
-	ICFG_BMAC_RX_FRAME_CNT_EXP	= MAC_RX_FRAME_COUNT,
-	ICFG_BMAC_RX_CRC_ERR_CNT_EXP	= MAC_RX_ALIGN_ERR,
-	ICFG_BMAC_RX_LEN_ERR_CNT_EXP	= MAC_RX_CRC_ERR,
-	ICFG_BMAC_RX_VIOL_ERR_CNT_EXP	= MAC_RX_LEN_ERR,
-	ICFG_BMAC_RX_BYTE_CNT_EXP	= MAC_RX_VIOL_ERR,
-	ICFG_BMAC_RX_ALIGNERR_CNT_EXP	= MAC_RX_BYTE_CNT_EXP,
-	ICFG_BMAC_RX_ALL = (MAC_RX_FRAME_RECV | MAC_RX_OVERFLOW |
-				MAC_RX_FRAME_COUNT | MAC_RX_ALIGN_ERR |
-				MAC_RX_CRC_ERR | MAC_RX_LEN_ERR |
-				MAC_RX_VIOL_ERR | MAC_RX_BYTE_CNT_EXP)
-} bmac_rx_iconfig_t;
-
-typedef enum bmac_ctl_iconfig_e {
-	ICFG_BMAC_CTL_RCVPAUSE		= MAC_CTRL_PAUSE_RECEIVED,
-	ICFG_BMAC_CTL_INPAUSE_ST	= MAC_CTRL_PAUSE_STATE,
-	ICFG_BMAC_CTL_INNOTPAUSE_ST	= MAC_CTRL_NOPAUSE_STATE,
-	ICFG_BMAC_CTL_ALL = (MAC_CTRL_PAUSE_RECEIVED | MAC_CTRL_PAUSE_STATE |
-				MAC_CTRL_NOPAUSE_STATE)
-} bmac_ctl_iconfig_t;
-
-typedef	enum xmac_tx_config_e {
-	CFG_XMAC_TX			= 0x00000001,
-	CFG_XMAC_TX_STRETCH_MODE	= 0x00000002,
-	CFG_XMAC_VAR_IPG		= 0x00000004,
-	CFG_XMAC_TX_CRC			= 0x00000008,
-	CFG_XMAC_TX_ALL			= 0x0000000F
-} xmac_tx_config_t;
-
-typedef enum xmac_rx_config_e {
-	CFG_XMAC_RX			= 0x00000001,
-	CFG_XMAC_RX_PROMISCUOUS		= 0x00000002,
-	CFG_XMAC_RX_PROMISCUOUSGROUP	= 0x00000004,
-	CFG_XMAC_RX_ERRCHK		= 0x00000008,
-	CFG_XMAC_RX_CRC_CHK		= 0x00000010,
-	CFG_XMAC_RX_RESV_MULTICAST	= 0x00000020,
-	CFG_XMAC_RX_CODE_VIO_CHK	= 0x00000040,
-	CFG_XMAC_RX_HASH_FILTER		= 0x00000080,
-	CFG_XMAC_RX_ADDR_FILTER		= 0x00000100,
-	CFG_XMAC_RX_STRIP_CRC		= 0x00000200,
-	CFG_XMAC_RX_PAUSE		= 0x00000400,
-	CFG_XMAC_RX_PASS_FC_FRAME	= 0x00000800,
-	CFG_XMAC_RX_MAC2IPP_PKT_CNT	= 0x00001000,
-	CFG_XMAC_RX_ALL			= 0x00001FFF
-} xmac_rx_config_t;
-
-typedef	enum xmac_xif_config_e {
-	CFG_XMAC_XIF_LED_FORCE		= 0x00000001,
-	CFG_XMAC_XIF_LED_POLARITY	= 0x00000002,
-	CFG_XMAC_XIF_SEL_POR_CLK_SRC	= 0x00000004,
-	CFG_XMAC_XIF_TX_OUTPUT		= 0x00000008,
-	CFG_XMAC_XIF_LOOPBACK		= 0x00000010,
-	CFG_XMAC_XIF_LFS		= 0x00000020,
-	CFG_XMAC_XIF_XPCS_BYPASS	= 0x00000040,
-	CFG_XMAC_XIF_1G_PCS_BYPASS	= 0x00000080,
-	CFG_XMAC_XIF_SEL_CLK_25MHZ	= 0x00000100,
-	CFG_XMAC_XIF_ALL		= 0x000001FF
-} xmac_xif_config_t;
-
-typedef	enum bmac_tx_config_e {
-	CFG_BMAC_TX			= 0x00000001,
-	CFG_BMAC_TX_CRC			= 0x00000002,
-	CFG_BMAC_TX_ALL			= 0x00000003
-} bmac_tx_config_t;
-
-typedef enum bmac_rx_config_e {
-	CFG_BMAC_RX			= 0x00000001,
-	CFG_BMAC_RX_STRIP_PAD		= 0x00000002,
-	CFG_BMAC_RX_STRIP_CRC		= 0x00000004,
-	CFG_BMAC_RX_PROMISCUOUS		= 0x00000008,
-	CFG_BMAC_RX_PROMISCUOUSGROUP	= 0x00000010,
-	CFG_BMAC_RX_HASH_FILTER		= 0x00000020,
-	CFG_BMAC_RX_ADDR_FILTER		= 0x00000040,
-	CFG_BMAC_RX_DISCARD_ON_ERR	= 0x00000080,
-	CFG_BMAC_RX_ALL			= 0x000000FF
-} bmac_rx_config_t;
-
-typedef	enum bmac_xif_config_e {
-	CFG_BMAC_XIF_TX_OUTPUT		= 0x00000001,
-	CFG_BMAC_XIF_LOOPBACK		= 0x00000002,
-	CFG_BMAC_XIF_GMII_MODE		= 0x00000008,
-	CFG_BMAC_XIF_LINKLED		= 0x00000020,
-	CFG_BMAC_XIF_LED_POLARITY	= 0x00000040,
-	CFG_BMAC_XIF_SEL_CLK_25MHZ	= 0x00000080,
-	CFG_BMAC_XIF_ALL		= 0x000000FF
-} bmac_xif_config_t;
-
-
-typedef enum xmac_ipg_e {
-	XGMII_IPG_12_15 = 0,
-	XGMII_IPG_16_19,
-	XGMII_IPG_20_23,
-	MII_GMII_IPG_12,
-	MII_GMII_IPG_13,
-	MII_GMII_IPG_14,
-	MII_GMII_IPG_15,
-	MII_GMII_IPG_16
-} xmac_ipg_t;
-
-typedef	enum xpcs_reg_e {
-	XPCS_REG_CONTROL1,
-	XPCS_REG_STATUS1,
-	XPCS_REG_DEVICE_ID,
-	XPCS_REG_SPEED_ABILITY,
-	XPCS_REG_DEVICE_IN_PKG,
-	XPCS_REG_CONTROL2,
-	XPCS_REG_STATUS2,
-	XPCS_REG_PKG_ID,
-	XPCS_REG_STATUS,
-	XPCS_REG_TEST_CONTROL,
-	XPCS_REG_CONFIG_VENDOR1,
-	XPCS_REG_DIAG_VENDOR2,
-	XPCS_REG_MASK1,
-	XPCS_REG_PACKET_COUNTER,
-	XPCS_REG_TX_STATEMACHINE,
-	XPCS_REG_DESCWERR_COUNTER,
-	XPCS_REG_SYMBOL_ERR_L0_1_COUNTER,
-	XPCS_REG_SYMBOL_ERR_L2_3_COUNTER,
-	XPCS_REG_TRAINING_VECTOR
-} xpcs_reg_t;
-
-#define	IS_XMAC_PORT_NUM_VALID(portn)\
-	((portn == XMAC_PORT_0) || (portn == XMAC_PORT_1))
-
-#define	IS_BMAC_PORT_NUM_VALID(portn)\
-	((portn == BMAC_PORT_0) || (portn == BMAC_PORT_1))
-
-#define	XMAC_REG_WR(handle, portn, reg, val)\
-	NXGE_REG_WR64(handle, XMAC_REG_ADDR((portn), (reg)), (val))
-
-#define	XMAC_REG_RD(handle, portn, reg, val_p)\
-	NXGE_REG_RD64(handle, XMAC_REG_ADDR((portn), (reg)), (val_p))
-
-#define	BMAC_REG_WR(handle, portn, reg, val)\
-	NXGE_REG_WR64(handle, BMAC_REG_ADDR((portn), (reg)), (val))
-
-#define	BMAC_REG_RD(handle, portn, reg, val_p)\
-	NXGE_REG_RD64(handle, BMAC_REG_ADDR((portn), (reg)), (val_p))
-
-#define	PCS_REG_WR(handle, portn, reg, val)\
-	NXGE_REG_WR64(handle, PCS_REG_ADDR((portn), (reg)), (val))
-
-#define	PCS_REG_RD(handle, portn, reg, val_p)\
-	NXGE_REG_RD64(handle, PCS_REG_ADDR((portn), (reg)), (val_p))
-
-#define	XPCS_REG_WR(handle, portn, reg, val)\
-	NXGE_REG_WR64(handle, XPCS_ADDR((portn), (reg)), (val))
-
-#define	XPCS_REG_RD(handle, portn, reg, val_p)\
-	NXGE_REG_RD64(handle, XPCS_ADDR((portn), (reg)), (val_p))
-
-#define	MIF_REG_WR(handle, reg, val)\
-	NXGE_REG_WR64(handle, MIF_ADDR((reg)), (val))
-
-#define	MIF_REG_RD(handle, reg, val_p)\
-	NXGE_REG_RD64(handle, MIF_ADDR((reg)), (val_p))
-
-
-/*
- * When MIF_REG_RD is called inside a poll loop and the poll takes a
- * very long time to complete, each iteration prints an rt_show_reg
- * result on the screen, and the rtrace "register show" output can
- * become too messy to read.  The solution is to call MIF_REG_RD_NO_SHOW
- * instead of MIF_REG_RD in a polling loop.  When COSIM or REG_SHOW is
- * not defined, this macro is the same as MIF_REG_RD.  When both COSIM
- * and REG_SHOW are defined, this macro calls NXGE_REG_RD64_NO_SHOW,
- * which does not call rt_show_reg.
- */
-#if defined(COSIM) && defined(REG_SHOW)
-#define	MIF_REG_RD_NO_SHOW(handle, reg, val_p)\
-	NXGE_REG_RD64_NO_SHOW(handle, MIF_ADDR((reg)), (val_p))
-#else
-	/*	If not COSIM or REG_SHOW, still show */
-#define	MIF_REG_RD_NO_SHOW(handle, reg, val_p)\
-	NXGE_REG_RD64(handle, MIF_ADDR((reg)), (val_p))
-#endif
-
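A sketch of the polling pattern the comment above describes; the completion bit tested here (ta_lsb) is an assumption for illustration, since the driver's real wait condition is hidden inside MIF_WAIT_REG.

static int
example_mif_poll(npi_handle_t handle)
{
	mif_frame_t frame;
	uint_t tries;

	for (tries = 0; tries < MAX_PIO_RETRIES; tries++) {
		/* _NO_SHOW keeps the rtrace output readable under COSIM */
		MIF_REG_RD_NO_SHOW(handle, MIF_OUTPUT_FRAME_REG,
		    &frame.value);
		if (frame.bits.w0.ta_lsb)	/* assumed completion bit */
			return (0);
		NXGE_DELAY(MIF_DELAY);
	}
	return (-1);
}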
-#define	ESR_REG_WR(handle, reg, val)\
-	NXGE_REG_WR64(handle, ESR_ADDR((reg)), (val))
-
-#define	ESR_REG_RD(handle, reg, val_p)\
-	NXGE_REG_RD64(handle, ESR_ADDR((reg)), (val_p))
-
-/* Macros to read/modify MAC attributes */
-
-#define	SET_MAC_ATTR1(handle, p, portn, attr, val, stat) {\
-	p.type = attr;\
-	p.idata[0] = (uint32_t)val;\
-	stat = npi_mac_port_attr(handle, OP_SET, portn, (npi_attr_t *)&p);\
-}
-
-#define	SET_MAC_ATTR2(handle, p, portn, attr, val0, val1, stat) {\
-	p.type = attr;\
-	p.idata[0] = (uint32_t)val0;\
-	p.idata[1] = (uint32_t)val1;\
-	stat = npi_mac_port_attr(handle, OP_SET, portn, (npi_attr_t *)&p);\
-}
-
-#define	SET_MAC_ATTR3(handle, p, portn, attr, val0, val1, val2, stat) {\
-	p.type = attr;\
-	p.idata[0] = (uint32_t)val0;\
-	p.idata[1] = (uint32_t)val1;\
-	p.idata[2] = (uint32_t)val2;\
-	stat = npi_mac_port_attr(handle, OP_SET, portn, (npi_attr_t *)&p);\
-}
-
-#define	SET_MAC_ATTR4(handle, p, portn, attr, val0, val1, val2, val3, stat) {\
-	p.type = attr;\
-	p.idata[0] = (uint32_t)val0;\
-	p.idata[1] = (uint32_t)val1;\
-	p.idata[2] = (uint32_t)val2;\
-	p.idata[3] = (uint32_t)val3;\
-	stat = npi_mac_port_attr(handle, OP_SET, portn, (npi_attr_t *)&p);\
-}
-
-#define	GET_MAC_ATTR1(handle, p, portn, attr, val, stat) {\
-	p.type = attr;\
-	if ((stat = npi_mac_port_attr(handle, OP_GET, portn, \
-					(npi_attr_t *)&p)) == NPI_SUCCESS) {\
-		val = p.odata[0];\
-	}\
-}
-
-#define	GET_MAC_ATTR2(handle, p, portn, attr, val0, val1, stat) {\
-	p.type = attr;\
-	if ((stat = npi_mac_port_attr(handle, OP_GET, portn, \
-					(npi_attr_t *)&p)) == NPI_SUCCESS) {\
-		val0 = p.odata[0];\
-		val1 = p.odata[1];\
-	}\
-}
-
-#define	GET_MAC_ATTR3(handle, p, portn, attr, val0, val1, \
-			val2, stat) {\
-	p.type = attr;\
-	if ((stat = npi_mac_port_attr(handle, OP_GET, portn, \
-					(npi_attr_t *)&p)) == NPI_SUCCESS) {\
-		val0 = p.odata[0];\
-		val1 = p.odata[1];\
-		val2 = p.odata[2];\
-	}\
-}
-
-#define	GET_MAC_ATTR4(handle, p, portn, attr, val0, val1, \
-			val2, val3, stat) {\
-	p.type = attr;\
-	if ((stat = npi_mac_port_attr(handle, OP_GET, portn, \
-					(npi_attr_t *)&p)) == NPI_SUCCESS) {\
-		val0 = p.odata[0];\
-		val1 = p.odata[1];\
-		val2 = p.odata[2];\
-		val3 = p.odata[3];\
-	}\
-}
-
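The attribute macros above wrap npi_mac_port_attr(); a minimal sketch of their use follows, assuming an npi_attr_t can be passed directly for `p` and using a placeholder frame size.

static void
example_frame_size(npi_handle_t handle, uint8_t portn)
{
	npi_attr_t	attr;		/* assumed to expose type/idata/odata */
	uint32_t	framesize = 0;
	npi_status_t	stat;

	/* placeholder value of 1522 bytes; real callers compute this */
	SET_MAC_ATTR1(handle, attr, portn, MAC_PORT_FRAME_SIZE, 1522, stat);
	if (stat == NPI_SUCCESS)
		GET_MAC_ATTR1(handle, attr, portn, MAC_PORT_FRAME_SIZE,
		    framesize, stat);
}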
-/* MAC specific errors */
-
-#define	MAC_PORT_ATTR_INVALID		0x50
-#define	MAC_RESET_MODE_INVALID		0x51
-#define	MAC_HASHTAB_ENTRY_INVALID	0x52
-#define	MAC_HOSTINFO_ENTRY_INVALID	0x53
-#define	MAC_ALT_ADDR_ENTRY_INVALID	0x54
-
-/* MAC error return macros */
-
-#define	NPI_MAC_PORT_INVALID(portn)	((MAC_BLK_ID << NPI_BLOCK_ID_SHIFT) |\
-					PORT_INVALID | IS_PORT | (portn << 12))
-#define	NPI_MAC_OPCODE_INVALID(portn)	((MAC_BLK_ID << NPI_BLOCK_ID_SHIFT) |\
-					OPCODE_INVALID |\
-					IS_PORT | (portn << 12))
-#define	NPI_MAC_HASHTAB_ENTRY_INVALID(portn)\
-					((MAC_BLK_ID << NPI_BLOCK_ID_SHIFT) |\
-					MAC_HASHTAB_ENTRY_INVALID |\
-					IS_PORT | (portn << 12))
-#define	NPI_MAC_HOSTINFO_ENTRY_INVALID(portn)\
-					((MAC_BLK_ID << NPI_BLOCK_ID_SHIFT) |\
-					MAC_HOSTINFO_ENTRY_INVALID |\
-					IS_PORT | (portn << 12))
-#define	NPI_MAC_ALT_ADDR_ENTRY_INVALID(portn)\
-					((MAC_BLK_ID << NPI_BLOCK_ID_SHIFT) |\
-					MAC_ALT_ADDR_ENTRY_INVALID |\
-					IS_PORT | (portn << 12))
-#define	NPI_MAC_PORT_ATTR_INVALID(portn)\
-					((MAC_BLK_ID << NPI_BLOCK_ID_SHIFT) |\
-					MAC_PORT_ATTR_INVALID |\
-					IS_PORT | (portn << 12))
-#define	NPI_MAC_RESET_MODE_INVALID(portn)\
-					((MAC_BLK_ID << NPI_BLOCK_ID_SHIFT) |\
-					MAC_RESET_MODE_INVALID |\
-					IS_PORT | (portn << 12))
-#define	NPI_MAC_PCS_REG_INVALID(portn)	((MAC_BLK_ID << NPI_BLOCK_ID_SHIFT) |\
-					REGISTER_INVALID |\
-					IS_PORT | (portn << 12))
-#define	NPI_TXMAC_RESET_FAILED(portn)	((TXMAC_BLK_ID << NPI_BLOCK_ID_SHIFT) |\
-					RESET_FAILED | IS_PORT | (portn << 12))
-#define	NPI_RXMAC_RESET_FAILED(portn)	((RXMAC_BLK_ID << NPI_BLOCK_ID_SHIFT) |\
-					RESET_FAILED | IS_PORT | (portn << 12))
-#define	NPI_MAC_CONFIG_INVALID(portn)	((MAC_BLK_ID << NPI_BLOCK_ID_SHIFT) |\
-					CONFIG_INVALID |\
-					IS_PORT | (portn << 12))
-#define	NPI_MAC_REG_INVALID(portn)	((MAC_BLK_ID << NPI_BLOCK_ID_SHIFT) |\
-					REGISTER_INVALID |\
-					IS_PORT | (portn << 12))
-#define	NPI_MAC_MII_READ_FAILED(portn)	((MIF_BLK_ID << NPI_BLOCK_ID_SHIFT) |\
-					READ_FAILED | IS_PORT | (portn << 12))
-#define	NPI_MAC_MII_WRITE_FAILED(portn)	((MIF_BLK_ID << NPI_BLOCK_ID_SHIFT) |\
-					WRITE_FAILED | IS_PORT | (portn << 12))
-
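Callers normally test these composed status values against NPI_SUCCESS; the sketch below pulls the port and error-code fields back out, with the field widths treated as assumptions of the sketch (the port sits at bit 12, per the macros above).

static void
example_decode_mac_error(npi_status_t status)
{
	if (status != NPI_SUCCESS) {
		uint8_t portn = (status >> 12) & 0x3;	/* assumed 2-bit port */
		uint8_t err = status & 0xff;		/* assumed low-byte code */

		/* e.g. err == MAC_PORT_ATTR_INVALID (0x50) */
		(void) portn;
		(void) err;
	}
}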
-/* library functions prototypes */
-
-/* general mac functions */
-npi_status_t npi_mac_hashtab_entry(npi_handle_t, io_op_t,
-				uint8_t, uint8_t, uint16_t *);
-npi_status_t npi_mac_hostinfo_entry(npi_handle_t, io_op_t,
-				uint8_t, uint8_t,
-				hostinfo_t *);
-npi_status_t npi_mac_altaddr_enable(npi_handle_t, uint8_t,
-				uint8_t);
-npi_status_t npi_mac_altaddr_disble(npi_handle_t, uint8_t,
-				uint8_t);
-npi_status_t npi_mac_altaddr_entry(npi_handle_t, io_op_t,
-				uint8_t, uint8_t,
-				npi_mac_addr_t *);
-npi_status_t npi_mac_port_attr(npi_handle_t, io_op_t, uint8_t,
-				npi_attr_t *);
-npi_status_t npi_mac_get_link_status(npi_handle_t, uint8_t,
-				boolean_t *);
-npi_status_t npi_mac_get_10g_link_status(npi_handle_t, uint8_t,
-				boolean_t *);
-npi_status_t npi_mac_mif_mii_read(npi_handle_t, uint8_t,
-				uint8_t, uint16_t *);
-npi_status_t npi_mac_mif_mii_write(npi_handle_t, uint8_t,
-				uint8_t, uint16_t);
-npi_status_t npi_mac_mif_link_intr_enable(npi_handle_t, uint8_t,
-				uint8_t, uint16_t);
-npi_status_t npi_mac_mif_mdio_read(npi_handle_t, uint8_t,
-				uint8_t, uint16_t,
-				uint16_t *);
-npi_status_t npi_mac_mif_mdio_write(npi_handle_t, uint8_t,
-				uint8_t, uint16_t,
-				uint16_t);
-npi_status_t npi_mac_mif_mdio_link_intr_enable(npi_handle_t,
-				uint8_t, uint8_t,
-				uint16_t, uint16_t);
-npi_status_t npi_mac_mif_link_intr_disable(npi_handle_t, uint8_t);
-npi_status_t npi_mac_pcs_mii_read(npi_handle_t, uint8_t,
-				uint8_t, uint16_t *);
-npi_status_t npi_mac_pcs_mii_write(npi_handle_t, uint8_t,
-				uint8_t, uint16_t);
-npi_status_t npi_mac_pcs_link_intr_enable(npi_handle_t, uint8_t);
-npi_status_t npi_mac_pcs_link_intr_disable(npi_handle_t, uint8_t);
-npi_status_t npi_mac_pcs_reset(npi_handle_t, uint8_t);
-
-/* xmac functions */
-npi_status_t npi_xmac_reset(npi_handle_t, uint8_t,
-				npi_mac_reset_t);
-npi_status_t npi_xmac_xif_config(npi_handle_t, config_op_t,
-				uint8_t, xmac_xif_config_t);
-npi_status_t npi_xmac_tx_config(npi_handle_t, config_op_t,
-				uint8_t, xmac_tx_config_t);
-npi_status_t npi_xmac_rx_config(npi_handle_t, config_op_t,
-				uint8_t, xmac_rx_config_t);
-npi_status_t npi_xmac_tx_iconfig(npi_handle_t, config_op_t,
-				uint8_t, xmac_tx_iconfig_t);
-npi_status_t npi_xmac_rx_iconfig(npi_handle_t, config_op_t,
-				uint8_t, xmac_rx_iconfig_t);
-npi_status_t npi_xmac_ctl_iconfig(npi_handle_t, config_op_t,
-				uint8_t, xmac_ctl_iconfig_t);
-npi_status_t npi_xmac_tx_get_istatus(npi_handle_t, uint8_t,
-				xmac_tx_iconfig_t *);
-npi_status_t npi_xmac_rx_get_istatus(npi_handle_t, uint8_t,
-				xmac_rx_iconfig_t *);
-npi_status_t npi_xmac_ctl_get_istatus(npi_handle_t, uint8_t,
-				xmac_ctl_iconfig_t *);
-npi_status_t npi_xmac_xpcs_reset(npi_handle_t, uint8_t);
-npi_status_t npi_xmac_xpcs_enable(npi_handle_t, uint8_t);
-npi_status_t npi_xmac_xpcs_disable(npi_handle_t, uint8_t);
-npi_status_t npi_xmac_xpcs_read(npi_handle_t, uint8_t,
-				uint8_t, uint32_t *);
-npi_status_t npi_xmac_xpcs_write(npi_handle_t, uint8_t,
-				uint8_t, uint32_t);
-npi_status_t npi_xmac_xpcs_link_intr_enable(npi_handle_t, uint8_t);
-npi_status_t npi_xmac_xpcs_link_intr_disable(npi_handle_t,
-				uint8_t);
-npi_status_t npi_xmac_xif_led(npi_handle_t, uint8_t,
-				boolean_t);
-npi_status_t npi_xmac_zap_tx_counters(npi_handle_t, uint8_t);
-npi_status_t npi_xmac_zap_rx_counters(npi_handle_t, uint8_t);
-
-/* bmac functions */
-npi_status_t npi_bmac_reset(npi_handle_t, uint8_t,
-				npi_mac_reset_t mode);
-npi_status_t npi_bmac_tx_config(npi_handle_t, config_op_t,
-				uint8_t, bmac_tx_config_t);
-npi_status_t npi_bmac_rx_config(npi_handle_t, config_op_t,
-				uint8_t, bmac_rx_config_t);
-npi_status_t npi_bmac_rx_iconfig(npi_handle_t, config_op_t,
-				uint8_t, bmac_rx_iconfig_t);
-npi_status_t npi_bmac_xif_config(npi_handle_t, config_op_t,
-				uint8_t, bmac_xif_config_t);
-npi_status_t npi_bmac_tx_iconfig(npi_handle_t, config_op_t,
-				uint8_t, bmac_tx_iconfig_t);
-npi_status_t npi_bmac_ctl_iconfig(npi_handle_t, config_op_t,
-				uint8_t, bmac_ctl_iconfig_t);
-npi_status_t npi_bmac_tx_get_istatus(npi_handle_t, uint8_t,
-				bmac_tx_iconfig_t *);
-npi_status_t npi_bmac_rx_get_istatus(npi_handle_t, uint8_t,
-				bmac_rx_iconfig_t *);
-npi_status_t npi_bmac_ctl_get_istatus(npi_handle_t, uint8_t,
-				bmac_ctl_iconfig_t *);
-npi_status_t npi_bmac_send_pause(npi_handle_t, uint8_t,
-				uint16_t);
-npi_status_t npi_mac_dump_regs(npi_handle_t, uint8_t);
-
-/* MIF common functions */
-void npi_mac_mif_set_indirect_mode(npi_handle_t, boolean_t);
-
-#ifdef	__cplusplus
-}
-#endif
-
-#endif	/* _NPI_MAC_H */
--- a/usr/src/uts/sun4v/io/nxge/npi/npi_rxdma.c	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,2287 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#include <npi_rxdma.h>
-#include <nxge_common.h>
-
-#define	 RXDMA_RESET_TRY_COUNT	4
-#define	 RXDMA_RESET_DELAY	5
-
-#define	 RXDMA_OP_DISABLE	0
-#define	 RXDMA_OP_ENABLE	1
-#define	 RXDMA_OP_RESET	2
-
-#define	 RCR_TIMEOUT_ENABLE	1
-#define	 RCR_TIMEOUT_DISABLE	2
-#define	 RCR_THRESHOLD	4
-
-/* the weight is assumed to be in units of byte frames */
-#define	WEIGHT_FACTOR 3/2
-
-uint64_t rdc_dmc_offset[] = {
-	RXDMA_CFIG1_REG, RXDMA_CFIG2_REG, RBR_CFIG_A_REG, RBR_CFIG_B_REG,
-	RBR_KICK_REG, RBR_STAT_REG, RBR_HDH_REG, RBR_HDL_REG,
-	RCRCFIG_A_REG, RCRCFIG_B_REG, RCRSTAT_A_REG, RCRSTAT_B_REG,
-	RCRSTAT_C_REG, RX_DMA_ENT_MSK_REG, RX_DMA_CTL_STAT_REG, RCR_FLSH_REG,
-	RXMISC_DISCARD_REG
-};
-
-const char *rdc_dmc_name[] = {
-	"RXDMA_CFIG1", "RXDMA_CFIG2", "RBR_CFIG_A", "RBR_CFIG_B",
-	"RBR_KICK", "RBR_STAT", "RBR_HDH", "RBR_HDL",
-	"RCRCFIG_A", "RCRCFIG_B", "RCRSTAT_A", "RCRSTAT_B",
-	"RCRSTAT_C", "RX_DMA_ENT_MSK", "RX_DMA_CTL_STAT", "RCR_FLSH",
-	"RXMISC_DISCARD"
-};
-
-uint64_t rdc_fzc_offset [] = {
-	RX_LOG_PAGE_VLD_REG, RX_LOG_PAGE_MASK1_REG, RX_LOG_PAGE_VAL1_REG,
-	RX_LOG_PAGE_MASK2_REG, RX_LOG_PAGE_VAL2_REG, RX_LOG_PAGE_RELO1_REG,
-	RX_LOG_PAGE_RELO2_REG, RX_LOG_PAGE_HDL_REG, RDC_RED_PARA_REG,
-	RED_DIS_CNT_REG
-};
-
-
-const char *rdc_fzc_name [] = {
-	"RX_LOG_PAGE_VLD", "RX_LOG_PAGE_MASK1", "RX_LOG_PAGE_VAL1",
-	"RX_LOG_PAGE_MASK2", "RX_LOG_PAGE_VAL2", "RX_LOG_PAGE_RELO1",
-	"RX_LOG_PAGE_RELO2", "RX_LOG_PAGE_HDL", "RDC_RED_PARA", "RED_DIS_CNT"
-};
-
-
-/*
- * Dump the MEM_ADDR register first so all the data registers
- * will have valid data buffer pointers.
- */
-uint64_t rx_fzc_offset[] = {
-	RX_DMA_CK_DIV_REG, DEF_PT0_RDC_REG, DEF_PT1_RDC_REG, DEF_PT2_RDC_REG,
-	DEF_PT3_RDC_REG, RX_ADDR_MD_REG, PT_DRR_WT0_REG, PT_DRR_WT1_REG,
-	PT_DRR_WT2_REG, PT_DRR_WT3_REG, PT_USE0_REG, PT_USE1_REG,
-	PT_USE2_REG, PT_USE3_REG, RED_RAN_INIT_REG, RX_ADDR_MD_REG,
-	RDMC_PRE_PAR_ERR_REG, RDMC_SHA_PAR_ERR_REG,
-	RDMC_MEM_DATA4_REG, RDMC_MEM_DATA3_REG, RDMC_MEM_DATA2_REG,
-	RDMC_MEM_DATA1_REG, RDMC_MEM_DATA0_REG,
-	RDMC_MEM_ADDR_REG,
-	RX_CTL_DAT_FIFO_STAT_REG, RX_CTL_DAT_FIFO_MASK_REG,
-	RX_CTL_DAT_FIFO_STAT_DBG_REG,
-	RDMC_TRAINING_VECTOR_REG,
-};
-
-
-const char *rx_fzc_name[] = {
-	"RX_DMA_CK_DIV", "DEF_PT0_RDC", "DEF_PT1_RDC", "DEF_PT2_RDC",
-	"DEF_PT3_RDC", "RX_ADDR_MD", "PT_DRR_WT0", "PT_DRR_WT1",
-	"PT_DRR_WT2", "PT_DRR_WT3", "PT_USE0", "PT_USE1",
-	"PT_USE2", "PT_USE3", "RED_RAN_INIT", "RX_ADDR_MD",
-	"RDMC_PRE_PAR_ERR", "RDMC_SHA_PAR_ERR",
-	"RDMC_MEM_DATA4", "RDMC_MEM_DATA3", "RDMC_MEM_DATA2",
-	"RDMC_MEM_DATA1", "RDMC_MEM_DATA0",
-	"RDMC_MEM_ADDR",
-	"RX_CTL_DAT_FIFO_STAT", "RX_CTL_DAT_FIFO_MASK",
-	"RX_CTL_DAT_FIFO_STAT_DBG",
-	"RDMC_TRAINING_VECTOR"
-};
-
-
-npi_status_t
-npi_rxdma_cfg_rdc_ctl(npi_handle_t handle, uint8_t rdc, uint8_t op);
-npi_status_t
-npi_rxdma_cfg_rdc_rcr_ctl(npi_handle_t handle, uint8_t rdc, uint8_t op,
-				uint16_t param);
-
-
-/*
- * npi_rxdma_dump_rdc_regs
- * Dumps the contents of rdc csrs and fzc registers
- *
- * Input:
- *      handle:	opaque handle interpreted by the underlying OS
- *         rdc:      RX DMA number
- *
- * return:
- *     NPI_SUCCESS
- *     NPI_RXDMA_RDC_INVALID
- *
- */
-npi_status_t
-npi_rxdma_dump_rdc_regs(npi_handle_t handle, uint8_t rdc)
-{
-
-	uint64_t value, offset;
-	int num_regs, i;
-#ifdef NPI_DEBUG
-	extern uint64_t npi_debug_level;
-	uint64_t old_npi_debug_level = npi_debug_level;
-#endif
-	ASSERT(RXDMA_CHANNEL_VALID(rdc));
-	if (!RXDMA_CHANNEL_VALID(rdc)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			    "npi_rxdma_dump_rdc_regs"
-			    " Illegal RDC number %d \n",
-			    rdc));
-		return (NPI_RXDMA_RDC_INVALID);
-	}
-#ifdef NPI_DEBUG
-	npi_debug_level |= DUMP_ALWAYS;
-#endif
-	num_regs = sizeof (rdc_dmc_offset) / sizeof (uint64_t);
-	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-			    "\nDMC Register Dump for Channel %d\n",
-			    rdc));
-	for (i = 0; i < num_regs; i++) {
-		RXDMA_REG_READ64(handle, rdc_dmc_offset[i], rdc, &value);
-		offset = NXGE_RXDMA_OFFSET(rdc_dmc_offset[i], handle.is_vraddr,
-				rdc);
-		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-			"%08llx %s\t %08llx \n",
-			offset, rdc_dmc_name[i], value));
-	}
-
-	NPI_DEBUG_MSG((handle.function, DUMP_ALWAYS,
-			    "\nFZC_DMC Register Dump for Channel %d\n",
-			    rdc));
-	num_regs = sizeof (rdc_fzc_offset) / sizeof (uint64_t);
-
-	for (i = 0; i < num_regs; i++) {
-		offset = REG_FZC_RDC_OFFSET(rdc_fzc_offset[i], rdc);
-		NXGE_REG_RD64(handle, offset, &value);
-		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-				    "%8llx %s\t %8llx \n",
-				    rdc_fzc_offset[i], rdc_fzc_name[i],
-				    value));
-
-	}
-
-	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-			    "\n Register Dump for Channel %d done\n",
-			    rdc));
-#ifdef NPI_DEBUG
-	npi_debug_level = old_npi_debug_level;
-#endif
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_rxdma_dump_fzc_regs
- * Dumps the contents of the common FZC_DMC registers
- *
- * Input:
- *      handle:	opaque handle interpreted by the underlying OS
- *
- * return:
- *     NPI_SUCCESS
- */
-npi_status_t
-npi_rxdma_dump_fzc_regs(npi_handle_t handle)
-{
-
-	uint64_t value;
-	int num_regs, i;
-
-
-	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-			    "\nFZC_DMC Common Register Dump\n"));
-	num_regs = sizeof (rx_fzc_offset) / sizeof (uint64_t);
-
-	for (i = 0; i < num_regs; i++) {
-		NXGE_REG_RD64(handle, rx_fzc_offset[i], &value);
-		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-			"0x%08llx %s\t 0x%08llx \n",
-			    rx_fzc_offset[i],
-			rx_fzc_name[i], value));
-	}
-	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-			    "\n FZC_DMC Register Dump Done \n"));
-
-	return (NPI_SUCCESS);
-}
-
-
-
-/*
- * per rdc config functions
- */
-npi_status_t
-npi_rxdma_cfg_logical_page_disable(npi_handle_t handle, uint8_t rdc,
-				    uint8_t page_num)
-{
-	log_page_vld_t page_vld;
-	uint64_t valid_offset;
-
-	ASSERT(RXDMA_CHANNEL_VALID(rdc));
-	if (!RXDMA_CHANNEL_VALID(rdc)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    "rxdma_cfg_logical_page_disable"
-				    " Illegal RDC number %d \n",
-				    rdc));
-		return (NPI_RXDMA_RDC_INVALID);
-	}
-
-	ASSERT(RXDMA_PAGE_VALID(page_num));
-	if (!RXDMA_PAGE_VALID(page_num)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    "rxdma_cfg_logical_page_disable"
-				    " Illegal page number %d \n",
-				    page_num));
-		return (NPI_RXDMA_PAGE_INVALID);
-	}
-
-	valid_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_VLD_REG, rdc);
-	NXGE_REG_RD64(handle, valid_offset, &page_vld.value);
-
-	if (page_num == 0)
-		page_vld.bits.ldw.page0 = 0;
-
-	if (page_num == 1)
-		page_vld.bits.ldw.page1 = 0;
-
-	NXGE_REG_WR64(handle, valid_offset, page_vld.value);
-	return (NPI_SUCCESS);
-
-}
-
-npi_status_t
-npi_rxdma_cfg_logical_page(npi_handle_t handle, uint8_t rdc,
-			    dma_log_page_t *pg_cfg)
-{
-	log_page_vld_t page_vld;
-	log_page_mask_t page_mask;
-	log_page_value_t page_value;
-	log_page_relo_t page_reloc;
-	uint64_t value_offset, reloc_offset, mask_offset;
-	uint64_t valid_offset;
-
-	ASSERT(RXDMA_CHANNEL_VALID(rdc));
-	if (!RXDMA_CHANNEL_VALID(rdc)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " rxdma_cfg_logical_page"
-				    " Illegal RDC number %d \n",
-				    rdc));
-		return (NPI_RXDMA_RDC_INVALID);
-	}
-
-	ASSERT(RXDMA_PAGE_VALID(pg_cfg->page_num));
-	if (!RXDMA_PAGE_VALID(pg_cfg->page_num)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " rxdma_cfg_logical_page"
-				    " Illegal page number %d \n",
-				    pg_cfg->page_num));
-		return (NPI_RXDMA_PAGE_INVALID);
-	}
-
-	valid_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_VLD_REG, rdc);
-	NXGE_REG_RD64(handle, valid_offset, &page_vld.value);
-
-	if (!pg_cfg->valid) {
-		if (pg_cfg->page_num == 0)
-			page_vld.bits.ldw.page0 = 0;
-
-		if (pg_cfg->page_num == 1)
-			page_vld.bits.ldw.page1 = 0;
-		NXGE_REG_WR64(handle, valid_offset, page_vld.value);
-		return (NPI_SUCCESS);
-	}
-
-	if (pg_cfg->page_num == 0) {
-		mask_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_MASK1_REG, rdc);
-		value_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_VAL1_REG, rdc);
-		reloc_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_RELO1_REG, rdc);
-		page_vld.bits.ldw.page0 = 1;
-	}
-
-	if (pg_cfg->page_num == 1) {
-		mask_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_MASK2_REG, rdc);
-		value_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_VAL2_REG, rdc);
-		reloc_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_RELO2_REG, rdc);
-		page_vld.bits.ldw.page1 = 1;
-	}
-
-
-	page_vld.bits.ldw.func = pg_cfg->func_num;
-
-	page_mask.value = 0;
-	page_value.value = 0;
-	page_reloc.value = 0;
-
-
-	page_mask.bits.ldw.mask = pg_cfg->mask >> LOG_PAGE_ADDR_SHIFT;
-	page_value.bits.ldw.value = pg_cfg->value >> LOG_PAGE_ADDR_SHIFT;
-	page_reloc.bits.ldw.relo = pg_cfg->reloc >> LOG_PAGE_ADDR_SHIFT;
-
-
-	NXGE_REG_WR64(handle, mask_offset, page_mask.value);
-	NXGE_REG_WR64(handle, value_offset, page_value.value);
-	NXGE_REG_WR64(handle, reloc_offset, page_reloc.value);
-
-
-/* enable the logical page */
-	NXGE_REG_WR64(handle, valid_offset, page_vld.value);
-	return (NPI_SUCCESS);
-}
-
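A hedged caller sketch for the routine above; the mask, match value, and relocation are placeholders, since real values come from the DMA binding of the receive buffers.

static npi_status_t
example_cfg_logical_page0(npi_handle_t handle, uint8_t rdc)
{
	dma_log_page_t pg;

	pg.page_num = 0;
	pg.valid = 1;
	pg.func_num = 0;			/* function owning the RDC */
	pg.mask = 0xfffff00000ULL;		/* placeholder address mask */
	pg.value = 0x0000100000ULL;		/* placeholder match value */
	pg.reloc = 0x0000200000ULL;		/* placeholder relocation */

	return (npi_rxdma_cfg_logical_page(handle, rdc, &pg));
}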
-npi_status_t
-npi_rxdma_cfg_logical_page_handle(npi_handle_t handle, uint8_t rdc,
-				    uint64_t page_handle)
-{
-	uint64_t offset;
-	log_page_hdl_t page_hdl;
-
-	ASSERT(RXDMA_CHANNEL_VALID(rdc));
-	if (!RXDMA_CHANNEL_VALID(rdc)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-		    "rxdma_cfg_logical_page_handle"
-		    " Illegal RDC number %d \n", rdc));
-		return (NPI_RXDMA_RDC_INVALID);
-	}
-
-
-	page_hdl.value = 0;
-
-	page_hdl.bits.ldw.handle = (uint32_t)page_handle;
-	offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_HDL_REG, rdc);
-	NXGE_REG_WR64(handle, offset, page_hdl.value);
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * RX DMA functions
- */
-npi_status_t
-npi_rxdma_cfg_rdc_ctl(npi_handle_t handle, uint8_t rdc, uint8_t op)
-{
-
-	rxdma_cfig1_t cfg;
-	uint32_t count = RXDMA_RESET_TRY_COUNT;
-	uint32_t delay_time = RXDMA_RESET_DELAY;
-	uint32_t error = NPI_RXDMA_ERROR_ENCODE(NPI_RXDMA_RESET_ERR, rdc);
-
-	ASSERT(RXDMA_CHANNEL_VALID(rdc));
-	if (!RXDMA_CHANNEL_VALID(rdc)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    "npi_rxdma_cfg_rdc_ctl"
-				    " Illegal RDC number %d \n", rdc));
-		return (NPI_RXDMA_RDC_INVALID);
-	}
-
-
-	switch (op) {
-		case RXDMA_OP_ENABLE:
-			RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
-						&cfg.value);
-			cfg.bits.ldw.en = 1;
-			RXDMA_REG_WRITE64(handle, RXDMA_CFIG1_REG,
-					    rdc, cfg.value);
-
-			NXGE_DELAY(delay_time);
-			RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
-						&cfg.value);
-			while ((count--) && (cfg.bits.ldw.qst == 0)) {
-				NXGE_DELAY(delay_time);
-				RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
-						&cfg.value);
-			}
-
-			if (cfg.bits.ldw.qst == 0) {
-				NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_rxdma_cfg_rdc_ctl"
-				    " RXDMA_OP_ENABLE Failed for RDC %d \n",
-				    rdc));
-				return (error);
-			}
-
-			break;
-		case RXDMA_OP_DISABLE:
-			RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
-						&cfg.value);
-			cfg.bits.ldw.en = 0;
-			RXDMA_REG_WRITE64(handle, RXDMA_CFIG1_REG,
-					    rdc, cfg.value);
-
-			NXGE_DELAY(delay_time);
-			RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
-						&cfg.value);
-			while ((count--) && (cfg.bits.ldw.qst == 0)) {
-				NXGE_DELAY(delay_time);
-				RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
-						&cfg.value);
-			}
-			if (cfg.bits.ldw.qst == 0) {
-				NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_rxdma_cfg_rdc_ctl"
-				    " RXDMA_OP_DISABLE Failed for RDC %d \n",
-				    rdc));
-				return (error);
-			}
-
-			break;
-		case RXDMA_OP_RESET:
-			cfg.value = 0;
-			cfg.bits.ldw.rst = 1;
-			RXDMA_REG_WRITE64(handle,
-					    RXDMA_CFIG1_REG,
-					    rdc, cfg.value);
-			NXGE_DELAY(delay_time);
-			RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
-						&cfg.value);
-			while ((count--) && (cfg.bits.ldw.rst)) {
-				NXGE_DELAY(delay_time);
-				RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
-						&cfg.value);
-			}
-			if (count == 0) {
-				NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_rxdma_cfg_rdc_ctl"
-					    " Reset Failed for RDC %d \n",
-					    rdc));
-				return (error);
-			}
-			break;
-		default:
-			return (NPI_RXDMA_SW_PARAM_ERROR);
-	}
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_rxdma_cfg_rdc_enable(npi_handle_t handle, uint8_t rdc)
-{
-	return (npi_rxdma_cfg_rdc_ctl(handle, rdc, RXDMA_OP_ENABLE));
-}
-
-npi_status_t
-npi_rxdma_cfg_rdc_disable(npi_handle_t handle, uint8_t rdc)
-{
-	return (npi_rxdma_cfg_rdc_ctl(handle, rdc, RXDMA_OP_DISABLE));
-}
-
-npi_status_t
-npi_rxdma_cfg_rdc_reset(npi_handle_t handle, uint8_t rdc)
-{
-	return (npi_rxdma_cfg_rdc_ctl(handle, rdc, RXDMA_OP_RESET));
-}
-
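Taken together with the configuration comment further below, the expected order is reset, program the rings and logical pages, then enable; a minimal sketch under that assumption:

static npi_status_t
example_rdc_bringup(npi_handle_t handle, uint8_t rdc)
{
	npi_status_t rs;

	if ((rs = npi_rxdma_cfg_rdc_reset(handle, rdc)) != NPI_SUCCESS)
		return (rs);

	/* ... npi_rxdma_cfg_rdc_ring() and logical-page setup go here ... */

	return (npi_rxdma_cfg_rdc_enable(handle, rdc));
}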
-/*
- * npi_rxdma_cfg_default_port_rdc()
- * Set the default rdc for the port
- *
- * Inputs:
- *	handle:		register handle interpreted by the underlying OS
- *	portnm:		Physical Port Number
- *	rdc:	RX DMA Channel number
- *
- * Return:
- * NPI_SUCCESS
- * NPI_RXDMA_RDC_INVALID
- * NPI_RXDMA_PORT_INVALID
- *
- */
-npi_status_t npi_rxdma_cfg_default_port_rdc(npi_handle_t handle,
-				    uint8_t portnm, uint8_t rdc)
-{
-
-	uint64_t offset;
-	def_pt_rdc_t cfg;
-
-	ASSERT(RXDMA_CHANNEL_VALID(rdc));
-	if (!RXDMA_CHANNEL_VALID(rdc)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    "rxdma_cfg_default_port_rdc"
-				    " Illegal RDC number %d \n",
-				    rdc));
-		return (NPI_RXDMA_RDC_INVALID);
-	}
-
-	ASSERT(RXDMA_PORT_VALID(portnm));
-	if (!RXDMA_PORT_VALID(portnm)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    "rxdma_cfg_default_port_rdc"
-				    " Illegal Port number %d \n",
-				    portnm));
-		return (NPI_RXDMA_PORT_INVALID);
-	}
-
-	offset = DEF_PT_RDC_REG(portnm);
-	cfg.value = 0;
-	cfg.bits.ldw.rdc = rdc;
-	NXGE_REG_WR64(handle, offset, cfg.value);
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_rxdma_cfg_rdc_rcr_ctl(npi_handle_t handle, uint8_t rdc,
-			    uint8_t op, uint16_t param)
-{
-	rcrcfig_b_t rcr_cfgb;
-
-	ASSERT(RXDMA_CHANNEL_VALID(rdc));
-	if (!RXDMA_CHANNEL_VALID(rdc)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    "rxdma_cfg_rdc_rcr_ctl"
-				    " Illegal RDC number %d \n",
-				    rdc));
-		return (NPI_RXDMA_RDC_INVALID);
-	}
-
-
-	RXDMA_REG_READ64(handle, RCRCFIG_B_REG, rdc, &rcr_cfgb.value);
-
-	switch (op) {
-		case RCR_TIMEOUT_ENABLE:
-			rcr_cfgb.bits.ldw.timeout = (uint8_t)param;
-			rcr_cfgb.bits.ldw.entout = 1;
-			break;
-
-		case RCR_THRESHOLD:
-			rcr_cfgb.bits.ldw.pthres = param;
-			break;
-
-		case RCR_TIMEOUT_DISABLE:
-			rcr_cfgb.bits.ldw.entout = 0;
-			break;
-
-		default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    "rxdma_cfg_rdc_rcr_ctl"
-				    " Illegal opcode %x \n",
-				    op));
-		return (NPI_RXDMA_OPCODE_INVALID(rdc));
-	}
-
-	RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG, rdc, rcr_cfgb.value);
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_rxdma_cfg_rdc_rcr_timeout_disable(npi_handle_t handle, uint8_t rdc)
-{
-	return (npi_rxdma_cfg_rdc_rcr_ctl(handle, rdc,
-	    RCR_TIMEOUT_DISABLE, 0));
-}
-
-npi_status_t
-npi_rxdma_cfg_rdc_rcr_threshold(npi_handle_t handle, uint8_t rdc,
-				    uint16_t rcr_threshold)
-{
-	return (npi_rxdma_cfg_rdc_rcr_ctl(handle, rdc,
-	    RCR_THRESHOLD, rcr_threshold));
-
-}
-
-npi_status_t
-npi_rxdma_cfg_rdc_rcr_timeout(npi_handle_t handle, uint8_t rdc,
-			    uint8_t rcr_timeout)
-{
-	return (npi_rxdma_cfg_rdc_rcr_ctl(handle, rdc,
-	    RCR_TIMEOUT_ENABLE, rcr_timeout));
-
-}
-
-/*
- * npi_rxdma_cfg_rdc_ring()
- * Configure the RDC channel Rcv Buffer Ring
- */
-npi_status_t
-npi_rxdma_cfg_rdc_ring(npi_handle_t handle, uint8_t rdc,
-			    rdc_desc_cfg_t *rdc_desc_cfg)
-{
-	rbr_cfig_a_t cfga;
-	rbr_cfig_b_t cfgb;
-	rxdma_cfig1_t cfg1;
-	rxdma_cfig2_t cfg2;
-	rcrcfig_a_t rcr_cfga;
-	rcrcfig_b_t rcr_cfgb;
-
-	ASSERT(RXDMA_CHANNEL_VALID(rdc));
-	if (!RXDMA_CHANNEL_VALID(rdc)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    "rxdma_cfg_rdc_ring"
-				    " Illegal RDC number %d \n",
-				    rdc));
-		return (NPI_RXDMA_RDC_INVALID);
-	}
-
-
-	cfga.value = 0;
-	cfgb.value = 0;
-	cfg1.value = 0;
-	cfg2.value = 0;
-
-	if (rdc_desc_cfg->mbox_enable == 1) {
-		cfg1.bits.ldw.mbaddr_h =
-		    (rdc_desc_cfg->mbox_addr >> 32) & 0xfff;
-		cfg2.bits.ldw.mbaddr =
-		    ((rdc_desc_cfg->mbox_addr &
-			    RXDMA_CFIG2_MBADDR_L_MASK) >>
-			    RXDMA_CFIG2_MBADDR_L_SHIFT);
-
-
-		/*
-		 * Enable the RDC only after all the configurations
-		 * have been set; otherwise a configuration fatal error
-		 * will be returned (especially if the Hypervisor has
-		 * set up the logical pages with non-zero values).
-		 * This NPI function only sets up the configuration.
-		 */
-	}
-
-
-	if (rdc_desc_cfg->full_hdr == 1)
-		cfg2.bits.ldw.full_hdr = 1;
-
-	if (RXDMA_BUFF_OFFSET_VALID(rdc_desc_cfg->offset)) {
-		cfg2.bits.ldw.offset = rdc_desc_cfg->offset;
-	} else {
-		cfg2.bits.ldw.offset = SW_OFFSET_NO_OFFSET;
-	}
-
-		/* rbr config */
-
-	cfga.value = (rdc_desc_cfg->rbr_addr & (RBR_CFIG_A_STDADDR_MASK |
-					    RBR_CFIG_A_STDADDR_BASE_MASK));
-
-	if ((rdc_desc_cfg->rbr_len < RBR_DEFAULT_MIN_LEN) ||
-		    (rdc_desc_cfg->rbr_len > RBR_DEFAULT_MAX_LEN)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    "npi_rxdma_cfg_rdc_ring"
-				    " Illegal RBR Queue Length %d \n",
-				    rdc_desc_cfg->rbr_len));
-		return (NPI_RXDMA_ERROR_ENCODE(NPI_RXDMA_RBRSZIE_INVALID, rdc));
-	}
-
-
-	cfga.bits.hdw.len = rdc_desc_cfg->rbr_len;
-	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
-		"npi_rxdma_cfg_rdc_ring"
-		" CFGA 0x%llx hdw.len %d (RBR LEN %d)\n",
-		cfga.value, cfga.bits.hdw.len,
-		rdc_desc_cfg->rbr_len));
-
-	if (rdc_desc_cfg->page_size == SIZE_4KB)
-		cfgb.bits.ldw.bksize = RBR_BKSIZE_4K;
-	else if (rdc_desc_cfg->page_size == SIZE_8KB)
-		cfgb.bits.ldw.bksize = RBR_BKSIZE_8K;
-	else if (rdc_desc_cfg->page_size == SIZE_16KB)
-		cfgb.bits.ldw.bksize = RBR_BKSIZE_16K;
-	else if (rdc_desc_cfg->page_size == SIZE_32KB)
-		cfgb.bits.ldw.bksize = RBR_BKSIZE_32K;
-	else {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			    "rxdma_cfg_rdc_ring"
-			    " blksize: Illegal buffer size %d \n",
-			    rdc_desc_cfg->page_size));
-		return (NPI_RXDMA_BUFSZIE_INVALID);
-	}
-
-	if (rdc_desc_cfg->valid0) {
-
-		if (rdc_desc_cfg->size0 == SIZE_256B)
-			cfgb.bits.ldw.bufsz0 = RBR_BUFSZ0_256B;
-		else if (rdc_desc_cfg->size0 == SIZE_512B)
-			cfgb.bits.ldw.bufsz0 = RBR_BUFSZ0_512B;
-		else if (rdc_desc_cfg->size0 == SIZE_1KB)
-			cfgb.bits.ldw.bufsz0 = RBR_BUFSZ0_1K;
-		else if (rdc_desc_cfg->size0 == SIZE_2KB)
-			cfgb.bits.ldw.bufsz0 = RBR_BUFSZ0_2K;
-		else {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " rxdma_cfg_rdc_ring"
-				    " blksize0: Illegal buffer size %x \n",
-				    rdc_desc_cfg->size0));
-			return (NPI_RXDMA_BUFSZIE_INVALID);
-		}
-		cfgb.bits.ldw.vld0 = 1;
-	} else {
-		cfgb.bits.ldw.vld0 = 0;
-	}
-
-
-	if (rdc_desc_cfg->valid1) {
-		if (rdc_desc_cfg->size1 == SIZE_1KB)
-			cfgb.bits.ldw.bufsz1 = RBR_BUFSZ1_1K;
-		else if (rdc_desc_cfg->size1 == SIZE_2KB)
-			cfgb.bits.ldw.bufsz1 = RBR_BUFSZ1_2K;
-		else if (rdc_desc_cfg->size1 == SIZE_4KB)
-			cfgb.bits.ldw.bufsz1 = RBR_BUFSZ1_4K;
-		else if (rdc_desc_cfg->size1 == SIZE_8KB)
-			cfgb.bits.ldw.bufsz1 = RBR_BUFSZ1_8K;
-		else {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " rxdma_cfg_rdc_ring"
-				    " blksize1: Illegal buffer size %x \n",
-				    rdc_desc_cfg->size1));
-			return (NPI_RXDMA_BUFSZIE_INVALID);
-		}
-		cfgb.bits.ldw.vld1 = 1;
-	} else {
-		cfgb.bits.ldw.vld1 = 0;
-	}
-
-
-	if (rdc_desc_cfg->valid2) {
-		if (rdc_desc_cfg->size2 == SIZE_2KB)
-			cfgb.bits.ldw.bufsz2 = RBR_BUFSZ2_2K;
-		else if (rdc_desc_cfg->size2 == SIZE_4KB)
-			cfgb.bits.ldw.bufsz2 = RBR_BUFSZ2_4K;
-		else if (rdc_desc_cfg->size2 == SIZE_8KB)
-			cfgb.bits.ldw.bufsz2 = RBR_BUFSZ2_8K;
-		else if (rdc_desc_cfg->size2 == SIZE_16KB)
-			cfgb.bits.ldw.bufsz2 = RBR_BUFSZ2_16K;
-		else {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " rxdma_cfg_rdc_ring"
-				    " blksize2: Illegal buffer size %x \n",
-				    rdc_desc_cfg->size2));
-			return (NPI_RXDMA_BUFSZIE_INVALID);
-		}
-		cfgb.bits.ldw.vld2 = 1;
-	} else {
-		cfgb.bits.ldw.vld2 = 0;
-	}
-
-
-	rcr_cfga.value = (rdc_desc_cfg->rcr_addr &
-			    (RCRCFIG_A_STADDR_MASK |
-			    RCRCFIG_A_STADDR_BASE_MASK));
-
-
-	if ((rdc_desc_cfg->rcr_len < RCR_DEFAULT_MIN_LEN) ||
-		    (rdc_desc_cfg->rcr_len > NXGE_RCR_MAX)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			    " rxdma_cfg_rdc_ring"
-			    " Illegal RCR Queue Length %d \n",
-			    rdc_desc_cfg->rcr_len));
-		return (NPI_RXDMA_ERROR_ENCODE(NPI_RXDMA_RCRSZIE_INVALID, rdc));
-	}
-
-	rcr_cfga.bits.hdw.len = rdc_desc_cfg->rcr_len;
-
-
-	rcr_cfgb.value = 0;
-	if (rdc_desc_cfg->rcr_timeout_enable == 1) {
-		/* check if the rcr timeout value is valid */
-
-		if (RXDMA_RCR_TO_VALID(rdc_desc_cfg->rcr_timeout)) {
-			rcr_cfgb.bits.ldw.timeout = rdc_desc_cfg->rcr_timeout;
-			rcr_cfgb.bits.ldw.entout = 1;
-		} else {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " rxdma_cfg_rdc_ring"
-				    " Illegal RCR Timeout value %d \n",
-				    rdc_desc_cfg->rcr_timeout));
-			rcr_cfgb.bits.ldw.entout = 0;
-		}
-	} else {
-		rcr_cfgb.bits.ldw.entout = 0;
-	}
-
-		/* check if the rcr threshold value is valid */
-	if (RXDMA_RCR_THRESH_VALID(rdc_desc_cfg->rcr_threshold)) {
-		rcr_cfgb.bits.ldw.pthres = rdc_desc_cfg->rcr_threshold;
-	} else {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			    " rxdma_cfg_rdc_ring"
-			    " Illegal RCR Threshold value %d \n",
-			    rdc_desc_cfg->rcr_threshold));
-		rcr_cfgb.bits.ldw.pthres = 1;
-	}
-
-		/* now do the actual HW configuration */
-	RXDMA_REG_WRITE64(handle, RXDMA_CFIG1_REG, rdc, cfg1.value);
-	RXDMA_REG_WRITE64(handle, RXDMA_CFIG2_REG, rdc, cfg2.value);
-
-
-	RXDMA_REG_WRITE64(handle, RBR_CFIG_A_REG, rdc, cfga.value);
-	RXDMA_REG_WRITE64(handle, RBR_CFIG_B_REG, rdc, cfgb.value);
-
-	RXDMA_REG_WRITE64(handle, RCRCFIG_A_REG, rdc, rcr_cfga.value);
-	RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG, rdc, rcr_cfgb.value);
-
-	return (NPI_SUCCESS);
-
-}
-
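A sketch of a caller filling rdc_desc_cfg_t for the routine above; every address, length, and tuning value is a placeholder chosen only to satisfy the range checks in the code.

static npi_status_t
example_cfg_ring(npi_handle_t handle, uint8_t rdc,
	uint64_t rbr_ioaddr, uint64_t rcr_ioaddr, uint64_t mbox_ioaddr)
{
	rdc_desc_cfg_t cfg;

	bzero(&cfg, sizeof (cfg));
	cfg.mbox_enable = 1;
	cfg.mbox_addr = mbox_ioaddr;
	cfg.page_size = SIZE_8KB;		/* RBR block size */
	cfg.rbr_addr = rbr_ioaddr;
	cfg.rbr_len = 1024;			/* placeholder RBR length */
	cfg.valid0 = 1;
	cfg.size0 = SIZE_2KB;			/* packet buffer size 0 */
	cfg.rcr_addr = rcr_ioaddr;
	cfg.rcr_len = 512;			/* placeholder RCR length */
	cfg.rcr_timeout_enable = 1;
	cfg.rcr_timeout = 8;			/* placeholder timeout */
	cfg.rcr_threshold = 16;			/* placeholder threshold */

	return (npi_rxdma_cfg_rdc_ring(handle, rdc, &cfg));
}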
-/*
- * npi_rxdma_red_discard_stat_get
- * Gets the current discard count due to RED
- * The counter overflow bit is cleared if it has been set.
- *
- * Inputs:
- *      handle:	opaque handle interpreted by the underlying OS
- *	rdc:		RX DMA Channel number
- *	cnt:	Ptr to structure to write current RDC discard stat
- *
- * Return:
- * NPI_SUCCESS
- * NPI_RXDMA_RDC_INVALID
- *
- */
-npi_status_t
-npi_rxdma_red_discard_stat_get(npi_handle_t handle, uint8_t rdc,
-				    rx_disc_cnt_t *cnt)
-{
-	uint64_t offset;
-
-	ASSERT(RXDMA_CHANNEL_VALID(rdc));
-	if (!RXDMA_CHANNEL_VALID(rdc)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_rxdma_red_discard_stat_get"
-				    " Illegal RDC Number %d \n",
-				    rdc));
-		return (NPI_RXDMA_RDC_INVALID);
-	}
-
-	offset = RDC_RED_RDC_DISC_REG(rdc);
-	NXGE_REG_RD64(handle, offset, &cnt->value);
-	if (cnt->bits.ldw.oflow) {
-		NPI_DEBUG_MSG((handle.function, NPI_ERR_CTL,
-			    " npi_rxdma_red_discard_stat_get"
-			    " Counter overflow for channel %d ",
-			    " Counter overflow for channel %d "
-			    " ..... clearing \n",
-		cnt->bits.ldw.oflow = 0;
-		NXGE_REG_WR64(handle, offset, cnt->value);
-		cnt->bits.ldw.oflow = 1;
-	}
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_rxdma_red_discard_oflow_clear
- * Clear RED discard counter overflow bit
- *
- * Inputs:
- *      handle:	opaque handle interpreted by the underlying OS
- *	rdc:		RX DMA Channel number
- *
- * Return:
- * NPI_SUCCESS
- * NPI_RXDMA_RDC_INVALID
- *
- */
-npi_status_t
-npi_rxdma_red_discard_oflow_clear(npi_handle_t handle, uint8_t rdc)
-
-{
-	uint64_t offset;
-	rx_disc_cnt_t cnt;
-
-	ASSERT(RXDMA_CHANNEL_VALID(rdc));
-	if (!RXDMA_CHANNEL_VALID(rdc)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			    " npi_rxdma_red_discard_oflow_clear"
-			    " Illegal RDC Number %d \n",
-			    rdc));
-		return (NPI_RXDMA_RDC_INVALID);
-	}
-
-	offset = RDC_RED_RDC_DISC_REG(rdc);
-	NXGE_REG_RD64(handle, offset, &cnt.value);
-	if (cnt.bits.ldw.oflow) {
-		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
-			    " npi_rxdma_red_discard_oflow_clear"
-			    " Counter overflow for channel %d "
-			    " ..... clearing \n",
-			    rdc));
-		cnt.bits.ldw.oflow = 0;
-		NXGE_REG_WR64(handle, offset, cnt.value);
-	}
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_rxdma_misc_discard_stat_get
- * Gets the current discard count for the rdc due to
- * buffer pool empty
- * The counter overflow bit is cleared if it has been set.
- *
- * Inputs:
- *      handle:	opaque handle interpreted by the underlying OS
- *	rdc:		RX DMA Channel number
- *	cnt:	Ptr to structure to write current RDC discard stat
- *
- * Return:
- * NPI_SUCCESS
- * NPI_RXDMA_RDC_INVALID
- *
- */
-npi_status_t
-npi_rxdma_misc_discard_stat_get(npi_handle_t handle, uint8_t rdc,
-				    rx_disc_cnt_t *cnt)
-{
-	ASSERT(RXDMA_CHANNEL_VALID(rdc));
-	if (!RXDMA_CHANNEL_VALID(rdc)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_rxdma_misc_discard_stat_get"
-				    " Illegal RDC Number %d \n",
-				    rdc));
-		return (NPI_RXDMA_RDC_INVALID);
-	}
-
-	RXDMA_REG_READ64(handle, RXMISC_DISCARD_REG, rdc, &cnt->value);
-	if (cnt->bits.ldw.oflow) {
-		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
-			    " npi_rxdma_misc_discard_stat_get"
-			    " Counter overflow for channel %d "
-			    " ..... clearing \n",
-			    rdc));
-		cnt->bits.ldw.oflow = 0;
-		RXDMA_REG_WRITE64(handle, RXMISC_DISCARD_REG, rdc, cnt->value);
-		cnt->bits.ldw.oflow = 1;
-	}
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_rxdma_misc_discard_oflow_clear
- * Clear the overflow bit of the buffer pool empty discard counter
- * for the rdc
- *
- * Inputs:
- *      handle:	opaque handle interpreted by the underlying OS
- *	rdc:		RX DMA Channel number
- *
- * Return:
- * NPI_SUCCESS
- * NPI_RXDMA_RDC_INVALID
- *
- */
-npi_status_t
-npi_rxdma_misc_discard_oflow_clear(npi_handle_t handle, uint8_t rdc)
-{
-	rx_disc_cnt_t cnt;
-
-	ASSERT(RXDMA_CHANNEL_VALID(rdc));
-	if (!RXDMA_CHANNEL_VALID(rdc)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			    " npi_rxdma_misc_discard_oflow_clear"
-			    " Illegal RDC Number %d \n",
-			    rdc));
-		return (NPI_RXDMA_RDC_INVALID);
-	}
-
-	RXDMA_REG_READ64(handle, RXMISC_DISCARD_REG, rdc, &cnt.value);
-	if (cnt.bits.ldw.oflow) {
-		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
-			    " npi_rxdma_misc_discard_oflow_clear"
-			    " Counter overflow for channel %d "
-			    " ..... clearing \n",
-			    rdc));
-		cnt.bits.ldw.oflow = 0;
-		RXDMA_REG_WRITE64(handle, RXMISC_DISCARD_REG, rdc, cnt.value);
-	}
-
-	return (NPI_SUCCESS);
-}
-
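The two stat-get routines above clear the overflow bit as a side effect, so a periodic harvest is enough; a sketch follows, accumulating the raw register values (no per-field count layout is assumed here).

static void
example_harvest_discards(npi_handle_t handle, uint8_t rdc,
	uint64_t *red_total, uint64_t *misc_total)
{
	rx_disc_cnt_t red, misc;

	if (npi_rxdma_red_discard_stat_get(handle, rdc, &red) == NPI_SUCCESS)
		*red_total += red.value;	/* raw register value */
	if (npi_rxdma_misc_discard_stat_get(handle, rdc, &misc) == NPI_SUCCESS)
		*misc_total += misc.value;
}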
-/*
- * npi_rxdma_ring_perr_stat_get
- * Gets the current RDC memory parity error status
- * Any error bits that are set are cleared.
- *
- * Inputs:
- * handle:	opaque handle interpreted by the underlying OS
- * pre_log:	Structure to write current RDC Prefetch memory
- *		Parity Error stat
- * sha_log:	Structure to write current RDC Shadow memory
- *		Parity Error stat
- *
- * Return:
- * NPI_SUCCESS
- *
- */
-npi_status_t
-npi_rxdma_ring_perr_stat_get(npi_handle_t handle,
-			    rdmc_par_err_log_t *pre_log,
-			    rdmc_par_err_log_t *sha_log)
-{
-	uint64_t pre_offset, sha_offset;
-	rdmc_par_err_log_t clr;
-	int clr_bits = 0;
-
-	pre_offset = RDMC_PRE_PAR_ERR_REG;
-	sha_offset = RDMC_SHA_PAR_ERR_REG;
-	NXGE_REG_RD64(handle, pre_offset, &pre_log->value);
-	NXGE_REG_RD64(handle, sha_offset, &sha_log->value);
-
-	clr.value = pre_log->value;
-	if (pre_log->bits.ldw.err) {
-		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
-			    " npi_rxdma_ring_perr_stat_get"
-			    " PRE ERR Bit set ..... clearing \n"));
-		clr.bits.ldw.err = 0;
-		clr_bits++;
-	}
-
-	if (pre_log->bits.ldw.merr) {
-		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
-			    " npi_rxdma_ring_perr_stat_get"
-			    " PRE MERR Bit set ..... clearing \n"));
-		clr.bits.ldw.merr = 0;
-		clr_bits++;
-	}
-
-	if (clr_bits) {
-		NXGE_REG_WR64(handle, pre_offset, clr.value);
-	}
-
-	clr_bits = 0;
-	clr.value = sha_log->value;
-	if (sha_log->bits.ldw.err) {
-		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
-			    " npi_rxdma_ring_perr_stat_get"
-			    " SHA ERR Bit set ..... clearing \n"));
-		clr.bits.ldw.err = 0;
-		clr_bits++;
-	}
-
-	if (sha_log->bits.ldw.merr) {
-		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
-			    " npi_rxdma_ring_perr_stat_get"
-			    " SHA MERR Bit set ..... clearing \n"));
-		clr.bits.ldw.merr = 0;
-		clr_bits++;
-	}
-
-	if (clr_bits) {
-		NXGE_REG_WR64(handle, sha_offset, clr.value);
-	}
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_rxdma_ring_perr_stat_clear
- * Clear RDC Memory Parity Error counter overflow bits
- *
- * Inputs:
- *      handle:	opaque handle interpreted by the underlying OS
- * Return:
- * NPI_SUCCESS
- *
- */
-npi_status_t
-npi_rxdma_ring_perr_stat_clear(npi_handle_t handle)
-{
-	uint64_t pre_offset, sha_offset;
-	rdmc_par_err_log_t clr;
-	int clr_bits = 0;
-	pre_offset = RDMC_PRE_PAR_ERR_REG;
-	sha_offset = RDMC_SHA_PAR_ERR_REG;
-
-	NXGE_REG_RD64(handle, pre_offset, &clr.value);
-
-	if (clr.bits.ldw.err) {
-		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
-			    " npi_rxdma_ring_perr_stat_clear"
-			    " PRE ERR Bit set ..... clearing \n"));
-		clr.bits.ldw.err = 0;
-		clr_bits++;
-	}
-
-	if (clr.bits.ldw.merr) {
-		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
-			    " npi_rxdma_ring_perr_stat_clear"
-			    " PRE MERR Bit set ..... clearing \n"));
-		clr.bits.ldw.merr = 0;
-		clr_bits++;
-	}
-
-	if (clr_bits) {
-		NXGE_REG_WR64(handle, pre_offset, clr.value);
-	}
-
-	clr_bits = 0;
-	NXGE_REG_RD64(handle, sha_offset, &clr.value);
-	if (clr.bits.ldw.err) {
-		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
-			    " npi_rxdma_ring_perr_stat_clear"
-			    " SHA ERR Bit set ..... clearing \n"));
-		clr.bits.ldw.err = 0;
-		clr_bits++;
-	}
-
-	if (clr.bits.ldw.merr) {
-		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
-			    " npi_rxdma_ring_perr_stat_clear"
-			    " SHA MERR Bit set ..... clearing \n"));
-		clr.bits.ldw.merr = 0;
-		clr_bits++;
-	}
-
-	if (clr_bits) {
-		NXGE_REG_WR64(handle, sha_offset, clr.value);
-	}
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * Access the RDMC Memory: used for debugging
- */
-npi_status_t
-npi_rxdma_rdmc_memory_io(npi_handle_t handle,
-			    rdmc_mem_access_t *data, uint8_t op)
-{
-	uint64_t d0_offset, d1_offset, d2_offset, d3_offset, d4_offset;
-	uint64_t addr_offset;
-	rdmc_mem_addr_t addr;
-	rdmc_mem_data_t d0, d1, d2, d3, d4;
-	d0.value = 0;
-	d1.value = 0;
-	d2.value = 0;
-	d3.value = 0;
-	d4.value = 0;
-	addr.value = 0;
-
-
-	if ((data->location != RDMC_MEM_ADDR_PREFETCH) &&
-		    (data->location != RDMC_MEM_ADDR_SHADOW)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			    " npi_rxdma_rdmc_memory_io"
-			    " Illegal memory Type %x \n",
-			    data->location));
-		return (NPI_RXDMA_OPCODE_INVALID(0));
-	}
-
-	addr_offset = RDMC_MEM_ADDR_REG;
-	addr.bits.ldw.addr = data->addr;
-	addr.bits.ldw.pre_shad = data->location;
-
-	d0_offset = RDMC_MEM_DATA0_REG;
-	d1_offset = RDMC_MEM_DATA1_REG;
-	d2_offset = RDMC_MEM_DATA2_REG;
-	d3_offset = RDMC_MEM_DATA3_REG;
-	d4_offset = RDMC_MEM_DATA4_REG;
-
-
-	if (op == RDMC_MEM_WRITE) {
-		d0.bits.ldw.data = data->data[0];
-		d1.bits.ldw.data = data->data[1];
-		d2.bits.ldw.data = data->data[2];
-		d3.bits.ldw.data = data->data[3];
-		d4.bits.ldw.data = data->data[4];
-		NXGE_REG_WR64(handle, addr_offset, addr.value);
-		NXGE_REG_WR64(handle, d0_offset, d0.value);
-		NXGE_REG_WR64(handle, d1_offset, d1.value);
-		NXGE_REG_WR64(handle, d2_offset, d2.value);
-		NXGE_REG_WR64(handle, d3_offset, d3.value);
-		NXGE_REG_WR64(handle, d4_offset, d4.value);
-	}
-
-	if (op == RDMC_MEM_READ) {
-		NXGE_REG_WR64(handle, addr_offset, addr.value);
-		NXGE_REG_RD64(handle, d4_offset, &d4.value);
-		NXGE_REG_RD64(handle, d3_offset, &d3.value);
-		NXGE_REG_RD64(handle, d2_offset, &d2.value);
-		NXGE_REG_RD64(handle, d1_offset, &d1.value);
-		NXGE_REG_RD64(handle, d0_offset, &d0.value);
-
-		data->data[0] = d0.bits.ldw.data;
-		data->data[1] = d1.bits.ldw.data;
-		data->data[2] = d2.bits.ldw.data;
-		data->data[3] = d3.bits.ldw.data;
-		data->data[4] = d4.bits.ldw.data;
-	} else {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			    " npi_rxdma_rdmc_memory_io"
-			    " Illegal opcode %x \n",
-			    op));
-		return (NPI_RXDMA_OPCODE_INVALID(0));
-
-	}
-
-	return (NPI_SUCCESS);
-}
-
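A debug-only sketch of reading one shadow-memory entry through the routine above; the entry index is a placeholder, and 32-bit data words are assumed, mirroring the data[0..4] layout used by the code.

static npi_status_t
example_read_shadow_entry(npi_handle_t handle, uint32_t *out)
{
	rdmc_mem_access_t mem;
	int i;

	mem.location = RDMC_MEM_ADDR_SHADOW;
	mem.addr = 0;				/* placeholder entry index */

	if (npi_rxdma_rdmc_memory_io(handle, &mem, RDMC_MEM_READ) !=
	    NPI_SUCCESS)
		return (NPI_FAILURE);

	for (i = 0; i < 5; i++)
		*out++ = mem.data[i];		/* five data words per entry */

	return (NPI_SUCCESS);
}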
-/*
- * system wide conf functions
- */
-npi_status_t
-npi_rxdma_cfg_clock_div_set(npi_handle_t handle, uint16_t count)
-{
-	uint64_t offset;
-	rx_dma_ck_div_t clk_div;
-
-	offset = RX_DMA_CK_DIV_REG;
-
-	clk_div.value = 0;
-	clk_div.bits.ldw.cnt = count;
-	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
-		    " npi_rxdma_cfg_clock_div_set: add 0x%llx "
-		    "handle 0x%llx value 0x%llx",
-		    handle.regp, handle.regh, clk_div.value));
-
-	NXGE_REG_WR64(handle, offset, clk_div.value);
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_rxdma_cfg_red_rand_init(npi_handle_t handle, uint16_t init_value)
-{
-	uint64_t offset;
-	red_ran_init_t rand_reg;
-
-	offset = RED_RAN_INIT_REG;
-
-	rand_reg.value = 0;
-	rand_reg.bits.ldw.init = init_value;
-	rand_reg.bits.ldw.enable = 1;
-	NXGE_REG_WR64(handle, offset, rand_reg.value);
-
-	return (NPI_SUCCESS);
-
-}
-
-npi_status_t
-npi_rxdma_cfg_red_rand_disable(npi_handle_t handle)
-{
-	uint64_t offset;
-	red_ran_init_t rand_reg;
-
-	offset = RED_RAN_INIT_REG;
-
-	NXGE_REG_RD64(handle, offset, &rand_reg.value);
-	rand_reg.bits.ldw.enable = 0;
-	NXGE_REG_WR64(handle, offset, rand_reg.value);
-
-	return (NPI_SUCCESS);
-
-}
-
-npi_status_t
-npi_rxdma_cfg_32bitmode_enable(npi_handle_t handle)
-{
-	uint64_t offset;
-	rx_addr_md_t md_reg;
-	offset = RX_ADDR_MD_REG;
-	md_reg.value = 0;
-	md_reg.bits.ldw.mode32 = 1;
-
-	NXGE_REG_WR64(handle, offset, md_reg.value);
-	return (NPI_SUCCESS);
-
-}
-
-npi_status_t
-npi_rxdma_cfg_32bitmode_disable(npi_handle_t handle)
-{
-	uint64_t offset;
-	rx_addr_md_t md_reg;
-	offset = RX_ADDR_MD_REG;
-	md_reg.value = 0;
-
-	NXGE_REG_WR64(handle, offset, md_reg.value);
-	return (NPI_SUCCESS);
-
-}
-
-npi_status_t
-npi_rxdma_cfg_ram_access_enable(npi_handle_t handle)
-{
-	uint64_t offset;
-	rx_addr_md_t md_reg;
-	offset = RX_ADDR_MD_REG;
-	NXGE_REG_RD64(handle, offset, &md_reg.value);
-	md_reg.bits.ldw.ram_acc = 1;
-	NXGE_REG_WR64(handle, offset, md_reg.value);
-	return (NPI_SUCCESS);
-
-}
-
-npi_status_t
-npi_rxdma_cfg_ram_access_disable(npi_handle_t handle)
-{
-	uint64_t offset;
-	rx_addr_md_t md_reg;
-	offset = RX_ADDR_MD_REG;
-	NXGE_REG_RD64(handle, offset, &md_reg.value);
-	md_reg.bits.ldw.ram_acc = 0;
-	NXGE_REG_WR64(handle, offset, md_reg.value);
-	return (NPI_SUCCESS);
-
-}
-
-npi_status_t
-npi_rxdma_cfg_port_ddr_weight(npi_handle_t handle,
-				    uint8_t portnm, uint32_t weight)
-{
-
-	pt_drr_wt_t wt_reg;
-	uint64_t offset;
-
-	ASSERT(RXDMA_PORT_VALID(portnm));
-	if (!RXDMA_PORT_VALID(portnm)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			    " rxdma_cfg_port_ddr_weight"
-			    " Illegal Port Number %d \n",
-			    portnm));
-		return (NPI_RXDMA_PORT_INVALID);
-	}
-
-	offset = PT_DRR_WT_REG(portnm);
-	wt_reg.value = 0;
-	wt_reg.bits.ldw.wt = weight;
-	NXGE_REG_WR64(handle, offset, wt_reg.value);
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_rxdma_port_usage_get(npi_handle_t handle,
-				    uint8_t portnm, uint32_t *blocks)
-{
-
-	pt_use_t use_reg;
-	uint64_t offset;
-
-	ASSERT(RXDMA_PORT_VALID(portnm));
-	if (!RXDMA_PORT_VALID(portnm)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			    " rxdma_port_usage_get"
-			    " Illegal Port Number %d \n",
-			    portnm));
-		return (NPI_RXDMA_PORT_INVALID);
-	}
-
-	offset = PT_USE_REG(portnm);
-	NXGE_REG_RD64(handle, offset, &use_reg.value);
-	*blocks = use_reg.bits.ldw.cnt;
-	return (NPI_SUCCESS);
-
-}
-
-npi_status_t
-npi_rxdma_cfg_wred_param(npi_handle_t handle, uint8_t rdc,
-				    rdc_red_para_t *wred_params)
-{
-	rdc_red_para_t wred_reg;
-	uint64_t offset;
-
-	ASSERT(RXDMA_CHANNEL_VALID(rdc));
-	if (!RXDMA_CHANNEL_VALID(rdc)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			    " rxdma_cfg_wred_param"
-			    " Illegal RDC Number %d \n",
-			    rdc));
-		return (NPI_RXDMA_RDC_INVALID);
-	}
-
-	/*
-	 * need to update RDC_RED_PARA_REG as well as bit defs in
-	 * the hw header file
-	 */
-	offset = RDC_RED_RDC_PARA_REG(rdc);
-
-	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
-		" npi_rxdma_cfg_wred_param: "
-		"set RED_PARA: passed value 0x%llx "
-		"win 0x%x thre 0x%x sync 0x%x thre_sync 0x%x",
-		wred_params->value,
-		wred_params->bits.ldw.win,
-		wred_params->bits.ldw.thre,
-		wred_params->bits.ldw.win_syn,
-		wred_params->bits.ldw.thre_sync));
-
-	wred_reg.value = 0;
-	wred_reg.bits.ldw.win = wred_params->bits.ldw.win;
-	wred_reg.bits.ldw.thre = wred_params->bits.ldw.thre;
-	wred_reg.bits.ldw.win_syn = wred_params->bits.ldw.win_syn;
-	wred_reg.bits.ldw.thre_sync = wred_params->bits.ldw.thre_sync;
-	NXGE_REG_WR64(handle, offset, wred_reg.value);
-
-	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
-		"set RED_PARA: value 0x%llx "
-		"win 0x%x thre 0x%x sync 0x%x thre_sync 0x%x",
-		wred_reg.value,
-		wred_reg.bits.ldw.win,
-		wred_reg.bits.ldw.thre,
-		wred_reg.bits.ldw.win_syn,
-		wred_reg.bits.ldw.thre_sync));
-
-	return (NPI_SUCCESS);
-}
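-
-/*
- * Illustrative sketch (not part of the original source): filling in a
- * rdc_red_para_t before handing it to npi_rxdma_cfg_wred_param().  The
- * threshold and window values are placeholders for the example, not
- * recommended settings.
- */
-static npi_status_t
-example_set_wred(npi_handle_t handle, uint8_t rdc)
-{
-	rdc_red_para_t red;
-
-	red.value = 0;
-	red.bits.ldw.thre = 0x100;	/* example discard threshold */
-	red.bits.ldw.win = 0xf;		/* example averaging window */
-	red.bits.ldw.thre_sync = 0x100;	/* example sync threshold */
-	red.bits.ldw.win_syn = 0xf;	/* example sync window */
-
-	return (npi_rxdma_cfg_wred_param(handle, rdc, &red));
-}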
-
-/*
- * npi_rxdma_cfg_rdc_table()
- * Configure/populate the RDC table
- *
- * Inputs:
- *	handle:		register handle interpreted by the underlying OS
- *	table:		RDC Group Number
- *	rdc[]:	 Array of RX DMA Channels
- *
- * Return:
- * NPI_SUCCESS
- * NPI_RXDMA_TABLE_INVALID
- *
- */
-npi_status_t
-npi_rxdma_cfg_rdc_table(npi_handle_t handle,
-			    uint8_t table, uint8_t rdc[])
-{
-	uint64_t offset;
-	int tbl_offset;
-	rdc_tbl_t tbl_reg;
-	tbl_reg.value = 0;
-
-	ASSERT(RXDMA_TABLE_VALID(table));
-	if (!RXDMA_TABLE_VALID(table)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			    " npi_rxdma_cfg_rdc_table"
-			    " Illegal RDC Table Number %d \n",
-			    table));
-		return (NPI_RXDMA_TABLE_INVALID);
-	}
-
-	offset = REG_RDC_TABLE_OFFSET(table);
-	for (tbl_offset = 0; tbl_offset < NXGE_MAX_RDCS; tbl_offset++) {
-		tbl_reg.bits.ldw.rdc = rdc[tbl_offset];
-		NXGE_REG_WR64(handle, offset, tbl_reg.value);
-		offset += 8;
-	}
-
-	return (NPI_SUCCESS);
-
-}
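-
-/*
- * Illustrative sketch (not part of the original source): populating RDC
- * group 0 so that traffic hashed to that group spreads across the first
- * four DMA channels.  The channel assignment is an example only.
- */
-static npi_status_t
-example_fill_rdc_group(npi_handle_t handle)
-{
-	uint8_t rdc[NXGE_MAX_RDCS];
-	int i;
-
-	/* repeat channels 0..3 across all entries of the group */
-	for (i = 0; i < NXGE_MAX_RDCS; i++)
-		rdc[i] = (uint8_t)(i % 4);
-
-	return (npi_rxdma_cfg_rdc_table(handle, 0, rdc));
-}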
-
-npi_status_t
-npi_rxdma_cfg_rdc_table_default_rdc(npi_handle_t handle,
-			    uint8_t table, uint8_t rdc)
-{
-	uint64_t offset;
-	rdc_tbl_t tbl_reg;
-	tbl_reg.value = 0;
-
-	ASSERT(RXDMA_TABLE_VALID(table));
-	if (!RXDMA_TABLE_VALID(table)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			    " npi_rxdma_cfg_rdc_table_default_rdc"
-			    " Illegal RDC Table Number %d \n",
-			    table));
-		return (NPI_RXDMA_TABLE_INVALID);
-	}
-
-	offset = REG_RDC_TABLE_OFFSET(table);
-	tbl_reg.bits.ldw.rdc = rdc;
-	NXGE_REG_WR64(handle, offset, tbl_reg.value);
-	return (NPI_SUCCESS);
-
-}
-
-npi_status_t
-npi_rxdma_dump_rdc_table(npi_handle_t handle,
-			    uint8_t table)
-{
-	uint64_t offset;
-	int tbl_offset;
-	uint64_t value;
-
-	ASSERT(RXDMA_TABLE_VALID(table));
-	if (!RXDMA_TABLE_VALID(table)) {
-		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-			    " npi_rxdma_dump_rdc_table"
-			    " Illegal RDC Table Number %d \n",
-			    table));
-		return (NPI_RXDMA_TABLE_INVALID);
-	}
-	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-			    "\n Register Dump for RDC Table %d \n",
-			    table));
-	offset = REG_RDC_TABLE_OFFSET(table);
-	for (tbl_offset = 0; tbl_offset < NXGE_MAX_RDCS; tbl_offset++) {
-		NXGE_REG_RD64(handle, offset, &value);
-		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-					    " 0x%08llx 0x%08llx \n",
-					    offset, value));
-		offset += 8;
-	}
-	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-			    "\n Register Dump for RDC Table %d done\n",
-			    table));
-	return (NPI_SUCCESS);
-
-}
-
-npi_status_t
-npi_rxdma_rdc_rbr_stat_get(npi_handle_t handle, uint8_t rdc,
-			    rbr_stat_t *rbr_stat)
-{
-
-	ASSERT(RXDMA_CHANNEL_VALID(rdc));
-	if (!RXDMA_CHANNEL_VALID(rdc)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			    " rxdma_rdc_rbr_stat_get"
-			    " Illegal RDC Number %d \n",
-			    rdc));
-		return (NPI_RXDMA_RDC_INVALID);
-	}
-
-	RXDMA_REG_READ64(handle, RBR_STAT_REG, rdc, &rbr_stat->value);
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_rxdma_rdc_rbr_head_get
- * Gets the current rbr head pointer.
- *
- * Inputs:
- *      handle:	opaque handle interpreted by the underlying OS
- *	rdc:		RX DMA Channel number
- *	hdptr		ptr to write the rbr head value
- *
- * Return:
- * NPI_SUCCESS
- * NPI_RXDMA_RDC_INVALID
- */
-npi_status_t
-npi_rxdma_rdc_rbr_head_get(npi_handle_t handle,
-			    uint8_t rdc, addr44_t *hdptr)
-{
-	rbr_hdh_t hh_ptr;
-	rbr_hdl_t hl_ptr;
-
-	ASSERT(RXDMA_CHANNEL_VALID(rdc));
-	if (!RXDMA_CHANNEL_VALID(rdc)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			    " rxdma_rdc_rbr_head_get"
-			    " Illegal RDC Number %d \n",
-			    rdc));
-		return (NPI_RXDMA_RDC_INVALID);
-	}
-	hh_ptr.value = 0;
-	hl_ptr.value = 0;
-	RXDMA_REG_READ64(handle, RBR_HDH_REG, rdc, &hh_ptr.value);
-	RXDMA_REG_READ64(handle, RBR_HDL_REG, rdc, &hl_ptr.value);
-	hdptr->bits.ldw = hl_ptr.bits.ldw.head_l << 2;
-	hdptr->bits.hdw = hh_ptr.bits.ldw.head_h;
-	return (NPI_SUCCESS);
-
-}
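-
-/*
- * Illustrative sketch (not part of the original source): turning the
- * addr44_t filled in above back into a flat 44-bit DMA address.  The
- * variable names are assumptions for the example only.
- */
-static npi_status_t
-example_get_rbr_head_addr(npi_handle_t handle, uint8_t rdc,
-			    uint64_t *head_addr)
-{
-	addr44_t head;
-	npi_status_t status;
-
-	status = npi_rxdma_rdc_rbr_head_get(handle, rdc, &head);
-	if (status == NPI_SUCCESS) {
-		/* high 12 bits are in hdw, low 32 bits in ldw */
-		*head_addr = ((uint64_t)head.bits.hdw << 32) |
-		    (uint64_t)head.bits.ldw;
-	}
-
-	return (status);
-}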
-
-npi_status_t
-npi_rxdma_rdc_rcr_qlen_get(npi_handle_t handle, uint8_t rdc,
-			    uint16_t *rcr_qlen)
-{
-
-	rcrstat_a_t stats;
-
-	ASSERT(RXDMA_CHANNEL_VALID(rdc));
-	if (!RXDMA_CHANNEL_VALID(rdc)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			    " rxdma_rdc_rcr_qlen_get"
-			    " Illegal RDC Number %d \n",
-			    rdc));
-		return (NPI_RXDMA_RDC_INVALID);
-	}
-
-	RXDMA_REG_READ64(handle, RCRSTAT_A_REG, rdc, &stats.value);
-	*rcr_qlen =  stats.bits.ldw.qlen;
-	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
-		    " rxdma_rdc_rcr_qlen_get"
-		    " RDC %d qlen %x qlen %x\n",
-		    rdc, *rcr_qlen, stats.bits.ldw.qlen));
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_rxdma_rdc_rcr_tail_get(npi_handle_t handle,
-			    uint8_t rdc, addr44_t *tail_addr)
-{
-
-	rcrstat_b_t th_ptr;
-	rcrstat_c_t tl_ptr;
-
-	ASSERT(RXDMA_CHANNEL_VALID(rdc));
-	if (!RXDMA_CHANNEL_VALID(rdc)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " rxdma_rdc_rcr_tail_get"
-				    " Illegal RDC Number %d \n",
-				    rdc));
-		return (NPI_RXDMA_RDC_INVALID);
-	}
-	th_ptr.value = 0;
-	tl_ptr.value = 0;
-	RXDMA_REG_READ64(handle, RCRSTAT_B_REG, rdc, &th_ptr.value);
-	RXDMA_REG_READ64(handle, RCRSTAT_C_REG, rdc, &tl_ptr.value);
-	tail_addr->bits.ldw = tl_ptr.bits.ldw.tlptr_l << 3;
-	tail_addr->bits.hdw = th_ptr.bits.ldw.tlptr_h;
-	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
-			    " rxdma_rdc_rcr_tail_get"
-			    " RDC %d rcr_tail %llx tl %x\n",
-			    rdc, tl_ptr.value,
-			    tl_ptr.bits.ldw.tlptr_l));
-
-	return (NPI_SUCCESS);
-
-
-}
-
-/*
- * npi_rxdma_rxctl_fifo_error_intr_set
- * Configure The RX ctrl fifo error interrupt generation
- *
- * Inputs:
- *      handle:	opaque handle interpreted by the underlying OS
- *	mask:	rx_ctl_dat_fifo_mask_t specifying the errors
- * valid fields in  rx_ctl_dat_fifo_mask_t structure are:
- * zcp_eop_err, ipp_eop_err, id_mismatch. If a field is set
- * to 1, we will enable interrupt generation for the
- * corresponding error condition. In the hardware, the bit(s)
- * have to be cleared to enable interrupt.
- *
- * Return:
- * NPI_SUCCESS
- *
- */
-npi_status_t
-npi_rxdma_rxctl_fifo_error_intr_set(npi_handle_t handle,
-				    rx_ctl_dat_fifo_mask_t *mask)
-{
-	uint64_t offset;
-	rx_ctl_dat_fifo_mask_t intr_mask;
-	offset = RX_CTL_DAT_FIFO_MASK_REG;
-	NXGE_REG_RD64(handle, offset, &intr_mask.value);
-
-	if (mask->bits.ldw.ipp_eop_err) {
-		intr_mask.bits.ldw.ipp_eop_err = 0;
-	}
-
-	if (mask->bits.ldw.zcp_eop_err) {
-		intr_mask.bits.ldw.zcp_eop_err = 0;
-	}
-
-	if (mask->bits.ldw.id_mismatch) {
-		intr_mask.bits.ldw.id_mismatch = 0;
-	}
-
-	NXGE_REG_WR64(handle, offset, intr_mask.value);
-	return (NPI_SUCCESS);
-}
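-
-/*
- * Illustrative sketch (not part of the original source): requesting
- * interrupts for all three control-FIFO error conditions.  Setting a
- * field to 1 asks the routine above to clear (un-mask) the matching
- * hardware mask bit.
- */
-static npi_status_t
-example_enable_rxctl_fifo_errors(npi_handle_t handle)
-{
-	rx_ctl_dat_fifo_mask_t mask;
-
-	mask.value = 0;
-	mask.bits.ldw.ipp_eop_err = 1;
-	mask.bits.ldw.zcp_eop_err = 1;
-	mask.bits.ldw.id_mismatch = 1;
-
-	return (npi_rxdma_rxctl_fifo_error_intr_set(handle, &mask));
-}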
-
-/*
- * npi_rxdma_rxctl_fifo_error_intr_get
- * Read The RX ctrl fifo error Status
- *
- * Inputs:
- *      handle:	opaque handle interpreted by the underlying OS
- *	stat:	rx_ctl_dat_fifo_stat_t to read the errors to
- * valid fields in  rx_ctl_dat_fifo_stat_t structure are:
- * zcp_eop_err, ipp_eop_err, id_mismatch.
- * Return:
- * NPI_SUCCESS
- *
- */
-npi_status_t
-npi_rxdma_rxctl_fifo_error_intr_get(npi_handle_t handle,
-			    rx_ctl_dat_fifo_stat_t *stat)
-{
-	uint64_t offset = RX_CTL_DAT_FIFO_STAT_REG;
-	NXGE_REG_RD64(handle, offset, &stat->value);
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_rxdma_rdc_rcr_pktread_update(npi_handle_t handle, uint8_t channel,
-				    uint16_t pkts_read)
-{
-
-	rx_dma_ctl_stat_t	cs;
-	uint16_t min_read = 0;
-
-	ASSERT(RXDMA_CHANNEL_VALID(channel));
-	if (!RXDMA_CHANNEL_VALID(channel)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-		    " npi_rxdma_rdc_rcr_pktread_update "
-		    " channel %d", channel));
-		return (NPI_FAILURE | NPI_RXDMA_CHANNEL_INVALID(channel));
-	}
-
-	if ((pkts_read < min_read) || (pkts_read > 512)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-		    " npi_rxdma_rdc_rcr_pktread_update "
-		    " pkts %d out of bound", pkts_read));
-		return (NPI_RXDMA_OPCODE_INVALID(pkts_read));
-	}
-
-	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
-				&cs.value);
-	cs.bits.ldw.pktread = pkts_read;
-	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG,
-				    channel, cs.value);
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_rxdma_rdc_rcr_bufread_update(npi_handle_t handle, uint8_t channel,
-					    uint16_t bufs_read)
-{
-
-	rx_dma_ctl_stat_t	cs;
-	uint16_t min_read = 0;
-
-	ASSERT(RXDMA_CHANNEL_VALID(channel));
-	if (!RXDMA_CHANNEL_VALID(channel)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-		    " npi_rxdma_rdc_rcr_bufread_update "
-		    " channel %d", channel));
-		return (NPI_FAILURE | NPI_RXDMA_CHANNEL_INVALID(channel));
-	}
-
-	if ((bufs_read < min_read) || (bufs_read > 512)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-		    " npi_rxdma_rdc_rcr_bufread_update "
-		    " bufs read %d out of bound", bufs_read));
-		return (NPI_RXDMA_OPCODE_INVALID(bufs_read));
-	}
-
-	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
-				&cs.value);
-	cs.bits.ldw.ptrread = bufs_read;
-	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG,
-				    channel, cs.value);
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_rxdma_rdc_rcr_read_update(npi_handle_t handle, uint8_t channel,
-				    uint16_t pkts_read, uint16_t bufs_read)
-{
-
-	rx_dma_ctl_stat_t	cs;
-
-	ASSERT(RXDMA_CHANNEL_VALID(channel));
-	if (!RXDMA_CHANNEL_VALID(channel)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-		    " npi_rxdma_rdc_rcr_read_update "
-		    " channel %d", channel));
-		return (NPI_FAILURE | NPI_RXDMA_CHANNEL_INVALID(channel));
-	}
-
-	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
-	    " npi_rxdma_rdc_rcr_read_update "
-	    " bufs read %d pkt read %d",
-		bufs_read, pkts_read));
-
-	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
-				&cs.value);
-
-	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
-		" npi_rxdma_rdc_rcr_read_update: "
-		" value: 0x%llx bufs read %d pkt read %d",
-		cs.value,
-		cs.bits.ldw.ptrread, cs.bits.ldw.pktread));
-
-	cs.bits.ldw.pktread = pkts_read;
-	cs.bits.ldw.ptrread = bufs_read;
-
-	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG,
-				    channel, cs.value);
-
-	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
-				&cs.value);
-
-	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
-	    " npi_rxdma_rdc_rcr_read_update: read back after update "
-	    " value: 0x%llx bufs read %d pkt read %d",
-		cs.value,
-		cs.bits.ldw.ptrread, cs.bits.ldw.pktread));
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_rxdma_channel_mex_set():
- *	This function is called to arm the DMA channel with
- *	mailbox updating capability. Software needs to rearm
- *	for each update by writing to the control and status register.
- *
- * Parameters:
- *	handle		- NPI handle (virtualization flag must be defined).
- *	channel		- logical RXDMA channel from 0 to 23.
- *			  (If virtualization flag is not set, then
- *			   logical channel is the same as the hardware
- *			   channel number).
- *
- * Return:
- *	NPI_SUCCESS		- If enable channel with mailbox update
- *				  is completed successfully.
- *
- *	Error:
- *	NPI error status code
- */
-npi_status_t
-npi_rxdma_channel_mex_set(npi_handle_t handle, uint8_t channel)
-{
-	return (npi_rxdma_channel_control(handle, RXDMA_MEX_SET, channel));
-}
-
-/*
- * npi_rxdma_channel_rcrto_clear():
- *	This function is called to reset RCRTO bit to 0.
- *
- * Parameters:
- *	handle		- NPI handle (virtualization flag must be defined).
- *	channel		- logical RXDMA channel from 0 to 23.
- *			  (If virtualization flag is not set, then
- *			   logical channel is the same as the hardware
- *			   channel number).
- * Return:
- *	NPI_SUCCESS
- *
- *	Error:
- *	NPI error status code
- */
-npi_status_t
-npi_rxdma_channel_rcrto_clear(npi_handle_t handle, uint8_t channel)
-{
-	return (npi_rxdma_channel_control(handle, RXDMA_RCRTO_CLEAR, channel));
-}
-
-/*
- * npi_rxdma_channel_pt_drop_pkt_clear():
- *	This function is called to clear the port drop packet bit (debug).
- *
- * Parameters:
- *	handle		- NPI handle (virtualization flag must be defined).
- *	channel		- logical RXDMA channel from 0 to 23.
- *			  (If virtualization flag is not set, then
- *			   logical channel is the same as the hardware
- *			   channel number).
- * Return:
- *	NPI_SUCCESS
- *
- *	Error:
- *	NPI error status code
- */
-npi_status_t
-npi_rxdma_channel_pt_drop_pkt_clear(npi_handle_t handle, uint8_t channel)
-{
-	return (npi_rxdma_channel_control(handle, RXDMA_PT_DROP_PKT_CLEAR,
-			channel));
-}
-
-/*
- * npi_rxdma_channel_wred_drop_clear():
- *	This function is called to clear the wred drop bit (debug only).
- *
- * Parameters:
- *	handle		- NPI handle (virtualization flag must be defined).
- *	channel		- logical RXDMA channel from 0 to 23.
- *			  (If virtualization flag is not set, then
- *			   logical channel is the same as the hardware
- *			   channel number).
- * Return:
- *	NPI_SUCCESS
- *
- *	Error:
- *	NPI error status code
- */
-npi_status_t
-npi_rxdma_channel_wred_drop_clear(npi_handle_t handle, uint8_t channel)
-{
-	return (npi_rxdma_channel_control(handle, RXDMA_WRED_DROP_CLEAR,
-			channel));
-}
-
-/*
- * npi_rxdma_channel_rcr_shfull_clear():
- *	This function is called to clear RCR shadow full bit.
- *
- * Parameters:
- *	handle		- NPI handle (virtualization flag must be defined).
- *	channel		- logical RXDMA channel from 0 to 23.
- *			  (If virtualization flag is not set, then
- *			   logical channel is the same as the hardware
- *			   channel number).
- * Return:
- *	NPI_SUCCESS
- *
- *	Error:
- *	NPI error status code
- */
-npi_status_t
-npi_rxdma_channel_rcr_shfull_clear(npi_handle_t handle, uint8_t channel)
-{
-	return (npi_rxdma_channel_control(handle, RXDMA_RCR_SFULL_CLEAR,
-			channel));
-}
-
-/*
- * npi_rxdma_channel_rcrfull_clear():
- *	This function is called to clear RCR full bit.
- *
- * Parameters:
- *	handle		- NPI handle (virtualization flag must be defined).
- *	channel		- logical RXDMA channel from 0 to 23.
- *			  (If virtualization flag is not set, then
- *			   logical channel is the same as the hardware
- *			   channel number).
- * Return:
- *	NPI_SUCCESS
- *
- *	Error:
- *	NPI error status code
- */
-npi_status_t
-npi_rxdma_channel_rcr_full_clear(npi_handle_t handle, uint8_t channel)
-{
-	return (npi_rxdma_channel_control(handle, RXDMA_RCR_FULL_CLEAR,
-			channel));
-}
-
-npi_status_t
-npi_rxdma_channel_rbr_empty_clear(npi_handle_t handle, uint8_t channel)
-{
-	return (npi_rxdma_channel_control(handle,
-		RXDMA_RBR_EMPTY_CLEAR, channel));
-}
-
-npi_status_t
-npi_rxdma_channel_cs_clear_all(npi_handle_t handle, uint8_t channel)
-{
-	return (npi_rxdma_channel_control(handle, RXDMA_CS_CLEAR_ALL, channel));
-}
-
-/*
- * npi_rxdma_channel_control():
- *	This function is called to control a receive DMA channel
- *	for arming the channel with mailbox updates, resetting
- *	various event status bits (control and status register).
- *
- * Parameters:
- *	handle		- NPI handle (virtualization flag must be defined).
- *	control		- NPI defined control type supported:
- *				- RXDMA_MEX_SET
- * 				- RXDMA_RCRTO_CLEAR
- *				- RXDMA_PT_DROP_PKT_CLEAR
- *				- RXDMA_WRED_DROP_CLEAR
- *				- RXDMA_RCR_SFULL_CLEAR
- *				- RXDMA_RCR_FULL_CLEAR
- *				- RXDMA_RBR_PRE_EMPTY_CLEAR
- *				- RXDMA_RBR_EMPTY_CLEAR
- *				- RXDMA_CS_CLEAR_ALL
- *	channel		- logical RXDMA channel from 0 to 23.
- *			  (If virtualization flag is not set, then
- *			   logical channel is the same as the hardware
- *			   channel number).
- * Return:
- *	NPI_SUCCESS
- *
- *	Error:
- *	NPI error status code
- */
-npi_status_t
-npi_rxdma_channel_control(npi_handle_t handle, rxdma_cs_cntl_t control,
-			uint8_t channel)
-{
-
-	rx_dma_ctl_stat_t	cs;
-
-	ASSERT(RXDMA_CHANNEL_VALID(channel));
-	if (!RXDMA_CHANNEL_VALID(channel)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-		    " npi_rxdma_channel_control"
-		    " channel %d", channel));
-		return (NPI_FAILURE | NPI_RXDMA_CHANNEL_INVALID(channel));
-	}
-
-	switch (control) {
-	case RXDMA_MEX_SET:
-		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
-				&cs.value);
-		cs.bits.hdw.mex = 1;
-		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG,
-				channel, cs.value);
-		break;
-
-	case RXDMA_RCRTO_CLEAR:
-		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
-				&cs.value);
-		cs.bits.hdw.rcrto = 0;
-		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
-				cs.value);
-		break;
-
-	case RXDMA_PT_DROP_PKT_CLEAR:
-		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
-				&cs.value);
-		cs.bits.hdw.port_drop_pkt = 0;
-		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
-				cs.value);
-		break;
-
-	case RXDMA_WRED_DROP_CLEAR:
-		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
-				&cs.value);
-		cs.bits.hdw.wred_drop = 0;
-		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
-				cs.value);
-		break;
-
-	case RXDMA_RCR_SFULL_CLEAR:
-		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
-				&cs.value);
-		cs.bits.hdw.rcr_shadow_full = 0;
-		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
-				cs.value);
-		break;
-
-	case RXDMA_RCR_FULL_CLEAR:
-		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
-				&cs.value);
-		cs.bits.hdw.rcrfull = 0;
-		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
-				cs.value);
-		break;
-
-	case RXDMA_RBR_PRE_EMPTY_CLEAR:
-		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
-				&cs.value);
-		cs.bits.hdw.rbr_pre_empty = 0;
-		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
-				cs.value);
-		break;
-
-	case RXDMA_RBR_EMPTY_CLEAR:
-		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
-				&cs.value);
-		cs.bits.hdw.rbr_empty = 1;
-		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
-				cs.value);
-		break;
-
-	case RXDMA_CS_CLEAR_ALL:
-		cs.value = 0;
-		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
-				cs.value);
-		break;
-
-	default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    "npi_rxdma_channel_control"
-				    " control %d", control));
-		return (NPI_FAILURE | NPI_RXDMA_OPCODE_INVALID(channel));
-	}
-
-	return (NPI_SUCCESS);
-}
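-
-/*
- * Illustrative sketch (not part of the original source): a typical
- * post-interrupt sequence built from the per-bit wrappers above --
- * clear the timeout indication and then re-arm the mailbox update
- * for the channel.
- */
-static npi_status_t
-example_rearm_channel(npi_handle_t handle, uint8_t channel)
-{
-	npi_status_t status;
-
-	status = npi_rxdma_channel_rcrto_clear(handle, channel);
-	if (status != NPI_SUCCESS)
-		return (status);
-
-	return (npi_rxdma_channel_mex_set(handle, channel));
-}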
-
-/*
- * npi_rxdma_control_status():
- *	This function is called to operate on the control
- *	and status register.
- *
- * Parameters:
- *	handle		- NPI handle
- *	op_mode		- OP_GET: get hardware control and status
- *			  OP_SET: set hardware control and status
- *			  OP_UPDATE: update hardware control and status.
- *			  OP_CLEAR: clear control and status register to 0s.
- *	channel		- hardware RXDMA channel from 0 to 23.
- *	cs_p		- pointer to hardware defined control and status
- *			  structure.
- * Return:
- *	NPI_SUCCESS
- *
- *	Error:
- *	NPI error status code
- */
-npi_status_t
-npi_rxdma_control_status(npi_handle_t handle, io_op_t op_mode,
-			uint8_t channel, p_rx_dma_ctl_stat_t cs_p)
-{
-	int			status = NPI_SUCCESS;
-	rx_dma_ctl_stat_t	cs;
-
-	ASSERT(RXDMA_CHANNEL_VALID(channel));
-	if (!RXDMA_CHANNEL_VALID(channel)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-		    "npi_rxdma_control_status"
-		    " channel %d", channel));
-		return (NPI_FAILURE | NPI_RXDMA_CHANNEL_INVALID(channel));
-	}
-
-	switch (op_mode) {
-	case OP_GET:
-		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
-				&cs_p->value);
-		break;
-
-	case OP_SET:
-		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
-			cs_p->value);
-		break;
-
-	case OP_UPDATE:
-		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
-				&cs.value);
-		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
-			cs_p->value | cs.value);
-		break;
-
-	default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-		    "npi_rxdma_control_status"
-		    " control %d", op_mode));
-		return (NPI_FAILURE | NPI_RXDMA_OPCODE_INVALID(channel));
-	}
-
-	return (status);
-}
-
-/*
- * npi_rxdma_event_mask():
- *	This function is called to operate on the event mask
- *	register which is used for generating interrupts.
- *
- * Parameters:
- *	handle		- NPI handle
- *	op_mode		- OP_GET: get hardware event mask
- *			  OP_SET: set hardware interrupt event masks
- *			  OP_CLEAR: clear control and status register to 0s.
- *	channel		- hardware RXDMA channel from 0 to 23.
- *	mask_p		- pointer to hardware defined event mask
- *			  structure.
- * Return:
- *	NPI_SUCCESS		- If the set is completed successfully.
- *
- *	Error:
- *	NPI error status code
- */
-npi_status_t
-npi_rxdma_event_mask(npi_handle_t handle, io_op_t op_mode,
-		uint8_t channel, p_rx_dma_ent_msk_t mask_p)
-{
-	int			status = NPI_SUCCESS;
-	rx_dma_ent_msk_t	mask;
-
-	ASSERT(RXDMA_CHANNEL_VALID(channel));
-	if (!RXDMA_CHANNEL_VALID(channel)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-		    "npi_rxdma_event_mask"
-		    " channel %d", channel));
-		return (NPI_FAILURE | NPI_RXDMA_CHANNEL_INVALID(channel));
-	}
-
-	switch (op_mode) {
-	case OP_GET:
-		RXDMA_REG_READ64(handle, RX_DMA_ENT_MSK_REG, channel,
-				&mask_p->value);
-		break;
-
-	case OP_SET:
-		RXDMA_REG_WRITE64(handle, RX_DMA_ENT_MSK_REG, channel,
-				mask_p->value);
-		break;
-
-	case OP_UPDATE:
-		RXDMA_REG_READ64(handle, RX_DMA_ENT_MSK_REG, channel,
-				&mask.value);
-		RXDMA_REG_WRITE64(handle, RX_DMA_ENT_MSK_REG, channel,
-			mask_p->value | mask.value);
-		break;
-
-	default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-		    "npi_rxdma_event_mask"
-		    " eventmask %d", op_mode));
-		return (NPI_FAILURE | NPI_RXDMA_OPCODE_INVALID(channel));
-	}
-
-	return (status);
-}
-
-/*
- * npi_rxdma_event_mask_config():
- *	This function is called to operate on the event mask
- *	register which is used for generating interrupts
- *	and status register.
- *
- * Parameters:
- *	handle		- NPI handle
- *	op_mode		- OP_GET: get hardware event mask
- *			  OP_SET: set hardware interrupt event masks
- *			  OP_CLEAR: clear control and status register to 0s.
- *	channel		- hardware RXDMA channel from 0 to 23.
- *	mask_cfgp		- pointer to NPI defined event mask
- *			  enum data type.
- * Return:
- *	NPI_SUCCESS		- If the set is completed successfully.
- *
- *	Error:
- *	NPI error status code
- */
-npi_status_t
-npi_rxdma_event_mask_config(npi_handle_t handle, io_op_t op_mode,
-		uint8_t channel, rxdma_ent_msk_cfg_t *mask_cfgp)
-{
-	int		status = NPI_SUCCESS;
-	uint64_t	value;
-
-	ASSERT(RXDMA_CHANNEL_VALID(channel));
-	if (!RXDMA_CHANNEL_VALID(channel)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-		    "npi_rxdma_event_mask_config"
-		    " channel %d", channel));
-		return (NPI_FAILURE | NPI_RXDMA_CHANNEL_INVALID(channel));
-	}
-
-	switch (op_mode) {
-	case OP_GET:
-		RXDMA_REG_READ64(handle, RX_DMA_ENT_MSK_REG, channel,
-				mask_cfgp);
-		break;
-
-	case OP_SET:
-		RXDMA_REG_WRITE64(handle, RX_DMA_ENT_MSK_REG, channel,
-				*mask_cfgp);
-		break;
-
-	case OP_UPDATE:
-		RXDMA_REG_READ64(handle, RX_DMA_ENT_MSK_REG, channel, &value);
-		RXDMA_REG_WRITE64(handle, RX_DMA_ENT_MSK_REG, channel,
-			*mask_cfgp | value);
-		break;
-
-	case OP_CLEAR:
-		RXDMA_REG_WRITE64(handle, RX_DMA_ENT_MSK_REG, channel,
-			CFG_RXDMA_MASK_ALL);
-		break;
-	default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-		    "npi_rxdma_event_mask_config"
-		    " eventmask %d", op_mode));
-		return (NPI_FAILURE | NPI_RXDMA_OPCODE_INVALID(channel));
-	}
-
-	return (status);
-}
--- a/usr/src/uts/sun4v/io/nxge/npi/npi_rxdma.h	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1335 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef _NPI_RXDMA_H
-#define	_NPI_RXDMA_H
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#ifdef	__cplusplus
-extern "C" {
-#endif
-
-#include <npi.h>
-
-#include "nxge_defs.h"
-#include "nxge_hw.h"
-#include <nxge_rxdma_hw.h>
-
-/*
- * Register offset (0x200 bytes for each channel) for receive ring registers.
- */
-#define	NXGE_RXDMA_OFFSET(x, v, channel) (x + \
-		(!v ? DMC_OFFSET(channel) : \
-		    RDMC_PIOVADDR_OFFSET(channel)))
-
-
-#define	 REG_FZC_RDC_OFFSET(reg, rdc) (reg + RX_LOG_DMA_OFFSET(rdc))
-
-#define	 REG_RDC_TABLE_OFFSET(table) \
-	    (RDC_TBL_REG + table * (NXGE_MAX_RDCS * 8))
-
-#define	RXDMA_REG_READ64(handle, reg, channel, data_p) {\
-	NXGE_REG_RD64(handle, (NXGE_RXDMA_OFFSET(reg, handle.is_vraddr,\
-			channel)), (data_p))\
-}
-
-#define	RXDMA_REG_READ32(handle, reg, channel) \
-	NXGE_NPI_PIO_READ32(handle, (NXGE_RXDMA_OFFSET(reg, handle.is_vraddr,\
-			channel)))
-
-
-#define	RXDMA_REG_WRITE64(handle, reg, channel, data) {\
-	NXGE_REG_WR64(handle, (NXGE_RXDMA_OFFSET(reg, handle.is_vraddr,\
-			channel)), (data))\
-}
-
-/*
- * RX NPI error codes
- */
-#define	RXDMA_ER_ST			(RXDMA_BLK_ID << NPI_BLOCK_ID_SHIFT)
-#define	RXDMA_ID_SHIFT(n)		(n << NPI_PORT_CHAN_SHIFT)
-
-
-#define	NPI_RXDMA_ERROR			RXDMA_ER_ST
-
-#define	NPI_RXDMA_SW_PARAM_ERROR	(NPI_RXDMA_ERROR | 0x40)
-#define	NPI_RXDMA_HW_ERROR	(NPI_RXDMA_ERROR | 0x80)
-
-#define	NPI_RXDMA_RDC_INVALID		(NPI_RXDMA_ERROR | CHANNEL_INVALID)
-#define	NPI_RXDMA_PAGE_INVALID		(NPI_RXDMA_ERROR | LOGICAL_PAGE_INVALID)
-#define	NPI_RXDMA_RESET_ERR		(NPI_RXDMA_HW_ERROR | RESET_FAILED)
-#define	NPI_RXDMA_DISABLE_ERR		(NPI_RXDMA_HW_ERROR | 0x0000a)
-#define	NPI_RXDMA_ENABLE_ERR		(NPI_RXDMA_HW_ERROR | 0x0000b)
-#define	NPI_RXDMA_FUNC_INVALID		(NPI_RXDMA_SW_PARAM_ERROR | 0x0000a)
-#define	NPI_RXDMA_BUFSZIE_INVALID	(NPI_RXDMA_SW_PARAM_ERROR | 0x0000b)
-#define	NPI_RXDMA_RBRSZIE_INVALID	(NPI_RXDMA_SW_PARAM_ERROR | 0x0000c)
-#define	NPI_RXDMA_RCRSZIE_INVALID	(NPI_RXDMA_SW_PARAM_ERROR | 0x0000d)
-#define	NPI_RXDMA_PORT_INVALID		(NPI_RXDMA_ERROR | PORT_INVALID)
-#define	NPI_RXDMA_TABLE_INVALID		(NPI_RXDMA_ERROR | RDC_TAB_INVALID)
-
-#define	NPI_RXDMA_CHANNEL_INVALID(n)	(RXDMA_ID_SHIFT(n) |	\
-					NPI_RXDMA_ERROR | CHANNEL_INVALID)
-#define	NPI_RXDMA_OPCODE_INVALID(n)	(RXDMA_ID_SHIFT(n) |	\
-					NPI_RXDMA_ERROR | OPCODE_INVALID)
-
-
-#define	NPI_RXDMA_ERROR_ENCODE(err, rdc)	\
-	(RXDMA_ID_SHIFT(rdc) | RXDMA_ER_ST | err)
-
-
-#define	RXDMA_CHANNEL_VALID(rdc) \
-	((rdc < NXGE_MAX_RDCS))
-
-#define	RXDMA_PORT_VALID(port) \
-	((port < MAX_PORTS_PER_NXGE))
-
-#define	RXDMA_TABLE_VALID(table) \
-	((table < NXGE_MAX_RDC_GROUPS))
-
-
-#define	RXDMA_PAGE_VALID(page) \
-	((page == 0) || (page == 1))
-
-#define	RXDMA_BUFF_OFFSET_VALID(offset) \
-	((offset == SW_OFFSET_NO_OFFSET) || \
-	    (offset == SW_OFFSET_64) || \
-	    (offset == SW_OFFSET_128))
-
-
-#define	RXDMA_RCR_TO_VALID(tov) ((tov) && (tov < 64))
-#define	RXDMA_RCR_THRESH_VALID(thresh) ((thresh) && (thresh < 512))
-
-
-/*
- * RXDMA NPI defined control types.
- */
-typedef	enum _rxdma_cs_cntl_e {
-	RXDMA_CS_CLEAR_ALL		= 0x1,
-	RXDMA_MEX_SET			= 0x2,
-	RXDMA_RCRTO_CLEAR		= 0x8,
-	RXDMA_PT_DROP_PKT_CLEAR		= 0x10,
-	RXDMA_WRED_DROP_CLEAR		= 0x20,
-	RXDMA_RCR_SFULL_CLEAR		= 0x40,
-	RXDMA_RCR_FULL_CLEAR		= 0x80,
-	RXDMA_RBR_PRE_EMPTY_CLEAR	= 0x100,
-	RXDMA_RBR_EMPTY_CLEAR		= 0x200
-} rxdma_cs_cntl_t;
-
-/*
- * RXDMA NPI defined event masks (mapped to the hardware defined masks).
- */
-typedef	enum _rxdma_ent_msk_cfg_e {
-	CFG_RXDMA_ENT_MSK_CFIGLOGPGE_MASK = RX_DMA_ENT_MSK_CFIGLOGPGE_MASK,
-	CFG_RXDMA_ENT_MSK_RBRLOGPGE_MASK  = RX_DMA_ENT_MSK_RBRLOGPGE_MASK,
-	CFG_RXDMA_ENT_MSK_RBRFULL_MASK	  = RX_DMA_ENT_MSK_RBRFULL_MASK,
-	CFG_RXDMA_ENT_MSK_RBREMPTY_MASK	  = RX_DMA_ENT_MSK_RBREMPTY_MASK,
-	CFG_RXDMA_ENT_MSK_RCRFULL_MASK	  = RX_DMA_ENT_MSK_RCRFULL_MASK,
-	CFG_RXDMA_ENT_MSK_RCRINCON_MASK	  = RX_DMA_ENT_MSK_RCRINCON_MASK,
-	CFG_RXDMA_ENT_MSK_CONFIG_ERR	  = RX_DMA_ENT_MSK_CONFIG_ERR_MASK,
-	CFG_RXDMA_ENT_MSK_RCR_SH_FULL_MASK = RX_DMA_ENT_MSK_RCRSH_FULL_MASK,
-	CFG_RXDMA_ENT_MSK_RBR_PRE_EMTY_MASK = RX_DMA_ENT_MSK_RBR_PRE_EMPTY_MASK,
-	CFG_RXDMA_ENT_MSK_WRED_DROP_MASK   = RX_DMA_ENT_MSK_WRED_DROP_MASK,
-	CFG_RXDMA_ENT_MSK_PT_DROP_PKT_MASK = RX_DMA_ENT_MSK_PTDROP_PKT_MASK,
-	CFG_RXDMA_ENT_MSK_RBR_PRE_PAR_MASK = RX_DMA_ENT_MSK_RBR_PRE_PAR_MASK,
-	CFG_RXDMA_ENT_MSK_RCR_SHA_PAR_MASK = RX_DMA_ENT_MSK_RCR_SHA_PAR_MASK,
-	CFG_RXDMA_ENT_MSK_RCRTO_MASK	  = RX_DMA_ENT_MSK_RCRTO_MASK,
-	CFG_RXDMA_ENT_MSK_THRES_MASK	  = RX_DMA_ENT_MSK_THRES_MASK,
-	CFG_RXDMA_ENT_MSK_DC_FIFO_ERR_MASK  = RX_DMA_ENT_MSK_DC_FIFO_ERR_MASK,
-	CFG_RXDMA_ENT_MSK_RCR_ACK_ERR_MASK  = RX_DMA_ENT_MSK_RCR_ACK_ERR_MASK,
-	CFG_RXDMA_ENT_MSK_RSP_DAT_ERR_MASK  = RX_DMA_ENT_MSK_RSP_DAT_ERR_MASK,
-	CFG_RXDMA_ENT_MSK_BYTE_EN_BUS_MASK  = RX_DMA_ENT_MSK_BYTE_EN_BUS_MASK,
-	CFG_RXDMA_ENT_MSK_RSP_CNT_ERR_MASK  = RX_DMA_ENT_MSK_RSP_CNT_ERR_MASK,
-	CFG_RXDMA_ENT_MSK_RBR_TMOUT_MASK  = RX_DMA_ENT_MSK_RBR_TMOUT_MASK,
-
-	CFG_RXDMA_MASK_ALL	  = (RX_DMA_ENT_MSK_CFIGLOGPGE_MASK |
-					RX_DMA_ENT_MSK_RBRLOGPGE_MASK |
-					RX_DMA_ENT_MSK_RBRFULL_MASK |
-					RX_DMA_ENT_MSK_RBREMPTY_MASK |
-					RX_DMA_ENT_MSK_RCRFULL_MASK |
-					RX_DMA_ENT_MSK_RCRINCON_MASK |
-					RX_DMA_ENT_MSK_CONFIG_ERR_MASK |
-					RX_DMA_ENT_MSK_RCRSH_FULL_MASK |
-					RX_DMA_ENT_MSK_RBR_PRE_EMPTY_MASK |
-					RX_DMA_ENT_MSK_WRED_DROP_MASK |
-					RX_DMA_ENT_MSK_PTDROP_PKT_MASK |
-					RX_DMA_ENT_MSK_RBR_PRE_PAR_MASK |
-					RX_DMA_ENT_MSK_RCR_SHA_PAR_MASK |
-					RX_DMA_ENT_MSK_RCRTO_MASK |
-					RX_DMA_ENT_MSK_THRES_MASK |
-					RX_DMA_ENT_MSK_DC_FIFO_ERR_MASK |
-					RX_DMA_ENT_MSK_RCR_ACK_ERR_MASK |
-					RX_DMA_ENT_MSK_RSP_DAT_ERR_MASK |
-					RX_DMA_ENT_MSK_BYTE_EN_BUS_MASK |
-					RX_DMA_ENT_MSK_RSP_CNT_ERR_MASK |
-					RX_DMA_ENT_MSK_RBR_TMOUT_MASK)
-} rxdma_ent_msk_cfg_t;
-
-
-
-typedef union _addr44 {
-	uint64_t	addr;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t rsrvd:20;
-		uint32_t hdw:12;
-		uint32_t ldw;
-#else
-		uint32_t ldw;
-		uint32_t hdw:12;
-		uint32_t rsrvd:20;
-#endif
-	} bits;
-} addr44_t;
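-
-/*
- * Illustrative sketch (not part of the original header): splitting a
- * flat 44-bit DMA address into the high/low halves carried by addr44_t,
- * for example before programming a descriptor base address.
- */
-static void
-example_addr44_split(uint64_t dma_addr, addr44_t *a44)
-{
-	a44->addr = 0;
-	a44->bits.ldw = (uint32_t)(dma_addr & 0xffffffffULL);
-	a44->bits.hdw = (uint32_t)((dma_addr >> 32) & 0xfffULL);
-}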
-
-
-/*
- * npi_rxdma_cfg_default_port_rdc()
- * Set the default rdc for the port
- *
- * Inputs:
- *	handle:		register handle interpreted by the underlying OS
- *	portnm:		Physical Port Number
- *	rdc:	RX DMA Channel number
- *
- * Return:
- * NPI_SUCCESS
- * NPI_RXDMA_RDC_INVALID
- * NPI_RXDMA_PORT_INVALID
- *
- */
-
-npi_status_t npi_rxdma_cfg_default_port_rdc(npi_handle_t,
-				    uint8_t, uint8_t);
-
-/*
- * npi_rxdma_cfg_rdc_table()
- * Configure/populate the RDC table
- *
- * Inputs:
- *	handle:		register handle interpreted by the underlying OS
- *	table:		RDC Group Number
- *	rdc[]:	 Array of RX DMA Channels
- *
- * Return:
- * NPI_SUCCESS
- * NPI_RXDMA_TABLE_INVALID
- *
- */
-
-npi_status_t npi_rxdma_cfg_rdc_table(npi_handle_t,
-			    uint8_t, uint8_t []);
-
-npi_status_t npi_rxdma_cfg_rdc_table_default_rdc(npi_handle_t,
-					    uint8_t, uint8_t);
-npi_status_t npi_rxdma_cfg_rdc_rcr_timeout_disable(npi_handle_t,
-					    uint8_t);
-
-
-/*
- * npi_rxdma_32bitmode_enable()
- * Enable 32 bit mode
- *
- * Inputs:
- *	handle:		register handle interpreted by the underlying OS
- *
- * Return:
- * NPI_SUCCESS
- * NPI_FAILURE
- * NPI_HW_ERR
- *
- */
-
-npi_status_t npi_rxdma_cfg_32bitmode_enable(npi_handle_t);
-
-
-/*
- * npi_rxdma_32bitmode_disable()
- * disable 32 bit mode
- *
- * Inputs:
- *	handle:		register handle interpreted by the underlying OS
- *
- * Return:
- * NPI_SUCCESS
- * NPI_FAILURE
- * NPI_HW_ERR
- *
- */
-
-
-npi_status_t npi_rxdma_cfg_32bitmode_disable(npi_handle_t);
-
-/*
- * npi_rxdma_cfg_ram_access_enable()
- * Enable PIO access to shadow and prefetch memory.
- * In the case of DMA errors, software may need to
- * initialize the shadow and prefetch memories to a
- * sane value (for example, by clearing them) before re-enabling
- * the DMA channel.
- *
- * Inputs:
- *	handle:		register handle interpreted by the underlying OS
- *
- * Return:
- * NPI_SUCCESS
- * NPI_FAILURE
- * NPI_HW_ERR
- *
- */
-
-npi_status_t npi_rxdma_cfg_ram_access_enable(npi_handle_t);
-
-
-/*
- * npi_rxdma_cfg_ram_access_disable()
- * Disable PIO access to shadow and prefetch memory.
- * This is the normal operation mode.
- *
- * Inputs:
- *	handle:		register handle interpreted by the underlying OS
- *
- * Return:
- * NPI_SUCCESS
- * NPI_FAILURE
- * NPI_HW_ERR
- *
- */
-
-npi_status_t npi_rxdma_cfg_ram_access_disable(npi_handle_t);
-
-
-/*
- * npi_rxdma_cfg_clock_div_set()
- * init the clock division, used for RX timers
- * This determines the granularity of RX DMA countdown timers
- * It depends on the system clock. For example if the system
- * clock is 300 MHz, a value of 30000 will yield a granularity
- * of 100usec.
- *
- * Inputs:
- *	handle:		register handle interpreted by the underlying OS
- *	count:		System clock divider
- *
- * Return:
- * NPI_SUCCESS
- * NPI_FAILURE
- * NPI_SW_ERR
- * NPI_HW_ERR
- *
- */
-
-npi_status_t npi_rxdma_cfg_clock_div_set(npi_handle_t, uint16_t);
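-
-/*
- * Illustrative sketch (not part of the original header): deriving the
- * divider from the description above.  With a hypothetical 300 MHz
- * system clock, 30000 clock ticks give a 100 usec timer granularity.
- */
-static npi_status_t
-example_set_rx_timer_tick(npi_handle_t handle)
-{
-	uint32_t sysclk_mhz = 300;	/* assumed system clock (MHz) */
-	uint32_t tick_usec = 100;	/* desired granularity (usec) */
-	uint16_t divider = (uint16_t)(sysclk_mhz * tick_usec);
-
-	return (npi_rxdma_cfg_clock_div_set(handle, divider));
-}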
-
-/*
- * npi_rxdma_cfg_red_rand_init()
- * init the WRED Discard
- * By default, it is enabled
- *
- * Inputs:
- *	handle:		register handle interpreted by the underlying OS
- *	init_value:	WRED init value
- *
- * Return:
- * NPI_SUCCESS
- * NPI_FAILURE
- * NPI_SW_ERR
- * NPI_HW_ERR
- *
- */
-
-npi_status_t npi_rxdma_cfg_red_rand_init(npi_handle_t, uint16_t);
-
-/*
- * npi_rxdma_cfg_wred_disable()
- * init the WRED Discard
- * By default, it is enabled
- *
- * Inputs:
- *	handle:		register handle interpreted by the underlying OS
- *
- * Return:
- * NPI_SUCCESS
- * NPI_FAILURE
- * NPI_SW_ERR
- * NPI_HW_ERR
- *
- */
-
-
-npi_status_t npi_rxdma_cfg_wred_disable(npi_handle_t);
-
-/*
- * npi_rxdma_cfg_wred_param()
- * Configure per rxdma channel WRED parameters
- * By default, it is enabled
- *
- * Inputs:
- *	handle:		register handle interpreted by the underlying OS
- *	rdc:	RX DMA Channel number
- *	wred_params:	WRED configuration parameters
- *
- * Return:
- * NPI_SUCCESS
- * NPI_FAILURE
- * NPI_SW_ERR
- * NPI_HW_ERR
- *
- */
-
-
-
-npi_status_t npi_rxdma_cfg_wred_param(npi_handle_t, uint8_t,
-				    rdc_red_para_t *);
-
-
-/*
- * npi_rxdma_cfg_port_ddr_weight()
- * Set the DDR weight for a port.
- *
- * Inputs:
- *	handle:		register handle interpreted by the underlying OS
- *	portnm:		Physical Port Number
- *	weight:		Port relative weight (in approx. bytes)
- *			Default values are:
- *			0x400 (port 0 and 1) corresponding to 10 standard
- *			      size (1500 bytes) Frames
- *			0x66 (port 2 and 3) corresponding to 10% 10Gig ports
- *
- * Return:
- * NPI_SUCCESS
- * NPI_FAILURE
- * NPI_HW_ERR
- * NPI_SW_ERR
- *
- */
-
-npi_status_t npi_rxdma_cfg_port_ddr_weight(npi_handle_t,
-				    uint8_t, uint32_t);
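-
-/*
- * Illustrative sketch (not part of the original header): programming
- * the documented default weight for port 0 (0x400, roughly ten
- * standard 1500-byte frames).
- */
-static npi_status_t
-example_set_port0_weight(npi_handle_t handle)
-{
-	return (npi_rxdma_cfg_port_ddr_weight(handle, 0, 0x400));
-}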
-
-
-/*
- * npi_rxdma_port_usage_get()
- * Gets the port usage, in terms of 16 byte blocks
- *
- * NOTE: The register count is cleared upon reading.
- *
- * Inputs:
- *	handle:		register handle interpreted by the underlying OS
- *	portnm:		Physical Port Number
- *	blocks:		ptr to save current count.
- *
- * Return:
- * NPI_SUCCESS
- * NPI_FAILURE
- * NPI_HW_ERR
- * NPI_SW_ERR
- *
- */
-
-npi_status_t npi_rxdma_port_usage_get(npi_handle_t,
-				    uint8_t, uint32_t *);
-
-
-/*
- * npi_rxdma_cfg_logical_page()
- * Configure per rxdma channel Logical page
- *
- * To disable the logical page, set valid = 0;
- *
- * Inputs:
- *	handle:		register handle interpreted by the underlying OS
- *	rdc:		RX DMA Channel number
- *	page_params:	Logical Page configuration parameters
- *
- * Return:
- * NPI_SUCCESS
- * NPI_FAILURE
- * NPI_SW_ERR
- * NPI_HW_ERR
- *
- */
-
-
-
-npi_status_t npi_rxdma_cfg_logical_page(npi_handle_t, uint8_t,
-				    dma_log_page_t *);
-
-
-/*
- * npi_rxdma_cfg_logical_page_handle()
- * Configure per rxdma channel Logical page handle
- *
- *
- * Inputs:
- *	handle:		register handle interpreted by the underlying OS
- *	rdc:		RX DMA Channel number
- *	pg_handle:	Logical Page handle
- *
- * Return:
- * NPI_SUCCESS
- * NPI_FAILURE
- * NPI_SW_ERR
- * NPI_HW_ERR
- *
- */
-
-
-npi_status_t npi_rxdma_cfg_logical_page_handle(npi_handle_t, uint8_t,
-				    uint64_t);
-
-
-
-
-npi_status_t npi_rxdma_cfg_logical_page_disable(npi_handle_t,
-				    uint8_t, uint8_t);
-
-typedef enum _bsize {
-	SIZE_0B = 0x0,
-	SIZE_64B,
-	SIZE_128B,
-	SIZE_192B,
-	SIZE_256B,
-	SIZE_512B,
-	SIZE_1KB,
-	SIZE_2KB,
-	SIZE_4KB,
-	SIZE_8KB,
-	SIZE_16KB,
-	SIZE_32KB
-} bsize_t;
-
-
-
-/*
- * npi_rxdma_cfg_rdc_ring()
- * Configure The RDC channel Rcv Buffer Ring
- *
- * Inputs:
- *	rdc:		RX DMA Channel number
- *	rdc_params:	RDC configuration parameters
- *
- * Return:
- * NPI_SUCCESS
- * NPI_FAILURE
- * NPI_SW_ERR
- * NPI_HW_ERR
- *
- */
-
-typedef struct _rdc_desc_cfg_t {
-	uint8_t mbox_enable;	/* Enable mailbox update */
-	uint8_t full_hdr;	/* Enable full (18b) header */
-	uint8_t offset;	/* 64 byte offsets */
-	uint8_t valid2;	/* size 2 is valid */
-	bsize_t size2;	/* Size 2 length */
-	uint8_t valid1;	/* size 1 is valid */
-	bsize_t size1;	/* Size 1 length */
-	uint8_t valid0;	/* size 0 is valid */
-	bsize_t size0;	/* Size 0 length */
-	bsize_t page_size;	/* Page or buffer Size */
-	uint8_t rcr_timeout_enable;
-	uint8_t rcr_timeout;
-	uint16_t rcr_threshold;
-	uint16_t rcr_len;	   /* RCR Descriptor size (entries) */
-	uint16_t rbr_len;	   /* RBR Descriptor size (entries) */
-	uint64_t mbox_addr;	   /* Mailbox Address */
-	uint64_t rcr_addr;	   /* RCR Address */
-	uint64_t rbr_addr;	   /* RBR Address */
-} rdc_desc_cfg_t;
-
-
-
-npi_status_t npi_rxdma_cfg_rdc_ring(npi_handle_t, uint8_t,
-				    rdc_desc_cfg_t *);
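-
-/*
- * Illustrative sketch (not part of the original header): a minimal
- * rdc_desc_cfg_t setup for one channel.  The buffer sizes, ring
- * lengths and timeout/threshold values are placeholders chosen for
- * the example, not recommended settings, and bzero() is assumed to
- * be available from the usual kernel headers.
- */
-static npi_status_t
-example_cfg_ring(npi_handle_t handle, uint8_t rdc,
-	    uint64_t rbr_dma, uint64_t rcr_dma, uint64_t mbox_dma)
-{
-	rdc_desc_cfg_t cfg;
-
-	bzero(&cfg, sizeof (cfg));
-	cfg.mbox_enable = 1;
-	cfg.mbox_addr = mbox_dma;
-	cfg.valid0 = 1;
-	cfg.size0 = SIZE_2KB;		/* example packet buffer size */
-	cfg.page_size = SIZE_8KB;	/* example block size */
-	cfg.rbr_len = 512;		/* example RBR entries */
-	cfg.rbr_addr = rbr_dma;
-	cfg.rcr_len = 1024;		/* example RCR entries */
-	cfg.rcr_addr = rcr_dma;
-	cfg.rcr_timeout_enable = 1;
-	cfg.rcr_timeout = 8;		/* example timeout ticks */
-	cfg.rcr_threshold = 16;		/* example packet threshold */
-
-	return (npi_rxdma_cfg_rdc_ring(handle, rdc, &cfg));
-}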
-
-
-
-
-/*
- * npi_rxdma_rdc_rcr_flush
- * Forces RX completion ring update
- *
- * Inputs:
- *	rdc:		RX DMA Channel number
- *
- * Return:
- *
- */
-
-#define	npi_rxdma_rdc_rcr_flush(handle, rdc) \
-	RXDMA_REG_WRITE64(handle, RCR_FLSH_REG, rdc, \
-		    (RCR_FLSH_SET << RCR_FLSH_SHIFT))
-
-
-
-/*
- * npi_rxdma_rdc_rcr_read_update
- * Update the number of rcr packets and buffers processed
- *
- * Inputs:
- *	channel:	RX DMA Channel number
- *	num_pkts:	Number of pkts processed by SW.
- *			    A packet could consist of multiple
- *			    buffers, as in the case of jumbo packets.
- *	num_bufs:	Number of buffer processed by SW.
- *
- * Return:
- *	NPI_FAILURE		-
- *		NPI_RXDMA_OPCODE_INVALID	-
- *		NPI_RXDMA_CHANNEL_INVALID	-
- *
- */
-
-npi_status_t npi_rxdma_rdc_rcr_read_update(npi_handle_t, uint8_t,
-				    uint16_t, uint16_t);
-/*
- * npi_rxdma_rdc_rcr_pktread_update
- * Update the number of packets processed
- *
- * Inputs:
- *	channel:	RX DMA Channel number
- *	num_pkts:	Number of pkts processed by SW.
- *			A packet could consist of multiple
- *			buffers, as in the case of jumbo packets.
- *
- * Return:
- *	NPI_FAILURE		-
- *		NPI_RXDMA_OPCODE_INVALID	-
- *		NPI_RXDMA_CHANNEL_INVALID	-
- *
- */
-
-npi_status_t npi_rxdma_rdc_rcr_pktread_update(npi_handle_t,
-					uint8_t, uint16_t);
-
-
-
-/*
- * npi_rxdma_rdc_rcr_bufread_update
- * Update the number of buffers processed
- *
- * Inputs:
- *	channel:		RX DMA Channel number
- *	num_bufs:	Number of buffers processed by SW. Multiple
- *			buffers could be part of a single packet.
- *
- * Return:
- *	NPI_FAILURE		-
- *		NPI_RXDMA_OPCODE_INVALID	-
- *		NPI_RXDMA_CHANNEL_INVALID	-
- *
- */
-
-npi_status_t npi_rxdma_rdc_rcr_bufread_update(npi_handle_t,
-					uint8_t, uint16_t);
-
-
-
-/*
- * npi_rxdma_rdc_rbr_kick
- * Kick RDC RBR
- *
- * Inputs:
- *	rdc:		RX DMA Channel number
- *	num_buffers:	Number of Buffers posted to the RBR
- *
- * Return:
- *
- */
-
-#define	npi_rxdma_rdc_rbr_kick(handle, rdc, num_buffers) \
-	RXDMA_REG_WRITE64(handle, RBR_KICK_REG, rdc, num_buffers)
-
-
-/*
- * npi_rxdma_rdc_rbr_head_get
- * Gets the current rbr head pointer.
- *
- * Inputs:
- *	rdc:		RX DMA Channel number
- *	hdptr		ptr to write the rbr head value
- *
- * Return:
- *
- */
-
-npi_status_t npi_rxdma_rdc_rbr_head_get(npi_handle_t,
-				    uint8_t, addr44_t  *);
-
-
-
-/*
- * npi_rxdma_rdc_rbr_stat_get
- * Returns the RBR stat. The stat consists of the number of
- * RX buffers in the ring. It also indicates whether there
- * has been an overflow.
- *
- * Inputs:
- *	rdc:		RX DMA Channel number
- *	rbr_stat_t:	Structure to update stat
- *
- * Return:
- *
- */
-
-npi_status_t npi_rxdma_rdc_rbr_stat_get(npi_handle_t, uint8_t,
-				    rbr_stat_t *);
-
-
-
-/*
- * npi_rxdma_cfg_rdc_reset
- * Resets the RDC channel
- *
- * Inputs:
- *	rdc:		RX DMA Channel number
- *
- * Return:
- *
- */
-
-npi_status_t npi_rxdma_cfg_rdc_reset(npi_handle_t, uint8_t);
-
-
-/*
- * npi_rxdma_rdc_enable
- * Enables the RDC channel
- *
- * Inputs:
- *	rdc:		RX DMA Channel number
- *
- * Return:
- *
- */
-
-npi_status_t npi_rxdma_cfg_rdc_enable(npi_handle_t, uint8_t);
-
-/*
- * npi_rxdma_rdc_disable
- * Disables the RDC channel
- *
- * Inputs:
- *	rdc:		RX DMA Channel number
- *
- * Return:
- *
- */
-
-npi_status_t npi_rxdma_cfg_rdc_disable(npi_handle_t, uint8_t);
-
-
-/*
- * npi_rxdma_cfg_rdc_rcr_timeout()
- * Configure The RDC channel completion ring timeout.
- * If a frame has been received, an event will be
- * generated at least by the expiration of the timeout.
- *
- * Enables timeout by default.
- *
- * Inputs:
- *	rdc:		RX DMA Channel number
- *	rcr_timeout:	Completion Ring timeout value
- *
- * Return:
- * NPI_SUCCESS
- * NPI_FAILURE
- * NPI_SW_ERR
- * NPI_HW_ERR
- *
- */
-
-npi_status_t npi_rxdma_cfg_rdc_rcr_timeout(npi_handle_t, uint8_t,
-				    uint8_t);
-
-
-/*
- * npi_rxdma_cfg_rdc_rcr_threshold()
- * Configure The RDC channel completion ring threshold.
- * An event will be generated if the number of frames
- * received surpasses the threshold value.
- *
- * Inputs:
- *	rdc:		RX DMA Channel number
- *	rcr_threshold:	Completion Ring Threshold count
- *
- * Return:
- * NPI_SUCCESS
- * NPI_FAILURE
- * NPI_SW_ERR
- * NPI_HW_ERR
- *
- */
-
-npi_status_t npi_rxdma_cfg_rdc_rcr_threshold(npi_handle_t, uint8_t,
-				    uint16_t);
-
-
-npi_status_t npi_rxdma_cfg_rdc_rcr_timeout_disable(npi_handle_t, uint8_t);
-
-typedef struct _rdc_error_stat_t {
-	uint8_t fault:1;
-    uint8_t	multi_fault:1;
-    uint8_t	rbr_fault:1;
-    uint8_t	buff_fault:1;
-    uint8_t	rcr_fault:1;
-	addr44_t fault_addr;
-} rdc_error_stat_t;
-
-#if OLD
-/*
- * npi_rxdma_rdc_error_stat_get
- * Gets the current Error stat for the RDC.
- *
- * Inputs:
- *	rdc:		RX DMA Channel number
- *	error_stat	Structure to write current RDC Error stat
- *
- * Return:
- *
- */
-
-npi_status_t npi_rxdma_rdc_error_stat_get(npi_handle_t,
-				    uint8_t, rdc_error_stat_t *);
-
-#endif
-
-/*
- * npi_rxdma_rdc_rcr_tail_get
- * Gets the current RCR tail address for the RDC.
- *
- * Inputs:
- *	rdc:		RX DMA Channel number
- *	tail_addr	Structure to write current RDC RCR tail address
- *
- * Return:
- *
- */
-
-npi_status_t npi_rxdma_rdc_rcr_tail_get(npi_handle_t,
-				    uint8_t, addr44_t *);
-
-
-npi_status_t npi_rxdma_rdc_rcr_qlen_get(npi_handle_t,
-				    uint8_t, uint16_t *);
-
-
-
-typedef struct _rdc_discard_stat_t {
-    uint8_t	nobuf_ovflow;
-    uint8_t	red_ovflow;
-    uint32_t	nobuf_discard;
-    uint32_t	red_discard;
-} rdc_discard_stat_t;
-
-
-/*
- * npi_rxdma_rdc_discard_stat_get
- * Gets the current discard stats for the RDC.
- *
- * Inputs:
- *	rdc:		RX DMA Channel number
- *	rcr_stat	Structure to write current RDC discard stat
- *
- * Return:
- *
- */
-
-npi_status_t npi_rxdma_rdc_discard_stat_get(npi_handle_t,
-				    uint8_t, rdc_discard_stat_t);
-
-
-/*
- * npi_rx_port_discard_stat_get
- * Gets the current input (IPP) discard stats for the rx port.
- *
- * Inputs:
- *	rdc:		RX DMA Channel number
- *	rx_disc_cnt_t	Structure to write current RDC discard stat
- *
- * Return:
- *
- */
-
-npi_status_t npi_rx_port_discard_stat_get(npi_handle_t,
-				    uint8_t,
-				    rx_disc_cnt_t *);
-
-
-/*
- * npi_rxdma_red_discard_stat_get
- * Gets the current discard count due to RED
- * The counter overflow bit is cleared, if it has been set.
- *
- * Inputs:
- *	rdc:		RX DMA Channel number
- *	rx_disc_cnt_t	Structure to write current RDC discard stat
- *
- * Return:
- * NPI_SUCCESS
- * NPI_RXDMA_RDC_INVALID
- *
- */
-
-npi_status_t npi_rxdma_red_discard_stat_get(npi_handle_t, uint8_t,
-				    rx_disc_cnt_t *);
-
-
-
-/*
- * npi_rxdma_red_discard_oflow_clear
- * Clear RED discard counter overflow bit
- *
- * Inputs:
- *	rdc:		RX DMA Channel number
- *
- * Return:
- * NPI_SUCCESS
- * NPI_RXDMA_RDC_INVALID
- *
- */
-
-npi_status_t npi_rxdma_red_discard_oflow_clear(npi_handle_t,
-					uint8_t);
-
-
-
-
-/*
- * npi_rxdma_misc_discard_stat_get
- * Gets the current discard count for the rdc due to
- * buffer pool empty
- * The counter overflow bit is cleared, if it has been set.
- *
- * Inputs:
- *	rdc:		RX DMA Channel number
- *	rx_disc_cnt_t	Structure to write current RDC discard stat
- *
- * Return:
- * NPI_SUCCESS
- * NPI_RXDMA_RDC_INVALID
- *
- */
-
-npi_status_t npi_rxdma_misc_discard_stat_get(npi_handle_t, uint8_t,
-				    rx_disc_cnt_t *);
-
-
-
-/*
- * npi_rxdma_misc_discard_oflow_clear
- * Clear the overflow bit of the buffer pool empty discard counter
- * for the rdc
- *
- *
- * Inputs:
- *	rdc:		RX DMA Channel number
- *
- * Return:
- * NPI_SUCCESS
- * NPI_RXDMA_RDC_INVALID
- *
- */
-
-npi_status_t npi_rxdma_misc_discard_oflow_clear(npi_handle_t,
-					uint8_t);
-
-
-
-/*
- * npi_rxdma_ring_perr_stat_get
- * Gets the current RDC memory parity error counts.
- * The counter overflow bit is cleared, if it has been set.
- *
- * Inputs:
- * pre_cnt:	Structure to write current RDC Prefetch memory
- *		Parity Error stat
- * sha_cnt:	Structure to write current RDC Shadow memory
- *		Parity Error stat
- *
- * Return:
- * NPI_SUCCESS
- * NPI_RXDMA_RDC_INVALID
- *
- */
-
-npi_status_t npi_rxdma_ring_perr_stat_get(npi_handle_t,
-				    rdmc_par_err_log_t *,
-				    rdmc_par_err_log_t *);
-
-
-/*
- * npi_rxdma_ring_perr_stat_clear
- * Clear RDC Memory Parity Error counter overflow bits
- *
- * Inputs:
- * Return:
- * NPI_SUCCESS
- *
- */
-
-npi_status_t npi_rxdma_ring_perr_stat_clear(npi_handle_t);
-
-
-/* Access the RDMC Memory: used for debugging */
-
-npi_status_t npi_rxdma_rdmc_memory_io(npi_handle_t,
-			    rdmc_mem_access_t *, uint8_t);
-
-
-
-/*
- * npi_rxdma_rxctl_fifo_error_intr_set
- * Configure The RX ctrl fifo error interrupt generation
- *
- * Inputs:
- *	mask:	rx_ctl_dat_fifo_mask_t specifying the errors
- *
- * Return:
- * NPI_SUCCESS
- * NPI_FAILURE
- *
- */
-
-npi_status_t npi_rxdma_rxctl_fifo_error_intr_set(npi_handle_t,
-				    rx_ctl_dat_fifo_mask_t *);
-
-/*
- * npi_rxdma_rxctl_fifo_error_status_get
- * Read The RX ctrl fifo error Status
- *
- * Inputs:
- *	stat:	rx_ctl_dat_fifo_stat_t to read the errors to
- * valid fields in  rx_ctl_dat_fifo_stat_t structure are:
- * zcp_eop_err, ipp_eop_err, id_mismatch.
- * Return:
- * NPI_SUCCESS
- * NPI_FAILURE
- *
- */
-
-npi_status_t npi_rxdma_rxctl_fifo_error_status_get(npi_handle_t,
-				    rx_ctl_dat_fifo_stat_t *);
-
-
-/*
- * npi_rxdma_channel_mex_set():
- *	This function is called to arm the DMA channel with
- *	mailbox updating capability. Software needs to rearm
- *	for each update by writing to the control and status register.
- *
- * Parameters:
- *	handle		- NPI handle (virtualization flag must be defined).
- *	channel		- logical RXDMA channel from 0 to 23.
- *			  (If virtualization flag is not set, then
- *			   logical channel is the same as the hardware
- *			   channel number).
- *
- * Return:
- *	NPI_SUCCESS		- If enable channel with mailbox update
- *				  is completed successfully.
- *
- *	Error:
- *	NPI_FAILURE	-
- *		NPI_RXDMA_CHANNEL_INVALID -
- */
-npi_status_t npi_rxdma_channel_mex_set(npi_handle_t, uint8_t);
-
-/*
- * npi_rxdma_channel_rcrto_clear():
- *	This function is called to reset RCRTO bit to 0.
- *
- * Parameters:
- *	handle		- NPI handle (virtualization flag must be defined).
- *	channel		- logical RXDMA channel from 0 to 23.
- *			  (If virtualization flag is not set, then
- *			   logical channel is the same as the hardware
- *			   channel number).
- * Return:
- *	NPI_SUCCESS
- *
- *	Error:
- *	NPI_FAILURE	-
- *		NPI_RXDMA_CHANNEL_INVALID -
- */
-npi_status_t npi_rxdma_channel_rcrto_clear(npi_handle_t, uint8_t);
-
-/*
- * npi_rxdma_channel_pt_drop_pkt_clear():
- *	This function is called to clear the port drop packet bit (debug).
- *
- * Parameters:
- *	handle		- NPI handle (virtualization flag must be defined).
- *	channel		- logical RXDMA channel from 0 to 23.
- *			  (If virtualization flag is not set, then
- *			   logical channel is the same as the hardware
- *			   channel number).
- * Return:
- *	NPI_SUCCESS
- *
- *	Error:
- *	NPI_FAILURE	-
- *		NPI_RXDMA_CHANNEL_INVALID -
- */
-npi_status_t npi_rxdma_channel_pt_drop_pkt_clear(npi_handle_t, uint8_t);
-
-/*
- * npi_rxdma_channel_wred_drop_clear():
- *	This function is called to clear the wred drop bit (debug only).
- *
- * Parameters:
- *	handle		- NPI handle (virtualization flag must be defined).
- *	channel		- logical RXDMA channel from 0 to 23.
- *			  (If virtualization flag is not set, then
- *			   logical channel is the same as the hardware
- *			   channel number).
- * Return:
- *	NPI_SUCCESS
- *
- *	Error:
- *	NPI_FAILURE	-
- *		NPI_RXDMA_CHANNEL_INVALID -
- */
-npi_status_t npi_rxdma_channel_wred_drop_clear(npi_handle_t, uint8_t);
-
-/*
- * npi_rxdma_channel_rcr_shfull_clear():
- *	This function is called to clear RCR shadow full bit.
- *
- * Parameters:
- *	handle		- NPI handle (virtualization flag must be defined).
- *	channel		- logical RXDMA channel from 0 to 23.
- *			  (If virtualization flag is not set, then
- *			   logical channel is the same as the hardware
- *			   channel number).
- * Return:
- *	NPI_SUCCESS
- *
- *	Error:
- *	NPI_FAILURE	-
- *		NPI_RXDMA_CHANNEL_INVALID -
- */
-npi_status_t npi_rxdma_channel_rcr_shfull_clear(npi_handle_t, uint8_t);
-
-/*
- * npi_rxdma_channel_rcrfull_clear():
- *	This function is called to clear RCR full bit.
- *
- * Parameters:
- *	handle		- NPI handle (virtualization flag must be defined).
- *	channel		- logical RXDMA channel from 0 to 23.
- *			  (If virtualization flag is not set, then
- *			   logical channel is the same as the hardware
- *			   channel number).
- * Return:
- *	NPI_SUCCESS
- *
- *	Error:
- *	NPI_FAILURE	-
- *		NPI_RXDMA_CHANNEL_INVALID -
- */
-npi_status_t npi_rxdma_channel_rcrfull_clear(npi_handle_t, uint8_t);
-
-/*
- * npi_rxdma_channel_rbr_pre_empty_clear():
- *	This function is called to clear the RBR pre-empty bit.
- *
- * Parameters:
- *	handle		- NPI handle (virtualization flag must be defined).
- *	channel		- logical RXDMA channel from 0 to 23.
- *			  (If virtualization flag is not set, then
- *			   logical channel is the same as the hardware
- *			   channel number).
- * Return:
- *	NPI_SUCCESS
- *
- *	Error:
- *	NPI_FAILURE		-
- *		NPI_RXDMA_CHANNEL_INVALID -
- */
-npi_status_t npi_rxdma_channel_rbr_pre_empty_clear(npi_handle_t, uint8_t);
-
-/*
- * npi_rxdma_channel_control():
- *	This function is called to control a receive DMA channel
- *	for arming the channel with mailbox updates, resetting
- *	various event status bits (control and status register).
- *
- * Parameters:
- *	handle		- NPI handle (virtualization flag must be defined).
- *	control		- NPI defined control type supported:
- *				- RXDMA_MEX_SET
- * 				- RXDMA_RCRTO_CLEAR
- *				- RXDMA_PT_DROP_PKT_CLEAR
- *				- RXDMA_WRED_DROP_CLEAR
- *				- RXDMA_RCR_SFULL_CLEAR
- *				- RXDMA_RCR_FULL_CLEAR
- *				- RXDMA_RBR_PRE_EMPTY_CLEAR
- *				- RXDMA_RBR_EMPTY_CLEAR
- *				- RXDMA_CS_CLEAR_ALL
- *	channel		- logical RXDMA channel from 0 to 23.
- *			  (If virtualization flag is not set, then
- *			   logical channel is the same as the hardware
- *			   channel number).
- * Return:
- *	NPI_SUCCESS
- *
- *	Error:
- *	NPI_FAILURE		-
- *		NPI_TXDMA_OPCODE_INVALID	-
- *		NPI_TXDMA_CHANNEL_INVALID	-
- */
-npi_status_t npi_rxdma_channel_control(npi_handle_t,
-				rxdma_cs_cntl_t, uint8_t);
-
-/*
- * npi_rxdma_control_status():
- *	This function is called to operate on the control
- *	and status register.
- *
- * Parameters:
- *	handle		- NPI handle
- *	op_mode		- OP_GET: get hardware control and status
- *			  OP_SET: set hardware control and status
- *			  OP_UPDATE: update hardware control and status.
- *			  OP_CLEAR: clear control and status register to 0s.
- *	channel		- hardware RXDMA channel from 0 to 23.
- *	cs_p		- pointer to hardware defined control and status
- *			  structure.
- * Return:
- *	NPI_SUCCESS
- *
- *	Error:
- *	NPI_FAILURE		-
- *		NPI_RXDMA_OPCODE_INVALID	-
- *		NPI_RXDMA_CHANNEL_INVALID	-
- */
-npi_status_t npi_rxdma_control_status(npi_handle_t, io_op_t,
-			uint8_t, p_rx_dma_ctl_stat_t);
-
-/*
- * npi_rxdma_event_mask():
- *	This function is called to operate on the event mask
- *	register which is used for generating interrupts.
- *
- * Parameters:
- *	handle		- NPI handle
- *	op_mode		- OP_GET: get hardware event mask
- *			  OP_SET: set hardware interrupt event masks
- *			  OP_CLEAR: clear the event mask register to 0s.
- *	channel		- hardware RXDMA channel from 0 to 23.
- *	mask_p		- pointer to hardware defined event mask
- *			  structure.
- * Return:
- *	NPI_SUCCESS		- If set is complete successfully.
- *
- *	Error:
- *	NPI_FAILURE		-
- *		NPI_RXDMA_OPCODE_INVALID	-
- *		NPI_RXDMA_CHANNEL_INVALID	-
- */
-npi_status_t npi_rxdma_event_mask(npi_handle_t, io_op_t,
-		uint8_t, p_rx_dma_ent_msk_t);
-
-/*
- * npi_rxdma_event_mask_config():
- *	This function is called to operate on the event mask
- *	register which is used for generating interrupts.
- *
- * Parameters:
- *	handle		- NPI handle
- *	op_mode		- OP_GET: get hardware event mask
- *			  OP_SET: set hardware interrupt event masks
- *			  OP_CLEAR: clear the event mask register to 0s.
- *	channel		- hardware RXDMA channel from 0 to 23.
- *	cfgp		- pointer to NPI defined event mask
- *			  enum data type.
- * Return:
- *	NPI_SUCCESS		- If set is complete successfully.
- *
- *	Error:
- *	NPI_FAILURE		-
- *		NPI_RXDMA_OPCODE_INVALID	-
- *		NPI_RXDMA_CHANNEL_INVALID	-
- */
-npi_status_t npi_rxdma_event_mask_config(npi_handle_t, io_op_t,
-		uint8_t, rxdma_ent_msk_cfg_t *);
-
-
-/*
- * npi_rxdma_dump_rdc_regs
- * Dumps the contents of rdc csrs and fzc registers
- *
- * Input:
- *         rdc:      RX DMA number
- *
- * return:
- *     NPI_SUCCESS
- *     NPI_FAILURE
- *     NPI_RXDMA_RDC_INVALID
- *
- */
-
-npi_status_t npi_rxdma_dump_rdc_regs(npi_handle_t, uint8_t);
-
-
-/*
- * npi_rxdma_dump_fzc_regs
- * Dumps the contents of the RDC FZC registers
- *
- * Input:
- *         handle:   NPI handle
- *
- * return:
- *     NPI_SUCCESS
- *     NPI_FAILURE
- *
- */
-
-npi_status_t npi_rxdma_dump_fzc_regs(npi_handle_t);
-
-npi_status_t npi_rxdma_channel_rbr_empty_clear(npi_handle_t,
-							uint8_t);
-npi_status_t npi_rxdma_rxctl_fifo_error_intr_get(npi_handle_t,
-				rx_ctl_dat_fifo_stat_t *);
-
-npi_status_t npi_rxdma_rxctl_fifo_error_intr_set(npi_handle_t,
-				rx_ctl_dat_fifo_mask_t *);
-
-npi_status_t npi_rxdma_dump_rdc_table(npi_handle_t, uint8_t);
-#ifdef	__cplusplus
-}
-#endif
-
-#endif	/* _NPI_RXDMA_H */
--- a/usr/src/uts/sun4v/io/nxge/npi/npi_txc.c	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1063 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#include <npi_txc.h>
-
-/*
- * Transmit Controller (TXC) Functions.
- */
-
-uint64_t txc_fzc_dmc_offset[] = {
-	TXC_DMA_MAX_BURST_REG,
-	TXC_DMA_MAX_LENGTH_REG
-};
-
-const char *txc_fzc_dmc_name[] = {
-	"TXC_DMA_MAX_BURST_REG",
-	"TXC_DMA_MAX_LENGTH_REG"
-};
-
-uint64_t txc_fzc_offset [] = {
-	TXC_CONTROL_REG,
-	TXC_TRAINING_REG,
-	TXC_DEBUG_SELECT_REG,
-	TXC_MAX_REORDER_REG,
-	TXC_INT_STAT_DBG_REG,
-	TXC_INT_STAT_REG,
-	TXC_INT_MASK_REG
-};
-
-const char *txc_fzc_name [] = {
-	"TXC_CONTROL_REG",
-	"TXC_TRAINING_REG",
-	"TXC_DEBUG_SELECT_REG",
-	"TXC_MAX_REORDER_REG",
-	"TXC_INT_STAT_DBG_REG",
-	"TXC_INT_STAT_REG",
-	"TXC_INT_MASK_REG"
-};
-
-uint64_t txc_fzc_port_offset[] = {
-	TXC_PORT_CTL_REG,
-	TXC_PORT_DMA_ENABLE_REG,
-	TXC_PKT_STUFFED_REG,
-	TXC_PKT_XMIT_REG,
-	TXC_ROECC_CTL_REG,
-	TXC_ROECC_ST_REG,
-	TXC_RO_DATA0_REG,
-	TXC_RO_DATA1_REG,
-	TXC_RO_DATA2_REG,
-	TXC_RO_DATA3_REG,
-	TXC_RO_DATA4_REG,
-	TXC_SFECC_CTL_REG,
-	TXC_SFECC_ST_REG,
-	TXC_SF_DATA0_REG,
-	TXC_SF_DATA1_REG,
-	TXC_SF_DATA2_REG,
-	TXC_SF_DATA3_REG,
-	TXC_SF_DATA4_REG,
-	TXC_RO_TIDS_REG,
-	TXC_RO_STATE0_REG,
-	TXC_RO_STATE1_REG,
-	TXC_RO_STATE2_REG,
-	TXC_RO_STATE3_REG,
-	TXC_RO_CTL_REG,
-	TXC_RO_ST_DATA0_REG,
-	TXC_RO_ST_DATA1_REG,
-	TXC_RO_ST_DATA2_REG,
-	TXC_RO_ST_DATA3_REG,
-	TXC_PORT_PACKET_REQ_REG
-};
-
-const char *txc_fzc_port_name[] = {
-	"TXC_PORT_CTL_REG",
-	"TXC_PORT_DMA_ENABLE_REG",
-	"TXC_PKT_STUFFED_REG",
-	"TXC_PKT_XMIT_REG",
-	"TXC_ROECC_CTL_REG",
-	"TXC_ROECC_ST_REG",
-	"TXC_RO_DATA0_REG",
-	"TXC_RO_DATA1_REG",
-	"TXC_RO_DATA2_REG",
-	"TXC_RO_DATA3_REG",
-	"TXC_RO_DATA4_REG",
-	"TXC_SFECC_CTL_REG",
-	"TXC_SFECC_ST_REG",
-	"TXC_SF_DATA0_REG",
-	"TXC_SF_DATA1_REG",
-	"TXC_SF_DATA2_REG",
-	"TXC_SF_DATA3_REG",
-	"TXC_SF_DATA4_REG",
-	"TXC_RO_TIDS_REG",
-	"TXC_RO_STATE0_REG",
-	"TXC_RO_STATE1_REG",
-	"TXC_RO_STATE2_REG",
-	"TXC_RO_STATE3_REG",
-	"TXC_RO_CTL_REG",
-	"TXC_RO_ST_DATA0_REG",
-	"TXC_RO_ST_DATA1_REG",
-	"TXC_RO_ST_DATA2_REG",
-	"TXC_RO_ST_DATA3_REG",
-	"TXC_PORT_PACKET_REQ_REG"
-};
-
-/*
- * npi_txc_dump_tdc_fzc_regs
- * Dumps the contents of TXC csrs and fzc registers
- *
- * Input:
- *	handle		- NPI handle
- *         tdc:      TX DMA number
- *
- * return:
- *     NPI_SUCCESS
- *     NPI_FAILURE
- *     NPI_TXC_CHANNEL_INVALID
- *
- */
-npi_status_t
-npi_txc_dump_tdc_fzc_regs(npi_handle_t handle, uint8_t tdc)
-{
-	uint64_t		value, offset;
-	int 			num_regs, i;
-
-	ASSERT(TXDMA_CHANNEL_VALID(tdc));
-	if (!TXDMA_CHANNEL_VALID(tdc)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			"npi_txc_dump_tdc_fzc_regs"
-			" Invalid TDC number %d \n",
-			tdc));
-		return (NPI_FAILURE | NPI_TXC_CHANNEL_INVALID(tdc));
-	}
-
-	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-		    "\nTXC FZC DMC Register Dump for Channel %d\n",
-			    tdc));
-
-	num_regs = sizeof (txc_fzc_dmc_offset) / sizeof (uint64_t);
-	for (i = 0; i < num_regs; i++) {
-		offset = TXC_FZC_REG_CN_OFFSET(txc_fzc_dmc_offset[i], tdc);
-		NXGE_REG_RD64(handle, offset, &value);
-		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL, "0x%08llx "
-			"%s\t 0x%08llx \n",
-			offset, txc_fzc_dmc_name[i], value));
-	}
-
-	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-		"\n TXC FZC Register Dump for Channel %d done\n", tdc));
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_txc_dump_fzc_regs
- * Dumps the contents of txc csrs and fzc registers
- *
- *
- * return:
- *     NPI_SUCCESS
- *     NPI_FAILURE
- *
- */
-npi_status_t
-npi_txc_dump_fzc_regs(npi_handle_t handle)
-{
-
-	uint64_t value;
-	int num_regs, i;
-
-	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-		"\nTXC FZC Common Register Dump\n"));
-
-	num_regs = sizeof (txc_fzc_offset) / sizeof (uint64_t);
-	for (i = 0; i < num_regs; i++) {
-		NXGE_REG_RD64(handle, txc_fzc_offset[i], &value);
-		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL, "0x%08llx "
-			"%s\t 0x%08llx \n",
-			txc_fzc_offset[i], txc_fzc_name[i], value));
-	}
-	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-		"\n TXC FZC Common Register Dump Done \n"));
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_txc_dump_port_fzc_regs
- * Dumps the contents of TXC csrs and fzc registers
- *
- * Input:
- *	handle		- NPI handle
- *         port:      port number
- *
- * return:
- *     NPI_SUCCESS
- *     NPI_FAILURE
- *
- */
-npi_status_t
-npi_txc_dump_port_fzc_regs(npi_handle_t handle, uint8_t port)
-{
-	uint64_t		value, offset;
-	int 			num_regs, i;
-
-	ASSERT(IS_PORT_NUM_VALID(port));
-
-	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-		"\nTXC FZC PORT Register Dump for port %d\n", port));
-
-	num_regs = sizeof (txc_fzc_port_offset) / sizeof (uint64_t);
-	for (i = 0; i < num_regs; i++) {
-		offset = TXC_FZC_REG_PT_OFFSET(txc_fzc_port_offset[i], port);
-		NXGE_REG_RD64(handle, offset, &value);
-		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL, "0x%08llx "
-			"%s\t 0x%08llx \n",
-			offset, txc_fzc_port_name[i], value));
-	}
-
-	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-		"\n TXC FZC Register Dump for port %d done\n", port));
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_txc_dma_max_burst():
- *	This function is called to configure the max burst bytes.
- *
- * Parameters:
- *	handle		- NPI handle
- *	op_mode		- OP_GET: get max burst value
- *			- OP_SET: set max burst value
- *	channel		- channel number (0 - 23)
- *	dma_max_burst_p - pointer used to return (OP_GET) or supply
- *			  (OP_SET) the max burst value.
- * Return:
- *	NPI_SUCCESS	- If operation is complete successfully.
- *
- *	Error:
- *	NPI_FAILURE	-
- *		NPI_TXC_OPCODE_INVALID
- *		NPI_TXC_CHANNEL_INVALID
- */
-npi_status_t
-npi_txc_dma_max_burst(npi_handle_t handle, io_op_t op_mode, uint8_t channel,
-		uint32_t *dma_max_burst_p)
-{
-	uint64_t val;
-
-	ASSERT(TXDMA_CHANNEL_VALID(channel));
-	if (!TXDMA_CHANNEL_VALID(channel)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_txc_dma_max_burst"
-				    " Invalid Input: channel <0x%x>",
-				    channel));
-		return (NPI_FAILURE | NPI_TXC_CHANNEL_INVALID(channel));
-	}
-
-	switch (op_mode) {
-	case OP_GET:
-		TXC_FZC_REG_READ64(handle, TXC_DMA_MAX_BURST_REG, channel,
-					&val);
-		*dma_max_burst_p = (uint32_t)val;
-		break;
-
-	case OP_SET:
-		TXC_FZC_REG_WRITE64(handle,
-			TXC_DMA_MAX_BURST_REG, channel, *dma_max_burst_p);
-		break;
-
-	default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_txc_dma_max_burst"
-				    " Invalid Input: op_mode <0x%x>",
-				    op_mode));
-		return (NPI_FAILURE | NPI_TXC_OPCODE_INVALID(channel));
-	}
-
-	return (NPI_SUCCESS);
-}
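A minimal read-modify-write sketch using the OP_GET/OP_SET modes described
above; the 2048-byte cap is purely illustrative, and handle/channel are
assumed to be already set up by the caller:

	uint32_t burst;

	/* Read the current max burst, clamp it, and write it back. */
	if (npi_txc_dma_max_burst(handle, OP_GET, channel, &burst) ==
	    NPI_SUCCESS) {
		if (burst > 2048)
			burst = 2048;
		(void) npi_txc_dma_max_burst(handle, OP_SET, channel, &burst);
	}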
-
-/*
- * npi_txc_dma_max_burst_set():
- *	This function is called to set the max burst bytes.
- *
- * Parameters:
- *	handle		- NPI handle
- *	channel		- channel number (0 - 23)
- *	max_burst 	- max burst to set
- * Return:
- *	NPI_SUCCESS	- If operation is complete successfully.
- *
- *	Error:
- *	NPI_FAILURE	-
- */
-npi_status_t
-npi_txc_dma_max_burst_set(npi_handle_t handle, uint8_t channel,
-		uint32_t max_burst)
-{
-	ASSERT(TXDMA_CHANNEL_VALID(channel));
-	if (!TXDMA_CHANNEL_VALID(channel)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_txc_dma_max_burst_set"
-				    " Invalid Input: channel <0x%x>",
-				    channel));
-		return (NPI_FAILURE | NPI_TXC_CHANNEL_INVALID(channel));
-	}
-
-	TXC_FZC_REG_WRITE64(handle, TXC_DMA_MAX_BURST_REG,
-		channel, (uint64_t)max_burst);
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_txc_dma_bytes_transmitted():
- *	This function is called to get # of bytes transmitted by
- *	DMA (hardware register is cleared on read).
- *
- * Parameters:
- *	handle		- NPI handle
- *	channel		- channel number (0 - 23)
- *	dma_bytes_p 	- pointer to store bytes transmitted.
- * Return:
- *	NPI_SUCCESS	- If get is complete successfully.
- *
- *	Error:
- *	NPI_FAILURE	-
- *		NPI_TXC_CHANNEL_INVALID
- */
-npi_status_t
-npi_txc_dma_bytes_transmitted(npi_handle_t handle, uint8_t channel,
-		uint32_t *dma_bytes_p)
-{
-	uint64_t val;
-
-	ASSERT(TXDMA_CHANNEL_VALID(channel));
-	if (!TXDMA_CHANNEL_VALID(channel)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_txc_dma_bytes_transmitted"
-				    " Invalid Input: channel %d",
-				    channel));
-		return (NPI_FAILURE | NPI_TXC_CHANNEL_INVALID(channel));
-	}
-
-	TXC_FZC_REG_READ64(handle, TXC_DMA_MAX_LENGTH_REG, channel, &val);
-	*dma_bytes_p = (uint32_t)val;
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_txc_control():
- *	This function is called to get or set the control register.
- *
- * Parameters:
- *	handle		- NPI handle
- *	op_mode		- OP_GET: get control register value
- *			  OP_SET: set control register value
- *	txc_control_p	- pointer to hardware defined data structure.
- * Return:
- *	NPI_SUCCESS	- If operation is complete successfully.
- *
- *	Error:
- *	NPI_FAILURE	-
- *		NPI_TXC_OPCODE_INVALID
- *		NPI_TXC_PORT_INVALID
- */
-npi_status_t
-npi_txc_control(npi_handle_t handle, io_op_t op_mode,
-		p_txc_control_t txc_control_p)
-{
-	switch (op_mode) {
-	case OP_GET:
-		NXGE_REG_RD64(handle, TXC_CONTROL_REG, &txc_control_p->value);
-		break;
-
-	case OP_SET:
-		NXGE_REG_WR64(handle, TXC_CONTROL_REG,
-			txc_control_p->value);
-		break;
-
-	default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_txc_control"
-				    " Invalid Input:  control 0x%x",
-				    op_mode));
-		return (NPI_FAILURE | NPI_TXC_OPCODE_INVALID(op_mode));
-	}
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_txc_global_enable():
- *	This function is called to globally enable TXC.
- *
- * Parameters:
- *	handle		- NPI handle
- * Return:
- *	NPI_SUCCESS	- If enable is complete successfully.
- *
- *	Error:
- */
-npi_status_t
-npi_txc_global_enable(npi_handle_t handle)
-{
-	txc_control_t	cntl;
-	uint64_t	val;
-
-	cntl.value = 0;
-	cntl.bits.ldw.txc_enabled = 1;
-
-	NXGE_REG_RD64(handle, TXC_CONTROL_REG, &val);
-	NXGE_REG_WR64(handle, TXC_CONTROL_REG, val | cntl.value);
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_txc_global_disable():
- *	This function is called to globally disable TXC.
- *
- * Parameters:
- *	handle		- NPI handle
- * Return:
- *	NPI_SUCCESS	- If disable is complete successfully.
- *
- *	Error:
- */
-npi_status_t
-npi_txc_global_disable(npi_handle_t handle)
-{
-	txc_control_t	cntl;
-	uint64_t	val;
-
-
-	cntl.value = 0;
-	cntl.bits.ldw.txc_enabled = 1;
-
-	NXGE_REG_RD64(handle, TXC_CONTROL_REG, &val);
-	/* Clear the global TXC enable bit, leaving the per-port bits alone. */
-	NXGE_REG_WR64(handle, TXC_CONTROL_REG, val & ~cntl.value);
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_txc_control_clear():
- *	This function is called to clear the port control register bits.
- *
- * Parameters:
- *	handle		- NPI handle
- *	port		- port number (0 - 3)
- * Return:
- *	NPI_SUCCESS	- If reset all bits to 0s is complete successfully.
- *
- *	Error:
- */
-npi_status_t
-npi_txc_control_clear(npi_handle_t handle, uint8_t port)
-{
-	ASSERT(IS_PORT_NUM_VALID(port));
-
-	NXGE_REG_WR64(handle, TXC_PORT_CTL_REG, TXC_PORT_CNTL_CLEAR);
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_txc_training_set():
- *	This function is called to set the debug training vector.
- *
- * Parameters:
- *	handle			- NPI handle
- *	vector			- training vector to set.
- * Return:
- *	NPI_SUCCESS
- *
- *	Error:
- *	NPI_FAILURE		-
- */
-npi_status_t
-npi_txc_training_set(npi_handle_t handle, uint32_t vector)
-{
-	NXGE_REG_WR64(handle, TXC_TRAINING_REG, (uint64_t)vector);
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_txc_training_get():
- *	This function is called to get the debug training vector.
- *
- * Parameters:
- *	handle			- NPI handle
- *	vector_p		- pointer to store training vector.
- * Return:
- *	NPI_SUCCESS
- *
- *	Error:
- *	NPI_FAILURE		-
- */
-npi_status_t
-npi_txc_training_get(npi_handle_t handle, uint32_t *vector_p)
-{
-	uint64_t val;
-
-	NXGE_REG_RD64(handle, (TXC_TRAINING_REG & TXC_TRAINING_VECTOR_MASK),
-			&val);
-	*vector_p = (uint32_t)val;
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_txc_port_enable():
- *	This function is called to enable a particular port.
- *
- * Parameters:
- *	handle		- NPI handle
- *	port		- port number (0 - 3)
- * Return:
- *	NPI_SUCCESS	- If port is enabled successfully.
- *
- *	Error:
- *	NPI_FAILURE	-
- *		NPI_TXC_PORT_INVALID
- */
-npi_status_t
-npi_txc_port_enable(npi_handle_t handle, uint8_t port)
-{
-	uint64_t val;
-
-	ASSERT(IS_PORT_NUM_VALID(port));
-
-	NXGE_REG_RD64(handle, TXC_CONTROL_REG, &val);
-	NXGE_REG_WR64(handle, TXC_CONTROL_REG, val | (1 << port));
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_txc_port_disable():
- *	This function is called to disable a particular port.
- *
- * Parameters:
- *	handle		- NPI handle
- *	port		- port number (0 - 3)
- * Return:
- *	NPI_SUCCESS	- If port is disabled successfully.
- *
- *	Error:
- *	NPI_FAILURE	-
- *		NPI_TXC_PORT_INVALID
- */
-npi_status_t
-npi_txc_port_disable(npi_handle_t handle, uint8_t port)
-{
-	uint64_t val;
-
-	ASSERT(IS_PORT_NUM_VALID(port));
-
-	NXGE_REG_RD64(handle, TXC_CONTROL_REG, &val);
-	NXGE_REG_WR64(handle, TXC_CONTROL_REG, (val & ~(1 << port)));
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_txc_port_dma_enable():
- *	This function is called to bind DMA channels (bitmap) to a port.
- *
- * Parameters:
- *	handle			- NPI handle
- *	port			- port number (0 - 3)
- *	port_dma_list_bitmap	- channels bitmap
- *				  (bits 0 - 23, one bit per channel; 1 to bind)
- * Return:
- *	NPI_SUCCESS		- If channels are bound successfully.
- *
- *	Error:
- *	NPI_FAILURE	-
- *		NPI_TXC_PORT_INVALID
- */
-npi_status_t
-npi_txc_port_dma_enable(npi_handle_t handle, uint8_t port,
-		uint32_t port_dma_list_bitmap)
-{
-
-	ASSERT(IS_PORT_NUM_VALID(port));
-
-	TXC_FZC_CNTL_REG_WRITE64(handle, TXC_PORT_DMA_ENABLE_REG, port,
-		port_dma_list_bitmap);
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_txc_port_dma_list_get(npi_handle_t handle, uint8_t port,
-		uint32_t *port_dma_list_bitmap)
-{
-	uint64_t val;
-
-	ASSERT(IS_PORT_NUM_VALID(port));
-
-	TXC_FZC_CNTL_REG_READ64(handle, TXC_PORT_DMA_ENABLE_REG, port, &val);
-	*port_dma_list_bitmap = (uint32_t)(val & TXC_DMA_DMA_LIST_MASK);
-
-	return (NPI_SUCCESS);
-}
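A short sketch of the bitmap convention described above (bit n enables TX DMA
channel n for the port); port 0 and the two channels used here are
illustrative:

	uint32_t bound;

	/* Bind TX DMA channels 0 and 1 to port 0, then read the map back. */
	(void) npi_txc_port_dma_enable(handle, 0, (1 << 0) | (1 << 1));
	(void) npi_txc_port_dma_list_get(handle, 0, &bound);
	/* bound should now read back as 0x3 */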
-
-/*
- * npi_txc_port_dma_channel_enable():
- *	This function is called to bind a channel to a port.
- *
- * Parameters:
- *	handle			- NPI handle
- *	port			- port number (0 - 3)
- *	channel			- channel number (0 - 23)
- * Return:
- *	NPI_SUCCESS		- If channel is bound successfully.
- *
- *	Error:
- *	NPI_FAILURE		-
- *		NPI_TXC_PORT_INVALID	-
- */
-npi_status_t
-npi_txc_port_dma_channel_enable(npi_handle_t handle, uint8_t port,
-		uint8_t channel)
-{
-	uint64_t val;
-
-	ASSERT(IS_PORT_NUM_VALID(port));
-
-	ASSERT(TXDMA_CHANNEL_VALID(channel));
-	if (!TXDMA_CHANNEL_VALID(channel)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_txc_port_dma_channel_enable"
-				    " Invalid Input: channel <0x%x>", channel));
-		return (NPI_FAILURE | NPI_TXC_CHANNEL_INVALID(channel));
-	}
-
-	TXC_FZC_CNTL_REG_READ64(handle, TXC_PORT_DMA_ENABLE_REG, port, &val);
-	TXC_FZC_CNTL_REG_WRITE64(handle, TXC_PORT_DMA_ENABLE_REG, port,
-				(val | (1 << channel)));
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_txc_port_dma_channel_disable():
- *	This function is called to unbind a channel to a port.
- *
- * Parameters:
- *	handle			- NPI handle
- *	port			- port number (0 - 3)
- *	channel			- channel number (0 - 23)
- * Return:
- *	NPI_SUCCESS		- If channel is unbound successfully.
- *
- *	Error:
- *	NPI_FAILURE		-
- *		NPI_TXC_PORT_INVALID	-
- */
-npi_status_t
-npi_txc_port_dma_channel_disable(npi_handle_t handle, uint8_t port,
-		uint8_t channel)
-{
-	uint64_t val;
-
-	ASSERT(IS_PORT_NUM_VALID(port));
-
-	ASSERT(TXDMA_CHANNEL_VALID(channel));
-	if (!TXDMA_CHANNEL_VALID(channel)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_txc_port_dma_channel_disable"
-				    " Invalid Input: channel <0x%x>", channel));
-		return (NPI_FAILURE | NPI_TXC_CHANNEL_INVALID(channel));
-	}
-
-	TXC_FZC_CNTL_REG_READ64(handle, TXC_PORT_DMA_ENABLE_REG, port, &val);
-	TXC_FZC_CNTL_REG_WRITE64(handle, TXC_PORT_DMA_ENABLE_REG, port,
-				val & ~(1 << channel));
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_txc_reorder_set():
- *	This function is called to set the per-port reorder resources.
- *
- * Parameters:
- *	handle			- NPI handle
- *	port			- port to set
- *	reorder			- pointer to the reorder resources to set
- *				  (4 bits)
- * Return:
- *	NPI_SUCCESS
- *
- *	Error:
- *	NPI_FAILURE		-
- */
-npi_status_t
-npi_txc_reorder_set(npi_handle_t handle, uint8_t port, uint8_t *reorder)
-{
-	uint64_t val;
-
-	ASSERT(IS_PORT_NUM_VALID(port));
-
-	NXGE_REG_RD64(handle, TXC_MAX_REORDER_REG, &val);
-
-	val |= (*reorder << TXC_MAX_REORDER_SHIFT(port));
-
-	NXGE_REG_WR64(handle, TXC_MAX_REORDER_REG, val);
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_txc_reorder_get():
- *	This function is called to get the txc reorder resources.
- *
- * Parameters:
- *	handle			- NPI handle
- *	port			- port to get
- *	reorder			- pointer to return the reorder resources
- * Return:
- *	NPI_SUCCESS
- *
- *	Error:
- *	NPI_FAILURE		-
- */
-npi_status_t
-npi_txc_reorder_get(npi_handle_t handle, uint8_t port, uint32_t *reorder)
-{
-	uint64_t val;
-
-	ASSERT(IS_PORT_NUM_VALID(port));
-
-	NXGE_REG_RD64(handle, TXC_MAX_REORDER_REG, &val);
-
-	*reorder = (uint8_t)(val >> TXC_MAX_REORDER_SHIFT(port));
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_txc_pkt_stuffed_get():
- *	This function is called to get total # of packets processed
- *	by reorder engine and packetAssy engine.
- *
- * Parameters:
- *	handle		- NPI handle
- *	port		- port number (0 - 3)
- *	pkt_assy_p 	- packets processed by Assy engine.
- *	pkt_reorder_p	- packets processed by reorder engine.
- *
- * Return:
- *	NPI_SUCCESS	- If get is complete successfully.
- *
- *	Error:
- *	NPI_FAILURE	-
- *		NPI_TXC_PORT_INVALID
- */
-npi_status_t
-npi_txc_pkt_stuffed_get(npi_handle_t handle, uint8_t port,
-		uint32_t *pkt_assy_p, uint32_t *pkt_reorder_p)
-{
-	uint64_t		value;
-
-	ASSERT(IS_PORT_NUM_VALID(port));
-
-	TXC_FZC_CNTL_REG_READ64(handle, TXC_PKT_STUFFED_REG, port, &value);
-	*pkt_assy_p = ((uint32_t)((value & TXC_PKT_STUFF_PKTASY_MASK) >>
-		TXC_PKT_STUFF_PKTASY_SHIFT));
-	*pkt_reorder_p = ((uint32_t)((value & TXC_PKT_STUFF_REORDER_MASK) >>
-		TXC_PKT_STUFF_REORDER_SHIFT));
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_txc_pkt_xmt_to_mac_get():
- *	This function is called to get total # of packets transmitted
- *	to the MAC.
- *
- * Parameters:
- *	handle		- NPI handle
- *	port		- port number (0 - 3)
- *	mac_bytes_p 	- bytes transmitted to the MAC.
- *	mac_pkts_p	- packets transmitted to the MAC.
- *
- * Return:
- *	NPI_SUCCESS	- If get is complete successfully.
- *
- *	Error:
- *	NPI_FAILURE	-
- *	NPI_TXC_PORT_INVALID
- */
-npi_status_t
-npi_txc_pkt_xmt_to_mac_get(npi_handle_t handle, uint8_t port,
-		uint32_t *mac_bytes_p, uint32_t *mac_pkts_p)
-{
-	uint64_t		value;
-
-	ASSERT(IS_PORT_NUM_VALID(port));
-
-	TXC_FZC_CNTL_REG_READ64(handle, TXC_PKT_XMIT_REG, port, &value);
-	*mac_pkts_p = ((uint32_t)((value & TXC_PKTS_XMIT_MASK) >>
-		TXC_PKTS_XMIT_SHIFT));
-	*mac_bytes_p = ((uint32_t)((value & TXC_BYTES_XMIT_MASK) >>
-		TXC_BYTES_XMIT_SHIFT));
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_txc_ro_states_get():
- *	This function is called to get TXC's reorder state-machine states.
- *
- * Parameters:
- *	handle		- NPI handle
- *	port		- port number
- *	*states		- TXC Re-order states.
- *
- * Return:
- *	NPI_SUCCESS	- If get is complete successfully.
- *
- *	Error:
- *	NPI_FAILURE	-
- *	NPI_TXC_PORT_INVALID
- */
-npi_status_t
-npi_txc_ro_states_get(npi_handle_t handle, uint8_t port,
-				txc_ro_states_t *states)
-{
-	txc_ro_ctl_t	ctl;
-	txc_ro_tids_t	tids;
-	txc_ro_state0_t	s0;
-	txc_ro_state1_t	s1;
-	txc_ro_state2_t	s2;
-	txc_ro_state3_t	s3;
-	txc_roecc_st_t	ecc;
-	txc_ro_data0_t	d0;
-	txc_ro_data1_t	d1;
-	txc_ro_data2_t	d2;
-	txc_ro_data3_t	d3;
-	txc_ro_data4_t	d4;
-
-	ASSERT(IS_PORT_NUM_VALID(port));
-
-	TXC_FZC_CNTL_REG_READ64(handle, TXC_ROECC_ST_REG, port, &ecc.value);
-	if ((ecc.bits.ldw.correct_error) || (ecc.bits.ldw.uncorrect_error)) {
-		TXC_FZC_CNTL_REG_READ64(handle, TXC_RO_DATA0_REG, port,
-								&d0.value);
-		TXC_FZC_CNTL_REG_READ64(handle, TXC_RO_DATA1_REG, port,
-								&d1.value);
-		TXC_FZC_CNTL_REG_READ64(handle, TXC_RO_DATA2_REG, port,
-								&d2.value);
-		TXC_FZC_CNTL_REG_READ64(handle, TXC_RO_DATA3_REG, port,
-								&d3.value);
-		TXC_FZC_CNTL_REG_READ64(handle, TXC_RO_DATA4_REG, port,
-								&d4.value);
-		states->d0.value = d0.value;
-		states->d1.value = d1.value;
-		states->d2.value = d2.value;
-		states->d3.value = d3.value;
-		states->d4.value = d4.value;
-
-		ecc.bits.ldw.ecc_address = 0;
-		ecc.bits.ldw.correct_error = 0;
-		ecc.bits.ldw.uncorrect_error = 0;
-		ecc.bits.ldw.clr_st = 1;
-		TXC_FZC_CNTL_REG_WRITE64(handle, TXC_ROECC_ST_REG, port,
-						ecc.value);
-	}
-
-	TXC_FZC_CNTL_REG_READ64(handle, TXC_RO_CTL_REG, port, &ctl.value);
-	TXC_FZC_CNTL_REG_READ64(handle, TXC_RO_STATE0_REG, port, &s0.value);
-	TXC_FZC_CNTL_REG_READ64(handle, TXC_RO_STATE1_REG, port, &s1.value);
-	TXC_FZC_CNTL_REG_READ64(handle, TXC_RO_STATE2_REG, port, &s2.value);
-	TXC_FZC_CNTL_REG_READ64(handle, TXC_RO_STATE3_REG, port, &s3.value);
-	TXC_FZC_CNTL_REG_READ64(handle, TXC_RO_TIDS_REG, port, &tids.value);
-
-	states->roecc.value = ecc.value;
-	states->st0.value = s0.value;
-	states->st1.value = s1.value;
-	states->st2.value = s2.value;
-	states->st3.value = s3.value;
-	states->ctl.value = ctl.value;
-	states->tids.value = tids.value;
-
-	ctl.bits.ldw.clr_fail_state = 1;
-	TXC_FZC_CNTL_REG_WRITE64(handle, TXC_RO_CTL_REG, port, ctl.value);
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_txc_ro_ecc_state_clr(npi_handle_t handle, uint8_t port)
-{
-	ASSERT(IS_PORT_NUM_VALID(port));
-
-	TXC_FZC_CNTL_REG_WRITE64(handle, TXC_ROECC_ST_REG, port, 0);
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_txc_sf_states_get():
- *	This function is called to get TXC's store-forward state-machine states.
- *
- * Parameters:
- *	handle		- NPI handle
- *	port		- port number
- *	states		- TXC Store-forward states
- *
- * Return:
- *	NPI_SUCCESS	- If get is complete successfully.
- *
- *	Error:
- *	NPI_FAILURE	-
- *	NPI_TXC_PORT_INVALID
- */
-#ifdef lint
-/*ARGSUSED*/
-#endif
-npi_status_t
-npi_txc_sf_states_get(npi_handle_t handle, uint8_t port,
-				txc_sf_states_t *states)
-{
-	txc_sfecc_st_t	ecc;
-	txc_sf_data0_t	d0;
-	txc_sf_data1_t	d1;
-	txc_sf_data2_t	d2;
-	txc_sf_data3_t	d3;
-	txc_sf_data4_t	d4;
-
-	ASSERT(IS_PORT_NUM_VALID(port));
-
-	TXC_FZC_CNTL_REG_READ64(handle, TXC_SFECC_ST_REG, port, &ecc.value);
-	if ((ecc.bits.ldw.correct_error) || (ecc.bits.ldw.uncorrect_error)) {
-		TXC_FZC_CNTL_REG_READ64(handle, TXC_SF_DATA0_REG, port,
-								&d0.value);
-		TXC_FZC_CNTL_REG_READ64(handle, TXC_SF_DATA1_REG, port,
-								&d1.value);
-		TXC_FZC_CNTL_REG_READ64(handle, TXC_SF_DATA2_REG, port,
-								&d2.value);
-		TXC_FZC_CNTL_REG_READ64(handle, TXC_SF_DATA3_REG, port,
-								&d3.value);
-		TXC_FZC_CNTL_REG_READ64(handle, TXC_SF_DATA4_REG, port,
-								&d4.value);
-		ecc.bits.ldw.ecc_address = 0;
-		ecc.bits.ldw.correct_error = 0;
-		ecc.bits.ldw.uncorrect_error = 0;
-		ecc.bits.ldw.clr_st = 1;
-		TXC_FZC_CNTL_REG_WRITE64(handle, TXC_SFECC_ST_REG, port,
-						ecc.value);
-	}
-
-	states->sfecc.value = ecc.value;
-	states->d0.value = d0.value;
-	states->d1.value = d1.value;
-	states->d2.value = d2.value;
-	states->d3.value = d3.value;
-	states->d4.value = d4.value;
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_txc_sf_ecc_state_clr(npi_handle_t handle, uint8_t port)
-{
-	ASSERT(IS_PORT_NUM_VALID(port));
-
-	TXC_FZC_CNTL_REG_WRITE64(handle, TXC_SFECC_ST_REG, port, 0);
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_txc_global_istatus_get():
- *	This function is called to get TXC's global interrupt status.
- *
- * Parameters:
- *	handle		- NPI handle
- *	istatus		- TXC global interrupt status
- *
- * Return:
- */
-void
-npi_txc_global_istatus_get(npi_handle_t handle, txc_int_stat_t *istatus)
-{
-	txc_int_stat_t	status;
-
-	NXGE_REG_RD64(handle, TXC_INT_STAT_REG, &status.value);
-
-	istatus->value = status.value;
-}
-
-/*
- * npi_txc_global_istatus_clear():
- *	This function is called to clear TXC's global interrupt status.
- *
- * Parameters:
- *	handle		- NPI handle
- *	istatus		- TXC global interrupt status
- *
- * Return:
- */
-void
-npi_txc_global_istatus_clear(npi_handle_t handle, uint64_t istatus)
-{
-	NXGE_REG_WR64(handle, TXC_INT_STAT_REG, istatus);
-}
-
-void
-npi_txc_global_imask_set(npi_handle_t handle, uint8_t portn, uint8_t istatus)
-{
-	uint64_t val;
-
-	NXGE_REG_RD64(handle, TXC_INT_MASK_REG, &val);
-	switch (portn) {
-	case 0:
-		val &= 0xFFFFFF00;
-		val |= istatus & 0x3F;
-		break;
-	case 1:
-		val &= 0xFFFF00FF;
-		val |= (istatus << 8) & 0x3F00;
-		break;
-	case 2:
-		val &= 0xFF00FFFF;
-		val |= (istatus << 16) & 0x3F0000;
-		break;
-	case 3:
-		val &= 0x00FFFFFF;
-		val |= (istatus << 24) & 0x3F000000;
-		break;
-	default:
-		;
-	}
-	NXGE_REG_WR64(handle, TXC_INT_MASK_REG, val);
-}
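Each port owns one byte lane of TXC_INT_MASK_REG and only the low six bits of
that lane are used, so the per-port cases above are equivalent (for the 32
implemented bits of the register) to this generic sketch:

	int shift = 8 * portn;				/* ports 0 - 3 */

	val &= ~(0xFFULL << shift);			/* clear the port's byte lane */
	val |= ((uint64_t)(istatus & 0x3F)) << shift;	/* set its 6 mask bits */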
--- a/usr/src/uts/sun4v/io/nxge/npi/npi_txc.h	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,138 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef _NPI_TXC_H
-#define	_NPI_TXC_H
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#ifdef	__cplusplus
-extern "C" {
-#endif
-
-#include <npi.h>
-#include <nxge_txc_hw.h>
-
-/*
- * Transmit Controller (TXC) NPI error codes
- */
-#define	TXC_ER_ST			(TXC_BLK_ID << NPI_BLOCK_ID_SHIFT)
-#define	TXC_ID_SHIFT(n)			(n << NPI_PORT_CHAN_SHIFT)
-
-#define	NPI_TXC_PORT_INVALID(n)		(TXC_ID_SHIFT(n) | IS_PORT |\
-					TXC_ER_ST | PORT_INVALID)
-
-#define	NPI_TXC_CHANNEL_INVALID(n)	(TXC_ID_SHIFT(n) | IS_PORT |\
-					TXC_ER_ST | CHANNEL_INVALID)
-
-#define	NPI_TXC_OPCODE_INVALID(n)	(TXC_ID_SHIFT(n) | IS_PORT |\
-					TXC_ER_ST | OPCODE_INVALID)
-
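A failed TXC call returns NPI_FAILURE OR-ed with one of the encodings above,
so a caller can match on the exact port or channel that was rejected; a
hedged sketch (the variable names are illustrative):

	npi_status_t rs;

	rs = npi_txc_port_dma_channel_enable(handle, port, channel);
	if (rs == (NPI_FAILURE | NPI_TXC_CHANNEL_INVALID(channel))) {
		/* the channel argument was out of range for this TXC call */
	}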
-/*
- * Register offset (0x1000 bytes for each channel) for TXC registers.
- */
-#define	NXGE_TXC_FZC_OFFSET(x, cn)	(x + TXC_FZC_CHANNEL_OFFSET(cn))
-
-/*
- * Register offset (0x100 bytes for each port) for TXC Function zero
- * control registers.
- */
-#define	NXGE_TXC_FZC_CNTL_OFFSET(x, port) (x + \
-			TXC_FZC_CNTL_PORT_OFFSET(port))
-/*
- * PIO macros to read and write the transmit control registers.
- */
-#define	TXC_FZC_REG_READ64(handle, reg, cn, val_p)	\
-		NXGE_REG_RD64(handle, \
-		(NXGE_TXC_FZC_OFFSET(reg, cn)), val_p)
-
-#define	TXC_FZC_REG_WRITE64(handle, reg, cn, data)	\
-		NXGE_REG_WR64(handle, \
-		(NXGE_TXC_FZC_OFFSET(reg, cn)), data)
-
-#define	TXC_FZC_CNTL_REG_READ64(handle, reg, port, val_p)	\
-		NXGE_REG_RD64(handle, \
-		(NXGE_TXC_FZC_CNTL_OFFSET(reg, port)), val_p)
-
-#define	TXC_FZC_CNTL_REG_WRITE64(handle, reg, port, data)	\
-		NXGE_REG_WR64(handle, \
-		(NXGE_TXC_FZC_CNTL_OFFSET(reg, port)), data)
-
-/*
- * TXC (Transmit Controller) prototypes.
- */
-npi_status_t npi_txc_dma_max_burst(npi_handle_t, io_op_t,
-		uint8_t, uint32_t *);
-npi_status_t npi_txc_dma_max_burst_set(npi_handle_t, uint8_t,
-		uint32_t);
-npi_status_t npi_txc_dma_bytes_transmitted(npi_handle_t,
-		uint8_t, uint32_t *);
-npi_status_t npi_txc_control(npi_handle_t, io_op_t,
-		p_txc_control_t);
-npi_status_t npi_txc_global_enable(npi_handle_t);
-npi_status_t npi_txc_global_disable(npi_handle_t);
-npi_status_t npi_txc_control_clear(npi_handle_t, uint8_t);
-npi_status_t npi_txc_training_set(npi_handle_t, uint32_t);
-npi_status_t npi_txc_training_get(npi_handle_t, uint32_t *);
-npi_status_t npi_txc_port_control_get(npi_handle_t, uint8_t,
-		uint32_t *);
-npi_status_t npi_txc_port_enable(npi_handle_t, uint8_t);
-npi_status_t npi_txc_port_disable(npi_handle_t, uint8_t);
-npi_status_t npi_txc_dma_max_burst(npi_handle_t, io_op_t,
-		uint8_t, uint32_t *);
-npi_status_t npi_txc_port_dma_enable(npi_handle_t, uint8_t,
-		uint32_t);
-npi_status_t npi_txc_port_dma_list_get(npi_handle_t, uint8_t,
-		uint32_t *);
-npi_status_t npi_txc_port_dma_channel_enable(npi_handle_t, uint8_t,
-		uint8_t);
-npi_status_t npi_txc_port_dma_channel_disable(npi_handle_t, uint8_t,
-		uint8_t);
-
-npi_status_t npi_txc_pkt_stuffed_get(npi_handle_t, uint8_t,
-		uint32_t *, uint32_t *);
-npi_status_t npi_txc_pkt_xmt_to_mac_get(npi_handle_t, uint8_t,
-		uint32_t *, uint32_t *);
-npi_status_t npi_txc_reorder_get(npi_handle_t, uint8_t,
-		uint32_t *);
-npi_status_t npi_txc_dump_tdc_fzc_regs(npi_handle_t, uint8_t);
-npi_status_t npi_txc_dump_fzc_regs(npi_handle_t);
-npi_status_t npi_txc_dump_port_fzc_regs(npi_handle_t, uint8_t);
-npi_status_t npi_txc_ro_states_get(npi_handle_t, uint8_t,
-		txc_ro_states_t *);
-npi_status_t npi_txc_ro_ecc_state_clr(npi_handle_t, uint8_t);
-npi_status_t npi_txc_sf_states_get(npi_handle_t, uint8_t,
-		txc_sf_states_t *);
-npi_status_t npi_txc_sf_ecc_state_clr(npi_handle_t, uint8_t);
-void npi_txc_global_istatus_get(npi_handle_t, txc_int_stat_t *);
-void npi_txc_global_istatus_clear(npi_handle_t, uint64_t);
-void npi_txc_global_imask_set(npi_handle_t, uint8_t,
-		uint8_t);
-
-#ifdef	__cplusplus
-}
-#endif
-
-#endif	/* _NPI_TXC_H */
--- a/usr/src/uts/sun4v/io/nxge/npi/npi_txdma.c	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,2077 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#include <npi_txdma.h>
-
-#define	TXDMA_WAIT_LOOP		10000
-#define	TXDMA_WAIT_MSEC		5
-
-static npi_status_t npi_txdma_control_reset_wait(npi_handle_t handle,
-	uint8_t channel);
-static npi_status_t npi_txdma_control_stop_wait(npi_handle_t handle,
-	uint8_t channel);
-static npi_status_t npi_txdma_control_resume_wait(npi_handle_t handle,
-	uint8_t channel);
-
-uint64_t tdc_dmc_offset[] = {
-	TX_RNG_CFIG_REG,
-	TX_RING_HDL_REG,
-	TX_RING_KICK_REG,
-	TX_ENT_MSK_REG,
-	TX_CS_REG,
-	TXDMA_MBH_REG,
-	TXDMA_MBL_REG,
-	TX_DMA_PRE_ST_REG,
-	TX_RNG_ERR_LOGH_REG,
-	TX_RNG_ERR_LOGL_REG,
-	TDMC_INTR_DBG_REG,
-	TX_CS_DBG_REG
-};
-
-const char *tdc_dmc_name[] = {
-	"TX_RNG_CFIG_REG",
-	"TX_RING_HDL_REG",
-	"TX_RING_KICK_REG",
-	"TX_ENT_MSK_REG",
-	"TX_CS_REG",
-	"TXDMA_MBH_REG",
-	"TXDMA_MBL_REG",
-	"TX_DMA_PRE_ST_REG",
-	"TX_RNG_ERR_LOGH_REG",
-	"TX_RNG_ERR_LOGL_REG",
-	"TDMC_INTR_DBG_REG",
-	"TX_CS_DBG_REG"
-};
-
-uint64_t tdc_fzc_offset [] = {
-	TX_LOG_PAGE_VLD_REG,
-	TX_LOG_PAGE_MASK1_REG,
-	TX_LOG_PAGE_VAL1_REG,
-	TX_LOG_PAGE_MASK2_REG,
-	TX_LOG_PAGE_VAL2_REG,
-	TX_LOG_PAGE_RELO1_REG,
-	TX_LOG_PAGE_RELO2_REG,
-	TX_LOG_PAGE_HDL_REG
-};
-
-const char *tdc_fzc_name [] = {
-	"TX_LOG_PAGE_VLD_REG",
-	"TX_LOG_PAGE_MASK1_REG",
-	"TX_LOG_PAGE_VAL1_REG",
-	"TX_LOG_PAGE_MASK2_REG",
-	"TX_LOG_PAGE_VAL2_REG",
-	"TX_LOG_PAGE_RELO1_REG",
-	"TX_LOG_PAGE_RELO2_REG",
-	"TX_LOG_PAGE_HDL_REG"
-};
-
-uint64_t tx_fzc_offset[] = {
-	TX_ADDR_MD_REG,
-	TDMC_INJ_PAR_ERR_REG,
-	TDMC_DBG_SEL_REG,
-	TDMC_TRAINING_REG
-};
-
-const char *tx_fzc_name[] = {
-	"TX_ADDR_MD_REG",
-	"TDMC_INJ_PAR_ERR_REG",
-	"TDMC_DBG_SEL_REG",
-	"TDMC_TRAINING_REG"
-};
-
-#define	NUM_TDC_DMC_REGS	(sizeof (tdc_dmc_offset) / sizeof (uint64_t))
-#define	NUM_TX_FZC_REGS	(sizeof (tx_fzc_offset) / sizeof (uint64_t))
-
-/*
- * npi_txdma_dump_tdc_regs
- * Dumps the contents of tdc csrs and fzc registers
- *
- * Input:
- *         tdc:      TX DMA number
- *
- * return:
- *     NPI_SUCCESS
- *     NPI_FAILURE
- *     NPI_TXDMA_CHANNEL_INVALID
- *
- */
-npi_status_t
-npi_txdma_dump_tdc_regs(npi_handle_t handle, uint8_t tdc)
-{
-
-	uint64_t		value, offset;
-	int 			num_regs, i;
-
-	ASSERT(TXDMA_CHANNEL_VALID(tdc));
-	if (!TXDMA_CHANNEL_VALID(tdc)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			"npi_txdma_dump_tdc_regs"
-			" Invalid TDC number %d \n",
-			tdc));
-
-		return (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(tdc));
-	}
-
-	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-		    "\nTXDMA DMC Register Dump for Channel %d\n",
-			    tdc));
-
-	num_regs = NUM_TDC_DMC_REGS;
-	for (i = 0; i < num_regs; i++) {
-		TXDMA_REG_READ64(handle, tdc_dmc_offset[i], tdc, &value);
-		offset = NXGE_TXDMA_OFFSET(tdc_dmc_offset[i], handle.is_vraddr,
-				tdc);
-		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL, "0x%08llx "
-			"%s\t 0x%016llx \n",
-			offset, tdc_dmc_name[i],
-			value));
-	}
-
-	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-		"\nTXDMA FZC_DMC Register Dump for Channel %d\n",
-		tdc));
-
-	num_regs = sizeof (tdc_fzc_offset) / sizeof (uint64_t);
-	for (i = 0; i < num_regs; i++) {
-		offset = NXGE_TXLOG_OFFSET(tdc_fzc_offset[i], tdc);
-		NXGE_REG_RD64(handle, offset, &value);
-		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL, "0x%08llx "
-			"%s\t %016llx \n",
-			offset, tdc_fzc_name[i],
-			value));
-	}
-
-	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-		"\n TXDMA Register Dump for Channel %d done\n", tdc));
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_txdma_dump_fzc_regs
- * Dumps the contents of tdc csrs and fzc registers
- *
- * Input:
- *         tdc:      TX DMA number
- *
- * return:
- *     NPI_SUCCESS
- *     NPI_FAILURE
- *     NPI_TXDMA_CHANNEL_INVALID
- *
- */
-npi_status_t
-npi_txdma_dump_fzc_regs(npi_handle_t handle)
-{
-
-	uint64_t value;
-	int num_regs, i;
-
-	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-		"\nFZC_DMC Common Register Dump\n"));
-
-	num_regs = NUM_TX_FZC_REGS;
-	for (i = 0; i < num_regs; i++) {
-		NXGE_REG_RD64(handle, tx_fzc_offset[i], &value);
-		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL, "0x%08llx "
-			"%s\t 0x%08llx \n",
-			tx_fzc_offset[i],
-			tx_fzc_name[i], value));
-	}
-	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-		"\n TXDMA FZC_DMC Register Dump Done \n"));
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_txdma_tdc_regs_zero(npi_handle_t handle, uint8_t tdc)
-{
-	uint64_t		value;
-	int 			num_regs, i;
-
-	ASSERT(TXDMA_CHANNEL_VALID(tdc));
-	if (!TXDMA_CHANNEL_VALID(tdc)) {
-		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-			"npi_txdma_tdc_regs_zero"
-			" Invalid TDC number %d \n",
-			tdc));
-		return (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(tdc));
-	}
-
-	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-		    "\nTXDMA DMC Register (zero) for Channel %d\n",
-			    tdc));
-
-	num_regs = NUM_TDC_DMC_REGS;
-	value = 0;
-	for (i = 0; i < num_regs; i++) {
-		TXDMA_REG_WRITE64(handle, tdc_dmc_offset[i], tdc,
-			value);
-	}
-
-	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-		"\nTXDMA FZC_DMC Register clear for Channel %d\n",
-		tdc));
-
-	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-		"\n TXDMA Register Clear to 0s for Channel %d done\n", tdc));
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_txdma_mode32_set():
- *	This function is called to enable or disable 32-bit addressing mode.
- *
- * Parameters:
- *	handle		- NPI handle
- *	mode_enable	- B_TRUE  (enable 32 bit mode)
- *			  B_FALSE (disable 32 bit mode)
- *
- * Return:
- *	NPI_SUCCESS		- If set is complete successfully.
- *
- *	Error:
- *	NONE
- */
-npi_status_t
-npi_txdma_mode32_set(npi_handle_t handle, boolean_t mode_enable)
-{
-	tx_addr_md_t		mode32;
-
-	mode32.value = 0;
-	if (mode_enable) {
-		mode32.bits.ldw.mode32 = 1;
-	} else {
-		mode32.bits.ldw.mode32 = 0;
-	}
-	NXGE_REG_WR64(handle, TX_ADDR_MD_REG, mode32.value);
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_txdma_log_page_set():
- *	This function is called to configure a logical page
- *	(valid bit, mask, value, relocation).
- *
- * Parameters:
- *	handle		- NPI handle
- *	cfgp		- pointer to NPI defined data structure:
- *				- page valid
- * 				- mask
- *				- value
- *				- relocation
- *	channel		- hardware TXDMA channel from 0 to 23.
- *
- * Return:
- *	NPI_SUCCESS		- If configurations are set successfully.
- *
- *	Error:
- *	NPI_FAILURE -
- *		NPI_TXDMA_CHANNEL_INVALID	-
- *		NPI_TXDMA_FUNC_INVALID	-
- *		NPI_TXDMA_PAGE_INVALID	-
- */
-npi_status_t
-npi_txdma_log_page_set(npi_handle_t handle, uint8_t channel,
-		p_dma_log_page_t cfgp)
-{
-	log_page_vld_t		vld;
-	int			status;
-	uint64_t		val;
-	dma_log_page_t		cfg;
-
-	DMA_LOG_PAGE_FN_VALIDATE(channel, cfgp->page_num, cfgp->func_num,
-		status);
-	if (status) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_txdma_log_page_set"
-				    " npi_status <0x%x>", status));
-		return (status);
-	}
-
-	TX_LOG_REG_WRITE64(handle, TX_LOG_PAGE_VLD_REG, channel, 0);
-	TX_LOG_REG_READ64(handle, TX_LOG_PAGE_VLD_REG, channel, &val);
-
-	NPI_DEBUG_MSG((handle.function, NPI_TDC_CTL,
-			    "\n==> npi_txdma_log_page_set: WRITE 0 and "
-			    " READ back 0x%llx\n ", val));
-
-	vld.value = 0;
-	TX_LOG_REG_READ64(handle, TX_LOG_PAGE_VLD_REG, channel, &val);
-
-	val &= 0x3;
-	vld.value |= val;
-
-	vld.value = 0;
-	vld.bits.ldw.func = cfgp->func_num;
-
-	if (!cfgp->page_num) {
-		TX_LOG_REG_WRITE64(handle, TX_LOG_PAGE_MASK1_REG,
-			channel, (cfgp->mask & DMA_LOG_PAGE_MASK_MASK));
-		TX_LOG_REG_WRITE64(handle, TX_LOG_PAGE_VAL1_REG,
-			channel, (cfgp->value & DMA_LOG_PAGE_VALUE_MASK));
-		TX_LOG_REG_WRITE64(handle, TX_LOG_PAGE_RELO1_REG,
-			channel, (cfgp->reloc & DMA_LOG_PAGE_RELO_MASK));
-	} else {
-		TX_LOG_REG_WRITE64(handle, TX_LOG_PAGE_MASK2_REG,
-			channel, (cfgp->mask & DMA_LOG_PAGE_MASK_MASK));
-		TX_LOG_REG_WRITE64(handle, TX_LOG_PAGE_VAL2_REG,
-			channel, (cfgp->value & DMA_LOG_PAGE_VALUE_MASK));
-		TX_LOG_REG_WRITE64(handle, TX_LOG_PAGE_RELO2_REG,
-			channel, (cfgp->reloc & DMA_LOG_PAGE_RELO_MASK));
-	}
-
-	TX_LOG_REG_WRITE64(handle, TX_LOG_PAGE_VLD_REG, channel,
-		vld.value | (cfgp->valid << cfgp->page_num));
-
-	NPI_DEBUG_MSG((handle.function, NPI_REG_CTL,
-				    "\n==> npi_txdma_log_page_set: vld value "
-				    " 0x%llx function %d page_valid01 0x%x\n",
-				    vld.value,
-				    vld.bits.ldw.func,
-		(cfgp->valid << cfgp->page_num)));
-
-
-	cfg.page_num = 0;
-	cfg.func_num = 0;
-	(void) npi_txdma_log_page_get(handle, channel, &cfg);
-	cfg.page_num = 1;
-	(void) npi_txdma_log_page_get(handle, channel, &cfg);
-
-	return (status);
-}
-
-/*
- * npi_txdma_log_page_get():
- *	This function is called to get a logical page
- *	(valid bit, mask, value, relocation).
- *
- * Parameters:
- *	handle		- NPI handle
- *	cfgp		- Get the following values (NPI defined structure):
- *				- page valid
- * 				- mask
- *				- value
- *				- relocation
- *	channel		- hardware TXDMA channel from 0 to 23.
- *
- * Return:
- *	NPI_SUCCESS		- If configurations are read successfully.
- *
- *	Error:
- *	NPI_FAILURE -
- *		NPI_TXDMA_CHANNEL_INVALID	-
- *		NPI_TXDMA_FUNC_INVALID	-
- *		NPI_TXDMA_PAGE_INVALID	-
- */
-npi_status_t
-npi_txdma_log_page_get(npi_handle_t handle, uint8_t channel,
-		p_dma_log_page_t cfgp)
-{
-	log_page_vld_t		vld;
-	int			status;
-	uint64_t		val;
-
-	DMA_LOG_PAGE_VALIDATE(channel, cfgp->page_num, status);
-	if (status) {
-		NPI_ERROR_MSG((handle.function, NPI_REG_CTL,
-					    " npi_txdma_log_page_get"
-					    " npi_status <0x%x>", status));
-		return (status);
-	}
-
-	vld.value = 0;
-	vld.bits.ldw.func = cfgp->func_num;
-	TX_LOG_REG_READ64(handle, TX_LOG_PAGE_VLD_REG, channel, &val);
-
-	NPI_DEBUG_MSG((handle.function, NPI_TDC_CTL,
-				    "\n==> npi_txdma_log_page_get: read value "
-				    " function %d  value 0x%llx\n",
-				    cfgp->func_num, val));
-
-	vld.value |= val;
-	cfgp->func_num = vld.bits.ldw.func;
-
-	if (!cfgp->page_num) {
-		TX_LOG_REG_READ64(handle, TX_LOG_PAGE_MASK1_REG, channel, &val);
-		cfgp->mask = val & DMA_LOG_PAGE_MASK_MASK;
-		TX_LOG_REG_READ64(handle, TX_LOG_PAGE_VAL1_REG, channel, &val);
-		cfgp->value = val & DMA_LOG_PAGE_VALUE_MASK;
-		TX_LOG_REG_READ64(handle, TX_LOG_PAGE_RELO1_REG, channel, &val);
-		cfgp->reloc = val & DMA_LOG_PAGE_RELO_MASK;
-		cfgp->valid = vld.bits.ldw.page0;
-	} else {
-		TX_LOG_REG_READ64(handle, TX_LOG_PAGE_MASK2_REG, channel, &val);
-		cfgp->mask = val & DMA_LOG_PAGE_MASK_MASK;
-		TX_LOG_REG_READ64(handle, TX_LOG_PAGE_VAL2_REG, channel, &val);
-		cfgp->value = val & DMA_LOG_PAGE_VALUE_MASK;
-		TX_LOG_REG_READ64(handle, TX_LOG_PAGE_RELO2_REG, channel, &val);
-		cfgp->reloc = val & DMA_LOG_PAGE_RELO_MASK;
-		cfgp->valid = vld.bits.ldw.page1;
-	}
-
-	return (status);
-}
-
-/*
- * npi_txdma_log_page_handle_set():
- *	This function is called to program a page handle
- *	(bits [63:44] of a 64-bit address to generate
- *	a 64 bit address)
- *
- * Parameters:
- *	handle		- NPI handle
- *	hdl_p		- pointer to a logical page handle
- *			  hardware data structure (log_page_hdl_t).
- *	channel		- hardware TXDMA channel from 0 to 23.
- *
- * Return:
- *	NPI_SUCCESS		- If configurations are set successfully.
- *
- *	Error:
- *	NPI_FAILURE -
- *		NPI_TXDMA_CHANNEL_INVALID	-
- *		NPI_TXDMA_FUNC_INVALID	-
- *		NPI_TXDMA_PAGE_INVALID	-
- */
-npi_status_t
-npi_txdma_log_page_handle_set(npi_handle_t handle, uint8_t channel,
-		p_log_page_hdl_t hdl_p)
-{
-	int			status = NPI_SUCCESS;
-
-	ASSERT(TXDMA_CHANNEL_VALID(channel));
-	if (!TXDMA_CHANNEL_VALID(channel)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_txdma_log_page_handle_set"
-				    " Invalid Input: channel <0x%x>",
-				    channel));
-		return (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(channel));
-	}
-
-	TX_LOG_REG_WRITE64(handle, TX_LOG_PAGE_HDL_REG,
-		channel, hdl_p->value);
-
-	return (status);
-}
-
-/*
- * npi_txdma_log_page_config():
- *	This function is called to perform I/O operations (set, get,
- *	clear) on a logical page's valid bit, mask, value and
- *	relocation fields.
- *
- * Parameters:
- *	handle		- NPI handle
- *	op_mode		- OP_GET, OP_SET, OP_CLEAR
- *	type		- NPI specific config type
- *			   TXDMA_LOG_PAGE_MASK
- *			   TXDMA_LOG_PAGE_VALUE
- *			   TXDMA_LOG_PAGE_RELOC
- *			   TXDMA_LOG_PAGE_VALID
- *			   TXDMA_LOG_PAGE_ALL
- *	channel		- hardware TXDMA channel from 0 to 23.
- *	cfgp		- pointer to the NPI config structure.
- * Return:
- *	NPI_SUCCESS		- If configurations are read successfully.
- *
- *	Error:
- *	NPI_FAILURE		-
- *		NPI_TXDMA_OPCODE_INVALID	-
- *		NPI_TXDMA_CHANNEL_INVALID	-
- *		NPI_TXDMA_FUNC_INVALID	-
- *		NPI_TXDMA_PAGE_INVALID	-
- */
-npi_status_t
-npi_txdma_log_page_config(npi_handle_t handle, io_op_t op_mode,
-		txdma_log_cfg_t type, uint8_t channel,
-		p_dma_log_page_t cfgp)
-{
-	int			status = NPI_SUCCESS;
-	uint64_t		val;
-
-	ASSERT(TXDMA_CHANNEL_VALID(channel));
-	if (!TXDMA_CHANNEL_VALID(channel)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_txdma_log_page_config"
-				    " Invalid Input: channel <0x%x>",
-				    channel));
-		return (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(channel));
-	}
-
-	switch (op_mode) {
-	case OP_GET:
-		switch (type) {
-		case TXDMA_LOG_PAGE_ALL:
-			return (npi_txdma_log_page_get(handle, channel,
-					cfgp));
-		case TXDMA_LOG_PAGE_MASK:
-			if (!cfgp->page_num) {
-				TX_LOG_REG_READ64(handle, TX_LOG_PAGE_MASK1_REG,
-						channel, &val);
-				cfgp->mask = val & DMA_LOG_PAGE_MASK_MASK;
-			} else {
-				TX_LOG_REG_READ64(handle, TX_LOG_PAGE_MASK2_REG,
-						channel, &val);
-				cfgp->mask = val & DMA_LOG_PAGE_MASK_MASK;
-			}
-			break;
-
-		case TXDMA_LOG_PAGE_VALUE:
-			if (!cfgp->page_num) {
-				TX_LOG_REG_READ64(handle, TX_LOG_PAGE_VAL1_REG,
-						channel, &val);
-				cfgp->value = val & DMA_LOG_PAGE_VALUE_MASK;
-			} else {
-				TX_LOG_REG_READ64(handle, TX_LOG_PAGE_VAL2_REG,
-						channel, &val);
-				cfgp->value = val & DMA_LOG_PAGE_VALUE_MASK;
-			}
-			break;
-
-		case TXDMA_LOG_PAGE_RELOC:
-			if (!cfgp->page_num) {
-				TX_LOG_REG_READ64(handle, TX_LOG_PAGE_RELO1_REG,
-						channel, &val);
-				cfgp->reloc = val & DMA_LOG_PAGE_RELO_MASK;
-			} else {
-				TX_LOG_REG_READ64(handle, TX_LOG_PAGE_RELO2_REG,
-						channel, &val);
-				cfgp->reloc = val & DMA_LOG_PAGE_RELO_MASK;
-			}
-			break;
-
-		default:
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_txdma_log_page_config"
-					    " Invalid Input: pageconfig <0x%x>",
-					    type));
-			return (NPI_FAILURE |
-				NPI_TXDMA_OPCODE_INVALID(channel));
-		}
-
-		break;
-
-	case OP_SET:
-	case OP_CLEAR:
-		if (op_mode == OP_CLEAR) {
-			cfgp->valid = 0;
-			cfgp->mask = cfgp->func_num = 0;
-			cfgp->value = cfgp->reloc = 0;
-		}
-		switch (type) {
-		case TXDMA_LOG_PAGE_ALL:
-			return (npi_txdma_log_page_set(handle, channel,
-					cfgp));
-		case TXDMA_LOG_PAGE_MASK:
-			if (!cfgp->page_num) {
-				TX_LOG_REG_WRITE64(handle,
-				TX_LOG_PAGE_MASK1_REG, channel,
-				(cfgp->mask & DMA_LOG_PAGE_MASK_MASK));
-			} else {
-				TX_LOG_REG_WRITE64(handle,
-				TX_LOG_PAGE_MASK2_REG,
-				channel, (cfgp->mask & DMA_LOG_PAGE_MASK_MASK));
-			}
-			break;
-
-		case TXDMA_LOG_PAGE_VALUE:
-			if (!cfgp->page_num) {
-				TX_LOG_REG_WRITE64(handle,
-				TX_LOG_PAGE_VAL1_REG, channel,
-				(cfgp->value & DMA_LOG_PAGE_VALUE_MASK));
-			} else {
-				TX_LOG_REG_WRITE64(handle,
-				TX_LOG_PAGE_VAL2_REG, channel,
-				(cfgp->value & DMA_LOG_PAGE_VALUE_MASK));
-			}
-			break;
-
-		case TXDMA_LOG_PAGE_RELOC:
-			if (!cfgp->page_num) {
-				TX_LOG_REG_WRITE64(handle,
-				TX_LOG_PAGE_RELO1_REG, channel,
-				(cfgp->reloc & DMA_LOG_PAGE_RELO_MASK));
-			} else {
-				TX_LOG_REG_WRITE64(handle,
-				TX_LOG_PAGE_RELO2_REG, channel,
-				(cfgp->reloc & DMA_LOG_PAGE_RELO_MASK));
-			}
-			break;
-
-		default:
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_txdma_log_page_config"
-					    " Invalid Input: pageconfig <0x%x>",
-					    type));
-			return (NPI_FAILURE |
-				NPI_TXDMA_OPCODE_INVALID(channel));
-		}
-
-		break;
-	default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_txdma_log_page_config"
-					    " Invalid Input: op <0x%x>",
-					    op_mode));
-		return (NPI_FAILURE | NPI_TXDMA_OPCODE_INVALID(channel));
-	}
-
-	return (status);
-}
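A minimal sketch of programming one logical page through the
OP_SET/TXDMA_LOG_PAGE_ALL path above; the mask, value and reloc inputs are
assumed to be supplied by the caller:

	dma_log_page_t cfg;

	cfg.page_num = 0;	/* logical page 0 of this channel */
	cfg.func_num = 0;	/* owning function number */
	cfg.valid = 1;
	cfg.mask = mask;
	cfg.value = value;
	cfg.reloc = reloc;
	(void) npi_txdma_log_page_config(handle, OP_SET, TXDMA_LOG_PAGE_ALL,
		channel, &cfg);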
-
-/*
- * npi_txdma_log_page_vld_config():
- *	This function is called to configure the logical
- *	page valid register.
- *
- * Parameters:
- *	handle		- NPI handle
- *	op_mode		- OP_GET: get valid page configuration
- *			  OP_SET: set valid page configuration
- *			  OP_UPDATE: update valid page configuration
- *			  OP_CLEAR: reset both valid pages to
- *			  not defined (0).
- *	channel		- hardware TXDMA channel from 0 to 23.
- *	vld_p		- pointer to hardware defined log page valid register.
- * Return:
- *	NPI_SUCCESS		- If set is complete successfully.
- *
- *	Error:
- *	NPI_FAILURE -
- *		NPI_TXDMA_CHANNEL_INVALID -
- *		NPI_TXDMA_OPCODE_INVALID -
- */
-npi_status_t
-npi_txdma_log_page_vld_config(npi_handle_t handle, io_op_t op_mode,
-		uint8_t channel, p_log_page_vld_t vld_p)
-{
-	int			status = NPI_SUCCESS;
-	log_page_vld_t		vld;
-
-	ASSERT(TXDMA_CHANNEL_VALID(channel));
-	if (!TXDMA_CHANNEL_VALID(channel)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_txdma_log_page_vld_config"
-				    " Invalid Input: channel <0x%x>",
-				    channel));
-		return (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(channel));
-	}
-
-	switch (op_mode) {
-	case OP_GET:
-		TX_LOG_REG_READ64(handle, TX_LOG_PAGE_VLD_REG, channel,
-					&vld_p->value);
-		break;
-
-	case OP_SET:
-		TX_LOG_REG_WRITE64(handle, TX_LOG_PAGE_VLD_REG,
-					channel, vld_p->value);
-		break;
-
-	case OP_UPDATE:
-		TX_LOG_REG_READ64(handle, TX_LOG_PAGE_VLD_REG, channel,
-					&vld.value);
-		TX_LOG_REG_WRITE64(handle, TX_LOG_PAGE_VLD_REG,
-					channel, vld.value | vld_p->value);
-		break;
-
-	case OP_CLEAR:
-		TX_LOG_REG_WRITE64(handle, TX_LOG_PAGE_VLD_REG,
-					channel, 0);
-		break;
-
-	default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_txdma_log_pag_vld_cofig"
-				    " Invalid Input: pagevld <0x%x>",
-				    op_mode));
-		return (NPI_FAILURE | NPI_TXDMA_OPCODE_INVALID(channel));
-	}
-
-	return (status);
-}
-
-/*
- * npi_txdma_channel_reset():
- *	This function is called to reset a transmit DMA channel.
- *	(This function is used to reset a channel and reinitialize
- *	 all other bits except RST_STATE).
- *
- * Parameters:
- *	handle		- NPI handle (virtualization flag must be defined).
- *	channel		- logical TXDMA channel from 0 to 23.
- *			  (If virtualization flag is not set, then
- *			   logical channel is the same as the hardware
- *			   channel number).
- *
- * Return:
- *	NPI_SUCCESS		- If reset is complete successfully.
- *
- *	Error:
- *	NPI_FAILURE	-
- *		NPI_TXDMA_CHANNEL_INVALID -
- *		NPI_TXDMA_RESET_FAILED -
- */
-npi_status_t
-npi_txdma_channel_reset(npi_handle_t handle, uint8_t channel)
-{
-	NPI_DEBUG_MSG((handle.function, NPI_TDC_CTL,
-			    " npi_txdma_channel_reset"
-			    " RESETTING channel %d",
-			    channel));
-	return (npi_txdma_channel_control(handle, TXDMA_RESET, channel));
-}
-
-/*
- * npi_txdma_channel_init_enable():
- *	This function is called to start a transmit DMA channel after reset.
- *
- * Parameters:
- *	handle		- NPI handle (virtualization flag must be defined).
- *	channel		- logical TXDMA channel from 0 to 23.
- *			  (If virtualization flag is not set, then
- *			   logical channel is the same as the hardware
- *			   channel number).
- * Return:
- *	NPI_SUCCESS		- If DMA channel is started successfully.
- *
- *	Error:
- *	NPI_FAILURE	-
- *		NPI_TXDMA_CHANNEL_INVALID -
- */
-npi_status_t
-npi_txdma_channel_init_enable(npi_handle_t handle, uint8_t channel)
-{
-	return (npi_txdma_channel_control(handle, TXDMA_INIT_START, channel));
-}
-
-/*
- * npi_txdma_channel_enable():
- *	This function is called to start a transmit DMA channel.
- *
- * Parameters:
- *	handle		- NPI handle (virtualization flag must be defined).
- *	channel		- logical TXDMA channel from 0 to 23.
- *			  (If virtualization flag is not set, then
- *			   logical channel is the same as the hardware
- *			   channel number).
- * Return:
- *	NPI_SUCCESS		- If DMA channel is started successfully.
- *
- *	Error:
- *	NPI_FAILURE	-
- *		NPI_TXDMA_CHANNEL_INVALID -
- */
-
-npi_status_t
-npi_txdma_channel_enable(npi_handle_t handle, uint8_t channel)
-{
-	return (npi_txdma_channel_control(handle, TXDMA_START, channel));
-}
-
-/*
- * npi_txdma_channel_disable():
- *	This function is called to stop a transmit DMA channel.
- *
- * Parameters:
- *	handle		- NPI handle (virtualization flag must be defined).
- *	channel		- logical TXDMA channel from 0 to 23.
- *			  (If virtualization flag is not set, then
- *			   logical channel is the same as the hardware
- *			   channel number).
- * Return:
- *	NPI_SUCCESS		- If DMA channel is stopped successfully.
- *
- *	Error:
- *	NPI_FAILURE	-
- *		NPI_TXDMA_CHANNEL_INVALID -
- *		NPI_TXDMA_STOP_FAILED -
- */
-npi_status_t
-npi_txdma_channel_disable(npi_handle_t handle, uint8_t channel)
-{
-	return (npi_txdma_channel_control(handle, TXDMA_STOP, channel));
-}
-
-/*
- * npi_txdma_channel_resume():
- *	This function is called to restart a transmit DMA channel.
- *
- * Parameters:
- *	handle		- NPI handle (virtualization flag must be defined).
- *	channel		- logical TXDMA channel from 0 to 23.
- *			  (If virtualization flag is not set, then
- *			   logical channel is the same as the hardware
- *			   channel number).
- * Return:
- *	NPI_SUCCESS		- If DMA channel is resumed successfully.
- *
- *	Error:
- *	NPI_FAILURE	-
- *		NPI_TXDMA_CHANNEL_INVALID -
- *		NPI_TXDMA_RESUME_FAILED -
- */
-npi_status_t
-npi_txdma_channel_resume(npi_handle_t handle, uint8_t channel)
-{
-	return (npi_txdma_channel_control(handle, TXDMA_RESUME, channel));
-}
-
-/*
- * npi_txdma_channel_mmk_clear():
- *	This function is called to clear MMK bit.
- *
- * Parameters:
- *	handle		- NPI handle (virtualization flag must be defined).
- *	channel		- logical TXDMA channel from 0 to 23.
- *			  (If virtualization flag is not set, then
- *			   logical channel is the same as the hardware
- *			   channel number).
- * Return:
- *	NPI_SUCCESS		- If MMK is reset successfully.
- *
- *	Error:
- *	NPI_FAILURE	-
- *		NPI_TXDMA_CHANNEL_INVALID -
- */
-npi_status_t
-npi_txdma_channel_mmk_clear(npi_handle_t handle, uint8_t channel)
-{
-	return (npi_txdma_channel_control(handle, TXDMA_CLEAR_MMK, channel));
-}
-
-/*
- * npi_txdma_channel_mbox_enable():
- *	This function is called to enable the mailbox update.
- *
- * Parameters:
- *	handle		- NPI handle (virtualization flag must be defined).
- *	channel		- logical TXDMA channel from 0 to 23.
- *			  (If virtualization flag is not set, then
- *			   logical channel is the same as the hardware
- *			   channel number).
- * Return:
- *	NPI_SUCCESS		- If mailbox is enabled successfully.
- *
- *	Error:
- *	NPI_HW_ERROR		-
- *	NPI_FAILURE	-
- *		NPI_TXDMA_CHANNEL_INVALID -
- */
-npi_status_t
-npi_txdma_channel_mbox_enable(npi_handle_t handle, uint8_t channel)
-{
-	return (npi_txdma_channel_control(handle, TXDMA_MBOX_ENABLE, channel));
-}
-
-/*
- * npi_txdma_channel_control():
- *	This function is called to control a transmit DMA channel
- *	for reset, start or stop.
- *
- * Parameters:
- *	handle		- NPI handle (virtualization flag must be defined).
- *	control		- NPI defined control type supported:
- *				- TXDMA_INIT_RESET
- *				- TXDMA_INIT_START
- *				- TXDMA_RESET
- *				- TXDMA_START
- *				- TXDMA_STOP
- *				- TXDMA_RESUME
- *				- TXDMA_CLEAR_MMK
- *				- TXDMA_MBOX_ENABLE
- *	channel		- logical TXDMA channel from 0 to 23.
- *			  (If virtualization flag is not set, then
- *			   logical channel is the same as the hardware
- *			   channel number).
-
- * Return:
- *	NPI_SUCCESS		- If the control operation completes successfully.
- *
- *	Error:
- *	NPI_FAILURE		-
- *		NPI_TXDMA_OPCODE_INVALID	-
- *		NPI_TXDMA_CHANNEL_INVALID	-
- *		NPI_TXDMA_RESET_FAILED	-
- *		NPI_TXDMA_STOP_FAILED	-
- *		NPI_TXDMA_RESUME_FAILED	-
- */
-npi_status_t
-npi_txdma_channel_control(npi_handle_t handle, txdma_cs_cntl_t control,
-		uint8_t channel)
-{
-	int		status = NPI_SUCCESS;
-	tx_cs_t		cs;
-
-	ASSERT(TXDMA_CHANNEL_VALID(channel));
-	if (!TXDMA_CHANNEL_VALID(channel)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_txdma_channel_control"
-				    " Invalid Input: channel <0x%x>",
-				    channel));
-		return (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(channel));
-	}
-
-	switch (control) {
-	case TXDMA_INIT_RESET:
-		cs.value = 0;
-		TXDMA_REG_READ64(handle, TX_CS_REG, channel, &cs.value);
-		cs.bits.ldw.rst = 1;
-		TXDMA_REG_WRITE64(handle, TX_CS_REG, channel, cs.value);
-		return (npi_txdma_control_reset_wait(handle, channel));
-
-	case TXDMA_INIT_START:
-		cs.value = 0;
-		TXDMA_REG_WRITE64(handle, TX_CS_REG, channel, cs.value);
-		break;
-
-	case TXDMA_RESET:
-		/*
-		 * Sets the reset bit only (hardware will reset all
-		 * the RW bits but leave the RO bits alone).
-		 */
-		cs.value = 0;
-		cs.bits.ldw.rst = 1;
-		TXDMA_REG_WRITE64(handle, TX_CS_REG, channel, cs.value);
-		return (npi_txdma_control_reset_wait(handle, channel));
-
-	case TXDMA_START:
-		/* Enable the DMA channel */
-		TXDMA_REG_READ64(handle, TX_CS_REG, channel, &cs.value);
-		cs.bits.ldw.stop_n_go = 0;
-		TXDMA_REG_WRITE64(handle, TX_CS_REG, channel, cs.value);
-		break;
-
-	case TXDMA_STOP:
-		/* Disable the DMA channel */
-		TXDMA_REG_READ64(handle, TX_CS_REG, channel, &cs.value);
-		cs.bits.ldw.stop_n_go = 1;
-		TXDMA_REG_WRITE64(handle, TX_CS_REG, channel, cs.value);
-		status = npi_txdma_control_stop_wait(handle, channel);
-		if (status) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    "Cannot stop channel %d (TXC hung!)",
-				    channel));
-		}
-		break;
-
-	case TXDMA_RESUME:
-		/* Resume the packet transmission after stopping */
-		TXDMA_REG_READ64(handle, TX_CS_REG, channel, &cs.value);
-		cs.value &= ~TX_CS_STOP_N_GO_MASK;
-		TXDMA_REG_WRITE64(handle, TX_CS_REG, channel, cs.value);
-		return (npi_txdma_control_resume_wait(handle, channel));
-
-	case TXDMA_CLEAR_MMK:
-		/* Write 1 to MK bit to clear the MMK bit */
-		TXDMA_REG_READ64(handle, TX_CS_REG, channel, &cs.value);
-		cs.bits.ldw.mk = 1;
-		TXDMA_REG_WRITE64(handle, TX_CS_REG, channel, cs.value);
-		break;
-
-	case TXDMA_MBOX_ENABLE:
-		/*
-		 * Write 1 to MB bit to enable mailbox update
-		 * (cleared to 0 by hardware after update).
-		 */
-		TXDMA_REG_READ64(handle, TX_CS_REG, channel, &cs.value);
-		cs.bits.ldw.mb = 1;
-		TXDMA_REG_WRITE64(handle, TX_CS_REG, channel, cs.value);
-		break;
-
-	default:
-		status =  (NPI_FAILURE | NPI_TXDMA_OPCODE_INVALID(channel));
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_txdma_channel_control"
-				    " Invalid Input: control <0x%x>",
-				    control));
-	}
-
-	return (status);
-}
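
/*
 * Editor's usage sketch (not part of the removed file): how the wrapper
 * functions above funnel through npi_txdma_channel_control() when a
 * channel is torn down and restarted.  The handle/channel values are
 * assumed to come from the caller.
 */
static npi_status_t
example_txdma_restart(npi_handle_t handle, uint8_t channel)
{
	npi_status_t	rs;

	/* Reset the channel and wait for the RST bit to clear. */
	rs = npi_txdma_channel_reset(handle, channel);
	if (rs != NPI_SUCCESS)
		return (rs);

	/* Clear the control/status register (TXDMA_INIT_START). */
	rs = npi_txdma_channel_init_enable(handle, channel);
	if (rs != NPI_SUCCESS)
		return (rs);

	/* Clear stop_n_go so the channel starts fetching descriptors. */
	return (npi_txdma_channel_enable(handle, channel));
}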
-
-/*
- * npi_txdma_control_status():
- *	This function is called to operate on the control
- *	and status register.
- *
- * Parameters:
- *	handle		- NPI handle
- *	op_mode		- OP_GET: get hardware control and status
- *			  OP_SET: set hardware control and status
- *			  OP_UPDATE: update hardware control and status.
- *	channel		- hardware TXDMA channel from 0 to 23.
- *	cs_p		- pointer to hardware defined control and status
- *			  structure.
- * Return:
- *	NPI_SUCCESS		- If set is complete successfully.
- *
- *	Error:
- *	NPI_FAILURE		-
- *		NPI_TXDMA_OPCODE_INVALID	-
- *		NPI_TXDMA_CHANNEL_INVALID	-
- *		NPI_TXDMA_FUNC_INVALID	-
- */
-npi_status_t
-npi_txdma_control_status(npi_handle_t handle, io_op_t op_mode,
-		uint8_t channel, p_tx_cs_t cs_p)
-{
-	int		status = NPI_SUCCESS;
-	tx_cs_t		txcs;
-
-	ASSERT(TXDMA_CHANNEL_VALID(channel));
-	if (!TXDMA_CHANNEL_VALID(channel)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_txdma_control_status"
-				    " Invalid Input: channel <0x%x>",
-				    channel));
-		return (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(channel));
-	}
-
-	switch (op_mode) {
-	case OP_GET:
-		TXDMA_REG_READ64(handle, TX_CS_REG, channel, &cs_p->value);
-		break;
-
-	case OP_SET:
-		TXDMA_REG_WRITE64(handle, TX_CS_REG, channel, cs_p->value);
-		break;
-
-	case OP_UPDATE:
-		TXDMA_REG_READ64(handle, TX_CS_REG, channel, &txcs.value);
-		TXDMA_REG_WRITE64(handle, TX_CS_REG, channel,
-			cs_p->value | txcs.value);
-		break;
-
-	default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_txdma_control_status"
-				    " Invalid Input: control <0x%x>",
-				    op_mode));
-		return (NPI_FAILURE | NPI_TXDMA_OPCODE_INVALID(channel));
-	}
-
-	return (status);
-
-}
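
/*
 * Editor's usage sketch (not part of the removed file): reading the
 * control/status register with OP_GET and reacting to one of its bits.
 * Only bit names that appear elsewhere in this file (mk) are used.
 */
static void
example_txdma_cs_check(npi_handle_t handle, uint8_t channel)
{
	tx_cs_t	cs;

	cs.value = 0;
	if (npi_txdma_control_status(handle, OP_GET, channel, &cs) !=
	    NPI_SUCCESS)
		return;

	/* If a marked packet has completed, clear the MMK latch. */
	if (cs.bits.ldw.mk)
		(void) npi_txdma_channel_mmk_clear(handle, channel);
}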
-
-/*
- * npi_txdma_event_mask():
- *	This function is called to operate on the event mask
- *	register which is used for generating interrupts.
- *
- * Parameters:
- *	handle		- NPI handle
- *	op_mode		- OP_GET: get hardware event mask
- *			  OP_SET: set hardware interrupt event masks
- *			  OP_UPDATE: update hardware interrupt event masks.
- *	channel		- hardware TXDMA channel from 0 to 23.
- *	mask_p		- pointer to hardware defined event mask
- *			  structure.
- * Return:
- *	NPI_SUCCESS		- If set is complete successfully.
- *
- *	Error:
- *	NPI_FAILURE		-
- *		NPI_TXDMA_OPCODE_INVALID	-
- *		NPI_TXDMA_CHANNEL_INVALID	-
- */
-npi_status_t
-npi_txdma_event_mask(npi_handle_t handle, io_op_t op_mode,
-		uint8_t channel, p_tx_dma_ent_msk_t mask_p)
-{
-	int			status = NPI_SUCCESS;
-	tx_dma_ent_msk_t	mask;
-
-	ASSERT(TXDMA_CHANNEL_VALID(channel));
-	if (!TXDMA_CHANNEL_VALID(channel)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_txdma_event_mask"
-					    " Invalid Input: channel <0x%x>",
-					    channel));
-		return (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(channel));
-	}
-
-	switch (op_mode) {
-	case OP_GET:
-		TXDMA_REG_READ64(handle, TX_ENT_MSK_REG, channel,
-				&mask_p->value);
-		break;
-
-	case OP_SET:
-		TXDMA_REG_WRITE64(handle, TX_ENT_MSK_REG, channel,
-				mask_p->value);
-		break;
-
-	case OP_UPDATE:
-		TXDMA_REG_READ64(handle, TX_ENT_MSK_REG, channel, &mask.value);
-		TXDMA_REG_WRITE64(handle, TX_ENT_MSK_REG, channel,
-			mask_p->value | mask.value);
-		break;
-
-	default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_txdma_event_mask"
-				    " Invalid Input: eventmask <0x%x>",
-				    op_mode));
-		return (NPI_FAILURE | NPI_TXDMA_OPCODE_INVALID(channel));
-	}
-
-	return (status);
-}
-
-/*
- * npi_txdma_event_mask_config():
- *	This function is called to operate on the event mask
- *	register which is used for generating interrupts.
- *
- * Parameters:
- *	handle		- NPI handle
- *	op_mode		- OP_GET: get hardware event mask
- *			  OP_SET: set hardware interrupt event masks
- *			  OP_UPDATE: update hardware interrupt event masks.
- *			  OP_CLEAR: mask all events (write CFG_TXDMA_MASK_ALL).
- *	channel		- hardware TXDMA channel from 0 to 23.
- *	cfgp		- pointer to NPI defined event mask
- *			  enum data type.
- * Return:
- *	NPI_SUCCESS		- If set is complete successfully.
- *
- *	Error:
- *	NPI_FAILURE		-
- *		NPI_TXDMA_OPCODE_INVALID	-
- *		NPI_TXDMA_CHANNEL_INVALID	-
- */
-npi_status_t
-npi_txdma_event_mask_config(npi_handle_t handle, io_op_t op_mode,
-		uint8_t channel, txdma_ent_msk_cfg_t *mask_cfgp)
-{
-	int		status = NPI_SUCCESS;
-	uint64_t	value;
-
-	ASSERT(TXDMA_CHANNEL_VALID(channel));
-	if (!TXDMA_CHANNEL_VALID(channel)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_txdma_event_mask_config"
-				    " Invalid Input: channel <0x%x>",
-				    channel));
-
-		return (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(channel));
-	}
-
-	switch (op_mode) {
-	case OP_GET:
-		TXDMA_REG_READ64(handle, TX_ENT_MSK_REG, channel, mask_cfgp);
-		break;
-
-	case OP_SET:
-		TXDMA_REG_WRITE64(handle, TX_ENT_MSK_REG, channel,
-				*mask_cfgp);
-		break;
-
-	case OP_UPDATE:
-		TXDMA_REG_READ64(handle, TX_ENT_MSK_REG, channel, &value);
-		TXDMA_REG_WRITE64(handle, TX_ENT_MSK_REG, channel,
-			*mask_cfgp | value);
-		break;
-
-	case OP_CLEAR:
-		TXDMA_REG_WRITE64(handle, TX_ENT_MSK_REG, channel,
-			CFG_TXDMA_MASK_ALL);
-		break;
-	default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_txdma_event_mask_config"
-				    " Invalid Input: eventmask <0x%x>",
-				    op_mode));
-		return (NPI_FAILURE | NPI_TXDMA_OPCODE_INVALID(channel));
-	}
-
-	return (status);
-}
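
/*
 * Editor's usage sketch (not part of the removed file): a typical
 * interrupt setup masks every TXDMA event with OP_CLEAR and then unmasks
 * the events of interest with OP_SET.  CFG_TXDMA_MASK_ALL and
 * CFG_TXDMA_MK_MASK come from npi_txdma.h (removed later in this
 * changeset).
 */
static npi_status_t
example_txdma_intr_setup(npi_handle_t handle, uint8_t channel)
{
	txdma_ent_msk_cfg_t	cfg;
	npi_status_t		rs;

	/*
	 * Mask all events first; OP_CLEAR writes CFG_TXDMA_MASK_ALL and
	 * does not consult the pointer, but pass a valid one anyway.
	 */
	cfg = CFG_TXDMA_MASK_ALL;
	rs = npi_txdma_event_mask_config(handle, OP_CLEAR, channel, &cfg);
	if (rs != NPI_SUCCESS)
		return (rs);

	/* Leave only the marked-packet event unmasked. */
	cfg = (txdma_ent_msk_cfg_t)(CFG_TXDMA_MASK_ALL & ~CFG_TXDMA_MK_MASK);
	return (npi_txdma_event_mask_config(handle, OP_SET, channel, &cfg));
}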
-
-/*
- * npi_txdma_event_mask_mk_out():
- *	This function is called to mask out the packet transmit marked event.
- *
- * Parameters:
- *	handle		- NPI handle
- *	channel		- hardware TXDMA channel from 0 to 23.
- * Return:
- *	NPI_SUCCESS		- If set is complete successfully.
- *
- *	Error:
- *	NPI_FAILURE		-
- *		NPI_TXDMA_CHANNEL_INVALID	-
- */
-npi_status_t
-npi_txdma_event_mask_mk_out(npi_handle_t handle, uint8_t channel)
-{
-	txdma_ent_msk_cfg_t event_mask;
-	int		status = NPI_SUCCESS;
-
-	ASSERT(TXDMA_CHANNEL_VALID(channel));
-	if (!TXDMA_CHANNEL_VALID(channel)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_txdma_event_mask_mk_out"
-				    " Invalid Input: channel <0x%x>",
-				    channel));
-		return (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(channel));
-	}
-
-	TXDMA_REG_READ64(handle, TX_ENT_MSK_REG, channel, &event_mask);
-	TXDMA_REG_WRITE64(handle, TX_ENT_MSK_REG, channel,
-		event_mask & (~TX_ENT_MSK_MK_MASK));
-
-	return (status);
-}
-
-/*
- * npi_txdma_event_mask_mk_in():
- *	This function is called to set the mask for the packet transmit marked event.
- *
- * Parameters:
- *	handle		- NPI handle
- *	channel		- hardware TXDMA channel from 0 to 23.
- * Return:
- *	NPI_SUCCESS		- If set is complete successfully.
- *
- *	Error:
- *	NPI_FAILURE		-
- *		NPI_TXDMA_CHANNEL_INVALID	-
- */
-npi_status_t
-npi_txdma_event_mask_mk_in(npi_handle_t handle, uint8_t channel)
-{
-	txdma_ent_msk_cfg_t event_mask;
-	int		status = NPI_SUCCESS;
-
-	ASSERT(TXDMA_CHANNEL_VALID(channel));
-	if (!TXDMA_CHANNEL_VALID(channel)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_txdma_event_mask_mk_in"
-				    " Invalid Input: channel <0x%x>",
-				    channel));
-		return (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(channel));
-	}
-
-	TXDMA_REG_READ64(handle, TX_ENT_MSK_REG, channel, &event_mask);
-	TXDMA_REG_WRITE64(handle, TX_ENT_MSK_REG, channel,
-		event_mask | TX_ENT_MSK_MK_MASK);
-
-	return (status);
-}
-
-/*
- * npi_txdma_ring_addr_set():
- *	This function is called to configure the transmit descriptor
- *	ring address and its size.
- *
- * Parameters:
- *	handle		- NPI handle (virtualization flag must be defined
- *			  if its register pointer is from the virtual region).
- *	channel		- logical TXDMA channel from 0 to 23.
- *			  (If virtualization flag is not set, then
- *			   logical channel is the same as the hardware
- *			   channel number).
- *	start_addr	- starting address of the descriptor
- *	len		- maximum length of the descriptor ring
- *			  (in number of 64-byte blocks).
- * Return:
- *	NPI_SUCCESS		- If set is complete successfully.
- *
- *	Error:
- *	NPI_FAILURE		-
- *		NPI_TXDMA_OPCODE_INVALID	-
- *		NPI_TXDMA_CHANNEL_INVALID	-
- */
-npi_status_t
-npi_txdma_ring_addr_set(npi_handle_t handle, uint8_t channel,
-		uint64_t start_addr, uint32_t len)
-{
-	int		status = NPI_SUCCESS;
-	tx_rng_cfig_t	cfg;
-
-	ASSERT(TXDMA_CHANNEL_VALID(channel));
-	if (!TXDMA_CHANNEL_VALID(channel)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_txdma_ring_addr_set"
-				    " Invalid Input: channel <0x%x>",
-				    channel));
-		return (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(channel));
-	}
-
-	cfg.value = ((start_addr & TX_RNG_CFIG_ADDR_MASK) |
-			(((uint64_t)len) << TX_RNG_CFIG_LEN_SHIFT));
-	TXDMA_REG_WRITE64(handle, TX_RNG_CFIG_REG, channel, cfg.value);
-
-	return (status);
-}
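
/*
 * Editor's usage sketch (not part of the removed file): programming the
 * descriptor ring base and length.  A tx_desc_t appears to occupy one
 * 64-bit word (it is written with a single 64-bit PIO further below), so
 * the ring length in 64-byte blocks works out to ndescs / 8.  The DMA
 * address and descriptor count are caller-supplied assumptions.
 */
static npi_status_t
example_txdma_ring_setup(npi_handle_t handle, uint8_t channel,
	uint64_t ring_iova, uint32_t ndescs)
{
	uint32_t	len_blocks;

	len_blocks = (uint32_t)((ndescs * sizeof (tx_desc_t)) / 64);

	return (npi_txdma_ring_addr_set(handle, channel, ring_iova,
	    len_blocks));
}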
-
-/*
- * npi_txdma_ring_config():
- *	This function is called to configure a descriptor ring
- *	by using the hardware defined data.
- *
- * Parameters:
- *	handle		- NPI handle (virtualization flag must be defined
- *			  if its register pointer is from the virtual region).
- *	channel		- logical TXDMA channel from 0 to 23.
- *			  (If virtualization flag is not set, then
- *			   logical channel is the same as the hardware
- *			   channel number).
- *	op_mode		- OP_GET: get transmit ring configuration
- *			  OP_SET: set transmit ring configuration
- *	reg_data	- pointer to hardware defined transmit ring
- *			  configuration data structure.
- * Return:
- *	NPI_SUCCESS		- If set/get is complete successfully.
- *
- *	Error:
- *	NPI_FAILURE		-
- *		NPI_TXDMA_CHANNEL_INVALID	-
- */
-npi_status_t
-npi_txdma_ring_config(npi_handle_t handle, io_op_t op_mode,
-		uint8_t channel, uint64_t *reg_data)
-{
-	int		status = NPI_SUCCESS;
-
-	ASSERT(TXDMA_CHANNEL_VALID(channel));
-	if (!TXDMA_CHANNEL_VALID(channel)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_txdma_ring_config"
-				    " Invalid Input: channel <0x%x>",
-				    channel));
-		return (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(channel));
-	}
-
-	switch (op_mode) {
-	case OP_GET:
-		TXDMA_REG_READ64(handle, TX_RNG_CFIG_REG, channel, reg_data);
-		break;
-
-	case OP_SET:
-		TXDMA_REG_WRITE64(handle, TX_RNG_CFIG_REG, channel,
-			*reg_data);
-		break;
-
-	default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_txdma_ring_config"
-				    " Invalid Input: ring_config <0x%x>",
-				    op_mode));
-		return (NPI_FAILURE | NPI_TXDMA_OPCODE_INVALID(channel));
-	}
-
-	return (status);
-}
-
-/*
- * npi_txdma_mbox_config():
- *	This function is called to configure the mailbox address.
- *
- * Parameters:
- *	handle		- NPI handle (virtualization flag must be defined
- *			  if its register pointer is from the virtual region).
- *	channel		- logical TXDMA channel from 0 to 23.
- *			  (If virtualization flag is not set, then
- *			   logical channel is the same as the hardware
- *			   channel number).
- *	op_mode		- OP_GET: get the mailbox address
- *			  OP_SET: set the mailbox address
- *	reg_data	- pointer to the mailbox address.
- * Return:
- *	NPI_SUCCESS		- If set is complete successfully.
- *
- *	Error:
- *	NPI_FAILURE		-
- *		NPI_TXDMA_OPCODE_INVALID	-
- *		NPI_TXDMA_CHANNEL_INVALID	-
- */
-npi_status_t
-npi_txdma_mbox_config(npi_handle_t handle, io_op_t op_mode,
-		uint8_t channel, uint64_t *mbox_addr)
-{
-	int		status = NPI_SUCCESS;
-	txdma_mbh_t	mh;
-	txdma_mbl_t	ml;
-
-	ASSERT(TXDMA_CHANNEL_VALID(channel));
-	if (!TXDMA_CHANNEL_VALID(channel)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_txdma_mbox_config"
-				    " Invalid Input: channel <0x%x>",
-				    channel));
-		return (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(channel));
-	}
-
-	mh.value = ml.value = 0;
-
-	switch (op_mode) {
-	case OP_GET:
-		TXDMA_REG_READ64(handle, TXDMA_MBH_REG, channel, &mh.value);
-		TXDMA_REG_READ64(handle, TXDMA_MBL_REG, channel, &ml.value);
-		*mbox_addr = ml.value;
-		*mbox_addr |= (mh.value << TXDMA_MBH_ADDR_SHIFT);
-
-		break;
-
-	case OP_SET:
-		ml.bits.ldw.mbaddr = ((*mbox_addr & TXDMA_MBL_MASK) >>
-			TXDMA_MBL_SHIFT);
-		TXDMA_REG_WRITE64(handle, TXDMA_MBL_REG, channel, ml.value);
-		mh.bits.ldw.mbaddr = ((*mbox_addr >> TXDMA_MBH_ADDR_SHIFT) &
-			TXDMA_MBH_MASK);
-		TXDMA_REG_WRITE64(handle, TXDMA_MBH_REG, channel, mh.value);
-
-		break;
-
-	default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_txdma_mbox_config"
-				    " Invalid Input: mbox <0x%x>",
-				    op_mode));
-		return (NPI_FAILURE | NPI_TXDMA_OPCODE_INVALID(channel));
-	}
-
-	return (status);
-
-}
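
/*
 * Editor's usage sketch (not part of the removed file): programming the
 * per-channel mailbox address (split across TXDMA_MBH_REG/TXDMA_MBL_REG
 * by the function above) and reading it back.  The mailbox DMA address is
 * assumed to come from the caller's DMA binding.
 */
static npi_status_t
example_txdma_mbox_setup(npi_handle_t handle, uint8_t channel,
	uint64_t mbox_iova)
{
	uint64_t	readback = 0;
	npi_status_t	rs;

	rs = npi_txdma_mbox_config(handle, OP_SET, channel, &mbox_iova);
	if (rs != NPI_SUCCESS)
		return (rs);

	/* Optional sanity check: read the programmed address back. */
	return (npi_txdma_mbox_config(handle, OP_GET, channel, &readback));
}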
-
-/*
- * npi_txdma_desc_gather_set():
- *	This function is called to set up a transmit descriptor entry.
- *
- * Parameters:
- *	handle		- NPI handle (register pointer is the
- *			  descriptor address in memory).
- *	desc_p		- pointer to a descriptor
- *	gather_index	- which entry (starts from index 0 to 15)
- *	mark		- mark bit (only valid if it is the first gather).
- *	ngathers	- number of gather pointers to set to the first gather.
- *	dma_ioaddr	- starting dma address of an IO buffer to write.
- *			  (SAD)
- *	transfer_len	- transfer len.
- * Return:
- *	NPI_SUCCESS		- If set is complete successfully.
- *
- *	Error:
- *	NPI_FAILURE		-
- *		NPI_TXDMA_OPCODE_INVALID	-
- *		NPI_TXDMA_CHANNEL_INVALID	-
- *		NPI_TXDMA_XFER_LEN_INVALID	-
- */
-npi_status_t
-npi_txdma_desc_gather_set(npi_handle_t handle,
-		p_tx_desc_t desc_p, uint8_t gather_index,
-		boolean_t mark, uint8_t ngathers,
-		uint64_t dma_ioaddr, uint32_t transfer_len)
-{
-	int		status;
-
-	status = NPI_TXDMA_GATHER_INDEX(gather_index);
-	if (status) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_txdma_desc_gather_set"
-				    " Invalid Input: gather_index <0x%x>",
-				    gather_index));
-		return (status);
-	}
-
-	if (transfer_len > TX_MAX_TRANSFER_LENGTH) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_txdma_desc_gather_set"
-				    " Invalid Input: tr_len <0x%x>",
-				    transfer_len));
-		return (NPI_FAILURE | NPI_TXDMA_XFER_LEN_INVALID);
-	}
-
-	if (gather_index == 0) {
-		desc_p->bits.hdw.sop = 1;
-		desc_p->bits.hdw.mark = mark;
-		desc_p->bits.hdw.num_ptr = ngathers;
-		NPI_DEBUG_MSG((handle.function, NPI_TDC_CTL,
-			"npi_txdma_gather_set: SOP len %d (%d)",
-			desc_p->bits.hdw.tr_len, transfer_len));
-	}
-
-	desc_p->bits.hdw.tr_len = transfer_len;
-	desc_p->bits.hdw.sad = dma_ioaddr >> 32;
-	desc_p->bits.ldw.sad = dma_ioaddr & 0xffffffff;
-
-	NPI_DEBUG_MSG((handle.function, NPI_TDC_CTL,
-		"npi_txdma_gather_set: xfer len %d to set (%d)",
-		desc_p->bits.hdw.tr_len, transfer_len));
-
-	NXGE_MEM_PIO_WRITE64(handle, desc_p->value);
-
-	return (status);
-}
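
/*
 * Editor's usage sketch (not part of the removed file): filling a
 * two-gather packet.  The first gather carries the SOP, mark and num_ptr
 * fields; the second carries only its buffer address and length.  Note
 * that npi_txdma_desc_gather_set() writes through the handle with
 * NXGE_MEM_PIO_WRITE64(), so the handle is assumed to be positioned at
 * the descriptor being written; buffers and lengths are caller-supplied.
 */
static npi_status_t
example_txdma_two_gathers(npi_handle_t handle,
	p_tx_desc_t desc0, p_tx_desc_t desc1,
	uint64_t buf0, uint32_t len0, uint64_t buf1, uint32_t len1)
{
	npi_status_t	rs;

	/* Gather 0: start of packet, two pointers, request a mark. */
	rs = npi_txdma_desc_gather_set(handle, desc0, 0, B_TRUE, 2,
	    buf0, len0);
	if (rs != NPI_SUCCESS)
		return (rs);

	/* Gather 1: continuation entry, SOP fields untouched. */
	return (npi_txdma_desc_gather_set(handle, desc1, 1, B_FALSE, 2,
	    buf1, len1));
}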
-
-/*
- * npi_txdma_desc_gather_sop_set():
- *	This function is called to set up the first gather entry.
- *
- * Parameters:
- *	handle		- NPI handle (register pointer is the
- *			  descriptor address in memory).
- *	desc_p		- pointer to a descriptor
- *	mark		- mark bit (only valid if it is the first gather).
- *	ngathers	- number of gather pointers to set to the first gather.
- * Return:
- *	NPI_SUCCESS		- If set is complete successfully.
- *
- *	Error:
- */
-npi_status_t
-npi_txdma_desc_gather_sop_set(npi_handle_t handle,
-		p_tx_desc_t desc_p,
-		boolean_t mark_mode,
-		uint8_t ngathers)
-{
-	int		status = NPI_SUCCESS;
-
-	desc_p->bits.hdw.sop = 1;
-	desc_p->bits.hdw.mark = mark_mode;
-	desc_p->bits.hdw.num_ptr = ngathers;
-
-	NXGE_MEM_PIO_WRITE64(handle, desc_p->value);
-
-	return (status);
-}
-npi_status_t
-npi_txdma_desc_gather_sop_set_1(npi_handle_t handle,
-		p_tx_desc_t desc_p,
-		boolean_t mark_mode,
-		uint8_t ngathers,
-		uint32_t extra)
-{
-	int		status = NPI_SUCCESS;
-
-	desc_p->bits.hdw.sop = 1;
-	desc_p->bits.hdw.mark = mark_mode;
-	desc_p->bits.hdw.num_ptr = ngathers;
-	desc_p->bits.hdw.tr_len += extra;
-
-	NXGE_MEM_PIO_WRITE64(handle, desc_p->value);
-
-	return (status);
-}
-
-npi_status_t
-npi_txdma_desc_set_xfer_len(npi_handle_t handle,
-		p_tx_desc_t desc_p,
-		uint32_t transfer_len)
-{
-	int		status = NPI_SUCCESS;
-
-	desc_p->bits.hdw.tr_len = transfer_len;
-
-	NPI_DEBUG_MSG((handle.function, NPI_TDC_CTL,
-		"npi_set_xfer_len: len %d (%d)",
-		desc_p->bits.hdw.tr_len, transfer_len));
-
-	NXGE_MEM_PIO_WRITE64(handle, desc_p->value);
-
-	return (status);
-}
-
-npi_status_t
-npi_txdma_desc_set_zero(npi_handle_t handle, uint16_t entries)
-{
-	uint32_t	offset;
-	int		i;
-
-	/*
-	 * Assume no wrap around.
-	 */
-	offset = 0;
-	for (i = 0; i < entries; i++) {
-		NXGE_REG_WR64(handle, offset, 0);
-		offset += TXDMA_DESC_SIZE;
-	}
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_txdma_desc_mem_get(npi_handle_t handle, uint16_t index,
-		p_tx_desc_t desc_p)
-{
-	int		status = NPI_SUCCESS;
-
-	npi_txdma_dump_desc_one(handle, desc_p, index);
-
-	return (status);
-
-}
-
-/*
- * npi_txdma_desc_kick_reg_set():
- *	This function is called to kick the transmit DMA channel to start transmission.
- *
- * Parameters:
- *	handle		- NPI handle (virtualization flag must be defined).
- *	channel		- logical TXDMA channel from 0 to 23.
- *			  (If virtualization flag is not set, then
- *			   logical channel is the same as the hardware
- *			   channel number).
- *	tail_index	- index into the transmit descriptor
- *	wrap		- toggle bit to indicate if the tail index is
- *			  wrapped around.
- *
- * Return:
- *	NPI_SUCCESS		- If set is complete successfully.
- *
- *	Error:
- *	NPI_FAILURE		-
- *		NPI_TXDMA_CHANNEL_INVALID	-
- */
-npi_status_t
-npi_txdma_desc_kick_reg_set(npi_handle_t handle, uint8_t channel,
-		uint16_t tail_index, boolean_t wrap)
-{
-	int			status = NPI_SUCCESS;
-	tx_ring_kick_t		kick;
-
-	ASSERT(TXDMA_CHANNEL_VALID(channel));
-	if (!TXDMA_CHANNEL_VALID(channel)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_txdma_desc_kick_reg_set"
-				    " Invalid Input: channel <0x%x>",
-				    channel));
-		return (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(channel));
-	}
-
-	NPI_DEBUG_MSG((handle.function, NPI_TDC_CTL,
-		" npi_txdma_desc_kick_reg_set: "
-		" KICKING channel %d",
-		channel));
-
-	/* Toggle the wrap around bit */
-	kick.value = 0;
-	kick.bits.ldw.wrap = wrap;
-	kick.bits.ldw.tail = tail_index;
-
-	/* Kick start the Transmit kick register */
-	TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, kick.value);
-
-	return (status);
-}
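
/*
 * Editor's usage sketch (not part of the removed file): after new
 * descriptors have been filled in, the kick register tells the hardware
 * where the new tail is.  The caller tracks a wrap flag and toggles it
 * each time the tail index wraps past the end of the ring; that
 * bookkeeping is assumed here.
 */
static void
example_txdma_kick(npi_handle_t handle, uint8_t channel,
	uint16_t new_tail, boolean_t wrapped, boolean_t *wrap_state)
{
	if (wrapped)
		*wrap_state = (*wrap_state == B_TRUE) ? B_FALSE : B_TRUE;

	(void) npi_txdma_desc_kick_reg_set(handle, channel, new_tail,
	    *wrap_state);
}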
-
-/*
- * npi_txdma_desc_kick_reg_get():
- *	This function is called to get the transmit kick register contents.
- *
- * Parameters:
- *	handle		- NPI handle (virtualization flag must be defined).
- *	channel		- logical TXDMA channel from 0 to 23.
- *			  (If virtualization flag is not set, then
- *			   logical channel is the same as the hardware
- *			   channel number).
- *	kick_p		- pointer to the hardware defined kick register
- *			  structure to store the tail index and wrap bit.
- *
- * Return:
- *	NPI_SUCCESS		- If get is complete successfully.
- *
- *	Error:
- *	NPI_FAILURE		-
- *		NPI_TXDMA_CHANNEL_INVALID	-
- */
-npi_status_t
-npi_txdma_desc_kick_reg_get(npi_handle_t handle, uint8_t channel,
-		p_tx_ring_kick_t kick_p)
-{
-	int		status = NPI_SUCCESS;
-
-	ASSERT(TXDMA_CHANNEL_VALID(channel));
-	if (!TXDMA_CHANNEL_VALID(channel)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_txdma_desc_kick_reg_get"
-				    " Invalid Input: channel <0x%x>",
-				    channel));
-		return (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(channel));
-	}
-
-	TXDMA_REG_READ64(handle, TX_RING_KICK_REG, channel, &kick_p->value);
-
-	return (status);
-}
-
-/*
- * npi_txdma_ring_head_get():
- *	This function is called to get the transmit ring head index.
- *
- * Parameters:
- *	handle		- NPI handle (virtualization flag must be defined).
- *	channel		- logical TXDMA channel from 0 to 23.
- *			  (If virtualization flag is not set, then
- *			   logical channel is the same as the hardware
- *			   channel number).
- *	hdl_p		- pointer to the hardware defined transmit
- *			  ring header data (head index and wrap bit).
- *
- * Return:
- *	NPI_SUCCESS		- If get is complete successfully.
- *
- *	Error:
- *	NPI_FAILURE		-
- *		NPI_TXDMA_CHANNEL_INVALID	-
- */
-npi_status_t
-npi_txdma_ring_head_get(npi_handle_t handle, uint8_t channel,
-		p_tx_ring_hdl_t hdl_p)
-{
-	int		status = NPI_SUCCESS;
-
-	ASSERT(TXDMA_CHANNEL_VALID(channel));
-	if (!TXDMA_CHANNEL_VALID(channel)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_txdma_ring_head_get"
-				    " Invalid Input: channel <0x%x>",
-				    channel));
-		return (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(channel));
-	}
-
-	TXDMA_REG_READ64(handle, TX_RING_HDL_REG, channel, &hdl_p->value);
-
-	return (status);
-}
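
/*
 * Editor's usage sketch (not part of the removed file): reclaiming
 * completed descriptors by reading the ring head register.  Only the raw
 * 64-bit value is used here; decoding the head index and wrap bit depends
 * on the tx_ring_hdl_t bit layout, which lives in the hardware header
 * rather than in this file.
 */
static uint64_t
example_txdma_head_poll(npi_handle_t handle, uint8_t channel)
{
	tx_ring_hdl_t	hdl;

	hdl.value = 0;
	if (npi_txdma_ring_head_get(handle, channel, &hdl) != NPI_SUCCESS)
		return (0);

	return (hdl.value);	/* caller decodes head index and wrap bit */
}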
-
-/*ARGSUSED*/
-npi_status_t
-npi_txdma_channel_mbox_get(npi_handle_t handle, uint8_t channel,
-		p_txdma_mailbox_t mbox_p)
-{
-	int		status = NPI_SUCCESS;
-
-	return (status);
-
-}
-
-npi_status_t
-npi_txdma_channel_pre_state_get(npi_handle_t handle, uint8_t channel,
-		p_tx_dma_pre_st_t prep)
-{
-	int		status = NPI_SUCCESS;
-
-	ASSERT(TXDMA_CHANNEL_VALID(channel));
-	if (!TXDMA_CHANNEL_VALID(channel)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_txdma_channel_pre_state_get"
-				    " Invalid Input: channel <0x%x>",
-				    channel));
-		return (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(channel));
-	}
-
-	TXDMA_REG_READ64(handle, TX_DMA_PRE_ST_REG, channel, &prep->value);
-
-	return (status);
-}
-
-npi_status_t
-npi_txdma_ring_error_get(npi_handle_t handle, uint8_t channel,
-		p_txdma_ring_errlog_t ring_errlog_p)
-{
-	tx_rng_err_logh_t	logh;
-	tx_rng_err_logl_t	logl;
-	int			status = NPI_SUCCESS;
-
-	ASSERT(TXDMA_CHANNEL_VALID(channel));
-	if (!TXDMA_CHANNEL_VALID(channel)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_txdma_ring_error_get"
-				    " Invalid Input: channel <0x%x>",
-				    channel));
-		return (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(channel));
-	}
-
-	logh.value = 0;
-	TXDMA_REG_READ64(handle, TX_RNG_ERR_LOGH_REG, channel, &logh.value);
-	TXDMA_REG_READ64(handle, TX_RNG_ERR_LOGL_REG, channel, &logl.value);
-	ring_errlog_p->logh.bits.ldw.err = logh.bits.ldw.err;
-	ring_errlog_p->logh.bits.ldw.merr = logh.bits.ldw.merr;
-	ring_errlog_p->logh.bits.ldw.errcode = logh.bits.ldw.errcode;
-	ring_errlog_p->logh.bits.ldw.err_addr = logh.bits.ldw.err_addr;
-	ring_errlog_p->logl.bits.ldw.err_addr = logl.bits.ldw.err_addr;
-
-	return (status);
-}
-
-npi_status_t
-npi_txdma_inj_par_error_clear(npi_handle_t handle)
-{
-	NXGE_REG_WR64(handle, TDMC_INJ_PAR_ERR_REG, 0);
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_txdma_inj_par_error_set(npi_handle_t handle, uint32_t err_bits)
-{
-	tdmc_inj_par_err_t	inj;
-
-	inj.value = 0;
-	inj.bits.ldw.inject_parity_error = (err_bits & TDMC_INJ_PAR_ERR_MASK);
-	NXGE_REG_WR64(handle, TDMC_INJ_PAR_ERR_REG, inj.value);
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_txdma_inj_par_error_update(npi_handle_t handle, uint32_t err_bits)
-{
-	tdmc_inj_par_err_t	inj;
-
-	inj.value = 0;
-	NXGE_REG_RD64(handle, TDMC_INJ_PAR_ERR_REG, &inj.value);
-	inj.value |= (err_bits & TDMC_INJ_PAR_ERR_MASK);
-	NXGE_REG_WR64(handle, TDMC_INJ_PAR_ERR_REG, inj.value);
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_txdma_inj_par_error_get(npi_handle_t handle, uint32_t *err_bits)
-{
-	tdmc_inj_par_err_t	inj;
-
-	inj.value = 0;
-	NXGE_REG_RD64(handle, TDMC_INJ_PAR_ERR_REG, &inj.value);
-	*err_bits = (inj.value & TDMC_INJ_PAR_ERR_MASK);
-
-	return (NPI_SUCCESS);
-}
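
/*
 * Editor's usage sketch (not part of the removed file): the parity-error
 * injection hooks are intended for fault-injection testing of the error
 * paths (e.g. driving the FMA handlers).  TDMC_INJ_PAR_ERR_MASK bounds
 * the writable bits; the specific bit chosen below is a placeholder.
 */
static void
example_txdma_inject_parity(npi_handle_t handle)
{
	uint32_t	bits;

	/* Arm one (placeholder) parity-injection bit. */
	(void) npi_txdma_inj_par_error_set(handle,
	    0x1 & TDMC_INJ_PAR_ERR_MASK);

	/* Confirm what is currently armed, then disarm everything. */
	(void) npi_txdma_inj_par_error_get(handle, &bits);
	(void) npi_txdma_inj_par_error_clear(handle);
}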
-
-npi_status_t
-npi_txdma_dbg_sel_set(npi_handle_t handle, uint8_t dbg_sel)
-{
-	tdmc_dbg_sel_t		dbg;
-
-	dbg.value = 0;
-	dbg.bits.ldw.dbg_sel = (dbg_sel & TDMC_DBG_SEL_MASK);
-
-	NXGE_REG_WR64(handle, TDMC_DBG_SEL_REG, dbg.value);
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_txdma_training_vector_set(npi_handle_t handle, uint32_t training_vector)
-{
-	tdmc_training_t		vec;
-
-	vec.value = 0;
-	vec.bits.ldw.vec = training_vector;
-
-	NXGE_REG_WR64(handle, TDMC_TRAINING_REG, vec.value);
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_txdma_dump_desc_one(npi_handle_t handle, p_tx_desc_t desc_p,
- *	int desc_index)
- *
- *	Dumps the contents of transmit descriptors.
- *
- * Parameters:
- *	handle		- NPI handle (register pointer is the
- *			  descriptor address in memory).
- *	desc_p		- pointer to place the descriptor contents
- *	desc_index	- descriptor index
- *
- */
-/*ARGSUSED*/
-void
-npi_txdma_dump_desc_one(npi_handle_t handle, p_tx_desc_t desc_p, int desc_index)
-{
-
-	tx_desc_t 		desc, *desp;
-#ifdef NXGE_DEBUG
-	uint64_t		sad;
-	int			xfer_len;
-#endif
-
-	NPI_DEBUG_MSG((handle.function, NPI_TDC_CTL,
-		"\n==> npi_txdma_dump_desc_one: dump "
-		" desc_p $%p descriptor entry %d\n",
-		desc_p, desc_index));
-	desc.value = 0;
-	desp = ((desc_p != NULL) ? desc_p : (p_tx_desc_t)&desc);
-	desp->value = NXGE_MEM_PIO_READ64(handle);
-#ifdef NXGE_DEBUG
-	sad = (desp->value & TX_PKT_DESC_SAD_MASK);
-	xfer_len = ((desp->value & TX_PKT_DESC_TR_LEN_MASK) >>
-			TX_PKT_DESC_TR_LEN_SHIFT);
-#endif
-	NPI_DEBUG_MSG((handle.function, NPI_TDC_CTL, "\n\t: value 0x%llx\n"
-		"\t\tsad $%p\ttr_len %d len %d\tnptrs %d\tmark %d sop %d\n",
-		desp->value,
-		sad,
-		desp->bits.hdw.tr_len,
-		xfer_len,
-		desp->bits.hdw.num_ptr,
-		desp->bits.hdw.mark,
-		desp->bits.hdw.sop));
-
-	NPI_DEBUG_MSG((handle.function, NPI_TDC_CTL,
-			    "\n<== npi_txdma_dump_desc_one: Done \n"));
-
-}
-
-/*ARGSUSED*/
-void
-npi_txdma_dump_hdr(npi_handle_t handle, p_tx_pkt_header_t hdrp)
-{
-	NPI_DEBUG_MSG((handle.function, NPI_TDC_CTL,
-				    "\n==> npi_txdma_dump_hdr: dump\n"));
-	NPI_DEBUG_MSG((handle.function, NPI_TDC_CTL,
-				    "\n\t: value 0x%llx\n"
-		"\t\tpkttype 0x%x\tip_ver %d\tllc %d\tvlan %d \tihl %d\n"
-		"\t\tl3start %d\tl4start %d\tl4stuff %d\n"
-		"\t\txferlen %d\tpad %d\n",
-		hdrp->value,
-		hdrp->bits.hdw.cksum_en_pkt_type,
-		hdrp->bits.hdw.ip_ver,
-		hdrp->bits.hdw.llc,
-		hdrp->bits.hdw.vlan,
-		hdrp->bits.hdw.ihl,
-		hdrp->bits.hdw.l3start,
-		hdrp->bits.hdw.l4start,
-		hdrp->bits.hdw.l4stuff,
-		hdrp->bits.ldw.tot_xfer_len,
-		hdrp->bits.ldw.pad));
-
-	NPI_DEBUG_MSG((handle.function, NPI_TDC_CTL,
-			    "\n<== npi_txdma_dump_hdr: Done \n"));
-}
-
-npi_status_t
-npi_txdma_inj_int_error_set(npi_handle_t handle, uint8_t channel,
-	p_tdmc_intr_dbg_t erp)
-{
-	int		status = NPI_SUCCESS;
-
-	ASSERT(TXDMA_CHANNEL_VALID(channel));
-	if (!TXDMA_CHANNEL_VALID(channel)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			" npi_txdma_inj_int_error_set"
-			" Invalid Input: channel <0x%x>",
-					    channel));
-		return (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(channel));
-	}
-
-	TXDMA_REG_WRITE64(handle, TDMC_INTR_DBG_REG, channel, erp->value);
-
-	return (status);
-}
-
-/*
- * Static functions start here.
- */
-static npi_status_t
-npi_txdma_control_reset_wait(npi_handle_t handle, uint8_t channel)
-{
-
-	tx_cs_t		txcs;
-	int		loop = 0;
-
-	do {
-		NXGE_DELAY(TXDMA_WAIT_MSEC);
-		TXDMA_REG_READ64(handle, TX_CS_REG, channel, &txcs.value);
-		if (!txcs.bits.ldw.rst) {
-			return (NPI_SUCCESS);
-		}
-		loop++;
-	} while (loop < TXDMA_WAIT_LOOP);
-
-	if (loop == TXDMA_WAIT_LOOP) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			    "npi_txdma_control_reset_wait: RST bit not "
-			    "cleared to 0 txcs.bits 0x%llx", txcs.value));
-		return (NPI_FAILURE | NPI_TXDMA_RESET_FAILED);
-	}
-	return (NPI_SUCCESS);
-}
-
-static npi_status_t
-npi_txdma_control_stop_wait(npi_handle_t handle, uint8_t channel)
-{
-	tx_cs_t		txcs;
-	int		loop = 0;
-
-	do {
-		NXGE_DELAY(TXDMA_WAIT_MSEC);
-		TXDMA_REG_READ64(handle, TX_CS_REG, channel, &txcs.value);
-		if (txcs.bits.ldw.sng_state) {
-			return (NPI_SUCCESS);
-		}
-		loop++;
-	} while (loop < TXDMA_WAIT_LOOP);
-
-	if (loop == TXDMA_WAIT_LOOP) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			    "npi_txdma_control_stop_wait: SNG_STATE not "
-			    "set to 1 txcs.bits 0x%llx", txcs.value));
-		return (NPI_FAILURE | NPI_TXDMA_STOP_FAILED);
-	}
-
-	return (NPI_SUCCESS);
-}
-
-static npi_status_t
-npi_txdma_control_resume_wait(npi_handle_t handle, uint8_t channel)
-{
-	tx_cs_t		txcs;
-	int		loop = 0;
-
-	do {
-		NXGE_DELAY(TXDMA_WAIT_MSEC);
-		TXDMA_REG_READ64(handle, TX_CS_REG, channel, &txcs.value);
-		if (!txcs.bits.ldw.sng_state) {
-			return (NPI_SUCCESS);
-		}
-		loop++;
-	} while (loop < TXDMA_WAIT_LOOP);
-
-	if (loop == TXDMA_WAIT_LOOP) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			    "npi_txdma_control_resume_wait: sng_state not "
-			    "set to 0 txcs.bits 0x%llx", txcs.value));
-		return (NPI_FAILURE | NPI_TXDMA_RESUME_FAILED);
-	}
-
-	return (NPI_SUCCESS);
-}
--- a/usr/src/uts/sun4v/io/nxge/npi/npi_txdma.h	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,290 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef _NPI_TXDMA_H
-#define	_NPI_TXDMA_H
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#ifdef	__cplusplus
-extern "C" {
-#endif
-
-#include <npi.h>
-#include <nxge_txdma_hw.h>
-
-#define	DMA_LOG_PAGE_FN_VALIDATE(cn, pn, fn, status)	\
-{									\
-	status = NPI_SUCCESS;						\
-	if (!TXDMA_CHANNEL_VALID(channel)) {				\
-		status = (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(cn));	\
-	} else if (!TXDMA_PAGE_VALID(pn)) {			\
-		status =  (NPI_FAILURE | NPI_TXDMA_PAGE_INVALID(pn));	\
-	} else if (!TXDMA_FUNC_VALID(fn)) {			\
-		status =  (NPI_FAILURE | NPI_TXDMA_FUNC_INVALID(fn));	\
-	} \
-}
-
-#define	DMA_LOG_PAGE_VALIDATE(cn, pn, status)	\
-{									\
-	status = NPI_SUCCESS;						\
-	if (!TXDMA_CHANNEL_VALID(channel)) {				\
-		status = (NPI_FAILURE | NPI_TXDMA_CHANNEL_INVALID(cn));	\
-	} else if (!TXDMA_PAGE_VALID(pn)) {			\
-		status =  (NPI_FAILURE | NPI_TXDMA_PAGE_INVALID(pn));	\
-	} \
-}
-
-typedef	enum _txdma_cs_cntl_e {
-	TXDMA_INIT_RESET	= 0x1,
-	TXDMA_INIT_START	= 0x2,
-	TXDMA_START		= 0x3,
-	TXDMA_RESET		= 0x4,
-	TXDMA_STOP		= 0x5,
-	TXDMA_RESUME		= 0x6,
-	TXDMA_CLEAR_MMK		= 0x7,
-	TXDMA_MBOX_ENABLE	= 0x8
-} txdma_cs_cntl_t;
-
-typedef	enum _txdma_log_cfg_e {
-	TXDMA_LOG_PAGE_MASK	= 0x01,
-	TXDMA_LOG_PAGE_VALUE	= 0x02,
-	TXDMA_LOG_PAGE_RELOC	= 0x04,
-	TXDMA_LOG_PAGE_VALID	= 0x08,
-	TXDMA_LOG_PAGE_ALL	= (TXDMA_LOG_PAGE_MASK | TXDMA_LOG_PAGE_VALUE |
-				TXDMA_LOG_PAGE_RELOC | TXDMA_LOG_PAGE_VALID)
-} txdma_log_cfg_t;
-
-typedef	enum _txdma_ent_msk_cfg_e {
-	CFG_TXDMA_PKT_PRT_MASK		= TX_ENT_MSK_PKT_PRT_ERR_MASK,
-	CFG_TXDMA_CONF_PART_MASK	= TX_ENT_MSK_CONF_PART_ERR_MASK,
-	CFG_TXDMA_NACK_PKT_RD_MASK	= TX_ENT_MSK_NACK_PKT_RD_MASK,
-	CFG_TXDMA_NACK_PREF_MASK	= TX_ENT_MSK_NACK_PREF_MASK,
-	CFG_TXDMA_PREF_BUF_ECC_ERR_MASK	= TX_ENT_MSK_PREF_BUF_ECC_ERR_MASK,
-	CFG_TXDMA_TX_RING_OFLOW_MASK	= TX_ENT_MSK_TX_RING_OFLOW_MASK,
-	CFG_TXDMA_PKT_SIZE_ERR_MASK	= TX_ENT_MSK_PKT_SIZE_ERR_MASK,
-	CFG_TXDMA_MBOX_ERR_MASK		= TX_ENT_MSK_MBOX_ERR_MASK,
-	CFG_TXDMA_MK_MASK		= TX_ENT_MSK_MK_MASK,
-	CFG_TXDMA_MASK_ALL		= (TX_ENT_MSK_PKT_PRT_ERR_MASK |
-					TX_ENT_MSK_CONF_PART_ERR_MASK |
-					TX_ENT_MSK_NACK_PKT_RD_MASK |
-					TX_ENT_MSK_NACK_PREF_MASK |
-					TX_ENT_MSK_PREF_BUF_ECC_ERR_MASK |
-					TX_ENT_MSK_TX_RING_OFLOW_MASK |
-					TX_ENT_MSK_PKT_SIZE_ERR_MASK |
-					TX_ENT_MSK_MBOX_ERR_MASK |
-					TX_ENT_MSK_MK_MASK)
-} txdma_ent_msk_cfg_t;
-
-
-typedef	struct _txdma_ring_errlog {
-	tx_rng_err_logl_t	logl;
-	tx_rng_err_logh_t	logh;
-} txdma_ring_errlog_t, *p_txdma_ring_errlog_t;
-
-/*
- * Register offset (0x200 bytes for each channel) for logical pages registers.
- */
-#define	NXGE_TXLOG_OFFSET(x, channel) (x + TX_LOG_DMA_OFFSET(channel))
-
-/*
- * Register offset (0x200 bytes for each channel) for transmit ring registers.
- * (Ring configuration, kick register, event mask, control and status,
- *  mailbox, prefetch, ring errors).
- */
-#define	NXGE_TXDMA_OFFSET(x, v, channel) (x + \
-		(!v ? DMC_OFFSET(channel) : TDMC_PIOVADDR_OFFSET(channel)))
-/*
- * Register offset (0x8 bytes for each port) for transmit mapping registers.
- */
-#define	NXGE_TXDMA_MAP_OFFSET(x, port) (x + TX_DMA_MAP_PORT_OFFSET(port))
-
-/*
- * Register offset (0x10 bytes for each channel) for transmit DRR and ring
- * usage registers.
- */
-#define	NXGE_TXDMA_DRR_OFFSET(x, channel) (x + \
-			TXDMA_DRR_RNG_USE_OFFSET(channel))
-
-/*
- * PIO macros to read and write the transmit registers.
- */
-#define	TX_LOG_REG_READ64(handle, reg, channel, val_p)	\
-	NXGE_REG_RD64(handle, NXGE_TXLOG_OFFSET(reg, channel), val_p)
-
-#define	TX_LOG_REG_WRITE64(handle, reg, channel, data)	\
-	NXGE_REG_WR64(handle, NXGE_TXLOG_OFFSET(reg, channel), data)
-
-#define	TXDMA_REG_READ64(handle, reg, channel, val_p)	\
-		NXGE_REG_RD64(handle, \
-		(NXGE_TXDMA_OFFSET(reg, handle.is_vraddr, channel)), val_p)
-
-#define	TXDMA_REG_WRITE64(handle, reg, channel, data)	\
-		NXGE_REG_WR64(handle, \
-		NXGE_TXDMA_OFFSET(reg, handle.is_vraddr, channel), data)
-
-#define	TX_DRR_RNGUSE_REG_READ64(handle, reg, channel, val_p)	\
-	NXGE_REG_RD64(handle, (NXGE_TXDMA_DRR_OFFSET(reg, channel)), val_p)
-
-#define	TX_DRR_RNGUSE_REG_WRITE64(handle, reg, channel, data)	\
-	NXGE_REG_WR64(handle, NXGE_TXDMA_DRR_OFFSET(reg, channel), data)
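
/*
 * Editor's note (illustration, not part of the removed header): for a
 * non-virtual handle (handle.is_vraddr == 0),
 *
 *	TXDMA_REG_READ64(handle, TX_CS_REG, channel, &cs.value);
 *
 * reads TX_CS_REG + DMC_OFFSET(channel), while a handle mapped through
 * the virtual region resolves to TDMC_PIOVADDR_OFFSET(channel) instead.
 * The same pattern applies to the other per-channel macros above.
 */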
-
-/*
- * Transmit Descriptor Definitions.
- */
-#define	TXDMA_DESC_SIZE			(sizeof (tx_desc_t))
-
-#define	NPI_TXDMA_GATHER_INDEX(index)	\
-	(((index) <= TX_MAX_GATHER_POINTERS) ? NPI_SUCCESS : \
-				(NPI_TXDMA_GATHER_INVALID))
-
-/*
- * Transmit NPI error codes
- */
-#define	TXDMA_ER_ST			(TXDMA_BLK_ID << NPI_BLOCK_ID_SHIFT)
-#define	TXDMA_ID_SHIFT(n)		(n << NPI_PORT_CHAN_SHIFT)
-
-#define	TXDMA_HW_STOP_FAILED		(NPI_BK_HW_ER_START | 0x1)
-#define	TXDMA_HW_RESUME_FAILED		(NPI_BK_HW_ER_START | 0x2)
-
-#define	TXDMA_GATHER_INVALID		(NPI_BK_ERROR_START | 0x1)
-#define	TXDMA_XFER_LEN_INVALID		(NPI_BK_ERROR_START | 0x2)
-
-#define	NPI_TXDMA_OPCODE_INVALID(n)	(TXDMA_ID_SHIFT(n) |	\
-					TXDMA_ER_ST | OPCODE_INVALID)
-
-#define	NPI_TXDMA_FUNC_INVALID(n)	(TXDMA_ID_SHIFT(n) |	\
-					TXDMA_ER_ST | PORT_INVALID)
-#define	NPI_TXDMA_CHANNEL_INVALID(n)	(TXDMA_ID_SHIFT(n) |	\
-					TXDMA_ER_ST | CHANNEL_INVALID)
-
-#define	NPI_TXDMA_PAGE_INVALID(n)	(TXDMA_ID_SHIFT(n) |	\
-					TXDMA_ER_ST | LOGICAL_PAGE_INVALID)
-
-#define	NPI_TXDMA_REGISTER_INVALID	(TXDMA_ER_ST | REGISTER_INVALID)
-#define	NPI_TXDMA_COUNTER_INVALID	(TXDMA_ER_ST | COUNTER_INVALID)
-#define	NPI_TXDMA_CONFIG_INVALID	(TXDMA_ER_ST | CONFIG_INVALID)
-
-
-#define	NPI_TXDMA_GATHER_INVALID	(TXDMA_ER_ST | TXDMA_GATHER_INVALID)
-#define	NPI_TXDMA_XFER_LEN_INVALID	(TXDMA_ER_ST | TXDMA_XFER_LEN_INVALID)
-
-#define	NPI_TXDMA_RESET_FAILED		(TXDMA_ER_ST | RESET_FAILED)
-#define	NPI_TXDMA_STOP_FAILED		(TXDMA_ER_ST | TXDMA_HW_STOP_FAILED)
-#define	NPI_TXDMA_RESUME_FAILED		(TXDMA_ER_ST | TXDMA_HW_RESUME_FAILED)
-
-/*
- * Transmit DMA Channel NPI Prototypes.
- */
-npi_status_t npi_txdma_mode32_set(npi_handle_t, boolean_t);
-npi_status_t npi_txdma_log_page_set(npi_handle_t, uint8_t,
-		p_dma_log_page_t);
-npi_status_t npi_txdma_log_page_get(npi_handle_t, uint8_t,
-		p_dma_log_page_t);
-npi_status_t npi_txdma_log_page_handle_set(npi_handle_t, uint8_t,
-		p_log_page_hdl_t);
-npi_status_t npi_txdma_log_page_config(npi_handle_t, io_op_t,
-		txdma_log_cfg_t, uint8_t, p_dma_log_page_t);
-npi_status_t npi_txdma_log_page_vld_config(npi_handle_t, io_op_t,
-		uint8_t, p_log_page_vld_t);
-npi_status_t npi_txdma_drr_weight_set(npi_handle_t, uint8_t,
-		uint32_t);
-npi_status_t npi_txdma_channel_reset(npi_handle_t, uint8_t);
-npi_status_t npi_txdma_channel_init_enable(npi_handle_t,
-		uint8_t);
-npi_status_t npi_txdma_channel_enable(npi_handle_t, uint8_t);
-npi_status_t npi_txdma_channel_disable(npi_handle_t, uint8_t);
-npi_status_t npi_txdma_channel_resume(npi_handle_t, uint8_t);
-npi_status_t npi_txdma_channel_mmk_clear(npi_handle_t, uint8_t);
-npi_status_t npi_txdma_channel_mbox_enable(npi_handle_t, uint8_t);
-npi_status_t npi_txdma_channel_control(npi_handle_t,
-		txdma_cs_cntl_t, uint8_t);
-npi_status_t npi_txdma_control_status(npi_handle_t, io_op_t,
-		uint8_t, p_tx_cs_t);
-
-npi_status_t npi_txdma_event_mask(npi_handle_t, io_op_t,
-		uint8_t, p_tx_dma_ent_msk_t);
-npi_status_t npi_txdma_event_mask_config(npi_handle_t, io_op_t,
-		uint8_t, txdma_ent_msk_cfg_t *);
-npi_status_t npi_txdma_event_mask_mk_out(npi_handle_t, uint8_t);
-npi_status_t npi_txdma_event_mask_mk_in(npi_handle_t, uint8_t);
-
-npi_status_t npi_txdma_ring_addr_set(npi_handle_t, uint8_t,
-		uint64_t, uint32_t);
-npi_status_t npi_txdma_ring_config(npi_handle_t, io_op_t,
-		uint8_t, uint64_t *);
-npi_status_t npi_txdma_mbox_config(npi_handle_t, io_op_t,
-		uint8_t, uint64_t *);
-npi_status_t npi_txdma_desc_gather_set(npi_handle_t,
-		p_tx_desc_t, uint8_t,
-		boolean_t, uint8_t,
-		uint64_t, uint32_t);
-
-npi_status_t npi_txdma_desc_gather_sop_set(npi_handle_t,
-		p_tx_desc_t, boolean_t, uint8_t);
-
-npi_status_t npi_txdma_desc_gather_sop_set_1(npi_handle_t,
-		p_tx_desc_t, boolean_t, uint8_t,
-		uint32_t);
-
-npi_status_t npi_txdma_desc_set_xfer_len(npi_handle_t,
-		p_tx_desc_t, uint32_t);
-
-npi_status_t npi_txdma_desc_set_zero(npi_handle_t, uint16_t);
-npi_status_t npi_txdma_desc_mem_get(npi_handle_t, uint16_t,
-		p_tx_desc_t);
-npi_status_t npi_txdma_desc_kick_reg_set(npi_handle_t, uint8_t,
-		uint16_t, boolean_t);
-npi_status_t npi_txdma_desc_kick_reg_get(npi_handle_t, uint8_t,
-		p_tx_ring_kick_t);
-npi_status_t npi_txdma_ring_head_get(npi_handle_t, uint8_t,
-		p_tx_ring_hdl_t);
-npi_status_t npi_txdma_channel_mbox_get(npi_handle_t, uint8_t,
-		p_txdma_mailbox_t);
-npi_status_t npi_txdma_channel_pre_state_get(npi_handle_t,
-		uint8_t, p_tx_dma_pre_st_t);
-npi_status_t npi_txdma_ring_error_get(npi_handle_t,
-		uint8_t, p_txdma_ring_errlog_t);
-npi_status_t npi_txdma_inj_par_error_clear(npi_handle_t);
-npi_status_t npi_txdma_inj_par_error_set(npi_handle_t,
-		uint32_t);
-npi_status_t npi_txdma_inj_par_error_update(npi_handle_t,
-		uint32_t);
-npi_status_t npi_txdma_inj_par_error_get(npi_handle_t,
-		uint32_t *);
-npi_status_t npi_txdma_dbg_sel_set(npi_handle_t, uint8_t);
-npi_status_t npi_txdma_training_vector_set(npi_handle_t,
-		uint32_t);
-void npi_txdma_dump_desc_one(npi_handle_t, p_tx_desc_t,
-	int);
-npi_status_t npi_txdma_dump_tdc_regs(npi_handle_t, uint8_t);
-npi_status_t npi_txdma_dump_fzc_regs(npi_handle_t);
-npi_status_t npi_txdma_inj_int_error_set(npi_handle_t, uint8_t,
-	p_tdmc_intr_dbg_t);
-#ifdef	__cplusplus
-}
-#endif
-
-#endif	/* _NPI_TXDMA_H */
--- a/usr/src/uts/sun4v/io/nxge/npi/npi_vir.c	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1538 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#include <npi_vir.h>
-
-/* One register only */
-uint64_t pio_offset[] = {
-	DEV_FUNC_SR_REG
-};
-
-const char *pio_name[] = {
-	"DEV_FUNC_SR_REG",
-};
-
-/* Two registers */
-uint64_t fzc_pio_offset[] = {
-	MULTI_PART_CTL_REG,
-	LDGITMRES_REG
-};
-
-const char *fzc_pio_name[] = {
-	"MULTI_PART_CTL_REG",
-	"LDGITMRES_REG"
-};
-
-/* 64 sets */
-uint64_t fzc_pio_dma_bind_offset[] = {
-	DMA_BIND_REG
-};
-
-const char *fzc_pio_dma_bind_name[] = {
-	"DMA_BIND_REG",
-};
-
-/* 69 logical devices */
-uint64_t fzc_pio_ldgnum_offset[] = {
-	LDG_NUM_REG
-};
-
-const char *fzc_pio_ldgnum_name[] = {
-	"LDG_NUM_REG",
-};
-
-/* PIO_LDSV, 64 sets by 8192 bytes */
-uint64_t pio_ldsv_offset[] = {
-	LDSV0_REG,
-	LDSV1_REG,
-	LDSV2_REG,
-	LDGIMGN_REG
-};
-const char *pio_ldsv_name[] = {
-	"LDSV0_REG",
-	"LDSV1_REG",
-	"LDSV2_REG",
-	"LDGIMGN_REG"
-};
-
-/* PIO_IMASK0: 64 by 8192 */
-uint64_t pio_imask0_offset[] = {
-	LD_IM0_REG,
-};
-
-const char *pio_imask0_name[] = {
-	"LD_IM0_REG",
-};
-
-/* PIO_IMASK1: 5 by 8192 */
-uint64_t pio_imask1_offset[] = {
-	LD_IM1_REG
-};
-
-const char *pio_imask1_name[] = {
-	"LD_IM1_REG"
-};
-
-/* SID: 64 by 8 */
-uint64_t fzc_pio_sid_offset[] = {
-	SID_REG
-};
-
-const char *fzc_pio_sid_name[] = {
-	"SID_REG"
-};
-
-npi_status_t
-npi_vir_dump_pio_fzc_regs_one(npi_handle_t handle)
-{
-	uint64_t value;
-	int num_regs, i;
-
-	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-		"\nPIO FZC Common Register Dump\n"));
-
-	num_regs = sizeof (pio_offset) / sizeof (uint64_t);
-	for (i = 0; i < num_regs; i++) {
-		value = 0;
-		NXGE_REG_RD64(handle, pio_offset[i], &value);
-		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL, "0x%08llx "
-			"%s\t 0x%08llx \n",
-			pio_offset[i],
-			pio_name[i], value));
-	}
-
-	num_regs = sizeof (fzc_pio_offset) / sizeof (uint64_t);
-	for (i = 0; i < num_regs; i++) {
-		NXGE_REG_RD64(handle, fzc_pio_offset[i], &value);
-		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL, "0x%08llx "
-			"%s\t 0x%08llx \n",
-			fzc_pio_offset[i],
-			fzc_pio_name[i], value));
-	}
-
-	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-		"\n PIO FZC Register Dump Done \n"));
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_vir_dump_ldgnum(npi_handle_t handle)
-{
-	uint64_t value = 0, offset = 0;
-	int num_regs, i, ldv;
-
-	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-		"\nFZC PIO LDG Number Register Dump\n"));
-
-	num_regs = sizeof (fzc_pio_ldgnum_offset) / sizeof (uint64_t);
-	for (ldv = 0; ldv < NXGE_INT_MAX_LDS; ldv++) {
-		for (i = 0; i < num_regs; i++) {
-			value = 0;
-			offset = fzc_pio_ldgnum_offset[i] + 8 * ldv;
-			NXGE_REG_RD64(handle, offset, &value);
-			NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-				"Logical Device %d: 0x%08llx "
-				"%s\t 0x%08llx \n",
-				ldv, offset,
-				fzc_pio_ldgnum_name[i], value));
-		}
-	}
-
-	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-		"\n FZC PIO LDG Register Dump Done \n"));
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_vir_dump_ldsv(npi_handle_t handle)
-{
-	uint64_t value, offset;
-	int num_regs, i, ldg;
-
-	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-		"\nLD Device State Vector Register Dump\n"));
-
-	num_regs = sizeof (pio_ldsv_offset) / sizeof (uint64_t);
-	for (ldg = 0; ldg < NXGE_INT_MAX_LDGS; ldg++) {
-		for (i = 0; i < num_regs; i++) {
-			value = 0;
-			offset = pio_ldsv_offset[i] + 8192 * ldg;
-			NXGE_REG_RD64(handle, offset, &value);
-			NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-				    "LDG State: group %d: 0x%08llx "
-				    "%s\t 0x%08llx \n",
-				ldg, offset,
-				pio_ldsv_name[i], value));
-		}
-	}
-
-	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-		"\n FZC PIO LDG Register Dump Done \n"));
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_vir_dump_imask0(npi_handle_t handle)
-{
-	uint64_t value, offset;
-	int num_regs, i, ldv;
-
-	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-		"\nLD Interrupt Mask Register Dump\n"));
-
-	num_regs = sizeof (pio_imask0_offset) / sizeof (uint64_t);
-	for (ldv = 0; ldv < 64; ldv++) {
-		for (i = 0; i < num_regs; i++) {
-			value = 0;
-			offset = pio_imask0_offset[i] + 8192 * ldv;
-			NXGE_REG_RD64(handle, offset,
-				&value);
-			NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-				"LD Interrupt Mask %d: 0x%08llx "
-				"%s\t 0x%08llx \n",
-				ldv, offset,
-				pio_imask0_name[i], value));
-		}
-	}
-	num_regs = sizeof (pio_imask1_offset) / sizeof (uint64_t);
-	for (ldv = 64; ldv < 69; ldv++) {
-		for (i = 0; i < num_regs; i++) {
-			value = 0;
-			offset = pio_imask1_offset[i] + 8192 * (ldv - 64);
-			NXGE_REG_RD64(handle, offset,
-				&value);
-			NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-				"LD Interrupt Mask %d: 0x%08llx "
-				"%s\t 0x%08llx \n",
-				ldv, offset,
-				pio_imask1_name[i], value));
-		}
-	}
-
-	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-		"\n FZC PIO Logical Device Group Register Dump Done \n"));
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_vir_dump_sid(npi_handle_t handle)
-{
-	uint64_t value, offset;
-	int num_regs, i, ldg;
-
-	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-		"\nSystem Interrupt Data Register Dump\n"));
-
-	num_regs = sizeof (fzc_pio_sid_offset) / sizeof (uint64_t);
-	for (ldg = 0; ldg < NXGE_INT_MAX_LDGS; ldg++) {
-		for (i = 0; i < num_regs; i++) {
-			value = 0;
-			offset = fzc_pio_sid_offset[i] + 8 * ldg;
-			NXGE_REG_RD64(handle, offset,
-				&value);
-			NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-				"SID for group %d: 0x%08llx "
-				"%s\t 0x%08llx \n",
-				ldg, offset,
-				fzc_pio_sid_name[i], value));
-		}
-	}
-
-	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
-		"\n FZC PIO SID Register Dump Done \n"));
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_dev_func_sr_init():
- *	This function is called to initialize the device function
- *	shared register (set the software implementation lock
- *	state to FREE).
- * Parameters:
- *	handle		- NPI handle
- * Return:
- *	NPI_SUCCESS	- If initialization is complete successfully.
- *			  (set sr bits to free).
- *	Error:
- *	NPI_FAILURE
- *		VIR_TAS_BUSY
- */
-
-npi_status_t
-npi_dev_func_sr_init(npi_handle_t handle)
-{
-	dev_func_sr_t		sr;
-	int			status = NPI_SUCCESS;
-
-	NXGE_REG_RD64(handle, DEV_FUNC_SR_REG, &sr.value);
-	if (!sr.bits.ldw.tas) {
-		/*
-		 * After read, this bit is set to 1 by hardware.
-		 * We own it if tas bit read as 0.
-		 * Set the lock state to free if it is in reset state.
-		 */
-		if (!sr.bits.ldw.sr) {
-			/* reset state */
-			sr.bits.ldw.sr |= NPI_DEV_SR_LOCK_ST_FREE;
-			NXGE_REG_WR64(handle, DEV_FUNC_SR_REG, sr.value);
-			sr.bits.ldw.tas = 0;
-			NXGE_REG_WR64(handle, DEV_FUNC_SR_REG, sr.value);
-		}
-
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			" npi_dev_func_sr_init"
-			" sr <0x%x>",
-			sr.bits.ldw.sr));
-	} else {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_dev_func_sr_init"
-				    " tas busy <0x%x>",
-				    sr.bits.ldw));
-		status = NPI_VIR_TAS_BUSY(sr.bits.ldw.funcid);
-	}
-
-	return (status);
-}
-
-/*
- * npi_dev_func_sr_lock_enter():
- *	This function is called to lock the function shared register
- *	by setting the lock state to busy.
- * Parameters:
- *	handle		- NPI handle
- * Return:
- *	NPI_SUCCESS	- If the function id can own the lock.
- *
- *	Error:
- *	NPI_FAILURE
- *		VIR_SR_RESET
- *		VIR_SR_BUSY
- *		VIR_SR_INVALID
- *		VIR_TAS_BUSY
- */
-
-npi_status_t
-npi_dev_func_sr_lock_enter(npi_handle_t handle)
-{
-	dev_func_sr_t		sr;
-	int			status = NPI_SUCCESS;
-	uint32_t		state;
-
-	NXGE_REG_RD64(handle, DEV_FUNC_SR_REG, &sr.value);
-	if (!sr.bits.ldw.tas) {
-		/*
-		 * tas bit will be set to 1 by hardware.
-		 * reset tas bit when we unlock the sr.
-		 */
-		state = sr.bits.ldw.sr & NPI_DEV_SR_LOCK_ST_MASK;
-		switch (state) {
-		case NPI_DEV_SR_LOCK_ST_FREE:
-			/*
-			 * set it to busy and our function id.
-			 */
-			sr.bits.ldw.sr |= (NPI_DEV_SR_LOCK_ST_BUSY |
-						(sr.bits.ldw.funcid <<
-						NPI_DEV_SR_LOCK_FID_SHIFT));
-			NXGE_REG_WR64(handle, DEV_FUNC_SR_REG, sr.value);
-			break;
-
-		case NPI_DEV_SR_LOCK_ST_RESET:
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_dev_func_sr_lock_enter"
-					    " reset state <0x%x>",
-					    sr.bits.ldw.sr));
-			status = NPI_VIR_SR_RESET(sr.bits.ldw.funcid);
-			break;
-
-		case NPI_DEV_SR_LOCK_ST_BUSY:
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_dev_func_sr_lock_enter"
-					    " busy <0x%x>",
-					    sr.bits.ldw.sr));
-			status = NPI_VIR_SR_BUSY(sr.bits.ldw.funcid);
-			break;
-
-		default:
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_dev_func_sr_lock_enter"
-					    " invalid state <0x%x>",
-					    sr.bits.ldw.sr));
-			status = NPI_VIR_SR_INVALID(sr.bits.ldw.funcid);
-			break;
-		}
-	} else {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_dev_func_sr_lock_enter"
-				    " tas busy <0x%x>", sr.bits.ldw));
-		status = NPI_VIR_TAS_BUSY(sr.bits.ldw.funcid);
-	}
-
-	return (status);
-}
-
-/*
- * npi_dev_func_sr_lock_free():
- *	This function is called to free the function shared register
- *	by setting the lock state to free.
- * Parameters:
- *	handle		- NPI handle
- * Return:
- *	NPI_SUCCESS	- If the function id can free the lock.
- *
- *	Error:
- *	NPI_FAILURE
- *		VIR_SR_NOTOWNER
- *		VIR_TAS_NOTREAD
- */
-
-npi_status_t
-npi_dev_func_sr_lock_free(npi_handle_t handle)
-{
-	dev_func_sr_t		sr;
-	int			status = NPI_SUCCESS;
-
-	NXGE_REG_RD64(handle, DEV_FUNC_SR_REG, &sr.value);
-	if (sr.bits.ldw.tas) {
-		if (sr.bits.ldw.funcid == NPI_GET_LOCK_OWNER(sr.bits.ldw.sr)) {
-			sr.bits.ldw.sr &= NPI_DEV_SR_IMPL_ST_MASK;
-			sr.bits.ldw.sr |= NPI_DEV_SR_LOCK_ST_FREE;
-			sr.bits.ldw.tas = 0;
-			NXGE_REG_WR64(handle, DEV_FUNC_SR_REG, sr.value);
-		} else {
-			NPI_DEBUG_MSG((handle.function, NPI_VIR_CTL,
-					    " npi_dev_func_sr_lock_free"
-					    " not owner <0x%x>",
-					    sr.bits.ldw.sr));
-			status = NPI_VIR_SR_NOTOWNER(sr.bits.ldw.funcid);
-		}
-	} else {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_dev_func_sr_lock_free"
-				    " invalid tas state <0x%x>",
-				    sr.bits.ldw.tas));
-		status = NPI_VIR_TAS_NOTREAD(sr.bits.ldw.funcid);
-	}
-
-	return (status);
-}
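
/*
 * Editor's usage sketch (not part of the removed file): the shared
 * device-function register acts as a cross-function lock; enter, perform
 * the serialized work, then free.  The "serialized work" is a placeholder.
 */
static npi_status_t
example_dev_func_sr_critical(npi_handle_t handle)
{
	npi_status_t	status;

	status = npi_dev_func_sr_lock_enter(handle);
	if (status != NPI_SUCCESS)
		return (status);	/* busy, in reset, or TAS not owned */

	/*
	 * ... hardware accesses that must be serialized across the
	 * device functions go here ...
	 */

	return (npi_dev_func_sr_lock_free(handle));
}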
-
-/*
- * npi_dev_func_sr_funcid_get():
- *	This function is called to get the caller's function ID.
- *	(based on address bits [25:26] on read access).
- *	(After read, the TAS bit is always set to 1. Software needs
- *	to write 0 to clear.) This function will write 0 to clear
- *	the TAS bit if we own it.
- * Parameters:
- *	handle		- NPI handle
- *	funcid_p	- pointer to store the function id.
- * Return:
- *	NPI_SUCCESS	- If get function id is complete successfully.
- *
- *	Error:
- */
-
-npi_status_t
-npi_dev_func_sr_funcid_get(npi_handle_t handle, uint8_t *funcid_p)
-{
-	dev_func_sr_t		sr;
-
-	NXGE_REG_RD64(handle, DEV_FUNC_SR_REG, &sr.value);
-	*funcid_p = NXGE_VAL(DEV_FUNC_SR_FUNCID, sr.value);
-	if (!sr.bits.ldw.tas) {
-		/*
-		 * After read, this bit is set to 1 by hardware.
-		 * We own it if tas bit read as 0.
-		 */
-		sr.bits.ldw.tas = 0;
-		NXGE_REG_WR64(handle, DEV_FUNC_SR_REG, sr.value);
-	}
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_dev_func_sr_sr_raw_get():
- *	This function is called to get the shared register value.
- *	(After read, the TAS bit is always set to 1. Software needs
- *	to write 0 to clear if we own it.)
- *
- * Parameters:
- *	handle		- NPI handle
- *	sr_p		- pointer to store the shared value of this register.
- *
- * Return:
- *	NPI_SUCCESS		- If shared value get is complete successfully.
- *
- *	Error:
- */
-npi_status_t
-npi_dev_func_sr_sr_raw_get(npi_handle_t handle, uint16_t *sr_p)
-{
-	dev_func_sr_t		sr;
-
-	NXGE_REG_RD64(handle, DEV_FUNC_SR_REG, &sr.value);
-	*sr_p = NXGE_VAL(DEV_FUNC_SR_FUNCID, sr.value);
-	if (!sr.bits.ldw.tas) {
-		/*
-		 * After read, this bit is set to 1 by hardware.
-		 * We own it if tas bit read as 0.
-		 */
-		sr.bits.ldw.tas = 0;
-		NXGE_REG_WR64(handle, DEV_FUNC_SR_REG, sr.value);
-	}
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_dev_func_sr_sr_get():
- *	This function is called to get the shared register value.
- *	(After read, the TAS bit is always set to 1. Software needs
- *	to write 0 to clear if we own it.)
- *
- * Parameters:
- *	handle	- NPI handle
- *	sr_p	- pointer to store the shared value of this register
- *		  (only the non-lock, non-function-id portion of the
- *		  register is returned).
- *
- * Return:
- *	NPI_SUCCESS		- If shared value get is complete successfully.
- *
- *	Error:
- */
-
-npi_status_t
-npi_dev_func_sr_sr_get(npi_handle_t handle, uint16_t *sr_p)
-{
-	dev_func_sr_t		sr;
-	uint16_t sr_impl = 0;
-
-	NXGE_REG_RD64(handle, DEV_FUNC_SR_REG, &sr.value);
-	sr_impl = NPI_GET_LOCK_IMPL_ST(sr.bits.ldw.sr);
-	*sr_p = (sr_impl << NPI_DEV_SR_IMPL_ST_SHIFT);
-	if (!sr.bits.ldw.tas) {
-		/*
-		 * After read, this bit is set to 1 by hardware.
-		 * We own it if tas bit read as 0.
-		 */
-		sr.bits.ldw.tas = 0;
-		NXGE_REG_WR64(handle, DEV_FUNC_SR_REG, sr.value);
-	}
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_dev_func_sr_sr_get_set_clear():
- *	This function is called to set the shared register value.
- *	(Shared register must be read first. If tas bit is 0, then
- *	it implies that the software can proceed to set). After
- *	setting, tas bit will be cleared.
- * Parameters:
- *	handle		- NPI handle
- *	impl_sr		- shared value to set (only the 8 bit
- *			  implementation specific state info).
- *
- * Return:
- *	NPI_SUCCESS		- If shared value is set successfully.
- *
- *	Error:
- *	NPI_FAILURE
- *		VIR_TAS_BUSY
- */
-
-npi_status_t
-npi_dev_func_sr_sr_get_set_clear(npi_handle_t handle, uint16_t impl_sr)
-{
-	dev_func_sr_t		sr;
-	int			status;
-
-	status = npi_dev_func_sr_lock_enter(handle);
-	if (status != NPI_SUCCESS) {
-		NPI_DEBUG_MSG((handle.function, NPI_VIR_CTL,
-				    " npi_dev_func_sr_sr_get_set_clear"
-				    " unable to acquire lock:"
-				    " status <0x%x>", status));
-		return (status);
-	}
-
-	NXGE_REG_RD64(handle, DEV_FUNC_SR_REG, &sr.value);
-	sr.bits.ldw.sr |= (impl_sr << NPI_DEV_SR_IMPL_ST_SHIFT);
-	NXGE_REG_WR64(handle, DEV_FUNC_SR_REG, sr.value);
-
-	return (npi_dev_func_sr_lock_free(handle));
-}
-
-/*
- * npi_dev_func_sr_sr_set_only():
- *	This function is called to only set the shared register value.
- * Parameters:
- *	handle		- NPI handle
- *	impl_sr		- shared value to set.
- *
- * Return:
- *	NPI_SUCCESS		- If shared value is set successfully.
- *
- *	Error:
- *	NPI_FAILURE
- *		VIR_TAS_BUSY
- */
-
-npi_status_t
-npi_dev_func_sr_sr_set_only(npi_handle_t handle, uint16_t impl_sr)
-{
-	int		status = NPI_SUCCESS;
-	dev_func_sr_t	sr;
-
-	NXGE_REG_RD64(handle, DEV_FUNC_SR_REG, &sr.value);
-	/* must be the owner */
-	if (sr.bits.ldw.funcid == NPI_GET_LOCK_OWNER(sr.bits.ldw.sr)) {
-		sr.bits.ldw.sr |= (impl_sr << NPI_DEV_SR_IMPL_ST_SHIFT);
-		NXGE_REG_WR64(handle, DEV_FUNC_SR_REG, sr.value);
-	} else {
-		NPI_DEBUG_MSG((handle.function, NPI_VIR_CTL,
-				    " npi_dev_func_sr_sr_set_only"
-				    " not owner <0x%x>",
-				    sr.bits.ldw.sr));
-		status = NPI_VIR_SR_NOTOWNER(sr.bits.ldw.funcid);
-	}
-
-	return (status);
-}
-
-/*
- * npi_dev_func_sr_busy():
- *	This function is called to see if we can own the device.
- *	It will not reset the tas bit.
- * Parameters:
- *	handle		- NPI handle
- *	busy_p		- pointer to store busy flag.
- *				(B_TRUE: device is in use, B_FALSE: free).
- * Return:
- *	NPI_SUCCESS		- If tas bit is read successfully.
- *	Error:
- */
-
-npi_status_t
-npi_dev_func_sr_busy(npi_handle_t handle, boolean_t *busy_p)
-{
-	dev_func_sr_t	sr;
-
-	NXGE_REG_RD64(handle, DEV_FUNC_SR_REG, &sr.value);
-	if (!sr.bits.ldw.tas) {
-		sr.bits.ldw.tas = 0;
-		NXGE_REG_WR64(handle, DEV_FUNC_SR_REG, sr.value);
-		*busy_p = B_FALSE;
-	} else {
-		/* Other function already owns it */
-		*busy_p = B_TRUE;
-	}
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_dev_func_sr_tas_get():
- *	This function is called to get the tas bit
- *	(after read, this bit is always set to 1, software write 0
- *	 to clear it).
- *
- * Parameters:
- *	handle		- NPI handle
- *	tas_p		- pointer to store the tas value
- *
- * Return:
- *	NPI_SUCCESS		- If tas value get is complete successfully.
- *	Error:
- */
-
-npi_status_t
-npi_dev_func_sr_tas_get(npi_handle_t handle, uint8_t *tas_p)
-{
-	dev_func_sr_t		sr;
-
-	NXGE_REG_RD64(handle, DEV_FUNC_SR_REG, &sr.value);
-	*tas_p = sr.bits.ldw.tas;
-	if (!sr.bits.ldw.tas) {
-		sr.bits.ldw.tas = 0;
-		NXGE_REG_WR64(handle, DEV_FUNC_SR_REG, sr.value);
-
-	}
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_fzc_mpc_set():
- *	This function is called to enable the write access
- *	to FZC region to function zero.
- * Parameters:
- *	handle		- NPI handle
- * Return:
- *	NPI_SUCCESS	-
- *	Error:
- */
-
-npi_status_t
-npi_fzc_mpc_set(npi_handle_t handle, boolean_t mpc)
-{
-	multi_part_ctl_t	mp;
-
-	mp.value = 0;
-	if (mpc) {
-		mp.bits.ldw.mpc = 1;
-	}
-	NXGE_REG_WR64(handle, MULTI_PART_CTL_REG, mp.value);
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_fzc_mpc_get():
- *	This function is called to get the access mode.
- * Parameters:
- *	handle		- NPI handle
- * Return:
- *	NPI_SUCCESS	-
- *
- */
-
-npi_status_t
-npi_fzc_mpc_get(npi_handle_t handle, boolean_t *mpc_p)
-{
-	multi_part_ctl_t	mpc;
-
-	mpc.value = 0;
-	NXGE_REG_RD64(handle, MULTI_PART_CTL_REG, &mpc.value);
-	*mpc_p = mpc.bits.ldw.mpc;
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_fzc_dma_bind_set():
- *	This function is called to set DMA binding register.
- * Parameters:
- *	handle		- NPI handle
- *	dma_bind	- NPI defined data structure that
- *			  contains the tx/rx channel binding info.
- *			  to set.
- * Return:
- *	NPI_SUCCESS	-
- *	Error:
- *	NPI_FAILURE
- *
- */
-
-npi_status_t
-npi_fzc_dma_bind_set(npi_handle_t handle, fzc_dma_bind_t dma_bind)
-{
-	dma_bind_t	bind;
-	int		status;
-	uint8_t		fn, region, id, tn, rn;
-
-	fn = dma_bind.function_id;
-	region = dma_bind.sub_vir_region;
-	id = dma_bind.vir_index;
-	tn = dma_bind.tx_channel;
-	rn = dma_bind.rx_channel;
-
-	DMA_BIND_VADDR_VALIDATE(fn, region, id, status);
-	if (status) {
-		return (status);
-	}
-
-	if (dma_bind.tx_bind) {
-		DMA_BIND_TX_VALIDATE(tn, status);
-		if (status) {
-			return (status);
-		}
-	}
-
-	if (dma_bind.rx_bind) {
-		DMA_BIND_RX_VALIDATE(rn, status);
-		if (status) {
-			return (status);
-		}
-	}
-
-	bind.value = 0;
-	if (dma_bind.tx_bind) {
-		bind.bits.ldw.tx_bind = 1;
-		bind.bits.ldw.tx = tn;
-	}
-	if (dma_bind.rx_bind) {
-		bind.bits.ldw.rx_bind = 1;
-		bind.bits.ldw.rx = rn;
-	}
-
-	NXGE_REG_WR64(handle, DMA_BIND_REG +
-		DMA_BIND_REG_OFFSET(fn, region, id), bind.value);
-
-	return (status);
-}
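/*
 * Illustrative sketch (not part of the original source): populating the NPI
 * fzc_dma_bind_t structure and programming one binding with
 * npi_fzc_dma_bind_set().  The channel and page numbers are arbitrary
 * example values.
 */
static npi_status_t
example_bind_channels(npi_handle_t handle)
{
	fzc_dma_bind_t bind;

	bind.function_id = 0;		/* function 0 - 3 */
	bind.sub_vir_region = 0;	/* sub region 0 or 1 */
	bind.vir_index = 0;		/* virtual page index 0 - 7 */
	bind.tx_bind = B_TRUE;
	bind.tx_channel = 0;		/* hardware TX channel 0 - 23 */
	bind.rx_bind = B_TRUE;
	bind.rx_channel = 0;		/* hardware RX channel 0 - 15 */

	return (npi_fzc_dma_bind_set(handle, bind));
}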
-
-/*
- * npi_fzc_ldg_num_set():
- *	This function is called to set up a logical group number that
- *	a logical device belongs to.
- * Parameters:
- *	handle		- NPI handle
- *	ld		- logical device number (0 - 68)
- *	ldg		- logical device group number (0 - 63)
- * Return:
- *	NPI_SUCCESS	-
- *	Error:
- *	NPI_FAILURE
- *
- */
-
-npi_status_t
-npi_fzc_ldg_num_set(npi_handle_t handle, uint8_t ld, uint8_t ldg)
-{
-	ldg_num_t	gnum;
-
-	ASSERT(LD_VALID(ld));
-	if (!LD_VALID(ld)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_fzc_ldg_num_set"
-				    " Invalid Input: ld <0x%x>", ld));
-		return (NPI_FAILURE | NPI_VIR_LD_INVALID(ld));
-	}
-
-	ASSERT(LDG_VALID(ldg));
-	if (!LDG_VALID(ldg)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_fzc_ldg_num_set"
-				    " ldg <0x%x>", ldg));
-		return (NPI_FAILURE | NPI_VIR_LDG_INVALID(ldg));
-	}
-
-	gnum.value = 0;
-	gnum.bits.ldw.num = ldg;
-
-	NXGE_REG_WR64(handle, LDG_NUM_REG + LD_NUM_OFFSET(ld),
-		gnum.value);
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_fzc_ldg_num_get():
- *	This function is called to get the logical device group that
- *	a logical device belongs to.
- * Parameters:
- *	handle		- NPI handle
- *	ld		- logical device number (0 - 68)
- *	*ldg_p		- pointer to store its group number.
- * Return:
- *	NPI_SUCCESS	-
- *	Error:
- *	NPI_FAILURE
- */
-
-npi_status_t
-npi_fzc_ldg_num_get(npi_handle_t handle, uint8_t ld, uint8_t *ldg_p)
-{
-	uint64_t val;
-
-	ASSERT(LD_VALID(ld));
-	if (!LD_VALID(ld)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_fzc_ldg_num_get"
-				    " Invalid Input:"
-				    " ld <0x%x>", ld));
-		return (NPI_FAILURE | NPI_VIR_LD_INVALID(ld));
-	}
-
-	NXGE_REG_RD64(handle, LDG_NUM_REG + LD_NUM_OFFSET(ld), &val);
-
-	*ldg_p = (uint8_t)(val & LDG_NUM_NUM_MASK);
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_ldsv_ldfs_get():
- *	This function is called to get device state vectors.
- * Parameters:
- *	handle		- NPI handle
- *	ldg		- logical device group (0 - 63)
- *	vector0_p	- pointer to store the LDSV0 state vector.
- *	vector1_p	- pointer to store the LDSV1 state vector.
- *	vector2_p	- pointer to store the LDSV2 state vector.
- * Return:
- *	NPI_SUCCESS	-
- *	Error:
- *	NPI_FAILURE
- */
-
-npi_status_t
-npi_ldsv_ldfs_get(npi_handle_t handle, uint8_t ldg, uint64_t *vector0_p,
-	uint64_t *vector1_p, uint64_t *vector2_p)
-{
-	int	status;
-
-	if ((status = npi_ldsv_get(handle, ldg, VECTOR0, vector0_p))) {
-		return (status);
-	}
-	if ((status = npi_ldsv_get(handle, ldg, VECTOR1, vector1_p))) {
-		return (status);
-	}
-	if ((status = npi_ldsv_get(handle, ldg, VECTOR2, vector2_p))) {
-		return (status);
-	}
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_ldsv_get():
- *	This function is called to get device state vectors.
- * Parameters:
- *	handle		- NPI handle
- *	ldg		- logical device group (0 - 63)
- *	vector		- vector type (0, 1 or 2)
- *	*ldf_p		- pointer to store its flag bits.
- * Return:
- *	NPI_SUCCESS	-
- *	Error:
- *	NPI_FAILURE
- */
-
-npi_status_t
-npi_ldsv_get(npi_handle_t handle, uint8_t ldg, ldsv_type_t vector,
-	uint64_t *ldf_p)
-{
-	uint64_t		offset;
-
-	ASSERT(LDG_VALID(ldg));
-	if (!LDG_VALID(ldg)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_ldsv_get"
-				    " Invalid Input "
-				    " ldg <0x%x>", ldg));
-		return (NPI_FAILURE | NPI_VIR_LDG_INVALID(ldg));
-	}
-
-	switch (vector) {
-	case VECTOR0:
-		offset = LDSV0_REG + LDSV_OFFSET(ldg);
-		break;
-
-	case VECTOR1:
-		offset = LDSV1_REG + LDSV_OFFSET(ldg);
-		break;
-
-	case VECTOR2:
-		offset = LDSV2_REG + LDSV_OFFSET(ldg);
-		break;
-
-	default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_ldsv_get"
-				    " Invalid Input: "
-				    " ldsv type <0x%x>", vector));
-		return (NPI_FAILURE | NPI_VIR_LDSV_INVALID(vector));
-	}
-
-	NXGE_REG_RD64(handle, offset, ldf_p);
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_ldsv_ld_get():
- *	This function is called to get the flag bit value of a device.
- * Parameters:
- *	handle		- NPI handle
- *	ldg		- logical device group (0 - 63)
- *	ld		- logical device (0 - 68)
- *	ldf_type	- either LDF0 (0) or LDF1 (1)
- *	vector		- vector type (0, 1 or 2)
- *	*ldf_p		- pointer to store its flag bits.
- * Return:
- *	NPI_SUCCESS	-
- *	Error:
- *	NPI_FAILURE
- */
-
-npi_status_t
-npi_ldsv_ld_get(npi_handle_t handle, uint8_t ldg, uint8_t ld,
-	ldsv_type_t vector, ldf_type_t ldf_type, boolean_t *flag_p)
-{
-	uint64_t		sv;
-	uint64_t		offset;
-
-	ASSERT(LDG_VALID(ldg));
-	if (!LDG_VALID(ldg)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_ldsv_ld_get"
-				    " Invalid Input: "
-				    " ldg <0x%x>", ldg));
-		return (NPI_FAILURE | NPI_VIR_LDG_INVALID(ldg));
-	}
-	ASSERT((LD_VALID(ld)) &&	\
-		((vector != VECTOR2) || (ld >= NXGE_MAC_LD_START)));
-	if (!LD_VALID(ld)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_ldsv_ld_get Invalid Input: "
-				    " ld <0x%x>", ld));
-		return (NPI_FAILURE | NPI_VIR_LD_INVALID(ld));
-	} else if (vector == VECTOR2 && ld < NXGE_MAC_LD_START) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_ldsv_ld_get Invalid Input:"
-				    " ld-vector2 <0x%x>", ld));
-		return (NPI_FAILURE | NPI_VIR_LD_INVALID(ld));
-	}
-
-	switch (vector) {
-	case VECTOR0:
-		offset = LDSV0_REG + LDSV_OFFSET(ldg);
-		break;
-
-	case VECTOR1:
-		offset = LDSV1_REG + LDSV_OFFSET(ldg);
-		break;
-
-	case VECTOR2:
-		offset = LDSV2_REG + LDSV_OFFSET(ldg);
-		break;
-
-	default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			" npi_ldsv_ld_get Invalid Input:"
-			" ldsv type <0x%x>", vector));
-		return (NPI_FAILURE | NPI_VIR_LDSV_INVALID(vector));
-	}
-
-	NXGE_REG_RD64(handle, offset, &sv);
-	if (vector != VECTOR2) {
-		*flag_p = ((sv >> ld) & LDSV_MASK_ALL);
-	} else {
-		if (ldf_type) {
-			*flag_p = (((sv >> LDSV2_LDF1_SHIFT) >>
-				(ld - NXGE_MAC_LD_START)) & LDSV_MASK_ALL);
-		} else {
-			*flag_p = (((sv >> LDSV2_LDF0_SHIFT) >>
-				(ld - NXGE_MAC_LD_START)) & LDSV_MASK_ALL);
-		}
-	}
-
-	return (NPI_SUCCESS);
-}
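/*
 * Illustrative sketch (not part of the original source): reading the LDF0
 * flag of a single non-MAC logical device with npi_ldsv_ld_get().  The
 * group and device numbers are arbitrary example values; non-MAC devices
 * are reported in vector 0.
 */
static void
example_poll_ldf0(npi_handle_t handle)
{
	boolean_t flag = B_FALSE;

	if (npi_ldsv_ld_get(handle, 0, 0, VECTOR0, LDF0, &flag) ==
	    NPI_SUCCESS && flag) {
		/* LDF0 is asserted for logical device 0; service it. */
	}
}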
-
-/*
- * npi_ldsv_ld_ldf0_get():
- *	This function is called to get the ldf0 bit value of a device.
- * Parameters:
- *	handle		- NPI handle
- *	ldg		- logical device group (0 - 63)
- *	ld		- logical device (0 - 68)
- *	*ldf_p		- pointer to store its flag bits.
- * Return:
- *	NPI_SUCCESS	-
- *	Error:
- *	NPI_FAILURE
- */
-
-npi_status_t
-npi_ldsv_ld_ldf0_get(npi_handle_t handle, uint8_t ldg, uint8_t ld,
-	boolean_t *flag_p)
-{
-	ldsv_type_t vector;
-
-	/* MAC logical devices are reported in vector 2; others in vector 0. */
-	if (ld >= NXGE_MAC_LD_START) {
-		vector = VECTOR2;
-	} else {
-		vector = VECTOR0;
-	}
-
-	return (npi_ldsv_ld_get(handle, ldg, ld, vector, LDF0, flag_p));
-}
-
-/*
- * npi_ldsv_ld_ldf1_get():
- *	This function is called to get the ldf1 bit value of a device.
- * Parameters:
- *	handle		- NPI handle
- *	ldg		- logical device group (0 - 63)
- *	ld		- logical device (0 - 68)
- *	*ldf_p		- pointer to store its flag bits.
- * Return:
- *	NPI_SUCCESS	-
- *	Error:
- *	NPI_FAILURE
- */
-
-npi_status_t
-npi_ldsv_ld_ldf1_get(npi_handle_t handle, uint8_t ldg, uint8_t ld,
-		boolean_t *flag_p)
-{
-	ldsv_type_t vector;
-
-	/* MAC logical devices are reported in vector 2; others in vector 1. */
-	if (ld >= NXGE_MAC_LD_START) {
-		vector = VECTOR2;
-	} else {
-		vector = VECTOR1;
-	}
-
-	return (npi_ldsv_ld_get(handle, ldg, ld, vector, LDF1, flag_p));
-}
-
-/*
- * npi_intr_mask_set():
- *	This function is called to select the mask bits for both ldf0 and ldf1.
- * Parameters:
- *	handle		- NPI handle
- *	ld		- logical device (0 - 68)
- *	ldf_mask	- mask value to set (both ldf0 and ldf1).
- * Return:
- *	NPI_SUCCESS	-
- *	Error:
- *	NPI_FAILURE
- */
-
-npi_status_t
-npi_intr_mask_set(npi_handle_t handle, uint8_t ld, uint8_t ldf_mask)
-{
-	uint64_t		offset;
-
-	ASSERT(LD_VALID(ld));
-	if (!LD_VALID(ld)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			    " npi_intr_mask_set ld", ld));
-		return (NPI_FAILURE | NPI_VIR_LD_INVALID(ld));
-	}
-
-	ldf_mask &= LD_IM0_MASK;
-	offset = LDSV_OFFSET_MASK(ld);
-
-	NPI_DEBUG_MSG((handle.function, NPI_VIR_CTL,
-		"npi_intr_mask_set: ld %d "
-		" offset 0x%0llx "
-		" mask 0x%x",
-		ld, offset, ldf_mask));
-
-	NXGE_REG_WR64(handle, offset, (uint64_t)ldf_mask);
-
-	return (NPI_SUCCESS);
-}
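/*
 * Illustrative sketch (not part of the original source): masking a logical
 * device while it is being serviced and unmasking it afterwards.
 * LD_IM0_MASK is the same "mask both LDF0 and LDF1" value used inside
 * npi_intr_mask_set().
 */
static void
example_mask_device(npi_handle_t handle, uint8_t ld)
{
	/* Mask both flag bits of the device. */
	(void) npi_intr_mask_set(handle, ld, LD_IM0_MASK);

	/* ... service the device here ... */

	/* Unmask both flag bits again. */
	(void) npi_intr_mask_set(handle, ld, 0);
}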
-
-/*
- * npi_intr_mask_get():
- *	This function is called to get the mask bits.
- * Parameters:
- *	handle		- NPI handle
- *	ld		- logical device (0 - 68)
- *	ldf_mask	- pointer to store mask bits info.
- * Return:
- *	NPI_SUCCESS	-
- *	Error:
- *	NPI_FAILURE
- */
-npi_status_t
-npi_intr_mask_get(npi_handle_t handle, uint8_t ld, uint8_t *ldf_mask_p)
-{
-	uint64_t		offset;
-	uint64_t		val;
-
-	ASSERT(LD_VALID(ld));
-	if (!LD_VALID(ld)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-			    " npi_intr_mask_get ld", ld));
-		return (NPI_FAILURE | NPI_VIR_LD_INVALID(ld));
-	}
-
-	offset = LDSV_OFFSET_MASK(ld);
-
-	NXGE_REG_RD64(handle, offset, &val);
-
-	*ldf_mask_p = (uint8_t)(val & LD_IM_MASK);
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_intr_ldg_mgmt_set():
- *	This function is called to set interrupt timer and arm bit.
- * Parameters:
- *	handle		- NPI handle
- *	ldg		- logical device group (0 - 63)
- *	arm		- B_TRUE (arm) B_FALSE (disable)
- * Return:
- *	NPI_SUCCESS	-
- *	Error:
- *	NPI_FAILURE
- */
-
-npi_status_t
-npi_intr_ldg_mgmt_set(npi_handle_t handle, uint8_t ldg, boolean_t arm,
-			uint8_t timer)
-{
-	ldgimgm_t		mgm;
-	uint64_t		val;
-
-	ASSERT((LDG_VALID(ldg)) && (LD_INTTIMER_VALID(timer)));
-	if (!LDG_VALID(ldg)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_intr_ldg_mgmt_set"
-				    " Invalid Input: "
-				    " ldg <0x%x>", ldg));
-		return (NPI_FAILURE | NPI_VIR_LDG_INVALID(ldg));
-	}
-	if (!LD_INTTIMER_VALID(timer)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_intr_ldg_mgmt_set Invalid Input"
-				    " timer <0x%x>", timer));
-		return (NPI_FAILURE | NPI_VIR_INTM_TM_INVALID(ldg));
-	}
-
-	if (arm) {
-		mgm.value = 0;
-		mgm.bits.ldw.arm = 1;
-	} else {
-		NXGE_REG_RD64(handle, LDGIMGN_REG + LDSV_OFFSET(ldg), &val);
-		mgm.value = val & LDGIMGM_ARM_MASK;
-	}
-
-	mgm.bits.ldw.timer = timer;
-	NXGE_REG_WR64(handle, LDGIMGN_REG + LDSV_OFFSET(ldg),
-		mgm.value);
-
-	NPI_DEBUG_MSG((handle.function, NPI_VIR_CTL,
-		" npi_intr_ldg_mgmt_set: ldg %d"
-		" reg offset 0x%x",
-		ldg, LDGIMGN_REG + LDSV_OFFSET(ldg)));
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_intr_ldg_mgmt_timer_get():
- *	This function is called to get the timer counter
- * Parameters:
- *	handle		- NPI handle
- *	ldg		- logical device group (0 - 63)
- *	timer_p		- pointer to store the timer counter.
- * Return:
- *	NPI_SUCCESS	-
- *	Error:
- *	NPI_FAILURE
- */
-
-npi_status_t
-npi_intr_ldg_mgmt_timer_get(npi_handle_t handle, uint8_t ldg, uint8_t *timer_p)
-{
-	uint64_t val;
-
-	ASSERT(LDG_VALID(ldg));
-	if (!LDG_VALID(ldg)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_intr_ldg_mgmt_timer_get"
-				    " Invalid Input: ldg <0x%x>", ldg));
-		return (NPI_FAILURE | NPI_VIR_LDG_INVALID(ldg));
-	}
-
-	NXGE_REG_RD64(handle, LDGIMGN_REG + LDSV_OFFSET(ldg), &val);
-
-	*timer_p = (uint8_t)(val & LDGIMGM_TIMER_MASK);
-
-	NPI_DEBUG_MSG((handle.function, NPI_VIR_CTL,
-		" npi_intr_ldg_mgmt_timer_get: ldg %d"
-		" reg offset 0x%x",
-		ldg, LDGIMGN_REG + LDSV_OFFSET(ldg)));
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_intr_ldg_mgmt_arm():
- *	This function is called to arm the group.
- * Parameters:
- *	handle		- NPI handle
- *	ldg		- logical device group (0 - 63)
- * Return:
- *	NPI_SUCCESS	-
- *	Error:
- *	NPI_FAILURE
- */
-
-npi_status_t
-npi_intr_ldg_mgmt_arm(npi_handle_t handle, uint8_t ldg)
-{
-	ldgimgm_t		mgm;
-
-	ASSERT(LDG_VALID(ldg));
-	if (!LDG_VALID(ldg)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_intr_ldg_mgmt_arm"
-				    " Invalid Input: ldg <0x%x>",
-				    ldg));
-		return (NPI_FAILURE | NPI_VIR_LDG_INVALID(ldg));
-	}
-
-	NXGE_REG_RD64(handle, (LDGIMGN_REG + LDSV_OFFSET(ldg)), &mgm.value);
-	mgm.bits.ldw.arm = 1;
-
-	NXGE_REG_WR64(handle, LDGIMGN_REG + LDSV_OFFSET(ldg),
-			mgm.value);
-	NPI_DEBUG_MSG((handle.function, NPI_VIR_CTL,
-		" npi_intr_ldg_mgmt_arm: ldg %d"
-		" reg offset 0x%x",
-		ldg, LDGIMGN_REG + LDSV_OFFSET(ldg)));
-
-	return (NPI_SUCCESS);
-}
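/*
 * Illustrative sketch (not part of the original source): assigning a logical
 * device to a group and arming that group with an interrupt timer, using
 * npi_fzc_ldg_num_set() and npi_intr_ldg_mgmt_set() above.  The device,
 * group and timer values are arbitrary examples.
 */
static npi_status_t
example_setup_group(npi_handle_t handle)
{
	npi_status_t status;

	/* Put logical device 0 into logical device group 1. */
	status = npi_fzc_ldg_num_set(handle, 0, 1);
	if (status != NPI_SUCCESS)
		return (status);

	/* Arm group 1 with an 8-tick interrupt timer. */
	return (npi_intr_ldg_mgmt_set(handle, 1, B_TRUE, 8));
}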
-
-/*
- * npi_fzc_ldg_timer_res_set():
- *	This function is called to set the timer resolution.
- * Parameters:
- *	handle		- NPI handle
- *	res		- timer resolution (# of system clocks)
- * Return:
- *	NPI_SUCCESS	-
- *	Error:
- *	NPI_FAILURE
- */
-
-npi_status_t
-npi_fzc_ldg_timer_res_set(npi_handle_t handle, uint32_t res)
-{
-	ASSERT(res <= LDGTITMRES_RES_MASK);
-	if (res > LDGTITMRES_RES_MASK) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_fzc_ldg_timer_res_set"
-				    " Invalid Input: res <0x%x>",
-				    res));
-		return (NPI_FAILURE | NPI_VIR_TM_RES_INVALID);
-	}
-
-	NXGE_REG_WR64(handle, LDGITMRES_REG, (res & LDGTITMRES_RES_MASK));
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_fzc_ldg_timer_res_get():
- *	This function is called to get the timer resolution.
- * Parameters:
- *	handle		- NPI handle
- *	res_p		- pointer to store the timer resolution.
- * Return:
- *	NPI_SUCCESS	-
- *	Error:
- *	NPI_FAILURE
- */
-
-npi_status_t
-npi_fzc_ldg_timer_res_get(npi_handle_t handle, uint8_t *res_p)
-{
-	uint64_t val;
-
-	NXGE_REG_RD64(handle, LDGITMRES_REG, &val);
-
-	*res_p = (uint8_t)(val & LDGIMGM_TIMER_MASK);
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_fzc_sid_set():
- *	This function is called to set the system interrupt data.
- * Parameters:
- *	handle		- NPI handle
- *	ldg		- logical group (0 - 63)
- *	sid		- NPI defined data to set
- * Return:
- *	NPI_SUCCESS	-
- *	Error:
- *	NPI_FAILURE
- */
-
-npi_status_t
-npi_fzc_sid_set(npi_handle_t handle, fzc_sid_t sid)
-{
-	sid_t		sd;
-
-	ASSERT(LDG_VALID(sid.ldg));
-	if (!LDG_VALID(sid.ldg)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_fzc_sid_set"
-				    " Invalid Input: ldg <0x%x>",
-				    sid.ldg));
-		return (NPI_FAILURE | NPI_VIR_LDG_INVALID(sid.ldg));
-	}
-	if (!sid.niu) {
-		ASSERT(FUNC_VALID(sid.func));
-		if (!FUNC_VALID(sid.func)) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_fzc_sid_set"
-					    " Invalid Input: func <0x%x>",
-					    sid.func));
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				"invalid FUNC: npi_fzc_sid_set(%d)", sid.func));
-			return (NPI_FAILURE | NPI_VIR_FUNC_INVALID(sid.func));
-		}
-
-		ASSERT(SID_VECTOR_VALID(sid.vector));
-		if (!SID_VECTOR_VALID(sid.vector)) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_fzc_sid_set"
-					    " Invalid Input: vector <0x%x>",
-					    sid.vector));
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " invalid VECTOR: npi_fzc_sid_set(%d)",
-				    sid.vector));
-			return (NPI_FAILURE |
-				NPI_VIR_SID_VEC_INVALID(sid.vector));
-		}
-	}
-	sd.value = 0;
-	if (!sid.niu) {
-		sd.bits.ldw.data = ((sid.func << SID_DATA_FUNCNUM_SHIFT) |
-				(sid.vector & SID_DATA_INTNUM_MASK));
-	}
-
-	NPI_DEBUG_MSG((handle.function, NPI_VIR_CTL,
-	    " npi_fzc_sid_set: group %d 0x%llx", sid.ldg, sd.value));
-
-	NXGE_REG_WR64(handle,  SID_REG + LDG_SID_OFFSET(sid.ldg), sd.value);
-
-	return (NPI_SUCCESS);
-}
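/*
 * Illustrative sketch (not part of the original source): building the
 * fzc_sid_t structure for a PCI function (non-NIU case) and programming the
 * system interrupt data for its group.  The group, function and vector
 * numbers are arbitrary examples.
 */
static npi_status_t
example_program_sid(npi_handle_t handle)
{
	fzc_sid_t sid;

	sid.niu = B_FALSE;	/* PCI function, not the NIU */
	sid.ldg = 1;		/* logical device group 1 */
	sid.func = 0;		/* function 0 - 3 */
	sid.vector = 1;		/* interrupt vector within the function */

	return (npi_fzc_sid_set(handle, sid));
}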
-
-/*
- * npi_fzc_sid_get():
- *	This function is called to get the system interrupt data.
- * Parameters:
- *	handle		- NPI handle
- *	ldg		- logical group (0 - 63)
- *	sid_p		- NPI defined data to get
- * Return:
- *	NPI_SUCCESS	-
- *	Error:
- *	NPI_FAILURE
- */
-
-npi_status_t
-npi_fzc_sid_get(npi_handle_t handle, p_fzc_sid_t sid_p)
-{
-	sid_t		sd;
-
-	ASSERT(LDG_VALID(sid_p->ldg));
-	if (!LDG_VALID(sid_p->ldg)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_fzc_sid_get"
-				    " Invalid Input: ldg <0x%x>",
-				    sid_p->ldg));
-		return (NPI_FAILURE | NPI_VIR_LDG_INVALID(sid_p->ldg));
-	}
-	NXGE_REG_RD64(handle, (SID_REG + LDG_SID_OFFSET(sid_p->ldg)),
-		&sd.value);
-	if (!sid_p->niu) {
-		sid_p->func = ((sd.bits.ldw.data & SID_DATA_FUNCNUM_MASK) >>
-			SID_DATA_FUNCNUM_SHIFT);
-		sid_p->vector = ((sd.bits.ldw.data & SID_DATA_INTNUM_MASK) >>
-			SID_DATA_INTNUM_SHIFT);
-	} else {
-		sid_p->vector = (sd.value & SID_DATA_MASK);
-	}
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_fzc_sys_err_mask_set():
- *	This function is called to mask/unmask the device error mask bits.
- *
- * Parameters:
- *	handle		- NPI handle
- *	mask		- set bit mapped mask
- * Return:
- *	NPI_SUCCESS	-
- *	Error:
- *	NPI_FAILURE
- */
-
-npi_status_t
-npi_fzc_sys_err_mask_set(npi_handle_t handle, uint64_t mask)
-{
-	NXGE_REG_WR64(handle,  SYS_ERR_MASK_REG, mask);
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_fzc_sys_err_stat_get():
- *	This function is called to get the system error stats.
- *
- * Parameters:
- *	handle		- NPI handle
- *	err_stat	- sys_err_stat structure to hold stats.
- * Return:
- *	NPI_SUCCESS	-
- *	Error:
- *	NPI_FAILURE
- */
-
-npi_status_t
-npi_fzc_sys_err_stat_get(npi_handle_t handle, p_sys_err_stat_t statp)
-{
-	NXGE_REG_RD64(handle,  SYS_ERR_STAT_REG, &statp->value);
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_fzc_rst_ctl_get(npi_handle_t handle, p_rst_ctl_t rstp)
-{
-	NXGE_REG_RD64(handle, RST_CTL_REG, &rstp->value);
-
-	return (NPI_SUCCESS);
-}
-
-/*
- * npi_fzc_rst_ctl_reset_mac():
- *	This function is called to reset the MAC of the specified port.
- * Parameters:
- *	handle		- NPI handle
- *	port		- port number of the MAC to reset
- * Return:
- *	NPI_SUCCESS	-
- *
- */
-
-npi_status_t
-npi_fzc_rst_ctl_reset_mac(npi_handle_t handle, uint8_t port)
-{
-	rst_ctl_t 		rst;
-
-	rst.value = 0;
-	NXGE_REG_RD64(handle, RST_CTL_REG, &rst.value);
-	rst.value |= (1 << (RST_CTL_MAC_RST0_SHIFT + port));
-	NXGE_REG_WR64(handle, RST_CTL_REG, rst.value);
-
-	return (NPI_SUCCESS);
-}
--- a/usr/src/uts/sun4v/io/nxge/npi/npi_vir.h	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,690 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef _NPI_VIR_H
-#define	_NPI_VIR_H
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#ifdef	__cplusplus
-extern "C" {
-#endif
-
-#include <npi.h>
-#include <nxge_hw.h>
-
-/*
- * Virtualization and Logical devices NPI error codes
- */
-#define	FUNCID_INVALID		PORT_INVALID
-#define	VIR_ERR_ST		(VIR_BLK_ID << NPI_BLOCK_ID_SHIFT)
-#define	VIR_ID_SHIFT(n)		(n << NPI_PORT_CHAN_SHIFT)
-
-#define	VIR_HW_BUSY		(NPI_BK_HW_ERROR_START | 0x1)
-
-#define	VIR_TAS_BUSY		(NPI_BK_ERROR_START | 0x1)
-#define	VIR_TAS_NOTREAD	(NPI_BK_ERROR_START | 0x2)
-
-#define	VIR_SR_RESET		(NPI_BK_ERROR_START | 0x3)
-#define	VIR_SR_FREE		(NPI_BK_ERROR_START | 0x4)
-#define	VIR_SR_BUSY		(NPI_BK_ERROR_START | 0x5)
-#define	VIR_SR_INVALID		(NPI_BK_ERROR_START | 0x6)
-#define	VIR_SR_NOTOWNER	(NPI_BK_ERROR_START | 0x7)
-#define	VIR_SR_INITIALIZED	(NPI_BK_ERROR_START | 0x8)
-
-#define	VIR_MPC_DENY		(NPI_BK_ERROR_START | 0x10)
-
-#define	VIR_BD_FUNC_INVALID	(NPI_BK_ERROR_START | 0x20)
-#define	VIR_BD_REG_INVALID	(NPI_BK_ERROR_START | 0x21)
-#define	VIR_BD_ID_INVALID	(NPI_BK_ERROR_START | 0x22)
-#define	VIR_BD_TXDMA_INVALID	(NPI_BK_ERROR_START | 0x23)
-#define	VIR_BD_RXDMA_INVALID	(NPI_BK_ERROR_START | 0x24)
-
-#define	VIR_LD_INVALID		(NPI_BK_ERROR_START | 0x30)
-#define	VIR_LDG_INVALID		(NPI_BK_ERROR_START | 0x31)
-#define	VIR_LDSV_INVALID	(NPI_BK_ERROR_START | 0x32)
-
-#define	VIR_INTM_TM_INVALID	(NPI_BK_ERROR_START | 0x33)
-#define	VIR_TM_RES_INVALID	(NPI_BK_ERROR_START | 0x34)
-#define	VIR_SID_VEC_INVALID	(NPI_BK_ERROR_START | 0x35)
-
-#define	NPI_VIR_OCODE_INVALID(n) (VIR_ID_SHIFT(n) | VIR_ERR_ST | OPCODE_INVALID)
-#define	NPI_VIR_FUNC_INVALID(n)	 (VIR_ID_SHIFT(n) | VIR_ERR_ST | FUNCID_INVALID)
-#define	NPI_VIR_CN_INVALID(n)	(VIR_ID_SHIFT(n) | VIR_ERR_ST | CHANNEL_INVALID)
-
-/*
- * Error codes of shared register functions.
- */
-#define	NPI_VIR_TAS_BUSY(n)	(VIR_ID_SHIFT(n) | VIR_ERR_ST | VIR_TAS_BUSY)
-#define	NPI_VIR_TAS_NOTREAD(n)	(VIR_ID_SHIFT(n) | VIR_ERR_ST | VIR_TAS_NOTREAD)
-#define	NPI_VIR_SR_RESET(n)	(VIR_ID_SHIFT(n) | VIR_ERR_ST | VIR_SR_RESET)
-#define	NPI_VIR_SR_FREE(n)	(VIR_ID_SHIFT(n) | VIR_ERR_ST | VIR_SR_FREE)
-#define	NPI_VIR_SR_BUSY(n)	(VIR_ID_SHIFT(n) | VIR_ERR_ST | VIR_SR_BUSY)
-#define	NPI_VIR_SR_INVALID(n)	(VIR_ID_SHIFT(n) | VIR_ERR_ST | VIR_SR_INVALID)
-#define	NPI_VIR_SR_NOTOWNER(n)	(VIR_ID_SHIFT(n) | VIR_ERR_ST | VIR_SR_NOTOWNER)
-#define	NPI_VIR_SR_INITIALIZED(n) (VIR_ID_SHIFT(n) | \
-					VIR_ERR_ST | VIR_SR_INITIALIZED)
-
-/*
- * Error codes of multi-partition control register functions.
- */
#define	NPI_VIR_MPC_DENY	(VIR_ERR_ST | VIR_MPC_DENY)
-
-/*
- * Error codes of DMA binding functions.
- */
-#define	NPI_VIR_BD_FUNC_INVALID(n)	(VIR_ID_SHIFT(n) | \
-					VIR_ERR_ST | VIR_BD_FUNC_INVALID)
-#define	NPI_VIR_BD_REG_INVALID(n)	(VIR_ID_SHIFT(n) | \
-					VIR_ERR_ST | VIR_BD_REG_INVALID)
-#define	NPI_VIR_BD_ID_INVALID(n)	(VIR_ID_SHIFT(n) | \
-					VIR_ERR_ST | VIR_BD_ID_INVALID)
-#define	NPI_VIR_BD_TXDMA_INVALID(n)	(VIR_ID_SHIFT(n) | \
-					VIR_ERR_ST | VIR_BD_TXDMA_INVALID)
-#define	NPI_VIR_BD_RXDMA_INVALID(n)	(VIR_ID_SHIFT(n) | \
-					VIR_ERR_ST | VIR_BD_RXDMA_INVALID)
-
-/*
- * Error codes of logical devices and groups functions.
- */
-#define	NPI_VIR_LD_INVALID(n) 	(VIR_ID_SHIFT(n) | VIR_ERR_ST | VIR_LD_INVALID)
-#define	NPI_VIR_LDG_INVALID(n)	(VIR_ID_SHIFT(n) | VIR_ERR_ST | VIR_LDG_INVALID)
-#define	NPI_VIR_LDSV_INVALID(n) (VIR_ID_SHIFT(n) | \
-					VIR_ERR_ST | VIR_LDSV_INVALID)
-#define	NPI_VIR_INTM_TM_INVALID(n)	(VIR_ID_SHIFT(n) | \
-					VIR_ERR_ST | VIR_INTM_TM_INVALID)
-#define	NPI_VIR_TM_RES_INVALID		(VIR_ERR_ST | VIR_TM_RES_INVALID)
-#define	NPI_VIR_SID_VEC_INVALID(n)	(VIR_ID_SHIFT(n) | \
-						VIR_ERR_ST | VIR_SID_VEC_INVALID)
-
-/*
- * Bit definition ([15:0] of the shared register
- * used by the driver as locking mechanism.
- *	[1:0]		lock state (RESET, FREE, BUSY)
- *	[3:2]		function ID (owner)
- *	[11:4]		Implementation specific states
- *	[15:12]  	Individual function state
- */
-#define	NPI_DEV_SR_LOCK_ST_RESET	0
-#define	NPI_DEV_SR_LOCK_ST_FREE		1
-#define	NPI_DEV_SR_LOCK_ST_BUSY		2
-
-#define	NPI_DEV_SR_LOCK_ST_SHIFT	0
-#define	NPI_DEV_SR_LOCK_ST_MASK		0x03
-#define	NPI_DEV_SR_LOCK_FID_SHIFT	2
-#define	NPI_DEV_SR_LOCK_FID_MASK	0x0C
-
-#define	NPI_DEV_SR_IMPL_ST_SHIFT	4
-#define	NPI_DEV_SR_IMPL_ST_MASK	0xfff0
-
-#define	NPI_GET_LOCK_OWNER(sr)		((sr & NPI_DEV_SR_LOCK_FID_MASK) \
-						>> NPI_DEV_SR_LOCK_FID_SHIFT)
-#define	NPI_GET_LOCK_ST(sr)		(sr & NPI_DEV_SR_LOCK_ST_MASK)
-#define	NPI_GET_LOCK_IMPL_ST(sr)	((sr & NPI_DEV_SR_IMPL_ST_MASK) \
-						>> NPI_DEV_SR_IMPL_ST_SHIFT)
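/*
 * Worked example (not part of the original source): a shared-register value
 * of 0x0016 decodes with the macros above as
 *	NPI_GET_LOCK_ST(0x0016)      == 2	(NPI_DEV_SR_LOCK_ST_BUSY)
 *	NPI_GET_LOCK_OWNER(0x0016)   == 1	(function 1 holds the lock)
 *	NPI_GET_LOCK_IMPL_ST(0x0016) == 1	(implementation state 0x1)
 */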
-
-/*
- * DMA channel binding definitions.
- */
-#define	DMA_BIND_VADDR_VALIDATE(fn, rn, id, status)			\
-{									\
-	status = NPI_SUCCESS;						\
-	if (!TXDMA_FUNC_VALID(fn)) {					\
-		status = (NPI_FAILURE | NPI_VIR_BD_FUNC_INVALID(fn));	\
-	} else if (!SUBREGION_VALID(rn)) {				\
-		status = (NPI_FAILURE | NPI_VIR_BD_REG_INVALID(rn));	\
-	} else if (!VIR_PAGE_INDEX_VALID(id)) {				\
-		status = (NPI_FAILURE | NPI_VIR_BD_ID_INVALID(id));	\
-	}								\
-}
-
-#define	DMA_BIND_TX_VALIDATE(n, status)					\
-{									\
-	status = NPI_SUCCESS;						\
-	if (!TXDMA_CHANNEL_VALID(n)) {					\
-		status = (NPI_FAILURE | NPI_VIR_BD_TXDMA_INVALID(n));	\
-	}								\
-}
-
-#define	DMA_BIND_RX_VALIDATE(n, status)					\
-{									\
-	status = NPI_SUCCESS;						\
-	if (!VRXDMA_CHANNEL_VALID(n)) {					\
-		status = (NPI_FAILURE | NPI_VIR_BD_RXDMA_INVALID(n));	\
-	}								\
-}
-
-#define	DMA_BIND_STEP			8
-#define	DMA_BIND_REG_OFFSET(fn, rn, id)	(DMA_BIND_STEP * \
-					(fn * 2 * VIR_PAGE_INDEX_MAX + \
-					rn * VIR_PAGE_INDEX_MAX) + id)
-
-/*
- * NPI defined data structure to program the DMA binding register.
- */
-typedef struct _fzc_dma_bind {
-	uint8_t		function_id;	/* 0 to 3 */
-	uint8_t		sub_vir_region;	/* 0 or 1 */
-	uint8_t		vir_index;	/* 0 to 7 */
-	boolean_t	tx_bind;	/* set 1 to bind */
-	uint8_t		tx_channel;	/* hardware channel number (0 - 23) */
-	boolean_t	rx_bind;	/* set 1 to bind */
-	uint8_t		rx_channel;	/* hardware channel number (0 - 15) */
-} fzc_dma_bind_t, *p_fzc_dma_bind;
-
-/*
- * Logical device definitions.
- */
-#define	LD_NUM_STEP		8
-#define	LD_NUM_OFFSET(ld)	(ld * LDG_NUM_STEP)
-#define	LDG_NUM_STEP		8
-#define	LDG_NUM_OFFSET(ldg)	(ldg * LDG_NUM_STEP)
-#define	LDGNUM_OFFSET(ldg)	(ldg * LDG_NUM_STEP)
-#define	LDSV_STEP		8192
-#define	LDSVG_OFFSET(ldg)	(ldg * LDSV_STEP)
-#define	LDSV_OFFSET(ldv)	(ldv * LDSV_STEP)
-
-#define	LDSV_OFFSET_MASK(ld)			\
-	(((ld < NXGE_MAC_LD_START) ?		\
-	(LD_IM0_REG + LDSV_OFFSET(ld)) :	\
-	(LD_IM1_REG + LDSV_OFFSET((ld - NXGE_MAC_LD_START)))))
-
-#define	LDG_SID_STEP		8
-#define	LDG_SID_OFFSET(ldg)	(ldg * LDG_SID_STEP)
-
-typedef enum {
-	LDF0,
-	LDF1
-} ldf_type_t;
-
-typedef enum {
-	VECTOR0,
-	VECTOR1,
-	VECTOR2
-} ldsv_type_t;
-
-/*
- * Definitions for the system interrupt data.
- */
-typedef struct _fzc_sid {
-	boolean_t	niu;
-	uint8_t		ldg;
-	uint8_t		func;
-	uint8_t		vector;
-} fzc_sid_t, *p_fzc_sid_t;
-
-/*
- * Virtualization and Interrupt Prototypes.
- */
-/*
- * npi_dev_func_sr_init():
- *	This function is called to initialize the device function
- *	shared register (set the software implementation lock
- *	state to FREE).
- * Parameters:
- *	handle		- NPI handle
- * Return:
- *	NPI_SUCCESS	- If initialization is complete successfully.
- *			  (set sr bits to free).
- *	Error:
- *	NPI_FAILURE
- *		VIR_TAS_BUSY
- */
-npi_status_t npi_dev_func_sr_init(npi_handle_t);
-
-/*
- * npi_dev_func_sr_lock_enter():
- *	This function is called to lock the function shared register
- *	by setting the lock state to busy.
- * Parameters:
- *	handle		- NPI handle
- * Return:
- *	NPI_SUCCESS	- If the function id can own the lock.
- *
- *	Error:
- *	NPI_FAILURE
- *		VIR_SR_RESET
- *		VIR_SR_BUSY
- *		VIR_SR_INVALID
- *		VIR_TAS_BUSY
- */
-npi_status_t npi_dev_func_sr_lock_enter(npi_handle_t);
-
-/*
- * npi_dev_func_sr_lock_free():
- *	This function is called to free the function shared register
- *	by setting the lock state to free.
- * Parameters:
- *	handle		- NPI handle
- * Return:
- *	NPI_SUCCESS	- If the function id can free the lock.
- *
- *	Error:
- *	NPI_FAILURE
- *		VIR_SR_NOTOWNER
- *		VIR_TAS_NOTREAD
- */
-npi_status_t npi_dev_func_sr_lock_free(npi_handle_t);
-
-/*
- * npi_dev_func_sr_funcid_get():
- *	This function is called to get the caller's function ID.
- *	(based on address bits [25:26] on read access.
- *	(After read, the TAS bit is always set to 1. Software needs
- *	to write 0 to clear.) This function will write 0 to clear
- *	the TAS bit if we own it.
- * Parameters:
- *	handle		- NPI handle
- *	funcid_p	- pointer to store the function id.
- * Return:
- *	NPI_SUCCESS	- If get function id is complete successfully.
- *
- *	Error:
- */
-npi_status_t npi_dev_func_sr_funcid_get(npi_handle_t, uint8_t *);
-
-/*
- * npi_dev_func_sr_sr_raw_get():
- *	This function is called to get the shared register value.
- *	(After read, the TAS bit is always set to 1. Software needs
- *	to write 0 to clear if we own it.)
- *
- * Parameters:
- *	handle		- NPI handle
- *	sr_p		- pointer to store the shared value of this register.
- *
- * Return:
- *	NPI_SUCCESS		- If shared value get is complete successfully.
- *
- *	Error:
- */
-npi_status_t npi_dev_func_sr_sr_raw_get(npi_handle_t, uint16_t *);
-
-/*
- * npi_dev_func_sr_sr_get():
- *	This function is called to get the shared register value.
- *	(After read, the TAS bit is always set to 1. Software needs
- *	to write 0 to clear if we own it.)
- *
- * Parameters:
- *	handle		- NPI handle
- *	sr_p		- pointer to store the shared value of this register.
- *			  (this will get only the non-lock, non-function
- *			  id portion of the register)
- *
- *
- * Return:
- *	NPI_SUCCESS		- If shared value get is complete successfully.
- *
- *	Error:
- */
-
-npi_status_t npi_dev_func_sr_sr_get(npi_handle_t, uint16_t *);
-
-/*
- * npi_dev_func_sr_sr_get_set_clear():
- *	This function is called to set the shared register value.
- *	(Shared register must be read first. If tas bit is 0, then
- *	it implies that the software can proceed to set). After
- *	setting, tas bit will be cleared.
- * Parameters:
- *	handle		- NPI handle
- *	impl_sr		- shared value to set (only the 8 bit
- *			  implementation specific state info).
- *
- * Return:
- *	NPI_SUCCESS		- If shared value is set successfully.
- *
- *	Error:
- *	NPI_FAILURE
- *		VIR_TAS_BUSY
- */
-npi_status_t npi_dev_func_sr_sr_get_set_clear(npi_handle_t,
-					    uint16_t);
-
-/*
- * npi_dev_func_sr_sr_set_only():
- *	This function is called to only set the shared register value.
- * Parameters:
- *	handle		- NPI handle
- *	impl_sr		- shared value to set.
- *
- * Return:
- *	NPI_SUCCESS		- If shared value is set successfully.
- *
- *	Error:
- *	NPI_FAILURE
- *		VIR_TAS_BUSY
- */
-npi_status_t npi_dev_func_sr_sr_set_only(npi_handle_t, uint16_t);
-
-/*
- * npi_dev_func_sr_busy():
- *	This function is called to see if we can own the device.
- *	It will not reset the tas bit.
- * Parameters:
- *	handle		- NPI handle
- *	busy_p		- pointer to store busy flag.
- *				(B_TRUE: device is in use, B_FALSE: free).
- * Return:
- *	NPI_SUCCESS		- If tas bit is read successfully.
- *	Error:
- */
-npi_status_t npi_dev_func_sr_busy(npi_handle_t, boolean_t *);
-
-/*
- * npi_dev_func_sr_tas_get():
- *	This function is called to get the tas bit
- *	(after read, this bit is always set to 1, software write 0
- *	 to clear it).
- *
- * Parameters:
- *	handle		- NPI handle
- *	tas_p		- pointer to store the tas value
- *
- * Return:
- *	NPI_SUCCESS		- If tas value get is complete successfully.
- *	Error:
- */
-npi_status_t npi_dev_func_sr_tas_get(npi_handle_t, uint8_t *);
-
-/*
- * npi_fzc_mpc_set():
- *	This function is called to enable the write access
- *	to FZC region to function zero.
- * Parameters:
- *	handle		- NPI handle
- * Return:
- *	NPI_SUCCESS	-
- *	Error:
- */
-npi_status_t npi_fzc_mpc_set(npi_handle_t, boolean_t);
-
-/*
- * npi_fzc_mpc_get():
- *	This function is called to get the access mode.
- * Parameters:
- *	handle		- NPI handle
- * Return:
- *	NPI_SUCCESS	-
- *
- */
-npi_status_t npi_fzc_mpc_get(npi_handle_t, boolean_t *);
-
-/*
- * npi_fzc_dma_bind_set():
- *	This function is called to set DMA binding register.
- * Parameters:
- *	handle		- NPI handle
- *	dma_bind	- NPI defined data structure that
- *			  contains the tx/rx channel binding info.
- *			  to set.
- * Return:
- *	NPI_SUCCESS	-
- *	Error:
- *	NPI_FAILURE
- *
- */
-npi_status_t npi_fzc_dma_bind_set(npi_handle_t, fzc_dma_bind_t);
-
-/*
- * npi_fzc_ldg_num_set():
- *	This function is called to set up a logical group number that
- *	a logical device belongs to.
- * Parameters:
- *	handle		- NPI handle
- *	ld		- logical device number (0 - 68)
- *	ldg		- logical device group number (0 - 63)
- * Return:
- *	NPI_SUCCESS	-
- *	Error:
- *	NPI_FAILURE
- *
- */
-npi_status_t npi_fzc_ldg_num_set(npi_handle_t, uint8_t, uint8_t);
-
-/*
- * npi_fzc_ldg_num_get():
- *	This function is called to get the logical device group that
- *	a logical device belongs to.
- * Parameters:
- *	handle		- NPI handle
- *	ld		- logical device number (0 - 68)
- *	*ldg_p		- pointer to store its group number.
- * Return:
- *	NPI_SUCCESS	-
- *	Error:
- *	NPI_FAILURE
- */
-npi_status_t npi_fzc_ldg_num_get(npi_handle_t, uint8_t,
-		uint8_t *);
-
-npi_status_t npi_ldsv_ldfs_get(npi_handle_t, uint8_t,
-		uint64_t *, uint64_t *, uint64_t *);
-/*
- * npi_ldsv_get():
- *	This function is called to get device state vectors.
- * Parameters:
- *	handle		- NPI handle
- *	ldg		- logical device group (0 - 63)
- *	ldf_type	- either LDF0 (0) or LDF1 (1)
- *	vector		- vector type (0, 1 or 2)
- *	*ldf_p		- pointer to store its flag bits.
- * Return:
- *	NPI_SUCCESS	-
- *	Error:
- *	NPI_FAILURE
- */
-npi_status_t npi_ldsv_get(npi_handle_t, uint8_t, ldsv_type_t,
-		uint64_t *);
-
-/*
- * npi_ldsv_ld_get():
- *	This function is called to get the flag bit value of a device.
- * Parameters:
- *	handle		- NPI handle
- *	ldg		- logical device group (0 - 63)
- *	ld		- logical device (0 - 68)
- *	ldf_type	- either LDF0 (0) or LDF1 (1)
- *	vector		- vector type (0, 1 or 2)
- *	*ldf_p		- pointer to store its flag bits.
- * Return:
- *	NPI_SUCCESS	-
- *	Error:
- *	NPI_FAILURE
- */
-npi_status_t npi_ldsv_ld_get(npi_handle_t, uint8_t, uint8_t,
-		ldsv_type_t, ldf_type_t, boolean_t *);
-/*
- * npi_ldsv_ld_ldf0_get():
- *	This function is called to get the ldf0 bit value of a device.
- * Parameters:
- *	handle		- NPI handle
- *	ldg		- logical device group (0 - 63)
- *	ld		- logical device (0 - 68)
- *	*ldf_p		- pointer to store its flag bits.
- * Return:
- *	NPI_SUCCESS	-
- *	Error:
- *	NPI_FAILURE
- */
-npi_status_t npi_ldsv_ld_ldf0_get(npi_handle_t, uint8_t, uint8_t,
-		boolean_t *);
-
-/*
- * npi_ldsv_ld_ldf1_get():
- *	This function is called to get the ldf1 bit value of a device.
- * Parameters:
- *	handle		- NPI handle
- *	ldg		- logical device group (0 - 63)
- *	ld		- logical device (0 - 68)
- *	*ldf_p		- pointer to store its flag bits.
- * Return:
- *	NPI_SUCCESS	-
- *	Error:
- *	NPI_FAILURE
- */
-npi_status_t npi_ldsv_ld_ldf1_get(npi_handle_t, uint8_t, uint8_t,
-		boolean_t *);
-/*
- * npi_intr_mask_set():
- *	This function is called to select the mask bits for both ldf0 and ldf1.
- * Parameters:
- *	handle		- NPI handle
- *	ld		- logical device (0 - 68)
- *	ldf_mask	- mask value to set (both ldf0 and ldf1).
- * Return:
- *	NPI_SUCCESS	-
- *	Error:
- *	NPI_FAILURE
- */
-npi_status_t npi_intr_mask_set(npi_handle_t, uint8_t,
-			uint8_t);
-
-/*
- * npi_intr_mask_get():
- *	This function is called to get the mask bits.
- * Parameters:
- *	handle		- NPI handle
- *	ld		- logical device (0 - 68)
- *	ldf_mask	- pointer to store mask bits info.
- * Return:
- *	NPI_SUCCESS	-
- *	Error:
- *	NPI_FAILURE
- */
-npi_status_t npi_intr_mask_get(npi_handle_t, uint8_t,
-			uint8_t *);
-
-/*
- * npi_intr_ldg_mgmt_set():
- *	This function is called to set interrupt timer and arm bit.
- * Parameters:
- *	handle		- NPI handle
- *	ldg		- logical device group (0 - 63)
- *	arm		- B_TRUE (arm) B_FALSE (disable)
- * Return:
- *	NPI_SUCCESS	-
- *	Error:
- *	NPI_FAILURE
- */
-npi_status_t npi_intr_ldg_mgmt_set(npi_handle_t, uint8_t,
-			boolean_t, uint8_t);
-
-
-/*
- * npi_intr_ldg_mgmt_timer_get():
- *	This function is called to get the timer counter
- * Parameters:
- *	handle		- NPI handle
- *	ldg		- logical device group (0 - 63)
- *	timer_p		- pointer to store the timer counter.
- * Return:
- *	NPI_SUCCESS	-
- *	Error:
- *	NPI_FAILURE
- */
-npi_status_t npi_intr_ldg_mgmt_timer_get(npi_handle_t, uint8_t,
-		uint8_t *);
-
-/*
- * npi_intr_ldg_mgmt_arm():
- *	This function is called to arm the group.
- * Parameters:
- *	handle		- NPI handle
- *	ldg		- logical device group (0 - 63)
- * Return:
- *	NPI_SUCCESS	-
- *	Error:
- *	NPI_FAILURE
- */
-npi_status_t npi_intr_ldg_mgmt_arm(npi_handle_t, uint8_t);
-
-/*
- * npi_fzc_ldg_timer_res_set():
- *	This function is called to set the timer resolution.
- * Parameters:
- *	handle		- NPI handle
- *	res		- timer resolution (# of system clocks)
- * Return:
- *	NPI_SUCCESS	-
- *	Error:
- *	NPI_FAILURE
- */
-npi_status_t npi_fzc_ldg_timer_res_set(npi_handle_t, uint32_t);
-
-/*
- * npi_fzc_ldg_timer_res_get():
- *	This function is called to get the timer resolution.
- * Parameters:
- *	handle		- NPI handle
- *	res_p		- pointer to store the timer resolution.
- * Return:
- *	NPI_SUCCESS	-
- *	Error:
- *	NPI_FAILURE
- */
-npi_status_t npi_fzc_ldg_timer_res_get(npi_handle_t, uint8_t *);
-
-/*
- * npi_fzc_sid_set():
- *	This function is called to set the system interrupt data.
- * Parameters:
- *	handle		- NPI handle
- *	ldg		- logical group (0 - 63)
- *	sid		- NPI defined data to set
- * Return:
- *	NPI_SUCCESS	-
- *	Error:
- *	NPI_FAILURE
- */
-npi_status_t npi_fzc_sid_set(npi_handle_t, fzc_sid_t);
-
-/*
- * npi_fzc_sid_get():
- *	This function is called to get the system interrupt data.
- * Parameters:
- *	handle		- NPI handle
- *	ldg		- logical group (0 - 63)
- *	sid_p		- NPI defined data to get
- * Return:
- *	NPI_SUCCESS	-
- *	Error:
- *	NPI_FAILURE
- */
-npi_status_t npi_fzc_sid_get(npi_handle_t, p_fzc_sid_t);
-npi_status_t npi_fzc_sys_err_mask_set(npi_handle_t, uint64_t);
-npi_status_t npi_fzc_sys_err_stat_get(npi_handle_t,
-						p_sys_err_stat_t);
-npi_status_t npi_vir_dump_pio_fzc_regs_one(npi_handle_t);
-npi_status_t npi_vir_dump_ldgnum(npi_handle_t);
-npi_status_t npi_vir_dump_ldsv(npi_handle_t);
-npi_status_t npi_vir_dump_imask0(npi_handle_t);
-npi_status_t npi_vir_dump_sid(npi_handle_t);
-#ifdef	__cplusplus
-}
-#endif
-
-#endif	/* _NPI_VIR_H */
--- a/usr/src/uts/sun4v/io/nxge/npi/npi_zcp.c	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,757 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#include <npi_zcp.h>
-
-static int zcp_mem_read(npi_handle_t, uint16_t, uint8_t,
-	uint16_t, zcp_ram_unit_t *);
-static int zcp_mem_write(npi_handle_t, uint16_t, uint8_t,
-	uint32_t, uint16_t, zcp_ram_unit_t *);
-
-npi_status_t
-npi_zcp_config(npi_handle_t handle, config_op_t op, zcp_config_t config)
-{
-	uint64_t val = 0;
-
-	switch (op) {
-	case ENABLE:
-	case DISABLE:
-		if ((config == 0) || (config & ~CFG_ZCP_ALL) != 0) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_zcp_config"
-					    " Invalid Input: config <0x%x>",
-					    config));
-			return (NPI_FAILURE | NPI_ZCP_CONFIG_INVALID);
-		}
-
-		NXGE_REG_RD64(handle, ZCP_CONFIG_REG, &val);
-		if (op == ENABLE) {
-			if (config & CFG_ZCP)
-				val |= ZC_ENABLE;
-			if (config & CFG_ZCP_ECC_CHK)
-				val &= ~ECC_CHK_DIS;
-			if (config & CFG_ZCP_PAR_CHK)
-				val &= ~PAR_CHK_DIS;
-			if (config & CFG_ZCP_BUF_RESP)
-				val &= ~DIS_BUFF_RN;
-			if (config & CFG_ZCP_BUF_REQ)
-				val &= ~DIS_BUFF_RQ_IF;
-		} else {
-			if (config & CFG_ZCP)
-				val &= ~ZC_ENABLE;
-			if (config & CFG_ZCP_ECC_CHK)
-				val |= ECC_CHK_DIS;
-			if (config & CFG_ZCP_PAR_CHK)
-				val |= PAR_CHK_DIS;
-			if (config & CFG_ZCP_BUF_RESP)
-				val |= DIS_BUFF_RN;
-			if (config & CFG_ZCP_BUF_REQ)
-				val |= DIS_BUFF_RQ_IF;
-		}
-		NXGE_REG_WR64(handle, ZCP_CONFIG_REG, val);
-
-		break;
-	case INIT:
-		NXGE_REG_RD64(handle, ZCP_CONFIG_REG, &val);
-		val &= ((ZCP_DEBUG_SEL_MASK) | (RDMA_TH_MASK));
-		if (config & CFG_ZCP)
-			val |= ZC_ENABLE;
-		else
-			val &= ~ZC_ENABLE;
-		if (config & CFG_ZCP_ECC_CHK)
-			val &= ~ECC_CHK_DIS;
-		else
-			val |= ECC_CHK_DIS;
-		if (config & CFG_ZCP_PAR_CHK)
-			val &= ~PAR_CHK_DIS;
-		else
-			val |= PAR_CHK_DIS;
-		if (config & CFG_ZCP_BUF_RESP)
-			val &= ~DIS_BUFF_RN;
-		else
-			val |= DIS_BUFF_RN;
-		if (config & CFG_ZCP_BUF_REQ)
-			val &= ~DIS_BUFF_RQ_IF;
-		else
-			val |= DIS_BUFF_RQ_IF;
-		NXGE_REG_WR64(handle, ZCP_CONFIG_REG, val);
-
-		break;
-	default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_zcp_config"
-					    " Invalid Input: op <0x%x>",
-					    op));
-		return (NPI_FAILURE | NPI_ZCP_OPCODE_INVALID);
-	}
-
-	return (NPI_SUCCESS);
-}
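/*
 * Illustrative sketch (not part of the original source): initializing the
 * ZCP block with ECC and parity checking enabled, using the CFG_ZCP_* flags
 * referenced in npi_zcp_config() above.
 */
static npi_status_t
example_zcp_init(npi_handle_t handle)
{
	zcp_config_t cfg;

	cfg = (zcp_config_t)(CFG_ZCP | CFG_ZCP_ECC_CHK | CFG_ZCP_PAR_CHK);

	return (npi_zcp_config(handle, INIT, cfg));
}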
-
-npi_status_t
-npi_zcp_iconfig(npi_handle_t handle, config_op_t op, zcp_iconfig_t iconfig)
-{
-	uint64_t val = 0;
-
-	switch (op) {
-	case ENABLE:
-	case DISABLE:
-		if ((iconfig == 0) || (iconfig & ~ICFG_ZCP_ALL) != 0) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_zcp_iconfig"
-					    " Invalid Input: iconfig <0x%x>",
-					    iconfig));
-			return (NPI_FAILURE | NPI_ZCP_CONFIG_INVALID);
-		}
-
-		NXGE_REG_RD64(handle, ZCP_INT_MASK_REG, &val);
-		if (op == ENABLE)
-			val |= iconfig;
-		else
-			val &= ~iconfig;
-		NXGE_REG_WR64(handle, ZCP_INT_MASK_REG, val);
-
-		break;
-
-	case INIT:
-		if ((iconfig & ~ICFG_ZCP_ALL) != 0) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_zcp_iconfig"
-					    " Invalid Input: iconfig <0x%x>",
-					    iconfig));
-			return (NPI_FAILURE | NPI_ZCP_CONFIG_INVALID);
-		}
-		val = (uint64_t)iconfig;
-		NXGE_REG_WR64(handle, ZCP_INT_MASK_REG, val);
-
-		break;
-	default:
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_zcp_iconfig"
-				    " Invalid Input: op <0x%x>",
-				    op));
-		return (NPI_FAILURE | NPI_ZCP_OPCODE_INVALID);
-	}
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_zcp_get_istatus(npi_handle_t handle, zcp_iconfig_t *istatus)
-{
-	uint64_t val;
-
-	NXGE_REG_RD64(handle, ZCP_INT_STAT_REG, &val);
-	*istatus = (uint32_t)val;
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_zcp_clear_istatus(npi_handle_t handle)
-{
-	uint64_t val;
-
-	val = (uint64_t)0xffff;
-	NXGE_REG_WR64(handle, ZCP_INT_STAT_REG, val);
-	return (NPI_SUCCESS);
-}
-
-
-npi_status_t
-npi_zcp_set_dma_thresh(npi_handle_t handle, uint16_t dma_thres)
-{
-	uint64_t val = 0;
-
-	if ((dma_thres & ~RDMA_TH_BITS) != 0) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_zcp_set_dma_thresh"
-				    " Invalid Input: dma_thres <0x%x>",
-				    dma_thres));
-		return (NPI_FAILURE | NPI_ZCP_DMA_THRES_INVALID);
-	}
-
-	NXGE_REG_RD64(handle, ZCP_CONFIG_REG, &val);
-
-	val &= ~RDMA_TH_MASK;
-	val |= (dma_thres << RDMA_TH_SHIFT);
-
-	NXGE_REG_WR64(handle, ZCP_CONFIG_REG, val);
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_zcp_set_bam_region(npi_handle_t handle, zcp_buf_region_t region,
-			zcp_bam_region_reg_t *region_attr)
-{
-
-	ASSERT(IS_VALID_BAM_REGION(region));
-	if (!IS_VALID_BAM_REGION(region)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_zcp_set_bam_region"
-				    " Invalid Input: region <0x%x>",
-				    region));
-		return (NPI_FAILURE | NPI_ZCP_BAM_REGION_INVALID);
-	}
-
-	switch (region) {
-	case BAM_4BUF:
-		NXGE_REG_WR64(handle, ZCP_BAM4_RE_CTL_REG, region_attr->value);
-		break;
-	case BAM_8BUF:
-		NXGE_REG_WR64(handle, ZCP_BAM8_RE_CTL_REG, region_attr->value);
-		break;
-	case BAM_16BUF:
-		NXGE_REG_WR64(handle, ZCP_BAM16_RE_CTL_REG, region_attr->value);
-		break;
-	case BAM_32BUF:
-		NXGE_REG_WR64(handle, ZCP_BAM32_RE_CTL_REG, region_attr->value);
-		break;
-	}
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_zcp_set_dst_region(npi_handle_t handle, zcp_buf_region_t region,
-				uint16_t row_idx)
-{
-	uint64_t val = 0;
-
-	ASSERT(IS_VALID_BAM_REGION(region));
-	if (!IS_VALID_BAM_REGION(region)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_zcp_set_dst_region"
-				    " Invalid Input: region <0x%x>",
-				    region));
-		return (NPI_FAILURE | NPI_ZCP_BAM_REGION_INVALID);
-	}
-
-	if ((row_idx & ~0x3FF) != 0) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_zcp_set_dst_region"
-				    " Invalid Input: row_idx <0x%x>", row_idx));
-		return (NPI_FAILURE | NPI_ZCP_ROW_INDEX_INVALID);
-	}
-
-	val = (uint64_t)row_idx;
-
-	switch (region) {
-	case BAM_4BUF:
-		NXGE_REG_WR64(handle, ZCP_DST4_RE_CTL_REG, val);
-		break;
-	case BAM_8BUF:
-		NXGE_REG_WR64(handle, ZCP_DST8_RE_CTL_REG, val);
-		break;
-	case BAM_16BUF:
-		NXGE_REG_WR64(handle, ZCP_DST16_RE_CTL_REG, val);
-		break;
-	case BAM_32BUF:
-		NXGE_REG_WR64(handle, ZCP_DST32_RE_CTL_REG, val);
-		break;
-	}
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_zcp_tt_static_entry(npi_handle_t handle, io_op_t op, uint16_t flow_id,
-			tte_sflow_attr_mask_t mask, tte_sflow_attr_t *sflow)
-{
-	uint32_t		byte_en = 0;
-	tte_sflow_attr_t	val;
-
-	if ((op != OP_SET) && (op != OP_GET)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_zcp_tt_static_entry"
-				    " Invalid Input: op <0x%x>",
-				    op));
-		return (NPI_FAILURE | NPI_ZCP_OPCODE_INVALID);
-	}
-
-	if ((mask & TTE_SFLOW_ATTR_ALL) == 0) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_zcp_tt_static_entry"
-				    " Invalid Input: mask <0x%x>",
-				    mask));
-		return (NPI_FAILURE | NPI_ZCP_SFLOW_ATTR_INVALID);
-	}
-
-	if ((flow_id & ~0x0FFF) != 0) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_zcp_tt_static_entry"
-				    " Invalid Input: flow_id<0x%x>",
-				    flow_id));
-		return (NPI_FAILURE | NPI_ZCP_FLOW_ID_INVALID);
-	}
-
-	if (zcp_mem_read(handle, flow_id, ZCP_RAM_SEL_TT_STATIC, NULL,
-			(zcp_ram_unit_t *)&val) != 0) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_zcp_tt_static_entry"
-				    " HW Error: ZCP_RAM_ACC <0x%x>",
-				    NULL));
-		return (NPI_FAILURE | NPI_ZCP_MEM_READ_FAILED);
-	}
-
-	if (op == OP_SET) {
-		if (mask & TTE_RDC_TBL_OFF) {
-			val.qw0.bits.ldw.rdc_tbl_offset =
-					sflow->qw0.bits.ldw.rdc_tbl_offset;
-			byte_en |= TTE_RDC_TBL_SFLOW_BITS_EN;
-		}
-		if (mask & TTE_BUF_SIZE) {
-			val.qw0.bits.ldw.buf_size =
-					sflow->qw0.bits.ldw.buf_size;
-			byte_en |= TTE_BUF_SIZE_BITS_EN;
-		}
-		if (mask & TTE_NUM_BUF) {
-			val.qw0.bits.ldw.num_buf = sflow->qw0.bits.ldw.num_buf;
-			byte_en |= TTE_NUM_BUF_BITS_EN;
-		}
-		if (mask & TTE_ULP_END) {
-			val.qw0.bits.ldw.ulp_end = sflow->qw0.bits.ldw.ulp_end;
-			byte_en |=  TTE_ULP_END_BITS_EN;
-		}
-		if (mask & TTE_ULP_END) {
-			val.qw1.bits.ldw.ulp_end = sflow->qw1.bits.ldw.ulp_end;
-			byte_en |= TTE_ULP_END_BITS_EN;
-		}
-		if (mask & TTE_ULP_END_EN) {
-			val.qw1.bits.ldw.ulp_end_en =
-				sflow->qw1.bits.ldw.ulp_end_en;
-			byte_en |= TTE_ULP_END_EN_BITS_EN;
-		}
-		if (mask & TTE_UNMAP_ALL_EN) {
-			val.qw1.bits.ldw.unmap_all_en =
-					sflow->qw1.bits.ldw.unmap_all_en;
-			byte_en |= TTE_UNMAP_ALL_EN;
-		}
-		if (mask & TTE_TMODE) {
-			val.qw1.bits.ldw.tmode = sflow->qw1.bits.ldw.tmode;
-			byte_en |= TTE_TMODE_BITS_EN;
-		}
-		if (mask & TTE_SKIP) {
-			val.qw1.bits.ldw.skip = sflow->qw1.bits.ldw.skip;
-			byte_en |= TTE_SKIP_BITS_EN;
-		}
-		if (mask & TTE_HBM_RING_BASE_ADDR) {
-			val.qw1.bits.ldw.ring_base =
-					sflow->qw1.bits.ldw.ring_base;
-			byte_en |= TTE_RING_BASE_ADDR_BITS_EN;
-		}
-		if (mask & TTE_HBM_RING_BASE_ADDR) {
-			val.qw2.bits.ldw.ring_base =
-					sflow->qw2.bits.ldw.ring_base;
-			byte_en |= TTE_RING_BASE_ADDR_BITS_EN;
-		}
-		if (mask & TTE_HBM_RING_SIZE) {
-			val.qw2.bits.ldw.ring_size =
-					sflow->qw2.bits.ldw.ring_size;
-			byte_en |= TTE_RING_SIZE_BITS_EN;
-		}
-		if (mask & TTE_HBM_BUSY) {
-			val.qw2.bits.ldw.busy = sflow->qw2.bits.ldw.busy;
-			byte_en |= TTE_BUSY_BITS_EN;
-		}
-		if (mask & TTE_HBM_TOQ) {
-			val.qw3.bits.ldw.toq = sflow->qw3.bits.ldw.toq;
-			byte_en |= TTE_TOQ_BITS_EN;
-		}
-
-		if (zcp_mem_write(handle, flow_id, ZCP_RAM_SEL_TT_STATIC,
-					byte_en, NULL,
-					(zcp_ram_unit_t *)&val) != 0) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_zcp_tt_static_entry"
-					    " HW Error: ZCP_RAM_ACC <0x%x>",
-					    NULL));
-			return (NPI_FAILURE | NPI_ZCP_MEM_WRITE_FAILED);
-		}
-	} else {
-		sflow->qw0.value = val.qw0.value;
-		sflow->qw1.value = val.qw1.value;
-		sflow->qw2.value = val.qw2.value;
-		sflow->qw3.value = val.qw3.value;
-		sflow->qw4.value = val.qw4.value;
-	}
-
-	return (NPI_SUCCESS);
-}
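/*
 * Illustrative sketch (not part of the original source): updating only the
 * buffer size field of one static translation-table entry via
 * npi_zcp_tt_static_entry().  The flow id and the buffer size code are
 * arbitrary example values.
 */
static npi_status_t
example_set_flow_buf_size(npi_handle_t handle)
{
	tte_sflow_attr_t sflow;

	sflow.qw0.value = 0;
	sflow.qw0.bits.ldw.buf_size = 1;	/* example buffer size code */

	/* Only the field selected by TTE_BUF_SIZE is merged into the entry. */
	return (npi_zcp_tt_static_entry(handle, OP_SET, 0x10,
	    TTE_BUF_SIZE, &sflow));
}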
-
-npi_status_t
-npi_zcp_tt_dynamic_entry(npi_handle_t handle, io_op_t op, uint16_t flow_id,
-			tte_dflow_attr_mask_t mask, tte_dflow_attr_t *dflow)
-{
-	uint32_t		byte_en = 0;
-	tte_dflow_attr_t	val;
-
-	if ((op != OP_SET) && (op != OP_GET)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_zcp_tt_dynamic_entry"
-				    " Invalid Input: op <0x%x>", op));
-		return (NPI_FAILURE | NPI_ZCP_OPCODE_INVALID);
-	}
-
-	if ((mask & TTE_DFLOW_ATTR_ALL) == 0) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_zcp_tt_dynamic_entry"
-				    " Invalid Input: mask <0x%x>",
-				    mask));
-		return (NPI_FAILURE | NPI_ZCP_DFLOW_ATTR_INVALID);
-	}
-
-	if ((flow_id & ~0x0FFF) != 0) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_zcp_tt_dynamic_entry"
-				    " Invalid Input: flow_id <0x%x>",
-				    flow_id));
-		return (NPI_FAILURE | NPI_ZCP_FLOW_ID_INVALID);
-	}
-
-	if (zcp_mem_read(handle, flow_id, ZCP_RAM_SEL_TT_DYNAMIC, NULL,
-			(zcp_ram_unit_t *)&val) != 0) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_zcp_tt_dynamic_entry"
-				    " HW Error: ZCP_RAM_ACC <0x%x>",
-				    NULL));
-		return (NPI_FAILURE | NPI_ZCP_MEM_READ_FAILED);
-	}
-
-	if (op == OP_SET) {
-
-		/* Get data read */
-		if (mask & TTE_MAPPED_IN) {
-			val.qw0.bits.ldw.mapped_in =
-					dflow->qw0.bits.ldw.mapped_in;
-			byte_en |= TTE_MAPPED_IN_BITS_EN;
-		}
-		if (mask & TTE_ANCHOR_SEQ) {
-			val.qw1.bits.ldw.anchor_seq =
-					dflow->qw1.bits.ldw.anchor_seq;
-			byte_en |= TTE_ANCHOR_SEQ_BITS_EN;
-		}
-		if (mask & TTE_ANCHOR_OFFSET) {
-			val.qw2.bits.ldw.anchor_offset =
-					dflow->qw2.bits.ldw.anchor_offset;
-			byte_en |= TTE_ANCHOR_OFFSET_BITS_EN;
-		}
-		if (mask & TTE_ANCHOR_BUFFER) {
-			val.qw2.bits.ldw.anchor_buf =
-					dflow->qw2.bits.ldw.anchor_buf;
-			byte_en |= TTE_ANCHOR_BUFFER_BITS_EN;
-		}
-		if (mask & TTE_ANCHOR_BUF_FLAG) {
-			val.qw2.bits.ldw.anchor_buf_flag =
-					dflow->qw2.bits.ldw.anchor_buf_flag;
-			byte_en |= TTE_ANCHOR_BUF_FLAG_BITS_EN;
-		}
-		if (mask & TTE_UNMAP_ON_LEFT) {
-			val.qw2.bits.ldw.unmap_on_left =
-					dflow->qw2.bits.ldw.unmap_on_left;
-			byte_en |= TTE_UNMAP_ON_LEFT_BITS_EN;
-		}
-		if (mask & TTE_ULP_END_REACHED) {
-			val.qw2.bits.ldw.ulp_end_reached =
-					dflow->qw2.bits.ldw.ulp_end_reached;
-			byte_en |= TTE_ULP_END_REACHED_BITS_EN;
-		}
-		if (mask & TTE_ERR_STAT) {
-			val.qw3.bits.ldw.err_stat =
-					dflow->qw3.bits.ldw.err_stat;
-			byte_en |= TTE_ERR_STAT_BITS_EN;
-		}
-		if (mask & TTE_HBM_WR_PTR) {
-			val.qw3.bits.ldw.wr_ptr = dflow->qw3.bits.ldw.wr_ptr;
-			byte_en |= TTE_WR_PTR_BITS_EN;
-		}
-		if (mask & TTE_HBM_HOQ) {
-			val.qw3.bits.ldw.hoq = dflow->qw3.bits.ldw.hoq;
-			byte_en |= TTE_HOQ_BITS_EN;
-		}
-		if (mask & TTE_HBM_PREFETCH_ON) {
-			val.qw3.bits.ldw.prefetch_on =
-					dflow->qw3.bits.ldw.prefetch_on;
-			byte_en |= TTE_PREFETCH_ON_BITS_EN;
-		}
-
-		if (zcp_mem_write(handle, flow_id, ZCP_RAM_SEL_TT_DYNAMIC,
-					byte_en, NULL,
-					(zcp_ram_unit_t *)&val) != 0) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_zcp_tt_dynamic_entry"
-					    " HW Error: ZCP_RAM_ACC <0x%x>",
-					    NULL));
-			return (NPI_FAILURE | NPI_ZCP_MEM_WRITE_FAILED);
-		}
-	} else {
-		dflow->qw0.value = val.qw0.value;
-		dflow->qw1.value = val.qw1.value;
-		dflow->qw2.value = val.qw2.value;
-		dflow->qw3.value = val.qw3.value;
-		dflow->qw4.value = val.qw4.value;
-	}
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_zcp_tt_bam_entry(npi_handle_t handle, io_op_t op, uint16_t flow_id,
-			uint8_t bankn, uint8_t word_en, zcp_ram_unit_t *data)
-{
-	zcp_ram_unit_t val;
-
-	if ((op != OP_SET) && (op != OP_GET)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_zcp_tt_bam_entry"
-				    " Invalid Input: op <0x%x>", op));
-		return (NPI_FAILURE | NPI_ZCP_OPCODE_INVALID);
-	}
-
-	if ((flow_id & ~0x0FFF) != 0) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_zcp_tt_bam_entry"
-				    " Invalid Input: flow_id <0x%x>",
-				    flow_id));
-		return (NPI_FAILURE | NPI_ZCP_FLOW_ID_INVALID);
-	}
-
-	if (bankn >= MAX_BAM_BANKS) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_zcp_tt_bam_entry"
-				    " Invalid Input: bankn <0x%x>",
-				    bankn));
-		return (NPI_FAILURE | NPI_ZCP_BAM_BANK_INVALID);
-	}
-
-	if ((word_en & ~0xF) != 0) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_zcp_tt_bam_entry"
-				    " Invalid Input: word_en <0x%x>",
-				    word_en));
-		return (NPI_FAILURE | NPI_ZCP_BAM_WORD_EN_INVALID);
-	}
-
-	if (zcp_mem_read(handle, flow_id, ZCP_RAM_SEL_BAM0 + bankn, NULL,
-				(zcp_ram_unit_t *)&val) != 0) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_zcp_tt_bam_entry"
-				    " HW Error: ZCP_RAM_ACC <0x%x>",
-				    NULL));
-		return (NPI_FAILURE | NPI_ZCP_MEM_READ_FAILED);
-	}
-
-	if (op == OP_SET) {
-		if (zcp_mem_write(handle, flow_id, ZCP_RAM_SEL_BAM0 + bankn,
-					word_en, NULL,
-					(zcp_ram_unit_t *)&val) != 0) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_zcp_tt_bam_entry"
-					    " HW Error: ZCP_RAM_ACC <0x%x>",
-					    NULL));
-			return (NPI_FAILURE | NPI_ZCP_MEM_WRITE_FAILED);
-		}
-	} else {
-		data->w0 = val.w0;
-		data->w1 = val.w1;
-		data->w2 = val.w2;
-		data->w3 = val.w3;
-	}
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_zcp_tt_cfifo_entry(npi_handle_t handle, io_op_t op, uint8_t portn,
-			uint16_t entryn, zcp_ram_unit_t *data)
-{
-	if ((op != OP_SET) && (op != OP_GET)) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_zcp_tt_cfifo_entry"
-				    " Invalid Input: op <0x%x>", op));
-		return (NPI_FAILURE | NPI_ZCP_OPCODE_INVALID);
-	}
-
-	if (portn > 3) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " npi_zcp_tt_cfifo_entry"
-				    " Invalid Input: portn <%d>", portn));
-		return (NPI_FAILURE | NPI_ZCP_PORT_INVALID(portn));
-	}
-
-	if (op == OP_SET) {
-		if (zcp_mem_write(handle, NULL, ZCP_RAM_SEL_CFIFO0 + portn,
-					0x1ffff, entryn, data) != 0) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_zcp_tt_cfifo_entry"
-					    " HW Error: ZCP_RAM_ACC <0x%x>",
-					    NULL));
-			return (NPI_FAILURE | NPI_ZCP_MEM_WRITE_FAILED);
-		}
-	} else {
-		if (zcp_mem_read(handle, NULL, ZCP_RAM_SEL_CFIFO0 + portn,
-					entryn, data) != 0) {
-			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-					    " npi_zcp_tt_cfifo_entry"
-					    " HW Error: ZCP_RAM_ACC <0x%x>",
-					NULL));
-			return (NPI_FAILURE | NPI_ZCP_MEM_READ_FAILED);
-		}
-	}
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_zcp_rest_cfifo_port(npi_handle_t handle, uint8_t port)
-{
-	uint64_t offset = ZCP_RESET_CFIFO_REG;
-	zcp_reset_cfifo_t cfifo_reg;
-	NXGE_REG_RD64(handle, offset, &cfifo_reg.value);
-	cfifo_reg.value &= ZCP_RESET_CFIFO_MASK;
-
-	switch (port) {
-		case 0:
-			cfifo_reg.bits.ldw.reset_cfifo0 = 1;
-			NXGE_REG_WR64(handle, offset, cfifo_reg.value);
-			cfifo_reg.bits.ldw.reset_cfifo0 = 0;
-
-			break;
-		case 1:
-			cfifo_reg.bits.ldw.reset_cfifo1 = 1;
-			NXGE_REG_WR64(handle, offset, cfifo_reg.value);
-			cfifo_reg.bits.ldw.reset_cfifo1 = 0;
-			break;
-		case 2:
-			cfifo_reg.bits.ldw.reset_cfifo2 = 1;
-			NXGE_REG_WR64(handle, offset, cfifo_reg.value);
-			cfifo_reg.bits.ldw.reset_cfifo2 = 0;
-			break;
-		case 3:
-			cfifo_reg.bits.ldw.reset_cfifo3 = 1;
-			NXGE_REG_WR64(handle, offset, cfifo_reg.value);
-			cfifo_reg.bits.ldw.reset_cfifo3 = 0;
-			break;
-		default:
-			break;
-	}
-
-	NXGE_DELAY(ZCP_CFIFIO_RESET_WAIT);
-	NXGE_REG_WR64(handle, offset, cfifo_reg.value);
-
-	return (NPI_SUCCESS);
-}
-
-npi_status_t
-npi_zcp_rest_cfifo_all(npi_handle_t handle)
-{
-	uint64_t offset = ZCP_RESET_CFIFO_REG;
-	zcp_reset_cfifo_t cfifo_reg;
-
-	cfifo_reg.value = ZCP_RESET_CFIFO_MASK;
-	NXGE_REG_WR64(handle, offset, cfifo_reg.value);
-	cfifo_reg.value = 0;
-	NXGE_DELAY(ZCP_CFIFIO_RESET_WAIT);
-	NXGE_REG_WR64(handle, offset, cfifo_reg.value);
-	return (NPI_SUCCESS);
-}
-
-static int
-zcp_mem_read(npi_handle_t handle, uint16_t flow_id, uint8_t ram_sel,
-		uint16_t cfifo_entryn, zcp_ram_unit_t *val)
-{
-	zcp_ram_access_t ram_ctl;
-
-	ram_ctl.value = 0;
-	ram_ctl.bits.ldw.ram_sel = ram_sel;
-	ram_ctl.bits.ldw.zcfid = flow_id;
-	ram_ctl.bits.ldw.rdwr = ZCP_RAM_RD;
-	ram_ctl.bits.ldw.cfifo = cfifo_entryn;
-
-	/* Wait for RAM ready to be read */
-	ZCP_WAIT_RAM_READY(handle, ram_ctl.value);
-	if (ram_ctl.bits.ldw.busy != 0) {
-		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
-				    " zcp_mem_read"
-				    " HW Error: ZCP_RAM_ACC <0x%x>",
-				    ram_ctl.value));
-		return (-1);
-	}
-
-	/* Read from RAM */
-	NXGE_REG_WR64(handle, ZCP_RAM_ACC_REG, ram_ctl.value);
-
-	/* Wait for RAM read done */
-	ZCP_WAIT_RAM_READY(handle, ram_ctl.value);
-	if (ram_ctl.bits.ldw.busy != 0)
-		return (-1);
-
-	/* Get data */
-	NXGE_REG_RD64(handle, ZCP_RAM_DATA0_REG, &val->w0);
-	NXGE_REG_RD64(handle, ZCP_RAM_DATA1_REG, &val->w1);
-	NXGE_REG_RD64(handle, ZCP_RAM_DATA2_REG, &val->w2);
-	NXGE_REG_RD64(handle, ZCP_RAM_DATA3_REG, &val->w3);
-	NXGE_REG_RD64(handle, ZCP_RAM_DATA4_REG, &val->w4);
-
-	return (0);
-}
-
-static int
-zcp_mem_write(npi_handle_t handle, uint16_t flow_id, uint8_t ram_sel,
-		uint32_t byte_en, uint16_t cfifo_entryn, zcp_ram_unit_t *val)
-{
-	zcp_ram_access_t	ram_ctl;
-	zcp_ram_benable_t	ram_en;
-
-	ram_ctl.value = 0;
-	ram_ctl.bits.ldw.ram_sel = ram_sel;
-	ram_ctl.bits.ldw.zcfid = flow_id;
-	ram_ctl.bits.ldw.rdwr = ZCP_RAM_WR;
-	ram_en.bits.ldw.be = byte_en;
-	ram_ctl.bits.ldw.cfifo = cfifo_entryn;
-
-	/* Setup data */
-	NXGE_REG_WR64(handle, ZCP_RAM_DATA0_REG, val->w0);
-	NXGE_REG_WR64(handle, ZCP_RAM_DATA1_REG, val->w1);
-	NXGE_REG_WR64(handle, ZCP_RAM_DATA2_REG, val->w2);
-	NXGE_REG_WR64(handle, ZCP_RAM_DATA3_REG, val->w3);
-	NXGE_REG_WR64(handle, ZCP_RAM_DATA4_REG, val->w4);
-
-	/* Set byte mask */
-	NXGE_REG_WR64(handle, ZCP_RAM_BE_REG, ram_en.value);
-
-	/* Write to RAM */
-	NXGE_REG_WR64(handle, ZCP_RAM_ACC_REG, ram_ctl.value);
-
-	/* Wait for RAM write complete */
-	ZCP_WAIT_RAM_READY(handle, ram_ctl.value);
-	if (ram_ctl.bits.ldw.busy != 0)
-		return (-1);
-
-	return (0);
-}
--- a/usr/src/uts/sun4v/io/nxge/npi/npi_zcp.h	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,187 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef _NPI_ZCP_H
-#define	_NPI_ZCP_H
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#ifdef	__cplusplus
-extern "C" {
-#endif
-
-#include <npi.h>
-#include <nxge_zcp_hw.h>
-
-typedef	enum zcp_buf_region_e {
-	BAM_4BUF			= 1,
-	BAM_8BUF			= 2,
-	BAM_16BUF			= 3,
-	BAM_32BUF			= 4
-} zcp_buf_region_t;
-
-typedef enum zcp_config_e {
-	CFG_ZCP				= 0x01,
-	CFG_ZCP_ECC_CHK			= 0x02,
-	CFG_ZCP_PAR_CHK			= 0x04,
-	CFG_ZCP_BUF_RESP		= 0x08,
-	CFG_ZCP_BUF_REQ			= 0x10,
-	CFG_ZCP_ALL			= 0x1F
-} zcp_config_t;
-
-typedef enum zcp_iconfig_e {
-	ICFG_ZCP_RRFIFO_UNDERRUN	= RRFIFO_UNDERRUN,
-	ICFG_ZCP_RRFIFO_OVERRUN		= RRFIFO_OVERRUN,
-	ICFG_ZCP_RSPFIFO_UNCORR_ERR	= RSPFIFO_UNCORR_ERR,
-	ICFG_ZCP_BUFFER_OVERFLOW	= BUFFER_OVERFLOW,
-	ICFG_ZCP_STAT_TBL_PERR		= STAT_TBL_PERR,
-	ICFG_ZCP_DYN_TBL_PERR		= BUF_DYN_TBL_PERR,
-	ICFG_ZCP_BUF_TBL_PERR		= BUF_TBL_PERR,
-	ICFG_ZCP_TT_PROGRAM_ERR		= TT_PROGRAM_ERR,
-	ICFG_ZCP_RSP_TT_INDEX_ERR	= RSP_TT_INDEX_ERR,
-	ICFG_ZCP_SLV_TT_INDEX_ERR	= SLV_TT_INDEX_ERR,
-	ICFG_ZCP_TT_INDEX_ERR		= ZCP_TT_INDEX_ERR,
-	ICFG_ZCP_CFIFO_ECC3		= CFIFO_ECC3,
-	ICFG_ZCP_CFIFO_ECC2		= CFIFO_ECC2,
-	ICFG_ZCP_CFIFO_ECC1		= CFIFO_ECC1,
-	ICFG_ZCP_CFIFO_ECC0		= CFIFO_ECC0,
-	ICFG_ZCP_ALL			= (RRFIFO_UNDERRUN | RRFIFO_OVERRUN |
-				RSPFIFO_UNCORR_ERR | STAT_TBL_PERR |
-				BUF_DYN_TBL_PERR | BUF_TBL_PERR |
-				TT_PROGRAM_ERR | RSP_TT_INDEX_ERR |
-				SLV_TT_INDEX_ERR | ZCP_TT_INDEX_ERR |
-				CFIFO_ECC3 | CFIFO_ECC2 |  CFIFO_ECC1 |
-				CFIFO_ECC0 | BUFFER_OVERFLOW)
-} zcp_iconfig_t;
-
-typedef enum tte_sflow_attr_mask_e {
-	TTE_RDC_TBL_OFF			= 0x0001,
-	TTE_BUF_SIZE			= 0x0002,
-	TTE_NUM_BUF			= 0x0004,
-	TTE_ULP_END			= 0x0008,
-	TTE_ULP_END_EN			= 0x0010,
-	TTE_UNMAP_ALL_EN		= 0x0020,
-	TTE_TMODE			= 0x0040,
-	TTE_SKIP			= 0x0080,
-	TTE_HBM_RING_BASE_ADDR		= 0x0100,
-	TTE_HBM_RING_SIZE		= 0x0200,
-	TTE_HBM_BUSY			= 0x0400,
-	TTE_HBM_TOQ			= 0x0800,
-	TTE_SFLOW_ATTR_ALL		= 0x0FFF
-} tte_sflow_attr_mask_t;
-
-typedef	enum tte_dflow_attr_mask_e {
-	TTE_MAPPED_IN			= 0x0001,
-	TTE_ANCHOR_SEQ			= 0x0002,
-	TTE_ANCHOR_OFFSET		= 0x0004,
-	TTE_ANCHOR_BUFFER		= 0x0008,
-	TTE_ANCHOR_BUF_FLAG		= 0x0010,
-	TTE_UNMAP_ON_LEFT		= 0x0020,
-	TTE_ULP_END_REACHED		= 0x0040,
-	TTE_ERR_STAT			= 0x0080,
-	TTE_HBM_WR_PTR			= 0x0100,
-	TTE_HBM_HOQ			= 0x0200,
-	TTE_HBM_PREFETCH_ON		= 0x0400,
-	TTE_DFLOW_ATTR_ALL		= 0x07FF
-} tte_dflow_attr_mask_t;
-
-#define	IS_VALID_BAM_REGION(region)\
-		((region == BAM_4BUF) || (region == BAM_8BUF) ||\
-		(region == BAM_16BUF) || (region == BAM_32BUF))
-
-#define	ZCP_WAIT_RAM_READY(handle, val) {\
-	uint32_t cnt = MAX_PIO_RETRIES;\
-	do {\
-		NXGE_REG_RD64(handle, ZCP_RAM_ACC_REG, &val);\
-		cnt--;\
-	} while ((ram_ctl.bits.ldw.busy != 0) && (cnt > 0));\
-}
-
-#define	ZCP_DMA_THRES_INVALID		0x10
-#define	ZCP_BAM_REGION_INVALID		0x11
-#define	ZCP_ROW_INDEX_INVALID		0x12
-#define	ZCP_SFLOW_ATTR_INVALID		0x13
-#define	ZCP_DFLOW_ATTR_INVALID		0x14
-#define	ZCP_FLOW_ID_INVALID		0x15
-#define	ZCP_BAM_BANK_INVALID		0x16
-#define	ZCP_BAM_WORD_EN_INVALID		0x17
-
-#define	NPI_ZCP_OPCODE_INVALID		((ZCP_BLK_ID << 8) | OPCODE_INVALID)
-#define	NPI_ZCP_CONFIG_INVALID		((ZCP_BLK_ID << 8) | CONFIG_INVALID)
-#define	NPI_ZCP_DMA_THRES_INVALID	((ZCP_BLK_ID << 8) |\
-					ZCP_DMA_THRES_INVALID)
-#define	NPI_ZCP_BAM_REGION_INVALID	((ZCP_BLK_ID << 8) |\
-					ZCP_BAM_REGION_INVALID)
-#define	NPI_ZCP_ROW_INDEX_INVALID	((ZCP_BLK_ID << 8) |\
-					ZCP_ROW_INDEX_INVALID)
-#define	NPI_ZCP_SFLOW_ATTR_INVALID	((ZCP_BLK_ID << 8) |\
-					ZCP_SFLOW_ATTR_INVALID)
-#define	NPI_ZCP_DFLOW_ATTR_INVALID	((ZCP_BLK_ID << 8) |\
-					ZCP_DFLOW_ATTR_INVALID)
-#define	NPI_ZCP_FLOW_ID_INVALID		((ZCP_BLK_ID << 8) |\
-					ZCP_FLOW_ID_INVALID)
-#define	NPI_ZCP_MEM_WRITE_FAILED	((ZCP_BLK_ID << 8) | WRITE_FAILED)
-#define	NPI_ZCP_MEM_READ_FAILED		((ZCP_BLK_ID << 8) | READ_FAILED)
-#define	NPI_ZCP_BAM_BANK_INVALID	((ZCP_BLK_ID << 8) |\
-					(ZCP_BAM_BANK_INVALID))
-#define	NPI_ZCP_BAM_WORD_EN_INVALID	((ZCP_BLK_ID << 8) |\
-					(ZCP_BAM_WORD_EN_INVALID))
-#define	NPI_ZCP_PORT_INVALID(portn)	((ZCP_BLK_ID << 8) | PORT_INVALID |\
-					(portn << 12))
-
-/* ZCP HW NPI Prototypes */
-npi_status_t npi_zcp_config(npi_handle_t, config_op_t,
-				zcp_config_t);
-npi_status_t npi_zcp_iconfig(npi_handle_t, config_op_t,
-				zcp_iconfig_t);
-npi_status_t npi_zcp_get_istatus(npi_handle_t, zcp_iconfig_t *);
-npi_status_t npi_zcp_clear_istatus(npi_handle_t);
-npi_status_t npi_zcp_set_dma_thresh(npi_handle_t, uint16_t);
-npi_status_t npi_zcp_set_bam_region(npi_handle_t,
-				zcp_buf_region_t,
-				zcp_bam_region_reg_t *);
-npi_status_t npi_zcp_set_sdt_region(npi_handle_t,
-				zcp_buf_region_t, uint16_t);
-npi_status_t npi_zcp_tt_static_entry(npi_handle_t, io_op_t,
-				uint16_t, tte_sflow_attr_mask_t,
-				tte_sflow_attr_t *);
-npi_status_t npi_zcp_tt_dynamic_entry(npi_handle_t, io_op_t,
-				uint16_t, tte_dflow_attr_mask_t,
-				tte_dflow_attr_t *);
-npi_status_t npi_zcp_tt_bam_entry(npi_handle_t, io_op_t,
-				uint16_t, uint8_t,
-				uint8_t, zcp_ram_unit_t *);
-npi_status_t npi_zcp_tt_cfifo_entry(npi_handle_t, io_op_t,
-				uint8_t, uint16_t,
-				zcp_ram_unit_t *);
-
-npi_status_t npi_zcp_rest_cfifo_port(npi_handle_t, uint8_t);
-npi_status_t npi_zcp_rest_cfifo_all(npi_handle_t);
-
-#ifdef	__cplusplus
-}
-#endif
-
-#endif	/* _NPI_ZCP_H */
--- a/usr/src/uts/sun4v/io/nxge/nxge_classify.c	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,239 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#include <npi_fflp.h>
-#include <nxge_defs.h>
-#include <nxge_fflp.h>
-#include <nxge_flow.h>
-#include <nxge_impl.h>
-#include <nxge_common.h>
-
-/*
- * Globals: tunable parameters (/etc/system or adb)
- *
- */
-int nxge_tcam_class_enable = 0;
-int nxge_tcam_lookup_enable = 0;
-int nxge_flow_dist_enable = NXGE_CLASS_FLOW_USE_DST_PORT |
-	NXGE_CLASS_FLOW_USE_SRC_PORT | NXGE_CLASS_FLOW_USE_IPDST |
-	NXGE_CLASS_FLOW_USE_IPSRC | NXGE_CLASS_FLOW_USE_PROTO |
-	NXGE_CLASS_FLOW_USE_PORTNUM;
-
-/*
- * Bit mapped
- * 0x80000000:      Drop
- * 0x0000:      NO TCAM Lookup Needed
- * 0x0001:      TCAM Lookup Needed with Dest Addr (IPv6)
- * 0x0003:      TCAM Lookup Needed with SRC Addr (IPv6)
- * 0x0010:      use MAC Port
- * 0x0020:      use L2DA
- * 0x0040:      use VLAN
- * 0x0080:      use proto
- * 0x0100:      use IP src addr
- * 0x0200:      use IP dest addr
- * 0x0400:      use Src Port
- * 0x0800:      use Dest Port
- * 0x0fff:      enable all options for IPv6 (with src addr)
- * 0x0ffd:      enable all options for IPv6 (with dest addr)
- * 0x0fff:      enable all options for IPv4
- * 0x0ffd:      enable all options for IPv4
- *
- */
-
-/*
- * the default is to distribute as function of:
- * protocol
- * ip src address
- * ip dest address
- * src port
- * dest port
- *
- * 0x0f80
- *
- */
-
-int nxge_tcp4_class = NXGE_CLASS_FLOW_USE_DST_PORT |
-	NXGE_CLASS_FLOW_USE_SRC_PORT | NXGE_CLASS_FLOW_USE_IPDST |
-	NXGE_CLASS_FLOW_USE_IPSRC | NXGE_CLASS_FLOW_USE_PROTO |
-	NXGE_CLASS_FLOW_USE_PORTNUM;
-
-int nxge_udp4_class = NXGE_CLASS_FLOW_USE_DST_PORT |
-	NXGE_CLASS_FLOW_USE_SRC_PORT | NXGE_CLASS_FLOW_USE_IPDST |
-	NXGE_CLASS_FLOW_USE_IPSRC | NXGE_CLASS_FLOW_USE_PROTO |
-	NXGE_CLASS_FLOW_USE_PORTNUM;
-
-int nxge_ah4_class = NXGE_CLASS_FLOW_USE_DST_PORT |
-	NXGE_CLASS_FLOW_USE_SRC_PORT | NXGE_CLASS_FLOW_USE_IPDST |
-	NXGE_CLASS_FLOW_USE_IPSRC | NXGE_CLASS_FLOW_USE_PROTO |
-	NXGE_CLASS_FLOW_USE_PORTNUM;
-int nxge_sctp4_class = NXGE_CLASS_FLOW_USE_DST_PORT |
-	NXGE_CLASS_FLOW_USE_SRC_PORT | NXGE_CLASS_FLOW_USE_IPDST |
-	NXGE_CLASS_FLOW_USE_IPSRC | NXGE_CLASS_FLOW_USE_PROTO |
-	NXGE_CLASS_FLOW_USE_PORTNUM;
-
-int nxge_tcp6_class = NXGE_CLASS_FLOW_USE_DST_PORT |
-	NXGE_CLASS_FLOW_USE_SRC_PORT | NXGE_CLASS_FLOW_USE_IPDST |
-	NXGE_CLASS_FLOW_USE_IPSRC | NXGE_CLASS_FLOW_USE_PROTO |
-	NXGE_CLASS_FLOW_USE_PORTNUM;
-
-int nxge_udp6_class = NXGE_CLASS_FLOW_USE_DST_PORT |
-	NXGE_CLASS_FLOW_USE_SRC_PORT | NXGE_CLASS_FLOW_USE_IPDST |
-	NXGE_CLASS_FLOW_USE_IPSRC | NXGE_CLASS_FLOW_USE_PROTO |
-	NXGE_CLASS_FLOW_USE_PORTNUM;
-
-int nxge_ah6_class = NXGE_CLASS_FLOW_USE_DST_PORT |
-	NXGE_CLASS_FLOW_USE_SRC_PORT | NXGE_CLASS_FLOW_USE_IPDST |
-	NXGE_CLASS_FLOW_USE_IPSRC | NXGE_CLASS_FLOW_USE_PROTO |
-	NXGE_CLASS_FLOW_USE_PORTNUM;
-
-int nxge_sctp6_class = NXGE_CLASS_FLOW_USE_DST_PORT |
-	NXGE_CLASS_FLOW_USE_SRC_PORT | NXGE_CLASS_FLOW_USE_IPDST |
-	NXGE_CLASS_FLOW_USE_IPSRC | NXGE_CLASS_FLOW_USE_PROTO |
-	NXGE_CLASS_FLOW_USE_PORTNUM;
-
-uint32_t nxge_fflp_init_h1 = 0xffffffff;
-uint32_t nxge_fflp_init_h2 = 0xffff;
-
-uint64_t class_quick_config_distribute[NXGE_CLASS_CONFIG_PARAMS] = {
-	0xffffffffULL,		/* h1_init */
-	0xffffULL,		/* h2_init */
-	0x0,			/* cfg_ether_usr1 */
-	0x0,			/* cfg_ether_usr2 */
-	0x0,			/* cfg_ip_usr4 */
-	0x0,			/* cfg_ip_usr5 */
-	0x0,			/* cfg_ip_usr6 */
-	0x0,			/* cfg_ip_usr7 */
-	0x0,			/* opt_ip_usr4 */
-	0x0,			/* opt_ip_usr5 */
-	0x0,			/* opt_ip_usr6 */
-	0x0,			/* opt_ip_usr7 */
-	NXGE_CLASS_FLOW_GEN_SERVER,	/* opt_ipv4_tcp */
-	NXGE_CLASS_FLOW_GEN_SERVER,	/* opt_ipv4_udp */
-	NXGE_CLASS_FLOW_GEN_SERVER,	/* opt_ipv4_ah */
-	NXGE_CLASS_FLOW_GEN_SERVER,	/* opt_ipv4_sctp */
-	NXGE_CLASS_FLOW_GEN_SERVER,	/* opt_ipv6_tcp */
-	NXGE_CLASS_FLOW_GEN_SERVER,	/* opt_ipv6_udp */
-	NXGE_CLASS_FLOW_GEN_SERVER,	/* opt_ipv6_ah */
-	NXGE_CLASS_FLOW_GEN_SERVER	/* opt_ipv6_sctp */
-};
-
-uint64_t class_quick_config_web_server[NXGE_CLASS_CONFIG_PARAMS] = {
-	0xffffffffULL,		/* h1_init */
-	0xffffULL,		/* h2_init */
-	0x0,			/* cfg_ether_usr1 */
-	0x0,			/* cfg_ether_usr2 */
-	0x0,			/* cfg_ip_usr4 */
-	0x0,			/* cfg_ip_usr5 */
-	0x0,			/* cfg_ip_usr6 */
-	0x0,			/* cfg_ip_usr7 */
-	0x0,			/* opt_ip_usr4 */
-	0x0,			/* opt_ip_usr5 */
-	0x0,			/* opt_ip_usr6 */
-	0x0,			/* opt_ip_usr7 */
-	NXGE_CLASS_FLOW_WEB_SERVER,	/* opt_ipv4_tcp */
-	NXGE_CLASS_FLOW_GEN_SERVER,	/* opt_ipv4_udp */
-	NXGE_CLASS_FLOW_GEN_SERVER,	/* opt_ipv4_ah */
-	NXGE_CLASS_FLOW_GEN_SERVER,	/* opt_ipv4_sctp */
-	NXGE_CLASS_FLOW_GEN_SERVER,	/* opt_ipv6_tcp */
-	NXGE_CLASS_FLOW_GEN_SERVER,	/* opt_ipv6_udp */
-	NXGE_CLASS_FLOW_GEN_SERVER,	/* opt_ipv6_ah */
-	NXGE_CLASS_FLOW_GEN_SERVER	/* opt_ipv6_sctp */
-};
-
-nxge_status_t
-nxge_classify_init(p_nxge_t nxgep)
-{
-	nxge_status_t status = NXGE_OK;
-
-	status = nxge_classify_init_sw(nxgep);
-	if (status != NXGE_OK)
-		return (status);
-	status = nxge_set_hw_classify_config(nxgep);
-	if (status != NXGE_OK)
-		return (status);
-
-	status = nxge_classify_init_hw(nxgep);
-	if (status != NXGE_OK)
-		return (status);
-
-	return (NXGE_OK);
-}
-
-nxge_status_t
-nxge_classify_uninit(p_nxge_t nxgep)
-{
-	nxge_status_t status = NXGE_OK;
-
-	status = nxge_classify_exit_sw(nxgep);
-	if (status != NXGE_OK) {
-		return (status);
-	}
-	return (NXGE_OK);
-}
-
-/* ARGSUSED */
-uint64_t
-nxge_classify_get_cfg_value(p_nxge_t nxgep, uint8_t cfg_type, uint8_t cfg_param)
-{
-	uint64_t cfg_value;
-
-	if (cfg_param >= NXGE_CLASS_CONFIG_PARAMS)
-		return (-1);
-	switch (cfg_type) {
-	case CFG_L3_WEB:
-		cfg_value = class_quick_config_web_server[cfg_param];
-		break;
-	case CFG_L3_DISTRIBUTE:
-	default:
-		cfg_value = class_quick_config_distribute[cfg_param];
-		break;
-	}
-	return (cfg_value);
-}
-
-nxge_status_t
-nxge_set_hw_classify_config(p_nxge_t nxgep)
-{
-	p_nxge_dma_pt_cfg_t p_all_cfgp;
-	p_nxge_hw_pt_cfg_t p_cfgp;
-
-	NXGE_DEBUG_MSG((nxgep, OBP_CTL, "==> nxge_set_hw_classify_config"));
-
-	/* Get mac rdc table info from HW/Prom/.conf etc ...... */
-	/* for now, get it from dma configs */
-	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
-	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
-
-	/*
-	 * classify_init needs to be called first.
-	 */
-	nxgep->class_config.mac_rdcgrp = p_cfgp->def_mac_rxdma_grpid;
-	nxgep->class_config.mcast_rdcgrp = p_cfgp->def_mac_rxdma_grpid;
-	NXGE_DEBUG_MSG((nxgep, OBP_CTL, "<== nxge_set_hw_classify_config"));
-
-	return (NXGE_OK);
-}
--- a/usr/src/uts/sun4v/io/nxge/nxge_espc.c	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,218 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#include <nxge_impl.h>
-#include <nxge_mac.h>
-#include <npi_espc.h>
-#include <nxge_espc.h>
-
-static void
-nxge_espc_get_next_mac_addr(uint8_t *, uint8_t, struct ether_addr *);
-
-static void
-nxge_espc_get_next_mac_addr(uint8_t *st_mac, uint8_t nxt_cnt,
-			    struct ether_addr *final_mac)
-{
-	uint64_t	mac[ETHERADDRL];
-	uint64_t	mac_addr = 0;
-	int		i, j;
-
-	for (i = ETHERADDRL - 1, j = 0; j < ETHERADDRL; i--, j++) {
-		mac[j] = st_mac[i];
-		mac_addr |= (mac[j] << (j*8));
-	}
-
-	mac_addr += nxt_cnt;
-
-	final_mac->ether_addr_octet[0] = (mac_addr & 0xff0000000000) >> 40;
-	final_mac->ether_addr_octet[1] = (mac_addr & 0xff00000000) >> 32;
-	final_mac->ether_addr_octet[2] = (mac_addr & 0xff000000) >> 24;
-	final_mac->ether_addr_octet[3] = (mac_addr & 0xff0000) >> 16;
-	final_mac->ether_addr_octet[4] = (mac_addr & 0xff00) >> 8;
-	final_mac->ether_addr_octet[5] = (mac_addr & 0xff);
-}
-
-nxge_status_t
-nxge_espc_mac_addrs_get(p_nxge_t nxgep)
-{
-	nxge_status_t	status = NXGE_OK;
-	npi_status_t	npi_status = NPI_SUCCESS;
-	uint8_t		port_num = nxgep->mac.portnum;
-	npi_handle_t	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	uint8_t		mac_addr[ETHERADDRL];
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
-			    "==> nxge_espc_mac_addr_get, port[%d]",
-			    port_num));
-
-	npi_status = npi_espc_mac_addr_get(handle, mac_addr);
-	if (npi_status != NPI_SUCCESS) {
-		status = (NXGE_ERROR | npi_status);
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				    "nxge_espc_mac_addr_get, port[%d] failed",
-				    port_num));
-		goto exit;
-	}
-
-	nxge_espc_get_next_mac_addr(mac_addr, port_num, &nxgep->factaddr);
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-		"Got MAC Addr: %2x:%2x:%2x:%2x:%2x:%2x \n",
-		mac_addr[0], mac_addr[1],
-		mac_addr[2], mac_addr[3],
-		mac_addr[4], mac_addr[5]));
-
-exit:
-	NXGE_DEBUG_MSG((nxgep, CFG_CTL, "<== nxge_espc_mac_addr_get, "
-			"status [0x%x]", status));
-
-	return (status);
-}
-
-nxge_status_t
-nxge_espc_num_macs_get(p_nxge_t nxgep, uint8_t *nmacs)
-{
-	nxge_status_t   status = NXGE_OK;
-	npi_status_t    npi_status = NPI_SUCCESS;
-	npi_handle_t    handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	NXGE_DEBUG_MSG((nxgep, CFG_CTL, "==> nxge_espc_num_macs_get"));
-
-	npi_status = npi_espc_num_macs_get(handle, nmacs);
-	if (npi_status != NPI_SUCCESS) {
-		status = (NXGE_ERROR | npi_status);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, CFG_CTL, "<== nxge_espc_num_macs_get, "
-		"status [0x%x]", status));
-
-	return (status);
-}
-
-nxge_status_t
-nxge_espc_num_ports_get(p_nxge_t nxgep)
-{
-	nxge_status_t	status = NXGE_OK;
-	npi_status_t	npi_status = NPI_SUCCESS;
-	npi_handle_t	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	uint8_t		nports = 0;
-	NXGE_DEBUG_MSG((nxgep, CFG_CTL, "==> nxge_espc_num_ports_get"));
-
-	npi_status = npi_espc_num_ports_get(handle, &nports);
-	if (npi_status != NPI_SUCCESS) {
-		status = (NXGE_ERROR | npi_status);
-	}
-	nxgep->nports = nports;
-	NXGE_DEBUG_MSG((nxgep, CFG_CTL, " nxge_espc_num_ports_get "
-			"ports [0x%x]", nports));
-
-	NXGE_DEBUG_MSG((nxgep, CFG_CTL, "<== nxge_espc_num_ports_get, "
-			"status [0x%x]", status));
-
-	return (status);
-}
-
-nxge_status_t
-nxge_espc_phy_type_get(p_nxge_t nxgep)
-{
-	nxge_status_t	status = NXGE_OK;
-	npi_status_t	npi_status = NPI_SUCCESS;
-	npi_handle_t	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	uint8_t		port_num = nxgep->mac.portnum;
-	uint8_t		phy_type;
-
-	NXGE_DEBUG_MSG((nxgep, CFG_CTL, "==> nxge_espc_phy_type_get, port[%d]",
-			port_num));
-
-	npi_status = npi_espc_port_phy_type_get(handle, &phy_type,
-						port_num);
-	if (npi_status != NPI_SUCCESS) {
-		status = (NXGE_ERROR | npi_status);
-		goto exit;
-	}
-
-	switch (phy_type) {
-	case ESC_PHY_10G_FIBER:
-		nxgep->mac.portmode = PORT_10G_FIBER;
-		nxgep->statsp->mac_stats.xcvr_inuse = XPCS_XCVR;
-		cmn_err(CE_NOTE, "!SPROM Read phy type 10G Fiber \n");
-		break;
-	case ESC_PHY_10G_COPPER:
-		nxgep->mac.portmode = PORT_10G_COPPER;
-		nxgep->statsp->mac_stats.xcvr_inuse = XPCS_XCVR;
-		cmn_err(CE_NOTE, "!SPROM Read phy type 10G Copper \n");
-
-		break;
-	case ESC_PHY_1G_FIBER:
-		nxgep->mac.portmode = PORT_1G_FIBER;
-		nxgep->statsp->mac_stats.xcvr_inuse = PCS_XCVR;
-		cmn_err(CE_NOTE, "!SPROM Read phy type 1G Fiber \n");
-
-		break;
-	case ESC_PHY_1G_COPPER:
-		nxgep->mac.portmode = PORT_1G_COPPER;
-		nxgep->statsp->mac_stats.xcvr_inuse = INT_MII_XCVR;
-		cmn_err(CE_NOTE, "!SPROM Read phy type 1G Copper \n");
-
-		break;
-	case ESC_PHY_NONE:
-		status = NXGE_ERROR;
-		NXGE_DEBUG_MSG((nxgep, CFG_CTL, "nxge_espc_phy_type_get:"
-				"No phy type set"));
-		break;
-	default:
-		status = NXGE_ERROR;
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_espc_phy_type_get: "
-				"Unknown phy type [%d]", phy_type));
-		break;
-	}
-
-exit:
-
-	NXGE_DEBUG_MSG((nxgep, CFG_CTL, "<== nxge_espc_phy_type_get, "
-			"status [0x%x]", status));
-
-	return (status);
-}
-
-nxge_status_t
-nxge_espc_max_frame_sz_get(p_nxge_t nxgep)
-{
-	nxge_status_t	status = NXGE_OK;
-	npi_status_t	npi_status = NPI_SUCCESS;
-	npi_handle_t	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-
-	NXGE_DEBUG_MSG((nxgep, CFG_CTL, "==> nxge_espc_max_frame_sz_get"));
-
-	npi_status = npi_espc_max_frame_get(handle, &nxgep->mac.maxframesize);
-	if (npi_status != NPI_SUCCESS) {
-		status = (NXGE_ERROR | npi_status);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, CFG_CTL, " nxge_espc_max_frame_sz_get, "
-			    "status [0x%x]", status));
-
-	return (status);
-}
--- a/usr/src/uts/sun4v/io/nxge/nxge_fflp.c	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,2060 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#include <npi_fflp.h>
-#include <npi_mac.h>
-#include <nxge_defs.h>
-#include <nxge_flow.h>
-#include <nxge_fflp.h>
-#include <nxge_impl.h>
-#include <nxge_fflp_hash.h>
-#include <nxge_common.h>
-
-
-/*
- * Function prototypes
- */
-static nxge_status_t nxge_fflp_vlan_tbl_clear_all(p_nxge_t);
-static nxge_status_t nxge_fflp_tcam_invalidate_all(p_nxge_t);
-static nxge_status_t nxge_fflp_tcam_init(p_nxge_t);
-static nxge_status_t nxge_fflp_fcram_invalidate_all(p_nxge_t);
-static nxge_status_t nxge_fflp_fcram_init(p_nxge_t);
-static int nxge_flow_need_hash_lookup(p_nxge_t, flow_resource_t *);
-static void nxge_fill_tcam_entry_tcp(p_nxge_t, flow_spec_t *, tcam_entry_t *);
-static void nxge_fill_tcam_entry_udp(p_nxge_t, flow_spec_t *, tcam_entry_t *);
-static void nxge_fill_tcam_entry_sctp(p_nxge_t, flow_spec_t *, tcam_entry_t *);
-static void nxge_fill_tcam_entry_tcp_ipv6(p_nxge_t, flow_spec_t *,
-	tcam_entry_t *);
-static void nxge_fill_tcam_entry_udp_ipv6(p_nxge_t, flow_spec_t *,
-	tcam_entry_t *);
-static void nxge_fill_tcam_entry_sctp_ipv6(p_nxge_t, flow_spec_t *,
-	tcam_entry_t *);
-static uint8_t nxge_get_rdc_offset(p_nxge_t, uint8_t, intptr_t);
-static uint8_t nxge_get_rdc_group(p_nxge_t, uint8_t, intptr_t);
-static tcam_location_t nxge_get_tcam_location(p_nxge_t, uint8_t);
-
-/*
- * functions used outside this file
- */
-nxge_status_t nxge_fflp_config_vlan_table(p_nxge_t, uint16_t);
-nxge_status_t nxge_fflp_ip_class_config_all(p_nxge_t);
-nxge_status_t nxge_add_flow(p_nxge_t, flow_resource_t *);
-static nxge_status_t nxge_tcam_handle_ip_fragment(p_nxge_t);
-nxge_status_t nxge_add_tcam_entry(p_nxge_t, flow_resource_t *);
-nxge_status_t nxge_add_fcram_entry(p_nxge_t, flow_resource_t *);
-nxge_status_t nxge_flow_get_hash(p_nxge_t, flow_resource_t *,
-	uint32_t *, uint16_t *);
-
-nxge_status_t
-nxge_tcam_dump_entry(p_nxge_t nxgep, uint32_t location)
-{
-	tcam_entry_t tcam_rdptr;
-	uint64_t asc_ram = 0;
-	npi_handle_t handle;
-	npi_status_t status;
-
-	handle = nxgep->npi_reg_handle;
-
-	bzero((char *)&tcam_rdptr, sizeof (struct tcam_entry));
-	status = npi_fflp_tcam_entry_read(handle, (tcam_location_t)location,
-		(struct tcam_entry *)&tcam_rdptr);
-	if (status & NPI_FAILURE) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			" nxge_tcam_dump_entry:"
-			"  tcam read failed at location %d ", location));
-		return (NXGE_ERROR);
-	}
-	status = npi_fflp_tcam_asc_ram_entry_read(handle,
-		(tcam_location_t)location, &asc_ram);
-
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "location %x\n"
-		" key:  %llx %llx %llx %llx \n"
-		" mask: %llx %llx %llx %llx \n"
-		" ASC RAM %llx \n", location,
-		tcam_rdptr.key0, tcam_rdptr.key1,
-		tcam_rdptr.key2, tcam_rdptr.key3,
-		tcam_rdptr.mask0, tcam_rdptr.mask1,
-		tcam_rdptr.mask2, tcam_rdptr.mask3, asc_ram));
-	return (NXGE_OK);
-}
-
-void
-nxge_get_tcam(p_nxge_t nxgep, p_mblk_t mp)
-{
-	uint32_t tcam_loc;
-	int *lptr;
-	int location;
-
-	uint32_t start_location = 0;
-	uint32_t stop_location = nxgep->classifier.tcam_size;
-	lptr = (int *)mp->b_rptr;
-	location = *lptr;
-
-	if ((location >= nxgep->classifier.tcam_size) || (location < -1)) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_tcam_dump: Invalid location %d \n", location));
-		return;
-	}
-	if (location == -1) {
-		start_location = 0;
-		stop_location = nxgep->classifier.tcam_size;
-	} else {
-		start_location = location;
-		stop_location = location + 1;
-	}
-	for (tcam_loc = start_location; tcam_loc < stop_location; tcam_loc++)
-		(void) nxge_tcam_dump_entry(nxgep, tcam_loc);
-}
-
-/*
- * nxge_fflp_vlan_tbl_clear_all
- * invalidates the vlan RDC table entries.
- * INPUT
- * nxge    soft state data structure
- * Return
- *      NXGE_OK
- *      NXGE_ERROR
- *
- */
-
-static nxge_status_t
-nxge_fflp_vlan_tbl_clear_all(p_nxge_t nxgep)
-{
-	vlan_id_t vlan_id;
-	npi_handle_t handle;
-	npi_status_t rs = NPI_SUCCESS;
-	vlan_id_t start = 0, stop = NXGE_MAX_VLANS;
-
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_vlan_tbl_clear_all "));
-	handle = nxgep->npi_reg_handle;
-	for (vlan_id = start; vlan_id < stop; vlan_id++) {
-		rs = npi_fflp_cfg_vlan_table_clear(handle, vlan_id);
-		if (rs != NPI_SUCCESS) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"VLAN Table invalidate failed for vlan id %d ",
-				vlan_id));
-			return (NXGE_ERROR | rs);
-		}
-	}
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_vlan_tbl_clear_all "));
-	return (NXGE_OK);
-}
-
-/*
- * The following functions are used by other modules to init
- * the fflp module.
- * these functions are the basic API used to init
- * the fflp modules (tcam, fcram etc ......)
- *
- * The TCAM search feature is disabled by default.
- */
-
-static nxge_status_t
-nxge_fflp_tcam_init(p_nxge_t nxgep)
-{
-	uint8_t access_ratio;
-	tcam_class_t class;
-	npi_status_t rs = NPI_SUCCESS;
-	npi_handle_t handle;
-
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_tcam_init"));
-	handle = nxgep->npi_reg_handle;
-
-	rs = npi_fflp_cfg_tcam_disable(handle);
-	if (rs != NPI_SUCCESS) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "failed TCAM Disable\n"));
-		return (NXGE_ERROR | rs);
-	}
-
-	access_ratio = nxgep->param_arr[param_tcam_access_ratio].value;
-	rs = npi_fflp_cfg_tcam_access(handle, access_ratio);
-	if (rs != NPI_SUCCESS) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"failed TCAM Access cfg\n"));
-		return (NXGE_ERROR | rs);
-	}
-
-	/* disable configurable classes */
-	/* disable the configurable ethernet classes; */
-	for (class = TCAM_CLASS_ETYPE_1;
-		class <= TCAM_CLASS_ETYPE_2; class++) {
-		rs = npi_fflp_cfg_enet_usr_cls_disable(handle, class);
-		if (rs != NPI_SUCCESS) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"TCAM USR Ether Class config failed."));
-			return (NXGE_ERROR | rs);
-		}
-	}
-
-	/* disable the configurable ip classes; */
-	for (class = TCAM_CLASS_IP_USER_4;
-		class <= TCAM_CLASS_IP_USER_7; class++) {
-		rs = npi_fflp_cfg_ip_usr_cls_disable(handle, class);
-		if (rs != NPI_SUCCESS) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"TCAM USR IP Class cnfg failed."));
-			return (NXGE_ERROR | rs);
-		}
-	}
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_tcam_init"));
-	return (NXGE_OK);
-}
-
-/*
- * nxge_fflp_tcam_invalidate_all
- * invalidates all the tcam entries.
- * INPUT
- * nxge    soft state data structure
- * Return
- *      NXGE_OK
- *      NXGE_ERROR
- *
- */
-
-
-static nxge_status_t
-nxge_fflp_tcam_invalidate_all(p_nxge_t nxgep)
-{
-	uint16_t location;
-	npi_status_t rs = NPI_SUCCESS;
-	npi_handle_t handle;
-	uint16_t start = 0, stop = nxgep->classifier.tcam_size;
-	p_nxge_hw_list_t hw_p;
-
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
-		"==> nxge_fflp_tcam_invalidate_all"));
-	handle = nxgep->npi_reg_handle;
-	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			" nxge_fflp_tcam_invalidate_all:"
-			" common hardware not set, niu_type 0x%x", nxgep->niu_type));
-		return (NXGE_ERROR);
-	}
-	MUTEX_ENTER(&hw_p->nxge_tcam_lock);
-	for (location = start; location < stop; location++) {
-		rs = npi_fflp_tcam_entry_invalidate(handle, location);
-		if (rs != NPI_SUCCESS) {
-			MUTEX_EXIT(&hw_p->nxge_tcam_lock);
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"TCAM invalidate failed at loc %d ", location));
-			return (NXGE_ERROR | rs);
-		}
-	}
-	MUTEX_EXIT(&hw_p->nxge_tcam_lock);
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
-			"<== nxge_fflp_tcam_invalidate_all"));
-	return (NXGE_OK);
-}
-
-/*
- * nxge_fflp_fcram_invalidate_all
- * invalidates all the FCRAM entries.
- * INPUT
- * nxge    soft state data structure
- * Return
- *      NXGE_OK
- *      NXGE_ERROR
- *
- */
-
-static nxge_status_t
-nxge_fflp_fcram_invalidate_all(p_nxge_t nxgep)
-{
-	npi_handle_t handle;
-	npi_status_t rs = NPI_SUCCESS;
-	part_id_t pid = 0;
-	uint8_t base_mask, base_reloc;
-	fcram_entry_t fc;
-	uint32_t location;
-	uint32_t increment, last_location;
-
-	/*
-	 * (1) configure and enable partition 0 with no relocation
-	 * (2) Assume the FCRAM is used as IPv4 exact match entry cells
-	 * (3) Invalidate these cells by clearing the valid bit in
-	 * the subareas 0 and 4
-	 * (4) disable the partition
-	 *
-	 */
-
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_fcram_invalidate_all"));
-
-	base_mask = base_reloc = 0x0;
-	handle = nxgep->npi_reg_handle;
-	rs = npi_fflp_cfg_fcram_partition(handle, pid, base_mask, base_reloc);
-
-	if (rs != NPI_SUCCESS) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "failed partition cfg\n"));
-		return (NXGE_ERROR | rs);
-	}
-	rs = npi_fflp_cfg_fcram_partition_disable(handle, pid);
-
-	if (rs != NPI_SUCCESS) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"failed partition disable\n"));
-		return (NXGE_ERROR | rs);
-	}
-	fc.dreg[0].value = 0;
-	fc.hash_hdr_valid = 0;
-	fc.hash_hdr_ext = 1;	/* specify as IPV4 exact match entry */
-	increment = sizeof (hash_ipv4_t);
-	last_location = FCRAM_SIZE * 0x40;
-
-	for (location = 0; location < last_location; location += increment) {
-		rs = npi_fflp_fcram_subarea_write(handle, pid,
-			location,
-			fc.value[0]);
-		if (rs != NPI_SUCCESS) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-					"failed write"
-					" at location %x ",
-					location));
-			return (NXGE_ERROR | rs);
-		}
-	}
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_fcram_invalidate_all"));
-	return (NXGE_OK);
-}
-
-static nxge_status_t
-nxge_fflp_fcram_init(p_nxge_t nxgep)
-{
-	fflp_fcram_output_drive_t strength;
-	fflp_fcram_qs_t qs;
-	npi_status_t rs = NPI_SUCCESS;
-	uint8_t access_ratio;
-	int partition;
-	npi_handle_t handle;
-	uint32_t min_time, max_time, sys_time;
-
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_fcram_init"));
-
-	/*
-	 * Recommended values are needed.
-	 */
-	min_time = FCRAM_REFRESH_DEFAULT_MIN_TIME;
-	max_time = FCRAM_REFRESH_DEFAULT_MAX_TIME;
-	sys_time = FCRAM_REFRESH_DEFAULT_SYS_TIME;
-
-	handle = nxgep->npi_reg_handle;
-	strength = FCRAM_OUTDR_NORMAL;
-	qs = FCRAM_QS_MODE_QS;
-	rs = npi_fflp_cfg_fcram_reset(handle, strength, qs);
-	if (rs != NPI_SUCCESS) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "failed FCRAM Reset. "));
-		return (NXGE_ERROR | rs);
-	}
-
-	access_ratio = nxgep->param_arr[param_fcram_access_ratio].value;
-	rs = npi_fflp_cfg_fcram_access(handle, access_ratio);
-	if (rs != NPI_SUCCESS) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "failed FCRAM Access ratio"
-			" configuration\n"));
-		return (NXGE_ERROR | rs);
-	}
-	rs = npi_fflp_cfg_fcram_refresh_time(handle, min_time,
-		max_time, sys_time);
-	if (rs != NPI_SUCCESS) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"failed FCRAM refresh cfg"));
-		return (NXGE_ERROR);
-	}
-
-	/* disable all the partitions until explicitly enabled */
-	for (partition = 0; partition < FFLP_FCRAM_MAX_PARTITION; partition++) {
-		rs = npi_fflp_cfg_fcram_partition_disable(handle, partition);
-		if (rs != NPI_SUCCESS) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"failed FCRAM partition"
-				" disable for partition %d ", partition));
-			return (NXGE_ERROR | rs);
-		}
-	}
-
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_fcram_init"));
-	return (NXGE_OK);
-}
-
-nxge_status_t
-nxge_logical_mac_assign_rdc_table(p_nxge_t nxgep, uint8_t alt_mac)
-{
-	npi_status_t rs = NPI_SUCCESS;
-	hostinfo_t mac_rdc;
-	npi_handle_t handle;
-	p_nxge_class_pt_cfg_t p_class_cfgp;
-
-	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
-	if (p_class_cfgp->mac_host_info[alt_mac].flag == 0) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			" nxge_logical_mac_assign_rdc_table"
-			" unconfigured alt MAC addr %d ", alt_mac));
-		return (NXGE_ERROR);
-	}
-	handle = nxgep->npi_reg_handle;
-	mac_rdc.value = 0;
-	mac_rdc.bits.w0.rdc_tbl_num =
-		p_class_cfgp->mac_host_info[alt_mac].rdctbl;
-	mac_rdc.bits.w0.mac_pref = p_class_cfgp->mac_host_info[alt_mac].mpr_npr;
-
-	rs = npi_mac_hostinfo_entry(handle, OP_SET,
-		nxgep->function_num, alt_mac, &mac_rdc);
-
-	if (rs != NPI_SUCCESS) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"failed Assign RDC table"));
-		return (NXGE_ERROR | rs);
-	}
-	return (NXGE_OK);
-}
-
-nxge_status_t
-nxge_main_mac_assign_rdc_table(p_nxge_t nxgep)
-{
-	npi_status_t rs = NPI_SUCCESS;
-	hostinfo_t mac_rdc;
-	npi_handle_t handle;
-
-	handle = nxgep->npi_reg_handle;
-	mac_rdc.value = 0;
-	mac_rdc.bits.w0.rdc_tbl_num = nxgep->class_config.mac_rdcgrp;
-	mac_rdc.bits.w0.mac_pref = 1;
-	switch (nxgep->function_num) {
-	case 0:
-	case 1:
-		rs = npi_mac_hostinfo_entry(handle, OP_SET,
-			nxgep->function_num, XMAC_UNIQUE_HOST_INFO_ENTRY,
-			&mac_rdc);
-		break;
-	case 2:
-	case 3:
-		rs = npi_mac_hostinfo_entry(handle, OP_SET,
-			nxgep->function_num, BMAC_UNIQUE_HOST_INFO_ENTRY,
-			&mac_rdc);
-		break;
-	default:
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"failed Assign RDC table (invalid function #)"));
-		return (NXGE_ERROR);
-	}
-
-	if (rs != NPI_SUCCESS) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"failed Assign RDC table"));
-		return (NXGE_ERROR | rs);
-	}
-	return (NXGE_OK);
-}
-
-/*
- * Initialize hostinfo registers for alternate MAC addresses and
- * multicast MAC address.
- */
-nxge_status_t
-nxge_alt_mcast_mac_assign_rdc_table(p_nxge_t nxgep)
-{
-	npi_status_t rs = NPI_SUCCESS;
-	hostinfo_t mac_rdc;
-	npi_handle_t handle;
-	int i;
-
-	handle = nxgep->npi_reg_handle;
-	mac_rdc.value = 0;
-	mac_rdc.bits.w0.rdc_tbl_num = nxgep->class_config.mcast_rdcgrp;
-	mac_rdc.bits.w0.mac_pref = 1;
-	switch (nxgep->function_num) {
-	case 0:
-	case 1:
-		/*
-		 * Tests indicate that it is OK not to re-initialize the
-		 * hostinfo registers for the XMAC's alternate MAC
-		 * addresses. But that is necessary for BMAC (case 2
-		 * and case 3 below)
-		 */
-		rs = npi_mac_hostinfo_entry(handle, OP_SET,
-			nxgep->function_num,
-			XMAC_MULTI_HOST_INFO_ENTRY, &mac_rdc);
-		break;
-	case 2:
-	case 3:
-		for (i = 1; i <= BMAC_MAX_ALT_ADDR_ENTRY; i++)
-			rs |= npi_mac_hostinfo_entry(handle, OP_SET,
-			nxgep->function_num, i, &mac_rdc);
-
-		rs |= npi_mac_hostinfo_entry(handle, OP_SET,
-			nxgep->function_num,
-			BMAC_MULTI_HOST_INFO_ENTRY, &mac_rdc);
-		break;
-	default:
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"failed Assign RDC table (invalid function #)"));
-		return (NXGE_ERROR);
-	}
-
-	if (rs != NPI_SUCCESS) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"failed Assign RDC table"));
-		return (NXGE_ERROR | rs);
-	}
-	return (NXGE_OK);
-}
-
-nxge_status_t
-nxge_fflp_init_hostinfo(p_nxge_t nxgep)
-{
-	nxge_status_t status = NXGE_OK;
-
-	status = nxge_alt_mcast_mac_assign_rdc_table(nxgep);
-	status |= nxge_main_mac_assign_rdc_table(nxgep);
-	return (status);
-}
-
-nxge_status_t
-nxge_fflp_hw_reset(p_nxge_t nxgep)
-{
-	npi_handle_t handle;
-	npi_status_t rs = NPI_SUCCESS;
-	nxge_status_t status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_hw_reset"));
-
-	if (nxgep->niu_type == NEPTUNE) {
-		status = nxge_fflp_fcram_init(nxgep);
-		if (status != NXGE_OK) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				" failed FCRAM init. "));
-			return (status);
-		}
-	}
-
-	status = nxge_fflp_tcam_init(nxgep);
-	if (status != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"failed TCAM init."));
-		return (status);
-	}
-
-	handle = nxgep->npi_reg_handle;
-	rs = npi_fflp_cfg_llcsnap_enable(handle);
-	if (rs != NPI_SUCCESS) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"failed LLCSNAP enable. "));
-		return (NXGE_ERROR | rs);
-	}
-
-	rs = npi_fflp_cfg_cam_errorcheck_disable(handle);
-	if (rs != NPI_SUCCESS) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"failed CAM Error Check enable. "));
-		return (NXGE_ERROR | rs);
-	}
-
-	/* init the hash generators */
-	rs = npi_fflp_cfg_hash_h1poly(handle, 0);
-	if (rs != NPI_SUCCESS) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"failed H1 Poly Init. "));
-		return (NXGE_ERROR | rs);
-	}
-
-	rs = npi_fflp_cfg_hash_h2poly(handle, 0);
-	if (rs != NPI_SUCCESS) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"failed H2 Poly Init. "));
-		return (NXGE_ERROR | rs);
-	}
-
-	/* invalidate TCAM entries */
-	status = nxge_fflp_tcam_invalidate_all(nxgep);
-	if (status != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"failed TCAM Entry Invalidate. "));
-		return (status);
-	}
-
-	/* invalidate FCRAM entries */
-	if (nxgep->niu_type == NEPTUNE) {
-		status = nxge_fflp_fcram_invalidate_all(nxgep);
-		if (status != NXGE_OK) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-					"failed FCRAM Entry Invalidate."));
-			return (status);
-		}
-	}
-
-	/* invalidate VLAN RDC tables */
-	status = nxge_fflp_vlan_tbl_clear_all(nxgep);
-	if (status != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"failed VLAN Table Invalidate. "));
-		return (status);
-	}
-	nxgep->classifier.state |= NXGE_FFLP_HW_RESET;
-
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_hw_reset"));
-	return (NXGE_OK);
-}
-
-nxge_status_t
-nxge_cfg_ip_cls_flow_key(p_nxge_t nxgep, tcam_class_t l3_class,
-	uint32_t class_config)
-{
-	flow_key_cfg_t fcfg;
-	npi_handle_t handle;
-	npi_status_t rs = NPI_SUCCESS;
-
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_cfg_ip_cls_flow_key"));
-	handle = nxgep->npi_reg_handle;
-	bzero(&fcfg, sizeof (flow_key_cfg_t));
-
-	if (class_config & NXGE_CLASS_FLOW_USE_PROTO)
-		fcfg.use_proto = 1;
-	if (class_config & NXGE_CLASS_FLOW_USE_DST_PORT)
-		fcfg.use_dport = 1;
-	if (class_config & NXGE_CLASS_FLOW_USE_SRC_PORT)
-		fcfg.use_sport = 1;
-	if (class_config & NXGE_CLASS_FLOW_USE_IPDST)
-		fcfg.use_daddr = 1;
-	if (class_config & NXGE_CLASS_FLOW_USE_IPSRC)
-		fcfg.use_saddr = 1;
-	if (class_config & NXGE_CLASS_FLOW_USE_VLAN)
-		fcfg.use_vlan = 1;
-	if (class_config & NXGE_CLASS_FLOW_USE_L2DA)
-		fcfg.use_l2da = 1;
-	if (class_config & NXGE_CLASS_FLOW_USE_PORTNUM)
-		fcfg.use_portnum = 1;
-	fcfg.ip_opts_exist = 0;
-
-	rs = npi_fflp_cfg_ip_cls_flow_key(handle, l3_class, &fcfg);
-	if (rs & NPI_FFLP_ERROR) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, " nxge_cfg_ip_cls_flow_key"
-			" opt %x for class %d failed ",
-			class_config, l3_class));
-		return (NXGE_ERROR | rs);
-	}
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " <== nxge_cfg_ip_cls_flow_key"));
-	return (NXGE_OK);
-}
-
-nxge_status_t
-nxge_cfg_ip_cls_flow_key_get(p_nxge_t nxgep, tcam_class_t l3_class,
-	uint32_t *class_config)
-{
-	flow_key_cfg_t fcfg;
-	npi_handle_t handle;
-	npi_status_t rs = NPI_SUCCESS;
-	uint32_t ccfg = 0;
-
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_cfg_ip_cls_flow_key_get"));
-	handle = nxgep->npi_reg_handle;
-	bzero(&fcfg, sizeof (flow_key_cfg_t));
-
-	rs = npi_fflp_cfg_ip_cls_flow_key_get(handle, l3_class, &fcfg);
-	if (rs & NPI_FFLP_ERROR) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, " nxge_cfg_ip_cls_flow_key"
-				" opt %x for class %d failed ",
-				class_config, l3_class));
-		return (NXGE_ERROR | rs);
-	}
-
-	if (fcfg.use_proto)
-		ccfg |= NXGE_CLASS_FLOW_USE_PROTO;
-	if (fcfg.use_dport)
-		ccfg |= NXGE_CLASS_FLOW_USE_DST_PORT;
-	if (fcfg.use_sport)
-		ccfg |= NXGE_CLASS_FLOW_USE_SRC_PORT;
-	if (fcfg.use_daddr)
-		ccfg |= NXGE_CLASS_FLOW_USE_IPDST;
-	if (fcfg.use_saddr)
-		ccfg |= NXGE_CLASS_FLOW_USE_IPSRC;
-	if (fcfg.use_vlan)
-		ccfg |= NXGE_CLASS_FLOW_USE_VLAN;
-	if (fcfg.use_l2da)
-		ccfg |= NXGE_CLASS_FLOW_USE_L2DA;
-	if (fcfg.use_portnum)
-		ccfg |= NXGE_CLASS_FLOW_USE_PORTNUM;
-
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
-		" nxge_cfg_ip_cls_flow_key_get %x", ccfg));
-	*class_config = ccfg;
-
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
-		" <== nxge_cfg_ip_cls_flow_key_get"));
-	return (NXGE_OK);
-}
-
-static nxge_status_t
-nxge_cfg_tcam_ip_class_get(p_nxge_t nxgep, tcam_class_t class,
-	uint32_t *class_config)
-{
-	npi_status_t rs = NPI_SUCCESS;
-	tcam_key_cfg_t cfg;
-	npi_handle_t handle;
-	uint32_t ccfg = 0;
-
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_cfg_tcam_ip_class"));
-
-	bzero(&cfg, sizeof (tcam_key_cfg_t));
-	handle = nxgep->npi_reg_handle;
-
-	rs = npi_fflp_cfg_ip_cls_tcam_key_get(handle, class, &cfg);
-	if (rs & NPI_FFLP_ERROR) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, " nxge_cfg_tcam_ip_class"
-			" opt %x for class %d failed ",
-			class_config, class));
-		return (NXGE_ERROR | rs);
-	}
-	if (cfg.discard)
-		ccfg |= NXGE_CLASS_DISCARD;
-	if (cfg.lookup_enable)
-		ccfg |= NXGE_CLASS_TCAM_LOOKUP;
-	if (cfg.use_ip_daddr)
-		ccfg |= NXGE_CLASS_TCAM_USE_SRC_ADDR;
-	*class_config = ccfg;
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
-			" ==> nxge_cfg_tcam_ip_class %x", ccfg));
-	return (NXGE_OK);
-}
-
-static nxge_status_t
-nxge_cfg_tcam_ip_class(p_nxge_t nxgep, tcam_class_t class,
-	uint32_t class_config)
-{
-	npi_status_t rs = NPI_SUCCESS;
-	tcam_key_cfg_t cfg;
-	npi_handle_t handle;
-	p_nxge_class_pt_cfg_t p_class_cfgp;
-
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_cfg_tcam_ip_class"));
-
-	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
-	p_class_cfgp->class_cfg[class] = class_config;
-
-	bzero(&cfg, sizeof (tcam_key_cfg_t));
-	handle = nxgep->npi_reg_handle;
-	cfg.discard = 0;
-	cfg.lookup_enable = 0;
-	cfg.use_ip_daddr = 0;
-	if (class_config & NXGE_CLASS_DISCARD)
-		cfg.discard = 1;
-	if (class_config & NXGE_CLASS_TCAM_LOOKUP)
-		cfg.lookup_enable = 1;
-	if (class_config & NXGE_CLASS_TCAM_USE_SRC_ADDR)
-		cfg.use_ip_daddr = 1;
-
-	rs = npi_fflp_cfg_ip_cls_tcam_key(handle, class, &cfg);
-	if (rs & NPI_FFLP_ERROR) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, " nxge_cfg_tcam_ip_class"
-			" opt %x for class %d failed ",
-			class_config, class));
-		return (NXGE_ERROR | rs);
-	}
-	return (NXGE_OK);
-}
-
-nxge_status_t
-nxge_fflp_set_hash1(p_nxge_t nxgep, uint32_t h1)
-{
-	npi_status_t rs = NPI_SUCCESS;
-	npi_handle_t handle;
-	p_nxge_class_pt_cfg_t p_class_cfgp;
-
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_init_h1"));
-	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
-	p_class_cfgp->init_h1 = h1;
-	handle = nxgep->npi_reg_handle;
-	rs = npi_fflp_cfg_hash_h1poly(handle, h1);
-	if (rs & NPI_FFLP_ERROR) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			" nxge_fflp_init_h1 %x failed ", h1));
-		return (NXGE_ERROR | rs);
-	}
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " <== nxge_fflp_init_h1"));
-	return (NXGE_OK);
-}
-
-nxge_status_t
-nxge_fflp_set_hash2(p_nxge_t nxgep, uint16_t h2)
-{
-	npi_status_t rs = NPI_SUCCESS;
-	npi_handle_t handle;
-	p_nxge_class_pt_cfg_t p_class_cfgp;
-
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_init_h2"));
-	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
-	p_class_cfgp->init_h2 = h2;
-
-	handle = nxgep->npi_reg_handle;
-	rs = npi_fflp_cfg_hash_h2poly(handle, h2);
-	if (rs & NPI_FFLP_ERROR) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			" nxge_fflp_init_h2 %x failed ", h2));
-		return (NXGE_ERROR | rs);
-	}
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " <== nxge_fflp_init_h2"));
-	return (NXGE_OK);
-}
-
-nxge_status_t
-nxge_classify_init_sw(p_nxge_t nxgep)
-{
-	int alloc_size;
-	nxge_classify_t *classify_ptr;
-
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_classify_init_sw"));
-	classify_ptr = &nxgep->classifier;
-
-	if (classify_ptr->state & NXGE_FFLP_SW_INIT) {
-		NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
-			"nxge_classify_init_sw already init"));
-		return (NXGE_OK);
-	}
-	/* Init SW structures */
-	classify_ptr->tcam_size = TCAM_NIU_TCAM_MAX_ENTRY;
-
-	/* init data structures, based on HW type */
-	if (nxgep->niu_type == NEPTUNE) {
-		classify_ptr->tcam_size = TCAM_NXGE_TCAM_MAX_ENTRY;
-		/*
-		 * check if fcram based classification is required and init the
-		 * flow storage
-		 */
-	}
-	alloc_size = sizeof (tcam_flow_spec_t) * classify_ptr->tcam_size;
-	classify_ptr->tcam_entries = KMEM_ZALLOC(alloc_size, NULL);
-
-	/* Init defaults */
-	/*
-	 * add hacks required for HW shortcomings for example, code to handle
-	 * fragmented packets
-	 */
-	nxge_init_h1_table();
-	nxge_crc_ccitt_init();
-	nxgep->classifier.tcam_location = nxgep->function_num;
-	nxgep->classifier.fragment_bug = 1;
-	classify_ptr->state |= NXGE_FFLP_SW_INIT;
-
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_classify_init_sw"));
-	return (NXGE_OK);
-}
-
-nxge_status_t
-nxge_classify_exit_sw(p_nxge_t nxgep)
-{
-	int alloc_size;
-	nxge_classify_t *classify_ptr;
-	int fsize;
-
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_classify_exit_sw"));
-	classify_ptr = &nxgep->classifier;
-
-	fsize = sizeof (tcam_flow_spec_t);
-	if (classify_ptr->tcam_entries) {
-		alloc_size = fsize * classify_ptr->tcam_size;
-		KMEM_FREE((void *) classify_ptr->tcam_entries, alloc_size);
-	}
-	nxgep->classifier.state = NULL;
-
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_classify_exit_sw"));
-	return (NXGE_OK);
-}
-
-/*
- * Figures out the location where the TCAM entry is
- * to be inserted.
- *
- * The current implementation is just a place holder and it
- * returns the next tcam location.
- * The real location determining algorithm would consider
- * the priority, partition etc ... before deciding which
- * location to insert.
- *
- */
-
-/* ARGSUSED */
-static tcam_location_t
-nxge_get_tcam_location(p_nxge_t nxgep, uint8_t class)
-{
-	tcam_location_t location;
-
-	location = nxgep->classifier.tcam_location;
-	nxgep->classifier.tcam_location = (location + nxgep->nports) %
-		nxgep->classifier.tcam_size;
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
-		"nxge_get_tcam_location: location %d next %d \n",
-		location, nxgep->classifier.tcam_location));
-	return (location);
-}
-
-/*
- * Figures out the RDC Group for the entry
- *
- * The current implementation is just a place holder and it
- * returns 0.
- * The real group-determining algorithm would consider
- * the partition etc ... before deciding which RDC group to use.
- *
- */
-
-/* ARGSUSED */
-static uint8_t
-nxge_get_rdc_group(p_nxge_t nxgep, uint8_t class, intptr_t cookie)
-{
-	int use_port_rdc_grp = 0;
-	uint8_t rdc_grp = 0;
-	p_nxge_dma_pt_cfg_t p_dma_cfgp;
-	p_nxge_hw_pt_cfg_t p_cfgp;
-	p_nxge_rdc_grp_t rdc_grp_p;
-
-	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
-	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
-	rdc_grp_p = &p_dma_cfgp->rdc_grps[use_port_rdc_grp];
-	rdc_grp = p_cfgp->start_rdc_grpid;
-
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-		"nxge_get_rdc_group: grp 0x%x real_grp %x grpp $%p\n",
-		cookie, rdc_grp, rdc_grp_p));
-	return (rdc_grp);
-}
-
-/* ARGSUSED */
-static uint8_t
-nxge_get_rdc_offset(p_nxge_t nxgep, uint8_t class, intptr_t cookie)
-{
-	return ((uint8_t)cookie);
-}
-
-/* ARGSUSED */
-static void
-nxge_fill_tcam_entry_udp(p_nxge_t nxgep, flow_spec_t *flow_spec,
-	tcam_entry_t *tcam_ptr)
-{
-	udpip4_spec_t *fspec_key;
-	udpip4_spec_t *fspec_mask;
-
-	fspec_key = (udpip4_spec_t *)&flow_spec->uh.udpip4spec;
-	fspec_mask = (udpip4_spec_t *)&flow_spec->um.udpip4spec;
-	TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_key, fspec_key->ip4dst);
-	TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_mask, fspec_mask->ip4dst);
-	TCAM_IPV4_ADDR(tcam_ptr->ip4_src_key, fspec_key->ip4src);
-	TCAM_IPV4_ADDR(tcam_ptr->ip4_src_mask, fspec_mask->ip4src);
-	TCAM_IP_PORTS(tcam_ptr->ip4_port_key,
-		fspec_key->pdst, fspec_key->psrc);
-	TCAM_IP_PORTS(tcam_ptr->ip4_port_mask,
-		fspec_mask->pdst, fspec_mask->psrc);
-	TCAM_IP_CLASS(tcam_ptr->ip4_class_key,
-		tcam_ptr->ip4_class_mask,
-		TCAM_CLASS_UDP_IPV4);
-	TCAM_IP_PROTO(tcam_ptr->ip4_proto_key,
-		tcam_ptr->ip4_proto_mask,
-		IPPROTO_UDP);
-}
-
-static void
-nxge_fill_tcam_entry_udp_ipv6(p_nxge_t nxgep, flow_spec_t *flow_spec,
-	tcam_entry_t *tcam_ptr)
-{
-	udpip6_spec_t *fspec_key;
-	udpip6_spec_t *fspec_mask;
-	p_nxge_class_pt_cfg_t p_class_cfgp;
-
-	fspec_key = (udpip6_spec_t *)&flow_spec->uh.udpip6spec;
-	fspec_mask = (udpip6_spec_t *)&flow_spec->um.udpip6spec;
-	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
-	if (p_class_cfgp->class_cfg[TCAM_CLASS_UDP_IPV6] &
-			NXGE_CLASS_TCAM_USE_SRC_ADDR) {
-		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6src);
-		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6src);
-	} else {
-		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6dst);
-		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6dst);
-	}
-
-	TCAM_IP_CLASS(tcam_ptr->ip6_class_key,
-		tcam_ptr->ip6_class_mask, TCAM_CLASS_UDP_IPV6);
-	TCAM_IP_PROTO(tcam_ptr->ip6_nxt_hdr_key,
-		tcam_ptr->ip6_nxt_hdr_mask, IPPROTO_UDP);
-	TCAM_IP_PORTS(tcam_ptr->ip6_port_key,
-		fspec_key->pdst, fspec_key->psrc);
-	TCAM_IP_PORTS(tcam_ptr->ip6_port_mask,
-		fspec_mask->pdst, fspec_mask->psrc);
-}
-
-/* ARGSUSED */
-static void
-nxge_fill_tcam_entry_tcp(p_nxge_t nxgep, flow_spec_t *flow_spec,
-	tcam_entry_t *tcam_ptr)
-{
-	tcpip4_spec_t *fspec_key;
-	tcpip4_spec_t *fspec_mask;
-
-	fspec_key = (tcpip4_spec_t *)&flow_spec->uh.tcpip4spec;
-	fspec_mask = (tcpip4_spec_t *)&flow_spec->um.tcpip4spec;
-
-	TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_key, fspec_key->ip4dst);
-	TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_mask, fspec_mask->ip4dst);
-	TCAM_IPV4_ADDR(tcam_ptr->ip4_src_key, fspec_key->ip4src);
-	TCAM_IPV4_ADDR(tcam_ptr->ip4_src_mask, fspec_mask->ip4src);
-	TCAM_IP_PORTS(tcam_ptr->ip4_port_key,
-		fspec_key->pdst, fspec_key->psrc);
-	TCAM_IP_PORTS(tcam_ptr->ip4_port_mask,
-		fspec_mask->pdst, fspec_mask->psrc);
-	TCAM_IP_CLASS(tcam_ptr->ip4_class_key,
-		tcam_ptr->ip4_class_mask, TCAM_CLASS_TCP_IPV4);
-	TCAM_IP_PROTO(tcam_ptr->ip4_proto_key,
-		tcam_ptr->ip4_proto_mask, IPPROTO_TCP);
-}
-
-/* ARGSUSED */
-static void
-nxge_fill_tcam_entry_sctp(p_nxge_t nxgep, flow_spec_t *flow_spec,
-	tcam_entry_t *tcam_ptr)
-{
-	tcpip4_spec_t *fspec_key;
-	tcpip4_spec_t *fspec_mask;
-
-	fspec_key = (tcpip4_spec_t *)&flow_spec->uh.tcpip4spec;
-	fspec_mask = (tcpip4_spec_t *)&flow_spec->um.tcpip4spec;
-
-	TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_key, fspec_key->ip4dst);
-	TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_mask, fspec_mask->ip4dst);
-	TCAM_IPV4_ADDR(tcam_ptr->ip4_src_key, fspec_key->ip4src);
-	TCAM_IPV4_ADDR(tcam_ptr->ip4_src_mask, fspec_mask->ip4src);
-	TCAM_IP_CLASS(tcam_ptr->ip4_class_key,
-		tcam_ptr->ip4_class_mask, TCAM_CLASS_SCTP_IPV4);
-	TCAM_IP_PROTO(tcam_ptr->ip4_proto_key,
-		tcam_ptr->ip4_proto_mask, IPPROTO_SCTP);
-	TCAM_IP_PORTS(tcam_ptr->ip4_port_key,
-		fspec_key->pdst, fspec_key->psrc);
-	TCAM_IP_PORTS(tcam_ptr->ip4_port_mask,
-		fspec_mask->pdst, fspec_mask->psrc);
-}
-
-static void
-nxge_fill_tcam_entry_tcp_ipv6(p_nxge_t nxgep, flow_spec_t *flow_spec,
-	tcam_entry_t *tcam_ptr)
-{
-	tcpip6_spec_t *fspec_key;
-	tcpip6_spec_t *fspec_mask;
-	p_nxge_class_pt_cfg_t p_class_cfgp;
-
-	fspec_key = (tcpip6_spec_t *)&flow_spec->uh.tcpip6spec;
-	fspec_mask = (tcpip6_spec_t *)&flow_spec->um.tcpip6spec;
-
-	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
-	if (p_class_cfgp->class_cfg[TCAM_CLASS_TCP_IPV6] &
-			NXGE_CLASS_TCAM_USE_SRC_ADDR) {
-		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6src);
-		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6src);
-	} else {
-		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6dst);
-		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6dst);
-	}
-
-	TCAM_IP_CLASS(tcam_ptr->ip6_class_key,
-		tcam_ptr->ip6_class_mask, TCAM_CLASS_TCP_IPV6);
-	TCAM_IP_PROTO(tcam_ptr->ip6_nxt_hdr_key,
-		tcam_ptr->ip6_nxt_hdr_mask, IPPROTO_TCP);
-	TCAM_IP_PORTS(tcam_ptr->ip6_port_key,
-		fspec_key->pdst, fspec_key->psrc);
-	TCAM_IP_PORTS(tcam_ptr->ip6_port_mask,
-		fspec_mask->pdst, fspec_mask->psrc);
-}
-
-static void
-nxge_fill_tcam_entry_sctp_ipv6(p_nxge_t nxgep, flow_spec_t *flow_spec,
-	tcam_entry_t *tcam_ptr)
-{
-	tcpip6_spec_t *fspec_key;
-	tcpip6_spec_t *fspec_mask;
-	p_nxge_class_pt_cfg_t p_class_cfgp;
-
-	fspec_key = (tcpip6_spec_t *)&flow_spec->uh.tcpip6spec;
-	fspec_mask = (tcpip6_spec_t *)&flow_spec->um.tcpip6spec;
-	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
-
-	if (p_class_cfgp->class_cfg[TCAM_CLASS_SCTP_IPV6] &
-			NXGE_CLASS_TCAM_USE_SRC_ADDR) {
-		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6src);
-		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6src);
-	} else {
-		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6dst);
-		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6dst);
-	}
-
-	TCAM_IP_CLASS(tcam_ptr->ip6_class_key,
-		tcam_ptr->ip6_class_mask, TCAM_CLASS_SCTP_IPV6);
-	TCAM_IP_PROTO(tcam_ptr->ip6_nxt_hdr_key,
-		tcam_ptr->ip6_nxt_hdr_mask, IPPROTO_SCTP);
-	TCAM_IP_PORTS(tcam_ptr->ip6_port_key,
-		fspec_key->pdst, fspec_key->psrc);
-	TCAM_IP_PORTS(tcam_ptr->ip6_port_mask,
-		fspec_mask->pdst, fspec_mask->psrc);
-}
-
-nxge_status_t
-nxge_flow_get_hash(p_nxge_t nxgep, flow_resource_t *flow_res,
-	uint32_t *H1, uint16_t *H2)
-{
-	flow_spec_t *flow_spec;
-	uint32_t class_cfg;
-	flow_template_t ft;
-	p_nxge_class_pt_cfg_t p_class_cfgp;
-
-	int ft_size = sizeof (flow_template_t);
-
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_flow_get_hash"));
-
-	flow_spec = (flow_spec_t *)&flow_res->flow_spec;
-	bzero((char *)&ft, ft_size);
-	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
-
-	switch (flow_spec->flow_type) {
-	case FSPEC_TCPIP4:
-		class_cfg = p_class_cfgp->class_cfg[TCAM_CLASS_TCP_IPV4];
-		if (class_cfg & NXGE_CLASS_FLOW_USE_PROTO)
-			ft.ip_proto = IPPROTO_TCP;
-		if (class_cfg & NXGE_CLASS_FLOW_USE_IPSRC)
-			ft.ip4_saddr = flow_res->flow_spec.uh.tcpip4spec.ip4src;
-		if (class_cfg & NXGE_CLASS_FLOW_USE_IPDST)
-			ft.ip4_daddr = flow_res->flow_spec.uh.tcpip4spec.ip4dst;
-		if (class_cfg & NXGE_CLASS_FLOW_USE_SRC_PORT)
-			ft.ip_src_port = flow_res->flow_spec.uh.tcpip4spec.psrc;
-		if (class_cfg & NXGE_CLASS_FLOW_USE_DST_PORT)
-			ft.ip_dst_port = flow_res->flow_spec.uh.tcpip4spec.pdst;
-		break;
-
-	case FSPEC_UDPIP4:
-		class_cfg = p_class_cfgp->class_cfg[TCAM_CLASS_UDP_IPV4];
-		if (class_cfg & NXGE_CLASS_FLOW_USE_PROTO)
-			ft.ip_proto = IPPROTO_UDP;
-		if (class_cfg & NXGE_CLASS_FLOW_USE_IPSRC)
-			ft.ip4_saddr = flow_res->flow_spec.uh.udpip4spec.ip4src;
-		if (class_cfg & NXGE_CLASS_FLOW_USE_IPDST)
-			ft.ip4_daddr = flow_res->flow_spec.uh.udpip4spec.ip4dst;
-		if (class_cfg & NXGE_CLASS_FLOW_USE_SRC_PORT)
-			ft.ip_src_port = flow_res->flow_spec.uh.udpip4spec.psrc;
-		if (class_cfg & NXGE_CLASS_FLOW_USE_DST_PORT)
-			ft.ip_dst_port = flow_res->flow_spec.uh.udpip4spec.pdst;
-		break;
-
-	default:
-		return (NXGE_ERROR);
-	}
-
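-	/* Only the low 20 bits of the computed CRC are kept as the H1 hash. */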
-	*H1 = nxge_compute_h1(p_class_cfgp->init_h1,
-		(uint32_t *)&ft, ft_size) & 0xfffff;
-	*H2 = nxge_compute_h2(p_class_cfgp->init_h2,
-		(uint8_t *)&ft, ft_size);
-
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_flow_get_hash"));
-	return (NXGE_OK);
-}
-
-nxge_status_t
-nxge_add_fcram_entry(p_nxge_t nxgep, flow_resource_t *flow_res)
-{
-	uint32_t H1;
-	uint16_t H2;
-	nxge_status_t status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_add_fcram_entry"));
-	status = nxge_flow_get_hash(nxgep, flow_res, &H1, &H2);
-	if (status != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			" nxge_add_fcram_entry failed "));
-		return (status);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_add_fcram_entry"));
-	return (NXGE_OK);
-}
-
-/*
- * Already decided this flow goes into the tcam
- */
-
-nxge_status_t
-nxge_add_tcam_entry(p_nxge_t nxgep, flow_resource_t *flow_res)
-{
-	npi_handle_t handle;
-	intptr_t channel_cookie;
-	intptr_t flow_cookie;
-	flow_spec_t *flow_spec;
-	npi_status_t rs = NPI_SUCCESS;
-	tcam_entry_t tcam_ptr;
-	tcam_location_t location = 0;
-	uint8_t offset, rdc_grp;
-	p_nxge_hw_list_t hw_p;
-
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_add_tcam_entry"));
-	handle = nxgep->npi_reg_handle;
-
-	bzero((void *)&tcam_ptr, sizeof (tcam_entry_t));
-	flow_spec = (flow_spec_t *)&flow_res->flow_spec;
-	flow_cookie = flow_res->flow_cookie;
-	channel_cookie = flow_res->channel_cookie;
-
-	switch (flow_spec->flow_type) {
-	case FSPEC_TCPIP4:
-		nxge_fill_tcam_entry_tcp(nxgep, flow_spec, &tcam_ptr);
-		location = nxge_get_tcam_location(nxgep,
-			TCAM_CLASS_TCP_IPV4);
-		rdc_grp = nxge_get_rdc_group(nxgep, TCAM_CLASS_TCP_IPV4,
-			flow_cookie);
-		offset = nxge_get_rdc_offset(nxgep, TCAM_CLASS_TCP_IPV4,
-			channel_cookie);
-		break;
-
-	case FSPEC_UDPIP4:
-		nxge_fill_tcam_entry_udp(nxgep, flow_spec, &tcam_ptr);
-		location = nxge_get_tcam_location(nxgep,
-			TCAM_CLASS_UDP_IPV4);
-		rdc_grp = nxge_get_rdc_group(nxgep,
-			TCAM_CLASS_UDP_IPV4,
-			flow_cookie);
-		offset = nxge_get_rdc_offset(nxgep,
-			TCAM_CLASS_UDP_IPV4,
-			channel_cookie);
-		break;
-
-	case FSPEC_TCPIP6:
-		nxge_fill_tcam_entry_tcp_ipv6(nxgep,
-			flow_spec, &tcam_ptr);
-		location = nxge_get_tcam_location(nxgep,
-			TCAM_CLASS_TCP_IPV6);
-		rdc_grp = nxge_get_rdc_group(nxgep, TCAM_CLASS_TCP_IPV6,
-			flow_cookie);
-		offset = nxge_get_rdc_offset(nxgep, TCAM_CLASS_TCP_IPV6,
-			channel_cookie);
-		break;
-
-	case FSPEC_UDPIP6:
-		nxge_fill_tcam_entry_udp_ipv6(nxgep,
-			flow_spec, &tcam_ptr);
-		location = nxge_get_tcam_location(nxgep,
-			TCAM_CLASS_UDP_IPV6);
-		rdc_grp = nxge_get_rdc_group(nxgep,
-			TCAM_CLASS_UDP_IPV6,
-			channel_cookie);
-		offset = nxge_get_rdc_offset(nxgep,
-			TCAM_CLASS_UDP_IPV6,
-			flow_cookie);
-		break;
-
-	case FSPEC_SCTPIP4:
-		nxge_fill_tcam_entry_sctp(nxgep, flow_spec, &tcam_ptr);
-		location = nxge_get_tcam_location(nxgep,
-			TCAM_CLASS_SCTP_IPV4);
-		rdc_grp = nxge_get_rdc_group(nxgep,
-			TCAM_CLASS_SCTP_IPV4,
-			channel_cookie);
-		offset = nxge_get_rdc_offset(nxgep,
-			TCAM_CLASS_SCTP_IPV4,
-			flow_cookie);
-		break;
-
-	case FSPEC_SCTPIP6:
-		nxge_fill_tcam_entry_sctp_ipv6(nxgep,
-			flow_spec, &tcam_ptr);
-		location = nxge_get_tcam_location(nxgep,
-			TCAM_CLASS_SCTP_IPV6);
-		rdc_grp = nxge_get_rdc_group(nxgep,
-			TCAM_CLASS_SCTP_IPV6,
-			channel_cookie);
-		offset = nxge_get_rdc_offset(nxgep,
-			TCAM_CLASS_SCTP_IPV6,
-			flow_cookie);
-		break;
-
-	default:
-		return (NXGE_OK);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
-		" nxge_add_tcam_entry write"
-		" for location %d offset %d", location, offset));
-
-	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			" nxge_add_tcam_entry: common hardware not set, niu type %d",
-			nxgep->niu_type));
-		return (NXGE_ERROR);
-	}
-
-	MUTEX_ENTER(&hw_p->nxge_tcam_lock);
-	rs = npi_fflp_tcam_entry_write(handle, location, &tcam_ptr);
-
-	if (rs & NPI_FFLP_ERROR) {
-		MUTEX_EXIT(&hw_p->nxge_tcam_lock);
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			" nxge_add_tcam_entry write"
-			" failed for location %d", location));
-		return (NXGE_ERROR | rs);
-	}
-
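-	/*
-	 * Program the TCAM associated RAM for this entry: the RDC table
-	 * (group), the RDC offset within the group, and the discard bit
-	 * when no channel cookie was supplied.
-	 */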
-	tcam_ptr.match_action.value = 0;
-	tcam_ptr.match_action.bits.ldw.rdctbl = rdc_grp;
-	tcam_ptr.match_action.bits.ldw.offset = offset;
-	tcam_ptr.match_action.bits.ldw.tres =
-		TRES_TERM_OVRD_L2RDC;
-	if (channel_cookie == -1)
-		tcam_ptr.match_action.bits.ldw.disc = 1;
-	rs = npi_fflp_tcam_asc_ram_entry_write(handle,
-		location, tcam_ptr.match_action.value);
-	if (rs & NPI_FFLP_ERROR) {
-		MUTEX_EXIT(&hw_p->nxge_tcam_lock);
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			" nxge_add_tcam_entry write"
-			" failed for ASC RAM location %d", location));
-		return (NXGE_ERROR | rs);
-	}
-	bcopy((void *) &tcam_ptr,
-		(void *) &nxgep->classifier.tcam_entries[location].tce,
-		sizeof (tcam_entry_t));
-
-	MUTEX_EXIT(&hw_p->nxge_tcam_lock);
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_add_tcam_entry"));
-	return (NXGE_OK);
-}
-
-static nxge_status_t
-nxge_tcam_handle_ip_fragment(p_nxge_t nxgep)
-{
-	tcam_entry_t tcam_ptr;
-	tcam_location_t location;
-	uint8_t class;
-	uint32_t class_config;
-	npi_handle_t handle;
-	npi_status_t rs = NPI_SUCCESS;
-	p_nxge_hw_list_t hw_p;
-	nxge_status_t status = NXGE_OK;
-
-	handle = nxgep->npi_reg_handle;
-	class = 0;
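-	/*
-	 * Build a TCAM key that matches IPv4 packets carrying no L4 port
-	 * information (e.g. IP fragments) so that they are steered to the
-	 * port's default MAC RDC group.
-	 */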
-	bzero((void *)&tcam_ptr, sizeof (tcam_entry_t));
-	tcam_ptr.ip4_noport_key = 1;
-	tcam_ptr.ip4_noport_mask = 1;
-	location = nxgep->function_num;
-	nxgep->classifier.fragment_bug_location = location;
-
-	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			" nxge_tcam_handle_ip_fragment:"
-			" common hardware not set, niu type %d",
-			nxgep->niu_type));
-		return (NXGE_ERROR);
-	}
-	MUTEX_ENTER(&hw_p->nxge_tcam_lock);
-	rs = npi_fflp_tcam_entry_write(handle,
-		location, &tcam_ptr);
-
-	if (rs & NPI_FFLP_ERROR) {
-		MUTEX_EXIT(&hw_p->nxge_tcam_lock);
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			" nxge_tcam_handle_ip_fragment "
-			" tcam_entry write"
-			" failed for location %d", location));
-		return (NXGE_ERROR);
-	}
-	tcam_ptr.match_action.bits.ldw.rdctbl = nxgep->class_config.mac_rdcgrp;
-	tcam_ptr.match_action.bits.ldw.offset = 0;	/* use the default */
-	tcam_ptr.match_action.bits.ldw.tres =
-		TRES_TERM_USE_OFFSET;
-	rs = npi_fflp_tcam_asc_ram_entry_write(handle,
-		location, tcam_ptr.match_action.value);
-
-	if (rs & NPI_FFLP_ERROR) {
-		MUTEX_EXIT(&hw_p->nxge_tcam_lock);
-		NXGE_DEBUG_MSG((nxgep,
-			FFLP_CTL,
-			" nxge_tcam_handle_ip_fragment "
-			" tcam_entry write"
-			" failed for ASC RAM location %d", location));
-		return (NXGE_ERROR);
-	}
-	bcopy((void *) &tcam_ptr,
-		(void *) &nxgep->classifier.tcam_entries[location].tce,
-		sizeof (tcam_entry_t));
-	for (class = TCAM_CLASS_TCP_IPV4;
-		class <= TCAM_CLASS_SCTP_IPV6; class++) {
-		class_config = nxgep->class_config.class_cfg[class];
-		class_config |= NXGE_CLASS_TCAM_LOOKUP;
-		status = nxge_fflp_ip_class_config(nxgep, class, class_config);
-
-		if (status & NPI_FFLP_ERROR) {
-			MUTEX_EXIT(&hw_p->nxge_tcam_lock);
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"nxge_tcam_handle_ip_fragment "
-				"nxge_fflp_ip_class_config failed "
-				" class %d config %x ", class, class_config));
-			return (NXGE_ERROR);
-		}
-	}
-
-	rs = npi_fflp_cfg_tcam_enable(handle);
-	if (rs & NPI_FFLP_ERROR) {
-		MUTEX_EXIT(&hw_p->nxge_tcam_lock);
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_tcam_handle_ip_fragment "
-			" nxge_fflp_config_tcam_enable failed"));
-		return (NXGE_ERROR);
-	}
-	MUTEX_EXIT(&hw_p->nxge_tcam_lock);
-	return (NXGE_OK);
-}
-
-/* ARGSUSED */
-static int
-nxge_flow_need_hash_lookup(p_nxge_t nxgep, flow_resource_t *flow_res)
-{
-	return (0);
-}
-
-nxge_status_t
-nxge_add_flow(p_nxge_t nxgep, flow_resource_t *flow_res)
-{
-
-	int insert_hash = 0;
-	nxge_status_t status = NXGE_OK;
-
-	if (nxgep->niu_type == NEPTUNE) {
-		/* determine whether to do TCAM or Hash flow */
-		insert_hash = nxge_flow_need_hash_lookup(nxgep, flow_res);
-	}
-	if (insert_hash) {
-		status = nxge_add_fcram_entry(nxgep, flow_res);
-	} else {
-		status = nxge_add_tcam_entry(nxgep, flow_res);
-	}
-	return (status);
-}
-
-void
-nxge_put_tcam(p_nxge_t nxgep, p_mblk_t mp)
-{
-	flow_resource_t *fs;
-
-	fs = (flow_resource_t *)mp->b_rptr;
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-		"nxge_put_tcam addr fs $%p  type %x offset %x",
-		fs, fs->flow_spec.flow_type, fs->channel_cookie));
-	(void) nxge_add_tcam_entry(nxgep, fs);
-}
-
-nxge_status_t
-nxge_fflp_config_tcam_enable(p_nxge_t nxgep)
-{
-	npi_handle_t handle = nxgep->npi_reg_handle;
-	npi_status_t rs = NPI_SUCCESS;
-
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_config_tcam_enable"));
-	rs = npi_fflp_cfg_tcam_enable(handle);
-	if (rs & NPI_FFLP_ERROR) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			" nxge_fflp_config_tcam_enable failed"));
-		return (NXGE_ERROR | rs);
-	}
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " <== nxge_fflp_config_tcam_enable"));
-	return (NXGE_OK);
-}
-
-nxge_status_t
-nxge_fflp_config_tcam_disable(p_nxge_t nxgep)
-{
-	npi_handle_t handle = nxgep->npi_reg_handle;
-	npi_status_t rs = NPI_SUCCESS;
-
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
-		" ==> nxge_fflp_config_tcam_disable"));
-	rs = npi_fflp_cfg_tcam_disable(handle);
-	if (rs & NPI_FFLP_ERROR) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				" nxge_fflp_config_tcam_disable failed"));
-		return (NXGE_ERROR | rs);
-	}
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
-		" <== nxge_fflp_config_tcam_disable"));
-	return (NXGE_OK);
-}
-
-nxge_status_t
-nxge_fflp_config_hash_lookup_enable(p_nxge_t nxgep)
-{
-	npi_handle_t handle = nxgep->npi_reg_handle;
-	npi_status_t rs = NPI_SUCCESS;
-	p_nxge_dma_pt_cfg_t p_dma_cfgp;
-	p_nxge_hw_pt_cfg_t p_cfgp;
-	uint8_t partition;
-
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
-		" ==> nxge_fflp_config_hash_lookup_enable"));
-	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
-	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
-
-	for (partition = p_cfgp->start_rdc_grpid;
-		partition < p_cfgp->max_rdc_grpids; partition++) {
-		rs = npi_fflp_cfg_fcram_partition_enable(handle, partition);
-		if (rs != NPI_SUCCESS) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				" nxge_fflp_config_hash_lookup_enable"
-				" failed FCRAM partition"
-				" enable for partition %d ", partition));
-			return (NXGE_ERROR | rs);
-		}
-	}
-
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
-		" <== nxge_fflp_config_hash_lookup_enable"));
-	return (NXGE_OK);
-}
-
-nxge_status_t
-nxge_fflp_config_hash_lookup_disable(p_nxge_t nxgep)
-{
-	npi_handle_t handle = nxgep->npi_reg_handle;
-	npi_status_t rs = NPI_SUCCESS;
-	p_nxge_dma_pt_cfg_t p_dma_cfgp;
-	p_nxge_hw_pt_cfg_t p_cfgp;
-	uint8_t partition;
-
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
-		" ==> nxge_fflp_config_hash_lookup_disable"));
-	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
-	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
-
-	for (partition = p_cfgp->start_rdc_grpid;
-		partition < p_cfgp->max_rdc_grpids; partition++) {
-		rs = npi_fflp_cfg_fcram_partition_disable(handle,
-			partition);
-		if (rs != NPI_SUCCESS) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				" nxge_fflp_config_hash_lookup_disable"
-				" failed FCRAM partition"
-				" disable for partition %d ", partition));
-			return (NXGE_ERROR | rs);
-		}
-	}
-
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
-		" <== nxge_fflp_config_hash_lookup_disable"));
-	return (NXGE_OK);
-}
-
-nxge_status_t
-nxge_fflp_config_llc_snap_enable(p_nxge_t nxgep)
-{
-	npi_handle_t handle = nxgep->npi_reg_handle;
-	npi_status_t rs = NPI_SUCCESS;
-
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
-		" ==> nxge_fflp_config_llc_snap_enable"));
-	rs = npi_fflp_cfg_llcsnap_enable(handle);
-	if (rs & NPI_FFLP_ERROR) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			" nxge_fflp_config_llc_snap_enable failed"));
-		return (NXGE_ERROR | rs);
-	}
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
-		" <== nxge_fflp_config_llc_snap_enable"));
-	return (NXGE_OK);
-}
-
-nxge_status_t
-nxge_fflp_config_llc_snap_disable(p_nxge_t nxgep)
-{
-	npi_handle_t handle = nxgep->npi_reg_handle;
-	npi_status_t rs = NPI_SUCCESS;
-
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
-		" ==> nxge_fflp_config_llc_snap_disable"));
-	rs = npi_fflp_cfg_llcsnap_disable(handle);
-	if (rs & NPI_FFLP_ERROR) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			" nxge_fflp_config_llc_snap_disable failed"));
-		return (NXGE_ERROR | rs);
-	}
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
-		" <== nxge_fflp_config_llc_snap_disable"));
-	return (NXGE_OK);
-}
-
-nxge_status_t
-nxge_fflp_ip_usr_class_config(p_nxge_t nxgep, tcam_class_t class,
-	uint32_t config)
-{
-	npi_status_t rs = NPI_SUCCESS;
-	npi_handle_t handle = nxgep->npi_reg_handle;
-	uint8_t tos, tos_mask, proto, ver = 0;
-	uint8_t class_enable = 0;
-
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_ip_usr_class_config"));
-
-	tos = (config & NXGE_CLASS_CFG_IP_TOS_MASK) >>
-		NXGE_CLASS_CFG_IP_TOS_SHIFT;
-	tos_mask = (config & NXGE_CLASS_CFG_IP_TOS_MASK_MASK) >>
-		NXGE_CLASS_CFG_IP_TOS_MASK_SHIFT;
-	proto = (config & NXGE_CLASS_CFG_IP_PROTO_MASK) >>
-		NXGE_CLASS_CFG_IP_PROTO_SHIFT;
-	if (config & NXGE_CLASS_CFG_IP_IPV6_MASK)
-		ver = 1;
-	if (config & NXGE_CLASS_CFG_IP_ENABLE_MASK)
-		class_enable = 1;
-	rs = npi_fflp_cfg_ip_usr_cls_set(handle, class, tos, tos_mask,
-		proto, ver);
-	if (rs & NPI_FFLP_ERROR) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			" nxge_fflp_ip_usr_class_config"
-			" for class %d failed ", class));
-		return (NXGE_ERROR | rs);
-	}
-	if (class_enable)
-		rs = npi_fflp_cfg_ip_usr_cls_enable(handle, class);
-	else
-		rs = npi_fflp_cfg_ip_usr_cls_disable(handle, class);
-
-	if (rs & NPI_FFLP_ERROR) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			" nxge_fflp_ip_usr_class_config"
-			" TCAM enable/disable for class %d failed ", class));
-		return (NXGE_ERROR | rs);
-	}
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_ip_usr_class_config"));
-	return (NXGE_OK);
-}
-
-nxge_status_t
-nxge_fflp_ip_class_config(p_nxge_t nxgep, tcam_class_t class, uint32_t config)
-{
-	uint32_t class_config;
-	nxge_status_t t_status = NXGE_OK;
-	nxge_status_t f_status = NXGE_OK;
-	p_nxge_class_pt_cfg_t p_class_cfgp;
-
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_ip_class_config"));
-
-	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
-	class_config = p_class_cfgp->class_cfg[class];
-
-	if (class_config != config) {
-		p_class_cfgp->class_cfg[class] = config;
-		class_config = config;
-	}
-
-	t_status = nxge_cfg_tcam_ip_class(nxgep, class, class_config);
-	f_status = nxge_cfg_ip_cls_flow_key(nxgep, class, class_config);
-
-	if (t_status & NPI_FFLP_ERROR) {
-		NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
-			" nxge_fflp_ip_class_config %x"
-			" for class %d tcam failed", config, class));
-		return (t_status);
-	}
-	if (f_status & NPI_FFLP_ERROR) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			" nxge_fflp_ip_class_config %x"
-			" for class %d flow key failed", config, class));
-		return (f_status);
-	}
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_ip_class_config"));
-	return (NXGE_OK);
-}
-
-nxge_status_t
-nxge_fflp_ip_class_config_get(p_nxge_t nxgep, tcam_class_t class,
-	uint32_t *config)
-{
-	uint32_t t_class_config, f_class_config;
-	int t_status = NXGE_OK;
-	int f_status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_ip_class_config_get"));
-
-	t_class_config = f_class_config = 0;
-	t_status = nxge_cfg_tcam_ip_class_get(nxgep, class, &t_class_config);
-	f_status = nxge_cfg_ip_cls_flow_key_get(nxgep, class, &f_class_config);
-
-	if (t_status & NPI_FFLP_ERROR) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			" nxge_fflp_ip_class_config_get  "
-			" for class %d tcam failed", class));
-		return (t_status);
-	}
-
-	if (f_status & NPI_FFLP_ERROR) {
-		NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
-			" nxge_fflp_ip_class_config_get  "
-			" for class %d flow key failed", class));
-		return (f_status);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
-		" nxge_fflp_ip_class_config_get tcam %x flow %x",
-		t_class_config, f_class_config));
-
-	*config = t_class_config | f_class_config;
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_ip_class_config_get"));
-	return (NXGE_OK);
-}
-
-nxge_status_t
-nxge_fflp_ip_class_config_all(p_nxge_t nxgep)
-{
-	uint32_t class_config;
-	tcam_class_t class;
-
-#ifdef	NXGE_DEBUG
-	int status = NXGE_OK;
-#endif
-
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_ip_class_config_all"));
-	for (class = TCAM_CLASS_TCP_IPV4;
-		class <= TCAM_CLASS_SCTP_IPV6; class++) {
-		class_config = nxgep->class_config.class_cfg[class];
-#ifndef	NXGE_DEBUG
-		(void) nxge_fflp_ip_class_config(nxgep, class, class_config);
-#else
-		status = nxge_fflp_ip_class_config(nxgep, class, class_config);
-		if (status & NPI_FFLP_ERROR) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"nxge_fflp_ip_class_config failed "
-				" class %d config %x ",
-				class, class_config));
-		}
-#endif
-	}
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_ip_class_config_all"));
-	return (NXGE_OK);
-}
-
-nxge_status_t
-nxge_fflp_config_vlan_table(p_nxge_t nxgep, uint16_t vlan_id)
-{
-	uint8_t port, rdc_grp;
-	npi_handle_t handle;
-	npi_status_t rs = NPI_SUCCESS;
-	uint8_t priority = 1;
-	p_nxge_mv_cfg_t vlan_table;
-	p_nxge_class_pt_cfg_t p_class_cfgp;
-	p_nxge_hw_list_t hw_p;
-
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_config_vlan_table"));
-	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
-	handle = nxgep->npi_reg_handle;
-	vlan_table = p_class_cfgp->vlan_tbl;
-	port = nxgep->function_num;
-
-	if (vlan_table[vlan_id].flag == 0) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			" nxge_fflp_config_vlan_table"
-			" vlan id is not configured %d", vlan_id));
-		return (NXGE_ERROR);
-	}
-
-	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			" nxge_fflp_config_vlan_table:"
-			" common hardware not set, niu type %d",
-			nxgep->niu_type));
-		return (NXGE_ERROR);
-	}
-	MUTEX_ENTER(&hw_p->nxge_vlan_lock);
-	rdc_grp = vlan_table[vlan_id].rdctbl;
-	rs = npi_fflp_cfg_enet_vlan_table_assoc(handle,
-		port, vlan_id,
-		rdc_grp, priority);
-
-	MUTEX_EXIT(&hw_p->nxge_vlan_lock);
-	if (rs & NPI_FFLP_ERROR) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_fflp_config_vlan_table failed "
-			" Port %d vlan_id %d rdc_grp %d",
-			port, vlan_id, rdc_grp));
-		return (NXGE_ERROR | rs);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_config_vlan_table"));
-	return (NXGE_OK);
-}
-
-nxge_status_t
-nxge_fflp_update_hw(p_nxge_t nxgep)
-{
-	nxge_status_t status = NXGE_OK;
-	p_nxge_param_t pa;
-	uint64_t cfgd_vlans;
-	uint64_t *val_ptr;
-	int i;
-	int num_macs;
-	uint8_t alt_mac;
-	nxge_param_map_t *p_map;
-	p_nxge_mv_cfg_t vlan_table;
-	p_nxge_class_pt_cfg_t p_class_cfgp;
-	p_nxge_dma_pt_cfg_t p_all_cfgp;
-	p_nxge_hw_pt_cfg_t p_cfgp;
-
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_update_hw"));
-
-	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
-	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
-	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
-
-	status = nxge_fflp_set_hash1(nxgep, p_class_cfgp->init_h1);
-	if (status != NXGE_OK) {
-		NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
-			"nxge_fflp_set_hash1 Failed"));
-		return (NXGE_ERROR);
-	}
-
-	status = nxge_fflp_set_hash2(nxgep, p_class_cfgp->init_h2);
-	if (status != NXGE_OK) {
-		NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
-			"nxge_fflp_set_hash2 Failed"));
-		return (NXGE_ERROR);
-	}
-	vlan_table = p_class_cfgp->vlan_tbl;
-
-	/* configure vlan tables */
-	pa = (p_nxge_param_t)&nxgep->param_arr[param_vlan_2rdc_grp];
-	val_ptr = (uint64_t *)pa->value;
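-	/*
-	 * The count of configured VLAN entries is encoded in the
-	 * parameter's type field.
-	 */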
-	cfgd_vlans = ((pa->type & NXGE_PARAM_ARRAY_CNT_MASK) >>
-		NXGE_PARAM_ARRAY_CNT_SHIFT);
-
-	for (i = 0; i < cfgd_vlans; i++) {
-		p_map = (nxge_param_map_t *)&val_ptr[i];
-		if (vlan_table[p_map->param_id].flag) {
-			status = nxge_fflp_config_vlan_table(nxgep,
-				p_map->param_id);
-			if (status != NXGE_OK) {
-				NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
-					"nxge_fflp_config_vlan_table Failed"));
-				return (NXGE_ERROR);
-			}
-		}
-	}
-
-	/* config MAC addresses */
-	num_macs = p_cfgp->max_macs;
-	pa = (p_nxge_param_t)&nxgep->param_arr[param_mac_2rdc_grp];
-	val_ptr = (uint64_t *)pa->value;
-
-	for (alt_mac = 0; alt_mac < num_macs; alt_mac++) {
-		if (p_class_cfgp->mac_host_info[alt_mac].flag) {
-			status = nxge_logical_mac_assign_rdc_table(nxgep,
-				alt_mac);
-			if (status != NXGE_OK) {
-				NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
-					"nxge_logical_mac_assign_rdc_table"
-					" Failed"));
-				return (NXGE_ERROR);
-			}
-		}
-	}
-
-	/* Configure the IP classes (TCAM lookup and flow key) */
-	status = nxge_fflp_ip_class_config_all(nxgep);
-	if (status != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_fflp_ip_class_config_all Failed"));
-		return (NXGE_ERROR);
-	}
-	return (NXGE_OK);
-}
-
-nxge_status_t
-nxge_classify_init_hw(p_nxge_t nxgep)
-{
-	nxge_status_t status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_classify_init_hw"));
-
-	if (nxgep->classifier.state & NXGE_FFLP_HW_INIT) {
-		NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
-			"nxge_classify_init_hw already init"));
-		return (NXGE_OK);
-	}
-
-	/* Now do a real configuration */
-	status = nxge_fflp_update_hw(nxgep);
-	if (status != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_fflp_update_hw failed"));
-		return (NXGE_ERROR);
-	}
-
-	/* Init RDC tables?  Who should do that, rxdma or fflp? */
-	/* attach rdc table to the MAC port. */
-	status = nxge_main_mac_assign_rdc_table(nxgep);
-	if (status != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"nxge_main_mac_assign_rdc_table failed"));
-		return (NXGE_ERROR);
-	}
-
-	status = nxge_alt_mcast_mac_assign_rdc_table(nxgep);
-	if (status != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_alt_mcast_mac_assign_rdc_table failed"));
-		return (NXGE_ERROR);
-	}
-
-	status = nxge_tcam_handle_ip_fragment(nxgep);
-	if (status != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_tcam_handle_ip_fragment failed"));
-		return (NXGE_ERROR);
-	}
-
-	nxgep->classifier.state |= NXGE_FFLP_HW_INIT;
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_classify_init_hw"));
-	return (NXGE_OK);
-}
-
-nxge_status_t
-nxge_fflp_handle_sys_errors(p_nxge_t nxgep)
-{
-	npi_handle_t handle;
-	p_nxge_fflp_stats_t statsp;
-	uint8_t portn, rdc_grp;
-	p_nxge_dma_pt_cfg_t p_dma_cfgp;
-	p_nxge_hw_pt_cfg_t p_cfgp;
-	vlan_par_err_t vlan_err;
-	tcam_err_t tcam_err;
-	hash_lookup_err_log1_t fcram1_err;
-	hash_lookup_err_log2_t fcram2_err;
-	hash_tbl_data_log_t fcram_err;
-
-	handle = nxgep->npi_handle;
-	statsp = (p_nxge_fflp_stats_t)&nxgep->statsp->fflp_stats;
-	portn = nxgep->mac.portnum;
-
-	/*
-	 * need to read the fflp error registers to figure out what the error
-	 * is
-	 */
-	npi_fflp_vlan_error_get(handle, &vlan_err);
-	npi_fflp_tcam_error_get(handle, &tcam_err);
-
-	if (vlan_err.bits.ldw.m_err || vlan_err.bits.ldw.err) {
-		NXGE_ERROR_MSG((nxgep, FFLP_CTL,
-			" vlan table parity error on port %d"
-			" addr: 0x%x data: 0x%x",
-			portn, vlan_err.bits.ldw.addr,
-			vlan_err.bits.ldw.data));
-		statsp->vlan_parity_err++;
-
-		if (vlan_err.bits.ldw.m_err) {
-			NXGE_ERROR_MSG((nxgep, FFLP_CTL,
-				" vlan table multiple errors on port %d",
-				portn));
-		}
-		statsp->errlog.vlan = (uint32_t)vlan_err.value;
-		NXGE_FM_REPORT_ERROR(nxgep, NULL, NULL,
-			NXGE_FM_EREPORT_FFLP_VLAN_PAR_ERR);
-		npi_fflp_vlan_error_clear(handle);
-	}
-
-	if (tcam_err.bits.ldw.err) {
-		if (tcam_err.bits.ldw.p_ecc != 0) {
-			NXGE_ERROR_MSG((nxgep, FFLP_CTL,
-				" TCAM ECC error on port %d"
-				" TCAM entry: 0x%x syndrome: 0x%x",
-				portn, tcam_err.bits.ldw.addr,
-				tcam_err.bits.ldw.syndrome));
-			statsp->tcam_ecc_err++;
-		} else {
-			NXGE_ERROR_MSG((nxgep, FFLP_CTL,
-				" TCAM Parity error on port %d"
-				" addr: 0x%x parity value: 0x%x",
-				portn, tcam_err.bits.ldw.addr,
-				tcam_err.bits.ldw.syndrome));
-			statsp->tcam_parity_err++;
-		}
-
-		if (tcam_err.bits.ldw.mult) {
-			NXGE_ERROR_MSG((nxgep, FFLP_CTL,
-				" TCAM Multiple errors on port %d", portn));
-		} else {
-			NXGE_ERROR_MSG((nxgep, FFLP_CTL,
-					" TCAM PIO error on port %d",
-					portn));
-		}
-
-		statsp->errlog.tcam = (uint32_t)tcam_err.value;
-		NXGE_FM_REPORT_ERROR(nxgep, NULL, NULL,
-			NXGE_FM_EREPORT_FFLP_TCAM_ERR);
-		npi_fflp_tcam_error_clear(handle);
-	}
-
-	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
-	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
-
-	for (rdc_grp = p_cfgp->start_rdc_grpid;
-		rdc_grp < p_cfgp->max_rdc_grpids; rdc_grp++) {
-		npi_fflp_fcram_error_get(handle, &fcram_err, rdc_grp);
-		if (fcram_err.bits.ldw.pio_err) {
-			NXGE_ERROR_MSG((nxgep, FFLP_CTL,
-				" FCRAM PIO ECC error on port %d"
-				" rdc group: %d Hash Table addr: 0x%x"
-				" syndrome: 0x%x",
-				portn, rdc_grp,
-				fcram_err.bits.ldw.fcram_addr,
-				fcram_err.bits.ldw.syndrome));
-			statsp->hash_pio_err[rdc_grp]++;
-			statsp->errlog.hash_pio[rdc_grp] =
-				(uint32_t)fcram_err.value;
-			NXGE_FM_REPORT_ERROR(nxgep, NULL, NULL,
-				NXGE_FM_EREPORT_FFLP_HASHT_DATA_ERR);
-			npi_fflp_fcram_error_clear(handle, rdc_grp);
-		}
-	}
-
-	npi_fflp_fcram_error_log1_get(handle, &fcram1_err);
-	if (fcram1_err.bits.ldw.ecc_err) {
-		char *multi_str = "";
-		char *multi_bit_str = "";
-
-		npi_fflp_fcram_error_log2_get(handle, &fcram2_err);
-		if (fcram1_err.bits.ldw.mult_lk) {
-			multi_str = "multiple";
-		}
-		if (fcram1_err.bits.ldw.mult_bit) {
-			multi_bit_str = "multiple bits";
-		}
-		NXGE_ERROR_MSG((nxgep, FFLP_CTL,
-			" FCRAM %s lookup %s ECC error on port %d"
-			" H1: 0x%x Subarea: 0x%x Syndrome: 0x%x",
-			multi_str, multi_bit_str, portn,
-			fcram2_err.bits.ldw.h1,
-			fcram2_err.bits.ldw.subarea,
-			fcram2_err.bits.ldw.syndrome));
-		NXGE_FM_REPORT_ERROR(nxgep, NULL, NULL,
-			NXGE_FM_EREPORT_FFLP_HASHT_LOOKUP_ERR);
-	}
-	statsp->errlog.hash_lookup1 = (uint32_t)fcram1_err.value;
-	statsp->errlog.hash_lookup2 = (uint32_t)fcram2_err.value;
-	return (NXGE_OK);
-}
--- a/usr/src/uts/sun4v/io/nxge/nxge_fflp_hash.c	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,375 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#include <sys/types.h>
-#include <nxge_fflp_hash.h>
-
-static void nxge_crc32c_word(uint32_t *crcptr, const uint32_t *buf, int len);
-
-/*
- * The crc32c algorithms are taken from sctp_crc32 implementation
- * common/inet/sctp_crc32.{c,h}
- *
- */
-
-/*
- * Fast CRC32C calculation algorithm.  The basic idea is to look at the
- * data four bytes (one word) at a time, using four tables.  The
- * standard algorithm in RFC 3309 uses one table.
- */
-
-/*
- * SCTP uses reflected/reverse polynomial CRC32 with generating
- * polynomial 0x1EDC6F41L
- */
-#define	SCTP_POLY 0x1EDC6F41L
-
-/* CRC-CCITT Polynomial */
-#define	CRC_CCITT_POLY 0x1021
-
-/* The four CRC32c tables. */
-static uint32_t crc32c_tab[4][256];
-
-/* The four CRC-CCITT tables. */
-static uint16_t crc_ccitt_tab[4][256];
-
-/* The four tables for H1 computation. */
-static uint32_t h1table[4][256];
-
-#define	CRC_32C_POLY 0x1EDC6F41L
-
-#define	COMPUTE_H1_BYTE(crc, data) \
-	(crc = (crc<<8)^h1table[0][((crc >> 24) ^data) & 0xff])
-
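-/*
- * reflect_32() reverses the bit order of a 32-bit word;
- * flip32() swaps its byte order.
- */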
-static uint32_t
-reflect_32(uint32_t b)
-{
-	int i;
-	uint32_t rw = 0;
-
-	for (i = 0; i < 32; i++) {
-		if (b & 1) {
-			rw |= 1 << (31 - i);
-		}
-		b >>= 1;
-	}
-	return (rw);
-}
-
-static uint32_t
-flip32(uint32_t w)
-{
-	return (((w >> 24) | ((w >> 8) & 0xff00) |
-		((w << 8) & 0xff0000) | (w << 24)));
-}
-
-/*
- * reference crc-ccitt implementation
- */
-
-uint16_t
-crc_ccitt(uint16_t crcin, uint8_t data)
-{
-	uint16_t mcrc, crc = 0, bits = 0;
-
-	mcrc = (((crcin >> 8) ^ data) & 0xff) << 8;
-	for (bits = 0; bits < 8; bits++) {
-		crc = ((crc ^ mcrc) & 0x8000) ?
-			(crc << 1) ^ CRC_CCITT_POLY :
-			crc << 1;
-		mcrc <<= 1;
-	}
-	return ((crcin << 8) ^ crc);
-}
-
-/*
- * Initialize the crc32c tables.
- */
-
-void
-nxge_crc32c_init(void)
-{
-	uint32_t index, bit, byte, crc;
-
-	for (index = 0; index < 256; index++) {
-		crc = reflect_32(index);
-		for (byte = 0; byte < 4; byte++) {
-			for (bit = 0; bit < 8; bit++) {
-				crc = (crc & 0x80000000) ?
-					(crc << 1) ^ SCTP_POLY : crc << 1;
-			}
-#ifdef _BIG_ENDIAN
-			crc32c_tab[3 - byte][index] = flip32(reflect_32(crc));
-#else
-			crc32c_tab[byte][index] = reflect_32(crc);
-#endif
-		}
-	}
-}
-
-/*
- * Initialize the crc-ccitt tables.
- */
-
-void
-nxge_crc_ccitt_init(void)
-{
-	uint16_t crc;
-	uint16_t index, bit, byte;
-
-	for (index = 0; index < 256; index++) {
-		crc = index << 8;
-		for (byte = 0; byte < 4; byte++) {
-			for (bit = 0; bit < 8; bit++) {
-				crc = (crc & 0x8000) ?
-					(crc << 1) ^ CRC_CCITT_POLY : crc << 1;
-			}
-#ifdef _BIG_ENDIAN
-			crc_ccitt_tab[3 - byte][index] = crc;
-#else
-			crc_ccitt_tab[byte][index] = crc;
-#endif
-		}
-	}
-}
-
-/*
- * Look up the crc32c for a byte stream.
- */
-
-static void
-nxge_crc32c_byte(uint32_t *crcptr, const uint8_t *buf, int len)
-{
-	uint32_t crc;
-	int i;
-
-	crc = *crcptr;
-	for (i = 0; i < len; i++) {
-#ifdef _BIG_ENDIAN
-		crc = (crc << 8) ^ crc32c_tab[3][buf[i] ^ (crc >> 24)];
-#else
-		crc = (crc >> 8) ^ crc32c_tab[0][buf[i] ^ (crc & 0xff)];
-#endif
-	}
-	*crcptr = crc;
-}
-
-/*
- * Look up the crc-ccitt for a byte stream.
- */
-
-static void
-nxge_crc_ccitt_byte(uint16_t *crcptr, const uint8_t *buf, int len)
-{
-	uint16_t crc;
-	int i;
-
-	crc = *crcptr;
-	for (i = 0; i < len; i++) {
-
-#ifdef _BIG_ENDIAN
-		crc = (crc << 8) ^ crc_ccitt_tab[3][buf[i] ^ (crc >> 8)];
-#else
-		crc = (crc << 8) ^ crc_ccitt_tab[0][buf[i] ^ (crc >> 8)];
-#endif
-	}
-	*crcptr = crc;
-}
-
-/*
- * Look up the crc32c for a 32-bit word stream.
- * The lookup is done for the 4 bytes in parallel
- * using the tables computed earlier.
- *
- */
-
-static void
-nxge_crc32c_word(uint32_t *crcptr, const uint32_t *buf, int len)
-{
-	uint32_t w, crc;
-	int i;
-
-	crc = *crcptr;
-	for (i = 0; i < len; i++) {
-		w = crc ^ buf[i];
-		crc = crc32c_tab[0][w >> 24] ^
-			crc32c_tab[1][(w >> 16) & 0xff] ^
-			crc32c_tab[2][(w >> 8) & 0xff] ^
-			crc32c_tab[3][w & 0xff];
-	}
-	*crcptr = crc;
-}
-
-/*
- * Look up the crc-ccitt for a stream of bytes.
- *
- * Since the parallel lookup version doesn't work yet,
- * use the byte stream version (look up the crc one byte
- * at a time).
- *
- */
-
-uint16_t
-nxge_crc_ccitt(uint16_t crc16, const uint8_t *buf, int len)
-{
-	nxge_crc_ccitt_byte(&crc16, buf, len);
-	return (crc16);
-}
-
-/*
- * Look up the crc32c for a stream of bytes.
- *
- * Tries to look up the CRC on 4-byte words.
- * If the buffer is not 4-byte aligned, first compute
- * with byte lookups until aligned, then compute the crc
- * for each 4 bytes. If there are bytes left at the end of
- * the buffer, perform a byte lookup for the remaining bytes.
- *
- *
- */
-
-uint32_t
-nxge_crc32c(uint32_t crc32, const uint8_t *buf, int len)
-{
-	int rem;
-
-	rem = 4 - ((uintptr_t)buf) & 3;
-	if (rem != 0) {
-		if (len < rem) {
-			rem = len;
-		}
-		nxge_crc32c_byte(&crc32, buf, rem);
-		buf = buf + rem;
-		len = len - rem;
-	}
-	if (len > 3) {
-		nxge_crc32c_word(&crc32, (const uint32_t *) buf, len / 4);
-	}
-	rem = len & 3;
-	if (rem != 0) {
-		nxge_crc32c_byte(&crc32, buf + len - rem, rem);
-	}
-	return (crc32);
-}
-
-void
-nxge_init_h1_table()
-{
-	uint32_t crc, bit, byte, index;
-
-	for (index = 0; index < 256; index++) {
-		crc = index << 24;
-		for (byte = 0; byte < 4; byte++) {
-			for (bit = 0; bit < 8; bit++) {
-				crc = ((crc & 0x80000000)) ?
-					(crc << 1) ^ CRC_32C_POLY : crc << 1;
-			}
-			h1table[byte][index] = crc;
-		}
-	}
-}
-
-/*
- * Reference Neptune H1 computation function
- *
- * It is a slightly modified
- * CRC-32C implementation.
- */
-
-uint32_t
-nxge_compute_h1_serial(uint32_t init_value, uint32_t *flow, uint32_t len)
-{
-	int bit, byte;
-	uint32_t crc_h1 = init_value;
-	uint8_t *buf;
-
-	buf = (uint8_t *)flow;
-	for (byte = 0; byte < len; byte++) {
-		for (bit = 0; bit < 8; bit++) {
-			crc_h1 = (((crc_h1 >> 24) & 0x80) ^
-				((buf[byte] << bit) & 0x80)) ?
-				(crc_h1 << 1) ^ CRC_32C_POLY : crc_h1 << 1;
-		}
-	}
-
-	return (crc_h1);
-}
-
-/*
- * Table based implementation.
- * Uses four tables in parallel,
- * one for each byte of a 32-bit word.
- *
- * This is the default h1 computing function
- *
- */
-
-uint32_t
-nxge_compute_h1_table4(uint32_t crcin, uint32_t *flow, uint32_t length)
-{
-	uint32_t w, fw, i, crch1 = crcin;
-	uint32_t *buf;
-
-	buf = (uint32_t *)flow;
-
-	for (i = 0; i < length / 4; i++) {
-#ifdef _BIG_ENDIAN
-		fw = buf[i];
-#else
-		fw = flip32(buf[i]);
-		fw = buf[i];
-#endif
-		w = crch1 ^ fw;
-		crch1 = h1table[3][w >> 24] ^ h1table[2][(w >> 16) & 0xff] ^
-			h1table[1][(w >> 8) & 0xff] ^ h1table[0][w & 0xff];
-	}
-	return (crch1);
-}
-
-/*
- * Table based implementation.
- * Uses a single table and computes h1 one byte
- * at a time.
- *
- */
-
-uint32_t
-nxge_compute_h1_table1(uint32_t crcin, uint32_t *flow, uint32_t length)
-{
-
-	uint32_t i, crch1, tmp = crcin;
-	uint8_t *buf;
-
-	buf = (uint8_t *)flow;
-
-	tmp = crcin;
-	for (i = 0; i < length; i++) {
-		crch1 = COMPUTE_H1_BYTE(tmp, buf[i]);
-		tmp = crch1;
-	}
-
-	return (crch1);
-}
--- a/usr/src/uts/sun4v/io/nxge/nxge_fm.c	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,966 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#include <sys/nxge/nxge_impl.h>
-#include <sys/ddifm.h>
-#include <sys/fm/protocol.h>
-#include <sys/fm/util.h>
-#include <sys/fm/io/ddi.h>
-
-static nxge_fm_ereport_attr_t
-*nxge_fm_get_ereport_attr(nxge_fm_ereport_id_t);
-
-nxge_fm_ereport_attr_t	nxge_fm_ereport_pcs[] = {
-	{NXGE_FM_EREPORT_XPCS_LINK_DOWN,	"10g_link_down",
-						DDI_FM_DEVICE_INVAL_STATE,
-						DDI_SERVICE_LOST},
-	{NXGE_FM_EREPORT_XPCS_TX_LINK_FAULT,	"10g_tx_link_fault",
-						DDI_FM_DEVICE_INVAL_STATE,
-						DDI_SERVICE_DEGRADED},
-	{NXGE_FM_EREPORT_XPCS_RX_LINK_FAULT,	"10g_rx_link_fault",
-						DDI_FM_DEVICE_INVAL_STATE,
-						DDI_SERVICE_DEGRADED},
-	{NXGE_FM_EREPORT_PCS_LINK_DOWN,		"1g_link_down",
-						DDI_FM_DEVICE_INVAL_STATE,
-						DDI_SERVICE_LOST},
-	{NXGE_FM_EREPORT_PCS_REMOTE_FAULT,	"1g_remote_fault",
-						DDI_FM_DEVICE_INVAL_STATE,
-						DDI_SERVICE_DEGRADED},
-};
-
-nxge_fm_ereport_attr_t	nxge_fm_ereport_mif[] = {
-	{NXGE_FM_EREPORT_MIF_ACCESS_FAIL,	"transceiver_access_fail"}
-};
-
-nxge_fm_ereport_attr_t nxge_fm_ereport_fflp[] = {
-	{NXGE_FM_EREPORT_FFLP_TCAM_ERR,		"classifier_tcam_err",
-						DDI_FM_DEVICE_INTERN_UNCORR,
-						DDI_SERVICE_UNAFFECTED},
-	{NXGE_FM_EREPORT_FFLP_VLAN_PAR_ERR,	"classifier_vlan_par_err",
-						DDI_FM_DEVICE_INTERN_UNCORR,
-						DDI_SERVICE_UNAFFECTED},
-	{NXGE_FM_EREPORT_FFLP_HASHT_DATA_ERR,	"classifier_hasht_data_err",
-						DDI_FM_DEVICE_INTERN_UNCORR,
-						DDI_SERVICE_UNAFFECTED},
-	{NXGE_FM_EREPORT_FFLP_HASHT_LOOKUP_ERR,	"classifier_hasht_lookup_err",
-						DDI_FM_DEVICE_INTERN_UNCORR,
-						DDI_SERVICE_UNAFFECTED},
-	{NXGE_FM_EREPORT_FFLP_ACCESS_FAIL,	"classifier_access_fail",
-						DDI_FM_DEVICE_NO_RESPONSE,
-						DDI_SERVICE_DEGRADED}
-};
-
-nxge_fm_ereport_attr_t nxge_fm_ereport_ipp[] = {
-	{NXGE_FM_EREPORT_IPP_EOP_MISS,		"rx_eop_miss",
-						DDI_FM_DEVICE_INVAL_STATE,
-						DDI_SERVICE_LOST},
-	{NXGE_FM_EREPORT_IPP_SOP_MISS,		"rx_sop_miss",
-						DDI_FM_DEVICE_INVAL_STATE,
-						DDI_SERVICE_LOST},
-	{NXGE_FM_EREPORT_IPP_DFIFO_UE,		"rx_dfifo_ucorr_err",
-						DDI_FM_DEVICE_INTERN_UNCORR,
-						DDI_SERVICE_LOST},
-	{NXGE_FM_EREPORT_IPP_DFIFO_CE,		"rx_dfifo_corr_err",
-						DDI_FM_DEVICE_INTERN_CORR,
-						DDI_SERVICE_UNAFFECTED},
-	{NXGE_FM_EREPORT_IPP_PFIFO_PERR,	"rx_dfifo_parity_err",
-						DDI_FM_DEVICE_INTERN_UNCORR,
-						DDI_SERVICE_LOST},
-	{NXGE_FM_EREPORT_IPP_ECC_ERR_MAX,	"rx_ecc_err_max",
-						DDI_FM_DEVICE_INTERN_UNCORR,
-						DDI_SERVICE_UNAFFECTED},
-	{NXGE_FM_EREPORT_IPP_PFIFO_OVER,	"rx_pfifo_overflow",
-						DDI_FM_DEVICE_INVAL_STATE,
-						DDI_SERVICE_LOST},
-	{NXGE_FM_EREPORT_IPP_PFIFO_UND,		"rx_pfifo_underrun",
-						DDI_FM_DEVICE_INVAL_STATE,
-						DDI_SERVICE_LOST},
-	{NXGE_FM_EREPORT_IPP_BAD_CS_MX,		"rx_bad_cksum_max",
-						DDI_FM_DEVICE_INTERN_UNCORR,
-						DDI_SERVICE_UNAFFECTED},
-	{NXGE_FM_EREPORT_IPP_PKT_DIS_MX,	"rx_pkt_discard_max",
-						DDI_FM_DEVICE_INTERN_UNCORR,
-						DDI_SERVICE_UNAFFECTED},
-	{NXGE_FM_EREPORT_IPP_RESET_FAIL,	"rx_reset_fail",
-						DDI_FM_DEVICE_NO_RESPONSE,
-						DDI_SERVICE_LOST}
-};
-
-nxge_fm_ereport_attr_t nxge_fm_ereport_rdmc[] = {
-	{NXGE_FM_EREPORT_RDMC_DCF_ERR,		"rxdma_dcf_err",
-						DDI_FM_DEVICE_INTERN_UNCORR,
-						DDI_SERVICE_LOST},
-	{NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR,	"rxdma_rcr_ack_err",
-						DDI_FM_DEVICE_INTERN_UNCORR,
-						DDI_SERVICE_DEGRADED},
-	{NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR,	"rxdma_dc_fifo_err",
-						DDI_FM_DEVICE_INTERN_UNCORR,
-						DDI_SERVICE_LOST},
-	{NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR,	"rxdma_rcr_sha_par_err",
-						DDI_FM_DEVICE_INTERN_UNCORR,
-						DDI_SERVICE_DEGRADED},
-	{NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR,	"rxdma_rbr_pre_par_err",
-						DDI_FM_DEVICE_INTERN_UNCORR,
-						DDI_SERVICE_DEGRADED},
-	{NXGE_FM_EREPORT_RDMC_RBR_TMOUT,	"rxdma_rbr_tmout",
-						DDI_FM_DEVICE_NO_RESPONSE,
-						DDI_SERVICE_DEGRADED},
-	{NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR,	"rxdma_rsp_cnt_err",
-						DDI_FM_DEVICE_INVAL_STATE,
-						DDI_SERVICE_DEGRADED},
-	{NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS,	"rxdma_byte_en_bus",
-						DDI_FM_DEVICE_INVAL_STATE,
-						DDI_SERVICE_DEGRADED},
-	{NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR,	"rxdma_rsp_dat_err",
-						DDI_FM_DEVICE_INTERN_UNCORR,
-						DDI_SERVICE_DEGRADED},
-	{NXGE_FM_EREPORT_RDMC_ID_MISMATCH,	"rxdma_id_mismatch",
-						DDI_FM_DEVICE_INVAL_STATE,
-						DDI_SERVICE_LOST},
-	{NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR,	"rxdma_zcp_eop_err",
-						DDI_FM_DEVICE_INVAL_STATE,
-						DDI_SERVICE_LOST},
-	{NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR,	"rxdma_ipp_eop_err",
-						DDI_FM_DEVICE_INVAL_STATE,
-						DDI_SERVICE_LOST},
-	{NXGE_FM_EREPORT_RDMC_COMPLETION_ERR,	"rxdma_completion_err",
-						DDI_FM_DEVICE_INTERN_UNCORR,
-						DDI_SERVICE_UNAFFECTED},
-	{NXGE_FM_EREPORT_RDMC_CONFIG_ERR,	"rxdma_config_err",
-						DDI_FM_DEVICE_INVAL_STATE,
-						DDI_SERVICE_DEGRADED},
-	{NXGE_FM_EREPORT_RDMC_RCRINCON,		"rxdma_rcrincon",
-						DDI_FM_DEVICE_INVAL_STATE,
-						DDI_SERVICE_DEGRADED},
-	{NXGE_FM_EREPORT_RDMC_RCRFULL,		"rxdma_rcrfull",
-						DDI_FM_DEVICE_INVAL_STATE,
-						DDI_SERVICE_DEGRADED},
-	{NXGE_FM_EREPORT_RDMC_RBRFULL,		"rxdma_rbrfull",
-						DDI_FM_DEVICE_INVAL_STATE,
-						DDI_SERVICE_DEGRADED},
-	{NXGE_FM_EREPORT_RDMC_RBRLOGPAGE,	"rxdma_rbrlogpage",
-						DDI_FM_DEVICE_INVAL_STATE,
-						DDI_SERVICE_DEGRADED},
-	{NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE,	"rxdma_cfiglogpage",
-						DDI_FM_DEVICE_INVAL_STATE,
-						DDI_SERVICE_DEGRADED}
-};
-
-nxge_fm_ereport_attr_t nxge_fm_ereport_zcp[] = {
-	{NXGE_FM_EREPORT_ZCP_RRFIFO_UNDERRUN,	"rxzcopy_rrfifo_underrun",
-						DDI_FM_DEVICE_INVAL_STATE,
-						DDI_SERVICE_UNAFFECTED},
-	{NXGE_FM_EREPORT_ZCP_RSPFIFO_UNCORR_ERR,
-						"rxzcopy_rspfifo_uncorr_err",
-						DDI_FM_DEVICE_INTERN_UNCORR,
-						DDI_SERVICE_UNAFFECTED},
-	{NXGE_FM_EREPORT_ZCP_STAT_TBL_PERR,	"rxzcopy_stat_tbl_perr",
-						DDI_FM_DEVICE_INTERN_UNCORR,
-						DDI_SERVICE_UNAFFECTED},
-	{NXGE_FM_EREPORT_ZCP_DYN_TBL_PERR,	"rxzcopy_dyn_tbl_perr",
-						DDI_FM_DEVICE_INTERN_UNCORR,
-						DDI_SERVICE_UNAFFECTED},
-	{NXGE_FM_EREPORT_ZCP_BUF_TBL_PERR,	"rxzcopy_buf_tbl_perr",
-						DDI_FM_DEVICE_INTERN_UNCORR,
-						DDI_SERVICE_UNAFFECTED},
-	{NXGE_FM_EREPORT_ZCP_CFIFO_ECC,		"rxzcopy_cfifo_ecc",
-						DDI_FM_DEVICE_INTERN_CORR,
-						DDI_SERVICE_UNAFFECTED},
-	{NXGE_FM_EREPORT_ZCP_RRFIFO_OVERRUN,	"rxzcopy_rrfifo_overrun",
-						DDI_FM_DEVICE_INVAL_STATE,
-						DDI_SERVICE_UNAFFECTED},
-	{NXGE_FM_EREPORT_ZCP_BUFFER_OVERFLOW,	"rxzcopy_buffer_overflow",
-						DDI_FM_DEVICE_INVAL_STATE,
-						DDI_SERVICE_LOST},
-	{NXGE_FM_EREPORT_ZCP_TT_PROGRAM_ERR,	"rxzcopy_tt_program_err",
-						DDI_FM_DEVICE_INVAL_STATE,
-						DDI_SERVICE_UNAFFECTED},
-	{NXGE_FM_EREPORT_ZCP_RSP_TT_INDEX_ERR,	"rxzcopy_rsp_tt_index_err",
-						DDI_FM_DEVICE_INVAL_STATE,
-						DDI_SERVICE_UNAFFECTED},
-	{NXGE_FM_EREPORT_ZCP_SLV_TT_INDEX_ERR,	"rxzcopy_slv_tt_index_err",
-						DDI_FM_DEVICE_INVAL_STATE,
-						DDI_SERVICE_UNAFFECTED},
-	{NXGE_FM_EREPORT_ZCP_TT_INDEX_ERR,	"rxzcopy_tt_index_err",
-						DDI_FM_DEVICE_INVAL_STATE,
-						DDI_SERVICE_UNAFFECTED},
-	{NXGE_FM_EREPORT_ZCP_ACCESS_FAIL,	"rxzcopy_access_fail",
-						DDI_FM_DEVICE_NO_RESPONSE,
-						DDI_SERVICE_LOST},
-};
-
-nxge_fm_ereport_attr_t nxge_fm_ereport_rxmac[] = {
-	{NXGE_FM_EREPORT_RXMAC_UNDERFLOW,	"rxmac_underflow",
-						DDI_FM_DEVICE_INVAL_STATE,
-						DDI_SERVICE_UNAFFECTED},
-	{NXGE_FM_EREPORT_RXMAC_CRC_ERRCNT_EXP,	"rxmac_crc_errcnt_exp",
-						DDI_FM_DEVICE_INTERN_UNCORR,
-						DDI_SERVICE_UNAFFECTED},
-	{NXGE_FM_EREPORT_RXMAC_LENGTH_ERRCNT_EXP,
-						"rxmac_length_errcnt_exp",
-						DDI_FM_DEVICE_INTERN_UNCORR,
-						DDI_SERVICE_UNAFFECTED},
-	{NXGE_FM_EREPORT_RXMAC_VIOL_ERRCNT_EXP,	"rxmac_viol_errcnt_exp",
-						DDI_FM_DEVICE_INTERN_UNCORR,
-						DDI_SERVICE_UNAFFECTED},
-	{NXGE_FM_EREPORT_RXMAC_RXFRAG_CNT_EXP,	"rxmac_rxfrag_cnt_exp",
-						DDI_FM_DEVICE_INTERN_UNCORR,
-						DDI_SERVICE_UNAFFECTED},
-	{NXGE_FM_EREPORT_RXMAC_ALIGN_ECNT_EXP,	"rxmac_align_ecnt_exp",
-						DDI_FM_DEVICE_INTERN_UNCORR,
-						DDI_SERVICE_UNAFFECTED},
-	{NXGE_FM_EREPORT_RXMAC_LINKFAULT_CNT_EXP,
-						"rxmac_linkfault_cnt_exp",
-						DDI_FM_DEVICE_INTERN_UNCORR,
-						DDI_SERVICE_UNAFFECTED},
-	{NXGE_FM_EREPORT_RXMAC_RESET_FAIL,	"rxmac_reset_fail",
-						DDI_FM_DEVICE_NO_RESPONSE,
-						DDI_SERVICE_UNAFFECTED},
-};
-
-nxge_fm_ereport_attr_t nxge_fm_ereport_tdmc[] = {
-	{NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR,	"txdma_pref_buf_par_err",
-						DDI_FM_DEVICE_INTERN_UNCORR,
-						DDI_SERVICE_DEGRADED},
-	{NXGE_FM_EREPORT_TDMC_MBOX_ERR,		"txdma_mbox_err",
-						DDI_FM_DEVICE_NO_RESPONSE,
-						DDI_SERVICE_DEGRADED},
-	{NXGE_FM_EREPORT_TDMC_NACK_PREF,	"txdma_nack_pref",
-						DDI_FM_DEVICE_NO_RESPONSE,
-						DDI_SERVICE_DEGRADED},
-	{NXGE_FM_EREPORT_TDMC_NACK_PKT_RD,	"txdma_nack_pkt_rd",
-						DDI_FM_DEVICE_NO_RESPONSE,
-						DDI_SERVICE_DEGRADED},
-	{NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR,	"txdma_pkt_size_err",
-						DDI_FM_DEVICE_INVAL_STATE,
-						DDI_SERVICE_DEGRADED},
-	{NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW,	"txdma_tx_ring_oflow",
-						DDI_FM_DEVICE_INVAL_STATE,
-						DDI_SERVICE_DEGRADED},
-	{NXGE_FM_EREPORT_TDMC_CONF_PART_ERR,	"txdma_conf_part_err",
-						DDI_FM_DEVICE_INVAL_STATE,
-						DDI_SERVICE_DEGRADED},
-	{NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR,	"txdma_pkt_prt_err",
-						DDI_FM_DEVICE_INTERN_UNCORR,
-						DDI_SERVICE_DEGRADED},
-	{NXGE_FM_EREPORT_TDMC_RESET_FAIL,	"txdma_reset_fail",
-						DDI_FM_DEVICE_NO_RESPONSE,
-						DDI_SERVICE_LOST},
-};
-
-nxge_fm_ereport_attr_t nxge_fm_ereport_txc[] = {
-	{NXGE_FM_EREPORT_TXC_RO_CORRECT_ERR,	"tx_ro_correct_err",
-						DDI_FM_DEVICE_INTERN_CORR,
-						DDI_SERVICE_UNAFFECTED},
-	{NXGE_FM_EREPORT_TXC_RO_UNCORRECT_ERR,	"tx_ro_uncorrect_err",
-						DDI_FM_DEVICE_INTERN_UNCORR,
-						DDI_SERVICE_UNAFFECTED},
-	{NXGE_FM_EREPORT_TXC_SF_CORRECT_ERR,	"tx_sf_correct_err",
-						DDI_FM_DEVICE_INTERN_CORR,
-						DDI_SERVICE_UNAFFECTED},
-	{NXGE_FM_EREPORT_TXC_SF_UNCORRECT_ERR,	"tx_sf_uncorrect_err",
-						DDI_FM_DEVICE_INTERN_UNCORR,
-						DDI_SERVICE_UNAFFECTED},
-	{NXGE_FM_EREPORT_TXC_ASSY_DEAD,		"tx_assembly_uncorrect_err",
-						DDI_FM_DEVICE_INTERN_UNCORR,
-						DDI_SERVICE_UNAFFECTED},
-	{NXGE_FM_EREPORT_TXC_REORDER_ERR,	"tx_reorder_err",
-						DDI_FM_DEVICE_INVAL_STATE,
-						DDI_SERVICE_LOST},
-};
-
-nxge_fm_ereport_attr_t nxge_fm_ereport_txmac[] = {
-	{NXGE_FM_EREPORT_TXMAC_UNDERFLOW,	"txmac_underflow",
-						DDI_FM_DEVICE_INVAL_STATE,
-						DDI_SERVICE_UNAFFECTED},
-	{NXGE_FM_EREPORT_TXMAC_OVERFLOW,	"txmac_overflow",
-						DDI_FM_DEVICE_INVAL_STATE,
-						DDI_SERVICE_UNAFFECTED},
-	{NXGE_FM_EREPORT_TXMAC_TXFIFO_XFR_ERR,	"txmac_txfifo_xfr_err",
-						DDI_FM_DEVICE_INVAL_STATE,
-						DDI_SERVICE_UNAFFECTED},
-	{NXGE_FM_EREPORT_TXMAC_MAX_PKT_ERR,	"txmac_max_pkt_err",
-						DDI_FM_DEVICE_INTERN_UNCORR,
-						DDI_SERVICE_UNAFFECTED},
-	{NXGE_FM_EREPORT_TXMAC_RESET_FAIL,	"txmac_reset_fail",
-						DDI_FM_DEVICE_NO_RESPONSE,
-						DDI_SERVICE_UNAFFECTED},
-};
-
-nxge_fm_ereport_attr_t nxge_fm_ereport_espc[] = {
-	{NXGE_FM_EREPORT_ESPC_ACCESS_FAIL,	"eprom_access_fail",
-						DDI_FM_DEVICE_NO_RESPONSE,
-						DDI_SERVICE_LOST},
-};
-
-nxge_fm_ereport_attr_t nxge_fm_ereport_sw[] = {
-	{NXGE_FM_EREPORT_SW_INVALID_PORT_NUM,	"invalid_port_num",
-						DDI_FM_DEVICE_INVAL_STATE,
-						DDI_SERVICE_LOST},
-	{NXGE_FM_EREPORT_SW_INVALID_CHAN_NUM,	"invalid_chan_num",
-						DDI_FM_DEVICE_INVAL_STATE,
-						DDI_SERVICE_LOST},
-	{NXGE_FM_EREPORT_SW_INVALID_PARAM,	"invalid_param",
-						DDI_FM_DEVICE_INVAL_STATE,
-						DDI_SERVICE_LOST},
-};
-
-void
-nxge_fm_init(p_nxge_t nxgep, ddi_device_acc_attr_t *reg_attr,
-		ddi_device_acc_attr_t *desc_attr, ddi_dma_attr_t *dma_attr)
-{
-	ddi_iblock_cookie_t iblk;
-
-	nxgep->fm_capabilities = ddi_prop_get_int(DDI_DEV_T_ANY, nxgep->dip,
-			DDI_PROP_DONTPASS, "fm-capable", 1);
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-		"FM capable = %d\n", nxgep->fm_capabilities));
-
-	/* Only register with IO Fault Services if we have some capability */
-	if (nxgep->fm_capabilities) {
-		reg_attr->devacc_attr_access = DDI_FLAGERR_ACC;
-		desc_attr->devacc_attr_access = DDI_FLAGERR_ACC;
-		dma_attr->dma_attr_flags = DDI_DMA_FLAGERR;
-
-		/* Register capabilities with IO Fault Services */
-		ddi_fm_init(nxgep->dip, &nxgep->fm_capabilities, &iblk);
-
-		/*
-		 * Initialize pci ereport capabilities if ereport capable
-		 */
-		if (DDI_FM_EREPORT_CAP(nxgep->fm_capabilities) ||
-		    DDI_FM_ERRCB_CAP(nxgep->fm_capabilities))
-			pci_ereport_setup(nxgep->dip);
-	} else {
-		/*
-		 * These fields have to be cleared of FMA if there are no
-		 * FMA capabilities at runtime.
-		 */
-		reg_attr->devacc_attr_access = DDI_DEFAULT_ACC;
-		desc_attr->devacc_attr_access = DDI_DEFAULT_ACC;
-		dma_attr->dma_attr_flags = 0;
-	}
-}
-
-void
-nxge_fm_fini(p_nxge_t nxgep)
-{
-	/* Only unregister FMA capabilities if we registered some */
-	if (nxgep->fm_capabilities) {
-
-		/*
-		 * Release any resources allocated by pci_ereport_setup()
-		 */
-		if (DDI_FM_EREPORT_CAP(nxgep->fm_capabilities) ||
-		    DDI_FM_ERRCB_CAP(nxgep->fm_capabilities))
-			pci_ereport_teardown(nxgep->dip);
-
-		/*
-		 * Un-register error callback if error callback capable
-		 */
-		if (DDI_FM_ERRCB_CAP(nxgep->fm_capabilities))
-			ddi_fm_handler_unregister(nxgep->dip);
-
-		/* Unregister from IO Fault Services */
-		ddi_fm_fini(nxgep->dip);
-	}
-}
-
-void
-nxge_fm_npi_error_handler(p_nxge_t nxgep, npi_status_t status)
-{
-	uint8_t			block_id;
-	uint8_t			error_type;
-	nxge_fm_ereport_id_t	fm_ereport_id;
-	nxge_fm_ereport_attr_t	*fm_ereport_attr;
-	char			*class_name;
-	uint64_t		ena;
-	uint8_t			portn = 0;
-	uint8_t			chan = 0;
-	boolean_t		is_port;
-	boolean_t		is_chan;
-
-	if (status == NPI_SUCCESS)
-		return;
-
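-	/*
-	 * Decode the NPI status word: the block id, the error code, and
-	 * whether the error is scoped to a port and/or a DMA channel.
-	 */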
-	block_id = (status >> NPI_BLOCK_ID_SHIFT) & 0xF;
-	error_type = status & 0xFF;
-	is_port = (status & IS_PORT)? B_TRUE: B_FALSE;
-	is_chan = (status & IS_CHAN)? B_TRUE: B_FALSE;
-
-	if (is_port)
-		portn = (status >> NPI_PORT_CHAN_SHIFT) & 0xF;
-	else if (is_chan)
-		chan = (status >> NPI_PORT_CHAN_SHIFT) & 0xF;
-
-	/* Map error type into FM ereport id */
-
-	/* Handle all software errors */
-
-	if (((error_type >= COMMON_SW_ERR_START) &&
-				(error_type <= COMMON_SW_ERR_END)) ||
-		((error_type >= BLK_SPEC_SW_ERR_START) &&
-				(error_type <= BLK_SPEC_SW_ERR_END))) {
-		switch (error_type) {
-		case PORT_INVALID:
-			fm_ereport_id = NXGE_FM_EREPORT_SW_INVALID_PORT_NUM;
-			break;
-		case CHANNEL_INVALID:
-			fm_ereport_id = NXGE_FM_EREPORT_SW_INVALID_CHAN_NUM;
-			break;
-		default:
-			fm_ereport_id = NXGE_FM_EREPORT_SW_INVALID_PARAM;
-		}
-	} else if (((error_type >= COMMON_HW_ERR_START) &&
-				(error_type <= COMMON_HW_ERR_END)) ||
-		((error_type >= BLK_SPEC_HW_ERR_START) &&
-				(error_type <= BLK_SPEC_HW_ERR_END))) {
-		/* Handle hardware errors */
-		switch (error_type) {
-		case RESET_FAILED:
-			switch (block_id) {
-			case TXMAC_BLK_ID:
-				fm_ereport_id =
-					NXGE_FM_EREPORT_TXMAC_RESET_FAIL;
-				break;
-			case RXMAC_BLK_ID:
-				fm_ereport_id =
-					NXGE_FM_EREPORT_RXMAC_RESET_FAIL;
-				break;
-			case IPP_BLK_ID:
-				fm_ereport_id = NXGE_FM_EREPORT_IPP_RESET_FAIL;
-				break;
-			case TXDMA_BLK_ID:
-				fm_ereport_id = NXGE_FM_EREPORT_TDMC_RESET_FAIL;
-				break;
-			default:
-				fm_ereport_id = NXGE_FM_EREPORT_UNKNOWN;
-			}
-			break;
-		case WRITE_FAILED:
-		case READ_FAILED:
-			switch (block_id) {
-			case MIF_BLK_ID:
-				fm_ereport_id = NXGE_FM_EREPORT_MIF_ACCESS_FAIL;
-				break;
-			case ZCP_BLK_ID:
-				fm_ereport_id = NXGE_FM_EREPORT_ZCP_ACCESS_FAIL;
-				break;
-			case ESPC_BLK_ID:
-				fm_ereport_id =
-					NXGE_FM_EREPORT_ESPC_ACCESS_FAIL;
-				break;
-			case FFLP_BLK_ID:
-				fm_ereport_id =
-					NXGE_FM_EREPORT_FFLP_ACCESS_FAIL;
-				break;
-			default:
-				fm_ereport_id = NXGE_FM_EREPORT_UNKNOWN;
-			}
-			break;
-		case TXDMA_HW_STOP_FAILED:
-		case TXDMA_HW_RESUME_FAILED:
-			fm_ereport_id = NXGE_FM_EREPORT_TDMC_RESET_FAIL;
-			break;
-		}
-	}
-
-	fm_ereport_attr = nxge_fm_get_ereport_attr(fm_ereport_id);
-	if (fm_ereport_attr == NULL)
-		return;
-	class_name = fm_ereport_attr->eclass;
-
-	ena = fm_ena_generate(0, FM_ENA_FMT1);
-
-	if ((is_port == B_FALSE) && (is_chan == B_FALSE)) {
-		ddi_fm_ereport_post(nxgep->dip, class_name, ena,
-			DDI_NOSLEEP,
-			FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
-			NULL);
-	} else if ((is_port == B_TRUE) && (is_chan == B_FALSE)) {
-		ddi_fm_ereport_post(nxgep->dip, class_name, ena, DDI_NOSLEEP,
-			FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
-			ERNAME_ERR_PORTN, DATA_TYPE_UINT8, portn,
-			NULL);
-	} else if ((is_port == B_FALSE) && (is_chan == B_TRUE)) {
-		ddi_fm_ereport_post(nxgep->dip, class_name, ena, DDI_NOSLEEP,
-			FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
-			ERNAME_ERR_DCHAN, DATA_TYPE_UINT8, chan,
-			NULL);
-	} else if ((is_port == B_TRUE) && (is_chan == B_TRUE)) {
-		ddi_fm_ereport_post(nxgep->dip, class_name, ena, DDI_NOSLEEP,
-			FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
-			ERNAME_ERR_PORTN, DATA_TYPE_UINT8, portn,
-			ERNAME_ERR_DCHAN, DATA_TYPE_UINT8, chan,
-			NULL);
-	}
-}
-
-static nxge_fm_ereport_attr_t *
-nxge_fm_get_ereport_attr(nxge_fm_ereport_id_t ereport_id)
-{
-	nxge_fm_ereport_attr_t *attr;
-	uint8_t	blk_id = ((ereport_id >> EREPORT_FM_ID_SHIFT) &
-							EREPORT_FM_ID_MASK);
-	uint8_t index = (ereport_id & EREPORT_INDEX_MASK);
-
-	switch (blk_id) {
-	case FM_SW_ID:
-		attr = &nxge_fm_ereport_sw[index];
-		break;
-	case FM_PCS_ID:
-		attr = &nxge_fm_ereport_pcs[index];
-		break;
-	case FM_TXMAC_ID:
-		attr = &nxge_fm_ereport_txmac[index];
-		break;
-	case FM_RXMAC_ID:
-		attr = &nxge_fm_ereport_rxmac[index];
-		break;
-	case FM_MIF_ID:
-		attr = &nxge_fm_ereport_mif[index];
-		break;
-	case FM_FFLP_ID:
-		attr = &nxge_fm_ereport_fflp[index];
-		break;
-	case FM_ZCP_ID:
-		attr = &nxge_fm_ereport_zcp[index];
-		break;
-	case FM_RXDMA_ID:
-		attr = &nxge_fm_ereport_rdmc[index];
-		break;
-	case FM_TXDMA_ID:
-		attr = &nxge_fm_ereport_tdmc[index];
-		break;
-	case FM_IPP_ID:
-		attr = &nxge_fm_ereport_ipp[index];
-		break;
-	case FM_TXC_ID:
-		attr = &nxge_fm_ereport_txc[index];
-		break;
-	case FM_ESPC_ID:
-		attr = &nxge_fm_ereport_espc[index];
-		break;
-	default:
-		attr = NULL;
-	}
-
-	return (attr);
-}
-
-static void
-nxge_fm_ereport(p_nxge_t nxgep, uint8_t err_portn, uint8_t err_chan,
-					nxge_fm_ereport_attr_t *ereport)
-{
-	uint64_t		ena;
-	char			eclass[FM_MAX_CLASS];
-	char			*err_str;
-	p_nxge_stats_t		statsp;
-
-	(void) snprintf(eclass, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE,
-			ereport->eclass);
-	err_str = ereport->str;
-	ena = fm_ena_generate(0, FM_ENA_FMT1);
-	statsp = nxgep->statsp;
-
-	if (DDI_FM_EREPORT_CAP(nxgep->fm_capabilities)) {
-		switch (ereport->index) {
-		case NXGE_FM_EREPORT_XPCS_LINK_DOWN:
-		case NXGE_FM_EREPORT_XPCS_TX_LINK_FAULT:
-		case NXGE_FM_EREPORT_XPCS_RX_LINK_FAULT:
-		case NXGE_FM_EREPORT_PCS_LINK_DOWN:
-		case NXGE_FM_EREPORT_PCS_REMOTE_FAULT:
-			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
-				DDI_NOSLEEP,
-				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
-				ERNAME_ERR_PORTN, DATA_TYPE_UINT8, err_portn,
-				NULL);
-			break;
-		case NXGE_FM_EREPORT_IPP_EOP_MISS:
-		case NXGE_FM_EREPORT_IPP_SOP_MISS:
-			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
-				DDI_NOSLEEP,
-				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
-				ERNAME_ERR_PORTN, DATA_TYPE_UINT8, err_portn,
-				ERNAME_DFIFO_RD_PTR, DATA_TYPE_UINT16,
-					statsp->ipp_stats.errlog.dfifo_rd_ptr,
-				ERNAME_IPP_STATE_MACH, DATA_TYPE_UINT32,
-					statsp->ipp_stats.errlog.state_mach,
-				NULL);
-			break;
-		case NXGE_FM_EREPORT_IPP_DFIFO_UE:
-			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
-				DDI_NOSLEEP,
-				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
-				ERNAME_ERR_PORTN, DATA_TYPE_UINT8, err_portn,
-				ERNAME_DFIFO_ENTRY, DATA_TYPE_UINT16,
-				nxgep->ipp.status.bits.w0.dfifo_ecc_err_idx,
-				ERNAME_DFIFO_SYNDROME, DATA_TYPE_UINT16,
-					statsp->ipp_stats.errlog.ecc_syndrome,
-				NULL);
-			break;
-		case NXGE_FM_EREPORT_IPP_PFIFO_PERR:
-			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
-				DDI_NOSLEEP,
-				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
-				ERNAME_ERR_PORTN, DATA_TYPE_UINT8, err_portn,
-				ERNAME_PFIFO_ENTRY, DATA_TYPE_UINT8,
-				nxgep->ipp.status.bits.w0.pre_fifo_perr_idx,
-				NULL);
-			break;
-		case NXGE_FM_EREPORT_IPP_DFIFO_CE:
-		case NXGE_FM_EREPORT_IPP_ECC_ERR_MAX:
-			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
-				DDI_NOSLEEP,
-				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
-				ERNAME_ERR_PORTN, DATA_TYPE_UINT8, err_portn,
-				NULL);
-			break;
-		case NXGE_FM_EREPORT_IPP_PFIFO_OVER:
-		case NXGE_FM_EREPORT_IPP_PFIFO_UND:
-			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
-				DDI_NOSLEEP,
-				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
-				ERNAME_ERR_PORTN, DATA_TYPE_UINT8, err_portn,
-				ERNAME_IPP_STATE_MACH, DATA_TYPE_UINT32,
-					statsp->ipp_stats.errlog.state_mach,
-				NULL);
-			break;
-		case NXGE_FM_EREPORT_IPP_BAD_CS_MX:
-		case NXGE_FM_EREPORT_IPP_PKT_DIS_MX:
-			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
-				DDI_NOSLEEP,
-				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
-				ERNAME_ERR_PORTN, DATA_TYPE_UINT8, err_portn,
-				NULL);
-			break;
-		case NXGE_FM_EREPORT_FFLP_TCAM_ERR:
-			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
-				DDI_NOSLEEP,
-				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
-				ERNAME_TCAM_ERR_LOG, DATA_TYPE_UINT32,
-					statsp->fflp_stats.errlog.tcam,
-				NULL);
-			break;
-		case NXGE_FM_EREPORT_FFLP_VLAN_PAR_ERR:
-			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
-				DDI_NOSLEEP,
-				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
-				ERNAME_VLANTAB_ERR_LOG, DATA_TYPE_UINT32,
-					statsp->fflp_stats.errlog.vlan,
-				NULL);
-			break;
-		case NXGE_FM_EREPORT_FFLP_HASHT_DATA_ERR:
-		{
-			int rdc_grp;
-			hash_tbl_data_log_t hash_log;
-
-			for (rdc_grp = 0; rdc_grp < MAX_PARTITION; rdc_grp++) {
-				hash_log.value = nxgep->classifier.fflp_stats->
-						errlog.hash_pio[rdc_grp];
-				if (hash_log.bits.ldw.pio_err) {
-					ddi_fm_ereport_post(nxgep->dip, eclass,
-						ena, DDI_NOSLEEP,
-						FM_VERSION, DATA_TYPE_UINT8,
-						FM_EREPORT_VERS0,
-						ERNAME_HASHTAB_ERR_LOG,
-						DATA_TYPE_UINT32,
-						nxgep->classifier.fflp_stats->
-						errlog.hash_pio[rdc_grp], NULL);
-				}
-			}
-		}
-			break;
-		case NXGE_FM_EREPORT_FFLP_HASHT_LOOKUP_ERR:
-			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
-				DDI_NOSLEEP,
-				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
-				ERNAME_HASHT_LOOKUP_ERR_LOG0, DATA_TYPE_UINT32,
-					statsp->fflp_stats.errlog.hash_lookup1,
-				ERNAME_HASHT_LOOKUP_ERR_LOG1, DATA_TYPE_UINT32,
-					statsp->fflp_stats.errlog.hash_lookup2,
-				NULL);
-			break;
-		case NXGE_FM_EREPORT_RDMC_DCF_ERR:
-		case NXGE_FM_EREPORT_RDMC_RBR_TMOUT:
-		case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR:
-		case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS:
-		case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR:
-		case NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR:
-		case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR:
-		case NXGE_FM_EREPORT_RDMC_CONFIG_ERR:
-		case NXGE_FM_EREPORT_RDMC_RCRINCON:
-		case NXGE_FM_EREPORT_RDMC_RCRFULL:
-		case NXGE_FM_EREPORT_RDMC_RBRFULL:
-		case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE:
-		case NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE:
-		case NXGE_FM_EREPORT_RDMC_ID_MISMATCH:
-		case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR:
-		case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR:
-			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
-				DDI_NOSLEEP,
-				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
-				ERNAME_ERR_PORTN, DATA_TYPE_UINT8, err_portn,
-				ERNAME_ERR_DCHAN, DATA_TYPE_UINT8, err_chan,
-				NULL);
-			break;
-		case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR:
-		case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR:
-			{
-			uint32_t err_log;
-			if (ereport->index == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR)
-				err_log = (uint32_t)statsp->
-				rdc_stats[err_chan].errlog.pre_par.value;
-			else
-				err_log = (uint32_t)statsp->
-				rdc_stats[err_chan].errlog.sha_par.value;
-			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
-				DDI_NOSLEEP,
-				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
-				ERNAME_ERR_PORTN, DATA_TYPE_UINT8, err_portn,
-				ERNAME_ERR_DCHAN, DATA_TYPE_UINT8, err_chan,
-				ERNAME_RDMC_PAR_ERR_LOG, DATA_TYPE_UINT8,
-				err_log, NULL);
-			}
-			break;
-		case NXGE_FM_EREPORT_RDMC_COMPLETION_ERR:
-			{
-			uint8_t err_type;
-			err_type = statsp->
-				rdc_stats[err_chan].errlog.compl_err_type;
-			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
-				DDI_NOSLEEP,
-				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
-				ERNAME_ERR_PORTN, DATA_TYPE_UINT8, err_portn,
-				ERNAME_ERR_DCHAN, DATA_TYPE_UINT8, err_chan,
-				ERNAME_RDC_ERR_TYPE, DATA_TYPE_UINT8,
-				err_type, NULL);
-			}
-			break;
-
-		case NXGE_FM_EREPORT_ZCP_RRFIFO_UNDERRUN:
-		case NXGE_FM_EREPORT_ZCP_RRFIFO_OVERRUN:
-		case NXGE_FM_EREPORT_ZCP_BUFFER_OVERFLOW:
-			{
-			uint32_t sm;
-			sm = statsp->
-				zcp_stats.errlog.state_mach.bits.ldw.state;
-			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
-				DDI_NOSLEEP,
-				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
-				sm, DATA_TYPE_UINT32,
-				NULL);
-			break;
-			}
-		case NXGE_FM_EREPORT_ZCP_CFIFO_ECC:
-			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
-				DDI_NOSLEEP,
-				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
-				ERNAME_ERR_PORTN, DATA_TYPE_UINT8,
-				err_portn,
-				NULL);
-			break;
-		case NXGE_FM_EREPORT_ZCP_RSPFIFO_UNCORR_ERR:
-		case NXGE_FM_EREPORT_ZCP_STAT_TBL_PERR:
-		case NXGE_FM_EREPORT_ZCP_DYN_TBL_PERR:
-		case NXGE_FM_EREPORT_ZCP_BUF_TBL_PERR:
-		case NXGE_FM_EREPORT_ZCP_TT_PROGRAM_ERR:
-		case NXGE_FM_EREPORT_ZCP_RSP_TT_INDEX_ERR:
-		case NXGE_FM_EREPORT_ZCP_SLV_TT_INDEX_ERR:
-		case NXGE_FM_EREPORT_ZCP_TT_INDEX_ERR:
-		case NXGE_FM_EREPORT_RXMAC_UNDERFLOW:
-		case NXGE_FM_EREPORT_RXMAC_CRC_ERRCNT_EXP:
-		case NXGE_FM_EREPORT_RXMAC_LENGTH_ERRCNT_EXP:
-		case NXGE_FM_EREPORT_RXMAC_VIOL_ERRCNT_EXP:
-		case NXGE_FM_EREPORT_RXMAC_RXFRAG_CNT_EXP:
-		case NXGE_FM_EREPORT_RXMAC_LINKFAULT_CNT_EXP:
-		case NXGE_FM_EREPORT_RXMAC_ALIGN_ECNT_EXP:
-			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
-				DDI_NOSLEEP,
-				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
-				ERNAME_ERR_PORTN, DATA_TYPE_UINT8, err_portn,
-				NULL);
-			break;
-		case NXGE_FM_EREPORT_TDMC_MBOX_ERR:
-		case NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW:
-			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
-				DDI_NOSLEEP,
-				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
-				ERNAME_ERR_PORTN, DATA_TYPE_UINT8, err_portn,
-				ERNAME_ERR_DCHAN, DATA_TYPE_UINT8, err_chan,
-				NULL);
-			break;
-		case NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR:
-		case NXGE_FM_EREPORT_TDMC_NACK_PREF:
-		case NXGE_FM_EREPORT_TDMC_NACK_PKT_RD:
-		case NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR:
-		case NXGE_FM_EREPORT_TDMC_CONF_PART_ERR:
-		case NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR:
-			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
-				DDI_NOSLEEP,
-				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
-				ERNAME_ERR_PORTN, DATA_TYPE_UINT8, err_portn,
-				ERNAME_ERR_DCHAN, DATA_TYPE_UINT8, err_chan,
-				ERNAME_TDMC_ERR_LOG1, DATA_TYPE_UINT32,
-					statsp->
-					tdc_stats[err_chan].errlog.logl.value,
-				ERNAME_TDMC_ERR_LOG1, DATA_TYPE_UINT32,
-				statsp->tdc_stats[err_chan].errlog.logh.value,
-				NULL);
-			break;
-		case NXGE_FM_EREPORT_TXC_RO_CORRECT_ERR:
-		case NXGE_FM_EREPORT_TXC_RO_UNCORRECT_ERR:
-			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
-				DDI_NOSLEEP,
-				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
-				ERNAME_ERR_PORTN, DATA_TYPE_UINT8, err_portn,
-				ERNAME_TXC_ROECC_ADDR, DATA_TYPE_UINT16,
-					statsp->txc_stats.errlog.ro_st.roecc.
-					bits.ldw.ecc_address,
-				ERNAME_TXC_ROECC_DATA0, DATA_TYPE_UINT32,
-					statsp->txc_stats.errlog.ro_st.d0.
-					bits.ldw.ro_ecc_data0,
-				ERNAME_TXC_ROECC_DATA1, DATA_TYPE_UINT32,
-					statsp->txc_stats.errlog.ro_st.d1.
-					bits.ldw.ro_ecc_data1,
-				ERNAME_TXC_ROECC_DATA2, DATA_TYPE_UINT32,
-					statsp->txc_stats.errlog.ro_st.d2.
-					bits.ldw.ro_ecc_data2,
-				ERNAME_TXC_ROECC_DATA3, DATA_TYPE_UINT32,
-					statsp->txc_stats.errlog.ro_st.d3.
-					bits.ldw.ro_ecc_data3,
-				ERNAME_TXC_ROECC_DATA4, DATA_TYPE_UINT32,
-					statsp->txc_stats.errlog.ro_st.d4.
-					bits.ldw.ro_ecc_data4,
-				NULL);
-			break;
-		case NXGE_FM_EREPORT_TXC_REORDER_ERR:
-			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
-				DDI_NOSLEEP,
-				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
-				ERNAME_DETAILED_ERR_TYPE, DATA_TYPE_STRING,
-					err_str,
-				ERNAME_ERR_PORTN, DATA_TYPE_UINT8, err_portn,
-				ERNAME_TXC_RO_STATE0, DATA_TYPE_UINT32,
-					(uint32_t)statsp->
-					txc_stats.errlog.ro_st.st0.value,
-				ERNAME_TXC_RO_STATE1, DATA_TYPE_UINT32,
-					(uint32_t)statsp->
-					txc_stats.errlog.ro_st.st1.value,
-				ERNAME_TXC_RO_STATE2, DATA_TYPE_UINT32,
-					(uint32_t)statsp->
-					txc_stats.errlog.ro_st.st2.value,
-				ERNAME_TXC_RO_STATE3, DATA_TYPE_UINT32,
-					(uint32_t)statsp->
-					txc_stats.errlog.ro_st.st3.value,
-				ERNAME_TXC_RO_STATE_CTL, DATA_TYPE_UINT32,
-					(uint32_t)statsp->
-					txc_stats.errlog.ro_st.ctl.value,
-				ERNAME_TXC_RO_TIDS, DATA_TYPE_UINT32,
-					(uint32_t)statsp->
-					txc_stats.errlog.ro_st.tids.value,
-				NULL);
-			break;
-		case NXGE_FM_EREPORT_TXC_SF_CORRECT_ERR:
-		case NXGE_FM_EREPORT_TXC_SF_UNCORRECT_ERR:
-			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
-				DDI_NOSLEEP,
-				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
-				ERNAME_ERR_PORTN, DATA_TYPE_UINT8, err_portn,
-				ERNAME_TXC_SFECC_ADDR, DATA_TYPE_UINT32,
-					statsp->txc_stats.errlog.sf_st.sfecc.
-					bits.ldw.ecc_address,
-				ERNAME_TXC_SFECC_DATA0, DATA_TYPE_UINT32,
-					statsp->txc_stats.errlog.sf_st.d0.
-					bits.ldw.sf_ecc_data0,
-				ERNAME_TXC_SFECC_DATA0, DATA_TYPE_UINT32,
-					statsp->txc_stats.errlog.sf_st.d1.
-					bits.ldw.sf_ecc_data1,
-				ERNAME_TXC_SFECC_DATA0, DATA_TYPE_UINT32,
-					statsp->txc_stats.errlog.sf_st.d2.
-					bits.ldw.sf_ecc_data2,
-				ERNAME_TXC_SFECC_DATA0, DATA_TYPE_UINT32,
-					statsp->txc_stats.errlog.sf_st.d3.
-					bits.ldw.sf_ecc_data3,
-				ERNAME_TXC_SFECC_DATA0, DATA_TYPE_UINT32,
-					statsp->txc_stats.errlog.sf_st.d4.
-					bits.ldw.sf_ecc_data4,
-				NULL);
-			break;
-		case NXGE_FM_EREPORT_TXMAC_UNDERFLOW:
-		case NXGE_FM_EREPORT_TXMAC_OVERFLOW:
-		case NXGE_FM_EREPORT_TXMAC_TXFIFO_XFR_ERR:
-		case NXGE_FM_EREPORT_TXMAC_MAX_PKT_ERR:
-		case NXGE_FM_EREPORT_SW_INVALID_PORT_NUM:
-		case NXGE_FM_EREPORT_SW_INVALID_CHAN_NUM:
-		case NXGE_FM_EREPORT_SW_INVALID_PARAM:
-			ddi_fm_ereport_post(nxgep->dip, eclass, ena,
-				DDI_NOSLEEP,
-				FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
-				ERNAME_ERR_PORTN, DATA_TYPE_UINT8, err_portn,
-				NULL);
-			break;
-		}
-
-	}
-}
-
-void
-nxge_fm_report_error(p_nxge_t nxgep, uint8_t err_portn, uint8_t err_chan,
-					nxge_fm_ereport_id_t fm_ereport_id)
-{
-	nxge_fm_ereport_attr_t	*fm_ereport_attr;
-
-	fm_ereport_attr = nxge_fm_get_ereport_attr(fm_ereport_id);
-
-	if (fm_ereport_attr != NULL) {
-		nxge_fm_ereport(nxgep, err_portn, err_chan, fm_ereport_attr);
-		ddi_fm_service_impact(nxgep->dip, fm_ereport_attr->impact);
-	}
-}
-
-int
-fm_check_acc_handle(ddi_acc_handle_t handle)
-{
-	ddi_fm_error_t err;
-
-	ddi_fm_acc_err_get(handle, &err, DDI_FME_VERSION);
-#ifndef	NXGE_FM_S10
-	ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
-#endif
-	return (err.fme_status);
-}
-
-int
-fm_check_dma_handle(ddi_dma_handle_t handle)
-{
-	ddi_fm_error_t err;
-
-	ddi_fm_dma_err_get(handle, &err, DDI_FME_VERSION);
-	return (err.fme_status);
-}
--- a/usr/src/uts/sun4v/io/nxge/nxge_fzc.c	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1039 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#include	<nxge_impl.h>
-#include	<npi_mac.h>
-#include	<npi_rxdma.h>
-
-#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
-static int	nxge_herr2kerr(uint64_t);
-#endif
-
-/*
- * The following interfaces are controlled by the
- * function control registers. Some global registers
- * are to be initialized by only one of the 2/4 functions.
- * Use the test and set register.
- */
-/*ARGSUSED*/
-nxge_status_t
-nxge_test_and_set(p_nxge_t nxgep, uint8_t tas)
-{
-	npi_handle_t		handle;
-	npi_status_t		rs = NPI_SUCCESS;
-
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	if ((rs = npi_dev_func_sr_sr_get_set_clear(handle, tas))
-			!= NPI_SUCCESS) {
-		return (NXGE_ERROR | rs);
-	}
-
-	return (NXGE_OK);
-}
-
-nxge_status_t
-nxge_set_fzc_multi_part_ctl(p_nxge_t nxgep, boolean_t mpc)
-{
-	npi_handle_t		handle;
-	npi_status_t		rs = NPI_SUCCESS;
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_set_fzc_multi_part_ctl"));
-
-	/*
-	 * In multi-partitioning, the partition manager
-	 * who owns function zero should set this multi-partition
-	 * control bit.
-	 */
-	if (nxgep->use_partition && nxgep->function_num) {
-		return (NXGE_ERROR);
-	}
-
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	if ((rs = npi_fzc_mpc_set(handle, mpc)) != NPI_SUCCESS) {
-		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
-			"<== nxge_set_fzc_multi_part_ctl"));
-		return (NXGE_ERROR | rs);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_set_fzc_multi_part_ctl"));
-
-	return (NXGE_OK);
-}
-
-nxge_status_t
-nxge_get_fzc_multi_part_ctl(p_nxge_t nxgep, boolean_t *mpc_p)
-{
-	npi_handle_t		handle;
-	npi_status_t		rs = NPI_SUCCESS;
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_get_fzc_multi_part_ctl"));
-
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	if ((rs = npi_fzc_mpc_get(handle, mpc_p)) != NPI_SUCCESS) {
-		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
-			"<== nxge_get_fzc_multi_part_ctl"));
-		return (NXGE_ERROR | rs);
-	}
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_get_fzc_multi_part_ctl"));
-
-	return (NXGE_OK);
-}
-
-/*
- * System interrupt registers that are under function zero
- * management.
- */
-nxge_status_t
-nxge_fzc_intr_init(p_nxge_t nxgep)
-{
-	nxge_status_t	status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_fzc_intr_init"));
-
-	/* Configure the initial timer resolution */
-	if ((status = nxge_fzc_intr_tmres_set(nxgep)) != NXGE_OK) {
-		return (status);
-	}
-
-	switch (nxgep->niu_type) {
-	case NEPTUNE:
-	case NEPTUNE_2:
-		/*
-		 * Set up the logical device group's logical devices that
-		 * the group owns.
-		 */
-		if ((status = nxge_fzc_intr_ldg_num_set(nxgep))
-				!= NXGE_OK) {
-			break;
-		}
-
-		/* Configure the system interrupt data */
-		if ((status = nxge_fzc_intr_sid_set(nxgep)) != NXGE_OK) {
-			break;
-		}
-
-		break;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_fzc_intr_init"));
-
-	return (status);
-}
-
-nxge_status_t
-nxge_fzc_intr_ldg_num_set(p_nxge_t nxgep)
-{
-	p_nxge_ldg_t	ldgp;
-	p_nxge_ldv_t	ldvp;
-	npi_handle_t	handle;
-	int		i, j;
-	npi_status_t	rs = NPI_SUCCESS;
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_fzc_intr_ldg_num_set"));
-
-	if (nxgep->ldgvp == NULL) {
-		return (NXGE_ERROR);
-	}
-
-	ldgp = nxgep->ldgvp->ldgp;
-	ldvp = nxgep->ldgvp->ldvp;
-	if (ldgp == NULL || ldvp == NULL) {
-		return (NXGE_ERROR);
-	}
-
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-
-	for (i = 0; i < nxgep->ldgvp->ldg_intrs; i++, ldgp++) {
-		NXGE_DEBUG_MSG((nxgep, INT_CTL,
-			"==> nxge_fzc_intr_ldg_num_set "
-			"<== nxge_f(Neptune): # ldv %d "
-			"in group %d", ldgp->nldvs, ldgp->ldg));
-
-		for (j = 0; j < ldgp->nldvs; j++, ldvp++) {
-			rs = npi_fzc_ldg_num_set(handle, ldvp->ldv,
-				ldvp->ldg_assigned);
-			if (rs != NPI_SUCCESS) {
-				NXGE_DEBUG_MSG((nxgep, INT_CTL,
-					"<== nxge_fzc_intr_ldg_num_set failed "
-					" rs 0x%x ldv %d ldg %d",
-					rs, ldvp->ldv, ldvp->ldg_assigned));
-				return (NXGE_ERROR | rs);
-			}
-			NXGE_DEBUG_MSG((nxgep, INT_CTL,
-				"<== nxge_fzc_intr_ldg_num_set OK "
-				" ldv %d ldg %d",
-				ldvp->ldv, ldvp->ldg_assigned));
-		}
-	}
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_fzc_intr_ldg_num_set"));
-
-	return (NXGE_OK);
-}
-
-nxge_status_t
-nxge_fzc_intr_tmres_set(p_nxge_t nxgep)
-{
-	npi_handle_t	handle;
-	npi_status_t	rs = NPI_SUCCESS;
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_fzc_intr_tmres_set"));
-	if (nxgep->ldgvp == NULL) {
-		return (NXGE_ERROR);
-	}
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	if ((rs = npi_fzc_ldg_timer_res_set(handle, nxgep->ldgvp->tmres))) {
-		return (NXGE_ERROR | rs);
-	}
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_fzc_intr_tmres_set"));
-
-	return (NXGE_OK);
-}
-
-nxge_status_t
-nxge_fzc_intr_sid_set(p_nxge_t nxgep)
-{
-	npi_handle_t	handle;
-	p_nxge_ldg_t	ldgp;
-	fzc_sid_t	sid;
-	int		i;
-	npi_status_t	rs = NPI_SUCCESS;
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_fzc_intr_sid_set"));
-	if (nxgep->ldgvp == NULL) {
-		NXGE_DEBUG_MSG((nxgep, INT_CTL,
-			"<== nxge_fzc_intr_sid_set: no ldg"));
-		return (NXGE_ERROR);
-	}
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	ldgp = nxgep->ldgvp->ldgp;
-	NXGE_DEBUG_MSG((nxgep, INT_CTL,
-		"==> nxge_fzc_intr_sid_set: #int %d", nxgep->ldgvp->ldg_intrs));
-	for (i = 0; i < nxgep->ldgvp->ldg_intrs; i++, ldgp++) {
-		sid.ldg = ldgp->ldg;
-		sid.niu = B_FALSE;
-		sid.func = ldgp->func;
-		sid.vector = ldgp->vector;
-		NXGE_DEBUG_MSG((nxgep, INT_CTL,
-			"==> nxge_fzc_intr_sid_set(%d): func %d group %d "
-			"vector %d",
-			i, sid.func, sid.ldg, sid.vector));
-		rs = npi_fzc_sid_set(handle, sid);
-		if (rs != NPI_SUCCESS) {
-			NXGE_DEBUG_MSG((nxgep, INT_CTL,
-				"<== nxge_fzc_intr_sid_set:failed 0x%x",
-				rs));
-			return (NXGE_ERROR | rs);
-		}
-	}
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_fzc_intr_sid_set"));
-
-	return (NXGE_OK);
-
-}
-
-/*
- * Receive DMA registers that are under function zero
- * management.
- */
-/*ARGSUSED*/
-nxge_status_t
-nxge_init_fzc_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
-	p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
-{
-	nxge_status_t	status = NXGE_OK;
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_init_fzc_rxdma_channel"));
-
-	switch (nxgep->niu_type) {
-	case NEPTUNE:
-	case NEPTUNE_2:
-	default:
-		/* Initialize the RXDMA logical pages */
-		status = nxge_init_fzc_rxdma_channel_pages(nxgep, channel,
-			rbr_p);
-		if (status != NXGE_OK) {
-			return (status);
-		}
-
-		break;
-
-#ifndef	NIU_HV_WORKAROUND
-	case N2_NIU:
-#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
-		NXGE_DEBUG_MSG((nxgep, RX_CTL,
-			"==> nxge_init_fzc_rxdma_channel: N2_NIU - call HV "
-			"set up logical pages"));
-		/* Initialize the RXDMA logical pages */
-		status = nxge_init_hv_fzc_rxdma_channel_pages(nxgep, channel,
-			rbr_p);
-		if (status != NXGE_OK) {
-			return (status);
-		}
-#endif
-		break;
-#else
-	case N2_NIU:
-		NXGE_DEBUG_MSG((nxgep, RX_CTL,
-			"==> nxge_init_fzc_rxdma_channel: N2_NIU - NEED to "
-			"set up logical pages"));
-		/* Initialize the RXDMA logical pages */
-		status = nxge_init_fzc_rxdma_channel_pages(nxgep, channel,
-			rbr_p);
-		if (status != NXGE_OK) {
-			return (status);
-		}
-
-		break;
-#endif
-	}
-
-	/* Configure RED parameters */
-	status = nxge_init_fzc_rxdma_channel_red(nxgep, channel, rcr_p);
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_init_fzc_rxdma_channel"));
-	return (status);
-}
-
-/*ARGSUSED*/
-nxge_status_t
-nxge_init_fzc_rxdma_channel_pages(p_nxge_t nxgep,
-		uint16_t channel, p_rx_rbr_ring_t rbrp)
-{
-	npi_handle_t		handle;
-	dma_log_page_t		cfg;
-	npi_status_t		rs = NPI_SUCCESS;
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
-		"==> nxge_init_fzc_rxdma_channel_pages"));
-
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	/*
-	 * Initialize logical page 1.
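-	 * Each logical page configuration carries a valid bit, an
-	 * address value/mask pair and a relocation value that the DMA
-	 * engine uses for address range checking and relocation.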
-	 */
-	cfg.func_num = nxgep->function_num;
-	cfg.page_num = 0;
-	cfg.valid = rbrp->page_valid.bits.ldw.page0;
-	cfg.value = rbrp->page_value_1.value;
-	cfg.mask = rbrp->page_mask_1.value;
-	cfg.reloc = rbrp->page_reloc_1.value;
-	rs = npi_rxdma_cfg_logical_page(handle, channel,
-			(p_dma_log_page_t)&cfg);
-	if (rs != NPI_SUCCESS) {
-		return (NXGE_ERROR | rs);
-	}
-
-	/*
-	 * Initialize logical page 2.
-	 */
-	cfg.page_num = 1;
-	cfg.valid = rbrp->page_valid.bits.ldw.page1;
-	cfg.value = rbrp->page_value_2.value;
-	cfg.mask = rbrp->page_mask_2.value;
-	cfg.reloc = rbrp->page_reloc_2.value;
-
-	rs = npi_rxdma_cfg_logical_page(handle, channel, &cfg);
-	if (rs != NPI_SUCCESS) {
-		return (NXGE_ERROR | rs);
-	}
-
-	/* Initialize the page handle */
-	rs = npi_rxdma_cfg_logical_page_handle(handle, channel,
-			rbrp->page_hdl.bits.ldw.handle);
-
-	if (rs != NPI_SUCCESS) {
-		return (NXGE_ERROR | rs);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
-		"<== nxge_init_fzc_rxdma_channel_pages"));
-
-	return (NXGE_OK);
-}
-
-/*ARGSUSED*/
-nxge_status_t
-nxge_init_fzc_rxdma_channel_red(p_nxge_t nxgep,
-	uint16_t channel, p_rx_rcr_ring_t rcr_p)
-{
-	npi_handle_t		handle;
-	rdc_red_para_t		red;
-	npi_status_t		rs = NPI_SUCCESS;
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_init_fzc_rxdma_channel_red"));
-
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	red.value = 0;
-	red.bits.ldw.win = RXDMA_RED_WINDOW_DEFAULT;
-	red.bits.ldw.thre = (rcr_p->comp_size - RXDMA_RED_LESS_ENTRIES);
-	red.bits.ldw.win_syn = RXDMA_RED_WINDOW_DEFAULT;
-	red.bits.ldw.thre_sync = (rcr_p->comp_size - RXDMA_RED_LESS_ENTRIES);
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
-		"==> nxge_init_fzc_rxdma_channel_red(thre_sync %d(%x))",
-		red.bits.ldw.thre_sync,
-		red.bits.ldw.thre_sync));
-
-	rs = npi_rxdma_cfg_wred_param(handle, channel, &red);
-	if (rs != NPI_SUCCESS) {
-		return (NXGE_ERROR | rs);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
-		"<== nxge_init_fzc_rxdma_channel_red"));
-
-	return (NXGE_OK);
-}
-
-/*ARGSUSED*/
-nxge_status_t
-nxge_init_fzc_txdma_channel(p_nxge_t nxgep, uint16_t channel,
-	p_tx_ring_t tx_ring_p, p_tx_mbox_t mbox_p)
-{
-	nxge_status_t	status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
-		"==> nxge_init_fzc_txdma_channel"));
-
-	switch (nxgep->niu_type) {
-	case NEPTUNE:
-	case NEPTUNE_2:
-	default:
-		/* Initialize the TXDMA logical pages */
-		(void) nxge_init_fzc_txdma_channel_pages(nxgep, channel,
-			tx_ring_p);
-		break;
-
-#ifndef	NIU_HV_WORKAROUND
-	case N2_NIU:
-#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
-		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
-			"==> nxge_init_fzc_txdma_channel "
-			"N2_NIU: call HV to set up txdma logical pages"));
-		status = nxge_init_hv_fzc_txdma_channel_pages(nxgep, channel,
-			tx_ring_p);
-		if (status != NXGE_OK) {
-			return (status);
-		}
-#endif
-		break;
-#else
-	case N2_NIU:
-		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
-			"==> nxge_init_fzc_txdma_channel "
-			"N2_NIU: NEED to set up txdma logical pages"));
-		/* Initialize the TXDMA logical pages */
-		(void) nxge_init_fzc_txdma_channel_pages(nxgep, channel,
-			tx_ring_p);
-		break;
-#endif
-	}
-
-	/*
-	 * Configure Transmit DRR Weight parameters
-	 * (It actually programs the TXC max burst register).
-	 */
-	(void) nxge_init_fzc_txdma_channel_drr(nxgep, channel, tx_ring_p);
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
-		"<== nxge_init_fzc_txdma_channel"));
-	return (status);
-}
-
-nxge_status_t
-nxge_init_fzc_common(p_nxge_t nxgep)
-{
-	nxge_status_t	status = NXGE_OK;
-
-	(void) nxge_init_fzc_rx_common(nxgep);
-
-	return (status);
-}
-
-nxge_status_t
-nxge_init_fzc_rx_common(p_nxge_t nxgep)
-{
-	npi_handle_t	handle;
-	npi_status_t	rs = NPI_SUCCESS;
-	nxge_status_t	status = NXGE_OK;
-	clock_t		lbolt;
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_init_fzc_rx_common"));
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	if (!handle.regp) {
-		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
-			"==> nxge_init_fzc_rx_common null ptr"));
-		return (NXGE_ERROR);
-	}
-
-	/*
-	 * Configure the RXDMA clock divider.
-	 * The granularity counter is driven by the hardware system
-	 * clock (about 300 MHz, roughly 3 nanoseconds per tick), so a
-	 * clock divider of 1000 gives a timeout granularity of about
-	 * 3 microseconds.  For example, a 3 microsecond timeout
-	 * translates to a timeout count of 1.
-	 */
-	rs = npi_rxdma_cfg_clock_div_set(handle, RXDMA_CK_DIV_DEFAULT);
-	if (rs != NPI_SUCCESS)
-		return (NXGE_ERROR | rs);
-
-#if defined(__i386)
-	rs = npi_rxdma_cfg_32bitmode_enable(handle);
-	if (rs != NPI_SUCCESS)
-		return (NXGE_ERROR | rs);
-	rs = npi_txdma_mode32_set(handle, B_TRUE);
-	if (rs != NPI_SUCCESS)
-		return (NXGE_ERROR | rs);
-#endif
-
-	/*
-	 * Enable WRED and program an initial value.
-	 * Use time to set the initial random number.
-	 */
-	(void) drv_getparm(LBOLT, &lbolt);
-	rs = npi_rxdma_cfg_red_rand_init(handle, (uint16_t)lbolt);
-	if (rs != NPI_SUCCESS)
-		return (NXGE_ERROR | rs);
-
-	/* Initialize the RDC tables for each group */
-	status = nxge_init_fzc_rdc_tbl(nxgep);
-
-
-	/* Ethernet Timeout Counter (?) */
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
-		"<== nxge_init_fzc_rx_common:status 0x%08x", status));
-
-	return (status);
-}
-
-nxge_status_t
-nxge_init_fzc_rdc_tbl(p_nxge_t nxgep)
-{
-	npi_handle_t		handle;
-	p_nxge_dma_pt_cfg_t	p_dma_cfgp;
-	p_nxge_hw_pt_cfg_t	p_cfgp;
-	p_nxge_rdc_grp_t	rdc_grp_p;
-	uint8_t 		grp_tbl_id;
-	int			ngrps;
-	int			i;
-	npi_status_t		rs = NPI_SUCCESS;
-	nxge_status_t		status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_init_fzc_rdc_tbl"));
-
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
-	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
-
-	grp_tbl_id = p_cfgp->start_rdc_grpid;
-	rdc_grp_p = &p_dma_cfgp->rdc_grps[0];
-	ngrps = p_cfgp->max_rdc_grpids;
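-	/*
-	 * Program one RDC table per configured RDC group, starting at
-	 * the port's first RDC group table id.
-	 */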
-	for (i = 0; i < ngrps; i++, rdc_grp_p++) {
-		rs = npi_rxdma_cfg_rdc_table(handle, grp_tbl_id++,
-			rdc_grp_p->rdc);
-		if (rs != NPI_SUCCESS) {
-			status = NXGE_ERROR | rs;
-			break;
-		}
-	}
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_init_fzc_rdc_tbl"));
-	return (status);
-}
-
-nxge_status_t
-nxge_init_fzc_rxdma_port(p_nxge_t nxgep)
-{
-	npi_handle_t		handle;
-	p_nxge_dma_pt_cfg_t	p_all_cfgp;
-	p_nxge_hw_pt_cfg_t	p_cfgp;
-	hostinfo_t 		hostinfo;
-	int			i;
-	npi_status_t		rs = NPI_SUCCESS;
-	p_nxge_class_pt_cfg_t 	p_class_cfgp;
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_init_fzc_rxdma_port"));
-
-	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
-	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	/*
-	 * Initialize the port scheduler DRR weight.
-	 * npi_rxdma_cfg_port_ddr_weight();
-	 */
-
-	if (nxgep->niu_type == NEPTUNE) {
-		if ((nxgep->mac.portmode == PORT_1G_COPPER) ||
-			(nxgep->mac.portmode == PORT_1G_FIBER)) {
-			rs = npi_rxdma_cfg_port_ddr_weight(handle,
-							    nxgep->function_num,
-							    NXGE_RX_DRR_WT_1G);
-			if (rs != NPI_SUCCESS) {
-				return (NXGE_ERROR | rs);
-			}
-		}
-	}
-
-	/* Program the default RDC of a port */
-	rs = npi_rxdma_cfg_default_port_rdc(handle, nxgep->function_num,
-			p_cfgp->def_rdc);
-	if (rs != NPI_SUCCESS) {
-		return (NXGE_ERROR | rs);
-	}
-
-	/*
-	 * Configure the MAC host info table with RDC tables
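-	 * Each MAC entry defaults to the port's starting RDC group id
-	 * and MAC preference; entries flagged in the class configuration
-	 * override both with their per-MAC settings.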
-	 */
-	hostinfo.value = 0;
-	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
-	for (i = 0; i < p_cfgp->max_macs; i++) {
-		hostinfo.bits.w0.rdc_tbl_num = p_cfgp->start_rdc_grpid;
-		hostinfo.bits.w0.mac_pref = p_cfgp->mac_pref;
-		if (p_class_cfgp->mac_host_info[i].flag) {
-			hostinfo.bits.w0.rdc_tbl_num =
-				p_class_cfgp->mac_host_info[i].rdctbl;
-			hostinfo.bits.w0.mac_pref =
-				p_class_cfgp->mac_host_info[i].mpr_npr;
-		}
-
-		rs = npi_mac_hostinfo_entry(handle, OP_SET,
-				nxgep->function_num, i, &hostinfo);
-		if (rs != NPI_SUCCESS)
-			return (NXGE_ERROR | rs);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
-		"<== nxge_init_fzc_rxdma_port rs 0x%08x", rs));
-
-	return (NXGE_OK);
-
-}
-
-nxge_status_t
-nxge_fzc_dmc_def_port_rdc(p_nxge_t nxgep, uint8_t port, uint16_t rdc)
-{
-	npi_status_t rs = NPI_SUCCESS;
-	rs = npi_rxdma_cfg_default_port_rdc(nxgep->npi_reg_handle,
-				    port, rdc);
-	if (rs & NPI_FAILURE)
-		return (NXGE_ERROR | rs);
-	return (NXGE_OK);
-}
-
-nxge_status_t
-nxge_init_fzc_txdma_channel_pages(p_nxge_t nxgep, uint16_t channel,
-	p_tx_ring_t tx_ring_p)
-{
-	npi_handle_t		handle;
-	dma_log_page_t		cfg;
-	npi_status_t		rs = NPI_SUCCESS;
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
-		"==> nxge_init_fzc_txdma_channel_pages"));
-
-#ifndef	NIU_HV_WORKAROUND
-	if (nxgep->niu_type == N2_NIU) {
-		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
-			"<== nxge_init_fzc_txdma_channel_pages: "
-			"N2_NIU: no need to set txdma logical pages"));
-		return (NXGE_OK);
-	}
-#else
-	if (nxgep->niu_type == N2_NIU) {
-		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
-			"<== nxge_init_fzc_txdma_channel_pages: "
-			"N2_NIU: NEED to set txdma logical pages"));
-	}
-#endif
-
-	/*
-	 * Initialize logical page 1.
-	 */
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	cfg.func_num = nxgep->function_num;
-	cfg.page_num = 0;
-	cfg.valid = tx_ring_p->page_valid.bits.ldw.page0;
-	cfg.value = tx_ring_p->page_value_1.value;
-	cfg.mask = tx_ring_p->page_mask_1.value;
-	cfg.reloc = tx_ring_p->page_reloc_1.value;
-
-	rs = npi_txdma_log_page_set(handle, channel,
-		(p_dma_log_page_t)&cfg);
-	if (rs != NPI_SUCCESS) {
-		return (NXGE_ERROR | rs);
-	}
-
-	/*
-	 * Initialize logical page 2.
-	 */
-	cfg.page_num = 1;
-	cfg.valid = tx_ring_p->page_valid.bits.ldw.page1;
-	cfg.value = tx_ring_p->page_value_2.value;
-	cfg.mask = tx_ring_p->page_mask_2.value;
-	cfg.reloc = tx_ring_p->page_reloc_2.value;
-
-	rs = npi_txdma_log_page_set(handle, channel, &cfg);
-	if (rs != NPI_SUCCESS) {
-		return (NXGE_ERROR | rs);
-	}
-
-	/* Initialize the page handle */
-	rs = npi_txdma_log_page_handle_set(handle, channel,
-			&tx_ring_p->page_hdl);
-
-	if (rs == NPI_SUCCESS) {
-		return (NXGE_OK);
-	} else {
-		return (NXGE_ERROR | rs);
-	}
-}
-
-
-nxge_status_t
-nxge_init_fzc_txdma_channel_drr(p_nxge_t nxgep, uint16_t channel,
-	p_tx_ring_t tx_ring_p)
-{
-	npi_status_t	rs = NPI_SUCCESS;
-	npi_handle_t	handle;
-
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	rs = npi_txc_dma_max_burst_set(handle, channel,
-			tx_ring_p->max_burst.value);
-	if (rs == NPI_SUCCESS) {
-		return (NXGE_OK);
-	} else {
-		return (NXGE_ERROR | rs);
-	}
-}
-
-nxge_status_t
-nxge_fzc_sys_err_mask_set(p_nxge_t nxgep, uint64_t mask)
-{
-	npi_status_t	rs = NPI_SUCCESS;
-	npi_handle_t	handle;
-
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	rs = npi_fzc_sys_err_mask_set(handle, mask);
-	if (rs == NPI_SUCCESS) {
-		return (NXGE_OK);
-	} else {
-		return (NXGE_ERROR | rs);
-	}
-}
-
-#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
-nxge_status_t
-nxge_init_hv_fzc_txdma_channel_pages(p_nxge_t nxgep, uint16_t channel,
-	p_tx_ring_t tx_ring_p)
-{
-	int			err;
-	uint64_t		hverr;
-#ifdef	DEBUG
-	uint64_t		ra, size;
-#endif
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL,
-		"==> nxge_init_hv_fzc_txdma_channel_pages"));
-
-	if (tx_ring_p->hv_set) {
-		return (NXGE_OK);
-	}
-
-	/*
-	 * Initialize logical page 1 for data buffers.
-	 */
-	hverr = hv_niu_tx_logical_page_conf((uint64_t)channel,
-			(uint64_t)0,
-			tx_ring_p->hv_tx_buf_base_ioaddr_pp,
-			tx_ring_p->hv_tx_buf_ioaddr_size);
-
-	err = (nxge_status_t)nxge_herr2kerr(hverr);
-	if (err != 0) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"<== nxge_init_hv_fzc_txdma_channel_pages: channel %d "
-			"error status 0x%x "
-			"(page 0 data buf) hverr 0x%llx "
-			"ioaddr_pp $%p "
-			"size 0x%llx ",
-			channel,
-			err,
-			hverr,
-			tx_ring_p->hv_tx_buf_base_ioaddr_pp,
-			tx_ring_p->hv_tx_buf_ioaddr_size));
-		return (NXGE_ERROR | err);
-	}
-
-#ifdef	DEBUG
-	ra = size = 0;
-	hverr = hv_niu_tx_logical_page_info((uint64_t)channel,
-			(uint64_t)0,
-			&ra,
-			&size);
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL,
-		"==> nxge_init_hv_fzc_txdma_channel_pages: channel %d "
-		"ok status 0x%x "
-		"(page 0 data buf) hverr 0x%llx "
-		"set ioaddr_pp $%p "
-		"set size 0x%llx "
-		"get ra ioaddr_pp $%p "
-		"get size 0x%llx ",
-		channel,
-		err,
-		hverr,
-		tx_ring_p->hv_tx_buf_base_ioaddr_pp,
-		tx_ring_p->hv_tx_buf_ioaddr_size,
-		ra,
-		size));
-#endif
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL,
-		"==> nxge_init_hv_fzc_txdma_channel_pages: channel %d "
-		"(page 0 data buf) hverr 0x%llx "
-		"ioaddr_pp $%p "
-		"size 0x%llx ",
-		channel,
-		hverr,
-		tx_ring_p->hv_tx_buf_base_ioaddr_pp,
-		tx_ring_p->hv_tx_buf_ioaddr_size));
-
-	/*
-	 * Initialize logical page 2 for control buffers.
-	 */
-	hverr = hv_niu_tx_logical_page_conf((uint64_t)channel,
-			(uint64_t)1,
-			tx_ring_p->hv_tx_cntl_base_ioaddr_pp,
-			tx_ring_p->hv_tx_cntl_ioaddr_size);
-
-	err = (nxge_status_t)nxge_herr2kerr(hverr);
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL,
-		"==> nxge_init_hv_fzc_txdma_channel_pages: channel %d"
-		"ok status 0x%x "
-		"(page 1 cntl buf) hverr 0x%llx "
-		"ioaddr_pp $%p "
-		"size 0x%llx ",
-		channel,
-		err,
-		hverr,
-		tx_ring_p->hv_tx_cntl_base_ioaddr_pp,
-		tx_ring_p->hv_tx_cntl_ioaddr_size));
-
-	if (err != 0) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"<== nxge_init_hv_fzc_txdma_channel_pages: channel %d"
-			"error status 0x%x "
-			"(page 1 cntl buf) hverr 0x%llx "
-			"ioaddr_pp $%p "
-			"size 0x%llx ",
-			channel,
-			err,
-			hverr,
-			tx_ring_p->hv_tx_cntl_base_ioaddr_pp,
-			tx_ring_p->hv_tx_cntl_ioaddr_size));
-		return (NXGE_ERROR | err);
-	}
-
-#ifdef	DEBUG
-	ra = size = 0;
-	hverr = hv_niu_tx_logical_page_info((uint64_t)channel,
-			(uint64_t)1,
-			&ra,
-			&size);
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL,
-		"==> nxge_init_hv_fzc_txdma_channel_pages: channel %d "
-		"(page 1 cntl buf) hverr 0x%llx "
-		"set ioaddr_pp $%p "
-		"set size 0x%llx "
-		"get ra ioaddr_pp $%p "
-		"get size 0x%llx ",
-		channel,
-		hverr,
-		tx_ring_p->hv_tx_cntl_base_ioaddr_pp,
-		tx_ring_p->hv_tx_cntl_ioaddr_size,
-		ra,
-		size));
-#endif
-
-	tx_ring_p->hv_set = B_TRUE;
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL,
-		"<== nxge_init_hv_fzc_txdma_channel_pages"));
-
-	return (NXGE_OK);
-}
-
-/*ARGSUSED*/
-nxge_status_t
-nxge_init_hv_fzc_rxdma_channel_pages(p_nxge_t nxgep,
-		uint16_t channel, p_rx_rbr_ring_t rbrp)
-{
-	int			err;
-	uint64_t		hverr;
-#ifdef	DEBUG
-	uint64_t		ra, size;
-#endif
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL,
-		"==> nxge_init_hv_fzc_rxdma_channel_pages"));
-
-	if (rbrp->hv_set) {
-		return (NXGE_OK);
-	}
-
-	/* Initialize data buffers for page 0 */
-	hverr = hv_niu_rx_logical_page_conf((uint64_t)channel,
-			(uint64_t)0,
-			rbrp->hv_rx_buf_base_ioaddr_pp,
-			rbrp->hv_rx_buf_ioaddr_size);
-	err = (nxge_status_t)nxge_herr2kerr(hverr);
-	if (err != 0) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"<== nxge_init_hv_fzc_rxdma_channel_pages: channel %d"
-			"error status 0x%x "
-			"(page 0 data buf) hverr 0x%llx "
-			"ioaddr_pp $%p "
-			"size 0x%llx ",
-			channel,
-			err,
-			hverr,
-			rbrp->hv_rx_buf_base_ioaddr_pp,
-			rbrp->hv_rx_buf_ioaddr_size));
-
-		return (NXGE_ERROR | err);
-	}
-
-#ifdef	DEBUG
-	ra = size = 0;
-	(void) hv_niu_rx_logical_page_info((uint64_t)channel,
-			(uint64_t)0,
-			&ra,
-			&size);
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL,
-		"==> nxge_init_hv_fzc_rxdma_channel_pages: channel %d "
-		"ok status 0x%x "
-		"(page 0 data buf) hverr 0x%llx "
-		"set databuf ioaddr_pp $%p "
-		"set databuf size 0x%llx "
-		"get databuf ra ioaddr_pp %p "
-		"get databuf size 0x%llx",
-		channel,
-		err,
-		hverr,
-		rbrp->hv_rx_buf_base_ioaddr_pp,
-		rbrp->hv_rx_buf_ioaddr_size,
-		ra,
-		size));
-#endif
-
-	/* Initialize control buffers for logical page 1.  */
-	hverr = hv_niu_rx_logical_page_conf((uint64_t)channel,
-			(uint64_t)1,
-			rbrp->hv_rx_cntl_base_ioaddr_pp,
-			rbrp->hv_rx_cntl_ioaddr_size);
-
-	err = (nxge_status_t)nxge_herr2kerr(hverr);
-	if (err != 0) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"<== nxge_init_hv_fzc_rxdma_channel_pages: channel %d"
-			"error status 0x%x "
-			"(page 1 cntl buf) hverr 0x%llx "
-			"ioaddr_pp $%p "
-			"size 0x%llx ",
-			channel,
-			err,
-			hverr,
-			rbrp->hv_rx_buf_base_ioaddr_pp,
-			rbrp->hv_rx_buf_ioaddr_size));
-
-		return (NXGE_ERROR | err);
-	}
-
-#ifdef	DEBUG
-	ra = size = 0;
-	(void) hv_niu_rx_logical_page_info((uint64_t)channel,
-			(uint64_t)1,
-			&ra,
-			&size);
-
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL,
-		"==> nxge_init_hv_fzc_rxdma_channel_pages: channel %d "
-		"ok status 0x%x "
-		"(page 1 cntl buf) hverr 0x%llx "
-		"set cntl ioaddr_pp $%p "
-		"set cntl size 0x%llx "
-		"get cntl ioaddr_pp $%p "
-		"get cntl size 0x%llx ",
-		channel,
-		err,
-		hverr,
-		rbrp->hv_rx_cntl_base_ioaddr_pp,
-		rbrp->hv_rx_cntl_ioaddr_size,
-		ra,
-		size));
-#endif
-
-	rbrp->hv_set = B_TRUE;
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL,
-		"<== nxge_init_hv_fzc_rxdma_channel_pages"));
-
-	return (NXGE_OK);
-}
-
-/*
- * Map hypervisor error code to errno. Only
- * H_ENORADDR, H_EBADALIGN and H_EINVAL are meaningful
- * for niu driver. Any other error codes are mapped to EINVAL.
- */
-static int
-nxge_herr2kerr(uint64_t hv_errcode)
-{
-	int	s_errcode;
-
-	switch (hv_errcode) {
-	case H_ENORADDR:
-	case H_EBADALIGN:
-		s_errcode = EFAULT;
-		break;
-	case H_EOK:
-		s_errcode = 0;
-		break;
-	default:
-		s_errcode = EINVAL;
-		break;
-	}
-	return (s_errcode);
-}
-
-#endif	/* sun4v and NIU_LP_WORKAROUND */
--- a/usr/src/uts/sun4v/io/nxge/nxge_hcall.s	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,114 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-/*
- * Hypervisor calls called by niu leaf driver.
- */
-
-#include <sys/asm_linkage.h>
-#include <sys/hypervisor_api.h>
-#include <sys/nxge/nxge_impl.h>
-
-#if defined(lint) || defined(__lint)
-
-/*ARGSUSED*/
-uint64_t
-hv_niu_rx_logical_page_conf(uint64_t chidx, uint64_t pgidx,
-	uint64_t raddr, uint64_t size)
-{ return (0); }
-
-/*ARGSUSED*/
-uint64_t
-hv_niu_rx_logical_page_info(uint64_t chidx, uint64_t pgidx,
-	uint64_t *raddr, uint64_t *size)
-{ return (0); }
-
-/*ARGSUSED*/
-uint64_t
-hv_niu_tx_logical_page_conf(uint64_t chidx, uint64_t pgidx,
-	uint64_t raddr, uint64_t size)
-{ return (0); }
-
-/*ARGSUSED*/
-uint64_t
-hv_niu_tx_logical_page_info(uint64_t chidx, uint64_t pgidx,
-	uint64_t *raddr, uint64_t *size)
-{ return (0); }
-
-#else	/* lint || __lint */
-
-	/*
-	 * hv_niu_rx_logical_page_conf(uint64_t chidx, uint64_t pgidx,
-	 *	uint64_t raddr, uint64_t size)
-	 */
-	ENTRY(hv_niu_rx_logical_page_conf)
-	mov	N2NIU_RX_LP_CONF, %o5
-	ta	FAST_TRAP
-	retl
-	nop
-	SET_SIZE(hv_niu_rx_logical_page_conf)
-
-	/*
-	 * hv_niu_rx_logical_page_info(uint64_t chidx, uint64_t pgidx,
-	 *	uint64_t *raddr, uint64_t *size)
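-	 *
-	 * The raddr and size pointers are saved in %g1 and %g2 before
-	 * the trap because the hypervisor returns the values in %o1
-	 * and %o2.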
-	 */
-	ENTRY(hv_niu_rx_logical_page_info)
-	mov	%o2, %g1
-	mov	%o3, %g2
-	mov	N2NIU_RX_LP_INFO, %o5
-	ta	FAST_TRAP
-	stx	%o1, [%g1]
-	retl
-	stx	%o2, [%g2]
-	SET_SIZE(hv_niu_rx_logical_page_info)
-
-	/*
-	 * hv_niu_tx_logical_page_conf(uint64_t chidx, uint64_t pgidx,
-	 *	uint64_t raddr, uint64_t size)
-	 */
-	ENTRY(hv_niu_tx_logical_page_conf)
-	mov	N2NIU_TX_LP_CONF, %o5
-	ta	FAST_TRAP
-	retl
-	nop
-	SET_SIZE(hv_niu_tx_logical_page_conf)
-
-	/*
-	 * hv_niu_tx_logical_page_info(uint64_t chidx, uint64_t pgidx,
-	 *	uint64_t *raddr, uint64_t *size)
-	 */
-	ENTRY(hv_niu_tx_logical_page_info)
-	mov	%o2, %g1
-	mov	%o3, %g2
-	mov	N2NIU_TX_LP_INFO, %o5
-	ta	FAST_TRAP
-	stx	%o1, [%g1]
-	retl
-	stx	%o2, [%g2]
-	SET_SIZE(hv_niu_tx_logical_page_info)
-
-#endif	/* lint || __lint */
--- a/usr/src/uts/sun4v/io/nxge/nxge_hw.c	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1021 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#include <sys/nxge/nxge_impl.h>
-
-/*
- * Tunable Receive Completion Ring Configuration B parameters.
- */
-uint16_t nxge_rx_pkt_thres;	/* 16 bits */
-uint8_t nxge_rx_pkt_timeout;	/* 6 bits based on DMA clock divider */
-
-lb_property_t lb_normal = {normal, "normal", nxge_lb_normal};
-lb_property_t lb_external10g = {external, "external10g", nxge_lb_ext10g};
-lb_property_t lb_external1000 = {external, "external1000", nxge_lb_ext1000};
-lb_property_t lb_external100 = {external, "external100", nxge_lb_ext100};
-lb_property_t lb_external10 = {external, "external10", nxge_lb_ext10};
-lb_property_t lb_phy10g = {internal, "phy10g", nxge_lb_phy10g};
-lb_property_t lb_phy1000 = {internal, "phy1000", nxge_lb_phy1000};
-lb_property_t lb_phy = {internal, "phy", nxge_lb_phy};
-lb_property_t lb_serdes10g = {internal, "serdes10g", nxge_lb_serdes10g};
-lb_property_t lb_serdes1000 = {internal, "serdes", nxge_lb_serdes1000};
-lb_property_t lb_mac10g = {internal, "mac10g", nxge_lb_mac10g};
-lb_property_t lb_mac1000 = {internal, "mac1000", nxge_lb_mac1000};
-lb_property_t lb_mac = {internal, "mac10/100", nxge_lb_mac};
-
-uint32_t nxge_lb_dbg = 1;
-void nxge_get_mii(p_nxge_t nxgep, p_mblk_t mp);
-void nxge_put_mii(p_nxge_t nxgep, p_mblk_t mp);
-
-extern uint32_t nxge_rx_mode;
-extern uint32_t nxge_jumbo_mtu;
-extern boolean_t nxge_jumbo_enable;
-
-static void
-nxge_rtrace_ioctl(p_nxge_t, queue_t *, mblk_t *, struct iocblk *);
-
-/* ARGSUSED */
-void
-nxge_global_reset(p_nxge_t nxgep)
-{
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_global_reset"));
-
-	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
-	(void) nxge_intr_hw_disable(nxgep);
-
-	if ((nxgep->suspended) ||
-			((nxgep->statsp->port_stats.lb_mode ==
-			nxge_lb_phy1000) ||
-			(nxgep->statsp->port_stats.lb_mode ==
-			nxge_lb_phy10g) ||
-			(nxgep->statsp->port_stats.lb_mode ==
-			nxge_lb_serdes1000) ||
-			(nxgep->statsp->port_stats.lb_mode ==
-			nxge_lb_serdes10g))) {
-		(void) nxge_link_init(nxgep);
-	}
-	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
-	(void) nxge_mac_init(nxgep);
-	(void) nxge_intr_hw_enable(nxgep);
-
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_global_reset"));
-}
-
-/* ARGSUSED */
-void
-nxge_hw_id_init(p_nxge_t nxgep)
-{
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_id_init"));
-	/*
-	 * Set up initial hardware parameters required such as mac mtu size.
-	 */
-	nxgep->mac.is_jumbo = B_FALSE;
-	nxgep->mac.maxframesize = NXGE_MTU_DEFAULT_MAX;	/* 1522 */
-	if (nxgep->param_arr[param_accept_jumbo].value || nxge_jumbo_enable) {
-		nxgep->mac.maxframesize = (uint16_t)nxge_jumbo_mtu;
-		nxgep->mac.is_jumbo = B_TRUE;
-	}
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-		"==> nxge_hw_id_init: maxframesize %d",
-		nxgep->mac.maxframesize));
-
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_id_init"));
-}
-
-/* ARGSUSED */
-void
-nxge_hw_init_niu_common(p_nxge_t nxgep)
-{
-	p_nxge_hw_list_t hw_p;
-
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_init_niu_common"));
-
-	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
-		return;
-	}
-	MUTEX_ENTER(&hw_p->nxge_cfg_lock);
-	if (hw_p->flags & COMMON_INIT_DONE) {
-		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
-			"nxge_hw_init_niu_common"
-			" already done for dip $%p function %d exiting",
-			hw_p->parent_devp, nxgep->function_num));
-		MUTEX_EXIT(&hw_p->nxge_cfg_lock);
-		return;
-	}
-
-	hw_p->flags = COMMON_INIT_START;
-	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "nxge_hw_init_niu_common"
-		" Started for device id %x with function %d",
-		hw_p->parent_devp, nxgep->function_num));
-
-	/* per neptune common block init */
-	(void) nxge_fflp_hw_reset(nxgep);
-
-	hw_p->flags = COMMON_INIT_DONE;
-	MUTEX_EXIT(&hw_p->nxge_cfg_lock);
-
-	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "nxge_hw_init_niu_common"
-		" Done for device id %x with function %d",
-		hw_p->parent_devp, nxgep->function_num));
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_init_niu_common"));
-}
-
-/* ARGSUSED */
-uint_t
-nxge_intr(void *arg1, void *arg2)
-{
-	p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
-	p_nxge_t nxgep = (p_nxge_t)arg2;
-	uint_t serviced = DDI_INTR_UNCLAIMED;
-	uint8_t ldv;
-	npi_handle_t handle;
-	p_nxge_ldgv_t ldgvp;
-	p_nxge_ldg_t ldgp, t_ldgp;
-	p_nxge_ldv_t t_ldvp;
-	uint64_t vector0 = 0, vector1 = 0, vector2 = 0;
-	int i, j, nldvs, nintrs = 1;
-	npi_status_t rs = NPI_SUCCESS;
-
-	/* DDI interface returns second arg as NULL (n2 niumx driver) !!! */
-	if (arg2 == NULL || (void *) ldvp->nxgep != arg2) {
-		nxgep = ldvp->nxgep;
-	}
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr"));
-
-	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
-		NXGE_ERROR_MSG((nxgep, INT_CTL,
-			"<== nxge_intr: not initialized 0x%x", serviced));
-		return (serviced);
-	}
-
-	ldgvp = nxgep->ldgvp;
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: ldgvp $%p", ldgvp));
-	if (ldvp == NULL && ldgvp) {
-		t_ldvp = ldvp = ldgvp->ldvp;
-	}
-	if (ldvp) {
-		ldgp = t_ldgp = ldvp->ldgp;
-	}
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: "
-		"ldgvp $%p ldvp $%p ldgp $%p", ldgvp, ldvp, ldgp));
-	if (ldgvp == NULL || ldvp == NULL || ldgp == NULL) {
-		NXGE_ERROR_MSG((nxgep, INT_CTL, "==> nxge_intr: "
-			"ldgvp $%p ldvp $%p ldgp $%p", ldgvp, ldvp, ldgp));
-		NXGE_ERROR_MSG((nxgep, INT_CTL, "<== nxge_intr: not ready"));
-		return (DDI_INTR_UNCLAIMED);
-	}
-	/*
-	 * This interrupt handler will have to go through all the logical
-	 * devices to find out which logical device interrupts us and then call
-	 * its handler to process the events.
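-	 * Each group's logical device flag vectors are read once; any
-	 * set flag dispatches that device's registered handler, and the
-	 * interrupted groups are re-armed afterwards.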
-	 */
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	t_ldgp = ldgp;
-	t_ldvp = ldgp->ldvp;
-
-	nldvs = ldgp->nldvs;
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: #ldvs %d #intrs %d",
-			nldvs, ldgvp->ldg_intrs));
-
-	serviced = DDI_INTR_CLAIMED;
-	for (i = 0; i < nintrs; i++, t_ldgp++) {
-		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr(%d): #ldvs %d "
-				" #intrs %d", i, nldvs, nintrs));
-		/* Get this group's flag bits.  */
-		t_ldgp->interrupted = B_FALSE;
-		rs = npi_ldsv_ldfs_get(handle, t_ldgp->ldg,
-			&vector0, &vector1, &vector2);
-		if (rs) {
-			continue;
-		}
-		if (!vector0 && !vector1 && !vector2) {
-			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: "
-				"no interrupts on group %d", t_ldgp->ldg));
-			continue;
-		}
-		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: "
-			"vector0 0x%llx vector1 0x%llx vector2 0x%llx",
-			vector0, vector1, vector2));
-		t_ldgp->interrupted = B_TRUE;
-		nldvs = t_ldgp->nldvs;
-		for (j = 0; j < nldvs; j++, t_ldvp++) {
-			/*
-			 * Call device's handler if flag bits are on.
-			 */
-			ldv = t_ldvp->ldv;
-			if (((ldv < NXGE_MAC_LD_START) &&
-					(LDV_ON(ldv, vector0) |
-					(LDV_ON(ldv, vector1)))) ||
-					(ldv >= NXGE_MAC_LD_START &&
-					((LDV2_ON_1(ldv, vector2)) ||
-					(LDV2_ON_2(ldv, vector2))))) {
-				(void) (t_ldvp->ldv_intr_handler)(
-					(caddr_t)t_ldvp, arg2);
-				NXGE_DEBUG_MSG((nxgep, INT_CTL,
-					"==> nxge_intr: "
-					"calling device %d #ldvs %d #intrs %d",
-					j, nldvs, nintrs));
-			}
-		}
-	}
-
-	t_ldgp = ldgp;
-	for (i = 0; i < nintrs; i++, t_ldgp++) {
-		/* rearm group interrupts */
-		if (t_ldgp->interrupted) {
-			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: arm "
-				"group %d", t_ldgp->ldg));
-			(void) npi_intr_ldg_mgmt_set(handle, t_ldgp->ldg,
-				t_ldgp->arm, t_ldgp->ldg_timer);
-		}
-	}
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intr: serviced 0x%x",
-		serviced));
-	return (serviced);
-}
-
-/* ARGSUSED */
-uint_t
-nxge_syserr_intr(void *arg1, void *arg2)
-{
-	p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
-	p_nxge_t nxgep = (p_nxge_t)arg2;
-	p_nxge_ldg_t ldgp = NULL;
-	npi_handle_t handle;
-	sys_err_stat_t estat;
-	uint_t serviced = DDI_INTR_UNCLAIMED;
-
-	if (arg1 == NULL && arg2 == NULL) {
-		return (serviced);
-	}
-	if (arg2 == NULL || ((ldvp != NULL && (void *) ldvp->nxgep != arg2))) {
-		if (ldvp != NULL) {
-			nxgep = ldvp->nxgep;
-		}
-	}
-	NXGE_DEBUG_MSG((nxgep, SYSERR_CTL,
-		"==> nxge_syserr_intr: arg2 $%p arg1 $%p", nxgep, ldvp));
-	if (ldvp != NULL && ldvp->use_timer == B_FALSE) {
-		ldgp = ldvp->ldgp;
-		if (ldgp == NULL) {
-			NXGE_ERROR_MSG((nxgep, SYSERR_CTL,
-				"<== nxge_syserrintr(no logical group): "
-				"arg2 $%p arg1 $%p", nxgep, ldvp));
-			return (DDI_INTR_UNCLAIMED);
-		}
-		/*
-		 * Get the logical device state if the function uses interrupt.
-		 */
-	}
-
-	/* This interrupt handler is for system error interrupts.  */
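-	/*
-	 * The FZC system error status register identifies which
-	 * sub-block raised the error; where a block-specific handler
-	 * exists (IPP, ZCP, RDMC, TXC, FFLP) it is invoked below.
-	 */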
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	estat.value = 0;
-	(void) npi_fzc_sys_err_stat_get(handle, &estat);
-	NXGE_DEBUG_MSG((nxgep, SYSERR_CTL,
-		"==> nxge_syserr_intr: device error 0x%016llx", estat.value));
-
-	if (estat.bits.ldw.smx) {
-		/* SMX */
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_syserr_intr: device error - SMX"));
-	} else if (estat.bits.ldw.mac) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_syserr_intr: device error - MAC"));
-		/*
-		 * There is nothing to be done here. All MAC errors go to per
-		 * MAC port interrupt. MIF interrupt is the only MAC sub-block
-		 * that can generate status here. MIF status reported will be
-		 * ignored here. It is checked by per port timer instead.
-		 */
-	} else if (estat.bits.ldw.ipp) {
-		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_syserr_intr: device error - IPP"));
-		(void) nxge_ipp_handle_sys_errors(nxgep);
-	} else if (estat.bits.ldw.zcp) {
-		/* ZCP */
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_syserr_intr: device error - ZCP"));
-		(void) nxge_zcp_handle_sys_errors(nxgep);
-	} else if (estat.bits.ldw.tdmc) {
-		/* TDMC */
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_syserr_intr: device error - TDMC"));
-		/*
-		 * There is no TDMC system errors defined in the PRM. All TDMC
-		 * channel specific errors are reported on a per channel basis.
-		 */
-	} else if (estat.bits.ldw.rdmc) {
-		/* RDMC */
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_syserr_intr: device error - RDMC"));
-		(void) nxge_rxdma_handle_sys_errors(nxgep);
-	} else if (estat.bits.ldw.txc) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_syserr_intr: device error - TXC"));
-		(void) nxge_txc_handle_sys_errors(nxgep);
-	} else if ((nxgep->niu_type != N2_NIU) && estat.bits.ldw.peu) {
-		/* PCI-E */
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_syserr_intr: device error - PCI-E"));
-	} else if (estat.bits.ldw.meta1) {
-		/* META1 */
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_syserr_intr: device error - META1"));
-	} else if (estat.bits.ldw.meta2) {
-		/* META2 */
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_syserr_intr: device error - META2"));
-	} else if (estat.bits.ldw.fflp) {
-		/* FFLP */
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_syserr_intr: device error - FFLP"));
-		(void) nxge_fflp_handle_sys_errors(nxgep);
-	}
-	serviced = DDI_INTR_CLAIMED;
-
-	if (ldgp != NULL && ldvp != NULL && ldgp->nldvs == 1 &&
-		!ldvp->use_timer) {
-		(void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg,
-			B_TRUE, ldgp->ldg_timer);
-	}
-	NXGE_DEBUG_MSG((nxgep, SYSERR_CTL, "<== nxge_syserr_intr"));
-	return (serviced);
-}
-
-/* ARGSUSED */
-void
-nxge_intr_hw_enable(p_nxge_t nxgep)
-{
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr_hw_enable"));
-	(void) nxge_intr_mask_mgmt_set(nxgep, B_TRUE);
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intr_hw_enable"));
-}
-
-/* ARGSUSED */
-void
-nxge_intr_hw_disable(p_nxge_t nxgep)
-{
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr_hw_disable"));
-	(void) nxge_intr_mask_mgmt_set(nxgep, B_FALSE);
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intr_hw_disable"));
-}
-
-/* ARGSUSED */
-void
-nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count)
-{
-	p_nxge_t nxgep = (p_nxge_t)arg;
-	uint8_t channel;
-	npi_handle_t handle;
-	p_nxge_ldgv_t ldgvp;
-	p_nxge_ldv_t ldvp;
-	int i;
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_hw_blank"));
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-
-	if ((ldgvp = nxgep->ldgvp) == NULL) {
-		NXGE_ERROR_MSG((nxgep, INT_CTL,
-			"<== nxge_rx_hw_blank (not enabled)"));
-		return;
-	}
-	ldvp = nxgep->ldgvp->ldvp;
-	if (ldvp == NULL) {
-		return;
-	}
-	for (i = 0; i < ldgvp->nldvs; i++, ldvp++) {
-		if (ldvp->is_rxdma) {
-			channel = ldvp->channel;
-			(void) npi_rxdma_cfg_rdc_rcr_threshold(handle,
-				channel, count);
-			(void) npi_rxdma_cfg_rdc_rcr_timeout(handle,
-				channel, ticks);
-		}
-	}
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_rx_hw_blank"));
-}
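
nxge_rx_hw_blank() above programs two interrupt-blanking knobs for every RX DMA channel: an RCR packet-count threshold and an RCR timeout in ticks, so a channel posts an interrupt once either limit is reached. The following is a minimal sketch of how a caller might derive the two arguments from a latency budget; the blanking-clock period used here is an assumption for illustration, not a value taken from the PRM.

/*
 * Hedged sketch only: EX_BLANK_TICK_USEC is an assumed blanking-clock
 * period, not the hardware's documented value.
 */
#include <sys/types.h>

#define	EX_BLANK_TICK_USEC	8

static void
ex_pick_rx_blanking(uint_t target_usec, uint_t max_pkts,
	time_t *ticks, uint_t *count)
{
	/* Round the latency budget up to whole blanking ticks. */
	*ticks = (time_t)((target_usec + EX_BLANK_TICK_USEC - 1) /
		EX_BLANK_TICK_USEC);
	/* Interrupt no later than after max_pkts completions. */
	*count = max_pkts;
}

The resulting values would then be passed straight through as the ticks and count arguments of nxge_rx_hw_blank().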
-
-/* ARGSUSED */
-void
-nxge_hw_stop(p_nxge_t nxgep)
-{
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_stop"));
-
-	(void) nxge_tx_mac_disable(nxgep);
-	(void) nxge_rx_mac_disable(nxgep);
-	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
-	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);
-
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_stop"));
-}
-
-/* ARGSUSED */
-void
-nxge_hw_ioctl(p_nxge_t nxgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
-{
-	int cmd;
-
-	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_hw_ioctl"));
-
-	if (nxgep == NULL) {
-		miocnak(wq, mp, 0, EINVAL);
-		return;
-	}
-	iocp->ioc_error = 0;
-	cmd = iocp->ioc_cmd;
-
-	switch (cmd) {
-	default:
-		miocnak(wq, mp, 0, EINVAL);
-		return;
-
-	case NXGE_GET_MII:
-		nxge_get_mii(nxgep, mp->b_cont);
-		miocack(wq, mp, sizeof (uint16_t), 0);
-		break;
-
-	case NXGE_PUT_MII:
-		nxge_put_mii(nxgep, mp->b_cont);
-		miocack(wq, mp, 0, 0);
-		break;
-
-	case NXGE_GET64:
-		nxge_get64(nxgep, mp->b_cont);
-		miocack(wq, mp, sizeof (uint32_t), 0);
-		break;
-
-	case NXGE_PUT64:
-		nxge_put64(nxgep, mp->b_cont);
-		miocack(wq, mp, 0, 0);
-		break;
-
-	case NXGE_PUT_TCAM:
-		nxge_put_tcam(nxgep, mp->b_cont);
-		miocack(wq, mp, 0, 0);
-		break;
-
-	case NXGE_GET_TCAM:
-		nxge_get_tcam(nxgep, mp->b_cont);
-		miocack(wq, mp, 0, 0);
-		break;
-
-	case NXGE_TX_REGS_DUMP:
-		nxge_txdma_regs_dump_channels(nxgep);
-		miocack(wq, mp, 0, 0);
-		break;
-	case NXGE_RX_REGS_DUMP:
-		nxge_rxdma_regs_dump_channels(nxgep);
-		miocack(wq, mp, 0, 0);
-		break;
-	case NXGE_VIR_INT_REGS_DUMP:
-	case NXGE_INT_REGS_DUMP:
-		nxge_virint_regs_dump(nxgep);
-		miocack(wq, mp, 0, 0);
-		break;
-	case NXGE_RTRACE:
-		nxge_rtrace_ioctl(nxgep, wq, mp, iocp);
-		break;
-	}
-}
-
-/* ARGSUSED */
-void
-nxge_loopback_ioctl(p_nxge_t nxgep, queue_t *wq, mblk_t *mp,
-	struct iocblk *iocp)
-{
-	p_lb_property_t lb_props;
-
-	size_t size;
-	int i;
-
-	if (mp->b_cont == NULL) {
-		miocnak(wq, mp, 0, EINVAL);
-		return;
-	}
-	switch (iocp->ioc_cmd) {
-	case LB_GET_MODE:
-		NXGE_DEBUG_MSG((nxgep, IOC_CTL, "NXGE_GET_LB_MODE command"));
-		if (nxgep != NULL) {
-			*(lb_info_sz_t *)mp->b_cont->b_rptr =
-				nxgep->statsp->port_stats.lb_mode;
-			miocack(wq, mp, sizeof (nxge_lb_t), 0);
-		} else
-			miocnak(wq, mp, 0, EINVAL);
-		break;
-	case LB_SET_MODE:
-		NXGE_DEBUG_MSG((nxgep, IOC_CTL, "NXGE_SET_LB_MODE command"));
-		if (iocp->ioc_count != sizeof (uint32_t)) {
-			miocack(wq, mp, 0, 0);
-			break;
-		}
-		if ((nxgep != NULL) && nxge_set_lb(nxgep, wq, mp->b_cont)) {
-			miocack(wq, mp, 0, 0);
-		} else {
-			miocnak(wq, mp, 0, EPROTO);
-		}
-		break;
-	case LB_GET_INFO_SIZE:
-		NXGE_DEBUG_MSG((nxgep, IOC_CTL, "LB_GET_INFO_SIZE command"));
-		if (nxgep != NULL) {
-			size = sizeof (lb_normal);
-			if (nxgep->statsp->mac_stats.cap_10gfdx) {
-				size += sizeof (lb_external10g);
-				size += sizeof (lb_phy10g);
-				size += sizeof (lb_serdes10g);
-				size += sizeof (lb_mac10g);
-			}
-			if (nxgep->statsp->mac_stats.cap_1000fdx) {
-				size += sizeof (lb_external1000);
-				size += sizeof (lb_mac1000);
-				if (nxgep->mac.portmode == PORT_1G_COPPER)
-					size += sizeof (lb_phy1000);
-			}
-			if (nxgep->statsp->mac_stats.cap_100fdx)
-				size += sizeof (lb_external100);
-			if (nxgep->statsp->mac_stats.cap_10fdx)
-				size += sizeof (lb_external10);
-			else if (nxgep->mac.portmode == PORT_1G_FIBER)
-				size += sizeof (lb_serdes1000);
-			*(lb_info_sz_t *)mp->b_cont->b_rptr = size;
-
-			NXGE_DEBUG_MSG((nxgep, IOC_CTL,
-				"NXGE_GET_LB_INFO command: size %d", size));
-			miocack(wq, mp, sizeof (lb_info_sz_t), 0);
-		} else
-			miocnak(wq, mp, 0, EINVAL);
-		break;
-
-	case LB_GET_INFO:
-		NXGE_DEBUG_MSG((nxgep, IOC_CTL, "NXGE_GET_LB_INFO command"));
-		if (nxgep != NULL) {
-			size = sizeof (lb_normal);
-			if (nxgep->statsp->mac_stats.cap_10gfdx) {
-				size += sizeof (lb_external10g);
-				size += sizeof (lb_phy10g);
-				size += sizeof (lb_serdes10g);
-				size += sizeof (lb_mac10g);
-			}
-			if (nxgep->statsp->mac_stats.cap_1000fdx) {
-				size += sizeof (lb_external1000);
-				size += sizeof (lb_mac1000);
-				if (nxgep->mac.portmode == PORT_1G_COPPER)
-					size += sizeof (lb_phy1000);
-			}
-			if (nxgep->statsp->mac_stats.cap_100fdx)
-				size += sizeof (lb_external100);
-			if (nxgep->statsp->mac_stats.cap_10fdx)
-				size += sizeof (lb_external10);
-			else if (nxgep->mac.portmode == PORT_1G_FIBER)
-				size += sizeof (lb_serdes1000);
-
-			NXGE_DEBUG_MSG((nxgep, IOC_CTL,
-				"NXGE_GET_LB_INFO command: size %d", size));
-			if (size == iocp->ioc_count) {
-				i = 0;
-				lb_props = (p_lb_property_t)mp->b_cont->b_rptr;
-				lb_props[i++] = lb_normal;
-				if (nxgep->statsp->mac_stats.cap_10gfdx) {
-					lb_props[i++] = lb_mac10g;
-					lb_props[i++] = lb_serdes10g;
-					lb_props[i++] = lb_phy10g;
-					lb_props[i++] = lb_external10g;
-				}
-				if (nxgep->statsp->mac_stats.cap_1000fdx)
-					lb_props[i++] = lb_external1000;
-				if (nxgep->statsp->mac_stats.cap_100fdx)
-					lb_props[i++] = lb_external100;
-				if (nxgep->statsp->mac_stats.cap_10fdx)
-					lb_props[i++] = lb_external10;
-				if (nxgep->statsp->mac_stats.cap_1000fdx)
-					lb_props[i++] = lb_mac1000;
-				if (nxgep->mac.portmode == PORT_1G_COPPER) {
-					if (nxgep->statsp->mac_stats.
-						cap_1000fdx)
-						lb_props[i++] = lb_phy1000;
-				} else if (nxgep->mac.portmode ==
-					PORT_1G_FIBER)
-					lb_props[i++] = lb_serdes1000;
-				miocack(wq, mp, size, 0);
-			} else
-				miocnak(wq, mp, 0, EINVAL);
-		} else {
-			miocnak(wq, mp, 0, EINVAL);
-			cmn_err(CE_NOTE,
-				"!nxge_loopback_ioctl: invalid command 0x%x",
-				iocp->ioc_cmd);
-		}
-		break;
-	}
-}
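
The LB_GET_INFO_SIZE / LB_GET_INFO pair above requires the caller to ask for the size first and then hand back a buffer of exactly that size, otherwise the driver naks the request with EINVAL. Below is a hedged user-space sketch of that sequence; it assumes the loopback ioctls are reachable through a STREAMS I_STR request on an already-opened nxge device node, and the open path and privileges are left to the caller. The helper name is hypothetical.

/*
 * Hedged user-space sketch: transport via I_STR is an assumption; the
 * lb_* types and commands come from <sys/netlb.h> as used by the driver.
 */
#include <sys/types.h>
#include <sys/netlb.h>
#include <stropts.h>
#include <stdlib.h>
#include <unistd.h>

static lb_property_t *
ex_get_lb_modes(int fd, uint_t *nmodes)
{
	struct strioctl sioc;
	lb_info_sz_t sz;
	lb_property_t *props;

	/* First ask how many bytes of lb_property_t the driver will return. */
	sioc.ic_cmd = LB_GET_INFO_SIZE;
	sioc.ic_timout = 0;
	sioc.ic_len = sizeof (sz);
	sioc.ic_dp = (char *)&sz;
	if (ioctl(fd, I_STR, &sioc) < 0)
		return (NULL);

	if ((props = malloc(sz)) == NULL)
		return (NULL);

	/* The buffer must be exactly sz bytes or the driver naks with EINVAL. */
	sioc.ic_cmd = LB_GET_INFO;
	sioc.ic_len = (int)sz;
	sioc.ic_dp = (char *)props;
	if (ioctl(fd, I_STR, &sioc) < 0) {
		free(props);
		return (NULL);
	}
	*nmodes = (uint_t)(sz / sizeof (lb_property_t));
	return (props);
}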
-
-/*
- * DMA channel interfaces to access various channel specific
- * hardware functions.
- */
-/* ARGSUSED */
-void
-nxge_rxdma_channel_put64(nxge_os_acc_handle_t handle, void *reg_addrp,
-	uint32_t reg_base, uint16_t channel, uint64_t reg_data)
-{
-	uint64_t reg_offset;
-
-	NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_channel_put64"));
-
-	/*
-	 * The channel is assumed to range from 0 to the maximum DMA channel
-	 * number. If the virtual DMA CSR address space from the config space
-	 * is used (in the PCI case), the following code needs to use a
-	 * different offset computation macro.
-	 */
-	reg_offset = reg_base + DMC_OFFSET(channel);
-	NXGE_PIO_WRITE64(handle, reg_addrp, reg_offset, reg_data);
-
-	NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_channel_put64"));
-}
-
-/* ARGSUSED */
-uint64_t
-nxge_rxdma_channel_get64(nxge_os_acc_handle_t handle, void *reg_addrp,
-	uint32_t reg_base, uint16_t channel)
-{
-	uint64_t reg_offset;
-
-	NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_channel_get64"));
-
-	/*
-	 * The channel is assumed to range from 0 to the maximum DMA channel
-	 * number. If the virtual DMA CSR address space from the config space
-	 * is used (in the PCI case), the following code needs to use a
-	 * different offset computation macro.
-	 */
-	reg_offset = reg_base + DMC_OFFSET(channel);
-
-	NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_channel_get64"));
-
-	return (NXGE_PIO_READ64(handle, reg_addrp, reg_offset));
-}
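
Both accessors above locate a channel's CSR block by adding DMC_OFFSET(channel) to the register base. The sketch below shows the usual shape of such a per-channel offset macro; the stride (shift) value and the EX_* names are illustrative assumptions, not the driver's real DMC_OFFSET definition from the nxge headers.

/*
 * Hedged sketch: the 512-byte stride is assumed for illustration only.
 */
#include <sys/types.h>

#define	EX_DMC_CHANNEL_SHIFT	9
#define	EX_DMC_OFFSET(ch)	((uint64_t)(ch) << EX_DMC_CHANNEL_SHIFT)

static uint64_t
ex_channel_reg_offset(uint32_t reg_base, uint16_t channel)
{
	/* Same computation the two accessors above perform inline. */
	return ((uint64_t)reg_base + EX_DMC_OFFSET(channel));
}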
-
-/* ARGSUSED */
-void
-nxge_get32(p_nxge_t nxgep, p_mblk_t mp)
-{
-	nxge_os_acc_handle_t nxge_regh;
-
-	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "nxge_get32"));
-	nxge_regh = nxgep->dev_regs->nxge_regh;
-
-	*(uint32_t *)mp->b_rptr = NXGE_PIO_READ32(nxge_regh,
-		nxgep->dev_regs->nxge_regp, *(uint32_t *)mp->b_rptr);
-
-	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "value = 0x%08X",
-		*(uint32_t *)mp->b_rptr));
-	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "nxge_get32"));
-}
-
-/* ARGSUSED */
-void
-nxge_put32(p_nxge_t nxgep, p_mblk_t mp)
-{
-	nxge_os_acc_handle_t nxge_regh;
-	uint32_t *buf;
-	uint8_t *reg;
-
-	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "nxge_put32"));
-	nxge_regh = nxgep->dev_regs->nxge_regh;
-
-	buf = (uint32_t *)mp->b_rptr;
-	reg = (uint8_t *)(nxgep->dev_regs->nxge_regp) + buf[0];
-	NXGE_DEBUG_MSG((nxgep, IOC_CTL,
-		"reg = 0x%016llX index = 0x%08X value = 0x%08X",
-		reg, buf[0], buf[1]));
-	NXGE_PIO_WRITE32(nxge_regh, (uint32_t *)reg, 0, buf[1]);
-	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "nxge_put32"));
-}
-
-/*ARGSUSED*/
-boolean_t
-nxge_set_lb(p_nxge_t nxgep, queue_t *wq, p_mblk_t mp)
-{
-	boolean_t status = B_TRUE;
-	uint32_t lb_mode;
-	lb_property_t *lb_info;
-
-	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_set_lb"));
-	lb_mode = nxgep->statsp->port_stats.lb_mode;
-	if (lb_mode == *(uint32_t *)mp->b_rptr) {
-		cmn_err(CE_NOTE,
-			"!nxge%d: Loopback mode already set (lb_mode %d).\n",
-			nxgep->instance, lb_mode);
-		status = B_FALSE;
-		goto nxge_set_lb_exit;
-	}
-	lb_mode = *(uint32_t *)mp->b_rptr;
-	lb_info = NULL;
-	if (lb_mode == lb_normal.value)
-		lb_info = &lb_normal;
-	else if ((lb_mode == lb_external10g.value) &&
-		(nxgep->statsp->mac_stats.cap_10gfdx))
-		lb_info = &lb_external10g;
-	else if ((lb_mode == lb_external1000.value) &&
-		(nxgep->statsp->mac_stats.cap_1000fdx))
-		lb_info = &lb_external1000;
-	else if ((lb_mode == lb_external100.value) &&
-		(nxgep->statsp->mac_stats.cap_100fdx))
-		lb_info = &lb_external100;
-	else if ((lb_mode == lb_external10.value) &&
-		(nxgep->statsp->mac_stats.cap_10fdx))
-		lb_info = &lb_external10;
-	else if ((lb_mode == lb_phy10g.value) &&
-			((nxgep->mac.portmode == PORT_10G_COPPER) ||
-			(nxgep->mac.portmode == PORT_10G_FIBER)))
-		lb_info = &lb_phy10g;
-	else if ((lb_mode == lb_phy1000.value) &&
-		(nxgep->mac.portmode == PORT_1G_COPPER))
-		lb_info = &lb_phy1000;
-	else if ((lb_mode == lb_phy.value) &&
-		(nxgep->mac.portmode == PORT_1G_COPPER))
-		lb_info = &lb_phy;
-	else if ((lb_mode == lb_serdes10g.value) &&
-			((nxgep->mac.portmode == PORT_10G_FIBER) ||
-			(nxgep->mac.portmode == PORT_10G_COPPER)))
-		lb_info = &lb_serdes10g;
-	else if ((lb_mode == lb_serdes1000.value) &&
-		(nxgep->mac.portmode == PORT_1G_FIBER))
-		lb_info = &lb_serdes1000;
-	else if (lb_mode == lb_mac10g.value)
-		lb_info = &lb_mac10g;
-	else if (lb_mode == lb_mac1000.value)
-		lb_info = &lb_mac1000;
-	else if (lb_mode == lb_mac.value)
-		lb_info = &lb_mac;
-	else {
-		cmn_err(CE_NOTE,
-			"!nxge%d: Loopback mode not supported (mode %d).\n",
-			nxgep->instance, lb_mode);
-		status = B_FALSE;
-		goto nxge_set_lb_exit;
-	}
-
-	if (lb_mode == nxge_lb_normal) {
-		if (nxge_lb_dbg) {
-			cmn_err(CE_NOTE,
-				"!nxge%d: Returning to normal operation",
-				nxgep->instance);
-		}
-		nxge_set_lb_normal(nxgep);
-		goto nxge_set_lb_exit;
-	}
-	nxgep->statsp->port_stats.lb_mode = lb_mode;
-
-	if (nxge_lb_dbg)
-		cmn_err(CE_NOTE,
-			"!nxge%d: Adapter now in %s loopback mode",
-			nxgep->instance, lb_info->key);
-	nxgep->param_arr[param_autoneg].value = 0;
-	nxgep->param_arr[param_anar_10gfdx].value =
-		(nxgep->statsp->port_stats.lb_mode == nxge_lb_ext10g) ||
-		(nxgep->statsp->port_stats.lb_mode == nxge_lb_mac10g) ||
-		(nxgep->statsp->port_stats.lb_mode == nxge_lb_phy10g) ||
-		(nxgep->statsp->port_stats.lb_mode == nxge_lb_serdes10g);
-	nxgep->param_arr[param_anar_10ghdx].value = 0;
-	nxgep->param_arr[param_anar_1000fdx].value =
-		(nxgep->statsp->port_stats.lb_mode == nxge_lb_ext1000) ||
-		(nxgep->statsp->port_stats.lb_mode == nxge_lb_mac1000) ||
-		(nxgep->statsp->port_stats.lb_mode == nxge_lb_phy1000) ||
-		(nxgep->statsp->port_stats.lb_mode == nxge_lb_serdes1000);
-	nxgep->param_arr[param_anar_1000hdx].value = 0;
-	nxgep->param_arr[param_anar_100fdx].value =
-		(nxgep->statsp->port_stats.lb_mode == nxge_lb_phy) ||
-		(nxgep->statsp->port_stats.lb_mode == nxge_lb_mac) ||
-		(nxgep->statsp->port_stats.lb_mode == nxge_lb_ext100);
-	nxgep->param_arr[param_anar_100hdx].value = 0;
-	nxgep->param_arr[param_anar_10fdx].value =
-		(nxgep->statsp->port_stats.lb_mode == nxge_lb_mac) ||
-		(nxgep->statsp->port_stats.lb_mode == nxge_lb_ext10);
-	if (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext1000) {
-		nxgep->param_arr[param_master_cfg_enable].value = 1;
-		nxgep->param_arr[param_master_cfg_value].value = 1;
-	}
-	if ((nxgep->statsp->port_stats.lb_mode == nxge_lb_ext10g) ||
-		(nxgep->statsp->port_stats.lb_mode == nxge_lb_ext1000) ||
-		(nxgep->statsp->port_stats.lb_mode == nxge_lb_ext100) ||
-		(nxgep->statsp->port_stats.lb_mode == nxge_lb_ext10) ||
-		(nxgep->statsp->port_stats.lb_mode == nxge_lb_phy10g) ||
-		(nxgep->statsp->port_stats.lb_mode == nxge_lb_phy1000) ||
-		(nxgep->statsp->port_stats.lb_mode == nxge_lb_phy)) {
-
-		(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
-		(void) nxge_xcvr_find(nxgep);
-		(void) nxge_link_init(nxgep);
-		(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
-	}
-	if (lb_info->lb_type == internal) {
-		if ((nxgep->statsp->port_stats.lb_mode == nxge_lb_mac10g) ||
-				(nxgep->statsp->port_stats.lb_mode ==
-				nxge_lb_phy10g) ||
-				(nxgep->statsp->port_stats.lb_mode ==
-				nxge_lb_serdes10g)) {
-			nxgep->statsp->mac_stats.link_speed = 10000;
-		} else if ((nxgep->statsp->port_stats.lb_mode
-				== nxge_lb_mac1000) ||
-				(nxgep->statsp->port_stats.lb_mode ==
-				nxge_lb_phy1000) ||
-				(nxgep->statsp->port_stats.lb_mode ==
-				nxge_lb_serdes1000)) {
-			nxgep->statsp->mac_stats.link_speed = 1000;
-		} else {
-			nxgep->statsp->mac_stats.link_speed = 100;
-		}
-		nxgep->statsp->mac_stats.link_duplex = 2;
-		nxgep->statsp->mac_stats.link_up = 1;
-	}
-	nxge_global_reset(nxgep);
-
-nxge_set_lb_exit:
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-		"<== nxge_set_lb status = 0x%08x", status));
-	return (status);
-}
-
-/* ARGSUSED */
-void
-nxge_set_lb_normal(p_nxge_t nxgep)
-{
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_set_lb_normal"));
-	nxgep->statsp->port_stats.lb_mode = nxge_lb_normal;
-	nxgep->param_arr[param_autoneg].value =
-		nxgep->param_arr[param_autoneg].old_value;
-	nxgep->param_arr[param_anar_1000fdx].value =
-		nxgep->param_arr[param_anar_1000fdx].old_value;
-	nxgep->param_arr[param_anar_1000hdx].value =
-		nxgep->param_arr[param_anar_1000hdx].old_value;
-	nxgep->param_arr[param_anar_100fdx].value =
-		nxgep->param_arr[param_anar_100fdx].old_value;
-	nxgep->param_arr[param_anar_100hdx].value =
-		nxgep->param_arr[param_anar_100hdx].old_value;
-	nxgep->param_arr[param_anar_10fdx].value =
-		nxgep->param_arr[param_anar_10fdx].old_value;
-	nxgep->param_arr[param_master_cfg_enable].value =
-		nxgep->param_arr[param_master_cfg_enable].old_value;
-	nxgep->param_arr[param_master_cfg_value].value =
-		nxgep->param_arr[param_master_cfg_value].old_value;
-
-	nxge_global_reset(nxgep);
-
-	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
-	(void) nxge_xcvr_find(nxgep);
-	(void) nxge_link_init(nxgep);
-	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
-
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_set_lb_normal"));
-}
-
-/* ARGSUSED */
-void
-nxge_get_mii(p_nxge_t nxgep, p_mblk_t mp)
-{
-	uint16_t reg;
-
-	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_get_mii"));
-
-	reg = *(uint16_t *)mp->b_rptr;
-	(void) nxge_mii_read(nxgep, nxgep->statsp->mac_stats.xcvr_portn, reg,
-		(uint16_t *)mp->b_rptr);
-	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "reg = 0x%08X value = 0x%04X",
-		reg, *(uint16_t *)mp->b_rptr));
-	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "<== nxge_get_mii"));
-}
-
-/* ARGSUSED */
-void
-nxge_put_mii(p_nxge_t nxgep, p_mblk_t mp)
-{
-	uint16_t *buf;
-	uint8_t reg;
-
-	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_put_mii"));
-	buf = (uint16_t *)mp->b_rptr;
-	reg = (uint8_t)buf[0];
-	NXGE_DEBUG_MSG((nxgep, IOC_CTL,
-		"reg = 0x%08X index = 0x%08X value = 0x%08X",
-		reg, buf[0], buf[1]));
-	(void) nxge_mii_write(nxgep, nxgep->statsp->mac_stats.xcvr_portn,
-		reg, buf[1]);
-	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "<== nxge_put_mii"));
-}
-
-/* ARGSUSED */
-void
-nxge_check_hw_state(p_nxge_t nxgep)
-{
-	p_nxge_ldgv_t ldgvp;
-	p_nxge_ldv_t t_ldvp;
-
-	NXGE_DEBUG_MSG((nxgep, SYSERR_CTL, "==> nxge_check_hw_state"));
-
-	MUTEX_ENTER(nxgep->genlock);
-	nxgep->nxge_timerid = 0;
-	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
-		goto nxge_check_hw_state_exit;
-	}
-	nxge_check_tx_hang(nxgep);
-
-	ldgvp = nxgep->ldgvp;
-	if (ldgvp == NULL || (ldgvp->ldvp_syserr == NULL)) {
-		NXGE_ERROR_MSG((nxgep, SYSERR_CTL, "<== nxge_check_hw_state: "
-				"NULL ldgvp (interrupt not ready)."));
-		goto nxge_check_hw_state_exit;
-	}
-	t_ldvp = ldgvp->ldvp_syserr;
-	if (!t_ldvp->use_timer) {
-		NXGE_DEBUG_MSG((nxgep, SYSERR_CTL, "<== nxge_check_hw_state: "
-				"ldgvp $%p t_ldvp $%p use_timer flag %d",
-				ldgvp, t_ldvp, t_ldvp->use_timer));
-		goto nxge_check_hw_state_exit;
-	}
-	if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"port%d Bad register acc handle", nxgep->mac.portnum));
-	}
-	(void) nxge_syserr_intr((void *) t_ldvp, (void *) nxgep);
-
-	nxgep->nxge_timerid = nxge_start_timer(nxgep, nxge_check_hw_state,
-		NXGE_CHECK_TIMER);
-
-nxge_check_hw_state_exit:
-	MUTEX_EXIT(nxgep->genlock);
-	NXGE_DEBUG_MSG((nxgep, SYSERR_CTL, "<== nxge_check_hw_state"));
-}
-
-/*ARGSUSED*/
-static void
-nxge_rtrace_ioctl(p_nxge_t nxgep, queue_t *wq, mblk_t *mp,
-	struct iocblk *iocp)
-{
-	ssize_t size;
-	rtrace_t *rtp;
-	mblk_t *nmp;
-	uint32_t i, j;
-	uint32_t start_blk;
-	uint32_t base_entry;
-	uint32_t num_entries;
-
-	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_rtrace_ioctl"));
-
-	size = 1024;
-	if (mp->b_cont == NULL || MBLKL(mp->b_cont) < size) {
-		NXGE_DEBUG_MSG((nxgep, STR_CTL,
-				"malformed M_IOCTL MBLKL = %d size = %d",
-				MBLKL(mp->b_cont), size));
-		miocnak(wq, mp, 0, EINVAL);
-		return;
-	}
-	nmp = mp->b_cont;
-	rtp = (rtrace_t *)nmp->b_rptr;
-	start_blk = rtp->next_idx;
-	num_entries = rtp->last_idx;
-	base_entry = start_blk * MAX_RTRACE_IOC_ENTRIES;
-
-	NXGE_DEBUG_MSG((nxgep, STR_CTL, "start_blk = %d\n", start_blk));
-	NXGE_DEBUG_MSG((nxgep, STR_CTL, "num_entries = %d\n", num_entries));
-	NXGE_DEBUG_MSG((nxgep, STR_CTL, "base_entry = %d\n", base_entry));
-
-	rtp->next_idx = npi_rtracebuf.next_idx;
-	rtp->last_idx = npi_rtracebuf.last_idx;
-	rtp->wrapped = npi_rtracebuf.wrapped;
-	for (i = 0, j = base_entry; i < num_entries; i++, j++) {
-		rtp->buf[i].ctl_addr = npi_rtracebuf.buf[j].ctl_addr;
-		rtp->buf[i].val_l32 = npi_rtracebuf.buf[j].val_l32;
-		rtp->buf[i].val_h32 = npi_rtracebuf.buf[j].val_h32;
-	}
-
-	nmp->b_wptr = nmp->b_rptr + size;
-	NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_rtrace_ioctl"));
-	miocack(wq, mp, (int)size, 0);
-}
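
nxge_rtrace_ioctl() above returns at most one block of MAX_RTRACE_IOC_ENTRIES register-trace records per M_IOCTL, with the caller selecting the block through next_idx and the driver copying from base_entry = block * MAX_RTRACE_IOC_ENTRIES. A caller dumping the whole ring therefore needs the number of requests computed below; this is a hedged sketch where entries_per_block stands in for MAX_RTRACE_IOC_ENTRIES, whose actual value lives in the driver headers.

/* Hedged sketch: helper name and parameterization are illustrative. */
#include <sys/types.h>

static uint32_t
ex_rtrace_blocks_needed(uint32_t ring_entries, uint32_t entries_per_block)
{
	/* One M_IOCTL returns one block, so round the ring size up. */
	return ((ring_entries + entries_per_block - 1) / entries_per_block);
}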
--- a/usr/src/uts/sun4v/io/nxge/nxge_ipp.c	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,675 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#include <nxge_impl.h>
-#include <nxge_ipp.h>
-
-#define	NXGE_IPP_FIFO_SYNC_TRY_COUNT 100
-
-/* ARGSUSED */
-nxge_status_t
-nxge_ipp_init(p_nxge_t nxgep)
-{
-	uint8_t portn;
-	uint32_t config;
-	npi_handle_t handle;
-	uint32_t pkt_size;
-	ipp_status_t istatus;
-	npi_status_t rs = NPI_SUCCESS;
-	uint64_t val;
-	uint32_t d0, d1, d2, d3, d4;
-	int i;
-	uint32_t dfifo_entries;
-
-	handle = nxgep->npi_handle;
-	portn = NXGE_GET_PORT_NUM(nxgep->function_num);
-
-	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "==> nxge_ipp_init: port%d", portn));
-
-	/* Initialize ECC and parity in SRAM of DFIFO and PFIFO */
-	if ((nxgep->niu_type == NEPTUNE) || (nxgep->niu_type == NEPTUNE_2)) {
-		if (portn < 2)
-			dfifo_entries = IPP_P0_P1_DFIFO_ENTRIES;
-		else
-			dfifo_entries = IPP_P2_P3_DFIFO_ENTRIES;
-	} else if (nxgep->niu_type == N2_NIU) {
-		dfifo_entries = IPP_NIU_DFIFO_ENTRIES;
-	} else
-		goto fail;
-
-	for (i = 0; i < dfifo_entries; i++) {
-		if ((rs = npi_ipp_write_dfifo(handle,
-				portn, i, 0, 0, 0, 0, 0)) != NPI_SUCCESS)
-			goto fail;
-		if ((rs = npi_ipp_read_dfifo(handle, portn,
-				i, &d0, &d1, &d2, &d3, &d4)) != NPI_SUCCESS)
-			goto fail;
-	}
-
-	/* Clear PFIFO DFIFO status bits */
-	if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS)
-		goto fail;
-	if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS)
-		goto fail;
-
-	/*
-	 * Soft reset to make sure we bring the FIFO pointers back to the
-	 * original initial position.
-	 */
-	if ((rs = npi_ipp_reset(handle, portn)) != NPI_SUCCESS)
-		goto fail;
-
-	/* Clean up ECC counter */
-	IPP_REG_RD(nxgep->npi_handle, portn, IPP_ECC_ERR_COUNTER_REG, &val);
-	IPP_REG_RD(nxgep->npi_handle, portn, IPP_TCP_CKSUM_ERR_CNT_REG, &val);
-	IPP_REG_RD(nxgep->npi_handle, portn, IPP_DISCARD_PKT_CNT_REG, &val);
-
-	if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS)
-		goto fail;
-
-	/* Configure IPP port */
-	if ((rs = npi_ipp_iconfig(handle, INIT, portn, ICFG_IPP_ALL))
-			!= NPI_SUCCESS)
-		goto fail;
-	nxgep->ipp.iconfig = ICFG_IPP_ALL;
-
-	config = CFG_IPP | CFG_IPP_DFIFO_ECC_CORRECT | CFG_IPP_DROP_BAD_CRC |
-		CFG_IPP_TCP_UDP_CKSUM;
-	if ((rs = npi_ipp_config(handle, INIT, portn, config)) != NPI_SUCCESS)
-		goto fail;
-	nxgep->ipp.config = config;
-
-	/* Set max packet size */
-	pkt_size = IPP_MAX_PKT_SIZE;
-	if ((rs = npi_ipp_set_max_pktsize(handle, portn,
-			IPP_MAX_PKT_SIZE)) != NPI_SUCCESS)
-		goto fail;
-	nxgep->ipp.max_pkt_size = pkt_size;
-
-	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "<== nxge_ipp_init: port%d", portn));
-
-	return (NXGE_OK);
-fail:
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_ipp_init: Fail to initialize IPP Port #%d\n",
-			portn));
-	return (NXGE_ERROR | rs);
-}
-
-/* ARGSUSED */
-nxge_status_t
-nxge_ipp_disable(p_nxge_t nxgep)
-{
-	uint8_t portn;
-	uint32_t config;
-	npi_handle_t handle;
-	npi_status_t rs = NPI_SUCCESS;
-	uint16_t wr_ptr, rd_ptr;
-	uint32_t try_count;
-
-	handle = nxgep->npi_handle;
-	portn = NXGE_GET_PORT_NUM(nxgep->function_num);
-
-	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "==> nxge_ipp_disable: port%d", portn));
-	(void) nxge_rx_mac_disable(nxgep);
-
-	/*
-	 * Wait until the IPP read and write FIFO pointers are equal.
-	 */
-	(void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
-	(void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
-	try_count = NXGE_IPP_FIFO_SYNC_TRY_COUNT;
-
-	while ((try_count > 0) && (rd_ptr != wr_ptr)) {
-		(void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
-		(void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
-		try_count--;
-	}
-
-	if (try_count == 0) {
-		if ((rd_ptr != 0) && (wr_ptr != 1)) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				" nxge_ipp_disable: port%d failed"
-				" rd_fifo != wr_fifo", portn));
-			goto fail;
-		}
-	}
-	/* disable the IPP */
-	config = nxgep->ipp.config;
-	if ((rs = npi_ipp_config(handle, DISABLE,
-			portn, config)) != NPI_SUCCESS)
-		goto fail;
-
-	/* IPP soft reset */
-	if ((rs = npi_ipp_reset(handle, portn)) != NPI_SUCCESS)
-		goto fail;
-
-	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "<== nxge_ipp_disable: port%d", portn));
-	return (NXGE_OK);
-fail:
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-		"nxge_ipp_disable: Fail to disable IPP Port #%d\n", portn));
-	return (NXGE_ERROR | rs);
-}
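
The poll-until-the-DFIFO-read-and-write-pointers-converge loop in nxge_ipp_disable() is repeated verbatim in nxge_ipp_reset() and nxge_ipp_fatal_err_recover() below. A hypothetical helper along the following lines would factor it out; this is a sketch only, not part of the driver, and it assumes the same kernel includes (nxge_impl.h, nxge_ipp.h) as the surrounding file for npi_handle_t, boolean_t and the npi_ipp_* accessors.

/*
 * Hypothetical helper: returns B_TRUE once the DFIFO read and write
 * pointers converge, B_FALSE if the retry budget is exhausted first.
 */
static boolean_t
ex_ipp_dfifo_synced(npi_handle_t handle, uint8_t portn, uint32_t max_tries)
{
	uint16_t rd_ptr, wr_ptr;

	do {
		(void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
		(void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
		if (rd_ptr == wr_ptr)
			return (B_TRUE);
	} while (max_tries-- > 0);

	return (B_FALSE);
}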
-
-/* ARGSUSED */
-nxge_status_t
-nxge_ipp_reset(p_nxge_t nxgep)
-{
-	uint8_t portn;
-	uint32_t config;
-	npi_handle_t handle;
-	npi_status_t rs = NPI_SUCCESS;
-	uint16_t wr_ptr, rd_ptr;
-	uint32_t try_count;
-
-	handle = nxgep->npi_handle;
-	portn = NXGE_GET_PORT_NUM(nxgep->function_num);
-
-	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "==> nxge_ipp_reset: port%d", portn));
-
-	/* disable the IPP */
-	config = nxgep->ipp.config;
-	if ((rs = npi_ipp_config(handle, DISABLE,
-			portn, config)) != NPI_SUCCESS)
-		goto fail;
-
-	/*
-	 * Wait until the IPP read and write FIFO pointers are equal.
-	 */
-	(void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
-	(void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
-	try_count = NXGE_IPP_FIFO_SYNC_TRY_COUNT;
-
-	while ((try_count > 0) && (rd_ptr != wr_ptr)) {
-		(void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
-		(void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
-		try_count--;
-	}
-
-	if (try_count == 0) {
-		if ((rd_ptr != 0) && (wr_ptr != 1)) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				" nxge_ipp_reset: port%d failed"
-				" rd_fifo != wr_fifo", portn));
-			goto fail;
-		}
-	}
-
-	/* IPP soft reset */
-	if ((rs = npi_ipp_reset(handle, portn)) != NPI_SUCCESS) {
-		goto fail;
-	}
-
-	/* to reset control FIFO */
-	if ((rs = npi_zcp_rest_cfifo_port(handle, portn)) != NPI_SUCCESS)
-		goto fail;
-
-	/*
-	 * Making sure that error source is cleared if this is an injected
-	 * error.
-	 */
-	IPP_REG_WR(handle, portn, IPP_ECC_CTRL_REG, 0);
-
-	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "<== nxge_ipp_reset: port%d", portn));
-	return (NXGE_OK);
-fail:
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_ipp_reset: Fail to reset IPP Port #%d\n",
-			portn));
-	return (NXGE_ERROR | rs);
-}
-
-/* ARGSUSED */
-nxge_status_t
-nxge_ipp_enable(p_nxge_t nxgep)
-{
-	uint8_t portn;
-	uint32_t config;
-	npi_handle_t handle;
-	uint32_t pkt_size;
-	npi_status_t rs = NPI_SUCCESS;
-
-	handle = nxgep->npi_handle;
-	portn = NXGE_GET_PORT_NUM(nxgep->function_num);
-
-	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "==> nxge_ipp_enable: port%d", portn));
-
-	config = CFG_IPP | CFG_IPP_DFIFO_ECC_CORRECT | CFG_IPP_DROP_BAD_CRC |
-		CFG_IPP_TCP_UDP_CKSUM;
-	if ((rs = npi_ipp_config(handle, INIT, portn, config)) != NPI_SUCCESS)
-		goto fail;
-	nxgep->ipp.config = config;
-
-	/* Set max packet size */
-	pkt_size = IPP_MAX_PKT_SIZE;
-	if ((rs = npi_ipp_set_max_pktsize(handle, portn,
-			IPP_MAX_PKT_SIZE)) != NPI_SUCCESS)
-		goto fail;
-	nxgep->ipp.max_pkt_size = pkt_size;
-
-	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "<== nxge_ipp_enable: port%d", portn));
-	return (NXGE_OK);
-fail:
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-		"nxge_ipp_enable: Fail to enable IPP Port #%d\n", portn));
-	return (NXGE_ERROR | rs);
-}
-
-/* ARGSUSED */
-nxge_status_t
-nxge_ipp_handle_sys_errors(p_nxge_t nxgep)
-{
-	npi_handle_t handle;
-	npi_status_t rs = NPI_SUCCESS;
-	p_nxge_ipp_stats_t statsp;
-	ipp_status_t istatus;
-	uint8_t portn;
-	p_ipp_errlog_t errlogp;
-	boolean_t rxport_fatal = B_FALSE;
-	nxge_status_t status = NXGE_OK;
-
-	handle = nxgep->npi_handle;
-	statsp = (p_nxge_ipp_stats_t)&nxgep->statsp->ipp_stats;
-	portn = nxgep->mac.portnum;
-
-	errlogp = (p_ipp_errlog_t)&statsp->errlog;
-
-	if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS)
-		return (NXGE_ERROR | rs);
-
-	if (istatus.value == 0) {
-		/*
-		 * The error is not initiated from this port, so just exit.
-		 */
-		return (NXGE_OK);
-	}
-
-	if (istatus.bits.w0.dfifo_missed_sop) {
-		statsp->sop_miss++;
-		if ((rs = npi_ipp_get_dfifo_eopm_rdptr(handle, portn,
-					&errlogp->dfifo_rd_ptr)) != NPI_SUCCESS)
-			return (NXGE_ERROR | rs);
-		if ((rs = npi_ipp_get_state_mach(handle, portn,
-				&errlogp->state_mach)) != NPI_SUCCESS)
-			return (NXGE_ERROR | rs);
-		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-			NXGE_FM_EREPORT_IPP_SOP_MISS);
-		if (statsp->sop_miss < IPP_MAX_ERR_SHOW)
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"nxge_ipp_err_evnts: fatal error: sop_miss\n"));
-		rxport_fatal = B_TRUE;
-	}
-	if (istatus.bits.w0.dfifo_missed_eop) {
-		statsp->eop_miss++;
-		if ((rs = npi_ipp_get_dfifo_eopm_rdptr(handle, portn,
-				&errlogp->dfifo_rd_ptr)) != NPI_SUCCESS)
-			return (NXGE_ERROR | rs);
-		if ((rs = npi_ipp_get_state_mach(handle, portn,
-				&errlogp->state_mach)) != NPI_SUCCESS)
-			return (NXGE_ERROR | rs);
-		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-			NXGE_FM_EREPORT_IPP_EOP_MISS);
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_ipp_err_evnts: fatal error: eop_miss\n"));
-		rxport_fatal = B_TRUE;
-	}
-	if (istatus.bits.w0.dfifo_uncorr_ecc_err) {
-		boolean_t ue_ecc_valid;
-
-		if ((status = nxge_ipp_eccue_valid_check(nxgep,
-				&ue_ecc_valid)) != NXGE_OK)
-			return (status);
-
-		if (ue_ecc_valid) {
-			statsp->dfifo_ue++;
-			if ((rs = npi_ipp_get_ecc_syndrome(handle, portn,
-					&errlogp->ecc_syndrome)) != NPI_SUCCESS)
-				return (NXGE_ERROR | rs);
-			NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-				NXGE_FM_EREPORT_IPP_DFIFO_UE);
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"nxge_ipp_err_evnts: fatal error: dfifo_ue\n"));
-			rxport_fatal = B_TRUE;
-		}
-	}
-	if (istatus.bits.w0.pre_fifo_perr) {
-		statsp->pfifo_perr++;
-		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-			NXGE_FM_EREPORT_IPP_PFIFO_PERR);
-		if (statsp->pfifo_perr < IPP_MAX_ERR_SHOW)
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"nxge_ipp_err_evnts: "
-				"fatal error: pre_fifo_perr\n"));
-		rxport_fatal = B_TRUE;
-	}
-	if (istatus.bits.w0.pre_fifo_overrun) {
-		statsp->pfifo_over++;
-		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-			NXGE_FM_EREPORT_IPP_PFIFO_OVER);
-		if (statsp->pfifo_over < IPP_MAX_ERR_SHOW)
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"nxge_ipp_err_evnts: "
-				"fatal error: pfifo_over\n"));
-		rxport_fatal = B_TRUE;
-	}
-	if (istatus.bits.w0.pre_fifo_underrun) {
-		statsp->pfifo_und++;
-		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-			NXGE_FM_EREPORT_IPP_PFIFO_UND);
-		if (statsp->pfifo_und < IPP_MAX_ERR_SHOW)
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"nxge_ipp_err_evnts: "
-				"fatal error: pfifo_und\n"));
-		rxport_fatal = B_TRUE;
-	}
-	if (istatus.bits.w0.bad_cksum_cnt_ovfl) {
-		statsp->bad_cs_cnt += IPP_BAD_CS_CNT_MASK;
-		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-			NXGE_FM_EREPORT_IPP_BAD_CS_MX);
-		if (statsp->bad_cs_cnt < (IPP_MAX_ERR_SHOW *
-				IPP_BAD_CS_CNT_MASK))
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"nxge_ipp_err_evnts: bad_cs_max\n"));
-	}
-	if (istatus.bits.w0.pkt_discard_cnt_ovfl) {
-		statsp->pkt_dis_cnt += IPP_PKT_DIS_CNT_MASK;
-		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-			NXGE_FM_EREPORT_IPP_PKT_DIS_MX);
-		if (statsp->pkt_dis_cnt < (IPP_MAX_ERR_SHOW *
-				IPP_PKT_DIS_CNT_MASK))
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"nxge_ipp_err_evnts: pkt_dis_max\n"));
-	}
-
-	/*
-	 * Making sure that error source is cleared if this is an injected
-	 * error.
-	 */
-	IPP_REG_WR(handle, portn, IPP_ECC_CTRL_REG, 0);
-
-	if (rxport_fatal) {
-		NXGE_DEBUG_MSG((nxgep, IPP_CTL,
-			" nxge_ipp_handle_sys_errors:"
-			" fatal Error on  Port #%d\n", portn));
-		status = nxge_ipp_fatal_err_recover(nxgep);
-		if (status == NXGE_OK) {
-			FM_SERVICE_RESTORED(nxgep);
-		}
-	}
-	return (status);
-}
-
-/* ARGSUSED */
-void
-nxge_ipp_inject_err(p_nxge_t nxgep, uint32_t err_id)
-{
-	ipp_status_t ipps;
-	ipp_ecc_ctrl_t ecc_ctrl;
-	uint8_t portn = nxgep->mac.portnum;
-
-	switch (err_id) {
-	case NXGE_FM_EREPORT_IPP_DFIFO_UE:
-		ecc_ctrl.value = 0;
-		ecc_ctrl.bits.w0.cor_dbl = 1;
-		ecc_ctrl.bits.w0.cor_1 = 1;
-		ecc_ctrl.bits.w0.cor_lst = 1;
-		cmn_err(CE_NOTE, "!Write 0x%llx to IPP_ECC_CTRL_REG\n",
-			(unsigned long long) ecc_ctrl.value);
-		IPP_REG_WR(nxgep->npi_handle, portn, IPP_ECC_CTRL_REG,
-			ecc_ctrl.value);
-		break;
-
-	case NXGE_FM_EREPORT_IPP_DFIFO_CE:
-		ecc_ctrl.value = 0;
-		ecc_ctrl.bits.w0.cor_sng = 1;
-		ecc_ctrl.bits.w0.cor_1 = 1;
-		ecc_ctrl.bits.w0.cor_snd = 1;
-		cmn_err(CE_NOTE, "!Write 0x%llx to IPP_ECC_CTRL_REG\n",
-			(unsigned long long) ecc_ctrl.value);
-		IPP_REG_WR(nxgep->npi_handle, portn, IPP_ECC_CTRL_REG,
-			ecc_ctrl.value);
-		break;
-
-	case NXGE_FM_EREPORT_IPP_EOP_MISS:
-	case NXGE_FM_EREPORT_IPP_SOP_MISS:
-	case NXGE_FM_EREPORT_IPP_PFIFO_PERR:
-	case NXGE_FM_EREPORT_IPP_ECC_ERR_MAX:
-	case NXGE_FM_EREPORT_IPP_PFIFO_OVER:
-	case NXGE_FM_EREPORT_IPP_PFIFO_UND:
-	case NXGE_FM_EREPORT_IPP_BAD_CS_MX:
-	case NXGE_FM_EREPORT_IPP_PKT_DIS_MX:
-	case NXGE_FM_EREPORT_IPP_RESET_FAIL:
-		IPP_REG_RD(nxgep->npi_handle, portn, IPP_INT_STATUS_REG,
-			&ipps.value);
-		if (err_id == NXGE_FM_EREPORT_IPP_EOP_MISS)
-			ipps.bits.w0.dfifo_missed_eop = 1;
-		else if (err_id == NXGE_FM_EREPORT_IPP_SOP_MISS)
-			ipps.bits.w0.dfifo_missed_sop = 1;
-		else if (err_id == NXGE_FM_EREPORT_IPP_DFIFO_UE)
-			ipps.bits.w0.dfifo_uncorr_ecc_err = 1;
-		else if (err_id == NXGE_FM_EREPORT_IPP_DFIFO_CE)
-			ipps.bits.w0.dfifo_corr_ecc_err = 1;
-		else if (err_id == NXGE_FM_EREPORT_IPP_PFIFO_PERR)
-			ipps.bits.w0.pre_fifo_perr = 1;
-		else if (err_id == NXGE_FM_EREPORT_IPP_ECC_ERR_MAX)
-			ipps.bits.w0.ecc_err_cnt_ovfl = 1;
-		else if (err_id == NXGE_FM_EREPORT_IPP_PFIFO_OVER)
-			ipps.bits.w0.pre_fifo_overrun = 1;
-		else if (err_id == NXGE_FM_EREPORT_IPP_PFIFO_UND)
-			ipps.bits.w0.pre_fifo_underrun = 1;
-		else if (err_id == NXGE_FM_EREPORT_IPP_BAD_CS_MX)
-			ipps.bits.w0.bad_cksum_cnt_ovfl = 1;
-		else if (err_id == NXGE_FM_EREPORT_IPP_PKT_DIS_MX)
-			ipps.bits.w0.pkt_discard_cnt_ovfl = 1;
-		cmn_err(CE_NOTE, "!Write 0x%llx to IPP_INT_STATUS_REG\n",
-			(unsigned long long) ipps.value);
-		IPP_REG_WR(nxgep->npi_handle, portn, IPP_INT_STATUS_REG,
-			ipps.value);
-		break;
-	}
-}
-
-/* ARGSUSED */
-nxge_status_t
-nxge_ipp_fatal_err_recover(p_nxge_t nxgep)
-{
-	npi_handle_t handle;
-	npi_status_t rs = NPI_SUCCESS;
-	nxge_status_t status = NXGE_OK;
-	uint8_t portn;
-	uint16_t wr_ptr;
-	uint16_t rd_ptr;
-	uint32_t try_count;
-	uint32_t dfifo_entries;
-	ipp_status_t istatus;
-	uint32_t d0, d1, d2, d3, d4;
-	int i;
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_ipp_fatal_err_recover"));
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-		"Recovering from RxPort error..."));
-
-	handle = nxgep->npi_handle;
-	portn = nxgep->mac.portnum;
-
-	/*
-	 * Making sure that error source is cleared if this is an injected
-	 * error.
-	 */
-	IPP_REG_WR(handle, portn, IPP_ECC_CTRL_REG, 0);
-
-	/* Disable RxMAC */
-	if (nxge_rx_mac_disable(nxgep) != NXGE_OK)
-		goto fail;
-
-	/* When recovering from IPP, RxDMA channel resets are not necessary */
-	/* Reset ZCP CFIFO */
-	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "port%d Reset ZCP CFIFO...", portn));
-	if ((rs = npi_zcp_rest_cfifo_port(handle, portn)) != NPI_SUCCESS)
-		goto fail;
-
-	/*
-	 * Wait until the IPP read and write FIFO pointers are equal.
-	 */
-	(void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
-	(void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
-	try_count = 512;
-
-	while ((try_count > 0) && (rd_ptr != wr_ptr)) {
-		(void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
-		(void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
-		try_count--;
-	}
-
-	if (try_count == 0) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			" nxge_ipp_fatal_err_recover: port%d IPP stalled..."
-			" rd_fifo_ptr = 0x%x wr_fifo_ptr = 0x%x",
-			portn, rd_ptr, wr_ptr));
-		/*
-		 * This means the fatal error occurred on the first line of the
-		 * fifo. In this case, just reset the IPP without draining the
-		 * PFIFO.
-		 */
-	}
-
-	if ((nxgep->niu_type == NEPTUNE) || (nxgep->niu_type == NEPTUNE_2)) {
-		if (portn < 2)
-			dfifo_entries = IPP_P0_P1_DFIFO_ENTRIES;
-		else
-			dfifo_entries = IPP_P2_P3_DFIFO_ENTRIES;
-	} else if (nxgep->niu_type == N2_NIU) {
-		dfifo_entries = IPP_NIU_DFIFO_ENTRIES;
-	} else
-		goto fail;
-
-	/* Clean up DFIFO SRAM entries */
-	for (i = 0; i < dfifo_entries; i++) {
-		if ((rs = npi_ipp_write_dfifo(handle, portn,
-				i, 0, 0, 0, 0, 0)) != NPI_SUCCESS)
-			goto fail;
-		if ((rs = npi_ipp_read_dfifo(handle, portn, i,
-				&d0, &d1, &d2, &d3, &d4)) != NPI_SUCCESS)
-			goto fail;
-	}
-
-	/* Clear PFIFO DFIFO status bits */
-	if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS)
-		goto fail;
-	if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS)
-		goto fail;
-
-	/* Reset IPP */
-	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "port%d Reset IPP...", portn));
-	if ((rs = npi_ipp_reset(handle, portn)) != NPI_SUCCESS)
-		goto fail;
-
-	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "port%d Reset RxMAC...", portn));
-	if (nxge_rx_mac_reset(nxgep) != NXGE_OK)
-		goto fail;
-
-	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "port%d Initialize RxMAC...", portn));
-	if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK)
-		goto fail;
-
-	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "port%d Enable RxMAC...", portn));
-	if (nxge_rx_mac_enable(nxgep) != NXGE_OK)
-		goto fail;
-
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-		"Recovery Successful, RxPort Restored"));
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_ipp_fatal_err_recover"));
-
-	return (NXGE_OK);
-fail:
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
-	return (status | rs);
-}
-
-/* ARGSUSED */
-nxge_status_t
-nxge_ipp_eccue_valid_check(p_nxge_t nxgep, boolean_t *valid)
-{
-	npi_handle_t handle;
-	npi_status_t rs = NPI_SUCCESS;
-	uint8_t portn;
-	uint16_t rd_ptr;
-	uint16_t wr_ptr;
-	uint16_t curr_rd_ptr;
-	uint16_t curr_wr_ptr;
-	uint32_t stall_cnt;
-	uint32_t d0, d1, d2, d3, d4;
-
-	handle = nxgep->npi_handle;
-	portn = nxgep->mac.portnum;
-	*valid = B_TRUE;
-
-	if ((rs = npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr))
-		!= NPI_SUCCESS)
-		goto fail;
-	if ((rs = npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr))
-		!= NPI_SUCCESS)
-		goto fail;
-
-	if (rd_ptr == wr_ptr) {
-		cmn_err(CE_NOTE,
-			"nxge_ipp_eccue_valid_check: rd_ptr = %d wr_ptr = %d\n",
-			rd_ptr, wr_ptr);
-		*valid = B_FALSE;	/* IPP not stuck */
-	} else {
-		stall_cnt = 0;
-		while (stall_cnt < 16) {
-			if ((rs = npi_ipp_get_dfifo_rd_ptr(handle,
-					portn, &curr_rd_ptr)) != NPI_SUCCESS)
-				goto fail;
-			if ((rs = npi_ipp_get_dfifo_wr_ptr(handle,
-					portn, &curr_wr_ptr)) != NPI_SUCCESS)
-				goto fail;
-
-			if ((rd_ptr == curr_rd_ptr) && (wr_ptr == curr_wr_ptr))
-				stall_cnt++;
-			else {
-				*valid = B_FALSE;
-				break;
-			}
-		}
-
-		if (*valid) {
-			/* further check to see if the ECC UE is valid */
-			if ((rs = npi_ipp_read_dfifo(handle, portn,
-					rd_ptr, &d0, &d1, &d2, &d3,
-					&d4)) != NPI_SUCCESS)
-				goto fail;
-			if ((d4 & 0x1) == 0)	/* Not the 1st line */
-				*valid = B_FALSE;
-		}
-	}
-	return (NXGE_OK);
-fail:
-	return (NXGE_ERROR | rs);
-}
--- a/usr/src/uts/sun4v/io/nxge/nxge_kstats.c	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,2345 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#include <sys/nxge/nxge_impl.h>
-#include <inet/mi.h>
-
-#define	RDC_NAME_FORMAT1	"RDC Channel"
-#define	TDC_NAME_FORMAT1	"TDC Channel"
-#define	CH_NAME_FORMAT		" %d Stats"
-#define	TDC_NAME_FORMAT		"TDC Channel %d Stats"
-#define	RDC_NAME_FORMAT		"RDC Channel %d Stats"
-
-void nxge_mac_init_kstats(p_nxge_t, struct kstat *);
-void nxge_xmac_init_kstats(struct kstat *);
-void nxge_bmac_init_kstats(struct kstat *);
-
-/* ARGSUSED */
-void
-nxge_init_statsp(p_nxge_t nxgep)
-{
-	size_t stats_size;
-
-	NXGE_DEBUG_MSG((nxgep, KST_CTL, "==> nxge_init_statsp"));
-
-	stats_size = sizeof (nxge_stats_t);
-	nxgep->statsp = KMEM_ZALLOC(stats_size, KM_SLEEP);
-	nxgep->statsp->stats_size = stats_size;
-
-	NXGE_DEBUG_MSG((nxgep, KST_CTL, " <== nxge_init_statsp"));
-}
-
-typedef struct {
-	uint8_t index;
-	uint8_t type;
-	char *name;
-} nxge_kstat_index_t;
-
-typedef enum {
-	RDC_STAT_PACKETS = 0,
-	RDC_STAT_BYTES,
-	RDC_STAT_ERRORS,
-	RDC_STAT_DCF_ERR,
-	RDC_STAT_RCR_ACK_ERR,
-	RDC_STAT_RCR_DC_FIFOFLOW_ERR,
-	RDC_STAT_RCR_SHA_PAR_ERR,
-	RDC_STAT_RBR_PRE_PAR_ERR,
-	RDC_STAT_WRED_DROP,
-	RDC_STAT_RBR_PRE_EMTY,
-	RDC_STAT_RCR_SHADOW_FULL,
-	RDC_STAT_RBR_TMOUT,
-	RDC_STAT_RSP_CNT_ERR,
-	RDC_STAT_BYTE_EN_BUS,
-	RDC_STAT_RSP_DAT_ERR,
-	RDC_STAT_COMPL_L2_ERR,
-	RDC_STAT_COMPL_L4_CKSUM_ERR,
-	RDC_STAT_COMPL_ZCP_SOFT_ERR,
-	RDC_STAT_COMPL_FFLP_SOFT_ERR,
-	RDC_STAT_CONFIG_ERR,
-	RDC_STAT_RCRINCON,
-	RDC_STAT_RCRFULL,
-	RDC_STAT_RBR_EMPTY,
-	RDC_STAT_RBR_FULL,
-	RDC_STAT_RBRLOGPAGE,
-	RDC_STAT_CFIGLOGPAGE,
-	RDC_STAT_PORT_DROP_PKT,
-	RDC_STAT_RCRTO,
-	RDC_STAT_RCRTHRES,
-	RDC_STAT_MEX,
-	RDC_STAT_ID_MIS,
-	RDC_STAT_ZCP_EOP,
-	RDC_STAT_IPP_EOP,
-	RDC_STAT_END
-} nxge_rdc_stat_index_t;
-
-nxge_kstat_index_t nxge_rdc_stats[] = {
-	{RDC_STAT_PACKETS, KSTAT_DATA_UINT64, "rdc_packets"},
-	{RDC_STAT_BYTES, KSTAT_DATA_UINT64, "rdc_bytes"},
-	{RDC_STAT_ERRORS, KSTAT_DATA_ULONG, "rdc_errors"},
-	{RDC_STAT_DCF_ERR, KSTAT_DATA_ULONG, "rdc_dcf_err"},
-	{RDC_STAT_RCR_ACK_ERR, KSTAT_DATA_ULONG, "rdc_rcr_ack_err"},
-	{RDC_STAT_RCR_DC_FIFOFLOW_ERR, KSTAT_DATA_ULONG, "rdc_dc_fifoflow_err"},
-	{RDC_STAT_RCR_SHA_PAR_ERR, KSTAT_DATA_ULONG, "rdc_rcr_sha_par_err"},
-	{RDC_STAT_RBR_PRE_PAR_ERR, KSTAT_DATA_ULONG, "rdc_rbr_pre_par_err"},
-	{RDC_STAT_WRED_DROP, KSTAT_DATA_ULONG, "rdc_wred_drop"},
-	{RDC_STAT_RBR_PRE_EMTY, KSTAT_DATA_ULONG, "rdc_rbr_pre_empty"},
-	{RDC_STAT_RCR_SHADOW_FULL, KSTAT_DATA_ULONG, "rdc_rcr_shadow_full"},
-	{RDC_STAT_RBR_TMOUT, KSTAT_DATA_ULONG, "rdc_rbr_tmout"},
-	{RDC_STAT_RSP_CNT_ERR, KSTAT_DATA_ULONG, "rdc_rsp_cnt_err"},
-	{RDC_STAT_BYTE_EN_BUS, KSTAT_DATA_ULONG, "rdc_byte_en_bus"},
-	{RDC_STAT_RSP_DAT_ERR, KSTAT_DATA_ULONG, "rdc_rsp_dat_err"},
-	{RDC_STAT_COMPL_L2_ERR, KSTAT_DATA_ULONG, "rdc_compl_l2_err"},
-	{RDC_STAT_COMPL_L4_CKSUM_ERR, KSTAT_DATA_ULONG, "rdc_compl_l4_cksum"},
-	{RDC_STAT_COMPL_ZCP_SOFT_ERR, KSTAT_DATA_ULONG,
-		"rdc_compl_zcp_soft_err"},
-	{RDC_STAT_COMPL_FFLP_SOFT_ERR, KSTAT_DATA_ULONG,
-		"rdc_compl_fflp_soft_err"},
-	{RDC_STAT_CONFIG_ERR, KSTAT_DATA_ULONG, "rdc_config_err"},
-	{RDC_STAT_RCRINCON, KSTAT_DATA_ULONG, "rdc_rcrincon"},
-	{RDC_STAT_RCRFULL, KSTAT_DATA_ULONG, "rdc_rcrfull"},
-	{RDC_STAT_RBR_EMPTY, KSTAT_DATA_ULONG, "rdc_rbr_empty"},
-	{RDC_STAT_RBR_FULL, KSTAT_DATA_ULONG, "rdc_rbrfull"},
-	{RDC_STAT_RBRLOGPAGE, KSTAT_DATA_ULONG, "rdc_rbrlogpage"},
-	{RDC_STAT_CFIGLOGPAGE, KSTAT_DATA_ULONG, "rdc_cfiglogpage"},
-	{RDC_STAT_PORT_DROP_PKT, KSTAT_DATA_ULONG, "rdc_port_drop_pkt"},
-	{RDC_STAT_RCRTO, KSTAT_DATA_ULONG, "rdc_rcrto"},
-	{RDC_STAT_RCRTHRES, KSTAT_DATA_ULONG, "rdc_rcrthres"},
-	{RDC_STAT_MEX, KSTAT_DATA_ULONG, "rdc_mex"},
-	{RDC_STAT_ID_MIS, KSTAT_DATA_ULONG, "rdc_id_mismatch"},
-	{RDC_STAT_ZCP_EOP, KSTAT_DATA_ULONG, "rdc_zcp_eop"},
-	{RDC_STAT_IPP_EOP, KSTAT_DATA_ULONG, "rdc_ipp_eop"},
-	{RDC_STAT_END, NULL, NULL}
-};
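
Tables such as nxge_rdc_stats[] pair a fixed index and a kstat data type with a stat name, terminated by an {..._END, NULL, NULL} entry. The sketch below shows how a table of this shape is commonly turned into a named kstat through the DDI kstat interfaces; the helper name, the "net" class string, and the assumption of the surrounding file's kernel includes (for nxge_kstat_index_t) are illustrative, not the driver's actual registration path.

/*
 * Hedged sketch, not the driver's own routine: nentries is the count of
 * real entries, excluding the terminator.
 */
#include <sys/kstat.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

static kstat_t *
ex_kstat_from_table(dev_info_t *dip, char *name, nxge_kstat_index_t *tbl,
	int nentries, int (*update)(kstat_t *, int), void *priv)
{
	kstat_t *ksp;
	kstat_named_t *knp;
	int i;

	ksp = kstat_create("nxge", ddi_get_instance(dip), name, "net",
		KSTAT_TYPE_NAMED, nentries, 0);
	if (ksp == NULL)
		return (NULL);

	/* One named entry per table row, placed at the row's index. */
	knp = ksp->ks_data;
	for (i = 0; i < nentries; i++)
		kstat_named_init(&knp[tbl[i].index], tbl[i].name, tbl[i].type);

	ksp->ks_update = update;
	ksp->ks_private = priv;
	kstat_install(ksp);
	return (ksp);
}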
-
-typedef enum {
-	RDC_SYS_STAT_PRE_PAR_ERR = 0,
-	RDC_SYS_STAT_SHA_PAR_ERR,
-	RDC_SYS_STAT_ID_MISMATCH,
-	RDC_SYS_STAT_IPP_EOP_ERR,
-	RDC_SYS_STAT_ZCP_EOP_ERR,
-	RDC_SYS_STAT_END
-} nxge_rdc_sys_stat_idx_t;
-
-nxge_kstat_index_t nxge_rdc_sys_stats[] = {
-	{RDC_SYS_STAT_PRE_PAR_ERR, KSTAT_DATA_UINT64, "rdc_pre_par_err"},
-	{RDC_SYS_STAT_SHA_PAR_ERR, KSTAT_DATA_UINT64, "rdc_sha_par_err"},
-	{RDC_SYS_STAT_ID_MISMATCH, KSTAT_DATA_UINT64, "rdc_stat_id_mismatch"},
-	{RDC_SYS_STAT_IPP_EOP_ERR, KSTAT_DATA_UINT64, "rdc_ipp_eop_err"},
-	{RDC_SYS_STAT_ZCP_EOP_ERR, KSTAT_DATA_UINT64, "rdc_zcp_eop_err"},
-	{RDC_SYS_STAT_END, NULL, NULL}
-};
-
-typedef enum {
-	TDC_STAT_PACKETS = 0,
-	TDC_STAT_BYTES,
-	TDC_STAT_ERRORS,
-	TDC_STAT_TX_INITS,
-	TDC_STAT_TX_NO_BUF,
-	TDC_STAT_MBOX_ERR,
-	TDC_STAT_PKT_SIZE_ERR,
-	TDC_STAT_TX_RING_OFLOW,
-	TDC_STAT_PREF_BUF_ECC_ERR,
-	TDC_STAT_NACK_PREF,
-	TDC_STAT_NACK_PKT_RD,
-	TDC_STAT_CONF_PART_ERR,
-	TDC_STAT_PKT_PRT_ERR,
-	TDC_STAT_RESET_FAIL,
-	TDC_STAT_TX_STARTS,
-	TDC_STAT_TX_NOCANPUT,
-	TDC_STAT_TX_MSGDUP_FAIL,
-	TDC_STAT_TX_ALLOCB_FAIL,
-	TDC_STAT_TX_NO_DESC,
-	TDC_STAT_TX_DMA_BIND_FAIL,
-	TDC_STAT_TX_UFLOW,
-	TDC_STAT_TX_HDR_PKTS,
-	TDC_STAT_TX_DDI_PKTS,
-	TDC_STAT_TX_DVMA_PKTS,
-	TDC_STAT_TX_MAX_PEND,
-	TDC_STAT_END
-} nxge_tdc_stats_index_t;
-
-nxge_kstat_index_t nxge_tdc_stats[] = {
-	{TDC_STAT_PACKETS, KSTAT_DATA_UINT64, "tdc_packets"},
-	{TDC_STAT_BYTES, KSTAT_DATA_UINT64, "tdc_bytes"},
-	{TDC_STAT_ERRORS, KSTAT_DATA_UINT64, "tdc_errors"},
-	{TDC_STAT_TX_INITS, KSTAT_DATA_ULONG, "tdc_tx_inits"},
-	{TDC_STAT_TX_NO_BUF, KSTAT_DATA_ULONG, "tdc_tx_no_buf"},
-	{TDC_STAT_MBOX_ERR, KSTAT_DATA_ULONG, "tdc_mbox_err"},
-	{TDC_STAT_PKT_SIZE_ERR, KSTAT_DATA_ULONG, "tdc_pkt_size_err"},
-	{TDC_STAT_TX_RING_OFLOW,
-		KSTAT_DATA_ULONG, "tdc_tx_ring_oflow"},
-	{TDC_STAT_PREF_BUF_ECC_ERR,
-		KSTAT_DATA_ULONG, "tdc_pref_buf_ecc_err"},
-	{TDC_STAT_NACK_PREF, KSTAT_DATA_ULONG, "tdc_nack_pref"},
-	{TDC_STAT_NACK_PKT_RD, KSTAT_DATA_ULONG, "tdc_nack_pkt_rd"},
-	{TDC_STAT_CONF_PART_ERR,
-		KSTAT_DATA_ULONG, "tdc_conf_part_err"},
-	{TDC_STAT_PKT_PRT_ERR, KSTAT_DATA_ULONG, "tdc_pkt_prt_err"},
-	{TDC_STAT_RESET_FAIL, KSTAT_DATA_ULONG, "tdc_reset_fail"},
-	{TDC_STAT_TX_STARTS, KSTAT_DATA_ULONG, "tdc_tx_starts"},
-	{TDC_STAT_TX_NOCANPUT, KSTAT_DATA_ULONG, "tdc_tx_nocanput"},
-	{TDC_STAT_TX_MSGDUP_FAIL, KSTAT_DATA_ULONG, "tdc_tx_msgdup_fail"},
-	{TDC_STAT_TX_ALLOCB_FAIL, KSTAT_DATA_ULONG, "tdc_tx_allocb_fail"},
-	{TDC_STAT_TX_NO_DESC, KSTAT_DATA_ULONG, "tdc_tx_no_desc"},
-	{TDC_STAT_TX_DMA_BIND_FAIL, KSTAT_DATA_ULONG, "tdc_tx_dma_bind_fail"},
-	{TDC_STAT_TX_UFLOW, KSTAT_DATA_ULONG, "tdc_tx_uflow"},
-	{TDC_STAT_TX_HDR_PKTS, KSTAT_DATA_ULONG, "tdc_tx_hdr_pkts"},
-	{TDC_STAT_TX_DDI_PKTS, KSTAT_DATA_ULONG, "tdc_tx_ddi_pkts"},
-	{TDC_STAT_TX_DVMA_PKTS, KSTAT_DATA_ULONG, "tdc_tx_dvma_pkts"},
-	{TDC_STAT_TX_MAX_PEND, KSTAT_DATA_ULONG, "tdc_tx_max_pend"},
-	{TDC_STAT_END, NULL, NULL}
-};
-
-/* IPP Statistics definitions */
-typedef enum {
-	IPP_STAT_EOP_MISS = 0,
-	IPP_STAT_SOP_MISS,
-	IPP_STAT_DFIFO_UE,
-	IPP_STAT_ECC_ERR,
-	IPP_STAT_PFIFO_OVER,
-	IPP_STAT_PFIFO_UND,
-	IPP_STAT_BAD_CS,
-	IPP_STAT_BAD_DIS,
-	IPP_STAT_CS_FAIL,
-	IPP_STAT_END
-} nxge_ipp_stat_index_t;
-
-nxge_kstat_index_t nxge_ipp_stats[] = {
-	{IPP_STAT_EOP_MISS, KSTAT_DATA_ULONG, "rxipp_eop_miss"},
-	{IPP_STAT_SOP_MISS, KSTAT_DATA_ULONG, "rxipp_sop_miss"},
-	{IPP_STAT_DFIFO_UE, KSTAT_DATA_ULONG, "rxipp_dfifo_ue"},
-	{IPP_STAT_ECC_ERR, KSTAT_DATA_ULONG, "rxipp_ecc_err"},
-	{IPP_STAT_PFIFO_OVER, KSTAT_DATA_ULONG, "rxipp_pfifo_over"},
-	{IPP_STAT_PFIFO_UND, KSTAT_DATA_ULONG, "rxipp_pfifo_und"},
-	{IPP_STAT_BAD_CS, KSTAT_DATA_ULONG, "rxipp_bad_cs"},
-	{IPP_STAT_BAD_DIS, KSTAT_DATA_ULONG, "rxipp_bad_dis"},
-	{IPP_STAT_CS_FAIL, KSTAT_DATA_ULONG, "rxipp_cs_fail"},
-	{IPP_STAT_END, NULL, NULL}
-};
-
-/* TXC Statistics definitions */
-typedef enum {
-	TXC_STAT_PKT_STUFFED = 0,
-	TXC_STAT_PKT_XMIT,
-	TXC_STAT_RO_CORRECT_ERR,
-	TXC_STAT_RO_UNCORRECT_ERR,
-	TXC_STAT_SF_CORRECT_ERR,
-	TXC_STAT_SF_UNCORRECT_ERR,
-	TXC_STAT_ADDRESS_FAILED,
-	TXC_STAT_DMA_FAILED,
-	TXC_STAT_LENGTH_FAILED,
-	TXC_STAT_PKT_ASSY_DEAD,
-	TXC_STAT_REORDER_ERR,
-	TXC_STAT_END
-} nxge_txc_stat_index_t;
-
-nxge_kstat_index_t nxge_txc_stats[] = {
-	{TXC_STAT_PKT_STUFFED, KSTAT_DATA_ULONG, "txc_pkt_stuffed"},
-	{TXC_STAT_PKT_XMIT, KSTAT_DATA_ULONG, "txc_pkt_xmit"},
-	{TXC_STAT_RO_CORRECT_ERR, KSTAT_DATA_ULONG, "txc_ro_correct_err"},
-	{TXC_STAT_RO_UNCORRECT_ERR, KSTAT_DATA_ULONG, "txc_ro_uncorrect_err"},
-	{TXC_STAT_SF_CORRECT_ERR, KSTAT_DATA_ULONG, "txc_sf_correct_err"},
-	{TXC_STAT_SF_UNCORRECT_ERR, KSTAT_DATA_ULONG, "txc_sf_uncorrect_err"},
-	{TXC_STAT_ADDRESS_FAILED, KSTAT_DATA_ULONG, "txc_address_failed"},
-	{TXC_STAT_DMA_FAILED, KSTAT_DATA_ULONG, "txc_dma_failed"},
-	{TXC_STAT_LENGTH_FAILED, KSTAT_DATA_ULONG, "txc_length_failed"},
-	{TXC_STAT_PKT_ASSY_DEAD, KSTAT_DATA_ULONG, "txc_pkt_assy_dead"},
-	{TXC_STAT_REORDER_ERR, KSTAT_DATA_ULONG, "txc_reorder_err"},
-	{TXC_STAT_END, NULL, NULL}
-};
-
-typedef enum {
-	XMAC_STAT_TX_FRAME_CNT = 0,
-	XMAC_STAT_TX_UNDERFLOW_ERR,
-	XMAC_STAT_TX_MAXPKTSIZE_ERR,
-	XMAC_STAT_TX_OVERFLOW_ERR,
-	XMAC_STAT_TX_FIFO_XFR_ERR,
-	XMAC_STAT_TX_BYTE_CNT,
-	XMAC_STAT_RX_FRAME_CNT,
-	XMAC_STAT_RX_UNDERFLOW_ERR,
-	XMAC_STAT_RX_OVERFLOW_ERR,
-	XMAC_STAT_RX_CRC_ERR_CNT,
-	XMAC_STAT_RX_LEN_ERR_CNT,
-	XMAC_STAT_RX_VIOL_ERR_CNT,
-	XMAC_STAT_RX_BYTE_CNT,
-	XMAC_STAT_RX_HIST1_CNT,
-	XMAC_STAT_RX_HIST2_CNT,
-	XMAC_STAT_RX_HIST3_CNT,
-	XMAC_STAT_RX_HIST4_CNT,
-	XMAC_STAT_RX_HIST5_CNT,
-	XMAC_STAT_RX_HIST6_CNT,
-	XMAC_STAT_RX_HIST7_CNT,
-	XMAC_STAT_RX_BROADCAST_CNT,
-	XMAC_STAT_RX_MULT_CNT,
-	XMAC_STAT_RX_FRAG_CNT,
-	XMAC_STAT_RX_FRAME_ALIGN_ERR_CNT,
-	XMAC_STAT_RX_LINKFAULT_ERR_CNT,
-	XMAC_STAT_RX_REMOTEFAULT_ERR,
-	XMAC_STAT_RX_LOCALFAULT_ERR,
-	XMAC_STAT_RX_PAUSE_CNT,
-	XMAC_STAT_TX_PAUSE_STATE,
-	XMAC_STAT_TX_NOPAUSE_STATE,
-	XMAC_STAT_XPCS_DESKEW_ERR_CNT,
-#ifdef	NXGE_DEBUG_SYMBOL_ERR
-	XMAC_STAT_XPCS_SYMBOL_L0_ERR_CNT,
-	XMAC_STAT_XPCS_SYMBOL_L1_ERR_CNT,
-	XMAC_STAT_XPCS_SYMBOL_L2_ERR_CNT,
-	XMAC_STAT_XPCS_SYMBOL_L3_ERR_CNT,
-#endif
-	XMAC_STAT_END
-} nxge_xmac_stat_index_t;
-
-nxge_kstat_index_t nxge_xmac_stats[] = {
-	{XMAC_STAT_TX_FRAME_CNT, KSTAT_DATA_ULONG, "txmac_frame_cnt"},
-	{XMAC_STAT_TX_UNDERFLOW_ERR, KSTAT_DATA_ULONG, "txmac_underflow_err"},
-	{XMAC_STAT_TX_MAXPKTSIZE_ERR, KSTAT_DATA_ULONG, "txmac_maxpktsize_err"},
-	{XMAC_STAT_TX_OVERFLOW_ERR, KSTAT_DATA_ULONG, "txmac_overflow_err"},
-	{XMAC_STAT_TX_FIFO_XFR_ERR, KSTAT_DATA_ULONG, "txmac_fifo_xfr_err"},
-	{XMAC_STAT_TX_BYTE_CNT, KSTAT_DATA_ULONG, "txmac_byte_cnt"},
-	{XMAC_STAT_RX_FRAME_CNT, KSTAT_DATA_ULONG, "rxmac_frame_cnt"},
-	{XMAC_STAT_RX_UNDERFLOW_ERR, KSTAT_DATA_ULONG, "rxmac_underflow_err"},
-	{XMAC_STAT_RX_OVERFLOW_ERR, KSTAT_DATA_ULONG, "rxmac_overflow_err"},
-	{XMAC_STAT_RX_CRC_ERR_CNT, KSTAT_DATA_ULONG, "rxmac_crc_err"},
-	{XMAC_STAT_RX_LEN_ERR_CNT, KSTAT_DATA_ULONG, "rxmac_length_err"},
-	{XMAC_STAT_RX_VIOL_ERR_CNT, KSTAT_DATA_ULONG, "rxmac_code_violations"},
-	{XMAC_STAT_RX_BYTE_CNT, KSTAT_DATA_ULONG, "rxmac_byte_cnt"},
-	{XMAC_STAT_RX_HIST1_CNT, KSTAT_DATA_ULONG, "rxmac_64_cnt"},
-	{XMAC_STAT_RX_HIST2_CNT, KSTAT_DATA_ULONG, "rxmac_65_127_cnt"},
-	{XMAC_STAT_RX_HIST3_CNT, KSTAT_DATA_ULONG, "rxmac_128_255_cnt"},
-	{XMAC_STAT_RX_HIST4_CNT, KSTAT_DATA_ULONG, "rxmac_256_511_cnt"},
-	{XMAC_STAT_RX_HIST5_CNT, KSTAT_DATA_ULONG, "rxmac_512_1023_cnt"},
-	{XMAC_STAT_RX_HIST6_CNT, KSTAT_DATA_ULONG, "rxmac_1024_1522_cnt"},
-	{XMAC_STAT_RX_HIST7_CNT, KSTAT_DATA_ULONG, "rxmac_jumbo_cnt"},
-	{XMAC_STAT_RX_BROADCAST_CNT, KSTAT_DATA_ULONG, "rxmac_broadcast_cnt"},
-	{XMAC_STAT_RX_MULT_CNT, KSTAT_DATA_ULONG, "rxmac_multicast_cnt"},
-	{XMAC_STAT_RX_FRAG_CNT, KSTAT_DATA_ULONG, "rxmac_fragment_cnt"},
-	{XMAC_STAT_RX_FRAME_ALIGN_ERR_CNT,
-		KSTAT_DATA_ULONG, "rxmac_alignment_err"},
-	{XMAC_STAT_RX_LINKFAULT_ERR_CNT,
-		KSTAT_DATA_ULONG, "rxmac_linkfault_errs"},
-	{XMAC_STAT_RX_REMOTEFAULT_ERR,
-		KSTAT_DATA_ULONG, "rxmac_remote_faults"},
-	{XMAC_STAT_RX_LOCALFAULT_ERR,
-		KSTAT_DATA_ULONG, "rxmac_local_faults"},
-	{XMAC_STAT_RX_PAUSE_CNT, KSTAT_DATA_ULONG, "rxmac_pause_cnt"},
-	{XMAC_STAT_TX_PAUSE_STATE, KSTAT_DATA_ULONG, "txmac_pause_state"},
-	{XMAC_STAT_TX_NOPAUSE_STATE, KSTAT_DATA_ULONG, "txmac_nopause_state"},
-	{XMAC_STAT_XPCS_DESKEW_ERR_CNT,
-		KSTAT_DATA_ULONG, "xpcs_deskew_err_cnt"},
-#ifdef	NXGE_DEBUG_SYMBOL_ERR
-	{XMAC_STAT_XPCS_SYMBOL_L0_ERR_CNT,
-		KSTAT_DATA_ULONG, "xpcs_ln0_symbol_err_cnt"},
-	{XMAC_STAT_XPCS_SYMBOL_L1_ERR_CNT,
-		KSTAT_DATA_ULONG, "xpcs_ln1_symbol_err_cnt"},
-	{XMAC_STAT_XPCS_SYMBOL_L2_ERR_CNT,
-		KSTAT_DATA_ULONG, "xpcs_ln2_symbol_err_cnt"},
-	{XMAC_STAT_XPCS_SYMBOL_L3_ERR_CNT,
-		KSTAT_DATA_ULONG, "xpcs_ln3_symbol_err_cnt"},
-#endif
-	{XMAC_STAT_END, NULL, NULL}
-};
-
-typedef enum {
-	BMAC_STAT_TX_FRAME_CNT = 0,
-	BMAC_STAT_TX_UNDERRUN_ERR,
-	BMAC_STAT_TX_MAX_PKT_ERR,
-	BMAC_STAT_TX_BYTE_CNT,
-	BMAC_STAT_RX_FRAME_CNT,
-	BMAC_STAT_RX_BYTE_CNT,
-	BMAC_STAT_RX_OVERFLOW_ERR,
-	BMAC_STAT_RX_ALIGN_ERR_CNT,
-	BMAC_STAT_RX_CRC_ERR_CNT,
-	BMAC_STAT_RX_LEN_ERR_CNT,
-	BMAC_STAT_RX_VIOL_ERR_CNT,
-	BMAC_STAT_RX_PAUSE_CNT,
-	BMAC_STAT_RX_PAUSE_STATE,
-	BMAC_STAT_RX_NOPAUSE_STATE,
-	BMAC_STAT_END
-} nxge_bmac_stat_index_t;
-
-nxge_kstat_index_t nxge_bmac_stats[] = {
-	{BMAC_STAT_TX_FRAME_CNT, KSTAT_DATA_ULONG, "txmac_frame_cnt"},
-	{BMAC_STAT_TX_UNDERRUN_ERR, KSTAT_DATA_ULONG, "txmac_underrun_err"},
-	{BMAC_STAT_TX_MAX_PKT_ERR, KSTAT_DATA_ULONG, "txmac_max_pkt_err"},
-	{BMAC_STAT_TX_BYTE_CNT, KSTAT_DATA_ULONG, "txmac_byte_cnt"},
-	{BMAC_STAT_RX_FRAME_CNT, KSTAT_DATA_ULONG, "rxmac_frame_cnt"},
-	{BMAC_STAT_RX_BYTE_CNT, KSTAT_DATA_ULONG, "rxmac_byte_cnt"},
-	{BMAC_STAT_RX_OVERFLOW_ERR, KSTAT_DATA_ULONG, "rxmac_overflow_err"},
-	{BMAC_STAT_RX_ALIGN_ERR_CNT, KSTAT_DATA_ULONG, "rxmac_align_err_cnt"},
-	{BMAC_STAT_RX_CRC_ERR_CNT, KSTAT_DATA_ULONG, "rxmac_crc_err_cnt"},
-	{BMAC_STAT_RX_LEN_ERR_CNT, KSTAT_DATA_ULONG, "rxmac_len_err_cnt"},
-	{BMAC_STAT_RX_VIOL_ERR_CNT, KSTAT_DATA_ULONG, "rxmac_viol_err_cnt"},
-	{BMAC_STAT_RX_PAUSE_CNT, KSTAT_DATA_ULONG, "rxmac_pause_cnt"},
-	{BMAC_STAT_RX_PAUSE_STATE, KSTAT_DATA_ULONG, "txmac_pause_state"},
-	{BMAC_STAT_RX_NOPAUSE_STATE, KSTAT_DATA_ULONG, "tx_nopause_state"},
-	{BMAC_STAT_END, NULL, NULL}
-};
-
-typedef enum {
-	ZCP_STAT_ERRORS,
-	ZCP_STAT_INITS,
-	ZCP_STAT_RRFIFO_UNDERRUN,
-	ZCP_STAT_RRFIFO_OVERRUN,
-	ZCP_STAT_RSPFIFO_UNCORR_ERR,
-	ZCP_STAT_BUFFER_OVERFLOW,
-	ZCP_STAT_STAT_TBL_PERR,
-	ZCP_STAT_DYN_TBL_PERR,
-	ZCP_STAT_BUF_TBL_PERR,
-	ZCP_STAT_TT_PROGRAM_ERR,
-	ZCP_STAT_RSP_TT_INDEX_ERR,
-	ZCP_STAT_SLV_TT_INDEX_ERR,
-	ZCP_STAT_ZCP_TT_INDEX_ERR,
-	ZCP_STAT_ZCP_ACCESS_FAIL,
-	ZCP_CFIFO_ECC,
-	ZCP_STAT_END
-} nxge_zcp_stat_index_t;
-
-nxge_kstat_index_t nxge_zcp_stats[] = {
-	{ZCP_STAT_ERRORS, KSTAT_DATA_ULONG, "zcp_errors"},
-	{ZCP_STAT_INITS, KSTAT_DATA_ULONG, "zcp_inits"},
-	{ZCP_STAT_RRFIFO_UNDERRUN, KSTAT_DATA_ULONG, "zcp_rrfifo_underrun"},
-	{ZCP_STAT_RRFIFO_OVERRUN, KSTAT_DATA_ULONG, "zcp_rrfifo_overrun"},
-	{ZCP_STAT_RSPFIFO_UNCORR_ERR, KSTAT_DATA_ULONG,
-	"zcp_rspfifo_uncorr_err"},
-	{ZCP_STAT_BUFFER_OVERFLOW, KSTAT_DATA_ULONG, "zcp_buffer_overflow"},
-	{ZCP_STAT_STAT_TBL_PERR, KSTAT_DATA_ULONG, "zcp_stat_tbl_perr"},
-	{ZCP_STAT_DYN_TBL_PERR, KSTAT_DATA_ULONG, "zcp_dyn_tbl_perr"},
-	{ZCP_STAT_BUF_TBL_PERR, KSTAT_DATA_ULONG, "zcp_buf_tbl_perr"},
-	{ZCP_STAT_TT_PROGRAM_ERR, KSTAT_DATA_ULONG, "zcp_tt_program_err"},
-	{ZCP_STAT_RSP_TT_INDEX_ERR, KSTAT_DATA_ULONG, "zcp_rsp_tt_index_err"},
-	{ZCP_STAT_SLV_TT_INDEX_ERR, KSTAT_DATA_ULONG, "zcp_slv_tt_index_err"},
-	{ZCP_STAT_ZCP_TT_INDEX_ERR, KSTAT_DATA_ULONG, "zcp_zcp_tt_index_err"},
-	{ZCP_STAT_ZCP_ACCESS_FAIL, KSTAT_DATA_ULONG, "zcp_access_fail"},
-	{ZCP_CFIFO_ECC, KSTAT_DATA_ULONG, "zcp_cfifo_ecc"},
-	{ZCP_STAT_END, NULL, NULL}
-};
-
-typedef enum {
-	FFLP_STAT_TCAM_PERR,
-	FFLP_STAT_TCAM_ECC_ERR,
-	FFLP_STAT_VLAN_PERR,
-	FFLP_STAT_HASH_LOOKUP_ERR,
-	FFLP_STAT_HASH_P0_PIO_ERR,
-	FFLP_STAT_HASH_P1_PIO_ERR,
-	FFLP_STAT_HASH_P2_PIO_ERR,
-	FFLP_STAT_HASH_P3_PIO_ERR,
-	FFLP_STAT_HASH_P4_PIO_ERR,
-	FFLP_STAT_HASH_P5_PIO_ERR,
-	FFLP_STAT_HASH_P6_PIO_ERR,
-	FFLP_STAT_HASH_P7_PIO_ERR,
-	FFLP_STAT_END
-} nxge_fflp_stat_index_t;
-
-nxge_kstat_index_t nxge_fflp_stats[] = {
-	{FFLP_STAT_TCAM_PERR, KSTAT_DATA_ULONG, "fflp_tcam_perr"},
-	{FFLP_STAT_TCAM_ECC_ERR, KSTAT_DATA_ULONG, "fflp_tcam_ecc_err"},
-	{FFLP_STAT_VLAN_PERR, KSTAT_DATA_ULONG, "fflp_vlan_perr"},
-	{FFLP_STAT_HASH_LOOKUP_ERR, KSTAT_DATA_ULONG, "fflp_hash_lookup_err"},
-	{FFLP_STAT_HASH_P0_PIO_ERR, KSTAT_DATA_ULONG, "fflp_hash_p0_pio_err"},
-	{FFLP_STAT_HASH_P1_PIO_ERR, KSTAT_DATA_ULONG, "fflp_hash_p1_pio_err"},
-	{FFLP_STAT_HASH_P2_PIO_ERR, KSTAT_DATA_ULONG, "fflp_hash_p2_pio_err"},
-	{FFLP_STAT_HASH_P3_PIO_ERR, KSTAT_DATA_ULONG, "fflp_hash_p3_pio_err"},
-	{FFLP_STAT_HASH_P4_PIO_ERR, KSTAT_DATA_ULONG, "fflp_hash_p4_pio_err"},
-	{FFLP_STAT_HASH_P5_PIO_ERR, KSTAT_DATA_ULONG, "fflp_hash_p5_pio_err"},
-	{FFLP_STAT_HASH_P6_PIO_ERR, KSTAT_DATA_ULONG, "fflp_hash_p6_pio_err"},
-	{FFLP_STAT_HASH_P7_PIO_ERR, KSTAT_DATA_ULONG, "fflp_hash_p7_pio_err"},
-	{FFLP_STAT_END, NULL, NULL}
-};
-
-typedef enum {
-	MMAC_MAX_ADDR,
-	MMAC_AVAIL_ADDR,
-	MMAC_ADDR_POOL1,
-	MMAC_ADDR_POOL2,
-	MMAC_ADDR_POOL3,
-	MMAC_ADDR_POOL4,
-	MMAC_ADDR_POOL5,
-	MMAC_ADDR_POOL6,
-	MMAC_ADDR_POOL7,
-	MMAC_ADDR_POOL8,
-	MMAC_ADDR_POOL9,
-	MMAC_ADDR_POOL10,
-	MMAC_ADDR_POOL11,
-	MMAC_ADDR_POOL12,
-	MMAC_ADDR_POOL13,
-	MMAC_ADDR_POOL14,
-	MMAC_ADDR_POOL15,
-	MMAC_ADDR_POOL16,
-	MMAC_STATS_END
-} nxge_mmac_stat_index_t;
-
-nxge_kstat_index_t nxge_mmac_stats[] = {
-	{MMAC_MAX_ADDR, KSTAT_DATA_UINT64, "max_mmac_addr"},
-	{MMAC_AVAIL_ADDR, KSTAT_DATA_UINT64, "avail_mmac_addr"},
-	{MMAC_ADDR_POOL1, KSTAT_DATA_UINT64, "mmac_addr_1"},
-	{MMAC_ADDR_POOL2, KSTAT_DATA_UINT64, "mmac_addr_2"},
-	{MMAC_ADDR_POOL3, KSTAT_DATA_UINT64, "mmac_addr_3"},
-	{MMAC_ADDR_POOL4, KSTAT_DATA_UINT64, "mmac_addr_4"},
-	{MMAC_ADDR_POOL5, KSTAT_DATA_UINT64, "mmac_addr_5"},
-	{MMAC_ADDR_POOL6, KSTAT_DATA_UINT64, "mmac_addr_6"},
-	{MMAC_ADDR_POOL7, KSTAT_DATA_UINT64, "mmac_addr_7"},
-	{MMAC_ADDR_POOL8, KSTAT_DATA_UINT64, "mmac_addr_8"},
-	{MMAC_ADDR_POOL9, KSTAT_DATA_UINT64, "mmac_addr_9"},
-	{MMAC_ADDR_POOL10, KSTAT_DATA_UINT64, "mmac_addr_10"},
-	{MMAC_ADDR_POOL11, KSTAT_DATA_UINT64, "mmac_addr_11"},
-	{MMAC_ADDR_POOL12, KSTAT_DATA_UINT64, "mmac_addr_12"},
-	{MMAC_ADDR_POOL13, KSTAT_DATA_UINT64, "mmac_addr_13"},
-	{MMAC_ADDR_POOL14, KSTAT_DATA_UINT64, "mmac_addr_14"},
-	{MMAC_ADDR_POOL15, KSTAT_DATA_UINT64, "mmac_addr_15"},
-	{MMAC_ADDR_POOL16, KSTAT_DATA_UINT64, "mmac_addr_16"},
-	{MMAC_STATS_END, NULL, NULL},
-};
-
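-/*
- * ks_update callback for the per-channel TDC kstats installed by
- * nxge_setup_local_kstat().  The transmit DMA channel number is parsed from
- * the kstat name (TDC_NAME_FORMAT1 followed by the channel number).  On
- * KSTAT_READ the soft per-ring counters are copied into the named kstat
- * entries; on KSTAT_WRITE the writable error counters are copied back into
- * the soft statistics.
- */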
-/* ARGSUSED */
-int
-nxge_tdc_stat_update(kstat_t *ksp, int rw)
-{
-	p_nxge_t nxgep;
-	p_nxge_tdc_kstat_t tdc_kstatsp;
-	p_nxge_tx_ring_stats_t statsp;
-	int channel;
-	char *ch_name, *end;
-
-	nxgep = (p_nxge_t)ksp->ks_private;
-	if (nxgep == NULL)
-		return (-1);
-
-	NXGE_DEBUG_MSG((nxgep, KST_CTL, "==> nxge_rxstat_update"));
-
-	ch_name = ksp->ks_name;
-	ch_name += strlen(TDC_NAME_FORMAT1);
-	channel = mi_strtol(ch_name, &end, 10);
-
-	tdc_kstatsp = (p_nxge_tdc_kstat_t)ksp->ks_data;
-	statsp = (p_nxge_tx_ring_stats_t)&nxgep->statsp->tdc_stats[channel];
-
-	NXGE_DEBUG_MSG((nxgep, KST_CTL,
-		"nxge_tdc_stat_update data $%p statsp $%p channel %d",
-		ksp->ks_data, statsp, channel));
-
-	if (rw == KSTAT_WRITE) {
-		statsp->opackets = tdc_kstatsp->opackets.value.ull;
-		statsp->obytes = tdc_kstatsp->obytes.value.ull;
-		statsp->oerrors = tdc_kstatsp->oerrors.value.ull;
-		statsp->mbox_err = tdc_kstatsp->mbox_err.value.ul;
-		statsp->pkt_size_err = tdc_kstatsp->pkt_size_err.value.ul;
-		statsp->tx_ring_oflow = tdc_kstatsp->tx_ring_oflow.value.ul;
-		statsp->pre_buf_par_err =
-			tdc_kstatsp->pref_buf_ecc_err.value.ul;
-		statsp->nack_pref = tdc_kstatsp->nack_pref.value.ul;
-		statsp->nack_pkt_rd = tdc_kstatsp->nack_pkt_rd.value.ul;
-		statsp->conf_part_err = tdc_kstatsp->conf_part_err.value.ul;
-		statsp->pkt_part_err = tdc_kstatsp->pkt_prt_err.value.ul;
-	} else {
-		tdc_kstatsp->opackets.value.ull = statsp->opackets;
-		tdc_kstatsp->obytes.value.ull = statsp->obytes;
-		tdc_kstatsp->oerrors.value.ull = statsp->oerrors;
-		tdc_kstatsp->tx_hdr_pkts.value.ull = statsp->tx_hdr_pkts;
-		tdc_kstatsp->tx_ddi_pkts.value.ull = statsp->tx_ddi_pkts;
-		tdc_kstatsp->tx_dvma_pkts.value.ull = statsp->tx_dvma_pkts;
-		tdc_kstatsp->tx_max_pend.value.ull = statsp->tx_max_pend;
-		tdc_kstatsp->mbox_err.value.ul = statsp->mbox_err;
-		tdc_kstatsp->pkt_size_err.value.ul = statsp->pkt_size_err;
-		tdc_kstatsp->tx_ring_oflow.value.ul = statsp->tx_ring_oflow;
-		tdc_kstatsp->pref_buf_ecc_err.value.ul =
-			statsp->pre_buf_par_err;
-		tdc_kstatsp->nack_pref.value.ul = statsp->nack_pref;
-		tdc_kstatsp->nack_pkt_rd.value.ul = statsp->nack_pkt_rd;
-		tdc_kstatsp->conf_part_err.value.ul = statsp->conf_part_err;
-		tdc_kstatsp->pkt_prt_err.value.ul = statsp->pkt_part_err;
-		tdc_kstatsp->tx_starts.value.ul = statsp->tx_starts;
-		tdc_kstatsp->tx_nocanput.value.ul = statsp->tx_nocanput;
-		tdc_kstatsp->tx_msgdup_fail.value.ul = statsp->tx_msgdup_fail;
-		tdc_kstatsp->tx_allocb_fail.value.ul = statsp->tx_allocb_fail;
-		tdc_kstatsp->tx_no_desc.value.ul = statsp->tx_no_desc;
-		tdc_kstatsp->tx_dma_bind_fail.value.ul =
-			statsp->tx_dma_bind_fail;
-	}
-	NXGE_DEBUG_MSG((nxgep, KST_CTL, " <== nxge_tdc_stat_update"));
-	return (0);
-}
-
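-/*
- * ks_update callback for the per-channel RDC kstats.  The receive DMA
- * channel number is parsed from the kstat name (RDC_NAME_FORMAT1 followed
- * by the channel number) and the matching rdc_stats[] entry is copied to or
- * from the named kstat data, depending on the KSTAT_READ/KSTAT_WRITE
- * direction.
- */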
-/* ARGSUSED */
-int
-nxge_rdc_stat_update(kstat_t *ksp, int rw)
-{
-	p_nxge_t nxgep;
-	p_nxge_rdc_kstat_t rdc_kstatsp;
-	p_nxge_rx_ring_stats_t statsp;
-	int channel;
-	char *ch_name, *end;
-
-	nxgep = (p_nxge_t)ksp->ks_private;
-	if (nxgep == NULL)
-		return (-1);
-
-	NXGE_DEBUG_MSG((nxgep, KST_CTL, "==> nxge_rdc_stat_update"));
-
-	ch_name = ksp->ks_name;
-	ch_name += strlen(RDC_NAME_FORMAT1);
-	channel = mi_strtol(ch_name, &end, 10);
-
-	rdc_kstatsp = (p_nxge_rdc_kstat_t)ksp->ks_data;
-	statsp = (p_nxge_rx_ring_stats_t)&nxgep->statsp->rdc_stats[channel];
-
-	NXGE_DEBUG_MSG((nxgep, KST_CTL,
-		"nxge_rdc_stat_update $%p statsp $%p channel %d",
-		ksp->ks_data, statsp, channel));
-
-	if (rw == KSTAT_WRITE) {
-		statsp->dcf_err = rdc_kstatsp->dcf_err.value.ul;
-		statsp->rcr_ack_err = rdc_kstatsp->rcr_ack_err.value.ul;
-		statsp->dc_fifo_err = rdc_kstatsp->dc_fifoflow_err.value.ul;
-		statsp->rcr_sha_par = rdc_kstatsp->rcr_sha_par_err.value.ul;
-		statsp->rbr_pre_par = rdc_kstatsp->rbr_pre_par_err.value.ul;
-		statsp->wred_drop = rdc_kstatsp->wred_drop.value.ul;
-		statsp->rbr_pre_empty = rdc_kstatsp->rbr_pre_emty.value.ul;
-		statsp->rcr_shadow_full = rdc_kstatsp->rcr_shadow_full.value.ul;
-		statsp->rx_rbr_tmout = rdc_kstatsp->rbr_tmout.value.ul;
-		statsp->rsp_cnt_err = rdc_kstatsp->rsp_cnt_err.value.ul;
-		statsp->byte_en_bus = rdc_kstatsp->byte_en_bus.value.ul;
-		statsp->rsp_dat_err = rdc_kstatsp->rsp_dat_err.value.ul;
-		statsp->l2_err = rdc_kstatsp->compl_l2_err.value.ul;
-		statsp->l4_cksum_err = rdc_kstatsp->compl_l4_cksum_err.value.ul;
-		statsp->fflp_soft_err =
-			rdc_kstatsp->compl_fflp_soft_err.value.ul;
-		statsp->zcp_soft_err = rdc_kstatsp->compl_zcp_soft_err.value.ul;
-		statsp->config_err = rdc_kstatsp->config_err.value.ul;
-		statsp->rcrincon = rdc_kstatsp->rcrincon.value.ul;
-		statsp->rcrfull = rdc_kstatsp->rcrfull.value.ul;
-		statsp->rbr_empty = rdc_kstatsp->rbr_empty.value.ul;
-		statsp->rbrfull = rdc_kstatsp->rbrfull.value.ul;
-		statsp->rbrlogpage = rdc_kstatsp->rbrlogpage.value.ul;
-		statsp->cfiglogpage = rdc_kstatsp->cfiglogpage.value.ul;
-	} else {
-		rdc_kstatsp->ipackets.value.ull = statsp->ipackets;
-		rdc_kstatsp->rbytes.value.ull = statsp->ibytes;
-		rdc_kstatsp->errors.value.ul = statsp->ierrors;
-		rdc_kstatsp->dcf_err.value.ul = statsp->dcf_err;
-		rdc_kstatsp->rcr_ack_err.value.ul = statsp->rcr_ack_err;
-		rdc_kstatsp->dc_fifoflow_err.value.ul = statsp->dc_fifo_err;
-		rdc_kstatsp->rcr_sha_par_err.value.ul = statsp->rcr_sha_par;
-		rdc_kstatsp->rbr_pre_par_err.value.ul = statsp->rbr_pre_par;
-		rdc_kstatsp->wred_drop.value.ul = statsp->wred_drop;
-		rdc_kstatsp->port_drop_pkt.value.ul = statsp->port_drop_pkt;
-		rdc_kstatsp->rbr_pre_emty.value.ul = statsp->rbr_pre_empty;
-		rdc_kstatsp->rcr_shadow_full.value.ul = statsp->rcr_shadow_full;
-		rdc_kstatsp->rbr_tmout.value.ul = statsp->rx_rbr_tmout;
-		rdc_kstatsp->rsp_cnt_err.value.ul = statsp->rsp_cnt_err;
-		rdc_kstatsp->byte_en_bus.value.ul = statsp->byte_en_bus;
-		rdc_kstatsp->rsp_dat_err.value.ul = statsp->rsp_dat_err;
-		rdc_kstatsp->compl_l2_err.value.ul = statsp->l2_err;
-		rdc_kstatsp->compl_l4_cksum_err.value.ul = statsp->l4_cksum_err;
-		rdc_kstatsp->compl_fflp_soft_err.value.ul =
-			statsp->fflp_soft_err;
-		rdc_kstatsp->compl_zcp_soft_err.value.ul = statsp->zcp_soft_err;
-		rdc_kstatsp->config_err.value.ul = statsp->config_err;
-		rdc_kstatsp->rcrincon.value.ul = statsp->rcrincon;
-		rdc_kstatsp->rcrfull.value.ul = statsp->rcrfull;
-		rdc_kstatsp->rbr_empty.value.ul = statsp->rbr_empty;
-		rdc_kstatsp->rbrfull.value.ul = statsp->rbrfull;
-		rdc_kstatsp->rbrlogpage.value.ul = statsp->rbrlogpage;
-		rdc_kstatsp->cfiglogpage.value.ul = statsp->cfiglogpage;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, KST_CTL, " <== nxge_rdc_stat_update"));
-	return (0);
-}
-
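-/*
- * ks_update callback for the port-wide "RDC System Stats" kstat, covering
- * the RDC errors that are not tied to a single channel (ID mismatch and
- * IPP/ZCP EOP errors).
- */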
-/* ARGSUSED */
-int
-nxge_rdc_sys_stat_update(kstat_t *ksp, int rw)
-{
-	p_nxge_t nxgep;
-	p_nxge_rdc_sys_kstat_t rdc_sys_kstatsp;
-	p_nxge_rdc_sys_stats_t statsp;
-
-	nxgep = (p_nxge_t)ksp->ks_private;
-	if (nxgep == NULL)
-		return (-1);
-
-	NXGE_DEBUG_MSG((nxgep, KST_CTL, "==> nxge_rdc_sys_stat_update"));
-
-	rdc_sys_kstatsp = (p_nxge_rdc_sys_kstat_t)ksp->ks_data;
-	statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;
-
-	NXGE_DEBUG_MSG((nxgep, KST_CTL, "nxge_rdc_sys_stat_update %llx",
-		ksp->ks_data));
-
-	if (rw == KSTAT_WRITE) {
-		statsp->id_mismatch = rdc_sys_kstatsp->id_mismatch.value.ul;
-		statsp->ipp_eop_err = rdc_sys_kstatsp->ipp_eop_err.value.ul;
-		statsp->zcp_eop_err = rdc_sys_kstatsp->zcp_eop_err.value.ul;
-	} else {
-		rdc_sys_kstatsp->id_mismatch.value.ul = statsp->id_mismatch;
-		rdc_sys_kstatsp->ipp_eop_err.value.ul = statsp->ipp_eop_err;
-		rdc_sys_kstatsp->zcp_eop_err.value.ul = statsp->zcp_eop_err;
-	}
-	NXGE_DEBUG_MSG((nxgep, KST_CTL, " <== nxge_rdc_sys_stat_update"));
-	return (0);
-}
-
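-/*
- * The remaining ks_update callbacks (TXC, IPP, XMAC, BMAC, ZCP and FFLP)
- * follow the same pattern: on KSTAT_READ the soft statistics kept in
- * nxgep->statsp are copied into the named kstat entries, and on KSTAT_WRITE
- * the named kstat values are copied back into the soft statistics.
- */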
-/* ARGSUSED */
-static int
-nxge_txc_stat_update(kstat_t *ksp, int rw)
-{
-	p_nxge_t nxgep;
-	p_nxge_txc_kstat_t txc_kstatsp;
-	p_nxge_txc_stats_t statsp;
-
-	nxgep = (p_nxge_t)ksp->ks_private;
-
-	if (nxgep == NULL)
-		return (-1);
-
-	NXGE_DEBUG_MSG((nxgep, KST_CTL, "==> nxge_txc_stat_update"));
-
-	txc_kstatsp = (p_nxge_txc_kstat_t)ksp->ks_data;
-	statsp = (p_nxge_txc_stats_t)&nxgep->statsp->txc_stats;
-
-	if (rw == KSTAT_WRITE) {
-		statsp->pkt_stuffed = txc_kstatsp->pkt_stuffed.value.ul;
-		statsp->pkt_xmit = txc_kstatsp->pkt_xmit.value.ul;
-		statsp->ro_correct_err = txc_kstatsp->ro_correct_err.value.ul;
-		statsp->ro_uncorrect_err =
-			txc_kstatsp->ro_uncorrect_err.value.ul;
-		statsp->sf_correct_err = txc_kstatsp->sf_correct_err.value.ul;
-		statsp->sf_uncorrect_err =
-			txc_kstatsp->sf_uncorrect_err.value.ul;
-		statsp->address_failed = txc_kstatsp->address_failed.value.ul;
-		statsp->dma_failed = txc_kstatsp->dma_failed.value.ul;
-		statsp->length_failed = txc_kstatsp->length_failed.value.ul;
-		statsp->pkt_assy_dead = txc_kstatsp->pkt_assy_dead.value.ul;
-		statsp->reorder_err = txc_kstatsp->reorder_err.value.ul;
-	} else {
-		txc_kstatsp->pkt_stuffed.value.ul = statsp->pkt_stuffed;
-		txc_kstatsp->pkt_xmit.value.ul = statsp->pkt_xmit;
-		txc_kstatsp->ro_correct_err.value.ul = statsp->ro_correct_err;
-		txc_kstatsp->ro_uncorrect_err.value.ul =
-			statsp->ro_uncorrect_err;
-		txc_kstatsp->sf_correct_err.value.ul = statsp->sf_correct_err;
-		txc_kstatsp->sf_uncorrect_err.value.ul =
-			statsp->sf_uncorrect_err;
-		txc_kstatsp->address_failed.value.ul = statsp->address_failed;
-		txc_kstatsp->dma_failed.value.ul = statsp->dma_failed;
-		txc_kstatsp->length_failed.value.ul = statsp->length_failed;
-		txc_kstatsp->pkt_assy_dead.value.ul = statsp->pkt_assy_dead;
-		txc_kstatsp->reorder_err.value.ul = statsp->reorder_err;
-	}
-	NXGE_DEBUG_MSG((nxgep, KST_CTL, "<== nxge_txc_stat_update"));
-	return (0);
-}
-
-/* ARGSUSED */
-int
-nxge_ipp_stat_update(kstat_t *ksp, int rw)
-{
-	p_nxge_t nxgep;
-	p_nxge_ipp_kstat_t ipp_kstatsp;
-	p_nxge_ipp_stats_t statsp;
-
-	nxgep = (p_nxge_t)ksp->ks_private;
-	if (nxgep == NULL)
-		return (-1);
-
-	NXGE_DEBUG_MSG((nxgep, KST_CTL, "==> nxge_ipp_stat_update"));
-
-	ipp_kstatsp = (p_nxge_ipp_kstat_t)ksp->ks_data;
-	statsp = (p_nxge_ipp_stats_t)&nxgep->statsp->ipp_stats;
-
-	if (rw == KSTAT_WRITE) {
-		statsp->eop_miss = ipp_kstatsp->eop_miss.value.ul;
-		statsp->sop_miss = ipp_kstatsp->sop_miss.value.ul;
-		statsp->dfifo_ue = ipp_kstatsp->dfifo_ue.value.ul;
-		statsp->ecc_err_cnt = ipp_kstatsp->ecc_err_cnt.value.ul;
-		statsp->pfifo_over = ipp_kstatsp->pfifo_over.value.ul;
-		statsp->pfifo_und = ipp_kstatsp->pfifo_und.value.ul;
-		statsp->bad_cs_cnt = ipp_kstatsp->bad_cs_cnt.value.ul;
-		statsp->pkt_dis_cnt = ipp_kstatsp->pkt_dis_cnt.value.ul;
-		statsp->bad_cs_cnt = ipp_kstatsp->cs_fail.value.ul;
-	} else {
-		ipp_kstatsp->eop_miss.value.ul = statsp->eop_miss;
-		ipp_kstatsp->sop_miss.value.ul = statsp->sop_miss;
-		ipp_kstatsp->dfifo_ue.value.ul = statsp->dfifo_ue;
-		ipp_kstatsp->ecc_err_cnt.value.ul = statsp->ecc_err_cnt;
-		ipp_kstatsp->pfifo_over.value.ul = statsp->pfifo_over;
-		ipp_kstatsp->pfifo_und.value.ul = statsp->pfifo_und;
-		ipp_kstatsp->bad_cs_cnt.value.ul = statsp->bad_cs_cnt;
-		ipp_kstatsp->pkt_dis_cnt.value.ul = statsp->pkt_dis_cnt;
-		ipp_kstatsp->cs_fail.value.ul = statsp->bad_cs_cnt;
-	}
-	NXGE_DEBUG_MSG((nxgep, KST_CTL, "<== nxge_ipp_stat_update"));
-	return (0);
-}
-
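-/*
- * XMAC statistics.  The per-lane XPCS symbol error counters are carried
- * only when compiled with NXGE_DEBUG_SYMBOL_ERR.
- */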
-/* ARGSUSED */
-int
-nxge_xmac_stat_update(kstat_t *ksp, int rw)
-{
-	p_nxge_t nxgep;
-	p_nxge_xmac_kstat_t xmac_kstatsp;
-	p_nxge_xmac_stats_t statsp;
-
-	nxgep = (p_nxge_t)ksp->ks_private;
-	if (nxgep == NULL)
-		return (-1);
-
-	NXGE_DEBUG_MSG((nxgep, KST_CTL, "==> nxge_xmac_stat_update"));
-
-	xmac_kstatsp = (p_nxge_xmac_kstat_t)ksp->ks_data;
-	statsp = (p_nxge_xmac_stats_t)&nxgep->statsp->xmac_stats;
-
-	if (rw == KSTAT_WRITE) {
-		statsp->tx_frame_cnt = xmac_kstatsp->tx_frame_cnt.value.ul;
-		statsp->tx_underflow_err =
-			xmac_kstatsp->tx_underflow_err.value.ul;
-		statsp->tx_maxpktsize_err =
-			xmac_kstatsp->tx_maxpktsize_err.value.ul;
-		statsp->tx_overflow_err =
-			xmac_kstatsp->tx_overflow_err.value.ul;
-		statsp->tx_fifo_xfr_err =
-			xmac_kstatsp->tx_fifo_xfr_err.value.ul;
-		statsp->tx_byte_cnt = xmac_kstatsp->tx_byte_cnt.value.ul;
-		statsp->rx_underflow_err =
-			xmac_kstatsp->rx_underflow_err.value.ul;
-		statsp->rx_overflow_err =
-			xmac_kstatsp->rx_overflow_err.value.ul;
-		statsp->rx_crc_err_cnt = xmac_kstatsp->rx_crc_err_cnt.value.ul;
-		statsp->rx_len_err_cnt = xmac_kstatsp->rx_len_err_cnt.value.ul;
-		statsp->rx_viol_err_cnt =
-			xmac_kstatsp->rx_viol_err_cnt.value.ul;
-		statsp->rx_byte_cnt = xmac_kstatsp->rx_byte_cnt.value.ul;
-		statsp->rx_hist1_cnt = xmac_kstatsp->rx_hist1_cnt.value.ul;
-		statsp->rx_hist2_cnt = xmac_kstatsp->rx_hist2_cnt.value.ul;
-		statsp->rx_hist3_cnt = xmac_kstatsp->rx_hist3_cnt.value.ul;
-		statsp->rx_hist4_cnt = xmac_kstatsp->rx_hist4_cnt.value.ul;
-		statsp->rx_hist5_cnt = xmac_kstatsp->rx_hist5_cnt.value.ul;
-		statsp->rx_hist6_cnt = xmac_kstatsp->rx_hist6_cnt.value.ul;
-		statsp->rx_mult_cnt = xmac_kstatsp->rx_mult_cnt.value.ul;
-		statsp->rx_frag_cnt = xmac_kstatsp->rx_frag_cnt.value.ul;
-		statsp->rx_frame_align_err_cnt =
-			xmac_kstatsp->rx_frame_align_err_cnt.value.ul;
-		statsp->rx_linkfault_err_cnt =
-			xmac_kstatsp->rx_linkfault_err_cnt.value.ul;
-		statsp->rx_localfault_err =
-			xmac_kstatsp->rx_local_fault_err_cnt.value.ul;
-		statsp->rx_remotefault_err =
-			xmac_kstatsp->rx_remote_fault_err_cnt.value.ul;
-		statsp->xpcs_deskew_err_cnt =
-			xmac_kstatsp->xpcs_deskew_err_cnt.value.ul;
-#ifdef	NXGE_DEBUG_SYMBOL_ERR
-		statsp->xpcs_ln0_symbol_err_cnt =
-			xmac_kstatsp->xpcs_ln0_symbol_err_cnt.value.ul;
-		statsp->xpcs_ln1_symbol_err_cnt =
-			xmac_kstatsp->xpcs_ln1_symbol_err_cnt.value.ul;
-		statsp->xpcs_ln2_symbol_err_cnt =
-			xmac_kstatsp->xpcs_ln2_symbol_err_cnt.value.ul;
-		statsp->xpcs_ln3_symbol_err_cnt =
-			xmac_kstatsp->xpcs_ln3_symbol_err_cnt.value.ul;
-#endif
-	} else {
-		xmac_kstatsp->tx_frame_cnt.value.ul = statsp->tx_frame_cnt;
-		xmac_kstatsp->tx_underflow_err.value.ul =
-			statsp->tx_underflow_err;
-		xmac_kstatsp->tx_maxpktsize_err.value.ul =
-			statsp->tx_maxpktsize_err;
-		xmac_kstatsp->tx_overflow_err.value.ul =
-			statsp->tx_overflow_err;
-		xmac_kstatsp->tx_fifo_xfr_err.value.ul =
-			statsp->tx_fifo_xfr_err;
-		xmac_kstatsp->tx_byte_cnt.value.ul = statsp->tx_byte_cnt;
-		xmac_kstatsp->rx_underflow_err.value.ul =
-			statsp->rx_underflow_err;
-		xmac_kstatsp->rx_overflow_err.value.ul =
-			statsp->rx_overflow_err;
-		xmac_kstatsp->rx_crc_err_cnt.value.ul = statsp->rx_crc_err_cnt;
-		xmac_kstatsp->rx_len_err_cnt.value.ul = statsp->rx_len_err_cnt;
-		xmac_kstatsp->rx_viol_err_cnt.value.ul =
-			statsp->rx_viol_err_cnt;
-		xmac_kstatsp->rx_byte_cnt.value.ul = statsp->rx_byte_cnt;
-		xmac_kstatsp->rx_hist1_cnt.value.ul = statsp->rx_hist1_cnt;
-		xmac_kstatsp->rx_hist2_cnt.value.ul = statsp->rx_hist2_cnt;
-		xmac_kstatsp->rx_hist3_cnt.value.ul = statsp->rx_hist3_cnt;
-		xmac_kstatsp->rx_hist4_cnt.value.ul = statsp->rx_hist4_cnt;
-		xmac_kstatsp->rx_hist5_cnt.value.ul = statsp->rx_hist5_cnt;
-		xmac_kstatsp->rx_hist6_cnt.value.ul = statsp->rx_hist6_cnt;
-		xmac_kstatsp->rx_mult_cnt.value.ul = statsp->rx_mult_cnt;
-		xmac_kstatsp->rx_frag_cnt.value.ul = statsp->rx_frag_cnt;
-		xmac_kstatsp->rx_frame_align_err_cnt.value.ul =
-			statsp->rx_frame_align_err_cnt;
-		xmac_kstatsp->rx_linkfault_err_cnt.value.ul =
-			statsp->rx_linkfault_err_cnt;
-		xmac_kstatsp->rx_local_fault_err_cnt.value.ul =
-			statsp->rx_localfault_err;
-		xmac_kstatsp->rx_remote_fault_err_cnt.value.ul =
-			statsp->rx_remotefault_err;
-		xmac_kstatsp->xpcs_deskew_err_cnt.value.ul =
-			statsp->xpcs_deskew_err_cnt;
-#ifdef	NXGE_DEBUG_SYMBOL_ERR
-		xmac_kstatsp->xpcs_ln0_symbol_err_cnt.value.ul =
-			statsp->xpcs_ln0_symbol_err_cnt;
-		xmac_kstatsp->xpcs_ln1_symbol_err_cnt.value.ul =
-			statsp->xpcs_ln1_symbol_err_cnt;
-		xmac_kstatsp->xpcs_ln2_symbol_err_cnt.value.ul =
-			statsp->xpcs_ln2_symbol_err_cnt;
-		xmac_kstatsp->xpcs_ln3_symbol_err_cnt.value.ul =
-			statsp->xpcs_ln3_symbol_err_cnt;
-#endif
-	}
-	NXGE_DEBUG_MSG((nxgep, KST_CTL, "<== nxge_xmac_stat_update"));
-	return (0);
-}
-
-/* ARGSUSED */
-int
-nxge_bmac_stat_update(kstat_t *ksp, int rw)
-{
-	p_nxge_t nxgep;
-	p_nxge_bmac_kstat_t bmac_kstatsp;
-	p_nxge_bmac_stats_t statsp;
-
-	nxgep = (p_nxge_t)ksp->ks_private;
-	if (nxgep == NULL)
-		return (-1);
-
-	NXGE_DEBUG_MSG((nxgep, KST_CTL, "==> nxge_bmac_stat_update"));
-
-	bmac_kstatsp = (p_nxge_bmac_kstat_t)ksp->ks_data;
-	statsp = (p_nxge_bmac_stats_t)&nxgep->statsp->bmac_stats;
-
-	if (rw == KSTAT_WRITE) {
-		statsp->tx_frame_cnt = bmac_kstatsp->tx_frame_cnt.value.ul;
-		statsp->tx_underrun_err =
-			bmac_kstatsp->tx_underrun_err.value.ul;
-		statsp->tx_max_pkt_err = bmac_kstatsp->tx_max_pkt_err.value.ul;
-		statsp->tx_byte_cnt = bmac_kstatsp->tx_byte_cnt.value.ul;
-		statsp->rx_frame_cnt = bmac_kstatsp->rx_frame_cnt.value.ul;
-		statsp->rx_byte_cnt = bmac_kstatsp->rx_byte_cnt.value.ul;
-		statsp->rx_overflow_err =
-			bmac_kstatsp->rx_overflow_err.value.ul;
-		statsp->rx_align_err_cnt =
-			bmac_kstatsp->rx_align_err_cnt.value.ul;
-		statsp->rx_crc_err_cnt = bmac_kstatsp->rx_crc_err_cnt.value.ul;
-		statsp->rx_len_err_cnt = bmac_kstatsp->rx_len_err_cnt.value.ul;
-		statsp->rx_viol_err_cnt =
-			bmac_kstatsp->rx_viol_err_cnt.value.ul;
-	} else {
-		bmac_kstatsp->tx_frame_cnt.value.ul = statsp->tx_frame_cnt;
-		bmac_kstatsp->tx_underrun_err.value.ul =
-			statsp->tx_underrun_err;
-		bmac_kstatsp->tx_max_pkt_err.value.ul = statsp->tx_max_pkt_err;
-		bmac_kstatsp->tx_byte_cnt.value.ul = statsp->tx_byte_cnt;
-		bmac_kstatsp->rx_frame_cnt.value.ul = statsp->rx_frame_cnt;
-		bmac_kstatsp->rx_byte_cnt.value.ul = statsp->rx_byte_cnt;
-		bmac_kstatsp->rx_overflow_err.value.ul =
-			statsp->rx_overflow_err;
-		bmac_kstatsp->rx_align_err_cnt.value.ul =
-			statsp->rx_align_err_cnt;
-		bmac_kstatsp->rx_crc_err_cnt.value.ul = statsp->rx_crc_err_cnt;
-		bmac_kstatsp->rx_len_err_cnt.value.ul = statsp->rx_len_err_cnt;
-		bmac_kstatsp->rx_viol_err_cnt.value.ul =
-			statsp->rx_viol_err_cnt;
-	}
-	NXGE_DEBUG_MSG((nxgep, KST_CTL, "<== nxge_bmac_stat_update"));
-	return (0);
-}
-
-/* ARGSUSED */
-int
-nxge_zcp_stat_update(kstat_t *ksp, int rw)
-{
-	p_nxge_t nxgep;
-	p_nxge_zcp_kstat_t zcp_kstatsp;
-	p_nxge_zcp_stats_t statsp;
-
-	nxgep = (p_nxge_t)ksp->ks_private;
-	if (nxgep == NULL)
-		return (-1);
-
-	NXGE_DEBUG_MSG((nxgep, KST_CTL, "==> nxge_zcp_stat_update"));
-
-	zcp_kstatsp = (p_nxge_zcp_kstat_t)ksp->ks_data;
-	statsp = (p_nxge_zcp_stats_t)&nxgep->statsp->zcp_stats;
-
-	if (rw == KSTAT_WRITE) {
-		statsp->rrfifo_underrun = zcp_kstatsp->rrfifo_underrun.value.ul;
-		statsp->rrfifo_overrun = zcp_kstatsp->rrfifo_overrun.value.ul;
-		statsp->rspfifo_uncorr_err =
-			zcp_kstatsp->rspfifo_uncorr_err.value.ul;
-		statsp->buffer_overflow = zcp_kstatsp->buffer_overflow.value.ul;
-		statsp->stat_tbl_perr = zcp_kstatsp->stat_tbl_perr.value.ul;
-		statsp->dyn_tbl_perr = zcp_kstatsp->dyn_tbl_perr.value.ul;
-		statsp->buf_tbl_perr = zcp_kstatsp->buf_tbl_perr.value.ul;
-		statsp->tt_program_err = zcp_kstatsp->tt_program_err.value.ul;
-		statsp->rsp_tt_index_err =
-			zcp_kstatsp->rsp_tt_index_err.value.ul;
-		statsp->slv_tt_index_err =
-			zcp_kstatsp->slv_tt_index_err.value.ul;
-		statsp->zcp_tt_index_err =
-			zcp_kstatsp->zcp_tt_index_err.value.ul;
-		statsp->cfifo_ecc = zcp_kstatsp->cfifo_ecc.value.ul;
-	} else {
-		zcp_kstatsp->rrfifo_underrun.value.ul = statsp->rrfifo_underrun;
-		zcp_kstatsp->rrfifo_overrun.value.ul = statsp->rrfifo_overrun;
-		zcp_kstatsp->rspfifo_uncorr_err.value.ul =
-			statsp->rspfifo_uncorr_err;
-		zcp_kstatsp->buffer_overflow.value.ul =
-			statsp->buffer_overflow;
-		zcp_kstatsp->stat_tbl_perr.value.ul = statsp->stat_tbl_perr;
-		zcp_kstatsp->dyn_tbl_perr.value.ul = statsp->dyn_tbl_perr;
-		zcp_kstatsp->buf_tbl_perr.value.ul = statsp->buf_tbl_perr;
-		zcp_kstatsp->tt_program_err.value.ul = statsp->tt_program_err;
-		zcp_kstatsp->rsp_tt_index_err.value.ul =
-			statsp->rsp_tt_index_err;
-		zcp_kstatsp->slv_tt_index_err.value.ul =
-			statsp->slv_tt_index_err;
-		zcp_kstatsp->zcp_tt_index_err.value.ul =
-			statsp->zcp_tt_index_err;
-		zcp_kstatsp->cfifo_ecc.value.ul = statsp->cfifo_ecc;
-	}
-	NXGE_DEBUG_MSG((nxgep, KST_CTL, "<== nxge_zcp_stat_update"));
-	return (0);
-}
-
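-/*
- * FFLP (classification) statistics.  The hash table PIO error counters are
- * kept per partition, so both directions loop over MAX_PARTITION entries.
- */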
-/* ARGSUSED */
-int
-nxge_fflp_stat_update(kstat_t *ksp, int rw)
-{
-	p_nxge_t nxgep;
-	p_nxge_fflp_kstat_t fflp_kstatsp;
-	p_nxge_fflp_stats_t statsp;
-	int ldc_grp;
-
-	nxgep = (p_nxge_t)ksp->ks_private;
-	if (nxgep == NULL)
-		return (-1);
-
-	NXGE_DEBUG_MSG((nxgep, KST_CTL, "==> nxge_fflp_stat_update"));
-
-	fflp_kstatsp = (p_nxge_fflp_kstat_t)ksp->ks_data;
-	statsp = (p_nxge_fflp_stats_t)&nxgep->statsp->fflp_stats;
-
-	if (rw == KSTAT_WRITE) {
-		statsp->tcam_parity_err = fflp_kstatsp->fflp_tcam_perr.value.ul;
-		statsp->tcam_ecc_err = fflp_kstatsp->fflp_tcam_ecc_err.value.ul;
-		statsp->vlan_parity_err = fflp_kstatsp->fflp_vlan_perr.value.ul;
-		statsp->hash_lookup_err =
-			fflp_kstatsp->fflp_hasht_lookup_err.value.ul;
-		for (ldc_grp = 0; ldc_grp < MAX_PARTITION; ldc_grp++) {
-			statsp->hash_pio_err[ldc_grp] =
-				fflp_kstatsp->fflp_hasht_data_err[ldc_grp].
-				value.ul;
-		}
-	} else {
-		fflp_kstatsp->fflp_tcam_perr.value.ul =
-			statsp->tcam_parity_err;
-		fflp_kstatsp->fflp_tcam_ecc_err.value.ul = statsp->tcam_ecc_err;
-		fflp_kstatsp->fflp_vlan_perr.value.ul = statsp->vlan_parity_err;
-		fflp_kstatsp->fflp_hasht_lookup_err.value.ul =
-			statsp->hash_lookup_err;
-		for (ldc_grp = 0; ldc_grp < MAX_PARTITION; ldc_grp++) {
-			fflp_kstatsp->fflp_hasht_data_err[ldc_grp].value.ul =
-				statsp->hash_pio_err[ldc_grp];
-		}
-	}
-	NXGE_DEBUG_MSG((nxgep, KST_CTL, "<== nxge_fflp_stat_update"));
-	return (0);
-}
-
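-/*
- * Pack a 6-byte Ethernet address into a 64-bit value for export as a
- * kstat; ether_addr_octet[0] ends up in the least significant byte.
- */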
-/* ARGSUSED */
-static uint64_t
-nxge_mac_octet_to_u64(struct ether_addr addr)
-{
-	int i;
-	uint64_t addr64 = 0;
-
-	for (i = ETHERADDRL - 1; i >= 0; i--) {
-		addr64 <<= 8;
-		addr64 |= addr.ether_addr_octet[i];
-	}
-	return (addr64);
-}
-
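-/*
- * The alternate ("MMAC") MAC address kstat is read-only: it reports the
- * maximum and currently available alternate address counts and the sixteen
- * address slots.  Attempts to write it are rejected with a warning.
- */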
-/* ARGSUSED */
-int
-nxge_mmac_stat_update(kstat_t *ksp, int rw)
-{
-	p_nxge_t nxgep;
-	p_nxge_mmac_kstat_t mmac_kstatsp;
-	p_nxge_mmac_stats_t statsp;
-
-	nxgep = (p_nxge_t)ksp->ks_private;
-	if (nxgep == NULL)
-		return (-1);
-
-	NXGE_DEBUG_MSG((nxgep, KST_CTL, "==> nxge_mmac_stat_update"));
-
-	mmac_kstatsp = (p_nxge_mmac_kstat_t)ksp->ks_data;
-	statsp = (p_nxge_mmac_stats_t)&nxgep->statsp->mmac_stats;
-
-	if (rw == KSTAT_WRITE) {
-		cmn_err(CE_WARN, "Can not write mmac stats");
-	} else {
-		mmac_kstatsp->mmac_max_addr_cnt.value.ul =
-			statsp->mmac_max_cnt;
-		mmac_kstatsp->mmac_avail_addr_cnt.value.ul =
-			statsp->mmac_avail_cnt;
-		mmac_kstatsp->mmac_addr1.value.ul =
-			nxge_mac_octet_to_u64(statsp->mmac_avail_pool[0]);
-		mmac_kstatsp->mmac_addr2.value.ul =
-			nxge_mac_octet_to_u64(statsp->mmac_avail_pool[1]);
-		mmac_kstatsp->mmac_addr3.value.ul =
-			nxge_mac_octet_to_u64(statsp->mmac_avail_pool[2]);
-		mmac_kstatsp->mmac_addr4.value.ul =
-			nxge_mac_octet_to_u64(statsp->mmac_avail_pool[3]);
-		mmac_kstatsp->mmac_addr5.value.ul =
-			nxge_mac_octet_to_u64(statsp->mmac_avail_pool[4]);
-		mmac_kstatsp->mmac_addr6.value.ul =
-			nxge_mac_octet_to_u64(statsp->mmac_avail_pool[5]);
-		mmac_kstatsp->mmac_addr7.value.ul =
-			nxge_mac_octet_to_u64(statsp->mmac_avail_pool[6]);
-		mmac_kstatsp->mmac_addr8.value.ul =
-			nxge_mac_octet_to_u64(statsp->mmac_avail_pool[7]);
-		mmac_kstatsp->mmac_addr9.value.ul =
-			nxge_mac_octet_to_u64(statsp->mmac_avail_pool[8]);
-		mmac_kstatsp->mmac_addr10.value.ul =
-			nxge_mac_octet_to_u64(statsp->mmac_avail_pool[9]);
-		mmac_kstatsp->mmac_addr11.value.ul =
-			nxge_mac_octet_to_u64(statsp->mmac_avail_pool[10]);
-		mmac_kstatsp->mmac_addr12.value.ul =
-			nxge_mac_octet_to_u64(statsp->mmac_avail_pool[11]);
-		mmac_kstatsp->mmac_addr13.value.ul =
-			nxge_mac_octet_to_u64(statsp->mmac_avail_pool[12]);
-		mmac_kstatsp->mmac_addr14.value.ul =
-			nxge_mac_octet_to_u64(statsp->mmac_avail_pool[13]);
-		mmac_kstatsp->mmac_addr15.value.ul =
-			nxge_mac_octet_to_u64(statsp->mmac_avail_pool[14]);
-		mmac_kstatsp->mmac_addr16.value.ul =
-			nxge_mac_octet_to_u64(statsp->mmac_avail_pool[15]);
-	}
-	NXGE_DEBUG_MSG((nxgep, KST_CTL, "<== nxge_mmac_stat_update"));
-	return (0);
-}
-
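-/*
- * Create, initialize and install a named kstat from an index table.  Each
- * table entry supplies a kstat name and data type; the table is terminated
- * by an entry with a NULL name.  The supplied update routine is installed
- * as ks_update and the nxge instance as ks_private.
- */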
-/* ARGSUSED */
-static kstat_t *
-nxge_setup_local_kstat(p_nxge_t nxgep, int instance, char *name,
-	const nxge_kstat_index_t *ksip, size_t count,
-	int (*update) (kstat_t *, int))
-{
-	kstat_t *ksp;
-	kstat_named_t *knp;
-	int i;
-
-	ksp = kstat_create(NXGE_DRIVER_NAME, instance, name, "net",
-		KSTAT_TYPE_NAMED, count, 0);
-	if (ksp == NULL)
-		return (NULL);
-
-	ksp->ks_private = (void *)nxgep;
-	ksp->ks_update = update;
-	knp = ksp->ks_data;
-
-	for (i = 0; ksip[i].name != NULL; i++) {
-		kstat_named_init(&knp[i], ksip[i].name, ksip[i].type);
-	}
-
-	kstat_install(ksp);
-	return (ksp);
-}
-
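-/*
- * Create all of the kstats exported by this instance: one kstat per RDC and
- * TDC channel, the RDC system, IPP, TXC, ZCP, FFLP and MMAC kstats, and the
- * per-port "Port Stats" kstat whose update routine is
- * nxge_port_kstat_update().  All are created under NXGE_DRIVER_NAME and can
- * be inspected from userland with kstat(1M).
- */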
-/* ARGSUSED */
-void
-nxge_setup_kstats(p_nxge_t nxgep)
-{
-	struct kstat *ksp;
-	p_nxge_port_kstat_t nxgekp;
-	size_t nxge_kstat_sz;
-	char stat_name[64];
-	char mmac_name[64];
-	int i;
-
-	NXGE_DEBUG_MSG((nxgep, KST_CTL, "==> nxge_setup_kstats"));
-
-	/* Setup RDC statistics */
-	for (i = 0; i < nxgep->nrdc; i++) {
-		(void) sprintf(stat_name, "%s" CH_NAME_FORMAT,
-			RDC_NAME_FORMAT1, i);
-		nxgep->statsp->rdc_ksp[i] = nxge_setup_local_kstat(nxgep,
-			nxgep->instance, stat_name,
-			&nxge_rdc_stats[0], RDC_STAT_END, nxge_rdc_stat_update);
-#ifdef	NXGE_DEBUG_ERROR
-		if (nxgep->statsp->rdc_ksp[i] == NULL)
-			NXGE_DEBUG_MSG((nxgep, KST_CTL,
-				"kstat_create failed for rdc channel %d", i));
-#endif
-	}
-
-	/* Setup RDC System statistics */
-	nxgep->statsp->rdc_sys_ksp = nxge_setup_local_kstat(nxgep,
-		nxgep->instance,
-		"RDC System Stats",
-		&nxge_rdc_sys_stats[0],
-		RDC_SYS_STAT_END,
-		nxge_rdc_sys_stat_update);
-
-	/* Setup IPP statistics */
-	nxgep->statsp->ipp_ksp = nxge_setup_local_kstat(nxgep,
-		nxgep->instance,
-		"IPP Stats",
-		&nxge_ipp_stats[0],
-		IPP_STAT_END,
-		nxge_ipp_stat_update);
-#ifdef	NXGE_DEBUG_ERROR
-	if (nxgep->statsp->ipp_ksp == NULL)
-		NXGE_DEBUG_MSG((nxgep, KST_CTL, "kstat_create failed for ipp"));
-#endif
-
-	/* Setup TDC statistics */
-	for (i = 0; i < nxgep->ntdc; i++) {
-		(void) sprintf(stat_name, "%s" CH_NAME_FORMAT,
-			TDC_NAME_FORMAT1, i);
-		nxgep->statsp->tdc_ksp[i] = nxge_setup_local_kstat(nxgep,
-			nxgep->instance,
-			stat_name,
-			&nxge_tdc_stats[0],
-			TDC_STAT_END,
-			nxge_tdc_stat_update);
-#ifdef	NXGE_DEBUG_ERROR
-		if (nxgep->statsp->tdc_ksp[i] == NULL) {
-			NXGE_DEBUG_MSG((nxgep, KST_CTL,
-				"kstat_create failed for tdc channel %d", i));
-		}
-#endif
-	}
-
-	/* Setup TXC statistics */
-	nxgep->statsp->txc_ksp = nxge_setup_local_kstat(nxgep,
-		nxgep->instance, "TXC Stats", &nxge_txc_stats[0],
-		TXC_STAT_END, nxge_txc_stat_update);
-#ifdef	NXGE_DEBUG_ERROR
-	if (nxgep->statsp->txc_ksp == NULL)
-		NXGE_DEBUG_MSG((nxgep, KST_CTL, "kstat_create failed for txc"));
-#endif
-
-	/* Setup ZCP statistics */
-	nxgep->statsp->zcp_ksp = nxge_setup_local_kstat(nxgep,
-		nxgep->instance, "ZCP Stats", &nxge_zcp_stats[0],
-		ZCP_STAT_END, nxge_zcp_stat_update);
-#ifdef	NXGE_DEBUG_ERROR
-	if (nxgep->statsp->zcp_ksp == NULL)
-		NXGE_DEBUG_MSG((nxgep, KST_CTL, "kstat_create failed for zcp"));
-#endif
-
-	/* Setup FFLP statistics */
-	nxgep->statsp->fflp_ksp[0] = nxge_setup_local_kstat(nxgep,
-		nxgep->instance, "FFLP Stats", &nxge_fflp_stats[0],
-		FFLP_STAT_END, nxge_fflp_stat_update);
-
-#ifdef	NXGE_DEBUG_ERROR
-	if (nxgep->statsp->fflp_ksp == NULL)
-		NXGE_DEBUG_MSG((nxgep, KST_CTL,
-			"kstat_create failed for fflp"));
-#endif
-
-	(void) sprintf(mmac_name, "MMAC Stats%d", nxgep->instance);
-	nxgep->statsp->mmac_ksp = nxge_setup_local_kstat(nxgep,
-		nxgep->instance, "MMAC Stats", &nxge_mmac_stats[0],
-		MMAC_STATS_END, nxge_mmac_stat_update);
-
-	nxge_kstat_sz = sizeof (nxge_port_kstat_t) +
-		sizeof (nxge_mac_kstat_t) - sizeof (kstat_named_t);
-
-	if ((ksp = kstat_create(NXGE_DRIVER_NAME, nxgep->instance,
-			"Port Stats", "net", KSTAT_TYPE_NAMED,
-			nxge_kstat_sz / sizeof (kstat_named_t), 0)) == NULL) {
-		NXGE_DEBUG_MSG((nxgep, KST_CTL, "kstat_create failed"));
-		NXGE_DEBUG_MSG((nxgep, KST_CTL, "<== nxge_setup_kstats"));
-		return;
-	}
-
-	/*
-	 * Named kstat entries for the per-port statistics.
-	 */
-	nxgekp = (p_nxge_port_kstat_t)ksp->ks_data;
-
-	/*
-	 * Transceiver state information.
-	 */
-	kstat_named_init(&nxgekp->xcvr_inits, "xcvr_inits",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->xcvr_inuse, "xcvr_inuse",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->xcvr_addr, "xcvr_addr",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->xcvr_id, "xcvr_id",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->cap_autoneg, "cap_autoneg",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->cap_10gfdx, "cap_10gfdx",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->cap_10ghdx, "cap_10ghdx",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->cap_1000fdx, "cap_1000fdx",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->cap_1000hdx, "cap_1000hdx",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->cap_100T4, "cap_100T4",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->cap_100fdx, "cap_100fdx",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->cap_100hdx, "cap_100hdx",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->cap_10fdx, "cap_10fdx",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->cap_10hdx, "cap_10hdx",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->cap_asmpause, "cap_asmpause",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->cap_pause, "cap_pause",
-		KSTAT_DATA_ULONG);
-
-	/*
-	 * Link partner capabilities.
-	 */
-	kstat_named_init(&nxgekp->lp_cap_autoneg, "lp_cap_autoneg",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->lp_cap_10gfdx, "lp_cap_10gfdx",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->lp_cap_10ghdx, "lp_cap_10ghdx",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->lp_cap_1000fdx, "lp_cap_1000fdx",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->lp_cap_1000hdx, "lp_cap_1000hdx",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->lp_cap_100T4, "lp_cap_100T4",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->lp_cap_100fdx, "lp_cap_100fdx",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->lp_cap_100hdx, "lp_cap_100hdx",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->lp_cap_10fdx, "lp_cap_10fdx",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->lp_cap_10hdx, "lp_cap_10hdx",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->lp_cap_asmpause, "lp_cap_asmpause",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->lp_cap_pause, "lp_cap_pause",
-		KSTAT_DATA_ULONG);
-	/*
-	 * Shared link setup.
-	 */
-	kstat_named_init(&nxgekp->link_T4, "link_T4",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->link_speed, "link_speed",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->link_duplex, "link_duplex",
-		KSTAT_DATA_CHAR);
-	kstat_named_init(&nxgekp->link_asmpause, "link_asmpause",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->link_pause, "link_pause",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->link_up, "link_up",
-		KSTAT_DATA_ULONG);
-
-	/*
-	 * Let the user know the MTU currently in use by the physical MAC
-	 * port.
-	 */
-	kstat_named_init(&nxgekp->mac_mtu, "mac_mtu",
-		KSTAT_DATA_ULONG);
-
-	/*
-	 * Loopback statistics.
-	 */
-	kstat_named_init(&nxgekp->lb_mode, "lb_mode",
-		KSTAT_DATA_ULONG);
-
-	/*
-	 * This tells the user whether the driver is in QOS mode or not.
-	 */
-	kstat_named_init(&nxgekp->qos_mode, "qos_mode",
-		KSTAT_DATA_ULONG);
-
-	/*
-	 * This tells whether the instance is trunked or not.
-	 */
-	kstat_named_init(&nxgekp->trunk_mode, "trunk_mode",
-		KSTAT_DATA_ULONG);
-
-#if defined MULTI_DATA_TX || defined MULTI_DATA_TXV2
-	kstat_named_init(&nxgekp->mdt_reqs, "mdt_reqs",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->mdt_hdr_bufs, "mdt_hdr_bufs",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->mdt_pld_bufs, "mdt_pld_bufs",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->mdt_pkts, "mdt_pkts",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->mdt_hdrs, "mdt_hdrs",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->mdt_plds, "mdt_plds",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->mdt_hdr_bind_fail, "mdt_hdr_bind_fail",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->mdt_pld_bind_fail, "mdt_pld_bind_fail",
-		KSTAT_DATA_ULONG);
-#endif
-#ifdef ACCEPT_JUMBO
-	kstat_named_init(&nxgekp->tx_jumbo_pkts, "tx_jumbo_pkts",
-		KSTAT_DATA_ULONG);
-#endif
-
-	/*
-	 * Rx Statistics.
-	 */
-#ifdef ACCEPT_JUMBO
-	kstat_named_init(&nxgekp->rx_jumbo_pkts, "rx_jumbo_pkts",
-		KSTAT_DATA_ULONG);
-#endif
-	/* General MAC statistics */
-	kstat_named_init(&nxgekp->ifspeed, "ifspeed",
-		KSTAT_DATA_UINT64);
-	kstat_named_init(&nxgekp->promisc, "promisc",
-		KSTAT_DATA_CHAR);
-	kstat_named_init(&nxgekp->rev_id, "rev_id",
-		KSTAT_DATA_ULONG);
-
-	ksp->ks_update = nxge_port_kstat_update;
-	ksp->ks_private = (void *) nxgep;
-	if (nxgep->mac.porttype == PORT_TYPE_XMAC)
-		nxge_xmac_init_kstats(ksp);
-	else
-		nxge_bmac_init_kstats(ksp);
-	kstat_install(ksp);
-	nxgep->statsp->port_ksp = ksp;
-	NXGE_DEBUG_MSG((nxgep, KST_CTL, "<== nxge_setup_kstats"));
-}
-
-/* ARGSUSED */
-void
-nxge_xmac_init_kstats(struct kstat *ksp)
-{
-	p_nxge_xmac_kstat_t nxgekp;
-
-	nxgekp = (p_nxge_xmac_kstat_t)ksp->ks_data;
-
-	/*
-	 * Transmit MAC statistics.
-	 */
-	kstat_named_init(&nxgekp->tx_frame_cnt, "txmac_frame_cnt",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->tx_underflow_err, "txmac_underflow_err",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->tx_overflow_err, "txmac_overflow_err",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->tx_maxpktsize_err, "txmac_maxpktsize_err",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->tx_fifo_xfr_err, "txmac_fifo_xfr_err",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->tx_byte_cnt, "txmac_byte_cnt",
-		KSTAT_DATA_ULONG);
-
-	/* Receive MAC statistics */
-	kstat_named_init(&nxgekp->rx_overflow_err, "rxmac_overflow_err",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->rx_underflow_err, "rxmac_underflow_err",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->rx_crc_err_cnt, "rxmac_crc_err",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->rx_len_err_cnt, "rxmac_length_err",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->rx_viol_err_cnt, "rxmac_code_violations",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->rx_byte_cnt, "rxmac_byte_cnt",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->rx_frame_align_err_cnt,
-		"rxmac_alignment_err",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->rx_hist1_cnt, "rxmac_64_cnt",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->rx_hist2_cnt, "rxmac_65_127_cnt",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->rx_hist3_cnt, "rxmac_128_255_cnt",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->rx_hist4_cnt, "rxmac_256_511_cnt",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->rx_hist5_cnt, "rxmac_512_1023_cnt",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->rx_hist6_cnt, "rxmac_1024_1522_cnt",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->rx_broadcast_cnt, "rxmac_broadcast_cnt",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->rx_mult_cnt, "rxmac_multicast_cnt",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->rx_frag_cnt, "rxmac_fragment_cnt",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->rx_linkfault_err_cnt, "rxmac_linkfault_errs",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->rx_remote_fault_err_cnt,
-		"rxmac_remote_faults",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->rx_local_fault_err_cnt, "rxmac_local_faults",
-		KSTAT_DATA_ULONG);
-
-	/* XPCS statistics */
-
-	kstat_named_init(&nxgekp->xpcs_deskew_err_cnt, "xpcs_deskew_err_cnt",
-		KSTAT_DATA_ULONG);
-#ifdef	NXGE_DEBUG_SYMBOL_ERR
-	kstat_named_init(&nxgekp->xpcs_ln0_symbol_err_cnt,
-		"xpcs_ln0_symbol_err_cnt",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->xpcs_ln1_symbol_err_cnt,
-		"xpcs_ln1_symbol_err_cnt",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->xpcs_ln2_symbol_err_cnt,
-		"xpcs_ln2_symbol_err_cnt",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->xpcs_ln3_symbol_err_cnt,
-		"xpcs_ln3_symbol_err_cnt",
-		KSTAT_DATA_ULONG);
-#endif
-}
-
-/* ARGSUSED */
-void
-nxge_bmac_init_kstats(struct kstat *ksp)
-{
-	p_nxge_bmac_kstat_t nxgekp;
-
-	nxgekp = (p_nxge_bmac_kstat_t)ksp->ks_data;
-
-	/*
-	 * Transmit MAC statistics.
-	 */
-	kstat_named_init(&nxgekp->tx_frame_cnt, "txmac_frame_cnt",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->tx_underrun_err, "txmac_underflow_err",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->tx_max_pkt_err, "txmac_maxpktsize_err",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->tx_byte_cnt, "txmac_byte_cnt",
-		KSTAT_DATA_ULONG);
-
-	/* Receive MAC statistics */
-	kstat_named_init(&nxgekp->rx_overflow_err, "rxmac_overflow_err",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->rx_crc_err_cnt, "rxmac_crc_err",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->rx_len_err_cnt, "rxmac_length_err",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->rx_viol_err_cnt, "rxmac_code_violations",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->rx_byte_cnt, "rxmac_byte_cnt",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->rx_align_err_cnt, "rxmac_alignment_err",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->rx_frame_cnt, "rxmac_frame_cnt",
-		KSTAT_DATA_ULONG);
-}
-
-/* ARGSUSED */
-void
-nxge_mac_init_kstats(p_nxge_t nxgep, struct kstat *ksp)
-{
-	p_nxge_mac_kstat_t nxgekp;
-
-	nxgekp = (p_nxge_mac_kstat_t)ksp->ks_data;
-
-	/*
-	 * Transmit MAC statistics.
-	 */
-	kstat_named_init(&nxgekp->tx_frame_cnt, "txmac_frame_cnt",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->tx_underflow_err, "txmac_underflow_err",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->tx_overflow_err, "txmac_overflow_err",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->tx_maxpktsize_err, "txmac_maxpktsize_err",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->tx_fifo_xfr_err, "txmac_fifo_xfr_err",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->tx_byte_cnt, "txmac_byte_cnt",
-		KSTAT_DATA_ULONG);
-
-	/*
-	 * Receive MAC statistics
-	 */
-	kstat_named_init(&nxgekp->rx_overflow_err, "rxmac_overflow_err",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->rx_underflow_err, "rxmac_underflow_err",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->rx_crc_err_cnt, "rxmac_crc_err",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->rx_len_err_cnt, "rxmac_length_err",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->rx_viol_err_cnt, "rxmac_code_violations",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->rx_byte_cnt, "rxmac_byte_cnt",
-		KSTAT_DATA_ULONG);
-	kstat_named_init(&nxgekp->rx_frame_align_err_cnt,
-		"rxmac_alignment_err",
-		KSTAT_DATA_ULONG);
-	if (nxgep->mac.porttype == PORT_TYPE_XMAC) {
-		kstat_named_init(&nxgekp->rx_hist1_cnt, "rxmac_64_cnt",
-			KSTAT_DATA_ULONG);
-		kstat_named_init(&nxgekp->rx_hist2_cnt, "rxmac_65_127_cnt",
-			KSTAT_DATA_ULONG);
-		kstat_named_init(&nxgekp->rx_hist3_cnt, "rxmac_128_255_cnt",
-			KSTAT_DATA_ULONG);
-		kstat_named_init(&nxgekp->rx_hist4_cnt, "rxmac_256_511_cnt",
-			KSTAT_DATA_ULONG);
-		kstat_named_init(&nxgekp->rx_hist5_cnt, "rxmac_512_1023_cnt",
-			KSTAT_DATA_ULONG);
-		kstat_named_init(&nxgekp->rx_hist6_cnt, "rxmac_1024_1522_cnt",
-			KSTAT_DATA_ULONG);
-		kstat_named_init(&nxgekp->rx_broadcast_cnt,
-			"rxmac_broadcast_cnt",
-			KSTAT_DATA_ULONG);
-		kstat_named_init(&nxgekp->rx_mult_cnt, "rxmac_multicast_cnt",
-			KSTAT_DATA_ULONG);
-		kstat_named_init(&nxgekp->rx_frag_cnt, "rxmac_fragment_cnt",
-			KSTAT_DATA_ULONG);
-		kstat_named_init(&nxgekp->rx_linkfault_err_cnt,
-			"rxmac_linkfault_errs",
-			KSTAT_DATA_ULONG);
-		kstat_named_init(&nxgekp->rx_remote_fault_err_cnt,
-			"rxmac_remote_faults",
-			KSTAT_DATA_ULONG);
-		kstat_named_init(&nxgekp->rx_local_fault_err_cnt,
-			"rxmac_local_faults",
-			KSTAT_DATA_ULONG);
-	} else if (nxgep->mac.porttype == PORT_TYPE_BMAC) {
-		kstat_named_init(&nxgekp->rx_frame_cnt, "rxmac_frame_cnt",
-			KSTAT_DATA_ULONG);
-	}
-}
-
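-/*
- * Delete the kstats created for this instance and free the soft statistics
- * structure.
- */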
-/* ARGSUSED */
-void
-nxge_destroy_kstats(p_nxge_t nxgep)
-{
-	int channel;
-	p_nxge_dma_pt_cfg_t p_dma_cfgp;
-	p_nxge_hw_pt_cfg_t p_cfgp;
-
-	NXGE_DEBUG_MSG((nxgep, KST_CTL, "==> nxge_destroy_kstats"));
-
-	if (nxgep->statsp == NULL)
-		return;
-	if (nxgep->statsp->ksp)
-		kstat_delete(nxgep->statsp->ksp);
-
-	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
-	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
-
-	for (channel = 0; channel < p_cfgp->max_rdcs; channel++) {
-		if (nxgep->statsp->rdc_ksp[channel])
-			kstat_delete(nxgep->statsp->rdc_ksp[channel]);
-	}
-
-	for (channel = 0; channel < p_cfgp->max_tdcs; channel++) {
-		if (nxgep->statsp->tdc_ksp[channel])
-			kstat_delete(nxgep->statsp->tdc_ksp[channel]);
-	}
-
-	if (nxgep->statsp->rdc_sys_ksp)
-		kstat_delete(nxgep->statsp->rdc_sys_ksp);
-	if (nxgep->statsp->fflp_ksp[0])
-		kstat_delete(nxgep->statsp->fflp_ksp[0]);
-	if (nxgep->statsp->ipp_ksp)
-		kstat_delete(nxgep->statsp->ipp_ksp);
-	if (nxgep->statsp->txc_ksp)
-		kstat_delete(nxgep->statsp->txc_ksp);
-	if (nxgep->statsp->mac_ksp)
-		kstat_delete(nxgep->statsp->mac_ksp);
-	if (nxgep->statsp->zcp_ksp)
-		kstat_delete(nxgep->statsp->zcp_ksp);
-	if (nxgep->statsp->port_ksp)
-		kstat_delete(nxgep->statsp->port_ksp);
-	if (nxgep->statsp->mmac_ksp)
-		kstat_delete(nxgep->statsp->mmac_ksp);
-	if (nxgep->statsp)
-		KMEM_FREE(nxgep->statsp, nxgep->statsp->stats_size);
-
-	NXGE_DEBUG_MSG((nxgep, KST_CTL, "<== nxge_destroy_kstats"));
-}
-
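-/*
- * ks_update callback for the per-port kstat.  The hardware counters are
- * refreshed via nxge_save_cntrs() before the transceiver, link-partner,
- * link state and port counters are copied between the soft statistics and
- * the named kstat entries in the direction indicated by rw.
- */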
-/* ARGSUSED */
-int
-nxge_port_kstat_update(kstat_t *ksp, int rw)
-{
-	p_nxge_t nxgep;
-	p_nxge_stats_t statsp;
-	p_nxge_port_kstat_t nxgekp;
-
-	nxgep = (p_nxge_t)ksp->ks_private;
-	if (nxgep == NULL)
-		return (-1);
-
-	NXGE_DEBUG_MSG((nxgep, KST_CTL, "==> nxge_port_kstat_update"));
-	statsp = (p_nxge_stats_t)nxgep->statsp;
-	nxgekp = (p_nxge_port_kstat_t)ksp->ks_data;
-	nxge_save_cntrs(nxgep);
-
-	if (rw == KSTAT_WRITE) {
-		/*
-		 * Transceiver state information.
-		 */
-		statsp->mac_stats.xcvr_inits = nxgekp->xcvr_inits.value.ul;
-
-		/*
-		 * Tx Statistics.
-		 */
-#if defined MULTI_DATA_TX || defined MULTI_DATA_TXV2
-		statsp->port_stats.mdt_reqs = nxgekp->mdt_reqs.value.ul;
-		statsp->port_stats.mdt_hdr_bufs = nxgekp->mdt_hdr_bufs.value.ul;
-		statsp->port_stats.mdt_pld_bufs = nxgekp->mdt_pld_bufs.value.ul;
-		statsp->port_stats.mdt_pkts = nxgekp->mdt_pkts.value.ul;
-		statsp->port_stats.mdt_hdrs = nxgekp->mdt_hdrs.value.ul;
-		statsp->port_stats.mdt_plds = nxgekp->mdt_plds.value.ul;
-		statsp->port_stats.mdt_hdr_bind_fail =
-			nxgekp->mdt_hdr_bind_fail.value.ul;
-		statsp->port_stats.mdt_pld_bind_fail =
-			nxgekp->mdt_pld_bind_fail.value.ul;
-#endif
-#ifdef ACCEPT_JUMBO
-		statsp->port_stats.tx_jumbo_pkts =
-			nxgekp->tx_jumbo_pkts.value.ul;
-#endif
-		/*
-		 * Rx Statistics.
-		 */
-#ifdef ACCEPT_JUMBO
-		statsp->port_stats.rx_jumbo_pkts =
-			nxgekp->rx_jumbo_pkts.value.ul;
-#endif
-		(void) nxge_xmac_stat_update(ksp, KSTAT_WRITE);
-		return (0);
-	} else {
-		if (nxgep->filter.all_phys_cnt)
-			(void) strcpy(nxgekp->promisc.value.c, "phys");
-		else if (nxgep->filter.all_multicast_cnt)
-			(void) strcpy(nxgekp->promisc.value.c, "multi");
-		else
-			(void) strcpy(nxgekp->promisc.value.c, "off");
-		nxgekp->ifspeed.value.ul =
-			statsp->mac_stats.link_speed * 1000000ULL;
-		nxgekp->rev_id.value.ul = statsp->mac_stats.rev_id;
-
-		/*
-		 * Transceiver state information.
-		 */
-		nxgekp->xcvr_inits.value.ul = statsp->mac_stats.xcvr_inits;
-		nxgekp->xcvr_inuse.value.ul = statsp->mac_stats.xcvr_inuse;
-		nxgekp->xcvr_addr.value.ul = statsp->mac_stats.xcvr_portn;
-		nxgekp->xcvr_id.value.ul = statsp->mac_stats.xcvr_id;
-		nxgekp->cap_autoneg.value.ul = statsp->mac_stats.cap_autoneg;
-		nxgekp->cap_10gfdx.value.ul = statsp->mac_stats.cap_10gfdx;
-		nxgekp->cap_10ghdx.value.ul = statsp->mac_stats.cap_10ghdx;
-		nxgekp->cap_1000fdx.value.ul = statsp->mac_stats.cap_1000fdx;
-		nxgekp->cap_1000hdx.value.ul = statsp->mac_stats.cap_1000hdx;
-		nxgekp->cap_100T4.value.ul = statsp->mac_stats.cap_100T4;
-		nxgekp->cap_100fdx.value.ul = statsp->mac_stats.cap_100fdx;
-		nxgekp->cap_100hdx.value.ul = statsp->mac_stats.cap_100hdx;
-		nxgekp->cap_10fdx.value.ul = statsp->mac_stats.cap_10fdx;
-		nxgekp->cap_10hdx.value.ul = statsp->mac_stats.cap_10hdx;
-		nxgekp->cap_asmpause.value.ul =
-			statsp->mac_stats.cap_asmpause;
-		nxgekp->cap_pause.value.ul = statsp->mac_stats.cap_pause;
-
-		/*
-		 * Link partner capabilities.
-		 */
-		nxgekp->lp_cap_autoneg.value.ul =
-			statsp->mac_stats.lp_cap_autoneg;
-		nxgekp->lp_cap_10gfdx.value.ul =
-			statsp->mac_stats.lp_cap_10gfdx;
-		nxgekp->lp_cap_10ghdx.value.ul =
-			statsp->mac_stats.lp_cap_10ghdx;
-		nxgekp->lp_cap_1000fdx.value.ul =
-			statsp->mac_stats.lp_cap_1000fdx;
-		nxgekp->lp_cap_1000hdx.value.ul =
-			statsp->mac_stats.lp_cap_1000hdx;
-		nxgekp->lp_cap_100T4.value.ul =
-			statsp->mac_stats.lp_cap_100T4;
-		nxgekp->lp_cap_100fdx.value.ul =
-			statsp->mac_stats.lp_cap_100fdx;
-		nxgekp->lp_cap_100hdx.value.ul =
-			statsp->mac_stats.lp_cap_100hdx;
-		nxgekp->lp_cap_10fdx.value.ul =
-			statsp->mac_stats.lp_cap_10fdx;
-		nxgekp->lp_cap_10hdx.value.ul =
-			statsp->mac_stats.lp_cap_10hdx;
-		nxgekp->lp_cap_asmpause.value.ul =
-			statsp->mac_stats.lp_cap_asmpause;
-		nxgekp->lp_cap_pause.value.ul =
-			statsp->mac_stats.lp_cap_pause;
-
-		/*
-		 * Physical link statistics.
-		 */
-		nxgekp->link_T4.value.ul = statsp->mac_stats.link_T4;
-		nxgekp->link_speed.value.ul = statsp->mac_stats.link_speed;
-		if (statsp->mac_stats.link_duplex == 2)
-			(void) strcpy(nxgekp->link_duplex.value.c, "full");
-		else if (statsp->mac_stats.link_duplex == 1)
-			(void) strcpy(nxgekp->link_duplex.value.c, "half");
-		else
-			(void) strcpy(nxgekp->link_duplex.value.c, "unknown");
-		nxgekp->link_asmpause.value.ul =
-			statsp->mac_stats.link_asmpause;
-		nxgekp->link_pause.value.ul = statsp->mac_stats.link_pause;
-		nxgekp->link_up.value.ul = statsp->mac_stats.link_up;
-
-		/*
-		 * Lets the user know the MTU currently in use by the physical
-		 * MAC port.
-		 */
-		nxgekp->mac_mtu.value.ul = statsp->mac_stats.mac_mtu;
-
-		/*
-		 * Loopback statistics.
-		 */
-		nxgekp->lb_mode.value.ul = statsp->port_stats.lb_mode;
-
-		/*
-		 * This tells the user whether the driver is in QOS mode or
-		 * not.
-		 */
-		nxgekp->qos_mode.value.ul = statsp->port_stats.qos_mode;
-
-		/*
-		 * This tells whether the instance is trunked or not
-		 */
-		nxgekp->trunk_mode.value.ul = statsp->port_stats.trunk_mode;
-
-#if defined MULTI_DATA_TX || defined MULTI_DATA_TXV2
-		nxgekp->mdt_reqs.value.ul = statsp->port_stats.mdt_reqs;
-		nxgekp->mdt_hdr_bufs.value.ul =
-			statsp->port_stats.mdt_hdr_bufs;
-		nxgekp->mdt_pld_bufs.value.ul =
-			statsp->port_stats.mdt_pld_bufs;
-		nxgekp->mdt_pkts.value.ul = statsp->port_stats.mdt_pkts;
-		nxgekp->mdt_hdrs.value.ul = statsp->port_stats.mdt_hdrs;
-		nxgekp->mdt_plds.value.ul = statsp->port_stats.mdt_plds;
-		nxgekp->mdt_hdr_bind_fail.value.ul =
-			statsp->port_stats.mdt_hdr_bind_fail;
-		nxgekp->mdt_pld_bind_fail.value.ul =
-			statsp->port_stats.mdt_pld_bind_fail;
-#endif
-#ifdef ACCEPT_JUMBO
-		nxgekp->tx_jumbo_pkts.value.ul =
-			statsp->port_stats.tx_jumbo_pkts;
-#endif
-#ifdef TX_MBLK_DEST
-		nxgekp->tx_1_desc.value.ul = statsp->port_stats.tx_1_desc;
-		nxgekp->tx_2_desc.value.ul = statsp->port_stats.tx_2_desc;
-		nxgekp->tx_3_desc.value.ul = statsp->port_stats.tx_3_desc;
-		nxgekp->tx_4_desc.value.ul = statsp->port_stats.tx_4_desc;
-		nxgekp->tx_5_desc.value.ul = statsp->port_stats.tx_5_desc;
-		nxgekp->tx_6_desc.value.ul = statsp->port_stats.tx_6_desc;
-		nxgekp->tx_7_desc.value.ul = statsp->port_stats.tx_7_desc;
-		nxgekp->tx_8_desc.value.ul = statsp->port_stats.tx_8_desc;
-		nxgekp->tx_max_desc.value.ul =
-			statsp->port_stats.tx_max_desc;
-#endif
-		/*
-		 * Rx Statistics.
-		 */
-#ifdef ACCEPT_JUMBO
-		nxgekp->rx_jumbo_pkts.value.ul =
-			statsp->port_stats.rx_jumbo_pkts;
-#endif
-		(void) nxge_xmac_stat_update(ksp, KSTAT_READ);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, KST_CTL, "<== nxge_port_kstat_update"));
-	return (0);
-}
-
-/*
- * Read the hardware MAC and IPP counter registers and accumulate their
- * values into the soft statistics structure.
- */
-/* ARGSUSED */
-void
-nxge_save_cntrs(p_nxge_t nxgep)
-{
-	p_nxge_stats_t statsp;
-	uint64_t val;
-	npi_handle_t handle;
-	uint8_t portn;
-	uint8_t cnt8;
-	uint16_t cnt16;
-	uint32_t cnt32;
-
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_save_cntrs"));
-
-	statsp = (p_nxge_stats_t)nxgep->statsp;
-	handle = nxgep->npi_handle;
-	portn = nxgep->mac.portnum;
-
-	MUTEX_ENTER(&nxgep->ouraddr_lock);
-
-	if (nxgep->mac.porttype == PORT_TYPE_XMAC) {
-		/*
-		 * Transmit MAC statistics.
-		 */
-		XMAC_REG_RD(handle, portn, XTXMAC_FRM_CNT_REG, &val);
-		statsp->xmac_stats.tx_frame_cnt += (val & XTXMAC_FRM_CNT_MASK);
-		XMAC_REG_RD(handle, portn, XTXMAC_BYTE_CNT_REG, &val);
-		statsp->xmac_stats.tx_byte_cnt += (val & XTXMAC_BYTE_CNT_MASK);
-		/*
-		 * Receive XMAC statistics.
-		 */
-		XMAC_REG_RD(handle, portn, XRXMAC_CRC_ER_CNT_REG, &val);
-		statsp->xmac_stats.rx_crc_err_cnt +=
-			(val & XRXMAC_CRC_ER_CNT_MASK);
-		XMAC_REG_RD(handle, portn, XRXMAC_MPSZER_CNT_REG, &val);
-		statsp->xmac_stats.rx_len_err_cnt +=
-			(val & XRXMAC_MPSZER_CNT_MASK);
-		XMAC_REG_RD(handle, portn, XRXMAC_CD_VIO_CNT_REG, &val);
-		statsp->xmac_stats.rx_viol_err_cnt +=
-			(val & XRXMAC_CD_VIO_CNT_MASK);
-		XMAC_REG_RD(handle, portn, XRXMAC_BT_CNT_REG, &val);
-		statsp->xmac_stats.rx_byte_cnt += (val & XRXMAC_BT_CNT_MASK);
-		XMAC_REG_RD(handle, portn, XRXMAC_HIST_CNT1_REG, &val);
-		statsp->xmac_stats.rx_hist1_cnt +=
-			(val & XRXMAC_HIST_CNT1_MASK);
-		XMAC_REG_RD(handle, portn, XRXMAC_HIST_CNT2_REG, &val);
-		statsp->xmac_stats.rx_hist2_cnt +=
-			(val & XRXMAC_HIST_CNT2_MASK);
-		XMAC_REG_RD(handle, portn, XRXMAC_HIST_CNT3_REG, &val);
-		statsp->xmac_stats.rx_hist3_cnt +=
-			(val & XRXMAC_HIST_CNT3_MASK);
-		XMAC_REG_RD(handle, portn, XRXMAC_HIST_CNT4_REG, &val);
-		statsp->xmac_stats.rx_hist4_cnt +=
-			(val & XRXMAC_HIST_CNT4_MASK);
-		XMAC_REG_RD(handle, portn, XRXMAC_HIST_CNT5_REG, &val);
-		statsp->xmac_stats.rx_hist5_cnt +=
-			(val & XRXMAC_HIST_CNT5_MASK);
-		XMAC_REG_RD(handle, portn, XRXMAC_HIST_CNT6_REG, &val);
-		statsp->xmac_stats.rx_hist6_cnt +=
-			(val & XRXMAC_HIST_CNT6_MASK);
-		XMAC_REG_RD(handle, portn, XRXMAC_BC_FRM_CNT_REG, &val);
-		statsp->xmac_stats.rx_broadcast_cnt +=
-			(val & XRXMAC_BC_FRM_CNT_MASK);
-		XMAC_REG_RD(handle, portn, XRXMAC_MC_FRM_CNT_REG, &val);
-		statsp->xmac_stats.rx_mult_cnt +=
-			(val & XRXMAC_MC_FRM_CNT_MASK);
-		XMAC_REG_RD(handle, portn, XRXMAC_FRAG_CNT_REG, &val);
-		statsp->xmac_stats.rx_frag_cnt += (val & XRXMAC_FRAG_CNT_MASK);
-		XMAC_REG_RD(handle, portn, XRXMAC_AL_ER_CNT_REG, &val);
-		statsp->xmac_stats.rx_frame_align_err_cnt +=
-			(val & XRXMAC_AL_ER_CNT_MASK);
-		XMAC_REG_RD(handle, portn, XMAC_LINK_FLT_CNT_REG, &val);
-		statsp->xmac_stats.rx_linkfault_err_cnt +=
-			(val & XMAC_LINK_FLT_CNT_MASK);
-		(void) npi_xmac_xpcs_read(handle, portn,
-			XPCS_REG_DESCWERR_COUNTER, &cnt32);
-		statsp->xmac_stats.xpcs_deskew_err_cnt +=
-			(cnt32 & XMAC_XPCS_DESKEW_ERR_CNT_MASK);
-#ifdef	NXGE_DEBUG_SYMBOL_ERR
-		(void) npi_xmac_xpcs_read(handle, portn,
-			XPCS_REG_SYMBOL_ERR_L0_1_COUNTER, &cnt32);
-		statsp->xmac_stats.xpcs_ln0_symbol_err_cnt +=
-			(cnt32 & XMAC_XPCS_SYM_ERR_CNT_L0_MASK);
-		statsp->xmac_stats.xpcs_ln1_symbol_err_cnt +=
-			((cnt32 & XMAC_XPCS_SYM_ERR_CNT_L1_MASK) >>
-			XMAC_XPCS_SYM_ERR_CNT_L1_SHIFT);
-		(void) npi_xmac_xpcs_read(handle, portn,
-			XPCS_REG_SYMBOL_ERR_L2_3_COUNTER, &cnt32);
-		statsp->xmac_stats.xpcs_ln2_symbol_err_cnt +=
-			(cnt32 & XMAC_XPCS_SYM_ERR_CNT_L2_MASK);
-		statsp->xmac_stats.xpcs_ln3_symbol_err_cnt +=
-			((cnt32 & XMAC_XPCS_SYM_ERR_CNT_L3_MASK) >>
-			XMAC_XPCS_SYM_ERR_CNT_L3_SHIFT);
-#endif
-	} else if (nxgep->mac.porttype == PORT_TYPE_BMAC) {
-		/*
-		 * Transmit MAC statistics.
-		 */
-		BMAC_REG_RD(handle, portn, BTXMAC_FRM_CNT_REG, &val);
-		statsp->bmac_stats.tx_frame_cnt += (val & BTXMAC_FRM_CNT_MASK);
-		BMAC_REG_RD(handle, portn, BTXMAC_BYTE_CNT_REG, &val);
-		statsp->bmac_stats.tx_byte_cnt += (val & BTXMAC_BYTE_CNT_MASK);
-
-		/*
-		 * Receive MAC statistics.
-		 */
-		BMAC_REG_RD(handle, portn, RXMAC_FRM_CNT_REG, &val);
-		statsp->bmac_stats.rx_frame_cnt += (val & RXMAC_FRM_CNT_MASK);
-		BMAC_REG_RD(handle, portn, BRXMAC_BYTE_CNT_REG, &val);
-		statsp->bmac_stats.rx_byte_cnt += (val & BRXMAC_BYTE_CNT_MASK);
-		BMAC_REG_RD(handle, portn, BMAC_AL_ER_CNT_REG, &val);
-		statsp->bmac_stats.rx_align_err_cnt +=
-			(val & BMAC_AL_ER_CNT_MASK);
-		BMAC_REG_RD(handle, portn, MAC_LEN_ER_CNT_REG, &val);
-		statsp->bmac_stats.rx_len_err_cnt +=
-			(val & MAC_LEN_ER_CNT_MASK);
-		BMAC_REG_RD(handle, portn, BMAC_CRC_ER_CNT_REG, &val);
-		statsp->bmac_stats.rx_crc_err_cnt +=
-			(val & BMAC_CRC_ER_CNT_MASK);
-		BMAC_REG_RD(handle, portn, BMAC_CD_VIO_CNT_REG, &val);
-		statsp->bmac_stats.rx_viol_err_cnt +=
-			(val & BMAC_CD_VIO_CNT_MASK);
-	}
-	/* Update IPP counters */
-	(void) npi_ipp_get_ecc_err_count(handle, portn, &cnt8);
-	statsp->ipp_stats.ecc_err_cnt += cnt8;
-	(void) npi_ipp_get_pkt_dis_count(handle, portn, &cnt16);
-	statsp->ipp_stats.pkt_dis_cnt += cnt16;
-	(void) npi_ipp_get_cs_err_count(handle, portn, &cnt16);
-	statsp->ipp_stats.bad_cs_cnt += cnt16;
-
-	MUTEX_EXIT(&nxgep->ouraddr_lock);
-
-nxge_save_cntrs_exit:
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_save_cntrs"));
-}
-
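-/*
- * MAC-layer (GLDv3) get-statistic entry point.  Per-channel receive and
- * transmit counters are summed across all RDC and TDC channels; MAC and
- * link capability statistics come from the soft MAC statistics.
- */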
-/* ARGSUSED */
-int
-nxge_m_stat(void *arg, uint_t stat, uint64_t *value)
-{
-	p_nxge_t nxgep = (p_nxge_t)arg;
-	p_nxge_stats_t statsp;
-	uint64_t val = 0;
-	int channel;
-
-	NXGE_DEBUG_MSG((nxgep, KST_CTL, "==> nxge_m_stat"));
-	statsp = (p_nxge_stats_t)nxgep->statsp;
-
-	switch (stat) {
-	case MAC_STAT_IFSPEED:
-		val = statsp->mac_stats.link_speed * 1000000ull;
-		break;
-
-	case MAC_STAT_MULTIRCV:
-		val = statsp->port_stats.multircv;
-		break;
-
-	case MAC_STAT_BRDCSTRCV:
-		val = statsp->port_stats.brdcstrcv;
-		break;
-
-	case MAC_STAT_MULTIXMT:
-		val = statsp->port_stats.multixmt;
-		break;
-
-	case MAC_STAT_BRDCSTXMT:
-		val = statsp->port_stats.brdcstxmt;
-		break;
-
-	case MAC_STAT_NORCVBUF:
-		val = statsp->port_stats.norcvbuf;
-		break;
-
-	case MAC_STAT_IERRORS:
-	case ETHER_STAT_MACRCV_ERRORS:
-		val = 0;
-		for (channel = 0; channel < nxgep->nrdc; channel++) {
-			val += statsp->rdc_stats[channel].ierrors;
-		}
-		break;
-
-	case MAC_STAT_NOXMTBUF:
-		val = statsp->port_stats.noxmtbuf;
-		break;
-
-	case MAC_STAT_OERRORS:
-		for (channel = 0; channel < nxgep->ntdc; channel++) {
-			val += statsp->tdc_stats[channel].oerrors;
-		}
-
-		break;
-
-	case MAC_STAT_COLLISIONS:
-		val = 0;
-		break;
-
-	case MAC_STAT_RBYTES:
-		for (channel = 0; channel < nxgep->nrdc; channel++) {
-			val += statsp->rdc_stats[channel].ibytes;
-		}
-		break;
-
-	case MAC_STAT_IPACKETS:
-		for (channel = 0; channel < nxgep->nrdc; channel++) {
-			val += statsp->rdc_stats[channel].ipackets;
-		}
-		break;
-
-	case MAC_STAT_OBYTES:
-		for (channel = 0; channel < nxgep->ntdc; channel++) {
-			val += statsp->tdc_stats[channel].obytes;
-		}
-		break;
-
-	case MAC_STAT_OPACKETS:
-		for (channel = 0; channel < nxgep->ntdc; channel++) {
-			val += statsp->tdc_stats[channel].opackets;
-		}
-		break;
-	case MAC_STAT_LINK_STATE:
-		val = statsp->mac_stats.link_duplex;
-		break;
-	case MAC_STAT_LINK_UP:
-		val = statsp->mac_stats.link_up;
-		break;
-	case MAC_STAT_PROMISC:
-		val = statsp->mac_stats.promisc;
-		break;
-	case ETHER_STAT_SQE_ERRORS:
-		val = 0;
-		break;
-
-	case ETHER_STAT_ALIGN_ERRORS:
-		if (nxgep->mac.porttype == PORT_TYPE_XMAC)
-			val = statsp->xmac_stats.rx_frame_align_err_cnt;
-		else if (nxgep->mac.porttype == PORT_TYPE_BMAC)
-			val = statsp->bmac_stats.rx_align_err_cnt;
-		else
-			val = 0;
-		break;
-
-	case ETHER_STAT_FCS_ERRORS:
-		if (nxgep->mac.porttype == PORT_TYPE_XMAC)
-			val = statsp->xmac_stats.rx_crc_err_cnt;
-		else if (nxgep->mac.porttype == PORT_TYPE_BMAC)
-			val = statsp->bmac_stats.rx_crc_err_cnt;
-		else
-			val = 0;
-		break;
-
-	case ETHER_STAT_FIRST_COLLISIONS:
-		val = 0;
-		break;
-
-	case ETHER_STAT_MULTI_COLLISIONS:
-		val = 0;
-		break;
-
-	case ETHER_STAT_TX_LATE_COLLISIONS:
-		val = 0;
-		break;
-
-	case ETHER_STAT_EX_COLLISIONS:
-		val = 0;
-		break;
-
-	case ETHER_STAT_DEFER_XMTS:
-		val = 0;
-		break;
-
-	case ETHER_STAT_MACXMT_ERRORS:
-		if (nxgep->mac.porttype == PORT_TYPE_XMAC) {
-			val = statsp->xmac_stats.tx_underflow_err +
-				statsp->xmac_stats.tx_maxpktsize_err +
-				statsp->xmac_stats.tx_overflow_err +
-				statsp->xmac_stats.tx_fifo_xfr_err;
-		} else {
-			val = statsp->bmac_stats.tx_underrun_err +
-				statsp->bmac_stats.tx_max_pkt_err;
-		}
-		break;
-
-	case ETHER_STAT_CARRIER_ERRORS:
-		if (nxgep->mac.porttype == PORT_TYPE_XMAC) {
-			val = statsp->xmac_stats.rx_linkfault_err_cnt;
-		} else {
-			val = statsp->mac_stats.xcvr_inits +
-				statsp->mac_stats.serdes_inits;
-		}
-		break;
-
-	case ETHER_STAT_TOOLONG_ERRORS:
-		if (nxgep->mac.porttype == PORT_TYPE_XMAC) {
-			val = statsp->xmac_stats.tx_maxpktsize_err +
-				statsp->xmac_stats.rx_len_err_cnt;
-
-		} else {
-			val = statsp->bmac_stats.rx_len_err_cnt +
-				statsp->bmac_stats.tx_max_pkt_err;
-		}
-		break;
-
-
-	case ETHER_STAT_XCVR_ADDR:
-		val = statsp->mac_stats.xcvr_portn;
-		break;
-	case ETHER_STAT_XCVR_ID:
-		val = statsp->mac_stats.xcvr_id;
-		break;
-
-	case ETHER_STAT_XCVR_INUSE:
-		val = statsp->mac_stats.xcvr_inuse;
-		break;
-
-	case ETHER_STAT_CAP_1000FDX:
-		val = statsp->mac_stats.cap_1000fdx;
-		break;
-
-	case ETHER_STAT_CAP_1000HDX:
-		val = statsp->mac_stats.cap_1000hdx;
-		break;
-
-	case ETHER_STAT_CAP_100FDX:
-		val = statsp->mac_stats.cap_100fdx;
-		break;
-
-	case ETHER_STAT_CAP_100HDX:
-		val = statsp->mac_stats.cap_100hdx;
-		break;
-
-	case ETHER_STAT_CAP_10FDX:
-		val = statsp->mac_stats.cap_10fdx;
-		break;
-
-	case ETHER_STAT_CAP_10HDX:
-		val = statsp->mac_stats.cap_10hdx;
-		break;
-
-	case ETHER_STAT_CAP_ASMPAUSE:
-		val = statsp->mac_stats.cap_asmpause;
-		break;
-
-	case ETHER_STAT_CAP_PAUSE:
-		val = statsp->mac_stats.cap_pause;
-		break;
-
-	case ETHER_STAT_CAP_AUTONEG:
-		val = statsp->mac_stats.cap_autoneg;
-		break;
-
-	case ETHER_STAT_ADV_CAP_1000FDX:
-		val = statsp->mac_stats.adv_cap_1000fdx;
-		break;
-
-	case ETHER_STAT_ADV_CAP_1000HDX:
-		val = statsp->mac_stats.adv_cap_1000hdx;
-		break;
-
-	case ETHER_STAT_ADV_CAP_100FDX:
-		val = statsp->mac_stats.adv_cap_100fdx;
-		break;
-
-	case ETHER_STAT_ADV_CAP_100HDX:
-		val = statsp->mac_stats.adv_cap_100hdx;
-		break;
-
-	case ETHER_STAT_ADV_CAP_10FDX:
-		val = statsp->mac_stats.adv_cap_10fdx;
-		break;
-
-	case ETHER_STAT_ADV_CAP_10HDX:
-		val = statsp->mac_stats.adv_cap_10hdx;
-		break;
-
-	case ETHER_STAT_ADV_CAP_ASMPAUSE:
-		val = statsp->mac_stats.adv_cap_asmpause;
-		break;
-
-	case ETHER_STAT_ADV_CAP_PAUSE:
-		val = statsp->mac_stats.adv_cap_pause;
-		break;
-
-	case ETHER_STAT_ADV_CAP_AUTONEG:
-		val = statsp->mac_stats.adv_cap_autoneg;
-		break;
-
-	case ETHER_STAT_LP_CAP_1000FDX:
-		val = statsp->mac_stats.lp_cap_1000fdx;
-		break;
-
-	case ETHER_STAT_LP_CAP_1000HDX:
-		val = statsp->mac_stats.lp_cap_1000hdx;
-		break;
-
-	case ETHER_STAT_LP_CAP_100FDX:
-		val = statsp->mac_stats.lp_cap_100fdx;
-		break;
-
-	case ETHER_STAT_LP_CAP_100HDX:
-		val = statsp->mac_stats.lp_cap_100hdx;
-		break;
-
-	case ETHER_STAT_LP_CAP_10FDX:
-		val = statsp->mac_stats.lp_cap_10fdx;
-		break;
-
-	case ETHER_STAT_LP_CAP_10HDX:
-		val = statsp->mac_stats.lp_cap_10hdx;
-		break;
-
-	case ETHER_STAT_LP_CAP_ASMPAUSE:
-		val = statsp->mac_stats.lp_cap_asmpause;
-		break;
-
-	case ETHER_STAT_LP_CAP_PAUSE:
-		val = statsp->mac_stats.lp_cap_pause;
-		break;
-
-	case ETHER_STAT_LP_CAP_AUTONEG:
-		val = statsp->mac_stats.lp_cap_autoneg;
-		break;
-
-	case ETHER_STAT_LINK_ASMPAUSE:
-		val = statsp->mac_stats.link_asmpause;
-		break;
-
-	case ETHER_STAT_LINK_PAUSE:
-		val = statsp->mac_stats.link_pause;
-		break;
-
-	case ETHER_STAT_LINK_AUTONEG:
-		val = statsp->mac_stats.cap_autoneg;
-		break;
-
-	case ETHER_STAT_LINK_DUPLEX:
-		val = statsp->mac_stats.link_duplex;
-		break;
-
-	default:
-		/*
-		 * Shouldn't reach here...
-		 */
-#ifdef NXGE_DEBUG
-		NXGE_ERROR_MSG((nxgep, KST_CTL,
-			"nxge_m_stat: unrecognized parameter value = 0x%x",
-			stat));
-#endif
-
-		return (ENOTSUP);
-	}
-	*value = val;
-	return (0);
-}
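A note on the statistics callback above: counters such as MAC_STAT_RBYTES, MAC_STAT_IPACKETS and MAC_STAT_OERRORS are not single hardware registers; they are sums of the per-channel RDC (receive) and TDC (transmit) software counters kept in statsp. The following minimal, standalone sketch only illustrates that aggregation pattern; the struct, field names and channel count here are hypothetical stand-ins, not the driver's types.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical per-channel counter block standing in for rdc_stats[]. */
struct chan_stats {
	uint64_t ipackets;
	uint64_t ibytes;
	uint64_t ierrors;
};

/*
 * Sum one 64-bit counter across all channels, the way nxge_m_stat()
 * walks nxgep->nrdc receive channels (or nxgep->ntdc transmit channels).
 */
static uint64_t
sum_channel_counter(const struct chan_stats *stats, int nchannels,
    size_t field_offset)
{
	uint64_t total = 0;
	int chan;

	for (chan = 0; chan < nchannels; chan++)
		total += *(const uint64_t *)
		    ((const char *)&stats[chan] + field_offset);

	return (total);
}

/*
 * Example use (hypothetical): total received bytes over 8 channels.
 *
 *	uint64_t rbytes = sum_channel_counter(rdc, 8,
 *	    offsetof(struct chan_stats, ibytes));
 */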
--- a/usr/src/uts/sun4v/io/nxge/nxge_mac.c	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,3325 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#include <sys/nxge/nxge_impl.h>
-#include <sys/nxge/nxge_mac.h>
-
-extern uint32_t nxge_no_link_notify;
-extern uint32_t nxge_no_msg;
-extern uint32_t nxge_lb_dbg;
-extern nxge_os_mutex_t	nxge_mdio_lock;
-extern nxge_os_mutex_t	nxge_mii_lock;
-extern boolean_t nxge_jumbo_enable;
-
-/*
- * Ethernet broadcast address definition.
- */
-static ether_addr_st etherbroadcastaddr =
-				{{0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
-
-nxge_status_t nxge_mac_init(p_nxge_t);
-
-/* Initialize the entire MAC and physical layer */
-
-nxge_status_t
-nxge_mac_init(p_nxge_t nxgep)
-{
-	uint8_t			portn;
-	nxge_status_t		status = NXGE_OK;
-
-	portn = NXGE_GET_PORT_NUM(nxgep->function_num);
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_mac_init: port<%d>", portn));
-
-	nxgep->mac.portnum = portn;
-	nxgep->mac.porttype = PORT_TYPE_XMAC;
-
-	if ((portn == BMAC_PORT_0) || (portn == BMAC_PORT_1))
-		nxgep->mac.porttype = PORT_TYPE_BMAC;
-
-	/* Initialize XIF to configure a network mode */
-	if ((status = nxge_xif_init(nxgep)) != NXGE_OK) {
-		goto fail;
-	}
-
-	if ((status = nxge_pcs_init(nxgep)) != NXGE_OK) {
-		goto fail;
-	}
-
-	/* Initialize TX and RX MACs */
-	/*
-	 * Always perform XIF init first, before TX and RX MAC init
-	 */
-	if ((status = nxge_tx_mac_reset(nxgep)) != NXGE_OK)
-		goto fail;
-
-	if ((status = nxge_tx_mac_init(nxgep)) != NXGE_OK)
-		goto fail;
-
-	if ((status = nxge_rx_mac_reset(nxgep)) != NXGE_OK)
-		goto fail;
-
-	if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK)
-		goto fail;
-
-	if ((status = nxge_tx_mac_enable(nxgep)) != NXGE_OK)
-		goto fail;
-
-	if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK)
-		goto fail;
-
-	nxgep->statsp->mac_stats.mac_mtu = nxgep->mac.maxframesize;
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_mac_init: port<%d>", portn));
-
-	return (NXGE_OK);
-fail:
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
-			"nxge_mac_init: failed to initialize MAC port<%d>",
-			portn));
-	return (status);
-}
-
-/* Initialize the Ethernet Link */
-
-nxge_status_t
-nxge_link_init(p_nxge_t nxgep)
-{
-	nxge_status_t		status = NXGE_OK;
-#ifdef	NXGE_DEBUG
-	uint8_t			portn;
-
-	portn = nxgep->mac.portnum;
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_link_init: port<%d>", portn));
-#endif
-
-	if (nxgep->niu_type == N2_NIU) {
-		/* Workaround to get link up in both NIU ports */
-		if ((status = nxge_xcvr_init(nxgep)) != NXGE_OK)
-			goto fail;
-	}
-	NXGE_DELAY(200000);
-	/* Initialize internal serdes */
-	if ((status = nxge_serdes_init(nxgep)) != NXGE_OK)
-		goto fail;
-	NXGE_DELAY(200000);
-	if ((status = nxge_xcvr_init(nxgep)) != NXGE_OK)
-		goto fail;
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_link_init: port<%d>", portn));
-
-	return (NXGE_OK);
-
-fail:
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
-		"nxge_link_init: "
-		"failed to initialize Ethernet link on port<%d>",
-		portn));
-
-	return (status);
-}
-
-
-/* Initialize the XIF sub-block within the MAC */
-
-nxge_status_t
-nxge_xif_init(p_nxge_t nxgep)
-{
-	uint32_t		xif_cfg = 0;
-	npi_attr_t		ap;
-	uint8_t			portn;
-	nxge_port_t		portt;
-	nxge_port_mode_t	portmode;
-	p_nxge_stats_t		statsp;
-	npi_status_t		rs = NPI_SUCCESS;
-	npi_handle_t		handle;
-
-	portn = NXGE_GET_PORT_NUM(nxgep->function_num);
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_xif_init: port<%d>", portn));
-
-	handle = nxgep->npi_handle;
-	portmode = nxgep->mac.portmode;
-	portt = nxgep->mac.porttype;
-	statsp = nxgep->statsp;
-
-	if (portt == PORT_TYPE_XMAC) {
-
-		/* Setup XIF Configuration for XMAC */
-
-		if ((portmode == PORT_10G_FIBER) ||
-					(portmode == PORT_10G_COPPER))
-			xif_cfg |= CFG_XMAC_XIF_LFS;
-
-		if (portmode == PORT_1G_COPPER) {
-			xif_cfg |= CFG_XMAC_XIF_1G_PCS_BYPASS;
-		}
-
-		/* Set MAC Internal Loopback if necessary */
-		if (statsp->port_stats.lb_mode == nxge_lb_mac1000)
-			xif_cfg |= CFG_XMAC_XIF_LOOPBACK;
-
-		if (statsp->mac_stats.link_speed == 100)
-			xif_cfg |= CFG_XMAC_XIF_SEL_CLK_25MHZ;
-
-		xif_cfg |= CFG_XMAC_XIF_TX_OUTPUT;
-
-		if (portmode == PORT_10G_FIBER) {
-			if (statsp->mac_stats.link_up) {
-				xif_cfg |= CFG_XMAC_XIF_LED_POLARITY;
-			} else {
-				xif_cfg |= CFG_XMAC_XIF_LED_FORCE;
-			}
-		}
-
-		rs = npi_xmac_xif_config(handle, INIT, portn, xif_cfg);
-		if (rs != NPI_SUCCESS)
-			goto fail;
-
-		nxgep->mac.xif_config = xif_cfg;
-
-		/* Set Port Mode */
-		if ((portmode == PORT_10G_FIBER) ||
-					(portmode == PORT_10G_COPPER)) {
-			SET_MAC_ATTR1(handle, ap, portn, MAC_PORT_MODE,
-						MAC_XGMII_MODE, rs);
-			if (rs != NPI_SUCCESS)
-				goto fail;
-			if (statsp->mac_stats.link_up) {
-				if (nxge_10g_link_led_on(nxgep) != NXGE_OK)
-					goto fail;
-			} else {
-				if (nxge_10g_link_led_off(nxgep) != NXGE_OK)
-					goto fail;
-			}
-		} else if ((portmode == PORT_1G_FIBER) ||
-						(portmode == PORT_1G_COPPER)) {
-			if (statsp->mac_stats.link_speed == 1000) {
-				SET_MAC_ATTR1(handle, ap, portn, MAC_PORT_MODE,
-							MAC_GMII_MODE, rs);
-			} else {
-				SET_MAC_ATTR1(handle, ap, portn, MAC_PORT_MODE,
-							MAC_MII_MODE, rs);
-			}
-			if (rs != NPI_SUCCESS)
-				goto fail;
-		} else {
-			NXGE_DEBUG_MSG((nxgep, MAC_CTL,
-					"nxge_xif_init: Unknown port mode (%d)"
-					" for port<%d>", portmode, portn));
-			goto fail;
-		}
-
-	} else if (portt == PORT_TYPE_BMAC) {
-
-		/* Setup XIF Configuration for BMAC */
-
-		if (portmode == PORT_1G_COPPER) {
-			if (statsp->mac_stats.link_speed == 100)
-				xif_cfg |= CFG_BMAC_XIF_SEL_CLK_25MHZ;
-		}
-
-		if (statsp->port_stats.lb_mode == nxge_lb_mac1000)
-			xif_cfg |= CFG_BMAC_XIF_LOOPBACK;
-
-		if (statsp->mac_stats.link_speed == 1000)
-			xif_cfg |= CFG_BMAC_XIF_GMII_MODE;
-
-		xif_cfg |= CFG_BMAC_XIF_TX_OUTPUT;
-
-		rs = npi_bmac_xif_config(handle, INIT, portn, xif_cfg);
-		if (rs != NPI_SUCCESS)
-			goto fail;
-		nxgep->mac.xif_config = xif_cfg;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_xif_init: port<%d>", portn));
-	return (NXGE_OK);
-fail:
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
-			"nxge_xif_init: Failed to initialize XIF port<%d>",
-			portn));
-	return (NXGE_ERROR | rs);
-}
-
-/* Initialize the PCS sub-block in the MAC */
-
-nxge_status_t
-nxge_pcs_init(p_nxge_t nxgep)
-{
-	pcs_cfg_t		pcs_cfg;
-	uint32_t		val;
-	uint8_t			portn;
-	nxge_port_mode_t	portmode;
-	npi_handle_t		handle;
-	p_nxge_stats_t		statsp;
-	npi_status_t		rs = NPI_SUCCESS;
-
-	handle = nxgep->npi_handle;
-	portmode = nxgep->mac.portmode;
-	portn = nxgep->mac.portnum;
-	statsp = nxgep->statsp;
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_pcs_init: port<%d>", portn));
-
-	if (portmode == PORT_1G_FIBER) {
-		/* Initialize port's PCS */
-		pcs_cfg.value = 0;
-		pcs_cfg.bits.w0.enable = 1;
-		pcs_cfg.bits.w0.mask = 1;
-		PCS_REG_WR(handle, portn, PCS_CONFIG_REG, pcs_cfg.value);
-		PCS_REG_WR(handle, portn, PCS_DATAPATH_MODE_REG, 0);
-		if ((rs = npi_mac_pcs_reset(handle, portn)) != NPI_SUCCESS)
-			goto fail;
-
-	} else if ((portmode == PORT_10G_FIBER) ||
-						(portmode == PORT_10G_COPPER)) {
-		/* Use internal XPCS, bypass 1G PCS */
-		XMAC_REG_RD(handle, portn, XMAC_CONFIG_REG, &val);
-		val &= ~XMAC_XIF_XPCS_BYPASS;
-		XMAC_REG_WR(handle, portn, XMAC_CONFIG_REG, val);
-
-		if ((rs = npi_xmac_xpcs_reset(handle, portn)) != NPI_SUCCESS)
-			goto fail;
-
-		/* Set XPCS Internal Loopback if necessary */
-		if ((rs = npi_xmac_xpcs_read(handle, portn,
-						XPCS_REG_CONTROL1, &val))
-						!= NPI_SUCCESS)
-			goto fail;
-		if ((statsp->port_stats.lb_mode == nxge_lb_mac10g) ||
-			(statsp->port_stats.lb_mode == nxge_lb_mac1000))
-			val |= XPCS_CTRL1_LOOPBK;
-		else
-			val &= ~XPCS_CTRL1_LOOPBK;
-		if ((rs = npi_xmac_xpcs_write(handle, portn,
-						XPCS_REG_CONTROL1, val))
-						!= NPI_SUCCESS)
-			goto fail;
-
-		/* Clear descw errors */
-		if ((rs = npi_xmac_xpcs_write(handle, portn,
-						XPCS_REG_DESCWERR_COUNTER, 0))
-						!= NPI_SUCCESS)
-			goto fail;
-		/* Clear symbol errors */
-		if ((rs = npi_xmac_xpcs_read(handle, portn,
-					XPCS_REG_SYMBOL_ERR_L0_1_COUNTER, &val))
-					!= NPI_SUCCESS)
-			goto fail;
-		if ((rs = npi_xmac_xpcs_read(handle, portn,
-					XPCS_REG_SYMBOL_ERR_L2_3_COUNTER, &val))
-					!= NPI_SUCCESS)
-			goto fail;
-
-	} else if (portmode == PORT_1G_COPPER) {
-		if (portn < 4) {
-			PCS_REG_WR(handle, portn, PCS_DATAPATH_MODE_REG,
-					PCS_DATAPATH_MODE_MII);
-		}
-		if ((rs = npi_mac_pcs_reset(handle, portn)) != NPI_SUCCESS)
-			goto fail;
-
-	} else {
-		goto fail;
-	}
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_pcs_init: port<%d>", portn));
-	return (NXGE_OK);
-fail:
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
-			"nxge_pcs_init: Failed to initialize PCS port<%d>",
-			portn));
-	return (NXGE_ERROR | rs);
-}
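The 10G branch above clears XMAC_XIF_XPCS_BYPASS with a read-modify-write of XMAC_CONFIG_REG so traffic flows through the internal XPCS. The same read/mask/write shape recurs throughout the driver; the sketch below isolates it using hypothetical accessor callbacks rather than the driver's NPI or register macros.

#include <stdint.h>

/* Hypothetical register accessors standing in for XMAC_REG_RD/XMAC_REG_WR. */
typedef uint64_t (*reg_read_fn)(void *handle, uint32_t reg);
typedef void (*reg_write_fn)(void *handle, uint32_t reg, uint64_t value);

/*
 * Read-modify-write: clear 'clear_bits', then set 'set_bits'.  Passing the
 * XPCS-bypass bit as clear_bits and 0 as set_bits mirrors what
 * nxge_pcs_init() does to route traffic through the internal XPCS.
 */
static void
reg_rmw(void *handle, uint32_t reg, uint64_t clear_bits, uint64_t set_bits,
    reg_read_fn rd, reg_write_fn wr)
{
	uint64_t val;

	val = rd(handle, reg);
	val &= ~clear_bits;
	val |= set_bits;
	wr(handle, reg, val);
}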
-
-/* Initialize the Internal Serdes */
-
-nxge_status_t
-nxge_serdes_init(p_nxge_t nxgep)
-{
-	p_nxge_stats_t		statsp;
-#ifdef	NXGE_DEBUG
-	uint8_t			portn;
-#endif
-	nxge_status_t		status = NXGE_OK;
-
-#ifdef	NXGE_DEBUG
-	portn = nxgep->mac.portnum;
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
-		"==> nxge_serdes_init port<%d>", portn));
-#endif
-
-	statsp = nxgep->statsp;
-
-	if (nxgep->niu_type == N2_NIU) {
-		if (nxge_n2_serdes_init(nxgep) != NXGE_OK)
-			goto fail;
-	} else if ((nxgep->niu_type == NEPTUNE) ||
-				(nxgep->niu_type == NEPTUNE_2)) {
-			if ((status = nxge_neptune_serdes_init(nxgep))
-								!= NXGE_OK)
-				goto fail;
-	} else {
-		goto fail;
-	}
-
-	statsp->mac_stats.serdes_inits++;
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_serdes_init port<%d>",
-			portn));
-
-	return (NXGE_OK);
-
-fail:
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
-		"nxge_serdes_init: Failed to initialize serdes for port<%d>",
-			portn));
-
-	return (status);
-}
-
-/* Initialize the TI Hedwig Internal Serdes (N2-NIU only) */
-
-nxge_status_t
-nxge_n2_serdes_init(p_nxge_t nxgep)
-{
-	uint8_t portn;
-	int chan;
-	esr_ti_cfgpll_l_t pll_cfg_l;
-	esr_ti_cfgrx_l_t rx_cfg_l;
-	esr_ti_cfgrx_h_t rx_cfg_h;
-	esr_ti_cfgtx_l_t tx_cfg_l;
-	esr_ti_cfgtx_h_t tx_cfg_h;
-	esr_ti_testcfg_t test_cfg;
-	nxge_status_t status = NXGE_OK;
-
-	portn = nxgep->mac.portnum;
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_n2_serdes_init port<%d>",
-			portn));
-
-	tx_cfg_l.value = 0;
-	tx_cfg_h.value = 0;
-	rx_cfg_l.value = 0;
-	rx_cfg_h.value = 0;
-	pll_cfg_l.value = 0;
-	test_cfg.value = 0;
-
-	if (nxgep->mac.portmode == PORT_10G_FIBER) {
-		/* 0x0E01 */
-		tx_cfg_l.bits.entx = 1;
-		tx_cfg_l.bits.swing = CFGTX_SWING_1375MV;
-
-		/* 0x9101 */
-		rx_cfg_l.bits.enrx = 1;
-		rx_cfg_l.bits.term = CFGRX_TERM_0P8VDDT;
-		rx_cfg_l.bits.align = CFGRX_ALIGN_EN;
-		rx_cfg_l.bits.los = CFGRX_LOS_LOTHRES;
-
-		/* 0x0008 */
-		rx_cfg_h.bits.eq = CFGRX_EQ_ADAPTIVE_LP_ADAPTIVE_ZF;
-
-		/* Set loopback mode if necessary */
-		if (nxgep->statsp->port_stats.lb_mode == nxge_lb_serdes10g) {
-			tx_cfg_l.bits.entest = 1;
-			rx_cfg_l.bits.entest = 1;
-			test_cfg.bits.loopback = TESTCFG_INNER_CML_DIS_LOOPBACK;
-			if ((status = nxge_mdio_write(nxgep, portn,
-				ESR_N2_DEV_ADDR,
-				ESR_N2_TEST_CFG_REG, test_cfg.value))
-				!= NXGE_OK)
-			goto fail;
-		}
-
-		/* Use default PLL value */
-
-	} else if (nxgep->mac.portmode == PORT_1G_FIBER) {
-
-		/* 0x0E21 */
-		tx_cfg_l.bits.entx = 1;
-		tx_cfg_l.bits.rate = CFGTX_RATE_HALF;
-		tx_cfg_l.bits.swing = CFGTX_SWING_1375MV;
-
-		/* 0x9121 */
-		rx_cfg_l.bits.enrx = 1;
-		rx_cfg_l.bits.rate = CFGRX_RATE_HALF;
-		rx_cfg_l.bits.term = CFGRX_TERM_0P8VDDT;
-		rx_cfg_l.bits.align = CFGRX_ALIGN_EN;
-		rx_cfg_l.bits.los = CFGRX_LOS_LOTHRES;
-
-		/* 0x8 */
-		rx_cfg_h.bits.eq = CFGRX_EQ_ADAPTIVE_LP_ADAPTIVE_ZF;
-
-		/* MPY = 0x100 */
-		pll_cfg_l.bits.mpy = CFGPLL_MPY_8X;
-
-		/* Set PLL */
-		pll_cfg_l.bits.enpll = 1;
-		if ((status = nxge_mdio_write(nxgep, portn, ESR_N2_DEV_ADDR,
-				ESR_N2_PLL_CFG_L_REG, pll_cfg_l.value))
-				!= NXGE_OK)
-			goto fail;
-	} else {
-		goto fail;
-	}
-
-	/*   MIF_REG_WR(handle, MIF_MASK_REG, ~mask); */
-
-	NXGE_DELAY(20);
-
-	/* init TX channels */
-	for (chan = 0; chan < 4; chan++) {
-		if ((status = nxge_mdio_write(nxgep, portn, ESR_N2_DEV_ADDR,
-				ESR_N2_TX_CFG_L_REG_ADDR(chan), tx_cfg_l.value))
-				!= NXGE_OK)
-			goto fail;
-
-		if ((status = nxge_mdio_write(nxgep, portn, ESR_N2_DEV_ADDR,
-				ESR_N2_TX_CFG_H_REG_ADDR(chan), tx_cfg_h.value))
-				!= NXGE_OK)
-			goto fail;
-	}
-
-	/* init RX channels */
-	for (chan = 0; chan < 4; chan++) {
-		if ((status = nxge_mdio_write(nxgep, portn, ESR_N2_DEV_ADDR,
-				ESR_N2_RX_CFG_L_REG_ADDR(chan), rx_cfg_l.value))
-				!= NXGE_OK)
-			goto fail;
-
-		if ((status = nxge_mdio_write(nxgep, portn, ESR_N2_DEV_ADDR,
-				ESR_N2_RX_CFG_H_REG_ADDR(chan), rx_cfg_h.value))
-				!= NXGE_OK)
-			goto fail;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_n2_serdes_init port<%d>",
-			portn));
-
-	return (NXGE_OK);
-fail:
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
-	"nxge_n2_serdes_init: Failed to initialize N2 serdes for port<%d>",
-				portn));
-
-	return (status);
-}
-
-/* Initialize Neptune Internal Serdes (Neptune only) */
-
-nxge_status_t
-nxge_neptune_serdes_init(p_nxge_t nxgep)
-{
-	npi_handle_t		handle;
-	uint8_t			portn;
-	nxge_port_mode_t	portmode;
-	int			chan;
-	sr_rx_tx_ctrl_l_t	rx_tx_ctrl_l;
-	sr_rx_tx_ctrl_h_t	rx_tx_ctrl_h;
-	sr_glue_ctrl0_l_t	glue_ctrl0_l;
-	sr_glue_ctrl0_h_t	glue_ctrl0_h;
-	uint64_t		val;
-	uint16_t		val16l;
-	uint16_t		val16h;
-	nxge_status_t		status = NXGE_OK;
-
-	portn = nxgep->mac.portnum;
-
-	if ((portn != 0) && (portn != 1))
-		return (NXGE_OK);
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_neptune_serdes_init port<%d>",
-			portn));
-
-	handle = nxgep->npi_handle;
-	portmode = nxgep->mac.portmode;
-
-	if ((portmode == PORT_10G_FIBER) || (portmode == PORT_10G_COPPER)) {
-
-		switch (portn) {
-		case 0:
-			ESR_REG_WR(handle, ESR_0_CONTROL_REG,
-				ESR_CTL_EN_SYNCDET_0 | ESR_CTL_EN_SYNCDET_1 |
-				ESR_CTL_EN_SYNCDET_2 | ESR_CTL_EN_SYNCDET_3 |
-				(0x5 << ESR_CTL_OUT_EMPH_0_SHIFT) |
-				(0x5 << ESR_CTL_OUT_EMPH_1_SHIFT) |
-				(0x5 << ESR_CTL_OUT_EMPH_2_SHIFT) |
-				(0x5 << ESR_CTL_OUT_EMPH_3_SHIFT) |
-				(0x1 << ESR_CTL_LOSADJ_0_SHIFT) |
-				(0x1 << ESR_CTL_LOSADJ_1_SHIFT) |
-				(0x1 << ESR_CTL_LOSADJ_2_SHIFT) |
-				(0x1 << ESR_CTL_LOSADJ_3_SHIFT));
-
-				/* Set Serdes0 Internal Loopback if necessary */
-				if (nxgep->statsp->port_stats.lb_mode ==
-							nxge_lb_serdes10g) {
-					ESR_REG_WR(handle,
-						ESR_0_TEST_CONFIG_REG,
-						ESR_PAD_LOOPBACK_CH3 |
-						ESR_PAD_LOOPBACK_CH2 |
-						ESR_PAD_LOOPBACK_CH1 |
-						ESR_PAD_LOOPBACK_CH0);
-				} else {
-					ESR_REG_WR(handle,
-						ESR_0_TEST_CONFIG_REG, 0);
-				}
-			break;
-		case 1:
-			ESR_REG_WR(handle, ESR_1_CONTROL_REG,
-				ESR_CTL_EN_SYNCDET_0 | ESR_CTL_EN_SYNCDET_1 |
-				ESR_CTL_EN_SYNCDET_2 | ESR_CTL_EN_SYNCDET_3 |
-				(0x5 << ESR_CTL_OUT_EMPH_0_SHIFT) |
-				(0x5 << ESR_CTL_OUT_EMPH_1_SHIFT) |
-				(0x5 << ESR_CTL_OUT_EMPH_2_SHIFT) |
-				(0x5 << ESR_CTL_OUT_EMPH_3_SHIFT) |
-				(0x1 << ESR_CTL_LOSADJ_0_SHIFT) |
-				(0x1 << ESR_CTL_LOSADJ_1_SHIFT) |
-				(0x1 << ESR_CTL_LOSADJ_2_SHIFT) |
-				(0x1 << ESR_CTL_LOSADJ_3_SHIFT));
-
-				/* Set Serdes1 Internal Loopback if necessary */
-				if (nxgep->statsp->port_stats.lb_mode ==
-							nxge_lb_serdes10g) {
-					ESR_REG_WR(handle,
-						ESR_1_TEST_CONFIG_REG,
-						ESR_PAD_LOOPBACK_CH3 |
-						ESR_PAD_LOOPBACK_CH2 |
-						ESR_PAD_LOOPBACK_CH1 |
-						ESR_PAD_LOOPBACK_CH0);
-				} else {
-					ESR_REG_WR(handle,
-						ESR_1_TEST_CONFIG_REG, 0);
-				}
-			break;
-		default:
-			/* Nothing to do here */
-			goto done;
-		}
-
-		/* init TX RX channels */
-		for (chan = 0; chan < 4; chan++) {
-			if ((status = nxge_mdio_read(nxgep, portn,
-					ESR_NEPTUNE_DEV_ADDR,
-					ESR_NEP_RX_TX_CONTROL_L_ADDR(chan),
-					&rx_tx_ctrl_l.value)) != NXGE_OK)
-				goto fail;
-			if ((status = nxge_mdio_read(nxgep, portn,
-					ESR_NEPTUNE_DEV_ADDR,
-					ESR_NEP_RX_TX_CONTROL_H_ADDR(chan),
-					&rx_tx_ctrl_h.value)) != NXGE_OK)
-				goto fail;
-			if ((status = nxge_mdio_read(nxgep, portn,
-					ESR_NEPTUNE_DEV_ADDR,
-					ESR_NEP_GLUE_CONTROL0_L_ADDR(chan),
-					&glue_ctrl0_l.value)) != NXGE_OK)
-				goto fail;
-			if ((status = nxge_mdio_read(nxgep, portn,
-					ESR_NEPTUNE_DEV_ADDR,
-					ESR_NEP_GLUE_CONTROL0_H_ADDR(chan),
-					&glue_ctrl0_h.value)) != NXGE_OK)
-				goto fail;
-			rx_tx_ctrl_l.bits.enstretch = 1;
-			rx_tx_ctrl_h.bits.vmuxlo = 2;
-			rx_tx_ctrl_h.bits.vpulselo = 2;
-			glue_ctrl0_l.bits.rxlosenable = 1;
-			glue_ctrl0_l.bits.samplerate = 0xF;
-			glue_ctrl0_l.bits.thresholdcount = 0xFF;
-			glue_ctrl0_h.bits.bitlocktime = BITLOCKTIME_300_CYCLES;
-			if ((status = nxge_mdio_write(nxgep, portn,
-					ESR_NEPTUNE_DEV_ADDR,
-					ESR_NEP_RX_TX_CONTROL_L_ADDR(chan),
-					rx_tx_ctrl_l.value)) != NXGE_OK)
-				goto fail;
-			if ((status = nxge_mdio_write(nxgep, portn,
-					ESR_NEPTUNE_DEV_ADDR,
-					ESR_NEP_RX_TX_CONTROL_H_ADDR(chan),
-					rx_tx_ctrl_h.value)) != NXGE_OK)
-				goto fail;
-			if ((status = nxge_mdio_write(nxgep, portn,
-					ESR_NEPTUNE_DEV_ADDR,
-					ESR_NEP_GLUE_CONTROL0_L_ADDR(chan),
-					glue_ctrl0_l.value)) != NXGE_OK)
-				goto fail;
-			if ((status = nxge_mdio_write(nxgep, portn,
-					ESR_NEPTUNE_DEV_ADDR,
-					ESR_NEP_GLUE_CONTROL0_H_ADDR(chan),
-					glue_ctrl0_h.value)) != NXGE_OK)
-				goto fail;
-		}
-
-		/* Apply Tx core reset */
-		if ((status = nxge_mdio_write(nxgep, portn,
-					ESR_NEPTUNE_DEV_ADDR,
-					ESR_NEP_RX_TX_RESET_CONTROL_L_ADDR(),
-					(uint16_t)0)) != NXGE_OK)
-			goto fail;
-
-		if ((status = nxge_mdio_write(nxgep, portn,
-					ESR_NEPTUNE_DEV_ADDR,
-					ESR_NEP_RX_TX_RESET_CONTROL_H_ADDR(),
-					(uint16_t)0xffff)) != NXGE_OK)
-			goto fail;
-
-		NXGE_DELAY(200);
-
-		/* Apply Rx core reset */
-		if ((status = nxge_mdio_write(nxgep, portn,
-					ESR_NEPTUNE_DEV_ADDR,
-					ESR_NEP_RX_TX_RESET_CONTROL_L_ADDR(),
-					(uint16_t)0xffff)) != NXGE_OK)
-			goto fail;
-
-		NXGE_DELAY(200);
-		if ((status = nxge_mdio_write(nxgep, portn,
-					ESR_NEPTUNE_DEV_ADDR,
-					ESR_NEP_RX_TX_RESET_CONTROL_H_ADDR(),
-					(uint16_t)0)) != NXGE_OK)
-			goto fail;
-
-		NXGE_DELAY(200);
-		if ((status = nxge_mdio_read(nxgep, portn,
-					ESR_NEPTUNE_DEV_ADDR,
-					ESR_NEP_RX_TX_RESET_CONTROL_L_ADDR(),
-					&val16l)) != NXGE_OK)
-			goto fail;
-		if ((status = nxge_mdio_read(nxgep, portn,
-					ESR_NEPTUNE_DEV_ADDR,
-					ESR_NEP_RX_TX_RESET_CONTROL_H_ADDR(),
-					&val16h)) != NXGE_OK)
-			goto fail;
-		if ((val16l != 0) || (val16h != 0)) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-					"Failed to reset port<%d> XAUI Serdes",
-					portn));
-		}
-
-		ESR_REG_RD(handle, ESR_INTERNAL_SIGNALS_REG, &val);
-
-		if (portn == 0) {
-			if ((val & ESR_SIG_P0_BITS_MASK) !=
-				(ESR_SIG_SERDES_RDY0_P0 | ESR_SIG_DETECT0_P0 |
-					ESR_SIG_XSERDES_RDY_P0 |
-					ESR_SIG_XDETECT_P0_CH3 |
-					ESR_SIG_XDETECT_P0_CH2 |
-					ESR_SIG_XDETECT_P0_CH1 |
-					ESR_SIG_XDETECT_P0_CH0)) {
-				goto fail;
-			}
-		} else if (portn == 1) {
-			if ((val & ESR_SIG_P1_BITS_MASK) !=
-				(ESR_SIG_SERDES_RDY0_P1 | ESR_SIG_DETECT0_P1 |
-					ESR_SIG_XSERDES_RDY_P1 |
-					ESR_SIG_XDETECT_P1_CH3 |
-					ESR_SIG_XDETECT_P1_CH2 |
-					ESR_SIG_XDETECT_P1_CH1 |
-					ESR_SIG_XDETECT_P1_CH0)) {
-				goto fail;
-			}
-		}
-
-	} else if (portmode == PORT_1G_FIBER) {
-		ESR_REG_RD(handle, ESR_1_PLL_CONFIG_REG, &val);
-		val &= ~ESR_PLL_CFG_FBDIV_2;
-		switch (portn) {
-		case 0:
-			val |= ESR_PLL_CFG_HALF_RATE_0;
-			break;
-		case 1:
-			val |= ESR_PLL_CFG_HALF_RATE_1;
-			break;
-		case 2:
-			val |= ESR_PLL_CFG_HALF_RATE_2;
-			break;
-		case 3:
-			val |= ESR_PLL_CFG_HALF_RATE_3;
-			break;
-		default:
-			goto fail;
-		}
-
-		ESR_REG_WR(handle, ESR_1_PLL_CONFIG_REG, val);
-	}
-
-done:
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_neptune_serdes_init port<%d>",
-			portn));
-	return (NXGE_OK);
-fail:
-	NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"nxge_neptune_serdes_init: "
-			"Failed to initialize Neptune serdes for port<%d>",
-			portn));
-
-	return (status);
-}
-
-/* Look for transceiver type */
-
-nxge_status_t
-nxge_xcvr_find(p_nxge_t nxgep)
-{
-	uint8_t		portn;
-
-	portn = nxgep->mac.portnum;
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_xcvr_find: port<%d>", portn));
-
-	if (nxge_get_xcvr_type(nxgep) != NXGE_OK)
-		return (NXGE_ERROR);
-
-	nxgep->mac.linkchkmode = LINKCHK_TIMER;
-	if (nxgep->mac.portmode == PORT_10G_FIBER) {
-		nxgep->statsp->mac_stats.xcvr_inuse = PCS_XCVR;
-		if ((nxgep->niu_type == NEPTUNE) ||
-			(nxgep->niu_type == NEPTUNE_2)) {
-			nxgep->statsp->mac_stats.xcvr_portn =
-					BCM8704_NEPTUNE_PORT_ADDR_BASE + portn;
-		} else if (nxgep->niu_type == N2_NIU) {
-			nxgep->statsp->mac_stats.xcvr_portn =
-					BCM8704_N2_PORT_ADDR_BASE + portn;
-		} else
-			return (NXGE_ERROR);
-	} else if (nxgep->mac.portmode == PORT_1G_COPPER) {
-		nxgep->statsp->mac_stats.xcvr_inuse = INT_MII_XCVR;
-		/*
-		 * For Atlas, the transceiver (Xcvr) port numbers are swapped
-		 * relative to the Ethernet port numbers. This is done for
-		 * better signal integrity in routing.
-		 */
-
-		switch (portn) {
-		case 0:
-			nxgep->statsp->mac_stats.xcvr_portn =
-					BCM5464_NEPTUNE_PORT_ADDR_BASE + 3;
-			break;
-		case 1:
-			nxgep->statsp->mac_stats.xcvr_portn =
-					BCM5464_NEPTUNE_PORT_ADDR_BASE + 2;
-			break;
-		case 2:
-			nxgep->statsp->mac_stats.xcvr_portn =
-					BCM5464_NEPTUNE_PORT_ADDR_BASE + 1;
-			break;
-		case 3:
-			nxgep->statsp->mac_stats.xcvr_portn =
-					BCM5464_NEPTUNE_PORT_ADDR_BASE;
-			break;
-		default:
-			return (NXGE_ERROR);
-		}
-	} else if (nxgep->mac.portmode == PORT_1G_FIBER) {
-		nxgep->statsp->mac_stats.xcvr_inuse = PCS_XCVR;
-		nxgep->statsp->mac_stats.xcvr_portn = portn;
-	} else {
-		return (NXGE_ERROR);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_xcvr_find: xcvr_inuse = %d",
-					nxgep->statsp->mac_stats.xcvr_inuse));
-	return (NXGE_OK);
-}
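In the 1G copper branch above, the four-way switch encodes the Atlas transceiver swap: port 0 maps to BASE + 3, port 1 to BASE + 2, and so on. The switch is equivalent to a simple index reversal; the sketch below states that arithmetic explicitly (the BASE value used here is a stub for illustration, not the real constant from the nxge headers).

#include <assert.h>

/* Stub for illustration only; the real value comes from the nxge headers. */
#define	BCM5464_NEPTUNE_PORT_ADDR_BASE	10

/*
 * Atlas routes the PHYs so that transceiver (MDIO) addresses run in the
 * opposite direction from the Ethernet port numbers.
 */
static int
atlas_xcvr_addr(int portn)
{
	return (BCM5464_NEPTUNE_PORT_ADDR_BASE + (3 - portn));
}

int
main(void)
{
	assert(atlas_xcvr_addr(0) == BCM5464_NEPTUNE_PORT_ADDR_BASE + 3);
	assert(atlas_xcvr_addr(1) == BCM5464_NEPTUNE_PORT_ADDR_BASE + 2);
	assert(atlas_xcvr_addr(2) == BCM5464_NEPTUNE_PORT_ADDR_BASE + 1);
	assert(atlas_xcvr_addr(3) == BCM5464_NEPTUNE_PORT_ADDR_BASE);
	return (0);
}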
-
-/* Initialize transceiver */
-
-nxge_status_t
-nxge_xcvr_init(p_nxge_t nxgep)
-{
-	p_nxge_param_t		param_arr;
-	p_nxge_stats_t		statsp;
-	uint16_t		val;
-#ifdef	NXGE_DEBUG
-	uint8_t			portn;
-	uint16_t		val1;
-#endif
-	uint8_t			phy_port_addr;
-	pmd_tx_control_t	tx_ctl;
-	control_t		ctl;
-	phyxs_control_t		phyxs_ctl;
-	pcs_control_t		pcs_ctl;
-	uint32_t		delay = 0;
-	optics_dcntr_t		op_ctr;
-	nxge_status_t		status = NXGE_OK;
-#ifdef	NXGE_DEBUG
-	portn = nxgep->mac.portnum;
-#endif
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_xcvr_init: port<%d>", portn));
-
-	param_arr = nxgep->param_arr;
-	statsp = nxgep->statsp;
-
-	/*
-	 * Initialize the xcvr statistics.
-	 */
-	statsp->mac_stats.cap_autoneg = 0;
-	statsp->mac_stats.cap_100T4 = 0;
-	statsp->mac_stats.cap_100fdx = 0;
-	statsp->mac_stats.cap_100hdx = 0;
-	statsp->mac_stats.cap_10fdx = 0;
-	statsp->mac_stats.cap_10hdx = 0;
-	statsp->mac_stats.cap_asmpause = 0;
-	statsp->mac_stats.cap_pause = 0;
-	statsp->mac_stats.cap_1000fdx = 0;
-	statsp->mac_stats.cap_1000hdx = 0;
-	statsp->mac_stats.cap_10gfdx = 0;
-	statsp->mac_stats.cap_10ghdx = 0;
-
-	/*
-	 * Initialize the link statistics.
-	 */
-	statsp->mac_stats.link_T4 = 0;
-	statsp->mac_stats.link_asmpause = 0;
-	statsp->mac_stats.link_pause = 0;
-
-	phy_port_addr = nxgep->statsp->mac_stats.xcvr_portn;
-
-	switch (nxgep->mac.portmode) {
-	case PORT_10G_FIBER:
-		/* Disable Link LEDs */
-		if (nxge_10g_link_led_off(nxgep) != NXGE_OK)
-			goto fail;
-
-		/* Set Clause 45 */
-		npi_mac_mif_set_indirect_mode(nxgep->npi_handle, B_TRUE);
-
-		/* Reset the transceiver */
-		if ((status = nxge_mdio_read(nxgep,
-				phy_port_addr,
-				BCM8704_PHYXS_ADDR,
-				BCM8704_PHYXS_CONTROL_REG,
-				&phyxs_ctl.value)) != NXGE_OK)
-			goto fail;
-
-		phyxs_ctl.bits.reset = 1;
-		if ((status = nxge_mdio_write(nxgep,
-				phy_port_addr,
-				BCM8704_PHYXS_ADDR,
-				BCM8704_PHYXS_CONTROL_REG,
-				phyxs_ctl.value)) != NXGE_OK)
-			goto fail;
-
-		do {
-			drv_usecwait(500);
-			if ((status = nxge_mdio_read(nxgep,
-					phy_port_addr,
-					BCM8704_PHYXS_ADDR,
-					BCM8704_PHYXS_CONTROL_REG,
-					&phyxs_ctl.value)) != NXGE_OK)
-				goto fail;
-			delay++;
-		} while ((phyxs_ctl.bits.reset) && (delay < 100));
-		if (delay == 100) {
-			NXGE_DEBUG_MSG((nxgep, MAC_CTL,
-				"nxge_xcvr_init: "
-				"failed to reset Transceiver on port<%d>",
-				portn));
-			status = NXGE_ERROR;
-			goto fail;
-		}
-
-		/* Set to 0x7FBF */
-		ctl.value = 0;
-		ctl.bits.res1 = 0x3F;
-		ctl.bits.optxon_lvl = 1;
-		ctl.bits.oprxflt_lvl = 1;
-		ctl.bits.optrxlos_lvl = 1;
-		ctl.bits.optxflt_lvl = 1;
-		ctl.bits.opprflt_lvl = 1;
-		ctl.bits.obtmpflt_lvl = 1;
-		ctl.bits.opbiasflt_lvl = 1;
-		ctl.bits.optxrst_lvl = 1;
-		if ((status = nxge_mdio_write(nxgep,
-				phy_port_addr,
-				BCM8704_USER_DEV3_ADDR,
-				BCM8704_USER_CONTROL_REG, ctl.value))
-				!= NXGE_OK)
-			goto fail;
-
-		/* Set to 0x164 */
-		tx_ctl.value = 0;
-		tx_ctl.bits.tsck_lpwren = 1;
-		tx_ctl.bits.tx_dac_txck = 0x2;
-		tx_ctl.bits.tx_dac_txd = 0x1;
-		tx_ctl.bits.xfp_clken = 1;
-		if ((status = nxge_mdio_write(nxgep,
-				phy_port_addr,
-				BCM8704_USER_DEV3_ADDR,
-				BCM8704_USER_PMD_TX_CONTROL_REG, tx_ctl.value))
-				!= NXGE_OK)
-			goto fail;
-		/*
-		 * According to Broadcom's instructions, software needs to
-		 * read these registers back twice after they are written.
-		 */
-		if ((status = nxge_mdio_read(nxgep,
-				phy_port_addr,
-				BCM8704_USER_DEV3_ADDR,
-				BCM8704_USER_CONTROL_REG, &val))
-				!= NXGE_OK)
-			goto fail;
-
-		if ((status = nxge_mdio_read(nxgep,
-				phy_port_addr,
-				BCM8704_USER_DEV3_ADDR,
-				BCM8704_USER_CONTROL_REG, &val))
-				!= NXGE_OK)
-			goto fail;
-
-		if ((status = nxge_mdio_read(nxgep,
-				phy_port_addr,
-				BCM8704_USER_DEV3_ADDR,
-				BCM8704_USER_PMD_TX_CONTROL_REG, &val))
-				!= NXGE_OK)
-			goto fail;
-
-		if ((status = nxge_mdio_read(nxgep,
-				phy_port_addr,
-				BCM8704_USER_DEV3_ADDR,
-				BCM8704_USER_PMD_TX_CONTROL_REG, &val))
-				!= NXGE_OK)
-			goto fail;
-
-
-		/* Enable Tx and Rx LEDs to be driven by traffic */
-		if ((status = nxge_mdio_read(nxgep,
-					phy_port_addr,
-					BCM8704_USER_DEV3_ADDR,
-					BCM8704_USER_OPTICS_DIGITAL_CTRL_REG,
-					&op_ctr.value)) != NXGE_OK)
-			goto fail;
-		op_ctr.bits.gpio_sel = 0x3;
-		if ((status = nxge_mdio_write(nxgep,
-					phy_port_addr,
-					BCM8704_USER_DEV3_ADDR,
-					BCM8704_USER_OPTICS_DIGITAL_CTRL_REG,
-					op_ctr.value)) != NXGE_OK)
-			goto fail;
-
-		NXGE_DELAY(1000000);
-
-		/* Set BCM8704 Internal Loopback mode if necessary */
-		if ((status = nxge_mdio_read(nxgep,
-					phy_port_addr,
-					BCM8704_PCS_DEV_ADDR,
-					BCM8704_PCS_CONTROL_REG,
-					&pcs_ctl.value)) != NXGE_OK)
-			goto fail;
-		if (nxgep->statsp->port_stats.lb_mode == nxge_lb_phy10g)
-			pcs_ctl.bits.loopback = 1;
-		else
-			pcs_ctl.bits.loopback = 0;
-		if ((status = nxge_mdio_write(nxgep,
-					phy_port_addr,
-					BCM8704_PCS_DEV_ADDR,
-					BCM8704_PCS_CONTROL_REG,
-					pcs_ctl.value)) != NXGE_OK)
-			goto fail;
-
-		status = nxge_mdio_read(nxgep, phy_port_addr,
-				0x1, 0xA, &val);
-		if (status != NXGE_OK)
-			goto fail;
-		NXGE_DEBUG_MSG((nxgep, MAC_CTL,
-				"BCM8704 port<%d> Dev 1 Reg 0xA = 0x%x\n",
-				portn, val));
-		status = nxge_mdio_read(nxgep, phy_port_addr, 0x3, 0x20, &val);
-		if (status != NXGE_OK)
-			goto fail;
-		NXGE_DEBUG_MSG((nxgep, MAC_CTL,
-				"BCM8704 port<%d> Dev 3 Reg 0x20 = 0x%x\n",
-				portn, val));
-		status = nxge_mdio_read(nxgep, phy_port_addr, 0x4, 0x18, &val);
-		if (status != NXGE_OK)
-			goto fail;
-		NXGE_DEBUG_MSG((nxgep, MAC_CTL,
-				"BCM8704 port<%d> Dev 4 Reg 0x18 = 0x%x\n",
-				portn, val));
-
-#ifdef	NXGE_DEBUG
-		/* Diagnose link issue if link is not up */
-		status = nxge_mdio_read(nxgep, phy_port_addr,
-					BCM8704_USER_DEV3_ADDR,
-					BCM8704_USER_ANALOG_STATUS0_REG,
-					&val);
-		if (status != NXGE_OK)
-			goto fail;
-
-		status = nxge_mdio_read(nxgep, phy_port_addr,
-					BCM8704_USER_DEV3_ADDR,
-					BCM8704_USER_ANALOG_STATUS0_REG,
-					&val);
-		if (status != NXGE_OK)
-			goto fail;
-
-		status = nxge_mdio_read(nxgep, phy_port_addr,
-					BCM8704_USER_DEV3_ADDR,
-					BCM8704_USER_TX_ALARM_STATUS_REG,
-					&val1);
-		if (status != NXGE_OK)
-			goto fail;
-
-		status = nxge_mdio_read(nxgep, phy_port_addr,
-					BCM8704_USER_DEV3_ADDR,
-					BCM8704_USER_TX_ALARM_STATUS_REG,
-					&val1);
-		if (status != NXGE_OK)
-			goto fail;
-
-		if (val != 0x3FC) {
-			if ((val == 0x43BC) && (val1 != 0)) {
-				NXGE_DEBUG_MSG((nxgep, MAC_CTL,
-					"Cable not connected to peer or bad"
-					" cable on port<%d>\n", portn));
-			} else if (val == 0x639C) {
-				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-					"Optical module (XFP) is bad or absent"
-					" on port<%d>\n", portn));
-			}
-		}
-#endif
-
-		statsp->mac_stats.cap_10gfdx = 1;
-		statsp->mac_stats.lp_cap_10gfdx = 1;
-		break;
-	case PORT_10G_COPPER:
-		break;
-	case PORT_1G_FIBER:
-	case PORT_1G_COPPER:
-		/* Set Clause 22 */
-		npi_mac_mif_set_indirect_mode(nxgep->npi_handle, B_FALSE);
-
-		/* Set capability flags */
-		statsp->mac_stats.cap_1000fdx =
-					param_arr[param_anar_1000fdx].value;
-		statsp->mac_stats.cap_100fdx =
-					param_arr[param_anar_100fdx].value;
-		statsp->mac_stats.cap_10fdx = param_arr[param_anar_10fdx].value;
-
-		if ((status = nxge_mii_xcvr_init(nxgep)) != NXGE_OK)
-			goto fail;
-		break;
-	default:
-		goto fail;
-	}
-
-	statsp->mac_stats.xcvr_inits++;
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_xcvr_init: port<%d>", portn));
-	return (NXGE_OK);
-
-fail:
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
-		"nxge_xcvr_init: failed to initialize transceiver for port<%d>",
-		portn));
-	return (status);
-}
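The BCM8704 reset above is waited out by polling the self-clearing reset bit every 500 microseconds for up to 100 iterations (roughly 50 ms); nxge_mii_xcvr_init() further down uses the same shape with a 1000-iteration budget. A generic version of that poll-until-clear pattern looks like the sketch below; the busy-wait stub and the predicate callback are hypothetical, not driver APIs.

#include <stdint.h>

/* Hypothetical stand-ins for drv_usecwait() and the per-poll register read. */
extern void busy_wait_usec(unsigned int usec);
typedef int (*still_busy_fn)(void *ctx);	/* returns nonzero while busy */

/*
 * Poll until the busy condition clears or the iteration budget runs out.
 * Returns 0 on success and -1 on timeout, corresponding to the
 * "delay reached its limit, the reset never completed" checks above.
 */
static int
poll_until_clear(void *ctx, still_busy_fn busy, unsigned int interval_usec,
    unsigned int max_polls)
{
	unsigned int polls = 0;
	int b;

	do {
		busy_wait_usec(interval_usec);
		b = busy(ctx);
		polls++;
	} while (b && polls < max_polls);

	return (b ? -1 : 0);
}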
-
-
-/* Initialize the TxMAC sub-block */
-
-nxge_status_t
-nxge_tx_mac_init(p_nxge_t nxgep)
-{
-	npi_attr_t		ap;
-	uint8_t			portn;
-	nxge_port_mode_t	portmode;
-	nxge_port_t		portt;
-	npi_handle_t		handle;
-	npi_status_t		rs = NPI_SUCCESS;
-
-	portn = NXGE_GET_PORT_NUM(nxgep->function_num);
-	portt    = nxgep->mac.porttype;
-	handle   = nxgep->npi_handle;
-	portmode = nxgep->mac.portmode;
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_tx_mac_init: port<%d>",
-			portn));
-
-	/* Set Max and Min Frame Size */
-	if (nxgep->param_arr[param_accept_jumbo].value || nxge_jumbo_enable) {
-		SET_MAC_ATTR2(handle, ap, portn,
-		    MAC_PORT_FRAME_SIZE, 64, 0x2400, rs);
-	} else {
-		SET_MAC_ATTR2(handle, ap, portn,
-		    MAC_PORT_FRAME_SIZE, 64, 0x5EE + 4, rs);
-	}
-
-	if (rs != NPI_SUCCESS)
-		goto fail;
-	if (nxgep->param_arr[param_accept_jumbo].value ||
-		nxgep->mac.is_jumbo == B_TRUE)
-		nxgep->mac.maxframesize = 0x2400;
-	else
-		nxgep->mac.maxframesize = 0x5EE + 4;
-	nxgep->mac.minframesize = 64;
-
-	if (portt == PORT_TYPE_XMAC) {
-		if ((rs = npi_xmac_tx_iconfig(handle, INIT, portn,
-				0)) != NPI_SUCCESS)
-			goto fail;
-		nxgep->mac.tx_iconfig = NXGE_XMAC_TX_INTRS;
-		if ((portmode == PORT_10G_FIBER) ||
-					(portmode == PORT_10G_COPPER)) {
-			SET_MAC_ATTR1(handle, ap, portn, XMAC_10G_PORT_IPG,
-					XGMII_IPG_12_15, rs);
-			if (rs != NPI_SUCCESS)
-				goto fail;
-			nxgep->mac.ipg[0] = XGMII_IPG_12_15;
-		} else {
-			SET_MAC_ATTR1(handle, ap, portn, XMAC_PORT_IPG,
-					MII_GMII_IPG_12, rs);
-			if (rs != NPI_SUCCESS)
-				goto fail;
-			nxgep->mac.ipg[0] = MII_GMII_IPG_12;
-		}
-		if ((rs = npi_xmac_tx_config(handle, INIT, portn,
-				CFG_XMAC_TX_CRC | CFG_XMAC_TX)) != NPI_SUCCESS)
-			goto fail;
-		nxgep->mac.tx_config = CFG_XMAC_TX_CRC | CFG_XMAC_TX;
-		nxgep->mac.maxburstsize = 0;	/* not programmable */
-		nxgep->mac.ctrltype = 0;	/* not programmable */
-		nxgep->mac.pa_size = 0;		/* not programmable */
-
-		if ((rs = npi_xmac_zap_tx_counters(handle, portn))
-							!= NPI_SUCCESS)
-			goto fail;
-
-	} else {
-		if ((rs = npi_bmac_tx_iconfig(handle, INIT, portn,
-				0)) != NPI_SUCCESS)
-			goto fail;
-		nxgep->mac.tx_iconfig = NXGE_BMAC_TX_INTRS;
-
-		SET_MAC_ATTR1(handle, ap, portn, BMAC_PORT_CTRL_TYPE, 0x8808,
-				rs);
-		if (rs != NPI_SUCCESS)
-			goto fail;
-		nxgep->mac.ctrltype = 0x8808;
-
-		SET_MAC_ATTR1(handle, ap, portn, BMAC_PORT_PA_SIZE, 0x7, rs);
-		if (rs != NPI_SUCCESS)
-			goto fail;
-		nxgep->mac.pa_size = 0x7;
-
-		if ((rs = npi_bmac_tx_config(handle, INIT, portn,
-				CFG_BMAC_TX_CRC | CFG_BMAC_TX)) != NPI_SUCCESS)
-			goto fail;
-		nxgep->mac.tx_config = CFG_BMAC_TX_CRC | CFG_BMAC_TX;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_tx_mac_init: port<%d>",
-			portn));
-
-	return (NXGE_OK);
-fail:
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
-		"nxge_tx_mac_init: failed to initialize port<%d> TXMAC",
-					portn));
-
-	return (NXGE_ERROR | rs);
-}
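For reference, the frame-size constants programmed above decode to ordinary Ethernet numbers: 0x2400 is 9216 bytes (the jumbo maximum), and 0x5EE + 4 is 1518 + 4 = 1522 bytes, the standard maximum frame plus room for an 802.1Q VLAN tag, with 64 as the minimum. The tiny check below only confirms that arithmetic; it is not driver code.

#include <assert.h>

int
main(void)
{
	/* Jumbo maximum programmed into MAC_PORT_FRAME_SIZE. */
	assert(0x2400 == 9216);

	/* Standard Ethernet maximum (1518) plus a 4-byte 802.1Q tag. */
	assert(0x5EE == 1518);
	assert(0x5EE + 4 == 1522);

	return (0);
}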
-
-/* Initialize the RxMAC sub-block */
-
-nxge_status_t
-nxge_rx_mac_init(p_nxge_t nxgep)
-{
-	npi_attr_t		ap;
-	uint32_t		i;
-	uint16_t		hashtab_e;
-	p_hash_filter_t		hash_filter;
-	nxge_port_t		portt;
-	uint8_t			portn;
-	npi_handle_t		handle;
-	npi_status_t		rs = NPI_SUCCESS;
-	uint16_t 		*addr16p;
-	uint16_t 		addr0, addr1, addr2;
-	xmac_rx_config_t	xconfig;
-	bmac_rx_config_t	bconfig;
-
-	portn = NXGE_GET_PORT_NUM(nxgep->function_num);
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_rx_mac_init: port<%d>\n",
-			portn));
-	handle = nxgep->npi_handle;
-	portt = nxgep->mac.porttype;
-
-	addr16p = (uint16_t *)nxgep->ouraddr.ether_addr_octet;
-	addr0 = ntohs(addr16p[2]);
-	addr1 = ntohs(addr16p[1]);
-	addr2 = ntohs(addr16p[0]);
-	SET_MAC_ATTR3(handle, ap, portn, MAC_PORT_ADDR, addr0, addr1, addr2,
-		rs);
-
-	if (rs != NPI_SUCCESS)
-		goto fail;
-	SET_MAC_ATTR3(handle, ap, portn, MAC_PORT_ADDR_FILTER, 0, 0, 0, rs);
-	if (rs != NPI_SUCCESS)
-		goto fail;
-	SET_MAC_ATTR2(handle, ap, portn, MAC_PORT_ADDR_FILTER_MASK, 0, 0, rs);
-	if (rs != NPI_SUCCESS)
-		goto fail;
-
-	/*
-	 * Load the multicast hash filter bits.
-	 */
-	hash_filter = nxgep->hash_filter;
-	for (i = 0; i < MAC_MAX_HASH_ENTRY; i++) {
-		if (hash_filter != NULL) {
-			hashtab_e = (uint16_t)hash_filter->hash_filter_regs[
-				(NMCFILTER_REGS - 1) - i];
-		} else {
-			hashtab_e = 0;
-		}
-
-		if ((rs = npi_mac_hashtab_entry(handle, OP_SET, portn, i,
-					(uint16_t *)&hashtab_e)) != NPI_SUCCESS)
-			goto fail;
-	}
-
-	if (portt == PORT_TYPE_XMAC) {
-		if ((rs = npi_xmac_rx_iconfig(handle, INIT, portn,
-				0)) != NPI_SUCCESS)
-			goto fail;
-		nxgep->mac.rx_iconfig = NXGE_XMAC_RX_INTRS;
-
-		(void) nxge_fflp_init_hostinfo(nxgep);
-
-		xconfig = (CFG_XMAC_RX_ERRCHK | CFG_XMAC_RX_CRC_CHK |
-			CFG_XMAC_RX | CFG_XMAC_RX_CODE_VIO_CHK) &
-			~CFG_XMAC_RX_STRIP_CRC;
-
-		if (nxgep->filter.all_phys_cnt != 0)
-			xconfig |= CFG_XMAC_RX_PROMISCUOUS;
-
-		if (nxgep->filter.all_multicast_cnt != 0)
-			xconfig |= CFG_XMAC_RX_PROMISCUOUSGROUP;
-
-		xconfig |= CFG_XMAC_RX_HASH_FILTER;
-
-		if ((rs = npi_xmac_rx_config(handle, INIT, portn,
-					xconfig)) != NPI_SUCCESS)
-			goto fail;
-		nxgep->mac.rx_config = xconfig;
-
-		/* Comparison of mac unique address is always enabled on XMAC */
-
-		if ((rs = npi_xmac_zap_rx_counters(handle, portn))
-							!= NPI_SUCCESS)
-			goto fail;
-	} else {
-		(void) nxge_fflp_init_hostinfo(nxgep);
-
-		if (npi_bmac_rx_iconfig(nxgep->npi_handle, INIT, portn,
-					0) != NPI_SUCCESS)
-			goto fail;
-		nxgep->mac.rx_iconfig = NXGE_BMAC_RX_INTRS;
-
-		bconfig = (CFG_BMAC_RX_DISCARD_ON_ERR | CFG_BMAC_RX) &
-			~CFG_BMAC_RX_STRIP_CRC;
-
-		if (nxgep->filter.all_phys_cnt != 0)
-			bconfig |= CFG_BMAC_RX_PROMISCUOUS;
-
-		if (nxgep->filter.all_multicast_cnt != 0)
-			bconfig |= CFG_BMAC_RX_PROMISCUOUSGROUP;
-
-		bconfig |= CFG_BMAC_RX_HASH_FILTER;
-		if ((rs = npi_bmac_rx_config(handle, INIT, portn,
-					bconfig)) != NPI_SUCCESS)
-			goto fail;
-		nxgep->mac.rx_config = bconfig;
-
-		/* Always enable comparison of mac unique address */
-		if ((rs = npi_mac_altaddr_enable(handle, portn, 0))
-					!= NPI_SUCCESS)
-			goto fail;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_rx_mac_init: port<%d>\n",
-			portn));
-
-	return (NXGE_OK);
-
-fail:
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-		"nxge_rx_mac_init: Failed to Initialize port<%d> RxMAC",
-				portn));
-
-	return (NXGE_ERROR | rs);
-}
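The station-address programming above splits the six ether_addr_octet[] bytes into three 16-bit words, with the ntohs() calls undoing network byte order so that addr2 carries the most significant pair of octets and addr0 the least significant pair. The sketch below reproduces that split portably without the driver types; the sample address is made up.

#include <stdint.h>
#include <stdio.h>

/*
 * Split a 6-octet Ethernet address (network order, octet[0] first on the
 * wire) into the three 16-bit words the MAC_PORT_ADDR attribute expects:
 * addr2 = octets 0-1, addr1 = octets 2-3, addr0 = octets 4-5.
 */
static void
split_ether_addr(const uint8_t octet[6], uint16_t *addr2, uint16_t *addr1,
    uint16_t *addr0)
{
	*addr2 = (uint16_t)((octet[0] << 8) | octet[1]);
	*addr1 = (uint16_t)((octet[2] << 8) | octet[3]);
	*addr0 = (uint16_t)((octet[4] << 8) | octet[5]);
}

int
main(void)
{
	/* Made-up example address. */
	uint8_t mac[6] = { 0x00, 0x14, 0x4f, 0x01, 0x02, 0x03 };
	uint16_t a2, a1, a0;

	split_ether_addr(mac, &a2, &a1, &a0);
	(void) printf("addr2=0x%04x addr1=0x%04x addr0=0x%04x\n", a2, a1, a0);
	/* Prints addr2=0x0014 addr1=0x4f01 addr0=0x0203. */
	return (0);
}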
-
-/* Enable TXMAC */
-
-nxge_status_t
-nxge_tx_mac_enable(p_nxge_t nxgep)
-{
-	npi_handle_t	handle;
-	npi_status_t	rs = NPI_SUCCESS;
-	nxge_status_t	status = NXGE_OK;
-
-	handle = nxgep->npi_handle;
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_tx_mac_enable: port<%d>",
-			nxgep->mac.portnum));
-
-	if ((status = nxge_tx_mac_init(nxgep)) != NXGE_OK)
-		goto fail;
-
-	/* based on speed */
-	nxgep->msg_min = ETHERMIN;
-
-	if (nxgep->mac.porttype == PORT_TYPE_XMAC) {
-		if ((rs = npi_xmac_tx_config(handle, ENABLE, nxgep->mac.portnum,
-						CFG_XMAC_TX)) != NPI_SUCCESS)
-			goto fail;
-	} else {
-		if ((rs = npi_bmac_tx_config(handle, ENABLE, nxgep->mac.portnum,
-						CFG_BMAC_TX)) != NPI_SUCCESS)
-			goto fail;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_tx_mac_enable: port<%d>",
-			nxgep->mac.portnum));
-
-	return (NXGE_OK);
-fail:
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_tx_mac_enable: Failed to enable port<%d> TxMAC",
-			nxgep->mac.portnum));
-	if (rs != NPI_SUCCESS)
-		return (NXGE_ERROR | rs);
-	else
-		return (status);
-}
-
-/* Disable TXMAC */
-
-nxge_status_t
-nxge_tx_mac_disable(p_nxge_t nxgep)
-{
-	npi_handle_t	handle;
-	npi_status_t	rs = NPI_SUCCESS;
-
-	handle = nxgep->npi_handle;
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_tx_mac_disable: port<%d>",
-			nxgep->mac.portnum));
-
-	if (nxgep->mac.porttype == PORT_TYPE_XMAC) {
-		if ((rs = npi_xmac_tx_config(handle, DISABLE,
-			nxgep->mac.portnum, CFG_XMAC_TX)) != NPI_SUCCESS)
-			goto fail;
-	} else {
-		if ((rs = npi_bmac_tx_config(handle, DISABLE,
-			nxgep->mac.portnum, CFG_BMAC_TX)) != NPI_SUCCESS)
-			goto fail;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_tx_mac_disable: port<%d>",
-			nxgep->mac.portnum));
-	return (NXGE_OK);
-fail:
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_tx_mac_disable: Failed to disable port<%d> TxMAC",
-			nxgep->mac.portnum));
-	return (NXGE_ERROR | rs);
-}
-
-/* Enable RXMAC */
-
-nxge_status_t
-nxge_rx_mac_enable(p_nxge_t nxgep)
-{
-	npi_handle_t	handle;
-	uint8_t 	portn;
-	npi_status_t	rs = NPI_SUCCESS;
-	nxge_status_t	status = NXGE_OK;
-
-	handle = nxgep->npi_handle;
-	portn = nxgep->mac.portnum;
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_rx_mac_enable: port<%d>",
-			portn));
-
-	if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK)
-		goto fail;
-
-	if (nxgep->mac.porttype == PORT_TYPE_XMAC) {
-		if ((rs = npi_xmac_rx_config(handle, ENABLE, portn,
-						CFG_XMAC_RX)) != NPI_SUCCESS)
-			goto fail;
-	} else {
-		if ((rs = npi_bmac_rx_config(handle, ENABLE, portn,
-						CFG_BMAC_RX)) != NPI_SUCCESS)
-			goto fail;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_rx_mac_enable: port<%d>",
-			portn));
-
-	return (NXGE_OK);
-fail:
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_rx_mac_enable: Failed to enable port<%d> RxMAC",
-			portn));
-
-	if (rs != NPI_SUCCESS)
-		return (NXGE_ERROR | rs);
-	else
-		return (status);
-}
-
-/* Disable RXMAC */
-
-nxge_status_t
-nxge_rx_mac_disable(p_nxge_t nxgep)
-{
-	npi_handle_t	handle;
-	uint8_t		portn;
-	npi_status_t	rs = NPI_SUCCESS;
-
-	handle = nxgep->npi_handle;
-	portn = nxgep->mac.portnum;
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_rx_mac_disable: port<%d>",
-			portn));
-
-	if (nxgep->mac.porttype == PORT_TYPE_XMAC) {
-		if ((rs = npi_xmac_rx_config(handle, DISABLE, portn,
-						CFG_XMAC_RX)) != NPI_SUCCESS)
-			goto fail;
-	} else {
-		if ((rs = npi_bmac_rx_config(handle, DISABLE, portn,
-						CFG_BMAC_RX)) != NPI_SUCCESS)
-			goto fail;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_rx_mac_disable: port<%d>",
-			portn));
-	return (NXGE_OK);
-fail:
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_rx_mac_disable: "
-			"Failed to disable port<%d> RxMAC",
-			portn));
-
-	return (NXGE_ERROR | rs);
-}
-
-/* Reset TXMAC */
-
-nxge_status_t
-nxge_tx_mac_reset(p_nxge_t nxgep)
-{
-	npi_handle_t	handle;
-	uint8_t		portn;
-	npi_status_t	rs = NPI_SUCCESS;
-
-	handle = nxgep->npi_handle;
-	portn = nxgep->mac.portnum;
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_tx_mac_reset: port<%d>",
-			portn));
-
-	if (nxgep->mac.porttype == PORT_TYPE_XMAC) {
-		if ((rs = npi_xmac_reset(handle, portn, XTX_MAC_RESET_ALL))
-		    != NPI_SUCCESS)
-			goto fail;
-	} else {
-		if ((rs = npi_bmac_reset(handle, portn, TX_MAC_RESET))
-					!= NPI_SUCCESS)
-			goto fail;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_tx_mac_reset: port<%d>",
-			portn));
-
-	return (NXGE_OK);
-fail:
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_tx_mac_reset: Failed to Reset TxMAC port<%d>",
-			portn));
-
-	return (NXGE_ERROR | rs);
-}
-
-/* Reset RXMAC */
-
-nxge_status_t
-nxge_rx_mac_reset(p_nxge_t nxgep)
-{
-	npi_handle_t	handle;
-	uint8_t		portn;
-	npi_status_t	rs = NPI_SUCCESS;
-
-	handle = nxgep->npi_handle;
-	portn = nxgep->mac.portnum;
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_rx_mac_reset: port<%d>",
-			portn));
-
-	if (nxgep->mac.porttype == PORT_TYPE_XMAC) {
-		if ((rs = npi_xmac_reset(handle, portn, XRX_MAC_RESET_ALL))
-		    != NPI_SUCCESS)
-			goto fail;
-	} else {
-		if ((rs = npi_bmac_reset(handle, portn, RX_MAC_RESET))
-					!= NPI_SUCCESS)
-			goto fail;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_rx_mac_reset: port<%d>",
-			portn));
-
-	return (NXGE_OK);
-fail:
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_rx_mac_reset: Failed to Reset RxMAC port<%d>",
-			portn));
-	return (NXGE_ERROR | rs);
-}
-
-
-/* Enable/Disable MII Link Status change interrupt */
-
-nxge_status_t
-nxge_link_intr(p_nxge_t nxgep, link_intr_enable_t enable)
-{
-	uint8_t			portn;
-	nxge_port_mode_t	portmode;
-	npi_status_t		rs = NPI_SUCCESS;
-
-	portn = nxgep->mac.portnum;
-	portmode = nxgep->mac.portmode;
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_link_intr: port<%d>", portn));
-
-	if (enable == LINK_INTR_START) {
-		if (portmode == PORT_10G_FIBER) {
-			if ((rs = npi_xmac_xpcs_link_intr_enable(
-						nxgep->npi_handle,
-						portn)) != NPI_SUCCESS)
-				goto fail;
-		} else if (portmode == PORT_1G_FIBER) {
-			if ((rs = npi_mac_pcs_link_intr_enable(
-						nxgep->npi_handle,
-						portn)) != NPI_SUCCESS)
-				goto fail;
-		} else if (portmode == PORT_1G_COPPER) {
-			if ((rs = npi_mac_mif_link_intr_enable(
-				nxgep->npi_handle,
-				portn, MII_BMSR, BMSR_LSTATUS)) != NPI_SUCCESS)
-				goto fail;
-		} else
-			goto fail;
-	} else if (enable == LINK_INTR_STOP) {
-		if (portmode == PORT_10G_FIBER) {
-			if ((rs = npi_xmac_xpcs_link_intr_disable(
-						nxgep->npi_handle,
-						portn)) != NPI_SUCCESS)
-				goto fail;
-		} else  if (portmode == PORT_1G_FIBER) {
-			if ((rs = npi_mac_pcs_link_intr_disable(
-						nxgep->npi_handle,
-						portn)) != NPI_SUCCESS)
-				goto fail;
-		} else if (portmode == PORT_1G_COPPER) {
-			if ((rs = npi_mac_mif_link_intr_disable(
-						nxgep->npi_handle,
-						portn)) != NPI_SUCCESS)
-				goto fail;
-		} else
-			goto fail;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_link_intr: port<%d>", portn));
-
-	return (NXGE_OK);
-fail:
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_link_intr: Failed to set port<%d> mif intr mode",
-			portn));
-
-	return (NXGE_ERROR | rs);
-}
-
-/* Initialize 1G Fiber / Copper transceiver using Clause 22 */
-
-nxge_status_t
-nxge_mii_xcvr_init(p_nxge_t nxgep)
-{
-	p_nxge_param_t	param_arr;
-	p_nxge_stats_t	statsp;
-	uint8_t		xcvr_portn;
-	p_mii_regs_t	mii_regs;
-	mii_bmcr_t	bmcr;
-	mii_bmsr_t	bmsr;
-	mii_anar_t	anar;
-	mii_gcr_t	gcr;
-	mii_esr_t	esr;
-	mii_aux_ctl_t	bcm5464r_aux;
-	int		status = NXGE_OK;
-
-	uint_t delay;
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_mii_xcvr_init"));
-
-	param_arr = nxgep->param_arr;
-	statsp = nxgep->statsp;
-	xcvr_portn = statsp->mac_stats.xcvr_portn;
-
-	mii_regs = NULL;
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
-		"nxge_param_autoneg = 0x%02x", param_arr[param_autoneg].value));
-
-	/*
-	 * Reset the transceiver.
-	 */
-	delay = 0;
-	bmcr.value = 0;
-	bmcr.bits.reset = 1;
-	if ((status = nxge_mii_write(nxgep, xcvr_portn,
-		(uint8_t)(uint64_t)&mii_regs->bmcr, bmcr.value)) != NXGE_OK)
-		goto fail;
-	do {
-		drv_usecwait(500);
-		if ((status = nxge_mii_read(nxgep, xcvr_portn,
-			(uint8_t)(uint64_t)&mii_regs->bmcr, &bmcr.value))
-				!= NXGE_OK)
-			goto fail;
-		delay++;
-	} while ((bmcr.bits.reset) && (delay < 1000));
-	if (delay == 1000) {
-		NXGE_DEBUG_MSG((nxgep, MAC_CTL, "Xcvr reset failed."));
-		goto fail;
-	}
-
-	if ((status = nxge_mii_read(nxgep, xcvr_portn,
-			(uint8_t)(uint64_t)(&mii_regs->bmsr),
-			&bmsr.value)) != NXGE_OK)
-		goto fail;
-
-	param_arr[param_autoneg].value &= bmsr.bits.auto_neg_able;
-	param_arr[param_anar_100T4].value &= bmsr.bits.link_100T4;
-	param_arr[param_anar_100fdx].value &= bmsr.bits.link_100fdx;
-	param_arr[param_anar_100hdx].value = 0;
-	param_arr[param_anar_10fdx].value &= bmsr.bits.link_10fdx;
-	param_arr[param_anar_10hdx].value = 0;
-
-	/*
-	 * Initialize the xcvr statistics.
-	 */
-	statsp->mac_stats.cap_autoneg = bmsr.bits.auto_neg_able;
-	statsp->mac_stats.cap_100T4 = bmsr.bits.link_100T4;
-	statsp->mac_stats.cap_100fdx = bmsr.bits.link_100fdx;
-	statsp->mac_stats.cap_100hdx = 0;
-	statsp->mac_stats.cap_10fdx = bmsr.bits.link_10fdx;
-	statsp->mac_stats.cap_10hdx = 0;
-	statsp->mac_stats.cap_asmpause = param_arr[param_anar_asmpause].value;
-	statsp->mac_stats.cap_pause = param_arr[param_anar_pause].value;
-
-	/*
-	 * Initialize the xcvr advertised capability statistics.
-	 */
-	statsp->mac_stats.adv_cap_autoneg = param_arr[param_autoneg].value;
-	statsp->mac_stats.adv_cap_1000fdx = param_arr[param_anar_1000fdx].value;
-	statsp->mac_stats.adv_cap_1000hdx = param_arr[param_anar_1000hdx].value;
-	statsp->mac_stats.adv_cap_100T4 = param_arr[param_anar_100T4].value;
-	statsp->mac_stats.adv_cap_100fdx = param_arr[param_anar_100fdx].value;
-	statsp->mac_stats.adv_cap_100hdx = param_arr[param_anar_100hdx].value;
-	statsp->mac_stats.adv_cap_10fdx = param_arr[param_anar_10fdx].value;
-	statsp->mac_stats.adv_cap_10hdx = param_arr[param_anar_10hdx].value;
-	statsp->mac_stats.adv_cap_asmpause =
-					param_arr[param_anar_asmpause].value;
-	statsp->mac_stats.adv_cap_pause = param_arr[param_anar_pause].value;
-
-
-	/*
-	 * Check for extended status just in case we're
-	 * running a Gigabit PHY.
-	 */
-	if (bmsr.bits.extend_status) {
-		if ((status = nxge_mii_read(nxgep, xcvr_portn,
-			(uint8_t)(uint64_t)(&mii_regs->esr), &esr.value))
-				!= NXGE_OK)
-			goto fail;
-		param_arr[param_anar_1000fdx].value &=
-					esr.bits.link_1000fdx;
-		param_arr[param_anar_1000hdx].value = 0;
-
-		statsp->mac_stats.cap_1000fdx =
-			(esr.bits.link_1000Xfdx ||
-				esr.bits.link_1000fdx);
-		statsp->mac_stats.cap_1000hdx = 0;
-	} else {
-		param_arr[param_anar_1000fdx].value = 0;
-		param_arr[param_anar_1000hdx].value = 0;
-	}
-
-	/*
-	 * Initialize 1G Statistics once the capability is established.
-	 */
-	statsp->mac_stats.adv_cap_1000fdx = param_arr[param_anar_1000fdx].value;
-	statsp->mac_stats.adv_cap_1000hdx = param_arr[param_anar_1000hdx].value;
-
-	/*
-	 * Initialize the link statistics.
-	 */
-	statsp->mac_stats.link_T4 = 0;
-	statsp->mac_stats.link_asmpause = 0;
-	statsp->mac_stats.link_pause = 0;
-	statsp->mac_stats.link_speed = 0;
-	statsp->mac_stats.link_duplex = 0;
-	statsp->mac_stats.link_up = 0;
-
-	/*
-	 * Switch off Auto-negotiation, 100M and full duplex.
-	 */
-	bmcr.value = 0;
-	if ((status = nxge_mii_write(nxgep, xcvr_portn,
-		(uint8_t)(uint64_t)(&mii_regs->bmcr), bmcr.value)) != NXGE_OK)
-		goto fail;
-
-	if ((statsp->port_stats.lb_mode == nxge_lb_phy) ||
-			(statsp->port_stats.lb_mode == nxge_lb_phy1000)) {
-		bmcr.bits.loopback = 1;
-		bmcr.bits.enable_autoneg = 0;
-		if (statsp->port_stats.lb_mode == nxge_lb_phy1000)
-			bmcr.bits.speed_1000_sel = 1;
-		bmcr.bits.duplex_mode = 1;
-		param_arr[param_autoneg].value = 0;
-	} else {
-		bmcr.bits.loopback = 0;
-	}
-
-	if ((statsp->port_stats.lb_mode == nxge_lb_ext1000) ||
-		(statsp->port_stats.lb_mode == nxge_lb_ext100) ||
-		(statsp->port_stats.lb_mode == nxge_lb_ext10)) {
-		param_arr[param_autoneg].value = 0;
-		bcm5464r_aux.value = 0;
-		bcm5464r_aux.bits.ext_lb = 1;
-		bcm5464r_aux.bits.write_1 = 1;
-		if ((status = nxge_mii_write(nxgep, xcvr_portn,
-				BCM5464R_AUX_CTL, bcm5464r_aux.value))
-				!= NXGE_OK)
-			goto fail;
-	}
-
-	if (param_arr[param_autoneg].value) {
-		NXGE_DEBUG_MSG((nxgep, MAC_CTL,
-				"Restarting Auto-negotiation."));
-		/*
-		 * Set up our auto-negotiation advertisement register.
-		 */
-		anar.value = 0;
-		anar.bits.selector = 1;
-		anar.bits.cap_100T4 = param_arr[param_anar_100T4].value;
-		anar.bits.cap_100fdx = param_arr[param_anar_100fdx].value;
-		anar.bits.cap_100hdx = param_arr[param_anar_100hdx].value;
-		anar.bits.cap_10fdx = param_arr[param_anar_10fdx].value;
-		anar.bits.cap_10hdx = param_arr[param_anar_10hdx].value;
-		anar.bits.cap_asmpause = 0;
-		anar.bits.cap_pause = 0;
-		if (param_arr[param_anar_1000fdx].value ||
-			param_arr[param_anar_100fdx].value ||
-			param_arr[param_anar_10fdx].value) {
-			anar.bits.cap_asmpause = statsp->mac_stats.cap_asmpause;
-			anar.bits.cap_pause = statsp->mac_stats.cap_pause;
-		}
-
-		if ((status = nxge_mii_write(nxgep, xcvr_portn,
-			(uint8_t)(uint64_t)(&mii_regs->anar), anar.value))
-				!= NXGE_OK)
-			goto fail;
-		if (bmsr.bits.extend_status) {
-			gcr.value = 0;
-			gcr.bits.ms_mode_en =
-				param_arr[param_master_cfg_enable].value;
-			gcr.bits.master =
-				param_arr[param_master_cfg_value].value;
-			gcr.bits.link_1000fdx =
-				param_arr[param_anar_1000fdx].value;
-			gcr.bits.link_1000hdx =
-				param_arr[param_anar_1000hdx].value;
-			if ((status = nxge_mii_write(nxgep, xcvr_portn,
-				(uint8_t)(uint64_t)(&mii_regs->gcr), gcr.value))
-				!= NXGE_OK)
-				goto fail;
-		}
-
-		bmcr.bits.enable_autoneg = 1;
-		bmcr.bits.restart_autoneg = 1;
-
-	} else {
-		NXGE_DEBUG_MSG((nxgep, MAC_CTL, "Going into forced mode."));
-		bmcr.bits.speed_1000_sel =
-			param_arr[param_anar_1000fdx].value |
-				param_arr[param_anar_1000hdx].value;
-		bmcr.bits.speed_sel = (~bmcr.bits.speed_1000_sel) &
-			(param_arr[param_anar_100fdx].value |
-				param_arr[param_anar_100hdx].value);
-		if (bmcr.bits.speed_1000_sel) {
-			statsp->mac_stats.link_speed = 1000;
-			gcr.value = 0;
-			gcr.bits.ms_mode_en =
-				param_arr[param_master_cfg_enable].value;
-			gcr.bits.master =
-				param_arr[param_master_cfg_value].value;
-			if ((status = nxge_mii_write(nxgep, xcvr_portn,
-				(uint8_t)(uint64_t)(&mii_regs->gcr),
-				gcr.value))
-				!= NXGE_OK)
-				goto fail;
-			if (param_arr[param_anar_1000fdx].value) {
-				bmcr.bits.duplex_mode = 1;
-				statsp->mac_stats.link_duplex = 2;
-			} else
-				statsp->mac_stats.link_duplex = 1;
-		} else if (bmcr.bits.speed_sel) {
-			statsp->mac_stats.link_speed = 100;
-			if (param_arr[param_anar_100fdx].value) {
-				bmcr.bits.duplex_mode = 1;
-				statsp->mac_stats.link_duplex = 2;
-			} else
-				statsp->mac_stats.link_duplex = 1;
-		} else {
-			statsp->mac_stats.link_speed = 10;
-			if (param_arr[param_anar_10fdx].value) {
-				bmcr.bits.duplex_mode = 1;
-				statsp->mac_stats.link_duplex = 2;
-			} else
-				statsp->mac_stats.link_duplex = 1;
-		}
-		if (statsp->mac_stats.link_duplex != 1) {
-			statsp->mac_stats.link_asmpause =
-						statsp->mac_stats.cap_asmpause;
-			statsp->mac_stats.link_pause =
-						statsp->mac_stats.cap_pause;
-		}
-
-		if ((statsp->port_stats.lb_mode == nxge_lb_ext1000) ||
-			(statsp->port_stats.lb_mode == nxge_lb_ext100) ||
-			(statsp->port_stats.lb_mode == nxge_lb_ext10)) {
-			if (statsp->port_stats.lb_mode == nxge_lb_ext1000) {
-				/* BCM5464R 1000mbps external loopback mode */
-				gcr.value = 0;
-				gcr.bits.ms_mode_en = 1;
-				gcr.bits.master = 1;
-				if ((status = nxge_mii_write(nxgep, xcvr_portn,
-					(uint8_t)(uint64_t)(&mii_regs->gcr),
-					gcr.value))
-					!= NXGE_OK)
-					goto fail;
-				bmcr.value = 0;
-				bmcr.bits.speed_1000_sel = 1;
-				statsp->mac_stats.link_speed = 1000;
-			} else if (statsp->port_stats.lb_mode
-			    == nxge_lb_ext100) {
-				/* BCM5464R 100mbps external loopback mode */
-				bmcr.value = 0;
-				bmcr.bits.speed_sel = 1;
-				bmcr.bits.duplex_mode = 1;
-				statsp->mac_stats.link_speed = 100;
-			} else if (statsp->port_stats.lb_mode
-			    == nxge_lb_ext10) {
-				/* BCM5464R 10mbps external loopback mode */
-				bmcr.value = 0;
-				bmcr.bits.duplex_mode = 1;
-				statsp->mac_stats.link_speed = 10;
-			}
-		}
-	}
-
-	if ((status = nxge_mii_write(nxgep, xcvr_portn,
-			(uint8_t)(uint64_t)(&mii_regs->bmcr),
-			bmcr.value)) != NXGE_OK)
-		goto fail;
-
-	if ((status = nxge_mii_read(nxgep, xcvr_portn,
-		(uint8_t)(uint64_t)(&mii_regs->bmcr), &bmcr.value)) != NXGE_OK)
-		goto fail;
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "bmcr = 0x%04X", bmcr.value));
-
-	/*
-	 * Initialize the xcvr status kept in the context structure.
-	 */
-	nxgep->soft_bmsr.value = 0;
-
-	if ((status = nxge_mii_read(nxgep, xcvr_portn,
-		(uint8_t)(uint64_t)(&mii_regs->bmsr),
-			&nxgep->bmsr.value)) != NXGE_OK)
-		goto fail;
-
-	statsp->mac_stats.xcvr_inits++;
-	nxgep->bmsr.value = 0;
-
-fail:
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
-			"<== nxge_mii_xcvr_init status 0x%x", status));
-	return (status);
-}
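Throughout the MII access paths above, mii_regs is a p_mii_regs_t that is deliberately left NULL and used only to derive register offsets: the cast (uint8_t)(uint64_t)(&mii_regs->bmcr) reduces the member address to its byte offset within mii_regs_t, and that offset is what nxge_mii_read()/nxge_mii_write() take as the register argument. A minimal standalone sketch of the idiom follows; the two-field layout is hypothetical and only stands in for the real mii_regs_t.

#include <stdint.h>
#include <stdio.h>

/* Illustrative layout only -- not the real mii_regs_t from the nxge headers. */
typedef struct example_mii_regs {
	uint8_t bmcr;		/* register 0 */
	uint8_t bmsr;		/* register 1 */
} example_mii_regs_t;

int
main(void)
{
	example_mii_regs_t *mii_regs = NULL;

	/*
	 * Same trick as the driver: with mii_regs == NULL, the member
	 * address collapses to the member's byte offset, which is then
	 * passed around as the register number.
	 */
	uint8_t reg = (uint8_t)(uint64_t)(&mii_regs->bmsr);

	(void) printf("bmsr offset = %u\n", reg);
	return (0);
}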
-
-/* Read from a MII compliant register */
-
-nxge_status_t
-nxge_mii_read(p_nxge_t nxgep, uint8_t xcvr_portn, uint8_t xcvr_reg,
-		uint16_t *value)
-{
-	npi_status_t rs = NPI_SUCCESS;
-
-	NXGE_DEBUG_MSG((nxgep, MIF_CTL, "==> nxge_mii_read: xcvr_port<%d>"
-			"xcvr_reg<%d>", xcvr_portn, xcvr_reg));
-
-	MUTEX_ENTER(&nxge_mii_lock);
-
-	if (nxgep->mac.portmode == PORT_1G_COPPER) {
-		if ((rs = npi_mac_mif_mii_read(nxgep->npi_handle,
-				xcvr_portn, xcvr_reg, value)) != NPI_SUCCESS)
-			goto fail;
-	} else if (nxgep->mac.portmode == PORT_1G_FIBER) {
-		if ((rs = npi_mac_pcs_mii_read(nxgep->npi_handle,
-				xcvr_portn, xcvr_reg, value)) != NPI_SUCCESS)
-			goto fail;
-	} else
-		goto fail;
-
-	MUTEX_EXIT(&nxge_mii_lock);
-
-	NXGE_DEBUG_MSG((nxgep, MIF_CTL, "<== nxge_mii_read: xcvr_port<%d>"
-			"xcvr_reg<%d> value=0x%x",
-			xcvr_portn, xcvr_reg, *value));
-	return (NXGE_OK);
-fail:
-	MUTEX_EXIT(&nxge_mii_lock);
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_mii_read: Failed to read mii on xcvr %d",
-			xcvr_portn));
-
-	return (NXGE_ERROR | rs);
-}
-
-/* Write to a MII compliant register */
-
-nxge_status_t
-nxge_mii_write(p_nxge_t nxgep, uint8_t xcvr_portn, uint8_t xcvr_reg,
-		uint16_t value)
-{
-	npi_status_t rs = NPI_SUCCESS;
-
-	NXGE_DEBUG_MSG((nxgep, MIF_CTL, "==> nxge_mii_write: xcvr_port<%d>"
-			"xcvr_reg<%d> value=0x%x", xcvr_portn, xcvr_reg,
-			value));
-
-	MUTEX_ENTER(&nxge_mii_lock);
-
-	if (nxgep->mac.portmode == PORT_1G_COPPER) {
-		if ((rs = npi_mac_mif_mii_write(nxgep->npi_handle,
-				xcvr_portn, xcvr_reg, value)) != NPI_SUCCESS)
-			goto fail;
-	} else if (nxgep->mac.portmode == PORT_1G_FIBER) {
-		if ((rs = npi_mac_pcs_mii_write(nxgep->npi_handle,
-				xcvr_portn, xcvr_reg, value)) != NPI_SUCCESS)
-			goto fail;
-	} else
-		goto fail;
-
-	MUTEX_EXIT(&nxge_mii_lock);
-
-	NXGE_DEBUG_MSG((nxgep, MIF_CTL, "<== nxge_mii_write: xcvr_port<%d>"
-			"xcvr_reg<%d>", xcvr_portn, xcvr_reg));
-	return (NXGE_OK);
-fail:
-	MUTEX_EXIT(&nxge_mii_lock);
-
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_mii_write: Failed to write mii on xcvr %d",
-			xcvr_portn));
-
-	return (NXGE_ERROR | rs);
-}
-
-/* Perform a read from a Clause 45 serdes / transceiver device */
-
-nxge_status_t
-nxge_mdio_read(p_nxge_t nxgep, uint8_t xcvr_portn, uint8_t device,
-		uint16_t xcvr_reg, uint16_t *value)
-{
-	npi_status_t rs = NPI_SUCCESS;
-
-	NXGE_DEBUG_MSG((nxgep, MIF_CTL, "==> nxge_mdio_read: xcvr_port<%d>",
-			xcvr_portn));
-
-	MUTEX_ENTER(&nxge_mdio_lock);
-
-	if ((rs = npi_mac_mif_mdio_read(nxgep->npi_handle,
-			xcvr_portn, device, xcvr_reg, value)) != NPI_SUCCESS)
-		goto fail;
-
-	MUTEX_EXIT(&nxge_mdio_lock);
-
-	NXGE_DEBUG_MSG((nxgep, MIF_CTL, "<== nxge_mdio_read: xcvr_port<%d>",
-			xcvr_portn));
-	return (NXGE_OK);
-fail:
-	MUTEX_EXIT(&nxge_mdio_lock);
-
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_mdio_read: Failed to read mdio on xcvr %d",
-			xcvr_portn));
-
-	return (NXGE_ERROR | rs);
-}
-
-/* Perform a write to a Clause 45 serdes / transceiver device */
-
-nxge_status_t
-nxge_mdio_write(p_nxge_t nxgep, uint8_t xcvr_portn, uint8_t device,
-		uint16_t xcvr_reg, uint16_t value)
-{
-	npi_status_t rs = NPI_SUCCESS;
-
-	NXGE_DEBUG_MSG((nxgep, MIF_CTL, "==> nxge_mdio_write: xcvr_port<%d>",
-			xcvr_portn));
-
-	MUTEX_ENTER(&nxge_mdio_lock);
-
-	if ((rs = npi_mac_mif_mdio_write(nxgep->npi_handle,
-			xcvr_portn, device, xcvr_reg, value)) != NPI_SUCCESS)
-		goto fail;
-
-	MUTEX_EXIT(&nxge_mdio_lock);
-
-	NXGE_DEBUG_MSG((nxgep, MIF_CTL, "<== nxge_mdio_write: xcvr_port<%d>",
-			xcvr_portn));
-	return (NXGE_OK);
-fail:
-	MUTEX_EXIT(&nxge_mdio_lock);
-
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_mdio_write: Failed to write mdio on xcvr %d",
-			xcvr_portn));
-
-	return (NXGE_ERROR | rs);
-}
-
-
-/* Check MII to see if there is any link status change */
-
-nxge_status_t
-nxge_mii_check(p_nxge_t nxgep, mii_bmsr_t bmsr, mii_bmsr_t bmsr_ints,
-		nxge_link_state_t *link_up)
-{
-	p_nxge_param_t	param_arr;
-	p_nxge_stats_t	statsp;
-	p_mii_regs_t	mii_regs;
-	p_mii_bmsr_t	soft_bmsr;
-	mii_anar_t	anar;
-	mii_anlpar_t	anlpar;
-	mii_anar_t	an_common;
-	mii_aner_t	aner;
-	mii_gsr_t	gsr;
-	nxge_status_t	status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_mii_check"));
-
-	mii_regs = NULL;
-	param_arr = nxgep->param_arr;
-	statsp = nxgep->statsp;
-	soft_bmsr = &nxgep->soft_bmsr;
-	*link_up = LINK_NO_CHANGE;
-
-	if (bmsr_ints.bits.link_status) {
-		if (bmsr.bits.link_status) {
-			soft_bmsr->bits.link_status = 1;
-		} else {
-			statsp->mac_stats.link_up = 0;
-			soft_bmsr->bits.link_status = 0;
-			NXGE_DEBUG_MSG((nxgep, MAC_CTL,
-					"Link down cable problem"));
-			*link_up = LINK_IS_DOWN;
-		}
-	}
-
-	if (param_arr[param_autoneg].value) {
-		if (bmsr_ints.bits.auto_neg_complete) {
-			if (bmsr.bits.auto_neg_complete)
-				soft_bmsr->bits.auto_neg_complete = 1;
-			else
-				soft_bmsr->bits.auto_neg_complete = 0;
-		}
-		if (soft_bmsr->bits.link_status == 0) {
-			statsp->mac_stats.link_T4 = 0;
-			statsp->mac_stats.link_speed = 0;
-			statsp->mac_stats.link_duplex = 0;
-			statsp->mac_stats.link_asmpause = 0;
-			statsp->mac_stats.link_pause = 0;
-			statsp->mac_stats.lp_cap_autoneg = 0;
-			statsp->mac_stats.lp_cap_100T4 = 0;
-			statsp->mac_stats.lp_cap_1000fdx = 0;
-			statsp->mac_stats.lp_cap_1000hdx = 0;
-			statsp->mac_stats.lp_cap_100fdx = 0;
-			statsp->mac_stats.lp_cap_100hdx = 0;
-			statsp->mac_stats.lp_cap_10fdx = 0;
-			statsp->mac_stats.lp_cap_10hdx = 0;
-			statsp->mac_stats.lp_cap_10gfdx = 0;
-			statsp->mac_stats.lp_cap_10ghdx = 0;
-			statsp->mac_stats.lp_cap_asmpause = 0;
-			statsp->mac_stats.lp_cap_pause = 0;
-		}
-	} else
-		soft_bmsr->bits.auto_neg_complete = 1;
-
-	if ((bmsr_ints.bits.link_status ||
-		bmsr_ints.bits.auto_neg_complete) &&
-		soft_bmsr->bits.link_status &&
-		soft_bmsr->bits.auto_neg_complete) {
-		statsp->mac_stats.link_up = 1;
-		if (param_arr[param_autoneg].value) {
-			if ((status = nxge_mii_read(nxgep,
-				statsp->mac_stats.xcvr_portn,
-				(uint8_t)(uint64_t)(&mii_regs->anar),
-					&anar.value)) != NXGE_OK)
-				goto fail;
-			if ((status = nxge_mii_read(nxgep,
-				statsp->mac_stats.xcvr_portn,
-				(uint8_t)(uint64_t)(&mii_regs->anlpar),
-					&anlpar.value)) != NXGE_OK)
-				goto fail;
-			if ((status = nxge_mii_read(nxgep,
-				statsp->mac_stats.xcvr_portn,
-				(uint8_t)(uint64_t)(&mii_regs->aner),
-					&aner.value)) != NXGE_OK)
-				goto fail;
-			statsp->mac_stats.lp_cap_autoneg = aner.bits.lp_an_able;
-			statsp->mac_stats.lp_cap_100T4 = anlpar.bits.cap_100T4;
-			statsp->mac_stats.lp_cap_100fdx =
-							anlpar.bits.cap_100fdx;
-			statsp->mac_stats.lp_cap_100hdx =
-							anlpar.bits.cap_100hdx;
-			statsp->mac_stats.lp_cap_10fdx = anlpar.bits.cap_10fdx;
-			statsp->mac_stats.lp_cap_10hdx = anlpar.bits.cap_10hdx;
-			statsp->mac_stats.lp_cap_asmpause =
-						anlpar.bits.cap_asmpause;
-			statsp->mac_stats.lp_cap_pause = anlpar.bits.cap_pause;
-			an_common.value = anar.value & anlpar.value;
-			if (param_arr[param_anar_1000fdx].value ||
-				param_arr[param_anar_1000hdx].value) {
-				if ((status = nxge_mii_read(nxgep,
-					statsp->mac_stats.xcvr_portn,
-					(uint8_t)(uint64_t)(&mii_regs->gsr),
-						&gsr.value))
-						!= NXGE_OK)
-					goto fail;
-				statsp->mac_stats.lp_cap_1000fdx =
-					gsr.bits.link_1000fdx;
-				statsp->mac_stats.lp_cap_1000hdx =
-					gsr.bits.link_1000hdx;
-				if (param_arr[param_anar_1000fdx].value &&
-					gsr.bits.link_1000fdx) {
-					statsp->mac_stats.link_speed = 1000;
-					statsp->mac_stats.link_duplex = 2;
-				} else if (
-					param_arr[param_anar_1000hdx].value &&
-						gsr.bits.link_1000hdx) {
-					statsp->mac_stats.link_speed = 1000;
-					statsp->mac_stats.link_duplex = 1;
-				}
-			}
-			if ((an_common.value != 0) &&
-					!(statsp->mac_stats.link_speed)) {
-				if (an_common.bits.cap_100T4) {
-					statsp->mac_stats.link_T4 = 1;
-					statsp->mac_stats.link_speed = 100;
-					statsp->mac_stats.link_duplex = 1;
-				} else if (an_common.bits.cap_100fdx) {
-					statsp->mac_stats.link_speed = 100;
-					statsp->mac_stats.link_duplex = 2;
-				} else if (an_common.bits.cap_100hdx) {
-					statsp->mac_stats.link_speed = 100;
-					statsp->mac_stats.link_duplex = 1;
-				} else if (an_common.bits.cap_10fdx) {
-					statsp->mac_stats.link_speed = 10;
-					statsp->mac_stats.link_duplex = 2;
-				} else if (an_common.bits.cap_10hdx) {
-					statsp->mac_stats.link_speed = 10;
-					statsp->mac_stats.link_duplex = 1;
-				} else {
-					goto fail;
-				}
-			}
-			if (statsp->mac_stats.link_duplex != 1) {
-				statsp->mac_stats.link_asmpause =
-					an_common.bits.cap_asmpause;
-				if (!statsp->mac_stats.link_asmpause) {
-					statsp->mac_stats.link_pause =
-						an_common.bits.cap_pause;
-				} else if ((statsp->mac_stats.cap_pause == 0) &&
-					(statsp->mac_stats.lp_cap_pause == 1)) {
-					statsp->mac_stats.link_pause = 0;
-				} else {
-					statsp->mac_stats.link_pause = 1;
-				}
-			}
-		}
-		*link_up = LINK_IS_UP;
-	}
-
-	if (nxgep->link_notify) {
-		*link_up = ((statsp->mac_stats.link_up) ? LINK_IS_UP :
-				LINK_IS_DOWN);
-		nxgep->link_notify = B_FALSE;
-	}
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_mii_check"));
-	return (NXGE_OK);
-fail:
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_mii_check: Unable to check MII"));
-	return (status);
-}
-
-/* Add a multicast address entry into the HW hash table */
-
-nxge_status_t
-nxge_add_mcast_addr(p_nxge_t nxgep, struct ether_addr *addrp)
-{
-	uint32_t mchash;
-	p_hash_filter_t hash_filter;
-	uint16_t hash_bit;
-	boolean_t rx_init = B_FALSE;
-	uint_t j;
-	nxge_status_t status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_add_mcast_addr"));
-
-	RW_ENTER_WRITER(&nxgep->filter_lock);
-	mchash = crc32_mchash(addrp);
-	if (nxgep->hash_filter == NULL) {
-		NXGE_DEBUG_MSG((NULL, STR_CTL,
-			"Allocating hash filter storage."));
-		nxgep->hash_filter = KMEM_ZALLOC(sizeof (hash_filter_t),
-					KM_SLEEP);
-	}
-	hash_filter = nxgep->hash_filter;
-	j = mchash / HASH_REG_WIDTH;
-	hash_bit = (1 << (mchash % HASH_REG_WIDTH));
-	hash_filter->hash_filter_regs[j] |= hash_bit;
-	hash_filter->hash_bit_ref_cnt[mchash]++;
-	if (hash_filter->hash_bit_ref_cnt[mchash] == 1) {
-		hash_filter->hash_ref_cnt++;
-		rx_init = B_TRUE;
-	}
-	if (rx_init) {
-		if ((status = nxge_rx_mac_disable(nxgep)) != NXGE_OK)
-			goto fail;
-		if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK)
-			goto fail;
-	}
-
-	RW_EXIT(&nxgep->filter_lock);
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_add_mcast_addr"));
-
-	return (NXGE_OK);
-fail:
-	RW_EXIT(&nxgep->filter_lock);
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_add_mcast_addr: "
-					"Unable to add multicast address"));
-	return (status);
-}
-
-/* Remove a multicast address entry from the HW hash table */
-
-nxge_status_t
-nxge_del_mcast_addr(p_nxge_t nxgep, struct ether_addr *addrp)
-{
-	uint32_t mchash;
-	p_hash_filter_t hash_filter;
-	uint16_t hash_bit;
-	boolean_t rx_init = B_FALSE;
-	uint_t j;
-	nxge_status_t status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_del_mcast_addr"));
-	RW_ENTER_WRITER(&nxgep->filter_lock);
-	mchash = crc32_mchash(addrp);
-	if (nxgep->hash_filter == NULL) {
-		NXGE_DEBUG_MSG((NULL, STR_CTL,
-			"Hash filter already de_allocated."));
-		RW_EXIT(&nxgep->filter_lock);
-		NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_del_mcast_addr"));
-		return (NXGE_OK);
-	}
-	hash_filter = nxgep->hash_filter;
-	hash_filter->hash_bit_ref_cnt[mchash]--;
-	if (hash_filter->hash_bit_ref_cnt[mchash] == 0) {
-		j = mchash / HASH_REG_WIDTH;
-		hash_bit = (1 << (mchash % HASH_REG_WIDTH));
-		hash_filter->hash_filter_regs[j] &= ~hash_bit;
-		hash_filter->hash_ref_cnt--;
-		rx_init = B_TRUE;
-	}
-	if (hash_filter->hash_ref_cnt == 0) {
-		NXGE_DEBUG_MSG((NULL, STR_CTL,
-			"De-allocating hash filter storage."));
-		KMEM_FREE(hash_filter, sizeof (hash_filter_t));
-		nxgep->hash_filter = NULL;
-	}
-
-	if (rx_init) {
-		if ((status = nxge_rx_mac_disable(nxgep)) != NXGE_OK)
-			goto fail;
-		if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK)
-			goto fail;
-	}
-	RW_EXIT(&nxgep->filter_lock);
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_del_mcast_addr"));
-
-	return (NXGE_OK);
-fail:
-	RW_EXIT(&nxgep->filter_lock);
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_del_mcast_addr: "
-			"Unable to remove multicast address"));
-
-	return (status);
-}
-
-/* Set MAC address into MAC address HW registers */
-
-nxge_status_t
-nxge_set_mac_addr(p_nxge_t nxgep, struct ether_addr *addrp)
-{
-	nxge_status_t status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_set_mac_addr"));
-
-	MUTEX_ENTER(&nxgep->ouraddr_lock);
-	/*
-	 * Exit if the address is the same as ouraddr, or is a
-	 * multicast or broadcast address.
-	 */
-	if (((addrp->ether_addr_octet[0] & 01) == 1) ||
-		(ether_cmp(addrp, &etherbroadcastaddr) == 0) ||
-		(ether_cmp(addrp, &nxgep->ouraddr) == 0)) {
-		goto nxge_set_mac_addr_exit;
-	}
-	nxgep->ouraddr = *addrp;
-	/*
-	 * Set new interface local address and re-init device.
-	 * This is destructive to any other streams attached
-	 * to this device.
-	 */
-	RW_ENTER_WRITER(&nxgep->filter_lock);
-	if ((status = nxge_rx_mac_disable(nxgep)) != NXGE_OK)
-		goto fail;
-	if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK)
-		goto fail;
-
-	RW_EXIT(&nxgep->filter_lock);
-	MUTEX_EXIT(&nxgep->ouraddr_lock);
-	goto nxge_set_mac_addr_end;
-nxge_set_mac_addr_exit:
-	MUTEX_EXIT(&nxgep->ouraddr_lock);
-nxge_set_mac_addr_end:
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_set_mac_addr"));
-
-	return (NXGE_OK);
-fail:
-	MUTEX_EXIT(&nxgep->ouraddr_lock);
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_set_mac_addr: "
-			"Unable to set mac address"));
-	return (status);
-}
-
-/* Check status of MII (MIF or PCS) link */
-
-nxge_status_t
-nxge_check_mii_link(p_nxge_t nxgep)
-{
-	mii_bmsr_t bmsr_ints, bmsr_data;
-	mii_anlpar_t anlpar;
-	mii_gsr_t gsr;
-	p_mii_regs_t mii_regs;
-	nxge_status_t status = NXGE_OK;
-	uint8_t portn;
-	nxge_link_state_t link_up = LINK_NO_CHANGE;
-
-	portn = nxgep->mac.portnum;
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_check_mii_link port<%d>",
-				portn));
-
-	mii_regs = NULL;
-
-	RW_ENTER_WRITER(&nxgep->filter_lock);
-
-	if (nxgep->statsp->port_stats.lb_mode > nxge_lb_ext10)
-		goto nxge_check_mii_link_exit;
-
-	if ((status = nxge_mii_read(nxgep, nxgep->statsp->mac_stats.xcvr_portn,
-		(uint8_t)(uint64_t)(&mii_regs->bmsr),
-		&bmsr_data.value)) != NXGE_OK)
-		goto fail;
-
-	if (nxgep->param_arr[param_autoneg].value) {
-		if ((status = nxge_mii_read(nxgep,
-			nxgep->statsp->mac_stats.xcvr_portn,
-			(uint8_t)(uint64_t)(&mii_regs->gsr),
-			&gsr.value)) != NXGE_OK)
-			goto fail;
-		if ((status = nxge_mii_read(nxgep,
-			nxgep->statsp->mac_stats.xcvr_portn,
-			(uint8_t)(uint64_t)(&mii_regs->anlpar),
-			&anlpar.value)) != NXGE_OK)
-			goto fail;
-		if (nxgep->statsp->mac_stats.link_up &&
-			((nxgep->statsp->mac_stats.lp_cap_1000fdx ^
-				gsr.bits.link_1000fdx) ||
-			(nxgep->statsp->mac_stats.lp_cap_1000hdx ^
-				gsr.bits.link_1000hdx) ||
-			(nxgep->statsp->mac_stats.lp_cap_100T4 ^
-				anlpar.bits.cap_100T4) ||
-			(nxgep->statsp->mac_stats.lp_cap_100fdx ^
-				anlpar.bits.cap_100fdx) ||
-			(nxgep->statsp->mac_stats.lp_cap_100hdx ^
-				anlpar.bits.cap_100hdx) ||
-			(nxgep->statsp->mac_stats.lp_cap_10fdx ^
-				anlpar.bits.cap_10fdx) ||
-			(nxgep->statsp->mac_stats.lp_cap_10hdx ^
-				anlpar.bits.cap_10hdx))) {
-			bmsr_data.bits.link_status = 0;
-		}
-	}
-
-	/* Workaround for link down issue */
-	if (bmsr_data.value == 0) {
-		cmn_err(CE_NOTE, "!LINK DEBUG: Read zero bmsr\n");
-		goto nxge_check_mii_link_exit;
-	}
-
-	bmsr_ints.value = nxgep->bmsr.value ^ bmsr_data.value;
-	nxgep->bmsr.value = bmsr_data.value;
-	if ((status = nxge_mii_check(nxgep, bmsr_data, bmsr_ints, &link_up))
-			!= NXGE_OK)
-		goto fail;
-
-nxge_check_mii_link_exit:
-	RW_EXIT(&nxgep->filter_lock);
-	if (link_up == LINK_IS_UP) {
-		nxge_link_is_up(nxgep);
-	} else if (link_up == LINK_IS_DOWN) {
-		nxge_link_is_down(nxgep);
-	}
-
-	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_check_mii_link port<%d>",
-				portn));
-	return (NXGE_OK);
-
-fail:
-	RW_EXIT(&nxgep->filter_lock);
-
-	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_check_mii_link: Failed to check link port<%d>",
-			portn));
-	return (status);
-}
-
-
-/*ARGSUSED*/
-nxge_status_t
-nxge_check_10g_link(p_nxge_t nxgep)
-{
-	uint8_t		portn;
-
-	nxge_status_t	status = NXGE_OK;
-	boolean_t	link_up;
-
-	portn = nxgep->mac.portnum;
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_check_10g_link port<%d>",
-				portn));
-
-	status = nxge_check_bcm8704_link(nxgep, &link_up);
-
-	if (status != NXGE_OK)
-		goto fail;
-
-	if (link_up) {
-		if (nxgep->link_notify ||
-			nxgep->statsp->mac_stats.link_up == 0) {
-			if (nxge_10g_link_led_on(nxgep) != NXGE_OK)
-				goto fail;
-			nxgep->statsp->mac_stats.link_up = 1;
-			nxgep->statsp->mac_stats.link_speed = 10000;
-			nxgep->statsp->mac_stats.link_duplex = 2;
-
-			nxge_link_is_up(nxgep);
-			nxgep->link_notify = B_FALSE;
-		}
-	} else {
-		if (nxgep->link_notify ||
-			nxgep->statsp->mac_stats.link_up == 1) {
-			if (nxge_10g_link_led_off(nxgep) != NXGE_OK)
-				goto fail;
-			NXGE_DEBUG_MSG((nxgep, MAC_CTL,
-					"Link down cable problem"));
-			nxgep->statsp->mac_stats.link_up = 0;
-			nxgep->statsp->mac_stats.link_speed = 0;
-			nxgep->statsp->mac_stats.link_duplex = 0;
-
-			nxge_link_is_down(nxgep);
-			nxgep->link_notify = B_FALSE;
-		}
-	}
-
-	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_check_10g_link port<%d>",
-				portn));
-	return (NXGE_OK);
-
-fail:
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_check_10g_link: Failed to check link port<%d>",
-			portn));
-	return (status);
-}
-
-
-/* Declare link down */
-
-void
-nxge_link_is_down(p_nxge_t nxgep)
-{
-	p_nxge_stats_t statsp;
-	char link_stat_msg[64];
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_link_is_down"));
-
-	statsp = nxgep->statsp;
-	(void) sprintf(link_stat_msg, "xcvr addr:0x%02x - link down",
-			statsp->mac_stats.xcvr_portn);
-
-	mac_link_update(nxgep->mach, LINK_STATE_DOWN);
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_link_is_down"));
-}
-
-/* Declare link up */
-
-void
-nxge_link_is_up(p_nxge_t nxgep)
-{
-	p_nxge_stats_t statsp;
-	char link_stat_msg[64];
-	uint32_t val;
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_link_is_up"));
-
-	statsp = nxgep->statsp;
-	(void) sprintf(link_stat_msg, "xcvr addr:0x%02x - link up %d Mbps ",
-				statsp->mac_stats.xcvr_portn,
-				statsp->mac_stats.link_speed);
-
-	if (statsp->mac_stats.link_T4)
-		(void) strcat(link_stat_msg, "T4");
-	else if (statsp->mac_stats.link_duplex == 2)
-		(void) strcat(link_stat_msg, "full duplex");
-	else
-		(void) strcat(link_stat_msg, "half duplex");
-
-	(void) nxge_xif_init(nxgep);
-
-	/* Clean up symbol errors incurred during link transition */
-	if (nxgep->mac.portmode == PORT_10G_FIBER) {
-		(void) npi_xmac_xpcs_read(nxgep->npi_handle, nxgep->mac.portnum,
-					XPCS_REG_SYMBOL_ERR_L0_1_COUNTER, &val);
-		(void) npi_xmac_xpcs_read(nxgep->npi_handle, nxgep->mac.portnum,
-					XPCS_REG_SYMBOL_ERR_L2_3_COUNTER, &val);
-	}
-
-	mac_link_update(nxgep->mach, LINK_STATE_UP);
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_link_is_up"));
-}
-
-/*
- * Calculate the bit in the multicast address filter
- * that selects the given address.
- * Note: For GEM, the last 8 bits are used.
- */
-uint32_t
-crc32_mchash(p_ether_addr_t addr)
-{
-	uint8_t *cp;
-	uint32_t crc;
-	uint32_t c;
-	int byte;
-	int bit;
-
-	cp = (uint8_t *)addr;
-	crc = (uint32_t)0xffffffff;
-	for (byte = 0; byte < 6; byte++) {
-		c = (uint32_t)cp[byte];
-		for (bit = 0; bit < 8; bit++) {
-			if ((c & 0x1) ^ (crc & 0x1))
-				crc = (crc >> 1)^0xedb88320;
-			else
-				crc = (crc >> 1);
-			c >>= 1;
-		}
-	}
-	return ((~crc) >> (32 - HASH_BITS));
-}
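The value computed here is consumed by nxge_add_mcast_addr() and nxge_del_mcast_addr() above, which split it into a filter-register index and a bit position using HASH_REG_WIDTH. A small sketch of that mapping follows, using a stand-in width; the real HASH_REG_WIDTH and HASH_BITS are defined in the nxge headers, not in this file.

#include <stdint.h>
#include <stdio.h>

#define	EX_HASH_REG_WIDTH	16	/* stand-in for HASH_REG_WIDTH */

int
main(void)
{
	uint32_t mchash = 0x5a;	/* example return value of crc32_mchash() */
	uint32_t reg = mchash / EX_HASH_REG_WIDTH;
	uint16_t bit = (uint16_t)(1 << (mchash % EX_HASH_REG_WIDTH));

	/* Mirrors: hash_filter->hash_filter_regs[reg] |= bit; */
	(void) printf("hash 0x%x -> register %u, bit mask 0x%04x\n",
	    mchash, reg, bit);
	return (0);
}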
-
-/* Reset serdes */
-
-nxge_status_t
-nxge_serdes_reset(p_nxge_t nxgep)
-{
-	npi_handle_t		handle;
-
-	handle = nxgep->npi_handle;
-
-	ESR_REG_WR(handle, ESR_RESET_REG, ESR_RESET_0 | ESR_RESET_1);
-	drv_usecwait(500);
-	ESR_REG_WR(handle, ESR_CONFIG_REG, 0);
-
-	return (NXGE_OK);
-}
-
-/* Monitor link status using interrupt or polling */
-
-nxge_status_t
-nxge_link_monitor(p_nxge_t nxgep, link_mon_enable_t enable)
-{
-	nxge_status_t status = NXGE_OK;
-
-	/*
-	 * Don't check the link on a 10G fiber port whose port number is
-	 * greater than 1; only ports 0 and 1 are XMAC (10G-capable) ports.
-	 */
-	if ((nxgep->mac.portmode == PORT_10G_FIBER) && (nxgep->mac.portnum > 1))
-		return (NXGE_OK);
-
-	if (nxgep->statsp == NULL) {
-		/* stats has not been allocated. */
-		return (NXGE_OK);
-	}
-	/* Don't check the link if the port is in a loopback mode */
-	if (nxgep->statsp->port_stats.lb_mode != nxge_lb_normal)
-		return (NXGE_OK);
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
-			"==> nxge_link_monitor port<%d> enable=%d",
-			nxgep->mac.portnum, enable));
-	if (enable == LINK_MONITOR_START) {
-		if (nxgep->mac.linkchkmode == LINKCHK_INTR) {
-			if ((status = nxge_link_intr(nxgep, LINK_INTR_START))
-							!= NXGE_OK)
-				goto fail;
-		} else {
-			switch (nxgep->mac.portmode) {
-			case PORT_10G_FIBER:
-				nxgep->nxge_link_poll_timerid = timeout(
-						(fptrv_t)nxge_check_10g_link,
-						nxgep,
-						drv_usectohz(1000 * 1000));
-			break;
-
-			case PORT_1G_COPPER:
-			case PORT_1G_FIBER:
-				nxgep->nxge_link_poll_timerid = timeout(
-						(fptrv_t)nxge_check_mii_link,
-						nxgep,
-						drv_usectohz(1000 * 1000));
-			break;
-			default:
-				;
-			}
-		}
-	} else {
-		if (nxgep->mac.linkchkmode == LINKCHK_INTR) {
-			if ((status = nxge_link_intr(nxgep, LINK_INTR_STOP))
-							!= NXGE_OK)
-				goto fail;
-		} else {
-			if (nxgep->nxge_link_poll_timerid != 0) {
-				(void) untimeout(nxgep->nxge_link_poll_timerid);
-				nxgep->nxge_link_poll_timerid = 0;
-			}
-		}
-	}
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
-			"<== nxge_link_monitor port<%d> enable=%d",
-			nxgep->mac.portnum, enable));
-	return (NXGE_OK);
-fail:
-	return (status);
-}
-
-/* Set promiscuous mode */
-
-nxge_status_t
-nxge_set_promisc(p_nxge_t nxgep, boolean_t on)
-{
-	nxge_status_t status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
-		"==> nxge_set_promisc: on %d", on));
-
-	nxgep->filter.all_phys_cnt = ((on) ? 1 : 0);
-
-	RW_ENTER_WRITER(&nxgep->filter_lock);
-
-	if ((status = nxge_rx_mac_disable(nxgep)) != NXGE_OK) {
-		goto fail;
-	}
-	if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) {
-		goto fail;
-	}
-
-	RW_EXIT(&nxgep->filter_lock);
-
-	if (on)
-		nxgep->statsp->mac_stats.promisc = B_TRUE;
-	else
-		nxgep->statsp->mac_stats.promisc = B_FALSE;
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_set_promisc"));
-
-	return (NXGE_OK);
-fail:
-	RW_EXIT(&nxgep->filter_lock);
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_set_promisc: "
-			"Unable to set promisc (%d)", on));
-
-	return (status);
-}
-
-/*ARGSUSED*/
-uint_t
-nxge_mif_intr(void *arg1, void *arg2)
-{
-#ifdef	NXGE_DEBUG
-	p_nxge_t		nxgep = (p_nxge_t)arg2;
-#endif
-#ifdef	NXGE_MIF
-	p_nxge_ldv_t		ldvp = (p_nxge_ldv_t)arg1;
-	uint32_t		status;
-	npi_handle_t		handle;
-	uint8_t			portn;
-	p_nxge_stats_t		statsp;
-#endif
-
-#ifdef	NXGE_MIF
-	if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
-		nxgep = ldvp->nxgep;
-	}
-	nxgep = ldvp->nxgep;
-#endif
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_mif_intr"));
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_mif_intr"));
-	return (DDI_INTR_CLAIMED);
-
-mif_intr_fail:
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_mif_intr"));
-	return (DDI_INTR_UNCLAIMED);
-}
-
-/*ARGSUSED*/
-uint_t
-nxge_mac_intr(void *arg1, void *arg2)
-{
-	p_nxge_t		nxgep = (p_nxge_t)arg2;
-	p_nxge_ldv_t		ldvp = (p_nxge_ldv_t)arg1;
-	p_nxge_ldg_t		ldgp;
-	uint32_t		status;
-	npi_handle_t		handle;
-	uint8_t			portn;
-	p_nxge_stats_t		statsp;
-	npi_status_t		rs = NPI_SUCCESS;
-
-	if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
-		nxgep = ldvp->nxgep;
-	}
-
-	ldgp = ldvp->ldgp;
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_mac_intr: "
-		"group %d", ldgp->ldg));
-
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	/*
-	 * This interrupt handler is for a specific
-	 * mac port.
-	 */
-	statsp = (p_nxge_stats_t)nxgep->statsp;
-	portn = nxgep->mac.portnum;
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL,
-		"==> nxge_mac_intr: reading mac stats: port<%d>", portn));
-
-	if (nxgep->mac.porttype == PORT_TYPE_XMAC) {
-		rs = npi_xmac_tx_get_istatus(handle, portn,
-					(xmac_tx_iconfig_t *)&status);
-		if (rs != NPI_SUCCESS)
-			goto npi_fail;
-		if (status & ICFG_XMAC_TX_ALL) {
-			if (status & ICFG_XMAC_TX_UNDERRUN) {
-				statsp->xmac_stats.tx_underflow_err++;
-				NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-					NXGE_FM_EREPORT_TXMAC_UNDERFLOW);
-			}
-			if (status & ICFG_XMAC_TX_MAX_PACKET_ERR) {
-				statsp->xmac_stats.tx_maxpktsize_err++;
-				NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-					NXGE_FM_EREPORT_TXMAC_MAX_PKT_ERR);
-			}
-			if (status & ICFG_XMAC_TX_OVERFLOW) {
-				statsp->xmac_stats.tx_overflow_err++;
-				NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-					NXGE_FM_EREPORT_TXMAC_OVERFLOW);
-			}
-			if (status & ICFG_XMAC_TX_FIFO_XFR_ERR) {
-				statsp->xmac_stats.tx_fifo_xfr_err++;
-				NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-					NXGE_FM_EREPORT_TXMAC_TXFIFO_XFR_ERR);
-			}
-			if (status & ICFG_XMAC_TX_BYTE_CNT_EXP) {
-				statsp->xmac_stats.tx_byte_cnt +=
-							XTXMAC_BYTE_CNT_MASK;
-			}
-			if (status & ICFG_XMAC_TX_FRAME_CNT_EXP) {
-				statsp->xmac_stats.tx_frame_cnt +=
-							XTXMAC_FRM_CNT_MASK;
-			}
-		}
-
-		rs = npi_xmac_rx_get_istatus(handle, portn,
-					(xmac_rx_iconfig_t *)&status);
-		if (rs != NPI_SUCCESS)
-			goto npi_fail;
-		if (status & ICFG_XMAC_RX_ALL) {
-			if (status & ICFG_XMAC_RX_OVERFLOW)
-				statsp->xmac_stats.rx_overflow_err++;
-			if (status & ICFG_XMAC_RX_UNDERFLOW) {
-				statsp->xmac_stats.rx_underflow_err++;
-				NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-					NXGE_FM_EREPORT_RXMAC_UNDERFLOW);
-			}
-			if (status & ICFG_XMAC_RX_CRC_ERR_CNT_EXP) {
-				statsp->xmac_stats.rx_crc_err_cnt +=
-							XRXMAC_CRC_ER_CNT_MASK;
-				NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-					NXGE_FM_EREPORT_RXMAC_CRC_ERRCNT_EXP);
-			}
-			if (status & ICFG_XMAC_RX_LEN_ERR_CNT_EXP) {
-				statsp->xmac_stats.rx_len_err_cnt +=
-							MAC_LEN_ER_CNT_MASK;
-				NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-				NXGE_FM_EREPORT_RXMAC_LENGTH_ERRCNT_EXP);
-			}
-			if (status & ICFG_XMAC_RX_VIOL_ERR_CNT_EXP) {
-				statsp->xmac_stats.rx_viol_err_cnt +=
-							XRXMAC_CD_VIO_CNT_MASK;
-				NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-					NXGE_FM_EREPORT_RXMAC_VIOL_ERRCNT_EXP);
-			}
-			if (status & ICFG_XMAC_RX_OCT_CNT_EXP) {
-				statsp->xmac_stats.rx_byte_cnt +=
-							XRXMAC_BT_CNT_MASK;
-			}
-			if (status & ICFG_XMAC_RX_HST_CNT1_EXP) {
-				statsp->xmac_stats.rx_hist1_cnt +=
-							XRXMAC_HIST_CNT1_MASK;
-			}
-			if (status & ICFG_XMAC_RX_HST_CNT2_EXP) {
-				statsp->xmac_stats.rx_hist2_cnt +=
-							XRXMAC_HIST_CNT2_MASK;
-			}
-			if (status & ICFG_XMAC_RX_HST_CNT3_EXP) {
-				statsp->xmac_stats.rx_hist3_cnt +=
-							XRXMAC_HIST_CNT3_MASK;
-			}
-			if (status & ICFG_XMAC_RX_HST_CNT4_EXP) {
-				statsp->xmac_stats.rx_hist4_cnt +=
-							XRXMAC_HIST_CNT4_MASK;
-			}
-			if (status & ICFG_XMAC_RX_HST_CNT5_EXP) {
-				statsp->xmac_stats.rx_hist5_cnt +=
-							XRXMAC_HIST_CNT5_MASK;
-			}
-			if (status & ICFG_XMAC_RX_HST_CNT6_EXP) {
-				statsp->xmac_stats.rx_hist6_cnt +=
-							XRXMAC_HIST_CNT6_MASK;
-			}
-			if (status & ICFG_XMAC_RX_BCAST_CNT_EXP) {
-				statsp->xmac_stats.rx_broadcast_cnt +=
-							XRXMAC_BC_FRM_CNT_MASK;
-			}
-			if (status & ICFG_XMAC_RX_MCAST_CNT_EXP) {
-				statsp->xmac_stats.rx_mult_cnt +=
-							XRXMAC_MC_FRM_CNT_MASK;
-			}
-			if (status & ICFG_XMAC_RX_FRAG_CNT_EXP) {
-				statsp->xmac_stats.rx_frag_cnt +=
-							XRXMAC_FRAG_CNT_MASK;
-				NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-					NXGE_FM_EREPORT_RXMAC_RXFRAG_CNT_EXP);
-			}
-			if (status & ICFG_XMAC_RX_ALIGNERR_CNT_EXP) {
-				statsp->xmac_stats.rx_frame_align_err_cnt +=
-							XRXMAC_AL_ER_CNT_MASK;
-				NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-					NXGE_FM_EREPORT_RXMAC_ALIGN_ECNT_EXP);
-			}
-			if (status & ICFG_XMAC_RX_LINK_FLT_CNT_EXP) {
-				statsp->xmac_stats.rx_linkfault_err_cnt +=
-							XMAC_LINK_FLT_CNT_MASK;
-				NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-				NXGE_FM_EREPORT_RXMAC_LINKFAULT_CNT_EXP);
-			}
-			if (status & ICFG_XMAC_RX_REMOTE_FLT_DET) {
-				statsp->xmac_stats.rx_remotefault_err++;
-			}
-			if (status & ICFG_XMAC_RX_LOCAL_FLT_DET) {
-				statsp->xmac_stats.rx_localfault_err++;
-			}
-		}
-
-		rs = npi_xmac_ctl_get_istatus(handle, portn,
-						(xmac_ctl_iconfig_t *)&status);
-		if (rs != NPI_SUCCESS)
-			goto npi_fail;
-		if (status & ICFG_XMAC_CTRL_ALL) {
-			if (status & ICFG_XMAC_CTRL_PAUSE_RCVD)
-				statsp->xmac_stats.rx_pause_cnt++;
-			if (status & ICFG_XMAC_CTRL_PAUSE_STATE)
-				statsp->xmac_stats.tx_pause_state++;
-			if (status & ICFG_XMAC_CTRL_NOPAUSE_STATE)
-				statsp->xmac_stats.tx_nopause_state++;
-		}
-	} else if (nxgep->mac.porttype == PORT_TYPE_BMAC) {
-		rs = npi_bmac_tx_get_istatus(handle, portn,
-						(bmac_tx_iconfig_t *)&status);
-		if (rs != NPI_SUCCESS)
-			goto npi_fail;
-		if (status & ICFG_BMAC_TX_ALL) {
-			if (status & ICFG_BMAC_TX_UNDERFLOW) {
-				statsp->bmac_stats.tx_underrun_err++;
-				NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-					NXGE_FM_EREPORT_TXMAC_UNDERFLOW);
-			}
-			if (status & ICFG_BMAC_TX_MAXPKTSZ_ERR) {
-				statsp->bmac_stats.tx_max_pkt_err++;
-				NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-					NXGE_FM_EREPORT_TXMAC_MAX_PKT_ERR);
-			}
-			if (status & ICFG_BMAC_TX_BYTE_CNT_EXP) {
-				statsp->bmac_stats.tx_byte_cnt +=
-							BTXMAC_BYTE_CNT_MASK;
-			}
-			if (status & ICFG_BMAC_TX_FRAME_CNT_EXP) {
-				statsp->bmac_stats.tx_frame_cnt +=
-							BTXMAC_FRM_CNT_MASK;
-			}
-		}
-
-		rs = npi_bmac_rx_get_istatus(handle, portn,
-						(bmac_rx_iconfig_t *)&status);
-		if (rs != NPI_SUCCESS)
-			goto npi_fail;
-		if (status & ICFG_BMAC_RX_ALL) {
-			if (status & ICFG_BMAC_RX_OVERFLOW) {
-				statsp->bmac_stats.rx_overflow_err++;
-			}
-			if (status & ICFG_BMAC_RX_FRAME_CNT_EXP) {
-				statsp->bmac_stats.rx_frame_cnt +=
-							RXMAC_FRM_CNT_MASK;
-			}
-			if (status & ICFG_BMAC_RX_CRC_ERR_CNT_EXP) {
-				statsp->bmac_stats.rx_crc_err_cnt +=
-							BMAC_CRC_ER_CNT_MASK;
-				NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-					NXGE_FM_EREPORT_RXMAC_CRC_ERRCNT_EXP);
-			}
-			if (status & ICFG_BMAC_RX_LEN_ERR_CNT_EXP) {
-				statsp->bmac_stats.rx_len_err_cnt +=
-							MAC_LEN_ER_CNT_MASK;
-				NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-				NXGE_FM_EREPORT_RXMAC_LENGTH_ERRCNT_EXP);
-			}
-			if (status & ICFG_BMAC_RX_VIOL_ERR_CNT_EXP) {
-				statsp->bmac_stats.rx_viol_err_cnt +=
-							BMAC_CD_VIO_CNT_MASK;
-				NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-					NXGE_FM_EREPORT_RXMAC_VIOL_ERRCNT_EXP);
-			}
-			if (status & ICFG_BMAC_RX_BYTE_CNT_EXP) {
-				statsp->bmac_stats.rx_byte_cnt +=
-							BRXMAC_BYTE_CNT_MASK;
-			}
-			if (status & ICFG_BMAC_RX_ALIGNERR_CNT_EXP) {
-				statsp->bmac_stats.rx_align_err_cnt +=
-							BMAC_AL_ER_CNT_MASK;
-				NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-					NXGE_FM_EREPORT_RXMAC_ALIGN_ECNT_EXP);
-			}
-		}
-
-		rs = npi_bmac_ctl_get_istatus(handle, portn,
-						(bmac_ctl_iconfig_t *)&status);
-		if (rs != NPI_SUCCESS)
-			goto npi_fail;
-
-		if (status & ICFG_BMAC_CTL_ALL) {
-			if (status & ICFG_BMAC_CTL_RCVPAUSE)
-				statsp->bmac_stats.rx_pause_cnt++;
-			if (status & ICFG_BMAC_CTL_INPAUSE_ST)
-				statsp->bmac_stats.tx_pause_state++;
-			if (status & ICFG_BMAC_CTL_INNOTPAUSE_ST)
-				statsp->bmac_stats.tx_nopause_state++;
-		}
-	}
-
-	if (ldgp->nldvs == 1) {
-		(void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg,
-			B_TRUE, ldgp->ldg_timer);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_mac_intr"));
-	return (DDI_INTR_CLAIMED);
-
-npi_fail:
-	NXGE_ERROR_MSG((nxgep, INT_CTL, "<== nxge_mac_intr"));
-	return (DDI_INTR_UNCLAIMED);
-}
-
-nxge_status_t
-nxge_check_bcm8704_link(p_nxge_t nxgep, boolean_t *link_up)
-{
-	uint8_t		phy_port_addr;
-	nxge_status_t	status = NXGE_OK;
-	boolean_t	rx_sig_ok;
-	boolean_t	pcs_blk_lock;
-	boolean_t	link_align;
-	uint16_t	val1, val2, val3;
-#ifdef	NXGE_DEBUG_SYMBOL_ERR
-	uint16_t	val_debug;
-	uint16_t	val;
-#endif
-
-	phy_port_addr = nxgep->statsp->mac_stats.xcvr_portn;
-
-#ifdef	NXGE_DEBUG_SYMBOL_ERR
-	/* Check Device 3 Register Device 3 0xC809 */
-	(void) nxge_mdio_read(nxgep, phy_port_addr, 0x3, 0xC809, &val_debug);
-	if ((val_debug & ~0x200) != 0) {
-		cmn_err(CE_NOTE, "!Port%d BCM8704 Dev3 Reg 0xc809 = 0x%x\n",
-				nxgep->mac.portnum, val_debug);
-		(void) nxge_mdio_read(nxgep, phy_port_addr, 0x4, 0x18,
-				&val_debug);
-		cmn_err(CE_NOTE, "!Port%d BCM8704 Dev4 Reg 0x18 = 0x%x\n",
-				nxgep->mac.portnum, val_debug);
-	}
-
-	(void) npi_xmac_xpcs_read(nxgep->npi_handle, nxgep->mac.portnum,
-					XPCS_REG_DESCWERR_COUNTER, &val);
-	if (val != 0)
-		cmn_err(CE_NOTE, "!XPCS DESCWERR = 0x%x\n", val);
-
-	(void) npi_xmac_xpcs_read(nxgep->npi_handle, nxgep->mac.portnum,
-					XPCS_REG_SYMBOL_ERR_L0_1_COUNTER, &val);
-	if (val != 0)
-		cmn_err(CE_NOTE, "!XPCS SYMBOL_ERR_L0_1 = 0x%x\n", val);
-
-	(void) npi_xmac_xpcs_read(nxgep->npi_handle, nxgep->mac.portnum,
-					XPCS_REG_SYMBOL_ERR_L2_3_COUNTER, &val);
-	if (val != 0)
-		cmn_err(CE_NOTE, "!XPCS SYMBOL_ERR_L2_3 = 0x%x\n", val);
-#endif
-
-	/* Check from BCM8704 if 10G link is up or down */
-
-	/* Check Device 1 Register 0xA bit0 */
-	status = nxge_mdio_read(nxgep, phy_port_addr,
-			BCM8704_PMA_PMD_DEV_ADDR,
-			BCM8704_PMD_RECEIVE_SIG_DETECT,
-			&val1);
-	if (status != NXGE_OK)
-		goto fail;
-	rx_sig_ok = ((val1 & GLOB_PMD_RX_SIG_OK) ? B_TRUE : B_FALSE);
-
-	/* Check Device 3 Register 0x20 bit0 */
-	if ((status = nxge_mdio_read(nxgep, phy_port_addr,
-			BCM8704_PCS_DEV_ADDR,
-			BCM8704_10GBASE_R_PCS_STATUS_REG,
-			&val2)) != NPI_SUCCESS)
-		goto fail;
-	pcs_blk_lock = ((val2 & PCS_10GBASE_R_PCS_BLK_LOCK) ? B_TRUE : B_FALSE);
-
-	/* Check Device 4 Register 0x18 bit12 */
-	status = nxge_mdio_read(nxgep, phy_port_addr,
-			BCM8704_PHYXS_ADDR,
-			BCM8704_PHYXS_XGXS_LANE_STATUS_REG,
-			&val3);
-	if (status != NXGE_OK)
-		goto fail;
-	link_align = (val3 == (XGXS_LANE_ALIGN_STATUS | XGXS_LANE3_SYNC |
-				XGXS_LANE2_SYNC | XGXS_LANE1_SYNC |
-				XGXS_LANE0_SYNC | 0x400)) ? B_TRUE : B_FALSE;
-
-#ifdef	NXGE_DEBUG_ALIGN_ERR
-	/* Temp workaround for link down issue */
-	if (pcs_blk_lock == B_FALSE) {
-		if (val2 != 0x4) {
-			pcs_blk_lock = B_TRUE;
-			cmn_err(CE_NOTE,
-				"!LINK DEBUG: port%d PHY Dev3 "
-				"Reg 0x20 = 0x%x\n",
-				nxgep->mac.portnum, val2);
-		}
-	}
-
-	if (link_align == B_FALSE) {
-		if (val3 != 0x140f) {
-			link_align = B_TRUE;
-			cmn_err(CE_NOTE,
-				"!LINK DEBUG: port%d PHY Dev4 "
-				"Reg 0x18 = 0x%x\n",
-				nxgep->mac.portnum, val3);
-		}
-	}
-
-	if (rx_sig_ok == B_FALSE) {
-		if ((val2 == 0) || (val3 == 0)) {
-			rx_sig_ok = B_TRUE;
-			cmn_err(CE_NOTE,
-				"!LINK DEBUG: port %d Dev3 or Dev4 read zero\n",
-				nxgep->mac.portnum);
-		}
-	}
-#endif
-
-	*link_up = ((rx_sig_ok == B_TRUE) && (pcs_blk_lock == B_TRUE) &&
-			(link_align == B_TRUE)) ? B_TRUE : B_FALSE;
-
-	return (NXGE_OK);
-fail:
-	return (status);
-}
-
-
-nxge_status_t
-nxge_get_xcvr_type(p_nxge_t nxgep)
-{
-	nxge_status_t status = NXGE_OK;
-
-#if defined(_BIG_ENDIAN)
-	char *prop_val;
-
-	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, nxgep->dip, 0,
-		"phy-type", &prop_val) == DDI_PROP_SUCCESS) {
-		if (strcmp("xgf", prop_val) == 0) {
-			nxgep->statsp->mac_stats.xcvr_inuse = XPCS_XCVR;
-			nxgep->mac.portmode = PORT_10G_FIBER;
-			NXGE_DEBUG_MSG((nxgep, MAC_CTL, "10G Fiber Xcvr"));
-		} else if (strcmp("mif", prop_val) == 0) {
-			nxgep->statsp->mac_stats.xcvr_inuse = INT_MII_XCVR;
-			nxgep->mac.portmode = PORT_1G_COPPER;
-			NXGE_DEBUG_MSG((nxgep, MAC_CTL, "1G Copper Xcvr"));
-		} else if (strcmp("pcs", prop_val) == 0) {
-			nxgep->statsp->mac_stats.xcvr_inuse = PCS_XCVR;
-			nxgep->mac.portmode = PORT_1G_FIBER;
-			NXGE_DEBUG_MSG((nxgep, MAC_CTL, "1G Fiber Xcvr"));
-		} else if (strcmp("xgc", prop_val) == 0) {
-			nxgep->statsp->mac_stats.xcvr_inuse = XPCS_XCVR;
-			nxgep->mac.portmode = PORT_10G_COPPER;
-			NXGE_DEBUG_MSG((nxgep, MAC_CTL, "10G Copper Xcvr"));
-		} else {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-					    "Unknown phy-type: %s",
-					    prop_val));
-			ddi_prop_free(prop_val);
-			return (NXGE_ERROR);
-		}
-		status = NXGE_OK;
-		(void) ddi_prop_update_string(DDI_DEV_T_NONE, nxgep->dip,
-						    "phy-type", prop_val);
-		ddi_prop_free(prop_val);
-	} else {
-		/*
-		 * This should really be an error. But for now default
-		 * this to 10G fiber.
-		 */
-		if (nxgep->niu_type == N2_NIU) {
-			nxgep->statsp->mac_stats.xcvr_inuse = XPCS_XCVR;
-			nxgep->mac.portmode = PORT_10G_FIBER;
-			NXGE_DEBUG_MSG((nxgep, MAC_CTL,
-					    "Cannot find phy-type: "
-					    " Default to 10G Fiber Xcvr"));
-			status = NXGE_OK;
-		} else {
-			NXGE_DEBUG_MSG((nxgep, MAC_CTL,
-					    "Cannot get phy-type"));
-			return (NXGE_ERROR);
-		}
-	}
-#else
-	status = nxge_espc_phy_type_get(nxgep);
-#endif
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_get_xcvr_type"));
-	return (status);
-}
-
-nxge_status_t
-nxge_10g_link_led_on(p_nxge_t nxgep)
-{
-	if (npi_xmac_xif_led(nxgep->npi_handle, nxgep->mac.portnum, B_TRUE)
-							!= NPI_SUCCESS)
-		return (NXGE_ERROR);
-	else
-		return (NXGE_OK);
-}
-
-nxge_status_t
-nxge_10g_link_led_off(p_nxge_t nxgep)
-{
-	if (npi_xmac_xif_led(nxgep->npi_handle, nxgep->mac.portnum, B_FALSE)
-							!= NPI_SUCCESS)
-		return (NXGE_ERROR);
-	else
-		return (NXGE_OK);
-}
--- a/usr/src/uts/sun4v/io/nxge/nxge_main.c	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,4752 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-/*
- * SunOS MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver.
- */
-#include	<sys/nxge/nxge_impl.h>
-#include	<sys/pcie.h>
-
-uint32_t 	nxge_use_partition = 0;		/* debug partition flag */
-uint32_t 	nxge_dma_obp_props_only = 1;	/* use obp published props */
-uint32_t 	nxge_use_rdc_intr = 1;		/* debug to assign rdc intr */
-/*
- * Until MSI-X is supported, assume MSI; use 2 to select MSI-X.
- */
-uint32_t	nxge_msi_enable = 1;		/* debug: turn msi off */
-
-/*
- * Globals: tunable parameters (/etc/system or adb)
- *
- */
-uint32_t 	nxge_rbr_size = NXGE_RBR_RBB_DEFAULT;
-uint32_t 	nxge_rbr_spare_size = 0;
-uint32_t 	nxge_rcr_size = NXGE_RCR_DEFAULT;
-uint32_t 	nxge_tx_ring_size = NXGE_TX_RING_DEFAULT;
-uint32_t 	nxge_no_msg = 0;		/* control message display */
-uint32_t 	nxge_no_link_notify = 0;	/* control DL_NOTIFY */
-uint32_t 	nxge_bcopy_thresh = TX_BCOPY_MAX;
-uint32_t 	nxge_dvma_thresh = TX_FASTDVMA_MIN;
-uint32_t 	nxge_dma_stream_thresh = TX_STREAM_MIN;
-uint32_t	nxge_jumbo_mtu	= TX_JUMBO_MTU;
-boolean_t	nxge_jumbo_enable = B_FALSE;
-uint16_t	nxge_rcr_timeout = NXGE_RDC_RCR_TIMEOUT;
-uint16_t	nxge_rcr_threshold = NXGE_RDC_RCR_THRESHOLD;
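As the comment above notes, these globals are tunable from /etc/system (or patched live with adb/mdb). A hypothetical /etc/system fragment, assuming the module name nxge and purely illustrative values; a reboot is needed for /etc/system changes to take effect.

* Hypothetical /etc/system entries -- illustrative values only.
set nxge:nxge_jumbo_enable = 1
set nxge:nxge_rbr_size = 2048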
-
-/*
- * Debugging flags:
- *		nxge_no_tx_lb : transmit load balancing
- *		nxge_tx_lb_policy: 0 - TCP port (default)
- *				   3 - DEST MAC
- */
-uint32_t 	nxge_no_tx_lb = 0;
-uint32_t 	nxge_tx_lb_policy = NXGE_TX_LB_TCPUDP;
-
-/*
- * Tunable to reduce the amount of time spent in the
- * ISR doing Rx processing.
- */
-uint32_t nxge_max_rx_pkts = 1024;
-
-/*
- * Tunables to manage the receive buffer blocks.
- *
- * nxge_rx_threshold_hi: copy all buffers.
- * nxge_rx_buf_size_type: receive buffer block size type.
- * nxge_rx_threshold_lo: copy only up to tunable block size type.
- */
-nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6;
-nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
-nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3;
-
-rtrace_t npi_rtracebuf;
-
-#if	defined(sun4v)
-/*
- * Hypervisor N2/NIU services information.
- */
-static hsvc_info_t niu_hsvc = {
-	HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER,
-	NIU_MINOR_VER, "nxge"
-};
-#endif
-
-/*
- * Function Prototypes
- */
-static int nxge_attach(dev_info_t *, ddi_attach_cmd_t);
-static int nxge_detach(dev_info_t *, ddi_detach_cmd_t);
-static void nxge_unattach(p_nxge_t);
-
-#if NXGE_PROPERTY
-static void nxge_remove_hard_properties(p_nxge_t);
-#endif
-
-static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t);
-
-static nxge_status_t nxge_setup_mutexes(p_nxge_t);
-static void nxge_destroy_mutexes(p_nxge_t);
-
-static nxge_status_t nxge_map_regs(p_nxge_t nxgep);
-static void nxge_unmap_regs(p_nxge_t nxgep);
-#ifdef	NXGE_DEBUG
-static void nxge_test_map_regs(p_nxge_t nxgep);
-#endif
-
-static nxge_status_t nxge_add_intrs(p_nxge_t nxgep);
-static nxge_status_t nxge_add_soft_intrs(p_nxge_t nxgep);
-static void nxge_remove_intrs(p_nxge_t nxgep);
-static void nxge_remove_soft_intrs(p_nxge_t nxgep);
-
-static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep);
-static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t);
-static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t);
-static void nxge_intrs_enable(p_nxge_t nxgep);
-static void nxge_intrs_disable(p_nxge_t nxgep);
-
-static void nxge_suspend(p_nxge_t);
-static nxge_status_t nxge_resume(p_nxge_t);
-
-static nxge_status_t nxge_setup_dev(p_nxge_t);
-static void nxge_destroy_dev(p_nxge_t);
-
-static nxge_status_t nxge_alloc_mem_pool(p_nxge_t);
-static void nxge_free_mem_pool(p_nxge_t);
-
-static nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
-static void nxge_free_rx_mem_pool(p_nxge_t);
-
-static nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
-static void nxge_free_tx_mem_pool(p_nxge_t);
-
-static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t,
-	struct ddi_dma_attr *,
-	size_t, ddi_device_acc_attr_t *, uint_t,
-	p_nxge_dma_common_t);
-
-static void nxge_dma_mem_free(p_nxge_dma_common_t);
-
-static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t,
-	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
-static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);
-
-static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t,
-	p_nxge_dma_common_t *, size_t);
-static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);
-
-static nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t,
-	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
-static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);
-
-static nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t,
-	p_nxge_dma_common_t *,
-	size_t);
-static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);
-
-static int nxge_init_common_dev(p_nxge_t);
-static void nxge_uninit_common_dev(p_nxge_t);
-
-/*
- * The next declarations are for the GLDv3 interface.
- */
-static int nxge_m_start(void *);
-static void nxge_m_stop(void *);
-static int nxge_m_unicst(void *, const uint8_t *);
-static int nxge_m_multicst(void *, boolean_t, const uint8_t *);
-static int nxge_m_promisc(void *, boolean_t);
-static void nxge_m_ioctl(void *, queue_t *, mblk_t *);
-static void nxge_m_resources(void *);
-mblk_t *nxge_m_tx(void *arg, mblk_t *);
-static nxge_status_t nxge_mac_register(p_nxge_t);
-static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr,
-	mac_addr_slot_t slot);
-static void nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot,
-	boolean_t factory);
-static int nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr);
-static int nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr);
-static int nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot);
-static int nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr);
-static int nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr);
-
-#define	NXGE_NEPTUNE_MAGIC	0x4E584745UL
-#define	MAX_DUMP_SZ 256
-
-#define	NXGE_M_CALLBACK_FLAGS	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB)
-
-static	boolean_t	nxge_m_getcapab(void *, mac_capab_t, void *);
-static mac_callbacks_t nxge_m_callbacks = {
-	NXGE_M_CALLBACK_FLAGS,
-	nxge_m_stat,
-	nxge_m_start,
-	nxge_m_stop,
-	nxge_m_promisc,
-	nxge_m_multicst,
-	nxge_m_unicst,
-	nxge_m_tx,
-	nxge_m_resources,
-	nxge_m_ioctl,
-	nxge_m_getcapab
-};
-
-void
-nxge_err_inject(p_nxge_t, queue_t *, mblk_t *);
-
-/*
- * These global variables control the message
- * output.
- */
-out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG;
-uint64_t nxge_debug_level = 0;
-
-/*
- * This list contains the instance structures for the Neptune
- * devices present in the system. The lock exists to guarantee
- * mutually exclusive access to the list.
- */
-void 			*nxge_list = NULL;
-
-void			*nxge_hw_list = NULL;
-nxge_os_mutex_t 	nxge_common_lock;
-
-nxge_os_mutex_t		nxge_mii_lock;
-static uint32_t		nxge_mii_lock_init = 0;
-nxge_os_mutex_t		nxge_mdio_lock;
-static uint32_t		nxge_mdio_lock_init = 0;
-
-extern uint64_t 	npi_debug_level;
-
-extern nxge_status_t	nxge_ldgv_init(p_nxge_t, int *, int *);
-extern nxge_status_t	nxge_ldgv_init_n2(p_nxge_t, int *, int *);
-extern nxge_status_t	nxge_ldgv_uninit(p_nxge_t);
-extern nxge_status_t	nxge_intr_ldgv_init(p_nxge_t);
-extern void		nxge_fm_init(p_nxge_t,
-					ddi_device_acc_attr_t *,
-					ddi_device_acc_attr_t *,
-					ddi_dma_attr_t *);
-extern void		nxge_fm_fini(p_nxge_t);
-extern npi_status_t	npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);
-
-/*
- * Count used to maintain the number of buffers being used
- * by Neptune instances and loaned up to the upper layers.
- */
-uint32_t nxge_mblks_pending = 0;
-
-/*
- * Device register access attributes for PIO.
- */
-static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = {
-	DDI_DEVICE_ATTR_V0,
-	DDI_STRUCTURE_LE_ACC,
-	DDI_STRICTORDER_ACC,
-};
-
-/*
- * Device descriptor access attributes for DMA.
- */
-static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = {
-	DDI_DEVICE_ATTR_V0,
-	DDI_STRUCTURE_LE_ACC,
-	DDI_STRICTORDER_ACC
-};
-
-/*
- * Device buffer access attributes for DMA.
- */
-static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = {
-	DDI_DEVICE_ATTR_V0,
-	DDI_STRUCTURE_BE_ACC,
-	DDI_STRICTORDER_ACC
-};
-
-ddi_dma_attr_t nxge_desc_dma_attr = {
-	DMA_ATTR_V0,		/* version number. */
-	0,			/* low address */
-	0xffffffffffffffff,	/* high address */
-	0xffffffffffffffff,	/* address counter max */
-#ifndef NIU_PA_WORKAROUND
-	0x100000,		/* alignment */
-#else
-	0x2000,
-#endif
-	0xfc00fc,		/* dlim_burstsizes */
-	0x1,			/* minimum transfer size */
-	0xffffffffffffffff,	/* maximum transfer size */
-	0xffffffffffffffff,	/* maximum segment size */
-	1,			/* scatter/gather list length */
-	(unsigned int) 1,	/* granularity */
-	0			/* attribute flags */
-};
-
-ddi_dma_attr_t nxge_tx_dma_attr = {
-	DMA_ATTR_V0,		/* version number. */
-	0,			/* low address */
-	0xffffffffffffffff,	/* high address */
-	0xffffffffffffffff,	/* address counter max */
-#if defined(_BIG_ENDIAN)
-	0x2000,			/* alignment */
-#else
-	0x1000,			/* alignment */
-#endif
-	0xfc00fc,		/* dlim_burstsizes */
-	0x1,			/* minimum transfer size */
-	0xffffffffffffffff,	/* maximum transfer size */
-	0xffffffffffffffff,	/* maximum segment size */
-	5,			/* scatter/gather list length */
-	(unsigned int) 1,	/* granularity */
-	0			/* attribute flags */
-};
-
-ddi_dma_attr_t nxge_rx_dma_attr = {
-	DMA_ATTR_V0,		/* version number. */
-	0,			/* low address */
-	0xffffffffffffffff,	/* high address */
-	0xffffffffffffffff,	/* address counter max */
-	0x2000,			/* alignment */
-	0xfc00fc,		/* dlim_burstsizes */
-	0x1,			/* minimum transfer size */
-	0xffffffffffffffff,	/* maximum transfer size */
-	0xffffffffffffffff,	/* maximum segment size */
-	1,			/* scatter/gather list length */
-	(unsigned int) 1,	/* granularity */
-	0			/* attribute flags */
-};
-
-ddi_dma_lim_t nxge_dma_limits = {
-	(uint_t)0,		/* dlim_addr_lo */
-	(uint_t)0xffffffff,	/* dlim_addr_hi */
-	(uint_t)0xffffffff,	/* dlim_cntr_max */
-	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
-	0x1,			/* dlim_minxfer */
-	1024			/* dlim_speed */
-};
-
-dma_method_t nxge_force_dma = DVMA;
-
-/*
- * DMA chunk sizes.
- *
- * Try to allocate the largest possible size
- * so that fewer DMA chunks need to be managed.
- */
-#ifdef NIU_PA_WORKAROUND
-size_t alloc_sizes [] = {0x2000};
-#else
-size_t alloc_sizes [] = {0x1000, 0x2000, 0x4000, 0x8000,
-		0x10000, 0x20000, 0x40000, 0x80000,
-		0x100000, 0x200000, 0x400000, 0x800000, 0x1000000};
-#endif
-
-/*
- * Device attach entry point; also handles DDI_RESUME and DDI_PM_RESUME.
- */
-
-static int
-nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
-{
-	p_nxge_t	nxgep = NULL;
-	int		instance;
-	int		status = DDI_SUCCESS;
-	nxge_status_t	nxge_status = NXGE_OK;
-	uint8_t		portn;
-	nxge_mmac_t	*mmac_info;
-
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach"));
-
-	/*
-	 * Get the device instance since we'll need to setup
-	 * or retrieve a soft state for this instance.
-	 */
-	instance = ddi_get_instance(dip);
-
-	switch (cmd) {
-	case DDI_ATTACH:
-		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH"));
-		break;
-
-	case DDI_RESUME:
-		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME"));
-		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
-		if (nxgep == NULL) {
-			status = DDI_FAILURE;
-			break;
-		}
-		if (nxgep->dip != dip) {
-			status = DDI_FAILURE;
-			break;
-		}
-		if (nxgep->suspended == DDI_PM_SUSPEND) {
-			status = ddi_dev_is_needed(nxgep->dip, 0, 1);
-		} else {
-			nxge_status = nxge_resume(nxgep);
-		}
-		goto nxge_attach_exit;
-
-	case DDI_PM_RESUME:
-		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME"));
-		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
-		if (nxgep == NULL) {
-			status = DDI_FAILURE;
-			break;
-		}
-		if (nxgep->dip != dip) {
-			status = DDI_FAILURE;
-			break;
-		}
-		nxge_status = nxge_resume(nxgep);
-		goto nxge_attach_exit;
-
-	default:
-		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown"));
-		status = DDI_FAILURE;
-		goto nxge_attach_exit;
-	}
-
-
-	if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) {
-		status = DDI_FAILURE;
-		goto nxge_attach_exit;
-	}
-
-	nxgep = ddi_get_soft_state(nxge_list, instance);
-	if (nxgep == NULL) {
-		goto nxge_attach_fail;
-	}
-
-	nxgep->drv_state = 0;
-	nxgep->dip = dip;
-	nxgep->instance = instance;
-	nxgep->p_dip = ddi_get_parent(dip);
-	nxgep->nxge_debug_level = nxge_debug_level;
-	npi_debug_level = nxge_debug_level;
-
-	nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr, &nxge_dev_desc_dma_acc_attr,
-				&nxge_rx_dma_attr);
-
-	status = nxge_map_regs(nxgep);
-	if (status != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed"));
-		goto nxge_attach_fail;
-	}
-
-	status = nxge_init_common_dev(nxgep);
-	if (status != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_init_common_dev failed"));
-		goto nxge_attach_fail;
-	}
-
-	portn = NXGE_GET_PORT_NUM(nxgep->function_num);
-	nxgep->mac.portnum = portn;
-	if ((portn == 0) || (portn == 1))
-		nxgep->mac.porttype = PORT_TYPE_XMAC;
-	else
-		nxgep->mac.porttype = PORT_TYPE_BMAC;
-	/*
-	 * Neptune has 4 ports; the first 2 ports use XMAC (10G MAC)
-	 * internally, and the remaining 2 ports use BMAC (1G "Big" MAC).
-	 * The two types of MAC have different characteristics.
-	 */
-	mmac_info = &nxgep->nxge_mmac_info;
-	if (nxgep->function_num < 2) {
-		mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY;
-		mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY;
-	} else {
-		mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY;
-		mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY;
-	}
-	/*
-	 * Set up the ndd parameters for this instance.
-	 */
-	nxge_init_param(nxgep);
-
-	/*
-	 * Setup Register Tracing Buffer.
-	 */
-	npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf);
-
-	/* init stats ptr */
-	nxge_init_statsp(nxgep);
-	status = nxge_get_xcvr_type(nxgep);
-
-	if (status != NXGE_OK) {
-		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_attach: "
-				    " Couldn't determine card type"
-				    " .... exit "));
-		goto nxge_attach_fail;
-	}
-
-	if ((nxgep->niu_type == NEPTUNE) &&
-		(nxgep->mac.portmode == PORT_10G_FIBER)) {
-		nxgep->niu_type = NEPTUNE_2;
-	}
-
-	status = nxge_get_config_properties(nxgep);
-
-	if (status != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "get_hw create failed"));
-		goto nxge_attach_fail;
-	}
-
-	nxge_get_xcvr_properties(nxgep);
-
-	/*
-	 * Setup the Kstats for the driver.
-	 */
-	nxge_setup_kstats(nxgep);
-
-	nxge_setup_param(nxgep);
-
-	status = nxge_setup_system_dma_pages(nxgep);
-	if (status != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed"));
-		goto nxge_attach_fail;
-	}
-
-#if	defined(sun4v)
-	if (nxgep->niu_type == N2_NIU) {
-		nxgep->niu_hsvc_available = B_FALSE;
-		bcopy(&niu_hsvc, &nxgep->niu_hsvc, sizeof (hsvc_info_t));
-		if ((status =
-			hsvc_register(&nxgep->niu_hsvc,
-					&nxgep->niu_min_ver)) != 0) {
-				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-					"nxge_attach: "
-					"%s: cannot negotiate "
-					"hypervisor services "
-					"revision %d "
-					"group: 0x%lx "
-					"major: 0x%lx minor: 0x%lx "
-					"errno: %d",
-					niu_hsvc.hsvc_modname,
-					niu_hsvc.hsvc_rev,
-					niu_hsvc.hsvc_group,
-					niu_hsvc.hsvc_major,
-					niu_hsvc.hsvc_minor,
-					status));
-				status = DDI_FAILURE;
-				goto nxge_attach_fail;
-		}
-
-		nxgep->niu_hsvc_available = B_TRUE;
-		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-			"NIU Hypervisor service enabled"));
-	}
-#endif
-
-	nxge_hw_id_init(nxgep);
-	nxge_hw_init_niu_common(nxgep);
-
-	status = nxge_setup_mutexes(nxgep);
-	if (status != NXGE_OK) {
-		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed"));
-		goto nxge_attach_fail;
-	}
-
-	status = nxge_setup_dev(nxgep);
-	if (status != DDI_SUCCESS) {
-		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed"));
-		goto nxge_attach_fail;
-	}
-
-	status = nxge_add_intrs(nxgep);
-	if (status != DDI_SUCCESS) {
-		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed"));
-		goto nxge_attach_fail;
-	}
-	status = nxge_add_soft_intrs(nxgep);
-	if (status != DDI_SUCCESS) {
-		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "add_soft_intr failed"));
-		goto nxge_attach_fail;
-	}
-
-	/*
-	 * Enable interrupts.
-	 */
-	nxge_intrs_enable(nxgep);
-
-	if ((status = nxge_mac_register(nxgep)) != DDI_SUCCESS) {
-		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-			"unable to register to mac layer (%d)", status));
-		goto nxge_attach_fail;
-	}
-
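-	/*
-	 * Report the link state as unknown to the MAC layer until the
-	 * link monitor (started below) determines the actual state.
-	 */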
-	mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN);
-
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "registered to mac (instance %d)",
-		instance));
-
-	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
-
-	goto nxge_attach_exit;
-
-nxge_attach_fail:
-	nxge_unattach(nxgep);
-	if (nxge_status != NXGE_OK)
-		nxge_status = (NXGE_ERROR | NXGE_DDI_FAILED);
-	nxgep = NULL;
-
-nxge_attach_exit:
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x",
-		status));
-
-	return (status);
-}
-
-static int
-nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
-{
-	int 		status = DDI_SUCCESS;
-	int 		instance;
-	p_nxge_t 	nxgep = NULL;
-
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach"));
-	instance = ddi_get_instance(dip);
-	nxgep = ddi_get_soft_state(nxge_list, instance);
-	if (nxgep == NULL) {
-		status = DDI_FAILURE;
-		goto nxge_detach_exit;
-	}
-
-	switch (cmd) {
-	case DDI_DETACH:
-		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH"));
-		break;
-
-	case DDI_PM_SUSPEND:
-		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
-		nxgep->suspended = DDI_PM_SUSPEND;
-		nxge_suspend(nxgep);
-		break;
-
-	case DDI_SUSPEND:
-		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND"));
-		if (nxgep->suspended != DDI_PM_SUSPEND) {
-			nxgep->suspended = DDI_SUSPEND;
-			nxge_suspend(nxgep);
-		}
-		break;
-
-	default:
-		status = DDI_FAILURE;
-	}
-
-	if (cmd != DDI_DETACH)
-		goto nxge_detach_exit;
-
-	/*
-	 * Stop the xcvr polling.
-	 */
-	nxgep->suspended = cmd;
-
-	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
-
-	if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"<== nxge_detach status = 0x%08X", status));
-		return (DDI_FAILURE);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-		"<== nxge_detach (mac_unregister) status = 0x%08X", status));
-
-	nxge_unattach(nxgep);
-	nxgep = NULL;
-
-nxge_detach_exit:
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X",
-		status));
-
-	return (status);
-}
-
-static void
-nxge_unattach(p_nxge_t nxgep)
-{
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach"));
-
-	if (nxgep == NULL || nxgep->dev_regs == NULL) {
-		return;
-	}
-
-	if (nxgep->nxge_hw_p) {
-		nxge_uninit_common_dev(nxgep);
-		nxgep->nxge_hw_p = NULL;
-	}
-
-	if (nxgep->nxge_timerid) {
-		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
-		nxgep->nxge_timerid = 0;
-	}
-
-#if	defined(sun4v)
-	if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) {
-		(void) hsvc_unregister(&nxgep->niu_hsvc);
-		nxgep->niu_hsvc_available = B_FALSE;
-	}
-#endif
-	/*
-	 * Stop any further interrupts.
-	 */
-	nxge_remove_intrs(nxgep);
-
-	/* Remove soft interrupts. */
-	nxge_remove_soft_intrs(nxgep);
-
-	/*
-	 * Stop the device and free resources.
-	 */
-	nxge_destroy_dev(nxgep);
-
-	/*
-	 * Tear down the ndd parameters setup.
-	 */
-	nxge_destroy_param(nxgep);
-
-	/*
-	 * Tear down the kstat setup.
-	 */
-	nxge_destroy_kstats(nxgep);
-
-	/*
-	 * Destroy all mutexes.
-	 */
-	nxge_destroy_mutexes(nxgep);
-
-	/*
-	 * Remove the list of ndd parameters which
-	 * were setup during attach.
-	 */
-	if (nxgep->dip) {
-		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
-				    " nxge_unattach: remove all properties"));
-
-		(void) ddi_prop_remove_all(nxgep->dip);
-	}
-
-#if NXGE_PROPERTY
-	nxge_remove_hard_properties(nxgep);
-#endif
-
-	/*
-	 * Unmap the register setup.
-	 */
-	nxge_unmap_regs(nxgep);
-
-	nxge_fm_fini(nxgep);
-
-	ddi_soft_state_free(nxge_list, nxgep->instance);
-
-	NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach"));
-}
-
-static char n2_siu_name[] = "niu";
-
-static nxge_status_t
-nxge_map_regs(p_nxge_t nxgep)
-{
-	int		ddi_status = DDI_SUCCESS;
-	p_dev_regs_t 	dev_regs;
-	char		buf[MAXPATHLEN + 1];
-	char 		*devname;
-#ifdef	NXGE_DEBUG
-	char 		*sysname;
-#endif
-	off_t		regsize;
-	nxge_status_t	status = NXGE_OK;
-#if !defined(_BIG_ENDIAN)
-	off_t pci_offset;
-	uint16_t pcie_devctl;
-#endif
-
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs"));
-	nxgep->dev_regs = NULL;
-	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
-	dev_regs->nxge_regh = NULL;
-	dev_regs->nxge_pciregh = NULL;
-	dev_regs->nxge_msix_regh = NULL;
-	dev_regs->nxge_vir_regh = NULL;
-	dev_regs->nxge_vir2_regh = NULL;
-	nxgep->niu_type = NEPTUNE;
-
-	devname = ddi_pathname(nxgep->dip, buf);
-	ASSERT(strlen(devname) > 0);
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-		"nxge_map_regs: pathname devname %s", devname));
-
-	if (strstr(devname, n2_siu_name)) {
-		/* N2/NIU */
-		nxgep->niu_type = N2_NIU;
-		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-			"nxge_map_regs: N2/NIU devname %s", devname));
-		/* get function number */
-		nxgep->function_num =
-			(devname[strlen(devname) -1] == '1' ? 1 : 0);
-		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-			"nxge_map_regs: N2/NIU function number %d",
-			nxgep->function_num));
-	} else {
-		int		*prop_val;
-		uint_t 		prop_len;
-		uint8_t 	func_num;
-
-		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip,
-				0, "reg",
-				&prop_val, &prop_len) != DDI_PROP_SUCCESS) {
-			NXGE_DEBUG_MSG((nxgep, VPD_CTL,
-				"Reg property not found"));
-			ddi_status = DDI_FAILURE;
-			goto nxge_map_regs_fail0;
-
-		} else {
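-			/*
-			 * The first integer of the PCI "reg" property
-			 * (phys.hi) encodes bus/device/function; bits
-			 * 10:8 carry the function number.
-			 */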
-			func_num = (prop_val[0] >> 8) & 0x7;
-			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-				"Reg property found: fun # %d",
-				func_num));
-			nxgep->function_num = func_num;
-			ddi_prop_free(prop_val);
-		}
-	}
-
-	switch (nxgep->niu_type) {
-	case NEPTUNE:
-	case NEPTUNE_2:
-	default:
-		(void) ddi_dev_regsize(nxgep->dip, 0, &regsize);
-		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-			"nxge_map_regs: pci config size 0x%x", regsize));
-
-		ddi_status = ddi_regs_map_setup(nxgep->dip, 0,
-			(caddr_t *)&(dev_regs->nxge_pciregp), 0, 0,
-			&nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh);
-		if (ddi_status != DDI_SUCCESS) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"ddi_map_regs, nxge bus config regs failed"));
-			goto nxge_map_regs_fail0;
-		}
-		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-			"nxge_map_reg: PCI config addr 0x%0llx "
-			" handle 0x%0llx", dev_regs->nxge_pciregp,
-			dev_regs->nxge_pciregh));
-		/*
-		 * IMPORTANT: workaround for a bit-swapping bug in the
-		 * hardware which ends up enabling no-snoop, resulting
-		 * in DMA not being synchronized properly.
-		 */
-#if !defined(_BIG_ENDIAN)
-		/* workarounds for x86 systems */
-		pci_offset = 0x80 + PCIE_DEVCTL;
-		pcie_devctl = 0x0;
-		pcie_devctl &= PCIE_DEVCTL_ENABLE_NO_SNOOP;
-		pcie_devctl |= PCIE_DEVCTL_RO_EN;
-		pci_config_put16(dev_regs->nxge_pciregh, pci_offset,
-				    pcie_devctl);
-#endif
-
-		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
-		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-			"nxge_map_regs: pio size 0x%x", regsize));
-		/* set up the device mapped register */
-		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
-			(caddr_t *)&(dev_regs->nxge_regp), 0, 0,
-			&nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
-		if (ddi_status != DDI_SUCCESS) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"ddi_map_regs for Neptune global reg failed"));
-			goto nxge_map_regs_fail1;
-		}
-
-		/* set up the msi/msi-x mapped register */
-		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
-		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-			"nxge_map_regs: msix size 0x%x", regsize));
-		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
-			(caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0,
-			&nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh);
-		if (ddi_status != DDI_SUCCESS) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"ddi_map_regs for msi reg failed"));
-			goto nxge_map_regs_fail2;
-		}
-
-		/* set up the vio region mapped register */
-		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
-		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-			"nxge_map_regs: vio size 0x%x", regsize));
-		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
-			(caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
-			&nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);
-
-		if (ddi_status != DDI_SUCCESS) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"ddi_map_regs for nxge vio reg failed"));
-			goto nxge_map_regs_fail3;
-		}
-		nxgep->dev_regs = dev_regs;
-
-		NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh);
-		NPI_PCI_ADD_HANDLE_SET(nxgep,
-			(npi_reg_ptr_t)dev_regs->nxge_pciregp);
-		NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh);
-		NPI_MSI_ADD_HANDLE_SET(nxgep,
-			(npi_reg_ptr_t)dev_regs->nxge_msix_regp);
-
-		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
-		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);
-
-		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
-		NPI_REG_ADD_HANDLE_SET(nxgep,
-			(npi_reg_ptr_t)dev_regs->nxge_regp);
-
-		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
-		NPI_VREG_ADD_HANDLE_SET(nxgep,
-			(npi_reg_ptr_t)dev_regs->nxge_vir_regp);
-
-		break;
-
-	case N2_NIU:
-		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU"));
-		/*
-		 * Set up the device mapped register (FWARC 2006/556)
-		 * (changed back to 1: reg starts at 1!)
-		 */
-		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
-		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-			"nxge_map_regs: dev size 0x%x", regsize));
-		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
-				(caddr_t *)&(dev_regs->nxge_regp), 0, 0,
-				&nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
-
-		if (ddi_status != DDI_SUCCESS) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"ddi_map_regs for N2/NIU, global reg failed "));
-			goto nxge_map_regs_fail1;
-		}
-
-		/* set up the vio region mapped register */
-		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
-		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-			"nxge_map_regs: vio (1) size 0x%x", regsize));
-		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
-			(caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
-			&nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);
-
-		if (ddi_status != DDI_SUCCESS) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"ddi_map_regs for nxge vio reg failed"));
-			goto nxge_map_regs_fail2;
-		}
-		/* set up the vio region mapped register */
-		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
-		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-			"nxge_map_regs: vio (3) size 0x%x", regsize));
-		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
-			(caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0,
-			&nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh);
-
-		if (ddi_status != DDI_SUCCESS) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"ddi_map_regs for nxge vio2 reg failed"));
-			goto nxge_map_regs_fail3;
-		}
-		nxgep->dev_regs = dev_regs;
-
-		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
-		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);
-
-		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
-		NPI_REG_ADD_HANDLE_SET(nxgep,
-			(npi_reg_ptr_t)dev_regs->nxge_regp);
-
-		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
-		NPI_VREG_ADD_HANDLE_SET(nxgep,
-			(npi_reg_ptr_t)dev_regs->nxge_vir_regp);
-
-		NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh);
-		NPI_V2REG_ADD_HANDLE_SET(nxgep,
-			(npi_reg_ptr_t)dev_regs->nxge_vir2_regp);
-
-		break;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx "
-		" handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh));
-
-	goto nxge_map_regs_exit;
-nxge_map_regs_fail3:
-	if (dev_regs->nxge_msix_regh) {
-		ddi_regs_map_free(&dev_regs->nxge_msix_regh);
-	}
-	if (dev_regs->nxge_vir_regh) {
-		ddi_regs_map_free(&dev_regs->nxge_vir_regh);
-	}
-nxge_map_regs_fail2:
-	if (dev_regs->nxge_regh) {
-		ddi_regs_map_free(&dev_regs->nxge_regh);
-	}
-nxge_map_regs_fail1:
-	if (dev_regs->nxge_pciregh) {
-		ddi_regs_map_free(&dev_regs->nxge_pciregh);
-	}
-nxge_map_regs_fail0:
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory"));
-	kmem_free(dev_regs, sizeof (dev_regs_t));
-
-nxge_map_regs_exit:
-	if (ddi_status != DDI_SUCCESS)
-		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs"));
-	return (status);
-}
-
-static void
-nxge_unmap_regs(p_nxge_t nxgep)
-{
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs"));
-	if (nxgep->dev_regs) {
-		if (nxgep->dev_regs->nxge_pciregh) {
-			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-				"==> nxge_unmap_regs: bus"));
-			ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh);
-			nxgep->dev_regs->nxge_pciregh = NULL;
-		}
-		if (nxgep->dev_regs->nxge_regh) {
-			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-				"==> nxge_unmap_regs: device registers"));
-			ddi_regs_map_free(&nxgep->dev_regs->nxge_regh);
-			nxgep->dev_regs->nxge_regh = NULL;
-		}
-		if (nxgep->dev_regs->nxge_msix_regh) {
-			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-				"==> nxge_unmap_regs: device interrupts"));
-			ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh);
-			nxgep->dev_regs->nxge_msix_regh = NULL;
-		}
-		if (nxgep->dev_regs->nxge_vir_regh) {
-			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-				"==> nxge_unmap_regs: vio region"));
-			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh);
-			nxgep->dev_regs->nxge_vir_regh = NULL;
-		}
-		if (nxgep->dev_regs->nxge_vir2_regh) {
-			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-				"==> nxge_unmap_regs: vio2 region"));
-			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh);
-			nxgep->dev_regs->nxge_vir2_regh = NULL;
-		}
-
-		kmem_free(nxgep->dev_regs, sizeof (dev_regs_t));
-		nxgep->dev_regs = NULL;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs"));
-}
-
-static nxge_status_t
-nxge_setup_mutexes(p_nxge_t nxgep)
-{
-	int ddi_status = DDI_SUCCESS;
-	nxge_status_t status = NXGE_OK;
-	nxge_classify_t *classify_ptr;
-	int partition;
-
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes"));
-
-	/*
-	 * Get the interrupt cookie so the mutexes can be
-	 * initialized.
-	 */
-	ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0,
-					&nxgep->interrupt_cookie);
-	if (ddi_status != DDI_SUCCESS) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"<== nxge_setup_mutexes: failed 0x%x", ddi_status));
-		goto nxge_setup_mutexes_exit;
-	}
-
-	/* Initialize global mutex */
-
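-	/*
-	 * The MDIO and MII locks are shared by all nxge instances;
-	 * reference-count them so they are created once and destroyed
-	 * only when the last instance goes away.
-	 */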
-	if (nxge_mdio_lock_init == 0) {
-		MUTEX_INIT(&nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL);
-	}
-	atomic_add_32(&nxge_mdio_lock_init, 1);
-
-	if (nxge_mii_lock_init == 0) {
-		MUTEX_INIT(&nxge_mii_lock, NULL, MUTEX_DRIVER, NULL);
-	}
-	atomic_add_32(&nxge_mii_lock_init, 1);
-
-	nxgep->drv_state |= STATE_MDIO_LOCK_INIT;
-	nxgep->drv_state |= STATE_MII_LOCK_INIT;
-
-	/*
-	 * Initialize mutex's for this device.
-	 */
-	MUTEX_INIT(nxgep->genlock, NULL,
-		MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
-	MUTEX_INIT(&nxgep->ouraddr_lock, NULL,
-		MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
-	MUTEX_INIT(&nxgep->mif_lock, NULL,
-		MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
-	RW_INIT(&nxgep->filter_lock, NULL,
-		RW_DRIVER, (void *)nxgep->interrupt_cookie);
-
-	classify_ptr = &nxgep->classifier;
-	/*
-	 * FFLP mutexes are never used in interrupt context, as FFLP
-	 * operations can take a very long time to complete and hence
-	 * are not suitable to invoke from interrupt handlers.
-	 */
-	MUTEX_INIT(&classify_ptr->tcam_lock, NULL,
-			    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
-	if (nxgep->niu_type == NEPTUNE) {
-		MUTEX_INIT(&classify_ptr->fcram_lock, NULL,
-			    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
-		for (partition = 0; partition < MAX_PARTITION; partition++) {
-			MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL,
-			    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
-		}
-	}
-
-nxge_setup_mutexes_exit:
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-			"<== nxge_setup_mutexes status = %x", status));
-
-	if (ddi_status != DDI_SUCCESS)
-		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
-
-	return (status);
-}
-
-static void
-nxge_destroy_mutexes(p_nxge_t nxgep)
-{
-	int partition;
-	nxge_classify_t *classify_ptr;
-
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes"));
-	RW_DESTROY(&nxgep->filter_lock);
-	MUTEX_DESTROY(&nxgep->mif_lock);
-	MUTEX_DESTROY(&nxgep->ouraddr_lock);
-	MUTEX_DESTROY(nxgep->genlock);
-
-	classify_ptr = &nxgep->classifier;
-	MUTEX_DESTROY(&classify_ptr->tcam_lock);
-
-	/* Free data structures, based on HW type. */
-	if (nxgep->niu_type == NEPTUNE) {
-		MUTEX_DESTROY(&classify_ptr->fcram_lock);
-		for (partition = 0; partition < MAX_PARTITION; partition++) {
-			MUTEX_DESTROY(&classify_ptr->hash_lock[partition]);
-		}
-	}
-	if (nxgep->drv_state & STATE_MDIO_LOCK_INIT) {
-		if (nxge_mdio_lock_init == 1) {
-			MUTEX_DESTROY(&nxge_mdio_lock);
-		}
-		atomic_add_32(&nxge_mdio_lock_init, -1);
-	}
-	if (nxgep->drv_state & STATE_MII_LOCK_INIT) {
-		if (nxge_mii_lock_init == 1) {
-			MUTEX_DESTROY(&nxge_mii_lock);
-		}
-		atomic_add_32(&nxge_mii_lock_init, -1);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes"));
-}
-
-nxge_status_t
-nxge_init(p_nxge_t nxgep)
-{
-	nxge_status_t	status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init"));
-
-	if (nxgep->drv_state & STATE_HW_INITIALIZED) {
-		return (status);
-	}
-
-	/*
-	 * Allocate system memory for the receive/transmit buffer blocks
-	 * and receive/transmit descriptor rings.
-	 */
-	status = nxge_alloc_mem_pool(nxgep);
-	if (status != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n"));
-		goto nxge_init_fail1;
-	}
-
-	/*
-	 * Initialize and enable TXC registers
-	 * (Globally enable TX controller,
-	 *  enable a port, configure dma channel bitmap,
-	 *  configure the max burst size).
-	 */
-	status = nxge_txc_init(nxgep);
-	if (status != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txc failed\n"));
-		goto nxge_init_fail2;
-	}
-
-	/*
-	 * Initialize and enable TXDMA channels.
-	 */
-	status = nxge_init_txdma_channels(nxgep);
-	if (status != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n"));
-		goto nxge_init_fail3;
-	}
-
-	/*
-	 * Initialize and enable RXDMA channels.
-	 */
-	status = nxge_init_rxdma_channels(nxgep);
-	if (status != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n"));
-		goto nxge_init_fail4;
-	}
-
-	/*
-	 * Initialize TCAM and FCRAM (Neptune).
-	 */
-	status = nxge_classify_init(nxgep);
-	if (status != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init classify failed\n"));
-		goto nxge_init_fail5;
-	}
-
-	/*
-	 * Initialize ZCP
-	 */
-	status = nxge_zcp_init(nxgep);
-	if (status != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n"));
-		goto nxge_init_fail5;
-	}
-
-	/*
-	 * Initialize IPP.
-	 */
-	status = nxge_ipp_init(nxgep);
-	if (status != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n"));
-		goto nxge_init_fail5;
-	}
-
-	/*
-	 * Initialize the MAC block.
-	 */
-	status = nxge_mac_init(nxgep);
-	if (status != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n"));
-		goto nxge_init_fail5;
-	}
-
-	nxge_intrs_enable(nxgep);
-
-	/*
-	 * Enable hardware interrupts.
-	 */
-	nxge_intr_hw_enable(nxgep);
-	nxgep->drv_state |= STATE_HW_INITIALIZED;
-
-	goto nxge_init_exit;
-
-nxge_init_fail5:
-	nxge_uninit_rxdma_channels(nxgep);
-nxge_init_fail4:
-	nxge_uninit_txdma_channels(nxgep);
-nxge_init_fail3:
-	(void) nxge_txc_uninit(nxgep);
-nxge_init_fail2:
-	nxge_free_mem_pool(nxgep);
-nxge_init_fail1:
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-		"<== nxge_init status (failed) = 0x%08x", status));
-	return (status);
-
-nxge_init_exit:
-
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x",
-		status));
-	return (status);
-}
-
-
-timeout_id_t
-nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec)
-{
-	if ((nxgep->suspended == 0) ||
-			(nxgep->suspended == DDI_RESUME)) {
-		return (timeout(func, (caddr_t)nxgep,
-			drv_usectohz(1000 * msec)));
-	}
-	return (NULL);
-}
-
-/*ARGSUSED*/
-void
-nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid)
-{
-	if (timerid) {
-		(void) untimeout(timerid);
-	}
-}
-
-void
-nxge_uninit(p_nxge_t nxgep)
-{
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit"));
-
-	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
-		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-			"==> nxge_uninit: not initialized"));
-		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-			"<== nxge_uninit"));
-		return;
-	}
-
-	/* stop timer */
-	if (nxgep->nxge_timerid) {
-		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
-		nxgep->nxge_timerid = 0;
-	}
-
-	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
-	(void) nxge_intr_hw_disable(nxgep);
-
-	/*
-	 * Reset the receive MAC side.
-	 */
-	(void) nxge_rx_mac_disable(nxgep);
-
-	/* Disable and soft reset the IPP */
-	(void) nxge_ipp_disable(nxgep);
-
-	/* Free classification resources */
-	(void) nxge_classify_uninit(nxgep);
-
-	/*
-	 * Reset the transmit/receive DMA side.
-	 */
-	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
-	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);
-
-	nxge_uninit_txdma_channels(nxgep);
-	nxge_uninit_rxdma_channels(nxgep);
-
-	/*
-	 * Reset the transmit MAC side.
-	 */
-	(void) nxge_tx_mac_disable(nxgep);
-
-	nxge_free_mem_pool(nxgep);
-
-	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
-
-	nxgep->drv_state &= ~STATE_HW_INITIALIZED;
-
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: "
-		"nxge_mblks_pending %d", nxge_mblks_pending));
-}
-
-void
-nxge_get64(p_nxge_t nxgep, p_mblk_t mp)
-{
-	uint64_t	reg;
-	uint64_t	regdata;
-	int		i, retry;
-
-	bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
-	regdata = 0;
-	retry = 1;
-
-	for (i = 0; i < retry; i++) {
-		NXGE_REG_RD64(nxgep->npi_handle, reg, &regdata);
-	}
-	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
-}
-
-void
-nxge_put64(p_nxge_t nxgep, p_mblk_t mp)
-{
-	uint64_t	reg;
-	uint64_t	buf[2];
-
-	bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
-	reg = buf[0];
-
-	NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]);
-}
-
-
-nxge_os_mutex_t nxgedebuglock;
-int nxge_debug_init = 0;
-
-/*ARGSUSED*/
-/*VARARGS*/
-void
-nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...)
-{
-	char msg_buffer[1048];
-	char prefix_buffer[32];
-	int instance;
-	uint64_t debug_level;
-	int cmn_level = CE_CONT;
-	va_list ap;
-
-	debug_level = (nxgep == NULL) ? nxge_debug_level :
-		nxgep->nxge_debug_level;
-
-	if ((level & debug_level) ||
-		(level == NXGE_NOTE) ||
-		(level == NXGE_ERR_CTL)) {
-		/* do the msg processing */
-		if (nxge_debug_init == 0) {
-			MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL);
-			nxge_debug_init = 1;
-		}
-
-		MUTEX_ENTER(&nxgedebuglock);
-
-		if ((level & NXGE_NOTE)) {
-			cmn_level = CE_NOTE;
-		}
-
-		if (level & NXGE_ERR_CTL) {
-			cmn_level = CE_WARN;
-		}
-
-		va_start(ap, fmt);
-		(void) vsprintf(msg_buffer, fmt, ap);
-		va_end(ap);
-		if (nxgep == NULL) {
-			instance = -1;
-			(void) sprintf(prefix_buffer, "%s :", "nxge");
-		} else {
-			instance = nxgep->instance;
-			(void) sprintf(prefix_buffer,
-						    "%s%d :", "nxge", instance);
-		}
-
-		MUTEX_EXIT(&nxgedebuglock);
-		cmn_err(cmn_level, "!%s %s\n",
-				prefix_buffer, msg_buffer);
-
-	}
-}
-
-char *
-nxge_dump_packet(char *addr, int size)
-{
-	uchar_t *ap = (uchar_t *)addr;
-	int i;
-	static char etherbuf[1024];
-	char *cp = etherbuf;
-	char digits[] = "0123456789abcdef";
-
-	if (!size)
-		size = 60;
-
-	if (size > MAX_DUMP_SZ) {
-		/* Dump the leading bytes */
-		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
-			if (*ap > 0x0f)
-				*cp++ = digits[*ap >> 4];
-			*cp++ = digits[*ap++ & 0xf];
-			*cp++ = ':';
-		}
-		for (i = 0; i < 20; i++)
-			*cp++ = '.';
-		/* Dump the last MAX_DUMP_SZ/2 bytes */
-		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ/2));
-		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
-			if (*ap > 0x0f)
-				*cp++ = digits[*ap >> 4];
-			*cp++ = digits[*ap++ & 0xf];
-			*cp++ = ':';
-		}
-	} else {
-		for (i = 0; i < size; i++) {
-			if (*ap > 0x0f)
-				*cp++ = digits[*ap >> 4];
-			*cp++ = digits[*ap++ & 0xf];
-			*cp++ = ':';
-		}
-	}
-	*--cp = 0;
-	return (etherbuf);
-}
-
-#ifdef	NXGE_DEBUG
-static void
-nxge_test_map_regs(p_nxge_t nxgep)
-{
-	ddi_acc_handle_t cfg_handle;
-	p_pci_cfg_t	cfg_ptr;
-	ddi_acc_handle_t dev_handle;
-	char		*dev_ptr;
-	ddi_acc_handle_t pci_config_handle;
-	uint32_t	regval;
-	int		i;
-
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs"));
-
-	dev_handle = nxgep->dev_regs->nxge_regh;
-	dev_ptr = (char *)nxgep->dev_regs->nxge_regp;
-
-	if (nxgep->niu_type == NEPTUNE) {
-		cfg_handle = nxgep->dev_regs->nxge_pciregh;
-		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
-
-		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-			"Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr));
-		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-			"Neptune PCI cfg_ptr vendor id ptr 0x%llx",
-			&cfg_ptr->vendorid));
-		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-			"\tvendorid 0x%x devid 0x%x",
-			NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0),
-			NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid,    0)));
-		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-			"PCI BAR: base 0x%x base14 0x%x base 18 0x%x "
-			"bar1c 0x%x",
-			NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base,   0),
-			NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0),
-			NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0),
-			NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0)));
-		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-			"\nNeptune PCI BAR: base20 0x%x base24 0x%x "
-			"base 28 0x%x bar2c 0x%x\n",
-			NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0),
-			NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0),
-			NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0),
-			NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0)));
-		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-			"\nNeptune PCI BAR: base30 0x%x\n",
-			NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0)));
-
-		cfg_handle = nxgep->dev_regs->nxge_pciregh;
-		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
-		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-			"first  0x%llx second 0x%llx third 0x%llx "
-			"last 0x%llx ",
-			NXGE_PIO_READ64(dev_handle,
-				    (uint64_t *)(dev_ptr + 0),  0),
-			NXGE_PIO_READ64(dev_handle,
-				    (uint64_t *)(dev_ptr + 8),  0),
-			NXGE_PIO_READ64(dev_handle,
-				    (uint64_t *)(dev_ptr + 16), 0),
-			NXGE_PIO_READ64(cfg_handle,
-				    (uint64_t *)(dev_ptr + 24), 0)));
-	}
-}
-
-#endif
-
-static void
-nxge_suspend(p_nxge_t nxgep)
-{
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend"));
-
-	nxge_intrs_disable(nxgep);
-	nxge_destroy_dev(nxgep);
-
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend"));
-}
-
-static nxge_status_t
-nxge_resume(p_nxge_t nxgep)
-{
-	nxge_status_t status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume"));
-	nxgep->suspended = DDI_RESUME;
-
-	nxge_global_reset(nxgep);
-	nxgep->suspended = 0;
-
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-			"<== nxge_resume status = 0x%x", status));
-	return (status);
-}
-
-static nxge_status_t
-nxge_setup_dev(p_nxge_t nxgep)
-{
-	nxge_status_t	status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d",
-			nxgep->mac.portnum));
-
-	status = nxge_xcvr_find(nxgep);
-	if (status != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			    " nxge_setup_dev status "
-			    " (xcvr find 0x%08x)", status));
-		goto nxge_setup_dev_exit;
-	}
-
-	status = nxge_link_init(nxgep);
-
-	if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"port%d Bad register acc handle", nxgep->mac.portnum));
-		status = NXGE_ERROR;
-	}
-
-	if (status != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			    " nxge_setup_dev status "
-			    "(xcvr init 0x%08x)", status));
-		goto nxge_setup_dev_exit;
-	}
-
-nxge_setup_dev_exit:
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-		"<== nxge_setup_dev port %d status = 0x%08x",
-		nxgep->mac.portnum, status));
-
-	return (status);
-}
-
-static void
-nxge_destroy_dev(p_nxge_t nxgep)
-{
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev"));
-
-	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
-
-	(void) nxge_hw_stop(nxgep);
-
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev"));
-}
-
-static nxge_status_t
-nxge_setup_system_dma_pages(p_nxge_t nxgep)
-{
-	int 			ddi_status = DDI_SUCCESS;
-	uint_t 			count;
-	ddi_dma_cookie_t 	cookie;
-	uint_t 			iommu_pagesize;
-	nxge_status_t		status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages"));
-	nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1);
-	if (nxgep->niu_type != N2_NIU) {
-		iommu_pagesize = dvma_pagesize(nxgep->dip);
-		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-			" nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
-			" default_block_size %d iommu_pagesize %d",
-			nxgep->sys_page_sz,
-			ddi_ptob(nxgep->dip, (ulong_t)1),
-			nxgep->rx_default_block_size,
-			iommu_pagesize));
-
-		if (iommu_pagesize != 0) {
-			if (nxgep->sys_page_sz == iommu_pagesize) {
-				if (iommu_pagesize > 0x4000)
-					nxgep->sys_page_sz = 0x4000;
-			} else {
-				if (nxgep->sys_page_sz > iommu_pagesize)
-					nxgep->sys_page_sz = iommu_pagesize;
-			}
-		}
-	}
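-	/*
-	 * The page mask is the complement of (page size - 1); e.g. a
-	 * 4K (0x1000) page size yields a mask of ~0xFFF.
-	 */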
-	nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-		"==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
-		"default_block_size %d page mask %d",
-		nxgep->sys_page_sz,
-		ddi_ptob(nxgep->dip, (ulong_t)1),
-		nxgep->rx_default_block_size,
-		nxgep->sys_page_mask));
-
-
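-	/*
-	 * Map the chosen system page size to the default receive block
-	 * size and the corresponding RBR block-size code; unsupported
-	 * page sizes fall back to 4K.
-	 */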
-	switch (nxgep->sys_page_sz) {
-	default:
-		nxgep->sys_page_sz = 0x1000;
-		nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
-		nxgep->rx_default_block_size = 0x1000;
-		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
-		break;
-	case 0x1000:
-		nxgep->rx_default_block_size = 0x1000;
-		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
-		break;
-	case 0x2000:
-		nxgep->rx_default_block_size = 0x2000;
-		nxgep->rx_bksize_code = RBR_BKSIZE_8K;
-		break;
-	case 0x4000:
-		nxgep->rx_default_block_size = 0x4000;
-		nxgep->rx_bksize_code = RBR_BKSIZE_16K;
-		break;
-	case 0x8000:
-		nxgep->rx_default_block_size = 0x8000;
-		nxgep->rx_bksize_code = RBR_BKSIZE_32K;
-		break;
-	}
-
-#ifndef USE_RX_BIG_BUF
-	nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz;
-#else
-	nxgep->rx_default_block_size = 0x2000;
-	nxgep->rx_bksize_code = RBR_BKSIZE_8K;
-#endif
-	/*
-	 * Get the system DMA burst size.
-	 */
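-	/*
-	 * A spare DMA handle is allocated and bound only so that
-	 * ddi_dma_burstsizes() can report the burst sizes the system
-	 * supports; it is unbound and freed immediately afterwards.
-	 */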
-	ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
-			DDI_DMA_DONTWAIT, 0,
-			&nxgep->dmasparehandle);
-	if (ddi_status != DDI_SUCCESS) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"ddi_dma_alloc_handle: failed "
-			" status 0x%x", ddi_status));
-		goto nxge_get_soft_properties_exit;
-	}
-
-	ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL,
-				(caddr_t)nxgep->dmasparehandle,
-				sizeof (nxgep->dmasparehandle),
-				DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
-				DDI_DMA_DONTWAIT, 0,
-				&cookie, &count);
-	if (ddi_status != DDI_DMA_MAPPED) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"Binding spare handle to find system"
-			" burstsize failed."));
-		ddi_status = DDI_FAILURE;
-		goto nxge_get_soft_properties_fail1;
-	}
-
-	nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle);
-	(void) ddi_dma_unbind_handle(nxgep->dmasparehandle);
-
-nxge_get_soft_properties_fail1:
-	ddi_dma_free_handle(&nxgep->dmasparehandle);
-
-nxge_get_soft_properties_exit:
-
-	if (ddi_status != DDI_SUCCESS)
-		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
-
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-		"<== nxge_setup_system_dma_pages status = 0x%08x", status));
-	return (status);
-}
-
-static nxge_status_t
-nxge_alloc_mem_pool(p_nxge_t nxgep)
-{
-	nxge_status_t	status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool"));
-
-	status = nxge_alloc_rx_mem_pool(nxgep);
-	if (status != NXGE_OK) {
-		return (NXGE_ERROR);
-	}
-
-	status = nxge_alloc_tx_mem_pool(nxgep);
-	if (status != NXGE_OK) {
-		nxge_free_rx_mem_pool(nxgep);
-		return (NXGE_ERROR);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool"));
-	return (NXGE_OK);
-}
-
-static void
-nxge_free_mem_pool(p_nxge_t nxgep)
-{
-	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool"));
-
-	nxge_free_rx_mem_pool(nxgep);
-	nxge_free_tx_mem_pool(nxgep);
-
-	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool"));
-}
-
-static nxge_status_t
-nxge_alloc_rx_mem_pool(p_nxge_t nxgep)
-{
-	int			i, j;
-	uint32_t		ndmas, st_rdc;
-	p_nxge_dma_pt_cfg_t	p_all_cfgp;
-	p_nxge_hw_pt_cfg_t	p_cfgp;
-	p_nxge_dma_pool_t	dma_poolp;
-	p_nxge_dma_common_t	*dma_buf_p;
-	p_nxge_dma_pool_t	dma_cntl_poolp;
-	p_nxge_dma_common_t	*dma_cntl_p;
-	size_t			rx_buf_alloc_size;
-	size_t			rx_cntl_alloc_size;
-	uint32_t 		*num_chunks; /* per dma */
-	nxge_status_t		status = NXGE_OK;
-
-	uint32_t		nxge_port_rbr_size;
-	uint32_t		nxge_port_rbr_spare_size;
-	uint32_t		nxge_port_rcr_size;
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool"));
-
-	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
-	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
-	st_rdc = p_cfgp->start_rdc;
-	ndmas = p_cfgp->max_rdcs;
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
-		" nxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas));
-
-	/*
-	 * Allocate memory for each receive DMA channel.
-	 */
-	dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
-			KM_SLEEP);
-	dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
-			sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);
-
-	dma_cntl_poolp = (p_nxge_dma_pool_t)
-				KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
-	dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
-			sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);
-
-	num_chunks = (uint32_t *)KMEM_ZALLOC(
-			sizeof (uint32_t) * ndmas, KM_SLEEP);
-
-	/*
-	 * Assume that each DMA channel will be configured with default
-	 * block size.
-	 * rbr block counts are mod of batch count (16).
-	 */
-	nxge_port_rbr_size = p_all_cfgp->rbr_size;
-	nxge_port_rcr_size = p_all_cfgp->rcr_size;
-
-	if (!nxge_port_rbr_size) {
-		nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT;
-	}
-	if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) {
-		nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH *
-			(nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1));
-	}
-
-	p_all_cfgp->rbr_size = nxge_port_rbr_size;
-	nxge_port_rbr_spare_size = nxge_rbr_spare_size;
-
-	if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) {
-		nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH *
-			(nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1));
-	}
-
-	/*
-	/*
-	 * N2/NIU has limitations on descriptor sizes: contiguous
-	 * memory allocation for data buffers is limited to 4M
-	 * (contig_mem_alloc), and control buffers must be little
-	 * endian (allocated with the ddi/dki mem alloc functions).
-	 */
-#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
-	if (nxgep->niu_type == N2_NIU) {
-		nxge_port_rbr_spare_size = 0;
-		if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) ||
-				(!ISP2(nxge_port_rbr_size))) {
-			nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX;
-		}
-		if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) ||
-				(!ISP2(nxge_port_rcr_size))) {
-			nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX;
-		}
-	}
-#endif
-
-	rx_buf_alloc_size = (nxgep->rx_default_block_size *
-		(nxge_port_rbr_size + nxge_port_rbr_spare_size));
-
-	/*
-	 * Addresses of receive block ring, receive completion ring and the
-	 * mailbox must be all cache-aligned (64 bytes).
-	 */
-	rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size;
-	rx_cntl_alloc_size *= (sizeof (rx_desc_t));
-	rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size);
-	rx_cntl_alloc_size += sizeof (rxdma_mailbox_t);
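-	/*
-	 * That is:
-	 *   rx_cntl_alloc_size = (rbr + spare) * sizeof (rx_desc_t) +
-	 *	rcr * sizeof (rcr_entry_t) + sizeof (rxdma_mailbox_t)
-	 */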
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: "
-		"nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d "
-		"nxge_port_rcr_size = %d "
-		"rx_cntl_alloc_size = %d",
-		nxge_port_rbr_size, nxge_port_rbr_spare_size,
-		nxge_port_rcr_size,
-		rx_cntl_alloc_size));
-
-#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
-	if (nxgep->niu_type == N2_NIU) {
-		if (!ISP2(rx_buf_alloc_size)) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"==> nxge_alloc_rx_mem_pool: "
-				" must be power of 2"));
-			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
-			goto nxge_alloc_rx_mem_pool_exit;
-		}
-
-		if (rx_buf_alloc_size > (1 << 22)) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"==> nxge_alloc_rx_mem_pool: "
-				" limit size to 4M"));
-			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
-			goto nxge_alloc_rx_mem_pool_exit;
-		}
-
-		if (rx_cntl_alloc_size < 0x2000) {
-			rx_cntl_alloc_size = 0x2000;
-		}
-	}
-#endif
-	nxgep->nxge_port_rbr_size = nxge_port_rbr_size;
-	nxgep->nxge_port_rcr_size = nxge_port_rcr_size;
-
-	/*
-	 * Allocate memory for receive buffers and descriptor rings.
-	 * Replace allocation functions with interface functions provided
-	 * by the partition manager when it is available.
-	 */
-	/*
-	 * Allocate memory for the receive buffer blocks.
-	 */
-	for (i = 0; i < ndmas; i++) {
-		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-			" nxge_alloc_rx_mem_pool to alloc mem: "
-			" dma %d dma_buf_p %llx &dma_buf_p %llx",
-			i, dma_buf_p[i], &dma_buf_p[i]));
-		num_chunks[i] = 0;
-		status = nxge_alloc_rx_buf_dma(nxgep, st_rdc, &dma_buf_p[i],
-				rx_buf_alloc_size,
-				nxgep->rx_default_block_size, &num_chunks[i]);
-		if (status != NXGE_OK) {
-			break;
-		}
-		st_rdc++;
-		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-			" nxge_alloc_rx_mem_pool DONE  alloc mem: "
-			"dma %d dma_buf_p %llx &dma_buf_p %llx", i,
-			dma_buf_p[i], &dma_buf_p[i]));
-	}
-	if (i < ndmas) {
-		goto nxge_alloc_rx_mem_fail1;
-	}
-	/*
-	 * Allocate memory for descriptor rings and mailbox.
-	 */
-	st_rdc = p_cfgp->start_rdc;
-	for (j = 0; j < ndmas; j++) {
-		status = nxge_alloc_rx_cntl_dma(nxgep, st_rdc, &dma_cntl_p[j],
-					rx_cntl_alloc_size);
-		if (status != NXGE_OK) {
-			break;
-		}
-		st_rdc++;
-	}
-	if (j < ndmas) {
-		goto nxge_alloc_rx_mem_fail2;
-	}
-
-	dma_poolp->ndmas = ndmas;
-	dma_poolp->num_chunks = num_chunks;
-	dma_poolp->buf_allocated = B_TRUE;
-	nxgep->rx_buf_pool_p = dma_poolp;
-	dma_poolp->dma_buf_pool_p = dma_buf_p;
-
-	dma_cntl_poolp->ndmas = ndmas;
-	dma_cntl_poolp->buf_allocated = B_TRUE;
-	nxgep->rx_cntl_pool_p = dma_cntl_poolp;
-	dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
-
-	goto nxge_alloc_rx_mem_pool_exit;
-
-nxge_alloc_rx_mem_fail2:
-	/* Free control buffers */
-	j--;
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
-		"==> nxge_alloc_rx_mem_pool: freeing control bufs (%d)", j));
-	for (; j >= 0; j--) {
-		nxge_free_rx_cntl_dma(nxgep,
-			(p_nxge_dma_common_t)dma_cntl_p[j]);
-		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
-			"==> nxge_alloc_rx_mem_pool: control bufs freed (%d)",
-			j));
-	}
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
-		"==> nxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
-
-nxge_alloc_rx_mem_fail1:
-	/* Free data buffers */
-	i--;
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
-		"==> nxge_alloc_rx_mem_pool: freeing data bufs (%d)", i));
-	for (; i >= 0; i--) {
-		nxge_free_rx_buf_dma(nxgep, (p_nxge_dma_common_t)dma_buf_p[i],
-			num_chunks[i]);
-	}
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
-		"==> nxge_alloc_rx_mem_pool: data bufs freed (%d)", i));
-
-	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
-	KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t));
-	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t));
-	KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t));
-	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t));
-
-nxge_alloc_rx_mem_pool_exit:
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
-		"<== nxge_alloc_rx_mem_pool:status 0x%08x", status));
-
-	return (status);
-}
-
-static void
-nxge_free_rx_mem_pool(p_nxge_t nxgep)
-{
-	uint32_t		i, ndmas;
-	p_nxge_dma_pool_t	dma_poolp;
-	p_nxge_dma_common_t	*dma_buf_p;
-	p_nxge_dma_pool_t	dma_cntl_poolp;
-	p_nxge_dma_common_t	*dma_cntl_p;
-	uint32_t 		*num_chunks;
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool"));
-
-	dma_poolp = nxgep->rx_buf_pool_p;
-	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
-		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-			"<== nxge_free_rx_mem_pool "
-			"(null rx buf pool or buf not allocated)"));
-		return;
-	}
-
-	dma_cntl_poolp = nxgep->rx_cntl_pool_p;
-	if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
-		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-			"<== nxge_free_rx_mem_pool "
-			"(null rx cntl buf pool or cntl buf not allocated)"));
-		return;
-	}
-
-	dma_buf_p = dma_poolp->dma_buf_pool_p;
-	num_chunks = dma_poolp->num_chunks;
-
-	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
-	ndmas = dma_cntl_poolp->ndmas;
-
-	for (i = 0; i < ndmas; i++) {
-		nxge_free_rx_buf_dma(nxgep, dma_buf_p[i], num_chunks[i]);
-	}
-
-	for (i = 0; i < ndmas; i++) {
-		nxge_free_rx_cntl_dma(nxgep, dma_cntl_p[i]);
-	}
-
-	for (i = 0; i < ndmas; i++) {
-		KMEM_FREE(dma_buf_p[i],
-			sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
-		KMEM_FREE(dma_cntl_p[i], sizeof (nxge_dma_common_t));
-	}
-
-	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
-	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t));
-	KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t));
-	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t));
-	KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t));
-
-	nxgep->rx_buf_pool_p = NULL;
-	nxgep->rx_cntl_pool_p = NULL;
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool"));
-}
-
-
-static nxge_status_t
-nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel,
-	p_nxge_dma_common_t *dmap,
-	size_t alloc_size, size_t block_size, uint32_t *num_chunks)
-{
-	p_nxge_dma_common_t 	rx_dmap;
-	nxge_status_t		status = NXGE_OK;
-	size_t			total_alloc_size;
-	size_t			allocated = 0;
-	int			i, size_index, array_size;
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma"));
-
-	rx_dmap = (p_nxge_dma_common_t)
-			KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK,
-			KM_SLEEP);
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-		" alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ",
-		dma_channel, alloc_size, block_size, dmap));
-
-	total_alloc_size = alloc_size;
-
-#if defined(RX_USE_RECLAIM_POST)
-	total_alloc_size = alloc_size + alloc_size/4;
-#endif
-
-	i = 0;
-	size_index = 0;
-	array_size =  sizeof (alloc_sizes)/sizeof (size_t);
-	while ((size_index < array_size) &&
-			(alloc_sizes[size_index] < alloc_size))
-			size_index++;
-	if (size_index >= array_size) {
-		size_index = array_size - 1;
-	}
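-	/*
-	 * size_index now selects the smallest entry in alloc_sizes[]
-	 * that can hold alloc_size (or the largest entry if none can);
-	 * the loop below allocates chunks at that size, stepping down
-	 * to smaller sizes if an allocation fails.
-	 */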
-
-	while ((allocated < total_alloc_size) &&
-			(size_index >= 0) && (i < NXGE_DMA_BLOCK)) {
-		rx_dmap[i].dma_chunk_index = i;
-		rx_dmap[i].block_size = block_size;
-		rx_dmap[i].alength = alloc_sizes[size_index];
-		rx_dmap[i].orig_alength = rx_dmap[i].alength;
-		rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
-		rx_dmap[i].dma_channel = dma_channel;
-		rx_dmap[i].contig_alloc_type = B_FALSE;
-
-		/*
-		 * N2/NIU: data buffers must be contiguous as the driver
-		 *	   needs to call Hypervisor api to set up
-		 *	   logical pages.
-		 */
-		if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) {
-			rx_dmap[i].contig_alloc_type = B_TRUE;
-		}
-
-		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-			"alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
-			"i %d nblocks %d alength %d",
-			dma_channel, i, &rx_dmap[i], block_size,
-			i, rx_dmap[i].nblocks,
-			rx_dmap[i].alength));
-		status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
-			&nxge_rx_dma_attr,
-			rx_dmap[i].alength,
-			&nxge_dev_buf_dma_acc_attr,
-			DDI_DMA_READ | DDI_DMA_STREAMING,
-			(p_nxge_dma_common_t)(&rx_dmap[i]));
-		if (status != NXGE_OK) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				" nxge_alloc_rx_buf_dma: Alloc Failed "));
-			size_index--;
-		} else {
-			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
-				" alloc_rx_buf_dma allocated rdc %d "
-				"chunk %d size %x dvma %x bufp %llx ",
-				dma_channel, i, rx_dmap[i].alength,
-				rx_dmap[i].ioaddr_pp, &rx_dmap[i]));
-			i++;
-			allocated += alloc_sizes[size_index];
-		}
-	}
-
-
-	if (allocated < total_alloc_size) {
-		goto nxge_alloc_rx_mem_fail1;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
-		" alloc_rx_buf_dma rdc %d allocated %d chunks",
-		dma_channel, i));
-	*num_chunks = i;
-	*dmap = rx_dmap;
-
-	goto nxge_alloc_rx_mem_exit;
-
-nxge_alloc_rx_mem_fail1:
-	KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
-
-nxge_alloc_rx_mem_exit:
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
-		"<== nxge_alloc_rx_buf_dma status 0x%08x", status));
-
-	return (status);
-}
-
-/*ARGSUSED*/
-static void
-nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap,
-    uint32_t num_chunks)
-{
-	int		i;
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-		"==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks));
-
-	for (i = 0; i < num_chunks; i++) {
-		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-			"==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx",
-				i, dmap));
-		nxge_dma_mem_free(dmap++);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_buf_dma"));
-}
-
-/*ARGSUSED*/
-static nxge_status_t
-nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel,
-    p_nxge_dma_common_t *dmap, size_t size)
-{
-	p_nxge_dma_common_t 	rx_dmap;
-	nxge_status_t		status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma"));
-
-	rx_dmap = (p_nxge_dma_common_t)
-			KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);
-
-	rx_dmap->contig_alloc_type = B_FALSE;
-
-	status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
-			&nxge_desc_dma_attr,
-			size,
-			&nxge_dev_desc_dma_acc_attr,
-			DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
-			rx_dmap);
-	if (status != NXGE_OK) {
-		goto nxge_alloc_rx_cntl_dma_fail1;
-	}
-
-	*dmap = rx_dmap;
-	goto nxge_alloc_rx_cntl_dma_exit;
-
-nxge_alloc_rx_cntl_dma_fail1:
-	KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t));
-
-nxge_alloc_rx_cntl_dma_exit:
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
-		"<== nxge_alloc_rx_cntl_dma status 0x%08x", status));
-
-	return (status);
-}
-
-/*ARGSUSED*/
-static void
-nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
-{
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma"));
-
-	nxge_dma_mem_free(dmap);
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma"));
-}
-
-static nxge_status_t
-nxge_alloc_tx_mem_pool(p_nxge_t nxgep)
-{
-	nxge_status_t		status = NXGE_OK;
-	int			i, j;
-	uint32_t		ndmas, st_tdc;
-	p_nxge_dma_pt_cfg_t	p_all_cfgp;
-	p_nxge_hw_pt_cfg_t	p_cfgp;
-	p_nxge_dma_pool_t	dma_poolp;
-	p_nxge_dma_common_t	*dma_buf_p;
-	p_nxge_dma_pool_t	dma_cntl_poolp;
-	p_nxge_dma_common_t	*dma_cntl_p;
-	size_t			tx_buf_alloc_size;
-	size_t			tx_cntl_alloc_size;
-	uint32_t		*num_chunks; /* per dma */
-
-	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool"));
-
-	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
-	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
-	st_tdc = p_cfgp->start_tdc;
-	ndmas = p_cfgp->max_tdcs;
-
-	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool: "
-		"p_cfgp 0x%016llx start_tdc %d ndmas %d nxgep->max_tdcs %d",
-		p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, nxgep->max_tdcs));
-	/*
-	 * Allocate memory for each transmit DMA channel.
-	 */
-	dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
-			KM_SLEEP);
-	dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
-			sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);
-
-	dma_cntl_poolp = (p_nxge_dma_pool_t)
-			KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
-	dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
-			sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);
-
-#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
-	/*
-	 * N2/NIU has limitations on descriptor sizes: contiguous
-	 * memory allocation for data buffers is limited to 4M
-	 * (contig_mem_alloc), and control buffers must be little
-	 * endian (allocated with the ddi/dki mem alloc functions).
-	 * The transmit ring is limited to 8K (including the mailbox).
-	 */
-	if (nxgep->niu_type == N2_NIU) {
-		if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) ||
-			(!ISP2(nxge_tx_ring_size))) {
-			nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX;
-		}
-	}
-#endif
-
-	nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size;
-
-	/*
-	 * Assume that each DMA channel will be configured with the
-	 * default transmit buffer size for copying transmit data.
-	 * (Packets with payloads over this limit will not be copied.)
-	 */
-	tx_buf_alloc_size = (nxge_bcopy_thresh * nxge_tx_ring_size);
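-	/*
-	 * For example, a 2 KB bcopy threshold with a 1024-entry
-	 * transmit ring would reserve 2 MB of copy buffers per channel.
-	 */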
-
-	/*
-	 * Addresses of transmit descriptor ring and the
-	 * mailbox must be all cache-aligned (64 bytes).
-	 */
-	tx_cntl_alloc_size = nxge_tx_ring_size;
-	tx_cntl_alloc_size *= (sizeof (tx_desc_t));
-	tx_cntl_alloc_size += sizeof (txdma_mailbox_t);
-
-#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
-	if (nxgep->niu_type == N2_NIU) {
-		if (!ISP2(tx_buf_alloc_size)) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"==> nxge_alloc_tx_mem_pool: "
-				" must be power of 2"));
-			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
-			goto nxge_alloc_tx_mem_pool_exit;
-		}
-
-		if (tx_buf_alloc_size > (1 << 22)) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"==> nxge_alloc_tx_mem_pool: "
-				" limit size to 4M"));
-			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
-			goto nxge_alloc_tx_mem_pool_exit;
-		}
-
-		if (tx_cntl_alloc_size < 0x2000) {
-			tx_cntl_alloc_size = 0x2000;
-		}
-	}
-#endif
-
-	num_chunks = (uint32_t *)KMEM_ZALLOC(
-			sizeof (uint32_t) * ndmas, KM_SLEEP);
-
-	/*
-	 * Allocate memory for transmit buffers and descriptor rings.
-	 * Replace allocation functions with interface functions provided
-	 * by the partition manager when it is available.
-	 *
-	 * Allocate memory for the transmit buffer pool.
-	 */
-	for (i = 0; i < ndmas; i++) {
-		num_chunks[i] = 0;
-		status = nxge_alloc_tx_buf_dma(nxgep, st_tdc, &dma_buf_p[i],
-					tx_buf_alloc_size,
-					nxge_bcopy_thresh, &num_chunks[i]);
-		if (status != NXGE_OK) {
-			break;
-		}
-		st_tdc++;
-	}
-	if (i < ndmas) {
-		goto nxge_alloc_tx_mem_pool_fail1;
-	}
-
-	st_tdc = p_cfgp->start_tdc;
-	/*
-	 * Allocate memory for descriptor rings and mailbox.
-	 */
-	for (j = 0; j < ndmas; j++) {
-		status = nxge_alloc_tx_cntl_dma(nxgep, st_tdc, &dma_cntl_p[j],
-					tx_cntl_alloc_size);
-		if (status != NXGE_OK) {
-			break;
-		}
-		st_tdc++;
-	}
-	if (j < ndmas) {
-		goto nxge_alloc_tx_mem_pool_fail2;
-	}
-
-	dma_poolp->ndmas = ndmas;
-	dma_poolp->num_chunks = num_chunks;
-	dma_poolp->buf_allocated = B_TRUE;
-	dma_poolp->dma_buf_pool_p = dma_buf_p;
-	nxgep->tx_buf_pool_p = dma_poolp;
-
-	dma_cntl_poolp->ndmas = ndmas;
-	dma_cntl_poolp->buf_allocated = B_TRUE;
-	dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
-	nxgep->tx_cntl_pool_p = dma_cntl_poolp;
-
-	NXGE_DEBUG_MSG((nxgep, MEM_CTL,
-		"==> nxge_alloc_tx_mem_pool: start_tdc %d "
-		"ndmas %d poolp->ndmas %d",
-		st_tdc, ndmas, dma_poolp->ndmas));
-
-	goto nxge_alloc_tx_mem_pool_exit;
-
-nxge_alloc_tx_mem_pool_fail2:
-	/* Free control buffers */
-	j--;
-	for (; j >= 0; j--) {
-		nxge_free_tx_cntl_dma(nxgep,
-			(p_nxge_dma_common_t)dma_cntl_p[j]);
-	}
-
-nxge_alloc_tx_mem_pool_fail1:
-	/* Free data buffers */
-	i--;
-	for (; i >= 0; i--) {
-		nxge_free_tx_buf_dma(nxgep, (p_nxge_dma_common_t)dma_buf_p[i],
-			num_chunks[i]);
-	}
-
-	KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t));
-	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t));
-	KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t));
-	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t));
-	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
-
-nxge_alloc_tx_mem_pool_exit:
-	NXGE_DEBUG_MSG((nxgep, MEM_CTL,
-		"<== nxge_alloc_tx_mem_pool:status 0x%08x", status));
-
-	return (status);
-}
-
-static nxge_status_t
-nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel,
-    p_nxge_dma_common_t *dmap, size_t alloc_size,
-    size_t block_size, uint32_t *num_chunks)
-{
-	p_nxge_dma_common_t 	tx_dmap;
-	nxge_status_t		status = NXGE_OK;
-	size_t			total_alloc_size;
-	size_t			allocated = 0;
-	int			i, size_index, array_size;
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma"));
-
-	tx_dmap = (p_nxge_dma_common_t)
-		KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK,
-			KM_SLEEP);
-
-	total_alloc_size = alloc_size;
-	i = 0;
-	size_index = 0;
-	array_size =  sizeof (alloc_sizes) /  sizeof (size_t);
-	while ((size_index < array_size) &&
-		(alloc_sizes[size_index] < alloc_size))
-		size_index++;
-	if (size_index >= array_size) {
-		size_index = array_size - 1;
-	}
-
-	while ((allocated < total_alloc_size) &&
-			(size_index >= 0) && (i < NXGE_DMA_BLOCK)) {
-
-		tx_dmap[i].dma_chunk_index = i;
-		tx_dmap[i].block_size = block_size;
-		tx_dmap[i].alength = alloc_sizes[size_index];
-		tx_dmap[i].orig_alength = tx_dmap[i].alength;
-		tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
-		tx_dmap[i].dma_channel = dma_channel;
-		tx_dmap[i].contig_alloc_type = B_FALSE;
-
-		/*
-		 * N2/NIU: data buffers must be contiguous as the driver
-		 *	   needs to call Hypervisor api to set up
-		 *	   logical pages.
-		 */
-		if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) {
-			tx_dmap[i].contig_alloc_type = B_TRUE;
-		}
-
-		status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
-			&nxge_tx_dma_attr,
-			tx_dmap[i].alength,
-			&nxge_dev_buf_dma_acc_attr,
-			DDI_DMA_WRITE | DDI_DMA_STREAMING,
-			(p_nxge_dma_common_t)(&tx_dmap[i]));
-		if (status != NXGE_OK) {
-			size_index--;
-		} else {
-			i++;
-			allocated += alloc_sizes[size_index];
-		}
-	}
-
-	if (allocated < total_alloc_size) {
-		goto nxge_alloc_tx_mem_fail1;
-	}
-
-	*num_chunks = i;
-	*dmap = tx_dmap;
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
-		"==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d",
-		*dmap, i));
-	goto nxge_alloc_tx_mem_exit;
-
-nxge_alloc_tx_mem_fail1:
-	KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
-
-nxge_alloc_tx_mem_exit:
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
-		"<== nxge_alloc_tx_buf_dma status 0x%08x", status));
-
-	return (status);
-}
-
-/*ARGSUSED*/
-static void
-nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap,
-    uint32_t num_chunks)
-{
-	int		i;
-
-	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma"));
-
-	for (i = 0; i < num_chunks; i++) {
-		nxge_dma_mem_free(dmap++);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma"));
-}
-
-/*ARGSUSED*/
-static nxge_status_t
-nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel,
-    p_nxge_dma_common_t *dmap, size_t size)
-{
-	p_nxge_dma_common_t 	tx_dmap;
-	nxge_status_t		status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma"));
-	tx_dmap = (p_nxge_dma_common_t)
-			KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);
-
-	tx_dmap->contig_alloc_type = B_FALSE;
-
-	status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
-			&nxge_desc_dma_attr,
-			size,
-			&nxge_dev_desc_dma_acc_attr,
-			DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
-			tx_dmap);
-	if (status != NXGE_OK) {
-		goto nxge_alloc_tx_cntl_dma_fail1;
-	}
-
-	*dmap = tx_dmap;
-	goto nxge_alloc_tx_cntl_dma_exit;
-
-nxge_alloc_tx_cntl_dma_fail1:
-	KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t));
-
-nxge_alloc_tx_cntl_dma_exit:
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
-		"<== nxge_alloc_tx_cntl_dma status 0x%08x", status));
-
-	return (status);
-}
-
-/*ARGSUSED*/
-static void
-nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
-{
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma"));
-
-	nxge_dma_mem_free(dmap);
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma"));
-}
-
-static void
-nxge_free_tx_mem_pool(p_nxge_t nxgep)
-{
-	uint32_t		i, ndmas;
-	p_nxge_dma_pool_t	dma_poolp;
-	p_nxge_dma_common_t	*dma_buf_p;
-	p_nxge_dma_pool_t	dma_cntl_poolp;
-	p_nxge_dma_common_t	*dma_cntl_p;
-	uint32_t 		*num_chunks;
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_free_tx_mem_pool"));
-
-	dma_poolp = nxgep->tx_buf_pool_p;
-	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
-		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-			"<== nxge_free_tx_mem_pool "
-			"(null tx buf pool or buf not allocated)"));
-		return;
-	}
-
-	dma_cntl_poolp = nxgep->tx_cntl_pool_p;
-	if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
-		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-			"<== nxge_free_tx_mem_pool "
-			"(null tx cntl buf pool or cntl buf not allocated)"));
-		return;
-	}
-
-	dma_buf_p = dma_poolp->dma_buf_pool_p;
-	num_chunks = dma_poolp->num_chunks;
-
-	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
-	ndmas = dma_cntl_poolp->ndmas;
-
-	for (i = 0; i < ndmas; i++) {
-		nxge_free_tx_buf_dma(nxgep, dma_buf_p[i], num_chunks[i]);
-	}
-
-	for (i = 0; i < ndmas; i++) {
-		nxge_free_tx_cntl_dma(nxgep, dma_cntl_p[i]);
-	}
-
-	for (i = 0; i < ndmas; i++) {
-		KMEM_FREE(dma_buf_p[i],
-			sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
-		KMEM_FREE(dma_cntl_p[i], sizeof (nxge_dma_common_t));
-	}
-
-	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
-	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t));
-	KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t));
-	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t));
-	KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t));
-
-	nxgep->tx_buf_pool_p = NULL;
-	nxgep->tx_cntl_pool_p = NULL;
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_free_tx_mem_pool"));
-}
-
-/*ARGSUSED*/
-static nxge_status_t
-nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method,
-	struct ddi_dma_attr *dma_attrp,
-	size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags,
-	p_nxge_dma_common_t dma_p)
-{
-	caddr_t 		kaddrp;
-	int			ddi_status = DDI_SUCCESS;
-	boolean_t		contig_alloc_type;
-
-	contig_alloc_type = dma_p->contig_alloc_type;
-
-	if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) {
-		/*
-		 * contig_alloc_type for contiguous memory only allowed
-		 * for N2/NIU.
-		 */
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_dma_mem_alloc: alloc type not allowed (%d)",
-			dma_p->contig_alloc_type));
-		return (NXGE_ERROR | NXGE_DDI_FAILED);
-	}
-
-	dma_p->dma_handle = NULL;
-	dma_p->acc_handle = NULL;
-	dma_p->kaddrp = dma_p->last_kaddrp = NULL;
-	dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL;
-	ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp,
-		DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
-	if (ddi_status != DDI_SUCCESS) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
-		return (NXGE_ERROR | NXGE_DDI_FAILED);
-	}
-
-	switch (contig_alloc_type) {
-	case B_FALSE:
-		ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length,
-			acc_attr_p,
-			xfer_flags,
-			DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
-			&dma_p->acc_handle);
-		if (ddi_status != DDI_SUCCESS) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"nxge_dma_mem_alloc:ddi_dma_mem_alloc failed"));
-			ddi_dma_free_handle(&dma_p->dma_handle);
-			dma_p->dma_handle = NULL;
-			return (NXGE_ERROR | NXGE_DDI_FAILED);
-		}
-		if (dma_p->alength < length) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"nxge_dma_mem_alloc:ddi_dma_mem_alloc "
-				"< length."));
-			ddi_dma_mem_free(&dma_p->acc_handle);
-			ddi_dma_free_handle(&dma_p->dma_handle);
-			dma_p->acc_handle = NULL;
-			dma_p->dma_handle = NULL;
-			return (NXGE_ERROR);
-		}
-
-		ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
-			kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
-			&dma_p->dma_cookie, &dma_p->ncookies);
-		if (ddi_status != DDI_DMA_MAPPED) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"nxge_dma_mem_alloc: ddi_dma_addr_bind failed "
-				"(status 0x%x ncookies %d.)", ddi_status,
-				dma_p->ncookies));
-			if (dma_p->acc_handle) {
-				ddi_dma_mem_free(&dma_p->acc_handle);
-				dma_p->acc_handle = NULL;
-			}
-			ddi_dma_free_handle(&dma_p->dma_handle);
-			dma_p->dma_handle = NULL;
-			return (NXGE_ERROR | NXGE_DDI_FAILED);
-		}
-
-		if (dma_p->ncookies != 1) {
-			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
-				"nxge_dma_mem_alloc:ddi_dma_addr_bind "
-				"> 1 cookie "
-				"(status 0x%x ncookies %d.)", ddi_status,
-				dma_p->ncookies));
-			if (dma_p->acc_handle) {
-				ddi_dma_mem_free(&dma_p->acc_handle);
-				dma_p->acc_handle = NULL;
-			}
-			ddi_dma_free_handle(&dma_p->dma_handle);
-			dma_p->dma_handle = NULL;
-			return (NXGE_ERROR);
-		}
-		break;
-
-#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
-	case B_TRUE:
-		kaddrp = (caddr_t)contig_mem_alloc(length);
-		if (kaddrp == NULL) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"nxge_dma_mem_alloc:contig_mem_alloc failed."));
-			ddi_dma_free_handle(&dma_p->dma_handle);
-			return (NXGE_ERROR | NXGE_DDI_FAILED);
-		}
-
-		dma_p->alength = length;
-		ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
-			kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
-			&dma_p->dma_cookie, &dma_p->ncookies);
-		if (ddi_status != DDI_DMA_MAPPED) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"nxge_dma_mem_alloc: ddi_dma_addr_bind failed "
-				"(status 0x%x ncookies %d.)", ddi_status,
-				dma_p->ncookies));
-
-			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
-				"==> nxge_dma_mem_alloc: (not mapped)"
-				"length %lu (0x%x) "
-				"free contig kaddrp $%p "
-				"va_to_pa $%p",
-				length, length,
-				kaddrp,
-				va_to_pa(kaddrp)));
-
-
-			contig_mem_free((void *)kaddrp, length);
-			ddi_dma_free_handle(&dma_p->dma_handle);
-
-			dma_p->dma_handle = NULL;
-			dma_p->acc_handle = NULL;
-			dma_p->alength = 0;
-			dma_p->kaddrp = NULL;
-
-			return (NXGE_ERROR | NXGE_DDI_FAILED);
-		}
-
-		if (dma_p->ncookies != 1 ||
-			(dma_p->dma_cookie.dmac_laddress == NULL)) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"nxge_dma_mem_alloc: ddi_dma_addr_bind > 1 "
-				"cookie or "
-				"dmac_laddress is NULL $%p size %d "
-				" (status 0x%x ncookies %d.)",
-				dma_p->dma_cookie.dmac_laddress,
-				dma_p->dma_cookie.dmac_size,
-				ddi_status,
-				dma_p->ncookies));
-
-			contig_mem_free((void *)kaddrp, length);
-			ddi_dma_free_handle(&dma_p->dma_handle);
-
-			dma_p->alength = 0;
-			dma_p->dma_handle = NULL;
-			dma_p->acc_handle = NULL;
-			dma_p->kaddrp = NULL;
-
-			return (NXGE_ERROR | NXGE_DDI_FAILED);
-		}
-		break;
-
-#else
-	case B_TRUE:
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_dma_mem_alloc: invalid alloc type for !sun4v"));
-		return (NXGE_ERROR | NXGE_DDI_FAILED);
-#endif
-	}
-
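-	/*
-	 * last_kaddrp/last_ioaddr_pp below mark the start of the last
-	 * RXBUF_64B_ALIGNED-sized block in the buffer (presumably the
-	 * last 64-byte chunk the DMA engine may address).
-	 */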
-	dma_p->kaddrp = kaddrp;
-	dma_p->last_kaddrp = (unsigned char *)kaddrp +
-			dma_p->alength - RXBUF_64B_ALIGNED;
-	dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress;
-	dma_p->last_ioaddr_pp =
-		(unsigned char *)dma_p->dma_cookie.dmac_laddress +
-				dma_p->alength - RXBUF_64B_ALIGNED;
-
-	NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);
-
-#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
-	dma_p->orig_ioaddr_pp =
-		(unsigned char *)dma_p->dma_cookie.dmac_laddress;
-	dma_p->orig_alength = length;
-	dma_p->orig_kaddrp = kaddrp;
-	dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp);
-#endif
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: "
-		"dma buffer allocated: dma_p $%p "
-		"return dmac_ladress from cookie $%p cookie dmac_size %d "
-		"dma_p->ioaddr_p $%p "
-		"dma_p->orig_ioaddr_p $%p "
-		"orig_vatopa $%p "
-		"alength %d (0x%x) "
-		"kaddrp $%p "
-		"length %d (0x%x)",
-		dma_p,
-		dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size,
-		dma_p->ioaddr_pp,
-		dma_p->orig_ioaddr_pp,
-		dma_p->orig_vatopa,
-		dma_p->alength, dma_p->alength,
-		kaddrp,
-		length, length));
-
-	return (NXGE_OK);
-}
-
-static void
-nxge_dma_mem_free(p_nxge_dma_common_t dma_p)
-{
-	if (dma_p->dma_handle != NULL) {
-		if (dma_p->ncookies) {
-			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
-			dma_p->ncookies = 0;
-		}
-		ddi_dma_free_handle(&dma_p->dma_handle);
-		dma_p->dma_handle = NULL;
-	}
-
-	if (dma_p->acc_handle != NULL) {
-		ddi_dma_mem_free(&dma_p->acc_handle);
-		dma_p->acc_handle = NULL;
-		NPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
-	}
-
-#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
-	if (dma_p->contig_alloc_type &&
-			dma_p->orig_kaddrp && dma_p->orig_alength) {
-		NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: "
-			"kaddrp $%p (orig_kaddrp $%p) "
-			"mem type %d "
-			"orig_alength %d "
-			"alength 0x%x (%d)",
-			dma_p->kaddrp,
-			dma_p->orig_kaddrp,
-			dma_p->contig_alloc_type,
-			dma_p->orig_alength,
-			dma_p->alength, dma_p->alength));
-
-		contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength);
-		dma_p->orig_alength = 0;
-		dma_p->orig_kaddrp = NULL;
-		dma_p->contig_alloc_type = B_FALSE;
-	}
-#endif
-	dma_p->kaddrp = NULL;
-	dma_p->alength = 0;
-}
-
-/*
- *	nxge_m_start() -- start transmitting and receiving.
- *
- *	This function is called by the MAC layer when the first
- *	stream is opened to prepare the hardware for sending
- *	and receiving packets.
- */
-static int
-nxge_m_start(void *arg)
-{
-	p_nxge_t 	nxgep = (p_nxge_t)arg;
-
-	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start"));
-
-	MUTEX_ENTER(nxgep->genlock);
-	if (nxge_init(nxgep) != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"<== nxge_m_start: initialization failed"));
-		MUTEX_EXIT(nxgep->genlock);
-		return (EIO);
-	}
-
-	if (nxgep->nxge_mac_state == NXGE_MAC_STARTED)
-		goto nxge_m_start_exit;
-	/*
-	 * Start timer to check the system error and tx hangs
-	 */
-	nxgep->nxge_timerid = nxge_start_timer(nxgep, nxge_check_hw_state,
-		NXGE_CHECK_TIMER);
-
-	nxgep->link_notify = B_TRUE;
-
-	nxgep->nxge_mac_state = NXGE_MAC_STARTED;
-
-nxge_m_start_exit:
-	MUTEX_EXIT(nxgep->genlock);
-	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start"));
-	return (0);
-}
-
-/*
- *	nxge_m_stop(): stop transmitting and receiving.
- */
-static void
-nxge_m_stop(void *arg)
-{
-	p_nxge_t 	nxgep = (p_nxge_t)arg;
-
-	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop"));
-
-	if (nxgep->nxge_timerid) {
-		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
-		nxgep->nxge_timerid = 0;
-	}
-
-	MUTEX_ENTER(nxgep->genlock);
-	nxge_uninit(nxgep);
-
-	nxgep->nxge_mac_state = NXGE_MAC_STOPPED;
-
-	MUTEX_EXIT(nxgep->genlock);
-
-	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop"));
-}
-
-static int
-nxge_m_unicst(void *arg, const uint8_t *macaddr)
-{
-	p_nxge_t 	nxgep = (p_nxge_t)arg;
-	struct 		ether_addr addrp;
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_m_unicst"));
-
-	bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL);
-	if (nxge_set_mac_addr(nxgep, &addrp)) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"<== nxge_m_unicst: set unicast failed"));
-		return (EINVAL);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_unicst"));
-
-	return (0);
-}
-
-static int
-nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
-{
-	p_nxge_t 	nxgep = (p_nxge_t)arg;
-	struct 		ether_addr addrp;
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
-		"==> nxge_m_multicst: add %d", add));
-
-	bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
-	if (add) {
-		if (nxge_add_mcast_addr(nxgep, &addrp)) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"<== nxge_m_multicst: add multicast failed"));
-			return (EINVAL);
-		}
-	} else {
-		if (nxge_del_mcast_addr(nxgep, &addrp)) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"<== nxge_m_multicst: del multicast failed"));
-			return (EINVAL);
-		}
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst"));
-
-	return (0);
-}
-
-static int
-nxge_m_promisc(void *arg, boolean_t on)
-{
-	p_nxge_t 	nxgep = (p_nxge_t)arg;
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
-		"==> nxge_m_promisc: on %d", on));
-
-	if (nxge_set_promisc(nxgep, on)) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"<== nxge_m_promisc: set promisc failed"));
-		return (EINVAL);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
-		"<== nxge_m_promisc: on %d", on));
-
-	return (0);
-}
-
-static void
-nxge_m_ioctl(void *arg,  queue_t *wq, mblk_t *mp)
-{
-	p_nxge_t 	nxgep = (p_nxge_t)arg;
-	struct 		iocblk *iocp = (struct iocblk *)mp->b_rptr;
-	boolean_t 	need_privilege;
-	int 		err;
-	int 		cmd;
-
-	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl"));
-
-	iocp = (struct iocblk *)mp->b_rptr;
-	iocp->ioc_error = 0;
-	need_privilege = B_TRUE;
-	cmd = iocp->ioc_cmd;
-	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd));
-	switch (cmd) {
-	default:
-		miocnak(wq, mp, 0, EINVAL);
-		NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid"));
-		return;
-
-	case LB_GET_INFO_SIZE:
-	case LB_GET_INFO:
-	case LB_GET_MODE:
-		need_privilege = B_FALSE;
-		break;
-	case LB_SET_MODE:
-		break;
-
-	case ND_GET:
-		need_privilege = B_FALSE;
-		break;
-	case ND_SET:
-		break;
-
-	case NXGE_GET_MII:
-	case NXGE_PUT_MII:
-	case NXGE_GET64:
-	case NXGE_PUT64:
-	case NXGE_GET_TX_RING_SZ:
-	case NXGE_GET_TX_DESC:
-	case NXGE_TX_SIDE_RESET:
-	case NXGE_RX_SIDE_RESET:
-	case NXGE_GLOBAL_RESET:
-	case NXGE_RESET_MAC:
-	case NXGE_TX_REGS_DUMP:
-	case NXGE_RX_REGS_DUMP:
-	case NXGE_INT_REGS_DUMP:
-	case NXGE_VIR_INT_REGS_DUMP:
-	case NXGE_PUT_TCAM:
-	case NXGE_GET_TCAM:
-	case NXGE_RTRACE:
-	case NXGE_RDUMP:
-
-		need_privilege = B_FALSE;
-		break;
-	case NXGE_INJECT_ERR:
-		cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n");
-		nxge_err_inject(nxgep, wq, mp);
-		break;
-	}
-
-	if (need_privilege) {
-		if (secpolicy_net_config != NULL)
-			err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
-		else
-			err = drv_priv(iocp->ioc_cr);
-		if (err != 0) {
-			miocnak(wq, mp, 0, err);
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"<== nxge_m_ioctl: no priv"));
-			return;
-		}
-	}
-
-	switch (cmd) {
-	case ND_GET:
-		NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "ND_GET command"));
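-		/* FALLTHROUGH */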
-	case ND_SET:
-		NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "ND_SET command"));
-		nxge_param_ioctl(nxgep, wq, mp, iocp);
-		break;
-
-	case LB_GET_MODE:
-	case LB_SET_MODE:
-	case LB_GET_INFO_SIZE:
-	case LB_GET_INFO:
-		nxge_loopback_ioctl(nxgep, wq, mp, iocp);
-		break;
-
-	case NXGE_GET_MII:
-	case NXGE_PUT_MII:
-	case NXGE_PUT_TCAM:
-	case NXGE_GET_TCAM:
-	case NXGE_GET64:
-	case NXGE_PUT64:
-	case NXGE_GET_TX_RING_SZ:
-	case NXGE_GET_TX_DESC:
-	case NXGE_TX_SIDE_RESET:
-	case NXGE_RX_SIDE_RESET:
-	case NXGE_GLOBAL_RESET:
-	case NXGE_RESET_MAC:
-	case NXGE_TX_REGS_DUMP:
-	case NXGE_RX_REGS_DUMP:
-	case NXGE_INT_REGS_DUMP:
-	case NXGE_VIR_INT_REGS_DUMP:
-		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
-			"==> nxge_m_ioctl: cmd 0x%x", cmd));
-		nxge_hw_ioctl(nxgep, wq, mp, iocp);
-		break;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl"));
-}
-
-extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count);
-
-static void
-nxge_m_resources(void *arg)
-{
-	p_nxge_t		nxgep = arg;
-	mac_rx_fifo_t 		mrf;
-	p_rx_rcr_rings_t	rcr_rings;
-	p_rx_rcr_ring_t		*rcr_p;
-	uint32_t		i, ndmas;
-	nxge_status_t		status;
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_m_resources"));
-
-	MUTEX_ENTER(nxgep->genlock);
-
-	/*
-	 * CR 6492541: check whether the drv_state has been initialized;
-	 * if not, call nxge_init().
-	 */
-	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
-		status = nxge_init(nxgep);
-		if (status != NXGE_OK)
-			goto nxge_m_resources_exit;
-	}
-
-	mrf.mrf_type = MAC_RX_FIFO;
-	mrf.mrf_blank = nxge_rx_hw_blank;
-	mrf.mrf_arg = (void *)nxgep;
-
-	mrf.mrf_normal_blank_time = 128;
-	mrf.mrf_normal_pkt_count = 8;
-	rcr_rings = nxgep->rx_rcr_rings;
-	rcr_p = rcr_rings->rcr_rings;
-	ndmas = rcr_rings->ndmas;
-
-	/*
-	 * Export our receive resources to the MAC layer.
-	 */
-	for (i = 0; i < ndmas; i++) {
-		((p_rx_rcr_ring_t)rcr_p[i])->rcr_mac_handle =
-				mac_resource_add(nxgep->mach,
-				    (mac_resource_t *)&mrf);
-
-		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
-			"==> nxge_m_resources: vdma %d dma %d "
-			"rcrptr 0x%016llx mac_handle 0x%016llx",
-			i, ((p_rx_rcr_ring_t)rcr_p[i])->rdc,
-			rcr_p[i],
-			((p_rx_rcr_ring_t)rcr_p[i])->rcr_mac_handle));
-	}
-
-nxge_m_resources_exit:
-	MUTEX_EXIT(nxgep->genlock);
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_m_resources"));
-}
-
-static void
-nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot, boolean_t factory)
-{
-	p_nxge_mmac_stats_t mmac_stats;
-	int i;
-	nxge_mmac_t *mmac_info;
-
-	mmac_info = &nxgep->nxge_mmac_info;
-
-	mmac_stats = &nxgep->statsp->mmac_stats;
-	mmac_stats->mmac_max_cnt = mmac_info->num_mmac;
-	mmac_stats->mmac_avail_cnt = mmac_info->naddrfree;
-
-	for (i = 0; i < ETHERADDRL; i++) {
-		if (factory) {
-			mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
-			= mmac_info->factory_mac_pool[slot][(ETHERADDRL-1) - i];
-		} else {
-			mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
-			= mmac_info->mac_pool[slot].addr[(ETHERADDRL - 1) - i];
-		}
-	}
-}
-
-/*
- * nxge_altmac_set() -- Set an alternate MAC address
- */
-static int
-nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, mac_addr_slot_t slot)
-{
-	uint8_t addrn;
-	uint8_t portn;
-	npi_mac_addr_t altmac;
-
-	altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff);
-	altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff);
-	altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff);
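-	/*
-	 * For illustration, a MAC address of 00:14:4f:01:02:03 would be
-	 * packed as w2 = 0x0014, w1 = 0x4f01 and w0 = 0x0203.
-	 */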
-
-	portn = nxgep->mac.portnum;
-	addrn = (uint8_t)slot - 1;
-
-	if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET, portn,
-		addrn, &altmac) != NPI_SUCCESS)
-		return (EIO);
-	/*
-	 * Enable comparison with the alternate MAC address.
-	 * The first alternate address is enabled by bit 1 of the
-	 * BMAC_ALTAD_CMPEN register but by bit 0 of the XMAC_ADDR_CMPEN
-	 * register, so slot needs to be converted to addrn accordingly
-	 * before calling npi_mac_altaddr_enable.
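-	 * For example, slot 1 maps to addrn 0 (bit 0) on an XMAC port and
-	 * to addrn 1 (bit 1) on a BMAC port.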
-	 */
-	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
-		addrn = (uint8_t)slot - 1;
-	else
-		addrn = (uint8_t)slot;
-
-	if (npi_mac_altaddr_enable(nxgep->npi_handle, portn, addrn)
-		!= NPI_SUCCESS)
-		return (EIO);
-
-	return (0);
-}
-
-/*
- * nxge_m_mmac_add() - find an unused address slot, set the address
- * value to the one specified, enable the port to start filtering on
- * the new MAC address.  Returns 0 on success.
- */
-static int
-nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr)
-{
-	p_nxge_t nxgep = arg;
-	mac_addr_slot_t slot;
-	nxge_mmac_t *mmac_info;
-	int err;
-	nxge_status_t status;
-
-	mutex_enter(nxgep->genlock);
-
-	/*
-	 * Make sure that nxge is initialized, if _start() has
-	 * not been called.
-	 */
-	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
-		status = nxge_init(nxgep);
-		if (status != NXGE_OK) {
-			mutex_exit(nxgep->genlock);
-			return (ENXIO);
-		}
-	}
-
-	mmac_info = &nxgep->nxge_mmac_info;
-	if (mmac_info->naddrfree == 0) {
-		mutex_exit(nxgep->genlock);
-		return (ENOSPC);
-	}
-	if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr,
-		maddr->mma_addrlen)) {
-		mutex_exit(nxgep->genlock);
-		return (EINVAL);
-	}
-	/*
-	 * 	Search for the first available slot. Because naddrfree
-	 * is not zero, we are guaranteed to find one.
-	 * 	Slot 0 is for unique (primary) MAC. The first alternate
-	 * MAC slot is slot 1.
-	 *	Each of the first two ports of Neptune has 16 alternate
-	 * MAC slots but only the first 7 (of 15) slots have assigned factory
-	 * MAC addresses. We first search among the slots without bundled
-	 * factory MACs. If we fail to find one in that range, then we
-	 * search the slots with bundled factory MACs.  A factory MAC
-	 * will be wasted while the slot is used with a user MAC address.
-	 * But the slot could be used by factory MAC again after calling
-	 * nxge_m_mmac_remove and nxge_m_mmac_reserve.
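-	 * 	For example, with num_mmac 16 and num_factory_mmac 7, a new
-	 * user address is placed in the first free slot among 8..16 and
-	 * only falls back to slots 1..7 when those are all in use.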
-	 */
-	if (mmac_info->num_factory_mmac < mmac_info->num_mmac) {
-		for (slot = mmac_info->num_factory_mmac + 1;
-			slot <= mmac_info->num_mmac; slot++) {
-			if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
-				break;
-		}
-		if (slot > mmac_info->num_mmac) {
-			for (slot = 1; slot <= mmac_info->num_factory_mmac;
-				slot++) {
-				if (!(mmac_info->mac_pool[slot].flags
-					& MMAC_SLOT_USED))
-					break;
-			}
-		}
-	} else {
-		for (slot = 1; slot <= mmac_info->num_mmac; slot++) {
-			if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
-				break;
-		}
-	}
-	ASSERT(slot <= mmac_info->num_mmac);
-	if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot)) != 0) {
-		mutex_exit(nxgep->genlock);
-		return (err);
-	}
-	bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, ETHERADDRL);
-	mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED;
-	mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
-	mmac_info->naddrfree--;
-	nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
-
-	maddr->mma_slot = slot;
-
-	mutex_exit(nxgep->genlock);
-	return (0);
-}
-
-/*
- * This function reserves an unused slot and programs the slot and the HW
- * with a factory mac address.
- */
-static int
-nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr)
-{
-	p_nxge_t nxgep = arg;
-	mac_addr_slot_t slot;
-	nxge_mmac_t *mmac_info;
-	int err;
-	nxge_status_t status;
-
-	mutex_enter(nxgep->genlock);
-
-	/*
-	 * Make sure that nxge is initialized, if _start() has
-	 * not been called.
-	 */
-	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
-		status = nxge_init(nxgep);
-		if (status != NXGE_OK) {
-			mutex_exit(nxgep->genlock);
-			return (ENXIO);
-		}
-	}
-
-	mmac_info = &nxgep->nxge_mmac_info;
-	if (mmac_info->naddrfree == 0) {
-		mutex_exit(nxgep->genlock);
-		return (ENOSPC);
-	}
-
-	slot = maddr->mma_slot;
-	if (slot == -1) {  /* -1: Take the first available slot */
-		for (slot = 1; slot <= mmac_info->num_factory_mmac; slot++) {
-			if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
-				break;
-		}
-		if (slot > mmac_info->num_factory_mmac) {
-			mutex_exit(nxgep->genlock);
-			return (ENOSPC);
-		}
-	}
-	if (slot < 1 || slot > mmac_info->num_factory_mmac) {
-		/*
-		 * Do not support factory MAC at a slot greater than
-		 * num_factory_mmac even when there are available factory
-		 * MAC addresses because the alternate MACs are bundled with
-		 * slot[1] through slot[num_factory_mmac]
-		 */
-		mutex_exit(nxgep->genlock);
-		return (EINVAL);
-	}
-	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
-		mutex_exit(nxgep->genlock);
-		return (EBUSY);
-	}
-	/* Verify the address to be reserved */
-	if (!mac_unicst_verify(nxgep->mach,
-		mmac_info->factory_mac_pool[slot], ETHERADDRL)) {
-		mutex_exit(nxgep->genlock);
-		return (EINVAL);
-	}
-	if (err = nxge_altmac_set(nxgep,
-		mmac_info->factory_mac_pool[slot], slot)) {
-		mutex_exit(nxgep->genlock);
-		return (err);
-	}
-	bcopy(mmac_info->factory_mac_pool[slot], maddr->mma_addr, ETHERADDRL);
-	mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED | MMAC_VENDOR_ADDR;
-	mmac_info->naddrfree--;
-
-	nxge_mmac_kstat_update(nxgep, slot, B_TRUE);
-	mutex_exit(nxgep->genlock);
-
-	/* Pass info back to the caller */
-	maddr->mma_slot = slot;
-	maddr->mma_addrlen = ETHERADDRL;
-	maddr->mma_flags = MMAC_SLOT_USED | MMAC_VENDOR_ADDR;
-
-	return (0);
-}
-
-/*
- * Remove the specified mac address and update the HW not to filter
- * the mac address anymore.
- */
-static int
-nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot)
-{
-	p_nxge_t nxgep = arg;
-	nxge_mmac_t *mmac_info;
-	uint8_t addrn;
-	uint8_t portn;
-	int err = 0;
-	nxge_status_t status;
-
-	mutex_enter(nxgep->genlock);
-
-	/*
-	 * Make sure that nxge is initialized, if _start() has
-	 * not been called.
-	 */
-	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
-		status = nxge_init(nxgep);
-		if (status != NXGE_OK) {
-			mutex_exit(nxgep->genlock);
-			return (ENXIO);
-		}
-	}
-
-	mmac_info = &nxgep->nxge_mmac_info;
-	if (slot < 1 || slot > mmac_info->num_mmac) {
-		mutex_exit(nxgep->genlock);
-		return (EINVAL);
-	}
-
-	portn = nxgep->mac.portnum;
-	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
-		addrn = (uint8_t)slot - 1;
-	else
-		addrn = (uint8_t)slot;
-
-	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
-		if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn)
-				== NPI_SUCCESS) {
-			mmac_info->naddrfree++;
-			mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED;
-			/*
-			 * Regardless of whether the MAC we just stopped
-			 * filtering is a user or a factory addr, we must set
-			 * the MMAC_VENDOR_ADDR flag if this slot has an
-			 * associated factory MAC to indicate that a factory
-			 * MAC is available.
-			 */
-			if (slot <= mmac_info->num_factory_mmac) {
-				mmac_info->mac_pool[slot].flags
-					|= MMAC_VENDOR_ADDR;
-			}
-			/*
-			 * Clear mac_pool[slot].addr so that kstat shows 0
-			 * alternate MAC address if the slot is not used.
-			 * (But nxge_m_mmac_get returns the factory MAC even
-			 * when the slot is not used!)
-			 */
-			bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL);
-			nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
-		} else {
-			err = EIO;
-		}
-	} else {
-		err = EINVAL;
-	}
-
-	mutex_exit(nxgep->genlock);
-	return (err);
-}
-
-
-/*
- * Modify a mac address added by nxge_m_mmac_add or nxge_m_mmac_reserve().
- */
-static int
-nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr)
-{
-	p_nxge_t nxgep = arg;
-	mac_addr_slot_t slot;
-	nxge_mmac_t *mmac_info;
-	int err = 0;
-	nxge_status_t status;
-
-	if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr,
-			maddr->mma_addrlen))
-		return (EINVAL);
-
-	slot = maddr->mma_slot;
-
-	mutex_enter(nxgep->genlock);
-
-	/*
-	 * Make sure that nxge is initialized, if _start() has
-	 * not been called.
-	 */
-	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
-		status = nxge_init(nxgep);
-		if (status != NXGE_OK) {
-			mutex_exit(nxgep->genlock);
-			return (ENXIO);
-		}
-	}
-
-	mmac_info = &nxgep->nxge_mmac_info;
-	if (slot < 1 || slot > mmac_info->num_mmac) {
-		mutex_exit(nxgep->genlock);
-		return (EINVAL);
-	}
-	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
-		if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot))
-			== 0) {
-			bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr,
-				ETHERADDRL);
-			/*
-			 * Assume that the MAC passed down from the caller
-			 * is not a factory MAC address (The user should
-			 * call mmac_remove followed by mmac_reserve if
-			 * he wants to use the factory MAC for this slot).
-			 */
-			mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
-			nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
-		}
-	} else {
-		err = EINVAL;
-	}
-	mutex_exit(nxgep->genlock);
-	return (err);
-}
-
-/*
- * nxge_m_mmac_get() - Get the MAC address and other information
- * related to the slot.  mma_flags should be set to 0 in the call.
- * Note: although kstat shows MAC address as zero when a slot is
- * not used, Crossbow expects nxge_m_mmac_get to copy factory MAC
- * to the caller as long as the slot is not using a user MAC address.
- * The following table shows the rules,
- *
- *				   USED    VENDOR    mma_addr
- * ------------------------------------------------------------
- * (1) Slot uses a user MAC:        yes      no     user MAC
- * (2) Slot uses a factory MAC:     yes      yes    factory MAC
- * (3) Slot is not used but is
- *     factory MAC capable:         no       yes    factory MAC
- * (4) Slot is not used and is
- *     not factory MAC capable:     no       no        0
- * ------------------------------------------------------------
- */
-static int
-nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr)
-{
-	nxge_t *nxgep = arg;
-	mac_addr_slot_t slot;
-	nxge_mmac_t *mmac_info;
-	nxge_status_t status;
-
-	slot = maddr->mma_slot;
-
-	mutex_enter(nxgep->genlock);
-
-	/*
-	 * Make sure that nxge is initialized, if _start() has
-	 * not been called.
-	 */
-	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
-		status = nxge_init(nxgep);
-		if (status != NXGE_OK) {
-			mutex_exit(nxgep->genlock);
-			return (ENXIO);
-		}
-	}
-
-	mmac_info = &nxgep->nxge_mmac_info;
-
-	if (slot < 1 || slot > mmac_info->num_mmac) {
-		mutex_exit(nxgep->genlock);
-		return (EINVAL);
-	}
-	maddr->mma_flags = 0;
-	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)
-		maddr->mma_flags |= MMAC_SLOT_USED;
-
-	if (mmac_info->mac_pool[slot].flags & MMAC_VENDOR_ADDR) {
-		maddr->mma_flags |= MMAC_VENDOR_ADDR;
-		bcopy(mmac_info->factory_mac_pool[slot],
-			maddr->mma_addr, ETHERADDRL);
-		maddr->mma_addrlen = ETHERADDRL;
-	} else {
-		if (maddr->mma_flags & MMAC_SLOT_USED) {
-			bcopy(mmac_info->mac_pool[slot].addr,
-				maddr->mma_addr, ETHERADDRL);
-			maddr->mma_addrlen = ETHERADDRL;
-		} else {
-			bzero(maddr->mma_addr, ETHERADDRL);
-			maddr->mma_addrlen = 0;
-		}
-	}
-	mutex_exit(nxgep->genlock);
-	return (0);
-}
-
-
-static boolean_t
-nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
-{
-	nxge_t *nxgep = arg;
-	uint32_t *txflags = cap_data;
-	multiaddress_capab_t *mmacp = cap_data;
-
-	switch (cap) {
-	case MAC_CAPAB_HCKSUM:
-		*txflags = HCKSUM_INET_PARTIAL;
-		break;
-	case MAC_CAPAB_POLL:
-		/*
-		 * There's nothing for us to fill in; simply returning
-		 * B_TRUE to state that we support polling is sufficient.
-		 */
-		break;
-
-	case MAC_CAPAB_MULTIADDRESS:
-		mutex_enter(nxgep->genlock);
-
-		mmacp->maddr_naddr = nxgep->nxge_mmac_info.num_mmac;
-		mmacp->maddr_naddrfree = nxgep->nxge_mmac_info.naddrfree;
-		mmacp->maddr_flag = 0; /* 0 is required by PSARC 2006/265 */
-		/*
-		 * maddr_handle is driver's private data, passed back to
-		 * entry point functions as arg.
-		 */
-		mmacp->maddr_handle	= nxgep;
-		mmacp->maddr_add	= nxge_m_mmac_add;
-		mmacp->maddr_remove	= nxge_m_mmac_remove;
-		mmacp->maddr_modify	= nxge_m_mmac_modify;
-		mmacp->maddr_get	= nxge_m_mmac_get;
-		mmacp->maddr_reserve	= nxge_m_mmac_reserve;
-
-		mutex_exit(nxgep->genlock);
-		break;
-	default:
-		return (B_FALSE);
-	}
-	return (B_TRUE);
-}
-
-/*
- * Module loading and removing entry points.
- */
-
-static	struct cb_ops 	nxge_cb_ops = {
-	nodev,			/* cb_open */
-	nodev,			/* cb_close */
-	nodev,			/* cb_strategy */
-	nodev,			/* cb_print */
-	nodev,			/* cb_dump */
-	nodev,			/* cb_read */
-	nodev,			/* cb_write */
-	nodev,			/* cb_ioctl */
-	nodev,			/* cb_devmap */
-	nodev,			/* cb_mmap */
-	nodev,			/* cb_segmap */
-	nochpoll,		/* cb_chpoll */
-	ddi_prop_op,		/* cb_prop_op */
-	NULL,			/* cb_str */
-	D_MP, 			/* cb_flag */
-	CB_REV,			/* rev */
-	nodev,			/* int (*cb_aread)() */
-	nodev			/* int (*cb_awrite)() */
-};
-
-static struct dev_ops nxge_dev_ops = {
-	DEVO_REV,		/* devo_rev */
-	0,			/* devo_refcnt */
-	nulldev,		/* devo_getinfo */
-	nulldev,		/* devo_identify */
-	nulldev,		/* devo_probe */
-	nxge_attach,		/* devo_attach */
-	nxge_detach,		/* devo_detach */
-	nodev,			/* devo_reset */
-	&nxge_cb_ops,		/* devo_cb_ops */
-	(struct bus_ops *)NULL, /* devo_bus_ops	*/
-	ddi_power		/* devo_power */
-};
-
-extern	struct	mod_ops	mod_driverops;
-
-#define	NXGE_DESC_VER		"Sun NIU 10Gb Ethernet %I%"
-
-/*
- * Module linkage information for the kernel.
- */
-static struct modldrv 	nxge_modldrv = {
-	&mod_driverops,
-	NXGE_DESC_VER,
-	&nxge_dev_ops
-};
-
-static struct modlinkage modlinkage = {
-	MODREV_1, (void *) &nxge_modldrv, NULL
-};
-
-int
-_init(void)
-{
-	int		status;
-
-	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init"));
-	mac_init_ops(&nxge_dev_ops, "nxge");
-	status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0);
-	if (status != 0) {
-		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
-			"failed to init device soft state"));
-		goto _init_exit;
-	}
-
-	status = mod_install(&modlinkage);
-	if (status != 0) {
-		ddi_soft_state_fini(&nxge_list);
-		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed"));
-		goto _init_exit;
-	}
-
-	MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL);
-
-_init_exit:
-	NXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status));
-
-	return (status);
-}
-
-int
-_fini(void)
-{
-	int		status;
-
-	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini"));
-
-	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove"));
-
-	if (nxge_mblks_pending)
-		return (EBUSY);
-
-	status = mod_remove(&modlinkage);
-	if (status != DDI_SUCCESS) {
-		NXGE_DEBUG_MSG((NULL, MOD_CTL,
-			    "Module removal failed 0x%08x",
-			    status));
-		goto _fini_exit;
-	}
-
-	mac_fini_ops(&nxge_dev_ops);
-
-	ddi_soft_state_fini(&nxge_list);
-
-	MUTEX_DESTROY(&nxge_common_lock);
-_fini_exit:
-	NXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status));
-
-	return (status);
-}
-
-int
-_info(struct modinfo *modinfop)
-{
-	int		status;
-
-	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info"));
-	status = mod_info(&modlinkage, modinfop);
-	NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status));
-
-	return (status);
-}
-
-/*ARGSUSED*/
-static nxge_status_t
-nxge_add_intrs(p_nxge_t nxgep)
-{
-
-	int		intr_types;
-	int		type = 0;
-	int		ddi_status = DDI_SUCCESS;
-	nxge_status_t	status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs"));
-
-	nxgep->nxge_intr_type.intr_registered = B_FALSE;
-	nxgep->nxge_intr_type.intr_enabled = B_FALSE;
-	nxgep->nxge_intr_type.msi_intx_cnt = 0;
-	nxgep->nxge_intr_type.intr_added = 0;
-	nxgep->nxge_intr_type.niu_msi_enable = B_FALSE;
-	nxgep->nxge_intr_type.intr_type = 0;
-
-	if (nxgep->niu_type == N2_NIU) {
-		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
-	} else if (nxge_msi_enable) {
-		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
-	}
-
-	/* Get the supported interrupt types */
-	if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types))
-			!= DDI_SUCCESS) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: "
-			"ddi_intr_get_supported_types failed: status 0x%08x",
-			ddi_status));
-		return (NXGE_ERROR | NXGE_DDI_FAILED);
-	}
-	nxgep->nxge_intr_type.intr_types = intr_types;
-
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
-		"ddi_intr_get_supported_types: 0x%08x", intr_types));
-
-	/*
-	 * Solaris MSI-X is not supported yet; use MSI for now.
-	 * nxge_msi_enable (1):
-	 *	1 - MSI		2 - MSI-X	others - FIXED
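-	 * For illustration (assuming the usual /etc/system mechanism for
-	 * tuning driver globals), MSI-X could be requested with a line like
-	 *	set nxge:nxge_msi_enable = 2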
-	 */
-	switch (nxge_msi_enable) {
-	default:
-		type = DDI_INTR_TYPE_FIXED;
-		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
-			"use fixed (intx emulation) type %08x",
-			type));
-		break;
-
-	case 2:
-		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
-			"ddi_intr_get_supported_types: 0x%08x", intr_types));
-		if (intr_types & DDI_INTR_TYPE_MSIX) {
-			type = DDI_INTR_TYPE_MSIX;
-			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
-				"ddi_intr_get_supported_types: MSIX 0x%08x",
-				type));
-		} else if (intr_types & DDI_INTR_TYPE_MSI) {
-			type = DDI_INTR_TYPE_MSI;
-			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
-				"ddi_intr_get_supported_types: MSI 0x%08x",
-				type));
-		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
-			type = DDI_INTR_TYPE_FIXED;
-			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
-				"ddi_intr_get_supported_types: FIXED 0x%08x",
-				type));
-		}
-		break;
-
-	case 1:
-		if (intr_types & DDI_INTR_TYPE_MSI) {
-			type = DDI_INTR_TYPE_MSI;
-			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
-				"ddi_intr_get_supported_types: MSI 0x%08x",
-				type));
-		} else if (intr_types & DDI_INTR_TYPE_MSIX) {
-			type = DDI_INTR_TYPE_MSIX;
-			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
-				"ddi_intr_get_supported_types: MSIX 0x%08x",
-				type));
-		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
-			type = DDI_INTR_TYPE_FIXED;
-			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
-				"ddi_intr_get_supported_types: FIXED 0x%08x",
-				type));
-		}
-	}
-
-	nxgep->nxge_intr_type.intr_type = type;
-	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
-		type == DDI_INTR_TYPE_FIXED) &&
-			nxgep->nxge_intr_type.niu_msi_enable) {
-		if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				    " nxge_add_intrs: "
-				    " nxge_add_intrs_adv failed: status 0x%08x",
-				    status));
-			return (status);
-		} else {
-			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
-			"interrupts registered : type %d", type));
-			nxgep->nxge_intr_type.intr_registered = B_TRUE;
-
-			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-				"\nAdded advanced nxge add_intr_adv "
-					"intr type 0x%x\n", type));
-
-			return (status);
-		}
-	}
-
-	if (!nxgep->nxge_intr_type.intr_registered) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: "
-			"failed to register interrupts"));
-		return (NXGE_ERROR | NXGE_DDI_FAILED);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs"));
-	return (status);
-}
-
-/*ARGSUSED*/
-static nxge_status_t
-nxge_add_soft_intrs(p_nxge_t nxgep)
-{
-
-	int		ddi_status = DDI_SUCCESS;
-	nxge_status_t	status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_soft_intrs"));
-
-	nxgep->resched_id = NULL;
-	nxgep->resched_running = B_FALSE;
-	ddi_status = ddi_add_softintr(nxgep->dip, DDI_SOFTINT_LOW,
-			&nxgep->resched_id,
-		NULL, NULL, nxge_reschedule, (caddr_t)nxgep);
-	if (ddi_status != DDI_SUCCESS) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_soft_intrs: "
-			"ddi_add_softintr failed: status 0x%08x",
-			ddi_status));
-		return (NXGE_ERROR | NXGE_DDI_FAILED);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_soft_intrs"));
-
-	return (status);
-}
-
-static nxge_status_t
-nxge_add_intrs_adv(p_nxge_t nxgep)
-{
-	int		intr_type;
-	p_nxge_intr_t	intrp;
-
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv"));
-
-	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
-	intr_type = intrp->intr_type;
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x",
-		intr_type));
-
-	switch (intr_type) {
-	case DDI_INTR_TYPE_MSI: /* 0x2 */
-	case DDI_INTR_TYPE_MSIX: /* 0x4 */
-		return (nxge_add_intrs_adv_type(nxgep, intr_type));
-
-	case DDI_INTR_TYPE_FIXED: /* 0x1 */
-		return (nxge_add_intrs_adv_type_fix(nxgep, intr_type));
-
-	default:
-		return (NXGE_ERROR);
-	}
-}
-
-
-/*ARGSUSED*/
-static nxge_status_t
-nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type)
-{
-	dev_info_t		*dip = nxgep->dip;
-	p_nxge_ldg_t		ldgp;
-	p_nxge_intr_t		intrp;
-	uint_t			*inthandler;
-	void			*arg1, *arg2;
-	int			behavior;
-	int			nintrs, navail;
-	int			nactual, nrequired;
-	int			inum = 0;
-	int			x, y;
-	int			ddi_status = DDI_SUCCESS;
-	nxge_status_t		status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type"));
-	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
-	intrp->start_inum = 0;
-
-	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
-	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"ddi_intr_get_nintrs() failed, status: 0x%x, "
-			    "nintrs: %d", ddi_status, nintrs));
-		return (NXGE_ERROR | NXGE_DDI_FAILED);
-	}
-
-	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
-	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"ddi_intr_get_navail() failed, status: 0x%x, "
-			    "navail: %d", ddi_status, navail));
-		return (NXGE_ERROR | NXGE_DDI_FAILED);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL,
-		"ddi_intr_get_navail() returned: nintrs %d, navail %d",
-		    nintrs, navail));
-
-	if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
-		/* MSI must be power of 2 */
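-		/* e.g., navail of 12 would be rounded down to 8 here. */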
-		if ((navail & 16) == 16) {
-			navail = 16;
-		} else if ((navail & 8) == 8) {
-			navail = 8;
-		} else if ((navail & 4) == 4) {
-			navail = 4;
-		} else if ((navail & 2) == 2) {
-			navail = 2;
-		} else {
-			navail = 1;
-		}
-		NXGE_DEBUG_MSG((nxgep, INT_CTL,
-			"ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
-			"navail %d", nintrs, navail));
-	}
-
-	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
-			DDI_INTR_ALLOC_NORMAL);
-	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
-	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
-	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
-		    navail, &nactual, behavior);
-	if (ddi_status != DDI_SUCCESS || nactual == 0) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				    " ddi_intr_alloc() failed: %d",
-				    ddi_status));
-		kmem_free(intrp->htable, intrp->intr_size);
-		return (NXGE_ERROR | NXGE_DDI_FAILED);
-	}
-
-	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
-			(uint_t *)&intrp->pri)) != DDI_SUCCESS) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				    " ddi_intr_get_pri() failed: %d",
-				    ddi_status));
-		/* Free already allocated interrupts */
-		for (y = 0; y < nactual; y++) {
-			(void) ddi_intr_free(intrp->htable[y]);
-		}
-
-		kmem_free(intrp->htable, intrp->intr_size);
-		return (NXGE_ERROR | NXGE_DDI_FAILED);
-	}
-
-	nrequired = 0;
-	switch (nxgep->niu_type) {
-	case NEPTUNE:
-	case NEPTUNE_2:
-	default:
-		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
-		break;
-
-	case N2_NIU:
-		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
-		break;
-	}
-
-	if (status != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_add_intrs_adv_type: nxge_ldgv_init "
-			"failed: 0x%x", status));
-		/* Free already allocated interrupts */
-		for (y = 0; y < nactual; y++) {
-			(void) ddi_intr_free(intrp->htable[y]);
-		}
-
-		kmem_free(intrp->htable, intrp->intr_size);
-		return (status);
-	}
-
-	ldgp = nxgep->ldgvp->ldgp;
-	for (x = 0; x < nrequired; x++, ldgp++) {
-		ldgp->vector = (uint8_t)x;
-		ldgp->intdata = SID_DATA(ldgp->func, x);
-		arg1 = ldgp->ldvp;
-		arg2 = nxgep;
-		if (ldgp->nldvs == 1) {
-			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
-			NXGE_DEBUG_MSG((nxgep, INT_CTL,
-				"nxge_add_intrs_adv_type: "
-				"arg1 0x%x arg2 0x%x: "
-				"1-1 int handler (entry %d intdata 0x%x)\n",
-				arg1, arg2,
-				x, ldgp->intdata));
-		} else if (ldgp->nldvs > 1) {
-			inthandler = (uint_t *)ldgp->sys_intr_handler;
-			NXGE_DEBUG_MSG((nxgep, INT_CTL,
-				"nxge_add_intrs_adv_type: "
-				"arg1 0x%x arg2 0x%x: "
-				"nldevs %d int handler "
-				"(entry %d intdata 0x%x)\n",
-				arg1, arg2,
-				ldgp->nldvs, x, ldgp->intdata));
-		}
-
-		NXGE_DEBUG_MSG((nxgep, INT_CTL,
-			"==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
-			"htable 0x%llx", x, intrp->htable[x]));
-
-		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
-			(ddi_intr_handler_t *)inthandler, arg1, arg2))
-				!= DDI_SUCCESS) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"==> nxge_add_intrs_adv_type: failed #%d "
-				"status 0x%x", x, ddi_status));
-			for (y = 0; y < intrp->intr_added; y++) {
-				(void) ddi_intr_remove_handler(
-						intrp->htable[y]);
-			}
-			/* Free already allocated intr */
-			for (y = 0; y < nactual; y++) {
-				(void) ddi_intr_free(intrp->htable[y]);
-			}
-			kmem_free(intrp->htable, intrp->intr_size);
-
-			(void) nxge_ldgv_uninit(nxgep);
-
-			return (NXGE_ERROR | NXGE_DDI_FAILED);
-		}
-		intrp->intr_added++;
-	}
-
-	intrp->msi_intx_cnt = nactual;
-
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-		"Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
-		navail, nactual,
-		intrp->msi_intx_cnt,
-		intrp->intr_added));
-
-	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
-
-	(void) nxge_intr_ldgv_init(nxgep);
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type"));
-
-	return (status);
-}
-
-/*ARGSUSED*/
-static nxge_status_t
-nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type)
-{
-	dev_info_t		*dip = nxgep->dip;
-	p_nxge_ldg_t		ldgp;
-	p_nxge_intr_t		intrp;
-	uint_t			*inthandler;
-	void			*arg1, *arg2;
-	int			behavior;
-	int			nintrs, navail;
-	int			nactual, nrequired;
-	int			inum = 0;
-	int			x, y;
-	int			ddi_status = DDI_SUCCESS;
-	nxge_status_t		status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix"));
-	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
-	intrp->start_inum = 0;
-
-	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
-	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
-		NXGE_DEBUG_MSG((nxgep, INT_CTL,
-			"ddi_intr_get_nintrs() failed, status: 0x%x, "
-			    "nintrs: %d", ddi_status, nintrs));
-		return (NXGE_ERROR | NXGE_DDI_FAILED);
-	}
-
-	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
-	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"ddi_intr_get_navail() failed, status: 0x%x, "
-			    "navail: %d", ddi_status, navail));
-		return (NXGE_ERROR | NXGE_DDI_FAILED);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL,
-		"ddi_intr_get_navail() returned: nintrs %d, navail %d",
-		    nintrs, navail));
-
-	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
-			DDI_INTR_ALLOC_NORMAL);
-	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
-	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
-	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
-		    navail, &nactual, behavior);
-	if (ddi_status != DDI_SUCCESS || nactual == 0) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			    " ddi_intr_alloc() failed: %d",
-			    ddi_status));
-		kmem_free(intrp->htable, intrp->intr_size);
-		return (NXGE_ERROR | NXGE_DDI_FAILED);
-	}
-
-	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
-			(uint_t *)&intrp->pri)) != DDI_SUCCESS) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				    " ddi_intr_get_pri() failed: %d",
-				    ddi_status));
-		/* Free already allocated interrupts */
-		for (y = 0; y < nactual; y++) {
-			(void) ddi_intr_free(intrp->htable[y]);
-		}
-
-		kmem_free(intrp->htable, intrp->intr_size);
-		return (NXGE_ERROR | NXGE_DDI_FAILED);
-	}
-
-	nrequired = 0;
-	switch (nxgep->niu_type) {
-	case NEPTUNE:
-	case NEPTUNE_2:
-	default:
-		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
-		break;
-
-	case N2_NIU:
-		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
-		break;
-	}
-
-	if (status != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_add_intrs_adv_type_fix:nxge_ldgv_init "
-			"failed: 0x%x", status));
-		/* Free already allocated interrupts */
-		for (y = 0; y < nactual; y++) {
-			(void) ddi_intr_free(intrp->htable[y]);
-		}
-
-		kmem_free(intrp->htable, intrp->intr_size);
-		return (status);
-	}
-
-	ldgp = nxgep->ldgvp->ldgp;
-	for (x = 0; x < nrequired; x++, ldgp++) {
-		ldgp->vector = (uint8_t)x;
-		if (nxgep->niu_type != N2_NIU) {
-			ldgp->intdata = SID_DATA(ldgp->func, x);
-		}
-
-		arg1 = ldgp->ldvp;
-		arg2 = nxgep;
-		if (ldgp->nldvs == 1) {
-			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
-			NXGE_DEBUG_MSG((nxgep, INT_CTL,
-				"nxge_add_intrs_adv_type_fix: "
-				"1-1 int handler(%d) ldg %d ldv %d "
-				"arg1 $%p arg2 $%p\n",
-				x, ldgp->ldg, ldgp->ldvp->ldv,
-				arg1, arg2));
-		} else if (ldgp->nldvs > 1) {
-			inthandler = (uint_t *)ldgp->sys_intr_handler;
-			NXGE_DEBUG_MSG((nxgep, INT_CTL,
-				"nxge_add_intrs_adv_type_fix: "
-				"shared ldv %d int handler(%d) ldv %d ldg %d"
-				"arg1 0x%016llx arg2 0x%016llx\n",
-				x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
-				arg1, arg2));
-		}
-
-		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
-			(ddi_intr_handler_t *)inthandler, arg1, arg2))
-				!= DDI_SUCCESS) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"==> nxge_add_intrs_adv_type_fix: failed #%d "
-				"status 0x%x", x, ddi_status));
-			for (y = 0; y < intrp->intr_added; y++) {
-				(void) ddi_intr_remove_handler(
-						intrp->htable[y]);
-			}
-			for (y = 0; y < nactual; y++) {
-				(void) ddi_intr_free(intrp->htable[y]);
-			}
-			/* Free already allocated intr */
-			kmem_free(intrp->htable, intrp->intr_size);
-
-			(void) nxge_ldgv_uninit(nxgep);
-
-			return (NXGE_ERROR | NXGE_DDI_FAILED);
-		}
-		intrp->intr_added++;
-	}
-
-	intrp->msi_intx_cnt = nactual;
-
-	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
-
-	status = nxge_intr_ldgv_init(nxgep);
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix"));
-
-	return (status);
-}
-
-static void
-nxge_remove_intrs(p_nxge_t nxgep)
-{
-	int		i, inum;
-	p_nxge_intr_t	intrp;
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs"));
-	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
-	if (!intrp->intr_registered) {
-		NXGE_DEBUG_MSG((nxgep, INT_CTL,
-			"<== nxge_remove_intrs: interrupts not registered"));
-		return;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced"));
-
-	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
-		(void) ddi_intr_block_disable(intrp->htable,
-			intrp->intr_added);
-	} else {
-		for (i = 0; i < intrp->intr_added; i++) {
-			(void) ddi_intr_disable(intrp->htable[i]);
-		}
-	}
-
-	for (inum = 0; inum < intrp->intr_added; inum++) {
-		if (intrp->htable[inum]) {
-			(void) ddi_intr_remove_handler(intrp->htable[inum]);
-		}
-	}
-
-	for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
-		if (intrp->htable[inum]) {
-			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-				"nxge_remove_intrs: ddi_intr_free inum %d "
-				"msi_intx_cnt %d intr_added %d",
-				inum,
-				intrp->msi_intx_cnt,
-				intrp->intr_added));
-
-			(void) ddi_intr_free(intrp->htable[inum]);
-		}
-	}
-
-	kmem_free(intrp->htable, intrp->intr_size);
-	intrp->intr_registered = B_FALSE;
-	intrp->intr_enabled = B_FALSE;
-	intrp->msi_intx_cnt = 0;
-	intrp->intr_added = 0;
-
-	(void) nxge_ldgv_uninit(nxgep);
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs"));
-}
-
-/*ARGSUSED*/
-static void
-nxge_remove_soft_intrs(p_nxge_t nxgep)
-{
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_soft_intrs"));
-	if (nxgep->resched_id) {
-		ddi_remove_softintr(nxgep->resched_id);
-		NXGE_DEBUG_MSG((nxgep, INT_CTL,
-			"==> nxge_remove_soft_intrs: removed"));
-		nxgep->resched_id = NULL;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_soft_intrs"));
-}
-
-/*ARGSUSED*/
-static void
-nxge_intrs_enable(p_nxge_t nxgep)
-{
-	p_nxge_intr_t	intrp;
-	int		i;
-	int		status;
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable"));
-
-	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
-
-	if (!intrp->intr_registered) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: "
-			"interrupts are not registered"));
-		return;
-	}
-
-	if (intrp->intr_enabled) {
-		NXGE_DEBUG_MSG((nxgep, INT_CTL,
-			"<== nxge_intrs_enable: already enabled"));
-		return;
-	}
-
-	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
-		status = ddi_intr_block_enable(intrp->htable,
-			intrp->intr_added);
-		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
-			"block enable - status 0x%x total inums #%d\n",
-			status, intrp->intr_added));
-	} else {
-		for (i = 0; i < intrp->intr_added; i++) {
-			status = ddi_intr_enable(intrp->htable[i]);
-			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
-				"ddi_intr_enable:enable - status 0x%x "
-				"total inums %d enable inum #%d\n",
-				status, intrp->intr_added, i));
-			if (status == DDI_SUCCESS) {
-				intrp->intr_enabled = B_TRUE;
-			}
-		}
-	}
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable"));
-}
-
-/*ARGSUSED*/
-static void
-nxge_intrs_disable(p_nxge_t nxgep)
-{
-	p_nxge_intr_t	intrp;
-	int		i;
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable"));
-
-	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
-
-	if (!intrp->intr_registered) {
-		NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: "
-			"interrupts are not registered"));
-		return;
-	}
-
-	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
-		(void) ddi_intr_block_disable(intrp->htable,
-			intrp->intr_added);
-	} else {
-		for (i = 0; i < intrp->intr_added; i++) {
-			(void) ddi_intr_disable(intrp->htable[i]);
-		}
-	}
-
-	intrp->intr_enabled = B_FALSE;
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable"));
-}
-
-static nxge_status_t
-nxge_mac_register(p_nxge_t nxgep)
-{
-	mac_register_t *macp;
-	int		status;
-
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register"));
-
-	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
-		return (NXGE_ERROR);
-
-	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
-	macp->m_driver = nxgep;
-	macp->m_dip = nxgep->dip;
-	macp->m_src_addr = nxgep->ouraddr.ether_addr_octet;
-	macp->m_callbacks = &nxge_m_callbacks;
-	macp->m_min_sdu = 0;
-	macp->m_max_sdu = nxgep->mac.maxframesize -
-		sizeof (struct ether_header) - ETHERFCSL - 4;
-
-	status = mac_register(macp, &nxgep->mach);
-	mac_free(macp);
-
-	if (status != 0) {
-		cmn_err(CE_WARN,
-			"!nxge_mac_register failed (status %d instance %d)",
-			status, nxgep->instance);
-		return (NXGE_ERROR);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success "
-		"(instance %d)", nxgep->instance));
-
-	return (NXGE_OK);
-}
-
-void
-nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp)
-{
-	ssize_t		size;
-	mblk_t		*nmp;
-	uint8_t		blk_id;
-	uint8_t		chan;
-	uint32_t	err_id;
-	err_inject_t	*eip;
-
-	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject"));
-
-	size = 1024;
-	nmp = mp->b_cont;
-	eip = (err_inject_t *)nmp->b_rptr;
-	blk_id = eip->blk_id;
-	err_id = eip->err_id;
-	chan = eip->chan;
-	cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id);
-	cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id);
-	cmn_err(CE_NOTE, "!chan = 0x%x\n", chan);
-	switch (blk_id) {
-	case MAC_BLK_ID:
-		break;
-	case TXMAC_BLK_ID:
-		break;
-	case RXMAC_BLK_ID:
-		break;
-	case MIF_BLK_ID:
-		break;
-	case IPP_BLK_ID:
-		nxge_ipp_inject_err(nxgep, err_id);
-		break;
-	case TXC_BLK_ID:
-		nxge_txc_inject_err(nxgep, err_id);
-		break;
-	case TXDMA_BLK_ID:
-		nxge_txdma_inject_err(nxgep, err_id, chan);
-		break;
-	case RXDMA_BLK_ID:
-		nxge_rxdma_inject_err(nxgep, err_id, chan);
-		break;
-	case ZCP_BLK_ID:
-		nxge_zcp_inject_err(nxgep, err_id);
-		break;
-	case ESPC_BLK_ID:
-		break;
-	case FFLP_BLK_ID:
-		break;
-	case PHY_BLK_ID:
-		break;
-	case ETHER_SERDES_BLK_ID:
-		break;
-	case PCIE_SERDES_BLK_ID:
-		break;
-	case VIR_BLK_ID:
-		break;
-	}
-
-	nmp->b_wptr = nmp->b_rptr + size;
-	NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject"));
-
-	miocack(wq, mp, (int)size, 0);
-}
-
-static int
-nxge_init_common_dev(p_nxge_t nxgep)
-{
-	p_nxge_hw_list_t	hw_p;
-	dev_info_t 		*p_dip;
-
-	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device"));
-
-	p_dip = nxgep->p_dip;
-	MUTEX_ENTER(&nxge_common_lock);
-	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
-		"==> nxge_init_common_dev:func # %d",
-			nxgep->function_num));
-	/*
-	 * Loop through the existing per-Neptune hardware list.
-	 */
-	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
-		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
-			"==> nxge_init_common_device:func # %d "
-			"hw_p $%p parent dip $%p",
-			nxgep->function_num,
-			hw_p,
-			p_dip));
-		if (hw_p->parent_devp == p_dip) {
-			nxgep->nxge_hw_p = hw_p;
-			hw_p->ndevs++;
-			hw_p->nxge_p[nxgep->function_num] = nxgep;
-			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
-				"==> nxge_init_common_device:func # %d "
-				"hw_p $%p parent dip $%p "
-				"ndevs %d (found)",
-				nxgep->function_num,
-				hw_p,
-				p_dip,
-				hw_p->ndevs));
-			break;
-		}
-	}
-
-	if (hw_p == NULL) {
-		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
-			"==> nxge_init_common_device:func # %d "
-			"parent dip $%p (new)",
-			nxgep->function_num,
-			p_dip));
-		hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP);
-		hw_p->parent_devp = p_dip;
-		hw_p->magic = NXGE_NEPTUNE_MAGIC;
-		nxgep->nxge_hw_p = hw_p;
-		hw_p->ndevs++;
-		hw_p->nxge_p[nxgep->function_num] = nxgep;
-		hw_p->next = nxge_hw_list;
-
-		MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
-		MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
-		MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
-		MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL);
-		MUTEX_INIT(&hw_p->nxge_mii_lock, NULL, MUTEX_DRIVER, NULL);
-
-		nxge_hw_list = hw_p;
-	}
-
-	MUTEX_EXIT(&nxge_common_lock);
-	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
-		"==> nxge_init_common_device (nxge_hw_list) $%p",
-		nxge_hw_list));
-	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device"));
-
-	return (NXGE_OK);
-}
-
-static void
-nxge_uninit_common_dev(p_nxge_t nxgep)
-{
-	p_nxge_hw_list_t	hw_p, h_hw_p;
-	dev_info_t 		*p_dip;
-
-	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device"));
-	if (nxgep->nxge_hw_p == NULL) {
-		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
-			"<== nxge_uninit_common_device (no common)"));
-		return;
-	}
-
-	MUTEX_ENTER(&nxge_common_lock);
-	h_hw_p = nxge_hw_list;
-	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
-		p_dip = hw_p->parent_devp;
-		if (nxgep->nxge_hw_p == hw_p &&
-			p_dip == nxgep->p_dip &&
-			nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC &&
-			hw_p->magic == NXGE_NEPTUNE_MAGIC) {
-
-			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
-				"==> nxge_uninit_common_device:func # %d "
-				"hw_p $%p parent dip $%p "
-				"ndevs %d (found)",
-				nxgep->function_num,
-				hw_p,
-				p_dip,
-				hw_p->ndevs));
-
-			nxgep->nxge_hw_p = NULL;
-			if (hw_p->ndevs) {
-				hw_p->ndevs--;
-			}
-			hw_p->nxge_p[nxgep->function_num] = NULL;
-			if (!hw_p->ndevs) {
-				MUTEX_DESTROY(&hw_p->nxge_vlan_lock);
-				MUTEX_DESTROY(&hw_p->nxge_tcam_lock);
-				MUTEX_DESTROY(&hw_p->nxge_cfg_lock);
-				MUTEX_DESTROY(&hw_p->nxge_mdio_lock);
-				MUTEX_DESTROY(&hw_p->nxge_mii_lock);
-				NXGE_DEBUG_MSG((nxgep, MOD_CTL,
-					"==> nxge_uninit_common_device: "
-					"func # %d "
-					"hw_p $%p parent dip $%p "
-					"ndevs %d (last)",
-					nxgep->function_num,
-					hw_p,
-					p_dip,
-					hw_p->ndevs));
-
-				if (hw_p == nxge_hw_list) {
-					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
-						"==> nxge_uninit_common_device:"
-						"remove head func # %d "
-						"hw_p $%p parent dip $%p "
-						"ndevs %d (head)",
-						nxgep->function_num,
-						hw_p,
-						p_dip,
-						hw_p->ndevs));
-					nxge_hw_list = hw_p->next;
-				} else {
-					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
-						"==> nxge_uninit_common_device:"
-						"remove middle func # %d "
-						"hw_p $%p parent dip $%p "
-						"ndevs %d (middle)",
-						nxgep->function_num,
-						hw_p,
-						p_dip,
-						hw_p->ndevs));
-					h_hw_p->next = hw_p->next;
-				}
-
-				KMEM_FREE(hw_p, sizeof (nxge_hw_list_t));
-			}
-			break;
-		} else {
-			h_hw_p = hw_p;
-		}
-	}
-
-	MUTEX_EXIT(&nxge_common_lock);
-	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
-		"==> nxge_uninit_common_device (nxge_hw_list) $%p",
-		nxge_hw_list));
-
-	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_uninit_common_device"));
-}
--- a/usr/src/uts/sun4v/io/nxge/nxge_ndd.c	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,2547 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#include <sys/nxge/nxge_impl.h>
-#include <inet/common.h>
-#include <inet/mi.h>
-#include <inet/nd.h>
-
-extern uint64_t npi_debug_level;
-
-#define	NXGE_PARAM_MAC_RW \
-	NXGE_PARAM_RW | NXGE_PARAM_MAC | \
-	NXGE_PARAM_NDD_WR_OK | NXGE_PARAM_READ_PROP
-
-#define	NXGE_PARAM_MAC_DONT_SHOW \
-	NXGE_PARAM_RW | NXGE_PARAM_MAC | NXGE_PARAM_DONT_SHOW
-
-#define	NXGE_PARAM_RXDMA_RW \
-	NXGE_PARAM_RWP | NXGE_PARAM_RXDMA | NXGE_PARAM_NDD_WR_OK | \
-	NXGE_PARAM_READ_PROP
-
-#define	NXGE_PARAM_RXDMA_RWC \
-	NXGE_PARAM_RWP | NXGE_PARAM_RXDMA | NXGE_PARAM_INIT_ONLY | \
-	NXGE_PARAM_READ_PROP
-
-#define	NXGE_PARAM_L2CLASS_CFG \
-	NXGE_PARAM_RW | NXGE_PARAM_PROP_ARR32 | NXGE_PARAM_READ_PROP | \
-	NXGE_PARAM_NDD_WR_OK
-
-#define	NXGE_PARAM_CLASS_RWS \
-	NXGE_PARAM_RWS |  NXGE_PARAM_READ_PROP
-
-#define	NXGE_PARAM_ARRAY_INIT_SIZE	0x20ULL
-
-#define	SET_RX_INTR_TIME_DISABLE 0
-#define	SET_RX_INTR_TIME_ENABLE 1
-#define	SET_RX_INTR_PKTS 2
-
-#define	BASE_ANY	0
-#define	BASE_BINARY 	2
-#define	BASE_HEX	16
-#define	BASE_DECIMAL	10
-#define	ALL_FF_64	0xFFFFFFFFFFFFFFFFULL
-#define	ALL_FF_32	0xFFFFFFFFUL
-
-#define	NXGE_NDD_INFODUMP_BUFF_SIZE	2048 /* is 2k enough? */
-#define	NXGE_NDD_INFODUMP_BUFF_8K	8192
-#define	NXGE_NDD_INFODUMP_BUFF_16K	0x4000
-#define	NXGE_NDD_INFODUMP_BUFF_64K	0x10000
-
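-/*
- * PARAM_OUTOF_RANGE() is used by the ndd set routines below: it evaluates
- * to true when mi_strtol() consumed no characters (vptr == eptr) or when
- * the parsed value lies outside the parameter's [minimum, maximum] range,
- * in which case the caller returns EINVAL.
- */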
-#define	PARAM_OUTOF_RANGE(vptr, eptr, rval, pa)	\
-	((vptr == eptr) || (rval < pa->minimum) || (rval > pa->maximum))
-
-#define	ADVANCE_PRINT_BUFFER(pmp, plen, rlen) { \
-	((mblk_t *)pmp)->b_wptr += plen; \
-	rlen -= plen; \
-}
-
-static int nxge_param_rx_intr_pkts(p_nxge_t, queue_t *,
-	mblk_t *, char *, caddr_t);
-static int nxge_param_rx_intr_time(p_nxge_t, queue_t *,
-	mblk_t *, char *, caddr_t);
-static int nxge_param_set_mac(p_nxge_t, queue_t *,
-	mblk_t *, char *, caddr_t);
-static int nxge_param_set_port_rdc(p_nxge_t, queue_t *,
-	mblk_t *, char *, caddr_t);
-static int nxge_param_set_grp_rdc(p_nxge_t, queue_t *,
-	mblk_t *, char *, caddr_t);
-static int nxge_param_set_ether_usr(p_nxge_t,
-	queue_t *, mblk_t *, char *, caddr_t);
-static int nxge_param_set_ip_usr(p_nxge_t,
-	queue_t *, mblk_t *, char *, caddr_t);
-static int nxge_param_set_ip_opt(p_nxge_t,
-	queue_t *, mblk_t *, char *, caddr_t);
-static int nxge_param_set_vlan_rdcgrp(p_nxge_t,
-	queue_t *, mblk_t *, char *, caddr_t);
-static int nxge_param_set_mac_rdcgrp(p_nxge_t,
-	queue_t *, mblk_t *, char *, caddr_t);
-static int nxge_param_fflp_hash_init(p_nxge_t,
-	queue_t *, mblk_t *, char *, caddr_t);
-static int nxge_param_llc_snap_enable(p_nxge_t, queue_t *,
-	mblk_t *, char *, caddr_t);
-static int nxge_param_hash_lookup_enable(p_nxge_t, queue_t *,
-	mblk_t *, char *, caddr_t);
-static int nxge_param_tcam_enable(p_nxge_t, queue_t *,
-	mblk_t *, char *, caddr_t);
-static int nxge_param_get_rxdma_info(p_nxge_t, queue_t *q,
-	p_mblk_t, caddr_t);
-static int nxge_param_get_txdma_info(p_nxge_t, queue_t *q,
-	p_mblk_t, caddr_t);
-static int nxge_param_get_vlan_rdcgrp(p_nxge_t, queue_t *,
-	p_mblk_t, caddr_t);
-static int nxge_param_get_mac_rdcgrp(p_nxge_t, queue_t *,
-	p_mblk_t, caddr_t);
-static int nxge_param_get_rxdma_rdcgrp_info(p_nxge_t, queue_t *,
-	p_mblk_t, caddr_t);
-static int nxge_param_get_ip_opt(p_nxge_t, queue_t *, mblk_t *, caddr_t);
-static int nxge_param_get_mac(p_nxge_t, queue_t *q, p_mblk_t, caddr_t);
-static int nxge_param_get_debug_flag(p_nxge_t, queue_t *, p_mblk_t, caddr_t);
-static int nxge_param_set_nxge_debug_flag(p_nxge_t, queue_t *, mblk_t *,
-	char *, caddr_t);
-static int nxge_param_set_npi_debug_flag(p_nxge_t,
-	queue_t *, mblk_t *, char *, caddr_t);
-static int nxge_param_dump_rdc(p_nxge_t, queue_t *q, p_mblk_t, caddr_t);
-static int nxge_param_dump_tdc(p_nxge_t, queue_t *q, p_mblk_t, caddr_t);
-static int nxge_param_dump_mac_regs(p_nxge_t, queue_t *, p_mblk_t, caddr_t);
-static int nxge_param_dump_ipp_regs(p_nxge_t, queue_t *, p_mblk_t, caddr_t);
-static int nxge_param_dump_fflp_regs(p_nxge_t, queue_t *, p_mblk_t, caddr_t);
-static int nxge_param_dump_vlan_table(p_nxge_t, queue_t *, p_mblk_t, caddr_t);
-static int nxge_param_dump_rdc_table(p_nxge_t, queue_t *, p_mblk_t, caddr_t);
-static int nxge_param_dump_ptrs(p_nxge_t, queue_t *, p_mblk_t, caddr_t);
-static boolean_t nxge_param_link_update(p_nxge_t);
-
-/*
- * Global array of Neptune changeable parameters.
- * This array is initialized to correspond to the default
- * Neptune 4-port configuration. It is copied into each port's
- * parameter structure and modified per the fcode and nxge.conf
- * configuration. Later, the parameters (at least some of them)
- * are exported to ndd for display and run-time configuration.
- *
- */
-
-static nxge_param_t	nxge_param_arr[] = {
-	/*
-	 * min	max	value	old	hw-name	conf-name
-	 */
-	{ nxge_param_get_generic, NULL, NXGE_PARAM_READ,
-		0, 999, 1000, 0, "instance", "instance"},
-
-	{ nxge_param_get_generic, NULL, NXGE_PARAM_READ,
-		0, 999, 1000, 0, "main-instance", "main_instance"},
-
-	{ nxge_param_get_generic, NULL, NXGE_PARAM_READ,
-		0, 3, 0, 0, "function-number", "function_number"},
-
-	/* Partition Id */
-	{ nxge_param_get_generic, NULL, NXGE_PARAM_READ,
-		0, 8, 0, 0, "partition-id", "partition_id"},
-
-	/* Read Write Permission Mode */
-	{ nxge_param_get_generic, NULL, NXGE_PARAM_READ | NXGE_PARAM_DONT_SHOW,
-		0, 2, 0, 0, "read-write-mode", "read_write_mode"},
-
-	/* hw cfg types */
-	/* control the DMA config of Neptune/NIU */
-	{ nxge_param_get_generic, NULL, NXGE_PARAM_READ,
-		CFG_DEFAULT, CFG_CUSTOM, CFG_DEFAULT, CFG_DEFAULT,
-		"niu-cfg-type", "niu_cfg_type"},
-
-	/* control the TXDMA config of the Port controlled by tx-quick-cfg */
-	{ nxge_param_get_generic, NULL, NXGE_PARAM_READ,
-		CFG_DEFAULT, CFG_CUSTOM, CFG_NOT_SPECIFIED, CFG_DEFAULT,
-		"tx-qcfg-type", "tx_qcfg_type"},
-
-	/* control the RXDMA config of the Port controlled by rx-quick-cfg */
-	{ nxge_param_get_generic, NULL, NXGE_PARAM_READ,
-		CFG_DEFAULT, CFG_CUSTOM, CFG_NOT_SPECIFIED, CFG_DEFAULT,
-		"rx-qcfg-type", "rx_qcfg_type"},
-
-	{ nxge_param_get_mac, nxge_param_set_mac,
-		NXGE_PARAM_RW  | NXGE_PARAM_DONT_SHOW,
-		0, 1, 0, 0, "master-cfg-enable", "master_cfg_enable"},
-
-	{ nxge_param_get_mac, nxge_param_set_mac,
-		NXGE_PARAM_RW | NXGE_PARAM_DONT_SHOW,
-		0, 1, 0, 0, "master-cfg-value", "master_cfg_value"},
-
-	{ nxge_param_get_mac, nxge_param_set_mac, NXGE_PARAM_MAC_RW,
-		0, 1, 1, 1, "adv-autoneg-cap", "adv_autoneg_cap"},
-
-	{ nxge_param_get_mac, nxge_param_set_mac, NXGE_PARAM_MAC_RW,
-		0, 1, 1, 1, "adv-10gfdx-cap", "adv_10gfdx_cap"},
-
-	{ nxge_param_get_mac, nxge_param_set_mac, NXGE_PARAM_MAC_DONT_SHOW,
-		0, 1, 0, 0, "adv-10ghdx-cap", "adv_10ghdx_cap"},
-
-	{ nxge_param_get_mac, nxge_param_set_mac, NXGE_PARAM_MAC_RW,
-		0, 1, 1, 1, "adv-1000fdx-cap", "adv_1000fdx_cap"},
-
-	{ nxge_param_get_mac, nxge_param_set_mac, NXGE_PARAM_MAC_DONT_SHOW,
-		0, 1, 0, 0, "adv-1000hdx-cap",	"adv_1000hdx_cap"},
-
-	{ nxge_param_get_mac, nxge_param_set_mac, NXGE_PARAM_MAC_DONT_SHOW,
-		0, 1, 0, 0, "adv-100T4-cap", "adv_100T4_cap"},
-
-	{ nxge_param_get_mac, nxge_param_set_mac, NXGE_PARAM_MAC_RW,
-		0, 1, 1, 1, "adv-100fdx-cap", "adv_100fdx_cap"},
-
-	{ nxge_param_get_mac, nxge_param_set_mac, NXGE_PARAM_MAC_DONT_SHOW,
-		0, 1, 0, 0, "adv-100hdx-cap", "adv_100hdx_cap"},
-
-	{ nxge_param_get_mac, nxge_param_set_mac, NXGE_PARAM_MAC_RW,
-		0, 1, 1, 1, "adv-10fdx-cap", "adv_10fdx_cap"},
-
-	{ nxge_param_get_mac, nxge_param_set_mac, NXGE_PARAM_MAC_DONT_SHOW,
-		0, 1, 0, 0, "adv-10hdx-cap", "adv_10hdx_cap"},
-
-	{ nxge_param_get_mac, nxge_param_set_mac, NXGE_PARAM_MAC_RW,
-		0, 1, 0, 0, "adv-asmpause-cap",	"adv_asmpause_cap"},
-
-	{ nxge_param_get_mac, nxge_param_set_mac, NXGE_PARAM_MAC_RW,
-		0, 1, 0, 0, "adv-pause-cap", "adv_pause_cap"},
-
-	{ nxge_param_get_mac, nxge_param_set_mac, NXGE_PARAM_MAC_RW,
-		0, 1, 0, 0, "use-int-xcvr", "use_int_xcvr"},
-
-	{ nxge_param_get_mac, nxge_param_set_mac, NXGE_PARAM_MAC_RW,
-		0, 1, 1, 1, "enable-ipg0", "enable_ipg0"},
-
-	{ nxge_param_get_mac, nxge_param_set_mac, NXGE_PARAM_MAC_RW,
-		0, 255,	8, 8, "ipg0", "ipg0"},
-
-	{ nxge_param_get_mac, nxge_param_set_mac, NXGE_PARAM_MAC_RW,
-		0, 255,	8, 8, "ipg1", "ipg1"},
-
-	{ nxge_param_get_mac, nxge_param_set_mac, NXGE_PARAM_MAC_RW,
-		0, 255,	4, 4, "ipg2", "ipg2"},
-
-	{ nxge_param_get_mac, nxge_param_set_mac, NXGE_PARAM_MAC_RW,
-		0, 1, 0, 0, "accept-jumbo", "accept_jumbo"},
-
-	/* Transmit DMA channels */
-	{ nxge_param_get_generic, NULL, NXGE_PARAM_READ | NXGE_PARAM_READ_PROP,
-		0, 3, 0, 0, "tx-dma-weight", "tx_dma_weight"},
-
-	{ nxge_param_get_generic, NULL, NXGE_PARAM_READ | NXGE_PARAM_READ_PROP,
-		0, 31, 0, 0, "tx-dma-channels-begin", "tx_dma_channels_begin"},
-
-	{ nxge_param_get_generic, NULL, NXGE_PARAM_READ | NXGE_PARAM_READ_PROP,
-		0, 32, 0, 0, "tx-dma-channels", "tx_dma_channels"},
-	{ nxge_param_get_txdma_info, NULL,
-		NXGE_PARAM_READ | NXGE_PARAM_READ_PROP,
-		0, 32, 0, 0, "tx-dma-info", "tx_dma_info"},
-
-	/* Receive DMA channels */
-	{ nxge_param_get_generic, NULL,
-		NXGE_PARAM_READ | NXGE_PARAM_READ_PROP,
-		0, 31, 0, 0, "rx-dma-channels-begin", "rx_dma_channels_begin"},
-
-	{ nxge_param_get_generic, NULL, NXGE_PARAM_READ | NXGE_PARAM_READ_PROP,
-		0, 32, 0, 0, "rx-dma-channels",	"rx_dma_channels"},
-
-	{ nxge_param_get_generic, NULL, NXGE_PARAM_READ | NXGE_PARAM_READ_PROP,
-		0, 65535, PT_DRR_WT_DEFAULT_10G, 0,
-		"rx-drr-weight", "rx_drr_weight"},
-
-	{ nxge_param_get_generic, NULL, NXGE_PARAM_READ | NXGE_PARAM_READ_PROP,
-		0, 1, 1, 0, "rx-full-header", "rx_full_header"},
-
-	{ nxge_param_get_rxdma_info, NULL, NXGE_PARAM_READ,
-		0, 32, 0, 0, "rx-dma-info", "rx_dma_info"},
-
-	{ nxge_param_get_rxdma_info, NULL,
-		NXGE_PARAM_READ | NXGE_PARAM_DONT_SHOW,
-		NXGE_RBR_RBB_MIN, NXGE_RBR_RBB_MAX, NXGE_RBR_RBB_DEFAULT, 0,
-		"rx-rbr-size", "rx_rbr_size"},
-
-	{ nxge_param_get_rxdma_info, NULL,
-		NXGE_PARAM_READ | NXGE_PARAM_DONT_SHOW,
-		NXGE_RCR_MIN, NXGE_RCR_MAX, NXGE_RCR_DEFAULT, 0,
-		"rx-rcr-size", "rx_rcr_size"},
-
-	{ nxge_param_get_generic, nxge_param_set_port_rdc, NXGE_PARAM_RXDMA_RW,
-		0, 15, 0, 0, "default-port-rdc", "default_port_rdc"},
-
-	{ nxge_param_get_generic, nxge_param_rx_intr_time, NXGE_PARAM_RXDMA_RW,
-		NXGE_RDC_RCR_TIMEOUT_MIN, NXGE_RDC_RCR_TIMEOUT_MAX,
-		RXDMA_RCR_TO_DEFAULT, 0, "rxdma-intr-time", "rxdma_intr_time"},
-
-	{ nxge_param_get_generic, nxge_param_rx_intr_pkts, NXGE_PARAM_RXDMA_RW,
-		NXGE_RDC_RCR_THRESHOLD_MIN, NXGE_RDC_RCR_THRESHOLD_MAX,
-		RXDMA_RCR_PTHRES_DEFAULT, 0,
-		"rxdma-intr-pkts", "rxdma_intr_pkts"},
-
-	{ nxge_param_get_generic, NULL, NXGE_PARAM_READ_PROP,
-		0, 8, 0, 0, "rx-rdc-grps-begin", "rx_rdc_grps_begin"},
-
-	{ nxge_param_get_generic, NULL, NXGE_PARAM_READ_PROP,
-		0, 8, 0, 0, "rx-rdc-grps", "rx_rdc_grps"},
-
-	{ nxge_param_get_generic, nxge_param_set_grp_rdc, NXGE_PARAM_RXDMA_RW,
-		0, 15, 0, 0, "default-grp0-rdc", "default_grp0_rdc"},
-
-	{ nxge_param_get_generic, nxge_param_set_grp_rdc, NXGE_PARAM_RXDMA_RW,
-		0, 15,	2, 0, "default-grp1-rdc", "default_grp1_rdc"},
-
-	{ nxge_param_get_generic, nxge_param_set_grp_rdc, NXGE_PARAM_RXDMA_RW,
-		0, 15, 4, 0, "default-grp2-rdc", "default_grp2_rdc"},
-
-	{ nxge_param_get_generic, nxge_param_set_grp_rdc, NXGE_PARAM_RXDMA_RW,
-		0, 15, 6, 0, "default-grp3-rdc", "default_grp3_rdc"},
-
-	{ nxge_param_get_generic, nxge_param_set_grp_rdc, NXGE_PARAM_RXDMA_RW,
-		0, 15, 8, 0, "default-grp4-rdc", "default_grp4_rdc"},
-
-	{ nxge_param_get_generic, nxge_param_set_grp_rdc, NXGE_PARAM_RXDMA_RW,
-		0, 15, 10, 0, "default-grp5-rdc", "default_grp5_rdc"},
-
-	{ nxge_param_get_generic, nxge_param_set_grp_rdc, NXGE_PARAM_RXDMA_RW,
-		0, 15, 12, 0, "default-grp6-rdc", "default_grp6_rdc"},
-
-	{ nxge_param_get_generic, nxge_param_set_grp_rdc, NXGE_PARAM_RXDMA_RW,
-		0, 15, 14, 0, "default-grp7-rdc", "default_grp7_rdc"},
-
-	{ nxge_param_get_rxdma_rdcgrp_info, NULL,
-		NXGE_PARAM_READ | NXGE_PARAM_CMPLX,
-		0, 8, 0, 0, "rdc-groups-info", "rdc_groups_info"},
-
-	/* Logical device groups */
-	{ nxge_param_get_generic, NULL, NXGE_PARAM_READ,
-		0, 63, 0, 0, "start-ldg", "start_ldg"},
-
-	{ nxge_param_get_generic, NULL, NXGE_PARAM_READ,
-		0, 64, 0, 0, "max-ldg", "max_ldg" },
-
-	/* MAC table information */
-	{ nxge_param_get_mac_rdcgrp, nxge_param_set_mac_rdcgrp,
-		NXGE_PARAM_L2CLASS_CFG,
-		0, 31, 0, 0, "mac-2rdc-grp", "mac_2rdc_grp"},
-
-	/* VLAN table information */
-	{ nxge_param_get_vlan_rdcgrp, nxge_param_set_vlan_rdcgrp,
-		NXGE_PARAM_L2CLASS_CFG,
-		0, 31, 0, 0, "vlan-2rdc-grp", "vlan_2rdc_grp"},
-
-	{ nxge_param_get_generic, NULL,
-		NXGE_PARAM_READ_PROP | NXGE_PARAM_READ | NXGE_PARAM_PROP_ARR32,
-		0, 0x0ffff, 0x0ffff, 0, "fcram-part-cfg", "fcram_part_cfg"},
-
-	{ nxge_param_get_generic, NULL, NXGE_PARAM_CLASS_RWS,
-		0, 0x10, 0xa, 0, "fcram-access-ratio", "fcram_access_ratio"},
-
-	{ nxge_param_get_generic, NULL, NXGE_PARAM_CLASS_RWS,
-		0, 0x10, 0xa, 0, "tcam-access-ratio", "tcam_access_ratio"},
-
-	{ nxge_param_get_generic, nxge_param_tcam_enable,
-		NXGE_PARAM_CLASS_RWS,
-		0, 0x1, 0x0, 0, "tcam-enable", "tcam_enable"},
-
-	{ nxge_param_get_generic, nxge_param_hash_lookup_enable,
-		NXGE_PARAM_CLASS_RWS,
-		0, 0x01, 0x0, 0, "hash-lookup-enable", "hash_lookup_enable"},
-
-	{ nxge_param_get_generic, nxge_param_llc_snap_enable,
-		NXGE_PARAM_CLASS_RWS,
-		0, 0x01, 0x01, 0, "llc-snap-enable", "llc_snap_enable"},
-
-	{ nxge_param_get_generic, nxge_param_fflp_hash_init,
-		NXGE_PARAM_CLASS_RWS,
-		0, ALL_FF_32, ALL_FF_32, 0, "h1-init-value", "h1_init_value"},
-
-	{ nxge_param_get_generic,	nxge_param_fflp_hash_init,
-		NXGE_PARAM_CLASS_RWS,
-		0, 0x0ffff, 0x0ffff, 0, "h2-init-value", "h2_init_value"},
-
-	{ nxge_param_get_generic, nxge_param_set_ether_usr,
-		NXGE_PARAM_CLASS_RWS | NXGE_PARAM_DONT_SHOW,
-		0, ALL_FF_32, 0x0, 0,
-		"class-cfg-ether-usr1", "class_cfg_ether_usr1"},
-
-	{ nxge_param_get_generic, nxge_param_set_ether_usr,
-		NXGE_PARAM_CLASS_RWS | NXGE_PARAM_DONT_SHOW,
-		0, ALL_FF_32, 0x0, 0,
-		"class-cfg-ether-usr2", "class_cfg_ether_usr2"},
-
-	{ nxge_param_get_generic, nxge_param_set_ip_usr,
-		NXGE_PARAM_CLASS_RWS | NXGE_PARAM_DONT_SHOW,
-		0, ALL_FF_32, 0x0, 0,
-		"class-cfg-ip-usr4", "class_cfg_ip_usr4"},
-
-	{ nxge_param_get_generic, nxge_param_set_ip_usr,
-		NXGE_PARAM_CLASS_RWS | NXGE_PARAM_DONT_SHOW,
-		0, ALL_FF_32, 0x0, 0,
-		"class-cfg-ip-usr5", "class_cfg_ip_usr5"},
-
-	{ nxge_param_get_generic, nxge_param_set_ip_usr,
-		NXGE_PARAM_CLASS_RWS | NXGE_PARAM_DONT_SHOW,
-		0, ALL_FF_32, 0x0, 0,
-		"class-cfg-ip-usr6", "class_cfg_ip_usr6"},
-
-	{ nxge_param_get_generic, nxge_param_set_ip_usr,
-		NXGE_PARAM_CLASS_RWS | NXGE_PARAM_DONT_SHOW,
-		0, ALL_FF_32, 0x0, 0,
-		"class-cfg-ip-usr7", "class_cfg_ip_usr7"},
-
-	{ nxge_param_get_ip_opt, nxge_param_set_ip_opt,
-		NXGE_PARAM_CLASS_RWS | NXGE_PARAM_DONT_SHOW,
-		0, ALL_FF_32, 0x0, 0,
-		"class-opt-ip-usr4", "class_opt_ip_usr4"},
-
-	{ nxge_param_get_ip_opt, nxge_param_set_ip_opt,
-		NXGE_PARAM_CLASS_RWS | NXGE_PARAM_DONT_SHOW,
-		0, ALL_FF_32, 0x0, 0,
-		"class-opt-ip-usr5", "class_opt_ip_usr5"},
-
-	{ nxge_param_get_ip_opt, nxge_param_set_ip_opt,
-		NXGE_PARAM_CLASS_RWS | NXGE_PARAM_DONT_SHOW,
-		0, ALL_FF_32, 0x0, 0,
-		"class-opt-ip-usr6", "class_opt_ip_usr6"},
-
-	{ nxge_param_get_ip_opt, nxge_param_set_ip_opt,
-		NXGE_PARAM_CLASS_RWS | NXGE_PARAM_DONT_SHOW,
-		0, ALL_FF_32, 0x0, 0,
-		"class-opt-ip-usr7", "class_opt_ip_usr7"},
-
-	{ nxge_param_get_ip_opt, nxge_param_set_ip_opt,
-		NXGE_PARAM_CLASS_RWS,
-		0, ALL_FF_32, NXGE_CLASS_FLOW_GEN_SERVER, 0,
-		"class-opt-ipv4-tcp", "class_opt_ipv4_tcp"},
-
-	{ nxge_param_get_ip_opt, nxge_param_set_ip_opt,
-		NXGE_PARAM_CLASS_RWS,
-		0, ALL_FF_32, NXGE_CLASS_FLOW_GEN_SERVER, 0,
-		"class-opt-ipv4-udp", "class_opt_ipv4_udp"},
-
-	{ nxge_param_get_ip_opt, nxge_param_set_ip_opt,
-		NXGE_PARAM_CLASS_RWS,
-		0, ALL_FF_32, NXGE_CLASS_FLOW_GEN_SERVER, 0,
-		"class-opt-ipv4-ah", "class_opt_ipv4_ah"},
-
-	{ nxge_param_get_ip_opt, nxge_param_set_ip_opt,
-		NXGE_PARAM_CLASS_RWS,
-		0, ALL_FF_32, NXGE_CLASS_FLOW_GEN_SERVER, 0,
-		"class-opt-ipv4-sctp", "class_opt_ipv4_sctp"},
-
-	{ nxge_param_get_ip_opt, nxge_param_set_ip_opt, NXGE_PARAM_CLASS_RWS,
-		0, ALL_FF_32, NXGE_CLASS_FLOW_GEN_SERVER, 0,
-		"class-opt-ipv6-tcp", "class_opt_ipv6_tcp"},
-
-	{ nxge_param_get_ip_opt, nxge_param_set_ip_opt, NXGE_PARAM_CLASS_RWS,
-		0, ALL_FF_32, NXGE_CLASS_FLOW_GEN_SERVER, 0,
-		"class-opt-ipv6-udp", "class_opt_ipv6_udp"},
-
-	{ nxge_param_get_ip_opt, nxge_param_set_ip_opt, NXGE_PARAM_CLASS_RWS,
-		0, ALL_FF_32, NXGE_CLASS_FLOW_GEN_SERVER, 0,
-		"class-opt-ipv6-ah", "class_opt_ipv6_ah"},
-
-	{ nxge_param_get_ip_opt, nxge_param_set_ip_opt, NXGE_PARAM_CLASS_RWS,
-		0, ALL_FF_32, NXGE_CLASS_FLOW_GEN_SERVER, 0,
-		"class-opt-ipv6-sctp",	"class_opt_ipv6_sctp"},
-
-	{ nxge_param_get_debug_flag, nxge_param_set_nxge_debug_flag,
-		NXGE_PARAM_RW,
-		0ULL, ALL_FF_64, 0ULL, 0ULL,
-		"nxge-debug-flag", "nxge_debug_flag"},
-
-	{ nxge_param_get_debug_flag, nxge_param_set_npi_debug_flag,
-		NXGE_PARAM_RW,
-		0ULL, ALL_FF_64, 0ULL, 0ULL,
-		"npi-debug-flag", "npi_debug_flag"},
-
-	{ nxge_param_dump_tdc, NULL, NXGE_PARAM_READ,
-		0, 0x0fffffff, 0x0fffffff, 0, "dump-tdc", "dump_tdc"},
-
-	{ nxge_param_dump_rdc, NULL, NXGE_PARAM_READ,
-		0, 0x0fffffff, 0x0fffffff, 0, "dump-rdc", "dump_rdc"},
-
-	{ nxge_param_dump_mac_regs, NULL, NXGE_PARAM_READ,
-		0, 0x0fffffff, 0x0fffffff, 0, "dump-mac-regs", "dump_mac_regs"},
-
-	{ nxge_param_dump_ipp_regs, NULL, NXGE_PARAM_READ,
-		0, 0x0fffffff, 0x0fffffff, 0, "dump-ipp-regs", "dump_ipp_regs"},
-
-	{ nxge_param_dump_fflp_regs, NULL, NXGE_PARAM_READ,
-		0, 0x0fffffff, 0x0fffffff, 0,
-		"dump-fflp-regs", "dump_fflp_regs"},
-
-	{ nxge_param_dump_vlan_table, NULL, NXGE_PARAM_READ,
-		0, 0x0fffffff, 0x0fffffff, 0,
-		"dump-vlan-table", "dump_vlan_table"},
-
-	{ nxge_param_dump_rdc_table, NULL, NXGE_PARAM_READ,
-		0, 0x0fffffff, 0x0fffffff, 0,
-		"dump-rdc-table", "dump_rdc_table"},
-
-	{ nxge_param_dump_ptrs,	NULL, NXGE_PARAM_READ,
-		0, 0x0fffffff, 0x0fffffff, 0, "dump-ptrs", "dump_ptrs"},
-
-	{  NULL, NULL, NXGE_PARAM_READ | NXGE_PARAM_DONT_SHOW,
-		0, 0x0fffffff, 0x0fffffff, 0, "end", "end"},
-};
-
-extern void 		*nxge_list;
-
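-/*
- * Read the "soft" (fcode/PROM and driver.conf) properties for every
- * parameter flagged NXGE_PARAM_READ_PROP.  Array-valued properties are
- * copied into the parameter's preallocated value array (capped at
- * NXGE_PARAM_ARRAY_INIT_SIZE entries); scalar properties are looked up
- * first by fcode name and then by conf name, and accepted only if they
- * fall within the parameter's [minimum, maximum] range.
- */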
-void
-nxge_get_param_soft_properties(p_nxge_t nxgep)
-{
-
-	p_nxge_param_t 		param_arr;
-	uint_t 			prop_len;
-	int 			i, j;
-	uint32_t		param_count;
-	uint32_t		*int_prop_val;
-
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, " ==> nxge_get_param_soft_properties"));
-
-	param_arr = nxgep->param_arr;
-	param_count = nxgep->param_count;
-	for (i = 0; i < param_count; i++) {
-		if ((param_arr[i].type & NXGE_PARAM_READ_PROP) == 0)
-			continue;
-		if ((param_arr[i].type & NXGE_PARAM_PROP_STR))
-			continue;
-		if ((param_arr[i].type & NXGE_PARAM_PROP_ARR32) ||
-				(param_arr[i].type & NXGE_PARAM_PROP_ARR64)) {
-			if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
-					nxgep->dip, 0, param_arr[i].fcode_name,
-					(int **)&int_prop_val,
-					(uint_t *)&prop_len)
-					== DDI_PROP_SUCCESS) {
-				uint32_t *cfg_value;
-				uint64_t prop_count;
-
-				if (prop_len > NXGE_PARAM_ARRAY_INIT_SIZE)
-					prop_len = NXGE_PARAM_ARRAY_INIT_SIZE;
-				cfg_value = (uint32_t *)param_arr[i].value;
-				for (j = 0; j < prop_len; j++) {
-					cfg_value[j] = int_prop_val[j];
-				}
-				prop_count = prop_len;
-				param_arr[i].type |=
-				    (prop_count << NXGE_PARAM_ARRAY_CNT_SHIFT);
-				ddi_prop_free(int_prop_val);
-			}
-			continue;
-		}
-
-		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip, 0,
-				param_arr[i].fcode_name,
-				(int **)&int_prop_val,
-				&prop_len) == DDI_PROP_SUCCESS) {
-			if ((*int_prop_val >= param_arr[i].minimum) &&
-					(*int_prop_val <= param_arr[i].maximum))
-				param_arr[i].value = *int_prop_val;
-#ifdef NXGE_DEBUG_ERROR
-			else {
-				NXGE_DEBUG_MSG((nxgep, OBP_CTL,
-					"nxge%d: 'prom' file parameter error\n",
-					nxgep->instance));
-				NXGE_DEBUG_MSG((nxgep, OBP_CTL,
-					"Parameter keyword '%s'"
-					" is outside valid range\n",
-					param_arr[i].name));
-			}
-#endif
-			ddi_prop_free(int_prop_val);
-		}
-
-		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip, 0,
-				param_arr[i].name,
-				(int **)&int_prop_val,
-				&prop_len) == DDI_PROP_SUCCESS) {
-			if ((*int_prop_val >= param_arr[i].minimum) &&
-				(*int_prop_val <= param_arr[i].maximum))
-				param_arr[i].value = *int_prop_val;
-#ifdef NXGE_DEBUG_ERROR
-			else {
-				NXGE_DEBUG_MSG((nxgep, OBP_CTL,
-					"nxge%d: 'conf' file parameter error\n",
-					nxgep->instance));
-				NXGE_DEBUG_MSG((nxgep, OBP_CTL,
-					"Parameter keyword '%s'"
-					" is outside valid range\n",
-					param_arr[i].name));
-			}
-#endif
-			ddi_prop_free(int_prop_val);
-		}
-	}
-}
-
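-/*
- * Decide whether a private (per-RDC-group or per-TXDMA-channel) parameter
- * applies to this port: the group or channel number is parsed from the
- * parameter name and checked against the port's configuration.  Returns
- * B_TRUE if the parameter should be registered with ndd, B_FALSE otherwise.
- */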
-static int
-nxge_private_param_register(p_nxge_t nxgep, p_nxge_param_t param_arr)
-{
-	int status = B_TRUE;
-	int channel;
-	uint8_t grp;
-	char *prop_name;
-	char *end;
-	uint32_t name_chars;
-
-	NXGE_DEBUG_MSG((nxgep, NDD2_CTL,
-		"nxge_private_param_register %s", param_arr->name));
-
-	if ((param_arr->type & NXGE_PARAM_PRIV) != NXGE_PARAM_PRIV)
-		return (B_TRUE);
-
-	prop_name =  param_arr->name;
-	if (param_arr->type & NXGE_PARAM_RXDMA) {
-		if (strncmp("rxdma_intr", prop_name, 10) == 0)
-			return (B_TRUE);
-		name_chars = strlen("default_grp");
-		if (strncmp("default_grp", prop_name, name_chars) == 0) {
-			prop_name += name_chars;
-			grp = mi_strtol(prop_name, &end, 10);
-				/* now check if this rdcgrp is in config */
-			return (nxge_check_rdcgrp_port_member(nxgep, grp));
-		}
-		name_chars = strlen(prop_name);
-		if (strncmp("default_port_rdc", prop_name, name_chars) == 0) {
-			return (B_TRUE);
-		}
-		return (B_FALSE);
-	}
-
-	if (param_arr->type & NXGE_PARAM_TXDMA) {
-		name_chars = strlen("txdma");
-		if (strncmp("txdma", prop_name, name_chars) == 0) {
-			prop_name += name_chars;
-			channel = mi_strtol(prop_name, &end, 10);
-				/* now check if this rdc is in config */
-			NXGE_DEBUG_MSG((nxgep, NDD2_CTL,
-					    " nxge_private_param_register: %d",
-					    channel));
-			return (nxge_check_txdma_port_member(nxgep, channel));
-		}
-		return (B_FALSE);
-	}
-
-	status = B_FALSE;
-	NXGE_DEBUG_MSG((nxgep, NDD2_CTL, "<== nxge_private_param_register"));
-
-	return (status);
-}
-
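-/*
- * Register the per-port parameters with the ndd framework.  Parameters
- * that do not apply to this port and "don't show" parameters have both
- * their set and get routines cleared; complex and init-only parameters
- * lose only their set routine, making them read-only through ndd.
- */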
-void
-nxge_setup_param(p_nxge_t nxgep)
-{
-	p_nxge_param_t param_arr;
-	int i;
-	pfi_t set_pfi;
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_setup_param"));
-
-	/*
-	 * Make sure the param_instance is set to a valid device instance.
-	 */
-	if (nxge_param_arr[param_instance].value == 1000)
-		nxge_param_arr[param_instance].value = nxgep->instance;
-
-	param_arr = nxgep->param_arr;
-	param_arr[param_instance].value = nxgep->instance;
-	param_arr[param_function_number].value = nxgep->function_num;
-
-	for (i = 0; i < nxgep->param_count; i++) {
-		if ((param_arr[i].type & NXGE_PARAM_PRIV) &&
-				(nxge_private_param_register(nxgep,
-				&param_arr[i]) == B_FALSE)) {
-			param_arr[i].setf = NULL;
-			param_arr[i].getf = NULL;
-		}
-
-		if (param_arr[i].type & NXGE_PARAM_CMPLX)
-			param_arr[i].setf = NULL;
-
-		if (param_arr[i].type & NXGE_PARAM_DONT_SHOW) {
-			param_arr[i].setf = NULL;
-			param_arr[i].getf = NULL;
-		}
-
-		set_pfi = (pfi_t)param_arr[i].setf;
-
-		if ((set_pfi) && (param_arr[i].type & NXGE_PARAM_INIT_ONLY)) {
-			set_pfi = NULL;
-		}
-
-		if (!nxge_nd_load(&nxgep->param_list, param_arr[i].name,
-				(pfi_t)param_arr[i].getf, set_pfi,
-				(caddr_t)&param_arr[i])) {
-			(void) nxge_nd_free(&nxgep->param_list);
-			break;
-		}
-	}
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_setup_param"));
-}
-
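-/*
- * Allocate and initialize this port's private copy of the global
- * parameter array.  Array-typed parameters (PROP_ARR32/PROP_ARR64) get
- * NXGE_PARAM_ARRAY_INIT_SIZE-entry value and old_value buffers, and the
- * allocation count is recorded in the parameter's type field.
- */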
-void
-nxge_init_param(p_nxge_t nxgep)
-{
-	p_nxge_param_t param_arr;
-	int i, alloc_size;
-	uint64_t alloc_count;
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_init_param"));
-	/*
-	 * Make sure the param_instance is set to a valid device instance.
-	 */
-	if (nxge_param_arr[param_instance].value == 1000)
-		nxge_param_arr[param_instance].value = nxgep->instance;
-
-	param_arr = nxgep->param_arr;
-	if (param_arr == NULL) {
-		param_arr = (p_nxge_param_t)
-			KMEM_ZALLOC(sizeof (nxge_param_arr), KM_SLEEP);
-	}
-
-	for (i = 0; i < sizeof (nxge_param_arr)/sizeof (nxge_param_t); i++) {
-		param_arr[i] = nxge_param_arr[i];
-		if ((param_arr[i].type & NXGE_PARAM_PROP_ARR32) ||
-			(param_arr[i].type & NXGE_PARAM_PROP_ARR64)) {
-			alloc_count = NXGE_PARAM_ARRAY_INIT_SIZE;
-			alloc_size = alloc_count * sizeof (uint64_t);
-			param_arr[i].value =
-			    (uint64_t)KMEM_ZALLOC(alloc_size, KM_SLEEP);
-			param_arr[i].old_value =
-				    (uint64_t)KMEM_ZALLOC(alloc_size, KM_SLEEP);
-			param_arr[i].type |=
-				(alloc_count << NXGE_PARAM_ARRAY_ALLOC_SHIFT);
-		}
-	}
-
-	nxgep->param_arr = param_arr;
-	nxgep->param_count = sizeof (nxge_param_arr)/sizeof (nxge_param_t);
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init_param: count %d",
-		nxgep->param_count));
-}
-
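-/*
- * Undo nxge_init_param()/nxge_setup_param(): pick a new value for the
- * shared param_instance if this port was the current instance, then free
- * the ndd parameter list, the per-array value buffers, and the per-port
- * parameter array.
- */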
-void
-nxge_destroy_param(p_nxge_t nxgep)
-{
-	int i;
-	uint64_t free_size, free_count;
-
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_param"));
-
-	/*
-	 * Make sure the param_instance is set to a valid device instance.
-	 */
-	if (nxge_param_arr[param_instance].value == nxgep->instance) {
-		for (i = 0; i <= nxge_param_arr[param_instance].maximum; i++) {
-			if ((ddi_get_soft_state(nxge_list, i) != NULL) &&
-				(i != nxgep->instance))
-				break;
-		}
-		nxge_param_arr[param_instance].value = i;
-	}
-
-	if (nxgep->param_list)
-		nxge_nd_free(&nxgep->param_list);
-	for (i = 0; i < nxgep->param_count; i++)
-		if ((nxgep->param_arr[i].type & NXGE_PARAM_PROP_ARR32) ||
-			(nxgep->param_arr[i].type & NXGE_PARAM_PROP_ARR64)) {
-			free_count = ((nxgep->param_arr[i].type &
-					    NXGE_PARAM_ARRAY_ALLOC_MASK) >>
-					    NXGE_PARAM_ARRAY_ALLOC_SHIFT);
-			free_count = NXGE_PARAM_ARRAY_INIT_SIZE;
-			free_size = sizeof (uint64_t) * free_count;
-			KMEM_FREE((void *)nxgep->param_arr[i].value, free_size);
-			KMEM_FREE((void *)nxgep->param_arr[i].old_value,
-				free_size);
-		}
-
-	KMEM_FREE(nxgep->param_arr, sizeof (nxge_param_arr));
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_param"));
-}
-
-/*
- * Extracts the value from the 'nxge' parameter array and prints the
- * parameter value. cp points to the required parameter.
- */
-
-/* ARGSUSED */
-int
-nxge_param_get_generic(p_nxge_t nxgep, queue_t *q, p_mblk_t mp, caddr_t cp)
-{
-	p_nxge_param_t pa = (p_nxge_param_t)cp;
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL,
-		"==> nxge_param_get_generic name %s ", pa->name));
-
-	if (pa->value > 0xffffffff)
-		(void) mi_mpprintf(mp, "%x%x",
-			(int)(pa->value >> 32), (int)(pa->value & 0xffffffff));
-	else
-		(void) mi_mpprintf(mp, "%x", (int)pa->value);
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_get_generic"));
-	return (0);
-}
-
-/* ARGSUSED */
-static int
-nxge_param_get_mac(p_nxge_t nxgep, queue_t *q, p_mblk_t mp, caddr_t cp)
-{
-	p_nxge_param_t pa = (p_nxge_param_t)cp;
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_get_mac"));
-
-	(void) mi_mpprintf(mp, "%d", (uint32_t)pa->value);
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_get_mac"));
-	return (0);
-}
-
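-/*
- * Dump the transmit DMA channel bindings (logical TDC to hardware TDC)
- * for this port.  The report is appended to a freshly allocated mblk
- * chained off 'mp', advancing the write pointer after each snprintf().
- */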
-/* ARGSUSED */
-int
-nxge_param_get_txdma_info(p_nxge_t nxgep, queue_t *q, p_mblk_t mp, caddr_t cp)
-{
-
-	uint_t	print_len, buf_len;
-	p_mblk_t np;
-	int tdc;
-
-	int buff_alloc_size = NXGE_NDD_INFODUMP_BUFF_SIZE;
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_get_txdma_info"));
-
-	(void) mi_mpprintf(mp, "TXDMA Information for Port\t %d \n",
-		nxgep->function_num);
-
-
-	if ((np = allocb(buff_alloc_size, BPRI_HI)) == NULL) {
-		(void) mi_mpprintf(mp, "%s\n", "out of buffer");
-		return (0);
-	}
-
-	buf_len = buff_alloc_size;
-	mp->b_cont = np;
-
-	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
-		"Total TDCs\t %d\n", nxgep->ntdc);
-
-	((mblk_t *)np)->b_wptr += print_len;
-	buf_len -= print_len;
-	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
-		"TDC\t HW TDC\t\n");
-	((mblk_t *)np)->b_wptr += print_len;
-
-	buf_len -= print_len;
-	for (tdc = 0; tdc < nxgep->ntdc; tdc++) {
-		print_len = snprintf((char *)((mblk_t *)np)->b_wptr,
-					    buf_len, "%d\t %d\n",
-					    tdc, nxgep->tdc[tdc]);
-		((mblk_t *)np)->b_wptr += print_len;
-		buf_len -= print_len;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_get_txdma_info"));
-	return (0);
-}
-
-/* ARGSUSED */
-int
-nxge_param_get_rxdma_info(p_nxge_t nxgep, queue_t *q, p_mblk_t mp, caddr_t cp)
-{
-	uint_t			print_len, buf_len;
-	p_mblk_t		np;
-	int			rdc;
-	p_nxge_dma_pt_cfg_t	p_dma_cfgp;
-	p_nxge_hw_pt_cfg_t	p_cfgp;
-	int			buff_alloc_size = NXGE_NDD_INFODUMP_BUFF_SIZE;
-	p_rx_rcr_rings_t 	rx_rcr_rings;
-	p_rx_rcr_ring_t		*rcr_rings;
-	p_rx_rbr_rings_t 	rx_rbr_rings;
-	p_rx_rbr_ring_t		*rbr_rings;
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_get_rxdma_info"));
-
-	(void) mi_mpprintf(mp, "RXDMA Information for Port\t %d \n",
-		nxgep->function_num);
-
-	if ((np = allocb(buff_alloc_size, BPRI_HI)) == NULL) {
-		/* The following may work even if we cannot get a large buf. */
-		(void) mi_mpprintf(mp, "%s\n", "out of buffer");
-		return (0);
-	}
-
-	buf_len = buff_alloc_size;
-	mp->b_cont = np;
-	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
-	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
-
-	rx_rcr_rings = nxgep->rx_rcr_rings;
-	rcr_rings = rx_rcr_rings->rcr_rings;
-	rx_rbr_rings = nxgep->rx_rbr_rings;
-	rbr_rings = rx_rbr_rings->rbr_rings;
-
-	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
-		"Total RDCs\t %d\n", p_cfgp->max_rdcs);
-
-	((mblk_t *)np)->b_wptr += print_len;
-	buf_len -= print_len;
-	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
-		"RDC\t HW RDC\t Timeout\t Packets\t RBR ptr\t"
-		"chunks\t RCR ptr\n");
-
-	((mblk_t *)np)->b_wptr += print_len;
-	buf_len -= print_len;
-	for (rdc = 0; rdc < p_cfgp->max_rdcs; rdc++) {
-		print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
-			" %d\t  %d\t   %x\t\t %x\t $%p\t 0x%x\t $%p\n",
-			rdc, nxgep->rdc[rdc],
-			p_dma_cfgp->rcr_timeout[rdc],
-			p_dma_cfgp->rcr_threshold[rdc],
-			rbr_rings[rdc],
-			rbr_rings[rdc]->num_blocks, rcr_rings[rdc]);
-		((mblk_t *)np)->b_wptr += print_len;
-		buf_len -= print_len;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_get_rxdma_info"));
-	return (0);
-}
-
-/* ARGSUSED */
-int
-nxge_param_get_rxdma_rdcgrp_info(p_nxge_t nxgep, queue_t *q,
-	p_mblk_t mp, caddr_t cp)
-{
-	uint_t			print_len, buf_len;
-	p_mblk_t		np;
-	int			offset, rdc, i, rdc_grp;
-	p_nxge_rdc_grp_t	rdc_grp_p;
-	p_nxge_dma_pt_cfg_t	p_dma_cfgp;
-	p_nxge_hw_pt_cfg_t	p_cfgp;
-
-	int buff_alloc_size = NXGE_NDD_INFODUMP_BUFF_SIZE;
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL,
-		"==> nxge_param_get_rxdma_rdcgrp_info"));
-
-	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
-	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
-
-	(void) mi_mpprintf(mp, "RXDMA RDC Group Information for Port\t %d \n",
-		nxgep->function_num);
-
-	rdc_grp = p_cfgp->start_rdc_grpid;
-	if ((np = allocb(buff_alloc_size, BPRI_HI)) == NULL) {
-		/* The following may work even if we cannot get a large buf. */
-		(void) mi_mpprintf(mp, "%s\n", "out of buffer");
-		return (0);
-	}
-
-	buf_len = buff_alloc_size;
-	mp->b_cont = np;
-	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
-		"Total RDC Groups\t %d \n"
-		"start RDC group\t %d\n",
-		p_cfgp->max_rdc_grpids,
-		p_cfgp->start_rdc_grpid);
-
-	((mblk_t *)np)->b_wptr += print_len;
-	buf_len -= print_len;
-
-	for (i = 0, rdc_grp = p_cfgp->start_rdc_grpid;
-	    rdc_grp < (p_cfgp->max_rdc_grpids + p_cfgp->start_rdc_grpid);
-	    rdc_grp++, i++) {
-		rdc_grp_p = &p_dma_cfgp->rdc_grps[i];
-		print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
-			"\nRDC Group Info for Group [%d] %d\n"
-			"RDC Count %d\tstart RDC %d\n"
-			"RDC Group Population Information"
-			" (offsets 0 - 15)\n",
-			i, rdc_grp, rdc_grp_p->max_rdcs,
-			rdc_grp_p->start_rdc);
-
-		((mblk_t *)np)->b_wptr += print_len;
-		buf_len -= print_len;
-		print_len = snprintf((char *)((mblk_t *)np)->b_wptr,
-			buf_len, "\n");
-		((mblk_t *)np)->b_wptr += print_len;
-		buf_len -= print_len;
-
-		for (rdc = 0; rdc < rdc_grp_p->max_rdcs; rdc++) {
-			print_len = snprintf((char *)((mblk_t *)np)->b_wptr,
-				buf_len, "[%d]=%d ", rdc,
-				rdc_grp_p->start_rdc + rdc);
-			((mblk_t *)np)->b_wptr += print_len;
-			buf_len -= print_len;
-		}
-		print_len = snprintf((char *)((mblk_t *)np)->b_wptr,
-					    buf_len, "\n");
-		((mblk_t *)np)->b_wptr += print_len;
-		buf_len -= print_len;
-
-		for (offset = 0; offset < 16; offset++) {
-			print_len = snprintf((char *)((mblk_t *)np)->b_wptr,
-				buf_len, " %2d ",
-				rdc_grp_p->rdc[offset]);
-			((mblk_t *)np)->b_wptr += print_len;
-			buf_len -= print_len;
-		}
-		print_len = snprintf((char *)((mblk_t *)np)->b_wptr,
-			buf_len, "\n");
-		((mblk_t *)np)->b_wptr += print_len;
-		buf_len -= print_len;
-	}
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL,
-		"<== nxge_param_get_rxdma_rdcgrp_info"));
-	return (0);
-}
-
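-/*
- * Walk to the last mblk in the chain and make sure it has at least
- * 'size' bytes of tailroom; if not, append a freshly allocated 1K mblk.
- * On success *nmp points at the mblk that can hold the data.
- */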
-int
-nxge_mk_mblk_tail_space(p_mblk_t mp, p_mblk_t *nmp, size_t size)
-{
-	p_mblk_t tmp;
-
-	tmp = mp;
-	while (tmp->b_cont)
-		tmp = tmp->b_cont;
-	if ((tmp->b_wptr + size) >= tmp->b_datap->db_lim) {
-		tmp->b_cont = allocb(1024, BPRI_HI);
-		tmp = tmp->b_cont;
-		if (!tmp)
-			return (ENOMEM);
-	}
-
-	*nmp = tmp;
-	return (0);
-}
-
-/*
- * Sets the nxge parameter to the value passed in from ndd; the
- * parameter was registered with nxge_nd_load().
- */
-
-/* ARGSUSED */
-int
-nxge_param_set_generic(p_nxge_t nxgep, queue_t *q, mblk_t *mp,
-			    char *value, caddr_t cp)
-{
-	char *end;
-	uint32_t new_value;
-	p_nxge_param_t pa = (p_nxge_param_t)cp;
-
-	NXGE_DEBUG_MSG((nxgep, IOC_CTL, " ==> nxge_param_set_generic"));
-	new_value = (uint32_t)mi_strtol(value, &end, 10);
-	if (end == value || new_value < pa->minimum ||
-		new_value > pa->maximum) {
-			return (EINVAL);
-	}
-	pa->value = new_value;
-	NXGE_DEBUG_MSG((nxgep, IOC_CTL, " <== nxge_param_set_generic"));
-	return (0);
-}
-
-/*
- * ndd set routine for the instance parameter, registered via
- * nxge_nd_load(); setting the instance is currently a no-op.
- */
-
-/* ARGSUSED */
-int
-nxge_param_set_instance(p_nxge_t nxgep, queue_t *q, mblk_t *mp,
-	char *value, caddr_t cp)
-{
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, " ==> nxge_param_set_instance"));
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, " <== nxge_param_set_instance"));
-	return (0);
-}
-
-/*
- * Sets a MAC/link parameter to the value passed in from ndd (registered
- * with nxge_nd_load()) and triggers a link update.
- */
-
-/* ARGSUSED */
-int
-nxge_param_set_mac(p_nxge_t nxgep, queue_t *q, mblk_t *mp,
-	char *value, caddr_t cp)
-{
-	char		*end;
-	uint32_t	new_value;
-	int		status = 0;
-	p_nxge_param_t	pa = (p_nxge_param_t)cp;
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_set_mac"));
-	new_value = (uint32_t)mi_strtol(value, &end, BASE_DECIMAL);
-	if (PARAM_OUTOF_RANGE(value, end, new_value, pa)) {
-		return (EINVAL);
-	}
-
-	if (pa->value != new_value) {
-		pa->old_value = pa->value;
-		pa->value = new_value;
-	}
-
-	if (!nxge_param_link_update(nxgep)) {
-		NXGE_DEBUG_MSG((nxgep, NDD_CTL,
-				    " false ret from nxge_param_link_update"));
-		status = EINVAL;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_set_mac"));
-	return (status);
-}
-
-/* ARGSUSED */
-static int
-nxge_param_rx_intr_pkts(p_nxge_t nxgep, queue_t *q, mblk_t *mp,
-	char *value, caddr_t cp)
-{
-	char		*end;
-	uint32_t	cfg_value;
-	p_nxge_param_t	pa = (p_nxge_param_t)cp;
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_rx_intr_pkts"));
-
-	cfg_value = (uint32_t)mi_strtol(value, &end, BASE_ANY);
-
-	if ((cfg_value > NXGE_RDC_RCR_THRESHOLD_MAX) ||
-		(cfg_value < NXGE_RDC_RCR_THRESHOLD_MIN)) {
-		return (EINVAL);
-	}
-
-	if ((pa->value != cfg_value)) {
-		pa->old_value = pa->value;
-		pa->value = cfg_value;
-		nxgep->intr_threshold = pa->value;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_rx_intr_pkts"));
-	return (0);
-}
-
-/* ARGSUSED */
-static int
-nxge_param_rx_intr_time(p_nxge_t nxgep, queue_t *q, mblk_t *mp,
-	char *value, caddr_t cp)
-{
-	char		*end;
-	uint32_t	cfg_value;
-	p_nxge_param_t	pa = (p_nxge_param_t)cp;
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_rx_intr_time"));
-
-	cfg_value = (uint32_t)mi_strtol(value, &end, BASE_ANY);
-
-	if ((cfg_value > NXGE_RDC_RCR_TIMEOUT_MAX) ||
-		(cfg_value < NXGE_RDC_RCR_TIMEOUT_MIN)) {
-		return (EINVAL);
-	}
-
-	if ((pa->value != cfg_value)) {
-		pa->old_value = pa->value;
-		pa->value = cfg_value;
-		nxgep->intr_timeout = pa->value;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_rx_intr_time"));
-	return (0);
-}
-
-/* ARGSUSED */
-static int
-nxge_param_set_mac_rdcgrp(p_nxge_t nxgep, queue_t *q,
-	mblk_t *mp, char *value, caddr_t cp)
-{
-	char			 *end;
-	uint32_t		status = 0, cfg_value;
-	p_nxge_param_t		pa = (p_nxge_param_t)cp;
-	uint32_t		cfg_it = B_FALSE;
-	p_nxge_dma_pt_cfg_t	p_dma_cfgp;
-	p_nxge_hw_pt_cfg_t	p_cfgp;
-	uint32_t		*val_ptr, *old_val_ptr;
-	nxge_param_map_t	*mac_map;
-	p_nxge_class_pt_cfg_t	p_class_cfgp;
-	nxge_mv_cfg_t		*mac_host_info;
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_set_mac_rdcgrp "));
-
-	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
-	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
-	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
-	mac_host_info = (nxge_mv_cfg_t	*)&p_class_cfgp->mac_host_info[0];
-	cfg_value = (uint32_t)mi_strtol(value, &end, BASE_HEX);
-
-	/*
-	 * Decode the 32-bit value: it carries the MAC slot (param_id),
-	 * the target RDC group (map_to) and the preference flag (pref).
-	 */
-	mac_map = (nxge_param_map_t *)&cfg_value;
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, " cfg_value %x id %x map_to %x",
-		cfg_value, mac_map->param_id, mac_map->map_to));
-
-	if ((mac_map->param_id < p_cfgp->max_macs) &&
-			(mac_map->map_to < (p_cfgp->max_rdc_grpids +
-			p_cfgp->start_rdc_grpid)) && (mac_map->map_to >=
-			p_cfgp->start_rdc_grpid)) {
-		NXGE_DEBUG_MSG((nxgep, NDD_CTL,
-			" nxge_param_set_mac_rdcgrp mapping"
-			" id %d grp %d", mac_map->param_id, mac_map->map_to));
-		val_ptr = (uint32_t *)pa->value;
-		old_val_ptr = (uint32_t *)pa->old_value;
-		if (val_ptr[mac_map->param_id] != cfg_value) {
-			old_val_ptr[mac_map->param_id] =
-				    val_ptr[mac_map->param_id];
-			val_ptr[mac_map->param_id] = cfg_value;
-			mac_host_info[mac_map->param_id].mpr_npr =
-				    mac_map->pref;
-			mac_host_info[mac_map->param_id].flag = 1;
-			mac_host_info[mac_map->param_id].rdctbl =
-				    mac_map->map_to;
-			cfg_it = B_TRUE;
-		}
-	} else {
-		return (EINVAL);
-	}
-
-	if (cfg_it == B_TRUE) {
-		status = nxge_logical_mac_assign_rdc_table(nxgep,
-						    (uint8_t)mac_map->param_id);
-		if (status != NXGE_OK)
-			return (EINVAL);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_set_mac_rdcgrp"));
-	return (0);
-}
-
-/* ARGSUSED */
-static int
-nxge_param_set_vlan_rdcgrp(p_nxge_t nxgep, queue_t *q,
-	mblk_t	*mp, char *value, caddr_t cp)
-{
-	char			*end;
-	uint32_t		status = 0, cfg_value;
-	p_nxge_param_t		pa = (p_nxge_param_t)cp;
-	uint32_t		cfg_it = B_FALSE;
-	p_nxge_dma_pt_cfg_t	p_dma_cfgp;
-	p_nxge_hw_pt_cfg_t	p_cfgp;
-	uint32_t		*val_ptr, *old_val_ptr;
-	nxge_param_map_t	*vmap, *old_map;
-	p_nxge_class_pt_cfg_t	p_class_cfgp;
-	uint64_t		cfgd_vlans;
-	int			i, inc = 0, cfg_position;
-	nxge_mv_cfg_t		*vlan_tbl;
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_set_vlan_rdcgrp "));
-
-	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
-	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
-	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
-	vlan_tbl = (nxge_mv_cfg_t *)&p_class_cfgp->vlan_tbl[0];
-
-	cfg_value = (uint32_t)mi_strtol(value, &end, BASE_HEX);
-
-	/* now do decoding */
-	cfgd_vlans = ((pa->type &  NXGE_PARAM_ARRAY_CNT_MASK) >>
-		NXGE_PARAM_ARRAY_CNT_SHIFT);
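-	/*
-	 * cfgd_vlans is the number of VLAN-to-RDC-group mappings configured
-	 * so far; it is kept in the NXGE_PARAM_ARRAY_CNT field of pa->type
-	 * and is incremented below when a new VLAN id is added.
-	 */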
-
-	if (cfgd_vlans == NXGE_PARAM_ARRAY_INIT_SIZE) {
-		/*
-		 * For now, we process only up to NXGE_PARAM_ARRAY_INIT_SIZE
-		 * mappings. In the future, we may want to expand the
-		 * storage array and continue.
-		 */
-		return (EINVAL);
-	}
-
-	vmap = (nxge_param_map_t *)&cfg_value;
-	if ((vmap->param_id) &&
-		(vmap->param_id < NXGE_MAX_VLANS) &&
-		(vmap->map_to < p_cfgp->max_rdc_grpids)) {
-		NXGE_DEBUG_MSG((nxgep, NDD_CTL,
-			"nxge_param_set_vlan_rdcgrp mapping"
-			" id %d grp %d",
-			vmap->param_id, vmap->map_to));
-		val_ptr = (uint32_t *)pa->value;
-		old_val_ptr = (uint32_t *)pa->old_value;
-
-		/* search to see if this vlan id is already configured */
-		for (i = 0; i < cfgd_vlans; i++) {
-			old_map = (nxge_param_map_t *)&val_ptr[i];
-			if ((old_map->param_id == 0) ||
-				(vmap->param_id == old_map->param_id) ||
-				(vlan_tbl[vmap->param_id].flag)) {
-				cfg_position = i;
-				break;
-			}
-		}
-
-		if (cfgd_vlans == 0) {
-			cfg_position = 0;
-			inc++;
-		}
-
-		if (i == cfgd_vlans) {
-			cfg_position = i;
-			inc++;
-		}
-
-		NXGE_DEBUG_MSG((nxgep, NDD2_CTL,
-			"set_vlan_rdcgrp mapping"
-			" i %d cfgd_vlans %llx position %d ",
-			i, cfgd_vlans, cfg_position));
-		if (val_ptr[cfg_position] != cfg_value) {
-			old_val_ptr[cfg_position] = val_ptr[cfg_position];
-			val_ptr[cfg_position] = cfg_value;
-			vlan_tbl[vmap->param_id].mpr_npr = vmap->pref;
-			vlan_tbl[vmap->param_id].flag = 1;
-			vlan_tbl[vmap->param_id].rdctbl =
-			    vmap->map_to + p_cfgp->start_rdc_grpid;
-			cfg_it = B_TRUE;
-			if (inc) {
-				cfgd_vlans++;
-				pa->type &= ~NXGE_PARAM_ARRAY_CNT_MASK;
-				pa->type |= (cfgd_vlans <<
-						    NXGE_PARAM_ARRAY_CNT_SHIFT);
-
-			}
-			NXGE_DEBUG_MSG((nxgep, NDD2_CTL,
-				"after: param_set_vlan_rdcgrp "
-				" cfg_vlans %llx position %d \n",
-				cfgd_vlans, cfg_position));
-		}
-	} else {
-		return (EINVAL);
-	}
-
-	if (cfg_it == B_TRUE) {
-		status = nxge_fflp_config_vlan_table(nxgep,
-			(uint16_t)vmap->param_id);
-		if (status != NXGE_OK)
-			return (EINVAL);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_set_vlan_rdcgrp"));
-	return (0);
-}
-
-/* ARGSUSED */
-static int
-nxge_param_get_vlan_rdcgrp(p_nxge_t nxgep, queue_t *q,
-	mblk_t *mp, caddr_t cp)
-{
-
-	uint_t 			print_len, buf_len;
-	p_mblk_t		np;
-	int			i;
-	uint32_t		*val_ptr;
-	nxge_param_map_t	*vmap;
-	p_nxge_param_t		pa = (p_nxge_param_t)cp;
-	p_nxge_class_pt_cfg_t 	p_class_cfgp;
-	p_nxge_dma_pt_cfg_t	p_dma_cfgp;
-	p_nxge_hw_pt_cfg_t	p_cfgp;
-	uint64_t		cfgd_vlans = 0;
-	nxge_mv_cfg_t		*vlan_tbl;
-	int			buff_alloc_size =
-					NXGE_NDD_INFODUMP_BUFF_SIZE * 32;
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_get_vlan_rdcgrp"));
-	(void) mi_mpprintf(mp, "VLAN RDC Mapping Information for Port\t %d \n",
-		nxgep->function_num);
-
-	if ((np = allocb(buff_alloc_size, BPRI_HI)) == NULL) {
-		(void) mi_mpprintf(mp, "%s\n", "out of buffer");
-		return (0);
-	}
-
-	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
-	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
-
-	buf_len = buff_alloc_size;
-	mp->b_cont = np;
-	cfgd_vlans = (pa->type &  NXGE_PARAM_ARRAY_CNT_MASK) >>
-		NXGE_PARAM_ARRAY_CNT_SHIFT;
-
-	i = (int)cfgd_vlans;
-	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
-	vlan_tbl = (nxge_mv_cfg_t *)&p_class_cfgp->vlan_tbl[0];
-	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
-		"Configured VLANs %d\n"
-		"VLAN ID\t RDC GRP (Actual/Port)\t"
-		" Preference\n", i);
-	((mblk_t *)np)->b_wptr += print_len;
-	buf_len -= print_len;
-
-	val_ptr = (uint32_t *)pa->value;
-
-	for (i = 0; i < cfgd_vlans; i++) {
-		vmap = (nxge_param_map_t *)&val_ptr[i];
-		if (p_class_cfgp->vlan_tbl[vmap->param_id].flag) {
-			print_len = snprintf((char *)((mblk_t *)np)->b_wptr,
-				buf_len,
-				"  %d\t\t %d/%d\t\t %d\n",
-				vmap->param_id,
-				vlan_tbl[vmap->param_id].rdctbl,
-				vlan_tbl[vmap->param_id].rdctbl -
-				p_cfgp->start_rdc_grpid,
-				vlan_tbl[vmap->param_id].mpr_npr);
-			((mblk_t *)np)->b_wptr += print_len;
-			buf_len -= print_len;
-		}
-	}
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_get_vlan_rdcgrp"));
-	return (0);
-}
-
-/* ARGSUSED */
-static int
-nxge_param_get_mac_rdcgrp(p_nxge_t nxgep, queue_t *q,
-	mblk_t *mp, caddr_t cp)
-{
-	uint_t			print_len, buf_len;
-	p_mblk_t		np;
-	int			i;
-	p_nxge_class_pt_cfg_t 	p_class_cfgp;
-	p_nxge_dma_pt_cfg_t	p_dma_cfgp;
-	p_nxge_hw_pt_cfg_t	p_cfgp;
-	nxge_mv_cfg_t		*mac_host_info;
-
-	int buff_alloc_size = NXGE_NDD_INFODUMP_BUFF_SIZE * 32;
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_get_mac_rdcgrp "));
-	(void) mi_mpprintf(mp,
-		"MAC ADDR RDC Mapping Information for Port\t %d\n",
-		nxgep->function_num);
-
-	if ((np = allocb(buff_alloc_size, BPRI_HI)) == NULL) {
-		(void) mi_mpprintf(mp, "%s\n", "out of buffer");
-		return (0);
-	}
-
-	buf_len = buff_alloc_size;
-	mp->b_cont = np;
-	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
-	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
-	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
-	mac_host_info = (nxge_mv_cfg_t	*)&p_class_cfgp->mac_host_info[0];
-	print_len = snprintf((char *)np->b_wptr, buf_len,
-		"MAC ID\t RDC GRP (Actual/Port)\t"
-		" Preference\n");
-	((mblk_t *)np)->b_wptr += print_len;
-	buf_len -= print_len;
-	for (i = 0; i < p_cfgp->max_macs; i++) {
-		if (mac_host_info[i].flag) {
-			print_len = snprintf((char *)((mblk_t *)np)->b_wptr,
-				buf_len,
-				"   %d\t  %d/%d\t\t %d\n",
-				i, mac_host_info[i].rdctbl,
-				mac_host_info[i].rdctbl -
-				p_cfgp->start_rdc_grpid,
-				mac_host_info[i].mpr_npr);
-			((mblk_t *)np)->b_wptr += print_len;
-			buf_len -= print_len;
-		}
-	}
-	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
-		"Done Info Dumping \n");
-	((mblk_t *)np)->b_wptr += print_len;
-	buf_len -= print_len;
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_get_mac_rdcgrp"));
-	return (0);
-}
-
-/* ARGSUSED */
-static int
-nxge_param_tcam_enable(p_nxge_t nxgep, queue_t *q,
-	mblk_t *mp, char *value, caddr_t cp)
-{
-	uint32_t	status = 0, cfg_value;
-	p_nxge_param_t	pa = (p_nxge_param_t)cp;
-	uint32_t	cfg_it = B_FALSE;
-	char		*end;
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_tcam_enable"));
-
-	cfg_value = (uint32_t)mi_strtol(value, &end, BASE_BINARY);
-	if (pa->value != cfg_value) {
-		pa->old_value = pa->value;
-		pa->value = cfg_value;
-		cfg_it = B_TRUE;
-	}
-
-	if (cfg_it == B_TRUE) {
-		if (pa->value)
-			status = nxge_fflp_config_tcam_enable(nxgep);
-		else
-			status = nxge_fflp_config_tcam_disable(nxgep);
-		if (status != NXGE_OK)
-			return (EINVAL);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, " <== nxge_param_tcam_enable"));
-	return (0);
-}
-
-/* ARGSUSED */
-static int
-nxge_param_hash_lookup_enable(p_nxge_t nxgep, queue_t *q,
-	mblk_t *mp, char *value, caddr_t cp)
-{
-	uint32_t	status = 0, cfg_value;
-	p_nxge_param_t	pa = (p_nxge_param_t)cp;
-	uint32_t	cfg_it = B_FALSE;
-	char		*end;
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_hash_lookup_enable"));
-
-	cfg_value = (uint32_t)mi_strtol(value, &end, BASE_BINARY);
-	if (pa->value != cfg_value) {
-		pa->old_value = pa->value;
-		pa->value = cfg_value;
-		cfg_it = B_TRUE;
-	}
-
-	if (cfg_it == B_TRUE) {
-		if (pa->value)
-			status = nxge_fflp_config_hash_lookup_enable(nxgep);
-		else
-			status = nxge_fflp_config_hash_lookup_disable(nxgep);
-		if (status != NXGE_OK)
-			return (EINVAL);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, " <== nxge_param_hash_lookup_enable"));
-	return (0);
-}
-
-/* ARGSUSED */
-static int
-nxge_param_llc_snap_enable(p_nxge_t nxgep, queue_t *q,
-	mblk_t *mp, char *value, caddr_t cp)
-{
-	char		*end;
-	uint32_t	status = 0, cfg_value;
-	p_nxge_param_t	pa = (p_nxge_param_t)cp;
-	uint32_t	cfg_it = B_FALSE;
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_llc_snap_enable"));
-
-	cfg_value = (uint32_t)mi_strtol(value, &end, BASE_BINARY);
-	if (pa->value != cfg_value) {
-		pa->old_value = pa->value;
-		pa->value = cfg_value;
-		cfg_it = B_TRUE;
-	}
-
-	if (cfg_it == B_TRUE) {
-		if (pa->value)
-			status = nxge_fflp_config_tcam_enable(nxgep);
-		else
-			status = nxge_fflp_config_tcam_disable(nxgep);
-		if (status != NXGE_OK)
-			return (EINVAL);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, " <== nxge_param_llc_snap_enable"));
-	return (0);
-}
-
-/* ARGSUSED */
-static int
-nxge_param_set_ether_usr(p_nxge_t nxgep, queue_t *q,
-	mblk_t	*mp, char *value, caddr_t cp)
-{
-	char		*end;
-	uint8_t		ether_class;
-	uint32_t	status = 0, cfg_value;
-	p_nxge_param_t	pa = (p_nxge_param_t)cp;
-	uint8_t		cfg_it = B_FALSE;
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_set_ether_usr"));
-
-	cfg_value = (uint32_t)mi_strtol(value, &end, BASE_HEX);
-	if (PARAM_OUTOF_RANGE(value, end, cfg_value, pa)) {
-		return (EINVAL);
-	}
-
-	if (pa->value != cfg_value) {
-		pa->old_value = pa->value;
-		pa->value = cfg_value;
-		cfg_it = B_TRUE;
-	}
-
-	/* do the actual hw setup  */
-	if (cfg_it == B_TRUE) {
-		ether_class = mi_strtol(pa->name, &end, 10);
-#ifdef lint
-		ether_class = ether_class;
-#endif
-		NXGE_DEBUG_MSG((nxgep, NDD_CTL, " nxge_param_set_ether_usr"));
-	}
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_set_ether_usr"));
-	return (status);
-}
-
-/* ARGSUSED */
-static int
-nxge_param_set_ip_usr(p_nxge_t nxgep, queue_t *q,
-	mblk_t *mp, char *value, caddr_t cp)
-{
-	char		*end;
-	tcam_class_t	class;
-	uint32_t	status = 0, cfg_value;
-	p_nxge_param_t	pa = (p_nxge_param_t)cp;
-	uint32_t	cfg_it = B_FALSE;
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_set_ip_usr"));
-
-	cfg_value = (uint32_t)mi_strtol(value, &end, BASE_HEX);
-	if (PARAM_OUTOF_RANGE(value, end, cfg_value, pa)) {
-		return (EINVAL);
-	}
-
-	if (pa->value != cfg_value) {
-		pa->old_value = pa->value;
-		pa->value = cfg_value;
-		cfg_it = B_TRUE;
-	}
-
-	/* do the actual hw setup with cfg_value. */
-	if (cfg_it == B_TRUE) {
-		class = mi_strtol(pa->name, &end, 10);
-		status = nxge_fflp_ip_usr_class_config(nxgep, class, pa->value);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_set_ip_usr"));
-	return (status);
-}
-
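-/*
- * Map an ndd parameter name such as "class_opt_ipv4_tcp" to its TCAM
- * class value by scanning the parameter array entries between
- * TCAM_CLASS_IP_USER_4 and TCAM_CLASS_SCTP_IPV6.  Returns -1 when the
- * name is not a known class parameter.
- */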
-/* ARGSUSED */
-static int
-nxge_class_name_2value(p_nxge_t nxgep, char *name)
-{
-	int		i;
-	int		class_instance = param_class_opt_ip_usr4;
-	p_nxge_param_t	param_arr;
-
-	param_arr = nxgep->param_arr;
-	for (i = TCAM_CLASS_IP_USER_4; i <= TCAM_CLASS_SCTP_IPV6; i++) {
-		if (strcmp(param_arr[class_instance].name, name) == 0)
-			return (i);
-		class_instance++;
-	}
-	return (-1);
-}
-
-/* ARGSUSED */
-static int
-nxge_param_set_ip_opt(p_nxge_t nxgep, queue_t *q,
-	mblk_t *mp, char *value, caddr_t cp)
-{
-	char		*end;
-	uint32_t	status, cfg_value;
-	p_nxge_param_t	pa = (p_nxge_param_t)cp;
-	tcam_class_t	class;
-	uint32_t	cfg_it = B_FALSE;
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_set_ip_opt"));
-
-	cfg_value = (uint32_t)mi_strtol(value, &end, BASE_HEX);
-	if (PARAM_OUTOF_RANGE(value, end, cfg_value, pa)) {
-		return (EINVAL);
-	}
-
-	if (pa->value != cfg_value) {
-		pa->old_value = pa->value;
-		pa->value = cfg_value;
-		cfg_it = B_TRUE;
-	}
-
-	if (cfg_it == B_TRUE) {
-		/* do the actual hw setup  */
-		class = nxge_class_name_2value(nxgep, pa->name);
-		if (class == -1)
-			return (EINVAL);
-
-		status = nxge_fflp_ip_class_config(nxgep, class, pa->value);
-		if (status != NXGE_OK)
-			return (EINVAL);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_set_ip_opt"));
-	return (0);
-}
-
-/* ARGSUSED */
-static int
-nxge_param_get_ip_opt(p_nxge_t nxgep, queue_t *q,
-	mblk_t *mp, caddr_t cp)
-{
-	uint32_t status, cfg_value;
-	p_nxge_param_t pa = (p_nxge_param_t)cp;
-	tcam_class_t class;
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_get_ip_opt"));
-
-	/* do the actual hw setup  */
-	class = nxge_class_name_2value(nxgep, pa->name);
-	if (class == -1)
-		return (EINVAL);
-
-	cfg_value = 0;
-	status = nxge_fflp_ip_class_config_get(nxgep, class, &cfg_value);
-	if (status != NXGE_OK)
-		return (EINVAL);
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL,
-		"nxge_param_get_ip_opt_get %x ", cfg_value));
-
-	pa->value = cfg_value;
-	(void) mi_mpprintf(mp, "%x", cfg_value);
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_get_ip_opt status "));
-	return (0);
-}
-
-/* ARGSUSED */
-static int
-nxge_param_fflp_hash_init(p_nxge_t nxgep, queue_t *q,
-	mblk_t *mp, char *value, caddr_t cp)
-{
-	char		*end;
-	uint32_t	status, cfg_value;
-	p_nxge_param_t	pa = (p_nxge_param_t)cp;
-	tcam_class_t	class;
-	uint32_t	cfg_it = B_FALSE;
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_fflp_hash_init"));
-
-	cfg_value = (uint32_t)mi_strtol(value, &end, BASE_HEX);
-	if (PARAM_OUTOF_RANGE(value, end, cfg_value, pa)) {
-		return (EINVAL);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL,
-		"nxge_param_fflp_hash_init value %x", cfg_value));
-
-	if (pa->value != cfg_value) {
-		pa->old_value = pa->value;
-		pa->value = cfg_value;
-		cfg_it = B_TRUE;
-	}
-
-	if (cfg_it == B_TRUE) {
-		char *h_name;
-
-		/* do the actual hw setup */
-		h_name = pa->name;
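-		/* skip the 'h' so "h1_init_value"/"h2_init_value" parse as 1 or 2 */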
-		h_name++;
-		class = mi_strtol(h_name, &end, 10);
-		switch (class) {
-			case 1:
-				status = nxge_fflp_set_hash1(nxgep,
-					(uint32_t)pa->value);
-				break;
-			case 2:
-				status = nxge_fflp_set_hash2(nxgep,
-					(uint16_t)pa->value);
-				break;
-
-			default:
-				NXGE_DEBUG_MSG((nxgep, NDD_CTL,
-					" nxge_param_fflp_hash_init"
-					" %s Wrong hash var %d",
-					pa->name, class));
-				return (EINVAL);
-		}
-		if (status != NXGE_OK)
-			return (EINVAL);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, " <== nxge_param_fflp_hash_init"));
-	return (0);
-}
-
-/* ARGSUSED */
-static int
-nxge_param_set_grp_rdc(p_nxge_t nxgep, queue_t *q,
-	mblk_t *mp, char *value, caddr_t cp)
-{
-	char			*end;
-	uint32_t		status = 0, cfg_value;
-	p_nxge_param_t		pa = (p_nxge_param_t)cp;
-	uint32_t		cfg_it = B_FALSE;
-	int			rdc_grp;
-	uint8_t			real_rdc;
-	p_nxge_dma_pt_cfg_t	p_dma_cfgp;
-	p_nxge_hw_pt_cfg_t	p_cfgp;
-	p_nxge_rdc_grp_t	rdc_grp_p;
-
-	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
-	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_set_grp_rdc"));
-
-	cfg_value = (uint32_t)mi_strtol(value, &end, BASE_ANY);
-	if (PARAM_OUTOF_RANGE(value, end, cfg_value, pa)) {
-		return (EINVAL);
-	}
-
-	if (cfg_value >= p_cfgp->max_rdcs) {
-		return (EINVAL);
-	}
-
-	if (pa->value != cfg_value) {
-		pa->old_value = pa->value;
-		pa->value = cfg_value;
-		cfg_it = B_TRUE;
-	}
-
-	if (cfg_it == B_TRUE) {
-		char *grp_name;
-		grp_name = pa->name;
-		grp_name += strlen("default-grp");
-		rdc_grp = mi_strtol(grp_name, &end, 10);
-		rdc_grp_p = &p_dma_cfgp->rdc_grps[rdc_grp];
-		real_rdc = rdc_grp_p->start_rdc + cfg_value;
-		if (nxge_check_rxdma_rdcgrp_member(nxgep, rdc_grp,
-				cfg_value) == B_FALSE) {
-			pa->value = pa->old_value;
-			NXGE_DEBUG_MSG((nxgep, NDD_CTL,
-				" nxge_param_set_grp_rdc"
-				" %d read %d actual %d out of range",
-				rdc_grp, cfg_value, real_rdc));
-			return (EINVAL);
-		}
-		status = nxge_rxdma_cfg_rdcgrp_default_rdc(nxgep, rdc_grp,
-							    real_rdc);
-		if (status != NXGE_OK)
-			return (EINVAL);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_set_grp_rdc"));
-	return (0);
-}
-
-/* ARGSUSED */
-static int
-nxge_param_set_port_rdc(p_nxge_t nxgep, queue_t *q,
-	mblk_t *mp, char *value, caddr_t cp)
-{
-	char		*end;
-	uint32_t	status = B_TRUE, cfg_value;
-	p_nxge_param_t	pa = (p_nxge_param_t)cp;
-	uint32_t	cfg_it = B_FALSE;
-
-	p_nxge_dma_pt_cfg_t	p_dma_cfgp;
-	p_nxge_hw_pt_cfg_t	p_cfgp;
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_set_port_rdc"));
-	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
-	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
-
-	cfg_value = (uint32_t)mi_strtol(value, &end, BASE_ANY);
-	if (PARAM_OUTOF_RANGE(value, end, cfg_value, pa)) {
-		return (EINVAL);
-	}
-
-	if (pa->value != cfg_value) {
-		if (cfg_value >= p_cfgp->max_rdcs)
-			return (EINVAL);
-		pa->old_value = pa->value;
-		pa->value = cfg_value;
-		cfg_it = B_TRUE;
-	}
-
-	if (cfg_it == B_TRUE) {
-		status = nxge_rxdma_cfg_port_default_rdc(nxgep,
-			nxgep->function_num,
-			nxgep->rdc[cfg_value]);
-		if (status != NXGE_OK)
-			return (EINVAL);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_set_port_rdc"));
-	return (0);
-}
-
-/* ARGSUSED */
-static int
-nxge_param_set_nxge_debug_flag(p_nxge_t nxgep, queue_t *q,
-	mblk_t *mp, char *value, caddr_t cp)
-{
-	char *end;
-	uint32_t status = 0;
-	uint64_t cfg_value = 0;
-	p_nxge_param_t pa = (p_nxge_param_t)cp;
-	uint32_t cfg_it = B_FALSE;
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_set_nxge_debug_flag"));
-	cfg_value = mi_strtol(value, &end, BASE_HEX);
-
-	if (PARAM_OUTOF_RANGE(value, end, cfg_value, pa)) {
-		NXGE_DEBUG_MSG((nxgep, NDD_CTL,
-			" nxge_param_set_nxge_debug_flag"
-			" out of range %llx", cfg_value));
-		return (EINVAL);
-	}
-	if (pa->value != cfg_value) {
-		pa->old_value = pa->value;
-		pa->value = cfg_value;
-		cfg_it = B_TRUE;
-	}
-
-	if (cfg_it == B_TRUE) {
-		nxgep->nxge_debug_level = pa->value;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_set_nxge_debug_flag"));
-	return (status);
-}
-
-/* ARGSUSED */
-static int
-nxge_param_get_debug_flag(p_nxge_t nxgep, queue_t *q, p_mblk_t mp, caddr_t cp)
-{
-	int		status = 0;
-	p_nxge_param_t	pa = (p_nxge_param_t)cp;
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_get_debug_flag"));
-
-	/* Zero-pad the low word so the halves print as one 64-bit hex value. */
-	if (pa->value > 0xffffffff)
-		(void) mi_mpprintf(mp, "%x%08x", (int)(pa->value >> 32),
-			(int)(pa->value & 0xffffffff));
-	else
-		(void) mi_mpprintf(mp, "%x", (int)pa->value);
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_get_debug_flag"));
-	return (status);
-}
-
-/* ARGSUSED */
-static int
-nxge_param_set_npi_debug_flag(p_nxge_t nxgep, queue_t *q,
-	mblk_t *mp, char *value, caddr_t cp)
-{
-	char		*end;
-	uint32_t	status = 0;
-	uint64_t	 cfg_value = 0;
-	p_nxge_param_t	pa;
-	uint32_t	cfg_it = B_FALSE;
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_set_npi_debug_flag"));
-	cfg_value = mi_strtol(value, &end, BASE_HEX);
-	pa = (p_nxge_param_t)cp;
-	if (PARAM_OUTOF_RANGE(value, end, cfg_value, pa)) {
-		NXGE_DEBUG_MSG((nxgep, NDD_CTL, " nxge_param_set_npi_debug_flag"
-				    " out of range %llx", cfg_value));
-		return (EINVAL);
-	}
-	if (pa->value != cfg_value) {
-		pa->old_value = pa->value;
-		pa->value = cfg_value;
-		cfg_it = B_TRUE;
-	}
-
-	if (cfg_it == B_TRUE) {
-		npi_debug_level = pa->value;
-	}
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_set_npi_debug_flag"));
-	return (status);
-}
-
-/* ARGSUSED */
-static int
-nxge_param_dump_rdc(p_nxge_t nxgep, queue_t *q, p_mblk_t mp, caddr_t cp)
-{
-	uint_t rdc;
-
-	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_param_dump_rdc"));
-
-	(void) npi_rxdma_dump_fzc_regs(NXGE_DEV_NPI_HANDLE(nxgep));
-	for (rdc = 0; rdc < nxgep->nrdc; rdc++)
-		(void) nxge_dump_rxdma_channel(nxgep, nxgep->rdc[rdc]);
-
-	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "<== nxge_param_dump_rdc"));
-	return (0);
-}
-
-/* ARGSUSED */
-static int
-nxge_param_dump_tdc(p_nxge_t nxgep, queue_t *q, p_mblk_t mp, caddr_t cp)
-{
-	uint_t	tdc;
-
-	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_param_dump_tdc"));
-
-	for (tdc = 0; tdc < nxgep->ntdc; tdc++)
-		(void) nxge_txdma_regs_dump(nxgep, nxgep->tdc[tdc]);
-
-	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "<== nxge_param_dump_tdc"));
-	return (0);
-}
-
-/* ARGSUSED */
-static int
-nxge_param_dump_fflp_regs(p_nxge_t nxgep, queue_t *q, p_mblk_t mp, caddr_t cp)
-{
-	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_param_dump_fflp_regs"));
-
-	(void) npi_fflp_dump_regs(NXGE_DEV_NPI_HANDLE(nxgep));
-
-	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "<== nxge_param_dump_fflp_regs"));
-	return (0);
-}
-
-/* ARGSUSED */
-static int
-nxge_param_dump_mac_regs(p_nxge_t nxgep, queue_t *q, p_mblk_t mp, caddr_t cp)
-{
-	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_param_dump_mac_regs"));
-
-	(void) npi_mac_dump_regs(NXGE_DEV_NPI_HANDLE(nxgep),
-		nxgep->function_num);
-
-	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "<== nxge_param_dump_mac_regs"));
-	return (0);
-}
-
-/* ARGSUSED */
-static int
-nxge_param_dump_ipp_regs(p_nxge_t nxgep, queue_t *q, p_mblk_t mp, caddr_t cp)
-{
-	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_param_dump_ipp_regs"));
-
-	(void) npi_ipp_dump_regs(NXGE_DEV_NPI_HANDLE(nxgep),
-		nxgep->function_num);
-	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "<== nxge_param_dump_ipp_regs"));
-	return (0);
-}
-
-/* ARGSUSED */
-static int
-nxge_param_dump_vlan_table(p_nxge_t nxgep, queue_t *q, p_mblk_t mp, caddr_t cp)
-{
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_dump_vlan_table"));
-
-	(void) npi_fflp_vlan_tbl_dump(NXGE_DEV_NPI_HANDLE(nxgep));
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_dump_vlan_table"));
-	return (0);
-}
-
-/* ARGSUSED */
-static int
-nxge_param_dump_rdc_table(p_nxge_t nxgep, queue_t *q, p_mblk_t mp, caddr_t cp)
-{
-	uint8_t	table;
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "==> nxge_param_dump_rdc_table"));
-	for (table = 0; table < NXGE_MAX_RDC_GROUPS; table++) {
-		(void) npi_rxdma_dump_rdc_table(NXGE_DEV_NPI_HANDLE(nxgep),
-					    table);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, NDD_CTL, "<== nxge_param_dump_rdc_table"));
-	return (0);
-}
-
-typedef struct block_info {
-	char		*name;
-	uint32_t	offset;
-} block_info_t;
-
-block_info_t reg_block[] = {
-	{"PIO",		PIO},
-	{"FZC_PIO",	FZC_PIO},
-	{"FZC_XMAC",	FZC_MAC},
-	{"FZC_IPP",	FZC_IPP},
-	{"FFLP",	FFLP},
-	{"FZC_FFLP",	FZC_FFLP},
-	{"PIO_VADDR",	PIO_VADDR},
-	{"ZCP",	ZCP},
-	{"FZC_ZCP",	FZC_ZCP},
-	{"DMC",	DMC},
-	{"FZC_DMC",	FZC_DMC},
-	{"TXC",	TXC},
-	{"FZC_TXC",	FZC_TXC},
-	{"PIO_LDSV",	PIO_LDSV},
-	{"PIO_LDGIM",	PIO_LDGIM},
-	{"PIO_IMASK0",	PIO_IMASK0},
-	{"PIO_IMASK1",	PIO_IMASK1},
-	{"FZC_PROM",	FZC_PROM},
-	{"END",	ALL_FF_32},
-};
-
-/* ARGSUSED */
-static int
-nxge_param_dump_ptrs(p_nxge_t nxgep, queue_t *q, p_mblk_t mp, caddr_t cp)
-{
-	uint_t			print_len, buf_len;
-	p_mblk_t		np;
-	int			rdc, tdc, block;
-	uint64_t		base;
-	p_nxge_dma_pt_cfg_t	p_dma_cfgp;
-	p_nxge_hw_pt_cfg_t	p_cfgp;
-	int			buff_alloc_size = NXGE_NDD_INFODUMP_BUFF_8K;
-	p_tx_ring_t 		*tx_rings;
-	p_rx_rcr_rings_t 	rx_rcr_rings;
-	p_rx_rcr_ring_t		*rcr_rings;
-	p_rx_rbr_rings_t 	rx_rbr_rings;
-	p_rx_rbr_ring_t		*rbr_rings;
-
-	NXGE_DEBUG_MSG((nxgep, IOC_CTL,
-		"==> nxge_param_dump_ptrs"));
-
-	(void) mi_mpprintf(mp, "ptr information for Port\t %d \n",
-		nxgep->function_num);
-
-	if ((np = allocb(buff_alloc_size, BPRI_HI)) == NULL) {
-		/*
-		 * The short message below may still go out even though the
-		 * large buffer allocation failed.
-		 */
-		(void) mi_mpprintf(mp, "%s\n", "out of buffer");
-		return (0);
-	}
-
-	buf_len = buff_alloc_size;
-	mp->b_cont = np;
-	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
-	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
-
-	rx_rcr_rings = nxgep->rx_rcr_rings;
-	rcr_rings = rx_rcr_rings->rcr_rings;
-	rx_rbr_rings = nxgep->rx_rbr_rings;
-	rbr_rings = rx_rbr_rings->rbr_rings;
-	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
-		"nxgep (nxge_t) $%p\n"
-		"dev_regs (dev_regs_t) $%p\n",
-		nxgep, nxgep->dev_regs);
-
-	ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
-
-	/* do register pointers */
-	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
-		"reg base (npi_reg_ptr_t) $%p\t "
-		"pci reg (npi_reg_ptr_t) $%p\n",
-		nxgep->dev_regs->nxge_regp,
-		nxgep->dev_regs->nxge_pciregp);
-
-	ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
-
-	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
-		"\nBlock \t Offset \n");
-
-	ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
-	block = 0;
-	base = (uint64_t)nxgep->dev_regs->nxge_regp;
-	while (reg_block[block].offset != ALL_FF_32) {
-		print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
-			"%9s\t 0x%llx\n",
-			reg_block[block].name,
-			(unsigned long long)(reg_block[block].offset + base));
-		ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
-		block++;
-	}
-
-	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
-		"\nRDC\t rcrp (rx_rcr_ring_t)\t "
-		"rbrp (rx_rbr_ring_t)\n");
-
-	ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
-
-	for (rdc = 0; rdc < p_cfgp->max_rdcs; rdc++) {
-		print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
-			" %d\t  $%p\t\t   $%p\n",
-			rdc, rcr_rings[rdc],
-			rbr_rings[rdc]);
-		ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
-	}
-
-	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
-			    "\nTDC\t tdcp (tx_ring_t)\n");
-
-	ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
-	tx_rings = nxgep->tx_rings->rings;
-	for (tdc = 0; tdc < p_cfgp->max_tdcs; tdc++) {
-		print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
-			" %d\t  $%p\n", tdc, tx_rings[tdc]);
-		ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
-	}
-
-	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len, "\n\n");
-
-	ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
-	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "<== nxge_param_dump_ptrs"));
-	return (0);
-}
-
-/*
- * Load 'name' into the named dispatch table pointed to by 'ndp'.
- * 'ndp' should be the address of a char pointer cell.  If the table
- * does not exist (*ndp == 0), a new table is allocated and 'ndp'
- * is stuffed.  If there is not enough space in the table for a new
- * entry, more space is allocated.
- */
-/* ARGSUSED */
-boolean_t
-nxge_nd_load(caddr_t *pparam, char *name,
-	pfi_t get_pfi, pfi_t set_pfi, caddr_t data)
-{
-	ND	*nd;
-	NDE	*nde;
-
-	NXGE_DEBUG_MSG((NULL, NDD2_CTL, " ==> nxge_nd_load"));
-	if (!pparam)
-		return (B_FALSE);
-
-	if ((nd = (ND *)*pparam) == NULL) {
-		if ((nd = (ND *)KMEM_ZALLOC(sizeof (ND), KM_NOSLEEP)) == NULL)
-			return (B_FALSE);
-		*pparam = (caddr_t)nd;
-	}
-
-	if (nd->nd_tbl) {
-		for (nde = nd->nd_tbl; nde->nde_name; nde++) {
-			if (strcmp(name, nde->nde_name) == 0)
-				goto fill_it;
-		}
-	}
-
-	if (nd->nd_free_count <= 1) {
-		if ((nde = (NDE *)KMEM_ZALLOC(nd->nd_size +
-					NDE_ALLOC_SIZE, KM_NOSLEEP)) == NULL)
-			return (B_FALSE);
-		nd->nd_free_count += NDE_ALLOC_COUNT;
-		if (nd->nd_tbl) {
-			bcopy((char *)nd->nd_tbl, (char *)nde, nd->nd_size);
-			KMEM_FREE((char *)nd->nd_tbl, nd->nd_size);
-		} else {
-			nd->nd_free_count--;
-			nde->nde_name = "?";
-			nde->nde_get_pfi = nxge_nd_get_names;
-			nde->nde_set_pfi = nxge_set_default;
-		}
-		nde->nde_data = (caddr_t)nd;
-		nd->nd_tbl = nde;
-		nd->nd_size += NDE_ALLOC_SIZE;
-	}
-	for (nde = nd->nd_tbl; nde->nde_name; nde++)
-		noop;
-	nd->nd_free_count--;
-fill_it:
-	nde->nde_name = name;
-	nde->nde_get_pfi = get_pfi;
-	nde->nde_set_pfi = set_pfi;
-	nde->nde_data = data;
-	NXGE_DEBUG_MSG((NULL, NDD2_CTL, " <== nxge_nd_load"));
-
-	return (B_TRUE);
-}
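The comment above nxge_nd_load() describes how the dispatch table is grown on demand and the entry filled in. A minimal sketch of registering one parameter through it follows; the parameter name, the param_arr index and the use of the debug-flag handlers are illustrative assumptions, not the driver's actual registration code:

	caddr_t ndp = NULL;
	p_nxge_param_t pa = &nxgep->param_arr[0];	/* illustrative entry */

	/*
	 * Hook the debug-flag get/set routines defined earlier in this file
	 * to the (assumed) name "nxge_debug_flag"; ND_GET/ND_SET ioctls on
	 * that name then dispatch through nxge_nd_getset() below.
	 */
	if (!nxge_nd_load(&ndp, "nxge_debug_flag",
	    (pfi_t)nxge_param_get_debug_flag,
	    (pfi_t)nxge_param_set_nxge_debug_flag, (caddr_t)pa))
		cmn_err(CE_WARN, "nxge: nd_load of nxge_debug_flag failed");

The table built this way is what nxge_nd_getset() walks when an ioctl arrives, and nxge_nd_free() releases it on detach.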
-
-/*
- * Free the table pointed to by 'pparam'
- */
-void
-nxge_nd_free(caddr_t *pparam)
-{
-	ND *nd;
-
-	if ((nd = (ND *)*pparam) != NULL) {
-		if (nd->nd_tbl)
-			KMEM_FREE((char *)nd->nd_tbl, nd->nd_size);
-		KMEM_FREE((char *)nd, sizeof (ND));
-		*pparam = nil(caddr_t);
-	}
-}
-
-int
-nxge_nd_getset(p_nxge_t nxgep, queue_t *q, caddr_t param, p_mblk_t mp)
-{
-	int		err;
-	IOCP		iocp;
-	p_mblk_t	mp1, mp2;
-	ND		*nd;
-	NDE		*nde;
-	char		*valp;
-	size_t		avail;
-
-	if (!param) {
-		return (B_FALSE);
-	}
-
-	nd = (ND *)param;
-	iocp = (IOCP)mp->b_rptr;
-	if ((iocp->ioc_count == 0) || !(mp1 = mp->b_cont)) {
-		mp->b_datap->db_type = M_IOCACK;
-		iocp->ioc_count = 0;
-		iocp->ioc_error = EINVAL;
-		return (B_FALSE);
-	}
-
-	/*
-	 * NOTE - logic throughout nd_xxx assumes single data block for ioctl.
-	 *	However, existing code sends in some big buffers.
-	 */
-	avail = iocp->ioc_count;
-	if (mp1->b_cont) {
-		freemsg(mp1->b_cont);
-		mp1->b_cont = NULL;
-	}
-
-	mp1->b_datap->db_lim[-1] = '\0';	/* Force null termination */
-	for (valp = (char *)mp1->b_rptr; *valp != '\0'; valp++) {
-		if (*valp == '-')
-			*valp = '_';
-	}
-
-	valp = (char *)mp1->b_rptr;
-
-	for (nde = nd->nd_tbl; /* */; nde++) {
-		if (!nde->nde_name)
-			return (B_FALSE);
-		if (strcmp(nde->nde_name, valp) == 0)
-			break;
-	}
-	err = EINVAL;
-	while (*valp++)
-		noop;
-	if (!*valp || valp >= (char *)mp1->b_wptr)
-		valp = nilp(char);
-	switch (iocp->ioc_cmd) {
-	case ND_GET:
-		/*
-		 * (temporary) hack: "*valp" is size of user buffer for
-		 * copyout. If result of action routine is too big, free
-		 * excess and return ioc_rval as buffer size needed.
-		 * Return as many mblocks as will fit, free the rest.  For
-		 * backward compatibility, assume size of original ioctl
-		 * buffer if "*valp" bad or not given.
-		 */
-		if (valp)
-			avail = mi_strtol(valp, (char **)0, 10);
-		/*
-		 * We overwrite the name/value with the reply data
-		 */
-		mp2 = mp1;
-		while (mp2) {
-			mp2->b_wptr = mp2->b_rptr;
-			mp2 = mp2->b_cont;
-		}
-
-		err = (*nde->nde_get_pfi)(nxgep, q, mp1, nde->nde_data);
-
-		if (!err) {
-			size_t	size_out = 0;
-			size_t	excess;
-
-			iocp->ioc_rval = 0;
-
-			/* Tack on the null */
-			err = nxge_mk_mblk_tail_space(mp1, &mp2, 1);
-			if (!err) {
-				*mp2->b_wptr++ = '\0';
-				size_out = msgdsize(mp1);
-				excess = size_out - avail;
-				if (excess > 0) {
-					iocp->ioc_rval = (int)size_out;
-					size_out -= excess;
-					(void) adjmsg(mp1, -(excess + 1));
-					err = nxge_mk_mblk_tail_space(
-							mp1, &mp2, 1);
-					if (!err)
-						*mp2->b_wptr++ = '\0';
-					else
-						size_out = 0;
-				}
-			} else
-				size_out = 0;
-			iocp->ioc_count = size_out;
-		}
-		break;
-
-	case ND_SET:
-		if (valp) {
-			if (nde->nde_set_pfi) {
-				err = (*nde->nde_set_pfi)(nxgep, q, mp1, valp,
-							    nde->nde_data);
-				iocp->ioc_count = 0;
-				freemsg(mp1);
-				mp->b_cont = NULL;
-			}
-		}
-		break;
-
-	default:
-		break;
-	}
-	iocp->ioc_error = err;
-	mp->b_datap->db_type = M_IOCACK;
-	return (B_TRUE);
-}
-
-/* ARGSUSED */
-int
-nxge_nd_get_names(p_nxge_t nxgep, queue_t *q, p_mblk_t mp, caddr_t param)
-{
-	ND		*nd;
-	NDE		*nde;
-	char		*rwtag;
-	boolean_t	get_ok, set_ok;
-	size_t		param_len;
-	int		status = 0;
-
-	nd = (ND *)param;
-	if (!nd)
-		return (ENOENT);
-
-	for (nde = nd->nd_tbl; nde->nde_name; nde++) {
-		get_ok = (nde->nde_get_pfi != nxge_get_default) &&
-				(nde->nde_get_pfi != NULL);
-		set_ok = (nde->nde_set_pfi != nxge_set_default) &&
-				(nde->nde_set_pfi != NULL);
-		if (get_ok) {
-			if (set_ok)
-				rwtag = "read and write";
-			else
-				rwtag = "read only";
-		} else if (set_ok)
-			rwtag = "write only";
-		else {
-			continue;
-		}
-		param_len = strlen(rwtag);
-		param_len += strlen(nde->nde_name);
-		param_len += 4;
-
-		(void) mi_mpprintf(mp, "%s (%s)", nde->nde_name, rwtag);
-	}
-	return (status);
-}
-
-/* ARGSUSED */
-int
-nxge_get_default(p_nxge_t nxgep, queue_t *q, p_mblk_t mp, caddr_t data)
-{
-	return (EACCES);
-}
-
-/* ARGSUSED */
-int
-nxge_set_default(p_nxge_t nxgep, queue_t *q, p_mblk_t mp, char *value,
-	caddr_t data)
-{
-	return (EACCES);
-}
-
-void
-nxge_param_ioctl(p_nxge_t nxgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
-{
-	int		cmd;
-	int		status = B_FALSE;
-
-	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_param_ioctl"));
-	cmd = iocp->ioc_cmd;
-
-	switch (cmd) {
-	default:
-		NXGE_DEBUG_MSG((nxgep, IOC_CTL,
-			"nxge_param_ioctl: bad cmd 0x%0x", cmd));
-		break;
-
-	case ND_GET:
-	case ND_SET:
-		NXGE_DEBUG_MSG((nxgep, IOC_CTL,
-			"nxge_param_ioctl: cmd 0x%0x", cmd));
-		if (!nxge_nd_getset(nxgep, wq, nxgep->param_list, mp)) {
-			NXGE_DEBUG_MSG((nxgep, IOC_CTL,
-				"false ret from nxge_nd_getset"));
-			break;
-		}
-		status = B_TRUE;
-		break;
-	}
-
-	if (status) {
-		qreply(wq, mp);
-	} else {
-		miocnak(wq, mp, 0, EINVAL);
-	}
-	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "<== nxge_param_ioctl"));
-}
-
-/* ARGSUSED */
-static boolean_t
-nxge_param_link_update(p_nxge_t nxgep)
-{
-	p_nxge_param_t 		param_arr;
-	nxge_param_index_t 	i;
-	boolean_t 		update_xcvr;
-	boolean_t 		update_dev;
-	int 			instance;
-	boolean_t 		status = B_TRUE;
-
-	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_param_link_update"));
-
-	param_arr = nxgep->param_arr;
-	instance = nxgep->instance;
-	update_xcvr = B_FALSE;
-	for (i = param_anar_1000fdx; i < param_anar_asmpause; i++) {
-		update_xcvr |= param_arr[i].value;
-	}
-
-	if (update_xcvr) {
-		update_xcvr = B_FALSE;
-		for (i = param_autoneg; i < param_enable_ipg0; i++) {
-			update_xcvr |=
-				(param_arr[i].value != param_arr[i].old_value);
-			param_arr[i].old_value = param_arr[i].value;
-		}
-		if (update_xcvr) {
-			RW_ENTER_WRITER(&nxgep->filter_lock);
-			(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
-			(void) nxge_link_init(nxgep);
-			(void) nxge_mac_init(nxgep);
-			(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
-			RW_EXIT(&nxgep->filter_lock);
-		}
-	} else {
-		cmn_err(CE_WARN, " Last setting will leave nxge%d with "
-				"no link capabilities.", instance);
-		cmn_err(CE_WARN, " Restoring previous setting.");
-		for (i = param_anar_1000fdx; i < param_anar_asmpause; i++)
-			param_arr[i].value = param_arr[i].old_value;
-	}
-
-	update_dev = B_FALSE;
-
-	if (update_dev) {
-		RW_ENTER_WRITER(&nxgep->filter_lock);
-		(void) nxge_rx_mac_disable(nxgep);
-		(void) nxge_tx_mac_disable(nxgep);
-		(void) nxge_tx_mac_enable(nxgep);
-		(void) nxge_rx_mac_enable(nxgep);
-		RW_EXIT(&nxgep->filter_lock);
-	}
-
-nxge_param_hw_update_exit:
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-			"<== nxge_param_link_update status = 0x%08x", status));
-	return (status);
-}
--- a/usr/src/uts/sun4v/io/nxge/nxge_rxdma.c	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,4538 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#include <sys/nxge/nxge_impl.h>
-#include <sys/nxge/nxge_rxdma.h>
-
-#define	NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp)	\
-	(rdcgrp + nxgep->pt_config.hw_config.start_rdc_grpid)
-#define	NXGE_ACTUAL_RDC(nxgep, rdc)	\
-	(rdc + nxgep->pt_config.hw_config.start_rdc)
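These two macros convert a partition-relative (logical) RDC group or channel number into the absolute hardware number by adding the partition's starting index from hw_config. A trivial illustration, assuming a hypothetical partition whose start_rdc is 8:

	/* Logical channel 2 of a partition starting at RDC 8 is HW channel 10. */
	uint8_t hw_rdc = NXGE_ACTUAL_RDC(nxgep, 2);	/* 2 + 8 == 10 */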
-
-/*
- * Globals: tunable parameters (/etc/system or adb)
- *
- */
-extern uint32_t nxge_rbr_size;
-extern uint32_t nxge_rcr_size;
-extern uint32_t	nxge_rbr_spare_size;
-
-extern uint32_t nxge_mblks_pending;
-
-/*
- * Tunable to reduce the amount of time spent in the
- * ISR doing Rx Processing.
- */
-extern uint32_t nxge_max_rx_pkts;
-boolean_t nxge_jumbo_enable;
-
-/*
- * Tunables to manage the receive buffer blocks.
- *
- * nxge_rx_threshold_hi: copy all buffers.
- * nxge_rx_buf_size_type: receive buffer block size type.
- * nxge_rx_threshold_lo: copy only up to tunable block size type.
- */
-extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi;
-extern nxge_rxbuf_type_t nxge_rx_buf_size_type;
-extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo;
-
-static nxge_status_t nxge_map_rxdma(p_nxge_t);
-static void nxge_unmap_rxdma(p_nxge_t);
-
-static nxge_status_t nxge_rxdma_hw_start_common(p_nxge_t);
-static void nxge_rxdma_hw_stop_common(p_nxge_t);
-
-static nxge_status_t nxge_rxdma_hw_start(p_nxge_t);
-static void nxge_rxdma_hw_stop(p_nxge_t);
-
-static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t,
-    p_nxge_dma_common_t *,  p_rx_rbr_ring_t *,
-    uint32_t,
-    p_nxge_dma_common_t *, p_rx_rcr_ring_t *,
-    p_rx_mbox_t *);
-static void nxge_unmap_rxdma_channel(p_nxge_t, uint16_t,
-    p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);
-
-static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t,
-    uint16_t,
-    p_nxge_dma_common_t *, p_rx_rbr_ring_t *,
-    p_rx_rcr_ring_t *, p_rx_mbox_t *);
-static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t,
-    p_rx_rcr_ring_t, p_rx_mbox_t);
-
-static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t,
-    uint16_t,
-    p_nxge_dma_common_t *,
-    p_rx_rbr_ring_t *, uint32_t);
-static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t,
-    p_rx_rbr_ring_t);
-
-static nxge_status_t nxge_rxdma_start_channel(p_nxge_t, uint16_t,
-    p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);
-static nxge_status_t nxge_rxdma_stop_channel(p_nxge_t, uint16_t);
-
-mblk_t *
-nxge_rx_pkts(p_nxge_t, uint_t, p_nxge_ldv_t,
-    p_rx_rcr_ring_t *, rx_dma_ctl_stat_t);
-
-static void nxge_receive_packet(p_nxge_t,
-	p_rx_rcr_ring_t,
-	p_rcr_entry_t,
-	boolean_t *,
-	mblk_t **, mblk_t **);
-
-nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t);
-
-static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t);
-static void nxge_freeb(p_rx_msg_t);
-static void nxge_rx_pkts_vring(p_nxge_t, uint_t,
-    p_nxge_ldv_t, rx_dma_ctl_stat_t);
-static nxge_status_t nxge_rx_err_evnts(p_nxge_t, uint_t,
-				p_nxge_ldv_t, rx_dma_ctl_stat_t);
-
-static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t,
-				uint32_t, uint32_t);
-
-static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t,
-    p_rx_rbr_ring_t);
-
-
-static nxge_status_t
-nxge_rxdma_fatal_err_recover(p_nxge_t, uint16_t);
-
-nxge_status_t
-nxge_rx_port_fatal_err_recover(p_nxge_t);
-
-static uint16_t
-nxge_get_pktbuf_size(p_nxge_t nxgep, int bufsz_type, rbr_cfig_b_t rbr_cfgb);
-
-nxge_status_t
-nxge_init_rxdma_channels(p_nxge_t nxgep)
-{
-	nxge_status_t	status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels"));
-
-	status = nxge_map_rxdma(nxgep);
-	if (status != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"<== nxge_init_rxdma_channels: status 0x%x", status));
-		return (status);
-	}
-
-	status = nxge_rxdma_hw_start_common(nxgep);
-	if (status != NXGE_OK) {
-		/* Common start failed; undo the mapping and bail out. */
-		nxge_unmap_rxdma(nxgep);
-		return (status);
-	}
-
-	status = nxge_rxdma_hw_start(nxgep);
-	if (status != NXGE_OK) {
-		nxge_unmap_rxdma(nxgep);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-		"<== nxge_init_rxdma_channels: status 0x%x", status));
-
-	return (status);
-}
-
-void
-nxge_uninit_rxdma_channels(p_nxge_t nxgep)
-{
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channels"));
-
-	nxge_rxdma_hw_stop(nxgep);
-	nxge_rxdma_hw_stop_common(nxgep);
-	nxge_unmap_rxdma(nxgep);
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-		"<== nxge_uinit_rxdma_channels"));
-		"<== nxge_uninit_rxdma_channels"));
-
-nxge_status_t
-nxge_reset_rxdma_channel(p_nxge_t nxgep, uint16_t channel)
-{
-	npi_handle_t		handle;
-	npi_status_t		rs = NPI_SUCCESS;
-	nxge_status_t		status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_reset_rxdma_channel"));
-
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	rs = npi_rxdma_cfg_rdc_reset(handle, channel);
-
-	if (rs != NPI_SUCCESS) {
-		status = NXGE_ERROR | rs;
-	}
-
-	return (status);
-}
-
-void
-nxge_rxdma_regs_dump_channels(p_nxge_t nxgep)
-{
-	int			i, ndmas;
-	uint16_t		channel;
-	p_rx_rbr_rings_t 	rx_rbr_rings;
-	p_rx_rbr_ring_t		*rbr_rings;
-	npi_handle_t		handle;
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_regs_dump_channels"));
-
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	(void) npi_rxdma_dump_fzc_regs(handle);
-
-	rx_rbr_rings = nxgep->rx_rbr_rings;
-	if (rx_rbr_rings == NULL) {
-		NXGE_DEBUG_MSG((nxgep, RX_CTL,
-			"<== nxge_rxdma_regs_dump_channels: "
-			"NULL ring pointer"));
-		return;
-	}
-	if (rx_rbr_rings->rbr_rings == NULL) {
-		NXGE_DEBUG_MSG((nxgep, RX_CTL,
-			"<== nxge_rxdma_regs_dump_channels: "
-			" NULL rbr rings pointer"));
-		return;
-	}
-
-	ndmas = rx_rbr_rings->ndmas;
-	if (!ndmas) {
-		NXGE_DEBUG_MSG((nxgep, RX_CTL,
-			"<== nxge_rxdma_regs_dump_channels: no channel"));
-		return;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-		"==> nxge_rxdma_regs_dump_channels (ndmas %d)", ndmas));
-
-	rbr_rings = rx_rbr_rings->rbr_rings;
-	for (i = 0; i < ndmas; i++) {
-		if (rbr_rings == NULL || rbr_rings[i] == NULL) {
-			continue;
-		}
-		channel = rbr_rings[i]->rdc;
-		(void) nxge_dump_rxdma_channel(nxgep, channel);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_regs_dump"));
-
-}
-
-nxge_status_t
-nxge_dump_rxdma_channel(p_nxge_t nxgep, uint8_t channel)
-{
-	npi_handle_t		handle;
-	npi_status_t		rs = NPI_SUCCESS;
-	nxge_status_t		status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_dump_rxdma_channel"));
-
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	rs = npi_rxdma_dump_rdc_regs(handle, channel);
-
-	if (rs != NPI_SUCCESS) {
-		status = NXGE_ERROR | rs;
-	}
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dump_rxdma_channel"));
-	return (status);
-}
-
-nxge_status_t
-nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
-    p_rx_dma_ent_msk_t mask_p)
-{
-	npi_handle_t		handle;
-	npi_status_t		rs = NPI_SUCCESS;
-	nxge_status_t		status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
-		"==> nxge_init_rxdma_channel_event_mask"));
-
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	rs = npi_rxdma_event_mask(handle, OP_SET, channel, mask_p);
-	if (rs != NPI_SUCCESS) {
-		status = NXGE_ERROR | rs;
-	}
-
-	return (status);
-}
-
-nxge_status_t
-nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
-    p_rx_dma_ctl_stat_t cs_p)
-{
-	npi_handle_t		handle;
-	npi_status_t		rs = NPI_SUCCESS;
-	nxge_status_t		status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
-		"==> nxge_init_rxdma_channel_cntl_stat"));
-
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	rs = npi_rxdma_control_status(handle, OP_SET, channel, cs_p);
-
-	if (rs != NPI_SUCCESS) {
-		status = NXGE_ERROR | rs;
-	}
-
-	return (status);
-}
-
-nxge_status_t
-nxge_rxdma_cfg_rdcgrp_default_rdc(p_nxge_t nxgep, uint8_t rdcgrp,
-				    uint8_t rdc)
-{
-	npi_handle_t		handle;
-	npi_status_t		rs = NPI_SUCCESS;
-	p_nxge_dma_pt_cfg_t	p_dma_cfgp;
-	p_nxge_rdc_grp_t	rdc_grp_p;
-	uint8_t actual_rdcgrp, actual_rdc;
-
-	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
-			    " ==> nxge_rxdma_cfg_rdcgrp_default_rdc"));
-	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
-
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-
-	rdc_grp_p = &p_dma_cfgp->rdc_grps[rdcgrp];
-	rdc_grp_p->rdc[0] = rdc;
-
-	actual_rdcgrp = NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp);
-	actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc);
-
-	rs = npi_rxdma_cfg_rdc_table_default_rdc(handle, actual_rdcgrp,
-							    actual_rdc);
-
-	if (rs != NPI_SUCCESS) {
-		return (NXGE_ERROR | rs);
-	}
-	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
-			    " <== nxge_rxdma_cfg_rdcgrp_default_rdc"));
-	return (NXGE_OK);
-}
-
-nxge_status_t
-nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep, uint8_t port, uint8_t rdc)
-{
-	npi_handle_t		handle;
-
-	uint8_t actual_rdc;
-	npi_status_t		rs = NPI_SUCCESS;
-
-	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
-			    " ==> nxge_rxdma_cfg_port_default_rdc"));
-
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc);
-	rs = npi_rxdma_cfg_default_port_rdc(handle, port, actual_rdc);
-
-
-	if (rs != NPI_SUCCESS) {
-		return (NXGE_ERROR | rs);
-	}
-	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
-			    " <== nxge_rxdma_cfg_port_default_rdc"));
-
-	return (NXGE_OK);
-}
-
-nxge_status_t
-nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep, uint8_t channel,
-				    uint16_t pkts)
-{
-	npi_status_t	rs = NPI_SUCCESS;
-	npi_handle_t	handle;
-	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
-			    " ==> nxge_rxdma_cfg_rcr_threshold"));
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-
-	rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, pkts);
-
-	if (rs != NPI_SUCCESS) {
-		return (NXGE_ERROR | rs);
-	}
-	NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_threshold"));
-	return (NXGE_OK);
-}
-
-nxge_status_t
-nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep, uint8_t channel,
-			    uint16_t tout, uint8_t enable)
-{
-	npi_status_t	rs = NPI_SUCCESS;
-	npi_handle_t	handle;
-	NXGE_DEBUG_MSG((nxgep, RX2_CTL, " ==> nxge_rxdma_cfg_rcr_timeout"));
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	if (enable == 0) {
-		rs = npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, channel);
-	} else {
-		rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
-							    tout);
-	}
-
-	if (rs != NPI_SUCCESS) {
-		return (NXGE_ERROR | rs);
-	}
-	NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_timeout"));
-	return (NXGE_OK);
-}
-
-nxge_status_t
-nxge_enable_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
-    p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
-{
-	npi_handle_t		handle;
-	rdc_desc_cfg_t 		rdc_desc;
-	p_rcrcfig_b_t		cfgb_p;
-	npi_status_t		rs = NPI_SUCCESS;
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel"));
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	/*
-	 * Use configuration data composed at init time.
-	 * Write to hardware the receive ring configurations.
-	 */
-	rdc_desc.mbox_enable = 1;
-	rdc_desc.mbox_addr = mbox_p->mbox_addr;
-	NXGE_DEBUG_MSG((nxgep, RX_CTL,
-		"==> nxge_enable_rxdma_channel: mboxp $%p($%p)",
-		mbox_p->mbox_addr, rdc_desc.mbox_addr));
-
-	rdc_desc.rbr_len = rbr_p->rbb_max;
-	rdc_desc.rbr_addr = rbr_p->rbr_addr;
-
-	switch (nxgep->rx_bksize_code) {
-	case RBR_BKSIZE_4K:
-		rdc_desc.page_size = SIZE_4KB;
-		break;
-	case RBR_BKSIZE_8K:
-		rdc_desc.page_size = SIZE_8KB;
-		break;
-	case RBR_BKSIZE_16K:
-		rdc_desc.page_size = SIZE_16KB;
-		break;
-	case RBR_BKSIZE_32K:
-		rdc_desc.page_size = SIZE_32KB;
-		break;
-	}
-
-	rdc_desc.size0 = rbr_p->npi_pkt_buf_size0;
-	rdc_desc.valid0 = 1;
-
-	rdc_desc.size1 = rbr_p->npi_pkt_buf_size1;
-	rdc_desc.valid1 = 1;
-
-	rdc_desc.size2 = rbr_p->npi_pkt_buf_size2;
-	rdc_desc.valid2 = 1;
-
-	rdc_desc.full_hdr = rcr_p->full_hdr_flag;
-	rdc_desc.offset = rcr_p->sw_priv_hdr_len;
-
-	rdc_desc.rcr_len = rcr_p->comp_size;
-	rdc_desc.rcr_addr = rcr_p->rcr_addr;
-
-	cfgb_p = &(rcr_p->rcr_cfgb);
-	rdc_desc.rcr_threshold = cfgb_p->bits.ldw.pthres;
-	rdc_desc.rcr_timeout = cfgb_p->bits.ldw.timeout;
-	rdc_desc.rcr_timeout_enable = cfgb_p->bits.ldw.entout;
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: "
-		"rbr_len qlen %d pagesize code %d rcr_len %d",
-		rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len));
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: "
-		"size 0 %d size 1 %d size 2 %d",
-		rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1,
-		rbr_p->npi_pkt_buf_size2));
-
-	rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc);
-	if (rs != NPI_SUCCESS) {
-		return (NXGE_ERROR | rs);
-	}
-
-	/*
-	 * Enable the timeout and threshold.
-	 */
-	rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel,
-			rdc_desc.rcr_threshold);
-	if (rs != NPI_SUCCESS) {
-		return (NXGE_ERROR | rs);
-	}
-
-	rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
-			rdc_desc.rcr_timeout);
-	if (rs != NPI_SUCCESS) {
-		return (NXGE_ERROR | rs);
-	}
-
-	/* Enable the DMA */
-	rs = npi_rxdma_cfg_rdc_enable(handle, channel);
-	if (rs != NPI_SUCCESS) {
-		return (NXGE_ERROR | rs);
-	}
-
-	/* Kick the DMA engine. */
-	npi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max);
-	/* Clear the rbr empty bit */
-	(void) npi_rxdma_channel_rbr_empty_clear(handle, channel);
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_enable_rxdma_channel"));
-
-	return (NXGE_OK);
-}
-
-nxge_status_t
-nxge_disable_rxdma_channel(p_nxge_t nxgep, uint16_t channel)
-{
-	npi_handle_t		handle;
-	npi_status_t		rs = NPI_SUCCESS;
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_disable_rxdma_channel"));
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-
-	/* disable the DMA */
-	rs = npi_rxdma_cfg_rdc_disable(handle, channel);
-	if (rs != NPI_SUCCESS) {
-		NXGE_DEBUG_MSG((nxgep, RX_CTL,
-			"<== nxge_disable_rxdma_channel:failed (0x%x)",
-			rs));
-		return (NXGE_ERROR | rs);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_disable_rxdma_channel"));
-	return (NXGE_OK);
-}
-
-nxge_status_t
-nxge_rxdma_channel_rcrflush(p_nxge_t nxgep, uint8_t channel)
-{
-	npi_handle_t		handle;
-	nxge_status_t		status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
-		"==> nxge_rxdma_channel_rcrflush"));
-
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	npi_rxdma_rdc_rcr_flush(handle, channel);
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
-		"<== nxge_rxdma_channel_rcrflush"));
-	return (status);
-
-}
-
-#define	MID_INDEX(l, r) (((r) + (l) + 1) >> 1)
-
-#define	TO_LEFT -1
-#define	TO_RIGHT 1
-#define	BOTH_RIGHT (TO_RIGHT + TO_RIGHT)
-#define	BOTH_LEFT (TO_LEFT + TO_LEFT)
-#define	IN_MIDDLE (TO_RIGHT + TO_LEFT)
-#define	NO_HINT 0xffffffff
-
-/*
- * Translate a packet buffer's DVMA (I/O) address, as reported by the
- * hardware in a completion ring entry, back to the kernel virtual
- * address, the offset within the buffer block and the message index.
- * A per-size hint is tried first; otherwise the sorted chunk table is
- * binary searched.
- */
-/*ARGSUSED*/
-nxge_status_t
-nxge_rxbuf_pp_to_vp(p_nxge_t nxgep, p_rx_rbr_ring_t rbr_p,
-	uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp,
-	uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index)
-{
-	int			bufsize;
-	uint64_t		pktbuf_pp;
-	uint64_t 		dvma_addr;
-	rxring_info_t 		*ring_info;
-	int 			base_side, end_side;
-	int 			r_index, l_index, anchor_index;
-	int 			found, search_done;
-	uint32_t offset, chunk_size, block_size, page_size_mask;
-	uint32_t chunk_index, block_index, total_index;
-	int 			max_iterations, iteration;
-	rxbuf_index_info_t 	*bufinfo;
-
-	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_rxbuf_pp_to_vp"));
-
-	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
-		"==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d",
-		pkt_buf_addr_pp,
-		pktbufsz_type));
-
-	pktbuf_pp = (uint64_t)pkt_buf_addr_pp;
-
-	switch (pktbufsz_type) {
-	case 0:
-		bufsize = rbr_p->pkt_buf_size0;
-		break;
-	case 1:
-		bufsize = rbr_p->pkt_buf_size1;
-		break;
-	case 2:
-		bufsize = rbr_p->pkt_buf_size2;
-		break;
-	case RCR_SINGLE_BLOCK:
-		bufsize = 0;
-		anchor_index = 0;
-		break;
-	default:
-		return (NXGE_ERROR);
-	}
-
-	if (rbr_p->num_blocks == 1) {
-		anchor_index = 0;
-		ring_info = rbr_p->ring_info;
-		bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
-		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
-			"==> nxge_rxbuf_pp_to_vp: (found, 1 block) "
-			"buf_pp $%p btype %d anchor_index %d "
-			"bufinfo $%p",
-			pkt_buf_addr_pp,
-			pktbufsz_type,
-			anchor_index,
-			bufinfo));
-
-		goto found_index;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
-		"==> nxge_rxbuf_pp_to_vp: "
-		"buf_pp $%p btype %d  anchor_index %d",
-		pkt_buf_addr_pp,
-		pktbufsz_type,
-		anchor_index));
-
-	ring_info = rbr_p->ring_info;
-	found = B_FALSE;
-	bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
-	iteration = 0;
-	max_iterations = ring_info->max_iterations;
-	/*
-	 * First check whether this block has been seen recently.  That is
-	 * indicated by a hint, which is set when the first buffer of a
-	 * block is seen and cleared when the last buffer of the block has
-	 * been processed.  Three block sizes are supported, so three hints
-	 * are kept.  The idea behind the hints is that once the hardware
-	 * uses a block for buffers of a given size, it uses that block
-	 * exclusively, and for that size only, until the block is
-	 * exhausted.  It is assumed that only a single block is in use for
-	 * a given buffer size at any time.
-	 */
-	if (ring_info->hint[pktbufsz_type] != NO_HINT) {
-		anchor_index = ring_info->hint[pktbufsz_type];
-		dvma_addr =  bufinfo[anchor_index].dvma_addr;
-		chunk_size = bufinfo[anchor_index].buf_size;
-		if ((pktbuf_pp >= dvma_addr) &&
-			(pktbuf_pp < (dvma_addr + chunk_size))) {
-			found = B_TRUE;
-				/*
-				 * check if this is the last buffer in the block
-				 * If so, then reset the hint for the size;
-				 */
-
-			if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size))
-				ring_info->hint[pktbufsz_type] = NO_HINT;
-		}
-	}
-
-	if (found == B_FALSE) {
-		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
-			"==> nxge_rxbuf_pp_to_vp: (!found)"
-			"buf_pp $%p btype %d anchor_index %d",
-			pkt_buf_addr_pp,
-			pktbufsz_type,
-			anchor_index));
-
-		/*
-		 * This is the first buffer of a block of this size, so the
-		 * whole information array must be searched.  A binary
-		 * search is used; it assumes the array is already sorted
-		 * in increasing order:
-		 *	info[0] < info[1] < ... < info[n-1]
-		 * where n is the size of the information array.
-		 */
-		r_index = rbr_p->num_blocks - 1;
-		l_index = 0;
-		search_done = B_FALSE;
-		anchor_index = MID_INDEX(r_index, l_index);
-		while (search_done == B_FALSE) {
-			if ((r_index == l_index) ||
-				(iteration >= max_iterations))
-				search_done = B_TRUE;
-			end_side = TO_RIGHT; /* to the right */
-			base_side = TO_LEFT; /* to the left */
-			/* read the DVMA address information and sort it */
-			dvma_addr =  bufinfo[anchor_index].dvma_addr;
-			chunk_size = bufinfo[anchor_index].buf_size;
-			NXGE_DEBUG_MSG((nxgep, RX2_CTL,
-				"==> nxge_rxbuf_pp_to_vp: (searching)"
-				"buf_pp $%p btype %d "
-				"anchor_index %d chunk_size %d dvmaaddr $%p",
-				pkt_buf_addr_pp,
-				pktbufsz_type,
-				anchor_index,
-				chunk_size,
-				dvma_addr));
-
-			if (pktbuf_pp >= dvma_addr)
-				base_side = TO_RIGHT; /* to the right */
-			if (pktbuf_pp < (dvma_addr + chunk_size))
-				end_side = TO_LEFT; /* to the left */
-
-			switch (base_side + end_side) {
-				case IN_MIDDLE:
-					/* found */
-					found = B_TRUE;
-					search_done = B_TRUE;
-					if ((pktbuf_pp + bufsize) <
-						(dvma_addr + chunk_size))
-						ring_info->hint[pktbufsz_type] =
-						bufinfo[anchor_index].buf_index;
-					break;
-				case BOTH_RIGHT:
-						/* not found: go to the right */
-					l_index = anchor_index + 1;
-					anchor_index =
-						MID_INDEX(r_index, l_index);
-					break;
-
-				case  BOTH_LEFT:
-						/* not found: go to the left */
-					r_index = anchor_index - 1;
-					anchor_index = MID_INDEX(r_index,
-						l_index);
-					break;
-				default: /* should not come here */
-					return (NXGE_ERROR);
-			}
-			iteration++;
-		}
-
-		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
-			"==> nxge_rxbuf_pp_to_vp: (search done)"
-			"buf_pp $%p btype %d anchor_index %d",
-			pkt_buf_addr_pp,
-			pktbufsz_type,
-			anchor_index));
-	}
-
-	if (found == B_FALSE) {
-		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
-			"==> nxge_rxbuf_pp_to_vp: (search failed)"
-			"buf_pp $%p btype %d anchor_index %d",
-			pkt_buf_addr_pp,
-			pktbufsz_type,
-			anchor_index));
-		return (NXGE_ERROR);
-	}
-
-found_index:
-	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
-		"==> nxge_rxbuf_pp_to_vp: (FOUND1)"
-		"buf_pp $%p btype %d bufsize %d anchor_index %d",
-		pkt_buf_addr_pp,
-		pktbufsz_type,
-		bufsize,
-		anchor_index));
-
-	/* index of the first block in this chunk */
-	chunk_index = bufinfo[anchor_index].start_index;
-	dvma_addr =  bufinfo[anchor_index].dvma_addr;
-	page_size_mask = ring_info->block_size_mask;
-
-	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
-		"==> nxge_rxbuf_pp_to_vp: (FOUND3), get chunk)"
-		"buf_pp $%p btype %d bufsize %d "
-		"anchor_index %d chunk_index %d dvma $%p",
-		pkt_buf_addr_pp,
-		pktbufsz_type,
-		bufsize,
-		anchor_index,
-		chunk_index,
-		dvma_addr));
-
-	offset = pktbuf_pp - dvma_addr; /* offset within the chunk */
-	block_size = rbr_p->block_size; /* System  block(page) size */
-
-	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
-		"==> nxge_rxbuf_pp_to_vp: (FOUND4), get chunk)"
-		"buf_pp $%p btype %d bufsize %d "
-		"anchor_index %d chunk_index %d dvma $%p "
-		"offset %d block_size %d",
-		pkt_buf_addr_pp,
-		pktbufsz_type,
-		bufsize,
-		anchor_index,
-		chunk_index,
-		dvma_addr,
-		offset,
-		block_size));
-
-	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> getting total index"));
-
-	block_index = (offset / block_size); /* index within chunk */
-	total_index = chunk_index + block_index;
-
-
-	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
-		"==> nxge_rxbuf_pp_to_vp: "
-		"total_index %d dvma_addr $%p "
-		"offset %d block_size %d "
-		"block_index %d ",
-		total_index, dvma_addr,
-		offset, block_size,
-		block_index));
-
-	*pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr
-				+ offset);
-
-	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
-		"==> nxge_rxbuf_pp_to_vp: "
-		"total_index %d dvma_addr $%p "
-		"offset %d block_size %d "
-		"block_index %d "
-		"*pkt_buf_addr_p $%p",
-		total_index, dvma_addr,
-		offset, block_size,
-		block_index,
-		*pkt_buf_addr_p));
-
-
-	*msg_index = total_index;
-	*bufoffset =  (offset & page_size_mask);
-
-	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
-		"==> nxge_rxbuf_pp_to_vp: get msg index: "
-		"msg_index %d bufoffset_index %d",
-		*msg_index,
-		*bufoffset));
-
-	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rxbuf_pp_to_vp"));
-
-	return (NXGE_OK);
-}
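The lookup above interleaves the hint check, the binary search and a number of debug messages. The self-contained sketch below shows the core idea in isolation; chunk_t and find_chunk() are hypothetical names, not part of the driver. It performs a binary search over buffer chunks sorted by ascending DVMA address, the ordering that nxge_rxbuf_index_info_init() establishes further down:

	#include <stdint.h>

	typedef struct {
		uint64_t dvma_addr;	/* I/O address of the chunk */
		uint32_t buf_size;	/* size of the chunk in bytes */
	} chunk_t;

	/*
	 * Return the index of the chunk containing 'addr', or -1 if none
	 * does.  'chunks' must be sorted by ascending dvma_addr and the
	 * chunks must not overlap.
	 */
	static int
	find_chunk(const chunk_t *chunks, int nchunks, uint64_t addr)
	{
		int l = 0, r = nchunks - 1;

		while (l <= r) {
			int m = (l + r) / 2;

			if (addr < chunks[m].dvma_addr)
				r = m - 1;	/* search the left half */
			else if (addr >= chunks[m].dvma_addr + chunks[m].buf_size)
				l = m + 1;	/* search the right half */
			else
				return (m);	/* addr lies inside chunk m */
		}
		return (-1);
	}

The driver's version additionally remembers a per-buffer-size hint so that consecutive buffers carved from the same chunk can skip the search entirely.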
-
-/*
- * Comparison function used by nxge_ksort() to order receive buffer
- * chunks by ascending DVMA address.
- */
-static int
-nxge_sort_compare(const void *p1, const void *p2)
-{
-
-	rxbuf_index_info_t *a, *b;
-
-	a = (rxbuf_index_info_t *)p1;
-	b = (rxbuf_index_info_t *)p2;
-
-	if (a->dvma_addr > b->dvma_addr)
-		return (1);
-	if (a->dvma_addr < b->dvma_addr)
-		return (-1);
-	return (0);
-}
-
-
-
-/*
- * grabbed this sort implementation from common/syscall/avl.c
- *
- */
-/*
- * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified.
- * v = Ptr to array/vector of objs
- * n = # objs in the array
- * s = size of each obj (must be a multiple of the word size)
- * f = ptr to function to compare two objs
- *	returns -1 (less than), 0 (equal) or 1 (greater than)
- */
-void
-nxge_ksort(caddr_t v, int n, int s, int (*f)())
-{
-	int g, i, j, ii;
-	unsigned int *p1, *p2;
-	unsigned int tmp;
-
-	/* No work to do */
-	if (v == NULL || n <= 1)
-		return;
-	/* Sanity check on arguments */
-	ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0);
-	ASSERT(s > 0);
-
-	for (g = n / 2; g > 0; g /= 2) {
-		for (i = g; i < n; i++) {
-			for (j = i - g; j >= 0 &&
-				(*f)(v + j * s, v + (j + g) * s) == 1;
-					j -= g) {
-				p1 = (unsigned *)(v + j * s);
-				p2 = (unsigned *)(v + (j + g) * s);
-				for (ii = 0; ii < s / 4; ii++) {
-					tmp = *p1;
-					*p1++ = *p2;
-					*p2++ = tmp;
-				}
-			}
-		}
-	}
-}
-
-/*
- * Initialize data structures required for rxdma
- * buffer dvma->vmem address lookup
- */
-/*ARGSUSED*/
-static nxge_status_t
-nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp)
-{
-
-	int index;
-	rxring_info_t *ring_info;
-	int max_iteration = 0, max_index = 0;
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_rxbuf_index_info_init"));
-
-	ring_info = rbrp->ring_info;
-	ring_info->hint[0] = NO_HINT;
-	ring_info->hint[1] = NO_HINT;
-	ring_info->hint[2] = NO_HINT;
-	max_index = rbrp->num_blocks;
-
-	/* Read the DVMA address information and sort it by address. */
-	NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
-		" nxge_rxbuf_index_info_init Sort ptrs"));
-
-	nxge_ksort((void *)ring_info->buffer, max_index,
-		sizeof (rxbuf_index_info_t), nxge_sort_compare);
-
-	for (index = 0; index < max_index; index++) {
-		NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
-			" nxge_rxbuf_index_info_init: sorted chunk %d "
-			" ioaddr $%p kaddr $%p size %x",
-			index, ring_info->buffer[index].dvma_addr,
-			ring_info->buffer[index].kaddr,
-			ring_info->buffer[index].buf_size));
-	}
-
-	max_iteration = 0;
-	while (max_index >= (1ULL << max_iteration))
-		max_iteration++;
-	ring_info->max_iterations = max_iteration + 1;
-	NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
-		" nxge_rxbuf_index_info_init Find max iter %d",
-					ring_info->max_iterations));
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxbuf_index_info_init"));
-	return (NXGE_OK);
-}
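The while loop above sets max_iteration to the smallest k for which 2^k exceeds the number of chunks, and max_iterations to k + 1; this is what bounds the binary search in nxge_rxbuf_pp_to_vp(). A worked example, assuming a hypothetical ring with 12 chunks:

	int max_index = 12, max_iteration = 0;

	/* 12 >= 1, 2, 4 and 8 but not 16, so the body runs four times... */
	while (max_index >= (1ULL << max_iteration))
		max_iteration++;
	/* ...leaving max_iteration == 4, hence max_iterations == 5. */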
-
-/* ARGSUSED */
-void
-nxge_dump_rcr_entry(p_nxge_t nxgep, p_rcr_entry_t entry_p)
-{
-#ifdef	NXGE_DEBUG
-
-	uint32_t bptr;
-	uint64_t pp;
-
-	bptr = entry_p->bits.hdw.pkt_buf_addr;
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL,
-		"\trcr entry $%p "
-		"\trcr entry 0x%0llx "
-		"\trcr entry 0x%08x "
-		"\trcr entry 0x%08x "
-		"\tvalue 0x%0llx\n"
-		"\tmulti = %d\n"
-		"\tpkt_type = 0x%x\n"
-		"\tzero_copy = %d\n"
-		"\tnoport = %d\n"
-		"\tpromis = %d\n"
-		"\terror = 0x%04x\n"
-		"\tdcf_err = 0x%01x\n"
-		"\tl2_len = %d\n"
-		"\tpktbufsize = %d\n"
-		"\tpkt_buf_addr = $%p\n"
-		"\tpkt_buf_addr (<< 6) = $%p\n",
-		entry_p,
-		*(int64_t *)entry_p,
-		*(int32_t *)entry_p,
-		*(int32_t *)((char *)entry_p + 32),
-		entry_p->value,
-		entry_p->bits.hdw.multi,
-		entry_p->bits.hdw.pkt_type,
-		entry_p->bits.hdw.zero_copy,
-		entry_p->bits.hdw.noport,
-		entry_p->bits.hdw.promis,
-		entry_p->bits.hdw.error,
-		entry_p->bits.hdw.dcf_err,
-		entry_p->bits.hdw.l2_len,
-		entry_p->bits.hdw.pktbufsz,
-		bptr,
-		entry_p->bits.ldw.pkt_buf_addr));
-
-	pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) <<
-		RCR_PKT_BUF_ADDR_SHIFT;
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "rcr pp 0x%llx l2 len %d",
-		pp, (*(int64_t *)entry_p >> 40) & 0x3fff));
-#endif
-}
-
-void
-nxge_rxdma_regs_dump(p_nxge_t nxgep, int rdc)
-{
-	npi_handle_t		handle;
-	rbr_stat_t 		rbr_stat;
-	addr44_t 		hd_addr;
-	addr44_t 		tail_addr;
-	uint16_t 		qlen;
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL,
-		"==> nxge_rxdma_regs_dump: rdc channel %d", rdc));
-
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-
-	/* RBR head */
-	hd_addr.addr = 0;
-	(void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &hd_addr);
-	printf("nxge_rxdma_regs_dump: got hdptr $%p \n",
-		(void *)hd_addr.addr);
-
-	/* RBR stats */
-	(void) npi_rxdma_rdc_rbr_stat_get(handle, rdc, &rbr_stat);
-	printf("nxge_rxdma_regs_dump: rbr len %d \n", rbr_stat.bits.ldw.qlen);
-
-	/* RCR tail */
-	tail_addr.addr = 0;
-	(void) npi_rxdma_rdc_rcr_tail_get(handle, rdc, &tail_addr);
-	printf("nxge_rxdma_regs_dump: got tail ptr $%p \n",
-		(void *)tail_addr.addr);
-
-	/* RCR qlen */
-	(void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen);
-	printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen);
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL,
-		"<== nxge_rxdma_regs_dump: rdc channel %d", rdc));
-}
-
-void
-nxge_rxdma_stop(p_nxge_t nxgep)
-{
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop"));
-
-	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
-	(void) nxge_rx_mac_disable(nxgep);
-	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop"));
-}
-
-void
-nxge_rxdma_stop_reinit(p_nxge_t nxgep)
-{
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_reinit"));
-
-	(void) nxge_rxdma_stop(nxgep);
-	(void) nxge_uninit_rxdma_channels(nxgep);
-	(void) nxge_init_rxdma_channels(nxgep);
-
-#ifndef	AXIS_DEBUG_LB
-	(void) nxge_xcvr_init(nxgep);
-	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
-#endif
-	(void) nxge_rx_mac_enable(nxgep);
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_reinit"));
-}
-
-nxge_status_t
-nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable)
-{
-	int			i, ndmas;
-	uint16_t		channel;
-	p_rx_rbr_rings_t 	rx_rbr_rings;
-	p_rx_rbr_ring_t		*rbr_rings;
-	npi_handle_t		handle;
-	npi_status_t		rs = NPI_SUCCESS;
-	nxge_status_t		status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-		"==> nxge_rxdma_hw_mode: mode %d", enable));
-
-	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
-		NXGE_DEBUG_MSG((nxgep, RX_CTL,
-			"<== nxge_rxdma_mode: not initialized"));
-		return (NXGE_ERROR);
-	}
-
-	rx_rbr_rings = nxgep->rx_rbr_rings;
-	if (rx_rbr_rings == NULL) {
-		NXGE_DEBUG_MSG((nxgep, RX_CTL,
-			"<== nxge_rxdma_mode: NULL ring pointer"));
-		return (NXGE_ERROR);
-	}
-	if (rx_rbr_rings->rbr_rings == NULL) {
-		NXGE_DEBUG_MSG((nxgep, RX_CTL,
-			"<== nxge_rxdma_mode: NULL rbr rings pointer"));
-		return (NXGE_ERROR);
-	}
-
-	ndmas = rx_rbr_rings->ndmas;
-	if (!ndmas) {
-		NXGE_DEBUG_MSG((nxgep, RX_CTL,
-			"<== nxge_rxdma_mode: no channel"));
-		return (NXGE_ERROR);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-		"==> nxge_rxdma_mode (ndmas %d)", ndmas));
-
-	rbr_rings = rx_rbr_rings->rbr_rings;
-
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	for (i = 0; i < ndmas; i++) {
-		if (rbr_rings == NULL || rbr_rings[i] == NULL) {
-			continue;
-		}
-		channel = rbr_rings[i]->rdc;
-		if (enable) {
-			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-				"==> nxge_rxdma_hw_mode: channel %d (enable)",
-				channel));
-			rs = npi_rxdma_cfg_rdc_enable(handle, channel);
-		} else {
-			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-				"==> nxge_rxdma_hw_mode: channel %d (disable)",
-				channel));
-			rs = npi_rxdma_cfg_rdc_disable(handle, channel);
-		}
-	}
-
-	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-		"<== nxge_rxdma_hw_mode: status 0x%x", status));
-
-	return (status);
-}
-
-void
-nxge_rxdma_enable_channel(p_nxge_t nxgep, uint16_t channel)
-{
-	npi_handle_t		handle;
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
-		"==> nxge_rxdma_enable_channel: channel %d", channel));
-
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	(void) npi_rxdma_cfg_rdc_enable(handle, channel);
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_enable_channel"));
-}
-
-void
-nxge_rxdma_disable_channel(p_nxge_t nxgep, uint16_t channel)
-{
-	npi_handle_t		handle;
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
-		"==> nxge_rxdma_disable_channel: channel %d", channel));
-
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	(void) npi_rxdma_cfg_rdc_disable(handle, channel);
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_disable_channel"));
-}
-
-void
-nxge_hw_start_rx(p_nxge_t nxgep)
-{
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_rx"));
-
-	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
-	(void) nxge_rx_mac_enable(nxgep);
-
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_rx"));
-}
-
-/*ARGSUSED*/
-void
-nxge_fixup_rxdma_rings(p_nxge_t nxgep)
-{
-	int			i, ndmas;
-	uint16_t		rdc;
-	p_rx_rbr_rings_t 	rx_rbr_rings;
-	p_rx_rbr_ring_t		*rbr_rings;
-	p_rx_rcr_rings_t 	rx_rcr_rings;
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_fixup_rxdma_rings"));
-
-	rx_rbr_rings = nxgep->rx_rbr_rings;
-	if (rx_rbr_rings == NULL) {
-		NXGE_DEBUG_MSG((nxgep, RX_CTL,
-			"<== nxge_fixup_rxdma_rings: NULL ring pointer"));
-		return;
-	}
-	ndmas = rx_rbr_rings->ndmas;
-	if (!ndmas) {
-		NXGE_DEBUG_MSG((nxgep, RX_CTL,
-			"<== nxge_fixup_rxdma_rings: no channel"));
-		return;
-	}
-
-	rx_rcr_rings = nxgep->rx_rcr_rings;
-	if (rx_rcr_rings == NULL) {
-		NXGE_DEBUG_MSG((nxgep, RX_CTL,
-			"<== nxge_fixup_rxdma_rings: NULL ring pointer"));
-		return;
-	}
-	NXGE_DEBUG_MSG((nxgep, RX_CTL,
-		"==> nxge_fixup_rxdma_rings (ndmas %d)", ndmas));
-
-	nxge_rxdma_hw_stop(nxgep);
-
-	rbr_rings = rx_rbr_rings->rbr_rings;
-	for (i = 0; i < ndmas; i++) {
-		rdc = rbr_rings[i]->rdc;
-		NXGE_DEBUG_MSG((nxgep, RX_CTL,
-			"==> nxge_fixup_rxdma_rings: channel %d "
-			"ring $%p", rdc, rbr_rings[i]));
-		(void) nxge_rxdma_fixup_channel(nxgep, rdc, i);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings"));
-}
-
-void
-nxge_rxdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
-{
-	int		i;
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fix_channel"));
-	i = nxge_rxdma_get_ring_index(nxgep, channel);
-	if (i < 0) {
-		NXGE_DEBUG_MSG((nxgep, RX_CTL,
-			"<== nxge_rxdma_fix_channel: no entry found"));
-		return;
-	}
-
-	nxge_rxdma_fixup_channel(nxgep, channel, i);
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fix_channel"));
-}
-
-void
-nxge_rxdma_fixup_channel(p_nxge_t nxgep, uint16_t channel, int entry)
-{
-	int			ndmas;
-	p_rx_rbr_rings_t 	rx_rbr_rings;
-	p_rx_rbr_ring_t		*rbr_rings;
-	p_rx_rcr_rings_t 	rx_rcr_rings;
-	p_rx_rcr_ring_t		*rcr_rings;
-	p_rx_mbox_areas_t 	rx_mbox_areas_p;
-	p_rx_mbox_t		*rx_mbox_p;
-	p_nxge_dma_pool_t	dma_buf_poolp;
-	p_nxge_dma_pool_t	dma_cntl_poolp;
-	p_rx_rbr_ring_t 	rbrp;
-	p_rx_rcr_ring_t 	rcrp;
-	p_rx_mbox_t 		mboxp;
-	p_nxge_dma_common_t 	dmap;
-	nxge_status_t		status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fixup_channel"));
-
-	(void) nxge_rxdma_stop_channel(nxgep, channel);
-
-	dma_buf_poolp = nxgep->rx_buf_pool_p;
-	dma_cntl_poolp = nxgep->rx_cntl_pool_p;
-
-	if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
-		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-			"<== nxge_rxdma_fixup_channel: buf not allocated"));
-		return;
-	}
-
-	ndmas = dma_buf_poolp->ndmas;
-	if (!ndmas) {
-		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-			"<== nxge_rxdma_fixup_channel: no dma allocated"));
-		return;
-	}
-
-	rx_rbr_rings = nxgep->rx_rbr_rings;
-	rx_rcr_rings = nxgep->rx_rcr_rings;
-	rbr_rings = rx_rbr_rings->rbr_rings;
-	rcr_rings = rx_rcr_rings->rcr_rings;
-	rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
-	rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
-
-	/* Reinitialize the receive block and completion rings */
-	rbrp = (p_rx_rbr_ring_t)rbr_rings[entry];
-	rcrp = (p_rx_rcr_ring_t)rcr_rings[entry];
-	mboxp = (p_rx_mbox_t)rx_mbox_p[entry];
-
-
-	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
-	rbrp->rbr_rd_index = 0;
-	rcrp->comp_rd_index = 0;
-	rcrp->comp_wt_index = 0;
-
-	dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
-	bzero((caddr_t)dmap->kaddrp, dmap->alength);
-
-	status = nxge_rxdma_start_channel(nxgep, channel,
-			rbrp, rcrp, mboxp);
-	if (status != NXGE_OK) {
-		NXGE_DEBUG_MSG((nxgep, RX_CTL,
-			"==> nxge_rxdma_fixup_channel: failed (0x%08x)",
-			status));
-	}
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fixup_channel"));
-}
-
-int
-nxge_rxdma_get_ring_index(p_nxge_t nxgep, uint16_t channel)
-{
-	int			i, ndmas;
-	uint16_t		rdc;
-	p_rx_rbr_rings_t 	rx_rbr_rings;
-	p_rx_rbr_ring_t		*rbr_rings;
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL,
-		"==> nxge_rxdma_get_ring_index: channel %d", channel));
-
-	rx_rbr_rings = nxgep->rx_rbr_rings;
-	if (rx_rbr_rings == NULL) {
-		NXGE_DEBUG_MSG((nxgep, RX_CTL,
-			"<== nxge_rxdma_get_ring_index: NULL ring pointer"));
-		return (-1);
-	}
-	ndmas = rx_rbr_rings->ndmas;
-	if (!ndmas) {
-		NXGE_DEBUG_MSG((nxgep, RX_CTL,
-			"<== nxge_rxdma_get_ring_index: no channel"));
-		return (-1);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL,
-		"==> nxge_rxdma_get_ring_index (ndmas %d)", ndmas));
-
-	rbr_rings = rx_rbr_rings->rbr_rings;
-	for (i = 0; i < ndmas; i++) {
-		rdc = rbr_rings[i]->rdc;
-		if (channel == rdc) {
-			NXGE_DEBUG_MSG((nxgep, RX_CTL,
-				"==> nxge_rxdma_get_ring_index: "
-				"channel %d (index %d) "
-				"ring $%p", channel, i,
-				rbr_rings[i]));
-			return (i);
-		}
-	}
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL,
-		"<== nxge_rxdma_get_ring_index: not found"));
-
-	return (-1);
-}
-
-p_rx_rbr_ring_t
-nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel)
-{
-	int			i, ndmas;
-	uint16_t		rdc;
-	p_rx_rbr_rings_t 	rx_rbr_rings;
-	p_rx_rbr_ring_t		*rbr_rings;
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL,
-		"==> nxge_rxdma_get_rbr_ring: channel %d", channel));
-
-	rx_rbr_rings = nxgep->rx_rbr_rings;
-	if (rx_rbr_rings == NULL) {
-		NXGE_DEBUG_MSG((nxgep, RX_CTL,
-			"<== nxge_rxdma_get_rbr_ring: NULL ring pointer"));
-		return (NULL);
-	}
-	ndmas = rx_rbr_rings->ndmas;
-	if (!ndmas) {
-		NXGE_DEBUG_MSG((nxgep, RX_CTL,
-			"<== nxge_rxdma_get_rbr_ring: no channel"));
-		return (NULL);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL,
-		"==> nxge_rxdma_get_ring (ndmas %d)", ndmas));
-
-	rbr_rings = rx_rbr_rings->rbr_rings;
-	for (i = 0; i < ndmas; i++) {
-		rdc = rbr_rings[i]->rdc;
-		if (channel == rdc) {
-			NXGE_DEBUG_MSG((nxgep, RX_CTL,
-				"==> nxge_rxdma_get_rbr_ring: channel %d "
-				"ring $%p", channel, rbr_rings[i]));
-			return (rbr_rings[i]);
-		}
-	}
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL,
-		"<== nxge_rxdma_get_rbr_ring: not found"));
-
-	return (NULL);
-}
-
-p_rx_rcr_ring_t
-nxge_rxdma_get_rcr_ring(p_nxge_t nxgep, uint16_t channel)
-{
-	int			i, ndmas;
-	uint16_t		rdc;
-	p_rx_rcr_rings_t 	rx_rcr_rings;
-	p_rx_rcr_ring_t		*rcr_rings;
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL,
-		"==> nxge_rxdma_get_rcr_ring: channel %d", channel));
-
-	rx_rcr_rings = nxgep->rx_rcr_rings;
-	if (rx_rcr_rings == NULL) {
-		NXGE_DEBUG_MSG((nxgep, RX_CTL,
-			"<== nxge_rxdma_get_rcr_ring: NULL ring pointer"));
-		return (NULL);
-	}
-	ndmas = rx_rcr_rings->ndmas;
-	if (!ndmas) {
-		NXGE_DEBUG_MSG((nxgep, RX_CTL,
-			"<== nxge_rxdma_get_rcr_ring: no channel"));
-		return (NULL);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL,
-		"==> nxge_rxdma_get_rcr_ring (ndmas %d)", ndmas));
-
-	rcr_rings = rx_rcr_rings->rcr_rings;
-	for (i = 0; i < ndmas; i++) {
-		rdc = rcr_rings[i]->rdc;
-		if (channel == rdc) {
-			NXGE_DEBUG_MSG((nxgep, RX_CTL,
-				"==> nxge_rxdma_get_rcr_ring: channel %d "
-				"ring $%p", channel, rcr_rings[i]));
-			return (rcr_rings[i]);
-		}
-	}
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL,
-		"<== nxge_rxdma_get_rcr_ring: not found"));
-
-	return (NULL);
-}
-
-/*
- * Static functions start here.
- */
-
-static p_rx_msg_t
-nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p)
-{
-	p_rx_msg_t nxge_mp 		= NULL;
-	p_nxge_dma_common_t		dmamsg_p;
-	uchar_t 			*buffer;
-
-	nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP);
-	if (nxge_mp == NULL) {
-		NXGE_DEBUG_MSG((NULL, MEM_CTL,
-			"Allocation of a rx msg failed."));
-		goto nxge_allocb_exit;
-	}
-
-	nxge_mp->use_buf_pool = B_FALSE;
-	if (dmabuf_p) {
-		nxge_mp->use_buf_pool = B_TRUE;
-		dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma;
-		*dmamsg_p = *dmabuf_p;
-		dmamsg_p->nblocks = 1;
-		dmamsg_p->block_size = size;
-		dmamsg_p->alength = size;
-		buffer = (uchar_t *)dmabuf_p->kaddrp;
-
-		dmabuf_p->kaddrp = (void *)
-				((char *)dmabuf_p->kaddrp + size);
-		dmabuf_p->ioaddr_pp = (void *)
-				((char *)dmabuf_p->ioaddr_pp + size);
-		dmabuf_p->alength -= size;
-		dmabuf_p->offset += size;
-		dmabuf_p->dma_cookie.dmac_laddress += size;
-		dmabuf_p->dma_cookie.dmac_size -= size;
-
-	} else {
-		buffer = KMEM_ALLOC(size, KM_NOSLEEP);
-		if (buffer == NULL) {
-			NXGE_DEBUG_MSG((NULL, MEM_CTL,
-				"Allocation of a receive page failed."));
-			goto nxge_allocb_fail1;
-		}
-	}
-
-	nxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &nxge_mp->freeb);
-	if (nxge_mp->rx_mblk_p == NULL) {
-		NXGE_DEBUG_MSG((NULL, MEM_CTL, "desballoc failed."));
-		goto nxge_allocb_fail2;
-	}
-
-	nxge_mp->buffer = buffer;
-	nxge_mp->block_size = size;
-	nxge_mp->freeb.free_func = (void (*)())nxge_freeb;
-	nxge_mp->freeb.free_arg = (caddr_t)nxge_mp;
-	nxge_mp->ref_cnt = 1;
-	nxge_mp->free = B_TRUE;
-	nxge_mp->rx_use_bcopy = B_FALSE;
-
-	atomic_inc_32(&nxge_mblks_pending);
-
-	goto nxge_allocb_exit;
-
-nxge_allocb_fail2:
-	if (!nxge_mp->use_buf_pool) {
-		KMEM_FREE(buffer, size);
-	}
-
-nxge_allocb_fail1:
-	KMEM_FREE(nxge_mp, sizeof (rx_msg_t));
-	nxge_mp = NULL;
-
-nxge_allocb_exit:
-	return (nxge_mp);
-}
-
-p_mblk_t
-nxge_dupb(p_rx_msg_t nxge_mp, uint_t offset, size_t size)
-{
-	p_mblk_t mp;
-
-	NXGE_DEBUG_MSG((NULL, MEM_CTL, "==> nxge_dupb"));
-	NXGE_DEBUG_MSG((NULL, MEM_CTL, "nxge_mp = $%p "
-		"offset = 0x%08X "
-		"size = 0x%08X",
-		nxge_mp, offset, size));
-
-	mp = desballoc(&nxge_mp->buffer[offset], size,
-				0, &nxge_mp->freeb);
-	if (mp == NULL) {
-		NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed"));
-		goto nxge_dupb_exit;
-	}
-	atomic_inc_32(&nxge_mp->ref_cnt);
-	atomic_inc_32(&nxge_mblks_pending);
-
-
-nxge_dupb_exit:
-	NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p",
-		nxge_mp));
-	return (mp);
-}
-
-p_mblk_t
-nxge_dupb_bcopy(p_rx_msg_t nxge_mp, uint_t offset, size_t size)
-{
-	p_mblk_t mp;
-	uchar_t *dp;
-
-	mp = allocb(size + NXGE_RXBUF_EXTRA, 0);
-	if (mp == NULL) {
-		NXGE_DEBUG_MSG((NULL, RX_CTL, "allocb failed"));
-		goto nxge_dupb_bcopy_exit;
-	}
-	dp = mp->b_rptr = mp->b_rptr + NXGE_RXBUF_EXTRA;
-	bcopy((void *)&nxge_mp->buffer[offset], dp, size);
-	mp->b_wptr = dp + size;
-
-nxge_dupb_bcopy_exit:
-	NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb_bcopy mp = $%p",
-		nxge_mp));
-	return (mp);
-}
-
-void nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p,
-	p_rx_msg_t rx_msg_p);
-
-void
-nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p)
-{
-
-	npi_handle_t		handle;
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_post_page"));
-
-	/* Reuse this buffer */
-	rx_msg_p->free = B_FALSE;
-	rx_msg_p->cur_usage_cnt = 0;
-	rx_msg_p->max_usage_cnt = 0;
-	rx_msg_p->pkt_buf_size = 0;
-
-	if (rx_rbr_p->rbr_use_bcopy) {
-		rx_msg_p->rx_use_bcopy = B_FALSE;
-		atomic_dec_32(&rx_rbr_p->rbr_consumed);
-	}
-
-	/*
-	 * Get the rbr header pointer and its offset index.
-	 */
-	MUTEX_ENTER(&rx_rbr_p->post_lock);
-
-
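-	/*
-	 * Advance the RBR write index with a power-of-two wrap
-	 * (rbr_wrap_mask is rbb_max - 1), store the buffer's shifted
-	 * DMA address in that descriptor slot, then kick the RDC so
-	 * the hardware sees one newly posted buffer.
-	 */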
-	rx_rbr_p->rbr_wr_index =  ((rx_rbr_p->rbr_wr_index + 1) &
-					    rx_rbr_p->rbr_wrap_mask);
-	rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr;
-	MUTEX_EXIT(&rx_rbr_p->post_lock);
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	npi_rxdma_rdc_rbr_kick(handle, rx_rbr_p->rdc, 1);
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL,
-		"<== nxge_post_page (channel %d post_next_index %d)",
-		rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index));
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_post_page"));
-}
-
-void
-nxge_freeb(p_rx_msg_t rx_msg_p)
-{
-	size_t size;
-	uchar_t *buffer = NULL;
-	int ref_cnt;
-
-	NXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> nxge_freeb"));
-	NXGE_DEBUG_MSG((NULL, MEM2_CTL,
-		"nxge_freeb:rx_msg_p = $%p (block pending %d)",
-		rx_msg_p, nxge_mblks_pending));
-
-
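-	/*
-	 * The driver holds one reference on each buffer and every
-	 * dupb'ed mblk sent upstream holds another.  When the count
-	 * drops back to 1 and the buffer has been marked free, it is
-	 * reposted to the RBR below; when it drops to 0 the buffer
-	 * memory itself is released.
-	 */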
-	ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1);
-	atomic_dec_32(&nxge_mblks_pending);
-	if (!ref_cnt) {
-		buffer = rx_msg_p->buffer;
-		size = rx_msg_p->block_size;
-		NXGE_DEBUG_MSG((NULL, MEM2_CTL, "nxge_freeb: "
-			"will free: rx_msg_p = $%p (block pending %d)",
-			(long long)rx_msg_p, nxge_mblks_pending));
-
-		if (!rx_msg_p->use_buf_pool) {
-			KMEM_FREE(buffer, size);
-		}
-
-		KMEM_FREE(rx_msg_p, sizeof (rx_msg_t));
-		return;
-	}
-
-	/*
-	 * Repost buffer.
-	 */
-	if ((ref_cnt == 1) && (rx_msg_p->free == B_TRUE)) {
-		NXGE_DEBUG_MSG((NULL, RX_CTL,
-		    "nxge_freeb: post page $%p:", rx_msg_p));
-		nxge_post_page(rx_msg_p->nxgep, rx_msg_p->rx_rbr_p,
-		    rx_msg_p);
-	}
-
-	NXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== nxge_freeb"));
-}
-
-uint_t
-nxge_rx_intr(void *arg1, void *arg2)
-{
-	p_nxge_ldv_t		ldvp = (p_nxge_ldv_t)arg1;
-	p_nxge_t		nxgep = (p_nxge_t)arg2;
-	p_nxge_ldg_t		ldgp;
-	uint8_t			channel;
-	npi_handle_t		handle;
-	rx_dma_ctl_stat_t	cs;
-
-#ifdef	NXGE_DEBUG
-	rxdma_cfig1_t		cfg;
-#endif
-	uint_t 			serviced = DDI_INTR_UNCLAIMED;
-
-	if (ldvp == NULL) {
-		NXGE_DEBUG_MSG((NULL, INT_CTL,
-			"<== nxge_rx_intr: arg2 $%p arg1 $%p",
-			nxgep, ldvp));
-
-		return (DDI_INTR_CLAIMED);
-	}
-
-	if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
-		nxgep = ldvp->nxgep;
-	}
-	NXGE_DEBUG_MSG((nxgep, RX_CTL,
-		"==> nxge_rx_intr: arg2 $%p arg1 $%p",
-		nxgep, ldvp));
-
-	/*
-	 * This interrupt handler is for a specific
-	 * receive dma channel.
-	 */
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	/*
-	 * Get the control and status for this channel.
-	 */
-	channel = ldvp->channel;
-	ldgp = ldvp->ldgp;
-	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value);
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d "
-		"cs 0x%016llx rcrto 0x%x rcrthres %x",
-		channel,
-		cs.value,
-		cs.bits.hdw.rcrto,
-		cs.bits.hdw.rcrthres));
-
-	nxge_rx_pkts_vring(nxgep, ldvp->vdma_index, ldvp, cs);
-	serviced = DDI_INTR_CLAIMED;
-
-	/* error events. */
-	if (cs.value & RX_DMA_CTL_STAT_ERROR) {
-		(void) nxge_rx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs);
-	}
-
-nxge_intr_exit:
-
-
-	/*
-	 * Enable the mailbox update interrupt if we want
-	 * to use mailbox. We probably don't need to use
-	 * mailbox as it only saves us one pio read.
-	 * Also write 1 to rcrthres and rcrto to clear
-	 * these two edge triggered bits.
-	 */
-
-	cs.value &= RX_DMA_CTL_STAT_WR1C;
-	cs.bits.hdw.mex = 1;
-	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
-			cs.value);
-
-	/*
-	 * Rearm this logical group if this is a single device
-	 * group.
-	 */
-	if (ldgp->nldvs == 1) {
-		ldgimgm_t		mgm;
-		mgm.value = 0;
-		mgm.bits.ldw.arm = 1;
-		mgm.bits.ldw.timer = ldgp->ldg_timer;
-		NXGE_REG_WR64(handle,
-			    LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
-			    mgm.value);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: serviced %d",
-		serviced));
-	return (serviced);
-}
-
-/*
- * Process the packets received in the specified logical device
- * and pass up a chain of message blocks to the upper layer.
- */
-static void
-nxge_rx_pkts_vring(p_nxge_t nxgep, uint_t vindex, p_nxge_ldv_t ldvp,
-				    rx_dma_ctl_stat_t cs)
-{
-	p_mblk_t		mp;
-	p_rx_rcr_ring_t		rcrp;
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring"));
-	if ((mp = nxge_rx_pkts(nxgep, vindex, ldvp, &rcrp, cs)) == NULL) {
-		NXGE_DEBUG_MSG((nxgep, RX_CTL,
-			"<== nxge_rx_pkts_vring: no mp"));
-		return;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring: $%p",
-		mp));
-
-#ifdef  NXGE_DEBUG
-		NXGE_DEBUG_MSG((nxgep, RX_CTL,
-			"==> nxge_rx_pkts_vring:calling mac_rx "
-			"LEN %d mp $%p mp->b_cont $%p mp->b_next $%p rcrp $%p "
-			"mac_handle $%p",
-			mp->b_wptr - mp->b_rptr,
-			mp, mp->b_cont, mp->b_next,
-			rcrp, rcrp->rcr_mac_handle));
-
-		NXGE_DEBUG_MSG((nxgep, RX_CTL,
-			"==> nxge_rx_pkts_vring: dump packets "
-			"(mp $%p b_rptr $%p b_wptr $%p):\n %s",
-			mp,
-			mp->b_rptr,
-			mp->b_wptr,
-			nxge_dump_packet((char *)mp->b_rptr,
-			mp->b_wptr - mp->b_rptr)));
-		if (mp->b_cont) {
-			NXGE_DEBUG_MSG((nxgep, RX_CTL,
-				"==> nxge_rx_pkts_vring: dump b_cont packets "
-				"(mp->b_cont $%p b_rptr $%p b_wptr $%p):\n %s",
-				mp->b_cont,
-				mp->b_cont->b_rptr,
-				mp->b_cont->b_wptr,
-				nxge_dump_packet((char *)mp->b_cont->b_rptr,
-				mp->b_cont->b_wptr - mp->b_cont->b_rptr)));
-		}
-		if (mp->b_next) {
-			NXGE_DEBUG_MSG((nxgep, RX_CTL,
-				"==> nxge_rx_pkts_vring: dump next packets "
-				"(b_rptr $%p): %s",
-				mp->b_next->b_rptr,
-				nxge_dump_packet((char *)mp->b_next->b_rptr,
-				mp->b_next->b_wptr - mp->b_next->b_rptr)));
-		}
-#endif
-
-	mac_rx(nxgep->mach, rcrp->rcr_mac_handle, mp);
-}
-
-
-/*
- * This routine is the main packet receive processing function.
- * It gets the packet type, error code, and buffer related
- * information from the receive completion entry.
- * How many completion entries to process is based on the number of packets
- * queued by the hardware, a hardware maintained tail pointer
- * and a configurable receive packet count.
- *
- * A chain of message blocks will be created as a result of processing
- * the completion entries. This chain of message blocks will be returned and
- * a hardware control status register will be updated with the number of
- * packets that were removed from the hardware queue.
- *
- */
-mblk_t *
-nxge_rx_pkts(p_nxge_t nxgep, uint_t vindex, p_nxge_ldv_t ldvp,
-    p_rx_rcr_ring_t *rcrp, rx_dma_ctl_stat_t cs)
-{
-	npi_handle_t		handle;
-	uint8_t			channel;
-	p_rx_rcr_rings_t	rx_rcr_rings;
-	p_rx_rcr_ring_t		rcr_p;
-	uint32_t		comp_rd_index;
-	p_rcr_entry_t		rcr_desc_rd_head_p;
-	p_rcr_entry_t		rcr_desc_rd_head_pp;
-	p_mblk_t		nmp, mp_cont, head_mp, *tail_mp;
-	uint16_t		qlen, nrcr_read, npkt_read;
-	uint32_t qlen_hw;
-	boolean_t		multi;
-	rcrcfig_b_t rcr_cfg_b;
-#if defined(_BIG_ENDIAN)
-	npi_status_t		rs = NPI_SUCCESS;
-#endif
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:vindex %d "
-		"channel %d", vindex, ldvp->channel));
-
-	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
-		return (NULL);
-	}
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	rx_rcr_rings = nxgep->rx_rcr_rings;
-	rcr_p = rx_rcr_rings->rcr_rings[vindex];
-	channel = rcr_p->rdc;
-	if (channel != ldvp->channel) {
-		NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:index %d "
-			"channel %d and rcr channel %d do not match.",
-			vindex, ldvp->channel, channel));
-		return (NULL);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL,
-		"==> nxge_rx_pkts: START: rcr channel %d "
-		"head_p $%p head_pp $%p  index %d ",
-		channel, rcr_p->rcr_desc_rd_head_p,
-		rcr_p->rcr_desc_rd_head_pp,
-		rcr_p->comp_rd_index));
-
-
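-	/*
-	 * Read the number of completion entries pending for this
-	 * channel: a direct read of the low 16 bits of RCRSTAT_A on
-	 * little-endian systems, or the NPI helper on big-endian.
-	 */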
-#if !defined(_BIG_ENDIAN)
-	qlen = RXDMA_REG_READ32(handle, RCRSTAT_A_REG, channel) & 0xffff;
-#else
-	rs = npi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen);
-	if (rs != NPI_SUCCESS) {
-		NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:index %d "
-		"channel %d, get qlen failed 0x%08x",
-		vindex, ldvp->channel, rs));
-		return (NULL);
-	}
-#endif
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d "
-		"qlen %d", channel, qlen));
-
-
-
-	if (!qlen) {
-		NXGE_DEBUG_MSG((nxgep, RX_CTL,
-			"==> nxge_rx_pkts:rcr channel %d "
-			"qlen %d (no pkts)", channel, qlen));
-
-		return (NULL);
-	}
-
-	comp_rd_index = rcr_p->comp_rd_index;
-
-	rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p;
-	rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp;
-	nrcr_read = npkt_read = 0;
-
-	/*
-	 * Number of packets queued.
-	 * (A jumbo or multi-segment packet is counted as only one
-	 *  packet, but it may take up more than one completion entry).
-	 */
-	qlen_hw = (qlen < nxge_max_rx_pkts) ?
-		qlen : nxge_max_rx_pkts;
-	head_mp = NULL;
-	tail_mp = &head_mp;
-	nmp = mp_cont = NULL;
-	multi = B_FALSE;
-
-	while (qlen_hw) {
-
-#ifdef NXGE_DEBUG
-		nxge_dump_rcr_entry(nxgep, rcr_desc_rd_head_p);
-#endif
-		/*
-		 * Process one completion ring entry.
-		 */
-		nxge_receive_packet(nxgep,
-			rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont);
-
-		/*
-		 * message chaining modes
-		 */
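-		/*
-		 * Four cases, selected by the multi and mp_cont flags:
-		 *  !multi && !mp_cont - single-buffer frame; link it
-		 *                       into the chain with b_next.
-		 *   multi && !mp_cont - first buffer of a multi-buffer
-		 *                       frame; continue with b_cont.
-		 *   multi &&  mp_cont - middle buffer; keep appending
-		 *                       with b_cont.
-		 *  !multi &&  mp_cont - last buffer; close the b_cont
-		 *                       chain and advance b_next.
-		 */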
-		if (nmp) {
-			nmp->b_next = NULL;
-			if (!multi && !mp_cont) { /* frame fits a partition */
-				*tail_mp = nmp;
-				tail_mp = &nmp->b_next;
-				nmp = NULL;
-			} else if (multi && !mp_cont) { /* first segment */
-				*tail_mp = nmp;
-				tail_mp = &nmp->b_cont;
-			} else if (multi && mp_cont) {	/* mid of multi segs */
-				*tail_mp = mp_cont;
-				tail_mp = &mp_cont->b_cont;
-			} else if (!multi && mp_cont) { /* last segment */
-				*tail_mp = mp_cont;
-				tail_mp = &nmp->b_next;
-				nmp = NULL;
-			}
-		}
-		NXGE_DEBUG_MSG((nxgep, RX_CTL,
-			"==> nxge_rx_pkts: loop: rcr channel %d "
-			"before updating: multi %d "
-			"nrcr_read %d "
-			"npkt read %d "
-			"head_pp $%p  index %d ",
-			channel,
-			multi,
-			nrcr_read, npkt_read, rcr_desc_rd_head_pp,
-			comp_rd_index));
-
-		if (!multi) {
-			qlen_hw--;
-			npkt_read++;
-		}
-
-		/*
-		 * Update the next read entry.
-		 */
-		comp_rd_index = NEXT_ENTRY(comp_rd_index,
-					rcr_p->comp_wrap_mask);
-
-		rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p,
-				rcr_p->rcr_desc_first_p,
-				rcr_p->rcr_desc_last_p);
-
-		nrcr_read++;
-
-		NXGE_DEBUG_MSG((nxgep, RX_CTL,
-			"<== nxge_rx_pkts: (SAM, process one packet) "
-			"nrcr_read %d",
-			nrcr_read));
-		NXGE_DEBUG_MSG((nxgep, RX_CTL,
-			"==> nxge_rx_pkts: loop: rcr channel %d "
-			"multi %d "
-			"nrcr_read %d "
-			"npkt read %d "
-			"head_pp $%p  index %d ",
-			channel,
-			multi,
-			nrcr_read, npkt_read, rcr_desc_rd_head_pp,
-			comp_rd_index));
-
-	}
-
-	rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp;
-	rcr_p->comp_rd_index = comp_rd_index;
-	rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p;
-
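-	/*
-	 * If the per-port interrupt coalescing parameters have
-	 * changed (e.g. through an ndd tunable), copy the new timeout
-	 * and packet threshold into this ring and rewrite RCRCFIG_B
-	 * so the hardware uses them.
-	 */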
-	if ((nxgep->intr_timeout != rcr_p->intr_timeout) ||
-		(nxgep->intr_threshold != rcr_p->intr_threshold)) {
-		rcr_p->intr_timeout = nxgep->intr_timeout;
-		rcr_p->intr_threshold = nxgep->intr_threshold;
-		rcr_cfg_b.value = 0x0ULL;
-		if (rcr_p->intr_timeout)
-			rcr_cfg_b.bits.ldw.entout = 1;
-		rcr_cfg_b.bits.ldw.timeout = rcr_p->intr_timeout;
-		rcr_cfg_b.bits.ldw.pthres = rcr_p->intr_threshold;
-		RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG,
-				    channel, rcr_cfg_b.value);
-	}
-
-	cs.bits.ldw.pktread = npkt_read;
-	cs.bits.ldw.ptrread = nrcr_read;
-	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG,
-			    channel, cs.value);
-	NXGE_DEBUG_MSG((nxgep, RX_CTL,
-		"==> nxge_rx_pkts: EXIT: rcr channel %d "
-		"head_pp $%p  index %016llx ",
-		channel,
-		rcr_p->rcr_desc_rd_head_pp,
-		rcr_p->comp_rd_index));
-	/*
-	 * Update RCR buffer pointer read and number of packets
-	 * read.
-	 */
-
-	*rcrp = rcr_p;
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_pkts"));
-	return (head_mp);
-}
-
-void
-nxge_receive_packet(p_nxge_t nxgep,
-    p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p,
-    boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont)
-{
-	p_mblk_t		nmp = NULL;
-	uint64_t		multi;
-	uint64_t		dcf_err;
-	uint8_t			channel;
-
-	boolean_t		first_entry = B_TRUE;
-	boolean_t		is_tcp_udp = B_FALSE;
-	boolean_t		buffer_free = B_FALSE;
-	boolean_t		error_send_up = B_FALSE;
-	uint8_t			error_type;
-	uint16_t		l2_len;
-	uint16_t		skip_len;
-	uint8_t			pktbufsz_type;
-	uint16_t		pktbufsz;
-	uint64_t		rcr_entry;
-	uint64_t		*pkt_buf_addr_pp;
-	uint64_t		*pkt_buf_addr_p;
-	uint32_t		buf_offset;
-	uint32_t		bsize;
-	uint32_t		error_disp_cnt;
-	uint32_t		msg_index;
-	p_rx_rbr_ring_t		rx_rbr_p;
-	p_rx_msg_t 		*rx_msg_ring_p;
-	p_rx_msg_t		rx_msg_p;
-	uint16_t		sw_offset_bytes = 0, hdr_size = 0;
-	nxge_status_t		status = NXGE_OK;
-	boolean_t		is_valid = B_FALSE;
-	p_nxge_rx_ring_stats_t	rdc_stats;
-	uint32_t		bytes_read;
-	uint64_t		pkt_type;
-	uint64_t		frag;
-#ifdef	NXGE_DEBUG
-	int			dump_len;
-#endif
-	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_receive_packet"));
-	first_entry = (*mp == NULL) ? B_TRUE : B_FALSE;
-
-	rcr_entry = *((uint64_t *)rcr_desc_rd_head_p);
-
-	multi = (rcr_entry & RCR_MULTI_MASK);
-	dcf_err = (rcr_entry & RCR_DCF_ERROR_MASK);
-	pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK);
-
-	error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT);
-	frag = (rcr_entry & RCR_FRAG_MASK);
-
-	l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT);
-
-	pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >>
-				RCR_PKTBUFSZ_SHIFT);
-
-	pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) <<
-			RCR_PKT_BUF_ADDR_SHIFT);
-
-	channel = rcr_p->rdc;
-
-	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
-		"==> nxge_receive_packet: entryp $%p entry 0x%0llx "
-		"pkt_buf_addr_pp $%p l2_len %d multi 0x%llx "
-		"error_type 0x%x pkt_type 0x%x  "
-		"pktbufsz_type %d ",
-		rcr_desc_rd_head_p,
-		rcr_entry, pkt_buf_addr_pp, l2_len,
-		multi,
-		error_type,
-		pkt_type,
-		pktbufsz_type));
-
-	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
-		"==> nxge_receive_packet: entryp $%p entry 0x%0llx "
-		"pkt_buf_addr_pp $%p l2_len %d multi 0x%llx "
-		"error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p,
-		rcr_entry, pkt_buf_addr_pp, l2_len,
-		multi,
-		error_type,
-		pkt_type));
-
-	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
-		"==> (rbr) nxge_receive_packet: entry 0x%0llx "
-		"full pkt_buf_addr_pp $%p l2_len %d",
-		rcr_entry, pkt_buf_addr_pp, l2_len));
-
-	/* get the stats ptr */
-	rdc_stats = rcr_p->rdc_stats;
-
-	if (!l2_len) {
-
-		NXGE_DEBUG_MSG((nxgep, RX_CTL,
-			"<== nxge_receive_packet: failed: l2 length is 0."));
-		return;
-	}
-
-	/* shift 6 bits to get the full io address */
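-	/*
-	 * The completion entry carries the buffer address in units of
-	 * 64-byte blocks, so shifting left by 6 bits reconstructs the
-	 * full I/O (DMA) byte address of the packet buffer.
-	 */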
-	pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp <<
-				RCR_PKT_BUF_ADDR_SHIFT_FULL);
-	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
-		"==> (rbr) nxge_receive_packet: entry 0x%0llx "
-		"full pkt_buf_addr_pp $%p l2_len %d",
-		rcr_entry, pkt_buf_addr_pp, l2_len));
-
-	rx_rbr_p = rcr_p->rx_rbr_p;
-	rx_msg_ring_p = rx_rbr_p->rx_msg_ring;
-
-	if (first_entry) {
-		hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL :
-			RXDMA_HDR_SIZE_DEFAULT);
-
-		NXGE_DEBUG_MSG((nxgep, RX_CTL,
-			"==> nxge_receive_packet: first entry 0x%016llx "
-			"pkt_buf_addr_pp $%p l2_len %d hdr %d",
-			rcr_entry, pkt_buf_addr_pp, l2_len,
-			hdr_size));
-	}
-
-	MUTEX_ENTER(&rcr_p->lock);
-	MUTEX_ENTER(&rx_rbr_p->lock);
-
-	bytes_read = rcr_p->rcvd_pkt_bytes;
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL,
-		"==> (rbr 1) nxge_receive_packet: entry 0x%0llx "
-		"full pkt_buf_addr_pp $%p l2_len %d",
-		rcr_entry, pkt_buf_addr_pp, l2_len));
-
-	/*
-	 * Packet buffer address in the completion entry points
-	 * to the starting buffer address (offset 0).
-	 * Use the starting buffer address to locate the corresponding
-	 * kernel address.
-	 */
-	status = nxge_rxbuf_pp_to_vp(nxgep, rx_rbr_p,
-			pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p,
-			&buf_offset,
-			&msg_index);
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL,
-		"==> (rbr 2) nxge_receive_packet: entry 0x%0llx "
-		"full pkt_buf_addr_pp $%p l2_len %d",
-		rcr_entry, pkt_buf_addr_pp, l2_len));
-
-	if (status != NXGE_OK) {
-		MUTEX_EXIT(&rx_rbr_p->lock);
-		MUTEX_EXIT(&rcr_p->lock);
-		NXGE_DEBUG_MSG((nxgep, RX_CTL,
-			"<== nxge_receive_packet: found vaddr failed %d",
-				status));
-		return;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
-		"==> (rbr 3) nxge_receive_packet: entry 0x%0llx "
-		"full pkt_buf_addr_pp $%p l2_len %d",
-		rcr_entry, pkt_buf_addr_pp, l2_len));
-
-	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
-		"==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx "
-		"full pkt_buf_addr_pp $%p l2_len %d",
-		msg_index, rcr_entry, pkt_buf_addr_pp, l2_len));
-
-	rx_msg_p = rx_msg_ring_p[msg_index];
-
-	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
-		"==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx "
-		"full pkt_buf_addr_pp $%p l2_len %d",
-		msg_index, rcr_entry, pkt_buf_addr_pp, l2_len));
-
-	switch (pktbufsz_type) {
-	case RCR_PKTBUFSZ_0:
-		bsize = rx_rbr_p->pkt_buf_size0_bytes;
-		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
-			"==> nxge_receive_packet: 0 buf %d", bsize));
-		break;
-	case RCR_PKTBUFSZ_1:
-		bsize = rx_rbr_p->pkt_buf_size1_bytes;
-		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
-			"==> nxge_receive_packet: 1 buf %d", bsize));
-		break;
-	case RCR_PKTBUFSZ_2:
-		bsize = rx_rbr_p->pkt_buf_size2_bytes;
-		NXGE_DEBUG_MSG((nxgep, RX_CTL,
-			"==> nxge_receive_packet: 2 buf %d", bsize));
-		break;
-	case RCR_SINGLE_BLOCK:
-		bsize = rx_msg_p->block_size;
-		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
-			"==> nxge_receive_packet: single %d", bsize));
-
-		break;
-	default:
-		MUTEX_EXIT(&rx_rbr_p->lock);
-		MUTEX_EXIT(&rcr_p->lock);
-		return;
-	}
-
-	DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma,
-		(buf_offset + sw_offset_bytes),
-		(hdr_size + l2_len),
-		DDI_DMA_SYNC_FORCPU);
-
-	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
-		"==> nxge_receive_packet: after first dump:usage count"));
-
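-	/*
-	 * A receive block may be shared by several packets.  On first
-	 * use, record the packet buffer size and compute how many
-	 * packets fit in the block (block_size / bsize); once the
-	 * usage count reaches that maximum, the block is eligible to
-	 * be freed or reposted.
-	 */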
-	if (rx_msg_p->cur_usage_cnt == 0) {
-		if (rx_rbr_p->rbr_use_bcopy) {
-			atomic_inc_32(&rx_rbr_p->rbr_consumed);
-			if (rx_rbr_p->rbr_consumed <
-					rx_rbr_p->rbr_threshold_hi) {
-				if (rx_rbr_p->rbr_threshold_lo == 0 ||
-					((rx_rbr_p->rbr_consumed >=
-						rx_rbr_p->rbr_threshold_lo) &&
-						(rx_rbr_p->rbr_bufsize_type >=
-							pktbufsz_type))) {
-					rx_msg_p->rx_use_bcopy = B_TRUE;
-				}
-			} else {
-				rx_msg_p->rx_use_bcopy = B_TRUE;
-			}
-		}
-		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
-			"==> nxge_receive_packet: buf %d (new block) ",
-			bsize));
-
-		rx_msg_p->pkt_buf_size_code = pktbufsz_type;
-		rx_msg_p->pkt_buf_size = bsize;
-		rx_msg_p->cur_usage_cnt = 1;
-		if (pktbufsz_type == RCR_SINGLE_BLOCK) {
-			NXGE_DEBUG_MSG((nxgep, RX2_CTL,
-				"==> nxge_receive_packet: buf %d "
-				"(single block) ",
-				bsize));
-			/*
-			 * Buffer can be reused once the free function
-			 * is called.
-			 */
-			rx_msg_p->max_usage_cnt = 1;
-			buffer_free = B_TRUE;
-		} else {
-			rx_msg_p->max_usage_cnt = rx_msg_p->block_size/bsize;
-			if (rx_msg_p->max_usage_cnt == 1) {
-				buffer_free = B_TRUE;
-			}
-		}
-	} else {
-		rx_msg_p->cur_usage_cnt++;
-		if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) {
-			buffer_free = B_TRUE;
-		}
-	}
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL,
-	    "msgbuf index = %d l2len %d bytes usage %d max_usage %d ",
-		msg_index, l2_len,
-		rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt));
-
-	if ((error_type) || (dcf_err)) {
-		rdc_stats->ierrors++;
-		if (dcf_err) {
-			rdc_stats->dcf_err++;
-#ifdef	NXGE_DEBUG
-			if (rdc_stats->dcf_err == 1) {
-				NXGE_DEBUG_MSG((nxgep, RX_CTL,
-				"nxge_receive_packet: channel %d dcf_err rcr"
-				" 0x%llx", channel, rcr_entry));
-			}
-#endif
-			NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL,
-					NXGE_FM_EREPORT_RDMC_DCF_ERR);
-		} else {
-				/* Update error stats */
-			error_disp_cnt = NXGE_ERROR_SHOW_MAX;
-			rdc_stats->errlog.compl_err_type = error_type;
-			NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL,
-				    NXGE_FM_EREPORT_RDMC_COMPLETION_ERR);
-
-			switch (error_type) {
-				case RCR_L2_ERROR:
-					rdc_stats->l2_err++;
-					if (rdc_stats->l2_err <
-						error_disp_cnt)
-						NXGE_ERROR_MSG((nxgep,
-						NXGE_ERR_CTL,
-						" nxge_receive_packet:"
-						" channel %d RCR L2_ERROR",
-						channel));
-					break;
-				case RCR_L4_CSUM_ERROR:
-					error_send_up = B_TRUE;
-					rdc_stats->l4_cksum_err++;
-					if (rdc_stats->l4_cksum_err <
-						error_disp_cnt)
-						NXGE_ERROR_MSG((nxgep,
-						NXGE_ERR_CTL,
-							" nxge_receive_packet:"
-							" channel %d"
-							" RCR L4_CSUM_ERROR",
-							channel));
-					break;
-				case RCR_FFLP_SOFT_ERROR:
-					error_send_up = B_TRUE;
-					rdc_stats->fflp_soft_err++;
-					if (rdc_stats->fflp_soft_err <
-						error_disp_cnt)
-						NXGE_ERROR_MSG((nxgep,
-							NXGE_ERR_CTL,
-							" nxge_receive_packet:"
-							" channel %d"
-							" RCR FFLP_SOFT_ERROR",
-							channel));
-					break;
-				case RCR_ZCP_SOFT_ERROR:
-					error_send_up = B_TRUE;
-					rdc_stats->zcp_soft_err++;
-					if (rdc_stats->zcp_soft_err <
-						error_disp_cnt)
-						NXGE_ERROR_MSG((nxgep,
-							NXGE_ERR_CTL,
-							" nxge_receive_packet:"
-							" Channel %d"
-							" RCR ZCP_SOFT_ERROR",
-							channel));
-					break;
-				default:
-					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-							" nxge_receive_packet:"
-							" Channel %d"
-							" RCR entry 0x%llx"
-							" error 0x%x",
-							channel, rcr_entry,
-							error_type));
-					break;
-			}
-		}
-
-		/*
-		 * Update and repost buffer block if max usage
-		 * count is reached.
-		 */
-		if (error_send_up == B_FALSE) {
-			if (buffer_free == B_TRUE) {
-				rx_msg_p->free = B_TRUE;
-			}
-
-			atomic_inc_32(&rx_msg_p->ref_cnt);
-			MUTEX_EXIT(&rx_rbr_p->lock);
-			MUTEX_EXIT(&rcr_p->lock);
-			nxge_freeb(rx_msg_p);
-			return;
-		}
-	}
-
-	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
-		"==> nxge_receive_packet: DMA sync second "));
-
-	skip_len = sw_offset_bytes + hdr_size;
-	if (!rx_msg_p->rx_use_bcopy) {
-		nmp = nxge_dupb(rx_msg_p, buf_offset, bsize);
-	} else {
-		nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len, l2_len);
-		NXGE_DEBUG_MSG((nxgep, RX_CTL,
-			"==> nxge_receive_packet: use bcopy "
-			"rbr consumed %d "
-			"pktbufsz_type %d "
-			"offset %d "
-			"hdr_size %d l2_len %d "
-			"nmp->b_rptr $%p",
-			rx_rbr_p->rbr_consumed,
-			pktbufsz_type,
-			buf_offset, hdr_size, l2_len,
-			nmp->b_rptr));
-	}
-	if (nmp != NULL) {
-		pktbufsz = nxge_get_pktbuf_size(nxgep, pktbufsz_type,
-			rx_rbr_p->rbr_cfgb);
-		if (!rx_msg_p->rx_use_bcopy) {
-			if (first_entry) {
-				bytes_read = 0;
-				nmp->b_rptr = &nmp->b_rptr[skip_len];
-				if (l2_len > pktbufsz - skip_len)
-					nmp->b_wptr = &nmp->b_rptr[pktbufsz
-						- skip_len];
-				else
-					nmp->b_wptr = &nmp->b_rptr[l2_len];
-			} else {
-				if (l2_len - bytes_read > pktbufsz)
-					nmp->b_wptr = &nmp->b_rptr[pktbufsz];
-				else
-					nmp->b_wptr =
-					    &nmp->b_rptr[l2_len - bytes_read];
-			}
-			bytes_read += nmp->b_wptr - nmp->b_rptr;
-			NXGE_DEBUG_MSG((nxgep, RX_CTL,
-				"==> nxge_receive_packet after dupb: "
-				"rbr consumed %d "
-				"pktbufsz_type %d "
-				"nmp $%p rptr $%p wptr $%p "
-				"buf_offset %d bsize %d l2_len %d skip_len %d",
-				rx_rbr_p->rbr_consumed,
-				pktbufsz_type,
-				nmp, nmp->b_rptr, nmp->b_wptr,
-				buf_offset, bsize, l2_len, skip_len));
-		}
-	} else {
-		cmn_err(CE_WARN, "!nxge_receive_packet: "
-			"update stats (error)");
-	}
-	if (buffer_free == B_TRUE) {
-		rx_msg_p->free = B_TRUE;
-	}
-
-	/*
-	 * ERROR, FRAG and PKT_TYPE are only reported
-	 * in the first entry.
-	 * If a packet is not fragmented and no error bit is set, then
-	 * L4 checksum is OK.
-	 */
-	is_valid = (nmp != NULL);
-	rdc_stats->ibytes += l2_len;
-	rdc_stats->ipackets++;
-	MUTEX_EXIT(&rx_rbr_p->lock);
-	MUTEX_EXIT(&rcr_p->lock);
-
-	if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) {
-		atomic_inc_32(&rx_msg_p->ref_cnt);
-		nxge_freeb(rx_msg_p);
-	}
-
-	if (is_valid) {
-		nmp->b_cont = NULL;
-		if (first_entry) {
-			*mp = nmp;
-			*mp_cont = NULL;
-		} else
-			*mp_cont = nmp;
-	}
-
-	/*
-	 * Update stats and hardware checksumming.
-	 */
-	if (is_valid && !multi) {
-
-		is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP ||
-				pkt_type == RCR_PKT_IS_UDP) ?
-					B_TRUE: B_FALSE);
-
-		NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_receive_packet: "
-			"is_valid 0x%x multi 0x%llx pkt %d frag %d error %d",
-			is_valid, multi, is_tcp_udp, frag, error_type));
-
-		if (is_tcp_udp && !frag && !error_type) {
-			(void) hcksum_assoc(nmp, NULL, NULL, 0, 0, 0, 0,
-				HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0);
-			NXGE_DEBUG_MSG((nxgep, RX_CTL,
-				"==> nxge_receive_packet: Full tcp/udp cksum "
-				"is_valid 0x%x multi 0x%llx pkt %d frag %d "
-				"error %d",
-				is_valid, multi, is_tcp_udp, frag, error_type));
-		}
-	}
-
-	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
-		"==> nxge_receive_packet: *mp 0x%016llx", *mp));
-
-	*multi_p = (multi == RCR_MULTI_MASK);
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: "
-		"multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx",
-		*multi_p, nmp, *mp, *mp_cont));
-}
-
-/*ARGSUSED*/
-static nxge_status_t
-nxge_rx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp,
-						rx_dma_ctl_stat_t cs)
-{
-	p_nxge_rx_ring_stats_t	rdc_stats;
-	npi_handle_t		handle;
-	npi_status_t		rs;
-	boolean_t		rxchan_fatal = B_FALSE;
-	boolean_t		rxport_fatal = B_FALSE;
-	uint8_t			channel;
-	uint8_t			portn;
-	nxge_status_t		status = NXGE_OK;
-	uint32_t		error_disp_cnt = NXGE_ERROR_SHOW_MAX;
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_err_evnts"));
-
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	channel = ldvp->channel;
-	portn = nxgep->mac.portnum;
-	rdc_stats = &nxgep->statsp->rdc_stats[ldvp->vdma_index];
-
-	if (cs.bits.hdw.rbr_tmout) {
-		rdc_stats->rx_rbr_tmout++;
-		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
-					NXGE_FM_EREPORT_RDMC_RBR_TMOUT);
-		rxchan_fatal = B_TRUE;
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_rx_err_evnts: rx_rbr_timeout"));
-	}
-	if (cs.bits.hdw.rsp_cnt_err) {
-		rdc_stats->rsp_cnt_err++;
-		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
-					NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR);
-		rxchan_fatal = B_TRUE;
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_rx_err_evnts(channel %d): "
-			"rsp_cnt_err", channel));
-	}
-	if (cs.bits.hdw.byte_en_bus) {
-		rdc_stats->byte_en_bus++;
-		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
-					NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS);
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_rx_err_evnts(channel %d): "
-			"fatal error: byte_en_bus", channel));
-		rxchan_fatal = B_TRUE;
-	}
-	if (cs.bits.hdw.rsp_dat_err) {
-		rdc_stats->rsp_dat_err++;
-		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
-					NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR);
-		rxchan_fatal = B_TRUE;
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_rx_err_evnts(channel %d): "
-			"fatal error: rsp_dat_err", channel));
-	}
-	if (cs.bits.hdw.rcr_ack_err) {
-		rdc_stats->rcr_ack_err++;
-		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
-					NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR);
-		rxchan_fatal = B_TRUE;
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_rx_err_evnts(channel %d): "
-			"fatal error: rcr_ack_err", channel));
-	}
-	if (cs.bits.hdw.dc_fifo_err) {
-		rdc_stats->dc_fifo_err++;
-		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
-					NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR);
-		/*
-		 * This is not a channel fatal error, but it is treated
-		 * as fatal for the port.
-		 */
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_rx_err_evnts(channel %d): "
-			"dc_fifo_err", channel));
-		rxport_fatal = B_TRUE;
-	}
-	if ((cs.bits.hdw.rcr_sha_par) || (cs.bits.hdw.rbr_pre_par)) {
-		if ((rs = npi_rxdma_ring_perr_stat_get(handle,
-				&rdc_stats->errlog.pre_par,
-				&rdc_stats->errlog.sha_par))
-				!= NPI_SUCCESS) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"==> nxge_rx_err_evnts(channel %d): "
-				"rcr_sha_par: get perr", channel));
-			return (NXGE_ERROR | rs);
-		}
-		if (cs.bits.hdw.rcr_sha_par) {
-			rdc_stats->rcr_sha_par++;
-			NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
-					NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR);
-			rxchan_fatal = B_TRUE;
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"==> nxge_rx_err_evnts(channel %d): "
-				"fatal error: rcr_sha_par", channel));
-		}
-		if (cs.bits.hdw.rbr_pre_par) {
-			rdc_stats->rbr_pre_par++;
-			NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
-					NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR);
-			rxchan_fatal = B_TRUE;
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"==> nxge_rx_err_evnts(channel %d): "
-				"fatal error: rbr_pre_par", channel));
-		}
-	}
-	if (cs.bits.hdw.port_drop_pkt) {
-		rdc_stats->port_drop_pkt++;
-		if (rdc_stats->port_drop_pkt < error_disp_cnt)
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"==> nxge_rx_err_evnts (channel %d): "
-				"port_drop_pkt", channel));
-	}
-	if (cs.bits.hdw.wred_drop) {
-		rdc_stats->wred_drop++;
-		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_rx_err_evnts(channel %d): "
-			"wred_drop", channel));
-	}
-	if (cs.bits.hdw.rbr_pre_empty) {
-		rdc_stats->rbr_pre_empty++;
-		if (rdc_stats->rbr_pre_empty < error_disp_cnt)
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"==> nxge_rx_err_evnts(channel %d): "
-				"rbr_pre_empty", channel));
-	}
-	if (cs.bits.hdw.rcr_shadow_full) {
-		rdc_stats->rcr_shadow_full++;
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_rx_err_evnts(channel %d): "
-			"rcr_shadow_full", channel));
-	}
-	if (cs.bits.hdw.config_err) {
-		rdc_stats->config_err++;
-		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
-					NXGE_FM_EREPORT_RDMC_CONFIG_ERR);
-		rxchan_fatal = B_TRUE;
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_rx_err_evnts(channel %d): "
-			"config error", channel));
-	}
-	if (cs.bits.hdw.rcrincon) {
-		rdc_stats->rcrincon++;
-		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
-					NXGE_FM_EREPORT_RDMC_RCRINCON);
-		rxchan_fatal = B_TRUE;
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_rx_err_evnts(channel %d): "
-			"fatal error: rcrincon error", channel));
-	}
-	if (cs.bits.hdw.rcrfull) {
-		rdc_stats->rcrfull++;
-		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
-					NXGE_FM_EREPORT_RDMC_RCRFULL);
-		rxchan_fatal = B_TRUE;
-		if (rdc_stats->rcrfull < error_disp_cnt)
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"==> nxge_rx_err_evnts(channel %d): "
-				"fatal error: rcrfull error", channel));
-	}
-	if (cs.bits.hdw.rbr_empty) {
-		rdc_stats->rbr_empty++;
-		if (rdc_stats->rbr_empty < error_disp_cnt)
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"==> nxge_rx_err_evnts(channel %d): "
-				"rbr empty error", channel));
-	}
-	if (cs.bits.hdw.rbrfull) {
-		rdc_stats->rbrfull++;
-		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
-					NXGE_FM_EREPORT_RDMC_RBRFULL);
-		rxchan_fatal = B_TRUE;
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_rx_err_evnts(channel %d): "
-			"fatal error: rbr_full error", channel));
-	}
-	if (cs.bits.hdw.rbrlogpage) {
-		rdc_stats->rbrlogpage++;
-		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
-					NXGE_FM_EREPORT_RDMC_RBRLOGPAGE);
-		rxchan_fatal = B_TRUE;
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_rx_err_evnts(channel %d): "
-			"fatal error: rbr logical page error", channel));
-	}
-	if (cs.bits.hdw.cfiglogpage) {
-		rdc_stats->cfiglogpage++;
-		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
-					NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE);
-		rxchan_fatal = B_TRUE;
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_rx_err_evnts(channel %d): "
-			"fatal error: cfig logical page error", channel));
-	}
-
-	if (rxport_fatal)  {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				" nxge_rx_err_evnts: "
-				" fatal error on Port #%d\n",
-				portn));
-		status = nxge_ipp_fatal_err_recover(nxgep);
-		if (status == NXGE_OK) {
-			FM_SERVICE_RESTORED(nxgep);
-		}
-	}
-
-	if (rxchan_fatal) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				" nxge_rx_err_evnts: "
-				" fatal error on Channel #%d\n",
-				channel));
-		status = nxge_rxdma_fatal_err_recover(nxgep, channel);
-		if (status == NXGE_OK) {
-			FM_SERVICE_RESTORED(nxgep);
-		}
-	}
-
-	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rx_err_evnts"));
-
-	return (status);
-}
-
-static nxge_status_t
-nxge_map_rxdma(p_nxge_t nxgep)
-{
-	int			i, ndmas;
-	uint16_t		channel;
-	p_rx_rbr_rings_t 	rx_rbr_rings;
-	p_rx_rbr_ring_t		*rbr_rings;
-	p_rx_rcr_rings_t 	rx_rcr_rings;
-	p_rx_rcr_ring_t		*rcr_rings;
-	p_rx_mbox_areas_t 	rx_mbox_areas_p;
-	p_rx_mbox_t		*rx_mbox_p;
-	p_nxge_dma_pool_t	dma_buf_poolp;
-	p_nxge_dma_pool_t	dma_cntl_poolp;
-	p_nxge_dma_common_t	*dma_buf_p;
-	p_nxge_dma_common_t	*dma_cntl_p;
-	uint32_t		*num_chunks;
-	nxge_status_t		status = NXGE_OK;
-#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
-	p_nxge_dma_common_t	t_dma_buf_p;
-	p_nxge_dma_common_t	t_dma_cntl_p;
-#endif
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma"));
-
-	dma_buf_poolp = nxgep->rx_buf_pool_p;
-	dma_cntl_poolp = nxgep->rx_cntl_pool_p;
-
-	if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"<== nxge_map_rxdma: buf not allocated"));
-		return (NXGE_ERROR);
-	}
-
-	ndmas = dma_buf_poolp->ndmas;
-	if (!ndmas) {
-		NXGE_DEBUG_MSG((nxgep, RX_CTL,
-			"<== nxge_map_rxdma: no dma allocated"));
-		return (NXGE_ERROR);
-	}
-
-	num_chunks = dma_buf_poolp->num_chunks;
-	dma_buf_p = dma_buf_poolp->dma_buf_pool_p;
-	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
-
-	rx_rbr_rings = (p_rx_rbr_rings_t)
-		KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP);
-	rbr_rings = (p_rx_rbr_ring_t *)
-		KMEM_ZALLOC(sizeof (p_rx_rbr_ring_t) * ndmas, KM_SLEEP);
-	rx_rcr_rings = (p_rx_rcr_rings_t)
-		KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP);
-	rcr_rings = (p_rx_rcr_ring_t *)
-		KMEM_ZALLOC(sizeof (p_rx_rcr_ring_t) * ndmas, KM_SLEEP);
-	rx_mbox_areas_p = (p_rx_mbox_areas_t)
-		KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP);
-	rx_mbox_p = (p_rx_mbox_t *)
-		KMEM_ZALLOC(sizeof (p_rx_mbox_t) * ndmas, KM_SLEEP);
-
-	/*
-	 * Timeout should be set based on the system clock divider.
-	 * The following timeout value of 1 assumes that the
-	 * granularity (1000) is 3 microseconds running at 300MHz.
-	 */
-
-	nxgep->intr_threshold = RXDMA_RCR_PTHRES_DEFAULT;
-	nxgep->intr_timeout = RXDMA_RCR_TO_DEFAULT;
-
-	/*
-	 * Map descriptors from the buffer pools for each dma channel.
-	 */
-	for (i = 0; i < ndmas; i++) {
-		/*
-		 * Set up and prepare buffer blocks, descriptors
-		 * and mailbox.
-		 */
-		channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel;
-		status = nxge_map_rxdma_channel(nxgep, channel,
-				(p_nxge_dma_common_t *)&dma_buf_p[i],
-				(p_rx_rbr_ring_t *)&rbr_rings[i],
-				num_chunks[i],
-				(p_nxge_dma_common_t *)&dma_cntl_p[i],
-				(p_rx_rcr_ring_t *)&rcr_rings[i],
-				(p_rx_mbox_t *)&rx_mbox_p[i]);
-		if (status != NXGE_OK) {
-			goto nxge_map_rxdma_fail1;
-		}
-		rbr_rings[i]->index = (uint16_t)i;
-		rcr_rings[i]->index = (uint16_t)i;
-		rcr_rings[i]->rdc_stats = &nxgep->statsp->rdc_stats[i];
-
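-		/*
-		 * On the sun4v N2 NIU, save the original buffer and
-		 * control DMA ranges here, presumably so they can later
-		 * be handed to the hypervisor when the logical pages for
-		 * this channel are configured.
-		 */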
-#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
-		if (nxgep->niu_type == N2_NIU && NXGE_DMA_BLOCK == 1) {
-			rbr_rings[i]->hv_set = B_FALSE;
-			t_dma_buf_p = (p_nxge_dma_common_t)dma_buf_p[i];
-			t_dma_cntl_p =
-				(p_nxge_dma_common_t)dma_cntl_p[i];
-
-			rbr_rings[i]->hv_rx_buf_base_ioaddr_pp =
-				(uint64_t)t_dma_buf_p->orig_ioaddr_pp;
-			rbr_rings[i]->hv_rx_buf_ioaddr_size =
-				(uint64_t)t_dma_buf_p->orig_alength;
-			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-				"==> nxge_map_rxdma_channel: "
-				"channel %d "
-				"data buf base io $%p ($%p) "
-				"size 0x%llx (%d 0x%x)",
-				channel,
-				rbr_rings[i]->hv_rx_buf_base_ioaddr_pp,
-				t_dma_cntl_p->ioaddr_pp,
-				rbr_rings[i]->hv_rx_buf_ioaddr_size,
-				t_dma_buf_p->orig_alength,
-				t_dma_buf_p->orig_alength));
-
-			rbr_rings[i]->hv_rx_cntl_base_ioaddr_pp =
-				(uint64_t)t_dma_cntl_p->orig_ioaddr_pp;
-			rbr_rings[i]->hv_rx_cntl_ioaddr_size =
-				(uint64_t)t_dma_cntl_p->orig_alength;
-			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-				"==> nxge_map_rxdma_channel: "
-				"channel %d "
-				"cntl base io $%p ($%p) "
-				"size 0x%llx (%d 0x%x)",
-				channel,
-				rbr_rings[i]->hv_rx_cntl_base_ioaddr_pp,
-				t_dma_cntl_p->ioaddr_pp,
-				rbr_rings[i]->hv_rx_cntl_ioaddr_size,
-				t_dma_cntl_p->orig_alength,
-				t_dma_cntl_p->orig_alength));
-		}
-
-#endif	/* sun4v and NIU_LP_WORKAROUND */
-	}
-
-	rx_rbr_rings->ndmas = rx_rcr_rings->ndmas = ndmas;
-	rx_rbr_rings->rbr_rings = rbr_rings;
-	nxgep->rx_rbr_rings = rx_rbr_rings;
-	rx_rcr_rings->rcr_rings = rcr_rings;
-	nxgep->rx_rcr_rings = rx_rcr_rings;
-
-	rx_mbox_areas_p->rxmbox_areas = rx_mbox_p;
-	nxgep->rx_mbox_areas_p = rx_mbox_areas_p;
-
-	goto nxge_map_rxdma_exit;
-
-nxge_map_rxdma_fail1:
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-		"==> nxge_map_rxdma: unmap rbr,rcr "
-		"(status 0x%x channel %d i %d)",
-		status, channel, i));
-	for (; i >= 0; i--) {
-		channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel;
-		nxge_unmap_rxdma_channel(nxgep, channel,
-			rbr_rings[i],
-			rcr_rings[i],
-			rx_mbox_p[i]);
-	}
-
-	KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas);
-	KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t));
-	KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas);
-	KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t));
-	KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas);
-	KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t));
-
-nxge_map_rxdma_exit:
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-		"<== nxge_map_rxdma: "
-		"(status 0x%x channel %d)",
-		status, channel));
-
-	return (status);
-}
-
-static void
-nxge_unmap_rxdma(p_nxge_t nxgep)
-{
-	int			i, ndmas;
-	uint16_t		channel;
-	p_rx_rbr_rings_t 	rx_rbr_rings;
-	p_rx_rbr_ring_t		*rbr_rings;
-	p_rx_rcr_rings_t 	rx_rcr_rings;
-	p_rx_rcr_ring_t		*rcr_rings;
-	p_rx_mbox_areas_t 	rx_mbox_areas_p;
-	p_rx_mbox_t		*rx_mbox_p;
-	p_nxge_dma_pool_t	dma_buf_poolp;
-	p_nxge_dma_pool_t	dma_cntl_poolp;
-	p_nxge_dma_common_t	*dma_buf_p;
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_unmap_rxdma"));
-
-	dma_buf_poolp = nxgep->rx_buf_pool_p;
-	dma_cntl_poolp = nxgep->rx_cntl_pool_p;
-
-	if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"<== nxge_unmap_rxdma: NULL buf pointers"));
-		return;
-	}
-
-	rx_rbr_rings = nxgep->rx_rbr_rings;
-	rx_rcr_rings = nxgep->rx_rcr_rings;
-	if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"<== nxge_unmap_rxdma: NULL ring pointers"));
-		return;
-	}
-	ndmas = rx_rbr_rings->ndmas;
-	if (!ndmas) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"<== nxge_unmap_rxdma: no channel"));
-		return;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-		"==> nxge_unmap_rxdma (ndmas %d)", ndmas));
-	rbr_rings = rx_rbr_rings->rbr_rings;
-	rcr_rings = rx_rcr_rings->rcr_rings;
-	rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
-	rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
-	dma_buf_p = dma_buf_poolp->dma_buf_pool_p;
-
-	for (i = 0; i < ndmas; i++) {
-		channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel;
-		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-			"==> nxge_unmap_rxdma (ndmas %d) channel %d",
-				ndmas, channel));
-		(void) nxge_unmap_rxdma_channel(nxgep, channel,
-				(p_rx_rbr_ring_t)rbr_rings[i],
-				(p_rx_rcr_ring_t)rcr_rings[i],
-				(p_rx_mbox_t)rx_mbox_p[i]);
-	}
-
-	KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t));
-	KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas);
-	KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t));
-	KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas);
-	KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t));
-	KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas);
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-		"<== nxge_unmap_rxdma"));
-}
-
-nxge_status_t
-nxge_map_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
-    p_nxge_dma_common_t *dma_buf_p,  p_rx_rbr_ring_t *rbr_p,
-    uint32_t num_chunks,
-    p_nxge_dma_common_t *dma_cntl_p, p_rx_rcr_ring_t *rcr_p,
-    p_rx_mbox_t *rx_mbox_p)
-{
-	int	status = NXGE_OK;
-
-	/*
-	 * Set up and prepare buffer blocks, descriptors
-	 * and mailbox.
-	 */
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-		"==> nxge_map_rxdma_channel (channel %d)", channel));
-	/*
-	 * Receive buffer blocks
-	 */
-	status = nxge_map_rxdma_channel_buf_ring(nxgep, channel,
-			dma_buf_p, rbr_p, num_chunks);
-	if (status != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_map_rxdma_channel (channel %d): "
-			"map buffer failed 0x%x", channel, status));
-		goto nxge_map_rxdma_channel_exit;
-	}
-
-	/*
-	 * Receive block ring, completion ring and mailbox.
-	 */
-	status = nxge_map_rxdma_channel_cfg_ring(nxgep, channel,
-			dma_cntl_p, rbr_p, rcr_p, rx_mbox_p);
-	if (status != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_map_rxdma_channel (channel %d): "
-			"map config failed 0x%x", channel, status));
-		goto nxge_map_rxdma_channel_fail2;
-	}
-
-	goto nxge_map_rxdma_channel_exit;
-
-nxge_map_rxdma_channel_fail3:
-	/* Free rbr, rcr */
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-		"==> nxge_map_rxdma_channel: free rbr/rcr "
-		"(status 0x%x channel %d)",
-		status, channel));
-	nxge_unmap_rxdma_channel_cfg_ring(nxgep,
-		*rcr_p, *rx_mbox_p);
-
-nxge_map_rxdma_channel_fail2:
-	/* Free buffer blocks */
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-		"==> nxge_map_rxdma_channel: free rx buffers "
-		"(nxgep 0x%x status 0x%x channel %d)",
-		nxgep, status, channel));
-	nxge_unmap_rxdma_channel_buf_ring(nxgep, *rbr_p);
-
-nxge_map_rxdma_channel_exit:
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-		"<== nxge_map_rxdma_channel: "
-		"(nxgep 0x%x status 0x%x channel %d)",
-		nxgep, status, channel));
-
-	return (status);
-}
-
-/*ARGSUSED*/
-static void
-nxge_unmap_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
-    p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
-{
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-		"==> nxge_unmap_rxdma_channel (channel %d)", channel));
-
-	/*
-	 * unmap receive block ring, completion ring and mailbox.
-	 */
-	(void) nxge_unmap_rxdma_channel_cfg_ring(nxgep,
-			rcr_p, rx_mbox_p);
-
-	/* unmap buffer blocks */
-	(void) nxge_unmap_rxdma_channel_buf_ring(nxgep, rbr_p);
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma_channel"));
-}
-
-/*ARGSUSED*/
-static nxge_status_t
-nxge_map_rxdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel,
-    p_nxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p,
-    p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p)
-{
-	p_rx_rbr_ring_t 	rbrp;
-	p_rx_rcr_ring_t 	rcrp;
-	p_rx_mbox_t 		mboxp;
-	p_nxge_dma_common_t 	cntl_dmap;
-	p_nxge_dma_common_t 	dmap;
-	p_rx_msg_t 		*rx_msg_ring;
-	p_rx_msg_t 		rx_msg_p;
-	p_rbr_cfig_a_t		rcfga_p;
-	p_rbr_cfig_b_t		rcfgb_p;
-	p_rcrcfig_a_t		cfga_p;
-	p_rcrcfig_b_t		cfgb_p;
-	p_rxdma_cfig1_t		cfig1_p;
-	p_rxdma_cfig2_t		cfig2_p;
-	p_rbr_kick_t		kick_p;
-	uint32_t		dmaaddrp;
-	uint32_t		*rbr_vaddrp;
-	uint32_t		bkaddr;
-	nxge_status_t		status = NXGE_OK;
-	int			i;
-	uint32_t 		nxge_port_rcr_size;
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-		"==> nxge_map_rxdma_channel_cfg_ring"));
-
-	cntl_dmap = *dma_cntl_p;
-
-	/* Map in the receive block ring */
-	rbrp = *rbr_p;
-	dmap = (p_nxge_dma_common_t)&rbrp->rbr_desc;
-	nxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4);
-	/*
-	 * Zero out buffer block ring descriptors.
-	 */
-	bzero((caddr_t)dmap->kaddrp, dmap->alength);
-
-	rcfga_p = &(rbrp->rbr_cfga);
-	rcfgb_p = &(rbrp->rbr_cfgb);
-	kick_p = &(rbrp->rbr_kick);
-	rcfga_p->value = 0;
-	rcfgb_p->value = 0;
-	kick_p->value = 0;
-	rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress;
-	rcfga_p->value = (rbrp->rbr_addr &
-				(RBR_CFIG_A_STDADDR_MASK |
-				RBR_CFIG_A_STDADDR_BASE_MASK));
-	rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT);
-
-	rcfgb_p->bits.ldw.bufsz0 = rbrp->pkt_buf_size0;
-	rcfgb_p->bits.ldw.vld0 = 1;
-	rcfgb_p->bits.ldw.bufsz1 = rbrp->pkt_buf_size1;
-	rcfgb_p->bits.ldw.vld1 = 1;
-	rcfgb_p->bits.ldw.bufsz2 = rbrp->pkt_buf_size2;
-	rcfgb_p->bits.ldw.vld2 = 1;
-	rcfgb_p->bits.ldw.bksize = nxgep->rx_bksize_code;
-
-	/*
-	 * For each buffer block, enter receive block address to the ring.
-	 */
-	rbr_vaddrp = (uint32_t *)dmap->kaddrp;
-	rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp;
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-		"==> nxge_map_rxdma_channel_cfg_ring: channel %d "
-		"rbr_vaddrp $%p", dma_channel, rbr_vaddrp));
-
-	rx_msg_ring = rbrp->rx_msg_ring;
-	for (i = 0; i < rbrp->tnblocks; i++) {
-		rx_msg_p = rx_msg_ring[i];
-		rx_msg_p->nxgep = nxgep;
-		rx_msg_p->rx_rbr_p = rbrp;
-		bkaddr = (uint32_t)
-			((rx_msg_p->buf_dma.dma_cookie.dmac_laddress
-				>> RBR_BKADDR_SHIFT));
-		rx_msg_p->free = B_FALSE;
-		rx_msg_p->max_usage_cnt = 0xbaddcafe;
-
-		*rbr_vaddrp++ = bkaddr;
-	}
-
-	kick_p->bits.ldw.bkadd = rbrp->rbb_max;
-	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
-
-	rbrp->rbr_rd_index = 0;
-
-	rbrp->rbr_consumed = 0;
-	rbrp->rbr_use_bcopy = B_TRUE;
-	rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0;
-	/*
-	 * Do bcopy on packets greater than bcopy size once
-	 * the lo threshold is reached.
-	 * This lo threshold should be less than the hi threshold.
-	 *
-	 * Do bcopy on every packet once the hi threshold is reached.
-	 */
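-	/*
-	 * The thresholds below are expressed as a fraction of rbb_max:
-	 * threshold = rbb_max * level / NXGE_RX_BCOPY_SCALE.  As an
-	 * illustration only, if NXGE_RX_BCOPY_SCALE were 8 and rbb_max
-	 * were 2048, NXGE_RX_COPY_6 would give a hi threshold of
-	 * 2048 * 6 / 8 = 1536 consumed buffers.
-	 */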
-	if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) {
-		/* default it to use hi */
-		nxge_rx_threshold_lo = nxge_rx_threshold_hi;
-	}
-
-	if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) {
-		nxge_rx_buf_size_type = NXGE_RBR_TYPE2;
-	}
-	rbrp->rbr_bufsize_type = nxge_rx_buf_size_type;
-
-	switch (nxge_rx_threshold_hi) {
-	default:
-	case	NXGE_RX_COPY_NONE:
-		/* Do not do bcopy at all */
-		rbrp->rbr_use_bcopy = B_FALSE;
-		rbrp->rbr_threshold_hi = rbrp->rbb_max;
-		break;
-
-	case NXGE_RX_COPY_1:
-	case NXGE_RX_COPY_2:
-	case NXGE_RX_COPY_3:
-	case NXGE_RX_COPY_4:
-	case NXGE_RX_COPY_5:
-	case NXGE_RX_COPY_6:
-	case NXGE_RX_COPY_7:
-		rbrp->rbr_threshold_hi =
-			rbrp->rbb_max *
-			(nxge_rx_threshold_hi)/NXGE_RX_BCOPY_SCALE;
-		break;
-
-	case NXGE_RX_COPY_ALL:
-		rbrp->rbr_threshold_hi = 0;
-		break;
-	}
-
-	switch (nxge_rx_threshold_lo) {
-	default:
-	case	NXGE_RX_COPY_NONE:
-		/* Do not do bcopy at all */
-		if (rbrp->rbr_use_bcopy) {
-			rbrp->rbr_use_bcopy = B_FALSE;
-		}
-		rbrp->rbr_threshold_lo = rbrp->rbb_max;
-		break;
-
-	case NXGE_RX_COPY_1:
-	case NXGE_RX_COPY_2:
-	case NXGE_RX_COPY_3:
-	case NXGE_RX_COPY_4:
-	case NXGE_RX_COPY_5:
-	case NXGE_RX_COPY_6:
-	case NXGE_RX_COPY_7:
-		rbrp->rbr_threshold_lo =
-			rbrp->rbb_max *
-			(nxge_rx_threshold_lo)/NXGE_RX_BCOPY_SCALE;
-		break;
-
-	case NXGE_RX_COPY_ALL:
-		rbrp->rbr_threshold_lo = 0;
-		break;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL,
-		"nxge_map_rxdma_channel_cfg_ring: channel %d "
-		"rbb_max %d "
-		"rbrp->rbr_bufsize_type %d "
-		"rbb_threshold_hi %d "
-		"rbb_threshold_lo %d",
-		dma_channel,
-		rbrp->rbb_max,
-		rbrp->rbr_bufsize_type,
-		rbrp->rbr_threshold_hi,
-		rbrp->rbr_threshold_lo));
-
-	rbrp->page_valid.value = 0;
-	rbrp->page_mask_1.value = rbrp->page_mask_2.value = 0;
-	rbrp->page_value_1.value = rbrp->page_value_2.value = 0;
-	rbrp->page_reloc_1.value = rbrp->page_reloc_2.value = 0;
-	rbrp->page_hdl.value = 0;
-
-	rbrp->page_valid.bits.ldw.page0 = 1;
-	rbrp->page_valid.bits.ldw.page1 = 1;
-
-	/* Map in the receive completion ring */
-	rcrp = (p_rx_rcr_ring_t)
-		KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP);
-	rcrp->rdc = dma_channel;
-
-	nxge_port_rcr_size = nxgep->nxge_port_rcr_size;
-	rcrp->comp_size = nxge_port_rcr_size;
-	rcrp->comp_wrap_mask = nxge_port_rcr_size - 1;
-
-	rcrp->max_receive_pkts = nxge_max_rx_pkts;
-
-	dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
-	nxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size,
-			sizeof (rcr_entry_t));
-	rcrp->comp_rd_index = 0;
-	rcrp->comp_wt_index = 0;
-	rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
-		(p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
-	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
-		(p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
-
-	rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
-			(nxge_port_rcr_size - 1);
-	rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
-			(nxge_port_rcr_size - 1);
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-		"==> nxge_map_rxdma_channel_cfg_ring: "
-		"channel %d "
-		"rbr_vaddrp $%p "
-		"rcr_desc_rd_head_p $%p "
-		"rcr_desc_rd_head_pp $%p "
-		"rcr_desc_rd_last_p $%p "
-		"rcr_desc_rd_last_pp $%p ",
-		dma_channel,
-		rbr_vaddrp,
-		rcrp->rcr_desc_rd_head_p,
-		rcrp->rcr_desc_rd_head_pp,
-		rcrp->rcr_desc_last_p,
-		rcrp->rcr_desc_last_pp));
-
-	/*
-	 * Zero out receive completion ring descriptors.
-	 */
-	bzero((caddr_t)dmap->kaddrp, dmap->alength);
-	rcrp->intr_timeout = nxgep->intr_timeout;
-	rcrp->intr_threshold = nxgep->intr_threshold;
-	rcrp->full_hdr_flag = B_FALSE;
-	rcrp->sw_priv_hdr_len = 0;
-
-	cfga_p = &(rcrp->rcr_cfga);
-	cfgb_p = &(rcrp->rcr_cfgb);
-	cfga_p->value = 0;
-	cfgb_p->value = 0;
-	rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress;
-	cfga_p->value = (rcrp->rcr_addr &
-			    (RCRCFIG_A_STADDR_MASK |
-			    RCRCFIG_A_STADDR_BASE_MASK));
-
-	cfga_p->value |= ((uint64_t)rcrp->comp_size <<
-				RCRCFIG_A_LEN_SHIF);
-
-	/*
-	 * Timeout should be set based on the system clock divider.
-	 * The following timeout value of 1 assumes that the
-	 * granularity (1000) is 3 microseconds running at 300MHz.
-	 */
-	cfgb_p->bits.ldw.pthres = rcrp->intr_threshold;
-	cfgb_p->bits.ldw.timeout = rcrp->intr_timeout;
-	cfgb_p->bits.ldw.entout = 1;
-
-	/* Map in the mailbox */
-	mboxp = (p_rx_mbox_t)
-			KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP);
-	dmap = (p_nxge_dma_common_t)&mboxp->rx_mbox;
-	nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t));
-	cfig1_p = (p_rxdma_cfig1_t)&mboxp->rx_cfg1;
-	cfig2_p = (p_rxdma_cfig2_t)&mboxp->rx_cfg2;
-	cfig1_p->value = cfig2_p->value = 0;
-
-	mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress;
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-		"==> nxge_map_rxdma_channel_cfg_ring: "
-		"channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx",
-		dma_channel, cfig1_p->value, cfig2_p->value,
-		mboxp->mbox_addr));
-
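-	/*
-	 * Split the 64-bit mailbox DMA address across the two config
-	 * registers: the address bits above bit 31 go into cfig1
-	 * (mbaddr_h), and the 32-bit low word, masked and shifted per
-	 * RXDMA_CFIG2_MBADDR_L_MASK/SHIFT, goes into cfig2 (mbaddr).
-	 */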
-	dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress >> 32
-			& 0xfff);
-	cfig1_p->bits.ldw.mbaddr_h = dmaaddrp;
-
-
-	dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff);
-	dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress &
-				RXDMA_CFIG2_MBADDR_L_MASK);
-
-	cfig2_p->bits.ldw.mbaddr = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT);
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-		"==> nxge_map_rxdma_channel_cfg_ring: "
-		"channel %d dmaaddrp $%p "
-		"cfg1 0x%016llx cfig2 0x%016llx",
-		dma_channel, dmaaddrp,
-		cfig1_p->value, cfig2_p->value));
-
-	cfig2_p->bits.ldw.full_hdr = rcrp->full_hdr_flag;
-	cfig2_p->bits.ldw.offset = rcrp->sw_priv_hdr_len;
-
-	rbrp->rx_rcr_p = rcrp;
-	rcrp->rx_rbr_p = rbrp;
-	*rcr_p = rcrp;
-	*rx_mbox_p = mboxp;
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-		"<== nxge_map_rxdma_channel_cfg_ring status 0x%08x", status));
-
-	return (status);
-}
-
-/*ARGSUSED*/
-static void
-nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t nxgep,
-    p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
-{
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-		"==> nxge_unmap_rxdma_channel_cfg_ring: channel %d",
-		rcr_p->rdc));
-
-	KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t));
-	KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t));
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-		"<== nxge_unmap_rxdma_channel_cfg_ring"));
-}
-
-static nxge_status_t
-nxge_map_rxdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel,
-    p_nxge_dma_common_t *dma_buf_p,
-    p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks)
-{
-	p_rx_rbr_ring_t 	rbrp;
-	p_nxge_dma_common_t 	dma_bufp, tmp_bufp;
-	p_rx_msg_t 		*rx_msg_ring;
-	p_rx_msg_t 		rx_msg_p;
-	p_mblk_t 		mblk_p;
-
-	rxring_info_t *ring_info;
-	nxge_status_t		status = NXGE_OK;
-	int			i, j, index;
-	uint32_t		size, bsize, nblocks, nmsgs;
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-		"==> nxge_map_rxdma_channel_buf_ring: channel %d",
-		channel));
-
-	dma_bufp = tmp_bufp = *dma_buf_p;
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-		" nxge_map_rxdma_channel_buf_ring: channel %d to map %d "
-		"chunks bufp 0x%016llx",
-		channel, num_chunks, dma_bufp));
-
-	nmsgs = 0;
-	for (i = 0; i < num_chunks; i++, tmp_bufp++) {
-		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-			"==> nxge_map_rxdma_channel_buf_ring: channel %d "
-			"bufp 0x%016llx nblocks %d nmsgs %d",
-			channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
-		nmsgs += tmp_bufp->nblocks;
-	}
-	if (!nmsgs) {
-		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-			"<== nxge_map_rxdma_channel_buf_ring: channel %d "
-			"no msg blocks",
-			channel));
-		status = NXGE_ERROR;
-		goto nxge_map_rxdma_channel_buf_ring_exit;
-	}
-
-	rbrp = (p_rx_rbr_ring_t)
-		KMEM_ZALLOC(sizeof (rx_rbr_ring_t), KM_SLEEP);
-
-	size = nmsgs * sizeof (p_rx_msg_t);
-	rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
-	ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t),
-		KM_SLEEP);
-
-	MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER,
-				(void *)nxgep->interrupt_cookie);
-	MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER,
-				(void *)nxgep->interrupt_cookie);
-	rbrp->rdc = channel;
-	rbrp->num_blocks = num_chunks;
-	rbrp->tnblocks = nmsgs;
-	rbrp->rbb_max = nmsgs;
-	rbrp->rbr_max_size = nmsgs;
-	rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1);
-
-	/*
-	 * Buffer sizes suggested by NIU architect.
-	 * 256 bytes, 1K and 2K.
-	 */
-
-	rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B;
-	rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES;
-	rbrp->npi_pkt_buf_size0 = SIZE_256B;
-
-	rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K;
-	rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES;
-	rbrp->npi_pkt_buf_size1 = SIZE_1KB;
-
-	rbrp->block_size = nxgep->rx_default_block_size;
-
-	if (!nxge_jumbo_enable && !nxgep->param_arr[param_accept_jumbo].value) {
-		rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K;
-		rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES;
-		rbrp->npi_pkt_buf_size2 = SIZE_2KB;
-	} else {
-		if (rbrp->block_size >= 0x2000) {
-			rbrp->pkt_buf_size2 = RBR_BUFSZ2_8K;
-			rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_8K_BYTES;
-			rbrp->npi_pkt_buf_size2 = SIZE_8KB;
-		} else {
-			rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K;
-			rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES;
-			rbrp->npi_pkt_buf_size2 = SIZE_4KB;
-		}
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-		"==> nxge_map_rxdma_channel_buf_ring: channel %d "
-		"actual rbr max %d rbb_max %d nmsgs %d "
-		"rbrp->block_size %d default_block_size %d "
-		"(config nxge_rbr_size %d nxge_rbr_spare_size %d)",
-		channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs,
-		rbrp->block_size, nxgep->rx_default_block_size,
-		nxge_rbr_size, nxge_rbr_spare_size));
-
-	/* Map in buffers from the buffer pool.  */
-	index = 0;
-	for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) {
-		bsize = dma_bufp->block_size;
-		nblocks = dma_bufp->nblocks;
-		ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp;
-		ring_info->buffer[i].buf_index = i;
-		ring_info->buffer[i].buf_size = dma_bufp->alength;
-		ring_info->buffer[i].start_index = index;
-		ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp;
-
-		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-			" nxge_map_rxdma_channel_buf_ring: map channel %d "
-			"chunk %d"
-			" nblocks %d chunk_size %x block_size 0x%x "
-			"dma_bufp $%p", channel, i,
-			dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize,
-			dma_bufp));
-
-		for (j = 0; j < nblocks; j++) {
-			if ((rx_msg_p = nxge_allocb(bsize, BPRI_LO,
-					dma_bufp)) == NULL) {
-				NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-					"allocb failed"));
-				break;
-			}
-			rx_msg_ring[index] = rx_msg_p;
-			rx_msg_p->block_index = index;
-			rx_msg_p->shifted_addr = (uint32_t)
-				((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >>
-					    RBR_BKADDR_SHIFT));
-
-			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-				"index %d j %d rx_msg_p $%p",
-				index, j, rx_msg_p));
-
-			mblk_p = rx_msg_p->rx_mblk_p;
-			mblk_p->b_wptr = mblk_p->b_rptr + bsize;
-			index++;
-			rx_msg_p->buf_dma.dma_channel = channel;
-		}
-	}
-	if (i < rbrp->num_blocks) {
-		goto nxge_map_rxdma_channel_buf_ring_fail1;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-		"nxge_map_rxdma_channel_buf_ring: done buf init "
-			"channel %d msg block entries %d",
-			channel, index));
-	ring_info->block_size_mask = bsize - 1;
-	rbrp->rx_msg_ring = rx_msg_ring;
-	rbrp->dma_bufp = dma_buf_p;
-	rbrp->ring_info = ring_info;
-
-	status = nxge_rxbuf_index_info_init(nxgep, rbrp);
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-		" nxge_map_rxdma_channel_buf_ring: "
-		"channel %d done buf info init", channel));
-
-	*rbr_p = rbrp;
-	goto nxge_map_rxdma_channel_buf_ring_exit;
-
-nxge_map_rxdma_channel_buf_ring_fail1:
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-		" nxge_map_rxdma_channel_buf_ring: failed channel %d "
-		"(status 0x%x)", channel, status));
-
-	index--;
-	for (; index >= 0; index--) {
-		rx_msg_p = rx_msg_ring[index];
-		if (rx_msg_p != NULL) {
-			freeb(rx_msg_p->rx_mblk_p);
-			rx_msg_ring[index] = NULL;
-		}
-	}
-nxge_map_rxdma_channel_buf_ring_fail:
-	MUTEX_DESTROY(&rbrp->post_lock);
-	MUTEX_DESTROY(&rbrp->lock);
-	KMEM_FREE(ring_info, sizeof (rxring_info_t));
-	KMEM_FREE(rx_msg_ring, size);
-	KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t));
-
-nxge_map_rxdma_channel_buf_ring_exit:
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-		"<== nxge_map_rxdma_channel_buf_ring status 0x%08x", status));
-
-	return (status);
-}
-
-/*ARGSUSED*/
-static void
-nxge_unmap_rxdma_channel_buf_ring(p_nxge_t nxgep,
-    p_rx_rbr_ring_t rbr_p)
-{
-	p_rx_msg_t 		*rx_msg_ring;
-	p_rx_msg_t 		rx_msg_p;
-	rxring_info_t 		*ring_info;
-	int			i;
-	uint32_t		size;
-#ifdef	NXGE_DEBUG
-	int			num_chunks;
-#endif
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-		"==> nxge_unmap_rxdma_channel_buf_ring"));
-	if (rbr_p == NULL) {
-		NXGE_DEBUG_MSG((nxgep, RX_CTL,
-			"<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp"));
-		return;
-	}
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-		"==> nxge_unmap_rxdma_channel_buf_ring: channel %d",
-		rbr_p->rdc));
-
-	rx_msg_ring = rbr_p->rx_msg_ring;
-	ring_info = rbr_p->ring_info;
-
-	if (rx_msg_ring == NULL || ring_info == NULL) {
-		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-			"<== nxge_unmap_rxdma_channel_buf_ring: "
-			"rx_msg_ring $%p ring_info $%p",
-			rx_msg_ring, ring_info));
-		return;
-	}
-
-#ifdef	NXGE_DEBUG
-	num_chunks = rbr_p->num_blocks;
-#endif
-	size = rbr_p->tnblocks * sizeof (p_rx_msg_t);
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-		" nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d "
-		"tnblocks %d (max %d) size ptrs %d ",
-		rbr_p->rdc, num_chunks,
-		rbr_p->tnblocks, rbr_p->rbr_max_size, size));
-
-	for (i = 0; i < rbr_p->tnblocks; i++) {
-		rx_msg_p = rx_msg_ring[i];
-		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-			" nxge_unmap_rxdma_channel_buf_ring: "
-			"rx_msg_p $%p",
-			rx_msg_p));
-		if (rx_msg_p != NULL) {
-			freeb(rx_msg_p->rx_mblk_p);
-			rx_msg_ring[i] = NULL;
-		}
-	}
-
-	MUTEX_DESTROY(&rbr_p->post_lock);
-	MUTEX_DESTROY(&rbr_p->lock);
-	KMEM_FREE(ring_info, sizeof (rxring_info_t));
-	KMEM_FREE(rx_msg_ring, size);
-	KMEM_FREE(rbr_p, sizeof (rx_rbr_ring_t));
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-		"<== nxge_unmap_rxdma_channel_buf_ring"));
-}
-
-static nxge_status_t
-nxge_rxdma_hw_start_common(p_nxge_t nxgep)
-{
-	nxge_status_t		status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common"));
-
-	/*
-	 * Load the sharable parameters by writing to the
-	 * function zero control registers. These FZC registers
-	 * should be initialized only once for the entire chip.
-	 */
-	(void) nxge_init_fzc_rx_common(nxgep);
-
-	/*
-	 * Initialize the RXDMA port specific FZC control configurations.
-	 * These FZC registers are pertaining to each port.
-	 */
-	(void) nxge_init_fzc_rxdma_port(nxgep);
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_start_common"));
-
-	return (status);
-}
-
-/*ARGSUSED*/
-static void
-nxge_rxdma_hw_stop_common(p_nxge_t nxgep)
-{
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop_common"));
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop_common"));
-}
-
-static nxge_status_t
-nxge_rxdma_hw_start(p_nxge_t nxgep)
-{
-	int			i, ndmas;
-	uint16_t		channel;
-	p_rx_rbr_rings_t 	rx_rbr_rings;
-	p_rx_rbr_ring_t		*rbr_rings;
-	p_rx_rcr_rings_t 	rx_rcr_rings;
-	p_rx_rcr_ring_t		*rcr_rings;
-	p_rx_mbox_areas_t 	rx_mbox_areas_p;
-	p_rx_mbox_t		*rx_mbox_p;
-	nxge_status_t		status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start"));
-
-	rx_rbr_rings = nxgep->rx_rbr_rings;
-	rx_rcr_rings = nxgep->rx_rcr_rings;
-	if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
-		NXGE_DEBUG_MSG((nxgep, RX_CTL,
-			"<== nxge_rxdma_hw_start: NULL ring pointers"));
-		return (NXGE_ERROR);
-	}
-	ndmas = rx_rbr_rings->ndmas;
-	if (ndmas == 0) {
-		NXGE_DEBUG_MSG((nxgep, RX_CTL,
-			"<== nxge_rxdma_hw_start: no dma channel allocated"));
-		return (NXGE_ERROR);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-		"==> nxge_rxdma_hw_start (ndmas %d)", ndmas));
-
-	rbr_rings = rx_rbr_rings->rbr_rings;
-	rcr_rings = rx_rcr_rings->rcr_rings;
-	rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
-	if (rx_mbox_areas_p) {
-		rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
-	}
-
-	for (i = 0; i < ndmas; i++) {
-		channel = rbr_rings[i]->rdc;
-		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-			"==> nxge_rxdma_hw_start (ndmas %d) channel %d",
-				ndmas, channel));
-		status = nxge_rxdma_start_channel(nxgep, channel,
-				(p_rx_rbr_ring_t)rbr_rings[i],
-				(p_rx_rcr_ring_t)rcr_rings[i],
-				(p_rx_mbox_t)rx_mbox_p[i]);
-		if (status != NXGE_OK) {
-			goto nxge_rxdma_hw_start_fail1;
-		}
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start: "
-		"rx_rbr_rings 0x%016llx rings 0x%016llx",
-		rx_rbr_rings, rx_rcr_rings));
-
-	goto nxge_rxdma_hw_start_exit;
-
-nxge_rxdma_hw_start_fail1:
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-		"==> nxge_rxdma_hw_start: disable "
-		"(status 0x%x channel %d i %d)", status, channel, i));
-	for (; i >= 0; i--) {
-		channel = rbr_rings[i]->rdc;
-		(void) nxge_rxdma_stop_channel(nxgep, channel);
-	}
-
-nxge_rxdma_hw_start_exit:
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-		"==> nxge_rxdma_hw_start: (status 0x%x)", status));
-
-	return (status);
-}
-
-static void
-nxge_rxdma_hw_stop(p_nxge_t nxgep)
-{
-	int			i, ndmas;
-	uint16_t		channel;
-	p_rx_rbr_rings_t 	rx_rbr_rings;
-	p_rx_rbr_ring_t		*rbr_rings;
-	p_rx_rcr_rings_t 	rx_rcr_rings;
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop"));
-
-	rx_rbr_rings = nxgep->rx_rbr_rings;
-	rx_rcr_rings = nxgep->rx_rcr_rings;
-	if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
-		NXGE_DEBUG_MSG((nxgep, RX_CTL,
-			"<== nxge_rxdma_hw_stop: NULL ring pointers"));
-		return;
-	}
-	ndmas = rx_rbr_rings->ndmas;
-	if (!ndmas) {
-		NXGE_DEBUG_MSG((nxgep, RX_CTL,
-			"<== nxge_rxdma_hw_stop: no dma channel allocated"));
-		return;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-		"==> nxge_rxdma_hw_stop (ndmas %d)", ndmas));
-
-	rbr_rings = rx_rbr_rings->rbr_rings;
-
-	for (i = 0; i < ndmas; i++) {
-		channel = rbr_rings[i]->rdc;
-		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-			"==> nxge_rxdma_hw_stop (ndmas %d) channel %d",
-				ndmas, channel));
-		(void) nxge_rxdma_stop_channel(nxgep, channel);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop: "
-		"rx_rbr_rings 0x%016llx rings 0x%016llx",
-		rx_rbr_rings, rx_rcr_rings));
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop"));
-}
-
-
-static nxge_status_t
-nxge_rxdma_start_channel(p_nxge_t nxgep, uint16_t channel,
-    p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
-
-{
-	npi_handle_t		handle;
-	npi_status_t		rs = NPI_SUCCESS;
-	rx_dma_ctl_stat_t	cs;
-	rx_dma_ent_msk_t	ent_mask;
-	nxge_status_t		status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel"));
-
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "nxge_rxdma_start_channel: "
-		"npi handle addr $%p acc $%p",
-		nxgep->npi_handle.regp, nxgep->npi_handle.regh));
-
-	/* Reset RXDMA channel */
-	rs = npi_rxdma_cfg_rdc_reset(handle, channel);
-	if (rs != NPI_SUCCESS) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_rxdma_start_channel: "
-			"reset rxdma failed (0x%08x channel %d)",
-			rs, channel));
-		return (NXGE_ERROR | rs);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-		"==> nxge_rxdma_start_channel: reset done: channel %d",
-		channel));
-
-	/*
-	 * Initialize the RXDMA channel specific FZC control
-	 * configurations. These FZC registers are pertaining
-	 * to each RX channel (logical pages).
-	 */
-	status = nxge_init_fzc_rxdma_channel(nxgep,
-			channel, rbr_p, rcr_p, mbox_p);
-	if (status != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_rxdma_start_channel: "
-			"init fzc rxdma failed (0x%08x channel %d)",
-			status, channel));
-		return (status);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-		"==> nxge_rxdma_start_channel: fzc done"));
-
-	/*
-	 * Zero out the shadow and prefetch ram.
-	 */
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
-		"ram done"));
-
-	/* Set up the interrupt event masks. */
-	ent_mask.value = 0;
-	ent_mask.value |= RX_DMA_ENT_MSK_RBREMPTY_MASK;
-	rs = npi_rxdma_event_mask(handle, OP_SET, channel,
-			&ent_mask);
-	if (rs != NPI_SUCCESS) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_rxdma_start_channel: "
-			"init rxdma event masks failed (0x%08x channel %d)",
-			rs, channel));
-		return (NXGE_ERROR | rs);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
-		"event done: channel %d (mask 0x%016llx)",
-		channel, ent_mask.value));
-
-	/* Initialize the receive DMA control and status register */
-	cs.value = 0;
-	cs.bits.hdw.mex = 1;
-	cs.bits.hdw.rcrthres = 1;
-	cs.bits.hdw.rcrto = 1;
-	cs.bits.hdw.rbr_empty = 1;
-	status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs);
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
-		"channel %d rx_dma_cntl_stat 0x%0016llx", channel, cs.value));
-	if (status != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_rxdma_start_channel: "
-			"init rxdma control register failed (0x%08x channel %d)",
-			status, channel));
-		return (status);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
-		"control done - channel %d cs 0x%016llx", channel, cs.value));
-
-	/*
-	 * Load the RXDMA descriptors, buffers and mailbox,
-	 * then initialize and enable the receive DMA channel.
-	 */
-	status = nxge_enable_rxdma_channel(nxgep,
-			channel, rbr_p, rcr_p, mbox_p);
-
-	if (status != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			    " nxge_rxdma_start_channel: "
-			    " init enable rxdma failed (0x%08x channel %d)",
-			    status, channel));
-		return (status);
-	}
-
-	ent_mask.value = 0;
-	ent_mask.value |= (RX_DMA_ENT_MSK_WRED_DROP_MASK |
-				RX_DMA_ENT_MSK_PTDROP_PKT_MASK);
-	rs = npi_rxdma_event_mask(handle, OP_SET, channel,
-			&ent_mask);
-	if (rs != NPI_SUCCESS) {
-		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-			"==> nxge_rxdma_start_channel: "
-			"init rxdma event masks failed (0x%08x channel %d)",
-			rs, channel));
-		return (NXGE_ERROR | rs);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
-		"control done - channel %d cs 0x%016llx", channel, cs.value));
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
-		"==> nxge_rxdma_start_channel: enable done"));
-
-	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_start_channel"));
-
-	return (NXGE_OK);
-}
-
-static nxge_status_t
-nxge_rxdma_stop_channel(p_nxge_t nxgep, uint16_t channel)
-{
-	npi_handle_t		handle;
-	npi_status_t		rs = NPI_SUCCESS;
-	rx_dma_ctl_stat_t	cs;
-	rx_dma_ent_msk_t	ent_mask;
-	nxge_status_t		status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel"));
-
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "nxge_rxdma_stop_channel: "
-		"npi handle addr $%p acc $%p",
-		nxgep->npi_handle.regp, nxgep->npi_handle.regh));
-
-	/* Reset RXDMA channel */
-	rs = npi_rxdma_cfg_rdc_reset(handle, channel);
-	if (rs != NPI_SUCCESS) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			    " nxge_rxdma_stop_channel: "
-			    " reset rxdma failed (0x%08x channel %d)",
-			    rs, channel));
-		return (NXGE_ERROR | rs);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL,
-		"==> nxge_rxdma_stop_channel: reset done"));
-
-	/* Set up the interrupt event masks. */
-	ent_mask.value = RX_DMA_ENT_MSK_ALL;
-	rs = npi_rxdma_event_mask(handle, OP_SET, channel,
-			&ent_mask);
-	if (rs != NPI_SUCCESS) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			    "==> nxge_rxdma_stop_channel: "
-			    "set rxdma event masks failed (0x%08x channel %d)",
-			    rs, channel));
-		return (NXGE_ERROR | rs);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL,
-		"==> nxge_rxdma_stop_channel: event done"));
-
-	/* Initialize the receive DMA control and status register */
-	cs.value = 0;
-	status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel,
-			&cs);
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel: control "
-		" to default (all 0s) 0x%08x", cs.value));
-	if (status != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			    " nxge_rxdma_stop_channel: init rxdma"
-			    " control register failed (0x%08x channel %d)",
-			status, channel));
-		return (status);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL,
-		"==> nxge_rxdma_stop_channel: control done"));
-
-	/* disable dma channel */
-	status = nxge_disable_rxdma_channel(nxgep, channel);
-
-	if (status != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			    " nxge_rxdma_stop_channel: "
-			    " disable rxdma failed (0x%08x channel %d)",
-			    status, channel));
-		return (status);
-	}
-
-	NXGE_DEBUG_MSG((nxgep,
-		RX_CTL, "==> nxge_rxdma_stop_channel: disable done"));
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_channel"));
-
-	return (NXGE_OK);
-}
-
-nxge_status_t
-nxge_rxdma_handle_sys_errors(p_nxge_t nxgep)
-{
-	npi_handle_t		handle;
-	p_nxge_rdc_sys_stats_t	statsp;
-	rx_ctl_dat_fifo_stat_t	stat;
-	uint32_t		zcp_err_status;
-	uint32_t		ipp_err_status;
-	nxge_status_t		status = NXGE_OK;
-	npi_status_t		rs = NPI_SUCCESS;
-	boolean_t		my_err = B_FALSE;
-
-	handle = nxgep->npi_handle;
-	statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;
-
-	rs = npi_rxdma_rxctl_fifo_error_intr_get(handle, &stat);
-
-	if (rs != NPI_SUCCESS)
-		return (NXGE_ERROR | rs);
-
-	if (stat.bits.ldw.id_mismatch) {
-		statsp->id_mismatch++;
-		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL,
-					NXGE_FM_EREPORT_RDMC_ID_MISMATCH);
-		/* Global fatal error encountered */
-	}
-
-	if ((stat.bits.ldw.zcp_eop_err) || (stat.bits.ldw.ipp_eop_err)) {
-		switch (nxgep->mac.portnum) {
-		case 0:
-			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT0) ||
-				(stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT0)) {
-				my_err = B_TRUE;
-				zcp_err_status = stat.bits.ldw.zcp_eop_err;
-				ipp_err_status = stat.bits.ldw.ipp_eop_err;
-			}
-			break;
-		case 1:
-			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT1) ||
-				(stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT1)) {
-				my_err = B_TRUE;
-				zcp_err_status = stat.bits.ldw.zcp_eop_err;
-				ipp_err_status = stat.bits.ldw.ipp_eop_err;
-			}
-			break;
-		case 2:
-			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT2) ||
-				(stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT2)) {
-				my_err = B_TRUE;
-				zcp_err_status = stat.bits.ldw.zcp_eop_err;
-				ipp_err_status = stat.bits.ldw.ipp_eop_err;
-			}
-			break;
-		case 3:
-			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT3) ||
-				(stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT3)) {
-				my_err = B_TRUE;
-				zcp_err_status = stat.bits.ldw.zcp_eop_err;
-				ipp_err_status = stat.bits.ldw.ipp_eop_err;
-			}
-			break;
-		default:
-			return (NXGE_ERROR);
-		}
-	}
-
-	if (my_err) {
-		status = nxge_rxdma_handle_port_errors(nxgep, ipp_err_status,
-							zcp_err_status);
-		if (status != NXGE_OK)
-			return (status);
-	}
-
-	return (NXGE_OK);
-}
-
-static nxge_status_t
-nxge_rxdma_handle_port_errors(p_nxge_t nxgep, uint32_t ipp_status,
-							uint32_t zcp_status)
-{
-	boolean_t		rxport_fatal = B_FALSE;
-	p_nxge_rdc_sys_stats_t	statsp;
-	nxge_status_t		status = NXGE_OK;
-	uint8_t			portn;
-
-	portn = nxgep->mac.portnum;
-	statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;
-
-	if (ipp_status & (0x1 << portn)) {
-		statsp->ipp_eop_err++;
-		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-					NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR);
-		rxport_fatal = B_TRUE;
-	}
-
-	if (zcp_status & (0x1 << portn)) {
-		statsp->zcp_eop_err++;
-		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-					NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR);
-		rxport_fatal = B_TRUE;
-	}
-
-	if (rxport_fatal) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			    " nxge_rxdma_handle_port_errors: "
-			    " fatal error on Port #%d\n",
-				portn));
-		status = nxge_rx_port_fatal_err_recover(nxgep);
-		if (status == NXGE_OK) {
-			FM_SERVICE_RESTORED(nxgep);
-		}
-	}
-
-	return (status);
-}
-
-static nxge_status_t
-nxge_rxdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel)
-{
-	npi_handle_t		handle;
-	npi_status_t		rs = NPI_SUCCESS;
-	nxge_status_t		status = NXGE_OK;
-	p_rx_rbr_ring_t		rbrp;
-	p_rx_rcr_ring_t		rcrp;
-	p_rx_mbox_t		mboxp;
-	rx_dma_ent_msk_t	ent_mask;
-	p_nxge_dma_common_t	dmap;
-	int			ring_idx;
-	uint32_t		ref_cnt;
-	p_rx_msg_t		rx_msg_p;
-	int			i;
-	uint32_t		nxge_port_rcr_size;
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fatal_err_recover"));
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"Recovering from RxDMAChannel#%d error...", channel));
-
-	/*
-	 * Stop the DMA channel and wait for the stop-done bit.
-	 * If the stop-done bit is not set, report an error.
-	 */
-
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Rx DMA stop..."));
-
-	ring_idx = nxge_rxdma_get_ring_index(nxgep, channel);
-	rbrp = (p_rx_rbr_ring_t)nxgep->rx_rbr_rings->rbr_rings[ring_idx];
-	rcrp = (p_rx_rcr_ring_t)nxgep->rx_rcr_rings->rcr_rings[ring_idx];
-
-	MUTEX_ENTER(&rcrp->lock);
-	MUTEX_ENTER(&rbrp->lock);
-	MUTEX_ENTER(&rbrp->post_lock);
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA channel..."));
-
-	rs = npi_rxdma_cfg_rdc_disable(handle, channel);
-	if (rs != NPI_SUCCESS) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_disable_rxdma_channel:failed"));
-		goto fail;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA interrupt..."));
-
-	/* Disable interrupt */
-	ent_mask.value = RX_DMA_ENT_MSK_ALL;
-	rs = npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
-	if (rs != NPI_SUCCESS) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"nxge_rxdma_stop_channel: "
-				"set rxdma event masks failed (channel %d)",
-				channel));
-	}
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel reset..."));
-
-	/* Reset RXDMA channel */
-	rs = npi_rxdma_cfg_rdc_reset(handle, channel);
-	if (rs != NPI_SUCCESS) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_rxdma_fatal_err_recover: "
-				" reset rxdma failed (channel %d)", channel));
-		goto fail;
-	}
-
-	nxge_port_rcr_size = nxgep->nxge_port_rcr_size;
-
-	mboxp = (p_rx_mbox_t)
-		nxgep->rx_mbox_areas_p->rxmbox_areas[ring_idx];
-
-	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
-	rbrp->rbr_rd_index = 0;
-
-	rcrp->comp_rd_index = 0;
-	rcrp->comp_wt_index = 0;
-	rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
-		(p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
-	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
-		(p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
-
-	rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
-		(nxge_port_rcr_size - 1);
-	rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
-		(nxge_port_rcr_size - 1);
-
-	dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
-	bzero((caddr_t)dmap->kaddrp, dmap->alength);
-
-	cmn_err(CE_NOTE, "!rbr entries = %d\n", rbrp->rbr_max_size);
-
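-	/*
-	 * Walk the receive buffer ring.  Buffers still referenced by
-	 * upper layers that have reached their maximum usage count are
-	 * marked free so they can be re-posted; partially consumed
-	 * buffers are only logged.
-	 */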
-	for (i = 0; i < rbrp->rbr_max_size; i++) {
-		rx_msg_p = rbrp->rx_msg_ring[i];
-		ref_cnt = rx_msg_p->ref_cnt;
-		if (ref_cnt != 1) {
-			if (rx_msg_p->cur_usage_cnt !=
-					rx_msg_p->max_usage_cnt) {
-				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-						"buf[%d]: cur_usage_cnt = %d "
-						"max_usage_cnt = %d\n", i,
-						rx_msg_p->cur_usage_cnt,
-						rx_msg_p->max_usage_cnt));
-			} else {
-				/* Buffer can be re-posted */
-				rx_msg_p->free = B_TRUE;
-				rx_msg_p->cur_usage_cnt = 0;
-				rx_msg_p->max_usage_cnt = 0xbaddcafe;
-				rx_msg_p->pkt_buf_size = 0;
-			}
-		}
-	}
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel re-start..."));
-
-	status = nxge_rxdma_start_channel(nxgep, channel, rbrp, rcrp, mboxp);
-	if (status != NXGE_OK) {
-		goto fail;
-	}
-
-	MUTEX_EXIT(&rbrp->post_lock);
-	MUTEX_EXIT(&rbrp->lock);
-	MUTEX_EXIT(&rcrp->lock);
-
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"Recovery Successful, RxDMAChannel#%d Restored",
-			channel));
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fatal_err_recover"));
-
-	return (NXGE_OK);
-fail:
-	MUTEX_EXIT(&rbrp->post_lock);
-	MUTEX_EXIT(&rbrp->lock);
-	MUTEX_EXIT(&rcrp->lock);
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
-
-	return (NXGE_ERROR | rs);
-}
-
-nxge_status_t
-nxge_rx_port_fatal_err_recover(p_nxge_t nxgep)
-{
-	nxge_status_t		status = NXGE_OK;
-	p_nxge_dma_common_t	*dma_buf_p;
-	uint16_t		channel;
-	int			ndmas;
-	int			i;
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_port_fatal_err_recover"));
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"Recovering from RxPort error..."));
-	/* Disable RxMAC */
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxMAC...\n"));
-	if (nxge_rx_mac_disable(nxgep) != NXGE_OK)
-		goto fail;
-
-	NXGE_DELAY(1000);
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Stop all RxDMA channels..."));
-
-	ndmas = nxgep->rx_buf_pool_p->ndmas;
-	dma_buf_p = nxgep->rx_buf_pool_p->dma_buf_pool_p;
-
-	for (i = 0; i < ndmas; i++) {
-		channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel;
-		if (nxge_rxdma_fatal_err_recover(nxgep, channel) != NXGE_OK) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-					"Could not recover channel %d",
-					channel));
-		}
-	}
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset IPP..."));
-
-	/* Reset IPP */
-	if (nxge_ipp_reset(nxgep) != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_rx_port_fatal_err_recover: "
-			"Failed to reset IPP"));
-		goto fail;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset RxMAC..."));
-
-	/* Reset RxMAC */
-	if (nxge_rx_mac_reset(nxgep) != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_rx_port_fatal_err_recover: "
-			"Failed to reset RxMAC"));
-		goto fail;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize IPP..."));
-
-	/* Re-Initialize IPP */
-	if (nxge_ipp_init(nxgep) != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_rx_port_fatal_err_recover: "
-			"Failed to init IPP"));
-		goto fail;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize RxMAC..."));
-
-	/* Re-Initialize RxMAC */
-	if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_rx_port_fatal_err_recover: "
-			"Failed to init RxMAC"));
-		goto fail;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-enable RxMAC..."));
-
-	/* Re-enable RxMAC */
-	if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_rx_port_fatal_err_recover: "
-			"Failed to enable RxMAC"));
-		goto fail;
-	}
-
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"Recovery Successful, RxPort Restored"));
-
-	return (NXGE_OK);
-fail:
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
-	return (status);
-}
-
-void
-nxge_rxdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan)
-{
-	rx_dma_ctl_stat_t	cs;
-	rx_ctl_dat_fifo_stat_t	cdfs;
-
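-	/*
-	 * Simulate the requested error by setting the corresponding
-	 * bit in the RXDMA control/status debug register or the
-	 * control FIFO status debug register for the given channel.
-	 */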
-	switch (err_id) {
-	case NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR:
-	case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR:
-	case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR:
-	case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR:
-	case NXGE_FM_EREPORT_RDMC_RBR_TMOUT:
-	case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR:
-	case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS:
-	case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR:
-	case NXGE_FM_EREPORT_RDMC_RCRINCON:
-	case NXGE_FM_EREPORT_RDMC_RCRFULL:
-	case NXGE_FM_EREPORT_RDMC_RBRFULL:
-	case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE:
-	case NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE:
-	case NXGE_FM_EREPORT_RDMC_CONFIG_ERR:
-		RXDMA_REG_READ64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG,
-			chan, &cs.value);
-		if (err_id == NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR)
-			cs.bits.hdw.rcr_ack_err = 1;
-		else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR)
-			cs.bits.hdw.dc_fifo_err = 1;
-		else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR)
-			cs.bits.hdw.rcr_sha_par = 1;
-		else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR)
-			cs.bits.hdw.rbr_pre_par = 1;
-		else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT)
-			cs.bits.hdw.rbr_tmout = 1;
-		else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR)
-			cs.bits.hdw.rsp_cnt_err = 1;
-		else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS)
-			cs.bits.hdw.byte_en_bus = 1;
-		else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR)
-			cs.bits.hdw.rsp_dat_err = 1;
-		else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR)
-			cs.bits.hdw.config_err = 1;
-		else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON)
-			cs.bits.hdw.rcrincon = 1;
-		else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL)
-			cs.bits.hdw.rcrfull = 1;
-		else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL)
-			cs.bits.hdw.rbrfull = 1;
-		else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE)
-			cs.bits.hdw.rbrlogpage = 1;
-		else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE)
-			cs.bits.hdw.cfiglogpage = 1;
-		cmn_err(CE_NOTE, "!Write 0x%lx to RX_DMA_CTL_STAT_DBG_REG\n",
-				cs.value);
-		RXDMA_REG_WRITE64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG,
-			chan, cs.value);
-		break;
-	case NXGE_FM_EREPORT_RDMC_ID_MISMATCH:
-	case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR:
-	case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR:
-		cdfs.value = 0;
-		if (err_id ==  NXGE_FM_EREPORT_RDMC_ID_MISMATCH)
-			cdfs.bits.ldw.id_mismatch = (1 << nxgep->mac.portnum);
-		else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR)
-			cdfs.bits.ldw.zcp_eop_err = (1 << nxgep->mac.portnum);
-		else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR)
-			cdfs.bits.ldw.ipp_eop_err = (1 << nxgep->mac.portnum);
-		cmn_err(CE_NOTE,
-			"!Write 0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n",
-			cdfs.value);
-		RXDMA_REG_WRITE64(nxgep->npi_handle,
-			RX_CTL_DAT_FIFO_STAT_DBG_REG, chan, cdfs.value);
-		break;
-	case NXGE_FM_EREPORT_RDMC_DCF_ERR:
-		break;
-	case NXGE_FM_EREPORT_RDMC_COMPLETION_ERR:
-		break;
-	}
-}
-
-
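-/*
- * Translate an RCR packet buffer size code (bufsz_type) together
- * with the RBR configuration register into a buffer size in bytes.
- */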
-static uint16_t
-nxge_get_pktbuf_size(p_nxge_t nxgep, int bufsz_type, rbr_cfig_b_t rbr_cfgb)
-{
-	uint16_t sz = RBR_BKSIZE_8K_BYTES;
-
-	switch (bufsz_type) {
-	case RCR_PKTBUFSZ_0:
-		switch (rbr_cfgb.bits.ldw.bufsz0) {
-		case RBR_BUFSZ0_256B:
-			sz = RBR_BUFSZ0_256_BYTES;
-			break;
-		case RBR_BUFSZ0_512B:
-			sz = RBR_BUFSZ0_512B_BYTES;
-			break;
-		case RBR_BUFSZ0_1K:
-			sz = RBR_BUFSZ0_1K_BYTES;
-			break;
-		case RBR_BUFSZ0_2K:
-			sz = RBR_BUFSZ0_2K_BYTES;
-			break;
-		default:
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_get_pktbuf_size: bad bufsz0"));
-			break;
-		}
-		break;
-	case RCR_PKTBUFSZ_1:
-		switch (rbr_cfgb.bits.ldw.bufsz1) {
-		case RBR_BUFSZ1_1K:
-			sz = RBR_BUFSZ1_1K_BYTES;
-			break;
-		case RBR_BUFSZ1_2K:
-			sz = RBR_BUFSZ1_2K_BYTES;
-			break;
-		case RBR_BUFSZ1_4K:
-			sz = RBR_BUFSZ1_4K_BYTES;
-			break;
-		case RBR_BUFSZ1_8K:
-			sz = RBR_BUFSZ1_8K_BYTES;
-			break;
-		default:
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_get_pktbuf_size: bad bufsz1"));
-			break;
-		}
-		break;
-	case RCR_PKTBUFSZ_2:
-		switch (rbr_cfgb.bits.ldw.bufsz2) {
-		case RBR_BUFSZ2_2K:
-			sz = RBR_BUFSZ2_2K_BYTES;
-			break;
-		case RBR_BUFSZ2_4K:
-			sz = RBR_BUFSZ2_4K_BYTES;
-			break;
-		case RBR_BUFSZ2_8K:
-			sz = RBR_BUFSZ2_8K_BYTES;
-			break;
-		case RBR_BUFSZ2_16K:
-			sz = RBR_BUFSZ2_16K_BYTES;
-			break;
-		default:
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_get_pktbuf_size: bad bufsz2"));
-			break;
-		}
-		break;
-	case RCR_SINGLE_BLOCK:
-		switch (rbr_cfgb.bits.ldw.bksize) {
-		case BKSIZE_4K:
-			sz = RBR_BKSIZE_4K_BYTES;
-			break;
-		case BKSIZE_8K:
-			sz = RBR_BKSIZE_8K_BYTES;
-			break;
-		case BKSIZE_16K:
-			sz = RBR_BKSIZE_16K_BYTES;
-			break;
-		case BKSIZE_32K:
-			sz = RBR_BKSIZE_32K_BYTES;
-			break;
-		default:
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_get_pktbuf_size: bad bksize"));
-			break;
-		}
-		break;
-	default:
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-		"nxge_get_pktbuf_size: bad bufsz_type"));
-		break;
-	}
-	return (sz);
-}
--- a/usr/src/uts/sun4v/io/nxge/nxge_send.c	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1035 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#include <sys/nxge/nxge_impl.h>
-
-extern uint32_t		nxge_reclaim_pending;
-extern uint32_t 	nxge_bcopy_thresh;
-extern uint32_t 	nxge_dvma_thresh;
-extern uint32_t 	nxge_dma_stream_thresh;
-extern uint32_t		nxge_tx_minfree;
-extern uint32_t		nxge_tx_intr_thres;
-extern uint32_t		nxge_tx_max_gathers;
-extern uint32_t		nxge_tx_tiny_pack;
-extern uint32_t		nxge_tx_use_bcopy;
-extern uint32_t		nxge_tx_lb_policy;
-extern uint32_t		nxge_no_tx_lb;
-
-typedef struct _mac_tx_hint {
-	uint16_t	sap;
-	uint16_t	vid;
-	void		*hash;
-} mac_tx_hint_t, *p_mac_tx_hint_t;
-
-int nxge_tx_lb_ring_1(p_mblk_t, uint32_t, p_mac_tx_hint_t);
-
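-/*
- * nxge_start() - queue one packet (an mblk chain) onto a TX
- * descriptor ring.  Small fragments are bcopy'd into premapped
- * buffers, larger ones are DMA-bound directly, and the transmit
- * kick register is written at the end.
- */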
-int
-nxge_start(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, p_mblk_t mp)
-{
-	int 			status = 0;
-	p_tx_desc_t 		tx_desc_ring_vp;
-	npi_handle_t		npi_desc_handle;
-	nxge_os_dma_handle_t 	tx_desc_dma_handle;
-	p_tx_desc_t 		tx_desc_p;
-	p_tx_msg_t 		tx_msg_ring;
-	p_tx_msg_t 		tx_msg_p;
-	tx_desc_t		tx_desc, *tmp_desc_p;
-	tx_desc_t		sop_tx_desc, *sop_tx_desc_p;
-	p_tx_pkt_header_t	hdrp;
-	p_tx_pkt_hdr_all_t	pkthdrp;
-	uint8_t			npads = 0;
-	uint64_t 		dma_ioaddr;
-	uint32_t		dma_flags;
-	int			last_bidx;
-	uint8_t 		*b_rptr;
-	caddr_t 		kaddr;
-	uint32_t		nmblks;
-	uint32_t		ngathers;
-	uint32_t		clen;
-	int 			len;
-	uint32_t		pkt_len, pack_len, min_len;
-	uint32_t		bcopy_thresh;
-	int 			i, cur_index, sop_index;
-	uint16_t		tail_index;
-	boolean_t		tail_wrap = B_FALSE;
-	nxge_dma_common_t	desc_area;
-	nxge_os_dma_handle_t 	dma_handle;
-	ddi_dma_cookie_t 	dma_cookie;
-	npi_handle_t		npi_handle;
-	p_mblk_t 		nmp;
-	p_mblk_t		t_mp;
-	uint32_t 		ncookies;
-	boolean_t 		good_packet;
-	boolean_t 		mark_mode = B_FALSE;
-	p_nxge_stats_t 		statsp;
-	p_nxge_tx_ring_stats_t tdc_stats;
-	t_uscalar_t 		start_offset = 0;
-	t_uscalar_t 		stuff_offset = 0;
-	t_uscalar_t 		end_offset = 0;
-	t_uscalar_t 		value = 0;
-	t_uscalar_t 		cksum_flags = 0;
-	boolean_t		cksum_on = B_FALSE;
-	uint32_t		boff = 0;
-	uint64_t		tot_xfer_len = 0, tmp_len = 0;
-	boolean_t		header_set = B_FALSE;
-#ifdef NXGE_DEBUG
-	p_tx_desc_t 		tx_desc_ring_pp;
-	p_tx_desc_t 		tx_desc_pp;
-	tx_desc_t		*save_desc_p;
-	int			dump_len;
-	int			sad_len;
-	uint64_t		sad;
-	int			xfer_len;
-	uint32_t		msgsize;
-#endif
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL,
-		"==> nxge_start: tx dma channel %d", tx_ring_p->tdc));
-	NXGE_DEBUG_MSG((nxgep, TX_CTL,
-		"==> nxge_start: Starting tdc %d desc pending %d",
-		tx_ring_p->tdc, tx_ring_p->descs_pending));
-
-	statsp = nxgep->statsp;
-
-	if (nxgep->statsp->port_stats.lb_mode == nxge_lb_normal) {
-		if (!statsp->mac_stats.link_up) {
-			freemsg(mp);
-			NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start: "
-				"link not up or LB mode"));
-			goto nxge_start_fail1;
-		}
-	}
-
-	hcksum_retrieve(mp, NULL, NULL, &start_offset,
-		&stuff_offset, &end_offset, &value, &cksum_flags);
-	if (!NXGE_IS_VLAN_PACKET(mp->b_rptr)) {
-		start_offset += sizeof (ether_header_t);
-		stuff_offset += sizeof (ether_header_t);
-	} else {
-		start_offset += sizeof (struct ether_vlan_header);
-		stuff_offset += sizeof (struct ether_vlan_header);
-	}
-
-	if (cksum_flags & HCK_PARTIALCKSUM) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"==> nxge_start: cksum_flags 0x%x (partial checksum) ",
-			cksum_flags));
-		cksum_on = B_TRUE;
-	}
-
-#ifdef	NXGE_DEBUG
-	if (tx_ring_p->descs_pending) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start: "
-			"desc pending %d ", tx_ring_p->descs_pending));
-	}
-
-	dump_len = (int)(MBLKL(mp));
-	dump_len = (dump_len > 128) ? 128: dump_len;
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL,
-		"==> nxge_start: tdc %d: dumping ...: b_rptr $%p "
-		"(Before header reserve: ORIGINAL LEN %d)",
-		tx_ring_p->tdc,
-		mp->b_rptr,
-		dump_len));
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start: dump packets "
-		"(IP ORIGINAL b_rptr $%p): %s", mp->b_rptr,
-		nxge_dump_packet((char *)mp->b_rptr, dump_len)));
-#endif
-
-	MUTEX_ENTER(&tx_ring_p->lock);
-	tdc_stats = tx_ring_p->tdc_stats;
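-	/*
-	 * Request the hardware "mark" on this packet's SOP descriptor
-	 * when the number of free descriptors has dropped below
-	 * nxge_tx_minfree.
-	 */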
-	mark_mode = (tx_ring_p->descs_pending &&
-		((tx_ring_p->tx_ring_size - tx_ring_p->descs_pending)
-		< nxge_tx_minfree));
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL,
-		"TX Descriptor ring is channel %d mark mode %d",
-		tx_ring_p->tdc, mark_mode));
-
-	if (!nxge_txdma_reclaim(nxgep, tx_ring_p, nxge_tx_minfree)) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"TX Descriptor ring is full: channel %d",
-			tx_ring_p->tdc));
-		cas32((uint32_t *)&tx_ring_p->queueing, 0, 1);
-		tdc_stats->tx_no_desc++;
-		MUTEX_EXIT(&tx_ring_p->lock);
-		if (nxgep->resched_needed && !nxgep->resched_running) {
-			nxgep->resched_running = B_TRUE;
-			ddi_trigger_softintr(nxgep->resched_id);
-		}
-		status = 1;
-		goto nxge_start_fail1;
-	}
-
-	nmp = mp;
-	i = sop_index = tx_ring_p->wr_index;
-	nmblks = 0;
-	ngathers = 0;
-	pkt_len = 0;
-	pack_len = 0;
-	clen = 0;
-	last_bidx = -1;
-	good_packet = B_TRUE;
-
-	desc_area = tx_ring_p->tdc_desc;
-	npi_handle = desc_area.npi_handle;
-	npi_desc_handle.regh = (nxge_os_acc_handle_t)
-			DMA_COMMON_ACC_HANDLE(desc_area);
-	tx_desc_ring_vp = (p_tx_desc_t)DMA_COMMON_VPTR(desc_area);
-#ifdef	NXGE_DEBUG
-	tx_desc_ring_pp = (p_tx_desc_t)DMA_COMMON_IOADDR(desc_area);
-#endif
-	tx_desc_dma_handle = (nxge_os_dma_handle_t)
-			DMA_COMMON_HANDLE(desc_area);
-	tx_msg_ring = tx_ring_p->tx_msg_ring;
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start: wr_index %d i %d",
-		sop_index, i));
-
-#ifdef	NXGE_DEBUG
-	msgsize = msgdsize(nmp);
-	NXGE_DEBUG_MSG((nxgep, TX_CTL,
-		"==> nxge_start(1): wr_index %d i %d msgdsize %d",
-		sop_index, i, msgsize));
-#endif
-	/*
-	 * The first 16 bytes of the premapped buffer are reserved
-	 * for header. No padding will be used.
-	 */
-	pkt_len = pack_len = boff = TX_PKT_HEADER_SIZE;
-	if (nxge_tx_use_bcopy) {
-		bcopy_thresh = (nxge_bcopy_thresh - TX_PKT_HEADER_SIZE);
-	} else {
-		bcopy_thresh = (TX_BCOPY_SIZE - TX_PKT_HEADER_SIZE);
-	}
-	while (nmp) {
-		good_packet = B_TRUE;
-		b_rptr = nmp->b_rptr;
-		len = MBLKL(nmp);
-		if (len <= 0) {
-			nmp = nmp->b_cont;
-			continue;
-		}
-		nmblks++;
-
-		NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start(1): nmblks %d "
-			"len %d pkt_len %d pack_len %d",
-			nmblks, len, pkt_len, pack_len));
-		/*
-		 * Hardware limits the transfer length to 4K for NIU and
-		 * 4076 (TX_MAX_TRANSFER_LENGTH) for Neptune. But we just
-		 * use TX_MAX_TRANSFER_LENGTH as the limit for both.
-		 * If len is longer than the limit, then we break nmp into
-		 * two chunks: Make the first chunk equal to the limit and
-		 * the second chunk for the remaining data. If the second
-		 * chunk is still larger than the limit, then it will be
-		 * broken into two in the next pass.
-		 */
-		if (len > TX_MAX_TRANSFER_LENGTH - TX_PKT_HEADER_SIZE) {
-			t_mp = dupb(nmp);
-			nmp->b_wptr = nmp->b_rptr +
-				(TX_MAX_TRANSFER_LENGTH - TX_PKT_HEADER_SIZE);
-			t_mp->b_rptr = nmp->b_wptr;
-			t_mp->b_cont = nmp->b_cont;
-			nmp->b_cont = t_mp;
-			len = MBLKL(nmp);
-		}
-
-		tx_desc.value = 0;
-		tx_desc_p = &tx_desc_ring_vp[i];
-#ifdef	NXGE_DEBUG
-		tx_desc_pp = &tx_desc_ring_pp[i];
-#endif
-		tx_msg_p = &tx_msg_ring[i];
-		npi_desc_handle.regp = (uint64_t)tx_desc_p;
-		if (!header_set &&
-			((!nxge_tx_use_bcopy && (len > TX_BCOPY_SIZE)) ||
-				(len >= bcopy_thresh))) {
-			header_set = B_TRUE;
-			bcopy_thresh += TX_PKT_HEADER_SIZE;
-			boff = 0;
-			pack_len = 0;
-			kaddr = (caddr_t)DMA_COMMON_VPTR(tx_msg_p->buf_dma);
-			hdrp = (p_tx_pkt_header_t)kaddr;
-			clen = pkt_len;
-			dma_handle = tx_msg_p->buf_dma_handle;
-			dma_ioaddr = DMA_COMMON_IOADDR(tx_msg_p->buf_dma);
-			(void) ddi_dma_sync(dma_handle,
-				i * nxge_bcopy_thresh, nxge_bcopy_thresh,
-				DDI_DMA_SYNC_FORDEV);
-
-			tx_msg_p->flags.dma_type = USE_BCOPY;
-			goto nxge_start_control_header_only;
-		}
-
-		pkt_len += len;
-		pack_len += len;
-
-		NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start(3): "
-			"desc entry %d "
-			"DESC IOADDR $%p "
-			"desc_vp $%p tx_desc_p $%p "
-			"desc_pp $%p tx_desc_pp $%p "
-			"len %d pkt_len %d pack_len %d",
-			i,
-			DMA_COMMON_IOADDR(desc_area),
-			tx_desc_ring_vp, tx_desc_p,
-			tx_desc_ring_pp, tx_desc_pp,
-			len, pkt_len, pack_len));
-
-		if (len < bcopy_thresh) {
-			NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start(4): "
-				"USE BCOPY: "));
-			if (nxge_tx_tiny_pack) {
-				uint32_t blst =
-					TXDMA_DESC_NEXT_INDEX(i, -1,
-						tx_ring_p->tx_wrap_mask);
-				NXGE_DEBUG_MSG((nxgep, TX_CTL,
-					"==> nxge_start(5): pack"));
-				if ((pack_len <= bcopy_thresh) &&
-					(last_bidx == blst)) {
-					NXGE_DEBUG_MSG((nxgep, TX_CTL,
-						"==> nxge_start: pack(6) "
-						"(pkt_len %d pack_len %d)",
-						pkt_len, pack_len));
-					i = blst;
-					tx_desc_p = &tx_desc_ring_vp[i];
-#ifdef	NXGE_DEBUG
-					tx_desc_pp = &tx_desc_ring_pp[i];
-#endif
-					tx_msg_p = &tx_msg_ring[i];
-					boff = pack_len - len;
-					ngathers--;
-				} else if (pack_len > bcopy_thresh &&
-					header_set) {
-					pack_len = len;
-					boff = 0;
-					bcopy_thresh = nxge_bcopy_thresh;
-					NXGE_DEBUG_MSG((nxgep, TX_CTL,
-						"==> nxge_start(7): > max NEW "
-						"bcopy thresh %d "
-						"pkt_len %d pack_len %d(next)",
-						bcopy_thresh,
-						pkt_len, pack_len));
-				}
-				last_bidx = i;
-			}
-			kaddr = (caddr_t)DMA_COMMON_VPTR(tx_msg_p->buf_dma);
-			if ((boff == TX_PKT_HEADER_SIZE) && (nmblks == 1)) {
-				hdrp = (p_tx_pkt_header_t)kaddr;
-				header_set = B_TRUE;
-				NXGE_DEBUG_MSG((nxgep, TX_CTL,
-					"==> nxge_start(7_x2): "
-					"pkt_len %d pack_len %d (new hdrp $%p)",
-					pkt_len, pack_len, hdrp));
-			}
-			tx_msg_p->flags.dma_type = USE_BCOPY;
-			kaddr += boff;
-			NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start(8): "
-				"USE BCOPY: before bcopy "
-				"DESC IOADDR $%p entry %d "
-				"bcopy packets %d "
-				"bcopy kaddr $%p "
-				"bcopy ioaddr (SAD) $%p "
-				"bcopy clen %d "
-				"bcopy boff %d",
-				DMA_COMMON_IOADDR(desc_area), i,
-				tdc_stats->tx_hdr_pkts,
-				kaddr,
-				dma_ioaddr,
-				clen,
-				boff));
-			NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start: "
-				"1USE BCOPY: "));
-			NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start: "
-				"2USE BCOPY: "));
-			NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start: "
-				"last USE BCOPY: copy from b_rptr $%p "
-				"to KADDR $%p (len %d offset %d",
-				b_rptr, kaddr, len, boff));
-
-			bcopy(b_rptr, kaddr, len);
-
-#ifdef	NXGE_DEBUG
-			dump_len = (len > 128) ? 128: len;
-			NXGE_DEBUG_MSG((nxgep, TX_CTL,
-				"==> nxge_start: dump packets "
-				"(After BCOPY len %d)"
-				"(b_rptr $%p): %s", len, nmp->b_rptr,
-				nxge_dump_packet((char *)nmp->b_rptr,
-				dump_len)));
-#endif
-
-			dma_handle = tx_msg_p->buf_dma_handle;
-			dma_ioaddr = DMA_COMMON_IOADDR(tx_msg_p->buf_dma);
-			(void) ddi_dma_sync(dma_handle,
-				i * nxge_bcopy_thresh, nxge_bcopy_thresh,
-					DDI_DMA_SYNC_FORDEV);
-			clen = len + boff;
-			tdc_stats->tx_hdr_pkts++;
-			NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start(9): "
-				"USE BCOPY: "
-				"DESC IOADDR $%p entry %d "
-				"bcopy packets %d "
-				"bcopy kaddr $%p "
-				"bcopy ioaddr (SAD) $%p "
-				"bcopy clen %d "
-				"bcopy boff %d",
-				DMA_COMMON_IOADDR(desc_area),
-				i,
-				tdc_stats->tx_hdr_pkts,
-				kaddr,
-				dma_ioaddr,
-				clen,
-				boff));
-		} else {
-			NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start(12): "
-				"USE DVMA: len %d", len));
-			tx_msg_p->flags.dma_type = USE_DMA;
-			dma_flags = DDI_DMA_WRITE;
-			if (len < nxge_dma_stream_thresh) {
-				dma_flags |= DDI_DMA_CONSISTENT;
-			} else {
-				dma_flags |= DDI_DMA_STREAMING;
-			}
-
-			dma_handle = tx_msg_p->dma_handle;
-			status = ddi_dma_addr_bind_handle(dma_handle, NULL,
-				(caddr_t)b_rptr, len, dma_flags,
-				DDI_DMA_DONTWAIT, NULL,
-				&dma_cookie, &ncookies);
-			if (status == DDI_DMA_MAPPED) {
-				dma_ioaddr = dma_cookie.dmac_laddress;
-				len = (int)dma_cookie.dmac_size;
-				clen = (uint32_t)dma_cookie.dmac_size;
-				NXGE_DEBUG_MSG((nxgep, TX_CTL,
-					"==> nxge_start(12_1): "
-					"USE DVMA: len %d clen %d "
-					"ngathers %d",
-					len, clen,
-					ngathers));
-
-				npi_desc_handle.regp = (uint64_t)tx_desc_p;
-				while (ncookies > 1) {
-					ngathers++;
-					/*
-					 * Handle multiple DMA cookies:
-					 * each cookie is basically one
-					 * descriptor entry; the SOP bit
-					 * and related fields are not
-					 * set here.
-					 */
-
-					(void) npi_txdma_desc_gather_set(
-						npi_desc_handle,
-						&tx_desc,
-						(ngathers -1),
-						mark_mode,
-						ngathers,
-						dma_ioaddr,
-						clen);
-
-					tx_msg_p->tx_msg_size = clen;
-					NXGE_DEBUG_MSG((nxgep, TX_CTL,
-						"==> nxge_start:  DMA "
-						"ncookie %d "
-						"ngathers %d "
-						"dma_ioaddr $%p len %d"
-						"desc $%p descp $%p (%d)",
-						ncookies,
-						ngathers,
-						dma_ioaddr, clen,
-						*tx_desc_p, tx_desc_p, i));
-
-					ddi_dma_nextcookie(dma_handle,
-							&dma_cookie);
-					dma_ioaddr =
-						dma_cookie.dmac_laddress;
-
-					len = (int)dma_cookie.dmac_size;
-					clen = (uint32_t)dma_cookie.dmac_size;
-					NXGE_DEBUG_MSG((nxgep, TX_CTL,
-						"==> nxge_start(12_2): "
-						"USE DVMA: len %d clen %d ",
-						len, clen));
-
-					i = TXDMA_DESC_NEXT_INDEX(i, 1,
-						tx_ring_p->tx_wrap_mask);
-					tx_desc_p = &tx_desc_ring_vp[i];
-
-					npi_desc_handle.regp =
-						(uint64_t)tx_desc_p;
-					tx_msg_p = &tx_msg_ring[i];
-					tx_msg_p->flags.dma_type = USE_NONE;
-					tx_desc.value = 0;
-
-					ncookies--;
-				}
-				tdc_stats->tx_ddi_pkts++;
-				NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start:"
-					"DMA: ddi packets %d",
-					tdc_stats->tx_ddi_pkts));
-			} else {
-				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				    "dma mapping failed for %d "
-				    "bytes addr $%p flags %x (%d)",
-				    len, b_rptr, status, status));
-				good_packet = B_FALSE;
-				tdc_stats->tx_dma_bind_fail++;
-				tx_msg_p->flags.dma_type = USE_NONE;
-				goto nxge_start_fail2;
-			}
-		} /* ddi dvma */
-
-		nmp = nmp->b_cont;
-nxge_start_control_header_only:
-		npi_desc_handle.regp = (uint64_t)tx_desc_p;
-		ngathers++;
-
-		if (ngathers == 1) {
-#ifdef	NXGE_DEBUG
-			save_desc_p = &sop_tx_desc;
-#endif
-			sop_tx_desc_p = &sop_tx_desc;
-			sop_tx_desc_p->value = 0;
-			sop_tx_desc_p->bits.hdw.tr_len = clen;
-			sop_tx_desc_p->bits.hdw.sad = dma_ioaddr >> 32;
-			sop_tx_desc_p->bits.ldw.sad = dma_ioaddr & 0xffffffff;
-		} else {
-#ifdef	NXGE_DEBUG
-			save_desc_p = &tx_desc;
-#endif
-			tmp_desc_p = &tx_desc;
-			tmp_desc_p->value = 0;
-			tmp_desc_p->bits.hdw.tr_len = clen;
-			tmp_desc_p->bits.hdw.sad = dma_ioaddr >> 32;
-			tmp_desc_p->bits.ldw.sad = dma_ioaddr & 0xffffffff;
-
-			tx_desc_p->value = tmp_desc_p->value;
-		}
-
-		NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start(13): "
-			"Desc_entry %d ngathers %d "
-			"desc_vp $%p tx_desc_p $%p "
-			"len %d clen %d pkt_len %d pack_len %d nmblks %d "
-			"dma_ioaddr (SAD) $%p mark %d",
-			i, ngathers,
-			tx_desc_ring_vp, tx_desc_p,
-			len, clen, pkt_len, pack_len, nmblks,
-			dma_ioaddr, mark_mode));
-
-#ifdef NXGE_DEBUG
-		npi_desc_handle.nxgep = nxgep;
-		npi_desc_handle.function.function = nxgep->function_num;
-		npi_desc_handle.function.instance = nxgep->instance;
-		sad = (save_desc_p->value & TX_PKT_DESC_SAD_MASK);
-		xfer_len = ((save_desc_p->value & TX_PKT_DESC_TR_LEN_MASK) >>
-			TX_PKT_DESC_TR_LEN_SHIFT);
-
-
-		NXGE_DEBUG_MSG((nxgep, TX_CTL, "\n\t: value 0x%llx\n"
-			"\t\tsad $%p\ttr_len %d len %d\tnptrs %d\t"
-			"mark %d sop %d\n",
-			save_desc_p->value,
-			sad,
-			save_desc_p->bits.hdw.tr_len,
-			xfer_len,
-			save_desc_p->bits.hdw.num_ptr,
-			save_desc_p->bits.hdw.mark,
-			save_desc_p->bits.hdw.sop));
-
-		npi_txdma_dump_desc_one(npi_desc_handle, NULL, i);
-#endif
-
-		tx_msg_p->tx_msg_size = clen;
-		i = TXDMA_DESC_NEXT_INDEX(i, 1, tx_ring_p->tx_wrap_mask);
-		if (ngathers > nxge_tx_max_gathers) {
-			good_packet = B_FALSE;
-			hcksum_retrieve(mp, NULL, NULL, &start_offset,
-				&stuff_offset, &end_offset, &value,
-				&cksum_flags);
-
-			NXGE_DEBUG_MSG((NULL, TX_CTL,
-				"==> nxge_start(14): pull msg - "
-				"len %d pkt_len %d ngathers %d",
-				len, pkt_len, ngathers));
-			/* Pull all message blocks from b_cont */
-			if ((msgpullup(mp, -1)) == NULL) {
-				goto nxge_start_fail2;
-			}
-			goto nxge_start_fail2;
-		}
-	} /* while (nmp) */
-
-	tx_msg_p->tx_message = mp;
-	tx_desc_p = &tx_desc_ring_vp[sop_index];
-	npi_desc_handle.regp = (uint64_t)tx_desc_p;
-
-	pkthdrp = (p_tx_pkt_hdr_all_t)hdrp;
-	pkthdrp->reserved = 0;
-	hdrp->value = 0;
-	(void) nxge_fill_tx_hdr(mp, B_FALSE, cksum_on,
-		(pkt_len - TX_PKT_HEADER_SIZE), npads, pkthdrp);
-
-	if (pkt_len > NXGE_MTU_DEFAULT_MAX) {
-		tdc_stats->tx_jumbo_pkts++;
-	}
-
-	min_len = (nxgep->msg_min + TX_PKT_HEADER_SIZE + (npads * 2));
-	if (pkt_len < min_len) {
-		/* Assume we use bcopy to premapped buffers */
-		kaddr = (caddr_t)DMA_COMMON_VPTR(tx_msg_p->buf_dma);
-		NXGE_DEBUG_MSG((NULL, TX_CTL,
-			"==> nxge_start(14-1): < (msg_min + 16)"
-			"len %d pkt_len %d min_len %d bzero %d ngathers %d",
-			len, pkt_len, min_len, (min_len - pkt_len), ngathers));
-		bzero((kaddr + pkt_len), (min_len - pkt_len));
-		pkt_len = tx_msg_p->tx_msg_size = min_len;
-
-		sop_tx_desc_p->bits.hdw.tr_len = min_len;
-
-		NXGE_MEM_PIO_WRITE64(npi_desc_handle, sop_tx_desc_p->value);
-		tx_desc_p->value = sop_tx_desc_p->value;
-
-		NXGE_DEBUG_MSG((NULL, TX_CTL,
-			"==> nxge_start(14-2): < msg_min - "
-			"len %d pkt_len %d min_len %d ngathers %d",
-			len, pkt_len, min_len, ngathers));
-	}
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start: cksum_flags 0x%x ",
-		cksum_flags));
-	if (cksum_flags & HCK_PARTIALCKSUM) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"==> nxge_start: cksum_flags 0x%x (partial checksum) ",
-			cksum_flags));
-		cksum_on = B_TRUE;
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"==> nxge_start: from IP cksum_flags 0x%x "
-			"(partial checksum) "
-			"start_offset %d stuff_offset %d",
-			cksum_flags, start_offset, stuff_offset));
-		tmp_len = (uint64_t)(start_offset >> 1);
-		hdrp->value |= (tmp_len << TX_PKT_HEADER_L4START_SHIFT);
-		tmp_len = (uint64_t)(stuff_offset >> 1);
-		hdrp->value |= (tmp_len << TX_PKT_HEADER_L4STUFF_SHIFT);
-
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"==> nxge_start: from IP cksum_flags 0x%x "
-			"(partial checksum) "
-			"after SHIFT start_offset %d stuff_offset %d",
-			cksum_flags, start_offset, stuff_offset));
-	}
-	{
-		uint64_t	tmp_len;
-
-		/* pkt_len already includes 16 + paddings!! */
-		/* Update the control header length */
-		tot_xfer_len = (pkt_len - TX_PKT_HEADER_SIZE);
-		tmp_len = hdrp->value |
-			(tot_xfer_len << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT);
-
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"==> nxge_start(15_x1): setting SOP "
-			"tot_xfer_len 0x%llx (%d) pkt_len %d tmp_len "
-			"0x%llx hdrp->value 0x%llx",
-			tot_xfer_len, tot_xfer_len, pkt_len,
-			tmp_len, hdrp->value));
-#if defined(_BIG_ENDIAN)
-		hdrp->value = ddi_swap64(tmp_len);
-#else
-		hdrp->value = tmp_len;
-#endif
-		NXGE_DEBUG_MSG((nxgep,
-			TX_CTL, "==> nxge_start(15_x2): setting SOP "
-			"after SWAP: tot_xfer_len 0x%llx pkt_len %d "
-			"tmp_len 0x%llx hdrp->value 0x%llx",
-			tot_xfer_len, pkt_len,
-			tmp_len, hdrp->value));
-	}
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start(15): setting SOP "
-		"wr_index %d "
-		"tot_xfer_len (%d) pkt_len %d npads %d",
-		sop_index,
-		tot_xfer_len, pkt_len,
-		npads));
-
-	sop_tx_desc_p->bits.hdw.sop = 1;
-	sop_tx_desc_p->bits.hdw.mark = mark_mode;
-	sop_tx_desc_p->bits.hdw.num_ptr = ngathers;
-
-	NXGE_MEM_PIO_WRITE64(npi_desc_handle, sop_tx_desc_p->value);
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start(16): set SOP done"));
-
-#ifdef NXGE_DEBUG
-	npi_desc_handle.nxgep = nxgep;
-	npi_desc_handle.function.function = nxgep->function_num;
-	npi_desc_handle.function.instance = nxgep->instance;
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "\n\t: value 0x%llx\n"
-		"\t\tsad $%p\ttr_len %d len %d\tnptrs %d\tmark %d sop %d\n",
-		save_desc_p->value,
-		sad,
-		save_desc_p->bits.hdw.tr_len,
-		xfer_len,
-		save_desc_p->bits.hdw.num_ptr,
-		save_desc_p->bits.hdw.mark,
-		save_desc_p->bits.hdw.sop));
-	(void) npi_txdma_dump_desc_one(npi_desc_handle, NULL, sop_index);
-
-	dump_len = (pkt_len > 128) ? 128: pkt_len;
-	NXGE_DEBUG_MSG((nxgep, TX_CTL,
-		"==> nxge_start: dump packets(17) (after sop set, len "
-		" (len/dump_len/pkt_len/tot_xfer_len) %d/%d/%d/%d):\n"
-		"ptr $%p: %s", len, dump_len, pkt_len, tot_xfer_len,
-		(char *)hdrp,
-		nxge_dump_packet((char *)hdrp, dump_len)));
-	NXGE_DEBUG_MSG((nxgep, TX_CTL,
-		"==> nxge_start(18): TX desc sync: sop_index %d",
-			sop_index));
-#endif
-
-	if ((ngathers == 1) || tx_ring_p->wr_index < i) {
-		(void) ddi_dma_sync(tx_desc_dma_handle,
-			sop_index * sizeof (tx_desc_t),
-			ngathers * sizeof (tx_desc_t),
-			DDI_DMA_SYNC_FORDEV);
-
-		NXGE_DEBUG_MSG((nxgep, TX_CTL, "nxge_start(19): sync 1 "
-			"cs_off = 0x%02X cs_s_off = 0x%02X "
-			"pkt_len %d ngathers %d sop_index %d\n",
-			stuff_offset, start_offset,
-			pkt_len, ngathers, sop_index));
-	} else { /* more than one descriptor and wrap around */
-		uint32_t nsdescs = tx_ring_p->tx_ring_size - sop_index;
-		(void) ddi_dma_sync(tx_desc_dma_handle,
-			sop_index * sizeof (tx_desc_t),
-			nsdescs * sizeof (tx_desc_t),
-			DDI_DMA_SYNC_FORDEV);
-		NXGE_DEBUG_MSG((nxgep, TX_CTL, "nxge_start(20): sync 1 "
-			"cs_off = 0x%02X cs_s_off = 0x%02X "
-			"pkt_len %d ngathers %d sop_index %d\n",
-			stuff_offset, start_offset,
-				pkt_len, ngathers, sop_index));
-
-		(void) ddi_dma_sync(tx_desc_dma_handle,
-			0,
-			(ngathers - nsdescs) * sizeof (tx_desc_t),
-			DDI_DMA_SYNC_FORDEV);
-		NXGE_DEBUG_MSG((nxgep, TX_CTL, "nxge_start(21): sync 2 "
-			"cs_off = 0x%02X cs_s_off = 0x%02X "
-			"pkt_len %d ngathers %d sop_index %d\n",
-			stuff_offset, start_offset,
-			pkt_len, ngathers, sop_index));
-	}
-
-	tail_index = tx_ring_p->wr_index;
-	tail_wrap = tx_ring_p->wr_index_wrap;
-
-	tx_ring_p->wr_index = i;
-	if (tx_ring_p->wr_index <= tail_index) {
-		tx_ring_p->wr_index_wrap = ((tail_wrap == B_TRUE) ?
-						B_FALSE : B_TRUE);
-	}
-
-	tx_ring_p->descs_pending += ngathers;
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start: TX kick: "
-		"channel %d wr_index %d wrap %d ngathers %d desc_pend %d",
-		tx_ring_p->tdc,
-		tx_ring_p->wr_index,
-		tx_ring_p->wr_index_wrap,
-		ngathers,
-		tx_ring_p->descs_pending));
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start: TX KICKING: "));
-
-	{
-		tx_ring_kick_t		kick;
-
-		kick.value = 0;
-		kick.bits.ldw.wrap = tx_ring_p->wr_index_wrap;
-		kick.bits.ldw.tail = (uint16_t)tx_ring_p->wr_index;
-
-		/* Kick start the Transmit kick register */
-		TXDMA_REG_WRITE64(NXGE_DEV_NPI_HANDLE(nxgep),
-			TX_RING_KICK_REG,
-			(uint8_t)tx_ring_p->tdc,
-			kick.value);
-	}
-
-	tdc_stats->tx_starts++;
-
-	MUTEX_EXIT(&tx_ring_p->lock);
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_start"));
-
-	return (status);
-
-nxge_start_fail2:
-	if (good_packet == B_FALSE) {
-		cur_index = sop_index;
-		NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start: clean up"));
-		for (i = 0; i < ngathers; i++) {
-			tx_desc_p = &tx_desc_ring_vp[cur_index];
-			npi_handle.regp = (uint64_t)tx_desc_p;
-			tx_msg_p = &tx_msg_ring[cur_index];
-			(void) npi_txdma_desc_set_zero(npi_handle, 1);
-			if (tx_msg_p->flags.dma_type == USE_DVMA) {
-				NXGE_DEBUG_MSG((nxgep, TX_CTL,
-					"tx_desc_p = %X index = %d",
-					tx_desc_p, tx_ring_p->rd_index));
-				(void) dvma_unload(
-						tx_msg_p->dvma_handle,
-						0, -1);
-				tx_msg_p->dvma_handle = NULL;
-				if (tx_ring_p->dvma_wr_index ==
-					tx_ring_p->dvma_wrap_mask)
-					tx_ring_p->dvma_wr_index = 0;
-				else
-					tx_ring_p->dvma_wr_index++;
-				tx_ring_p->dvma_pending--;
-			} else if (tx_msg_p->flags.dma_type ==
-					USE_DMA) {
-				if (ddi_dma_unbind_handle(
-					tx_msg_p->dma_handle))
-					cmn_err(CE_WARN, "!nxge_start: "
-						"ddi_dma_unbind_handle failed");
-			}
-			tx_msg_p->flags.dma_type = USE_NONE;
-			cur_index = TXDMA_DESC_NEXT_INDEX(cur_index, 1,
-				tx_ring_p->tx_wrap_mask);
-
-		}
-
-		nxgep->resched_needed = B_TRUE;
-	}
-
-	MUTEX_EXIT(&tx_ring_p->lock);
-
-nxge_start_fail1:
-	/* Add FMA to check the access handle nxge_hregh */
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_start"));
-
-	return (status);
-}
-
-boolean_t
-nxge_send(p_nxge_t nxgep, mblk_t *mp, p_mac_tx_hint_t hp)
-{
-	p_tx_ring_t 		*tx_rings;
-	uint8_t			ring_index;
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_send"));
-
-	ASSERT(mp->b_next == NULL);
-
-	ring_index = nxge_tx_lb_ring_1(mp, nxgep->max_tdcs, hp);
-	tx_rings = nxgep->tx_rings->rings;
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_send: tx_rings $%p",
-		tx_rings));
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_send: max_tdcs %d "
-		"ring_index %d", nxgep->max_tdcs, ring_index));
-
-	if (nxge_start(nxgep, tx_rings[ring_index], mp)) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_send: failed "
-			"ring index %d", ring_index));
-		return (B_FALSE);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_send: ring index %d",
-		ring_index));
-
-	return (B_TRUE);
-}
-
-/*
- * nxge_m_tx() - send a chain of packets
- */
-mblk_t *
-nxge_m_tx(void *arg, mblk_t *mp)
-{
-	p_nxge_t 		nxgep = (p_nxge_t)arg;
-	mblk_t 			*next;
-	mac_tx_hint_t		hint;
-
-	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
-		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-			"==> nxge_m_tx: hardware not initialized"));
-		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-			"<== nxge_m_tx"));
-		return (mp);
-	}
-
-	hint.hash =  NULL;
-	hint.vid =  0;
-	hint.sap =  0;
-
-	while (mp != NULL) {
-		next = mp->b_next;
-		mp->b_next = NULL;
-
-		/*
-		 * Until the Nemo TX resource framework is available,
-		 * the MAC driver load-balances based on TCP port or
-		 * CPU.  For debugging, a system-configurable parameter
-		 * is used.
-		 */
-		if (!nxge_send(nxgep, mp, &hint)) {
-			mp->b_next = next;
-			break;
-		}
-
-		mp = next;
-	}
-
-	return (mp);
-}
-
-int
-nxge_tx_lb_ring_1(p_mblk_t mp, uint32_t maxtdcs, p_mac_tx_hint_t hp)
-{
-	uint8_t 		ring_index = 0;
-	uint8_t 		*tcp_port;
-	p_mblk_t 		nmp;
-	size_t 			mblk_len;
-	size_t 			iph_len;
-	size_t 			hdrs_size;
-	uint8_t			hdrs_buf[sizeof (struct  ether_header) +
-					IP_MAX_HDR_LENGTH + sizeof (uint32_t)];
-				/*
-				 * Space big enough to cover the maximum
-				 * IP header length and the first 4 bytes
-				 * of the TCP/UDP header (the ports).
-				 */
-
-	boolean_t		qos = B_FALSE;
-
-	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_lb_ring"));
-
-	if (hp->vid) {
-		qos = B_TRUE;
-	}
-	switch (nxge_tx_lb_policy) {
-	case NXGE_TX_LB_TCPUDP: /* default IPv4 TCP/UDP */
-	default:
-		tcp_port = mp->b_rptr;
-		if (!nxge_no_tx_lb && !qos &&
-			(ntohs(((p_ether_header_t)tcp_port)->ether_type)
-				== ETHERTYPE_IP)) {
-			nmp = mp;
-			mblk_len = MBLKL(nmp);
-			tcp_port = NULL;
-			if (mblk_len > sizeof (struct ether_header) +
-					sizeof (uint8_t)) {
-				tcp_port = nmp->b_rptr +
-					sizeof (struct ether_header);
-				mblk_len -= sizeof (struct ether_header);
-				iph_len = ((*tcp_port) & 0x0f) << 2;
-				if (mblk_len > (iph_len + sizeof (uint32_t))) {
-					tcp_port = nmp->b_rptr;
-				} else {
-					tcp_port = NULL;
-				}
-			}
-			if (tcp_port == NULL) {
-				hdrs_size = 0;
-				((p_ether_header_t)hdrs_buf)->ether_type = 0;
-				while ((nmp) && (hdrs_size <
-						sizeof (hdrs_buf))) {
-					mblk_len = MBLKL(nmp);
-					if (mblk_len >=
-						(sizeof (hdrs_buf) - hdrs_size))
-						mblk_len = sizeof (hdrs_buf) -
-							hdrs_size;
-					bcopy(nmp->b_rptr,
-						&hdrs_buf[hdrs_size], mblk_len);
-					hdrs_size += mblk_len;
-					nmp = nmp->b_cont;
-				}
-				tcp_port = hdrs_buf;
-			}
-			tcp_port += sizeof (ether_header_t);
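-			/*
-			 * For unfragmented TCP/UDP packets, hash on the
-			 * low bytes of the source and destination ports;
-			 * otherwise fall back to the last byte of the
-			 * destination IP address.
-			 */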
-			if (!(tcp_port[6] & 0x3f) && !(tcp_port[7] & 0xff)) {
-				if ((tcp_port[9] == IPPROTO_TCP) ||
-						(tcp_port[9] == IPPROTO_UDP)) {
-					tcp_port += ((*tcp_port) & 0x0f) << 2;
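-					/*
-					 * Hash the low bytes of the
-					 * source and destination ports
-					 * to pick a TX ring.
-					 */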
-					ring_index =
-						((tcp_port[1] ^ tcp_port[3])
-						% maxtdcs);
-				} else {
-					ring_index = tcp_port[19] % maxtdcs;
-				}
-			} else { /* fragmented packet */
-				ring_index = tcp_port[19] % maxtdcs;
-			}
-		} else {
-			ring_index = mp->b_band % maxtdcs;
-		}
-		break;
-
-	case NXGE_TX_LB_HASH:
-		if (hp->hash) {
-			ring_index = ((uint64_t)(hp->hash) % maxtdcs);
-		} else {
-			ring_index = mp->b_band % maxtdcs;
-		}
-		break;
-
-	case NXGE_TX_LB_DEST_MAC: /* Use destination MAC address */
-		tcp_port = mp->b_rptr;
-		ring_index = tcp_port[5] % maxtdcs;
-		break;
-	}
-
-	NXGE_DEBUG_MSG((NULL, TX_CTL, "<== nxge_tx_lb_ring"));
-
-	return (ring_index);
-}
-
-uint_t
-nxge_reschedule(caddr_t arg)
-{
-	p_nxge_t nxgep;
-
-	nxgep = (p_nxge_t)arg;
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reschedule"));
-
-	if (nxgep->nxge_mac_state == NXGE_MAC_STARTED &&
-			nxgep->resched_needed) {
-		mac_tx_update(nxgep->mach);
-		nxgep->resched_needed = B_FALSE;
-		nxgep->resched_running = B_FALSE;
-	}
-
-	NXGE_DEBUG_MSG((NULL, TX_CTL, "<== nxge_reschedule"));
-	return (DDI_INTR_CLAIMED);
-}
--- a/usr/src/uts/sun4v/io/nxge/nxge_txc.c	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,420 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#include <sys/nxge/nxge_impl.h>
-#include <sys/nxge/nxge_txc.h>
-
-static nxge_status_t
-nxge_txc_handle_port_errors(p_nxge_t, uint32_t);
-static void
-nxge_txc_inject_port_err(uint8_t, txc_int_stat_dbg_t *,
-			uint8_t istats);
-extern nxge_status_t nxge_tx_port_fatal_err_recover(p_nxge_t);
-
-nxge_status_t
-nxge_txc_init(p_nxge_t nxgep)
-{
-	uint8_t			port;
-	npi_handle_t		handle;
-	npi_status_t		rs = NPI_SUCCESS;
-
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	port = NXGE_GET_PORT_NUM(nxgep->function_num);
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txc_init: portn %d", port));
-
-	/*
-	 * Enable the TXC controller.
-	 */
-	if ((rs = npi_txc_global_enable(handle)) != NPI_SUCCESS) {
-		goto fail;
-	}
-
-	/* Enable this port within the TXC. */
-	if ((rs = npi_txc_port_enable(handle, port)) != NPI_SUCCESS) {
-		goto fail;
-	}
-
-	/* Bind DMA channels to this port. */
-	if ((rs = npi_txc_port_dma_enable(handle, port,
-			TXDMA_PORT_BITMAP(nxgep))) != NPI_SUCCESS) {
-		goto fail;
-	}
-
-	/* Unmask all TXC interrupts */
-	npi_txc_global_imask_set(handle, port, 0);
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txc_init: portn %d", port));
-
-	return (NXGE_OK);
-fail:
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_txc_init: Failed to initialize txc on port %d",
-			port));
-
-	return (NXGE_ERROR | rs);
-}
-
-nxge_status_t
-nxge_txc_uninit(p_nxge_t nxgep)
-{
-	uint8_t			port;
-	npi_handle_t		handle;
-	npi_status_t		rs = NPI_SUCCESS;
-
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	port = NXGE_GET_PORT_NUM(nxgep->function_num);
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txc_uninit: portn %d", port));
-
-	/*
-	 * disable the TXC controller.
-	 */
-	if ((rs = npi_txc_global_disable(handle)) != NPI_SUCCESS) {
-		goto fail;
-	}
-
-	/* disable this port within the TXC. */
-	if ((rs = npi_txc_port_disable(handle, port)) != NPI_SUCCESS) {
-		goto fail;
-	}
-
-	/* Unbind DMA channels from this port. */
-	if ((rs = npi_txc_port_dma_enable(handle, port, 0)) != NPI_SUCCESS) {
-		goto fail;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txc_uninit: portn %d", port));
-
-	return (NXGE_OK);
-fail:
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_txc_uninit: Failed to uninitialize txc on port %d",
-			port));
-
-	return (NXGE_ERROR | rs);
-}
-
-void
-nxge_txc_regs_dump(p_nxge_t nxgep)
-{
-	uint32_t		cnt1, cnt2;
-	npi_handle_t		handle;
-	txc_control_t		control;
-	uint32_t		bitmap = 0;
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "\nTXC dump: func # %d:\n",
-		nxgep->function_num));
-
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-
-	(void) npi_txc_control(handle, OP_GET, &control);
-	(void) npi_txc_port_dma_list_get(handle, nxgep->function_num, &bitmap);
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "\n\tTXC port control 0x%0llx",
-		(long long)control.value));
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "\n\tTXC port bitmap 0x%x", bitmap));
-
-	(void) npi_txc_pkt_xmt_to_mac_get(handle, nxgep->function_num,
-	    &cnt1, &cnt2);
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "\n\tTXC bytes to MAC %d "
-		"packets to MAC %d",
-		cnt1, cnt2));
-
-	(void) npi_txc_pkt_stuffed_get(handle, nxgep->function_num,
-					    &cnt1, &cnt2);
-	NXGE_DEBUG_MSG((nxgep, TX_CTL,
-		"\n\tTXC assembled packets %d reorder packets %d",
-		cnt1 & 0xffff, cnt2 & 0xffff));
-
-	(void) npi_txc_reorder_get(handle, nxgep->function_num, &cnt1);
-	NXGE_DEBUG_MSG((nxgep, TX_CTL,
-		"\n\tTXC reorder resource %d", cnt1 & 0xff));
-}
-
-nxge_status_t
-nxge_txc_handle_sys_errors(p_nxge_t nxgep)
-{
-	npi_handle_t		handle;
-	txc_int_stat_t		istatus;
-	uint32_t		err_status;
-	uint8_t			err_portn;
-	boolean_t		my_err = B_FALSE;
-	nxge_status_t		status = NXGE_OK;
-
-	handle = nxgep->npi_handle;
-	npi_txc_global_istatus_get(handle, (txc_int_stat_t *)&istatus.value);
-	switch (nxgep->mac.portnum) {
-	case 0:
-		if (istatus.bits.ldw.port0_int_status) {
-			my_err = B_TRUE;
-			err_portn = 0;
-			err_status = istatus.bits.ldw.port0_int_status;
-		}
-		break;
-	case 1:
-		if (istatus.bits.ldw.port1_int_status) {
-			my_err = B_TRUE;
-			err_portn = 1;
-			err_status = istatus.bits.ldw.port1_int_status;
-		}
-		break;
-	case 2:
-		if (istatus.bits.ldw.port2_int_status) {
-			my_err = B_TRUE;
-			err_portn = 2;
-			err_status = istatus.bits.ldw.port2_int_status;
-		}
-		break;
-	case 3:
-		if (istatus.bits.ldw.port3_int_status) {
-			my_err = B_TRUE;
-			err_portn = 3;
-			err_status = istatus.bits.ldw.port3_int_status;
-		}
-		break;
-	default:
-		return (NXGE_ERROR);
-	}
-	if (my_err) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-		    " nxge_txc_handle_sys_errors: errored port %d",
-		    err_portn));
-		status = nxge_txc_handle_port_errors(nxgep, err_status);
-	}
-
-	return (status);
-}
-
-static nxge_status_t
-nxge_txc_handle_port_errors(p_nxge_t nxgep, uint32_t err_status)
-{
-	npi_handle_t		handle;
-	npi_status_t		rs = NPI_SUCCESS;
-	p_nxge_txc_stats_t	statsp;
-	txc_int_stat_t		istatus;
-	boolean_t		txport_fatal = B_FALSE;
-	uint8_t			portn;
-	nxge_status_t		status = NXGE_OK;
-
-	handle = nxgep->npi_handle;
-	statsp = (p_nxge_txc_stats_t)&nxgep->statsp->txc_stats;
-	portn = nxgep->mac.portnum;
-	istatus.value = 0;
-
-	if ((err_status & TXC_INT_STAT_RO_CORR_ERR) ||
-			(err_status & TXC_INT_STAT_RO_UNCORR_ERR) ||
-			(err_status & TXC_INT_STAT_REORDER_ERR)) {
-		if ((rs = npi_txc_ro_states_get(handle, portn,
-				&statsp->errlog.ro_st)) != NPI_SUCCESS) {
-			return (NXGE_ERROR | rs);
-		}
-
-		if (err_status & TXC_INT_STAT_RO_CORR_ERR) {
-			statsp->ro_correct_err++;
-			NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-					NXGE_FM_EREPORT_TXC_RO_CORRECT_ERR);
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"nxge_txc_err_evnts: "
-				"RO FIFO correctable error"));
-		}
-		if (err_status & TXC_INT_STAT_RO_UNCORR_ERR) {
-			statsp->ro_uncorrect_err++;
-			NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-					NXGE_FM_EREPORT_TXC_RO_UNCORRECT_ERR);
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"nxge_txc_err_evnts: "
-				"RO FIFO uncorrectable error"));
-		}
-		if (err_status & TXC_INT_STAT_REORDER_ERR) {
-			statsp->reorder_err++;
-			NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-					NXGE_FM_EREPORT_TXC_REORDER_ERR);
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"nxge_txc_err_evnts: "
-				"fatal error: Reorder error"));
-			txport_fatal = B_TRUE;
-		}
-
-		if ((err_status & TXC_INT_STAT_RO_CORR_ERR) ||
-			(err_status & TXC_INT_STAT_RO_UNCORR_ERR)) {
-
-			if ((rs = npi_txc_ro_ecc_state_clr(handle, portn))
-							!= NPI_SUCCESS)
-				return (NXGE_ERROR | rs);
-			/*
-			 * Making sure that error source is cleared if this is
-			 * an injected error.
-			 */
-			TXC_FZC_CNTL_REG_WRITE64(handle, TXC_ROECC_CTL_REG,
-								portn, 0);
-		}
-	}
-
-	if ((err_status & TXC_INT_STAT_SF_CORR_ERR) ||
-			(err_status & TXC_INT_STAT_SF_UNCORR_ERR)) {
-		if ((rs = npi_txc_sf_states_get(handle, portn,
-				&statsp->errlog.sf_st)) != NPI_SUCCESS) {
-			return (NXGE_ERROR | rs);
-		}
-		if (err_status & TXC_INT_STAT_SF_CORR_ERR) {
-			statsp->sf_correct_err++;
-			NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-					NXGE_FM_EREPORT_TXC_SF_CORRECT_ERR);
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"nxge_txc_err_evnts: "
-				"SF FIFO correctable error"));
-		}
-		if (err_status & TXC_INT_STAT_SF_UNCORR_ERR) {
-			statsp->sf_uncorrect_err++;
-			NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-					NXGE_FM_EREPORT_TXC_SF_UNCORRECT_ERR);
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"nxge_txc_err_evnts: "
-				"SF FIFO uncorrectable error"));
-		}
-		if ((rs = npi_txc_sf_ecc_state_clr(handle, portn))
-							!= NPI_SUCCESS)
-			return (NXGE_ERROR | rs);
-		/*
-		 * Making sure that error source is cleared if this is
-		 * an injected error.
-		 */
-		TXC_FZC_CNTL_REG_WRITE64(handle, TXC_SFECC_CTL_REG, portn, 0);
-	}
-
-	/* Clear corresponding errors */
-	switch (portn) {
-	case 0:
-		istatus.bits.ldw.port0_int_status = err_status;
-		break;
-	case 1:
-		istatus.bits.ldw.port1_int_status = err_status;
-		break;
-	case 2:
-		istatus.bits.ldw.port2_int_status = err_status;
-		break;
-	case 3:
-		istatus.bits.ldw.port3_int_status = err_status;
-		break;
-	default:
-		return (NXGE_ERROR);
-	}
-
-	npi_txc_global_istatus_clear(handle, istatus.value);
-
-	if (txport_fatal) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				" nxge_txc_handle_port_errors:"
-				" fatal Error on Port#%d\n",
-				portn));
-		status = nxge_tx_port_fatal_err_recover(nxgep);
-		if (status == NXGE_OK) {
-			FM_SERVICE_RESTORED(nxgep);
-		}
-	}
-
-	return (status);
-}
-
-void
-nxge_txc_inject_err(p_nxge_t nxgep, uint32_t err_id)
-{
-	txc_int_stat_dbg_t	txcs;
-	txc_roecc_ctl_t		ro_ecc_ctl;
-	txc_sfecc_ctl_t		sf_ecc_ctl;
-	uint8_t			portn = nxgep->mac.portnum;
-
-	cmn_err(CE_NOTE, "!TXC error Inject\n");
-	switch (err_id) {
-	case NXGE_FM_EREPORT_TXC_RO_CORRECT_ERR:
-	case NXGE_FM_EREPORT_TXC_RO_UNCORRECT_ERR:
-		ro_ecc_ctl.value = 0;
-		ro_ecc_ctl.bits.ldw.all_pkts = 1;
-		ro_ecc_ctl.bits.ldw.second_line_pkt = 1;
-		if (err_id == NXGE_FM_EREPORT_TXC_RO_CORRECT_ERR)
-			ro_ecc_ctl.bits.ldw.single_bit_err = 1;
-		else
-			ro_ecc_ctl.bits.ldw.double_bit_err = 1;
-		cmn_err(CE_NOTE, "!Write 0x%lx to TXC_ROECC_CTL_REG\n",
-					ro_ecc_ctl.value);
-		TXC_FZC_CNTL_REG_WRITE64(nxgep->npi_handle, TXC_ROECC_CTL_REG,
-					portn, ro_ecc_ctl.value);
-		break;
-	case NXGE_FM_EREPORT_TXC_SF_CORRECT_ERR:
-	case NXGE_FM_EREPORT_TXC_SF_UNCORRECT_ERR:
-		sf_ecc_ctl.value = 0;
-		sf_ecc_ctl.bits.ldw.all_pkts = 1;
-		sf_ecc_ctl.bits.ldw.second_line_pkt = 1;
-		if (err_id == NXGE_FM_EREPORT_TXC_SF_CORRECT_ERR)
-			sf_ecc_ctl.bits.ldw.single_bit_err = 1;
-		else
-			sf_ecc_ctl.bits.ldw.double_bit_err = 1;
-		cmn_err(CE_NOTE, "!Write 0x%lx to TXC_SFECC_CTL_REG\n",
-					sf_ecc_ctl.value);
-		TXC_FZC_CNTL_REG_WRITE64(nxgep->npi_handle, TXC_SFECC_CTL_REG,
-					portn, sf_ecc_ctl.value);
-		break;
-	case NXGE_FM_EREPORT_TXC_REORDER_ERR:
-		NXGE_REG_RD64(nxgep->npi_handle, TXC_INT_STAT_DBG_REG,
-					&txcs.value);
-		nxge_txc_inject_port_err(portn, &txcs,
-						TXC_INT_STAT_REORDER_ERR);
-		cmn_err(CE_NOTE, "!Write 0x%lx to TXC_INT_STAT_DBG_REG\n",
-					txcs.value);
-		NXGE_REG_WR64(nxgep->npi_handle, TXC_INT_STAT_DBG_REG,
-					txcs.value);
-		break;
-	default:
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"nxge_txc_inject_err: Unknown err_id"));
-	}
-}
-
-static void
-nxge_txc_inject_port_err(uint8_t portn, txc_int_stat_dbg_t *txcs,
-				uint8_t istats)
-{
-	switch (portn) {
-	case 0:
-		txcs->bits.ldw.port0_int_status |= istats;
-		break;
-	case 1:
-		txcs->bits.ldw.port1_int_status |= istats;
-		break;
-	case 2:
-		txcs->bits.ldw.port2_int_status |= istats;
-		break;
-	case 3:
-		txcs->bits.ldw.port3_int_status |= istats;
-		break;
-	default:
-		;
-	}
-}
--- a/usr/src/uts/sun4v/io/nxge/nxge_txdma.c	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,3263 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#include <sys/nxge/nxge_impl.h>
-#include <sys/nxge/nxge_txdma.h>
-#include <sys/llc1.h>
-
-uint32_t 	nxge_reclaim_pending = TXDMA_RECLAIM_PENDING_DEFAULT;
-uint32_t	nxge_tx_minfree = 32;
-uint32_t	nxge_tx_intr_thres = 0;
-uint32_t	nxge_tx_max_gathers = TX_MAX_GATHER_POINTERS;
-uint32_t	nxge_tx_tiny_pack = 1;
-uint32_t	nxge_tx_use_bcopy = 1;
-
-extern uint32_t 	nxge_tx_ring_size;
-extern uint32_t 	nxge_bcopy_thresh;
-extern uint32_t 	nxge_dvma_thresh;
-extern uint32_t 	nxge_dma_stream_thresh;
-extern dma_method_t 	nxge_force_dma;
-
-/* Device register access attributes for PIO.  */
-extern ddi_device_acc_attr_t nxge_dev_reg_acc_attr;
-/* Device descriptor access attributes for DMA.  */
-extern ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr;
-/* Device buffer access attributes for DMA.  */
-extern ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr;
-extern ddi_dma_attr_t nxge_desc_dma_attr;
-extern ddi_dma_attr_t nxge_tx_dma_attr;
-
-static nxge_status_t nxge_map_txdma(p_nxge_t);
-static void nxge_unmap_txdma(p_nxge_t);
-
-static nxge_status_t nxge_txdma_hw_start(p_nxge_t);
-static void nxge_txdma_hw_stop(p_nxge_t);
-
-static nxge_status_t nxge_map_txdma_channel(p_nxge_t, uint16_t,
-	p_nxge_dma_common_t *, p_tx_ring_t *,
-	uint32_t, p_nxge_dma_common_t *,
-	p_tx_mbox_t *);
-static void nxge_unmap_txdma_channel(p_nxge_t, uint16_t,
-	p_tx_ring_t, p_tx_mbox_t);
-
-static nxge_status_t nxge_map_txdma_channel_buf_ring(p_nxge_t, uint16_t,
-	p_nxge_dma_common_t *, p_tx_ring_t *, uint32_t);
-static void nxge_unmap_txdma_channel_buf_ring(p_nxge_t, p_tx_ring_t);
-
-static void nxge_map_txdma_channel_cfg_ring(p_nxge_t, uint16_t,
-	p_nxge_dma_common_t *, p_tx_ring_t,
-	p_tx_mbox_t *);
-static void nxge_unmap_txdma_channel_cfg_ring(p_nxge_t,
-	p_tx_ring_t, p_tx_mbox_t);
-
-static nxge_status_t nxge_txdma_start_channel(p_nxge_t, uint16_t,
-    p_tx_ring_t, p_tx_mbox_t);
-static nxge_status_t nxge_txdma_stop_channel(p_nxge_t, uint16_t,
-	p_tx_ring_t, p_tx_mbox_t);
-
-static p_tx_ring_t nxge_txdma_get_ring(p_nxge_t, uint16_t);
-static nxge_status_t nxge_tx_err_evnts(p_nxge_t, uint_t,
-	p_nxge_ldv_t, tx_cs_t);
-static p_tx_mbox_t nxge_txdma_get_mbox(p_nxge_t, uint16_t);
-static nxge_status_t nxge_txdma_fatal_err_recover(p_nxge_t,
-	uint16_t, p_tx_ring_t);
-
-nxge_status_t
-nxge_init_txdma_channels(p_nxge_t nxgep)
-{
-	nxge_status_t		status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_init_txdma_channels"));
-
-	status = nxge_map_txdma(nxgep);
-	if (status != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"<== nxge_init_txdma_channels: status 0x%x", status));
-		return (status);
-	}
-
-	status = nxge_txdma_hw_start(nxgep);
-	if (status != NXGE_OK) {
-		nxge_unmap_txdma(nxgep);
-		return (status);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-		"<== nxge_init_txdma_channels: status 0x%x", status));
-
-	return (NXGE_OK);
-}
-
-void
-nxge_uninit_txdma_channels(p_nxge_t nxgep)
-{
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_uninit_txdma_channels"));
-
-	nxge_txdma_hw_stop(nxgep);
-	nxge_unmap_txdma(nxgep);
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-		"<== nxge_uninit_txdma_channels"));
-}
-
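-/*
- * nxge_setup_dma_common: carve 'entries' blocks of 'size' bytes out of the
- * source DMA area into *dest_p, then advance the source's kernel address,
- * DMA cookie address and remaining length past the carved region.
- */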
-void
-nxge_setup_dma_common(p_nxge_dma_common_t dest_p, p_nxge_dma_common_t src_p,
-	uint32_t entries, uint32_t size)
-{
-	size_t		tsize;
-	*dest_p = *src_p;
-	tsize = size * entries;
-	dest_p->alength = tsize;
-	dest_p->nblocks = entries;
-	dest_p->block_size = size;
-	dest_p->offset += tsize;
-
-	src_p->kaddrp = (caddr_t)dest_p->kaddrp + tsize;
-	src_p->alength -= tsize;
-	src_p->dma_cookie.dmac_laddress += tsize;
-	src_p->dma_cookie.dmac_size -= tsize;
-}
-
-nxge_status_t
-nxge_reset_txdma_channel(p_nxge_t nxgep, uint16_t channel, uint64_t reg_data)
-{
-	npi_status_t		rs = NPI_SUCCESS;
-	nxge_status_t		status = NXGE_OK;
-	npi_handle_t		handle;
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, " ==> nxge_reset_txdma_channel"));
-
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	if ((reg_data & TX_CS_RST_MASK) == TX_CS_RST_MASK) {
-		rs = npi_txdma_channel_reset(handle, channel);
-	} else {
-		rs = npi_txdma_channel_control(handle, TXDMA_RESET,
-				channel);
-	}
-
-	if (rs != NPI_SUCCESS) {
-		status = NXGE_ERROR | rs;
-	}
-
-	/*
-	 * Reset the tail (kick) register to 0.
-	 * (Hardware will not reset it. A Tx overflow is a fatal
-	 * error if the tail is not set to 0 after reset.)
-	 */
-	TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, " <== nxge_reset_txdma_channel"));
-	return (status);
-}
-
-nxge_status_t
-nxge_init_txdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
-		p_tx_dma_ent_msk_t mask_p)
-{
-	npi_handle_t		handle;
-	npi_status_t		rs = NPI_SUCCESS;
-	nxge_status_t		status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-		"<== nxge_init_txdma_channel_event_mask"));
-
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	rs = npi_txdma_event_mask(handle, OP_SET, channel, mask_p);
-	if (rs != NPI_SUCCESS) {
-		status = NXGE_ERROR | rs;
-	}
-
-	return (status);
-}
-
-nxge_status_t
-nxge_init_txdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
-	uint64_t reg_data)
-{
-	npi_handle_t		handle;
-	npi_status_t		rs = NPI_SUCCESS;
-	nxge_status_t		status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-		"<== nxge_init_txdma_channel_cntl_stat"));
-
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	rs = npi_txdma_control_status(handle, OP_SET, channel,
-			(p_tx_cs_t)&reg_data);
-
-	if (rs != NPI_SUCCESS) {
-		status = NXGE_ERROR | rs;
-	}
-
-	return (status);
-}
-
-nxge_status_t
-nxge_enable_txdma_channel(p_nxge_t nxgep,
-	uint16_t channel, p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p)
-{
-	npi_handle_t		handle;
-	npi_status_t		rs = NPI_SUCCESS;
-	nxge_status_t		status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_enable_txdma_channel"));
-
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	/*
-	 * Use configuration data composed at init time.
-	 * Write to hardware the transmit ring configurations.
-	 */
-	rs = npi_txdma_ring_config(handle, OP_SET, channel,
-			(uint64_t *)&(tx_desc_p->tx_ring_cfig.value));
-
-	if (rs != NPI_SUCCESS) {
-		return (NXGE_ERROR | rs);
-	}
-
-	/* Write to hardware the mailbox */
-	rs = npi_txdma_mbox_config(handle, OP_SET, channel,
-		(uint64_t *)&mbox_p->tx_mbox.dma_cookie.dmac_laddress);
-
-	if (rs != NPI_SUCCESS) {
-		return (NXGE_ERROR | rs);
-	}
-
-	/* Start the DMA engine. */
-	rs = npi_txdma_channel_init_enable(handle, channel);
-
-	if (rs != NPI_SUCCESS) {
-		return (NXGE_ERROR | rs);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_enable_txdma_channel"));
-
-	return (status);
-}
-
-void
-nxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len,
-		boolean_t l4_cksum, int pkt_len, uint8_t npads,
-		p_tx_pkt_hdr_all_t pkthdrp)
-{
-	p_tx_pkt_header_t	hdrp;
-	p_mblk_t 		nmp;
-	uint64_t		tmp;
-	size_t 			mblk_len;
-	size_t 			iph_len;
-	size_t 			hdrs_size;
-	uint8_t			hdrs_buf[sizeof (struct ether_header) +
-					64 + sizeof (uint32_t)];
-	uint8_t 		*ip_buf;
-	uint16_t		eth_type;
-	uint8_t			ipproto;
-	boolean_t		is_vlan = B_FALSE;
-	size_t			eth_hdr_size;
-
-	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: mp $%p", mp));
-
-	/*
-	 * Caller should zero out the headers first.
-	 */
-	hdrp = (p_tx_pkt_header_t)&pkthdrp->pkthdr;
-
-	if (fill_len) {
-		NXGE_DEBUG_MSG((NULL, TX_CTL,
-			"==> nxge_fill_tx_hdr: pkt_len %d "
-			"npads %d", pkt_len, npads));
-		tmp = (uint64_t)pkt_len;
-		hdrp->value |= (tmp << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT);
-		goto fill_tx_header_done;
-	}
-
-	tmp = (uint64_t)npads;
-	hdrp->value |= (tmp << TX_PKT_HEADER_PAD_SHIFT);
-
-	/*
-	 * mp is the original data packet (does not include the
-	 * Neptune transmit header).
-	 */
-	nmp = mp;
-	mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr;
-	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: "
-		"mp $%p b_rptr $%p len %d",
-		mp, nmp->b_rptr, mblk_len));
-	ip_buf = NULL;
-	bcopy(nmp->b_rptr, &hdrs_buf[0], sizeof (struct ether_vlan_header));
-	eth_type = ntohs(((p_ether_header_t)hdrs_buf)->ether_type);
-	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: (value 0x%llx) "
-		"ether type 0x%x", hdrp->value, eth_type));
-
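-	/*
-	 * An ether type below ETHERMTU is really an 802.3 length field,
-	 * so mark the packet as LLC; if it is LLC/SNAP, the real ether
-	 * type follows in the SNAP header.
-	 */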
-	if (eth_type < ETHERMTU) {
-		tmp = 1ull;
-		hdrp->value |= (tmp << TX_PKT_HEADER_LLC_SHIFT);
-		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: LLC "
-			"value 0x%llx", hdrp->value));
-		if (*(hdrs_buf + sizeof (struct ether_header))
-				== LLC_SNAP_SAP) {
-			eth_type = ntohs(*((uint16_t *)(hdrs_buf +
-					sizeof (struct ether_header) + 6)));
-			NXGE_DEBUG_MSG((NULL, TX_CTL,
-				"==> nxge_tx_pkt_hdr_init: LLC ether type 0x%x",
-				eth_type));
-		} else {
-			goto fill_tx_header_done;
-		}
-	} else if (eth_type == VLAN_ETHERTYPE) {
-		tmp = 1ull;
-		hdrp->value |= (tmp << TX_PKT_HEADER_VLAN__SHIFT);
-
-		eth_type = ntohs(((struct ether_vlan_header *)
-			hdrs_buf)->ether_type);
-		is_vlan = B_TRUE;
-		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: VLAN "
-			"value 0x%llx", hdrp->value));
-	}
-
-	if (!is_vlan) {
-		eth_hdr_size = sizeof (struct ether_header);
-	} else {
-		eth_hdr_size = sizeof (struct ether_vlan_header);
-	}
-
-	switch (eth_type) {
-	case ETHERTYPE_IP:
-		if (mblk_len > eth_hdr_size + sizeof (uint8_t)) {
-			ip_buf = nmp->b_rptr + eth_hdr_size;
-			mblk_len -= eth_hdr_size;
-			iph_len = ((*ip_buf) & 0x0f);
-			if (mblk_len > (iph_len + sizeof (uint32_t))) {
-				ip_buf = nmp->b_rptr;
-				ip_buf += eth_hdr_size;
-			} else {
-				ip_buf = NULL;
-			}
-
-		}
-		if (ip_buf == NULL) {
-			hdrs_size = 0;
-			((p_ether_header_t)hdrs_buf)->ether_type = 0;
-			while ((nmp) && (hdrs_size <
-					sizeof (hdrs_buf))) {
-				mblk_len = (size_t)nmp->b_wptr -
-					(size_t)nmp->b_rptr;
-				if (mblk_len >=
-					(sizeof (hdrs_buf) - hdrs_size))
-					mblk_len = sizeof (hdrs_buf) -
-						hdrs_size;
-				bcopy(nmp->b_rptr,
-					&hdrs_buf[hdrs_size], mblk_len);
-				hdrs_size += mblk_len;
-				nmp = nmp->b_cont;
-			}
-			ip_buf = hdrs_buf;
-			ip_buf += eth_hdr_size;
-			iph_len = ((*ip_buf) & 0x0f);
-		}
-
-		ipproto = ip_buf[9];
-
-		tmp = (uint64_t)iph_len;
-		hdrp->value |= (tmp << TX_PKT_HEADER_IHL_SHIFT);
-		tmp = (uint64_t)(eth_hdr_size >> 1);
-		hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);
-
-		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv4 "
-			" iph_len %d l3start %d eth_hdr_size %d proto 0x%x "
-			"tmp 0x%x",
-			iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
-			ipproto, tmp));
-		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IP "
-			"value 0x%llx", hdrp->value));
-
-		break;
-
-	case ETHERTYPE_IPV6:
-		hdrs_size = 0;
-		((p_ether_header_t)hdrs_buf)->ether_type = 0;
-		while ((nmp) && (hdrs_size <
-				sizeof (hdrs_buf))) {
-			mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr;
-			if (mblk_len >=
-				(sizeof (hdrs_buf) - hdrs_size))
-				mblk_len = sizeof (hdrs_buf) -
-					hdrs_size;
-			bcopy(nmp->b_rptr,
-				&hdrs_buf[hdrs_size], mblk_len);
-			hdrs_size += mblk_len;
-			nmp = nmp->b_cont;
-		}
-		ip_buf = hdrs_buf;
-		ip_buf += eth_hdr_size;
-
-		tmp = 1ull;
-		hdrp->value |= (tmp << TX_PKT_HEADER_IP_VER_SHIFT);
-
-		tmp = (eth_hdr_size >> 1);
-		hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);
-
-		/* byte 6 is the next header protocol */
-		ipproto = ip_buf[6];
-
-		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv6 "
-			" iph_len %d l3start %d eth_hdr_size %d proto 0x%x",
-			iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
-			ipproto));
-		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IPv6 "
-			"value 0x%llx", hdrp->value));
-
-		break;
-
-	default:
-		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: non-IP"));
-		goto fill_tx_header_done;
-	}
-
-	switch (ipproto) {
-	case IPPROTO_TCP:
-		NXGE_DEBUG_MSG((NULL, TX_CTL,
-			"==> nxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum));
-		if (l4_cksum) {
-			tmp = 1ull;
-			hdrp->value |= (tmp << TX_PKT_HEADER_PKT_TYPE_SHIFT);
-			NXGE_DEBUG_MSG((NULL, TX_CTL,
-				"==> nxge_tx_pkt_hdr_init: TCP CKSUM "
-				"value 0x%llx", hdrp->value));
-		}
-
-		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: TCP "
-			"value 0x%llx", hdrp->value));
-		break;
-
-	case IPPROTO_UDP:
-		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: UDP"));
-		if (l4_cksum) {
-			tmp = 0x2ull;
-			hdrp->value |= (tmp << TX_PKT_HEADER_PKT_TYPE_SHIFT);
-		}
-		NXGE_DEBUG_MSG((NULL, TX_CTL,
-			"==> nxge_tx_pkt_hdr_init: UDP "
-			"value 0x%llx", hdrp->value));
-		break;
-
-	default:
-		goto fill_tx_header_done;
-	}
-
-fill_tx_header_done:
-	NXGE_DEBUG_MSG((NULL, TX_CTL,
-		"==> nxge_fill_tx_hdr: pkt_len %d  "
-		"npads %d value 0x%llx", pkt_len, npads, hdrp->value));
-
-	NXGE_DEBUG_MSG((NULL, TX_CTL, "<== nxge_fill_tx_hdr"));
-}
-
-/*ARGSUSED*/
-p_mblk_t
-nxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads)
-{
-	p_mblk_t 		newmp = NULL;
-
-	if ((newmp = allocb(TX_PKT_HEADER_SIZE, BPRI_MED)) == NULL) {
-		NXGE_DEBUG_MSG((NULL, TX_CTL,
-			"<== nxge_tx_pkt_header_reserve: allocb failed"));
-		return (NULL);
-	}
-
-	NXGE_DEBUG_MSG((NULL, TX_CTL,
-		"==> nxge_tx_pkt_header_reserve: get new mp"));
-	DB_TYPE(newmp) = M_DATA;
-	newmp->b_rptr = newmp->b_wptr = DB_LIM(newmp);
-	linkb(newmp, mp);
-	newmp->b_rptr -= TX_PKT_HEADER_SIZE;
-
-	NXGE_DEBUG_MSG((NULL, TX_CTL, "==>nxge_tx_pkt_header_reserve: "
-		"b_rptr $%p b_wptr $%p",
-		newmp->b_rptr, newmp->b_wptr));
-
-	NXGE_DEBUG_MSG((NULL, TX_CTL,
-		"<== nxge_tx_pkt_header_reserve: use new mp"));
-
-	return (newmp);
-}
-
-int
-nxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p)
-{
-	uint_t 			nmblks;
-	ssize_t			len;
-	uint_t 			pkt_len;
-	p_mblk_t 		nmp, bmp, tmp;
-	uint8_t 		*b_wptr;
-
-	NXGE_DEBUG_MSG((NULL, TX_CTL,
-		"==> nxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p "
-		"len %d", mp, mp->b_rptr, mp->b_wptr, MBLKL(mp)));
-
-	nmp = mp;
-	bmp = mp;
-	nmblks = 0;
-	pkt_len = 0;
-	*tot_xfer_len_p = 0;
-
-	while (nmp) {
-		len = MBLKL(nmp);
-		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
-			"len %d pkt_len %d nmblks %d tot_xfer_len %d",
-			len, pkt_len, nmblks,
-			*tot_xfer_len_p));
-
-		if (len <= 0) {
-			bmp = nmp;
-			nmp = nmp->b_cont;
-			NXGE_DEBUG_MSG((NULL, TX_CTL,
-				"==> nxge_tx_pkt_nmblocks: "
-				"len (0) pkt_len %d nmblks %d",
-				pkt_len, nmblks));
-			continue;
-		}
-
-		*tot_xfer_len_p += len;
-		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
-			"len %d pkt_len %d nmblks %d tot_xfer_len %d",
-			len, pkt_len, nmblks,
-			*tot_xfer_len_p));
-
-		if (len < nxge_bcopy_thresh) {
-			NXGE_DEBUG_MSG((NULL, TX_CTL,
-				"==> nxge_tx_pkt_nmblocks: "
-				"len %d (< thresh) pkt_len %d nmblks %d",
-				len, pkt_len, nmblks));
-			if (pkt_len == 0)
-				nmblks++;
-			pkt_len += len;
-			if (pkt_len >= nxge_bcopy_thresh) {
-				pkt_len = 0;
-				len = 0;
-				nmp = bmp;
-			}
-		} else {
-			NXGE_DEBUG_MSG((NULL, TX_CTL,
-				"==> nxge_tx_pkt_nmblocks: "
-				"len %d (> thresh) pkt_len %d nmblks %d",
-				len, pkt_len, nmblks));
-			pkt_len = 0;
-			nmblks++;
-			/*
-			 * Hardware limits the transfer length to 4K.
-			 * If len is more than 4K, we need to break
-			 * it up to at most 2 more blocks.
-			 */
-			if (len > TX_MAX_TRANSFER_LENGTH) {
-				uint32_t	nsegs;
-
-				nsegs = 1;
-				if (len % (TX_MAX_TRANSFER_LENGTH * 2)) {
-					++nsegs;
-				}
-				NXGE_DEBUG_MSG((NULL, TX_CTL,
-					"==> nxge_tx_pkt_nmblocks: "
-					"len %d pkt_len %d nmblks %d nsegs %d",
-					len, pkt_len, nmblks, nsegs));
-				do {
-					b_wptr = nmp->b_rptr +
-						TX_MAX_TRANSFER_LENGTH;
-					nmp->b_wptr = b_wptr;
-					if ((tmp = dupb(nmp)) == NULL) {
-						return (0);
-					}
-					tmp->b_rptr = b_wptr;
-					tmp->b_wptr = nmp->b_wptr;
-					tmp->b_cont = nmp->b_cont;
-					nmp->b_cont = tmp;
-					nmblks++;
-					if (--nsegs) {
-						nmp = tmp;
-					}
-				} while (nsegs);
-				nmp = tmp;
-			}
-		}
-
-		/*
-		 * Hardware limits the transmit gather pointers to 15.
-		 */
-		if (nmp->b_cont && (nmblks + TX_GATHER_POINTERS_THRESHOLD) >
-				TX_MAX_GATHER_POINTERS) {
-			NXGE_DEBUG_MSG((NULL, TX_CTL,
-				"==> nxge_tx_pkt_nmblocks: pull msg - "
-				"len %d pkt_len %d nmblks %d",
-				len, pkt_len, nmblks));
-			/* Pull all message blocks from b_cont */
-			if ((tmp = msgpullup(nmp->b_cont, -1)) == NULL) {
-				return (0);
-			}
-			freemsg(nmp->b_cont);
-			nmp->b_cont = tmp;
-			pkt_len = 0;
-		}
-		bmp = nmp;
-		nmp = nmp->b_cont;
-	}
-
-	NXGE_DEBUG_MSG((NULL, TX_CTL,
-		"<== nxge_tx_pkt_nmblocks: rptr $%p wptr $%p "
-		"nmblks %d len %d tot_xfer_len %d",
-		mp->b_rptr, mp->b_wptr, nmblks,
-		MBLKL(mp), *tot_xfer_len_p));
-
-	return (nmblks);
-}
-
-boolean_t
-nxge_txdma_reclaim(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, int nmblks)
-{
-	boolean_t 		status = B_TRUE;
-	p_nxge_dma_common_t	tx_desc_dma_p;
-	nxge_dma_common_t	desc_area;
-	p_tx_desc_t 		tx_desc_ring_vp;
-	p_tx_desc_t 		tx_desc_p;
-	p_tx_desc_t 		tx_desc_pp;
-	tx_desc_t 		r_tx_desc;
-	p_tx_msg_t 		tx_msg_ring;
-	p_tx_msg_t 		tx_msg_p;
-	npi_handle_t		handle;
-	tx_ring_hdl_t		tx_head;
-	uint32_t 		pkt_len;
-	uint_t			tx_rd_index;
-	uint16_t		head_index, tail_index;
-	uint8_t			tdc;
-	boolean_t		head_wrap, tail_wrap;
-	p_nxge_tx_ring_stats_t tdc_stats;
-	int			rc;
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_reclaim"));
-
-	status = ((tx_ring_p->descs_pending < nxge_reclaim_pending) &&
-			(nmblks != 0));
-	NXGE_DEBUG_MSG((nxgep, TX_CTL,
-		"==> nxge_txdma_reclaim: pending %d  reclaim %d nmblks %d",
-			tx_ring_p->descs_pending, nxge_reclaim_pending,
-			nmblks));
-	if (!status) {
-		tx_desc_dma_p = &tx_ring_p->tdc_desc;
-		desc_area = tx_ring_p->tdc_desc;
-		handle = NXGE_DEV_NPI_HANDLE(nxgep);
-		tx_desc_ring_vp = tx_desc_dma_p->kaddrp;
-		tx_desc_ring_vp =
-			(p_tx_desc_t)DMA_COMMON_VPTR(desc_area);
-		tx_rd_index = tx_ring_p->rd_index;
-		tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
-		tx_msg_ring = tx_ring_p->tx_msg_ring;
-		tx_msg_p = &tx_msg_ring[tx_rd_index];
-		tdc = tx_ring_p->tdc;
-		tdc_stats = tx_ring_p->tdc_stats;
-		if (tx_ring_p->descs_pending > tdc_stats->tx_max_pend) {
-			tdc_stats->tx_max_pend = tx_ring_p->descs_pending;
-		}
-
-		tail_index = tx_ring_p->wr_index;
-		tail_wrap = tx_ring_p->wr_index_wrap;
-
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"==> nxge_txdma_reclaim: tdc %d tx_rd_index %d "
-			"tail_index %d tail_wrap %d "
-			"tx_desc_p $%p ($%p) ",
-			tdc, tx_rd_index, tail_index, tail_wrap,
-			tx_desc_p, (*(uint64_t *)tx_desc_p)));
-		/*
-		 * Read the hardware maintained transmit head
-		 * and wrap around bit.
-		 */
-		TXDMA_REG_READ64(handle, TX_RING_HDL_REG, tdc, &tx_head.value);
-		head_index =  tx_head.bits.ldw.head;
-		head_wrap = tx_head.bits.ldw.wrap;
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"==> nxge_txdma_reclaim: "
-			"tx_rd_index %d tail %d tail_wrap %d "
-			"head %d wrap %d",
-			tx_rd_index, tail_index, tail_wrap,
-			head_index, head_wrap));
-
-		if (head_index == tail_index) {
-			if (TXDMA_RING_EMPTY(head_index, head_wrap,
-					tail_index, tail_wrap) &&
-					(head_index == tx_rd_index)) {
-				NXGE_DEBUG_MSG((nxgep, TX_CTL,
-					"==> nxge_txdma_reclaim: EMPTY"));
-				return (B_TRUE);
-			}
-
-			NXGE_DEBUG_MSG((nxgep, TX_CTL,
-				"==> nxge_txdma_reclaim: Checking "
-					"if ring full"));
-			if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
-					tail_wrap)) {
-				NXGE_DEBUG_MSG((nxgep, TX_CTL,
-					"==> nxge_txdma_reclaim: full"));
-				return (B_FALSE);
-			}
-		}
-
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"==> nxge_txdma_reclaim: tx_rd_index and head_index"));
-
-		tx_desc_pp = &r_tx_desc;
-		while ((tx_rd_index != head_index) &&
-			(tx_ring_p->descs_pending != 0)) {
-
-			NXGE_DEBUG_MSG((nxgep, TX_CTL,
-				"==> nxge_txdma_reclaim: Checking if pending"));
-
-			NXGE_DEBUG_MSG((nxgep, TX_CTL,
-				"==> nxge_txdma_reclaim: "
-				"descs_pending %d ",
-				tx_ring_p->descs_pending));
-
-			NXGE_DEBUG_MSG((nxgep, TX_CTL,
-				"==> nxge_txdma_reclaim: "
-				"(tx_rd_index %d head_index %d "
-				"(tx_desc_p $%p)",
-				tx_rd_index, head_index,
-				tx_desc_p));
-
-			tx_desc_pp->value = tx_desc_p->value;
-			NXGE_DEBUG_MSG((nxgep, TX_CTL,
-				"==> nxge_txdma_reclaim: "
-				"(tx_rd_index %d head_index %d "
-				"tx_desc_p $%p (desc value 0x%llx) ",
-				tx_rd_index, head_index,
-				tx_desc_pp, (*(uint64_t *)tx_desc_pp)));
-
-			NXGE_DEBUG_MSG((nxgep, TX_CTL,
-				"==> nxge_txdma_reclaim: dump desc:"));
-
-			pkt_len = tx_desc_pp->bits.hdw.tr_len;
-			tdc_stats->obytes += pkt_len;
-			tdc_stats->opackets += tx_desc_pp->bits.hdw.sop;
-			NXGE_DEBUG_MSG((nxgep, TX_CTL,
-				"==> nxge_txdma_reclaim: pkt_len %d "
-				"tdc channel %d opackets %d",
-				pkt_len,
-				tdc,
-				tdc_stats->opackets));
-
-			if (tx_msg_p->flags.dma_type == USE_DVMA) {
-				NXGE_DEBUG_MSG((nxgep, TX_CTL,
-					"tx_desc_p = $%p "
-					"tx_desc_pp = $%p "
-					"index = %d",
-					tx_desc_p,
-					tx_desc_pp,
-					tx_ring_p->rd_index));
-				(void) dvma_unload(tx_msg_p->dvma_handle,
-					0, -1);
-				tx_msg_p->dvma_handle = NULL;
-				if (tx_ring_p->dvma_wr_index ==
-					tx_ring_p->dvma_wrap_mask) {
-					tx_ring_p->dvma_wr_index = 0;
-				} else {
-					tx_ring_p->dvma_wr_index++;
-				}
-				tx_ring_p->dvma_pending--;
-			} else if (tx_msg_p->flags.dma_type ==
-					USE_DMA) {
-				NXGE_DEBUG_MSG((nxgep, TX_CTL,
-					"==> nxge_txdma_reclaim: "
-					"USE DMA"));
-				if (rc = ddi_dma_unbind_handle
-					(tx_msg_p->dma_handle)) {
-					cmn_err(CE_WARN, "!nxge_reclaim: "
-						"ddi_dma_unbind_handle "
-						"failed. status %d", rc);
-				}
-			}
-			NXGE_DEBUG_MSG((nxgep, TX_CTL,
-				"==> nxge_txdma_reclaim: count packets"));
-			/*
-			 * count a chained packet only once.
-			 */
-			if (tx_msg_p->tx_message != NULL) {
-				freemsg(tx_msg_p->tx_message);
-				tx_msg_p->tx_message = NULL;
-			}
-
-			tx_msg_p->flags.dma_type = USE_NONE;
-			tx_rd_index = tx_ring_p->rd_index;
-			tx_rd_index = (tx_rd_index + 1) &
-					tx_ring_p->tx_wrap_mask;
-			tx_ring_p->rd_index = tx_rd_index;
-			tx_ring_p->descs_pending--;
-			tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
-			tx_msg_p = &tx_msg_ring[tx_rd_index];
-		}
-
-		status = (nmblks <= (tx_ring_p->tx_ring_size -
-				tx_ring_p->descs_pending -
-				TX_FULL_MARK));
-		if (status) {
-			cas32((uint32_t *)&tx_ring_p->queueing, 1, 0);
-		}
-	} else {
-		status = (nmblks <=
-			(tx_ring_p->tx_ring_size -
-				tx_ring_p->descs_pending -
-				TX_FULL_MARK));
-	}
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL,
-		"<== nxge_txdma_reclaim status = 0x%08x", status));
-
-	return (status);
-}
-
-uint_t
-nxge_tx_intr(void *arg1, void *arg2)
-{
-	p_nxge_ldv_t		ldvp = (p_nxge_ldv_t)arg1;
-	p_nxge_t		nxgep = (p_nxge_t)arg2;
-	p_nxge_ldg_t		ldgp;
-	uint8_t			channel;
-	uint32_t		vindex;
-	npi_handle_t		handle;
-	tx_cs_t			cs;
-	p_tx_ring_t 		*tx_rings;
-	p_tx_ring_t 		tx_ring_p;
-	npi_status_t		rs = NPI_SUCCESS;
-	uint_t 			serviced = DDI_INTR_UNCLAIMED;
-	nxge_status_t 		status = NXGE_OK;
-
-	if (ldvp == NULL) {
-		NXGE_DEBUG_MSG((NULL, INT_CTL,
-			"<== nxge_tx_intr: nxgep $%p ldvp $%p",
-			nxgep, ldvp));
-		return (DDI_INTR_UNCLAIMED);
-	}
-
-	if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
-		nxgep = ldvp->nxgep;
-	}
-	NXGE_DEBUG_MSG((nxgep, INT_CTL,
-		"==> nxge_tx_intr: nxgep(arg2) $%p ldvp(arg1) $%p",
-		nxgep, ldvp));
-	/*
-	 * This interrupt handler is for a specific
-	 * transmit dma channel.
-	 */
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	/* Get the control and status for this channel. */
-	channel = ldvp->channel;
-	ldgp = ldvp->ldgp;
-	NXGE_DEBUG_MSG((nxgep, INT_CTL,
-		"==> nxge_tx_intr: nxgep $%p ldvp (ldvp) $%p "
-		"channel %d",
-		nxgep, ldvp, channel));
-
-	rs = npi_txdma_control_status(handle, OP_GET, channel, &cs);
-	vindex = ldvp->vdma_index;
-	NXGE_DEBUG_MSG((nxgep, INT_CTL,
-		"==> nxge_tx_intr:channel %d ring index %d status 0x%08x",
-		channel, vindex, rs));
-	if (!rs && cs.bits.ldw.mk) {
-		NXGE_DEBUG_MSG((nxgep, INT_CTL,
-			"==> nxge_tx_intr:channel %d ring index %d "
-			"status 0x%08x (mk bit set)",
-			channel, vindex, rs));
-		tx_rings = nxgep->tx_rings->rings;
-		tx_ring_p = tx_rings[vindex];
-		NXGE_DEBUG_MSG((nxgep, INT_CTL,
-			"==> nxge_tx_intr:channel %d ring index %d "
-			"status 0x%08x (mk bit set, calling reclaim)",
-			channel, vindex, rs));
-
-		MUTEX_ENTER(&tx_ring_p->lock);
-		(void) nxge_txdma_reclaim(nxgep, tx_rings[vindex], 0);
-		MUTEX_EXIT(&tx_ring_p->lock);
-		mac_tx_update(nxgep->mach);
-	}
-
-	/*
-	 * Process other transmit control and status.
-	 * Check the ldv state.
-	 */
-	status = nxge_tx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs);
-	/*
-	 * Rearm this logical group if this is a single device
-	 * group.
-	 */
-	if (ldgp->nldvs == 1) {
-		NXGE_DEBUG_MSG((nxgep, INT_CTL,
-			"==> nxge_tx_intr: rearm"));
-		if (status == NXGE_OK) {
-			(void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg,
-				B_TRUE, ldgp->ldg_timer);
-		}
-	}
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_tx_intr"));
-	serviced = DDI_INTR_CLAIMED;
-	return (serviced);
-}
-
-void
-nxge_txdma_stop(p_nxge_t nxgep)
-{
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop"));
-
-	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop"));
-}
-
-void
-nxge_txdma_stop_start(p_nxge_t nxgep)
-{
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_start"));
-
-	(void) nxge_txdma_stop(nxgep);
-
-	(void) nxge_fixup_txdma_rings(nxgep);
-	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
-	(void) nxge_tx_mac_enable(nxgep);
-	(void) nxge_txdma_hw_kick(nxgep);
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_start"));
-}
-
-nxge_status_t
-nxge_txdma_hw_mode(p_nxge_t nxgep, boolean_t enable)
-{
-	int			i, ndmas;
-	uint16_t		channel;
-	p_tx_rings_t 		tx_rings;
-	p_tx_ring_t 		*tx_desc_rings;
-	npi_handle_t		handle;
-	npi_status_t		rs = NPI_SUCCESS;
-	nxge_status_t		status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-		"==> nxge_txdma_hw_mode: enable mode %d", enable));
-
-	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_txdma_mode: not initialized"));
-		return (NXGE_ERROR);
-	}
-
-	tx_rings = nxgep->tx_rings;
-	if (tx_rings == NULL) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_txdma_hw_mode: NULL global ring pointer"));
-		return (NXGE_ERROR);
-	}
-
-	tx_desc_rings = tx_rings->rings;
-	if (tx_desc_rings == NULL) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_txdma_hw_mode: NULL rings pointer"));
-		return (NXGE_ERROR);
-	}
-
-	ndmas = tx_rings->ndmas;
-	if (!ndmas) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"<== nxge_txdma_hw_mode: no dma channel allocated"));
-		return (NXGE_ERROR);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_mode: "
-		"tx_rings $%p tx_desc_rings $%p ndmas %d",
-		tx_rings, tx_desc_rings, ndmas));
-
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	for (i = 0; i < ndmas; i++) {
-		if (tx_desc_rings[i] == NULL) {
-			continue;
-		}
-		channel = tx_desc_rings[i]->tdc;
-		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-			"==> nxge_txdma_hw_mode: channel %d", channel));
-		if (enable) {
-			rs = npi_txdma_channel_enable(handle, channel);
-			NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-				"==> nxge_txdma_hw_mode: channel %d (enable) "
-				"rs 0x%x", channel, rs));
-		} else {
-			/*
-			 * Stop the dma channel and wait for the stop done bit.
-			 * If the stop done bit is not set, then force
-			 * an error so TXC will stop.
-			 * All channels bound to this port need to be stopped
-			 * and reset after injecting an interrupt error.
-			 */
-			rs = npi_txdma_channel_disable(handle, channel);
-			NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-				"==> nxge_txdma_hw_mode: channel %d (disable) "
-				"rs 0x%x", channel, rs));
-			{
-				tdmc_intr_dbg_t		intr_dbg;
-
-				if (rs != NPI_SUCCESS) {
-					/* Inject any error */
-					intr_dbg.value = 0;
-					intr_dbg.bits.ldw.nack_pref = 1;
-					NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-						"==> nxge_txdma_hw_mode: "
-						"channel %d (stop failed 0x%x) "
-						"(inject err)", channel, rs));
-					(void) npi_txdma_inj_int_error_set(
-						handle, channel, &intr_dbg);
-					rs = npi_txdma_channel_disable(handle,
-						channel);
-					NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-						"==> nxge_txdma_hw_mode: "
-						"channel %d (stop again 0x%x) "
-						"(after inject err)",
-						rs, channel));
-				}
-			}
-		}
-	}
-
-	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-		"<== nxge_txdma_hw_mode: status 0x%x", status));
-
-	return (status);
-}
-
-void
-nxge_txdma_enable_channel(p_nxge_t nxgep, uint16_t channel)
-{
-	npi_handle_t		handle;
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
-		"==> nxge_txdma_enable_channel: channel %d", channel));
-
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	/* enable the transmit dma channels */
-	(void) npi_txdma_channel_enable(handle, channel);
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_txdma_enable_channel"));
-}
-
-void
-nxge_txdma_disable_channel(p_nxge_t nxgep, uint16_t channel)
-{
-	npi_handle_t		handle;
-
-	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
-		"==> nxge_txdma_disable_channel: channel %d", channel));
-
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	/* stop the transmit dma channels */
-	(void) npi_txdma_channel_disable(handle, channel);
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_disable_channel"));
-}
-
-int
-nxge_txdma_stop_inj_err(p_nxge_t nxgep, int channel)
-{
-	npi_handle_t		handle;
-	tdmc_intr_dbg_t		intr_dbg;
-	int			status;
-	npi_status_t		rs = NPI_SUCCESS;
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_inj_err"));
-	/*
-	 * Stop the dma channel and wait for the stop done bit.
-	 * If the stop done bit is not set, then inject
-	 * an error.
-	 */
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	rs = npi_txdma_channel_disable(handle, channel);
-	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
-	if (status == NXGE_OK) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_txdma_stop_inj_err (channel %d): "
-			"stopped OK", channel));
-		return (status);
-	}
-
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-		"==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) "
-		"injecting error", channel, rs));
-	/* Inject any error */
-	intr_dbg.value = 0;
-	intr_dbg.bits.ldw.nack_pref = 1;
-	(void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);
-
-	/* Stop done bit will be set as a result of error injection */
-	rs = npi_txdma_channel_disable(handle, channel);
-	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
-	if (!(rs & NPI_TXDMA_STOP_FAILED)) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_txdma_stop_inj_err (channel %d): "
-			"stopped OK ", channel));
-		return (status);
-	}
-
-#if	defined(NXGE_DEBUG)
-	nxge_txdma_regs_dump_channels(nxgep);
-#endif
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-		"==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) "
-		" (injected error but still not stopped)", channel, rs));
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_inj_err"));
-	return (status);
-}
-
-void
-nxge_hw_start_tx(p_nxge_t nxgep)
-{
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_tx"));
-
-	(void) nxge_txdma_hw_start(nxgep);
-	(void) nxge_tx_mac_enable(nxgep);
-
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_tx"));
-}
-
-/*ARGSUSED*/
-void
-nxge_fixup_txdma_rings(p_nxge_t nxgep)
-{
-	int			index, ndmas;
-	uint16_t		channel;
-	p_tx_rings_t 		tx_rings;
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_txdma_rings"));
-
-	/*
-	 * For each transmit channel, reclaim each descriptor and
-	 * free buffers.
-	 */
-	tx_rings = nxgep->tx_rings;
-	if (tx_rings == NULL) {
-		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-			"<== nxge_fixup_txdma_rings: NULL ring pointer"));
-		return;
-	}
-
-	ndmas = tx_rings->ndmas;
-	if (!ndmas) {
-		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-			"<== nxge_fixup_txdma_rings: no channel allocated"));
-		return;
-	}
-
-	if (tx_rings->rings == NULL) {
-		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-			"<== nxge_fixup_txdma_rings: NULL rings pointer"));
-		return;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_fixup_txdma_rings: "
-		"tx_rings $%p tx_desc_rings $%p ndmas %d",
-		tx_rings, tx_rings->rings, ndmas));
-
-	for (index = 0; index < ndmas; index++) {
-		channel = tx_rings->rings[index]->tdc;
-		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-			"==> nxge_fixup_txdma_rings: channel %d", channel));
-
-		nxge_txdma_fixup_channel(nxgep, tx_rings->rings[index],
-			channel);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_txdma_rings"));
-}
-
-/*ARGSUSED*/
-void
-nxge_txdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
-{
-	p_tx_ring_t	ring_p;
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_channel"));
-	ring_p = nxge_txdma_get_ring(nxgep, channel);
-	if (ring_p == NULL) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
-		return;
-	}
-
-	if (ring_p->tdc != channel) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_txdma_fix_channel: channel not matched "
-			"ring tdc %d passed channel %d",
-			ring_p->tdc, channel));
-		return;
-	}
-
-	nxge_txdma_fixup_channel(nxgep, ring_p, channel);
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
-}
-
-/*ARGSUSED*/
-void
-nxge_txdma_fixup_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
-{
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_channel"));
-
-	if (ring_p == NULL) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_txdma_fixup_channel: NULL ring pointer"));
-		return;
-	}
-
-	if (ring_p->tdc != channel) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_txdma_fixup_channel: channel not matched "
-			"ring tdc %d passed channel %d",
-			ring_p->tdc, channel));
-		return;
-	}
-
-	MUTEX_ENTER(&ring_p->lock);
-	(void) nxge_txdma_reclaim(nxgep, ring_p, 0);
-	ring_p->rd_index = 0;
-	ring_p->wr_index = 0;
-	ring_p->ring_head.value = 0;
-	ring_p->ring_kick_tail.value = 0;
-	ring_p->descs_pending = 0;
-	MUTEX_EXIT(&ring_p->lock);
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_channel"));
-}
-
-/*ARGSUSED*/
-void
-nxge_txdma_hw_kick(p_nxge_t nxgep)
-{
-	int			index, ndmas;
-	uint16_t		channel;
-	p_tx_rings_t 		tx_rings;
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick"));
-
-	tx_rings = nxgep->tx_rings;
-	if (tx_rings == NULL) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_txdma_hw_kick: NULL ring pointer"));
-		return;
-	}
-
-	ndmas = tx_rings->ndmas;
-	if (!ndmas) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_txdma_hw_kick: no channel allocated"));
-		return;
-	}
-
-	if (tx_rings->rings == NULL) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_txdma_hw_kick: NULL rings pointer"));
-		return;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_kick: "
-		"tx_rings $%p tx_desc_rings $%p ndmas %d",
-		tx_rings, tx_rings->rings, ndmas));
-
-	for (index = 0; index < ndmas; index++) {
-		channel = tx_rings->rings[index]->tdc;
-		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-			"==> nxge_txdma_hw_kick: channel %d", channel));
-		nxge_txdma_hw_kick_channel(nxgep, tx_rings->rings[index],
-			channel);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick"));
-}
-
-/*ARGSUSED*/
-void
-nxge_txdma_kick_channel(p_nxge_t nxgep, uint16_t channel)
-{
-	p_tx_ring_t	ring_p;
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_kick_channel"));
-
-	ring_p = nxge_txdma_get_ring(nxgep, channel);
-	if (ring_p == NULL) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_txdma_kick_channel: NULL ring pointer"));
-		return;
-	}
-
-	if (ring_p->tdc != channel) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_txdma_kick_channel: channel not matched "
-			"ring tdc %d passed channel %d",
-			ring_p->tdc, channel));
-		return;
-	}
-
-	nxge_txdma_hw_kick_channel(nxgep, ring_p, channel);
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_kick_channel"));
-}
-
-/*ARGSUSED*/
-void
-nxge_txdma_hw_kick_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
-{
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick_channel"));
-
-	if (ring_p == NULL) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_txdma_hw_kick_channel: NULL ring pointer"));
-		return;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick_channel"));
-}
-
-/*ARGSUSED*/
-void
-nxge_check_tx_hang(p_nxge_t nxgep)
-{
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_check_tx_hang"));
-
-	/*
-	 * Needs inputs from hardware registers:
-	 *	whether the head index has moved since the last timeout.
-	 *	the packets-transmitted and packets-stuffed counters.
-	 */
-	if (nxge_txdma_hung(nxgep)) {
-		nxge_fixup_hung_txdma_rings(nxgep);
-	}
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_check_tx_hang"));
-}
-
-int
-nxge_txdma_hung(p_nxge_t nxgep)
-{
-	int			index, ndmas;
-	uint16_t		channel;
-	p_tx_rings_t 		tx_rings;
-	p_tx_ring_t 		tx_ring_p;
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hung"));
-	tx_rings = nxgep->tx_rings;
-	if (tx_rings == NULL) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_txdma_hung: NULL ring pointer"));
-		return (B_FALSE);
-	}
-
-	ndmas = tx_rings->ndmas;
-	if (!ndmas) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_txdma_hung: no channel "
-			"allocated"));
-		return (B_FALSE);
-	}
-
-	if (tx_rings->rings == NULL) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_txdma_hung: NULL rings pointer"));
-		return (B_FALSE);
-	}
-
-	for (index = 0; index < ndmas; index++) {
-		channel = tx_rings->rings[index]->tdc;
-		tx_ring_p = tx_rings->rings[index];
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"==> nxge_txdma_hung: channel %d", channel));
-		if (nxge_txdma_channel_hung(nxgep, tx_ring_p, channel)) {
-			return (B_TRUE);
-		}
-	}
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hung"));
-
-	return (B_FALSE);
-}
-
-int
-nxge_txdma_channel_hung(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, uint16_t channel)
-{
-	uint16_t		head_index, tail_index;
-	boolean_t		head_wrap, tail_wrap;
-	npi_handle_t		handle;
-	tx_ring_hdl_t		tx_head;
-	uint_t			tx_rd_index;
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_channel_hung"));
-
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	NXGE_DEBUG_MSG((nxgep, TX_CTL,
-		"==> nxge_txdma_channel_hung: channel %d", channel));
-	MUTEX_ENTER(&tx_ring_p->lock);
-	(void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
-
-	tail_index = tx_ring_p->wr_index;
-	tail_wrap = tx_ring_p->wr_index_wrap;
-	tx_rd_index = tx_ring_p->rd_index;
-	MUTEX_EXIT(&tx_ring_p->lock);
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL,
-		"==> nxge_txdma_channel_hung: tdc %d tx_rd_index %d "
-		"tail_index %d tail_wrap %d ",
-		channel, tx_rd_index, tail_index, tail_wrap));
-	/*
-	 * Read the hardware maintained transmit head
-	 * and wrap around bit.
-	 */
-	(void) npi_txdma_ring_head_get(handle, channel, &tx_head);
-	head_index =  tx_head.bits.ldw.head;
-	head_wrap = tx_head.bits.ldw.wrap;
-	NXGE_DEBUG_MSG((nxgep, TX_CTL,
-		"==> nxge_txdma_channel_hung: "
-		"tx_rd_index %d tail %d tail_wrap %d "
-		"head %d wrap %d",
-		tx_rd_index, tail_index, tail_wrap,
-		head_index, head_wrap));
-
-	if (TXDMA_RING_EMPTY(head_index, head_wrap,
-			tail_index, tail_wrap) &&
-			(head_index == tx_rd_index)) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"==> nxge_txdma_channel_hung: EMPTY"));
-		return (B_FALSE);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL,
-		"==> nxge_txdma_channel_hung: Checking if ring full"));
-	if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
-			tail_wrap)) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"==> nxge_txdma_channel_hung: full"));
-		return (B_TRUE);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_channel_hung"));
-
-	return (B_FALSE);
-}
-
-/*ARGSUSED*/
-void
-nxge_fixup_hung_txdma_rings(p_nxge_t nxgep)
-{
-	int			index, ndmas;
-	uint16_t		channel;
-	p_tx_rings_t 		tx_rings;
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_hung_txdma_rings"));
-	tx_rings = nxgep->tx_rings;
-	if (tx_rings == NULL) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_fixup_hung_txdma_rings: NULL ring pointer"));
-		return;
-	}
-
-	ndmas = tx_rings->ndmas;
-	if (!ndmas) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_fixup_hung_txdma_rings: no channel "
-			"allocated"));
-		return;
-	}
-
-	if (tx_rings->rings == NULL) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_fixup_hung_txdma_rings: NULL rings pointer"));
-		return;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_hung_txdma_rings: "
-		"tx_rings $%p tx_desc_rings $%p ndmas %d",
-		tx_rings, tx_rings->rings, ndmas));
-
-	for (index = 0; index < ndmas; index++) {
-		channel = tx_rings->rings[index]->tdc;
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"==> nxge_fixup_hung_txdma_rings: channel %d",
-			channel));
-
-		nxge_txdma_fixup_hung_channel(nxgep, tx_rings->rings[index],
-			channel);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_hung_txdma_rings"));
-}
-
-/*ARGSUSED*/
-void
-nxge_txdma_fix_hung_channel(p_nxge_t nxgep, uint16_t channel)
-{
-	p_tx_ring_t	ring_p;
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_hung_channel"));
-	ring_p = nxge_txdma_get_ring(nxgep, channel);
-	if (ring_p == NULL) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_txdma_fix_hung_channel"));
-		return;
-	}
-
-	if (ring_p->tdc != channel) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_txdma_fix_hung_channel: channel not matched "
-			"ring tdc %d passed channel %d",
-			ring_p->tdc, channel));
-		return;
-	}
-
-	nxge_txdma_fixup_channel(nxgep, ring_p, channel);
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_hung_channel"));
-}
-
-/*ARGSUSED*/
-void
-nxge_txdma_fixup_hung_channel(p_nxge_t nxgep, p_tx_ring_t ring_p,
-	uint16_t channel)
-{
-	npi_handle_t		handle;
-	tdmc_intr_dbg_t		intr_dbg;
-	int			status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_hung_channel"));
-
-	if (ring_p == NULL) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_txdma_fixup_hung_channel: NULL ring pointer"));
-		return;
-	}
-
-	if (ring_p->tdc != channel) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_txdma_fixup_hung_channel: channel "
-			"not matched "
-			"ring tdc %d passed channel %d",
-			ring_p->tdc, channel));
-		return;
-	}
-
-	/* Reclaim descriptors */
-	MUTEX_ENTER(&ring_p->lock);
-	(void) nxge_txdma_reclaim(nxgep, ring_p, 0);
-	MUTEX_EXIT(&ring_p->lock);
-
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	/*
-	 * Stop the dma channel and wait for the stop done bit.
-	 * If the stop done bit is not set, then force
-	 * an error.
-	 */
-	status = npi_txdma_channel_disable(handle, channel);
-	if (!(status & NPI_TXDMA_STOP_FAILED)) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_txdma_fixup_hung_channel: stopped OK "
-			"ring tdc %d passed channel %d",
-			ring_p->tdc, channel));
-		return;
-	}
-
-	/* Inject any error */
-	intr_dbg.value = 0;
-	intr_dbg.bits.ldw.nack_pref = 1;
-	(void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);
-
-	/* Stop done bit will be set as a result of error injection */
-	status = npi_txdma_channel_disable(handle, channel);
-	if (!(status & NPI_TXDMA_STOP_FAILED)) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_txdma_fixup_hung_channel: stopped again "
-			"ring tdc %d passed channel %d",
-			ring_p->tdc, channel));
-		return;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL,
-		"<== nxge_txdma_fixup_hung_channel: stop done still not set!! "
-		"ring tdc %d passed channel %d",
-		ring_p->tdc, channel));
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_hung_channel"));
-}
-
-/*ARGSUSED*/
-void
-nxge_reclaim_rings(p_nxge_t nxgep)
-{
-	int			index, ndmas;
-	uint16_t		channel;
-	p_tx_rings_t 		tx_rings;
-	p_tx_ring_t 		tx_ring_p;
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reclaim_rings"));
-	tx_rings = nxgep->tx_rings;
-	if (tx_rings == NULL) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_reclaim_rings: NULL ring pointer"));
-		return;
-	}
-
-	ndmas = tx_rings->ndmas;
-	if (!ndmas) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_reclaim_rings: no channel "
-			"allocated"));
-		return;
-	}
-
-	if (tx_rings->rings == NULL) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_reclaim_rings: NULL rings pointer"));
-		return;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reclaim_rings: "
-		"tx_rings $%p tx_desc_rings $%p ndmas %d",
-		tx_rings, tx_rings->rings, ndmas));
-
-	for (index = 0; index < ndmas; index++) {
-		channel = tx_rings->rings[index]->tdc;
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"==> nxge_reclaim_rings: channel %d",
-			channel));
-		tx_ring_p = tx_rings->rings[index];
-		MUTEX_ENTER(&tx_ring_p->lock);
-		(void) nxge_txdma_reclaim(nxgep, tx_ring_p, channel);
-		MUTEX_EXIT(&tx_ring_p->lock);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_reclaim_rings"));
-}
-
-void
-nxge_txdma_regs_dump_channels(p_nxge_t nxgep)
-{
-	int			index, ndmas;
-	uint16_t		channel;
-	p_tx_rings_t 		tx_rings;
-	npi_handle_t		handle;
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_regs_dump_channels"));
-
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	(void) npi_txdma_dump_fzc_regs(handle);
-
-	tx_rings = nxgep->tx_rings;
-	if (tx_rings == NULL) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_txdma_regs_dump_channels: NULL ring"));
-		return;
-	}
-
-	ndmas = tx_rings->ndmas;
-	if (!ndmas) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_txdma_regs_dump_channels: "
-			"no channel allocated"));
-		return;
-	}
-
-	if (tx_rings->rings == NULL) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_txdma_regs_dump_channels: NULL rings"));
-		return;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_regs_dump_channels: "
-		"tx_rings $%p tx_desc_rings $%p ndmas %d",
-		tx_rings, tx_rings->rings, ndmas));
-
-	for (index = 0; index < ndmas; index++) {
-		channel = tx_rings->rings[index]->tdc;
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"==> nxge_txdma_regs_dump_channels: channel %d",
-			channel));
-		(void) npi_txdma_dump_tdc_regs(handle, channel);
-	}
-
-	/* Dump TXC registers */
-	(void) npi_txc_dump_fzc_regs(handle);
-	(void) npi_txc_dump_port_fzc_regs(handle, nxgep->function_num);
-
-	for (index = 0; index < ndmas; index++) {
-		channel = tx_rings->rings[index]->tdc;
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"==> nxge_txdma_regs_dump_channels: channel %d",
-			channel));
-		(void) npi_txc_dump_tdc_fzc_regs(handle, channel);
-	}
-
-	for (index = 0; index < ndmas; index++) {
-		channel = tx_rings->rings[index]->tdc;
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"==> nxge_txdma_regs_dump_channels: channel %d",
-			channel));
-		nxge_txdma_regs_dump(nxgep, channel);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_regs_dump_channels"));
-
-}
-
-void
-nxge_txdma_regs_dump(p_nxge_t nxgep, int channel)
-{
-	npi_handle_t		handle;
-	tx_ring_hdl_t 		hdl;
-	tx_ring_kick_t 		kick;
-	tx_cs_t 		cs;
-	txc_control_t		control;
-	uint32_t		bitmap = 0;
-	uint32_t		burst = 0;
-	uint32_t		bytes = 0;
-	dma_log_page_t		cfg;
-
-	printf("\n\tfunc # %d tdc %d ",
-		nxgep->function_num, channel);
-	cfg.page_num = 0;
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	(void) npi_txdma_log_page_get(handle, channel, &cfg);
-	printf("\n\tlog page func %d valid page 0 %d",
-		cfg.func_num, cfg.valid);
-	cfg.page_num = 1;
-	(void) npi_txdma_log_page_get(handle, channel, &cfg);
-	printf("\n\tlog page func %d valid page 1 %d",
-		cfg.func_num, cfg.valid);
-
-	(void) npi_txdma_ring_head_get(handle, channel, &hdl);
-	(void) npi_txdma_desc_kick_reg_get(handle, channel, &kick);
-	printf("\n\thead value is 0x%0llx",
-		(long long)hdl.value);
-	printf("\n\thead index %d", hdl.bits.ldw.head);
-	printf("\n\tkick value is 0x%0llx",
-		(long long)kick.value);
-	printf("\n\ttail index %d\n", kick.bits.ldw.tail);
-
-	(void) npi_txdma_control_status(handle, OP_GET, channel, &cs);
-	printf("\n\tControl status is 0x%0llx", (long long)cs.value);
-	printf("\n\tControl status RST state %d", cs.bits.ldw.rst);
-
-	(void) npi_txc_control(handle, OP_GET, &control);
-	(void) npi_txc_port_dma_list_get(handle, nxgep->function_num, &bitmap);
-	(void) npi_txc_dma_max_burst(handle, OP_GET, channel, &burst);
-	(void) npi_txc_dma_bytes_transmitted(handle, channel, &bytes);
-
-	printf("\n\tTXC port control 0x%0llx",
-		(long long)control.value);
-	printf("\n\tTXC port bitmap 0x%x", bitmap);
-	printf("\n\tTXC max burst %d", burst);
-	printf("\n\tTXC bytes xmt %d\n", bytes);
-
-	{
-		ipp_status_t status;
-
-		(void) npi_ipp_get_status(handle, nxgep->function_num, &status);
-		printf("\n\tIPP status 0x%llx\n", (long long)status.value);
-	}
-}
-
-/*
- * Static functions start here.
- */
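-
-/*
- * nxge_map_txdma: map the transmit DMA resources for this instance.
- * Allocates the tx_rings_t and mailbox-area arrays, maps the
- * descriptor ring, buffers and mailbox of every DMA channel from the
- * preallocated buffer/control pools, and (for sun4v N2 with the
- * logical-page workaround) records the original I/O base addresses
- * and sizes in the per-ring hv_* fields.  Channels already mapped are
- * unwound on error.
- */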
-static nxge_status_t
-nxge_map_txdma(p_nxge_t nxgep)
-{
-	int			i, ndmas;
-	uint16_t		channel;
-	p_tx_rings_t 		tx_rings;
-	p_tx_ring_t 		*tx_desc_rings;
-	p_tx_mbox_areas_t 	tx_mbox_areas_p;
-	p_tx_mbox_t		*tx_mbox_p;
-	p_nxge_dma_pool_t	dma_buf_poolp;
-	p_nxge_dma_pool_t	dma_cntl_poolp;
-	p_nxge_dma_common_t	*dma_buf_p;
-	p_nxge_dma_common_t	*dma_cntl_p;
-	nxge_status_t		status = NXGE_OK;
-#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
-	p_nxge_dma_common_t	t_dma_buf_p;
-	p_nxge_dma_common_t	t_dma_cntl_p;
-#endif
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma"));
-
-	dma_buf_poolp = nxgep->tx_buf_pool_p;
-	dma_cntl_poolp = nxgep->tx_cntl_pool_p;
-
-	if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"==> nxge_map_txdma: buf not allocated"));
-		return (NXGE_ERROR);
-	}
-
-	ndmas = dma_buf_poolp->ndmas;
-	if (!ndmas) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_map_txdma: no dma allocated"));
-		return (NXGE_ERROR);
-	}
-
-	dma_buf_p = dma_buf_poolp->dma_buf_pool_p;
-	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
-
-	tx_rings = (p_tx_rings_t)
-			KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP);
-	tx_desc_rings = (p_tx_ring_t *)KMEM_ZALLOC(
-			sizeof (p_tx_ring_t) * ndmas, KM_SLEEP);
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: "
-		"tx_rings $%p tx_desc_rings $%p",
-		tx_rings, tx_desc_rings));
-
-	tx_mbox_areas_p = (p_tx_mbox_areas_t)
-			KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP);
-	tx_mbox_p = (p_tx_mbox_t *)KMEM_ZALLOC(
-			sizeof (p_tx_mbox_t) * ndmas, KM_SLEEP);
-
-	/*
-	 * Map descriptors from the buffer pools for each dma channel.
-	 */
-	for (i = 0; i < ndmas; i++) {
-		/*
-		 * Set up and prepare buffer blocks, descriptors
-		 * and mailbox.
-		 */
-		channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel;
-		status = nxge_map_txdma_channel(nxgep, channel,
-				(p_nxge_dma_common_t *)&dma_buf_p[i],
-				(p_tx_ring_t *)&tx_desc_rings[i],
-				dma_buf_poolp->num_chunks[i],
-				(p_nxge_dma_common_t *)&dma_cntl_p[i],
-				(p_tx_mbox_t *)&tx_mbox_p[i]);
-		if (status != NXGE_OK) {
-			goto nxge_map_txdma_fail1;
-		}
-		tx_desc_rings[i]->index = (uint16_t)i;
-		tx_desc_rings[i]->tdc_stats = &nxgep->statsp->tdc_stats[i];
-
-#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
-		if (nxgep->niu_type == N2_NIU && NXGE_DMA_BLOCK == 1) {
-			tx_desc_rings[i]->hv_set = B_FALSE;
-			t_dma_buf_p = (p_nxge_dma_common_t)dma_buf_p[i];
-			t_dma_cntl_p = (p_nxge_dma_common_t)dma_cntl_p[i];
-
-			tx_desc_rings[i]->hv_tx_buf_base_ioaddr_pp =
-				(uint64_t)t_dma_buf_p->orig_ioaddr_pp;
-			tx_desc_rings[i]->hv_tx_buf_ioaddr_size =
-				(uint64_t)t_dma_buf_p->orig_alength;
-
-			NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-				"==> nxge_map_txdma_channel: "
-				"hv data buf base io $%p "
-				"size 0x%llx (%d) "
-				"buf base io $%p "
-				"orig vatopa base io $%p "
-				"orig_len 0x%llx (%d)",
-				tx_desc_rings[i]->hv_tx_buf_base_ioaddr_pp,
-				tx_desc_rings[i]->hv_tx_buf_ioaddr_size,
-				tx_desc_rings[i]->hv_tx_buf_ioaddr_size,
-				t_dma_buf_p->ioaddr_pp,
-				t_dma_buf_p->orig_vatopa,
-				t_dma_buf_p->orig_alength,
-				t_dma_buf_p->orig_alength));
-
-			tx_desc_rings[i]->hv_tx_cntl_base_ioaddr_pp =
-				(uint64_t)t_dma_cntl_p->orig_ioaddr_pp;
-			tx_desc_rings[i]->hv_tx_cntl_ioaddr_size =
-				(uint64_t)t_dma_cntl_p->orig_alength;
-
-			NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-				"==> nxge_map_txdma_channel: "
-				"hv cntl base io $%p "
-				"orig ioaddr_pp ($%p) "
-				"orig vatopa ($%p) "
-				"size 0x%llx (%d 0x%x)",
-				tx_desc_rings[i]->hv_tx_cntl_base_ioaddr_pp,
-				t_dma_cntl_p->orig_ioaddr_pp,
-				t_dma_cntl_p->orig_vatopa,
-				tx_desc_rings[i]->hv_tx_cntl_ioaddr_size,
-				t_dma_cntl_p->orig_alength,
-				t_dma_cntl_p->orig_alength));
-		}
-#endif
-	}
-
-	tx_rings->ndmas = ndmas;
-	tx_rings->rings = tx_desc_rings;
-	nxgep->tx_rings = tx_rings;
-	tx_mbox_areas_p->txmbox_areas_p = tx_mbox_p;
-	nxgep->tx_mbox_areas_p = tx_mbox_areas_p;
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: "
-		"tx_rings $%p rings $%p",
-		nxgep->tx_rings, nxgep->tx_rings->rings));
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: "
-		"tx_rings $%p tx_desc_rings $%p",
-		nxgep->tx_rings, tx_desc_rings));
-
-	goto nxge_map_txdma_exit;
-
-nxge_map_txdma_fail1:
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-		"==> nxge_map_txdma: uninit tx desc "
-		"(status 0x%x channel %d i %d)",
-		status, channel, i));
-	i--;
-	for (; i >= 0; i--) {
-		channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel;
-		nxge_unmap_txdma_channel(nxgep, channel,
-			tx_desc_rings[i],
-			tx_mbox_p[i]);
-	}
-
-	KMEM_FREE(tx_desc_rings, sizeof (p_tx_ring_t) * ndmas);
-	KMEM_FREE(tx_rings, sizeof (tx_rings_t));
-	KMEM_FREE(tx_mbox_p, sizeof (p_tx_mbox_t) * ndmas);
-	KMEM_FREE(tx_mbox_areas_p, sizeof (tx_mbox_areas_t));
-
-nxge_map_txdma_exit:
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-		"==> nxge_map_txdma: "
-		"(status 0x%x channel %d)",
-		status, channel));
-
-	return (status);
-}
-
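-/*
- * nxge_unmap_txdma: tear down what nxge_map_txdma built.
- * Unmaps the descriptor ring and mailbox of every channel and then
- * frees the ring and mailbox-area arrays.
- */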
-static void
-nxge_unmap_txdma(p_nxge_t nxgep)
-{
-	int			i, ndmas;
-	uint8_t			channel;
-	p_tx_rings_t 		tx_rings;
-	p_tx_ring_t 		*tx_desc_rings;
-	p_tx_mbox_areas_t 	tx_mbox_areas_p;
-	p_tx_mbox_t		*tx_mbox_p;
-	p_nxge_dma_pool_t	dma_buf_poolp;
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_unmap_txdma"));
-
-	dma_buf_poolp = nxgep->tx_buf_pool_p;
-	if (!dma_buf_poolp->buf_allocated) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"==> nxge_unmap_txdma: buf not allocated"));
-		return;
-	}
-
-	ndmas = dma_buf_poolp->ndmas;
-	if (!ndmas) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_unmap_txdma: no dma allocated"));
-		return;
-	}
-
-	tx_rings = nxgep->tx_rings;
-	if (tx_rings == NULL) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_unmap_txdma: NULL ring pointer"));
-		return;
-	}
-
-	tx_desc_rings = tx_rings->rings;
-	if (tx_desc_rings == NULL) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_unmap_txdma: NULL ring pointers"));
-		return;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_unmap_txdma: "
-		"tx_rings $%p tx_desc_rings $%p ndmas %d",
-		tx_rings, tx_desc_rings, ndmas));
-
-	tx_mbox_areas_p = nxgep->tx_mbox_areas_p;
-	tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
-
-	for (i = 0; i < ndmas; i++) {
-		channel = tx_desc_rings[i]->tdc;
-		(void) nxge_unmap_txdma_channel(nxgep, channel,
-				(p_tx_ring_t)tx_desc_rings[i],
-				(p_tx_mbox_t)tx_mbox_p[i]);
-	}
-
-	KMEM_FREE(tx_desc_rings, sizeof (p_tx_ring_t) * ndmas);
-	KMEM_FREE(tx_rings, sizeof (tx_rings_t));
-	KMEM_FREE(tx_mbox_p, sizeof (p_tx_mbox_t) * ndmas);
-	KMEM_FREE(tx_mbox_areas_p, sizeof (tx_mbox_areas_t));
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-		"<== nxge_unmap_txdma"));
-}
-
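-/*
- * nxge_map_txdma_channel: map one TX DMA channel.
- * Maps the transmit buffer blocks for the channel and then sets up
- * its descriptor ring and mailbox via
- * nxge_map_txdma_channel_cfg_ring().
- */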
-static nxge_status_t
-nxge_map_txdma_channel(p_nxge_t nxgep, uint16_t channel,
-	p_nxge_dma_common_t *dma_buf_p,
-	p_tx_ring_t *tx_desc_p,
-	uint32_t num_chunks,
-	p_nxge_dma_common_t *dma_cntl_p,
-	p_tx_mbox_t *tx_mbox_p)
-{
-	int	status = NXGE_OK;
-
-	/*
-	 * Set up and prepare buffer blocks, descriptors
-	 * and mailbox.
-	 */
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-		"==> nxge_map_txdma_channel (channel %d)", channel));
-	/*
-	 * Transmit buffer blocks
-	 */
-	status = nxge_map_txdma_channel_buf_ring(nxgep, channel,
-			dma_buf_p, tx_desc_p, num_chunks);
-	if (status != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_map_txdma_channel (channel %d): "
-			"map buffer failed 0x%x", channel, status));
-		goto nxge_map_txdma_channel_exit;
-	}
-
-	/*
-	 * Transmit block ring, and mailbox.
-	 */
-	nxge_map_txdma_channel_cfg_ring(nxgep, channel, dma_cntl_p, *tx_desc_p,
-					tx_mbox_p);
-
-	goto nxge_map_txdma_channel_exit;
-
-nxge_map_txdma_channel_fail1:
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-		"==> nxge_map_txdma_channel: unmap buf"
-		"(status 0x%x channel %d)",
-		status, channel));
-	nxge_unmap_txdma_channel_buf_ring(nxgep, *tx_desc_p);
-
-nxge_map_txdma_channel_exit:
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-		"<== nxge_map_txdma_channel: "
-		"(status 0x%x channel %d)",
-		status, channel));
-
-	return (status);
-}
-
-/*ARGSUSED*/
-static void
-nxge_unmap_txdma_channel(p_nxge_t nxgep, uint16_t channel,
-	p_tx_ring_t tx_ring_p,
-	p_tx_mbox_t tx_mbox_p)
-{
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-		"==> nxge_unmap_txdma_channel (channel %d)", channel));
-	/*
-	 * unmap tx block ring, and mailbox.
-	 */
-	(void) nxge_unmap_txdma_channel_cfg_ring(nxgep,
-			tx_ring_p, tx_mbox_p);
-
-	/* unmap buffer blocks */
-	(void) nxge_unmap_txdma_channel_buf_ring(nxgep, tx_ring_p);
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_unmap_txdma_channel"));
-}
-
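-/*
- * nxge_map_txdma_channel_cfg_ring: set up the control structures of
- * one channel: zero the descriptor area, build the shadow copies of
- * the ring configuration, kick, control/status and event-mask
- * registers, allocate the mailbox and compute its high/low address
- * fields, and set the logical-page and max-burst defaults.
- */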
-/*ARGSUSED*/
-static void
-nxge_map_txdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel,
-	p_nxge_dma_common_t *dma_cntl_p,
-	p_tx_ring_t tx_ring_p,
-	p_tx_mbox_t *tx_mbox_p)
-{
-	p_tx_mbox_t 		mboxp;
-	p_nxge_dma_common_t 	cntl_dmap;
-	p_nxge_dma_common_t 	dmap;
-	p_tx_rng_cfig_t		tx_ring_cfig_p;
-	p_tx_ring_kick_t	tx_ring_kick_p;
-	p_tx_cs_t		tx_cs_p;
-	p_tx_dma_ent_msk_t	tx_evmask_p;
-	p_txdma_mbh_t		mboxh_p;
-	p_txdma_mbl_t		mboxl_p;
-	uint64_t		tx_desc_len;
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-		"==> nxge_map_txdma_channel_cfg_ring"));
-
-	cntl_dmap = *dma_cntl_p;
-
-	dmap = (p_nxge_dma_common_t)&tx_ring_p->tdc_desc;
-	nxge_setup_dma_common(dmap, cntl_dmap, tx_ring_p->tx_ring_size,
-			sizeof (tx_desc_t));
-	/*
-	 * Zero out transmit ring descriptors.
-	 */
-	bzero((caddr_t)dmap->kaddrp, dmap->alength);
-	tx_ring_cfig_p = &(tx_ring_p->tx_ring_cfig);
-	tx_ring_kick_p = &(tx_ring_p->tx_ring_kick);
-	tx_cs_p = &(tx_ring_p->tx_cs);
-	tx_evmask_p = &(tx_ring_p->tx_evmask);
-	tx_ring_cfig_p->value = 0;
-	tx_ring_kick_p->value = 0;
-	tx_cs_p->value = 0;
-	tx_evmask_p->value = 0;
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-		"==> nxge_map_txdma_channel_cfg_ring: channel %d des $%p",
-		dma_channel,
-		dmap->dma_cookie.dmac_laddress));
-
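-	/*
-	 * Build the ring configuration value: the masked DMA base
-	 * address of the descriptor ring plus the ring size divided
-	 * by 8 (tx_ring_size >> 3) shifted into the length field.
-	 */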
-	tx_ring_cfig_p->value = 0;
-	tx_desc_len = (uint64_t)(tx_ring_p->tx_ring_size >> 3);
-	tx_ring_cfig_p->value =
-		(dmap->dma_cookie.dmac_laddress & TX_RNG_CFIG_ADDR_MASK) |
-		(tx_desc_len << TX_RNG_CFIG_LEN_SHIFT);
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-		"==> nxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx",
-		dma_channel,
-		tx_ring_cfig_p->value));
-
-	tx_cs_p->bits.ldw.rst = 1;
-
-	/* Map in mailbox */
-	mboxp = (p_tx_mbox_t)
-		KMEM_ZALLOC(sizeof (tx_mbox_t), KM_SLEEP);
-	dmap = (p_nxge_dma_common_t)&mboxp->tx_mbox;
-	nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (txdma_mailbox_t));
-	mboxh_p = (p_txdma_mbh_t)&tx_ring_p->tx_mbox_mbh;
-	mboxl_p = (p_txdma_mbl_t)&tx_ring_p->tx_mbox_mbl;
-	mboxh_p->value = mboxl_p->value = 0;
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-		"==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
-		dmap->dma_cookie.dmac_laddress));
-
-	mboxh_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress >>
-				TXDMA_MBH_ADDR_SHIFT) & TXDMA_MBH_MASK);
-
-	mboxl_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress &
-				TXDMA_MBL_MASK) >> TXDMA_MBL_SHIFT);
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-		"==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
-		dmap->dma_cookie.dmac_laddress));
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-		"==> nxge_map_txdma_channel_cfg_ring: hmbox $%p "
-		"mbox $%p",
-		mboxh_p->bits.ldw.mbaddr, mboxl_p->bits.ldw.mbaddr));
-	tx_ring_p->page_valid.value = 0;
-	tx_ring_p->page_mask_1.value = tx_ring_p->page_mask_2.value = 0;
-	tx_ring_p->page_value_1.value = tx_ring_p->page_value_2.value = 0;
-	tx_ring_p->page_reloc_1.value = tx_ring_p->page_reloc_2.value = 0;
-	tx_ring_p->page_hdl.value = 0;
-
-	tx_ring_p->page_valid.bits.ldw.page0 = 1;
-	tx_ring_p->page_valid.bits.ldw.page1 = 1;
-
-	tx_ring_p->max_burst.value = 0;
-	tx_ring_p->max_burst.bits.ldw.dma_max_burst = TXC_DMA_MAX_BURST_DEFAULT;
-
-	*tx_mbox_p = mboxp;
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-				"<== nxge_map_txdma_channel_cfg_ring"));
-}
-
-/*ARGSUSED*/
-static void
-nxge_unmap_txdma_channel_cfg_ring(p_nxge_t nxgep,
-	p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
-{
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-		"==> nxge_unmap_txdma_channel_cfg_ring: channel %d",
-		tx_ring_p->tdc));
-
-	KMEM_FREE(tx_mbox_p, sizeof (tx_mbox_t));
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-		"<== nxge_unmap_txdma_channel_cfg_ring"));
-}
-
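-/*
- * nxge_map_txdma_channel_buf_ring: build the per-channel transmit
- * message ring.  Counts the blocks in all buffer chunks, allocates a
- * tx_msg_t (with its own DMA handle) per block, and points each entry
- * at its slice of the premapped buffer chunks.
- */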
-static nxge_status_t
-nxge_map_txdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel,
-	p_nxge_dma_common_t *dma_buf_p,
-	p_tx_ring_t *tx_desc_p, uint32_t num_chunks)
-{
-	p_nxge_dma_common_t 	dma_bufp, tmp_bufp;
-	p_nxge_dma_common_t 	dmap;
-	nxge_os_dma_handle_t	tx_buf_dma_handle;
-	p_tx_ring_t 		tx_ring_p;
-	p_tx_msg_t 		tx_msg_ring;
-	nxge_status_t		status = NXGE_OK;
-	int			ddi_status = DDI_SUCCESS;
-	int			i, j, index;
-	uint32_t		size, bsize;
-	uint32_t 		nblocks, nmsgs;
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-		"==> nxge_map_txdma_channel_buf_ring"));
-
-	dma_bufp = tmp_bufp = *dma_buf_p;
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-		" nxge_map_txdma_channel_buf_ring: channel %d to map %d "
-		"chunks bufp $%p",
-		channel, num_chunks, dma_bufp));
-
-	nmsgs = 0;
-	for (i = 0; i < num_chunks; i++, tmp_bufp++) {
-		nmsgs += tmp_bufp->nblocks;
-		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-			"==> nxge_map_txdma_channel_buf_ring: channel %d "
-			"bufp $%p nblocks %d nmsgs %d",
-			channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
-	}
-	if (!nmsgs) {
-		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-			"<== nxge_map_txdma_channel_buf_ring: channel %d "
-			"no msg blocks",
-			channel));
-		status = NXGE_ERROR;
-		goto nxge_map_txdma_channel_buf_ring_exit;
-	}
-
-	tx_ring_p = (p_tx_ring_t)
-		KMEM_ZALLOC(sizeof (tx_ring_t), KM_SLEEP);
-	MUTEX_INIT(&tx_ring_p->lock, NULL, MUTEX_DRIVER,
-		(void *)nxgep->interrupt_cookie);
-	/*
-	 * Allocate transmit message rings and handles for packets
-	 * not to be copied to premapped buffers.
-	 */
-	size = nmsgs * sizeof (tx_msg_t);
-	tx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
-	for (i = 0; i < nmsgs; i++) {
-		ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
-				DDI_DMA_DONTWAIT, 0,
-				&tx_msg_ring[i].dma_handle);
-		if (ddi_status != DDI_SUCCESS) {
-			status |= NXGE_DDI_FAILED;
-			break;
-		}
-	}
-	if (i < nmsgs) {
-		NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "Allocate handles failed."));
-		goto nxge_map_txdma_channel_buf_ring_fail1;
-	}
-
-	tx_ring_p->tdc = channel;
-	tx_ring_p->tx_msg_ring = tx_msg_ring;
-	tx_ring_p->tx_ring_size = nmsgs;
-	tx_ring_p->num_chunks = num_chunks;
-	if (!nxge_tx_intr_thres) {
-		nxge_tx_intr_thres = tx_ring_p->tx_ring_size/4;
-	}
-	tx_ring_p->tx_wrap_mask = tx_ring_p->tx_ring_size - 1;
-	tx_ring_p->rd_index = 0;
-	tx_ring_p->wr_index = 0;
-	tx_ring_p->ring_head.value = 0;
-	tx_ring_p->ring_kick_tail.value = 0;
-	tx_ring_p->descs_pending = 0;
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-		"==> nxge_map_txdma_channel_buf_ring: channel %d "
-		"actual tx desc max %d nmsgs %d "
-		"(config nxge_tx_ring_size %d)",
-		channel, tx_ring_p->tx_ring_size, nmsgs,
-		nxge_tx_ring_size));
-
-	/*
-	 * Map in buffers from the buffer pool.
-	 */
-	index = 0;
-	bsize = dma_bufp->block_size;
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel_buf_ring: "
-		"dma_bufp $%p tx_rng_p $%p "
-		"tx_msg_rng_p $%p bsize %d",
-		dma_bufp, tx_ring_p, tx_msg_ring, bsize));
-
-	tx_buf_dma_handle = dma_bufp->dma_handle;
-	for (i = 0; i < num_chunks; i++, dma_bufp++) {
-		bsize = dma_bufp->block_size;
-		nblocks = dma_bufp->nblocks;
-		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-			"==> nxge_map_txdma_channel_buf_ring: dma chunk %d "
-			"size %d dma_bufp $%p",
-			i, sizeof (nxge_dma_common_t), dma_bufp));
-
-		for (j = 0; j < nblocks; j++) {
-			tx_msg_ring[index].buf_dma_handle = tx_buf_dma_handle;
-			dmap = &tx_msg_ring[index++].buf_dma;
-#ifdef TX_MEM_DEBUG
-			NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-				"==> nxge_map_txdma_channel_buf_ring: j %d "
-				"dmap $%p", j, dmap));
-#endif
-			nxge_setup_dma_common(dmap, dma_bufp, 1,
-				bsize);
-		}
-	}
-
-	if (i < num_chunks) {
-		goto nxge_map_txdma_channel_buf_ring_fail1;
-	}
-
-	*tx_desc_p = tx_ring_p;
-
-	goto nxge_map_txdma_channel_buf_ring_exit;
-
-nxge_map_txdma_channel_buf_ring_fail1:
-	index--;
-	for (; index >= 0; index--) {
-		if (tx_msg_ring[index].dma_handle != NULL) {
-			ddi_dma_free_handle(&tx_msg_ring[index].dma_handle);
-		}
-	}
-	MUTEX_DESTROY(&tx_ring_p->lock);
-	KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size);
-	KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
-
-nxge_map_txdma_channel_buf_ring_exit:
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-		"<== nxge_map_txdma_channel_buf_ring status 0x%x", status));
-
-	return (status);
-}
-
-/*ARGSUSED*/
-static void
-nxge_unmap_txdma_channel_buf_ring(p_nxge_t nxgep, p_tx_ring_t tx_ring_p)
-{
-	p_tx_msg_t 		tx_msg_ring;
-	p_tx_msg_t 		tx_msg_p;
-	int			i;
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-		"==> nxge_unmap_txdma_channel_buf_ring"));
-	if (tx_ring_p == NULL) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_unmap_txdma_channel_buf_ring: NULL ringp"));
-		return;
-	}
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-		"==> nxge_unmap_txdma_channel_buf_ring: channel %d",
-		tx_ring_p->tdc));
-
-	tx_msg_ring = tx_ring_p->tx_msg_ring;
-	for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
-		tx_msg_p = &tx_msg_ring[i];
-		if (tx_msg_p->flags.dma_type == USE_DVMA) {
-			NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-				"entry = %d",
-				i));
-			(void) dvma_unload(tx_msg_p->dvma_handle,
-				0, -1);
-			tx_msg_p->dvma_handle = NULL;
-			if (tx_ring_p->dvma_wr_index ==
-				tx_ring_p->dvma_wrap_mask) {
-				tx_ring_p->dvma_wr_index = 0;
-			} else {
-				tx_ring_p->dvma_wr_index++;
-			}
-			tx_ring_p->dvma_pending--;
-		} else if (tx_msg_p->flags.dma_type ==
-				USE_DMA) {
-			if (ddi_dma_unbind_handle
-				(tx_msg_p->dma_handle)) {
-				cmn_err(CE_WARN, "!nxge_unmap_txdma_channel_buf_ring: "
-					"ddi_dma_unbind_handle "
-					"failed.");
-			}
-		}
-
-		if (tx_msg_p->tx_message != NULL) {
-			freemsg(tx_msg_p->tx_message);
-			tx_msg_p->tx_message = NULL;
-		}
-	}
-
-	for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
-		if (tx_msg_ring[i].dma_handle != NULL) {
-			ddi_dma_free_handle(&tx_msg_ring[i].dma_handle);
-		}
-	}
-
-	MUTEX_DESTROY(&tx_ring_p->lock);
-	KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size);
-	KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-		"<== nxge_unmap_txdma_channel_buf_ring"));
-}
-
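-/*
- * nxge_txdma_hw_start: start every mapped TX DMA channel.  If one
- * channel fails to start, the channels started so far are stopped.
- */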
-static nxge_status_t
-nxge_txdma_hw_start(p_nxge_t nxgep)
-{
-	int			i, ndmas;
-	uint16_t		channel;
-	p_tx_rings_t 		tx_rings;
-	p_tx_ring_t 		*tx_desc_rings;
-	p_tx_mbox_areas_t 	tx_mbox_areas_p;
-	p_tx_mbox_t		*tx_mbox_p;
-	nxge_status_t		status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start"));
-
-	tx_rings = nxgep->tx_rings;
-	if (tx_rings == NULL) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_txdma_hw_start: NULL ring pointer"));
-		return (NXGE_ERROR);
-	}
-	tx_desc_rings = tx_rings->rings;
-	if (tx_desc_rings == NULL) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_txdma_hw_start: NULL ring pointers"));
-		return (NXGE_ERROR);
-	}
-
-	ndmas = tx_rings->ndmas;
-	if (!ndmas) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_txdma_hw_start: no dma channel allocated"));
-		return (NXGE_ERROR);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
-		"tx_rings $%p tx_desc_rings $%p ndmas %d",
-		tx_rings, tx_desc_rings, ndmas));
-
-	tx_mbox_areas_p = nxgep->tx_mbox_areas_p;
-	tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
-
-	for (i = 0; i < ndmas; i++) {
-		channel = tx_desc_rings[i]->tdc;
-		status = nxge_txdma_start_channel(nxgep, channel,
-				(p_tx_ring_t)tx_desc_rings[i],
-				(p_tx_mbox_t)tx_mbox_p[i]);
-		if (status != NXGE_OK) {
-			goto nxge_txdma_hw_start_fail1;
-		}
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
-		"tx_rings $%p rings $%p",
-		nxgep->tx_rings, nxgep->tx_rings->rings));
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
-		"tx_rings $%p tx_desc_rings $%p",
-		nxgep->tx_rings, tx_desc_rings));
-
-	goto nxge_txdma_hw_start_exit;
-
-nxge_txdma_hw_start_fail1:
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-		"==> nxge_txdma_hw_start: disable "
-		"(status 0x%x channel %d i %d)", status, channel, i));
-	for (; i >= 0; i--) {
-		channel = tx_desc_rings[i]->tdc;
-		(void) nxge_txdma_stop_channel(nxgep, channel,
-			(p_tx_ring_t)tx_desc_rings[i],
-			(p_tx_mbox_t)tx_mbox_p[i]);
-	}
-
-nxge_txdma_hw_start_exit:
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-		"==> nxge_txdma_hw_start: (status 0x%x)", status));
-
-	return (status);
-}
-
-static void
-nxge_txdma_hw_stop(p_nxge_t nxgep)
-{
-	int			i, ndmas;
-	uint16_t		channel;
-	p_tx_rings_t 		tx_rings;
-	p_tx_ring_t 		*tx_desc_rings;
-	p_tx_mbox_areas_t 	tx_mbox_areas_p;
-	p_tx_mbox_t		*tx_mbox_p;
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_stop"));
-
-	tx_rings = nxgep->tx_rings;
-	if (tx_rings == NULL) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_txdma_hw_stop: NULL ring pointer"));
-		return;
-	}
-	tx_desc_rings = tx_rings->rings;
-	if (tx_desc_rings == NULL) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_txdma_hw_stop: NULL ring pointers"));
-		return;
-	}
-
-	ndmas = tx_rings->ndmas;
-	if (!ndmas) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_txdma_hw_stop: no dma channel allocated"));
-		return;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_stop: "
-		"tx_rings $%p tx_desc_rings $%p",
-		tx_rings, tx_desc_rings));
-
-	tx_mbox_areas_p = nxgep->tx_mbox_areas_p;
-	tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
-
-	for (i = 0; i < ndmas; i++) {
-		channel = tx_desc_rings[i]->tdc;
-		(void) nxge_txdma_stop_channel(nxgep, channel,
-				(p_tx_ring_t)tx_desc_rings[i],
-				(p_tx_mbox_t)tx_mbox_p[i]);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_stop: "
-		"tx_rings $%p tx_desc_rings $%p",
-		tx_rings, tx_desc_rings));
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_hw_stop"));
-}
-
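-/*
- * nxge_txdma_start_channel: bring one channel to the running state:
- * stop it, reset it, program its per-channel FZC registers and event
- * mask, then load the descriptors and mailbox and enable the channel.
- */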
-static nxge_status_t
-nxge_txdma_start_channel(p_nxge_t nxgep, uint16_t channel,
-    p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
-
-{
-	nxge_status_t		status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-		"==> nxge_txdma_start_channel (channel %d)", channel));
-	/*
-	 * TXDMA/TXC must be in stopped state.
-	 */
-	(void) nxge_txdma_stop_inj_err(nxgep, channel);
-
-	/*
-	 * Reset TXDMA channel
-	 */
-	tx_ring_p->tx_cs.value = 0;
-	tx_ring_p->tx_cs.bits.ldw.rst = 1;
-	status = nxge_reset_txdma_channel(nxgep, channel,
-			tx_ring_p->tx_cs.value);
-	if (status != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_txdma_start_channel (channel %d)"
-			" reset channel failed 0x%x", channel, status));
-		goto nxge_txdma_start_channel_exit;
-	}
-
-	/*
-	 * Initialize the TXDMA channel specific FZC control
-	 * configurations. These FZC registers pertain to
-	 * each TX channel (i.e. logical pages).
-	 */
-	status = nxge_init_fzc_txdma_channel(nxgep, channel,
-			tx_ring_p, tx_mbox_p);
-	if (status != NXGE_OK) {
-		goto nxge_txdma_start_channel_exit;
-	}
-
-	/*
-	 * Initialize the event masks.
-	 */
-	tx_ring_p->tx_evmask.value = 0;
-	status = nxge_init_txdma_channel_event_mask(nxgep,
-			channel, &tx_ring_p->tx_evmask);
-	if (status != NXGE_OK) {
-		goto nxge_txdma_start_channel_exit;
-	}
-
-	/*
-	 * Load the TXDMA descriptors, buffers and mailbox,
-	 * initialize the DMA channel and enable it.
-	 */
-	status = nxge_enable_txdma_channel(nxgep, channel,
-			tx_ring_p, tx_mbox_p);
-	if (status != NXGE_OK) {
-		goto nxge_txdma_start_channel_exit;
-	}
-
-nxge_txdma_start_channel_exit:
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_start_channel"));
-
-	return (status);
-}
-
-/*ARGSUSED*/
-static nxge_status_t
-nxge_txdma_stop_channel(p_nxge_t nxgep, uint16_t channel,
-	p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
-{
-	int		status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-		"==> nxge_txdma_stop_channel: channel %d", channel));
-
-	/*
-	 * Stop (disable) the TXDMA channel and TXC.  If the stop bit is
-	 * set and the STOP_N_GO bit is not set, the TXDMA reset state
-	 * will not be set when the channel is reset.
-	 */
-	(void) nxge_txdma_stop_inj_err(nxgep, channel);
-
-	/*
-	 * Reset TXDMA channel
-	 */
-	tx_ring_p->tx_cs.value = 0;
-	tx_ring_p->tx_cs.bits.ldw.rst = 1;
-	status = nxge_reset_txdma_channel(nxgep, channel,
-			tx_ring_p->tx_cs.value);
-	if (status != NXGE_OK) {
-		goto nxge_txdma_stop_channel_exit;
-	}
-
-#ifdef HARDWARE_REQUIRED
-	/* Set up the interrupt event masks. */
-	tx_ring_p->tx_evmask.value = 0;
-	status = nxge_init_txdma_channel_event_mask(nxgep,
-			channel, &tx_ring_p->tx_evmask);
-	if (status != NXGE_OK) {
-		goto nxge_txdma_stop_channel_exit;
-	}
-
-	/* Initialize the DMA control and status register */
-	tx_ring_p->tx_cs.value = TX_ENT_MSK_MK_ALL;
-	status = nxge_init_txdma_channel_cntl_stat(nxgep, channel,
-			tx_ring_p->tx_cs.value);
-	if (status != NXGE_OK) {
-		goto nxge_txdma_stop_channel_exit;
-	}
-
-	/* Disable channel */
-	status = nxge_disable_txdma_channel(nxgep, channel,
-			tx_ring_p, tx_mbox_p);
-	if (status != NXGE_OK) {
-		goto nxge_txdma_stop_channel_exit;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-		"==> nxge_txdma_stop_channel: event done"));
-
-#endif
-
-nxge_txdma_stop_channel_exit:
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_stop_channel"));
-	return (status);
-}
-
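-/*
- * nxge_txdma_get_ring: linear search of tx_rings->rings for the ring
- * whose TDC number matches the requested channel; returns NULL if the
- * channel is not found.
- */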
-static p_tx_ring_t
-nxge_txdma_get_ring(p_nxge_t nxgep, uint16_t channel)
-{
-	int			index, ndmas;
-	uint16_t		tdc;
-	p_tx_rings_t 		tx_rings;
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_ring"));
-
-	tx_rings = nxgep->tx_rings;
-	if (tx_rings == NULL) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_txdma_get_ring: NULL ring pointer"));
-		return (NULL);
-	}
-
-	ndmas = tx_rings->ndmas;
-	if (!ndmas) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_txdma_get_ring: no channel allocated"));
-		return (NULL);
-	}
-
-	if (tx_rings->rings == NULL) {
-		NXGE_DEBUG_MSG((nxgep, TX_CTL,
-			"<== nxge_txdma_get_ring: NULL rings pointer"));
-		return (NULL);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_get_ring: "
-		"tx_rings $%p tx_desc_rings $%p ndmas %d",
-		tx_rings, tx_rings->rings, ndmas));
-
-	for (index = 0; index < ndmas; index++) {
-		tdc = tx_rings->rings[index]->tdc;
-		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-			"==> nxge_txdma_get_ring: channel %d", tdc));
-		if (channel == tdc) {
-			NXGE_DEBUG_MSG((nxgep, TX_CTL,
-				"<== nxge_txdma_get_ring: tdc %d "
-				"ring $%p",
-				tdc, tx_rings->rings[index]));
-			return (p_tx_ring_t)(tx_rings->rings[index]);
-		}
-	}
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_ring"));
-	return (NULL);
-}
-
-static p_tx_mbox_t
-nxge_txdma_get_mbox(p_nxge_t nxgep, uint16_t channel)
-{
-	int			index, tdc, ndmas;
-	p_tx_rings_t 		tx_rings;
-	p_tx_mbox_areas_t 	tx_mbox_areas_p;
-	p_tx_mbox_t		*tx_mbox_p;
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_mbox"));
-
-	tx_rings = nxgep->tx_rings;
-	if (tx_rings == NULL) {
-		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-			"<== nxge_txdma_get_mbox: NULL ring pointer"));
-		return (NULL);
-	}
-
-	tx_mbox_areas_p = nxgep->tx_mbox_areas_p;
-	if (tx_mbox_areas_p == NULL) {
-		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-			"<== nxge_txdma_get_mbox: NULL mbox pointer"));
-		return (NULL);
-	}
-
-	tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
-
-	ndmas = tx_rings->ndmas;
-	if (!ndmas) {
-		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-			"<== nxge_txdma_get_mbox: no channel allocated"));
-		return (NULL);
-	}
-
-	if (tx_rings->rings == NULL) {
-		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-			"<== nxge_txdma_get_mbox: NULL rings pointer"));
-		return (NULL);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_get_mbox: "
-		"tx_rings $%p tx_desc_rings $%p ndmas %d",
-		tx_rings, tx_rings->rings, ndmas));
-
-	for (index = 0; index < ndmas; index++) {
-		tdc = tx_rings->rings[index]->tdc;
-		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
-			"==> nxge_txdma_get_mbox: channel %d", tdc));
-		if (channel == tdc) {
-			NXGE_DEBUG_MSG((nxgep, TX_CTL,
-				"<== nxge_txdma_get_mbox: tdc %d "
-				"ring $%p",
-				tdc, tx_rings->rings[index]));
-			return (p_tx_mbox_t)(tx_mbox_p[index]);
-		}
-	}
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_mbox"));
-	return (NULL);
-}
-
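-/*
- * nxge_tx_err_evnts: decode the TX control/status error bits for one
- * channel, bump the per-channel statistics, report each error through
- * FMA, and run the per-channel fatal-error recovery if any fatal bit
- * was set.
- */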
-/*ARGSUSED*/
-static nxge_status_t
-nxge_tx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp, tx_cs_t cs)
-{
-	npi_handle_t		handle;
-	npi_status_t		rs;
-	uint8_t			channel;
-	p_tx_ring_t 		*tx_rings;
-	p_tx_ring_t 		tx_ring_p;
-	p_nxge_tx_ring_stats_t	tdc_stats;
-	boolean_t		txchan_fatal = B_FALSE;
-	nxge_status_t		status = NXGE_OK;
-	tdmc_inj_par_err_t	par_err;
-	uint32_t		value;
-
-	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_tx_err_evnts"));
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	channel = ldvp->channel;
-
-	tx_rings = nxgep->tx_rings->rings;
-	tx_ring_p = tx_rings[index];
-	tdc_stats = tx_ring_p->tdc_stats;
-	if ((cs.bits.ldw.pkt_size_err) || (cs.bits.ldw.pref_buf_par_err) ||
-		(cs.bits.ldw.nack_pref) || (cs.bits.ldw.nack_pkt_rd) ||
-		(cs.bits.ldw.conf_part_err) || (cs.bits.ldw.pkt_prt_err)) {
-		if ((rs = npi_txdma_ring_error_get(handle, channel,
-					&tdc_stats->errlog)) != NPI_SUCCESS)
-			return (NXGE_ERROR | rs);
-	}
-
-	if (cs.bits.ldw.mbox_err) {
-		tdc_stats->mbox_err++;
-		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
-					NXGE_FM_EREPORT_TDMC_MBOX_ERR);
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_tx_err_evnts(channel %d): "
-			"fatal error: mailbox", channel));
-		txchan_fatal = B_TRUE;
-	}
-	if (cs.bits.ldw.pkt_size_err) {
-		tdc_stats->pkt_size_err++;
-		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
-					NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR);
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_tx_err_evnts(channel %d): "
-			"fatal error: pkt_size_err", channel));
-		txchan_fatal = B_TRUE;
-	}
-	if (cs.bits.ldw.tx_ring_oflow) {
-		tdc_stats->tx_ring_oflow++;
-		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
-					NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW);
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_tx_err_evnts(channel %d): "
-			"fatal error: tx_ring_oflow", channel));
-		txchan_fatal = B_TRUE;
-	}
-	if (cs.bits.ldw.pref_buf_par_err) {
-		tdc_stats->pre_buf_par_err++;
-		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
-					NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR);
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_tx_err_evnts(channel %d): "
-			"fatal error: pre_buf_par_err", channel));
-		/* Clear error injection source for parity error */
-		(void) npi_txdma_inj_par_error_get(handle, &value);
-		par_err.value = value;
-		par_err.bits.ldw.inject_parity_error &= ~(1 << channel);
-		(void) npi_txdma_inj_par_error_set(handle, par_err.value);
-		txchan_fatal = B_TRUE;
-	}
-	if (cs.bits.ldw.nack_pref) {
-		tdc_stats->nack_pref++;
-		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
-					NXGE_FM_EREPORT_TDMC_NACK_PREF);
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_tx_err_evnts(channel %d): "
-			"fatal error: nack_pref", channel));
-		txchan_fatal = B_TRUE;
-	}
-	if (cs.bits.ldw.nack_pkt_rd) {
-		tdc_stats->nack_pkt_rd++;
-		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
-					NXGE_FM_EREPORT_TDMC_NACK_PKT_RD);
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_tx_err_evnts(channel %d): "
-			"fatal error: nack_pkt_rd", channel));
-		txchan_fatal = B_TRUE;
-	}
-	if (cs.bits.ldw.conf_part_err) {
-		tdc_stats->conf_part_err++;
-		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
-					NXGE_FM_EREPORT_TDMC_CONF_PART_ERR);
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_tx_err_evnts(channel %d): "
-			"fatal error: config_partition_err", channel));
-		txchan_fatal = B_TRUE;
-	}
-	if (cs.bits.ldw.pkt_prt_err) {
-		tdc_stats->pkt_part_err++;
-		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
-					NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR);
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_tx_err_evnts(channel %d): "
-			"fatal error: pkt_prt_err", channel));
-		txchan_fatal = B_TRUE;
-	}
-
-	/* Clear error injection source in case this is an injected error */
-	TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, channel, 0);
-
-	if (txchan_fatal) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			" nxge_tx_err_evnts: "
-			" fatal error on channel %d cs 0x%llx\n",
-			channel, cs.value));
-		status = nxge_txdma_fatal_err_recover(nxgep, channel,
-								tx_ring_p);
-		if (status == NXGE_OK) {
-			FM_SERVICE_RESTORED(nxgep);
-		}
-	}
-
-	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_tx_err_evnts"));
-
-	return (status);
-}
-
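-/*
- * nxge_txdma_fatal_err_recover: recover one channel under its ring
- * lock: stop the channel, reclaim outstanding descriptors, reset the
- * channel, clear the kick register, reprogram the FZC registers and
- * event mask, and re-enable the channel.
- */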
-static nxge_status_t
-nxge_txdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel,
-						p_tx_ring_t tx_ring_p)
-{
-	npi_handle_t	handle;
-	npi_status_t	rs = NPI_SUCCESS;
-	p_tx_mbox_t	tx_mbox_p;
-	nxge_status_t	status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fatal_err_recover"));
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"Recovering from TxDMAChannel#%d error...", channel));
-
-	/*
-	 * Stop the DMA channel and wait for the stop-done bit.
-	 * If the stop-done bit is not set, treat it as an error.
-	 */
-
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel stop..."));
-	MUTEX_ENTER(&tx_ring_p->lock);
-	rs = npi_txdma_channel_control(handle, TXDMA_STOP, channel);
-	if (rs != NPI_SUCCESS) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_txdma_fatal_err_recover (channel %d): "
-			"stop failed ", channel));
-		goto fail;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reclaim..."));
-	(void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
-
-	/*
-	 * Reset TXDMA channel
-	 */
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reset..."));
-	if ((rs = npi_txdma_channel_control(handle, TXDMA_RESET, channel)) !=
-						NPI_SUCCESS) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_txdma_fatal_err_recover (channel %d)"
-			" reset channel failed 0x%x", channel, rs));
-		goto fail;
-	}
-
-	/*
-	 * Reset the tail (kick) register to 0.
-	 * (Hardware will not reset it; a Tx overflow fatal
-	 * error results if the tail is not 0 after reset.)
-	 */
-	TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);
-
-	/* Restart TXDMA channel */
-
-	/*
-	 * Initialize the TXDMA channel specific FZC control
-	 * configurations. These FZC registers pertain to
-	 * each TX channel (i.e. logical pages).
-	 */
-	tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel);
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel restart..."));
-	status = nxge_init_fzc_txdma_channel(nxgep, channel,
-						tx_ring_p, tx_mbox_p);
-	if (status != NXGE_OK)
-		goto fail;
-
-	/*
-	 * Initialize the event masks.
-	 */
-	tx_ring_p->tx_evmask.value = 0;
-	status = nxge_init_txdma_channel_event_mask(nxgep, channel,
-							&tx_ring_p->tx_evmask);
-	if (status != NXGE_OK)
-		goto fail;
-
-	tx_ring_p->wr_index_wrap = B_FALSE;
-	tx_ring_p->wr_index = 0;
-	tx_ring_p->rd_index = 0;
-
-	/*
-	 * Load the TXDMA descriptors, buffers and mailbox,
-	 * initialize the DMA channel and enable it.
-	 */
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel enable..."));
-	status = nxge_enable_txdma_channel(nxgep, channel,
-						tx_ring_p, tx_mbox_p);
-	MUTEX_EXIT(&tx_ring_p->lock);
-	if (status != NXGE_OK)
-		goto fail;
-
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"Recovery Successful, TxDMAChannel#%d Restored",
-			channel));
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fatal_err_recover"));
-
-	return (NXGE_OK);
-
-fail:
-	MUTEX_EXIT(&tx_ring_p->lock);
-	NXGE_DEBUG_MSG((nxgep, TX_CTL,
-		"nxge_txdma_fatal_err_recover (channel %d): "
-		"failed to recover this txdma channel", channel));
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
-
-	return (status);
-}
-
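-/*
- * nxge_tx_port_fatal_err_recover: apply the same stop/reclaim/reset/
- * re-enable sequence to every TX DMA channel on this port, holding
- * all of the ring locks for the duration of the recovery.
- */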
-nxge_status_t
-nxge_tx_port_fatal_err_recover(p_nxge_t nxgep)
-{
-	npi_handle_t	handle;
-	npi_status_t	rs = NPI_SUCCESS;
-	nxge_status_t	status = NXGE_OK;
-	p_tx_ring_t 	*tx_desc_rings;
-	p_tx_rings_t	tx_rings;
-	p_tx_ring_t	tx_ring_p;
-	p_tx_mbox_t	tx_mbox_p;
-	int		i, ndmas;
-	uint16_t	channel;
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover"));
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"Recovering from TxPort error..."));
-
-	/*
-	 * Stop each DMA channel and wait for the stop-done bit.
-	 * If the stop-done bit is not set, treat it as an error.
-	 */
-
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxPort stop all DMA channels..."));
-
-	tx_rings = nxgep->tx_rings;
-	tx_desc_rings = tx_rings->rings;
-	ndmas = tx_rings->ndmas;
-
-	for (i = 0; i < ndmas; i++) {
-		if (tx_desc_rings[i] == NULL) {
-			continue;
-		}
-		tx_ring_p = tx_rings->rings[i];
-		MUTEX_ENTER(&tx_ring_p->lock);
-	}
-
-	for (i = 0; i < ndmas; i++) {
-		if (tx_desc_rings[i] == NULL) {
-			continue;
-		}
-		channel = tx_desc_rings[i]->tdc;
-		tx_ring_p = tx_rings->rings[i];
-		rs = npi_txdma_channel_control(handle, TXDMA_STOP, channel);
-		if (rs != NPI_SUCCESS) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_tx_port_fatal_err_recover (channel %d): "
-			"stop failed ", channel));
-			goto fail;
-		}
-	}
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxPort reclaim all DMA channels..."));
-
-	for (i = 0; i < ndmas; i++) {
-		if (tx_desc_rings[i] == NULL) {
-			continue;
-		}
-		tx_ring_p = tx_rings->rings[i];
-		(void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
-	}
-
-	/*
-	 * Reset TXDMA channel
-	 */
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxPort reset all DMA channels..."));
-
-	for (i = 0; i < ndmas; i++) {
-		if (tx_desc_rings[i] == NULL) {
-			continue;
-		}
-		channel = tx_desc_rings[i]->tdc;
-		tx_ring_p = tx_rings->rings[i];
-		if ((rs = npi_txdma_channel_control(handle, TXDMA_RESET,
-				channel)) != NPI_SUCCESS) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"==> nxge_tx_port_fatal_err_recover (channel %d)"
-				" reset channel failed 0x%x", channel, rs));
-			goto fail;
-		}
-
-		/*
-		 * Reset the tail (kick) register to 0.
-		 * (Hardware will not reset it; a Tx overflow fatal
-		 * error results if the tail is not 0 after reset.)
-		 */
-
-		TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);
-
-	}
-
-	/*
-	 * Initialize the TXDMA channel specific FZC control
-	 * configurations. These FZC registers are pertaining
-	 * to each TX channel (i.e. logical pages).
-	 */
-
-	/* Restart TXDMA channels */
-
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxPort re-start all DMA channels..."));
-
-	for (i = 0; i < ndmas; i++) {
-		if (tx_desc_rings[i] == NULL) {
-			continue;
-		}
-		channel = tx_desc_rings[i]->tdc;
-		tx_ring_p = tx_rings->rings[i];
-		tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel);
-		status = nxge_init_fzc_txdma_channel(nxgep, channel,
-						tx_ring_p, tx_mbox_p);
-		if (status != NXGE_OK)
-			goto fail;
-
-		/*
-		 * Initialize the event masks.
-		 */
-		tx_ring_p->tx_evmask.value = 0;
-		status = nxge_init_txdma_channel_event_mask(nxgep, channel,
-							&tx_ring_p->tx_evmask);
-		if (status != NXGE_OK)
-			goto fail;
-
-		tx_ring_p->wr_index_wrap = B_FALSE;
-		tx_ring_p->wr_index = 0;
-		tx_ring_p->rd_index = 0;
-	}
-
-	/*
-	 * Load TXDMA descriptors, buffers, mailbox,
-	 * initialize the DMA channels and
-	 * enable each DMA channel.
-	 */
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxPort re-enable all DMA channels..."));
-
-	for (i = 0; i < ndmas; i++) {
-		if (tx_desc_rings[i] == NULL) {
-			continue;
-		}
-		channel = tx_desc_rings[i]->tdc;
-		tx_ring_p = tx_rings->rings[i];
-		tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel);
-		status = nxge_enable_txdma_channel(nxgep, channel,
-						tx_ring_p, tx_mbox_p);
-		if (status != NXGE_OK)
-			goto fail;
-	}
-
-	for (i = 0; i < ndmas; i++) {
-		if (tx_desc_rings[i] == NULL) {
-			continue;
-		}
-		tx_ring_p = tx_rings->rings[i];
-		MUTEX_EXIT(&tx_ring_p->lock);
-	}
-
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"Recovery Successful, TxPort Restored"));
-	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover"));
-
-	return (NXGE_OK);
-
-fail:
-	for (i = 0; i < ndmas; i++) {
-		if (tx_desc_rings[i] == NULL) {
-			continue;
-		}
-		tx_ring_p = tx_rings->rings[i];
-		MUTEX_EXIT(&tx_ring_p->lock);
-	}
-
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
-	NXGE_DEBUG_MSG((nxgep, TX_CTL,
-		"nxge_tx_port_fatal_err_recover: "
-		"failed to recover the tx port"));
-
-	return (status);
-}
-
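-/*
- * nxge_txdma_inject_err: fault-injection hook used for testing the
- * error handling paths.  Sets the requested error bit either in the
- * parity-error injection register or in the TDMC interrupt debug
- * register of the given channel.
- */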
-void
-nxge_txdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan)
-{
-	tdmc_intr_dbg_t		tdi;
-	tdmc_inj_par_err_t	par_err;
-	uint32_t		value;
-	npi_handle_t		handle;
-
-	switch (err_id) {
-
-	case NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR:
-		handle = NXGE_DEV_NPI_HANDLE(nxgep);
-		/* Clear error injection source for parity error */
-		(void) npi_txdma_inj_par_error_get(handle, &value);
-		par_err.value = value;
-		par_err.bits.ldw.inject_parity_error &= ~(1 << chan);
-		(void) npi_txdma_inj_par_error_set(handle, par_err.value);
-
-		(void) npi_txdma_inj_par_error_get(handle, &value);
-		par_err.value = value;
-		par_err.bits.ldw.inject_parity_error |= (1 << chan);
-		cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INJ_PAR_ERR_REG\n",
-				(unsigned long long)par_err.value);
-		(void) npi_txdma_inj_par_error_set(handle, par_err.value);
-		break;
-
-	case NXGE_FM_EREPORT_TDMC_MBOX_ERR:
-	case NXGE_FM_EREPORT_TDMC_NACK_PREF:
-	case NXGE_FM_EREPORT_TDMC_NACK_PKT_RD:
-	case NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR:
-	case NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW:
-	case NXGE_FM_EREPORT_TDMC_CONF_PART_ERR:
-	case NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR:
-		TXDMA_REG_READ64(nxgep->npi_handle, TDMC_INTR_DBG_REG,
-			chan, &tdi.value);
-		if (err_id == NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR)
-			tdi.bits.ldw.pref_buf_par_err = 1;
-		else if (err_id == NXGE_FM_EREPORT_TDMC_MBOX_ERR)
-			tdi.bits.ldw.mbox_err = 1;
-		else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PREF)
-			tdi.bits.ldw.nack_pref = 1;
-		else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PKT_RD)
-			tdi.bits.ldw.nack_pkt_rd = 1;
-		else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR)
-			tdi.bits.ldw.pkt_size_err = 1;
-		else if (err_id == NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW)
-			tdi.bits.ldw.tx_ring_oflow = 1;
-		else if (err_id == NXGE_FM_EREPORT_TDMC_CONF_PART_ERR)
-			tdi.bits.ldw.conf_part_err = 1;
-		else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR)
-			tdi.bits.ldw.pkt_part_err = 1;
-		cmn_err(CE_NOTE, "!Write 0x%lx to TDMC_INTR_DBG_REG\n",
-				tdi.value);
-		TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG,
-			chan, tdi.value);
-
-		break;
-	}
-}
--- a/usr/src/uts/sun4v/io/nxge/nxge_virtual.c	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,3650 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#include <sys/nxge/nxge_impl.h>
-#include <sys/nxge/nxge_mac.h>
-
-static void nxge_get_niu_property(dev_info_t *, niu_type_t *);
-static nxge_status_t nxge_get_mac_addr_properties(p_nxge_t);
-static nxge_status_t nxge_use_cfg_n2niu_properties(p_nxge_t);
-static void nxge_use_cfg_neptune_properties(p_nxge_t);
-static void nxge_use_cfg_dma_config(p_nxge_t);
-static void nxge_use_cfg_vlan_class_config(p_nxge_t);
-static void nxge_use_cfg_mac_class_config(p_nxge_t);
-static void nxge_use_cfg_class_config(p_nxge_t);
-static void nxge_use_cfg_link_cfg(p_nxge_t);
-static void nxge_set_hw_dma_config(p_nxge_t);
-static void nxge_set_hw_vlan_class_config(p_nxge_t);
-static void nxge_set_hw_mac_class_config(p_nxge_t);
-static void nxge_set_hw_class_config(p_nxge_t);
-static nxge_status_t nxge_use_default_dma_config_n2(p_nxge_t);
-static void nxge_ldgv_setup(p_nxge_ldg_t *, p_nxge_ldv_t *, uint8_t,
-	uint8_t, int *);
-static void nxge_init_mmac(p_nxge_t);
-
-uint32_t nxge_use_hw_property = 1;
-uint32_t nxge_groups_per_port = 2;
-
-extern uint32_t nxge_use_partition;
-extern uint32_t nxge_dma_obp_props_only;
-
-extern uint16_t nxge_rcr_timeout;
-extern uint16_t nxge_rcr_threshold;
-
-extern uint_t nxge_rx_intr(void *, void *);
-extern uint_t nxge_tx_intr(void *, void *);
-extern uint_t nxge_mif_intr(void *, void *);
-extern uint_t nxge_mac_intr(void *, void *);
-extern uint_t nxge_syserr_intr(void *, void *);
-extern void *nxge_list;
-
-#define	NXGE_SHARED_REG_SW_SIM
-
-#ifdef NXGE_SHARED_REG_SW_SIM
-uint64_t global_dev_ctrl = 0;
-#endif
-
-#define	MAX_SIBLINGS	NXGE_MAX_PORTS
-
-extern uint32_t nxge_rbr_size;
-extern uint32_t nxge_rcr_size;
-extern uint32_t nxge_tx_ring_size;
-extern uint32_t nxge_rbr_spare_size;
-
-extern npi_status_t npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);
-
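-/*
- * Default per-port resource distributions for the "fair" and "equal"
- * policies: TX channels, RX channels and RDC groups per port for
- * 2-port and 4-port configurations.
- */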
-static uint8_t p2_tx_fair[2] = {12, 12};
-static uint8_t p2_tx_equal[2] = {12, 12};
-static uint8_t p4_tx_fair[4] = {6, 6, 6, 6};
-static uint8_t p4_tx_equal[4] = {6, 6, 6, 6};
-static uint8_t p2_rx_fair[2] = {8, 8};
-static uint8_t p2_rx_equal[2] = {8, 8};
-
-static uint8_t p4_rx_fair[4] = {4, 4, 4, 4};
-static uint8_t p4_rx_equal[4] = {4, 4, 4, 4};
-
-static uint8_t p2_rdcgrp_fair[2] = {4, 4};
-static uint8_t p2_rdcgrp_equal[2] = {4, 4};
-static uint8_t p4_rdcgrp_fair[4] = {2, 2, 1, 1};
-static uint8_t p4_rdcgrp_equal[4] = {2, 2, 2, 2};
-static uint8_t p2_rdcgrp_cls[2] = {1, 1};
-static uint8_t p4_rdcgrp_cls[4] = {1, 1, 1, 1};
-
-typedef enum {
-	DEFAULT = 0,
-	EQUAL,
-	FAIR,
-	CUSTOM,
-	CLASSIFY,
-	L2_CLASSIFY,
-	L3_DISTRIBUTE,
-	L3_CLASSIFY,
-	L3_TCAM,
-	CONFIG_TOKEN_NONE
-} config_token_t;
-
-static char *token_names[] = {
-	"default",
-	"equal",
-	"fair",
-	"custom",
-	"classify",
-	"l2_classify",
-	"l3_distribute",
-	"l3_classify",
-	"l3_tcam",
-	"none",
-};
-
-void nxge_virint_regs_dump(p_nxge_t nxgep);
-
-void
-nxge_virint_regs_dump(p_nxge_t nxgep)
-{
-	npi_handle_t handle;
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_virint_regs_dump"));
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	(void) npi_vir_dump_pio_fzc_regs_one(handle);
-	(void) npi_vir_dump_ldgnum(handle);
-	(void) npi_vir_dump_ldsv(handle);
-	(void) npi_vir_dump_imask0(handle);
-	(void) npi_vir_dump_sid(handle);
-	(void) npi_mac_dump_regs(handle, nxgep->function_num);
-	(void) npi_ipp_dump_regs(handle, nxgep->function_num);
-	(void) npi_fflp_dump_regs(handle);
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_virint_regs_dump"));
-}
-
-/*
- * For now the DMA configurations are hard coded and a single
- * partition is assumed.
- *
- * OBP will pass this partition's Neptune configuration to fcode,
- * which creates properties for it.
- *
- * Since Neptune (PCI-E) and the NIU (Niagara-2) have different bus
- * interfaces, the driver needs to know which bus it is connected to.
- * Ravinder suggested creating a device property. In a partitioned
- * environment a .conf file cannot be used (this needs to be checked);
- * if the .conf file changes, the system must be rebooted.
- * The following function assumes that the properties are retrieved
- * from a virtualized nexus driver.
- */
-
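-/*
- * nxge_cntlops: control-operation entry point shared by sibling
- * ports.  Depending on ctlop it returns the NIU type or gets, sets,
- * updates or clears bits in the device-function shared register,
- * using either the software-simulated register
- * (NXGE_SHARED_REG_SW_SIM) or the NPI shared-register and lock
- * routines.
- */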
-nxge_status_t
-nxge_cntlops(dev_info_t *dip, nxge_ctl_enum_t ctlop, void *arg, void *result)
-{
-	nxge_status_t status = NXGE_OK;
-	int instance;
-	p_nxge_t nxgep;
-
-#ifndef NXGE_SHARED_REG_SW_SIM
-	npi_handle_t handle;
-	uint16_t sr16, cr16;
-#endif
-	instance = ddi_get_instance(dip);
-	NXGE_DEBUG_MSG((NULL, VIR_CTL, "Instance %d ", instance));
-
-	if (nxge_list == NULL) {
-		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
-				"nxge_cntlops: nxge_list null"));
-		return (NXGE_ERROR);
-	}
-	nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
-	if (nxgep == NULL) {
-		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
-				"nxge_cntlops: nxgep null"));
-		return (NXGE_ERROR);
-	}
-#ifndef NXGE_SHARED_REG_SW_SIM
-	handle = nxgep->npi_reg_handle;
-#endif
-	switch (ctlop) {
-	case NXGE_CTLOPS_NIUTYPE:
-		nxge_get_niu_property(dip, (niu_type_t *)result);
-		return (status);
-
-	case NXGE_CTLOPS_GET_SHARED_REG:
-#ifdef NXGE_SHARED_REG_SW_SIM
-		*(uint64_t *)result = global_dev_ctrl;
-		return (0);
-#else
-		status = npi_dev_func_sr_sr_get(handle, &sr16);
-		*(uint16_t *)result = sr16;
-		NXGE_DEBUG_MSG((NULL, VIR_CTL,
-			"nxge_cntlops: NXGE_CTLOPS_GET_SHARED_REG"));
-		return (0);
-#endif
-
-	case NXGE_CTLOPS_SET_SHARED_REG_LOCK:
-#ifdef NXGE_SHARED_REG_SW_SIM
-		global_dev_ctrl = *(uint64_t *)arg;
-		return (0);
-#else
-		status = NPI_FAILURE;
-		while (status != NPI_SUCCESS)
-			status = npi_dev_func_sr_lock_enter(handle);
-
-		sr16 = *(uint16_t *)arg;
-		status = npi_dev_func_sr_sr_set_only(handle, &sr16);
-		status = npi_dev_func_sr_lock_free(handle);
-		NXGE_DEBUG_MSG((NULL, VIR_CTL,
-			"nxge_cntlops: NXGE_CTLOPS_SET_SHARED_REG"));
-		return (0);
-#endif
-
-	case NXGE_CTLOPS_UPDATE_SHARED_REG:
-#ifdef NXGE_SHARED_REG_SW_SIM
-		global_dev_ctrl |= *(uint64_t *)arg;
-		return (0);
-#else
-		status = NPI_FAILURE;
-		while (status != NPI_SUCCESS)
-			status = npi_dev_func_sr_lock_enter(handle);
-		status = npi_dev_func_sr_sr_get(handle, &sr16);
-		sr16 |= *(uint16_t *)arg;
-		status = npi_dev_func_sr_sr_set_only(handle, &sr16);
-		status = npi_dev_func_sr_lock_free(handle);
-		NXGE_DEBUG_MSG((NULL, VIR_CTL,
-			"nxge_cntlops: NXGE_CTLOPS_UPDATE_SHARED_REG"));
-		return (0);
-#endif
-
-	case NXGE_CTLOPS_CLEAR_BIT_SHARED_REG_UL:
-#ifdef NXGE_SHARED_REG_SW_SIM
-		global_dev_ctrl |= *(uint64_t *)arg;
-		return (0);
-#else
-		status = npi_dev_func_sr_sr_get(handle, &sr16);
-		cr16 = *(uint16_t *)arg;
-		sr16 &= ~cr16;
-		status = npi_dev_func_sr_sr_set_only(handle, &sr16);
-		NXGE_DEBUG_MSG((NULL, VIR_CTL,
-			"nxge_cntlops: NXGE_CTLOPS_CLEAR_BIT_SHARED_REG_UL"));
-		return (0);
-#endif
-
-	case NXGE_CTLOPS_CLEAR_BIT_SHARED_REG:
-#ifdef NXGE_SHARED_REG_SW_SIM
-		global_dev_ctrl |= *(uint64_t *)arg;
-		return (0);
-#else
-		status = NPI_FAILURE;
-		while (status != NPI_SUCCESS)
-			status = npi_dev_func_sr_lock_enter(handle);
-		status = npi_dev_func_sr_sr_get(handle, &sr16);
-		cr16 = *(uint16_t *)arg;
-		sr16 &= ~cr16;
-		status = npi_dev_func_sr_sr_set_only(handle, &sr16);
-		status = npi_dev_func_sr_lock_free(handle);
-		NXGE_DEBUG_MSG((NULL, VIR_CTL,
-			"nxge_cntlops: NXGE_CTLOPS_CLEAR_BIT_SHARED_REG"));
-		return (0);
-#endif
-
-	case NXGE_CTLOPS_GET_LOCK_BLOCK:
-#ifdef NXGE_SHARED_REG_SW_SIM
-		global_dev_ctrl |= *(uint64_t *)arg;
-		return (0);
-#else
-		status = NPI_FAILURE;
-		while (status != NPI_SUCCESS)
-			status = npi_dev_func_sr_lock_enter(handle);
-		NXGE_DEBUG_MSG((NULL, VIR_CTL,
-			"nxge_cntlops: NXGE_CTLOPS_GET_LOCK_BLOCK"));
-		return (0);
-#endif
-	case NXGE_CTLOPS_GET_LOCK_TRY:
-#ifdef NXGE_SHARED_REG_SW_SIM
-		global_dev_ctrl |= *(uint64_t *)arg;
-		return (0);
-#else
-		status = npi_dev_func_sr_lock_enter(handle);
-		NXGE_DEBUG_MSG((NULL, VIR_CTL,
-			"nxge_cntlops: NXGE_CTLOPS_GET_LOCK_TRY"));
-		if (status == NPI_SUCCESS)
-			return (NXGE_OK);
-		else
-			return (NXGE_ERROR);
-#endif
-	case NXGE_CTLOPS_FREE_LOCK:
-#ifdef NXGE_SHARED_REG_SW_SIM
-		global_dev_ctrl |= *(uint64_t *)arg;
-		return (0);
-#else
-		status = npi_dev_func_sr_lock_free(handle);
-		NXGE_DEBUG_MSG((NULL, VIR_CTL,
-			"nxge_cntlops: NXGE_CTLOPS_FREE_LOCK"));
-		if (status == NPI_SUCCESS)
-			return (NXGE_OK);
-		else
-			return (NXGE_ERROR);
-#endif
-
-	default:
-		status = NXGE_ERROR;
-	}
-
-	return (status);
-}
-
-void
-nxge_common_lock_get(p_nxge_t nxgep)
-{
-	uint32_t status = NPI_FAILURE;
-	npi_handle_t handle;
-
-#if	defined(NXGE_SHARED_REG_SW_SIM)
-	return;
-#endif
-	handle = nxgep->npi_reg_handle;
-	while (status != NPI_SUCCESS)
-		status = npi_dev_func_sr_lock_enter(handle);
-}
-
-void
-nxge_common_lock_free(p_nxge_t nxgep)
-{
-	npi_handle_t handle;
-
-#if	defined(NXGE_SHARED_REG_SW_SIM)
-	return;
-#endif
-	handle = nxgep->npi_reg_handle;
-	(void) npi_dev_func_sr_lock_free(handle);
-}
-
-static void
-nxge_get_niu_property(dev_info_t *dip, niu_type_t *niu_type)
-{
-	uchar_t *prop_val;
-	uint_t prop_len;
-
-	*niu_type = NEPTUNE;
-	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip, 0,
-			"niu-type", (uchar_t **)&prop_val,
-			&prop_len) == DDI_PROP_SUCCESS) {
-		if (strncmp("niu", (caddr_t)prop_val, (size_t)prop_len) == 0) {
-			*niu_type = N2_NIU;
-		}
-		ddi_prop_free(prop_val);
-	}
-}
-
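-/*
- * Map a configuration property string (e.g. "fair", "equal", "custom")
- * to its config_token_t value; CONFIG_TOKEN_NONE is returned when the
- * string matches no known token.
- */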
-static config_token_t
-nxge_get_config_token(char *prop)
-{
-	config_token_t token = DEFAULT;
-
-	while (token < CONFIG_TOKEN_NONE) {
-		if (strncmp(prop, token_names[token], 4) == 0)
-			break;
-		token++;
-	}
-	return (token);
-}
-
-/*
- * Per-port property update routines.
- *
- * nxge_update_rxdma_grp_properties() distributes the RDC groups across
- * the sibling ports according to the configuration token and publishes
- * the resulting "rxdma-grp-cfg" properties on each port.
- */
-
-static nxge_status_t
-nxge_update_rxdma_grp_properties(p_nxge_t nxgep, config_token_t token,
-	dev_info_t *s_dip[])
-{
-	nxge_status_t status = NXGE_OK;
-	int ddi_status;
-	int num_ports = nxgep->nports;
-	int port, bits, j;
-	uint8_t start_grp = 0, num_grps = 0;
-	p_nxge_param_t param_arr;
-	uint32_t grp_bitmap[MAX_SIBLINGS];
-	int custom_start_grp[MAX_SIBLINGS];
-	int custom_num_grp[MAX_SIBLINGS];
-	uint8_t bad_config = B_FALSE;
-	char *start_prop, *num_prop, *cfg_prop;
-
-	start_grp = 0;
-	param_arr = nxgep->param_arr;
-	start_prop = param_arr[param_rdc_grps_start].fcode_name;
-	num_prop = param_arr[param_rx_rdc_grps].fcode_name;
-
-	switch (token) {
-	case FAIR:
-		cfg_prop = "fair";
-		for (port = 0; port < num_ports; port++) {
-			custom_num_grp[port] =
-				(num_ports == 4) ?
-				p4_rdcgrp_fair[port] :
-				p2_rdcgrp_fair[port];
-			custom_start_grp[port] = start_grp;
-			start_grp += custom_num_grp[port];
-		}
-		break;
-
-	case EQUAL:
-		cfg_prop = "equal";
-		for (port = 0; port < num_ports; port++) {
-			custom_num_grp[port] =
-				(num_ports == 4) ?
-				p4_rdcgrp_equal[port] :
-				p2_rdcgrp_equal[port];
-			custom_start_grp[port] = start_grp;
-			start_grp += custom_num_grp[port];
-		}
-		break;
-
-
-	case CLASSIFY:
-		cfg_prop = "classify";
-		for (port = 0; port < num_ports; port++) {
-			custom_num_grp[port] = (num_ports == 4) ?
-				p4_rdcgrp_cls[port] : p2_rdcgrp_cls[port];
-			custom_start_grp[port] = start_grp;
-			start_grp += custom_num_grp[port];
-		}
-		break;
-
-	case CUSTOM:
-		cfg_prop = "custom";
-		/* See if it is a good config */
-		num_grps = 0;
-		for (port = 0; port < num_ports; port++) {
-			custom_start_grp[port] =
-				ddi_prop_get_int(DDI_DEV_T_NONE, s_dip[port],
-				DDI_PROP_DONTPASS, start_prop, -1);
-			if ((custom_start_grp[port] == -1) ||
-				(custom_start_grp[port] >=
-					NXGE_MAX_RDC_GRPS)) {
-				bad_config = B_TRUE;
-				break;
-			}
-			custom_num_grp[port] = ddi_prop_get_int(
-				DDI_DEV_T_NONE,
-				s_dip[port],
-				DDI_PROP_DONTPASS,
-				num_prop, -1);
-
-			if ((custom_num_grp[port] == -1) ||
-				(custom_num_grp[port] >
-					NXGE_MAX_RDC_GRPS) ||
-				((custom_num_grp[port] +
-						custom_start_grp[port]) >=
-					NXGE_MAX_RDC_GRPS)) {
-				bad_config = B_TRUE;
-				break;
-			}
-			num_grps += custom_num_grp[port];
-			if (num_grps > NXGE_MAX_RDC_GRPS) {
-				bad_config = B_TRUE;
-				break;
-			}
-			grp_bitmap[port] = 0;
-			for (bits = 0;
-				bits < custom_num_grp[port];
-				bits++) {
-				grp_bitmap[port] |=
-					(1 << (bits + custom_start_grp[port]));
-			}
-
-		}
-
-		if (bad_config == B_FALSE) {
-			/* check for overlap */
-			for (port = 0; port < num_ports - 1; port++) {
-				for (j = port + 1; j < num_ports; j++) {
-					if (grp_bitmap[port] &
-						grp_bitmap[j]) {
-						bad_config = B_TRUE;
-						break;
-					}
-				}
-				if (bad_config == B_TRUE)
-					break;
-			}
-		}
-		if (bad_config == B_TRUE) {
-			/* use default config */
-			for (port = 0; port < num_ports; port++) {
-				custom_num_grp[port] =
-					(num_ports == 4) ?
-					p4_rx_fair[port] : p2_rx_fair[port];
-				custom_start_grp[port] = start_grp;
-				start_grp += custom_num_grp[port];
-			}
-		}
-		break;
-
-	default:
-		/* use default config */
-		cfg_prop = "fair";
-		for (port = 0; port < num_ports; port++) {
-			custom_num_grp[port] = (num_ports == 4) ?
-				p4_rx_fair[port] : p2_rx_fair[port];
-			custom_start_grp[port] = start_grp;
-			start_grp += custom_num_grp[port];
-		}
-		break;
-	}
-
-	/* Now Update the rx properties */
-	for (port = 0; port < num_ports; port++) {
-		ddi_status = ddi_prop_update_string(DDI_DEV_T_NONE, s_dip[port],
-			"rxdma-grp-cfg", cfg_prop);
-		if (ddi_status != DDI_PROP_SUCCESS) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-					" property %s not updating",
-					cfg_prop));
-			status |= NXGE_DDI_FAILED;
-		}
-		ddi_status = ddi_prop_update_int(DDI_DEV_T_NONE, s_dip[port],
-			num_prop, custom_num_grp[port]);
-
-		if (ddi_status != DDI_PROP_SUCCESS) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-					" property %s not updating",
-					num_prop));
-			status |= NXGE_DDI_FAILED;
-		}
-		ddi_status = ddi_prop_update_int(DDI_DEV_T_NONE, s_dip[port],
-			start_prop, custom_start_grp[port]);
-
-		if (ddi_status != DDI_PROP_SUCCESS) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-					" property %s not updating",
-					start_prop));
-			status |= NXGE_DDI_FAILED;
-		}
-	}
-	if (status & NXGE_DDI_FAILED)
-		status |= NXGE_ERROR;
-
-	return (status);
-}
-
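-/*
- * Distribute the receive DMA channels across the sibling ports according
- * to the configuration token and publish the result as "rxdma-cfg" and
- * the per-port start/count soft properties. A bad custom layout falls
- * back to the "fair" defaults.
- */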
-static nxge_status_t
-nxge_update_rxdma_properties(p_nxge_t nxgep, config_token_t token,
-	dev_info_t *s_dip[])
-{
-	nxge_status_t status = NXGE_OK;
-	int ddi_status;
-	int num_ports = nxgep->nports;
-	int port, bits, j;
-	uint8_t start_rdc = 0, num_rdc = 0;
-	p_nxge_param_t param_arr;
-	uint32_t rdc_bitmap[MAX_SIBLINGS];
-	int custom_start_rdc[MAX_SIBLINGS];
-	int custom_num_rdc[MAX_SIBLINGS];
-	uint8_t bad_config = B_FALSE;
-	int *prop_val;
-	uint_t prop_len;
-	char *start_rdc_prop, *num_rdc_prop, *cfg_prop;
-
-	start_rdc = 0;
-	param_arr = nxgep->param_arr;
-	start_rdc_prop = param_arr[param_rxdma_channels_begin].fcode_name;
-	num_rdc_prop = param_arr[param_rxdma_channels].fcode_name;
-
-	switch (token) {
-	case FAIR:
-		cfg_prop = "fair";
-		for (port = 0; port < num_ports; port++) {
-			custom_num_rdc[port] = (num_ports == 4) ?
-				p4_rx_fair[port] : p2_rx_fair[port];
-			custom_start_rdc[port] = start_rdc;
-			start_rdc += custom_num_rdc[port];
-		}
-		break;
-
-	case EQUAL:
-		cfg_prop = "equal";
-		for (port = 0; port < num_ports; port++) {
-			custom_num_rdc[port] = (num_ports == 4) ?
-				p4_rx_equal[port] :
-				p2_rx_equal[port];
-			custom_start_rdc[port] = start_rdc;
-			start_rdc += custom_num_rdc[port];
-		}
-		break;
-
-	case CUSTOM:
-		cfg_prop = "custom";
-		/* See if it is a good config */
-		num_rdc = 0;
-		for (port = 0; port < num_ports; port++) {
-			ddi_status = ddi_prop_lookup_int_array(
-				DDI_DEV_T_ANY,
-				s_dip[port], 0,
-				start_rdc_prop,
-				&prop_val,
-				&prop_len);
-			if (ddi_status == DDI_SUCCESS)
-				custom_start_rdc[port] = *prop_val;
-			else {
-				NXGE_DEBUG_MSG((nxgep, CFG_CTL,
-						" %s custom start port %d"
-						" read failed ",
-						" rxdma-cfg", port));
-				bad_config = B_TRUE;
-				status |= NXGE_DDI_FAILED;
-			}
-			if ((custom_start_rdc[port] == -1) ||
-				(custom_start_rdc[port] >=
-					NXGE_MAX_RDCS)) {
-				NXGE_DEBUG_MSG((nxgep, CFG_CTL,
-						" %s custom start %d"
-						" out of range %x ",
-						" rxdma-cfg",
-						port,
-						custom_start_rdc[port]));
-				bad_config = B_TRUE;
-				break;
-			}
-			ddi_status = ddi_prop_lookup_int_array(
-				DDI_DEV_T_ANY,
-				s_dip[port],
-				0,
-				num_rdc_prop,
-				&prop_val,
-				&prop_len);
-
-			if (ddi_status == DDI_SUCCESS)
-				custom_num_rdc[port] = *prop_val;
-			else {
-				NXGE_DEBUG_MSG((nxgep, CFG_CTL,
-					" %s custom num port %d"
-					" read failed ",
-					"rxdma-cfg", port));
-				bad_config = B_TRUE;
-				status |= NXGE_DDI_FAILED;
-			}
-
-			if ((custom_num_rdc[port] == -1) ||
-					(custom_num_rdc[port] >
-						NXGE_MAX_RDCS) ||
-					((custom_num_rdc[port] +
-						custom_start_rdc[port]) >
-					NXGE_MAX_RDCS)) {
-				NXGE_DEBUG_MSG((nxgep, CFG_CTL,
-					" %s custom num %d"
-					" out of range %x ",
-					" rxdma-cfg",
-					port, custom_num_rdc[port]));
-				bad_config = B_TRUE;
-				break;
-			}
-			num_rdc += custom_num_rdc[port];
-			if (num_rdc > NXGE_MAX_RDCS) {
-				bad_config = B_TRUE;
-				break;
-			}
-			rdc_bitmap[port] = 0;
-			for (bits = 0;
-				bits < custom_num_rdc[port]; bits++) {
-				rdc_bitmap[port] |=
-					(1 << (bits + custom_start_rdc[port]));
-			}
-		}
-
-		if (bad_config == B_FALSE) {
-			/* check for overlap */
-			for (port = 0; port < num_ports - 1; port++) {
-				for (j = port + 1; j < num_ports; j++) {
-					if (rdc_bitmap[port] &
-						rdc_bitmap[j]) {
-						NXGE_DEBUG_MSG((nxgep,
-							CFG_CTL,
-							" rxdma-cfg"
-							" property custom"
-							" bit overlap"
-							" %d %d ",
-							port, j));
-						bad_config = B_TRUE;
-						break;
-					}
-				}
-				if (bad_config == B_TRUE)
-					break;
-			}
-		}
-		if (bad_config == B_TRUE) {
-			/* use default config */
-			NXGE_DEBUG_MSG((nxgep, CFG_CTL,
-				" rxdma-cfg property:"
-				" bad custom config:"
-				" use default"));
-			for (port = 0; port < num_ports; port++) {
-				custom_num_rdc[port] =
-					(num_ports == 4) ?
-					p4_rx_fair[port] :
-					p2_rx_fair[port];
-				custom_start_rdc[port] = start_rdc;
-				start_rdc += custom_num_rdc[port];
-			}
-		}
-		break;
-
-	default:
-		/* use default config */
-		cfg_prop = "fair";
-		for (port = 0; port < num_ports; port++) {
-			custom_num_rdc[port] = (num_ports == 4) ?
-				p4_rx_fair[port] : p2_rx_fair[port];
-			custom_start_rdc[port] = start_rdc;
-			start_rdc += custom_num_rdc[port];
-		}
-		break;
-	}
-
-	/* Now Update the rx properties */
-	for (port = 0; port < num_ports; port++) {
-		NXGE_DEBUG_MSG((nxgep, CFG_CTL,
-			" update property rxdma-cfg with %s ", cfg_prop));
-		ddi_status = ddi_prop_update_string(DDI_DEV_T_NONE, s_dip[port],
-			"rxdma-cfg", cfg_prop);
-		if (ddi_status != DDI_PROP_SUCCESS) {
-			NXGE_DEBUG_MSG((nxgep, CFG_CTL,
-				" property rxdma-cfg is not updating to %s",
-				cfg_prop));
-			status |= NXGE_DDI_FAILED;
-		}
-		NXGE_DEBUG_MSG((nxgep, CFG_CTL, " update property %s with %d ",
-			num_rdc_prop, custom_num_rdc[port]));
-
-		ddi_status = ddi_prop_update_int(DDI_DEV_T_NONE, s_dip[port],
-			num_rdc_prop, custom_num_rdc[port]);
-
-		if (ddi_status != DDI_PROP_SUCCESS) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				" property %s not updating with %d",
-				num_rdc_prop, custom_num_rdc[port]));
-			status |= NXGE_DDI_FAILED;
-		}
-		NXGE_DEBUG_MSG((nxgep, CFG_CTL, " update property %s with %d ",
-			start_rdc_prop, custom_start_rdc[port]));
-		ddi_status = ddi_prop_update_int(DDI_DEV_T_NONE, s_dip[port],
-			start_rdc_prop, custom_start_rdc[port]);
-
-		if (ddi_status != DDI_PROP_SUCCESS) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				" property %s not updating with %d ",
-				start_rdc_prop, custom_start_rdc[port]));
-			status |= NXGE_DDI_FAILED;
-		}
-	}
-	if (status & NXGE_DDI_FAILED)
-		status |= NXGE_ERROR;
-	return (status);
-}
-
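-/*
- * Distribute the transmit DMA channels across the sibling ports according
- * to the configuration token and publish the result as "txdma-cfg" and
- * the per-port start/count soft properties. A bad custom layout falls
- * back to the "fair" defaults.
- */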
-static nxge_status_t
-nxge_update_txdma_properties(p_nxge_t nxgep, config_token_t token,
-	dev_info_t *s_dip[])
-{
-	nxge_status_t status = NXGE_OK;
-	int ddi_status = DDI_SUCCESS;
-	int num_ports = nxgep->nports;
-	int port, bits, j;
-	uint8_t start_tdc = 0, num_tdc = 0;
-	p_nxge_param_t param_arr;
-	uint32_t tdc_bitmap[MAX_SIBLINGS];
-	int custom_start_tdc[MAX_SIBLINGS];
-	int custom_num_tdc[MAX_SIBLINGS];
-	uint8_t bad_config = B_FALSE;
-	int *prop_val;
-	uint_t prop_len;
-	char *start_tdc_prop, *num_tdc_prop, *cfg_prop;
-
-	start_tdc = 0;
-	param_arr = nxgep->param_arr;
-	start_tdc_prop = param_arr[param_txdma_channels_begin].fcode_name;
-	num_tdc_prop = param_arr[param_txdma_channels].fcode_name;
-
-	switch (token) {
-	case FAIR:
-		cfg_prop = "fair";
-		for (port = 0; port < num_ports; port++) {
-			custom_num_tdc[port] = (num_ports == 4) ?
-				p4_tx_fair[port] : p2_tx_fair[port];
-			custom_start_tdc[port] = start_tdc;
-			start_tdc += custom_num_tdc[port];
-		}
-		break;
-
-	case EQUAL:
-		cfg_prop = "equal";
-		for (port = 0; port < num_ports; port++) {
-			custom_num_tdc[port] = (num_ports == 4) ?
-				p4_tx_equal[port] : p2_tx_equal[port];
-			custom_start_tdc[port] = start_tdc;
-			start_tdc += custom_num_tdc[port];
-		}
-		break;
-
-	case CUSTOM:
-		cfg_prop = "custom";
-		/* See if it is a good config */
-		num_tdc = 0;
-		for (port = 0; port < num_ports; port++) {
-			ddi_status = ddi_prop_lookup_int_array(
-				DDI_DEV_T_ANY, s_dip[port], 0, start_tdc_prop,
-				&prop_val, &prop_len);
-			if (ddi_status == DDI_SUCCESS)
-				custom_start_tdc[port] = *prop_val;
-			else {
-				NXGE_DEBUG_MSG((nxgep, CFG_CTL,
-					" %s custom start port %d"
-					" read failed ", " txdma-cfg", port));
-				bad_config = B_TRUE;
-				status |= NXGE_DDI_FAILED;
-			}
-
-			if ((custom_start_tdc[port] == -1) ||
-					(custom_start_tdc[port] >=
-					NXGE_MAX_TDCS)) {
-				NXGE_DEBUG_MSG((nxgep, CFG_CTL,
-					" %s custom start %d"
-					" out of range %x ", " txdma-cfg",
-					port, custom_start_tdc[port]));
-				bad_config = B_TRUE;
-				break;
-			}
-
-			ddi_status = ddi_prop_lookup_int_array(
-				DDI_DEV_T_ANY, s_dip[port], 0, num_tdc_prop,
-				&prop_val, &prop_len);
-			if (ddi_status == DDI_SUCCESS)
-				custom_num_tdc[port] = *prop_val;
-			else {
-				NXGE_DEBUG_MSG((nxgep, CFG_CTL,
-					" %s custom num port %d"
-					" read failed ", " txdma-cfg", port));
-				bad_config = B_TRUE;
-				status |= NXGE_DDI_FAILED;
-			}
-
-			if ((custom_num_tdc[port] == -1) ||
-					(custom_num_tdc[port] >
-						NXGE_MAX_TDCS) ||
-					((custom_num_tdc[port] +
-						custom_start_tdc[port]) >
-					NXGE_MAX_TDCS)) {
-				NXGE_DEBUG_MSG((nxgep, CFG_CTL,
-					" %s custom num %d"
-					" out of range %x ", " txdma-cfg",
-					port, custom_num_tdc[port]));
-				bad_config = B_TRUE;
-				break;
-			}
-			num_tdc += custom_num_tdc[port];
-			if (num_tdc > NXGE_MAX_TDCS) {
-				bad_config = B_TRUE;
-				break;
-			}
-			tdc_bitmap[port] = 0;
-			for (bits = 0;
-				bits < custom_num_tdc[port]; bits++) {
-				tdc_bitmap[port] |=
-					(1 <<
-					(bits + custom_start_tdc[port]));
-			}
-
-		}
-
-		if (bad_config == B_FALSE) {
-			/* check for overlap */
-			for (port = 0; port < num_ports - 1; port++) {
-				for (j = port + 1; j < num_ports; j++) {
-					if (tdc_bitmap[port] &
-						tdc_bitmap[j]) {
-						NXGE_DEBUG_MSG((nxgep, CFG_CTL,
-							" txdma-cfg"
-							" property custom"
-							" bit overlap"
-							" %d %d ",
-							port, j));
-						bad_config = B_TRUE;
-						break;
-					}
-				}
-				if (bad_config == B_TRUE)
-					break;
-			}
-		}
-		if (bad_config == B_TRUE) {
-			/* use default config */
-			NXGE_DEBUG_MSG((nxgep, CFG_CTL,
-				" txdma-cfg property:"
-				" bad custom config:" " use default"));
-
-			for (port = 0; port < num_ports; port++) {
-				custom_num_tdc[port] = (num_ports == 4) ?
-					p4_tx_fair[port] : p2_tx_fair[port];
-				custom_start_tdc[port] = start_tdc;
-				start_tdc += custom_num_tdc[port];
-			}
-		}
-		break;
-
-	default:
-		/* use default config */
-		cfg_prop = "fair";
-		for (port = 0; port < num_ports; port++) {
-			custom_num_tdc[port] = (num_ports == 4) ?
-				p4_tx_fair[port] : p2_tx_fair[port];
-			custom_start_tdc[port] = start_tdc;
-			start_tdc += custom_num_tdc[port];
-		}
-		break;
-	}
-
-	/* Now Update the tx properties */
-	for (port = 0; port < num_ports; port++) {
-		NXGE_DEBUG_MSG((nxgep, CFG_CTL,
-			" update property txdma-cfg with %s ", cfg_prop));
-		ddi_status = ddi_prop_update_string(DDI_DEV_T_NONE, s_dip[port],
-			"txdma-cfg", cfg_prop);
-		if (ddi_status != DDI_PROP_SUCCESS) {
-			NXGE_DEBUG_MSG((nxgep, CFG_CTL,
-				" property txdma-cfg is not updating to %s",
-				cfg_prop));
-			status |= NXGE_DDI_FAILED;
-		}
-		NXGE_DEBUG_MSG((nxgep, CFG_CTL, " update property %s with %d ",
-			num_tdc_prop, custom_num_tdc[port]));
-
-		ddi_status = ddi_prop_update_int(DDI_DEV_T_NONE, s_dip[port],
-			num_tdc_prop, custom_num_tdc[port]);
-
-		if (ddi_status != DDI_PROP_SUCCESS) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				" property %s not updating with %d",
-				num_tdc_prop,
-				custom_num_tdc[port]));
-			status |= NXGE_DDI_FAILED;
-		}
-
-		NXGE_DEBUG_MSG((nxgep, CFG_CTL, " update property %s with %d ",
-			start_tdc_prop, custom_start_tdc[port]));
-
-		ddi_status = ddi_prop_update_int(DDI_DEV_T_NONE, s_dip[port],
-			start_tdc_prop, custom_start_tdc[port]);
-		if (ddi_status != DDI_PROP_SUCCESS) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				" property %s not updating with %d ",
-				start_tdc_prop, custom_start_tdc[port]));
-			status |= NXGE_DDI_FAILED;
-		}
-	}
-	if (status & NXGE_DDI_FAILED)
-		status |= NXGE_ERROR;
-	return (status);
-}
-
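-/*
- * Dispatch the per-port property update for the requested configuration
- * class (TXDMA, RXDMA or RXDMA group). TXDMA/RXDMA updates are skipped
- * when only OBP properties are used (nxge_dma_obp_props_only).
- */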
-static nxge_status_t
-nxge_update_cfg_properties(p_nxge_t nxgep, uint32_t flags,
-	config_token_t token, dev_info_t *s_dip[])
-{
-	nxge_status_t status = NXGE_OK;
-
-	switch (flags) {
-	case COMMON_TXDMA_CFG:
-		if (nxge_dma_obp_props_only == 0)
-			status = nxge_update_txdma_properties(nxgep,
-				token, s_dip);
-		break;
-	case COMMON_RXDMA_CFG:
-		if (nxge_dma_obp_props_only == 0)
-			status = nxge_update_rxdma_properties(nxgep,
-				token, s_dip);
-
-		break;
-	case COMMON_RXDMA_GRP_CFG:
-		status = nxge_update_rxdma_grp_properties(nxgep,
-			token, s_dip);
-		break;
-	default:
-		return (NXGE_ERROR);
-	}
-	return (status);
-}
-
-/*
- * Verify consistency across ports.
- * (This may require publishing the properties on all the ports.)
- *
- * What if properties are published on the function 0 device only?
- *
- * rxdma-cfg, txdma-cfg, rxdma-grp-cfg (required)
- * What about class configs?
- *
- * If consistent, update the property on all the siblings and set a flag
- * in the hardware shared register. The rest of the siblings will check
- * the flag; if the flag is set, they will use the updated property
- * without doing any validation.
- */
-
-nxge_status_t
-nxge_cfg_verify_set_classify_prop(p_nxge_t nxgep, char *prop,
-	uint64_t known_cfg, uint32_t override, dev_info_t *c_dip[])
-{
-	nxge_status_t status = NXGE_OK;
-	int ddi_status = DDI_SUCCESS;
-	int i = 0, found = 0, update_prop = B_TRUE;
-	int *cfg_val;
-	uint_t new_value, cfg_value[MAX_SIBLINGS];
-	uint_t prop_len;
-	uint_t known_cfg_value;
-
-	known_cfg_value = (uint_t)known_cfg;
-
-	if (override == B_TRUE) {
-		new_value = known_cfg_value;
-		for (i = 0; i < nxgep->nports; i++) {
-			ddi_status = ddi_prop_update_int(DDI_DEV_T_NONE,
-				c_dip[i], prop, new_value);
-#ifdef NXGE_DEBUG_ERROR
-			if (ddi_status != DDI_PROP_SUCCESS)
-				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-					" property %s failed update ", prop));
-#endif
-		}
-		if (ddi_status != DDI_PROP_SUCCESS)
-			return (NXGE_ERROR | NXGE_DDI_FAILED);
-	}
-	for (i = 0; i < nxgep->nports; i++) {
-		cfg_value[i] = known_cfg_value;
-		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, c_dip[i], 0,
-				prop, &cfg_val,
-				&prop_len) == DDI_PROP_SUCCESS) {
-			cfg_value[i] = *cfg_val;
-			ddi_prop_free(cfg_val);
-			found++;
-		}
-	}
-
-	if (found != i) {
-		NXGE_DEBUG_MSG((nxgep, CFG_CTL,
-			" property %s not specified on all ports", prop));
-		if (found == 0) {
-			/* not specified: Use default */
-			NXGE_DEBUG_MSG((nxgep, CFG_CTL,
-				" property %s not specified on any port:"
-				" Using default", prop));
-			new_value = known_cfg_value;
-		} else {
-			/* specified on some */
-			NXGE_DEBUG_MSG((nxgep, CFG_CTL,
-				" property %s not specified"
-				" on some ports: Using default", prop));
-			/* ? use p0 value instead ? */
-			new_value = known_cfg_value;
-		}
-	} else {
-		/* check type and consistency */
-		/* found on all devices */
-		for (i = 1; i < found; i++) {
-			if (cfg_value[i] != cfg_value[i - 1]) {
-				NXGE_DEBUG_MSG((nxgep, CFG_CTL,
-					" property %s inconsistent:"
-					" Using default", prop));
-				new_value = known_cfg_value;
-				break;
-			}
-			/*
-			 * Found on all the ports and consistent. Nothing to
-			 * do.
-			 */
-			update_prop = B_FALSE;
-		}
-	}
-
-	if (update_prop == B_TRUE) {
-		for (i = 0; i < nxgep->nports; i++) {
-			ddi_status = ddi_prop_update_int(DDI_DEV_T_NONE,
-				c_dip[i], prop, new_value);
-#ifdef NXGE_DEBUG_ERROR
-			if (ddi_status != DDI_SUCCESS)
-				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-					" property %s not updating with %d"
-					" Using default",
-					prop, new_value));
-#endif
-			if (ddi_status != DDI_PROP_SUCCESS)
-				status |= NXGE_DDI_FAILED;
-		}
-	}
-	if (status & NXGE_DDI_FAILED)
-		status |= NXGE_ERROR;
-
-	return (status);
-}
-
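-/*
- * Return the configuration value to use for a class property; the
- * rx-quick-cfg profile (web server or L3 distribute) overrides the
- * default parameter value.
- */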
-static uint64_t
-nxge_class_get_known_cfg(p_nxge_t nxgep, int class_prop, int rx_quick_cfg)
-{
-	int start_prop;
-	uint64_t cfg_value;
-	p_nxge_param_t param_arr;
-
-	param_arr = nxgep->param_arr;
-	cfg_value = param_arr[class_prop].value;
-	start_prop = param_h1_init_value;
-
-	/* update the properties per quick config */
-	switch (rx_quick_cfg) {
-	case CFG_L3_WEB:
-	case CFG_L3_DISTRIBUTE:
-		cfg_value = nxge_classify_get_cfg_value(nxgep,
-			rx_quick_cfg, class_prop - start_prop);
-		break;
-	default:
-		cfg_value = param_arr[class_prop].value;
-		break;
-	}
-	return (cfg_value);
-}
-
-static nxge_status_t
-nxge_cfg_verify_set_classify(p_nxge_t nxgep, dev_info_t *c_dip[])
-{
-	nxge_status_t status = NXGE_OK;
-	int rx_quick_cfg, class_prop, start_prop, end_prop;
-	char *prop_name;
-	int override = B_TRUE;
-	uint64_t cfg_value;
-	p_nxge_param_t param_arr;
-
-	param_arr = nxgep->param_arr;
-	rx_quick_cfg = param_arr[param_rx_quick_cfg].value;
-	start_prop = param_h1_init_value;
-	end_prop = param_class_opt_ipv6_sctp;
-
-	/* update the properties per quick config */
-	if (rx_quick_cfg == CFG_NOT_SPECIFIED)
-		override = B_FALSE;
-
-	/*
-	 * These parameters affect the classification outcome.
-	 * They are used to configure the flow key and the TCAM key
-	 * for each of the IP classes. Also included are the H1 and H2
-	 * initial values, which affect the distribution as well as the
-	 * final hash value (and hence the offset into the RDC table and
-	 * the FCRAM bucket location).
-	 */
-	for (class_prop = start_prop; class_prop <= end_prop; class_prop++) {
-		prop_name = param_arr[class_prop].fcode_name;
-		cfg_value = nxge_class_get_known_cfg(nxgep,
-			class_prop, rx_quick_cfg);
-		status = nxge_cfg_verify_set_classify_prop(nxgep, prop_name,
-			cfg_value, override, c_dip);
-	}
-
-	/*
-	 * These properties do not affect the actual classification outcome;
-	 * they are used to enable/disable or tune the FFLP hardware:
-	 *
-	 * fcram_access_ratio, tcam_access_ratio, tcam_enable, llc_snap_enable
-	 */
-	override = B_FALSE;
-	for (class_prop = param_fcram_access_ratio;
-			class_prop <= param_llc_snap_enable; class_prop++) {
-		prop_name = param_arr[class_prop].fcode_name;
-		cfg_value = param_arr[class_prop].value;
-		status = nxge_cfg_verify_set_classify_prop(nxgep, prop_name,
-			cfg_value, override, c_dip);
-	}
-
-	return (status);
-}
-
-nxge_status_t
-nxge_cfg_verify_set(p_nxge_t nxgep, uint32_t flag)
-{
-	nxge_status_t status = NXGE_OK;
-	int i = 0, found = 0;
-	int num_siblings;
-	dev_info_t *c_dip[MAX_SIBLINGS + 1];
-	char *prop_val[MAX_SIBLINGS];
-	config_token_t c_token[MAX_SIBLINGS];
-	char *prop;
-
-	if (nxge_dma_obp_props_only)
-		return (NXGE_OK);
-
-	num_siblings = 0;
-	c_dip[num_siblings] = ddi_get_child(nxgep->p_dip);
-	while (c_dip[num_siblings]) {
-		c_dip[num_siblings + 1] =
-			ddi_get_next_sibling(c_dip[num_siblings]);
-		num_siblings++;
-	}
-
-	switch (flag) {
-	case COMMON_TXDMA_CFG:
-		prop = "txdma-cfg";
-		break;
-	case COMMON_RXDMA_CFG:
-		prop = "rxdma-cfg";
-		break;
-	case COMMON_RXDMA_GRP_CFG:
-		prop = "rxdma-grp-cfg";
-		break;
-	case COMMON_CLASS_CFG:
-		status = nxge_cfg_verify_set_classify(nxgep, c_dip);
-		return (status);
-	default:
-		return (NXGE_ERROR);
-	}
-
-	i = 0;
-	while (i < num_siblings) {
-		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, c_dip[i], 0, prop,
-				(char **)&prop_val[i]) == DDI_PROP_SUCCESS) {
-			c_token[i] = nxge_get_config_token(prop_val[i]);
-			ddi_prop_free(prop_val[i]);
-			found++;
-		} else
-			c_token[i] = CONFIG_TOKEN_NONE;
-		i++;
-	}
-
-	if (found != i) {
-		if (found == 0) {
-			/* not specified: Use default */
-			NXGE_DEBUG_MSG((nxgep, CFG_CTL,
-				" property %s not specified on any port:"
-					" Using default", prop));
-
-			status = nxge_update_cfg_properties(nxgep,
-				flag, FAIR, c_dip);
-			return (status);
-		} else {
-			/*
-			 * If the convention is to use the function 0 device,
-			 * then populate the other devices with this
-			 * configuration.
-			 *
-			 * The other alternative is to use the default config.
-			 */
-			/* not specified: Use default */
-			NXGE_DEBUG_MSG((nxgep, CFG_CTL,
-				" property %s not specified on some ports:"
-				" Using default", prop));
-			status = nxge_update_cfg_properties(nxgep,
-				flag, FAIR, c_dip);
-			return (status);
-		}
-	}
-
-	/* check type and consistency */
-	/* found on all devices */
-	for (i = 1; i < found; i++) {
-		if (c_token[i] != c_token[i - 1]) {
-			NXGE_DEBUG_MSG((nxgep, CFG_CTL,
-				" property %s inconsistent:"
-				" Using default", prop));
-			status = nxge_update_cfg_properties(nxgep,
-				flag, FAIR, c_dip);
-			return (status);
-		}
-	}
-
-	/*
-	 * Found on all the ports: check whether it is a custom
-	 * configuration. If custom, verify consistency.
-	 *
-	 * Finally, create the soft properties.
-	 */
-	status = nxge_update_cfg_properties(nxgep, flag, c_token[0], c_dip);
-	return (status);
-}
-
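-/*
- * Translate the "rx-quick-cfg" string property into its numeric
- * configuration value and store it under the corresponding fcode
- * parameter name.
- */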
-nxge_status_t
-nxge_cfg_verify_set_quick_config(p_nxge_t nxgep)
-{
-	nxge_status_t status = NXGE_OK;
-	int ddi_status = DDI_SUCCESS;
-	char *prop_val;
-	char *rx_prop;
-	char *prop;
-	uint32_t cfg_value = CFG_NOT_SPECIFIED;
-	p_nxge_param_t param_arr;
-
-	param_arr = nxgep->param_arr;
-	rx_prop = param_arr[param_rx_quick_cfg].fcode_name;
-
-	prop = "rx-quick-cfg";
-
-	/*
-	 * Valid values are:
-	 *
-	 * "web-server" "generic-server" "l3-classify" "flow-classify"
-	 */
-	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, nxgep->dip, 0,
-			prop, (char **)&prop_val) != DDI_PROP_SUCCESS) {
-		NXGE_DEBUG_MSG((nxgep, VPD_CTL,
-			" property %s not specified: using default ", prop));
-		cfg_value = CFG_NOT_SPECIFIED;
-	} else {
-		cfg_value = CFG_L3_DISTRIBUTE;
-		if (strncmp("web-server", (caddr_t)prop_val, 8) == 0) {
-			cfg_value = CFG_L3_WEB;
-			NXGE_DEBUG_MSG((nxgep, CFG_CTL,
-				" %s: web server ", prop));
-		}
-		if (strncmp("generic-server", (caddr_t)prop_val, 8) == 0) {
-			cfg_value = CFG_L3_DISTRIBUTE;
-			NXGE_DEBUG_MSG((nxgep, CFG_CTL,
-				" %s: distribute ", prop));
-		}
-		/* more */
-		ddi_prop_free(prop_val);
-	}
-
-	ddi_status = ddi_prop_update_int(DDI_DEV_T_NONE, nxgep->dip,
-		rx_prop, cfg_value);
-	if (ddi_status != DDI_PROP_SUCCESS)
-		status |= NXGE_DDI_FAILED;
-
-	/* now handle specified cases: */
-	if (status & NXGE_DDI_FAILED)
-		status |= NXGE_ERROR;
-	return (status);
-}
-
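-/*
- * Derive the adv-*-cap link properties from the "speed" and "duplex"
- * properties. If any adv-*-cap property is already present, return
- * without changing the existing settings.
- */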
-static void
-nxge_use_cfg_link_cfg(p_nxge_t nxgep)
-{
-	int *prop_val;
-	uint_t prop_len;
-	dev_info_t *dip;
-	int speed;
-	int duplex;
-	int adv_autoneg_cap;
-	int adv_10gfdx_cap;
-	int adv_10ghdx_cap;
-	int adv_1000fdx_cap;
-	int adv_1000hdx_cap;
-	int adv_100fdx_cap;
-	int adv_100hdx_cap;
-	int adv_10fdx_cap;
-	int adv_10hdx_cap;
-	int status = DDI_SUCCESS;
-
-	dip = nxgep->dip;
-
-	/*
-	 * first find out the card type and the supported link speeds and
-	 * features
-	 */
-	/* add code for card type */
-	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, "adv-autoneg-cap",
-			&prop_val, &prop_len) == DDI_PROP_SUCCESS) {
-		ddi_prop_free(prop_val);
-		return;
-	}
-
-	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, "adv-10gfdx-cap",
-			&prop_val, &prop_len) == DDI_PROP_SUCCESS) {
-		ddi_prop_free(prop_val);
-		return;
-	}
-
-	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, "adv-1000hdx-cap",
-			&prop_val, &prop_len) == DDI_PROP_SUCCESS) {
-		ddi_prop_free(prop_val);
-		return;
-	}
-
-	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, "adv-1000fdx-cap",
-			&prop_val, &prop_len) == DDI_PROP_SUCCESS) {
-		ddi_prop_free(prop_val);
-		return;
-	}
-
-	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, "adv-100fdx-cap",
-			&prop_val, &prop_len) == DDI_PROP_SUCCESS) {
-		ddi_prop_free(prop_val);
-		return;
-	}
-
-	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, "adv-100hdx-cap",
-			&prop_val, &prop_len) == DDI_PROP_SUCCESS) {
-		ddi_prop_free(prop_val);
-		return;
-	}
-
-	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, "adv-10fdx-cap",
-			&prop_val, &prop_len) == DDI_PROP_SUCCESS) {
-		ddi_prop_free(prop_val);
-		return;
-	}
-
-	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, "adv-10hdx-cap",
-			&prop_val, &prop_len) == DDI_PROP_SUCCESS) {
-		ddi_prop_free(prop_val);
-		return;
-	}
-
-	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip, 0, "speed",
-			(uchar_t **)&prop_val, &prop_len) == DDI_PROP_SUCCESS) {
-		if (strncmp("10000", (caddr_t)prop_val,
-				(size_t)prop_len) == 0) {
-			speed = 10000;
-		} else if (strncmp("1000", (caddr_t)prop_val,
-				(size_t)prop_len) == 0) {
-			speed = 1000;
-		} else if (strncmp("100", (caddr_t)prop_val,
-				(size_t)prop_len) == 0) {
-			speed = 100;
-		} else if (strncmp("10", (caddr_t)prop_val,
-				(size_t)prop_len) == 0) {
-			speed = 10;
-		} else if (strncmp("auto", (caddr_t)prop_val,
-				(size_t)prop_len) == 0) {
-			speed = 0;
-		} else {
-			NXGE_ERROR_MSG((nxgep, NXGE_NOTE,
-				"speed property is invalid, reverting to auto"));
-			speed = 0;
-		}
-		ddi_prop_free(prop_val);
-	} else
-		speed = 0;
-
-	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip, 0, "duplex",
-			(uchar_t **)&prop_val, &prop_len) == DDI_PROP_SUCCESS) {
-		if (strncmp("full", (caddr_t)prop_val,
-				(size_t)prop_len) == 0) {
-			duplex = 2;
-		} else if (strncmp("half", (caddr_t)prop_val,
-				(size_t)prop_len) == 0) {
-			duplex = 1;
-		} else if (strncmp("auto", (caddr_t)prop_val,
-				(size_t)prop_len) == 0) {
-			duplex = 0;
-		} else {
-			NXGE_ERROR_MSG((nxgep, NXGE_NOTE,
-				"duplex property is invalid,"
-				" reverting to auto"));
-			duplex = 0;
-		}
-		ddi_prop_free(prop_val);
-	} else
-		duplex = 0;
-
-	adv_autoneg_cap = (speed == 0) || (duplex == 0);
-	if (adv_autoneg_cap == 0) {
-		adv_10gfdx_cap = ((speed == 10000) && (duplex == 2));
-		adv_10ghdx_cap = adv_10gfdx_cap;
-		adv_10ghdx_cap |= ((speed == 10000) && (duplex == 1));
-		adv_1000fdx_cap = adv_10ghdx_cap;
-		adv_1000fdx_cap |= ((speed == 1000) && (duplex == 2));
-		adv_1000hdx_cap = adv_1000fdx_cap;
-		adv_1000hdx_cap |= ((speed == 1000) && (duplex == 1));
-		adv_100fdx_cap = adv_1000hdx_cap;
-		adv_100fdx_cap |= ((speed == 100) && (duplex == 2));
-		adv_100hdx_cap = adv_100fdx_cap;
-		adv_100hdx_cap |= ((speed == 100) && (duplex == 1));
-		adv_10fdx_cap = adv_100hdx_cap;
-		adv_10fdx_cap |= ((speed == 10) && (duplex == 2));
-		adv_10hdx_cap = adv_10fdx_cap;
-		adv_10hdx_cap |= ((speed == 10) && (duplex == 1));
-	} else if (speed == 0) {
-		adv_10gfdx_cap = (duplex == 2);
-		adv_10ghdx_cap = (duplex == 1);
-		adv_1000fdx_cap = (duplex == 2);
-		adv_1000hdx_cap = (duplex == 1);
-		adv_100fdx_cap = (duplex == 2);
-		adv_100hdx_cap = (duplex == 1);
-		adv_10fdx_cap = (duplex == 2);
-		adv_10hdx_cap = (duplex == 1);
-	}
-	if (duplex == 0) {
-		adv_10gfdx_cap = (speed == 0);
-		adv_10gfdx_cap |= (speed == 10000);
-		adv_10ghdx_cap = adv_10gfdx_cap;
-		adv_10ghdx_cap |= (speed == 10000);
-		adv_1000fdx_cap = adv_10ghdx_cap;
-		adv_1000fdx_cap |= (speed == 1000);
-		adv_1000hdx_cap = adv_1000fdx_cap;
-		adv_1000hdx_cap |= (speed == 1000);
-		adv_100fdx_cap = adv_1000hdx_cap;
-		adv_100fdx_cap |= (speed == 100);
-		adv_100hdx_cap = adv_100fdx_cap;
-		adv_100hdx_cap |= (speed == 100);
-		adv_10fdx_cap = adv_100hdx_cap;
-		adv_10fdx_cap |= (speed == 10);
-		adv_10hdx_cap = adv_10fdx_cap;
-		adv_10hdx_cap |= (speed == 10);
-	}
-	status = ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
-		"adv-autoneg-cap", &adv_autoneg_cap, 1);
-	if (status)
-		return;
-
-	status = ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
-		"adv-10gfdx-cap", &adv_10gfdx_cap, 1);
-	if (status)
-		goto nxge_map_myargs_to_gmii_fail1;
-
-	status = ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
-		"adv-10ghdx-cap", &adv_10ghdx_cap, 1);
-	if (status)
-		goto nxge_map_myargs_to_gmii_fail2;
-
-	status = ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
-		"adv-1000fdx-cap", &adv_1000fdx_cap, 1);
-	if (status)
-		goto nxge_map_myargs_to_gmii_fail3;
-
-	status = ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
-		"adv-1000hdx-cap", &adv_1000hdx_cap, 1);
-	if (status)
-		goto nxge_map_myargs_to_gmii_fail4;
-
-	status = ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
-		"adv-100fdx-cap", &adv_100fdx_cap, 1);
-	if (status)
-		goto nxge_map_myargs_to_gmii_fail5;
-
-	status = ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
-		"adv-100hdx-cap", &adv_100hdx_cap, 1);
-	if (status)
-		goto nxge_map_myargs_to_gmii_fail6;
-
-	status = ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
-		"adv-10fdx-cap", &adv_10fdx_cap, 1);
-	if (status)
-		goto nxge_map_myargs_to_gmii_fail7;
-
-	status = ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
-		"adv-10hdx-cap", &adv_10hdx_cap, 1);
-	if (status)
-		goto nxge_map_myargs_to_gmii_fail8;
-
-	return;
-
-nxge_map_myargs_to_gmii_fail9:
-	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "adv-10hdx-cap");
-
-nxge_map_myargs_to_gmii_fail8:
-	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "adv-10fdx-cap");
-
-nxge_map_myargs_to_gmii_fail7:
-	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "adv-100hdx-cap");
-
-nxge_map_myargs_to_gmii_fail6:
-	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "adv-100fdx-cap");
-
-nxge_map_myargs_to_gmii_fail5:
-	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "adv-1000hdx-cap");
-
-nxge_map_myargs_to_gmii_fail4:
-	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "adv-1000fdx-cap");
-
-nxge_map_myargs_to_gmii_fail3:
-	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "adv-10ghdx-cap");
-
-nxge_map_myargs_to_gmii_fail2:
-	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "adv-10gfdx-cap");
-
-nxge_map_myargs_to_gmii_fail1:
-	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "adv-autoneg-cap");
-}
-
-nxge_status_t
-nxge_get_config_properties(p_nxge_t nxgep)
-{
-	nxge_status_t status = NXGE_OK;
-	p_nxge_hw_list_t hw_p;
-	uint_t prop_len;
-	uchar_t *prop_val8;
-
-	NXGE_DEBUG_MSG((nxgep, VPD_CTL, " ==> nxge_get_config_properties"));
-
-	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			" nxge_get_config_properties:"
-			" common hardware not set (niu type 0x%x)",
-			nxgep->niu_type));
-		return (NXGE_ERROR);
-	}
-
-	/*
-	 * Get info on how many ports Neptune card has.
-	 */
-	switch (nxgep->niu_type) {
-	case N2_NIU:
-		nxgep->nports = 2;
-		nxgep->classifier.tcam_size = TCAM_NIU_TCAM_MAX_ENTRY;
-		if (nxgep->function_num > 1) {
-			return (NXGE_ERROR);
-		}
-		break;
-	case NEPTUNE_2:
-		if (nxgep->function_num > 1)
-			return (NXGE_ERROR);
-
-		/* Set Board Version Number */
-		nxgep->board_ver = 0;
-		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY,
-				nxgep->dip, 0, "board-model", &prop_val8,
-				&prop_len) == DDI_PROP_SUCCESS) {
-			if (prop_len > 9) {
-				if ((prop_val8[9] == '0') &&
-						(prop_val8[10] == '4'))
-					nxgep->board_ver = 4;
-			}
-			ddi_prop_free(prop_val8);
-		}
-		status = nxge_espc_num_ports_get(nxgep);
-		if (status != NXGE_OK)
-			return (NXGE_ERROR);
-
-		nxgep->classifier.tcam_size = TCAM_NXGE_TCAM_MAX_ENTRY;
-		break;
-
-	case NEPTUNE:
-	default:
-		status = nxge_espc_num_ports_get(nxgep);
-		if (status != NXGE_OK)
-			return (NXGE_ERROR);
-		nxgep->classifier.tcam_size = TCAM_NXGE_TCAM_MAX_ENTRY;
-		break;
-	}
-
-	status = nxge_get_mac_addr_properties(nxgep);
-	if (status != NXGE_OK)
-		return (NXGE_ERROR);
-
-	/*
-	 * Read the configuration type. If none is specified, use the default.
-	 *
-	 * Config types:
-	 *
-	 * equal (default): DMA channels, RDC groups, TCAM and FCRAM are
-	 * shared equally across all the ports.
-	 *
-	 * fair: DMA channels, RDC groups, TCAM and FCRAM are shared in
-	 * proportion to the port speed.
-	 *
-	 * custom: the DMA channel, RDC group, TCAM and FCRAM partitioning
-	 * is specified in nxge.conf. Each parameter needs to be read and
-	 * set up in the nxge structures.
-	 */
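-	/*
-	 * Illustrative only (the property names are the ones handled in
-	 * this file; the values shown are hypothetical): a driver.conf
-	 * entry selecting a layout might look like
-	 *
-	 *	txdma-cfg="custom";
-	 *	rxdma-cfg="fair";
-	 *	rx-quick-cfg="web-server";
-	 */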
-	switch (nxgep->niu_type) {
-	case N2_NIU:
-		NXGE_DEBUG_MSG((nxgep, VPD_CTL,
-			" ==> nxge_get_config_properties: N2"));
-		MUTEX_ENTER(&hw_p->nxge_cfg_lock);
-		if ((hw_p->flags & COMMON_CFG_VALID) !=
-			COMMON_CFG_VALID) {
-			status = nxge_cfg_verify_set(nxgep,
-				COMMON_RXDMA_GRP_CFG);
-			status = nxge_cfg_verify_set(nxgep,
-				COMMON_CLASS_CFG);
-			hw_p->flags |= COMMON_CFG_VALID;
-		}
-		MUTEX_EXIT(&hw_p->nxge_cfg_lock);
-		status = nxge_use_cfg_n2niu_properties(nxgep);
-		break;
-
-	case NEPTUNE:
-		NXGE_DEBUG_MSG((nxgep, VPD_CTL,
-			" ==> nxge_get_config_properties: Neptune"));
-		status = nxge_cfg_verify_set_quick_config(nxgep);
-		MUTEX_ENTER(&hw_p->nxge_cfg_lock);
-		if ((hw_p->flags & COMMON_CFG_VALID) !=
-			COMMON_CFG_VALID) {
-			status = nxge_cfg_verify_set(nxgep,
-				COMMON_TXDMA_CFG);
-			status = nxge_cfg_verify_set(nxgep,
-				COMMON_RXDMA_CFG);
-			status = nxge_cfg_verify_set(nxgep,
-				COMMON_RXDMA_GRP_CFG);
-			status = nxge_cfg_verify_set(nxgep,
-				COMMON_CLASS_CFG);
-			hw_p->flags |= COMMON_CFG_VALID;
-		}
-		MUTEX_EXIT(&hw_p->nxge_cfg_lock);
-		nxge_use_cfg_neptune_properties(nxgep);
-		status = NXGE_OK;
-		break;
-
-	case NEPTUNE_2:
-		NXGE_DEBUG_MSG((nxgep, VPD_CTL,
-			" ==> nxge_get_config_properties: Neptune-2"));
-		if (nxgep->function_num > 1)
-			return (NXGE_ERROR);
-		status = nxge_cfg_verify_set_quick_config(nxgep);
-		MUTEX_ENTER(&hw_p->nxge_cfg_lock);
-
-		if ((hw_p->flags & COMMON_CFG_VALID) !=
-			COMMON_CFG_VALID) {
-			status = nxge_cfg_verify_set(nxgep,
-				COMMON_TXDMA_CFG);
-			status = nxge_cfg_verify_set(nxgep,
-				COMMON_RXDMA_CFG);
-			status = nxge_cfg_verify_set(nxgep,
-				COMMON_RXDMA_GRP_CFG);
-			status = nxge_cfg_verify_set(nxgep,
-				COMMON_CLASS_CFG);
-			hw_p->flags |= COMMON_CFG_VALID;
-		}
-		MUTEX_EXIT(&hw_p->nxge_cfg_lock);
-
-		nxge_use_cfg_neptune_properties(nxgep);
-		status = NXGE_OK;
-		break;
-
-	default:
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			" nxge_get_config_properties:"
-			" unknown NIU type %x", nxgep->niu_type));
-		return (NXGE_ERROR);
-	}
-
-	NXGE_DEBUG_MSG((nxgep, VPD_CTL, " <== nxge_get_config_properties"));
-	return (status);
-}
-
-static nxge_status_t
-nxge_use_cfg_n2niu_properties(p_nxge_t nxgep)
-{
-	nxge_status_t status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, CFG_CTL, " ==> nxge_use_cfg_n2niu_properties"));
-
-	status = nxge_use_default_dma_config_n2(nxgep);
-	if (status != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			" ==> nxge_use_cfg_n2niu_properties (err 0x%x)",
-			status));
-		return (status | NXGE_ERROR);
-	}
-
-	(void) nxge_use_cfg_vlan_class_config(nxgep);
-	(void) nxge_use_cfg_mac_class_config(nxgep);
-	(void) nxge_use_cfg_class_config(nxgep);
-	(void) nxge_use_cfg_link_cfg(nxgep);
-
-	/*
-	 * Read in the hardware (fcode) properties. Use the ndd array to read
-	 * each property.
-	 */
-	(void) nxge_get_param_soft_properties(nxgep);
-	NXGE_DEBUG_MSG((nxgep, CFG_CTL, " <== nxge_use_cfg_n2niu_properties"));
-
-	return (status);
-}
-
-static void
-nxge_use_cfg_neptune_properties(p_nxge_t nxgep)
-{
-	NXGE_DEBUG_MSG((nxgep, CFG_CTL, "==> nxge_use_cfg_neptune_properties"));
-
-	(void) nxge_use_cfg_dma_config(nxgep);
-	(void) nxge_use_cfg_vlan_class_config(nxgep);
-	(void) nxge_use_cfg_mac_class_config(nxgep);
-	(void) nxge_use_cfg_class_config(nxgep);
-	(void) nxge_use_cfg_link_cfg(nxgep);
-
-	/*
-	 * Read in the hardware (fcode) properties. Use the ndd array to read
-	 * each property.
-	 */
-	(void) nxge_get_param_soft_properties(nxgep);
-	NXGE_DEBUG_MSG((nxgep, CFG_CTL, "<== nxge_use_cfg_neptune_properties"));
-}
-
-/*
- * FWARC 2006/556
- */
-
-static nxge_status_t
-nxge_use_default_dma_config_n2(p_nxge_t nxgep)
-{
-	int ndmas;
-	int nrxgp;
-	uint8_t func;
-	p_nxge_dma_pt_cfg_t p_dma_cfgp;
-	p_nxge_hw_pt_cfg_t p_cfgp;
-	int *prop_val;
-	uint_t prop_len;
-	int i;
-	nxge_status_t status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, OBP_CTL, "==> nxge_use_default_dma_config_n2"));
-
-	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
-	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
-
-	func = nxgep->function_num;
-	p_cfgp->function_number = func;
-	ndmas = NXGE_TDMA_PER_NIU_PORT;
-	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip, 0,
-			"tx-dma-channels", (int **)&prop_val,
-			&prop_len) == DDI_PROP_SUCCESS) {
-		p_cfgp->start_tdc = prop_val[0];
-		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
-			"==> nxge_use_default_dma_config_n2: tdc starts %d "
-			"(#%d)", p_cfgp->start_tdc, prop_len));
-
-		ndmas = prop_val[1];
-		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
-			"==> nxge_use_default_dma_config_n2: #tdc %d (#%d)",
-			ndmas, prop_len));
-		ddi_prop_free(prop_val);
-	} else {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_use_default_dma_config_n2: "
-			"get tx-dma-channels failed"));
-		return (NXGE_DDI_FAILED);
-	}
-
-	p_cfgp->max_tdcs = nxgep->max_tdcs = ndmas;
-	nxgep->tdc_mask = (ndmas - 1);
-
-	NXGE_DEBUG_MSG((nxgep, OBP_CTL, "==> nxge_use_default_dma_config_n2: "
-		"p_cfgp 0x%llx max_tdcs %d nxgep->max_tdcs %d start %d",
-		p_cfgp, p_cfgp->max_tdcs, nxgep->max_tdcs, p_cfgp->start_tdc));
-
-	/* Receive DMA */
-	ndmas = NXGE_RDMA_PER_NIU_PORT;
-	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip, 0,
-			"rx-dma-channels", (int **)&prop_val,
-			&prop_len) == DDI_PROP_SUCCESS) {
-		p_cfgp->start_rdc = prop_val[0];
-		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
-			"==> nxge_use_default_dma_config_n2(obp): rdc start %d"
-			" (#%d)", p_cfgp->start_rdc, prop_len));
-		ndmas = prop_val[1];
-		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
-			"==> nxge_use_default_dma_config_n2(obp):#rdc %d (#%d)",
-			ndmas, prop_len));
-		ddi_prop_free(prop_val);
-	} else {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_use_default_dma_config_n2: "
-			"get rx-dma-channel failed"));
-		return (NXGE_DDI_FAILED);
-	}
-
-	p_cfgp->max_rdcs = nxgep->max_rdcs = ndmas;
-	nxgep->rdc_mask = (ndmas - 1);
-
-	/* Hypervisor: rdc # and group # use the same # !! */
-	p_cfgp->max_grpids = p_cfgp->max_rdcs + p_cfgp->max_tdcs;
-	p_cfgp->start_grpid = 0;
-	p_cfgp->mif_ldvid = p_cfgp->mac_ldvid = p_cfgp->ser_ldvid = 0;
-
-	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip, 0,
-			"interrupts", (int **)&prop_val,
-			&prop_len) == DDI_PROP_SUCCESS) {
-		/*
-		 * For each device assigned, the content of each interrupts
-		 * property entry is its logical device group.
-		 *
-		 * The interrupts property is assigned in the following order:
-		 *
-		 *	MAC
-		 *	MIF (if configured)
-		 *	SYSTEM ERROR (if configured)
-		 *	first receive channel ... last receive channel
-		 *	first transmit channel ... last transmit channel
-		 *
-		 * prop_len should cover at least one MAC plus the total
-		 * number of rx and tx channels. Function 0 owns MIF and
-		 * SYSTEM ERROR.
-		 */
-		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
-			"==> nxge_use_default_dma_config_n2(obp): "
-			"# interrupts %d", prop_len));
-
-		switch (func) {
-		case 0:
-			p_cfgp->ldg_chn_start = 3;
-			p_cfgp->mac_ldvid = NXGE_MAC_LD_PORT0;
-			p_cfgp->mif_ldvid = NXGE_MIF_LD;
-			p_cfgp->ser_ldvid = NXGE_SYS_ERROR_LD;
-
-			break;
-		case 1:
-			p_cfgp->ldg_chn_start = 1;
-			p_cfgp->mac_ldvid = NXGE_MAC_LD_PORT1;
-
-			break;
-		default:
-			status = NXGE_DDI_FAILED;
-			break;
-		}
-
-		if (status != NXGE_OK)
-			return (status);
-
-		for (i = 0; i < prop_len; i++) {
-			p_cfgp->ldg[i] = prop_val[i];
-			NXGE_DEBUG_MSG((nxgep, OBP_CTL,
-				"==> nxge_use_default_dma_config_n2(obp): "
-				"interrupt #%d, ldg %d",
-				i, p_cfgp->ldg[i]));
-		}
-
-		p_cfgp->max_grpids = prop_len;
-		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
-			"==> nxge_use_default_dma_config_n2(obp): %d "
-			"(#%d) maxgrpids %d channel starts %d",
-			p_cfgp->mac_ldvid, i, p_cfgp->max_grpids,
-			p_cfgp->ldg_chn_start));
-		ddi_prop_free(prop_val);
-	} else {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_use_default_dma_config_n2: "
-			"get interrupts failed"));
-		return (NXGE_DDI_FAILED);
-	}
-
-	p_cfgp->max_ldgs = p_cfgp->max_grpids;
-	NXGE_DEBUG_MSG((nxgep, OBP_CTL,
-		"==> nxge_use_default_dma_config_n2: "
-		"p_cfgp 0x%llx max_rdcs %d nxgep->max_rdcs %d max_grpids %d"
-		"start_grpid %d macid %d mifid %d serrid %d",
-		p_cfgp, p_cfgp->max_rdcs, nxgep->max_rdcs, p_cfgp->max_grpids,
-		p_cfgp->start_grpid,
-		p_cfgp->mac_ldvid, p_cfgp->mif_ldvid, p_cfgp->ser_ldvid));
-
-	NXGE_DEBUG_MSG((nxgep, OBP_CTL, "==> nxge_use_default_dma_config_n2: "
-		"p_cfgp p%p start_ldg %d nxgep->max_ldgs %d",
-		p_cfgp, p_cfgp->start_ldg, p_cfgp->max_ldgs));
-
-	/*
-	 * RDC groups and the beginning RDC group assigned to this function.
-	 */
-	nrxgp = 2;
-	p_cfgp->max_rdc_grpids = nrxgp;
-	p_cfgp->start_rdc_grpid = (nxgep->function_num * nrxgp);
-
-	status = ddi_prop_update_int(DDI_DEV_T_NONE, nxgep->dip,
-		"rx-rdc-grps", nrxgp);
-	if (status) {
-		return (NXGE_DDI_FAILED);
-	}
-	status = ddi_prop_update_int(DDI_DEV_T_NONE, nxgep->dip,
-		"rx-rdc-grps-begin", p_cfgp->start_rdc_grpid);
-	if (status) {
-		(void) ddi_prop_remove(DDI_DEV_T_NONE, nxgep->dip,
-			"rx-rdc-grps");
-		return (NXGE_DDI_FAILED);
-	}
-	NXGE_DEBUG_MSG((nxgep, OBP_CTL, "==> nxge_use_default_dma_config_n2: "
-		"p_cfgp $%p # rdc groups %d start rdc group id %d",
-		p_cfgp, p_cfgp->max_rdc_grpids,
-		p_cfgp->start_rdc_grpid));
-
-	nxge_set_hw_dma_config(nxgep);
-	NXGE_DEBUG_MSG((nxgep, OBP_CTL, "<== nxge_use_default_dma_config_n2"));
-	return (status);
-}
-
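-/*
- * Set up the Neptune port's DMA channel layout from the fcode/.conf
- * properties; missing properties fall back to the equal-split defaults
- * and the chosen values are published back as soft properties.
- */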
-static void
-nxge_use_cfg_dma_config(p_nxge_t nxgep)
-{
-	int tx_ndmas, rx_ndmas, nrxgp;
-	p_nxge_dma_pt_cfg_t p_dma_cfgp;
-	p_nxge_hw_pt_cfg_t p_cfgp;
-	dev_info_t *dip;
-	p_nxge_param_t param_arr;
-	char *prop;
-	int *prop_val;
-	uint_t prop_len;
-
-	NXGE_DEBUG_MSG((nxgep, CFG_CTL, " ==> nxge_use_cfg_dma_config"));
-	param_arr = nxgep->param_arr;
-
-	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
-	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
-	dip = nxgep->dip;
-	p_cfgp->function_number = nxgep->function_num;
-	prop = param_arr[param_txdma_channels_begin].fcode_name;
-
-	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, prop,
-			&prop_val, &prop_len) == DDI_PROP_SUCCESS) {
-		p_cfgp->start_tdc = *prop_val;
-		ddi_prop_free(prop_val);
-	} else {
-		if (nxgep->nports == 2) {
-			tx_ndmas = (nxgep->function_num * p2_tx_equal[0]);
-		} else {
-			tx_ndmas = (nxgep->function_num * p4_tx_equal[0]);
-		}
-		(void) ddi_prop_update_int(DDI_DEV_T_NONE, nxgep->dip,
-			prop, tx_ndmas);
-		p_cfgp->start_tdc = tx_ndmas;
-	}
-
-	prop = param_arr[param_txdma_channels].fcode_name;
-	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, prop,
-			&prop_val, &prop_len) == DDI_PROP_SUCCESS) {
-		tx_ndmas = *prop_val;
-		ddi_prop_free(prop_val);
-	} else {
-		if (nxgep->nports == 2) {
-			tx_ndmas = p2_tx_equal[0];
-		} else {
-			tx_ndmas = p4_tx_equal[0];
-		}
-		(void) ddi_prop_update_int(DDI_DEV_T_NONE, nxgep->dip,
-			prop, tx_ndmas);
-	}
-
-	p_cfgp->max_tdcs = nxgep->max_tdcs = tx_ndmas;
-	nxgep->tdc_mask = (tx_ndmas - 1);
-	NXGE_DEBUG_MSG((nxgep, CFG_CTL, "==> nxge_use_cfg_dma_config: "
-		"p_cfgp 0x%llx max_tdcs %d nxgep->max_tdcs %d",
-		p_cfgp, p_cfgp->max_tdcs, nxgep->max_tdcs));
-
-	prop = param_arr[param_rxdma_channels_begin].fcode_name;
-
-	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, prop,
-			&prop_val, &prop_len) == DDI_PROP_SUCCESS) {
-		p_cfgp->start_rdc = *prop_val;
-		ddi_prop_free(prop_val);
-	} else {
-		if (nxgep->nports == 2) {
-			rx_ndmas = (nxgep->function_num * p2_rx_equal[0]);
-		} else {
-			rx_ndmas = (nxgep->function_num * p4_rx_equal[0]);
-		}
-		(void) ddi_prop_update_int(DDI_DEV_T_NONE, nxgep->dip,
-			prop, rx_ndmas);
-		p_cfgp->start_rdc = rx_ndmas;
-	}
-
-	prop = param_arr[param_rxdma_channels].fcode_name;
-
-	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, prop,
-			&prop_val, &prop_len) == DDI_PROP_SUCCESS) {
-		rx_ndmas = *prop_val;
-		ddi_prop_free(prop_val);
-	} else {
-		if (nxgep->nports == 2) {
-			rx_ndmas = p2_rx_equal[0];
-		} else {
-			rx_ndmas = p4_rx_equal[0];
-		}
-		(void) ddi_prop_update_int(DDI_DEV_T_NONE, nxgep->dip,
-			prop, rx_ndmas);
-	}
-
-	p_cfgp->max_rdcs = nxgep->max_rdcs = rx_ndmas;
-
-	prop = param_arr[param_rdc_grps_start].fcode_name;
-	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, prop,
-			&prop_val, &prop_len) == DDI_PROP_SUCCESS) {
-		p_cfgp->start_rdc_grpid = *prop_val;
-		ddi_prop_free(prop_val);
-		NXGE_DEBUG_MSG((nxgep, CFG_CTL,
-			"==> nxge_use_default_dma_config: "
-			"use property start_rdc_grpid %d ",
-			p_cfgp->start_rdc_grpid));
-	} else {
-		p_cfgp->start_rdc_grpid = nxgep->function_num;
-		(void) ddi_prop_update_int(DDI_DEV_T_NONE, nxgep->dip,
-			prop, p_cfgp->start_rdc_grpid);
-
-		NXGE_DEBUG_MSG((nxgep, CFG_CTL,
-			"==> nxge_use_default_dma_config: "
-			"use default "
-			"start_rdc_grpid %d (same as function #)",
-			p_cfgp->start_rdc_grpid));
-	}
-
-	prop = param_arr[param_rx_rdc_grps].fcode_name;
-	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, prop,
-			&prop_val, &prop_len) == DDI_PROP_SUCCESS) {
-		nrxgp = *prop_val;
-		ddi_prop_free(prop_val);
-	} else {
-		nrxgp = 1;
-		(void) ddi_prop_update_int(DDI_DEV_T_NONE, nxgep->dip,
-			prop, nrxgp);
-		NXGE_DEBUG_MSG((nxgep, CFG_CTL,
-			"==> nxge_use_default_dma_config: "
-			"num_rdc_grpid not found: use def:# of "
-			"rdc groups %d\n", nrxgp));
-	}
-
-	p_cfgp->max_rdc_grpids = nrxgp;
-
-	/*
-	 * 2/4 ports have the same hard-wired logical groups assigned.
-	 */
-	p_cfgp->start_ldg = nxgep->function_num * NXGE_LDGRP_PER_4PORTS;
-	p_cfgp->max_ldgs = NXGE_LDGRP_PER_4PORTS;
-
-	NXGE_DEBUG_MSG((nxgep, CFG_CTL, "==> nxge_use_default_dma_config: "
-		"p_cfgp 0x%llx max_rdcs %d nxgep->max_rdcs %d max_grpids %d"
-		"start_grpid %d",
-		p_cfgp, p_cfgp->max_rdcs, nxgep->max_rdcs, p_cfgp->max_grpids,
-		p_cfgp->start_grpid));
-
-	NXGE_DEBUG_MSG((nxgep, CFG_CTL, "==> nxge_use_cfg_dma_config: "
-		"p_cfgp 0x%016llx start_ldg %d nxgep->max_ldgs %d "
-		"start_rdc_grpid %d",
-		p_cfgp, p_cfgp->start_ldg, p_cfgp->max_ldgs,
-		p_cfgp->start_rdc_grpid));
-
-	prop = param_arr[param_rxdma_intr_time].fcode_name;
-
-	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, prop,
-			&prop_val, &prop_len) == DDI_PROP_SUCCESS) {
-		if ((prop_len > 0) && (prop_len <= p_cfgp->max_rdcs)) {
-			(void) ddi_prop_update_int_array(DDI_DEV_T_NONE,
-				nxgep->dip, prop, prop_val, prop_len);
-		}
-		ddi_prop_free(prop_val);
-	}
-	prop = param_arr[param_rxdma_intr_pkts].fcode_name;
-
-	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, prop,
-			&prop_val, &prop_len) == DDI_PROP_SUCCESS) {
-		if ((prop_len > 0) && (prop_len <= p_cfgp->max_rdcs)) {
-			(void) ddi_prop_update_int_array(DDI_DEV_T_NONE,
-				nxgep->dip, prop, prop_val, prop_len);
-		}
-		ddi_prop_free(prop_val);
-	}
-	nxge_set_hw_dma_config(nxgep);
-
-	NXGE_DEBUG_MSG((nxgep, CFG_CTL, "<== nxge_use_cfg_dma_config"));
-}
-
-static void
-nxge_use_cfg_vlan_class_config(p_nxge_t nxgep)
-{
-	uint_t vlan_cnt;
-	int *vlan_cfg_val;
-	int status;
-	p_nxge_param_t param_arr;
-	char *prop;
-
-	NXGE_DEBUG_MSG((nxgep, CFG_CTL, " ==> nxge_use_cfg_vlan_config"));
-	param_arr = nxgep->param_arr;
-	prop = param_arr[param_vlan_2rdc_grp].fcode_name;
-
-	status = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip, 0, prop,
-		&vlan_cfg_val, &vlan_cnt);
-	if (status == DDI_PROP_SUCCESS) {
-		status = ddi_prop_update_int_array(DDI_DEV_T_NONE,
-			nxgep->dip, prop, vlan_cfg_val, vlan_cnt);
-		ddi_prop_free(vlan_cfg_val);
-	}
-	nxge_set_hw_vlan_class_config(nxgep);
-	NXGE_DEBUG_MSG((nxgep, CFG_CTL, " <== nxge_use_cfg_vlan_config"));
-}
-
-static void
-nxge_use_cfg_mac_class_config(p_nxge_t nxgep)
-{
-	p_nxge_dma_pt_cfg_t p_dma_cfgp;
-	p_nxge_hw_pt_cfg_t p_cfgp;
-	uint_t mac_cnt;
-	int *mac_cfg_val;
-	int status;
-	p_nxge_param_t param_arr;
-	char *prop;
-
-	NXGE_DEBUG_MSG((nxgep, CFG_CTL, "==> nxge_use_cfg_mac_class_config"));
-	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
-	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
-	p_cfgp->start_mac_entry = 0;
-	param_arr = nxgep->param_arr;
-	prop = param_arr[param_mac_2rdc_grp].fcode_name;
-
-	switch (nxgep->function_num) {
-	case 0:
-	case 1:
-		/* 10G ports */
-		p_cfgp->max_macs = NXGE_MAX_MACS_XMACS;
-		break;
-	case 2:
-	case 3:
-		/* 1G ports */
-	default:
-		p_cfgp->max_macs = NXGE_MAX_MACS_BMACS;
-		break;
-	}
-
-	p_cfgp->mac_pref = 1;
-	p_cfgp->def_mac_rxdma_grpid = p_cfgp->start_rdc_grpid;
-
-	NXGE_DEBUG_MSG((nxgep, OBP_CTL,
-		"== nxge_use_cfg_mac_class_config: "
-		" mac_pref bit set def_mac_rxdma_grpid %d",
-		p_cfgp->def_mac_rxdma_grpid));
-
-	status = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip, 0, prop,
-		&mac_cfg_val, &mac_cnt);
-	if (status == DDI_PROP_SUCCESS) {
-		if (mac_cnt <= p_cfgp->max_macs)
-			status = ddi_prop_update_int_array(DDI_DEV_T_NONE,
-				nxgep->dip, prop, mac_cfg_val, mac_cnt);
-		ddi_prop_free(mac_cfg_val);
-	}
-	nxge_set_hw_mac_class_config(nxgep);
-	NXGE_DEBUG_MSG((nxgep, CFG_CTL, " <== nxge_use_cfg_mac_class_config"));
-}
-
-static void
-nxge_use_cfg_class_config(p_nxge_t nxgep)
-{
-	nxge_set_hw_class_config(nxgep);
-}
-
-static void
-nxge_set_rdc_intr_property(p_nxge_t nxgep)
-{
-	int i;
-	p_nxge_dma_pt_cfg_t p_dma_cfgp;
-
-	NXGE_DEBUG_MSG((nxgep, CFG_CTL, " ==> nxge_set_rdc_intr_property"));
-	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
-
-	for (i = 0; i < NXGE_MAX_RDCS; i++) {
-		p_dma_cfgp->rcr_timeout[i] = nxge_rcr_timeout;
-		p_dma_cfgp->rcr_threshold[i] = nxge_rcr_threshold;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, CFG_CTL, " <== nxge_set_rdc_intr_property"));
-}
-
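-/*
- * Commit the DMA configuration to the software state: build the transmit
- * channel bitmap, the receive channel list and the RDC groups, choosing
- * the group layout from the rx-quick-cfg / rxdma-grp-cfg setting.
- */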
-static void
-nxge_set_hw_dma_config(p_nxge_t nxgep)
-{
-	int i, j, rdc, ndmas, ngrps, bitmap, end, st_rdc;
-	int32_t status;
-	uint8_t rdcs_per_grp;
-	p_nxge_dma_pt_cfg_t p_dma_cfgp;
-	p_nxge_hw_pt_cfg_t p_cfgp;
-	p_nxge_rdc_grp_t rdc_grp_p;
-	int rdcgrp_cfg = CFG_NOT_SPECIFIED, rx_quick_cfg;
-	char *prop, *prop_val;
-	p_nxge_param_t param_arr;
-	config_token_t token;
-
-	NXGE_DEBUG_MSG((nxgep, CFG_CTL, "==> nxge_set_hw_dma_config"));
-
-	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
-	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
-	rdc_grp_p = p_dma_cfgp->rdc_grps;
-
-	/* Transmit DMA Channels */
-	bitmap = 0;
-	end = p_cfgp->start_tdc + p_cfgp->max_tdcs;
-	nxgep->ntdc = p_cfgp->max_tdcs;
-	p_dma_cfgp->tx_dma_map = 0;
-	for (i = p_cfgp->start_tdc; i < end; i++) {
-		bitmap |= (1 << i);
-		nxgep->tdc[i - p_cfgp->start_tdc] = (uint8_t)i;
-	}
-
-	p_dma_cfgp->tx_dma_map = bitmap;
-	param_arr = nxgep->param_arr;
-
-	/* Assume RDCs are evenly distributed */
-	rx_quick_cfg = param_arr[param_rx_quick_cfg].value;
-	switch (rx_quick_cfg) {
-	case CFG_NOT_SPECIFIED:
-		prop = "rxdma-grp-cfg";
-		status = ddi_prop_lookup_string(DDI_DEV_T_NONE,
-			nxgep->dip, 0, prop, (char **)&prop_val);
-		if (status != DDI_PROP_SUCCESS) {
-			NXGE_DEBUG_MSG((nxgep, CFG_CTL,
-				" property %s not found", prop));
-			rdcgrp_cfg = CFG_L3_DISTRIBUTE;
-		} else {
-			token = nxge_get_config_token(prop_val);
-			switch (token) {
-			case L2_CLASSIFY:
-				break;
-			case CLASSIFY:
-			case L3_CLASSIFY:
-			case L3_DISTRIBUTE:
-			case L3_TCAM:
-				rdcgrp_cfg = CFG_L3_DISTRIBUTE;
-				break;
-			default:
-				rdcgrp_cfg = CFG_L3_DISTRIBUTE;
-				break;
-			}
-			ddi_prop_free(prop_val);
-		}
-		break;
-	case CFG_L3_WEB:
-	case CFG_L3_DISTRIBUTE:
-	case CFG_L2_CLASSIFY:
-	case CFG_L3_TCAM:
-		rdcgrp_cfg = rx_quick_cfg;
-		break;
-	default:
-		rdcgrp_cfg = CFG_L3_DISTRIBUTE;
-		break;
-	}
-
-	/* Receive DMA Channels */
-	st_rdc = p_cfgp->start_rdc;
-	nxgep->nrdc = p_cfgp->max_rdcs;
-
-	for (i = 0; i < p_cfgp->max_rdcs; i++) {
-		nxgep->rdc[i] = i + p_cfgp->start_rdc;
-	}
-
-	switch (rdcgrp_cfg) {
-	case CFG_L3_DISTRIBUTE:
-	case CFG_L3_WEB:
-	case CFG_L3_TCAM:
-		ndmas = p_cfgp->max_rdcs;
-		ngrps = 1;
-		rdcs_per_grp = ndmas / ngrps;
-		break;
-	case CFG_L2_CLASSIFY:
-		ndmas = p_cfgp->max_rdcs / 2;
-		if (p_cfgp->max_rdcs < 2)
-			ndmas = 1;
-		ngrps = 1;
-		rdcs_per_grp = ndmas / ngrps;
-		break;
-	default:
-		ngrps = p_cfgp->max_rdc_grpids;
-		ndmas = p_cfgp->max_rdcs;
-		rdcs_per_grp = ndmas / ngrps;
-		break;
-	}
-
-	for (i = 0; i < ngrps; i++) {
-		rdc_grp_p = &p_dma_cfgp->rdc_grps[i];
-		rdc_grp_p->start_rdc = st_rdc + i * rdcs_per_grp;
-		rdc_grp_p->max_rdcs = rdcs_per_grp;
-
-		/* default to: 0, 1, 2, 3, ...., 0, 1, 2, 3.... */
-		rdc_grp_p->config_method = RDC_TABLE_ENTRY_METHOD_SEQ;
-		rdc = rdc_grp_p->start_rdc;
-		for (j = 0; j < NXGE_MAX_RDCS; j++) {
-			rdc_grp_p->rdc[j] = rdc++;
-			if (rdc == (rdc_grp_p->start_rdc + rdcs_per_grp)) {
-				rdc = rdc_grp_p->start_rdc;
-			}
-		}
-		rdc_grp_p->def_rdc = rdc_grp_p->rdc[0];
-		rdc_grp_p->flag = 1;	/* configured */
-	}
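-
-	/*
-	 * For example, with start_rdc = 4 and rdcs_per_grp = 4, the
-	 * sequential method above fills a group's rdc[] table as
-	 * 4, 5, 6, 7, 4, 5, 6, 7, ... for all NXGE_MAX_RDCS entries,
-	 * wrapping back to start_rdc once the group's last channel
-	 * has been assigned.
-	 */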
-
-	/* default RDC */
-	p_cfgp->def_rdc = p_cfgp->start_rdc;
-	nxgep->def_rdc = p_cfgp->start_rdc;
-
-	/* full 18 byte header ? */
-	p_dma_cfgp->rcr_full_header = NXGE_RCR_FULL_HEADER;
-	p_dma_cfgp->rx_drr_weight = PT_DRR_WT_DEFAULT_10G;
-	if (nxgep->function_num > 1)
-		p_dma_cfgp->rx_drr_weight = PT_DRR_WT_DEFAULT_1G;
-	p_dma_cfgp->rbr_size = nxge_rbr_size;
-	p_dma_cfgp->rcr_size = nxge_rcr_size;
-
-	nxge_set_rdc_intr_property(nxgep);
-	NXGE_DEBUG_MSG((nxgep, CFG_CTL, " <== nxge_set_hw_dma_config"));
-}
-
-boolean_t
-nxge_check_rxdma_port_member(p_nxge_t nxgep, uint8_t rdc)
-{
-	p_nxge_dma_pt_cfg_t p_dma_cfgp;
-	p_nxge_hw_pt_cfg_t p_cfgp;
-	int status = B_FALSE;
-
-	NXGE_DEBUG_MSG((nxgep, CFG2_CTL, "==> nxge_check_rxdma_port_member"));
-
-	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
-	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
-
-	/* Receive DMA Channels */
-	if (rdc < p_cfgp->max_rdcs)
-		status = B_TRUE;
-	NXGE_DEBUG_MSG((nxgep, CFG2_CTL, " <== nxge_check_rxdma_port_member"));
-	return (status);
-}
-
-boolean_t
-nxge_check_txdma_port_member(p_nxge_t nxgep, uint8_t tdc)
-{
-	p_nxge_dma_pt_cfg_t p_dma_cfgp;
-	p_nxge_hw_pt_cfg_t p_cfgp;
-	int status = B_FALSE;
-
-	NXGE_DEBUG_MSG((nxgep, CFG2_CTL, "==> nxge_check_txdma_port_member"));
-
-	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
-	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
-
-	/* Transmit DMA Channels */
-	if (tdc < p_cfgp->max_tdcs)
-		status = B_TRUE;
-	NXGE_DEBUG_MSG((nxgep, CFG2_CTL, " <== nxge_check_txdma_port_member"));
-	return (status);
-}
-
-boolean_t
-nxge_check_rxdma_rdcgrp_member(p_nxge_t nxgep, uint8_t rdc_grp, uint8_t rdc)
-{
-	p_nxge_dma_pt_cfg_t p_dma_cfgp;
-	int status = B_TRUE;
-	p_nxge_rdc_grp_t rdc_grp_p;
-
-	NXGE_DEBUG_MSG((nxgep, CFG2_CTL,
-		" ==> nxge_check_rxdma_rdcgrp_member"));
-	NXGE_DEBUG_MSG((nxgep, CFG2_CTL, "  nxge_check_rxdma_rdcgrp_member"
-		" rdc  %d group %d", rdc, rdc_grp));
-	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
-
-	rdc_grp_p = &p_dma_cfgp->rdc_grps[rdc_grp];
-	NXGE_DEBUG_MSG((nxgep, CFG2_CTL, "  max  %d ", rdc_grp_p->max_rdcs));
-	if (rdc >= rdc_grp_p->max_rdcs) {
-		status = B_FALSE;
-	}
-	NXGE_DEBUG_MSG((nxgep, CFG2_CTL,
-		" <== nxge_check_rxdma_rdcgrp_member"));
-	return (status);
-}
-
-boolean_t
-nxge_check_rdcgrp_port_member(p_nxge_t nxgep, uint8_t rdc_grp)
-{
-	p_nxge_dma_pt_cfg_t p_dma_cfgp;
-	p_nxge_hw_pt_cfg_t p_cfgp;
-	int status = B_TRUE;
-
-	NXGE_DEBUG_MSG((nxgep, CFG2_CTL, "==> nxge_check_rdcgrp_port_member"));
-
-	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
-	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
-
-	if (rdc_grp >= p_cfgp->max_rdc_grpids)
-		status = B_FALSE;
-	NXGE_DEBUG_MSG((nxgep, CFG2_CTL, " <== nxge_check_rdcgrp_port_member"));
-	return (status);
-}
-
-static void
-nxge_set_hw_vlan_class_config(p_nxge_t nxgep)
-{
-	int i;
-	p_nxge_dma_pt_cfg_t p_dma_cfgp;
-	p_nxge_hw_pt_cfg_t p_cfgp;
-	p_nxge_param_t param_arr;
-	uint_t vlan_cnt;
-	int *vlan_cfg_val;
-	nxge_param_map_t *vmap;
-	char *prop;
-	p_nxge_class_pt_cfg_t p_class_cfgp;
-	uint32_t good_cfg[32];
-	int good_count = 0;
-	nxge_mv_cfg_t *vlan_tbl;
-
-	NXGE_DEBUG_MSG((nxgep, CFG_CTL, " ==> nxge_set_hw_vlan_config"));
-	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
-	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
-	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
-
-	param_arr = nxgep->param_arr;
-	prop = param_arr[param_vlan_2rdc_grp].fcode_name;
-
-	/*
-	 * By default, VLAN to RDC group mapping is disabled.  The HW or .conf
-	 * properties need to be read to find out whether mapping is required.
-	 *
-	 * Format: a uint32_t array, each entry specifying one VLAN id and
-	 * its mapping:
-	 *
-	 *	bit[30]		= add
-	 *	bit[29]		= remove
-	 *	bit[28]		= preference
-	 *	bits[23-16]	= rdcgrp
-	 *	bits[15-0]	= VLAN ID
-	 */
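-
-	/*
-	 * For example, an entry of 0x40050123 requests "add" (bit 30 set),
-	 * maps to RDC group 5 (bits 23-16) and applies to VLAN ID 0x123
-	 * (bits 15-0).  The nxge_param_map_t overlay below decodes these
-	 * fields as param_id (VLAN ID), map_to (rdcgrp) and pref.
-	 */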
-
-	for (i = 0; i < NXGE_MAX_VLANS; i++) {
-		p_class_cfgp->vlan_tbl[i].flag = 0;
-	}
-
-	vlan_tbl = (nxge_mv_cfg_t *)&p_class_cfgp->vlan_tbl[0];
-	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip, 0, prop,
-			&vlan_cfg_val, &vlan_cnt) == DDI_PROP_SUCCESS) {
-		for (i = 0; i < vlan_cnt; i++) {
-			vmap = (nxge_param_map_t *)&vlan_cfg_val[i];
-			if ((vmap->param_id) &&
-					(vmap->param_id < NXGE_MAX_VLANS) &&
-					(vmap->map_to <
-						p_cfgp->max_rdc_grpids) &&
-					(vmap->map_to >= (uint8_t)0)) {
-				NXGE_DEBUG_MSG((nxgep, CFG2_CTL,
-					" nxge_vlan_config mapping"
-					" id %d grp %d",
-					vmap->param_id, vmap->map_to));
-				good_cfg[good_count] = vlan_cfg_val[i];
-				if (vlan_tbl[vmap->param_id].flag == 0)
-					good_count++;
-				vlan_tbl[vmap->param_id].flag = 1;
-				vlan_tbl[vmap->param_id].rdctbl =
-					vmap->map_to + p_cfgp->start_rdc_grpid;
-				vlan_tbl[vmap->param_id].mpr_npr = vmap->pref;
-			}
-		}
-		ddi_prop_free(vlan_cfg_val);
-		if (good_count != vlan_cnt) {
-			(void) ddi_prop_update_int_array(DDI_DEV_T_NONE,
-				nxgep->dip, prop, (int *)good_cfg, good_count);
-		}
-	}
-	NXGE_DEBUG_MSG((nxgep, CFG_CTL, "<== nxge_set_hw_vlan_config"));
-}
-
-static void
-nxge_set_hw_mac_class_config(p_nxge_t nxgep)
-{
-	int i;
-	p_nxge_dma_pt_cfg_t p_dma_cfgp;
-	p_nxge_hw_pt_cfg_t p_cfgp;
-	p_nxge_param_t param_arr;
-	uint_t mac_cnt;
-	int *mac_cfg_val;
-	nxge_param_map_t *mac_map;
-	char *prop;
-	p_nxge_class_pt_cfg_t p_class_cfgp;
-	int good_count = 0;
-	int good_cfg[NXGE_MAX_MACS];
-	nxge_mv_cfg_t *mac_host_info;
-
-	NXGE_DEBUG_MSG((nxgep, CFG_CTL, "==> nxge_set_hw_mac_config"));
-
-	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
-	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
-	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
-	mac_host_info = (nxge_mv_cfg_t *)&p_class_cfgp->mac_host_info[0];
-
-	param_arr = nxgep->param_arr;
-	prop = param_arr[param_mac_2rdc_grp].fcode_name;
-
-	for (i = 0; i < NXGE_MAX_MACS; i++) {
-		p_class_cfgp->mac_host_info[i].flag = 0;
-	}
-
-	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip, 0, prop,
-			&mac_cfg_val, &mac_cnt) == DDI_PROP_SUCCESS) {
-		for (i = 0; i < mac_cnt; i++) {
-			mac_map = (nxge_param_map_t *)&mac_cfg_val[i];
-			if ((mac_map->param_id < p_cfgp->max_macs) &&
-					(mac_map->map_to <
-						p_cfgp->max_rdc_grpids) &&
-					(mac_map->map_to >= (uint8_t)0)) {
-				NXGE_DEBUG_MSG((nxgep, CFG2_CTL,
-					" nxge_mac_config mapping"
-					" id %d grp %d",
-					mac_map->param_id, mac_map->map_to));
-				mac_host_info[mac_map->param_id].mpr_npr =
-					mac_map->pref;
-				mac_host_info[mac_map->param_id].rdctbl =
-					mac_map->map_to +
-					p_cfgp->start_rdc_grpid;
-				good_cfg[good_count] = mac_cfg_val[i];
-				if (mac_host_info[mac_map->param_id].flag == 0)
-					good_count++;
-				mac_host_info[mac_map->param_id].flag = 1;
-			}
-		}
-		ddi_prop_free(mac_cfg_val);
-		if (good_count != mac_cnt) {
-			(void) ddi_prop_update_int_array(DDI_DEV_T_NONE,
-				nxgep->dip, prop, good_cfg, good_count);
-		}
-	}
-	NXGE_DEBUG_MSG((nxgep, CFG_CTL, "<== nxge_set_hw_mac_config"));
-}
-
-static void
-nxge_set_hw_class_config(p_nxge_t nxgep)
-{
-	int i;
-	p_nxge_param_t param_arr;
-	int *int_prop_val;
-	uint32_t cfg_value;
-	char *prop;
-	p_nxge_class_pt_cfg_t p_class_cfgp;
-	int start_prop, end_prop;
-	uint_t prop_cnt;
-
-	NXGE_DEBUG_MSG((nxgep, CFG_CTL, " ==> nxge_set_hw_class_config"));
-
-	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
-	param_arr = nxgep->param_arr;
-	start_prop = param_class_opt_ip_usr4;
-	end_prop = param_class_opt_ipv6_sctp;
-
-	for (i = start_prop; i <= end_prop; i++) {
-		prop = param_arr[i].fcode_name;
-		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip,
-				0, prop, &int_prop_val,
-				&prop_cnt) == DDI_PROP_SUCCESS) {
-			cfg_value = (uint32_t)*int_prop_val;
-			ddi_prop_free(int_prop_val);
-		} else {
-			cfg_value = (uint32_t)param_arr[i].value;
-		}
-		p_class_cfgp->class_cfg[i - start_prop] = cfg_value;
-	}
-
-	prop = param_arr[param_h1_init_value].fcode_name;
-
-	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip, 0, prop,
-			&int_prop_val, &prop_cnt) == DDI_PROP_SUCCESS) {
-		cfg_value = (uint32_t)*int_prop_val;
-		ddi_prop_free(int_prop_val);
-	} else {
-		cfg_value = (uint32_t)param_arr[param_h1_init_value].value;
-	}
-
-	p_class_cfgp->init_h1 = (uint32_t)cfg_value;
-	prop = param_arr[param_h2_init_value].fcode_name;
-
-	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip, 0, prop,
-			&int_prop_val, &prop_cnt) == DDI_PROP_SUCCESS) {
-		cfg_value = (uint32_t)*int_prop_val;
-		ddi_prop_free(int_prop_val);
-	} else {
-		cfg_value = (uint32_t)param_arr[param_h2_init_value].value;
-	}
-
-	p_class_cfgp->init_h2 = (uint16_t)cfg_value;
-	NXGE_DEBUG_MSG((nxgep, CFG_CTL, " <== nxge_set_hw_class_config"));
-}
-
-nxge_status_t
-nxge_ldgv_init_n2(p_nxge_t nxgep, int *navail_p, int *nrequired_p)
-{
-	int i, maxldvs, maxldgs, start, end, nldvs;
-	int ldv, endldg;
-	uint8_t func;
-	uint8_t channel;
-	uint8_t chn_start;
-	boolean_t own_sys_err = B_FALSE, own_fzc = B_FALSE;
-	p_nxge_dma_pt_cfg_t p_dma_cfgp;
-	p_nxge_hw_pt_cfg_t p_cfgp;
-	p_nxge_ldgv_t ldgvp;
-	p_nxge_ldg_t ldgp, ptr;
-	p_nxge_ldv_t ldvp;
-	nxge_status_t status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_ldgv_init_n2"));
-	if (!*navail_p) {
-		*nrequired_p = 0;
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"<== nxge_ldgv_init:no avail"));
-		return (NXGE_ERROR);
-	}
-	/*
-	 * N2/NIU: one logical device owns one logical group, and each
-	 * device/group will be assigned one vector by the Hypervisor.
-	 */
-	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
-	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
-	maxldgs = p_cfgp->max_ldgs;
-	if (!maxldgs) {
-		/* No devices configured. */
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_ldgv_init_n2: "
-			"no logical groups configured."));
-		return (NXGE_ERROR);
-	} else {
-		maxldvs = maxldgs + 1;
-	}
-
-	/*
-	 * If this is the function zero instance, it needs to handle the system
-	 * error and MIF interrupts. The MIF interrupt may not be needed for
-	 * N2/NIU.
-	 */
-	func = nxgep->function_num;
-	if (func == 0) {
-		own_sys_err = B_TRUE;
-		if (!p_cfgp->ser_ldvid) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"nxge_ldgv_init_n2: func 0, ERR ID not set!"));
-		}
-		/* MIF interrupt */
-		if (!p_cfgp->mif_ldvid) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"nxge_ldgv_init_n2: func 0, MIF ID not set!"));
-		}
-	}
-
-	/*
-	 * Assume single partition, each function owns mac.
-	 */
-	if (!nxge_use_partition)
-		own_fzc = B_TRUE;
-
-	ldgvp = nxgep->ldgvp;
-	if (ldgvp == NULL) {
-		ldgvp = KMEM_ZALLOC(sizeof (nxge_ldgv_t), KM_SLEEP);
-		nxgep->ldgvp = ldgvp;
-		ldgvp->maxldgs = (uint8_t)maxldgs;
-		ldgvp->maxldvs = (uint8_t)maxldvs;
-		ldgp = ldgvp->ldgp = KMEM_ZALLOC(sizeof (nxge_ldg_t) * maxldgs,
-			KM_SLEEP);
-		ldvp = ldgvp->ldvp = KMEM_ZALLOC(sizeof (nxge_ldv_t) * maxldvs,
-			KM_SLEEP);
-	} else {
-		ldgp = ldgvp->ldgp;
-		ldvp = ldgvp->ldvp;
-	}
-
-	ldgvp->ndma_ldvs = p_cfgp->max_tdcs + p_cfgp->max_rdcs;
-	ldgvp->tmres = NXGE_TIMER_RESO;
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL,
-		"==> nxge_ldgv_init_n2: maxldvs %d maxldgs %d",
-		maxldvs, maxldgs));
-
-	/* logical start_ldg is ldv */
-	ptr = ldgp;
-	for (i = 0; i < maxldgs; i++) {
-		ptr->func = func;
-		ptr->arm = B_TRUE;
-		ptr->vldg_index = (uint8_t)i;
-		ptr->ldg_timer = NXGE_TIMER_LDG;
-		ptr->ldg = p_cfgp->ldg[i];
-		ptr->sys_intr_handler = nxge_intr;
-		ptr->nldvs = 0;
-		ptr->ldvp = NULL;
-		ptr->nxgep = nxgep;
-		NXGE_DEBUG_MSG((nxgep, INT_CTL,
-			"==> nxge_ldgv_init_n2: maxldvs %d maxldgs %d "
-			"ldg %d ldgptr $%p",
-			maxldvs, maxldgs, ptr->ldg, ptr));
-		ptr++;
-	}
-
-	endldg = NXGE_INT_MAX_LDG;
-	nldvs = 0;
-	ldgvp->nldvs = 0;
-	ldgp->ldvp = NULL;
-	*nrequired_p = 0;
-
-	/*
-	 * The logical device group table is organized in the following order
-	 * (the same order as the interrupt property): function 0 owns MAC,
-	 * MIF, error, rx and tx; function 1 owns MAC, rx and tx.
-	 */
-
-	if (own_fzc && p_cfgp->mac_ldvid) {
-		/* Each function should own MAC interrupt */
-		ldv = p_cfgp->mac_ldvid;
-		ldvp->ldv = (uint8_t)ldv;
-		ldvp->is_mac = B_TRUE;
-		ldvp->ldv_intr_handler = nxge_mac_intr;
-		ldvp->ldv_ldf_masks = 0;
-		ldvp->nxgep = nxgep;
-		NXGE_DEBUG_MSG((nxgep, INT_CTL,
-			"==> nxge_ldgv_init_n2(mac): maxldvs %d ldv %d "
-			"ldg %d ldgptr $%p ldvptr $%p",
-			maxldvs, ldv, ldgp->ldg, ldgp, ldvp));
-		nxge_ldgv_setup(&ldgp, &ldvp, ldv, endldg, nrequired_p);
-		nldvs++;
-	}
-
-	if (own_fzc && p_cfgp->mif_ldvid) {
-		ldv = p_cfgp->mif_ldvid;
-		ldvp->ldv = (uint8_t)ldv;
-		ldvp->is_mif = B_TRUE;
-		ldvp->ldv_intr_handler = nxge_mif_intr;
-		ldvp->ldv_ldf_masks = 0;
-		ldvp->nxgep = nxgep;
-		NXGE_DEBUG_MSG((nxgep, INT_CTL,
-			"==> nxge_ldgv_init_n2(mif): maxldvs %d ldv %d "
-			"ldg %d ldgptr $%p ldvptr $%p",
-			maxldvs, ldv, ldgp->ldg, ldgp, ldvp));
-		nxge_ldgv_setup(&ldgp, &ldvp, ldv, endldg, nrequired_p);
-		nldvs++;
-	}
-
-	ldv = NXGE_SYS_ERROR_LD;
-	ldvp->use_timer = B_TRUE;
-	if (own_sys_err && p_cfgp->ser_ldvid) {
-		ldv = p_cfgp->ser_ldvid;
-		/*
-		 * Unmask the system interrupt states.
-		 */
-		(void) nxge_fzc_sys_err_mask_set(nxgep, SYS_ERR_SMX_MASK |
-			SYS_ERR_IPP_MASK | SYS_ERR_TXC_MASK |
-			SYS_ERR_ZCP_MASK);
-	}
-	ldvp->ldv = (uint8_t)ldv;
-	ldvp->is_syserr = B_TRUE;
-	ldvp->ldv_intr_handler = nxge_syserr_intr;
-	ldvp->ldv_ldf_masks = 0;
-	ldvp->nxgep = nxgep;
-	ldgvp->ldvp_syserr = ldvp;
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL,
-		"==> nxge_ldgv_init_n2(syserr): maxldvs %d ldv %d "
-		"ldg %d ldgptr $%p ldvptr $%p",
-		maxldvs, ldv, ldgp->ldg, ldgp, ldvp));
-
-	if (own_sys_err && p_cfgp->ser_ldvid) {
-		(void) nxge_ldgv_setup(&ldgp, &ldvp, ldv, endldg, nrequired_p);
-	} else {
-		ldvp++;
-	}
-
-	nldvs++;
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_ldgv_init_n2: "
-		"(before rx) func %d nldvs %d navail %d nrequired %d",
-		func, nldvs, *navail_p, *nrequired_p));
-
-	/*
-	 * Receive DMA channels.
-	 */
-	channel = p_cfgp->start_rdc;
-	start = p_cfgp->start_rdc + NXGE_RDMA_LD_START;
-	end = start + p_cfgp->max_rdcs;
-	chn_start = p_cfgp->ldg_chn_start;
-	/*
-	 * Start with RDC to configure logical devices for each group.
-	 */
-	for (i = 0, ldv = start; ldv < end; i++, ldv++, chn_start++) {
-		ldvp->is_rxdma = B_TRUE;
-		ldvp->ldv = (uint8_t)ldv;
-		ldvp->channel = channel++;
-		ldvp->vdma_index = (uint8_t)i;
-		ldvp->ldv_intr_handler = nxge_rx_intr;
-		ldvp->ldv_ldf_masks = 0;
-		ldvp->nxgep = nxgep;
-		ldgp->ldg = p_cfgp->ldg[chn_start];
-
-		NXGE_DEBUG_MSG((nxgep, INT_CTL,
-			"==> nxge_ldgv_init_n2(rx%d): maxldvs %d ldv %d "
-			"ldg %d ldgptr 0x%016llx ldvptr 0x%016llx",
-			i, maxldvs, ldv, ldgp->ldg, ldgp, ldvp));
-		nxge_ldgv_setup(&ldgp, &ldvp, ldv, endldg, nrequired_p);
-		nldvs++;
-	}
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_ldgv_init_n2: "
-		"func %d nldvs %d navail %d nrequired %d",
-		func, nldvs, *navail_p, *nrequired_p));
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_ldgv_init_n2: "
-		"func %d nldvs %d navail %d nrequired %d ldgp 0x%llx "
-		"ldvp 0x%llx",
-		func, nldvs, *navail_p, *nrequired_p, ldgp, ldvp));
-	/*
-	 * Transmit DMA channels.
-	 */
-	channel = p_cfgp->start_tdc;
-	start = p_cfgp->start_tdc + NXGE_TDMA_LD_START;
-	end = start + p_cfgp->max_tdcs;
-	for (i = 0, ldv = start; ldv < end; i++, ldv++, chn_start++) {
-		ldvp->is_txdma = B_TRUE;
-		ldvp->ldv = (uint8_t)ldv;
-		ldvp->channel = channel++;
-		ldvp->vdma_index = (uint8_t)i;
-		ldvp->ldv_intr_handler = nxge_tx_intr;
-		ldvp->ldv_ldf_masks = 0;
-		ldgp->ldg = p_cfgp->ldg[chn_start];
-		ldvp->nxgep = nxgep;
-		NXGE_DEBUG_MSG((nxgep, INT_CTL,
-			"==> nxge_ldgv_init_n2(tx%d): maxldvs %d ldv %d "
-			"ldg %d ldgptr 0x%016llx ldvptr 0x%016llx",
-			i, maxldvs, ldv, ldgp->ldg, ldgp, ldvp));
-		nxge_ldgv_setup(&ldgp, &ldvp, ldv, endldg, nrequired_p);
-		nldvs++;
-	}
-
-	ldgvp->ldg_intrs = *nrequired_p;
-	ldgvp->nldvs = (uint8_t)nldvs;
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_ldgv_init_n2: "
-		"func %d nldvs %d maxgrps %d navail %d nrequired %d",
-		func, nldvs, maxldgs, *navail_p, *nrequired_p));
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_ldgv_init_n2"));
-	return (status);
-}
-
-/*
- * Interrupt-related interface functions.
- */
-
-nxge_status_t
-nxge_ldgv_init(p_nxge_t nxgep, int *navail_p, int *nrequired_p)
-{
-	int i, maxldvs, maxldgs, start, end, nldvs;
-	int ldv, ldg, endldg, ngrps;
-	uint8_t func;
-	uint8_t channel;
-	boolean_t own_sys_err = B_FALSE, own_fzc = B_FALSE;
-	p_nxge_dma_pt_cfg_t p_dma_cfgp;
-	p_nxge_hw_pt_cfg_t p_cfgp;
-	p_nxge_ldgv_t ldgvp;
-	p_nxge_ldg_t ldgp, ptr;
-	p_nxge_ldv_t ldvp;
-	nxge_status_t status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_ldgv_init"));
-	if (!*navail_p) {
-		*nrequired_p = 0;
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"<== nxge_ldgv_init:no avail"));
-		return (NXGE_ERROR);
-	}
-	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
-	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
-
-	nldvs = p_cfgp->max_tdcs + p_cfgp->max_rdcs;
-
-	/*
-	 * If this is the function zero instance, it needs to handle the
-	 * system error interrupts.
-	 */
-	func = nxgep->function_num;
-	if (func == 0) {
-		nldvs++;
-		own_sys_err = B_TRUE;
-	} else {
-		/* use timer */
-		nldvs++;
-	}
-
-	/*
-	 * Assume single partition, each function owns mac.
-	 */
-	if (!nxge_use_partition) {
-		/* mac */
-		nldvs++;
-		/* MIF */
-		nldvs++;
-		own_fzc = B_TRUE;
-	}
-	maxldvs = nldvs;
-	maxldgs = p_cfgp->max_ldgs;
-	if (!maxldvs || !maxldgs) {
-		/* No devices configured. */
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_ldgv_init: "
-			"no logical devices or groups configured."));
-		return (NXGE_ERROR);
-	}
-	ldgvp = nxgep->ldgvp;
-	if (ldgvp == NULL) {
-		ldgvp = KMEM_ZALLOC(sizeof (nxge_ldgv_t), KM_SLEEP);
-		nxgep->ldgvp = ldgvp;
-		ldgvp->maxldgs = (uint8_t)maxldgs;
-		ldgvp->maxldvs = (uint8_t)maxldvs;
-		ldgp = ldgvp->ldgp = KMEM_ZALLOC(sizeof (nxge_ldg_t) * maxldgs,
-			KM_SLEEP);
-		ldvp = ldgvp->ldvp = KMEM_ZALLOC(sizeof (nxge_ldv_t) * maxldvs,
-			KM_SLEEP);
-	}
-	ldgvp->ndma_ldvs = p_cfgp->max_tdcs + p_cfgp->max_rdcs;
-	ldgvp->tmres = NXGE_TIMER_RESO;
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL,
-		"==> nxge_ldgv_init: maxldvs %d maxldgs %d nldvs %d",
-		maxldvs, maxldgs, nldvs));
-	ldg = p_cfgp->start_ldg;
-	ptr = ldgp;
-	for (i = 0; i < maxldgs; i++) {
-		ptr->func = func;
-		ptr->arm = B_TRUE;
-		ptr->vldg_index = (uint8_t)i;
-		ptr->ldg_timer = NXGE_TIMER_LDG;
-		ptr->ldg = ldg++;
-		ptr->sys_intr_handler = nxge_intr;
-		ptr->nldvs = 0;
-		ptr->nxgep = nxgep;
-		NXGE_DEBUG_MSG((nxgep, INT_CTL,
-			"==> nxge_ldgv_init: maxldvs %d maxldgs %d ldg %d",
-			maxldvs, maxldgs, ptr->ldg));
-		ptr++;
-	}
-
-	ldg = p_cfgp->start_ldg;
-	if (maxldgs > *navail_p) {
-		ngrps = *navail_p;
-	} else {
-		ngrps = maxldgs;
-	}
-	endldg = ldg + ngrps;
-
-	/*
-	 * Receive DMA channels.
-	 */
-	channel = p_cfgp->start_rdc;
-	start = p_cfgp->start_rdc + NXGE_RDMA_LD_START;
-	end = start + p_cfgp->max_rdcs;
-	nldvs = 0;
-	ldgvp->nldvs = 0;
-	ldgp->ldvp = NULL;
-	*nrequired_p = 0;
-
-	/*
-	 * Start with RDC to configure logical devices for each group.
-	 */
-	for (i = 0, ldv = start; ldv < end; i++, ldv++) {
-		ldvp->is_rxdma = B_TRUE;
-		ldvp->ldv = (uint8_t)ldv;
-		/* If non-sequential, the following code needs to change */
-		ldvp->channel = channel++;
-		ldvp->vdma_index = (uint8_t)i;
-		ldvp->ldv_intr_handler = nxge_rx_intr;
-		ldvp->ldv_ldf_masks = 0;
-		ldvp->use_timer = B_FALSE;
-		ldvp->nxgep = nxgep;
-		nxge_ldgv_setup(&ldgp, &ldvp, ldv, endldg, nrequired_p);
-		nldvs++;
-	}
-
-	/*
-	 * Transmit DMA channels.
-	 */
-	channel = p_cfgp->start_tdc;
-	start = p_cfgp->start_tdc + NXGE_TDMA_LD_START;
-	end = start + p_cfgp->max_tdcs;
-	for (i = 0, ldv = start; ldv < end; i++, ldv++) {
-		ldvp->is_txdma = B_TRUE;
-		ldvp->ldv = (uint8_t)ldv;
-		ldvp->channel = channel++;
-		ldvp->vdma_index = (uint8_t)i;
-		ldvp->ldv_intr_handler = nxge_tx_intr;
-		ldvp->ldv_ldf_masks = 0;
-		ldvp->use_timer = B_FALSE;
-		ldvp->nxgep = nxgep;
-		nxge_ldgv_setup(&ldgp, &ldvp, ldv, endldg, nrequired_p);
-		nldvs++;
-	}
-
-	if (own_fzc) {
-		ldv = NXGE_MIF_LD;
-		ldvp->ldv = (uint8_t)ldv;
-		ldvp->is_mif = B_TRUE;
-		ldvp->ldv_intr_handler = nxge_mif_intr;
-		ldvp->ldv_ldf_masks = 0;
-		ldvp->use_timer = B_FALSE;
-		ldvp->nxgep = nxgep;
-		nxge_ldgv_setup(&ldgp, &ldvp, ldv, endldg, nrequired_p);
-		nldvs++;
-	}
-	/*
-	 * MAC port (function zero control)
-	 */
-	if (own_fzc) {
-		ldvp->is_mac = B_TRUE;
-		ldvp->ldv_intr_handler = nxge_mac_intr;
-		ldvp->ldv_ldf_masks = 0;
-		ldv = func + NXGE_MAC_LD_START;
-		ldvp->ldv = (uint8_t)ldv;
-		ldvp->use_timer = B_FALSE;
-		ldvp->nxgep = nxgep;
-		nxge_ldgv_setup(&ldgp, &ldvp, ldv, endldg, nrequired_p);
-		nldvs++;
-	}
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_ldgv_init: "
-		"func %d nldvs %d navail %d nrequired %d",
-		func, nldvs, *navail_p, *nrequired_p));
-	/*
-	 * Function 0 owns system error interrupts.
-	 */
-	ldvp->use_timer = B_TRUE;
-	if (own_sys_err) {
-		ldv = NXGE_SYS_ERROR_LD;
-		ldvp->ldv = (uint8_t)ldv;
-		ldvp->is_syserr = B_TRUE;
-		ldvp->ldv_intr_handler = nxge_syserr_intr;
-		ldvp->ldv_ldf_masks = 0;
-		ldvp->nxgep = nxgep;
-		ldgvp->ldvp_syserr = ldvp;
-		/*
-		 * Unmask the system interrupt states.
-		 */
-		(void) nxge_fzc_sys_err_mask_set(nxgep, SYS_ERR_SMX_MASK |
-			SYS_ERR_IPP_MASK | SYS_ERR_TXC_MASK |
-			SYS_ERR_ZCP_MASK);
-
-		(void) nxge_ldgv_setup(&ldgp, &ldvp, ldv, endldg, nrequired_p);
-		nldvs++;
-	} else {
-		ldv = NXGE_SYS_ERROR_LD;
-		ldvp->ldv = (uint8_t)ldv;
-		ldvp->is_syserr = B_TRUE;
-		ldvp->ldv_intr_handler = nxge_syserr_intr;
-		ldvp->nxgep = nxgep;
-		ldvp->ldv_ldf_masks = 0;
-		ldgvp->ldvp_syserr = ldvp;
-	}
-
-	ldgvp->ldg_intrs = *nrequired_p;
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_ldgv_init: "
-		"func %d nldvs %d navail %d nrequired %d",
-		func, nldvs, *navail_p, *nrequired_p));
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_ldgv_init"));
-	return (status);
-}
-
-nxge_status_t
-nxge_ldgv_uninit(p_nxge_t nxgep)
-{
-	p_nxge_ldgv_t ldgvp;
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_ldgv_uninit"));
-	ldgvp = nxgep->ldgvp;
-	if (ldgvp == NULL) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_ldgv_uninit: "
-				"no logical group configured."));
-		return (NXGE_OK);
-	}
-	if (ldgvp->ldgp) {
-		KMEM_FREE(ldgvp->ldgp, sizeof (nxge_ldg_t) * ldgvp->maxldgs);
-	}
-	if (ldgvp->ldvp) {
-		KMEM_FREE(ldgvp->ldvp, sizeof (nxge_ldv_t) * ldgvp->maxldvs);
-	}
-	KMEM_FREE(ldgvp, sizeof (nxge_ldgv_t));
-	nxgep->ldgvp = NULL;
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_ldgv_uninit"));
-	return (NXGE_OK);
-}
-
-nxge_status_t
-nxge_intr_ldgv_init(p_nxge_t nxgep)
-{
-	nxge_status_t status = NXGE_OK;
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr_ldgv_init"));
-	/*
-	 * Configure the logical device group numbers, state vectors and
-	 * interrupt masks for each logical device.
-	 */
-	status = nxge_fzc_intr_init(nxgep);
-
-	/*
-	 * Configure logical device masks and timers.
-	 */
-	status = nxge_intr_mask_mgmt(nxgep);
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intr_ldgv_init"));
-	return (status);
-}
-
-nxge_status_t
-nxge_intr_mask_mgmt(p_nxge_t nxgep)
-{
-	p_nxge_ldgv_t ldgvp;
-	p_nxge_ldg_t ldgp;
-	p_nxge_ldv_t ldvp;
-	npi_handle_t handle;
-	int i, j;
-	npi_status_t rs = NPI_SUCCESS;
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr_mask_mgmt"));
-
-	if ((ldgvp = nxgep->ldgvp) == NULL) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"<== nxge_intr_mask_mgmt: Null ldgvp"));
-		return (NXGE_ERROR);
-	}
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	ldgp = ldgvp->ldgp;
-	ldvp = ldgvp->ldvp;
-	if (ldgp == NULL || ldvp == NULL) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"<== nxge_intr_mask_mgmt: Null ldgp or ldvp"));
-		return (NXGE_ERROR);
-	}
-	NXGE_DEBUG_MSG((nxgep, INT_CTL,
-		"==> nxge_intr_mask_mgmt: # of intrs %d ", ldgvp->ldg_intrs));
-	/* Initialize masks. */
-	if (nxgep->niu_type != N2_NIU) {
-		NXGE_DEBUG_MSG((nxgep, INT_CTL,
-			"==> nxge_intr_mask_mgmt(Neptune): # intrs %d ",
-			ldgvp->ldg_intrs));
-		for (i = 0; i < ldgvp->ldg_intrs; i++, ldgp++) {
-			NXGE_DEBUG_MSG((nxgep, INT_CTL,
-				"==> nxge_intr_mask_mgmt(Neptune): # ldv %d "
-				"in group %d", ldgp->nldvs, ldgp->ldg));
-			for (j = 0; j < ldgp->nldvs; j++, ldvp++) {
-				NXGE_DEBUG_MSG((nxgep, INT_CTL,
-					"==> nxge_intr_mask_mgmt: set ldv # %d "
-					"for ldg %d", ldvp->ldv, ldgp->ldg));
-				rs = npi_intr_mask_set(handle, ldvp->ldv,
-					ldvp->ldv_ldf_masks);
-				if (rs != NPI_SUCCESS) {
-					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-						"<== nxge_intr_mask_mgmt: "
-						"set mask failed "
-						" rs 0x%x ldv %d mask 0x%x",
-						rs, ldvp->ldv,
-						ldvp->ldv_ldf_masks));
-					return (NXGE_ERROR | rs);
-				}
-				NXGE_DEBUG_MSG((nxgep, INT_CTL,
-					"==> nxge_intr_mask_mgmt: "
-					"set mask OK "
-					" rs 0x%x ldv %d mask 0x%x",
-					rs, ldvp->ldv,
-					ldvp->ldv_ldf_masks));
-			}
-		}
-	}
-	ldgp = ldgvp->ldgp;
-	/* Configure timer and arm bit */
-	for (i = 0; i < nxgep->ldgvp->ldg_intrs; i++, ldgp++) {
-		rs = npi_intr_ldg_mgmt_set(handle, ldgp->ldg,
-			ldgp->arm, ldgp->ldg_timer);
-		if (rs != NPI_SUCCESS) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"<== nxge_intr_mask_mgmt: "
-				"set timer failed "
-				" rs 0x%x dg %d timer 0x%x",
-				rs, ldgp->ldg, ldgp->ldg_timer));
-			return (NXGE_ERROR | rs);
-		}
-		NXGE_DEBUG_MSG((nxgep, INT_CTL,
-			"==> nxge_intr_mask_mgmt: "
-			"set timer OK "
-			" rs 0x%x ldg %d timer 0x%x",
-			rs, ldgp->ldg, ldgp->ldg_timer));
-	}
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intr_mask_mgmt"));
-	return (NXGE_OK);
-}
-
-nxge_status_t
-nxge_intr_mask_mgmt_set(p_nxge_t nxgep, boolean_t on)
-{
-	p_nxge_ldgv_t ldgvp;
-	p_nxge_ldg_t ldgp;
-	p_nxge_ldv_t ldvp;
-	npi_handle_t handle;
-	int i, j;
-	npi_status_t rs = NPI_SUCCESS;
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL,
-		"==> nxge_intr_mask_mgmt_set (%d)", on));
-
-	if (nxgep->niu_type == N2_NIU) {
-		NXGE_DEBUG_MSG((nxgep, INT_CTL,
-			"<== nxge_intr_mask_mgmt_set (%d) not set (N2/NIU)",
-			on));
-		return (NXGE_ERROR);
-	}
-
-	if ((ldgvp = nxgep->ldgvp) == NULL) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"==> nxge_intr_mask_mgmt_set: Null ldgvp"));
-		return (NXGE_ERROR);
-	}
-
-	handle = NXGE_DEV_NPI_HANDLE(nxgep);
-	ldgp = ldgvp->ldgp;
-	ldvp = ldgvp->ldvp;
-	if (ldgp == NULL || ldvp == NULL) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"<== nxge_intr_mask_mgmt_set: Null ldgp or ldvp"));
-		return (NXGE_ERROR);
-	}
-	/* set masks. */
-	for (i = 0; i < ldgvp->ldg_intrs; i++, ldgp++) {
-		NXGE_DEBUG_MSG((nxgep, INT_CTL,
-			"==> nxge_intr_mask_mgmt_set: flag %d ldg %d "
-			"set mask nldvs %d", on, ldgp->ldg, ldgp->nldvs));
-		for (j = 0; j < ldgp->nldvs; j++, ldvp++) {
-			NXGE_DEBUG_MSG((nxgep, INT_CTL,
-				"==> nxge_intr_mask_mgmt_set: "
-				"for %d %d flag %d", i, j, on));
-			if (on) {
-				ldvp->ldv_ldf_masks = 0;
-				NXGE_DEBUG_MSG((nxgep, INT_CTL,
-					"==> nxge_intr_mask_mgmt_set: "
-					"ON mask off"));
-			} else if (!on) {
-				ldvp->ldv_ldf_masks = (uint8_t)LD_IM1_MASK;
-				NXGE_DEBUG_MSG((nxgep, INT_CTL,
-					"==> nxge_intr_mask_mgmt_set:mask on"));
-			}
-			rs = npi_intr_mask_set(handle, ldvp->ldv,
-				ldvp->ldv_ldf_masks);
-			if (rs != NPI_SUCCESS) {
-				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-					"==> nxge_intr_mask_mgmt_set: "
-					"set mask failed "
-					" rs 0x%x ldv %d mask 0x%x",
-					rs, ldvp->ldv, ldvp->ldv_ldf_masks));
-				return (NXGE_ERROR | rs);
-			}
-			NXGE_DEBUG_MSG((nxgep, INT_CTL,
-				"==> nxge_intr_mask_mgmt_set: flag %d "
-				"set mask OK "
-				" ldv %d mask 0x%x",
-				on, ldvp->ldv, ldvp->ldv_ldf_masks));
-		}
-	}
-
-	ldgp = ldgvp->ldgp;
-	/* set the arm bit */
-	for (i = 0; i < nxgep->ldgvp->ldg_intrs; i++, ldgp++) {
-		if (on && !ldgp->arm) {
-			ldgp->arm = B_TRUE;
-		} else if (!on && ldgp->arm) {
-			ldgp->arm = B_FALSE;
-		}
-		rs = npi_intr_ldg_mgmt_set(handle, ldgp->ldg,
-			ldgp->arm, ldgp->ldg_timer);
-		if (rs != NPI_SUCCESS) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"<== nxge_intr_mask_mgmt_set: "
-				"set timer failed "
-				" rs 0x%x ldg %d timer 0x%x",
-				rs, ldgp->ldg, ldgp->ldg_timer));
-			return (NXGE_ERROR | rs);
-		}
-		NXGE_DEBUG_MSG((nxgep, INT_CTL,
-			"==> nxge_intr_mask_mgmt_set: OK (flag %d) "
-			"set timer "
-			" ldg %d timer 0x%x",
-			on, ldgp->ldg, ldgp->ldg_timer));
-	}
-
-	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intr_mask_mgmt_set"));
-	return (NXGE_OK);
-}
-
-static nxge_status_t
-nxge_get_mac_addr_properties(p_nxge_t nxgep)
-{
-	uchar_t *prop_val;
-	uint_t prop_len;
-	uint_t i;
-	uint8_t func_num;
-	uint8_t total_factory_macs;
-
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_get_mac_addr_properties "));
-
-#if defined(_BIG_ENDIAN)
-	/*
-	 * Get the ethernet address.
-	 */
-	(void) localetheraddr((struct ether_addr *)NULL, &nxgep->ouraddr);
-
-	/*
-	 * Check if it is an adapter with its own local mac address.  If it is
-	 * present, override the system mac address.
-	 */
-	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, nxgep->dip, 0,
-			"local-mac-address", &prop_val,
-			&prop_len) == DDI_PROP_SUCCESS) {
-		if (prop_len == ETHERADDRL) {
-			nxgep->factaddr = *(p_ether_addr_t)prop_val;
-			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Local mac address = "
-				"%02x:%02x:%02x:%02x:%02x:%02x",
-				prop_val[0], prop_val[1], prop_val[2],
-				prop_val[3], prop_val[4], prop_val[5]));
-		}
-		ddi_prop_free(prop_val);
-	}
-	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, nxgep->dip, 0,
-			"local-mac-address?", &prop_val,
-			&prop_len) == DDI_PROP_SUCCESS) {
-		if (strncmp("true", (caddr_t)prop_val, (size_t)prop_len) == 0) {
-			nxgep->ouraddr = nxgep->factaddr;
-			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-				"Using local MAC address"));
-		}
-		ddi_prop_free(prop_val);
-	} else {
-		nxgep->ouraddr = nxgep->factaddr;
-	}
-#else
-	(void) nxge_espc_mac_addrs_get(nxgep);
-	nxgep->ouraddr = nxgep->factaddr;
-#endif
-
-	func_num = nxgep->function_num;
-
-	/*
-	 * total_factory_macs is the total number of MACs the factory assigned
-	 * to the whole Neptune device.  NIU does not need this parameter
-	 * because it derives the number of factory MACs for each port from
-	 * the device properties.
-	 */
-	if (nxgep->niu_type == NEPTUNE || nxgep->niu_type == NEPTUNE_2) {
-		if (nxge_espc_num_macs_get(nxgep, &total_factory_macs)
-			== NXGE_OK) {
-			nxgep->nxge_mmac_info.total_factory_macs
-				= total_factory_macs;
-		} else {
-			NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
-				"nxge_espc_num_macs_get: espc access failed"));
-			return (NXGE_ERROR);
-		}
-	}
-
-	/*
-	 * Note: mac-addresses of n2-niu is the list of mac addresses for a
-	 * Note: the mac-addresses property of the N2/NIU is the list of MAC
-	 * addresses for a port, while #mac-addresses stored in Neptune's
-	 * SEEPROM is the total number of MAC addresses allocated for the
-	 * board.
-	if (nxgep->niu_type == N2_NIU) {
-		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, nxgep->dip, 0,
-				"mac-addresses", &prop_val, &prop_len) ==
-			DDI_PROP_SUCCESS) {
-			/*
-			 * XAUI may have up to 18 MACs, more than the XMAC can
-			 * use (1 unique MAC plus 16 alternate MACs)
-			 */
-			nxgep->nxge_mmac_info.num_factory_mmac
-			    = prop_len / ETHERADDRL - 1;
-			if (nxgep->nxge_mmac_info.num_factory_mmac >
-				XMAC_MAX_ALT_ADDR_ENTRY) {
-				nxgep->nxge_mmac_info.num_factory_mmac =
-					XMAC_MAX_ALT_ADDR_ENTRY;
-			}
-			ddi_prop_free(prop_val);
-		}
-	} else {
-		/*
-		 * total_factory_macs = 32
-		 * num_factory_mmac = (32 >> (nports/2)) - 1
-		 * So if nports = 4, then num_factory_mmac =  7
-		 *    if nports = 2, then num_factory_mmac = 15
-		 */
-		nxgep->nxge_mmac_info.num_factory_mmac
-			= ((nxgep->nxge_mmac_info.total_factory_macs >>
-			(nxgep->nports >> 1))) - 1;
-	}
-	for (i = 0; i <= nxgep->nxge_mmac_info.num_mmac; i++) {
-		(void) npi_mac_altaddr_disable(nxgep->npi_handle,
-			NXGE_GET_PORT_NUM(func_num), i);
-	}
-
-	(void) nxge_init_mmac(nxgep);
-	return (NXGE_OK);
-}
-
-void
-nxge_get_xcvr_properties(p_nxge_t nxgep)
-{
-	uchar_t *prop_val;
-	uint_t prop_len;
-
-	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_get_xcvr_properties"));
-
-	/*
-	 * Read the type of physical layer interface being used.
-	 */
-	nxgep->statsp->mac_stats.xcvr_inuse = INT_MII_XCVR;
-	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, nxgep->dip, 0,
-			"phy-type", &prop_val, &prop_len) == DDI_PROP_SUCCESS) {
-		if (strncmp("pcs", (caddr_t)prop_val,
-				(size_t)prop_len) == 0) {
-			nxgep->statsp->mac_stats.xcvr_inuse = PCS_XCVR;
-		} else {
-			nxgep->statsp->mac_stats.xcvr_inuse = INT_MII_XCVR;
-		}
-		ddi_prop_free(prop_val);
-	} else if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, nxgep->dip, 0,
-			"phy-interface", &prop_val,
-			&prop_len) == DDI_PROP_SUCCESS) {
-		if (strncmp("pcs", (caddr_t)prop_val, (size_t)prop_len) == 0) {
-			nxgep->statsp->mac_stats.xcvr_inuse = PCS_XCVR;
-		} else {
-			nxgep->statsp->mac_stats.xcvr_inuse = INT_MII_XCVR;
-		}
-		ddi_prop_free(prop_val);
-	}
-}
-
-/*
- * Static functions start here.
- */
-
-static void
-nxge_ldgv_setup(p_nxge_ldg_t *ldgp, p_nxge_ldv_t *ldvp, uint8_t ldv,
-	uint8_t endldg, int *ngrps)
-{
-	NXGE_DEBUG_MSG((NULL, INT_CTL, "==> nxge_ldgv_setup"));
-	/* Assign the group number for each device. */
-	(*ldvp)->ldg_assigned = (*ldgp)->ldg;
-	(*ldvp)->ldgp = *ldgp;
-	(*ldvp)->ldv = ldv;
-
-	NXGE_DEBUG_MSG((NULL, INT_CTL, "==> nxge_ldgv_setup: "
-		"ldv %d endldg %d ldg %d, ldvp $%p",
-		ldv, endldg, (*ldgp)->ldg, (*ldgp)->ldvp));
-
-	(*ldgp)->nldvs++;
-	if ((*ldgp)->ldg == (endldg - 1)) {
-		if ((*ldgp)->ldvp == NULL) {
-			(*ldgp)->ldvp = *ldvp;
-			*ngrps += 1;
-			NXGE_DEBUG_MSG((NULL, INT_CTL,
-				"==> nxge_ldgv_setup: ngrps %d", *ngrps));
-		}
-		NXGE_DEBUG_MSG((NULL, INT_CTL,
-			"==> nxge_ldgv_setup: ldvp $%p ngrps %d",
-			*ldvp, *ngrps));
-		++*ldvp;
-	} else {
-		(*ldgp)->ldvp = *ldvp;
-		*ngrps += 1;
-		NXGE_DEBUG_MSG((NULL, INT_CTL, "==> nxge_ldgv_setup(done): "
-			"ldv %d endldg %d ldg %d, ldvp $%p",
-			ldv, endldg, (*ldgp)->ldg, (*ldgp)->ldvp));
-		++*ldvp;
-		++*ldgp;
-		NXGE_DEBUG_MSG((NULL, INT_CTL,
-			"==> nxge_ldgv_setup: new ngrps %d", *ngrps));
-	}
-
-	NXGE_DEBUG_MSG((NULL, INT_CTL, "==> nxge_ldgv_setup: "
-		"ldv %d ldvp $%p endldg %d ngrps %d",
-		ldv, ldvp, endldg, *ngrps));
-
-	NXGE_DEBUG_MSG((NULL, INT_CTL, "<== nxge_ldgv_setup"));
-}
-
-/*
- * Note: This function assumes the following distribution of mac
- * addresses among 4 ports in neptune:
- *
- *      -------------
- *    0|            |0 - local-mac-address for fn 0
- *      -------------
- *    1|            |1 - local-mac-address for fn 1
- *      -------------
- *    2|            |2 - local-mac-address for fn 2
- *      -------------
- *    3|            |3 - local-mac-address for fn 3
- *      -------------
- *     |            |4 - Start of alt. mac addr. for fn 0
- *     |            |
- *     |            |
- *     |            |10
- *     --------------
- *     |            |11 - Start of alt. mac addr. for fn 1
- *     |            |
- *     |            |
- *     |            |17
- *     --------------
- *     |            |18 - Start of alt. mac addr. for fn 2
- *     |            |
- *     |            |
- *     |            |24
- *     --------------
- *     |            |25 - Start of alt. mac addr. for fn 3
- *     |            |
- *     |            |
- *     |            |31
- *     --------------
- *
- * For N2/NIU, the MAC addresses come from the XAUI card.
- */
-
-static void
-nxge_init_mmac(p_nxge_t nxgep)
-{
-	int slot;
-	uint8_t func_num;
-	uint16_t *base_mmac_addr;
-	uint32_t alt_mac_ls4b;
-	uint16_t *mmac_addr;
-	uint32_t base_mac_ls4b; /* least significant 4 bytes */
-	nxge_mmac_t *mmac_info;
-	npi_mac_addr_t mac_addr;
-
-	func_num = nxgep->function_num;
-	base_mmac_addr = (uint16_t *)&nxgep->factaddr;
-	mmac_info = (nxge_mmac_t *)&nxgep->nxge_mmac_info;
-
-	base_mac_ls4b = ((uint32_t)base_mmac_addr[1]) << 16 |
-		base_mmac_addr[2];
-
-	if (nxgep->niu_type == N2_NIU) {
-		alt_mac_ls4b = base_mac_ls4b + 1; /* ls4b of 1st altmac */
-	} else {			/* Neptune */
-		alt_mac_ls4b = base_mac_ls4b + (nxgep->nports - func_num)
-			+ (func_num * (mmac_info->num_factory_mmac));
-	}
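-
-	/*
-	 * For example, on a 4-port Neptune with num_factory_mmac = 7 and
-	 * the slot layout shown in the block comment above, function 1
-	 * computes (base0 + 1) + (4 - 1) + (1 * 7) = base0 + 11 and
-	 * function 2 computes (base0 + 2) + (4 - 2) + (2 * 7) = base0 + 18,
-	 * i.e. the start of the alternate addresses for each function.
-	 */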
-
-	/* Set flags for unique MAC */
-	mmac_info->mac_pool[0].flags |= MMAC_SLOT_USED | MMAC_VENDOR_ADDR;
-
-	/* Clear flags of all alternate MAC slots */
-	for (slot = 1; slot <= mmac_info->num_mmac; slot++) {
-		if (slot <= mmac_info->num_factory_mmac)
-			mmac_info->mac_pool[slot].flags = MMAC_VENDOR_ADDR;
-		else
-			mmac_info->mac_pool[slot].flags = 0;
-	}
-
-	/* Generate and store factory alternate MACs */
-	for (slot = 1; slot <= mmac_info->num_factory_mmac; slot++) {
-		mmac_addr = (uint16_t *)&mmac_info->factory_mac_pool[slot];
-		mmac_addr[0] = base_mmac_addr[0];
-		mac_addr.w2 = mmac_addr[0];
-
-		mmac_addr[1] = (alt_mac_ls4b >> 16) & 0x0FFFF;
-		mac_addr.w1 = mmac_addr[1];
-
-		mmac_addr[2] = alt_mac_ls4b & 0x0FFFF;
-		mac_addr.w0 = mmac_addr[2];
-		/*
-		 * slot minus 1 because npi_mac_altaddr_entry expects 0
-		 * for the first alternate mac address.
-		 */
-		(void) npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET,
-			NXGE_GET_PORT_NUM(func_num), slot - 1, &mac_addr);
-
-		alt_mac_ls4b++;
-	}
-	/* Initialize the first two parameters for mmac kstat */
-	nxgep->statsp->mmac_stats.mmac_max_cnt = mmac_info->num_mmac;
-	nxgep->statsp->mmac_stats.mmac_avail_cnt = mmac_info->num_mmac;
-}
--- a/usr/src/uts/sun4v/io/nxge/nxge_zcp.c	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,473 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#include <nxge_impl.h>
-#include <nxge_zcp.h>
-#include <nxge_ipp.h>
-
-nxge_status_t
-nxge_zcp_init(p_nxge_t nxgep)
-{
-	uint8_t portn;
-	npi_handle_t handle;
-	zcp_iconfig_t istatus;
-	npi_status_t rs = NPI_SUCCESS;
-	int i;
-	zcp_ram_unit_t w_data;
-	zcp_ram_unit_t r_data;
-	uint32_t cfifo_depth;
-
-	handle = nxgep->npi_handle;
-	portn = NXGE_GET_PORT_NUM(nxgep->function_num);
-
-	if ((nxgep->niu_type == NEPTUNE) || (nxgep->niu_type == NEPTUNE_2)) {
-		if (portn < 2)
-			cfifo_depth = ZCP_P0_P1_CFIFO_DEPTH;
-		else
-			cfifo_depth = ZCP_P2_P3_CFIFO_DEPTH;
-	} else if (nxgep->niu_type == N2_NIU)
-		cfifo_depth = ZCP_NIU_CFIFO_DEPTH;
-
-	/* Clean up CFIFO */
-	w_data.w0 = 0;
-	w_data.w1 = 0;
-	w_data.w2 = 0;
-	w_data.w3 = 0;
-	w_data.w4 = 0;
-
-	for (i = 0; i < cfifo_depth; i++) {
-		if (npi_zcp_tt_cfifo_entry(handle, OP_SET,
-				portn, i, &w_data) != NPI_SUCCESS)
-			goto fail;
-		if (npi_zcp_tt_cfifo_entry(handle, OP_GET,
-				portn, i, &r_data) != NPI_SUCCESS)
-			goto fail;
-	}
-
-	if (npi_zcp_rest_cfifo_port(handle, portn) != NPI_SUCCESS)
-		goto fail;
-
-	/*
-	 * Making sure that the error source is cleared if this is an injected
-	 * error.
-	 */
-	switch (portn) {
-	case 0:
-		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT0_REG, 0);
-		break;
-	case 1:
-		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT1_REG, 0);
-		break;
-	case 2:
-		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT2_REG, 0);
-		break;
-	case 3:
-		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT3_REG, 0);
-		break;
-	}
-
-	if ((rs = npi_zcp_clear_istatus(handle)) != NPI_SUCCESS)
-		return (NXGE_ERROR | rs);
-	if ((rs = npi_zcp_get_istatus(handle, &istatus)) != NPI_SUCCESS)
-		return (NXGE_ERROR | rs);
-	if ((rs = npi_zcp_iconfig(handle, INIT, ICFG_ZCP_ALL)) != NPI_SUCCESS)
-		goto fail;
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_zcp_init: port%d", portn));
-	return (NXGE_OK);
-
-fail:
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-		"nxge_zcp_init: Fail to initialize ZCP Port #%d\n", portn));
-	return (NXGE_ERROR | rs);
-}
-
-nxge_status_t
-nxge_zcp_handle_sys_errors(p_nxge_t nxgep)
-{
-	npi_handle_t handle;
-	npi_status_t rs = NPI_SUCCESS;
-	p_nxge_zcp_stats_t statsp;
-	uint8_t portn;
-	zcp_iconfig_t istatus;
-	boolean_t rxport_fatal = B_FALSE;
-	nxge_status_t status = NXGE_OK;
-
-	handle = nxgep->npi_handle;
-	statsp = (p_nxge_zcp_stats_t)&nxgep->statsp->zcp_stats;
-	portn = nxgep->mac.portnum;
-
-	if ((rs = npi_zcp_get_istatus(handle, &istatus)) != NPI_SUCCESS)
-		return (NXGE_ERROR | rs);
-
-	if (istatus & ICFG_ZCP_RRFIFO_UNDERRUN) {
-		statsp->rrfifo_underrun++;
-		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-			NXGE_FM_EREPORT_ZCP_RRFIFO_UNDERRUN);
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_zcp_err_evnts: rrfifo_underrun"));
-	}
-
-	if (istatus & ICFG_ZCP_RRFIFO_OVERRUN) {
-		statsp->rrfifo_overrun++;
-		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-			NXGE_FM_EREPORT_ZCP_RRFIFO_OVERRUN);
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_zcp_err_evnts: buf_rrfifo_overrun"));
-	}
-
-	if (istatus & ICFG_ZCP_RSPFIFO_UNCORR_ERR) {
-		statsp->rspfifo_uncorr_err++;
-		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-			NXGE_FM_EREPORT_ZCP_RSPFIFO_UNCORR_ERR);
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_zcp_err_evnts: rspfifo_uncorr_err"));
-	}
-
-	if (istatus & ICFG_ZCP_BUFFER_OVERFLOW) {
-		statsp->buffer_overflow++;
-		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-			NXGE_FM_EREPORT_ZCP_BUFFER_OVERFLOW);
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_zcp_err_evnts: buffer_overflow"));
-		rxport_fatal = B_TRUE;
-	}
-
-	if (istatus & ICFG_ZCP_STAT_TBL_PERR) {
-		statsp->stat_tbl_perr++;
-		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-			NXGE_FM_EREPORT_ZCP_STAT_TBL_PERR);
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_zcp_err_evnts: stat_tbl_perr"));
-	}
-
-	if (istatus & ICFG_ZCP_DYN_TBL_PERR) {
-		statsp->dyn_tbl_perr++;
-		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-			NXGE_FM_EREPORT_ZCP_DYN_TBL_PERR);
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_zcp_err_evnts: dyn_tbl_perr"));
-	}
-
-	if (istatus & ICFG_ZCP_BUF_TBL_PERR) {
-		statsp->buf_tbl_perr++;
-		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-			NXGE_FM_EREPORT_ZCP_BUF_TBL_PERR);
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_zcp_err_evnts: buf_tbl_perr"));
-	}
-
-	if (istatus & ICFG_ZCP_TT_PROGRAM_ERR) {
-		statsp->tt_program_err++;
-		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-			NXGE_FM_EREPORT_ZCP_TT_PROGRAM_ERR);
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_zcp_err_evnts: tt_program_err"));
-	}
-
-	if (istatus & ICFG_ZCP_RSP_TT_INDEX_ERR) {
-		statsp->rsp_tt_index_err++;
-		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-			NXGE_FM_EREPORT_ZCP_RSP_TT_INDEX_ERR);
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_zcp_err_evnts: rsp_tt_index_err"));
-	}
-
-	if (istatus & ICFG_ZCP_SLV_TT_INDEX_ERR) {
-		statsp->slv_tt_index_err++;
-		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-			NXGE_FM_EREPORT_ZCP_SLV_TT_INDEX_ERR);
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_zcp_err_evnts: slv_tt_index_err"));
-	}
-
-	if (istatus & ICFG_ZCP_TT_INDEX_ERR) {
-		statsp->zcp_tt_index_err++;
-		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-			NXGE_FM_EREPORT_ZCP_TT_INDEX_ERR);
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			"nxge_zcp_err_evnts: tt_index_err"));
-	}
-
-	if (((portn == 0) && (istatus & ICFG_ZCP_CFIFO_ECC0)) ||
-			((portn == 1) && (istatus & ICFG_ZCP_CFIFO_ECC1)) ||
-			((portn == 2) && (istatus & ICFG_ZCP_CFIFO_ECC2)) ||
-			((portn == 3) && (istatus & ICFG_ZCP_CFIFO_ECC3))) {
-		boolean_t ue_ecc_valid;
-
-		if ((status = nxge_ipp_eccue_valid_check(nxgep,
-				&ue_ecc_valid)) != NXGE_OK)
-			return (status);
-
-		if (ue_ecc_valid) {
-			statsp->cfifo_ecc++;
-			NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
-				NXGE_FM_EREPORT_ZCP_CFIFO_ECC);
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-				"nxge_zcp_err_evnts: port%d buf_cfifo_ecc",
-				portn));
-			rxport_fatal = B_TRUE;
-		}
-	}
-
-	/*
-	 * Making sure that the error source is cleared if this is an injected
-	 * error.
-	 */
-	switch (portn) {
-	case 0:
-		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT0_REG, 0);
-		break;
-	case 1:
-		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT1_REG, 0);
-		break;
-	case 2:
-		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT2_REG, 0);
-		break;
-	case 3:
-		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT3_REG, 0);
-		break;
-	}
-
-	(void) npi_zcp_clear_istatus(handle);
-
-	if (rxport_fatal) {
-		NXGE_DEBUG_MSG((nxgep, IPP_CTL,
-			" nxge_zcp_handle_sys_errors:"
-			" fatal error on Port #%d\n", portn));
-		status = nxge_zcp_fatal_err_recover(nxgep);
-		if (status == NXGE_OK) {
-			FM_SERVICE_RESTORED(nxgep);
-		}
-	}
-	return (status);
-}
-
-void
-nxge_zcp_inject_err(p_nxge_t nxgep, uint32_t err_id)
-{
-	zcp_int_stat_reg_t zcps;
-	uint8_t portn = nxgep->mac.portnum;
-	zcp_ecc_ctrl_t ecc_ctrl;
-
-	switch (err_id) {
-	case NXGE_FM_EREPORT_ZCP_CFIFO_ECC:
-		ecc_ctrl.value = 0;
-		ecc_ctrl.bits.w0.cor_dbl = 1;
-		ecc_ctrl.bits.w0.cor_lst = 1;
-		ecc_ctrl.bits.w0.cor_all = 0;
-		switch (portn) {
-		case 0:
-			cmn_err(CE_NOTE,
-				"!Write 0x%llx to port%d ZCP_CFIFO_ECC_PORT\n",
-				(unsigned long long) ecc_ctrl.value, portn);
-			NXGE_REG_WR64(nxgep->npi_handle,
-				ZCP_CFIFO_ECC_PORT0_REG,
-				ecc_ctrl.value);
-			break;
-		case 1:
-			cmn_err(CE_NOTE,
-				"!Write 0x%llx to port%d ZCP_CFIFO_ECC_PORT\n",
-				(unsigned long long) ecc_ctrl.value, portn);
-			NXGE_REG_WR64(nxgep->npi_handle,
-				ZCP_CFIFO_ECC_PORT1_REG,
-				ecc_ctrl.value);
-			break;
-		case 2:
-			cmn_err(CE_NOTE,
-				"!Write 0x%llx to port%d ZCP_CFIFO_ECC_PORT\n",
-				(unsigned long long) ecc_ctrl.value, portn);
-			NXGE_REG_WR64(nxgep->npi_handle,
-				ZCP_CFIFO_ECC_PORT2_REG,
-				ecc_ctrl.value);
-			break;
-		case 3:
-			cmn_err(CE_NOTE,
-				"!Write 0x%llx to port%d ZCP_CFIFO_ECC_PORT\n",
-				(unsigned long long) ecc_ctrl.value, portn);
-			NXGE_REG_WR64(nxgep->npi_handle,
-				ZCP_CFIFO_ECC_PORT3_REG,
-				ecc_ctrl.value);
-			break;
-		}
-		break;
-
-	case NXGE_FM_EREPORT_ZCP_RRFIFO_UNDERRUN:
-	case NXGE_FM_EREPORT_ZCP_RSPFIFO_UNCORR_ERR:
-	case NXGE_FM_EREPORT_ZCP_STAT_TBL_PERR:
-	case NXGE_FM_EREPORT_ZCP_DYN_TBL_PERR:
-	case NXGE_FM_EREPORT_ZCP_BUF_TBL_PERR:
-	case NXGE_FM_EREPORT_ZCP_RRFIFO_OVERRUN:
-	case NXGE_FM_EREPORT_ZCP_BUFFER_OVERFLOW:
-	case NXGE_FM_EREPORT_ZCP_TT_PROGRAM_ERR:
-	case NXGE_FM_EREPORT_ZCP_RSP_TT_INDEX_ERR:
-	case NXGE_FM_EREPORT_ZCP_SLV_TT_INDEX_ERR:
-	case NXGE_FM_EREPORT_ZCP_TT_INDEX_ERR:
-		NXGE_REG_RD64(nxgep->npi_handle, ZCP_INT_STAT_TEST_REG,
-			&zcps.value);
-		if (err_id == NXGE_FM_EREPORT_ZCP_RRFIFO_UNDERRUN)
-			zcps.bits.ldw.rrfifo_urun = 1;
-		if (err_id == NXGE_FM_EREPORT_ZCP_RSPFIFO_UNCORR_ERR)
-			zcps.bits.ldw.rspfifo_uc_err = 1;
-		if (err_id == NXGE_FM_EREPORT_ZCP_STAT_TBL_PERR)
-			zcps.bits.ldw.stat_tbl_perr = 1;
-		if (err_id == NXGE_FM_EREPORT_ZCP_DYN_TBL_PERR)
-			zcps.bits.ldw.dyn_tbl_perr = 1;
-		if (err_id == NXGE_FM_EREPORT_ZCP_BUF_TBL_PERR)
-			zcps.bits.ldw.buf_tbl_perr = 1;
-		if (err_id == NXGE_FM_EREPORT_ZCP_CFIFO_ECC) {
-			switch (portn) {
-			case 0:
-				zcps.bits.ldw.cfifo_ecc0 = 1;
-				break;
-			case 1:
-				zcps.bits.ldw.cfifo_ecc1 = 1;
-				break;
-			case 2:
-				zcps.bits.ldw.cfifo_ecc2 = 1;
-				break;
-			case 3:
-				zcps.bits.ldw.cfifo_ecc3 = 1;
-				break;
-			}
-		}
-
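-		/*
-		 * FALLTHROUGH -- the err_ids handled above also rely on the
-		 * default case below to write ZCP_INT_STAT_TEST_REG.
-		 */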
-	default:
-		if (err_id == NXGE_FM_EREPORT_ZCP_RRFIFO_OVERRUN)
-			zcps.bits.ldw.rrfifo_orun = 1;
-		if (err_id == NXGE_FM_EREPORT_ZCP_BUFFER_OVERFLOW)
-			zcps.bits.ldw.buf_overflow = 1;
-		if (err_id == NXGE_FM_EREPORT_ZCP_TT_PROGRAM_ERR)
-			zcps.bits.ldw.tt_tbl_perr = 1;
-		if (err_id == NXGE_FM_EREPORT_ZCP_RSP_TT_INDEX_ERR)
-			zcps.bits.ldw.rsp_tt_index_err = 1;
-		if (err_id == NXGE_FM_EREPORT_ZCP_SLV_TT_INDEX_ERR)
-			zcps.bits.ldw.slv_tt_index_err = 1;
-		if (err_id == NXGE_FM_EREPORT_ZCP_TT_INDEX_ERR)
-			zcps.bits.ldw.zcp_tt_index_err = 1;
-		cmn_err(CE_NOTE, "!Write 0x%lx to ZCP_INT_STAT_TEST_REG\n",
-			zcps.value);
-		NXGE_REG_WR64(nxgep->npi_handle, ZCP_INT_STAT_TEST_REG,
-			zcps.value);
-		break;
-	}
-}
-
-nxge_status_t
-nxge_zcp_fatal_err_recover(p_nxge_t nxgep)
-{
-	npi_handle_t handle;
-	npi_status_t rs = NPI_SUCCESS;
-	nxge_status_t status = NXGE_OK;
-	uint8_t portn;
-	zcp_ram_unit_t w_data;
-	zcp_ram_unit_t r_data;
-	uint32_t cfifo_depth;
-	int i;
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_zcp_fatal_err_recover"));
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-		"Recovering from RxPort error..."));
-
-	handle = nxgep->npi_handle;
-	portn = nxgep->mac.portnum;
-
-	/* Disable RxMAC */
-	if (nxge_rx_mac_disable(nxgep) != NXGE_OK)
-		goto fail;
-
-	/* Make sure source is clear if this is an injected error */
-	switch (portn) {
-	case 0:
-		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT0_REG, 0);
-		break;
-	case 1:
-		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT1_REG, 0);
-		break;
-	case 2:
-		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT2_REG, 0);
-		break;
-	case 3:
-		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT3_REG, 0);
-		break;
-	}
-
-	/* Clear up CFIFO */
-	if ((nxgep->niu_type == NEPTUNE) || (nxgep->niu_type == NEPTUNE_2)) {
-		if (portn < 2)
-			cfifo_depth = ZCP_P0_P1_CFIFO_DEPTH;
-		else
-			cfifo_depth = ZCP_P2_P3_CFIFO_DEPTH;
-	} else if (nxgep->niu_type == N2_NIU)
-		cfifo_depth = ZCP_NIU_CFIFO_DEPTH;
-
-	w_data.w0 = 0;
-	w_data.w1 = 0;
-	w_data.w2 = 0;
-	w_data.w3 = 0;
-	w_data.w4 = 0;
-
-	for (i = 0; i < cfifo_depth; i++) {
-		if (npi_zcp_tt_cfifo_entry(handle, OP_SET,
-				portn, i, &w_data) != NPI_SUCCESS)
-			goto fail;
-		if (npi_zcp_tt_cfifo_entry(handle, OP_GET,
-				portn, i, &r_data) != NPI_SUCCESS)
-			goto fail;
-	}
-
-	/* When recovering from ZCP, RxDMA channel resets are not necessary */
-	/* Reset ZCP CFIFO */
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "port%d Reset ZCP CFIFO...", portn));
-	if ((rs = npi_zcp_rest_cfifo_port(handle, portn)) != NPI_SUCCESS)
-		goto fail;
-
-	/* Reset IPP */
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "port%d Reset IPP...", portn));
-	if ((rs = npi_ipp_reset(handle, portn)) != NPI_SUCCESS)
-		goto fail;
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "port%d Reset RxMAC...", portn));
-	if (nxge_rx_mac_reset(nxgep) != NXGE_OK)
-		goto fail;
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "port%d Initialize RxMAC...", portn));
-	if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK)
-		goto fail;
-
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "port%d Enable RxMAC...", portn));
-	if (nxge_rx_mac_enable(nxgep) != NXGE_OK)
-		goto fail;
-
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-		"Recovery Successful, RxPort Restored"));
-	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_zcp_fatal_err_recover"));
-	return (NXGE_OK);
-fail:
-	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
-	return (status | rs);
-}
--- a/usr/src/uts/sun4v/nxge/Makefile	Mon Mar 19 18:02:35 2007 -0700
+++ b/usr/src/uts/sun4v/nxge/Makefile	Mon Mar 19 19:37:22 2007 -0700
@@ -20,14 +20,14 @@
 #
 # uts/sun4v/nxge/Makefile
 #
-# Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+# Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 # Use is subject to license terms.
 #
 #
 # ident	"%Z%%M%	%I%	%E% SMI"
 #
 #	This makefile drives the production of the N2 NIU
-#	10G Ethernet leaf driver kernel module.
+#	10G and SUN 10G/1G Ethernet leaf driver kernel module.
 #
 #	sun4v implementation architecture dependent
 #
@@ -41,10 +41,11 @@
 #	Define the module and object file sets.
 #
 MODULE		= nxge
-OBJECTS		= $(NXGE_OBJS:%=$(OBJS_DIR)/%) $(NXGE_NPI_OBJS:%=$(OBJS_DIR)/%)
-LINTS		= $(NXGE_OBJS:%.o=$(LINTS_DIR)/%.ln)	\
-			$(NXGE_NPI_OBJS:%.o=$(LINTS_DIR)/%.ln)
+NXGE_OBJECTS =	$(NXGE_OBJS) $(NXGE_NPI_OBJS) $(NXGE_HCALL_OBJS)
+OBJECTS		=  $(NXGE_OBJECTS:%=$(OBJS_DIR)/%)
+LINTS		= $(NXGE_OBJECTS:%.o=$(LINTS_DIR)/%.ln)
 ROOTMODULE	= $(ROOT_PSM_DRV_DIR)/$(MODULE)
+CONF_SRCDIR	= $(UTSBASE)/common/io/nxge
 
 #
 #	Include common rules.
@@ -63,7 +64,7 @@
 #
 ALL_TARGET	= $(BINARY)
 LINT_TARGET	= $(MODULE).lint
-INSTALL_TARGET	= $(BINARY) $(ROOTMODULE)
+INSTALL_TARGET	= $(BINARY) $(ROOTMODULE) $(ROOT_CONFFILE)
 
 #
 #
@@ -73,8 +74,8 @@
 #
 # Include nxge specific header files
 #
-INC_PATH	+= -I$(UTSBASE)/sun4v/io/nxge/npi
-INC_PATH	+= -I$(UTSBASE)/sun4v/sys/nxge
+INC_PATH	+= -I$(UTSBASE)/common/io/nxge/npi
+INC_PATH	+= -I$(UTSBASE)/common/sys/nxge
 #
 #
 # lint pass one enforcement
@@ -139,4 +140,4 @@
 #
 #	Include common targets.
 #
-include $(UTSBASE)/$(PLATFORM)/Makefile.targ
+include $(UTSBASE)/sun4v/Makefile.targ
--- a/usr/src/uts/sun4v/sys/Makefile	Mon Mar 19 18:02:35 2007 -0700
+++ b/usr/src/uts/sun4v/sys/Makefile	Mon Mar 19 19:37:22 2007 -0700
@@ -97,29 +97,6 @@
 	memtestio_n2.h		\
 	memtestio_v.h
 
-NXGEHDRS=			\
-	nxge.h			\
-	nxge_common.h		\
-	nxge_common_impl.h	\
-	nxge_defs.h		\
-	nxge_hw.h		\
-	nxge_impl.h		\
-	nxge_ipp.h		\
-	nxge_ipp_hw.h		\
-	nxge_mac.h		\
-	nxge_mac_hw.h		\
-	nxge_fflp.h		\
-	nxge_fflp_hw.h		\
-	nxge_mii.h		\
-	nxge_rxdma.h		\
-	nxge_rxdma_hw.h		\
-	nxge_txc.h		\
-	nxge_txc_hw.h		\
-	nxge_txdma.h		\
-	nxge_txdma_hw.h		\
-	nxge_virtual.h		\
-	nxge_espc.h
-
 ROOTHDRS=		$(HDRS:%=$(USR_PSM_ISYS_DIR)/%)
 $(CLOSED_BUILD)ROOTHDRS += $(CLOSED_HDRS:%=$(USR_PSM_ISYS_DIR)/%)
 
@@ -133,8 +110,7 @@
 LINKDEST=		../../../../platform/$(PLATFORM)/include/sys
 
 CHECKHDRS=		$(HDRS:%.h=%.check) \
-			$(SUN4_HDRS:%.h=%.cmncheck)	\
-			$(NXGEHDRS:%.h=nxge/%.check)
+			$(SUN4_HDRS:%.h=%.cmncheck)
 
 $(CLOSED_BUILD)CHECKHDRS +=			\
 	$(CLOSED_HDRS:%.h=%.check)		\
--- a/usr/src/uts/sun4v/sys/nxge/nxge.h	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1044 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef	_SYS_NXGE_NXGE_H
-#define	_SYS_NXGE_NXGE_H
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#ifdef	__cplusplus
-extern "C" {
-#endif
-
-#if defined(_KERNEL) || defined(COSIM)
-#include <nxge_mac.h>
-#include <nxge_ipp.h>
-#include <nxge_fflp.h>
-#endif
-
-/*
- * NXGE diagnostics IOCTLS.
- */
-#define	NXGE_IOC		((((('N' << 8) + 'X') << 8) + 'G') << 8)
-
-#define	NXGE_GET64		(NXGE_IOC|1)
-#define	NXGE_PUT64		(NXGE_IOC|2)
-#define	NXGE_GET_TX_RING_SZ	(NXGE_IOC|3)
-#define	NXGE_GET_TX_DESC	(NXGE_IOC|4)
-#define	NXGE_GLOBAL_RESET	(NXGE_IOC|5)
-#define	NXGE_TX_SIDE_RESET	(NXGE_IOC|6)
-#define	NXGE_RX_SIDE_RESET	(NXGE_IOC|7)
-#define	NXGE_RESET_MAC		(NXGE_IOC|8)
-
-#define	NXGE_GET_MII		(NXGE_IOC|11)
-#define	NXGE_PUT_MII		(NXGE_IOC|12)
-#define	NXGE_RTRACE		(NXGE_IOC|13)
-#define	NXGE_RTRACE_TEST	(NXGE_IOC|20)
-#define	NXGE_TX_REGS_DUMP	(NXGE_IOC|21)
-#define	NXGE_RX_REGS_DUMP	(NXGE_IOC|22)
-#define	NXGE_INT_REGS_DUMP	(NXGE_IOC|23)
-#define	NXGE_VIR_REGS_DUMP	(NXGE_IOC|24)
-#define	NXGE_VIR_INT_REGS_DUMP	(NXGE_IOC|25)
-#define	NXGE_RDUMP		(NXGE_IOC|26)
-#define	NXGE_RDC_GRPS_DUMP	(NXGE_IOC|27)
-#define	NXGE_PIO_TEST		(NXGE_IOC|28)
-
-#define	NXGE_GET_TCAM		(NXGE_IOC|29)
-#define	NXGE_PUT_TCAM		(NXGE_IOC|30)
-#define	NXGE_INJECT_ERR		(NXGE_IOC|40)
-
-#if (defined(SOLARIS) && defined(_KERNEL)) || defined(COSIM)
-#define	NXGE_OK			0
-#define	NXGE_ERROR		0x40000000
-#define	NXGE_DDI_FAILED		0x20000000
-#define	NXGE_GET_PORT_NUM(n)	n
-
-/*
- * Definitions for module_info.
- */
-#define	NXGE_IDNUM		(0)			/* module ID number */
-#define	NXGE_DRIVER_NAME	"nxge"			/* module name */
-
-#define	NXGE_MINPSZ		(0)			/* min packet size */
-#define	NXGE_MAXPSZ		(ETHERMTU)		/* max packet size */
-#define	NXGE_HIWAT		(2048 * NXGE_MAXPSZ)	/* hi-water mark */
-#define	NXGE_LOWAT		(1)			/* lo-water mark */
-#define	NXGE_HIWAT_MAX		(192000 * NXGE_MAXPSZ)
-#define	NXGE_HIWAT_MIN		(2 * NXGE_MAXPSZ)
-#define	NXGE_LOWAT_MAX		(192000 * NXGE_MAXPSZ)
-#define	NXGE_LOWAT_MIN		(1)
-
-#ifndef	D_HOTPLUG
-#define	D_HOTPLUG		0x00
-#endif
-
-#define	INIT_BUCKET_SIZE	16	/* Initial Hash Bucket Size */
-
-#define	NXGE_CHECK_TIMER	(5000)
-
-typedef enum {
-	param_instance,
-	param_main_instance,
-	param_function_number,
-	param_partition_id,
-	param_read_write_mode,
-	param_niu_cfg_type,
-	param_tx_quick_cfg,
-	param_rx_quick_cfg,
-	param_master_cfg_enable,
-	param_master_cfg_value,
-
-	param_autoneg,
-	param_anar_10gfdx,
-	param_anar_10ghdx,
-	param_anar_1000fdx,
-	param_anar_1000hdx,
-	param_anar_100T4,
-	param_anar_100fdx,
-	param_anar_100hdx,
-	param_anar_10fdx,
-	param_anar_10hdx,
-
-	param_anar_asmpause,
-	param_anar_pause,
-	param_use_int_xcvr,
-	param_enable_ipg0,
-	param_ipg0,
-	param_ipg1,
-	param_ipg2,
-	param_accept_jumbo,
-	param_txdma_weight,
-	param_txdma_channels_begin,
-
-	param_txdma_channels,
-	param_txdma_info,
-	param_rxdma_channels_begin,
-	param_rxdma_channels,
-	param_rxdma_drr_weight,
-	param_rxdma_full_header,
-	param_rxdma_info,
-	param_rxdma_rbr_size,
-	param_rxdma_rcr_size,
-	param_default_port_rdc,
-	param_rxdma_intr_time,
-	param_rxdma_intr_pkts,
-
-	param_rdc_grps_start,
-	param_rx_rdc_grps,
-	param_default_grp0_rdc,
-	param_default_grp1_rdc,
-	param_default_grp2_rdc,
-	param_default_grp3_rdc,
-	param_default_grp4_rdc,
-	param_default_grp5_rdc,
-	param_default_grp6_rdc,
-	param_default_grp7_rdc,
-
-	param_info_rdc_groups,
-	param_start_ldg,
-	param_max_ldg,
-	param_mac_2rdc_grp,
-	param_vlan_2rdc_grp,
-	param_fcram_part_cfg,
-	param_fcram_access_ratio,
-	param_tcam_access_ratio,
-	param_tcam_enable,
-	param_hash_lookup_enable,
-	param_llc_snap_enable,
-
-	param_h1_init_value,
-	param_h2_init_value,
-	param_class_cfg_ether_usr1,
-	param_class_cfg_ether_usr2,
-	param_class_cfg_ip_usr4,
-	param_class_cfg_ip_usr5,
-	param_class_cfg_ip_usr6,
-	param_class_cfg_ip_usr7,
-	param_class_opt_ip_usr4,
-	param_class_opt_ip_usr5,
-	param_class_opt_ip_usr6,
-	param_class_opt_ip_usr7,
-	param_class_opt_ipv4_tcp,
-	param_class_opt_ipv4_udp,
-	param_class_opt_ipv4_ah,
-	param_class_opt_ipv4_sctp,
-	param_class_opt_ipv6_tcp,
-	param_class_opt_ipv6_udp,
-	param_class_opt_ipv6_ah,
-	param_class_opt_ipv6_sctp,
-	param_nxge_debug_flag,
-	param_npi_debug_flag,
-	param_dump_rdc,
-	param_dump_tdc,
-	param_dump_mac_regs,
-	param_dump_ipp_regs,
-	param_dump_fflp_regs,
-	param_dump_vlan_table,
-	param_dump_rdc_table,
-	param_dump_ptrs,
-	param_end
-} nxge_param_index_t;
-
-
-/*
- * Named Dispatch Parameter Management Structure
- */
-typedef	int (*nxge_ndgetf_t)(p_nxge_t, queue_t *, MBLKP, caddr_t, cred_t *);
-typedef	int (*nxge_ndsetf_t)(p_nxge_t, queue_t *,
-	    MBLKP, char *, caddr_t, cred_t *);
-
-#define	NXGE_PARAM_READ			0x00000001ULL
-#define	NXGE_PARAM_WRITE		0x00000002ULL
-#define	NXGE_PARAM_SHARED		0x00000004ULL
-#define	NXGE_PARAM_PRIV			0x00000008ULL
-#define	NXGE_PARAM_RW			NXGE_PARAM_READ | NXGE_PARAM_WRITE
-#define	NXGE_PARAM_RWS			NXGE_PARAM_RW | NXGE_PARAM_SHARED
-#define	NXGE_PARAM_RWP			NXGE_PARAM_RW | NXGE_PARAM_PRIV
-
-#define	NXGE_PARAM_RXDMA		0x00000010ULL
-#define	NXGE_PARAM_TXDMA		0x00000020ULL
-#define	NXGE_PARAM_CLASS_GEN	0x00000040ULL
-#define	NXGE_PARAM_MAC			0x00000080ULL
-#define	NXGE_PARAM_CLASS_BIN	NXGE_PARAM_CLASS_GEN | NXGE_PARAM_BASE_BIN
-#define	NXGE_PARAM_CLASS_HEX	NXGE_PARAM_CLASS_GEN | NXGE_PARAM_BASE_HEX
-#define	NXGE_PARAM_CLASS		NXGE_PARAM_CLASS_HEX
-
-#define	NXGE_PARAM_CMPLX		0x00010000ULL
-#define	NXGE_PARAM_NDD_WR_OK		0x00020000ULL
-#define	NXGE_PARAM_INIT_ONLY		0x00040000ULL
-#define	NXGE_PARAM_INIT_CONFIG		0x00080000ULL
-
-#define	NXGE_PARAM_READ_PROP		0x00100000ULL
-#define	NXGE_PARAM_PROP_ARR32		0x00200000ULL
-#define	NXGE_PARAM_PROP_ARR64		0x00400000ULL
-#define	NXGE_PARAM_PROP_STR		0x00800000ULL
-
-#define	NXGE_PARAM_BASE_DEC		0x00000000ULL
-#define	NXGE_PARAM_BASE_BIN		0x10000000ULL
-#define	NXGE_PARAM_BASE_HEX		0x20000000ULL
-#define	NXGE_PARAM_BASE_STR		0x40000000ULL
-#define	NXGE_PARAM_DONT_SHOW		0x80000000ULL
-
-#define	NXGE_PARAM_ARRAY_CNT_MASK	0x0000ffff00000000ULL
-#define	NXGE_PARAM_ARRAY_CNT_SHIFT	32ULL
-#define	NXGE_PARAM_ARRAY_ALLOC_MASK	0xffff000000000000ULL
-#define	NXGE_PARAM_ARRAY_ALLOC_SHIFT	48ULL
-
-typedef struct _nxge_param_t {
-	int (*getf)();
-	int (*setf)();   /* null for read only */
-	uint64_t type;  /* R/W/ Common/Port/ .... */
-	uint64_t minimum;
-	uint64_t maximum;
-	uint64_t value;	/* for array params, pointer to value array */
-	uint64_t old_value; /* for array params, pointer to old_value array */
-	char   *fcode_name;
-	char   *name;
-} nxge_param_t, *p_nxge_param_t;
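The 64-bit type word mixes permission and format flags with two 16-bit counts packed at NXGE_PARAM_ARRAY_CNT_SHIFT and NXGE_PARAM_ARRAY_ALLOC_SHIFT. A hedged sketch of packing and extracting the element count (the helper names are illustrative, not the driver's):

#include <stdint.h>

#define	NXGE_PARAM_ARRAY_CNT_MASK	0x0000ffff00000000ULL
#define	NXGE_PARAM_ARRAY_CNT_SHIFT	32ULL

/* Store a 16-bit element count in bits [47:32] of the type word. */
static uint64_t
param_set_array_cnt(uint64_t type, uint16_t cnt)
{
	type &= ~NXGE_PARAM_ARRAY_CNT_MASK;
	type |= ((uint64_t)cnt << NXGE_PARAM_ARRAY_CNT_SHIFT) &
	    NXGE_PARAM_ARRAY_CNT_MASK;
	return (type);
}

/* Read the element count back out of the type word. */
static uint16_t
param_get_array_cnt(uint64_t type)
{
	return ((uint16_t)((type & NXGE_PARAM_ARRAY_CNT_MASK) >>
	    NXGE_PARAM_ARRAY_CNT_SHIFT));
}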
-
-
-
-typedef enum {
-	nxge_lb_normal,
-	nxge_lb_ext10g,
-	nxge_lb_ext1000,
-	nxge_lb_ext100,
-	nxge_lb_ext10,
-	nxge_lb_phy10g,
-	nxge_lb_phy1000,
-	nxge_lb_phy,
-	nxge_lb_serdes10g,
-	nxge_lb_serdes1000,
-	nxge_lb_serdes,
-	nxge_lb_mac10g,
-	nxge_lb_mac1000,
-	nxge_lb_mac
-} nxge_lb_t;
-
-enum nxge_mac_state {
-	NXGE_MAC_STOPPED = 0,
-	NXGE_MAC_STARTED
-};
-
-/*
- * Private DLPI full dlsap address format.
- */
-typedef struct _nxge_dladdr_t {
-	ether_addr_st dl_phys;
-	uint16_t dl_sap;
-} nxge_dladdr_t, *p_nxge_dladdr_t;
-
-typedef struct _mc_addr_t {
-	ether_addr_st multcast_addr;
-	uint_t mc_addr_cnt;
-} mc_addr_t, *p_mc_addr_t;
-
-typedef struct _mc_bucket_t {
-	p_mc_addr_t addr_list;
-	uint_t list_size;
-} mc_bucket_t, *p_mc_bucket_t;
-
-typedef struct _mc_table_t {
-	p_mc_bucket_t bucket_list;
-	uint_t buckets_used;
-} mc_table_t, *p_mc_table_t;
-
-typedef struct _filter_t {
-	uint32_t all_phys_cnt;
-	uint32_t all_multicast_cnt;
-	uint32_t all_sap_cnt;
-} filter_t, *p_filter_t;
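mc_table_t describes a bucketed multicast address list that starts out with INIT_BUCKET_SIZE buckets. A minimal sketch of bucket selection, assuming a trivial hash over the last MAC byte purely for illustration (the driver's actual hash function is not shown in this header):

#include <stdint.h>

#define	INIT_BUCKET_SIZE	16	/* initial number of hash buckets */

/* Hypothetical bucket selection from the low byte of a MAC address. */
static unsigned int
mc_hash(const uint8_t mac[6], unsigned int nbuckets)
{
	return (mac[5] % nbuckets);	/* e.g. mc_hash(addr, INIT_BUCKET_SIZE) */
}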
-
-#if defined(_KERNEL) || defined(COSIM)
-
-
-typedef struct _nxge_port_stats_t {
-	/*
-	 *  Overall structure size
-	 */
-	size_t			stats_size;
-
-	/*
-	 * Link Input/Output stats
-	 */
-	uint64_t		ipackets;
-	uint64_t		ierrors;
-	uint64_t		opackets;
-	uint64_t		oerrors;
-	uint64_t		collisions;
-
-	/*
-	 * MIB II variables
-	 */
-	uint64_t		rbytes;    /* # bytes received */
-	uint64_t		obytes;    /* # bytes transmitted */
-	uint32_t		multircv;  /* # multicast packets received */
-	uint32_t		multixmt;  /* # multicast packets for xmit */
-	uint32_t		brdcstrcv; /* # broadcast packets received */
-	uint32_t		brdcstxmt; /* # broadcast packets for xmit */
-	uint32_t		norcvbuf;  /* # rcv packets discarded */
-	uint32_t		noxmtbuf;  /* # xmit packets discarded */
-
-	/*
-	 * Current loopback, QOS, trunking and polling modes
-	 * of the physical MAC port.
-	 */
-	nxge_lb_t		lb_mode;
-	uint32_t		qos_mode;
-	uint32_t		trunk_mode;
-	uint32_t		poll_mode;
-
-	/*
-	 * Tx Statistics.
-	 */
-	uint32_t		tx_inits;
-	uint32_t		tx_starts;
-	uint32_t		tx_nocanput;
-	uint32_t		tx_msgdup_fail;
-	uint32_t		tx_allocb_fail;
-	uint32_t		tx_no_desc;
-	uint32_t		tx_dma_bind_fail;
-	uint32_t		tx_uflo;
-	uint32_t		tx_hdr_pkts;
-	uint32_t		tx_ddi_pkts;
-	uint32_t		tx_dvma_pkts;
-
-	uint32_t		tx_max_pend;
-
-	/*
-	 * Rx Statistics.
-	 */
-	uint32_t		rx_inits;
-	uint32_t		rx_hdr_pkts;
-	uint32_t		rx_mtu_pkts;
-	uint32_t		rx_split_pkts;
-	uint32_t		rx_no_buf;
-	uint32_t		rx_no_comp_wb;
-	uint32_t		rx_ov_flow;
-	uint32_t		rx_len_mm;
-	uint32_t		rx_tag_err;
-	uint32_t		rx_nocanput;
-	uint32_t		rx_msgdup_fail;
-	uint32_t		rx_allocb_fail;
-
-	/*
-	 * Receive buffer management statistics.
-	 */
-	uint32_t		rx_new_pages;
-	uint32_t		rx_new_hdr_pgs;
-	uint32_t		rx_new_mtu_pgs;
-	uint32_t		rx_new_nxt_pgs;
-	uint32_t		rx_reused_pgs;
-	uint32_t		rx_hdr_drops;
-	uint32_t		rx_mtu_drops;
-	uint32_t		rx_nxt_drops;
-
-	/*
-	 * Receive flow statistics
-	 */
-	uint32_t		rx_rel_flow;
-	uint32_t		rx_rel_bit;
-
-	uint32_t		rx_pkts_dropped;
-
-	/*
-	 * PCI-E Bus Statistics.
-	 */
-	uint32_t		pci_bus_speed;
-	uint32_t		pci_err;
-	uint32_t		pci_rta_err;
-	uint32_t		pci_rma_err;
-	uint32_t		pci_parity_err;
-	uint32_t		pci_bad_ack_err;
-	uint32_t		pci_drto_err;
-	uint32_t		pci_dmawz_err;
-	uint32_t		pci_dmarz_err;
-
-	uint32_t		rx_taskq_waits;
-
-	uint32_t		tx_jumbo_pkts;
-
-	/*
-	 * Some statistics added to support bringup; these
-	 * should be removed.
-	 */
-	uint32_t		user_defined;
-} nxge_port_stats_t, *p_nxge_port_stats_t;
-
-
-typedef struct _nxge_stats_t {
-	/*
-	 *  Overall structure size
-	 */
-	size_t			stats_size;
-
-	kstat_t			*ksp;
-	kstat_t			*rdc_ksp[NXGE_MAX_RDCS];
-	kstat_t			*tdc_ksp[NXGE_MAX_TDCS];
-	kstat_t			*rdc_sys_ksp;
-	kstat_t			*fflp_ksp[1];
-	kstat_t			*ipp_ksp;
-	kstat_t			*txc_ksp;
-	kstat_t			*mac_ksp;
-	kstat_t			*zcp_ksp;
-	kstat_t			*port_ksp;
-	kstat_t			*mmac_ksp;
-
-	nxge_mac_stats_t	mac_stats;	/* Common MAC Statistics */
-	nxge_xmac_stats_t	xmac_stats;	/* XMAC Statistics */
-	nxge_bmac_stats_t	bmac_stats;	/* BMAC Statistics */
-
-	nxge_rx_ring_stats_t	rx_stats;	/* per port RX stats */
-	nxge_ipp_stats_t	ipp_stats;	/* per port IPP stats */
-	nxge_zcp_stats_t	zcp_stats;	/* per port ZCP stats */
-	nxge_rx_ring_stats_t	rdc_stats[NXGE_MAX_RDCS]; /* per rdc stats */
-	nxge_rdc_sys_stats_t	rdc_sys_stats;	/* per port RDC stats */
-
-	nxge_tx_ring_stats_t	tx_stats;	/* per port TX stats */
-	nxge_txc_stats_t	txc_stats;	/* per port TXC stats */
-	nxge_tx_ring_stats_t	tdc_stats[NXGE_MAX_TDCS]; /* per tdc stats */
-	nxge_fflp_stats_t	fflp_stats;	/* fflp stats */
-	nxge_port_stats_t	port_stats;	/* per port stats */
-	nxge_mmac_stats_t	mmac_stats;	/* Multi mac. stats */
-
-} nxge_stats_t, *p_nxge_stats_t;
-
-
-
-typedef struct _nxge_intr_t {
-	boolean_t		intr_registered; /* interrupts are registered */
-	boolean_t		intr_enabled; 	/* interrupts are enabled */
-	boolean_t		niu_msi_enable;	/* debug or configurable? */
-	uint8_t			nldevs;		/* # of logical devices */
-	int			intr_types;	/* interrupt types supported */
-	int			intr_type;	/* interrupt type to add */
-	int			max_int_cnt;	/* max MSIX/INT HW supports */
-	int			start_inum;	/* start inum (in sequence?) */
-	int			msi_intx_cnt;	/* # msi/intx ints returned */
-	int			intr_added;	/* # ints actually needed */
-	int			intr_cap;	/* interrupt capabilities */
-	size_t			intr_size;	/* size of array to allocate */
-	ddi_intr_handle_t 	*htable;	/* For array of interrupts */
-	/* Add interrupt number for each interrupt vector */
-	int			pri;
-} nxge_intr_t, *p_nxge_intr_t;
-
-typedef struct _nxge_ldgv_t {
-	uint8_t			ndma_ldvs;
-	uint8_t			nldvs;
-	uint8_t			start_ldg;
-	uint8_t			start_ldg_tx;
-	uint8_t			start_ldg_rx;
-	uint8_t			maxldgs;
-	uint8_t			maxldvs;
-	uint8_t			ldg_intrs;
-	boolean_t		own_sys_err;
-	boolean_t		own_max_ldv;
-	uint32_t		tmres;
-	p_nxge_ldg_t		ldgp;
-	p_nxge_ldv_t		ldvp;
-	p_nxge_ldv_t		ldvp_syserr;
-} nxge_ldgv_t, *p_nxge_ldgv_t;
-
-/*
- * Neptune Device instance state information.
- *
- * Each instance is dynamically allocated on first attach.
- */
-struct _nxge_t {
-	dev_info_t		*dip;		/* device instance */
-	dev_info_t		*p_dip;		/* Parent's device instance */
-	int			instance;	/* instance number */
-	int			function_num;	/* device function number */
-	int			nports;		/* # of ports on this device */
-	int			board_ver;	/* Board Version */
-	int			partition_id;	/* partition ID */
-	int			use_partition;	/* partition is enabled */
-	uint32_t		drv_state;	/* driver state bit flags */
-	uint64_t		nxge_debug_level; /* driver debug flag bits */
-	kmutex_t		genlock[1];
-	enum nxge_mac_state	nxge_mac_state;
-	ddi_softintr_t		resched_id;	/* reschedule callback	*/
-	boolean_t		resched_needed;
-	boolean_t		resched_running;
-
-	p_dev_regs_t		dev_regs;
-	npi_handle_t		npi_handle;
-	npi_handle_t		npi_pci_handle;
-	npi_handle_t		npi_reg_handle;
-	npi_handle_t		npi_msi_handle;
-	npi_handle_t		npi_vreg_handle;
-	npi_handle_t		npi_v2reg_handle;
-
-	nxge_mac_t		mac;
-	nxge_ipp_t		ipp;
-	nxge_txc_t		txc;
-	nxge_classify_t		classifier;
-
-	mac_handle_t		mach;	/* mac module handle */
-	p_nxge_stats_t		statsp;
-	uint32_t		param_count;
-	p_nxge_param_t		param_arr;
-	nxge_hw_list_t		*nxge_hw_p; 	/* per Neptune shared hw list */
-	niu_type_t		niu_type;
-	boolean_t		os_addr_mode32;	/* set to 1 for 32 bit mode */
-	uint8_t			nrdc;
-	uint8_t			def_rdc;
-	uint8_t			rdc[NXGE_MAX_RDCS];
-	uint8_t			ntdc;
-	uint8_t			tdc[NXGE_MAX_TDCS];
-
-	nxge_intr_t		nxge_intr_type;
-	nxge_dma_pt_cfg_t 	pt_config;
-	nxge_class_pt_cfg_t 	class_config;
-
-	/* Logical device and group data structures. */
-	p_nxge_ldgv_t		ldgvp;
-
-	caddr_t			param_list;	/* Parameter list */
-
-	ether_addr_st		factaddr;	/* factory mac address	    */
-	ether_addr_st		ouraddr;	/* individual address	    */
-	kmutex_t		ouraddr_lock;	/* lock to protect ouraddr */
-
-	ddi_iblock_cookie_t	interrupt_cookie;
-
-	/*
-	 * Blocks of memory may be pre-allocated by the
-	 * partition manager or the driver. They may include
-	 * blocks for configuration and buffers. The idea is
-	 * to preallocate big blocks of contiguous areas in
-	 * system memory (i.e. with IOMMU). These blocks then
-	 * will be broken up to a fixed number of blocks with
-	 * each block having the same block size (4K, 8K, 16K or
-	 * 32K) in the case of buffer blocks. For systems that
-	 * do not support DVMA, more than one big block will be
-	 * allocated.
-	 */
-	uint32_t		rx_default_block_size;
-	nxge_rx_block_size_t	rx_bksize_code;
-
-	p_nxge_dma_pool_t	rx_buf_pool_p;
-	p_nxge_dma_pool_t	rx_cntl_pool_p;
-
-	p_nxge_dma_pool_t	tx_buf_pool_p;
-	p_nxge_dma_pool_t	tx_cntl_pool_p;
-
-	/* Receive buffer block ring and completion ring. */
-	p_rx_rbr_rings_t 	rx_rbr_rings;
-	p_rx_rcr_rings_t 	rx_rcr_rings;
-	p_rx_mbox_areas_t 	rx_mbox_areas_p;
-
-	p_rx_tx_params_t	rx_params;
-	uint32_t		start_rdc;
-	uint32_t		max_rdcs;
-	uint32_t		rdc_mask;
-
-	/* Transmit descriptors rings */
-	p_tx_rings_t 		tx_rings;
-	p_tx_mbox_areas_t	tx_mbox_areas_p;
-
-	uint32_t		start_tdc;
-	uint32_t		max_tdcs;
-	uint32_t		tdc_mask;
-
-	p_rx_tx_params_t	tx_params;
-
-	ddi_dma_handle_t 	dmasparehandle;
-
-	ulong_t 		sys_page_sz;
-	ulong_t 		sys_page_mask;
-	int 			suspended;
-
-	mii_bmsr_t 		bmsr;		/* xcvr status at last poll. */
-	mii_bmsr_t 		soft_bmsr;	/* xcvr status kept by SW. */
-
-	kmutex_t 		mif_lock;	/* Lock to protect the list. */
-
-	void 			(*mii_read)();
-	void 			(*mii_write)();
-	void 			(*mii_poll)();
-	filter_t 		filter;		/* Current instance filter */
-	p_hash_filter_t 	hash_filter;	/* Multicast hash filter. */
-	krwlock_t		filter_lock;	/* Lock to protect filters. */
-
-	ulong_t 		sys_burst_sz;
-
-	uint8_t 		cache_line;
-
-	timeout_id_t 		nxge_link_poll_timerid;
-	timeout_id_t 		nxge_timerid;
-
-	uint_t 			need_periodic_reclaim;
-	timeout_id_t 		reclaim_timer;
-
-	uint8_t 		msg_min;
-	uint8_t 		crc_size;
-
-	boolean_t 		hard_props_read;
-
-	boolean_t 		nxge_htraffic;
-	uint32_t 		nxge_ncpus;
-	uint32_t 		nxge_cpumask;
-	uint16_t 		intr_timeout;
-	uint16_t 		intr_threshold;
-	uchar_t 		nxge_rxmode;
-	uint32_t 		active_threads;
-
-	rtrace_t		rtrace;
-	int			fm_capabilities; /* FMA capabilities */
-
-	uint32_t 		nxge_port_rbr_size;
-	uint32_t 		nxge_port_rcr_size;
-	uint32_t 		nxge_port_tx_ring_size;
-	nxge_mmac_t		nxge_mmac_info;
-#if	defined(sun4v)
-	boolean_t		niu_hsvc_available;
-	hsvc_info_t		niu_hsvc;
-	uint64_t		niu_min_ver;
-#endif
-	boolean_t		link_notify;
-};
-
-/*
- * Driver state flags.
- */
-#define	STATE_REGS_MAPPED	0x000000001	/* device registers mapped */
-#define	STATE_KSTATS_SETUP	0x000000002	/* kstats allocated	*/
-#define	STATE_NODE_CREATED	0x000000004	/* device node created	*/
-#define	STATE_HW_CONFIG_CREATED	0x000000008	/* hardware properties	*/
-#define	STATE_HW_INITIALIZED	0x000000010	/* hardware initialized	*/
-#define	STATE_MDIO_LOCK_INIT	0x000000020	/* mdio lock initialized */
-#define	STATE_MII_LOCK_INIT	0x000000040	/* mii lock initialized */
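The STATE_* values above are bit flags for the drv_state word in struct _nxge_t, so attach progress can be recorded and tested with plain mask operations. A small self-contained sketch using two of the flags (a local variable stands in for the live softstate):

#include <stdint.h>

#define	STATE_REGS_MAPPED	0x000000001	/* device registers mapped */
#define	STATE_KSTATS_SETUP	0x000000002	/* kstats allocated */

int
main(void)
{
	uint32_t drv_state = 0;

	drv_state |= STATE_REGS_MAPPED;		/* mark a step as done */
	if (drv_state & STATE_KSTATS_SETUP) {	/* not yet set here */
		/* tear down kstats on detach */
	}
	drv_state &= ~STATE_REGS_MAPPED;	/* undo on a failure path */
	return (0);
}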
-
-#define	STOP_POLL_THRESH 	9
-#define	START_POLL_THRESH	2
-
-typedef struct _nxge_port_kstat_t {
-	/*
-	 * Transceiver state information.
-	 */
-	kstat_named_t	xcvr_inits;
-	kstat_named_t	xcvr_inuse;
-	kstat_named_t	xcvr_addr;
-	kstat_named_t	xcvr_id;
-	kstat_named_t	cap_autoneg;
-	kstat_named_t	cap_10gfdx;
-	kstat_named_t	cap_10ghdx;
-	kstat_named_t	cap_1000fdx;
-	kstat_named_t	cap_1000hdx;
-	kstat_named_t	cap_100T4;
-	kstat_named_t	cap_100fdx;
-	kstat_named_t	cap_100hdx;
-	kstat_named_t	cap_10fdx;
-	kstat_named_t	cap_10hdx;
-	kstat_named_t	cap_asmpause;
-	kstat_named_t	cap_pause;
-
-	/*
-	 * Link partner capabilities.
-	 */
-	kstat_named_t	lp_cap_autoneg;
-	kstat_named_t	lp_cap_10gfdx;
-	kstat_named_t	lp_cap_10ghdx;
-	kstat_named_t	lp_cap_1000fdx;
-	kstat_named_t	lp_cap_1000hdx;
-	kstat_named_t	lp_cap_100T4;
-	kstat_named_t	lp_cap_100fdx;
-	kstat_named_t	lp_cap_100hdx;
-	kstat_named_t	lp_cap_10fdx;
-	kstat_named_t	lp_cap_10hdx;
-	kstat_named_t	lp_cap_asmpause;
-	kstat_named_t	lp_cap_pause;
-
-	/*
-	 * Shared link setup.
-	 */
-	kstat_named_t	link_T4;
-	kstat_named_t	link_speed;
-	kstat_named_t	link_duplex;
-	kstat_named_t	link_asmpause;
-	kstat_named_t	link_pause;
-	kstat_named_t	link_up;
-
-	/*
-	 * Lets the user know the MTU currently in use by
-	 * the physical MAC port.
-	 */
-	kstat_named_t	mac_mtu;
-	kstat_named_t	lb_mode;
-	kstat_named_t	qos_mode;
-	kstat_named_t	trunk_mode;
-
-	/*
-	 * Misc MAC statistics.
-	 */
-	kstat_named_t	ifspeed;
-	kstat_named_t	promisc;
-	kstat_named_t	rev_id;
-
-	/*
-	 * Some statistics added to support bringup; these
-	 * should be removed.
-	 */
-	kstat_named_t	user_defined;
-} nxge_port_kstat_t, *p_nxge_port_kstat_t;
-
-typedef struct _nxge_rdc_kstat {
-	/*
-	 * Receive DMA channel statistics.
-	 */
-	kstat_named_t	ipackets;
-	kstat_named_t	rbytes;
-	kstat_named_t	errors;
-	kstat_named_t	dcf_err;
-	kstat_named_t	rcr_ack_err;
-
-	kstat_named_t	dc_fifoflow_err;
-	kstat_named_t	rcr_sha_par_err;
-	kstat_named_t	rbr_pre_par_err;
-	kstat_named_t	wred_drop;
-	kstat_named_t	rbr_pre_emty;
-
-	kstat_named_t	rcr_shadow_full;
-	kstat_named_t	rbr_tmout;
-	kstat_named_t	rsp_cnt_err;
-	kstat_named_t	byte_en_bus;
-	kstat_named_t	rsp_dat_err;
-
-	kstat_named_t	compl_l2_err;
-	kstat_named_t	compl_l4_cksum_err;
-	kstat_named_t	compl_zcp_soft_err;
-	kstat_named_t	compl_fflp_soft_err;
-	kstat_named_t	config_err;
-
-	kstat_named_t	rcrincon;
-	kstat_named_t	rcrfull;
-	kstat_named_t	rbr_empty;
-	kstat_named_t	rbrfull;
-	kstat_named_t	rbrlogpage;
-
-	kstat_named_t	cfiglogpage;
-	kstat_named_t	port_drop_pkt;
-	kstat_named_t	rcr_to;
-	kstat_named_t	rcr_thresh;
-	kstat_named_t	rcr_mex;
-	kstat_named_t	id_mismatch;
-	kstat_named_t	zcp_eop_err;
-	kstat_named_t	ipp_eop_err;
-} nxge_rdc_kstat_t, *p_nxge_rdc_kstat_t;
-
-typedef struct _nxge_rdc_sys_kstat {
-	/*
-	 * Receive DMA system statistics.
-	 */
-	kstat_named_t	pre_par;
-	kstat_named_t	sha_par;
-	kstat_named_t	id_mismatch;
-	kstat_named_t	ipp_eop_err;
-	kstat_named_t	zcp_eop_err;
-} nxge_rdc_sys_kstat_t, *p_nxge_rdc_sys_kstat_t;
-
-typedef	struct _nxge_tdc_kstat {
-	/*
-	 * Transmit DMA channel statistics.
-	 */
-	kstat_named_t	opackets;
-	kstat_named_t	obytes;
-	kstat_named_t	oerrors;
-	kstat_named_t	tx_inits;
-	kstat_named_t	tx_no_buf;
-
-	kstat_named_t	mbox_err;
-	kstat_named_t	pkt_size_err;
-	kstat_named_t	tx_ring_oflow;
-	kstat_named_t	pref_buf_ecc_err;
-	kstat_named_t	nack_pref;
-	kstat_named_t	nack_pkt_rd;
-	kstat_named_t	conf_part_err;
-	kstat_named_t	pkt_prt_err;
-	kstat_named_t	reset_fail;
-/* the following are also kept in the common (per port) counters */
-
-	kstat_named_t	tx_starts;
-	kstat_named_t	tx_nocanput;
-	kstat_named_t	tx_msgdup_fail;
-	kstat_named_t	tx_allocb_fail;
-	kstat_named_t	tx_no_desc;
-	kstat_named_t	tx_dma_bind_fail;
-	kstat_named_t	tx_uflo;
-	kstat_named_t	tx_hdr_pkts;
-	kstat_named_t	tx_ddi_pkts;
-	kstat_named_t	tx_dvma_pkts;
-	kstat_named_t	tx_max_pend;
-} nxge_tdc_kstat_t, *p_nxge_tdc_kstat_t;
-
-typedef	struct _nxge_txc_kstat {
-	/*
-	 * Transmit port TXC block statistics.
-	 */
-	kstat_named_t	pkt_stuffed;
-	kstat_named_t	pkt_xmit;
-	kstat_named_t	ro_correct_err;
-	kstat_named_t	ro_uncorrect_err;
-	kstat_named_t	sf_correct_err;
-	kstat_named_t	sf_uncorrect_err;
-	kstat_named_t	address_failed;
-	kstat_named_t	dma_failed;
-	kstat_named_t	length_failed;
-	kstat_named_t	pkt_assy_dead;
-	kstat_named_t	reorder_err;
-} nxge_txc_kstat_t, *p_nxge_txc_kstat_t;
-
-typedef struct _nxge_ipp_kstat {
-	/*
-	 * Receive port IPP block statistics.
-	 */
-	kstat_named_t	eop_miss;
-	kstat_named_t	sop_miss;
-	kstat_named_t	dfifo_ue;
-	kstat_named_t	ecc_err_cnt;
-	kstat_named_t	dfifo_perr;
-	kstat_named_t	pfifo_over;
-	kstat_named_t	pfifo_und;
-	kstat_named_t	bad_cs_cnt;
-	kstat_named_t	pkt_dis_cnt;
-	kstat_named_t	cs_fail;
-} nxge_ipp_kstat_t, *p_nxge_ipp_kstat_t;
-
-typedef	struct _nxge_zcp_kstat {
-	/*
-	 * ZCP statistics.
-	 */
-	kstat_named_t	errors;
-	kstat_named_t	inits;
-	kstat_named_t	rrfifo_underrun;
-	kstat_named_t	rrfifo_overrun;
-	kstat_named_t	rspfifo_uncorr_err;
-	kstat_named_t	buffer_overflow;
-	kstat_named_t	stat_tbl_perr;
-	kstat_named_t	dyn_tbl_perr;
-	kstat_named_t	buf_tbl_perr;
-	kstat_named_t	tt_program_err;
-	kstat_named_t	rsp_tt_index_err;
-	kstat_named_t	slv_tt_index_err;
-	kstat_named_t	zcp_tt_index_err;
-	kstat_named_t	access_fail;
-	kstat_named_t	cfifo_ecc;
-} nxge_zcp_kstat_t, *p_nxge_zcp_kstat_t;
-
-typedef	struct _nxge_mac_kstat {
-	/*
-	 * Transmit MAC statistics.
-	 */
-	kstat_named_t	tx_frame_cnt;
-	kstat_named_t	tx_underflow_err;
-	kstat_named_t	tx_overflow_err;
-	kstat_named_t	tx_maxpktsize_err;
-	kstat_named_t	tx_fifo_xfr_err;
-	kstat_named_t	tx_byte_cnt;
-
-	/*
-	 * Receive MAC statistics.
-	 */
-	kstat_named_t	rx_frame_cnt;
-	kstat_named_t	rx_underflow_err;
-	kstat_named_t	rx_overflow_err;
-	kstat_named_t	rx_len_err_cnt;
-	kstat_named_t	rx_crc_err_cnt;
-	kstat_named_t	rx_viol_err_cnt;
-	kstat_named_t	rx_byte_cnt;
-	kstat_named_t	rx_hist1_cnt;
-	kstat_named_t	rx_hist2_cnt;
-	kstat_named_t	rx_hist3_cnt;
-	kstat_named_t	rx_hist4_cnt;
-	kstat_named_t	rx_hist5_cnt;
-	kstat_named_t	rx_hist6_cnt;
-	kstat_named_t	rx_broadcast_cnt;
-	kstat_named_t	rx_mult_cnt;
-	kstat_named_t	rx_frag_cnt;
-	kstat_named_t	rx_frame_align_err_cnt;
-	kstat_named_t	rx_linkfault_err_cnt;
-	kstat_named_t	rx_local_fault_err_cnt;
-	kstat_named_t	rx_remote_fault_err_cnt;
-} nxge_mac_kstat_t, *p_nxge_mac_kstat_t;
-
-typedef	struct _nxge_xmac_kstat {
-	/*
-	 * XMAC statistics.
-	 */
-	kstat_named_t	tx_frame_cnt;
-	kstat_named_t	tx_underflow_err;
-	kstat_named_t	tx_maxpktsize_err;
-	kstat_named_t	tx_overflow_err;
-	kstat_named_t	tx_fifo_xfr_err;
-	kstat_named_t	tx_byte_cnt;
-	kstat_named_t	rx_frame_cnt;
-	kstat_named_t	rx_underflow_err;
-	kstat_named_t	rx_overflow_err;
-	kstat_named_t	rx_crc_err_cnt;
-	kstat_named_t	rx_len_err_cnt;
-	kstat_named_t	rx_viol_err_cnt;
-	kstat_named_t	rx_byte_cnt;
-	kstat_named_t	rx_hist1_cnt;
-	kstat_named_t	rx_hist2_cnt;
-	kstat_named_t	rx_hist3_cnt;
-	kstat_named_t	rx_hist4_cnt;
-	kstat_named_t	rx_hist5_cnt;
-	kstat_named_t	rx_hist6_cnt;
-	kstat_named_t	rx_hist7_cnt;
-	kstat_named_t	rx_broadcast_cnt;
-	kstat_named_t	rx_mult_cnt;
-	kstat_named_t	rx_frag_cnt;
-	kstat_named_t	rx_frame_align_err_cnt;
-	kstat_named_t	rx_linkfault_err_cnt;
-	kstat_named_t	rx_remote_fault_err_cnt;
-	kstat_named_t	rx_local_fault_err_cnt;
-	kstat_named_t	rx_pause_cnt;
-	kstat_named_t	xpcs_deskew_err_cnt;
-	kstat_named_t	xpcs_ln0_symbol_err_cnt;
-	kstat_named_t	xpcs_ln1_symbol_err_cnt;
-	kstat_named_t	xpcs_ln2_symbol_err_cnt;
-	kstat_named_t	xpcs_ln3_symbol_err_cnt;
-} nxge_xmac_kstat_t, *p_nxge_xmac_kstat_t;
-
-typedef	struct _nxge_bmac_kstat {
-	/*
-	 * BMAC statistics.
-	 */
-	kstat_named_t tx_frame_cnt;
-	kstat_named_t tx_underrun_err;
-	kstat_named_t tx_max_pkt_err;
-	kstat_named_t tx_byte_cnt;
-	kstat_named_t rx_frame_cnt;
-	kstat_named_t rx_byte_cnt;
-	kstat_named_t rx_overflow_err;
-	kstat_named_t rx_align_err_cnt;
-	kstat_named_t rx_crc_err_cnt;
-	kstat_named_t rx_len_err_cnt;
-	kstat_named_t rx_viol_err_cnt;
-	kstat_named_t rx_pause_cnt;
-	kstat_named_t tx_pause_state;
-	kstat_named_t tx_nopause_state;
-} nxge_bmac_kstat_t, *p_nxge_bmac_kstat_t;
-
-
-typedef struct _nxge_fflp_kstat {
-	/*
-	 * FFLP statistics.
-	 */
-
-	kstat_named_t	fflp_tcam_ecc_err;
-	kstat_named_t	fflp_tcam_perr;
-	kstat_named_t	fflp_vlan_perr;
-	kstat_named_t	fflp_hasht_lookup_err;
-	kstat_named_t	fflp_access_fail;
-	kstat_named_t	fflp_hasht_data_err[MAX_PARTITION];
-} nxge_fflp_kstat_t, *p_nxge_fflp_kstat_t;
-
-typedef struct _nxge_mmac_kstat {
-	kstat_named_t	mmac_max_addr_cnt;
-	kstat_named_t	mmac_avail_addr_cnt;
-	kstat_named_t	mmac_addr1;
-	kstat_named_t	mmac_addr2;
-	kstat_named_t	mmac_addr3;
-	kstat_named_t	mmac_addr4;
-	kstat_named_t	mmac_addr5;
-	kstat_named_t	mmac_addr6;
-	kstat_named_t	mmac_addr7;
-	kstat_named_t	mmac_addr8;
-	kstat_named_t	mmac_addr9;
-	kstat_named_t	mmac_addr10;
-	kstat_named_t	mmac_addr11;
-	kstat_named_t	mmac_addr12;
-	kstat_named_t	mmac_addr13;
-	kstat_named_t	mmac_addr14;
-	kstat_named_t	mmac_addr15;
-	kstat_named_t	mmac_addr16;
-} nxge_mmac_kstat_t, *p_nxge_mmac_kstat_t;
-
-#endif	/* _KERNEL */
-
-/*
- * Prototype definitions.
- */
-nxge_status_t nxge_init(p_nxge_t);
-void nxge_uninit(p_nxge_t);
-void nxge_get64(p_nxge_t, p_mblk_t);
-void nxge_put64(p_nxge_t, p_mblk_t);
-void nxge_pio_loop(p_nxge_t, p_mblk_t);
-
-#ifndef COSIM
-typedef	void	(*fptrv_t)();
-timeout_id_t nxge_start_timer(p_nxge_t, fptrv_t, int);
-void nxge_stop_timer(p_nxge_t, timeout_id_t);
-#endif
-#endif
-
-#ifdef	__cplusplus
-}
-#endif
-
-#endif	/* _SYS_NXGE_NXGE_H */
--- a/usr/src/uts/sun4v/sys/nxge/nxge_common.h	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,487 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef	_SYS_NXGE_NXGE_COMMON_H
-#define	_SYS_NXGE_NXGE_COMMON_H
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#ifdef	__cplusplus
-extern "C" {
-#endif
-
-#define	NXGE_DMA_START			B_TRUE
-#define	NXGE_DMA_STOP			B_FALSE
-
-/*
- * Default DMA configurations.
- */
-#define	NXGE_RDMA_PER_NIU_PORT		(NXGE_MAX_RDCS/NXGE_PORTS_NIU)
-#define	NXGE_TDMA_PER_NIU_PORT		(NXGE_MAX_TDCS_NIU/NXGE_PORTS_NIU)
-#define	NXGE_RDMA_PER_NEP_PORT		(NXGE_MAX_RDCS/NXGE_PORTS_NEPTUNE)
-#define	NXGE_TDMA_PER_NEP_PORT		(NXGE_MAX_TDCS/NXGE_PORTS_NEPTUNE)
-#define	NXGE_RDCGRP_PER_NIU_PORT	(NXGE_MAX_RDC_GROUPS/NXGE_PORTS_NIU)
-#define	NXGE_RDCGRP_PER_NEP_PORT	(NXGE_MAX_RDC_GROUPS/NXGE_PORTS_NEPTUNE)
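Plugging in the channel counts defined in nxge_defs.h later in this change (16 RDCs, 24 TDCs, 16 NIU TDCs, 8 RDC groups), and assuming the usual two NIU ports and four Neptune ports (the port-count macros fall outside this hunk), the defaults above work out as shown in this comment:

/*
 * Assumed port counts: NXGE_PORTS_NIU = 2, NXGE_PORTS_NEPTUNE = 4.
 *
 *   NXGE_RDMA_PER_NIU_PORT   = 16 / 2 = 8 RDCs per NIU port
 *   NXGE_TDMA_PER_NIU_PORT   = 16 / 2 = 8 TDCs per NIU port
 *   NXGE_RDMA_PER_NEP_PORT   = 16 / 4 = 4 RDCs per Neptune port
 *   NXGE_TDMA_PER_NEP_PORT   = 24 / 4 = 6 TDCs per Neptune port
 *   NXGE_RDCGRP_PER_NIU_PORT =  8 / 2 = 4 RDC groups per NIU port
 *   NXGE_RDCGRP_PER_NEP_PORT =  8 / 4 = 2 RDC groups per Neptune port
 */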
-
-#define	NXGE_TIMER_RESO			2
-
-#define	NXGE_TIMER_LDG			2
-
-/*
- * Receive and Transmit DMA definitions
- */
-#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
-/*
- * N2/NIU: Maximum descriptors if we need to call
- *	   Hypervisor to set up the logical pages
- *	   and the driver must use contiguous memory.
- */
-#define	NXGE_NIU_MAX_ENTRY		(1 << 9) /* 512 */
-#define	NXGE_NIU_CONTIG_RBR_MAX		(NXGE_NIU_MAX_ENTRY)
-#define	NXGE_NIU_CONTIG_RCR_MAX		(NXGE_NIU_MAX_ENTRY)
-#define	NXGE_NIU_CONTIG_TX_MAX		(NXGE_NIU_MAX_ENTRY)
-#endif
-
-#ifdef	_DMA_USES_VIRTADDR
-#ifdef	NIU_PA_WORKAROUND
-#define	NXGE_DMA_BLOCK		(16 * 64 * 4)
-#else
-#define	NXGE_DMA_BLOCK		1
-#endif
-#else
-#define	NXGE_DMA_BLOCK		(64 * 64)
-#endif
-
-#define	NXGE_RBR_RBB_MIN	(128)
-#define	NXGE_RBR_RBB_MAX	(64 * 128 -1)
-
-#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
-#define	NXGE_RBR_RBB_DEFAULT	512
-#define	NXGE_RBR_SPARE		0
-#else
-#define	NXGE_RBR_RBB_DEFAULT	(64 * 16) /* x86 hello */
-#define	NXGE_RBR_SPARE		0
-#endif
-
-
-#define	NXGE_RCR_MIN		(NXGE_RBR_RBB_MIN * 2)
-
-#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
-#define	NXGE_RCR_MAX		(NXGE_NIU_CONTIG_RCR_MAX)
-#define	NXGE_RCR_DEFAULT	(512)
-#define	NXGE_TX_RING_DEFAULT	(512)
-#else
-#ifndef	NIU_PA_WORKAROUND
-#define	NXGE_RCR_MAX		(65355) /* MAX hardware supported */
-#if defined(_BIG_ENDIAN)
-#define	NXGE_RCR_DEFAULT	(NXGE_RBR_RBB_DEFAULT * 8)
-#else
-#ifdef USE_RX_BIG_BUF
-#define	NXGE_RCR_DEFAULT	(NXGE_RBR_RBB_DEFAULT * 8)
-#else
-#define	NXGE_RCR_DEFAULT	(NXGE_RBR_RBB_DEFAULT * 4)
-#endif
-#endif
-#define	NXGE_TX_RING_DEFAULT	(1024)
-#define	NXGE_TX_RING_MAX	(64 * 128 - 1)
-#else
-#define	NXGE_RCR_DEFAULT	(512)
-#define	NXGE_TX_RING_DEFAULT	(512)
-#define	NXGE_RCR_MAX		(1024)
-#define	NXGE_TX_RING_MAX	(1024)
-#endif
-#endif
-
-#define	NXGE_TX_RECLAIM 	32
-
-/* per receive DMA channel configuration data structure */
-typedef struct  nxge_rdc_cfg {
-	uint32_t	flag;		/* 0: not configured, 1: configured */
-	struct nxge_hw_list *nxge_hw_p;
-	uint32_t	partition_id;
-	uint32_t	port;		/* function number */
-	uint32_t	rx_group_id;
-
-	/* Partitioning, DMC function zero. */
-	uint32_t	rx_log_page_vld_page0;	/* TRUE or FALSE */
-	uint32_t	rx_log_page_vld_page1;	/* TRUE or FALSE */
-	uint64_t	rx_log_mask1;
-	uint64_t	rx_log_value1;
-	uint64_t	rx_log_mask2;
-	uint64_t	rx_log_value2;
-	uint64_t	rx_log_page_relo1;
-	uint64_t	rx_log_page_relo2;
-	uint64_t	rx_log_page_hdl;
-
-	/* WRED parameters, DMC function zero */
-	uint32_t	red_enable;
-
-	uint32_t	thre_syn;
-	uint32_t	win_syn;
-	uint32_t	threshold;
-	uint32_t	win_non_syn;
-
-	/* RXDMA configuration, DMC */
-	char		*rdc_mbaddr_p;	/* mailbox address */
-	uint32_t	min_flag;	/* TRUE for 18 bytes header */
-
-	/* Software Reserved Packet Buffer Offset, DMC */
-	uint32_t	sw_offset;
-
-	/* RBR Configuration A */
-	uint64_t	rbr_staddr;	/* starting address of RBR */
-	uint32_t	rbr_nblks;	/* # of RBR entries */
-	uint32_t	rbr_len;	/* # of RBR entries in 64B lines */
-
-	/* RBR Configuration B */
-	uint32_t	bksize;		/* Block size is fixed. */
-#define	RBR_BKSIZE_4K			0
-#define	RBR_BKSIZE_4K_BYTES		(4 * 1024)
-#define	RBR_BKSIZE_8K			1
-#define	RBR_BKSIZE_8K_BYTES		(8 * 1024)
-#define	RBR_BKSIZE_16K			2
-#define	RBR_BKSIZE_16K_BYTES		(16 * 1024)
-#define	RBR_BKSIZE_32K			3
-#define	RBR_BKSIZE_32K_BYTES		(32 * 1024)
-
-	uint32_t	bufsz2;
-#define	RBR_BUFSZ2_2K			0
-#define	RBR_BUFSZ2_2K_BYTES		(2 * 1024)
-#define	RBR_BUFSZ2_4K			1
-#define	RBR_BUFSZ2_4K_BYTES		(4 * 1024)
-#define	RBR_BUFSZ2_8K			2
-#define	RBR_BUFSZ2_8K_BYTES		(8 * 1024)
-#define	RBR_BUFSZ2_16K			3
-#define	RBR_BUFSZ2_16K_BYTES		(16 * 1024)
-
-	uint32_t	bufsz1;
-#define	RBR_BUFSZ1_1K			0
-#define	RBR_BUFSZ1_1K_BYTES		1024
-#define	RBR_BUFSZ1_2K			1
-#define	RBR_BUFSZ1_2K_BYTES		(2 * 1024)
-#define	RBR_BUFSZ1_4K			2
-#define	RBR_BUFSZ1_4K_BYTES		(4 * 1024)
-#define	RBR_BUFSZ1_8K			3
-#define	RBR_BUFSZ1_8K_BYTES		(8 * 1024)
-
-	uint32_t	bufsz0;
-#define	RBR_BUFSZ0_256B			0
-#define	RBR_BUFSZ0_256_BYTES		256
-#define	RBR_BUFSZ0_512B			1
-#define	RBR_BUFSZ0_512B_BYTES		512
-#define	RBR_BUFSZ0_1K			2
-#define	RBR_BUFSZ0_1K_BYTES		(1024)
-#define	RBR_BUFSZ0_2K			3
-#define	RBR_BUFSZ0_2K_BYTES		(2 * 1024)
-
-	/* Receive buffers added by the software */
-	uint32_t	bkadd;		/* maximum size is 1 million */
-
-	/* Receive Completion Ring Configuration A */
-	uint32_t	rcr_len;	/* # of 64B blocks, each RCR is 8B */
-	uint64_t	rcr_staddr;
-
-	/* Receive Completion Ring Configuration B */
-	uint32_t	pthres;		/* packet threshold */
-	uint32_t	entout;		/* enable timeout */
-	uint32_t	timeout;	/* timeout value */
-
-	/* Logical Device Group Number */
-	uint16_t	rx_ldg;
-	uint16_t	rx_ld_state_flags;
-
-	/* Receive DMA Channel Event Mask */
-	uint64_t	rx_dma_ent_mask;
-
-	/* 32 bit (set to 1) or 64 bit (set to 0) addressing mode */
-	uint32_t	rx_addr_md;
-} nxge_rdc_cfg_t, *p_nxge_rdc_cfg_t;
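The bksize, bufsz2, bufsz1 and bufsz0 fields carry the 2-bit hardware codes, not byte counts; the *_BYTES macros above give the decoded sizes. A small decode sketch for the block size (the lookup helper is illustrative):

#include <stdint.h>

#define	RBR_BKSIZE_4K_BYTES	(4 * 1024)
#define	RBR_BKSIZE_8K_BYTES	(8 * 1024)
#define	RBR_BKSIZE_16K_BYTES	(16 * 1024)
#define	RBR_BKSIZE_32K_BYTES	(32 * 1024)

/* Translate a 2-bit RBR block size code (0..3) into bytes. */
static uint32_t
rbr_bksize_bytes(uint32_t code)
{
	static const uint32_t sz[] = {
		RBR_BKSIZE_4K_BYTES, RBR_BKSIZE_8K_BYTES,
		RBR_BKSIZE_16K_BYTES, RBR_BKSIZE_32K_BYTES
	};

	return (sz[code & 3]);
}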
-
-/*
- * Per Transmit DMA Channel Configuration Data Structure (32 TDC)
- */
-typedef struct  nxge_tdc_cfg {
-	uint32_t	flag;		/* 0: not configured 1: configured */
-	struct nxge_hw_list *nxge_hw_p;
-	uint32_t	partition_id;
-	uint32_t	port; 		/* function number */
-	/* partitioning, DMC function zero (All 0s for non-partitioning) */
-	uint32_t	tx_log_page_vld_page0;	/* TRUE or FALSE */
-	uint32_t	tx_log_page_vld_page1;	/* TRUE or FALSE */
-	uint64_t	tx_log_mask1;
-	uint64_t	tx_log_value1;
-	uint64_t	tx_log_mask2;
-	uint64_t	tx_log_value2;
-	uint64_t	tx_log_page_relo1;
-	uint64_t	tx_log_page_relo2;
-	uint64_t	tx_log_page_hdl;
-
-	/* Transmit Ring Configuration */
-	uint64_t	tx_staddr;
-	uint64_t	tx_rng_len;	/* in 64 B Blocks */
-#define	TX_MAX_BUF_SIZE			4096
-
-	/* TXDMA configuration, DMC */
-	char		*tdc_mbaddr_p;	/* mailbox address */
-
-	/* Logical Device Group Number */
-	uint16_t	tx_ldg;
-	uint16_t	tx_ld_state_flags;
-
-	/* TXDMA event flags */
-	uint64_t	tx_event_mask;
-
-	/* Transmit threshold before reclamation */
-	uint32_t	tx_rng_threshold;
-#define	TX_RING_THRESHOLD		(TX_DEFAULT_MAX_GPS/4)
-#define	TX_RING_JUMBO_THRESHOLD		(TX_DEFAULT_JUMBO_MAX_GPS/4)
-
-	/* For reclaim: a wrap-around counter (packets transmitted) */
-	uint32_t	tx_pkt_cnt;
-	/* last packet with the mark bit set */
-	uint32_t	tx_lastmark;
-} nxge_tdc_cfg_t, *p_nxge_tdc_cfg_t;
-
-#define	RDC_TABLE_ENTRY_METHOD_SEQ	0
-#define	RDC_TABLE_ENTRY_METHOD_REP	1
-
-/* per receive DMA channel table group data structure */
-typedef struct nxge_rdc_grp {
-	uint32_t	flag;		/* 0:not configured 1: configured */
-	uint8_t	port;
-	uint8_t	partition_id;
-	uint8_t	rx_group_id;
-	uint8_t	start_rdc;	/* assume assigned in sequence	*/
-	uint8_t	max_rdcs;
-	uint8_t	def_rdc;
-	uint8_t		rdc[NXGE_MAX_RDCS];
-	uint16_t	config_method;
-} nxge_rdc_grp_t, *p_nxge_rdc_grp_t;
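With RDC_TABLE_ENTRY_METHOD_SEQ the rdc[] array is simply start_rdc, start_rdc + 1, and so on, up to max_rdcs entries. A sketch of that sequential fill, assuming the same NXGE_MAX_RDCS bound used by the structure (not the driver's actual routine):

#include <stdint.h>

#define	NXGE_MAX_RDCS	16

/* Fill an RDC group's channel table sequentially from start_rdc. */
static void
rdc_grp_fill_seq(uint8_t rdc[NXGE_MAX_RDCS], uint8_t start_rdc,
    uint8_t max_rdcs)
{
	uint8_t i;

	for (i = 0; i < max_rdcs && i < NXGE_MAX_RDCS; i++)
		rdc[i] = start_rdc + i;
}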
-
-/* Common RDC and TDC configuration of DMC */
-typedef struct _nxge_dma_common_cfg_t {
-	uint16_t	rdc_red_ran_init; /* RED initial seed value */
-
-	/* Transmit Ring */
-} nxge_dma_common_cfg_t, *p_nxge_dma_common_cfg_t;
-
-/*
- * VLAN and MAC table configurations:
- *  Each VLAN ID should belong to at most one RDC group.
- *  Each port could own multiple RDC groups.
- *  Each MAC should belong to one RDC group.
- */
-typedef struct nxge_mv_cfg {
-	uint8_t		flag;			/* 0: not configured 1: configured */
-	uint8_t		rdctbl;			/* RDC channel table group */
-	uint8_t		mpr_npr;		/* MAC and VLAN preference */
-	uint8_t		odd_parity;
-} nxge_mv_cfg_t, *p_nxge_mv_cfg_t;
-
-typedef struct nxge_param_map {
-#if defined(_BIG_ENDIAN)
-	uint32_t		rsrvd2:2;	/* [30:31] rsrvd */
-	uint32_t		remove:1;	/* [29] Remove */
-	uint32_t		pref:1;		/* [28] preference */
-	uint32_t		rsrv:4;		/* [27:24] reserved */
-	uint32_t		map_to:8;	/* [23:16] map to resource */
-	uint32_t		param_id:16;	/* [15:0] Param ID */
-#else
-	uint32_t		param_id:16;	/* [15:0] Param ID */
-	uint32_t		map_to:8;	/* [23:16] map to resource */
-	uint32_t		rsrv:4;		/* [27:24] reserved */
-	uint32_t		pref:1;		/* [28] preference */
-	uint32_t		remove:1;	/* [29] Remove */
-	uint32_t		rsrvd2:2;	/* [30:31] rsrvd */
-#endif
-} nxge_param_map_t, *p_nxge_param_map_t;
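The two #if branches describe the same 32-bit layout from opposite ends, so param_id always lands in bits [15:0] and map_to in bits [23:16] of the word. An equivalent mask/shift view of that word, shown only for illustration:

#include <stdint.h>

/* Mask/shift view of the 32-bit param map word (illustrative only). */
#define	PARAM_MAP_ID(w)		((w) & 0xffffU)		/* bits [15:0]  */
#define	PARAM_MAP_TO(w)		(((w) >> 16) & 0xffU)	/* bits [23:16] */
#define	PARAM_MAP_PREF(w)	(((w) >> 28) & 0x1U)	/* bit 28       */
#define	PARAM_MAP_REMOVE(w)	(((w) >> 29) & 0x1U)	/* bit 29       */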
-
-typedef struct nxge_rcr_param {
-#if defined(_BIG_ENDIAN)
-	uint32_t		rsrvd2:2;	/* [30:31] rsrvd */
-	uint32_t		remove:1;	/* [29] Remove */
-	uint32_t		rsrv:5;		/* [28:24] reserved */
-	uint32_t		rdc:8;		/* [23:16] rdc # */
-	uint32_t		cfg_val:16;	/* [15:0] interrupt parameter */
-#else
-	uint32_t		cfg_val:16;	/* [15:0] interrupt parameter */
-	uint32_t		rdc:8;		/* [23:16] rdc # */
-	uint32_t		rsrv:5;		/* [28:24] reserved */
-	uint32_t		remove:1;	/* [29] Remove */
-	uint32_t		rsrvd2:2;	/* [30:31] rsrvd */
-#endif
-} nxge_rcr_param_t, *p_nxge_rcr_param_t;
-
-/* Needs to have entries in the ndd table */
-/*
- * Hardware properties created by fcode.
- * In order for those properties to be visible to the ndd
- * user command, we need to add the following properties
- * to the ndd-defined parameter array and data structures.
- *
- * Use default static configuration for x86.
- */
-typedef struct nxge_hw_pt_cfg {
-	uint32_t	partition_id;	 /* partition Id		*/
-	uint32_t	read_write_mode; /* read write permission mode	*/
-	uint32_t	function_number; /* function number		*/
-	uint32_t	start_tdc;	 /* start TDC (0 - 31)		*/
-	uint32_t	max_tdcs;	 /* max TDC in sequence		*/
-	uint32_t	start_rdc;	 /* start RDC (0 - 31)		*/
-	uint32_t	max_rdcs;	 /* max rdc in sequence		*/
-	uint32_t	ninterrupts;	/* obp interrupts(mac/mif/syserr) */
-	uint32_t	mac_ldvid;
-	uint32_t	mif_ldvid;
-	uint32_t	ser_ldvid;
-	uint32_t	def_rdc;	 /* default RDC			*/
-	uint32_t	drr_wt;		 /* port DRR weight		*/
-	uint32_t	rx_full_header;	 /* select the header flag	*/
-	uint32_t	start_grpid;	 /* starting group ID		*/
-	uint32_t	max_grpids;	 /* max group ID		*/
-	uint32_t	start_rdc_grpid; /* starting RDC group ID	*/
-	uint32_t	max_rdc_grpids;	 /* max RDC group ID		*/
-	uint32_t	start_ldg;	 /* starting logical group # 	*/
-	uint32_t	max_ldgs;	 /* max logical device group	*/
-	uint32_t	max_ldvs;	 /* max logical devices		*/
-	uint32_t	start_mac_entry; /* where to put the first mac	*/
-	uint32_t	max_macs;	 /* the max mac entry allowed	*/
-	uint32_t	mac_pref;	 /* preference over VLAN	*/
-	uint32_t	def_mac_rxdma_grpid; /* default RDC group ID	*/
-	uint32_t	start_vlan;	 /* starting VLAN ID		*/
-	uint32_t	max_vlans;	 /* max VLAN ID			*/
-	uint32_t	vlan_pref;	 /* preference over MAC		*/
-	uint32_t	def_vlan_rxdma_grpid; /* default RDC group Id	*/
-
-	/* Expand if we have more hardware or default configurations    */
-	uint16_t	ldg[NXGE_INT_MAX_LDG];
-	uint16_t	ldg_chn_start;
-} nxge_hw_pt_cfg_t, *p_nxge_hw_pt_cfg_t;
-
-
-/* per port configuration */
-typedef struct nxge_dma_pt_cfg {
-	uint8_t		mac_port;	/* MAC port (function)		*/
-	nxge_hw_pt_cfg_t hw_config;	/* hardware configuration 	*/
-
-	uint32_t alloc_buf_size;
-	uint32_t rbr_size;
-	uint32_t rcr_size;
-
-	/*
-	 * Configuration for hardware initialization based on the
-	 * hardware properties or the default properties.
-	 */
-	uint32_t	tx_dma_map;	/* Transmit DMA channel bit map */
-
-	/* Receive DMA channel */
-	nxge_rdc_grp_t	rdc_grps[NXGE_MAX_RDC_GROUPS];
-
-	uint16_t	rcr_timeout[NXGE_MAX_RDCS];
-	uint16_t	rcr_threshold[NXGE_MAX_RDCS];
-	uint8_t	rcr_full_header;
-	uint16_t	rx_drr_weight;
-
-	/* Add more stuff later */
-} nxge_dma_pt_cfg_t, *p_nxge_dma_pt_cfg_t;
-
-/* classification configuration */
-typedef struct nxge_class_pt_cfg {
-
-	/* MAC table */
-	nxge_mv_cfg_t	mac_host_info[NXGE_MAX_MACS];
-
-	/* VLAN table */
-	nxge_mv_cfg_t	vlan_tbl[NXGE_MAX_VLANS];
-	/* class config value */
-	uint32_t	init_h1;
-	uint16_t	init_h2;
-	uint8_t mcast_rdcgrp;
-	uint8_t mac_rdcgrp;
-	uint32_t	class_cfg[TCAM_CLASS_MAX];
-} nxge_class_pt_cfg_t, *p_nxge_class_pt_cfg_t;
-
-/* per Neptune sharable resources among ports */
-typedef struct nxge_common {
-	uint32_t		partition_id;
-	boolean_t		mode32;
-	/* DMA Channels: RDC and TDC */
-	nxge_rdc_cfg_t		rdc_config[NXGE_MAX_RDCS];
-	nxge_tdc_cfg_t		tdc_config[NXGE_MAX_TDCS];
-	nxge_dma_common_cfg_t	dma_common_config;
-
-	uint32_t		timer_res;
-	boolean_t		ld_sys_error_set;
-	uint8_t			sys_error_owner;
-
-	/* Layer 2/3/4 */
-	uint16_t		class2_etype;
-	uint16_t		class3_etype;
-
-	/* FCRAM (hashing) */
-	uint32_t		hash1_initval;
-	uint32_t		hash2_initval;
-} nxge_common_t, *p_nxge_common_t;
-
-/*
- * Partition (logical domain) configuration per Neptune/NIU.
- */
-typedef struct nxge_part_cfg {
-	uint32_t	rdc_grpbits;	/* RDC group bit masks */
-	uint32_t	tdc_bitmap;	/* bounded TDC */
-	nxge_dma_pt_cfg_t pt_config[NXGE_MAX_PORTS];
-
-	/* Flow Classification Partition (flow partition select register) */
-	uint8_t		hash_lookup;	/* external lookup is available */
-	uint8_t		base_mask;	/* select bits in base_h1 to replace */
-					/* bits [19:15] in Hash 1. */
-	uint8_t		base_h1;	/* value to replace Hash 1 [19:15]. */
-
-	/* Add more here */
-	uint32_t	attributes;	/* permission and attribute bits */
-#define	FZC_SERVICE_ENTITY		0x01
-#define	FZC_READ_WRITE			0x02
-#define	FZC_READ_ONLY			0x04
-} nxge_part_cfg_t, *p_nxge_part_cfg_t;
-
-typedef struct nxge_hw_list {
-	struct nxge_hw_list 	*next;
-	nxge_os_mutex_t 	nxge_cfg_lock;
-	nxge_os_mutex_t 	nxge_tcam_lock;
-	nxge_os_mutex_t 	nxge_vlan_lock;
-	nxge_os_mutex_t 	nxge_mdio_lock;
-	nxge_os_mutex_t 	nxge_mii_lock;
-
-	nxge_dev_info_t		*parent_devp;
-	struct _nxge_t		*nxge_p[NXGE_MAX_PORTS];
-	uint32_t		ndevs;
-	uint32_t 		flags;
-	uint32_t 		magic;
-} nxge_hw_list_t, *p_nxge_hw_list_t;
-
-#ifdef	__cplusplus
-}
-#endif
-
-#endif	/* _SYS_NXGE_NXGE_COMMON_H */
--- a/usr/src/uts/sun4v/sys/nxge/nxge_common_impl.h	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,384 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef	_SYS_NXGE_NXGE_COMMON_IMPL_H
-#define	_SYS_NXGE_NXGE_COMMON_IMPL_H
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#ifdef	__cplusplus
-extern "C" {
-#endif
-
-#define	NPI_REGH(npi_handle)		(npi_handle.regh)
-#define	NPI_REGP(npi_handle)		(npi_handle.regp)
-
-#if defined(NXGE_DEBUG_DMA) || defined(NXGE_DEBUG_TXC)
-#define	__NXGE_STATIC
-#define	__NXGE_INLINE
-#else
-#define	__NXGE_STATIC			static
-#define	__NXGE_INLINE			inline
-#endif
-
-#ifdef	AXIS_DEBUG
-#define	AXIS_WAIT			(100000)
-#define	AXIS_LONG_WAIT			(100000)
-#define	AXIS_WAIT_W			(80000)
-#define	AXIS_WAIT_R			(100000)
-#define	AXIS_WAIT_LOOP			(4000)
-#define	AXIS_WAIT_PER_LOOP		(AXIS_WAIT_R/AXIS_WAIT_LOOP)
-#endif
-
-#define	NO_DEBUG	0x0000000000000000ULL
-#define	MDT_CTL		0x0000000000000001ULL
-#define	RX_CTL		0x0000000000000002ULL
-#define	TX_CTL		0x0000000000000004ULL
-#define	OBP_CTL		0x0000000000000008ULL
-
-#define	VPD_CTL		0x0000000000000010ULL
-#define	DDI_CTL		0x0000000000000020ULL
-#define	MEM_CTL		0x0000000000000040ULL
-#define	SAP_CTL		0x0000000000000080ULL
-
-#define	IOC_CTL		0x0000000000000100ULL
-#define	MOD_CTL		0x0000000000000200ULL
-#define	DMA_CTL		0x0000000000000400ULL
-#define	STR_CTL		0x0000000000000800ULL
-
-#define	INT_CTL		0x0000000000001000ULL
-#define	SYSERR_CTL	0x0000000000002000ULL
-#define	KST_CTL		0x0000000000004000ULL
-#define	PCS_CTL		0x0000000000008000ULL
-
-#define	MII_CTL		0x0000000000010000ULL
-#define	MIF_CTL		0x0000000000020000ULL
-#define	FCRAM_CTL	0x0000000000040000ULL
-#define	MAC_CTL		0x0000000000080000ULL
-
-#define	IPP_CTL		0x0000000000100000ULL
-#define	DMA2_CTL	0x0000000000200000ULL
-#define	RX2_CTL		0x0000000000400000ULL
-#define	TX2_CTL		0x0000000000800000ULL
-
-#define	MEM2_CTL	0x0000000001000000ULL
-#define	MEM3_CTL	0x0000000002000000ULL
-#define	NXGE_CTL	0x0000000004000000ULL
-#define	NDD_CTL		0x0000000008000000ULL
-#define	NDD2_CTL	0x0000000010000000ULL
-
-#define	TCAM_CTL	0x0000000020000000ULL
-#define	CFG_CTL		0x0000000040000000ULL
-#define	CFG2_CTL	0x0000000080000000ULL
-
-#define	FFLP_CTL	TCAM_CTL | FCRAM_CTL
-
-#define	VIR_CTL		0x0000000100000000ULL
-#define	VIR2_CTL	0x0000000200000000ULL
-
-#define	NXGE_NOTE	0x0000001000000000ULL
-#define	NXGE_ERR_CTL	0x0000002000000000ULL
-
-#define	DUMP_ALWAYS	0x2000000000000000ULL
-
-/* NPI Debug and Error defines */
-#define	NPI_RDC_CTL	0x0000000000000001ULL
-#define	NPI_TDC_CTL	0x0000000000000002ULL
-#define	NPI_TXC_CTL	0x0000000000000004ULL
-#define	NPI_IPP_CTL	0x0000000000000008ULL
-
-#define	NPI_XPCS_CTL	0x0000000000000010ULL
-#define	NPI_PCS_CTL	0x0000000000000020ULL
-#define	NPI_ESR_CTL	0x0000000000000040ULL
-#define	NPI_BMAC_CTL	0x0000000000000080ULL
-#define	NPI_XMAC_CTL	0x0000000000000100ULL
-#define	NPI_MAC_CTL	NPI_BMAC_CTL | NPI_XMAC_CTL
-
-#define	NPI_ZCP_CTL	0x0000000000000200ULL
-#define	NPI_TCAM_CTL	0x0000000000000400ULL
-#define	NPI_FCRAM_CTL	0x0000000000000800ULL
-#define	NPI_FFLP_CTL	NPI_TCAM_CTL | NPI_FCRAM_CTL
-
-#define	NPI_VIR_CTL	0x0000000000001000ULL
-#define	NPI_PIO_CTL	0x0000000000002000ULL
-#define	NPI_VIO_CTL	0x0000000000004000ULL
-
-#define	NPI_REG_CTL	0x0000000040000000ULL
-#define	NPI_CTL		0x0000000080000000ULL
-#define	NPI_ERR_CTL	0x0000000080000000ULL
-
-#if defined(SOLARIS) && defined(_KERNEL)
-
-#include <sys/types.h>
-#include <sys/ddi.h>
-#include <sys/sunddi.h>
-#include <sys/dditypes.h>
-#include <sys/ethernet.h>
-
-#ifdef NXGE_DEBUG
-#define	NXGE_DEBUG_MSG(params) nxge_debug_msg params
-#else
-#define	NXGE_DEBUG_MSG(params)
-#endif
-
-#if 1
-#define	NXGE_ERROR_MSG(params)	nxge_debug_msg params
-#define	NXGE_WARN_MSG(params)	nxge_debug_msg params
-#else
-#define	NXGE_ERROR_MSG(params)
-#define	NXGE_WARN_MSG(params)
-#endif
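NXGE_DEBUG_MSG takes a single parenthesized argument list so the whole call compiles away when NXGE_DEBUG is not defined; in the driver the list begins with the instance pointer and one of the *_CTL masks defined above. A self-contained sketch of the same expansion trick (DBG_MSG is a stand-in, not the driver macro):

#include <stdio.h>

/* Same double-parenthesis trick as NXGE_DEBUG_MSG. */
#ifdef NXGE_DEBUG
#define	DBG_MSG(params)	printf params
#else
#define	DBG_MSG(params)
#endif

int
main(void)
{
	int chan = 3;

	/* Expands to a printf call only when NXGE_DEBUG is defined. */
	DBG_MSG(("rx interrupt on channel %d\n", chan));
	printf("done, channel %d\n", chan);
	return (0);
}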
-
-typedef kmutex_t			nxge_os_mutex_t;
-typedef	krwlock_t			nxge_os_rwlock_t;
-
-typedef	dev_info_t			nxge_dev_info_t;
-typedef	ddi_iblock_cookie_t 		nxge_intr_cookie_t;
-
-typedef ddi_acc_handle_t		nxge_os_acc_handle_t;
-typedef	nxge_os_acc_handle_t		npi_reg_handle_t;
-typedef	uint64_t			npi_reg_ptr_t;
-
-typedef ddi_dma_handle_t		nxge_os_dma_handle_t;
-typedef struct _nxge_dma_common_t	nxge_os_dma_common_t;
-typedef struct _nxge_block_mv_t		nxge_os_block_mv_t;
-typedef frtn_t				nxge_os_frtn_t;
-
-#define	NXGE_MUTEX_DRIVER		MUTEX_DRIVER
-#define	MUTEX_INIT(lock, name, type, arg) \
-	mutex_init(lock, name, type, arg)
-#define	MUTEX_ENTER(lock)		mutex_enter(lock)
-#define	MUTEX_TRY_ENTER(lock)		mutex_tryenter(lock)
-#define	MUTEX_EXIT(lock)		mutex_exit(lock)
-#define	MUTEX_DESTROY(lock)		mutex_destroy(lock)
-
-#define	RW_INIT(lock, name, type, arg)	rw_init(lock, name, type, arg)
-#define	RW_ENTER_WRITER(lock)		rw_enter(lock, RW_WRITER)
-#define	RW_ENTER_READER(lock)		rw_enter(lock, RW_READER)
-#define	RW_TRY_ENTER(lock, type)	rw_tryenter(lock, type)
-#define	RW_EXIT(lock)			rw_exit(lock)
-#define	RW_DESTROY(lock)		rw_destroy(lock)
-#define	KMEM_ALLOC(size, flag)		kmem_alloc(size, flag)
-#define	KMEM_ZALLOC(size, flag)		kmem_zalloc(size, flag)
-#define	KMEM_FREE(buf, size)		kmem_free(buf, size)
-
-#define	NXGE_DELAY(microseconds)	 (drv_usecwait(microseconds))
-
-#define	NXGE_PIO_READ8(handle, devaddr, offset) \
-	(ddi_get8(handle, (uint8_t *)((caddr_t)devaddr + offset)))
-
-#define	NXGE_PIO_READ16(handle, devaddr, offset) \
-	(ddi_get16(handle, (uint16_t *)((caddr_t)devaddr + offset)))
-
-#define	NXGE_PIO_READ32(handle, devaddr, offset) \
-	(ddi_get32(handle, (uint32_t *)((caddr_t)devaddr + offset)))
-
-#define	NXGE_PIO_READ64(handle, devaddr, offset) \
-	(ddi_get64(handle, (uint64_t *)((caddr_t)devaddr + offset)))
-
-#define	NXGE_PIO_WRITE8(handle, devaddr, offset, data) \
-	(ddi_put8(handle, (uint8_t *)((caddr_t)devaddr + offset), data))
-
-#define	NXGE_PIO_WRITE16(handle, devaddr, offset, data) \
-	(ddi_put16(handle, (uint16_t *)((caddr_t)devaddr + offset), data))
-
-#define	NXGE_PIO_WRITE32(handle, devaddr, offset, data)	\
-	(ddi_put32(handle, (uint32_t *)((caddr_t)devaddr + offset), data))
-
-#define	NXGE_PIO_WRITE64(handle, devaddr, offset, data) \
-	(ddi_put64(handle, (uint64_t *)((caddr_t)devaddr + offset), data))
-
-#define	NXGE_NPI_PIO_READ8(npi_handle, offset) \
-	(ddi_get8(NPI_REGH(npi_handle),	\
-	(uint8_t *)(NPI_REGP(npi_handle) + offset)))
-
-#define	NXGE_NPI_PIO_READ16(npi_handle, offset) \
-	(ddi_get16(NPI_REGH(npi_handle), \
-	(uint16_t *)(NPI_REGP(npi_handle) + offset)))
-
-#define	NXGE_NPI_PIO_READ32(npi_handle, offset) \
-	(ddi_get32(NPI_REGH(npi_handle), \
-	(uint32_t *)(NPI_REGP(npi_handle) + offset)))
-
-#define	NXGE_NPI_PIO_READ64(npi_handle, offset)		\
-	(ddi_get64(NPI_REGH(npi_handle),		\
-	(uint64_t *)(NPI_REGP(npi_handle) + offset)))
-
-#define	NXGE_NPI_PIO_WRITE8(npi_handle, offset, data)	\
-	(ddi_put8(NPI_REGH(npi_handle),			\
-	(uint8_t *)(NPI_REGP(npi_handle) + offset), data))
-
-#define	NXGE_NPI_PIO_WRITE16(npi_handle, offset, data)	\
-	(ddi_put16(NPI_REGH(npi_handle),		\
-	(uint16_t *)(NPI_REGP(npi_handle) + offset), data))
-
-#define	NXGE_NPI_PIO_WRITE32(npi_handle, offset, data)	\
-	(ddi_put32(NPI_REGH(npi_handle),		\
-	(uint32_t *)(NPI_REGP(npi_handle) + offset), data))
-
-#define	NXGE_NPI_PIO_WRITE64(npi_handle, offset, data)	\
-	(ddi_put64(NPI_REGH(npi_handle),		\
-	(uint64_t *)(NPI_REGP(npi_handle) + offset), data))
-
-#define	NXGE_MEM_PIO_READ8(npi_handle)		\
-	(ddi_get8(NPI_REGH(npi_handle), (uint8_t *)NPI_REGP(npi_handle)))
-
-#define	NXGE_MEM_PIO_READ16(npi_handle)		\
-	(ddi_get16(NPI_REGH(npi_handle), (uint16_t *)NPI_REGP(npi_handle)))
-
-#define	NXGE_MEM_PIO_READ32(npi_handle)		\
-	(ddi_get32(NPI_REGH(npi_handle), (uint32_t *)NPI_REGP(npi_handle)))
-
-#define	NXGE_MEM_PIO_READ64(npi_handle)		\
-	(ddi_get64(NPI_REGH(npi_handle), (uint64_t *)NPI_REGP(npi_handle)))
-
-#define	NXGE_MEM_PIO_WRITE8(npi_handle, data)	\
-	(ddi_put8(NPI_REGH(npi_handle), (uint8_t *)NPI_REGP(npi_handle), data))
-
-#define	NXGE_MEM_PIO_WRITE16(npi_handle, data)	\
-		(ddi_put16(NPI_REGH(npi_handle),	\
-		(uint16_t *)NPI_REGP(npi_handle), data))
-
-#define	NXGE_MEM_PIO_WRITE32(npi_handle, data)	\
-		(ddi_put32(NPI_REGH(npi_handle),	\
-		(uint32_t *)NPI_REGP(npi_handle), data))
-
-#define	NXGE_MEM_PIO_WRITE64(npi_handle, data)	\
-		(ddi_put64(NPI_REGH(npi_handle),	\
-		(uint64_t *)NPI_REGP(npi_handle), data))
-
-#define	SERVICE_LOST		DDI_SERVICE_LOST
-#define	SERVICE_DEGRADED	DDI_SERVICE_DEGRADED
-#define	SERVICE_UNAFFECTED	DDI_SERVICE_UNAFFECTED
-#define	SERVICE_RESTORED	DDI_SERVICE_RESTORED
-
-#define	DATAPATH_FAULT		DDI_DATAPATH_FAULT
-#define	DEVICE_FAULT		DDI_DEVICE_FAULT
-#define	EXTERNAL_FAULT		DDI_EXTERNAL_FAULT
-
-#define	NOTE_LINK_UP		DL_NOTE_LINK_UP
-#define	NOTE_LINK_DOWN		DL_NOTE_LINK_DOWN
-#define	NOTE_SPEED		DL_NOTE_SPEED
-#define	NOTE_PHYS_ADDR		DL_NOTE_PHYS_ADDR
-#define	NOTE_AGGR_AVAIL		DL_NOTE_AGGR_AVAIL
-#define	NOTE_AGGR_UNAVAIL	DL_NOTE_AGGR_UNAVAIL
-
-#define	FM_REPORT_FAULT(nxgep, impact, location, msg)\
-		ddi_dev_report_fault(nxgep->dip, impact, location, msg)
-#define	FM_CHECK_DEV_HANDLE(nxgep)\
-		ddi_check_acc_handle(nxgep->dev_regs->nxge_regh)
-#define	FM_GET_DEVSTATE(nxgep)\
-		ddi_get_devstate(nxgep->dip)
-#define	FM_SERVICE_RESTORED(nxgep)\
-		ddi_fm_service_impact(nxgep->dip, DDI_SERVICE_RESTORED)
-#define	NXGE_FM_REPORT_ERROR(nxgep, portn, chan, ereport_id)\
-		nxge_fm_report_error(nxgep, portn, chan, ereport_id)
-#define	FM_CHECK_ACC_HANDLE(nxgep, handle)\
-		fm_check_acc_handle(handle)
-#define	FM_CHECK_DMA_HANDLE(nxgep, handle)\
-		fm_check_dma_handle(handle)
-
-#endif
-
-#if defined(REG_TRACE)
-#define	NXGE_REG_RD64(handle, offset, val_p) {\
-	*(val_p) = NXGE_NPI_PIO_READ64(handle, offset);\
-	npi_rtrace_update(handle, B_FALSE, &npi_rtracebuf, (uint32_t)offset, \
-			(uint64_t)(*(val_p)));\
-}
-#elif defined(REG_SHOW)
-	/*
-	 * Send 0xbadbad to tell rt_show_reg that we do not have
-	 * a valid RTBUF index to pass
-	 */
-#define	NXGE_REG_RD64(handle, offset, val_p) {\
-	*(val_p) = NXGE_NPI_PIO_READ64(handle, offset);\
-	rt_show_reg(0xbadbad, B_FALSE, (uint32_t)offset, (uint64_t)(*(val_p)));\
-}
-#elif defined(AXIS_DEBUG) && !defined(LEGION)
-#define	NXGE_REG_RD64(handle, offset, val_p) {\
-	int	n;				\
-	for (n = 0; n < AXIS_WAIT_LOOP; n++) {	\
-		*(val_p) = 0;		\
-		*(val_p) = NXGE_NPI_PIO_READ64(handle, offset);\
-		if (*(val_p) != (~0)) { \
-			break; \
-		}	\
-		drv_usecwait(AXIS_WAIT_PER_LOOP); \
-		if (n < 20) { \
-			cmn_err(CE_WARN, "NXGE_REG_RD64: loop %d " \
-			"REG 0x%x(0x%llx)", \
-			n, offset, *val_p);\
-		}	\
-	} \
-	if (n >= AXIS_WAIT_LOOP) {	\
-		cmn_err(CE_WARN, "(FATAL)NXGE_REG_RD64 on offset 0x%x " \
-			"with -1!!!", offset); \
-	}	\
-}
-#else
-
-#define	NXGE_REG_RD64(handle, offset, val_p) {\
-	*(val_p) = NXGE_NPI_PIO_READ64(handle, offset);\
-}
-#endif
-
-/*
- *  In COSIM mode, polling for the completion of a Clause 45 MDIO
- *  frame operation could loop for a very long time.  Displaying one
- *  rtrace line for each poll would clutter the screen, so this
- *  macro performs the read without any rtrace output.
- */
-#define	NXGE_REG_RD64_NO_SHOW(handle, offset, val_p) {\
-	*(val_p) = NXGE_NPI_PIO_READ64(handle, offset);\
-}
-
-
-#if defined(REG_TRACE)
-#define	NXGE_REG_WR64(handle, offset, val) {\
-	NXGE_NPI_PIO_WRITE64(handle, (offset), (val));\
-	npi_rtrace_update(handle, B_TRUE, &npi_rtracebuf, (uint32_t)offset,\
-				(uint64_t)(val));\
-}
-#elif defined(REG_SHOW)
-/*
- * Send 0xbadbad to tell rt_show_reg that we do not have
- * a valid RTBUF index to pass
- */
-#define	NXGE_REG_WR64(handle, offset, val) {\
-	NXGE_NPI_PIO_WRITE64(handle, offset, (val));\
-	rt_show_reg(0xbadbad, B_TRUE, (uint32_t)offset, (uint64_t)(val));\
-}
-#else
-#define	NXGE_REG_WR64(handle, offset, val) {\
-	NXGE_NPI_PIO_WRITE64(handle, (offset), (val));\
-}
-#endif
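In the default build NXGE_REG_RD64/NXGE_REG_WR64 reduce to a single ddi_get64/ddi_put64 through the NPI handle, and callers typically use them for read-modify-write sequences. A self-contained stand-in that mirrors the statement-macro shape (plain pointers replace the DDI handle; the names are illustrative):

#include <stdint.h>

/* Stand-ins: a flat uint64_t array replaces the mapped register space. */
#define	REG_RD64(base, offset, val_p)	{ *(val_p) = (base)[(offset) >> 3]; }
#define	REG_WR64(base, offset, val)	{ (base)[(offset) >> 3] = (val); }

static void
reg_set_bits(uint64_t *base, uint32_t offset, uint64_t bits)
{
	uint64_t val;

	REG_RD64(base, offset, &val);	/* read the current value */
	val |= bits;			/* set the requested bits */
	REG_WR64(base, offset, val);	/* write it back */
}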
-
-#ifdef	__cplusplus
-}
-#endif
-
-#endif	/* _SYS_NXGE_NXGE_COMMON_IMPL_H */
--- a/usr/src/uts/sun4v/sys/nxge/nxge_defs.h	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,465 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef	_SYS_NXGE_NXGE_DEFS_H
-#define	_SYS_NXGE_NXGE_DEFS_H
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#ifdef	__cplusplus
-extern "C" {
-#endif
-
-/*
- * Block Address Assignment (24-bit base address)
- * (bits [23:20]: block	 [19]: set to 1 for FZC	)
- */
-#define	PIO			0x000000
-#define	FZC_PIO			0x080000
-#define	RESERVED_1		0x100000
-#define	FZC_MAC			0x180000
-#define	RESERVED_2		0x200000
-#define	FZC_IPP			0x280000
-#define	FFLP			0x300000
-#define	FZC_FFLP		0x380000
-#define	PIO_VADDR		0x400000
-#define	RESERVED_3		0x480000
-#define	ZCP			0x500000
-#define	FZC_ZCP			0x580000
-#define	DMC			0x600000
-#define	FZC_DMC			0x680000
-#define	TXC			0x700000
-#define	FZC_TXC			0x780000
-#define	PIO_LDSV		0x800000
-#define	RESERVED_4		0x880000
-#define	PIO_LDGIM		0x900000
-#define	RESERVED_5		0x980000
-#define	PIO_IMASK0		0xa00000
-#define	RESERVED_6		0xa80000
-#define	PIO_IMASK1		0xb00000
-#define	RESERVED_7_START	0xb80000
-#define	RESERVED_7_END		0xc00000
-#define	FZC_PROM		0xc80000
-#define	RESERVED_8		0xd00000
-#define	FZC_PIM			0xd80000
-#define	RESERVED_9_START 	0xe00000
-#define	RESERVED_9_END 		0xf80000
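Every offset in this table is a 24-bit address: bits [23:20] select the block and bit 19 selects its FZC copy, as the header comment above notes. A small decode sketch (the function name is illustrative):

#include <stdint.h>

#define	DMC		0x600000
#define	FZC_DMC		0x680000

/* Split a 24-bit register address into block (bits [23:20]) and FZC bit 19. */
static void
decode_block_addr(uint32_t addr, uint32_t *block, uint32_t *fzc)
{
	*block = (addr >> 20) & 0xf;	/* DMC and FZC_DMC both give 0x6 */
	*fzc = (addr >> 19) & 0x1;	/* 0 for DMC, 1 for FZC_DMC */
}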
-
-/* PIO		(0x000000) */
-
-
-/* FZC_PIO	(0x080000) */
-#define	LDGITMRES		(FZC_PIO + 0x00008)	/* timer resolution */
-#define	SID			(FZC_PIO + 0x10200)	/* 64 LDG, INT data */
-#define	LDG_NUM			(FZC_PIO + 0x20000)	/* 69 LDs */
-
-
-
-/* FZC_IPP 	(0x280000) */
-
-
-/* FFLP		(0x300000), Header Parser */
-
-/* PIO_VADDR	(0x400000), PIO Virtual DMA Address */
-/* ?? how to access DMA via PIO_VADDR? */
-#define	VADDR			(PIO_VADDR + 0x00000) /* ?? not for driver */
-
-
-/* ZCP		(0x500000), Neptune Only */
-
-
-/* FZC_ZCP	(0x580000), Neptune Only */
-
-
-/* DMC 		(0x600000), register offset (32 DMA channels) */
-
-/* Transmit Ring Register Offset (32 Channels) */
-#define	TX_RNG_CFIG		(DMC + 0x40000)
-#define	TX_RING_HDH		(DMC + 0x40008)
-#define	TX_RING_HDL		(DMC + 0x40010)
-#define	TX_RING_KICK		(DMC + 0x40018)
-/* Transmit Operations (32 Channels) */
-#define	TX_ENT_MSK		(DMC + 0x40020)
-#define	TX_CS			(DMC + 0x40028)
-#define	TXDMA_MBH		(DMC + 0x40030)
-#define	TXDMA_MBL		(DMC + 0x40038)
-#define	TX_DMA_PRE_ST		(DMC + 0x40040)
-#define	TX_RNG_ERR_LOGH		(DMC + 0x40048)
-#define	TX_RNG_ERR_LOGL		(DMC + 0x40050)
-#if OLD
-#define	SH_TX_RNG_ERR_LOGH	(DMC + 0x40058)
-#define	SH_TX_RNG_ERR_LOGL	(DMC + 0x40060)
-#endif
-
-/* FZC_DMC RED Initial Random Value register offset (global) */
-#define	RED_RAN_INIT		(FZC_DMC + 0x00068)
-
-#define	RX_ADDR_MD		(FZC_DMC + 0x00070)
-
-/* FZC_DMC Ethernet Timeout Counter register offset (global) */
-#define	EING_TIMEOUT		(FZC_DMC + 0x00078)
-
-/* RDC Table */
-#define	RDC_TBL			(FZC_DMC + 0x10000)	/* 256 * 8 */
-
-/* FZC_DMC partitioning support register offset (32 channels) */
-
-#define	TX_LOG_PAGE_VLD		(FZC_DMC + 0x40000)
-#define	TX_LOG_MASK1		(FZC_DMC + 0x40008)
-#define	TX_LOG_VAL1		(FZC_DMC + 0x40010)
-#define	TX_LOG_MASK2		(FZC_DMC + 0x40018)
-#define	TX_LOG_VAL2		(FZC_DMC + 0x40020)
-#define	TX_LOG_PAGE_RELO1	(FZC_DMC + 0x40028)
-#define	TX_LOG_PAGE_RELO2	(FZC_DMC + 0x40030)
-#define	TX_LOG_PAGE_HDL		(FZC_DMC + 0x40038)
-
-#define	TX_ADDR_MOD		(FZC_DMC + 0x41000) /* only one? */
-
-
-/* FZC_DMC RED Parameters register offset (32 channels) */
-#define	RDC_RED_PARA1		(FZC_DMC + 0x30000)
-#define	RDC_RED_PARA2		(FZC_DMC + 0x30008)
-/* FZC_DMC RED Discard Count Register offset (32 channels) */
-#define	RED_DIS_CNT		(FZC_DMC + 0x30010)
-
-#if OLD /* This has been moved to TXC */
-/* Transmit Ring Scheduler (per port) */
-#define	TX_DMA_MAP0		(FZC_DMC + 0x50000)
-#define	TX_DMA_MAP1		(FZC_DMC + 0x50008)
-#define	TX_DMA_MAP2		(FZC_DMC + 0x50010)
-#define	TX_DMA_MAP3		(FZC_DMC + 0x50018)
-#endif
-
-/* Transmit Ring Scheduler: DRR Weight (32 Channels) */
-#define	DRR_WT			(FZC_DMC + 0x51000)
-#if OLD
-#define	TXRNG_USE		(FZC_DMC + 0x51008)
-#endif
-
-/* TXC		(0x700000)??	*/
-
-
-/* FZC_TXC	(0x780000)??	*/
-
-
-/*
- * PIO_LDSV	(0x800000)
- * Logical Device State Vector 0, 1, 2.
- * (69 logical devices, 8192 apart, partitioning control)
- */
-#define	LDSV0			(PIO_LDSV + 0x00000)	/* RO (64 - 69) */
-#define	LDSV1			(PIO_LDSV + 0x00008)	/* RO (32 - 63) */
-#define	LDSV2			(PIO_LDSV + 0x00010)	/* RO ( 0 - 31) */
-
-/*
- * PIO_LDGIM	(0x900000)
- * Logical Device Group Interrupt Management (64 groups).
- * (count 64, step 8192)
- */
-#define	LDGIMGN			(PIO_LDGIM + 0x00000)	/* RW */
-
-/*
- * PIO_IMASK0	(0xA00000)
- *
- * Logical Device Mask 0.
- * (64 logical devices, 8192 apart, partitioning control)
- */
-#define	LD_IM0			(PIO_IMASK0 + 0x00000)	/* RW ( 0 - 63) */
-
-/*
- * PIO_IMASK1	(0xB00000)
- *
- * Logical Device Mask 1.
- * (5 logical devices, 8192 apart, partitioning control)
- */
-#define	LD_IM1			(PIO_IMASK1 + 0x00000)	/* RW (64 - 69) */
-
-
-/* DMC/TMC CSR size */
-#define	DMA_CSR_SIZE		512
-#define	DMA_CSR_MIN_PAGE_SIZE	1024
-
-/*
- * Define the Default RBR, RCR
- */
-#define	RBR_DEFAULT_MAX_BLKS	4096	/* each entry (16 blockaddr/64B) */
-#define	RBR_NBLK_PER_LINE	16	/* 16 block addresses per 64 B line */
-#define	RBR_DEFAULT_MAX_LEN	(RBR_DEFAULT_MAX_BLKS)
-#define	RBR_DEFAULT_MIN_LEN	1
-
-#define	SW_OFFSET_NO_OFFSET		0
-#define	SW_OFFSET_64			1	/* 64 bytes */
-#define	SW_OFFSET_128			2	/* 128 bytes */
-#define	SW_OFFSET_INVALID		3
-
-/*
- * RBR block descriptor is 32 bits (bits [43:12])
- */
-#define	RBR_BKADDR_SHIFT	12
-
-
-#define	RCR_DEFAULT_MAX_BLKS	4096	/* each entry (8 blockaddr/64B) */
-#define	RCR_NBLK_PER_LINE	8	/* 8 block addresses per 64 B line */
-#define	RCR_DEFAULT_MAX_LEN	(RCR_DEFAULT_MAX_BLKS)
-#define	RCR_DEFAULT_MIN_LEN	1
-
-/*  DMA Channels.  */
-#define	NXGE_MAX_DMCS		(NXGE_MAX_RDCS + NXGE_MAX_TDCS)
-#define	NXGE_MAX_RDCS		16
-#define	NXGE_MAX_TDCS		24
-#define	NXGE_MAX_TDCS_NIU	16
-/*
- * original mapping from Hypervisor
- */
-#ifdef	ORIGINAL
-#define	NXGE_N2_RXDMA_START_LDG	0
-#define	NXGE_N2_TXDMA_START_LDG	16
-#define	NXGE_N2_MIF_LDG		32
-#define	NXGE_N2_MAC_0_LDG	33
-#define	NXGE_N2_MAC_1_LDG	34
-#define	NXGE_N2_SYS_ERROR_LDG	35
-#endif
-
-#define	NXGE_N2_RXDMA_START_LDG	19
-#define	NXGE_N2_TXDMA_START_LDG	27
-#define	NXGE_N2_MIF_LDG		17
-#define	NXGE_N2_MAC_0_LDG	16
-#define	NXGE_N2_MAC_1_LDG	35
-#define	NXGE_N2_SYS_ERROR_LDG	18
-#define	NXGE_N2_LDG_GAP		17
-
-#define	NXGE_MAX_RDC_GRPS	8
-
-/*
- * Max. ports per Neptune and NIU
- */
-#define	NXGE_MAX_PORTS			4
-#define	NXGE_PORTS_NEPTUNE		4
-#define	NXGE_PORTS_NIU			2
-
-/* Max. RDC table groups */
-#define	NXGE_MAX_RDC_GROUPS		8
-#define	NXGE_MAX_RDCS			16
-#define	NXGE_MAX_DMAS			32
-
-
-#define	NXGE_MAX_MACS_XMACS		16
-#define	NXGE_MAX_MACS_BMACS		8
-#define	NXGE_MAX_MACS			(NXGE_MAX_PORTS * NXGE_MAX_MACS_XMACS)
-
-#define	NXGE_MAX_VLANS			4096
-#define	VLAN_ETHERTYPE			(0x8100)
-
-
-/* Scaling factor for RBR (receive block ring) */
-#define	RBR_SCALE_1		0
-#define	RBR_SCALE_2		1
-#define	RBR_SCALE_3		2
-#define	RBR_SCALE_4		3
-#define	RBR_SCALE_5		4
-#define	RBR_SCALE_6		5
-#define	RBR_SCALE_7		6
-#define	RBR_SCALE_8		7
-
-
-#define	MAX_PORTS_PER_NXGE	4
-#define	MAX_MACS		32
-
-#define	TX_GATHER_POINTER_SZ	8
-#define	TX_GP_PER_BLOCK		8
-#define	TX_DEFAULT_MAX_GPS	1024	/* Max. # of gather pointers */
-#define	TX_DEFAULT_JUMBO_MAX_GPS 4096	/* Max. # of gather pointers */
-#define	TX_DEFAULT_MAX_LEN	(TX_DEFAULT_MAX_GPS/TX_GP_PER_BLOCK)
-#define	TX_DEFAULT_JUMBO_MAX_LEN (TX_DEFAULT_JUMBO_MAX_GPS/TX_GP_PER_BLOCK)
-
-#define	TX_RING_THRESHOLD		(TX_DEFAULT_MAX_GPS/4)
-#define	TX_RING_JUMBO_THRESHOLD		(TX_DEFAULT_JUMBO_MAX_GPS/4)
-
-#define	TRANSMIT_HEADER_SIZE		16	/* 16 B frame header */
-
-#define	TX_DESC_SAD_SHIFT	0
-#define	TX_DESC_SAD_MASK	0x00000FFFFFFFFFFFULL	/* start address */
-#define	TX_DESC_TR_LEN_SHIFT	44
-#define	TX_DESC_TR_LEN_MASK	0x00FFF00000000000ULL	/* Transfer Length */
-#define	TX_DESC_NUM_PTR_SHIFT	58
-#define	TX_DESC_NUM_PTR_MASK	0x3C00000000000000ULL	/* gather pointers */
-#define	TX_DESC_MASK_SHIFT	62
-#define	TX_DESC_MASK_MASK	0x4000000000000000ULL	/* Mark bit */
-#define	TX_DESC_SOP_SHIF	63
-#define	TX_DESC_NUM_MASK	0x8000000000000000ULL	/* Start of packet */
-
-#define	TCAM_FLOW_KEY_MAX_CLASS		12
-#define	TCAM_L3_MAX_USER_CLASS		4
-#define	TCAM_NIU_TCAM_MAX_ENTRY		128
-#define	TCAM_NXGE_TCAM_MAX_ENTRY	256
-
-
-
-/* TCAM entry formats */
-#define	TCAM_IPV4_5TUPLE_FORMAT	0x00
-#define	TCAM_IPV6_5TUPLE_FORMAT	0x01
-#define	TCAM_ETHERTYPE_FORMAT	0x02
-
-
-/* TCAM */
-#define	TCAM_SELECT_IPV6	0x01
-#define	TCAM_LOOKUP		0x04
-#define	TCAM_DISCARD		0x08
-
-/* FLOW Key */
-#define	FLOW_L4_1_34_BYTES	0x10
-#define	FLOW_L4_1_78_BYTES	0x11
-#define	FLOW_L4_0_12_BYTES	(0x10 << 2)
-#define	FLOW_L4_0_56_BYTES	(0x11 << 2)
-#define	FLOW_PROTO_NEXT		0x10
-#define	FLOW_IPDA		0x20
-#define	FLOW_IPSA		0x40
-#define	FLOW_VLAN		0x80
-#define	FLOW_L2DA		0x100
-#define	FLOW_PORT		0x200
-
-/* TCAM */
-#define	MAX_EFRAME	11
-
-#define	TCAM_USE_L2RDC_FLOW_LOOKUP	0x00
-#define	TCAM_USE_OFFSET_DONE		0x01
-#define	TCAM_OVERRIDE_L2_FLOW_LOOKUP	0x02
-#define	TCAM_OVERRIDE_L2_USE_OFFSET	0x03
-
-/*
- * FCRAM (Hashing):
- *	1. IPv4 exact match
- *	2. IPv6 exact match
- *	3. IPv4 Optimistic match
- *	4. IPv6 Optimistic match
- *
- */
-#define	FCRAM_IPV4_EXT_MATCH	0x00
-#define	FCRAM_IPV6_EXT_MATCH	0x01
-#define	FCRAM_IPV4_OPTI_MATCH	0x02
-#define	FCRAM_IPV6_OPTI_MATCH	0x03
-
-
-#define	NXGE_HASH_MAX_ENTRY	256
-
-
-#define	MAC_ADDR_LENGTH		6
-
-/* convert values */
-#define	NXGE_BASE(x, y)		(((y) << (x ## _SHIFT)) & (x ## _MASK))
-#define	NXGE_VAL(x, y)		(((y) & (x ## _MASK)) >> (x ## _SHIFT))
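-
-/*
- * Usage sketch (editorial example; the helpers below are illustrative
- * and not part of the original interface): the convert macros pair with
- * the *_SHIFT/*_MASK definitions, e.g. composing and cracking the
- * transfer length field of a transmit descriptor word.
- */
-#define	NXGE_EX_TDESC(addr, len)	\
-	(NXGE_BASE(TX_DESC_SAD, addr) | NXGE_BASE(TX_DESC_TR_LEN, len))
-#define	NXGE_EX_TDESC_LEN(desc)		NXGE_VAL(TX_DESC_TR_LEN, desc)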
-
-/*
- * Locate the DMA channel start offset (PIO_VADDR)
- * (DMA virtual address space of the PIO block)
- */
-#define	TDMC_PIOVADDR_OFFSET(channel)	(2 * DMA_CSR_SIZE * channel)
-#define	RDMC_PIOVADDR_OFFSET(channel)	(TDMC_OFFSET(channel) + DMA_CSR_SIZE)
-
-/*
- * PIO access using the DMC block directly (DMC)
- */
-#define	DMC_OFFSET(channel)	(DMA_CSR_SIZE * channel)
-#define	TDMC_OFFSET(channel)	(TX_RNG_CFIG + DMA_CSR_SIZE * channel)
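-
-/*
- * Worked example (editorial note): a per-channel register sits one
- * DMA_CSR_SIZE stride per channel past its block-relative offset, so
- * channel 2's transmit ring configuration register is
- * TDMC_OFFSET(2) == TX_RNG_CFIG + 2 * DMA_CSR_SIZE == 0x640400.
- * The macro below is an illustrative generalization, not part of the
- * original interface.
- */
-#define	NXGE_EX_TDC_REG(reg, channel)	((reg) + DMA_CSR_SIZE * (channel))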
-
-/*
- * Number of logical pages.
- */
-#define	NXGE_MAX_LOGICAL_PAGES		2
-
-#ifdef	SOLARIS
-#ifndef	i386
-#define	_BIT_FIELDS_BIG_ENDIAN		_BIT_FIELDS_HTOL
-#else
-#define	_BIT_FIELDS_LITTLE_ENDIAN	_BIT_FIELDS_LTOH
-#endif
-#else
-#define	_BIT_FIELDS_LITTLE_ENDIAN	_LITTLE_ENDIAN_BITFIELD
-#endif
-
-#ifdef COSIM
-#define	MAX_PIO_RETRIES		3200
-#else
-#define	MAX_PIO_RETRIES		32
-#endif
-
-#define	IS_PORT_NUM_VALID(portn)\
-	(portn < 4)
-
-/*
- * The following macros expect unsigned input values.
- */
-#define	TXDMA_CHANNEL_VALID(cn)		(cn < NXGE_MAX_TDCS)
-#define	TXDMA_PAGE_VALID(pn)		(pn < NXGE_MAX_LOGICAL_PAGES)
-#define	TXDMA_FUNC_VALID(fn)		(fn < MAX_PORTS_PER_NXGE)
-#define	FUNC_VALID(n)			(n < MAX_PORTS_PER_NXGE)
-
-/*
- * DMA channel binding definitions.
- */
-#define	VIR_PAGE_INDEX_MAX		8
-#define	VIR_SUB_REGIONS			2
-#define	VIR_DMA_BIND			1
-
-#define	SUBREGION_VALID(n)		(n < VIR_SUB_REGIONS)
-#define	VIR_PAGE_INDEX_VALID(n)		(n < VIR_PAGE_INDEX_MAX)
-#define	VRXDMA_CHANNEL_VALID(n)		(n < NXGE_MAX_RDCS)
-
-/*
- * Logical device definitions.
- */
-#define	NXGE_INT_MAX_LD		69
-#define	NXGE_INT_MAX_LDG	64
-
-#define	NXGE_RDMA_LD_START	 0
-#define	NXGE_TDMA_LD_START	32
-#define	NXGE_MIF_LD		63
-#define	NXGE_MAC_LD_PORT0	64
-#define	NXGE_MAC_LD_PORT1	65
-#define	NXGE_MAC_LD_PORT2	66
-#define	NXGE_MAC_LD_PORT3	67
-#define	NXGE_SYS_ERROR_LD	68
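-
-/*
- * Mapping sketch (editorial example; the macro names are illustrative):
- * a receive DMA channel's logical device number is
- * NXGE_RDMA_LD_START + n, and its state bit appears in LDSV2
- * (LDs 0 - 31); transmit channels start at NXGE_TDMA_LD_START and
- * report in LDSV1.
- */
-#define	NXGE_EX_RDC_LD(rdc)		(NXGE_RDMA_LD_START + (rdc))
-#define	NXGE_EX_RDC_LDSV2_BIT(rdc)	(1ULL << NXGE_EX_RDC_LD(rdc))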
-
-#define	LDG_VALID(n)			(n < NXGE_INT_MAX_LDG)
-#define	LD_VALID(n)			(n < NXGE_INT_MAX_LD)
-#define	LD_RXDMA_LD_VALID(n)		(n < NXGE_MAX_RDCS)
-#define	LD_TXDMA_LD_VALID(n)		(n >= NXGE_MAX_RDCS && \
-					((n - NXGE_MAX_RDCS) < NXGE_MAX_TDCS))
-#define	LD_MAC_VALID(n)			(IS_PORT_NUM_VALID(n))
-
-#define	LD_TIMER_MAX			0x3f
-#define	LD_INTTIMER_VALID(n)		(n <= LD_TIMER_MAX)
-
-/* System Interrupt Data */
-#define	SID_VECTOR_MAX			0x1f
-#define	SID_VECTOR_VALID(n)		(n <= SID_VECTOR_MAX)
-
-#define	NXGE_COMPILE_32
-
-#ifdef	__cplusplus
-}
-#endif
-
-#endif	/* _SYS_NXGE_NXGE_DEFS_H */
--- a/usr/src/uts/sun4v/sys/nxge/nxge_espc.h	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,236 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef	_SYS_NXGE_NXGE_ESPC_H
-#define	_SYS_NXGE_NXGE_ESPC_H
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#ifdef	__cplusplus
-extern "C" {
-#endif
-
-#include <nxge_espc_hw.h>
-
-#define	ESPC_MAC_ADDR_0		ESPC_NCR_REGN(0)
-#define	ESPC_MAC_ADDR_1		ESPC_NCR_REGN(1)
-#define	ESPC_NUM_PORTS_MACS	ESPC_NCR_REGN(2)
-#define	ESPC_MOD_STR_LEN	ESPC_NCR_REGN(4)
-#define	ESPC_MOD_STR_1		ESPC_NCR_REGN(5)
-#define	ESPC_MOD_STR_2		ESPC_NCR_REGN(6)
-#define	ESPC_MOD_STR_3		ESPC_NCR_REGN(7)
-#define	ESPC_MOD_STR_4		ESPC_NCR_REGN(8)
-#define	ESPC_MOD_STR_5		ESPC_NCR_REGN(9)
-#define	ESPC_MOD_STR_6		ESPC_NCR_REGN(10)
-#define	ESPC_MOD_STR_7		ESPC_NCR_REGN(11)
-#define	ESPC_MOD_STR_8		ESPC_NCR_REGN(12)
-#define	ESPC_BD_MOD_STR_LEN	ESPC_NCR_REGN(13)
-#define	ESPC_BD_MOD_STR_1	ESPC_NCR_REGN(14)
-#define	ESPC_BD_MOD_STR_2	ESPC_NCR_REGN(15)
-#define	ESPC_BD_MOD_STR_3	ESPC_NCR_REGN(16)
-#define	ESPC_BD_MOD_STR_4	ESPC_NCR_REGN(17)
-#define	ESPC_PHY_TYPE		ESPC_NCR_REGN(18)
-#define	ESPC_MAX_FM_SZ		ESPC_NCR_REGN(19)
-#define	ESPC_INTR_NUM		ESPC_NCR_REGN(20)
-#define	ESPC_VER_IMGSZ		ESPC_NCR_REGN(21)
-#define	ESPC_CHKSUM		ESPC_NCR_REGN(22)
-
-#define	NUM_PORTS_MASK		0xff
-#define	NUM_MAC_ADDRS_MASK	0xff0000
-#define	NUM_MAC_ADDRS_SHIFT	16
-#define	MOD_STR_LEN_MASK	0xffff
-#define	BD_MOD_STR_LEN_MASK	0xffff
-#define	MAX_FM_SZ_MASK		0xffff
-#define	VER_NUM_MASK		0xffff
-#define	IMG_SZ_MASK		0xffff0000
-#define	IMG_SZ_SHIFT		16
-#define	CHKSUM_MASK		0xff
-
-/* 0 <= n < 8 */
-#define	ESPC_MOD_STR(n)		(ESPC_MOD_STR_1 + n*8)
-#define	MAX_MOD_STR_LEN		32
-
-/* 0 <= n < 4 */
-#define	ESPC_BD_MOD_STR(n)	(ESPC_BD_MOD_STR_1 + n*8)
-#define	MAX_BD_MOD_STR_LEN	16
-
-#define	ESC_PHY_10G_FIBER	0x0
-#define	ESC_PHY_10G_COPPER	0x1
-#define	ESC_PHY_1G_FIBER	0x2
-#define	ESC_PHY_1G_COPPER	0x3
-#define	ESC_PHY_NONE		0xf
-
-#define	ESC_IMG_CHKSUM_VAL	0xab
-
-typedef union _mac_addr_0_t {
-	uint64_t value;
-
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t msw;	/* Most significant word */
-		uint32_t lsw;	/* Least significant word */
-#elif defined(_LITTLE_ENDIAN)
-		uint32_t lsw;	/* Least significant word */
-		uint32_t msw;	/* Most significant word */
-#endif
-	} val;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t	w1;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint32_t byte3		: 8;
-		uint32_t byte2		: 8;
-		uint32_t byte1		: 8;
-		uint32_t byte0		: 8;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint32_t byte0		: 8;
-		uint32_t byte1		: 8;
-		uint32_t byte2		: 8;
-		uint32_t byte3		: 8;
-#endif
-		} w0;
-
-#if defined(_LITTLE_ENDIAN)
-		uint32_t	w1;
-#endif
-	} bits;
-} mac_addr_0_t;
-
-typedef union _mac_addr_1_t {
-	uint64_t value;
-
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t msw;	/* Most significant word */
-		uint32_t lsw;	/* Least significant word */
-#elif defined(_LITTLE_ENDIAN)
-		uint32_t lsw;	/* Least significant word */
-		uint32_t msw;	/* Most significant word */
-#endif
-	} val;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t	w1;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint32_t res		: 16;
-		uint32_t byte5		: 8;
-		uint32_t byte4		: 8;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint32_t byte4		: 8;
-		uint32_t byte5		: 8;
-		uint32_t res		: 16;
-#endif
-		} w0;
-
-#if defined(_LITTLE_ENDIAN)
-		uint32_t	w1;
-#endif
-	} bits;
-} mac_addr_1_t;
-
-
-typedef union _phy_type_t {
-	uint64_t value;
-
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t msw;	/* Most significant word */
-		uint32_t lsw;	/* Least significant word */
-#elif defined(_LITTLE_ENDIAN)
-		uint32_t lsw;	/* Least significant word */
-		uint32_t msw;	/* Most significant word */
-#endif
-	} val;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t	w1;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint32_t pt0_phy_type	: 8;
-		uint32_t pt1_phy_type	: 8;
-		uint32_t pt2_phy_type	: 8;
-		uint32_t pt3_phy_type	: 8;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint32_t pt3_phy_type	: 8;
-		uint32_t pt2_phy_type	: 8;
-		uint32_t pt1_phy_type	: 8;
-		uint32_t pt0_phy_type	: 8;
-#endif
-		} w0;
-
-#if defined(_LITTLE_ENDIAN)
-		uint32_t	w1;
-#endif
-	} bits;
-} phy_type_t;
-
-
-typedef union _intr_num_t {
-	uint64_t value;
-
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t msw;	/* Most significant word */
-		uint32_t lsw;	/* Least significant word */
-#elif defined(_LITTLE_ENDIAN)
-		uint32_t lsw;	/* Least significant word */
-		uint32_t msw;	/* Most significant word */
-#endif
-	} val;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t	w1;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint32_t pt0_intr_num	: 8;
-		uint32_t pt1_intr_num	: 8;
-		uint32_t pt2_intr_num	: 8;
-		uint32_t pt3_intr_num	: 8;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint32_t pt3_intr_num	: 8;
-		uint32_t pt2_intr_num	: 8;
-		uint32_t pt1_intr_num	: 8;
-		uint32_t pt0_intr_num	: 8;
-#endif
-		} w0;
-
-#if defined(_LITTLE_ENDIAN)
-		uint32_t	w1;
-#endif
-	} bits;
-} intr_num_t;
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif	/* _SYS_NXGE_NXGE_ESPC_H */
--- a/usr/src/uts/sun4v/sys/nxge/nxge_espc_hw.h	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,64 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef	_SYS_NXGE_NXGE_ESPC_HW_H
-#define	_SYS_NXGE_NXGE_ESPC_HW_H
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#ifdef	__cplusplus
-extern "C" {
-#endif
-
-#include <nxge_defs.h>
-
-/* EPC / SPC Registers offsets */
-#define	ESPC_PIO_EN_REG		0x040000
-#define	ESPC_PIO_EN_MASK	0x0000000000000001ULL
-#define	ESPC_PIO_STATUS_REG	0x040008
-
-/* EPC Status Register */
-#define	EPC_READ_INITIATE	(1ULL << 31)
-#define	EPC_READ_COMPLETE	(1 << 30)
-#define	EPC_WRITE_INITIATE	(1 << 29)
-#define	EPC_WRITE_COMPLETE	(1 << 28)
-#define	EPC_EEPROM_ADDR_BITS	0x3FFFF
-#define	EPC_EEPROM_ADDR_SHIFT	8
-#define	EPC_EEPROM_ADDR_MASK	(EPC_EEPROM_ADDR_BITS << EPC_EEPROM_ADDR_SHIFT)
-#define	EPC_EEPROM_DATA_MASK	0xFF
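-
-/*
- * Packing sketch (editorial example; the macro below is illustrative and
- * not part of the original interface): an EEPROM byte address is placed
- * in the address field defined above, and the byte read back is taken
- * from the low data bits; see npi_espc.c for the full access sequence.
- */
-#define	NXGE_EX_EEPROM_ADDR(a)	\
-	(((uint64_t)(a) << EPC_EEPROM_ADDR_SHIFT) & EPC_EEPROM_ADDR_MASK)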
-
-#define	EPC_RW_WAIT		10	/* TBD */
-
-#define	ESPC_NCR_REG		0x040020   /* Count 128, step 8 */
-#define	ESPC_REG_ADDR(reg)	(FZC_PROM + (reg))
-
-#define	ESPC_NCR_REGN(n)	((ESPC_REG_ADDR(ESPC_NCR_REG)) + n*8)
-#define	ESPC_NCR_VAL_MASK	0x00000000FFFFFFFFULL
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif	/* _SYS_NXGE_NXGE_ESPC_HW_H */
--- a/usr/src/uts/sun4v/sys/nxge/nxge_fflp.h	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,233 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef	_SYS_NXGE_NXGE_FFLP_H
-#define	_SYS_NXGE_NXGE_FFLP_H
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#ifdef	__cplusplus
-extern "C" {
-#endif
-
-#include <npi_fflp.h>
-
-#define	MAX_PARTITION 8
-
-typedef	struct _fflp_errlog {
-	uint32_t		vlan;
-	uint32_t		tcam;
-	uint32_t		hash_pio[MAX_PARTITION];
-	uint32_t		hash_lookup1;
-	uint32_t		hash_lookup2;
-} fflp_errlog_t, *p_fflp_errlog_t;
-
-typedef struct _fflp_stats {
-	uint32_t 		tcam_entries;
-	uint32_t 		fcram_entries;
-	uint32_t 		tcam_parity_err;
-	uint32_t 		tcam_ecc_err;
-	uint32_t 		vlan_parity_err;
-	uint32_t 		hash_lookup_err;
-	uint32_t 		hash_pio_err[MAX_PARTITION];
-	fflp_errlog_t		errlog;
-} nxge_fflp_stats_t, *p_nxge_fflp_stats_t;
-
-/*
- * The FCRAM (hash table) consists of 1M cells,
- * each 64 bytes wide. Each cell can hold any one of:
- * 2 IPV4 exact match entries (32 bytes each)
- * 1 IPV6 exact match entry (56 bytes) plus
- *    1 optimistic match entry (8 bytes)
- * 8 optimistic match entries (8 bytes each)
- * In the case of an IPV4 exact match, half of the cell
- * (the first or the second 32 bytes) can instead be used
- * to hold 4 optimistic matches.
- */
-
-#define	FCRAM_CELL_EMPTY	0x00
-#define	FCRAM_CELL_IPV4_IPV4	0x01
-#define	FCRAM_CELL_IPV4_OPT	0x02
-#define	FCRAM_CELL_OPT_IPV4	0x04
-#define	FCRAM_CELL_IPV6_OPT	0x08
-#define	FCRAM_CELL_OPT_OPT	0x10
-
-
-#define	FCRAM_SUBAREA0_OCCUPIED	0x01
-#define	FCRAM_SUBAREA1_OCCUPIED	0x02
-#define	FCRAM_SUBAREA2_OCCUPIED	0x04
-#define	FCRAM_SUBAREA3_OCCUPIED	0x08
-
-#define	FCRAM_SUBAREA4_OCCUPIED	0x10
-#define	FCRAM_SUBAREA5_OCCUPIED	0x20
-#define	FCRAM_SUBAREA6_OCCUPIED	0x40
-#define	FCRAM_SUBAREA7_OCCUPIED	0x80
-
-#define	FCRAM_IPV4_SUBAREA0_OCCUPIED \
-	(FCRAM_SUBAREA0_OCCUPIED | FCRAM_SUBAREA1_OCCUPIED | \
-	FCRAM_SUBAREA2_OCCUPIED | FCRAM_SUBAREA3_OCCUPIED)
-
-#define	FCRAM_IPV4_SUBAREA4_OCCUPIED \
-	(FCRAM_SUBAREA4_OCCUPIED | FCRAM_SUBAREA5_OCCUPIED | \
-	FCRAM_SUBAREA6_OCCUPIED | FCRAM_SUBAREA7_OCCUPIED)
-
-
-#define	FCRAM_IPV6_SUBAREA0_OCCUPIED \
-	(FCRAM_SUBAREA0_OCCUPIED | FCRAM_SUBAREA1_OCCUPIED | \
-	FCRAM_SUBAREA2_OCCUPIED | FCRAM_SUBAREA3_OCCUPIED | \
-	FCRAM_SUBAREA4_OCCUPIED | FCRAM_SUBAREA5_OCCUPIED | \
-	FCRAM_SUBAREA6_OCCUPIED)
-
-	/*
-	 * The current occupancy state of each FCRAM cell is
-	 * described by the fcram_cell_t data structure.
-	 * The "type" field denotes the type of entry (or combination)
-	 * the cell holds (FCRAM_CELL_EMPTY ...... FCRAM_CELL_OPT_OPT).
-	 * The "occupied" field indicates whether the individual 8-byte
-	 * subareas within the cell are occupied.
-	 */
-
-typedef struct _fcram_cell {
-	uint32_t 		type:8;
-	uint32_t 		occupied:8;
-	uint32_t 		shadow_loc:16;
-} fcram_cell_t, *p_fcram_cell_t;
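-
-/*
- * Usage sketch (editorial example; the function name is illustrative):
- * before an IPv4 exact-match entry can be placed in the first half of a
- * cell, all four of that half's 8-byte subareas must be free, i.e. none
- * of the FCRAM_IPV4_SUBAREA0_OCCUPIED bits may be set in "occupied".
- */
-static int
-nxge_ex_fcram_half0_free(p_fcram_cell_t cell)
-{
-	return ((cell->occupied & FCRAM_IPV4_SUBAREA0_OCCUPIED) == 0);
-}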
-
-typedef struct _fcram_parition {
-	uint8_t 		id;
-	uint8_t 		base;
-	uint8_t 		mask;
-	uint8_t 		reloc;
-	uint32_t 		flags;
-#define	HASH_PARTITION_ENABLED 1
-	uint32_t 		offset;
-	uint32_t 		size;
-} fcram_parition_t, *p_fcram_partition_t;
-
-
-typedef struct _tcam_flow_spec {
-	tcam_entry_t tce;
-	uint64_t flags;
-	uint64_t user_info;
-} tcam_flow_spec_t, *p_tcam_flow_spec_t;
-
-
-/*
- * Used for configuration.
- * Both ndd and nxge.conf use the following definitions.
- */
-
-#define	NXGE_CLASS_CONFIG_PARAMS	20
-/* Used for ip class flow key and tcam key config */
-
-#define	NXGE_CLASS_TCAM_LOOKUP		0x0001
-#define	NXGE_CLASS_TCAM_USE_SRC_ADDR	0x0002
-#define	NXGE_CLASS_FLOW_USE_PORTNUM	0x0010
-#define	NXGE_CLASS_FLOW_USE_L2DA	0x0020
-#define	NXGE_CLASS_FLOW_USE_VLAN	0x0040
-#define	NXGE_CLASS_FLOW_USE_PROTO	0x0080
-#define	NXGE_CLASS_FLOW_USE_IPSRC	0x0100
-#define	NXGE_CLASS_FLOW_USE_IPDST	0x0200
-#define	NXGE_CLASS_FLOW_USE_SRC_PORT	0x0400
-#define	NXGE_CLASS_FLOW_USE_DST_PORT	0x0800
-#define	NXGE_CLASS_DISCARD		0x80000000
-
-/* these are used for quick configs */
-#define	NXGE_CLASS_FLOW_WEB_SERVER	NXGE_CLASS_FLOW_USE_IPSRC | \
-					NXGE_CLASS_FLOW_USE_SRC_PORT
-
-#define	NXGE_CLASS_FLOW_GEN_SERVER	NXGE_CLASS_FLOW_USE_IPSRC | \
-					NXGE_CLASS_FLOW_USE_IPDST | \
-					NXGE_CLASS_FLOW_USE_SRC_PORT |	\
-					NXGE_CLASS_FLOW_USE_DST_PORT | \
-					NXGE_CLASS_FLOW_USE_PROTO | \
-					NXGE_CLASS_FLOW_USE_L2DA | \
-					NXGE_CLASS_FLOW_USE_VLAN
-
-/*
- * Used for user classes.
- */
-
-
-/* Ethernet Classes */
-#define	NXGE_CLASS_CFG_ETHER_TYPE_MASK		0x0000FFFF
-#define	NXGE_CLASS_CFG_ETHER_ENABLE_MASK	0x40000000
-
-/* IP Classes */
-#define	NXGE_CLASS_CFG_IP_TOS_MASK		0x000000FF
-#define	NXGE_CLASS_CFG_IP_TOS_SHIFT		0
-#define	NXGE_CLASS_CFG_IP_TOS_MASK_MASK		0x0000FF00
-#define	NXGE_CLASS_CFG_IP_TOS_MASK_SHIFT	8
-#define	NXGE_CLASS_CFG_IP_PROTO_MASK		0x00FFFF00
-#define	NXGE_CLASS_CFG_IP_PROTO_SHIFT		16
-
-#define	NXGE_CLASS_CFG_IP_IPV6_MASK		0x01000000
-#define	NXGE_CLASS_CFG_IP_PARAM_MASK	NXGE_CLASS_CFG_IP_TOS_MASK | \
-					NXGE_CLASS_CFG_IP_TOS_MASK_MASK | \
-					NXGE_CLASS_CFG_IP_PROTO_MASK | \
-					NXGE_CLASS_CFG_IP_IPV6_MASK
-
-#define	NXGE_CLASS_CFG_IP_ENABLE_MASK		0x40000000
-
-typedef struct _vlan_rdcgrp_map {
-	uint32_t		rsrvd:8;
-	uint32_t		vid:16;
-	uint32_t		rdc_grp:8;
-}	vlan_rdcgrp_map_t, *p_vlan_rdcgrp_map_t;
-
-#define	NXGE_INIT_VLAN_RDCG_TBL	32
-
-typedef struct _nxge_classify {
-	nxge_os_mutex_t 	tcam_lock;
-	nxge_os_mutex_t		fcram_lock;
-	nxge_os_mutex_t		hash_lock[MAX_PARTITION];
-	uint32_t 		tcam_size;
-	uint32_t 		state;
-#define	NXGE_FFLP_HW_RESET	0x1
-#define	NXGE_FFLP_HW_INIT	0x2
-#define	NXGE_FFLP_SW_INIT	0x4
-#define	NXGE_FFLP_FCRAM_PART	0x80000000
-	p_nxge_fflp_stats_t	fflp_stats;
-
-	tcam_flow_spec_t    *tcam_entries;
-	uint8_t		    tcam_location;
-#define	NXGE_FLOW_NO_SUPPORT  0x0
-#define	NXGE_FLOW_USE_TCAM    0x1
-#define	NXGE_FLOW_USE_FCRAM   0x2
-#define	NXGE_FLOW_USE_TCAM_FCRAM   0x3
-
-#define	NXGE_FLOW_COMPUTE_H1   0x10
-#define	NXGE_FLOW_COMPUTE_H2   0x20
-	uint8_t	fragment_bug;
-	uint8_t	fragment_bug_location;
-	fcram_cell_t		*hash_table; /* allocated for Neptune only */
-	fcram_parition_t    partition[MAX_PARTITION];
-} nxge_classify_t, *p_nxge_classify_t;
-
-#ifdef	__cplusplus
-}
-#endif
-
-#endif	/* _SYS_NXGE_NXGE_FFLP_H */
--- a/usr/src/uts/sun4v/sys/nxge/nxge_fflp_hash.h	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,58 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef _SYS_NXGE_NXGE_CRC_H
-#define	_SYS_NXGE_NXGE_CRC_H
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-void nxge_crc32c_init(void);
-uint32_t nxge_crc32c(uint32_t, const uint8_t *, int);
-
-void nxge_crc_ccitt_init(void);
-uint16_t nxge_crc_ccitt(uint16_t, const uint8_t *, int);
-
-uint32_t nxge_compute_h1_table1(uint32_t, uint32_t *, uint32_t);
-uint32_t nxge_compute_h1_table4(uint32_t, uint32_t *, uint32_t);
-uint32_t nxge_compute_h1_serial(uint32_t crcin, uint32_t *, uint32_t);
-
-#define	nxge_compute_h2(cin, flow, len)			\
-	nxge_crc_ccitt(cin, flow, len)
-
-void nxge_init_h1_table(void);
-
-#define	nxge_compute_h1(cin, flow, len)			\
-	nxge_compute_h1_table4(cin, flow, len)
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _SYS_NXGE_NXGE_CRC_H */
--- a/usr/src/uts/sun4v/sys/nxge/nxge_fflp_hw.h	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1664 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef	_SYS_NXGE_NXGE_FFLP_HW_H
-#define	_SYS_NXGE_NXGE_FFLP_HW_H
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#ifdef	__cplusplus
-extern "C" {
-#endif
-
-#include <nxge_defs.h>
-
-
-/* FZC_FFLP Offsets */
-#define	    FFLP_ENET_VLAN_TBL_REG	(FZC_FFLP + 0x00000)
-
-	/* defines for FFLP_ENET_VLAN_TBL */
-
-#define	ENET_VLAN_TBL_VLANRDCTBLN0_MASK 	0x0000000000000003ULL
-#define	ENET_VLAN_TBL_VLANRDCTBLN0_SHIFT 	0
-#define	ENET_VLAN_TBL_VPR0_MASK			0x0000000000000008ULL
-#define	ENET_VLAN_TBL_VPR0_SHIFT		3
-
-#define	ENET_VLAN_TBL_VLANRDCTBLN1_MASK 	0x0000000000000030ULL
-#define	ENET_VLAN_TBL_VLANRDCTBLN1_SHIFT	4
-#define	ENET_VLAN_TBL_VPR1_MASK			0x0000000000000080ULL
-#define	ENET_VLAN_TBL_VPR1_SHIFT		7
-
-#define	ENET_VLAN_TBL_VLANRDCTBLN2_MASK 	0x0000000000000300ULL
-#define	ENET_VLAN_TBL_VLANRDCTBLN2_SHIFT 	8
-#define	ENET_VLAN_TBL_VPR2_MASK			0x0000000000000800ULL
-#define	ENET_VLAN_TBL_VPR2_SHIFT		11
-
-#define	ENET_VLAN_TBL_VLANRDCTBLN3_MASK 	0x0000000000003000ULL
-#define	ENET_VLAN_TBL_VLANRDCTBLN3_SHIFT 	12
-#define	ENET_VLAN_TBL_VPR3_MASK			0x0000000000008000ULL
-#define	ENET_VLAN_TBL_VPR3_SHIFT		15
-
-#define	ENET_VLAN_TBL_PARITY0_MASK		0x0000000000010000ULL
-#define	ENET_VLAN_TBL_PARITY0_SHIFT		16
-#define	ENET_VLAN_TBL_PARITY1_MASK		0x0000000000020000ULL
-#define	ENET_VLAN_TBL_PARITY1_SHIFT		17
-
-
-
-typedef union _fflp_enet_vlan_tbl_t {
-    uint64_t value;
-    struct {
-#if	defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#ifdef _BIT_FIELDS_HTOL
-			uint32_t rsrvd:14;
-			uint32_t parity1:1;
-			uint32_t parity0:1;
-			uint32_t vpr3:1;
-			uint32_t vlanrdctbln3:3;
-			uint32_t vpr2:1;
-			uint32_t vlanrdctbln2:3;
-			uint32_t vpr1:1;
-			uint32_t vlanrdctbln1:3;
-			uint32_t vpr0:1;
-			uint32_t vlanrdctbln0:3;
-#else
-			uint32_t vlanrdctbln0:3;
-			uint32_t vpr0:1;
-			uint32_t vlanrdctbln1:3;
-			uint32_t vpr1:1;
-			uint32_t vlanrdctbln2:3;
-			uint32_t vpr2:1;
-			uint32_t vlanrdctbln3:3;
-			uint32_t vpr3:1;
-			uint32_t parity0:1;
-			uint32_t parity1:1;
-			uint32_t rsrvd:14;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} fflp_enet_vlan_tbl_t, *p_fflp_enet_vlan_tbl_t;
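-
-/*
- * Usage sketch (editorial example; the function name is illustrative):
- * steer VLAN traffic seen on port 1 to RDC table "tbl" and set the
- * port's VLAN preference bit, using the per-port fields of the entry
- * layout above.
- */
-static void
-nxge_ex_vlan_tbl_set_port1(p_fflp_enet_vlan_tbl_t vtp, uint8_t tbl)
-{
-	vtp->bits.ldw.vlanrdctbln1 = tbl;
-	vtp->bits.ldw.vpr1 = 1;
-}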
-
-
-#define	FFLP_TCAM_CLS_BASE_OFFSET (FZC_FFLP + 0x20000)
-#define	FFLP_L2_CLS_ENET1_REG	  (FZC_FFLP + 0x20000)
-#define	FFLP_L2_CLS_ENET2_REG	  (FZC_FFLP + 0x20008)
-
-
-
-typedef union _tcam_class_prg_ether_t {
-#define	TCAM_ENET_USR_CLASS_ENABLE   0x1
-#define	TCAM_ENET_USR_CLASS_DISABLE  0x0
-
-    uint64_t value;
-    struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#ifdef _BIT_FIELDS_HTOL
-			uint32_t rsrvd:15;
-			uint32_t valid:1;
-			uint32_t etype:16;
-#else
-			uint32_t etype:16;
-			uint32_t valid:1;
-			uint32_t rsrvd:15;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} tcam_class_prg_ether_t, *p_tcam_class_prg_ether_t;
-
-
-#define		FFLP_L3_CLS_IP_U4_REG	(FZC_FFLP + 0x20010)
-#define		FFLP_L3_CLS_IP_U5_REG	(FZC_FFLP + 0x20018)
-#define		FFLP_L3_CLS_IP_U6_REG	(FZC_FFLP + 0x20020)
-#define		FFLP_L3_CLS_IP_U7_REG	(FZC_FFLP + 0x20028)
-
-typedef union _tcam_class_prg_ip_t {
-#define	TCAM_IP_USR_CLASS_ENABLE   0x1
-#define	TCAM_IP_USR_CLASS_DISABLE  0x0
-
-    uint64_t value;
-    struct {
-#if	defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#ifdef _BIT_FIELDS_HTOL
-			uint32_t rsrvd:6;
-			uint32_t valid:1;
-			uint32_t ipver:1;
-			uint32_t pid:8;
-			uint32_t tosmask:8;
-			uint32_t tos:8;
-#else
-			uint32_t tos:8;
-			uint32_t tosmask:8;
-			uint32_t pid:8;
-			uint32_t ipver:1;
-			uint32_t valid:1;
-			uint32_t rsrvd:6;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} tcam_class_prg_ip_t, *p_tcam_class_prg_ip_t;
-/* define the classes which use the above structure */
-
-typedef enum fflp_tcam_class {
-    TCAM_CLASS_INVALID = 0,
-    TCAM_CLASS_DUMMY = 1,
-    TCAM_CLASS_ETYPE_1 = 2,
-    TCAM_CLASS_ETYPE_2,
-    TCAM_CLASS_IP_USER_4,
-    TCAM_CLASS_IP_USER_5,
-    TCAM_CLASS_IP_USER_6,
-    TCAM_CLASS_IP_USER_7,
-    TCAM_CLASS_TCP_IPV4,
-    TCAM_CLASS_UDP_IPV4,
-    TCAM_CLASS_AH_ESP_IPV4,
-    TCAM_CLASS_SCTP_IPV4,
-    TCAM_CLASS_TCP_IPV6,
-    TCAM_CLASS_UDP_IPV6,
-    TCAM_CLASS_AH_ESP_IPV6,
-    TCAM_CLASS_SCTP_IPV6,
-    TCAM_CLASS_ARP,
-    TCAM_CLASS_RARP,
-    TCAM_CLASS_DUMMY_12,
-    TCAM_CLASS_DUMMY_13,
-    TCAM_CLASS_DUMMY_14,
-    TCAM_CLASS_DUMMY_15,
-    TCAM_CLASS_MAX
-} tcam_class_t;
-
-
-
-/*
- * Specify how to build TCAM key for L3
- * IP Classes. Both User configured and
- * hardwired IP services are included.
- * These are the supported 12 classes.
- */
-
-#define		FFLP_TCAM_KEY_BASE_OFFSET	(FZC_FFLP + 0x20030)
-#define		FFLP_TCAM_KEY_IP_USR4_REG		(FZC_FFLP + 0x20030)
-#define		FFLP_TCAM_KEY_IP_USR5_REG		(FZC_FFLP + 0x20038)
-#define		FFLP_TCAM_KEY_IP_USR6_REG		(FZC_FFLP + 0x20040)
-#define		FFLP_TCAM_KEY_IP_USR7_REG		(FZC_FFLP + 0x20048)
-#define		FFLP_TCAM_KEY_IP4_TCP_REG		(FZC_FFLP + 0x20050)
-#define		FFLP_TCAM_KEY_IP4_UDP_REG		(FZC_FFLP + 0x20058)
-#define		FFLP_TCAM_KEY_IP4_AH_ESP_REG	(FZC_FFLP + 0x20060)
-#define		FFLP_TCAM_KEY_IP4_SCTP_REG		(FZC_FFLP + 0x20068)
-#define		FFLP_TCAM_KEY_IP6_TCP_REG		(FZC_FFLP + 0x20070)
-#define		FFLP_TCAM_KEY_IP6_UDP_REG		(FZC_FFLP + 0x20078)
-#define		FFLP_TCAM_KEY_IP6_AH_ESP_REG	(FZC_FFLP + 0x20080)
-#define		FFLP_TCAM_KEY_IP6_SCTP_REG		(FZC_FFLP + 0x20088)
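-
-/*
- * Layout sketch (editorial example; the macro below is illustrative and
- * not part of the original interface): the per-class key registers above
- * sit 8 bytes apart from FFLP_TCAM_KEY_BASE_OFFSET in tcam_class_t order
- * starting at TCAM_CLASS_IP_USER_4, so TCAM_CLASS_TCP_IPV4 resolves to
- * FFLP_TCAM_KEY_IP4_TCP_REG (FZC_FFLP + 0x20050).
- */
-#define	NXGE_EX_TCAM_KEY_REG(class)	\
-	(FFLP_TCAM_KEY_BASE_OFFSET + ((class) - TCAM_CLASS_IP_USER_4) * 8)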
-
-
-typedef union _tcam_class_key_ip_t {
-    uint64_t value;
-    struct {
-#if	defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#ifdef _BIT_FIELDS_HTOL
-			uint32_t rsrvd2:28;
-			uint32_t discard:1;
-			uint32_t tsel:1;
-			uint32_t rsrvd:1;
-			uint32_t ipaddr:1;
-#else
-			uint32_t ipaddr:1;
-			uint32_t rsrvd:1;
-			uint32_t tsel:1;
-			uint32_t discard:1;
-			uint32_t rsrvd2:28;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} tcam_class_key_ip_t, *p_tcam_class_key_ip_t;
-
-
-
-#define	FFLP_TCAM_KEY_0_REG			(FZC_FFLP + 0x20090)
-#define	FFLP_TCAM_KEY_1_REG		(FZC_FFLP + 0x20098)
-#define	FFLP_TCAM_KEY_2_REG		(FZC_FFLP + 0x200A0)
-#define	FFLP_TCAM_KEY_3_REG	(FZC_FFLP + 0x200A8)
-#define	FFLP_TCAM_MASK_0_REG	(FZC_FFLP + 0x200B0)
-#define	FFLP_TCAM_MASK_1_REG	(FZC_FFLP + 0x200B8)
-#define	FFLP_TCAM_MASK_2_REG	(FZC_FFLP + 0x200C0)
-#define	FFLP_TCAM_MASK_3_REG	(FZC_FFLP + 0x200C8)
-
-#define		FFLP_TCAM_CTL_REG		(FZC_FFLP + 0x200D0)
-
-/* bit defines for FFLP_TCAM_CTL register */
-#define	   TCAM_CTL_TCAM_WR		  0x0ULL
-#define	   TCAM_CTL_TCAM_RD		  0x040000ULL
-#define	   TCAM_CTL_TCAM_CMP		  0x080000ULL
-#define	   TCAM_CTL_RAM_WR		  0x100000ULL
-#define	   TCAM_CTL_RAM_RD		  0x140000ULL
-#define	   TCAM_CTL_RWC_STAT		  0x0020000ULL
-#define	   TCAM_CTL_RWC_MATCH		  0x0010000ULL
-
-
-typedef union _tcam_ctl_t {
-#define	TCAM_CTL_RWC_TCAM_WR	0x0
-#define	TCAM_CTL_RWC_TCAM_RD	0x1
-#define	TCAM_CTL_RWC_TCAM_CMP	0x2
-#define	TCAM_CTL_RWC_RAM_WR	0x4
-#define	TCAM_CTL_RWC_RAM_RD	0x5
-#define	TCAM_CTL_RWC_RWC_STAT	0x1
-#define	TCAM_CTL_RWC_RWC_MATCH	0x1
-
-	uint64_t value;
-	struct {
-#if	defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#ifdef _BIT_FIELDS_HTOL
-			uint32_t rsrvd2:11;
-			uint32_t rwc:3;
-			uint32_t stat:1;
-			uint32_t match:1;
-			uint32_t rsrvd:6;
-			uint32_t location:10;
-#else
-			uint32_t location:10;
-			uint32_t rsrvd:6;
-			uint32_t match:1;
-			uint32_t stat:1;
-			uint32_t rwc:3;
-			uint32_t rsrvd2:11;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} tcam_ctl_t, *p_tcam_ctl_t;
-
-
-
-/* Bit defines for TCAM ASC RAM */
-
-
-typedef union _tcam_res_t {
-	uint64_t value;
-	struct {
-#if	defined(_BIG_ENDIAN)
-		struct {
-			uint32_t rsrvd:22;
-			uint32_t syndrome:10;
-		} hdw;
-#endif
-		struct {
-#ifdef _BIT_FIELDS_HTOL
-			uint32_t syndrome:6;
-			uint32_t zfid:12;
-			uint32_t v4_ecc_ck:1;
-			uint32_t disc:1;
-			uint32_t tres:2;
-			uint32_t rdctbl:3;
-			uint32_t offset:5;
-			uint32_t zfld:1;
-			uint32_t age:1;
-#else
-			uint32_t age:1;
-			uint32_t zfld:1;
-			uint32_t offset:5;
-			uint32_t rdctbl:3;
-			uint32_t tres:2;
-			uint32_t disc:1;
-			uint32_t v4_ecc_ck:1;
-			uint32_t zfid:12;
-			uint32_t syndrome:6;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		struct {
-			uint32_t syndrome:10;
-			uint32_t rsrvd:22;
-		} hdw;
-#endif
-	} bits;
-} tcam_res_t, *p_tcam_res_t;
-
-
-
-#define	TCAM_ASC_DATA_AGE		0x0000000000000001ULL
-#define	TCAM_ASC_DATA_AGE_SHIFT		0x0
-#define	TCAM_ASC_DATA_ZFVLD		0x0000000000000002ULL
-#define	TCAM_ASC_DATA_ZFVLD_SHIFT	1
-
-#define	TCAM_ASC_DATA_OFFSET_MASK	0x000000000000007CULL
-#define	TCAM_ASC_DATA_OFFSET_SHIFT	2
-
-#define	TCAM_ASC_DATA_RDCTBL_MASK	0x0000000000000380ULL
-#define	TCAM_ASC_DATA_RDCTBL_SHIFT	7
-#define	TCAM_ASC_DATA_TRES_MASK		0x0000000000000C00ULL
-#define	TRES_CONT_USE_L2RDC		0x00
-#define	TRES_TERM_USE_OFFSET		0x01
-#define	TRES_CONT_OVRD_L2RDC		0x02
-#define	TRES_TERM_OVRD_L2RDC		0x03
-
-#define	TCAM_ASC_DATA_TRES_SHIFT	10
-#define	TCAM_TRES_CONT_USE_L2RDC	\
-		(0x0000000000000000ULL << TCAM_ASC_DATA_TRES_SHIFT)
-#define	TCAM_TRES_TERM_USE_OFFSET	\
-		(0x0000000000000001ULL << TCAM_ASC_DATA_TRES_SHIFT)
-#define	TCAM_TRES_CONT_OVRD_L2RDC	\
-		(0x0000000000000002ULL << TCAM_ASC_DATA_TRES_SHIFT)
-#define	TCAM_TRES_TERM_OVRD_L2RDC	\
-		(0x0000000000000003ULL << TCAM_ASC_DATA_TRES_SHIFT)
-
-#define	TCAM_ASC_DATA_DISC_MASK		0x0000000000001000ULL
-#define	TCAM_ASC_DATA_DISC_SHIFT	12
-#define	TCAM_ASC_DATA_V4_ECC_OK_MASK    0x0000000000002000ULL
-#define	TCAM_ASC_DATA_V4_ECC_OK_SHIFT	13
-#define	TCAM_ASC_DATA_V4_ECC_OK		\
-		(0x0000000000000001ULL << TCAM_ASC_DATA_V4_ECC_OK_SHIFT)
-
-#define	TCAM_ASC_DATA_ZFID_MASK		0x0000000003FFC000ULL
-#define	TCAM_ASC_DATA_ZFID_SHIFT	14
-#define	TCAM_ASC_DATA_ZFID(value)	\
-		((value & TCAM_ASC_DATA_ZFID_MASK) >> TCAM_ASC_DATA_ZFID_SHIFT)
-
-#define	TCAM_ASC_DATA_SYNDR_MASK	0x000003FFFC000000ULL
-#define	TCAM_ASC_DATA_SYNDR_SHIFT	26
-#define	TCAM_ASC_DATA_SYNDR(value)  \
-	((value & TCAM_ASC_DATA_SYNDR_MASK) >> TCAM_ASC_DATA_SYNDR_SHIFT)
-
-
-	/* error registers */
-
-#define	FFLP_VLAN_PAR_ERR_REG		(FZC_FFLP + 0x08000)
-
-typedef union _vlan_par_err_t {
-    uint64_t value;
-    struct {
-#if	defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#ifdef _BIT_FIELDS_HTOL
-			uint32_t err:1;
-			uint32_t m_err:1;
-			uint32_t addr:12;
-			uint32_t data:18;
-#else
-			uint32_t data:18;
-			uint32_t addr:12;
-			uint32_t m_err:1;
-			uint32_t err:1;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} vlan_par_err_t, *p_vlan_par_err_t;
-
-
-#define		FFLP_TCAM_ERR_REG		(FZC_FFLP + 0x200D8)
-
-typedef union _tcam_err_t {
-    uint64_t value;
-    struct {
-#if	defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#ifdef _BIT_FIELDS_HTOL
-			uint32_t err:1;
-			uint32_t p_ecc:1;
-			uint32_t mult:1;
-			uint32_t rsrvd:5;
-			uint32_t addr:8;
-			uint32_t syndrome:16;
-#else
-			uint32_t syndrome:16;
-			uint32_t addr:8;
-			uint32_t rsrvd:5;
-			uint32_t mult:1;
-			uint32_t p_ecc:1;
-			uint32_t err:1;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} tcam_err_t, *p_tcam_err_t;
-
-
-#define		TCAM_ERR_SYNDROME_MASK		0x000000000000FFFFULL
-#define		TCAM_ERR_MULT_SHIFT		29
-#define		TCAM_ERR_MULT			0x0000000020000000ULL
-#define		TCAM_ERR_P_ECC			0x0000000040000000ULL
-#define		TCAM_ERR_ERR			0x0000000080000000ULL
-
-#define		HASH_LKUP_ERR_LOG1_REG		(FZC_FFLP + 0x200E0)
-#define		HASH_LKUP_ERR_LOG2_REG		(FZC_FFLP + 0x200E8)
-
-
-
-typedef union _hash_lookup_err_log1_t {
-    uint64_t value;
-    struct {
-#if	defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#ifdef _BIT_FIELDS_HTOL
-			uint32_t rsrvd:28;
-			uint32_t ecc_err:1;
-			uint32_t mult_lk:1;
-			uint32_t cu:1;
-			uint32_t mult_bit:1;
-#else
-			uint32_t mult_bit:1;
-			uint32_t cu:1;
-			uint32_t mult_lk:1;
-			uint32_t ecc_err:1;
-			uint32_t rsrvd:28;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} hash_lookup_err_log1_t, *p_hash_lookup_err_log1_t;
-
-
-
-typedef union _hash_lookup_err_log2_t {
-    uint64_t value;
-    struct {
-#if	defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#ifdef _BIT_FIELDS_HTOL
-			uint32_t rsrvd:1;
-			uint32_t h1:20;
-			uint32_t subarea:3;
-			uint32_t syndrome:8;
-#else
-			uint32_t syndrome:8;
-			uint32_t subarea:3;
-			uint32_t h1:20;
-			uint32_t rsrvd:1;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} hash_lookup_err_log2_t, *p_hash_lookup_err_log2_t;
-
-
-
-#define		FFLP_FCRAM_ERR_TST0_REG	(FZC_FFLP + 0x20128)
-
-typedef union _fcram_err_tst0_t {
-    uint64_t value;
-    struct {
-#if	defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#ifdef _BIT_FIELDS_HTOL
-			uint32_t rsrvd:24;
-			uint32_t syndrome_mask:8;
-#else
-			uint32_t syndrome_mask:8;
-			uint32_t rsrvd:24;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} fcram_err_tst0_t, *p_fcram_err_tst0_t;
-
-
-#define		FFLP_FCRAM_ERR_TST1_REG	(FZC_FFLP + 0x20130)
-#define		FFLP_FCRAM_ERR_TST2_REG	(FZC_FFLP + 0x20138)
-
-typedef union _fcram_err_tst_t {
-    uint64_t value;
-    struct {
-#if	defined(_BIG_ENDIAN)
-		struct {
-			uint32_t dat;
-		} hdw;
-#endif
-		struct {
-			uint32_t dat;
-		} ldw;
-#ifndef _BIG_ENDIAN
-		struct {
-			uint32_t dat;
-		} hdw;
-#endif
-	} bits;
-} fcram_err_tst1_t, *p_fcram_err_tst1_t,
-	fcram_err_tst2_t, *p_fcram_err_tst2_t,
-	fcram_err_data_t, *p_fcram_err_data_t;
-
-
-
-#define		FFLP_ERR_MSK_REG	(FZC_FFLP + 0x20140)
-
-typedef union _fflp_err_mask_t {
-    uint64_t value;
-    struct {
-#if	defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#ifdef _BIT_FIELDS_HTOL
-			uint32_t rsrvd:21;
-			uint32_t hash_tbl_dat:8;
-			uint32_t hash_tbl_lkup:1;
-			uint32_t tcam:1;
-			uint32_t vlan:1;
-#else
-			uint32_t vlan:1;
-			uint32_t tcam:1;
-			uint32_t hash_tbl_lkup:1;
-			uint32_t hash_tbl_dat:8;
-			uint32_t rsrvd:21;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} fflp_err_mask_t, *p_fflp_err_mask_t;
-
-#define	FFLP_ERR_VLAN_MASK 0x00000001ULL
-#define	FFLP_ERR_VLAN 0x00000001ULL
-#define	FFLP_ERR_VLAN_SHIFT 0x0
-
-#define	FFLP_ERR_TCAM_MASK 0x00000002ULL
-#define	FFLP_ERR_TCAM 0x00000001ULL
-#define	FFLP_ERR_TCAM_SHIFT 0x1
-
-#define	FFLP_ERR_HASH_TBL_LKUP_MASK 0x00000004ULL
-#define	FFLP_ERR_HASH_TBL_LKUP 0x00000001ULL
-#define	FFLP_ERR_HASH_TBL_LKUP_SHIFT 0x2
-
-#define	FFLP_ERR_HASH_TBL_DAT_MASK 0x00000007F8ULL
-#define	FFLP_ERR_HASH_TBL_DAT 0x0000000FFULL
-#define	FFLP_ERR_HASH_TBL_DAT_SHIFT 0x3
-
-#define	FFLP_ERR_MASK_ALL (FFLP_ERR_VLAN_MASK | FFLP_ERR_TCAM_MASK | \
-			    FFLP_ERR_HASH_TBL_LKUP_MASK | \
-			    FFLP_ERR_HASH_TBL_DAT_MASK)
-
-
-#define		FFLP_CFG_1_REG	(FZC_FFLP + 0x20100)
-
-typedef union _fflp_cfg_1_t {
-    uint64_t value;
-    struct {
-#if	defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#ifdef _BIT_FIELDS_HTOL
-			uint32_t rsrvd:5;
-			uint32_t tcam_disable:1;
-			uint32_t pio_dbg_sel:3;
-			uint32_t pio_fio_rst:1;
-			uint32_t pio_fio_lat:2;
-			uint32_t camlatency:4;
-			uint32_t camratio:4;
-			uint32_t fcramratio:4;
-			uint32_t fcramoutdr:4;
-			uint32_t fcramqs:1;
-			uint32_t errordis:1;
-			uint32_t fflpinitdone:1;
-			uint32_t llcsnap:1;
-#else
-			uint32_t llcsnap:1;
-			uint32_t fflpinitdone:1;
-			uint32_t errordis:1;
-			uint32_t fcramqs:1;
-			uint32_t fcramoutdr:4;
-			uint32_t fcramratio:4;
-			uint32_t camratio:4;
-			uint32_t camlatency:4;
-			uint32_t pio_fio_lat:2;
-			uint32_t pio_fio_rst:1;
-			uint32_t pio_dbg_sel:3;
-			uint32_t tcam_disable:1;
-			uint32_t rsrvd:5;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} fflp_cfg_1_t, *p_fflp_cfg_1_t;
-
-
-typedef	enum fflp_fcram_output_drive {
-    FCRAM_OUTDR_NORMAL	= 0x0,
-    FCRAM_OUTDR_STRONG	= 0x5,
-    FCRAM_OUTDR_WEAK	= 0xa
-} fflp_fcram_output_drive_t;
-
-
-typedef	enum fflp_fcram_qs {
-    FCRAM_QS_MODE_QS	= 0x0,
-    FCRAM_QS_MODE_FREE	= 0x1
-} fflp_fcram_qs_t;
-
-#define		FCRAM_PIO_HIGH_PRI	0xf
-#define		FCRAM_PIO_MED_PRI	0xa
-#define		FCRAM_LOOKUP_HIGH_PRI	0x0
-#define		FCRAM_IO_DEFAULT_PRI	FCRAM_PIO_MED_PRI
-
-#define		TCAM_PIO_HIGH_PRI	0xf
-#define		TCAM_PIO_MED_PRI	0xa
-#define		TCAM_LOOKUP_HIGH_PRI	0x0
-#define		TCAM_IO_DEFAULT_PRI	TCAM_PIO_MED_PRI
-
-#define		TCAM_DEFAULT_LATENCY	0x4
-
-
-#define		FFLP_DBG_TRAIN_VCT_REG	(FZC_FFLP + 0x20148)
-
-typedef union _fflp_dbg_train_vct_t {
-    uint64_t value;
-    struct {
-#if	defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#ifdef _BIT_FIELDS_HTOL
-			uint32_t vector;
-#else
-			uint32_t vector;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} fflp_dbg_train_vct_t, *p_fflp_dbg_train_vct_t;
-
-
-
-#define		FFLP_TCP_CFLAG_MSK_REG	(FZC_FFLP + 0x20108)
-
-typedef union _tcp_cflag_mask_t {
-    uint64_t value;
-    struct {
-#if	defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#ifdef _BIT_FIELDS_HTOL
-			uint32_t rsrvd:20;
-			uint32_t mask:12;
-#else
-			uint32_t mask:12;
-			uint32_t rsrvd:20;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} tcp_cflag_mask_t, *p_tcp_cflag_mask_t;
-
-
-
-#define		FFLP_FCRAM_REF_TMR_REG		(FZC_FFLP + 0x20110)
-
-
-typedef union _fcram_ref_tmr_t {
-#define		FCRAM_REFRESH_DEFAULT_MAX_TIME	0x200
-#define		FCRAM_REFRESH_DEFAULT_MIN_TIME	0x200
-#define		FCRAM_REFRESH_DEFAULT_SYS_TIME	0x200
-#define		FCRAM_REFRESH_MAX_TICK		39 /* usecs */
-#define		FCRAM_REFRESH_MIN_TICK		400 /* nsecs */
-
-    uint64_t value;
-    struct {
-#if	defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#ifdef _BIT_FIELDS_HTOL
-			uint32_t max:16;
-			uint32_t min:16;
-#else
-			uint32_t min:16;
-			uint32_t max:16;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} fcram_ref_tmr_t, *p_fcram_ref_tmr_t;
-
-
-
-
-#define		FFLP_FCRAM_FIO_ADDR_REG	(FZC_FFLP + 0x20118)
-
-typedef union _fcram_fio_addr_t {
-    uint64_t value;
-    struct {
-#if	defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#ifdef _BIT_FIELDS_HTOL
-			uint32_t rsrvd:22;
-			uint32_t addr:10;
-#else
-			uint32_t addr:10;
-			uint32_t rsrvd:22;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} fcram_fio_addr_t, *p_fcram_fio_addr_t;
-
-
-#define		FFLP_FCRAM_FIO_DAT_REG	(FZC_FFLP + 0x20120)
-
-typedef union _fcram_fio_dat_t {
-    uint64_t value;
-    struct {
-#if	defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#ifdef _BIT_FIELDS_HTOL
-			uint32_t rsrvd:22;
-			uint32_t addr:10;
-#else
-			uint32_t addr:10;
-			uint32_t rsrvd:22;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} fcram_fio_dat_t, *p_fcram_fio_dat_t;
-
-
-#define	FFLP_FCRAM_PHY_RD_LAT_REG	(FZC_FFLP + 0x20150)
-
-typedef union _fcram_phy_rd_lat_t {
-	uint64_t value;
-	struct {
-#if	defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#ifdef _BIT_FIELDS_HTOL
-			uint32_t rsrvd:24;
-			uint32_t lat:8;
-#else
-			uint32_t lat:8;
-			uint32_t rsrvd:24;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} fcram_phy_rd_lat_t, *p_fcram_phy_rd_lat_t;
-
-
-/*
- * Specify how to build a flow key for IP
- * classes, both programmable and hardwired
- */
-#define		FFLP_FLOW_KEY_BASE_OFFSET		(FZC_FFLP + 0x40000)
-#define		FFLP_FLOW_KEY_IP_USR4_REG		(FZC_FFLP + 0x40000)
-#define		FFLP_FLOW_KEY_IP_USR5_REG		(FZC_FFLP + 0x40008)
-#define		FFLP_FLOW_KEY_IP_USR6_REG		(FZC_FFLP + 0x40010)
-#define		FFLP_FLOW_KEY_IP_USR7_REG		(FZC_FFLP + 0x40018)
-#define		FFLP_FLOW_KEY_IP4_TCP_REG		(FZC_FFLP + 0x40020)
-#define		FFLP_FLOW_KEY_IP4_UDP_REG		(FZC_FFLP + 0x40028)
-#define		FFLP_FLOW_KEY_IP4_AH_ESP_REG	(FZC_FFLP + 0x40030)
-#define		FFLP_FLOW_KEY_IP4_SCTP_REG		(FZC_FFLP + 0x40038)
-#define		FFLP_FLOW_KEY_IP6_TCP_REG		(FZC_FFLP + 0x40040)
-#define		FFLP_FLOW_KEY_IP6_UDP_REG		(FZC_FFLP + 0x40048)
-#define		FFLP_FLOW_KEY_IP6_AH_ESP_REG	(FZC_FFLP + 0x40050)
-#define		FFLP_FLOW_KEY_IP6_SCTP_REG		(FZC_FFLP + 0x40058)
-
-typedef union _flow_class_key_ip_t {
-    uint64_t value;
-    struct {
-#if	defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#ifdef _BIT_FIELDS_HTOL
-			uint32_t rsrvd2:22;
-			uint32_t port:1;
-			uint32_t l2da:1;
-			uint32_t vlan:1;
-			uint32_t ipsa:1;
-			uint32_t ipda:1;
-			uint32_t proto:1;
-			uint32_t l4_0:2;
-			uint32_t l4_1:2;
-#else
-			uint32_t l4_1:2;
-			uint32_t l4_0:2;
-			uint32_t proto:1;
-			uint32_t ipda:1;
-			uint32_t ipsa:1;
-			uint32_t vlan:1;
-			uint32_t l2da:1;
-			uint32_t port:1;
-			uint32_t rsrvd2:22;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} flow_class_key_ip_t, *p_flow_class_key_ip_t;
-
-
-#define		FFLP_H1POLY_REG		(FZC_FFLP + 0x40060)
-
-
-typedef union _hash_h1poly_t {
-    uint64_t value;
-    struct {
-#if	defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-			uint32_t init_value;
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} hash_h1poly_t, *p_hash_h1poly_t;
-
-#define		FFLP_H2POLY_REG		(FZC_FFLP + 0x40068)
-
-typedef union _hash_h2poly_t {
-    uint64_t value;
-    struct {
-#if	defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#ifdef _BIT_FIELDS_HTOL
-			uint32_t rsrvd:16;
-			uint32_t init_value:16;
-#else
-			uint32_t init_value:16;
-			uint32_t rsrvd:16;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} hash_h2poly_t, *p_hash_h2poly_t;
-
-#define		FFLP_FLW_PRT_SEL_REG		(FZC_FFLP + 0x40070)
-
-
-typedef union _flow_prt_sel_t {
-#define		FFLP_FCRAM_MAX_PARTITION	8
-    uint64_t value;
-    struct {
-#if	defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#ifdef _BIT_FIELDS_HTOL
-			uint32_t rsrvd3:15;
-			uint32_t ext:1;
-			uint32_t rsrvd2:3;
-			uint32_t mask:5;
-			uint32_t rsrvd:3;
-			uint32_t base:5;
-#else
-			uint32_t base:5;
-			uint32_t rsrvd:3;
-			uint32_t mask:5;
-			uint32_t rsrvd2:3;
-			uint32_t ext:1;
-			uint32_t rsrvd3:15;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} flow_prt_sel_t, *p_flow_prt_sel_t;
-
-
-
-/* FFLP Offsets */
-
-
-#define		FFLP_HASH_TBL_ADDR_REG		(FFLP + 0x00000)
-
-typedef union _hash_tbl_addr_t {
-    uint64_t value;
-    struct {
-#if	defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#ifdef _BIT_FIELDS_HTOL
-			uint32_t rsrvd:8;
-			uint32_t autoinc:1;
-			uint32_t addr:23;
-#else
-			uint32_t addr:23;
-			uint32_t autoinc:1;
-			uint32_t rsrvd:8;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} hash_tbl_addr_t, *p_hash_tbl_addr_t;
-
-
-#define		FFLP_HASH_TBL_DATA_REG		(FFLP + 0x00008)
-
-typedef union _hash_tbl_data_t {
-    uint64_t value;
-    struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-		uint32_t ldw;
-#else
-		uint32_t ldw;
-		uint32_t hdw;
-#endif
-	} bits;
-} hash_tbl_data_t, *p_hash_tbl_data_t;
-
-
-#define		FFLP_HASH_TBL_DATA_LOG_REG		(FFLP + 0x00010)
-
-
-typedef union _hash_tbl_data_log_t {
-    uint64_t value;
-    struct {
-#if	defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#ifdef _BIT_FIELDS_HTOL
-			uint32_t pio_err:1;
-			uint32_t fcram_addr:23;
-			uint32_t syndrome:8;
-#else
-			uint32_t syndrome:8;
-			uint32_t fcram_addr:23;
-			uint32_t pio_err:1;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} hash_tbl_data_log_t, *p_hash_tbl_data_log_t;
-
-
-
-#define	REG_PIO_WRITE64(handle, offset, value) \
-		NXGE_REG_WR64((handle), (offset), (value))
-#define	REG_PIO_READ64(handle, offset, val_p) \
-		NXGE_REG_RD64((handle), (offset), (val_p))
-
-
-#define	WRITE_TCAM_REG_CTL(handle, ctl) \
-		REG_PIO_WRITE64(handle, FFLP_TCAM_CTL_REG, ctl)
-
-#define	READ_TCAM_REG_CTL(handle, val_p) \
-		REG_PIO_READ64(handle, FFLP_TCAM_CTL_REG, val_p)
-
-
-#define	WRITE_TCAM_REG_KEY0(handle, key)	\
-		REG_PIO_WRITE64(handle,  FFLP_TCAM_KEY_0_REG, key)
-#define	WRITE_TCAM_REG_KEY1(handle, key) \
-		REG_PIO_WRITE64(handle,  FFLP_TCAM_KEY_1_REG, key)
-#define	WRITE_TCAM_REG_KEY2(handle, key) \
-		REG_PIO_WRITE64(handle,  FFLP_TCAM_KEY_2_REG, key)
-#define	WRITE_TCAM_REG_KEY3(handle, key) \
-		REG_PIO_WRITE64(handle,  FFLP_TCAM_KEY_3_REG, key)
-#define	WRITE_TCAM_REG_MASK0(handle, mask)   \
-		REG_PIO_WRITE64(handle,  FFLP_TCAM_MASK_0_REG, mask)
-#define	WRITE_TCAM_REG_MASK1(handle, mask)   \
-		REG_PIO_WRITE64(handle,  FFLP_TCAM_MASK_1_REG, mask)
-#define	WRITE_TCAM_REG_MASK2(handle, mask)   \
-		REG_PIO_WRITE64(handle,  FFLP_TCAM_MASK_2_REG, mask)
-#define	WRITE_TCAM_REG_MASK3(handle, mask)   \
-		REG_PIO_WRITE64(handle,  FFLP_TCAM_MASK_3_REG, mask)
-
-#define	READ_TCAM_REG_KEY0(handle, val_p)	\
-		REG_PIO_READ64(handle,  FFLP_TCAM_KEY_0_REG, val_p)
-#define	READ_TCAM_REG_KEY1(handle, val_p)	\
-		REG_PIO_READ64(handle,  FFLP_TCAM_KEY_1_REG, val_p)
-#define	READ_TCAM_REG_KEY2(handle, val_p)	\
-		REG_PIO_READ64(handle,  FFLP_TCAM_KEY_2_REG, val_p)
-#define	READ_TCAM_REG_KEY3(handle, val_p)	\
-		REG_PIO_READ64(handle,  FFLP_TCAM_KEY_3_REG, val_p)
-#define	READ_TCAM_REG_MASK0(handle, val_p)	\
-		REG_PIO_READ64(handle,  FFLP_TCAM_MASK_0_REG, val_p)
-#define	READ_TCAM_REG_MASK1(handle, val_p)	\
-		REG_PIO_READ64(handle,  FFLP_TCAM_MASK_1_REG, val_p)
-#define	READ_TCAM_REG_MASK2(handle, val_p)	\
-		REG_PIO_READ64(handle,  FFLP_TCAM_MASK_2_REG, val_p)
-#define	READ_TCAM_REG_MASK3(handle, val_p)	\
-		REG_PIO_READ64(handle,  FFLP_TCAM_MASK_3_REG, val_p)
-
-
-
-
-typedef struct tcam_ipv4 {
-#if defined(_BIG_ENDIAN)
-	uint32_t	reserved6;		/* 255 : 224 */
-	uint32_t	reserved5 : 24;		/* 223 : 200 */
-	uint32_t	cls_code : 5;		/* 199 : 195 */
-	uint32_t	reserved4 : 3;		/* 194 : 192 */
-	uint32_t	l2rd_tbl_num : 5;	/* 191: 187  */
-	uint32_t	noport : 1;		/* 186 */
-	uint32_t	reserved3 : 26;		/* 185: 160  */
-	uint32_t	reserved2;		/* 159: 128  */
-	uint32_t	reserved : 16;		/* 127 : 112 */
-	uint32_t	tos : 8;		/* 111 : 104 */
-	uint32_t	proto : 8;		/* 103 : 96  */
-	uint32_t	l4_port_spi;		/* 95 : 64   */
-	uint32_t	ip_src;			/* 63 : 32   */
-	uint32_t	ip_dest;		/* 31 : 0    */
-#else
-	uint32_t	ip_dest;		/* 31 : 0    */
-	uint32_t	ip_src;			/* 63 : 32   */
-	uint32_t	l4_port_spi;		/* 95 : 64   */
-	uint32_t	proto : 8;		/* 103 : 96  */
-	uint32_t	tos : 8;		/* 111 : 104 */
-	uint32_t	reserved : 16;		/* 127 : 112 */
-	uint32_t	reserved2;		/* 159: 128  */
-	uint32_t	reserved3 : 26;		/* 185: 160  */
-	uint32_t	noport : 1;		/* 186	*/
-	uint32_t	l2rd_tbl_num : 5;	/* 191: 187  */
-	uint32_t	reserved4 : 3;		/* 194 : 192 */
-	uint32_t	cls_code : 5;		/* 199 : 195 */
-	uint32_t	reserved5 : 24;		/* 223 : 200 */
-	uint32_t	reserved6;		/* 255 : 224 */
-#endif
-} tcam_ipv4_t;
-
-
-
-typedef struct tcam_reg {
-#if defined(_BIG_ENDIAN)
-    uint64_t		reg0;
-    uint64_t		reg1;
-    uint64_t		reg2;
-    uint64_t		reg3;
-#else
-    uint64_t		reg3;
-    uint64_t		reg2;
-    uint64_t		reg1;
-    uint64_t		reg0;
-#endif
-} tcam_reg_t;
-
-
-typedef struct tcam_ether {
-#if defined(_BIG_ENDIAN)
-	uint8_t		reserved3[7];		/* 255 : 200 */
-	uint8_t		cls_code : 5;		/* 199 : 195 */
-	uint8_t		reserved2 : 3;		/* 194 : 192 */
-	uint8_t		ethframe[11];		/* 191 : 104 */
-	uint8_t		reserved[13];		/* 103 : 0   */
-#else
-	uint8_t		reserved[13];		/* 103 : 0   */
-	uint8_t		ethframe[11];		/* 191 : 104 */
-	uint8_t		reserved2 : 3;		/* 194 : 192 */
-	uint8_t		cls_code : 5;		/* 199 : 195 */
-	uint8_t		reserved3[7];		/* 255 : 200 */
-#endif
-} tcam_ether_t;
-
-
-typedef struct tcam_ipv6 {
-#if defined(_BIG_ENDIAN)
-	uint32_t	reserved4;		/* 255 : 224 */
-	uint32_t	reserved3 : 24;		/* 223 : 200 */
-	uint32_t	cls_code : 5;		/* 199 : 195 */
-	uint32_t	reserved2 : 3;		/* 194 : 192 */
-	uint32_t	l2rd_tbl_num : 5;	/* 191: 187  */
-	uint32_t	noport : 1;		/* 186  */
-	uint32_t	reserved : 10;		/* 185 : 176 */
-	uint32_t	tos : 8;		/* 175 : 168 */
-	uint32_t	nxt_hdr : 8;		/* 167 : 160 */
-	uint32_t	l4_port_spi;		/* 159 : 128 */
-	uint32_t	ip_addr[4];		/* 127 : 0   */
-#else
-	uint32_t	ip_addr[4];		/* 127 : 0   */
-	uint32_t	l4_port_spi;		/* 159 : 128 */
-	uint32_t	nxt_hdr : 8;		/* 167 : 160 */
-	uint32_t	tos : 8;		/* 175 : 168 */
-	uint32_t	reserved : 10;		/* 185 : 176 */
-	uint32_t	noport : 1;		/* 186 */
-	uint32_t	l2rd_tbl_num : 5;	/* 191: 187  */
-	uint32_t	reserved2 : 3;		/* 194 : 192 */
-	uint32_t	cls_code : 5;		/* 199 : 195 */
-	uint32_t	reserved3 : 24;		/* 223 : 200 */
-	uint32_t	reserved4;		/* 255 : 224 */
-#endif
-} tcam_ipv6_t;
-
-
-typedef struct tcam_entry {
-    union  _tcam_entry {
-	tcam_reg_t	   regs_e;
-	tcam_ether_t	   ether_e;
-	tcam_ipv4_t	   ipv4_e;
-	tcam_ipv6_t	   ipv6_e;
-	} key, mask;
-	tcam_res_t	match_action;
-} tcam_entry_t;
-
-
-#define		key_reg0		key.regs_e.reg0
-#define		key_reg1		key.regs_e.reg1
-#define		key_reg2		key.regs_e.reg2
-#define		key_reg3		key.regs_e.reg3
-#define		mask_reg0		mask.regs_e.reg0
-#define		mask_reg1		mask.regs_e.reg1
-#define		mask_reg2		mask.regs_e.reg2
-#define		mask_reg3		mask.regs_e.reg3
-
-
-#define		key0			key.regs_e.reg0
-#define		key1			key.regs_e.reg1
-#define		key2			key.regs_e.reg2
-#define		key3			key.regs_e.reg3
-#define		mask0			mask.regs_e.reg0
-#define		mask1			mask.regs_e.reg1
-#define		mask2			mask.regs_e.reg2
-#define		mask3			mask.regs_e.reg3
-
-
-#define		ip4_src_key		key.ipv4_e.ip_src
-#define		ip4_dest_key		key.ipv4_e.ip_dest
-#define		ip4_proto_key		key.ipv4_e.proto
-#define		ip4_port_key		key.ipv4_e.l4_port_spi
-#define		ip4_tos_key		key.ipv4_e.tos
-#define		ip4_noport_key		key.ipv4_e.noport
-#define		ip4_nrdc_key		key.ipv4_e.l2rd_tbl_num
-#define		ip4_class_key		key.ipv4_e.cls_code
-
-#define		ip4_src_mask		mask.ipv4_e.ip_src
-#define		ip4_dest_mask		mask.ipv4_e.ip_dest
-#define		ip4_proto_mask		mask.ipv4_e.proto
-#define		ip4_port_mask		mask.ipv4_e.l4_port_spi
-#define		ip4_tos_mask		mask.ipv4_e.tos
-#define		ip4_nrdc_mask		mask.ipv4_e.l2rdc_tbl_num
-#define		ip4_noport_mask		mask.ipv4_e.noport
-#define		ip4_class_mask		mask.ipv4_e.cls_code
-
-
-#define		ip6_ip_addr_key		key.ipv6_e.ip_addr
-#define		ip6_port_key		key.ipv6_e.l4_port_spi
-#define		ip6_nxt_hdr_key		key.ipv6_e.nxt_hdr
-#define		ip6_tos_key		key.ipv6_e.tos
-#define		ip6_nrdc_key		key.ipv6_e.l2rdc_tbl_num
-#define		ip6_noport_key		key.ipv6_e.noport
-#define		ip6_class_key		key.ipv6_e.cls_code
-
-
-#define		ip6_ip_addr_mask	mask.ipv6_e.ip_addr
-#define		ip6_port_mask		mask.ipv6_e.l4_port_spi
-#define		ip6_nxt_hdr_mask	mask.ipv6_e.nxt_hdr
-#define		ip6_tos_mask		mask.ipv6_e.tos
-#define		ip6_nrdc_mask		mask.ipv6_e.l2rdc_tbl_num
-#define		ip6_noport_mask		mask.ipv6_e.noport
-#define		ip6_class_mask		mask.ipv6_e.cls_code
-
-#define		ether_class_key		key.ether_e.cls_code
-#define		ether_ethframe_key	key.ether_e.ethframe
-#define		ether_class_mask	mask.ether_e.cls_code
-#define		ether_ethframe_mask	mask.ether_e.ethframe
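
A minimal, hypothetical sketch of using the raw register aliases above, which expose
each half of a TCAM entry as four 64-bit words; the helper name is illustrative and
the driver-private header is assumed to be on the include path.

#include <sys/nxge/nxge_fflp_hw.h>	/* tcam_entry_t, key0..key3 / mask0..mask3 */

/* Zero both the key and the mask words of a TCAM entry through the reg view. */
static void
example_clear_tcam_entry(tcam_entry_t *tep)
{
	tep->key0 = tep->key1 = tep->key2 = tep->key3 = 0;
	tep->mask0 = tep->mask1 = tep->mask2 = tep->mask3 = 0;
}
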
-
-
-/*
- * Flow template structure.
- * The flow header is passed through the hash function,
- * which generates the H1 (and H2) hash values.
- * Hash computation starts at the 22-bit zeros field.
- *
- * Since this structure uses the IP address fields,
- * /usr/include/netinet/in.h has to be included
- * before this header file.
- * These includes should eventually move to the impl files.
- */
-
-#if defined(SOLARIS) || defined(COSIM)
-#include <netinet/in.h>
-#endif
-
-typedef union flow_template {
-
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t l4_0:16;  /* src port */
-		uint32_t l4_1:16;  /* dest Port */
-
-		uint32_t pid:8;
-		uint32_t port:2;
-		uint32_t zeros:22; /* 0 */
-
-		union {
-			struct {
-				struct in6_addr daddr;
-				struct in6_addr saddr;
-			} ip6_addr;
-
-			struct  {
-				uint32_t rsrvd1;
-				struct in_addr daddr;
-				uint32_t rsrvd2[3];
-				struct in_addr saddr;
-				uint32_t rsrvd5[2];
-			} ip4_addr;
-		} ipaddr;
-
-		union {
-			uint64_t l2_info;
-			struct {
-				uint32_t vlan_valid : 4;
-				uint32_t l2da_1 : 28;
-				uint32_t l2da_0 : 20;
-				uint32_t vlanid : 12;
-
-			}l2_bits;
-		}l2;
-#else
-
-		uint32_t l4_1:16;  /* dest Port */
-		uint32_t l4_0:16;  /* src port */
-
-		uint32_t zeros:22; /* 0 */
-		uint32_t port:2;
-		uint32_t pid:8;
-
-		union {
-			struct {
-				struct in6_addr daddr;
-				struct in6_addr saddr;
-			} ip6_addr;
-
-			struct  {
-				uint32_t rsrvd1;
-				struct in_addr daddr;
-				uint32_t rsrvd2[3];
-				struct in_addr saddr;
-				uint32_t rsrvd5[2];
-			} ip4_addr;
-		} ipaddr;
-
-		union {
-			uint64_t l2_info;
-			struct {
-
-				uint32_t l2da_1 : 28;
-				uint32_t vlan_valid : 4;
-
-				uint32_t vlanid : 12;
-				uint32_t l2da_0 : 20;
-			}l2_bits;
-		}l2;
-#endif
-	} bits;
-
-} flow_template_t;
-
-
-
-#define	ip4_saddr bits.ipaddr.ip4_addr.saddr.s_addr
-#define	ip4_daddr bits.ipaddr.ip4_addr.daddr.s_addr
-
-#define	ip_src_port  bits.l4_0
-#define	ip_dst_port  bits.l4_1
-#define	ip_proto  bits.pid
-
-#define	ip6_saddr bits.ipaddr.ip6_addr.saddr
-#define	ip6_daddr bits.ipaddr.ip6_addr.daddr
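
The accessor aliases above resolve into the flow_template_t bit fields; a minimal
userland-style sketch of building an IPv4/TCP flow header for hashing (hypothetical
helper, header path assumed):

#include <string.h>			/* memset; kernel code would use bzero */
#include <sys/types.h>
#include <netinet/in.h>			/* in_addr_t, required before the nxge header */
#include <sys/nxge/nxge_fflp_hw.h>	/* flow_template_t and its aliases */

static flow_template_t
example_flow_template(in_addr_t sip, in_addr_t dip, uint16_t sport, uint16_t dport)
{
	flow_template_t ft;

	(void) memset(&ft, 0, sizeof (ft));	/* keeps the 22-bit zeros field zero */
	ft.ip4_saddr = sip;		/* bits.ipaddr.ip4_addr.saddr.s_addr */
	ft.ip4_daddr = dip;
	ft.ip_src_port = sport;		/* bits.l4_0 */
	ft.ip_dst_port = dport;		/* bits.l4_1 */
	ft.ip_proto = 6;		/* bits.pid, TCP */
	return (ft);
}
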
-
-
-
-
-typedef struct _flow_key_cfg_t {
-    uint32_t rsrvd:23;
-    uint32_t use_portnum:1;
-    uint32_t use_l2da:1;
-    uint32_t use_vlan:1;
-    uint32_t use_saddr:1;
-    uint32_t use_daddr:1;
-    uint32_t use_sport:1;
-    uint32_t use_dport:1;
-    uint32_t use_proto:1;
-    uint32_t ip_opts_exist:1;
-} flow_key_cfg_t;
-
-
-typedef struct _tcam_key_cfg_t {
-    uint32_t rsrvd:28;
-    uint32_t use_ip_daddr:1;
-    uint32_t use_ip_saddr:1;
-    uint32_t lookup_enable:1;
-    uint32_t discard:1;
-} tcam_key_cfg_t;
-
-
-
-/*
- * FCRAM Entry Formats
- *
- * For ip6 and ip4 entries, the layouts of the first 64 bits are identical.
- * The optimistic entry has only a 64-bit layout.
- * The first three bits (fmt, ext and valid) are the same
- * across all the entries.
- */
-
-typedef union hash_optim {
-    uint64_t value;
-    struct _bits {
-#if defined(_BIG_ENDIAN)
-		uint32_t	fmt : 1;	/* 63  set to zero */
-		uint32_t	ext : 1;	/* 62  set to zero */
-		uint32_t	valid : 1;	/* 61 */
-		uint32_t	rdc_offset : 5;	/* 60 : 56 */
-		uint32_t	h2 : 16;	/* 55 : 40 */
-		uint32_t	rsrvd : 8;	/* 39 : 32 */
-		uint32_t	usr_info;	/* 31 : 0   */
-#else
-		uint32_t	usr_info;	/* 31 : 0   */
-		uint32_t	rsrvd : 8;	/* 39 : 32  */
-		uint32_t	h2 : 16;	/* 55 : 40  */
-		uint32_t	rdc_offset : 5;	/* 60 : 56  */
-		uint32_t	valid : 1;	/* 61 */
-		uint32_t	ext : 1;	/* 62  set to zero */
-		uint32_t	fmt : 1;	/* 63  set to zero */
-#endif
-	} bits;
-} hash_optim_t;
-
-
-typedef    union _hash_hdr {
-    uint64_t value;
-    struct _exact_hdr {
-#if defined(_BIG_ENDIAN)
-		uint32_t	fmt : 1;	/* 63  1 for ipv6, 0 for ipv4 */
-		uint32_t	ext : 1;	/* 62  set to 1 */
-		uint32_t	valid : 1;	/* 61 */
-		uint32_t	rsrvd : 1;	/* 60 */
-		uint32_t	l2da_1 : 28;	/* 59 : 32 */
-		uint32_t	l2da_0 : 20;	/* 31 : 12 */
-		uint32_t	vlan : 12;	/* 11 : 0   */
-#else
-		uint32_t	vlan : 12;	/* 11 : 0   */
-		uint32_t	l2da_0 : 20;	/* 31 : 12 */
-		uint32_t	l2da_1 : 28;	/* 59 : 32 */
-		uint32_t	rsrvd : 1;	/* 60 */
-		uint32_t	valid : 1;	/* 61 */
-		uint32_t	ext : 1;	/* 62  set to 1 */
-		uint32_t	fmt : 1;	/* 63  1 for ipv6, 0 for ipv4 */
-#endif
-	} exact_hdr;
-    hash_optim_t optim_hdr;
-} hash_hdr_t;
-
-
-
-typedef    union _hash_ports {
-    uint64_t value;
-    struct _ports_bits {
-#if defined(_BIG_ENDIAN)
-		uint32_t	ip_dport : 16;	/* 63 : 48 */
-		uint32_t	ip_sport : 16;	/* 47 : 32 */
-		uint32_t	proto : 8;	/* 31 : 24 */
-		uint32_t	port : 2;	/* 23 : 22 */
-		uint32_t	rsrvd : 22;	/* 21 : 0   */
-#else
-		uint32_t	rsrvd : 22;	/* 21 : 0   */
-		uint32_t	port : 2;	/* 23 : 22 */
-		uint32_t	proto : 8;	/* 31 : 24 */
-		uint32_t	ip_sport : 16;	/* 47 : 32 */
-		uint32_t	ip_dport : 16;	/* 63 : 48 */
-#endif
-	} ports_bits;
-} hash_ports_t;
-
-
-
-typedef    union _hash_match_action {
-    uint64_t value;
-    struct _action_bits {
-#if defined(_BIG_ENDIAN)
-		uint32_t	rsrvd2 : 3;	/* 63 : 61  */
-		uint32_t	rdc_offset : 5;	/* 60 : 56 */
-		uint32_t	zfvld : 1;	/* 55 */
-		uint32_t	rsrvd : 3;	/* 54 : 52   */
-		uint32_t	zfid : 12;	/* 51 : 40 */
-		uint32_t	_rsrvd : 8;	/* 39 : 32 */
-		uint32_t	usr_info;	/* 31 : 0   */
-#else
-		uint32_t	usr_info;	/* 31 : 0   */
-		uint32_t	_rsrvd : 8;	/* 39 : 32  */
-		uint32_t	zfid : 12;	/* 51 : 40 */
-		uint32_t	rsrvd : 3;	/* 54 : 52   */
-		uint32_t	zfvld : 1;	/* 55 */
-		uint32_t	rdc_offset : 5;	/* 60 : 56 */
-		uint32_t	rsrvd2 : 3;	/* 63 : 61  */
-#endif
-	} action_bits;
-} hash_match_action_t;
-
-
-typedef    struct _ipaddr6 {
-    struct in6_addr	 saddr;
-    struct in6_addr	 daddr;
-} ip6_addr_t;
-
-
-typedef    struct   _ipaddr4   {
-#if defined(_BIG_ENDIAN)
-    struct in_addr	saddr;
-    struct in_addr	daddr;
-#else
-    struct in_addr	daddr;
-    struct in_addr	saddr;
-#endif
-} ip4_addr_t;
-
-
-	/* ipv4 has 32 byte layout */
-
-typedef struct hash_ipv4 {
-    hash_hdr_t		 hdr;
-    ip4_addr_t		 ip_addr;
-    hash_ports_t	 proto_ports;
-    hash_match_action_t	 action;
-} hash_ipv4_t;
-
-
-	/* ipv6 has 56 byte layout */
-typedef struct hash_ipv6 {
-    hash_hdr_t		  hdr;
-    ip6_addr_t		  ip_addr;
-    hash_ports_t	  proto_ports;
-    hash_match_action_t	  action;
-} hash_ipv6_t;
-
-
-
-typedef union fcram_entry {
-    uint64_t		  value[8];
-    hash_tbl_data_t	  dreg[8];
-    hash_ipv6_t		  ipv6_entry;
-    hash_ipv4_t		  ipv4_entry;
-    hash_optim_t	  optim_entry;
-} fcram_entry_t;
-
-
-
-#define	hash_hdr_fmt	ipv4_entry.hdr.exact_hdr.fmt
-#define	hash_hdr_ext	ipv4_entry.hdr.exact_hdr.ext
-#define	hash_hdr_valid	ipv4_entry.hdr.exact_hdr.valid
-
-#define	HASH_ENTRY_EXACT(fc)	\
-	(fc->ipv4_entry.hdr.exact_hdr.ext == 1)
-#define	HASH_ENTRY_OPTIM(fc)	\
-	((fc->ipv4_entry.hdr.exact_hdr.ext == 0) && \
-	(fc->ipv6_entry.hdr.exact_hdr.fmt == 0))
-#define	HASH_ENTRY_EXACT_IP6(fc) \
-	((fc->ipv6_entry.hdr.exact_hdr.fmt == 1) && \
-	(fc->ipv4_entry.hdr.exact_hdr.ext == 1))
-
-#define	HASH_ENTRY_EXACT_IP4(fc) \
-	((fc->ipv6_entry.hdr.exact_hdr.fmt == 0) && \
-	(fc->ipv4_entry.hdr.exact_hdr.ext == 1))
-
-#define	HASH_ENTRY_TYPE(fc)	\
-	(fc->ipv4_entry.hdr.exact_hdr.ext | \
-	(fc->ipv4_entry.hdr.exact_hdr.fmt << 1))
-
-
-
-typedef enum fcram_entry_format {
-	FCRAM_ENTRY_OPTIM = 0x0,
-	FCRAM_ENTRY_EX_IP4 = 0x2,
-	FCRAM_ENTRY_EX_IP6 = 0x3,
-	FCRAM_ENTRY_UNKOWN = 0x1
-} fcram_entry_format_t;
-
-
-#define		HASH_ENTRY_TYPE_OPTIM		FCRAM_ENTRY_OPTIM
-#define		HASH_ENTRY_TYPE_OPTIM_IP4	FCRAM_ENTRY_OPTIM
-#define		HASH_ENTRY_TYPE_EX_IP4		FCRAM_ENTRY_EX_IP4
-#define		HASH_ENTRY_TYPE_EX_IP6		FCRAM_ENTRY_EX_IP6
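
The fmt/ext pair distinguishes the three FCRAM layouts; a small sketch of dispatching
on an entry with the macros above (hypothetical helper):

#include <sys/types.h>
#include <sys/nxge/nxge_fflp_hw.h>	/* fcram_entry_t, HASH_ENTRY_TYPE, entry types */

/* Return the number of bytes occupied by an FCRAM entry, 0 if unknown. */
static size_t
example_fcram_entry_size(const fcram_entry_t *fc)
{
	switch (HASH_ENTRY_TYPE(fc)) {	/* ext | (fmt << 1), as in the enum above */
	case FCRAM_ENTRY_OPTIM:
		return (sizeof (hash_optim_t));		/* 8-byte optimistic entry */
	case FCRAM_ENTRY_EX_IP4:
		return (sizeof (hash_ipv4_t));		/* 32-byte exact IPv4 entry */
	case FCRAM_ENTRY_EX_IP6:
		return (sizeof (hash_ipv6_t));		/* 56-byte exact IPv6 entry */
	default:
		return (0);				/* FCRAM_ENTRY_UNKOWN */
	}
}
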
-
-
-
-
-	/* FFLP error log register formats */
-
-
-typedef struct _hash_lookup_err_log {
-    uint32_t rsrvd:28;
-    uint32_t lookup_err:1;
-    uint32_t ecc_err:1;
-    uint32_t uncor_err:1;
-    uint32_t multi_lkup:1;
-    uint32_t multi_bit:1;
-    uint32_t subarea:3;
-    uint32_t syndrome:8;
-    uint32_t h1:20;
-} hash_lookup_err_log_t, *p_hash_lookup_err_log_t;
-
-
-
-typedef struct _hash_pio_err_log {
-    uint32_t rsrvd:32;
-    uint32_t pio_err:1;
-    uint32_t syndrome:8;
-    uint32_t addr:23;
-} hash_pio_err_log_t, *p_hash_pio_err_log_t;
-
-
-
-typedef struct _tcam_err_log {
-    uint32_t rsrvd:2;
-    uint32_t tcam_err:1;
-    uint32_t parity_err:1;
-    uint32_t ecc_err:1;
-    uint32_t multi_lkup:1;
-    uint32_t location:8;
-    uint32_t syndrome:16;
-} tcam_err_log_t, *p_tcam_err_log_t;
-
-
-typedef struct _vlan_tbl_err_log {
-    uint32_t rsrvd:32;
-    uint32_t err:1;
-    uint32_t multi:1;
-    uint32_t addr:12;
-    uint32_t data:18;
-} vlan_tbl_err_log_t, *p_vlan_tbl_err_log_t;
-
-
-#define		NEPTUNE_TCAM_SIZE		0x100
-#define		NIU_TCAM_SIZE			0x80
-#define		FCRAM_SIZE			0x100000
-
-#ifdef	__cplusplus
-}
-#endif
-
-#endif	/* _SYS_NXGE_NXGE_FFLP_HW_H */
--- a/usr/src/uts/sun4v/sys/nxge/nxge_flow.h	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,186 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef	_SYS_NXGE_NXGE_FLOW_H
-#define	_SYS_NXGE_NXGE_FLOW_H
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#ifdef	__cplusplus
-extern "C" {
-#endif
-
-#if defined(SOLARIS) && defined(_KERNEL)
-#include <netinet/in.h>
-#define	S6_addr32	_S6_un._S6_u32
-#endif
-
-typedef struct tcpip4_spec_s {
-	in_addr_t  ip4src;
-	in_addr_t  ip4dst;
-	in_port_t  psrc;
-	in_port_t  pdst;
-} tcpip4_spec_t;
-
-typedef struct tcpip6_spec_s {
-	struct in6_addr ip6src;
-	struct in6_addr ip6dst;
-	in_port_t  psrc;
-	in_port_t  pdst;
-} tcpip6_spec_t;
-
-typedef struct udpip4_spec_s {
-	in_addr_t  ip4src;
-	in_addr_t  ip4dst;
-	in_port_t  psrc;
-	in_port_t  pdst;
-} udpip4_spec_t;
-
-typedef struct udpip6_spec_s {
-	struct in6_addr ip6src;
-	struct in6_addr ip6dst;
-	in_port_t  psrc;
-	in_port_t  pdst;
-} udpip6_spec_t;
-
-typedef struct ahip4_spec_s {
-	in_addr_t  ip4src;
-	in_addr_t  ip4dst;
-	uint32_t   spi;
-} ahip4_spec_t;
-
-typedef struct ahip6_spec_s {
-	struct in6_addr ip6src;
-	struct in6_addr ip6dst;
-	uint32_t   spi;
-} ahip6_spec_t;
-
-typedef ahip4_spec_t espip4_spec_t;
-typedef ahip6_spec_t espip6_spec_t;
-
-typedef struct rawip4_spec_s {
-	struct in6_addr ip4src;
-	struct in6_addr ip4dst;
-	uint8_t    hdata[64];
-} rawip4_spec_t;
-
-typedef struct rawip6_spec_s {
-	struct in6_addr ip6src;
-	struct in6_addr ip6dst;
-	uint8_t    hdata[64];
-} rawip6_spec_t;
-
-
-typedef struct ether_spec_s {
-	uint16_t   ether_type;
-	uint8_t    frame_size;
-	uint8_t    eframe[16];
-} ether_spec_t;
-
-
-typedef struct ip_user_spec_s {
-	uint8_t    id;
-	uint8_t    ip_ver;
-	uint8_t    proto;
-	uint8_t    tos_mask;
-	uint8_t    tos;
-} ip_user_spec_t;
-
-
-
-typedef ether_spec_t arpip_spec_t;
-typedef ether_spec_t ether_user_spec_t;
-
-
-typedef struct flow_spec_s {
-	uint32_t  flow_type;
-	union {
-		tcpip4_spec_t tcpip4spec;
-		tcpip6_spec_t tcpip6spec;
-		udpip4_spec_t udpip4spec;
-		udpip6_spec_t udpip6spec;
-		arpip_spec_t  arpipspec;
-		ahip4_spec_t  ahip4spec;
-		ahip6_spec_t  ahip6spec;
-		espip4_spec_t espip4spec;
-		espip6_spec_t espip6spec;
-		rawip4_spec_t rawip4spec;
-		rawip6_spec_t rawip6spec;
-		ether_spec_t  etherspec;
-		ip_user_spec_t  ip_usr_spec;
-		uint8_t		hdata[64];
-	} uh, um; /* entry, mask */
-} flow_spec_t;
-
-#define	FSPEC_TCPIP4	0x1	/* TCP/IPv4 Flow */
-#define	FSPEC_TCPIP6	0x2	/* TCP/IPv6 */
-#define	FSPEC_UDPIP4	0x3	/* UDP/IPv4 */
-#define	FSPEC_UDPIP6	0x4	/* UDP/IPv6 */
-#define	FSPEC_ARPIP	0x5	/* ARP/IPv4 */
-#define	FSPEC_AHIP4	0x6	/* AH/IP4   */
-#define	FSPEC_AHIP6	0x7	/* AH/IP6   */
-#define	FSPEC_ESPIP4	0x8	/* ESP/IP4  */
-#define	FSPEC_ESPIP6	0x9	/* ESP/IP6  */
-#define	FSPEC_SCTPIP4	0xA	/* SCTP/IP4  */
-#define	FSPEC_SCTPIP6	0xB	/* SCTP/IP6  */
-#define	FSPEC_RAW4	0xC	/* RAW/IP4  */
-#define	FSPEC_RAW6	0xD	/* RAW/IP6  */
-#define	FSPEC_ETHER	0xE	/* ETHER Programmable  */
-#define	FSPEC_IP_USR	0xF	/* IP Programmable  */
-#define	FSPEC_HDATA	0x10	/* Pkt Headers eth-da,sa,etype,ip,tcp(Bitmap) */
-
-
-#define	TCAM_IPV6_ADDR(m32, ip6addr) {		\
-		m32[0] = ip6addr.S6_addr32[0]; \
-		m32[1] = ip6addr.S6_addr32[1]; \
-		m32[2] = ip6addr.S6_addr32[2]; \
-		m32[3] = ip6addr.S6_addr32[3]; \
-	}
-
-
-#define	TCAM_IPV4_ADDR(m32, ip4addr) (m32 = ip4addr)
-#define	TCAM_IP_PORTS(port32, dp, sp)	  (port32 = dp | (sp << 16))
-#define	TCAM_IP_CLASS(key, mask, class)	  {		\
-		key = class; \
-		mask = 0x1f; \
-	}
-
-#define	TCAM_IP_PROTO(key, mask, proto) {		\
-		key = proto; \
-		mask = 0xff; \
-	}
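
A hedged sketch of how these helper macros compose a TCAM lookup key together with
the ip4_* aliases from nxge_fflp_hw.h (hypothetical helper and values):

#include <sys/types.h>
#include <netinet/in.h>			/* in_addr_t */
#include <sys/nxge/nxge_fflp_hw.h>	/* tcam_entry_t, ip4_* aliases */
#include <sys/nxge/nxge_flow.h>		/* TCAM_* helper macros */

static void
example_tcam_tcpip4(tcam_entry_t *tep, in_addr_t sip, in_addr_t dip,
    uint16_t sport, uint16_t dport, uint8_t class_code)
{
	TCAM_IPV4_ADDR(tep->ip4_src_key, sip);
	TCAM_IPV4_ADDR(tep->ip4_dest_key, dip);
	TCAM_IP_PORTS(tep->ip4_port_key, dport, sport);	/* dp | (sp << 16) */
	TCAM_IP_PROTO(tep->ip4_proto_key, tep->ip4_proto_mask, 6);	/* TCP */
	TCAM_IP_CLASS(tep->ip4_class_key, tep->ip4_class_mask, class_code);
}
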
-
-
-typedef struct flow_resource_s {
-	uint64_t channel_cookie;
-	uint64_t flow_cookie;
-	flow_spec_t flow_spec;
-} flow_resource_t;
-
-#ifdef	__cplusplus
-}
-#endif
-
-#endif	/* _SYS_NXGE_NXGE_FLOW_H */
--- a/usr/src/uts/sun4v/sys/nxge/nxge_fm.h	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,249 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef	_SYS_NXGE_NXGE_FM_H
-#define	_SYS_NXGE_NXGE_FM_H
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#ifdef	__cplusplus
-extern "C" {
-#endif
-
-#include <sys/ddi.h>
-
-#define	ERNAME_DETAILED_ERR_TYPE	"detailed error type"
-#define	ERNAME_ERR_PORTN		"port number"
-#define	ERNAME_ERR_DCHAN		"dma channel number"
-#define	ERNAME_TCAM_ERR_LOG		"tcam error log"
-#define	ERNAME_VLANTAB_ERR_LOG		"vlan table error log"
-#define	ERNAME_HASHTAB_ERR_LOG		"hash table error log"
-#define	ERNAME_HASHT_LOOKUP_ERR_LOG0	"hash table lookup error log0"
-#define	ERNAME_HASHT_LOOKUP_ERR_LOG1	"hash table lookup error log1"
-#define	ERNAME_RDMC_PAR_ERR_LOG		"rdmc parity error log"
-#define	ERNAME_DFIFO_RD_PTR		"dfifo read pointer"
-#define	ERNAME_IPP_STATE_MACH		"ipp state machine"
-#define	ERNAME_DFIFO_ENTRY		"dfifo entry"
-#define	ERNAME_DFIFO_SYNDROME		"dfifo syndrome"
-#define	ERNAME_PFIFO_ENTRY		"pfifo entry"
-#define	ERNAME_ZCP_STATE_MACH		"zcp state machine"
-#define	ERNAME_CFIFO_PORT_NUM		"cfifo port number"
-#define	ERNAME_RDC_ERR_TYPE		"completion error type"
-#define	ERNAME_TDMC_ERR_LOG0		"tdmc error log0"
-#define	ERNAME_TDMC_ERR_LOG1		"tdmc error log1"
-#define	ERNAME_TXC_ROECC_ADDR		"txc reorder FIFO ECC error address"
-#define	ERNAME_TXC_ROECC_DATA0		"txc reorder FIFO data0"
-#define	ERNAME_TXC_ROECC_DATA1		"txc reorder FIFO data1"
-#define	ERNAME_TXC_ROECC_DATA2		"txc reorder FIFO data2"
-#define	ERNAME_TXC_ROECC_DATA3		"txc reorder FIFO data3"
-#define	ERNAME_TXC_ROECC_DATA4		"txc reorder FIFO data4"
-#define	ERNAME_TXC_RO_STATE0		"txc reorder FIFO error state0 " \
-					"(duplicate TID)"
-#define	ERNAME_TXC_RO_STATE1		"txc reorder FIFO error state1 " \
-					"(uninitialized TID)"
-#define	ERNAME_TXC_RO_STATE2		"txc reorder FIFO error state2 " \
-					"(timed out TIDs)"
-#define	ERNAME_TXC_RO_STATE3		"txc reorder FIFO error state3"
-#define	ERNAME_TXC_RO_STATE_CTL		"txc reorder FIFO error control"
-#define	ERNAME_TXC_RO_TIDS		"txc reorder tids"
-#define	ERNAME_TXC_SFECC_ADDR		"txc store forward FIFO ECC error "\
-					"address"
-#define	ERNAME_TXC_SFECC_DATA0		"txc store forward FIFO data0"
-#define	ERNAME_TXC_SFECC_DATA1		"txc store forward FIFO data1"
-#define	ERNAME_TXC_SFECC_DATA2		"txc store forward FIFO data2"
-#define	ERNAME_TXC_SFECC_DATA3		"txc store forward FIFO data3"
-#define	ERNAME_TXC_SFECC_DATA4		"txc store forward FIFO data4"
-
-#define	EREPORT_FM_ID_SHIFT		16
-#define	EREPORT_FM_ID_MASK		0xFF
-#define	EREPORT_INDEX_MASK		0xFF
-#define	NXGE_FM_EREPORT_UNKNOWN		0
-
-#define	FM_SW_ID			0xFF
-#define	FM_PCS_ID			MAC_BLK_ID
-#define	FM_TXMAC_ID			TXMAC_BLK_ID
-#define	FM_RXMAC_ID			RXMAC_BLK_ID
-#define	FM_MIF_ID			MIF_BLK_ID
-#define	FM_IPP_ID			IPP_BLK_ID
-#define	FM_TXC_ID			TXC_BLK_ID
-#define	FM_TXDMA_ID			TXDMA_BLK_ID
-#define	FM_RXDMA_ID			RXDMA_BLK_ID
-#define	FM_ZCP_ID			ZCP_BLK_ID
-#define	FM_ESPC_ID			ESPC_BLK_ID
-#define	FM_FFLP_ID			FFLP_BLK_ID
-#define	FM_PCIE_ID			PCIE_BLK_ID
-#define	FM_ETHER_SERDES_ID		ETHER_SERDES_BLK_ID
-#define	FM_PCIE_SERDES_ID		PCIE_SERDES_BLK_ID
-#define	FM_VIR_ID			VIR_BLK_ID
-
-typedef	uint32_t nxge_fm_ereport_id_t;
-
-typedef	struct _nxge_fm_ereport_attr {
-	uint32_t		index;
-	char			*str;
-	char			*eclass;
-	ddi_fault_impact_t	impact;
-} nxge_fm_ereport_attr_t;
-
-/* General MAC ereports */
-typedef	enum {
-	NXGE_FM_EREPORT_XPCS_LINK_DOWN = (FM_PCS_ID << EREPORT_FM_ID_SHIFT),
-	NXGE_FM_EREPORT_XPCS_TX_LINK_FAULT,
-	NXGE_FM_EREPORT_XPCS_RX_LINK_FAULT,
-	NXGE_FM_EREPORT_PCS_LINK_DOWN,
-	NXGE_FM_EREPORT_PCS_REMOTE_FAULT
-} nxge_fm_ereport_pcs_t;
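
Every ereport id above packs the originating block id into bits 23:16 and a per-block
index into the low byte; a minimal decode sketch using only the definitions above
(hypothetical helper):

#include <sys/types.h>
#include <sys/nxge/nxge_fm.h>

static void
example_decode_ereport(nxge_fm_ereport_id_t id, uint8_t *blk_id, uint8_t *index)
{
	/* e.g. NXGE_FM_EREPORT_XPCS_LINK_DOWN yields blk_id == FM_PCS_ID, index 0 */
	*blk_id = (id >> EREPORT_FM_ID_SHIFT) & EREPORT_FM_ID_MASK;
	*index = id & EREPORT_INDEX_MASK;
}
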
-
-/* MIF ereports */
-typedef	enum {
-	NXGE_FM_EREPORT_MIF_ACCESS_FAIL = (FM_MIF_ID << EREPORT_FM_ID_SHIFT)
-} nxge_fm_ereport_mif_t;
-
-/* FFLP ereports */
-typedef	enum {
-	NXGE_FM_EREPORT_FFLP_TCAM_ERR = (FM_FFLP_ID << EREPORT_FM_ID_SHIFT),
-	NXGE_FM_EREPORT_FFLP_VLAN_PAR_ERR,
-	NXGE_FM_EREPORT_FFLP_HASHT_DATA_ERR,
-	NXGE_FM_EREPORT_FFLP_HASHT_LOOKUP_ERR,
-	NXGE_FM_EREPORT_FFLP_ACCESS_FAIL
-} nxge_fm_ereport_fflp_t;
-
-/* IPP ereports */
-typedef	enum {
-	NXGE_FM_EREPORT_IPP_EOP_MISS = (FM_IPP_ID << EREPORT_FM_ID_SHIFT),
-	NXGE_FM_EREPORT_IPP_SOP_MISS,
-	NXGE_FM_EREPORT_IPP_DFIFO_UE,
-	NXGE_FM_EREPORT_IPP_DFIFO_CE,
-	NXGE_FM_EREPORT_IPP_PFIFO_PERR,
-	NXGE_FM_EREPORT_IPP_ECC_ERR_MAX,
-	NXGE_FM_EREPORT_IPP_PFIFO_OVER,
-	NXGE_FM_EREPORT_IPP_PFIFO_UND,
-	NXGE_FM_EREPORT_IPP_BAD_CS_MX,
-	NXGE_FM_EREPORT_IPP_PKT_DIS_MX,
-	NXGE_FM_EREPORT_IPP_RESET_FAIL
-} nxge_fm_ereport_ipp_t;
-
-/* RDMC ereports */
-typedef	enum {
-	NXGE_FM_EREPORT_RDMC_DCF_ERR = (FM_RXDMA_ID << EREPORT_FM_ID_SHIFT),
-	NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR,
-	NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR,
-	NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR,
-	NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR,
-	NXGE_FM_EREPORT_RDMC_RBR_TMOUT,
-	NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR,
-	NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS,
-	NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR,
-	NXGE_FM_EREPORT_RDMC_ID_MISMATCH,
-	NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR,
-	NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR,
-	NXGE_FM_EREPORT_RDMC_COMPLETION_ERR,
-	NXGE_FM_EREPORT_RDMC_CONFIG_ERR,
-	NXGE_FM_EREPORT_RDMC_RCRINCON,
-	NXGE_FM_EREPORT_RDMC_RCRFULL,
-	NXGE_FM_EREPORT_RDMC_RBRFULL,
-	NXGE_FM_EREPORT_RDMC_RBRLOGPAGE,
-	NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE
-} nxge_fm_ereport_rdmc_t;
-
-/* ZCP ereports */
-typedef	enum {
-	NXGE_FM_EREPORT_ZCP_RRFIFO_UNDERRUN =
-					(FM_ZCP_ID << EREPORT_FM_ID_SHIFT),
-	NXGE_FM_EREPORT_ZCP_RSPFIFO_UNCORR_ERR,
-	NXGE_FM_EREPORT_ZCP_STAT_TBL_PERR,
-	NXGE_FM_EREPORT_ZCP_DYN_TBL_PERR,
-	NXGE_FM_EREPORT_ZCP_BUF_TBL_PERR,
-	NXGE_FM_EREPORT_ZCP_CFIFO_ECC,
-	NXGE_FM_EREPORT_ZCP_RRFIFO_OVERRUN,
-	NXGE_FM_EREPORT_ZCP_BUFFER_OVERFLOW,
-	NXGE_FM_EREPORT_ZCP_TT_PROGRAM_ERR,
-	NXGE_FM_EREPORT_ZCP_RSP_TT_INDEX_ERR,
-	NXGE_FM_EREPORT_ZCP_SLV_TT_INDEX_ERR,
-	NXGE_FM_EREPORT_ZCP_TT_INDEX_ERR,
-	NXGE_FM_EREPORT_ZCP_ACCESS_FAIL
-} nxge_fm_ereport_zcp_t;
-
-typedef enum {
-	NXGE_FM_EREPORT_RXMAC_UNDERFLOW = (FM_RXMAC_ID << EREPORT_FM_ID_SHIFT),
-	NXGE_FM_EREPORT_RXMAC_CRC_ERRCNT_EXP,
-	NXGE_FM_EREPORT_RXMAC_LENGTH_ERRCNT_EXP,
-	NXGE_FM_EREPORT_RXMAC_VIOL_ERRCNT_EXP,
-	NXGE_FM_EREPORT_RXMAC_RXFRAG_CNT_EXP,
-	NXGE_FM_EREPORT_RXMAC_ALIGN_ECNT_EXP,
-	NXGE_FM_EREPORT_RXMAC_LINKFAULT_CNT_EXP,
-	NXGE_FM_EREPORT_RXMAC_RESET_FAIL
-} nxge_fm_ereport_rxmac_t;
-
-typedef	enum {
-	NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR =
-				(FM_TXDMA_ID << EREPORT_FM_ID_SHIFT),
-	NXGE_FM_EREPORT_TDMC_MBOX_ERR,
-	NXGE_FM_EREPORT_TDMC_NACK_PREF,
-	NXGE_FM_EREPORT_TDMC_NACK_PKT_RD,
-	NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR,
-	NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW,
-	NXGE_FM_EREPORT_TDMC_CONF_PART_ERR,
-	NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR,
-	NXGE_FM_EREPORT_TDMC_RESET_FAIL
-} nxge_fm_ereport_attr_tdmc_t;
-
-typedef	enum {
-	NXGE_FM_EREPORT_TXC_RO_CORRECT_ERR =
-				(FM_TXC_ID << EREPORT_FM_ID_SHIFT),
-	NXGE_FM_EREPORT_TXC_RO_UNCORRECT_ERR,
-	NXGE_FM_EREPORT_TXC_SF_CORRECT_ERR,
-	NXGE_FM_EREPORT_TXC_SF_UNCORRECT_ERR,
-	NXGE_FM_EREPORT_TXC_ASSY_DEAD,
-	NXGE_FM_EREPORT_TXC_REORDER_ERR
-} nxge_fm_ereport_attr_txc_t;
-
-typedef	enum {
-	NXGE_FM_EREPORT_TXMAC_UNDERFLOW =
-				(FM_TXMAC_ID << EREPORT_FM_ID_SHIFT),
-	NXGE_FM_EREPORT_TXMAC_OVERFLOW,
-	NXGE_FM_EREPORT_TXMAC_TXFIFO_XFR_ERR,
-	NXGE_FM_EREPORT_TXMAC_MAX_PKT_ERR,
-	NXGE_FM_EREPORT_TXMAC_RESET_FAIL
-} nxge_fm_ereport_attr_txmac_t;
-
-typedef	enum {
-	NXGE_FM_EREPORT_ESPC_ACCESS_FAIL = (FM_ESPC_ID << EREPORT_FM_ID_SHIFT)
-} nxge_fm_ereport_espc_t;
-
-typedef	enum {
-	NXGE_FM_EREPORT_SW_INVALID_PORT_NUM = (FM_SW_ID << EREPORT_FM_ID_SHIFT),
-	NXGE_FM_EREPORT_SW_INVALID_CHAN_NUM,
-	NXGE_FM_EREPORT_SW_INVALID_PARAM
-} nxge_fm_ereport_sw_t;
-
-#define	NXGE_FM_EREPORT_UNKNOWN			0
-#define	NXGE_FM_EREPORT_UNKNOWN_NAME		""
-
-#ifdef	__cplusplus
-}
-#endif
-
-#endif	/* _SYS_NXGE_NXGE_FM_H */
--- a/usr/src/uts/sun4v/sys/nxge/nxge_fzc.h	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,93 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef	_SYS_NXGE_NXGE_FZC_H
-#define	_SYS_NXGE_NXGE_FZC_H
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#ifdef	__cplusplus
-extern "C" {
-#endif
-
-#include <npi_vir.h>
-
-nxge_status_t nxge_fzc_intr_init(p_nxge_t);
-nxge_status_t nxge_fzc_intr_ldg_num_set(p_nxge_t);
-nxge_status_t nxge_fzc_intr_tmres_set(p_nxge_t);
-nxge_status_t nxge_fzc_intr_sid_set(p_nxge_t);
-
-nxge_status_t nxge_fzc_dmc_rx_log_page_vld(p_nxge_t, uint16_t,
-	uint32_t, boolean_t);
-nxge_status_t nxge_fzc_dmc_rx_log_page_mask(p_nxge_t, uint16_t,
-	uint32_t, uint32_t, uint32_t);
-
-void nxge_init_fzc_txdma_channels(p_nxge_t);
-
-nxge_status_t nxge_init_fzc_txdma_channel(p_nxge_t, uint16_t,
-	p_tx_ring_t, p_tx_mbox_t);
-nxge_status_t nxge_init_fzc_txdma_port(p_nxge_t);
-
-nxge_status_t nxge_init_fzc_rxdma_channel(p_nxge_t, uint16_t,
-	p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);
-
-nxge_status_t nxge_init_fzc_rdc_tbl(p_nxge_t);
-nxge_status_t nxge_init_fzc_rx_common(p_nxge_t);
-nxge_status_t nxge_init_fzc_rxdma_port(p_nxge_t);
-
-nxge_status_t nxge_init_fzc_rxdma_channel_pages(p_nxge_t,
-	uint16_t, p_rx_rbr_ring_t);
-
-nxge_status_t nxge_init_fzc_rxdma_channel_red(p_nxge_t,
-	uint16_t, p_rx_rcr_ring_t);
-
-nxge_status_t nxge_init_fzc_rxdma_channel_clrlog(p_nxge_t,
-	uint16_t, p_rx_rbr_ring_t);
-
-nxge_status_t nxge_init_fzc_txdma_channel_pages(p_nxge_t,
-	uint16_t, p_tx_ring_t);
-
-nxge_status_t nxge_init_fzc_txdma_channel_drr(p_nxge_t, uint16_t,
-	p_tx_ring_t);
-
-nxge_status_t nxge_init_fzc_txdma_port(p_nxge_t);
-
-void nxge_init_fzc_ldg_num(p_nxge_t);
-void nxge_init_fzc_sys_int_data(p_nxge_t);
-void nxge_init_fzc_ldg_int_timer(p_nxge_t);
-nxge_status_t nxge_fzc_sys_err_mask_set(p_nxge_t, uint64_t);
-
-#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
-nxge_status_t nxge_init_hv_fzc_rxdma_channel_pages(p_nxge_t,
-	uint16_t, p_rx_rbr_ring_t);
-nxge_status_t nxge_init_hv_fzc_txdma_channel_pages(p_nxge_t,
-	uint16_t, p_tx_ring_t);
-#endif
-
-#ifdef	__cplusplus
-}
-#endif
-
-#endif	/* _SYS_NXGE_NXGE_FZC_H */
--- a/usr/src/uts/sun4v/sys/nxge/nxge_hw.h	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1057 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef	_SYS_NXGE_NXGE_HW_H
-#define	_SYS_NXGE_NXGE_HW_H
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#ifdef	__cplusplus
-extern "C" {
-#endif
-
-#if	!defined(_BIG_ENDIAN) && !defined(_LITTLE_ENDIAN) && \
-		!defined(__BIG_ENDIAN) && !defined(__LITTLE_ENDIAN)
-#error	Host endianness not defined
-#endif
-
-#if	!defined(_BIT_FIELDS_HTOL) && !defined(_BIT_FIELDS_LTOH) && \
-		!defined(__BIT_FIELDS_HTOL) && !defined(__BIT_FIELDS_LTOH)
-#error	Bit ordering not defined
-#endif
-
-#include <nxge_fflp_hw.h>
-#include <nxge_ipp_hw.h>
-#include <nxge_mac_hw.h>
-#include <nxge_rxdma_hw.h>
-#include <nxge_txc_hw.h>
-#include <nxge_txdma_hw.h>
-#include <nxge_zcp_hw.h>
-#include <nxge_espc_hw.h>
-#include <nxge_n2_esr_hw.h>
-#include <nxge_sr_hw.h>
-#include <nxge_phy_hw.h>
-
-
-/* Modes of NXGE core */
-typedef	enum nxge_mode_e {
-	NXGE_MODE_NE		= 1,
-	NXGE_MODE_N2		= 2
-} nxge_mode_t;
-
-/*
- * Function Control Register
- * (bit 31 is reset to 0; when it reads back 0 the register is free to use.
- * Once done with it, bits 15:0 can be used to store SW status.)
- */
-#define	DEV_FUNC_SR_REG			(PIO + 0x10000)
-#define	DEV_FUNC_SR_SR_SHIFT		0
-#define	DEV_FUNC_SR_SR_MASK		0x000000000000FFFFULL
-#define	DEV_FUNC_SR_FUNCID_SHIFT	16
-#define	DEV_FUNC_SR_FUNCID_MASK		0x0000000000030000ULL
-#define	DEV_FUNC_SR_TAS_SHIFT		31
-#define	DEV_FUNC_SR_TAS_MASK		0x0000000080000000ULL
-
-typedef union _dev_func_sr_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t tas:1;
-			uint32_t res2:13;
-			uint32_t funcid:2;
-			uint32_t sr:16;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t sr:16;
-			uint32_t funcid:2;
-			uint32_t res2:13;
-			uint32_t tas:1;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} dev_func_sr_t, *p_dev_func_sr_t;
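
A hedged sketch of how the shared-register union above is typically inspected once
the raw 64-bit register value has been read (the register access itself is omitted
and the helper is hypothetical):

#include <sys/types.h>
#include <sys/nxge/nxge_hw.h>	/* dev_func_sr_t */

/* Returns 0 and the owning function id when the test-and-set bit reads back clear. */
static int
example_dev_func_owner(uint64_t raw, uint32_t *funcid)
{
	dev_func_sr_t sr;

	sr.value = raw;
	if (sr.bits.ldw.tas != 0)
		return (-1);	/* another function currently holds the semaphore */
	*funcid = sr.bits.ldw.funcid;
	return (0);
}
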
-
-
-/*
- * Multi Partition Control Register (partition manager)
- */
-#define	MULTI_PART_CTL_REG	(FZC_PIO + 0x00000)
-#define	MULTI_PART_CTL_MPC	0x0000000000000001ULL
-
-typedef union _multi_part_ctl_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res1:31;
-			uint32_t mpc:1;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t mpc:1;
-			uint32_t res1:31;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} multi_part_ctl_t, *p_multi_part_ctl_t;
-
-/*
- * Virtual DMA CSR Address (partition manager)
- */
-#define	VADDR_REG		(PIO_VADDR + 0x00000)
-
-/*
- * DMA Channel Binding Register (partition manager)
- */
-#define	DMA_BIND_REG		(FZC_PIO + 0x10000)
-#define	DMA_BIND_RX_SHIFT	0
-#define	DMA_BIND_RX_MASK	0x000000000000001FULL
-#define	DMA_BIND_RX_BIND_SHIFT	5
-#define	DMA_BIND_RX_BIND_SET	0x0000000000000020ULL
-#define	DMA_BIND_RX_BIND_MASK	0x0000000000000020ULL
-#define	DMA_BIND_TX_SHIFT	8
-#define	DMA_BIND_TX_MASK	0x0000000000001f00ULL
-#define	DMA_BIND_TX_BIND_SHIFT	13
-#define	DMA_BIND_TX_BIND_SET	0x0000000000002000ULL
-#define	DMA_BIND_TX_BIND_MASK	0x0000000000002000ULL
-
-typedef union _dma_bind_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res1_1:16;
-			uint32_t tx_bind:1;
-			uint32_t tx:5;
-			uint32_t res2:2;
-			uint32_t rx_bind:1;
-			uint32_t rx:5;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t rx:5;
-			uint32_t rx_bind:1;
-			uint32_t res2:2;
-			uint32_t tx:5;
-			uint32_t tx_bind:1;
-			uint32_t res1_1:16;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-}  dma_bind_t, *p_dma_bind_t;
-
-/*
- * System interrupts:
- *	Logical device and group definitions.
- */
-#define	NXGE_INT_MAX_LDS		69
-#define	NXGE_INT_MAX_LDGS		64
-#define	NXGE_LDGRP_PER_NIU_PORT		(NXGE_INT_MAX_LDGS/2)
-#define	NXGE_LDGRP_PER_NEP_PORT		(NXGE_INT_MAX_LDGS/4)
-#define	NXGE_LDGRP_PER_2PORTS		(NXGE_INT_MAX_LDGS/2)
-#define	NXGE_LDGRP_PER_4PORTS		(NXGE_INT_MAX_LDGS/4)
-
-#define	NXGE_RDMA_LD_START		0
-#define	NXGE_TDMA_LD_START		32
-#define	NXGE_MIF_LD			63
-#define	NXGE_MAC_LD_START		64
-#define	NXGE_MAC_LD_PORT0		64
-#define	NXGE_MAC_LD_PORT1		65
-#define	NXGE_MAC_LD_PORT2		66
-#define	NXGE_MAC_LD_PORT3		67
-#define	NXGE_SYS_ERROR_LD		68
-
-/*
- * Logical Device Group Number
- */
-#define	LDG_NUM_REG		(FZC_PIO + 0x20000)
-#define	LDG_NUM_NUM_SHIFT	0
-#define	LDG_NUM_NUM_MASK	0x000000000000001FULL
-
-typedef union _ldg_num_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res1_1:26;
-			uint32_t num:6;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t num:6;
-			uint32_t res1_1:26;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} ldg_num_t, *p_ldg_num_t;
-
-/*
- * Logical Device State Vector
- */
-#define	LDSV0_REG		(PIO_LDSV + 0x00000)
-#define	LDSV0_LDF_SHIFT		0
-#define	LDSV0_LDF_MASK		0x00000000000003FFULL
-#define	LDG_NUM_NUM_MASK	0x000000000000001FULL
-#define	LDSV_MASK_ALL		0x0000000000000001ULL
-
-/*
- * Logical Device State Vector 1
- */
-#define	LDSV1_REG		(PIO_LDSV + 0x00008)
-
-/*
- * Logical Device State Vector 2
- */
-#define	LDSV2_REG		(PIO_LDSV + 0x00010)
-
-/* For Logical Device State Vector 0 and 1 */
-typedef union _ldsv_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		uint32_t ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} ldsv_t, *p_ldsv_t;
-
-#define	LDSV2_LDF0_SHIFT		0
-#define	LDSV2_LDF0_MASK			0x000000000000001FULL
-#define	LDSV2_LDF1_SHIFT		5
-#define	LDSV2_LDF1_MASK			0x00000000000001E0ULL
-
-typedef union _ldsv2_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res1_1:22;
-			uint32_t ldf1:5;
-			uint32_t ldf0:5;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t ldf0:5;
-			uint32_t ldf1:5;
-			uint32_t res1_1:22;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} ldsv2_t, *p_ldsv2_t;
-
-/*
- * Logical Device Interrupt Mask 0
- */
-#define	LD_IM0_REG		(PIO_IMASK0 + 0x00000)
-#define	LD_IM0_SHIFT		0
-#define	LD_IM0_MASK		0x0000000000000003ULL
-#define	LD_IM_MASK		0x0000000000000003ULL
-
-/*
- * Logical Device Interrupt Mask 1
- */
-#define	LD_IM1_REG		(PIO_IMASK1 + 0x00000)
-#define	LD_IM1_SHIFT		0
-#define	LD_IM1_MASK		0x0000000000000003ULL
-
-/* For Logical Device Interrupt Mask 0 and 1 */
-typedef union _ld_im_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res1_1:30;
-			uint32_t ldf_mask:2;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t ldf_mask:2;
-			uint32_t res1_1:30;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} ld_im_t, *p_ld_im_t;
-
-/*
- * Logical Device Group Interrupt Management
- */
-#define	LDGIMGN_REG		(PIO_LDSV + 0x00018)
-#define	LDGIMGN_TIMER_SHIFT	0
-#define	LDGIMGM_TIMER_MASK	0x000000000000003FULL
-#define	LDGIMGN_ARM_SHIFT	31
-#define	LDGIMGM_ARM		0x0000000080000000ULL
-#define	LDGIMGM_ARM_MASK	0x0000000080000000ULL
-
-typedef union _ldgimgm_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint32_t arm:1;
-		uint32_t res2:25;
-		uint32_t timer:6;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint32_t timer:6;
-		uint32_t res2:25;
-		uint32_t arm:1;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} ldgimgm_t, *p_ldgimgm_t;
-
-/*
- * Logical Device Group Interrupt Timer Resolution
- */
-#define	LDGITMRES_REG		(FZC_PIO + 0x00008)
-#define	LDGTITMRES_RES_SHIFT	0			/* bits 19:0 */
-#define	LDGTITMRES_RES_MASK	0x00000000000FFFFFULL
-typedef union _ldgitmres_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint32_t res1_1:12;
-		uint32_t res:20;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint32_t res:20;
-		uint32_t res1_1:12;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} ldgitmres_t, *p_ldgitmres_t;
-
-/*
- * System Interrupt Data
- */
-#define	SID_REG			(FZC_PIO + 0x10200)
-#define	SID_DATA_SHIFT		0			/* bits 6:0 */
-#define	SID_DATA_MASK		0x000000000000007FULL
-#define	SID_DATA_INTNUM_SHIFT	0			/* bits 4:0 */
-#define	SID_DATA_INTNUM_MASK	0x000000000000001FULL
-#define	SID_DATA_FUNCNUM_SHIFT	5			/* bits 6:5 */
-#define	SID_DATA_FUNCNUM_MASK	0x0000000000000060ULL
-#define	SID_PCI_FUNCTION_SHIFT	(1 << 5)
-#define	SID_N2_INDEX		(1 << 6)
-
-#define	SID_DATA(f, v)		((f << SID_DATA_FUNCNUM_SHIFT) |	\
-				((v << SID_DATA_SHIFT) & SID_DATA_INTNUM_MASK))
-
-#define	SID_DATA_N2(v)		(v | SID_N2_INDEX)
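
A small sketch of how the SID helpers above compose the 7-bit System Interrupt Data
payload (function number in bits 6:5, interrupt number in bits 4:0; on N2 the single
index is tagged with bit 6 instead); the helper name and inputs are hypothetical:

#include <sys/types.h>
#include <sys/nxge/nxge_hw.h>	/* sid_t, SID_DATA, SID_DATA_N2 */

static uint64_t
example_sid_value(uint8_t func, uint8_t intnum, int is_n2)
{
	sid_t sid;

	sid.value = 0;
	sid.bits.ldw.data = is_n2 ? SID_DATA_N2(intnum) : SID_DATA(func, intnum);
	return (sid.value);
}
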
-
-typedef union _sid_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint32_t res1_1:25;
-		uint32_t data:7;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint32_t data:7;
-		uint32_t res1_1:25;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} sid_t, *p_sid_t;
-
-/*
- * Reset Control
- */
-#define	RST_CTL_REG		(FZC_PIO + 0x00038)
-#define	RST_CTL_MAC_RST3	0x0000000000400000ULL
-#define	RST_CTL_MAC_RST3_SHIFT	22
-#define	RST_CTL_MAC_RST2	0x0000000000200000ULL
-#define	RST_CTL_MAC_RST2_SHIFT	21
-#define	RST_CTL_MAC_RST1	0x0000000000100000ULL
-#define	RST_CTL_MAC_RST1_SHIFT	20
-#define	RST_CTL_MAC_RST0	0x0000000000080000ULL
-#define	RST_CTL_MAC_RST0_SHIFT	19
-#define	RST_CTL_EN_ACK_TO	0x0000000000000800ULL
-#define	RST_CTL_EN_ACK_TO_SHIFT	11
-#define	RST_CTL_ACK_TO_MASK	0x00000000000007FEULL
-#define	RST_CTL_ACK_TO_SHIFT	1
-
-
-typedef union _rst_ctl_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint32_t res1:9;
-		uint32_t mac_rst3:1;
-		uint32_t mac_rst2:1;
-		uint32_t mac_rst1:1;
-		uint32_t mac_rst0:1;
-		uint32_t res2:7;
-		uint32_t ack_to_en:1;
-		uint32_t ack_to_val:10;
-		uint32_t res3:1;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint32_t res3:1;
-		uint32_t ack_to_val:10;
-		uint32_t ack_to_en:1;
-		uint32_t res2:7;
-		uint32_t mac_rst0:1;
-		uint32_t mac_rst1:1;
-		uint32_t mac_rst2:1;
-		uint32_t mac_rst3:1;
-		uint32_t res1:9;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} rst_ctl_t, *p_rst_ctl_t;
-
-/*
- * System Error Mask
- */
-#define	SYS_ERR_MASK_REG	(FZC_PIO + 0x00090)
-
-/*
- * System Error Status
- */
-#define	SYS_ERR_STAT_REG	(FZC_PIO + 0x00098)
-
-
-#define	SYS_ERR_META2_MASK	0x0000000000000400ULL
-#define	SYS_ERR_META2_SHIFT	10
-#define	SYS_ERR_META1_MASK	0x0000000000000200ULL
-#define	SYS_ERR_META1_SHIFT	9
-#define	SYS_ERR_PEU_MASK	0x0000000000000100ULL
-#define	SYS_ERR_PEU_SHIFT	8
-#define	SYS_ERR_TXC_MASK	0x0000000000000080ULL
-#define	SYS_ERR_TXC_SHIFT	7
-#define	SYS_ERR_RDMC_MASK	0x0000000000000040ULL
-#define	SYS_ERR_RDMC_SHIFT	6
-#define	SYS_ERR_TDMC_MASK	0x0000000000000020ULL
-#define	SYS_ERR_TDMC_SHIFT	5
-#define	SYS_ERR_ZCP_MASK	0x0000000000000010ULL
-#define	SYS_ERR_ZCP_SHIFT	4
-#define	SYS_ERR_FFLP_MASK	0x0000000000000008ULL
-#define	SYS_ERR_FFLP_SHIFT	3
-#define	SYS_ERR_IPP_MASK	0x0000000000000004ULL
-#define	SYS_ERR_IPP_SHIFT	2
-#define	SYS_ERR_MAC_MASK	0x0000000000000002ULL
-#define	SYS_ERR_MAC_SHIFT	1
-#define	SYS_ERR_SMX_MASK	0x0000000000000001ULL
-#define	SYS_ERR_SMX_SHIFT	0
-#define	SYS_ERR_MASK_ALL	(SYS_ERR_SMX_MASK | SYS_ERR_MAC_MASK | \
-				SYS_ERR_IPP_MASK | SYS_ERR_FFLP_MASK | \
-				SYS_ERR_ZCP_MASK | SYS_ERR_TDMC_MASK | \
-				SYS_ERR_RDMC_MASK | SYS_ERR_TXC_MASK | \
-				SYS_ERR_PEU_MASK | SYS_ERR_META1_MASK | \
-				SYS_ERR_META2_MASK)
-
-
-typedef union _sys_err_mask_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint32_t res:21;
-		uint32_t meta2:1;
-		uint32_t meta1:1;
-		uint32_t peu:1;
-		uint32_t txc:1;
-		uint32_t rdmc:1;
-		uint32_t tdmc:1;
-		uint32_t zcp:1;
-		uint32_t fflp:1;
-		uint32_t ipp:1;
-		uint32_t mac:1;
-		uint32_t smx:1;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint32_t smx:1;
-		uint32_t mac:1;
-		uint32_t ipp:1;
-		uint32_t fflp:1;
-		uint32_t zcp:1;
-		uint32_t tdmc:1;
-		uint32_t rdmc:1;
-		uint32_t txc:1;
-		uint32_t peu:1;
-		uint32_t meta1:1;
-		uint32_t meta2:1;
-		uint32_t res:21;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} sys_err_mask_t, sys_err_stat_t, *p_sys_err_mask_t, *p_sys_err_stat_t;
-
-
-/*
- * Meta Arbiter Dirty Transaction ID Control
- */
-
-#define	DIRTY_TID_CTL_REG		(FZC_PIO + 0x0010)
-#define	DIRTY_TID_CTL_WR_THRES_MASK	0x00000000003F0000ULL
-#define	DIRTY_TID_CTL_WR_THRES_SHIFT    16
-#define	DIRTY_TID_CTL_RD_THRES_MASK	0x00000000000003F0ULL
-#define	DIRTY_TID_CTL_RD_THRES_SHIFT	4
-#define	DIRTY_TID_CTL_DTID_CLR		0x0000000000000002ULL
-#define	DIRTY_TID_CTL_DTID_CLR_SHIFT	1
-#define	DIRTY_TID_CTL_DTID_EN		0x0000000000000001ULL
-#define	DIRTY_TID_CTL_DTID_EN_SHIFT	0
-
-typedef union _dty_tid_ctl_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint32_t res1:10;
-		uint32_t np_wr_thres_val:6;
-		uint32_t res2:6;
-		uint32_t np_rd_thres_val:6;
-		uint32_t res3:2;
-		uint32_t dty_tid_clr:1;
-		uint32_t dty_tid_en:1;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint32_t dty_tid_en:1;
-		uint32_t dty_tid_clr:1;
-		uint32_t res3:2;
-		uint32_t np_rd_thres_val:6;
-		uint32_t res2:6;
-		uint32_t np_wr_thres_val:6;
-		uint32_t res1:10;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} dty_tid_ctl_t, *p_dty_tid_ctl_t;
-
-
-/*
- * Meta Arbiter Dirty Transaction ID Status
- */
-#define	DIRTY_TID_STAT_REG			(FZC_PIO + 0x0018)
-#define	DIRTY_TID_STAT_WR_TID_DTY_CNT_MASK	0x0000000000003F00ULL
-#define	DIRTY_TID_STAT_WR_TID_DTY_CNT_SHIFT	8
-#define	DIRTY_TID_STAT_RD_TID_DTY_CNT_MASK	0x000000000000003FULL
-#define	DIRTY_TID_STAT_RD_TID_DTY_CNT_SHIFT	0
-
-typedef union _dty_tid_stat_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint32_t res1:18;
-		uint32_t wr_tid_dirty_cnt:6;
-		uint32_t res2:2;
-		uint32_t rd_tid_dirty_cnt:6;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint32_t rd_tid_dirty_cnt:6;
-		uint32_t res2:2;
-		uint32_t wr_tid_dirty_cnt:6;
-		uint32_t res1:18;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} dty_tid_stat_t, *p_dty_tid_stat_t;
-
-
-/*
- * SMX Registers
- */
-#define	SMX_CFIG_DAT_REG		(FZC_PIO + 0x00040)
-#define	SMX_CFIG_DAT_RAS_DET_EN_MASK	0x0000000080000000ULL
-#define	SMX_CFIG_DAT_RAS_DET_EN_SHIFT	31
-#define	SMX_CFIG_DAT_RAS_INJ_EN_MASK	0x0000000040000000ULL
-#define	SMX_CFIG_DAT_RAS_INJ_EN_SHIFT	30
-#define	SMX_CFIG_DAT_TRANS_TO_MASK	0x000000000FFFFFFFULL
-#define	SMX_CFIG_DAT_TRANS_TO_SHIFT	0
-
-typedef union _smx_cfg_dat_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint32_t res_err_det:1;
-		uint32_t ras_err_inj_en:1;
-		uint32_t res:2;
-		uint32_t trans_to_val:28;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint32_t trans_to_val:28;
-		uint32_t res:2;
-		uint32_t ras_err_inj_en:1;
-		uint32_t res_err_det:1;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} smx_cfg_dat_t, *p_smx_cfg_dat_t;
-
-
-#define	SMX_INT_STAT_REG	(FZC_PIO + 0x00048)
-#define	SMX_INT_STAT_SM_MASK	0x00000000FFFFFFC0ULL
-#define	SMX_INT_STAT_SM_SHIFT	6
-
-typedef union _smx_int_stat_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint32_t st_mc_stat:26;
-		uint32_t res:6;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint32_t res:6;
-		uint32_t st_mc_stat:26;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} smx_int_stat_t, *p_smx_int_stat_t;
-
-
-#define		SMX_CTL_REG	(FZC_PIO + 0x00050)
-
-typedef union _smx_ctl_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint32_t res1:21;
-		uint32_t resp_err_inj:3;
-		uint32_t res2:1;
-		uint32_t xtb_err_inj:3;
-		uint32_t res3:1;
-		uint32_t dbg_sel:3;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint32_t dbg_sel:3;
-		uint32_t res3:1;
-		uint32_t xtb_err_inj:3;
-		uint32_t res2:1;
-		uint32_t resp_err_inj:3;
-		uint32_t res1:21;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} smx_ctl_t, *p_smx_ctl_t;
-
-
-#define	SMX_DBG_VEC_REG	(FZC_PIO + 0x00058)
-
-typedef union _smx_dbg_vec_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-		uint32_t dbg_tng_vec;
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} smx_dbg_vec_t, *p_smx_dbg_vec_t;
-
-
-/*
- * Debug registers
- */
-
-#define	PIO_DBG_SEL_REG	(FZC_PIO + 0x00060)
-
-typedef union _pio_dbg_sel_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-		uint32_t sel;
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} pio_dbg_sel_t, *p_pio_dbg_sel_t;
-
-
-#define	PIO_TRAIN_VEC_REG	(FZC_PIO + 0x00068)
-
-typedef union _pio_tng_vec_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-		uint32_t training_vec;
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} pio_tng_vec_t, *p_pio_tng_vec_t;
-
-#define	PIO_ARB_CTL_REG	(FZC_PIO + 0x00070)
-
-typedef union _pio_arb_ctl_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-		uint32_t ctl;
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} pio_arb_ctl_t, *p_pio_arb_ctl_t;
-
-#define	PIO_ARB_DBG_VEC_REG	(FZC_PIO + 0x00078)
-
-typedef union _pio_arb_dbg_vec_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-		uint32_t dbg_vector;
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} pio_arb_dbg_vec_t, *p_pio_arb_dbg_vec_t;
-
-
-/*
- * GPIO Registers
- */
-
-#define	GPIO_EN_REG	(FZC_PIO + 0x00028)
-#define	GPIO_EN_ENABLE_MASK	 0x000000000000FFFFULL
-#define	GPIO_EN_ENABLE_SHIFT	 0
-typedef union _gpio_en_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint32_t res:16;
-		uint32_t enable:16;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint32_t enable:16;
-		uint32_t res:16;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} gpio_en_t, *p_gpio_en_t;
-
-#define	GPIO_DATA_IN_REG	(FZC_PIO + 0x00030)
-#define	GPIO_DATA_IN_MASK	0x000000000000FFFFULL
-#define	GPIO_DATA_IN_SHIFT	0
-typedef union _gpio_data_in_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint32_t res:16;
-		uint32_t data_in:16;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint32_t data_in:16;
-		uint32_t res:16;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} gpio_data_in_t, *p_gpio_data_in_t;
-
-
-/*
- * PCI Express Interface Module (PIM) registers
- */
-#define	PIM_CONTROL_REG	(FZC_PIM + 0x0)
-#define	PIM_CONTROL_DBG_SEL_MASK 0x000000000000000FULL
-#define	PIM_CONTROL_DBG_SEL_SHIFT	0
-typedef union _pim_ctl_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint32_t res:28;
-		uint32_t dbg_sel:4;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint32_t dbg_sel:4;
-		uint32_t res:28;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} pim_ctl_t, *p_pim_ctl_t;
-
-#define	PIM_DBG_TRAINING_VEC_REG	(FZC_PIM + 0x00008)
-#define	PIM_DBG_TRAINING_VEC_MASK	0x00000000FFFFFFFFULL
-
-#define	PIM_INTR_STATUS_REG		(FZC_PIM + 0x00010)
-#define	PIM_INTR_STATUS_MASK		0x00000000FFFFFFFFULL
-
-#define	PIM_INTERNAL_STATUS_REG		(FZC_PIM + 0x00018)
-#define	PIM_INTERNAL_STATUS_MASK	0x00000000FFFFFFFFULL
-
-#define	PIM_INTR_MASK_REG		(FZC_PIM + 0x00020)
-#define	PIM_INTR_MASK_MASK		0x00000000FFFFFFFFULL
-
-/*
- * Partitioning Logical Page Definition registers.
- * (used by both receive and transmit DMA channels)
- */
-
-/* Logical page definitions */
-typedef union _log_page_vld_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res1_1:28;
-			uint32_t func:2;
-			uint32_t page1:1;
-			uint32_t page0:1;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t page0:1;
-			uint32_t page1:1;
-			uint32_t func:2;
-			uint32_t res1_1:28;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} log_page_vld_t, *p_log_page_vld_t;
-
-
-#define	DMA_LOG_PAGE_MASK_SHIFT		0
-#define	DMA_LOG_PAGE_MASK_MASK		0x00000000ffffffffULL
-
-/* Receive Logical Page Mask */
-typedef union _log_page_mask_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t mask:32;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t mask:32;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} log_page_mask_t, *p_log_page_mask_t;
-
-
-/* Receive Logical Page Value */
-#define	DMA_LOG_PAGE_VALUE_SHIFT	0
-#define	DMA_LOG_PAGE_VALUE_MASK		0x00000000ffffffffULL
-
-/* Receive Logical Page Value */
-typedef union _log_page_value_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t value:32;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t value:32;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} log_page_value_t, *p_log_page_value_t;
-
-/* Receive Logical Page Relocation */
-#define	DMA_LOG_PAGE_RELO_SHIFT		0			/* bits 31:0 */
-#define	DMA_LOG_PAGE_RELO_MASK		0x00000000ffffffffULL
-
-/* Receive Logical Page Relocation */
-typedef union _log_page_relo_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t relo:32;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t relo:32;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} log_page_relo_t, *p_log_page_relo_t;
-
-
-/* Receive Logical Page Handle */
-#define	DMA_LOG_PAGE_HANDLE_SHIFT	0			/* bits 19:0 */
-#define	DMA_LOG_PAGE_HANDLE_MASK	0x00000000ffffffffULL
-
-/* Receive Logical Page Handle */
-typedef union _log_page_hdl_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res1_1:12;
-			uint32_t handle:20;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t handle:20;
-			uint32_t res1_1:12;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} log_page_hdl_t, *p_log_page_hdl_t;
-
-#ifdef	__cplusplus
-}
-#endif
-
-#endif	/* _SYS_NXGE_NXGE_HW_H */
--- a/usr/src/uts/sun4v/sys/nxge/nxge_impl.h	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,878 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef	_SYS_NXGE_NXGE_IMPL_H
-#define	_SYS_NXGE_NXGE_IMPL_H
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#ifdef	__cplusplus
-extern "C" {
-#endif
-
-/*
- * NIU HV API version definitions.
- */
-#define	NIU_MAJOR_VER		1
-#define	NIU_MINOR_VER		1
-
-/*
- * NIU HV API v1.0 definitions
- */
-#define	N2NIU_RX_LP_CONF		0x142
-#define	N2NIU_RX_LP_INFO		0x143
-#define	N2NIU_TX_LP_CONF		0x144
-#define	N2NIU_TX_LP_INFO		0x145
-
-#ifndef _ASM
-
-#include	<sys/types.h>
-#include	<sys/byteorder.h>
-#include	<sys/debug.h>
-#include	<sys/stropts.h>
-#include	<sys/stream.h>
-#include	<sys/strlog.h>
-#ifndef	COSIM
-#include	<sys/strsubr.h>
-#endif
-#include	<sys/cmn_err.h>
-#include	<sys/vtrace.h>
-#include	<sys/kmem.h>
-#include	<sys/ddi.h>
-#include	<sys/sunddi.h>
-#include	<sys/strsun.h>
-#include	<sys/stat.h>
-#include	<sys/cpu.h>
-#include	<sys/kstat.h>
-#include	<inet/common.h>
-#include	<inet/ip.h>
-#include	<sys/dlpi.h>
-#include	<inet/nd.h>
-#include	<netinet/in.h>
-#include	<sys/ethernet.h>
-#include	<sys/vlan.h>
-#include	<sys/pci.h>
-#include	<sys/taskq.h>
-#include	<sys/atomic.h>
-
-#include 	<sys/nxge/nxge_defs.h>
-#include 	<sys/nxge/nxge_hw.h>
-#include 	<sys/nxge/nxge_mac.h>
-#include	<sys/nxge/nxge_mii.h>
-#include	<sys/nxge/nxge_fm.h>
-#if !defined(IODIAG)
-#include	<sys/netlb.h>
-#endif
-
-#include	<sys/ddi_intr.h>
-
-#if	defined(_KERNEL)
-#include 	<sys/mac.h>
-#include	<sys/mac_impl.h>
-#include	<sys/mac_ether.h>
-#endif
-
-#if	defined(sun4v)
-#include	<sys/hypervisor_api.h>
-#include 	<sys/machsystm.h>
-#include 	<sys/hsvc.h>
-#endif
-
-/*
- * Handy macros (taken from bge driver)
- */
-#define	RBR_SIZE			4
-#define	DMA_COMMON_CHANNEL(area)	((area.dma_channel))
-#define	DMA_COMMON_VPTR(area)		((area.kaddrp))
-#define	DMA_COMMON_VPTR_INDEX(area, index)	\
-					(((char *)(area.kaddrp)) + \
-					(index * RBR_SIZE))
-#define	DMA_COMMON_HANDLE(area)		((area.dma_handle))
-#define	DMA_COMMON_ACC_HANDLE(area)	((area.acc_handle))
-#define	DMA_COMMON_IOADDR(area)		((area.dma_cookie.dmac_laddress))
-#define	DMA_COMMON_IOADDR_INDEX(area, index)	\
-					((area.dma_cookie.dmac_laddress) + \
-						(index * RBR_SIZE))
-
-#define	DMA_NPI_HANDLE(area)		((area.npi_handle))
-
-#define	DMA_COMMON_SYNC(area, flag)	((void) ddi_dma_sync((area).dma_handle,\
-						(area).offset, (area).alength, \
-						(flag)))
-#define	DMA_COMMON_SYNC_OFFSET(area, bufoffset, len, flag)	\
-					((void) ddi_dma_sync((area).dma_handle,\
-					(area.offset + bufoffset), len, \
-					(flag)))
-
-#define	DMA_COMMON_SYNC_RBR_DESC(area, index, flag)	\
-				((void) ddi_dma_sync((area).dma_handle,\
-				(index * RBR_SIZE), RBR_SIZE,	\
-				(flag)))
-
-#define	DMA_COMMON_SYNC_RBR_DESC_MULTI(area, index, count, flag)	\
-			((void) ddi_dma_sync((area).dma_handle,\
-			(index * RBR_SIZE), count * RBR_SIZE,	\
-				(flag)))
-#define	DMA_COMMON_SYNC_ENTRY(area, index, flag)	\
-				((void) ddi_dma_sync((area).dma_handle,\
-				(index * (area).block_size),	\
-				(area).block_size, \
-				(flag)))
-
-#define	NEXT_ENTRY(index, wrap)		((index + 1) & wrap)
-#define	NEXT_ENTRY_PTR(ptr, first, last)	\
-					((ptr == last) ? first : (ptr + 1))
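
The ring-index helpers above assume a power-of-two ring where the wrap mask is
(size - 1); a minimal sketch with a hypothetical 256-entry ring:

#include <sys/types.h>
#include <sys/nxge/nxge_impl.h>	/* NEXT_ENTRY */

static uint32_t
example_advance_index(uint32_t index)
{
	const uint32_t wrap = 256 - 1;	/* ring size must be a power of two */

	return (NEXT_ENTRY(index, wrap));
}
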
-
-/*
- * NPI related macros
- */
-#define	NXGE_DEV_NPI_HANDLE(nxgep)	(nxgep->npi_handle)
-
-#define	NPI_PCI_ACC_HANDLE_SET(nxgep, ah) (nxgep->npi_pci_handle.regh = ah)
-#define	NPI_PCI_ADD_HANDLE_SET(nxgep, ap) (nxgep->npi_pci_handle.regp = ap)
-
-#define	NPI_ACC_HANDLE_SET(nxgep, ah)	(nxgep->npi_handle.regh = ah)
-#define	NPI_ADD_HANDLE_SET(nxgep, ap)	\
-		nxgep->npi_handle.is_vraddr = B_FALSE;	\
-		nxgep->npi_handle.function.instance = nxgep->instance;   \
-		nxgep->npi_handle.function.function = nxgep->function_num;   \
-		nxgep->npi_handle.nxgep = (void *) nxgep;   \
-		nxgep->npi_handle.regp = ap;
-
-#define	NPI_REG_ACC_HANDLE_SET(nxgep, ah) (nxgep->npi_reg_handle.regh = ah)
-#define	NPI_REG_ADD_HANDLE_SET(nxgep, ap)	\
-		nxgep->npi_reg_handle.is_vraddr = B_FALSE;	\
-		nxgep->npi_handle.function.instance = nxgep->instance;   \
-		nxgep->npi_handle.function.function = nxgep->function_num;   \
-		nxgep->npi_reg_handle.nxgep = (void *) nxgep;   \
-		nxgep->npi_reg_handle.regp = ap;
-
-#define	NPI_MSI_ACC_HANDLE_SET(nxgep, ah) (nxgep->npi_msi_handle.regh = ah)
-#define	NPI_MSI_ADD_HANDLE_SET(nxgep, ap) (nxgep->npi_msi_handle.regp = ap)
-
-#define	NPI_VREG_ACC_HANDLE_SET(nxgep, ah) (nxgep->npi_vreg_handle.regh = ah)
-#define	NPI_VREG_ADD_HANDLE_SET(nxgep, ap)	\
-		nxgep->npi_vreg_handle.is_vraddr = B_TRUE; \
-		nxgep->npi_handle.function.instance = nxgep->instance;   \
-		nxgep->npi_handle.function.function = nxgep->function_num;   \
-		nxgep->npi_vreg_handle.nxgep = (void *) nxgep;   \
-		nxgep->npi_vreg_handle.regp = ap;
-
-#define	NPI_V2REG_ACC_HANDLE_SET(nxgep, ah) (nxgep->npi_v2reg_handle.regh = ah)
-#define	NPI_V2REG_ADD_HANDLE_SET(nxgep, ap)	\
-		nxgep->npi_v2reg_handle.is_vraddr = B_TRUE; \
-		nxgep->npi_handle.function.instance = nxgep->instance;   \
-		nxgep->npi_handle.function.function = nxgep->function_num;   \
-		nxgep->npi_v2reg_handle.nxgep = (void *) nxgep;   \
-		nxgep->npi_v2reg_handle.regp = ap;
-
-#define	NPI_PCI_ACC_HANDLE_GET(nxgep) (nxgep->npi_pci_handle.regh)
-#define	NPI_PCI_ADD_HANDLE_GET(nxgep) (nxgep->npi_pci_handle.regp)
-#define	NPI_ACC_HANDLE_GET(nxgep) (nxgep->npi_handle.regh)
-#define	NPI_ADD_HANDLE_GET(nxgep) (nxgep->npi_handle.regp)
-#define	NPI_REG_ACC_HANDLE_GET(nxgep) (nxgep->npi_reg_handle.regh)
-#define	NPI_REG_ADD_HANDLE_GET(nxgep) (nxgep->npi_reg_handle.regp)
-#define	NPI_MSI_ACC_HANDLE_GET(nxgep) (nxgep->npi_msi_handle.regh)
-#define	NPI_MSI_ADD_HANDLE_GET(nxgep) (nxgep->npi_msi_handle.regp)
-#define	NPI_VREG_ACC_HANDLE_GET(nxgep) (nxgep->npi_vreg_handle.regh)
-#define	NPI_VREG_ADD_HANDLE_GET(nxgep) (nxgep->npi_vreg_handle.regp)
-#define	NPI_V2REG_ACC_HANDLE_GET(nxgep) (nxgep->npi_v2reg_handle.regh)
-#define	NPI_V2REG_ADD_HANDLE_GET(nxgep) (nxgep->npi_v2reg_handle.regp)
-
-#define	NPI_DMA_ACC_HANDLE_SET(dmap, ah) (dmap->npi_handle.regh = ah)
-#define	NPI_DMA_ACC_HANDLE_GET(dmap) 	(dmap->npi_handle.regh)
-
-/*
- * DMA handles.
- */
-#define	NXGE_DESC_D_HANDLE_GET(desc)	(desc.dma_handle)
-#define	NXGE_DESC_D_IOADD_GET(desc)	(desc.dma_cookie.dmac_laddress)
-#define	NXGE_DMA_IOADD_GET(dma_cookie) (dma_cookie.dmac_laddress)
-#define	NXGE_DMA_AREA_IOADD_GET(dma_area) (dma_area.dma_cookie.dmac_laddress)
-
-#define	LDV_ON(ldv, vector)	((vector >> ldv) & 0x1)
-#define	LDV2_ON_1(ldv, vector)	((vector >> (ldv - 64)) & 0x1)
-#define	LDV2_ON_2(ldv, vector)	(((vector >> 5) >> (ldv - 64)) & 0x1)
-
-typedef uint32_t		nxge_status_t;
-
-typedef enum  {
-	IDLE,
-	PROGRESS,
-	CONFIGURED
-} dev_func_shared_t;
-
-typedef enum  {
-	DVMA,
-	DMA,
-	SDMA
-} dma_method_t;
-
-typedef enum  {
-	BKSIZE_4K,
-	BKSIZE_8K,
-	BKSIZE_16K,
-	BKSIZE_32K
-} nxge_rx_block_size_t;
-
-#ifdef TX_ONE_BUF
-#define	TX_BCOPY_MAX 1514
-#else
-#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
-#define	TX_BCOPY_MAX	4096
-#define	TX_BCOPY_SIZE	4096
-#else
-#define	TX_BCOPY_MAX	2048
-#define	TX_BCOPY_SIZE	2048
-#endif
-#endif
-
-#define	TX_STREAM_MIN 512
-#define	TX_FASTDVMA_MIN 1024
-
-#define	NXGE_ERROR_SHOW_MAX	0
-
-/*
- * Defaults
- */
-#define	NXGE_RDC_RCR_THRESHOLD		8
-#define	NXGE_RDC_RCR_TIMEOUT		16
-
-#define	NXGE_RDC_RCR_THRESHOLD_MAX	1024
-#define	NXGE_RDC_RCR_TIMEOUT_MAX	64
-#define	NXGE_RDC_RCR_THRESHOLD_MIN	1
-#define	NXGE_RDC_RCR_TIMEOUT_MIN	1
-#define	NXGE_RCR_FULL_HEADER		1
-
-#define	NXGE_IS_VLAN_PACKET(ptr)				\
-	((((struct ether_vlan_header *)ptr)->ether_tpid) ==	\
-	htons(VLAN_ETHERTYPE))
-
-typedef enum {
-	NONE,
-	SMALL,
-	MEDIUM,
-	LARGE
-} dma_size_t;
-
-typedef enum {
-	USE_NONE,
-	USE_BCOPY,
-	USE_DVMA,
-	USE_DMA,
-	USE_SDMA
-} dma_type_t;
-
-typedef enum {
-	NOT_IN_USE,
-	HDR_BUF,
-	MTU_BUF,
-	RE_ASSEMBLY_BUF,
-	FREE_BUF
-} rx_page_state_t;
-
-struct _nxge_block_mv_t {
-	uint32_t msg_type;
-	dma_type_t dma_type;
-};
-
-typedef struct _nxge_block_mv_t nxge_block_mv_t, *p_nxge_block_mv_t;
-
-typedef enum {
-	NEPTUNE,	/* 4 ports */
-	NEPTUNE_2,	/* 2 ports */
-	N2_NIU		/* N2/NIU 2 ports */
-} niu_type_t;
-
-typedef enum {
-	CFG_DEFAULT = 0,	/* default cfg */
-	CFG_EQUAL,	/* Equal */
-	CFG_FAIR,	/* Fair */
-	CFG_CLASSIFY,
-	CFG_L2_CLASSIFY,
-	CFG_L3_CLASSIFY,
-	CFG_L3_DISTRIBUTE,
-	CFG_L3_WEB,
-	CFG_L3_TCAM,
-	CFG_NOT_SPECIFIED,
-	CFG_CUSTOM	/* Custom */
-} cfg_type_t;
-
-typedef enum {
-	NO_MSG = 0x0,		/* No message output or storage. */
-	CONSOLE = 0x1,		/* Messages go to the console. */
-	BUFFER = 0x2,		/* Messages go to the system buffer. */
-	CON_BUF = 0x3,		/* Messages go to the console and */
-				/* system buffer. */
-	VERBOSE = 0x4		/* Messages go out only in VERBOSE mode. */
-} out_msg_t, *p_out_msg_t;
-
-typedef enum {
-	DBG_NO_MSG = 0x0,	/* No message output or storage. */
-	DBG_CONSOLE = 0x1,	/* Messages go to the console. */
-	DBG_BUFFER = 0x2,	/* Messages go to the system buffer. */
-	DBG_CON_BUF = 0x3,	/* Messages go to the console and */
-				/* system buffer. */
-	STR_LOG = 4		/* Message sent to streams logging driver. */
-} out_dbgmsg_t, *p_out_dbgmsg_t;
-
-
-
-#if defined(_KERNEL) || defined(COSIM)
-
-typedef struct ether_addr ether_addr_st, *p_ether_addr_t;
-typedef struct ether_header ether_header_t, *p_ether_header_t;
-typedef queue_t *p_queue_t;
-
-#if !defined(IODIAG)
-typedef mblk_t *p_mblk_t;
-#endif
-
-/*
- * Common DMA data elements.
- */
-struct _nxge_dma_common_t {
-	uint16_t		dma_channel;
-	void			*kaddrp;
-	void			*first_kaddrp;
-	void			*last_kaddrp;
-	void			*ioaddr_pp;
-	void			*first_ioaddr_pp;
-	void			*last_ioaddr_pp;
-	ddi_dma_cookie_t 	dma_cookie;
-	uint32_t		ncookies;
-
-	nxge_block_mv_t		msg_dma_flags;
-	ddi_dma_handle_t	dma_handle;
-	nxge_os_acc_handle_t	acc_handle;
-	npi_handle_t		npi_handle;
-
-	size_t			block_size;
-	uint32_t		nblocks;
-	size_t			alength;
-	uint_t			offset;
-	uint_t			dma_chunk_index;
-	void			*orig_ioaddr_pp;
-	uint64_t		orig_vatopa;
-	void			*orig_kaddrp;
-	size_t			orig_alength;
-	boolean_t		contig_alloc_type;
-};
-
-typedef struct _nxge_t nxge_t, *p_nxge_t;
-typedef struct _nxge_dma_common_t nxge_dma_common_t, *p_nxge_dma_common_t;
-
-typedef struct _nxge_dma_pool_t {
-	p_nxge_dma_common_t	*dma_buf_pool_p;
-	uint32_t		ndmas;
-	uint32_t		*num_chunks;
-	boolean_t		buf_allocated;
-} nxge_dma_pool_t, *p_nxge_dma_pool_t;
-
-/*
- * Each logical device (69):
- *	- LDG #
- *	- flag bits
- *	- masks.
- *	- interrupt handler function.
- *
- * Generic system interrupt handler with two arguments:
- *	(nxge_sys_intr_t)
- *	Per device instance data structure
- *	Logical group data structure.
- *
- * Logical device interrupt handler with two arguments:
- *	(nxge_ldv_intr_t)
- *	Per device instance data structure
- *	Logical device number
- */
-typedef struct	_nxge_ldg_t nxge_ldg_t, *p_nxge_ldg_t;
-typedef struct	_nxge_ldv_t nxge_ldv_t, *p_nxge_ldv_t;
-typedef uint_t	(*nxge_sys_intr_t)(void *arg1, void *arg2);
-typedef uint_t	(*nxge_ldv_intr_t)(void *arg1, void *arg2);
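
Both typedefs above follow the DDI two-argument interrupt handler shape: arg1 carries the per-instance soft state and arg2 the logical group or logical device the interrupt belongs to. A minimal sketch of a handler matching nxge_ldv_intr_t (the function name and body are hypothetical; only the signature and the standard DDI claim/unclaim return values are given):

/*
 * Hypothetical handler with the nxge_ldv_intr_t signature: arg1 is the
 * per-instance data and arg2 identifies the logical device that fired.
 */
static uint_t
example_ldv_intr(void *arg1, void *arg2)
{
	p_nxge_t	nxgep = (p_nxge_t)arg1;
	p_nxge_ldv_t	ldvp = (p_nxge_ldv_t)arg2;

	if (nxgep == NULL || ldvp == NULL)
		return (DDI_INTR_UNCLAIMED);

	/* ... service the device identified by ldvp->ldv ... */

	return (DDI_INTR_CLAIMED);
}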
-
-/*
- * Each logical device Group (64) needs to have the following
- * configurations:
- *	- timer counter (6 bits)
- *	- timer resolution (20 bits, number of system clocks)
- *	- system data (7 bits)
- */
-struct _nxge_ldg_t {
-	uint8_t			ldg;		/* logical group number */
-	uint8_t			vldg_index;
-	boolean_t		arm;
-	boolean_t		interrupted;
-	uint16_t		ldg_timer;	/* counter */
-	uint8_t			func;
-	uint8_t			vector;
-	uint8_t			intdata;
-	uint8_t			nldvs;
-	p_nxge_ldv_t		ldvp;
-	nxge_sys_intr_t		sys_intr_handler;
-	uint_t			(*ih_cb_func)(caddr_t, caddr_t);
-	p_nxge_t		nxgep;
-};
-
-struct _nxge_ldv_t {
-	uint8_t			ldg_assigned;
-	uint8_t			ldv;
-	boolean_t		is_rxdma;
-	boolean_t		is_txdma;
-	boolean_t		is_mif;
-	boolean_t		is_mac;
-	boolean_t		is_syserr;
-	boolean_t		use_timer;
-	uint8_t			channel;
-	uint8_t			vdma_index;
-	uint8_t			func;
-	p_nxge_ldg_t		ldgp;
-	uint8_t			ldv_flags;
-	boolean_t		is_leve;
-	boolean_t		is_edge;
-	uint8_t			ldv_ldf_masks;
-	nxge_ldv_intr_t		ldv_intr_handler;
-	uint_t			(*ih_cb_func)(caddr_t, caddr_t);
-	p_nxge_t		nxgep;
-};
-#endif
-
-typedef struct _nxge_logical_page_t {
-	uint16_t		dma;
-	uint16_t		page;
-	boolean_t		valid;
-	uint64_t		mask;
-	uint64_t		value;
-	uint64_t		reloc;
-	uint32_t		handle;
-} nxge_logical_page_t, *p_nxge_logical_page_t;
-
-/*
- * (Internal) return values from ioctl subroutines.
- */
-enum nxge_ioc_reply {
-	IOC_INVAL = -1,				/* bad, NAK with EINVAL	*/
-	IOC_DONE,				/* OK, reply sent	*/
-	IOC_ACK,				/* OK, just send ACK	*/
-	IOC_REPLY,				/* OK, just send reply	*/
-	IOC_RESTART_ACK,			/* OK, restart & ACK	*/
-	IOC_RESTART_REPLY			/* OK, restart & reply	*/
-};
-
-typedef struct _pci_cfg_t {
-	uint16_t vendorid;
-	uint16_t devid;
-	uint16_t command;
-	uint16_t status;
-	uint8_t  revid;
-	uint8_t  res0;
-	uint16_t junk1;
-	uint8_t  cache_line;
-	uint8_t  latency;
-	uint8_t  header;
-	uint8_t  bist;
-	uint32_t base;
-	uint32_t base14;
-	uint32_t base18;
-	uint32_t base1c;
-	uint32_t base20;
-	uint32_t base24;
-	uint32_t base28;
-	uint32_t base2c;
-	uint32_t base30;
-	uint32_t res1[2];
-	uint8_t int_line;
-	uint8_t int_pin;
-	uint8_t	min_gnt;
-	uint8_t max_lat;
-} pci_cfg_t, *p_pci_cfg_t;
-
-#if defined(_KERNEL) || defined(COSIM)
-
-typedef struct _dev_regs_t {
-	nxge_os_acc_handle_t	nxge_pciregh;	/* PCI config DDI IO handle */
-	p_pci_cfg_t		nxge_pciregp;	/* mapped PCI registers */
-
-	nxge_os_acc_handle_t	nxge_regh;	/* device DDI IO (BAR 0) */
-	void			*nxge_regp;	/* mapped device registers */
-
-	nxge_os_acc_handle_t	nxge_msix_regh;	/* MSI/X DDI handle (BAR 2) */
-	void 			*nxge_msix_regp; /* MSI/X register */
-
-	nxge_os_acc_handle_t	nxge_vir_regh;	/* virtualization (BAR 4) */
-	unsigned char		*nxge_vir_regp;	/* virtualization register */
-
-	nxge_os_acc_handle_t	nxge_vir2_regh;	/* second virtualization */
-	unsigned char		*nxge_vir2_regp; /* second virtualization */
-
-	nxge_os_acc_handle_t	nxge_romh;	/* fcode rom handle */
-	unsigned char		*nxge_romp;	/* fcode pointer */
-} dev_regs_t, *p_dev_regs_t;
-
-
-typedef struct _nxge_mac_addr_t {
-	ether_addr_t	addr;
-	uint_t		flags;
-} nxge_mac_addr_t;
-
-/*
- * The hardware supports 1 unique MAC and 16 alternate MACs (num_mmac)
- * for each XMAC port and supports 1 unique MAC and 7 alternate MACs
- * for each BMAC port.  The number of MACs assigned by the factory is
- * different and is as follows,
- * 	BMAC port:		   num_factory_mmac = num_mmac = 7
- *	XMAC port on a 2-port NIC: num_factory_mmac = num_mmac - 1 = 15
- *	XMAC port on a 4-port NIC: num_factory_mmac = 7
- * So num_factory_mmac is never larger than num_mmac.  nxge_m_mmac_add uses
- * num_mmac and nxge_m_mmac_reserve uses num_factory_mmac.
- *
- * total_factory_macs is the total number of factory MACs, including
- * the unique MAC, assigned to a Neptune-based NIC card; it is 32.
- */
-typedef struct _nxge_mmac_t {
-	uint8_t		total_factory_macs;
-	uint8_t		num_mmac;
-	uint8_t		num_factory_mmac;
-	nxge_mac_addr_t	mac_pool[XMAC_MAX_ADDR_ENTRY];
-	ether_addr_t	factory_mac_pool[XMAC_MAX_ADDR_ENTRY];
-	uint8_t		naddrfree;  /* number of alt mac addr available */
-} nxge_mmac_t;
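
The per-port counts described in the comment above reduce to a small amount of arithmetic; the helper below only illustrates that relationship (the function and its parameters are hypothetical, not part of the driver):

/*
 * Hypothetical helper restating the comment above: an XMAC port has 16
 * alternate MACs, a BMAC port 7; the factory allocation is 15 for an
 * XMAC port on a 2-port NIC and 7 otherwise.
 */
static void
example_mmac_counts(boolean_t is_xmac, int nports,
	uint8_t *num_mmac, uint8_t *num_factory_mmac)
{
	if (is_xmac) {
		*num_mmac = 16;
		*num_factory_mmac = (nports == 2) ? 15 : 7;
	} else {
		*num_mmac = 7;
		*num_factory_mmac = 7;
	}
}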
-
-/*
- * mmac stats structure
- */
-typedef struct _nxge_mmac_stats_t {
-	uint8_t mmac_max_cnt;
-	uint8_t	mmac_avail_cnt;
-	struct ether_addr mmac_avail_pool[16];
-} nxge_mmac_stats_t, *p_nxge_mmac_stats_t;
-
-#define	NXGE_MAX_MMAC_ADDRS	32
-#define	NXGE_NUM_MMAC_ADDRS	8
-#define	NXGE_NUM_OF_PORTS	4
-
-#endif
-
-#include 	<sys/nxge/nxge_common_impl.h>
-#include 	<sys/nxge/nxge_common.h>
-#include	<sys/nxge/nxge_txc.h>
-#include	<sys/nxge/nxge_rxdma.h>
-#include	<sys/nxge/nxge_txdma.h>
-#include	<sys/nxge/nxge_fflp.h>
-#include	<sys/nxge/nxge_ipp.h>
-#include	<sys/nxge/nxge_zcp.h>
-#include	<sys/nxge/nxge_fzc.h>
-#include	<sys/nxge/nxge_flow.h>
-#include	<sys/nxge/nxge_virtual.h>
-
-#include 	<sys/nxge/nxge.h>
-
-#include	<sys/modctl.h>
-#include	<sys/pattr.h>
-
-#include	<npi_vir.h>
-
-/*
- * Reconfiguring the network devices requires the net_config privilege
- * in Solaris 10+.  Prior to this, root privilege was required.  In order
- * that the driver binary can run on both S10+ and earlier versions, we
- * make the decision as to which to use at runtime.  These declarations
- * allow for either (or both) to exist ...
- */
-extern int secpolicy_net_config(const cred_t *, boolean_t);
-extern int drv_priv(cred_t *);
-extern void nxge_fm_report_error(p_nxge_t, uint8_t,
-			uint8_t, nxge_fm_ereport_id_t);
-extern int fm_check_acc_handle(ddi_acc_handle_t);
-extern int fm_check_dma_handle(ddi_dma_handle_t);
-
-#pragma weak    secpolicy_net_config
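
Because secpolicy_net_config is declared weak, its address resolves to NULL on releases that do not provide the symbol, so the choice described above can be made with a simple runtime test before falling back to drv_priv. A minimal sketch of that check (the wrapper name is hypothetical):

/*
 * Hypothetical wrapper: use secpolicy_net_config() when the running
 * kernel provides it (the weak symbol is non-NULL), otherwise fall
 * back to the older drv_priv() check.
 */
static int
example_priv_check(cred_t *credp)
{
	if (secpolicy_net_config != NULL)
		return (secpolicy_net_config(credp, B_FALSE));
	return (drv_priv(credp));
}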
-
-/* nxge_classify.c */
-nxge_status_t nxge_classify_init(p_nxge_t);
-nxge_status_t nxge_classify_uninit(p_nxge_t);
-nxge_status_t nxge_set_hw_classify_config(p_nxge_t);
-nxge_status_t nxge_classify_exit_sw(p_nxge_t);
-
-/* nxge_fflp.c */
-void nxge_put_tcam(p_nxge_t, p_mblk_t);
-void nxge_get_tcam(p_nxge_t, p_mblk_t);
-nxge_status_t nxge_classify_init_hw(p_nxge_t);
-nxge_status_t nxge_classify_init_sw(p_nxge_t);
-nxge_status_t nxge_fflp_ip_class_config_all(p_nxge_t);
-nxge_status_t nxge_fflp_ip_class_config(p_nxge_t, tcam_class_t,
-				    uint32_t);
-
-nxge_status_t nxge_fflp_ip_class_config_get(p_nxge_t,
-				    tcam_class_t,
-				    uint32_t *);
-
-nxge_status_t nxge_cfg_ip_cls_flow_key(p_nxge_t, tcam_class_t,
-				    uint32_t);
-
-nxge_status_t nxge_fflp_ip_usr_class_config(p_nxge_t, tcam_class_t,
-				    uint32_t);
-
-uint64_t nxge_classify_get_cfg_value(p_nxge_t, uint8_t, uint8_t);
-nxge_status_t nxge_add_flow(p_nxge_t, flow_resource_t *);
-nxge_status_t nxge_fflp_config_tcam_enable(p_nxge_t);
-nxge_status_t nxge_fflp_config_tcam_disable(p_nxge_t);
-
-nxge_status_t nxge_fflp_config_hash_lookup_enable(p_nxge_t);
-nxge_status_t nxge_fflp_config_hash_lookup_disable(p_nxge_t);
-
-nxge_status_t nxge_fflp_config_llc_snap_enable(p_nxge_t);
-nxge_status_t nxge_fflp_config_llc_snap_disable(p_nxge_t);
-
-nxge_status_t nxge_logical_mac_assign_rdc_table(p_nxge_t, uint8_t);
-nxge_status_t nxge_fflp_config_vlan_table(p_nxge_t, uint16_t);
-
-nxge_status_t nxge_fflp_set_hash1(p_nxge_t, uint32_t);
-
-nxge_status_t nxge_fflp_set_hash2(p_nxge_t, uint16_t);
-
-nxge_status_t nxge_fflp_init_hostinfo(p_nxge_t);
-
-void nxge_handle_tcam_fragment_bug(p_nxge_t);
-nxge_status_t nxge_fflp_hw_reset(p_nxge_t);
-nxge_status_t nxge_fflp_handle_sys_errors(p_nxge_t);
-nxge_status_t nxge_zcp_handle_sys_errors(p_nxge_t);
-
-/* nxge_kstats.c */
-void nxge_init_statsp(p_nxge_t);
-void nxge_setup_kstats(p_nxge_t);
-void nxge_destroy_kstats(p_nxge_t);
-int nxge_port_kstat_update(kstat_t *, int);
-void nxge_save_cntrs(p_nxge_t);
-
-int nxge_m_stat(void *arg, uint_t, uint64_t *);
-
-/* nxge_hw.c */
-void nxge_hw_ioctl(p_nxge_t, queue_t *, mblk_t *, struct iocblk *);
-void nxge_loopback_ioctl(p_nxge_t, queue_t *, mblk_t *, struct iocblk *);
-void nxge_global_reset(p_nxge_t);
-uint_t nxge_intr(void *, void *);
-void nxge_intr_enable(p_nxge_t);
-void nxge_intr_disable(p_nxge_t);
-void nxge_hw_blank(void *arg, time_t, uint_t);
-void nxge_hw_id_init(p_nxge_t);
-void nxge_hw_init_niu_common(p_nxge_t);
-void nxge_intr_hw_enable(p_nxge_t);
-void nxge_intr_hw_disable(p_nxge_t);
-void nxge_hw_stop(p_nxge_t);
-void nxge_check_hw_state(p_nxge_t);
-
-void nxge_rxdma_channel_put64(nxge_os_acc_handle_t,
-	void *, uint32_t, uint16_t,
-	uint64_t);
-uint64_t nxge_rxdma_channel_get64(nxge_os_acc_handle_t, void *,
-	uint32_t, uint16_t);
-
-
-void nxge_get32(p_nxge_t, p_mblk_t);
-void nxge_put32(p_nxge_t, p_mblk_t);
-
-void nxge_hw_set_mac_modes(p_nxge_t);
-
-/* nxge_send.c. */
-uint_t nxge_reschedule(caddr_t);
-
-/* nxge_rxdma.c */
-nxge_status_t nxge_rxdma_cfg_rdcgrp_default_rdc(p_nxge_t,
-					    uint8_t, uint8_t);
-
-nxge_status_t nxge_rxdma_cfg_port_default_rdc(p_nxge_t,
-				    uint8_t, uint8_t);
-nxge_status_t nxge_rxdma_cfg_rcr_threshold(p_nxge_t, uint8_t,
-				    uint16_t);
-nxge_status_t nxge_rxdma_cfg_rcr_timeout(p_nxge_t, uint8_t,
-				    uint16_t, uint8_t);
-
-/* nxge_ndd.c */
-void nxge_get_param_soft_properties(p_nxge_t);
-void nxge_copy_hw_default_to_param(p_nxge_t);
-void nxge_copy_param_hw_to_config(p_nxge_t);
-void nxge_setup_param(p_nxge_t);
-void nxge_init_param(p_nxge_t);
-void nxge_destroy_param(p_nxge_t);
-boolean_t nxge_check_rxdma_rdcgrp_member(p_nxge_t, uint8_t, uint8_t);
-boolean_t nxge_check_rxdma_port_member(p_nxge_t, uint8_t);
-boolean_t nxge_check_rdcgrp_port_member(p_nxge_t, uint8_t);
-
-boolean_t nxge_check_txdma_port_member(p_nxge_t, uint8_t);
-
-int nxge_param_get_generic(p_nxge_t, queue_t *, mblk_t *, caddr_t);
-int nxge_param_set_generic(p_nxge_t, queue_t *, mblk_t *, char *, caddr_t);
-int nxge_get_default(p_nxge_t, queue_t *, p_mblk_t, caddr_t);
-int nxge_set_default(p_nxge_t, queue_t *, p_mblk_t, char *, caddr_t);
-int nxge_nd_get_names(p_nxge_t, queue_t *, p_mblk_t, caddr_t);
-int nxge_mk_mblk_tail_space(p_mblk_t, p_mblk_t *, size_t);
-long nxge_strtol(char *, char **, int);
-boolean_t nxge_param_get_instance(queue_t *, mblk_t *);
-void nxge_param_ioctl(p_nxge_t, queue_t *, mblk_t *, struct iocblk *);
-boolean_t nxge_nd_load(caddr_t *, char *, pfi_t, pfi_t, caddr_t);
-void nxge_nd_free(caddr_t *);
-int nxge_nd_getset(p_nxge_t, queue_t *, caddr_t, p_mblk_t);
-
-void nxge_set_lb_normal(p_nxge_t);
-boolean_t nxge_set_lb(p_nxge_t, queue_t *, p_mblk_t);
-
-/* nxge_virtual.c */
-nxge_status_t nxge_cntlops(dev_info_t *, nxge_ctl_enum_t, void *, void *);
-void nxge_common_lock_get(p_nxge_t);
-void nxge_common_lock_free(p_nxge_t);
-
-nxge_status_t nxge_get_config_properties(p_nxge_t);
-void nxge_get_xcvr_properties(p_nxge_t);
-void nxge_init_vlan_config(p_nxge_t);
-void nxge_init_mac_config(p_nxge_t);
-
-
-void nxge_init_logical_devs(p_nxge_t);
-int nxge_init_ldg_intrs(p_nxge_t);
-
-void nxge_set_ldgimgmt(p_nxge_t, uint32_t, boolean_t,
-	uint32_t);
-
-void nxge_init_fzc_txdma_channels(p_nxge_t);
-
-nxge_status_t nxge_init_fzc_txdma_channel(p_nxge_t, uint16_t,
-	p_tx_ring_t, p_tx_mbox_t);
-nxge_status_t nxge_init_fzc_txdma_port(p_nxge_t);
-
-nxge_status_t nxge_init_fzc_rxdma_channel(p_nxge_t, uint16_t,
-	p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);
-
-nxge_status_t nxge_init_fzc_rdc_tbl(p_nxge_t);
-nxge_status_t nxge_init_fzc_rx_common(p_nxge_t);
-nxge_status_t nxge_init_fzc_rxdma_port(p_nxge_t);
-
-nxge_status_t nxge_init_fzc_rxdma_channel_pages(p_nxge_t,
-	uint16_t, p_rx_rbr_ring_t);
-nxge_status_t nxge_init_fzc_rxdma_channel_red(p_nxge_t,
-	uint16_t, p_rx_rcr_ring_t);
-
-nxge_status_t nxge_init_fzc_rxdma_channel_clrlog(p_nxge_t,
-	uint16_t, p_rx_rbr_ring_t);
-
-
-nxge_status_t nxge_init_fzc_txdma_channel_pages(p_nxge_t,
-	uint16_t, p_tx_ring_t);
-
-nxge_status_t nxge_init_fzc_txdma_channel_drr(p_nxge_t, uint16_t,
-	p_tx_ring_t);
-
-void nxge_init_fzc_ldg_num(p_nxge_t);
-void nxge_init_fzc_sys_int_data(p_nxge_t);
-void nxge_init_fzc_ldg_int_timer(p_nxge_t);
-nxge_status_t nxge_intr_mask_mgmt_set(p_nxge_t, boolean_t on);
-
-/* MAC functions */
-nxge_status_t nxge_mac_init(p_nxge_t);
-nxge_status_t nxge_link_init(p_nxge_t);
-nxge_status_t nxge_xif_init(p_nxge_t);
-nxge_status_t nxge_pcs_init(p_nxge_t);
-nxge_status_t nxge_serdes_init(p_nxge_t);
-nxge_status_t nxge_n2_serdes_init(p_nxge_t);
-nxge_status_t nxge_neptune_serdes_init(p_nxge_t);
-nxge_status_t nxge_xcvr_find(p_nxge_t);
-nxge_status_t nxge_get_xcvr_type(p_nxge_t);
-nxge_status_t nxge_xcvr_init(p_nxge_t);
-nxge_status_t nxge_tx_mac_init(p_nxge_t);
-nxge_status_t nxge_rx_mac_init(p_nxge_t);
-nxge_status_t nxge_tx_mac_enable(p_nxge_t);
-nxge_status_t nxge_tx_mac_disable(p_nxge_t);
-nxge_status_t nxge_rx_mac_enable(p_nxge_t);
-nxge_status_t nxge_rx_mac_disable(p_nxge_t);
-nxge_status_t nxge_tx_mac_reset(p_nxge_t);
-nxge_status_t nxge_rx_mac_reset(p_nxge_t);
-nxge_status_t nxge_link_intr(p_nxge_t, link_intr_enable_t);
-nxge_status_t nxge_mii_xcvr_init(p_nxge_t);
-nxge_status_t nxge_mii_read(p_nxge_t, uint8_t,
-			uint8_t, uint16_t *);
-nxge_status_t nxge_mii_write(p_nxge_t, uint8_t,
-			uint8_t, uint16_t);
-nxge_status_t nxge_mdio_read(p_nxge_t, uint8_t, uint8_t,
-			uint16_t, uint16_t *);
-nxge_status_t nxge_mdio_write(p_nxge_t, uint8_t,
-			uint8_t, uint16_t, uint16_t);
-nxge_status_t nxge_mii_check(p_nxge_t, mii_bmsr_t,
-			mii_bmsr_t, nxge_link_state_t *);
-nxge_status_t nxge_add_mcast_addr(p_nxge_t, struct ether_addr *);
-nxge_status_t nxge_del_mcast_addr(p_nxge_t, struct ether_addr *);
-nxge_status_t nxge_set_mac_addr(p_nxge_t, struct ether_addr *);
-nxge_status_t nxge_check_mii_link(p_nxge_t);
-nxge_status_t nxge_check_10g_link(p_nxge_t);
-nxge_status_t nxge_check_serdes_link(p_nxge_t);
-nxge_status_t nxge_check_bcm8704_link(p_nxge_t, boolean_t *);
-void nxge_link_is_down(p_nxge_t);
-void nxge_link_is_up(p_nxge_t);
-nxge_status_t nxge_link_monitor(p_nxge_t, link_mon_enable_t);
-uint32_t crc32_mchash(p_ether_addr_t);
-nxge_status_t nxge_set_promisc(p_nxge_t, boolean_t);
-nxge_status_t nxge_mac_handle_sys_errors(p_nxge_t);
-nxge_status_t nxge_10g_link_led_on(p_nxge_t);
-nxge_status_t nxge_10g_link_led_off(p_nxge_t);
-
-/* espc (sprom) prototypes */
-nxge_status_t nxge_espc_mac_addrs_get(p_nxge_t);
-nxge_status_t nxge_espc_num_macs_get(p_nxge_t, uint8_t *);
-nxge_status_t nxge_espc_num_ports_get(p_nxge_t);
-nxge_status_t nxge_espc_phy_type_get(p_nxge_t);
-
-
-void nxge_debug_msg(p_nxge_t, uint64_t, char *, ...);
-
-uint64_t hv_niu_rx_logical_page_conf(uint64_t, uint64_t,
-	uint64_t, uint64_t);
-#pragma weak	hv_niu_rx_logical_page_conf
-
-uint64_t hv_niu_rx_logical_page_info(uint64_t, uint64_t,
-	uint64_t *, uint64_t *);
-#pragma weak	hv_niu_rx_logical_page_info
-
-uint64_t hv_niu_tx_logical_page_conf(uint64_t, uint64_t,
-	uint64_t, uint64_t);
-#pragma weak	hv_niu_tx_logical_page_conf
-
-uint64_t hv_niu_tx_logical_page_info(uint64_t, uint64_t,
-	uint64_t *, uint64_t *);
-#pragma weak	hv_niu_tx_logical_page_info
-
-#ifdef NXGE_DEBUG
-char *nxge_dump_packet(char *, int);
-#endif
-
-#endif	/* !_ASM */
-
-#ifdef	__cplusplus
-}
-#endif
-
-#endif	/* _SYS_NXGE_NXGE_IMPL_H */
--- a/usr/src/uts/sun4v/sys/nxge/nxge_ipp.h	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,84 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef _SYS_NXGE_NXGE_IPP_H
-#define	_SYS_NXGE_NXGE_IPP_H
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#ifdef	__cplusplus
-extern "C" {
-#endif
-
-#include <nxge_ipp_hw.h>
-#include <npi_ipp.h>
-
-#define	IPP_MAX_PKT_SIZE	0x1FFFF
-#define	IPP_MAX_ERR_SHOW	10
-
-typedef	struct _ipp_errlog {
-	boolean_t		multiple_err;
-	uint16_t		dfifo_rd_ptr;
-	uint32_t		state_mach;
-	uint16_t		ecc_syndrome;
-} ipp_errlog_t, *p_ipp_errlog_t;
-
-typedef struct _nxge_ipp_stats {
-	uint32_t 		errors;
-	uint32_t 		inits;
-	uint32_t 		sop_miss;
-	uint32_t 		eop_miss;
-	uint32_t 		dfifo_ue;
-	uint32_t 		ecc_err_cnt;
-	uint32_t 		pfifo_perr;
-	uint32_t 		pfifo_over;
-	uint32_t 		pfifo_und;
-	uint32_t 		bad_cs_cnt;
-	uint32_t 		pkt_dis_cnt;
-	ipp_errlog_t		errlog;
-} nxge_ipp_stats_t, *p_nxge_ipp_stats_t;
-
-typedef	struct _nxge_ipp {
-	uint32_t		config;
-	uint32_t		iconfig;
-	ipp_status_t		status;
-	uint32_t		max_pkt_size;
-	nxge_ipp_stats_t	*stat;
-} nxge_ipp_t;
-
-/* IPP prototypes */
-nxge_status_t nxge_ipp_reset(p_nxge_t);
-nxge_status_t nxge_ipp_init(p_nxge_t);
-nxge_status_t nxge_ipp_disable(p_nxge_t);
-nxge_status_t nxge_ipp_handle_sys_errors(p_nxge_t);
-nxge_status_t nxge_ipp_fatal_err_recover(p_nxge_t);
-nxge_status_t nxge_ipp_eccue_valid_check(p_nxge_t, boolean_t *);
-void nxge_ipp_inject_err(p_nxge_t, uint32_t);
-
-#ifdef	__cplusplus
-}
-#endif
-
-#endif	/* _SYS_NXGE_NXGE_IPP_H */
--- a/usr/src/uts/sun4v/sys/nxge/nxge_ipp_hw.h	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,251 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef _SYS_NXGE_NXGE_IPP_HW_H
-#define	_SYS_NXGE_NXGE_IPP_HW_H
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#ifdef	__cplusplus
-extern "C" {
-#endif
-
-#include <nxge_defs.h>
-
-/* IPP Registers */
-#define	IPP_CONFIG_REG				0x000
-#define	IPP_DISCARD_PKT_CNT_REG			0x020
-#define	IPP_TCP_CKSUM_ERR_CNT_REG		0x028
-#define	IPP_ECC_ERR_COUNTER_REG			0x030
-#define	IPP_INT_STATUS_REG			0x040
-#define	IPP_INT_MASK_REG			0x048
-
-#define	IPP_PFIFO_RD_DATA0_REG			0x060
-#define	IPP_PFIFO_RD_DATA1_REG			0x068
-#define	IPP_PFIFO_RD_DATA2_REG			0x070
-#define	IPP_PFIFO_RD_DATA3_REG			0x078
-#define	IPP_PFIFO_RD_DATA4_REG			0x080
-#define	IPP_PFIFO_WR_DATA0_REG			0x088
-#define	IPP_PFIFO_WR_DATA1_REG			0x090
-#define	IPP_PFIFO_WR_DATA2_REG			0x098
-#define	IPP_PFIFO_WR_DATA3_REG			0x0a0
-#define	IPP_PFIFO_WR_DATA4_REG			0x0a8
-#define	IPP_PFIFO_RD_PTR_REG			0x0b0
-#define	IPP_PFIFO_WR_PTR_REG			0x0b8
-#define	IPP_DFIFO_RD_DATA0_REG			0x0c0
-#define	IPP_DFIFO_RD_DATA1_REG			0x0c8
-#define	IPP_DFIFO_RD_DATA2_REG			0x0d0
-#define	IPP_DFIFO_RD_DATA3_REG			0x0d8
-#define	IPP_DFIFO_RD_DATA4_REG			0x0e0
-#define	IPP_DFIFO_WR_DATA0_REG			0x0e8
-#define	IPP_DFIFO_WR_DATA1_REG			0x0f0
-#define	IPP_DFIFO_WR_DATA2_REG			0x0f8
-#define	IPP_DFIFO_WR_DATA3_REG			0x100
-#define	IPP_DFIFO_WR_DATA4_REG			0x108
-#define	IPP_DFIFO_RD_PTR_REG			0x110
-#define	IPP_DFIFO_WR_PTR_REG			0x118
-#define	IPP_STATE_MACHINE_REG			0x120
-#define	IPP_CKSUM_STATUS_REG			0x128
-#define	IPP_FFLP_CKSUM_INFO_REG			0x130
-#define	IPP_DEBUG_SELECT_REG			0x138
-#define	IPP_DFIFO_ECC_SYNDROME_REG		0x140
-#define	IPP_DFIFO_EOPM_RD_PTR_REG		0x148
-#define	IPP_ECC_CTRL_REG			0x150
-
-#define	IPP_PORT_OFFSET				0x4000
-#define	IPP_PORT0_OFFSET			0
-#define	IPP_PORT1_OFFSET			0x8000
-#define	IPP_PORT2_OFFSET			0x4000
-#define	IPP_PORT3_OFFSET			0xc000
-#define	IPP_REG_ADDR(port_num, reg)\
-	((port_num == 0) ? FZC_IPP + reg : \
-	FZC_IPP + reg + (((port_num % 2) * IPP_PORT_OFFSET) + \
-	((port_num / 3) * IPP_PORT_OFFSET) + IPP_PORT_OFFSET))
-#define	IPP_PORT_ADDR(port_num)\
-	((port_num == 0) ? FZC_IPP: \
-	FZC_IPP + (((port_num % 2) * IPP_PORT_OFFSET) + \
-	((port_num / 3) * IPP_PORT_OFFSET) + IPP_PORT_OFFSET))
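
Working the address macro above through for the four ports reproduces the per-port offsets listed before it (0, 0x8000, 0x4000 and 0xc000 relative to FZC_IPP). The standalone sketch below only recomputes those offsets; the loop and the zero base are assumptions made for the example:

#include <stdio.h>

#define	IPP_PORT_OFFSET		0x4000
/* Offset of a port's IPP block relative to FZC_IPP, per the macro above. */
#define	IPP_OFF(port_num) \
	((port_num == 0) ? 0 : \
	(((port_num % 2) * IPP_PORT_OFFSET) + \
	((port_num / 3) * IPP_PORT_OFFSET) + IPP_PORT_OFFSET))

int
main(void)
{
	int port;

	/* prints 0x0, 0x8000, 0x4000, 0xc000 for ports 0..3 */
	for (port = 0; port < 4; port++)
		(void) printf("port %d -> 0x%x\n", port, IPP_OFF(port));
	return (0);
}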
-
-/* IPP Configuration Register */
-
-#define	IPP_SOFT_RESET				(1ULL << 31)
-#define	IPP_IP_MAX_PKT_BYTES_SHIFT		8
-#define	IPP_IP_MAX_PKT_BYTES_MASK		0x1FFFF
-#define	IPP_FFLP_CKSUM_INFO_PIO_WR_EN		(1 << 7)
-#define	IPP_PRE_FIFO_PIO_WR_EN			(1 << 6)
-#define	IPP_DFIFO_PIO_WR_EN			(1 << 5)
-#define	IPP_TCP_UDP_CKSUM_EN			(1 << 4)
-#define	IPP_DROP_BAD_CRC_EN			(1 << 3)
-#define	IPP_DFIFO_ECC_CORRECT_EN		(1 << 2)
-#define	IPP_EN					(1 << 0)
-
-/* IPP Interrupt Status Registers */
-
-#define	IPP_DFIFO_MISSED_SOP			(1ULL << 31)
-#define	IPP_DFIFO_MISSED_EOP			(1 << 30)
-#define	IPP_DFIFO_ECC_UNCORR_ERR_MASK		0x3
-#define	IPP_DFIFO_ECC_UNCORR_ERR_SHIFT		28
-#define	IPP_DFIFO_ECC_CORR_ERR_MASK		0x3
-#define	IPP_DFIFO_ECC_CORR_ERR_SHIFT		26
-#define	IPP_DFIFO_ECC_ERR_MASK			0x3
-#define	IPP_DFIFO_ECC_ERR_SHIFT			24
-#define	IPP_DFIFO_NO_ECC_ERR			(1 << 23)
-#define	IPP_DFIFO_ECC_ERR_ENTRY_INDEX_MASK	0x7FF
-#define	IPP_DFIFO_ECC_ERR_ENTRY_INDEX_SHIFT	12
-#define	IPP_PRE_FIFO_PERR			(1 << 11)
-#define	IPP_ECC_ERR_CNT_MAX			(1 << 10)
-#define	IPP_PRE_FIFO_PERR_ENTRY_INDEX_MASK	0x3F
-#define	IPP_PRE_FIFO_PERR_ENTRY_INDEX_SHIFT	4
-#define	IPP_PRE_FIFO_OVERRUN			(1 << 3)
-#define	IPP_PRE_FIFO_UNDERRUN			(1 << 2)
-#define	IPP_BAD_TCPIP_CHKSUM_CNT_MAX		(1 << 1)
-#define	IPP_PKT_DISCARD_CNT_MAX			(1 << 0)
-
-#define	IPP_P0_P1_DFIFO_ENTRIES			2048
-#define	IPP_P2_P3_DFIFO_ENTRIES			1024
-#define	IPP_NIU_DFIFO_ENTRIES			1024
-
-typedef	union _ipp_status {
-	uint64_t value;
-
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t	w1;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint32_t dfifo_missed_sop	: 1;
-		uint32_t dfifo_missed_eop	: 1;
-		uint32_t dfifo_uncorr_ecc_err	: 2;
-		uint32_t dfifo_corr_ecc_err	: 2;
-		uint32_t dfifo_ecc_err		: 2;
-		uint32_t dfifo_no_ecc_err	: 1;
-		uint32_t dfifo_ecc_err_idx	: 11;
-		uint32_t pre_fifo_perr		: 1;
-		uint32_t ecc_err_cnt_ovfl	: 1;
-		uint32_t pre_fifo_perr_idx	: 6;
-		uint32_t pre_fifo_overrun	: 1;
-		uint32_t pre_fifo_underrun	: 1;
-		uint32_t bad_cksum_cnt_ovfl	: 1;
-		uint32_t pkt_discard_cnt_ovfl	: 1;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint32_t pkt_discard_cnt_ovfl	: 1;
-		uint32_t bad_cksum_cnt_ovfl	: 1;
-		uint32_t pre_fifo_underrun	: 1;
-		uint32_t pre_fifo_overrun	: 1;
-		uint32_t pre_fifo_perr_idx	: 6;
-		uint32_t ecc_err_cnt_ovfl	: 1;
-		uint32_t pre_fifo_perr		: 1;
-		uint32_t dfifo_ecc_err_idx	: 11;
-		uint32_t dfifo_no_ecc_err	: 1;
-		uint32_t dfifo_ecc_err		: 2;
-		uint32_t dfifo_corr_ecc_err	: 2;
-		uint32_t dfifo_uncorr_ecc_err	: 2;
-		uint32_t dfifo_missed_eop	: 1;
-		uint32_t dfifo_missed_sop	: 1;
-#else
-#error	one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
-#endif
-	} w0;
-
-#if !defined(_BIG_ENDIAN)
-		uint32_t	w1;
-#endif
-	} bits;
-} ipp_status_t;
-
-typedef	union _ipp_ecc_ctrl {
-	uint64_t value;
-
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t	w1;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint32_t dis_dbl	: 1;
-		uint32_t res3		: 13;
-		uint32_t cor_dbl	: 1;
-		uint32_t cor_sng	: 1;
-		uint32_t rsvd		: 5;
-		uint32_t cor_all	: 1;
-		uint32_t res2		: 1;
-		uint32_t cor_1		: 1;
-		uint32_t res1		: 5;
-		uint32_t cor_lst	: 1;
-		uint32_t cor_snd	: 1;
-		uint32_t cor_fst	: 1;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint32_t cor_fst	: 1;
-		uint32_t cor_snd	: 1;
-		uint32_t cor_lst	: 1;
-		uint32_t res1		: 5;
-		uint32_t cor_1		: 1;
-		uint32_t res2		: 1;
-		uint32_t cor_all	: 1;
-		uint32_t rsvd		: 5;
-		uint32_t cor_sng	: 1;
-		uint32_t cor_dbl	: 1;
-		uint32_t res3		: 13;
-		uint32_t dis_dbl	: 1;
-#else
-#error	one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
-#endif
-	} w0;
-
-#if !defined(_BIG_ENDIAN)
-		uint32_t	w1;
-#endif
-	} bits;
-} ipp_ecc_ctrl_t;
-
-
-/* IPP Interrupt Mask Registers */
-
-#define	IPP_ECC_ERR_CNT_MAX_INTR_DIS		(1 << 7)
-#define	IPP_DFIFO_MISSING_EOP_SOP_INTR_DIS	(1 << 6)
-#define	IPP_DFIFO_ECC_UNCORR_ERR_INTR_DIS	(1 << 5)
-#define	IPP_PRE_FIFO_PERR_INTR_DIS		(1 << 4)
-#define	IPP_PRE_FIFO_OVERRUN_INTR_DIS		(1 << 3)
-#define	IPP_PRE_FIFO_UNDERRUN_INTR_DIS		(1 << 2)
-#define	IPP_BAD_TCPIP_CKSUM_CNT_INTR_DIS	(1 << 1)
-#define	IPP_PKT_DISCARD_CNT_INTR_DIS		(1 << 0)
-
-#define	IPP_RESET_WAIT				10
-
-/* DFIFO RD/WR pointers mask */
-
-#define	IPP_XMAC_DFIFO_PTR_MASK			0x7FF
-#define	IPP_BMAC_DFIFO_PTR_MASK			0x3FF
-
-#define	IPP_ECC_CNT_MASK			0xFF
-#define	IPP_BAD_CS_CNT_MASK			0x3FFF
-#define	IPP_PKT_DIS_CNT_MASK			0x3FFF
-
-#ifdef	__cplusplus
-}
-#endif
-
-#endif	/* _SYS_NXGE_NXGE_IPP_HW_H */
--- a/usr/src/uts/sun4v/sys/nxge/nxge_mac.h	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,245 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef	_SYS_NXGE_NXGE_MAC_H
-#define	_SYS_NXGE_NXGE_MAC_H
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#ifdef	__cplusplus
-extern "C" {
-#endif
-
-#include <nxge_mac_hw.h>
-#include <npi_mac.h>
-
-#define	NXGE_MTU_DEFAULT_MAX	1522	/* 0x5f2 */
-
-#define	NXGE_XMAC_TX_INTRS	(ICFG_XMAC_TX_ALL & \
-					~(ICFG_XMAC_TX_FRAME_XMIT |\
-					ICFG_XMAC_TX_BYTE_CNT_EXP |\
-					ICFG_XMAC_TX_FRAME_CNT_EXP))
-#define	NXGE_XMAC_RX_INTRS	(ICFG_XMAC_RX_ALL & \
-					~(ICFG_XMAC_RX_FRAME_RCVD |\
-					ICFG_XMAC_RX_OCT_CNT_EXP |\
-					ICFG_XMAC_RX_HST_CNT1_EXP |\
-					ICFG_XMAC_RX_HST_CNT2_EXP |\
-					ICFG_XMAC_RX_HST_CNT3_EXP |\
-					ICFG_XMAC_RX_HST_CNT4_EXP |\
-					ICFG_XMAC_RX_HST_CNT5_EXP |\
-					ICFG_XMAC_RX_HST_CNT6_EXP |\
-					ICFG_XMAC_RX_BCAST_CNT_EXP |\
-					ICFG_XMAC_RX_MCAST_CNT_EXP |\
-					ICFG_XMAC_RX_HST_CNT7_EXP))
-#define	NXGE_BMAC_TX_INTRS	(ICFG_BMAC_TX_ALL & \
-					~(ICFG_BMAC_TX_FRAME_SENT |\
-					ICFG_BMAC_TX_BYTE_CNT_EXP |\
-					ICFG_BMAC_TX_FRAME_CNT_EXP))
-#define	NXGE_BMAC_RX_INTRS	(ICFG_BMAC_RX_ALL & \
-					~(ICFG_BMAC_RX_FRAME_RCVD |\
-					ICFG_BMAC_RX_FRAME_CNT_EXP |\
-					ICFG_BMAC_RX_BYTE_CNT_EXP))
-
-typedef enum  {
-	LINK_NO_CHANGE,
-	LINK_IS_UP,
-	LINK_IS_DOWN
-} nxge_link_state_t;
-
-/* Common MAC statistics */
-
-typedef	struct _nxge_mac_stats {
-	/*
-	 * MTU size
-	 */
-	uint32_t	mac_mtu;
-	uint16_t	rev_id;
-
-	/*
-	 * Transceiver state information.
-	 */
-	uint32_t	xcvr_inits;
-	xcvr_inuse_t	xcvr_inuse;
-	uint32_t	xcvr_portn;
-	uint32_t	xcvr_id;
-	uint32_t	serdes_inits;
-	uint32_t	serdes_portn;
-	uint32_t	cap_autoneg;
-	uint32_t	cap_10gfdx;
-	uint32_t	cap_10ghdx;
-	uint32_t	cap_1000fdx;
-	uint32_t	cap_1000hdx;
-	uint32_t	cap_100T4;
-	uint32_t	cap_100fdx;
-	uint32_t	cap_100hdx;
-	uint32_t	cap_10fdx;
-	uint32_t	cap_10hdx;
-	uint32_t	cap_asmpause;
-	uint32_t	cap_pause;
-
-	/*
-	 * Advertised capabilities.
-	 */
-	uint32_t	adv_cap_autoneg;
-	uint32_t	adv_cap_10gfdx;
-	uint32_t	adv_cap_10ghdx;
-	uint32_t	adv_cap_1000fdx;
-	uint32_t	adv_cap_1000hdx;
-	uint32_t	adv_cap_100T4;
-	uint32_t	adv_cap_100fdx;
-	uint32_t	adv_cap_100hdx;
-	uint32_t	adv_cap_10fdx;
-	uint32_t	adv_cap_10hdx;
-	uint32_t	adv_cap_asmpause;
-	uint32_t	adv_cap_pause;
-
-	/*
-	 * Link partner capabilities.
-	 */
-	uint32_t	lp_cap_autoneg;
-	uint32_t	lp_cap_10gfdx;
-	uint32_t	lp_cap_10ghdx;
-	uint32_t	lp_cap_1000fdx;
-	uint32_t	lp_cap_1000hdx;
-	uint32_t	lp_cap_100T4;
-	uint32_t	lp_cap_100fdx;
-	uint32_t	lp_cap_100hdx;
-	uint32_t	lp_cap_10fdx;
-	uint32_t	lp_cap_10hdx;
-	uint32_t	lp_cap_asmpause;
-	uint32_t	lp_cap_pause;
-
-	/*
-	 * Physical link statistics.
-	 */
-	uint32_t	link_T4;
-	uint32_t	link_speed;
-	uint32_t	link_duplex;
-	uint32_t	link_asmpause;
-	uint32_t	link_pause;
-	uint32_t	link_up;
-
-	/* Promiscuous mode */
-	boolean_t	promisc;
-} nxge_mac_stats_t;
-
-/* XMAC Statistics */
-
-typedef	struct _nxge_xmac_stats {
-	uint32_t tx_frame_cnt;
-	uint32_t tx_underflow_err;
-	uint32_t tx_maxpktsize_err;
-	uint32_t tx_overflow_err;
-	uint32_t tx_fifo_xfr_err;
-	uint64_t tx_byte_cnt;
-	uint32_t rx_frame_cnt;
-	uint32_t rx_underflow_err;
-	uint32_t rx_overflow_err;
-	uint32_t rx_crc_err_cnt;
-	uint32_t rx_len_err_cnt;
-	uint32_t rx_viol_err_cnt;
-	uint64_t rx_byte_cnt;
-	uint64_t rx_hist1_cnt;
-	uint64_t rx_hist2_cnt;
-	uint64_t rx_hist3_cnt;
-	uint64_t rx_hist4_cnt;
-	uint64_t rx_hist5_cnt;
-	uint64_t rx_hist6_cnt;
-	uint64_t rx_hist7_cnt;
-	uint64_t rx_broadcast_cnt;
-	uint64_t rx_mult_cnt;
-	uint32_t rx_frag_cnt;
-	uint32_t rx_frame_align_err_cnt;
-	uint32_t rx_linkfault_err_cnt;
-	uint32_t rx_remotefault_err;
-	uint32_t rx_localfault_err;
-	uint32_t rx_pause_cnt;
-	uint32_t tx_pause_state;
-	uint32_t tx_nopause_state;
-	uint32_t xpcs_deskew_err_cnt;
-	uint32_t xpcs_ln0_symbol_err_cnt;
-	uint32_t xpcs_ln1_symbol_err_cnt;
-	uint32_t xpcs_ln2_symbol_err_cnt;
-	uint32_t xpcs_ln3_symbol_err_cnt;
-} nxge_xmac_stats_t, *p_nxge_xmac_stats_t;
-
-/* BMAC Statistics */
-
-typedef	struct _nxge_bmac_stats {
-	uint64_t tx_frame_cnt;
-	uint32_t tx_underrun_err;
-	uint32_t tx_max_pkt_err;
-	uint64_t tx_byte_cnt;
-	uint64_t rx_frame_cnt;
-	uint64_t rx_byte_cnt;
-	uint32_t rx_overflow_err;
-	uint32_t rx_align_err_cnt;
-	uint32_t rx_crc_err_cnt;
-	uint32_t rx_len_err_cnt;
-	uint32_t rx_viol_err_cnt;
-	uint32_t rx_pause_cnt;
-	uint32_t tx_pause_state;
-	uint32_t tx_nopause_state;
-} nxge_bmac_stats_t, *p_nxge_bmac_stats_t;
-
-typedef struct _hash_filter_t {
-	uint_t hash_ref_cnt;
-	uint16_t hash_filter_regs[NMCFILTER_REGS];
-	uint32_t hash_bit_ref_cnt[NMCFILTER_BITS];
-} hash_filter_t, *p_hash_filter_t;
-
-typedef	struct _nxge_mac {
-	uint8_t			portnum;
-	nxge_port_t		porttype;
-	nxge_port_mode_t	portmode;
-	nxge_linkchk_mode_t	linkchkmode;
-	boolean_t		is_jumbo;
-	uint32_t		tx_config;
-	uint32_t		rx_config;
-	uint32_t		xif_config;
-	uint32_t		tx_iconfig;
-	uint32_t		rx_iconfig;
-	uint32_t		ctl_iconfig;
-	uint16_t		minframesize;
-	uint16_t		maxframesize;
-	uint16_t		maxburstsize;
-	uint16_t		ctrltype;
-	uint16_t		pa_size;
-	uint8_t			ipg[3];
-	struct ether_addr	mac_addr;
-	struct ether_addr	alt_mac_addr[MAC_MAX_ALT_ADDR_ENTRY];
-	struct ether_addr	mac_addr_filter;
-	uint16_t		hashtab[MAC_MAX_HASH_ENTRY];
-	hostinfo_t		hostinfo[MAC_MAX_HOST_INFO_ENTRY];
-	nxge_mac_stats_t	*mac_stats;
-	nxge_xmac_stats_t	*xmac_stats;
-	nxge_bmac_stats_t	*bmac_stats;
-} nxge_mac_t;
-
-#ifdef	__cplusplus
-}
-#endif
-
-#endif	/* _SYS_NXGE_NXGE_MAC_H */
--- a/usr/src/uts/sun4v/sys/nxge/nxge_mac_hw.h	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,2410 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef	_SYS_MAC_NXGE_MAC_HW_H
-#define	_SYS_MAC_NXGE_MAC_HW_H
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#ifdef	__cplusplus
-extern "C" {
-#endif
-
-#include <nxge_defs.h>
-
-/* -------------------------- From May's template --------------------------- */
-
-#define	NXGE_1GETHERMIN			255
-#define	NXGE_ETHERMIN			97
-#define	NXGE_MAX_HEADER			250
-
-/* Hardware reset */
-typedef enum  {
-	NXGE_TX_DISABLE,			/* Disable Tx side */
-	NXGE_RX_DISABLE,			/* Disable Rx side */
-	NXGE_CHIP_RESET				/* Full chip reset */
-} nxge_reset_t;
-
-#define	NXGE_DELAY_AFTER_TXRX		10000	/* 10ms after idling rx/tx */
-#define	NXGE_DELAY_AFTER_RESET		1000	/* 1ms after the reset */
-#define	NXGE_DELAY_AFTER_EE_RESET	10000	/* 10ms after EEPROM reset */
-#define	NXGE_DELAY_AFTER_LINK_RESET	13	/* 13 us after link reset */
-#define	NXGE_LINK_RESETS		8	/* Max PHY resets to wait for */
-						/* linkup */
-
-#define	FILTER_M_CTL 			0xDCEF1
-#define	HASH_BITS			8
-#define	NMCFILTER_BITS			(1 << HASH_BITS)
-#define	HASH_REG_WIDTH			16
-#define	BROADCAST_HASH_WORD		0x0f
-#define	BROADCAST_HASH_BIT		0x8000
-#define	NMCFILTER_REGS			(NMCFILTER_BITS / HASH_REG_WIDTH)
-					/* Number of multicast filter regs */
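
The hash filter defined by these constants is a 256-bit table (NMCFILTER_BITS) split across sixteen 16-bit registers; an 8-bit hash of the destination address selects one bit. A minimal sketch of that indexing, consistent with BROADCAST_HASH_WORD and BROADCAST_HASH_BIT when the broadcast hash is 0xff (the helper and the word/bit orientation are assumptions for illustration, not the driver's actual mapping):

/*
 * Hypothetical mapping from an 8-bit multicast hash to a filter
 * register and bit.  A hash of 0xff yields word 0x0f and bit 0x8000,
 * matching BROADCAST_HASH_WORD and BROADCAST_HASH_BIT above.
 */
static void
example_hash_slot(uint8_t hash, int *word, uint16_t *bit)
{
	*word = hash / HASH_REG_WIDTH;		/* 16-bit register index */
	*bit = (uint16_t)(1 << (hash % HASH_REG_WIDTH));  /* bit within it */
}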
-
-/* -------------------------------------------------------------------------- */
-
-#define	XMAC_PORT_0			0
-#define	XMAC_PORT_1			1
-#define	BMAC_PORT_0			2
-#define	BMAC_PORT_1			3
-
-#define	MAC_RESET_WAIT			10	/* usecs */
-
-#define	MAC_ADDR_REG_MASK		0xFFFF
-
-/* Network Modes */
-
-typedef enum nxge_network_mode {
-	NET_2_10GE_FIBER = 1,
-	NET_2_10GE_COPPER,
-	NET_1_10GE_FIBER_3_1GE_COPPER,
-	NET_1_10GE_COPPER_3_1GE_COPPER,
-	NET_1_10GE_FIBER_3_1GE_FIBER,
-	NET_1_10GE_COPPER_3_1GE_FIBER,
-	NET_2_1GE_FIBER_2_1GE_COPPER,
-	NET_QGE_FIBER,
-	NET_QGE_COPPER
-} nxge_network_mode_t;
-
-typedef	enum nxge_port {
-	PORT_TYPE_XMAC = 1,
-	PORT_TYPE_BMAC
-} nxge_port_t;
-
-typedef	enum nxge_port_mode {
-	PORT_1G_COPPER = 1,
-	PORT_1G_FIBER,
-	PORT_10G_COPPER,
-	PORT_10G_FIBER
-} nxge_port_mode_t;
-
-typedef	enum nxge_linkchk_mode {
-	LINKCHK_INTR = 1,
-	LINKCHK_TIMER
-} nxge_linkchk_mode_t;
-
-typedef enum {
-	LINK_INTR_STOP,
-	LINK_INTR_START
-} link_intr_enable_t, *link_intr_enable_pt;
-
-typedef	enum {
-	LINK_MONITOR_STOP,
-	LINK_MONITOR_START
-} link_mon_enable_t, *link_mon_enable_pt;
-
-typedef enum {
-	NO_XCVR,
-	INT_MII_XCVR,
-	EXT_MII_XCVR,
-	PCS_XCVR,
-	XPCS_XCVR
-} xcvr_inuse_t;
-
-/* macros for port offset calculations */
-
-#define	PORT_1_OFFSET			0x6000
-#define	PORT_GT_1_OFFSET		0x4000
-
-/* XMAC address macros */
-
-#define	XMAC_ADDR_OFFSET_0		0
-#define	XMAC_ADDR_OFFSET_1		0x6000
-
-#define	XMAC_ADDR_OFFSET(port_num)\
-	(XMAC_ADDR_OFFSET_0 + ((port_num) * PORT_1_OFFSET))
-
-#define	XMAC_REG_ADDR(port_num, reg)\
-	(FZC_MAC + (XMAC_ADDR_OFFSET(port_num)) + (reg))
-
-#define	XMAC_PORT_ADDR(port_num)\
-	(FZC_MAC + XMAC_ADDR_OFFSET(port_num))
-
-/* BMAC address macros */
-
-#define	BMAC_ADDR_OFFSET_2		0x0C000
-#define	BMAC_ADDR_OFFSET_3		0x10000
-
-#define	BMAC_ADDR_OFFSET(port_num)\
-	(BMAC_ADDR_OFFSET_2 + (((port_num) - 2) * PORT_GT_1_OFFSET))
-
-#define	BMAC_REG_ADDR(port_num, reg)\
-	(FZC_MAC + (BMAC_ADDR_OFFSET(port_num)) + (reg))
-
-#define	BMAC_PORT_ADDR(port_num)\
-	(FZC_MAC + BMAC_ADDR_OFFSET(port_num))
-
-/* PCS address macros */
-
-#define	PCS_ADDR_OFFSET_0		0x04000
-#define	PCS_ADDR_OFFSET_1		0x0A000
-#define	PCS_ADDR_OFFSET_2		0x0E000
-#define	PCS_ADDR_OFFSET_3		0x12000
-
-#define	PCS_ADDR_OFFSET(port_num)\
-	((port_num <= 1) ? \
-	(PCS_ADDR_OFFSET_0 + (port_num) * PORT_1_OFFSET) : \
-	(PCS_ADDR_OFFSET_2 + (((port_num) - 2) * PORT_GT_1_OFFSET)))
-
-#define	PCS_REG_ADDR(port_num, reg)\
-	(FZC_MAC + (PCS_ADDR_OFFSET((port_num)) + (reg)))
-
-#define	PCS_PORT_ADDR(port_num)\
-	(FZC_MAC + (PCS_ADDR_OFFSET(port_num)))
-
-/* XPCS address macros */
-
-#define	XPCS_ADDR_OFFSET_0		0x02000
-#define	XPCS_ADDR_OFFSET_1		0x08000
-#define	XPCS_ADDR_OFFSET(port_num)\
-	(XPCS_ADDR_OFFSET_0 + ((port_num) * PORT_1_OFFSET))
-
-#define	XPCS_ADDR(port_num, reg)\
-	(FZC_MAC + (XPCS_ADDR_OFFSET((port_num)) + (reg)))
-
-#define	XPCS_PORT_ADDR(port_num)\
-	(FZC_MAC + (XPCS_ADDR_OFFSET(port_num)))
-
-/* ESR address macro */
-#define	ESR_ADDR_OFFSET		0x14000
-#define	ESR_ADDR(reg)\
-	(FZC_MAC + (ESR_ADDR_OFFSET) + (reg))
-
-/* MIF address macros */
-#define	MIF_ADDR_OFFSET		0x16000
-#define	MIF_ADDR(reg)\
-	(FZC_MAC + (MIF_ADDR_OFFSET) + (reg))
-
-/* BMAC registers offset */
-#define	BTXMAC_SW_RST_REG		0x000	/* TX MAC software reset */
-#define	BRXMAC_SW_RST_REG		0x008	/* RX MAC software reset */
-#define	MAC_SEND_PAUSE_REG		0x010	/* send pause command */
-#define	BTXMAC_STATUS_REG		0x020	/* TX MAC status */
-#define	BRXMAC_STATUS_REG		0x028	/* RX MAC status */
-#define	BMAC_CTRL_STAT_REG		0x030	/* MAC control status */
-#define	BTXMAC_STAT_MSK_REG		0x040	/* TX MAC mask */
-#define	BRXMAC_STAT_MSK_REG		0x048	/* RX MAC mask */
-#define	BMAC_C_S_MSK_REG		0x050	/* MAC control mask */
-#define	TXMAC_CONFIG_REG		0x060	/* TX MAC config */
-/* cfg register bitmap */
-
-typedef union _btxmac_config_t {
-	uint64_t value;
-
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t msw;	/* Most significant word */
-		uint32_t lsw;	/* Least significant word */
-#elif defined(_LITTLE_ENDIAN)
-		uint32_t lsw;	/* Least significant word */
-		uint32_t msw;	/* Most significant word */
-#endif
-	} val;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t	w1;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t rsrvd	: 22;
-			uint32_t hdx_ctrl2	: 1;
-			uint32_t no_fcs	: 1;
-			uint32_t hdx_ctrl	: 7;
-			uint32_t txmac_enable	: 1;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t txmac_enable	: 1;
-			uint32_t hdx_ctrl	: 7;
-			uint32_t no_fcs	: 1;
-			uint32_t hdx_ctrl2	: 1;
-			uint32_t rsrvd	: 22;
-#endif
-		} w0;
-
-#if defined(_LITTLE_ENDIAN)
-		uint32_t	w1;
-#endif
-	} bits;
-} btxmac_config_t, *p_btxmac_config_t;
-
-#define	RXMAC_CONFIG_REG		0x068	/* RX MAC config */
-
-typedef union _brxmac_config_t {
-	uint64_t value;
-
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t msw;	/* Most significant word */
-		uint32_t lsw;	/* Least significant word */
-#elif defined(_LITTLE_ENDIAN)
-		uint32_t lsw;	/* Least significant word */
-		uint32_t msw;	/* Most significant word */
-#endif
-	} val;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t	w1;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t rsrvd	: 20;
-			uint32_t mac_reg_sw_test : 2;
-			uint32_t mac2ipp_pkt_cnt_en : 1;
-			uint32_t rx_crs_extend_en : 1;
-			uint32_t error_chk_dis	: 1;
-			uint32_t addr_filter_en	: 1;
-			uint32_t hash_filter_en	: 1;
-			uint32_t promiscuous_group	: 1;
-			uint32_t promiscuous	: 1;
-			uint32_t strip_fcs	: 1;
-			uint32_t strip_pad	: 1;
-			uint32_t rxmac_enable	: 1;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t rxmac_enable	: 1;
-			uint32_t strip_pad	: 1;
-			uint32_t strip_fcs	: 1;
-			uint32_t promiscuous	: 1;
-			uint32_t promiscuous_group	: 1;
-			uint32_t hash_filter_en	: 1;
-			uint32_t addr_filter_en	: 1;
-			uint32_t error_chk_dis	: 1;
-			uint32_t rx_crs_extend_en : 1;
-			uint32_t mac2ipp_pkt_cnt_en : 1;
-			uint32_t mac_reg_sw_test : 2;
-			uint32_t rsrvd	: 20;
-#endif
-		} w0;
-
-#if defined(_LITTLE_ENDIAN)
-		uint32_t	w1;
-#endif
-	} bits;
-} brxmac_config_t, *p_brxmac_config_t;
-
-#define	MAC_CTRL_CONFIG_REG		0x070	/* MAC control config */
-#define	MAC_XIF_CONFIG_REG		0x078	/* XIF config */
-
-typedef union _bxif_config_t {
-	uint64_t value;
-
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t msw;	/* Most significant word */
-		uint32_t lsw;	/* Least significant word */
-#elif defined(_LITTLE_ENDIAN)
-		uint32_t lsw;	/* Least significant word */
-		uint32_t msw;	/* Most significant word */
-#endif
-	} val;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t	w1;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t rsrvd2		: 24;
-			uint32_t sel_clk_25mhz	: 1;
-			uint32_t led_polarity	: 1;
-			uint32_t force_led_on	: 1;
-			uint32_t used		: 1;
-			uint32_t gmii_mode	: 1;
-			uint32_t rsrvd		: 1;
-			uint32_t loopback	: 1;
-			uint32_t tx_output_en	: 1;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t tx_output_en	: 1;
-			uint32_t loopback	: 1;
-			uint32_t rsrvd		: 1;
-			uint32_t gmii_mode	: 1;
-			uint32_t used		: 1;
-			uint32_t force_led_on	: 1;
-			uint32_t led_polarity	: 1;
-			uint32_t sel_clk_25mhz	: 1;
-			uint32_t rsrvd2		: 24;
-#endif
-		} w0;
-
-#if defined(_LITTLE_ENDIAN)
-		uint32_t	w1;
-#endif
-	} bits;
-} bxif_config_t, *p_bxif_config_t;
-
-#define	BMAC_MIN_REG			0x0a0	/* min frame size */
-#define	BMAC_MAX_REG			0x0a8	/* max frame size reg */
-#define	MAC_PA_SIZE_REG			0x0b0	/* num of preamble bytes */
-#define	MAC_CTRL_TYPE_REG		0x0c8	/* type field of MAC ctrl */
-#define	BMAC_ADDR0_REG			0x100	/* MAC unique ad0 reg (HI 0) */
-#define	BMAC_ADDR1_REG			0x108	/* MAC unique ad1 reg */
-#define	BMAC_ADDR2_REG			0x110	/* MAC unique ad2 reg */
-#define	BMAC_ADDR3_REG			0x118	/* MAC alt ad0 reg (HI 1) */
-#define	BMAC_ADDR4_REG			0x120	/* MAC alt ad0 reg */
-#define	BMAC_ADDR5_REG			0x128	/* MAC alt ad0 reg */
-#define	BMAC_ADDR6_REG			0x130	/* MAC alt ad1 reg (HI 2) */
-#define	BMAC_ADDR7_REG			0x138	/* MAC alt ad1 reg */
-#define	BMAC_ADDR8_REG			0x140	/* MAC alt ad1 reg */
-#define	BMAC_ADDR9_REG			0x148	/* MAC alt ad2 reg (HI 3) */
-#define	BMAC_ADDR10_REG			0x150	/* MAC alt ad2 reg */
-#define	BMAC_ADDR11_REG			0x158	/* MAC alt ad2 reg */
-#define	BMAC_ADDR12_REG			0x160	/* MAC alt ad3 reg (HI 4) */
-#define	BMAC_ADDR13_REG			0x168	/* MAC alt ad3 reg */
-#define	BMAC_ADDR14_REG			0x170	/* MAC alt ad3 reg */
-#define	BMAC_ADDR15_REG			0x178	/* MAC alt ad4 reg (HI 5) */
-#define	BMAC_ADDR16_REG			0x180	/* MAC alt ad4 reg */
-#define	BMAC_ADDR17_REG			0x188	/* MAC alt ad4 reg */
-#define	BMAC_ADDR18_REG			0x190	/* MAC alt ad5 reg (HI 6) */
-#define	BMAC_ADDR19_REG			0x198	/* MAC alt ad5 reg */
-#define	BMAC_ADDR20_REG			0x1a0	/* MAC alt ad5 reg */
-#define	BMAC_ADDR21_REG			0x1a8	/* MAC alt ad6 reg (HI 7) */
-#define	BMAC_ADDR22_REG			0x1b0	/* MAC alt ad6 reg */
-#define	BMAC_ADDR23_REG			0x1b8	/* MAC alt ad6 reg */
-#define	MAC_FC_ADDR0_REG		0x268	/* FC frame addr0 (HI 0, p3) */
-#define	MAC_FC_ADDR1_REG		0x270	/* FC frame addr1 */
-#define	MAC_FC_ADDR2_REG		0x278	/* FC frame addr2 */
-#define	MAC_ADDR_FILT0_REG		0x298	/* bits [47:32] (HI 0, p2) */
-#define	MAC_ADDR_FILT1_REG		0x2a0	/* bits [31:16] */
-#define	MAC_ADDR_FILT2_REG		0x2a8	/* bits [15:0]  */
-#define	MAC_ADDR_FILT12_MASK_REG 	0x2b0	/* addr filter 2 & 1 mask */
-#define	MAC_ADDR_FILT00_MASK_REG	0x2b8	/* addr filter 0 mask */
-#define	MAC_HASH_TBL0_REG		0x2c0	/* hash table 0 reg */
-#define	MAC_HASH_TBL1_REG		0x2c8	/* hash table 1 reg */
-#define	MAC_HASH_TBL2_REG		0x2d0	/* hash table 2 reg */
-#define	MAC_HASH_TBL3_REG		0x2d8	/* hash table 3 reg */
-#define	MAC_HASH_TBL4_REG		0x2e0	/* hash table 4 reg */
-#define	MAC_HASH_TBL5_REG		0x2e8	/* hash table 5 reg */
-#define	MAC_HASH_TBL6_REG		0x2f0	/* hash table 6 reg */
-#define	MAC_HASH_TBL7_REG		0x2f8	/* hash table 7 reg */
-#define	MAC_HASH_TBL8_REG		0x300	/* hash table 8 reg */
-#define	MAC_HASH_TBL9_REG		0x308	/* hash table 9 reg */
-#define	MAC_HASH_TBL10_REG		0x310	/* hash table 10 reg */
-#define	MAC_HASH_TBL11_REG		0x318	/* hash table 11 reg */
-#define	MAC_HASH_TBL12_REG		0x320	/* hash table 12 reg */
-#define	MAC_HASH_TBL13_REG		0x328	/* hash table 13 reg */
-#define	MAC_HASH_TBL14_REG		0x330	/* hash table 14 reg */
-#define	MAC_HASH_TBL15_REG		0x338	/* hash table 15 reg */
-#define	RXMAC_FRM_CNT_REG		0x370	/* receive frame counter */
-#define	MAC_LEN_ER_CNT_REG		0x378	/* length error counter */
-#define	BMAC_AL_ER_CNT_REG		0x380	/* alignment error counter */
-#define	BMAC_CRC_ER_CNT_REG		0x388	/* FCS error counter */
-#define	BMAC_CD_VIO_CNT_REG		0x390	/* RX code violation err */
-#define	BMAC_SM_REG			0x3a0	/* (ro) state machine reg */
-#define	BMAC_ALTAD_CMPEN_REG		0x3f8	/* Alt addr compare enable */
-#define	BMAC_HOST_INF0_REG		0x400	/* Host info */
-						/* (own da, add filter, fc) */
-#define	BMAC_HOST_INF1_REG		0x408	/* Host info (alt ad 0) */
-#define	BMAC_HOST_INF2_REG		0x410	/* Host info (alt ad 1) */
-#define	BMAC_HOST_INF3_REG		0x418	/* Host info (alt ad 2) */
-#define	BMAC_HOST_INF4_REG		0x420	/* Host info (alt ad 3) */
-#define	BMAC_HOST_INF5_REG		0x428	/* Host info (alt ad 4) */
-#define	BMAC_HOST_INF6_REG		0x430	/* Host info (alt ad 5) */
-#define	BMAC_HOST_INF7_REG		0x438	/* Host info (alt ad 6) */
-#define	BMAC_HOST_INF8_REG		0x440	/* Host info (hash hit, miss) */
-#define	BTXMAC_BYTE_CNT_REG		0x448	/* Tx byte count */
-#define	BTXMAC_FRM_CNT_REG		0x450	/* frame count */
-#define	BRXMAC_BYTE_CNT_REG		0x458	/* Rx byte count */
-/* x ranges from 0 to 6 (BMAC_MAX_ALT_ADDR_ENTRY - 1) */
-#define	BMAC_ALT_ADDR0N_REG_ADDR(x)	(BMAC_ADDR3_REG + (x) * 24)
-#define	BMAC_ALT_ADDR1N_REG_ADDR(x)	(BMAC_ADDR3_REG + 8 + (x) * 24)
-#define	BMAC_ALT_ADDR2N_REG_ADDR(x)	(BMAC_ADDR3_REG + 0x10 + (x) * 24)
-#define	BMAC_HASH_TBLN_REG_ADDR(x)	(MAC_HASH_TBL0_REG + (x) * 8)
-#define	BMAC_HOST_INFN_REG_ADDR(x)	(BMAC_HOST_INF0_REG + (x) * 8)
-
-/* XMAC registers offset */
-#define	XTXMAC_SW_RST_REG		0x000	/* XTX MAC soft reset */
-#define	XRXMAC_SW_RST_REG		0x008	/* XRX MAC soft reset */
-#define	XTXMAC_STATUS_REG		0x020	/* XTX MAC status */
-#define	XRXMAC_STATUS_REG		0x028	/* XRX MAC status */
-#define	XMAC_CTRL_STAT_REG		0x030	/* Control / Status */
-#define	XTXMAC_STAT_MSK_REG		0x040	/* XTX MAC Status mask */
-#define	XRXMAC_STAT_MSK_REG		0x048	/* XRX MAC Status mask */
-#define	XMAC_C_S_MSK_REG		0x050	/* Control / Status mask */
-#define	XMAC_CONFIG_REG			0x060	/* Configuration */
-
-/* xmac config bit fields */
-typedef union _xmac_cfg_t {
-	uint64_t value;
-
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t msw;	/* Most significant word */
-		uint32_t lsw;	/* Least significant word */
-#elif defined(_LITTLE_ENDIAN)
-		uint32_t lsw;	/* Least significant word */
-		uint32_t msw;	/* Most significant word */
-#endif
-	} val;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t	w1;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint32_t sel_clk_25mhz : 1;
-		uint32_t pcs_bypass	: 1;
-		uint32_t xpcs_bypass	: 1;
-		uint32_t mii_gmii_mode	: 2;
-		uint32_t lfs_disable	: 1;
-		uint32_t loopback	: 1;
-		uint32_t tx_output_en	: 1;
-		uint32_t sel_por_clk_src : 1;
-		uint32_t led_polarity	: 1;
-		uint32_t force_led_on	: 1;
-		uint32_t pass_fctl_frames : 1;
-		uint32_t recv_pause_en	: 1;
-		uint32_t mac2ipp_pkt_cnt_en : 1;
-		uint32_t strip_crc	: 1;
-		uint32_t addr_filter_en	: 1;
-		uint32_t hash_filter_en	: 1;
-		uint32_t code_viol_chk_dis	: 1;
-		uint32_t reserved_mcast	: 1;
-		uint32_t rx_crc_chk_dis	: 1;
-		uint32_t error_chk_dis	: 1;
-		uint32_t promisc_grp	: 1;
-		uint32_t promiscuous	: 1;
-		uint32_t rx_mac_enable	: 1;
-		uint32_t warning_msg_en	: 1;
-		uint32_t used		: 3;
-		uint32_t always_no_crc	: 1;
-		uint32_t var_min_ipg_en	: 1;
-		uint32_t strech_mode	: 1;
-		uint32_t tx_enable	: 1;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint32_t tx_enable	: 1;
-		uint32_t strech_mode	: 1;
-		uint32_t var_min_ipg_en	: 1;
-		uint32_t always_no_crc	: 1;
-		uint32_t used		: 3;
-		uint32_t warning_msg_en	: 1;
-		uint32_t rx_mac_enable	: 1;
-		uint32_t promiscuous	: 1;
-		uint32_t promisc_grp	: 1;
-		uint32_t error_chk_dis	: 1;
-		uint32_t rx_crc_chk_dis	: 1;
-		uint32_t reserved_mcast	: 1;
-		uint32_t code_viol_chk_dis	: 1;
-		uint32_t hash_filter_en	: 1;
-		uint32_t addr_filter_en	: 1;
-		uint32_t strip_crc	: 1;
-		uint32_t mac2ipp_pkt_cnt_en : 1;
-		uint32_t recv_pause_en	: 1;
-		uint32_t pass_fctl_frames : 1;
-		uint32_t force_led_on	: 1;
-		uint32_t led_polarity	: 1;
-		uint32_t sel_por_clk_src : 1;
-		uint32_t tx_output_en	: 1;
-		uint32_t loopback	: 1;
-		uint32_t lfs_disable	: 1;
-		uint32_t mii_gmii_mode	: 2;
-		uint32_t xpcs_bypass	: 1;
-		uint32_t pcs_bypass	: 1;
-		uint32_t sel_clk_25mhz : 1;
-#endif
-		} w0;
-
-#if defined(_LITTLE_ENDIAN)
-		uint32_t	w1;
-#endif
-	} bits;
-} xmac_cfg_t, *p_xmac_cfg_t;
-
-#define	XMAC_IPG_REG			0x080	/* Inter-Packet-Gap */
-#define	XMAC_MIN_REG			0x088	/* min frame size register */
-#define	XMAC_MAX_REG			0x090	/* max frame/burst size */
-#define	XMAC_ADDR0_REG			0x0a0	/* [47:32] of MAC addr (HI17) */
-#define	XMAC_ADDR1_REG			0x0a8	/* [31:16] of MAC addr */
-#define	XMAC_ADDR2_REG			0x0b0	/* [15:0] of MAC addr */
-#define	XRXMAC_BT_CNT_REG		0x100	/* bytes received / 8 */
-#define	XRXMAC_BC_FRM_CNT_REG		0x108	/* good BC frames received */
-#define	XRXMAC_MC_FRM_CNT_REG		0x110	/* good MC frames received */
-#define	XRXMAC_FRAG_CNT_REG		0x118	/* frag frames rejected */
-#define	XRXMAC_HIST_CNT1_REG		0x120	/* 64 bytes frames */
-#define	XRXMAC_HIST_CNT2_REG		0x128	/* 65-127 bytes frames */
-#define	XRXMAC_HIST_CNT3_REG		0x130	/* 128-255 bytes frames */
-#define	XRXMAC_HIST_CNT4_REG		0x138	/* 256-511 bytes frames */
-#define	XRXMAC_HIST_CNT5_REG		0x140	/* 512-1023 bytes frames */
-#define	XRXMAC_HIST_CNT6_REG		0x148	/* 1024-1522 bytes frames */
-#define	XRXMAC_MPSZER_CNT_REG		0x150	/* frames > maxframesize */
-#define	XRXMAC_CRC_ER_CNT_REG		0x158	/* frames failed CRC */
-#define	XRXMAC_CD_VIO_CNT_REG		0x160	/* frames with code vio */
-#define	XRXMAC_AL_ER_CNT_REG		0x168	/* frames with align error */
-#define	XTXMAC_FRM_CNT_REG		0x170	/* tx frames */
-#define	XTXMAC_BYTE_CNT_REG		0x178	/* tx bytes / 8 */
-#define	XMAC_LINK_FLT_CNT_REG		0x180	/* link faults */
-#define	XRXMAC_HIST_CNT7_REG		0x188	/* MAC2IPP/>1523 bytes frames */
-#define	XMAC_SM_REG			0x1a8	/* State machine */
-#define	XMAC_INTERN1_REG		0x1b0	/* internal signals for diag */
-#define	XMAC_INTERN2_REG		0x1b8	/* internal signals for diag */
-#define	XMAC_ADDR_CMPEN_REG		0x208	/* alt MAC addr check */
-#define	XMAC_ADDR3_REG			0x218	/* alt MAC addr 0 (HI 0) */
-#define	XMAC_ADDR4_REG			0x220	/* alt MAC addr 0 */
-#define	XMAC_ADDR5_REG			0x228	/* alt MAC addr 0 */
-#define	XMAC_ADDR6_REG			0x230	/* alt MAC addr 1 (HI 1) */
-#define	XMAC_ADDR7_REG			0x238	/* alt MAC addr 1 */
-#define	XMAC_ADDR8_REG			0x240	/* alt MAC addr 1 */
-#define	XMAC_ADDR9_REG			0x248	/* alt MAC addr 2 (HI 2) */
-#define	XMAC_ADDR10_REG			0x250	/* alt MAC addr 2 */
-#define	XMAC_ADDR11_REG			0x258	/* alt MAC addr 2 */
-#define	XMAC_ADDR12_REG			0x260	/* alt MAC addr 3 (HI 3) */
-#define	XMAC_ADDR13_REG			0x268	/* alt MAC addr 3 */
-#define	XMAC_ADDR14_REG			0x270	/* alt MAC addr 3 */
-#define	XMAC_ADDR15_REG			0x278	/* alt MAC addr 4 (HI 4) */
-#define	XMAC_ADDR16_REG			0x280	/* alt MAC addr 4 */
-#define	XMAC_ADDR17_REG			0x288	/* alt MAC addr 4 */
-#define	XMAC_ADDR18_REG			0x290	/* alt MAC addr 5 (HI 5) */
-#define	XMAC_ADDR19_REG			0x298	/* alt MAC addr 5 */
-#define	XMAC_ADDR20_REG			0x2a0	/* alt MAC addr 5 */
-#define	XMAC_ADDR21_REG			0x2a8	/* alt MAC addr 6 (HI 6) */
-#define	XMAC_ADDR22_REG			0x2b0	/* alt MAC addr 6 */
-#define	XMAC_ADDR23_REG			0x2b8	/* alt MAC addr 6 */
-#define	XMAC_ADDR24_REG			0x2c0	/* alt MAC addr 7 (HI 7) */
-#define	XMAC_ADDR25_REG			0x2c8	/* alt MAC addr 7 */
-#define	XMAC_ADDR26_REG			0x2d0	/* alt MAC addr 7 */
-#define	XMAC_ADDR27_REG			0x2d8	/* alt MAC addr 8 (HI 8) */
-#define	XMAC_ADDR28_REG			0x2e0	/* alt MAC addr 8 */
-#define	XMAC_ADDR29_REG			0x2e8	/* alt MAC addr 8 */
-#define	XMAC_ADDR30_REG			0x2f0	/* alt MAC addr 9 (HI 9) */
-#define	XMAC_ADDR31_REG			0x2f8	/* alt MAC addr 9 */
-#define	XMAC_ADDR32_REG			0x300	/* alt MAC addr 9 */
-#define	XMAC_ADDR33_REG			0x308	/* alt MAC addr 10 (HI 10) */
-#define	XMAC_ADDR34_REG			0x310	/* alt MAC addr 10 */
-#define	XMAC_ADDR35_REG			0x318	/* alt MAC addr 10 */
-#define	XMAC_ADDR36_REG			0x320	/* alt MAC addr 11 (HI 11) */
-#define	XMAC_ADDR37_REG			0x328	/* alt MAC addr 11 */
-#define	XMAC_ADDR38_REG			0x330	/* alt MAC addr 11 */
-#define	XMAC_ADDR39_REG			0x338	/* alt MAC addr 12 (HI 12) */
-#define	XMAC_ADDR40_REG			0x340	/* alt MAC addr 12 */
-#define	XMAC_ADDR41_REG			0x348	/* alt MAC addr 12 */
-#define	XMAC_ADDR42_REG			0x350	/* alt MAC addr 13 (HI 13) */
-#define	XMAC_ADDR43_REG			0x358	/* alt MAC addr 13 */
-#define	XMAC_ADDR44_REG			0x360	/* alt MAC addr 13 */
-#define	XMAC_ADDR45_REG			0x368	/* alt MAC addr 14 (HI 14) */
-#define	XMAC_ADDR46_REG			0x370	/* alt MAC addr 14 */
-#define	XMAC_ADDR47_REG			0x378	/* alt MAC addr 14 */
-#define	XMAC_ADDR48_REG			0x380	/* alt MAC addr 15 (HI 15) */
-#define	XMAC_ADDR49_REG			0x388	/* alt MAC addr 15 */
-#define	XMAC_ADDR50_REG			0x390	/* alt MAC addr 15 */
-#define	XMAC_ADDR_FILT0_REG		0x818	/* [47:32] addr filter (HI18) */
-#define	XMAC_ADDR_FILT1_REG		0x820	/* [31:16] of addr filter */
-#define	XMAC_ADDR_FILT2_REG		0x828	/* [15:0] of addr filter */
-#define	XMAC_ADDR_FILT12_MASK_REG 	0x830	/* addr filter 2 & 1 mask */
-#define	XMAC_ADDR_FILT0_MASK_REG	0x838	/* addr filter 0 mask */
-#define	XMAC_HASH_TBL0_REG		0x840	/* hash table 0 reg */
-#define	XMAC_HASH_TBL1_REG		0x848	/* hash table 1 reg */
-#define	XMAC_HASH_TBL2_REG		0x850	/* hash table 2 reg */
-#define	XMAC_HASH_TBL3_REG		0x858	/* hash table 3 reg */
-#define	XMAC_HASH_TBL4_REG		0x860	/* hash table 4 reg */
-#define	XMAC_HASH_TBL5_REG		0x868	/* hash table 5 reg */
-#define	XMAC_HASH_TBL6_REG		0x870	/* hash table 6 reg */
-#define	XMAC_HASH_TBL7_REG		0x878	/* hash table 7 reg */
-#define	XMAC_HASH_TBL8_REG		0x880	/* hash table 8 reg */
-#define	XMAC_HASH_TBL9_REG		0x888	/* hash table 9 reg */
-#define	XMAC_HASH_TBL10_REG		0x890	/* hash table 10 reg */
-#define	XMAC_HASH_TBL11_REG		0x898	/* hash table 11 reg */
-#define	XMAC_HASH_TBL12_REG		0x8a0	/* hash table 12 reg */
-#define	XMAC_HASH_TBL13_REG		0x8a8	/* hash table 13 reg */
-#define	XMAC_HASH_TBL14_REG		0x8b0	/* hash table 14 reg */
-#define	XMAC_HASH_TBL15_REG		0x8b8	/* hash table 15 reg */
-#define	XMAC_HOST_INF0_REG		0x900	/* Host info 0 (alt ad 0) */
-#define	XMAC_HOST_INF1_REG		0x908	/* Host info 1 (alt ad 1) */
-#define	XMAC_HOST_INF2_REG		0x910	/* Host info 2 (alt ad 2) */
-#define	XMAC_HOST_INF3_REG		0x918	/* Host info 3 (alt ad 3) */
-#define	XMAC_HOST_INF4_REG		0x920	/* Host info 4 (alt ad 4) */
-#define	XMAC_HOST_INF5_REG		0x928	/* Host info 5 (alt ad 5) */
-#define	XMAC_HOST_INF6_REG		0x930	/* Host info 6 (alt ad 6) */
-#define	XMAC_HOST_INF7_REG		0x938	/* Host info 7 (alt ad 7) */
-#define	XMAC_HOST_INF8_REG		0x940	/* Host info 8 (alt ad 8) */
-#define	XMAC_HOST_INF9_REG		0x948	/* Host info 9 (alt ad 9) */
-#define	XMAC_HOST_INF10_REG		0x950	/* Host info 10 (alt ad 10) */
-#define	XMAC_HOST_INF11_REG		0x958	/* Host info 11 (alt ad 11) */
-#define	XMAC_HOST_INF12_REG		0x960	/* Host info 12 (alt ad 12) */
-#define	XMAC_HOST_INF13_REG		0x968	/* Host info 13 (alt ad 13) */
-#define	XMAC_HOST_INF14_REG		0x970	/* Host info 14 (alt ad 14) */
-#define	XMAC_HOST_INF15_REG		0x978	/* Host info 15 (alt ad 15) */
-#define	XMAC_HOST_INF16_REG		0x980	/* Host info 16 (hash hit) */
-#define	XMAC_HOST_INF17_REG		0x988	/* Host info 17 (own da) */
-#define	XMAC_HOST_INF18_REG		0x990	/* Host info 18 (filter hit) */
-#define	XMAC_HOST_INF19_REG		0x998	/* Host info 19 (fc hit) */
-#define	XMAC_PA_DATA0_REG		0xb80	/* preamble [31:0] */
-#define	XMAC_PA_DATA1_REG		0xb88	/* preamble [63:32] */
-#define	XMAC_DEBUG_SEL_REG		0xb90	/* debug select */
-#define	XMAC_TRAINING_VECT_REG		0xb98	/* training vector */
-/* x ranges from 0 to 15 (XMAC_MAX_ALT_ADDR_ENTRY - 1) */
-#define	XMAC_ALT_ADDR0N_REG_ADDR(x)	(XMAC_ADDR3_REG + (x) * 24)
-#define	XMAC_ALT_ADDR1N_REG_ADDR(x)	(XMAC_ADDR3_REG + 8 + (x) * 24)
-#define	XMAC_ALT_ADDR2N_REG_ADDR(x)	(XMAC_ADDR3_REG + 16 + (x) * 24)
-#define	XMAC_HASH_TBLN_REG_ADDR(x)	(XMAC_HASH_TBL0_REG + (x) * 8)
-#define	XMAC_HOST_INFN_REG_ADDR(x)	(XMAC_HOST_INF0_REG + (x) * 8)
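/*
 * A minimal usage sketch for the indexed-offset macros above: programming
 * alternate MAC address entry "slot" (0 .. XMAC_MAX_ALT_ADDR_ENTRY - 1).
 * reg_write64() is a hypothetical register-access helper, and the ordering
 * of the three 16-bit address pieces is an assumption; only the offset
 * arithmetic comes from this header.
 */
extern void reg_write64(uint32_t offset, uint64_t value);	/* hypothetical */

static void
xmac_set_alt_addr_sketch(int slot, uint16_t part0, uint16_t part1,
    uint16_t part2)
{
	reg_write64(XMAC_ALT_ADDR0N_REG_ADDR(slot), part0);
	reg_write64(XMAC_ALT_ADDR1N_REG_ADDR(slot), part1);
	reg_write64(XMAC_ALT_ADDR2N_REG_ADDR(slot), part2);
}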
-
-/* MIF registers offset */
-#define	MIF_BB_MDC_REG			0	   /* MIF bit-bang clock */
-#define	MIF_BB_MDO_REG			0x008	   /* MIF bit-bang data */
-#define	MIF_BB_MDO_EN_REG		0x010	   /* MIF bit-bang output en */
-#define	MIF_OUTPUT_FRAME_REG		0x018	   /* MIF frame/output reg */
-#define	MIF_CONFIG_REG			0x020	   /* MIF config reg */
-#define	MIF_POLL_STATUS_REG		0x028	   /* MIF poll status reg */
-#define	MIF_POLL_MASK_REG		0x030	   /* MIF poll mask reg */
-#define	MIF_STATE_MACHINE_REG		0x038	   /* MIF state machine reg */
-#define	MIF_STATUS_REG			0x040	   /* MIF status reg */
-#define	MIF_MASK_REG			0x048	   /* MIF mask reg */
-
-
-/* PCS registers offset */
-#define	PCS_MII_CTRL_REG		0	   /* PCS MII control reg */
-#define	PCS_MII_STATUS_REG		0x008	   /* PCS MII status reg */
-#define	PCS_MII_ADVERT_REG		0x010	   /* PCS MII advertisement */
-#define	PCS_MII_LPA_REG			0x018	   /* link partner ability */
-#define	PCS_CONFIG_REG			0x020	   /* PCS config reg */
-#define	PCS_STATE_MACHINE_REG		0x028	   /* PCS state machine */
-#define	PCS_INTR_STATUS_REG		0x030	/* PCS interrupt status */
-#define	PCS_DATAPATH_MODE_REG		0x0a0	   /* datapath mode reg */
-#define	PCS_PACKET_COUNT_REG		0x0c0	   /* PCS packet counter */
-
-#define	XPCS_CTRL_1_REG			0	/* Control */
-#define	XPCS_STATUS_1_REG		0x008
-#define	XPCS_DEV_ID_REG			0x010	/* 32bits IEEE manufacture ID */
-#define	XPCS_SPEED_ABILITY_REG		0x018
-#define	XPCS_DEV_IN_PKG_REG		0x020
-#define	XPCS_CTRL_2_REG			0x028
-#define	XPCS_STATUS_2_REG		0x030
-#define	XPCS_PKG_ID_REG			0x038	/* Package ID */
-#define	XPCS_STATUS_REG			0x040
-#define	XPCS_TEST_CTRL_REG		0x048
-#define	XPCS_CFG_VENDOR_1_REG		0x050
-#define	XPCS_DIAG_VENDOR_2_REG		0x058
-#define	XPCS_MASK_1_REG			0x060
-#define	XPCS_PKT_CNTR_REG		0x068
-#define	XPCS_TX_STATE_MC_REG		0x070
-#define	XPCS_DESKEW_ERR_CNTR_REG	0x078
-#define	XPCS_SYM_ERR_CNTR_L0_L1_REG	0x080
-#define	XPCS_SYM_ERR_CNTR_L2_L3_REG	0x088
-#define	XPCS_TRAINING_VECTOR_REG	0x090
-
-/* ESR registers offset */
-#define	ESR_RESET_REG			0
-#define	ESR_CONFIG_REG			0x008
-#define	ESR_0_PLL_CONFIG_REG		0x010
-#define	ESR_0_CONTROL_REG		0x018
-#define	ESR_0_TEST_CONFIG_REG		0x020
-#define	ESR_1_PLL_CONFIG_REG		0x028
-#define	ESR_1_CONTROL_REG		0x030
-#define	ESR_1_TEST_CONFIG_REG		0x038
-#define	ESR_ENET_RGMII_CFG_REG		0x040
-#define	ESR_INTERNAL_SIGNALS_REG	0x800
-#define	ESR_DEBUG_SEL_REG		0x808
-
-
-/* Reset Register */
-#define	MAC_SEND_PAUSE_TIME_MASK	0x0000FFFF /* value of pause time */
-#define	MAC_SEND_PAUSE_SEND		0x00010000 /* send pause flow ctrl */
-
-/* Tx MAC Status Register */
-#define	MAC_TX_FRAME_XMIT		0x00000001 /* successful tx frame */
-#define	MAC_TX_UNDERRUN			0x00000002 /* starvation in xmit */
-#define	MAC_TX_MAX_PACKET_ERR		0x00000004 /* TX frame exceeds max */
-#define	MAC_TX_BYTE_CNT_EXP		0x00000400 /* TX byte cnt overflow */
-#define	MAC_TX_FRAME_CNT_EXP		0x00000800 /* Tx frame cnt overflow */
-
-/* Rx MAC Status Register */
-#define	MAC_RX_FRAME_RECV		0x00000001 /* successful rx frame */
-#define	MAC_RX_OVERFLOW			0x00000002 /* RX FIFO overflow */
-#define	MAC_RX_FRAME_COUNT		0x00000004 /* rx frame cnt rollover */
-#define	MAC_RX_ALIGN_ERR		0x00000008 /* alignment err rollover */
-#define	MAC_RX_CRC_ERR			0x00000010 /* crc error cnt rollover */
-#define	MAC_RX_LEN_ERR			0x00000020 /* length err cnt rollover */
-#define	MAC_RX_VIOL_ERR			0x00000040 /* code vio err rollover */
-#define	MAC_RX_BYTE_CNT_EXP		0x00000080 /* RX MAC byte rollover */
-
-/* MAC Control Status Register */
-#define	MAC_CTRL_PAUSE_RECEIVED		0x00000001 /* successful pause frame */
-#define	MAC_CTRL_PAUSE_STATE		0x00000002 /* notpause-->pause */
-#define	MAC_CTRL_NOPAUSE_STATE		0x00000004 /* pause-->notpause */
-#define	MAC_CTRL_PAUSE_TIME_MASK	0xFFFF0000 /* value of pause time */
-#define	MAC_CTRL_PAUSE_TIME_SHIFT	16
-
-/* Tx MAC Configuration Register */
-#define	MAC_TX_CFG_TXMAC_ENABLE		0x00000001 /* enable TX MAC. */
-#define	MAC_TX_CFG_NO_FCS		0x00000100 /* Tx does not generate CRC */
-
-/* Rx MAC Configuration Register */
-#define	MAC_RX_CFG_RXMAC_ENABLE		0x00000001 /* enable RX MAC */
-#define	MAC_RX_CFG_STRIP_PAD		0x00000002 /* not supported, set to 0 */
-#define	MAC_RX_CFG_STRIP_FCS		0x00000004 /* strip last 4bytes (CRC) */
-#define	MAC_RX_CFG_PROMISC		0x00000008 /* promisc mode enable */
-#define	MAC_RX_CFG_PROMISC_GROUP  	0x00000010 /* accept all MC frames */
-#define	MAC_RX_CFG_HASH_FILTER_EN	0x00000020 /* use hash table */
-#define	MAC_RX_CFG_ADDR_FILTER_EN    	0x00000040 /* use address filter */
-#define	MAC_RX_CFG_DISABLE_DISCARD	0x00000080 /* do not set abort bit */
-#define	MAC_RX_MAC2IPP_PKT_CNT_EN	0x00000200 /* rx pkt cnt -> BMAC-IPP */
-#define	MAC_RX_MAC_REG_RW_TEST_MASK	0x00000c00 /* BMAC reg RW test */
-#define	MAC_RX_MAC_REG_RW_TEST_SHIFT	10
-
-/* MAC Control Configuration Register */
-#define	MAC_CTRL_CFG_SEND_PAUSE_EN	0x00000001 /* send pause flow ctrl */
-#define	MAC_CTRL_CFG_RECV_PAUSE_EN	0x00000002 /* receive pause flow ctrl */
-#define	MAC_CTRL_CFG_PASS_CTRL		0x00000004 /* accept MAC ctrl pkts */
-
-/* MAC XIF Configuration Register */
-#define	MAC_XIF_TX_OUTPUT_EN		0x00000001 /* enable Tx output driver */
-#define	MAC_XIF_MII_INT_LOOPBACK	0x00000002 /* loopback GMII xmit data */
-#define	MAC_XIF_GMII_MODE		0x00000008 /* operates with GMII clks */
-#define	MAC_XIF_LINK_LED		0x00000020 /* LINKLED# active (low) */
-#define	MAC_XIF_LED_POLARITY		0x00000040 /* LED polarity */
-#define	MAC_XIF_SEL_CLK_25MHZ		0x00000080 /* Select 10/100Mbps */
-
-/* MAC IPG Registers */
-#define	BMAC_MIN_FRAME_MASK		0x3FF	   /* 10-bit reg */
-
-/* MAC Max Frame Size Register */
-#define	BMAC_MAX_BURST_MASK    		0x3FFF0000 /* max burst size [30:16] */
-#define	BMAC_MAX_BURST_SHIFT   		16
-#define	BMAC_MAX_FRAME_MASK    		0x00007FFF /* max frame size [14:0] */
-#define	BMAC_MAX_FRAME_SHIFT   		0
-
-/* MAC Preamble size register */
-#define	BMAC_PA_SIZE_MASK		0x000003FF
-	/* # of preamble bytes TxMAC sends at the beginning of each frame */
-
-/*
- * mac address registers:
- *	register	contains			comparison
- *	--------	--------			----------
- *	0		16 MSB of primary MAC addr	[47:32] of DA field
- *	1		16 middle bits ""		[31:16] of DA field
- *	2		16 LSB ""			[15:0] of DA field
- *	3*x		16MSB of alt MAC addr 1-7	[47:32] of DA field
- *	4*x		16 middle bits ""		[31:16]
- *	5*x		16 LSB ""			[15:0]
- *	42		16 MSB of MAC CTRL addr		[47:32] of DA.
- *	43		16 middle bits ""		[31:16]
- *	44		16 LSB ""			[15:0]
- *	MAC CTRL addr must be the reserved multicast addr for MAC CTRL frames.
- *	if there is a match, MAC will set the bit for alternative address
- *	filter pass [15]
- *
- *	here is the map of registers given MAC address notation: a:b:c:d:e:f
- *			ab		cd		ef
- *	primary addr	reg 2		reg 1		reg 0
- *	alt addr 1	reg 5		reg 4		reg 3
- *	alt addr x	reg 5*x		reg 4*x		reg 3*x
- *	|		|		|		|
- *	|		|		|		|
- *	alt addr 7	reg 23		reg 22		reg 21
- *	ctrl addr	reg 44		reg 43		reg 42
- */
-
-#define	BMAC_ALT_ADDR_BASE		0x118
-#define	BMAC_MAX_ALT_ADDR_ENTRY		7	   /* 7 alternate MAC addr */
-#define	BMAC_MAX_ADDR_ENTRY		(BMAC_MAX_ALT_ADDR_ENTRY + 1)
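/*
 * A minimal sketch following the notation map in the comment above
 * ("ab" -> register 2, "cd" -> register 1, "ef" -> register 0): packing a
 * 6-byte address a:b:c:d:e:f into the three 16-bit values written to one
 * address-register triplet.  The octet ordering is taken only from that
 * comment.
 */
static void
bmac_addr_pack_sketch(const uint8_t ea[6], uint16_t regs[3])
{
	regs[2] = (ea[0] << 8) | ea[1];		/* "ab" */
	regs[1] = (ea[2] << 8) | ea[3];		/* "cd" */
	regs[0] = (ea[4] << 8) | ea[5];		/* "ef" */
}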
-
-/* hash table registers */
-#define	MAC_MAX_HASH_ENTRY		16
-
-/* 27-bit register has the current state for key state machines in the MAC */
-#define	MAC_SM_RLM_MASK			0x07800000
-#define	MAC_SM_RLM_SHIFT		23
-#define	MAC_SM_RX_FC_MASK		0x00700000
-#define	MAC_SM_RX_FC_SHIFT		20
-#define	MAC_SM_TLM_MASK			0x000F0000
-#define	MAC_SM_TLM_SHIFT		16
-#define	MAC_SM_ENCAP_SM_MASK		0x0000F000
-#define	MAC_SM_ENCAP_SM_SHIFT		12
-#define	MAC_SM_TX_REQ_MASK		0x00000C00
-#define	MAC_SM_TX_REQ_SHIFT		10
-#define	MAC_SM_TX_FC_MASK		0x000003C0
-#define	MAC_SM_TX_FC_SHIFT		6
-#define	MAC_SM_FIFO_WRITE_SEL_MASK	0x00000038
-#define	MAC_SM_FIFO_WRITE_SEL_SHIFT	3
-#define	MAC_SM_TX_FIFO_EMPTY_MASK	0x00000007
-#define	MAC_SM_TX_FIFO_EMPTY_SHIFT	0
-
-#define	BMAC_ADDR0_CMPEN		0x00000001
-#define	BMAC_ADDRN_CMPEN(x)		(BMAC_ADDR0_CMPEN << (x))
-
-/* MAC Host Info Table Registers */
-#define	BMAC_MAX_HOST_INFO_ENTRY	9 	/* 9 host entries */
-
-/*
- * ********************* XMAC registers *********************************
- */
-
-/* Reset Register */
-#define	XTXMAC_SOFT_RST			0x00000001 /* XTX MAC software reset */
-#define	XTXMAC_REG_RST			0x00000002 /* XTX MAC registers reset */
-#define	XRXMAC_SOFT_RST			0x00000001 /* XRX MAC software reset */
-#define	XRXMAC_REG_RST			0x00000002 /* XRX MAC registers reset */
-
-/* XTX MAC Status Register */
-#define	XMAC_TX_FRAME_XMIT		0x00000001 /* successful tx frame */
-#define	XMAC_TX_UNDERRUN		0x00000002 /* starvation in xmit */
-#define	XMAC_TX_MAX_PACKET_ERR		0x00000004 /* XTX frame exceeds max */
-#define	XMAC_TX_OVERFLOW		0x00000008 /* XTX byte cnt overflow */
-#define	XMAC_TX_FIFO_XFR_ERR		0x00000010 /* xtlm state mach error */
-#define	XMAC_TX_BYTE_CNT_EXP		0x00000400 /* XTX byte cnt overflow */
-#define	XMAC_TX_FRAME_CNT_EXP		0x00000800 /* XTX frame cnt overflow */
-
-/* XRX MAC Status Register */
-#define	XMAC_RX_FRAME_RCVD		0x00000001 /* successful rx frame */
-#define	XMAC_RX_OVERFLOW		0x00000002 /* RX FIFO overflow */
-#define	XMAC_RX_UNDERFLOW		0x00000004 /* RX FIFO underrun */
-#define	XMAC_RX_CRC_ERR_CNT_EXP		0x00000008 /* crc error cnt rollover */
-#define	XMAC_RX_LEN_ERR_CNT_EXP		0x00000010 /* length err cnt rollover */
-#define	XMAC_RX_VIOL_ERR_CNT_EXP	0x00000020 /* code vio err rollover */
-#define	XMAC_RX_OCT_CNT_EXP		0x00000040 /* XRX MAC byte rollover */
-#define	XMAC_RX_HST_CNT1_EXP		0x00000080 /* XRX MAC hist1 rollover */
-#define	XMAC_RX_HST_CNT2_EXP		0x00000100 /* XRX MAC hist2 rollover */
-#define	XMAC_RX_HST_CNT3_EXP		0x00000200 /* XRX MAC hist3 rollover */
-#define	XMAC_RX_HST_CNT4_EXP		0x00000400 /* XRX MAC hist4 rollover */
-#define	XMAC_RX_HST_CNT5_EXP		0x00000800 /* XRX MAC hist5 rollover */
-#define	XMAC_RX_HST_CNT6_EXP		0x00001000 /* XRX MAC hist6 rollover */
-#define	XMAC_RX_BCAST_CNT_EXP		0x00002000 /* XRX BC cnt rollover */
-#define	XMAC_RX_MCAST_CNT_EXP		0x00004000 /* XRX MC cnt rollover */
-#define	XMAC_RX_FRAG_CNT_EXP		0x00008000 /* fragment cnt rollover */
-#define	XMAC_RX_ALIGNERR_CNT_EXP	0x00010000 /* framealign err rollover */
-#define	XMAC_RX_LINK_FLT_CNT_EXP	0x00020000 /* link fault cnt rollover */
-#define	XMAC_RX_REMOTE_FLT_DET		0x00040000 /* Remote Fault detected */
-#define	XMAC_RX_LOCAL_FLT_DET		0x00080000 /* Local Fault detected */
-#define	XMAC_RX_HST_CNT7_EXP		0x00100000 /* XRX MAC hist7 rollover */
-
-
-#define	XMAC_CTRL_PAUSE_RCVD		0x00000001 /* successful pause frame */
-#define	XMAC_CTRL_PAUSE_STATE		0x00000002 /* notpause-->pause */
-#define	XMAC_CTRL_NOPAUSE_STATE		0x00000004 /* pause-->notpause */
-#define	XMAC_CTRL_PAUSE_TIME_MASK	0xFFFF0000 /* value of pause time */
-#define	XMAC_CTRL_PAUSE_TIME_SHIFT	16
-
-/* XMAC Configuration Register */
-#define	XMAC_CONFIG_TX_BIT_MASK		0x000000ff /* bits [7:0] */
-#define	XMAC_CONFIG_RX_BIT_MASK		0x001fff00 /* bits [20:8] */
-#define	XMAC_CONFIG_XIF_BIT_MASK	0xffe00000 /* bits [31:21] */
-
-/* XTX MAC config bits */
-#define	XMAC_TX_CFG_TX_ENABLE		0x00000001 /* enable XTX MAC */
-#define	XMAC_TX_CFG_STRETCH_MD		0x00000002 /* WAN application */
-#define	XMAC_TX_CFG_VAR_MIN_IPG_EN	0x00000004 /* Transmit pkts < minpsz */
-#define	XMAC_TX_CFG_ALWAYS_NO_CRC	0x00000008 /* No CRC generated */
-
-#define	XMAC_WARNING_MSG_ENABLE		0x00000080 /* Sim warning msg enable */
-
-/* XRX MAC config bits */
-#define	XMAC_RX_CFG_RX_ENABLE		0x00000100 /* enable XRX MAC */
-#define	XMAC_RX_CFG_PROMISC		0x00000200 /* promisc mode enable */
-#define	XMAC_RX_CFG_PROMISC_GROUP  	0x00000400 /* accept all MC frames */
-#define	XMAC_RX_CFG_ERR_CHK_DISABLE	0x00000800 /* do not set abort bit */
-#define	XMAC_RX_CFG_CRC_CHK_DISABLE	0x00001000 /* disable CRC logic */
-#define	XMAC_RX_CFG_RESERVED_MCAST	0x00002000 /* reserved MCaddr compare */
-#define	XMAC_RX_CFG_CD_VIO_CHK		0x00004000 /* rx code violation chk */
-#define	XMAC_RX_CFG_HASH_FILTER_EN	0x00008000 /* use hash table */
-#define	XMAC_RX_CFG_ADDR_FILTER_EN	0x00010000 /* use alt addr filter */
-#define	XMAC_RX_CFG_STRIP_CRC		0x00020000 /* strip last 4bytes (CRC) */
-#define	XMAC_RX_MAC2IPP_PKT_CNT_EN	0x00040000 /* histo_cntr7 cnt mode */
-#define	XMAC_RX_CFG_RX_PAUSE_EN		0x00080000 /* receive pause flow ctrl */
-#define	XMAC_RX_CFG_PASS_FLOW_CTRL	0x00100000 /* accept MAC ctrl pkts */
-
-
-/* MAC transceiver (XIF) configuration registers */
-
-#define	XMAC_XIF_FORCE_LED_ON		0x00200000 /* Force Link LED on */
-#define	XMAC_XIF_LED_POLARITY		0x00400000 /* LED polarity */
-#define	XMAC_XIF_SEL_POR_CLK_SRC	0x00800000 /* Select POR clk src */
-#define	XMAC_XIF_TX_OUTPUT_EN		0x01000000 /* enable MII/GMII modes */
-#define	XMAC_XIF_LOOPBACK		0x02000000 /* loopback xmac xgmii tx */
-#define	XMAC_XIF_LFS_DISABLE		0x04000000 /* disable link fault sig */
-#define	XMAC_XIF_MII_MODE_MASK		0x18000000 /* MII/GMII/XGMII mode */
-#define	XMAC_XIF_MII_MODE_SHIFT		27
-#define	XMAC_XIF_XGMII_MODE		0x00
-#define	XMAC_XIF_GMII_MODE		0x01
-#define	XMAC_XIF_MII_MODE		0x02
-#define	XMAC_XIF_ILLEGAL_MODE		0x03
-#define	XMAC_XIF_XPCS_BYPASS		0x20000000 /* use external xpcs */
-#define	XMAC_XIF_1G_PCS_BYPASS		0x40000000 /* use external pcs */
-#define	XMAC_XIF_SEL_CLK_25MHZ		0x80000000 /* 25Mhz clk for 100mbps */
-
-/* IPG register */
-#define	XMAC_IPG_VALUE_MASK		0x00000007 /* IPG in XGMII mode */
-#define	XMAC_IPG_VALUE_SHIFT		0
-#define	XMAC_IPG_VALUE1_MASK		0x0000ff00 /* IPG in GMII/MII mode */
-#define	XMAC_IPG_VALUE1_SHIFT		8
-#define	XMAC_IPG_STRETCH_RATIO_MASK	0x001f0000
-#define	XMAC_IPG_STRETCH_RATIO_SHIFT	16
-#define	XMAC_IPG_STRETCH_CONST_MASK	0x00e00000
-#define	XMAC_IPG_STRETCH_CONST_SHIFT	21
-
-#define	IPG_12_15_BYTE			3
-#define	IPG_16_19_BYTE			4
-#define	IPG_20_23_BYTE			5
-#define	IPG1_12_BYTES			10
-#define	IPG1_13_BYTES			11
-#define	IPG1_14_BYTES			12
-#define	IPG1_15_BYTES			13
-#define	IPG1_16_BYTES			14
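/*
 * Illustrative composition of an XMAC IPG register value from the
 * encodings above: a 12-15 byte IPG for XGMII mode plus a 12-byte IPG1
 * for GMII/MII mode.  Built purely from the masks and shifts in this
 * file.
 */
static uint32_t
xmac_ipg_value_sketch(void)
{
	return (((IPG_12_15_BYTE << XMAC_IPG_VALUE_SHIFT) &
	    XMAC_IPG_VALUE_MASK) |
	    ((IPG1_12_BYTES << XMAC_IPG_VALUE1_SHIFT) &
	    XMAC_IPG_VALUE1_MASK));
}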
-
-
-#define	XMAC_MIN_TX_FRM_SZ_MASK		0x3ff	   /* Min tx frame size */
-#define	XMAC_MIN_TX_FRM_SZ_SHIFT	0
-#define	XMAC_SLOT_TIME_MASK		0x0003fc00 /* slot time */
-#define	XMAC_SLOT_TIME_SHIFT		10
-#define	XMAC_MIN_RX_FRM_SZ_MASK		0x3ff00000 /* Min rx frame size */
-#define	XMAC_MIN_RX_FRM_SZ_SHIFT	20
-#define	XMAC_MAX_FRM_SZ_MASK		0x00003fff /* max tx frame size */
-
-/* State Machine Register */
-#define	XMAC_SM_TX_LNK_MGMT_MASK	0x00000007
-#define	XMAC_SM_TX_LNK_MGMT_SHIFT	0
-#define	XMAC_SM_SOP_DETECT		0x00000008
-#define	XMAC_SM_LNK_FLT_SIG_MASK	0x00000030
-#define	XMAC_SM_LNK_FLT_SIG_SHIFT	4
-#define	XMAC_SM_MII_GMII_MD_RX_LNK	0x00000040
-#define	XMAC_SM_XGMII_MD_RX_LNK		0x00000080
-#define	XMAC_SM_XGMII_ONLY_VAL_SIG	0x00000100
-#define	XMAC_SM_ALT_ADR_N_HSH_FN_SIG	0x00000200
-#define	XMAC_SM_RXMAC_IPP_STAT_MASK	0x00001c00
-#define	XMAC_SM_RXMAC_IPP_STAT_SHIFT	10
-#define	XMAC_SM_RXFIFO_WPTR_CLK_MASK	0x007c0000
-#define	XMAC_SM_RXFIFO_WPTR_CLK_SHIFT	18
-#define	XMAC_SM_RXFIFO_RPTR_CLK_MASK	0x0F800000
-#define	XMAC_SM_RXFIFO_RPTR_CLK_SHIFT	23
-#define	XMAC_SM_TXFIFO_FULL_CLK		0x10000000
-#define	XMAC_SM_TXFIFO_EMPTY_CLK	0x20000000
-#define	XMAC_SM_RXFIFO_FULL_CLK		0x40000000
-#define	XMAC_SM_RXFIFO_EMPTY_CLK	0x80000000
-
-/* Internal Signals 1 Register */
-#define	XMAC_IS1_OPP_TXMAC_STAT_MASK	0x0000000F
-#define	XMAC_IS1_OPP_TXMAC_STAT_SHIFT	0
-#define	XMAC_IS1_OPP_TXMAC_ABORT	0x00000010
-#define	XMAC_IS1_OPP_TXMAC_TAG 		0x00000020
-#define	XMAC_IS1_OPP_TXMAC_ACK		0x00000040
-#define	XMAC_IS1_TXMAC_OPP_REQ		0x00000080
-#define	XMAC_IS1_RXMAC_IPP_STAT_MASK	0x0FFFFF00
-#define	XMAC_IS1_RXMAC_IPP_STAT_SHIFT	8
-#define	XMAC_IS1_RXMAC_IPP_CTRL		0x10000000
-#define	XMAC_IS1_RXMAC_IPP_TAG		0x20000000
-#define	XMAC_IS1_IPP_RXMAC_REQ		0x40000000
-#define	XMAC_IS1_RXMAC_IPP_ACK		0x80000000
-
-/* Internal Signals 2 Register */
-#define	XMAC_IS2_TX_HB_TIMER_MASK	0x0000000F
-#define	XMAC_IS2_TX_HB_TIMER_SHIFT	0
-#define	XMAC_IS2_RX_HB_TIMER_MASK	0x000000F0
-#define	XMAC_IS2_RX_HB_TIMER_SHIFT	4
-#define	XMAC_IS2_XPCS_RXC_MASK		0x0000FF00
-#define	XMAC_IS2_XPCS_RXC_SHIFT		8
-#define	XMAC_IS2_XPCS_TXC_MASK		0x00FF0000
-#define	XMAC_IS2_XPCS_TXC_SHIFT		16
-#define	XMAC_IS2_LOCAL_FLT_OC_SYNC	0x01000000
-#define	XMAC_IS2_RMT_FLT_OC_SYNC	0x02000000
-
-/* Register size masking */
-
-#define	XTXMAC_FRM_CNT_MASK		0xFFFFFFFF
-#define	XTXMAC_BYTE_CNT_MASK		0xFFFFFFFF
-#define	XRXMAC_CRC_ER_CNT_MASK		0x000000FF
-#define	XRXMAC_MPSZER_CNT_MASK		0x000000FF
-#define	XRXMAC_CD_VIO_CNT_MASK		0x000000FF
-#define	XRXMAC_BT_CNT_MASK		0xFFFFFFFF
-#define	XRXMAC_HIST_CNT1_MASK		0x001FFFFF
-#define	XRXMAC_HIST_CNT2_MASK		0x001FFFFF
-#define	XRXMAC_HIST_CNT3_MASK		0x000FFFFF
-#define	XRXMAC_HIST_CNT4_MASK		0x0007FFFF
-#define	XRXMAC_HIST_CNT5_MASK		0x0003FFFF
-#define	XRXMAC_HIST_CNT6_MASK		0x0001FFFF
-#define	XRXMAC_BC_FRM_CNT_MASK		0x001FFFFF
-#define	XRXMAC_MC_FRM_CNT_MASK		0x001FFFFF
-#define	XRXMAC_FRAG_CNT_MASK		0x001FFFFF
-#define	XRXMAC_AL_ER_CNT_MASK		0x000000FF
-#define	XMAC_LINK_FLT_CNT_MASK		0x000000FF
-#define	BTXMAC_FRM_CNT_MASK		0x001FFFFF
-#define	BTXMAC_BYTE_CNT_MASK		0x07FFFFFF
-#define	RXMAC_FRM_CNT_MASK		0x0000FFFF
-#define	BRXMAC_BYTE_CNT_MASK		0x07FFFFFF
-#define	BMAC_AL_ER_CNT_MASK		0x0000FFFF
-#define	MAC_LEN_ER_CNT_MASK		0x0000FFFF
-#define	BMAC_CRC_ER_CNT_MASK		0x0000FFFF
-#define	BMAC_CD_VIO_CNT_MASK		0x0000FFFF
-#define	XMAC_XPCS_DESKEW_ERR_CNT_MASK	0x000000FF
-#define	XMAC_XPCS_SYM_ERR_CNT_L0_MASK	0x0000FFFF
-#define	XMAC_XPCS_SYM_ERR_CNT_L1_MASK	0xFFFF0000
-#define	XMAC_XPCS_SYM_ERR_CNT_L1_SHIFT	16
-#define	XMAC_XPCS_SYM_ERR_CNT_L2_MASK	0x0000FFFF
-#define	XMAC_XPCS_SYM_ERR_CNT_L3_MASK	0xFFFF0000
-#define	XMAC_XPCS_SYM_ERR_CNT_L3_SHIFT	16
-
-/* Alternate MAC address registers */
-#define	XMAC_MAX_ALT_ADDR_ENTRY		16	   /* 16 alternate MAC addrs */
-#define	XMAC_MAX_ADDR_ENTRY		(XMAC_MAX_ALT_ADDR_ENTRY + 1)
-
-/* Max / Min parameters for Neptune MAC */
-
-#define	MAC_MAX_ALT_ADDR_ENTRY		XMAC_MAX_ALT_ADDR_ENTRY
-#define	MAC_MAX_HOST_INFO_ENTRY		XMAC_MAX_HOST_INFO_ENTRY
-
-/* HostInfo entry for the unique MAC address */
-#define	XMAC_UNIQUE_HOST_INFO_ENTRY	17
-#define	BMAC_UNIQUE_HOST_INFO_ENTRY	0
-
-/* HostInfo entry for the multicast address */
-#define	XMAC_MULTI_HOST_INFO_ENTRY	16
-#define	BMAC_MULTI_HOST_INFO_ENTRY	8
-
-/* XMAC Host Info Register */
-typedef union hostinfo {
-
-	uint64_t value;
-
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t msw;	/* Most significant word */
-		uint32_t lsw;	/* Least significant word */
-#elif defined(_LITTLE_ENDIAN)
-		uint32_t lsw;	/* Least significant word */
-		uint32_t msw;	/* Most significant word */
-#endif
-	} val;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t	w1;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint32_t reserved2	: 23;
-		uint32_t mac_pref	: 1;
-		uint32_t reserved1	: 5;
-		uint32_t rdc_tbl_num	: 3;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint32_t rdc_tbl_num	: 3;
-		uint32_t reserved1	: 5;
-		uint32_t mac_pref	: 1;
-		uint32_t reserved2	: 23;
-#endif
-		} w0;
-
-#if defined(_LITTLE_ENDIAN)
-		uint32_t	w1;
-#endif
-	} bits;
-
-} hostinfo_t;
-
-typedef union hostinfo *hostinfo_pt;
-
-#define	XMAC_HI_RDC_TBL_NUM_MASK	0x00000007
-#define	XMAC_HI_MAC_PREF		0x00000100
-
-#define	XMAC_MAX_HOST_INFO_ENTRY	20	   /* 20 host entries */
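/*
 * Sketch of assembling one XMAC HostInfo entry value: select an RDC
 * table and optionally set the MAC preference bit.  Writing the value
 * to XMAC_HOST_INFN_REG_ADDR(entry) is left to the caller; only the
 * field layout comes from the union and masks above.
 */
static uint64_t
xmac_hostinfo_value_sketch(uint8_t rdc_table, int prefer_mac)
{
	hostinfo_t hi;

	hi.value = 0;
	hi.bits.w0.rdc_tbl_num = rdc_table & XMAC_HI_RDC_TBL_NUM_MASK;
	hi.bits.w0.mac_pref = prefer_mac ? 1 : 0;
	return (hi.value);
}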
-
-/*
- * ******************** MIF registers *********************************
- */
-
-/*
- * 32-bit register serves as an instruction register when the MIF is
- * programmed in frame mode. load this register w/ a valid instruction
- * (as per IEEE 802.3u MII spec). poll this register to check for instruction
- * execution completion. during a read operation, this register will also
- * contain the 16-bit data returned by the transceiver. unless specified
- * otherwise, fields are considered "don't care" when polling for
- * completion.
- */
-
-#define	MIF_FRAME_START_MASK		0xC0000000 /* start of frame mask */
-#define	MIF_FRAME_ST_22			0x40000000 /* STart of frame, Cl 22 */
-#define	MIF_FRAME_ST_45			0x00000000 /* STart of frame, Cl 45 */
-#define	MIF_FRAME_OPCODE_MASK		0x30000000 /* opcode */
-#define	MIF_FRAME_OP_READ_22		0x20000000 /* read OPcode, Cl 22 */
-#define	MIF_FRAME_OP_WRITE_22		0x10000000 /* write OPcode, Cl 22 */
-#define	MIF_FRAME_OP_ADDR_45		0x00000000 /* addr of reg to access */
-#define	MIF_FRAME_OP_READ_45		0x30000000 /* read OPcode, Cl 45 */
-#define	MIF_FRAME_OP_WRITE_45		0x10000000 /* write OPcode, Cl 45 */
-#define	MIF_FRAME_OP_P_R_I_A_45		0x20000000 /* post-read-inc-addr */
-#define	MIF_FRAME_PHY_ADDR_MASK		0x0F800000 /* phy address mask */
-#define	MIF_FRAME_PHY_ADDR_SHIFT	23
-#define	MIF_FRAME_REG_ADDR_MASK		0x007C0000 /* reg addr in Cl 22 */
-						/* dev addr in Cl 45 */
-#define	MIF_FRAME_REG_ADDR_SHIFT	18
-#define	MIF_FRAME_TURN_AROUND_MSB	0x00020000 /* turn around, MSB. */
-#define	MIF_FRAME_TURN_AROUND_LSB	0x00010000 /* turn around, LSB. */
-#define	MIF_FRAME_DATA_MASK		0x0000FFFF /* instruction payload */
-
-/* Clause 45 frame field values */
-#define	FRAME45_ST		0
-#define	FRAME45_OP_ADDR		0
-#define	FRAME45_OP_WRITE	1
-#define	FRAME45_OP_READ_INC	2
-#define	FRAME45_OP_READ		3
-
-typedef union _mif_frame_t {
-
-	uint64_t value;
-
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t msw;	/* Most significant word */
-		uint32_t lsw;	/* Least significant word */
-#elif defined(_LITTLE_ENDIAN)
-		uint32_t lsw;	/* Least significant word */
-		uint32_t msw;	/* Most significant word */
-#endif
-	} val;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t	w1;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint32_t st		: 2;
-		uint32_t op		: 2;
-		uint32_t phyad		: 5;
-		uint32_t regad		: 5;
-		uint32_t ta_msb		: 1;
-		uint32_t ta_lsb		: 1;
-		uint32_t data		: 16;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint32_t data		: 16;
-		uint32_t ta_lsb		: 1;
-		uint32_t ta_msb		: 1;
-		uint32_t regad		: 5;
-		uint32_t phyad		: 5;
-		uint32_t op		: 2;
-		uint32_t st		: 2;
-#endif
-		} w0;
-
-#if defined(_LITTLE_ENDIAN)
-		uint32_t	w1;
-#endif
-	} bits;
-} mif_frame_t;
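/*
 * Sketch of assembling a Clause 22 MDIO read frame from the MIF_FRAME_*
 * encodings above.  Only the frame word is built here; writing it to
 * MIF_OUTPUT_FRAME_REG, driving the turnaround bits, and polling for
 * completion (after which the result sits under MIF_FRAME_DATA_MASK) are
 * driver logic outside this header.
 */
static uint32_t
mif_cl22_read_frame_sketch(uint8_t phy, uint8_t reg)
{
	return (MIF_FRAME_ST_22 | MIF_FRAME_OP_READ_22 |
	    ((uint32_t)phy << MIF_FRAME_PHY_ADDR_SHIFT) |
	    ((uint32_t)reg << MIF_FRAME_REG_ADDR_SHIFT));
}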
-
-#define	MIF_CFG_POLL_EN			0x00000008 /* enable polling */
-#define	MIF_CFG_BB_MODE			0x00000010 /* bit-bang mode */
-#define	MIF_CFG_POLL_REG_MASK		0x000003E0 /* reg addr to be polled */
-#define	MIF_CFG_POLL_REG_SHIFT		5
-#define	MIF_CFG_POLL_PHY_MASK		0x00007C00 /* XCVR addr to be polled */
-#define	MIF_CFG_POLL_PHY_SHIFT		10
-#define	MIF_CFG_INDIRECT_MODE		0x00008000
-					/* used to decide if a Cl 22 */
-					/* or Cl 45 frame is */
-					/* constructed. */
-					/* 1 = Clause 45, ST = '00' */
-					/* 0 = Clause 22, ST = '01' */
-#define	MIF_CFG_ATCE_GE_EN	0x00010000 /* Enable ATCA gigabit mode */
-
-typedef union _mif_cfg_t {
-
-	uint64_t value;
-
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t msw;	/* Most significant word */
-		uint32_t lsw;	/* Least significant word */
-
-#elif defined(_LITTLE_ENDIAN)
-		uint32_t lsw;	/* Least significant word */
-		uint32_t msw;	/* Most significant word */
-#endif
-	} val;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t	w1;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint32_t res2		: 15;
-		uint32_t atca_ge	: 1;
-		uint32_t indirect_md	: 1;
-		uint32_t phy_addr	: 5;
-		uint32_t reg_addr	: 5;
-		uint32_t bb_mode	: 1;
-		uint32_t poll_en	: 1;
-		uint32_t res1		: 2;
-		uint32_t res		: 1;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint32_t res		: 1;
-		uint32_t res1		: 2;
-		uint32_t poll_en	: 1;
-		uint32_t bb_mode	: 1;
-		uint32_t reg_addr	: 5;
-		uint32_t phy_addr	: 5;
-		uint32_t indirect_md	: 1;
-		uint32_t atca_ge	: 1;
-		uint32_t res2		: 15;
-#endif
-		} w0;
-
-#if defined(_LITTLE_ENDIAN)
-		uint32_t	w1;
-#endif
-	} bits;
-
-} mif_cfg_t;
-
-#define	MIF_POLL_STATUS_DATA_MASK	0xffff0000
-#define	MIF_POLL_STATUS_STAT_MASK	0x0000ffff
-
-typedef union _mif_poll_stat_t {
-	uint64_t value;
-
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t msw;	/* Most significant word */
-		uint32_t lsw;	/* Least significant word */
-#elif defined(_LITTLE_ENDIAN)
-		uint32_t lsw;	/* Least significant word */
-		uint32_t msw;	/* Most significant word */
-#endif
-	} val;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t	w1;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint16_t data;
-		uint16_t status;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint16_t status;
-		uint16_t data;
-#endif
-		} w0;
-
-#if defined(_LITTLE_ENDIAN)
-		uint32_t	w1;
-#endif
-	} bits;
-} mif_poll_stat_t;
-
-
-#define	MIF_POLL_MASK_MASK	0x0000ffff
-
-typedef union _mif_poll_mask_t {
-	uint64_t value;
-
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t msw;	/* Most significant word */
-		uint32_t lsw;	/* Least significant word */
-#elif defined(_LITTLE_ENDIAN)
-		uint32_t lsw;	/* Least significant word */
-		uint32_t msw;	/* Most significant word */
-#endif
-	} val;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t	w1;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint16_t rsvd;
-		uint16_t mask;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint16_t mask;
-		uint16_t rsvd;
-#endif
-		} w0;
-
-#if defined(_LITTLE_ENDIAN)
-		uint32_t	w1;
-#endif
-	} bits;
-} mif_poll_mask_t;
-
-#define	MIF_STATUS_INIT_DONE_MASK	0x00000001
-#define	MIF_STATUS_XGE_ERR0_MASK	0x00000002
-#define	MIF_STATUS_XGE_ERR1_MASK	0x00000004
-#define	MIF_STATUS_PEU_ERR_MASK		0x00000008
-#define	MIF_STATUS_EXT_PHY_INTR0_MASK	0x00000010
-#define	MIF_STATUS_EXT_PHY_INTR1_MASK	0x00000020
-
-typedef union _mif_stat_t {
-	uint64_t value;
-
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t msw;	/* Most significant word */
-		uint32_t lsw;	/* Least significant word */
-#elif defined(_LITTLE_ENDIAN)
-		uint32_t lsw;	/* Least significant word */
-		uint32_t msw;	/* Most significant word */
-#endif
-	} val;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t	w1;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint32_t rsvd:26;
-		uint32_t ext_phy_intr_flag1:1;
-		uint32_t ext_phy_intr_flag0:1;
-		uint32_t peu_err:1;
-		uint32_t xge_err1:1;
-		uint32_t xge_err0:1;
-		uint32_t mif_init_done_stat:1;
-
-#elif defined(_BIT_FIELDS_LTOH)
-		uint32_t mif_init_done_stat:1;
-		uint32_t xge_err0:1;
-		uint32_t xge_err1:1;
-		uint32_t ext_phy_intr_flag0:1;
-		uint32_t ext_phy_intr_flag1:1;
-		uint32_t rsvd:26;
-#endif
-		} w0;
-
-#if defined(_LITTLE_ENDIAN)
-		uint32_t	w1;
-#endif
-	} bits;
-} mif_stat_t;
-
-/* MIF State Machine Register */
-
-#define	MIF_SM_EXECUTION_MASK		0x0000003f /* execution state */
-#define	MIF_SM_EXECUTION_SHIFT		0
-#define	MIF_SM_CONTROL_MASK		0x000001c0 /* control state */
-#define	MIF_SM_CONTROL_MASK_SHIFT	6
-#define	MIF_SM_MDI			0x00000200
-#define	MIF_SM_MDO			0x00000400
-#define	MIF_SM_MDO_EN			0x00000800
-#define	MIF_SM_MDC			0x00001000
-#define	MIF_SM_MDI_0			0x00002000
-#define	MIF_SM_MDI_1			0x00004000
-#define	MIF_SM_MDI_2			0x00008000
-#define	MIF_SM_PORT_ADDR_MASK		0x001f0000
-#define	MIF_SM_PORT_ADDR_SHIFT		16
-#define	MIF_SM_INT_SIG_MASK		0xffe00000
-#define	MIF_SM_INT_SIG_SHIFT		21
-
-
-/*
- * ******************** PCS registers *********************************
- */
-
-/* PCS Registers */
-#define	PCS_MII_CTRL_1000_SEL		0x0040	   /* reads 1. ignored on wr */
-#define	PCS_MII_CTRL_COLLISION_TEST	0x0080	   /* COL signal */
-#define	PCS_MII_CTRL_DUPLEX		0x0100	   /* forced 0x0. */
-#define	PCS_MII_RESTART_AUTONEG		0x0200	   /* self clearing. */
-#define	PCS_MII_ISOLATE			0x0400	   /* read 0. ignored on wr */
-#define	PCS_MII_POWER_DOWN		0x0800	   /* read 0. ignored on wr */
-#define	PCS_MII_AUTONEG_EN		0x1000	   /* autonegotiation */
-#define	PCS_MII_10_100_SEL		0x2000	   /* read 0. ignored on wr */
-#define	PCS_MII_RESET			0x8000	   /* reset PCS. */
-
-typedef union _pcs_ctrl_t {
-	uint64_t value;
-
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t msw;	/* Most significant word */
-		uint32_t lsw;	/* Least significant word */
-#elif defined(_LITTLE_ENDIAN)
-		uint32_t lsw;	/* Least significant word */
-		uint32_t msw;	/* Most significant word */
-#endif
-	} val;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t	w1;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res0		: 16;
-			uint32_t reset		: 1;
-			uint32_t res1		: 1;
-			uint32_t sel_10_100	: 1;
-			uint32_t an_enable	: 1;
-			uint32_t pwr_down	: 1;
-			uint32_t isolate	: 1;
-			uint32_t restart_an	: 1;
-			uint32_t duplex		: 1;
-			uint32_t col_test	: 1;
-			uint32_t sel_1000	: 1;
-			uint32_t res2		: 6;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t res2		: 6;
-			uint32_t sel_1000	: 1;
-			uint32_t col_test	: 1;
-			uint32_t duplex		: 1;
-			uint32_t restart_an	: 1;
-			uint32_t isolate	: 1;
-			uint32_t pwr_down	: 1;
-			uint32_t an_enable	: 1;
-			uint32_t sel_10_100	: 1;
-			uint32_t res1		: 1;
-			uint32_t reset		: 1;
-			uint32_t res0		: 16;
-#endif
-		} w0;
-
-#if defined(_LITTLE_ENDIAN)
-		uint32_t	w1;
-#endif
-	} bits;
-} pcs_ctrl_t;
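/*
 * Sketch of restarting PCS autonegotiation with the bit encodings above
 * via a read-modify-write of the PCS MII control register.  pcs_read()
 * and pcs_write() are hypothetical stand-ins for the driver's register
 * access routines.
 */
extern uint64_t pcs_read(uint32_t offset);		/* hypothetical */
extern void pcs_write(uint32_t offset, uint64_t value);	/* hypothetical */

static void
pcs_restart_an_sketch(void)
{
	uint64_t val = pcs_read(PCS_MII_CTRL_REG);

	val |= PCS_MII_AUTONEG_EN | PCS_MII_RESTART_AUTONEG;
	pcs_write(PCS_MII_CTRL_REG, val);	/* RESTART_AUTONEG self-clears */
}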
-
-#define	PCS_MII_STATUS_EXTEND_CAP	0x0001	   /* reads 0 */
-#define	PCS_MII_STATUS_JABBER_DETECT	0x0002	   /* reads 0 */
-#define	PCS_MII_STATUS_LINK_STATUS	0x0004	   /* link status */
-#define	PCS_MII_STATUS_AUTONEG_ABLE	0x0008	   /* reads 1 */
-#define	PCS_MII_STATUS_REMOTE_FAULT	0x0010	   /* remote fault detected */
-#define	PCS_MII_STATUS_AUTONEG_COMP	0x0020	   /* auto-neg completed */
-#define	PCS_MII_STATUS_EXTEND_STATUS	0x0100	   /* 1000 Base-X PHY */
-
-typedef union _pcs_stat_t {
-	uint64_t value;
-
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t msw;	/* Most significant word */
-		uint32_t lsw;	/* Least significant word */
-#elif defined(_LITTLE_ENDIAN)
-		uint32_t lsw;	/* Least significant word */
-		uint32_t msw;	/* Most significant word */
-#endif
-	} val;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t	w1;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint32_t res0		: 23;
-		uint32_t ext_stat	: 1;
-		uint32_t res1		: 2;
-		uint32_t an_complete	: 1;
-		uint32_t remote_fault	: 1;
-		uint32_t an_able	: 1;
-		uint32_t link_stat	: 1;
-		uint32_t jabber_detect	: 1;
-		uint32_t ext_cap	: 1;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint32_t ext_cap	: 1;
-		uint32_t jabber_detect	: 1;
-		uint32_t link_stat	: 1;
-		uint32_t an_able	: 1;
-		uint32_t remote_fault	: 1;
-		uint32_t an_complete	: 1;
-		uint32_t res1		: 2;
-		uint32_t ext_stat	: 1;
-		uint32_t res0		: 23;
-#endif
-		} w0;
-
-#if defined(_LITTLE_ENDIAN)
-		uint32_t	w1;
-#endif
-	} bits;
-} pcs_stat_t;
-
-#define	PCS_MII_ADVERT_FD		0x0020	   /* advertise full duplex */
-#define	PCS_MII_ADVERT_HD		0x0040	   /* advertise half-duplex */
-#define	PCS_MII_ADVERT_SYM_PAUSE	0x0080	   /* advertise PAUSE sym */
-#define	PCS_MII_ADVERT_ASYM_PAUSE	0x0100	   /* advertises PAUSE asym */
-#define	PCS_MII_ADVERT_RF_MASK		0x3000	   /* remote fault */
-#define	PCS_MII_ADVERT_RF_SHIFT		12
-#define	PCS_MII_ADVERT_ACK		0x4000	   /* (ro) */
-#define	PCS_MII_ADVERT_NEXT_PAGE	0x8000	   /* (ro) forced 0x0 */
-
-#define	PCS_MII_LPA_FD			PCS_MII_ADVERT_FD
-#define	PCS_MII_LPA_HD			PCS_MII_ADVERT_HD
-#define	PCS_MII_LPA_SYM_PAUSE		PCS_MII_ADVERT_SYM_PAUSE
-#define	PCS_MII_LPA_ASYM_PAUSE		PCS_MII_ADVERT_ASYM_PAUSE
-#define	PCS_MII_LPA_RF_MASK		PCS_MII_ADVERT_RF_MASK
-#define	PCS_MII_LPA_RF_SHIFT		PCS_MII_ADVERT_RF_SHIFT
-#define	PCS_MII_LPA_ACK			PCS_MII_ADVERT_ACK
-#define	PCS_MII_LPA_NEXT_PAGE		PCS_MII_ADVERT_NEXT_PAGE
-
-typedef union _pcs_anar_t {
-	uint64_t value;
-
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t msw;	/* Most significant word */
-		uint32_t lsw;	/* Least significant word */
-#elif defined(_LITTLE_ENDIAN)
-		uint32_t lsw;	/* Least significant word */
-		uint32_t msw;	/* Most significant word */
-#endif
-	} val;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t	w1;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint32_t res0		: 16;
-		uint32_t next_page	: 1;
-		uint32_t ack		: 1;
-		uint32_t remote_fault	: 2;
-		uint32_t res1		: 3;
-		uint32_t asm_pause	: 1;
-		uint32_t pause		: 1;
-		uint32_t half_duplex	: 1;
-		uint32_t full_duplex	: 1;
-		uint32_t res2		: 5;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint32_t res2		: 5;
-		uint32_t full_duplex	: 1;
-		uint32_t half_duplex	: 1;
-		uint32_t pause		: 1;
-		uint32_t asm_pause	: 1;
-		uint32_t res1		: 3;
-		uint32_t remote_fault	: 2;
-		uint32_t ack		: 1;
-		uint32_t next_page	: 1;
-		uint32_t res0		: 16;
-#endif
-		} w0;
-
-#if defined(_LITTLE_ENDIAN)
-		uint32_t	w1;
-#endif
-	} bits;
-} pcs_anar_t, *p_pcs_anar_t;
-
-#define	PCS_CFG_EN			0x0001	   /* enable PCS. */
-#define	PCS_CFG_SD_OVERRIDE		0x0002
-#define	PCS_CFG_SD_ACTIVE_LOW		0x0004	   /* sig detect active low */
-#define	PCS_CFG_JITTER_STUDY_MASK	0x0018	   /* jitter measurements */
-#define	PCS_CFG_JITTER_STUDY_SHIFT	4
-#define	PCS_CFG_10MS_TIMER_OVERRIDE	0x0020	   /* shortens autoneg timer */
-#define	PCS_CFG_MASK			0x0040	   /* PCS global mask bit */
-
-typedef union _pcs_cfg_t {
-	uint64_t value;
-
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t msw;	/* Most significant word */
-		uint32_t lsw;	/* Least significant word */
-#elif defined(_LITTLE_ENDIAN)
-		uint32_t lsw;	/* Least significant word */
-		uint32_t msw;	/* Most significant word */
-#endif
-	} val;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t	w1;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint32_t res0			: 25;
-		uint32_t mask			: 1;
-		uint32_t override_10ms_timer	: 1;
-		uint32_t jitter_study		: 2;
-		uint32_t sig_det_a_low		: 1;
-		uint32_t sig_det_override	: 1;
-		uint32_t enable			: 1;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint32_t enable			: 1;
-		uint32_t sig_det_override	: 1;
-		uint32_t sig_det_a_low		: 1;
-		uint32_t jitter_study		: 2;
-		uint32_t override_10ms_timer	: 1;
-		uint32_t mask			: 1;
-		uint32_t res0			: 25;
-#endif
-		} w0;
-
-#if defined(_LITTLE_ENDIAN)
-		uint32_t	w1;
-#endif
-	} bits;
-} pcs_cfg_t, *p_pcs_cfg_t;
-
-
-/* used for diagnostic purposes. bits 20-22 autoclear on read */
-#define	PCS_SM_TX_STATE_MASK		0x0000000F /* Tx idle state mask */
-#define	PCS_SM_TX_STATE_SHIFT		0
-#define	PCS_SM_RX_STATE_MASK		0x000000F0 /* Rx idle state mask */
-#define	PCS_SM_RX_STATE_SHIFT		4
-#define	PCS_SM_WORD_SYNC_STATE_MASK	0x00000700 /* loss of sync state mask */
-#define	PCS_SM_WORD_SYNC_STATE_SHIFT	8
-#define	PCS_SM_SEQ_DETECT_STATE_MASK	0x00001800 /* sequence detect */
-#define	PCS_SM_SEQ_DETECT_STATE_SHIFT	11
-#define	PCS_SM_LINK_STATE_MASK		0x0001E000 /* link state */
-#define	PCS_SM_LINK_STATE_SHIFT		13
-#define	PCS_SM_LOSS_LINK_C		0x00100000 /* loss of link */
-#define	PCS_SM_LOSS_LINK_SYNC		0x00200000 /* loss of sync */
-#define	PCS_SM_LOSS_SIGNAL_DETECT	0x00400000 /* signal detect fail */
-#define	PCS_SM_NO_LINK_BREAKLINK	0x01000000 /* receipt of breaklink */
-#define	PCS_SM_NO_LINK_SERDES		0x02000000 /* serdes initializing */
-#define	PCS_SM_NO_LINK_C		0x04000000 /* C codes not stable */
-#define	PCS_SM_NO_LINK_SYNC		0x08000000 /* word sync not achieved */
-#define	PCS_SM_NO_LINK_WAIT_C		0x10000000 /* waiting for C codes */
-#define	PCS_SM_NO_LINK_NO_IDLE		0x20000000 /* link partner sends C codes */
-
-typedef union _pcs_stat_mc_t {
-	uint64_t value;
-
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t msw;	/* Most significant word */
-		uint32_t lsw;	/* Least significant word */
-#elif defined(_LITTLE_ENDIAN)
-		uint32_t lsw;	/* Least significant word */
-		uint32_t msw;	/* Most significant word */
-#endif
-	} val;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t	w1;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint32_t res2		: 2;
-		uint32_t lnk_dwn_ni	: 1;
-		uint32_t lnk_dwn_wc	: 1;
-		uint32_t lnk_dwn_ls	: 1;
-		uint32_t lnk_dwn_nc	: 1;
-		uint32_t lnk_dwn_ser	: 1;
-		uint32_t lnk_loss_bc	: 1;
-		uint32_t res1		: 1;
-		uint32_t loss_sd	: 1;
-		uint32_t lnk_loss_sync	: 1;
-		uint32_t lnk_loss_c	: 1;
-		uint32_t res0		: 3;
-		uint32_t link_cfg_stat	: 4;
-		uint32_t seq_detc_stat	: 2;
-		uint32_t word_sync	: 3;
-		uint32_t rx_ctrl	: 4;
-		uint32_t tx_ctrl	: 4;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint32_t tx_ctrl	: 4;
-		uint32_t rx_ctrl	: 4;
-		uint32_t word_sync	: 3;
-		uint32_t seq_detc_stat	: 2;
-		uint32_t link_cfg_stat	: 4;
-		uint32_t res0		: 3;
-		uint32_t lnk_loss_c	: 1;
-		uint32_t lnk_loss_sync	: 1;
-		uint32_t loss_sd	: 1;
-		uint32_t res1		: 1;
-		uint32_t lnk_loss_bc	: 1;
-		uint32_t lnk_dwn_ser	: 1;
-		uint32_t lnk_dwn_nc	: 1;
-		uint32_t lnk_dwn_ls	: 1;
-		uint32_t lnk_dwn_wc	: 1;
-		uint32_t lnk_dwn_ni	: 1;
-		uint32_t res2		: 2;
-#endif
-		} w0;
-
-#if defined(_LITTLE_ENDIAN)
-		uint32_t	w1;
-#endif
-	} bits;
-} pcs_stat_mc_t, *p_pcs_stat_mc_t;
-
-#define	PCS_INTR_STATUS_LINK_CHANGE	0x04	/* link status has changed */
-
-/*
- * control which network interface is used. no more than one bit should
- * be set.
- */
-#define	PCS_DATAPATH_MODE_PCS		0	   /* Internal PCS is used */
-#define	PCS_DATAPATH_MODE_MII		0x00000002 /* GMII/RGMII is selected. */
-
-#define	PCS_PACKET_COUNT_TX_MASK	0x000007FF /* pkts xmitted by PCS */
-#define	PCS_PACKET_COUNT_RX_MASK	0x07FF0000 /* pkts recvd by PCS */
-#define	PCS_PACKET_COUNT_RX_SHIFT	16
-
-/*
- * ******************** XPCS registers *********************************
- */
-
-/* XPCS Base 10G Control1 Register */
-#define	XPCS_CTRL1_RST			0x8000 /* Self clearing reset. */
-#define	XPCS_CTRL1_LOOPBK		0x4000 /* xpcs Loopback */
-#define	XPCS_CTRL1_SPEED_SEL_3		0x2000 /* 1 indicates 10G speed */
-#define	XPCS_CTRL1_LOW_PWR		0x0800 /* low power mode. */
-#define	XPCS_CTRL1_SPEED_SEL_1		0x0040 /* 1 indicates 10G speed */
-#define	XPCS_CTRL1_SPEED_SEL_0_MASK	0x003c /* 0 indicates 10G speed. */
-#define	XPCS_CTRL1_SPEED_SEL_0_SHIFT	2
-
-
-
-typedef union _xpcs_ctrl1_t {
-	uint64_t value;
-
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t msw;	/* Most significant word */
-		uint32_t lsw;	/* Least significant word */
-#elif defined(_LITTLE_ENDIAN)
-		uint32_t lsw;	/* Least significant word */
-		uint32_t msw;	/* Most significant word */
-#endif
-	} val;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t	w1;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint32_t res3		: 16;
-		uint32_t reset		: 1;
-		uint32_t csr_lb		: 1;
-		uint32_t csr_speed_sel3	: 1;
-		uint32_t res2		: 1;
-		uint32_t csr_low_pwr	: 1;
-		uint32_t res1		: 4;
-		uint32_t csr_speed_sel1	: 1;
-		uint32_t csr_speed_sel0	: 4;
-		uint32_t res0		: 2;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint32_t res0		: 2;
-		uint32_t csr_speed_sel0	: 4;
-		uint32_t csr_speed_sel1	: 1;
-		uint32_t res1		: 4;
-		uint32_t csr_low_pwr	: 1;
-		uint32_t res2		: 1;
-		uint32_t csr_speed_sel3	: 1;
-		uint32_t csr_lb		: 1;
-		uint32_t reset		: 1;
-		uint32_t res3		: 16;
-#endif
-		} w0;
-
-#if defined(_LITTLE_ENDIAN)
-		uint32_t	w1;
-#endif
-	} bits;
-} xpcs_ctrl1_t;
-
-
-/* XPCS Base 10G Status1 Register (Read Only) */
-#define	XPCS_STATUS1_FAULT		0x0080
-#define	XPCS_STATUS1_RX_LINK_STATUS_UP	0x0004 /* Link status interrupt */
-#define	XPCS_STATUS1_LOW_POWER_ABILITY	0x0002 /* low power mode */
-
-
-typedef	union _xpcs_stat1_t {
-	uint64_t value;
-
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t msw;	/* Most significant word */
-		uint32_t lsw;	/* Least significant word */
-#elif defined(_LITTLE_ENDIAN)
-		uint32_t lsw;	/* Least significant word */
-		uint32_t msw;	/* Most significant word */
-#endif
-	} val;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t	w1;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint32_t res4			: 16;
-		uint32_t res3			: 8;
-		uint32_t csr_fault		: 1;
-		uint32_t res1			: 4;
-		uint32_t csr_rx_link_stat	: 1;
-		uint32_t csr_low_pwr_ability	: 1;
-		uint32_t res0			: 1;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint32_t res0			: 1;
-		uint32_t csr_low_pwr_ability	: 1;
-		uint32_t csr_rx_link_stat	: 1;
-		uint32_t res1			: 4;
-		uint32_t csr_fault		: 1;
-		uint32_t res3			: 8;
-		uint32_t res4			: 16;
-#endif
-		} w0;
-
-#if defined(_LITTLE_ENDIAN)
-		uint32_t	w1;
-#endif
-	} bits;
-} xpcs_stat1_t;
-
-
-/* XPCS Base Speed Ability Register. Indicates 10G capability */
-#define	XPCS_SPEED_ABILITY_10_GIG	0x0001
-
-
-typedef	union _xpcs_speed_ab_t {
-	uint64_t value;
-
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t msw;	/* Most significant word */
-		uint32_t lsw;	/* Least significant word */
-#elif defined(_LITTLE_ENDIAN)
-		uint32_t lsw;	/* Least significant word */
-		uint32_t msw;	/* Most significant word */
-#endif
-	} val;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t	w1;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint32_t res1		: 16;
-		uint32_t res0		: 15;
-		uint32_t csr_10gig	: 1;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint32_t csr_10gig	: 1;
-		uint32_t res0		: 15;
-		uint32_t res1		: 16;
-#endif
-		} w0;
-
-#if defined(_LITTLE_ENDIAN)
-		uint32_t	w1;
-#endif
-	} bits;
-} xpcs_speed_ab_t;
-
-
-/* XPCS Base 10G Devices in Package Register */
-#define	XPCS_DEV_IN_PKG_CSR_VENDOR2	0x80000000
-#define	XPCS_DEV_IN_PKG_CSR_VENDOR1	0x40000000
-#define	XPCS_DEV_IN_PKG_DTE_XS		0x00000020
-#define	XPCS_DEV_IN_PKG_PHY_XS		0x00000010
-#define	XPCS_DEV_IN_PKG_PCS		0x00000008
-#define	XPCS_DEV_IN_PKG_WIS		0x00000004
-#define	XPCS_DEV_IN_PKG_PMD_PMA		0x00000002
-#define	XPCS_DEV_IN_PKG_CLS_22_REG	0x00000000
-
-
-
-typedef	union _xpcs_dev_in_pkg_t {
-	uint64_t value;
-
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t msw;	/* Most significant word */
-		uint32_t lsw;	/* Least significant word */
-#elif defined(_LITTLE_ENDIAN)
-		uint32_t lsw;	/* Least significant word */
-		uint32_t msw;	/* Most significant word */
-#endif
-	} val;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t	w1;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint32_t csr_vendor2	: 1;
-		uint32_t csr_vendor1	: 1;
-		uint32_t res1		: 14;
-		uint32_t res0		: 10;
-		uint32_t dte_xs		: 1;
-		uint32_t phy_xs		: 1;
-		uint32_t pcs		: 1;
-		uint32_t wis		: 1;
-		uint32_t pmd_pma	: 1;
-		uint32_t clause_22_reg	: 1;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint32_t clause_22_reg	: 1;
-		uint32_t pmd_pma	: 1;
-		uint32_t wis		: 1;
-		uint32_t pcs		: 1;
-		uint32_t phy_xs		: 1;
-		uint32_t dte_xs		: 1;
-		uint32_t res0		: 10;
-		uint32_t res1		: 14;
-		uint32_t csr_vendor1	: 1;
-		uint32_t csr_vendor2	: 1;
-#endif
-		} w0;
-
-#if defined(_LITTLE_ENDIAN)
-		uint32_t	w1;
-#endif
-	} bits;
-} xpcs_dev_in_pkg_t;
-
-
-/* XPCS Base 10G Control2 Register */
-#define	XPCS_PSC_SEL_MASK		0x0003
-#define	PSC_SEL_10G_BASE_X_PCS		0x0001
-
-
-typedef	union _xpcs_ctrl2_t {
-	uint64_t value;
-
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t msw;	/* Most significant word */
-		uint32_t lsw;	/* Least significant word */
-#elif defined(_LITTLE_ENDIAN)
-		uint32_t lsw;	/* Least significant word */
-		uint32_t msw;	/* Most significant word */
-#endif
-	} val;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t	w1;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint32_t res1		: 16;
-		uint32_t res0		: 14;
-		uint32_t csr_psc_sel	: 2;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint32_t csr_psc_sel	: 2;
-		uint32_t res0		: 14;
-		uint32_t res1		: 16;
-#endif
-		} w0;
-
-#if defined(_LITTLE_ENDIAN)
-		uint32_t	w1;
-#endif
-	} bits;
-} xpcs_ctrl2_t;
-
-
-/* XPCS Base10G Status2 Register */
-#define	XPCS_STATUS2_DEV_PRESENT_MASK	0xc000	/* device present */
-#define	XPCS_STATUS2_TX_FAULT		0x0800	/* Fault on tx path */
-#define	XPCS_STATUS2_RX_FAULT		0x0400	/* Fault on rx path */
-#define	XPCS_STATUS2_TEN_GBASE_W	0x0004	/* 10G-Base-W */
-#define	XPCS_STATUS2_TEN_GBASE_X	0x0002	/* 10G-Base-X */
-#define	XPCS_STATUS2_TEN_GBASE_R	0x0001	/* 10G-Base-R */
-
-typedef	union _xpcs_stat2_t {
-	uint64_t value;
-
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t msw;	/* Most significant word */
-		uint32_t lsw;	/* Least significant word */
-#elif defined(_LITTLE_ENDIAN)
-		uint32_t lsw;	/* Least significant word */
-		uint32_t msw;	/* Most significant word */
-#endif
-	} val;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t	w1;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint32_t res2		: 16;
-		uint32_t csr_dev_pres	: 2;
-		uint32_t res1		: 2;
-		uint32_t csr_tx_fault	: 1;
-		uint32_t csr_rx_fault	: 1;
-		uint32_t res0		: 7;
-		uint32_t ten_gbase_w	: 1;
-		uint32_t ten_gbase_x	: 1;
-		uint32_t ten_gbase_r	: 1;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint32_t ten_gbase_r	: 1;
-		uint32_t ten_gbase_x	: 1;
-		uint32_t ten_gbase_w	: 1;
-		uint32_t res0		: 7;
-		uint32_t csr_rx_fault	: 1;
-		uint32_t csr_tx_fault	: 1;
-		uint32_t res1		: 2;
-		uint32_t csr_dev_pres	: 2;
-		uint32_t res2		: 16;
-#endif
-		} w0;
-
-#if defined(_LITTLE_ENDIAN)
-		uint32_t	w1;
-#endif
-	} bits;
-} xpcs_stat2_t;
-
-
-
-/* XPCS Base10G Status Register */
-#define	XPCS_STATUS_LANE_ALIGN		0x1000 /* 10GBaseX PCS rx lanes align */
-#define	XPCS_STATUS_PATTERN_TEST_ABLE	0x0800 /* able to generate patterns. */
-#define	XPCS_STATUS_LANE3_SYNC		0x0008 /* Lane 3 is synchronized */
-#define	XPCS_STATUS_LANE2_SYNC		0x0004 /* Lane 2 is synchronized */
-#define	XPCS_STATUS_LANE1_SYNC		0x0002 /* Lane 1 is synchronized */
-#define	XPCS_STATUS_LANE0_SYNC		0x0001 /* Lane 0 is synchronized */
-
-typedef	union _xpcs_stat_t {
-	uint64_t value;
-
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t msw;	/* Most significant word */
-		uint32_t lsw;	/* Least significant word */
-#elif defined(_LITTLE_ENDIAN)
-		uint32_t lsw;	/* Least significant word */
-		uint32_t msw;	/* Most significant word */
-#endif
-	} val;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t	w1;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint32_t res2			: 16;
-		uint32_t res1			: 3;
-		uint32_t csr_lane_align		: 1;
-		uint32_t csr_pattern_test_able	: 1;
-		uint32_t res0			: 7;
-		uint32_t csr_lane3_sync		: 1;
-		uint32_t csr_lane2_sync		: 1;
-		uint32_t csr_lane1_sync		: 1;
-		uint32_t csr_lane0_sync		: 1;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint32_t csr_lane0_sync		: 1;
-		uint32_t csr_lane1_sync		: 1;
-		uint32_t csr_lane2_sync		: 1;
-		uint32_t csr_lane3_sync		: 1;
-		uint32_t res0			: 7;
-		uint32_t csr_pattern_test_able	: 1;
-		uint32_t csr_lane_align		: 1;
-		uint32_t res1			: 3;
-		uint32_t res2			: 16;
-#endif
-		} w0;
-
-#if defined(_LITTLE_ENDIAN)
-		uint32_t	w1;
-#endif
-	} bits;
-} xpcs_stat_t;
-
-/* XPCS Base10G Test Control Register */
-#define	XPCS_TEST_CTRL_TX_TEST_ENABLE		0x0004
-#define	XPCS_TEST_CTRL_TEST_PATTERN_SEL_MASK	0x0003
-#define	TEST_PATTERN_HIGH_FREQ			0
-#define	TEST_PATTERN_LOW_FREQ			1
-#define	TEST_PATTERN_MIXED_FREQ			2
-
-typedef	union _xpcs_test_ctl_t {
-	uint64_t value;
-
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t msw;	/* Most significant word */
-		uint32_t lsw;	/* Least significant word */
-#elif defined(_LITTLE_ENDIAN)
-		uint32_t lsw;	/* Least significant word */
-		uint32_t msw;	/* Most significant word */
-#endif
-	} val;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t	w1;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint32_t res1			: 16;
-		uint32_t res0			: 13;
-		uint32_t csr_tx_test_en		: 1;
-		uint32_t csr_test_pat_sel	: 2;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint32_t csr_test_pat_sel	: 2;
-		uint32_t csr_tx_test_en		: 1;
-		uint32_t res0			: 13;
-		uint32_t res1			: 16;
-#endif
-		} w0;
-
-#if defined(_LITTLE_ENDIAN)
-		uint32_t	w1;
-#endif
-	} bits;
-} xpcs_test_ctl_t;
-
-/* XPCS Base10G Diagnostic Register */
-#define	XPCS_DIAG_EB_ALIGN_ERR3		0x40
-#define	XPCS_DIAG_EB_ALIGN_ERR2		0x20
-#define	XPCS_DIAG_EB_ALIGN_ERR1		0x10
-#define	XPCS_DIAG_EB_DESKEW_OK		0x08
-#define	XPCS_DIAG_EB_ALIGN_DET3		0x04
-#define	XPCS_DIAG_EB_ALIGN_DET2		0x02
-#define	XPCS_DIAG_EB_ALIGN_DET1		0x01
-#define	XPCS_DIAG_EB_DESKEW_LOSS	0
-
-#define	XPCS_DIAG_SYNC_3_INVALID	0x8
-#define	XPCS_DIAG_SYNC_2_INVALID	0x4
-#define	XPCS_DIAG_SYNC_1_INVALID	0x2
-#define	XPCS_DIAG_SYNC_IN_SYNC		0x1
-#define	XPCS_DIAG_SYNC_LOSS_SYNC	0
-
-#define	XPCS_RX_SM_RECEIVE_STATE	1
-#define	XPCS_RX_SM_FAULT_STATE		0
-
-typedef	union _xpcs_diag_t {
-	uint64_t value;
-
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t msw;	/* Most significant word */
-		uint32_t lsw;	/* Least significant word */
-#elif defined(_LITTLE_ENDIAN)
-		uint32_t lsw;	/* Least significant word */
-		uint32_t msw;	/* Most significant word */
-#endif
-	} val;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t	w1;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint32_t res1			: 7;
-		uint32_t sync_sm_lane3		: 4;
-		uint32_t sync_sm_lane2		: 4;
-		uint32_t sync_sm_lane1		: 4;
-		uint32_t sync_sm_lane0		: 4;
-		uint32_t elastic_buffer_sm	: 8;
-		uint32_t receive_sm		: 1;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint32_t receive_sm		: 1;
-		uint32_t elastic_buffer_sm	: 8;
-		uint32_t sync_sm_lane0		: 4;
-		uint32_t sync_sm_lane1		: 4;
-		uint32_t sync_sm_lane2		: 4;
-		uint32_t sync_sm_lane3		: 4;
-		uint32_t res1			: 7;
-#endif
-		} w0;
-
-#if defined(_LITTLE_ENDIAN)
-		uint32_t	w1;
-#endif
-	} bits;
-} xpcs_diag_t;
-
-/* XPCS Base10G Tx State Machine Register */
-#define	XPCS_TX_SM_SEND_UNDERRUN	0x9
-#define	XPCS_TX_SM_SEND_RANDOM_Q	0x8
-#define	XPCS_TX_SM_SEND_RANDOM_K	0x7
-#define	XPCS_TX_SM_SEND_RANDOM_A	0x6
-#define	XPCS_TX_SM_SEND_RANDOM_R	0x5
-#define	XPCS_TX_SM_SEND_Q		0x4
-#define	XPCS_TX_SM_SEND_K		0x3
-#define	XPCS_TX_SM_SEND_A		0x2
-#define	XPCS_TX_SM_SEND_SDP		0x1
-#define	XPCS_TX_SM_SEND_DATA		0
-
-/* XPCS Base10G Configuration Register */
-#define	XPCS_CFG_VENDOR_DBG_SEL_MASK	0x78
-#define	XPCS_CFG_VENDOR_DBG_SEL_SHIFT	3
-#define	XPCS_CFG_BYPASS_SIG_DETECT	0x0004
-#define	XPCS_CFG_ENABLE_TX_BUFFERS	0x0002
-#define	XPCS_CFG_XPCS_ENABLE		0x0001
-
-typedef	union _xpcs_config_t {
-	uint64_t value;
-
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t msw;	/* Most significant word */
-		uint32_t lsw;	/* Least significant word */
-#elif defined(_LITTLE_ENDIAN)
-		uint32_t lsw;	/* Least significant word */
-		uint32_t msw;	/* Most significant word */
-#endif
-	} val;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t	w1;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint32_t res1			: 16;
-		uint32_t res0			: 9;
-		uint32_t csr_vendor_dbg_sel	: 4;
-		uint32_t csr_bypass_sig_detect	: 1;
-		uint32_t csr_en_tx_buf		: 1;
-		uint32_t csr_xpcs_en		: 1;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint32_t csr_xpcs_en		: 1;
-		uint32_t csr_en_tx_buf		: 1;
-		uint32_t csr_bypass_sig_detect	: 1;
-		uint32_t csr_vendor_dbg_sel	: 4;
-		uint32_t res0			: 9;
-		uint32_t res1			: 16;
-#endif
-		} w0;
-
-#if defined(_LITTLE_ENDIAN)
-		uint32_t	w1;
-#endif
-	} bits;
-} xpcs_config_t;
-
-
-
-/* XPCS Base10G Mask1 Register */
-#define	XPCS_MASK1_FAULT_MASK		0x0080	/* mask fault interrupt. */
-#define	XPCS_MASK1_RX_LINK_STATUS_MASK	0x0040	/* mask linkstat interrupt */
-
-/* XPCS Base10G Packet Counter */
-#define	XPCS_PKT_CNTR_TX_PKT_CNT_MASK	0xffff0000
-#define	XPCS_PKT_CNTR_TX_PKT_CNT_SHIFT	16
-#define	XPCS_PKT_CNTR_RX_PKT_CNT_MASK	0x0000ffff
-#define	XPCS_PKT_CNTR_RX_PKT_CNT_SHIFT	0
-
-/* XPCS Base10G TX State Machine status register */
-#define	XPCS_TX_STATE_MC_TX_STATE_MASK	0x0f
-#define	XPCS_DESKEW_ERR_CNTR_MASK	0xff
-
-/* XPCS Base10G Lane symbol error counters */
-#define	XPCS_SYM_ERR_CNT_L1_MASK  0xffff0000
-#define	XPCS_SYM_ERR_CNT_L0_MASK  0x0000ffff
-#define	XPCS_SYM_ERR_CNT_L3_MASK  0xffff0000
-#define	XPCS_SYM_ERR_CNT_L2_MASK  0x0000ffff
-
-#define	XPCS_SYM_ERR_CNT_MULTIPLIER	16
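/*
 * Sketch of splitting the per-lane symbol error counts out of the packed
 * XPCS counter registers using the lane masks above (the shifts reuse the
 * XMAC_XPCS_*_SHIFT values defined earlier in this file).  Whether the raw
 * counts must also be scaled by XPCS_SYM_ERR_CNT_MULTIPLIER is left to the
 * hardware documentation.
 */
static void
xpcs_sym_err_split_sketch(uint32_t l0_l1, uint32_t l2_l3, uint32_t cnt[4])
{
	cnt[0] = l0_l1 & XPCS_SYM_ERR_CNT_L0_MASK;
	cnt[1] = (l0_l1 & XPCS_SYM_ERR_CNT_L1_MASK) >>
	    XMAC_XPCS_SYM_ERR_CNT_L1_SHIFT;
	cnt[2] = l2_l3 & XPCS_SYM_ERR_CNT_L2_MASK;
	cnt[3] = (l2_l3 & XPCS_SYM_ERR_CNT_L3_MASK) >>
	    XMAC_XPCS_SYM_ERR_CNT_L3_SHIFT;
}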
-
-/* ESR Reset Register */
-#define	ESR_RESET_1			2
-#define	ESR_RESET_0			1
-
-/* ESR Configuration Register */
-#define	ESR_BLUNT_END_LOOPBACK		2
-#define	ESR_FORCE_SERDES_SERDES_RDY	1
-
-/* ESR Neptune Serdes PLL Configuration */
-#define	ESR_PLL_CFG_FBDIV_0		0x1
-#define	ESR_PLL_CFG_FBDIV_1		0x2
-#define	ESR_PLL_CFG_FBDIV_2		0x4
-#define	ESR_PLL_CFG_HALF_RATE_0		0x8
-#define	ESR_PLL_CFG_HALF_RATE_1		0x10
-#define	ESR_PLL_CFG_HALF_RATE_2		0x20
-#define	ESR_PLL_CFG_HALF_RATE_3		0x40
-
-/* ESR Neptune Serdes Control Register */
-#define	ESR_CTL_EN_SYNCDET_0		0x00000001
-#define	ESR_CTL_EN_SYNCDET_1		0x00000002
-#define	ESR_CTL_EN_SYNCDET_2		0x00000004
-#define	ESR_CTL_EN_SYNCDET_3		0x00000008
-#define	ESR_CTL_OUT_EMPH_0_MASK		0x00000070
-#define	ESR_CTL_OUT_EMPH_0_SHIFT	4
-#define	ESR_CTL_OUT_EMPH_1_MASK		0x00000380
-#define	ESR_CTL_OUT_EMPH_1_SHIFT	7
-#define	ESR_CTL_OUT_EMPH_2_MASK		0x00001c00
-#define	ESR_CTL_OUT_EMPH_2_SHIFT	10
-#define	ESR_CTL_OUT_EMPH_3_MASK		0x0000e000
-#define	ESR_CTL_OUT_EMPH_3_SHIFT	13
-#define	ESR_CTL_LOSADJ_0_MASK		0x00070000
-#define	ESR_CTL_LOSADJ_0_SHIFT		16
-#define	ESR_CTL_LOSADJ_1_MASK		0x00380000
-#define	ESR_CTL_LOSADJ_1_SHIFT		19
-#define	ESR_CTL_LOSADJ_2_MASK		0x01c00000
-#define	ESR_CTL_LOSADJ_2_SHIFT		22
-#define	ESR_CTL_LOSADJ_3_MASK		0x0e000000
-#define	ESR_CTL_LOSADJ_3_SHIFT		25
-#define	ESR_CTL_RXITERM_0		0x10000000
-#define	ESR_CTL_RXITERM_1		0x20000000
-#define	ESR_CTL_RXITERM_2		0x40000000
-#define	ESR_CTL_RXITERM_3		0x80000000
-
-/* ESR Neptune Serdes Test Configuration Register */
-#define	ESR_TSTCFG_LBTEST_MD_0_MASK	0x00000003
-#define	ESR_TSTCFG_LBTEST_MD_0_SHIFT	0
-#define	ESR_TSTCFG_LBTEST_MD_1_MASK	0x0000000c
-#define	ESR_TSTCFG_LBTEST_MD_1_SHIFT	2
-#define	ESR_TSTCFG_LBTEST_MD_2_MASK	0x00000030
-#define	ESR_TSTCFG_LBTEST_MD_2_SHIFT	4
-#define	ESR_TSTCFG_LBTEST_MD_3_MASK	0x000000c0
-#define	ESR_TSTCFG_LBTEST_MD_3_SHIFT	6
-
-/* ESR Neptune Ethernet RGMII Configuration Register */
-#define	ESR_RGMII_PT0_IN_USE		0x00000001
-#define	ESR_RGMII_PT1_IN_USE		0x00000002
-#define	ESR_RGMII_PT2_IN_USE		0x00000004
-#define	ESR_RGMII_PT3_IN_USE		0x00000008
-#define	ESR_RGMII_REG_RW_TEST		0x00000010
-
-/* ESR Internal Signals Observation Register */
-#define	ESR_SIG_MASK			0xFFFFFFFF
-#define	ESR_SIG_P0_BITS_MASK		0x33E0000F
-#define	ESR_SIG_P1_BITS_MASK		0x0C1F00F0
-#define	ESR_SIG_SERDES_RDY0_P0		0x20000000
-#define	ESR_SIG_DETECT0_P0		0x10000000
-#define	ESR_SIG_SERDES_RDY0_P1		0x08000000
-#define	ESR_SIG_DETECT0_P1		0x04000000
-#define	ESR_SIG_XSERDES_RDY_P0		0x02000000
-#define	ESR_SIG_XDETECT_P0_CH3		0x01000000
-#define	ESR_SIG_XDETECT_P0_CH2		0x00800000
-#define	ESR_SIG_XDETECT_P0_CH1		0x00400000
-#define	ESR_SIG_XDETECT_P0_CH0		0x00200000
-#define	ESR_SIG_XSERDES_RDY_P1		0x00100000
-#define	ESR_SIG_XDETECT_P1_CH3		0x00080000
-#define	ESR_SIG_XDETECT_P1_CH2		0x00040000
-#define	ESR_SIG_XDETECT_P1_CH1		0x00020000
-#define	ESR_SIG_XDETECT_P1_CH0		0x00010000
-#define	ESR_SIG_LOS_P1_CH3		0x00000080
-#define	ESR_SIG_LOS_P1_CH2		0x00000040
-#define	ESR_SIG_LOS_P1_CH1		0x00000020
-#define	ESR_SIG_LOS_P1_CH0		0x00000010
-#define	ESR_SIG_LOS_P0_CH3		0x00000008
-#define	ESR_SIG_LOS_P0_CH2		0x00000004
-#define	ESR_SIG_LOS_P0_CH1		0x00000002
-#define	ESR_SIG_LOS_P0_CH0		0x00000001
-
-/* ESR Debug Selection Register */
-#define	ESR_DEBUG_SEL_MASK		0x0000003f
-
-/* ESR Test Configuration Register */
-#define	ESR_NO_LOOPBACK_CH3		(0x0 << 6)
-#define	ESR_EWRAP_CH3			(0x1 << 6)
-#define	ESR_PAD_LOOPBACK_CH3		(0x2 << 6)
-#define	ESR_REVLOOPBACK_CH3		(0x3 << 6)
-#define	ESR_NO_LOOPBACK_CH2		(0x0 << 4)
-#define	ESR_EWRAP_CH2			(0x1 << 4)
-#define	ESR_PAD_LOOPBACK_CH2		(0x2 << 4)
-#define	ESR_REVLOOPBACK_CH2		(0x3 << 4)
-#define	ESR_NO_LOOPBACK_CH1		(0x0 << 2)
-#define	ESR_EWRAP_CH1			(0x1 << 2)
-#define	ESR_PAD_LOOPBACK_CH1		(0x2 << 2)
-#define	ESR_REVLOOPBACK_CH1		(0x3 << 2)
-#define	ESR_NO_LOOPBACK_CH0		0x0
-#define	ESR_EWRAP_CH0			0x1
-#define	ESR_PAD_LOOPBACK_CH0		0x2
-#define	ESR_REVLOOPBACK_CH0		0x3
-
-/* convert values */
-#define	NXGE_BASE(x, y)	\
-	(((y) << (x ## _SHIFT)) & (x ## _MASK))
-
-#define	NXGE_VAL_GET(fieldname, regval)		\
-	(((regval) & (fieldname ## _MASK)) >> (fieldname ## _SHIFT))
-
-#define	NXGE_VAL_SET(fieldname, regval, val)		\
-{							\
-	(regval) &= ~(fieldname ## _MASK);		\
-	(regval) |= ((val) << (fieldname ## _SHIFT));	\
-}
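
[Editor's note: a small usage sketch for the three helpers above, paired with a mask/shift couple defined earlier in this header (ESR_CTL_OUT_EMPH_0_MASK/_SHIFT); `ctl` and the values are illustrative, not driver code.]

	uint32_t ctl = 0;
	uint32_t emph;

	ctl |= NXGE_BASE(ESR_CTL_OUT_EMPH_0, 0x3);	/* (0x3 << 4) & 0x70 */
	emph = NXGE_VAL_GET(ESR_CTL_OUT_EMPH_0, ctl);	/* reads back 0x3 */
	NXGE_VAL_SET(ESR_CTL_OUT_EMPH_0, ctl, 0x5);	/* clear field, then set */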
-
-
-#ifdef	__cplusplus
-}
-#endif
-
-#endif	/* _SYS_MAC_NXGE_MAC_HW_H */
--- a/usr/src/uts/sun4v/sys/nxge/nxge_mii.h	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,454 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef _SYS_NXGE_NXGE_MII_H_
-#define	_SYS_NXGE_NXGE_MII_H_
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/*
- * Configuration Register space.
- */
-
-#define	MII_BMCR		0
-#define	MII_BMSR		1
-#define	MII_IDR1		2
-#define	MII_IDR2		3
-#define	MII_ANAR		4
-#define	MII_ANLPAR		5
-#define	MII_ANER		6
-#define	MII_NPTXR		7
-#define	MII_LPRXNPR		8
-#define	MII_GCR			9
-#define	MII_GSR			10
-#define	MII_RES0		11
-#define	MII_RES1		12
-#define	MII_RES2		13
-#define	MII_RES3		14
-#define	MII_ESR			15
-
-#define	NXGE_MAX_MII_REGS	32
-
-/*
- * Configuration Register space.
- */
-typedef struct _mii_regs {
-	uchar_t bmcr;		/* Basic mode control register */
-	uchar_t bmsr;		/* Basic mode status register */
-	uchar_t idr1;		/* Phy identifier register 1 */
-	uchar_t idr2;		/* Phy identifier register 2 */
-	uchar_t anar;		/* Auto-Negotiation advertisement register */
-	uchar_t anlpar;		/* Auto-Negotiation link Partner ability reg */
-	uchar_t aner;		/* Auto-Negotiation expansion register */
-	uchar_t nptxr;		/* Next page transmit register */
-	uchar_t lprxnpr;	/* Link partner received next page register */
-	uchar_t gcr;		/* Gigabit basic mode control register. */
-	uchar_t gsr;		/* Gigabit basic mode status register */
-	uchar_t mii_res1[4];	/* For future use by MII working group */
-	uchar_t esr;		/* Extended status register. */
-	uchar_t vendor_res[16];	/* For future use by Phy Vendors */
-} mii_regs_t, *p_mii_regs_t;
-
-/*
- * MII Register 0: Basic mode control register.
- */
-#define	BMCR_RES		0x003f  /* Unused... */
-#define	BMCR_SSEL_MSB		0x0040  /* Used to manually select speed */
-					/* (with bit 6) when auto-neg */
-					/* disabled */
-#define	BMCR_COL_TEST		0x0080  /* Collision test */
-#define	BMCR_DPLX_MD		0x0100  /* Full duplex */
-#define	BMCR_RESTART_AN		0x0200  /* Auto negotiation restart */
-#define	BMCR_ISOLATE		0x0400	/* Disconnect BCM5464R from MII */
-#define	BMCR_PDOWN		0x0800	/* Powerdown the BCM5464R */
-#define	BMCR_ANENABLE		0x1000	/* Enable auto negotiation */
-#define	BMCR_SSEL_LSB		0x2000  /* Used to manually select speed */
-					/* (with bit 13) when auto-neg */
-					/* disabled */
-#define	BMCR_LOOPBACK		0x4000	/* TXD loopback bits */
-#define	BMCR_RESET		0x8000	/* Reset the BCM5464R */
-
-typedef union _mii_bmcr {
-	uint16_t value;
-	struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint16_t reset:1;
-		uint16_t loopback:1;
-		uint16_t speed_sel:1;
-		uint16_t enable_autoneg:1;
-		uint16_t power_down:1;
-		uint16_t isolate:1;
-		uint16_t restart_autoneg:1;
-		uint16_t duplex_mode:1;
-		uint16_t col_test:1;
-		uint16_t speed_1000_sel:1;
-		uint16_t res1:6;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint16_t res1:6;
-		uint16_t speed_1000_sel:1;
-		uint16_t col_test:1;
-		uint16_t duplex_mode:1;
-		uint16_t restart_autoneg:1;
-		uint16_t isolate:1;
-		uint16_t power_down:1;
-		uint16_t enable_autoneg:1;
-		uint16_t speed_sel:1;
-		uint16_t loopback:1;
-		uint16_t reset:1;
-#endif
-	} bits;
-} mii_bmcr_t, *p_mii_bmcr_t;
-
-/*
- * MII Register 1:  Basic mode status register.
- */
-#define	BMSR_ERCAP		0x0001  /* Ext-reg capability */
-#define	BMSR_JCD		0x0002  /* Jabber detected */
-#define	BMSR_LSTATUS		0x0004  /* Link status */
-#define	BMSR_ANEGCAPABLE	0x0008  /* Able to do auto-negotiation */
-#define	BMSR_RFAULT		0x0010  /* Remote fault detected */
-#define	BMSR_ANEGCOMPLETE	0x0020  /* Auto-negotiation complete */
-#define	BMSR_MF_PRE_SUP		0x0040  /* Preamble for MIF frame suppressed, */
-					/* always 1 for BCM5464R */
-#define	BMSR_RESV		0x0080  /* Unused... */
-#define	BMSR_ESTAT		0x0100  /* Contains IEEE extended status reg */
-#define	BMSR_100BASE2HALF	0x0200  /* Can do 100mbps, 2k pkts half-dplx */
-#define	BMSR_100BASE2FULL	0x0400  /* Can do 100mbps, 2k pkts full-dplx */
-#define	BMSR_10HALF		0x0800  /* Can do 10mbps, half-duplex */
-#define	BMSR_10FULL		0x1000  /* Can do 10mbps, full-duplex */
-#define	BMSR_100HALF		0x2000  /* Can do 100mbps, half-duplex */
-#define	BMSR_100FULL		0x4000  /* Can do 100mbps, full-duplex */
-#define	BMSR_100BASE4		0x8000  /* Can do 100mbps, 4k packets */
-
-typedef union _mii_bmsr {
-	uint16_t value;
-	struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint16_t link_100T4:1;
-		uint16_t link_100fdx:1;
-		uint16_t link_100hdx:1;
-		uint16_t link_10fdx:1;
-		uint16_t link_10hdx:1;
-		uint16_t res2:2;
-		uint16_t extend_status:1;
-		uint16_t res1:1;
-		uint16_t preamble_supress:1;
-		uint16_t auto_neg_complete:1;
-		uint16_t remote_fault:1;
-		uint16_t auto_neg_able:1;
-		uint16_t link_status:1;
-		uint16_t jabber_detect:1;
-		uint16_t ext_cap:1;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint16_t ext_cap:1;
-		uint16_t jabber_detect:1;
-		uint16_t link_status:1;
-		uint16_t auto_neg_able:1;
-		uint16_t remote_fault:1;
-		uint16_t auto_neg_complete:1;
-		uint16_t preamble_supress:1;
-		uint16_t res1:1;
-		uint16_t extend_status:1;
-		uint16_t res2:2;
-		uint16_t link_10hdx:1;
-		uint16_t link_10fdx:1;
-		uint16_t link_100hdx:1;
-		uint16_t link_100fdx:1;
-		uint16_t link_100T4:1;
-#endif
-	} bits;
-} mii_bmsr_t, *p_mii_bmsr_t;
-
-/*
- * MII Register 2: Physical Identifier 1.
- */
-/* contains BCM OUI bits [3:18] */
-typedef union _mii_idr1 {
-	uint16_t value;
-	struct {
-		uint16_t ieee_address:16;
-	} bits;
-} mii_idr1_t, *p_mii_idr1_t;
-
-/*
- * MII Register 3: Physical Identifier 2.
- */
-typedef union _mii_idr2 {
-	uint16_t value;
-	struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint16_t ieee_address:6;
-		uint16_t model_no:6;
-		uint16_t rev_no:4;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint16_t rev_no:4;
-		uint16_t model_no:6;
-		uint16_t ieee_address:6;
-#endif
-	} bits;
-} mii_idr2_t, *p_mii_idr2_t;
-
-/*
- * MII Register 4: Auto-negotiation advertisement register.
- */
-#define	ADVERTISE_SLCT		0x001f  /* Selector bits for proto, 0x01 */
-					/* indicates IEEE 802.3 CSMA/CD phy */
-#define	ADVERTISE_CSMA		0x0001  /* Only selector supported */
-#define	ADVERTISE_10HALF	0x0020  /* Try for 10mbps half-duplex  */
-#define	ADVERTISE_10FULL	0x0040  /* Try for 10mbps full-duplex  */
-#define	ADVERTISE_100HALF	0x0080  /* Try for 100mbps half-duplex */
-#define	ADVERTISE_100FULL	0x0100  /* Try for 100mbps full-duplex */
-#define	ADVERTISE_100BASE4	0x0200  /* Try for 100mbps 4k packets. set to */
-					/* 0, BCM5464R not 100BASE-T4 capable */
-#define	ADVERTISE_RES1		0x0400  /* Unused... */
-#define	ADVERTISE_ASM_PAUS	0x0800  /* advertise asymmetric pause */
-#define	ADVERTISE_PAUS		0x1000  /* can do full dplx pause */
-#define	ADVERTISE_RFAULT	0x2000  /* Say we can detect faults */
-#define	ADVERTISE_RES0		0x4000  /* Unused... */
-#define	ADVERTISE_NPAGE		0x8000  /* Next page bit */
-
-#define	ADVERTISE_FULL (ADVERTISE_100FULL | ADVERTISE_10FULL | \
-			ADVERTISE_CSMA)
-#define	ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \
-			ADVERTISE_100HALF | ADVERTISE_100FULL)
-
-typedef union _mii_anar {
-	uint16_t value;
-	struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint16_t np_indication:1;
-		uint16_t acknowledge:1;
-		uint16_t remote_fault:1;
-		uint16_t res1:1;
-		uint16_t cap_asmpause:1;
-		uint16_t cap_pause:1;
-		uint16_t cap_100T4:1;
-		uint16_t cap_100fdx:1;
-		uint16_t cap_100hdx:1;
-		uint16_t cap_10fdx:1;
-		uint16_t cap_10hdx:1;
-		uint16_t selector:5;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint16_t selector:5;
-		uint16_t cap_10hdx:1;
-		uint16_t cap_10fdx:1;
-		uint16_t cap_100hdx:1;
-		uint16_t cap_100fdx:1;
-		uint16_t cap_100T4:1;
-		uint16_t cap_pause:1;
-		uint16_t cap_asmpause:1;
-		uint16_t res1:1;
-		uint16_t remote_fault:1;
-		uint16_t acknowledge:1;
-		uint16_t np_indication:1;
-#endif
-	} bits;
-} mii_anar_t, *p_mii_anar_t;
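
[Editor's note: a hedged sketch of composing an advertisement word from the ADVERTISE_* bits above, or equivalently through the union's bit fields; only the bit arithmetic is shown, the MII write itself is elided.]

	mii_anar_t anar;

	anar.value = ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUS;
	anar.bits.cap_asmpause = 1;	/* same bit as ADVERTISE_ASM_PAUS */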
-
-/*
- * MII Register 5: Auto-negotiation link partner ability register.
- */
-#define	LPA_SLCT		0x001f  /* Same as advertise selector */
-#define	LPA_10HALF		0x0020  /* Can do 10mbps half-duplex */
-#define	LPA_10FULL		0x0040  /* Can do 10mbps full-duplex */
-#define	LPA_100HALF		0x0080  /* Can do 100mbps half-duplex */
-#define	LPA_100FULL		0x0100  /* Can do 100mbps full-duplex */
-#define	LPA_100BASE4		0x0200  /* Can do 100mbps 4k packets */
-#define	LPA_RES1		0x0400  /* Unused... */
-#define	LPA_ASM_PAUS		0x0800  /* advertise asymmetric pause */
-#define	LPA__PAUS		0x1000  /* can do full dplx pause */
-#define	LPA_RFAULT		0x2000	/* Link partner faulted */
-#define	LPA_LPACK		0x4000	/* Link partner acked us */
-#define	LPA_NPAGE		0x8000	/* Next page bit */
-
-#define	LPA_DUPLEX		(LPA_10FULL | LPA_100FULL)
-#define	LPA_100			(LPA_100FULL | LPA_100HALF | LPA_100BASE4)
-
-typedef mii_anar_t mii_anlpar_t, *pmii_anlpar_t;
-
-/*
- * MII Register 6: Auto-negotiation expansion register.
- */
-#define	EXPANSION_LP_AN_ABLE	0x0001	/* Link partner has auto-neg cap */
-#define	EXPANSION_PG_RX		0x0002	/* Got new RX page code word */
-#define	EXPANSION_NP_ABLE	0x0004	/* This enables npage words */
-#define	EXPANSION_LPNP_ABLE	0x0008	/* Link partner supports npage */
-#define	EXPANSION_MFAULTS	0x0010	/* Multiple link faults detected */
-#define	EXPANSION_RESV		0xffe0	/* Unused... */
-
-typedef union _mii_aner {
-	uint16_t value;
-	struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint16_t res:11;
-		uint16_t mlf:1;
-		uint16_t lp_np_able:1;
-		uint16_t np_able:1;
-		uint16_t page_rx:1;
-		uint16_t lp_an_able:1;
-#else
-		uint16_t lp_an_able:1;
-		uint16_t page_rx:1;
-		uint16_t np_able:1;
-		uint16_t lp_np_able:1;
-		uint16_t mlf:1;
-		uint16_t res:11;
-#endif
-	} bits;
-} mii_aner_t, *p_mii_aner_t;
-
-/*
- * MII Register 7: Next page transmit register.
- */
-typedef	union _mii_nptxr {
-	uint16_t value;
-	struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint16_t np:1;
-		uint16_t res:1;
-		uint16_t msgp:1;
-		uint16_t ack2:1;
-		uint16_t toggle:1;
-		uint16_t res1:11;
-#else
-		uint16_t res1:11;
-		uint16_t toggle:1;
-		uint16_t ack2:1;
-		uint16_t msgp:1;
-		uint16_t res:1;
-		uint16_t np:1;
-#endif
-	} bits;
-} mii_nptxr_t, *p_mii_nptxr_t;
-
-/*
- * MII Register 8: Link partner received next page register.
- */
-typedef union _mii_lprxnpr {
-	uint16_t value;
-	struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint16_t np:1;
-		uint16_t ack:1;
-		uint16_t msgp:1;
-		uint16_t ack2:1;
-		uint16_t toggle:1;
-		uint16_t mcf:11;
-#else
-		uint16_t mcf:11;
-		uint16_t toggle:1;
-		uint16_t ack2:1;
-		uint16_t msgp:1;
-		uint16_t ack:1;
-		uint16_t np:1;
-#endif
-	} bits;
-} mii_lprxnpr_t, *p_mii_lprxnpr_t;
-
-/*
- * MII Register 9: 1000BaseT control register.
- */
-typedef union _mii_gcr {
-	uint16_t value;
-	struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint16_t test_mode:3;
-		uint16_t ms_mode_en:1;
-		uint16_t master:1;
-		uint16_t dte_or_repeater:1;
-		uint16_t link_1000fdx:1;
-		uint16_t link_1000hdx:1;
-		uint16_t res:8;
-#else
-		uint16_t res:8;
-		uint16_t link_1000hdx:1;
-		uint16_t link_1000fdx:1;
-		uint16_t dte_or_repeater:1;
-		uint16_t master:1;
-		uint16_t ms_mode_en:1;
-		uint16_t test_mode:3;
-#endif
-	} bits;
-} mii_gcr_t, *p_mii_gcr_t;
-
-/*
- * MII Register 10: 1000BaseT status register.
- */
-typedef union _mii_gsr {
-	uint16_t value;
-	struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint16_t ms_config_fault:1;
-		uint16_t ms_resolve:1;
-		uint16_t local_rx_status:1;
-		uint16_t remote_rx_status:1;
-		uint16_t link_1000fdx:1;
-		uint16_t link_1000hdx:1;
-		uint16_t res:2;
-		uint16_t idle_err_cnt:8;
-#else
-		uint16_t idle_err_cnt:8;
-		uint16_t res:2;
-		uint16_t link_1000hdx:1;
-		uint16_t link_1000fdx:1;
-		uint16_t remote_rx_status:1;
-		uint16_t local_rx_status:1;
-		uint16_t ms_resolve:1;
-		uint16_t ms_config_fault:1;
-#endif
-	} bits;
-} mii_gsr_t, *p_mii_gsr_t;
-
-/*
- * MII Register 15: Extended status register.
- */
-typedef union _mii_esr {
-	uint16_t value;
-	struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint16_t link_1000Xfdx:1;
-		uint16_t link_1000Xhdx:1;
-		uint16_t link_1000fdx:1;
-		uint16_t link_1000hdx:1;
-		uint16_t res:12;
-#else
-		uint16_t res:12;
-		uint16_t link_1000hdx:1;
-		uint16_t link_1000fdx:1;
-		uint16_t link_1000Xhdx:1;
-		uint16_t link_1000Xfdx:1;
-#endif
-	} bits;
-} mii_esr_t, *p_mii_esr_t;
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _SYS_NXGE_NXGE_MII_H_ */
--- a/usr/src/uts/sun4v/sys/nxge/nxge_n2_esr_hw.h	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,363 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef _SYS_NXGE_NXGE_N2_ESR_HW_H
-#define	_SYS_NXGE_NXGE_N2_ESR_HW_H
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#ifdef	__cplusplus
-extern "C" {
-#endif
-
-#define	ESR_N2_DEV_ADDR		0x1E
-#define	ESR_N2_BASE		0x8000
-
-/*
- * Definitions for TI WIZ6C2xxN2x0 Macro Family.
- */
-
-/* Register Blocks base address */
-
-#define	ESR_N2_PLL_REG_OFFSET		0
-#define	ESR_N2_TEST_REG_OFFSET		0x004
-#define	ESR_N2_TX_REG_OFFSET		0x100
-#define	ESR_N2_TX_0_REG_OFFSET		0x100
-#define	ESR_N2_TX_1_REG_OFFSET		0x104
-#define	ESR_N2_TX_2_REG_OFFSET		0x108
-#define	ESR_N2_TX_3_REG_OFFSET		0x10c
-#define	ESR_N2_TX_4_REG_OFFSET		0x110
-#define	ESR_N2_TX_5_REG_OFFSET		0x114
-#define	ESR_N2_TX_6_REG_OFFSET		0x118
-#define	ESR_N2_TX_7_REG_OFFSET		0x11c
-#define	ESR_N2_RX_REG_OFFSET		0x120
-#define	ESR_N2_RX_0_REG_OFFSET		0x120
-#define	ESR_N2_RX_1_REG_OFFSET		0x124
-#define	ESR_N2_RX_2_REG_OFFSET		0x128
-#define	ESR_N2_RX_3_REG_OFFSET		0x12c
-#define	ESR_N2_RX_4_REG_OFFSET		0x130
-#define	ESR_N2_RX_5_REG_OFFSET		0x134
-#define	ESR_N2_RX_6_REG_OFFSET		0x138
-#define	ESR_N2_RX_7_REG_OFFSET		0x13c
-#define	ESR_N2_P1_REG_OFFSET		0x400
-
-/* Register address */
-
-#define	ESR_N2_PLL_CFG_REG	(ESR_N2_BASE + ESR_N2_PLL_REG_OFFSET)
-#define	ESR_N2_PLL_CFG_L_REG	(ESR_N2_BASE + ESR_N2_PLL_REG_OFFSET)
-#define	ESR_N2_PLL_CFG_H_REG	(ESR_N2_BASE + ESR_N2_PLL_REG_OFFSET + 1)
-#define	ESR_N2_PLL_STS_REG	(ESR_N2_BASE + ESR_N2_PLL_REG_OFFSET + 2)
-#define	ESR_N2_PLL_STS_L_REG	(ESR_N2_BASE + ESR_N2_PLL_REG_OFFSET + 2)
-#define	ESR_N2_PLL_STS_H_REG	(ESR_N2_BASE + ESR_N2_PLL_REG_OFFSET + 3)
-#define	ESR_N2_TEST_CFG_REG	(ESR_N2_BASE + ESR_N2_TEST_REG_OFFSET)
-#define	ESR_N2_TEST_CFG_L_REG	(ESR_N2_BASE + ESR_N2_TEST_REG_OFFSET)
-#define	ESR_N2_TEST_CFG_H_REG	(ESR_N2_BASE + ESR_N2_TEST_REG_OFFSET + 1)
-
-#define	ESR_N2_TX_CFG_REG_ADDR(chan)	(ESR_N2_BASE + ESR_N2_TX_REG_OFFSET +\
-					(chan * 4))
-#define	ESR_N2_TX_CFG_L_REG_ADDR(chan)	(ESR_N2_BASE + ESR_N2_TX_REG_OFFSET +\
-					(chan * 4))
-#define	ESR_N2_TX_CFG_H_REG_ADDR(chan)	(ESR_N2_BASE + ESR_N2_TX_REG_OFFSET +\
-					(chan * 4) + 1)
-#define	ESR_N2_TX_STS_REG_ADDR(chan)	(ESR_N2_BASE + ESR_N2_TX_REG_OFFSET +\
-					(chan * 4) + 2)
-#define	ESR_N2_TX_STS_L_REG_ADDR(chan)	(ESR_N2_BASE + ESR_N2_TX_REG_OFFSET +\
-					(chan * 4) + 2)
-#define	ESR_N2_TX_STS_H_REG_ADDR(chan)	(ESR_N2_BASE + ESR_N2_TX_REG_OFFSET +\
-					(chan * 4) + 3)
-#define	ESR_N2_RX_CFG_REG_ADDR(chan)	(ESR_N2_BASE + ESR_N2_RX_REG_OFFSET +\
-					(chan * 4))
-#define	ESR_N2_RX_CFG_L_REG_ADDR(chan)	(ESR_N2_BASE + ESR_N2_RX_REG_OFFSET +\
-					(chan * 4))
-#define	ESR_N2_RX_CFG_H_REG_ADDR(chan)	(ESR_N2_BASE + ESR_N2_RX_REG_OFFSET +\
-					(chan * 4) + 1)
-#define	ESR_N2_RX_STS_REG_ADDR(chan)	(ESR_N2_BASE + ESR_N2_RX_REG_OFFSET +\
-					(chan * 4) + 2)
-#define	ESR_N2_RX_STS_L_REG_ADDR(chan)	(ESR_N2_BASE + ESR_N2_RX_REG_OFFSET +\
-					(chan * 4) + 2)
-#define	ESR_N2_RX_STS_H_REG_ADDR(chan)	(ESR_N2_BASE + ESR_N2_RX_REG_OFFSET +\
-					(chan * 4) + 3)
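
[Editor's note: worked out, the per-channel macros above reduce to fixed MDIO addresses; for example, for channel 2:]

	/* ESR_N2_TX_CFG_L_REG_ADDR(2) == 0x8000 + 0x100 + (2 * 4)     == 0x8108 */
	/* ESR_N2_TX_STS_L_REG_ADDR(2) == 0x8000 + 0x100 + (2 * 4) + 2 == 0x810a */
	/* ESR_N2_RX_CFG_H_REG_ADDR(2) == 0x8000 + 0x120 + (2 * 4) + 1 == 0x8129 */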
-
-/* PLL Configuration Low 16-bit word */
-typedef	union _esr_ti_cfgpll_l {
-	uint16_t value;
-
-	struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint16_t res2		: 6;
-		uint16_t lb		: 2;
-		uint16_t res1		: 3;
-		uint16_t mpy		: 4;
-		uint16_t enpll		: 1;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint16_t enpll		: 1;
-		uint16_t mpy		: 4;
-		uint16_t res1		: 3;
-		uint16_t lb		: 2;
-		uint16_t res2		: 6;
-#endif
-	} bits;
-} esr_ti_cfgpll_l_t;
-
-/* PLL Configurations */
-#define	CFGPLL_LB_FREQ_DEP_BANDWIDTH	0
-#define	CFGPLL_LB_LOW_BANDWIDTH		0x2
-#define	CFGPLL_LB_HIGH_BANDWIDTH	0x3
-#define	CFGPLL_MPY_4X			0
-#define	CFGPLL_MPY_5X			0x1
-#define	CFGPLL_MPY_6X			0x2
-#define	CFGPLL_MPY_8X			0x4
-#define	CFGPLL_MPY_10X			0x5
-#define	CFGPLL_MPY_12X			0x6
-#define	CFGPLL_MPY_12P5X		0x7
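
[Editor's note: a minimal sketch of filling in the PLL configuration word using the multiplier/bandwidth encodings above; the chosen values are only illustrative and the MDIO write that would follow is omitted.]

	esr_ti_cfgpll_l_t pll;

	pll.value = 0;
	pll.bits.enpll = 1;			/* enable the PLL */
	pll.bits.mpy = CFGPLL_MPY_10X;		/* x10 feedback multiplier */
	pll.bits.lb = CFGPLL_LB_LOW_BANDWIDTH;	/* loop bandwidth select */
	/* pll.value would then be written to ESR_N2_PLL_CFG_L_REG */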
-
-/* Rx Configuration Low 16-bit word */
-
-typedef	union _esr_ti_cfgrx_l {
-	uint16_t value;
-
-	struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint16_t los		: 2;
-		uint16_t align		: 2;
-		uint16_t res		: 1;
-		uint16_t term		: 3;
-		uint16_t invpair	: 1;
-		uint16_t rate		: 2;
-		uint16_t buswidth	: 3;
-		uint16_t entest		: 1;
-		uint16_t enrx		: 1;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint16_t enrx		: 1;
-		uint16_t entest		: 1;
-		uint16_t buswidth	: 3;
-		uint16_t rate		: 2;
-		uint16_t invpair	: 1;
-		uint16_t term		: 3;
-		uint16_t res		: 1;
-		uint16_t align		: 2;
-		uint16_t los		: 2;
-#endif
-	} bits;
-} esr_ti_cfgrx_l_t;
-
-/* Rx Configuration High 16-bit word */
-
-typedef	union _esr_ti_cfgrx_h {
-	uint16_t value;
-
-	struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint16_t res2		: 6;
-		uint16_t bsinrxn	: 1;
-		uint16_t bsinrxp	: 1;
-		uint16_t res1		: 1;
-		uint16_t eq		: 4;
-		uint16_t cdr		: 3;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint16_t cdr		: 3;
-		uint16_t eq		: 4;
-		uint16_t res1		: 1;
-		uint16_t bsinrxp	: 1;
-		uint16_t bsinrxn	: 1;
-		uint16_t res2		: 6;
-#endif
-	} bits;
-} esr_ti_cfgrx_h_t;
-
-/* Receive Configurations */
-#define	CFGRX_BUSWIDTH_10BIT			0
-#define	CFGRX_BUSWIDTH_8BIT			1
-#define	CFGRX_RATE_FULL				0
-#define	CFGRX_RATE_HALF				1
-#define	CFGRX_RATE_QUAD				2
-#define	CFGRX_TERM_VDDT				0
-#define	CFGRX_TERM_0P8VDDT			1
-#define	CFGRX_TERM_FLOAT			3
-#define	CFGRX_ALIGN_DIS				0
-#define	CFGRX_ALIGN_EN				1
-#define	CFGRX_ALIGN_JOG				2
-#define	CFGRX_LOS_DIS				0
-#define	CFGRX_LOS_HITHRES			1
-#define	CFGRX_LOS_LOTHRES			2
-#define	CFGRX_CDR_1ST_ORDER			0
-#define	CFGRX_CDR_2ND_ORDER_HP			1
-#define	CFGRX_CDR_2ND_ORDER_MP			2
-#define	CFGRX_CDR_2ND_ORDER_LP			3
-#define	CFGRX_CDR_1ST_ORDER_FAST_LOCK		4
-#define	CFGRX_CDR_2ND_ORDER_HP_FAST_LOCK	5
-#define	CFGRX_CDR_2ND_ORDER_MP_FAST_LOCK	6
-#define	CFGRX_CDR_2ND_ORDER_LP_FAST_LOCK	7
-#define	CFGRX_EQ_MAX_LF				0
-#define	CFGRX_EQ_ADAPTIVE_LP_ADAPTIVE_ZF	0x1
-#define	CFGRX_EQ_ADAPTIVE_LF_1084MHZ_ZF		0x8
-#define	CFGRX_EQ_ADAPTIVE_LF_805MHZ_ZF		0x9
-#define	CFGRX_EQ_ADAPTIVE_LP_573MHZ_ZF		0xA
-#define	CFGRX_EQ_ADAPTIVE_LP_402MHZ_ZF		0xB
-#define	CFGRX_EQ_ADAPTIVE_LP_304MHZ_ZF		0xC
-#define	CFGRX_EQ_ADAPTIVE_LP_216MHZ_ZF		0xD
-#define	CFGRX_EQ_ADAPTIVE_LP_156MHZ_ZF		0xE
-#define	CFGRX_EQ_ADAPTIVE_LP_135HZ_ZF		0xF
-
-/* Rx Status Low 16-bit word */
-
-typedef	union _esr_ti_stsrx_l {
-	uint16_t value;
-
-	struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint16_t res		: 10;
-		uint16_t bsrxn		: 1;
-		uint16_t bsrxp		: 1;
-		uint16_t losdtct	: 1;
-		uint16_t oddcg		: 1;
-		uint16_t sync		: 1;
-		uint16_t testfail	: 1;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint16_t testfail	: 1;
-		uint16_t sync		: 1;
-		uint16_t oddcg		: 1;
-		uint16_t losdtct	: 1;
-		uint16_t bsrxp		: 1;
-		uint16_t bsrxn		: 1;
-		uint16_t res		: 10;
-#endif
-	} bits;
-} esr_ti_stsrx_l_t;
-
-/* Tx Configuration Low 16-bit word */
-
-typedef	union _esr_ti_cfgtx_l {
-	uint16_t value;
-
-	struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint16_t de		: 4;
-		uint16_t swing		: 3;
-		uint16_t cm		: 1;
-		uint16_t invpair	: 1;
-		uint16_t rate		: 2;
-		uint16_t buswwidth	: 3;
-		uint16_t entest		: 1;
-		uint16_t entx		: 1;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint16_t entx		: 1;
-		uint16_t entest		: 1;
-		uint16_t buswwidth	: 3;
-		uint16_t rate		: 2;
-		uint16_t invpair	: 1;
-		uint16_t cm		: 1;
-		uint16_t swing		: 3;
-		uint16_t de		: 4;
-#endif
-	} bits;
-} esr_ti_cfgtx_l_t;
-
-/* Tx Configuration High 16-bit word */
-
-typedef	union _esr_ti_cfgtx_h {
-	uint16_t value;
-
-	struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint16_t res		: 14;
-		uint16_t bstx		: 1;
-		uint16_t enftp		: 1;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint16_t enftp		: 1;
-		uint16_t bstx		: 1;
-		uint16_t res		: 14;
-#endif
-	} bits;
-} esr_ti_cfgtx_h_t;
-
-/* Transmit Configurations */
-#define	CFGTX_BUSWIDTH_10BIT		0
-#define	CFGTX_BUSWIDTH_8BIT		1
-#define	CFGTX_RATE_FULL			0
-#define	CFGTX_RATE_HALF			1
-#define	CFGTX_RATE_QUAD			2
-#define	CFGTX_SWING_125MV		0
-#define	CFGTX_SWING_250MV		1
-#define	CFGTX_SWING_500MV		2
-#define	CFGTX_SWING_625MV		3
-#define	CFGTX_SWING_750MV		4
-#define	CFGTX_SWING_1000MV		5
-#define	CFGTX_SWING_1250MV		6
-#define	CFGTX_SWING_1375MV		7
-#define	CFGTX_DE_0			0
-#define	CFGTX_DE_4P76			1
-#define	CFGTX_DE_9P52			2
-#define	CFGTX_DE_14P28			3
-#define	CFGTX_DE_19P04			4
-#define	CFGTX_DE_23P8			5
-#define	CFGTX_DE_28P56			6
-#define	CFGTX_DE_33P32			7
-
-/* Test Configuration */
-
-typedef	union _esr_ti_testcfg {
-	uint16_t value;
-
-	struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint16_t res1		: 1;
-		uint16_t invpat		: 1;
-		uint16_t rate		: 2;
-		uint16_t res		: 1;
-		uint16_t enbspls	: 1;
-		uint16_t enbsrx		: 1;
-		uint16_t enbstx		: 1;
-		uint16_t loopback	: 2;
-		uint16_t clkbyp		: 2;
-		uint16_t enrxpatt	: 1;
-		uint16_t entxpatt	: 1;
-		uint16_t testpatt	: 2;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint16_t testpatt	: 2;
-		uint16_t entxpatt	: 1;
-		uint16_t enrxpatt	: 1;
-		uint16_t clkbyp		: 2;
-		uint16_t loopback	: 2;
-		uint16_t enbstx		: 1;
-		uint16_t enbsrx		: 1;
-		uint16_t enbspls	: 1;
-		uint16_t res		: 1;
-		uint16_t rate		: 2;
-		uint16_t invpat		: 1;
-		uint16_t res1		: 1;
-#endif
-	} bits;
-} esr_ti_testcfg_t;
-
-#define	TESTCFG_PAD_LOOPBACK		0x1
-#define	TESTCFG_INNER_CML_DIS_LOOPBACK	0x2
-#define	TESTCFG_INNER_CML_EN_LOOOPBACK	0x3
-
-#ifdef	__cplusplus
-}
-#endif
-
-#endif	/* _SYS_NXGE_NXGE_N2_ESR_HW_H */
--- a/usr/src/uts/sun4v/sys/nxge/nxge_phy_hw.h	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,633 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef	_SYS_NXGE_NXGE_PHY_HW_H
-#define	_SYS_NXGE_NXGE_PHY_HW_H
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-
-#ifdef	__cplusplus
-extern "C" {
-#endif
-
-#include <nxge_defs.h>
-
-#define	BCM5464_NEPTUNE_PORT_ADDR_BASE		10
-#define	BCM8704_NEPTUNE_PORT_ADDR_BASE		8
-#define	BCM8704_N2_PORT_ADDR_BASE		16
-#define	BCM8704_PMA_PMD_DEV_ADDR		1
-#define	BCM8704_PCS_DEV_ADDR			3
-#define	BCM8704_USER_DEV3_ADDR			3
-#define	BCM8704_PHYXS_ADDR			4
-#define	BCM8704_USER_DEV4_ADDR			4
-
-/* Definitions for BCM 5464R PHY chip */
-
-#define	BCM5464R_PHY_ECR	16
-#define	BCM5464R_PHY_ESR	17
-#define	BCM5464R_RXERR_CNT	18
-#define	BCM5464R_FALSECS_CNT	19
-#define	BCM5464R_RX_NOTOK_CNT	20
-#define	BCM5464R_ER_DATA	21
-#define	BCM5464R_RES		22
-#define	BCM5464R_ER_ACC		23
-#define	BCM5464R_AUX_CTL	24
-#define	BCM5464R_AUX_S		25
-#define	BCM5464R_INTR_S		26
-#define	BCM5464R_INTR_M		27
-#define	BCM5464R_MISC		28
-#define	BCM5464R_MISC1		29
-#define	BCM5464R_TESTR1		30
-
-#define	PHY_BCM_5464R_OUI	0x001018
-#define	PHY_BCM_5464R_MODEL	0x0B
-
-/*
- * MII Register 16:  PHY Extended Control Register
- */
-
-typedef	union _mii_phy_ecr_t {
-	uint16_t value;
-	struct {
-#ifdef _BIT_FIELDS_HTOL
-		uint16_t mac_phy_if_mode	: 1;
-		uint16_t dis_automdicross	: 1;
-		uint16_t tx_dis			: 1;
-		uint16_t intr_dis		: 1;
-		uint16_t force_intr		: 1;
-		uint16_t bypass_encdec		: 1;
-		uint16_t bypass_scrdes		: 1;
-		uint16_t bypass_mlt3		: 1;
-		uint16_t bypass_rx_sym		: 1;
-		uint16_t reset_scr		: 1;
-		uint16_t en_led_traffic		: 1;
-		uint16_t force_leds_on		: 1;
-		uint16_t force_leds_off		: 1;
-		uint16_t res			: 2;
-		uint16_t gmii_fifo_elas		: 1;
-#else
-		uint16_t gmii_fifo_elas		: 1;
-		uint16_t res			: 2;
-		uint16_t force_leds_off		: 1;
-		uint16_t force_leds_on		: 1;
-		uint16_t en_led_traffic		: 1;
-		uint16_t reset_scr		: 1;
-		uint16_t bypass_rx_sym		: 1;
-		uint16_t bypass_mlt3		: 1;
-		uint16_t bypass_scrdes		: 1;
-		uint16_t bypass_encdec		: 1;
-		uint16_t force_intr		: 1;
-		uint16_t intr_dis		: 1;
-		uint16_t tx_dis			: 1;
-		uint16_t dis_automdicross	: 1;
-		uint16_t mac_phy_if_mode	: 1;
-#endif
-	} bits;
-} mii_phy_ecr_t, *p_mii_phy_ecr_t;
-
-/*
- * MII Register 17:  PHY Extended Status Register
- */
-typedef	union _mii_phy_esr_t {
-	uint16_t value;
-	struct {
-#ifdef _BIT_FIELDS_HTOL
-		uint16_t anbpsfm		: 1;
-		uint16_t wsdwngr		: 1;
-		uint16_t mdi_crst		: 1;
-		uint16_t intr_s			: 1;
-		uint16_t rmt_rx_s		: 1;
-		uint16_t loc_rx_s		: 1;
-		uint16_t locked			: 1;
-		uint16_t link_s			: 1;
-		uint16_t crc_err		: 1;
-		uint16_t cext_err		: 1;
-		uint16_t bad_ssd		: 1;
-		uint16_t bad_esd		: 1;
-		uint16_t rx_err			: 1;
-		uint16_t tx_err			: 1;
-		uint16_t lock_err		: 1;
-		uint16_t mlt3_cerr		: 1;
-#else
-		uint16_t mlt3_cerr		: 1;
-		uint16_t lock_err		: 1;
-		uint16_t tx_err			: 1;
-		uint16_t rx_err			: 1;
-		uint16_t bad_esd		: 1;
-		uint16_t bad_ssd		: 1;
-		uint16_t cext_err		: 1;
-		uint16_t crc_err		: 1;
-		uint16_t link_s			: 1;
-		uint16_t locked			: 1;
-		uint16_t loc_rx_s		: 1;
-		uint16_t rmt_rx_s		: 1;
-		uint16_t intr_s			: 1;
-		uint16_t mdi_crst		: 1;
-		uint16_t wsdwngr		: 1;
-		uint16_t anbpsfm		: 1;
-#endif
-	} bits;
-} mii_phy_esr_t, *p_mii_phy_esr_t;
-
-/*
- * MII Register 18:  Receive Error Counter Register
- */
-typedef	union _mii_rxerr_cnt_t {
-	uint16_t value;
-	struct {
-		uint16_t rx_err_cnt		: 16;
-	} bits;
-} mii_rxerr_cnt_t, *p_mii_rxerr_cnt_t;
-
-/*
- * MII Register 19:  False Carrier Sense Counter Register
- */
-typedef	union _mii_falsecs_cnt_t {
-	uint16_t value;
-	struct {
-#ifdef _BIT_FIELDS_HTOL
-		uint16_t res			: 8;
-		uint16_t false_cs_cnt		: 8;
-#else
-		uint16_t false_cs_cnt		: 8;
-		uint16_t res			: 8;
-#endif
-	} bits;
-} mii_falsecs_cnt_t, *p_mii_falsecs_cnt_t;
-
-/*
- * MII Register 20:  Receiver NOT_OK Counter Register
- */
-typedef	union _mii_rx_notok_cnt_t {
-	uint16_t value;
-	struct {
-#ifdef _BIT_FIELDS_HTOL
-		uint16_t l_rx_notok_cnt		: 8;
-		uint16_t r_rx_notok_cnt		: 8;
-#else
-		uint16_t r_rx_notok_cnt		: 8;
-		uint16_t l_rx_notok_cnt		: 8;
-#endif
-	} bits;
-} mii_rx_notok_cnt_t, *p_mii_rx_notok_t;
-
-/*
- * MII Register 21:  Expansion Register Data Register
- */
-typedef	union _mii_er_data_t {
-	uint16_t value;
-	struct {
-		uint16_t reg_data;
-	} bits;
-} mii_er_data_t, *p_mii_er_data_t;
-
-/*
- * MII Register 23:  Expansion Register Access Register
- */
-typedef	union _mii_er_acc_t {
-	uint16_t value;
-	struct {
-#ifdef _BIT_FIELDS_HTOL
-		uint16_t res			: 4;
-		uint16_t er_sel			: 4;
-		uint16_t er_acc			: 8;
-#else
-		uint16_t er_acc			: 8;
-		uint16_t er_sel			: 4;
-		uint16_t res			: 4;
-#endif
-	} bits;
-} mii_er_acc_t, *p_mii_er_acc_t;
-
-#define	EXP_RXTX_PKT_CNT		0x0
-#define	EXP_INTR_STAT			0x1
-#define	MULTICOL_LED_SEL		0x4
-#define	MULTICOL_LED_FLASH_RATE_CTL	0x5
-#define	MULTICOL_LED_BLINK_CTL		0x6
-#define	CABLE_DIAG_CTL			0x10
-#define	CABLE_DIAG_RES			0x11
-#define	CABLE_DIAG_LEN_CH_2_1		0x12
-#define	CABLE_DIAG_LEN_CH_4_3		0x13
-
-/*
- * MII Register 24:  Auxiliary Control Register
- */
-typedef	union _mii_aux_ctl_t {
-	uint16_t value;
-	struct {
-#ifdef _BIT_FIELDS_HTOL
-		uint16_t ext_lb			: 1;
-		uint16_t ext_pkt_len		: 1;
-		uint16_t edge_rate_ctl_1000	: 2;
-		uint16_t res			: 1;
-		uint16_t write_1		: 1;
-		uint16_t res1			: 2;
-		uint16_t dis_partial_resp	: 1;
-		uint16_t res2			: 1;
-		uint16_t edge_rate_ctl_100	: 2;
-		uint16_t diag_mode		: 1;
-		uint16_t shadow_reg_sel		: 3;
-#else
-		uint16_t shadow_reg_sel		: 3;
-		uint16_t diag_mode		: 1;
-		uint16_t edge_rate_ctl_100	: 2;
-		uint16_t res2			: 1;
-		uint16_t dis_partial_resp	: 1;
-		uint16_t res1			: 2;
-		uint16_t write_1		: 1;
-		uint16_t res			: 1;
-		uint16_t edge_rate_ctl_1000	: 2;
-		uint16_t ext_pkt_len		: 1;
-		uint16_t ext_lb			: 1;
-#endif
-	} bits;
-} mii_aux_ctl_t, *p_mii_aux_ctl_t;
-
-#define	AUX_REG				0x0
-#define	AUX_10BASET			0x1
-#define	AUX_PWR_CTL			0x2
-#define	AUX_MISC_TEST			0x4
-#define	AUX_MISC_CTL			0x7
-
-/*
- * MII Register 25:  Auxiliary Status Summary Register
- */
-typedef	union _mii_aux_s_t {
-	uint16_t value;
-	struct {
-#ifdef _BIT_FIELDS_HTOL
-		uint16_t an_complete		: 1;
-		uint16_t an_complete_ack	: 1;
-		uint16_t an_ack_detect		: 1;
-		uint16_t an_ability_detect	: 1;
-		uint16_t an_np_wait		: 1;
-		uint16_t an_hcd			: 3;
-		uint16_t pd_fault		: 1;
-		uint16_t rmt_fault		: 1;
-		uint16_t an_page_rx		: 1;
-		uint16_t lp_an_ability		: 1;
-		uint16_t lp_np_ability		: 1;
-		uint16_t link_s			: 1;
-		uint16_t pause_res_rx_dir	: 1;
-		uint16_t pause_res_tx_dir	: 1;
-#else
-		uint16_t pause_res_tx_dir	: 1;
-		uint16_t pause_res_rx_dir	: 1;
-		uint16_t link_s			: 1;
-		uint16_t lp_np_ability		: 1;
-		uint16_t lp_an_ability		: 1;
-		uint16_t an_page_rx		: 1;
-		uint16_t rmt_fault		: 1;
-		uint16_t pd_fault		: 1;
-		uint16_t an_hcd			: 3;
-		uint16_t an_np_wait		: 1;
-		uint16_t an_ability_detect	: 1;
-		uint16_t an_ack_detect		: 1;
-		uint16_t an_complete_ack	: 1;
-		uint16_t an_complete		: 1;
-#endif
-	} bits;
-} mii_aux_s_t, *p_mii_aux_s_t;
-
-/*
- * MII Register 26, 27:  Interrupt Status and Mask Registers
- */
-typedef	union _mii_intr_t {
-	uint16_t value;
-	struct {
-#ifdef _BIT_FIELDS_HTOL
-		uint16_t res			: 1;
-		uint16_t illegal_pair_swap	: 1;
-		uint16_t mdix_status_change	: 1;
-		uint16_t exceed_hicnt_thres	: 1;
-		uint16_t exceed_locnt_thres	: 1;
-		uint16_t an_page_rx		: 1;
-		uint16_t hcd_nolink		: 1;
-		uint16_t no_hcd			: 1;
-		uint16_t neg_unsupported_hcd	: 1;
-		uint16_t scr_sync_err		: 1;
-		uint16_t rmt_rx_status_change	: 1;
-		uint16_t loc_rx_status_change	: 1;
-		uint16_t duplex_mode_change	: 1;
-		uint16_t link_speed_change	: 1;
-		uint16_t link_status_change	: 1;
-		uint16_t crc_err		: 1;
-#else
-		uint16_t crc_err		: 1;
-		uint16_t link_status_change	: 1;
-		uint16_t link_speed_change	: 1;
-		uint16_t duplex_mode_change	: 1;
-		uint16_t loc_rx_status_change	: 1;
-		uint16_t rmt_rx_status_change	: 1;
-		uint16_t scr_sync_err		: 1;
-		uint16_t neg_unsupported_hcd	: 1;
-		uint16_t no_hcd			: 1;
-		uint16_t hcd_nolink		: 1;
-		uint16_t an_page_rx		: 1;
-		uint16_t exceed_locnt_thres	: 1;
-		uint16_t exceed_hicnt_thres	: 1;
-		uint16_t mdix_status_change	: 1;
-		uint16_t illegal_pair_swap	: 1;
-		uint16_t res			: 1;
-#endif
-	} bits;
-} mii_intr_t, *p_mii_intr_t;
-
-/*
- * MII Register 28:  Register 1C Access Register
- */
-typedef	union _mii_misc_t {
-	uint16_t value;
-	struct {
-#ifdef _BIT_FIELDS_HTOL
-		uint16_t w_en			: 1;
-		uint16_t shadow_reg_sel		: 5;
-		uint16_t data			: 10;
-#else
-		uint16_t data			: 10;
-		uint16_t shadow_reg_sel		: 5;
-		uint16_t w_en			: 1;
-#endif
-	} bits;
-} mii_misc_t, *p_mii_misc_t;
-
-#define	LINK_LED_MODE			0x2
-#define	CLK_ALIGN_CTL			0x3
-#define	WIRE_SP_RETRY			0x4
-#define	CLK125				0x5
-#define	LED_STATUS			0x8
-#define	LED_CONTROL			0x9
-#define	AUTO_PWR_DOWN			0xA
-#define	LED_SEL1			0xD
-#define	LED_SEL2			0xE
-
-/*
- * MII Register 29:  Master/Slave Seed / HCD Status Register
- */
-
-typedef	union _mii_misc1_t {
-	uint16_t value;
-	struct {
-#ifdef _BIT_FIELDS_HTOL
-		uint16_t en_shadow_reg		: 1;
-		uint16_t data			: 15;
-#else
-		uint16_t data			: 15;
-		uint16_t en_shadow_reg		: 1;
-#endif
-	} bits;
-} mii_misc1_t, *p_mii_misc1_t;
-
-/*
- * MII Register 30:  Test Register 1
- */
-
-typedef	union _mii_test1_t {
-	uint16_t value;
-	struct {
-#ifdef _BIT_FIELDS_HTOL
-		uint16_t crc_err_cnt_sel	: 1;
-		uint16_t res			: 7;
-		uint16_t manual_swap_mdi_st	: 1;
-		uint16_t res1			: 7;
-#else
-		uint16_t res1			: 7;
-		uint16_t manual_swap_mdi_st	: 1;
-		uint16_t res			: 7;
-		uint16_t crc_err_cnt_sel	: 1;
-#endif
-	} bits;
-} mii_test1_t, *p_mii_test1_t;
-
-
-/* Definitions of BCM8704 */
-
-#define	BCM8704_PMD_CONTROL_REG			0
-#define	BCM8704_PMD_STATUS_REG			0x1
-#define	BCM8704_PMD_ID_0_REG			0x2
-#define	BCM8704_PMD_ID_1_REG			0x3
-#define	BCM8704_PMD_SPEED_ABIL_REG		0x4
-#define	BCM8704_PMD_DEV_IN_PKG1_REG		0x5
-#define	BCM8704_PMD_DEV_IN_PKG2_REG		0x6
-#define	BCM8704_PMD_CONTROL2_REG		0x7
-#define	BCM8704_PMD_STATUS2_REG			0x8
-#define	BCM8704_PMD_TRANSMIT_DIS_REG		0x9
-#define	BCM8704_PMD_RECEIVE_SIG_DETECT		0xa
-#define	BCM8704_PMD_ORG_UNIQUE_ID_0_REG		0xe
-#define	BCM8704_PMD_ORG_UNIQUE_ID_1_REG		0xf
-#define	BCM8704_PCS_CONTROL_REG			0
-#define	BCM8704_PCS_STATUS1_REG			0x1
-#define	BCM8704_PCS_ID_0_REG			0x2
-#define	BCM8704_PCS_ID_1_REG			0x3
-#define	BCM8704_PCS_SPEED_ABILITY_REG		0x4
-#define	BCM8704_PCS_DEV_IN_PKG1_REG		0x5
-#define	BCM8704_PCS_DEV_IN_PKG2_REG		0x6
-#define	BCM8704_PCS_CONTROL2_REG		0x7
-#define	BCM8704_PCS_STATUS2_REG			0x8
-#define	BCM8704_PCS_ORG_UNIQUE_ID_0_REG		0xe
-#define	BCM8704_PCS_ORG_UNIQUE_ID_1_REG		0xf
-#define	BCM8704_PCS_STATUS_REG			0x18
-#define	BCM8704_10GBASE_R_PCS_STATUS_REG	0x20
-#define	BCM8704_10GBASE_R_PCS_STATUS2_REG	0x21
-#define	BCM8704_PHYXS_CONTROL_REG		0
-#define	BCM8704_PHYXS_STATUS_REG		0x1
-#define	BCM8704_PHY_ID_0_REG			0x2
-#define	BCM8704_PHY_ID_1_REG			0x3
-#define	BCM8704_PHYXS_SPEED_ABILITY_REG		0x4
-#define	BCM8704_PHYXS_DEV_IN_PKG2_REG		0x5
-#define	BCM8704_PHYXS_DEV_IN_PKG1_REG		0x6
-#define	BCM8704_PHYXS_STATUS2_REG		0x8
-#define	BCM8704_PHYXS_ORG_UNIQUE_ID_0_REG	0xe
-#define	BCM8704_PHYXS_ORG_UNIQUE_ID_1_REG	0xf
-#define	BCM8704_PHYXS_XGXS_LANE_STATUS_REG	0x18
-#define	BCM8704_PHYXS_XGXS_TEST_CONTROL_REG	0x19
-#define	BCM8704_USER_CONTROL_REG		0xC800
-#define	BCM8704_USER_ANALOG_CLK_REG		0xC801
-#define	BCM8704_USER_PMD_RX_CONTROL_REG		0xC802
-#define	BCM8704_USER_PMD_TX_CONTROL_REG		0xC803
-#define	BCM8704_USER_ANALOG_STATUS0_REG		0xC804
-#define	BCM8704_USER_OPTICS_DIGITAL_CTRL_REG	0xC808
-#define	BCM8704_USER_RX2_CONTROL1_REG		0x80C6
-#define	BCM8704_USER_RX1_CONTROL1_REG		0x80D6
-#define	BCM8704_USER_RX0_CONTROL1_REG		0x80E6
-#define	BCM8704_USER_TX_ALARM_STATUS_REG	0x9004
-
-/* Rx Channel Control1 Register bits */
-#define	BCM8704_RXPOL_FLIP			0x20
-
-typedef	union _phyxs_control {
-	uint16_t value;
-	struct {
-#ifdef _BIT_FIELDS_HTOL
-		uint16_t reset			: 1;
-		uint16_t loopback		: 1;
-		uint16_t speed_sel2		: 1;
-		uint16_t res2			: 1;
-		uint16_t low_power		: 1;
-		uint16_t res1			: 4;
-		uint16_t speed_sel1		: 1;
-		uint16_t speed_sel0		: 4;
-		uint16_t res0			: 2;
-#else
-		uint16_t res0			: 2;
-		uint16_t speed_sel0		: 4;
-		uint16_t speed_sel1		: 1;
-		uint16_t res1			: 4;
-		uint16_t low_power		: 1;
-		uint16_t res2			: 1;
-		uint16_t speed_sel2		: 1;
-		uint16_t loopback		: 1;
-		uint16_t reset			: 1;
-#endif
-	} bits;
-} phyxs_control_t, *p_phyxs_control_t, pcs_control_t, *p_pcs_control_t;
-
-
-/* PMD/Optics Digital Control Register (Dev=3 Addr=0xc800) */
-
-typedef	union _control {
-	uint16_t value;
-	struct {
-#ifdef _BIT_FIELDS_HTOL
-		uint16_t optxenb_lvl		: 1;
-		uint16_t optxrst_lvl		: 1;
-		uint16_t opbiasflt_lvl		: 1;
-		uint16_t obtmpflt_lvl		: 1;
-		uint16_t opprflt_lvl		: 1;
-		uint16_t optxflt_lvl		: 1;
-		uint16_t optrxlos_lvl		: 1;
-		uint16_t oprxflt_lvl		: 1;
-		uint16_t optxon_lvl		: 1;
-		uint16_t res1			: 7;
-#else
-		uint16_t res1			: 7;
-		uint16_t optxon_lvl		: 1;
-		uint16_t oprxflt_lvl		: 1;
-		uint16_t optrxlos_lvl		: 1;
-		uint16_t optxflt_lvl		: 1;
-		uint16_t opprflt_lvl		: 1;
-		uint16_t obtmpflt_lvl		: 1;
-		uint16_t opbiasflt_lvl		: 1;
-		uint16_t optxrst_lvl		: 1;
-		uint16_t optxenb_lvl		: 1;
-#endif
-	} bits;
-} control_t, *p_control_t;
-
-typedef	union _pmd_tx_control {
-	uint16_t value;
-	struct {
-#ifdef _BIT_FIELDS_HTOL
-		uint16_t res1			: 7;
-		uint16_t xfp_clken		: 1;
-		uint16_t tx_dac_txd		: 2;
-		uint16_t tx_dac_txck		: 2;
-		uint16_t tsd_lpwren		: 1;
-		uint16_t tsck_lpwren		: 1;
-		uint16_t cmu_lpwren		: 1;
-		uint16_t sfiforst		: 1;
-#else
-		uint16_t sfiforst		: 1;
-		uint16_t cmu_lpwren		: 1;
-		uint16_t tsck_lpwren		: 1;
-		uint16_t tsd_lpwren		: 1;
-		uint16_t tx_dac_txck		: 2;
-		uint16_t tx_dac_txd		: 2;
-		uint16_t xfp_clken		: 1;
-		uint16_t res1			: 7;
-#endif
-	} bits;
-} pmd_tx_control_t, *p_pmd_tx_control_t;
-
-
-/* PMD/Optics Digital Control Register (Dev=3 Addr=0xc808) */
-
-typedef	union _optics_dcntr {
-	uint16_t value;
-	struct {
-#ifdef _BIT_FIELDS_HTOL
-		uint16_t fault_mode		: 1;
-		uint16_t tx_pwrdown		: 1;
-		uint16_t rx_pwrdown		: 1;
-		uint16_t ext_flt_en		: 1;
-		uint16_t opt_rst		: 1;
-		uint16_t pcs_tx_inv_b		: 1;
-		uint16_t pcs_rx_inv		: 1;
-		uint16_t res3			: 2;
-		uint16_t gpio_sel		: 2;
-		uint16_t res2			: 1;
-		uint16_t lpbk_err_dis		: 1;
-		uint16_t res1			: 2;
-		uint16_t txonoff_pwdwn_dis	: 1;
-#else
-		uint16_t txonoff_pwdwn_dis	: 1;
-		uint16_t res1			: 2;
-		uint16_t lpbk_err_dis		: 1;
-		uint16_t res2			: 1;
-		uint16_t gpio_sel		: 2;
-		uint16_t res3			: 2;
-		uint16_t pcs_rx_inv		: 1;
-		uint16_t pcs_tx_inv_b		: 1;
-		uint16_t opt_rst		: 1;
-		uint16_t ext_flt_en		: 1;
-		uint16_t rx_pwrdown		: 1;
-		uint16_t tx_pwrdown		: 1;
-		uint16_t fault_mode		: 1;
-#endif
-	} bits;
-} optics_dcntr_t, *p_optics_dcntr_t;
-
-/* PMD Receive Signal Detect Register (Dev = 1 Register Address = 0x000A) */
-
-#define	PMD_RX_SIG_DET3			0x10
-#define	PMD_RX_SIG_DET2			0x08
-#define	PMD_RX_SIG_DET1			0x04
-#define	PMD_RX_SIG_DET0			0x02
-#define	GLOB_PMD_RX_SIG_OK		0x01
-
-/* 10GBase-R PCS Status Register (Dev = 3, Register Address = 0x0020) */
-
-#define	PCS_10GBASE_RX_LINK_STATUS	0x1000
-#define	PCS_PRBS31_ABLE			0x0004
-#define	PCS_10GBASE_R_HI_BER		0x0002
-#define	PCS_10GBASE_R_PCS_BLK_LOCK	0x0001
-
-/* XGXS Lane Status Register (Dev = 4, Register Address = 0x0018) */
-
-#define	XGXS_LANE_ALIGN_STATUS		0x1000
-#define	XGXS_PATTERN_TEST_ABILITY	0x0800
-#define	XGXS_LANE3_SYNC			0x0008
-#define	XGXS_LANE2_SYNC			0x0004
-#define	XGXS_LANE1_SYNC			0x0002
-#define	XGXS_LANE0_SYNC			0x0001
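
[Editor's note: a small sketch of testing these bits against a value read from the XGXS lane-status register; `status` is a placeholder for that read.]

	uint16_t status = 0;	/* placeholder for a lane-status register read */
	uint16_t all_lanes = XGXS_LANE0_SYNC | XGXS_LANE1_SYNC |
	    XGXS_LANE2_SYNC | XGXS_LANE3_SYNC;
	boolean_t xgxs_up = ((status & XGXS_LANE_ALIGN_STATUS) != 0 &&
	    (status & all_lanes) == all_lanes);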
-
-#ifdef	__cplusplus
-}
-#endif
-
-#endif	/* _SYS_NXGE_NXGE_PHY_HW_H */
--- a/usr/src/uts/sun4v/sys/nxge/nxge_rxdma.h	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,465 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef	_SYS_NXGE_NXGE_RXDMA_H
-#define	_SYS_NXGE_NXGE_RXDMA_H
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#ifdef	__cplusplus
-extern "C" {
-#endif
-
-#include <sys/nxge/nxge_rxdma_hw.h>
-#include <npi_rxdma.h>
-
-#define	RXDMA_CK_DIV_DEFAULT		7500 	/* 25 usec */
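
[Editor's note: as a sanity check on the comment, 7500 divider counts per 25 usec corresponds to a 300 MHz reference, assuming the divider counts one tick per clock cycle; the clock source itself is not specified in this header.]
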
-/*
- * Per the hardware RDC designer: use 8 cache lines (value from Atlas bringup).
- */
-#define	RXDMA_RED_LESS_BYTES		(8 * 64) /* 8 cache lines */
-#define	RXDMA_RED_LESS_ENTRIES		(RXDMA_RED_LESS_BYTES/8)
-#define	RXDMA_RED_WINDOW_DEFAULT	0
-#define	RXDMA_RED_THRES_DEFAULT		0
-
-#define	RXDMA_RCR_PTHRES_DEFAULT	0x20
-#define	RXDMA_RCR_TO_DEFAULT		0x8
-
-/*
- * hardware workarounds: kick 16 (was 8 before)
- */
-#define	NXGE_RXDMA_POST_BATCH		16
-
-#define	RXBUF_START_ADDR(a, index, bsize)	((a) & ((index) * (bsize)))
-#define	RXBUF_OFFSET_FROM_START(a, start)	((start) - (a))
-#define	RXBUF_64B_ALIGNED		64
-
-#define	NXGE_RXBUF_EXTRA		34
-/*
- * Receive buffer thresholds and buffer types
- */
-#define	NXGE_RX_BCOPY_SCALE	8	/* use 1/8 as lowest granularity */
-typedef enum  {
-	NXGE_RX_COPY_ALL = 0,		/* do bcopy on every packet	 */
-	NXGE_RX_COPY_1,			/* bcopy on 1/8 of buffer posted */
-	NXGE_RX_COPY_2,			/* bcopy on 2/8 of buffer posted */
-	NXGE_RX_COPY_3,			/* bcopy on 3/8 of buffer posted */
-	NXGE_RX_COPY_4,			/* bcopy on 4/8 of buffer posted */
-	NXGE_RX_COPY_5,			/* bcopy on 5/8 of buffer posted */
-	NXGE_RX_COPY_6,			/* bcopy on 6/8 of buffer posted */
-	NXGE_RX_COPY_7,			/* bcopy on 7/8 of buffer posted */
-	NXGE_RX_COPY_NONE		/* don't do bcopy at all	 */
-} nxge_rxbuf_threshold_t;
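
[Editor's note: a hedged sketch of how a 1/8-granularity threshold like this can be applied; `consumed` and `posted` are illustrative names, `boolean_t`/`B_TRUE` are the usual Solaris DDI types, and this is not necessarily the exact test the driver performs.]

	static boolean_t
	rx_should_bcopy(uint32_t consumed, uint32_t posted,
	    nxge_rxbuf_threshold_t level)
	{
		if (level == NXGE_RX_COPY_ALL)
			return (B_TRUE);
		if (level == NXGE_RX_COPY_NONE)
			return (B_FALSE);
		/* copy once level/8 of the posted buffers are in use */
		return (consumed * NXGE_RX_BCOPY_SCALE >= posted * (uint32_t)level);
	}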
-
-typedef enum  {
-	NXGE_RBR_TYPE0 = RCR_PKTBUFSZ_0,  /* bcopy buffer size 0 (small) */
-	NXGE_RBR_TYPE1 = RCR_PKTBUFSZ_1,  /* bcopy buffer size 1 (medium) */
-	NXGE_RBR_TYPE2 = RCR_PKTBUFSZ_2	  /* bcopy buffer size 2 (large) */
-} nxge_rxbuf_type_t;
-
-typedef	struct _rdc_errlog {
-	rdmc_par_err_log_t	pre_par;
-	rdmc_par_err_log_t	sha_par;
-	uint8_t			compl_err_type;
-} rdc_errlog_t;
-
-/*
- * Receive  Statistics.
- */
-typedef struct _nxge_rx_ring_stats_t {
-	uint64_t	ipackets;
-	uint64_t	ibytes;
-	uint32_t	ierrors;
-	uint32_t	multircv;
-	uint32_t	brdcstrcv;
-	uint32_t	norcvbuf;
-
-	uint32_t	rx_inits;
-	uint32_t	rx_jumbo_pkts;
-	uint32_t	rx_multi_pkts;
-	uint32_t	rx_mtu_pkts;
-	uint32_t	rx_no_buf;
-
-	/*
-	 * Receive buffer management statistics.
-	 */
-	uint32_t	rx_new_pages;
-	uint32_t	rx_new_mtu_pgs;
-	uint32_t	rx_new_nxt_pgs;
-	uint32_t	rx_reused_pgs;
-	uint32_t	rx_mtu_drops;
-	uint32_t	rx_nxt_drops;
-
-	/*
-	 * Error event stats.
-	 */
-	uint32_t	rx_rbr_tmout;
-	uint32_t	l2_err;
-	uint32_t	l4_cksum_err;
-	uint32_t	fflp_soft_err;
-	uint32_t	zcp_soft_err;
-	uint32_t	dcf_err;
-	uint32_t 	rbr_tmout;
-	uint32_t 	rsp_cnt_err;
-	uint32_t 	byte_en_err;
-	uint32_t 	byte_en_bus;
-	uint32_t 	rsp_dat_err;
-	uint32_t 	rcr_ack_err;
-	uint32_t 	dc_fifo_err;
-	uint32_t 	rcr_sha_par;
-	uint32_t 	rbr_pre_par;
-	uint32_t 	port_drop_pkt;
-	uint32_t 	wred_drop;
-	uint32_t 	rbr_pre_empty;
-	uint32_t 	rcr_shadow_full;
-	uint32_t 	config_err;
-	uint32_t 	rcrincon;
-	uint32_t 	rcrfull;
-	uint32_t 	rbr_empty;
-	uint32_t 	rbrfull;
-	uint32_t 	rbrlogpage;
-	uint32_t 	cfiglogpage;
-	uint32_t 	rcrto;
-	uint32_t 	rcrthres;
-	uint32_t 	mex;
-	rdc_errlog_t	errlog;
-} nxge_rx_ring_stats_t, *p_nxge_rx_ring_stats_t;
-
-typedef struct _nxge_rdc_sys_stats {
-	uint32_t	pre_par;
-	uint32_t	sha_par;
-	uint32_t	id_mismatch;
-	uint32_t	ipp_eop_err;
-	uint32_t	zcp_eop_err;
-} nxge_rdc_sys_stats_t, *p_nxge_rdc_sys_stats_t;
-
-/*
- * Software reserved buffer offset
- */
-typedef struct _nxge_rxbuf_off_hdr_t {
-	uint32_t		index;
-} nxge_rxbuf_off_hdr_t, *p_nxge_rxbuf_off_hdr_t;
-
-/*
- * Definitions for each receive buffer block.
- */
-typedef struct _nxge_rbb_t {
-	nxge_os_dma_common_t	dma_buf_info;
-	uint8_t			rbr_page_num;
-	uint32_t		block_size;
-	uint16_t		dma_channel;
-	uint32_t		bytes_received;
-	uint32_t		ref_cnt;
-	uint_t			pkt_buf_size;
-	uint_t			max_pkt_bufs;
-	uint32_t		cur_usage_cnt;
-} nxge_rbb_t, *p_nxge_rbb_t;
-
-
-typedef struct _rx_tx_param_t {
-	nxge_logical_page_t logical_pages[NXGE_MAX_LOGICAL_PAGES];
-} rx_tx_param_t, *p_rx_tx_param_t;
-
-typedef struct _rx_tx_params {
-	struct _tx_param_t 	*tx_param_p;
-} rx_tx_params_t, *p_rx_tx_params_t;
-
-
-typedef struct _rx_msg_t {
-	nxge_os_dma_common_t	buf_dma;
-	nxge_os_mutex_t 	lock;
-	struct _nxge_t		*nxgep;
-	struct _rx_rbr_ring_t	*rx_rbr_p;
-	boolean_t 		spare_in_use;
-	boolean_t 		free;
-	uint32_t 		ref_cnt;
-#ifdef RXBUFF_USE_SEPARATE_UP_CNTR
-	uint32_t 		pass_up_cnt;
-	boolean_t 		release;
-#endif
-	nxge_os_frtn_t 		freeb;
-	size_t 			bytes_arrived;
-	size_t 			bytes_expected;
-	size_t 			block_size;
-	uint32_t		block_index;
-	uint32_t 		pkt_buf_size;
-	uint32_t 		pkt_buf_size_code;
-	uint32_t 		max_pkt_bufs;
-	uint32_t		cur_usage_cnt;
-	uint32_t		max_usage_cnt;
-	uchar_t			*buffer;
-	uint32_t 		pri;
-	uint32_t 		shifted_addr;
-	boolean_t		use_buf_pool;
-	p_mblk_t 		rx_mblk_p;
-	boolean_t		rx_use_bcopy;
-} rx_msg_t, *p_rx_msg_t;
-
-typedef struct _rx_dma_handle_t {
-	nxge_os_dma_handle_t	dma_handle;	/* DMA handle	*/
-	nxge_os_acc_handle_t	acc_handle;	/* DMA memory handle */
-	npi_handle_t		npi_handle;
-} rx_dma_handle_t, *p_rx_dma_handle_t;
-
-#define	RXCOMP_HIST_ELEMENTS 100000
-
-typedef struct _nxge_rxcomphist_t {
-	uint_t 			comp_cnt;
-	uint64_t 		rx_comp_entry;
-} nxge_rxcomphist_t, *p_nxge_rxcomphist_t;
-
-/* Receive Completion Ring */
-typedef struct _rx_rcr_ring_t {
-	nxge_os_dma_common_t	rcr_desc;
-	uint8_t			rcr_page_num;
-	uint8_t			rcr_buf_page_num;
-
-	struct _nxge_t		*nxgep;
-
-	p_nxge_rx_ring_stats_t	rdc_stats;
-
-	rcrcfig_a_t		rcr_cfga;
-	rcrcfig_b_t		rcr_cfgb;
-	boolean_t		cfg_set;
-
-	nxge_os_mutex_t 	lock;
-	uint16_t		index;
-	uint16_t		rdc;
-	uint16_t		rdc_grp_id;
-	uint16_t		ldg_group_id;
-	boolean_t		full_hdr_flag;	 /* 1: 18 bytes header */
-	uint16_t		sw_priv_hdr_len; /* 0 - 192 bytes (SW) */
-	uint32_t 		comp_size;	 /* # of RCR entries */
-	uint64_t		rcr_addr;
-	uint_t 			comp_wrap_mask;
-	uint_t 			comp_rd_index;
-	uint_t 			comp_wt_index;
-
-	p_rcr_entry_t		rcr_desc_first_p;
-	p_rcr_entry_t		rcr_desc_first_pp;
-	p_rcr_entry_t		rcr_desc_last_p;
-	p_rcr_entry_t		rcr_desc_last_pp;
-
-	p_rcr_entry_t		rcr_desc_rd_head_p;	/* software next read */
-	p_rcr_entry_t		rcr_desc_rd_head_pp;
-
-	p_rcr_entry_t		rcr_desc_wt_tail_p;	/* hardware write */
-	p_rcr_entry_t		rcr_desc_wt_tail_pp;
-
-	uint64_t		rcr_tail_pp;
-	uint64_t		rcr_head_pp;
-	struct _rx_rbr_ring_t	*rx_rbr_p;
-	uint32_t		intr_timeout;
-	uint32_t		intr_threshold;
-	uint64_t		max_receive_pkts;
-	p_mblk_t		rx_first_mp;
-	mac_resource_handle_t	rcr_mac_handle;
-	uint32_t		rcvd_pkt_bytes; /* Received bytes of a packet */
-} rx_rcr_ring_t, *p_rx_rcr_ring_t;
-
-
-
-/* Buffer index information */
-typedef struct _rxbuf_index_info_t {
-	uint32_t buf_index;
-	uint32_t start_index;
-	uint32_t buf_size;
-	uint64_t dvma_addr;
-	uint64_t kaddr;
-} rxbuf_index_info_t, *p_rxbuf_index_info_t;
-
-/* Buffer index information */
-
-typedef struct _rxring_info_t {
-	uint32_t hint[3];
-	uint32_t block_size_mask;
-	uint16_t max_iterations;
-	rxbuf_index_info_t buffer[NXGE_DMA_BLOCK];
-} rxring_info_t, *p_rxring_info_t;
-
-
-/* Receive Buffer Block Ring */
-typedef struct _rx_rbr_ring_t {
-	nxge_os_dma_common_t	rbr_desc;
-	p_rx_msg_t 		*rx_msg_ring;
-	p_nxge_dma_common_t 	*dma_bufp;
-	rbr_cfig_a_t		rbr_cfga;
-	rbr_cfig_b_t		rbr_cfgb;
-	rbr_kick_t		rbr_kick;
-	log_page_vld_t		page_valid;
-	log_page_mask_t		page_mask_1;
-	log_page_mask_t		page_mask_2;
-	log_page_value_t	page_value_1;
-	log_page_value_t	page_value_2;
-	log_page_relo_t		page_reloc_1;
-	log_page_relo_t		page_reloc_2;
-	log_page_hdl_t		page_hdl;
-
-	boolean_t		cfg_set;
-
-	nxge_os_mutex_t		lock;
-	nxge_os_mutex_t		post_lock;
-	uint16_t		index;
-	struct _nxge_t		*nxgep;
-	uint16_t		rdc;
-	uint16_t		rdc_grp_id;
-	uint_t 			rbr_max_size;
-	uint64_t		rbr_addr;
-	uint_t 			rbr_wrap_mask;
-	uint_t 			rbb_max;
-	uint_t 			rbb_added;
-	uint_t			block_size;
-	uint_t			num_blocks;
-	uint_t			tnblocks;
-	uint_t			pkt_buf_size0;
-	uint_t			pkt_buf_size0_bytes;
-	uint_t			npi_pkt_buf_size0;
-	uint_t			pkt_buf_size1;
-	uint_t			pkt_buf_size1_bytes;
-	uint_t			npi_pkt_buf_size1;
-	uint_t			pkt_buf_size2;
-	uint_t			pkt_buf_size2_bytes;
-	uint_t			npi_pkt_buf_size2;
-
-	uint64_t		rbr_head_pp;
-	uint64_t		rbr_tail_pp;
-	uint32_t		*rbr_desc_vp;
-
-	p_rx_rcr_ring_t		rx_rcr_p;
-
-	rx_dma_ent_msk_t	rx_dma_ent_mask;
-
-	rbr_hdh_t		rbr_head;
-	rbr_hdl_t		rbr_tail;
-	uint_t 			rbr_wr_index;
-	uint_t 			rbr_rd_index;
-	uint_t 			rbr_hw_head_index;
-	uint64_t 		rbr_hw_head_ptr;
-
-	/* may not be needed */
-	p_nxge_rbb_t		rbb_p;
-
-	rxring_info_t  *ring_info;
-#ifdef RX_USE_RECLAIM_POST
-	uint32_t hw_freed;
-	uint32_t sw_freed;
-	uint32_t msg_rd_index;
-	uint32_t msg_cnt;
-#endif
-#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
-	uint64_t		hv_rx_buf_base_ioaddr_pp;
-	uint64_t		hv_rx_buf_ioaddr_size;
-	uint64_t		hv_rx_cntl_base_ioaddr_pp;
-	uint64_t		hv_rx_cntl_ioaddr_size;
-	boolean_t		hv_set;
-#endif
-	uint_t 			rbr_consumed;
-	uint_t 			rbr_threshold_hi;
-	uint_t 			rbr_threshold_lo;
-	nxge_rxbuf_type_t	rbr_bufsize_type;
-	boolean_t		rbr_use_bcopy;
-} rx_rbr_ring_t, *p_rx_rbr_ring_t;
-
-/* Receive Mailbox */
-typedef struct _rx_mbox_t {
-	nxge_os_dma_common_t	rx_mbox;
-	rxdma_cfig1_t		rx_cfg1;
-	rxdma_cfig2_t		rx_cfg2;
-	uint64_t		mbox_addr;
-	boolean_t		cfg_set;
-
-	nxge_os_mutex_t 	lock;
-	uint16_t		index;
-	struct _nxge_t		*nxgep;
-	uint16_t		rdc;
-} rx_mbox_t, *p_rx_mbox_t;
-
-
-typedef struct _rx_rbr_rings_t {
-	p_rx_rbr_ring_t 	*rbr_rings;
-	uint32_t			ndmas;
-	boolean_t		rxbuf_allocated;
-} rx_rbr_rings_t, *p_rx_rbr_rings_t;
-
-typedef struct _rx_rcr_rings_t {
-	p_rx_rcr_ring_t 	*rcr_rings;
-	uint32_t			ndmas;
-	boolean_t		cntl_buf_allocated;
-} rx_rcr_rings_t, *p_rx_rcr_rings_t;
-
-typedef struct _rx_mbox_areas_t {
-	p_rx_mbox_t 		*rxmbox_areas;
-	uint32_t			ndmas;
-	boolean_t		mbox_allocated;
-} rx_mbox_areas_t, *p_rx_mbox_areas_t;
-
-/*
- * Global register definitions per chip; they are initialized
- * using the function-zero control registers.
- */
-
-typedef struct _rxdma_globals {
-	boolean_t		mode32;
-	uint16_t		rxdma_ck_div_cnt;
-	uint16_t		rxdma_red_ran_init;
-	uint32_t		rxdma_eing_timeout;
-} rxdma_globals_t, *p_rxdma_globals;
-
-
-/*
- * Receive DMA Prototypes.
- */
-nxge_status_t nxge_init_rxdma_channel_rcrflush(p_nxge_t, uint8_t);
-nxge_status_t nxge_init_rxdma_channels(p_nxge_t);
-void nxge_uninit_rxdma_channels(p_nxge_t);
-nxge_status_t nxge_reset_rxdma_channel(p_nxge_t, uint16_t);
-nxge_status_t nxge_init_rxdma_channel_cntl_stat(p_nxge_t,
-	uint16_t, p_rx_dma_ctl_stat_t);
-nxge_status_t nxge_enable_rxdma_channel(p_nxge_t,
-	uint16_t, p_rx_rbr_ring_t, p_rx_rcr_ring_t,
-	p_rx_mbox_t);
-nxge_status_t nxge_init_rxdma_channel_event_mask(p_nxge_t,
-		uint16_t, p_rx_dma_ent_msk_t);
-
-nxge_status_t nxge_rxdma_hw_mode(p_nxge_t, boolean_t);
-void nxge_hw_start_rx(p_nxge_t);
-void nxge_fixup_rxdma_rings(p_nxge_t);
-nxge_status_t nxge_dump_rxdma_channel(p_nxge_t, uint8_t);
-
-void nxge_rxdma_fix_channel(p_nxge_t, uint16_t);
-void nxge_rxdma_fixup_channel(p_nxge_t, uint16_t, int);
-int nxge_rxdma_get_ring_index(p_nxge_t, uint16_t);
-
-void nxge_rxdma_regs_dump_channels(p_nxge_t);
-nxge_status_t nxge_rxdma_handle_sys_errors(p_nxge_t);
-void nxge_rxdma_inject_err(p_nxge_t, uint32_t, uint8_t);
-
-
-#ifdef	__cplusplus
-}
-#endif
-
-#endif	/* _SYS_NXGE_NXGE_RXDMA_H */
--- a/usr/src/uts/sun4v/sys/nxge/nxge_rxdma_hw.h	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1899 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef	_SYS_NXGE_NXGE_RXDMA_HW_H
-#define	_SYS_NXGE_NXGE_RXDMA_HW_H
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#ifdef	__cplusplus
-extern "C" {
-#endif
-
-#include <nxge_defs.h>
-#include <nxge_hw.h>
-
-/*
- * NIU: Receive DMA Channels
- */
-/* Receive DMA Clock Divider */
-#define	RX_DMA_CK_DIV_REG	(FZC_DMC + 0x00000)
-#define	RX_DMA_CK_DIV_SHIFT	0			/* bits 15:0 */
-#define	RX_DMA_CK_DIV_MASK	0x000000000000FFFFULL
-
-typedef union _rx_dma_ck_div_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res1_1:16;
-			uint32_t cnt:16;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t cnt:16;
-			uint32_t res1_1:16;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} rx_dma_ck_div_t, *p_rx_dma_ck_div_t;
-
-
-/*
- * Default Port Receive DMA Channel (RDC)
- */
-#define	DEF_PT_RDC_REG(port)	(FZC_DMC + 0x00008 * (port + 1))
-#define	DEF_PT0_RDC_REG		(FZC_DMC + 0x00008)
-#define	DEF_PT1_RDC_REG		(FZC_DMC + 0x00010)
-#define	DEF_PT2_RDC_REG		(FZC_DMC + 0x00018)
-#define	DEF_PT3_RDC_REG		(FZC_DMC + 0x00020)
-#define	DEF_PT_RDC_SHIFT	0			/* bits 4:0 */
-#define	DEF_PT_RDC_MASK		0x000000000000001FULL
-
-
-#define	RDC_TBL_REG		(FZC_ZCP + 0x10000)
-#define	RDC_TBL_SHIFT		0			/* bits 4:0 */
-#define	RDC_TBL_MASK		0x000000000000001FULL
-
-/* For the default port RDC and RDC table */
-typedef union _def_pt_rdc_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res1_1:27;
-			uint32_t rdc:5;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t rdc:5;
-			uint32_t res1_1:27;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} def_pt_rdc_t, *p_def_pt_rdc_t;
-
-typedef union _rdc_tbl_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res1_1:28;
-			uint32_t rdc:4;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t rdc:4;
-			uint32_t res1_1:28;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} rdc_tbl_t, *p_rdc_tbl_t;
-
-/*
- * RDC: 32 bit Addressing mode
- */
-#define	RX_ADDR_MD_REG		(FZC_DMC + 0x00070)
-#define	RX_ADDR_MD_SHIFT	0			/* bits 0:0 */
-#define	RX_ADDR_MD_SET_32	0x0000000000000001ULL	/* 1 to select 32 bit */
-#define	RX_ADDR_MD_MASK		0x0000000000000001ULL
-
-typedef union _rx_addr_md_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res1_1:28;
-			uint32_t dbg_pt_mux_sel:2;
-			uint32_t ram_acc:1;
-			uint32_t mode32:1;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t mode32:1;
-			uint32_t ram_acc:1;
-			uint32_t dbg_pt_mux_sel:2;
-			uint32_t res1_1:28;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} rx_addr_md_t, *p_rx_addr_md_t;
-
-/*
- * RDC: Port Scheduler
- */
-
-#define	PT_DRR_WT_REG(portnm)		((FZC_DMC + 0x00028) + (portnm * 8))
-#define	PT_DRR_WT0_REG		(FZC_DMC + 0x00028)
-#define	PT_DRR_WT1_REG		(FZC_DMC + 0x00030)
-#define	PT_DRR_WT2_REG		(FZC_DMC + 0x00038)
-#define	PT_DRR_WT3_REG		(FZC_DMC + 0x00040)
-#define	PT_DRR_WT_SHIFT		0
-#define	PT_DRR_WT_MASK		0x000000000000FFFFULL	/* bits 15:0 */
-#define	PT_DRR_WT_DEFAULT_10G	0x0400
-#define	PT_DRR_WT_DEFAULT_1G	0x0066
-typedef union _pt_drr_wt_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res1_1:16;
-			uint32_t wt:16;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t wt:16;
-			uint32_t res1_1:16;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} pt_drr_wt_t, *p_pt_drr_wt_t;
-
-#define	NXGE_RX_DRR_WT_10G	0x400
-#define	NXGE_RX_DRR_WT_1G	0x066
-
-/* Port FIFO Usage */
-#define	PT_USE_REG(portnum)		((FZC_DMC + 0x00048) + (portnum * 8))
-#define	PT_USE0_REG		(FZC_DMC + 0x00048)
-#define	PT_USE1_REG		(FZC_DMC + 0x00050)
-#define	PT_USE2_REG		(FZC_DMC + 0x00058)
-#define	PT_USE3_REG		(FZC_DMC + 0x00060)
-#define	PT_USE_SHIFT		0			/* bits 19:0 */
-#define	PT_USE_MASK		0x00000000000FFFFFULL
-
-typedef union _pt_use_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res1_1:12;
-			uint32_t cnt:20;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t cnt:20;
-			uint32_t res1_1:12;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} pt_use_t, *p_pt_use_t;
-
-/*
- * RDC: Partitioning Support
- *	(There is one set of the following registers per RDC.)
- * Please refer to nxge_hw.h for the common logical
- * page configuration register definitions.
- */
-#define	RX_LOG_REG_SIZE			0x40
-#define	RX_LOG_DMA_OFFSET(channel)	(channel * RX_LOG_REG_SIZE)
-
-#define	RX_LOG_PAGE_VLD_REG	(FZC_DMC + 0x20000)
-#define	RX_LOG_PAGE_MASK1_REG	(FZC_DMC + 0x20008)
-#define	RX_LOG_PAGE_VAL1_REG	(FZC_DMC + 0x20010)
-#define	RX_LOG_PAGE_MASK2_REG	(FZC_DMC + 0x20018)
-#define	RX_LOG_PAGE_VAL2_REG	(FZC_DMC + 0x20020)
-#define	RX_LOG_PAGE_RELO1_REG	(FZC_DMC + 0x20028)
-#define	RX_LOG_PAGE_RELO2_REG	(FZC_DMC + 0x20030)
-#define	RX_LOG_PAGE_HDL_REG	(FZC_DMC + 0x20038)
-
-/* RX and TX have the same definitions */
-#define	RX_LOG_PAGE1_VLD_SHIFT	1			/* bit 1 */
-#define	RX_LOG_PAGE0_VLD_SHIFT	0			/* bit 0 */
-#define	RX_LOG_PAGE1_VLD	0x0000000000000002ULL
-#define	RX_LOG_PAGE0_VLD	0x0000000000000001ULL
-#define	RX_LOG_PAGE1_VLD_MASK	0x0000000000000002ULL
-#define	RX_LOG_PAGE0_VLD_MASK	0x0000000000000001ULL
-#define	RX_LOG_FUNC_VLD_SHIFT	2			/* bit 3:2 */
-#define	RX_LOG_FUNC_VLD_MASK	0x000000000000000CULL
-
-#define	LOG_PAGE_ADDR_SHIFT	12	/* bits[43:12] --> bits[31:0] */
-
-/* RDC: Weighted Random Early Discard */
-#define	RED_RAN_INIT_REG	(FZC_DMC + 0x00068)
-
-#define	RED_RAN_INIT_SHIFT	0			/* bits 15:0 */
-#define	RED_RAN_INIT_MASK	0x000000000000ffffULL
-
-/* Weighted Random */
-typedef union _red_ran_init_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res1_1:15;
-			uint32_t enable:1;
-			uint32_t init:16;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t init:16;
-			uint32_t enable:1;
-			uint32_t res1_1:15;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} red_ran_init_t, *p_red_ran_init_t;
-
-/*
- * Buffer block descriptor
- */
-typedef struct _rx_desc_t {
-	uint32_t	block_addr;
-} rx_desc_t, *p_rx_desc_t;
-
-/*
- * RDC: RED Parameter
- *	(Each DMC has one RED register)
- */
-#define	RDC_RED_CHANNEL_SIZE		(0x40)
-#define	RDC_RED_CHANNEL_OFFSET(channel)	(channel * RDC_RED_CHANNEL_SIZE)
-
-#define	RDC_RED_PARA_REG		(FZC_DMC + 0x30000)
-#define	RDC_RED_RDC_PARA_REG(rdc)	\
-	(RDC_RED_PARA_REG + (rdc * RDC_RED_CHANNEL_SIZE))
-
-/* the layout of this register is  rx_disc_cnt_t */
-#define	RDC_RED_DISC_CNT_REG		(FZC_DMC + 0x30008)
-#define	RDC_RED_RDC_DISC_REG(rdc)	\
-	(RDC_RED_DISC_CNT_REG + (rdc * RDC_RED_CHANNEL_SIZE))
-
-
-#define	RDC_RED_PARA1_RBR_SCL_SHIFT	0			/* bits 2:0 */
-#define	RDC_RED_PARA1_RBR_SCL_MASK	0x0000000000000007ULL
-#define	RDC_RED_PARA1_ENB_SHIFT		3			/* bit 3 */
-#define	RDC_RED_PARA1_ENB		0x0000000000000008ULL
-#define	RDC_RED_PARA1_ENB_MASK		0x0000000000000008ULL
-
-#define	RDC_RED_PARA_WIN_SHIFT		0			/* bits 3:0 */
-#define	RDC_RED_PARA_WIN_MASK		0x000000000000000fULL
-#define	RDC_RED_PARA_THRE_SHIFT	4			/* bits 15:4 */
-#define	RDC_RED_PARA_THRE_MASK		0x000000000000fff0ULL
-#define	RDC_RED_PARA_WIN_SYN_SHIFT	16			/* bits 19:16 */
-#define	RDC_RED_PARA_WIN_SYN_MASK	0x00000000000f0000ULL
-#define	RDC_RED_PARA_THRE_SYN_SHIFT	20			/* bits 31:20 */
-#define	RDC_RED_PARA_THRE_SYN_MASK	0x00000000fff00000ULL
-
-/* RDC:  RED parameters  */
-typedef union _rdc_red_para_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint32_t thre_sync:12;
-		uint32_t win_syn:4;
-		uint32_t thre:12;
-		uint32_t win:4;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint32_t win:4;
-		uint32_t thre:12;
-		uint32_t win_syn:4;
-		uint32_t thre_sync:12;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} rdc_red_para_t, *p_rdc_red_para_t;
-
-/*
- * RDC: Receive DMA Datapath Configuration
- *	The following register definitions are for
- *	each DMA channel. Each DMA CSR is 512 bytes
- *	(0x200).
- */
-#define	RXDMA_CFIG1_REG			(DMC + 0x00000)
-#define	RXDMA_CFIG2_REG			(DMC + 0x00008)
-
-#define	RXDMA_CFIG1_MBADDR_H_SHIFT	0			/* bits 11:0 */
-#define	RXDMA_CFIG1_MBADDR_H_MASK	0x0000000000000fffULL
-#define	RXDMA_CFIG1_RST_SHIFT		30			/* bit 30 */
-#define	RXDMA_CFIG1_RST			0x0000000040000000ULL
-#define	RXDMA_CFIG1_RST_MASK		0x0000000040000000ULL
-#define	RXDMA_CFIG1_EN_SHIFT		31
-#define	RXDMA_CFIG1_EN			0x0000000080000000ULL
-#define	RXDMA_CFIG1_EN_MASK		0x0000000080000000ULL
-
-typedef union _rxdma_cfig1_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t en:1;
-			uint32_t rst:1;
-			uint32_t qst:1;
-			uint32_t res2:17;
-			uint32_t mbaddr_h:12;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t mbaddr_h:12;
-			uint32_t res2:17;
-			uint32_t qst:1;
-			uint32_t rst:1;
-			uint32_t en:1;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} rxdma_cfig1_t, *p_rxdma_cfig1_t;
-
-#define	RXDMA_HDR_SIZE_DEFAULT		2
-#define	RXDMA_HDR_SIZE_FULL		18
-
-#define	RXDMA_CFIG2_FULL_HDR_SHIFT	0			/* Set to 1 */
-#define	RXDMA_CFIG2_FULL_HDR		0x0000000000000001ULL
-#define	RXDMA_CFIG2_FULL_HDR_MASK	0x0000000000000001ULL
-#define	RXDMA_CFIG2_OFFSET_SHIFT		1		/* bit 3:1 */
-#define	RXDMA_CFIG2_OFFSET_MASK		0x000000000000000eULL
-#define	RXDMA_CFIG2_MBADDR_L_SHIFT	6			/* bit 31:6 */
-#define	RXDMA_CFIG2_MBADDR_L_MASK	0x00000000ffffffc0ULL
-
-typedef union _rxdma_cfig2_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t mbaddr:26;
-			uint32_t res2:3;
-			uint32_t offset:2;
-			uint32_t full_hdr:1;
-
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t full_hdr:1;
-			uint32_t offset:2;
-			uint32_t res2:3;
-			uint32_t mbaddr:26;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} rxdma_cfig2_t, *p_rxdma_cfig2_t;
-
-/*
- * RDC: Receive Block Ring Configuration
- *	The following register definitions are for
- *	each DMA channel.
- */
-#define	RBR_CFIG_A_REG			(DMC + 0x00010)
-#define	RBR_CFIG_B_REG			(DMC + 0x00018)
-#define	RBR_KICK_REG			(DMC + 0x00020)
-#define	RBR_STAT_REG			(DMC + 0x00028)
-#define	RBR_HDH_REG			(DMC + 0x00030)
-#define	RBR_HDL_REG			(DMC + 0x00038)
-
-#define	RBR_CFIG_A_STADDR_SHIFT		6			/* bits 17:6 */
-#define	RBR_CFIG_A_STDADDR_MASK		0x000000000003ffc0ULL
-#define	RBR_CFIG_A_STADDR_BASE_SHIFT	18			/* bits 43:18 */
-#define	RBR_CFIG_A_STDADDR_BASE_MASK	0x00000ffffffc0000ULL
-#define	RBR_CFIG_A_LEN_SHIFT		48			/* bits 63:48 */
-#define	RBR_CFIG_A_LEN_MASK		0xFFFF000000000000ULL
-
-typedef union _rbr_cfig_a_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t len:16;
-			uint32_t res1:4;
-			uint32_t staddr_base:12;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t staddr_base:12;
-			uint32_t res1:4;
-			uint32_t len:16;
-#endif
-		} hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t staddr_base:14;
-			uint32_t staddr:12;
-			uint32_t res2:6;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t res2:6;
-			uint32_t staddr:12;
-			uint32_t staddr_base:14;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t len:16;
-			uint32_t res1:4;
-			uint32_t staddr_base:12;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t staddr_base:12;
-			uint32_t res1:4;
-			uint32_t len:16;
-#endif
-		} hdw;
-#endif
-	} bits;
-} rbr_cfig_a_t, *p_rbr_cfig_a_t;
-
-
-#define	RBR_CFIG_B_BUFSZ0_SHIFT		0			/* bit 1:0 */
-#define	RBR_CFIG_B_BUFSZ0_MASK		0x0000000000000003ULL
-#define	RBR_CFIG_B_VLD0_SHIFT		7			/* bit 7 */
-#define	RBR_CFIG_B_VLD0			0x0000000000000080ULL
-#define	RBR_CFIG_B_VLD0_MASK		0x0000000000000080ULL
-#define	RBR_CFIG_B_BUFSZ1_SHIFT		8			/* bit 9:8 */
-#define	RBR_CFIG_B_BUFSZ1_MASK		0x0000000000000300ULL
-#define	RBR_CFIG_B_VLD1_SHIFT		15			/* bit 15 */
-#define	RBR_CFIG_B_VLD1			0x0000000000008000ULL
-#define	RBR_CFIG_B_VLD1_MASK		0x0000000000008000ULL
-#define	RBR_CFIG_B_BUFSZ2_SHIFT		16			/* bit 17:16 */
-#define	RBR_CFIG_B_BUFSZ2_MASK		0x0000000000030000ULL
-#define	RBR_CFIG_B_VLD2_SHIFT		23			/* bit 23 */
-#define	RBR_CFIG_B_VLD2			0x0000000000800000ULL
-#define	RBR_CFIG_B_BKSIZE_SHIFT		24			/* bit 25:24 */
-#define	RBR_CFIG_B_BKSIZE_MASK		0x0000000003000000ULL
-
-
-typedef union _rbr_cfig_b_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res1_1:6;
-			uint32_t bksize:2;
-			uint32_t vld2:1;
-			uint32_t res2:5;
-			uint32_t bufsz2:2;
-			uint32_t vld1:1;
-			uint32_t res3:5;
-			uint32_t bufsz1:2;
-			uint32_t vld0:1;
-			uint32_t res4:5;
-			uint32_t bufsz0:2;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t bufsz0:2;
-			uint32_t res4:5;
-			uint32_t vld0:1;
-			uint32_t bufsz1:2;
-			uint32_t res3:5;
-			uint32_t vld1:1;
-			uint32_t bufsz2:2;
-			uint32_t res2:5;
-			uint32_t vld2:1;
-			uint32_t bksize:2;
-			uint32_t res1_1:6;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} rbr_cfig_b_t, *p_rbr_cfig_b_t;
-
-
-#define	RBR_KICK_SHIFT			0			/* bit 15:0 */
-#define	RBR_KICK_MASK			0x000000000000ffffULL
-
-
-typedef union _rbr_kick_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res1_1:16;
-			uint32_t bkadd:16;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t bkadd:16;
-			uint32_t res1_1:16;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} rbr_kick_t, *p_rbr_kick_t;
-
-#define	RBR_STAT_QLEN_SHIFT		0		/* bit 15:0 */
-#define	RBR_STAT_QLEN_MASK		0x000000000000ffffULL
-#define	RBR_STAT_OFLOW_SHIFT		16		/* bit 16 */
-#define	RBR_STAT_OFLOW			0x0000000000010000ULL
-#define	RBR_STAT_OFLOW_MASK		0x0000000000010000ULL
-
-typedef union _rbr_stat_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res1_1:15;
-			uint32_t oflow:1;
-			uint32_t qlen:16;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t qlen:16;
-			uint32_t oflow:1;
-			uint32_t res1_1:15;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} rbr_stat_t, *p_rbr_stat_t;
-
-
-#define	RBR_HDH_HEAD_H_SHIFT		0			/* bit 11:0 */
-#define	RBR_HDH_HEAD_H_MASK		0x0000000000000fffULL
-typedef union _rbr_hdh_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res1_1:20;
-			uint32_t head_h:12;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t head_h:12;
-			uint32_t res1_1:20;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} rbr_hdh_t, *p_rbr_hdh_t;
-
-#define	RBR_HDL_HEAD_L_SHIFT		2			/* bit 31:2 */
-#define	RBR_HDL_HEAD_L_MASK		0x00000000FFFFFFFCULL
-
-typedef union _rbr_hdl_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t head_l:30;
-			uint32_t res2:2;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t res2:2;
-			uint32_t head_l:30;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} rbr_hdl_t, *p_rbr_hdl_t;
-
-/*
- * Receive Completion Ring (RCR)
- */
-#define	RCR_PKT_BUF_ADDR_SHIFT		0			/* bit 37:0 */
-#define	RCR_PKT_BUF_ADDR_SHIFT_FULL	6	/* full buffer address */
-#define	RCR_PKT_BUF_ADDR_MASK		0x0000003FFFFFFFFFULL
-#define	RCR_PKTBUFSZ_SHIFT		38			/* bit 39:38 */
-#define	RCR_PKTBUFSZ_MASK		0x000000C000000000ULL
-#define	RCR_L2_LEN_SHIFT		40			/* bit 53:40 */
-#define	RCR_L2_LEN_MASK			0x003fff0000000000ULL
-#define	RCR_DCF_ERROR_SHIFT		54			/* bit 54 */
-#define	RCR_DCF_ERROR_MASK		0x0040000000000000ULL
-#define	RCR_ERROR_SHIFT			55			/* bit 57:55 */
-#define	RCR_ERROR_MASK			0x0380000000000000ULL
-#define	RCR_PROMIS_SHIFT		58			/* bit 58 */
-#define	RCR_PROMIS_MASK			0x0400000000000000ULL
-#define	RCR_FRAG_SHIFT			59			/* bit 59 */
-#define	RCR_FRAG_MASK			0x0800000000000000ULL
-#define	RCR_ZERO_COPY_SHIFT		60			/* bit 60 */
-#define	RCR_ZERO_COPY_MASK		0x1000000000000000ULL
-#define	RCR_PKT_TYPE_SHIFT		61			/* bit 62:61 */
-#define	RCR_PKT_TYPE_MASK		0x6000000000000000ULL
-#define	RCR_MULTI_SHIFT			63			/* bit 63 */
-#define	RCR_MULTI_MASK			0x8000000000000000ULL
-
-#define	RCR_PKTBUFSZ_0			0x00
-#define	RCR_PKTBUFSZ_1			0x01
-#define	RCR_PKTBUFSZ_2			0x02
-#define	RCR_SINGLE_BLOCK		0x03
-
-#define	RCR_NO_ERROR			0x0
-#define	RCR_L2_ERROR			0x1
-#define	RCR_L4_CSUM_ERROR		0x3
-#define	RCR_FFLP_SOFT_ERROR		0x4
-#define	RCR_ZCP_SOFT_ERROR		0x5
-#define	RCR_ERROR_RESERVE		0x6
-#define	RCR_ERROR_RESERVE_END	0x7
-
-#define	RCR_PKT_TYPE_UDP		0x1
-#define	RCR_PKT_TYPE_TCP		0x2
-#define	RCR_PKT_TYPE_SCTP		0x3
-#define	RCR_PKT_TYPE_OTHERS		0x0
-#define	RCR_PKT_IS_TCP			0x2000000000000000ULL
-#define	RCR_PKT_IS_UDP			0x4000000000000000ULL
-#define	RCR_PKT_IS_SCTP			0x6000000000000000ULL
-
-
-typedef union _rcr_entry_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t multi:1;
-			uint32_t pkt_type:2;
-			uint32_t zero_copy:1;
-			uint32_t noport:1;
-			uint32_t promis:1;
-			uint32_t error:3;
-			uint32_t dcf_err:1;
-			uint32_t l2_len:14;
-			uint32_t pktbufsz:2;
-			uint32_t pkt_buf_addr:6;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t pkt_buf_addr:6;
-			uint32_t pktbufsz:2;
-			uint32_t l2_len:14;
-			uint32_t dcf_err:1;
-			uint32_t error:3;
-			uint32_t promis:1;
-			uint32_t noport:1;
-			uint32_t zero_copy:1;
-			uint32_t pkt_type:2;
-			uint32_t multi:1;
-#endif
-		} hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t pkt_buf_addr:32;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t pkt_buf_addr:32;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t multi:1;
-			uint32_t pkt_type:2;
-			uint32_t zero_copy:1;
-			uint32_t noport:1;
-			uint32_t promis:1;
-			uint32_t error:3;
-			uint32_t dcf_err:1;
-			uint32_t l2_len:14;
-			uint32_t pktbufsz:2;
-			uint32_t pkt_buf_addr:6;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t pkt_buf_addr:6;
-			uint32_t pktbufsz:2;
-			uint32_t l2_len:14;
-			uint32_t dcf_err:1;
-			uint32_t error:3;
-			uint32_t promis:1;
-			uint32_t noport:1;
-			uint32_t zero_copy:1;
-			uint32_t pkt_type:2;
-			uint32_t multi:1;
-#endif
-		} hdw;
-#endif
-	} bits;
-} rcr_entry_t, *p_rcr_entry_t;
-
-/*
- * Receive Completion Ring Configuration.
- * (for each DMA channel)
- */
-#define	RCRCFIG_A_REG			(DMC + 0x00040)
-#define	RCRCFIG_B_REG			(DMC + 0x00048)
-#define	RCRSTAT_A_REG			(DMC + 0x00050)
-#define	RCRSTAT_B_REG			(DMC + 0x00058)
-#define	RCRSTAT_C_REG			(DMC + 0x00060)
-#define	RX_DMA_ENT_MSK_REG		(DMC + 0x00068)
-#define	RX_DMA_CTL_STAT_REG		(DMC + 0x00070)
-#define	RCR_FLSH_REG			(DMC + 0x00078)
-#if OLD
-#define	RX_DMA_LOGA_REG			(DMC + 0x00080)
-#define	RX_DMA_LOGB_REG			(DMC + 0x00088)
-#endif
-#define	RX_DMA_CTL_STAT_DBG_REG		(DMC + 0x00098)
-
-/* (DMC + 0x00040) */
-#define	RCRCFIG_A_STADDR_SHIFT		6	/* bit 18:6 */
-#define	RCRCFIG_A_STADDR_MASK		0x000000000007FFC0ULL
-#define	RCRCFIG_A_STADDR_BASE_SHIF	19	/* bit 43:19 */
-#define	RCRCFIG_A_STADDR_BASE_MASK	0x00000FFFFFF80000ULL
-#define	RCRCFIG_A_LEN_SHIF		48	/* bit 63:48 */
-#define	RCRCFIG_A_LEN__MASK		0xFFFF000000000000ULL
-
-/* (DMC + 0x00048) */
-#define	RCRCFIG_B_TIMEOUT_SHIFT		0		/* bit 5:0 */
-#define	RCRCFIG_B_TIMEOUT_MASK		0x000000000000003FULL
-#define	RCRCFIG_B_ENTOUT_SHIFT		15		/* bit  15 */
-#define	RCRCFIG_B_TIMEOUT		0x0000000000008000ULL
-#define	RCRCFIG_B_PTHRES_SHIFT		16		/* bit 31:16 */
-#define	RCRCFIG_B_PTHRES_MASK		0x00000000FFFF0000ULL
-
-/* (DMC + 0x00050) */
-#define	RCRSTAT_A_QLEN_SHIFT		0		/* bit 15:0 */
-#define	RCRSTAT_A_QLEN_MASK		0x000000000000FFFFULL
-#define	RCRSTAT_A_PKT_OFL_SHIFT		16		/* bit 16 */
-#define	RCRSTAT_A_PKT_OFL_MASK		0x0000000000010000ULL
-#define	RCRSTAT_A_ENT_OFL_SHIFT		17		/* bit 17 */
-#define	RCRSTAT_A_ENT_QFL_MASK		0x0000000000020000ULL
-
-#define	RCRSTAT_C_TLPTR_H_SHIFT		0		/* bit 11:0 */
-#define	RCRSTAT_C_TLPTR_H_MASK		0x0000000000000FFFULL
-
-#define	RCRSTAT_D_TLPTR_L_SHIFT		3		/* bit 31:3 */
-#define	RCRSTAT_D_TLPTR_L_MASK		0x00000000FFFFFFF8ULL
-
-/* Receive DMA Interrupt Behavior: Event Mask  (DMC + 0x00068) */
-#define	RX_DMA_ENT_MSK_CFIGLOGPGE_SHIFT	0		/* bit 0: 0 to flag */
-#define	RX_DMA_ENT_MSK_CFIGLOGPGE_MASK	0x0000000000000001ULL
-#define	RX_DMA_ENT_MSK_RBRLOGPGE_SHIFT	1		/* bit 1: 0 to flag */
-#define	RX_DMA_ENT_MSK_RBRLOGPGE_MASK	0x0000000000000002ULL
-#define	RX_DMA_ENT_MSK_RBRFULL_SHIFT	2		/* bit 2: 0 to flag */
-#define	RX_DMA_ENT_MSK_RBRFULL_MASK	0x0000000000000004ULL
-#define	RX_DMA_ENT_MSK_RBREMPTY_SHIFT	3		/* bit 3: 0 to flag */
-#define	RX_DMA_ENT_MSK_RBREMPTY_MASK	0x0000000000000008ULL
-#define	RX_DMA_ENT_MSK_RCRFULL_SHIFT	4		/* bit 4: 0 to flag */
-#define	RX_DMA_ENT_MSK_RCRFULL_MASK	0x0000000000000010ULL
-#define	RX_DMA_ENT_MSK_RCRINCON_SHIFT	5		/* bit 5: 0 to flag */
-#define	RX_DMA_ENT_MSK_RCRINCON_MASK	0x0000000000000020ULL
-#define	RX_DMA_ENT_MSK_CONFIG_ERR_SHIFT	6		/* bit 6: 0 to flag */
-#define	RX_DMA_ENT_MSK_CONFIG_ERR_MASK	0x0000000000000040ULL
-#define	RX_DMA_ENT_MSK_RCRSH_FULL_SHIFT	7		/* bit 7: 0 to flag */
-#define	RX_DMA_ENT_MSK_RCRSH_FULL_MASK	0x0000000000000080ULL
-#define	RX_DMA_ENT_MSK_RBR_PRE_EMPTY_SHIFT	8	/* bit 8: 0 to flag */
-#define	RX_DMA_ENT_MSK_RBR_PRE_EMPTY_MASK	0x0000000000000100ULL
-#define	RX_DMA_ENT_MSK_WRED_DROP_SHIFT	9		/* bit 9: 0 to flag */
-#define	RX_DMA_ENT_MSK_WRED_DROP_MASK	0x0000000000000200ULL
-#define	RX_DMA_ENT_MSK_PTDROP_PKT_SHIFT	10		/* bit 10: 0 to flag */
-#define	RX_DMA_ENT_MSK_PTDROP_PKT_MASK	0x0000000000000400ULL
-#define	RX_DMA_ENT_MSK_RBR_PRE_PAR_SHIFT	11	/* bit 11: 0 to flag */
-#define	RX_DMA_ENT_MSK_RBR_PRE_PAR_MASK	0x0000000000000800ULL
-#define	RX_DMA_ENT_MSK_RCR_SHA_PAR_SHIFT	12	/* bit 12: 0 to flag */
-#define	RX_DMA_ENT_MSK_RCR_SHA_PAR_MASK	0x0000000000001000ULL
-#define	RX_DMA_ENT_MSK_RCRTO_SHIFT	13		/* bit 13: 0 to flag */
-#define	RX_DMA_ENT_MSK_RCRTO_MASK	0x0000000000002000ULL
-#define	RX_DMA_ENT_MSK_THRES_SHIFT	14		/* bit 14: 0 to flag */
-#define	RX_DMA_ENT_MSK_THRES_MASK	0x0000000000004000ULL
-#define	RX_DMA_ENT_MSK_DC_FIFO_ERR_SHIFT	16	/* bit 16: 0 to flag */
-#define	RX_DMA_ENT_MSK_DC_FIFO_ERR_MASK	0x0000000000010000ULL
-#define	RX_DMA_ENT_MSK_RCR_ACK_ERR_SHIFT	17	/* bit 17: 0 to flag */
-#define	RX_DMA_ENT_MSK_RCR_ACK_ERR_MASK	0x0000000000020000ULL
-#define	RX_DMA_ENT_MSK_RSP_DAT_ERR_SHIFT	18	/* bit 18: 0 to flag */
-#define	RX_DMA_ENT_MSK_RSP_DAT_ERR_MASK	0x0000000000040000ULL
-#define	RX_DMA_ENT_MSK_BYTE_EN_BUS_SHIFT	19	/* bit 19: 0 to flag */
-#define	RX_DMA_ENT_MSK_BYTE_EN_BUS_MASK	0x0000000000080000ULL
-#define	RX_DMA_ENT_MSK_RSP_CNT_ERR_SHIFT	20	/* bit 20: 0 to flag */
-#define	RX_DMA_ENT_MSK_RSP_CNT_ERR_MASK	0x0000000000100000ULL
-#define	RX_DMA_ENT_MSK_RBR_TMOUT_SHIFT	21		/* bit 21: 0 to flag */
-#define	RX_DMA_ENT_MSK_RBR_TMOUT_MASK	0x0000000000200000ULL
-#define	RX_DMA_ENT_MSK_ALL	(RX_DMA_ENT_MSK_CFIGLOGPGE_MASK |	\
-				RX_DMA_ENT_MSK_RBRLOGPGE_MASK |	\
-				RX_DMA_ENT_MSK_RBRFULL_MASK |		\
-				RX_DMA_ENT_MSK_RBREMPTY_MASK |		\
-				RX_DMA_ENT_MSK_RCRFULL_MASK |		\
-				RX_DMA_ENT_MSK_RCRINCON_MASK |		\
-				RX_DMA_ENT_MSK_CONFIG_ERR_MASK |	\
-				RX_DMA_ENT_MSK_RCRSH_FULL_MASK |	\
-				RX_DMA_ENT_MSK_RBR_PRE_EMPTY_MASK |	\
-				RX_DMA_ENT_MSK_WRED_DROP_MASK |	\
-				RX_DMA_ENT_MSK_PTDROP_PKT_MASK |	\
-				RX_DMA_ENT_MSK_RBR_PRE_PAR_MASK |	\
-				RX_DMA_ENT_MSK_RCR_SHA_PAR_MASK |	\
-				RX_DMA_ENT_MSK_RCRTO_MASK |		\
-				RX_DMA_ENT_MSK_THRES_MASK |		\
-				RX_DMA_ENT_MSK_DC_FIFO_ERR_MASK |	\
-				RX_DMA_ENT_MSK_RCR_ACK_ERR_MASK |	\
-				RX_DMA_ENT_MSK_RSP_DAT_ERR_MASK |	\
-				RX_DMA_ENT_MSK_BYTE_EN_BUS_MASK |	\
-				RX_DMA_ENT_MSK_RSP_CNT_ERR_MASK |	\
-				RX_DMA_ENT_MSK_RBR_TMOUT_MASK)
-
-/* Receive DMA Control and Status  (DMC + 0x00070) */
-#define	RX_DMA_CTL_STAT_PKTREAD_SHIFT	0	/* WO, bit 15:0 */
-#define	RX_DMA_CTL_STAT_PKTREAD_MASK	0x000000000000ffffULL
-#define	RX_DMA_CTL_STAT_PTRREAD_SHIFT	16	/* WO, bit 31:16 */
-#define	RX_DMA_CTL_STAT_PTRREAD_MASK	0x00000000FFFF0000ULL
-#define	RX_DMA_CTL_STAT_CFIGLOGPG_SHIFT 32	/* RO, bit 32 */
-#define	RX_DMA_CTL_STAT_CFIGLOGPG	0x0000000100000000ULL
-#define	RX_DMA_CTL_STAT_CFIGLOGPG_MASK	0x0000000100000000ULL
-#define	RX_DMA_CTL_STAT_RBRLOGPG_SHIFT	33	/* RO, bit 33 */
-#define	RX_DMA_CTL_STAT_RBRLOGPG	0x0000000200000000ULL
-#define	RX_DMA_CTL_STAT_RBRLOGPG_MASK	0x0000000200000000ULL
-#define	RX_DMA_CTL_STAT_RBRFULL_SHIFT	34	/* RO, bit 34 */
-#define	RX_DMA_CTL_STAT_RBRFULL		0x0000000400000000ULL
-#define	RX_DMA_CTL_STAT_RBRFULL_MASK	0x0000000400000000ULL
-#define	RX_DMA_CTL_STAT_RBREMPTY_SHIFT	35	/* RW1C, bit 35 */
-#define	RX_DMA_CTL_STAT_RBREMPTY	0x0000000800000000ULL
-#define	RX_DMA_CTL_STAT_RBREMPTY_MASK	0x0000000800000000ULL
-#define	RX_DMA_CTL_STAT_RCRFULL_SHIFT	36	/* RW1C, bit 36 */
-#define	RX_DMA_CTL_STAT_RCRFULL		0x0000001000000000ULL
-#define	RX_DMA_CTL_STAT_RCRFULL_MASK	0x0000001000000000ULL
-#define	RX_DMA_CTL_STAT_RCRINCON_SHIFT	37	/* RO, bit 37 */
-#define	RX_DMA_CTL_STAT_RCRINCON	0x0000002000000000ULL
-#define	RX_DMA_CTL_STAT_RCRINCON_MASK	0x0000002000000000ULL
-#define	RX_DMA_CTL_STAT_CONFIG_ERR_SHIFT 38	/* RO, bit 38 */
-#define	RX_DMA_CTL_STAT_CONFIG_ERR	0x0000004000000000ULL
-#define	RX_DMA_CTL_STAT_CONFIG_ERR_MASK	0x0000004000000000ULL
-#define	RX_DMA_CTL_STAT_RCR_SHDW_FULL_SHIFT 39	/* RO, bit 39 */
-#define	RX_DMA_CTL_STAT_RCR_SHDW_FULL 0x0000008000000000ULL
-#define	RX_DMA_CTL_STAT_RCR_SHDW_FULL_MASK 0x0000008000000000ULL
-#define	RX_DMA_CTL_STAT_RBR_PRE_EMTY_SHIFT 40	/* RO, bit 40 */
-#define	RX_DMA_CTL_STAT_RBR_PRE_EMTY 0x0000010000000000ULL
-#define	RX_DMA_CTL_STAT_RBR_PRE_EMTY_MASK  0x0000010000000000ULL
-#define	RX_DMA_CTL_STAT_WRED_DROP_SHIFT 41	/* RO, bit 41 */
-#define	RX_DMA_CTL_STAT_WRED_DROP 0x0000020000000000ULL
-#define	RX_DMA_CTL_STAT_WRED_DROP_MASK  0x0000020000000000ULL
-#define	RX_DMA_CTL_STAT_PORT_DROP_PKT_SHIFT 42	/* RO, bit 42 */
-#define	RX_DMA_CTL_STAT_PORT_DROP_PKT 0x0000040000000000ULL
-#define	RX_DMA_CTL_STAT_PORT_DROP_PKT_MASK  0x0000040000000000ULL
-#define	RX_DMA_CTL_STAT_RBR_PRE_PAR_SHIFT 43	/* RO, bit 43 */
-#define	RX_DMA_CTL_STAT_RBR_PRE_PAR 0x0000080000000000ULL
-#define	RX_DMA_CTL_STAT_RBR_PRE_PAR_MASK  0x0000080000000000ULL
-#define	RX_DMA_CTL_STAT_RCR_SHA_PAR_SHIFT 44	/* RO, bit 44 */
-#define	RX_DMA_CTL_STAT_RCR_SHA_PAR 0x0000100000000000ULL
-#define	RX_DMA_CTL_STAT_RCR_SHA_PAR_MASK  0x0000100000000000ULL
-#define	RX_DMA_CTL_STAT_RCRTO_SHIFT	45	/* RW1C, bit 45 */
-#define	RX_DMA_CTL_STAT_RCRTO		0x0000200000000000ULL
-#define	RX_DMA_CTL_STAT_RCRTO_MASK	0x0000200000000000ULL
-#define	RX_DMA_CTL_STAT_RCRTHRES_SHIFT	46	/* RO, bit 46 */
-#define	RX_DMA_CTL_STAT_RCRTHRES	0x0000400000000000ULL
-#define	RX_DMA_CTL_STAT_RCRTHRES_MASK	0x0000400000000000ULL
-#define	RX_DMA_CTL_STAT_MEX_SHIFT	47	/* RW, bit 47 */
-#define	RX_DMA_CTL_STAT_MEX		0x0000800000000000ULL
-#define	RX_DMA_CTL_STAT_MEX_MASK	0x0000800000000000ULL
-#define	RX_DMA_CTL_STAT_DC_FIFO_ERR_SHIFT	48	/* RW1C, bit 48 */
-#define	RX_DMA_CTL_STAT_DC_FIFO_ERR		0x0001000000000000ULL
-#define	RX_DMA_CTL_STAT_DC_FIFO_ERR_MASK	0x0001000000000000ULL
-#define	RX_DMA_CTL_STAT_RCR_ACK_ERR_SHIFT	49	/* RO, bit 49 */
-#define	RX_DMA_CTL_STAT_RCR_ACK_ERR		0x0002000000000000ULL
-#define	RX_DMA_CTL_STAT_RCR_ACK_ERR_MASK	0x0002000000000000ULL
-#define	RX_DMA_CTL_STAT_RSP_DAT_ERR_SHIFT	50	/* RO, bit 50 */
-#define	RX_DMA_CTL_STAT_RSP_DAT_ERR		0x0004000000000000ULL
-#define	RX_DMA_CTL_STAT_RSP_DAT_ERR_MASK	0x0004000000000000ULL
-
-#define	RX_DMA_CTL_STAT_BYTE_EN_BUS_SHIFT	51	/* RO, bit 51 */
-#define	RX_DMA_CTL_STAT_BYTE_EN_BUS		0x0008000000000000ULL
-#define	RX_DMA_CTL_STAT_BYTE_EN_BUS_MASK	0x0008000000000000ULL
-
-#define	RX_DMA_CTL_STAT_RSP_CNT_ERR_SHIFT	52	/* RO, bit 52 */
-#define	RX_DMA_CTL_STAT_RSP_CNT_ERR		0x0010000000000000ULL
-#define	RX_DMA_CTL_STAT_RSP_CNT_ERR_MASK	0x0010000000000000ULL
-
-#define	RX_DMA_CTL_STAT_RBR_TMOUT_SHIFT	53	/* RO, bit 53 */
-#define	RX_DMA_CTL_STAT_RBR_TMOUT		0x0020000000000000ULL
-#define	RX_DMA_CTL_STAT_RBR_TMOUT_MASK	0x0020000000000000ULL
-#define	RX_DMA_CTRL_STAT_ENT_MASK_SHIFT 32
-#define	RX_DMA_CTL_STAT_ERROR 			(RX_DMA_ENT_MSK_ALL << \
-						RX_DMA_CTRL_STAT_ENT_MASK_SHIFT)
-
-/* the following are write 1 to clear bits */
-#define	RX_DMA_CTL_STAT_WR1C	(RX_DMA_CTL_STAT_RBREMPTY | \
-				RX_DMA_CTL_STAT_RCR_SHDW_FULL | \
-				RX_DMA_CTL_STAT_RBR_PRE_EMTY | \
-				RX_DMA_CTL_STAT_WRED_DROP | \
-				RX_DMA_CTL_STAT_PORT_DROP_PKT | \
-				RX_DMA_CTL_STAT_RCRTO | \
-				RX_DMA_CTL_STAT_RCRTHRES | \
-				RX_DMA_CTL_STAT_DC_FIFO_ERR)
-
-/* Receive DMA Interrupt Behavior: Force an update to RCR  (DMC + 0x00078) */
-#define	RCR_FLSH_SHIFT			0	/* RW, bit 0:0 */
-#define	RCR_FLSH_SET			0x0000000000000001ULL
-#define	RCR_FLSH_MASK			0x0000000000000001ULL
-
-/* Receive DMA Interrupt Behavior: the first error log  (DMC + 0x00080) */
-#define	RX_DMA_LOGA_ADDR_SHIFT		0	/* RO, bit 11:0 */
-#define	RX_DMA_LOGA_ADDR		0x0000000000000FFFULL
-#define	RX_DMA_LOGA_ADDR_MASK		0x0000000000000FFFULL
-#define	RX_DMA_LOGA_TYPE_SHIFT		28	/* RO, bit 30:28 */
-#define	RX_DMA_LOGA_TYPE		0x0000000070000000ULL
-#define	RX_DMA_LOGA_TYPE_MASK		0x0000000070000000ULL
-#define	RX_DMA_LOGA_MULTI_SHIFT		31	/* RO, bit 31 */
-#define	RX_DMA_LOGA_MULTI		0x0000000080000000ULL
-#define	RX_DMA_LOGA_MULTI_MASK		0x0000000080000000ULL
-
-/* Receive DMA Interrupt Behavior: the first error log, low address bits  (DMC + 0x00088) */
-#define	RX_DMA_LOGA_ADDR_L_SHIFT	0	/* RO, bit 31:0 */
-#define	RX_DMA_LOGA_ADDRL_L		0x00000000FFFFFFFFULL
-#define	RX_DMA_LOGA_ADDR_LMASK		0x00000000FFFFFFFFULL
-
-typedef union _rcrcfig_a_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t len:16;
-			uint32_t res1:4;
-			uint32_t staddr_base:12;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t staddr_base:12;
-			uint32_t res1:4;
-			uint32_t len:16;
-#endif
-		} hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t staddr_base:13;
-			uint32_t staddr:13;
-			uint32_t res2:6;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t res2:6;
-			uint32_t staddr:13;
-			uint32_t staddr_base:13;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t len:16;
-			uint32_t res1:4;
-			uint32_t staddr_base:12;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t staddr_base:12;
-			uint32_t res1:4;
-			uint32_t len:16;
-#endif
-		} hdw;
-#endif
-	} bits;
-} rcrcfig_a_t, *p_rcrcfig_a_t;
-
-
-typedef union _rcrcfig_b_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t pthres:16;
-			uint32_t entout:1;
-			uint32_t res1:9;
-			uint32_t timeout:6;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t timeout:6;
-			uint32_t res1:9;
-			uint32_t entout:1;
-			uint32_t pthres:16;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} rcrcfig_b_t, *p_rcrcfig_b_t;
-
-
-typedef union _rcrstat_a_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res1:16;
-			uint32_t qlen:16;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t qlen:16;
-			uint32_t res1:16;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} rcrstat_a_t, *p_rcrstat_a_t;
-
-
-typedef union _rcrstat_b_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res1:20;
-			uint32_t tlptr_h:12;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t tlptr_h:12;
-			uint32_t res1:20;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} rcrstat_b_t, *p_rcrstat_b_t;
-
-
-typedef union _rcrstat_c_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t tlptr_l:29;
-			uint32_t res1:3;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t res1:3;
-			uint32_t tlptr_l:29;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} rcrstat_c_t, *p_rcrstat_c_t;
-
-
-/* Receive DMA Event Mask */
-typedef union _rx_dma_ent_msk_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t rsrvd2:10;
-			uint32_t rbr_tmout:1;
-			uint32_t rsp_cnt_err:1;
-			uint32_t byte_en_bus:1;
-			uint32_t rsp_dat_err:1;
-			uint32_t rcr_ack_err:1;
-			uint32_t dc_fifo_err:1;
-			uint32_t rsrvd:1;
-			uint32_t rcrthres:1;
-			uint32_t rcrto:1;
-			uint32_t rcr_sha_par:1;
-			uint32_t rbr_pre_par:1;
-			uint32_t port_drop_pkt:1;
-			uint32_t wred_drop:1;
-			uint32_t rbr_pre_empty:1;
-			uint32_t rcr_shadow_full:1;
-			uint32_t config_err:1;
-			uint32_t rcrincon:1;
-			uint32_t rcrfull:1;
-			uint32_t rbr_empty:1;
-			uint32_t rbrfull:1;
-			uint32_t rbrlogpage:1;
-			uint32_t cfiglogpage:1;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t cfiglogpage:1;
-			uint32_t rbrlogpage:1;
-			uint32_t rbrfull:1;
-			uint32_t rbr_empty:1;
-			uint32_t rcrfull:1;
-			uint32_t rcrincon:1;
-			uint32_t config_err:1;
-			uint32_t rcr_shadow_full:1;
-			uint32_t rbr_pre_empty:1;
-			uint32_t wred_drop:1;
-			uint32_t port_drop_pkt:1;
-			uint32_t rbr_pre_par:1;
-			uint32_t rcr_sha_par:1;
-			uint32_t rcrto:1;
-			uint32_t rcrthres:1;
-			uint32_t rsrvd:1;
-			uint32_t dc_fifo_err:1;
-			uint32_t rcr_ack_err:1;
-			uint32_t rsp_dat_err:1;
-			uint32_t byte_en_bus:1;
-			uint32_t rsp_cnt_err:1;
-			uint32_t rbr_tmout:1;
-			uint32_t rsrvd2:10;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} rx_dma_ent_msk_t, *p_rx_dma_ent_msk_t;
-
-
-/* Receive DMA Control and Status */
-typedef union _rx_dma_ctl_stat_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t rsrvd:10;
-			uint32_t rbr_tmout:1;
-			uint32_t rsp_cnt_err:1;
-			uint32_t byte_en_bus:1;
-			uint32_t rsp_dat_err:1;
-			uint32_t rcr_ack_err:1;
-			uint32_t dc_fifo_err:1;
-			uint32_t mex:1;
-			uint32_t rcrthres:1;
-			uint32_t rcrto:1;
-			uint32_t rcr_sha_par:1;
-			uint32_t rbr_pre_par:1;
-			uint32_t port_drop_pkt:1;
-			uint32_t wred_drop:1;
-			uint32_t rbr_pre_empty:1;
-			uint32_t rcr_shadow_full:1;
-			uint32_t config_err:1;
-			uint32_t rcrincon:1;
-			uint32_t rcrfull:1;
-			uint32_t rbr_empty:1;
-			uint32_t rbrfull:1;
-			uint32_t rbrlogpage:1;
-			uint32_t cfiglogpage:1;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t cfiglogpage:1;
-			uint32_t rbrlogpage:1;
-			uint32_t rbrfull:1;
-			uint32_t rbr_empty:1;
-			uint32_t rcrfull:1;
-			uint32_t rcrincon:1;
-			uint32_t config_err:1;
-			uint32_t rcr_shadow_full:1;
-			uint32_t rbr_pre_empty:1;
-			uint32_t wred_drop:1;
-			uint32_t port_drop_pkt:1;
-			uint32_t rbr_pre_par:1;
-			uint32_t rcr_sha_par:1;
-			uint32_t rcrto:1;
-			uint32_t rcrthres:1;
-			uint32_t mex:1;
-			uint32_t dc_fifo_err:1;
-			uint32_t rcr_ack_err:1;
-			uint32_t rsp_dat_err:1;
-			uint32_t byte_en_bus:1;
-			uint32_t rsp_cnt_err:1;
-			uint32_t rbr_tmout:1;
-			uint32_t rsrvd:10;
-#endif
-		} hdw;
-
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t ptrread:16;
-			uint32_t pktread:16;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t pktread:16;
-			uint32_t ptrread:16;
-
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t rsrvd:10;
-			uint32_t rbr_tmout:1;
-			uint32_t rsp_cnt_err:1;
-			uint32_t byte_en_bus:1;
-			uint32_t rsp_dat_err:1;
-			uint32_t rcr_ack_err:1;
-			uint32_t dc_fifo_err:1;
-			uint32_t mex:1;
-			uint32_t rcrthres:1;
-			uint32_t rcrto:1;
-			uint32_t rcr_sha_par:1;
-			uint32_t rbr_pre_par:1;
-			uint32_t port_drop_pkt:1;
-			uint32_t wred_drop:1;
-			uint32_t rbr_pre_empty:1;
-			uint32_t rcr_shadow_full:1;
-			uint32_t config_err:1;
-			uint32_t rcrincon:1;
-			uint32_t rcrfull:1;
-			uint32_t rbr_empty:1;
-			uint32_t rbrfull:1;
-			uint32_t rbrlogpage:1;
-			uint32_t cfiglogpage:1;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t cfiglogpage:1;
-			uint32_t rbrlogpage:1;
-			uint32_t rbrfull:1;
-			uint32_t rbr_empty:1;
-			uint32_t rcrfull:1;
-			uint32_t rcrincon:1;
-			uint32_t config_err:1;
-			uint32_t rcr_shadow_full:1;
-			uint32_t rbr_pre_empty:1;
-			uint32_t wred_drop:1;
-			uint32_t port_drop_pkt:1;
-			uint32_t rbr_pre_par:1;
-			uint32_t rcr_sha_par:1;
-			uint32_t rcrto:1;
-			uint32_t rcrthres:1;
-			uint32_t mex:1;
-			uint32_t dc_fifo_err:1;
-			uint32_t rcr_ack_err:1;
-			uint32_t rsp_dat_err:1;
-			uint32_t byte_en_bus:1;
-			uint32_t rsp_cnt_err:1;
-			uint32_t rbr_tmout:1;
-			uint32_t rsrvd:10;
-#endif
-		} hdw;
-#endif
-	} bits;
-} rx_dma_ctl_stat_t, *p_rx_dma_ctl_stat_t;
-
-typedef union _rcr_flsh_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res1_1:31;
-			uint32_t flsh:1;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t flsh:1;
-			uint32_t res1_1:31;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} rcr_flsh_t, *p_rcr_flsh_t;
-
-
-typedef union _rx_dma_loga_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t multi:1;
-			uint32_t type:3;
-			uint32_t res1:16;
-			uint32_t addr:12;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t addr:12;
-			uint32_t res1:16;
-			uint32_t type:3;
-			uint32_t multi:1;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} rx_dma_loga_t, *p_rx_dma_loga_t;
-
-
-typedef union _rx_dma_logb_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t addr_l:32;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t addr_l:32;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} rx_dma_logb_t, *p_rx_dma_logb_t;
-
-
-#define	RX_DMA_MAILBOX_BYTE_LENGTH	64
-#define	RX_DMA_MBOX_UNUSED_1		8
-#define	RX_DMA_MBOX_UNUSED_2		16
-
-typedef struct _rxdma_mailbox_t {
-	rx_dma_ctl_stat_t	rxdma_ctl_stat;		/* 8 bytes */
-	rbr_stat_t		rbr_stat;		/* 8 bytes */
-	uint32_t		rbr_hdl;		/* 4 bytes (31:0) */
-	uint32_t		rbr_hdh;		/* 4 bytes (31:0) */
-	uint32_t		resv_1[RX_DMA_MBOX_UNUSED_1];
-	uint32_t		rcrstat_c;		/* 4 bytes (31:0) */
-	uint32_t		rcrstat_b;		/* 4 bytes (31:0) */
-	rcrstat_a_t		rcrstat_a;		/* 8 bytes */
-	uint32_t		resv_2[RX_DMA_MBOX_UNUSED_2];
-} rxdma_mailbox_t, *p_rxdma_mailbox_t;
-
-
-
-typedef union _rx_disc_cnt_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res_1:15;
-			uint32_t oflow:1;
-			uint32_t count:16;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t count:16;
-			uint32_t oflow:1;
-			uint32_t res_1:15;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} rx_disc_cnt_t, *p_rx_disc_cnt_t;
-
-#define	RXMISC_DISCARD_REG		(DMC + 0x00090)
-
-#if OLD
-/*
- * RBR Empty: If the RBR is empty or the prefetch buffer is empty,
- * packets will be discarded (Each RBR has one).
- * (16 channels, 0x200)
- */
-#define	RDC_PRE_EMPTY_REG		(DMC + 0x000B0)
-#define	RDC_PRE_EMPTY_OFFSET(channel)	(RDC_PRE_EMPTY_REG + \
-						(DMC_OFFSET(channel)))
-
-typedef union _rdc_pre_empty_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res_1:15;
-			uint32_t oflow:1;
-			uint32_t count:16;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t count:16;
-			uint32_t oflow:1;
-			uint32_t res_1:15;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} rdc_pre_empty_t, *p_rdc_pre_empty_t;
-#endif
-
-
-#define	FZC_DMC_REG_SIZE		0x20
-#define	FZC_DMC_OFFSET(channel)		(FZC_DMC_REG_SIZE * channel)
-
-/* WRED discard count register (16, 0x40) */
-#define	RED_DIS_CNT_REG			(FZC_DMC + 0x30008)
-#define	RED_DMC_OFFSET(channel)		(0x40 * channel)
-#define	RDC_DIS_CNT_OFFSET(rdc)	(RED_DIS_CNT_REG + RED_DMC_OFFSET(rdc))
-
-typedef union _red_disc_cnt_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res_1:15;
-			uint32_t oflow:1;
-			uint32_t count:16;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t count:16;
-			uint32_t oflow:1;
-			uint32_t res_1:15;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} red_disc_cnt_t, *p_red_disc_cnt_t;
-
-
-#define	RDMC_PRE_PAR_ERR_REG			(FZC_DMC + 0x00078)
-#define	RDMC_SHA_PAR_ERR_REG			(FZC_DMC + 0x00080)
-
-typedef union _rdmc_par_err_log {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res_1:16;
-			uint32_t err:1;
-			uint32_t merr:1;
-			uint32_t res:6;
-			uint32_t addr:8;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t addr:8;
-			uint32_t res:6;
-			uint32_t merr:1;
-			uint32_t err:1;
-			uint32_t res_1:16;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} rdmc_par_err_log_t, *p_rdmc_par_err_log_t;
-
-
-/* Used for accessing RDMC Memory */
-#define	RDMC_MEM_ADDR_REG			(FZC_DMC + 0x00088)
-
-
-typedef union _rdmc_mem_addr {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-
-#define	RDMC_MEM_ADDR_PREFETCH 0
-#define	RDMC_MEM_ADDR_SHADOW 1
-
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res_1:23;
-			uint32_t pre_shad:1;
-			uint32_t addr:8;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t addr:8;
-			uint32_t pre_shad:1;
-			uint32_t res_1:23;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} rdmc_mem_addr_t, *p_rdmc_mem_addr_t;
-
-
-#define	RDMC_MEM_DATA0_REG			(FZC_DMC + 0x00090)
-#define	RDMC_MEM_DATA1_REG			(FZC_DMC + 0x00098)
-#define	RDMC_MEM_DATA2_REG			(FZC_DMC + 0x000A0)
-#define	RDMC_MEM_DATA3_REG			(FZC_DMC + 0x000A8)
-#define	RDMC_MEM_DATA4_REG			(FZC_DMC + 0x000B0)
-
-typedef union _rdmc_mem_data {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t data;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t data;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} rdmc_mem_data_t, *p_rdmc_mem_data_t;
-
-
-typedef union _rdmc_mem_access {
-#define	RDMC_MEM_READ 1
-#define	RDMC_MEM_WRITE 2
-	uint32_t data[5];
-	uint8_t addr;
-	uint8_t location;
-} rdmc_mem_access_t, *p_rdmc_mem_access_t;
-
-
-#define	RX_CTL_DAT_FIFO_STAT_REG			(FZC_DMC + 0x000B8)
-#define	RX_CTL_DAT_FIFO_MASK_REG			(FZC_DMC + 0x000C0)
-#define	RX_CTL_DAT_FIFO_STAT_DBG_REG		(FZC_DMC + 0x000D0)
-
-typedef union _rx_ctl_dat_fifo {
-#define	FIFO_EOP_PORT0 0x1
-#define	FIFO_EOP_PORT1 0x2
-#define	FIFO_EOP_PORT2 0x4
-#define	FIFO_EOP_PORT3 0x8
-#define	FIFO_EOP_ALL 0xF
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res_1:23;
-			uint32_t id_mismatch:1;
-			uint32_t zcp_eop_err:4;
-			uint32_t ipp_eop_err:4;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t ipp_eop_err:4;
-			uint32_t zcp_eop_err:4;
-			uint32_t id_mismatch:1;
-			uint32_t res_1:23;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} rx_ctl_dat_fifo_mask_t, rx_ctl_dat_fifo_stat_t,
-	rx_ctl_dat_fifo_stat_dbg_t, *p_rx_ctl_dat_fifo_t;
-
-
-
-#define	RDMC_TRAINING_VECTOR_REG		(FZC_DMC + 0x000C8)
-
-typedef union _rx_training_vect {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-			uint32_t tv;
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} rx_training_vect_t, *p_rx_training_vect_t;
-
-#define	RXCTL_IPP_EOP_ERR_MASK	0x0000000FULL
-#define	RXCTL_IPP_EOP_ERR_SHIFT	0x0
-#define	RXCTL_ZCP_EOP_ERR_MASK	0x000000F0ULL
-#define	RXCTL_ZCP_EOP_ERR_SHIFT	0x4
-#define	RXCTL_ID_MISMATCH_MASK	0x00000100ULL
-#define	RXCTL_ID_MISMATCH_SHIFT	0x8
-
-
-/*
- * Receive Packet Header Format
- * The packet header precedes the packet data.
- * The minimum size is 2 bytes and the maximum is 18 bytes.
- */
-/*
- * Packet header format 0 (2 bytes).
- */
-typedef union _rx_pkt_hdr0_t {
-	uint16_t value;
-	struct {
-#if	defined(_BIT_FIELDS_HTOL)
-		uint16_t inputport:2;
-		uint16_t maccheck:1;
-		uint16_t class:5;
-		uint16_t vlan:1;
-		uint16_t llcsnap:1;
-		uint16_t noport:1;
-		uint16_t badip:1;
-		uint16_t tcamhit:1;
-		uint16_t tres:2;
-		uint16_t tzfvld:1;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint16_t tzfvld:1;
-		uint16_t tres:2;
-		uint16_t tcamhit:1;
-		uint16_t badip:1;
-		uint16_t noport:1;
-		uint16_t llcsnap:1;
-		uint16_t vlan:1;
-		uint16_t class:5;
-		uint16_t maccheck:1;
-		uint16_t inputport:2;
-#endif
-	} bits;
-} rx_pkt_hdr0_t, *p_rx_pkt_hdr0_t;
-
-
-/*
- * Packet header format 1.
- */
-typedef union _rx_pkt_hdr1_b0_t {
-	uint8_t value;
-	struct  {
-#if	defined(_BIT_FIELDS_HTOL)
-		uint8_t hwrsvd:8;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint8_t hwrsvd:8;
-#endif
-	} bits;
-} rx_pkt_hdr1_b0_t, *p_rx_pkt_hdr1_b0_t;
-
-typedef union _rx_pkt_hdr1_b1_t {
-	uint8_t value;
-	struct  {
-#if	defined(_BIT_FIELDS_HTOL)
-		uint8_t tcammatch:8;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint8_t tcammatch:8;
-#endif
-	} bits;
-} rx_pkt_hdr1_b1_t, *p_rx_pkt_hdr1_b1_t;
-
-typedef union _rx_pkt_hdr1_b2_t {
-	uint8_t value;
-	struct  {
-#if	defined(_BIT_FIELDS_HTOL)
-		uint8_t resv:2;
-		uint8_t hashhit:1;
-		uint8_t exact:1;
-		uint8_t hzfvld:1;
-		uint8_t hashidx:3;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint8_t hashidx:3;
-		uint8_t hzfvld:1;
-		uint8_t exact:1;
-		uint8_t hashhit:1;
-		uint8_t resv:2;
-#endif
-	} bits;
-} rx_pkt_hdr1_b2_t, *p_rx_pkt_hdr1_b2_t;
-
-typedef union _rx_pkt_hdr1_b3_t {
-	uint8_t value;
-	struct  {
-#if	defined(_BIT_FIELDS_HTOL)
-		uint8_t zc_resv:8;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint8_t zc_resv:8;
-#endif
-	} bits;
-} rx_pkt_hdr1_b3_t, *p_rx_pkt_hdr1_b3_t;
-
-typedef union _rx_pkt_hdr1_b4_t {
-	uint8_t value;
-	struct  {
-#if	defined(_BIT_FIELDS_HTOL)
-		uint8_t resv:4;
-		uint8_t zflowid:4;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint8_t zflowid:4;
-		uint8_t resv:4;
-#endif
-	} bits;
-} rx_pkt_hdr1_b4_t, *p_rx_pkt_hdr1_b4_t;
-
-typedef union _rx_pkt_hdr1_b5_t {
-	uint8_t value;
-	struct  {
-#if	defined(_BIT_FIELDS_HTOL)
-		uint8_t zflowid:8;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint8_t zflowid:8;
-#endif
-	} bits;
-} rx_pkt_hdr1_b5_t, *p_rx_pkt_hdr1_b5_t;
-
-typedef union _rx_pkt_hdr1_b6_t {
-	uint8_t value;
-	struct  {
-#if	defined(_BIT_FIELDS_HTOL)
-		uint8_t hashval2:8;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint8_t hashval2:8;
-#endif
-	} bits;
-} rx_pkt_hdr1_b6_t, *p_rx_pkt_hdr1_b6_t;
-
-typedef union _rx_pkt_hdr1_b7_t {
-	uint8_t value;
-	struct  {
-#if	defined(_BIT_FIELDS_HTOL)
-		uint8_t hashval2:8;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint8_t hashval2:8;
-#endif
-	} bits;
-} rx_pkt_hdr1_b7_t, *p_rx_pkt_hdr1_b7_t;
-
-typedef union _rx_pkt_hdr1_b8_t {
-	uint8_t value;
-	struct  {
-#if defined(_BIT_FIELDS_HTOL)
-		uint8_t resv:4;
-		uint8_t h1:4;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint8_t h1:4;
-		uint8_t resv:4;
-#endif
-	} bits;
-} rx_pkt_hdr1_b8_t, *p_rx_pkt_hdr1_b8_t;
-
-typedef union _rx_pkt_hdr1_b9_t {
-	uint8_t value;
-	struct  {
-#if defined(_BIT_FIELDS_HTOL)
-		uint8_t h1:8;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint8_t h1:8;
-#endif
-	} bits;
-} rx_pkt_hdr1_b9_t, *p_rx_pkt_hdr1_b9_t;
-
-typedef union _rx_pkt_hdr1_b10_t {
-	uint8_t value;
-	struct  {
-#if defined(_BIT_FIELDS_HTOL)
-		uint8_t resv:4;
-		uint8_t h1:4;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint8_t h1:4;
-		uint8_t resv:4;
-#endif
-	} bits;
-} rx_pkt_hdr1_b10_t, *p_rx_pkt_hdr1_b10_t;
-
-typedef union _rx_pkt_hdr1_b11_b12_t {
-	uint16_t value;
-	struct {
-#if	defined(_BIT_FIELDS_HTOL)
-		uint16_t h1_1:8;
-		uint16_t h1_2:8;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint16_t h1_2:8;
-		uint16_t h1_1:8;
-#endif
-	} bits;
-} rx_pkt_hdr1_b11_b12_t, *p_rx_pkt_hdr1_b11_b12_t;
-
-typedef union _rx_pkt_hdr1_b13_t {
-	uint8_t value;
-	struct  {
-#if defined(_BIT_FIELDS_HTOL)
-		uint8_t usr_data:8;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint8_t usr_data:8;
-#endif
-	} bits;
-} rx_pkt_hdr1_b13_t, *p_rx_pkt_hdr1_b13_t;
-
-typedef union _rx_pkt_hdr1_b14_b17_t {
-	uint32_t value;
-	struct  {
-#if defined(_BIT_FIELDS_HTOL)
-		uint32_t usr_data_1:8;
-		uint32_t usr_data_2:8;
-		uint32_t usr_data_3:8;
-		uint32_t usr_data_4:8;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint32_t usr_data_4:8;
-		uint32_t usr_data_3:8;
-		uint32_t usr_data_2:8;
-		uint32_t usr_data_1:8;
-#endif
-	} bits;
-} rx_pkt_hdr1_b14_b17_t, *p_rx_pkt_hdr1_b14_b17_t;
-
-/* Receive packet header 1 format (18 bytes) */
-typedef struct _rx_pkt_hdr_t {
-	rx_pkt_hdr1_b0_t		rx_hdr1_b0;
-	rx_pkt_hdr1_b1_t		rx_hdr1_b1;
-	rx_pkt_hdr1_b2_t		rx_hdr1_b2;
-	rx_pkt_hdr1_b3_t		rx_hdr1_b3;
-	rx_pkt_hdr1_b4_t		rx_hdr1_b4;
-	rx_pkt_hdr1_b5_t		rx_hdr1_b5;
-	rx_pkt_hdr1_b6_t		rx_hdr1_b6;
-	rx_pkt_hdr1_b7_t		rx_hdr1_b7;
-	rx_pkt_hdr1_b8_t		rx_hdr1_b8;
-	rx_pkt_hdr1_b9_t		rx_hdr1_b9;
-	rx_pkt_hdr1_b10_t		rx_hdr1_b10;
-	rx_pkt_hdr1_b11_b12_t		rx_hdr1_b11_b12;
-	rx_pkt_hdr1_b13_t		rx_hdr1_b13;
-	rx_pkt_hdr1_b14_b17_t		rx_hdr1_b14_b17;
-} rx_pkt_hdr1_t, *p_rx_pkt_hdr1_t;
-
-
-#ifdef	__cplusplus
-}
-#endif
-
-#endif	/* _SYS_NXGE_NXGE_RXDMA_HW_H */
--- a/usr/src/uts/sun4v/sys/nxge/nxge_sr_hw.h	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,793 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef	_SYS_NXGE_NXGE_SR_HW_H
-#define	_SYS_NXGE_NXGE_SR_HW_H
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#ifdef	__cplusplus
-extern "C" {
-#endif
-
-#define	ESR_NEPTUNE_DEV_ADDR	0x1E
-#define	ESR_NEPTUNE_BASE	0
-#define	ESR_PORT_ADDR_BASE	0
-#define	PCISR_DEV_ADDR		0x1E
-#define	PCISR_BASE		0
-#define	PCISR_PORT_ADDR_BASE	2
-
-#define	PB	0
-
-#define	SR_RX_TX_COMMON_CONTROL	PB + 0x000
-#define	SR_RX_TX_RESET_CONTROL	PB + 0x004
-#define	SR_RX_POWER_CONTROL	PB + 0x008
-#define	SR_TX_POWER_CONTROL	PB + 0x00C
-#define	SR_MISC_POWER_CONTROL	PB + 0x010
-#define	SR_RX_TX_CONTROL_A	PB + 0x100
-#define	SR_RX_TX_TUNING_A	PB + 0x104
-#define	SR_RX_SYNCCHAR_A	PB + 0x108
-#define	SR_RX_TX_TEST_A		PB + 0x10C
-#define	SR_GLUE_CONTROL0_A	PB + 0x110
-#define	SR_GLUE_CONTROL1_A	PB + 0x114
-#define	SR_RX_TX_CONTROL_B	PB + 0x120
-#define	SR_RX_TX_TUNING_B	PB + 0x124
-#define	SR_RX_SYNCCHAR_B	PB + 0x128
-#define	SR_RX_TX_TEST_B		PB + 0x12C
-#define	SR_GLUE_CONTROL0_B	PB + 0x130
-#define	SR_GLUE_CONTROL1_B	PB + 0x134
-#define	SR_RX_TX_CONTROL_C	PB + 0x140
-#define	SR_RX_TX_TUNING_C	PB + 0x144
-#define	SR_RX_SYNCCHAR_C	PB + 0x148
-#define	SR_RX_TX_TEST_C		PB + 0x14C
-#define	SR_GLUE_CONTROL0_C	PB + 0x150
-#define	SR_GLUE_CONTROL1_C	PB + 0x154
-#define	SR_RX_TX_CONTROL_D	PB + 0x160
-#define	SR_RX_TX_TUNING_D	PB + 0x164
-#define	SR_RX_SYNCCHAR_D	PB + 0x168
-#define	SR_RX_TX_TEST_D		PB + 0x16C
-#define	SR_GLUE_CONTROL0_D	PB + 0x170
-#define	SR_GLUE_CONTROL1_D	PB + 0x174
-#define	SR_RX_TX_TUNING_1_A	PB + 0x184
-#define	SR_RX_TX_TUNING_1_B	PB + 0x1A4
-#define	SR_RX_TX_TUNING_1_C	PB + 0x1C4
-#define	SR_RX_TX_TUNING_1_D	PB + 0x1E4
-#define	SR_RX_TX_TUNING_2_A	PB + 0x204
-#define	SR_RX_TX_TUNING_2_B	PB + 0x224
-#define	SR_RX_TX_TUNING_2_C	PB + 0x244
-#define	SR_RX_TX_TUNING_2_D	PB + 0x264
-#define	SR_RX_TX_TUNING_3_A	PB + 0x284
-#define	SR_RX_TX_TUNING_3_B	PB + 0x2A4
-#define	SR_RX_TX_TUNING_3_C	PB + 0x2C4
-#define	SR_RX_TX_TUNING_3_D	PB + 0x2E4
-
-/*
- * Shift right by 1 because the PRM requires that all serdes register
- * addresses be divided by 2.
- */
-#define	ESR_NEP_RX_TX_COMMON_CONTROL_L_ADDR()	(ESR_NEPTUNE_BASE +\
-						(SR_RX_TX_COMMON_CONTROL >> 1))
-#define	ESR_NEP_RX_TX_COMMON_CONTROL_H_ADDR()	(ESR_NEPTUNE_BASE +\
-						(SR_RX_TX_COMMON_CONTROL >> 1)\
-						+ 1)
-#define	ESR_NEP_RX_TX_RESET_CONTROL_L_ADDR()	(ESR_NEPTUNE_BASE +\
-						(SR_RX_TX_RESET_CONTROL >> 1))
-#define	ESR_NEP_RX_TX_RESET_CONTROL_H_ADDR()	(ESR_NEPTUNE_BASE +\
-						(SR_RX_TX_RESET_CONTROL >> 1)\
-						+ 1)
-#define	ESR_NEP_RX_POWER_CONTROL_L_ADDR()	(ESR_NEPTUNE_BASE +\
-						(SR_RX_POWER_CONTROL >> 1))
-#define	ESR_NEP_RX_POWER_CONTROL_H_ADDR()	(ESR_NEPTUNE_BASE +\
-						(SR_RX_POWER_CONTROL >> 1) + 1)
-#define	ESR_NEP_TX_POWER_CONTROL_L_ADDR()	(ESR_NEPTUNE_BASE +\
-						(SR_TX_POWER_CONTROL >> 1))
-#define	ESR_NEP_TX_POWER_CONTROL_H_ADDR()	(ESR_NEPTUNE_BASE +\
-						(SR_TX_POWER_CONTROL >> 1) + 1)
-#define	ESR_NEP_MISC_POWER_CONTROL_L_ADDR()	(ESR_NEPTUNE_BASE +\
-						(SR_MISC_POWER_CONTROL >> 1))
-#define	ESR_NEP_MISC_POWER_CONTROL_H_ADDR()	(ESR_NEPTUNE_BASE +\
-						(SR_MISC_POWER_CONTROL >> 1)\
-						+ 1)
-#define	ESR_NEP_RX_TX_CONTROL_L_ADDR(chan)	((ESR_NEPTUNE_BASE +\
-						SR_RX_TX_CONTROL_A +\
-						(chan * 0x20)) >> 1)
-#define	ESR_NEP_RX_TX_CONTROL_H_ADDR(chan)	((ESR_NEPTUNE_BASE +\
-						SR_RX_TX_CONTROL_A +\
-						(chan * 0x20)) >> 1) + 1
-#define	ESR_NEP_RX_TX_TUNING_L_ADDR(chan)	((ESR_NEPTUNE_BASE +\
-						SR_RX_TX_TUNING_A +\
-						(chan * 0x20)) >> 1)
-#define	ESR_NEP_RX_TX_TUNING_H_ADDR(chan)	((ESR_NEPTUNE_BASE +\
-						SR_RX_TX_TUNING_A +\
-						(chan * 0x20)) >> 1) + 1
-#define	ESR_NEP_RX_TX_SYNCCHAR_L_ADDR(chan)	((ESR_NEPTUNE_BASE +\
-						SR_RX_SYNCCHAR_A +\
-						(chan * 0x20)) >> 1)
-#define	ESR_NEP_RX_TX_SYNCCHAR_H_ADDR(chan)	((ESR_NEPTUNE_BASE +\
-						SR_RX_SYNCCHAR_A +\
-						(chan * 0x20)) >> 1) + 1
-#define	ESR_NEP_RX_TX_TEST_L_ADDR(chan)		((ESR_NEPTUNE_BASE +\
-						SR_RX_TX_TEST_A +\
-						(chan * 0x20)) >> 1)
-#define	ESR_NEP_RX_TX_TEST_H_ADDR(chan)		((ESR_NEPTUNE_BASE +\
-						SR_RX_TX_TEST_A +\
-						(chan * 0x20)) >> 1) + 1
-#define	ESR_NEP_GLUE_CONTROL0_L_ADDR(chan)	((ESR_NEPTUNE_BASE +\
-						SR_GLUE_CONTROL0_A +\
-						(chan * 0x20)) >> 1)
-#define	ESR_NEP_GLUE_CONTROL0_H_ADDR(chan)	((ESR_NEPTUNE_BASE +\
-						SR_GLUE_CONTROL0_A +\
-						(chan * 0x20)) >> 1) + 1
-#define	ESR_NEP_GLUE_CONTROL1_L_ADDR(chan)	((ESR_NEPTUNE_BASE +\
-						SR_GLUE_CONTROL1_A +\
-						(chan * 0x20)) >> 1)
-#define	ESR_NEP_GLUE_CONTROL1_H_ADDR(chan)	((ESR_NEPTUNE_BASE +\
-						SR_GLUE_CONTROL1_A +\
-						(chan * 0x20)) >> 1) + 1
-#define	ESR_NEP_RX_TX_TUNING_1_L_ADDR(chan)	((ESR_NEPTUNE_BASE +\
-						SR_RX_TX_TUNING_1_A +\
-						(chan * 0x20)) >> 1)
-#define	ESR_NEP_RX_TX_TUNING_1_H_ADDR(chan)	((ESR_NEPTUNE_BASE +\
-						SR_RX_TX_TUNING_1_A +\
-						(chan * 0x20)) >> 1) + 1
-#define	ESR_NEP_RX_TX_TUNING_2_L_ADDR(chan)	((ESR_NEPTUNE_BASE +\
-						SR_RX_TX_TUNING_2_A +\
-						(chan * 0x20)) >> 1)
-#define	ESR_NEP_RX_TX_TUNING_2_H_ADDR(chan)	((ESR_NEPTUNE_BASE +\
-						SR_RX_TX_TUNING_2_A +\
-						(chan * 0x20)) >> 1) + 1
-#define	ESR_NEP_RX_TX_TUNING_3_L_ADDR(chan)	((ESR_NEPTUNE_BASE +\
-						SR_RX_TX_TUNING_3_A +\
-						(chan * 0x20)) >> 1)
-#define	ESR_NEP_RX_TX_TUNING_3_H_ADDR(chan)	((ESR_NEPTUNE_BASE +\
-						SR_RX_TX_TUNING_3_A +\
-						(chan * 0x20)) >> 1) + 1
-
-typedef	union _sr_rx_tx_common_ctrl_l {
-	uint16_t value;
-	struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint16_t res3		: 3;
-		uint16_t refclkr_freq	: 5;
-		uint16_t res4		: 8;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint16_t res4		: 8;
-		uint16_t refclkr_freq	: 5;
-		uint16_t res3		: 3;
-#else
-#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
-#endif
-	} bits;
-} sr_rx_tx_common_ctrl_l;
-
-typedef	union _sr_rx_tx_common_ctrl_h {
-	uint16_t value;
-	struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint16_t res1		: 5;
-		uint16_t tdmaster	: 3;
-		uint16_t tp		: 2;
-		uint16_t tz		: 2;
-		uint16_t res2		: 2;
-		uint16_t revlbrefsel	: 2;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint16_t revlbrefsel	: 2;
-		uint16_t res2		: 2;
-		uint16_t tz		: 2;
-		uint16_t tp		: 2;
-		uint16_t tdmaster	: 3;
-		uint16_t res1		: 5;
-#else
-#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
-#endif
-	} bits;
-} sr_rx_tx_common_ctrl_h;
-
-
-/* RX TX Common Control Register field values */
-
-#define	TDMASTER_LANE_A		0
-#define	TDMASTER_LANE_B		1
-#define	TDMASTER_LANE_C		2
-#define	TDMASTER_LANE_D		3
-
-#define	REVLBREFSEL_GBT_RBC_A_O		0
-#define	REVLBREFSEL_GBT_RBC_B_O		1
-#define	REVLBREFSEL_GBT_RBC_C_O		2
-#define	REVLBREFSEL_GBT_RBC_D_O		3
-
-#define	REFCLKR_FREQ_SIM		0
-#define	REFCLKR_FREQ_53_125		0x1
-#define	REFCLKR_FREQ_62_5		0x3
-#define	REFCLKR_FREQ_70_83		0x4
-#define	REFCLKR_FREQ_75			0x5
-#define	REFCLKR_FREQ_78_125		0x6
-#define	REFCLKR_FREQ_79_6875		0x7
-#define	REFCLKR_FREQ_83_33		0x8
-#define	REFCLKR_FREQ_85			0x9
-#define	REFCLKR_FREQ_100		0xA
-#define	REFCLKR_FREQ_104_17		0xB
-#define	REFCLKR_FREQ_106_25		0xC
-#define	REFCLKR_FREQ_120		0xF
-#define	REFCLKR_FREQ_125		0x10
-#define	REFCLKR_FREQ_127_5		0x11
-#define	REFCLKR_FREQ_141_67		0x13
-#define	REFCLKR_FREQ_150		0x15
-#define	REFCLKR_FREQ_156_25		0x16
-#define	REFCLKR_FREQ_159_375		0x17
-#define	REFCLKR_FREQ_170		0x19
-#define	REFCLKR_FREQ_212_5		0x1E
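Illustrative sketch (not from the removed header): selecting a 156.25 MHz reference clock through the low-half union and the REFCLKR_FREQ_* values above. serdes_read16()/serdes_write16() are hypothetical stand-ins for the driver's real MDIO accessors.

extern uint16_t serdes_read16(uint8_t portn, uint32_t reg);		/* hypothetical */
extern void serdes_write16(uint8_t portn, uint32_t reg, uint16_t val);	/* hypothetical */

static void
example_set_refclk(uint8_t portn)
{
	sr_rx_tx_common_ctrl_l ctrl_l;

	/* read the low 16-bit half, update the reference-clock field, write it back */
	ctrl_l.value = serdes_read16(portn, ESR_NEP_RX_TX_COMMON_CONTROL_L_ADDR());
	ctrl_l.bits.refclkr_freq = REFCLKR_FREQ_156_25;
	serdes_write16(portn, ESR_NEP_RX_TX_COMMON_CONTROL_L_ADDR(), ctrl_l.value);
}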
-
-typedef	union _sr_rx_tx_reset_ctrl_l {
-	uint16_t value;
-	struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint16_t rxreset_0a	: 1;
-		uint16_t rxreset_0b	: 1;
-		uint16_t rxreset_0c	: 1;
-		uint16_t rxreset_0d	: 1;
-		uint16_t rxreset_1a	: 1;
-		uint16_t rxreset_1b	: 1;
-		uint16_t rxreset_1c	: 1;
-		uint16_t rxreset_1d	: 1;
-		uint16_t rxreset_2a	: 1;
-		uint16_t rxreset_2b	: 1;
-		uint16_t rxreset_2c	: 1;
-		uint16_t rxreset_2d	: 1;
-		uint16_t rxreset_3a	: 1;
-		uint16_t rxreset_3b	: 1;
-		uint16_t rxreset_3c	: 1;
-		uint16_t rxreset_3d	: 1;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint16_t rxreset_3d	: 1;
-		uint16_t rxreset_3c	: 1;
-		uint16_t rxreset_3b	: 1;
-		uint16_t rxreset_3a	: 1;
-		uint16_t rxreset_2d	: 1;
-		uint16_t rxreset_2c	: 1;
-		uint16_t rxreset_2b	: 1;
-		uint16_t rxreset_2a	: 1;
-		uint16_t rxreset_1d	: 1;
-		uint16_t rxreset_1c	: 1;
-		uint16_t rxreset_1b	: 1;
-		uint16_t rxreset_1a	: 1;
-		uint16_t rxreset_0d	: 1;
-		uint16_t rxreset_0c	: 1;
-		uint16_t rxreset_0b	: 1;
-		uint16_t rxreset_0a	: 1;
-#else
-#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
-#endif
-	} bits;
-} sr_rx_tx_reset_ctrl_l;
-
-
-typedef	union _sr_rx_tx_reset_ctrl_h {
-	uint16_t value;
-	struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint16_t txreset_0a	: 1;
-		uint16_t txreset_0b	: 1;
-		uint16_t txreset_0c	: 1;
-		uint16_t txreset_0d	: 1;
-		uint16_t txreset_1a	: 1;
-		uint16_t txreset_1b	: 1;
-		uint16_t txreset_1c	: 1;
-		uint16_t txreset_1d	: 1;
-		uint16_t txreset_2a	: 1;
-		uint16_t txreset_2b	: 1;
-		uint16_t txreset_2c	: 1;
-		uint16_t txreset_2d	: 1;
-		uint16_t txreset_3a	: 1;
-		uint16_t txreset_3b	: 1;
-		uint16_t txreset_3c	: 1;
-		uint16_t txreset_3d	: 1;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint16_t txreset_3d	: 1;
-		uint16_t txreset_3c	: 1;
-		uint16_t txreset_3b	: 1;
-		uint16_t txreset_3a	: 1;
-		uint16_t txreset_2d	: 1;
-		uint16_t txreset_2c	: 1;
-		uint16_t txreset_2b	: 1;
-		uint16_t txreset_2a	: 1;
-		uint16_t txreset_1d	: 1;
-		uint16_t txreset_1c	: 1;
-		uint16_t txreset_1b	: 1;
-		uint16_t txreset_1a	: 1;
-		uint16_t txreset_0d	: 1;
-		uint16_t txreset_0c	: 1;
-		uint16_t txreset_0b	: 1;
-		uint16_t txreset_0a	: 1;
-#else
-#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
-#endif
-	} bits;
-} sr_rx_tx_reset_ctrl_h;
-
-typedef	union _sr_rx_power_ctrl_l {
-	uint16_t value;
-	struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint16_t pdrxlos_0a	: 1;
-		uint16_t pdrxlos_0b	: 1;
-		uint16_t pdrxlos_0c	: 1;
-		uint16_t pdrxlos_0d	: 1;
-		uint16_t pdrxlos_1a	: 1;
-		uint16_t pdrxlos_1b	: 1;
-		uint16_t pdrxlos_1c	: 1;
-		uint16_t pdrxlos_1d	: 1;
-		uint16_t pdrxlos_2a	: 1;
-		uint16_t pdrxlos_2b	: 1;
-		uint16_t pdrxlos_2c	: 1;
-		uint16_t pdrxlos_2d	: 1;
-		uint16_t pdrxlos_3a	: 1;
-		uint16_t pdrxlos_3b	: 1;
-		uint16_t pdrxlos_3c	: 1;
-		uint16_t pdrxlos_3d	: 1;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint16_t pdrxlos_3d	: 1;
-		uint16_t pdrxlos_3c	: 1;
-		uint16_t pdrxlos_3b	: 1;
-		uint16_t pdrxlos_3a	: 1;
-		uint16_t pdrxlos_2d	: 1;
-		uint16_t pdrxlos_2c	: 1;
-		uint16_t pdrxlos_2b	: 1;
-		uint16_t pdrxlos_2a	: 1;
-		uint16_t pdrxlos_1d	: 1;
-		uint16_t pdrxlos_1c	: 1;
-		uint16_t pdrxlos_1b	: 1;
-		uint16_t pdrxlos_1a	: 1;
-		uint16_t pdrxlos_0d	: 1;
-		uint16_t pdrxlos_0c	: 1;
-		uint16_t pdrxlos_0b	: 1;
-		uint16_t pdrxlos_0a	: 1;
-#else
-#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
-#endif
-	} bits;
-} sr_rx_power_ctrl_l_t;
-
-
-typedef	union _sr_rx_power_ctrl_h {
-	uint16_t value;
-	struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint16_t pdownr_0a	: 1;
-		uint16_t pdownr_0b	: 1;
-		uint16_t pdownr_0c	: 1;
-		uint16_t pdownr_0d	: 1;
-		uint16_t pdownr_1a	: 1;
-		uint16_t pdownr_1b	: 1;
-		uint16_t pdownr_1c	: 1;
-		uint16_t pdownr_1d	: 1;
-		uint16_t pdownr_2a	: 1;
-		uint16_t pdownr_2b	: 1;
-		uint16_t pdownr_2c	: 1;
-		uint16_t pdownr_2d	: 1;
-		uint16_t pdownr_3a	: 1;
-		uint16_t pdownr_3b	: 1;
-		uint16_t pdownr_3c	: 1;
-		uint16_t pdownr_3d	: 1;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint16_t pdownr_3d	: 1;
-		uint16_t pdownr_3c	: 1;
-		uint16_t pdownr_3b	: 1;
-		uint16_t pdownr_3a	: 1;
-		uint16_t pdownr_2d	: 1;
-		uint16_t pdownr_2c	: 1;
-		uint16_t pdownr_2b	: 1;
-		uint16_t pdownr_2a	: 1;
-		uint16_t pdownr_1d	: 1;
-		uint16_t pdownr_1c	: 1;
-		uint16_t pdownr_1b	: 1;
-		uint16_t pdownr_1a	: 1;
-		uint16_t pdownr_0d	: 1;
-		uint16_t pdownr_0c	: 1;
-		uint16_t pdownr_0b	: 1;
-		uint16_t pdownr_0a	: 1;
-#else
-#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
-#endif
-	} bits;
-} sr_rx_power_ctrl_h_t;
-
-typedef	union _sr_tx_power_ctrl_l {
-	uint16_t value;
-	struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint16_t res1		: 8;
-		uint16_t pdownppll0	: 1;
-		uint16_t pdownppll1	: 1;
-		uint16_t pdownppll2	: 1;
-		uint16_t pdownppll3	: 1;
-		uint16_t res2		: 4;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint16_t res2		: 4;
-		uint16_t pdownppll3	: 1;
-		uint16_t pdownppll2	: 1;
-		uint16_t pdownppll1	: 1;
-		uint16_t pdownppll0	: 1;
-		uint16_t res1		: 8;
-#else
-#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
-#endif
-	} bits;
-} sr_tx_power_ctrl_l_t;
-
-typedef	union _sr_tx_power_ctrl_h {
-	uint16_t value;
-	struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint16_t pdownt_0a	: 1;
-		uint16_t pdownt_0b	: 1;
-		uint16_t pdownt_0c	: 1;
-		uint16_t pdownt_0d	: 1;
-		uint16_t pdownt_1a	: 1;
-		uint16_t pdownt_1b	: 1;
-		uint16_t pdownt_1c	: 1;
-		uint16_t pdownt_1d	: 1;
-		uint16_t pdownt_2a	: 1;
-		uint16_t pdownt_2b	: 1;
-		uint16_t pdownt_2c	: 1;
-		uint16_t pdownt_2d	: 1;
-		uint16_t pdownt_3a	: 1;
-		uint16_t pdownt_3b	: 1;
-		uint16_t pdownt_3c	: 1;
-		uint16_t pdownt_3d	: 1;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint16_t pdownt_3d	: 1;
-		uint16_t pdownt_3c	: 1;
-		uint16_t pdownt_3b	: 1;
-		uint16_t pdownt_3a	: 1;
-		uint16_t pdownt_2d	: 1;
-		uint16_t pdownt_2c	: 1;
-		uint16_t pdownt_2b	: 1;
-		uint16_t pdownt_2a	: 1;
-		uint16_t pdownt_1d	: 1;
-		uint16_t pdownt_1c	: 1;
-		uint16_t pdownt_1b	: 1;
-		uint16_t pdownt_1a	: 1;
-		uint16_t pdownt_0d	: 1;
-		uint16_t pdownt_0c	: 1;
-		uint16_t pdownt_0b	: 1;
-		uint16_t pdownt_0a	: 1;
-#else
-#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
-#endif
-	} bits;
-} sr_tx_power_ctrl_h_t;
-
-typedef	union _sr_misc_power_ctrl_l {
-	uint16_t value;
-	struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint16_t res1		: 3;
-		uint16_t pdrtrim	: 1;
-		uint16_t pdownpecl0	: 1;
-		uint16_t pdownpecl1	: 1;
-		uint16_t pdownpecl2	: 1;
-		uint16_t pdownpecl3	: 1;
-		uint16_t pdownppll0	: 1;
-		uint16_t pdownppll1	: 1;
-		uint16_t pdownppll2	: 1;
-		uint16_t pdownppll3	: 1;
-		uint16_t res2		: 4;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint16_t res2		: 4;
-		uint16_t pdownppll3	: 1;
-		uint16_t pdownppll2	: 1;
-		uint16_t pdownppll1	: 1;
-		uint16_t pdownppll0	: 1;
-		uint16_t pdownpecl3	: 1;
-		uint16_t pdownpecl2	: 1;
-		uint16_t pdownpecl1	: 1;
-		uint16_t pdownpecl0	: 1;
-		uint16_t pdrtrim	: 1;
-		uint16_t res1		: 3;
-#else
-#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
-#endif
-	} bits;
-} sr_misc_power_ctrl_l_t;
-
-typedef	union _misc_power_ctrl_h {
-	uint16_t value;
-	struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint16_t pdclkout0	: 1;
-		uint16_t pdclkout1	: 1;
-		uint16_t pdclkout2	: 1;
-		uint16_t pdclkout3	: 1;
-		uint16_t res1		: 12;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint16_t res1		: 12;
-		uint16_t pdclkout3	: 1;
-		uint16_t pdclkout2	: 1;
-		uint16_t pdclkout1	: 1;
-		uint16_t pdclkout0	: 1;
-#else
-#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
-#endif
-	} bits;
-} misc_power_ctrl_h_t;
-
-typedef	union _sr_rx_tx_ctrl_l {
-	uint16_t value;
-	struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint16_t res1		: 2;
-		uint16_t rxpreswin	: 2;
-		uint16_t res2		: 1;
-		uint16_t risefall	: 3;
-		uint16_t res3		: 7;
-		uint16_t enstretch	: 1;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint16_t enstretch	: 1;
-		uint16_t res3		: 7;
-		uint16_t risefall	: 3;
-		uint16_t res2		: 1;
-		uint16_t rxpreswin	: 2;
-		uint16_t res1		: 2;
-#else
-#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
-#endif
-	} bits;
-} sr_rx_tx_ctrl_l_t;
-
-typedef	union _sr_rx_tx_ctrl_h {
-	uint16_t value;
-	struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint16_t biascntl	: 1;
-		uint16_t res1		: 5;
-		uint16_t tdenfifo	: 1;
-		uint16_t tdws20		: 1;
-		uint16_t vmuxlo		: 2;
-		uint16_t vpulselo	: 2;
-		uint16_t res2		: 4;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint16_t res2		: 4;
-		uint16_t vpulselo	: 2;
-		uint16_t vmuxlo		: 2;
-		uint16_t tdws20		: 1;
-		uint16_t tdenfifo	: 1;
-		uint16_t res1		: 5;
-		uint16_t biascntl	: 1;
-#else
-#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
-#endif
-	} bits;
-} sr_rx_tx_ctrl_h_t;
-
-#define	RXPRESWIN_52US_300BITTIMES	0
-#define	RXPRESWIN_53US_300BITTIMES	1
-#define	RXPRESWIN_54US_300BITTIMES	2
-#define	RXPRESWIN_55US_300BITTIMES	3
-
-typedef	union _sr_rx_tx_tuning_l {
-	uint16_t value;
-	struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint16_t rxeq		: 4;
-		uint16_t res1		: 12;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint16_t res1		: 12;
-		uint16_t rxeq		: 4;
-#else
-#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
-#endif
-	} bits;
-} sr_rx_tx_tuning_l_t;
-
-typedef	union _sr_rx_tx_tuning_h {
-	uint16_t value;
-	struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint16_t res1		: 8;
-		uint16_t rp		: 2;
-		uint16_t rz		: 2;
-		uint16_t vtxlo		: 4;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint16_t vtxlo		: 4;
-		uint16_t rz		: 2;
-		uint16_t rp		: 2;
-		uint16_t res1		: 8;
-#else
-#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
-#endif
-	} bits;
-} sr_rx_tx_tuning_h_t;
-
-typedef	union _sr_rx_syncchar_l {
-	uint16_t value;
-	struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint16_t syncchar_0_3	: 4;
-		uint16_t res1		: 2;
-		uint16_t syncmask	: 10;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint16_t syncmask	: 10;
-		uint16_t res1		: 2;
-		uint16_t syncchar_0_3	: 4;
-#else
-#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
-#endif
-	} bits;
-} sr_rx_syncchar_l_t;
-
-typedef	union _sr_rx_syncchar_h {
-	uint16_t value;
-	struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint16_t res1		: 1;
-		uint16_t syncpol	: 1;
-		uint16_t res2		: 8;
-		uint16_t syncchar_4_10	: 6;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint16_t syncchar_4_10	: 6;
-		uint16_t res2		: 8;
-		uint16_t syncpol	: 1;
-		uint16_t res1		: 1;
-#else
-#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
-#endif
-	} bits;
-} sr_rx_syncchar_h_t;
-
-typedef	union _sr_rx_tx_test_l {
-	uint16_t value;
-	struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint16_t res1		: 15;
-		uint16_t ref50		: 1;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint16_t ref50		: 1;
-		uint16_t res1		: 15;
-#else
-#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
-#endif
-	} bits;
-} sr_rx_tx_test_l_t;
-
-typedef	union _sr_rx_tx_test_h {
-	uint16_t value;
-	struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint16_t res1		: 5;
-		uint16_t selftest	: 3;
-		uint16_t res2		: 8;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint16_t res2		: 8;
-		uint16_t selftest	: 3;
-		uint16_t res1		: 5;
-#else
-#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
-#endif
-	} bits;
-} sr_rx_tx_test_h_t;
-
-typedef	union _sr_glue_ctrl0_l {
-	uint16_t value;
-	struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint16_t rxlos_test	: 1;
-		uint16_t res1		: 1;
-		uint16_t rxlosenable	: 1;
-		uint16_t fastresync	: 1;
-		uint16_t samplerate	: 4;
-		uint16_t thresholdcount	: 8;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint16_t thresholdcount	: 8;
-		uint16_t samplerate	: 4;
-		uint16_t fastresync	: 1;
-		uint16_t rxlosenable	: 1;
-		uint16_t res1		: 1;
-		uint16_t rxlos_test	: 1;
-#else
-#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
-#endif
-	} bits;
-} sr_glue_ctrl0_l_t;
-
-typedef	union _sr_glue_ctrl0_h {
-	uint16_t value;
-	struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint16_t res1		: 5;
-		uint16_t bitlocktime	: 3;
-		uint16_t res2		: 8;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint16_t res2		: 8;
-		uint16_t bitlocktime	: 3;
-		uint16_t res1		: 5;
-#else
-#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
-#endif
-	} bits;
-} sr_glue_ctrl0_h_t;
-
-#define	BITLOCKTIME_64_CYCLES		0
-#define	BITLOCKTIME_128_CYCLES		1
-#define	BITLOCKTIME_256_CYCLES		2
-#define	BITLOCKTIME_300_CYCLES		3
-#define	BITLOCKTIME_384_CYCLES		4
-#define	BITLOCKTIME_512_CYCLES		5
-#define	BITLOCKTIME_1024_CYCLES		6
-#define	BITLOCKTIME_2048_CYCLES		7
-
-typedef	union _sr_glue_ctrl1_l {
-	uint16_t value;
-	struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint16_t res1		: 14;
-		uint16_t inittime	: 2;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint16_t inittime	: 2;
-		uint16_t res1		: 14;
-#else
-#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
-#endif
-	} bits;
-} sr_glue_ctrl1_l_t;
-
-typedef	union glue_ctrl1_h {
-	uint16_t value;
-	struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint16_t termr_cfg	: 2;
-		uint16_t termt_cfg	: 2;
-		uint16_t rtrimen	: 2;
-		uint16_t res1		: 10;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint16_t res1		: 10;
-		uint16_t rtrimen	: 2;
-		uint16_t termt_cfg	: 2;
-		uint16_t termr_cfg	: 2;
-#else
-#error one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
-#endif
-	} bits;
-} glue_ctrl1_h_t;
-
-#define	TERM_CFG_67OHM		0
-#define	TERM_CFG_72OHM		1
-#define	TERM_CFG_80OHM		2
-#define	TERM_CFG_87OHM		3
-#define	TERM_CFG_46OHM		4
-#define	TERM_CFG_48OHM		5
-#define	TERM_CFG_52OHM		6
-#define	TERM_CFG_55OHM		7
-
-#define	INITTIME_60US		0
-#define	INITTIME_120US		1
-#define	INITTIME_240US		2
-#define	INITTIME_480US		3
-
-#ifdef	__cplusplus
-}
-#endif
-
-#endif	/* _SYS_NXGE_NXGE_SR_HW_H */
--- a/usr/src/uts/sun4v/sys/nxge/nxge_txc.h	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,83 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef	_SYS_NXGE_NXGE_TXC_H
-#define	_SYS_NXGE_NXGE_TXC_H
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#ifdef	__cplusplus
-extern "C" {
-#endif
-
-#include <sys/nxge/nxge_txc_hw.h>
-#include <npi_txc.h>
-
-/* Suggested by hardware team 7/19/2006 */
-#define	TXC_DMA_MAX_BURST_DEFAULT	1530	/* Max burst used by DRR */
-
-typedef	struct _txc_errlog {
-	txc_ro_states_t		ro_st;
-	txc_sf_states_t		sf_st;
-} txc_errlog_t;
-
-typedef struct _nxge_txc_stats {
-	uint32_t		pkt_stuffed;
-	uint32_t		pkt_xmit;
-	uint32_t		ro_correct_err;
-	uint32_t		ro_uncorrect_err;
-	uint32_t		sf_correct_err;
-	uint32_t		sf_uncorrect_err;
-	uint32_t		address_failed;
-	uint32_t		dma_failed;
-	uint32_t		length_failed;
-	uint32_t		pkt_assy_dead;
-	uint32_t		reorder_err;
-	txc_errlog_t		errlog;
-} nxge_txc_stats_t, *p_nxge_txc_stats_t;
-
-typedef struct _nxge_txc {
-	uint32_t		dma_max_burst;
-	uint32_t		dma_length;
-	uint32_t		training;
-	uint8_t			debug_select;
-	uint64_t		control_status;
-	uint64_t		port_dma_list;
-	nxge_txc_stats_t	*txc_stats;
-} nxge_txc_t, *p_nxge_txc_t;
-
-/*
- * Transmit Controller (TXC) prototypes.
- */
-nxge_status_t nxge_txc_init(p_nxge_t);
-nxge_status_t nxge_txc_uninit(p_nxge_t);
-nxge_status_t nxge_txc_handle_sys_errors(p_nxge_t);
-void nxge_txc_inject_err(p_nxge_t, uint32_t);
-
-#ifdef	__cplusplus
-}
-#endif
-
-#endif	/* _SYS_NXGE_NXGE_TXC_H */
--- a/usr/src/uts/sun4v/sys/nxge/nxge_txc_hw.h	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1270 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef	_SYS_NXGE_NXGE_TXC_HW_H
-#define	_SYS_NXGE_NXGE_TXC_HW_H
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#ifdef	__cplusplus
-extern "C" {
-#endif
-
-#include <nxge_defs.h>
-
-/* Transmit Ring Scheduler Registers */
-#define	TXC_PORT_DMA_ENABLE_REG		(FZC_TXC + 0x20028)
-#define	TXC_PORT_DMA_LIST		0	/* RW bit 23:0 */
-#define	TXC_DMA_DMA_LIST_MASK		0x0000000000FFFFFFULL
-#define	TXC_DMA_DMA_LIST_MASK_N2	0x000000000000FFFFULL
-
-typedef union _txc_port_enable_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res:8;
-			uint32_t port_dma_list:24;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t port_dma_list:24;
-			uint32_t res:8;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txc_port_enable_t, *p_txc_port_enable_t;
-
-typedef union _txc_port_enable_n2_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res:16;
-			uint32_t port_dma_list:16;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t port_dma_list:16;
-			uint32_t res:16;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txc_port_enable_n2_t, *p_txc_port_enable_n2_t;
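Illustrative sketch (not from the removed header): binding a transmit DMA channel to a port by setting its bit in the port_dma_list bitmap above (24 bits on Neptune, 16 on N2). txc_reg_read64()/txc_reg_write64() are hypothetical stand-ins for the driver's PIO register accessors, and any per-port register offsetting is omitted for brevity.

extern uint64_t txc_reg_read64(uint64_t offset);		/* hypothetical */
extern void txc_reg_write64(uint64_t offset, uint64_t value);	/* hypothetical */

static void
example_bind_tdc_to_port(int channel)
{
	txc_port_enable_t en;

	en.value = txc_reg_read64(TXC_PORT_DMA_ENABLE_REG);
	en.bits.ldw.port_dma_list |= (1U << channel);	/* one bit per TDC */
	txc_reg_write64(TXC_PORT_DMA_ENABLE_REG, en.value);
}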
-
-/* Transmit Controller - Registers */
-#define	TXC_FZC_OFFSET			0x1000
-#define	TXC_FZC_PORT_OFFSET(port)	(port * TXC_FZC_OFFSET)
-#define	TXC_FZC_CHANNEL_OFFSET(channel)	(channel * TXC_FZC_OFFSET)
-#define	TXC_FZC_REG_CN_OFFSET(x, cn)	(x + TXC_FZC_CHANNEL_OFFSET(cn))
-
-#define	TXC_FZC_CONTROL_OFFSET		0x100
-#define	TXC_FZC_CNTL_PORT_OFFSET(port)	(port * TXC_FZC_CONTROL_OFFSET)
-#define	TXC_FZC_REG_PT_OFFSET(x, pt)	(x + TXC_FZC_CNTL_PORT_OFFSET(pt))
-
-#define	TXC_DMA_MAX_BURST_REG		(FZC_TXC + 0x00000)
-#define	TXC_DMA_MAX_BURST_SHIFT		0	/* RW bit 19:0 */
-#define	TXC_DMA_MAX_BURST_MASK		0x00000000000FFFFFULL
-
-#define	TXC_MAX_BURST_OFFSET(channel)	(TXC_DMA_MAX_BURST_REG + \
-					(channel * TXC_FZC_OFFSET))
-
-typedef union _txc_dma_max_burst_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res:12;
-			uint32_t dma_max_burst:20;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t dma_max_burst:20;
-			uint32_t res:12;
-
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txc_dma_max_burst_t, *p_txc_dma_max_burst_t;
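Illustrative sketch (not from the removed header): programming the per-channel DRR max-burst value, for example to TXC_DMA_MAX_BURST_DEFAULT (1530) from nxge_txc.h, reusing the hypothetical txc_reg_read64()/txc_reg_write64() accessors from the previous sketch.

static void
example_set_max_burst(int channel, uint32_t burst)
{
	txc_dma_max_burst_t reg;

	/* each channel's register sits at the base offset plus channel * TXC_FZC_OFFSET */
	reg.value = txc_reg_read64(TXC_MAX_BURST_OFFSET(channel));
	reg.bits.ldw.dma_max_burst = (uint32_t)(burst & TXC_DMA_MAX_BURST_MASK);
	txc_reg_write64(TXC_MAX_BURST_OFFSET(channel), reg.value);
}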
-
-/* DRR Performance Monitoring Register */
-#define	TXC_DMA_MAX_LENGTH_REG		(FZC_TXC + 0x00008)
-#define	TXC_DMA_MAX_LENGTH_SHIFT	0	/* RW bit 27:0 */
-#define	TXC_DMA_MAX_LENGTH_MASK		0x000000000FFFFFFFULL
-
-#define	TXC_DMA_MAX_LEN_OFFSET(channel)	(TXC_DMA_MAX_LENGTH_REG + \
-					(channel * TXC_FZC_OFFSET))
-
-typedef union _txc_dma_max_length_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res:4;
-			uint32_t dma_length:28;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t dma_length:28;
-			uint32_t res:4;
-
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txc_dma_max_length_t, *p_txc_dma_max_length_t;
-
-
-#define	TXC_CONTROL_REG			(FZC_TXC + 0x20000)
-#define	TXC_DMA_LENGTH_SHIFT		0	/* RW bit 27:0 */
-#define	TXC_DMA_LENGTH_MASK		0x000000000FFFFFFFULL
-
-typedef union _txc_control_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res:27;
-			uint32_t txc_enabled:1;
-			uint32_t port3_enabled:1;
-			uint32_t port2_enabled:1;
-			uint32_t port1_enabled:1;
-			uint32_t port0_enabled:1;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t port0_enabled:1;
-			uint32_t port1_enabled:1;
-			uint32_t port2_enabled:1;
-			uint32_t port3_enabled:1;
-			uint32_t txc_enabled:1;
-			uint32_t res:27;
-
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txc_control_t, *p_txc_control_t;
-
-typedef union _txc_control_n2_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res:27;
-			uint32_t txc_enabled:1;
-			uint32_t res1:2;
-			uint32_t port1_enabled:1;
-			uint32_t port0_enabled:1;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t port0_enabled:1;
-			uint32_t port1_enabled:1;
-			uint32_t res1:2;
-			uint32_t txc_enabled:1;
-			uint32_t res:27;
-
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txc_control_n2_t, *p_txc_control_n2_t;
-
-
-#define	TXC_TRAINING_REG		(FZC_TXC + 0x20008)
-#define	TXC_TRAINING_VECTOR		0	/* RW bit 31:0 */
-#define	TXC_TRAINING_VECTOR_MASK	0x00000000FFFFFFFFULL
-
-typedef union _txc_training_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t txc_training_vector:32;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t txc_training_vector:32;
-
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txc_training_t, *p_txc_training_t;
-
-
-#define	TXC_DEBUG_SELECT_REG		(FZC_TXC + 0x20010)
-#define	TXC_DEBUG_SELECT_SHIFT		0	/* WO bit 5:0 */
-#define	TXC_DEBUG_SELECT_MASK		0x000000000000003FULL
-
-typedef union _txc_debug_select_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res:26;
-			uint32_t debug_select:6;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t debug_select:6;
-			uint32_t res:26;
-
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txc_debug_select_t, *p_txc_debug_select_t;
-
-
-#define	TXC_MAX_REORDER_REG		(FZC_TXC + 0x20018)
-#define	TXC_MAX_REORDER_MASK_2		(0xf)
-#define	TXC_MAX_REORDER_MASK_4		(0x7)
-#define	TXC_MAX_REORDER_SHIFT_BITS	8
-#define	TXC_MAX_REORDER_SHIFT(port)	(port * (TXC_MAX_REORDER_SHIFT_BITS))
-
-typedef union _txc_max_reorder_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t resv3:4;
-			uint32_t port3:4;
-			uint32_t resv2:4;
-			uint32_t port2:4;
-			uint32_t resv1:4;
-			uint32_t port1:4;
-			uint32_t resv0:4;
-			uint32_t port0:4;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t port0:4;
-			uint32_t resv0:4;
-			uint32_t port1:4;
-			uint32_t resv1:4;
-			uint32_t port2:4;
-			uint32_t resv2:4;
-			uint32_t port3:4;
-			uint32_t resv3:4;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txc_max_reorder_t, *p_txc_max_reorder_t;
-
-
-#define	TXC_PORT_CTL_REG		(FZC_TXC + 0x20020)	/* RO */
-#define	TXC_PORT_CTL_OFFSET(port)	(TXC_PORT_CTL_REG + \
-					(port * TXC_FZC_CONTROL_OFFSET))
-#define	TXC_PORT_CNTL_CLEAR		0x1
-
-typedef union _txc_port_ctl_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t rsvd:31;
-			uint32_t clr_all_stat:1;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t clr_all_stat:1;
-			uint32_t rsvd:31;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txc_port_ctl_t, *p_txc_port_ctl_t;
-
-#define	TXC_PKT_STUFFED_REG		(FZC_TXC + 0x20030)
-#define	TXC_PKT_STUFF_PKTASY_SHIFT	0	/* RW bit 15:0 */
-#define	TXC_PKT_STUFF_PKTASY_MASK	0x000000000000FFFFULL
-#define	TXC_PKT_STUFF_REORDER_SHIFT	16	/* RW bit 31:16 */
-#define	TXC_PKT_STUFF_REORDER_MASK	0x00000000FFFF0000ULL
-
-typedef union _txc_pkt_stuffed_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t pkt_pro_reorder:16;
-			uint32_t pkt_proc_pktasy:16;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t pkt_proc_pktasy:16;
-			uint32_t pkt_pro_reorder:16;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txc_pkt_stuffed_t, *p_txc_pkt_stuffed_t;
-
-
-#define	TXC_PKT_XMIT_REG		(FZC_TXC + 0x20038)
-#define	TXC_PKTS_XMIT_SHIFT		0	/* RW bit 15:0 */
-#define	TXC_PKTS_XMIT_MASK		0x000000000000FFFFULL
-#define	TXC_BYTES_XMIT_SHIFT		16	/* RW bit 31:16 */
-#define	TXC_BYTES_XMIT_MASK		0x00000000FFFF0000ULL
-
-typedef union _txc_pkt_xmit_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t bytes_transmitted:16;
-			uint32_t pkts_transmitted:16;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t pkts_transmitted:16;
-			uint32_t bytes_transmitted:16;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txc_pkt_xmit, *p_txc_pkt_xmit;
-
-
-/* count 4 step 0x00100 */
-#define	TXC_ROECC_CTL_REG		(FZC_TXC + 0x20040)
-#define	TXC_ROECC_CTL_OFFSET(port)	(TXC_ROECC_CTL_REG + \
-					(TXC_FZC_CNTL_PORT_OFFSET(port)))
-
-typedef union _txc_roecc_ctl_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t disable_ue_error:1;
-			uint32_t rsvd:13;
-			uint32_t double_bit_err:1;
-			uint32_t single_bit_err:1;
-			uint32_t rsvd_2:5;
-			uint32_t all_pkts:1;
-			uint32_t alternate_pkts:1;
-			uint32_t one_pkt:1;
-			uint32_t rsvd_3:5;
-			uint32_t last_line_pkt:1;
-			uint32_t second_line_pkt:1;
-			uint32_t firstd_line_pkt:1;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t firstd_line_pkt:1;
-			uint32_t second_line_pkt:1;
-			uint32_t last_line_pkt:1;
-			uint32_t rsvd_3:5;
-			uint32_t one_pkt:1;
-			uint32_t alternate_pkts:1;
-			uint32_t all_pkts:1;
-			uint32_t rsvd_2:5;
-			uint32_t single_bit_err:1;
-			uint32_t double_bit_err:1;
-			uint32_t rsvd:13;
-			uint32_t disable_ue_error:1;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txc_roecc_ctl_t, *p_txc_roecc_ctl_t;
-
-
-#define	TXC_ROECC_ST_REG		(FZC_TXC + 0x20048)
-
-#define	TXC_ROECC_ST_OFFSET(port)	(TXC_ROECC_ST_REG + \
-					(TXC_FZC_CNTL_PORT_OFFSET(port)))
-
-typedef union _txc_roecc_st_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t clr_st:1;
-			uint32_t res:13;
-			uint32_t correct_error:1;
-			uint32_t uncorrect_error:1;
-			uint32_t rsvd:6;
-			uint32_t ecc_address:10;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t ecc_address:10;
-			uint32_t rsvd:6;
-			uint32_t uncorrect_error:1;
-			uint32_t correct_error:1;
-			uint32_t res:13;
-			uint32_t clr_st:1;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txc_roecc_st_t, *p_txc_roecc_st_t;
-
-
-#define	TXC_RO_DATA0_REG		(FZC_TXC + 0x20050)
-#define	TXC_RO_DATA0_OFFSET(port)	(TXC_RO_DATA0_REG + \
-					(TXC_FZC_CNTL_PORT_OFFSET(port)))
-
-typedef union _txc_ro_data0_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t ro_ecc_data0:32;	/* ro_ecc_data[31:0] */
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t ro_ecc_data0:32;	/* ro_ecc_data[31:0] */
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txc_ro_data0_t, *p_txc_ro_data0_t;
-
-#define	TXC_RO_DATA1_REG		(FZC_TXC + 0x20058)
-#define	TXC_RO_DATA1_OFFSET(port)	(TXC_RO_DATA1_REG + \
-					(TXC_FZC_CNTL_PORT_OFFSET(port)))
-
-typedef union _txc_ro_data1_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t ro_ecc_data1:32;	/* ro_ecc_data[63:32] */
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t ro_ecc_data1:32;	/* ro_ecc_data[63:32] */
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txc_ro_data1_t, *p_txc_ro_data1_t;
-
-
-#define	TXC_RO_DATA2_REG		(FZC_TXC + 0x20060)
-
-#define	TXC_RO_DATA2_OFFSET(port)	(TXC_RO_DATA2_REG + \
-					(TXC_FZC_CNTL_PORT_OFFSET(port)))
-
-typedef union _txc_ro_data2_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t ro_ecc_data2:32;	/* ro_ecc_data[95:64] */
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t ro_ecc_data2:32;	/* ro_ecc_data[95:64] */
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txc_ro_data2_t, *p_txc_ro_data2_t;
-
-#define	TXC_RO_DATA3_REG		(FZC_TXC + 0x20068)
-#define	TXC_RO_DATA3_OFFSET(port)	(TXC_RO_DATA3_REG + \
-					(TXC_FZC_CNTL_PORT_OFFSET(port)))
-
-typedef union _txc_ro_data3_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t ro_ecc_data3:32; /* ro_ecc_data[127:96] */
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t ro_ecc_data3:32; /* ro_ecc_data[127:96] */
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txc_ro_data3_t, *p_txc_ro_data3_t;
-
-#define	TXC_RO_DATA4_REG		(FZC_TXC + 0x20070)
-#define	TXC_RO_DATA4_OFFSET(port)	(TXC_RO_DATA4_REG + \
-					(TXC_FZC_CNTL_PORT_OFFSET(port)))
-
-typedef union _txc_ro_data4_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t ro_ecc_data4:32; /* ro_ecc_data[151:128] */
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t ro_ecc_data4:32; /* ro_ecc_data[151:128] */
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txc_ro_data4_t, *p_txc_ro_data4_t;
-
-/* count 4 step 0x00100 */
-#define	TXC_SFECC_CTL_REG		(FZC_TXC + 0x20078)
-#define	TXC_SFECC_CTL_OFFSET(port)	(TXC_SFECC_CTL_REG + \
-					(TXC_FZC_CNTL_PORT_OFFSET(port)))
-
-typedef union _txc_sfecc_ctl_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t disable_ue_error:1;
-			uint32_t rsvd:13;
-			uint32_t double_bit_err:1;
-			uint32_t single_bit_err:1;
-			uint32_t rsvd_2:5;
-			uint32_t all_pkts:1;
-			uint32_t alternate_pkts:1;
-			uint32_t one_pkt:1;
-			uint32_t rsvd_3:5;
-			uint32_t last_line_pkt:1;
-			uint32_t second_line_pkt:1;
-			uint32_t firstd_line_pkt:1;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t firstd_line_pkt:1;
-			uint32_t second_line_pkt:1;
-			uint32_t last_line_pkt:1;
-			uint32_t rsvd_3:5;
-			uint32_t one_pkt:1;
-			uint32_t alternate_pkts:1;
-			uint32_t all_pkts:1;
-			uint32_t rsvd_2:5;
-			uint32_t single_bit_err:1;
-			uint32_t double_bit_err:1;
-			uint32_t rsvd:13;
-			uint32_t disable_ue_error:1;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txc_sfecc_ctl_t, *p_txc_sfecc_ctl_t;
-
-#define	TXC_SFECC_ST_REG		(FZC_TXC + 0x20080)
-#define	TXC_SFECC_ST_OFFSET(port)	(TXC_SFECC_ST_REG + \
-					(TXC_FZC_CNTL_PORT_OFFSET(port)))
-
-typedef union _txc_sfecc_st_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t clr_st:1;
-			uint32_t res:13;
-			uint32_t correct_error:1;
-			uint32_t uncorrect_error:1;
-			uint32_t rsvd:6;
-			uint32_t ecc_address:10;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t ecc_address:10;
-			uint32_t rsvd:6;
-			uint32_t uncorrect_error:1;
-			uint32_t correct_error:1;
-			uint32_t res:13;
-			uint32_t clr_st:1;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txc_sfecc_st_t, *p_txc_sfecc_st_t;
-
-#define	TXC_SF_DATA0_REG		(FZC_TXC + 0x20088)
-#define	TXC_SF_DATA0_OFFSET(port)	(TXC_SF_DATA0_REG + \
-					(TXC_FZC_CNTL_PORT_OFFSET(port)))
-
-typedef union _txc_sf_data0_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t sf_ecc_data0:32;	/* sf_ecc_data[31:0] */
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t sf_ecc_data0:32;	/* sf_ecc_data[31:0] */
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txc_sf_data0_t, *p_txc_sf_data0_t;
-
-#define	TXC_SF_DATA1_REG		(FZC_TXC + 0x20090)
-#define	TXC_SF_DATA1_OFFSET(port)	(TXC_SF_DATA1_REG + \
-					(TXC_FZC_CNTL_PORT_OFFSET(port)))
-
-typedef union _txc_sf_data1_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t sf_ecc_data1:32;	/* sf_ecc_data[63:32] */
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t sf_ecc_data1:32;	/* sf_ecc_data[63:32] */
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txc_sf_data1_t, *p_txc_sf_data1_t;
-
-
-#define	TXC_SF_DATA2_REG		(FZC_TXC + 0x20098)
-#define	TXC_SF_DATA2_OFFSET(port)	(TXC_SF_DATA2_REG + \
-					(TXC_FZC_CNTL_PORT_OFFSET(port)))
-
-typedef union _txc_sf_data2_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t sf_ecc_data2:32;	/* sf_ecc_data[95:64] */
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t sf_ecc_data2:32;	/* sf_ecc_data[95:64] */
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txc_sf_data2_t, *p_txc_sf_data2_t;
-
-#define	TXC_SF_DATA3_REG		(FZC_TXC + 0x200A0)
-#define	TXC_SF_DATA3_OFFSET(port)	(TXC_SF_DATA3_REG + \
-					(TXC_FZC_CNTL_PORT_OFFSET(port)))
-
-typedef union _txc_sf_data3_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t sf_ecc_data3:32; /* sf_ecc_data[127:96] */
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t sf_ecc_data3:32; /* sf_ecc_data[127:96] */
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txc_sf_data3_t, *p_txc_sf_data3_t;
-
-#define	TXC_SF_DATA4_REG		(FZC_TXC + 0x200A8)
-#define	TXC_SF_DATA4_OFFSET(port)	(TXC_SF_DATA4_REG + \
-					(TXC_FZC_CNTL_PORT_OFFSET(port)))
-
-typedef union _txc_sf_data4_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t sf_ecc_data4:32; /* sf_ecc_data[151:128] */
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t sf_ecc_data4:32; /* sf_ecc_data[151:128] */
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txc_sf_data4_t, *p_txc_sf_data4_t;
-
-#define	TXC_RO_TIDS_REG			(FZC_TXC + 0x200B0)
-#define	TXC_RO_TIDS_OFFSET(port)	(TXC_RO_TIDS_REG + \
-					(TXC_FZC_CNTL_PORT_OFFSET(port)))
-#define	TXC_RO_TIDS_MASK		0x00000000FFFFFFFFULL
-
-typedef union _txc_ro_tids_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t tids_in_use:32;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t tids_in_use:32;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txc_ro_tids_t, *p_txc_ro_tids_t;
-
-#define	TXC_RO_STATE0_REG		(FZC_TXC + 0x200B8)
-#define	TXC_RO_STATE0_OFFSET(port)	(TXC_RO_STATE0_REG + \
-					(TXC_FZC_CNTL_PORT_OFFSET(port)))
-#define	TXC_RO_STATE0_MASK		0x00000000FFFFFFFFULL
-
-typedef union _txc_ro_state0_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t duplicate_tid:32;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t duplicate_tid:32;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txc_ro_state0_t, *p_txc_ro_state0_t;
-
-#define	TXC_RO_STATE1_REG		(FZC_TXC + 0x200C0)
-#define	TXC_RO_STATE1_OFFSET(port)	(TXC_RO_STATE1_REG + \
-					(TXC_FZC_CNTL_PORT_OFFSET(port)))
-#define	TXC_RO_STATE1_MASK		0x00000000FFFFFFFFULL
-
-typedef union _txc_ro_state1_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t unused_tid:32;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t unused_tid:32;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txc_ro_state1_t, *p_txc_ro_state1_t;
-
-#define	TXC_RO_STATE2_REG		(FZC_TXC + 0x200C8)
-#define	TXC_RO_STATE2_OFFSET(port)	(TXC_RO_STATE2_REG + \
-					(TXC_FZC_CNTL_PORT_OFFSET(port)))
-#define	TXC_RO_STATE2_MASK		0x00000000FFFFFFFFULL
-
-typedef union _txc_ro_state2_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t transaction_timeout:32;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t transaction_timeout:32;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txc_ro_state2_t, *p_txc_ro_state2_t;
-
-#define	TXC_RO_STATE3_REG		(FZC_TXC + 0x200D0)
-#define	TXC_RO_STATE3_OFFSET(port)	(TXC_RO_STATE3_REG + \
-					(TXC_FZC_CNTL_PORT_OFFSET(port)))
-
-typedef union _txc_ro_state3_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t enable_spacefilled_watermark:1;
-			uint32_t ro_spacefilled_watermask:10;
-			uint32_t ro_fifo_spaceavailable:10;
-			uint32_t rsv:2;
-			uint32_t enable_ro_watermark:1;
-			uint32_t highest_reorder_used:4;
-			uint32_t num_reorder_used:4;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t num_reorder_used:4;
-			uint32_t highest_reorder_used:4;
-			uint32_t enable_ro_watermark:1;
-			uint32_t rsv:2;
-			uint32_t ro_fifo_spaceavailable:10;
-			uint32_t ro_spacefilled_watermask:10;
-			uint32_t enable_spacefilled_watermark:1;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txc_ro_state3_t, *p_txc_ro_state3_t;
-
-#define	TXC_RO_CTL_REG			(FZC_TXC + 0x200D8)
-#define	TXC_RO_CTL_OFFSET(port)		(TXC_RO_CTL_REG + \
-					(TXC_FZC_CNTL_PORT_OFFSET(port)))
-
-typedef union _txc_ro_ctl_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t clr_fail_state:1;
-			uint32_t rsvd3:3;
-			uint32_t ro_addr1:4;
-			uint32_t rsvd2:1;
-			uint32_t address_failed:1;
-			uint32_t dma_failed:1;
-			uint32_t length_failed:1;
-			uint32_t rsv:1;
-			uint32_t capture_address_fail:1;
-			uint32_t capture_dma_fail:1;
-			uint32_t capture_length_fail:1;
-			uint32_t rsvd:8;
-			uint32_t ro_state_rd_done:1;
-			uint32_t ro_state_wr_done:1;
-			uint32_t ro_state_rd:1;
-			uint32_t ro_state_wr:1;
-			uint32_t ro_state_addr:4;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t ro_state_addr:4;
-			uint32_t ro_state_wr:1;
-			uint32_t ro_state_rd:1;
-			uint32_t ro_state_wr_done:1;
-			uint32_t ro_state_rd_done:1;
-			uint32_t rsvd:8;
-			uint32_t capture_length_fail:1;
-			uint32_t capture_dma_fail:1;
-			uint32_t capture_address_fail:1;
-			uint32_t rsv:1;
-			uint32_t length_failed:1;
-			uint32_t dma_failed:1;
-			uint32_t address_failed:1;
-			uint32_t rsvd2:1;
-			uint32_t ro_addr1:4;
-			uint32_t rsvd3:3;
-			uint32_t clr_fail_state:1;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txc_ro_ctl_t, *p_txc_ro_ctl_t;
-
-
-#define	TXC_RO_ST_DATA0_REG		(FZC_TXC + 0x200E0)
-#define	TXC_RO_ST_DATA0_OFFSET(port)	(TXC_RO_ST_DATA0_REG + \
-					(TXC_FZC_CNTL_PORT_OFFSET(port)))
-#define	TXC_RO_ST_DATA0_MASK		0x00000000FFFFFFFFULL
-
-typedef union _txc_ro_st_data0_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t ro_st_dat0:32;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t ro_st_dat0:32;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txc_ro_st_data0_t, *p_txc_ro_st_data0_t;
-
-
-#define	TXC_RO_ST_DATA1_REG		(FZC_TXC + 0x200E8)
-#define	TXC_RO_ST_DATA1_OFFSET(port)	(TXC_RO_ST_DATA1_REG + \
-					(TXC_FZC_CNTL_PORT_OFFSET(port)))
-#define	TXC_RO_ST_DATA1_MASK		0x00000000FFFFFFFFULL
-
-typedef union _txc_ro_st_data1_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t ro_st_dat1:32;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t ro_st_dat1:32;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txc_ro_st_data1_t, *p_txc_ro_st_data1_t;
-
-
-#define	TXC_RO_ST_DATA2_REG		(FZC_TXC + 0x200F0)
-#define	TXC_RO_ST_DATA2_OFFSET(port)	(TXC_RO_ST_DATA2_REG + \
-					(TXC_FZC_CNTL_PORT_OFFSET(port)))
-#define	TXC_RO_ST_DATA2_MASK		0x00000000FFFFFFFFULL
-
-typedef union _txc_ro_st_data2_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t ro_st_dat2:32;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t ro_st_dat2:32;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txc_ro_st_data2_t, *p_txc_ro_st_data2_t;
-
-#define	TXC_RO_ST_DATA3_REG		(FZC_TXC + 0x200F8)
-#define	TXC_RO_ST_DATA3_OFFSET(port)	(TXC_RO_ST_DATA3_REG + \
-					(TXC_FZC_CNTL_PORT_OFFSET(port)))
-#define	TXC_RO_ST_DATA3_MASK		0x00000000FFFFFFFFULL
-
-typedef union _txc_ro_st_data3_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t ro_st_dat3:32;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t ro_st_dat3:32;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txc_ro_st_data3_t, *p_txc_ro_st_data3_t;
-
-#define	TXC_PORT_PACKET_REQ_REG		(FZC_TXC + 0x20100)
-#define	TXC_PORT_PACKET_REQ_OFFSET(port) (TXC_PORT_PACKET_REQ_REG + \
-					(TXC_FZC_CNTL_PORT_OFFSET(port)))
-#define	TXC_PORT_PACKET_REQ_MASK	0x00000000FFFFFFFFULL
-
-typedef union _txc_port_packet_req_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t gather_req:4;
-			uint32_t packet_eq:12;
-			uint32_t pkterr_abort:16;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t pkterr_abort:16;
-			uint32_t packet_eq:12;
-			uint32_t gather_req:4;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txc_port_packet_req_t, *p_txc_port_packet_req_t;
-
-/* Reorder error bits in interrupt registers  */
-#define	TXC_INT_STAT_SF_CORR_ERR	0x01
-#define	TXC_INT_STAT_SF_UNCORR_ERR	0x02
-#define	TXC_INT_STAT_RO_CORR_ERR	0x04
-#define	TXC_INT_STAT_RO_UNCORR_ERR	0x08
-#define	TXC_INT_STAT_REORDER_ERR	0x10
-#define	TXC_INT_STAT_PKTASSYDEAD	0x20
-
-#define	TXC_INT_STAT_DBG_REG		(FZC_TXC + 0x20420)
-#define	TXC_INT_STAT_DBG_MASK		0x00000000FFFFFFFFULL
-
-typedef union _txc_int_stat_dbg_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t rsvd3:2;
-			uint32_t port3_int_status:6;
-			uint32_t rsvd2:2;
-			uint32_t port2_int_status:6;
-			uint32_t rsvd1:2;
-			uint32_t port1_int_status:6;
-			uint32_t rsvd:2;
-			uint32_t port0_int_status:6;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t port0_int_status:6;
-			uint32_t rsvd:2;
-			uint32_t port1_int_status:6;
-			uint32_t rsvd1:2;
-			uint32_t port2_int_status:6;
-			uint32_t rsvd2:2;
-			uint32_t port3_int_status:6;
-			uint32_t rsvd3:2;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txc_int_stat_dbg_t, *p_txc_int_stat_dbg_t;
-
-
-#define	TXC_INT_STAT_REG		(FZC_TXC + 0x20428)
-#define	TXC_INT_STAT_MASK		0x00000000FFFFFFFFULL
-
-typedef union _txc_int_stat_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t rsvd3:2;
-			uint32_t port3_int_status:6;
-			uint32_t rsvd2:2;
-			uint32_t port2_int_status:6;
-			uint32_t rsvd1:2;
-			uint32_t port1_int_status:6;
-			uint32_t rsvd:2;
-			uint32_t port0_int_status:6;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t port0_int_status:6;
-			uint32_t rsvd:2;
-			uint32_t port1_int_status:6;
-			uint32_t rsvd1:2;
-			uint32_t port2_int_status:6;
-			uint32_t rsvd2:2;
-			uint32_t port3_int_status:6;
-			uint32_t rsvd3:2;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txc_int_stat_t, *p_txc_int_stat_t;
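Illustrative sketch (not from the removed header): decoding one port's 6-bit status field from TXC_INT_STAT_REG against the error bits defined above, again using the hypothetical txc_reg_read64() accessor from the earlier sketches.

static void
example_check_txc_port0(void)
{
	txc_int_stat_t istat;
	uint32_t port0;

	istat.value = txc_reg_read64(TXC_INT_STAT_REG);
	port0 = istat.bits.ldw.port0_int_status;

	if (port0 & TXC_INT_STAT_RO_UNCORR_ERR) {
		/* uncorrectable reorder-buffer ECC error on port 0 */
	}
	if (port0 & TXC_INT_STAT_REORDER_ERR) {
		/* reorder failure; consult the TXC_RO_* state registers */
	}
}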
-
-#define	TXC_INT_MASK_REG		(FZC_TXC + 0x20430)
-#define	TXC_INT_MASK_MASK		0x00000000FFFFFFFFULL
-
-typedef union _txc_int_mask_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t rsvd3:2;
-			uint32_t port3_int_mask:6;
-			uint32_t rsvd2:2;
-			uint32_t port2_int_mask:6;
-			uint32_t rsvd1:2;
-			uint32_t port1_int_mask:6;
-			uint32_t rsvd:2;
-			uint32_t port0_int_mask:6;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t port0_int_mask:6;
-			uint32_t rsvd:2;
-			uint32_t port1_int_mask:6;
-			uint32_t rsvd1:2;
-			uint32_t port2_int_mask:6;
-			uint32_t rsvd2:2;
-			uint32_t port3_int_mask:6;
-			uint32_t rsvd3:2;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txc_int_mask_t, *p_txc_int_mask_t;
-
-/* 2 ports */
-typedef union _txc_int_mask_n2_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t rsvd1:18;
-			uint32_t port1_int_mask:6;
-			uint32_t rsvd:2;
-			uint32_t port0_int_mask:6;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t port0_int_mask:6;
-			uint32_t rsvd:2;
-			uint32_t port1_int_mask:6;
-			uint32_t rsvd1:18;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txc_int_mask_n2_t, *p_txc_int_mask_n2_t;
-
-typedef	struct _txc_ro_states {
-	txc_roecc_st_t		roecc;
-	txc_ro_data0_t		d0;
-	txc_ro_data1_t		d1;
-	txc_ro_data2_t		d2;
-	txc_ro_data3_t		d3;
-	txc_ro_data4_t		d4;
-	txc_ro_tids_t		tids;
-	txc_ro_state0_t		st0;
-	txc_ro_state1_t		st1;
-	txc_ro_state2_t		st2;
-	txc_ro_state3_t		st3;
-	txc_ro_ctl_t		ctl;
-} txc_ro_states_t, *p_txc_ro_states_t;
-
-typedef	struct _txc_sf_states {
-	txc_sfecc_st_t		sfecc;
-	txc_sf_data0_t		d0;
-	txc_sf_data1_t		d1;
-	txc_sf_data2_t		d2;
-	txc_sf_data3_t		d3;
-	txc_sf_data4_t		d4;
-} txc_sf_states_t, *p_txc_sf_states_t;
-
-#ifdef	__cplusplus
-}
-#endif
-
-#endif	/* _SYS_NXGE_NXGE_TXC_HW_H */
--- a/usr/src/uts/sun4v/sys/nxge/nxge_txdma.h	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,304 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef	_SYS_NXGE_NXGE_TXDMA_H
-#define	_SYS_NXGE_NXGE_TXDMA_H
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#ifdef	__cplusplus
-extern "C" {
-#endif
-
-#include <sys/nxge/nxge_txdma_hw.h>
-#include <npi_txdma.h>
-
-#define	TXDMA_PORT_BITMAP(nxgep)		(nxgep->pt_config.tx_dma_map)
-
-#define	TXDMA_RECLAIM_PENDING_DEFAULT		64
-#define	TX_FULL_MARK				3
-
-/*
- * Transmit load balancing definitions.
- */
-#define	NXGE_TX_LB_TCPUDP			0	/* default policy */
-#define	NXGE_TX_LB_HASH				1	/* from the hint data */
-#define	NXGE_TX_LB_DEST_MAC			2	/* Dest. MAC */
-
-/*
- * Descriptor ring empty:
- *		(1) head index is equal to tail index.
- *		(2) the wrap-around bits are the same.
- * Descriptor ring full:
- *		(1) head index is equal to tail index.
- *		(2) the wrap-around bits are different.
- *
- */
-#define	TXDMA_RING_EMPTY(head, head_wrap, tail, tail_wrap)	\
-	((head == tail && head_wrap == tail_wrap) ? B_TRUE : B_FALSE)
-
-#define	TXDMA_RING_FULL(head, head_wrap, tail, tail_wrap)	\
-	((head == tail && head_wrap != tail_wrap) ? B_TRUE : B_FALSE)
-
-#define	TXDMA_DESC_NEXT_INDEX(index, entries, wrap_mask) \
-			((index + entries) & wrap_mask)
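Illustrative sketch (not from the removed header) of the wrap-bit convention described above; the parameter names are illustrative only.

static boolean_t
example_ring_has_room(uint_t head, boolean_t head_wrap,
    uint_t tail, boolean_t tail_wrap)
{
	/* equal indexes with equal wrap bits mean empty; differing wrap bits mean full */
	return (TXDMA_RING_FULL(head, head_wrap, tail, tail_wrap) ?
	    B_FALSE : B_TRUE);
}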
-
-#define	TXDMA_DRR_WEIGHT_DEFAULT	0x001f
-
-typedef struct _tx_msg_t {
-	nxge_os_block_mv_t 	flags;		/* DMA, BCOPY, DVMA (?) */
-	nxge_os_dma_common_t	buf_dma;	/* premapped buffer blocks */
-	nxge_os_dma_handle_t	buf_dma_handle; /* premapped buffer handle */
-	nxge_os_dma_handle_t 	dma_handle;	/* DMA handle for normal send */
-	nxge_os_dma_handle_t 	dvma_handle;	/* Fast DVMA  handle */
-
-	p_mblk_t 		tx_message;
-	uint32_t 		tx_msg_size;
-	size_t			bytes_used;
-	int			head;
-	int			tail;
-} tx_msg_t, *p_tx_msg_t;
-
-/*
- * TX  Statistics.
- */
-typedef struct _nxge_tx_ring_stats_t {
-	uint64_t	opackets;
-	uint64_t	obytes;
-	uint64_t	oerrors;
-
-	uint32_t	tx_inits;
-	uint32_t	tx_no_buf;
-
-	uint32_t		mbox_err;
-	uint32_t		pkt_size_err;
-	uint32_t 		tx_ring_oflow;
-	uint32_t 		pre_buf_par_err;
-	uint32_t 		nack_pref;
-	uint32_t 		nack_pkt_rd;
-	uint32_t 		conf_part_err;
-	uint32_t 		pkt_part_err;
-	uint32_t		tx_starts;
-	uint32_t		tx_nocanput;
-	uint32_t		tx_msgdup_fail;
-	uint32_t		tx_allocb_fail;
-	uint32_t		tx_no_desc;
-	uint32_t		tx_dma_bind_fail;
-	uint32_t		tx_uflo;
-
-	uint32_t		tx_hdr_pkts;
-	uint32_t		tx_ddi_pkts;
-	uint32_t		tx_dvma_pkts;
-
-	uint32_t		tx_max_pend;
-	uint32_t		tx_jumbo_pkts;
-
-	txdma_ring_errlog_t	errlog;
-} nxge_tx_ring_stats_t, *p_nxge_tx_ring_stats_t;
-
-typedef struct _tx_ring_t {
-	nxge_os_dma_common_t	tdc_desc;
-	struct _nxge_t		*nxgep;
-	p_tx_msg_t 		tx_msg_ring;
-	uint32_t		tnblocks;
-	tx_rng_cfig_t		tx_ring_cfig;
-	tx_ring_hdl_t		tx_ring_hdl;
-	tx_ring_kick_t		tx_ring_kick;
-	tx_cs_t			tx_cs;
-	tx_dma_ent_msk_t	tx_evmask;
-	txdma_mbh_t		tx_mbox_mbh;
-	txdma_mbl_t		tx_mbox_mbl;
-	log_page_vld_t		page_valid;
-	log_page_mask_t		page_mask_1;
-	log_page_mask_t		page_mask_2;
-	log_page_value_t	page_value_1;
-	log_page_value_t	page_value_2;
-	log_page_relo_t		page_reloc_1;
-	log_page_relo_t		page_reloc_2;
-	log_page_hdl_t		page_hdl;
-	txc_dma_max_burst_t	max_burst;
-	boolean_t		cfg_set;
-	uint32_t		tx_ring_state;
-
-	nxge_os_mutex_t		lock;
-	uint16_t 		index;
-	uint16_t		tdc;
-	struct nxge_tdc_cfg	*tdc_p;
-	uint_t 			tx_ring_size;
-	uint32_t 		num_chunks;
-
-	uint_t 			tx_wrap_mask;
-	uint_t 			rd_index;
-	uint_t 			wr_index;
-	boolean_t		wr_index_wrap;
-	uint_t 			head_index;
-	boolean_t		head_wrap;
-	tx_ring_hdl_t		ring_head;
-	tx_ring_kick_t		ring_kick_tail;
-	txdma_mailbox_t		tx_mbox;
-
-	uint_t 			descs_pending;
-	boolean_t 		queueing;
-
-	nxge_os_mutex_t		sq_lock;
-
-	p_mblk_t 		head;
-	p_mblk_t 		tail;
-
-	uint16_t		ldg_group_id;
-	p_nxge_tx_ring_stats_t tdc_stats;
-
-	nxge_os_mutex_t 	dvma_lock;
-	uint_t 			dvma_wr_index;
-	uint_t 			dvma_rd_index;
-	uint_t 			dvma_pending;
-	uint_t 			dvma_available;
-	uint_t 			dvma_wrap_mask;
-
-	nxge_os_dma_handle_t 	*dvma_ring;
-
-#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
-	uint64_t		hv_tx_buf_base_ioaddr_pp;
-	uint64_t		hv_tx_buf_ioaddr_size;
-	uint64_t		hv_tx_cntl_base_ioaddr_pp;
-	uint64_t		hv_tx_cntl_ioaddr_size;
-	boolean_t		hv_set;
-#endif
-} tx_ring_t, *p_tx_ring_t;
-
-
-/* Transmit Mailbox */
-typedef struct _tx_mbox_t {
-	nxge_os_mutex_t 	lock;
-	uint16_t		index;
-	struct _nxge_t		*nxgep;
-	uint16_t		tdc;
-	nxge_os_dma_common_t	tx_mbox;
-	txdma_mbl_t		tx_mbox_l;
-	txdma_mbh_t		tx_mbox_h;
-} tx_mbox_t, *p_tx_mbox_t;
-
-typedef struct _tx_rings_t {
-	p_tx_ring_t 		*rings;
-	boolean_t		txdesc_allocated;
-	uint32_t		ndmas;
-	nxge_os_dma_common_t	tdc_dma;
-	nxge_os_dma_common_t	tdc_mbox;
-} tx_rings_t, *p_tx_rings_t;
-
-
-#if defined(_KERNEL) || (defined(COSIM) && !defined(IODIAG))
-
-typedef struct _tx_buf_rings_t {
-	struct _tx_buf_ring_t 	*txbuf_rings;
-	boolean_t		txbuf_allocated;
-} tx_buf_rings_t, *p_tx_buf_rings_t;
-
-#endif
-
-typedef struct _tx_mbox_areas_t {
-	p_tx_mbox_t 		*txmbox_areas_p;
-	boolean_t		txmbox_allocated;
-} tx_mbox_areas_t, *p_tx_mbox_areas_t;
-
-typedef struct _tx_param_t {
-	nxge_logical_page_t tx_logical_pages[NXGE_MAX_LOGICAL_PAGES];
-} tx_param_t, *p_tx_param_t;
-
-typedef struct _tx_params {
-	struct _tx_param_t 	*tx_param_p;
-} tx_params_t, *p_tx_params_t;
-
-/*
- * Global register definitions per chip; they are initialized
- * using the function zero control registers.
- */
-typedef struct _txdma_globals {
-	boolean_t		mode32;
-} txdma_globals_t, *p_txdma_globals;
-
-
-#if	defined(SOLARIS) && (defined(_KERNEL) || \
-	(defined(COSIM) && !defined(IODIAG)))
-
-/*
- * Transmit prototypes.
- */
-nxge_status_t nxge_init_txdma_channels(p_nxge_t);
-void nxge_uninit_txdma_channels(p_nxge_t);
-void nxge_setup_dma_common(p_nxge_dma_common_t, p_nxge_dma_common_t,
-		uint32_t, uint32_t);
-nxge_status_t nxge_reset_txdma_channel(p_nxge_t, uint16_t,
-	uint64_t);
-nxge_status_t nxge_init_txdma_channel_event_mask(p_nxge_t,
-	uint16_t, p_tx_dma_ent_msk_t);
-nxge_status_t nxge_init_txdma_channel_cntl_stat(p_nxge_t,
-	uint16_t, uint64_t);
-nxge_status_t nxge_enable_txdma_channel(p_nxge_t, uint16_t,
-	p_tx_ring_t, p_tx_mbox_t);
-
-p_mblk_t nxge_tx_pkt_header_reserve(p_mblk_t, uint8_t *);
-int nxge_tx_pkt_nmblocks(p_mblk_t, int *);
-boolean_t nxge_txdma_reclaim(p_nxge_t, p_tx_ring_t, int);
-
-void nxge_fill_tx_hdr(p_mblk_t, boolean_t, boolean_t,
-	int, uint8_t, p_tx_pkt_hdr_all_t);
-
-nxge_status_t nxge_txdma_hw_mode(p_nxge_t, boolean_t);
-void nxge_hw_start_tx(p_nxge_t);
-void nxge_txdma_stop(p_nxge_t);
-void nxge_txdma_stop_start(p_nxge_t);
-void nxge_fixup_txdma_rings(p_nxge_t);
-void nxge_txdma_hw_kick(p_nxge_t);
-void nxge_txdma_fix_channel(p_nxge_t, uint16_t);
-void nxge_txdma_fixup_channel(p_nxge_t, p_tx_ring_t,
-	uint16_t);
-void nxge_txdma_hw_kick_channel(p_nxge_t, p_tx_ring_t,
-	uint16_t);
-
-void nxge_txdma_regs_dump(p_nxge_t, int);
-void nxge_txdma_regs_dump_channels(p_nxge_t);
-
-void nxge_check_tx_hang(p_nxge_t);
-void nxge_fixup_hung_txdma_rings(p_nxge_t);
-void nxge_txdma_fix_hung_channel(p_nxge_t, uint16_t);
-void nxge_txdma_fixup_hung_channel(p_nxge_t, p_tx_ring_t,
-	uint16_t);
-
-void nxge_reclaim_rings(p_nxge_t);
-int nxge_txdma_channel_hung(p_nxge_t,
-	p_tx_ring_t tx_ring_p, uint16_t);
-int nxge_txdma_hung(p_nxge_t);
-int nxge_txdma_stop_inj_err(p_nxge_t, int);
-void nxge_txdma_inject_err(p_nxge_t, uint32_t, uint8_t);
-
-#endif
-
-#ifdef	__cplusplus
-}
-#endif
-
-#endif	/* _SYS_NXGE_NXGE_TXDMA_H */
--- a/usr/src/uts/sun4v/sys/nxge/nxge_txdma_hw.h	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1031 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef	_SYS_NXGE_NXGE_TXDMA_HW_H
-#define	_SYS_NXGE_NXGE_TXDMA_HW_H
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#ifdef	__cplusplus
-extern "C" {
-#endif
-
-#include <nxge_defs.h>
-#include <nxge_hw.h>
-
-#if !defined(_BIG_ENDIAN)
-#define	SWAP(X)	(X)
-#else
-#define	SWAP(X)   \
-	(((X >> 32) & 0x00000000ffffffff) | \
-	((X << 32) & 0xffffffff00000000))
-#endif
-
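A minimal standalone sketch (host C, outside the driver; SWAP_WORDS is a local redeclaration so the example compiles on its own) of what the SWAP() macro above does on a _BIG_ENDIAN build: it exchanges the upper and lower 32-bit words of a 64-bit register image.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Word swap as performed by SWAP() on big-endian builds (copied from above). */
#define	SWAP_WORDS(X) \
	((((X) >> 32) & 0x00000000ffffffffULL) | \
	(((X) << 32) & 0xffffffff00000000ULL))

int
main(void)
{
	uint64_t reg = 0x1122334455667788ULL;

	/* Prints 5566778811223344: the two 32-bit halves trade places. */
	printf("%016" PRIx64 "\n", (uint64_t)SWAP_WORDS(reg));
	return (0);
}
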
-/*
- * Partitioning Support: same as those defined for the RX
- */
-/*
- * TDC: Partitioning Support
- *	(Each of the following registers is for each TDC)
- */
-#define	TX_LOG_REG_SIZE			512
-#define	TX_LOG_DMA_OFFSET(channel)	(channel * TX_LOG_REG_SIZE)
-
-#define	TX_LOG_PAGE_VLD_REG		(FZC_DMC + 0x40000)
-#define	TX_LOG_PAGE_MASK1_REG		(FZC_DMC + 0x40008)
-#define	TX_LOG_PAGE_VAL1_REG		(FZC_DMC + 0x40010)
-#define	TX_LOG_PAGE_MASK2_REG		(FZC_DMC + 0x40018)
-#define	TX_LOG_PAGE_VAL2_REG		(FZC_DMC + 0x40020)
-#define	TX_LOG_PAGE_RELO1_REG		(FZC_DMC + 0x40028)
-#define	TX_LOG_PAGE_RELO2_REG		(FZC_DMC + 0x40030)
-#define	TX_LOG_PAGE_HDL_REG		(FZC_DMC + 0x40038)
-
-/* Transmit Addressing Mode: Set to 1 to select 32-bit addressing mode */
-#define	TX_ADDR_MD_REG			(FZC_DMC + 0x45000)
-
-#define	TX_ADDR_MD_SHIFT	0			/* bits 0:0 */
-#define	TX_ADDR_MD_SET_32	0x0000000000000001ULL	/* 1 to select 32 bit */
-#define	TX_ADDR_MD_MASK		0x0000000000000001ULL
-
-typedef union _tx_addr_md_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res1_1:31;
-			uint32_t mode32:1;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t mode32:1;
-			uint32_t res1_1:31;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} tx_addr_md_t, *p_tx_addr_md_t;
-
-/* Transmit Packet Descriptor Structure */
-#define	TX_PKT_DESC_SAD_SHIFT		0		/* bits 43:0 */
-#define	TX_PKT_DESC_SAD_MASK		0x00000FFFFFFFFFFFULL
-#define	TX_PKT_DESC_TR_LEN_SHIFT	44		/* bits 56:44 */
-#define	TX_PKT_DESC_TR_LEN_MASK		0x01FFF00000000000ULL
-#define	TX_PKT_DESC_NUM_PTR_SHIFT	58		/* bits 61:58 */
-#define	TX_PKT_DESC_NUM_PTR_MASK	0x3C00000000000000ULL
-#define	TX_PKT_DESC_MARK_SHIFT		62		/* bit 62 */
-#define	TX_PKT_DESC_MARK		0x4000000000000000ULL
-#define	TX_PKT_DESC_MARK_MASK		0x4000000000000000ULL
-#define	TX_PKT_DESC_SOP_SHIFT		63		/* bit 63 */
-#define	TX_PKT_DESC_SOP			0x8000000000000000ULL
-#define	TX_PKT_DESC_SOP_MASK		0x8000000000000000ULL
-
-typedef union _tx_desc_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t sop:1;
-			uint32_t mark:1;
-			uint32_t num_ptr:4;
-			uint32_t res1:1;
-			uint32_t tr_len:13;
-			uint32_t sad:12;
-
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t sad:12;
-			uint32_t tr_len:13;
-			uint32_t res1:1;
-			uint32_t num_ptr:4;
-			uint32_t mark:1;
-			uint32_t sop:1;
-
-#endif
-		} hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t sad:32;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t sad:32;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		struct {
-
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t sop:1;
-			uint32_t mark:1;
-			uint32_t num_ptr:4;
-			uint32_t res1:1;
-			uint32_t tr_len:13;
-			uint32_t sad:12;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t sad:12;
-			uint32_t tr_len:13;
-			uint32_t res1:1;
-			uint32_t num_ptr:4;
-			uint32_t mark:1;
-			uint32_t sop:1;
-#endif
-		} hdw;
-#endif
-	} bits;
-} tx_desc_t, *p_tx_desc_t;
-
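A hedged, standalone sketch (not driver code; the macro values are copied from the definitions above, and build_tx_desc() is a hypothetical helper) of how one transmit descriptor word can be assembled with the SAD/TR_LEN/NUM_PTR shift and mask macros instead of the bit-field union.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Shift/mask values copied from the transmit descriptor definitions above. */
#define	TX_PKT_DESC_SAD_SHIFT		0
#define	TX_PKT_DESC_SAD_MASK		0x00000FFFFFFFFFFFULL
#define	TX_PKT_DESC_TR_LEN_SHIFT	44
#define	TX_PKT_DESC_TR_LEN_MASK		0x01FFF00000000000ULL
#define	TX_PKT_DESC_NUM_PTR_SHIFT	58
#define	TX_PKT_DESC_NUM_PTR_MASK	0x3C00000000000000ULL
#define	TX_PKT_DESC_SOP			0x8000000000000000ULL

/*
 * Build one descriptor word: sad is the 44-bit DMA address of the buffer,
 * len its transfer length, nptr the gather-pointer count (meaningful on
 * the SOP descriptor only).
 */
static uint64_t
build_tx_desc(uint64_t sad, uint32_t len, uint32_t nptr, int sop)
{
	uint64_t d = 0;

	d |= (sad << TX_PKT_DESC_SAD_SHIFT) & TX_PKT_DESC_SAD_MASK;
	d |= ((uint64_t)len << TX_PKT_DESC_TR_LEN_SHIFT) &
	    TX_PKT_DESC_TR_LEN_MASK;
	d |= ((uint64_t)nptr << TX_PKT_DESC_NUM_PTR_SHIFT) &
	    TX_PKT_DESC_NUM_PTR_MASK;
	if (sop)
		d |= TX_PKT_DESC_SOP;
	return (d);
}

int
main(void)
{
	/* Hypothetical 1514-byte frame carried in a single fragment. */
	printf("%016" PRIx64 "\n",
	    build_tx_desc(0x12345678000ULL, 1514, 1, 1));
	return (0);
}
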
-
-/* Transmit Ring Configuration (24 Channels) */
-#define	TX_RNG_CFIG_REG			(DMC + 0x40000)
-#if OLD
-#define	TX_RING_HDH_REG			(DMC + 0x40008)
-#endif
-#define	TX_RING_HDL_REG			(DMC + 0x40010)
-#define	TX_RING_KICK_REG		(DMC + 0x40018)
-#define	TX_ENT_MSK_REG			(DMC + 0x40020)
-#define	TX_CS_REG			(DMC + 0x40028)
-#define	TXDMA_MBH_REG			(DMC + 0x40030)
-#define	TXDMA_MBL_REG			(DMC + 0x40038)
-#define	TX_DMA_PRE_ST_REG		(DMC + 0x40040)
-#define	TX_RNG_ERR_LOGH_REG		(DMC + 0x40048)
-#define	TX_RNG_ERR_LOGL_REG		(DMC + 0x40050)
-#define	TDMC_INTR_DBG_REG		(DMC + 0x40060)
-#define	TX_CS_DBG_REG			(DMC + 0x40068)
-
-/* Transmit Ring Configuration */
-#define	TX_RNG_CFIG_STADDR_SHIFT	6			/* bits 18:6 */
-#define	TX_RNG_CFIG_STADDR_MASK		0x000000000007FFC0ULL
-#define	TX_RNG_CFIG_ADDR_MASK		0x00000FFFFFFFFFC0ULL
-#define	TX_RNG_CFIG_STADDR_BASE_SHIFT	19			/* bits 43:19 */
-#define	TX_RNG_CFIG_STADDR_BASE_MASK	0x00000FFFFFF80000ULL
-#define	TX_RNG_CFIG_LEN_SHIFT		48			/* bits 60:48 */
-#define	TX_RNG_CFIG_LEN_MASK		0xFFF8000000000000ULL
-
-#define	TX_RNG_HEAD_TAIL_SHIFT		3
-#define	TX_RNG_HEAD_TAIL_WRAP_SHIFT	19
-
-typedef union _tx_rng_cfig_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res2:3;
-			uint32_t len:13;
-			uint32_t res1:4;
-			uint32_t staddr_base:12;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t staddr_base:12;
-			uint32_t res1:4;
-			uint32_t len:13;
-			uint32_t res2:3;
-#endif
-		} hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t staddr_base:13;
-			uint32_t staddr:13;
-			uint32_t res2:6;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t res2:6;
-			uint32_t staddr:13;
-			uint32_t staddr_base:13;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res2:3;
-			uint32_t len:13;
-			uint32_t res1:4;
-			uint32_t staddr_base:12;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t staddr_base:12;
-			uint32_t res1:4;
-			uint32_t len:13;
-			uint32_t res2:3;
-#endif
-		} hdw;
-#endif
-	} bits;
-} tx_rng_cfig_t, *p_tx_rng_cfig_t;
-
-/* Transmit Ring Head Low */
-#define	TX_RING_HDL_SHIFT		3			/* bit 31:3 */
-#define	TX_RING_HDL_MASK		0x00000000FFFFFFF8ULL
-
-typedef union _tx_ring_hdl_t {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res0:12;
-			uint32_t wrap:1;
-			uint32_t head:16;
-			uint32_t res2:3;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t res2:3;
-			uint32_t head:16;
-			uint32_t wrap:1;
-			uint32_t res0:12;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} tx_ring_hdl_t, *p_tx_ring_hdl_t;
-
-/* Transmit Ring Kick */
-#define	TX_RING_KICK_TAIL_SHIFT		3			/* bit 43:3 */
-#define	TX_RING_KICK_TAIL_MASK		0x00000FFFFFFFFFF8ULL
-
-typedef union _tx_ring_kick_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res0:12;
-			uint32_t wrap:1;
-			uint32_t tail:16;
-			uint32_t res2:3;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t res2:3;
-			uint32_t tail:16;
-			uint32_t wrap:1;
-			uint32_t res0:12;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} tx_ring_kick_t, *p_tx_ring_kick_t;
-
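A hedged sketch (standalone; next_kick(), the ring size, and the index bookkeeping are illustrative assumptions) of how a software tail index and its wrap bit could be advanced and encoded for the kick register using TX_RNG_HEAD_TAIL_SHIFT and TX_RNG_HEAD_TAIL_WRAP_SHIFT above. Toggling the wrap bit each time the index passes the end of the ring is how the head and kick values distinguish a full ring from an empty one.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* From the definitions above: descriptor index at bits 18:3, wrap at bit 19. */
#define	TX_RNG_HEAD_TAIL_SHIFT		3
#define	TX_RNG_HEAD_TAIL_WRAP_SHIFT	19

/*
 * Advance a software tail index by 'ndescs' descriptors on a ring of
 * 'ring_size' entries (assumed a power of two) and encode the kick value.
 */
static uint64_t
next_kick(uint32_t *tail, int *wrap, uint32_t ndescs, uint32_t ring_size)
{
	*tail += ndescs;
	if (*tail >= ring_size) {
		*tail -= ring_size;
		*wrap = !*wrap;
	}
	return (((uint64_t)*tail << TX_RNG_HEAD_TAIL_SHIFT) |
	    ((uint64_t)(*wrap != 0) << TX_RNG_HEAD_TAIL_WRAP_SHIFT));
}

int
main(void)
{
	uint32_t tail = 1020;
	int wrap = 0;

	/* Posting 8 descriptors on a 1024-entry ring wraps the index. */
	printf("kick = %016" PRIx64 " tail = %u wrap = %d\n",
	    next_kick(&tail, &wrap, 8, 1024), tail, wrap);
	return (0);
}
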
-/* Transmit Event Mask (DMC + 0x40020) */
-#define	TX_ENT_MSK_PKT_PRT_ERR_SHIFT		0	/* bit 0: 0 to flag */
-#define	TX_ENT_MSK_PKT_PRT_ERR_MASK		0x0000000000000001ULL
-#define	TX_ENT_MSK_CONF_PART_ERR_SHIFT		1	/* bit 1: 0 to flag */
-#define	TX_ENT_MSK_CONF_PART_ERR_MASK		0x0000000000000002ULL
-#define	TX_ENT_MSK_NACK_PKT_RD_SHIFT		2	/* bit 2: 0 to flag */
-#define	TX_ENT_MSK_NACK_PKT_RD_MASK		0x0000000000000004ULL
-#define	TX_ENT_MSK_NACK_PREF_SHIFT		3	/* bit 3: 0 to flag */
-#define	TX_ENT_MSK_NACK_PREF_MASK		0x0000000000000008ULL
-#define	TX_ENT_MSK_PREF_BUF_ECC_ERR_SHIFT	4	/* bit 4: 0 to flag */
-#define	TX_ENT_MSK_PREF_BUF_ECC_ERR_MASK	0x0000000000000010ULL
-#define	TX_ENT_MSK_TX_RING_OFLOW_SHIFT		5	/* bit 5: 0 to flag */
-#define	TX_ENT_MSK_TX_RING_OFLOW_MASK		0x0000000000000020ULL
-#define	TX_ENT_MSK_PKT_SIZE_ERR_SHIFT		6	/* bit 6: 0 to flag */
-#define	TX_ENT_MSK_PKT_SIZE_ERR_MASK		0x0000000000000040ULL
-#define	TX_ENT_MSK_MBOX_ERR_SHIFT		7	/* bit 7: 0 to flag */
-#define	TX_ENT_MSK_MBOX_ERR_MASK		0x0000000000000080ULL
-#define	TX_ENT_MSK_MK_SHIFT			15	/* bit 15: 0 to flag */
-#define	TX_ENT_MSK_MK_MASK			0x0000000000008000ULL
-#define	TX_ENT_MSK_MK_ALL		(TX_ENT_MSK_PKT_PRT_ERR_MASK | \
-					TX_ENT_MSK_CONF_PART_ERR_MASK |	\
-					TX_ENT_MSK_NACK_PKT_RD_MASK |	\
-					TX_ENT_MSK_NACK_PREF_MASK |	\
-					TX_ENT_MSK_PREF_BUF_ECC_ERR_MASK | \
-					TX_ENT_MSK_TX_RING_OFLOW_MASK |	\
-					TX_ENT_MSK_PKT_SIZE_ERR_MASK | \
-					TX_ENT_MSK_MBOX_ERR_MASK | \
-					TX_ENT_MSK_MK_MASK)
-
-
-typedef union _tx_dma_ent_msk_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res1_1:16;
-			uint32_t mk:1;
-			uint32_t res2:7;
-			uint32_t mbox_err:1;
-			uint32_t pkt_size_err:1;
-			uint32_t tx_ring_oflow:1;
-			uint32_t pref_buf_ecc_err:1;
-			uint32_t nack_pref:1;
-			uint32_t nack_pkt_rd:1;
-			uint32_t conf_part_err:1;
-			uint32_t pkt_prt_err:1;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t pkt_prt_err:1;
-			uint32_t conf_part_err:1;
-			uint32_t nack_pkt_rd:1;
-			uint32_t nack_pref:1;
-			uint32_t pref_buf_ecc_err:1;
-			uint32_t tx_ring_oflow:1;
-			uint32_t pkt_size_err:1;
-			uint32_t mbox_err:1;
-			uint32_t res2:7;
-			uint32_t mk:1;
-			uint32_t res1_1:16;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} tx_dma_ent_msk_t, *p_tx_dma_ent_msk_t;
-
-
-/* Transmit Control and Status  (DMC + 0x40028) */
-#define	TX_CS_PKT_PRT_ERR_SHIFT			0	/* RO, bit 0 */
-#define	TX_CS_PKT_PRT_ERR_MASK			0x0000000000000001ULL
-#define	TX_CS_CONF_PART_ERR_SHIF		1	/* RO, bit 1 */
-#define	TX_CS_CONF_PART_ERR_MASK		0x0000000000000002ULL
-#define	TX_CS_NACK_PKT_RD_SHIFT			2	/* RO, bit 2 */
-#define	TX_CS_NACK_PKT_RD_MASK			0x0000000000000004ULL
-#define	TX_CS_PREF_SHIFT			3	/* RO, bit 3 */
-#define	TX_CS_PREF_MASK				0x0000000000000008ULL
-#define	TX_CS_PREF_BUF_PAR_ERR_SHIFT		4	/* RO, bit 4 */
-#define	TX_CS_PREF_BUF_PAR_ERR_MASK		0x0000000000000010ULL
-#define	TX_CS_RING_OFLOW_SHIFT			5	/* RO, bit 5 */
-#define	TX_CS_RING_OFLOW_MASK			0x0000000000000020ULL
-#define	TX_CS_PKT_SIZE_ERR_SHIFT		6	/* RW, bit 6 */
-#define	TX_CS_PKT_SIZE_ERR_MASK			0x0000000000000040ULL
-#define	TX_CS_MMK_SHIFT				14	/* RC, bit 14 */
-#define	TX_CS_MMK_MASK				0x0000000000004000ULL
-#define	TX_CS_MK_SHIFT				15	/* RCW1C, bit 15 */
-#define	TX_CS_MK_MASK				0x0000000000008000ULL
-#define	TX_CS_SNG_SHIFT				27	/* RO, bit 27 */
-#define	TX_CS_SNG_MASK				0x0000000008000000ULL
-#define	TX_CS_STOP_N_GO_SHIFT			28	/* RW, bit 28 */
-#define	TX_CS_STOP_N_GO_MASK			0x0000000010000000ULL
-#define	TX_CS_MB_SHIFT				29	/* RO, bit 29 */
-#define	TX_CS_MB_MASK				0x0000000020000000ULL
-#define	TX_CS_RST_STATE_SHIFT			30	/* Rw, bit 30 */
-#define	TX_CS_RST_STATE_MASK			0x0000000040000000ULL
-#define	TX_CS_RST_SHIFT				31	/* Rw, bit 31 */
-#define	TX_CS_RST_MASK				0x0000000080000000ULL
-#define	TX_CS_LASTMASK_SHIFT			32	/* RW, bit 43:32 */
-#define	TX_CS_LASTMARK_MASK			0x00000FFF00000000ULL
-#define	TX_CS_PKT_CNT_SHIFT			48	/* RW, bit 59:48 */
-#define	TX_CS_PKT_CNT_MASK			0x0FFF000000000000ULL
-
-/* Transmit Control and Status */
-typedef union _tx_cs_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res1:4;
-			uint32_t pkt_cnt:12;
-			uint32_t res2:4;
-			uint32_t lastmark:12;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t lastmark:12;
-			uint32_t res2:4;
-			uint32_t pkt_cnt:12;
-			uint32_t res1:4;
-#endif
-		} hdw;
-
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t rst:1;
-			uint32_t rst_state:1;
-			uint32_t mb:1;
-			uint32_t stop_n_go:1;
-			uint32_t sng_state:1;
-			uint32_t res1:11;
-			uint32_t mk:1;
-			uint32_t mmk:1;
-			uint32_t res2:6;
-			uint32_t mbox_err:1;
-			uint32_t pkt_size_err:1;
-			uint32_t tx_ring_oflow:1;
-			uint32_t pref_buf_par_err:1;
-			uint32_t nack_pref:1;
-			uint32_t nack_pkt_rd:1;
-			uint32_t conf_part_err:1;
-			uint32_t pkt_prt_err:1;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t pkt_prt_err:1;
-			uint32_t conf_part_err:1;
-			uint32_t nack_pkt_rd:1;
-			uint32_t nack_pref:1;
-			uint32_t pref_buf_par_err:1;
-			uint32_t tx_ring_oflow:1;
-			uint32_t pkt_size_err:1;
-			uint32_t mbox_err:1;
-			uint32_t res2:6;
-			uint32_t mmk:1;
-			uint32_t mk:1;
-			uint32_t res1:11;
-			uint32_t sng_state:1;
-			uint32_t stop_n_go:1;
-			uint32_t mb:1;
-			uint32_t rst_state:1;
-			uint32_t rst:1;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res1:4;
-			uint32_t pkt_cnt:12;
-			uint32_t res2:4;
-			uint32_t lastmark:12;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t lastmark:12;
-			uint32_t res2:4;
-			uint32_t pkt_cnt:12;
-			uint32_t res1:4;
-#endif
-	} hdw;
-
-#endif
-	} bits;
-} tx_cs_t, *p_tx_cs_t;
-
-/* Transmit Mailbox High (DMC + 0x40030) */
-#define	TXDMA_MBH_SHIFT			0	/* bit 11:0 */
-#define	TXDMA_MBH_ADDR_SHIFT		32	/* bit 43:32 */
-#define	TXDMA_MBH_MASK			0x0000000000000FFFULL
-
-typedef union _txdma_mbh_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res1_1:20;
-			uint32_t mbaddr:12;
-
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t mbaddr:12;
-			uint32_t res1_1:20;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txdma_mbh_t, *p_txdma_mbh_t;
-
-
-/* Transmit Mailbox Low (DMC + 0x40038) */
-#define	TXDMA_MBL_SHIFT			6	/* bit 31:6 */
-#define	TXDMA_MBL_MASK			0x00000000FFFFFFC0ULL
-
-typedef union _txdma_mbl_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t mbaddr:26;
-			uint32_t res2:6;
-
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t res2:6;
-			uint32_t mbaddr:26;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txdma_mbl_t, *p_txdma_mbl_t;
-
-/* Transmit Prefetch State High (DMC + 0x40040) */
-#define	TX_DMA_PREF_ST_SHIFT		0	/* bit 5:0 */
-#define	TX_DMA_PREF_ST_MASK		0x000000000000003FULL
-
-typedef union _tx_dma_pre_st_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res1_1:13;
-			uint32_t shadow_hd:19;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t shadow_hd:19;
-			uint32_t res1_1:13;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} tx_dma_pre_st_t, *p_tx_dma_pre_st_t;
-
-/* Transmit Ring Error Log High (DMC + 0x40048) */
-#define	TX_RNG_ERR_LOGH_ERR_ADDR_SHIFT		0	/* RO bit 11:0 */
-#define	TX_RNG_ERR_LOGH_ERR_ADDR_MASK		0x0000000000000FFFULL
-#define	TX_RNG_ERR_LOGH_ADDR_SHIFT		32
-#define	TX_RNG_ERR_LOGH_ERRCODE_SHIFT		26	/* RO bit 29:26 */
-#define	TX_RNG_ERR_LOGH_ERRCODE_MASK		0x000000003C000000ULL
-#define	TX_RNG_ERR_LOGH_MERR_SHIFT		30	/* RO bit 30 */
-#define	TX_RNG_ERR_LOGH_MERR_MASK		0x0000000040000000ULL
-#define	TX_RNG_ERR_LOGH_ERR_SHIFT		31	/* RO bit 31 */
-#define	TX_RNG_ERR_LOGH_ERR_MASK		0x0000000080000000ULL
-
-/* Transmit Ring Error codes */
-#define	TXDMA_RING_PKT_PRT_ERR			0
-#define	TXDMA_RING_CONF_PART_ERR		0x01
-#define	TXDMA_RING_NACK_PKT_ERR			0x02
-#define	TXDMA_RING_NACK_PREF_ERR		0x03
-#define	TXDMA_RING_PREF_BUF_PAR_ERR		0x04
-#define	TXDMA_RING_TX_RING_OFLOW_ERR		0x05
-#define	TXDMA_RING_PKT_SIZE_ERR			0x06
-
-typedef union _tx_rng_err_logh_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t err:1;
-			uint32_t merr:1;
-			uint32_t errcode:4;
-			uint32_t res2:14;
-			uint32_t err_addr:12;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t err_addr:12;
-			uint32_t res2:14;
-			uint32_t errcode:4;
-			uint32_t merr:1;
-			uint32_t err:1;
-
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} tx_rng_err_logh_t, *p_tx_rng_err_logh_t;
-
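A small standalone decoding sketch (macro values and error-code names copied from the definitions above; decode_err_logh() is a hypothetical helper) showing how the high word of the transmit ring error log breaks down into the ERR flag, the multiple-error flag, the error code, and the upper address bits.

#include <stdint.h>
#include <stdio.h>

/* Field extractors copied from the TX_RNG_ERR_LOGH definitions above. */
#define	TX_RNG_ERR_LOGH_ERR_MASK	0x0000000080000000ULL
#define	TX_RNG_ERR_LOGH_MERR_MASK	0x0000000040000000ULL
#define	TX_RNG_ERR_LOGH_ERRCODE_MASK	0x000000003C000000ULL
#define	TX_RNG_ERR_LOGH_ERRCODE_SHIFT	26
#define	TX_RNG_ERR_LOGH_ERR_ADDR_MASK	0x0000000000000FFFULL

static const char *tx_ring_err_name[] = {
	"PKT_PRT_ERR", "CONF_PART_ERR", "NACK_PKT_ERR", "NACK_PREF_ERR",
	"PREF_BUF_PAR_ERR", "TX_RING_OFLOW_ERR", "PKT_SIZE_ERR"
};

/* Decode the high word of the transmit ring error log. */
static void
decode_err_logh(uint64_t logh)
{
	unsigned int code;

	if (!(logh & TX_RNG_ERR_LOGH_ERR_MASK)) {
		printf("no error logged\n");
		return;
	}
	code = (unsigned int)((logh & TX_RNG_ERR_LOGH_ERRCODE_MASK) >>
	    TX_RNG_ERR_LOGH_ERRCODE_SHIFT);
	printf("err=%s multi=%u addr_hi=0x%03x\n",
	    (code < 7) ? tx_ring_err_name[code] : "reserved",
	    (unsigned int)((logh & TX_RNG_ERR_LOGH_MERR_MASK) != 0),
	    (unsigned int)(logh & TX_RNG_ERR_LOGH_ERR_ADDR_MASK));
}

int
main(void)
{
	/* Hypothetical log word: ERR set, code 5 (ring overflow), addr 0x123. */
	decode_err_logh(0x0000000094000123ULL);
	return (0);
}
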
-
-/* Transmit Ring Error Log Low (DMC + 0x40050) */
-#define	TX_RNG_ERR_LOGL_ERR_ADDR_SHIFT		0	/* RO bit 31:0 */
-#define	TX_RNG_ERR_LOGL_ERR_ADDR_MASK		0x00000000FFFFFFFFULL
-
-typedef union _tx_rng_err_logl_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t err_addr:32;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t err_addr:32;
-
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} tx_rng_err_logl_t, *p_tx_rng_err_logl_t;
-
-/*
- * TDMC_INTR_DBG_REG (DMC + 0x40060)
- */
-typedef union _tdmc_intr_dbg_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res:16;
-			uint32_t mk:1;
-			uint32_t rsvd:7;
-			uint32_t mbox_err:1;
-			uint32_t pkt_size_err:1;
-			uint32_t tx_ring_oflow:1;
-			uint32_t pref_buf_par_err:1;
-			uint32_t nack_pref:1;
-			uint32_t nack_pkt_rd:1;
-			uint32_t conf_part_err:1;
-			uint32_t pkt_part_err:1;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t pkt_part_err:1;
-			uint32_t conf_part_err:1;
-			uint32_t nack_pkt_rd:1;
-			uint32_t nack_pref:1;
-			uint32_t pref_buf_par_err:1;
-			uint32_t tx_ring_oflow:1;
-			uint32_t pkt_size_err:1;
-			uint32_t mbox_err:1;
-			uint32_t rsvd:7;
-			uint32_t mk:1;
-			uint32_t res:16;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} tdmc_intr_dbg_t, *p_tdmc_intr_dbg_t;
-
-
-/*
- * TX_CS_DBG (DMC + 0x40068)
- */
-typedef union _tx_cs_dbg_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res1:4;
-			uint32_t pkt_cnt:12;
-			uint32_t res2:16;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t res2:16;
-			uint32_t pkt_cnt:12;
-			uint32_t res1:4;
-#endif
-		} hdw;
-
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t rsvd:32;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t rsvd:32;
-
-#endif
-		} ldw;
-
-#ifndef _BIG_ENDIAN
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res1:4;
-			uint32_t pkt_cnt:12;
-			uint32_t res2:16;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t res2:16;
-			uint32_t pkt_cnt:12;
-			uint32_t res1:4;
-#endif
-	} hdw;
-
-#endif
-	} bits;
-} tx_cs_dbg_t, *p_tx_cs_dbg_t;
-
-#define	TXDMA_MAILBOX_BYTE_LENGTH		64
-#define	TXDMA_MAILBOX_UNUSED			24
-
-typedef struct _txdma_mailbox_t {
-	tx_cs_t			tx_cs;				/* 8 bytes */
-	tx_dma_pre_st_t		tx_dma_pre_st;			/* 8 bytes */
-	tx_ring_hdl_t		tx_ring_hdl;			/* 8 bytes */
-	tx_ring_kick_t		tx_ring_kick;			/* 8 bytes */
-	uint32_t		tx_rng_err_logh;		/* 4 bytes */
-	uint32_t		tx_rng_err_logl;		/* 4 bytes */
-	uint32_t		resv[TXDMA_MAILBOX_UNUSED];
-} txdma_mailbox_t, *p_txdma_mailbox_t;
-
-#if OLD
-/* Transmit Ring Scheduler (per port) */
-#define	TX_DMA_MAP_OFFSET(port)		(port * 8 + TX_DMA_MAP_REG)
-#define	TX_DMA_MAP_PORT_OFFSET(port)	(port * 8)
-#define	TX_DMA_MAP_REG			(FZC_DMC + 0x50000)
-#define	TX_DMA_MAP0_REG			(FZC_DMC + 0x50000)
-#define	TX_DMA_MAP1_REG			(FZC_DMC + 0x50008)
-#define	TX_DMA_MAP2_REG			(FZC_DMC + 0x50010)
-#define	TX_DMA_MAP3_REG			(FZC_DMC + 0x50018)
-
-#define	TX_DMA_MAP_SHIFT		0	/* RO bit 31:0 */
-#define	TX_DMA_MAPMASK			0x00000000FFFFFFFFULL
-
-typedef union _tx_dma_map_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t bind:32;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t bind:32;
-
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} tx_dma_map_t, *p_tx_dma_map_t;
-#endif
-
-#if OLD
-/* Transmit Ring Scheduler: DRR Weight (32 Channels) */
-#define	DRR_WT_REG			(FZC_DMC + 0x51000)
-#define	DRR_WT_SHIFT			0	/* RO bit 19:0 */
-#define	DRR_WT_MASK			0x00000000000FFFFFULL
-
-#define	TXDMA_DRR_RNG_USE_OFFSET(channel)	(channel * 16)
-
-typedef union _drr_wt_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res1_1:12;
-			uint32_t wt:20;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t wt:20;
-			uint32_t res1_1:12;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} drr_wt_t, *p_drr_wt_t;
-#endif
-
-#if OLD
-
-/* Performance Monitoring (32 Channels) */
-#define	TXRNG_USE_REG			(FZC_DMC + 0x51008)
-#define	TXRNG_USE_CNT_SHIFT		0	/* RO bit 26:0 */
-#define	TXRNG_USE_CNT_MASK		0x0000000007FFFFFFULL
-#define	TXRNG_USE_OFLOW_SHIFT		27	/* RO bit 27 */
-#define	TXRNG_USE_OFLOW_MASK		0x0000000008000000ULL
-
-typedef union _txrng_use_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t res1_1:4;
-			uint32_t oflow:1;
-			uint32_t cnt:27;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t cnt:27;
-			uint32_t oflow:1;
-			uint32_t res1_1:4;
-
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} txrng_use_t, *p_txrng_use_t;
-
-#endif
-
-/*
- * Internal Transmit Packet Format (16 bytes)
- */
-#define	TX_PKT_HEADER_SIZE			16
-#define	TX_MAX_GATHER_POINTERS			15
-#define	TX_GATHER_POINTERS_THRESHOLD		8
-/*
- * Due to a hardware bug, the maximum transfer length is
- * reduced from 4096 to 4076.
- *
- * The jumbo MTU is reduced from 9500 to 9216.
- */
-#define	TX_MAX_TRANSFER_LENGTH			4076
-#define	TX_JUMBO_MTU				9216
-
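A quick standalone arithmetic check of the two limits above: even a maximally sized jumbo frame of TX_JUMBO_MTU bytes splits into ceil(9216 / 4076) = 3 transfers, comfortably within the TX_MAX_GATHER_POINTERS (15) budget per packet.

#include <stdio.h>

#define	TX_MAX_TRANSFER_LENGTH	4076	/* per-descriptor limit (see above) */
#define	TX_JUMBO_MTU		9216
#define	TX_MAX_GATHER_POINTERS	15

int
main(void)
{
	/* Descriptors needed for one maximally sized jumbo frame. */
	int ndesc = (TX_JUMBO_MTU + TX_MAX_TRANSFER_LENGTH - 1) /
	    TX_MAX_TRANSFER_LENGTH;

	printf("jumbo frame needs %d descriptors (limit %d)\n",
	    ndesc, TX_MAX_GATHER_POINTERS);
	return (0);
}
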
-#define	TX_PKT_HEADER_PAD_SHIFT			0	/* bit 2:0 */
-#define	TX_PKT_HEADER_PAD_MASK			0x0000000000000007ULL
-#define	TX_PKT_HEADER_TOT_XFER_LEN_SHIFT	16	/* bit 16:29 */
-#define	TX_PKT_HEADER_TOT_XFER_LEN_MASK		0x000000000000FFF8ULL
-#define	TX_PKT_HEADER_L4STUFF_SHIFT		32	/* bit 37:32 */
-#define	TX_PKT_HEADER_L4STUFF_MASK		0x0000003F00000000ULL
-#define	TX_PKT_HEADER_L4START_SHIFT		40	/* bit 45:40 */
-#define	TX_PKT_HEADER_L4START_MASK		0x00003F0000000000ULL
-#define	TX_PKT_HEADER_L3START_SHIFT		48	/* bit 51:48 */
-#define	TX_PKT_HEADER_IHL_SHIFT			52	/* bit 52 */
-#define	TX_PKT_HEADER_VLAN__SHIFT		56	/* bit 56 */
-#define	TX_PKT_HEADER_TCP_UDP_CRC32C_SHIFT	57	/* bit 57 */
-#define	TX_PKT_HEADER_LLC_SHIFT			57	/* bit 57 */
-#define	TX_PKT_HEADER_TCP_UDP_CRC32C_SET	0x0200000000000000ULL
-#define	TX_PKT_HEADER_TCP_UDP_CRC32C_MASK	0x0200000000000000ULL
-#define	TX_PKT_HEADER_L4_PROTO_OP_SHIFT		2	/* bit 59:58 */
-#define	TX_PKT_HEADER_L4_PROTO_OP_MASK		0x0C00000000000000ULL
-#define	TX_PKT_HEADER_V4_HDR_CS_SHIFT		60	/* bit 60 */
-#define	TX_PKT_HEADER_V4_HDR_CS_SET		0x1000000000000000ULL
-#define	TX_PKT_HEADER_V4_HDR_CS_MASK		0x1000000000000000ULL
-#define	TX_PKT_HEADER_IP_VER_SHIFT		61	/* bit 61 */
-#define	TX_PKT_HEADER_IP_VER_MASK		0x2000000000000000ULL
-#define	TX_PKT_HEADER_PKT_TYPE_SHIFT		62	/* bit 62 */
-#define	TX_PKT_HEADER_PKT_TYPE_MASK		0x4000000000000000ULL
-
-/* L4 Protocol Operations */
-#define	TX_PKT_L4_PROTO_OP_NOP			0x00
-#define	TX_PKT_L4_PROTO_OP_FULL_L4_CSUM		0x01
-#define	TX_PKT_L4_PROTO_OP_L4_PAYLOAD_CSUM	0x02
-#define	TX_PKT_L4_PROTO_OP_SCTP_CRC32		0x04
-
-/* Transmit Packet Types */
-#define	TX_PKT_PKT_TYPE_NOP			0x00
-#define	TX_PKT_PKT_TYPE_TCP			0x01
-#define	TX_PKT_PKT_TYPE_UDP			0x02
-#define	TX_PKT_PKT_TYPE_SCTP			0x03
-
-typedef union _tx_pkt_header_t {
-	uint64_t value;
-	struct {
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t pad:3;
-			uint32_t resv2:13;
-			uint32_t tot_xfer_len:14;
-			uint32_t resv1:2;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t pad:3;
-			uint32_t resv2:13;
-			uint32_t tot_xfer_len:14;
-			uint32_t resv1:2;
-#endif
-		} ldw;
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t l4stuff:6;
-			uint32_t resv3:2;
-			uint32_t l4start:6;
-			uint32_t resv2:2;
-			uint32_t l3start:4;
-			uint32_t ihl:4;
-			uint32_t vlan:1;
-			uint32_t llc:1;
-			uint32_t res1:3;
-			uint32_t ip_ver:1;
-			uint32_t cksum_en_pkt_type:2;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t l4stuff:6;
-			uint32_t resv3:2;
-			uint32_t l4start:6;
-			uint32_t resv2:2;
-			uint32_t l3start:4;
-			uint32_t ihl:4;
-			uint32_t vlan:1;
-			uint32_t llc:1;
-			uint32_t res1:3;
-			uint32_t ip_ver:1;
-			uint32_t cksum_en_pkt_type:2;
-#endif
-		} hdw;
-	} bits;
-} tx_pkt_header_t, *p_tx_pkt_header_t;
-
-typedef struct _tx_pkt_hdr_all_t {
-	tx_pkt_header_t		pkthdr;
-	uint64_t		reserved;
-} tx_pkt_hdr_all_t, *p_tx_pkt_hdr_all_t;
-
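A hedged, standalone sketch of composing the first word of the 16-byte internal packet header with the shift/set macros above for a TCP/IPv4 frame with IPv4 header-checksum offload. The numeric field values (offsets, lengths, and their units) are illustrative assumptions only; in the driver this word is produced by nxge_fill_tx_hdr(), declared in nxge_txdma.h above.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Shift/set values copied from the packet-header definitions above. */
#define	TX_PKT_HEADER_TOT_XFER_LEN_SHIFT	16
#define	TX_PKT_HEADER_L4STUFF_SHIFT		32
#define	TX_PKT_HEADER_L4START_SHIFT		40
#define	TX_PKT_HEADER_L3START_SHIFT		48
#define	TX_PKT_HEADER_IHL_SHIFT			52
#define	TX_PKT_HEADER_V4_HDR_CS_SET		0x1000000000000000ULL

int
main(void)
{
	uint64_t hdr = 0;

	/*
	 * Hypothetical TCP/IPv4 frame with IPv4 header checksum requested.
	 * The field values below are illustrative; the real encoding of the
	 * start/stuff offsets is computed by nxge_fill_tx_hdr().
	 */
	hdr |= (uint64_t)1514 << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT;
	hdr |= (uint64_t)7 << TX_PKT_HEADER_L3START_SHIFT;	/* L3 offset */
	hdr |= (uint64_t)5 << TX_PKT_HEADER_IHL_SHIFT;		/* IP hdr len */
	hdr |= (uint64_t)17 << TX_PKT_HEADER_L4START_SHIFT;	/* L4 offset */
	hdr |= (uint64_t)25 << TX_PKT_HEADER_L4STUFF_SHIFT;	/* cksum loc */
	hdr |= TX_PKT_HEADER_V4_HDR_CS_SET;	/* IPv4 header checksum */

	printf("pkt hdr word 0 = %016" PRIx64 "\n", hdr);
	return (0);
}
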
-/* Debug only registers */
-#define	TDMC_INJ_PAR_ERR_REG		(FZC_DMC + 0x45040)
-#define	TDMC_INJ_PAR_ERR_MASK		0x0000000000FFFFFFULL
-#define	TDMC_INJ_PAR_ERR_MASK_N2	0x000000000000FFFFULL
-
-typedef union _tdmc_inj_par_err_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t rsvc:8;
-			uint32_t inject_parity_error:24;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t inject_parity_error:24;
-			uint32_t rsvc:8;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} tdmc_inj_par_err_t, *p_tdmc_inj_par_err_t;
-
-typedef union _tdmc_inj_par_err_n2_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t rsvc:16;
-			uint32_t inject_parity_error:16;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t inject_parity_error:16;
-			uint32_t rsvc:16;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} tdmc_inj_par_err_n2_t, *p_tdmc_inj_par_err_n2_t;
-
-#define	TDMC_DBG_SEL_REG		(FZC_DMC + 0x45080)
-#define	TDMC_DBG_SEL_MASK		0x000000000000003FULL
-
-typedef union _tdmc_dbg_sel_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t rsvc:26;
-			uint32_t dbg_sel:6;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t dbg_sel:6;
-			uint32_t rsvc:26;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} tdmc_dbg_sel_t, *p_tdmc_dbg_sel_t;
-
-#define	TDMC_TRAINING_REG		(FZC_DMC + 0x45088)
-#define	TDMC_TRAINING_MASK		0x00000000FFFFFFFFULL
-
-typedef union _tdmc_training_t {
-	uint64_t value;
-	struct {
-#ifdef	_BIG_ENDIAN
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t vec:32;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t vec:32;
-#endif
-		} ldw;
-#ifndef _BIG_ENDIAN
-		uint32_t hdw;
-#endif
-	} bits;
-} tdmc_training_t, *p_tdmc_training_t;
-
-#ifdef	__cplusplus
-}
-#endif
-
-#endif	/* _SYS_NXGE_NXGE_TXDMA_HW_H */
--- a/usr/src/uts/sun4v/sys/nxge/nxge_virtual.h	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,80 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef	_SYS_NXGE_NXGE_VIRTUAL_H
-#define	_SYS_NXGE_NXGE_VIRTUAL_H
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#ifdef	__cplusplus
-extern "C" {
-#endif
-
-/*
- * Neptune Virtualization Control Operations
- */
-typedef enum {
-	NXGE_CTLOPS_NIUTYPE,
-	NXGE_CTLOPS_GET_ATTRIBUTES,
-	NXGE_CTLOPS_GET_HWPROPERTIES,
-	NXGE_CTLOPS_SET_HWPROPERTIES,
-	NXGE_CTLOPS_GET_SHARED_REG,
-	NXGE_CTLOPS_SET_SHARED_REG,
-	NXGE_CTLOPS_UPDATE_SHARED_REG,
-	NXGE_CTLOPS_GET_LOCK_BLOCK,
-	NXGE_CTLOPS_GET_LOCK_TRY,
-	NXGE_CTLOPS_FREE_LOCK,
-	NXGE_CTLOPS_SET_SHARED_REG_LOCK,
-	NXGE_CTLOPS_CLEAR_BIT_SHARED_REG,
-	NXGE_CTLOPS_CLEAR_BIT_SHARED_REG_UL,
-	NXGE_CTLOPS_END
-} nxge_ctl_enum_t;
-
-/* 12 bits are available */
-#define	COMMON_CFG_VALID	0x01
-#define	COMMON_CFG_BUSY	0x02
-#define	COMMON_INIT_START	0x04
-#define	COMMON_INIT_DONE	0x08
-#define	COMMON_TCAM_BUSY	0x10
-#define	COMMON_VLAN_BUSY	0x20
-
-#define	NXGE_SR_FUNC_BUSY_SHIFT	0x8
-#define	NXGE_SR_FUNC_BUSY_MASK	0xf00
-
-
-#define	COMMON_TXDMA_CFG	1
-#define	COMMON_RXDMA_CFG	2
-#define	COMMON_RXDMA_GRP_CFG	4
-#define	COMMON_CLASS_CFG	8
-#define	COMMON_QUICK_CFG	0x10
-
-nxge_status_t nxge_intr_mask_mgmt(p_nxge_t nxgep);
-void nxge_virint_regs_dump(p_nxge_t nxgep);
-
-#ifdef	__cplusplus
-}
-#endif
-
-#endif	/* _SYS_NXGE_NXGE_VIRTUAL_H */
--- a/usr/src/uts/sun4v/sys/nxge/nxge_zcp.h	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,75 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef	_SYS_NXGE_NXGE_ZCP_H
-#define	_SYS_NXGE_NXGE_ZCP_H
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#ifdef	__cplusplus
-extern "C" {
-#endif
-
-#include <nxge_zcp_hw.h>
-#include <npi_zcp.h>
-
-typedef	struct _zcp_errlog {
-	zcp_state_machine_t	state_mach;
-} zcp_errlog_t, *p_zcp_errlog_t;
-
-typedef struct _nxge_zcp_stats_t {
-	uint32_t 		errors;
-	uint32_t 		inits;
-	uint32_t 		rrfifo_underrun;
-	uint32_t 		rrfifo_overrun;
-	uint32_t 		rspfifo_uncorr_err;
-	uint32_t 		buffer_overflow;
-	uint32_t 		stat_tbl_perr;
-	uint32_t 		dyn_tbl_perr;
-	uint32_t 		buf_tbl_perr;
-	uint32_t 		tt_program_err;
-	uint32_t 		rsp_tt_index_err;
-	uint32_t 		slv_tt_index_err;
-	uint32_t 		zcp_tt_index_err;
-	uint32_t 		zcp_access_fail;
-	uint32_t 		cfifo_ecc;
-	zcp_errlog_t		errlog;
-} nxge_zcp_stats_t, *p_nxge_zcp_stats_t;
-
-typedef	struct _nxge_zcp {
-	uint32_t		config;
-	uint32_t		iconfig;
-	nxge_zcp_stats_t	*stat;
-} nxge_zcp_t;
-
-nxge_status_t nxge_zcp_init(p_nxge_t nxgep);
-void nxge_zcp_inject_err(p_nxge_t nxgep, uint32_t);
-nxge_status_t nxge_zcp_fatal_err_recover(p_nxge_t nxgep);
-
-#ifdef	__cplusplus
-}
-#endif
-
-#endif	/* _SYS_NXGE_NXGE_ZCP_H */
--- a/usr/src/uts/sun4v/sys/nxge/nxge_zcp_hw.h	Mon Mar 19 18:02:35 2007 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,771 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef	_SYS_NXGE_NXGE_ZCP_HW_H
-#define	_SYS_NXGE_NXGE_ZCP_HW_H
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
-#ifdef	__cplusplus
-extern "C" {
-#endif
-
-#include <nxge_defs.h>
-
-/*
- * Neptune Zerocopy Hardware definitions
- * Updated to reflect PRM-0.8.
- */
-
-#define	ZCP_CONFIG_REG		(FZC_ZCP + 0x00000)
-#define	ZCP_INT_STAT_REG	(FZC_ZCP + 0x00008)
-#define	ZCP_INT_STAT_TEST_REG	(FZC_ZCP + 0x00108)
-#define	ZCP_INT_MASK_REG	(FZC_ZCP + 0x00010)
-
-#define	ZCP_BAM4_RE_CTL_REG 	(FZC_ZCP + 0x00018)
-#define	ZCP_BAM8_RE_CTL_REG 	(FZC_ZCP + 0x00020)
-#define	ZCP_BAM16_RE_CTL_REG 	(FZC_ZCP + 0x00028)
-#define	ZCP_BAM32_RE_CTL_REG 	(FZC_ZCP + 0x00030)
-
-#define	ZCP_DST4_RE_CTL_REG 	(FZC_ZCP + 0x00038)
-#define	ZCP_DST8_RE_CTL_REG 	(FZC_ZCP + 0x00040)
-#define	ZCP_DST16_RE_CTL_REG 	(FZC_ZCP + 0x00048)
-#define	ZCP_DST32_RE_CTL_REG 	(FZC_ZCP + 0x00050)
-
-#define	ZCP_RAM_DATA_REG	(FZC_ZCP + 0x00058)
-#define	ZCP_RAM_DATA0_REG	(FZC_ZCP + 0x00058)
-#define	ZCP_RAM_DATA1_REG	(FZC_ZCP + 0x00060)
-#define	ZCP_RAM_DATA2_REG	(FZC_ZCP + 0x00068)
-#define	ZCP_RAM_DATA3_REG	(FZC_ZCP + 0x00070)
-#define	ZCP_RAM_DATA4_REG	(FZC_ZCP + 0x00078)
-#define	ZCP_RAM_BE_REG		(FZC_ZCP + 0x00080)
-#define	ZCP_RAM_ACC_REG		(FZC_ZCP + 0x00088)
-
-#define	ZCP_TRAINING_VECTOR_REG	(FZC_ZCP + 0x000C0)
-#define	ZCP_STATE_MACHINE_REG	(FZC_ZCP + 0x000C8)
-#define	ZCP_CHK_BIT_DATA_REG	(FZC_ZCP + 0x00090)
-#define	ZCP_RESET_CFIFO_REG	(FZC_ZCP + 0x00098)
-#define	ZCP_RESET_CFIFO_MASK	0x0F
-
-#define	ZCP_CFIFIO_RESET_WAIT		10
-#define	ZCP_P0_P1_CFIFO_DEPTH		2048
-#define	ZCP_P2_P3_CFIFO_DEPTH		1024
-#define	ZCP_NIU_CFIFO_DEPTH		1024
-
-typedef union _zcp_reset_cfifo {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t rsrvd:28;
-			uint32_t reset_cfifo3:1;
-			uint32_t reset_cfifo2:1;
-			uint32_t reset_cfifo1:1;
-			uint32_t reset_cfifo0:1;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t reset_cfifo0:1;
-			uint32_t reset_cfifo1:1;
-			uint32_t reset_cfifo2:1;
-			uint32_t reset_cfifo3:1;
-			uint32_t rsrvd:28;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} zcp_reset_cfifo_t, *p_zcp_reset_cfifo_t;
-
-#define	ZCP_CFIFO_ECC_PORT0_REG	(FZC_ZCP + 0x000A0)
-#define	ZCP_CFIFO_ECC_PORT1_REG	(FZC_ZCP + 0x000A8)
-#define	ZCP_CFIFO_ECC_PORT2_REG	(FZC_ZCP + 0x000B0)
-#define	ZCP_CFIFO_ECC_PORT3_REG	(FZC_ZCP + 0x000B8)
-
-/* NOTE: Same as RX_LOG_PAGE_HDL */
-#define	ZCP_PAGE_HDL_REG	(FZC_DMC + 0x20038)
-
-/* Data Structures */
-
-typedef union zcp_config_reg_u {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t rsvd:7;
-			uint32_t mode_32_bit:1;
-			uint32_t debug_sel:8;
-			uint32_t rdma_th:11;
-			uint32_t ecc_chk_dis:1;
-			uint32_t par_chk_dis:1;
-			uint32_t dis_buf_rn:1;
-			uint32_t dis_buf_rq_if:1;
-			uint32_t zc_enable:1;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t zc_enable:1;
-			uint32_t dis_buf_rq_if:1;
-			uint32_t dis_buf_rn:1;
-			uint32_t par_chk_dis:1;
-			uint32_t ecc_chk_dis:1;
-			uint32_t rdma_th:11;
-			uint32_t debug_sel:8;
-			uint32_t mode_32_bit:1;
-			uint32_t rsvd:7;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} zcp_config_reg_t, *zcp_config_reg_pt;
-
-#define	ZCP_DEBUG_SEL_BITS	0xFF
-#define	ZCP_DEBUG_SEL_SHIFT	16
-#define	ZCP_DEBUG_SEL_MASK	(ZCP_DEBUG_SEL_BITS << ZCP_DEBUG_SEL_SHIFT)
-#define	RDMA_TH_BITS		0x7FF
-#define	RDMA_TH_SHIFT		5
-#define	RDMA_TH_MASK		(RDMA_TH_BITS << RDMA_TH_SHIFT)
-#define	ECC_CHK_DIS		(1 << 4)
-#define	PAR_CHK_DIS		(1 << 3)
-#define	DIS_BUFF_RN		(1 << 2)
-#define	DIS_BUFF_RQ_IF		(1 << 1)
-#define	ZC_ENABLE		(1 << 0)
-
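A hedged, standalone sketch composing a ZCP_CONFIG value from the bit and field macros above; the chosen RDMA threshold of 1024 is an illustrative assumption, not a recommended setting.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Bit/field macros copied from the ZCP_CONFIG definitions above. */
#define	RDMA_TH_BITS	0x7FF
#define	RDMA_TH_SHIFT	5
#define	RDMA_TH_MASK	(RDMA_TH_BITS << RDMA_TH_SHIFT)
#define	ECC_CHK_DIS	(1 << 4)
#define	PAR_CHK_DIS	(1 << 3)
#define	ZC_ENABLE	(1 << 0)

int
main(void)
{
	uint64_t cfg = 0;

	/*
	 * Hypothetical setting: enable zerocopy, keep ECC and parity
	 * checking enabled, and program an illustrative RDMA threshold.
	 */
	cfg |= ZC_ENABLE;
	cfg &= ~(uint64_t)(ECC_CHK_DIS | PAR_CHK_DIS);
	cfg = (cfg & ~(uint64_t)RDMA_TH_MASK) |
	    (((uint64_t)1024 & RDMA_TH_BITS) << RDMA_TH_SHIFT);

	printf("zcp config = %016" PRIx64 "\n", cfg);
	return (0);
}
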
-typedef union zcp_int_stat_reg_u {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t rsvd:16;
-			uint32_t rrfifo_urun:1;
-			uint32_t rrfifo_orun:1;
-			uint32_t rsvd1:1;
-			uint32_t rspfifo_uc_err:1;
-			uint32_t buf_overflow:1;
-			uint32_t stat_tbl_perr:1;
-			uint32_t dyn_tbl_perr:1;
-			uint32_t buf_tbl_perr:1;
-			uint32_t tt_tbl_perr:1;
-			uint32_t rsp_tt_index_err:1;
-			uint32_t slv_tt_index_err:1;
-			uint32_t zcp_tt_index_err:1;
-			uint32_t cfifo_ecc3:1;
-			uint32_t cfifo_ecc2:1;
-			uint32_t cfifo_ecc1:1;
-			uint32_t cfifo_ecc0:1;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t cfifo_ecc0:1;
-			uint32_t cfifo_ecc1:1;
-			uint32_t cfifo_ecc2:1;
-			uint32_t cfifo_ecc3:1;
-			uint32_t zcp_tt_index_err:1;
-			uint32_t slv_tt_index_err:1;
-			uint32_t rsp_tt_index_err:1;
-			uint32_t tt_tbl_perr:1;
-			uint32_t buf_tbl_perr:1;
-			uint32_t dyn_tbl_perr:1;
-			uint32_t stat_tbl_perr:1;
-			uint32_t buf_overflow:1;
-			uint32_t rspfifo_uc_err:1;
-			uint32_t rsvd1:1;
-			uint32_t rrfifo_orun:1;
-			uint32_t rrfifo_urun:1;
-			uint32_t rsvd:16;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} zcp_int_stat_reg_t, *zcp_int_stat_reg_pt, zcp_int_mask_reg_t,
-	*zcp_int_mask_reg_pt;
-
-#define	RRFIFO_UNDERRUN		(1 << 15)
-#define	RRFIFO_OVERRUN		(1 << 14)
-#define	RSPFIFO_UNCORR_ERR	(1 << 12)
-#define	BUFFER_OVERFLOW		(1 << 11)
-#define	STAT_TBL_PERR		(1 << 10)
-#define	BUF_DYN_TBL_PERR	(1 << 9)
-#define	BUF_TBL_PERR		(1 << 8)
-#define	TT_PROGRAM_ERR		(1 << 7)
-#define	RSP_TT_INDEX_ERR	(1 << 6)
-#define	SLV_TT_INDEX_ERR	(1 << 5)
-#define	ZCP_TT_INDEX_ERR	(1 << 4)
-#define	CFIFO_ECC3		(1 << 3)
-#define	CFIFO_ECC0		(1 << 0)
-#define	CFIFO_ECC2		(1 << 2)
-#define	CFIFO_ECC1		(1 << 1)
-
-typedef union zcp_bam_region_reg_u {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t loj:1;
-			uint32_t range_chk_en:1;
-			uint32_t last_zcfid:10;
-			uint32_t first_zcfid:10;
-			uint32_t offset:10;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t offset:10;
-			uint32_t first_zcfid:10;
-			uint32_t last_zcfid:10;
-			uint32_t range_chk_en:1;
-			uint32_t loj:1;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} zcp_bam_region_reg_t, *zcp_bam_region_reg_pt;
-
-typedef union zcp_dst_region_reg_u {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t rsvd:22;
-			uint32_t ds_offset:10;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t rsvd:22;
-			uint32_t ds_offset:10;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} zcp_dst_region_reg_t, *zcp_dst_region_reg_pt;
-
-typedef	enum tbuf_size_e {
-	TBUF_4K		= 0,
-	TBUF_8K,
-	TBUF_16K,
-	TBUF_32K,
-	TBUF_64K,
-	TBUF_128K,
-	TBUF_256K,
-	TBUF_512K,
-	TBUF_1M,
-	TBUF_2M,
-	TBUF_4M,
-	TBUF_8M
-} tbuf_size_t;
-
-typedef	enum tbuf_num_e {
-	TBUF_NUM_4	= 0,
-	TBUF_NUM_8,
-	TBUF_NUM_16,
-	TBUF_NUM_32
-} tbuf_num_t;
-
-typedef	enum tmode_e {
-	TMODE_BASIC		= 0,
-	TMODE_AUTO_UNMAP	= 1,
-	TMODE_AUTO_ADV		= 3
-} tmode_t;
-
-typedef	struct tte_sflow_attr_s {
-	union {
-		uint64_t value;
-		struct {
-#if defined(_BIG_ENDIAN)
-			uint32_t hdw;
-#endif
-			struct {
-#if defined(_BIT_FIELDS_HTOL)
-				uint32_t ulp_end:18;
-				uint32_t num_buf:2;
-				uint32_t buf_size:4;
-				uint32_t rdc_tbl_offset:8;
-#elif defined(_BIT_FIELDS_LTOH)
-				uint32_t rdc_tbl_offset:8;
-				uint32_t buf_size:4;
-				uint32_t num_buf:2;
-				uint32_t ulp_end:18;
-#endif
-			} ldw;
-#if !defined(_BIG_ENDIAN)
-			uint32_t hdw;
-#endif
-		} bits;
-	} qw0;
-
-	union {
-		uint64_t value;
-		struct {
-#if defined(_BIG_ENDIAN)
-			uint32_t hdw;
-#endif
-			struct {
-#if defined(_BIT_FIELDS_HTOL)
-				uint32_t ring_base:12;
-				uint32_t skip:1;
-				uint32_t rsvd:1;
-				uint32_t tmode:2;
-				uint32_t unmap_all_en:1;
-				uint32_t ulp_end_en:1;
-				uint32_t ulp_end:14;
-#elif defined(_BIT_FIELDS_LTOH)
-				uint32_t ulp_end:14;
-				uint32_t ulp_end_en:1;
-				uint32_t unmap_all_en:1;
-				uint32_t tmode:2;
-				uint32_t rsvd:1;
-				uint32_t skip:1;
-				uint32_t ring_base:12;
-#endif
-			} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		} bits;
-	} qw1;
-
-	union {
-		uint64_t value;
-		struct {
-#if defined(_BIG_ENDIAN)
-			uint32_t hdw;
-#endif
-			struct {
-#if defined(_BIT_FIELDS_HTOL)
-				uint32_t busy:1;
-				uint32_t ring_size:4;
-				uint32_t ring_base:27;
-#elif defined(_BIT_FIELDS_LTOH)
-				uint32_t ring_base:27;
-				uint32_t ring_size:4;
-				uint32_t busy:1;
-#endif
-			} ldw;
-#if !defined(_BIG_ENDIAN)
-			uint32_t hdw;
-#endif
-		} bits;
-	} qw2;
-
-	union {
-		uint64_t value;
-		struct {
-#if defined(_BIG_ENDIAN)
-			uint32_t hdw;
-#endif
-			struct {
-#if defined(_BIT_FIELDS_HTOL)
-				uint32_t rsvd:16;
-				uint32_t toq:16;
-#elif defined(_BIT_FIELDS_LTOH)
-				uint32_t toq:16;
-				uint32_t rsvd:16;
-#endif
-			} ldw;
-#if !defined(_BIG_ENDIAN)
-			uint32_t hdw;
-#endif
-		} bits;
-	} qw3;
-
-	union {
-		uint64_t value;
-		struct {
-#if defined(_BIG_ENDIAN)
-			uint32_t hdw;
-#endif
-			struct {
-#if defined(_BIT_FIELDS_HTOL)
-				uint32_t rsvd:28;
-				uint32_t dat4:4;
-#elif defined(_BIT_FIELDS_LTOH)
-				uint32_t dat4:4;
-				uint32_t rsvd:28;
-#endif
-			} ldw;
-#if !defined(_BIG_ENDIAN)
-			uint32_t hdw;
-#endif
-		} bits;
-	} qw4;
-
-} tte_sflow_attr_t, *tte_sflow_attr_pt;
-
-#define	TTE_RDC_TBL_SFLOW_BITS_EN	0x0001
-#define	TTE_BUF_SIZE_BITS_EN		0x0002
-#define	TTE_NUM_BUF_BITS_EN		0x0002
-#define	TTE_ULP_END_BITS_EN		0x003E
-#define	TTE_ULP_END_EN_BITS_EN		0x0020
-#define	TTE_UNMAP_ALL_BITS_EN		0x0020
-#define	TTE_TMODE_BITS_EN		0x0040
-#define	TTE_SKIP_BITS_EN		0x0040
-#define	TTE_RING_BASE_ADDR_BITS_EN	0x0FC0
-#define	TTE_RING_SIZE_BITS_EN		0x0800
-#define	TTE_BUSY_BITS_EN		0x0800
-#define	TTE_TOQ_BITS_EN			0x3000
-
-#define	TTE_MAPPED_IN_BITS_EN		0x0000F
-#define	TTE_ANCHOR_SEQ_BITS_EN		0x000F0
-#define	TTE_ANCHOR_OFFSET_BITS_EN	0x00700
-#define	TTE_ANCHOR_BUFFER_BITS_EN	0x00800
-#define	TTE_ANCHOR_BUF_FLAG_BITS_EN	0x00800
-#define	TTE_UNMAP_ON_LEFT_BITS_EN	0x00800
-#define	TTE_ULP_END_REACHED_BITS_EN	0x00800
-#define	TTE_ERR_STAT_BITS_EN		0x01000
-#define	TTE_WR_PTR_BITS_EN		0x01000
-#define	TTE_HOQ_BITS_EN			0x0E000
-#define	TTE_PREFETCH_ON_BITS_EN		0x08000
-
-typedef	enum tring_size_e {
-	TRING_SIZE_8		= 0,
-	TRING_SIZE_16,
-	TRING_SIZE_32,
-	TRING_SIZE_64,
-	TRING_SIZE_128,
-	TRING_SIZE_256,
-	TRING_SIZE_512,
-	TRING_SIZE_1K,
-	TRING_SIZE_2K,
-	TRING_SIZE_4K,
-	TRING_SIZE_8K,
-	TRING_SIZE_16K,
-	TRING_SIZE_32K
-} tring_size_t;
-
-typedef struct tte_dflow_attr_s {
-	union {
-		uint64_t value;
-		struct {
-#if defined(_BIG_ENDIAN)
-			uint32_t hdw;
-#endif
-			struct {
-#if defined(_BIT_FIELDS_HTOL)
-				uint32_t mapped_in;
-#elif defined(_BIT_FIELDS_LTOH)
-				uint32_t mapped_in;
-#endif
-			} ldw;
-#if !defined(_BIG_ENDIAN)
-			uint32_t hdw;
-#endif
-		} bits;
-	} qw0;
-
-	union {
-		uint64_t value;
-		struct {
-#if defined(_BIG_ENDIAN)
-			uint32_t hdw;
-#endif
-			struct {
-#if defined(_BIT_FIELDS_HTOL)
-				uint32_t anchor_seq;
-#elif defined(_BIT_FIELDS_LTOH)
-				uint32_t anchor_seq;
-#endif
-			} ldw;
-#if !defined(_BIG_ENDIAN)
-			uint32_t hdw;
-#endif
-		} bits;
-	} qw1;
-
-	union {
-		uint64_t value;
-		struct {
-#if defined(_BIG_ENDIAN)
-			uint32_t hdw;
-#endif
-			struct {
-#if defined(_BIT_FIELDS_HTOL)
-				uint32_t ulp_end_reached;
-				uint32_t unmap_on_left;
-				uint32_t anchor_buf_flag;
-				uint32_t anchor_buf:5;
-				uint32_t anchor_offset:24;
-#elif defined(_BIT_FIELDS_LTOH)
-				uint32_t anchor_offset:24;
-				uint32_t anchor_buf:5;
-				uint32_t anchor_buf_flag;
-				uint32_t unmap_on_left;
-				uint32_t ulp_end_reached;
-#endif
-			} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		} bits;
-	} qw2;
-
-	union {
-		uint64_t value;
-		struct {
-#if defined(_BIG_ENDIAN)
-			uint32_t hdw;
-#endif
-			struct {
-#if defined(_BIT_FIELDS_HTOL)
-				uint32_t rsvd1:1;
-				uint32_t prefetch_on:1;
-				uint32_t hoq:16;
-				uint32_t rsvd:6;
-				uint32_t wr_ptr:6;
-				uint32_t err_stat:2;
-#elif defined(_BIT_FIELDS_LTOH)
-				uint32_t err_stat:2;
-				uint32_t wr_ptr:6;
-				uint32_t rsvd:6;
-				uint32_t hoq:16;
-				uint32_t prefetch_on:1;
-				uint32_t rsvd1:1;
-#endif
-			} ldw;
-#if !defined(_BIG_ENDIAN)
-			uint32_t hdw;
-#endif
-		} bits;
-	} qw3;
-
-	union {
-		uint64_t value;
-		struct {
-#if defined(_BIG_ENDIAN)
-			uint32_t hdw;
-#endif
-			struct {
-#if defined(_BIT_FIELDS_HTOL)
-				uint32_t rsvd:28;
-				uint32_t dat4:4;
-#elif defined(_BIT_FIELDS_LTOH)
-				uint32_t dat4:4;
-				uint32_t rsvd:28;
-#endif
-			} ldw;
-#if !defined(_BIG_ENDIAN)
-			uint32_t hdw;
-#endif
-		} bits;
-	} qw4;
-
-} tte_dflow_attr_t, *tte_dflow_attr_pt;
-
-#define	MAX_BAM_BANKS	8
-
-typedef	struct zcp_ram_unit_s {
-	uint32_t	w0;
-	uint32_t	w1;
-	uint32_t	w2;
-	uint32_t	w3;
-	uint32_t	w4;
-} zcp_ram_unit_t;
-
-typedef	enum dmaw_type_e {
-	DMAW_NO_CROSS_BUF	= 0,
-	DMAW_IP_CROSS_BUF_2,
-	DMAW_IP_CROSS_BUF_3,
-	DMAW_IP_CROSS_BUF_4
-} dmaw_type_t;
-
-typedef union zcp_ram_data_u {
-	tte_sflow_attr_t sentry;
-	tte_dflow_attr_t dentry;
-} zcp_ram_data_t, *zcp_ram_data_pt;
-
-typedef union zcp_ram_access_u {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t busy:1;
-			uint32_t rdwr:1;
-			uint32_t rsvd:1;
-			uint32_t zcfid:12;
-			uint32_t ram_sel:5;
-			uint32_t cfifo:12;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t cfifo:12;
-			uint32_t ram_sel:5;
-			uint32_t zcfid:12;
-			uint32_t rsvd:1;
-			uint32_t rdwr:1;
-			uint32_t busy:1;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} zcp_ram_access_t, *zcp_ram_access_pt;
-
-#define	ZCP_RAM_WR		0
-#define	ZCP_RAM_RD		1
-#define	ZCP_RAM_SEL_BAM0	0
-#define	ZCP_RAM_SEL_BAM1	0x1
-#define	ZCP_RAM_SEL_BAM2	0x2
-#define	ZCP_RAM_SEL_BAM3	0x3
-#define	ZCP_RAM_SEL_BAM4	0x4
-#define	ZCP_RAM_SEL_BAM5	0x5
-#define	ZCP_RAM_SEL_BAM6	0x6
-#define	ZCP_RAM_SEL_BAM7	0x7
-#define	ZCP_RAM_SEL_TT_STATIC	0x8
-#define	ZCP_RAM_SEL_TT_DYNAMIC	0x9
-#define	ZCP_RAM_SEL_CFIFO0	0x10
-#define	ZCP_RAM_SEL_CFIFO1	0x11
-#define	ZCP_RAM_SEL_CFIFO2	0x12
-#define	ZCP_RAM_SEL_CFIFO3	0x13
-
-typedef union zcp_ram_benable_u {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t rsvd:15;
-			uint32_t be:17;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t be:17;
-			uint32_t rsvd:15;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} zcp_ram_benable_t, *zcp_ram_benable_pt;
-
-typedef union zcp_training_vector_u {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t train_vec;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t train_vec;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} zcp_training_vector_t, *zcp_training_vector_pt;
-
-typedef union zcp_state_machine_u {
-	uint64_t value;
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-			uint32_t state;
-#elif defined(_BIT_FIELDS_LTOH)
-			uint32_t state;
-#endif
-		} ldw;
-#if !defined(_BIG_ENDIAN)
-		uint32_t hdw;
-#endif
-	} bits;
-} zcp_state_machine_t, *zcp_state_machine_pt;
-
-typedef	struct zcp_hdr_s {
-	uint16_t	zflowid;
-	uint16_t	tcp_hdr_len;
-	uint16_t	tcp_payld_len;
-	uint16_t	head_of_que;
-	uint32_t	first_b_offset;
-	boolean_t	reach_buf_end;
-	dmaw_type_t	dmaw_type;
-	uint8_t		win_buf_offset;
-} zcp_hdr_t;
-
-typedef	union _zcp_ecc_ctrl {
-	uint64_t value;
-
-	struct {
-#if defined(_BIG_ENDIAN)
-		uint32_t	w1;
-#endif
-		struct {
-#if defined(_BIT_FIELDS_HTOL)
-		uint32_t dis_dbl	: 1;
-		uint32_t res3		: 13;
-		uint32_t cor_dbl	: 1;
-		uint32_t cor_sng	: 1;
-		uint32_t res2		: 5;
-		uint32_t cor_all	: 1;
-		uint32_t res1		: 7;
-		uint32_t cor_lst	: 1;
-		uint32_t cor_snd	: 1;
-		uint32_t cor_fst	: 1;
-#elif defined(_BIT_FIELDS_LTOH)
-		uint32_t cor_fst	: 1;
-		uint32_t cor_snd	: 1;
-		uint32_t cor_lst	: 1;
-		uint32_t res1		: 7;
-		uint32_t cor_all	: 1;
-		uint32_t res2		: 5;
-		uint32_t cor_sng	: 1;
-		uint32_t cor_dbl	: 1;
-		uint32_t res3		: 13;
-		uint32_t dis_dbl	: 1;
-#else
-#error	one of _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH must be defined
-#endif
-	} w0;
-
-#if !defined(_BIG_ENDIAN)
-		uint32_t	w1;
-#endif
-	} bits;
-} zcp_ecc_ctrl_t;
-
-#ifdef	__cplusplus
-}
-#endif
-
-#endif	/* _SYS_NXGE_NXGE_ZCP_HW_H */