changeset 10566:b09132fd6cd8

6839215 amd_iommu driver panics dom0 in snv_114.
author Vikram Hegde <Vikram.Hegde@Sun.COM>
date Mon, 14 Sep 2009 21:48:21 -0700
parents e92b63e831b0
children f8c0a7fe6191
files usr/src/pkgdefs/SUNWcakr.i/prototype_com usr/src/pkgdefs/SUNWckr/prototype_i386 usr/src/pkgdefs/SUNWhea/prototype_i386 usr/src/tools/scripts/bfu.sh usr/src/uts/i86pc/Makefile.files usr/src/uts/i86pc/Makefile.i86pc.shared usr/src/uts/i86pc/Makefile.rules usr/src/uts/i86pc/amd_iommu/Makefile usr/src/uts/i86pc/io/amd_iommu/amd_iommu.c usr/src/uts/i86pc/io/amd_iommu/amd_iommu.conf usr/src/uts/i86pc/io/amd_iommu/amd_iommu_acpi.c usr/src/uts/i86pc/io/amd_iommu/amd_iommu_acpi.h usr/src/uts/i86pc/io/amd_iommu/amd_iommu_cmd.c usr/src/uts/i86pc/io/amd_iommu/amd_iommu_impl.c usr/src/uts/i86pc/io/amd_iommu/amd_iommu_impl.h usr/src/uts/i86pc/io/amd_iommu/amd_iommu_log.c usr/src/uts/i86pc/io/amd_iommu/amd_iommu_log.h usr/src/uts/i86pc/io/amd_iommu/amd_iommu_page_tables.c usr/src/uts/i86pc/io/amd_iommu/amd_iommu_page_tables.h usr/src/uts/i86pc/sys/Makefile usr/src/uts/i86pc/sys/amd_iommu.h usr/src/uts/intel/Makefile.files usr/src/uts/intel/Makefile.intel.shared usr/src/uts/intel/Makefile.rules usr/src/uts/intel/amd_iommu/Makefile usr/src/uts/intel/io/amd_iommu/amd_iommu.c usr/src/uts/intel/io/amd_iommu/amd_iommu.conf usr/src/uts/intel/io/amd_iommu/amd_iommu_acpi.c usr/src/uts/intel/io/amd_iommu/amd_iommu_acpi.h usr/src/uts/intel/io/amd_iommu/amd_iommu_cmd.c usr/src/uts/intel/io/amd_iommu/amd_iommu_impl.c usr/src/uts/intel/io/amd_iommu/amd_iommu_impl.h usr/src/uts/intel/io/amd_iommu/amd_iommu_log.c usr/src/uts/intel/io/amd_iommu/amd_iommu_log.h usr/src/uts/intel/io/amd_iommu/amd_iommu_page_tables.c usr/src/uts/intel/io/amd_iommu/amd_iommu_page_tables.h usr/src/uts/intel/sys/Makefile usr/src/uts/intel/sys/amd_iommu.h
diffstat 38 files changed, 7127 insertions(+), 7114 deletions(-)
--- a/usr/src/pkgdefs/SUNWcakr.i/prototype_com	Wed Sep 16 22:03:43 2009 -0700
+++ b/usr/src/pkgdefs/SUNWcakr.i/prototype_com	Mon Sep 14 21:48:21 2009 -0700
@@ -68,6 +68,7 @@
 f none platform/i86pc/kernel/drv/acpippm 755 root sys
 f none platform/i86pc/kernel/drv/acpippm.conf 644 root sys
 f none platform/i86pc/kernel/drv/amd64/acpinex 755 root sys
+f none platform/i86pc/kernel/drv/amd64/amd_iommu 755 root sys
 f none platform/i86pc/kernel/drv/amd64/ppm 755 root sys
 f none platform/i86pc/kernel/drv/amd64/isa 755 root sys
 f none platform/i86pc/kernel/drv/amd64/npe 755 root sys
@@ -75,6 +76,8 @@
 f none platform/i86pc/kernel/drv/amd64/pit_beep 755 root sys
 f none platform/i86pc/kernel/drv/amd64/rootnex 755 root sys
 f none platform/i86pc/kernel/drv/acpinex 755 root sys
+f none platform/i86pc/kernel/drv/amd_iommu 755 root sys
+f none platform/i86pc/kernel/drv/amd_iommu.conf 644 root sys
 f none platform/i86pc/kernel/drv/cpudrv 755 root sys
 f none platform/i86pc/kernel/drv/isa 755 root sys
 f none platform/i86pc/kernel/drv/npe 755 root sys
--- a/usr/src/pkgdefs/SUNWckr/prototype_i386	Wed Sep 16 22:03:43 2009 -0700
+++ b/usr/src/pkgdefs/SUNWckr/prototype_i386	Mon Sep 14 21:48:21 2009 -0700
@@ -68,8 +68,6 @@
 f none kernel/drv/acpi_drv 755 root sys
 f none kernel/drv/acpi_drv.conf 644 root sys
 f none kernel/drv/acpi_toshiba 755 root sys
-f none kernel/drv/amd_iommu 755 root sys
-f none kernel/drv/amd_iommu.conf 644 root sys
 f none kernel/drv/bl 755 root sys
 f none kernel/drv/bmc 755 root sys
 f none kernel/drv/bmc.conf 644 root sys
@@ -296,7 +294,6 @@
 f none kernel/drv/amd64/arp 755 root sys
 f none kernel/drv/amd64/acpi_drv 755 root sys
 f none kernel/drv/amd64/acpi_toshiba 755 root sys
-f none kernel/drv/amd64/amd_iommu 755 root sys
 f none kernel/drv/amd64/bl 755 root sys
 f none kernel/drv/amd64/bmc 755 root sys
 f none kernel/drv/amd64/bridge 755 root sys
--- a/usr/src/pkgdefs/SUNWhea/prototype_i386	Wed Sep 16 22:03:43 2009 -0700
+++ b/usr/src/pkgdefs/SUNWhea/prototype_i386	Mon Sep 14 21:48:21 2009 -0700
@@ -75,7 +75,6 @@
 f none usr/include/ia32/sys/traptrace.h 644 root bin
 f none usr/include/sys/kdi_regs.h 644 root bin
 f none usr/include/stack_unwind.h 644 root bin
-f none usr/include/sys/amd_iommu.h 644 root bin
 f none usr/include/sys/bootregs.h 644 root bin
 f none usr/include/sys/bootsvcs.h 644 root bin
 f none usr/include/sys/controlregs.h 644 root bin
@@ -121,6 +120,7 @@
 d none usr/platform/i86pc/include/sys 755 root bin
 f none usr/platform/i86pc/include/sys/asm_misc.h 644 root bin
 f none usr/platform/i86pc/include/sys/acpidev.h 644 root bin
+f none usr/platform/i86pc/include/sys/amd_iommu.h 644 root bin
 f none usr/platform/i86pc/include/sys/clock.h 644 root bin
 f none usr/platform/i86pc/include/sys/cram.h 644 root bin
 f none usr/platform/i86pc/include/sys/debug_info.h 644 root bin
--- a/usr/src/tools/scripts/bfu.sh	Wed Sep 16 22:03:43 2009 -0700
+++ b/usr/src/tools/scripts/bfu.sh	Mon Sep 14 21:48:21 2009 -0700
@@ -7888,6 +7888,24 @@
 	rm -f $root/dev/rsr[0-9]*
 
 	#
+	# Remove old amd_iommu driver
+	#
+
+	#
+	# old: need to remove going forwards:
+	#
+	rm -f $root/kernel/drv/amd_iommu
+	rm -f $root/kernel/drv/amd_iommu.conf
+	rm -f $root/kernel/drv/amd64/amd_iommu
+
+	#
+	# new: need to remove going backwards:
+	#
+	rm -f $root/platform/i86pc/kernel/drv/amd_iommu.conf
+	rm -f $root/platform/i86pc/kernel/drv/amd_iommu
+	rm -f $root/platform/i86pc/kernel/drv/amd64/amd_iommu
+
+	#
 	# The pkg* commands should not be used after this point and before
 	# archive extraction as libcrypto/libssl may not be available.
 	#
--- a/usr/src/uts/i86pc/Makefile.files	Wed Sep 16 22:03:43 2009 -0700
+++ b/usr/src/uts/i86pc/Makefile.files	Mon Sep 14 21:48:21 2009 -0700
@@ -206,6 +206,8 @@
 TZMON_OBJS	+= tzmon.o
 UPPC_OBJS += uppc.o psm_common.o
 XSVC_OBJS += xsvc.o
+AMD_IOMMU_OBJS +=	amd_iommu.o amd_iommu_impl.o amd_iommu_acpi.o \
+			amd_iommu_cmd.o amd_iommu_log.o amd_iommu_page_tables.o
 
 #
 #	Build up defines and paths.
--- a/usr/src/uts/i86pc/Makefile.i86pc.shared	Wed Sep 16 22:03:43 2009 -0700
+++ b/usr/src/uts/i86pc/Makefile.i86pc.shared	Mon Sep 14 21:48:21 2009 -0700
@@ -255,11 +255,13 @@
 DRV_KMODS	+= tzmon
 DRV_KMODS	+= acpi_drv 
 DRV_KMODS	+= acpinex
+DRV_KMODS	+= amd_iommu
 DRV_KMODS	+= ioat
 DRV_KMODS	+= fipe
 
 DRV_KMODS	+= cpudrv
 
+
 #
 # Platform Power Modules
 # 
--- a/usr/src/uts/i86pc/Makefile.rules	Wed Sep 16 22:03:43 2009 -0700
+++ b/usr/src/uts/i86pc/Makefile.rules	Mon Sep 14 21:48:21 2009 -0700
@@ -79,6 +79,10 @@
 	$(COMPILE.c) -o $@ $<
 	$(CTFCONVERT_O)
 
+$(OBJS_DIR)/%.o:		$(UTSBASE)/i86pc/io/amd_iommu/%.c
+	$(COMPILE.c) -o $@ $<
+	$(CTFCONVERT_O)
+
 $(OBJS_DIR)/%.o:		$(UTSBASE)/i86pc/io/ioat/%.c
 	$(COMPILE.c) -o $@ $<
 	$(CTFCONVERT_O)
@@ -295,6 +299,9 @@
 $(LINTS_DIR)/%.ln:		$(UTSBASE)/i86pc/io/acpi/acpinex/%.c
 	@($(LHEAD) $(LINT.c) $< $(LTAIL))
 
+$(LINTS_DIR)/%.ln:              $(UTSBASE)/i86pc/io/amd_iommu/%.c
+	@($(LHEAD) $(LINT.c) $< $(LTAIL))
+
 $(LINTS_DIR)/%.ln:		$(UTSBASE)/i86pc/io/ioat/%.c
 	@($(LHEAD) $(LINT.c) $< $(LTAIL))
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/i86pc/amd_iommu/Makefile	Mon Sep 14 21:48:21 2009 -0700
@@ -0,0 +1,83 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
+# Use is subject to license terms.
+#
+#	This Makefile drives production of the amd_iommu driver kernel module.
+#
+#
+
+#
+#	Path to the base of the uts directory tree (usually /usr/src/uts).
+#
+UTSBASE	= ../..
+
+#
+#	Define the module and object file sets.
+#
+MODULE		= amd_iommu
+OBJECTS		= $(AMD_IOMMU_OBJS:%=$(OBJS_DIR)/%)
+LINTS		= $(AMD_IOMMU_OBJS:%.o=$(LINTS_DIR)/%.ln)
+ROOTMODULE	= $(ROOT_PSM_DRV_DIR)/$(MODULE)
+CONF_SRCDIR     = $(UTSBASE)/i86pc/io/amd_iommu
+
+#
+#	Include common rules.
+#
+include $(UTSBASE)/i86pc/Makefile.i86pc
+
+#
+#	Define targets
+#
+ALL_TARGET	= $(BINARY) $(SRC_CONFILE)
+LINT_TARGET	= $(MODULE).lint
+INSTALL_TARGET	= $(BINARY) $(ROOTMODULE) $(ROOT_CONFFILE)
+
+#
+# depends on misc/iommulib and misc/acpica
+#
+LDFLAGS         += -dy -Nmisc/iommulib -Nmisc/acpica
+
+#
+#	Default build targets.
+#
+.KEEP_STATE:
+
+def:		$(DEF_DEPS)
+
+all:		$(ALL_DEPS)
+
+clean:		$(CLEAN_DEPS)
+
+clobber:	$(CLOBBER_DEPS)
+
+lint:		$(LINT_DEPS)
+
+modlintlib:	$(MODLINTLIB_DEPS)
+
+clean.lint:	$(CLEAN_LINT_DEPS)
+
+install:	$(INSTALL_DEPS) $(CONF_INSTALL_DEPS)
+
+#
+#	Include common targets.
+#
+include $(UTSBASE)/i86pc/Makefile.targ
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/i86pc/io/amd_iommu/amd_iommu.c	Mon Sep 14 21:48:21 2009 -0700
@@ -0,0 +1,444 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/types.h>
+#include <sys/file.h>
+#include <sys/errno.h>
+#include <sys/open.h>
+#include <sys/stat.h>
+#include <sys/cred.h>
+#include <sys/modctl.h>
+#include <sys/conf.h>
+#include <sys/devops.h>
+#include <sys/ddi.h>
+#include <sys/x86_archext.h>
+
+#include <sys/amd_iommu.h>
+#include "amd_iommu_impl.h"
+#include "amd_iommu_acpi.h"
+
+
+#define	AMD_IOMMU_MINOR2INST(x)	(x)
+#define	AMD_IOMMU_INST2MINOR(x)	(x)
+#define	AMD_IOMMU_NODETYPE	"ddi_iommu"
+#define	AMD_IOMMU_MINOR_NAME	"amd-iommu"
+
+static int amd_iommu_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg,
+    void **result);
+static int amd_iommu_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
+static int amd_iommu_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
+static int amd_iommu_open(dev_t *devp, int flag, int otyp, cred_t *credp);
+static int amd_iommu_close(dev_t dev, int flag, int otyp, cred_t *credp);
+static int amd_iommu_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
+    cred_t *credp, int *rvalp);
+
+static struct cb_ops amd_iommu_cb_ops = {
+	amd_iommu_open,		/* cb_open */
+	amd_iommu_close,	/* cb_close */
+	nodev,			/* cb_strategy */
+	nodev,			/* cb_print */
+	nodev,			/* cb_dump */
+	nodev,			/* cb_read */
+	nodev,			/* cb_write */
+	amd_iommu_ioctl,	/* cb_ioctl */
+	nodev,			/* cb_devmap */
+	nodev,			/* cb_mmap */
+	nodev,			/* cb_segmap */
+	nochpoll,		/* cb_chpoll */
+	ddi_prop_op,		/* cb_prop_op */
+	NULL,			/* cb_str */
+	D_NEW | D_MP,		/* cb_flag */
+	CB_REV,			/* cb_rev */
+	nodev,			/* cb_aread */
+	nodev			/* cb_awrite */
+};
+
+static struct dev_ops amd_iommu_dev_ops = {
+	DEVO_REV,		/* devo_rev */
+	0,			/* devo_refcnt */
+	amd_iommu_getinfo,	/* devo_getinfo */
+	nulldev,		/* devo_identify */
+	nulldev,		/* devo_probe */
+	amd_iommu_attach,	/* devo_attach */
+	amd_iommu_detach,	/* devo_detach */
+	nodev,			/* devo_reset */
+	&amd_iommu_cb_ops,	/* devo_cb_ops */
+	NULL,			/* devo_bus_ops */
+	nulldev			/* devo_power */
+};
+
+static struct modldrv modldrv = {
+	&mod_driverops,
+	"AMD IOMMU 0.1",
+	&amd_iommu_dev_ops
+};
+
+static struct modlinkage modlinkage = {
+	MODREV_1,
+	(void *)&modldrv,
+	NULL
+};
+
+amd_iommu_debug_t amd_iommu_debug;
+kmutex_t amd_iommu_global_lock;
+const char *amd_iommu_modname = "amd_iommu";
+amd_iommu_alias_t **amd_iommu_alias;
+amd_iommu_page_table_hash_t amd_iommu_page_table_hash;
+static void *amd_iommu_statep;
+int amd_iommu_64bit_bug;
+int amd_iommu_unity_map;
+int amd_iommu_no_RW_perms;
+int amd_iommu_no_unmap;
+int amd_iommu_pageva_inval_all;
+int amd_iommu_disable;		/* disable IOMMU */
+char *amd_iommu_disable_list;	/* list of drivers bypassing IOMMU */
+
+int
+_init(void)
+{
+	int error = ENOTSUP;
+
+#if defined(__amd64) && !defined(__xpv)
+
+	if (get_hwenv() != HW_NATIVE)
+		return (ENOTSUP);
+
+	error = ddi_soft_state_init(&amd_iommu_statep,
+	    sizeof (struct amd_iommu_state), 1);
+	if (error) {
+		cmn_err(CE_WARN, "%s: _init: failed to init soft state.",
+		    amd_iommu_modname);
+		return (error);
+	}
+
+	if (amd_iommu_acpi_init() != DDI_SUCCESS) {
+		if (amd_iommu_debug) {
+			cmn_err(CE_WARN, "%s: _init: ACPI init failed.",
+			    amd_iommu_modname);
+		}
+		ddi_soft_state_fini(&amd_iommu_statep);
+		return (ENOTSUP);
+	}
+
+	amd_iommu_read_boot_props();
+
+	if (amd_iommu_page_table_hash_init(&amd_iommu_page_table_hash)
+	    != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s: _init: Page table hash init failed.",
+		    amd_iommu_modname);
+		if (amd_iommu_disable_list) {
+			kmem_free(amd_iommu_disable_list,
+			    strlen(amd_iommu_disable_list) + 1);
+			amd_iommu_disable_list = NULL;
+		}
+		amd_iommu_acpi_fini();
+		ddi_soft_state_fini(&amd_iommu_statep);
+		amd_iommu_statep = NULL;
+		return (EFAULT);
+	}
+
+	error = mod_install(&modlinkage);
+	if (error) {
+		cmn_err(CE_WARN, "%s: _init: mod_install failed.",
+		    amd_iommu_modname);
+		amd_iommu_page_table_hash_fini(&amd_iommu_page_table_hash);
+		if (amd_iommu_disable_list) {
+			kmem_free(amd_iommu_disable_list,
+			    strlen(amd_iommu_disable_list) + 1);
+			amd_iommu_disable_list = NULL;
+		}
+		amd_iommu_acpi_fini();
+		ddi_soft_state_fini(&amd_iommu_statep);
+		amd_iommu_statep = NULL;
+		return (error);
+	}
+	error = 0;
+#endif
+
+	return (error);
+}
+
+int
+_info(struct modinfo *modinfop)
+{
+	return (mod_info(&modlinkage, modinfop));
+}
+
+int
+_fini(void)
+{
+	int error;
+
+	error = mod_remove(&modlinkage);
+	if (error)
+		return (error);
+
+	amd_iommu_page_table_hash_fini(&amd_iommu_page_table_hash);
+	if (amd_iommu_disable_list) {
+		kmem_free(amd_iommu_disable_list,
+		    strlen(amd_iommu_disable_list) + 1);
+		amd_iommu_disable_list = NULL;
+	}
+	amd_iommu_acpi_fini();
+	ddi_soft_state_fini(&amd_iommu_statep);
+	amd_iommu_statep = NULL;
+
+	return (0);
+}
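+
+/*
+ * Sketch (hypothetical helper, not part of this changeset): the error
+ * paths in _init() and the body of _fini() above repeat the same
+ * teardown sequence and could share it:
+ */
+static void
+amd_iommu_teardown(void)
+{
+	amd_iommu_page_table_hash_fini(&amd_iommu_page_table_hash);
+	if (amd_iommu_disable_list) {
+		kmem_free(amd_iommu_disable_list,
+		    strlen(amd_iommu_disable_list) + 1);
+		amd_iommu_disable_list = NULL;
+	}
+	amd_iommu_acpi_fini();
+	ddi_soft_state_fini(&amd_iommu_statep);
+	amd_iommu_statep = NULL;
+}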
+
+/*ARGSUSED*/
+static int
+amd_iommu_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
+{
+	struct amd_iommu_state *statep;
+
+	ASSERT(result);
+
+	*result = NULL;
+
+	switch (cmd) {
+	case DDI_INFO_DEVT2DEVINFO:
+		statep = ddi_get_soft_state(amd_iommu_statep,
+		    AMD_IOMMU_MINOR2INST(getminor((dev_t)arg)));
+		if (statep) {
+			*result = statep->aioms_devi;
+			return (DDI_SUCCESS);
+		}
+		break;
+	case DDI_INFO_DEVT2INSTANCE:
+		*result = (void *)(uintptr_t)
+		    AMD_IOMMU_MINOR2INST(getminor((dev_t)arg));
+		return (DDI_SUCCESS);
+	}
+
+	return (DDI_FAILURE);
+}
+
+static int
+amd_iommu_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
+{
+	int instance = ddi_get_instance(dip);
+	const char *driver = ddi_driver_name(dip);
+	struct amd_iommu_state *statep;
+
+	ASSERT(instance >= 0);
+	ASSERT(driver);
+
+	switch (cmd) {
+	case DDI_ATTACH:
+		if (ddi_soft_state_zalloc(amd_iommu_statep, instance)
+		    != DDI_SUCCESS) {
+			cmn_err(CE_WARN, "Unable to allocate soft state for "
+			    "%s%d", driver, instance);
+			return (DDI_FAILURE);
+		}
+
+		statep = ddi_get_soft_state(amd_iommu_statep, instance);
+		if (statep == NULL) {
+			cmn_err(CE_WARN, "Unable to get soft state for "
+			    "%s%d", driver, instance);
+			ddi_soft_state_free(amd_iommu_statep, instance);
+			return (DDI_FAILURE);
+		}
+
+		if (ddi_create_minor_node(dip, AMD_IOMMU_MINOR_NAME, S_IFCHR,
+		    AMD_IOMMU_INST2MINOR(instance), AMD_IOMMU_NODETYPE,
+		    0) != DDI_SUCCESS) {
+			cmn_err(CE_WARN, "Unable to create minor node for "
+			    "%s%d", driver, instance);
+			ddi_remove_minor_node(dip, NULL);
+			ddi_soft_state_free(amd_iommu_statep, instance);
+			return (DDI_FAILURE);
+		}
+
+		statep->aioms_devi = dip;
+		statep->aioms_instance = instance;
+		statep->aioms_iommu_start = NULL;
+		statep->aioms_iommu_end = NULL;
+
+		amd_iommu_lookup_conf_props(dip);
+
+		if (amd_iommu_disable_list) {
+			cmn_err(CE_NOTE, "AMD IOMMU disabled for the following"
+			    " drivers:\n%s", amd_iommu_disable_list);
+		}
+
+		if (amd_iommu_disable) {
+			cmn_err(CE_NOTE, "AMD IOMMU disabled by user");
+		} else if (amd_iommu_setup(dip, statep) != DDI_SUCCESS) {
+			cmn_err(CE_WARN, "Unable to initialize AMD IOMMU "
+			    "%s%d", driver, instance);
+			ddi_remove_minor_node(dip, NULL);
+			ddi_soft_state_free(amd_iommu_statep, instance);
+			return (DDI_FAILURE);
+		}
+
+		ddi_report_dev(dip);
+
+		return (DDI_SUCCESS);
+
+	case DDI_RESUME:
+		return (DDI_SUCCESS);
+	default:
+		return (DDI_FAILURE);
+	}
+}
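+
+/*
+ * Sketch (assumption: amd_iommu_lookup_conf_props(), called in
+ * DDI_ATTACH above, is defined in amd_iommu_impl.c, which is not part
+ * of this file): a driver.conf string property such as amd-iommu="yes"
+ * is typically read through the DDI property interfaces like this:
+ */
+static void
+amd_iommu_conf_prop_sketch(dev_info_t *dip)
+{
+	char *propval;
+
+	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
+	    "amd-iommu", &propval) == DDI_PROP_SUCCESS) {
+		if (strcmp(propval, "no") == 0)
+			amd_iommu_disable = 1;
+		ddi_prop_free(propval);
+	}
+}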
+
+static int
+amd_iommu_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
+{
+	int instance = ddi_get_instance(dip);
+	const char *driver = ddi_driver_name(dip);
+	struct amd_iommu_state *statep;
+
+	ASSERT(instance >= 0);
+	ASSERT(driver);
+
+	switch (cmd) {
+	case DDI_DETACH:
+		statep = ddi_get_soft_state(amd_iommu_statep, instance);
+		if (statep == NULL) {
+			cmn_err(CE_WARN, "%s%d: Cannot get soft state",
+			    driver, instance);
+			return (DDI_FAILURE);
+		}
+		return (DDI_FAILURE);
+	case DDI_SUSPEND:
+		return (DDI_SUCCESS);
+	default:
+		return (DDI_FAILURE);
+	}
+}
+
+/*ARGSUSED*/
+static int
+amd_iommu_open(dev_t *devp, int flag, int otyp, cred_t *credp)
+{
+	int instance = AMD_IOMMU_MINOR2INST(getminor(*devp));
+	struct amd_iommu_state *statep;
+	const char *f = "amd_iommu_open";
+
+	if (instance < 0) {
+		cmn_err(CE_WARN, "%s: invalid instance %d",
+		    f, instance);
+		return (ENXIO);
+	}
+
+	if (!(flag & (FREAD|FWRITE))) {
+		cmn_err(CE_WARN, "%s: invalid flags %d", f, flag);
+		return (EINVAL);
+	}
+
+	if (otyp != OTYP_CHR) {
+		cmn_err(CE_WARN, "%s: invalid otyp %d", f, otyp);
+		return (EINVAL);
+	}
+
+	statep = ddi_get_soft_state(amd_iommu_statep, instance);
+	if (statep == NULL) {
+		cmn_err(CE_WARN, "%s: cannot get soft state: instance %d",
+		    f, instance);
+		return (ENXIO);
+	}
+
+	ASSERT(statep->aioms_instance == instance);
+
+	return (0);
+}
+
+/*ARGSUSED*/
+static int
+amd_iommu_close(dev_t dev, int flag, int otyp, cred_t *credp)
+{
+	int instance = AMD_IOMMU_MINOR2INST(getminor(dev));
+	struct amd_iommu_state *statep;
+	const char *f = "amd_iommu_close";
+
+	if (instance < 0) {
+		cmn_err(CE_WARN, "%s: invalid instance %d", f, instance);
+		return (ENXIO);
+	}
+
+	if (!(flag & (FREAD|FWRITE))) {
+		cmn_err(CE_WARN, "%s: invalid flags %d", f, flag);
+		return (EINVAL);
+	}
+
+	if (otyp != OTYP_CHR) {
+		cmn_err(CE_WARN, "%s: invalid otyp %d", f, otyp);
+		return (EINVAL);
+	}
+
+	statep = ddi_get_soft_state(amd_iommu_statep, instance);
+	if (statep == NULL) {
+		cmn_err(CE_WARN, "%s: cannot get soft state: instance %d",
+		    f, instance);
+		return (ENXIO);
+	}
+
+	ASSERT(statep->aioms_instance == instance);
+	return (0);
+
+}
+
+/*ARGSUSED*/
+static int
+amd_iommu_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
+    int *rvalp)
+{
+	int instance = AMD_IOMMU_MINOR2INST(getminor(dev));
+	struct amd_iommu_state *statep;
+	const char *f = "amd_iommu_ioctl";
+
+	ASSERT(rvalp);
+
+	if (instance < 0) {
+		cmn_err(CE_WARN, "%s: invalid instance %d", f, instance);
+		return (ENXIO);
+	}
+
+
+	if (!(mode & (FREAD|FWRITE))) {
+		cmn_err(CE_WARN, "%s: invalid mode %d", f, mode);
+		return (EINVAL);
+	}
+
+	if (mode & FKIOCTL) {
+		cmn_err(CE_WARN, "%s: FKIOCTL unsupported mode %d", f, mode);
+		return (EINVAL);
+	}
+
+	statep = ddi_get_soft_state(amd_iommu_statep, instance);
+	if (statep == NULL) {
+		cmn_err(CE_WARN, "%s: cannot get soft state: instance %d",
+		    f, instance);
+		return (ENXIO);
+	}
+
+	ASSERT(statep->aioms_instance == instance);
+
+	return (ENOTTY);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/i86pc/io/amd_iommu/amd_iommu.conf	Mon Sep 14 21:48:21 2009 -0700
@@ -0,0 +1,27 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
+# Use is subject to license terms.
+#
+#
+# To enable IOMMU set this to "yes" and rebuild boot archive
+amd-iommu="yes";
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_acpi.c	Mon Sep 14 21:48:21 2009 -0700
@@ -0,0 +1,951 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include "amd_iommu_acpi.h"
+#include "amd_iommu_impl.h"
+
+static int create_acpi_hash(amd_iommu_acpi_t *acpi);
+static void amd_iommu_acpi_table_fini(amd_iommu_acpi_t **acpipp);
+
+static void dump_acpi_aliases(void);
+
+
+/*
+ * Globals
+ */
+static amd_iommu_acpi_global_t *amd_iommu_acpi_global;
+static amd_iommu_acpi_ivhd_t **amd_iommu_acpi_ivhd_hash;
+static amd_iommu_acpi_ivmd_t **amd_iommu_acpi_ivmd_hash;
+
+static int
+type_byte_size(char *cp)
+{
+	uint8_t type8 = *((uint8_t *)cp);
+	uint8_t len_bits;
+
+	len_bits = AMD_IOMMU_REG_GET8(&type8, AMD_IOMMU_ACPI_DEVENTRY_LEN);
+
+	switch (len_bits) {
+	case 0:
+			return (4);
+	case 1:
+			return (8);
+	case 2:
+			return (16);
+	case 3:
+			return (32);
+	default:
+			cmn_err(CE_WARN, "%s: Invalid deventry len: %d",
+			    amd_iommu_modname, len_bits);
+			return (len_bits);
+	}
+	/*NOTREACHED*/
+}
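+
+/*
+ * Note: the 2-bit length field decoded above encodes the entry size as a
+ * power of two, so the switch is equivalent to the closed form
+ * (4 << len_bits): 0 -> 4, 1 -> 8, 2 -> 16, 3 -> 32 bytes.
+ */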
+
+static void
+process_4byte_deventry(ivhd_container_t *c, char *cp)
+{
+	int entry_type = *((uint8_t *)cp);
+	ivhd_deventry_t deventry = {0};
+	ivhd_deventry_t *devp;
+	uint8_t datsetting8;
+	align_16_t al = {0};
+	int i;
+
+	/* 4 byte entry */
+	deventry.idev_len = 4;
+	deventry.idev_deviceid = -1;
+	deventry.idev_src_deviceid = -1;
+
+	for (i = 0; i < 2; i++) {
+		al.ent8[i] = *((uint8_t *)&cp[i + 1]);
+	}
+
+	switch (entry_type) {
+	case 1:
+		deventry.idev_type = DEVENTRY_ALL;
+		break;
+	case 2:
+		deventry.idev_type = DEVENTRY_SELECT;
+		deventry.idev_deviceid = al.ent16;
+		break;
+	case 3:
+		deventry.idev_type = DEVENTRY_RANGE;
+		deventry.idev_deviceid = al.ent16;
+		break;
+	case 4:
+		deventry.idev_type = DEVENTRY_RANGE_END;
+		deventry.idev_deviceid = al.ent16;
+		ASSERT(cp[3] == 0);
+		break;
+	case 0:
+		ASSERT(al.ent16 == 0);
+		ASSERT(cp[3] == 0);
+		/*FALLTHROUGH*/
+	default:
+		return;
+	}
+
+
+	devp = kmem_alloc(sizeof (ivhd_deventry_t), KM_SLEEP);
+	*devp = deventry;
+
+	if (c->ivhdc_first_deventry == NULL)
+		c->ivhdc_first_deventry =  devp;
+	else
+		c->ivhdc_last_deventry->idev_next = devp;
+
+	c->ivhdc_last_deventry = devp;
+
+	if (entry_type == 4)
+		return;
+
+	datsetting8 = (*((uint8_t *)&cp[3]));
+
+	devp->idev_Lint1Pass = AMD_IOMMU_REG_GET8(&datsetting8,
+	    AMD_IOMMU_ACPI_LINT1PASS);
+
+	devp->idev_Lint0Pass = AMD_IOMMU_REG_GET8(&datsetting8,
+	    AMD_IOMMU_ACPI_LINT0PASS);
+
+	devp->idev_SysMgt = AMD_IOMMU_REG_GET8(&datsetting8,
+	    AMD_IOMMU_ACPI_SYSMGT);
+
+	ASSERT(AMD_IOMMU_REG_GET8(&datsetting8,
+	    AMD_IOMMU_ACPI_DATRSV) == 0);
+
+	devp->idev_NMIPass = AMD_IOMMU_REG_GET8(&datsetting8,
+	    AMD_IOMMU_ACPI_NMIPASS);
+
+	devp->idev_ExtIntPass = AMD_IOMMU_REG_GET8(&datsetting8,
+	    AMD_IOMMU_ACPI_EXTINTPASS);
+
+	devp->idev_INITPass = AMD_IOMMU_REG_GET8(&datsetting8,
+	    AMD_IOMMU_ACPI_INITPASS);
+}
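+
+/*
+ * Layout assumed by the parser above for a 4-byte IVHD device entry
+ * (sketch, inferred from the field accesses in this function):
+ *
+ *	byte 0		entry type (0 pad, 1 all, 2 select,
+ *			3 start-of-range, 4 end-of-range)
+ *	bytes 1-2	deviceid, assembled via the align_16_t union
+ *	byte 3		data settings (Lint1Pass ... INITPass)
+ */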
+
+static void
+process_8byte_deventry(ivhd_container_t *c, char *cp)
+{
+	uint8_t datsetting8;
+	int entry_type = (uint8_t)*cp;
+	ivhd_deventry_t deventry = {0};
+	ivhd_deventry_t *devp;
+	align_16_t al1 = {0};
+	align_16_t al2 = {0};
+	align_32_t al3 = {0};
+	int i;
+
+	/* Length is 8 bytes */
+	deventry.idev_len = 8;
+	deventry.idev_deviceid = -1;
+	deventry.idev_src_deviceid = -1;
+
+	for (i = 0; i < 2; i++) {
+		al1.ent8[i] = *((uint8_t *)&cp[i+1]);
+		al2.ent8[i] = *((uint8_t *)&cp[i+5]);
+	}
+
+	datsetting8 = *((uint8_t *)&cp[3]);
+
+	switch (entry_type) {
+	case 66:
+		deventry.idev_type = DEVENTRY_ALIAS_SELECT;
+		deventry.idev_deviceid = al1.ent16;
+		deventry.idev_src_deviceid = al2.ent16;
+		ASSERT(cp[4] == 0);
+		ASSERT(cp[7] == 0);
+		break;
+	case 67:
+		deventry.idev_type = DEVENTRY_ALIAS_RANGE;
+		deventry.idev_deviceid = al1.ent16;
+		deventry.idev_src_deviceid = al2.ent16;
+		ASSERT(cp[4] == 0);
+		ASSERT(cp[7] == 0);
+		break;
+	case 70:
+		deventry.idev_type = DEVENTRY_EXTENDED_SELECT;
+		deventry.idev_deviceid = al1.ent16;
+		break;
+	case 71:
+		deventry.idev_type = DEVENTRY_EXTENDED_RANGE;
+		deventry.idev_deviceid = al1.ent16;
+		break;
+	case 72:
+		deventry.idev_type = DEVENTRY_SPECIAL_DEVICE;
+		ASSERT(al1.ent16 == 0);
+		deventry.idev_deviceid = -1;
+		deventry.idev_handle = cp[4];
+		deventry.idev_variety = cp[7];
+		deventry.idev_src_deviceid = al2.ent16;
+		break;
+	default:
+#ifdef BROKEN_ASSERT
+		for (i = 0; i < 7; i++) {
+			ASSERT(cp[i] == 0);
+		}
+#endif
+		return;
+	}
+
+
+	devp = kmem_alloc(sizeof (ivhd_deventry_t), KM_SLEEP);
+	*devp = deventry;
+
+	if (c->ivhdc_first_deventry == NULL)
+		c->ivhdc_first_deventry =  devp;
+	else
+		c->ivhdc_last_deventry->idev_next = devp;
+
+	c->ivhdc_last_deventry = devp;
+
+	devp->idev_Lint1Pass = AMD_IOMMU_REG_GET8(&datsetting8,
+	    AMD_IOMMU_ACPI_LINT1PASS);
+
+	devp->idev_Lint0Pass = AMD_IOMMU_REG_GET8(&datsetting8,
+	    AMD_IOMMU_ACPI_LINT0PASS);
+
+	devp->idev_SysMgt = AMD_IOMMU_REG_GET8(&datsetting8,
+	    AMD_IOMMU_ACPI_SYSMGT);
+
+	ASSERT(AMD_IOMMU_REG_GET8(&datsetting8,
+	    AMD_IOMMU_ACPI_DATRSV) == 0);
+
+	devp->idev_NMIPass = AMD_IOMMU_REG_GET8(&datsetting8,
+	    AMD_IOMMU_ACPI_NMIPASS);
+
+	devp->idev_ExtIntPass = AMD_IOMMU_REG_GET8(&datsetting8,
+	    AMD_IOMMU_ACPI_EXTINTPASS);
+
+	devp->idev_INITPass = AMD_IOMMU_REG_GET8(&datsetting8,
+	    AMD_IOMMU_ACPI_INITPASS);
+
+	if (entry_type != 70 && entry_type != 71) {
+		return;
+	}
+
+	/* Type 70 and 71 */
+	for (i = 0; i < 4; i++) {
+		al3.ent8[i] = *((uint8_t *)&cp[i+4]);
+	}
+
+	devp->idev_AtsDisabled = AMD_IOMMU_REG_GET8(&al3.ent32,
+	    AMD_IOMMU_ACPI_ATSDISABLED);
+
+	ASSERT(AMD_IOMMU_REG_GET8(&al3.ent32, AMD_IOMMU_ACPI_EXTDATRSV) == 0);
+}
+
+static void
+process_ivhd(amd_iommu_acpi_t *acpi, ivhd_t *ivhdp)
+{
+	ivhd_container_t *c;
+	caddr_t ivhd_end;
+	caddr_t ivhd_tot_end;
+	caddr_t cp;
+
+	ASSERT(ivhdp->ivhd_type == 0x10);
+
+	c = kmem_zalloc(sizeof (ivhd_container_t), KM_SLEEP);
+	c->ivhdc_ivhd = kmem_alloc(sizeof (ivhd_t), KM_SLEEP);
+	*(c->ivhdc_ivhd) = *ivhdp;
+
+	if (acpi->acp_first_ivhdc == NULL)
+		acpi->acp_first_ivhdc = c;
+	else
+		acpi->acp_last_ivhdc->ivhdc_next = c;
+
+	acpi->acp_last_ivhdc = c;
+
+	ivhd_end = (caddr_t)ivhdp + sizeof (ivhd_t);
+	ivhd_tot_end = (caddr_t)ivhdp + ivhdp->ivhd_len;
+
+	for (cp = ivhd_end; cp < ivhd_tot_end; cp += type_byte_size(cp)) {
+		/* 16 byte and 32 byte size are currently reserved */
+		switch (type_byte_size(cp)) {
+		case 4:
+			process_4byte_deventry(c, cp);
+			break;
+		case 8:
+			process_8byte_deventry(c, cp);
+			break;
+		case 16:
+		case 32:
+			/* Reserved */
+			break;
+		default:
+			cmn_err(CE_WARN, "%s: unsupported length for device "
+			    "entry in ACPI IVRS table's IVHD entry",
+			    amd_iommu_modname);
+			break;
+		}
+	}
+}
+
+static void
+process_ivmd(amd_iommu_acpi_t *acpi, ivmd_t *ivmdp)
+{
+	ivmd_container_t *c;
+
+	ASSERT(ivmdp->ivmd_type != 0x10);
+
+	c = kmem_zalloc(sizeof (ivmd_container_t), KM_SLEEP);
+	c->ivmdc_ivmd = kmem_alloc(sizeof (ivmd_t), KM_SLEEP);
+	*(c->ivmdc_ivmd) = *ivmdp;
+
+	if (acpi->acp_first_ivmdc == NULL)
+		acpi->acp_first_ivmdc = c;
+	else
+		acpi->acp_last_ivmdc->ivmdc_next = c;
+
+	acpi->acp_last_ivmdc = c;
+}
+
+int
+amd_iommu_acpi_init(void)
+{
+	ivrs_t *ivrsp;
+	caddr_t ivrsp_end;
+	caddr_t table_end;
+	caddr_t cp;
+	uint8_t type8;
+	amd_iommu_acpi_t *acpi;
+	align_ivhd_t al_vhd = {0};
+	align_ivmd_t al_vmd = {0};
+
+	if (AcpiGetTable(IVRS_SIG, 1, (ACPI_TABLE_HEADER **)&ivrsp) != AE_OK) {
+		cmn_err(CE_NOTE, "!amd_iommu: No AMD IOMMU ACPI IVRS table");
+		return (DDI_FAILURE);
+	}
+
+	/*
+	 * Reserved field must be 0
+	 */
+	ASSERT(ivrsp->ivrs_resv == 0);
+
+	ASSERT(AMD_IOMMU_REG_GET32(&ivrsp->ivrs_ivinfo,
+	    AMD_IOMMU_ACPI_IVINFO_RSV1) == 0);
+	ASSERT(AMD_IOMMU_REG_GET32(&ivrsp->ivrs_ivinfo,
+	    AMD_IOMMU_ACPI_IVINFO_RSV2) == 0);
+
+	ivrsp_end = (caddr_t)ivrsp + sizeof (struct ivrs);
+	table_end = (caddr_t)ivrsp + ivrsp->ivrs_hdr.Length;
+
+	acpi = kmem_zalloc(sizeof (amd_iommu_acpi_t), KM_SLEEP);
+	acpi->acp_ivrs = kmem_alloc(sizeof (ivrs_t), KM_SLEEP);
+	*(acpi->acp_ivrs) = *ivrsp;
+
+	for (cp = ivrsp_end; cp < table_end; cp += (al_vhd.ivhdp)->ivhd_len) {
+		al_vhd.cp = cp;
+		if (al_vhd.ivhdp->ivhd_type == 0x10)
+			process_ivhd(acpi, al_vhd.ivhdp);
+	}
+
+	for (cp = ivrsp_end; cp < table_end; cp += (al_vmd.ivmdp)->ivmd_len) {
+		al_vmd.cp = cp;
+		type8 = al_vmd.ivmdp->ivmd_type;
+		if (type8 == 0x20 || type8 == 0x21 || type8 == 0x22)
+			process_ivmd(acpi, al_vmd.ivmdp);
+	}
+
+	if (create_acpi_hash(acpi) != DDI_SUCCESS) {
+		return (DDI_FAILURE);
+	}
+
+	amd_iommu_acpi_table_fini(&acpi);
+
+	ASSERT(acpi == NULL);
+
+	if (amd_iommu_debug & AMD_IOMMU_DEBUG_ACPI) {
+		dump_acpi_aliases();
+		debug_enter("dump");
+	}
+
+	return (DDI_SUCCESS);
+}
+
+static ivhd_deventry_t *
+free_ivhd_deventry(ivhd_deventry_t *devp)
+{
+	ivhd_deventry_t *next = devp->idev_next;
+
+	kmem_free(devp, sizeof (ivhd_deventry_t));
+
+	return (next);
+}
+
+static ivhd_container_t *
+free_ivhd_container(ivhd_container_t *ivhdcp)
+{
+	ivhd_container_t *next = ivhdcp->ivhdc_next;
+	ivhd_deventry_t *devp;
+
+	for (devp = ivhdcp->ivhdc_first_deventry; devp; ) {
+		devp = free_ivhd_deventry(devp);
+	}
+
+	kmem_free(ivhdcp->ivhdc_ivhd, sizeof (ivhd_t));
+	kmem_free(ivhdcp, sizeof (ivhd_container_t));
+
+	return (next);
+}
+
+static ivmd_container_t *
+free_ivmd_container(ivmd_container_t *ivmdcp)
+{
+	ivmd_container_t *next = ivmdcp->ivmdc_next;
+
+	kmem_free(ivmdcp->ivmdc_ivmd, sizeof (ivmd_t));
+	kmem_free(ivmdcp, sizeof (ivmd_container_t));
+
+	return (next);
+}
+
+void
+amd_iommu_acpi_fini(void)
+{
+}
+
+/*
+ * TODO: Do we need to free the ACPI table from GetFirmwareTable()?
+ */
+static void
+amd_iommu_acpi_table_fini(amd_iommu_acpi_t **acpipp)
+{
+	amd_iommu_acpi_t *acpi = *acpipp;
+	ivhd_container_t *ivhdcp;
+	ivmd_container_t *ivmdcp;
+
+	ASSERT(acpi);
+
+	for (ivhdcp = acpi->acp_first_ivhdc; ivhdcp; ) {
+		ivhdcp = free_ivhd_container(ivhdcp);
+	}
+	for (ivmdcp = acpi->acp_first_ivmdc; ivmdcp; ) {
+		ivmdcp = free_ivmd_container(ivmdcp);
+	}
+
+	kmem_free(acpi->acp_ivrs, sizeof (struct ivrs));
+	kmem_free(acpi, sizeof (amd_iommu_acpi_t));
+
+	*acpipp = NULL;
+}
+
+static uint16_t
+deviceid_hashfn(uint16_t deviceid)
+{
+	return (deviceid % AMD_IOMMU_ACPI_INFO_HASH_SZ);
+}
+
+static void
+add_deventry_info(ivhd_t *ivhdp, ivhd_deventry_t *deventry,
+    amd_iommu_acpi_ivhd_t **hash)
+{
+	static amd_iommu_acpi_ivhd_t *last;
+	amd_iommu_acpi_ivhd_t *acpi_ivhdp;
+	uint8_t uint8_flags;
+	uint16_t uint16_info;
+	uint16_t idx;
+
+	if (deventry->idev_type == DEVENTRY_RANGE_END) {
+		ASSERT(last);
+		acpi_ivhdp = last;
+		last = NULL;
+		ASSERT(acpi_ivhdp->ach_dev_type == DEVENTRY_RANGE ||
+		    acpi_ivhdp->ach_dev_type == DEVENTRY_ALIAS_RANGE ||
+		    acpi_ivhdp->ach_dev_type == DEVENTRY_EXTENDED_RANGE);
+		ASSERT(acpi_ivhdp->ach_deviceid_end == -1);
+		acpi_ivhdp->ach_deviceid_end = deventry->idev_deviceid;
+		/* TODO ASSERT data is 0 */
+		return;
+	}
+
+	ASSERT(last == NULL);
+	acpi_ivhdp = kmem_zalloc(sizeof (*acpi_ivhdp), KM_SLEEP);
+
+	uint8_flags = ivhdp->ivhd_flags;
+
+#ifdef BROKEN_ASSERT
+	ASSERT(AMD_IOMMU_REG_GET8(&uint8_flags,
+	    AMD_IOMMU_ACPI_IVHD_FLAGS_RSV) == 0);
+#endif
+
+	acpi_ivhdp->ach_IotlbSup = AMD_IOMMU_REG_GET8(&uint8_flags,
+	    AMD_IOMMU_ACPI_IVHD_FLAGS_IOTLBSUP);
+	acpi_ivhdp->ach_Isoc = AMD_IOMMU_REG_GET8(&uint8_flags,
+	    AMD_IOMMU_ACPI_IVHD_FLAGS_ISOC);
+	acpi_ivhdp->ach_ResPassPW = AMD_IOMMU_REG_GET8(&uint8_flags,
+	    AMD_IOMMU_ACPI_IVHD_FLAGS_RESPASSPW);
+	acpi_ivhdp->ach_PassPW = AMD_IOMMU_REG_GET8(&uint8_flags,
+	    AMD_IOMMU_ACPI_IVHD_FLAGS_PASSPW);
+	acpi_ivhdp->ach_HtTunEn = AMD_IOMMU_REG_GET8(&uint8_flags,
+	    AMD_IOMMU_ACPI_IVHD_FLAGS_HTTUNEN);
+
+	/* IVHD fields */
+	acpi_ivhdp->ach_IOMMU_deviceid = ivhdp->ivhd_deviceid;
+	acpi_ivhdp->ach_IOMMU_cap_off = ivhdp->ivhd_cap_off;
+	acpi_ivhdp->ach_IOMMU_reg_base = ivhdp->ivhd_reg_base;
+	acpi_ivhdp->ach_IOMMU_pci_seg = ivhdp->ivhd_pci_seg;
+
+	/* IVHD IOMMU info fields */
+	uint16_info = ivhdp->ivhd_iommu_info;
+
+#ifdef BROKEN_ASSERT
+	ASSERT(AMD_IOMMU_REG_GET16(&uint16_info,
+	    AMD_IOMMU_ACPI_IOMMU_INFO_RSV1) == 0);
+#endif
+
+	acpi_ivhdp->ach_IOMMU_UnitID = AMD_IOMMU_REG_GET16(&uint16_info,
+	    AMD_IOMMU_ACPI_IOMMU_INFO_UNITID);
+	ASSERT(AMD_IOMMU_REG_GET16(&uint16_info,
+	    AMD_IOMMU_ACPI_IOMMU_INFO_RSV2) == 0);
+	acpi_ivhdp->ach_IOMMU_MSInum = AMD_IOMMU_REG_GET16(&uint16_info,
+	    AMD_IOMMU_ACPI_IOMMU_INFO_MSINUM);
+
+	/* Initialize  deviceids to -1 */
+	acpi_ivhdp->ach_deviceid_start = -1;
+	acpi_ivhdp->ach_deviceid_end = -1;
+	acpi_ivhdp->ach_src_deviceid = -1;
+
+	/* All range type entries are put on the last hash entry */
+	switch (deventry->idev_type) {
+	case DEVENTRY_ALL:
+		acpi_ivhdp->ach_deviceid_start = 0;
+		acpi_ivhdp->ach_deviceid_end = (uint16_t)-1;
+		acpi_ivhdp->ach_dev_type = DEVENTRY_ALL;
+		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
+		break;
+	case DEVENTRY_SELECT:
+		acpi_ivhdp->ach_deviceid_start = deventry->idev_deviceid;
+		acpi_ivhdp->ach_deviceid_end = deventry->idev_deviceid;
+		acpi_ivhdp->ach_dev_type = DEVENTRY_SELECT;
+		idx = deviceid_hashfn(deventry->idev_deviceid);
+		break;
+	case DEVENTRY_RANGE:
+		acpi_ivhdp->ach_deviceid_start = deventry->idev_deviceid;
+		acpi_ivhdp->ach_deviceid_end = -1;
+		acpi_ivhdp->ach_dev_type = DEVENTRY_RANGE;
+		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
+		last = acpi_ivhdp;
+		break;
+	case DEVENTRY_RANGE_END:
+		cmn_err(CE_PANIC, "%s: Unexpected Range End Deventry",
+		    amd_iommu_modname);
+		/*NOTREACHED*/
+	case DEVENTRY_ALIAS_SELECT:
+		acpi_ivhdp->ach_deviceid_start = deventry->idev_deviceid;
+		acpi_ivhdp->ach_deviceid_end = deventry->idev_deviceid;
+		acpi_ivhdp->ach_src_deviceid = deventry->idev_src_deviceid;
+		acpi_ivhdp->ach_dev_type = DEVENTRY_ALIAS_SELECT;
+		idx = deviceid_hashfn(deventry->idev_deviceid);
+		break;
+	case DEVENTRY_ALIAS_RANGE:
+		acpi_ivhdp->ach_deviceid_start = deventry->idev_deviceid;
+		acpi_ivhdp->ach_deviceid_end = -1;
+		acpi_ivhdp->ach_src_deviceid = deventry->idev_src_deviceid;
+		acpi_ivhdp->ach_dev_type = DEVENTRY_ALIAS_RANGE;
+		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
+		last = acpi_ivhdp;
+		break;
+	case DEVENTRY_EXTENDED_SELECT:
+		acpi_ivhdp->ach_deviceid_start = deventry->idev_deviceid;
+		acpi_ivhdp->ach_deviceid_end = deventry->idev_deviceid;
+		acpi_ivhdp->ach_dev_type = DEVENTRY_EXTENDED_SELECT;
+		idx = deviceid_hashfn(deventry->idev_deviceid);
+		break;
+	case DEVENTRY_EXTENDED_RANGE:
+		acpi_ivhdp->ach_deviceid_start = deventry->idev_deviceid;
+		acpi_ivhdp->ach_deviceid_end = -1;
+		acpi_ivhdp->ach_dev_type = DEVENTRY_EXTENDED_RANGE;
+		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
+		last = acpi_ivhdp;
+		break;
+	case DEVENTRY_SPECIAL_DEVICE:
+		acpi_ivhdp->ach_deviceid_start = -1;
+		acpi_ivhdp->ach_deviceid_end = -1;
+		acpi_ivhdp->ach_src_deviceid = deventry->idev_src_deviceid;
+		acpi_ivhdp->ach_special_handle = deventry->idev_handle;
+		acpi_ivhdp->ach_special_variety = deventry->idev_variety;
+		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
+		break;
+	default:
+		cmn_err(CE_PANIC, "%s: Unsupported deventry type",
+		    amd_iommu_modname);
+		/*NOTREACHED*/
+	}
+
+	acpi_ivhdp->ach_Lint1Pass = deventry->idev_Lint1Pass;
+	acpi_ivhdp->ach_Lint0Pass = deventry->idev_Lint0Pass;
+	acpi_ivhdp->ach_SysMgt = deventry->idev_SysMgt;
+	acpi_ivhdp->ach_NMIPass = deventry->idev_NMIPass;
+	acpi_ivhdp->ach_ExtIntPass = deventry->idev_ExtIntPass;
+	acpi_ivhdp->ach_INITPass = deventry->idev_INITPass;
+
+
+	/* extended data */
+	if (acpi_ivhdp->ach_dev_type == DEVENTRY_EXTENDED_SELECT ||
+	    acpi_ivhdp->ach_dev_type == DEVENTRY_EXTENDED_RANGE) {
+		acpi_ivhdp->ach_AtsDisabled = deventry->idev_AtsDisabled;
+	}
+
+	/*
+	 * Now add it to the hash
+	 */
+	ASSERT(hash[idx] != acpi_ivhdp);
+	acpi_ivhdp->ach_next = hash[idx];
+	hash[idx] = acpi_ivhdp;
+}
+
+static void
+add_ivhdc_info(ivhd_container_t *ivhdcp, amd_iommu_acpi_ivhd_t **hash)
+{
+	ivhd_deventry_t *deventry;
+	ivhd_t *ivhdp = ivhdcp->ivhdc_ivhd;
+
+	for (deventry = ivhdcp->ivhdc_first_deventry; deventry;
+	    deventry = deventry->idev_next) {
+		add_deventry_info(ivhdp, deventry, hash);
+	}
+}
+
+static void
+add_ivhd_info(amd_iommu_acpi_t *acpi, amd_iommu_acpi_ivhd_t **hash)
+{
+	ivhd_container_t *ivhdcp;
+
+	for (ivhdcp = acpi->acp_first_ivhdc; ivhdcp;
+	    ivhdcp = ivhdcp->ivhdc_next) {
+		add_ivhdc_info(ivhdcp, hash);
+	}
+}
+
+static void
+set_ivmd_info(ivmd_t *ivmdp, amd_iommu_acpi_ivmd_t **hash)
+{
+	amd_iommu_acpi_ivmd_t *acpi_ivmdp;
+	uint8_t uint8_flags;
+	uint16_t idx;
+
+	uint8_flags = ivmdp->ivmd_flags;
+
+	acpi_ivmdp = kmem_zalloc(sizeof (*acpi_ivmdp), KM_SLEEP);
+
+	switch (ivmdp->ivmd_type) {
+	case 0x20:
+		acpi_ivmdp->acm_deviceid_start = 0;
+		acpi_ivmdp->acm_deviceid_end = (uint16_t)-1;
+		acpi_ivmdp->acm_dev_type = IVMD_DEVICEID_ALL;
+		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
+		break;
+	case 0x21:
+		acpi_ivmdp->acm_deviceid_start = ivmdp->ivmd_deviceid;
+		acpi_ivmdp->acm_deviceid_end = ivmdp->ivmd_deviceid;
+		acpi_ivmdp->acm_dev_type = IVMD_DEVICEID_SELECT;
+		idx = deviceid_hashfn(ivmdp->ivmd_deviceid);
+		break;
+	case 0x22:
+		acpi_ivmdp->acm_deviceid_start = ivmdp->ivmd_deviceid;
+		acpi_ivmdp->acm_deviceid_end = ivmdp->ivmd_auxdata;
+		acpi_ivmdp->acm_dev_type = IVMD_DEVICEID_RANGE;
+		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
+		break;
+	default:
+		cmn_err(CE_PANIC, "Unknown AMD IOMMU ACPI IVMD deviceid type: "
+		    "%x", ivmdp->ivmd_type);
+		/*NOTREACHED*/
+	}
+
+	ASSERT(AMD_IOMMU_REG_GET8(&uint8_flags,
+	    AMD_IOMMU_ACPI_IVMD_RSV) == 0);
+
+	acpi_ivmdp->acm_ExclRange = AMD_IOMMU_REG_GET8(&uint8_flags,
+	    AMD_IOMMU_ACPI_IVMD_EXCL_RANGE);
+	acpi_ivmdp->acm_IW = AMD_IOMMU_REG_GET8(&uint8_flags,
+	    AMD_IOMMU_ACPI_IVMD_IW);
+	acpi_ivmdp->acm_IR = AMD_IOMMU_REG_GET8(&uint8_flags,
+	    AMD_IOMMU_ACPI_IVMD_IR);
+	acpi_ivmdp->acm_Unity = AMD_IOMMU_REG_GET8(&uint8_flags,
+	    AMD_IOMMU_ACPI_IVMD_UNITY);
+
+	acpi_ivmdp->acm_ivmd_phys_start = ivmdp->ivmd_phys_start;
+	acpi_ivmdp->acm_ivmd_phys_len = ivmdp->ivmd_phys_len;
+
+	acpi_ivmdp->acm_next = hash[idx];
+	hash[idx] = acpi_ivmdp;
+}
+
+static void
+add_ivmdc_info(ivmd_container_t *ivmdcp, amd_iommu_acpi_ivmd_t **hash)
+{
+	set_ivmd_info(ivmdcp->ivmdc_ivmd, hash);
+}
+
+static void
+add_ivmd_info(amd_iommu_acpi_t *acpi, amd_iommu_acpi_ivmd_t **hash)
+{
+	ivmd_container_t *ivmdcp;
+
+	for (ivmdcp = acpi->acp_first_ivmdc; ivmdcp;
+	    ivmdcp = ivmdcp->ivmdc_next) {
+		add_ivmdc_info(ivmdcp, hash);
+	}
+}
+
+static void
+add_global_info(amd_iommu_acpi_t *acpi, amd_iommu_acpi_global_t *global)
+{
+	uint32_t ivrs_ivinfo = acpi->acp_ivrs->ivrs_ivinfo;
+
+	global->acg_HtAtsResv =
+	    AMD_IOMMU_REG_GET32(&ivrs_ivinfo, AMD_IOMMU_ACPI_HT_ATSRSV);
+	global->acg_VAsize =
+	    AMD_IOMMU_REG_GET32(&ivrs_ivinfo, AMD_IOMMU_ACPI_VA_SIZE);
+	global->acg_PAsize =
+	    AMD_IOMMU_REG_GET32(&ivrs_ivinfo, AMD_IOMMU_ACPI_PA_SIZE);
+}
+
+static int
+create_acpi_hash(amd_iommu_acpi_t *acpi)
+{
+	/* Last hash entry is for deviceid ranges including "all" */
+
+	amd_iommu_acpi_global = kmem_zalloc(sizeof (amd_iommu_acpi_global_t),
+	    KM_SLEEP);
+
+	amd_iommu_acpi_ivhd_hash = kmem_zalloc(sizeof (amd_iommu_acpi_ivhd_t *)
+	    * (AMD_IOMMU_ACPI_INFO_HASH_SZ + 1), KM_SLEEP);
+
+	amd_iommu_acpi_ivmd_hash = kmem_zalloc(sizeof (amd_iommu_acpi_ivmd_t *)
+	    * (AMD_IOMMU_ACPI_INFO_HASH_SZ + 1), KM_SLEEP);
+
+	add_global_info(acpi, amd_iommu_acpi_global);
+
+	add_ivhd_info(acpi, amd_iommu_acpi_ivhd_hash);
+
+	add_ivmd_info(acpi, amd_iommu_acpi_ivmd_hash);
+
+	return (DDI_SUCCESS);
+}
+
+amd_iommu_acpi_global_t *
+amd_iommu_lookup_acpi_global(void)
+{
+	ASSERT(amd_iommu_acpi_global);
+
+	return (amd_iommu_acpi_global);
+}
+
+amd_iommu_acpi_ivhd_t *
+amd_iommu_lookup_all_ivhd(void)
+{
+	amd_iommu_acpi_ivhd_t *hinfop;
+
+	hinfop = amd_iommu_acpi_ivhd_hash[AMD_IOMMU_ACPI_INFO_HASH_SZ];
+	for (; hinfop; hinfop = hinfop->ach_next) {
+		if (hinfop->ach_deviceid_start == 0 &&
+		    hinfop->ach_deviceid_end == (uint16_t)-1) {
+			break;
+		}
+	}
+
+	return (hinfop);
+}
+
+amd_iommu_acpi_ivmd_t *
+amd_iommu_lookup_all_ivmd(void)
+{
+	amd_iommu_acpi_ivmd_t *minfop;
+
+	minfop = amd_iommu_acpi_ivmd_hash[AMD_IOMMU_ACPI_INFO_HASH_SZ];
+	for (; minfop; minfop = minfop->acm_next) {
+		if (minfop->acm_deviceid_start == 0 &&
+		    minfop->acm_deviceid_end == (uint16_t)-1) {
+			break;
+		}
+	}
+
+	return (minfop);
+}
+
+amd_iommu_acpi_ivhd_t *
+amd_iommu_lookup_any_ivhd(void)
+{
+	int i;
+	amd_iommu_acpi_ivhd_t *hinfop;
+
+	for (i = AMD_IOMMU_ACPI_INFO_HASH_SZ; i >= 0; i--) {
+		/*LINTED*/
+		if (hinfop = amd_iommu_acpi_ivhd_hash[i])
+			break;
+	}
+
+	return (hinfop);
+}
+
+amd_iommu_acpi_ivmd_t *
+amd_iommu_lookup_any_ivmd(void)
+{
+	int i;
+	amd_iommu_acpi_ivmd_t *minfop;
+
+	for (i = AMD_IOMMU_ACPI_INFO_HASH_SZ; i >= 0; i--) {
+		/*LINTED*/
+		if (minfop = amd_iommu_acpi_ivmd_hash[i])
+			break;
+	}
+
+	return (minfop);
+}
+
+static void
+dump_acpi_aliases(void)
+{
+	amd_iommu_acpi_ivhd_t *hinfop;
+	uint16_t idx;
+
+	for (idx = 0; idx <= AMD_IOMMU_ACPI_INFO_HASH_SZ; idx++) {
+		hinfop = amd_iommu_acpi_ivhd_hash[idx];
+		for (; hinfop; hinfop = hinfop->ach_next) {
+			cmn_err(CE_NOTE, "start=%d, end=%d, src_bdf=%d",
+			    hinfop->ach_deviceid_start,
+			    hinfop->ach_deviceid_end,
+			    hinfop->ach_src_deviceid);
+		}
+	}
+}
+
+amd_iommu_acpi_ivhd_t *
+amd_iommu_lookup_ivhd(int32_t deviceid)
+{
+	amd_iommu_acpi_ivhd_t *hinfop;
+	uint16_t idx;
+
+	if (amd_iommu_debug == AMD_IOMMU_DEBUG_ACPI) {
+		cmn_err(CE_NOTE, "Attempting to get ACPI IVHD info "
+		    "for deviceid: %d", deviceid);
+	}
+
+	ASSERT(amd_iommu_acpi_ivhd_hash);
+
+	/* check if special device */
+	if (deviceid == -1) {
+		hinfop = amd_iommu_acpi_ivhd_hash[AMD_IOMMU_ACPI_INFO_HASH_SZ];
+		for (; hinfop; hinfop = hinfop->ach_next) {
+			if (hinfop->ach_deviceid_start  == -1 &&
+			    hinfop->ach_deviceid_end == -1) {
+				break;
+			}
+		}
+		return (hinfop);
+	}
+
+	/* First search for an exact match */
+
+	idx = deviceid_hashfn(deviceid);
+
+
+range:
+	hinfop = amd_iommu_acpi_ivhd_hash[idx];
+
+	for (; hinfop; hinfop = hinfop->ach_next) {
+		if (deviceid < hinfop->ach_deviceid_start ||
+		    deviceid > hinfop->ach_deviceid_end)
+			continue;
+
+		if (amd_iommu_debug == AMD_IOMMU_DEBUG_ACPI) {
+			cmn_err(CE_NOTE, "Found ACPI IVHD match: %p, "
+			    "actual deviceid = %u, start = %u, end = %u",
+			    (void *)hinfop, deviceid,
+			    hinfop->ach_deviceid_start,
+			    hinfop->ach_deviceid_end);
+		}
+		goto out;
+	}
+
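+	/* No exact match; retry against the shared range/"all" bucket */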
+	if (idx !=  AMD_IOMMU_ACPI_INFO_HASH_SZ) {
+		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
+		goto range;
+	} else {
+		cmn_err(CE_PANIC, "IVHD not found for deviceid: %x", deviceid);
+	}
+
+out:
+	if (amd_iommu_debug == AMD_IOMMU_DEBUG_ACPI) {
+		cmn_err(CE_NOTE, "%u: %s ACPI IVHD %p", deviceid,
+		    hinfop ? "GOT" : "Did NOT get", (void *)hinfop);
+	}
+
+	return (hinfop);
+}
+
+amd_iommu_acpi_ivmd_t *
+amd_iommu_lookup_ivmd(int32_t deviceid)
+{
+	amd_iommu_acpi_ivmd_t *minfop;
+	uint16_t idx;
+
+	if (amd_iommu_debug == AMD_IOMMU_DEBUG_ACPI) {
+		cmn_err(CE_NOTE, "Attempting to get ACPI IVMD info "
+		    "for deviceid: %u", deviceid);
+	}
+
+	ASSERT(amd_iommu_acpi_ivmd_hash);
+
+	/* First search for an exact match */
+
+	idx = deviceid_hashfn(deviceid);
+
+
+range:
+	minfop = amd_iommu_acpi_ivmd_hash[idx];
+
+	for (; minfop; minfop = minfop->acm_next) {
+		if (deviceid < minfop->acm_deviceid_start ||
+		    deviceid > minfop->acm_deviceid_end)
+			continue;
+
+		if (amd_iommu_debug == AMD_IOMMU_DEBUG_ACPI) {
+			cmn_err(CE_NOTE, "Found ACPI IVMD match: %p, "
+			    "actual deviceid = %u, start = %u, end = %u",
+			    (void *)minfop, deviceid,
+			    minfop->acm_deviceid_start,
+			    minfop->acm_deviceid_end);
+		}
+
+		goto out;
+	}
+
+	if (idx !=  AMD_IOMMU_ACPI_INFO_HASH_SZ) {
+		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
+		goto range;
+	} else {
+		cmn_err(CE_PANIC, "IVMD not found for deviceid: %x", deviceid);
+	}
+
+out:
+	if (amd_iommu_debug == AMD_IOMMU_DEBUG_ACPI) {
+		cmn_err(CE_NOTE, "%u: %s ACPI IVMD info %p", deviceid,
+		    minfop ? "GOT" : "Did NOT get", (void *)minfop);
+	}
+
+	return (minfop);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_acpi.h	Mon Sep 14 21:48:21 2009 -0700
@@ -0,0 +1,306 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _AMD_IOMMU_ACPI_H
+#define	_AMD_IOMMU_ACPI_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/sunddi.h>
+#include <sys/acpi/acpi.h>
+#include <sys/acpica.h>
+#include <sys/amd_iommu.h>
+#include "amd_iommu_impl.h"
+
+#ifdef _KERNEL
+
+#define	IVRS_SIG	"IVRS"
+
+/*
+ * IVINFO settings
+ */
+#define	AMD_IOMMU_ACPI_IVINFO_RSV1	(31 << 16 | 23)
+#define	AMD_IOMMU_ACPI_HT_ATSRSV	(22 << 16 | 22)
+#define	AMD_IOMMU_ACPI_VA_SIZE		(21 << 16 | 15)
+#define	AMD_IOMMU_ACPI_PA_SIZE		(14 << 16 | 8)
+#define	AMD_IOMMU_ACPI_IVINFO_RSV2	(7 << 16 | 0)
+
+/*
+ * IVHD Device entry len field
+ */
+#define	AMD_IOMMU_ACPI_DEVENTRY_LEN	(7 << 16 | 6)
+
+/*
+ * IVHD flag fields definition
+ */
+#define	AMD_IOMMU_ACPI_IVHD_FLAGS_RSV		(7 << 16 | 5)
+#define	AMD_IOMMU_ACPI_IVHD_FLAGS_IOTLBSUP	(4 << 16 | 4)
+#define	AMD_IOMMU_ACPI_IVHD_FLAGS_ISOC		(3 << 16 | 3)
+#define	AMD_IOMMU_ACPI_IVHD_FLAGS_RESPASSPW	(2 << 16 | 2)
+#define	AMD_IOMMU_ACPI_IVHD_FLAGS_PASSPW	(1 << 16 | 1)
+#define	AMD_IOMMU_ACPI_IVHD_FLAGS_HTTUNEN	(0 << 16 | 0)
+
+/*
+ * IVHD IOMMU info fields
+ */
+#define	AMD_IOMMU_ACPI_IOMMU_INFO_RSV1		(15 << 16 | 13)
+#define	AMD_IOMMU_ACPI_IOMMU_INFO_UNITID	(12 << 16 | 8)
+#define	AMD_IOMMU_ACPI_IOMMU_INFO_RSV2		(7 << 16 | 5)
+#define	AMD_IOMMU_ACPI_IOMMU_INFO_MSINUM	(4 << 16 | 0)
+
+/*
+ * IVHD deventry data settings
+ */
+#define	AMD_IOMMU_ACPI_LINT1PASS	(7 << 16 | 7)
+#define	AMD_IOMMU_ACPI_LINT0PASS	(6 << 16 | 6)
+#define	AMD_IOMMU_ACPI_SYSMGT		(5 << 16 | 4)
+#define	AMD_IOMMU_ACPI_DATRSV		(3 << 16 | 3)
+#define	AMD_IOMMU_ACPI_NMIPASS		(2 << 16 | 2)
+#define	AMD_IOMMU_ACPI_EXTINTPASS	(1 << 16 | 1)
+#define	AMD_IOMMU_ACPI_INITPASS		(0 << 16 | 0)
+
+/*
+ * IVHD deventry extended data settings
+ */
+#define	AMD_IOMMU_ACPI_ATSDISABLED	(31 << 16 | 31)
+#define	AMD_IOMMU_ACPI_EXTDATRSV	(30 << 16 | 0)
+
+/*
+ * IVMD flags fields settings
+ */
+#define	AMD_IOMMU_ACPI_IVMD_RSV		(7 << 16 | 4)
+#define	AMD_IOMMU_ACPI_IVMD_EXCL_RANGE	(3 << 16 | 3)
+#define	AMD_IOMMU_ACPI_IVMD_IW		(2 << 16 | 2)
+#define	AMD_IOMMU_ACPI_IVMD_IR		(1 << 16 | 1)
+#define	AMD_IOMMU_ACPI_IVMD_UNITY	(0 << 16 | 0)
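+
+/*
+ * Sketch (assumption: this mirrors the AMD_IOMMU_REG_GET8/SET32 accessors
+ * declared in amd_iommu_impl.h, which is not part of this file): each
+ * field macro above packs the field's high and low bit positions as
+ * (high << 16 | low), so an accessor can recover both and build a mask:
+ *
+ *	uint8_t
+ *	reg_get8_sketch(uint8_t *regp, uint32_t field)
+ *	{
+ *		int hi = field >> 16, lo = field & 0xffff;
+ *		uint8_t mask = ((1U << (hi - lo + 1)) - 1) << lo;
+ *
+ *		return ((*regp & mask) >> lo);
+ *	}
+ */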
+
+#define	AMD_IOMMU_ACPI_INFO_HASH_SZ	(256)
+
+/*
+ * Deventry special device "variety"
+ */
+#define	AMD_IOMMU_ACPI_SPECIAL_APIC	0x1
+#define	AMD_IOMMU_ACPI_SPECIAL_HPET	0x2
+
+typedef enum {
+	DEVENTRY_INVALID = 0,
+	DEVENTRY_ALL = 1,
+	DEVENTRY_SELECT,
+	DEVENTRY_RANGE,
+	DEVENTRY_RANGE_END,
+	DEVENTRY_ALIAS_SELECT,
+	DEVENTRY_ALIAS_RANGE,
+	DEVENTRY_EXTENDED_SELECT,
+	DEVENTRY_EXTENDED_RANGE,
+	DEVENTRY_SPECIAL_DEVICE
+} ivhd_deventry_type_t;
+
+typedef enum {
+	IVMD_DEVICE_INVALID = 0,
+	IVMD_DEVICEID_ALL,
+	IVMD_DEVICEID_SELECT,
+	IVMD_DEVICEID_RANGE
+} ivmd_deviceid_type_t;
+
+typedef struct ivhd_deventry {
+	uint8_t idev_len;
+	ivhd_deventry_type_t  idev_type;
+	int32_t idev_deviceid;
+	int32_t idev_src_deviceid;
+	uint8_t idev_handle;
+	uint8_t idev_variety;
+	uint8_t idev_Lint1Pass;
+	uint8_t idev_Lint0Pass;
+	uint8_t idev_SysMgt;
+	uint8_t idev_NMIPass;
+	uint8_t idev_ExtIntPass;
+	uint8_t idev_INITPass;
+	uint8_t idev_AtsDisabled;
+	struct ivhd_deventry *idev_next;
+} ivhd_deventry_t;
+
+#pragma pack(1)
+typedef struct ivhd {
+	uint8_t ivhd_type;
+	uint8_t ivhd_flags;
+	uint16_t ivhd_len;
+	uint16_t ivhd_deviceid;
+	uint16_t ivhd_cap_off;
+	uint64_t ivhd_reg_base;
+	uint16_t ivhd_pci_seg;
+	uint16_t ivhd_iommu_info;
+	uint32_t ivhd_resv;
+} ivhd_t;
+
+typedef struct ivhd_container {
+	ivhd_t *ivhdc_ivhd;
+	ivhd_deventry_t *ivhdc_first_deventry;
+	ivhd_deventry_t *ivhdc_last_deventry;
+	struct ivhd_container *ivhdc_next;
+} ivhd_container_t;
+
+typedef struct ivmd {
+	uint8_t ivmd_type;
+	uint8_t ivmd_flags;
+	uint16_t ivmd_len;
+	uint16_t ivmd_deviceid;
+	uint16_t ivmd_auxdata;
+	uint64_t ivmd_resv;
+	uint64_t ivmd_phys_start;
+	uint64_t ivmd_phys_len;
+} ivmd_t;
+
+typedef struct ivmd_container {
+	ivmd_t *ivmdc_ivmd;
+	struct ivmd_container *ivmdc_next;
+} ivmd_container_t;
+
+typedef struct ivrs {
+	struct acpi_table_header ivrs_hdr;
+	uint32_t ivrs_ivinfo;
+	uint64_t ivrs_resv;
+} ivrs_t;
+
+typedef struct amd_iommu_acpi {
+	struct ivrs *acp_ivrs;
+	ivhd_container_t *acp_first_ivhdc;
+	ivhd_container_t *acp_last_ivhdc;
+	ivmd_container_t *acp_first_ivmdc;
+	ivmd_container_t *acp_last_ivmdc;
+} amd_iommu_acpi_t;
+
+
+/* Global IVINFO fields */
+typedef struct amd_iommu_acpi_global {
+	uint8_t acg_HtAtsResv;
+	uint8_t acg_VAsize;
+	uint8_t acg_PAsize;
+} amd_iommu_acpi_global_t;
+
+typedef struct amd_iommu_acpi_ivhd {
+	int32_t ach_deviceid_start;
+	int32_t ach_deviceid_end;
+
+	/* IVHD deventry type */
+	ivhd_deventry_type_t ach_dev_type;
+
+	/* IVHD flag fields */
+	uint8_t ach_IotlbSup;
+	uint8_t ach_Isoc;
+	uint8_t ach_ResPassPW;
+	uint8_t ach_PassPW;
+	uint8_t ach_HtTunEn;
+
+	/* IVHD fields */
+	uint16_t ach_IOMMU_deviceid;
+	uint16_t ach_IOMMU_cap_off;
+	uint64_t ach_IOMMU_reg_base;
+	uint16_t ach_IOMMU_pci_seg;
+
+	/* IVHD IOMMU info fields */
+	uint8_t ach_IOMMU_UnitID;
+	uint8_t ach_IOMMU_MSInum;
+
+	/* IVHD deventry data settings */
+	uint8_t ach_Lint1Pass;
+	uint8_t ach_Lint0Pass;
+	uint8_t ach_SysMgt;
+	uint8_t ach_NMIPass;
+	uint8_t ach_ExtIntPass;
+	uint8_t ach_INITPass;
+
+	/* alias */
+	int32_t ach_src_deviceid;
+
+	/* IVHD deventry extended data settings */
+	uint8_t ach_AtsDisabled;
+
+	/* IVHD deventry special device */
+	uint8_t ach_special_handle;
+	uint8_t ach_special_variety;
+
+	struct amd_iommu_acpi_ivhd *ach_next;
+} amd_iommu_acpi_ivhd_t;
+
+typedef struct amd_iommu_acpi_ivmd {
+	int32_t acm_deviceid_start;
+	int32_t acm_deviceid_end;
+
+	/* IVMD type */
+	ivmd_deviceid_type_t acm_dev_type;
+
+	/* IVMD flags */
+	uint8_t acm_ExclRange;
+	uint8_t acm_IW;
+	uint8_t acm_IR;
+	uint8_t acm_Unity;
+
+	/* IVMD mem block */
+	uint64_t acm_ivmd_phys_start;
+	uint64_t acm_ivmd_phys_len;
+
+	struct amd_iommu_acpi_ivmd *acm_next;
+} amd_iommu_acpi_ivmd_t;
+
+typedef union {
+	uint16_t   ent16;
+	uint8_t	   ent8[2];
+} align_16_t;
+
+typedef union {
+	uint32_t   ent32;
+	uint8_t	   ent8[4];
+} align_32_t;
+
+typedef union {
+	ivhd_t *ivhdp;
+	char   *cp;
+} align_ivhd_t;
+
+typedef union {
+	ivmd_t *ivmdp;
+	char   *cp;
+} align_ivmd_t;
+
+#pragma pack()
+
+int amd_iommu_acpi_init(void);
+void amd_iommu_acpi_fini(void);
+amd_iommu_acpi_ivhd_t *amd_iommu_lookup_all_ivhd(void);
+amd_iommu_acpi_ivmd_t *amd_iommu_lookup_all_ivmd(void);
+amd_iommu_acpi_ivhd_t *amd_iommu_lookup_any_ivhd(void);
+amd_iommu_acpi_ivmd_t *amd_iommu_lookup_any_ivmd(void);
+amd_iommu_acpi_global_t *amd_iommu_lookup_acpi_global(void);
+amd_iommu_acpi_ivhd_t *amd_iommu_lookup_ivhd(int32_t deviceid);
+amd_iommu_acpi_ivmd_t *amd_iommu_lookup_ivmd(int32_t deviceid);
+
+#endif /* _KERNEL */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif	/* _AMD_IOMMU_ACPI_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_cmd.c	Mon Sep 14 21:48:21 2009 -0700
@@ -0,0 +1,321 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/sunddi.h>
+#include <sys/amd_iommu.h>
+#include "amd_iommu_impl.h"
+
+extern int servicing_interrupt(void);
+
+static void
+amd_iommu_wait_for_completion(amd_iommu_t *iommu)
+{
+	ASSERT(MUTEX_HELD(&iommu->aiomt_cmdlock));
+	while (AMD_IOMMU_REG_GET64(REGADDR64(
+	    iommu->aiomt_reg_status_va), AMD_IOMMU_COMWAIT_INT) != 1) {
+		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
+		    AMD_IOMMU_CMDBUF_ENABLE, 1);
+		WAIT_SEC(1);
+	}
+}
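+
+/*
+ * Note: the loop above polls AMD_IOMMU_COMWAIT_INT in the status
+ * register once a second until the hardware signals completion of the
+ * completion-wait command, (re)asserting AMD_IOMMU_CMDBUF_ENABLE on
+ * each pass so that command buffer processing stays enabled.
+ */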
+
+static int
+create_compl_wait_cmd(amd_iommu_t *iommu, amd_iommu_cmdargs_t *cmdargsp,
+    amd_iommu_cmd_flags_t flags, uint32_t *cmdptr)
+{
+	const char *driver = ddi_driver_name(iommu->aiomt_dip);
+	int instance = ddi_get_instance(iommu->aiomt_dip);
+	const char *f = "create_compl_wait_cmd";
+
+	ASSERT(cmdargsp == NULL);
+
+	if (flags & AMD_IOMMU_CMD_FLAGS_COMPL_WAIT_S) {
+		cmn_err(CE_WARN, "%s: %s%d: idx=%d: 'store' completion "
+		    "not supported for completion wait command",
+		    f, driver, instance, iommu->aiomt_idx);
+		return (DDI_FAILURE);
+	}
+
+	AMD_IOMMU_REG_SET32(&cmdptr[0], AMD_IOMMU_CMD_COMPL_WAIT_S, 0);
+	AMD_IOMMU_REG_SET32(&cmdptr[0], AMD_IOMMU_CMD_COMPL_WAIT_I, 1);
+	AMD_IOMMU_REG_SET32(&cmdptr[0], AMD_IOMMU_CMD_COMPL_WAIT_F,
+	    (flags & AMD_IOMMU_CMD_FLAGS_COMPL_WAIT_F) != 0);
+	AMD_IOMMU_REG_SET32(&cmdptr[0], AMD_IOMMU_CMD_COMPL_WAIT_STORE_ADDR_LO,
+	    0);
+	AMD_IOMMU_REG_SET32(&cmdptr[1], AMD_IOMMU_CMD_OPCODE, 0x01);
+	AMD_IOMMU_REG_SET32(&cmdptr[1], AMD_IOMMU_CMD_COMPL_WAIT_STORE_ADDR_HI,
+	    0);
+	cmdptr[2] = 0;
+	cmdptr[3] = 0;
+
+	return (DDI_SUCCESS);
+}
+
+static int
+create_inval_devtab_entry_cmd(amd_iommu_t *iommu, amd_iommu_cmdargs_t *cmdargsp,
+    amd_iommu_cmd_flags_t flags, uint32_t *cmdptr)
+{
+	const char *driver = ddi_driver_name(iommu->aiomt_dip);
+	int instance = ddi_get_instance(iommu->aiomt_dip);
+	const char *f = "create_inval_devtab_entry_cmd";
+	uint16_t deviceid;
+
+	ASSERT(cmdargsp);
+
+	if (flags != AMD_IOMMU_CMD_FLAGS_NONE) {
+		cmn_err(CE_WARN, "%s: %s%d: idx=%d: flags not supported "
+		    "for invalidate devtab entry command", f, driver, instance,
+		    iommu->aiomt_idx);
+		return (DDI_FAILURE);
+	}
+
+	deviceid = cmdargsp->ca_deviceid;
+
+	AMD_IOMMU_REG_SET32(&cmdptr[0], AMD_IOMMU_CMD_INVAL_DEVTAB_DEVICEID,
+	    deviceid);
+	AMD_IOMMU_REG_SET32(&cmdptr[1], AMD_IOMMU_CMD_OPCODE, 0x02);
+	cmdptr[2] = 0;
+	cmdptr[3] = 0;
+
+	return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+static int
+create_inval_iommu_pages_cmd(amd_iommu_t *iommu, amd_iommu_cmdargs_t *cmdargsp,
+    amd_iommu_cmd_flags_t flags, uint32_t *cmdptr)
+{
+	uint32_t addr_lo;
+	uint32_t addr_hi;
+
+	ASSERT(cmdargsp);
+
+	addr_lo = AMD_IOMMU_REG_GET64(REGADDR64(&cmdargsp->ca_addr),
+	    AMD_IOMMU_CMD_INVAL_PAGES_ADDR_LO);
+	addr_hi = AMD_IOMMU_REG_GET64(REGADDR64(&cmdargsp->ca_addr),
+	    AMD_IOMMU_CMD_INVAL_PAGES_ADDR_HI);
+
+	cmdptr[0] = 0;
+	AMD_IOMMU_REG_SET32(&cmdptr[1], AMD_IOMMU_CMD_INVAL_PAGES_DOMAINID,
+	    cmdargsp->ca_domainid);
+	AMD_IOMMU_REG_SET32(&cmdptr[1], AMD_IOMMU_CMD_OPCODE, 0x03);
+	AMD_IOMMU_REG_SET32(&cmdptr[2], AMD_IOMMU_CMD_INVAL_PAGES_PDE,
+	    (flags & AMD_IOMMU_CMD_FLAGS_PAGE_PDE_INVAL) != 0);
+	AMD_IOMMU_REG_SET32(&cmdptr[2], AMD_IOMMU_CMD_INVAL_PAGES_S,
+	    (flags & AMD_IOMMU_CMD_FLAGS_PAGE_INVAL_S) != 0);
+	AMD_IOMMU_REG_SET32(&cmdptr[2], AMD_IOMMU_CMD_INVAL_PAGES_ADDR_LO,
+	    addr_lo);
+	cmdptr[3] = addr_hi;
+
+	return (DDI_SUCCESS);
+
+}
+
+/*ARGSUSED*/
+static int
+create_inval_iotlb_pages_cmd(amd_iommu_t *iommu, amd_iommu_cmdargs_t *cmdargsp,
+    amd_iommu_cmd_flags_t flags, uint32_t *cmdptr)
+{
+	uint32_t addr_lo;
+	uint32_t addr_hi;
+
+	ASSERT(cmdargsp);
+
+	addr_lo = AMD_IOMMU_REG_GET64(REGADDR64(&cmdargsp->ca_addr),
+	    AMD_IOMMU_CMD_INVAL_IOTLB_ADDR_LO);
+
+	addr_hi = AMD_IOMMU_REG_GET64(REGADDR64(&cmdargsp->ca_addr),
+	    AMD_IOMMU_CMD_INVAL_IOTLB_ADDR_HI);
+
+	AMD_IOMMU_REG_SET32(&cmdptr[0], AMD_IOMMU_CMD_INVAL_IOTLB_DEVICEID,
+	    cmdargsp->ca_deviceid);
+	AMD_IOMMU_REG_SET32(&cmdptr[0], AMD_IOMMU_CMD_INVAL_IOTLB_MAXPEND,
+	    AMD_IOMMU_DEFAULT_MAXPEND);
+	AMD_IOMMU_REG_SET32(&cmdptr[1], AMD_IOMMU_CMD_OPCODE, 0x04);
+	AMD_IOMMU_REG_SET32(&cmdptr[1], AMD_IOMMU_CMD_INVAL_IOTLB_QUEUEID,
+	    cmdargsp->ca_deviceid);
+	AMD_IOMMU_REG_SET32(&cmdptr[2], AMD_IOMMU_CMD_INVAL_IOTLB_ADDR_LO,
+	    addr_lo);
+	AMD_IOMMU_REG_SET32(&cmdptr[2], AMD_IOMMU_CMD_INVAL_IOTLB_S,
+	    (flags & AMD_IOMMU_CMD_FLAGS_IOTLB_INVAL_S) != 0);
+	cmdptr[3] = addr_hi;
+
+	return (DDI_SUCCESS);
+}
+
+static int
+create_inval_intr_table_cmd(amd_iommu_t *iommu, amd_iommu_cmdargs_t *cmdargsp,
+    amd_iommu_cmd_flags_t flags, uint32_t *cmdptr)
+{
+	const char *driver = ddi_driver_name(iommu->aiomt_dip);
+	int instance = ddi_get_instance(iommu->aiomt_dip);
+	const char *f = "create_inval_intr_table_cmd";
+
+	ASSERT(cmdargsp);
+
+	if (flags != AMD_IOMMU_CMD_FLAGS_NONE) {
+		cmn_err(CE_WARN, "%s: %s%d: idx=%d: flags not supported "
+		    "for invalidate interrupt table command",
+		    f, driver, instance, iommu->aiomt_idx);
+		return (DDI_FAILURE);
+	}
+
+	AMD_IOMMU_REG_SET32(&cmdptr[0], AMD_IOMMU_CMD_INVAL_INTR_DEVICEID,
+	    cmdargsp->ca_deviceid);
+	AMD_IOMMU_REG_SET32(&cmdptr[1], AMD_IOMMU_CMD_OPCODE, 0x05);
+	cmdptr[2] = 0;
+	cmdptr[3] = 0;
+
+	return (DDI_SUCCESS);
+}
+
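+/*
+ * Queue a command to the IOMMU: build the 4-word command, copy it
+ * into the circular command buffer at the software tail, wait for
+ * the hardware to drain if the buffer is nearly full, sync for the
+ * device and advance the tail pointer register. The hardware
+ * consumes commands by advancing the head pointer.
+ */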
+int
+amd_iommu_cmd(amd_iommu_t *iommu, amd_iommu_cmd_t cmd,
+    amd_iommu_cmdargs_t *cmdargs, amd_iommu_cmd_flags_t flags, int lock_held)
+{
+	int error;
+	int i;
+	uint32_t cmdptr[4] = {0};
+	const char *driver = ddi_driver_name(iommu->aiomt_dip);
+	int instance = ddi_get_instance(iommu->aiomt_dip);
+	uint64_t cmdhead_off;
+	uint64_t cmdtail_off;
+	const char *f = "amd_iommu_cmd";
+
+	ASSERT(lock_held == 0 || lock_held == 1);
+	ASSERT(lock_held == 0 || MUTEX_HELD(&iommu->aiomt_cmdlock));
+
+	if (!lock_held)
+		mutex_enter(&iommu->aiomt_cmdlock);
+
+	/*
+	 * Prepare the command
+	 */
+	switch (cmd) {
+	case AMD_IOMMU_CMD_COMPL_WAIT:
+		if (flags & AMD_IOMMU_CMD_FLAGS_COMPL_WAIT) {
+			cmn_err(CE_WARN, "%s: %s%d: idx=%d: No completion wait "
+			    " after completion wait command",
+			    f, driver, instance, iommu->aiomt_idx);
+			error = DDI_FAILURE;
+			goto out;
+		}
+		error = create_compl_wait_cmd(iommu, cmdargs, flags, cmdptr);
+		break;
+	case AMD_IOMMU_CMD_INVAL_DEVTAB_ENTRY:
+		error = create_inval_devtab_entry_cmd(iommu, cmdargs,
+		    flags & ~AMD_IOMMU_CMD_FLAGS_COMPL_WAIT, cmdptr);
+		break;
+	case AMD_IOMMU_CMD_INVAL_IOMMU_PAGES:
+		error = create_inval_iommu_pages_cmd(iommu, cmdargs,
+		    flags & ~AMD_IOMMU_CMD_FLAGS_COMPL_WAIT, cmdptr);
+		break;
+	case AMD_IOMMU_CMD_INVAL_IOTLB_PAGES:
+		error = create_inval_iotlb_pages_cmd(iommu, cmdargs,
+		    flags & ~AMD_IOMMU_CMD_FLAGS_COMPL_WAIT, cmdptr);
+		break;
+	case AMD_IOMMU_CMD_INVAL_INTR_TABLE:
+		error = create_inval_intr_table_cmd(iommu, cmdargs,
+		    flags & ~AMD_IOMMU_CMD_FLAGS_COMPL_WAIT, cmdptr);
+		break;
+	default:
+		cmn_err(CE_WARN, "%s: %s%d: idx=%d: Unsupported cmd: %d",
+		    f, driver, instance, iommu->aiomt_idx, cmd);
+		error = DDI_FAILURE;
+		goto out;
+	}
+
+	if (error != DDI_SUCCESS) {
+		error = DDI_FAILURE;
+		goto out;
+	}
+
+	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
+	    AMD_IOMMU_CMDBUF_ENABLE, 1);
+
+	ASSERT(iommu->aiomt_cmd_tail != NULL);
+
+	for (i = 0; i < 4; i++) {
+		iommu->aiomt_cmd_tail[i] = cmdptr[i];
+	}
+
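+	/*
+	 * If the tail has wrapped around to just behind the hardware
+	 * head pointer, the buffer is effectively full; spin until
+	 * the IOMMU drains some commands.
+	 */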
+wait_for_drain:
+	cmdhead_off = AMD_IOMMU_REG_GET64(
+	    REGADDR64(iommu->aiomt_reg_cmdbuf_head_va),
+	    AMD_IOMMU_CMDHEADPTR);
+
+	cmdhead_off = CMD2OFF(cmdhead_off);
+
+	ASSERT(cmdhead_off < iommu->aiomt_cmdbuf_sz);
+
+	/* check for overflow */
+	if ((caddr_t)iommu->aiomt_cmd_tail <
+	    (cmdhead_off + iommu->aiomt_cmdbuf)) {
+		if ((caddr_t)iommu->aiomt_cmd_tail + 16 >=
+		    (cmdhead_off + iommu->aiomt_cmdbuf)) {
+#ifdef DEBUG
+			cmn_err(CE_WARN, "cmdbuffer overflow: waiting for "
+			    "drain");
+#endif
+			goto wait_for_drain;
+		}
+	}
+
+	SYNC_FORDEV(iommu->aiomt_dmahdl);
+
+	/*
+	 * Update the tail pointer in soft state
+	 * and the tail pointer register
+	 */
+	iommu->aiomt_cmd_tail += 4;
+	if ((caddr_t)iommu->aiomt_cmd_tail >= (iommu->aiomt_cmdbuf
+	    + iommu->aiomt_cmdbuf_sz)) {
+		/* wraparound */
+		/*LINTED*/
+		iommu->aiomt_cmd_tail = (uint32_t *)iommu->aiomt_cmdbuf;
+		cmdtail_off = 0;
+	} else {
+		cmdtail_off = (caddr_t)iommu->aiomt_cmd_tail
+		/*LINTED*/
+		    - iommu->aiomt_cmdbuf;
+	}
+
+	ASSERT(cmdtail_off < iommu->aiomt_cmdbuf_sz);
+
+	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_tail_va),
+	    AMD_IOMMU_CMDTAILPTR, OFF2CMD(cmdtail_off));
+
+	if (cmd == AMD_IOMMU_CMD_COMPL_WAIT) {
+		amd_iommu_wait_for_completion(iommu);
+	} else if (flags & AMD_IOMMU_CMD_FLAGS_COMPL_WAIT) {
+		error = amd_iommu_cmd(iommu, AMD_IOMMU_CMD_COMPL_WAIT,
+		    NULL, 0, 1);
+	}
+
+out:
+	if (!lock_held)
+		mutex_exit(&iommu->aiomt_cmdlock);
+	return (error);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_impl.c	Mon Sep 14 21:48:21 2009 -0700
@@ -0,0 +1,1880 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/sunddi.h>
+#include <sys/iommulib.h>
+#include <sys/amd_iommu.h>
+#include <sys/pci_cap.h>
+#include <sys/bootconf.h>
+#include <sys/ddidmareq.h>
+
+#include "amd_iommu_impl.h"
+#include "amd_iommu_acpi.h"
+#include "amd_iommu_page_tables.h"
+
+static int amd_iommu_fini(amd_iommu_t *iommu);
+static void amd_iommu_teardown_interrupts(amd_iommu_t *iommu);
+static void amd_iommu_stop(amd_iommu_t *iommu);
+
+static int amd_iommu_probe(iommulib_handle_t handle, dev_info_t *rdip);
+static int amd_iommu_allochdl(iommulib_handle_t handle,
+    dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
+    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *dma_handlep);
+static int amd_iommu_freehdl(iommulib_handle_t handle,
+    dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t dma_handle);
+static int amd_iommu_bindhdl(iommulib_handle_t handle, dev_info_t *dip,
+    dev_info_t *rdip, ddi_dma_handle_t dma_handle,
+    struct ddi_dma_req *dmareq, ddi_dma_cookie_t *cookiep,
+    uint_t *ccountp);
+static int amd_iommu_unbindhdl(iommulib_handle_t handle,
+    dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t dma_handle);
+static int amd_iommu_sync(iommulib_handle_t handle, dev_info_t *dip,
+    dev_info_t *rdip, ddi_dma_handle_t dma_handle, off_t off,
+    size_t len, uint_t cache_flags);
+static int amd_iommu_win(iommulib_handle_t handle, dev_info_t *dip,
+    dev_info_t *rdip, ddi_dma_handle_t dma_handle, uint_t win,
+    off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
+    uint_t *ccountp);
+static int amd_iommu_map(iommulib_handle_t handle, dev_info_t *dip,
+    dev_info_t *rdip, struct ddi_dma_req *dmareq,
+    ddi_dma_handle_t *dma_handle);
+static int amd_iommu_mctl(iommulib_handle_t handle, dev_info_t *dip,
+    dev_info_t *rdip, ddi_dma_handle_t dma_handle,
+    enum ddi_dma_ctlops request, off_t *offp, size_t *lenp,
+    caddr_t *objpp, uint_t cache_flags);
+
+static int unmap_current_window(amd_iommu_t *iommu, dev_info_t *rdip,
+    ddi_dma_cookie_t *cookie_array, uint_t ccount, int ncookies, int locked);
+
+extern void *device_arena_alloc(size_t size, int vm_flag);
+extern void device_arena_free(void * vaddr, size_t size);
+
+ddi_dma_attr_t amd_iommu_dma_attr = {
+	DMA_ATTR_V0,
+	0U,				/* dma_attr_addr_lo */
+	0xffffffffffffffffULL,		/* dma_attr_addr_hi */
+	0xffffffffU,			/* dma_attr_count_max */
+	(uint64_t)4096,			/* dma_attr_align */
+	1,				/* dma_attr_burstsizes */
+	64,				/* dma_attr_minxfer */
+	0xffffffffU,			/* dma_attr_maxxfer */
+	0xffffffffU,			/* dma_attr_seg */
+	1,				/* dma_attr_sgllen, variable */
+	64,				/* dma_attr_granular */
+	0				/* dma_attr_flags */
+};
+
+ddi_device_acc_attr_t amd_iommu_devacc = {
+	DDI_DEVICE_ATTR_V0,
+	DDI_NEVERSWAP_ACC,
+	DDI_STRICTORDER_ACC
+};
+
+struct iommulib_ops amd_iommulib_ops = {
+	IOMMU_OPS_VERSION,
+	AMD_IOMMU,
+	"AMD IOMMU Vers. 1",
+	NULL,
+	amd_iommu_probe,
+	amd_iommu_allochdl,
+	amd_iommu_freehdl,
+	amd_iommu_bindhdl,
+	amd_iommu_unbindhdl,
+	amd_iommu_sync,
+	amd_iommu_win,
+	amd_iommu_map,
+	amd_iommu_mctl
+};
+
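+/* serializes page table updates performed on behalf of all IOMMU units */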
+static kmutex_t amd_iommu_pgtable_lock;
+
+static int
+amd_iommu_register(amd_iommu_t *iommu)
+{
+	dev_info_t *dip = iommu->aiomt_dip;
+	const char *driver = ddi_driver_name(dip);
+	int instance = ddi_get_instance(dip);
+	iommulib_ops_t *iommulib_ops;
+	iommulib_handle_t handle;
+	const char *f = "amd_iommu_register";
+
+	iommulib_ops = kmem_zalloc(sizeof (iommulib_ops_t), KM_SLEEP);
+
+	*iommulib_ops = amd_iommulib_ops;
+
+	iommulib_ops->ilops_data = (void *)iommu;
+	iommu->aiomt_iommulib_ops = iommulib_ops;
+
+	if (iommulib_iommu_register(dip, iommulib_ops, &handle)
+	    != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s: %s%d: Register with iommulib "
+		    "failed idx=%d", f, driver, instance, iommu->aiomt_idx);
+		kmem_free(iommulib_ops, sizeof (iommulib_ops_t));
+		return (DDI_FAILURE);
+	}
+
+	iommu->aiomt_iommulib_handle = handle;
+
+	return (DDI_SUCCESS);
+}
+
+static int
+amd_iommu_unregister(amd_iommu_t *iommu)
+{
+	if (iommu->aiomt_iommulib_handle == NULL) {
+		/* we never registered */
+		return (DDI_SUCCESS);
+	}
+
+	if (iommulib_iommu_unregister(iommu->aiomt_iommulib_handle)
+	    != DDI_SUCCESS) {
+		return (DDI_FAILURE);
+	}
+
+	kmem_free(iommu->aiomt_iommulib_ops, sizeof (iommulib_ops_t));
+	iommu->aiomt_iommulib_ops = NULL;
+	iommu->aiomt_iommulib_handle = NULL;
+
+	return (DDI_SUCCESS);
+}
+
+static int
+amd_iommu_setup_passthru(amd_iommu_t *iommu)
+{
+	gfx_entry_t *gfxp;
+	dev_info_t *dip;
+
+	/*
+	 * Setup passthru mapping for "special" devices
+	 */
+	amd_iommu_set_passthru(iommu, NULL);
+
+	for (gfxp = gfx_devinfo_list; gfxp; gfxp = gfxp->g_next) {
+		gfxp->g_ref++;
+		dip = gfxp->g_dip;
+		if (dip) {
+			amd_iommu_set_passthru(iommu, dip);
+		}
+		gfxp->g_ref--;
+	}
+
+	return (DDI_SUCCESS);
+}
+
+static int
+amd_iommu_start(amd_iommu_t *iommu)
+{
+	dev_info_t *dip = iommu->aiomt_dip;
+	int instance = ddi_get_instance(dip);
+	const char *driver = ddi_driver_name(dip);
+	amd_iommu_acpi_ivhd_t *hinfop;
+	const char *f = "amd_iommu_start";
+
+	hinfop = amd_iommu_lookup_all_ivhd();
+
+	/*
+	 * Disable HT tunnel translation.
+	 * XXX use ACPI
+	 */
+	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
+	    AMD_IOMMU_HT_TUN_ENABLE, 0);
+
+	if (hinfop) {
+		if (amd_iommu_debug) {
+			cmn_err(CE_NOTE,
+			    "amd_iommu: using ACPI for CTRL registers");
+		}
+		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
+		    AMD_IOMMU_ISOC, hinfop->ach_Isoc);
+		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
+		    AMD_IOMMU_RESPASSPW, hinfop->ach_ResPassPW);
+		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
+		    AMD_IOMMU_PASSPW, hinfop->ach_PassPW);
+	}
+
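+	/*
+	 * Set the invalidation timeout. The value is an encoded field
+	 * (this driver uses 5; consult the AMD IOMMU specification for
+	 * the time-unit encoding).
+	 */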
+	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
+	    AMD_IOMMU_INVTO, 5);
+
+
+	/*
+	 * The Device table entry bit 0 (V) controls whether the device
+	 * table entry is valid for address translation and Device table
+	 * entry bit 128 (IV) controls whether interrupt remapping is valid.
+	 * Setting both to zero makes an entry pass-thru. Since this
+	 * table is zeroed on allocation, every device defaults to
+	 * pass-thru when the IOMMU is enabled.
+	 */
+
+	/* Finally enable the IOMMU ... */
+	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
+	    AMD_IOMMU_ENABLE, 1);
+
+	if (amd_iommu_debug) {
+		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
+		    "Successfully started AMD IOMMU", f, driver, instance,
+		    iommu->aiomt_idx);
+	}
+	cmn_err(CE_NOTE, "AMD IOMMU (%d,%d) enabled",
+	    instance, iommu->aiomt_idx);
+
+	return (DDI_SUCCESS);
+}
+
+static void
+amd_iommu_stop(amd_iommu_t *iommu)
+{
+	dev_info_t *dip = iommu->aiomt_dip;
+	int instance = ddi_get_instance(dip);
+	const char *driver = ddi_driver_name(dip);
+	const char *f = "amd_iommu_stop";
+
+	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
+	    AMD_IOMMU_ENABLE, 0);
+	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
+	    AMD_IOMMU_EVENTINT_ENABLE, 0);
+	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
+	    AMD_IOMMU_COMWAITINT_ENABLE, 0);
+	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
+	    AMD_IOMMU_EVENTLOG_ENABLE, 0);
+
+	/*
+	 * Disable translation on HT tunnel traffic
+	 */
+	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
+	    AMD_IOMMU_HT_TUN_ENABLE, 0);
+	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
+	    AMD_IOMMU_CMDBUF_ENABLE, 0);
+
+	cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
+	    "Successfully stopped AMD IOMMU", f, driver, instance,
+	    iommu->aiomt_idx);
+}
+
+static int
+amd_iommu_setup_tables_and_buffers(amd_iommu_t *iommu)
+{
+	dev_info_t *dip = iommu->aiomt_dip;
+	int instance = ddi_get_instance(dip);
+	const char *driver = ddi_driver_name(dip);
+	uint32_t dma_bufsz;
+	caddr_t addr;
+	uint32_t sz;
+	uint32_t p2sz;
+	int i;
+	uint64_t *dentry;
+	int err;
+	const char *f = "amd_iommu_setup_tables_and_buffers";
+
+	/*
+	 * We will put the Device Table, Command Buffer and
+	 * Event Log in contiguous memory. Allocate the maximum
+	 * size allowed for such structures
+	 * Device Table:   64K entries * 256 bits (32 bytes) each
+	 * Command Buffer: 32K entries * 128 bits (16 bytes) each
+	 * Event Log:      32K entries * 128 bits (16 bytes) each
+	 */
+	iommu->aiomt_devtbl_sz = (1<<AMD_IOMMU_DEVTBL_SZ) * AMD_IOMMU_DEVENT_SZ;
+	iommu->aiomt_cmdbuf_sz = (1<<AMD_IOMMU_CMDBUF_SZ) * AMD_IOMMU_CMD_SZ;
+	iommu->aiomt_eventlog_sz =
+	    (1<<AMD_IOMMU_EVENTLOG_SZ) * AMD_IOMMU_EVENT_SZ;
+
+	dma_bufsz = iommu->aiomt_devtbl_sz + iommu->aiomt_cmdbuf_sz
+	    + iommu->aiomt_eventlog_sz;
+
+	/*
+	 * Alloc a DMA handle.
+	 */
+	err = ddi_dma_alloc_handle(dip, &amd_iommu_dma_attr,
+	    DDI_DMA_SLEEP, NULL, &iommu->aiomt_dmahdl);
+	if (err != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s: %s%d: Cannot alloc DMA handle for "
+		    "AMD IOMMU tables and buffers", f, driver, instance);
+		return (DDI_FAILURE);
+	}
+
+	/*
+	 * Alloc memory for tables and buffers
+	 * XXX remove cast to size_t
+	 */
+	err = ddi_dma_mem_alloc(iommu->aiomt_dmahdl, dma_bufsz,
+	    &amd_iommu_devacc, DDI_DMA_CONSISTENT|IOMEM_DATA_UNCACHED,
+	    DDI_DMA_SLEEP,  NULL, (caddr_t *)&iommu->aiomt_dma_bufva,
+	    (size_t *)&iommu->aiomt_dma_mem_realsz, &iommu->aiomt_dma_mem_hdl);
+	if (err != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s: %s%d: Cannot alloc memory for DMA "
+		    "to AMD IOMMU tables and buffers", f, driver, instance);
+		iommu->aiomt_dma_bufva = NULL;
+		iommu->aiomt_dma_mem_realsz = 0;
+		ddi_dma_free_handle(&iommu->aiomt_dmahdl);
+		iommu->aiomt_dmahdl = NULL;
+		return (DDI_FAILURE);
+	}
+
+	/*
+	 * The VA must be 4K aligned and >= table size
+	 */
+	ASSERT(((uintptr_t)iommu->aiomt_dma_bufva &
+	    AMD_IOMMU_TABLE_ALIGN) == 0);
+	ASSERT(iommu->aiomt_dma_mem_realsz >= dma_bufsz);
+
+	/*
+	 * Now bind the handle
+	 */
+	err = ddi_dma_addr_bind_handle(iommu->aiomt_dmahdl, NULL,
+	    iommu->aiomt_dma_bufva, iommu->aiomt_dma_mem_realsz,
+	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
+	    NULL, &iommu->aiomt_buf_dma_cookie, &iommu->aiomt_buf_dma_ncookie);
+	if (err != DDI_DMA_MAPPED) {
+		cmn_err(CE_WARN, "%s: %s%d: Cannot bind memory for DMA "
+		    "to AMD IOMMU tables and buffers. bufrealsz=%p",
+		    f, driver, instance,
+		    (void *)(uintptr_t)iommu->aiomt_dma_mem_realsz);
+		iommu->aiomt_buf_dma_cookie.dmac_laddress = 0;
+		iommu->aiomt_buf_dma_cookie.dmac_size = 0;
+		iommu->aiomt_buf_dma_cookie.dmac_type = 0;
+		iommu->aiomt_buf_dma_ncookie = 0;
+		ddi_dma_mem_free(&iommu->aiomt_dma_mem_hdl);
+		iommu->aiomt_dma_mem_hdl = NULL;
+		iommu->aiomt_dma_bufva = NULL;
+		iommu->aiomt_dma_mem_realsz = 0;
+		ddi_dma_free_handle(&iommu->aiomt_dmahdl);
+		iommu->aiomt_dmahdl = NULL;
+		return (DDI_FAILURE);
+	}
+
+	/*
+	 * We assume the DMA engine on the IOMMU is capable of handling the
+	 * whole table buffer in a single cookie. If not and multiple cookies
+	 * are needed we fail.
+	 */
+	if (iommu->aiomt_buf_dma_ncookie != 1) {
+		cmn_err(CE_WARN, "%s: %s%d: Cannot handle multiple "
+		    "cookies for DMA to AMD IOMMU tables and buffers. "
+		    "#cookies=%u", f, driver, instance,
+		    iommu->aiomt_buf_dma_ncookie);
+		(void) ddi_dma_unbind_handle(iommu->aiomt_dmahdl);
+		iommu->aiomt_buf_dma_cookie.dmac_laddress = 0;
+		iommu->aiomt_buf_dma_cookie.dmac_size = 0;
+		iommu->aiomt_buf_dma_cookie.dmac_type = 0;
+		iommu->aiomt_buf_dma_ncookie = 0;
+		ddi_dma_mem_free(&iommu->aiomt_dma_mem_hdl);
+		iommu->aiomt_dma_mem_hdl = NULL;
+		iommu->aiomt_dma_bufva = NULL;
+		iommu->aiomt_dma_mem_realsz = 0;
+		ddi_dma_free_handle(&iommu->aiomt_dmahdl);
+		iommu->aiomt_dmahdl = NULL;
+		return (DDI_FAILURE);
+	}
+
+	/*
+	 * The address in the cookie must be 4K aligned and >= table size
+	 */
+	ASSERT((iommu->aiomt_buf_dma_cookie.dmac_cookie_addr
+	    & AMD_IOMMU_TABLE_ALIGN) == 0);
+	ASSERT(iommu->aiomt_buf_dma_cookie.dmac_size
+	    <= iommu->aiomt_dma_mem_realsz);
+	ASSERT(iommu->aiomt_buf_dma_cookie.dmac_size >= dma_bufsz);
+
+	/*
+	 * Setup the device table pointers in the iommu struct as
+	 * well as the IOMMU device table register
+	 */
+	iommu->aiomt_devtbl = iommu->aiomt_dma_bufva;
+	bzero(iommu->aiomt_devtbl, iommu->aiomt_devtbl_sz);
+
+	/*
+	 * Set V=1 and TV=0, so any inadvertent pass-thrus cause
+	 * page faults. Also set the SE bit so we aren't swamped with
+	 * page fault messages.
+	 */
+	for (i = 0; i <= AMD_IOMMU_MAX_DEVICEID; i++) {
+		/*LINTED*/
+		dentry = (uint64_t *)&iommu->aiomt_devtbl
+		    [i * AMD_IOMMU_DEVTBL_ENTRY_SZ];
+		AMD_IOMMU_REG_SET64(dentry, AMD_IOMMU_DEVTBL_V, 1);
+		AMD_IOMMU_REG_SET64(&(dentry[1]), AMD_IOMMU_DEVTBL_SE, 1);
+	}
+
+	addr = (caddr_t)(uintptr_t)iommu->aiomt_buf_dma_cookie.dmac_cookie_addr;
+	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_devtbl_va),
+	    AMD_IOMMU_DEVTABBASE, ((uint64_t)(uintptr_t)addr) >> 12);
+	sz = (iommu->aiomt_devtbl_sz >> 12) - 1;
+	ASSERT(sz <= ((1 << 9) - 1));
+	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_devtbl_va),
+	    AMD_IOMMU_DEVTABSIZE, sz);
+
+	/*
+	 * Setup the command buffer pointers
+	 */
+	iommu->aiomt_cmdbuf = iommu->aiomt_devtbl +
+	    iommu->aiomt_devtbl_sz;
+	bzero(iommu->aiomt_cmdbuf, iommu->aiomt_cmdbuf_sz);
+	addr += iommu->aiomt_devtbl_sz;
+	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_va),
+	    AMD_IOMMU_COMBASE, ((uint64_t)(uintptr_t)addr) >> 12);
+
+	p2sz = AMD_IOMMU_CMDBUF_SZ;
+	ASSERT(p2sz >= AMD_IOMMU_CMDBUF_MINSZ &&
+	    p2sz <= AMD_IOMMU_CMDBUF_MAXSZ);
+	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_va),
+	    AMD_IOMMU_COMLEN, p2sz);
+	/*LINTED*/
+	iommu->aiomt_cmd_tail = (uint32_t *)iommu->aiomt_cmdbuf;
+	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_head_va),
+	    AMD_IOMMU_CMDHEADPTR, 0);
+	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_tail_va),
+	    AMD_IOMMU_CMDTAILPTR, 0);
+
+	/*
+	 * Setup the event log pointers
+	 */
+	iommu->aiomt_eventlog = iommu->aiomt_cmdbuf +
+	    iommu->aiomt_cmdbuf_sz;
+	bzero(iommu->aiomt_eventlog, iommu->aiomt_eventlog_sz);
+	addr += iommu->aiomt_cmdbuf_sz;
+	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_va),
+	    AMD_IOMMU_EVENTBASE, ((uint64_t)(uintptr_t)addr) >> 12);
+	p2sz = AMD_IOMMU_EVENTLOG_SZ;
+	ASSERT(p2sz >= AMD_IOMMU_EVENTLOG_MINSZ &&
+	    p2sz <= AMD_IOMMU_EVENTLOG_MAXSZ);
+	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_va),
+	    AMD_IOMMU_EVENTLEN, p2sz);
+	/*LINTED*/
+	iommu->aiomt_event_head = (uint32_t *)iommu->aiomt_eventlog;
+	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_head_va),
+	    AMD_IOMMU_EVENTHEADPTR, 0);
+	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_tail_va),
+	    AMD_IOMMU_EVENTTAILPTR, 0);
+
+	/* dma sync so device sees this init */
+	SYNC_FORDEV(iommu->aiomt_dmahdl);
+
+	if (amd_iommu_debug & AMD_IOMMU_DEBUG_TABLES) {
+		cmn_err(CE_NOTE, "%s: %s%d: successfully setup AMD IOMMU "
+		    "tables, idx=%d", f, driver, instance, iommu->aiomt_idx);
+	}
+
+	return (DDI_SUCCESS);
+}
+
+static void
+amd_iommu_teardown_tables_and_buffers(amd_iommu_t *iommu)
+{
+	dev_info_t *dip = iommu->aiomt_dip;
+	int instance = ddi_get_instance(dip);
+	const char *driver = ddi_driver_name(dip);
+	const char *f = "amd_iommu_teardown_tables_and_buffers";
+
+	iommu->aiomt_eventlog = NULL;
+	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_va),
+	    AMD_IOMMU_EVENTBASE, 0);
+	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_va),
+	    AMD_IOMMU_EVENTLEN, 0);
+
+	iommu->aiomt_cmdbuf = NULL;
+	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_va),
+	    AMD_IOMMU_COMBASE, 0);
+	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_va),
+	    AMD_IOMMU_COMLEN, 0);
+
+	iommu->aiomt_devtbl = NULL;
+	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_devtbl_va),
+	    AMD_IOMMU_DEVTABBASE, 0);
+	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_devtbl_va),
+	    AMD_IOMMU_DEVTABSIZE, 0);
+
+	if (iommu->aiomt_dmahdl == NULL)
+		return;
+
+	/* Unbind the handle */
+	if (ddi_dma_unbind_handle(iommu->aiomt_dmahdl) != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s: %s%d: failed to unbind handle: "
+		    "%p for IOMMU idx=%d", f, driver, instance,
+		    (void *)iommu->aiomt_dmahdl, iommu->aiomt_idx);
+	}
+	iommu->aiomt_buf_dma_cookie.dmac_laddress = 0;
+	iommu->aiomt_buf_dma_cookie.dmac_size = 0;
+	iommu->aiomt_buf_dma_cookie.dmac_type = 0;
+	iommu->aiomt_buf_dma_ncookie = 0;
+
+	/* Free the table memory allocated for DMA */
+	ddi_dma_mem_free(&iommu->aiomt_dma_mem_hdl);
+	iommu->aiomt_dma_mem_hdl = NULL;
+	iommu->aiomt_dma_bufva = NULL;
+	iommu->aiomt_dma_mem_realsz = 0;
+
+	/* Free the DMA handle */
+	ddi_dma_free_handle(&iommu->aiomt_dmahdl);
+	iommu->aiomt_dmahdl = NULL;
+}
+
+static void
+amd_iommu_enable_interrupts(amd_iommu_t *iommu)
+{
+	ASSERT(AMD_IOMMU_REG_GET64(REGADDR64(iommu->aiomt_reg_status_va),
+	    AMD_IOMMU_CMDBUF_RUN) == 0);
+	ASSERT(AMD_IOMMU_REG_GET64(REGADDR64(iommu->aiomt_reg_status_va),
+	    AMD_IOMMU_EVENT_LOG_RUN) == 0);
+
+	/* Must be set prior to enabling the command buffer and event log */
+	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
+	    AMD_IOMMU_CMDBUF_ENABLE, 1);
+	/* No interrupts for completion wait - too heavyweight; use polling */
+	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
+	    AMD_IOMMU_COMWAITINT_ENABLE, 0);
+	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
+	    AMD_IOMMU_EVENTLOG_ENABLE, 1);
+	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
+	    AMD_IOMMU_EVENTINT_ENABLE, 1);
+}
+
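+/*
+ * Program the exclusion range registers. Addresses inside an ACPI
+ * IVMD exclusion range bypass IOMMU translation entirely; if no
+ * exclusion range is specified, exclusion is disabled.
+ */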
+static int
+amd_iommu_setup_exclusion(amd_iommu_t *iommu)
+{
+	amd_iommu_acpi_ivmd_t *minfop;
+
+	minfop = amd_iommu_lookup_all_ivmd();
+
+	if (minfop && minfop->acm_ExclRange == 1) {
+		cmn_err(CE_NOTE, "Programming exclusion range");
+		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
+		    AMD_IOMMU_EXCL_BASE_ADDR,
+		    minfop->acm_ivmd_phys_start >> 12);
+		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
+		    AMD_IOMMU_EXCL_BASE_ALLOW, 1);
+		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
+		    AMD_IOMMU_EXCL_BASE_EXEN, 1);
+		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_lim_va),
+		    AMD_IOMMU_EXCL_LIM, (minfop->acm_ivmd_phys_start +
+		    minfop->acm_ivmd_phys_len) >> 12);
+	} else {
+		if (amd_iommu_debug) {
+			cmn_err(CE_NOTE, "Skipping exclusion range");
+		}
+		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
+		    AMD_IOMMU_EXCL_BASE_ADDR, 0);
+		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
+		    AMD_IOMMU_EXCL_BASE_ALLOW, 1);
+		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
+		    AMD_IOMMU_EXCL_BASE_EXEN, 0);
+		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_lim_va),
+		    AMD_IOMMU_EXCL_LIM, 0);
+	}
+
+	return (DDI_SUCCESS);
+}
+
+static void
+amd_iommu_teardown_exclusion(amd_iommu_t *iommu)
+{
+	(void) amd_iommu_setup_exclusion(iommu);
+}
+
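+/*
+ * Interrupt handler: services event log and event log overflow
+ * interrupts. Completion wait is polled rather than interrupt
+ * driven, so any other interrupt source is left unclaimed.
+ */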
+static uint_t
+amd_iommu_intr_handler(caddr_t arg1, caddr_t arg2)
+{
+	/*LINTED*/
+	amd_iommu_t *iommu = (amd_iommu_t *)arg1;
+	dev_info_t *dip = iommu->aiomt_dip;
+	int instance = ddi_get_instance(dip);
+	const char *driver = ddi_driver_name(dip);
+	const char *f = "amd_iommu_intr_handler";
+
+	ASSERT(arg1);
+	ASSERT(arg2 == NULL);
+
+	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
+		cmn_err(CE_NOTE, "%s: %s%d: IOMMU unit idx=%d. In INTR handler",
+		    f, driver, instance, iommu->aiomt_idx);
+	}
+
+	if (AMD_IOMMU_REG_GET64(REGADDR64(iommu->aiomt_reg_status_va),
+	    AMD_IOMMU_EVENT_LOG_INT) == 1) {
+		if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
+			cmn_err(CE_NOTE, "%s: %s%d: IOMMU unit idx=%d "
+			    "Event Log Interrupt", f, driver, instance,
+			    iommu->aiomt_idx);
+		}
+		(void) amd_iommu_read_log(iommu, AMD_IOMMU_LOG_DISPLAY);
+		WAIT_SEC(1);
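+		/* clear the interrupt; the status bits are write-1-to-clear */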
+		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_status_va),
+		    AMD_IOMMU_EVENT_LOG_INT, 1);
+		return (DDI_INTR_CLAIMED);
+	}
+
+	if (AMD_IOMMU_REG_GET64(REGADDR64(iommu->aiomt_reg_status_va),
+	    AMD_IOMMU_EVENT_OVERFLOW_INT) == 1) {
+		cmn_err(CE_NOTE, "!%s: %s%d: IOMMU unit idx=%d "
+		    "Event Overflow Interrupt", f, driver, instance,
+		    iommu->aiomt_idx);
+		(void) amd_iommu_read_log(iommu, AMD_IOMMU_LOG_DISCARD);
+		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_status_va),
+		    AMD_IOMMU_EVENT_LOG_INT, 1);
+		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_status_va),
+		    AMD_IOMMU_EVENT_OVERFLOW_INT, 1);
+		return (DDI_INTR_CLAIMED);
+	}
+
+	return (DDI_INTR_UNCLAIMED);
+}
+
+
+static int
+amd_iommu_setup_interrupts(amd_iommu_t *iommu)
+{
+	dev_info_t *dip = iommu->aiomt_dip;
+	int instance = ddi_get_instance(dip);
+	const char *driver = ddi_driver_name(dip);
+	int intrcap0;
+	int intrcapN;
+	int type;
+	int err;
+	int req;
+	int avail;
+	int p2req;
+	int actual;
+	int i;
+	int j;
+	const char *f = "amd_iommu_setup_interrupts";
+
+	if (ddi_intr_get_supported_types(dip, &type) != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s: %s%d: ddi_intr_get_supported_types "
+		    "failed: idx=%d", f, driver, instance, iommu->aiomt_idx);
+		return (DDI_FAILURE);
+	}
+
+	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
+		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
+		    "Interrupt types supported = 0x%x", f, driver, instance,
+		    iommu->aiomt_idx, type);
+	}
+
+	/*
+	 * for now we only support MSI
+	 */
+	if ((type & DDI_INTR_TYPE_MSI) == 0) {
+		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d. "
+		    "MSI interrupts not supported. Failing init.",
+		    f, driver, instance, iommu->aiomt_idx);
+		return (DDI_FAILURE);
+	}
+
+	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
+		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. MSI supported",
+		    f, driver, instance, iommu->aiomt_idx);
+	}
+
+	err = ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_MSI, &req);
+	if (err != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d. "
+		    "ddi_intr_get_nintrs failed err = %d",
+		    f, driver, instance, iommu->aiomt_idx, err);
+		return (DDI_FAILURE);
+	}
+
+	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
+		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
+		    "MSI number of interrupts requested: %d",
+		    f, driver, instance, iommu->aiomt_idx, req);
+	}
+
+	if (req == 0) {
+		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: 0 MSI "
+		    "interrupts requested. Failing init", f,
+		    driver, instance, iommu->aiomt_idx);
+		return (DDI_FAILURE);
+	}
+
+	err = ddi_intr_get_navail(dip, DDI_INTR_TYPE_MSI, &avail);
+	if (err != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d "
+		    "ddi_intr_get_navail failed err = %d", f,
+		    driver, instance, iommu->aiomt_idx, err);
+		return (DDI_FAILURE);
+	}
+
+	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
+		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
+		    "MSI number of interrupts available: %d",
+		    f, driver, instance, iommu->aiomt_idx, avail);
+	}
+
+	if (avail == 0) {
+		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: 0 MSI "
+		    "interrupts available. Failing init", f,
+		    driver, instance, iommu->aiomt_idx);
+		return (DDI_FAILURE);
+	}
+
+	if (avail < req) {
+		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: MSI "
+		    "interrupts: requested (%d) > available (%d). "
+		    "Failing init", f, driver, instance, iommu->aiomt_idx,
+		    req, avail);
+		return (DDI_FAILURE);
+	}
+
+	/* Allocate memory for DDI interrupt handles */
+	iommu->aiomt_intr_htable_sz = req * sizeof (ddi_intr_handle_t);
+	iommu->aiomt_intr_htable = kmem_zalloc(iommu->aiomt_intr_htable_sz,
+	    KM_SLEEP);
+
+	iommu->aiomt_intr_state = AMD_IOMMU_INTR_TABLE;
+
+	/* Convert req to a power of two as required by ddi_intr_alloc */
+	p2req = 0;
+	while (1<<p2req <= req)
+		p2req++;
+	p2req--;
+	req = 1<<p2req;
+
+	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
+		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
+		    "MSI power of 2 number of interrupts: %d,%d",
+		    f, driver, instance, iommu->aiomt_idx, p2req, req);
+	}
+
+	err = ddi_intr_alloc(iommu->aiomt_dip, iommu->aiomt_intr_htable,
+	    DDI_INTR_TYPE_MSI, 0, req, &actual, DDI_INTR_ALLOC_STRICT);
+	if (err != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
+		    "ddi_intr_alloc failed: err = %d",
+		    f, driver, instance, iommu->aiomt_idx, err);
+		amd_iommu_teardown_interrupts(iommu);
+		return (DDI_FAILURE);
+	}
+
+	iommu->aiomt_actual_intrs = actual;
+	iommu->aiomt_intr_state = AMD_IOMMU_INTR_ALLOCED;
+
+	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
+		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
+		    "number of interrupts actually allocated %d",
+		    f, driver, instance, iommu->aiomt_idx, actual);
+	}
+
+	if (iommu->aiomt_actual_intrs < req) {
+		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
+		    "ddi_intr_alloc failed: actual (%d) < req (%d)",
+		    f, driver, instance, iommu->aiomt_idx,
+		    iommu->aiomt_actual_intrs, req);
+		amd_iommu_teardown_interrupts(iommu);
+		return (DDI_FAILURE);
+	}
+
+	for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
+		if (ddi_intr_add_handler(iommu->aiomt_intr_htable[i],
+		    amd_iommu_intr_handler, (void *)iommu, NULL)
+		    != DDI_SUCCESS) {
+			cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
+			    "ddi_intr_add_handler failed: intr = %d, err = %d",
+			    f, driver, instance, iommu->aiomt_idx, i, err);
+			for (j = 0; j < i; j++) {
+				(void) ddi_intr_remove_handler(
+				    iommu->aiomt_intr_htable[j]);
+			}
+			amd_iommu_teardown_interrupts(iommu);
+			return (DDI_FAILURE);
+		}
+	}
+	iommu->aiomt_intr_state = AMD_IOMMU_INTR_HANDLER;
+
+	intrcap0 = intrcapN = -1;
+	if (ddi_intr_get_cap(iommu->aiomt_intr_htable[0], &intrcap0)
+	    != DDI_SUCCESS ||
+	    ddi_intr_get_cap(
+	    iommu->aiomt_intr_htable[iommu->aiomt_actual_intrs - 1], &intrcapN)
+	    != DDI_SUCCESS || intrcap0 != intrcapN) {
+		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
+		    "ddi_intr_get_cap failed or inconsistent cap among "
+		    "interrupts: intrcap0 (%d) < intrcapN (%d)",
+		    f, driver, instance, iommu->aiomt_idx, intrcap0, intrcapN);
+		amd_iommu_teardown_interrupts(iommu);
+		return (DDI_FAILURE);
+	}
+	iommu->aiomt_intr_cap = intrcap0;
+
+	if (intrcap0 & DDI_INTR_FLAG_BLOCK) {
+		/* Need to call block enable */
+		if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
+			cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d: "
+			    "Need to call block enable",
+			    f, driver, instance, iommu->aiomt_idx);
+		}
+		if (ddi_intr_block_enable(iommu->aiomt_intr_htable,
+		    iommu->aiomt_actual_intrs) != DDI_SUCCESS) {
+			cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
+			    "ddi_intr_block enable failed ", f, driver,
+			    instance, iommu->aiomt_idx);
+			(void) ddi_intr_block_disable(iommu->aiomt_intr_htable,
+			    iommu->aiomt_actual_intrs);
+			amd_iommu_teardown_interrupts(iommu);
+			return (DDI_FAILURE);
+		}
+	} else {
+		if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
+			cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d: "
+			    "Need to call individual enable",
+			    f, driver, instance, iommu->aiomt_idx);
+		}
+		for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
+			if (ddi_intr_enable(iommu->aiomt_intr_htable[i])
+			    != DDI_SUCCESS) {
+				cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
+				    "ddi_intr_enable failed: intr = %d", f,
+				    driver, instance, iommu->aiomt_idx, i);
+				for (j = 0; j < i; j++) {
+					(void) ddi_intr_disable(
+					    iommu->aiomt_intr_htable[j]);
+				}
+				amd_iommu_teardown_interrupts(iommu);
+				return (DDI_FAILURE);
+			}
+		}
+	}
+	iommu->aiomt_intr_state = AMD_IOMMU_INTR_ENABLED;
+
+	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
+		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d: "
+		    "Interrupts successfully %s enabled. # of interrupts = %d",
+		    f, driver, instance, iommu->aiomt_idx,
+		    (intrcap0 & DDI_INTR_FLAG_BLOCK) ? "(block)" :
+		    "(individually)", iommu->aiomt_actual_intrs);
+	}
+
+	return (DDI_SUCCESS);
+}
+
+static void
+amd_iommu_teardown_interrupts(amd_iommu_t *iommu)
+{
+	int i;
+
+	if (iommu->aiomt_intr_state & AMD_IOMMU_INTR_ENABLED) {
+		if (iommu->aiomt_intr_cap & DDI_INTR_FLAG_BLOCK) {
+			(void) ddi_intr_block_disable(iommu->aiomt_intr_htable,
+			    iommu->aiomt_actual_intrs);
+		} else {
+			for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
+				(void) ddi_intr_disable(
+				    iommu->aiomt_intr_htable[i]);
+			}
+		}
+	}
+
+	if (iommu->aiomt_intr_state & AMD_IOMMU_INTR_HANDLER) {
+		for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
+			(void) ddi_intr_remove_handler(
+			    iommu->aiomt_intr_htable[i]);
+		}
+	}
+
+	if (iommu->aiomt_intr_state & AMD_IOMMU_INTR_ALLOCED) {
+		for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
+			(void) ddi_intr_free(iommu->aiomt_intr_htable[i]);
+		}
+	}
+	if (iommu->aiomt_intr_state & AMD_IOMMU_INTR_TABLE) {
+		kmem_free(iommu->aiomt_intr_htable,
+		    iommu->aiomt_intr_htable_sz);
+	}
+	iommu->aiomt_intr_htable = NULL;
+	iommu->aiomt_intr_htable_sz = 0;
+	iommu->aiomt_intr_state = AMD_IOMMU_INTR_INVALID;
+}
+
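+/*
+ * Bring up a single IOMMU unit: copy the (locked, read-only)
+ * capability block, map the control registers, set up the device
+ * table, command buffer and event log, program the exclusion range,
+ * set up MSI interrupts, initialize page tables, establish pass-thru
+ * for special and gfx devices, then start the unit and register it
+ * with iommulib.
+ */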
+static amd_iommu_t *
+amd_iommu_init(dev_info_t *dip, ddi_acc_handle_t handle, int idx,
+    uint16_t cap_base)
+{
+	amd_iommu_t *iommu;
+	int instance = ddi_get_instance(dip);
+	const char *driver = ddi_driver_name(dip);
+	uint32_t caphdr;
+	uint32_t low_addr32;
+	uint32_t hi_addr32;
+	uint32_t range;
+	uint32_t misc;
+	uint64_t pgoffset;
+	amd_iommu_acpi_global_t *global;
+	amd_iommu_acpi_ivhd_t *hinfop;
+	const char *f = "amd_iommu_init";
+
+	global = amd_iommu_lookup_acpi_global();
+	hinfop = amd_iommu_lookup_any_ivhd();
+
+	low_addr32 = PCI_CAP_GET32(handle, 0, cap_base,
+	    AMD_IOMMU_CAP_ADDR_LOW_OFF);
+	if (!(low_addr32 & AMD_IOMMU_REG_ADDR_LOCKED)) {
+		cmn_err(CE_WARN, "%s: %s%d: capability registers not locked. "
+		    "Unable to use IOMMU unit idx=%d - skipping ...", f, driver,
+		    instance, idx);
+		return (NULL);
+	}
+
+	iommu = kmem_zalloc(sizeof (amd_iommu_t), KM_SLEEP);
+	mutex_init(&iommu->aiomt_mutex, NULL, MUTEX_DRIVER, NULL);
+	mutex_enter(&iommu->aiomt_mutex);
+
+	mutex_init(&iommu->aiomt_cmdlock, NULL, MUTEX_DRIVER, NULL);
+	mutex_init(&iommu->aiomt_eventlock, NULL, MUTEX_DRIVER, NULL);
+
+	iommu->aiomt_dip = dip;
+	iommu->aiomt_idx = idx;
+
+	/*
+	 * Since everything in the capability block is locked and RO at this
+	 * point, copy everything into the IOMMU struct
+	 */
+
+	/* Get cap header */
+	caphdr = PCI_CAP_GET32(handle, 0, cap_base, AMD_IOMMU_CAP_HDR_OFF);
+	iommu->aiomt_cap_hdr = caphdr;
+	iommu->aiomt_npcache = AMD_IOMMU_REG_GET32(&caphdr,
+	    AMD_IOMMU_CAP_NPCACHE);
+	iommu->aiomt_httun = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_HTTUN);
+
+	if (hinfop)
+		iommu->aiomt_iotlb = hinfop->ach_IotlbSup;
+	else
+		iommu->aiomt_iotlb =
+		    AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_IOTLB);
+
+	iommu->aiomt_captype = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_TYPE);
+	iommu->aiomt_capid = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_ID);
+
+	/*
+	 * Get address of IOMMU control registers
+	 */
+	hi_addr32 = PCI_CAP_GET32(handle, 0, cap_base,
+	    AMD_IOMMU_CAP_ADDR_HI_OFF);
+	iommu->aiomt_low_addr32 = low_addr32;
+	iommu->aiomt_hi_addr32 = hi_addr32;
+	low_addr32 &= ~AMD_IOMMU_REG_ADDR_LOCKED;
+
+	if (hinfop) {
+		iommu->aiomt_reg_pa =  hinfop->ach_IOMMU_reg_base;
+		ASSERT(hinfop->ach_IOMMU_pci_seg == 0);
+	} else {
+		iommu->aiomt_reg_pa =  ((uint64_t)hi_addr32 << 32 | low_addr32);
+	}
+
+	/*
+	 * Get cap range reg
+	 */
+	range = PCI_CAP_GET32(handle, 0, cap_base, AMD_IOMMU_CAP_RANGE_OFF);
+	iommu->aiomt_range = range;
+	iommu->aiomt_rng_valid = AMD_IOMMU_REG_GET32(&range,
+	    AMD_IOMMU_RNG_VALID);
+	if (iommu->aiomt_rng_valid) {
+		iommu->aiomt_rng_bus = AMD_IOMMU_REG_GET32(&range,
+		    AMD_IOMMU_RNG_BUS);
+		iommu->aiomt_first_devfn = AMD_IOMMU_REG_GET32(&range,
+		    AMD_IOMMU_FIRST_DEVFN);
+		iommu->aiomt_last_devfn = AMD_IOMMU_REG_GET32(&range,
+		    AMD_IOMMU_LAST_DEVFN);
+	} else {
+		iommu->aiomt_rng_bus = 0;
+		iommu->aiomt_first_devfn = 0;
+		iommu->aiomt_last_devfn = 0;
+	}
+
+	if (hinfop)
+		iommu->aiomt_ht_unitid = hinfop->ach_IOMMU_UnitID;
+	else
+		iommu->aiomt_ht_unitid = AMD_IOMMU_REG_GET32(&range,
+		    AMD_IOMMU_HT_UNITID);
+
+	/*
+	 * Get cap misc reg
+	 */
+	misc = PCI_CAP_GET32(handle, 0, cap_base, AMD_IOMMU_CAP_MISC_OFF);
+	iommu->aiomt_misc = misc;
+
+	if (global) {
+		iommu->aiomt_htatsresv = global->acg_HtAtsResv;
+		iommu->aiomt_vasize = global->acg_VAsize;
+		iommu->aiomt_pasize = global->acg_PAsize;
+	} else {
+		iommu->aiomt_htatsresv = AMD_IOMMU_REG_GET32(&misc,
+		    AMD_IOMMU_HT_ATSRSV);
+		iommu->aiomt_vasize = AMD_IOMMU_REG_GET32(&misc,
+		    AMD_IOMMU_VA_SIZE);
+		iommu->aiomt_pasize = AMD_IOMMU_REG_GET32(&misc,
+		    AMD_IOMMU_PA_SIZE);
+	}
+
+	if (hinfop) {
+		iommu->aiomt_msinum = hinfop->ach_IOMMU_MSInum;
+	} else {
+		iommu->aiomt_msinum =
+		    AMD_IOMMU_REG_GET32(&misc, AMD_IOMMU_MSINUM);
+	}
+
+	/*
+	 * Set up mapping between control registers PA and VA
+	 */
+	pgoffset = iommu->aiomt_reg_pa & MMU_PAGEOFFSET;
+	ASSERT(pgoffset == 0);
+	iommu->aiomt_reg_pages = mmu_btopr(AMD_IOMMU_REG_SIZE + pgoffset);
+	iommu->aiomt_reg_size = mmu_ptob(iommu->aiomt_reg_pages);
+
+	iommu->aiomt_va = (uintptr_t)device_arena_alloc(
+	    ptob(iommu->aiomt_reg_pages), VM_SLEEP);
+	if (iommu->aiomt_va == 0) {
+		cmn_err(CE_WARN, "%s: %s%d: Failed to alloc VA for IOMMU "
+		    "control regs. Skipping IOMMU idx=%d", f, driver,
+		    instance, idx);
+		mutex_exit(&iommu->aiomt_mutex);
+		(void) amd_iommu_fini(iommu);
+		return (NULL);
+	}
+
+	hat_devload(kas.a_hat, (void *)(uintptr_t)iommu->aiomt_va,
+	    iommu->aiomt_reg_size,
+	    mmu_btop(iommu->aiomt_reg_pa), PROT_READ | PROT_WRITE
+	    | HAT_STRICTORDER, HAT_LOAD_LOCK);
+
+	iommu->aiomt_reg_va = iommu->aiomt_va + pgoffset;
+
+	/*
+	 * Setup the various control register's VA
+	 */
+	iommu->aiomt_reg_devtbl_va = iommu->aiomt_reg_va +
+	    AMD_IOMMU_DEVTBL_REG_OFF;
+	iommu->aiomt_reg_cmdbuf_va = iommu->aiomt_reg_va +
+	    AMD_IOMMU_CMDBUF_REG_OFF;
+	iommu->aiomt_reg_eventlog_va = iommu->aiomt_reg_va +
+	    AMD_IOMMU_EVENTLOG_REG_OFF;
+	iommu->aiomt_reg_ctrl_va = iommu->aiomt_reg_va +
+	    AMD_IOMMU_CTRL_REG_OFF;
+	iommu->aiomt_reg_excl_base_va = iommu->aiomt_reg_va +
+	    AMD_IOMMU_EXCL_BASE_REG_OFF;
+	iommu->aiomt_reg_excl_lim_va = iommu->aiomt_reg_va +
+	    AMD_IOMMU_EXCL_LIM_REG_OFF;
+	iommu->aiomt_reg_cmdbuf_head_va = iommu->aiomt_reg_va +
+	    AMD_IOMMU_CMDBUF_HEAD_REG_OFF;
+	iommu->aiomt_reg_cmdbuf_tail_va = iommu->aiomt_reg_va +
+	    AMD_IOMMU_CMDBUF_TAIL_REG_OFF;
+	iommu->aiomt_reg_eventlog_head_va = iommu->aiomt_reg_va +
+	    AMD_IOMMU_EVENTLOG_HEAD_REG_OFF;
+	iommu->aiomt_reg_eventlog_tail_va = iommu->aiomt_reg_va +
+	    AMD_IOMMU_EVENTLOG_TAIL_REG_OFF;
+	iommu->aiomt_reg_status_va = iommu->aiomt_reg_va +
+	    AMD_IOMMU_STATUS_REG_OFF;
+
+
+	/*
+	 * Setup the DEVICE table, CMD buffer, and LOG buffer in
+	 * memory and setup DMA access to this memory location
+	 */
+	if (amd_iommu_setup_tables_and_buffers(iommu) != DDI_SUCCESS) {
+		mutex_exit(&iommu->aiomt_mutex);
+		(void) amd_iommu_fini(iommu);
+		return (NULL);
+	}
+
+	if (amd_iommu_setup_exclusion(iommu) != DDI_SUCCESS) {
+		mutex_exit(&iommu->aiomt_mutex);
+		(void) amd_iommu_fini(iommu);
+		return (NULL);
+	}
+
+	amd_iommu_enable_interrupts(iommu);
+
+	if (amd_iommu_setup_interrupts(iommu) != DDI_SUCCESS) {
+		mutex_exit(&iommu->aiomt_mutex);
+		(void) amd_iommu_fini(iommu);
+		return (NULL);
+	}
+
+	/*
+	 * need to setup domain table before gfx bypass
+	 */
+	amd_iommu_init_page_tables(iommu);
+
+	/*
+	 * Set pass-thru for special devices like IOAPIC and HPET
+	 *
+	 * Also, gfx devices don't use DDI for DMA. No need to register
+	 * before setting up gfx passthru
+	 */
+	if (amd_iommu_setup_passthru(iommu) != DDI_SUCCESS) {
+		mutex_exit(&iommu->aiomt_mutex);
+		(void) amd_iommu_fini(iommu);
+		return (NULL);
+	}
+
+	if (amd_iommu_start(iommu) != DDI_SUCCESS) {
+		mutex_exit(&iommu->aiomt_mutex);
+		(void) amd_iommu_fini(iommu);
+		return (NULL);
+	}
+
+	/* xxx register/start race  */
+	if (amd_iommu_register(iommu) != DDI_SUCCESS) {
+		mutex_exit(&iommu->aiomt_mutex);
+		(void) amd_iommu_fini(iommu);
+		return (NULL);
+	}
+
+	if (amd_iommu_debug) {
+		cmn_err(CE_NOTE, "%s: %s%d: IOMMU idx=%d inited.", f, driver,
+		    instance, idx);
+	}
+
+	return (iommu);
+}
+
+static int
+amd_iommu_fini(amd_iommu_t *iommu)
+{
+	int idx = iommu->aiomt_idx;
+	dev_info_t *dip = iommu->aiomt_dip;
+	int instance = ddi_get_instance(dip);
+	const char *driver = ddi_driver_name(dip);
+	const char *f = "amd_iommu_fini";
+
+	mutex_enter(&iommu->aiomt_mutex);
+	if (amd_iommu_unregister(iommu) != DDI_SUCCESS) {
+		cmn_err(CE_NOTE, "%s: %s%d: Fini of IOMMU unit failed. "
+		    "idx = %d", f, driver, instance, idx);
+		return (DDI_FAILURE);
+	}
+	amd_iommu_stop(iommu);
+	amd_iommu_fini_page_tables(iommu);
+	amd_iommu_teardown_interrupts(iommu);
+	amd_iommu_teardown_exclusion(iommu);
+	amd_iommu_teardown_tables_and_buffers(iommu);
+	if (iommu->aiomt_va != NULL) {
+		hat_unload(kas.a_hat, (void *)(uintptr_t)iommu->aiomt_va,
+		    iommu->aiomt_reg_size, HAT_UNLOAD_UNLOCK);
+		device_arena_free((void *)(uintptr_t)iommu->aiomt_va,
+		    ptob(iommu->aiomt_reg_pages));
+		iommu->aiomt_va = NULL;
+		iommu->aiomt_reg_va = NULL;
+	}
+	mutex_destroy(&iommu->aiomt_eventlock);
+	mutex_destroy(&iommu->aiomt_cmdlock);
+	mutex_exit(&iommu->aiomt_mutex);
+	mutex_destroy(&iommu->aiomt_mutex);
+	kmem_free(iommu, sizeof (amd_iommu_t));
+
+	cmn_err(CE_NOTE, "%s: %s%d: Fini of IOMMU unit complete. idx = %d",
+	    f, driver, instance, idx);
+
+	return (DDI_SUCCESS);
+}
+
+int
+amd_iommu_setup(dev_info_t *dip, amd_iommu_state_t *statep)
+{
+	int instance = ddi_get_instance(dip);
+	const char *driver = ddi_driver_name(dip);
+	ddi_acc_handle_t handle;
+	uint8_t base_class;
+	uint8_t sub_class;
+	uint8_t prog_class;
+	int idx;
+	uint32_t id;
+	uint16_t cap_base;
+	uint32_t caphdr;
+	uint8_t cap_type;
+	uint8_t cap_id;
+	amd_iommu_t *iommu;
+	const char *f = "amd_iommu_setup";
+
+	ASSERT(instance >= 0);
+	ASSERT(driver);
+
+	/* First setup PCI access to config space */
+
+	if (pci_config_setup(dip, &handle) != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s: PCI config setup failed: %s%d",
+		    f, driver, instance);
+		return (DDI_FAILURE);
+	}
+
+	/*
+	 * The AMD IOMMU is part of an independent PCI function. There may be
+	 * more than one IOMMU in that PCI function
+	 */
+	base_class = pci_config_get8(handle, PCI_CONF_BASCLASS);
+	sub_class = pci_config_get8(handle, PCI_CONF_SUBCLASS);
+	prog_class = pci_config_get8(handle, PCI_CONF_PROGCLASS);
+
+	if (base_class != PCI_CLASS_PERIPH || sub_class != PCI_PERIPH_IOMMU ||
+	    prog_class != AMD_IOMMU_PCI_PROG_IF) {
+		cmn_err(CE_WARN, "%s: %s%d: invalid PCI class(0x%x)/"
+		    "subclass(0x%x)/programming interface(0x%x)", f, driver,
+		    instance, base_class, sub_class, prog_class);
+		pci_config_teardown(&handle);
+		return (DDI_FAILURE);
+	}
+
+	/*
+	 * Find and initialize all IOMMU units in this function
+	 */
+	for (idx = 0; ; idx++) {
+		if (pci_cap_probe(handle, idx, &id, &cap_base) != DDI_SUCCESS)
+			break;
+
+		/* check if cap ID is secure device cap id */
+		if (id != PCI_CAP_ID_SECURE_DEV) {
+			if (amd_iommu_debug) {
+				cmn_err(CE_WARN,
+				    "%s: %s%d: skipping IOMMU: idx(0x%x) "
+				    "cap ID (0x%x) != secure dev capid (0x%x)",
+				    f, driver, instance, idx, id,
+				    PCI_CAP_ID_SECURE_DEV);
+			}
+			continue;
+		}
+
+		/* check if cap type is IOMMU cap type */
+		caphdr = PCI_CAP_GET32(handle, 0, cap_base,
+		    AMD_IOMMU_CAP_HDR_OFF);
+		cap_type = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_TYPE);
+		cap_id = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_ID);
+
+		if (cap_type != AMD_IOMMU_CAP) {
+			cmn_err(CE_WARN, "%s: %s%d: skipping IOMMU: idx(0x%x) "
+			    "cap type (0x%x) != AMD IOMMU CAP (0x%x)", f,
+			    driver, instance, idx, cap_type, AMD_IOMMU_CAP);
+			continue;
+		}
+		ASSERT(cap_id == PCI_CAP_ID_SECURE_DEV);
+		ASSERT(cap_id == id);
+
+		iommu = amd_iommu_init(dip, handle, idx, cap_base);
+		if (iommu == NULL) {
+			cmn_err(CE_WARN, "%s: %s%d: skipping IOMMU: idx(0x%x) "
+			    "failed to init IOMMU", f,
+			    driver, instance, idx);
+			continue;
+		}
+
+		if (statep->aioms_iommu_start == NULL) {
+			statep->aioms_iommu_start = iommu;
+		} else {
+			statep->aioms_iommu_end->aiomt_next = iommu;
+		}
+		statep->aioms_iommu_end = iommu;
+
+		statep->aioms_nunits++;
+	}
+
+	pci_config_teardown(&handle);
+
+	if (amd_iommu_debug) {
+		cmn_err(CE_NOTE, "%s: %s%d: state=%p: setup %d IOMMU units",
+		    f, driver, instance, (void *)statep, statep->aioms_nunits);
+	}
+
+	return (DDI_SUCCESS);
+}
+
+int
+amd_iommu_teardown(dev_info_t *dip, amd_iommu_state_t *statep)
+{
+	int instance = ddi_get_instance(dip);
+	const char *driver = ddi_driver_name(dip);
+	amd_iommu_t *iommu;
+	int teardown;
+	int error = DDI_SUCCESS;
+	const char *f = "amd_iommu_teardown";
+
+	teardown = 0;
+	for (iommu = statep->aioms_iommu_start; iommu;
+	    iommu = iommu->aiomt_next) {
+		ASSERT(statep->aioms_nunits > 0);
+		if (amd_iommu_fini(iommu) != DDI_SUCCESS) {
+			error = DDI_FAILURE;
+			continue;
+		}
+		statep->aioms_nunits--;
+		teardown++;
+	}
+
+	cmn_err(CE_NOTE, "%s: %s%d: state=%p: toredown %d units. "
+	    "%d units left", f, driver, instance, (void *)statep,
+	    teardown, statep->aioms_nunits);
+
+	return (error);
+}
+
+/* Interface with IOMMULIB */
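+/*
+ * Per-device probe. If the device's driver appears in the
+ * colon-separated amd_iommu_disable_list (e.g. "drvA:drvB" -- names
+ * here are illustrative), set the device to pass-thru and return
+ * failure so iommulib skips this IOMMU for that device.
+ */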
+/*ARGSUSED*/
+static int
+amd_iommu_probe(iommulib_handle_t handle, dev_info_t *rdip)
+{
+	const char *driver = ddi_driver_name(rdip);
+	char *s;
+	amd_iommu_t *iommu = iommulib_iommu_getdata(handle);
+
+	if (amd_iommu_disable_list) {
+		s = strstr(amd_iommu_disable_list, driver);
+		if (s == NULL)
+			return (DDI_SUCCESS);
+		if (s == amd_iommu_disable_list || *(s - 1) == ':') {
+			s += strlen(driver);
+			if (*s == '\0' || *s == ':') {
+				amd_iommu_set_passthru(iommu, rdip);
+				return (DDI_FAILURE);
+			}
+		}
+	}
+
+	return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+static int
+amd_iommu_allochdl(iommulib_handle_t handle,
+    dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
+    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *dma_handlep)
+{
+	return (iommulib_iommu_dma_allochdl(dip, rdip, attr, waitfp,
+	    arg, dma_handlep));
+}
+
+/*ARGSUSED*/
+static int
+amd_iommu_freehdl(iommulib_handle_t handle,
+    dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t dma_handle)
+{
+	return (iommulib_iommu_dma_freehdl(dip, rdip, dma_handle));
+}
+
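+/*
+ * Map each DMA cookie of the current window into IOMMU virtual
+ * space, rewriting the cookie addresses in place. On failure, the
+ * cookies already mapped are unmapped before returning.
+ */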
+/*ARGSUSED*/
+static int
+map_current_window(amd_iommu_t *iommu, dev_info_t *rdip, ddi_dma_attr_t *attrp,
+    struct ddi_dma_req *dmareq, ddi_dma_cookie_t *cookie_array, uint_t ccount,
+    int km_flags)
+{
+	const char *driver = ddi_driver_name(iommu->aiomt_dip);
+	int instance = ddi_get_instance(iommu->aiomt_dip);
+	int idx = iommu->aiomt_idx;
+	int i;
+	uint64_t start_va;
+	char *path;
+	int error = DDI_FAILURE;
+	const char *f = "map_current_window";
+
+	path = kmem_alloc(MAXPATHLEN, km_flags);
+	if (path == NULL) {
+		return (DDI_DMA_NORESOURCES);
+	}
+
+	(void) ddi_pathname(rdip, path);
+	mutex_enter(&amd_iommu_pgtable_lock);
+
+	if (amd_iommu_debug == AMD_IOMMU_DEBUG_PAGE_TABLES) {
+		cmn_err(CE_WARN, "%s: %s%d: idx=%d Attempting to get cookies "
+		    "from handle for device %s",
+		    f, driver, instance, idx, path);
+	}
+
+	start_va = 0;
+	for (i = 0; i < ccount; i++) {
+		if ((error = amd_iommu_map_pa2va(iommu, rdip, attrp, dmareq,
+		    cookie_array[i].dmac_cookie_addr,
+		    cookie_array[i].dmac_size,
+		    AMD_IOMMU_VMEM_MAP, &start_va, km_flags)) != DDI_SUCCESS) {
+			break;
+		}
+		cookie_array[i].dmac_cookie_addr = (uintptr_t)start_va;
+		cookie_array[i].dmac_type = 0;
+	}
+
+	if (i != ccount) {
+		cmn_err(CE_WARN, "%s: %s%d: idx=%d Cannot map cookie# %d "
+		    "for device %s", f, driver, instance, idx, i, path);
+		(void) unmap_current_window(iommu, rdip, cookie_array,
+		    ccount, i, 1);
+		goto out;
+	}
+
+	if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
+		cmn_err(CE_NOTE, "%s: return SUCCESS", f);
+	}
+
+	error = DDI_DMA_MAPPED;
+out:
+	mutex_exit(&amd_iommu_pgtable_lock);
+	kmem_free(path, MAXPATHLEN);
+	return (error);
+}
+
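+/*
+ * Tear down the mappings set up by map_current_window(). An
+ * ncookies of -1 means unmap all ccount cookies; a smaller count is
+ * used when cleaning up after a partial mapping failure.
+ */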
+/*ARGSUSED*/
+static int
+unmap_current_window(amd_iommu_t *iommu, dev_info_t *rdip,
+    ddi_dma_cookie_t *cookie_array, uint_t ccount, int ncookies, int locked)
+{
+	const char *driver = ddi_driver_name(iommu->aiomt_dip);
+	int instance = ddi_get_instance(iommu->aiomt_dip);
+	int idx = iommu->aiomt_idx;
+	int i;
+	int error = DDI_FAILURE;
+	char *path;
+	int pathfree;
+	const char *f = "unmap_current_window";
+
+	if (!locked)
+		mutex_enter(&amd_iommu_pgtable_lock);
+
+	path = kmem_alloc(MAXPATHLEN, KM_NOSLEEP);
+	if (path) {
+		(void) ddi_pathname(rdip, path);
+		pathfree = 1;
+	} else {
+		path = "<path-mem-alloc-failed>";
+		pathfree = 0;
+	}
+
+	if (ncookies == -1)
+		ncookies = ccount;
+
+	for (i = 0; i < ncookies; i++) {
+		if (amd_iommu_unmap_va(iommu, rdip,
+		    cookie_array[i].dmac_cookie_addr,
+		    cookie_array[i].dmac_size,
+		    AMD_IOMMU_VMEM_MAP) != DDI_SUCCESS) {
+			break;
+		}
+	}
+
+	if (amd_iommu_cmd(iommu, AMD_IOMMU_CMD_COMPL_WAIT, NULL, 0, 0)
+	    != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s: AMD IOMMU completion wait failed for: %s",
+		    f, path);
+	}
+
+	if (i != ncookies) {
+		cmn_err(CE_WARN, "%s: %s%d: idx=%d Cannot unmap cookie# %d "
+		    "for device %s", f, driver, instance, idx, i, path);
+		error = DDI_FAILURE;
+		goto out;
+	}
+
+	error = DDI_SUCCESS;
+
+out:
+	if (pathfree)
+		kmem_free(path, MAXPATHLEN);
+	if (!locked)
+		mutex_exit(&amd_iommu_pgtable_lock);
+	return (error);
+}
+
+/*ARGSUSED*/
+static int
+amd_iommu_bindhdl(iommulib_handle_t handle, dev_info_t *dip,
+    dev_info_t *rdip, ddi_dma_handle_t dma_handle,
+    struct ddi_dma_req *dmareq, ddi_dma_cookie_t *cookiep,
+    uint_t *ccountp)
+{
+	int dma_error = DDI_DMA_NOMAPPING;
+	int error;
+	char *path;
+	ddi_dma_cookie_t *cookie_array = NULL;
+	uint_t ccount = 0;
+	ddi_dma_impl_t *hp;
+	ddi_dma_attr_t *attrp;
+	int km_flags;
+	amd_iommu_t *iommu = iommulib_iommu_getdata(handle);
+	int instance = ddi_get_instance(rdip);
+	const char *driver = ddi_driver_name(rdip);
+	const char *f = "amd_iommu_bindhdl";
+
+	dma_error = iommulib_iommu_dma_bindhdl(dip, rdip, dma_handle,
+	    dmareq, cookiep, ccountp);
+
+	if (dma_error != DDI_DMA_MAPPED && dma_error != DDI_DMA_PARTIAL_MAP)
+		return (dma_error);
+
+	km_flags = iommulib_iommu_dma_get_sleep_flags(dip, dma_handle);
+
+	path = kmem_alloc(MAXPATHLEN, km_flags);
+	if (path) {
+		(void) ddi_pathname(rdip, path);
+	} else {
+		dma_error = DDI_DMA_NORESOURCES;
+		goto unbind;
+	}
+
+	if (amd_iommu_debug & AMD_IOMMU_DEBUG_BIND) {
+		cmn_err(CE_NOTE, "%s: %s got cookie (%p), #cookies: %d",
+		    f, path,
+		    (void *)cookiep->dmac_cookie_addr,
+		    *ccountp);
+	}
+
+	cookie_array = NULL;
+	ccount = 0;
+	if ((error = iommulib_iommu_dma_get_cookies(dip, dma_handle,
+	    &cookie_array, &ccount)) != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s: %s%d: Cannot get cookies "
+		    "for device %s", f, driver, instance, path);
+		dma_error = error;
+		goto unbind;
+	}
+
+	hp = (ddi_dma_impl_t *)dma_handle;
+	attrp = &hp->dmai_attr;
+
+	error = map_current_window(iommu, rdip, attrp, dmareq,
+	    cookie_array, ccount, km_flags);
+	if (error != DDI_SUCCESS) {
+		dma_error = error;
+		goto unbind;
+	}
+
+	if ((error = iommulib_iommu_dma_set_cookies(dip, dma_handle,
+	    cookie_array, ccount)) != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s: %s%d: Cannot set cookies "
+		    "for device %s", f, driver, instance, path);
+		dma_error = error;
+		goto unbind;
+	}
+
+	*cookiep = cookie_array[0];
+
+	if (amd_iommu_debug & AMD_IOMMU_DEBUG_BIND) {
+		cmn_err(CE_NOTE, "%s: %s remapped cookie (%p), #cookies: %d",
+		    f, path,
+		    (void *)(uintptr_t)cookiep->dmac_cookie_addr,
+		    *ccountp);
+	}
+
+	kmem_free(path, MAXPATHLEN);
+	ASSERT(dma_error == DDI_DMA_MAPPED || dma_error == DDI_DMA_PARTIAL_MAP);
+	return (dma_error);
+unbind:
+	kmem_free(path, MAXPATHLEN);
+	(void) iommulib_iommu_dma_unbindhdl(dip, rdip, dma_handle);
+	return (dma_error);
+}
+
+/*ARGSUSED*/
+static int
+amd_iommu_unbindhdl(iommulib_handle_t handle,
+    dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t dma_handle)
+{
+	amd_iommu_t *iommu = iommulib_iommu_getdata(handle);
+	ddi_dma_cookie_t *cookie_array = NULL;
+	uint_t ccount = 0;
+	int error = DDI_FAILURE;
+	int instance = ddi_get_instance(rdip);
+	const char *driver = ddi_driver_name(rdip);
+	const char *f = "amd_iommu_unbindhdl";
+
+	cookie_array = NULL;
+	ccount = 0;
+	if (iommulib_iommu_dma_get_cookies(dip, dma_handle, &cookie_array,
+	    &ccount) != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s: %s%d: Cannot get cookies "
+		    "for device %p", f, driver, instance, (void *)rdip);
+		error = DDI_FAILURE;
+		goto out;
+	}
+
+	if (iommulib_iommu_dma_clear_cookies(dip, dma_handle) != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s: %s%d: Cannot clear cookies "
+		    "for device %p", f, driver, instance, (void *)rdip);
+		error = DDI_FAILURE;
+		goto out;
+	}
+
+	if (iommulib_iommu_dma_unbindhdl(dip, rdip, dma_handle)
+	    != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s: %s%d: failed to unbindhdl for dip=%p",
+		    f, driver, instance, (void *)rdip);
+		error = DDI_FAILURE;
+		goto out;
+	}
+
+	if (unmap_current_window(iommu, rdip, cookie_array, ccount, -1, 0)
+	    != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s: %s%d: failed to unmap current window "
+		    "for dip=%p", f, driver, instance, (void *)rdip);
+		error = DDI_FAILURE;
+	} else {
+		error = DDI_SUCCESS;
+	}
+out:
+	if (cookie_array)
+		kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);
+	return (error);
+}
+
+/*ARGSUSED*/
+static int
+amd_iommu_sync(iommulib_handle_t handle, dev_info_t *dip,
+    dev_info_t *rdip, ddi_dma_handle_t dma_handle, off_t off,
+    size_t len, uint_t cache_flags)
+{
+	ddi_dma_cookie_t *cookie_array = NULL;
+	uint_t ccount = 0;
+	int error;
+	const char *f = "amd_iommu_sync";
+
+	cookie_array = NULL;
+	ccount = 0;
+	if (iommulib_iommu_dma_get_cookies(dip, dma_handle, &cookie_array,
+	    &ccount) != DDI_SUCCESS) {
+		ASSERT(cookie_array == NULL);
+		cmn_err(CE_WARN, "%s: Cannot get cookies "
+		    "for device %p", f, (void *)rdip);
+		error = DDI_FAILURE;
+		goto out;
+	}
+
+	if (iommulib_iommu_dma_clear_cookies(dip, dma_handle) != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s: Cannot clear cookies "
+		    "for device %p", f, (void *)rdip);
+		error = DDI_FAILURE;
+		goto out;
+	}
+
+	error = iommulib_iommu_dma_sync(dip, rdip, dma_handle, off,
+	    len, cache_flags);
+
+	if (iommulib_iommu_dma_set_cookies(dip, dma_handle, cookie_array,
+	    ccount) != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s: Cannot set cookies "
+		    "for device %p", f, (void *)rdip);
+		error = DDI_FAILURE;
+	} else {
+		cookie_array = NULL;
+		ccount = 0;
+	}
+
+out:
+	if (cookie_array)
+		kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);
+	return (error);
+}
+
+/*ARGSUSED*/
+static int
+amd_iommu_win(iommulib_handle_t handle, dev_info_t *dip,
+    dev_info_t *rdip, ddi_dma_handle_t dma_handle, uint_t win,
+    off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
+    uint_t *ccountp)
+{
+	int error = DDI_FAILURE;
+	amd_iommu_t *iommu = iommulib_iommu_getdata(handle);
+	ddi_dma_cookie_t *cookie_array = NULL;
+	uint_t ccount = 0;
+	int km_flags;
+	ddi_dma_impl_t *hp;
+	ddi_dma_attr_t *attrp;
+	struct ddi_dma_req sdmareq = {0};
+	int instance = ddi_get_instance(rdip);
+	const char *driver = ddi_driver_name(rdip);
+	const char *f = "amd_iommu_win";
+
+	km_flags = iommulib_iommu_dma_get_sleep_flags(dip, dma_handle);
+
+	cookie_array = NULL;
+	ccount = 0;
+	if (iommulib_iommu_dma_get_cookies(dip, dma_handle, &cookie_array,
+	    &ccount) != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s: %s%d: Cannot get cookies "
+		    "for device %p", f, driver, instance, (void *)rdip);
+		error = DDI_FAILURE;
+		goto out;
+	}
+
+	if (iommulib_iommu_dma_clear_cookies(dip, dma_handle) != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s: %s%d: Cannot clear cookies "
+		    "for device %p", f, driver, instance, (void *)rdip);
+		error = DDI_FAILURE;
+		goto out;
+	}
+
+	if (iommulib_iommu_dma_win(dip, rdip, dma_handle, win,
+	    offp, lenp, cookiep, ccountp) != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s: %s%d: failed to switch windows for dip=%p",
+		    f, driver, instance, (void *)rdip);
+		error = DDI_FAILURE;
+		goto out;
+	}
+
+	(void) unmap_current_window(iommu, rdip, cookie_array, ccount, -1, 0);
+
+	if (cookie_array) {
+		kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);
+	}
+
+	cookie_array = NULL;
+	ccount = 0;
+	if (iommulib_iommu_dma_get_cookies(dip, dma_handle, &cookie_array,
+	    &ccount) != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s: %s%d: Cannot get cookies "
+		    "for device %p", f, driver, instance, (void *)rdip);
+		error = DDI_FAILURE;
+		goto out;
+	}
+
+	hp = (ddi_dma_impl_t *)dma_handle;
+	attrp = &hp->dmai_attr;
+
+	sdmareq.dmar_flags = DDI_DMA_RDWR;
+	error = map_current_window(iommu, rdip, attrp, &sdmareq,
+	    cookie_array, ccount, km_flags);
+
+	if (iommulib_iommu_dma_set_cookies(dip, dma_handle, cookie_array,
+	    ccount) != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s: %s%d: Cannot set cookies "
+		    "for device %p", f, driver, instance, (void *)rdip);
+		error = DDI_FAILURE;
+		goto out;
+	}
+
+	*cookiep = cookie_array[0];
+
+	return (error == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
+out:
+	if (cookie_array)
+		kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);
+
+	return (error);
+}
+
+/* Obsoleted DMA routines */
+
+/*ARGSUSED*/
+static int
+amd_iommu_map(iommulib_handle_t handle, dev_info_t *dip,
+    dev_info_t *rdip, struct ddi_dma_req *dmareq,
+    ddi_dma_handle_t *dma_handle)
+{
+	ASSERT(0);
+	return (iommulib_iommu_dma_map(dip, rdip, dmareq, dma_handle));
+}
+
+/*ARGSUSED*/
+static int
+amd_iommu_mctl(iommulib_handle_t handle, dev_info_t *dip,
+    dev_info_t *rdip, ddi_dma_handle_t dma_handle,
+    enum ddi_dma_ctlops request, off_t *offp, size_t *lenp,
+    caddr_t *objpp, uint_t cache_flags)
+{
+	ASSERT(0);
+	return (iommulib_iommu_dma_mctl(dip, rdip, dma_handle,
+	    request, offp, lenp, objpp, cache_flags));
+}
+
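+/*
+ * Workarounds for hardware on which a single 64-bit access to the IOMMU
+ * registers is unreliable (selected at runtime by amd_iommu_64bit_bug):
+ * the register is accessed as two 32-bit halves via a split_t union and
+ * the field is extracted from, or inserted into, the reassembled 64-bit
+ * value. The two 32-bit accesses are not atomic with respect to
+ * concurrent hardware updates of the register.
+ */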
+uint64_t
+amd_iommu_reg_get64_workaround(uint64_t *regp, uint32_t bits)
+{
+	split_t s;
+	uint32_t *ptr32 = (uint32_t *)regp;
+	uint64_t *s64p = &(s.u64);
+
+	s.u32[0] = ptr32[0];
+	s.u32[1] = ptr32[1];
+
+	return (AMD_IOMMU_REG_GET64_IMPL(s64p, bits));
+}
+
+uint64_t
+amd_iommu_reg_set64_workaround(uint64_t *regp, uint32_t bits, uint64_t value)
+{
+	split_t s;
+	uint32_t *ptr32 = (uint32_t *)regp;
+	uint64_t *s64p = &(s.u64);
+
+	s.u32[0] = ptr32[0];
+	s.u32[1] = ptr32[1];
+
+	AMD_IOMMU_REG_SET64_IMPL(s64p, bits, value);
+
+	*regp = s.u64;
+
+	return (s.u64);
+}
+
+void
+amd_iommu_read_boot_props(void)
+{
+	char *propval;
+
+	/*
+	 * If the "amd-iommu" boot property is set to "no" or "false",
+	 * disable the AMD IOMMU.
+	 */
+	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_root_node(),
+	    DDI_PROP_DONTPASS, "amd-iommu", &propval) == DDI_SUCCESS) {
+		if (strcmp(propval, "no") == 0 ||
+		    strcmp(propval, "false") == 0) {
+			amd_iommu_disable = 1;
+		}
+		ddi_prop_free(propval);
+	}
+
+	/*
+	 * Copy the list of drivers for which IOMMU is disabled by user.
+	 */
+	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_root_node(),
+	    DDI_PROP_DONTPASS, "amd-iommu-disable-list", &propval)
+	    == DDI_SUCCESS) {
+		amd_iommu_disable_list = kmem_alloc(strlen(propval) + 1,
+		    KM_SLEEP);
+		(void) strcpy(amd_iommu_disable_list, propval);
+		ddi_prop_free(propval);
+	}
+}
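+
+/*
+ * The properties above are expected to arrive from the boot environment
+ * (for example a -B name=value option on the kernel line; the exact
+ * delivery mechanism is an assumption here), while
+ * amd_iommu_lookup_conf_props() below reads the same names from the
+ * driver's amd_iommu.conf file.
+ */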
+
+void
+amd_iommu_lookup_conf_props(dev_info_t *dip)
+{
+	char *disable;
+
+	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
+	    DDI_PROP_DONTPASS|DDI_PROP_NOTPROM, "amd-iommu", &disable)
+	    == DDI_PROP_SUCCESS) {
+		if (strcmp(disable, "no") == 0) {
+			amd_iommu_disable = 1;
+		}
+		ddi_prop_free(disable);
+	}
+
+	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
+	    DDI_PROP_DONTPASS|DDI_PROP_NOTPROM, "amd-iommu-disable-list",
+	    &disable) == DDI_PROP_SUCCESS) {
+		amd_iommu_disable_list = kmem_alloc(strlen(disable) + 1,
+		    KM_SLEEP);
+		(void) strcpy(amd_iommu_disable_list, disable);
+		ddi_prop_free(disable);
+	}
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_impl.h	Mon Sep 14 21:48:21 2009 -0700
@@ -0,0 +1,494 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef	_AMD_IOMMU_IMPL_H
+#define	_AMD_IOMMU_IMPL_H
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#include <sys/pci.h>
+
+#ifdef	_KERNEL
+
+#define	AMD_IOMMU_PCI_PROG_IF	(0x0)
+
+#define	AMD_IOMMU_CAP		(0x3)
+
+#define	AMD_IOMMU_REG_SIZE	(0x2028)
+#define	AMD_IOMMU_DEVTBL_SZ	(16)
+#define	AMD_IOMMU_CMDBUF_SZ	(15)
+#define	AMD_IOMMU_EVENTLOG_SZ	(15)
+#define	AMD_IOMMU_DEVENT_SZ	(32)
+#define	AMD_IOMMU_CMD_SZ	(16)
+#define	AMD_IOMMU_EVENT_SZ	(16)
+
+/* Capability Register offsets */
+#define	AMD_IOMMU_CAP_HDR_OFF		(0x00)
+#define	AMD_IOMMU_CAP_ADDR_LOW_OFF	(0x04)
+#define	AMD_IOMMU_CAP_ADDR_HI_OFF	(0x08)
+#define	AMD_IOMMU_CAP_RANGE_OFF		(0x0C)
+#define	AMD_IOMMU_CAP_MISC_OFF		(0x10)
+
+/* Control Register offsets */
+#define	AMD_IOMMU_DEVTBL_REG_OFF	(0x00)
+#define	AMD_IOMMU_CMDBUF_REG_OFF	(0x08)
+#define	AMD_IOMMU_EVENTLOG_REG_OFF	(0x10)
+#define	AMD_IOMMU_CTRL_REG_OFF		(0x18)
+#define	AMD_IOMMU_EXCL_BASE_REG_OFF	(0x20)
+#define	AMD_IOMMU_EXCL_LIM_REG_OFF	(0x28)
+#define	AMD_IOMMU_CMDBUF_HEAD_REG_OFF	(0x2000)
+#define	AMD_IOMMU_CMDBUF_TAIL_REG_OFF	(0x2008)
+#define	AMD_IOMMU_EVENTLOG_HEAD_REG_OFF	(0x2010)
+#define	AMD_IOMMU_EVENTLOG_TAIL_REG_OFF	(0x2018)
+#define	AMD_IOMMU_STATUS_REG_OFF	(0x2020)
+
+/* Capability Header Register Bits */
+#define	AMD_IOMMU_CAP_NPCACHE	(26 << 16 | 26)
+#define	AMD_IOMMU_CAP_HTTUN	(25 << 16 | 25)
+#define	AMD_IOMMU_CAP_IOTLB	(24 << 16 | 24)
+#define	AMD_IOMMU_CAP_TYPE	(18 << 16 | 16)
+#define	AMD_IOMMU_CAP_ID	(7 << 16 | 0)
+
+/* Capability Range Register bits */
+#define	AMD_IOMMU_LAST_DEVFN	(31 << 16 | 24)
+#define	AMD_IOMMU_FIRST_DEVFN	(23 << 16 | 16)
+#define	AMD_IOMMU_RNG_BUS	(15 << 16 | 8)
+#define	AMD_IOMMU_RNG_VALID	(7 << 16 | 7)
+#define	AMD_IOMMU_HT_UNITID	(4 << 16 | 0)
+
+
+/* Capability Misc Register bits */
+#define	AMD_IOMMU_HT_ATSRSV	(22 << 16 | 22)
+#define	AMD_IOMMU_VA_SIZE	(21 << 16 | 15)
+#define	AMD_IOMMU_PA_SIZE	(14 << 16 | 8)
+#define	AMD_IOMMU_MSINUM	(4 << 16 | 0)
+
+/* Device Table Base Address register bits */
+#define	AMD_IOMMU_DEVTABBASE	(51 << 16 | 12)
+#define	AMD_IOMMU_DEVTABSIZE	(8 << 16 | 0)
+
+/* Command Buffer Base Address register bits */
+#define	AMD_IOMMU_COMLEN	(59 << 16 | 56)
+#define	AMD_IOMMU_COMBASE	(51 << 16 | 12)
+
+#define	AMD_IOMMU_CMDBUF_MINSZ	(8)
+#define	AMD_IOMMU_CMDBUF_MAXSZ	(15)
+
+/* Event Log Base Address register bits */
+#define	AMD_IOMMU_EVENTLEN	(59 << 16 | 56)
+#define	AMD_IOMMU_EVENTBASE	(51 << 16 | 12)
+
+#define	AMD_IOMMU_EVENTLOG_MINSZ	(8)
+#define	AMD_IOMMU_EVENTLOG_MAXSZ	(15)
+
+/* Control register bits */
+#define	AMD_IOMMU_CMDBUF_ENABLE		(12 << 16 | 12)
+#define	AMD_IOMMU_ISOC			(11 << 16 | 11)
+#define	AMD_IOMMU_COHERENT		(10 << 16 | 10)
+#define	AMD_IOMMU_RESPASSPW		(9 << 16 | 9)
+#define	AMD_IOMMU_PASSPW		(8 << 16 | 8)
+#define	AMD_IOMMU_INVTO			(7 << 16 | 5)
+#define	AMD_IOMMU_COMWAITINT_ENABLE	(4 << 16 | 4)
+#define	AMD_IOMMU_EVENTINT_ENABLE	(3 << 16 | 3)
+#define	AMD_IOMMU_EVENTLOG_ENABLE	(2 << 16 | 2)
+#define	AMD_IOMMU_HT_TUN_ENABLE		(1 << 16 | 1)
+#define	AMD_IOMMU_ENABLE		(0 << 16 | 0)
+
+/* Exclusion Base Register bits */
+#define	AMD_IOMMU_EXCL_BASE_ADDR	(51 << 16 | 12)
+#define	AMD_IOMMU_EXCL_BASE_ALLOW	(1 << 16 | 1)
+#define	AMD_IOMMU_EXCL_BASE_EXEN	(0 << 16 | 0)
+
+/* Exclusion Limit Register bits */
+#define	AMD_IOMMU_EXCL_LIM		(51 << 16 | 12)
+
+/* Command Buffer Head Pointer Register bits */
+#define	AMD_IOMMU_CMDHEADPTR		(18 << 16 | 4)
+
+/* Command Buffer Tail Pointer Register bits */
+#define	AMD_IOMMU_CMDTAILPTR		(18 << 16 | 4)
+
+/* Event Log Head Pointer Register bits */
+#define	AMD_IOMMU_EVENTHEADPTR		(18 << 16 | 4)
+
+/* Event Log Tail Pointer Register bits */
+#define	AMD_IOMMU_EVENTTAILPTR		(18 << 16 | 4)
+
+/* Status Register bits */
+#define	AMD_IOMMU_CMDBUF_RUN		(4 << 16 | 4)
+#define	AMD_IOMMU_EVENT_LOG_RUN		(3 << 16 | 3)
+#define	AMD_IOMMU_COMWAIT_INT		(2 << 16 | 2)
+#define	AMD_IOMMU_EVENT_LOG_INT		(1 << 16 | 1)
+#define	AMD_IOMMU_EVENT_OVERFLOW_INT	(0 << 16 | 0)
+
+/* Device Table Bits */
+
+/* size in bytes of each device table entry */
+#define	AMD_IOMMU_DEVTBL_ENTRY_SZ	(32)
+
+/* Interrupt Remapping related Device Table bits */
+#define	AMD_IOMMU_DEVTBL_LINT1PASS	((191-128) << 16 | (191-128))
+#define	AMD_IOMMU_DEVTBL_LINT0PASS	((190-128) << 16 | (190-128))
+#define	AMD_IOMMU_DEVTBL_INTCTL		((189-128) << 16 | (188-128))
+#define	AMD_IOMMU_DEVTBL_NMIPASS	((186-128) << 16 | (186-128))
+#define	AMD_IOMMU_DEVTBL_EXTINTPAS	((185-128) << 16 | (185-128))
+#define	AMD_IOMMU_DEVTBL_INITPASS	((184-128) << 16 | (184-128))
+#define	AMD_IOMMU_DEVTBL_INTR_ROOT	((179-128) << 16 | (134-128))
+#define	AMD_IOMMU_DEVTBL_IG		((133-128) << 16 | (133-128))
+#define	AMD_IOMMU_DEVTBL_INTTABLEN	((132-128) << 16 | (129-128))
+#define	AMD_IOMMU_DEVTBL_IV		((128-128) << 16 | (128-128))
+
+/* DMA Remapping related Device Table Bits */
+#define	AMD_IOMMU_DEVTBL_SYSMGT		((105-64) << 16 | (104-64))
+#define	AMD_IOMMU_DEVTBL_EX		((103-64) << 16 | (103-64))
+#define	AMD_IOMMU_DEVTBL_SD		((102-64) << 16 | (102-64))
+#define	AMD_IOMMU_DEVTBL_CACHE		((101-64) << 16 | (101-64))
+#define	AMD_IOMMU_DEVTBL_IOCTL		((100-64) << 16 | (99-64))
+#define	AMD_IOMMU_DEVTBL_SA		((98-64) << 16 | (98-64))
+#define	AMD_IOMMU_DEVTBL_SE		((97-64) << 16 | (97-64))
+#define	AMD_IOMMU_DEVTBL_IOTLB		((96-64) << 16 | (96-64))
+#define	AMD_IOMMU_DEVTBL_DOMAINID	((79-64) << 16 | (64-64))
+#define	AMD_IOMMU_DEVTBL_IW		(62 << 16 | 62)
+#define	AMD_IOMMU_DEVTBL_IR		(61 << 16 | 61)
+#define	AMD_IOMMU_DEVTBL_ROOT_PGTBL	(51 << 16 | 12)
+#define	AMD_IOMMU_DEVTBL_PG_MODE	(11 << 16 | 9)
+#define	AMD_IOMMU_DEVTBL_TV		(1 << 16 | 1)
+#define	AMD_IOMMU_DEVTBL_V		(0 << 16 | 0)
+
+#define	BUS_DEVFN_TO_BDF(b, devfn)	(devfn)
+#define	AMD_IOMMU_ALIAS_HASH_SZ		(256)
+
+#define	AMD_IOMMU_REG_ADDR_LOCKED	(0x1)
+
+/*
+ * IOMMU Command bits
+ */
+
+typedef enum {
+	AMD_IOMMU_CMD_INVAL = 0,
+	AMD_IOMMU_CMD_COMPL_WAIT,
+	AMD_IOMMU_CMD_INVAL_DEVTAB_ENTRY,
+	AMD_IOMMU_CMD_INVAL_IOMMU_PAGES,
+	AMD_IOMMU_CMD_INVAL_IOTLB_PAGES,
+	AMD_IOMMU_CMD_INVAL_INTR_TABLE,
+} amd_iommu_cmd_t;
+
+typedef enum {
+	AMD_IOMMU_CMD_FLAGS_NONE = 0,
+	AMD_IOMMU_CMD_FLAGS_COMPL_WAIT = 1,
+	AMD_IOMMU_CMD_FLAGS_COMPL_WAIT_F = 2,
+	AMD_IOMMU_CMD_FLAGS_COMPL_WAIT_S = 4,
+	AMD_IOMMU_CMD_FLAGS_PAGE_PDE_INVAL = 8,
+	AMD_IOMMU_CMD_FLAGS_PAGE_INVAL_S = 16,
+	AMD_IOMMU_CMD_FLAGS_IOTLB_INVAL_S = 32
+} amd_iommu_cmd_flags_t;
+
+/* Common command bits */
+#define	AMD_IOMMU_CMD_OPCODE		(31 << 16 | 28)
+
+/* Completion Wait command bits */
+#define	AMD_IOMMU_CMD_COMPL_WAIT_S		(0 << 16 | 0)
+#define	AMD_IOMMU_CMD_COMPL_WAIT_I		(1 << 16 | 1)
+#define	AMD_IOMMU_CMD_COMPL_WAIT_F		(2 << 16 | 2)
+#define	AMD_IOMMU_CMD_COMPL_WAIT_STORE_ADDR_LO	(31 << 16 | 3)
+#define	AMD_IOMMU_CMD_COMPL_WAIT_STORE_ADDR_HI	(19 << 16 | 0)
+
+/* Invalidate Device Table entry command bits */
+#define	AMD_IOMMU_CMD_INVAL_DEVTAB_DEVICEID		(15 << 16 | 0)
+
+/* Invalidate IOMMU Pages command bits */
+#define	AMD_IOMMU_CMD_INVAL_PAGES_DOMAINID		(15 << 16 | 0)
+#define	AMD_IOMMU_CMD_INVAL_PAGES_S			(0 << 16 | 0)
+#define	AMD_IOMMU_CMD_INVAL_PAGES_PDE			(1 << 16 | 1)
+#define	AMD_IOMMU_CMD_INVAL_PAGES_ADDR_LO		(31 << 16 | 12)
+#define	AMD_IOMMU_CMD_INVAL_PAGES_ADDR_HI		(63 << 16 | 32)
+
+
+/* Invalidate IOTLB command bits */
+#define	AMD_IOMMU_CMD_INVAL_IOTLB_DEVICEID		(15 << 16 | 0)
+#define	AMD_IOMMU_CMD_INVAL_IOTLB_MAXPEND		(31 << 16 | 24)
+#define	AMD_IOMMU_CMD_INVAL_IOTLB_QUEUEID		(15 << 16 | 0)
+#define	AMD_IOMMU_CMD_INVAL_IOTLB_S			(0 << 16 | 0)
+#define	AMD_IOMMU_CMD_INVAL_IOTLB_ADDR_LO		(31 << 16 | 12)
+#define	AMD_IOMMU_CMD_INVAL_IOTLB_ADDR_HI		(31 << 16 | 0)
+
+#define	AMD_IOMMU_DEFAULT_MAXPEND			(10)
+
+/* Invalidate Interrupt Table bits */
+#define	AMD_IOMMU_CMD_INVAL_INTR_DEVICEID		(15 << 16 | 0)
+
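+/*
+ * dmac_cookie_addr names the DMA cookie address field appropriate to
+ * the data model: the 64-bit dmac_laddress on amd64 and the 32-bit
+ * dmac_address on 32-bit x86.
+ */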
+#if defined(__amd64)
+#define	dmac_cookie_addr	dmac_laddress
+#else
+#define	dmac_cookie_addr	dmac_address
+#endif
+
+#define	AMD_IOMMU_TABLE_ALIGN	((1ULL << 12) - 1)
+
+#define	AMD_IOMMU_MAX_DEVICEID	(0xFFFF)
+
+/*
+ * DMA sync macros
+ * TODO: optimize sync only small ranges
+ */
+#define	SYNC_FORDEV(h)	(void) ddi_dma_sync(h, 0, 0, DDI_DMA_SYNC_FORDEV)
+#define	SYNC_FORKERN(h)	(void) ddi_dma_sync(h, 0, 0, DDI_DMA_SYNC_FORKERNEL)
+
+#define	WAIT_SEC(s)	drv_usecwait(1000000*(s))
+
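+/*
+ * Command buffer entries are AMD_IOMMU_CMD_SZ (16) bytes each, so a
+ * command index and its byte offset in the buffer differ by a shift
+ * of 4.
+ */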
+#define	CMD2OFF(c)	((c) << 4)
+#define	OFF2CMD(o)	((o) >> 4)
+
+typedef union split {
+	uint64_t u64;
+	uint32_t u32[2];
+} split_t;
+
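+/*
+ * Register fields in this header are encoded in a single word as
+ * (start_bit << 16 | end_bit), where start is the field's high bit and
+ * end its low bit; for example, AMD_IOMMU_CAP_ID (7 << 16 | 0) names
+ * bits 7:0. BITPOS_START/BITPOS_END unpack the encoding, and the
+ * REG_GET/REG_SET macros below build masks from it.
+ */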
+#define	BITPOS_START(b)	((b) >> 16)
+#define	BITPOS_END(b)	((b) & 0xFFFF)
+
+#define	START_MASK64(s)	(((s) == 63) ? ~((uint64_t)0) : \
+	(uint64_t)((1ULL << ((s)+1)) - 1))
+#define	START_MASK32(s)	(((s) == 31) ? ~((uint32_t)0) : \
+	(uint32_t)((1ULL << ((s)+1)) - 1))
+#define	START_MASK16(s)	(((s) == 15) ? ~((uint16_t)0) : \
+	(uint16_t)((1ULL << ((s)+1)) - 1))
+#define	START_MASK8(s)	(((s) == 7) ? ~((uint8_t)0) : \
+	(uint8_t)((1ULL << ((s)+1)) - 1))
+
+#define	END_MASK(e)	((1ULL << (e)) - 1)
+
+#define	BIT_MASK64(s, e)	(uint64_t)(START_MASK64(s) & ~END_MASK(e))
+#define	BIT_MASK32(s, e)	(uint32_t)(START_MASK32(s) & ~END_MASK(e))
+#define	BIT_MASK16(s, e)	(uint16_t)(START_MASK16(s) & ~END_MASK(e))
+#define	BIT_MASK8(s, e)		(uint8_t)(START_MASK8(s) & ~END_MASK(e))
+
+#define	AMD_IOMMU_REG_GET64_IMPL(rp, b) \
+	(((*(rp)) & (START_MASK64(BITPOS_START(b)))) >> BITPOS_END(b))
+#define	AMD_IOMMU_REG_GET64(rp, b) 					 \
+	((amd_iommu_64bit_bug) ? amd_iommu_reg_get64_workaround(rp, b) : \
+	AMD_IOMMU_REG_GET64_IMPL(rp, b))
+#define	AMD_IOMMU_REG_GET32(rp, b) \
+	(((*(rp)) & (START_MASK32(BITPOS_START(b)))) >> BITPOS_END(b))
+#define	AMD_IOMMU_REG_GET16(rp, b) \
+	(((*(rp)) & (START_MASK16(BITPOS_START(b)))) >> BITPOS_END(b))
+#define	AMD_IOMMU_REG_GET8(rp, b) \
+	(((*(rp)) & (START_MASK8(BITPOS_START(b)))) >> BITPOS_END(b))
+
+#define	AMD_IOMMU_REG_SET64_IMPL(rp, b, v) \
+	((*(rp)) = \
+	(((uint64_t)(*(rp)) & ~(BIT_MASK64(BITPOS_START(b), BITPOS_END(b)))) \
+	| ((uint64_t)(v) << BITPOS_END(b))))
+
+#define	AMD_IOMMU_REG_SET64(rp, b, v) 			\
+	(void) ((amd_iommu_64bit_bug) ?			\
+	amd_iommu_reg_set64_workaround(rp, b, v) : 	\
+	AMD_IOMMU_REG_SET64_IMPL(rp, b, v))
+
+#define	AMD_IOMMU_REG_SET32(rp, b, v) \
+	((*(rp)) = \
+	(((uint32_t)(*(rp)) & ~(BIT_MASK32(BITPOS_START(b), BITPOS_END(b)))) \
+	| ((uint32_t)(v) << BITPOS_END(b))))
+
+#define	AMD_IOMMU_REG_SET16(rp, b, v) \
+	((*(rp)) = \
+	(((uint16_t)(*(rp)) & ~(BIT_MASK16(BITPOS_START(b), BITPOS_END(b)))) \
+	| ((uint16_t)(v) << BITPOS_END(b))))
+
+#define	AMD_IOMMU_REG_SET8(rp, b, v) \
+	((*(rp)) = \
+	(((uint8_t)(*(rp)) & ~(BIT_MASK8(BITPOS_START(b), BITPOS_END(b)))) \
+	| ((uint8_t)(v) << BITPOS_END(b))))
+
+/*
+ * Cast a 64 bit pointer to a uint64_t *
+ */
+#define	REGADDR64(a)	((uint64_t *)(uintptr_t)(a))
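+/*
+ * Illustrative use of the accessors above (a sketch, not code from this
+ * changeset):
+ *
+ *	uint64_t *rp = REGADDR64(iommu->aiomt_reg_ctrl_va);
+ *	AMD_IOMMU_REG_SET64(rp, AMD_IOMMU_ENABLE, 1);
+ *
+ * sets bit 0 (the enable bit) of the control register.
+ */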
+
+typedef enum {
+	AMD_IOMMU_INTR_INVALID = 0,
+	AMD_IOMMU_INTR_TABLE,
+	AMD_IOMMU_INTR_ALLOCED,
+	AMD_IOMMU_INTR_HANDLER,
+	AMD_IOMMU_INTR_ENABLED
+} amd_iommu_intr_state_t;
+
+
+typedef struct amd_iommu {
+	kmutex_t aiomt_mutex;
+	kmutex_t aiomt_eventlock;
+	kmutex_t aiomt_cmdlock;
+	dev_info_t *aiomt_dip;
+	int aiomt_idx;
+	iommulib_handle_t aiomt_iommulib_handle;
+	iommulib_ops_t *aiomt_iommulib_ops;
+	uint32_t aiomt_cap_hdr;
+	uint8_t aiomt_npcache;
+	uint8_t aiomt_httun;
+	uint8_t aiomt_iotlb;
+	uint8_t aiomt_captype;
+	uint8_t aiomt_capid;
+	uint32_t aiomt_low_addr32;
+	uint32_t aiomt_hi_addr32;
+	uint64_t aiomt_reg_pa;
+	uint64_t aiomt_va;
+	uint64_t aiomt_reg_va;
+	uint32_t aiomt_range;
+	uint8_t aiomt_rng_bus;
+	uint8_t aiomt_first_devfn;
+	uint8_t aiomt_last_devfn;
+	uint8_t aiomt_rng_valid;
+	uint8_t aiomt_ht_unitid;
+	uint32_t aiomt_misc;
+	uint8_t aiomt_htatsresv;
+	uint8_t aiomt_vasize;
+	uint8_t aiomt_pasize;
+	uint8_t aiomt_msinum;
+	uint8_t aiomt_reg_pages;
+	uint32_t aiomt_reg_size;
+	uint32_t aiomt_devtbl_sz;
+	uint32_t aiomt_cmdbuf_sz;
+	uint32_t aiomt_eventlog_sz;
+	caddr_t aiomt_devtbl;
+	caddr_t aiomt_cmdbuf;
+	caddr_t aiomt_eventlog;
+	uint32_t *aiomt_cmd_tail;
+	uint32_t *aiomt_event_head;
+	ddi_dma_handle_t aiomt_dmahdl;
+	void *aiomt_dma_bufva;
+	uint64_t aiomt_dma_mem_realsz;
+	ddi_acc_handle_t aiomt_dma_mem_hdl;
+	ddi_dma_cookie_t aiomt_buf_dma_cookie;
+	uint_t aiomt_buf_dma_ncookie;
+	amd_iommu_intr_state_t aiomt_intr_state;
+	ddi_intr_handle_t *aiomt_intr_htable;
+	uint32_t aiomt_intr_htable_sz;
+	uint32_t aiomt_actual_intrs;
+	uint32_t aiomt_intr_cap;
+	uint64_t aiomt_reg_devtbl_va;
+	uint64_t aiomt_reg_cmdbuf_va;
+	uint64_t aiomt_reg_eventlog_va;
+	uint64_t aiomt_reg_ctrl_va;
+	uint64_t aiomt_reg_excl_base_va;
+	uint64_t aiomt_reg_excl_lim_va;
+	uint64_t aiomt_reg_cmdbuf_head_va;
+	uint64_t aiomt_reg_cmdbuf_tail_va;
+	uint64_t aiomt_reg_eventlog_head_va;
+	uint64_t aiomt_reg_eventlog_tail_va;
+	uint64_t aiomt_reg_status_va;
+	struct amd_iommu *aiomt_next;
+} amd_iommu_t;
+
+typedef struct amd_iommu_dma_devtbl_ent {
+	uint16_t de_domainid;
+	uint8_t de_R;
+	uint8_t de_W;
+	caddr_t de_root_pgtbl;
+	uint8_t de_pgmode;
+} amd_iommu_dma_devtbl_entry_t;
+
+typedef struct amd_iommu_alias {
+	uint16_t al_bdf;
+	uint16_t al_src_bdf;
+	struct amd_iommu_alias *al_next;
+} amd_iommu_alias_t;
+
+typedef struct amd_iommu_cmdargs {
+	uint64_t ca_addr;
+	uint16_t ca_domainid;
+	uint16_t ca_deviceid;
+} amd_iommu_cmdargs_t;
+
+struct amd_iommu_page_table;
+
+typedef struct amd_iommu_page_table_hash {
+	kmutex_t ampt_lock;
+	struct amd_iommu_page_table **ampt_hash;
+} amd_iommu_page_table_hash_t;
+
+typedef enum {
+	AMD_IOMMU_LOG_INVALID_OP = 0,
+	AMD_IOMMU_LOG_DISPLAY,
+	AMD_IOMMU_LOG_DISCARD
+} amd_iommu_log_op_t;
+
+typedef enum {
+	AMD_IOMMU_DEBUG_NONE = 0,
+	AMD_IOMMU_DEBUG_ALLOCHDL = 0x1,
+	AMD_IOMMU_DEBUG_FREEHDL = 0x2,
+	AMD_IOMMU_DEBUG_BIND = 0x4,
+	AMD_IOMMU_DEBUG_UNBIND = 0x8,
+	AMD_IOMMU_DEBUG_WIN = 0x10,
+	AMD_IOMMU_DEBUG_PAGE_TABLES = 0x20,
+	AMD_IOMMU_DEBUG_DEVTBL = 0x40,
+	AMD_IOMMU_DEBUG_CMDBUF = 0x80,
+	AMD_IOMMU_DEBUG_EVENTLOG = 0x100,
+	AMD_IOMMU_DEBUG_ACPI = 0x200,
+	AMD_IOMMU_DEBUG_PA2VA = 0x400,
+	AMD_IOMMU_DEBUG_TABLES = 0x800,
+	AMD_IOMMU_DEBUG_EXCL = 0x1000,
+	AMD_IOMMU_DEBUG_INTR = 0x2000
+} amd_iommu_debug_t;
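+
+/*
+ * amd_iommu_debug is a bitmask of the flags above; setting it non-zero
+ * (for instance from /etc/system, the usual mechanism for tuning kernel
+ * module globals) enables the corresponding cmn_err() tracing paths.
+ */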
+
+extern const char *amd_iommu_modname;
+extern kmutex_t amd_iommu_global_lock;
+extern amd_iommu_alias_t **amd_iommu_alias;
+extern amd_iommu_page_table_hash_t amd_iommu_page_table_hash;
+extern ddi_device_acc_attr_t amd_iommu_devacc;
+extern amd_iommu_debug_t amd_iommu_debug;
+
+extern uint8_t amd_iommu_htatsresv;
+extern uint8_t amd_iommu_vasize;
+extern uint8_t amd_iommu_pasize;
+extern int amd_iommu_64bit_bug;
+extern int amd_iommu_unity_map;
+extern int amd_iommu_no_RW_perms;
+extern int amd_iommu_no_unmap;
+extern int amd_iommu_pageva_inval_all;
+extern int amd_iommu_disable;
+extern char *amd_iommu_disable_list;
+
+extern uint64_t amd_iommu_reg_get64_workaround(uint64_t *regp, uint32_t bits);
+extern uint64_t amd_iommu_reg_set64_workaround(uint64_t *regp, uint32_t bits,
+    uint64_t value);
+
+int amd_iommu_cmd(amd_iommu_t *iommu, amd_iommu_cmd_t cmd,
+    amd_iommu_cmdargs_t *cmdargs, amd_iommu_cmd_flags_t flags, int lock_held);
+int amd_iommu_page_table_hash_init(amd_iommu_page_table_hash_t *ampt);
+void amd_iommu_page_table_hash_fini(amd_iommu_page_table_hash_t *ampt);
+
+int amd_iommu_read_log(amd_iommu_t *iommu, amd_iommu_log_op_t op);
+void amd_iommu_read_boot_props(void);
+void amd_iommu_lookup_conf_props(dev_info_t *dip);
+
+#endif	/* _KERNEL */
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _AMD_IOMMU_IMPL_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_log.c	Mon Sep 14 21:48:21 2009 -0700
@@ -0,0 +1,582 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/sunddi.h>
+#include <sys/amd_iommu.h>
+#include "amd_iommu_impl.h"
+#include "amd_iommu_log.h"
+
+
+static const char *
+get_hw_error(uint8_t type)
+{
+	const char *hwerr;
+
+	switch (type) {
+	case 0:
+		hwerr = "Reserved";
+		break;
+	case 1:
+		hwerr = "Master Abort";
+		break;
+	case 2:
+		hwerr = "Target Abort";
+		break;
+	case 3:
+		hwerr = "Data Error";
+		break;
+	default:
+		hwerr = "Unknown";
+		break;
+	}
+
+	return (hwerr);
+}
+
+const char *
+get_illegal_req(uint8_t type, uint8_t TR)
+{
+	const char *illreq;
+
+	switch (type) {
+	case 0:
+		illreq = (TR == 1) ? "Translation I=0/V=0/V=1&&TV=0" :
+		    "Read or Non-posted Write in INTR Range";
+		break;
+	case 1:
+		illreq = (TR == 1) ? "Translation INTR/Port-IO/SysMgt; OR "
+		    "Translation when SysMgt=11b/Port-IO when IOCTL=10b "
+		    "while V=1 && TV=0" :
+		    "Pre-translated transaction from device with I=0 or V=0";
+		break;
+	case 2:
+		illreq = (TR == 1) ? "Reserved" :
+		    "Port-IO transaction for device with IoCtl = 00b";
+		break;
+	case 3:
+		illreq = (TR == 1) ? "Reserved" :
+		    "Posted write to SysMgt with device SysMgt=00b "
+		    "OR SysMgt=10b && message not INTx "
+		    "OR Posted write to addr translation range with "
+		    "HtAtsResv=1";
+		break;
+	case 4:
+		illreq = (TR == 1) ? "Reserved" :
+		    "Read request or non-posted write in SysMgt with "
+		    "device SysMgt=10b or 0xb "
+		    "OR Read request or non-posted write in "
+		    "addr translation range with HtAtsResv=1";
+		break;
+	case 5:
+		illreq = (TR == 1) ? "Reserved" :
+		    "Posted write to Interrupt/EOI Range "
+		    "for device that has IntCtl=00b";
+		break;
+	case 6:
+		illreq = (TR == 1) ? "Reserved" :
+		    "Posted write to reserved Interrupt Address Range";
+		break;
+	case 7:
+		illreq = (TR == 1) ? "Reserved" :
+		    "transaction to SysMgt when SysMgt=11b OR "
+		    "transaction to Port-IO when IoCtl=10b "
+		    "while V=1 && TV=0";
+		break;
+	default:
+		illreq = "Unknown error";
+		break;
+	}
+	return (illreq);
+}
+
+static void
+devtab_illegal_entry(amd_iommu_t *iommu, uint32_t *event)
+{
+	uint16_t deviceid;
+	uint8_t TR;
+	uint8_t RZ;
+	uint8_t RW;
+	uint8_t I;
+	uint32_t vaddr_lo;
+	uint32_t vaddr_hi;
+	const char *driver = ddi_driver_name(iommu->aiomt_dip);
+	int instance = ddi_get_instance(iommu->aiomt_dip);
+	const char *f = "devtab_illegal_entry";
+
+	ASSERT(AMD_IOMMU_REG_GET32(&event[1], AMD_IOMMU_EVENT_TYPE) ==
+	    AMD_IOMMU_EVENT_DEVTAB_ILLEGAL_ENTRY);
+
+	deviceid = AMD_IOMMU_REG_GET32(&event[0],
+	    AMD_IOMMU_EVENT_DEVTAB_ILL_DEVICEID);
+
+	TR = AMD_IOMMU_REG_GET32(&event[1],
+	    AMD_IOMMU_EVENT_DEVTAB_ILL_TR);
+
+	RZ = AMD_IOMMU_REG_GET32(&event[1],
+	    AMD_IOMMU_EVENT_DEVTAB_ILL_RZ);
+
+	RW = AMD_IOMMU_REG_GET32(&event[1],
+	    AMD_IOMMU_EVENT_DEVTAB_ILL_RW);
+
+	I = AMD_IOMMU_REG_GET32(&event[1],
+	    AMD_IOMMU_EVENT_DEVTAB_ILL_INTR);
+
+	vaddr_lo = AMD_IOMMU_REG_GET32(&event[2],
+	    AMD_IOMMU_EVENT_DEVTAB_ILL_VADDR_LO);
+
+	vaddr_hi = event[3];
+
+	cmn_err(CE_WARN, "%s: %s%d: idx = %d. Illegal device table entry "
+	    "deviceid=%u, %s request, %s %s transaction, %s request, "
+	    "virtual address = %p",
+	    f, driver, instance, iommu->aiomt_idx,
+	    deviceid,
+	    TR == 1 ? "Translation" : "Transaction",
+	    RZ == 1 ? "Non-zero reserved bit" : "Illegal Level encoding",
+	    RW == 1 ? "Write" : "Read",
+	    I == 1 ? "Interrupt" : "Memory",
+	    (void *)(uintptr_t)(((uint64_t)vaddr_hi) << 32 | vaddr_lo));
+}
+
+static void
+io_page_fault(amd_iommu_t *iommu, uint32_t *event)
+{
+	uint16_t deviceid;
+	uint16_t domainid;
+	uint8_t TR;
+	uint8_t RZ;
+	uint8_t RW;
+	uint8_t PE;
+	uint8_t PR;
+	uint8_t I;
+	uint32_t vaddr_lo;
+	uint32_t vaddr_hi;
+	const char *driver = ddi_driver_name(iommu->aiomt_dip);
+	int instance = ddi_get_instance(iommu->aiomt_dip);
+	const char *f = "io_page_fault";
+
+	ASSERT(AMD_IOMMU_REG_GET32(&event[1], AMD_IOMMU_EVENT_TYPE) ==
+	    AMD_IOMMU_EVENT_IO_PAGE_FAULT);
+
+	deviceid = AMD_IOMMU_REG_GET32(&event[0],
+	    AMD_IOMMU_EVENT_IO_PGFAULT_DEVICEID);
+
+	TR = AMD_IOMMU_REG_GET32(&event[1], AMD_IOMMU_EVENT_IO_PGFAULT_TR);
+
+	RZ = AMD_IOMMU_REG_GET32(&event[1], AMD_IOMMU_EVENT_IO_PGFAULT_RZ);
+
+	PE = AMD_IOMMU_REG_GET32(&event[1], AMD_IOMMU_EVENT_IO_PGFAULT_PE);
+
+	RW = AMD_IOMMU_REG_GET32(&event[1], AMD_IOMMU_EVENT_IO_PGFAULT_RW);
+
+	PR = AMD_IOMMU_REG_GET32(&event[1], AMD_IOMMU_EVENT_IO_PGFAULT_PR);
+
+	I = AMD_IOMMU_REG_GET32(&event[1], AMD_IOMMU_EVENT_IO_PGFAULT_INTR);
+
+	domainid = AMD_IOMMU_REG_GET32(&event[1],
+	    AMD_IOMMU_EVENT_IO_PGFAULT_DOMAINID);
+
+	vaddr_lo = event[2];
+
+	vaddr_hi = event[3];
+
+	cmn_err(CE_WARN, "%s: %s%d: idx = %d. IO Page Fault. "
+	    "deviceid=%u, %s request, %s, %s permissions, %s transaction, "
+	    "%s, %s request, domainid=%u, virtual address = %p",
+	    f, driver, instance, iommu->aiomt_idx,
+	    deviceid,
+	    TR == 1 ? "Translation" : "Transaction",
+	    RZ == 1 ? "Non-zero reserved bit" : "Illegal Level encoding",
+	    PE == 1 ? "did not have" : "had",
+	    RW == 1 ? "Write" : "Read",
+	    PR == 1 ? "Page present or Interrupt Remapped" :
+	    "Page not present or Interrupt Blocked",
+	    I == 1 ? "Interrupt" : "Memory",
+	    domainid,
+	    (void *)(uintptr_t)(((uint64_t)vaddr_hi) << 32 | vaddr_lo));
+}
+
+static void
+devtab_hw_error(amd_iommu_t *iommu, uint32_t *event)
+{
+	uint16_t deviceid;
+	uint8_t type;
+	uint8_t TR;
+	uint8_t RW;
+	uint8_t I;
+	uint32_t physaddr_lo;
+	uint32_t physaddr_hi;
+	const char *hwerr;
+	const char *driver = ddi_driver_name(iommu->aiomt_dip);
+	int instance = ddi_get_instance(iommu->aiomt_dip);
+	const char *f = "devtab_hw_error";
+
+	ASSERT(AMD_IOMMU_REG_GET32(&event[1], AMD_IOMMU_EVENT_TYPE) ==
+	    AMD_IOMMU_EVENT_DEVTAB_HW_ERROR);
+
+	deviceid = AMD_IOMMU_REG_GET32(&event[0],
+	    AMD_IOMMU_EVENT_DEVTAB_HWERR_DEVICEID);
+
+	type = AMD_IOMMU_REG_GET32(&event[1],
+	    AMD_IOMMU_EVENT_DEVTAB_HWERR_TYPE);
+
+	hwerr = get_hw_error(type);
+
+	TR = AMD_IOMMU_REG_GET32(&event[1], AMD_IOMMU_EVENT_DEVTAB_HWERR_TR);
+
+	RW = AMD_IOMMU_REG_GET32(&event[1], AMD_IOMMU_EVENT_DEVTAB_HWERR_RW);
+
+	I = AMD_IOMMU_REG_GET32(&event[1], AMD_IOMMU_EVENT_DEVTAB_HWERR_INTR);
+
+	physaddr_lo = AMD_IOMMU_REG_GET32(&event[2],
+	    AMD_IOMMU_EVENT_DEVTAB_HWERR_PHYSADDR_LO);
+
+	physaddr_hi = event[3];
+
+	cmn_err(CE_WARN, "%s: %s%d: idx = %d. Device Table HW Error. "
+	    "deviceid=%u, HW error type: %s, %s request, %s transaction, "
+	    "%s request, physical address = %p",
+	    f, driver, instance, iommu->aiomt_idx,
+	    deviceid, hwerr,
+	    TR == 1 ? "Translation" : "Transaction",
+	    RW == 1 ? "Write" : "Read",
+	    I == 1 ? "Interrupt" : "Memory",
+	    (void *)(uintptr_t)(((uint64_t)physaddr_hi) << 32 | physaddr_lo));
+}
+
+
+static void
+pgtable_hw_error(amd_iommu_t *iommu, uint32_t *event)
+{
+	uint16_t deviceid;
+	uint16_t domainid;
+	uint8_t type;
+	uint8_t TR;
+	uint8_t RW;
+	uint8_t I;
+	uint32_t physaddr_lo;
+	uint32_t physaddr_hi;
+	const char *hwerr;
+	const char *driver = ddi_driver_name(iommu->aiomt_dip);
+	int instance = ddi_get_instance(iommu->aiomt_dip);
+	const char *f = "pgtable_hw_error";
+
+	ASSERT(AMD_IOMMU_REG_GET32(&event[1], AMD_IOMMU_EVENT_TYPE) ==
+	    AMD_IOMMU_EVENT_PGTABLE_HW_ERROR);
+
+	deviceid = AMD_IOMMU_REG_GET32(&event[0],
+	    AMD_IOMMU_EVENT_PGTABLE_HWERR_DEVICEID);
+
+	type = AMD_IOMMU_REG_GET32(&event[1],
+	    AMD_IOMMU_EVENT_DEVTAB_HWERR_TYPE);
+
+	hwerr = get_hw_error(type);
+
+	TR = AMD_IOMMU_REG_GET32(&event[1], AMD_IOMMU_EVENT_PGTABLE_HWERR_TR);
+
+	RW = AMD_IOMMU_REG_GET32(&event[1], AMD_IOMMU_EVENT_PGTABLE_HWERR_RW);
+
+	I = AMD_IOMMU_REG_GET32(&event[1], AMD_IOMMU_EVENT_PGTABLE_HWERR_INTR);
+
+	domainid = AMD_IOMMU_REG_GET32(&event[1],
+	    AMD_IOMMU_EVENT_PGTABLE_HWERR_DOMAINID);
+
+	physaddr_lo = AMD_IOMMU_REG_GET32(&event[2],
+	    AMD_IOMMU_EVENT_PGTABLE_HWERR_PHYSADDR_LO);
+
+	physaddr_hi = event[3];
+
+	cmn_err(CE_WARN, "%s: %s%d: idx = %d. Page Table HW Error. "
+	    "deviceid=%u, HW error type: %s, %s request, %s transaction, "
+	    "%s request, domainid=%u, physical address = %p",
+	    f, driver, instance, iommu->aiomt_idx,
+	    deviceid, hwerr,
+	    TR == 1 ? "Translation" : "Transaction",
+	    RW == 1 ? "Write" : "Read",
+	    I == 1 ? "Interrupt" : "Memory",
+	    domainid,
+	    (void *)(uintptr_t)(((uint64_t)physaddr_hi) << 32 | physaddr_lo));
+}
+
+static void
+cmdbuf_illegal_cmd(amd_iommu_t *iommu, uint32_t *event)
+{
+	uint32_t physaddr_lo;
+	uint32_t physaddr_hi;
+	const char *driver = ddi_driver_name(iommu->aiomt_dip);
+	int instance = ddi_get_instance(iommu->aiomt_dip);
+	const char *f = "cmdbuf_illegal_cmd";
+
+	ASSERT(AMD_IOMMU_REG_GET32(&event[1], AMD_IOMMU_EVENT_TYPE) ==
+	    AMD_IOMMU_EVENT_CMDBUF_ILLEGAL_CMD);
+
+	physaddr_lo = AMD_IOMMU_REG_GET32(&event[2],
+	    AMD_IOMMU_EVENT_CMDBUF_ILLEGAL_CMD_PHYS_LO);
+
+	physaddr_hi = event[3];
+
+	cmn_err(CE_WARN, "%s: %s%d: idx = %d. Illegal IOMMU command. "
+	    "command physical address = %p",
+	    f, driver, instance, iommu->aiomt_idx,
+	    (void *)(uintptr_t)(((uint64_t)physaddr_hi) << 32 | physaddr_lo));
+}
+
+static void
+cmdbuf_hw_error(amd_iommu_t *iommu, uint32_t *event)
+{
+	uint32_t physaddr_lo;
+	uint32_t physaddr_hi;
+	uint8_t type;
+	const char *hwerr;
+	const char *driver = ddi_driver_name(iommu->aiomt_dip);
+	int instance = ddi_get_instance(iommu->aiomt_dip);
+	const char *f = "cmdbuf_hw_error";
+
+	ASSERT(AMD_IOMMU_REG_GET32(&event[1], AMD_IOMMU_EVENT_TYPE) ==
+	    AMD_IOMMU_EVENT_CMDBUF_HW_ERROR);
+
+	type = AMD_IOMMU_REG_GET32(&event[1],
+	    AMD_IOMMU_EVENT_CMDBUF_HWERR_TYPE);
+
+	hwerr = get_hw_error(type);
+
+	physaddr_lo = AMD_IOMMU_REG_GET32(&event[2],
+	    AMD_IOMMU_EVENT_CMDBUF_HWERR_PHYS_LO);
+
+	physaddr_hi = event[3];
+
+	cmn_err(CE_WARN, "%s: %s%d: idx = %d. Command Buffer HW error. "
+	    "HW error type = %s, command buffer physical address = %p",
+	    f, driver, instance, iommu->aiomt_idx,
+	    hwerr,
+	    (void *)(uintptr_t)(((uint64_t)physaddr_hi) << 32 | physaddr_lo));
+}
+
+static void
+iotlb_inval_to(amd_iommu_t *iommu, uint32_t *event)
+{
+	uint16_t deviceid;
+	uint32_t physaddr_lo;
+	uint32_t physaddr_hi;
+	uint8_t type;
+	const char *hwerr;
+	const char *driver = ddi_driver_name(iommu->aiomt_dip);
+	int instance = ddi_get_instance(iommu->aiomt_dip);
+	const char *f = "iotlb_inval_to";
+
+	ASSERT(AMD_IOMMU_REG_GET32(&event[1], AMD_IOMMU_EVENT_TYPE) ==
+	    AMD_IOMMU_EVENT_IOTLB_INVAL_TO);
+
+	deviceid = AMD_IOMMU_REG_GET32(&event[0],
+	    AMD_IOMMU_EVENT_IOTLB_INVAL_TO_DEVICEID);
+
+	/*
+	 * XXX possible bug in the spec: is the type field available at
+	 * +04 26:25, or is it reserved?
+	 */
+	type = AMD_IOMMU_REG_GET32(&event[1],
+	    AMD_IOMMU_EVENT_IOTLB_INVAL_TO_TYPE);
+	hwerr = get_hw_error(type);
+
+	physaddr_lo = AMD_IOMMU_REG_GET32(&event[2],
+	    AMD_IOMMU_EVENT_IOTLB_INVAL_TO_PHYS_LO);
+
+	physaddr_hi = event[3];
+
+	cmn_err(CE_WARN, "%s: %s%d: idx = %d. deviceid = %u "
+	    "IOTLB invalidation Timeout. "
+	    "HW error type = %s, invalidation command physical address = %p",
+	    f, driver, instance, iommu->aiomt_idx, deviceid,
+	    hwerr,
+	    (void *)(uintptr_t)(((uint64_t)physaddr_hi) << 32 | physaddr_lo));
+}
+
+static void
+device_illegal_req(amd_iommu_t *iommu, uint32_t *event)
+{
+	uint16_t deviceid;
+	uint8_t TR;
+	uint32_t addr_lo;
+	uint32_t addr_hi;
+	uint8_t type;
+	const char *reqerr;
+	const char *driver = ddi_driver_name(iommu->aiomt_dip);
+	int instance = ddi_get_instance(iommu->aiomt_dip);
+	const char *f = "device_illegal_req";
+
+	ASSERT(AMD_IOMMU_REG_GET32(&event[1], AMD_IOMMU_EVENT_TYPE) ==
+	    AMD_IOMMU_EVENT_DEVICE_ILLEGAL_REQ);
+
+	deviceid = AMD_IOMMU_REG_GET32(&event[0],
+	    AMD_IOMMU_EVENT_DEVICE_ILLEGAL_REQ_DEVICEID);
+
+	TR = AMD_IOMMU_REG_GET32(&event[1],
+	    AMD_IOMMU_EVENT_DEVICE_ILLEGAL_REQ_TR);
+
+	type = AMD_IOMMU_REG_GET32(&event[1],
+	    AMD_IOMMU_EVENT_DEVICE_ILLEGAL_REQ_TYPE);
+
+	reqerr = get_illegal_req(type, TR);
+
+	addr_lo = event[2];
+	addr_hi = event[3];
+
+	cmn_err(CE_WARN, "%s: %s%d: idx = %d. deviceid = %d "
+	    "Illegal Device Request. "
+	    "Illegal Request type = %s, %s request, address accessed = %p",
+	    f, driver, instance, iommu->aiomt_idx, deviceid,
+	    reqerr,
+	    TR == 1 ? "Translation" : "Transaction",
+	    (void *)(uintptr_t)(((uint64_t)addr_hi) << 32 | addr_lo));
+}
+
+static void
+amd_iommu_process_one_event(amd_iommu_t *iommu)
+{
+	uint32_t event[4];
+	amd_iommu_event_t event_type;
+	int i;
+	const char *driver = ddi_driver_name(iommu->aiomt_dip);
+	int instance = ddi_get_instance(iommu->aiomt_dip);
+	const char *f = "amd_iommu_process_one_event";
+
+	ASSERT(MUTEX_HELD(&iommu->aiomt_eventlock));
+
+	SYNC_FORKERN(iommu->aiomt_dmahdl);
+	for (i = 0; i < 4; i++) {
+		event[i] = iommu->aiomt_event_head[i];
+	}
+
+	event_type = AMD_IOMMU_REG_GET32(&event[1], AMD_IOMMU_EVENT_TYPE);
+
+	switch (event_type) {
+	case AMD_IOMMU_EVENT_DEVTAB_ILLEGAL_ENTRY:
+		devtab_illegal_entry(iommu, event);
+		break;
+	case AMD_IOMMU_EVENT_IO_PAGE_FAULT:
+		io_page_fault(iommu, event);
+		break;
+	case AMD_IOMMU_EVENT_DEVTAB_HW_ERROR:
+		devtab_hw_error(iommu, event);
+		break;
+	case AMD_IOMMU_EVENT_PGTABLE_HW_ERROR:
+		pgtable_hw_error(iommu, event);
+		break;
+	case AMD_IOMMU_EVENT_CMDBUF_HW_ERROR:
+		cmdbuf_hw_error(iommu, event);
+		break;
+	case AMD_IOMMU_EVENT_CMDBUF_ILLEGAL_CMD:
+		cmdbuf_illegal_cmd(iommu, event);
+		break;
+	case AMD_IOMMU_EVENT_IOTLB_INVAL_TO:
+		iotlb_inval_to(iommu, event);
+		break;
+	case AMD_IOMMU_EVENT_DEVICE_ILLEGAL_REQ:
+		device_illegal_req(iommu, event);
+		break;
+	default:
+		cmn_err(CE_WARN, "%s: %s%d: idx = %d. Unknown event: %u",
+		    f, driver, instance, iommu->aiomt_idx, event_type);
+		break;
+	}
+}
+
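+/*
+ * Drain the event log. Hardware advances the tail pointer as it logs
+ * events; software consumes entries at the head until head == tail,
+ * wrapping at the end of the buffer and writing the new head back to
+ * the head pointer register after each entry. AMD_IOMMU_LOG_DISCARD
+ * instead jumps the head straight to the tail, dropping any pending
+ * entries.
+ */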
+int
+amd_iommu_read_log(amd_iommu_t *iommu, amd_iommu_log_op_t op)
+{
+	caddr_t evtail;
+	uint64_t evtail_off;
+	uint64_t evhead_off;
+
+	ASSERT(op != AMD_IOMMU_LOG_INVALID_OP);
+
+	mutex_enter(&iommu->aiomt_eventlock);
+
+	ASSERT(iommu->aiomt_event_head != NULL);
+
+	/* XXX verify */
+	evtail_off = AMD_IOMMU_REG_GET64(
+	    REGADDR64(iommu->aiomt_reg_eventlog_tail_va),
+	    AMD_IOMMU_EVENTTAILPTR);
+
+	evtail_off = EV2OFF(evtail_off);
+
+	ASSERT(evtail_off < iommu->aiomt_eventlog_sz);
+
+	evtail = iommu->aiomt_eventlog + evtail_off;
+
+	if (op == AMD_IOMMU_LOG_DISCARD) {
+		/*LINTED*/
+		iommu->aiomt_event_head = (uint32_t *)evtail;
+		AMD_IOMMU_REG_SET64(REGADDR64(
+		    iommu->aiomt_reg_eventlog_head_va),
+		    AMD_IOMMU_EVENTHEADPTR, OFF2EV(evtail_off));
+		cmn_err(CE_NOTE, "Discarded IOMMU event log");
+		mutex_exit(&iommu->aiomt_eventlock);
+		return (DDI_SUCCESS);
+	}
+
+	/*LINTED*/
+	while (1) {
+		if ((caddr_t)iommu->aiomt_event_head == evtail)
+			break;
+
+		cmn_err(CE_WARN, "evtail_off = %p, head = %p, tail = %p",
+		    (void *)(uintptr_t)evtail_off,
+		    (void *)iommu->aiomt_event_head,
+		    (void *)evtail);
+
+		amd_iommu_process_one_event(iommu);
+
+		/*
+		 * Update the head pointer in soft state
+		 * and the head pointer register
+		 */
+		iommu->aiomt_event_head += 4;
+		if ((caddr_t)iommu->aiomt_event_head >=
+		    iommu->aiomt_eventlog + iommu->aiomt_eventlog_sz) {
+			/* wraparound */
+			iommu->aiomt_event_head =
+			/*LINTED*/
+			    (uint32_t *)iommu->aiomt_eventlog;
+			evhead_off = 0;
+		} else {
+			evhead_off = (caddr_t)iommu->aiomt_event_head
+			/*LINTED*/
+			    - iommu->aiomt_eventlog;
+		}
+
+		ASSERT(evhead_off < iommu->aiomt_eventlog_sz);
+
+		AMD_IOMMU_REG_SET64(REGADDR64(
+		    iommu->aiomt_reg_eventlog_head_va),
+		    AMD_IOMMU_EVENTHEADPTR, OFF2EV(evhead_off));
+	}
+	mutex_exit(&iommu->aiomt_eventlock);
+
+	return (DDI_SUCCESS);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_log.h	Mon Sep 14 21:48:21 2009 -0700
@@ -0,0 +1,116 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _AMD_IOMMU_LOG_H
+#define	_AMD_IOMMU_LOG_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/amd_iommu.h>
+
+#ifdef _KERNEL
+
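+/*
+ * Event log entries are AMD_IOMMU_EVENT_SZ (16) bytes each; EV2OFF and
+ * OFF2EV convert between an entry index, as held in the head and tail
+ * pointer registers, and its byte offset in the log buffer.
+ */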
+#define	EV2OFF(e)	((e) << 4)
+#define	OFF2EV(o)	((o) >> 4)
+
+typedef enum {
+	AMD_IOMMU_EVENT_INVALID = 0,
+	AMD_IOMMU_EVENT_DEVTAB_ILLEGAL_ENTRY = 1,
+	AMD_IOMMU_EVENT_IO_PAGE_FAULT = 2,
+	AMD_IOMMU_EVENT_DEVTAB_HW_ERROR = 3,
+	AMD_IOMMU_EVENT_PGTABLE_HW_ERROR = 4,
+	AMD_IOMMU_EVENT_CMDBUF_ILLEGAL_CMD = 5,
+	AMD_IOMMU_EVENT_CMDBUF_HW_ERROR = 6,
+	AMD_IOMMU_EVENT_IOTLB_INVAL_TO = 7,
+	AMD_IOMMU_EVENT_DEVICE_ILLEGAL_REQ = 8
+} amd_iommu_event_t;
+
+/* Common to all events */
+#define	AMD_IOMMU_EVENT_TYPE			(31 << 16 | 28)
+
+/* Illegal device Table Entry Event bits */
+#define	AMD_IOMMU_EVENT_DEVTAB_ILL_DEVICEID	(15 << 16 | 0)
+#define	AMD_IOMMU_EVENT_DEVTAB_ILL_TR		(24 << 16 | 24)
+#define	AMD_IOMMU_EVENT_DEVTAB_ILL_RZ		(23 << 16 | 23)
+#define	AMD_IOMMU_EVENT_DEVTAB_ILL_RW		(21 << 16 | 21)
+#define	AMD_IOMMU_EVENT_DEVTAB_ILL_INTR		(19 << 16 | 19)
+#define	AMD_IOMMU_EVENT_DEVTAB_ILL_VADDR_LO	(31 << 16 | 2)
+
+/* IO Page Fault event bits */
+#define	AMD_IOMMU_EVENT_IO_PGFAULT_DEVICEID	(15 << 16 | 0)
+#define	AMD_IOMMU_EVENT_IO_PGFAULT_TR		(24 << 16 | 24)
+#define	AMD_IOMMU_EVENT_IO_PGFAULT_RZ		(23 << 16 | 23)
+#define	AMD_IOMMU_EVENT_IO_PGFAULT_PE		(22 << 16 | 22)
+#define	AMD_IOMMU_EVENT_IO_PGFAULT_RW		(21 << 16 | 21)
+#define	AMD_IOMMU_EVENT_IO_PGFAULT_PR		(20 << 16 | 20)
+#define	AMD_IOMMU_EVENT_IO_PGFAULT_INTR		(19 << 16 | 19)
+#define	AMD_IOMMU_EVENT_IO_PGFAULT_DOMAINID	(15 << 16 | 0)
+
+
+/* Device Table HW Error event bits */
+#define	AMD_IOMMU_EVENT_DEVTAB_HWERR_DEVICEID	(15 << 16 | 0)
+#define	AMD_IOMMU_EVENT_DEVTAB_HWERR_TYPE	(26 << 16 | 25)
+#define	AMD_IOMMU_EVENT_DEVTAB_HWERR_TR		(24 << 16 | 24)
+#define	AMD_IOMMU_EVENT_DEVTAB_HWERR_RW		(21 << 16 | 21)
+#define	AMD_IOMMU_EVENT_DEVTAB_HWERR_INTR	(19 << 16 | 19)
+#define	AMD_IOMMU_EVENT_DEVTAB_HWERR_PHYSADDR_LO	(31 << 16 | 4)
+
+
+/* Page Table HW Error event bits */
+#define	AMD_IOMMU_EVENT_PGTABLE_HWERR_DEVICEID	(15 << 16 | 0)
+#define	AMD_IOMMU_EVENT_DEVTAB_HWERR_TYPE	(26 << 16 | 25)
+#define	AMD_IOMMU_EVENT_PGTABLE_HWERR_TR	(24 << 16 | 24)
+#define	AMD_IOMMU_EVENT_PGTABLE_HWERR_RW	(21 << 16 | 21)
+#define	AMD_IOMMU_EVENT_PGTABLE_HWERR_INTR	(19 << 16 | 19)
+#define	AMD_IOMMU_EVENT_PGTABLE_HWERR_DOMAINID  (15 << 16 | 0)
+#define	AMD_IOMMU_EVENT_PGTABLE_HWERR_PHYSADDR_LO	(31 << 16 | 3)
+
+/* Illegal Command Error event bits */
+#define	AMD_IOMMU_EVENT_CMDBUF_ILLEGAL_CMD_PHYS_LO	(31 << 16 | 4)
+
+/* Command Buffer HW Error event bits */
+#define	AMD_IOMMU_EVENT_CMDBUF_HWERR_TYPE	(26 << 16 | 25)
+#define	AMD_IOMMU_EVENT_CMDBUF_HWERR_PHYS_LO	(31 << 16 | 4)
+
+
+/* IOTLB Invalidation TO event bits */
+#define	AMD_IOMMU_EVENT_IOTLB_INVAL_TO_DEVICEID	(15 << 16 | 0)
+#define	AMD_IOMMU_EVENT_IOTLB_INVAL_TO_TYPE	(26 << 16 | 25)
+#define	AMD_IOMMU_EVENT_IOTLB_INVAL_TO_PHYS_LO	(31 << 16 | 4)
+
+/* Illegal Device request event bits */
+#define	AMD_IOMMU_EVENT_DEVICE_ILLEGAL_REQ_DEVICEID	(15 << 16 | 0)
+#define	AMD_IOMMU_EVENT_DEVICE_ILLEGAL_REQ_TYPE		(27 << 16 | 25)
+#define	AMD_IOMMU_EVENT_DEVICE_ILLEGAL_REQ_TR		(24 << 16 | 24)
+
+#endif /* _KERNEL */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif	/* _AMD_IOMMU_LOG_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_page_tables.c	Mon Sep 14 21:48:21 2009 -0700
@@ -0,0 +1,1699 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/sunddi.h>
+#include <sys/sunndi.h>
+#include <sys/acpi/acpi.h>
+#include <sys/acpica.h>
+#include <sys/amd_iommu.h>
+#include <sys/bootconf.h>
+#include <sys/sysmacros.h>
+#include <sys/ddidmareq.h>
+
+#include "amd_iommu_impl.h"
+#include "amd_iommu_acpi.h"
+#include "amd_iommu_page_tables.h"
+
+ddi_dma_attr_t amd_iommu_pgtable_dma_attr = {
+	DMA_ATTR_V0,
+	0U,				/* dma_attr_addr_lo */
+	0xffffffffffffffffULL,		/* dma_attr_addr_hi */
+	0xffffffffU,			/* dma_attr_count_max */
+	(uint64_t)4096,			/* dma_attr_align */
+	1,				/* dma_attr_burstsizes */
+	64,				/* dma_attr_minxfer */
+	0xffffffffU,			/* dma_attr_maxxfer */
+	0xffffffffU,			/* dma_attr_seg */
+	1,				/* dma_attr_sgllen, variable */
+	64,				/* dma_attr_granular */
+	0				/* dma_attr_flags */
+};
+
+static amd_iommu_domain_t **amd_iommu_domain_table;
+
+static struct {
+	int f_count;
+	amd_iommu_page_table_t *f_list;
+} amd_iommu_pgtable_freelist;
+int amd_iommu_no_pgtable_freelist;
+
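+/*
+ * A device behind a PCI bridge can issue DMA carrying the bridge's
+ * requester ID rather than its own, so the ACPI IVHD data may record a
+ * "source" BDF that aliases the device's BDF. When no alias is
+ * recorded, the device's own BDF is used as the source.
+ */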
+/*ARGSUSED*/
+static int
+amd_iommu_get_src_bdf(amd_iommu_t *iommu, int32_t bdf, int32_t *src_bdfp)
+{
+	amd_iommu_acpi_ivhd_t *hinfop;
+
+	hinfop = amd_iommu_lookup_ivhd(bdf);
+	if (hinfop == NULL || hinfop->ach_src_deviceid == -1)
+		*src_bdfp = bdf;
+	else
+		*src_bdfp = hinfop->ach_src_deviceid;
+
+	return (DDI_SUCCESS);
+}
+
+static dev_info_t *
+amd_iommu_pci_dip(dev_info_t *rdip, const char *path)
+{
+	dev_info_t *pdip;
+	const char *driver = ddi_driver_name(rdip);
+	int instance = ddi_get_instance(rdip);
+	const char *f = "amd_iommu_pci_dip";
+
+	/* Hold rdip so it and its parents don't go away */
+	ndi_hold_devi(rdip);
+
+	if (ddi_is_pci_dip(rdip))
+		return (rdip);
+
+	pdip = rdip;
+	while ((pdip = ddi_get_parent(pdip)) != NULL) {
+		if (ddi_is_pci_dip(pdip)) {
+			ndi_hold_devi(pdip);
+			ndi_rele_devi(rdip);
+			return (pdip);
+		}
+	}
+
+	cmn_err(CE_WARN, "%s: %s%d dip = %p has no PCI parent, path = %s",
+	    f, driver, instance, (void *)rdip, path);
+
+	ndi_rele_devi(rdip);
+
+	ASSERT(0);
+
+	return (NULL);
+}
+
+/*ARGSUSED*/
+static int
+amd_iommu_get_domain(amd_iommu_t *iommu, dev_info_t *rdip, int alias,
+    uint16_t deviceid, domain_id_t *domainid, const char *path)
+{
+	const char *f = "amd_iommu_get_domain";
+
+	*domainid = AMD_IOMMU_INVALID_DOMAIN;
+
+	ASSERT(strcmp(ddi_driver_name(rdip), "agpgart") != 0);
+
+	switch (deviceid) {
+		case AMD_IOMMU_INVALID_DOMAIN:
+		case AMD_IOMMU_IDENTITY_DOMAIN:
+		case AMD_IOMMU_PASSTHRU_DOMAIN:
+		case AMD_IOMMU_SYS_DOMAIN:
+			*domainid = AMD_IOMMU_SYS_DOMAIN;
+			break;
+		default:
+			*domainid = deviceid;
+			break;
+	}
+
+	if (amd_iommu_debug & AMD_IOMMU_DEBUG_DEVTBL) {
+		cmn_err(CE_NOTE, "%s: domainid for %s = %d",
+		    f, path, *domainid);
+	}
+
+	return (DDI_SUCCESS);
+}
+
+static uint16_t
+hash_domain(domain_id_t domainid)
+{
+	return (domainid % AMD_IOMMU_DOMAIN_HASH_SZ);
+}
+
+/*ARGSUSED*/
+void
+amd_iommu_init_page_tables(amd_iommu_t *iommu)
+{
+	amd_iommu_domain_table = kmem_zalloc(
+	    sizeof (amd_iommu_domain_t *) * AMD_IOMMU_DOMAIN_HASH_SZ, KM_SLEEP);
+}
+
+/*ARGSUSED*/
+void
+amd_iommu_fini_page_tables(amd_iommu_t *iommu)
+{
+	if (amd_iommu_domain_table) {
+		kmem_free(amd_iommu_domain_table,
+		    sizeof (amd_iommu_domain_t *) * AMD_IOMMU_DOMAIN_HASH_SZ);
+		amd_iommu_domain_table = NULL;
+	}
+}
+
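+/*
+ * Look up a domain by ID, creating it on first use. For
+ * AMD_IOMMU_VMEM_MAP domains, a per-domain vmem arena hands out device
+ * virtual addresses in [MMU_PAGESIZE, 4G); page 0 is excluded,
+ * presumably so that a valid cookie never carries a DVMA of 0.
+ */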
+static amd_iommu_domain_t *
+amd_iommu_lookup_domain(amd_iommu_t *iommu, domain_id_t domainid,
+    map_type_t type, int km_flags)
+{
+	uint16_t idx;
+	amd_iommu_domain_t *dp;
+	char name[AMD_IOMMU_VMEM_NAMELEN+1];
+
+	ASSERT(amd_iommu_domain_table);
+
+	idx = hash_domain(domainid);
+
+	for (dp = amd_iommu_domain_table[idx]; dp; dp = dp->d_next) {
+		if (dp->d_domainid == domainid)
+			return (dp);
+	}
+
+	ASSERT(type != AMD_IOMMU_INVALID_MAP);
+
+	dp = kmem_zalloc(sizeof (*dp), km_flags);
+	if (dp == NULL)
+		return (NULL);
+	dp->d_domainid = domainid;
+	dp->d_pgtable_root_4K = 0;	/* make this explicit */
+
+	if (type == AMD_IOMMU_VMEM_MAP) {
+		uint64_t base;
+		uint64_t size;
+		(void) snprintf(name, sizeof (name), "dvma_idx%d_domain%d",
+		    iommu->aiomt_idx, domainid);
+		base = MMU_PAGESIZE;
+		size = AMD_IOMMU_SIZE_4G - MMU_PAGESIZE;
+		dp->d_vmem = vmem_create(name, (void *)(uintptr_t)base, size,
+		    MMU_PAGESIZE, NULL, NULL, NULL, 0,
+		    km_flags == KM_SLEEP ? VM_SLEEP : VM_NOSLEEP);
+		if (dp->d_vmem == NULL) {
+			kmem_free(dp, sizeof (*dp));
+			return (NULL);
+		}
+	} else {
+		dp->d_vmem = NULL;
+	}
+
+	dp->d_next = amd_iommu_domain_table[idx];
+	dp->d_prev = NULL;
+	amd_iommu_domain_table[idx] = dp;
+	if (dp->d_next)
+		dp->d_next->d_prev = dp;
+	dp->d_ref = 0;
+
+	return (dp);
+}
+
+static void
+amd_iommu_teardown_domain(amd_iommu_t *iommu, amd_iommu_domain_t *dp)
+{
+	uint16_t idx;
+	int flags;
+	amd_iommu_cmdargs_t cmdargs = {0};
+	domain_id_t domainid = dp->d_domainid;
+	const char *f = "amd_iommu_teardown_domain";
+
+	ASSERT(dp->d_ref == 0);
+
+	idx = hash_domain(dp->d_domainid);
+
+	if (dp->d_prev == NULL)
+		amd_iommu_domain_table[idx] = dp->d_next;
+	else
+		dp->d_prev->d_next = dp->d_next;
+
+	if (dp->d_next)
+		dp->d_next->d_prev = dp->d_prev;
+
+	if (dp->d_vmem != NULL) {
+		vmem_destroy(dp->d_vmem);
+		dp->d_vmem = NULL;
+	}
+
+	kmem_free(dp, sizeof (*dp));
+
+	cmdargs.ca_domainid = (uint16_t)domainid;
+	cmdargs.ca_addr = 0x7FFFFFFFFFFFF000ULL;
+	flags = AMD_IOMMU_CMD_FLAGS_PAGE_PDE_INVAL |
+	    AMD_IOMMU_CMD_FLAGS_PAGE_INVAL_S;
+
+	if (amd_iommu_cmd(iommu, AMD_IOMMU_CMD_INVAL_IOMMU_PAGES,
+	    &cmdargs, flags, 0) != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s: idx=%d: domainid=%d. "
+		    "Failed to invalidate domain in IOMMU HW cache",
+		    f, iommu->aiomt_idx, cmdargs.ca_domainid);
+	}
+}
+
+static int
+amd_iommu_get_deviceid(amd_iommu_t *iommu, dev_info_t *rdip, int32_t *deviceid,
+    int *aliasp, const char *path)
+{
+	int bus = -1;
+	int device = -1;
+	int func = -1;
+	uint16_t bdf;
+	int32_t src_bdf;
+	dev_info_t *idip = iommu->aiomt_dip;
+	const char *driver = ddi_driver_name(idip);
+	int instance = ddi_get_instance(idip);
+	dev_info_t *pci_dip;
+	const char *f = "amd_iommu_get_deviceid";
+
+	/* be conservative. Always assume an alias */
+	*aliasp = 1;
+	*deviceid = 0;
+
+	/* Check for special devices (rdip == NULL) */
+	if (rdip == NULL) {
+		if (amd_iommu_get_src_bdf(iommu, -1, &src_bdf) != DDI_SUCCESS) {
+			cmn_err(CE_WARN,
+			    "%s: %s%d: idx=%d, failed to get SRC BDF "
+			    "for special-device",
+			    f, driver, instance, iommu->aiomt_idx);
+			return (DDI_DMA_NOMAPPING);
+		}
+		*deviceid = src_bdf;
+		*aliasp = 1;
+		return (DDI_SUCCESS);
+	}
+
+	if (amd_iommu_debug & AMD_IOMMU_DEBUG_DEVTBL) {
+		cmn_err(CE_NOTE, "%s: attempting to get deviceid for %s",
+		    f, path);
+	}
+
+	pci_dip = amd_iommu_pci_dip(rdip, path);
+	if (pci_dip == NULL) {
+		cmn_err(CE_WARN, "%s: %s%d: idx = %d, failed to get PCI dip "
+		    "for rdip=%p, path = %s",
+		    f, driver, instance, iommu->aiomt_idx, (void *)rdip,
+		    path);
+		return (DDI_DMA_NOMAPPING);
+	}
+
+	if (acpica_get_bdf(pci_dip, &bus, &device, &func) != DDI_SUCCESS) {
+		ndi_rele_devi(pci_dip);
+		cmn_err(CE_WARN, "%s: %s%d: idx=%d, failed to get BDF for "
+		    "PCI dip (%p). rdip path = %s",
+		    f, driver, instance, iommu->aiomt_idx,
+		    (void *)pci_dip, path);
+		return (DDI_DMA_NOMAPPING);
+	}
+
+	ndi_rele_devi(pci_dip);
+
+	if (bus > UINT8_MAX || bus < 0 ||
+	    device > UINT8_MAX || device < 0 ||
+	    func > UINT8_MAX || func < 0) {
+		cmn_err(CE_WARN, "%s: %s%d: idx=%d, invalid BDF(%d,%d,%d) "
+		    "for PCI dip (%p). rdip path = %s", f, driver, instance,
+		    iommu->aiomt_idx,
+		    bus, device, func,
+		    (void *)pci_dip, path);
+		return (DDI_DMA_NOMAPPING);
+	}
+
+	bdf = ((uint8_t)bus << 8) | ((uint8_t)device << 3) | (uint8_t)func;
+
+	if (amd_iommu_get_src_bdf(iommu, bdf, &src_bdf) != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s: %s%d: idx=%d, failed to get SRC BDF "
+		    "for PCI dip (%p) rdip path = %s.",
+		    f, driver, instance, iommu->aiomt_idx, (void *)pci_dip,
+		    path);
+		return (DDI_DMA_NOMAPPING);
+	}
+
+	if (amd_iommu_debug & AMD_IOMMU_DEBUG_DEVTBL) {
+		cmn_err(CE_NOTE, "%s: Deviceid = %u for path = %s",
+		    f, src_bdf, path);
+	}
+
+	*deviceid = src_bdf;
+	*aliasp = (src_bdf != bdf);
+
+	return (DDI_SUCCESS);
+}
+
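+/*
+ * Device table entry V (valid) and TV (translation valid) bits, as used
+ * by this driver: V=1/TV=1 means the entry's translation fields are
+ * live; V=0/TV=0 is treated as passthru (no translation). An entry is
+ * only (re)initialized here when neither state already holds.
+ */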
+/*ARGSUSED*/
+static int
+init_devtbl(amd_iommu_t *iommu, uint64_t *devtbl_entry, domain_id_t domainid,
+    amd_iommu_domain_t *dp)
+{
+	uint64_t entry[4] = {0};
+	int i;
+
+	/* If already passthru, don't touch */
+	if (AMD_IOMMU_REG_GET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_V) == 0 &&
+	    AMD_IOMMU_REG_GET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_TV) == 0) {
+		return (0);
+	}
+
+	if (AMD_IOMMU_REG_GET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_V) == 1 &&
+	    AMD_IOMMU_REG_GET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_TV) == 1) {
+
+		ASSERT(dp->d_pgtable_root_4K ==
+		    AMD_IOMMU_REG_GET64(&(devtbl_entry[0]),
+		    AMD_IOMMU_DEVTBL_ROOT_PGTBL));
+
+		ASSERT(dp->d_domainid == AMD_IOMMU_REG_GET64(&(devtbl_entry[1]),
+		    AMD_IOMMU_DEVTBL_DOMAINID));
+
+		return (0);
+	}
+
+	/* New devtbl entry for this domain. Bump up the domain ref-count */
+	dp->d_ref++;
+
+	entry[3] = 0;
+	entry[2] = 0;
+	AMD_IOMMU_REG_SET64(&(entry[1]), AMD_IOMMU_DEVTBL_SYSMGT, 1);
+	AMD_IOMMU_REG_SET64(&(entry[1]), AMD_IOMMU_DEVTBL_EX, 1);
+	AMD_IOMMU_REG_SET64(&(entry[1]), AMD_IOMMU_DEVTBL_SD, 0);
+	AMD_IOMMU_REG_SET64(&(entry[1]), AMD_IOMMU_DEVTBL_CACHE, 0);
+	AMD_IOMMU_REG_SET64(&(entry[1]), AMD_IOMMU_DEVTBL_IOCTL, 1);
+	AMD_IOMMU_REG_SET64(&(entry[1]), AMD_IOMMU_DEVTBL_SA, 0);
+	AMD_IOMMU_REG_SET64(&(entry[1]), AMD_IOMMU_DEVTBL_SE, 1);
+	AMD_IOMMU_REG_SET64(&(entry[1]), AMD_IOMMU_DEVTBL_IOTLB, 1);
+	AMD_IOMMU_REG_SET64(&(entry[1]), AMD_IOMMU_DEVTBL_DOMAINID,
+	    (uint16_t)domainid);
+	AMD_IOMMU_REG_SET64(&(entry[0]), AMD_IOMMU_DEVTBL_IW, 1);
+	AMD_IOMMU_REG_SET64(&(entry[0]), AMD_IOMMU_DEVTBL_IR, 1);
+	AMD_IOMMU_REG_SET64(&(entry[0]), AMD_IOMMU_DEVTBL_ROOT_PGTBL,
+	    dp->d_pgtable_root_4K);
+	AMD_IOMMU_REG_SET64(&(entry[0]), AMD_IOMMU_DEVTBL_PG_MODE,
+	    AMD_IOMMU_PGTABLE_MAXLEVEL);
+	AMD_IOMMU_REG_SET64(&(entry[0]), AMD_IOMMU_DEVTBL_TV,
+	    domainid == AMD_IOMMU_PASSTHRU_DOMAIN ? 0 : 1);
+	AMD_IOMMU_REG_SET64(&(entry[0]), AMD_IOMMU_DEVTBL_V,
+	    domainid == AMD_IOMMU_PASSTHRU_DOMAIN ? 0 : 1);
+
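+	/*
+	 * Write devtbl_entry[0], which holds the V bit, last so the
+	 * IOMMU never sees a partially initialized entry.
+	 */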
+	for (i = 1; i < 4; i++) {
+		devtbl_entry[i] = entry[i];
+	}
+	devtbl_entry[0] = entry[0];
+
+	/* we did an actual init */
+	return (1);
+}
+
+void
+amd_iommu_set_passthru(amd_iommu_t *iommu, dev_info_t *rdip)
+{
+	int32_t deviceid;
+	int alias;
+	uint64_t *devtbl_entry;
+	amd_iommu_cmdargs_t cmdargs = {0};
+	char *path;
+	int pathfree;
+	int V;
+	int TV;
+	int instance;
+	const char *driver;
+	const char *f = "amd_iommu_set_passthru";
+
+	if (rdip) {
+		driver = ddi_driver_name(rdip);
+		instance = ddi_get_instance(rdip);
+	} else {
+		driver = "special-device";
+		instance = 0;
+	}
+
+	path = kmem_alloc(MAXPATHLEN, KM_NOSLEEP);
+	if (path) {
+		if (rdip)
+			(void) ddi_pathname(rdip, path);
+		else
+			(void) strcpy(path, "special-device");
+		pathfree = 1;
+	} else {
+		pathfree = 0;
+		path = "<path-mem-alloc-failed>";
+	}
+
+	if (amd_iommu_get_deviceid(iommu, rdip, &deviceid, &alias, path)
+	    != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s: %s%d: idx=%d: rdip=%p. "
+		    "Failed to get device ID for device %s.", f, driver,
+		    instance,
+		    iommu->aiomt_idx, (void *)rdip, path);
+		goto out;
+	}
+
+	/* No deviceid */
+	if (deviceid == -1) {
+		goto out;
+	}
+
+	if ((deviceid + 1) * AMD_IOMMU_DEVTBL_ENTRY_SZ >
+	    iommu->aiomt_devtbl_sz) {
+		cmn_err(CE_WARN, "%s: %s%d: IOMMU idx=%d, deviceid (%u) "
+		    "for rdip (%p) exceeds device table size (%u), path=%s",
+		    f, driver,
+		    instance, iommu->aiomt_idx, deviceid, (void *)rdip,
+		    iommu->aiomt_devtbl_sz, path);
+		goto out;
+	}
+
+	/*LINTED*/
+	devtbl_entry = (uint64_t *)&iommu->aiomt_devtbl
+	    [deviceid * AMD_IOMMU_DEVTBL_ENTRY_SZ];
+
+	V = AMD_IOMMU_REG_GET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_V);
+	TV = AMD_IOMMU_REG_GET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_TV);
+
+	/* Already passthru */
+	if (V == 0 && TV == 0) {
+		goto out;
+	}
+
+	/* Existing translations */
+	if (V == 1 && TV == 1) {
+		goto out;
+	}
+
+	/* Invalid setting */
+	if (V == 0 && TV == 1) {
+		goto out;
+	}
+
+	AMD_IOMMU_REG_SET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_V, 0);
+
+	cmdargs.ca_deviceid = (uint16_t)deviceid;
+	(void) amd_iommu_cmd(iommu, AMD_IOMMU_CMD_INVAL_DEVTAB_ENTRY,
+	    &cmdargs, 0, 0);
+
+out:
+	if (pathfree)
+		kmem_free(path, MAXPATHLEN);
+}
+
+static int
+amd_iommu_set_devtbl_entry(amd_iommu_t *iommu, dev_info_t *rdip,
+    domain_id_t domainid, uint16_t deviceid, amd_iommu_domain_t *dp,
+    const char *path)
+{
+	uint64_t *devtbl_entry;
+	amd_iommu_cmdargs_t cmdargs = {0};
+	int error = DDI_SUCCESS;
+	dev_info_t *idip = iommu->aiomt_dip;
+	const char *driver = ddi_driver_name(idip);
+	int instance = ddi_get_instance(idip);
+	const char *f = "amd_iommu_set_devtbl_entry";
+
+	if (amd_iommu_debug & AMD_IOMMU_DEBUG_DEVTBL) {
+		cmn_err(CE_WARN, "%s: attempting to set devtbl entry for %s",
+		    f, path);
+	}
+
+	if ((deviceid + 1) * AMD_IOMMU_DEVTBL_ENTRY_SZ >
+	    iommu->aiomt_devtbl_sz) {
+		cmn_err(CE_WARN, "%s: %s%d: IOMMU idx=%d, deviceid (%u) "
+		    "for rdip (%p) exceeds device table size (%u), path=%s",
+		    f, driver,
+		    instance, iommu->aiomt_idx, deviceid, (void *)rdip,
+		    iommu->aiomt_devtbl_sz, path);
+		return (DDI_DMA_NOMAPPING);
+	}
+
+	/*LINTED*/
+	devtbl_entry = (uint64_t *)&iommu->aiomt_devtbl
+	    [deviceid * AMD_IOMMU_DEVTBL_ENTRY_SZ];
+
+	if (amd_iommu_debug & AMD_IOMMU_DEBUG_DEVTBL) {
+		cmn_err(CE_WARN, "%s: deviceid=%u devtbl entry (%p) for %s",
+		    f, deviceid, (void *)(uintptr_t)(*devtbl_entry), path);
+	}
+
+	if (init_devtbl(iommu, devtbl_entry, domainid, dp)) {
+		cmdargs.ca_deviceid = deviceid;
+		error = amd_iommu_cmd(iommu, AMD_IOMMU_CMD_INVAL_DEVTAB_ENTRY,
+		    &cmdargs, 0, 0);
+	}
+
+	return (error);
+}
+
+int
+amd_iommu_clear_devtbl_entry(amd_iommu_t *iommu, dev_info_t *rdip,
+    domain_id_t domainid, uint16_t deviceid, amd_iommu_domain_t *dp,
+    int *domain_freed, char *path)
+{
+	uint64_t *devtbl_entry;
+	int error = DDI_SUCCESS;
+	amd_iommu_cmdargs_t cmdargs = {0};
+	const char *driver = ddi_driver_name(iommu->aiomt_dip);
+	int instance = ddi_get_instance(iommu->aiomt_dip);
+	const char *f = "amd_iommu_clear_devtbl_entry";
+
+	if (amd_iommu_debug & AMD_IOMMU_DEBUG_DEVTBL) {
+		cmn_err(CE_NOTE, "%s: attempting to clear devtbl entry for "
+		    "domainid = %d, deviceid = %u, path = %s",
+		    f, domainid, deviceid, path);
+	}
+
+	if ((deviceid + 1) * AMD_IOMMU_DEVTBL_ENTRY_SZ >
+	    iommu->aiomt_devtbl_sz) {
+		cmn_err(CE_WARN, "%s: %s%d: IOMMU idx=%d, deviceid (%u) "
+		    "for rdip (%p) exceeds device table size (%u), path = %s",
+		    f, driver, instance,
+		    iommu->aiomt_idx, deviceid, (void *)rdip,
+		    iommu->aiomt_devtbl_sz, path);
+		return (DDI_FAILURE);
+	}
+
+	/*LINTED*/
+	devtbl_entry = (uint64_t *)&iommu->aiomt_devtbl
+	    [deviceid * AMD_IOMMU_DEVTBL_ENTRY_SZ];
+
+	if (amd_iommu_debug & AMD_IOMMU_DEBUG_DEVTBL) {
+		cmn_err(CE_WARN, "%s: deviceid=%u devtbl entry (%p) for %s",
+		    f, deviceid, (void *)(uintptr_t)(*devtbl_entry), path);
+	}
+
+	if (AMD_IOMMU_REG_GET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_TV) == 0) {
+		/* Nothing to do */
+		return (DDI_SUCCESS);
+	}
+
+	ASSERT(dp->d_pgtable_root_4K == AMD_IOMMU_REG_GET64(&(devtbl_entry[0]),
+	    AMD_IOMMU_DEVTBL_ROOT_PGTBL));
+
+	ASSERT(domainid == AMD_IOMMU_REG_GET64(&(devtbl_entry[1]),
+	    AMD_IOMMU_DEVTBL_DOMAINID));
+
+	AMD_IOMMU_REG_SET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_TV, 0);
+	AMD_IOMMU_REG_SET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_ROOT_PGTBL, 0);
+	AMD_IOMMU_REG_SET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_V, 1);
+
+	SYNC_FORDEV(iommu->aiomt_dmahdl);
+
+	dp->d_ref--;
+	ASSERT(dp->d_ref >= 0);
+
+	if (dp->d_ref == 0) {
+		*domain_freed = 1;
+	}
+
+	cmdargs.ca_deviceid = deviceid;
+	error = amd_iommu_cmd(iommu, AMD_IOMMU_CMD_INVAL_DEVTAB_ENTRY,
+	    &cmdargs, 0, 0);
+	if (error != DDI_SUCCESS)
+		error = DDI_FAILURE;
+
+	return (error);
+}
+
+int
+amd_iommu_page_table_hash_init(amd_iommu_page_table_hash_t *ampt)
+{
+	ampt->ampt_hash = kmem_zalloc(sizeof (amd_iommu_page_table_t *) *
+	    AMD_IOMMU_PGTABLE_HASH_SZ, KM_SLEEP);
+	return (DDI_SUCCESS);
+}
+
+void
+amd_iommu_page_table_hash_fini(amd_iommu_page_table_hash_t *ampt)
+{
+	kmem_free(ampt->ampt_hash,
+	    sizeof (amd_iommu_page_table_t *) * AMD_IOMMU_PGTABLE_HASH_SZ);
+	ampt->ampt_hash = NULL;
+}
+
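+/*
+ * Hash a page table's physical address (in units of 4K pages) into
+ * a bucket index in the global page table hash.
+ */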
+static uint32_t
+pt_hashfn(uint64_t pa_4K)
+{
+	return (pa_4K % AMD_IOMMU_PGTABLE_HASH_SZ);
+}
+
+static void
+amd_iommu_insert_pgtable_hash(amd_iommu_page_table_t *pt)
+{
+	uint64_t pa_4K = ((uint64_t)pt->pt_cookie.dmac_cookie_addr) >> 12;
+	uint32_t idx = pt_hashfn(pa_4K);
+
+	ASSERT((pt->pt_cookie.dmac_cookie_addr & AMD_IOMMU_PGTABLE_ALIGN) == 0);
+
+	mutex_enter(&amd_iommu_page_table_hash.ampt_lock);
+
+	pt->pt_next = amd_iommu_page_table_hash.ampt_hash[idx];
+	pt->pt_prev = NULL;
+	amd_iommu_page_table_hash.ampt_hash[idx] = pt;
+	if (pt->pt_next)
+		pt->pt_next->pt_prev = pt;
+
+	mutex_exit(&amd_iommu_page_table_hash.ampt_lock);
+}
+
+static void
+amd_iommu_remove_pgtable_hash(amd_iommu_page_table_t *pt)
+{
+	uint64_t pa_4K = (pt->pt_cookie.dmac_cookie_addr >> 12);
+	uint32_t idx = pt_hashfn(pa_4K);
+
+	ASSERT((pt->pt_cookie.dmac_cookie_addr & AMD_IOMMU_PGTABLE_ALIGN) == 0);
+
+	mutex_enter(&amd_iommu_page_table_hash.ampt_lock);
+
+	if (pt->pt_next)
+		pt->pt_next->pt_prev = pt->pt_prev;
+
+	if (pt->pt_prev)
+		pt->pt_prev->pt_next = pt->pt_next;
+	else
+		amd_iommu_page_table_hash.ampt_hash[idx] = pt->pt_next;
+
+	pt->pt_next = NULL;
+	pt->pt_prev = NULL;
+
+	mutex_exit(&amd_iommu_page_table_hash.ampt_lock);
+}
+
+static amd_iommu_page_table_t *
+amd_iommu_lookup_pgtable_hash(domain_id_t domainid, uint64_t pgtable_pa_4K)
+{
+	amd_iommu_page_table_t *pt;
+	uint32_t idx = pt_hashfn(pgtable_pa_4K);
+
+	mutex_enter(&amd_iommu_page_table_hash.ampt_lock);
+	pt = amd_iommu_page_table_hash.ampt_hash[idx];
+	for (; pt; pt = pt->pt_next) {
+		if (domainid != pt->pt_domainid)
+			continue;
+		ASSERT((pt->pt_cookie.dmac_cookie_addr &
+		    AMD_IOMMU_PGTABLE_ALIGN) == 0);
+		if ((pt->pt_cookie.dmac_cookie_addr >> 12) == pgtable_pa_4K) {
+			break;
+		}
+	}
+	mutex_exit(&amd_iommu_page_table_hash.ampt_lock);
+
+	return (pt);
+}
+
+/*ARGSUSED*/
+static amd_iommu_page_table_t *
+amd_iommu_lookup_pgtable(amd_iommu_t *iommu, amd_iommu_page_table_t *ppt,
+    amd_iommu_domain_t *dp, int level, uint16_t index)
+{
+	uint64_t *pdtep;
+	uint64_t pgtable_pa_4K;
+
+	ASSERT(level > 0 && level <= AMD_IOMMU_PGTABLE_MAXLEVEL);
+	ASSERT(dp);
+
+	if (level == AMD_IOMMU_PGTABLE_MAXLEVEL) {
+		ASSERT(ppt == NULL);
+		ASSERT(index == 0);
+		pgtable_pa_4K = dp->d_pgtable_root_4K;
+	} else {
+		ASSERT(ppt);
+		pdtep = &(ppt->pt_pgtblva[index]);
+		if (AMD_IOMMU_REG_GET64(pdtep, AMD_IOMMU_PTDE_PR) == 0) {
+			if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
+				cmn_err(CE_NOTE, "Skipping PR=0 pdte: 0x%"
+				    PRIx64, *pdtep);
+			}
+			return (NULL);
+		}
+		pgtable_pa_4K = AMD_IOMMU_REG_GET64(pdtep, AMD_IOMMU_PTDE_ADDR);
+	}
+
+	return (amd_iommu_lookup_pgtable_hash(dp->d_domainid, pgtable_pa_4K));
+}
+
+static amd_iommu_page_table_t *
+amd_iommu_alloc_from_freelist(void)
+{
+	int i;
+	uint64_t *pte_array;
+	amd_iommu_page_table_t *pt;
+
+	if (amd_iommu_no_pgtable_freelist == 1)
+		return (NULL);
+
+	if (amd_iommu_pgtable_freelist.f_count == 0)
+		return (NULL);
+
+	pt = amd_iommu_pgtable_freelist.f_list;
+	amd_iommu_pgtable_freelist.f_list = pt->pt_next;
+	amd_iommu_pgtable_freelist.f_count--;
+
+	pte_array = pt->pt_pgtblva;
+	for (i = 0; i < AMD_IOMMU_PGTABLE_SZ / (sizeof (*pte_array)); i++) {
+		ASSERT(pt->pt_pte_ref[i] == 0);
+		ASSERT(AMD_IOMMU_REG_GET64(&(pte_array[i]),
+		    AMD_IOMMU_PTDE_PR)  == 0);
+	}
+
+	return (pt);
+}
+
+static int
+amd_iommu_alloc_pgtable(amd_iommu_t *iommu, domain_id_t domainid,
+    const char *path, amd_iommu_page_table_t **ptp, int km_flags)
+{
+	int err;
+	uint_t ncookies;
+	amd_iommu_page_table_t *pt;
+	dev_info_t *idip = iommu->aiomt_dip;
+	const char *driver = ddi_driver_name(idip);
+	int instance = ddi_get_instance(idip);
+	const char *f = "amd_iommu_alloc_pgtable";
+
+	*ptp = NULL;
+
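+	/* Fast path: reuse a previously freed page table, if any */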
+	pt = amd_iommu_alloc_from_freelist();
+	if (pt)
+		goto init_pgtable;
+
+	pt = kmem_zalloc(sizeof (amd_iommu_page_table_t), km_flags);
+	if (pt == NULL)
+		return (DDI_DMA_NORESOURCES);
+
+	/*
+	 * Each page table is 4K in size
+	 */
+	pt->pt_mem_reqsz = AMD_IOMMU_PGTABLE_SZ;
+
+	/*
+	 * Alloc a DMA handle. Use the IOMMU dip as we want this DMA
+	 * to *not* enter the IOMMU - no recursive entrance.
+	 */
+	err = ddi_dma_alloc_handle(idip, &amd_iommu_pgtable_dma_attr,
+	    km_flags == KM_SLEEP ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT,
+	    NULL, &pt->pt_dma_hdl);
+	if (err != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s: %s%d: domainid = %d, path = %s. "
+		    "Cannot alloc DMA handle for IO Page Table",
+		    f, driver, instance, domainid, path);
+		kmem_free(pt, sizeof (amd_iommu_page_table_t));
+		return (err == DDI_DMA_NORESOURCES ? err : DDI_DMA_NOMAPPING);
+	}
+
+	/*
+	 * Alloc memory for IO Page Table.
+	 * XXX remove size_t cast kludge
+	 */
+	err = ddi_dma_mem_alloc(pt->pt_dma_hdl, pt->pt_mem_reqsz,
+	    &amd_iommu_devacc, DDI_DMA_CONSISTENT|IOMEM_DATA_UNCACHED,
+	    km_flags == KM_SLEEP ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT,
+	    NULL, (caddr_t *)&pt->pt_pgtblva,
+	    (size_t *)&pt->pt_mem_realsz, &pt->pt_mem_hdl);
+	if (err != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s: %s%d: domainid=%d, path = %s. "
+		    "Cannot allocate DMA memory for IO Page table",
+		    f, driver, instance, domainid, path);
+		ddi_dma_free_handle(&pt->pt_dma_hdl);
+		kmem_free(pt, sizeof (amd_iommu_page_table_t));
+		return (DDI_DMA_NORESOURCES);
+	}
+
+	/*
+	 * The page table DMA VA must be 4K aligned and its size must
+	 * be at least the requested size.
+	 */
+	ASSERT(((uint64_t)(uintptr_t)pt->pt_pgtblva & AMD_IOMMU_PGTABLE_ALIGN)
+	    == 0);
+	ASSERT(pt->pt_mem_realsz >= pt->pt_mem_reqsz);
+
+	/*
+	 * Now bind the handle
+	 */
+	err = ddi_dma_addr_bind_handle(pt->pt_dma_hdl, NULL,
+	    (caddr_t)pt->pt_pgtblva, pt->pt_mem_realsz,
+	    DDI_DMA_READ | DDI_DMA_CONSISTENT,
+	    km_flags == KM_SLEEP ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT,
+	    NULL, &pt->pt_cookie, &ncookies);
+	if (err != DDI_DMA_MAPPED) {
+		cmn_err(CE_WARN, "%s: %s%d: domainid=%d, path = %s. "
+		    "Cannot bind memory for DMA to IO Page Tables. "
+		    "bufrealsz=%p",
+		    f, driver, instance, domainid, path,
+		    (void *)(uintptr_t)pt->pt_mem_realsz);
+		ddi_dma_mem_free(&pt->pt_mem_hdl);
+		ddi_dma_free_handle(&pt->pt_dma_hdl);
+		kmem_free(pt, sizeof (amd_iommu_page_table_t));
+		return (err == DDI_DMA_PARTIAL_MAP ? DDI_DMA_NOMAPPING :
+		    err);
+	}
+
+	/*
+	 * We assume the DMA engine on the IOMMU is capable of handling the
+	 * whole page table in a single cookie. If not and multiple cookies
+	 * are needed we fail.
+	 */
+	if (ncookies != 1) {
+		cmn_err(CE_WARN, "%s: %s%d: domainid = %d, path=%s "
+		    "Cannot handle multiple "
+		    "cookies for DMA to IO page Table, #cookies=%u",
+		    f, driver, instance, domainid, path, ncookies);
+		(void) ddi_dma_unbind_handle(pt->pt_dma_hdl);
+		ddi_dma_mem_free(&pt->pt_mem_hdl);
+		ddi_dma_free_handle(&pt->pt_dma_hdl);
+		kmem_free(pt, sizeof (amd_iommu_page_table_t));
+		return (DDI_DMA_NOMAPPING);
+	}
+
+init_pgtable:
+	/*
+	 * The address in the cookie must be 4K aligned and >= table size
+	 */
+	ASSERT(pt->pt_cookie.dmac_cookie_addr != NULL);
+	ASSERT((pt->pt_cookie.dmac_cookie_addr & AMD_IOMMU_PGTABLE_ALIGN) == 0);
+	ASSERT(pt->pt_cookie.dmac_size >= pt->pt_mem_realsz);
+	ASSERT(pt->pt_cookie.dmac_size >= pt->pt_mem_reqsz);
+	ASSERT(pt->pt_mem_reqsz >= AMD_IOMMU_PGTABLE_SIZE);
+	ASSERT(pt->pt_mem_realsz >= pt->pt_mem_reqsz);
+	ASSERT(pt->pt_pgtblva);
+
+	pt->pt_domainid = AMD_IOMMU_INVALID_DOMAIN;
+	pt->pt_level = 0x7;
+	pt->pt_index = 0;
+	pt->pt_ref = 0;
+	pt->pt_next = NULL;
+	pt->pt_prev = NULL;
+	pt->pt_parent = NULL;
+
+	bzero(pt->pt_pgtblva, pt->pt_mem_realsz);
+	SYNC_FORDEV(pt->pt_dma_hdl);
+
+	amd_iommu_insert_pgtable_hash(pt);
+
+	*ptp = pt;
+
+	return (DDI_SUCCESS);
+}
+
+static int
+amd_iommu_move_to_freelist(amd_iommu_page_table_t *pt)
+{
+	if (amd_iommu_no_pgtable_freelist == 1)
+		return (DDI_FAILURE);
+
+	if (amd_iommu_pgtable_freelist.f_count ==
+	    AMD_IOMMU_PGTABLE_FREELIST_MAX)
+		return (DDI_FAILURE);
+
+	pt->pt_next = amd_iommu_pgtable_freelist.f_list;
+	amd_iommu_pgtable_freelist.f_list = pt;
+	amd_iommu_pgtable_freelist.f_count++;
+
+	return (DDI_SUCCESS);
+}
+
+static void
+amd_iommu_free_pgtable(amd_iommu_t *iommu, amd_iommu_page_table_t *pt)
+{
+	int i;
+	uint64_t *pte_array;
+	dev_info_t *dip = iommu->aiomt_dip;
+	int instance = ddi_get_instance(dip);
+	const char *driver = ddi_driver_name(dip);
+	const char *f = "amd_iommu_free_pgtable";
+
+	ASSERT(pt->pt_ref == 0);
+
+	amd_iommu_remove_pgtable_hash(pt);
+
+	pte_array = pt->pt_pgtblva;
+	for (i = 0; i < AMD_IOMMU_PGTABLE_SZ / (sizeof (*pte_array)); i++) {
+		ASSERT(pt->pt_pte_ref[i] == 0);
+		ASSERT(AMD_IOMMU_REG_GET64(&(pte_array[i]),
+		    AMD_IOMMU_PTDE_PR)  == 0);
+	}
+
+	if (amd_iommu_move_to_freelist(pt) == DDI_SUCCESS)
+		return;
+
+	/* Unbind the handle */
+	if (ddi_dma_unbind_handle(pt->pt_dma_hdl) != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s: %s%d: idx=%d, domainid=%d. "
+		    "Failed to unbind handle: %p for IOMMU Page Table",
+		    f, driver, instance, iommu->aiomt_idx, pt->pt_domainid,
+		    (void *)pt->pt_dma_hdl);
+	}
+	/* Free the table memory allocated for DMA */
+	ddi_dma_mem_free(&pt->pt_mem_hdl);
+
+	/* Free the DMA handle */
+	ddi_dma_free_handle(&pt->pt_dma_hdl);
+
+	kmem_free(pt, sizeof (amd_iommu_page_table_t));
+}
+
+static int
+init_pde(amd_iommu_page_table_t *ppt, amd_iommu_page_table_t *pt)
+{
+	uint64_t *pdep = &(ppt->pt_pgtblva[pt->pt_index]);
+	uint64_t next_pgtable_pa_4K = (pt->pt_cookie.dmac_cookie_addr) >> 12;
+
+	/* nothing to set. PDE is already set */
+	if (AMD_IOMMU_REG_GET64(pdep, AMD_IOMMU_PTDE_PR) == 1) {
+		ASSERT(PT_REF_VALID(ppt));
+		ASSERT(PT_REF_VALID(pt));
+		ASSERT(ppt->pt_pte_ref[pt->pt_index] == 0);
+		ASSERT(AMD_IOMMU_REG_GET64(pdep, AMD_IOMMU_PTDE_ADDR)
+		    == next_pgtable_pa_4K);
+		return (DDI_SUCCESS);
+	}
+
+	ppt->pt_ref++;
+	ASSERT(PT_REF_VALID(ppt));
+
+	/* Page Directories are always RW */
+	AMD_IOMMU_REG_SET64(pdep, AMD_IOMMU_PTDE_IW, 1);
+	AMD_IOMMU_REG_SET64(pdep, AMD_IOMMU_PTDE_IR, 1);
+	AMD_IOMMU_REG_SET64(pdep, AMD_IOMMU_PTDE_ADDR,
+	    next_pgtable_pa_4K);
+	pt->pt_parent = ppt;
+	AMD_IOMMU_REG_SET64(pdep, AMD_IOMMU_PTDE_NXT_LVL,
+	    pt->pt_level);
+	ppt->pt_pte_ref[pt->pt_index] = 0;
+	AMD_IOMMU_REG_SET64(pdep, AMD_IOMMU_PTDE_PR, 1);
+	SYNC_FORDEV(ppt->pt_dma_hdl);
+	ASSERT(AMD_IOMMU_REG_GET64(pdep, AMD_IOMMU_PTDE_PR) == 1);
+
+	return (DDI_SUCCESS);
+}
+
+static int
+init_pte(amd_iommu_page_table_t *pt, uint64_t pa, uint16_t index,
+    struct ddi_dma_req *dmareq)
+{
+	uint64_t *ptep = &(pt->pt_pgtblva[index]);
+	uint64_t pa_4K = pa >> 12;
+	int R;
+	int W;
+
+	/* nothing to set if PTE is already set */
+	if (AMD_IOMMU_REG_GET64(ptep, AMD_IOMMU_PTDE_PR) == 1) {
+		/*
+		 * Adjust current permissions
+		 * DDI_DMA_WRITE means direction of DMA is MEM -> I/O
+		 * so that requires Memory READ permissions i.e. sense
+		 * is inverted.
+		 * Note: either or both of DDI_DMA_READ/WRITE may be set
+		 */
+		if (amd_iommu_no_RW_perms == 0) {
+			R = AMD_IOMMU_REG_GET64(ptep, AMD_IOMMU_PTDE_IR);
+			W = AMD_IOMMU_REG_GET64(ptep, AMD_IOMMU_PTDE_IW);
+			if (R == 0 && ((dmareq->dmar_flags & DDI_DMA_WRITE) ||
+			    (dmareq->dmar_flags & DDI_DMA_RDWR))) {
+				AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IR, 1);
+			}
+			if (W  == 0 && ((dmareq->dmar_flags & DDI_DMA_READ) ||
+			    (dmareq->dmar_flags & DDI_DMA_RDWR))) {
+				AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IW, 1);
+			}
+		}
+		ASSERT(PT_REF_VALID(pt));
+		pt->pt_pte_ref[index]++;
+		ASSERT(AMD_IOMMU_REG_GET64(ptep, AMD_IOMMU_PTDE_ADDR)
+		    == pa_4K);
+		return (DDI_SUCCESS);
+	}
+
+	pt->pt_ref++;
+	ASSERT(PT_REF_VALID(pt));
+
+	/* see comment above about inverting sense of RD/WR */
+	if (amd_iommu_no_RW_perms == 0) {
+		AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IR, 0);
+		AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IW, 0);
+		if (dmareq->dmar_flags & DDI_DMA_RDWR) {
+			AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IW, 1);
+			AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IR, 1);
+		} else {
+			if (dmareq->dmar_flags & DDI_DMA_WRITE) {
+				AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IR, 1);
+			}
+			if (dmareq->dmar_flags & DDI_DMA_READ) {
+				AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IW, 1);
+			}
+		}
+	} else {
+		AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IR, 1);
+		AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IW, 1);
+	}
+
+	/* TODO what is correct for FC and U */
+	AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTE_FC, 0);
+	AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTE_U, 0);
+	AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_ADDR, pa_4K);
+	AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_NXT_LVL, 0);
+	ASSERT(pt->pt_pte_ref[index] == 0);
+	pt->pt_pte_ref[index] = 1;
+	AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_PR, 1);
+	SYNC_FORDEV(pt->pt_dma_hdl);
+	ASSERT(AMD_IOMMU_REG_GET64(ptep, AMD_IOMMU_PTDE_PR) == 1);
+
+	return (DDI_SUCCESS);
+}
+
+
+static void
+init_pt(amd_iommu_page_table_t *pt, amd_iommu_domain_t *dp,
+    int level, uint16_t index)
+{
+	ASSERT(dp);
+
+	if (level == AMD_IOMMU_PGTABLE_MAXLEVEL) {
+		dp->d_pgtable_root_4K = (pt->pt_cookie.dmac_cookie_addr) >> 12;
+	} else {
+		ASSERT(level >= 1 && level < AMD_IOMMU_PGTABLE_MAXLEVEL);
+	}
+
+	pt->pt_domainid = dp->d_domainid;
+	pt->pt_level = level;
+	pt->pt_index = index;
+}
+
+static int
+amd_iommu_setup_1_pgtable(amd_iommu_t *iommu, dev_info_t *rdip,
+    struct ddi_dma_req *dmareq,
+    domain_id_t domainid, amd_iommu_domain_t *dp,
+    amd_iommu_page_table_t *ppt,
+    uint16_t index, int level, uint64_t va, uint64_t pa,
+    amd_iommu_page_table_t **ptp,  uint16_t *next_idxp, const char *path,
+    int km_flags)
+{
+	int error;
+	amd_iommu_page_table_t *pt;
+	const char *driver = ddi_driver_name(rdip);
+	int instance = ddi_get_instance(rdip);
+	const char *f = "amd_iommu_setup_1_pgtable";
+
+	*ptp = NULL;
+	*next_idxp = 0;
+	error = DDI_SUCCESS;
+
+	ASSERT(level > 0 && level <= AMD_IOMMU_PGTABLE_MAXLEVEL);
+
+	ASSERT(dp);
+	if (level == AMD_IOMMU_PGTABLE_MAXLEVEL) {
+		ASSERT(ppt == NULL);
+		ASSERT(index == 0);
+	} else {
+		ASSERT(ppt);
+	}
+
+	/* Check if page table is already allocated */
+	pt = amd_iommu_lookup_pgtable(iommu, ppt, dp, level, index);
+	if (pt != NULL) {
+		ASSERT(pt->pt_domainid == domainid);
+		ASSERT(pt->pt_level == level);
+		ASSERT(pt->pt_index == index);
+		goto out;
+	}
+
+	if ((error = amd_iommu_alloc_pgtable(iommu, domainid, path, &pt,
+	    km_flags)) != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s: %s%d: idx = %u, domainid = %d, va = %p "
+		    "path = %s", f, driver, instance, iommu->aiomt_idx,
+		    domainid, (void *)(uintptr_t)va, path);
+		return (error);
+	}
+
+	ASSERT(dp->d_domainid == domainid);
+
+	init_pt(pt, dp, level, index);
+
+out:
+	if (level != AMD_IOMMU_PGTABLE_MAXLEVEL) {
+		error = init_pde(ppt, pt);
+	}
+
+	if (level == 1) {
+		ASSERT(error == DDI_SUCCESS);
+		error = init_pte(pt, pa, AMD_IOMMU_VA_BITS(va, level), dmareq);
+	} else {
+		*next_idxp = AMD_IOMMU_VA_BITS(va, level);
+		*ptp = pt;
+	}
+
+	return (error);
+}
+
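+/*
+ * Outcome of tearing down a single PDE/PTE: the entry still has
+ * references, the entry was cleared, or the entire page table that
+ * contained it was freed.
+ */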
+typedef enum {
+	PDTE_NOT_TORN = 0x1,
+	PDTE_TORN_DOWN = 0x2,
+	PGTABLE_TORN_DOWN = 0x4
+} pdte_tear_t;
+
+static pdte_tear_t
+amd_iommu_teardown_pdte(amd_iommu_t *iommu,
+    amd_iommu_page_table_t *pt, int index)
+{
+	uint8_t next_level;
+	pdte_tear_t retval;
+	uint64_t *ptdep = &(pt->pt_pgtblva[index]);
+
+	next_level = AMD_IOMMU_REG_GET64(ptdep,
+	    AMD_IOMMU_PTDE_NXT_LVL);
+
+	if (AMD_IOMMU_REG_GET64(ptdep, AMD_IOMMU_PTDE_PR) == 1) {
+		if (pt->pt_level == 1) {
+			ASSERT(next_level == 0);
+			/* PTE */
+			pt->pt_pte_ref[index]--;
+			if (pt->pt_pte_ref[index] != 0) {
+				return (PDTE_NOT_TORN);
+			}
+		} else {
+			ASSERT(next_level != 0 && next_level != 7);
+		}
+		ASSERT(pt->pt_pte_ref[index] == 0);
+		ASSERT(PT_REF_VALID(pt));
+
+		AMD_IOMMU_REG_SET64(ptdep, AMD_IOMMU_PTDE_PR, 0);
+		SYNC_FORDEV(pt->pt_dma_hdl);
+		ASSERT(AMD_IOMMU_REG_GET64(ptdep,
+		    AMD_IOMMU_PTDE_PR) == 0);
+		pt->pt_ref--;
+		ASSERT(PT_REF_VALID(pt));
+		retval = PDTE_TORN_DOWN;
+	} else {
+		ASSERT(0);
+		ASSERT(pt->pt_pte_ref[index] == 0);
+		ASSERT(PT_REF_VALID(pt));
+		retval = PDTE_NOT_TORN;
+	}
+
+	if (pt->pt_ref == 0) {
+		amd_iommu_free_pgtable(iommu, pt);
+		return (PGTABLE_TORN_DOWN);
+	}
+
+	return (retval);
+}
+
+static int
+amd_iommu_create_pgtables(amd_iommu_t *iommu, dev_info_t *rdip,
+    struct ddi_dma_req *dmareq, uint64_t va,
+    uint64_t pa, uint16_t deviceid, domain_id_t domainid,
+    amd_iommu_domain_t *dp, const char *path, int km_flags)
+{
+	int level;
+	uint16_t index;
+	uint16_t next_idx;
+	amd_iommu_page_table_t *pt;
+	amd_iommu_page_table_t *ppt;
+	int error;
+	const char *driver = ddi_driver_name(rdip);
+	int instance = ddi_get_instance(rdip);
+	const char *f = "amd_iommu_create_pgtables";
+
+	if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
+		cmn_err(CE_NOTE, "%s: %s%d: idx = %u, domainid = %d, "
+		    "deviceid = %u, va = %p, pa = %p, path = %s",
+		    f, driver, instance,
+		    iommu->aiomt_idx, domainid, deviceid,
+		    (void *)(uintptr_t)va,
+		    (void *)(uintptr_t)pa, path);
+	}
+
+	if (domainid == AMD_IOMMU_PASSTHRU_DOMAIN) {
+		/* No need for pagetables. Just set up device table entry */
+		goto passthru;
+	}
+
+	index = 0;
+	ppt = NULL;
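+	/* Walk top-down from the root, creating any missing level */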
+	for (level = AMD_IOMMU_PGTABLE_MAXLEVEL; level > 0;
+	    level--, pt = NULL, next_idx = 0) {
+		if ((error = amd_iommu_setup_1_pgtable(iommu, rdip, dmareq,
+		    domainid, dp, ppt, index, level, va, pa, &pt,
+		    &next_idx, path, km_flags)) != DDI_SUCCESS) {
+			cmn_err(CE_WARN, "%s: %s%d: idx=%d: domainid=%d, "
+			    "deviceid=%u, va= %p, pa = %p, Failed to setup "
+			    "page table(s) at level = %d, path = %s.",
+			    f, driver, instance, iommu->aiomt_idx,
+			    domainid, deviceid, (void *)(uintptr_t)va,
+			    (void *)(uintptr_t)pa, level, path);
+			return (error);
+		}
+
+		if (level > 1) {
+			ASSERT(pt);
+			ASSERT(pt->pt_domainid == domainid);
+			ppt = pt;
+			index = next_idx;
+		} else {
+			ASSERT(level == 1);
+			ASSERT(pt == NULL);
+			ASSERT(next_idx == 0);
+			ppt = NULL;
+			index = 0;
+		}
+	}
+
+passthru:
+	if ((error = amd_iommu_set_devtbl_entry(iommu, rdip, domainid, deviceid,
+	    dp, path)) != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s: %s%d: idx=%d: rdip=%p, deviceid=%u, "
+		    "domainid=%d."
+		    "Failed to set device table entry for path %s.",
+		    f, driver, instance,
+		    iommu->aiomt_idx, (void *)rdip, deviceid, domainid, path);
+		return (error);
+	}
+
+	SYNC_FORDEV(iommu->aiomt_dmahdl);
+
+	return (DDI_SUCCESS);
+}
+
+static int
+amd_iommu_destroy_pgtables(amd_iommu_t *iommu, dev_info_t *rdip,
+    uint64_t pageva, uint16_t deviceid, domain_id_t domainid,
+    amd_iommu_domain_t *dp, map_type_t type, int *domain_freed, char *path)
+{
+	int level;
+	int flags;
+	amd_iommu_cmdargs_t cmdargs = {0};
+	uint16_t index;
+	uint16_t prev_index;
+	amd_iommu_page_table_t *pt;
+	amd_iommu_page_table_t *ppt;
+	pdte_tear_t retval;
+	int tear_level;
+	int invalidate_pte;
+	int invalidate_pde;
+	int error = DDI_FAILURE;
+	const char *driver = ddi_driver_name(iommu->aiomt_dip);
+	int instance = ddi_get_instance(iommu->aiomt_dip);
+	const char *f = "amd_iommu_destroy_pgtables";
+
+	if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
+		cmn_err(CE_NOTE, "%s: %s%d: idx = %u, domainid = %d, "
+		    "deviceid = %u, va = %p, path = %s",
+		    f, driver, instance,
+		    iommu->aiomt_idx, domainid, deviceid,
+		    (void *)(uintptr_t)pageva, path);
+	}
+
+	if (domainid == AMD_IOMMU_PASSTHRU_DOMAIN) {
+		/*
+		 * there are no pagetables for the passthru domain.
+		 * Just the device table entry
+		 */
+		error = DDI_SUCCESS;
+		/* ensure the devtbl entry is handled at the passthru label */
+		tear_level = AMD_IOMMU_PGTABLE_MAXLEVEL;
+		goto passthru;
+	}
+
+	ppt = NULL;
+	index = 0;
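+	/* Walk down from the root to locate the leaf table for this VA */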
+	for (level = AMD_IOMMU_PGTABLE_MAXLEVEL; level > 0; level--) {
+		pt = amd_iommu_lookup_pgtable(iommu, ppt, dp, level, index);
+		if (pt) {
+			ppt = pt;
+			index = AMD_IOMMU_VA_BITS(pageva, level);
+			continue;
+		}
+		break;
+	}
+
+	if (level == 0) {
+		uint64_t *ptep;
+		uint64_t pa_4K;
+
+		ASSERT(pt);
+		ASSERT(pt == ppt);
+		ASSERT(pt->pt_domainid == dp->d_domainid);
+
+		ptep = &(pt->pt_pgtblva[index]);
+
+		pa_4K = AMD_IOMMU_REG_GET64(ptep, AMD_IOMMU_PTDE_ADDR);
+		if (amd_iommu_unity_map || type == AMD_IOMMU_UNITY_MAP) {
+			ASSERT(pageva == (pa_4K << MMU_PAGESHIFT));
+		}
+	}
+
+	tear_level = -1;
+	invalidate_pde = 0;
+	invalidate_pte = 0;
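+	/* Walk back up the tree, tearing down entries whose refs drop to 0 */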
+	for (++level; level <= AMD_IOMMU_PGTABLE_MAXLEVEL; level++) {
+		prev_index = pt->pt_index;
+		ppt = pt->pt_parent;
+		retval = amd_iommu_teardown_pdte(iommu, pt, index);
+		switch (retval) {
+			case PDTE_NOT_TORN:
+				goto invalidate;
+			case PDTE_TORN_DOWN:
+				invalidate_pte = 1;
+				goto invalidate;
+			case PGTABLE_TORN_DOWN:
+				invalidate_pte = 1;
+				invalidate_pde = 1;
+				tear_level = level;
+				break;
+		}
+		index = prev_index;
+		pt = ppt;
+	}
+
+invalidate:
+	/*
+	 * Now teardown the IOMMU HW caches if applicable
+	 */
+	if (invalidate_pte) {
+		cmdargs.ca_domainid = (uint16_t)domainid;
+		if (amd_iommu_pageva_inval_all) {
+			cmdargs.ca_addr = (uintptr_t)0x7FFFFFFFFFFFF000;
+			flags = AMD_IOMMU_CMD_FLAGS_PAGE_PDE_INVAL |
+			    AMD_IOMMU_CMD_FLAGS_PAGE_INVAL_S;
+		} else if (invalidate_pde) {
+			cmdargs.ca_addr =
+			    (uintptr_t)AMD_IOMMU_VA_INVAL(pageva, tear_level);
+			flags = AMD_IOMMU_CMD_FLAGS_PAGE_PDE_INVAL |
+			    AMD_IOMMU_CMD_FLAGS_PAGE_INVAL_S;
+		} else {
+			cmdargs.ca_addr = (uintptr_t)pageva;
+			flags = 0;
+		}
+		if (amd_iommu_cmd(iommu, AMD_IOMMU_CMD_INVAL_IOMMU_PAGES,
+		    &cmdargs, flags, 0) != DDI_SUCCESS) {
+			cmn_err(CE_WARN, "%s: %s%d: idx=%d: domainid=%d, "
+			    "rdip=%p. Failed to invalidate IOMMU HW cache "
+			    "for %s", f, driver, instance,
+			    iommu->aiomt_idx, domainid, (void *)rdip, path);
+			error = DDI_FAILURE;
+			goto out;
+		}
+	}
+
+passthru:
+	if (tear_level ==  AMD_IOMMU_PGTABLE_MAXLEVEL) {
+		error = amd_iommu_clear_devtbl_entry(iommu, rdip, domainid,
+		    deviceid, dp, domain_freed, path);
+	} else {
+		error = DDI_SUCCESS;
+	}
+
+out:
+	SYNC_FORDEV(iommu->aiomt_dmahdl);
+
+	return (error);
+}
+
+static int
+cvt_bind_error(int error)
+{
+	switch (error) {
+	case DDI_DMA_MAPPED:
+	case DDI_DMA_PARTIAL_MAP:
+	case DDI_DMA_NORESOURCES:
+	case DDI_DMA_NOMAPPING:
+		break;
+	default:
+		cmn_err(CE_PANIC, "Unsupported error code: %d", error);
+		/*NOTREACHED*/
+	}
+	return (error);
+}
+
+int
+amd_iommu_map_pa2va(amd_iommu_t *iommu, dev_info_t *rdip, ddi_dma_attr_t *attrp,
+    struct ddi_dma_req *dmareq, uint64_t start_pa, uint64_t pa_sz,
+    map_type_t type, uint64_t *start_vap, int km_flags)
+{
+	pfn_t pfn_start;
+	pfn_t pfn_end;
+	pfn_t pfn;
+	int alias;
+	int32_t deviceid;
+	domain_id_t domainid;
+	amd_iommu_domain_t *dp;
+	uint64_t end_pa;
+	uint64_t start_va;
+	uint64_t end_va;
+	uint64_t pg_start;
+	uint64_t pg_end;
+	uint64_t pg;
+	uint64_t va_sz;
+	char *path;
+	int error = DDI_DMA_NOMAPPING;
+	const char *driver = ddi_driver_name(iommu->aiomt_dip);
+	int instance = ddi_get_instance(iommu->aiomt_dip);
+	const char *f = "amd_iommu_map_pa2va";
+
+	ASSERT(pa_sz != 0);
+
+	*start_vap = 0;
+
+	ASSERT(rdip);
+
+	path = kmem_alloc(MAXPATHLEN, km_flags);
+	if (path == NULL) {
+		error = DDI_DMA_NORESOURCES;
+		goto out;
+	}
+	(void) ddi_pathname(rdip, path);
+
+	/*
+	 * First get deviceid
+	 */
+	if (amd_iommu_get_deviceid(iommu, rdip, &deviceid, &alias, path)
+	    != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s: %s%d: idx=%d: rdip=%p. "
+		    "Failed to get device ID for %s.", f, driver, instance,
+		    iommu->aiomt_idx, (void *)rdip, path);
+		error = DDI_DMA_NOMAPPING;
+		goto out;
+	}
+
+	/*
+	 * Next get the domain for this rdip
+	 */
+	if (amd_iommu_get_domain(iommu, rdip, alias, deviceid, &domainid, path)
+	    != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s: %s%d: idx=%d: rdip=%p, path=%s. "
+		    "Failed to get domain.", f, driver, instance,
+		    iommu->aiomt_idx, (void *)rdip, path);
+		error = DDI_DMA_NOMAPPING;
+		goto out;
+	}
+
+	dp = amd_iommu_lookup_domain(iommu, domainid, type, km_flags);
+	if (dp == NULL) {
+		cmn_err(CE_WARN, "%s: %s%d: idx=%d: domainid=%d, rdip=%p. "
+		    "Failed to get device ID for %s.", f, driver, instance,
+		    iommu->aiomt_idx, domainid, (void *)rdip, path);
+		error = DDI_DMA_NORESOURCES;
+		goto out;
+	}
+
+	ASSERT(dp->d_domainid == domainid);
+
+	pfn_start = start_pa >> MMU_PAGESHIFT;
+
+	if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
+		cmn_err(CE_NOTE, "pa = %p, pfn_new = %p, pfn_start = %p, "
+		    "pgshift = %d",
+		    (void *)(uintptr_t)start_pa,
+		    (void *)(uintptr_t)(start_pa >> MMU_PAGESHIFT),
+		    (void *)(uintptr_t)pfn_start, MMU_PAGESHIFT);
+	}
+
+	end_pa = start_pa + pa_sz - 1;
+	pfn_end = end_pa >> MMU_PAGESHIFT;
+
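+	/*
+	 * For unity mappings VA == PA; otherwise allocate a VA range
+	 * from the domain's vmem arena, honoring the device's DMA
+	 * attributes.
+	 */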
+	if (amd_iommu_unity_map || type == AMD_IOMMU_UNITY_MAP) {
+		start_va = start_pa;
+		end_va = end_pa;
+		va_sz = pa_sz;
+		*start_vap = start_va;
+	} else {
+		va_sz = mmu_ptob(pfn_end - pfn_start + 1);
+		start_va = (uintptr_t)vmem_xalloc(dp->d_vmem, va_sz,
+		    MAX(attrp->dma_attr_align, MMU_PAGESIZE),
+		    0,
+		    attrp->dma_attr_seg + 1,
+		    (void *)(uintptr_t)attrp->dma_attr_addr_lo,
+		    (void *)(uintptr_t)MIN((attrp->dma_attr_addr_hi + 1),
+		    AMD_IOMMU_SIZE_4G),	/* XXX rollover */
+		    km_flags == KM_SLEEP ? VM_SLEEP : VM_NOSLEEP);
+		if (start_va == 0) {
+			cmn_err(CE_WARN, "%s: No VA resources",
+			    amd_iommu_modname);
+			error = DDI_DMA_NORESOURCES;
+			goto out;
+		}
+		ASSERT((start_va & MMU_PAGEOFFSET) == 0);
+		end_va = start_va + va_sz - 1;
+		*start_vap = start_va + (start_pa & MMU_PAGEOFFSET);
+	}
+
+	pg_start = start_va >> MMU_PAGESHIFT;
+	pg_end = end_va >> MMU_PAGESHIFT;
+
+	pg = pg_start;
+	for (pfn = pfn_start; pfn <= pfn_end; pfn++, pg++) {
+
+		if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
+			cmn_err(CE_WARN, "%s: attempting to create page "
+			    "tables for pa = %p, va = %p, path = %s",
+			    f, (void *)(uintptr_t)(pfn << MMU_PAGESHIFT),
+			    (void *)(uintptr_t)(pg << MMU_PAGESHIFT), path);
+		}
+
+		if (amd_iommu_unity_map || type == AMD_IOMMU_UNITY_MAP) {
+			ASSERT(pfn == pg);
+		}
+
+		if ((error = amd_iommu_create_pgtables(iommu, rdip, dmareq,
+		    pg << MMU_PAGESHIFT,
+		    pfn << MMU_PAGESHIFT, deviceid, domainid, dp, path,
+		    km_flags)) != DDI_SUCCESS) {
+			cmn_err(CE_WARN, "Failed to create_pgtables");
+			goto out;
+		}
+
+		if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
+			cmn_err(CE_WARN, "%s: successfuly created page tables "
+			    "for pfn = %p, vapg = %p, path = %s",
+			    f, (void *)(uintptr_t)pfn,
+			    (void *)(uintptr_t)pg, path);
+		}
+
+	}
+	ASSERT(pg == pg_end + 1);
+
+	if (amd_iommu_debug & AMD_IOMMU_DEBUG_PA2VA) {
+		cmn_err(CE_NOTE, "pa=%p, va=%p",
+		    (void *)(uintptr_t)start_pa,
+		    (void *)(uintptr_t)(*start_vap));
+	}
+	error = DDI_DMA_MAPPED;
+
+out:
+	kmem_free(path, MAXPATHLEN);
+	return (cvt_bind_error(error));
+}
+
+int
+amd_iommu_unmap_va(amd_iommu_t *iommu, dev_info_t *rdip, uint64_t start_va,
+    uint64_t va_sz, map_type_t type)
+{
+	uint64_t end_va;
+	uint64_t pg_start;
+	uint64_t pg_end;
+	uint64_t pg;
+	uint64_t actual_sz;
+	char *path;
+	int pathfree;
+	int alias;
+	int32_t deviceid;
+	domain_id_t domainid;
+	amd_iommu_domain_t *dp;
+	int error;
+	int domain_freed;
+	const char *driver = ddi_driver_name(iommu->aiomt_dip);
+	int instance = ddi_get_instance(iommu->aiomt_dip);
+	const char *f = "amd_iommu_unmap_va";
+
+	if (amd_iommu_no_unmap)
+		return (DDI_SUCCESS);
+
+	path = kmem_alloc(MAXPATHLEN, KM_NOSLEEP);
+	if (path) {
+		(void) ddi_pathname(rdip, path);
+		pathfree = 1;
+	} else {
+		pathfree = 0;
+		path = "<path-mem-alloc-failed>";
+	}
+
+	/*
+	 * First get deviceid
+	 */
+	if (amd_iommu_get_deviceid(iommu, rdip, &deviceid, &alias, path)
+	    != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s: %s%d: idx=%d: rdip=%p. "
+		    "Failed to get device ID for %s.", f, driver, instance,
+		    iommu->aiomt_idx, (void *)rdip, path);
+		error = DDI_FAILURE;
+		goto out;
+	}
+
+	/*
+	 * Next get the domain for this rdip
+	 */
+	if (amd_iommu_get_domain(iommu, rdip, alias, deviceid, &domainid, path)
+	    != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s: %s%d: idx=%d: rdip=%p, path=%s. "
+		    "Failed to get domain.", f, driver, instance,
+		    iommu->aiomt_idx, (void *)rdip, path);
+		error = DDI_FAILURE;
+		goto out;
+	}
+
+	/* should never result in domain allocation/vmem_create */
+	dp = amd_iommu_lookup_domain(iommu, domainid, AMD_IOMMU_INVALID_MAP,
+	    KM_NOSLEEP);
+	if (dp == NULL) {
+		cmn_err(CE_WARN, "%s: %s%d: idx=%d: domainid=%d, rdip=%p. "
+		    "Failed to get device ID for %s.", f, driver, instance,
+		    iommu->aiomt_idx, domainid, (void *)rdip, path);
+		error = DDI_FAILURE;
+		goto out;
+	}
+
+	ASSERT(dp->d_domainid == domainid);
+
+	pg_start = start_va >> MMU_PAGESHIFT;
+	end_va = start_va + va_sz - 1;
+	pg_end = end_va >> MMU_PAGESHIFT;
+	actual_sz = (pg_end - pg_start + 1) << MMU_PAGESHIFT;
+
+	domain_freed = 0;
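+	/*
+	 * domain_freed is set when clearing a devtbl entry drops the
+	 * domain's reference count to zero.
+	 */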
+	for (pg = pg_start; pg <= pg_end; pg++) {
+		domain_freed = 0;
+		if (amd_iommu_destroy_pgtables(iommu, rdip,
+		    pg << MMU_PAGESHIFT, deviceid, domainid, dp, type,
+		    &domain_freed, path) != DDI_SUCCESS) {
+			error = DDI_FAILURE;
+			goto out;
+		}
+		if (domain_freed) {
+			ASSERT(pg == pg_end);
+			break;
+		}
+	}
+
+	/*
+	 * vmem_xalloc() must be paired with vmem_xfree()
+	 */
+	if (type == AMD_IOMMU_VMEM_MAP && !amd_iommu_unity_map) {
+		vmem_xfree(dp->d_vmem,
+		    (void *)(uintptr_t)(pg_start << MMU_PAGESHIFT), actual_sz);
+	}
+
+	if (domain_freed)
+		amd_iommu_teardown_domain(iommu, dp);
+
+	error = DDI_SUCCESS;
+out:
+	if (pathfree)
+		kmem_free(path, MAXPATHLEN);
+	return (error);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_page_tables.h	Mon Sep 14 21:48:21 2009 -0700
@@ -0,0 +1,134 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _AMD_IOMMU_PAGE_TABLES_H
+#define	_AMD_IOMMU_PAGE_TABLES_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef _KERNEL
+
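+/*
+ * The field specifiers below encode a field's <high-bit>:<low-bit>
+ * position in a 64-bit word as (hi << 16 | lo), for use with the
+ * AMD_IOMMU_REG_GET64/SET64 accessors.
+ */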
+/* Common to PTEs and PDEs */
+#define	AMD_IOMMU_PTDE_IW		(62 << 16 | 62)
+#define	AMD_IOMMU_PTDE_IR		(61 << 16 | 61)
+#define	AMD_IOMMU_PTDE_ADDR		(51 << 16 | 12)
+#define	AMD_IOMMU_PTDE_NXT_LVL		(11 << 16 | 9)
+#define	AMD_IOMMU_PTDE_PR		(0 << 16 | 0)
+
+#define	AMD_IOMMU_PTE_FC		(60 << 16 | 60)
+#define	AMD_IOMMU_PTE_U			(59 << 16 | 59)
+
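+/*
+ * VA decomposition for the 6-level IOMMU page table walk: levels 1-5
+ * each index 9 bits of the VA, level 6 indexes the top 7 bits, and the
+ * low 12 bits are the page offset (5*9 + 7 + 12 = 64). For example,
+ * AMD_IOMMU_VA_BITS(va, 1) == ((va) >> 12) & 0x1FF, the level-1 index.
+ */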
+#define	AMD_IOMMU_VA_NBITS(l)		((l) == 6 ? 7 : 9)
+#define	AMD_IOMMU_VA_BITMASK(l)		((1 << AMD_IOMMU_VA_NBITS(l)) - 1)
+#define	AMD_IOMMU_VA_SHIFT(v, l)	\
+	((v) >> (MMU_PAGESHIFT + (AMD_IOMMU_VA_NBITS(l - 1) * (l - 1))))
+#define	AMD_IOMMU_VA_BITS(v, l)		\
+	(AMD_IOMMU_VA_SHIFT(v, l) & AMD_IOMMU_VA_BITMASK(l))
+#define	AMD_IOMMU_VA_TOTBITS(l)		\
+	(((l) == 6 ? 7 + ((l) - 1) * 9 : (l) * 9) + MMU_PAGESHIFT)
+#define	AMD_IOMMU_VA_TOTMASK(l)		((1 << AMD_IOMMU_VA_TOTBITS(l)) - 1)
+#define	AMD_IOMMU_VA_INVAL_SETMASK(l)	\
+	(((1 << AMD_IOMMU_VA_TOTBITS(l)) - 1) >> 1)
+#define	AMD_IOMMU_VA_INVAL_CLRMASK(l)	\
+	(~(1 << (AMD_IOMMU_VA_TOTBITS(l) - 1)))
+#define	AMD_IOMMU_VA_INVAL(v, l)	\
+	(((v) & AMD_IOMMU_VA_INVAL_CLRMASK(l)) | AMD_IOMMU_VA_INVAL_SETMASK(l))
+
+#define	AMD_IOMMU_PGTABLE_SZ		(4096)
+#define	AMD_IOMMU_PGTABLE_MAXLEVEL	(6)
+#define	AMD_IOMMU_PGTABLE_HASH_SZ	(256)
+
+#define	AMD_IOMMU_PGTABLE_ALIGN		((1ULL << 12) - 1)
+#define	AMD_IOMMU_PGTABLE_SIZE		(1ULL << 12)
+
+#define	AMD_IOMMU_MAX_PDTE		(1ULL << AMD_IOMMU_VA_NBITS(1))
+#define	PT_REF_VALID(p)			((p)->pt_ref >= 0 && \
+					(p)->pt_ref <= AMD_IOMMU_MAX_PDTE)
+
+#define	AMD_IOMMU_DOMAIN_HASH_SZ	(256)
+#define	AMD_IOMMU_PGTABLE_FREELIST_MAX	(256)
+#define	AMD_IOMMU_PA2VA_HASH_SZ		(256)
+
+#define	AMD_IOMMU_SIZE_4G		((uint64_t)1 << 32)
+#define	AMD_IOMMU_VMEM_NAMELEN		(30)
+
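+/*
+ * Reserved domain IDs: 0 is invalid; the top of the 16-bit space is
+ * reserved for the identity, passthru and system domains.
+ */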
+typedef enum {
+	AMD_IOMMU_INVALID_DOMAIN = 0,
+	AMD_IOMMU_IDENTITY_DOMAIN = 0xFFFD,
+	AMD_IOMMU_PASSTHRU_DOMAIN = 0xFFFE,
+	AMD_IOMMU_SYS_DOMAIN = 0xFFFF
+} domain_id_t;
+
+typedef enum {
+	AMD_IOMMU_INVALID_MAP = 0,
+	AMD_IOMMU_UNITY_MAP,
+	AMD_IOMMU_VMEM_MAP
+} map_type_t;
+
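+/*
+ * Software state for one 4K IOMMU page table: the DMA-bound table
+ * memory, per-entry reference counts, and linkage into the page
+ * table hash and the table tree (via pt_parent).
+ */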
+typedef struct amd_iommu_page_table {
+	domain_id_t pt_domainid;
+	int pt_level;
+	ddi_dma_handle_t pt_dma_hdl;
+	ddi_acc_handle_t pt_mem_hdl;
+	uint64_t pt_mem_reqsz;
+	uint64_t pt_mem_realsz;
+	uint64_t *pt_pgtblva;
+	uint64_t pt_pte_ref[AMD_IOMMU_MAX_PDTE];
+	uint16_t pt_index;
+	int pt_ref;
+	ddi_dma_cookie_t pt_cookie;
+	struct amd_iommu_page_table *pt_next;
+	struct amd_iommu_page_table *pt_prev;
+	struct amd_iommu_page_table *pt_parent;
+} amd_iommu_page_table_t;
+
+typedef struct amd_iommu_domain {
+	domain_id_t d_domainid;
+	uint64_t d_pgtable_root_4K;
+	int64_t d_ref;
+	vmem_t *d_vmem;
+	struct amd_iommu_domain *d_prev;
+	struct amd_iommu_domain *d_next;
+} amd_iommu_domain_t;
+
+int amd_iommu_map_pa2va(amd_iommu_t *iommu, dev_info_t *rdip,
+    ddi_dma_attr_t *attrp, struct ddi_dma_req *dmareq,
+    uint64_t pa, uint64_t pa_sz, map_type_t type,
+    uint64_t *start_vap, int km_flags);
+int amd_iommu_unmap_va(amd_iommu_t *iommu, dev_info_t *rdip,
+    uint64_t va, uint64_t va_sz, map_type_t type);
+void amd_iommu_init_page_tables(amd_iommu_t *iommu);
+void amd_iommu_fini_page_tables(amd_iommu_t *iommu);
+void amd_iommu_set_passthru(amd_iommu_t *iommu, dev_info_t *rdip);
+
+#endif /* _KERNEL */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif	/* _AMD_IOMMU_PAGE_TABLES_H */
--- a/usr/src/uts/i86pc/sys/Makefile	Wed Sep 16 22:03:43 2009 -0700
+++ b/usr/src/uts/i86pc/sys/Makefile	Mon Sep 14 21:48:21 2009 -0700
@@ -38,6 +38,7 @@
 
 HDRS=  \
 	acpidev.h	\
+	amd_iommu.h	\
 	asm_misc.h	\
 	clock.h		\
 	cram.h		\
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/i86pc/sys/amd_iommu.h	Mon Sep 14 21:48:21 2009 -0700
@@ -0,0 +1,56 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef	_SYS_AMD_IOMMU_H
+#define	_SYS_AMD_IOMMU_H
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#include <sys/sunddi.h>
+#include <sys/iommulib.h>
+
+#ifdef _KERNEL
+
+typedef struct amd_iommu_state {
+	int	aioms_instance;			/* instance */
+	dev_info_t *aioms_devi;			/* dip */
+	struct amd_iommu *aioms_iommu_start;	/* start of list of IOMMUs */
+	struct amd_iommu *aioms_iommu_end;	/* end of list of IOMMUs */
+	int aioms_nunits;			/* # of IOMMUs in function */
+} amd_iommu_state_t;
+
+int amd_iommu_setup(dev_info_t *dip, amd_iommu_state_t *statep);
+int amd_iommu_teardown(dev_info_t *dip, amd_iommu_state_t *statep);
+int amd_iommu_lookup_src_bdf(uint16_t bdf, uint16_t *src_bdfp);
+
+#endif	/* _KERNEL */
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _SYS_AMD_IOMMU_H */
--- a/usr/src/uts/intel/Makefile.files	Wed Sep 16 22:03:43 2009 -0700
+++ b/usr/src/uts/intel/Makefile.files	Mon Sep 14 21:48:21 2009 -0700
@@ -252,17 +252,6 @@
 AMR_OBJS = amr.o
 
 #
-#	AMD_IOMMU module
-#
-AMD_IOMMU_OBJS = 		\
-	amd_iommu.o		\
-	amd_iommu_impl.o	\
-	amd_iommu_acpi.o	\
-	amd_iommu_cmd.o		\
-	amd_iommu_log.o		\
-	amd_iommu_page_tables.o	
-
-#
 #	IOMMULIB module
 #
 IOMMULIB_OBJS = iommulib.o
--- a/usr/src/uts/intel/Makefile.intel.shared	Wed Sep 16 22:03:43 2009 -0700
+++ b/usr/src/uts/intel/Makefile.intel.shared	Mon Sep 14 21:48:21 2009 -0700
@@ -401,7 +401,6 @@
 DRV_KMODS	+= rtls
 DRV_KMODS	+= sfe
 DRV_KMODS	+= amd8111s
-DRV_KMODS	+= amd_iommu
 DRV_KMODS	+= igb
 DRV_KMODS	+= ixgbe
 DRV_KMODS	+= vr
--- a/usr/src/uts/intel/Makefile.rules	Wed Sep 16 22:03:43 2009 -0700
+++ b/usr/src/uts/intel/Makefile.rules	Mon Sep 14 21:48:21 2009 -0700
@@ -153,10 +153,6 @@
 	$(COMPILE.c) -o $@ $<
 	$(CTFCONVERT_O)
 
-$(OBJS_DIR)/%.o:		$(UTSBASE)/intel/io/amd_iommu/%.c
-	$(COMPILE.c) -o $@ $<
-	$(CTFCONVERT_O)
-
 $(OBJS_DIR)/%.o:		$(UTSBASE)/intel/io/amr/%.c
 	$(COMPILE.c) -o $@ $<
 	$(CTFCONVERT_O)
@@ -394,9 +390,6 @@
 $(LINTS_DIR)/%.ln:		$(UTSBASE)/intel/io/amd8111s/%.c
 	@($(LHEAD) $(LINT.c) $< $(LTAIL))
 
-$(LINTS_DIR)/%.ln:		$(UTSBASE)/intel/io/amd_iommu/%.c
-	@($(LHEAD) $(LINT.c) $< $(LTAIL))
-
 $(LINTS_DIR)/%.ln:		$(UTSBASE)/intel/io/amr/%.c
 	@($(LHEAD) $(LINT.c) $< $(LTAIL))
 
--- a/usr/src/uts/intel/amd_iommu/Makefile	Wed Sep 16 22:03:43 2009 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,84 +0,0 @@
-#
-# CDDL HEADER START
-#
-# The contents of this file are subject to the terms of the
-# Common Development and Distribution License (the "License").
-# You may not use this file except in compliance with the License.
-#
-# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
-# or http://www.opensolaris.org/os/licensing.
-# See the License for the specific language governing permissions
-# and limitations under the License.
-#
-# When distributing Covered Code, include this CDDL HEADER in each
-# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
-# If applicable, add the following below this CDDL HEADER, with the
-# fields enclosed by brackets "[]" replaced with your own identifying
-# information: Portions Copyright [yyyy] [name of copyright owner]
-#
-# CDDL HEADER END
-#
-# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
-# Use is subject to license terms.
-#
-#	This Makefile drives production of the amd_iommu driver kernel module.
-#
-#	intel implementation architecture dependent
-#
-
-#
-#	Path to the base of the uts directory tree (usually /usr/src/uts).
-#
-UTSBASE	= ../..
-
-#
-#	Define the module and object file sets.
-#
-MODULE		= amd_iommu
-OBJECTS		= $(AMD_IOMMU_OBJS:%=$(OBJS_DIR)/%)
-LINTS		= $(AMD_IOMMU_OBJS:%.o=$(LINTS_DIR)/%.ln)
-ROOTMODULE	= $(ROOT_DRV_DIR)/$(MODULE)
-CONF_SRCDIR     = $(UTSBASE)/intel/io/amd_iommu
-
-#
-#	Include common rules.
-#
-include $(UTSBASE)/intel/Makefile.intel
-
-#
-#	Define targets
-#
-ALL_TARGET	= $(BINARY) $(SRC_CONFILE)
-LINT_TARGET	= $(MODULE).lint
-INSTALL_TARGET	= $(BINARY) $(ROOTMODULE) $(ROOT_CONFFILE)
-
-#
-# depends on misc/iommulib and misc/acpica
-#
-LDFLAGS         += -dy -Nmisc/iommulib -Nmisc/acpica
-
-#
-#	Default build targets.
-#
-.KEEP_STATE:
-
-def:		$(DEF_DEPS)
-
-all:		$(ALL_DEPS)
-
-clean:		$(CLEAN_DEPS)
-
-clobber:	$(CLOBBER_DEPS)
-
-lint:		$(LINT_DEPS)
-
-modlintlib:	$(MODLINTLIB_DEPS)
-
-clean.lint:	$(CLEAN_LINT_DEPS)
-
-install:	$(INSTALL_DEPS) $(CONF_INSTALL_DEPS)
-
-#
-#	Include common targets.
-#
-include $(UTSBASE)/intel/Makefile.targ
--- a/usr/src/uts/intel/io/amd_iommu/amd_iommu.c	Wed Sep 16 22:03:43 2009 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,440 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-
-/*
- * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#include <sys/types.h>
-#include <sys/file.h>
-#include <sys/errno.h>
-#include <sys/open.h>
-#include <sys/stat.h>
-#include <sys/cred.h>
-#include <sys/modctl.h>
-#include <sys/conf.h>
-#include <sys/devops.h>
-#include <sys/ddi.h>
-
-#include <sys/amd_iommu.h>
-#include "amd_iommu_impl.h"
-#include "amd_iommu_acpi.h"
-
-
-#define	AMD_IOMMU_MINOR2INST(x)	(x)
-#define	AMD_IOMMU_INST2MINOR(x)	(x)
-#define	AMD_IOMMU_NODETYPE	"ddi_iommu"
-#define	AMD_IOMMU_MINOR_NAME	"amd-iommu"
-
-static int amd_iommu_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg,
-    void **result);
-static int amd_iommu_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
-static int amd_iommu_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
-static int amd_iommu_open(dev_t *devp, int flag, int otyp, cred_t *credp);
-static int amd_iommu_close(dev_t dev, int flag, int otyp, cred_t *credp);
-static int amd_iommu_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
-    cred_t *credp, int *rvalp);
-
-static struct cb_ops amd_iommu_cb_ops = {
-	amd_iommu_open,		/* cb_open */
-	amd_iommu_close,	/* cb_close */
-	nodev,			/* cb_strategy */
-	nodev,			/* cb_print */
-	nodev,			/* cb_dump */
-	nodev,			/* cb_read */
-	nodev,			/* cb_write */
-	amd_iommu_ioctl,	/* cb_ioctl */
-	nodev,			/* cb_devmap */
-	nodev,			/* cb_mmap */
-	nodev,			/* cb_segmap */
-	nochpoll,		/* cb_chpoll */
-	ddi_prop_op,		/* cb_prop_op */
-	NULL,			/* cb_str */
-	D_NEW | D_MP,		/* cb_flag */
-	CB_REV,			/* cb_rev */
-	nodev,			/* cb_aread */
-	nodev			/* cb_awrite */
-};
-
-static struct dev_ops amd_iommu_dev_ops = {
-	DEVO_REV,		/* devo_rev */
-	0,			/* devo_refcnt */
-	amd_iommu_getinfo,	/* devo_getinfo */
-	nulldev,		/* devo_identify */
-	nulldev,		/* devo_probe */
-	amd_iommu_attach,	/* devo_attach */
-	amd_iommu_detach,	/* devo_detach */
-	nodev,			/* devo_reset */
-	&amd_iommu_cb_ops,	/* devo_cb_ops */
-	NULL,			/* devo_bus_ops */
-	nulldev			/* devo_power */
-};
-
-static struct modldrv modldrv = {
-	&mod_driverops,
-	"AMD IOMMU 0.1",
-	&amd_iommu_dev_ops
-};
-
-static struct modlinkage modlinkage = {
-	MODREV_1,
-	(void *)&modldrv,
-	NULL
-};
-
-amd_iommu_debug_t amd_iommu_debug;
-kmutex_t amd_iommu_global_lock;
-const char *amd_iommu_modname = "amd_iommu";
-amd_iommu_alias_t **amd_iommu_alias;
-amd_iommu_page_table_hash_t amd_iommu_page_table_hash;
-static void *amd_iommu_statep;
-int amd_iommu_64bit_bug;
-int amd_iommu_unity_map;
-int amd_iommu_no_RW_perms;
-int amd_iommu_no_unmap;
-int amd_iommu_pageva_inval_all;
-int amd_iommu_disable;		/* disable IOMMU */
-char *amd_iommu_disable_list;	/* list of drivers bypassing IOMMU */
-
-int
-_init(void)
-{
-	int error = ENOTSUP;
-
-#if defined(__amd64) && !defined(__xpv)
-
-	error = ddi_soft_state_init(&amd_iommu_statep,
-	    sizeof (struct amd_iommu_state), 1);
-	if (error) {
-		cmn_err(CE_WARN, "%s: _init: failed to init soft state.",
-		    amd_iommu_modname);
-		return (error);
-	}
-
-	if (amd_iommu_acpi_init() != DDI_SUCCESS) {
-		if (amd_iommu_debug) {
-			cmn_err(CE_WARN, "%s: _init: ACPI init failed.",
-			    amd_iommu_modname);
-		}
-		ddi_soft_state_fini(&amd_iommu_statep);
-		return (ENOTSUP);
-	}
-
-	amd_iommu_read_boot_props();
-
-	if (amd_iommu_page_table_hash_init(&amd_iommu_page_table_hash)
-	    != DDI_SUCCESS) {
-		cmn_err(CE_WARN, "%s: _init: Page table hash init failed.",
-		    amd_iommu_modname);
-		if (amd_iommu_disable_list) {
-			kmem_free(amd_iommu_disable_list,
-			    strlen(amd_iommu_disable_list) + 1);
-			amd_iommu_disable_list = NULL;
-		}
-		amd_iommu_acpi_fini();
-		ddi_soft_state_fini(&amd_iommu_statep);
-		amd_iommu_statep = NULL;
-		return (EFAULT);
-	}
-
-	error = mod_install(&modlinkage);
-	if (error) {
-		cmn_err(CE_WARN, "%s: _init: mod_install failed.",
-		    amd_iommu_modname);
-		amd_iommu_page_table_hash_fini(&amd_iommu_page_table_hash);
-		if (amd_iommu_disable_list) {
-			kmem_free(amd_iommu_disable_list,
-			    strlen(amd_iommu_disable_list) + 1);
-			amd_iommu_disable_list = NULL;
-		}
-		amd_iommu_acpi_fini();
-		ddi_soft_state_fini(&amd_iommu_statep);
-		amd_iommu_statep = NULL;
-		return (error);
-	}
-	error = 0;
-#endif
-
-	return (error);
-}
-
-int
-_info(struct modinfo *modinfop)
-{
-	return (mod_info(&modlinkage, modinfop));
-}
-
-int
-_fini(void)
-{
-	int error;
-
-	error = mod_remove(&modlinkage);
-	if (error)
-		return (error);
-
-	amd_iommu_page_table_hash_fini(&amd_iommu_page_table_hash);
-	if (amd_iommu_disable_list) {
-		kmem_free(amd_iommu_disable_list,
-		    strlen(amd_iommu_disable_list) + 1);
-		amd_iommu_disable_list = NULL;
-	}
-	amd_iommu_acpi_fini();
-	ddi_soft_state_fini(&amd_iommu_statep);
-	amd_iommu_statep = NULL;
-
-	return (0);
-}
-
-/*ARGSUSED*/
-static int
-amd_iommu_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
-{
-	struct amd_iommu_state *statep;
-
-	ASSERT(result);
-
-	*result = NULL;
-
-	switch (cmd) {
-	case DDI_INFO_DEVT2DEVINFO:
-		statep = ddi_get_soft_state(amd_iommu_statep,
-		    AMD_IOMMU_MINOR2INST(getminor((dev_t)arg)));
-		if (statep) {
-			*result = statep->aioms_devi;
-			return (DDI_SUCCESS);
-		}
-		break;
-	case DDI_INFO_DEVT2INSTANCE:
-		*result = (void *)(uintptr_t)
-		    AMD_IOMMU_MINOR2INST(getminor((dev_t)arg));
-		return (DDI_SUCCESS);
-	}
-
-	return (DDI_FAILURE);
-}
-
-static int
-amd_iommu_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
-{
-	int instance = ddi_get_instance(dip);
-	const char *driver = ddi_driver_name(dip);
-	struct amd_iommu_state *statep;
-
-	ASSERT(instance >= 0);
-	ASSERT(driver);
-
-	switch (cmd) {
-	case DDI_ATTACH:
-		if (ddi_soft_state_zalloc(amd_iommu_statep, instance)
-		    != DDI_SUCCESS) {
-			cmn_err(CE_WARN, "Unable to allocate soft state for "
-			    "%s%d", driver, instance);
-			return (DDI_FAILURE);
-		}
-
-		statep = ddi_get_soft_state(amd_iommu_statep, instance);
-		if (statep == NULL) {
-			cmn_err(CE_WARN, "Unable to get soft state for "
-			    "%s%d", driver, instance);
-			ddi_soft_state_free(amd_iommu_statep, instance);
-			return (DDI_FAILURE);
-		}
-
-		if (ddi_create_minor_node(dip, AMD_IOMMU_MINOR_NAME, S_IFCHR,
-		    AMD_IOMMU_INST2MINOR(instance), AMD_IOMMU_NODETYPE,
-		    0) != DDI_SUCCESS) {
-			cmn_err(CE_WARN, "Unable to create minor node for "
-			    "%s%d", driver, instance);
-			ddi_remove_minor_node(dip, NULL);
-			ddi_soft_state_free(amd_iommu_statep, instance);
-			return (DDI_FAILURE);
-		}
-
-		statep->aioms_devi = dip;
-		statep->aioms_instance = instance;
-		statep->aioms_iommu_start = NULL;
-		statep->aioms_iommu_end = NULL;
-
-		amd_iommu_lookup_conf_props(dip);
-
-		if (amd_iommu_disable_list) {
-			cmn_err(CE_NOTE, "AMD IOMMU disabled for the following"
-			    " drivers:\n%s", amd_iommu_disable_list);
-		}
-
-		if (amd_iommu_disable) {
-			cmn_err(CE_NOTE, "AMD IOMMU disabled by user");
-		} else if (amd_iommu_setup(dip, statep) != DDI_SUCCESS) {
-			cmn_err(CE_WARN, "Unable to initialize AMD IOMMU "
-			    "%s%d", driver, instance);
-			ddi_remove_minor_node(dip, NULL);
-			ddi_soft_state_free(amd_iommu_statep, instance);
-			return (DDI_FAILURE);
-		}
-
-		ddi_report_dev(dip);
-
-		return (DDI_SUCCESS);
-
-	case DDI_RESUME:
-		return (DDI_SUCCESS);
-	default:
-		return (DDI_FAILURE);
-	}
-}
-
-static int
-amd_iommu_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
-{
-	int instance = ddi_get_instance(dip);
-	const char *driver = ddi_driver_name(dip);
-	struct amd_iommu_state *statep;
-
-	ASSERT(instance >= 0);
-	ASSERT(driver);
-
-	switch (cmd) {
-	case DDI_DETACH:
-		statep = ddi_get_soft_state(amd_iommu_statep, instance);
-		if (statep == NULL) {
-			cmn_err(CE_WARN, "%s%d: Cannot get soft state",
-			    driver, instance);
-			return (DDI_FAILURE);
-		}
-		return (DDI_FAILURE);
-	case DDI_SUSPEND:
-		return (DDI_SUCCESS);
-	default:
-		return (DDI_FAILURE);
-	}
-}
-
-/*ARGSUSED*/
-static int
-amd_iommu_open(dev_t *devp, int flag, int otyp, cred_t *credp)
-{
-	int instance = AMD_IOMMU_MINOR2INST(getminor(*devp));
-	struct amd_iommu_state *statep;
-	const char *f = "amd_iommu_open";
-
-	if (instance < 0) {
-		cmn_err(CE_WARN, "%s: invalid instance %d",
-		    f, instance);
-		return (ENXIO);
-	}
-
-	if (!(flag & (FREAD|FWRITE))) {
-		cmn_err(CE_WARN, "%s: invalid flags %d", f, flag);
-		return (EINVAL);
-	}
-
-	if (otyp != OTYP_CHR) {
-		cmn_err(CE_WARN, "%s: invalid otyp %d", f, otyp);
-		return (EINVAL);
-	}
-
-	statep = ddi_get_soft_state(amd_iommu_statep, instance);
-	if (statep == NULL) {
-		cmn_err(CE_WARN, "%s: cannot get soft state: instance %d",
-		    f, instance);
-		return (ENXIO);
-	}
-
-	ASSERT(statep->aioms_instance == instance);
-
-	return (0);
-}
-
-/*ARGSUSED*/
-static int
-amd_iommu_close(dev_t dev, int flag, int otyp, cred_t *credp)
-{
-	int instance = AMD_IOMMU_MINOR2INST(getminor(dev));
-	struct amd_iommu_state *statep;
-	const char *f = "amd_iommu_close";
-
-	if (instance < 0) {
-		cmn_err(CE_WARN, "%s: invalid instance %d", f, instance);
-		return (ENXIO);
-	}
-
-	if (!(flag & (FREAD|FWRITE))) {
-		cmn_err(CE_WARN, "%s: invalid flags %d", f, flag);
-		return (EINVAL);
-	}
-
-	if (otyp != OTYP_CHR) {
-		cmn_err(CE_WARN, "%s: invalid otyp %d", f, otyp);
-		return (EINVAL);
-	}
-
-	statep = ddi_get_soft_state(amd_iommu_statep, instance);
-	if (statep == NULL) {
-		cmn_err(CE_WARN, "%s: cannot get soft state: instance %d",
-		    f, instance);
-		return (ENXIO);
-	}
-
-	ASSERT(statep->aioms_instance == instance);
-	return (0);
-
-}
-
-/*ARGSUSED*/
-static int
-amd_iommu_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
-    int *rvalp)
-{
-	int instance = AMD_IOMMU_MINOR2INST(getminor(dev));
-	struct amd_iommu_state *statep;
-	const char *f = "amd_iommu_ioctl";
-
-	ASSERT(rvalp);
-
-	if (instance < 0) {
-		cmn_err(CE_WARN, "%s: invalid instance %d", f, instance);
-		return (ENXIO);
-	}
-
-	if (!(mode & (FREAD|FWRITE))) {
-		cmn_err(CE_WARN, "%s: invalid mode %d", f, mode);
-		return (EINVAL);
-	}
-
-	if (mode & FKIOCTL) {
-		cmn_err(CE_WARN, "%s: FKIOCTL unsupported mode %d", f, mode);
-		return (EINVAL);
-	}
-
-	statep = ddi_get_soft_state(amd_iommu_statep, instance);
-	if (statep == NULL) {
-		cmn_err(CE_WARN, "%s: cannot get soft state: instance %d",
-		    f, instance);
-		return (ENXIO);
-	}
-
-	ASSERT(statep->aioms_instance == instance);
-
-	return (ENOTTY);
-}
--- a/usr/src/uts/intel/io/amd_iommu/amd_iommu.conf	Wed Sep 16 22:03:43 2009 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,27 +0,0 @@
-#
-# CDDL HEADER START
-#
-# The contents of this file are subject to the terms of the
-# Common Development and Distribution License (the "License").
-# You may not use this file except in compliance with the License.
-#
-# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
-# or http://www.opensolaris.org/os/licensing.
-# See the License for the specific language governing permissions
-# and limitations under the License.
-#
-# When distributing Covered Code, include this CDDL HEADER in each
-# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
-# If applicable, add the following below this CDDL HEADER, with the
-# fields enclosed by brackets "[]" replaced with your own identifying
-# information: Portions Copyright [yyyy] [name of copyright owner]
-#
-# CDDL HEADER END
-#
-#
-# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
-# Use is subject to license terms.
-#
-#
-# To enable the IOMMU, set this to "yes" and rebuild the boot archive
-amd-iommu="yes";
--- a/usr/src/uts/intel/io/amd_iommu/amd_iommu_acpi.c	Wed Sep 16 22:03:43 2009 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,951 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-
-/*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#include "amd_iommu_acpi.h"
-#include "amd_iommu_impl.h"
-
-static int create_acpi_hash(amd_iommu_acpi_t *acpi);
-static void amd_iommu_acpi_table_fini(amd_iommu_acpi_t **acpipp);
-
-static void dump_acpi_aliases(void);
-
-
-/*
- * Globals
- */
-static amd_iommu_acpi_global_t *amd_iommu_acpi_global;
-static amd_iommu_acpi_ivhd_t **amd_iommu_acpi_ivhd_hash;
-static amd_iommu_acpi_ivmd_t **amd_iommu_acpi_ivmd_hash;
-
-static int
-type_byte_size(char *cp)
-{
-	uint8_t type8 = *((uint8_t *)cp);
-	uint8_t len_bits;
-
-	len_bits = AMD_IOMMU_REG_GET8(&type8, AMD_IOMMU_ACPI_DEVENTRY_LEN);
-
-	switch (len_bits) {
-	case 0:
-		return (4);
-	case 1:
-		return (8);
-	case 2:
-		return (16);
-	case 3:
-		return (32);
-	default:
-		cmn_err(CE_WARN, "%s: Invalid deventry len: %d",
-		    amd_iommu_modname, len_bits);
-		return (len_bits);
-	}
-	/*NOTREACHED*/
-}
-
-static void
-process_4byte_deventry(ivhd_container_t *c, char *cp)
-{
-	int entry_type = *((uint8_t *)cp);
-	ivhd_deventry_t deventry = {0};
-	ivhd_deventry_t *devp;
-	uint8_t datsetting8;
-	align_16_t al = {0};
-	int i;
-
-	/* 4 byte entry */
-	deventry.idev_len = 4;
-	deventry.idev_deviceid = -1;
-	deventry.idev_src_deviceid = -1;
-
-	for (i = 0; i < 2; i++) {
-		al.ent8[i] = *((uint8_t *)&cp[i + 1]);
-	}
-
-	switch (entry_type) {
-	case 1:
-		deventry.idev_type = DEVENTRY_ALL;
-		break;
-	case 2:
-		deventry.idev_type = DEVENTRY_SELECT;
-		deventry.idev_deviceid = al.ent16;
-		break;
-	case 3:
-		deventry.idev_type = DEVENTRY_RANGE;
-		deventry.idev_deviceid = al.ent16;
-		break;
-	case 4:
-		deventry.idev_type = DEVENTRY_RANGE_END;
-		deventry.idev_deviceid = al.ent16;
-		ASSERT(cp[3] == 0);
-		break;
-	case 0:
-		ASSERT(al.ent16 == 0);
-		ASSERT(cp[3] == 0);
-		/*FALLTHROUGH*/
-	default:
-		return;
-	}
-
-	devp = kmem_alloc(sizeof (ivhd_deventry_t), KM_SLEEP);
-	*devp = deventry;
-
-	if (c->ivhdc_first_deventry == NULL)
-		c->ivhdc_first_deventry =  devp;
-	else
-		c->ivhdc_last_deventry->idev_next = devp;
-
-	c->ivhdc_last_deventry = devp;
-
-	if (entry_type == 4)
-		return;
-
-	datsetting8 = (*((uint8_t *)&cp[3]));
-
-	devp->idev_Lint1Pass = AMD_IOMMU_REG_GET8(&datsetting8,
-	    AMD_IOMMU_ACPI_LINT1PASS);
-
-	devp->idev_Lint0Pass = AMD_IOMMU_REG_GET8(&datsetting8,
-	    AMD_IOMMU_ACPI_LINT0PASS);
-
-	devp->idev_SysMgt = AMD_IOMMU_REG_GET8(&datsetting8,
-	    AMD_IOMMU_ACPI_SYSMGT);
-
-	ASSERT(AMD_IOMMU_REG_GET8(&datsetting8,
-	    AMD_IOMMU_ACPI_DATRSV) == 0);
-
-	devp->idev_NMIPass = AMD_IOMMU_REG_GET8(&datsetting8,
-	    AMD_IOMMU_ACPI_NMIPASS);
-
-	devp->idev_ExtIntPass = AMD_IOMMU_REG_GET8(&datsetting8,
-	    AMD_IOMMU_ACPI_EXTINTPASS);
-
-	devp->idev_INITPass = AMD_IOMMU_REG_GET8(&datsetting8,
-	    AMD_IOMMU_ACPI_INITPASS);
-}
-
-static void
-process_8byte_deventry(ivhd_container_t *c, char *cp)
-{
-	uint8_t datsetting8;
-	int entry_type = (uint8_t)*cp;
-	ivhd_deventry_t deventry = {0};
-	ivhd_deventry_t *devp;
-	align_16_t al1 = {0};
-	align_16_t al2 = {0};
-	align_32_t al3 = {0};
-	int i;
-
-	/* Length is 8 bytes */
-	deventry.idev_len = 8;
-	deventry.idev_deviceid = -1;
-	deventry.idev_src_deviceid = -1;
-
-	for (i = 0; i < 2; i++) {
-		al1.ent8[i] = *((uint8_t *)&cp[i+1]);
-		al2.ent8[i] = *((uint8_t *)&cp[i+5]);
-	}
-
-	datsetting8 = *((uint8_t *)&cp[3]);
-
-	switch (entry_type) {
-	case 66:
-		deventry.idev_type = DEVENTRY_ALIAS_SELECT;
-		deventry.idev_deviceid = al1.ent16;
-		deventry.idev_src_deviceid = al2.ent16;
-		ASSERT(cp[4] == 0);
-		ASSERT(cp[7] == 0);
-		break;
-	case 67:
-		deventry.idev_type = DEVENTRY_ALIAS_RANGE;
-		deventry.idev_deviceid = al1.ent16;
-		deventry.idev_src_deviceid = al2.ent16;
-		ASSERT(cp[4] == 0);
-		ASSERT(cp[7] == 0);
-		break;
-	case 70:
-		deventry.idev_type = DEVENTRY_EXTENDED_SELECT;
-		deventry.idev_deviceid = al1.ent16;
-		break;
-	case 71:
-		deventry.idev_type = DEVENTRY_EXTENDED_RANGE;
-		deventry.idev_deviceid = al1.ent16;
-		break;
-	case 72:
-		deventry.idev_type = DEVENTRY_SPECIAL_DEVICE;
-		ASSERT(al1.ent16 == 0);
-		deventry.idev_deviceid = -1;
-		deventry.idev_handle = cp[4];
-		deventry.idev_variety = cp[7];
-		deventry.idev_src_deviceid = al2.ent16;
-		break;
-	default:
-#ifdef BROKEN_ASSERT
-		for (i = 0; i < 7; i++) {
-			ASSERT(cp[i] == 0);
-		}
-#endif
-		return;
-	}
-
-	devp = kmem_alloc(sizeof (ivhd_deventry_t), KM_SLEEP);
-	*devp = deventry;
-
-	if (c->ivhdc_first_deventry == NULL)
-		c->ivhdc_first_deventry =  devp;
-	else
-		c->ivhdc_last_deventry->idev_next = devp;
-
-	c->ivhdc_last_deventry = devp;
-
-	devp->idev_Lint1Pass = AMD_IOMMU_REG_GET8(&datsetting8,
-	    AMD_IOMMU_ACPI_LINT1PASS);
-
-	devp->idev_Lint0Pass = AMD_IOMMU_REG_GET8(&datsetting8,
-	    AMD_IOMMU_ACPI_LINT0PASS);
-
-	devp->idev_SysMgt = AMD_IOMMU_REG_GET8(&datsetting8,
-	    AMD_IOMMU_ACPI_SYSMGT);
-
-	ASSERT(AMD_IOMMU_REG_GET8(&datsetting8,
-	    AMD_IOMMU_ACPI_DATRSV) == 0);
-
-	devp->idev_NMIPass = AMD_IOMMU_REG_GET8(&datsetting8,
-	    AMD_IOMMU_ACPI_NMIPASS);
-
-	devp->idev_ExtIntPass = AMD_IOMMU_REG_GET8(&datsetting8,
-	    AMD_IOMMU_ACPI_EXTINTPASS);
-
-	devp->idev_INITPass = AMD_IOMMU_REG_GET8(&datsetting8,
-	    AMD_IOMMU_ACPI_INITPASS);
-
-	if (entry_type != 70 && entry_type != 71) {
-		return;
-	}
-
-	/* Type 70 and 71 */
-	for (i = 0; i < 4; i++) {
-		al3.ent8[i] = *((uint8_t *)&cp[i+4]);
-	}
-
-	devp->idev_AtsDisabled = AMD_IOMMU_REG_GET8(&al3.ent32,
-	    AMD_IOMMU_ACPI_ATSDISABLED);
-
-	ASSERT(AMD_IOMMU_REG_GET8(&al3.ent32, AMD_IOMMU_ACPI_EXTDATRSV) == 0);
-}
-
-static void
-process_ivhd(amd_iommu_acpi_t *acpi, ivhd_t *ivhdp)
-{
-	ivhd_container_t *c;
-	caddr_t ivhd_end;
-	caddr_t ivhd_tot_end;
-	caddr_t cp;
-
-	ASSERT(ivhdp->ivhd_type == 0x10);
-
-	c = kmem_zalloc(sizeof (ivhd_container_t), KM_SLEEP);
-	c->ivhdc_ivhd = kmem_alloc(sizeof (ivhd_t), KM_SLEEP);
-	*(c->ivhdc_ivhd) = *ivhdp;
-
-	if (acpi->acp_first_ivhdc == NULL)
-		acpi->acp_first_ivhdc = c;
-	else
-		acpi->acp_last_ivhdc->ivhdc_next = c;
-
-	acpi->acp_last_ivhdc = c;
-
-	ivhd_end = (caddr_t)ivhdp + sizeof (ivhd_t);
-	ivhd_tot_end = (caddr_t)ivhdp + ivhdp->ivhd_len;
-
-	for (cp = ivhd_end; cp < ivhd_tot_end; cp += type_byte_size(cp)) {
-		/* 16 byte and 32 byte size are currently reserved */
-		switch (type_byte_size(cp)) {
-		case 4:
-			process_4byte_deventry(c, cp);
-			break;
-		case 8:
-			process_8byte_deventry(c, cp);
-			break;
-		case 16:
-		case 32:
-			/* Reserved */
-			break;
-		default:
-			cmn_err(CE_WARN, "%s: unsupported length for device "
-			    "entry in ACPI IVRS table's IVHD entry",
-			    amd_iommu_modname);
-			break;
-		}
-	}
-}
-
-static void
-process_ivmd(amd_iommu_acpi_t *acpi, ivmd_t *ivmdp)
-{
-	ivmd_container_t *c;
-
-	ASSERT(ivmdp->ivmd_type != 0x10);
-
-	c = kmem_zalloc(sizeof (ivmd_container_t), KM_SLEEP);
-	c->ivmdc_ivmd = kmem_alloc(sizeof (ivmd_t), KM_SLEEP);
-	*(c->ivmdc_ivmd) = *ivmdp;
-
-	if (acpi->acp_first_ivmdc == NULL)
-		acpi->acp_first_ivmdc = c;
-	else
-		acpi->acp_last_ivmdc->ivmdc_next = c;
-
-	acpi->acp_last_ivmdc = c;
-}
-
-int
-amd_iommu_acpi_init(void)
-{
-	ivrs_t *ivrsp;
-	caddr_t ivrsp_end;
-	caddr_t table_end;
-	caddr_t cp;
-	uint8_t type8;
-	amd_iommu_acpi_t *acpi;
-	align_ivhd_t al_vhd = {0};
-	align_ivmd_t al_vmd = {0};
-
-	if (AcpiGetTable(IVRS_SIG, 1, (ACPI_TABLE_HEADER **)&ivrsp) != AE_OK) {
-		cmn_err(CE_NOTE, "!amd_iommu: No AMD IOMMU ACPI IVRS table");
-		return (DDI_FAILURE);
-	}
-
-	/*
-	 * Reserved field must be 0
-	 */
-	ASSERT(ivrsp->ivrs_resv == 0);
-
-	ASSERT(AMD_IOMMU_REG_GET32(&ivrsp->ivrs_ivinfo,
-	    AMD_IOMMU_ACPI_IVINFO_RSV1) == 0);
-	ASSERT(AMD_IOMMU_REG_GET32(&ivrsp->ivrs_ivinfo,
-	    AMD_IOMMU_ACPI_IVINFO_RSV2) == 0);
-
-	ivrsp_end = (caddr_t)ivrsp + sizeof (struct ivrs);
-	table_end = (caddr_t)ivrsp + ivrsp->ivrs_hdr.Length;
-
-	acpi = kmem_zalloc(sizeof (amd_iommu_acpi_t), KM_SLEEP);
-	acpi->acp_ivrs = kmem_alloc(sizeof (ivrs_t), KM_SLEEP);
-	*(acpi->acp_ivrs) = *ivrsp;
-
-	for (cp = ivrsp_end; cp < table_end; cp += (al_vhd.ivhdp)->ivhd_len) {
-		al_vhd.cp = cp;
-		if (al_vhd.ivhdp->ivhd_type == 0x10)
-			process_ivhd(acpi, al_vhd.ivhdp);
-	}
-
-	for (cp = ivrsp_end; cp < table_end; cp += (al_vmd.ivmdp)->ivmd_len) {
-		al_vmd.cp = cp;
-		type8 = al_vmd.ivmdp->ivmd_type;
-		if (type8 == 0x20 || type8 == 0x21 || type8 == 0x22)
-			process_ivmd(acpi, al_vmd.ivmdp);
-	}
-
-	if (create_acpi_hash(acpi) != DDI_SUCCESS) {
-		return (DDI_FAILURE);
-	}
-
-	amd_iommu_acpi_table_fini(&acpi);
-
-	ASSERT(acpi == NULL);
-
-	if (amd_iommu_debug & AMD_IOMMU_DEBUG_ACPI) {
-		dump_acpi_aliases();
-		debug_enter("dump");
-	}
-
-	return (DDI_SUCCESS);
-}
-
-static ivhd_deventry_t *
-free_ivhd_deventry(ivhd_deventry_t *devp)
-{
-	ivhd_deventry_t *next = devp->idev_next;
-
-	kmem_free(devp, sizeof (ivhd_deventry_t));
-
-	return (next);
-}
-
-static ivhd_container_t *
-free_ivhd_container(ivhd_container_t *ivhdcp)
-{
-	ivhd_container_t *next = ivhdcp->ivhdc_next;
-	ivhd_deventry_t *devp;
-
-	for (devp = ivhdcp->ivhdc_first_deventry; devp; ) {
-		devp = free_ivhd_deventry(devp);
-	}
-
-	kmem_free(ivhdcp->ivhdc_ivhd, sizeof (ivhd_t));
-	kmem_free(ivhdcp, sizeof (ivhd_container_t));
-
-	return (next);
-}
-
-static ivmd_container_t *
-free_ivmd_container(ivmd_container_t *ivmdcp)
-{
-	ivmd_container_t *next = ivmdcp->ivmdc_next;
-
-	kmem_free(ivmdcp->ivmdc_ivmd, sizeof (ivmd_t));
-	kmem_free(ivmdcp, sizeof (ivmd_container_t));
-
-	return (next);
-}
-
-void
-amd_iommu_acpi_fini(void)
-{
-}
-
-/*
- * TODO: Do we need to free the ACPI table obtained from AcpiGetTable()?
- */
-static void
-amd_iommu_acpi_table_fini(amd_iommu_acpi_t **acpipp)
-{
-	amd_iommu_acpi_t *acpi = *acpipp;
-	ivhd_container_t *ivhdcp;
-	ivmd_container_t *ivmdcp;
-
-	ASSERT(acpi);
-
-	for (ivhdcp = acpi->acp_first_ivhdc; ivhdcp; ) {
-		ivhdcp = free_ivhd_container(ivhdcp);
-	}
-	for (ivmdcp = acpi->acp_first_ivmdc; ivmdcp; ) {
-		ivmdcp = free_ivmd_container(ivmdcp);
-	}
-
-	kmem_free(acpi->acp_ivrs, sizeof (struct ivrs));
-	kmem_free(acpi, sizeof (amd_iommu_acpi_t));
-
-	*acpipp = NULL;
-}
-
-static uint16_t
-deviceid_hashfn(uint16_t deviceid)
-{
-	return (deviceid % AMD_IOMMU_ACPI_INFO_HASH_SZ);
-}
-
-static void
-add_deventry_info(ivhd_t *ivhdp, ivhd_deventry_t *deventry,
-    amd_iommu_acpi_ivhd_t **hash)
-{
-	static amd_iommu_acpi_ivhd_t *last;
-	amd_iommu_acpi_ivhd_t *acpi_ivhdp;
-	uint8_t uint8_flags;
-	uint16_t uint16_info;
-	uint16_t idx;
-
-	if (deventry->idev_type == DEVENTRY_RANGE_END) {
-		ASSERT(last);
-		acpi_ivhdp = last;
-		last = NULL;
-		ASSERT(acpi_ivhdp->ach_dev_type == DEVENTRY_RANGE ||
-		    acpi_ivhdp->ach_dev_type == DEVENTRY_ALIAS_RANGE ||
-		    acpi_ivhdp->ach_dev_type == DEVENTRY_EXTENDED_RANGE);
-		ASSERT(acpi_ivhdp->ach_deviceid_end == -1);
-		acpi_ivhdp->ach_deviceid_end = deventry->idev_deviceid;
-		/* TODO ASSERT data is 0 */
-		return;
-	}
-
-	ASSERT(last == NULL);
-	acpi_ivhdp = kmem_zalloc(sizeof (*acpi_ivhdp), KM_SLEEP);
-
-	uint8_flags = ivhdp->ivhd_flags;
-
-#ifdef BROKEN_ASSERT
-	ASSERT(AMD_IOMMU_REG_GET8(&uint8_flags,
-	    AMD_IOMMU_ACPI_IVHD_FLAGS_RSV) == 0);
-#endif
-
-	acpi_ivhdp->ach_IotlbSup = AMD_IOMMU_REG_GET8(&uint8_flags,
-	    AMD_IOMMU_ACPI_IVHD_FLAGS_IOTLBSUP);
-	acpi_ivhdp->ach_Isoc = AMD_IOMMU_REG_GET8(&uint8_flags,
-	    AMD_IOMMU_ACPI_IVHD_FLAGS_ISOC);
-	acpi_ivhdp->ach_ResPassPW = AMD_IOMMU_REG_GET8(&uint8_flags,
-	    AMD_IOMMU_ACPI_IVHD_FLAGS_RESPASSPW);
-	acpi_ivhdp->ach_PassPW = AMD_IOMMU_REG_GET8(&uint8_flags,
-	    AMD_IOMMU_ACPI_IVHD_FLAGS_PASSPW);
-	acpi_ivhdp->ach_HtTunEn = AMD_IOMMU_REG_GET8(&uint8_flags,
-	    AMD_IOMMU_ACPI_IVHD_FLAGS_HTTUNEN);
-
-	/* IVHD fields */
-	acpi_ivhdp->ach_IOMMU_deviceid = ivhdp->ivhd_deviceid;
-	acpi_ivhdp->ach_IOMMU_cap_off = ivhdp->ivhd_cap_off;
-	acpi_ivhdp->ach_IOMMU_reg_base = ivhdp->ivhd_reg_base;
-	acpi_ivhdp->ach_IOMMU_pci_seg = ivhdp->ivhd_pci_seg;
-
-	/* IVHD IOMMU info fields */
-	uint16_info = ivhdp->ivhd_iommu_info;
-
-#ifdef BROKEN_ASSERT
-	ASSERT(AMD_IOMMU_REG_GET16(&uint16_info,
-	    AMD_IOMMU_ACPI_IOMMU_INFO_RSV1) == 0);
-#endif
-
-	acpi_ivhdp->ach_IOMMU_UnitID = AMD_IOMMU_REG_GET16(&uint16_info,
-	    AMD_IOMMU_ACPI_IOMMU_INFO_UNITID);
-	ASSERT(AMD_IOMMU_REG_GET16(&uint16_info,
-	    AMD_IOMMU_ACPI_IOMMU_INFO_RSV2) == 0);
-	acpi_ivhdp->ach_IOMMU_MSInum = AMD_IOMMU_REG_GET16(&uint16_info,
-	    AMD_IOMMU_ACPI_IOMMU_INFO_MSINUM);
-
-	/* Initialize  deviceids to -1 */
-	acpi_ivhdp->ach_deviceid_start = -1;
-	acpi_ivhdp->ach_deviceid_end = -1;
-	acpi_ivhdp->ach_src_deviceid = -1;
-
-	/* All range type entries are put on the last hash entry */
-	switch (deventry->idev_type) {
-	case DEVENTRY_ALL:
-		acpi_ivhdp->ach_deviceid_start = 0;
-		acpi_ivhdp->ach_deviceid_end = (uint16_t)-1;
-		acpi_ivhdp->ach_dev_type = DEVENTRY_ALL;
-		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
-		break;
-	case DEVENTRY_SELECT:
-		acpi_ivhdp->ach_deviceid_start = deventry->idev_deviceid;
-		acpi_ivhdp->ach_deviceid_end = deventry->idev_deviceid;
-		acpi_ivhdp->ach_dev_type = DEVENTRY_SELECT;
-		idx = deviceid_hashfn(deventry->idev_deviceid);
-		break;
-	case DEVENTRY_RANGE:
-		acpi_ivhdp->ach_deviceid_start = deventry->idev_deviceid;
-		acpi_ivhdp->ach_deviceid_end = -1;
-		acpi_ivhdp->ach_dev_type = DEVENTRY_RANGE;
-		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
-		last = acpi_ivhdp;
-		break;
-	case DEVENTRY_RANGE_END:
-		cmn_err(CE_PANIC, "%s: Unexpected Range End Deventry",
-		    amd_iommu_modname);
-		/*NOTREACHED*/
-	case DEVENTRY_ALIAS_SELECT:
-		acpi_ivhdp->ach_deviceid_start = deventry->idev_deviceid;
-		acpi_ivhdp->ach_deviceid_end = deventry->idev_deviceid;
-		acpi_ivhdp->ach_src_deviceid = deventry->idev_src_deviceid;
-		acpi_ivhdp->ach_dev_type = DEVENTRY_ALIAS_SELECT;
-		idx = deviceid_hashfn(deventry->idev_deviceid);
-		break;
-	case DEVENTRY_ALIAS_RANGE:
-		acpi_ivhdp->ach_deviceid_start = deventry->idev_deviceid;
-		acpi_ivhdp->ach_deviceid_end = -1;
-		acpi_ivhdp->ach_src_deviceid = deventry->idev_src_deviceid;
-		acpi_ivhdp->ach_dev_type = DEVENTRY_ALIAS_RANGE;
-		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
-		last = acpi_ivhdp;
-		break;
-	case DEVENTRY_EXTENDED_SELECT:
-		acpi_ivhdp->ach_deviceid_start = deventry->idev_deviceid;
-		acpi_ivhdp->ach_deviceid_end = deventry->idev_deviceid;
-		acpi_ivhdp->ach_dev_type = DEVENTRY_EXTENDED_SELECT;
-		idx = deviceid_hashfn(deventry->idev_deviceid);
-		break;
-	case DEVENTRY_EXTENDED_RANGE:
-		acpi_ivhdp->ach_deviceid_start = deventry->idev_deviceid;
-		acpi_ivhdp->ach_deviceid_end = -1;
-		acpi_ivhdp->ach_dev_type = DEVENTRY_EXTENDED_RANGE;
-		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
-		last = acpi_ivhdp;
-		break;
-	case DEVENTRY_SPECIAL_DEVICE:
-		acpi_ivhdp->ach_deviceid_start = -1;
-		acpi_ivhdp->ach_deviceid_end = -1;
-		acpi_ivhdp->ach_src_deviceid = deventry->idev_src_deviceid;
-		acpi_ivhdp->ach_special_handle = deventry->idev_handle;
-		acpi_ivhdp->ach_special_variety = deventry->idev_variety;
-		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
-		break;
-	default:
-		cmn_err(CE_PANIC, "%s: Unsupported deventry type",
-		    amd_iommu_modname);
-		/*NOTREACHED*/
-	}
-
-	acpi_ivhdp->ach_Lint1Pass = deventry->idev_Lint1Pass;
-	acpi_ivhdp->ach_Lint0Pass = deventry->idev_Lint0Pass;
-	acpi_ivhdp->ach_SysMgt = deventry->idev_SysMgt;
-	acpi_ivhdp->ach_NMIPass = deventry->idev_NMIPass;
-	acpi_ivhdp->ach_ExtIntPass = deventry->idev_ExtIntPass;
-	acpi_ivhdp->ach_INITPass = deventry->idev_INITPass;
-
-	/* extended data */
-	if (acpi_ivhdp->ach_dev_type == DEVENTRY_EXTENDED_SELECT ||
-	    acpi_ivhdp->ach_dev_type == DEVENTRY_EXTENDED_RANGE) {
-		acpi_ivhdp->ach_AtsDisabled = deventry->idev_AtsDisabled;
-	}
-
-	/*
-	 * Now add it to the hash
-	 */
-	ASSERT(hash[idx] != acpi_ivhdp);
-	acpi_ivhdp->ach_next = hash[idx];
-	hash[idx] = acpi_ivhdp;
-}
-
-static void
-add_ivhdc_info(ivhd_container_t *ivhdcp, amd_iommu_acpi_ivhd_t **hash)
-{
-	ivhd_deventry_t *deventry;
-	ivhd_t *ivhdp = ivhdcp->ivhdc_ivhd;
-
-	for (deventry = ivhdcp->ivhdc_first_deventry; deventry;
-	    deventry = deventry->idev_next) {
-		add_deventry_info(ivhdp, deventry, hash);
-	}
-}
-
-static void
-add_ivhd_info(amd_iommu_acpi_t *acpi, amd_iommu_acpi_ivhd_t **hash)
-{
-	ivhd_container_t *ivhdcp;
-
-	for (ivhdcp = acpi->acp_first_ivhdc; ivhdcp;
-	    ivhdcp = ivhdcp->ivhdc_next) {
-		add_ivhdc_info(ivhdcp, hash);
-	}
-}
-
-static void
-set_ivmd_info(ivmd_t *ivmdp, amd_iommu_acpi_ivmd_t **hash)
-{
-	amd_iommu_acpi_ivmd_t *acpi_ivmdp;
-	uint8_t uint8_flags;
-	uint16_t idx;
-
-	uint8_flags = ivmdp->ivmd_flags;
-
-	acpi_ivmdp = kmem_zalloc(sizeof (*acpi_ivmdp), KM_SLEEP);
-
-	switch (ivmdp->ivmd_type) {
-	case 0x20:
-		acpi_ivmdp->acm_deviceid_start = 0;
-		acpi_ivmdp->acm_deviceid_end = (uint16_t)-1;
-		acpi_ivmdp->acm_dev_type = IVMD_DEVICEID_ALL;
-		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
-		break;
-	case 0x21:
-		acpi_ivmdp->acm_deviceid_start = ivmdp->ivmd_deviceid;
-		acpi_ivmdp->acm_deviceid_end = ivmdp->ivmd_deviceid;
-		acpi_ivmdp->acm_dev_type = IVMD_DEVICEID_SELECT;
-		idx = deviceid_hashfn(ivmdp->ivmd_deviceid);
-		break;
-	case 0x22:
-		acpi_ivmdp->acm_deviceid_start = ivmdp->ivmd_deviceid;
-		acpi_ivmdp->acm_deviceid_end = ivmdp->ivmd_auxdata;
-		acpi_ivmdp->acm_dev_type = IVMD_DEVICEID_RANGE;
-		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
-		break;
-	default:
-		cmn_err(CE_PANIC, "Unknown AMD IOMMU ACPI IVMD deviceid type: "
-		    "%x", ivmdp->ivmd_type);
-		/*NOTREACHED*/
-	}
-
-	ASSERT(AMD_IOMMU_REG_GET8(&uint8_flags,
-	    AMD_IOMMU_ACPI_IVMD_RSV) == 0);
-
-	acpi_ivmdp->acm_ExclRange = AMD_IOMMU_REG_GET8(&uint8_flags,
-	    AMD_IOMMU_ACPI_IVMD_EXCL_RANGE);
-	acpi_ivmdp->acm_IW = AMD_IOMMU_REG_GET8(&uint8_flags,
-	    AMD_IOMMU_ACPI_IVMD_IW);
-	acpi_ivmdp->acm_IR = AMD_IOMMU_REG_GET8(&uint8_flags,
-	    AMD_IOMMU_ACPI_IVMD_IR);
-	acpi_ivmdp->acm_Unity = AMD_IOMMU_REG_GET8(&uint8_flags,
-	    AMD_IOMMU_ACPI_IVMD_UNITY);
-
-	acpi_ivmdp->acm_ivmd_phys_start = ivmdp->ivmd_phys_start;
-	acpi_ivmdp->acm_ivmd_phys_len = ivmdp->ivmd_phys_len;
-
-	acpi_ivmdp->acm_next = hash[idx];
-	hash[idx] = acpi_ivmdp;
-}
-
-static void
-add_ivmdc_info(ivmd_container_t *ivmdcp, amd_iommu_acpi_ivmd_t **hash)
-{
-	set_ivmd_info(ivmdcp->ivmdc_ivmd, hash);
-}
-
-static void
-add_ivmd_info(amd_iommu_acpi_t *acpi, amd_iommu_acpi_ivmd_t **hash)
-{
-	ivmd_container_t *ivmdcp;
-
-	for (ivmdcp = acpi->acp_first_ivmdc; ivmdcp;
-	    ivmdcp = ivmdcp->ivmdc_next) {
-		add_ivmdc_info(ivmdcp, hash);
-	}
-}
-
-static void
-add_global_info(amd_iommu_acpi_t *acpi, amd_iommu_acpi_global_t *global)
-{
-	uint32_t ivrs_ivinfo = acpi->acp_ivrs->ivrs_ivinfo;
-
-	global->acg_HtAtsResv =
-	    AMD_IOMMU_REG_GET32(&ivrs_ivinfo, AMD_IOMMU_ACPI_HT_ATSRSV);
-	global->acg_VAsize =
-	    AMD_IOMMU_REG_GET32(&ivrs_ivinfo, AMD_IOMMU_ACPI_VA_SIZE);
-	global->acg_PAsize =
-	    AMD_IOMMU_REG_GET32(&ivrs_ivinfo, AMD_IOMMU_ACPI_PA_SIZE);
-}
-
-static int
-create_acpi_hash(amd_iommu_acpi_t *acpi)
-{
-	/* Last hash entry is for deviceid ranges including "all" */
-
-	amd_iommu_acpi_global = kmem_zalloc(sizeof (amd_iommu_acpi_global_t),
-	    KM_SLEEP);
-
-	amd_iommu_acpi_ivhd_hash = kmem_zalloc(sizeof (amd_iommu_acpi_ivhd_t *)
-	    * (AMD_IOMMU_ACPI_INFO_HASH_SZ + 1), KM_SLEEP);
-
-	amd_iommu_acpi_ivmd_hash = kmem_zalloc(sizeof (amd_iommu_acpi_ivmd_t *)
-	    * (AMD_IOMMU_ACPI_INFO_HASH_SZ + 1), KM_SLEEP);
-
-	add_global_info(acpi, amd_iommu_acpi_global);
-
-	add_ivhd_info(acpi, amd_iommu_acpi_ivhd_hash);
-
-	add_ivmd_info(acpi, amd_iommu_acpi_ivmd_hash);
-
-	return (DDI_SUCCESS);
-}
-
-amd_iommu_acpi_global_t *
-amd_iommu_lookup_acpi_global(void)
-{
-	ASSERT(amd_iommu_acpi_global);
-
-	return (amd_iommu_acpi_global);
-}
-
-amd_iommu_acpi_ivhd_t *
-amd_iommu_lookup_all_ivhd(void)
-{
-	amd_iommu_acpi_ivhd_t *hinfop;
-
-	hinfop = amd_iommu_acpi_ivhd_hash[AMD_IOMMU_ACPI_INFO_HASH_SZ];
-	for (; hinfop; hinfop = hinfop->ach_next) {
-		if (hinfop->ach_deviceid_start == 0 &&
-		    hinfop->ach_deviceid_end == (uint16_t)-1) {
-			break;
-		}
-	}
-
-	return (hinfop);
-}
-
-amd_iommu_acpi_ivmd_t *
-amd_iommu_lookup_all_ivmd(void)
-{
-	amd_iommu_acpi_ivmd_t *minfop;
-
-	minfop = amd_iommu_acpi_ivmd_hash[AMD_IOMMU_ACPI_INFO_HASH_SZ];
-	for (; minfop; minfop = minfop->acm_next) {
-		if (minfop->acm_deviceid_start == 0 &&
-		    minfop->acm_deviceid_end == (uint16_t)-1) {
-			break;
-		}
-	}
-
-	return (minfop);
-}
-
-amd_iommu_acpi_ivhd_t *
-amd_iommu_lookup_any_ivhd(void)
-{
-	int i;
-	amd_iommu_acpi_ivhd_t *hinfop;
-
-	for (i = AMD_IOMMU_ACPI_INFO_HASH_SZ; i >= 0; i--) {
-		/*LINTED*/
-		if (hinfop = amd_iommu_acpi_ivhd_hash[i])
-			break;
-	}
-
-	return (hinfop);
-}
-
-amd_iommu_acpi_ivmd_t *
-amd_iommu_lookup_any_ivmd(void)
-{
-	int i;
-	amd_iommu_acpi_ivmd_t *minfop;
-
-	for (i = AMD_IOMMU_ACPI_INFO_HASH_SZ; i >= 0; i--) {
-		/*LINTED*/
-		if (minfop = amd_iommu_acpi_ivmd_hash[i])
-			break;
-	}
-
-	return (minfop);
-}
-
-static void
-dump_acpi_aliases(void)
-{
-	amd_iommu_acpi_ivhd_t *hinfop;
-	uint16_t idx;
-
-	for (idx = 0; idx <= AMD_IOMMU_ACPI_INFO_HASH_SZ; idx++) {
-		hinfop = amd_iommu_acpi_ivhd_hash[idx];
-		for (; hinfop; hinfop = hinfop->ach_next) {
-			cmn_err(CE_NOTE, "start=%d, end=%d, src_bdf=%d",
-			    hinfop->ach_deviceid_start,
-			    hinfop->ach_deviceid_end,
-			    hinfop->ach_src_deviceid);
-		}
-	}
-}
-
-amd_iommu_acpi_ivhd_t *
-amd_iommu_lookup_ivhd(int32_t deviceid)
-{
-	amd_iommu_acpi_ivhd_t *hinfop;
-	uint16_t idx;
-
-	if (amd_iommu_debug == AMD_IOMMU_DEBUG_ACPI) {
-		cmn_err(CE_NOTE, "Attempting to get ACPI IVHD info "
-		    "for deviceid: %d", deviceid);
-	}
-
-	ASSERT(amd_iommu_acpi_ivhd_hash);
-
-	/* check if special device */
-	if (deviceid == -1) {
-		hinfop = amd_iommu_acpi_ivhd_hash[AMD_IOMMU_ACPI_INFO_HASH_SZ];
-		for (; hinfop; hinfop = hinfop->ach_next) {
-			if (hinfop->ach_deviceid_start  == -1 &&
-			    hinfop->ach_deviceid_end == -1) {
-				break;
-			}
-		}
-		return (hinfop);
-	}
-
-	/* First search for an exact match */
-
-	idx = deviceid_hashfn(deviceid);
-
-range:
-	hinfop = amd_iommu_acpi_ivhd_hash[idx];
-
-	for (; hinfop; hinfop = hinfop->ach_next) {
-		if (deviceid < hinfop->ach_deviceid_start ||
-		    deviceid > hinfop->ach_deviceid_end)
-			continue;
-
-		if (amd_iommu_debug == AMD_IOMMU_DEBUG_ACPI) {
-			cmn_err(CE_NOTE, "Found ACPI IVHD match: %p, "
-			    "actual deviceid = %u, start = %u, end = %u",
-			    (void *)hinfop, deviceid,
-			    hinfop->ach_deviceid_start,
-			    hinfop->ach_deviceid_end);
-		}
-		goto out;
-	}
-
-	if (idx != AMD_IOMMU_ACPI_INFO_HASH_SZ) {
-		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
-		goto range;
-	} else {
-		cmn_err(CE_PANIC, "IVHD not found for deviceid: %x", deviceid);
-	}
-
-out:
-	if (amd_iommu_debug == AMD_IOMMU_DEBUG_ACPI) {
-		cmn_err(CE_NOTE, "%u: %s ACPI IVHD %p", deviceid,
-		    hinfop ? "GOT" : "Did NOT get", (void *)hinfop);
-	}
-
-	return (hinfop);
-}
-
-amd_iommu_acpi_ivmd_t *
-amd_iommu_lookup_ivmd(int32_t deviceid)
-{
-	amd_iommu_acpi_ivmd_t *minfop;
-	uint16_t idx;
-
-	if (amd_iommu_debug == AMD_IOMMU_DEBUG_ACPI) {
-		cmn_err(CE_NOTE, "Attempting to get ACPI IVMD info "
-		    "for deviceid: %u", deviceid);
-	}
-
-	ASSERT(amd_iommu_acpi_ivmd_hash);
-
-	/* First search for an exact match */
-
-	idx = deviceid_hashfn(deviceid);
-
-range:
-	minfop = amd_iommu_acpi_ivmd_hash[idx];
-
-	for (; minfop; minfop = minfop->acm_next) {
-		if (deviceid < minfop->acm_deviceid_start ||
-		    deviceid > minfop->acm_deviceid_end)
-			continue;
-
-		if (amd_iommu_debug == AMD_IOMMU_DEBUG_ACPI) {
-			cmn_err(CE_NOTE, "Found ACPI IVMD match: %p, "
-			    "actual deviceid = %u, start = %u, end = %u",
-			    (void *)minfop, deviceid,
-			    minfop->acm_deviceid_start,
-			    minfop->acm_deviceid_end);
-		}
-
-		goto out;
-	}
-
-	if (idx != AMD_IOMMU_ACPI_INFO_HASH_SZ) {
-		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
-		goto range;
-	} else {
-		cmn_err(CE_PANIC, "IVMD not found for deviceid: %x", deviceid);
-	}
-
-out:
-	if (amd_iommu_debug == AMD_IOMMU_DEBUG_ACPI) {
-		cmn_err(CE_NOTE, "%u: %s ACPI IVMD info %p", deviceid,
-		    minfop ? "GOT" : "Did NOT get", (void *)minfop);
-	}
-
-	return (minfop);
-}
--- a/usr/src/uts/intel/io/amd_iommu/amd_iommu_acpi.h	Wed Sep 16 22:03:43 2009 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,306 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef _AMD_IOMMU_ACPI_H
-#define	_AMD_IOMMU_ACPI_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <sys/sunddi.h>
-#include <sys/acpi/acpi.h>
-#include <sys/acpica.h>
-#include <sys/amd_iommu.h>
-#include "amd_iommu_impl.h"
-
-#ifdef _KERNEL
-
-#define	IVRS_SIG	"IVRS"
-
-/*
- * IVINFO settings
- */
-#define	AMD_IOMMU_ACPI_IVINFO_RSV1	(31 << 16 | 23)
-#define	AMD_IOMMU_ACPI_HT_ATSRSV	(22 << 16 | 22)
-#define	AMD_IOMMU_ACPI_VA_SIZE		(21 << 16 | 15)
-#define	AMD_IOMMU_ACPI_PA_SIZE		(14 << 16 | 8)
-#define	AMD_IOMMU_ACPI_IVINFO_RSV2	(7 << 16 | 0)
-
-/*
- * IVHD Device entry len field
- */
-#define	AMD_IOMMU_ACPI_DEVENTRY_LEN	(7 << 16 | 6)
-
-/*
- * IVHD flag fields definition
- */
-#define	AMD_IOMMU_ACPI_IVHD_FLAGS_RSV		(7 << 16 | 5)
-#define	AMD_IOMMU_ACPI_IVHD_FLAGS_IOTLBSUP	(4 << 16 | 4)
-#define	AMD_IOMMU_ACPI_IVHD_FLAGS_ISOC		(3 << 16 | 3)
-#define	AMD_IOMMU_ACPI_IVHD_FLAGS_RESPASSPW	(2 << 16 | 2)
-#define	AMD_IOMMU_ACPI_IVHD_FLAGS_PASSPW	(1 << 16 | 1)
-#define	AMD_IOMMU_ACPI_IVHD_FLAGS_HTTUNEN	(0 << 16 | 0)
-
-/*
- * IVHD IOMMU info fields
- */
-#define	AMD_IOMMU_ACPI_IOMMU_INFO_RSV1		(15 << 16 | 13)
-#define	AMD_IOMMU_ACPI_IOMMU_INFO_UNITID	(12 << 16 | 8)
-#define	AMD_IOMMU_ACPI_IOMMU_INFO_RSV2		(7 << 16 | 5)
-#define	AMD_IOMMU_ACPI_IOMMU_INFO_MSINUM	(4 << 16 | 0)
-
-/*
- * IVHD deventry data settings
- */
-#define	AMD_IOMMU_ACPI_LINT1PASS	(7 << 16 | 7)
-#define	AMD_IOMMU_ACPI_LINT0PASS	(6 << 16 | 6)
-#define	AMD_IOMMU_ACPI_SYSMGT		(5 << 16 | 4)
-#define	AMD_IOMMU_ACPI_DATRSV		(3 << 16 | 3)
-#define	AMD_IOMMU_ACPI_NMIPASS		(2 << 16 | 2)
-#define	AMD_IOMMU_ACPI_EXTINTPASS	(1 << 16 | 1)
-#define	AMD_IOMMU_ACPI_INITPASS		(0 << 16 | 0)
-
-/*
- * IVHD deventry extended data settings
- */
-#define	AMD_IOMMU_ACPI_ATSDISABLED	(31 << 16 | 31)
-#define	AMD_IOMMU_ACPI_EXTDATRSV	(30 << 16 | 0)
-
-/*
- * IVMD flags fields settings
- */
-#define	AMD_IOMMU_ACPI_IVMD_RSV		(7 << 16 | 4)
-#define	AMD_IOMMU_ACPI_IVMD_EXCL_RANGE	(3 << 16 | 3)
-#define	AMD_IOMMU_ACPI_IVMD_IW		(2 << 16 | 2)
-#define	AMD_IOMMU_ACPI_IVMD_IR		(1 << 16 | 1)
-#define	AMD_IOMMU_ACPI_IVMD_UNITY	(0 << 16 | 0)
-
-#define	AMD_IOMMU_ACPI_INFO_HASH_SZ	(256)
-
-/*
- * Deventry special device "variety"
- */
-#define	AMD_IOMMU_ACPI_SPECIAL_APIC	0x1
-#define	AMD_IOMMU_ACPI_SPECIAL_HPET	0x2
-
-typedef enum {
-	DEVENTRY_INVALID = 0,
-	DEVENTRY_ALL = 1,
-	DEVENTRY_SELECT,
-	DEVENTRY_RANGE,
-	DEVENTRY_RANGE_END,
-	DEVENTRY_ALIAS_SELECT,
-	DEVENTRY_ALIAS_RANGE,
-	DEVENTRY_EXTENDED_SELECT,
-	DEVENTRY_EXTENDED_RANGE,
-	DEVENTRY_SPECIAL_DEVICE
-} ivhd_deventry_type_t;
-
-typedef enum {
-	IVMD_DEVICE_INVALID = 0,
-	IVMD_DEVICEID_ALL,
-	IVMD_DEVICEID_SELECT,
-	IVMD_DEVICEID_RANGE
-} ivmd_deviceid_type_t;
-
-typedef struct ivhd_deventry {
-	uint8_t idev_len;
-	ivhd_deventry_type_t  idev_type;
-	int32_t idev_deviceid;
-	int32_t idev_src_deviceid;
-	uint8_t idev_handle;
-	uint8_t idev_variety;
-	uint8_t idev_Lint1Pass;
-	uint8_t idev_Lint0Pass;
-	uint8_t idev_SysMgt;
-	uint8_t idev_NMIPass;
-	uint8_t idev_ExtIntPass;
-	uint8_t idev_INITPass;
-	uint8_t idev_AtsDisabled;
-	struct ivhd_deventry *idev_next;
-} ivhd_deventry_t;
-
-/* The structs below overlay raw ACPI table data; force byte packing */
-#pragma pack(1)
-typedef struct ivhd {
-	uint8_t ivhd_type;
-	uint8_t ivhd_flags;
-	uint16_t ivhd_len;
-	uint16_t ivhd_deviceid;
-	uint16_t ivhd_cap_off;
-	uint64_t ivhd_reg_base;
-	uint16_t ivhd_pci_seg;
-	uint16_t ivhd_iommu_info;
-	uint32_t ivhd_resv;
-} ivhd_t;
-
-typedef struct ivhd_container {
-	ivhd_t *ivhdc_ivhd;
-	ivhd_deventry_t *ivhdc_first_deventry;
-	ivhd_deventry_t *ivhdc_last_deventry;
-	struct ivhd_container *ivhdc_next;
-} ivhd_container_t;
-
-typedef struct ivmd {
-	uint8_t ivmd_type;
-	uint8_t ivmd_flags;
-	uint16_t ivmd_len;
-	uint16_t ivmd_deviceid;
-	uint16_t ivmd_auxdata;
-	uint64_t ivmd_resv;
-	uint64_t ivmd_phys_start;
-	uint64_t ivmd_phys_len;
-} ivmd_t;
-
-typedef struct ivmd_container {
-	ivmd_t *ivmdc_ivmd;
-	struct ivmd_container *ivmdc_next;
-} ivmd_container_t;
-
-typedef struct ivrs {
-	struct acpi_table_header ivrs_hdr;
-	uint32_t ivrs_ivinfo;
-	uint64_t ivrs_resv;
-} ivrs_t;
-
-typedef struct amd_iommu_acpi {
-	struct ivrs *acp_ivrs;
-	ivhd_container_t *acp_first_ivhdc;
-	ivhd_container_t *acp_last_ivhdc;
-	ivmd_container_t *acp_first_ivmdc;
-	ivmd_container_t *acp_last_ivmdc;
-} amd_iommu_acpi_t;
-
-
-/* Global IVINFO fields */
-typedef struct amd_iommu_acpi_global {
-	uint8_t acg_HtAtsResv;
-	uint8_t acg_VAsize;
-	uint8_t acg_PAsize;
-} amd_iommu_acpi_global_t;
-
-typedef struct amd_iommu_acpi_ivhd {
-	int32_t ach_deviceid_start;
-	int32_t ach_deviceid_end;
-
-	/* IVHD deventry type */
-	ivhd_deventry_type_t ach_dev_type;
-
-	/* IVHD flag fields */
-	uint8_t ach_IotlbSup;
-	uint8_t ach_Isoc;
-	uint8_t ach_ResPassPW;
-	uint8_t ach_PassPW;
-	uint8_t ach_HtTunEn;
-
-	/* IVHD fields */
-	uint16_t ach_IOMMU_deviceid;
-	uint16_t ach_IOMMU_cap_off;
-	uint64_t ach_IOMMU_reg_base;
-	uint16_t ach_IOMMU_pci_seg;
-
-	/* IVHD IOMMU info fields */
-	uint8_t ach_IOMMU_UnitID;
-	uint8_t ach_IOMMU_MSInum;
-
-	/* IVHD deventry data settings */
-	uint8_t ach_Lint1Pass;
-	uint8_t ach_Lint0Pass;
-	uint8_t ach_SysMgt;
-	uint8_t ach_NMIPass;
-	uint8_t ach_ExtIntPass;
-	uint8_t ach_INITPass;
-
-	/* alias */
-	int32_t ach_src_deviceid;
-
-	/* IVHD deventry extended data settings */
-	uint8_t ach_AtsDisabled;
-
-	/* IVHD deventry special device */
-	uint8_t ach_special_handle;
-	uint8_t ach_special_variety;
-
-	struct amd_iommu_acpi_ivhd *ach_next;
-} amd_iommu_acpi_ivhd_t;
-
-typedef struct amd_iommu_acpi_ivmd {
-	int32_t acm_deviceid_start;
-	int32_t acm_deviceid_end;
-
-	/* IVMD type */
-	ivmd_deviceid_type_t acm_dev_type;
-
-	/* IVMD flags */
-	uint8_t acm_ExclRange;
-	uint8_t acm_IW;
-	uint8_t acm_IR;
-	uint8_t acm_Unity;
-
-	/* IVMD mem block */
-	uint64_t acm_ivmd_phys_start;
-	uint64_t acm_ivmd_phys_len;
-
-	struct amd_iommu_acpi_ivmd *acm_next;
-} amd_iommu_acpi_ivmd_t;
-
-typedef union {
-	uint16_t   ent16;
-	uint8_t	   ent8[2];
-} align_16_t;
-
-typedef union {
-	uint32_t   ent32;
-	uint8_t	   ent8[4];
-} align_32_t;
-
-typedef union {
-	ivhd_t *ivhdp;
-	char   *cp;
-} align_ivhd_t;
-
-typedef union {
-	ivmd_t *ivmdp;
-	char   *cp;
-} align_ivmd_t;
-
-#pragma pack()
-
-int amd_iommu_acpi_init(void);
-void amd_iommu_acpi_fini(void);
-amd_iommu_acpi_ivhd_t *amd_iommu_lookup_all_ivhd(void);
-amd_iommu_acpi_ivmd_t *amd_iommu_lookup_all_ivmd(void);
-amd_iommu_acpi_ivhd_t *amd_iommu_lookup_any_ivhd(void);
-amd_iommu_acpi_ivmd_t *amd_iommu_lookup_any_ivmd(void);
-amd_iommu_acpi_global_t *amd_iommu_lookup_acpi_global(void);
-amd_iommu_acpi_ivhd_t *amd_iommu_lookup_ivhd(int32_t deviceid);
-amd_iommu_acpi_ivmd_t *amd_iommu_lookup_ivmd(int32_t deviceid);
-
-#endif /* _KERNEL */
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif	/* _AMD_IOMMU_ACPI_H */
--- a/usr/src/uts/intel/io/amd_iommu/amd_iommu_cmd.c	Wed Sep 16 22:03:43 2009 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,321 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-
-/*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#include <sys/sunddi.h>
-#include <sys/amd_iommu.h>
-#include "amd_iommu_impl.h"
-
-extern int servicing_interrupt(void);
-
-static void
-amd_iommu_wait_for_completion(amd_iommu_t *iommu)
-{
-	ASSERT(MUTEX_HELD(&iommu->aiomt_cmdlock));
-	while (AMD_IOMMU_REG_GET64(REGADDR64(
-	    iommu->aiomt_reg_status_va), AMD_IOMMU_COMWAIT_INT) != 1) {
-		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
-		    AMD_IOMMU_CMDBUF_ENABLE, 1);
-		WAIT_SEC(1);
-	}
-}
-
-static int
-create_compl_wait_cmd(amd_iommu_t *iommu, amd_iommu_cmdargs_t *cmdargsp,
-    amd_iommu_cmd_flags_t flags, uint32_t *cmdptr)
-{
-	const char *driver = ddi_driver_name(iommu->aiomt_dip);
-	int instance = ddi_get_instance(iommu->aiomt_dip);
-	const char *f = "create_compl_wait_cmd";
-
-	ASSERT(cmdargsp == NULL);
-
-	if (flags & AMD_IOMMU_CMD_FLAGS_COMPL_WAIT_S) {
-		cmn_err(CE_WARN, "%s: %s%d: idx=%d: 'store' completion "
-		    "not supported for completion wait command",
-		    f, driver, instance, iommu->aiomt_idx);
-		return (DDI_FAILURE);
-	}
-
-	AMD_IOMMU_REG_SET32(&cmdptr[0], AMD_IOMMU_CMD_COMPL_WAIT_S, 0);
-	AMD_IOMMU_REG_SET32(&cmdptr[0], AMD_IOMMU_CMD_COMPL_WAIT_I, 1);
-	AMD_IOMMU_REG_SET32(&cmdptr[0], AMD_IOMMU_CMD_COMPL_WAIT_F,
-	    (flags & AMD_IOMMU_CMD_FLAGS_COMPL_WAIT_F) != 0);
-	AMD_IOMMU_REG_SET32(&cmdptr[0], AMD_IOMMU_CMD_COMPL_WAIT_STORE_ADDR_LO,
-	    0);
-	AMD_IOMMU_REG_SET32(&cmdptr[1], AMD_IOMMU_CMD_OPCODE, 0x01);
-	AMD_IOMMU_REG_SET32(&cmdptr[1], AMD_IOMMU_CMD_COMPL_WAIT_STORE_ADDR_HI,
-	    0);
-	cmdptr[2] = 0;
-	cmdptr[3] = 0;
-
-	return (DDI_SUCCESS);
-}
-
-static int
-create_inval_devtab_entry_cmd(amd_iommu_t *iommu, amd_iommu_cmdargs_t *cmdargsp,
-    amd_iommu_cmd_flags_t flags, uint32_t *cmdptr)
-{
-	const char *driver = ddi_driver_name(iommu->aiomt_dip);
-	int instance = ddi_get_instance(iommu->aiomt_dip);
-	const char *f = "create_inval_devtab_entry_cmd";
-	uint16_t deviceid;
-
-	ASSERT(cmdargsp);
-
-	if (flags != AMD_IOMMU_CMD_FLAGS_NONE) {
-		cmn_err(CE_WARN, "%s: %s%d: idx=%d: invalidate devtab entry "
-		    "no flags supported", f, driver, instance,
-		    iommu->aiomt_idx);
-		return (DDI_FAILURE);
-	}
-
-	deviceid = cmdargsp->ca_deviceid;
-
-	AMD_IOMMU_REG_SET32(&cmdptr[0], AMD_IOMMU_CMD_INVAL_DEVTAB_DEVICEID,
-	    deviceid);
-	AMD_IOMMU_REG_SET32(&cmdptr[1], AMD_IOMMU_CMD_OPCODE, 0x02);
-	cmdptr[2] = 0;
-	cmdptr[3] = 0;
-
-	return (DDI_SUCCESS);
-}
-
-/*ARGSUSED*/
-static int
-create_inval_iommu_pages_cmd(amd_iommu_t *iommu, amd_iommu_cmdargs_t *cmdargsp,
-    amd_iommu_cmd_flags_t flags, uint32_t *cmdptr)
-{
-	uint32_t addr_lo;
-	uint32_t addr_hi;
-
-	ASSERT(cmdargsp);
-
-	addr_lo = AMD_IOMMU_REG_GET64(REGADDR64(&cmdargsp->ca_addr),
-	    AMD_IOMMU_CMD_INVAL_PAGES_ADDR_LO);
-	addr_hi = AMD_IOMMU_REG_GET64(REGADDR64(&cmdargsp->ca_addr),
-	    AMD_IOMMU_CMD_INVAL_PAGES_ADDR_HI);
-
-	cmdptr[0] = 0;
-	AMD_IOMMU_REG_SET32(&cmdptr[1], AMD_IOMMU_CMD_INVAL_PAGES_DOMAINID,
-	    cmdargsp->ca_domainid);
-	AMD_IOMMU_REG_SET32(&cmdptr[1], AMD_IOMMU_CMD_OPCODE, 0x03);
-	AMD_IOMMU_REG_SET32(&cmdptr[2], AMD_IOMMU_CMD_INVAL_PAGES_PDE,
-	    (flags & AMD_IOMMU_CMD_FLAGS_PAGE_PDE_INVAL) != 0);
-	AMD_IOMMU_REG_SET32(&cmdptr[2], AMD_IOMMU_CMD_INVAL_PAGES_S,
-	    (flags & AMD_IOMMU_CMD_FLAGS_PAGE_INVAL_S) != 0);
-	AMD_IOMMU_REG_SET32(&cmdptr[2], AMD_IOMMU_CMD_INVAL_PAGES_ADDR_LO,
-	    addr_lo);
-	cmdptr[3] = addr_hi;
-
-	return (DDI_SUCCESS);
-
-}
-
-/*ARGSUSED*/
-static int
-create_inval_iotlb_pages_cmd(amd_iommu_t *iommu, amd_iommu_cmdargs_t *cmdargsp,
-    amd_iommu_cmd_flags_t flags, uint32_t *cmdptr)
-{
-	uint32_t addr_lo;
-	uint32_t addr_hi;
-
-	ASSERT(cmdargsp);
-
-	addr_lo = AMD_IOMMU_REG_GET64(REGADDR64(&cmdargsp->ca_addr),
-	    AMD_IOMMU_CMD_INVAL_IOTLB_ADDR_LO);
-
-	addr_hi = AMD_IOMMU_REG_GET64(REGADDR64(&cmdargsp->ca_addr),
-	    AMD_IOMMU_CMD_INVAL_IOTLB_ADDR_HI);
-
-	AMD_IOMMU_REG_SET32(&cmdptr[0], AMD_IOMMU_CMD_INVAL_IOTLB_DEVICEID,
-	    cmdargsp->ca_deviceid);
-	AMD_IOMMU_REG_SET32(&cmdptr[0], AMD_IOMMU_CMD_INVAL_IOTLB_MAXPEND,
-	    AMD_IOMMU_DEFAULT_MAXPEND);
-	AMD_IOMMU_REG_SET32(&cmdptr[1], AMD_IOMMU_CMD_OPCODE, 0x04);
-	AMD_IOMMU_REG_SET32(&cmdptr[1], AMD_IOMMU_CMD_INVAL_IOTLB_QUEUEID,
-	    cmdargsp->ca_deviceid);
-	AMD_IOMMU_REG_SET32(&cmdptr[2], AMD_IOMMU_CMD_INVAL_IOTLB_ADDR_LO,
-	    addr_lo);
-	AMD_IOMMU_REG_SET32(&cmdptr[2], AMD_IOMMU_CMD_INVAL_IOTLB_S,
-	    (flags & AMD_IOMMU_CMD_FLAGS_IOTLB_INVAL_S) != 0);
-	cmdptr[3] = addr_hi;
-
-	return (DDI_SUCCESS);
-}
-
-static int
-create_inval_intr_table_cmd(amd_iommu_t *iommu, amd_iommu_cmdargs_t *cmdargsp,
-    amd_iommu_cmd_flags_t flags, uint32_t *cmdptr)
-{
-	const char *driver = ddi_driver_name(iommu->aiomt_dip);
-	int instance = ddi_get_instance(iommu->aiomt_dip);
-	const char *f = "create_inval_intr_table_cmd";
-
-	ASSERT(cmdargsp);
-
-	if (flags != AMD_IOMMU_CMD_FLAGS_NONE) {
-		cmn_err(CE_WARN, "%s: %s%d: idx=%d: flags not supported "
-		    "for invalidate interrupt table command",
-		    f, driver, instance, iommu->aiomt_idx);
-		return (DDI_FAILURE);
-	}
-
-	AMD_IOMMU_REG_SET32(&cmdptr[0], AMD_IOMMU_CMD_INVAL_INTR_DEVICEID,
-	    cmdargsp->ca_deviceid);
-	AMD_IOMMU_REG_SET32(&cmdptr[1], AMD_IOMMU_CMD_OPCODE, 0x05);
-	cmdptr[2] = 0;
-	cmdptr[3] = 0;
-
-	return (DDI_SUCCESS);
-}
-
-int
-amd_iommu_cmd(amd_iommu_t *iommu, amd_iommu_cmd_t cmd,
-    amd_iommu_cmdargs_t *cmdargs, amd_iommu_cmd_flags_t flags, int lock_held)
-{
-	int error;
-	int i;
-	uint32_t cmdptr[4] = {0};
-	const char *driver = ddi_driver_name(iommu->aiomt_dip);
-	int instance = ddi_get_instance(iommu->aiomt_dip);
-	uint64_t cmdhead_off;
-	uint64_t cmdtail_off;
-	const char *f = "amd_iommu_cmd";
-
-	ASSERT(lock_held == 0 || lock_held == 1);
-	ASSERT(lock_held == 0 || MUTEX_HELD(&iommu->aiomt_cmdlock));
-
-	if (!lock_held)
-		mutex_enter(&iommu->aiomt_cmdlock);
-
-	/*
-	 * Prepare the command
-	 */
-	switch (cmd) {
-	case AMD_IOMMU_CMD_COMPL_WAIT:
-		if (flags & AMD_IOMMU_CMD_FLAGS_COMPL_WAIT) {
-			cmn_err(CE_WARN, "%s: %s%d: idx=%d: No completion wait "
-			    " after completion wait command",
-			    f, driver, instance, iommu->aiomt_idx);
-			error = DDI_FAILURE;
-			goto out;
-		}
-		error = create_compl_wait_cmd(iommu, cmdargs, flags, cmdptr);
-		break;
-	case AMD_IOMMU_CMD_INVAL_DEVTAB_ENTRY:
-		error = create_inval_devtab_entry_cmd(iommu, cmdargs,
-		    flags & ~AMD_IOMMU_CMD_FLAGS_COMPL_WAIT, cmdptr);
-		break;
-	case AMD_IOMMU_CMD_INVAL_IOMMU_PAGES:
-		error = create_inval_iommu_pages_cmd(iommu, cmdargs,
-		    flags & ~AMD_IOMMU_CMD_FLAGS_COMPL_WAIT, cmdptr);
-		break;
-	case AMD_IOMMU_CMD_INVAL_IOTLB_PAGES:
-		error = create_inval_iotlb_pages_cmd(iommu, cmdargs,
-		    flags & ~AMD_IOMMU_CMD_FLAGS_COMPL_WAIT, cmdptr);
-		break;
-	case AMD_IOMMU_CMD_INVAL_INTR_TABLE:
-		error = create_inval_intr_table_cmd(iommu, cmdargs,
-		    flags & ~AMD_IOMMU_CMD_FLAGS_COMPL_WAIT, cmdptr);
-		break;
-	default:
-		cmn_err(CE_WARN, "%s: %s%d: idx=%d: Unsupported cmd: %d",
-		    f, driver, instance, iommu->aiomt_idx, cmd);
-		error = DDI_FAILURE;
-		goto out;
-	}
-
-	if (error != DDI_SUCCESS) {
-		error = DDI_FAILURE;
-		goto out;
-	}
-
-	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
-	    AMD_IOMMU_CMDBUF_ENABLE, 1);
-
-	ASSERT(iommu->aiomt_cmd_tail != NULL);
-
-	for (i = 0; i < 4; i++) {
-		iommu->aiomt_cmd_tail[i] = cmdptr[i];
-	}
-
-wait_for_drain:
-	cmdhead_off = AMD_IOMMU_REG_GET64(
-	    REGADDR64(iommu->aiomt_reg_cmdbuf_head_va),
-	    AMD_IOMMU_CMDHEADPTR);
-
-	cmdhead_off = CMD2OFF(cmdhead_off);
-
-	ASSERT(cmdhead_off < iommu->aiomt_cmdbuf_sz);
-
-	/* check for overflow */
-	if ((caddr_t)iommu->aiomt_cmd_tail <
-	    (cmdhead_off + iommu->aiomt_cmdbuf)) {
-		if ((caddr_t)iommu->aiomt_cmd_tail + 16 >=
-		    (cmdhead_off + iommu->aiomt_cmdbuf)) {
-#ifdef DEBUG
-			cmn_err(CE_WARN, "cmdbuffer overflow: waiting for "
-			    "drain");
-#endif
-			goto wait_for_drain;
-		}
-	}
-
-	SYNC_FORDEV(iommu->aiomt_dmahdl);
-
-	/*
-	 * Update the tail pointer in soft state
-	 * and the tail pointer register
-	 */
-	iommu->aiomt_cmd_tail += 4;
-	if ((caddr_t)iommu->aiomt_cmd_tail >= (iommu->aiomt_cmdbuf
-	    + iommu->aiomt_cmdbuf_sz)) {
-		/* wraparound */
-		/*LINTED*/
-		iommu->aiomt_cmd_tail = (uint32_t *)iommu->aiomt_cmdbuf;
-		cmdtail_off = 0;
-	} else {
-		cmdtail_off = (caddr_t)iommu->aiomt_cmd_tail
-		/*LINTED*/
-		    - iommu->aiomt_cmdbuf;
-	}
-
-	ASSERT(cmdtail_off < iommu->aiomt_cmdbuf_sz);
-
-	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_tail_va),
-	    AMD_IOMMU_CMDTAILPTR, OFF2CMD(cmdtail_off));
-
-	if (cmd == AMD_IOMMU_CMD_COMPL_WAIT) {
-		amd_iommu_wait_for_completion(iommu);
-	} else if (flags & AMD_IOMMU_CMD_FLAGS_COMPL_WAIT) {
-		error = amd_iommu_cmd(iommu, AMD_IOMMU_CMD_COMPL_WAIT,
-		    NULL, 0, 1);
-	}
-
-out:
-	if (!lock_held)
-		mutex_exit(&iommu->aiomt_cmdlock);
-	return (error);
-}
--- a/usr/src/uts/intel/io/amd_iommu/amd_iommu_impl.c	Wed Sep 16 22:03:43 2009 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1880 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-
-/*
- * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#include <sys/sunddi.h>
-#include <sys/iommulib.h>
-#include <sys/amd_iommu.h>
-#include <sys/pci_cap.h>
-#include <sys/bootconf.h>
-#include <sys/ddidmareq.h>
-
-#include "amd_iommu_impl.h"
-#include "amd_iommu_acpi.h"
-#include "amd_iommu_page_tables.h"
-
-static int amd_iommu_fini(amd_iommu_t *iommu);
-static void amd_iommu_teardown_interrupts(amd_iommu_t *iommu);
-static void amd_iommu_stop(amd_iommu_t *iommu);
-
-static int amd_iommu_probe(iommulib_handle_t handle, dev_info_t *rdip);
-static int amd_iommu_allochdl(iommulib_handle_t handle,
-    dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
-    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *dma_handlep);
-static int amd_iommu_freehdl(iommulib_handle_t handle,
-    dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t dma_handle);
-static int amd_iommu_bindhdl(iommulib_handle_t handle, dev_info_t *dip,
-    dev_info_t *rdip, ddi_dma_handle_t dma_handle,
-    struct ddi_dma_req *dmareq, ddi_dma_cookie_t *cookiep,
-    uint_t *ccountp);
-static int amd_iommu_unbindhdl(iommulib_handle_t handle,
-    dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t dma_handle);
-static int amd_iommu_sync(iommulib_handle_t handle, dev_info_t *dip,
-    dev_info_t *rdip, ddi_dma_handle_t dma_handle, off_t off,
-    size_t len, uint_t cache_flags);
-static int amd_iommu_win(iommulib_handle_t handle, dev_info_t *dip,
-    dev_info_t *rdip, ddi_dma_handle_t dma_handle, uint_t win,
-    off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
-    uint_t *ccountp);
-static int amd_iommu_map(iommulib_handle_t handle, dev_info_t *dip,
-    dev_info_t *rdip, struct ddi_dma_req *dmareq,
-    ddi_dma_handle_t *dma_handle);
-static int amd_iommu_mctl(iommulib_handle_t handle, dev_info_t *dip,
-    dev_info_t *rdip, ddi_dma_handle_t dma_handle,
-    enum ddi_dma_ctlops request, off_t *offp, size_t *lenp,
-    caddr_t *objpp, uint_t cache_flags);
-
-static int unmap_current_window(amd_iommu_t *iommu, dev_info_t *rdip,
-    ddi_dma_cookie_t *cookie_array, uint_t ccount, int ncookies, int locked);
-
-extern void *device_arena_alloc(size_t size, int vm_flag);
-extern void device_arena_free(void * vaddr, size_t size);
-
-ddi_dma_attr_t amd_iommu_dma_attr = {
-	DMA_ATTR_V0,
-	0U,				/* dma_attr_addr_lo */
-	0xffffffffffffffffULL,		/* dma_attr_addr_hi */
-	0xffffffffU,			/* dma_attr_count_max */
-	(uint64_t)4096,			/* dma_attr_align */
-	1,				/* dma_attr_burstsizes */
-	64,				/* dma_attr_minxfer */
-	0xffffffffU,			/* dma_attr_maxxfer */
-	0xffffffffU,			/* dma_attr_seg */
-	1,				/* dma_attr_sgllen, variable */
-	64,				/* dma_attr_granular */
-	0				/* dma_attr_flags */
-};
-
-ddi_device_acc_attr_t amd_iommu_devacc = {
-	DDI_DEVICE_ATTR_V0,
-	DDI_NEVERSWAP_ACC,
-	DDI_STRICTORDER_ACC
-};
-
-struct iommulib_ops amd_iommulib_ops = {
-	IOMMU_OPS_VERSION,
-	AMD_IOMMU,
-	"AMD IOMMU Vers. 1",
-	NULL,
-	amd_iommu_probe,
-	amd_iommu_allochdl,
-	amd_iommu_freehdl,
-	amd_iommu_bindhdl,
-	amd_iommu_unbindhdl,
-	amd_iommu_sync,
-	amd_iommu_win,
-	amd_iommu_map,
-	amd_iommu_mctl
-};
-
-static kmutex_t amd_iommu_pgtable_lock;
-
-static int
-amd_iommu_register(amd_iommu_t *iommu)
-{
-	dev_info_t *dip = iommu->aiomt_dip;
-	const char *driver = ddi_driver_name(dip);
-	int instance = ddi_get_instance(dip);
-	iommulib_ops_t *iommulib_ops;
-	iommulib_handle_t handle;
-	const char *f = "amd_iommu_register";
-
-	iommulib_ops = kmem_zalloc(sizeof (iommulib_ops_t), KM_SLEEP);
-
-	*iommulib_ops = amd_iommulib_ops;
-
-	iommulib_ops->ilops_data = (void *)iommu;
-	iommu->aiomt_iommulib_ops = iommulib_ops;
-
-	if (iommulib_iommu_register(dip, iommulib_ops, &handle)
-	    != DDI_SUCCESS) {
-		cmn_err(CE_WARN, "%s: %s%d: Register with iommulib "
-		    "failed idx=%d", f, driver, instance, iommu->aiomt_idx);
-		kmem_free(iommulib_ops, sizeof (iommulib_ops_t));
-		return (DDI_FAILURE);
-	}
-
-	iommu->aiomt_iommulib_handle = handle;
-
-	return (DDI_SUCCESS);
-}
-
-static int
-amd_iommu_unregister(amd_iommu_t *iommu)
-{
-	if (iommu->aiomt_iommulib_handle == NULL) {
-		/* we never registered */
-		return (DDI_SUCCESS);
-	}
-
-	if (iommulib_iommu_unregister(iommu->aiomt_iommulib_handle)
-	    != DDI_SUCCESS) {
-		return (DDI_FAILURE);
-	}
-
-	kmem_free(iommu->aiomt_iommulib_ops, sizeof (iommulib_ops_t));
-	iommu->aiomt_iommulib_ops = NULL;
-	iommu->aiomt_iommulib_handle = NULL;
-
-	return (DDI_SUCCESS);
-}
-
-static int
-amd_iommu_setup_passthru(amd_iommu_t *iommu)
-{
-	gfx_entry_t *gfxp;
-	dev_info_t *dip;
-
-	/*
-	 * Setup passthru mapping for "special" devices
-	 */
-	amd_iommu_set_passthru(iommu, NULL);
-
-	for (gfxp = gfx_devinfo_list; gfxp; gfxp = gfxp->g_next) {
-		gfxp->g_ref++;
-		dip = gfxp->g_dip;
-		if (dip) {
-			amd_iommu_set_passthru(iommu, dip);
-		}
-		gfxp->g_ref--;
-	}
-
-	return (DDI_SUCCESS);
-}
-
-static int
-amd_iommu_start(amd_iommu_t *iommu)
-{
-	dev_info_t *dip = iommu->aiomt_dip;
-	int instance = ddi_get_instance(dip);
-	const char *driver = ddi_driver_name(dip);
-	amd_iommu_acpi_ivhd_t *hinfop;
-	const char *f = "amd_iommu_start";
-
-	hinfop = amd_iommu_lookup_all_ivhd();
-
-	/*
-	 * Disable HT tunnel translation.
-	 * XXX use ACPI
-	 */
-	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
-	    AMD_IOMMU_HT_TUN_ENABLE, 0);
-
-	if (hinfop) {
-		if (amd_iommu_debug) {
-			cmn_err(CE_NOTE,
-			    "amd_iommu: using ACPI for CTRL registers");
-		}
-		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
-		    AMD_IOMMU_ISOC, hinfop->ach_Isoc);
-		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
-		    AMD_IOMMU_RESPASSPW, hinfop->ach_ResPassPW);
-		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
-		    AMD_IOMMU_PASSPW, hinfop->ach_PassPW);
-	}
-
-	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
-	    AMD_IOMMU_INVTO, 5);
-
-	/*
-	 * Device table entry bit 0 (V) controls whether the entry is
-	 * valid for address translation, and bit 128 (IV) controls
-	 * whether interrupt remapping is valid. Setting both to zero
-	 * yields pass-thru; since the table is zeroed on allocation,
-	 * we get pass-thru by default once the IOMMU is enabled.
-	 */
-
-	/* Finally enable the IOMMU ... */
-	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
-	    AMD_IOMMU_ENABLE, 1);
-
-	if (amd_iommu_debug) {
-		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
-		    "Successfully started AMD IOMMU", f, driver, instance,
-		    iommu->aiomt_idx);
-	}
-	cmn_err(CE_NOTE, "AMD IOMMU (%d,%d) enabled",
-	    instance, iommu->aiomt_idx);
-
-	return (DDI_SUCCESS);
-}
-
-static void
-amd_iommu_stop(amd_iommu_t *iommu)
-{
-	dev_info_t *dip = iommu->aiomt_dip;
-	int instance = ddi_get_instance(dip);
-	const char *driver = ddi_driver_name(dip);
-	const char *f = "amd_iommu_stop";
-
-	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
-	    AMD_IOMMU_ENABLE, 0);
-	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
-	    AMD_IOMMU_EVENTINT_ENABLE, 0);
-	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
-	    AMD_IOMMU_COMWAITINT_ENABLE, 0);
-	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
-	    AMD_IOMMU_EVENTLOG_ENABLE, 0);
-
-	/*
-	 * Disable translation on HT tunnel traffic
-	 */
-	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
-	    AMD_IOMMU_HT_TUN_ENABLE, 0);
-	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
-	    AMD_IOMMU_CMDBUF_ENABLE, 0);
-
-	cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
-	    "Successfully stopped AMD IOMMU", f, driver, instance,
-	    iommu->aiomt_idx);
-}
-
-static int
-amd_iommu_setup_tables_and_buffers(amd_iommu_t *iommu)
-{
-	dev_info_t *dip = iommu->aiomt_dip;
-	int instance = ddi_get_instance(dip);
-	const char *driver = ddi_driver_name(dip);
-	uint32_t dma_bufsz;
-	caddr_t addr;
-	uint32_t sz;
-	uint32_t p2sz;
-	int i;
-	uint64_t *dentry;
-	int err;
-	const char *f = "amd_iommu_setup_tables_and_buffers";
-
-	/*
-	 * We will put the Device Table, Command Buffer and
-	 * Event Log in contiguous memory. Allocate the maximum
-	 * size allowed for such structures
-	 * Device Table:  256b * 64K = 32B * 64K
-	 * Command Buffer: 128b * 32K = 16B * 32K
-	 * Event Log:  128b * 32K = 16B * 32K
-	 */
-	iommu->aiomt_devtbl_sz = (1<<AMD_IOMMU_DEVTBL_SZ) * AMD_IOMMU_DEVENT_SZ;
-	iommu->aiomt_cmdbuf_sz = (1<<AMD_IOMMU_CMDBUF_SZ) * AMD_IOMMU_CMD_SZ;
-	iommu->aiomt_eventlog_sz =
-	    (1<<AMD_IOMMU_EVENTLOG_SZ) * AMD_IOMMU_EVENT_SZ;
-
-	dma_bufsz = iommu->aiomt_devtbl_sz + iommu->aiomt_cmdbuf_sz
-	    + iommu->aiomt_eventlog_sz;
-
-	/*
-	 * Alloc a DMA handle.
-	 */
-	err = ddi_dma_alloc_handle(dip, &amd_iommu_dma_attr,
-	    DDI_DMA_SLEEP, NULL, &iommu->aiomt_dmahdl);
-	if (err != DDI_SUCCESS) {
-		cmn_err(CE_WARN, "%s: %s%d: Cannot alloc DMA handle for "
-		    "AMD IOMMU tables and buffers", f, driver, instance);
-		return (DDI_FAILURE);
-	}
-
-	/*
-	 * Alloc memory for tables and buffers
-	 * XXX remove cast to size_t
-	 */
-	err = ddi_dma_mem_alloc(iommu->aiomt_dmahdl, dma_bufsz,
-	    &amd_iommu_devacc, DDI_DMA_CONSISTENT|IOMEM_DATA_UNCACHED,
-	    DDI_DMA_SLEEP,  NULL, (caddr_t *)&iommu->aiomt_dma_bufva,
-	    (size_t *)&iommu->aiomt_dma_mem_realsz, &iommu->aiomt_dma_mem_hdl);
-	if (err != DDI_SUCCESS) {
-		cmn_err(CE_WARN, "%s: %s%d: Cannot alloc memory for DMA "
-		    "to AMD IOMMU tables and buffers", f, driver, instance);
-		iommu->aiomt_dma_bufva = NULL;
-		iommu->aiomt_dma_mem_realsz = 0;
-		ddi_dma_free_handle(&iommu->aiomt_dmahdl);
-		iommu->aiomt_dmahdl = NULL;
-		return (DDI_FAILURE);
-	}
-
-	/*
-	 * The VA must be 4K aligned and >= table size
-	 */
-	ASSERT(((uintptr_t)iommu->aiomt_dma_bufva &
-	    AMD_IOMMU_TABLE_ALIGN) == 0);
-	ASSERT(iommu->aiomt_dma_mem_realsz >= dma_bufsz);
-
-	/*
-	 * Now bind the handle
-	 */
-	err = ddi_dma_addr_bind_handle(iommu->aiomt_dmahdl, NULL,
-	    iommu->aiomt_dma_bufva, iommu->aiomt_dma_mem_realsz,
-	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
-	    NULL, &iommu->aiomt_buf_dma_cookie, &iommu->aiomt_buf_dma_ncookie);
-	if (err != DDI_DMA_MAPPED) {
-		cmn_err(CE_WARN, "%s: %s%d: Cannot bind memory for DMA "
-		    "to AMD IOMMU tables and buffers. bufrealsz=%p",
-		    f, driver, instance,
-		    (void *)(uintptr_t)iommu->aiomt_dma_mem_realsz);
-		iommu->aiomt_buf_dma_cookie.dmac_laddress = 0;
-		iommu->aiomt_buf_dma_cookie.dmac_size = 0;
-		iommu->aiomt_buf_dma_cookie.dmac_type = 0;
-		iommu->aiomt_buf_dma_ncookie = 0;
-		ddi_dma_mem_free(&iommu->aiomt_dma_mem_hdl);
-		iommu->aiomt_dma_mem_hdl = NULL;
-		iommu->aiomt_dma_bufva = NULL;
-		iommu->aiomt_dma_mem_realsz = 0;
-		ddi_dma_free_handle(&iommu->aiomt_dmahdl);
-		iommu->aiomt_dmahdl = NULL;
-		return (DDI_FAILURE);
-	}
-
-	/*
-	 * We assume the DMA engine on the IOMMU is capable of handling the
-	 * whole table buffer in a single cookie. If not and multiple cookies
-	 * are needed we fail.
-	 */
-	if (iommu->aiomt_buf_dma_ncookie != 1) {
-		cmn_err(CE_WARN, "%s: %s%d: Cannot handle multiple "
-		    "cookies for DMA to AMD IOMMU tables and buffers. "
-		    "#cookies=%u", f, driver, instance,
-		    iommu->aiomt_buf_dma_ncookie);
-		(void) ddi_dma_unbind_handle(iommu->aiomt_dmahdl);
-		iommu->aiomt_buf_dma_cookie.dmac_laddress = 0;
-		iommu->aiomt_buf_dma_cookie.dmac_size = 0;
-		iommu->aiomt_buf_dma_cookie.dmac_type = 0;
-		iommu->aiomt_buf_dma_ncookie = 0;
-		ddi_dma_mem_free(&iommu->aiomt_dma_mem_hdl);
-		iommu->aiomt_dma_mem_hdl = NULL;
-		iommu->aiomt_dma_bufva = NULL;
-		iommu->aiomt_dma_mem_realsz = 0;
-		ddi_dma_free_handle(&iommu->aiomt_dmahdl);
-		iommu->aiomt_dmahdl = NULL;
-		return (DDI_FAILURE);
-	}
-
-	/*
-	 * The address in the cookie must be 4K aligned and >= table size
-	 */
-	ASSERT((iommu->aiomt_buf_dma_cookie.dmac_cookie_addr
-	    & AMD_IOMMU_TABLE_ALIGN) == 0);
-	ASSERT(iommu->aiomt_buf_dma_cookie.dmac_size
-	    <= iommu->aiomt_dma_mem_realsz);
-	ASSERT(iommu->aiomt_buf_dma_cookie.dmac_size >= dma_bufsz);
-
-	/*
-	 * Setup the device table pointers in the iommu struct as
-	 * well as the IOMMU device table register
-	 */
-	iommu->aiomt_devtbl = iommu->aiomt_dma_bufva;
-	bzero(iommu->aiomt_devtbl, iommu->aiomt_devtbl_sz);
-
-	/*
-	 * Set V=1 and TV=0, so any inadvertent pass-through causes
-	 * a page fault. Also set the SE bit so we aren't swamped
-	 * with page fault messages
-	 */
-	for (i = 0; i <= AMD_IOMMU_MAX_DEVICEID; i++) {
-		/*LINTED*/
-		dentry = (uint64_t *)&iommu->aiomt_devtbl
-		    [i * AMD_IOMMU_DEVTBL_ENTRY_SZ];
-		AMD_IOMMU_REG_SET64(dentry, AMD_IOMMU_DEVTBL_V, 1);
-		AMD_IOMMU_REG_SET64(&(dentry[1]), AMD_IOMMU_DEVTBL_SE, 1);
-	}
-
-	addr = (caddr_t)(uintptr_t)iommu->aiomt_buf_dma_cookie.dmac_cookie_addr;
-	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_devtbl_va),
-	    AMD_IOMMU_DEVTABBASE, ((uint64_t)(uintptr_t)addr) >> 12);
-	sz = (iommu->aiomt_devtbl_sz >> 12) - 1;
-	ASSERT(sz <= ((1 << 9) - 1));
-	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_devtbl_va),
-	    AMD_IOMMU_DEVTABSIZE, sz);
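-
-	/*
-	 * For illustration: the 2 MB device table spans 2 MB / 4 KB = 512
-	 * pages, and DEVTABSIZE encodes the size in 4 KB pages minus one,
-	 * so sz = 511 here, which fits the 9-bit field checked above.
-	 */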
-
-	/*
-	 * Setup the command buffer pointers
-	 */
-	iommu->aiomt_cmdbuf = iommu->aiomt_devtbl +
-	    iommu->aiomt_devtbl_sz;
-	bzero(iommu->aiomt_cmdbuf, iommu->aiomt_cmdbuf_sz);
-	addr += iommu->aiomt_devtbl_sz;
-	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_va),
-	    AMD_IOMMU_COMBASE, ((uint64_t)(uintptr_t)addr) >> 12);
-
-	p2sz = AMD_IOMMU_CMDBUF_SZ;
-	ASSERT(p2sz >= AMD_IOMMU_CMDBUF_MINSZ &&
-	    p2sz <= AMD_IOMMU_CMDBUF_MAXSZ);
-	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_va),
-	    AMD_IOMMU_COMLEN, p2sz);
-	/*LINTED*/
-	iommu->aiomt_cmd_tail = (uint32_t *)iommu->aiomt_cmdbuf;
-	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_head_va),
-	    AMD_IOMMU_CMDHEADPTR, 0);
-	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_tail_va),
-	    AMD_IOMMU_CMDTAILPTR, 0);
-
-	/*
-	 * Setup the event log pointers
-	 */
-	iommu->aiomt_eventlog = iommu->aiomt_cmdbuf +
-	    iommu->aiomt_cmdbuf_sz;
-	bzero(iommu->aiomt_eventlog, iommu->aiomt_eventlog_sz);
-	addr += iommu->aiomt_cmdbuf_sz;
-	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_va),
-	    AMD_IOMMU_EVENTBASE, ((uint64_t)(uintptr_t)addr) >> 12);
-	p2sz = AMD_IOMMU_EVENTLOG_SZ;
-	ASSERT(p2sz >= AMD_IOMMU_EVENTLOG_MINSZ &&
-	    p2sz <= AMD_IOMMU_EVENTLOG_MAXSZ);
-	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_va),
-	    AMD_IOMMU_EVENTLEN, p2sz);
-	/*LINTED*/
-	iommu->aiomt_event_head = (uint32_t *)iommu->aiomt_eventlog;
-	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_head_va),
-	    AMD_IOMMU_EVENTHEADPTR, 0);
-	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_tail_va),
-	    AMD_IOMMU_EVENTTAILPTR, 0);
-
-	/* dma sync so device sees this init */
-	SYNC_FORDEV(iommu->aiomt_dmahdl);
-
-	if (amd_iommu_debug & AMD_IOMMU_DEBUG_TABLES) {
-		cmn_err(CE_NOTE, "%s: %s%d: successfully setup AMD IOMMU "
-		    "tables, idx=%d", f, driver, instance, iommu->aiomt_idx);
-	}
-
-	return (DDI_SUCCESS);
-}
-
-static void
-amd_iommu_teardown_tables_and_buffers(amd_iommu_t *iommu)
-{
-	dev_info_t *dip = iommu->aiomt_dip;
-	int instance = ddi_get_instance(dip);
-	const char *driver = ddi_driver_name(dip);
-	const char *f = "amd_iommu_teardown_tables_and_buffers";
-
-	iommu->aiomt_eventlog = NULL;
-	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_va),
-	    AMD_IOMMU_EVENTBASE, 0);
-	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_va),
-	    AMD_IOMMU_EVENTLEN, 0);
-
-	iommu->aiomt_cmdbuf = NULL;
-	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_va),
-	    AMD_IOMMU_COMBASE, 0);
-	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_va),
-	    AMD_IOMMU_COMLEN, 0);
-
-	iommu->aiomt_devtbl = NULL;
-	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_devtbl_va),
-	    AMD_IOMMU_DEVTABBASE, 0);
-	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_devtbl_va),
-	    AMD_IOMMU_DEVTABSIZE, 0);
-
-	if (iommu->aiomt_dmahdl == NULL)
-		return;
-
-	/* Unbind the handle */
-	if (ddi_dma_unbind_handle(iommu->aiomt_dmahdl) != DDI_SUCCESS) {
-		cmn_err(CE_WARN, "%s: %s%d: failed to unbind handle: "
-		    "%p for IOMMU idx=%d", f, driver, instance,
-		    (void *)iommu->aiomt_dmahdl, iommu->aiomt_idx);
-	}
-	iommu->aiomt_buf_dma_cookie.dmac_laddress = 0;
-	iommu->aiomt_buf_dma_cookie.dmac_size = 0;
-	iommu->aiomt_buf_dma_cookie.dmac_type = 0;
-	iommu->aiomt_buf_dma_ncookie = 0;
-
-	/* Free the table memory allocated for DMA */
-	ddi_dma_mem_free(&iommu->aiomt_dma_mem_hdl);
-	iommu->aiomt_dma_mem_hdl = NULL;
-	iommu->aiomt_dma_bufva = NULL;
-	iommu->aiomt_dma_mem_realsz = 0;
-
-	/* Free the DMA handle */
-	ddi_dma_free_handle(&iommu->aiomt_dmahdl);
-	iommu->aiomt_dmahdl = NULL;
-}
-
-static void
-amd_iommu_enable_interrupts(amd_iommu_t *iommu)
-{
-	ASSERT(AMD_IOMMU_REG_GET64(REGADDR64(iommu->aiomt_reg_status_va),
-	    AMD_IOMMU_CMDBUF_RUN) == 0);
-	ASSERT(AMD_IOMMU_REG_GET64(REGADDR64(iommu->aiomt_reg_status_va),
-	    AMD_IOMMU_EVENT_LOG_RUN) == 0);
-
-	/* Must be set prior to enabling command buffer and event logging */
-	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
-	    AMD_IOMMU_CMDBUF_ENABLE, 1);
-	/* No interrupts for completion wait - too heavyweight; use polling */
-	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
-	    AMD_IOMMU_COMWAITINT_ENABLE, 0);
-	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
-	    AMD_IOMMU_EVENTLOG_ENABLE, 1);
-	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
-	    AMD_IOMMU_EVENTINT_ENABLE, 1);
-}
-
-static int
-amd_iommu_setup_exclusion(amd_iommu_t *iommu)
-{
-	amd_iommu_acpi_ivmd_t *minfop;
-
-	minfop = amd_iommu_lookup_all_ivmd();
-
-	if (minfop && minfop->acm_ExclRange == 1) {
-		cmn_err(CE_NOTE, "Programming exclusion range");
-		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
-		    AMD_IOMMU_EXCL_BASE_ADDR,
-		    minfop->acm_ivmd_phys_start >> 12);
-		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
-		    AMD_IOMMU_EXCL_BASE_ALLOW, 1);
-		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
-		    AMD_IOMMU_EXCL_BASE_EXEN, 1);
-		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_lim_va),
-		    AMD_IOMMU_EXCL_LIM, (minfop->acm_ivmd_phys_start +
-		    minfop->acm_ivmd_phys_len) >> 12);
-	} else {
-		if (amd_iommu_debug) {
-			cmn_err(CE_NOTE, "Skipping exclusion range");
-		}
-		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
-		    AMD_IOMMU_EXCL_BASE_ADDR, 0);
-		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
-		    AMD_IOMMU_EXCL_BASE_ALLOW, 1);
-		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
-		    AMD_IOMMU_EXCL_BASE_EXEN, 0);
-		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_lim_va),
-		    AMD_IOMMU_EXCL_LIM, 0);
-	}
-
-	return (DDI_SUCCESS);
-}
-
-static void
-amd_iommu_teardown_exclusion(amd_iommu_t *iommu)
-{
-	(void) amd_iommu_setup_exclusion(iommu);
-}
-
-static uint_t
-amd_iommu_intr_handler(caddr_t arg1, caddr_t arg2)
-{
-	/*LINTED*/
-	amd_iommu_t *iommu = (amd_iommu_t *)arg1;
-	dev_info_t *dip = iommu->aiomt_dip;
-	int instance = ddi_get_instance(dip);
-	const char *driver = ddi_driver_name(dip);
-	const char *f = "amd_iommu_intr_handler";
-
-	ASSERT(arg1);
-	ASSERT(arg2 == NULL);
-
-	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
-		cmn_err(CE_NOTE, "%s: %s%d: IOMMU unit idx=%d. In INTR handler",
-		    f, driver, instance, iommu->aiomt_idx);
-	}
-
-	if (AMD_IOMMU_REG_GET64(REGADDR64(iommu->aiomt_reg_status_va),
-	    AMD_IOMMU_EVENT_LOG_INT) == 1) {
-		if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
-			cmn_err(CE_NOTE, "%s: %s%d: IOMMU unit idx=%d "
-			    "Event Log Interrupt", f, driver, instance,
-			    iommu->aiomt_idx);
-		}
-		(void) amd_iommu_read_log(iommu, AMD_IOMMU_LOG_DISPLAY);
-		WAIT_SEC(1);
-		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_status_va),
-		    AMD_IOMMU_EVENT_LOG_INT, 1);
-		return (DDI_INTR_CLAIMED);
-	}
-
-	if (AMD_IOMMU_REG_GET64(REGADDR64(iommu->aiomt_reg_status_va),
-	    AMD_IOMMU_EVENT_OVERFLOW_INT) == 1) {
-		cmn_err(CE_NOTE, "!%s: %s%d: IOMMU unit idx=%d "
-		    "Event Overflow Interrupt", f, driver, instance,
-		    iommu->aiomt_idx);
-		(void) amd_iommu_read_log(iommu, AMD_IOMMU_LOG_DISCARD);
-		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_status_va),
-		    AMD_IOMMU_EVENT_LOG_INT, 1);
-		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_status_va),
-		    AMD_IOMMU_EVENT_OVERFLOW_INT, 1);
-		return (DDI_INTR_CLAIMED);
-	}
-
-	return (DDI_INTR_UNCLAIMED);
-}
-
-
-static int
-amd_iommu_setup_interrupts(amd_iommu_t *iommu)
-{
-	dev_info_t *dip = iommu->aiomt_dip;
-	int instance = ddi_get_instance(dip);
-	const char *driver = ddi_driver_name(dip);
-	int intrcap0;
-	int intrcapN;
-	int type;
-	int err;
-	int req;
-	int avail;
-	int p2req;
-	int actual;
-	int i;
-	int j;
-	const char *f = "amd_iommu_setup_interrupts";
-
-	if (ddi_intr_get_supported_types(dip, &type) != DDI_SUCCESS) {
-		cmn_err(CE_WARN, "%s: %s%d: ddi_intr_get_supported_types "
-		    "failed: idx=%d", f, driver, instance, iommu->aiomt_idx);
-		return (DDI_FAILURE);
-	}
-
-	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
-		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
-		    "Interrupt types supported = 0x%x", f, driver, instance,
-		    iommu->aiomt_idx, type);
-	}
-
-	/*
-	 * for now we only support MSI
-	 */
-	if ((type & DDI_INTR_TYPE_MSI) == 0) {
-		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d. "
-		    "MSI interrupts not supported. Failing init.",
-		    f, driver, instance, iommu->aiomt_idx);
-		return (DDI_FAILURE);
-	}
-
-	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
-		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. MSI supported",
-		    f, driver, instance, iommu->aiomt_idx);
-	}
-
-	err = ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_MSI, &req);
-	if (err != DDI_SUCCESS) {
-		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d. "
-		    "ddi_intr_get_nintrs failed err = %d",
-		    f, driver, instance, iommu->aiomt_idx, err);
-		return (DDI_FAILURE);
-	}
-
-	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
-		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
-		    "MSI number of interrupts requested: %d",
-		    f, driver, instance, iommu->aiomt_idx, req);
-	}
-
-	if (req == 0) {
-		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: 0 MSI "
-		    "interrupts requested. Failing init", f,
-		    driver, instance, iommu->aiomt_idx);
-		return (DDI_FAILURE);
-	}
-
-	err = ddi_intr_get_navail(dip, DDI_INTR_TYPE_MSI, &avail);
-	if (err != DDI_SUCCESS) {
-		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d "
-		    "ddi_intr_get_navail failed err = %d", f,
-		    driver, instance, iommu->aiomt_idx, err);
-		return (DDI_FAILURE);
-	}
-
-	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
-		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
-		    "MSI number of interrupts available: %d",
-		    f, driver, instance, iommu->aiomt_idx, avail);
-	}
-
-	if (avail == 0) {
-		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: 0 MSI "
-		    "interrupts available. Failing init", f,
-		    driver, instance, iommu->aiomt_idx);
-		return (DDI_FAILURE);
-	}
-
-	if (avail < req) {
-		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: MSI "
-		    "interrupts: requested (%d) > available (%d). "
-		    "Failing init", f, driver, instance, iommu->aiomt_idx,
-		    req, avail);
-		return (DDI_FAILURE);
-	}
-
-	/* Allocate memory for DDI interrupt handles */
-	iommu->aiomt_intr_htable_sz = req * sizeof (ddi_intr_handle_t);
-	iommu->aiomt_intr_htable = kmem_zalloc(iommu->aiomt_intr_htable_sz,
-	    KM_SLEEP);
-
-	iommu->aiomt_intr_state = AMD_IOMMU_INTR_TABLE;
-
-	/* Round req down to a power of two, as MSI allocation requires */
-	p2req = 0;
-	while (1<<p2req <= req)
-		p2req++;
-	p2req--;
-	req = 1<<p2req;
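-
-	/*
-	 * For illustration: with req = 3 the loop exits at p2req = 2,
-	 * the decrement leaves p2req = 1, and req becomes 1 << 1 = 2,
-	 * the largest power of two not exceeding the original request.
-	 */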
-
-	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
-		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
-		    "MSI power of 2 number of interrupts: %d,%d",
-		    f, driver, instance, iommu->aiomt_idx, p2req, req);
-	}
-
-	err = ddi_intr_alloc(iommu->aiomt_dip, iommu->aiomt_intr_htable,
-	    DDI_INTR_TYPE_MSI, 0, req, &actual, DDI_INTR_ALLOC_STRICT);
-	if (err != DDI_SUCCESS) {
-		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
-		    "ddi_intr_alloc failed: err = %d",
-		    f, driver, instance, iommu->aiomt_idx, err);
-		amd_iommu_teardown_interrupts(iommu);
-		return (DDI_FAILURE);
-	}
-
-	iommu->aiomt_actual_intrs = actual;
-	iommu->aiomt_intr_state = AMD_IOMMU_INTR_ALLOCED;
-
-	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
-		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
-		    "number of interrupts actually allocated %d",
-		    f, driver, instance, iommu->aiomt_idx, actual);
-	}
-
-	if (iommu->aiomt_actual_intrs < req) {
-		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
-		    "ddi_intr_alloc failed: actual (%d) < req (%d)",
-		    f, driver, instance, iommu->aiomt_idx,
-		    iommu->aiomt_actual_intrs, req);
-		amd_iommu_teardown_interrupts(iommu);
-		return (DDI_FAILURE);
-	}
-
-	for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
-		if (ddi_intr_add_handler(iommu->aiomt_intr_htable[i],
-		    amd_iommu_intr_handler, (void *)iommu, NULL)
-		    != DDI_SUCCESS) {
-			cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
-			    "ddi_intr_add_handler failed: intr = %d, err = %d",
-			    f, driver, instance, iommu->aiomt_idx, i, err);
-			for (j = 0; j < i; j++) {
-				(void) ddi_intr_remove_handler(
-				    iommu->aiomt_intr_htable[j]);
-			}
-			amd_iommu_teardown_interrupts(iommu);
-			return (DDI_FAILURE);
-		}
-	}
-	iommu->aiomt_intr_state = AMD_IOMMU_INTR_HANDLER;
-
-	intrcap0 = intrcapN = -1;
-	if (ddi_intr_get_cap(iommu->aiomt_intr_htable[0], &intrcap0)
-	    != DDI_SUCCESS ||
-	    ddi_intr_get_cap(
-	    iommu->aiomt_intr_htable[iommu->aiomt_actual_intrs - 1], &intrcapN)
-	    != DDI_SUCCESS || intrcap0 != intrcapN) {
-		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
-		    "ddi_intr_get_cap failed or inconsistent cap among "
-		    "interrupts: intrcap0 (%d) < intrcapN (%d)",
-		    f, driver, instance, iommu->aiomt_idx, intrcap0, intrcapN);
-		amd_iommu_teardown_interrupts(iommu);
-		return (DDI_FAILURE);
-	}
-	iommu->aiomt_intr_cap = intrcap0;
-
-	if (intrcap0 & DDI_INTR_FLAG_BLOCK) {
-		/* Need to call block enable */
-		if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
-			cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d: "
-			    "Need to call block enable",
-			    f, driver, instance, iommu->aiomt_idx);
-		}
-		if (ddi_intr_block_enable(iommu->aiomt_intr_htable,
-		    iommu->aiomt_actual_intrs) != DDI_SUCCESS) {
-			cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
-			    "ddi_intr_block enable failed ", f, driver,
-			    instance, iommu->aiomt_idx);
-			(void) ddi_intr_block_disable(iommu->aiomt_intr_htable,
-			    iommu->aiomt_actual_intrs);
-			amd_iommu_teardown_interrupts(iommu);
-			return (DDI_FAILURE);
-		}
-	} else {
-		if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
-			cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d: "
-			    "Need to call individual enable",
-			    f, driver, instance, iommu->aiomt_idx);
-		}
-		for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
-			if (ddi_intr_enable(iommu->aiomt_intr_htable[i])
-			    != DDI_SUCCESS) {
-				cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
-				    "ddi_intr_enable failed: intr = %d", f,
-				    driver, instance, iommu->aiomt_idx, i);
-				for (j = 0; j < i; j++) {
-					(void) ddi_intr_disable(
-					    iommu->aiomt_intr_htable[j]);
-				}
-				amd_iommu_teardown_interrupts(iommu);
-				return (DDI_FAILURE);
-			}
-		}
-	}
-	iommu->aiomt_intr_state = AMD_IOMMU_INTR_ENABLED;
-
-	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
-		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d: "
-		    "Interrupts successfully %s enabled. # of interrupts = %d",
-		    f, driver, instance, iommu->aiomt_idx,
-		    (intrcap0 & DDI_INTR_FLAG_BLOCK) ? "(block)" :
-		    "(individually)", iommu->aiomt_actual_intrs);
-	}
-
-	return (DDI_SUCCESS);
-}
-
-static void
-amd_iommu_teardown_interrupts(amd_iommu_t *iommu)
-{
-	int i;
-
-	if (iommu->aiomt_intr_state & AMD_IOMMU_INTR_ENABLED) {
-		if (iommu->aiomt_intr_cap & DDI_INTR_FLAG_BLOCK) {
-			(void) ddi_intr_block_disable(iommu->aiomt_intr_htable,
-			    iommu->aiomt_actual_intrs);
-		} else {
-			for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
-				(void) ddi_intr_disable(
-				    iommu->aiomt_intr_htable[i]);
-			}
-		}
-	}
-
-	if (iommu->aiomt_intr_state & AMD_IOMMU_INTR_HANDLER) {
-		for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
-			(void) ddi_intr_remove_handler(
-			    iommu->aiomt_intr_htable[i]);
-		}
-	}
-
-	if (iommu->aiomt_intr_state & AMD_IOMMU_INTR_ALLOCED) {
-		for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
-			(void) ddi_intr_free(iommu->aiomt_intr_htable[i]);
-		}
-	}
-	if (iommu->aiomt_intr_state & AMD_IOMMU_INTR_TABLE) {
-		kmem_free(iommu->aiomt_intr_htable,
-		    iommu->aiomt_intr_htable_sz);
-	}
-	iommu->aiomt_intr_htable = NULL;
-	iommu->aiomt_intr_htable_sz = 0;
-	iommu->aiomt_intr_state = AMD_IOMMU_INTR_INVALID;
-}
-
-static amd_iommu_t *
-amd_iommu_init(dev_info_t *dip, ddi_acc_handle_t handle, int idx,
-    uint16_t cap_base)
-{
-	amd_iommu_t *iommu;
-	int instance = ddi_get_instance(dip);
-	const char *driver = ddi_driver_name(dip);
-	uint32_t caphdr;
-	uint32_t low_addr32;
-	uint32_t hi_addr32;
-	uint32_t range;
-	uint32_t misc;
-	uint64_t pgoffset;
-	amd_iommu_acpi_global_t *global;
-	amd_iommu_acpi_ivhd_t *hinfop;
-	const char *f = "amd_iommu_init";
-
-	global = amd_iommu_lookup_acpi_global();
-	hinfop = amd_iommu_lookup_any_ivhd();
-
-	low_addr32 = PCI_CAP_GET32(handle, 0, cap_base,
-	    AMD_IOMMU_CAP_ADDR_LOW_OFF);
-	if (!(low_addr32 & AMD_IOMMU_REG_ADDR_LOCKED)) {
-		cmn_err(CE_WARN, "%s: %s%d: capability registers not locked. "
-		    "Unable to use IOMMU unit idx=%d - skipping ...", f, driver,
-		    instance, idx);
-		return (NULL);
-	}
-
-	iommu = kmem_zalloc(sizeof (amd_iommu_t), KM_SLEEP);
-	mutex_init(&iommu->aiomt_mutex, NULL, MUTEX_DRIVER, NULL);
-	mutex_enter(&iommu->aiomt_mutex);
-
-	mutex_init(&iommu->aiomt_cmdlock, NULL, MUTEX_DRIVER, NULL);
-	mutex_init(&iommu->aiomt_eventlock, NULL, MUTEX_DRIVER, NULL);
-
-	iommu->aiomt_dip = dip;
-	iommu->aiomt_idx = idx;
-
-	/*
-	 * Since everything in the capability block is locked and RO at this
-	 * point, copy everything into the IOMMU struct
-	 */
-
-	/* Get cap header */
-	caphdr = PCI_CAP_GET32(handle, 0, cap_base, AMD_IOMMU_CAP_HDR_OFF);
-	iommu->aiomt_cap_hdr = caphdr;
-	iommu->aiomt_npcache = AMD_IOMMU_REG_GET32(&caphdr,
-	    AMD_IOMMU_CAP_NPCACHE);
-	iommu->aiomt_httun = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_HTTUN);
-
-	if (hinfop)
-		iommu->aiomt_iotlb = hinfop->ach_IotlbSup;
-	else
-		iommu->aiomt_iotlb =
-		    AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_IOTLB);
-
-	iommu->aiomt_captype = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_TYPE);
-	iommu->aiomt_capid = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_ID);
-
-	/*
-	 * Get address of IOMMU control registers
-	 */
-	hi_addr32 = PCI_CAP_GET32(handle, 0, cap_base,
-	    AMD_IOMMU_CAP_ADDR_HI_OFF);
-	iommu->aiomt_low_addr32 = low_addr32;
-	iommu->aiomt_hi_addr32 = hi_addr32;
-	low_addr32 &= ~AMD_IOMMU_REG_ADDR_LOCKED;
-
-	if (hinfop) {
-		iommu->aiomt_reg_pa =  hinfop->ach_IOMMU_reg_base;
-		ASSERT(hinfop->ach_IOMMU_pci_seg == 0);
-	} else {
-		iommu->aiomt_reg_pa =  ((uint64_t)hi_addr32 << 32 | low_addr32);
-	}
-
-	/*
-	 * Get cap range reg
-	 */
-	range = PCI_CAP_GET32(handle, 0, cap_base, AMD_IOMMU_CAP_RANGE_OFF);
-	iommu->aiomt_range = range;
-	iommu->aiomt_rng_valid = AMD_IOMMU_REG_GET32(&range,
-	    AMD_IOMMU_RNG_VALID);
-	if (iommu->aiomt_rng_valid) {
-		iommu->aiomt_rng_bus = AMD_IOMMU_REG_GET32(&range,
-		    AMD_IOMMU_RNG_BUS);
-		iommu->aiomt_first_devfn = AMD_IOMMU_REG_GET32(&range,
-		    AMD_IOMMU_FIRST_DEVFN);
-		iommu->aiomt_last_devfn = AMD_IOMMU_REG_GET32(&range,
-		    AMD_IOMMU_LAST_DEVFN);
-	} else {
-		iommu->aiomt_rng_bus = 0;
-		iommu->aiomt_first_devfn = 0;
-		iommu->aiomt_last_devfn = 0;
-	}
-
-	if (hinfop)
-		iommu->aiomt_ht_unitid = hinfop->ach_IOMMU_UnitID;
-	else
-		iommu->aiomt_ht_unitid = AMD_IOMMU_REG_GET32(&range,
-		    AMD_IOMMU_HT_UNITID);
-
-	/*
-	 * Get cap misc reg
-	 */
-	misc = PCI_CAP_GET32(handle, 0, cap_base, AMD_IOMMU_CAP_MISC_OFF);
-	iommu->aiomt_misc = misc;
-
-	if (global) {
-		iommu->aiomt_htatsresv = global->acg_HtAtsResv;
-		iommu->aiomt_vasize = global->acg_VAsize;
-		iommu->aiomt_pasize = global->acg_PAsize;
-	} else {
-		iommu->aiomt_htatsresv = AMD_IOMMU_REG_GET32(&misc,
-		    AMD_IOMMU_HT_ATSRSV);
-		iommu->aiomt_vasize = AMD_IOMMU_REG_GET32(&misc,
-		    AMD_IOMMU_VA_SIZE);
-		iommu->aiomt_pasize = AMD_IOMMU_REG_GET32(&misc,
-		    AMD_IOMMU_PA_SIZE);
-	}
-
-	if (hinfop) {
-		iommu->aiomt_msinum = hinfop->ach_IOMMU_MSInum;
-	} else {
-		iommu->aiomt_msinum =
-		    AMD_IOMMU_REG_GET32(&misc, AMD_IOMMU_MSINUM);
-	}
-
-	/*
-	 * Set up mapping between control registers PA and VA
-	 */
-	pgoffset = iommu->aiomt_reg_pa & MMU_PAGEOFFSET;
-	ASSERT(pgoffset == 0);
-	iommu->aiomt_reg_pages = mmu_btopr(AMD_IOMMU_REG_SIZE + pgoffset);
-	iommu->aiomt_reg_size = mmu_ptob(iommu->aiomt_reg_pages);
-
-	iommu->aiomt_va = (uintptr_t)device_arena_alloc(
-	    ptob(iommu->aiomt_reg_pages), VM_SLEEP);
-	if (iommu->aiomt_va == 0) {
-		cmn_err(CE_WARN, "%s: %s%d: Failed to alloc VA for IOMMU "
-		    "control regs. Skipping IOMMU idx=%d", f, driver,
-		    instance, idx);
-		mutex_exit(&iommu->aiomt_mutex);
-		(void) amd_iommu_fini(iommu);
-		return (NULL);
-	}
-
-	hat_devload(kas.a_hat, (void *)(uintptr_t)iommu->aiomt_va,
-	    iommu->aiomt_reg_size,
-	    mmu_btop(iommu->aiomt_reg_pa), PROT_READ | PROT_WRITE
-	    | HAT_STRICTORDER, HAT_LOAD_LOCK);
-
-	iommu->aiomt_reg_va = iommu->aiomt_va + pgoffset;
-
-	/*
-	 * Setup the various control register's VA
-	 */
-	iommu->aiomt_reg_devtbl_va = iommu->aiomt_reg_va +
-	    AMD_IOMMU_DEVTBL_REG_OFF;
-	iommu->aiomt_reg_cmdbuf_va = iommu->aiomt_reg_va +
-	    AMD_IOMMU_CMDBUF_REG_OFF;
-	iommu->aiomt_reg_eventlog_va = iommu->aiomt_reg_va +
-	    AMD_IOMMU_EVENTLOG_REG_OFF;
-	iommu->aiomt_reg_ctrl_va = iommu->aiomt_reg_va +
-	    AMD_IOMMU_CTRL_REG_OFF;
-	iommu->aiomt_reg_excl_base_va = iommu->aiomt_reg_va +
-	    AMD_IOMMU_EXCL_BASE_REG_OFF;
-	iommu->aiomt_reg_excl_lim_va = iommu->aiomt_reg_va +
-	    AMD_IOMMU_EXCL_LIM_REG_OFF;
-	iommu->aiomt_reg_cmdbuf_head_va = iommu->aiomt_reg_va +
-	    AMD_IOMMU_CMDBUF_HEAD_REG_OFF;
-	iommu->aiomt_reg_cmdbuf_tail_va = iommu->aiomt_reg_va +
-	    AMD_IOMMU_CMDBUF_TAIL_REG_OFF;
-	iommu->aiomt_reg_eventlog_head_va = iommu->aiomt_reg_va +
-	    AMD_IOMMU_EVENTLOG_HEAD_REG_OFF;
-	iommu->aiomt_reg_eventlog_tail_va = iommu->aiomt_reg_va +
-	    AMD_IOMMU_EVENTLOG_TAIL_REG_OFF;
-	iommu->aiomt_reg_status_va = iommu->aiomt_reg_va +
-	    AMD_IOMMU_STATUS_REG_OFF;
-
-
-	/*
-	 * Setup the DEVICE table, CMD buffer, and LOG buffer in
-	 * memory and setup DMA access to this memory location
-	 */
-	if (amd_iommu_setup_tables_and_buffers(iommu) != DDI_SUCCESS) {
-		mutex_exit(&iommu->aiomt_mutex);
-		(void) amd_iommu_fini(iommu);
-		return (NULL);
-	}
-
-	if (amd_iommu_setup_exclusion(iommu) != DDI_SUCCESS) {
-		mutex_exit(&iommu->aiomt_mutex);
-		(void) amd_iommu_fini(iommu);
-		return (NULL);
-	}
-
-	amd_iommu_enable_interrupts(iommu);
-
-	if (amd_iommu_setup_interrupts(iommu) != DDI_SUCCESS) {
-		mutex_exit(&iommu->aiomt_mutex);
-		(void) amd_iommu_fini(iommu);
-		return (NULL);
-	}
-
-	/*
-	 * Need to set up the domain table before enabling gfx bypass
-	 */
-	amd_iommu_init_page_tables(iommu);
-
-	/*
-	 * Set pass-thru for special devices like IOAPIC and HPET
-	 *
-	 * Also, gfx devices don't use DDI for DMA. No need to register
-	 * before setting up gfx passthru
-	 */
-	if (amd_iommu_setup_passthru(iommu) != DDI_SUCCESS) {
-		mutex_exit(&iommu->aiomt_mutex);
-		(void) amd_iommu_fini(iommu);
-		return (NULL);
-	}
-
-	if (amd_iommu_start(iommu) != DDI_SUCCESS) {
-		mutex_exit(&iommu->aiomt_mutex);
-		(void) amd_iommu_fini(iommu);
-		return (NULL);
-	}
-
-	/* xxx register/start race  */
-	if (amd_iommu_register(iommu) != DDI_SUCCESS) {
-		mutex_exit(&iommu->aiomt_mutex);
-		(void) amd_iommu_fini(iommu);
-		return (NULL);
-	}
-
-	if (amd_iommu_debug) {
-		cmn_err(CE_NOTE, "%s: %s%d: IOMMU idx=%d inited.", f, driver,
-		    instance, idx);
-	}
-
-	return (iommu);
-}
-
-static int
-amd_iommu_fini(amd_iommu_t *iommu)
-{
-	int idx = iommu->aiomt_idx;
-	dev_info_t *dip = iommu->aiomt_dip;
-	int instance = ddi_get_instance(dip);
-	const char *driver = ddi_driver_name(dip);
-	const char *f = "amd_iommu_fini";
-
-	mutex_enter(&iommu->aiomt_mutex);
-	if (amd_iommu_unregister(iommu) != DDI_SUCCESS) {
-		cmn_err(CE_NOTE, "%s: %s%d: Fini of IOMMU unit failed. "
-		    "idx = %d", f, driver, instance, idx);
-		return (DDI_FAILURE);
-	}
-	amd_iommu_stop(iommu);
-	amd_iommu_fini_page_tables(iommu);
-	amd_iommu_teardown_interrupts(iommu);
-	amd_iommu_teardown_exclusion(iommu);
-	amd_iommu_teardown_tables_and_buffers(iommu);
-	if (iommu->aiomt_va != 0) {
-		hat_unload(kas.a_hat, (void *)(uintptr_t)iommu->aiomt_va,
-		    iommu->aiomt_reg_size, HAT_UNLOAD_UNLOCK);
-		device_arena_free((void *)(uintptr_t)iommu->aiomt_va,
-		    ptob(iommu->aiomt_reg_pages));
-		iommu->aiomt_va = 0;
-		iommu->aiomt_reg_va = 0;
-	}
-	mutex_destroy(&iommu->aiomt_eventlock);
-	mutex_destroy(&iommu->aiomt_cmdlock);
-	mutex_exit(&iommu->aiomt_mutex);
-	mutex_destroy(&iommu->aiomt_mutex);
-	kmem_free(iommu, sizeof (amd_iommu_t));
-
-	cmn_err(CE_NOTE, "%s: %s%d: Fini of IOMMU unit complete. idx = %d",
-	    f, driver, instance, idx);
-
-	return (DDI_SUCCESS);
-}
-
-int
-amd_iommu_setup(dev_info_t *dip, amd_iommu_state_t *statep)
-{
-	int instance = ddi_get_instance(dip);
-	const char *driver = ddi_driver_name(dip);
-	ddi_acc_handle_t handle;
-	uint8_t base_class;
-	uint8_t sub_class;
-	uint8_t prog_class;
-	int idx;
-	uint32_t id;
-	uint16_t cap_base;
-	uint32_t caphdr;
-	uint8_t cap_type;
-	uint8_t cap_id;
-	amd_iommu_t *iommu;
-	const char *f = "amd_iommu_setup";
-
-	ASSERT(instance >= 0);
-	ASSERT(driver);
-
-	/* First setup PCI access to config space */
-
-	if (pci_config_setup(dip, &handle) != DDI_SUCCESS) {
-		cmn_err(CE_WARN, "%s: PCI config setup failed: %s%d",
-		    f, driver, instance);
-		return (DDI_FAILURE);
-	}
-
-	/*
-	 * The AMD IOMMU is part of an independent PCI function. There may be
-	 * more than one IOMMU in that PCI function
-	 */
-	base_class = pci_config_get8(handle, PCI_CONF_BASCLASS);
-	sub_class = pci_config_get8(handle, PCI_CONF_SUBCLASS);
-	prog_class = pci_config_get8(handle, PCI_CONF_PROGCLASS);
-
-	if (base_class != PCI_CLASS_PERIPH || sub_class != PCI_PERIPH_IOMMU ||
-	    prog_class != AMD_IOMMU_PCI_PROG_IF) {
-		cmn_err(CE_WARN, "%s: %s%d: invalid PCI class(0x%x)/"
-		    "subclass(0x%x)/programming interface(0x%x)", f, driver,
-		    instance, base_class, sub_class, prog_class);
-		pci_config_teardown(&handle);
-		return (DDI_FAILURE);
-	}
-
-	/*
-	 * Find and initialize all IOMMU units in this function
-	 */
-	for (idx = 0; ; idx++) {
-		if (pci_cap_probe(handle, idx, &id, &cap_base) != DDI_SUCCESS)
-			break;
-
-		/* check if cap ID is secure device cap id */
-		if (id != PCI_CAP_ID_SECURE_DEV) {
-			if (amd_iommu_debug) {
-				cmn_err(CE_WARN,
-				    "%s: %s%d: skipping IOMMU: idx(0x%x) "
-				    "cap ID (0x%x) != secure dev capid (0x%x)",
-				    f, driver, instance, idx, id,
-				    PCI_CAP_ID_SECURE_DEV);
-			}
-			continue;
-		}
-
-		/* check if cap type is IOMMU cap type */
-		caphdr = PCI_CAP_GET32(handle, 0, cap_base,
-		    AMD_IOMMU_CAP_HDR_OFF);
-		cap_type = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_TYPE);
-		cap_id = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_ID);
-
-		if (cap_type != AMD_IOMMU_CAP) {
-			cmn_err(CE_WARN, "%s: %s%d: skipping IOMMU: idx(0x%x) "
-			    "cap type (0x%x) != AMD IOMMU CAP (0x%x)", f,
-			    driver, instance, idx, cap_type, AMD_IOMMU_CAP);
-			continue;
-		}
-		ASSERT(cap_id == PCI_CAP_ID_SECURE_DEV);
-		ASSERT(cap_id == id);
-
-		iommu = amd_iommu_init(dip, handle, idx, cap_base);
-		if (iommu == NULL) {
-			cmn_err(CE_WARN, "%s: %s%d: skipping IOMMU: idx(0x%x) "
-			    "failed to init IOMMU", f,
-			    driver, instance, idx);
-			continue;
-		}
-
-		if (statep->aioms_iommu_start == NULL) {
-			statep->aioms_iommu_start = iommu;
-		} else {
-			statep->aioms_iommu_end->aiomt_next = iommu;
-		}
-		statep->aioms_iommu_end = iommu;
-
-		statep->aioms_nunits++;
-	}
-
-	pci_config_teardown(&handle);
-
-	if (amd_iommu_debug) {
-		cmn_err(CE_NOTE, "%s: %s%d: state=%p: setup %d IOMMU units",
-		    f, driver, instance, (void *)statep, statep->aioms_nunits);
-	}
-
-	return (DDI_SUCCESS);
-}
-
-int
-amd_iommu_teardown(dev_info_t *dip, amd_iommu_state_t *statep)
-{
-	int instance = ddi_get_instance(dip);
-	const char *driver = ddi_driver_name(dip);
-	amd_iommu_t *iommu;
-	int teardown;
-	int error = DDI_SUCCESS;
-	const char *f = "amd_iommu_teardown";
-
-	teardown = 0;
-	for (iommu = statep->aioms_iommu_start; iommu;
-	    iommu = iommu->aiomt_next) {
-		ASSERT(statep->aioms_nunits > 0);
-		if (amd_iommu_fini(iommu) != DDI_SUCCESS) {
-			error = DDI_FAILURE;
-			continue;
-		}
-		statep->aioms_nunits--;
-		teardown++;
-	}
-
-	cmn_err(CE_NOTE, "%s: %s%d: state=%p: toredown %d units. "
-	    "%d units left", f, driver, instance, (void *)statep,
-	    teardown, statep->aioms_nunits);
-
-	return (error);
-}
-
-/* Interface with IOMMULIB */
-/*ARGSUSED*/
-static int
-amd_iommu_probe(iommulib_handle_t handle, dev_info_t *rdip)
-{
-	const char *driver = ddi_driver_name(rdip);
-	char *s;
-	amd_iommu_t *iommu = iommulib_iommu_getdata(handle);
-
-	if (amd_iommu_disable_list) {
-		s = strstr(amd_iommu_disable_list, driver);
-		if (s == NULL)
-			return (DDI_SUCCESS);
-		if (s == amd_iommu_disable_list || *(s - 1) == ':') {
-			s += strlen(driver);
-			if (*s == '\0' || *s == ':') {
-				amd_iommu_set_passthru(iommu, rdip);
-				return (DDI_FAILURE);
-			}
-		}
-	}
-
-	return (DDI_SUCCESS);
-}
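-
-/*
- * For illustration, the matching above with a hypothetical list
- * "bge:e1000g": a probe for driver "e1000g" matches (bounded by ':'
- * on the left and '\0' on the right) and the device is set to
- * pass-through, whereas "e1000" does not match because the character
- * following it in the list is 'g', not ':' or '\0'.
- */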
-
-/*ARGSUSED*/
-static int
-amd_iommu_allochdl(iommulib_handle_t handle,
-    dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
-    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *dma_handlep)
-{
-	return (iommulib_iommu_dma_allochdl(dip, rdip, attr, waitfp,
-	    arg, dma_handlep));
-}
-
-/*ARGSUSED*/
-static int
-amd_iommu_freehdl(iommulib_handle_t handle,
-    dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t dma_handle)
-{
-	return (iommulib_iommu_dma_freehdl(dip, rdip, dma_handle));
-}
-
-/*ARGSUSED*/
-static int
-map_current_window(amd_iommu_t *iommu, dev_info_t *rdip, ddi_dma_attr_t *attrp,
-    struct ddi_dma_req *dmareq, ddi_dma_cookie_t *cookie_array, uint_t ccount,
-    int km_flags)
-{
-	const char *driver = ddi_driver_name(iommu->aiomt_dip);
-	int instance = ddi_get_instance(iommu->aiomt_dip);
-	int idx = iommu->aiomt_idx;
-	int i;
-	uint64_t start_va;
-	char *path;
-	int error = DDI_FAILURE;
-	const char *f = "map_current_window";
-
-	path = kmem_alloc(MAXPATHLEN, km_flags);
-	if (path == NULL) {
-		return (DDI_DMA_NORESOURCES);
-	}
-
-	(void) ddi_pathname(rdip, path);
-	mutex_enter(&amd_iommu_pgtable_lock);
-
-	if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
-		cmn_err(CE_WARN, "%s: %s%d: idx=%d Attempting to get cookies "
-		    "from handle for device %s",
-		    f, driver, instance, idx, path);
-	}
-
-	start_va = 0;
-	for (i = 0; i < ccount; i++) {
-		if ((error = amd_iommu_map_pa2va(iommu, rdip, attrp, dmareq,
-		    cookie_array[i].dmac_cookie_addr,
-		    cookie_array[i].dmac_size,
-		    AMD_IOMMU_VMEM_MAP, &start_va, km_flags)) != DDI_SUCCESS) {
-			break;
-		}
-		cookie_array[i].dmac_cookie_addr = (uintptr_t)start_va;
-		cookie_array[i].dmac_type = 0;
-	}
-
-	if (i != ccount) {
-		cmn_err(CE_WARN, "%s: %s%d: idx=%d Cannot map cookie# %d "
-		    "for device %s", f, driver, instance, idx, i, path);
-		(void) unmap_current_window(iommu, rdip, cookie_array,
-		    ccount, i, 1);
-		goto out;
-	}
-
-	if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
-		cmn_err(CE_NOTE, "%s: return SUCCESS", f);
-	}
-
-	error = DDI_DMA_MAPPED;
-out:
-	mutex_exit(&amd_iommu_pgtable_lock);
-	kmem_free(path, MAXPATHLEN);
-	return (error);
-}
-
-/*ARGSUSED*/
-static int
-unmap_current_window(amd_iommu_t *iommu, dev_info_t *rdip,
-    ddi_dma_cookie_t *cookie_array, uint_t ccount, int ncookies, int locked)
-{
-	const char *driver = ddi_driver_name(iommu->aiomt_dip);
-	int instance = ddi_get_instance(iommu->aiomt_dip);
-	int idx = iommu->aiomt_idx;
-	int i;
-	int error = DDI_FAILURE;
-	char *path;
-	int pathfree;
-	const char *f = "unmap_current_window";
-
-	if (!locked)
-		mutex_enter(&amd_iommu_pgtable_lock);
-
-	path = kmem_alloc(MAXPATHLEN, KM_NOSLEEP);
-	if (path) {
-		(void) ddi_pathname(rdip, path);
-		pathfree = 1;
-	} else {
-		path = "<path-mem-alloc-failed>";
-		pathfree = 0;
-	}
-
-	if (ncookies == -1)
-		ncookies = ccount;
-
-	for (i = 0; i < ncookies; i++) {
-		if (amd_iommu_unmap_va(iommu, rdip,
-		    cookie_array[i].dmac_cookie_addr,
-		    cookie_array[i].dmac_size,
-		    AMD_IOMMU_VMEM_MAP) != DDI_SUCCESS) {
-			break;
-		}
-	}
-
-	if (amd_iommu_cmd(iommu, AMD_IOMMU_CMD_COMPL_WAIT, NULL, 0, 0)
-	    != DDI_SUCCESS) {
-		cmn_err(CE_WARN, "%s: AMD IOMMU completion wait failed for: %s",
-		    f, path);
-	}
-
-	if (i != ncookies) {
-		cmn_err(CE_WARN, "%s: %s%d: idx=%d Cannot unmap cookie# %d "
-		    "for device %s", f, driver, instance, idx, i, path);
-		error = DDI_FAILURE;
-		goto out;
-	}
-
-	error = DDI_SUCCESS;
-
-out:
-	if (pathfree)
-		kmem_free(path, MAXPATHLEN);
-	if (!locked)
-		mutex_exit(&amd_iommu_pgtable_lock);
-	return (error);
-}
-
-/*ARGSUSED*/
-static int
-amd_iommu_bindhdl(iommulib_handle_t handle, dev_info_t *dip,
-    dev_info_t *rdip, ddi_dma_handle_t dma_handle,
-    struct ddi_dma_req *dmareq, ddi_dma_cookie_t *cookiep,
-    uint_t *ccountp)
-{
-	int dma_error = DDI_DMA_NOMAPPING;
-	int error;
-	char *path;
-	ddi_dma_cookie_t *cookie_array = NULL;
-	uint_t ccount = 0;
-	ddi_dma_impl_t *hp;
-	ddi_dma_attr_t *attrp;
-	int km_flags;
-	amd_iommu_t *iommu = iommulib_iommu_getdata(handle);
-	int instance = ddi_get_instance(rdip);
-	const char *driver = ddi_driver_name(rdip);
-	const char *f = "amd_iommu_bindhdl";
-
-	dma_error = iommulib_iommu_dma_bindhdl(dip, rdip, dma_handle,
-	    dmareq, cookiep, ccountp);
-
-	if (dma_error != DDI_DMA_MAPPED && dma_error != DDI_DMA_PARTIAL_MAP)
-		return (dma_error);
-
-	km_flags = iommulib_iommu_dma_get_sleep_flags(dip, dma_handle);
-
-	path = kmem_alloc(MAXPATHLEN, km_flags);
-	if (path) {
-		(void) ddi_pathname(rdip, path);
-	} else {
-		dma_error = DDI_DMA_NORESOURCES;
-		goto unbind;
-	}
-
-	if (amd_iommu_debug & AMD_IOMMU_DEBUG_BIND) {
-		cmn_err(CE_NOTE, "%s: %s got cookie (%p), #cookies: %d",
-		    f, path,
-		    (void *)(uintptr_t)cookiep->dmac_cookie_addr,
-		    *ccountp);
-	}
-
-	cookie_array = NULL;
-	ccount = 0;
-	if ((error = iommulib_iommu_dma_get_cookies(dip, dma_handle,
-	    &cookie_array, &ccount)) != DDI_SUCCESS) {
-		cmn_err(CE_WARN, "%s: %s%d: Cannot get cookies "
-		    "for device %s", f, driver, instance, path);
-		dma_error = error;
-		goto unbind;
-	}
-
-	hp = (ddi_dma_impl_t *)dma_handle;
-	attrp = &hp->dmai_attr;
-
-	error = map_current_window(iommu, rdip, attrp, dmareq,
-	    cookie_array, ccount, km_flags);
-	if (error != DDI_SUCCESS) {
-		dma_error = error;
-		goto unbind;
-	}
-
-	if ((error = iommulib_iommu_dma_set_cookies(dip, dma_handle,
-	    cookie_array, ccount)) != DDI_SUCCESS) {
-		cmn_err(CE_WARN, "%s: %s%d: Cannot set cookies "
-		    "for device %s", f, driver, instance, path);
-		dma_error = error;
-		goto unbind;
-	}
-
-	*cookiep = cookie_array[0];
-
-	if (amd_iommu_debug & AMD_IOMMU_DEBUG_BIND) {
-		cmn_err(CE_NOTE, "%s: %s remapped cookie (%p), #cookies: %d",
-		    f, path,
-		    (void *)(uintptr_t)cookiep->dmac_cookie_addr,
-		    *ccountp);
-	}
-
-	kmem_free(path, MAXPATHLEN);
-	ASSERT(dma_error == DDI_DMA_MAPPED || dma_error == DDI_DMA_PARTIAL_MAP);
-	return (dma_error);
-unbind:
-	kmem_free(path, MAXPATHLEN);
-	(void) iommulib_iommu_dma_unbindhdl(dip, rdip, dma_handle);
-	return (dma_error);
-}
-
-/*ARGSUSED*/
-static int
-amd_iommu_unbindhdl(iommulib_handle_t handle,
-    dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t dma_handle)
-{
-	amd_iommu_t *iommu = iommulib_iommu_getdata(handle);
-	ddi_dma_cookie_t *cookie_array = NULL;
-	uint_t ccount = 0;
-	int error = DDI_FAILURE;
-	int instance = ddi_get_instance(rdip);
-	const char *driver = ddi_driver_name(rdip);
-	const char *f = "amd_iommu_unbindhdl";
-
-	cookie_array = NULL;
-	ccount = 0;
-	if (iommulib_iommu_dma_get_cookies(dip, dma_handle, &cookie_array,
-	    &ccount) != DDI_SUCCESS) {
-		cmn_err(CE_WARN, "%s: %s%d: Cannot get cookies "
-		    "for device %p", f, driver, instance, (void *)rdip);
-		error = DDI_FAILURE;
-		goto out;
-	}
-
-	if (iommulib_iommu_dma_clear_cookies(dip, dma_handle) != DDI_SUCCESS) {
-		cmn_err(CE_WARN, "%s: %s%d: Cannot clear cookies "
-		    "for device %p", f, driver, instance, (void *)rdip);
-		error = DDI_FAILURE;
-		goto out;
-	}
-
-	if (iommulib_iommu_dma_unbindhdl(dip, rdip, dma_handle)
-	    != DDI_SUCCESS) {
-		cmn_err(CE_WARN, "%s: %s%d: failed to unbindhdl for dip=%p",
-		    f, driver, instance, (void *)rdip);
-		error = DDI_FAILURE;
-		goto out;
-	}
-
-	if (unmap_current_window(iommu, rdip, cookie_array, ccount, -1, 0)
-	    != DDI_SUCCESS) {
-		cmn_err(CE_WARN, "%s: %s%d: failed to unmap current window "
-		    "for dip=%p", f, driver, instance, (void *)rdip);
-		error = DDI_FAILURE;
-	} else {
-		error = DDI_SUCCESS;
-	}
-out:
-	if (cookie_array)
-		kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);
-	return (error);
-}
-
-/*ARGSUSED*/
-static int
-amd_iommu_sync(iommulib_handle_t handle, dev_info_t *dip,
-    dev_info_t *rdip, ddi_dma_handle_t dma_handle, off_t off,
-    size_t len, uint_t cache_flags)
-{
-	ddi_dma_cookie_t *cookie_array = NULL;
-	uint_t ccount = 0;
-	int error;
-	const char *f = "amd_iommu_sync";
-
-	cookie_array = NULL;
-	ccount = 0;
-	if (iommulib_iommu_dma_get_cookies(dip, dma_handle, &cookie_array,
-	    &ccount) != DDI_SUCCESS) {
-		ASSERT(cookie_array == NULL);
-		cmn_err(CE_WARN, "%s: Cannot get cookies "
-		    "for device %p", f, (void *)rdip);
-		error = DDI_FAILURE;
-		goto out;
-	}
-
-	if (iommulib_iommu_dma_clear_cookies(dip, dma_handle) != DDI_SUCCESS) {
-		cmn_err(CE_WARN, "%s: Cannot clear cookies "
-		    "for device %p", f, (void *)rdip);
-		error = DDI_FAILURE;
-		goto out;
-	}
-
-	error = iommulib_iommu_dma_sync(dip, rdip, dma_handle, off,
-	    len, cache_flags);
-
-	if (iommulib_iommu_dma_set_cookies(dip, dma_handle, cookie_array,
-	    ccount) != DDI_SUCCESS) {
-		cmn_err(CE_WARN, "%s: Cannot set cookies "
-		    "for device %p", f, (void *)rdip);
-		error = DDI_FAILURE;
-	} else {
-		cookie_array = NULL;
-		ccount = 0;
-	}
-
-out:
-	if (cookie_array)
-		kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);
-	return (error);
-}
-
-/*ARGSUSED*/
-static int
-amd_iommu_win(iommulib_handle_t handle, dev_info_t *dip,
-    dev_info_t *rdip, ddi_dma_handle_t dma_handle, uint_t win,
-    off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
-    uint_t *ccountp)
-{
-	int error = DDI_FAILURE;
-	amd_iommu_t *iommu = iommulib_iommu_getdata(handle);
-	ddi_dma_cookie_t *cookie_array = NULL;
-	uint_t ccount = 0;
-	int km_flags;
-	ddi_dma_impl_t *hp;
-	ddi_dma_attr_t *attrp;
-	struct ddi_dma_req sdmareq = {0};
-	int instance = ddi_get_instance(rdip);
-	const char *driver = ddi_driver_name(rdip);
-	const char *f = "amd_iommu_win";
-
-	km_flags = iommulib_iommu_dma_get_sleep_flags(dip, dma_handle);
-
-	cookie_array = NULL;
-	ccount = 0;
-	if (iommulib_iommu_dma_get_cookies(dip, dma_handle, &cookie_array,
-	    &ccount) != DDI_SUCCESS) {
-		cmn_err(CE_WARN, "%s: %s%d: Cannot get cookies "
-		    "for device %p", f, driver, instance, (void *)rdip);
-		error = DDI_FAILURE;
-		goto out;
-	}
-
-	if (iommulib_iommu_dma_clear_cookies(dip, dma_handle) != DDI_SUCCESS) {
-		cmn_err(CE_WARN, "%s: %s%d: Cannot clear cookies "
-		    "for device %p", f, driver, instance, (void *)rdip);
-		error = DDI_FAILURE;
-		goto out;
-	}
-
-	if (iommulib_iommu_dma_win(dip, rdip, dma_handle, win,
-	    offp, lenp, cookiep, ccountp) != DDI_SUCCESS) {
-		cmn_err(CE_WARN, "%s: %s%d: failed switch windows for dip=%p",
-		    f, driver, instance, (void *)rdip);
-		error = DDI_FAILURE;
-		goto out;
-	}
-
-	(void) unmap_current_window(iommu, rdip, cookie_array, ccount, -1, 0);
-
-	if (cookie_array) {
-		kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);
-		cookie_array = NULL;
-		ccount = 0;
-	}
-
-	cookie_array = NULL;
-	ccount = 0;
-	if (iommulib_iommu_dma_get_cookies(dip, dma_handle, &cookie_array,
-	    &ccount) != DDI_SUCCESS) {
-		cmn_err(CE_WARN, "%s: %s%d: Cannot get cookies "
-		    "for device %p", f, driver, instance, (void *)rdip);
-		error = DDI_FAILURE;
-		goto out;
-	}
-
-	hp = (ddi_dma_impl_t *)dma_handle;
-	attrp = &hp->dmai_attr;
-
-	sdmareq.dmar_flags = DDI_DMA_RDWR;
-	error = map_current_window(iommu, rdip, attrp, &sdmareq,
-	    cookie_array, ccount, km_flags);
-
-	if (iommulib_iommu_dma_set_cookies(dip, dma_handle, cookie_array,
-	    ccount) != DDI_SUCCESS) {
-		cmn_err(CE_WARN, "%s: %s%d: Cannot set cookies "
-		    "for device %p", f, driver, instance, (void *)rdip);
-		error = DDI_FAILURE;
-		goto out;
-	}
-
-	*cookiep = cookie_array[0];
-
-	return (error == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
-out:
-	if (cookie_array)
-		kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);
-
-	return (error);
-}
-
-/* Obsoleted DMA routines */
-
-/*ARGSUSED*/
-static int
-amd_iommu_map(iommulib_handle_t handle, dev_info_t *dip,
-    dev_info_t *rdip, struct ddi_dma_req *dmareq,
-    ddi_dma_handle_t *dma_handle)
-{
-	ASSERT(0);
-	return (iommulib_iommu_dma_map(dip, rdip, dmareq, dma_handle));
-}
-
-/*ARGSUSED*/
-static int
-amd_iommu_mctl(iommulib_handle_t handle, dev_info_t *dip,
-    dev_info_t *rdip, ddi_dma_handle_t dma_handle,
-    enum ddi_dma_ctlops request, off_t *offp, size_t *lenp,
-    caddr_t *objpp, uint_t cache_flags)
-{
-	ASSERT(0);
-	return (iommulib_iommu_dma_mctl(dip, rdip, dma_handle,
-	    request, offp, lenp, objpp, cache_flags));
-}
-
-uint64_t
-amd_iommu_reg_get64_workaround(uint64_t *regp, uint32_t bits)
-{
-	split_t s;
-	uint32_t *ptr32 = (uint32_t *)regp;
-	uint64_t *s64p = &(s.u64);
-
-	s.u32[0] = ptr32[0];
-	s.u32[1] = ptr32[1];
-
-	return (AMD_IOMMU_REG_GET64_IMPL(s64p, bits));
-}
-
-uint64_t
-amd_iommu_reg_set64_workaround(uint64_t *regp, uint32_t bits, uint64_t value)
-{
-	split_t s;
-	uint32_t *ptr32 = (uint32_t *)regp;
-	uint64_t *s64p = &(s.u64);
-
-	s.u32[0] = ptr32[0];
-	s.u32[1] = ptr32[1];
-
-	AMD_IOMMU_REG_SET64_IMPL(s64p, bits, value);
-
-	*regp = s.u64;
-
-	return (s.u64);
-}
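-
-/*
- * Note on the two workaround routines above: when amd_iommu_64bit_bug
- * is set, register contents are read as two 32-bit loads into a
- * split_t, the field is extracted or updated in the reassembled 64-bit
- * copy, and (in the set path) the full value is stored back. The
- * paired 32-bit reads are not atomic with respect to hardware updates
- * of the register.
- */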
-
-void
-amd_iommu_read_boot_props(void)
-{
-	char *propval;
-
-	/*
-	 * If the "amd-iommu" boot property is set to "no" or "false",
-	 * disable the AMD IOMMU
-	 */
-	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_root_node(),
-	    DDI_PROP_DONTPASS, "amd-iommu", &propval) == DDI_SUCCESS) {
-		if (strcmp(propval, "no") == 0 ||
-		    strcmp(propval, "false") == 0) {
-			amd_iommu_disable = 1;
-		}
-		ddi_prop_free(propval);
-	}
-
-	/*
-	 * Copy the list of drivers for which IOMMU is disabled by user.
-	 */
-	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_root_node(),
-	    DDI_PROP_DONTPASS, "amd-iommu-disable-list", &propval)
-	    == DDI_SUCCESS) {
-		amd_iommu_disable_list = kmem_alloc(strlen(propval) + 1,
-		    KM_SLEEP);
-		(void) strcpy(amd_iommu_disable_list, propval);
-		ddi_prop_free(propval);
-	}
-}
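-
-/*
- * For illustration, these properties might be supplied on the kernel
- * boot line (hypothetical values):
- *
- *	kernel$ /platform/i86pc/kernel/$ISADIR/unix -B amd-iommu=false
- *	kernel$ /platform/i86pc/kernel/$ISADIR/unix \
- *	    -B amd-iommu-disable-list=bge:e1000g
- */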
-
-void
-amd_iommu_lookup_conf_props(dev_info_t *dip)
-{
-	char *disable;
-
-	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
-	    DDI_PROP_DONTPASS|DDI_PROP_NOTPROM, "amd-iommu", &disable)
-	    == DDI_PROP_SUCCESS) {
-		if (strcmp(disable, "no") == 0) {
-			amd_iommu_disable = 1;
-		}
-		ddi_prop_free(disable);
-	}
-
-	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
-	    DDI_PROP_DONTPASS|DDI_PROP_NOTPROM, "amd-iommu-disable-list",
-	    &disable) == DDI_PROP_SUCCESS) {
-		amd_iommu_disable_list = kmem_alloc(strlen(disable) + 1,
-		    KM_SLEEP);
-		(void) strcpy(amd_iommu_disable_list, disable);
-		ddi_prop_free(disable);
-	}
-}
--- a/usr/src/uts/intel/io/amd_iommu/amd_iommu_impl.h	Wed Sep 16 22:03:43 2009 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,494 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef	_AMD_IOMMU_IMPL_H
-#define	_AMD_IOMMU_IMPL_H
-
-#ifdef	__cplusplus
-extern "C" {
-#endif
-
-#include <sys/pci.h>
-
-#ifdef	_KERNEL
-
-#define	AMD_IOMMU_PCI_PROG_IF	(0x0)
-
-#define	AMD_IOMMU_CAP		(0x3)
-
-#define	AMD_IOMMU_REG_SIZE	(0x2028)
-#define	AMD_IOMMU_DEVTBL_SZ	(16)
-#define	AMD_IOMMU_CMDBUF_SZ	(15)
-#define	AMD_IOMMU_EVENTLOG_SZ	(15)
-#define	AMD_IOMMU_DEVENT_SZ	(32)
-#define	AMD_IOMMU_CMD_SZ	(16)
-#define	AMD_IOMMU_EVENT_SZ	(16)
-
-/* Capability Register offsets */
-#define	AMD_IOMMU_CAP_HDR_OFF		(0x00)
-#define	AMD_IOMMU_CAP_ADDR_LOW_OFF	(0x04)
-#define	AMD_IOMMU_CAP_ADDR_HI_OFF	(0x08)
-#define	AMD_IOMMU_CAP_RANGE_OFF		(0x0C)
-#define	AMD_IOMMU_CAP_MISC_OFF		(0x10)
-
-/* Control Register offsets */
-#define	AMD_IOMMU_DEVTBL_REG_OFF	(0x00)
-#define	AMD_IOMMU_CMDBUF_REG_OFF	(0x08)
-#define	AMD_IOMMU_EVENTLOG_REG_OFF	(0x10)
-#define	AMD_IOMMU_CTRL_REG_OFF		(0x18)
-#define	AMD_IOMMU_EXCL_BASE_REG_OFF	(0x20)
-#define	AMD_IOMMU_EXCL_LIM_REG_OFF	(0x28)
-#define	AMD_IOMMU_CMDBUF_HEAD_REG_OFF	(0x2000)
-#define	AMD_IOMMU_CMDBUF_TAIL_REG_OFF	(0x2008)
-#define	AMD_IOMMU_EVENTLOG_HEAD_REG_OFF	(0x2010)
-#define	AMD_IOMMU_EVENTLOG_TAIL_REG_OFF	(0x2018)
-#define	AMD_IOMMU_STATUS_REG_OFF	(0x2020)
-
-/* Capability Header Register Bits */
-#define	AMD_IOMMU_CAP_NPCACHE	(26 << 16 | 26)
-#define	AMD_IOMMU_CAP_HTTUN	(25 << 16 | 25)
-#define	AMD_IOMMU_CAP_IOTLB	(24 << 16 | 24)
-#define	AMD_IOMMU_CAP_TYPE	(18 << 16 | 16)
-#define	AMD_IOMMU_CAP_ID	(7 << 16 | 0)
-
-/* Capability Range Register bits */
-#define	AMD_IOMMU_LAST_DEVFN	(31 << 16 | 24)
-#define	AMD_IOMMU_FIRST_DEVFN	(23 << 16 | 16)
-#define	AMD_IOMMU_RNG_BUS	(15 << 16 | 8)
-#define	AMD_IOMMU_RNG_VALID	(7 << 16 | 7)
-#define	AMD_IOMMU_HT_UNITID	(4 << 16 | 0)
-
-
-/* Capability Misc Register bits */
-#define	AMD_IOMMU_HT_ATSRSV	(22 << 16 | 22)
-#define	AMD_IOMMU_VA_SIZE	(21 << 16 | 15)
-#define	AMD_IOMMU_PA_SIZE	(14 << 16 | 8)
-#define	AMD_IOMMU_MSINUM	(4 << 16 | 0)
-
-/* Device Table Base Address register bits */
-#define	AMD_IOMMU_DEVTABBASE	(51 << 16 | 12)
-#define	AMD_IOMMU_DEVTABSIZE	(8 << 16 | 0)
-
-/* Command Buffer Base Address register bits */
-#define	AMD_IOMMU_COMLEN	(59 << 16 | 56)
-#define	AMD_IOMMU_COMBASE	(51 << 16 | 12)
-
-#define	AMD_IOMMU_CMDBUF_MINSZ	(8)
-#define	AMD_IOMMU_CMDBUF_MAXSZ	(15)
-
-/* Event Log Base Address register bits */
-#define	AMD_IOMMU_EVENTLEN	(59 << 16 | 56)
-#define	AMD_IOMMU_EVENTBASE	(51 << 16 | 12)
-
-#define	AMD_IOMMU_EVENTLOG_MINSZ	(8)
-#define	AMD_IOMMU_EVENTLOG_MAXSZ	(15)
-
-/* Control register bits */
-#define	AMD_IOMMU_CMDBUF_ENABLE		(12 << 16 | 12)
-#define	AMD_IOMMU_ISOC			(11 << 16 | 11)
-#define	AMD_IOMMU_COHERENT		(10 << 16 | 10)
-#define	AMD_IOMMU_RESPASSPW		(9 << 16 | 9)
-#define	AMD_IOMMU_PASSPW		(8 << 16 | 8)
-#define	AMD_IOMMU_INVTO			(7 << 16 | 5)
-#define	AMD_IOMMU_COMWAITINT_ENABLE	(4 << 16 | 4)
-#define	AMD_IOMMU_EVENTINT_ENABLE	(3 << 16 | 3)
-#define	AMD_IOMMU_EVENTLOG_ENABLE	(2 << 16 | 2)
-#define	AMD_IOMMU_HT_TUN_ENABLE		(1 << 16 | 1)
-#define	AMD_IOMMU_ENABLE		(0 << 16 | 0)
-
-/* Exclusion Base Register bits */
-#define	AMD_IOMMU_EXCL_BASE_ADDR	(51 << 16 | 12)
-#define	AMD_IOMMU_EXCL_BASE_ALLOW	(1 << 16 | 1)
-#define	AMD_IOMMU_EXCL_BASE_EXEN	(0 << 16 | 0)
-
-/* Exclusion Limit Register bits */
-#define	AMD_IOMMU_EXCL_LIM		(51 << 16 | 12)
-
-/* Command Buffer Head Pointer Register bits */
-#define	AMD_IOMMU_CMDHEADPTR		(18 << 16 | 4)
-
-/* Command Buffer Tail Pointer Register bits */
-#define	AMD_IOMMU_CMDTAILPTR		(18 << 16 | 4)
-
-/* Event Log Head Pointer Register bits */
-#define	AMD_IOMMU_EVENTHEADPTR		(18 << 16 | 4)
-
-/* Event Log Tail Pointer Register bits */
-#define	AMD_IOMMU_EVENTTAILPTR		(18 << 16 | 4)
-
-/* Status Register bits */
-#define	AMD_IOMMU_CMDBUF_RUN		(4 << 16 | 4)
-#define	AMD_IOMMU_EVENT_LOG_RUN		(3 << 16 | 3)
-#define	AMD_IOMMU_COMWAIT_INT		(2 << 16 | 2)
-#define	AMD_IOMMU_EVENT_LOG_INT		(1 << 16 | 1)
-#define	AMD_IOMMU_EVENT_OVERFLOW_INT	(0 << 16 | 0)
-
-/* Device Table Bits */
-
-/* size in bytes of each device table entry */
-#define	AMD_IOMMU_DEVTBL_ENTRY_SZ	(32)
-
-/* Interrupt Remapping related Device Table bits */
-#define	AMD_IOMMU_DEVTBL_LINT1PASS	((191-128) << 16 | (191-128))
-#define	AMD_IOMMU_DEVTBL_LINT0PASS	((190-128) << 16 | (190-128))
-#define	AMD_IOMMU_DEVTBL_INTCTL		((189-128) << 16 | (188-128))
-#define	AMD_IOMMU_DEVTBL_NMIPASS	((186-128) << 16 | (186-128))
-#define	AMD_IOMMU_DEVTBL_EXTINTPAS	((185-128) << 16 | (185-128))
-#define	AMD_IOMMU_DEVTBL_INITPASS	((184-128) << 16 | (184-128))
-#define	AMD_IOMMU_DEVTBL_INTR_ROOT	((179-128) << 16 | (134-128))
-#define	AMD_IOMMU_DEVTBL_IG		((133-128) << 16 | (133-128))
-#define	AMD_IOMMU_DEVTBL_INTTABLEN	((132-128) << 16 | (129-128))
-#define	AMD_IOMMU_DEVTBL_IV		((128-128) << 16 | (128-128))
-
-/* DMA Remapping related Device Table Bits */
-#define	AMD_IOMMU_DEVTBL_SYSMGT		((105-64) << 16 | (104-64))
-#define	AMD_IOMMU_DEVTBL_EX		((103-64) << 16 | (103-64))
-#define	AMD_IOMMU_DEVTBL_SD		((102-64) << 16 | (102-64))
-#define	AMD_IOMMU_DEVTBL_CACHE		((101-64) << 16 | (101-64))
-#define	AMD_IOMMU_DEVTBL_IOCTL		((100-64) << 16 | (99-64))
-#define	AMD_IOMMU_DEVTBL_SA		((98-64) << 16 | (98-64))
-#define	AMD_IOMMU_DEVTBL_SE		((97-64) << 16 | (97-64))
-#define	AMD_IOMMU_DEVTBL_IOTLB		((96-64) << 16 | (96-64))
-#define	AMD_IOMMU_DEVTBL_DOMAINID	((79-64) << 16 | (64-64))
-#define	AMD_IOMMU_DEVTBL_IW		(62 << 16 | 62)
-#define	AMD_IOMMU_DEVTBL_IR		(61 << 16 | 61)
-#define	AMD_IOMMU_DEVTBL_ROOT_PGTBL	(51 << 16 | 12)
-#define	AMD_IOMMU_DEVTBL_PG_MODE	(11 << 16 | 9)
-#define	AMD_IOMMU_DEVTBL_TV		(1 << 16 | 1)
-#define	AMD_IOMMU_DEVTBL_V		(0 << 16 | 0)
-
-#define	BUS_DEVFN_TO_BDF(b, devfn)	(devfn)
-#define	AMD_IOMMU_ALIAS_HASH_SZ		(256)
-
-#define	AMD_IOMMU_REG_ADDR_LOCKED	(0x1)
-
-/*
- * IOMMU Command bits
- */
-
-typedef enum {
-	AMD_IOMMU_CMD_INVAL = 0,
-	AMD_IOMMU_CMD_COMPL_WAIT,
-	AMD_IOMMU_CMD_INVAL_DEVTAB_ENTRY,
-	AMD_IOMMU_CMD_INVAL_IOMMU_PAGES,
-	AMD_IOMMU_CMD_INVAL_IOTLB_PAGES,
-	AMD_IOMMU_CMD_INVAL_INTR_TABLE,
-} amd_iommu_cmd_t;
-
-typedef enum {
-	AMD_IOMMU_CMD_FLAGS_NONE = 0,
-	AMD_IOMMU_CMD_FLAGS_COMPL_WAIT = 1,
-	AMD_IOMMU_CMD_FLAGS_COMPL_WAIT_F = 2,
-	AMD_IOMMU_CMD_FLAGS_COMPL_WAIT_S = 4,
-	AMD_IOMMU_CMD_FLAGS_PAGE_PDE_INVAL = 8,
-	AMD_IOMMU_CMD_FLAGS_PAGE_INVAL_S = 16,
-	AMD_IOMMU_CMD_FLAGS_IOTLB_INVAL_S = 32
-} amd_iommu_cmd_flags_t;
-
-/* Common command bits */
-#define	AMD_IOMMU_CMD_OPCODE		(31 << 16 | 28)
-
-/* Completion Wait command bits */
-#define	AMD_IOMMU_CMD_COMPL_WAIT_S		(0 << 16 | 0)
-#define	AMD_IOMMU_CMD_COMPL_WAIT_I		(1 << 16 | 1)
-#define	AMD_IOMMU_CMD_COMPL_WAIT_F		(2 << 16 | 2)
-#define	AMD_IOMMU_CMD_COMPL_WAIT_STORE_ADDR_LO	(31 << 16 | 3)
-#define	AMD_IOMMU_CMD_COMPL_WAIT_STORE_ADDR_HI	(19 << 16 | 0)
-
-/* Invalidate Device Table entry command bits */
-#define	AMD_IOMMU_CMD_INVAL_DEVTAB_DEVICEID		(15 << 16 | 0)
-
-/* Invalidate IOMMU Pages command bits */
-#define	AMD_IOMMU_CMD_INVAL_PAGES_DOMAINID		(15 << 16 | 0)
-#define	AMD_IOMMU_CMD_INVAL_PAGES_S			(0 << 16 | 0)
-#define	AMD_IOMMU_CMD_INVAL_PAGES_PDE			(1 << 16 | 1)
-#define	AMD_IOMMU_CMD_INVAL_PAGES_ADDR_LO		(31 << 16 | 12)
-#define	AMD_IOMMU_CMD_INVAL_PAGES_ADDR_HI		(63 << 16 | 32)
-
-
-/* Invalidate IOTLB command bits */
-#define	AMD_IOMMU_CMD_INVAL_IOTLB_DEVICEID		(15 << 16 | 0)
-#define	AMD_IOMMU_CMD_INVAL_IOTLB_MAXPEND		(31 << 16 | 24)
-#define	AMD_IOMMU_CMD_INVAL_IOTLB_QUEUEID		(15 << 16 | 0)
-#define	AMD_IOMMU_CMD_INVAL_IOTLB_S			(0 << 16 | 0)
-#define	AMD_IOMMU_CMD_INVAL_IOTLB_ADDR_LO		(31 << 16 | 12)
-#define	AMD_IOMMU_CMD_INVAL_IOTLB_ADDR_HI		(31 << 16 | 0)
-
-#define	AMD_IOMMU_DEFAULT_MAXPEND			(10)
-
-/* Invalidate Interrupt Table bits */
-#define	AMD_IOMMU_CMD_INVAL_INTR_DEVICEID		(15 << 16 | 0)
-
-#if defined(__amd64)
-#define	dmac_cookie_addr	dmac_laddress
-#else
-#define	dmac_cookie_addr	dmac_address
-#endif
-
-#define	AMD_IOMMU_TABLE_ALIGN	((1ULL << 12) - 1)
-
-#define	AMD_IOMMU_MAX_DEVICEID	(0xFFFF)
-
-/*
- * DMA sync macros
- * TODO: optimize to sync only small ranges
- */
-#define	SYNC_FORDEV(h)	(void) ddi_dma_sync(h, 0, 0, DDI_DMA_SYNC_FORDEV)
-#define	SYNC_FORKERN(h)	(void) ddi_dma_sync(h, 0, 0, DDI_DMA_SYNC_FORKERNEL)
-
-#define	WAIT_SEC(s)	drv_usecwait(1000000*(s))
-
-#define	CMD2OFF(c)	((c) << 4)
-#define	OFF2CMD(o)	((o) >> 4)
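-
-/*
- * For illustration: commands are 16 bytes (AMD_IOMMU_CMD_SZ), so a
- * command-buffer index and its byte offset convert with a shift by 4.
- */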
-
-typedef union split {
-	uint64_t u64;
-	uint32_t u32[2];
-} split_t;
-
-#define	BITPOS_START(b)	((b) >> 16)
-#define	BITPOS_END(b)	((b) & 0xFFFF)
-
-#define	START_MASK64(s)	(((s) == 63) ? ~((uint64_t)0) : \
-	(uint64_t)((1ULL << ((s)+1)) - 1))
-#define	START_MASK32(s)	(((s) == 31) ? ~((uint32_t)0) : \
-	(uint32_t)((1ULL << ((s)+1)) - 1))
-#define	START_MASK16(s)	(((s) == 15) ? ~((uint16_t)0) : \
-	(uint16_t)((1ULL << ((s)+1)) - 1))
-#define	START_MASK8(s)	(((s) == 7) ? ~((uint8_t)0) : \
-	(uint8_t)((1ULL << ((s)+1)) - 1))
-
-#define	END_MASK(e)	((1ULL << (e)) - 1)
-
-#define	BIT_MASK64(s, e)	(uint64_t)(START_MASK64(s) & ~END_MASK(e))
-#define	BIT_MASK32(s, e)	(uint32_t)(START_MASK32(s) & ~END_MASK(e))
-#define	BIT_MASK16(s, e)	(uint16_t)(START_MASK16(s) & ~END_MASK(e))
-#define	BIT_MASK8(s, e)		(uint8_t)(START_MASK8(s) & ~END_MASK(e))
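-
-/*
- * For illustration, a worked decode of this encoding: a field
- * descriptor packs (start_bit << 16 | end_bit), so for
- * AMD_IOMMU_COMLEN = (59 << 16 | 56):
- *	BITPOS_START = 59, BITPOS_END = 56
- *	BIT_MASK64(59, 56) = START_MASK64(59) & ~END_MASK(56)
- *			   = 0x0FFFFFFFFFFFFFFF & 0xFF00000000000000
- *			   = 0x0F00000000000000	(bits 59:56 of the register)
- */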
-
-#define	AMD_IOMMU_REG_GET64_IMPL(rp, b) \
-	(((*(rp)) & (START_MASK64(BITPOS_START(b)))) >> BITPOS_END(b))
-#define	AMD_IOMMU_REG_GET64(rp, b) 					 \
-	((amd_iommu_64bit_bug) ? amd_iommu_reg_get64_workaround(rp, b) : \
-	AMD_IOMMU_REG_GET64_IMPL(rp, b))
-#define	AMD_IOMMU_REG_GET32(rp, b) \
-	(((*(rp)) & (START_MASK32(BITPOS_START(b)))) >> BITPOS_END(b))
-#define	AMD_IOMMU_REG_GET16(rp, b) \
-	(((*(rp)) & (START_MASK16(BITPOS_START(b)))) >> BITPOS_END(b))
-#define	AMD_IOMMU_REG_GET8(rp, b) \
-	(((*(rp)) & (START_MASK8(BITPOS_START(b)))) >> BITPOS_END(b))
-
-#define	AMD_IOMMU_REG_SET64_IMPL(rp, b, v) \
-	((*(rp)) = \
-	(((uint64_t)(*(rp)) & ~(BIT_MASK64(BITPOS_START(b), BITPOS_END(b)))) \
-	| ((uint64_t)(v) << BITPOS_END(b))))
-
-#define	AMD_IOMMU_REG_SET64(rp, b, v) 			\
-	(void) ((amd_iommu_64bit_bug) ?			\
-	amd_iommu_reg_set64_workaround(rp, b, v) : 	\
-	AMD_IOMMU_REG_SET64_IMPL(rp, b, v))
-
-#define	AMD_IOMMU_REG_SET32(rp, b, v) \
-	((*(rp)) = \
-	(((uint32_t)(*(rp)) & ~(BIT_MASK32(BITPOS_START(b), BITPOS_END(b)))) \
-	| ((uint32_t)(v) << BITPOS_END(b))))
-
-#define	AMD_IOMMU_REG_SET16(rp, b, v) \
-	((*(rp)) = \
-	(((uint16_t)(*(rp)) & ~(BIT_MASK16(BITPOS_START(b), BITPOS_END(b)))) \
-	| ((uint16_t)(v) << BITPOS_END(b))))
-
-#define	AMD_IOMMU_REG_SET8(rp, b, v) \
-	((*(rp)) = \
-	(((uint8_t)(*(rp)) & ~(BIT_MASK8(BITPOS_START(b), BITPOS_END(b)))) \
-	| ((uint8_t)(v) << BITPOS_END(b))))
-
-/*
- * Cast a 64 bit pointer to a uint64_t *
- */
-#define	REGADDR64(a)	((uint64_t *)(uintptr_t)(a))
-
-typedef enum {
-	AMD_IOMMU_INTR_INVALID = 0,
-	AMD_IOMMU_INTR_TABLE,
-	AMD_IOMMU_INTR_ALLOCED,
-	AMD_IOMMU_INTR_HANDLER,
-	AMD_IOMMU_INTR_ENABLED
-} amd_iommu_intr_state_t;
-
-
-typedef struct amd_iommu {
-	kmutex_t aiomt_mutex;
-	kmutex_t aiomt_eventlock;
-	kmutex_t aiomt_cmdlock;
-	dev_info_t *aiomt_dip;
-	int aiomt_idx;
-	iommulib_handle_t aiomt_iommulib_handle;
-	iommulib_ops_t *aiomt_iommulib_ops;
-	uint32_t aiomt_cap_hdr;
-	uint8_t aiomt_npcache;
-	uint8_t aiomt_httun;
-	uint8_t aiomt_iotlb;
-	uint8_t aiomt_captype;
-	uint8_t aiomt_capid;
-	uint32_t aiomt_low_addr32;
-	uint32_t aiomt_hi_addr32;
-	uint64_t aiomt_reg_pa;
-	uint64_t aiomt_va;
-	uint64_t aiomt_reg_va;
-	uint32_t aiomt_range;
-	uint8_t aiomt_rng_bus;
-	uint8_t aiomt_first_devfn;
-	uint8_t aiomt_last_devfn;
-	uint8_t aiomt_rng_valid;
-	uint8_t aiomt_ht_unitid;
-	uint32_t aiomt_misc;
-	uint8_t aiomt_htatsresv;
-	uint8_t aiomt_vasize;
-	uint8_t aiomt_pasize;
-	uint8_t aiomt_msinum;
-	uint8_t aiomt_reg_pages;
-	uint32_t aiomt_reg_size;
-	uint32_t aiomt_devtbl_sz;
-	uint32_t aiomt_cmdbuf_sz;
-	uint32_t aiomt_eventlog_sz;
-	caddr_t aiomt_devtbl;
-	caddr_t aiomt_cmdbuf;
-	caddr_t aiomt_eventlog;
-	uint32_t *aiomt_cmd_tail;
-	uint32_t *aiomt_event_head;
-	ddi_dma_handle_t aiomt_dmahdl;
-	void *aiomt_dma_bufva;
-	uint64_t aiomt_dma_mem_realsz;
-	ddi_acc_handle_t aiomt_dma_mem_hdl;
-	ddi_dma_cookie_t aiomt_buf_dma_cookie;
-	uint_t aiomt_buf_dma_ncookie;
-	amd_iommu_intr_state_t aiomt_intr_state;
-	ddi_intr_handle_t *aiomt_intr_htable;
-	uint32_t aiomt_intr_htable_sz;
-	uint32_t aiomt_actual_intrs;
-	uint32_t aiomt_intr_cap;
-	uint64_t aiomt_reg_devtbl_va;
-	uint64_t aiomt_reg_cmdbuf_va;
-	uint64_t aiomt_reg_eventlog_va;
-	uint64_t aiomt_reg_ctrl_va;
-	uint64_t aiomt_reg_excl_base_va;
-	uint64_t aiomt_reg_excl_lim_va;
-	uint64_t aiomt_reg_cmdbuf_head_va;
-	uint64_t aiomt_reg_cmdbuf_tail_va;
-	uint64_t aiomt_reg_eventlog_head_va;
-	uint64_t aiomt_reg_eventlog_tail_va;
-	uint64_t aiomt_reg_status_va;
-	struct amd_iommu *aiomt_next;
-} amd_iommu_t;
-
-typedef struct amd_iommu_dma_devtbl_ent {
-	uint16_t de_domainid;
-	uint8_t de_R;
-	uint8_t de_W;
-	caddr_t de_root_pgtbl;
-	uint8_t de_pgmode;
-} amd_iommu_dma_devtbl_entry_t;
-
-typedef struct amd_iommu_alias {
-	uint16_t al_bdf;
-	uint16_t al_src_bdf;
-	struct amd_iommu_alias *al_next;
-} amd_iommu_alias_t;
-
-typedef struct amd_iommu_cmdargs {
-	uint64_t ca_addr;
-	uint16_t ca_domainid;
-	uint16_t ca_deviceid;
-} amd_iommu_cmdargs_t;
-
-struct amd_iommu_page_table;
-
-typedef struct amd_iommu_page_table_hash {
-	kmutex_t ampt_lock;
-	struct amd_iommu_page_table **ampt_hash;
-} amd_iommu_page_table_hash_t;
-
-typedef enum {
-	AMD_IOMMU_LOG_INVALID_OP = 0,
-	AMD_IOMMU_LOG_DISPLAY,
-	AMD_IOMMU_LOG_DISCARD
-} amd_iommu_log_op_t;
-
-typedef enum {
-	AMD_IOMMU_DEBUG_NONE = 0,
-	AMD_IOMMU_DEBUG_ALLOCHDL = 0x1,
-	AMD_IOMMU_DEBUG_FREEHDL = 0x2,
-	AMD_IOMMU_DEBUG_BIND = 0x4,
-	AMD_IOMMU_DEBUG_UNBIND = 0x8,
-	AMD_IOMMU_DEBUG_WIN = 0x10,
-	AMD_IOMMU_DEBUG_PAGE_TABLES = 0x20,
-	AMD_IOMMU_DEBUG_DEVTBL = 0x40,
-	AMD_IOMMU_DEBUG_CMDBUF = 0x80,
-	AMD_IOMMU_DEBUG_EVENTLOG = 0x100,
-	AMD_IOMMU_DEBUG_ACPI = 0x200,
-	AMD_IOMMU_DEBUG_PA2VA = 0x400,
-	AMD_IOMMU_DEBUG_TABLES = 0x800,
-	AMD_IOMMU_DEBUG_EXCL = 0x1000,
-	AMD_IOMMU_DEBUG_INTR = 0x2000
-} amd_iommu_debug_t;
-
-extern const char *amd_iommu_modname;
-extern kmutex_t amd_iommu_global_lock;
-extern amd_iommu_alias_t **amd_iommu_alias;
-extern amd_iommu_page_table_hash_t amd_iommu_page_table_hash;
-extern ddi_device_acc_attr_t amd_iommu_devacc;
-extern amd_iommu_debug_t amd_iommu_debug;
-
-extern uint8_t amd_iommu_htatsresv;
-extern uint8_t amd_iommu_vasize;
-extern uint8_t amd_iommu_pasize;
-extern int amd_iommu_64bit_bug;
-extern int amd_iommu_unity_map;
-extern int amd_iommu_no_RW_perms;
-extern int amd_iommu_no_unmap;
-extern int amd_iommu_pageva_inval_all;
-extern int amd_iommu_disable;
-extern char *amd_iommu_disable_list;
-
-extern uint64_t amd_iommu_reg_get64_workaround(uint64_t *regp, uint32_t bits);
-extern uint64_t amd_iommu_reg_set64_workaround(uint64_t *regp, uint32_t bits,
-    uint64_t value);
-
-int amd_iommu_cmd(amd_iommu_t *iommu, amd_iommu_cmd_t cmd,
-    amd_iommu_cmdargs_t *cmdargs, amd_iommu_cmd_flags_t flags, int lock_held);
-int amd_iommu_page_table_hash_init(amd_iommu_page_table_hash_t *ampt);
-void amd_iommu_page_table_hash_fini(amd_iommu_page_table_hash_t *ampt);
-
-int amd_iommu_read_log(amd_iommu_t *iommu, amd_iommu_log_op_t op);
-void amd_iommu_read_boot_props(void);
-void amd_iommu_lookup_conf_props(dev_info_t *dip);
-
-#endif	/* _KERNEL */
-
-#ifdef	__cplusplus
-}
-#endif
-
-#endif	/* _AMD_IOMMU_IMPL_H */
--- a/usr/src/uts/intel/io/amd_iommu/amd_iommu_log.c	Wed Sep 16 22:03:43 2009 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,582 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-
-/*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#include <sys/sunddi.h>
-#include <sys/amd_iommu.h>
-#include "amd_iommu_impl.h"
-#include "amd_iommu_log.h"
-
-
-static const char *
-get_hw_error(uint8_t type)
-{
-	const char *hwerr;
-
-	switch (type) {
-	case 0:
-		hwerr = "Reserved";
-		break;
-	case 1:
-		hwerr = "Master Abort";
-		break;
-	case 2:
-		hwerr = "Target Abort";
-		break;
-	case 3:
-		hwerr = "Data Error";
-		break;
-	default:
-		hwerr = "Unknown";
-		break;
-	}
-
-	return (hwerr);
-}
-
-const char *
-get_illegal_req(uint8_t type, uint8_t TR)
-{
-	const char *illreq;
-
-	switch (type) {
-	case 0:
-		illreq = (TR == 1) ? "Translation I=0/V=0/V=1&&TV=0" :
-		    "Read or Non-posted Write in INTR Range";
-		break;
-	case 1:
-		illreq = (TR == 1) ? "Translation INTR/Port-IO/SysMgt; OR "
-		    "Translation when SysMgt=11b/Port-IO when IOCTL=10b "
-		    "while V=1 && TV=0" :
-		    "Pre-translated transaction from device with I=0 or V=0";
-		break;
-	case 2:
-		illreq = (TR == 1) ? "Reserved":
-		    "Port-IO transaction for device with IoCtl = 00b";
-		break;
-	case 3:
-		illreq = (TR == 1) ? "Reserved":
-		    "Posted write to SysMgt with device SysMgt=00b "
-		    "OR SysMgt=10b && message not INTx "
-		    "OR Posted write to addr transaltion range with "
-		    "HtAtsResv=1";
-		break;
-	case 4:
-		illreq = (TR == 1) ? "Reserved":
-		    "Read request or non-posted write in SysMgt with "
-		    "device SysMgt=10b or 0xb"
-		    "OR Read request or non-posted write in "
-		    "addr translation range with HtAtsResv=1";
-		break;
-	case 5:
-		illreq = (TR == 1) ? "Reserved":
-		    "Posted write to Interrupt/EOI Range "
-		    "for device that has IntCtl=00b";
-		break;
-	case 6:
-		illreq = (TR == 1) ? "Reserved":
-		    "Posted write to reserved Interrupt Address Range";
-		break;
-	case 7:
-		illreq = (TR == 1) ? "Reserved":
-		    "transaction to SysMgt when SysMgt=11b OR "
-		    "transaction to Port-IO when IoCtl=10b while "
-		    "while V=1 TV=0";
-		break;
-	default:
-		illreq = "Unknown error";
-		break;
-	}
-	return (illreq);
-}
-
-static void
-devtab_illegal_entry(amd_iommu_t *iommu, uint32_t *event)
-{
-	uint16_t deviceid;
-	uint8_t TR;
-	uint8_t RZ;
-	uint8_t RW;
-	uint8_t I;
-	uint32_t vaddr_lo;
-	uint32_t vaddr_hi;
-	const char *driver = ddi_driver_name(iommu->aiomt_dip);
-	int instance = ddi_get_instance(iommu->aiomt_dip);
-	const char *f = "devtab_illegal_entry";
-
-	ASSERT(AMD_IOMMU_REG_GET32(&event[1], AMD_IOMMU_EVENT_TYPE) ==
-	    AMD_IOMMU_EVENT_DEVTAB_ILLEGAL_ENTRY);
-
-	deviceid = AMD_IOMMU_REG_GET32(&event[0],
-	    AMD_IOMMU_EVENT_DEVTAB_ILL_DEVICEID);
-
-	TR = AMD_IOMMU_REG_GET32(&event[1],
-	    AMD_IOMMU_EVENT_DEVTAB_ILL_TR);
-
-	RZ = AMD_IOMMU_REG_GET32(&event[1],
-	    AMD_IOMMU_EVENT_DEVTAB_ILL_RZ);
-
-	RW = AMD_IOMMU_REG_GET32(&event[1],
-	    AMD_IOMMU_EVENT_DEVTAB_ILL_RW);
-
-	I = AMD_IOMMU_REG_GET32(&event[1],
-	    AMD_IOMMU_EVENT_DEVTAB_ILL_INTR);
-
-	vaddr_lo = AMD_IOMMU_REG_GET32(&event[2],
-	    AMD_IOMMU_EVENT_DEVTAB_ILL_VADDR_LO);
-
-	vaddr_hi = event[3];
-
-	cmn_err(CE_WARN, "%s: %s%d: idx = %d. Illegal device table entry "
-	    "deviceid=%u, %s request, %s %s transaction, %s request, "
-	    "virtual address = %p",
-	    f, driver, instance, iommu->aiomt_idx,
-	    deviceid,
-	    TR == 1 ? "Translation" : "Transaction",
-	    RZ == 1 ? "Non-zero reserved bit" : "Illegal Level encoding",
-	    RW == 1 ? "Write" : "Read",
-	    I == 1 ? "Interrupt" : "Memory",
-	    (void *)(uintptr_t)(((uint64_t)vaddr_hi) << 32 | vaddr_lo));
-}
-
-static void
-io_page_fault(amd_iommu_t *iommu, uint32_t *event)
-{
-	uint16_t deviceid;
-	uint16_t domainid;
-	uint8_t TR;
-	uint8_t RZ;
-	uint8_t RW;
-	uint8_t PE;
-	uint8_t PR;
-	uint8_t I;
-	uint32_t vaddr_lo;
-	uint32_t vaddr_hi;
-	const char *driver = ddi_driver_name(iommu->aiomt_dip);
-	int instance = ddi_get_instance(iommu->aiomt_dip);
-	const char *f = "io_page_fault";
-
-	ASSERT(AMD_IOMMU_REG_GET32(&event[1], AMD_IOMMU_EVENT_TYPE) ==
-	    AMD_IOMMU_EVENT_IO_PAGE_FAULT);
-
-	deviceid = AMD_IOMMU_REG_GET32(&event[0],
-	    AMD_IOMMU_EVENT_IO_PGFAULT_DEVICEID);
-
-	TR = AMD_IOMMU_REG_GET32(&event[1], AMD_IOMMU_EVENT_IO_PGFAULT_TR);
-
-	RZ = AMD_IOMMU_REG_GET32(&event[1], AMD_IOMMU_EVENT_IO_PGFAULT_RZ);
-
-	PE = AMD_IOMMU_REG_GET32(&event[1], AMD_IOMMU_EVENT_IO_PGFAULT_PE);
-
-	RW = AMD_IOMMU_REG_GET32(&event[1], AMD_IOMMU_EVENT_IO_PGFAULT_RW);
-
-	PR = AMD_IOMMU_REG_GET32(&event[1], AMD_IOMMU_EVENT_IO_PGFAULT_PR);
-
-	I = AMD_IOMMU_REG_GET32(&event[1], AMD_IOMMU_EVENT_IO_PGFAULT_INTR);
-
-	domainid = AMD_IOMMU_REG_GET32(&event[1],
-	    AMD_IOMMU_EVENT_IO_PGFAULT_DOMAINID);
-
-	vaddr_lo = event[2];
-
-	vaddr_hi = event[3];
-
-	cmn_err(CE_WARN, "%s: %s%d: idx = %d. IO Page Fault. "
-	    "deviceid=%u, %s request, %s, %s permissions, %s transaction, "
-	    "%s, %s request, domainid=%u, virtual address = %p",
-	    f, driver, instance, iommu->aiomt_idx,
-	    deviceid,
-	    TR == 1 ? "Translation" : "Transaction",
-	    RZ == 1 ? "Non-zero reserved bit" : "Illegal Level encoding",
-	    PE == 1 ? "did not have" : "had",
-	    RW == 1 ? "Write" : "Read",
-	    PR == 1 ? "Page present or Interrupt Remapped" :
-	    "Page not present or Interrupt Blocked",
-	    I == 1 ? "Interrupt" : "Memory",
-	    domainid,
-	    (void *)(uintptr_t)(((uint64_t)vaddr_hi) << 32 | vaddr_lo));
-}
-
-static void
-devtab_hw_error(amd_iommu_t *iommu, uint32_t *event)
-{
-	uint16_t deviceid;
-	uint8_t type;
-	uint8_t TR;
-	uint8_t RW;
-	uint8_t I;
-	uint32_t physaddr_lo;
-	uint32_t physaddr_hi;
-	const char *hwerr;
-	const char *driver = ddi_driver_name(iommu->aiomt_dip);
-	int instance = ddi_get_instance(iommu->aiomt_dip);
-	const char *f = "devtab_hw_error";
-
-	ASSERT(AMD_IOMMU_REG_GET32(&event[1], AMD_IOMMU_EVENT_TYPE) ==
-	    AMD_IOMMU_EVENT_DEVTAB_HW_ERROR);
-
-	deviceid = AMD_IOMMU_REG_GET32(&event[0],
-	    AMD_IOMMU_EVENT_DEVTAB_HWERR_DEVICEID);
-
-	type = AMD_IOMMU_REG_GET32(&event[1],
-	    AMD_IOMMU_EVENT_DEVTAB_HWERR_TYPE);
-
-	hwerr = get_hw_error(type);
-
-	TR = AMD_IOMMU_REG_GET32(&event[1], AMD_IOMMU_EVENT_DEVTAB_HWERR_TR);
-
-	RW = AMD_IOMMU_REG_GET32(&event[1], AMD_IOMMU_EVENT_DEVTAB_HWERR_RW);
-
-	I = AMD_IOMMU_REG_GET32(&event[1], AMD_IOMMU_EVENT_DEVTAB_HWERR_INTR);
-
-	physaddr_lo = AMD_IOMMU_REG_GET32(&event[2],
-	    AMD_IOMMU_EVENT_DEVTAB_HWERR_PHYSADDR_LO);
-
-	physaddr_hi = event[3];
-
-	cmn_err(CE_WARN, "%s: %s%d: idx = %d. Device Table HW Error. "
-	    "deviceid=%u, HW error type: %s, %s request, %s transaction, "
-	    "%s request, physical address = %p",
-	    f, driver, instance, iommu->aiomt_idx,
-	    deviceid, hwerr,
-	    TR == 1 ? "Translation" : "Transaction",
-	    RW == 1 ? "Write" : "Read",
-	    I == 1 ? "Interrupt" : "Memory",
-	    (void *)(uintptr_t)(((uint64_t)physaddr_hi) << 32 | physaddr_lo));
-}
-
-
-static void
-pgtable_hw_error(amd_iommu_t *iommu, uint32_t *event)
-{
-	uint16_t deviceid;
-	uint16_t domainid;
-	uint8_t type;
-	uint8_t TR;
-	uint8_t RW;
-	uint8_t I;
-	uint32_t physaddr_lo;
-	uint32_t physaddr_hi;
-	const char *hwerr;
-	const char *driver = ddi_driver_name(iommu->aiomt_dip);
-	int instance = ddi_get_instance(iommu->aiomt_dip);
-	const char *f = "pgtable_hw_error";
-
-	ASSERT(AMD_IOMMU_REG_GET32(&event[1], AMD_IOMMU_EVENT_TYPE) ==
-	    AMD_IOMMU_EVENT_PGTABLE_HW_ERROR);
-
-	deviceid = AMD_IOMMU_REG_GET32(&event[0],
-	    AMD_IOMMU_EVENT_PGTABLE_HWERR_DEVICEID);
-
-	type = AMD_IOMMU_REG_GET32(&event[1],
-	    AMD_IOMMU_EVENT_DEVTAB_HWERR_TYPE);
-
-	hwerr = get_hw_error(type);
-
-	TR = AMD_IOMMU_REG_GET32(&event[1], AMD_IOMMU_EVENT_PGTABLE_HWERR_TR);
-
-	RW = AMD_IOMMU_REG_GET32(&event[1], AMD_IOMMU_EVENT_PGTABLE_HWERR_RW);
-
-	I = AMD_IOMMU_REG_GET32(&event[1], AMD_IOMMU_EVENT_PGTABLE_HWERR_INTR);
-
-	domainid = AMD_IOMMU_REG_GET32(&event[1],
-	    AMD_IOMMU_EVENT_PGTABLE_HWERR_DOMAINID);
-
-	physaddr_lo = AMD_IOMMU_REG_GET32(&event[2],
-	    AMD_IOMMU_EVENT_PGTABLE_HWERR_PHYSADDR_LO);
-
-	physaddr_hi = event[3];
-
-	cmn_err(CE_WARN, "%s: %s%d: idx = %d. Page Table HW Error. "
-	    "deviceid=%u, HW error type: %s, %s request, %s transaction, "
-	    "%s request, domainid=%u, physical address = %p",
-	    f, driver, instance, iommu->aiomt_idx,
-	    deviceid, hwerr,
-	    TR == 1 ? "Translation" : "Transaction",
-	    RW == 1 ? "Write" : "Read",
-	    I == 1 ? "Interrupt" : "Memory",
-	    domainid,
-	    (void *)(uintptr_t)(((uint64_t)physaddr_hi) << 32 | physaddr_lo));
-}
-
-static void
-cmdbuf_illegal_cmd(amd_iommu_t *iommu, uint32_t *event)
-{
-	uint32_t physaddr_lo;
-	uint32_t physaddr_hi;
-	const char *driver = ddi_driver_name(iommu->aiomt_dip);
-	int instance = ddi_get_instance(iommu->aiomt_dip);
-	const char *f = "cmdbuf_illegal_cmd";
-
-	ASSERT(AMD_IOMMU_REG_GET32(&event[1], AMD_IOMMU_EVENT_TYPE) ==
-	    AMD_IOMMU_EVENT_CMDBUF_ILLEGAL_CMD);
-
-	physaddr_lo = AMD_IOMMU_REG_GET32(&event[2],
-	    AMD_IOMMU_EVENT_CMDBUF_ILLEGAL_CMD_PHYS_LO);
-
-	physaddr_hi = event[3];
-
-	cmn_err(CE_WARN, "%s: %s%d: idx = %d. Illegal IOMMU command. "
-	    "command physical address = %p",
-	    f, driver, instance, iommu->aiomt_idx,
-	    (void *)(uintptr_t)(((uint64_t)physaddr_hi) << 32 | physaddr_lo));
-}
-
-static void
-cmdbuf_hw_error(amd_iommu_t *iommu, uint32_t *event)
-{
-	uint32_t physaddr_lo;
-	uint32_t physaddr_hi;
-	uint8_t type;
-	const char *hwerr;
-	const char *driver = ddi_driver_name(iommu->aiomt_dip);
-	int instance = ddi_get_instance(iommu->aiomt_dip);
-	const char *f = "cmdbuf_hw_error";
-
-	ASSERT(AMD_IOMMU_REG_GET32(&event[1], AMD_IOMMU_EVENT_TYPE) ==
-	    AMD_IOMMU_EVENT_CMDBUF_HW_ERROR);
-
-	type = AMD_IOMMU_REG_GET32(&event[1],
-	    AMD_IOMMU_EVENT_CMDBUF_HWERR_TYPE);
-
-	hwerr = get_hw_error(type);
-
-	physaddr_lo = AMD_IOMMU_REG_GET32(&event[2],
-	    AMD_IOMMU_EVENT_CMDBUF_HWERR_PHYS_LO);
-
-	physaddr_hi = event[3];
-
-	cmn_err(CE_WARN, "%s: %s%d: idx = %d. Command Buffer HW error. "
-	    "HW error type = %s, command buffer physical address = %p",
-	    f, driver, instance, iommu->aiomt_idx,
-	    hwerr,
-	    (void *)(uintptr_t)(((uint64_t)physaddr_hi) << 32 | physaddr_lo));
-}
-
-static void
-iotlb_inval_to(amd_iommu_t *iommu, uint32_t *event)
-{
-	uint16_t deviceid;
-	uint32_t physaddr_lo;
-	uint32_t physaddr_hi;
-	uint8_t type;
-	const char *hwerr;
-	const char *driver = ddi_driver_name(iommu->aiomt_dip);
-	int instance = ddi_get_instance(iommu->aiomt_dip);
-	const char *f = "iotlb_inval_to";
-
-	ASSERT(AMD_IOMMU_REG_GET32(&event[1], AMD_IOMMU_EVENT_TYPE) ==
-	    AMD_IOMMU_EVENT_IOTLB_INVAL_TO);
-
-	deviceid = AMD_IOMMU_REG_GET32(&event[0],
-	    AMD_IOMMU_EVENT_IOTLB_INVAL_TO_DEVICEID);
-
-	/*
-	 * XXX bug in spec. Is the type field available at +04 26:25, or is
-	 * it reserved?
-	 */
-	type = AMD_IOMMU_REG_GET32(&event[1],
-	    AMD_IOMMU_EVENT_IOTLB_INVAL_TO_TYPE);
-	hwerr = get_hw_error(type);
-
-	physaddr_lo = AMD_IOMMU_REG_GET32(&event[2],
-	    AMD_IOMMU_EVENT_IOTLB_INVAL_TO_PHYS_LO);
-
-	physaddr_hi = event[3];
-
-	cmn_err(CE_WARN, "%s: %s%d: idx = %d. deviceid = %u "
-	    "IOTLB invalidation Timeout. "
-	    "HW error type = %s, invalidation command physical address = %p",
-	    f, driver, instance, iommu->aiomt_idx, deviceid,
-	    hwerr,
-	    (void *)(uintptr_t)(((uint64_t)physaddr_hi) << 32 | physaddr_lo));
-}
-
-static void
-device_illegal_req(amd_iommu_t *iommu, uint32_t *event)
-{
-	uint16_t deviceid;
-	uint8_t TR;
-	uint32_t addr_lo;
-	uint32_t addr_hi;
-	uint8_t type;
-	const char *reqerr;
-	const char *driver = ddi_driver_name(iommu->aiomt_dip);
-	int instance = ddi_get_instance(iommu->aiomt_dip);
-	const char *f = "device_illegal_req";
-
-	ASSERT(AMD_IOMMU_REG_GET32(&event[1], AMD_IOMMU_EVENT_TYPE) ==
-	    AMD_IOMMU_EVENT_DEVICE_ILLEGAL_REQ);
-
-	deviceid = AMD_IOMMU_REG_GET32(&event[0],
-	    AMD_IOMMU_EVENT_DEVICE_ILLEGAL_REQ_DEVICEID);
-
-	TR = AMD_IOMMU_REG_GET32(&event[1],
-	    AMD_IOMMU_EVENT_DEVICE_ILLEGAL_REQ_TR);
-
-	type = AMD_IOMMU_REG_GET32(&event[1],
-	    AMD_IOMMU_EVENT_DEVICE_ILLEGAL_REQ_TYPE);
-
-	reqerr = get_illegal_req(type, TR);
-
-
-	addr_lo = event[2];
-	addr_hi = event[3];
-
-	cmn_err(CE_WARN, "%s: %s%d: idx = %d. deviceid = %d "
-	    "Illegal Device Request. "
-	    "Illegal Request type = %s, %s request, address accessed = %p",
-	    f, driver, instance, iommu->aiomt_idx, deviceid,
-	    reqerr,
-	    TR == 1 ? "Translation" : "Transaction",
-	    (void *)(uintptr_t)(((uint64_t)addr_hi) << 32 | addr_lo));
-}
-
-static void
-amd_iommu_process_one_event(amd_iommu_t *iommu)
-{
-	uint32_t event[4];
-	amd_iommu_event_t event_type;
-	int i;
-	const char *driver = ddi_driver_name(iommu->aiomt_dip);
-	int instance = ddi_get_instance(iommu->aiomt_dip);
-	const char *f = "amd_iommu_process_one_event";
-
-	ASSERT(MUTEX_HELD(&iommu->aiomt_eventlock));
-
-	SYNC_FORKERN(iommu->aiomt_dmahdl);
-	for (i = 0; i < 4; i++) {
-		event[i] = iommu->aiomt_event_head[i];
-	}
-
-	event_type = AMD_IOMMU_REG_GET32(&event[1], AMD_IOMMU_EVENT_TYPE);
-
-	switch (event_type) {
-	case AMD_IOMMU_EVENT_DEVTAB_ILLEGAL_ENTRY:
-		devtab_illegal_entry(iommu, event);
-		break;
-	case AMD_IOMMU_EVENT_IO_PAGE_FAULT:
-		io_page_fault(iommu, event);
-		break;
-	case AMD_IOMMU_EVENT_DEVTAB_HW_ERROR:
-		devtab_hw_error(iommu, event);
-		break;
-	case AMD_IOMMU_EVENT_PGTABLE_HW_ERROR:
-		pgtable_hw_error(iommu, event);
-		break;
-	case AMD_IOMMU_EVENT_CMDBUF_HW_ERROR:
-		cmdbuf_hw_error(iommu, event);
-		break;
-	case AMD_IOMMU_EVENT_CMDBUF_ILLEGAL_CMD:
-		cmdbuf_illegal_cmd(iommu, event);
-		break;
-	case AMD_IOMMU_EVENT_IOTLB_INVAL_TO:
-		iotlb_inval_to(iommu, event);
-		break;
-	case AMD_IOMMU_EVENT_DEVICE_ILLEGAL_REQ:
-		device_illegal_req(iommu, event);
-		break;
-	default:
-		cmn_err(CE_WARN, "%s: %s%d: idx = %d. Unknown event: %u",
-		    f, driver, instance, iommu->aiomt_idx, event_type);
-		break;
-	}
-}
-
-int
-amd_iommu_read_log(amd_iommu_t *iommu, amd_iommu_log_op_t op)
-{
-	caddr_t evtail;
-	uint64_t evtail_off;
-	uint64_t evhead_off;
-
-	ASSERT(op != AMD_IOMMU_LOG_INVALID_OP);
-
-	mutex_enter(&iommu->aiomt_eventlock);
-
-	ASSERT(iommu->aiomt_event_head != NULL);
-
-	/* XXX verify */
-	evtail_off = AMD_IOMMU_REG_GET64(
-	    REGADDR64(iommu->aiomt_reg_eventlog_tail_va),
-	    AMD_IOMMU_EVENTTAILPTR);
-
-	evtail_off = EV2OFF(evtail_off);
-
-	ASSERT(evtail_off < iommu->aiomt_eventlog_sz);
-
-	evtail = iommu->aiomt_eventlog + evtail_off;
-
-	if (op == AMD_IOMMU_LOG_DISCARD) {
-		/*LINTED*/
-		iommu->aiomt_event_head = (uint32_t *)evtail;
-		AMD_IOMMU_REG_SET64(REGADDR64(
-		    iommu->aiomt_reg_eventlog_head_va),
-		    AMD_IOMMU_EVENTHEADPTR, OFF2EV(evtail_off));
-		cmn_err(CE_NOTE, "Discarded IOMMU event log");
-		mutex_exit(&iommu->aiomt_eventlock);
-		return (DDI_SUCCESS);
-	}
-
-	/*LINTED*/
-	while (1) {
-		if ((caddr_t)iommu->aiomt_event_head == evtail)
-			break;
-
-		cmn_err(CE_WARN, "evtail_off = %p, head = %p, tail = %p",
-		    (void *)(uintptr_t)evtail_off,
-		    (void *)iommu->aiomt_event_head,
-		    (void *)evtail);
-
-		amd_iommu_process_one_event(iommu);
-
-		/*
-		 * Update the head pointer in soft state
-		 * and the head pointer register
-		 */
-		iommu->aiomt_event_head += 4;
-		if ((caddr_t)iommu->aiomt_event_head >=
-		    iommu->aiomt_eventlog + iommu->aiomt_eventlog_sz) {
-			/* wraparound */
-			iommu->aiomt_event_head =
-			/*LINTED*/
-			    (uint32_t *)iommu->aiomt_eventlog;
-			evhead_off = 0;
-		} else {
-			evhead_off =  (caddr_t)iommu->aiomt_event_head
-			/*LINTED*/
-			    - iommu->aiomt_eventlog;
-		}
-
-		ASSERT(evhead_off < iommu->aiomt_eventlog_sz);
-
-		AMD_IOMMU_REG_SET64(REGADDR64(
-		    iommu->aiomt_reg_eventlog_head_va),
-		    AMD_IOMMU_EVENTHEADPTR, OFF2EV(evhead_off));
-	}
-	mutex_exit(&iommu->aiomt_eventlock);
-
-	return (DDI_SUCCESS);
-}
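The loop above is the consumer side of the hardware event-log ring: the IOMMU appends 16-byte records (four uint32_t words) at the offset named by the tail register, and the driver drains records from the head, publishing each new head offset back through AMD_IOMMU_EVENTHEADPTR and wrapping at aiomt_eventlog_sz. The advance step, stripped of the DDI and register plumbing (names here are illustrative):

    #include <stddef.h>
    #include <stdint.h>

    /* one event record is four 32-bit words, i.e. 16 bytes */
    static uint32_t *
    advance_head(uint32_t *head, uint32_t *log, size_t log_sz)
    {
    	head += 4;
    	if ((char *)head >= (char *)log + log_sz)
    		head = log;	/* wrap to the start of the log */
    	return (head);
    }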
--- a/usr/src/uts/intel/io/amd_iommu/amd_iommu_log.h	Wed Sep 16 22:03:43 2009 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,116 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef _AMD_IOMMU_LOG_H
-#define	_AMD_IOMMU_LOG_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <sys/amd_iommu.h>
-
-#ifdef _KERNEL
-
-#define	EV2OFF(e)	((e) << 4)
-#define	OFF2EV(o)	((o) >> 4)
-
-typedef enum {
-	AMD_IOMMU_EVENT_INVALID = 0,
-	AMD_IOMMU_EVENT_DEVTAB_ILLEGAL_ENTRY = 1,
-	AMD_IOMMU_EVENT_IO_PAGE_FAULT = 2,
-	AMD_IOMMU_EVENT_DEVTAB_HW_ERROR = 3,
-	AMD_IOMMU_EVENT_PGTABLE_HW_ERROR = 4,
-	AMD_IOMMU_EVENT_CMDBUF_ILLEGAL_CMD = 5,
-	AMD_IOMMU_EVENT_CMDBUF_HW_ERROR = 6,
-	AMD_IOMMU_EVENT_IOTLB_INVAL_TO = 7,
-	AMD_IOMMU_EVENT_DEVICE_ILLEGAL_REQ = 8
-} amd_iommu_event_t;
-
-/* Common to all events */
-#define	AMD_IOMMU_EVENT_TYPE			(31 << 16 | 28)
-
-/* Illegal device Table Entry Event bits */
-#define	AMD_IOMMU_EVENT_DEVTAB_ILL_DEVICEID	(15 << 16 | 0)
-#define	AMD_IOMMU_EVENT_DEVTAB_ILL_TR		(24 << 16 | 24)
-#define	AMD_IOMMU_EVENT_DEVTAB_ILL_RZ		(23 << 16 | 23)
-#define	AMD_IOMMU_EVENT_DEVTAB_ILL_RW		(21 << 16 | 21)
-#define	AMD_IOMMU_EVENT_DEVTAB_ILL_INTR		(19 << 16 | 19)
-#define	AMD_IOMMU_EVENT_DEVTAB_ILL_VADDR_LO	(31 << 16 | 2)
-
-/* IO Page Fault event bits */
-#define	AMD_IOMMU_EVENT_IO_PGFAULT_DEVICEID	(15 << 16 | 0)
-#define	AMD_IOMMU_EVENT_IO_PGFAULT_TR		(24 << 16 | 24)
-#define	AMD_IOMMU_EVENT_IO_PGFAULT_RZ		(23 << 16 | 23)
-#define	AMD_IOMMU_EVENT_IO_PGFAULT_PE		(22 << 16 | 22)
-#define	AMD_IOMMU_EVENT_IO_PGFAULT_RW		(21 << 16 | 21)
-#define	AMD_IOMMU_EVENT_IO_PGFAULT_PR		(20 << 16 | 20)
-#define	AMD_IOMMU_EVENT_IO_PGFAULT_INTR		(19 << 16 | 19)
-#define	AMD_IOMMU_EVENT_IO_PGFAULT_DOMAINID	(15 << 16 | 0)
-
-
-/* Device Table HW Error event bits */
-#define	AMD_IOMMU_EVENT_DEVTAB_HWERR_DEVICEID	(15 << 16 | 0)
-#define	AMD_IOMMU_EVENT_DEVTAB_HWERR_TYPE	(26 << 16 | 25)
-#define	AMD_IOMMU_EVENT_DEVTAB_HWERR_TR		(24 << 16 | 24)
-#define	AMD_IOMMU_EVENT_DEVTAB_HWERR_RW		(21 << 16 | 21)
-#define	AMD_IOMMU_EVENT_DEVTAB_HWERR_INTR	(19 << 16 | 19)
-#define	AMD_IOMMU_EVENT_DEVTAB_HWERR_PHYSADDR_LO	(31 << 16 | 4)
-
-
-/* Page Table HW Error event bits */
-#define	AMD_IOMMU_EVENT_PGTABLE_HWERR_DEVICEID	(15 << 16 | 0)
-#define	AMD_IOMMU_EVENT_DEVTAB_HWERR_TYPE	(26 << 16 | 25)
-#define	AMD_IOMMU_EVENT_PGTABLE_HWERR_TR	(24 << 16 | 24)
-#define	AMD_IOMMU_EVENT_PGTABLE_HWERR_RW	(21 << 16 | 21)
-#define	AMD_IOMMU_EVENT_PGTABLE_HWERR_INTR	(19 << 16 | 19)
-#define	AMD_IOMMU_EVENT_PGTABLE_HWERR_DOMAINID  (15 << 16 | 0)
-#define	AMD_IOMMU_EVENT_PGTABLE_HWERR_PHYSADDR_LO	(31 << 16 | 3)
-
-/* Illegal Command Error event bits */
-#define	AMD_IOMMU_EVENT_CMDBUF_ILLEGAL_CMD_PHYS_LO	(31 << 16 | 4)
-
-/* Command Buffer HW Error event bits */
-#define	AMD_IOMMU_EVENT_CMDBUF_HWERR_TYPE	(26 << 16 | 25)
-#define	AMD_IOMMU_EVENT_CMDBUF_HWERR_PHYS_LO	(31 << 16 | 4)
-
-
-/* IOTLB Invalidation TO event bits */
-#define	AMD_IOMMU_EVENT_IOTLB_INVAL_TO_DEVICEID	(15 << 16 | 0)
-#define	AMD_IOMMU_EVENT_IOTLB_INVAL_TO_TYPE	(26 << 16 | 25)
-#define	AMD_IOMMU_EVENT_IOTLB_INVAL_TO_PHYS_LO	(31 << 16 | 4)
-
-/* Illegal Device request event bits */
-#define	AMD_IOMMU_EVENT_DEVICE_ILLEGAL_REQ_DEVICEID	(15 << 16 | 0)
-#define	AMD_IOMMU_EVENT_DEVICE_ILLEGAL_REQ_TYPE		(27 << 16 | 25)
-#define	AMD_IOMMU_EVENT_DEVICE_ILLEGAL_REQ_TR		(24 << 16 | 24)
-
-#endif /* _KERNEL */
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif	/* _AMD_IOMMU_LOG_H */
--- a/usr/src/uts/intel/io/amd_iommu/amd_iommu_page_tables.c	Wed Sep 16 22:03:43 2009 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1699 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-
-/*
- * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#include <sys/sunddi.h>
-#include <sys/sunndi.h>
-#include <sys/acpi/acpi.h>
-#include <sys/acpica.h>
-#include <sys/amd_iommu.h>
-#include <sys/bootconf.h>
-#include <sys/sysmacros.h>
-#include <sys/ddidmareq.h>
-
-#include "amd_iommu_impl.h"
-#include "amd_iommu_acpi.h"
-#include "amd_iommu_page_tables.h"
-
-ddi_dma_attr_t amd_iommu_pgtable_dma_attr = {
-	DMA_ATTR_V0,
-	0U,				/* dma_attr_addr_lo */
-	0xffffffffffffffffULL,		/* dma_attr_addr_hi */
-	0xffffffffU,			/* dma_attr_count_max */
-	(uint64_t)4096,			/* dma_attr_align */
-	1,				/* dma_attr_burstsizes */
-	64,				/* dma_attr_minxfer */
-	0xffffffffU,			/* dma_attr_maxxfer */
-	0xffffffffU,			/* dma_attr_seg */
-	1,				/* dma_attr_sgllen, variable */
-	64,				/* dma_attr_granular */
-	0				/* dma_attr_flags */
-};
-
-static amd_iommu_domain_t **amd_iommu_domain_table;
-
-static struct {
-	int f_count;
-	amd_iommu_page_table_t *f_list;
-} amd_iommu_pgtable_freelist;
-int amd_iommu_no_pgtable_freelist;
-
-/*ARGSUSED*/
-static int
-amd_iommu_get_src_bdf(amd_iommu_t *iommu, int32_t bdf, int32_t *src_bdfp)
-{
-	amd_iommu_acpi_ivhd_t *hinfop;
-
-	hinfop = amd_iommu_lookup_ivhd(bdf);
-	if (hinfop == NULL || hinfop->ach_src_deviceid == -1)
-		*src_bdfp = bdf;
-	else
-		*src_bdfp = hinfop->ach_src_deviceid;
-
-	return (DDI_SUCCESS);
-}
-
-static dev_info_t *
-amd_iommu_pci_dip(dev_info_t *rdip, const char *path)
-{
-	dev_info_t *pdip;
-	const char *driver = ddi_driver_name(rdip);
-	int instance = ddi_get_instance(rdip);
-	const char *f = "amd_iommu_pci_dip";
-
-	/* Hold rdip so it and its parents don't go away */
-	ndi_hold_devi(rdip);
-
-	if (ddi_is_pci_dip(rdip))
-		return (rdip);
-
-	pdip = rdip;
-	while (pdip = ddi_get_parent(pdip)) {
-		if (ddi_is_pci_dip(pdip)) {
-			ndi_hold_devi(pdip);
-			ndi_rele_devi(rdip);
-			return (pdip);
-		}
-	}
-
-	cmn_err(CE_WARN, "%s: %s%d dip = %p has no PCI parent, path = %s",
-	    f, driver, instance, (void *)rdip, path);
-
-	ndi_rele_devi(rdip);
-
-	ASSERT(0);
-
-	return (NULL);
-}
-
-/*ARGSUSED*/
-static int
-amd_iommu_get_domain(amd_iommu_t *iommu, dev_info_t *rdip, int alias,
-    uint16_t deviceid, domain_id_t *domainid, const char *path)
-{
-	const char *f = "amd_iommu_get_domain";
-
-	*domainid = AMD_IOMMU_INVALID_DOMAIN;
-
-	ASSERT(strcmp(ddi_driver_name(rdip), "agpgart") != 0);
-
-	switch (deviceid) {
-		case AMD_IOMMU_INVALID_DOMAIN:
-		case AMD_IOMMU_IDENTITY_DOMAIN:
-		case AMD_IOMMU_PASSTHRU_DOMAIN:
-		case AMD_IOMMU_SYS_DOMAIN:
-			*domainid = AMD_IOMMU_SYS_DOMAIN;
-			break;
-		default:
-			*domainid = deviceid;
-			break;
-	}
-
-	if (amd_iommu_debug & AMD_IOMMU_DEBUG_DEVTBL) {
-		cmn_err(CE_NOTE, "%s: domainid for %s = %d",
-		    f, path, *domainid);
-	}
-
-	return (DDI_SUCCESS);
-}
-
-static uint16_t
-hash_domain(domain_id_t domainid)
-{
-	return (domainid % AMD_IOMMU_DOMAIN_HASH_SZ);
-}
-
-/*ARGSUSED*/
-void
-amd_iommu_init_page_tables(amd_iommu_t *iommu)
-{
-	amd_iommu_domain_table = kmem_zalloc(
-	    sizeof (amd_iommu_domain_t *) * AMD_IOMMU_DOMAIN_HASH_SZ, KM_SLEEP);
-}
-
-/*ARGSUSED*/
-void
-amd_iommu_fini_page_tables(amd_iommu_t *iommu)
-{
-	if (amd_iommu_domain_table) {
-		kmem_free(amd_iommu_domain_table,
-		    sizeof (amd_iommu_domain_t *) * AMD_IOMMU_DOMAIN_HASH_SZ);
-		amd_iommu_domain_table = NULL;
-	}
-}
-
-static amd_iommu_domain_t *
-amd_iommu_lookup_domain(amd_iommu_t *iommu, domain_id_t domainid,
-    map_type_t type, int km_flags)
-{
-	uint16_t idx;
-	amd_iommu_domain_t *dp;
-	char name[AMD_IOMMU_VMEM_NAMELEN+1];
-
-	ASSERT(amd_iommu_domain_table);
-
-	idx = hash_domain(domainid);
-
-	for (dp = amd_iommu_domain_table[idx]; dp; dp = dp->d_next) {
-		if (dp->d_domainid == domainid)
-			return (dp);
-	}
-
-	ASSERT(type != AMD_IOMMU_INVALID_MAP);
-
-	dp = kmem_zalloc(sizeof (*dp), km_flags);
-	if (dp == NULL)
-		return (NULL);
-	dp->d_domainid = domainid;
-	dp->d_pgtable_root_4K = 0;	/* make this explicit */
-
-	if (type == AMD_IOMMU_VMEM_MAP) {
-		uint64_t base;
-		uint64_t size;
-		(void) snprintf(name, sizeof (name), "dvma_idx%d_domain%d",
-		    iommu->aiomt_idx, domainid);
-		base = MMU_PAGESIZE;
-		size = AMD_IOMMU_SIZE_4G - MMU_PAGESIZE;
-		dp->d_vmem = vmem_create(name, (void *)(uintptr_t)base, size,
-		    MMU_PAGESIZE, NULL, NULL, NULL, 0,
-		    km_flags == KM_SLEEP ? VM_SLEEP : VM_NOSLEEP);
-		if (dp->d_vmem == NULL) {
-			kmem_free(dp, sizeof (*dp));
-			return (NULL);
-		}
-	} else {
-		dp->d_vmem = NULL;
-	}
-
-	dp->d_next = amd_iommu_domain_table[idx];
-	dp->d_prev = NULL;
-	amd_iommu_domain_table[idx] = dp;
-	if (dp->d_next)
-		dp->d_next->d_prev = dp;
-	dp->d_ref = 0;
-
-
-	return (dp);
-}
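Each VMEM-mapped domain gets a private DVMA arena covering [MMU_PAGESIZE, 4G); starting the arena one page in presumably guarantees that DVMA address 0 is never handed out, so a zero address stays unambiguous. A sketch of drawing DVMA space from the arena created above (illustrative use, not a call site in this file; npages is a hypothetical count):

    /* allocate npages worth of DVMA space from the per-domain arena */
    uint64_t dvma = (uint64_t)(uintptr_t)vmem_alloc(dp->d_vmem,
        ptob(npages), VM_NOSLEEP);
    if (dvma == 0) {
    	/* arena exhausted; caller must fail or retry */
    }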
-
-static void
-amd_iommu_teardown_domain(amd_iommu_t *iommu, amd_iommu_domain_t *dp)
-{
-	uint16_t idx;
-	int flags;
-	amd_iommu_cmdargs_t cmdargs = {0};
-	domain_id_t domainid = dp->d_domainid;
-	const char *f = "amd_iommu_teardown_domain";
-
-	ASSERT(dp->d_ref == 0);
-
-	idx = hash_domain(dp->d_domainid);
-
-	if (dp->d_prev == NULL)
-		amd_iommu_domain_table[idx] = dp->d_next;
-	else
-		dp->d_prev->d_next = dp->d_next;
-
-	if (dp->d_next)
-		dp->d_next->d_prev = dp->d_prev;
-
-	if (dp->d_vmem != NULL) {
-		vmem_destroy(dp->d_vmem);
-		dp->d_vmem = NULL;
-	}
-
-	kmem_free(dp, sizeof (*dp));
-
-	cmdargs.ca_domainid = (uint16_t)domainid;
-	cmdargs.ca_addr = (uintptr_t)0x7FFFFFFFFFFFF000;
-	flags = AMD_IOMMU_CMD_FLAGS_PAGE_PDE_INVAL |
-	    AMD_IOMMU_CMD_FLAGS_PAGE_INVAL_S;
-
-	if (amd_iommu_cmd(iommu, AMD_IOMMU_CMD_INVAL_IOMMU_PAGES,
-	    &cmdargs, flags, 0) != DDI_SUCCESS) {
-		cmn_err(CE_WARN, "%s: idx=%d: domainid=%d"
-		    "Failed to invalidate domain in IOMMU HW cache",
-		    f, iommu->aiomt_idx, cmdargs.ca_domainid);
-	}
-}
-
-static int
-amd_iommu_get_deviceid(amd_iommu_t *iommu, dev_info_t *rdip, int32_t *deviceid,
-    int *aliasp, const char *path)
-{
-	int bus = -1;
-	int device = -1;
-	int func = -1;
-	uint16_t bdf;
-	int32_t src_bdf;
-	dev_info_t *idip = iommu->aiomt_dip;
-	const char *driver = ddi_driver_name(idip);
-	int instance = ddi_get_instance(idip);
-	dev_info_t *pci_dip;
-	const char *f = "amd_iommu_get_deviceid";
-
-	/* be conservative. Always assume an alias */
-	*aliasp = 1;
-	*deviceid = 0;
-
-	/* Check for special devices (rdip == NULL) */
-	if (rdip == NULL) {
-		if (amd_iommu_get_src_bdf(iommu, -1, &src_bdf) != DDI_SUCCESS) {
-			cmn_err(CE_WARN,
-			    "%s: %s%d: idx=%d, failed to get SRC BDF "
-			    "for special-device",
-			    f, driver, instance, iommu->aiomt_idx);
-			return (DDI_DMA_NOMAPPING);
-		}
-		*deviceid = src_bdf;
-		*aliasp = 1;
-		return (DDI_SUCCESS);
-	}
-
-	if (amd_iommu_debug & AMD_IOMMU_DEBUG_DEVTBL) {
-		cmn_err(CE_NOTE, "%s: attempting to get deviceid for %s",
-		    f, path);
-	}
-
-	pci_dip = amd_iommu_pci_dip(rdip, path);
-	if (pci_dip == NULL) {
-		cmn_err(CE_WARN, "%s: %s%d: idx = %d, failed to get PCI dip "
-		    "for rdip=%p, path = %s",
-		    f, driver, instance, iommu->aiomt_idx, (void *)rdip,
-		    path);
-		return (DDI_DMA_NOMAPPING);
-	}
-
-	if (acpica_get_bdf(pci_dip, &bus, &device, &func) != DDI_SUCCESS) {
-		ndi_rele_devi(pci_dip);
-		cmn_err(CE_WARN, "%s: %s%d: idx=%d, failed to get BDF for "
-		    "PCI dip (%p). rdip path = %s",
-		    f, driver, instance, iommu->aiomt_idx,
-		    (void *)pci_dip, path);
-		return (DDI_DMA_NOMAPPING);
-	}
-
-	ndi_rele_devi(pci_dip);
-
-	if (bus > UINT8_MAX || bus < 0 ||
-	    device > UINT8_MAX || device < 0 ||
-	    func > UINT8_MAX || func < 0) {
-		cmn_err(CE_WARN, "%s: %s%d:  idx=%d, invalid BDF(%d,%d,%d) "
-		    "for PCI dip (%p). rdip path = %s", f, driver, instance,
-		    iommu->aiomt_idx,
-		    bus, device, func,
-		    (void *)pci_dip, path);
-		return (DDI_DMA_NOMAPPING);
-	}
-
-	bdf = ((uint8_t)bus << 8) | ((uint8_t)device << 3) | (uint8_t)func;
-
-	if (amd_iommu_get_src_bdf(iommu, bdf, &src_bdf) != DDI_SUCCESS) {
-		cmn_err(CE_WARN, "%s: %s%d: idx=%d, failed to get SRC BDF "
-		    "for PCI dip (%p) rdip path = %s.",
-		    f, driver, instance, iommu->aiomt_idx, (void *)pci_dip,
-		    path);
-		return (DDI_DMA_NOMAPPING);
-	}
-
-	if (amd_iommu_debug & AMD_IOMMU_DEBUG_DEVTBL) {
-		cmn_err(CE_NOTE, "%s: Deviceid = %u for path = %s",
-		    f, src_bdf, path);
-	}
-
-	*deviceid = src_bdf;
-	*aliasp = (src_bdf != bdf);
-
-	return (DDI_SUCCESS);
-}
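For reference, the deviceid computed above is the standard PCI bus/device/function ("BDF") encoding: bus in bits 15:8, device in bits 7:3, function in bits 2:0. Hypothetical helpers, not part of the driver, showing the packing and its inverse:

    #include <stdint.h>

    static uint16_t
    make_bdf(uint8_t bus, uint8_t dev, uint8_t func)
    {
    	return ((uint16_t)bus << 8 | (dev & 0x1f) << 3 | (func & 0x7));
    }

    #define	BDF_BUS(bdf)	(((bdf) >> 8) & 0xff)
    #define	BDF_DEV(bdf)	(((bdf) >> 3) & 0x1f)
    #define	BDF_FUNC(bdf)	((bdf) & 0x7)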
-
-/*ARGSUSED*/
-static int
-init_devtbl(amd_iommu_t *iommu, uint64_t *devtbl_entry, domain_id_t domainid,
-    amd_iommu_domain_t *dp)
-{
-	uint64_t entry[4] = {0};
-	int i;
-
-	/* If already passthru, don't touch */
-	if (AMD_IOMMU_REG_GET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_V) == 0 &&
-	    AMD_IOMMU_REG_GET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_TV) == 0) {
-		return (0);
-	}
-
-	if (AMD_IOMMU_REG_GET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_V) == 1 &&
-	    AMD_IOMMU_REG_GET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_TV) == 1) {
-
-		ASSERT(dp->d_pgtable_root_4K ==
-		    AMD_IOMMU_REG_GET64(&(devtbl_entry[0]),
-		    AMD_IOMMU_DEVTBL_ROOT_PGTBL));
-
-		ASSERT(dp->d_domainid == AMD_IOMMU_REG_GET64(&(devtbl_entry[1]),
-		    AMD_IOMMU_DEVTBL_DOMAINID));
-
-		return (0);
-	}
-
-	/* New devtbl entry for this domain. Bump up the domain ref-count */
-	dp->d_ref++;
-
-	entry[3] = 0;
-	entry[2] = 0;
-	AMD_IOMMU_REG_SET64(&(entry[1]), AMD_IOMMU_DEVTBL_SYSMGT, 1);
-	AMD_IOMMU_REG_SET64(&(entry[1]), AMD_IOMMU_DEVTBL_EX, 1);
-	AMD_IOMMU_REG_SET64(&(entry[1]), AMD_IOMMU_DEVTBL_SD, 0);
-	AMD_IOMMU_REG_SET64(&(entry[1]), AMD_IOMMU_DEVTBL_CACHE, 0);
-	AMD_IOMMU_REG_SET64(&(entry[1]), AMD_IOMMU_DEVTBL_IOCTL, 1);
-	AMD_IOMMU_REG_SET64(&(entry[1]), AMD_IOMMU_DEVTBL_SA, 0);
-	AMD_IOMMU_REG_SET64(&(entry[1]), AMD_IOMMU_DEVTBL_SE, 1);
-	AMD_IOMMU_REG_SET64(&(entry[1]), AMD_IOMMU_DEVTBL_IOTLB, 1);
-	AMD_IOMMU_REG_SET64(&(entry[1]), AMD_IOMMU_DEVTBL_DOMAINID,
-	    (uint16_t)domainid);
-	AMD_IOMMU_REG_SET64(&(entry[0]), AMD_IOMMU_DEVTBL_IW, 1);
-	AMD_IOMMU_REG_SET64(&(entry[0]), AMD_IOMMU_DEVTBL_IR, 1);
-	AMD_IOMMU_REG_SET64(&(entry[0]), AMD_IOMMU_DEVTBL_ROOT_PGTBL,
-	    dp->d_pgtable_root_4K);
-	AMD_IOMMU_REG_SET64(&(entry[0]), AMD_IOMMU_DEVTBL_PG_MODE,
-	    AMD_IOMMU_PGTABLE_MAXLEVEL);
-	AMD_IOMMU_REG_SET64(&(entry[0]), AMD_IOMMU_DEVTBL_TV,
-	    domainid == AMD_IOMMU_PASSTHRU_DOMAIN ? 0 : 1);
-	AMD_IOMMU_REG_SET64(&(entry[0]), AMD_IOMMU_DEVTBL_V,
-	    domainid == AMD_IOMMU_PASSTHRU_DOMAIN ? 0 : 1);
-
-	for (i = 1; i < 4; i++) {
-		devtbl_entry[i] = entry[i];
-	}
-	devtbl_entry[0] = entry[0];
-
-	/* we did an actual init */
-	return (1);
-}
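Note the store order at the end of init_devtbl(): words 1 through 3 of the device table entry are written first, and word 0, which carries the V and TV bits, is stored last, evidently so the IOMMU cannot observe an entry marked valid while its remaining fields are still stale. The idiom in miniature (illustrative):

    /* fill the words the hardware ignores while V/TV are clear ... */
    devtbl_entry[1] = entry[1];
    devtbl_entry[2] = entry[2];
    devtbl_entry[3] = entry[3];
    /* ... then one final store makes the entry visible */
    devtbl_entry[0] = entry[0];	/* word 0 holds V and TV */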
-
-void
-amd_iommu_set_passthru(amd_iommu_t *iommu, dev_info_t *rdip)
-{
-	int32_t deviceid;
-	int alias;
-	uint64_t *devtbl_entry;
-	amd_iommu_cmdargs_t cmdargs = {0};
-	char *path;
-	int pathfree;
-	int V;
-	int TV;
-	int instance;
-	const char *driver;
-	const char *f = "amd_iommu_set_passthru";
-
-	if (rdip) {
-		driver = ddi_driver_name(rdip);
-		instance = ddi_get_instance(rdip);
-	} else {
-		driver = "special-device";
-		instance = 0;
-	}
-
-	path = kmem_alloc(MAXPATHLEN, KM_NOSLEEP);
-	if (path) {
-		if (rdip)
-			(void) ddi_pathname(rdip, path);
-		else
-			(void) strcpy(path, "special-device");
-		pathfree = 1;
-	} else {
-		pathfree = 0;
-		path = "<path-mem-alloc-failed>";
-	}
-
-	if (amd_iommu_get_deviceid(iommu, rdip, &deviceid, &alias, path)
-	    != DDI_SUCCESS) {
-		cmn_err(CE_WARN, "%s: %s%d: idx=%d: rdip=%p. "
-		    "Failed to get device ID for device %s.", f, driver,
-		    instance,
-		    iommu->aiomt_idx, (void *)rdip, path);
-		goto out;
-	}
-
-	/* No deviceid */
-	if (deviceid == -1) {
-		goto out;
-	}
-
-	if ((deviceid + 1) * AMD_IOMMU_DEVTBL_ENTRY_SZ >
-	    iommu->aiomt_devtbl_sz) {
-		cmn_err(CE_WARN, "%s: %s%d: IOMMU idx=%d, deviceid (%u) "
-		    "for rdip (%p) exceeds device table size (%u), path=%s",
-		    f, driver,
-		    instance, iommu->aiomt_idx, deviceid, (void *)rdip,
-		    iommu->aiomt_devtbl_sz, path);
-		goto out;
-	}
-
-	/*LINTED*/
-	devtbl_entry = (uint64_t *)&iommu->aiomt_devtbl
-	    [deviceid * AMD_IOMMU_DEVTBL_ENTRY_SZ];
-
-	V = AMD_IOMMU_REG_GET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_V);
-	TV = AMD_IOMMU_REG_GET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_TV);
-
-	/* Already passthru */
-	if (V == 0 && TV == 0) {
-		goto out;
-	}
-
-	/* Existing translations */
-	if (V == 1 && TV == 1) {
-		goto out;
-	}
-
-	/* Invalid setting */
-	if (V == 0 && TV == 1) {
-		goto out;
-	}
-
-	AMD_IOMMU_REG_SET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_V, 0);
-
-	cmdargs.ca_deviceid = (uint16_t)deviceid;
-	(void) amd_iommu_cmd(iommu, AMD_IOMMU_CMD_INVAL_DEVTAB_ENTRY,
-	    &cmdargs, 0, 0);
-
-out:
-	if (pathfree)
-		kmem_free(path, MAXPATHLEN);
-}
-
-static int
-amd_iommu_set_devtbl_entry(amd_iommu_t *iommu, dev_info_t *rdip,
-    domain_id_t domainid, uint16_t deviceid, amd_iommu_domain_t *dp,
-    const char *path)
-{
-	uint64_t *devtbl_entry;
-	amd_iommu_cmdargs_t cmdargs = {0};
-	int error = DDI_SUCCESS;	/* devtbl entry may already be set up */
-	dev_info_t *idip = iommu->aiomt_dip;
-	const char *driver = ddi_driver_name(idip);
-	int instance = ddi_get_instance(idip);
-	const char *f = "amd_iommu_set_devtbl_entry";
-
-	if (amd_iommu_debug & AMD_IOMMU_DEBUG_DEVTBL) {
-		cmn_err(CE_WARN, "%s: attempting to set devtbl entry for %s",
-		    f, path);
-	}
-
-	if ((deviceid + 1) * AMD_IOMMU_DEVTBL_ENTRY_SZ >
-	    iommu->aiomt_devtbl_sz) {
-		cmn_err(CE_WARN, "%s: %s%d: IOMMU idx=%d, deviceid (%u) "
-		    "for rdip (%p) exceeds device table size (%u), path=%s",
-		    f, driver,
-		    instance, iommu->aiomt_idx, deviceid, (void *)rdip,
-		    iommu->aiomt_devtbl_sz, path);
-		return (DDI_DMA_NOMAPPING);
-	}
-
-	/*LINTED*/
-	devtbl_entry = (uint64_t *)&iommu->aiomt_devtbl
-	    [deviceid * AMD_IOMMU_DEVTBL_ENTRY_SZ];
-
-	if (amd_iommu_debug & AMD_IOMMU_DEBUG_DEVTBL) {
-		cmn_err(CE_WARN, "%s: deviceid=%u devtbl entry (%p) for %s",
-		    f, deviceid, (void *)(uintptr_t)(*devtbl_entry), path);
-	}
-
-	if (init_devtbl(iommu, devtbl_entry, domainid, dp)) {
-		cmdargs.ca_deviceid = deviceid;
-		error = amd_iommu_cmd(iommu, AMD_IOMMU_CMD_INVAL_DEVTAB_ENTRY,
-		    &cmdargs, 0, 0);
-	}
-
-	return (error);
-}
-
-int
-amd_iommu_clear_devtbl_entry(amd_iommu_t *iommu, dev_info_t *rdip,
-    domain_id_t domainid, uint16_t deviceid, amd_iommu_domain_t *dp,
-    int *domain_freed, char *path)
-{
-	uint64_t *devtbl_entry;
-	int error = DDI_SUCCESS;
-	amd_iommu_cmdargs_t cmdargs = {0};
-	const char *driver = ddi_driver_name(iommu->aiomt_dip);
-	int instance = ddi_get_instance(iommu->aiomt_dip);
-	const char *f = "amd_iommu_clear_devtbl_entry";
-
-	if (amd_iommu_debug & AMD_IOMMU_DEBUG_DEVTBL) {
-		cmn_err(CE_NOTE, "%s: attempting to clear devtbl entry for "
-		    "domainid = %d, deviceid = %u, path = %s",
-		    f, domainid, deviceid, path);
-	}
-
-	if ((deviceid + 1) * AMD_IOMMU_DEVTBL_ENTRY_SZ >
-	    iommu->aiomt_devtbl_sz) {
-		cmn_err(CE_WARN, "%s: %s%d: IOMMU idx=%d, deviceid (%u) "
-		    "for rdip (%p) exceeds device table size (%u), path = %s",
-		    f, driver, instance,
-		    iommu->aiomt_idx, deviceid, (void *)rdip,
-		    iommu->aiomt_devtbl_sz, path);
-		return (DDI_FAILURE);
-	}
-
-	/*LINTED*/
-	devtbl_entry = (uint64_t *)&iommu->aiomt_devtbl
-	    [deviceid * AMD_IOMMU_DEVTBL_ENTRY_SZ];
-
-	if (amd_iommu_debug & AMD_IOMMU_DEBUG_DEVTBL) {
-		cmn_err(CE_WARN, "%s: deviceid=%u devtbl entry (%p) for %s",
-		    f, deviceid, (void *)(uintptr_t)(*devtbl_entry), path);
-	}
-
-	if (AMD_IOMMU_REG_GET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_TV) == 0) {
-		/* Nothing to do */
-		return (DDI_SUCCESS);
-	}
-
-	ASSERT(dp->d_pgtable_root_4K == AMD_IOMMU_REG_GET64(&(devtbl_entry[0]),
-	    AMD_IOMMU_DEVTBL_ROOT_PGTBL));
-
-	ASSERT(domainid == AMD_IOMMU_REG_GET64(&(devtbl_entry[1]),
-	    AMD_IOMMU_DEVTBL_DOMAINID));
-
-	AMD_IOMMU_REG_SET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_TV, 0);
-	AMD_IOMMU_REG_SET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_ROOT_PGTBL, 0);
-	AMD_IOMMU_REG_SET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_V, 1);
-
-	SYNC_FORDEV(iommu->aiomt_dmahdl);
-
-	dp->d_ref--;
-	ASSERT(dp->d_ref >= 0);
-
-	if (dp->d_ref == 0) {
-		*domain_freed = 1;
-	}
-
-	cmdargs.ca_deviceid = deviceid;
-	error = amd_iommu_cmd(iommu, AMD_IOMMU_CMD_INVAL_DEVTAB_ENTRY,
-	    &cmdargs, 0, 0);
-	if (error != DDI_SUCCESS)
-		error = DDI_FAILURE;
-
-	return (error);
-}
-
-int
-amd_iommu_page_table_hash_init(amd_iommu_page_table_hash_t *ampt)
-{
-	ampt->ampt_hash = kmem_zalloc(sizeof (amd_iommu_page_table_t *) *
-	    AMD_IOMMU_PGTABLE_HASH_SZ, KM_SLEEP);
-	return (DDI_SUCCESS);
-}
-
-void
-amd_iommu_page_table_hash_fini(amd_iommu_page_table_hash_t *ampt)
-{
-	kmem_free(ampt->ampt_hash,
-	    sizeof (amd_iommu_page_table_t *) * AMD_IOMMU_PGTABLE_HASH_SZ);
-	ampt->ampt_hash = NULL;
-}
-
-static uint32_t
-pt_hashfn(uint64_t pa_4K)
-{
-	return (pa_4K % AMD_IOMMU_PGTABLE_HASH_SZ);
-}
-
-static void
-amd_iommu_insert_pgtable_hash(amd_iommu_page_table_t *pt)
-{
-	uint64_t pa_4K = ((uint64_t)pt->pt_cookie.dmac_cookie_addr) >> 12;
-	uint32_t idx = pt_hashfn(pa_4K);
-
-	ASSERT((pt->pt_cookie.dmac_cookie_addr & AMD_IOMMU_PGTABLE_ALIGN) == 0);
-
-	mutex_enter(&amd_iommu_page_table_hash.ampt_lock);
-
-	pt->pt_next = amd_iommu_page_table_hash.ampt_hash[idx];
-	pt->pt_prev = NULL;
-	amd_iommu_page_table_hash.ampt_hash[idx] = pt;
-	if (pt->pt_next)
-		pt->pt_next->pt_prev = pt;
-
-	mutex_exit(&amd_iommu_page_table_hash.ampt_lock);
-}
-
-static void
-amd_iommu_remove_pgtable_hash(amd_iommu_page_table_t *pt)
-{
-	uint64_t pa_4K = (pt->pt_cookie.dmac_cookie_addr >> 12);
-	uint32_t idx = pt_hashfn(pa_4K);
-
-	ASSERT((pt->pt_cookie.dmac_cookie_addr & AMD_IOMMU_PGTABLE_ALIGN) == 0);
-
-	mutex_enter(&amd_iommu_page_table_hash.ampt_lock);
-
-	if (pt->pt_next)
-		pt->pt_next->pt_prev = pt->pt_prev;
-
-	if (pt->pt_prev)
-		pt->pt_prev->pt_next = pt->pt_next;
-	else
-		amd_iommu_page_table_hash.ampt_hash[idx] = pt->pt_next;
-
-	pt->pt_next = NULL;
-	pt->pt_prev = NULL;
-
-	mutex_exit(&amd_iommu_page_table_hash.ampt_lock);
-}
-
-static amd_iommu_page_table_t *
-amd_iommu_lookup_pgtable_hash(domain_id_t domainid, uint64_t pgtable_pa_4K)
-{
-	amd_iommu_page_table_t *pt;
-	uint32_t idx = pt_hashfn(pgtable_pa_4K);
-
-	mutex_enter(&amd_iommu_page_table_hash.ampt_lock);
-	pt = amd_iommu_page_table_hash.ampt_hash[idx];
-	for (; pt; pt = pt->pt_next) {
-		if (domainid != pt->pt_domainid)
-			continue;
-		ASSERT((pt->pt_cookie.dmac_cookie_addr &
-		    AMD_IOMMU_PGTABLE_ALIGN) == 0);
-		if ((pt->pt_cookie.dmac_cookie_addr >> 12) == pgtable_pa_4K) {
-			break;
-		}
-	}
-	mutex_exit(&amd_iommu_page_table_hash.ampt_lock);
-
-	return (pt);
-}
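Page tables are hashed by the physical 4K frame number of their backing memory, with the owning domain checked during the bucket walk. The key derivation in isolation (variable names are illustrative):

    uint64_t pa     = pt->pt_cookie.dmac_cookie_addr;	/* 4K aligned */
    uint64_t pa_4K  = pa >> 12;				/* frame number */
    uint32_t bucket = pa_4K % AMD_IOMMU_PGTABLE_HASH_SZ;	/* pt_hashfn() */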
-
-/*ARGSUSED*/
-static amd_iommu_page_table_t *
-amd_iommu_lookup_pgtable(amd_iommu_t *iommu, amd_iommu_page_table_t *ppt,
-    amd_iommu_domain_t *dp, int level, uint16_t index)
-{
-	uint64_t *pdtep;
-	uint64_t pgtable_pa_4K;
-
-	ASSERT(level > 0 && level <= AMD_IOMMU_PGTABLE_MAXLEVEL);
-	ASSERT(dp);
-
-	if (level == AMD_IOMMU_PGTABLE_MAXLEVEL) {
-		ASSERT(ppt == NULL);
-		ASSERT(index == 0);
-		pgtable_pa_4K = dp->d_pgtable_root_4K;
-	} else {
-		ASSERT(ppt);
-		pdtep = &(ppt->pt_pgtblva[index]);
-		if (AMD_IOMMU_REG_GET64(pdtep, AMD_IOMMU_PTDE_PR) == 0) {
-			if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
-				cmn_err(CE_NOTE, "Skipping PR=0 pdte: 0x%"
-				    PRIx64, *pdtep);
-			}
-			return (NULL);
-		}
-		pgtable_pa_4K = AMD_IOMMU_REG_GET64(pdtep, AMD_IOMMU_PTDE_ADDR);
-	}
-
-	return (amd_iommu_lookup_pgtable_hash(dp->d_domainid, pgtable_pa_4K));
-}
-
-static amd_iommu_page_table_t *
-amd_iommu_alloc_from_freelist(void)
-{
-	int i;
-	uint64_t *pte_array;
-	amd_iommu_page_table_t *pt;
-
-	if (amd_iommu_no_pgtable_freelist == 1)
-		return (NULL);
-
-	if (amd_iommu_pgtable_freelist.f_count == 0)
-		return (NULL);
-
-	pt = amd_iommu_pgtable_freelist.f_list;
-	amd_iommu_pgtable_freelist.f_list = pt->pt_next;
-	amd_iommu_pgtable_freelist.f_count--;
-
-	pte_array = pt->pt_pgtblva;
-	for (i = 0; i < AMD_IOMMU_PGTABLE_SZ / (sizeof (*pte_array)); i++) {
-		ASSERT(pt->pt_pte_ref[i] == 0);
-		ASSERT(AMD_IOMMU_REG_GET64(&(pte_array[i]),
-		    AMD_IOMMU_PTDE_PR)  == 0);
-	}
-
-	return (pt);
-}
-
-static int
-amd_iommu_alloc_pgtable(amd_iommu_t *iommu, domain_id_t domainid,
-    const char *path, amd_iommu_page_table_t **ptp, int km_flags)
-{
-	int err;
-	uint_t ncookies;
-	amd_iommu_page_table_t *pt;
-	dev_info_t *idip = iommu->aiomt_dip;
-	const char *driver = ddi_driver_name(idip);
-	int instance = ddi_get_instance(idip);
-	const char *f = "amd_iommu_alloc_pgtable";
-
-	*ptp = NULL;
-
-	pt = amd_iommu_alloc_from_freelist();
-	if (pt)
-		goto init_pgtable;
-
-	pt = kmem_zalloc(sizeof (amd_iommu_page_table_t), km_flags);
-	if (pt == NULL)
-		return (DDI_DMA_NORESOURCES);
-
-	/*
-	 * Each page table is 4K in size
-	 */
-	pt->pt_mem_reqsz = AMD_IOMMU_PGTABLE_SZ;
-
-	/*
-	 * Alloc a DMA handle. Use the IOMMU dip as we want this DMA
-	 * to *not* enter the IOMMU - no recursive entrance.
-	 */
-	err = ddi_dma_alloc_handle(idip, &amd_iommu_pgtable_dma_attr,
-	    km_flags == KM_SLEEP ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT,
-	    NULL, &pt->pt_dma_hdl);
-	if (err != DDI_SUCCESS) {
-		cmn_err(CE_WARN, "%s: %s%d: domainid = %d, path = %s. "
-		    "Cannot alloc DMA handle for IO Page Table",
-		    f, driver, instance, domainid, path);
-		kmem_free(pt, sizeof (amd_iommu_page_table_t));
-		return (err == DDI_DMA_NORESOURCES ? err : DDI_DMA_NOMAPPING);
-	}
-
-	/*
-	 * Alloc memory for IO Page Table.
-	 * XXX remove size_t cast kludge
-	 */
-	err = ddi_dma_mem_alloc(pt->pt_dma_hdl, pt->pt_mem_reqsz,
-	    &amd_iommu_devacc, DDI_DMA_CONSISTENT|IOMEM_DATA_UNCACHED,
-	    km_flags == KM_SLEEP ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT,
-	    NULL, (caddr_t *)&pt->pt_pgtblva,
-	    (size_t *)&pt->pt_mem_realsz, &pt->pt_mem_hdl);
-	if (err != DDI_SUCCESS) {
-		cmn_err(CE_WARN, "%s: %s%d: domainid=%d, path = %s. "
-		    "Cannot allocate DMA memory for IO Page table",
-		    f, driver, instance, domainid, path);
-		ddi_dma_free_handle(&pt->pt_dma_hdl);
-		kmem_free(pt, sizeof (amd_iommu_page_table_t));
-		return (DDI_DMA_NORESOURCES);
-	}
-
-	/*
-	 * The page table DMA VA must be 4K aligned and its size
-	 * must be at least that of the requested memory.
-	 */
-	ASSERT(((uint64_t)(uintptr_t)pt->pt_pgtblva & AMD_IOMMU_PGTABLE_ALIGN)
-	    == 0);
-	ASSERT(pt->pt_mem_realsz >= pt->pt_mem_reqsz);
-
-	/*
-	 * Now bind the handle
-	 */
-	err = ddi_dma_addr_bind_handle(pt->pt_dma_hdl, NULL,
-	    (caddr_t)pt->pt_pgtblva, pt->pt_mem_realsz,
-	    DDI_DMA_READ | DDI_DMA_CONSISTENT,
-	    km_flags == KM_SLEEP ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT,
-	    NULL, &pt->pt_cookie, &ncookies);
-	if (err != DDI_DMA_MAPPED) {
-		cmn_err(CE_WARN, "%s: %s%d: domainid=%d, path = %s. "
-		    "Cannot bind memory for DMA to IO Page Tables. "
-		    "bufrealsz=%p",
-		    f, driver, instance, domainid, path,
-		    (void *)(uintptr_t)pt->pt_mem_realsz);
-		ddi_dma_mem_free(&pt->pt_mem_hdl);
-		ddi_dma_free_handle(&pt->pt_dma_hdl);
-		kmem_free(pt, sizeof (amd_iommu_page_table_t));
-		return (err == DDI_DMA_PARTIAL_MAP ? DDI_DMA_NOMAPPING :
-		    err);
-	}
-
-	/*
-	 * We assume the DMA engine on the IOMMU is capable of handling the
-	 * whole page table in a single cookie. If not and multiple cookies
-	 * are needed we fail.
-	 */
-	if (ncookies != 1) {
-		cmn_err(CE_WARN, "%s: %s%d: domainid = %d, path=%s "
-		    "Cannot handle multiple "
-		    "cookies for DMA to IO page Table, #cookies=%u",
-		    f, driver, instance, domainid, path, ncookies);
-		(void) ddi_dma_unbind_handle(pt->pt_dma_hdl);
-		ddi_dma_mem_free(&pt->pt_mem_hdl);
-		ddi_dma_free_handle(&pt->pt_dma_hdl);
-		kmem_free(pt, sizeof (amd_iommu_page_table_t));
-		return (DDI_DMA_NOMAPPING);
-	}
-
-init_pgtable:
-	/*
-	 * The address in the cookie must be 4K aligned and >= table size
-	 */
-	ASSERT(pt->pt_cookie.dmac_cookie_addr != NULL);
-	ASSERT((pt->pt_cookie.dmac_cookie_addr & AMD_IOMMU_PGTABLE_ALIGN) == 0);
-	ASSERT(pt->pt_cookie.dmac_size >= pt->pt_mem_realsz);
-	ASSERT(pt->pt_cookie.dmac_size >= pt->pt_mem_reqsz);
-	ASSERT(pt->pt_mem_reqsz >= AMD_IOMMU_PGTABLE_SIZE);
-	ASSERT(pt->pt_mem_realsz >= pt->pt_mem_reqsz);
-	ASSERT(pt->pt_pgtblva);
-
-	pt->pt_domainid = AMD_IOMMU_INVALID_DOMAIN;
-	pt->pt_level = 0x7;
-	pt->pt_index = 0;
-	pt->pt_ref = 0;
-	pt->pt_next = NULL;
-	pt->pt_prev = NULL;
-	pt->pt_parent = NULL;
-
-	bzero(pt->pt_pgtblva, pt->pt_mem_realsz);
-	SYNC_FORDEV(pt->pt_dma_hdl);
-
-	amd_iommu_insert_pgtable_hash(pt);
-
-	*ptp = pt;
-
-	return (DDI_SUCCESS);
-}
-
-static int
-amd_iommu_move_to_freelist(amd_iommu_page_table_t *pt)
-{
-	if (amd_iommu_no_pgtable_freelist == 1)
-		return (DDI_FAILURE);
-
-	if (amd_iommu_pgtable_freelist.f_count ==
-	    AMD_IOMMU_PGTABLE_FREELIST_MAX)
-		return (DDI_FAILURE);
-
-	pt->pt_next = amd_iommu_pgtable_freelist.f_list;
-	amd_iommu_pgtable_freelist.f_list = pt;
-	amd_iommu_pgtable_freelist.f_count++;
-
-	return (DDI_SUCCESS);
-}
-
-static void
-amd_iommu_free_pgtable(amd_iommu_t *iommu, amd_iommu_page_table_t *pt)
-{
-	int i;
-	uint64_t *pte_array;
-	dev_info_t *dip = iommu->aiomt_dip;
-	int instance = ddi_get_instance(dip);
-	const char *driver = ddi_driver_name(dip);
-	const char *f = "amd_iommu_free_pgtable";
-
-	ASSERT(pt->pt_ref == 0);
-
-	amd_iommu_remove_pgtable_hash(pt);
-
-	pte_array = pt->pt_pgtblva;
-	for (i = 0; i < AMD_IOMMU_PGTABLE_SZ / (sizeof (*pte_array)); i++) {
-		ASSERT(pt->pt_pte_ref[i] == 0);
-		ASSERT(AMD_IOMMU_REG_GET64(&(pte_array[i]),
-		    AMD_IOMMU_PTDE_PR)  == 0);
-	}
-
-	if (amd_iommu_move_to_freelist(pt) == DDI_SUCCESS)
-		return;
-
-	/* Unbind the handle */
-	if (ddi_dma_unbind_handle(pt->pt_dma_hdl) != DDI_SUCCESS) {
-		cmn_err(CE_WARN, "%s: %s%d: idx=%d, domainid=%d. "
-		    "Failed to unbind handle: %p for IOMMU Page Table",
-		    f, driver, instance, iommu->aiomt_idx, pt->pt_domainid,
-		    (void *)pt->pt_dma_hdl);
-	}
-	/* Free the table memory allocated for DMA */
-	ddi_dma_mem_free(&pt->pt_mem_hdl);
-
-	/* Free the DMA handle */
-	ddi_dma_free_handle(&pt->pt_dma_hdl);
-
-	kmem_free(pt, sizeof (amd_iommu_page_table_t));
-
-}
-
-static int
-init_pde(amd_iommu_page_table_t *ppt, amd_iommu_page_table_t *pt)
-{
-	uint64_t *pdep = &(ppt->pt_pgtblva[pt->pt_index]);
-	uint64_t next_pgtable_pa_4K = (pt->pt_cookie.dmac_cookie_addr) >> 12;
-
-	/* nothing to set. PDE is already set */
-	if (AMD_IOMMU_REG_GET64(pdep, AMD_IOMMU_PTDE_PR) == 1) {
-		ASSERT(PT_REF_VALID(ppt));
-		ASSERT(PT_REF_VALID(pt));
-		ASSERT(ppt->pt_pte_ref[pt->pt_index] == 0);
-		ASSERT(AMD_IOMMU_REG_GET64(pdep, AMD_IOMMU_PTDE_ADDR)
-		    == next_pgtable_pa_4K);
-		return (DDI_SUCCESS);
-	}
-
-	ppt->pt_ref++;
-	ASSERT(PT_REF_VALID(ppt));
-
-	/* Page Directories are always RW */
-	AMD_IOMMU_REG_SET64(pdep, AMD_IOMMU_PTDE_IW, 1);
-	AMD_IOMMU_REG_SET64(pdep, AMD_IOMMU_PTDE_IR, 1);
-	AMD_IOMMU_REG_SET64(pdep, AMD_IOMMU_PTDE_ADDR,
-	    next_pgtable_pa_4K);
-	pt->pt_parent = ppt;
-	AMD_IOMMU_REG_SET64(pdep, AMD_IOMMU_PTDE_NXT_LVL,
-	    pt->pt_level);
-	ppt->pt_pte_ref[pt->pt_index] = 0;
-	AMD_IOMMU_REG_SET64(pdep, AMD_IOMMU_PTDE_PR, 1);
-	SYNC_FORDEV(ppt->pt_dma_hdl);
-	ASSERT(AMD_IOMMU_REG_GET64(pdep, AMD_IOMMU_PTDE_PR) == 1);
-
-	return (DDI_SUCCESS);
-}
-
-static int
-init_pte(amd_iommu_page_table_t *pt, uint64_t pa, uint16_t index,
-    struct ddi_dma_req *dmareq)
-{
-	uint64_t *ptep = &(pt->pt_pgtblva[index]);
-	uint64_t pa_4K = pa >> 12;
-	int R;
-	int W;
-
-	/* nothing to set if PTE is already set */
-	if (AMD_IOMMU_REG_GET64(ptep, AMD_IOMMU_PTDE_PR) == 1) {
-		/*
-		 * Adjust current permissions
-		 * DDI_DMA_WRITE means direction of DMA is MEM -> I/O
-		 * so that requires Memory READ permissions i.e. sense
-		 * is inverted.
-		 * Note: either or both of DD_DMA_READ/WRITE may be set
-		 */
-		if (amd_iommu_no_RW_perms == 0) {
-			R = AMD_IOMMU_REG_GET64(ptep, AMD_IOMMU_PTDE_IR);
-			W = AMD_IOMMU_REG_GET64(ptep, AMD_IOMMU_PTDE_IW);
-			if (R == 0 && ((dmareq->dmar_flags & DDI_DMA_WRITE) ||
-			    (dmareq->dmar_flags & DDI_DMA_RDWR))) {
-				AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IR, 1);
-			}
-			if (W  == 0 && ((dmareq->dmar_flags & DDI_DMA_READ) ||
-			    (dmareq->dmar_flags & DDI_DMA_RDWR))) {
-				AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IW, 1);
-			}
-		}
-		ASSERT(PT_REF_VALID(pt));
-		pt->pt_pte_ref[index]++;
-		ASSERT(AMD_IOMMU_REG_GET64(ptep, AMD_IOMMU_PTDE_ADDR)
-		    == pa_4K);
-		return (DDI_SUCCESS);
-	}
-
-	pt->pt_ref++;
-	ASSERT(PT_REF_VALID(pt));
-
-	/* see comment above about inverting sense of RD/WR */
-	if (amd_iommu_no_RW_perms == 0) {
-		AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IR, 0);
-		AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IW, 0);
-		if (dmareq->dmar_flags & DDI_DMA_RDWR) {
-			AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IW, 1);
-			AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IR, 1);
-		} else {
-			if (dmareq->dmar_flags & DDI_DMA_WRITE) {
-				AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IR, 1);
-			}
-			if (dmareq->dmar_flags & DDI_DMA_READ) {
-				AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IW, 1);
-			}
-		}
-	} else {
-		AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IR, 1);
-		AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IW, 1);
-	}
-
-	/* TODO: what is correct for FC and U? */
-	AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTE_FC, 0);
-	AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTE_U, 0);
-	AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_ADDR, pa_4K);
-	AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_NXT_LVL, 0);
-	ASSERT(pt->pt_pte_ref[index] == 0);
-	pt->pt_pte_ref[index] = 1;
-	AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_PR, 1);
-	SYNC_FORDEV(pt->pt_dma_hdl);
-	ASSERT(AMD_IOMMU_REG_GET64(ptep, AMD_IOMMU_PTDE_PR) == 1);
-
-	return (DDI_SUCCESS);
-}
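The inverted permission sense noted in the comments above bears restating: DDI_DMA_WRITE describes a memory-to-device transfer, so the device must be allowed to read memory (IR), while DDI_DMA_READ describes device-to-memory and so requires write permission (IW). A hypothetical helper condensing the mapping init_pte() applies:

    static void
    dma_flags_to_perms(uint_t dmar_flags, int *irp, int *iwp)
    {
    	/* MEM -> I/O transfers need device read permission */
    	*irp = (dmar_flags & (DDI_DMA_WRITE | DDI_DMA_RDWR)) != 0;
    	/* I/O -> MEM transfers need device write permission */
    	*iwp = (dmar_flags & (DDI_DMA_READ | DDI_DMA_RDWR)) != 0;
    }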
-
-
-static void
-init_pt(amd_iommu_page_table_t *pt, amd_iommu_domain_t *dp,
-    int level, uint16_t index)
-{
-	ASSERT(dp);
-
-	if (level == AMD_IOMMU_PGTABLE_MAXLEVEL) {
-		dp->d_pgtable_root_4K = (pt->pt_cookie.dmac_cookie_addr) >> 12;
-	} else {
-		ASSERT(level >= 1 && level < AMD_IOMMU_PGTABLE_MAXLEVEL);
-	}
-
-	pt->pt_domainid = dp->d_domainid;
-	pt->pt_level = level;
-	pt->pt_index = index;
-}
-
-static int
-amd_iommu_setup_1_pgtable(amd_iommu_t *iommu, dev_info_t *rdip,
-    struct ddi_dma_req *dmareq,
-    domain_id_t domainid, amd_iommu_domain_t *dp,
-    amd_iommu_page_table_t *ppt,
-    uint16_t index, int level, uint64_t va, uint64_t pa,
-    amd_iommu_page_table_t **ptp,  uint16_t *next_idxp, const char *path,
-    int km_flags)
-{
-	int error;
-	amd_iommu_page_table_t *pt;
-	const char *driver = ddi_driver_name(rdip);
-	int instance = ddi_get_instance(rdip);
-	const char *f = "amd_iommu_setup_1_pgtable";
-
-	*ptp = NULL;
-	*next_idxp = 0;
-	error = DDI_SUCCESS;
-
-	ASSERT(level > 0 && level <= AMD_IOMMU_PGTABLE_MAXLEVEL);
-
-	ASSERT(dp);
-	if (level == AMD_IOMMU_PGTABLE_MAXLEVEL) {
-		ASSERT(ppt == NULL);
-		ASSERT(index == 0);
-	} else {
-		ASSERT(ppt);
-	}
-
-	/* Check if page table is already allocated */
-	if ((pt = amd_iommu_lookup_pgtable(iommu, ppt, dp, level,
-	    index)) != NULL) {
-		ASSERT(pt->pt_domainid == domainid);
-		ASSERT(pt->pt_level == level);
-		ASSERT(pt->pt_index == index);
-		goto out;
-	}
-
-	if ((error = amd_iommu_alloc_pgtable(iommu, domainid, path, &pt,
-	    km_flags)) != DDI_SUCCESS) {
-		cmn_err(CE_WARN, "%s: %s%d: idx = %u, domainid = %d, va = %p "
-		    "path = %s", f, driver, instance, iommu->aiomt_idx,
-		    domainid, (void *)(uintptr_t)va, path);
-		return (error);
-	}
-
-	ASSERT(dp->d_domainid == domainid);
-
-	init_pt(pt, dp, level, index);
-
-out:
-	if (level != AMD_IOMMU_PGTABLE_MAXLEVEL) {
-		error = init_pde(ppt, pt);
-	}
-
-	if (level == 1) {
-		ASSERT(error == DDI_SUCCESS);
-		error = init_pte(pt, pa, AMD_IOMMU_VA_BITS(va, level), dmareq);
-	} else {
-		*next_idxp = AMD_IOMMU_VA_BITS(va, level);
-		*ptp = pt;
-	}
-
-	return (error);
-}
-
-typedef enum {
-	PDTE_NOT_TORN = 0x1,
-	PDTE_TORN_DOWN = 0x2,
-	PGTABLE_TORN_DOWN = 0x4
-} pdte_tear_t;
-
-static pdte_tear_t
-amd_iommu_teardown_pdte(amd_iommu_t *iommu,
-    amd_iommu_page_table_t *pt, int index)
-{
-	uint8_t next_level;
-	pdte_tear_t retval;
-	uint64_t *ptdep = &(pt->pt_pgtblva[index]);
-
-	next_level = AMD_IOMMU_REG_GET64(ptdep,
-	    AMD_IOMMU_PTDE_NXT_LVL);
-
-	if (AMD_IOMMU_REG_GET64(ptdep, AMD_IOMMU_PTDE_PR) == 1) {
-		if (pt->pt_level == 1) {
-			ASSERT(next_level == 0);
-			/* PTE */
-			pt->pt_pte_ref[index]--;
-			if (pt->pt_pte_ref[index] != 0) {
-				return (PDTE_NOT_TORN);
-			}
-		} else {
-			ASSERT(next_level != 0 && next_level != 7);
-		}
-		ASSERT(pt->pt_pte_ref[index] == 0);
-		ASSERT(PT_REF_VALID(pt));
-
-		AMD_IOMMU_REG_SET64(ptdep, AMD_IOMMU_PTDE_PR, 0);
-		SYNC_FORDEV(pt->pt_dma_hdl);
-		ASSERT(AMD_IOMMU_REG_GET64(ptdep,
-		    AMD_IOMMU_PTDE_PR) == 0);
-		pt->pt_ref--;
-		ASSERT(PT_REF_VALID(pt));
-		retval = PDTE_TORN_DOWN;
-	} else {
-		ASSERT(0);
-		ASSERT(pt->pt_pte_ref[index] == 0);
-		ASSERT(PT_REF_VALID(pt));
-		retval = PDTE_NOT_TORN;
-	}
-
-	if (pt->pt_ref == 0) {
-		amd_iommu_free_pgtable(iommu, pt);
-		return (PGTABLE_TORN_DOWN);
-	}
-
-	return (retval);
-}
-
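amd_iommu_teardown_pdte() reports, via pdte_tear_t, whether the entry was
merely cleared or whether the whole table emptied (pt_ref dropped to zero
and the table was freed); amd_iommu_destroy_pgtables() keeps ascending the
tree only in the latter case. A toy user-level model of that
reference-count cascade, with all hardware and DMA detail omitted:

#include <stdio.h>

#define	NLEVELS	6	/* mirrors AMD_IOMMU_PGTABLE_MAXLEVEL */

/*
 * Toy model: ref[l] is the number of live entries in the (single)
 * page table at level l.  Unmapping one page decrements level 1;
 * each table that empties removes one entry from its parent.
 */
static void
unmap_one(int *ref)
{
	int l;

	for (l = 1; l <= NLEVELS; l++) {
		if (--ref[l] > 0) {
			/* table still referenced: stop ascending */
			return;
		}
		(void) printf("level %d table empty -> freed\n", l);
	}
}

int
main(void)
{
	int ref[NLEVELS + 1] = { 0, 2, 1, 1, 1, 1, 1 };

	unmap_one(ref);	/* level 1 still has one PTE: no cascade */
	unmap_one(ref);	/* last PTE: all six levels cascade */
	return (0);
}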
-static int
-amd_iommu_create_pgtables(amd_iommu_t *iommu, dev_info_t *rdip,
-    struct ddi_dma_req *dmareq, uint64_t va,
-    uint64_t pa, uint16_t deviceid, domain_id_t domainid,
-    amd_iommu_domain_t *dp, const char *path, int km_flags)
-{
-	int level;
-	uint16_t index;
-	uint16_t next_idx;
-	amd_iommu_page_table_t *pt;
-	amd_iommu_page_table_t *ppt;
-	int error;
-	const char *driver = ddi_driver_name(rdip);
-	int instance = ddi_get_instance(rdip);
-	const char *f = "amd_iommu_create_pgtables";
-
-	if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
-		cmn_err(CE_NOTE, "%s: %s%d: idx = %u, domainid = %d, "
-		    "deviceid = %u, va = %p, pa = %p, path = %s",
-		    f, driver, instance,
-		    iommu->aiomt_idx, domainid, deviceid,
-		    (void *)(uintptr_t)va,
-		    (void *)(uintptr_t)pa, path);
-	}
-
-	if (domainid == AMD_IOMMU_PASSTHRU_DOMAIN) {
-		/* No need for pagetables. Just set up device table entry */
-		goto passthru;
-	}
-
-	index = 0;
-	ppt = NULL;
-	for (level = AMD_IOMMU_PGTABLE_MAXLEVEL; level > 0;
-	    level--, pt = NULL, next_idx = 0) {
-		if ((error = amd_iommu_setup_1_pgtable(iommu, rdip, dmareq,
-		    domainid, dp, ppt, index, level, va, pa, &pt,
-		    &next_idx, path, km_flags)) != DDI_SUCCESS) {
-			cmn_err(CE_WARN, "%s: %s%d: idx=%d: domainid=%d, "
-			    "deviceid=%u, va = %p, pa = %p. Failed to set up "
-			    "page table(s) at level = %d, path = %s.",
-			    f, driver, instance, iommu->aiomt_idx,
-			    domainid, deviceid, (void *)(uintptr_t)va,
-			    (void *)(uintptr_t)pa, level, path);
-			return (error);
-		}
-
-		if (level > 1) {
-			ASSERT(pt);
-			ASSERT(pt->pt_domainid == domainid);
-			ppt = pt;
-			index = next_idx;
-		} else {
-			ASSERT(level == 1);
-			ASSERT(pt == NULL);
-			ASSERT(next_idx == 0);
-			ppt = NULL;
-			index = 0;
-		}
-	}
-
-passthru:
-	if ((error = amd_iommu_set_devtbl_entry(iommu, rdip, domainid, deviceid,
-	    dp, path)) != DDI_SUCCESS) {
-		cmn_err(CE_WARN, "%s: %s%d: idx=%d: rdip=%p, deviceid=%u, "
-		    "domainid=%d. "
-		    "Failed to set device table entry for path %s.",
-		    f, driver, instance,
-		    iommu->aiomt_idx, (void *)rdip, deviceid, domainid, path);
-		return (error);
-	}
-
-	SYNC_FORDEV(iommu->aiomt_dmahdl);
-
-	return (DDI_SUCCESS);
-}
-
-static int
-amd_iommu_destroy_pgtables(amd_iommu_t *iommu, dev_info_t *rdip,
-    uint64_t pageva, uint16_t deviceid, domain_id_t domainid,
-    amd_iommu_domain_t *dp, map_type_t type, int *domain_freed, char *path)
-{
-	int level;
-	int flags;
-	amd_iommu_cmdargs_t cmdargs = {0};
-	uint16_t index;
-	uint16_t prev_index;
-	amd_iommu_page_table_t *pt;
-	amd_iommu_page_table_t *ppt;
-	pdte_tear_t retval;
-	int tear_level;
-	int invalidate_pte;
-	int invalidate_pde;
-	int error = DDI_FAILURE;
-	const char *driver = ddi_driver_name(iommu->aiomt_dip);
-	int instance = ddi_get_instance(iommu->aiomt_dip);
-	const char *f = "amd_iommu_destroy_pgtables";
-
-	if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
-		cmn_err(CE_NOTE, "%s: %s%d: idx = %u, domainid = %d, "
-		    "deviceid = %u, va = %p, path = %s",
-		    f, driver, instance,
-		    iommu->aiomt_idx, domainid, deviceid,
-		    (void *)(uintptr_t)pageva, path);
-	}
-
-	if (domainid == AMD_IOMMU_PASSTHRU_DOMAIN) {
-		/*
-		 * There are no pagetables for the passthru domain;
-		 * only the device table entry needs to be torn down.
-		 */
-		error = DDI_SUCCESS;
-		goto passthru;
-	}
-
-	ppt = NULL;
-	index = 0;
-	for (level = AMD_IOMMU_PGTABLE_MAXLEVEL; level > 0; level--) {
-		pt = amd_iommu_lookup_pgtable(iommu, ppt, dp, level, index);
-		if (pt) {
-			ppt = pt;
-			index = AMD_IOMMU_VA_BITS(pageva, level);
-			continue;
-		}
-		break;
-	}
-
-	if (level == 0) {
-		uint64_t *ptep;
-		uint64_t pa_4K;
-
-		ASSERT(pt);
-		ASSERT(pt == ppt);
-		ASSERT(pt->pt_domainid == dp->d_domainid);
-
-		ptep = &(pt->pt_pgtblva[index]);
-
-		pa_4K = AMD_IOMMU_REG_GET64(ptep, AMD_IOMMU_PTDE_ADDR);
-		if (amd_iommu_unity_map || type == AMD_IOMMU_UNITY_MAP) {
-			ASSERT(pageva == (pa_4K << MMU_PAGESHIFT));
-		}
-	}
-
-	tear_level = -1;
-	invalidate_pde = 0;
-	invalidate_pte = 0;
-	for (++level; level <= AMD_IOMMU_PGTABLE_MAXLEVEL; level++) {
-		prev_index = pt->pt_index;
-		ppt = pt->pt_parent;
-		retval = amd_iommu_teardown_pdte(iommu, pt, index);
-		switch (retval) {
-		case PDTE_NOT_TORN:
-			goto invalidate;
-		case PDTE_TORN_DOWN:
-			invalidate_pte = 1;
-			goto invalidate;
-		case PGTABLE_TORN_DOWN:
-			invalidate_pte = 1;
-			invalidate_pde = 1;
-			tear_level = level;
-			break;
-		}
-		index = prev_index;
-		pt = ppt;
-	}
-
-invalidate:
-	/*
-	 * Now invalidate the IOMMU HW caches, if applicable
-	 */
-	if (invalidate_pte) {
-		cmdargs.ca_domainid = (uint16_t)domainid;
-		if (amd_iommu_pageva_inval_all) {
-			cmdargs.ca_addr = (uintptr_t)0x7FFFFFFFFFFFF000;
-			flags = AMD_IOMMU_CMD_FLAGS_PAGE_PDE_INVAL |
-			    AMD_IOMMU_CMD_FLAGS_PAGE_INVAL_S;
-		} else if (invalidate_pde) {
-			cmdargs.ca_addr =
-			    (uintptr_t)AMD_IOMMU_VA_INVAL(pageva, tear_level);
-			flags = AMD_IOMMU_CMD_FLAGS_PAGE_PDE_INVAL |
-			    AMD_IOMMU_CMD_FLAGS_PAGE_INVAL_S;
-		} else {
-			cmdargs.ca_addr = (uintptr_t)pageva;
-			flags = 0;
-		}
-		if (amd_iommu_cmd(iommu, AMD_IOMMU_CMD_INVAL_IOMMU_PAGES,
-		    &cmdargs, flags, 0) != DDI_SUCCESS) {
-			cmn_err(CE_WARN, "%s: %s%d: idx=%d: domainid=%d, "
-			    "rdip=%p. Failed to invalidate IOMMU HW cache "
-			    "for %s", f, driver, instance,
-			    iommu->aiomt_idx, domainid, (void *)rdip, path);
-			error = DDI_FAILURE;
-			goto out;
-		}
-	}
-
-passthru:
-	if (tear_level == AMD_IOMMU_PGTABLE_MAXLEVEL) {
-		error = amd_iommu_clear_devtbl_entry(iommu, rdip, domainid,
-		    deviceid, dp, domain_freed, path);
-	} else {
-		error = DDI_SUCCESS;
-	}
-
-out:
-	SYNC_FORDEV(iommu->aiomt_dmahdl);
-
-	return (error);
-}
-
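When a whole page table is torn down, the driver invalidates the covered
VA range with a single command: AMD_IOMMU_VA_INVAL() encodes the range
size in the address itself (bits below the range boundary set to one, the
boundary bit cleared), which is what AMD_IOMMU_CMD_FLAGS_PAGE_INVAL_S
signals. A standalone sketch of the address computation, mirroring the
macros in amd_iommu_page_tables.h; MMU_PAGESHIFT is assumed to be 12, and
the sketch covers levels 1 through 5 (level 6 would need wider masks).

#include <stdio.h>
#include <stdint.h>

#define	MMU_PAGESHIFT	12	/* assumed: 4K base pages */

/* mirrors AMD_IOMMU_VA_TOTBITS()/AMD_IOMMU_VA_INVAL() from the header */
static uint64_t
va_inval(uint64_t va, int l)
{
	/* valid for l = 1..5; l == 6 gives totbits == 64 */
	int totbits = l * 9 + MMU_PAGESHIFT;
	uint64_t setmask = ((1ULL << totbits) - 1) >> 1;
	uint64_t clrmask = ~(1ULL << (totbits - 1));

	return ((va & clrmask) | setmask);
}

int
main(void)
{
	/*
	 * Tearing down a level-1 table covers 2^9 pages, i.e. the
	 * low 21 bits of the VA: bit 20 is cleared, bits 0-19 set.
	 */
	(void) printf("0x%llx\n",
	    (unsigned long long)va_inval(0x40000000ULL, 1));
	return (0);
}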
-static int
-cvt_bind_error(int error)
-{
-	switch (error) {
-	case DDI_DMA_MAPPED:
-	case DDI_DMA_PARTIAL_MAP:
-	case DDI_DMA_NORESOURCES:
-	case DDI_DMA_NOMAPPING:
-		break;
-	default:
-		cmn_err(CE_PANIC, "Unsupported error code: %d", error);
-		/*NOTREACHED*/
-	}
-	return (error);
-}
-
-int
-amd_iommu_map_pa2va(amd_iommu_t *iommu, dev_info_t *rdip, ddi_dma_attr_t *attrp,
-    struct ddi_dma_req *dmareq, uint64_t start_pa, uint64_t pa_sz,
-    map_type_t type, uint64_t *start_vap, int km_flags)
-{
-	pfn_t pfn_start;
-	pfn_t pfn_end;
-	pfn_t pfn;
-	int alias;
-	int32_t deviceid;
-	domain_id_t domainid;
-	amd_iommu_domain_t *dp;
-	uint64_t end_pa;
-	uint64_t start_va;
-	uint64_t end_va;
-	uint64_t pg_start;
-	uint64_t pg_end;
-	uint64_t pg;
-	uint64_t va_sz;
-	char *path;
-	int error = DDI_DMA_NOMAPPING;
-	const char *driver = ddi_driver_name(iommu->aiomt_dip);
-	int instance = ddi_get_instance(iommu->aiomt_dip);
-	const char *f = "amd_iommu_map_pa2va";
-
-	ASSERT(pa_sz != 0);
-
-	*start_vap = 0;
-
-	ASSERT(rdip);
-
-	path = kmem_alloc(MAXPATHLEN, km_flags);
-	if (path == NULL) {
-		error = DDI_DMA_NORESOURCES;
-		goto out;
-	}
-	(void) ddi_pathname(rdip, path);
-
-	/*
-	 * First get deviceid
-	 */
-	if (amd_iommu_get_deviceid(iommu, rdip, &deviceid, &alias, path)
-	    != DDI_SUCCESS) {
-		cmn_err(CE_WARN, "%s: %s%d: idx=%d: rdip=%p. "
-		    "Failed to get device ID for %s.", f, driver, instance,
-		    iommu->aiomt_idx, (void *)rdip, path);
-		error = DDI_DMA_NOMAPPING;
-		goto out;
-	}
-
-	/*
-	 * Next get the domain for this rdip
-	 */
-	if (amd_iommu_get_domain(iommu, rdip, alias, deviceid, &domainid, path)
-	    != DDI_SUCCESS) {
-		cmn_err(CE_WARN, "%s: %s%d: idx=%d: rdip=%p, path=%s. "
-		    "Failed to get domain.", f, driver, instance,
-		    iommu->aiomt_idx, (void *)rdip, path);
-		error = DDI_DMA_NOMAPPING;
-		goto out;
-	}
-
-	dp = amd_iommu_lookup_domain(iommu, domainid, type, km_flags);
-	if (dp == NULL) {
-		cmn_err(CE_WARN, "%s: %s%d: idx=%d: domainid=%d, rdip=%p. "
-		    "Failed to lookup domain for %s.", f, driver, instance,
-		    iommu->aiomt_idx, domainid, (void *)rdip, path);
-		error = DDI_DMA_NORESOURCES;
-		goto out;
-	}
-
-	ASSERT(dp->d_domainid == domainid);
-
-	pfn_start = start_pa >> MMU_PAGESHIFT;
-
-	if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
-		cmn_err(CE_NOTE, "pa = %p, pfn_new = %p, pfn_start = %p, "
-		    "pgshift = %d",
-		    (void *)(uintptr_t)start_pa,
-		    (void *)(uintptr_t)(start_pa >> MMU_PAGESHIFT),
-		    (void *)(uintptr_t)pfn_start, MMU_PAGESHIFT);
-	}
-
-	end_pa = start_pa + pa_sz - 1;
-	pfn_end = end_pa >> MMU_PAGESHIFT;
-
-	if (amd_iommu_unity_map || type == AMD_IOMMU_UNITY_MAP) {
-		start_va = start_pa;
-		end_va = end_pa;
-		va_sz = pa_sz;
-		*start_vap = start_va;
-	} else {
-		va_sz = mmu_ptob(pfn_end - pfn_start + 1);
-		start_va = (uintptr_t)vmem_xalloc(dp->d_vmem, va_sz,
-		    MAX(attrp->dma_attr_align, MMU_PAGESIZE),
-		    0,
-		    attrp->dma_attr_seg + 1,
-		    (void *)(uintptr_t)attrp->dma_attr_addr_lo,
-		    (void *)(uintptr_t)MIN((attrp->dma_attr_addr_hi + 1),
-		    AMD_IOMMU_SIZE_4G),	/* XXX rollover */
-		    km_flags == KM_SLEEP ? VM_SLEEP : VM_NOSLEEP);
-		if (start_va == 0) {
-			cmn_err(CE_WARN, "%s: No VA resources",
-			    amd_iommu_modname);
-			error = DDI_DMA_NORESOURCES;
-			goto out;
-		}
-		ASSERT((start_va & MMU_PAGEOFFSET) == 0);
-		end_va = start_va + va_sz - 1;
-		*start_vap = start_va + (start_pa & MMU_PAGEOFFSET);
-	}
-
-	pg_start = start_va >> MMU_PAGESHIFT;
-	pg_end = end_va >> MMU_PAGESHIFT;
-
-	pg = pg_start;
-	for (pfn = pfn_start; pfn <= pfn_end; pfn++, pg++) {
-
-		if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
-			cmn_err(CE_WARN, "%s: attempting to create page tables "
-			    "for pa = %p, va = %p, path = %s",
-			    f, (void *)(uintptr_t)(pfn << MMU_PAGESHIFT),
-			    (void *)(uintptr_t)(pg << MMU_PAGESHIFT), path);
-
-		}
-
-		if (amd_iommu_unity_map || type == AMD_IOMMU_UNITY_MAP) {
-			ASSERT(pfn == pg);
-		}
-
-		if ((error = amd_iommu_create_pgtables(iommu, rdip, dmareq,
-		    pg << MMU_PAGESHIFT,
-		    pfn << MMU_PAGESHIFT, deviceid, domainid, dp, path,
-		    km_flags)) != DDI_SUCCESS) {
-			cmn_err(CE_WARN, "Failed to create_pgtables");
-			goto out;
-		}
-
-		if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
-			cmn_err(CE_WARN, "%s: successfully created page "
-			    "for pfn = %p, vapg = %p, path = %s",
-			    f, (void *)(uintptr_t)pfn,
-			    (void *)(uintptr_t)pg, path);
-		}
-
-	}
-	ASSERT(pg == pg_end + 1);
-
-	if (amd_iommu_debug & AMD_IOMMU_DEBUG_PA2VA) {
-		cmn_err(CE_NOTE, "pa=%p, va=%p",
-		    (void *)(uintptr_t)start_pa,
-		    (void *)(uintptr_t)(*start_vap));
-	}
-	error = DDI_DMA_MAPPED;
-
-out:
-	kmem_free(path, MAXPATHLEN);
-	return (cvt_bind_error(error));
-}
-
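amd_iommu_map_pa2va() rounds the PA range out to whole pages when sizing
the VA allocation, then hands back a VA that preserves the sub-page offset
of start_pa. A standalone sketch of that arithmetic with illustrative
values; the vmem_xalloc() result is stood in by a constant.

#include <stdio.h>
#include <stdint.h>

#define	MMU_PAGESHIFT	12			/* assumed: 4K pages */
#define	MMU_PAGESIZE	(1ULL << MMU_PAGESHIFT)
#define	MMU_PAGEOFFSET	(MMU_PAGESIZE - 1)

int
main(void)
{
	uint64_t start_pa = 0x12345678ULL;	/* arbitrary, unaligned */
	uint64_t pa_sz = 0x3000;
	uint64_t end_pa = start_pa + pa_sz - 1;
	uint64_t pfn_start = start_pa >> MMU_PAGESHIFT;
	uint64_t pfn_end = end_pa >> MMU_PAGESHIFT;
	/* whole pages needed to cover [start_pa, end_pa] */
	uint64_t va_sz = (pfn_end - pfn_start + 1) << MMU_PAGESHIFT;
	uint64_t start_va = 0x80000000ULL;	/* stand-in for vmem_xalloc() */
	/* the caller's VA keeps the sub-page offset of start_pa */
	uint64_t start_vap = start_va + (start_pa & MMU_PAGEOFFSET);

	(void) printf("pages = %llu, va_sz = 0x%llx, start_vap = 0x%llx\n",
	    (unsigned long long)(pfn_end - pfn_start + 1),
	    (unsigned long long)va_sz, (unsigned long long)start_vap);
	return (0);
}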
-int
-amd_iommu_unmap_va(amd_iommu_t *iommu, dev_info_t *rdip, uint64_t start_va,
-    uint64_t va_sz, map_type_t type)
-{
-	uint64_t end_va;
-	uint64_t pg_start;
-	uint64_t pg_end;
-	uint64_t pg;
-	uint64_t actual_sz;
-	char *path;
-	int pathfree;
-	int alias;
-	int32_t deviceid;
-	domain_id_t domainid;
-	amd_iommu_domain_t *dp;
-	int error;
-	int domain_freed;
-	const char *driver = ddi_driver_name(iommu->aiomt_dip);
-	int instance = ddi_get_instance(iommu->aiomt_dip);
-	const char *f = "amd_iommu_unmap_va";
-
-	if (amd_iommu_no_unmap)
-		return (DDI_SUCCESS);
-
-	path = kmem_alloc(MAXPATHLEN, KM_NOSLEEP);
-	if (path) {
-		(void) ddi_pathname(rdip, path);
-		pathfree = 1;
-	} else {
-		pathfree = 0;
-		path = "<path-mem-alloc-failed>";
-	}
-
-	/*
-	 * First get deviceid
-	 */
-	if (amd_iommu_get_deviceid(iommu, rdip, &deviceid, &alias, path)
-	    != DDI_SUCCESS) {
-		cmn_err(CE_WARN, "%s: %s%d: idx=%d: rdip=%p. "
-		    "Failed to get device ID for %s.", f, driver, instance,
-		    iommu->aiomt_idx, (void *)rdip, path);
-		error = DDI_FAILURE;
-		goto out;
-	}
-
-	/*
-	 * Next get the domain for this rdip
-	 */
-	if (amd_iommu_get_domain(iommu, rdip, alias, deviceid, &domainid, path)
-	    != DDI_SUCCESS) {
-		cmn_err(CE_WARN, "%s: %s%d: idx=%d: rdip=%p, path=%s. "
-		    "Failed to get domain.", f, driver, instance,
-		    iommu->aiomt_idx, (void *)rdip, path);
-		error = DDI_FAILURE;
-		goto out;
-	}
-
-	/* should never result in domain allocation/vmem_create */
-	dp = amd_iommu_lookup_domain(iommu, domainid, AMD_IOMMU_INVALID_MAP,
-	    KM_NOSLEEP);
-	if (dp == NULL) {
-		cmn_err(CE_WARN, "%s: %s%d: idx=%d: domainid=%d, rdip=%p. "
-		    "Failed to lookup domain for %s.", f, driver, instance,
-		    iommu->aiomt_idx, domainid, (void *)rdip, path);
-		error = DDI_FAILURE;
-		goto out;
-	}
-
-	ASSERT(dp->d_domainid == domainid);
-
-	pg_start = start_va >> MMU_PAGESHIFT;
-	end_va = start_va + va_sz - 1;
-	pg_end = end_va >> MMU_PAGESHIFT;
-	actual_sz = (pg_end - pg_start + 1) << MMU_PAGESHIFT;
-
-	domain_freed = 0;
-	for (pg = pg_start; pg <= pg_end; pg++) {
-		domain_freed = 0;
-		if (amd_iommu_destroy_pgtables(iommu, rdip,
-		    pg << MMU_PAGESHIFT, deviceid, domainid, dp, type,
-		    &domain_freed, path) != DDI_SUCCESS) {
-			error = DDI_FAILURE;
-			goto out;
-		}
-		if (domain_freed) {
-			ASSERT(pg == pg_end);
-			break;
-		}
-	}
-
-	/*
-	 * vmem_xalloc() must be paired with vmem_xfree()
-	 */
-	if (type == AMD_IOMMU_VMEM_MAP && !amd_iommu_unity_map) {
-		vmem_xfree(dp->d_vmem,
-		    (void *)(uintptr_t)(pg_start << MMU_PAGESHIFT), actual_sz);
-	}
-
-	if (domain_freed)
-		amd_iommu_teardown_domain(iommu, dp);
-
-	error = DDI_SUCCESS;
-out:
-	if (pathfree)
-		kmem_free(path, MAXPATHLEN);
-	return (error);
-}
--- a/usr/src/uts/intel/io/amd_iommu/amd_iommu_page_tables.h	Wed Sep 16 22:03:43 2009 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,134 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef _AMD_IOMMU_PAGE_TABLES_H
-#define	_AMD_IOMMU_PAGE_TABLES_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#ifdef _KERNEL
-
-/* Common to PTEs and PDEs */
-#define	AMD_IOMMU_PTDE_IW		(62 << 16 | 62)
-#define	AMD_IOMMU_PTDE_IR		(61 << 16 | 61)
-#define	AMD_IOMMU_PTDE_ADDR		(51 << 16 | 12)
-#define	AMD_IOMMU_PTDE_NXT_LVL		(11 << 16 | 9)
-#define	AMD_IOMMU_PTDE_PR		(0 << 16 | 0)
-
-#define	AMD_IOMMU_PTE_FC		(60 << 16 | 60)
-#define	AMD_IOMMU_PTE_U			(59 << 16 | 59)
-
-#define	AMD_IOMMU_VA_NBITS(l)		((l) == 6 ? 7 : 9)
-#define	AMD_IOMMU_VA_BITMASK(l)		((1 << AMD_IOMMU_VA_NBITS(l)) - 1)
-#define	AMD_IOMMU_VA_SHIFT(v, l)	\
-	((v) >> (MMU_PAGESHIFT + (AMD_IOMMU_VA_NBITS((l) - 1) * ((l) - 1))))
-#define	AMD_IOMMU_VA_BITS(v, l)		\
-	(AMD_IOMMU_VA_SHIFT(v, l) & AMD_IOMMU_VA_BITMASK(l))
-#define	AMD_IOMMU_VA_TOTBITS(l)		\
-	(((l) == 6 ? 7 + ((l) - 1) * 9 : (l) * 9) + MMU_PAGESHIFT)
-#define	AMD_IOMMU_VA_TOTMASK(l)		((1ULL << AMD_IOMMU_VA_TOTBITS(l)) - 1)
-#define	AMD_IOMMU_VA_INVAL_SETMASK(l)	\
-	(((1ULL << AMD_IOMMU_VA_TOTBITS(l)) - 1) >> 1)
-#define	AMD_IOMMU_VA_INVAL_CLRMASK(l)	\
-	(~(1ULL << (AMD_IOMMU_VA_TOTBITS(l) - 1)))
-#define	AMD_IOMMU_VA_INVAL(v, l)	\
-	(((v) & AMD_IOMMU_VA_INVAL_CLRMASK(l)) | AMD_IOMMU_VA_INVAL_SETMASK(l))
-
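The AMD_IOMMU_VA_BITS() macros above split a 64-bit virtual address into
per-level table indexes: a 12-bit page offset, nine index bits for each of
levels 1 through 5, and seven bits at level 6 (12 + 5*9 + 7 = 64). A
standalone sketch mirroring the decomposition; MMU_PAGESHIFT is assumed
to be 12.

#include <stdio.h>
#include <stdint.h>

#define	MMU_PAGESHIFT	12	/* assumed: 4K base pages */

static unsigned
va_nbits(int l)
{
	return (l == 6 ? 7 : 9);	/* mirrors AMD_IOMMU_VA_NBITS() */
}

/* mirrors AMD_IOMMU_VA_BITS(): index into the level-l table for va */
static unsigned
va_bits(uint64_t va, int l)
{
	return ((unsigned)((va >> (MMU_PAGESHIFT + 9 * (l - 1))) &
	    ((1U << va_nbits(l)) - 1)));
}

int
main(void)
{
	uint64_t va = 0x0000123456789000ULL;
	int l;

	/* the same walk amd_iommu_create_pgtables() performs, root down */
	for (l = 6; l >= 1; l--)
		(void) printf("level %d index = %u\n", l, va_bits(va, l));
	return (0);
}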
-#define	AMD_IOMMU_PGTABLE_SZ		(4096)
-#define	AMD_IOMMU_PGTABLE_MAXLEVEL	(6)
-#define	AMD_IOMMU_PGTABLE_HASH_SZ	(256)
-
-#define	AMD_IOMMU_PGTABLE_ALIGN		((1ULL << 12) - 1)
-#define	AMD_IOMMU_PGTABLE_SIZE		(1ULL << 12)
-
-#define	AMD_IOMMU_MAX_PDTE		(1ULL << AMD_IOMMU_VA_NBITS(1))
-#define	PT_REF_VALID(p)			((p)->pt_ref >= 0 && \
-					(p)->pt_ref <= AMD_IOMMU_MAX_PDTE)
-
-#define	AMD_IOMMU_DOMAIN_HASH_SZ	(256)
-#define	AMD_IOMMU_PGTABLE_FREELIST_MAX	(256)
-#define	AMD_IOMMU_PA2VA_HASH_SZ		(256)
-
-#define	AMD_IOMMU_SIZE_4G		((uint64_t)1 << 32)
-#define	AMD_IOMMU_VMEM_NAMELEN		(30)
-
-typedef enum {
-	AMD_IOMMU_INVALID_DOMAIN = 0,
-	AMD_IOMMU_IDENTITY_DOMAIN = 0xFFFD,
-	AMD_IOMMU_PASSTHRU_DOMAIN = 0xFFFE,
-	AMD_IOMMU_SYS_DOMAIN = 0xFFFF
-} domain_id_t;
-
-typedef enum {
-	AMD_IOMMU_INVALID_MAP = 0,
-	AMD_IOMMU_UNITY_MAP,
-	AMD_IOMMU_VMEM_MAP
-} map_type_t;
-
-typedef struct amd_iommu_page_table {
-	domain_id_t pt_domainid;
-	int pt_level;
-	ddi_dma_handle_t pt_dma_hdl;
-	ddi_acc_handle_t pt_mem_hdl;
-	uint64_t pt_mem_reqsz;
-	uint64_t pt_mem_realsz;
-	uint64_t *pt_pgtblva;
-	uint64_t pt_pte_ref[AMD_IOMMU_MAX_PDTE];
-	uint16_t pt_index;
-	int pt_ref;
-	ddi_dma_cookie_t pt_cookie;
-	struct amd_iommu_page_table *pt_next;
-	struct amd_iommu_page_table *pt_prev;
-	struct amd_iommu_page_table *pt_parent;
-} amd_iommu_page_table_t;
-
-typedef struct amd_iommu_domain {
-	domain_id_t d_domainid;
-	uint64_t d_pgtable_root_4K;
-	int64_t d_ref;
-	vmem_t *d_vmem;
-	struct amd_iommu_domain *d_prev;
-	struct amd_iommu_domain *d_next;
-} amd_iommu_domain_t;
-
-int amd_iommu_map_pa2va(amd_iommu_t *iommu, dev_info_t *rdip,
-    ddi_dma_attr_t *attrp, struct ddi_dma_req *dmareq,
-    uint64_t pa, uint64_t pa_sz, map_type_t type,
-    uint64_t *start_vap, int km_flags);
-int amd_iommu_unmap_va(amd_iommu_t *iommu, dev_info_t *rdip,
-    uint64_t va, uint64_t va_sz, map_type_t type);
-void amd_iommu_init_page_tables(amd_iommu_t *iommu);
-void amd_iommu_fini_page_tables(amd_iommu_t *iommu);
-void amd_iommu_set_passthru(amd_iommu_t *iommu, dev_info_t *rdip);
-
-#endif /* _KERNEL */
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif	/* _AMD_IOMMU_PAGE_TABLES_H */
--- a/usr/src/uts/intel/sys/Makefile	Wed Sep 16 22:03:43 2009 -0700
+++ b/usr/src/uts/intel/sys/Makefile	Mon Sep 14 21:48:21 2009 -0700
@@ -31,7 +31,6 @@
 #	from being built, so these headers are not exported (installed).
 
 HDRS	=			\
-	amd_iommu.h		\
 	archsystm.h		\
 	asm_linkage.h		\
 	bootconf.h		\
--- a/usr/src/uts/intel/sys/amd_iommu.h	Wed Sep 16 22:03:43 2009 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,56 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef	_SYS_AMD_IOMMU_H
-#define	_SYS_AMD_IOMMU_H
-
-#ifdef	__cplusplus
-extern "C" {
-#endif
-
-#include <sys/sunddi.h>
-#include <sys/iommulib.h>
-
-#ifdef _KERNEL
-
-typedef struct amd_iommu_state {
-	int	aioms_instance;			/* instance */
-	dev_info_t *aioms_devi;			/* dip */
-	struct amd_iommu *aioms_iommu_start;	/* start of list of IOMMUs */
-	struct amd_iommu *aioms_iommu_end;	/* end of list of IOMMUs */
-	int aioms_nunits;			/* # of IOMMUs in function */
-} amd_iommu_state_t;
-
-int amd_iommu_setup(dev_info_t *dip, amd_iommu_state_t *statep);
-int amd_iommu_teardown(dev_info_t *dip, amd_iommu_state_t *statep);
-int amd_iommu_lookup_src_bdf(uint16_t bdf, uint16_t *src_bdfp);
-
-#endif	/* _KERNEL */
-
-#ifdef	__cplusplus
-}
-#endif
-
-#endif	/* _SYS_AMD_IOMMU_H */