changeset 10844:d0b917681076

Branch merge
author tide@sparcv490
date Tue, 07 Jul 2009 10:00:27 -0400
parents 5f868239e28c (current diff) 4486ead27470 (diff)
children 66d6c3c59c55
files
diffstat 31 files changed, 1342 insertions(+), 333 deletions(-) [+]
line wrap: on
line diff
--- a/.hgtags	Mon Jul 06 09:30:27 2009 -0400
+++ b/.hgtags	Tue Jul 07 10:00:27 2009 -0400
@@ -100,3 +100,4 @@
 e5944843c474f09cbf93e8c9b3a45e88ce652a33 onnv_116
 2d174a95bf15692f37c3523ff3e01fcb338fa51f onnv_117
 a4895b3dd543bc9203c5540fe0a79d779e99822c onnv_118
+8e9d94399508ace774a964d8eba8bcc31f10d291 onnv_119
--- a/usr/src/cmd/fm/eversholt/files/i386/i86pc/intel.esc	Mon Jul 06 09:30:27 2009 -0400
+++ b/usr/src/cmd/fm/eversholt/files/i386/i86pc/intel.esc	Tue Jul 07 10:00:27 2009 -0400
@@ -223,6 +223,8 @@
 engine stat.ce_pgflt@memory-controller/dram-channel/dimm;
 
 event ereport.cpu.intel.nb.mem_ue@motherboard/memory-controller{within(12s)};
+event ereport.cpu.intel.nb.ddr2_mem_ue@
+    motherboard/memory-controller{within(12s)};
 event ereport.cpu.intel.nb.fbd.ma@motherboard/memory-controller{within(12s)};
 event fault.memory.intel.page_ue@
     motherboard/memory-controller/dram-channel/dimm/rank,
@@ -236,18 +238,21 @@
     (payloadprop_defined("physaddr") || payloadprop_defined("offset")) &&
     SET_ADDR && SET_OFFSET } (1)->
     ereport.cpu.intel.nb.mem_ue@motherboard/memory-controller,
+    ereport.cpu.intel.nb.ddr2_mem_ue@motherboard/memory-controller,
     ereport.cpu.intel.nb.fbd.ma@motherboard/memory-controller;
 
 prop fault.memory.intel.dimm_ue@
     motherboard/memory-controller/dram-channel<channel_num>/dimm/rank[rank_num]
     { payloadprop_defined("rank") && rank_num == payloadprop("rank") } (1)->
     ereport.cpu.intel.nb.mem_ue@motherboard/memory-controller,
+    ereport.cpu.intel.nb.ddr2_mem_ue@motherboard/memory-controller,
     ereport.cpu.intel.nb.fbd.ma@motherboard/memory-controller;
 
 event upset.memory.intel.discard@motherboard/memory-controller{within(1s)};
 
 prop upset.memory.intel.discard@motherboard/memory-controller (0)->
     ereport.cpu.intel.nb.mem_ue@motherboard/memory-controller,
+    ereport.cpu.intel.nb.ddr2_mem_ue@motherboard/memory-controller,
     ereport.cpu.intel.nb.fbd.ma@motherboard/memory-controller;
 
 prop upset.memory.intel.discard@motherboard/memory-controller (0)->
@@ -260,6 +265,9 @@
 
 #define MBDIMM motherboard/memory-controller/dram-channel/dimm
 event ereport.cpu.intel.nb.mem_ce@MBDIMM/rank{within(12s)};
+event ereport.cpu.intel.nb.ddr2_mem_ce@MBDIMM/rank{within(12s)};
+event ereport.cpu.intel.nb.ddr2_mem_ce@
+    motherboard/memory-controller{within(12s)};
 
 engine serd.memory.intel.page_ce@MBDIMM/rank, N=PAGE_CE_COUNT, T=PAGE_CE_TIME;
 event fault.memory.intel.page_ce@MBDIMM/rank, message=0, response=0,
@@ -267,7 +275,8 @@
 prop fault.memory.intel.page_ce@MBDIMM/rank
     { (payloadprop_defined("physaddr") || payloadprop_defined("offset")) &&
     SET_ADDR && SET_OFFSET } (0)->
-    ereport.cpu.intel.nb.mem_ce@MBDIMM/rank;
+    ereport.cpu.intel.nb.mem_ce@MBDIMM/rank,
+    ereport.cpu.intel.nb.ddr2_mem_ce@MBDIMM/rank;
 
 engine serd.memory.intel.dimm_ce@MBDIMM/rank, N=DIMM_CE_COUNT, T=DIMM_CE_TIME;
 event fault.memory.intel.dimm_ce@MBDIMM/rank,
@@ -275,14 +284,15 @@
 prop fault.memory.intel.dimm_ce@MBDIMM/rank
     { !confprop_defined(MBDIMM, "dimm-size") &&
     count(stat.ce_pgflt@MBDIMM) > 512 } (1)->
-    ereport.cpu.intel.nb.mem_ce@MBDIMM/rank;
-
+    ereport.cpu.intel.nb.mem_ce@MBDIMM/rank,
+    ereport.cpu.intel.nb.ddr2_mem_ce@MBDIMM/rank;
 #define DIMM_CE(dimm_size, n, t, fault_rate) \
 	prop fault.memory.intel.dimm_ce@MBDIMM/rank { \
 	    confprop(MBDIMM, "dimm-size") == dimm_size && \
 	    count(stat.ce_pgflt@MBDIMM) > fault_rate && \
 	    setserdn(n) & setserdt(t) } (1)-> \
-    	    ereport.cpu.intel.nb.mem_ce@MBDIMM/rank;
+    	    ereport.cpu.intel.nb.mem_ce@MBDIMM/rank, \
+	    ereport.cpu.intel.nb.ddr2_mem_ce@MBDIMM/rank;
 	
 DIMM_CE("8G", 8, 1week, 2000)
 DIMM_CE("4G", 4, 1week, 1500)
@@ -291,6 +301,9 @@
 DIMM_CE("512M", 4, 8week, 250)
 DIMM_CE("256M", 4, 16week, 125)
 
+prop upset.memory.intel.discard@motherboard/memory-controller (0)->
+    ereport.cpu.intel.nb.ddr2_mem_ce@motherboard/memory-controller;
+
 event ereport.cpu.intel.nb.fbd.alert@rank{within(12s)};
 event fault.memory.intel.fbd.alert@rank, retire=0;
 
@@ -345,21 +358,26 @@
 event ereport.cpu.intel.nb.unknown@memory-controller {within(12s)};
 event ereport.cpu.intel.nb.unknown@memory-controller/dram-channel {within(12s)};
 event ereport.cpu.intel.nb.spd@memory-controller/dram-channel {within(12s)};
+event ereport.cpu.intel.nb.ddr2_spd@
+    memory-controller/dram-channel {within(12s)};
 event upset.discard@memory-controller;
 
 prop upset.discard@memory-controller (0)->
     ereport.cpu.intel.nb.unknown@memory-controller,
     ereport.cpu.intel.nb.unknown@memory-controller/dram-channel,
-    ereport.cpu.intel.nb.spd@memory-controller/dram-channel;
+    ereport.cpu.intel.nb.spd@memory-controller/dram-channel,
+    ereport.cpu.intel.nb.ddr2_spd@memory-controller/dram-channel;
 
 event ereport.cpu.intel.nb.mem_ds@memory-controller{within(30s)};
+event ereport.cpu.intel.nb.ddr2_mem_ds@memory-controller{within(30s)};
 event fault.memory.intel.fbd.mem_ds@memory-controller/dram-channel/dimm/rank,
     retire=0;
 
 prop fault.memory.intel.fbd.mem_ds@
     memory-controller/dram-channel/dimm/rank[rank_num]
     { payloadprop_defined("rank") && rank_num == payloadprop("rank") } (1)->
-    ereport.cpu.intel.nb.mem_ds@memory-controller;
+    ereport.cpu.intel.nb.mem_ds@memory-controller,
+    ereport.cpu.intel.nb.ddr2_mem_ds@memory-controller;
 
 event ereport.cpu.intel.nb.fsb@chip{within(12s)};
 event fault.cpu.intel.nb.fsb@chip, retire=0;
--- a/usr/src/cmd/iscsi/iscsitgtd/iscsi.d	Mon Jul 06 09:30:27 2009 -0400
+++ b/usr/src/cmd/iscsi/iscsitgtd/iscsi.d	Tue Jul 07 10:00:27 2009 -0400
@@ -26,26 +26,7 @@
 
 #pragma D depends_on library net.d
 #pragma D depends_on module genunix
-
-typedef struct iscsiinfo {
-	string ii_target;	/* target iqn */
-	string ii_initiator;	/* initiator iqn */
-	string ii_isid;         /* initiator session identifier */
-	string ii_tsih;         /* target session identifying handle */
-	string ii_transport;    /* transport type ("iser-ib", "sockets") */
-
-	uint64_t ii_lun;	/* target logical unit number */
-
-	uint32_t ii_itt;	/* initiator task tag */
-	uint32_t ii_ttt;	/* target transfer tag */
-
-	uint32_t ii_cmdsn;	/* command sequence number */
-	uint32_t ii_statsn;	/* status sequence number */
-	uint32_t ii_datasn;	/* data sequence number */
-
-	uint32_t ii_datalen;	/* length of data payload */
-	uint32_t ii_flags;	/* probe-specific flags */
-} iscsiinfo_t;
+#pragma D depends_on library scsi.d
 
 typedef struct uiscsiproto uiscsiproto_t;
 
--- a/usr/src/cmd/ndmpd/ndmp/ndmpd.h	Mon Jul 06 09:30:27 2009 -0400
+++ b/usr/src/cmd/ndmpd/ndmp/ndmpd.h	Tue Jul 07 10:00:27 2009 -0400
@@ -641,6 +641,12 @@
 	tlm_commands_t *tr_cmds;
 } ndmp_tar_reader_arg_t;
 
+typedef struct {
+	ndmpd_session_t *bs_session;
+	char *bs_jname;
+	char *bs_path;
+} ndmp_bkup_size_arg_t;
+
 /*
  * Variables from ndmpd_comm.c
  */
--- a/usr/src/cmd/ndmpd/ndmp/ndmpd_tar3.c	Mon Jul 06 09:30:27 2009 -0400
+++ b/usr/src/cmd/ndmpd/ndmp/ndmpd_tar3.c	Tue Jul 07 10:00:27 2009 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -61,9 +61,11 @@
 #define	QUAD_DECIMAL_LEN	20
 
 
-/* IS 'Y' OR "T' */
+/* Is Y=yes or T=true */
 #define	IS_YORT(c)	(strchr("YT", toupper(c)))
 
+/* Is F=file format (vs D=node-dir format) */
+#define	IS_F(c)		(toupper(c) == 'F')
 
 /*
  * If path is defined.
@@ -907,10 +909,18 @@
 		NLP_UNSET(nlp, NLPF_FH);
 	} else {
 		NDMP_LOG(LOG_DEBUG, "env(HIST): \"%s\"", envp);
-		if (IS_YORT(*envp))
+		if (IS_YORT(*envp) || IS_F(*envp))
 			NLP_SET(nlp, NLPF_FH);
 		else
 			NLP_UNSET(nlp, NLPF_FH);
+
+		/* Force file format if specified */
+		if (IS_F(*envp)) {
+			params->mp_file_history_path_func =
+			    ndmpd_api_file_history_file_v3;
+			params->mp_file_history_dir_func = 0;
+			params->mp_file_history_node_func = 0;
+		}
 	}
 }
 
@@ -2532,25 +2542,27 @@
  * of the progress of backup during NDMP backup.
  */
 void
-get_backup_size(ndmpd_session_t *session, char *path)
+get_backup_size(ndmp_bkup_size_arg_t *sarg)
 {
 	fs_traverse_t ft;
 	u_longlong_t bk_size;
+	char spath[PATH_MAX];
 	int rv;
 
-	if (path == NULL)
-		return;
-
 	bk_size = 0;
-
-	/* set traversing arguments */
-	ft.ft_path = path;
-	ft.ft_lpath = path;
-
+	if (fs_is_chkpntvol(sarg->bs_path)) {
+		ft.ft_path = sarg->bs_path;
+	} else {
+		(void) tlm_build_snapshot_name(sarg->bs_path,
+		    spath, sarg->bs_jname);
+		ft.ft_path = spath;
+	}
+
+	ft.ft_lpath = ft.ft_path;
 	ft.ft_callbk = size_cb;
 	ft.ft_arg = &bk_size;
 	ft.ft_logfp = (ft_log_t)ndmp_log;
-	ft.ft_flags = FST_VERBOSE;	/* Solaris */
+	ft.ft_flags = FST_VERBOSE;
 
 	if ((rv = traverse_level(&ft)) != 0) {
 		NDMP_LOG(LOG_DEBUG, "bksize err=%d", rv);
@@ -2559,7 +2571,7 @@
 		NDMP_LOG(LOG_DEBUG, "bksize %lld, %lldKB, %lldMB\n",
 		    bk_size, bk_size / 1024, bk_size /(1024 * 1024));
 	}
-	session->ns_data.dd_data_size = bk_size;
+	sarg->bs_session->ns_data.dd_data_size = bk_size;
 }
 
 /*
@@ -3688,6 +3700,8 @@
 	ndmpd_session_t *session;
 	ndmp_lbr_params_t *nlp;
 	char jname[TLM_MAX_BACKUP_JOB_NAME];
+	ndmp_bkup_size_arg_t sarg;
+	pthread_t tid;
 
 	session = (ndmpd_session_t *)(params->mp_daemon_cookie);
 	*(params->mp_module_cookie) = nlp = ndmp_get_nlp(session);
@@ -3706,10 +3720,16 @@
 	NDMP_LOG(LOG_DEBUG, "err %d, chkpnted %c",
 	    err, NDMP_YORN(NLP_ISCHKPNTED(nlp)));
 
-	/* Get an estimate of the data size */
-	get_backup_size(session, nlp->nlp_backup_path);
-
 	if (err == 0) {
+		sarg.bs_session = session;
+		sarg.bs_jname = jname;
+		sarg.bs_path = nlp->nlp_backup_path;
+
+		/* Get an estimate of the data size */
+		if (pthread_create(&tid, NULL, (funct_t)get_backup_size,
+		    (void *)&sarg) == 0)
+			(void) pthread_detach(tid);
+
 		err = ndmp_get_cur_bk_time(nlp, &nlp->nlp_cdate, jname);
 		if (err != 0) {
 			NDMP_LOG(LOG_DEBUG, "err %d", err);
--- a/usr/src/lib/fm/topo/modules/i86pc/chip/chip_intel.c	Mon Jul 06 09:30:27 2009 -0400
+++ b/usr/src/lib/fm/topo/modules/i86pc/chip/chip_intel.c	Tue Jul 07 10:00:27 2009 -0400
@@ -20,7 +20,7 @@
  */
 
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -94,10 +94,10 @@
 	return (mc_fd != -1);
 }
 
-void
+static void
 mc_add_ranks(topo_mod_t *mod, tnode_t *dnode, nvlist_t *auth, int dimm,
-    nvlist_t **ranks_nvp, int nranks, char *serial, char *part, char *rev,
-    int maxranks)
+    nvlist_t **ranks_nvp, int start_rank, int nranks, char *serial, char *part,
+    char *rev, int maxranks)
 {
 	int i;
 	int rank;
@@ -106,10 +106,14 @@
 	nvlist_t *fmri;
 	int err = 0;
 
-	rank = dimm * maxranks;
+	/*
+	 * If start_rank is defined, it is assigned to the first rank of this
+	 * dimm.
+	 */
+	rank = start_rank >= 0 ? start_rank : dimm * maxranks;
 	if (topo_node_range_create(mod, dnode, RANK, rank,
 	    rank + nranks - 1) < 0) {
-		whinge(mod, NULL, "mc_add_dimms: node range create failed"
+		whinge(mod, NULL, "mc_add_ranks: node range create failed"
 		    " for rank\n");
 		return;
 	}
@@ -162,6 +166,7 @@
 	nvpair_t *nvp;
 	int err;
 	nvlist_t **ranks_nvp;
+	int32_t start_rank = -1;
 	uint_t nranks = 0;
 	char *serial = NULL;
 	char *part = NULL;
@@ -181,6 +186,8 @@
 			if (strcmp(name, MCINTEL_NVLIST_RANKS) == 0) {
 				(void) nvpair_value_nvlist_array(nvp,
 				    &ranks_nvp, &nranks);
+			} else if (strcmp(name, MCINTEL_NVLIST_1ST_RANK) == 0) {
+				(void) nvpair_value_int32(nvp, &start_rank);
 			} else if (strcmp(name, FM_FMRI_HC_SERIAL_ID) == 0) {
 				(void) nvpair_value_string(nvp, &serial);
 			} else if (strcmp(name, FM_FMRI_HC_PART) == 0) {
@@ -219,7 +226,8 @@
 		    nvp = nvlist_next_nvpair(nvl[i], nvp)) {
 			name = nvpair_name(nvp);
 			if (strcmp(name, MCINTEL_NVLIST_RANKS) != 0 &&
-			    strcmp(name, FM_FAULT_FRU_LABEL) != 0) {
+			    strcmp(name, FM_FAULT_FRU_LABEL) != 0 &&
+			    strcmp(name, MCINTEL_NVLIST_1ST_RANK) != 0) {
 				(void) nvprop_add(mod, nvp, PGNAME(DIMM),
 				    dnode);
 			}
@@ -228,8 +236,8 @@
 			(void) topo_node_label_set(dnode, label, &err);
 
 		if (nranks) {
-			mc_add_ranks(mod, dnode, auth, i, ranks_nvp, nranks,
-			    serial, part, rev, maxranks);
+			mc_add_ranks(mod, dnode, auth, i, ranks_nvp, start_rank,
+			    nranks, serial, part, rev, maxranks);
 		}
 	}
 }
@@ -297,7 +305,13 @@
 		    "mc_nb_create: failed to find channel information\n");
 		return (-1);
 	}
-	if (nvlist_lookup_uint8(nvl, MCINTEL_NVLIST_NMEM, &nmc) != 0) {
+	if (nvlist_lookup_uint8(nvl, MCINTEL_NVLIST_NMEM, &nmc) == 0) {
+		/*
+		 * Assume channels are evenly divided among the controllers.
+		 * Convert nchannels to channels per controller
+		 */
+		nchannels = nchannels / nmc;
+	} else {
 		/*
 		 * if number of memory controllers is not specified then there
 		 * are two channels per controller and the nchannels is total
--- a/usr/src/lib/libdtrace/common/iscsit.d	Mon Jul 06 09:30:27 2009 -0400
+++ b/usr/src/lib/libdtrace/common/iscsit.d	Tue Jul 07 10:00:27 2009 -0400
@@ -26,8 +26,7 @@
 
 #pragma D depends_on library ip.d
 #pragma D depends_on library net.d	/* conninfo_t */
-#pragma D depends_on library scsi.d	/* scsicmd_t */
-#pragma D depends_on library iscsi.d	/* iscsiinfo_t */
+#pragma D depends_on library scsi.d	/* scsicmd_t and iscsiinfo_t */
 #pragma D depends_on module genunix
 #pragma D depends_on module iscsit
 #pragma D depends_on module idm
--- a/usr/src/lib/libdtrace/common/scsi.d	Mon Jul 06 09:30:27 2009 -0400
+++ b/usr/src/lib/libdtrace/common/scsi.d	Tue Jul 07 10:00:27 2009 -0400
@@ -62,3 +62,28 @@
 	uint32_t xfer_len;      /* transfer length */
 	uint32_t xfer_type;     /* Read (0) or Write (1) */
 } xferinfo_t;
+
+/*
+ * The iscsiinfo_t is used to provide identifying information about
+ * the target and the initiator and also some PDU level information
+ * such as lun, data length and sequence numbers.
+ */
+typedef struct iscsiinfo {
+	string ii_target;	/* target iqn */
+	string ii_initiator;	/* initiator iqn */
+	string ii_isid;         /* initiator session identifier */
+	string ii_tsih;         /* target session identifying handle */
+	string ii_transport;    /* transport type ("iser-ib", "sockets") */
+
+	uint64_t ii_lun;	/* target logical unit number */
+
+	uint32_t ii_itt;	/* initiator task tag */
+	uint32_t ii_ttt;	/* target transfer tag */
+
+	uint32_t ii_cmdsn;	/* command sequence number */
+	uint32_t ii_statsn;	/* status sequence number */
+	uint32_t ii_datasn;	/* data sequence number */
+
+	uint32_t ii_datalen;	/* length of data payload */
+	uint32_t ii_flags;	/* probe-specific flags */
+} iscsiinfo_t;
--- a/usr/src/uts/common/fs/zfs/dsl_dir.c	Mon Jul 06 09:30:27 2009 -0400
+++ b/usr/src/uts/common/fs/zfs/dsl_dir.c	Tue Jul 07 10:00:27 2009 -0400
@@ -96,7 +96,6 @@
 #endif
 	if (dd == NULL) {
 		dsl_dir_t *winner;
-		int err;
 
 		dd = kmem_zalloc(sizeof (dsl_dir_t), KM_SLEEP);
 		dd->dd_object = ddobj;
--- a/usr/src/uts/common/io/usb/clients/audio/usb_ac/usb_ac.c	Mon Jul 06 09:30:27 2009 -0400
+++ b/usr/src/uts/common/io/usb/clients/audio/usb_ac/usb_ac.c	Tue Jul 07 10:00:27 2009 -0400
@@ -4118,6 +4118,12 @@
 
 		mutex_enter(&uacp->usb_ac_mutex);
 
+		if (rv == ENODEV) {
+			USB_DPRINTF_L2(PRINT_MASK_ALL, uacp->usb_ac_log_handle,
+			    "Device is not available");
+			break;
+		}
+
 		if ((acp->acp_flags & ACP_ENABLED) && mp != NULL && rv == 0)
 			rv = usb_ac_read_msg(acp, mp);
 
--- a/usr/src/uts/common/io/usb/hcd/ehci/ehci_hub.c	Mon Jul 06 09:30:27 2009 -0400
+++ b/usr/src/uts/common/io/usb/hcd/ehci/ehci_hub.c	Tue Jul 07 10:00:27 2009 -0400
@@ -19,7 +19,7 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -905,6 +905,7 @@
 	uint16_t		port)
 {
 	uint_t			port_status;
+	int			i;
 
 	mutex_enter(&ehcip->ehci_int_mutex);
 
@@ -941,8 +942,22 @@
 
 	mutex_exit(&ehcip->ehci_int_mutex);
 
-	/* Wait 2ms for port to return to high speed mode */
-	delay(drv_usectohz(EHCI_PORT_RESUME_COMP_TIMEWAIT));
+	/*
+	 * Wait for port to return to high speed mode. It's necessary to poll
+	 * for resume completion for some high-speed devices to work correctly.
+	 */
+	for (i = 0; i < EHCI_PORT_RESUME_RETRY_MAX; i++) {
+		delay(drv_usectohz(EHCI_PORT_RESUME_COMP_TIMEWAIT));
+
+		mutex_enter(&ehcip->ehci_int_mutex);
+		port_status = Get_OpReg(ehci_rh_port_status[port]) &
+		    ~EHCI_RH_PORT_CLEAR_MASK;
+		mutex_exit(&ehcip->ehci_int_mutex);
+
+		if (!(port_status & EHCI_RH_PORT_RESUME)) {
+			break;
+		}
+	}
 }
 
 
@@ -958,6 +973,7 @@
 {
 	ehci_root_hub_t		*rh;
 	uint_t			port_status;
+	int			i;
 
 	mutex_enter(&ehcip->ehci_int_mutex);
 
@@ -999,7 +1015,7 @@
 
 		mutex_exit(&ehcip->ehci_int_mutex);
 
-		/* Wait 20ms for reset to complete */
+		/* Wait 50ms for reset to complete */
 		delay(drv_usectohz(EHCI_PORT_RESET_TIMEWAIT));
 
 		mutex_enter(&ehcip->ehci_int_mutex);
@@ -1013,10 +1029,23 @@
 		mutex_exit(&ehcip->ehci_int_mutex);
 
 		/*
-		 * Wait 2ms for hardware to enable this port
-		 * if connected usb device is high speed.
+		 * Wait for hardware to enable this port, if the connected
+		 * usb device is high speed. It's necessary to poll for reset
+		 * completion for some high-speed devices to be recognized
+		 * correctly.
 		 */
-		delay(drv_usectohz(EHCI_PORT_RESET_COMP_TIMEWAIT));
+		for (i = 0; i < EHCI_PORT_RESET_RETRY_MAX; i++) {
+			delay(drv_usectohz(EHCI_PORT_RESET_COMP_TIMEWAIT));
+
+			mutex_enter(&ehcip->ehci_int_mutex);
+			port_status = Get_OpReg(ehci_rh_port_status[port]) &
+			    ~EHCI_RH_PORT_CLEAR_MASK;
+			mutex_exit(&ehcip->ehci_int_mutex);
+
+			if (!(port_status & EHCI_RH_PORT_RESET)) {
+				break;
+			}
+		}
 
 		mutex_enter(&ehcip->ehci_int_mutex);
 
--- a/usr/src/uts/common/krtld/kobj.c	Mon Jul 06 09:30:27 2009 -0400
+++ b/usr/src/uts/common/krtld/kobj.c	Tue Jul 07 10:00:27 2009 -0400
@@ -19,7 +19,7 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -3496,9 +3496,11 @@
 			kobj_free(fullname, maxpathlen);
 			return (file);
 		}
+		while (*pathp == ' ')
+			pathp++;
 		if (*pathp == 0)
 			break;
-		pathp++;
+
 	}
 	kobj_free(fullname, maxpathlen);
 	if (_moddebug & MODDEBUG_ERRMSG) {
--- a/usr/src/uts/common/os/sig.c	Mon Jul 06 09:30:27 2009 -0400
+++ b/usr/src/uts/common/os/sig.c	Tue Jul 07 10:00:27 2009 -0400
@@ -481,7 +481,7 @@
 				 * the process when lwp_nostop is set.
 				 */
 				if (!lwp->lwp_nostop ||
-				    PTOU(curproc)->u_signal[sig-1] != SIG_DFL ||
+				    PTOU(p)->u_signal[sig-1] != SIG_DFL ||
 				    !sigismember(&stopdefault, sig))
 					return (1);
 			}
@@ -683,8 +683,6 @@
 		 */
 		for (;;) {
 			if ((sig = fsig(&t->t_sig, t)) != 0) {
-				if (sig == SIGCLD)
-					sigcld_found = 1;
 				toproc = 0;
 				if (tracing(p, sig) ||
 				    sigismember(&t->t_sigwait, sig) ||
@@ -766,15 +764,10 @@
 	mutex_exit(&p->p_lock);
 
 	/*
-	 * If SIGCLD was dequeued, search for other pending SIGCLD's.
-	 * Don't do it if we are returning SIGCLD and the signal
-	 * handler will be reset by psig(); this enables reliable
-	 * delivery of SIGCLD even when using the old, broken
-	 * signal() interface for setting the signal handler.
+	 * If SIGCLD was dequeued from the process's signal queue,
+	 * search for other pending SIGCLD's from the list of children.
 	 */
-	if (sigcld_found &&
-	    (sig != SIGCLD || !sigismember(&PTOU(curproc)->u_sigresethand,
-	    SIGCLD)))
+	if (sigcld_found)
 		sigcld_repost();
 
 	if (sig != 0)
@@ -1586,7 +1579,6 @@
 			sigaddset(&PTOU(curproc)->u_sigonstack, sig);
 		else
 			sigdelset(&PTOU(curproc)->u_sigonstack, sig);
-
 	} else if (disp == SIG_IGN ||
 	    (disp == SIG_DFL && sigismember(&ignoredefault, sig))) {
 		/*
@@ -1605,7 +1597,6 @@
 			sigdelset(&t->t_extsig, sig);
 			sigdelq(p, t, sig);
 		} while ((t = t->t_forw) != p->p_tlist);
-
 	} else {
 		/*
 		 * The signal action is being set to SIG_DFL and the default
@@ -1729,7 +1720,8 @@
 }
 
 /*
- * Common code called from sigcld() and issig_forreal()
+ * Common code called from sigcld() and from
+ * waitid() and issig_forreal() via sigcld_repost().
  * Give the parent process a SIGCLD if it does not have one pending,
  * else mark the child process so a SIGCLD can be posted later.
  */
@@ -1737,22 +1729,20 @@
 post_sigcld(proc_t *cp, sigqueue_t *sqp)
 {
 	proc_t *pp = cp->p_parent;
-	void (*handler)() = PTOU(pp)->u_signal[SIGCLD - 1];
 	k_siginfo_t info;
 
 	ASSERT(MUTEX_HELD(&pidlock));
 	mutex_enter(&pp->p_lock);
 
 	/*
-	 * If a SIGCLD is pending, or if SIGCLD is not now being caught,
-	 * then just mark the child process so that its SIGCLD will
-	 * be posted later, when the first SIGCLD is taken off the
-	 * queue or when the parent is ready to receive it, if ever.
+	 * If a SIGCLD is pending, then just mark the child process
+	 * so that its SIGCLD will be posted later, when the first
+	 * SIGCLD is taken off the queue or when the parent is ready
+	 * to receive it or accept it, if ever.
 	 */
-	if (handler == SIG_DFL || handler == SIG_IGN ||
-	    sigismember(&pp->p_sig, SIGCLD))
+	if (sigismember(&pp->p_sig, SIGCLD)) {
 		cp->p_pidflag |= CLDPEND;
-	else {
+	} else {
 		cp->p_pidflag &= ~CLDPEND;
 		if (sqp == NULL) {
 			/*
@@ -1787,15 +1777,8 @@
 {
 	proc_t *pp = curproc;
 	proc_t *cp;
-	void (*handler)() = PTOU(pp)->u_signal[SIGCLD - 1];
 	sigqueue_t *sqp;
 
-	/*
-	 * Don't bother if SIGCLD is not now being caught.
-	 */
-	if (handler == SIG_DFL || handler == SIG_IGN)
-		return;
-
 	sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
 	mutex_enter(&pidlock);
 	for (cp = pp->p_child; cp; cp = cp->p_sibling) {
--- a/usr/src/uts/common/sys/usb/hcd/ehci/ehci_hub.h	Mon Jul 06 09:30:27 2009 -0400
+++ b/usr/src/uts/common/sys/usb/hcd/ehci/ehci_hub.h	Tue Jul 07 10:00:27 2009 -0400
@@ -19,7 +19,7 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -100,11 +100,13 @@
  * These timeout values are specified in terms of microseconds.
  */
 #define	EHCI_RH_POLL_TIME		256000	/* RH polling interval */
-#define	EHCI_PORT_RESET_TIMEWAIT	20000	/* RH port reset time */
+#define	EHCI_PORT_RESET_TIMEWAIT	50000	/* RH port reset time */
 #define	EHCI_PORT_RESET_COMP_TIMEWAIT	2000	/* RH port reset complete */
 #define	EHCI_PORT_SUSPEND_TIMEWAIT	10000	/* RH port suspend time */
 #define	EHCI_PORT_RESUME_TIMEWAIT	20000	/* RH port resume time */
 #define	EHCI_PORT_RESUME_COMP_TIMEWAIT	2000	/* RH port resume complete */
+#define	EHCI_PORT_RESET_RETRY_MAX	10	/* RH port reset retry max */
+#define	EHCI_PORT_RESUME_RETRY_MAX	10	/* RH port resume retry max */
 
 #ifdef __cplusplus
 }
--- a/usr/src/uts/common/syscall/sigaction.c	Mon Jul 06 09:30:27 2009 -0400
+++ b/usr/src/uts/common/syscall/sigaction.c	Tue Jul 07 10:00:27 2009 -0400
@@ -20,16 +20,13 @@
  */
 
 /*
- * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
 /*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
 /*	  All Rights Reserved	*/
 
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
 #include <sys/param.h>
 #include <sys/types.h>
 #include <sys/sysmacros.h>
@@ -115,11 +112,8 @@
 	}
 
 	if (actp) {
-		if (sig == SIGCLD &&
-		    act.sa_handler != SIG_IGN &&
-		    act.sa_handler != SIG_DFL)
+		if (sig == SIGCLD)
 			sigcld_look = 1;
-
 		sigutok(&act.sa_mask, &set);
 		setsigact(sig, act.sa_handler, set, act.sa_flags);
 	}
@@ -211,11 +205,8 @@
 	}
 
 	if (actp) {
-		if (sig == SIGCLD &&
-		    act32.sa_handler != (caddr32_t)SIG_IGN &&
-		    act32.sa_handler != (caddr32_t)SIG_DFL)
+		if (sig == SIGCLD)
 			sigcld_look = 1;
-
 		sigutok(&act32.sa_mask, &set);
 		setsigact(sig, (void (*)())(uintptr_t)act32.sa_handler, set,
 		    act32.sa_flags);
--- a/usr/src/uts/common/syscall/ssig.c	Mon Jul 06 09:30:27 2009 -0400
+++ b/usr/src/uts/common/syscall/ssig.c	Tue Jul 07 10:00:27 2009 -0400
@@ -18,16 +18,14 @@
  *
  * CDDL HEADER END
  */
-/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
-/*	  All Rights Reserved	*/
-
 
 /*
- * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
+/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
+/*	  All Rights Reserved	*/
 
 #include <sys/param.h>
 #include <sys/types.h>
@@ -154,8 +152,7 @@
 		flags |= SA_NOCLDSTOP;
 		if (func == SIG_IGN)
 			flags |= SA_NOCLDWAIT;
-		else if (func != SIG_DFL)
-			sigcld_look = 1;
+		sigcld_look = 1;
 	}
 
 	setsigact(sig, func, nullsmask, flags);
--- a/usr/src/uts/intel/io/dktp/hba/ghd/ghd.c	Mon Jul 06 09:30:27 2009 -0400
+++ b/usr/src/uts/intel/io/dktp/hba/ghd/ghd.c	Tue Jul 07 10:00:27 2009 -0400
@@ -20,11 +20,10 @@
  */
 
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
 
 #include <sys/types.h>
 #include <sys/kmem.h>
@@ -56,11 +55,12 @@
 /*
  * Local configuration variables
  */
+#define	DEFAULT_GHD_TIMEOUT    50000	/* Amount of time to poll(50ms) */
 
-ulong_t	ghd_tran_abort_timeout = 5;
-ulong_t	ghd_tran_abort_lun_timeout = 5;
-ulong_t	ghd_tran_reset_target_timeout = 5;
-ulong_t	ghd_tran_reset_bus_timeout = 5;
+ulong_t	ghd_tran_abort_timeout = DEFAULT_GHD_TIMEOUT;
+ulong_t	ghd_tran_abort_lun_timeout = DEFAULT_GHD_TIMEOUT;
+ulong_t	ghd_tran_reset_target_timeout = DEFAULT_GHD_TIMEOUT;
+ulong_t	ghd_tran_reset_bus_timeout = DEFAULT_GHD_TIMEOUT;
 
 static int
 ghd_doneq_init(ccc_t *cccp)
@@ -446,6 +446,7 @@
 	gcmd_t	*gcmdp;
 	L2el_t	 gcmd_hold_queue;
 	int	 got_it = FALSE;
+	clock_t  poll_lbolt;
 	clock_t	 start_lbolt;
 	clock_t	 current_lbolt;
 
@@ -454,6 +455,7 @@
 	L2_INIT(&gcmd_hold_queue);
 
 	/* Que hora es? */
+	poll_lbolt = drv_usectohz((clock_t)polltime);
 	start_lbolt = ddi_get_lbolt();
 
 	/* unqueue and save all CMD/CCBs until I find the right one */
@@ -461,7 +463,7 @@
 
 		/* Give up yet? */
 		current_lbolt = ddi_get_lbolt();
-		if (polltime && (current_lbolt - start_lbolt >= polltime))
+		if (poll_lbolt && (current_lbolt - start_lbolt >= poll_lbolt))
 			break;
 
 		/*
--- a/usr/src/uts/intel/io/intel_nb5000/dimm_addr.c	Mon Jul 06 09:30:27 2009 -0400
+++ b/usr/src/uts/intel/io/intel_nb5000/dimm_addr.c	Tue Jul 07 10:00:27 2009 -0400
@@ -20,7 +20,7 @@
  */
 
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -45,13 +45,18 @@
 dimm_getphys(int branch, int rank, int bank, int ras, int cas)
 {
 	uint8_t i;
+	int num_ranks_per_branch;
 	uint64_t m;
 	uint64_t pa;
 	struct rank_base *rp;
 	struct rank_geometry *rgp;
 
-	ASSERT(rank < nb_dimms_per_channel * 2);
-	rp = &rank_base[(branch * nb_dimms_per_channel * 2) + rank];
+	/* max number of ranks per branch */
+	num_ranks_per_branch = (nb_chipset == INTEL_NB_5100) ?
+	    NB_5100_RANKS_PER_CHANNEL :
+	    nb_dimms_per_channel * nb_channels_per_branch;
+	ASSERT(rank < num_ranks_per_branch);
+	rp = &rank_base[(branch * num_ranks_per_branch) + rank];
 	rgp = (struct rank_geometry *)rp->rank_geometry;
 	if (rgp == NULL)
 		return (-1LL);
@@ -98,6 +103,7 @@
 dimm_getoffset(int branch, int rank, int bank, int ras, int cas)
 {
 	uint8_t i;
+	int num_ranks_per_branch;
 	uint64_t m;
 	uint64_t offset;
 	struct dimm_geometry *dgp;
@@ -106,9 +112,14 @@
 	uint64_t pa;
 	uint64_t cal_pa;
 
-	ASSERT(rank < nb_dimms_per_channel * 2);
-	rp = &rank_base[(branch * nb_dimms_per_channel * 2) + rank];
-	dgp = dimm_geometry[(branch * nb_dimms_per_channel) + rank/2];
+	/* max number of ranks per branch */
+	num_ranks_per_branch = (nb_chipset == INTEL_NB_5100) ?
+	    NB_5100_RANKS_PER_CHANNEL :
+	    nb_dimms_per_channel * nb_channels_per_branch;
+	ASSERT(rank < num_ranks_per_branch);
+	rp = &rank_base[(branch * num_ranks_per_branch) + rank];
+	dgp = dimm_geometry[(branch * nb_dimms_per_channel) +
+	    nb_rank2dimm(branch, rank)];
 	if (dgp == NULL)
 		return (TCODE_OFFSET(rank, bank, ras, cas));
 	rgp = (struct rank_geometry *)&dgp->rank_geometry[0];
@@ -224,6 +235,7 @@
 static cmi_errno_t
 inb_unumtopa(void *arg, mc_unum_t *unump, nvlist_t *nvl, uint64_t *pap)
 {
+	int num_ranks_per_branch;
 	mc_unum_t unum;
 	uint64_t pa;
 	struct rank_base *rp;
@@ -244,7 +256,13 @@
 		*pap = pa;
 		return (CMI_SUCCESS);
 	}
-	rp = &rank_base[(unump->unum_mc * nb_dimms_per_channel * 2) +
+
+
+	/* max number of ranks per branch */
+	num_ranks_per_branch = (nb_chipset == INTEL_NB_5100) ?
+	    NB_5100_RANKS_PER_CHANNEL :
+	    nb_dimms_per_channel * nb_channels_per_branch;
+	rp = &rank_base[(unump->unum_mc * num_ranks_per_branch) +
 	    unump->unum_rank];
 	pa = rp->base + (unump->unum_offset * rp->interleave);
 
@@ -257,20 +275,36 @@
 void
 dimm_init()
 {
+	int num_ranks_per_branch;
+
 	dimm_geometry = kmem_zalloc(sizeof (void *) *
 	    nb_number_memory_controllers * nb_dimms_per_channel, KM_SLEEP);
+
+	/* max number of ranks per branch */
+	num_ranks_per_branch = (nb_chipset == INTEL_NB_5100) ?
+	    NB_5100_RANKS_PER_CHANNEL :
+	    nb_dimms_per_channel * nb_channels_per_branch;
+
 	rank_base = kmem_zalloc(sizeof (struct rank_base) *
-	    nb_number_memory_controllers * nb_dimms_per_channel * 2, KM_SLEEP);
+	    nb_number_memory_controllers * num_ranks_per_branch, KM_SLEEP);
 }
 
 void
 dimm_fini()
 {
+	int num_ranks_per_branch;
+
 	kmem_free(dimm_geometry, sizeof (void *) *
 	    nb_number_memory_controllers * nb_dimms_per_channel);
 	dimm_geometry = 0;
+
+	/* max number of ranks per branch */
+	num_ranks_per_branch = (nb_chipset == INTEL_NB_5100) ?
+	    NB_5100_RANKS_PER_CHANNEL :
+	    nb_dimms_per_channel * nb_channels_per_branch;
+
 	kmem_free(rank_base, sizeof (struct rank_base) *
-	    nb_number_memory_controllers * nb_dimms_per_channel * 2);
+	    nb_number_memory_controllers * num_ranks_per_branch);
 	rank_base = 0;
 }
 
@@ -299,9 +333,16 @@
 	struct dimm_geometry *dimm;
 	struct rank_base *rp;
 	int interleave_nbits;
+	int num_ranks_per_branch;
 
-	dimm = dimm_geometry[(branch * nb_dimms_per_channel) + (rank / 2)];
-	rp = &rank_base[(branch * nb_dimms_per_channel * 2) + rank];
+	dimm = dimm_geometry[(branch * nb_dimms_per_channel) +
+	    nb_rank2dimm(branch, rank)];
+
+	/* max number of ranks per branch */
+	num_ranks_per_branch = (nb_chipset == INTEL_NB_5100) ?
+	    NB_5100_RANKS_PER_CHANNEL :
+	    nb_dimms_per_channel * nb_channels_per_branch;
+	rp = &rank_base[(branch * num_ranks_per_branch) + rank];
 	if (interleave == 1)
 		interleave_nbits = 0;
 	else if (interleave == 2)
--- a/usr/src/uts/intel/io/intel_nb5000/intel_nb5000.c	Mon Jul 06 09:30:27 2009 -0400
+++ b/usr/src/uts/intel/io/intel_nb5000/intel_nb5000.c	Tue Jul 07 10:00:27 2009 -0400
@@ -44,6 +44,8 @@
 #include "nb_log.h"
 #include "dimm_phys.h"
 
+int nb_check_validlog = 1;
+
 static uint32_t uerrcnt[2];
 static uint32_t cerrcnta[2][2];
 static uint32_t cerrcntb[2][2];
@@ -306,6 +308,156 @@
 	return (intr);
 }
 
+static struct mch_error_code nf_mem_error_code[] = {
+	{ 21, EMASK_MEM_M21, ERR_NF_MEM_M21 },
+	{ 20, EMASK_MEM_M20, ERR_NF_MEM_M20 },
+	{ 18, EMASK_MEM_M18, ERR_NF_MEM_M18 },
+	{ 16, EMASK_MEM_M16, ERR_NF_MEM_M16 },
+	{ 15, EMASK_MEM_M15, ERR_NF_MEM_M15 },
+	{ 14, EMASK_MEM_M14, ERR_NF_MEM_M14 },
+	{ 12, EMASK_MEM_M12, ERR_NF_MEM_M12 },
+	{ 11, EMASK_MEM_M11, ERR_NF_MEM_M11 },
+	{ 10, EMASK_MEM_M10, ERR_NF_MEM_M10 },
+	{ 6, EMASK_MEM_M6, ERR_NF_MEM_M6 },
+	{ 5, EMASK_MEM_M5, ERR_NF_MEM_M5 },
+	{ 4, EMASK_MEM_M4, ERR_NF_MEM_M4 },
+	{ 1, EMASK_MEM_M1, ERR_NF_MEM_M1 }
+};
+
+static int
+intel_nf_mem_err(uint32_t nf_mem)
+{
+	int rt = -1;
+	int nerr = 0;
+	uint32_t emask_mem = 0;
+	int i;
+	int sz;
+
+	sz = sizeof (nf_mem_error_code) / sizeof (struct mch_error_code);
+
+	for (i = 0; i < sz; i++) {
+		if (nf_mem & nf_mem_error_code[i].error_bit) {
+			rt = nf_mem_error_code[i].intel_error_list;
+			emask_mem |= nf_mem_error_code[i].emask;
+			nerr++;
+		}
+	}
+	if (emask_mem)
+		nb_mem_mask_mc(emask_mem);
+	if (nerr > 1)
+		rt = -1;
+	return (rt);
+}
+
+static char *
+nf_mem_error(const nb_regs_t *rp, void *data)
+{
+	uint32_t ferr_nf_mem, recmema, recmemb;
+	uint32_t nrecmema, nrecmemb, validlog;
+	int channel;
+	char *intr = "nb.unknown";
+	nb_mem_scatchpad_t *sp = &((nb_scatchpad_t *)data)->ms;
+
+	sp->rank = -1;
+	sp->dimm = -1;
+	sp->bank = -1;
+	sp->cas = -1;
+	sp->ras = -1LL;
+	sp->pa = -1LL;
+	sp->offset = -1;
+	ferr_nf_mem = rp->nb.nf_mem_regs.ferr_nf_mem;
+	if ((ferr_nf_mem & ERR_NF_MEM_MASK) == 0) {
+		/* no first error found */
+		sp->branch = -1;
+		sp->channel = -1;
+		sp->intel_error_list =
+		    intel_nf_mem_err(rp->nb.nf_mem_regs.nerr_nf_mem);
+		return (intr);
+	}
+	sp->intel_error_list = intel_nf_mem_err(ferr_nf_mem);
+
+	channel = (ferr_nf_mem >> ERR_MEM_CH_SHIFT) & 0x1;
+	sp->branch = channel;
+	sp->channel = -1;
+	if (ferr_nf_mem & ERR_NF_MEM_MASK) {
+		if (ferr_nf_mem & ERR_NF_MEM_ECC_UE) {
+			/*
+			 * uncorrectable ECC M1,M4-M6,M10-M12
+			 * There is only one channel per branch.
+			 * Invalidate the channel number so the mem ereport
+			 * has the same detector as existing 5000 ereports,
+			 * so we can leverage the existing Eversholt rules.
+			 */
+			validlog = rp->nb.nf_mem_regs.validlog;
+			if (ferr_nf_mem & ERR_NF_MEM_M1) {
+				nrecmema = rp->nb.nf_mem_regs.nrecmema;
+				nrecmemb = rp->nb.nf_mem_regs.nrecmemb;
+				/* check if the nrecmem log is valid */
+				if (validlog & 0x1 || nb_check_validlog == 0) {
+					sp->rank = (nrecmema >> 8) & RANK_MASK;
+					sp->bank = (nrecmema >> 12) & BANK_MASK;
+					sp->cas = (nrecmemb >> 16) & CAS_MASK;
+					sp->ras = nrecmemb & RAS_MASK;
+				}
+			} else {
+				recmema = rp->nb.nf_mem_regs.recmema;
+				recmemb = rp->nb.nf_mem_regs.recmemb;
+				/* check if the recmem log is valid */
+				if (validlog & 0x2 || nb_check_validlog == 0) {
+					sp->rank = (recmema >> 8) & RANK_MASK;
+					sp->bank = (recmema >> 12) & BANK_MASK;
+					sp->cas = (recmemb >> 16) & CAS_MASK;
+					sp->ras = recmemb & RAS_MASK;
+				}
+			}
+			intr = "nb.ddr2_mem_ue";
+		} else if ((ferr_nf_mem & ERR_NF_MEM_ECC_CE) != 0) {
+			/* correctable ECC M14-M16 */
+			recmema = rp->nb.nf_mem_regs.recmema;
+			recmemb = rp->nb.nf_mem_regs.recmemb;
+			validlog = rp->nb.nf_mem_regs.validlog;
+			/* check if the recmem log is valid */
+			if (validlog & 0x2 || nb_check_validlog == 0) {
+				sp->channel = channel;
+				sp->rank = (recmema >> 8) & RANK_MASK;
+				sp->dimm = nb_rank2dimm(sp->channel, sp->rank);
+				sp->bank = (recmema >> 12) & BANK_MASK;
+				sp->cas = (recmemb >> 16) & CAS_MASK;
+				sp->ras = recmemb & RAS_MASK;
+			}
+			intr = "nb.ddr2_mem_ce";
+		} else if ((ferr_nf_mem & ERR_NF_MEM_SPARE) != 0) {
+			/* spare dimm M20, M21 */
+			intr = "nb.ddr2_mem_ds";
+
+			/*
+			 * The channel can be valid here.
+			 * However, there is only one channel per branch and
+			 * to leverage the Eversholt rules of other chipsets,
+			 * the channel is ignored and let the rule find it out
+			 * from the topology.
+			 */
+			if (rp->nb.nf_mem_regs.spcps & SPCPS_SPARE_DEPLOYED) {
+				sp->rank =
+				    SPCPS_FAILED_RANK(rp->nb.nf_mem_regs.spcps);
+				nb_used_spare_rank(sp->branch, sp->rank);
+				nb_config_gen++;
+			}
+		} else if ((ferr_nf_mem & ERR_NF_MEM_M18) != 0) {
+			sp->channel = channel;
+			intr = "nb.ddr2_spd";	/* SPD protocol */
+
+		}
+	}
+	if (sp->ras != -1) {
+		sp->pa = dimm_getphys(sp->branch, sp->rank, sp->bank, sp->ras,
+		    sp->cas);
+		sp->offset = dimm_getoffset(sp->branch, sp->rank, sp->bank,
+		    sp->ras, sp->cas);
+	}
+	return (intr);
+}
+
 static struct mch_error_code fat_int_error_code[] = {
 	{ 14, EMASK_INT_B14, ERR_FAT_INT_B14 },
 	{ 12, EMASK_INT_B12, ERR_FAT_INT_B12 },
@@ -1053,6 +1205,63 @@
 }
 
 static void
+log_nf_mem_err(nb_regs_t *rp, int willpanic, int *interpose)
+{
+	int channel, branch;
+	int t = 0;
+
+	rp->flag = NB_REG_LOG_NF_MEM;
+
+	/* Memory error registers */
+	rp->nb.nf_mem_regs.ferr_nf_mem = FERR_NF_MEM_RD(interpose);
+	channel = (rp->nb.nf_mem_regs.ferr_nf_mem >> 28) & 0x1;
+	branch = channel;
+	rp->nb.nf_mem_regs.nerr_nf_mem = NERR_NF_MEM_RD(&t);
+	*interpose |= t;
+	rp->nb.nf_mem_regs.redmema = MEM_REDMEMA_RD(branch);
+	rp->nb.nf_mem_regs.redmemb = MEM_REDMEMB_RD(branch);
+	rp->nb.nf_mem_regs.recmema = MEM_RECMEMA_RD(branch);
+	rp->nb.nf_mem_regs.recmemb = MEM_RECMEMB_RD(branch);
+	rp->nb.nf_mem_regs.nrecmema = MEM_NRECMEMA_RD(branch);
+	rp->nb.nf_mem_regs.nrecmemb = MEM_NRECMEMB_RD(branch);
+
+	/* spare rank */
+	rp->nb.nf_mem_regs.spcps = SPCPS_RD(branch);
+	rp->nb.nf_mem_regs.spcpc = SPCPC_RD(branch);
+
+	/* RAS registers */
+	rp->nb.nf_mem_regs.cerrcnt = MEM_CERRCNT_RD(branch);
+	rp->nb.nf_mem_regs.cerrcnt_ext = (uint32_t)MEM_CERRCNT_EXT_RD(branch);
+	rp->nb.nf_mem_regs.cerrcnt_last = cerrcnta[branch][channel & 1];
+	rp->nb.nf_mem_regs.cerrcnt_ext_last = cerrcntb[branch][channel & 1];
+	cerrcnta[branch][channel & 1] = rp->nb.nf_mem_regs.cerrcnt;
+	cerrcntb[branch][channel & 1] = rp->nb.nf_mem_regs.cerrcnt_ext;
+	rp->nb.nf_mem_regs.badram = BADRAMA_RD(branch);
+	rp->nb.nf_mem_regs.badcnt = BADCNT_RD(branch);
+	rp->nb.nf_mem_regs.validlog = VALIDLOG_RD(branch);
+
+	if (!willpanic) {
+		if (rp->nb.nf_mem_regs.ferr_nf_mem || *interpose)
+			FERR_NF_MEM_WR(rp->nb.nf_mem_regs.ferr_nf_mem);
+		if (rp->nb.nf_mem_regs.nerr_nf_mem)
+			NERR_NF_MEM_WR(rp->nb.nf_mem_regs.nerr_nf_mem);
+		/*
+		 * if interpose, write read-only registers to clear from pci
+		 * cache
+		 */
+		if (*interpose) {
+			MEM_NRECMEMA_WR(branch);
+			MEM_NRECMEMB_WR(branch);
+			MEM_REDMEMA_WR(branch);
+			MEM_REDMEMB_WR(branch);
+			MEM_RECMEMA_WR(branch);
+			MEM_RECMEMB_WR(branch);
+			SPCPS_WR(branch);
+		}
+	}
+}
+
+static void
 log_ferr(uint64_t ferr, uint32_t *nerrp, nb_logout_t *log, int willpanic)
 {
 	nb_regs_t *rp = &log->nb_regs;
@@ -1070,6 +1279,9 @@
 	} else if ((ferr & GE_FBD_NF) != 0) {
 		log_nf_fbd_err(rp, willpanic, &interpose);
 		*nerrp = nerr & ~GE_NERR_FBD_NF;
+	} else if ((ferr & GE_MEM_NF) != 0) {
+		log_nf_mem_err(rp, willpanic, &interpose);
+		*nerrp = nerr & ~GE_NERR_MEM_NF;
 	} else if ((ferr & (GE_FERR_FSB_FATAL | GE_FERR_FSB_NF)) != 0) {
 		log_fsb_err(ferr, rp, willpanic, &interpose);
 		*nerrp = nerr & ~(GE_NERR_FSB_FATAL | GE_NERR_FSB_NF);
@@ -1113,6 +1325,9 @@
 	} else if ((err & GE_NERR_FBD_NF) != 0) {
 		log_nf_fbd_err(rp, willpanic, &interpose);
 		*errp = err & ~GE_NERR_FBD_NF;
+	} else if ((err & GE_NERR_MEM_NF) != 0) {
+		log_nf_mem_err(rp, willpanic, &interpose);
+		*errp = err & ~GE_NERR_MEM_NF;
 	} else if ((err & (GE_NERR_FSB_FATAL | GE_NERR_FSB_NF)) != 0) {
 		log_fsb_err(GE_NERR_TO_FERR_FSB(err), rp, willpanic,
 		    &interpose);
@@ -1453,6 +1668,78 @@
 }
 
 static void
+nb_nf_mem_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
+    nb_scatchpad_t *data)
+{
+	nb_mem_scatchpad_t *sp;
+	char buf[32];
+
+	sp = &((nb_scatchpad_t *)data)->ms;
+
+	if (sp->dimm == -1 && sp->rank != -1) {
+		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RANK,
+		    DATA_TYPE_INT32, sp->rank, NULL);
+	}
+	if (sp->ras != -1) {
+		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BANK,
+		    DATA_TYPE_INT32, sp->bank, NULL);
+		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CAS,
+		    DATA_TYPE_INT32, sp->cas, NULL);
+		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RAS,
+		    DATA_TYPE_INT32, sp->ras, NULL);
+		if (sp->offset != -1LL) {
+			fm_payload_set(payload, FM_FMRI_MEM_OFFSET,
+			    DATA_TYPE_UINT64, sp->offset, NULL);
+		}
+		if (sp->pa != -1LL) {
+			fm_payload_set(payload, FM_FMRI_MEM_PHYSADDR,
+			    DATA_TYPE_UINT64, sp->pa, NULL);
+		}
+	}
+	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_NF_MEM,
+	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.ferr_nf_mem, NULL);
+	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_NF_MEM,
+	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.nerr_nf_mem, NULL);
+	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECMEMA,
+	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.recmema, NULL);
+	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECMEMB,
+	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.recmemb, NULL);
+	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_REDMEMA,
+	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.redmema, NULL);
+	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_REDMEMB,
+	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.redmemb, NULL);
+	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECMEMA,
+	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.nrecmema, NULL);
+	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECMEMB,
+	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.nrecmemb, NULL);
+	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SPCPS,
+	    DATA_TYPE_UINT8, nb_regs->nb.nf_mem_regs.spcps, NULL);
+	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SPCPC,
+	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.spcpc, NULL);
+	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNT,
+	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.cerrcnt, NULL);
+	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNT_LAST,
+	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.cerrcnt_last, NULL);
+	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNT_EXT,
+	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.cerrcnt_ext, NULL);
+	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNT_EXT_LAST,
+	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.cerrcnt_ext_last, NULL);
+	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADRAM,
+	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.badram, NULL);
+	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADCNT,
+	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.badcnt, NULL);
+	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_VALIDLOG,
+	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.validlog, NULL);
+
+	if (sp->intel_error_list >= 0)
+		(void) snprintf(buf, sizeof (buf), "M%d", sp->intel_error_list);
+	else
+		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
+	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
+	    DATA_TYPE_STRING, buf, NULL);
+}
+
+static void
 nb_dma_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload)
 {
 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PCISTS,
@@ -1519,6 +1806,9 @@
 	case NB_REG_LOG_THR:
 		nb_thr_err_payload(nb_regs, payload, data);
 		break;
+	case NB_REG_LOG_NF_MEM:
+		nb_nf_mem_err_payload(nb_regs, payload, data);
+		break;
 	default:
 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_GLOBAL,
 		    DATA_TYPE_UINT64, nb_regs->ferr, NULL);
@@ -1720,6 +2010,41 @@
 	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "otf");
 }
 
+void
+nb_nf_mem_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
+    void *data)
+{
+	char *intr;
+	nb_mem_scatchpad_t *sp;
+
+	intr = nf_mem_error(nb_regs, data);
+	sp = &((nb_scatchpad_t *)data)->ms;
+
+	if (sp->dimm != -1) {
+		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 5,
+		    "motherboard", 0,
+		    "memory-controller", sp->branch,
+		    "dram-channel", sp->channel,
+		    "dimm", sp->dimm,
+		    "rank", sp->rank);
+	} else if (sp->channel != -1) {
+		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 3,
+		    "motherboard", 0,
+		    "memory-controller", sp->branch,
+		    "dram-channel", sp->channel);
+	} else if (sp->branch != -1) {
+		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 2,
+		    "motherboard", 0,
+		    "memory-controller", sp->branch);
+	} else {
+		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
+		    "motherboard", 0);
+	}
+
+	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s",
+	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, intr);
+}
+
 
 nvlist_t *
 nb_report(const nb_regs_t *nb_regs, char *class, nv_alloc_t *nva, void *scratch)
@@ -1748,6 +2073,9 @@
 	case NB_REG_LOG_THR:
 		nb_thr_report(nb_regs, class, detector, scratch);
 		break;
+	case NB_REG_LOG_NF_MEM:
+		nb_nf_mem_report(nb_regs, class, detector, scratch);
+		break;
 	default:
 		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
 		    "motherboard", 0);
--- a/usr/src/uts/intel/io/intel_nb5000/intel_nbdrv.c	Mon Jul 06 09:30:27 2009 -0400
+++ b/usr/src/uts/intel/io/intel_nb5000/intel_nbdrv.c	Tue Jul 07 10:00:27 2009 -0400
@@ -106,6 +106,7 @@
 	return (base);
 }
 
+/*ARGSUSED*/
 void
 inb_rank(nvlist_t *newdimm, nb_dimm_t *nb_dimm, uint8_t channel, uint32_t dimm)
 {
@@ -122,7 +123,8 @@
 		uint64_t hole_base;
 		uint64_t hole_size;
 
-		dimm_base = rank_to_base(channel/2, dimm*2 + i, &interleave,
+		dimm_base = rank_to_base(channel/nb_channels_per_branch,
+		    nb_dimm->start_rank + i, &interleave,
 		    &limit, &hole_base, &hole_size, &way, &branch_interleave);
 		(void) nvlist_alloc(&newrank[i], NV_UNIQUE_NAME, KM_SLEEP);
 
@@ -178,6 +180,8 @@
 	    (uint32_t)nb_dimm->ncolumn);
 	(void) nvlist_add_uint32(newdimm, "nrow", (uint32_t)nb_dimm->nrow);
 	(void) nvlist_add_uint32(newdimm, "width", (uint32_t)nb_dimm->width);
+	(void) nvlist_add_int32(newdimm, MCINTEL_NVLIST_1ST_RANK,
+	    (int32_t)nb_dimm->start_rank);
 	(void) nvlist_add_uint32(newdimm, "ranks", (uint32_t)nb_dimm->nranks);
 	inb_rank(newdimm, nb_dimm, channel, dimm);
 	(void) nvlist_add_uint32(newdimm, "manufacture-id",
@@ -220,7 +224,7 @@
 {
 	nvlist_t **dimmlist;
 	nvlist_t **newchannel;
-	int nchannels = nb_number_memory_controllers * 2;
+	int nchannels = nb_number_memory_controllers * nb_channels_per_branch;
 	int nd;
 	uint8_t i, j;
 	nb_dimm_t **dimmpp;
@@ -274,6 +278,9 @@
 	case INTEL_NB_5400B:
 		mc = "Intel 5400B";
 		break;
+	case INTEL_NB_5100:
+		mc = "Intel 5100";
+		break;
 	case INTEL_NB_5000P:
 		mc = "Intel 5000P";
 		break;
@@ -302,6 +309,9 @@
 	(void) nvlist_add_uint8(nvl, MCINTEL_NVLIST_VERSTR,
 	    MCINTEL_NVLIST_VERS);
 	(void) nvlist_add_string(nvl, "memory-controller", inb_mc_name());
+	if (nb_chipset == INTEL_NB_5100)
+		(void) nvlist_add_uint8(nvl, MCINTEL_NVLIST_NMEM,
+		    (uint8_t)nb_number_memory_controllers);
 	inb_dimmlist(nvl);
 
 	if (inb_mc_nvl)
--- a/usr/src/uts/intel/io/intel_nb5000/nb5000.h	Mon Jul 06 09:30:27 2009 -0400
+++ b/usr/src/uts/intel/io/intel_nb5000/nb5000.h	Tue Jul 07 10:00:27 2009 -0400
@@ -34,8 +34,12 @@
 #include <sys/cpu_module.h>
 
 #define	NB_5000_MAX_MEM_CONTROLLERS	2
-#define	NB_MAX_DIMMS_PER_CHANNEL	(nb_chipset == INTEL_NB_7300 ? 8 : 4)
-#define	NB_MEM_BRANCH_SELECT		(nb_chipset == INTEL_NB_5400 ? 2 : 3)
+#define	NB_MAX_DIMMS_PER_CHANNEL	(nb_chipset == INTEL_NB_5100 ? 3 : \
+	(nb_chipset == INTEL_NB_7300 ? 8 : 4))
+#define	NB_MAX_CHANNELS_PER_BRANCH	2
+#define	NB_5100_RANKS_PER_CHANNEL	6
+#define	NB_MEM_BRANCH_SELECT \
+	(nb_chipset == INTEL_NB_5400 || nb_chipset == INTEL_NB_5100 ? 2 : 3)
 #define	NB_MAX_MEM_BRANCH_SELECT	3
 #define	NB_MEM_RANK_SELECT		(nb_chipset == INTEL_NB_7300 ? 7 : 5)
 #define	NB_MAX_MEM_RANK_SELECT		7
@@ -64,20 +68,29 @@
 #define	TLOW_MAX	0x100000000ULL
 
 #define	MTR_PRESENT(mtr) \
-	((mtr) & (nb_chipset == INTEL_NB_5400 ? 0x0400 : 0x0100))
+	((mtr) & (nb_chipset == INTEL_NB_5400 || nb_chipset == INTEL_NB_5100 ? \
+	0x0400 : 0x0100))
 #define	MTR_ETHROTTLE(mtr) \
-	((mtr) & (nb_chipset == INTEL_NB_5400 ? 0x0200 : 0x0080))
+	((mtr) & (nb_chipset == INTEL_NB_5400 || nb_chipset == INTEL_NB_5100 ? \
+	0x0200 : 0x0080))
 #define	MTR_WIDTH(mtr) \
-	(((mtr) & (nb_chipset == INTEL_NB_5400 ? 0x0100 : 0x0040)) ? 8 : 4)
+	((mtr) & (nb_chipset == INTEL_NB_5400 || nb_chipset == INTEL_NB_5100 ? \
+	0x0100 : 0x0040) ? 8 : 4)
 #define	MTR_NUMBANK(mtr) \
-	(((mtr) & (nb_chipset == INTEL_NB_5400 ? 0x0040 : 0x0020)) ? 8 : 4)
-#define	MTR_NUMRANK(mtr) \
-	(((mtr) & (nb_chipset == INTEL_NB_5400 ? 0x0020 : 0x0010)) ? 2 : 1)
+	((mtr) & (nb_chipset == INTEL_NB_5400 || nb_chipset == INTEL_NB_5100 ? \
+	0x0040 : 0x0020) ? 8 : 4)
+#define	MTR_NUMRANK(mtr) (nb_chipset == INTEL_NB_5100 ? 1 : \
+	(((mtr) & (nb_chipset == INTEL_NB_5400 ? 0x0020 : 0x0010)) ? 2 : 1))
 #define	MTR_NUMROW(mtr) ((((mtr) >> 2) & 3) + 13)
 #define	MTR_NUMCOL(mtr) (((mtr) & 3) + 10)
 
 #define	MTR_DIMMSIZE(mtr) 	((1ULL << (MTR_NUMCOL(mtr) + MTR_NUMROW(mtr))) \
 	* MTR_NUMRANK(mtr) * MTR_NUMBANK(mtr) * MTR_WIDTH(mtr))
+#define	DIMMSIZE(nrow, ncol, nrank, nbank, width) \
+	((1ULL << ((ncol) + (nrow))) * (nrank) * (nbank) * (width))
+#define	MTR_DDR2_DIMMSIZE(mtr, nrank) \
+	((1ULL << (MTR_NUMCOL(mtr) + MTR_NUMROW(mtr))) \
+	* (nrank) * MTR_NUMBANK(mtr) * MTR_WIDTH(mtr))
 
 /* FERR_GLOBAL and NERR_GLOBAL */
 #define	GE_FERR_FSB3_FATAL	0x800000000ULL	/* FSB3 Fatal Error */
@@ -114,6 +127,8 @@
 #define	GE_FERR_FBD1_NF	0x00000200	/* FBD channel 1 Non-Fatal Error */
 #define	GE_FERR_FBD0_NF	0x00000100	/* FBD channel 0 Non-Fatal Error */
 #define	GE_FERR_FBD_NF	0x00000800	/* FBD channel Non-Fatal Error */
+#define	GE_FERR_MEM1_NF	0x00000200	/* DDR channel 1 Non-Fatal Error */
+#define	GE_FERR_MEM0_NF	0x00000100	/* DDR channel 0 Non-Fatal Error */
 #define	GE_FERR_THERMAL_NF 0x00000400	/* Thermal Non-Fatal Error */
 #define	GE_PCIEX9_NF	0x00000200	/* PCI Express dev 9 Non-Fatal Error */
 #define	GE_PCIEX8_NF	0x00000100	/* PCI Express dev 8 Non-Fatal Error */
@@ -128,11 +143,14 @@
 
 #define	GE_NERR_FSB2_FATAL	0x08000000 /* FSB2 Fatal Error */
 #define	GE_NERR_FSB3_FATAL	0x04000000 /* FSB3 Fatal Error */
-#define	GE_NERR_FBD_FATAL	0x01000000 /* FBD channel Fatal Error */
+#define	GE_NERR_FBD_FATAL	(nb_chipset == INTEL_NB_5100 ? 0 : 0x01000000)
+					/* FBD channel Fatal Error */
 #define	GE_NERR_FSB2_NF		0x00000800 /* FSB2 Non-Fatal Error */
 #define	GE_NERR_FSB3_NF		0x00000400 /* FSB3 Non-Fatal Error */
-#define	GE_NERR_FBD_NF		0x00000100 /* FBD channel Non-Fatal Error */
-
+#define	GE_NERR_FBD_NF		(nb_chipset == INTEL_NB_5100 ? 0 : 0x00000100)
+					/* FBD channel Non-Fatal Error */
+#define	GE_NERR_MEM_NF		(nb_chipset == INTEL_NB_5100 ? 0x00000100 : 0)
+					/* DDR channel0,1 Non-Fatal Error */
 #define	ERR_FAT_FSB_F9		0x20	/* F9Msk FSB Protocol */
 #define	ERR_FAT_FSB_F2		0x08	/* F2Msk Unsupported Bus Transaction */
 #define	ERR_FAT_FSB_F1		0x01 	/* F1Msk Request/Address Parity */
@@ -287,6 +305,47 @@
 	EMASK_FBD_M11|EMASK_FBD_M10|EMASK_FBD_M9|EMASK_FBD_M8|EMASK_FBD_M7| \
 	EMASK_FBD_M6|EMASK_FBD_M5|EMASK_FBD_M4)
 
+/* FERR_NF_MEM: MC First non-fatal errors */
+#define	ERR_MEM_CH_SHIFT	28	/* channel index in nf_mem */
+
+#define	ERR_NF_MEM_M21	0x00200000	/* M21Err Spare Copy Completed */
+#define	ERR_NF_MEM_M20	0x00100000	/* M20Err Spare Copy Initiated */
+#define	ERR_NF_MEM_M18	0x00040000	/* M18Err SPD protocol */
+#define	ERR_NF_MEM_M16	0x00010000	/* M16Err Correctable Patrol Data ECC */
+#define	ERR_NF_MEM_M15	0x00008000	/* M15Err Correctable Spare-copy ECC */
+#define	ERR_NF_MEM_M14	0x00004000	/* M14Err Correctable demand data ECC */
+#define	ERR_NF_MEM_M12	0x00001000	/* M12Err non-aliased ue Patrol ECC */
+#define	ERR_NF_MEM_M11	0x00000800	/* M11Err non-aliased ue  Spare-copy */
+#define	ERR_NF_MEM_M10	0x00000400	/* M10Err non-aliased ue demand data */
+#define	ERR_NF_MEM_M6	0x00000040	/* M6Err aliased ue Patrol Data ECC */
+#define	ERR_NF_MEM_M5	0x00000020	/* M5Err aliased ue Spare-copy ECC */
+#define	ERR_NF_MEM_M4	0x00000010	/* M4Err aliased ue demand data ECC */
+#define	ERR_NF_MEM_M1	0x00000002	/* M1Err ue data ECC on replay */
+
+#define	ERR_NF_MEM_MASK 0x0003fffff
+#define	ERR_NF_MEM_ECC_UE	(ERR_NF_MEM_M12|ERR_NF_MEM_M11|ERR_NF_MEM_M10| \
+    ERR_NF_MEM_M6|ERR_NF_MEM_M5|ERR_NF_MEM_M4|ERR_NF_MEM_M1)
+#define	ERR_NF_MEM_ECC_CE	(ERR_NF_MEM_M16|ERR_NF_MEM_M15|ERR_NF_MEM_M14)
+#define	ERR_NF_MEM_SPARE	(ERR_NF_MEM_M21|ERR_NF_MEM_M20)
+
+#define	EMASK_MEM_M21	ERR_NF_MEM_M21
+#define	EMASK_MEM_M20	ERR_NF_MEM_M20
+#define	EMASK_MEM_M18	ERR_NF_MEM_M18
+#define	EMASK_MEM_M16	ERR_NF_MEM_M16
+#define	EMASK_MEM_M15	ERR_NF_MEM_M15
+#define	EMASK_MEM_M14	ERR_NF_MEM_M14
+#define	EMASK_MEM_M12	ERR_NF_MEM_M12
+#define	EMASK_MEM_M11	ERR_NF_MEM_M11
+#define	EMASK_MEM_M10	ERR_NF_MEM_M10
+#define	EMASK_MEM_M6	ERR_NF_MEM_M6
+#define	EMASK_MEM_M5	ERR_NF_MEM_M5
+#define	EMASK_MEM_M4	ERR_NF_MEM_M4
+#define	EMASK_MEM_M1	ERR_NF_MEM_M1
+
+#define	EMASK_MEM_NF (EMASK_FBD_M21|EMASK_FBD_M20|EMASK_FBD_M18|EMASK_FBD_M16| \
+	EMASK_FBD_M15|EMASK_FBD_M14|EMASK_FBD_M12|EMASK_FBD_M11|EMASK_FBD_M10| \
+	EMASK_MEM_M6|EMASK_MEM_M5|EMASK_MEM_M4|EMASK_MEM_M1)
+
 #define	ERR_INT_ALL	(nb_chipset == INTEL_NB_5400 ? 0xffffffff : 0xff)
 
 #define	ERR_FAT_INT_B14	0x0400	/* B14Msk SF Scrub DBE */
@@ -354,10 +413,14 @@
 	EMASK_INT_B1)
 #define	EMASK_INT_NF	(EMASK_INT_B8|EMASK_INT_B6|EMASK_INT_B5)
 #define	GE_FBD_FATAL ((nb_chipset == INTEL_NB_5400) ? GE_FERR_FBD_FATAL : \
+	(nb_chipset == INTEL_NB_5100) ? 0 : \
 	(GE_FERR_FBD0_FATAL|GE_FERR_FBD1_FATAL|GE_FERR_FBD2_FATAL| \
 	GE_FERR_FBD3_FATAL))
 #define	GE_FBD_NF ((nb_chipset == INTEL_NB_5400) ? GE_FERR_FBD_NF : \
+	(nb_chipset == INTEL_NB_5100) ? 0 : \
 	(GE_FERR_FBD0_NF|GE_FERR_FBD1_NF|GE_FERR_FBD2_NF|GE_FERR_FBD3_NF))
+#define	GE_MEM_NF	((nb_chipset == INTEL_NB_5100) ? \
+	(GE_FERR_MEM0_NF|GE_FERR_MEM1_NF) : 0)
 
 #define	EMASK_UNCOR_PEX_IO18	0x00200000	/* ESI Reset timeout */
 #define	EMASK_UNCOR_PEX_IO2	0x00100000	/* Received an unsupported */
@@ -807,11 +870,16 @@
 					nb_pci_putb(0, 16, 2, 0xd3, val); \
 				}
 
-#define	NRECINT_RD()		nb_pci_getl(0, 16, 2, 0xc4, 0)
-#define	RECINT_RD()		nb_pci_getl(0, 16, 2, 0xc8, 0)
+#define	NRECINT_RD()		nb_pci_getl(0, 16, 2, \
+	nb_chipset == INTEL_NB_5400 ? 0xc8 : 0xc4, 0)
+#define	RECINT_RD()		nb_pci_getl(0, 16, 2, \
+	nb_chipset == INTEL_NB_5400 ? 0xcc : 0xc8, 0)
 
-#define	NRECINT_WR()		nb_pci_putl(0, 16, 2, 0xc4, 0)
-#define	RECINT_WR()		nb_pci_putl(0, 16, 2, 0xc8, 0)
+#define	NRECINT_WR()		nb_pci_putl(0, 16, 2, \
+	nb_chipset == INTEL_NB_5400 ? 0xc8 : 0xc4, 0)
+#define	RECINT_WR()		nb_pci_putl(0, 16, 2, \
+	nb_chipset == INTEL_NB_5400 ? 0xcc : 0xc8, 0)
+
 
 #define	FERR_FAT_FBD_RD(ip)	nb_pci_getl(0, 16, 1, 0x98, ip)
 #define	NERR_FAT_FBD_RD(ip)	nb_pci_getl(0, 16, 1, 0x9c, ip)
@@ -1008,12 +1076,69 @@
 	else if (nb_chipset == INTEL_NB_7300) \
 		nb_pci_putw(0, 16, 1, 0xf8, 0); \
 
+#define	FERR_NF_MEM_RD(ip)	nb_pci_getl(0, 16, 1, 0xa0, ip)
+#define	NERR_NF_MEM_RD(ip)	nb_pci_getl(0, 16, 1, 0xa4, ip)
+#define	EMASK_MEM_RD()		nb_pci_getl(0, 16, 1, 0xa8, 0)
+#define	ERR0_MEM_RD()		nb_pci_getl(0, 16, 1, 0xac, 0)
+#define	ERR1_MEM_RD()		nb_pci_getl(0, 16, 1, 0xb0, 0)
+#define	ERR2_MEM_RD()		nb_pci_getl(0, 16, 1, 0xb4, 0)
+#define	MCERR_MEM_RD()		nb_pci_getl(0, 16, 1, 0xb8, 0)
+#define	FERR_NF_MEM_WR(val)	\
+	nb_pci_putl(0, 16, 1, 0xa0, (val))
+#define	NERR_NF_MEM_WR(val)	\
+	nb_pci_putl(0, 16, 1, 0xa4, (val))
+#define	EMASK_MEM_WR(val)	\
+	nb_pci_putl(0, 16, 1, 0xa8, (val))
+#define	ERR0_MEM_WR(val)	\
+	nb_pci_putl(0, 16, 1, 0xac, (val))
+#define	ERR1_MEM_WR(val)	\
+	nb_pci_putl(0, 16, 1, 0xb0, (val))
+#define	ERR2_MEM_WR(val)	\
+	nb_pci_putl(0, 16, 1, 0xb4, (val))
+#define	MCERR_MEM_WR(val)	\
+	nb_pci_putl(0, 16, 1, 0xb8, (val))
+#define	VALIDLOG_RD(branch)	\
+	nb_pci_getl(0, (branch) ? 22 : 21, 0, 0x18c, 0)
+#define	MEM_NRECMEMA_RD(branch) \
+	nb_pci_getl(0, (branch) ? 22 : 21, 0, 0x190, 0)
+#define	MEM_NRECMEMB_RD(branch) \
+	nb_pci_getl(0, (branch) ? 22 : 21, 0, 0x194, 0)
+#define	MEM_REDMEMA_RD(branch) \
+	nb_pci_getl(0, (branch) ? 22 : 21, 0, 0x198, 0)
+#define	MEM_REDMEMB_RD(branch) \
+	nb_pci_getl(0, (branch) ? 22 : 21, 0, 0x19c, 0)
+#define	MEM_RECMEMA_RD(branch) \
+	nb_pci_getl(0, (branch) ? 22 : 21, 0, 0x1a0, 0)
+#define	MEM_RECMEMB_RD(branch) \
+	nb_pci_getl(0, (branch) ? 22 : 21, 0, 0x1a4, 0)
+#define	MEM_CERRCNT_RD(branch) nb_pci_getl(0, 21, 0, 0x180, 0)
+#define	MEM_CERRCNT_EXT_RD(branch) nb_pci_getw(0, 21, 0, 0x184, 0)
+#define	MEM_NRECMEMA_WR(branch) \
+	nb_pci_putl(0, (branch) ? 22 : 21, 0, 0x190, 0)
+#define	MEM_NRECMEMB_WR(branch) \
+	nb_pci_putl(0, (branch) ? 22 : 21, 0, 0x194, 0)
+#define	MEM_REDMEMA_WR(branch) \
+	nb_pci_putl(0, (branch) ? 22 : 21, 0, 0x198, 0)
+#define	MEM_REDMEMB_WR(branch) \
+	nb_pci_putl(0, (branch) ? 22 : 21, 0, 0x19c, 0)
+#define	MEM_RECMEMA_WR(branch) \
+	nb_pci_putl(0, (branch) ? 22 : 21, 0, 0x1a0, 0)
+#define	MEM_RECMEMB_WR(branch) \
+	nb_pci_putl(0, (branch) ? 22 : 21, 0, 0x1a4, 0)
+
 #define	MC_RD()		nb_pci_getl(0, 16, 1, 0x40, 0)
 #define	MC_WR(val)	nb_pci_putl(0, 16, 1, 0x40, val)
 #define	MCA_RD()	nb_pci_getl(0, 16, 1, 0x58, 0)
 #define	TOLM_RD()	nb_pci_getw(0, 16, 1, 0x6c, 0)
 
-#define	MTR_RD(branch, dimm) (nb_chipset == INTEL_NB_5400 ? \
+#define	MTR_5100_RD(channel, rank) ((rank) < 4 ? \
+	nb_pci_getw(0, (channel) == 0 ? 21 : 22, 0, 0x154 + ((rank) * 2), 0) : \
+	nb_pci_getw(0, (channel) == 0 ? 21 : 22, 0, 0x1b0 + (((rank) & 3) * 2),\
+	0))
+
+#define	MTR_RD(branch, dimm) (nb_chipset == INTEL_NB_5100 ? \
+	MTR_5100_RD(branch, dimm) : \
+	nb_chipset == INTEL_NB_5400 ? \
 	nb_pci_getw(0, (branch) == 0 ? 21 : 22, 0, 0x80 + dimm * 2, 0) : \
 	((branch) == 0) ? \
 	nb_pci_getw(0, 21, 0, \
@@ -1024,6 +1149,8 @@
 #define	MIR_RD(reg)	nb_pci_getw(0, 16, 1, 0x80 + ((reg)*4), 0)
 
 #define	DMIR_RD(branch, reg) \
+	nb_chipset == INTEL_NB_5100 ? \
+	nb_pci_getl(0, ((branch) == 0) ? 21 : 22, 0, 0x15c + ((reg)*4), 0) : \
 	((branch) == 0) ? nb_pci_getl(0, 21, 0, 0x90 + ((reg)*4), 0) : \
 	(nb_number_memory_controllers == 2) ? \
 	nb_pci_getl(0, 22, 0, 0x90 + ((reg)*4), 0) : 0
@@ -1131,7 +1258,9 @@
 	(nb_number_memory_controllers == 2) ? \
 	nb_pci_putl(0, 22, 0, 0xb4, val) : 0
 
-#define	SPD_RD(branch, channel)	((branch) == 0) ? \
+#define	SPD_RD(branch, channel) \
+	nb_chipset == INTEL_NB_5100 ? nb_pci_getw(0, 16, 1, 0x48, 0) : \
+	((branch) == 0) ? \
 	nb_pci_getw(0, 21, 0, 0x74 + ((channel) * 2), 0) : \
 	(nb_number_memory_controllers == 2) ? \
 	nb_pci_getw(0, 22, 0, 0x74 + ((channel) * 2), 0) : 0
@@ -1142,7 +1271,9 @@
 
 #define	SPDCMD1_1_WR(val)	nb_pci_putl(0, 21, 0, 0x7c, val)
 #define	SPDCMD_WR(branch, channel, val)	\
-	if ((branch) == 0) \
+	if (nb_chipset == INTEL_NB_5100) \
+	nb_pci_putl(0, 16, 1, 0x4c, val); \
+	else if ((branch) == 0) \
 	nb_pci_putl(0, 21, 0, 0x78 + ((channel) * 4), val); \
 	else if (nb_number_memory_controllers == 2) \
 	nb_pci_putl(0, 22, 0, 0x78 + ((channel) * 4), val)
@@ -1211,7 +1342,8 @@
 
 #define	DMIR_RANKS(dmir, rank0, rank1, rank2, rank3) \
 	if (nb_chipset == INTEL_NB_5000P || nb_chipset == INTEL_NB_5000X || \
-	    nb_chipset == INTEL_NB_5000V || nb_chipset == INTEL_NB_5000Z) { \
+	    nb_chipset == INTEL_NB_5000V || nb_chipset == INTEL_NB_5000Z || \
+	    nb_chipset == INTEL_NB_5100) { \
 		rank0 = (dmir) & 3; \
 		rank1 = ((dmir) >> 3) & 3; \
 		rank2 = ((dmir) >> 6) & 3; \
@@ -1265,6 +1397,11 @@
 				/* throttling disabled */
 #define	EMASK_THR_F1	0x0001	/* catastrophic on-die thermal event */
 
+/* dimm type */
+#define	SPD_MEM_TYPE	2
+#define	SPD_DDR2	8
+#define	SPD_FBDIMM	9
+
 #ifdef __cplusplus
 }
 #endif
--- a/usr/src/uts/intel/io/intel_nb5000/nb5000_init.c	Mon Jul 06 09:30:27 2009 -0400
+++ b/usr/src/uts/intel/io/intel_nb5000/nb5000_init.c	Tue Jul 07 10:00:27 2009 -0400
@@ -52,6 +52,7 @@
 
 int nb_5000_memory_controller = 0;
 int nb_number_memory_controllers = NB_5000_MAX_MEM_CONTROLLERS;
+int nb_channels_per_branch = NB_MAX_CHANNELS_PER_BRANCH;
 int nb_dimms_per_channel = 0;
 
 nb_dimm_t **nb_dimms;
@@ -82,6 +83,12 @@
 static uint32_t nb_mcerr_fbd;
 static uint32_t nb_emask_fbd;
 
+static uint32_t nb_err0_mem;
+static uint32_t nb_err1_mem;
+static uint32_t nb_err2_mem;
+static uint32_t nb_mcerr_mem;
+static uint32_t nb_emask_mem;
+
 static uint16_t nb_err0_fsb;
 static uint16_t nb_err1_fsb;
 static uint16_t nb_err2_fsb;
@@ -102,6 +109,7 @@
 
 static uint32_t l_mcerr_int;
 static uint32_t l_mcerr_fbd;
+static uint32_t l_mcerr_mem;
 static uint16_t l_mcerr_fsb;
 static uint16_t l_mcerr_thr;
 
@@ -113,6 +121,9 @@
 uint_t nb5400_mask_poll_fbd = EMASK_5400_FBD_NF;
 uint_t nb5400_mask_bios_fbd = EMASK_5400_FBD_FATAL;
 
+int nb5100_reset_emask_mem = 1;
+uint_t nb5100_mask_poll_mem = EMASK_MEM_NF;
+
 uint_t nb5000_emask_fsb = 0;
 int nb5000_reset_emask_fsb = 1;
 uint_t nb5000_mask_poll_fsb = EMASK_FSB_NF;
@@ -143,6 +154,7 @@
 } find_dimm_label_t;
 
 static void x8450_dimm_label(int, char *, int);
+static void cp3250_dimm_label(int, char *, int);
 
 static struct platform_label {
 	const char *sys_vendor;		/* SMB_TYPE_SYSTEM vendor prefix */
@@ -152,6 +164,7 @@
 } platform_label[] = {
 	{ "SUN MICROSYSTEMS", "SUN BLADE X8450 SERVER MODULE",
 	    x8450_dimm_label, 8 },
+	{ "MiTAC,Shunde", "CP3250", cp3250_dimm_label, 0 },
 	{ NULL, NULL, NULL, 0 }
 };
 
@@ -225,7 +238,7 @@
 nb_fini()
 {
 	int i, j;
-	int nchannels = nb_number_memory_controllers * 2;
+	int nchannels = nb_number_memory_controllers * nb_channels_per_branch;
 	nb_dimm_t **dimmpp;
 	nb_dimm_t *dimmp;
 
@@ -264,22 +277,12 @@
 		cmi_mc_sw_memscrub_disable();
 }
 
-static nb_dimm_t *
-nb_dimm_init(int channel, int dimm, uint16_t mtr)
+static void
+fbd_eeprom(int channel, int dimm, nb_dimm_t *dp)
 {
-	nb_dimm_t *dp;
 	int i, t;
 	int spd_sz;
 
-	if (MTR_PRESENT(mtr) == 0)
-		return (NULL);
-	t = read_spd_eeprom(channel, dimm, 2) & 0xf;
-
-	if (t != 9)
-		return (NULL);
-
-	dp = kmem_zalloc(sizeof (nb_dimm_t), KM_SLEEP);
-
 	t = read_spd_eeprom(channel, dimm, 0) & 0xf;
 	if (t == 1)
 		spd_sz = 128;
@@ -308,7 +311,126 @@
 			    read_spd_eeprom(channel, dimm, 146 + i);
 		}
 	}
+}
+
+/* read the manR of the DDR2 dimm */
+static void
+ddr2_eeprom(int channel, int dimm, nb_dimm_t *dp)
+{
+	int i, t;
+	int slave;
+
+	slave = channel & 0x1 ? dimm + 4 : dimm;
+
+	/* byte[3]: number of row addresses */
+	dp->nrow = read_spd_eeprom(channel, slave, 3) & 0x1f;
+
+	/* byte[4]: number of column addresses */
+	dp->ncolumn = read_spd_eeprom(channel, slave, 4) & 0xf;
+
+	/* byte[5]: numranks; 0 means one rank */
+	dp->nranks = (read_spd_eeprom(channel, slave, 5) & 0x3) + 1;
+
+	/* byte[6]: data width */
+	dp->width = (read_spd_eeprom(channel, slave, 6) >> 5) << 2;
+
+	/* byte[17]: number of banks */
+	dp->nbanks = read_spd_eeprom(channel, slave, 17);
+
+	dp->dimm_size = DIMMSIZE(dp->nrow, dp->ncolumn, dp->nranks, dp->nbanks,
+	    dp->width);
+
+	/* manufacture-id - byte[64-65] */
+	dp->manufacture_id = read_spd_eeprom(channel, slave, 64) |
+	    (read_spd_eeprom(channel, dimm, 65) << 8);
+
+	/* location - byte[72] */
+	dp->manufacture_location = read_spd_eeprom(channel, slave, 72);
+
+	/* serial number - byte[95-98] */
+	dp->serial_number =
+	    (read_spd_eeprom(channel, slave, 98) << 24) |
+	    (read_spd_eeprom(channel, slave, 97) << 16) |
+	    (read_spd_eeprom(channel, slave, 96) << 8) |
+	    read_spd_eeprom(channel, slave, 95);
+
+	/* week - byte[94] */
+	t = read_spd_eeprom(channel, slave, 94);
+	dp->manufacture_week = (t >> 4) * 10 + (t & 0xf);
+	/* week - byte[93] */
+	t = read_spd_eeprom(channel, slave, 93);
+	dp->manufacture_year = (t >> 4) * 10 + (t & 0xf) + 2000;
+
+	/* part number - byte[73-81] */
+	for (i = 0; i < 8; i++) {
+		dp->part_number[i] = read_spd_eeprom(channel, slave, 73 + i);
+	}
+
+	/* revision - byte[91-92] */
+	for (i = 0; i < 2; i++) {
+		dp->revision[i] = read_spd_eeprom(channel, slave, 91 + i);
+	}
+}
+
+static boolean_t
+nb_dimm_present(int channel, int dimm)
+{
+	boolean_t rc = B_FALSE;
+
+	if (nb_chipset == INTEL_NB_5100) {
+		int t, slave;
+		slave = channel & 0x1 ? dimm + 4 : dimm;
+		/* read the type field from the dimm and check for DDR2 type */
+		if ((t = read_spd_eeprom(channel, slave, SPD_MEM_TYPE)) == -1)
+			return (B_FALSE);
+		rc = (t & 0xf) == SPD_DDR2;
+	} else {
+		rc = MTR_PRESENT(MTR_RD(channel, dimm)) != 0;
+	}
+
+	return (rc);
+}
+
+static nb_dimm_t *
+nb_ddr2_dimm_init(int channel, int dimm, int start_rank)
+{
+	nb_dimm_t *dp;
+
+	if (nb_dimm_present(channel, dimm) == B_FALSE)
+		return (NULL);
+
+	dp = kmem_zalloc(sizeof (nb_dimm_t), KM_SLEEP);
+
+	ddr2_eeprom(channel, dimm, dp);
+
+	/* The 1st rank of the dimm takes on this value */
+	dp->start_rank = (uint8_t)start_rank;
+
+	dp->mtr_present = 1;
+
+	return (dp);
+}
+
+static nb_dimm_t *
+nb_fbd_dimm_init(int channel, int dimm, uint16_t mtr)
+{
+	nb_dimm_t *dp;
+	int t;
+
+	if (MTR_PRESENT(mtr) == 0)
+		return (NULL);
+	t = read_spd_eeprom(channel, dimm, SPD_MEM_TYPE) & 0xf;
+
+	/* check for the dimm type */
+	if (t != SPD_FBDIMM)
+		return (NULL);
+
+	dp = kmem_zalloc(sizeof (nb_dimm_t), KM_SLEEP);
+
+	fbd_eeprom(channel, dimm, dp);
+
 	dp->mtr_present = MTR_PRESENT(mtr);
+	dp->start_rank = dimm << 1;
 	dp->nranks = MTR_NUMRANK(mtr);
 	dp->nbanks = MTR_NUMBANK(mtr);
 	dp->ncolumn = MTR_NUMCOL(mtr);
@@ -429,9 +551,6 @@
 			nb_ranks[i][j].hole_base = hole_base;
 			nb_ranks[i][j].hole_size = hole_size;
 			if (limit > base) {
-				dimm_add_rank(i, rank0, branch_interleave, 0,
-				    base, hole_base, hole_size, interleave,
-				    limit);
 				if (rank0 != rank1) {
 					dimm_add_rank(i, rank1,
 					    branch_interleave, 1, base,
@@ -480,7 +599,6 @@
 	smbios_system_t sy;
 	id_t id;
 	int i, j;
-	uint16_t mtr;
 	find_dimm_label_t *rt = NULL;
 
 	if (ksmbios != NULL && nb_no_smbios == 0) {
@@ -509,8 +627,7 @@
 		for (i = 0; i < nb_number_memory_controllers; i++) {
 			for (j = nb_dimms_per_channel;
 			    j < NB_MAX_DIMMS_PER_CHANNEL; j++) {
-				mtr = MTR_RD(i, j);
-				if (MTR_PRESENT(mtr))
+				if (nb_dimm_present(i, j))
 					nb_dimms_per_channel = j + 1;
 			}
 		}
@@ -615,8 +732,114 @@
 	(void) snprintf(label, label_sz, "D%d", (dimm * 4) + channel);
 }
 
+/*
+ * CP3250 DIMM labels
+ * Channel   Dimm   Label
+ *       0      0      A0
+ *       1      0      B0
+ *       0      1      A1
+ *       1      1      B1
+ *       0      2      A2
+ *       1      2      B2
+ */
 static void
-nb_dimms_init(find_dimm_label_t *label_function)
+cp3250_dimm_label(int dimm, char *label, int label_sz)
+{
+	int channel = dimm / nb_dimms_per_channel;
+
+	dimm = dimm % nb_dimms_per_channel;
+	(void) snprintf(label, label_sz, "%c%d", channel == 0 ? 'A' : 'B',
+	    dimm);
+}
+
+/*
+ * Map the rank id to dimm id of a channel
+ * For the 5100 chipset, walk through the dimm list of the channel to check if
+ * the given rank id is within the rank range assigned to the dimm.
+ * For other chipsets, the dimm is rank/2.
+ */
+int
+nb_rank2dimm(int channel, int rank)
+{
+	int i;
+	nb_dimm_t **dimmpp = nb_dimms;
+
+	if (nb_chipset != INTEL_NB_5100)
+		return (rank >> 1);
+
+	dimmpp += channel * nb_dimms_per_channel;
+	for (i = 0; i < nb_dimms_per_channel; i++) {
+		if ((rank >= dimmpp[i]->start_rank) &&
+		    (rank < dimmpp[i]->start_rank + dimmpp[i]->nranks)) {
+			return (i);
+		}
+	}
+	return (-1);
+}
+
+static void
+nb_ddr2_dimms_init(find_dimm_label_t *label_function)
+{
+	int i, j;
+	int start_rank;
+	uint32_t spcpc;
+	uint8_t spcps;
+	nb_dimm_t **dimmpp;
+
+	nb_dimm_slots = nb_number_memory_controllers * nb_channels_per_branch *
+	    nb_dimms_per_channel;
+	nb_dimms = (nb_dimm_t **)kmem_zalloc(sizeof (nb_dimm_t *) *
+	    nb_dimm_slots, KM_SLEEP);
+	dimmpp = nb_dimms;
+	nb_mode = NB_MEMORY_NORMAL;
+	for (i = 0; i < nb_number_memory_controllers; i++) {
+		if (nb_mode == NB_MEMORY_NORMAL) {
+			spcpc = SPCPC_RD(i);
+			spcps = SPCPS_RD(i);
+			if ((spcpc & SPCPC_SPARE_ENABLE) != 0 &&
+			    (spcps & SPCPS_SPARE_DEPLOYED) != 0)
+				nb_mode = NB_MEMORY_SPARE_RANK;
+			spare_rank[i] = SPCPC_SPRANK(spcpc);
+		}
+
+		/* The 1st dimm of a channel starts at rank 0 */
+		start_rank = 0;
+
+		for (j = 0; j < nb_dimms_per_channel; j++) {
+			dimmpp[j] = nb_ddr2_dimm_init(i, j, start_rank);
+			if (dimmpp[j]) {
+				nb_ndimm ++;
+				dimm_add_geometry(i, j, dimmpp[j]->nbanks,
+				    dimmpp[j]->width, dimmpp[j]->ncolumn,
+				    dimmpp[j]->nrow);
+				if (label_function) {
+					label_function->label_function(
+					    (i * nb_dimms_per_channel) + j,
+					    dimmpp[j]->label,
+					    sizeof (dimmpp[j]->label));
+				}
+				start_rank += dimmpp[j]->nranks;
+				/*
+				 * add an extra rank because
+				 * single-ranked dimm still takes on two ranks.
+				 */
+				if (dimmpp[j]->nranks & 0x1)
+					start_rank++;
+				}
+		}
+		dimmpp += nb_dimms_per_channel;
+	}
+
+	/*
+	 * single channel is supported.
+	 */
+	if (nb_ndimm > 0 && nb_ndimm <= nb_dimms_per_channel) {
+		nb_mode = NB_MEMORY_SINGLE_CHANNEL;
+	}
+}
+
+static void
+nb_fbd_dimms_init(find_dimm_label_t *label_function)
 {
 	int i, j, k, l;
 	uint16_t mtr;
@@ -649,7 +872,7 @@
 		for (j = 0; j < nb_dimms_per_channel; j++) {
 			mtr = MTR_RD(i, j);
 			k = i * 2;
-			dimmpp[j] = nb_dimm_init(k, j, mtr);
+			dimmpp[j] = nb_fbd_dimm_init(k, j, mtr);
 			if (dimmpp[j]) {
 				nb_ndimm ++;
 				dimm_add_geometry(i, j, dimmpp[j]->nbanks,
@@ -663,7 +886,7 @@
 				}
 			}
 			dimmpp[j + nb_dimms_per_channel] =
-			    nb_dimm_init(k + 1, j, mtr);
+			    nb_fbd_dimm_init(k + 1, j, mtr);
 			l = j + nb_dimms_per_channel;
 			if (dimmpp[l]) {
 				if (label_function) {
@@ -677,11 +900,20 @@
 		}
 		dimmpp += nb_dimms_per_channel * 2;
 	}
+}
+
+static void
+nb_dimms_init(find_dimm_label_t *label_function)
+{
+	if (nb_chipset == INTEL_NB_5100)
+		nb_ddr2_dimms_init(label_function);
+	else
+		nb_fbd_dimms_init(label_function);
+
 	if (label_function == NULL)
 		nb_smbios();
 }
 
-
 /* Setup the ESI port registers to enable SERR for southbridge */
 static void
 nb_pex_init()
@@ -822,7 +1054,7 @@
 	}
 }
 
-void
+static void
 nb_fbd_init()
 {
 	uint32_t err0_fbd;
@@ -897,7 +1129,7 @@
 	}
 }
 
-void
+static void
 nb_fbd_fini()
 {
 	ERR0_FBD_WR(0xffffffff);
@@ -914,6 +1146,80 @@
 }
 
 static void
+nb_mem_init()
+{
+	uint32_t err0_mem;
+	uint32_t err1_mem;
+	uint32_t err2_mem;
+	uint32_t mcerr_mem;
+	uint32_t emask_mem;
+	uint32_t emask_poll_mem;
+
+	err0_mem = ERR0_MEM_RD();
+	err1_mem = ERR1_MEM_RD();
+	err2_mem = ERR2_MEM_RD();
+	mcerr_mem = MCERR_MEM_RD();
+	emask_mem = EMASK_MEM_RD();
+
+	nb_err0_mem = err0_mem;
+	nb_err1_mem = err1_mem;
+	nb_err2_mem = err2_mem;
+	nb_mcerr_mem = mcerr_mem;
+	nb_emask_mem = emask_mem;
+
+	ERR0_MEM_WR(0xffffffff);
+	ERR1_MEM_WR(0xffffffff);
+	ERR2_MEM_WR(0xffffffff);
+	MCERR_MEM_WR(0xffffffff);
+	EMASK_MEM_WR(0xffffffff);
+
+	emask_poll_mem = nb5100_mask_poll_mem;
+	mcerr_mem |= emask_poll_mem;
+	err0_mem |= emask_poll_mem;
+	err1_mem |= emask_poll_mem;
+	err2_mem |= emask_poll_mem;
+
+	l_mcerr_mem = mcerr_mem;
+	ERR0_MEM_WR(err0_mem);
+	ERR1_MEM_WR(err1_mem);
+	ERR2_MEM_WR(err2_mem);
+	MCERR_MEM_WR(mcerr_mem);
+	if (nb5100_reset_emask_mem) {
+		EMASK_MEM_WR(~nb5100_mask_poll_mem);
+	} else {
+		EMASK_MEM_WR(nb_emask_mem);
+	}
+}
+
+void
+nb_mem_mask_mc(uint32_t mc_mask_mem)
+{
+	uint32_t emask_mem;
+
+	emask_mem = MCERR_MEM_RD();
+	if ((emask_mem & mc_mask_mem) != mc_mask_mem) {
+		MCERR_MEM_WR(emask_mem|mc_mask_mem);
+		nb_mask_mc_set = 1;
+	}
+}
+
+static void
+nb_mem_fini()
+{
+	ERR0_MEM_WR(0xffffffff);
+	ERR1_MEM_WR(0xffffffff);
+	ERR2_MEM_WR(0xffffffff);
+	MCERR_MEM_WR(0xffffffff);
+	EMASK_MEM_WR(0xffffffff);
+
+	ERR0_MEM_WR(nb_err0_mem);
+	ERR1_MEM_WR(nb_err1_mem);
+	ERR2_MEM_WR(nb_err2_mem);
+	MCERR_MEM_WR(nb_mcerr_mem);
+	EMASK_MEM_WR(nb_emask_mem);
+}
+
+static void
 nb_fsb_init()
 {
 	uint16_t err0_fsb;
@@ -1152,7 +1458,10 @@
 void
 nb_mask_mc_reset()
 {
-	MCERR_FBD_WR(l_mcerr_fbd);
+	if (nb_chipset == INTEL_NB_5100)
+		MCERR_MEM_WR(l_mcerr_mem);
+	else
+		MCERR_FBD_WR(l_mcerr_fbd);
 	MCERR_INT_WR(l_mcerr_int);
 	MCERR_FSB_WR(0, l_mcerr_fsb);
 	MCERR_FSB_WR(1, l_mcerr_fsb);
@@ -1184,7 +1493,10 @@
 	nb_dimms_init(label_function_p);
 	nb_mc_init();
 	nb_pex_init();
-	nb_fbd_init();
+	if (nb_chipset == INTEL_NB_5100)
+		nb_mem_init();
+	else
+		nb_fbd_init();
 	nb_fsb_init();
 	nb_scrubber_enable();
 	return (0);
@@ -1212,6 +1524,9 @@
 	case INTEL_NB_5000Z:
 		nb_number_memory_controllers = 1;
 		break;
+	case INTEL_NB_5100:
+		nb_channels_per_branch = 1;
+		break;
 	case INTEL_NB_5400:
 	case INTEL_NB_5400A:
 	case INTEL_NB_5400B:
@@ -1245,7 +1560,10 @@
 	nb_pex_init();
 	nb_int_init();
 	nb_thr_init();
-	nb_fbd_init();
+	if (nb_chipset == INTEL_NB_5100)
+		nb_mem_init();
+	else
+		nb_fbd_init();
 	nb_fsb_init();
 	nb_scrubber_enable();
 
@@ -1271,7 +1589,10 @@
 	mutex_destroy(&nb_mutex);
 	nb_int_fini();
 	nb_thr_fini();
-	nb_fbd_fini();
+	if (nb_chipset == INTEL_NB_5100)
+		nb_mem_fini();
+	else
+		nb_fbd_fini();
 	nb_fsb_fini();
 	nb_pex_fini();
 	nb_fini();
--- a/usr/src/uts/intel/io/intel_nb5000/nb_log.h	Mon Jul 06 09:30:27 2009 -0400
+++ b/usr/src/uts/intel/io/intel_nb5000/nb_log.h	Tue Jul 07 10:00:27 2009 -0400
@@ -20,7 +20,7 @@
  */
 
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -130,6 +130,31 @@
 	uint32_t badcnt;	/* bad dram counter */
 } nb_nf_fbd_t;
 
+typedef struct nb_nf_mem {
+				/* Memory registers */
+	uint32_t ferr_nf_mem;	/* MC first non-fatal error */
+	uint32_t nerr_nf_mem;	/* MC next non-fatal error */
+	uint32_t nrecmema;	/* non-recoverable memory error log A */
+	uint32_t nrecmemb;	/* non-recoverable memory error log B */
+	uint32_t redmema;	/* recoverable memory data error log A */
+	uint32_t redmemb;	/* recoverable memory data error log B */
+	uint32_t recmema;	/* recoverable memory error log A */
+	uint32_t recmemb;	/* recoverable memory error log B */
+
+				/* Spare rank */
+	uint32_t spcpc;		/* spare copy control */
+	uint8_t spcps;		/* spare copy status */
+
+				/* RAS */
+	uint32_t cerrcnt;	/* correctable error count A */
+	uint32_t cerrcnt_ext;	/* correctable error count B */
+	uint32_t cerrcnt_last;	/* correctable error count A */
+	uint32_t cerrcnt_ext_last;	/* correctable error count B */
+	uint32_t badram;	/* bad dram marker */
+	uint32_t badcnt;	/* bad dram counter */
+	uint32_t validlog;	/* valid log markers */
+} nb_nf_mem_t;
+
 typedef struct nb_dma {
 	uint16_t pcists;
 	uint16_t pexdevsts;
@@ -155,6 +180,7 @@
 		nb_int_t int_regs;
 		nb_fat_fbd_t fat_fbd_regs;
 		nb_nf_fbd_t nf_fbd_regs;
+		nb_nf_mem_t nf_mem_regs;
 		nb_dma_t dma_regs;
 		nb_thr_t thr_regs;
 	} nb;
@@ -168,6 +194,7 @@
 #define	NB_REG_LOG_NF_FBD	5
 #define	NB_REG_LOG_DMA		6
 #define	NB_REG_LOG_THR		7
+#define	NB_REG_LOG_NF_MEM	8
 
 typedef struct nb_logout {
 	uint64_t acl_timestamp;
@@ -196,7 +223,8 @@
 typedef struct nb_dimm {
 	uint64_t dimm_size;
 	uint8_t mtr_present;
-	uint8_t nranks;
+	uint8_t start_rank;		/* id of the 1st rank */
+	uint8_t nranks;			/* number of ranks */
 	uint8_t nbanks;
 	uint8_t ncolumn;
 	uint8_t nrow;
@@ -232,6 +260,7 @@
 
 extern int nb_5000_memory_controller;
 extern int nb_number_memory_controllers;
+extern int nb_channels_per_branch;
 extern int nb_dimms_per_channel;
 
 extern nb_dimm_t **nb_dimms;
@@ -248,6 +277,7 @@
 	[NB_MAX_MEM_RANK_SELECT];
 extern uint8_t spare_rank[NB_5000_MAX_MEM_CONTROLLERS];
 extern enum nb_memory_mode nb_mode;
+extern int nb_rank2dimm(int, int);
 
 extern int inb_mc_register(cmi_hdl_t, void *, void *, void *);
 extern void nb_scrubber_enable(void);
@@ -267,6 +297,7 @@
 
 extern void nb_fsb_mask_mc(int, uint16_t);
 extern void nb_fbd_mask_mc(uint32_t);
+extern void nb_mem_mask_mc(uint32_t);
 extern void nb_int_mask_mc(uint32_t);
 extern void nb_thr_mask_mc(uint16_t);
 extern void nb_mask_mc_reset(void);
--- a/usr/src/uts/intel/io/intel_nb5000/nb_pci_cfg.c	Mon Jul 06 09:30:27 2009 -0400
+++ b/usr/src/uts/intel/io/intel_nb5000/nb_pci_cfg.c	Tue Jul 07 10:00:27 2009 -0400
@@ -39,6 +39,8 @@
 
 static ddi_acc_handle_t dev_16_hdl[NB_PCI_NFUNC];
 static ddi_acc_handle_t dev_17_hdl[NB_PCI_NFUNC];
+static ddi_acc_handle_t dev_21_hdl;
+static ddi_acc_handle_t dev_22_hdl;
 static ddi_acc_handle_t dev_pci_hdl[NB_PCI_DEV];
 
 void
@@ -76,6 +78,20 @@
 			    "intel_nb5000: pci_config_setup failed");
 		reg.pci_phys_hi += 1 << PCI_REG_FUNC_SHIFT;
 	}
+	reg.pci_phys_hi = 21 << PCI_REG_DEV_SHIFT; /* Bus=0, Dev=21, Func=0 */
+	if (ddi_prop_update_int_array(DDI_MAJOR_T_UNKNOWN, dip, "reg",
+	    (int *)&reg, sizeof (reg)/sizeof (int)) != DDI_PROP_SUCCESS)
+		cmn_err(CE_WARN,
+		    "nb_pci_cfg_setup: cannot create reg property");
+	if (pci_config_setup(dip, &dev_21_hdl) != DDI_SUCCESS)
+		cmn_err(CE_WARN, "intel_nb5000: pci_config_setup failed");
+	reg.pci_phys_hi = 22 << PCI_REG_DEV_SHIFT; /* Bus=0, Dev=22, Func=0 */
+	if (ddi_prop_update_int_array(DDI_MAJOR_T_UNKNOWN, dip, "reg",
+	    (int *)&reg, sizeof (reg)/sizeof (int)) != DDI_PROP_SUCCESS)
+		cmn_err(CE_WARN,
+		    "nb_pci_cfg_setup: cannot create reg property");
+	if (pci_config_setup(dip, &dev_22_hdl) != DDI_SUCCESS)
+		cmn_err(CE_WARN, "intel_nb5000: pci_config_setup failed");
 	reg.pci_phys_hi = 0;		/* Bus=0, Dev=0, Func=0 */
 	for (i = 0; i < NB_PCI_DEV; i++) {
 		if (ddi_prop_update_int_array(DDI_MAJOR_T_UNKNOWN, dip, "reg",
@@ -102,6 +118,8 @@
 	for (i = 0; i < NB_PCI_NFUNC; i++) {
 		pci_config_teardown(&dev_17_hdl[i]);
 	}
+	pci_config_teardown(&dev_21_hdl);
+	pci_config_teardown(&dev_22_hdl);
 	for (i = 0; i < NB_PCI_DEV; i++)
 		pci_config_teardown(&dev_pci_hdl[i]);
 }
@@ -117,6 +135,10 @@
 		hdl = dev_17_hdl[func];
 	} else if (bus == 0 && dev < NB_PCI_DEV && func == 0) {
 		hdl = dev_pci_hdl[dev];
+	} else if (bus == 0 && dev == 21 && func == 0) {
+		hdl = dev_21_hdl;
+	} else if (bus == 0 && dev == 22 && func == 0) {
+		hdl = dev_22_hdl;
 	} else {
 		hdl = 0;
 	}
--- a/usr/src/uts/intel/os/driver_aliases	Mon Jul 06 09:30:27 2009 -0400
+++ b/usr/src/uts/intel/os/driver_aliases	Tue Jul 07 10:00:27 2009 -0400
@@ -45,6 +45,7 @@
 intel_nb5000 "pci8086,4000"
 intel_nb5000 "pci8086,4001"
 intel_nb5000 "pci8086,4003"
+intel_nb5000 "pci8086,65c0"
 intel_nhm "pci8086,3423"
 xpv "pci5853,1.1"
 amd_iommu "pci1022,11ff"
--- a/usr/src/uts/intel/sys/mc_intel.h	Mon Jul 06 09:30:27 2009 -0400
+++ b/usr/src/uts/intel/sys/mc_intel.h	Tue Jul 07 10:00:27 2009 -0400
@@ -45,6 +45,7 @@
 #define	MCINTEL_NVLIST_DIMMSZ	"memory-dimm-size"
 #define	MCINTEL_NVLIST_NRANKS	"dimm-max-ranks"
 #define	MCINTEL_NVLIST_RANKS	"dimm-ranks"
+#define	MCINTEL_NVLIST_1ST_RANK	"dimm-start-rank"
 #define	MCINTEL_NVLIST_ROWS	"dimm-rows"
 #define	MCINTEL_NVLIST_COL	"dimm-column"
 #define	MCINTEL_NVLIST_BANK	"dimm-banks"
@@ -93,6 +94,7 @@
 #define	FM_EREPORT_PAYLOAD_NAME_RAS			"ras"
 #define	FM_EREPORT_PAYLOAD_NAME_FERR_FAT_FBD		"ferr_fat_fbd"
 #define	FM_EREPORT_PAYLOAD_NAME_NERR_FAT_FBD		"nerr_fat_fbd"
+#define	FM_EREPORT_PAYLOAD_NAME_VALIDLOG		"validlog"
 #define	FM_EREPORT_PAYLOAD_NAME_NRECMEMA		"nrecmema"
 #define	FM_EREPORT_PAYLOAD_NAME_NRECMEMB		"nrecmemb"
 #define	FM_EREPORT_PAYLOAD_NAME_NRECFGLOG		"nrecfglog"
@@ -106,6 +108,7 @@
 #define	FM_EREPORT_PAYLOAD_NAME_SPCPS			"spcps"
 #define	FM_EREPORT_PAYLOAD_NAME_UERRCNT			"uerrcnt"
 #define	FM_EREPORT_PAYLOAD_NAME_UERRCNT_LAST		"uerrcnt_last"
+#define	FM_EREPORT_PAYLOAD_NAME_BADRAM			"badram"
 #define	FM_EREPORT_PAYLOAD_NAME_BADRAMA			"badrama"
 #define	FM_EREPORT_PAYLOAD_NAME_BADRAMB			"badramb"
 #define	FM_EREPORT_PAYLOAD_NAME_BADCNT			"badcnt"
@@ -117,8 +120,12 @@
 #define	FM_EREPORT_PAYLOAD_NAME_DMIR			"dmir"
 #define	FM_EREPORT_PAYLOAD_NAME_FERR_NF_FBD		"ferr_nf_fbd"
 #define	FM_EREPORT_PAYLOAD_NAME_NERR_NF_FBD		"nerr_nf_fbd"
+#define	FM_EREPORT_PAYLOAD_NAME_FERR_NF_MEM		"ferr_nf_mem"
+#define	FM_EREPORT_PAYLOAD_NAME_NERR_NF_MEM		"nerr_nf_mem"
 #define	FM_EREPORT_PAYLOAD_NAME_RECMEMA			"recmema"
 #define	FM_EREPORT_PAYLOAD_NAME_RECMEMB			"recmemb"
+#define	FM_EREPORT_PAYLOAD_NAME_REDMEMA			"redmema"
+#define	FM_EREPORT_PAYLOAD_NAME_REDMEMB			"redmemb"
 #define	FM_EREPORT_PAYLOAD_NAME_RECFGLOG		"recfglog"
 #define	FM_EREPORT_PAYLOAD_NAME_RECFBDA			"recfbda"
 #define	FM_EREPORT_PAYLOAD_NAME_RECFBDB			"recfbdb"
@@ -128,6 +135,8 @@
 #define	FM_EREPORT_PAYLOAD_NAME_RECFBDF			"recfbdf"
 #define	FM_EREPORT_PAYLOAD_NAME_CERRCNT			"cerrcnt"
 #define	FM_EREPORT_PAYLOAD_NAME_CERRCNT_LAST		"cerrcnt_last"
+#define	FM_EREPORT_PAYLOAD_NAME_CERRCNT_EXT		"cerrcnt_ext"
+#define	FM_EREPORT_PAYLOAD_NAME_CERRCNT_EXT_LAST	"cerrcnt_ext_last"
 #define	FM_EREPORT_PAYLOAD_NAME_CERRCNTA		"cerrcnta"
 #define	FM_EREPORT_PAYLOAD_NAME_CERRCNTB		"cerrcntb"
 #define	FM_EREPORT_PAYLOAD_NAME_CERRCNTC		"cerrcntc"
@@ -199,6 +208,7 @@
 #define	INTEL_NB_5000V	0x25d48086
 #define	INTEL_NB_5000X	0x25c08086
 #define	INTEL_NB_5000Z	0x25d08086
+#define	INTEL_NB_5100	0x65c08086
 #define	INTEL_NB_5400	0x40008086
 #define	INTEL_NB_5400A	0x40018086
 #define	INTEL_NB_5400B	0x40038086
--- a/usr/src/uts/sun4/os/mlsetup.c	Mon Jul 06 09:30:27 2009 -0400
+++ b/usr/src/uts/sun4/os/mlsetup.c	Tue Jul 07 10:00:27 2009 -0400
@@ -512,11 +512,11 @@
 	 */
 	(void) strcpy(path, "/platform/");
 	(void) strcat(path, platname);
-	(void) strcat(path, "/kernel ");
+	(void) strcat(path, "/kernel");
 	if (!compat) {
-		(void) strcat(path, "/platform/");
+		(void) strcat(path, " /platform/");
 		(void) strcat(path, defname);
-		(void) strcat(path, "/kernel ");
+		(void) strcat(path, "/kernel");
 	}
 	return;
 
--- a/usr/src/uts/sun4v/io/ds_common.c	Mon Jul 06 09:30:27 2009 -0400
+++ b/usr/src/uts/sun4v/io/ds_common.c	Tue Jul 07 10:00:27 2009 -0400
@@ -168,6 +168,8 @@
 /* service utilities */
 static void ds_reset_svc(ds_svc_t *svc, ds_port_t *port);
 static int ds_svc_register_onport(ds_svc_t *svc, ds_port_t *port);
+static int ds_svc_register_onport_walker(ds_svc_t *svc, void *arg);
+static void ds_set_port_ready(ds_port_t *port, uint16_t major, uint16_t minor);
 
 /* port utilities */
 static void ds_port_reset(ds_port_t *port);
@@ -195,7 +197,6 @@
 static ds_svc_t *ds_find_svc_by_id_port(char *svc_id, int is_client,
     ds_port_t *port);
 static ds_svc_t *ds_svc_clone(ds_svc_t *svc);
-static void ds_portset_del_active_clients(char *service, ds_portset_t *portsp);
 static void ds_check_for_dup_services(ds_svc_t *svc);
 static void ds_delete_svc_entry(ds_svc_t *svc);
 
@@ -691,6 +692,8 @@
 	    __func__, msglen);
 	DS_DUMP_MSG(DS_DBG_FLAG_LDC, msg, msglen);
 
+	(void) ds_log_add_msg(DS_LOG_OUT(port->id), (uint8_t *)msg, msglen);
+
 	/*
 	 * Ensure that no other messages can be sent on this port by holding
 	 * the tx_lock mutex in case the write doesn't get sent with one write.
@@ -714,9 +717,10 @@
 			    (loopcnt++ < ds_retries)) {
 				drv_usecwait(ds_delay);
 			} else {
-				cmn_err(CE_WARN, "ds@%lx: send_msg: ldc_write "
-				    "failed (%d), %d bytes remaining" DS_EOL,
-				    PORTID(port), rv, (int)amt_left);
+				DS_DBG_PRCL(CE_NOTE, "ds@%lx: send_msg: "
+				    "ldc_write failed (%d), %d bytes "
+				    "remaining" DS_EOL, PORTID(port), rv,
+				    (int)amt_left);
 				goto error;
 			}
 		} else {
@@ -805,6 +809,10 @@
 	 */
 	(void) ds_send_msg(port, msg, msglen);
 	DS_FREE(msg, msglen);
+
+	if (match) {
+		ds_set_port_ready(port, req->major_vers, ack->minor_vers);
+	}
 }
 
 static void
@@ -812,6 +820,8 @@
 {
 	ds_init_ack_t	*ack;
 	ds_ver_t	*ver;
+	uint16_t	major;
+	uint16_t	minor;
 	size_t		explen = DS_MSG_LEN(ds_init_ack_t);
 
 	/* sanity check the incoming message */
@@ -826,6 +836,13 @@
 
 	mutex_enter(&port->lock);
 
+	if (port->state == DS_PORT_READY) {
+		DS_DBG_PRCL(CE_NOTE, "ds@%lx: <init_ack: port ready" DS_EOL,
+		    PORTID(port));
+		mutex_exit(&port->lock);
+		return;
+	}
+
 	if (port->state != DS_PORT_INIT_REQ) {
 		DS_DBG_PRCL(CE_NOTE, "ds@%lx: <init_ack: invalid state: %d"
 		    DS_EOL, PORTID(port), port->state);
@@ -834,49 +851,14 @@
 	}
 
 	ver = &(ds_vers[port->ver_idx]);
-
-	/* agreed upon a major version */
-	port->ver.major = ver->major;
-
-	/*
-	 * If the returned minor version is larger than
-	 * the requested minor version, use the lower of
-	 * the two, i.e. the requested version.
-	 */
-	if (ack->minor_vers >= ver->minor) {
-		/*
-		 * Use the minor version specified in the
-		 * original request.
-		 */
-		port->ver.minor = ver->minor;
-	} else {
-		/*
-		 * Use the lower minor version returned in
-		 * the ack. By definition, all lower minor
-		 * versions must be supported.
-		 */
-		port->ver.minor = ack->minor_vers;
-	}
-
-	port->state = DS_PORT_READY;
+	major = ver->major;
+	minor = MIN(ver->minor, ack->minor_vers);
+	mutex_exit(&port->lock);
 
 	DS_DBG_PRCL(CE_NOTE, "ds@%lx: <init_ack: port ready v%d.%d" DS_EOL,
-	    PORTID(port), port->ver.major, port->ver.minor);
-
-	mutex_exit(&port->lock);
-
-	/*
-	 * The port came up, so update all the services
-	 * with this information. Follow that up with an
-	 * attempt to register any service that is not
-	 * already registered.
-	 */
-	mutex_enter(&ds_svcs.lock);
-
-	(void) ds_walk_svcs(ds_svc_port_up, port);
-	(void) ds_walk_svcs(ds_svc_register, NULL);
-
-	mutex_exit(&ds_svcs.lock);
+	    PORTID(port), major, minor);
+
+	ds_set_port_ready(port, major, minor);
 }
 
 static void
@@ -1270,11 +1252,10 @@
 	/*
 	 * Get the ports that haven't been tried yet and are available to try.
 	 */
-	DS_PORTSET_SETNULL(totry);
+	DS_PORTSET_DUP(totry, svc->avail);
 	for (i = 0; i < DS_MAX_PORTS; i++) {
-		if (!DS_PORT_IN_SET(svc->tried, i) &&
-		    DS_PORT_IN_SET(svc->avail, i))
-			DS_PORTSET_ADD(totry, i);
+		if (DS_PORT_IN_SET(svc->tried, i))
+			DS_PORTSET_DEL(totry, i);
 	}
 
 	if (DS_PORTSET_ISNULL(totry))
@@ -1351,8 +1332,9 @@
 
 	/* make sure the message makes sense */
 	if (svc->state != DS_SVC_REG_PENDING) {
-		cmn_err(CE_WARN, "ds@%lx: <reg_nack: invalid state (%d)" DS_EOL,
-		    PORTID(port), svc->state);
+		DS_DBG_PRCL(CE_NOTE, "ds@%lx: <reg_nack: '%s' handle: 0x%llx "
+		    "invalid state (%d)" DS_EOL, PORTID(port), svc->cap.svc_id,
+		    (u_longlong_t)nack->svc_handle, svc->state);
 		goto done;
 	}
 
@@ -1447,7 +1429,7 @@
 		mutex_exit(&port->lock);
 		if (!is_up)
 			return;
-		cmn_err(CE_WARN, "ds@%lx: <unreg_req: invalid handle 0x%llx"
+		DS_DBG_PRCL(CE_NOTE, "ds@%lx: <unreg_req: invalid handle 0x%llx"
 		    DS_EOL, PORTID(port), (u_longlong_t)req->svc_handle);
 		ds_send_unreg_nack(port, req->svc_handle);
 		return;
@@ -2103,6 +2085,23 @@
 	return (0);
 }
 
+static void
+ds_set_svc_port_tried(char *svc_id, ds_port_t *port)
+{
+	int		idx;
+	ds_svc_t	*svc;
+
+	ASSERT(MUTEX_HELD(&ds_svcs.lock));
+
+	/* walk every table entry */
+	for (idx = 0; idx < ds_svcs.maxsvcs; idx++) {
+		svc = ds_svcs.tbl[idx];
+		if (!DS_SVC_ISFREE(svc) && (svc->flags & DSSF_ISCLIENT) != 0 &&
+		    strcmp(svc_id, svc->cap.svc_id) == 0)
+			DS_PORTSET_ADD(svc->tried, PORTID(port));
+	}
+}
+
 static int
 ds_svc_register_onport(ds_svc_t *svc, ds_port_t *port)
 {
@@ -2114,7 +2113,23 @@
 	if (!DS_PORT_IN_SET(svc->avail, PORTID(port)))
 		return (0);
 
-	DS_PORTSET_ADD(svc->tried, PORTID(port));
+	if (DS_PORT_IN_SET(svc->tried, PORTID(port)))
+		return (0);
+
+	if ((svc->flags & DSSF_ISCLIENT) == 0) {
+		DS_PORTSET_ADD(svc->tried, PORTID(port));
+		if (svc->state != DS_SVC_INACTIVE)
+			return (0);
+	} else {
+		ds_set_svc_port_tried(svc->cap.svc_id, port);
+
+		/*
+		 * Never send a client reg req to the SP.
+		 */
+		if (PORTID(port) == ds_sp_port_id) {
+			return (0);
+		}
+	}
 
 	if (ds_send_reg_req(svc, port) == 0) {
 		/* register sent successfully */
@@ -2128,6 +2143,18 @@
 	return (0);
 }
 
+static int
+ds_svc_register_onport_walker(ds_svc_t *svc, void *arg)
+{
+	ASSERT(MUTEX_HELD(&ds_svcs.lock));
+
+	if (DS_SVC_ISFREE(svc))
+		return (0);
+
+	(void) ds_svc_register_onport(svc, arg);
+	return (0);
+}
+
 int
 ds_svc_register(ds_svc_t *svc, void *arg)
 {
@@ -2143,7 +2170,10 @@
 
 	DS_PORTSET_DUP(ports, svc->avail);
 	if (svc->flags & DSSF_ISCLIENT) {
-		ds_portset_del_active_clients(svc->cap.svc_id, &ports);
+		for (idx = 0; idx < DS_MAX_PORTS; idx++) {
+			if (DS_PORT_IN_SET(svc->tried, idx))
+				DS_PORTSET_DEL(ports, idx);
+		}
 	} else if (svc->state != DS_SVC_INACTIVE)
 		return (0);
 
@@ -2169,7 +2199,6 @@
 		if (ds_svc_register_onport(svc, port)) {
 			if ((svc->flags & DSSF_ISCLIENT) == 0)
 				break;
-			DS_PORTSET_DEL(svc->avail, idx);
 		}
 	}
 
@@ -2242,6 +2271,37 @@
 	return (0);
 }
 
+static void
+ds_set_port_ready(ds_port_t *port, uint16_t major, uint16_t minor)
+{
+	boolean_t was_ready;
+
+	mutex_enter(&port->lock);
+	was_ready = (port->state == DS_PORT_READY);
+	if (!was_ready) {
+		port->state = DS_PORT_READY;
+		port->ver.major = major;
+		port->ver.minor = minor;
+	}
+	mutex_exit(&port->lock);
+
+	if (!was_ready) {
+
+		/*
+		 * The port came up, so update all the services
+		 * with this information. Follow that up with an
+		 * attempt to register any service that is not
+		 * already registered.
+		 */
+		mutex_enter(&ds_svcs.lock);
+
+		(void) ds_walk_svcs(ds_svc_port_up, port);
+		(void) ds_walk_svcs(ds_svc_register_onport_walker, port);
+
+		mutex_exit(&ds_svcs.lock);
+	}
+}
+
 ds_svc_t *
 ds_alloc_svc(void)
 {
@@ -3175,37 +3235,6 @@
 	return (0);
 }
 
-static void
-ds_portset_del_active_clients(char *service, ds_portset_t *portsp)
-{
-	ds_portset_t ports;
-	int idx;
-	ds_svc_t *svc;
-
-	ASSERT(MUTEX_HELD(&ds_svcs.lock));
-
-	DS_PORTSET_DUP(ports, *portsp);
-	for (idx = 0; idx < ds_svcs.maxsvcs; idx++) {
-		svc = ds_svcs.tbl[idx];
-		if (DS_SVC_ISFREE(svc))
-			continue;
-		if (strcmp(svc->cap.svc_id, service) == 0 &&
-		    (svc->flags & DSSF_ISCLIENT) != 0 &&
-		    svc->state != DS_SVC_INACTIVE &&
-		    svc->port != NULL) {
-			DS_PORTSET_DEL(ports, PORTID(svc->port));
-		}
-	}
-
-	/*
-	 * Never send a client reg req to the SP.
-	 */
-	if (ds_sp_port_id != DS_PORTID_INVALID) {
-		DS_PORTSET_DEL(ports, ds_sp_port_id);
-	}
-	DS_PORTSET_DUP(*portsp, ports);
-}
-
 /*
  * After an UNREG REQ, check if this is a client service with multiple
  * handles.  If it is, then we can eliminate this entry.
--- a/usr/src/uts/sun4v/io/vsw.c	Mon Jul 06 09:30:27 2009 -0400
+++ b/usr/src/uts/sun4v/io/vsw.c	Tue Jul 07 10:00:27 2009 -0400
@@ -80,7 +80,7 @@
 static	int vsw_unattach(vsw_t *vswp);
 static	int vsw_get_md_physname(vsw_t *, md_t *, mde_cookie_t, char *);
 static	int vsw_get_md_smodes(vsw_t *, md_t *, mde_cookie_t, uint8_t *);
-static	int vsw_mod_cleanup(void);
+void vsw_destroy_rxpools(void *);
 
 /* MDEG routines */
 static	int vsw_mdeg_register(vsw_t *vswp);
@@ -186,6 +186,8 @@
 int	vsw_ldc_delay = 1000;		/* 1 ms delay for ldc_close() */
 boolean_t vsw_ldc_rxthr_enabled = B_TRUE;	/* LDC Rx thread enabled */
 boolean_t vsw_ldc_txthr_enabled = B_TRUE;	/* LDC Tx thread enabled */
+int	vsw_rxpool_cleanup_delay = 100000;	/* 100ms */
+
 
 uint32_t	vsw_fdb_nchains = 8;	/* # of chains in fdb hash table */
 uint32_t	vsw_vlan_nchains = 4;	/* # of chains in vlan id hash table */
@@ -352,7 +354,6 @@
  * Linked list of "vsw_t" structures - one per instance.
  */
 vsw_t		*vsw_head = NULL;
-vio_mblk_pool_t	*vsw_rx_poolp = NULL;
 krwlock_t	vsw_rw;
 
 /*
@@ -485,10 +486,6 @@
 {
 	int status;
 
-	status = vsw_mod_cleanup();
-	if (status != 0)
-		return (status);
-
 	status = mod_remove(&modlinkage);
 	if (status != 0)
 		return (status);
@@ -594,6 +591,17 @@
 
 	progress |= PROG_taskq;
 
+	(void) snprintf(qname, TASKQ_NAMELEN, "vsw_rxp_taskq%d",
+	    vswp->instance);
+	if ((vswp->rxp_taskq = ddi_taskq_create(vswp->dip, qname, 1,
+	    TASKQ_DEFAULTPRI, 0)) == NULL) {
+		cmn_err(CE_WARN, "!vsw%d: Unable to create rxp task queue",
+		    vswp->instance);
+		goto vsw_attach_fail;
+	}
+
+	progress |= PROG_rxp_taskq;
+
 	/* prevent auto-detaching */
 	if (ddi_prop_update_int(DDI_DEV_T_NONE, vswp->dip,
 	    DDI_NO_AUTODETACH, 1) != DDI_SUCCESS) {
@@ -716,7 +724,6 @@
 static int
 vsw_unattach(vsw_t *vswp)
 {
-	vio_mblk_pool_t		*poolp, *npoolp;
 	vsw_attach_progress_t	progress;
 
 	progress = vswp->attach_progress;
@@ -747,24 +754,6 @@
 	if (progress & PROG_mdreg) {
 		vsw_mdeg_unregister(vswp);
 		vsw_detach_ports(vswp);
-
-		/*
-		 * At this point, we attempt to free receive mblk pools that
-		 * couldn't be destroyed when the ports were detached; if this
-		 * attempt also fails, we hook up the pool(s) to the module so
-		 * they can be cleaned up in _fini().
-		 */
-		poolp = vswp->rxh;
-		while (poolp != NULL) {
-			npoolp = vswp->rxh = poolp->nextp;
-			if (vio_destroy_mblks(poolp) != 0) {
-				WRITE_ENTER(&vsw_rw);
-				poolp->nextp = vsw_rx_poolp;
-				vsw_rx_poolp = poolp;
-				RW_EXIT(&vsw_rw);
-			}
-			poolp = npoolp;
-		}
 		progress &= ~PROG_mdreg;
 	}
 
@@ -796,6 +785,17 @@
 	}
 
 	/*
+	 * We now destroy the taskq used to clean up rx mblk pools that
+	 * couldn't be destroyed when the ports/channels were detached.
+	 * We implicitly wait for those tasks to complete in
+	 * ddi_taskq_destroy().
+	 */
+	if (progress & PROG_rxp_taskq) {
+		ddi_taskq_destroy(vswp->rxp_taskq);
+		progress &= ~PROG_rxp_taskq;
+	}
+
+	/*
 	 * By now any pending tasks have finished and the underlying
 	 * ldc's have been destroyed, so its safe to delete the control
 	 * message taskq.
@@ -844,32 +844,19 @@
 	return (0);
 }
 
-/*
- * one time cleanup.
- */
-static int
-vsw_mod_cleanup(void)
+void
+vsw_destroy_rxpools(void *arg)
 {
-	vio_mblk_pool_t		*poolp, *npoolp;
-
-	/*
-	 * If any rx mblk pools are still in use, return
-	 * error and stop the module from unloading.
-	 */
-	WRITE_ENTER(&vsw_rw);
-	poolp = vsw_rx_poolp;
+	vio_mblk_pool_t	*poolp = (vio_mblk_pool_t *)arg;
+	vio_mblk_pool_t	*npoolp;
+
 	while (poolp != NULL) {
-		npoolp = vsw_rx_poolp = poolp->nextp;
-		if (vio_destroy_mblks(poolp) != 0) {
-			vsw_rx_poolp = poolp;
-			RW_EXIT(&vsw_rw);
-			return (EBUSY);
+		npoolp =  poolp->nextp;
+		while (vio_destroy_mblks(poolp) != 0) {
+			drv_usecwait(vsw_rxpool_cleanup_delay);
 		}
 		poolp = npoolp;
 	}
-	RW_EXIT(&vsw_rw);
-
-	return (0);
 }
 
 /*
--- a/usr/src/uts/sun4v/io/vsw_ldc.c	Mon Jul 06 09:30:27 2009 -0400
+++ b/usr/src/uts/sun4v/io/vsw_ldc.c	Tue Jul 07 10:00:27 2009 -0400
@@ -209,7 +209,7 @@
 extern void vsw_publish_macaddr(vsw_t *vswp, vsw_port_t *portp);
 extern int vsw_mac_client_init(vsw_t *vswp, vsw_port_t *port, int type);
 extern void vsw_mac_client_cleanup(vsw_t *vswp, vsw_port_t *port, int type);
-
+extern void vsw_destroy_rxpools(void *arg);
 
 #define	VSW_NUM_VMPOOLS		3	/* number of vio mblk pools */
 
@@ -865,6 +865,7 @@
 	vsw_ldc_list_t	*ldcl = &port->p_ldclist;
 	int 		rv;
 	int		retries = 0;
+	vio_mblk_pool_t *fvmp = NULL;
 
 	prev_ldcp = ldcl->head;
 	for (; (ldcp = prev_ldcp) != NULL; prev_ldcp = ldcp->ldc_next) {
@@ -931,13 +932,17 @@
 
 
 	/*
-	 * Most likely some mblks are still in use and
-	 * have not been returned to the pool. These mblks are
-	 * added to the pool that is maintained in the device instance.
-	 * Another attempt will be made to destroy the pool
-	 * when the device detaches.
+	 * If we can't destroy all the rx pools for this channel, dispatch
+	 * a task to retry and clean up those rx pools. Note that we don't
+	 * need to wait for the task to complete. If the vsw device itself
+	 * gets detached (vsw_detach()), it will wait for the task to complete
+	 * implicitly in ddi_taskq_destroy().
 	 */
-	vio_destroy_multipools(&ldcp->vmp, &vswp->rxh);
+	vio_destroy_multipools(&ldcp->vmp, &fvmp);
+	if (fvmp != NULL) {
+		(void) ddi_taskq_dispatch(vswp->rxp_taskq,
+		    vsw_destroy_rxpools, fvmp, DDI_SLEEP);
+	}
 
 	/* unlink it from the list */
 	prev_ldcp = ldcp->ldc_next;
@@ -1606,11 +1611,22 @@
 	vsw_t		*vswp = ldcp->ldc_vswp;
 	vsw_port_t	*port;
 	vsw_ldc_list_t	*ldcl;
+	vio_mblk_pool_t *fvmp = NULL;
 
 	D1(vswp, "%s: enter", __func__);
 
-	/* free receive mblk pools for the channel */
-	vio_destroy_multipools(&ldcp->vmp, &vswp->rxh);
+	/*
+	 * If we can't destroy all the rx pools for this channel, dispatch
+	 * a task to retry and clean up those rx pools. Note that we don't
+	 * need to wait for the task to complete. If the vsw device itself
+	 * gets detached (vsw_detach()), it will wait for the task to complete
+	 * implicitly in ddi_taskq_destroy().
+	 */
+	vio_destroy_multipools(&ldcp->vmp, &fvmp);
+	if (fvmp != NULL) {
+		(void) ddi_taskq_dispatch(vswp->rxp_taskq,
+		    vsw_destroy_rxpools, fvmp, DDI_SLEEP);
+	}
 
 	port = ldcp->ldc_port;
 	ldcl = &port->p_ldclist;
--- a/usr/src/uts/sun4v/sys/vsw.h	Mon Jul 06 09:30:27 2009 -0400
+++ b/usr/src/uts/sun4v/sys/vsw.h	Tue Jul 07 10:00:27 2009 -0400
@@ -99,9 +99,10 @@
 	PROG_fdb = 0x04,
 	PROG_mfdb = 0x08,
 	PROG_taskq = 0x10,
-	PROG_swmode = 0x20,
-	PROG_macreg = 0x40,
-	PROG_mdreg = 0x80
+	PROG_rxp_taskq = 0x20,
+	PROG_swmode = 0x40,
+	PROG_macreg = 0x80,
+	PROG_mdreg = 0x100
 } vsw_attach_progress_t;
 
 /*
@@ -143,7 +144,7 @@
 	mod_hash_t		*mfdb;		/* multicast FDB */
 	krwlock_t		mfdbrw;		/* rwlock for mFDB */
 
-	vio_mblk_pool_t		*rxh;		/* Receive pool handle */
+	ddi_taskq_t		*rxp_taskq;	/* VIO rx pool taskq */
 	void			(*vsw_switch_frame)
 					(struct vsw *, mblk_t *, int,
 					vsw_port_t *, mac_resource_handle_t);