changeset 5668:7066e93e6b89 onnv_80

6627804 Repeated attach/detach to ISM segment fails with ENOMEM for 32bit program
author mec
date Mon, 10 Dec 2007 22:14:23 -0800
parents 84c56d89ad88
children f4e076788720
files usr/src/uts/common/sys/vmsystm.h usr/src/uts/common/vm/as.h usr/src/uts/common/vm/vm_as.c usr/src/uts/i86pc/vm/vm_machdep.c usr/src/uts/sun4/vm/vm_dep.c usr/src/uts/sun4u/vm/mach_vm_dep.c usr/src/uts/sun4v/vm/mach_vm_dep.c
diffstat 7 files changed, 263 insertions(+), 127 deletions(-)
--- a/usr/src/uts/common/sys/vmsystm.h	Mon Dec 10 19:38:49 2007 -0800
+++ b/usr/src/uts/common/sys/vmsystm.h	Mon Dec 10 22:14:23 2007 -0800
@@ -19,7 +19,7 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -124,6 +124,9 @@
 
 extern	int valid_va_range(caddr_t *basep, size_t *lenp, size_t minlen,
 		int dir);
+extern	int valid_va_range_aligned(caddr_t *basep, size_t *lenp,
+    size_t minlen, int dir, size_t align, size_t redzone, size_t off);
+
 extern	int valid_usr_range(caddr_t, size_t, uint_t, struct as *, caddr_t);
 extern	int useracc(void *, size_t, int);
 extern	size_t map_pgsz(int maptype, struct proc *p, caddr_t addr, size_t len,
--- a/usr/src/uts/common/vm/as.h	Mon Dec 10 19:38:49 2007 -0800
+++ b/usr/src/uts/common/vm/as.h	Mon Dec 10 22:14:23 2007 -0800
@@ -19,7 +19,7 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -265,6 +265,10 @@
 void	as_purge(struct as *as);
 int	as_gap(struct as *as, size_t minlen, caddr_t *basep, size_t *lenp,
 		uint_t flags, caddr_t addr);
+int	as_gap_aligned(struct as *as, size_t minlen, caddr_t *basep,
+	    size_t *lenp, uint_t flags, caddr_t addr, size_t align,
+	    size_t redzone, size_t off);
+
 int	as_memory(struct as *as, caddr_t *basep, size_t *lenp);
 size_t	as_swapout(struct as *as);
 int	as_incore(struct as *as, caddr_t addr, size_t size, char *vec,
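
The new as_gap_aligned() entry point exists because the old map_addr()
strategy padded the request before searching: len was grown by align_amount
plus two redzone pages and the result trimmed afterward, so every search
demanded a hole substantially larger than the mapping itself.  In a crowded
32-bit address space that padding is the failure mode behind the bug synopsis
above: repeated ISM attach/detach ran out of qualifying holes even when an
exactly-fitting, correctly aligned one existed.  A minimal userland sketch of
the difference, assuming an example 256M ISM segment with 4M alignment:

    /*
     * Illustrative sketch (not changeset code): compare the minimum hole
     * the old map_addr() search demanded (len inflated by the alignment
     * and two redzone pages) with what the new as_gap_aligned() search
     * needs (len plus two redzone pages; alignment is handled inside the
     * search).  Sizes are assumed example values.
     */
    #include <stdio.h>

    #define PAGESIZE        4096UL
    #define MMU_PAGESIZE4M  (4UL * 1024 * 1024)

    int
    main(void)
    {
            unsigned long len = 256UL * 1024 * 1024;  /* example ISM segment */
            unsigned long old_need = len + MMU_PAGESIZE4M + 2 * PAGESIZE;
            unsigned long new_need = len + 2 * PAGESIZE;

            printf("old search needed %lu bytes of hole\n", old_need);
            printf("new search needs %lu bytes of hole\n", new_need);
            return (0);
    }
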
--- a/usr/src/uts/common/vm/vm_as.c	Mon Dec 10 19:38:49 2007 -0800
+++ b/usr/src/uts/common/vm/vm_as.c	Mon Dec 10 22:14:23 2007 -0800
@@ -1815,7 +1815,12 @@
 }
 
 /*
- * Find a hole of at least size minlen within [base, base + len).
+ * Find a hole within [*basep, *basep + *lenp), which contains a mappable
+ * range of addresses at least "minlen" long, where the base of the range is
+ * at "off" phase from an "align" boundary and there is space for a
+ * "redzone"-sized redzone on either side of the range.  Thus,
+ * if align is 4M and off is 16k, the user wants a hole which will start
+ * 16k into a 4M page.
  *
  * If flags specifies AH_HI, the hole will have the highest possible address
  * in the range.  We use the as->a_lastgap field to figure out where to
@@ -1825,15 +1830,14 @@
  *
  * If flags specifies AH_CONTAIN, the hole will contain the address addr.
  *
- * If an adequate hole is found, base and len are set to reflect the part of
- * the hole that is within range, and 0 is returned, otherwise,
- * -1 is returned.
+ * If an adequate hole is found, *basep and *lenp are set to reflect the part of
+ * the hole that is within range, and 0 is returned. On failure, -1 is returned.
  *
  * NOTE: This routine is not correct when base+len overflows caddr_t.
  */
 int
-as_gap(struct as *as, size_t minlen, caddr_t *basep, size_t *lenp, uint_t flags,
-    caddr_t addr)
+as_gap_aligned(struct as *as, size_t minlen, caddr_t *basep, size_t *lenp,
+    uint_t flags, caddr_t addr, size_t align, size_t redzone, size_t off)
 {
 	caddr_t lobound = *basep;
 	caddr_t hibound = lobound + *lenp;
@@ -1847,7 +1851,8 @@
 	save_len = *lenp;
 	AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
 	if (AS_SEGFIRST(as) == NULL) {
-		if (valid_va_range(basep, lenp, minlen, flags & AH_DIR)) {
+		if (valid_va_range_aligned(basep, lenp, minlen, flags & AH_DIR,
+		    align, redzone, off)) {
 			AS_LOCK_EXIT(as, &as->a_lock);
 			return (0);
 		} else {
@@ -1920,8 +1925,8 @@
 		 */
 		*basep = lo;
 		*lenp = hi - lo;
-		if (valid_va_range(basep, lenp, minlen,
-		    forward ? AH_LO : AH_HI) &&
+		if (valid_va_range_aligned(basep, lenp, minlen,
+		    forward ? AH_LO : AH_HI, align, redzone, off) &&
 		    ((flags & AH_CONTAIN) == 0 ||
 		    (*basep <= addr && *basep + *lenp > addr))) {
 			if (!forward)
@@ -1956,6 +1961,31 @@
 }
 
 /*
+ * Find a hole of at least size minlen within [*basep, *basep + *lenp).
+ *
+ * If flags specifies AH_HI, the hole will have the highest possible address
+ * in the range.  We use the as->a_lastgap field to figure out where to
+ * start looking for a gap.
+ *
+ * Otherwise, the gap will have the lowest possible address.
+ *
+ * If flags specifies AH_CONTAIN, the hole will contain the address addr.
+ *
+ * If an adequate hole is found, base and len are set to reflect the part of
+ * the hole that is within range, and 0 is returned, otherwise,
+ * -1 is returned.
+ *
+ * NOTE: This routine is not correct when base+len overflows caddr_t.
+ */
+int
+as_gap(struct as *as, size_t minlen, caddr_t *basep, size_t *lenp, uint_t flags,
+    caddr_t addr)
+{
+
+	return (as_gap_aligned(as, minlen, basep, lenp, flags, addr, 0, 0, 0));
+}
+
+/*
  * Return the next range within [base, base + len) that is backed
  * with "real memory".  Skip holes and non-seg_vn segments.
  * We're lazy and only return one segment at a time.
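
The "off" phase placement described above is done with the P2PHASEUP()
arithmetic inside valid_va_range_aligned() (see the i86pc and sun4 hunks
below).  A standalone sketch, with the macro mirrored from sys/sysmacros.h
and an assumed starting address, reproduces the 4M/16k example from the
comment:

    /*
     * P2PHASEUP() is mirrored from sys/sysmacros.h; the candidate base
     * address below is an assumed example.
     */
    #include <stdio.h>
    #include <stdint.h>

    /* Round x up to the next value "phase" bytes past an "align" boundary. */
    #define P2PHASEUP(x, align, phase) \
            ((phase) - (((phase) - (x)) & -(align)))

    int
    main(void)
    {
            uintptr_t align = 4UL * 1024 * 1024;        /* 4M */
            uintptr_t off = 16UL * 1024;                /* 16k phase */
            uintptr_t lo = 0x12345000UL;                /* assumed base */
            uintptr_t t = P2PHASEUP(lo, align, off);

            /* Prints 0x12404000, which is 0x4000 (16k) past a 4M boundary. */
            printf("%#lx -> %#lx (phase %#lx)\n", (unsigned long)lo,
                (unsigned long)t, (unsigned long)(t & (align - 1)));
            return (0);
    }
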
--- a/usr/src/uts/i86pc/vm/vm_machdep.c	Mon Dec 10 19:38:49 2007 -0800
+++ b/usr/src/uts/i86pc/vm/vm_machdep.c	Mon Dec 10 22:14:23 2007 -0800
@@ -629,15 +629,24 @@
  * choose an address for the user.  We will pick an address
  * range which is the highest available below userlimit.
  *
+ * Every mapping will have a redzone of a single page on either side of
+ * the request. This is done to leave one page unmapped between segments.
+ * This is not required, but it's useful for the user because if their
+ * program strays across a segment boundary, it will catch a fault
+ * immediately, making debugging a little easier.  Currently the redzone
+ * is mandatory.
+ *
  * addrp is a value/result parameter.
  *	On input it is a hint from the user to be used in a completely
  *	machine dependent fashion.  We decide to completely ignore this hint.
+ *	If MAP_ALIGN was specified, addrp contains the minimal alignment, which
+ *	must be some "power of two" multiple of pagesize.
  *
  *	On output it is NULL if no address can be found in the current
  *	processes address space or else an address that is currently
  *	not mapped for len bytes with a page of red zone on either side.
  *
- *	align is not needed on x86 (it's for viturally addressed caches)
+ *	vacalign is not needed on x86 (it's for virtually addressed caches)
  */
 /*ARGSUSED*/
 void
@@ -696,18 +705,10 @@
 #endif
 		slen = userlimit - base;
 
+	/* Make len be a multiple of PAGESIZE */
 	len = (len + PAGEOFFSET) & PAGEMASK;
 
 	/*
-	 * Redzone for each side of the request. This is done to leave
-	 * one page unmapped between segments. This is not required, but
-	 * it's useful for the user because if their program strays across
-	 * a segment boundary, it will catch a fault immediately making
-	 * debugging a little easier.
-	 */
-	len += 2 * MMU_PAGESIZE;
-
-	/*
 	 * figure out what the alignment should be
 	 *
 	 * XX64 -- is there an ELF_AMD64_MAXPGSZ or is it the same????
@@ -731,63 +732,86 @@
 	if ((flags & MAP_ALIGN) && ((uintptr_t)*addrp > align_amount))
 		align_amount = (uintptr_t)*addrp;
 
-	len += align_amount;
-
+	ASSERT(ISP2(align_amount));
+	ASSERT(align_amount == 0 || align_amount >= PAGESIZE);
+
+	off = off & (align_amount - 1);
 	/*
 	 * Look for a large enough hole starting below userlimit.
-	 * After finding it, use the upper part.  Addition of PAGESIZE
-	 * is for the redzone as described above.
+	 * After finding it, use the upper part.
 	 */
-	if (as_gap(as, len, &base, &slen, AH_HI, NULL) == 0) {
+	if (as_gap_aligned(as, len, &base, &slen, AH_HI, NULL, align_amount,
+	    PAGESIZE, off) == 0) {
 		caddr_t as_addr;
 
-		addr = base + slen - len + MMU_PAGESIZE;
+		/*
+		 * addr is the highest possible address to use since we have
+		 * a PAGESIZE redzone at the beginning and end.
+		 */
+		addr = base + slen - (PAGESIZE + len);
 		as_addr = addr;
 		/*
-		 * Round address DOWN to the alignment amount,
-		 * add the offset, and if this address is less
-		 * than the original address, add alignment amount.
+		 * Round address DOWN to the alignment amount and
+		 * add the offset in.
+		 * If addr is greater than as_addr, len would not be large
+		 * enough to include the redzone, so we must adjust down
+		 * by the alignment amount.
 		 */
 		addr = (caddr_t)((uintptr_t)addr & (~(align_amount - 1)));
-		addr += (uintptr_t)(off & (align_amount - 1));
-		if (addr < as_addr)
-			addr += align_amount;
-
-		ASSERT(addr <= (as_addr + align_amount));
+		addr += (uintptr_t)off;
+		if (addr > as_addr) {
+			addr -= align_amount;
+		}
+
+		ASSERT(addr > base);
+		ASSERT(addr + len < base + slen);
 		ASSERT(((uintptr_t)addr & (align_amount - 1)) ==
-		    ((uintptr_t)(off & (align_amount - 1))));
+		    ((uintptr_t)(off)));
 		*addrp = addr;
 	} else {
 		*addrp = NULL;	/* no more virtual space */
 	}
 }
 
+int valid_va_range_aligned_wraparound;
+
 /*
- * Determine whether [base, base+len] contains a valid range of
- * addresses at least minlen long. base and len are adjusted if
- * required to provide a valid range.
+ * Determine whether [*basep, *basep + *lenp) contains a mappable range of
+ * addresses at least "minlen" long, where the base of the range is at "off"
+ * phase from an "align" boundary and there is space for a "redzone"-sized
+ * redzone on either side of the range.  On success, 1 is returned and *basep
+ * and *lenp are adjusted to describe the acceptable range (including
+ * the redzone).  On failure, 0 is returned.
  */
 /*ARGSUSED3*/
 int
-valid_va_range(caddr_t *basep, size_t *lenp, size_t minlen, int dir)
+valid_va_range_aligned(caddr_t *basep, size_t *lenp, size_t minlen, int dir,
+    size_t align, size_t redzone, size_t off)
 {
 	uintptr_t hi, lo;
+	size_t tot_len;
+
+	ASSERT(align == 0 ? off == 0 : off < align);
+	ASSERT(ISP2(align));
+	ASSERT(align == 0 || align >= PAGESIZE);
 
 	lo = (uintptr_t)*basep;
 	hi = lo + *lenp;
+	tot_len = minlen + 2 * redzone; /* need at least this much space */
 
 	/*
 	 * If hi rolled over the top, try cutting back.
 	 */
 	if (hi < lo) {
-		if (0 - lo + hi < minlen)
-			return (0);
-		if (0 - lo < minlen)
-			return (0);
-		*lenp = 0 - lo;
-	} else if (hi - lo < minlen) {
+		*lenp = 0UL - lo - 1UL;
+		/* Track whether this ever happens; if it does, investigate why */
+		valid_va_range_aligned_wraparound++;
+		hi = lo + *lenp;
+	}
+	if (*lenp < tot_len) {
 		return (0);
 	}
+
 #if defined(__amd64)
 	/*
 	 * Deal with a possible hole in the address range between
@@ -803,9 +827,9 @@
 					/*
 					 * prefer lowest range
 					 */
-					if (hole_start - lo >= minlen)
+					if (hole_start - lo >= tot_len)
 						hi = hole_start;
-					else if (hi - hole_end >= minlen)
+					else if (hi - hole_end >= tot_len)
 						lo = hole_end;
 					else
 						return (0);
@@ -813,9 +837,9 @@
 					/*
 					 * prefer highest range
 					 */
-					if (hi - hole_end >= minlen)
+					if (hi - hole_end >= tot_len)
 						lo = hole_end;
-					else if (hole_start - lo >= minlen)
+					else if (hole_start - lo >= tot_len)
 						hi = hole_start;
 					else
 						return (0);
@@ -829,17 +853,41 @@
 		if (lo < hole_end)
 			lo = hole_end;
 	}
-
-	if (hi - lo < minlen)
+#endif
+
+	if (hi - lo < tot_len)
 		return (0);
 
+	if (align > 1) {
+		uintptr_t tlo = lo + redzone;
+		uintptr_t thi = hi - redzone;
+		tlo = (uintptr_t)P2PHASEUP(tlo, align, off);
+		if (tlo < lo + redzone) {
+			return (0);
+		}
+		if (thi < tlo || thi - tlo < minlen) {
+			return (0);
+		}
+	}
+
 	*basep = (caddr_t)lo;
 	*lenp = hi - lo;
-#endif
 	return (1);
 }
 
 /*
+ * Determine whether [*basep, *basep + *lenp) contains a mappable range of
+ * addresses at least "minlen" long.  On success, 1 is returned and *basep
+ * and *lenp are adjusted to describe the acceptable range.  On failure, 0
+ * is returned.
+ */
+int
+valid_va_range(caddr_t *basep, size_t *lenp, size_t minlen, int dir)
+{
+	return (valid_va_range_aligned(basep, lenp, minlen, dir, 0, 0, 0));
+}
+
+/*
  * Determine whether [addr, addr+len] are valid user addresses.
  */
 /*ARGSUSED*/
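
Once as_gap_aligned() returns a hole, the map_addr() code above picks the
highest start that leaves a PAGESIZE redzone at the top, rounds it down to
the alignment, adds the phase in, and backs off one alignment unit if the
result overshot.  A small userland model of that selection, with assumed
hole and request sizes, shows the three ASSERTs holding:

    /*
     * Userland model of the address selection above; the hole returned
     * by as_gap_aligned() is replaced with assumed example values.
     */
    #include <assert.h>
    #include <stdio.h>
    #include <stdint.h>

    #define PAGESIZE 4096UL

    int
    main(void)
    {
            uintptr_t base = 0x10000000UL;      /* assumed hole base */
            uintptr_t slen = 0x0c000000UL;      /* assumed hole length */
            uintptr_t len = 0x00800000UL;       /* 8M request */
            uintptr_t align = 0x00400000UL;     /* 4M alignment */
            uintptr_t off = 0x4000UL;           /* 16k phase */

            /* Highest usable start, leaving a PAGESIZE redzone at the top. */
            uintptr_t as_addr = base + slen - (PAGESIZE + len);
            uintptr_t addr = as_addr & ~(align - 1);    /* round DOWN */
            addr += off;                                /* add phase in */
            if (addr > as_addr)                         /* overshot */
                    addr -= align;

            assert(addr > base);                    /* low redzone fits */
            assert(addr + len < base + slen);       /* high redzone fits */
            assert((addr & (align - 1)) == off);    /* phase honored */
            printf("addr = %#lx\n", (unsigned long)addr);
            return (0);
    }
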
--- a/usr/src/uts/sun4/vm/vm_dep.c	Mon Dec 10 19:38:49 2007 -0800
+++ b/usr/src/uts/sun4/vm/vm_dep.c	Mon Dec 10 22:14:23 2007 -0800
@@ -258,39 +258,45 @@
 size_t  kpm_size;
 uchar_t kpm_size_shift;
 
+int valid_va_range_aligned_wraparound;
 /*
- * Determine whether [base, base+len] contains a mapable range of
- * addresses at least minlen long. base and len are adjusted if
- * required to provide a mapable range.
+ * Determine whether [*basep, *basep + *lenp) contains a mappable range of
+ * addresses at least "minlen" long, where the base of the range is at "off"
+ * phase from an "align" boundary and there is space for a "redzone"-sized
+ * redzone on either side of the range.  On success, 1 is returned and *basep
+ * and *lenp are adjusted to describe the acceptable range (including
+ * the redzone).  On failure, 0 is returned.
  */
-/* ARGSUSED */
 int
-valid_va_range(caddr_t *basep, size_t *lenp, size_t minlen, int dir)
+valid_va_range_aligned(caddr_t *basep, size_t *lenp, size_t minlen, int dir,
+    size_t align, size_t redzone, size_t off)
 {
 	caddr_t hi, lo;
+	size_t tot_len;
+
+	ASSERT(align == 0 ? off == 0 : off < align);
+	ASSERT(ISP2(align));
+	ASSERT(align == 0 || align >= PAGESIZE);
 
 	lo = *basep;
 	hi = lo + *lenp;
-
-	/*
-	 * If hi rolled over the top, try cutting back.
-	 */
-	if (hi < lo) {
-		size_t newlen = 0 - (uintptr_t)lo - 1l;
+	tot_len = minlen + 2 * redzone;	/* need at least this much space */
 
-		if (newlen + (uintptr_t)hi < minlen)
-			return (0);
-		if (newlen < minlen)
-			return (0);
-		*lenp = newlen;
-	} else if (hi - lo < minlen)
+	/* If hi rolled over the top try cutting back. */
+	if (hi < lo) {
+		*lenp = 0UL - (uintptr_t)lo - 1UL;
+		/* Track whether this ever happens; if it does, investigate why */
+		valid_va_range_aligned_wraparound++;
+		hi = lo + *lenp;
+	}
+	if (*lenp < tot_len) {
 		return (0);
+	}
 
 	/*
 	 * Deal with a possible hole in the address range between
 	 * hole_start and hole_end that should never be mapped by the MMU.
 	 */
-	hi = lo + *lenp;
 
 	if (lo < hole_start) {
 		if (hi > hole_start)
@@ -302,9 +308,9 @@
 					/*
 					 * prefer lowest range
 					 */
-					if (hole_start - lo >= minlen)
+					if (hole_start - lo >= tot_len)
 						hi = hole_start;
-					else if (hi - hole_end >= minlen)
+					else if (hi - hole_end >= tot_len)
 						lo = hole_end;
 					else
 						return (0);
@@ -312,9 +318,9 @@
 					/*
 					 * prefer highest range
 					 */
-					if (hi - hole_end >= minlen)
+					if (hi - hole_end >= tot_len)
 						lo = hole_end;
-					else if (hole_start - lo >= minlen)
+					else if (hole_start - lo >= tot_len)
 						hi = hole_start;
 					else
 						return (0);
@@ -327,13 +333,36 @@
 			lo = hole_end;
 	}
 
-	if (hi - lo < minlen)
+	/* Check if remaining length is too small */
+	if (hi - lo < tot_len) {
 		return (0);
-
+	}
+	if (align > 1) {
+		caddr_t tlo = lo + redzone;
+		caddr_t thi = hi - redzone;
+		tlo = (caddr_t)P2PHASEUP((uintptr_t)tlo, align, off);
+		if (tlo < lo + redzone) {
+			return (0);
+		}
+		if (thi < tlo || thi - tlo < minlen) {
+			return (0);
+		}
+	}
 	*basep = lo;
 	*lenp = hi - lo;
+	return (1);
+}
 
-	return (1);
+/*
+ * Determine whether [*basep, *basep + *lenp) contains a mappable range of
+ * addresses at least "minlen" long.  On success, 1 is returned and *basep
+ * and *lenp are adjusted to describe the acceptable range.  On failure, 0
+ * is returned.
+ */
+int
+valid_va_range(caddr_t *basep, size_t *lenp, size_t minlen, int dir)
+{
+	return (valid_va_range_aligned(basep, lenp, minlen, dir, 0, 0, 0));
 }
 
 /*
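
The sun4 copy above and the i86pc copy share the same core logic: require
minlen plus two redzones of total space, then check that an "off"-phased,
"align"-aligned base fits between the redzones.  A minimal userland model
of those checks (the hole_start/hole_end handling and the *basep/*lenp
update are omitted), under assumed inputs:

    /*
     * Model of the core checks in valid_va_range_aligned(); P2PHASEUP()
     * is mirrored from sys/sysmacros.h and the inputs are assumed.
     */
    #include <stdio.h>
    #include <stdint.h>

    #define P2PHASEUP(x, align, phase) \
            ((phase) - (((phase) - (x)) & -(align)))

    static int
    range_fits(uintptr_t lo, uintptr_t hi, uintptr_t minlen,
        uintptr_t align, uintptr_t redzone, uintptr_t off)
    {
            uintptr_t tot_len = minlen + 2 * redzone;

            if (hi - lo < tot_len)
                    return (0);
            if (align > 1) {
                    uintptr_t tlo = P2PHASEUP(lo + redzone, align, off);
                    uintptr_t thi = hi - redzone;

                    if (tlo < lo + redzone)     /* phase round-up wrapped */
                            return (0);
                    if (thi < tlo || thi - tlo < minlen)
                            return (0);
            }
            return (1);
    }

    int
    main(void)
    {
            /* 16M window, 1M request, 4M align, 16k phase, 8k redzone. */
            printf("fits: %d\n", range_fits(0x10000000UL, 0x11000000UL,
                0x100000UL, 0x400000UL, 0x2000UL, 0x4000UL));
            return (0);
    }
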
--- a/usr/src/uts/sun4u/vm/mach_vm_dep.c	Mon Dec 10 19:38:49 2007 -0800
+++ b/usr/src/uts/sun4u/vm/mach_vm_dep.c	Mon Dec 10 22:14:23 2007 -0800
@@ -151,10 +151,18 @@
  * lower level code must manage the translations so that this
  * is not seen here (at the cost of efficiency, of course).
  *
+ * Every mapping will have a redzone of a single page on either side of
+ * the request. This is done to leave one page unmapped between segments.
+ * This is not required, but it's useful for the user because if their
+ * program strays across a segment boundary, it will catch a fault
+ * immediately, making debugging a little easier.  Currently the redzone
+ * is mandatory.
+ *
  * addrp is a value/result parameter.
  *	On input it is a hint from the user to be used in a completely
  *	machine dependent fashion.  For MAP_ALIGN, addrp contains the
- *	minimal alignment.
+ *	minimal alignment, which must be some "power of two" multiple of
+ *	pagesize.
  *
  *	On output it is NULL if no address can be found in the current
  *	processes address space or else an address that is currently
@@ -190,18 +199,11 @@
 		    rctlproc_legacy[RLIMIT_STACK], p->p_rctls, p) + PAGEOFFSET)
 		    & PAGEMASK);
 	}
+
+	/* Make len be a multiple of PAGESIZE */
 	len = (len + PAGEOFFSET) & PAGEMASK;
 
 	/*
-	 * Redzone for each side of the request. This is done to leave
-	 * one page unmapped between segments. This is not required, but
-	 * it's useful for the user because if their program strays across
-	 * a segment boundary, it will catch a fault immediately making
-	 * debugging a little easier.
-	 */
-	len += (2 * PAGESIZE);
-
-	/*
 	 *  If the request is larger than the size of a particular
 	 *  mmu level, then we use that level to map the request.
 	 *  But this requires that both the virtual and the physical
@@ -219,11 +221,11 @@
 	}
 	if ((mmu_page_sizes == max_mmu_page_sizes) &&
 	    allow_largepage_alignment &&
-		(len >= MMU_PAGESIZE256M)) {	/* 256MB mappings */
+	    (len >= MMU_PAGESIZE256M)) {	/* 256MB mappings */
 		align_amount = MMU_PAGESIZE256M;
 	} else if ((mmu_page_sizes == max_mmu_page_sizes) &&
 	    allow_largepage_alignment &&
-		(len >= MMU_PAGESIZE32M)) {	/* 32MB mappings */
+	    (len >= MMU_PAGESIZE32M)) {	/* 32MB mappings */
 		align_amount = MMU_PAGESIZE32M;
 	} else if (len >= MMU_PAGESIZE4M) {  /* 4MB mappings */
 		align_amount = MMU_PAGESIZE4M;
@@ -239,7 +241,7 @@
 		 */
 		align_amount = ELF_SPARC_MAXPGSZ;
 		if ((flags & MAP_ALIGN) && ((uintptr_t)*addrp != 0) &&
-			((uintptr_t)*addrp < align_amount))
+		    ((uintptr_t)*addrp < align_amount))
 			align_amount = (uintptr_t)*addrp;
 	}
 
@@ -256,33 +258,43 @@
 	if ((flags & MAP_ALIGN) && ((uintptr_t)*addrp > align_amount)) {
 		align_amount = (uintptr_t)*addrp;
 	}
-	len += align_amount;
+
+	ASSERT(ISP2(align_amount));
+	ASSERT(align_amount == 0 || align_amount >= PAGESIZE);
 
 	/*
 	 * Look for a large enough hole starting below the stack limit.
-	 * After finding it, use the upper part.  Addition of PAGESIZE is
-	 * for the redzone as described above.
+	 * After finding it, use the upper part.
 	 */
 	as_purge(as);
-	if (as_gap(as, len, &base, &slen, AH_HI, NULL) == 0) {
+	off = off & (align_amount - 1);
+	if (as_gap_aligned(as, len, &base, &slen, AH_HI, NULL, align_amount,
+	    PAGESIZE, off) == 0) {
 		caddr_t as_addr;
 
-		addr = base + slen - len + PAGESIZE;
+		/*
+		 * addr is the highest possible address to use since we have
+		 * a PAGESIZE redzone at the beginning and end.
+		 */
+		addr = base + slen - (PAGESIZE + len);
 		as_addr = addr;
 		/*
-		 * Round address DOWN to the alignment amount,
-		 * add the offset, and if this address is less
-		 * than the original address, add alignment amount.
+		 * Round address DOWN to the alignment amount and
+		 * add the offset in.
+		 * If addr is greater than as_addr, len would not be large
+		 * enough to include the redzone, so we must adjust down
+		 * by the alignment amount.
 		 */
 		addr = (caddr_t)((uintptr_t)addr & (~(align_amount - 1l)));
-		addr += (long)(off & (align_amount - 1l));
-		if (addr < as_addr) {
-			addr += align_amount;
+		addr += (long)off;
+		if (addr > as_addr) {
+			addr -= align_amount;
 		}
 
-		ASSERT(addr <= (as_addr + align_amount));
+		ASSERT(addr > base);
+		ASSERT(addr + len < base + slen);
 		ASSERT(((uintptr_t)addr & (align_amount - 1l)) ==
-		    ((uintptr_t)(off & (align_amount - 1l))));
+		    ((uintptr_t)(off)));
 		*addrp = addr;
 
 #if defined(SF_ERRATA_57)
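
Note the off = off & (align_amount - 1) step before the as_gap_aligned()
call in these map_addr() implementations: the raw mapping offset is reduced
to its phase within one alignment unit, which is what the downstream
ASSERT(off < align) in valid_va_range_aligned() requires.  A one-line
sketch with an assumed raw offset:

    /* Phase normalization sketch; the raw offset value is assumed. */
    #include <stdio.h>
    #include <stdint.h>

    int
    main(void)
    {
            uintptr_t align_amount = 4UL * 1024 * 1024; /* 4M */
            uintptr_t off = 0x12404000UL;               /* raw mmap offset */

            off &= (align_amount - 1);  /* keep only the phase, now < align */
            printf("phase = %#lx\n", (unsigned long)off);   /* 0x4000 */
            return (0);
    }
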
--- a/usr/src/uts/sun4v/vm/mach_vm_dep.c	Mon Dec 10 19:38:49 2007 -0800
+++ b/usr/src/uts/sun4v/vm/mach_vm_dep.c	Mon Dec 10 22:14:23 2007 -0800
@@ -182,10 +182,18 @@
  * lower level code must manage the translations so that this
  * is not seen here (at the cost of efficiency, of course).
  *
+ * Every mapping will have a redzone of a single page on either side of
+ * the request. This is done to leave one page unmapped between segments.
+ * This is not required, but it's useful for the user because if their
+ * program strays across a segment boundary, it will catch a fault
+ * immediately, making debugging a little easier.  Currently the redzone
+ * is mandatory.
+ *
  * addrp is a value/result parameter.
  *	On input it is a hint from the user to be used in a completely
  *	machine dependent fashion.  For MAP_ALIGN, addrp contains the
- *	minimal alignment.
+ *	minimal alignment, which must be some "power of two" multiple of
+ *	pagesize.
  *
  *	On output it is NULL if no address can be found in the current
  *	processes address space or else an address that is currently
@@ -221,18 +229,10 @@
 		    rctlproc_legacy[RLIMIT_STACK], p->p_rctls, p) + PAGEOFFSET)
 		    & PAGEMASK);
 	}
+	/* Make len be a multiple of PAGESIZE */
 	len = (len + PAGEOFFSET) & PAGEMASK;
 
 	/*
-	 * Redzone for each side of the request. This is done to leave
-	 * one page unmapped between segments. This is not required, but
-	 * it's useful for the user because if their program strays across
-	 * a segment boundary, it will catch a fault immediately making
-	 * debugging a little easier.
-	 */
-	len += (2 * PAGESIZE);
-
-	/*
 	 *  If the request is larger than the size of a particular
 	 *  mmu level, then we use that level to map the request.
 	 *  But this requires that both the virtual and the physical
@@ -290,33 +290,43 @@
 	if ((flags & MAP_ALIGN) && ((uintptr_t)*addrp > align_amount)) {
 		align_amount = (uintptr_t)*addrp;
 	}
-	len += align_amount;
+
+	ASSERT(ISP2(align_amount));
+	ASSERT(align_amount == 0 || align_amount >= PAGESIZE);
 
 	/*
 	 * Look for a large enough hole starting below the stack limit.
-	 * After finding it, use the upper part.  Addition of PAGESIZE is
-	 * for the redzone as described above.
+	 * After finding it, use the upper part.
 	 */
 	as_purge(as);
-	if (as_gap(as, len, &base, &slen, AH_HI, NULL) == 0) {
+	off = off & (align_amount - 1);
+	if (as_gap_aligned(as, len, &base, &slen, AH_HI, NULL, align_amount,
+	    PAGESIZE, off) == 0) {
 		caddr_t as_addr;
 
-		addr = base + slen - len + PAGESIZE;
+		/*
+		 * addr is the highest possible address to use since we have
+		 * a PAGESIZE redzone at the beginning and end.
+		 */
+		addr = base + slen - (PAGESIZE + len);
 		as_addr = addr;
 		/*
-		 * Round address DOWN to the alignment amount,
-		 * add the offset, and if this address is less
-		 * than the original address, add alignment amount.
+		 * Round address DOWN to the alignment amount and
+		 * add the offset in.
+		 * If addr is greater than as_addr, len would not be large
+		 * enough to include the redzone, so we must adjust down
+		 * by the alignment amount.
 		 */
 		addr = (caddr_t)((uintptr_t)addr & (~(align_amount - 1l)));
-		addr += (long)(off & (align_amount - 1l));
-		if (addr < as_addr) {
-			addr += align_amount;
+		addr += (long)off;
+		if (addr > as_addr) {
+			addr -= align_amount;
 		}
 
-		ASSERT(addr <= (as_addr + align_amount));
+		ASSERT(addr > base);
+		ASSERT(addr + len < base + slen);
 		ASSERT(((uintptr_t)addr & (align_amount - 1l)) ==
-		    ((uintptr_t)(off & (align_amount - 1l))));
+		    ((uintptr_t)(off)));
 		*addrp = addr;
 
 	} else {