changeset 8684:ab80b1acd35d onnv_108

6797028 "NOTICE: lib_va_64_arena may not be optimized" message on the console
author Michael Corcoran <Michael.Corcoran@Sun.COM>
date Mon, 02 Feb 2009 21:49:19 -0800
parents 604c6d987af1
children 9b53b460639e
files usr/src/uts/common/os/main.c usr/src/uts/common/os/mmapobj.c usr/src/uts/common/sys/mmapobj.h
diffstat 3 files changed, 90 insertions(+), 91 deletions(-)
--- a/usr/src/uts/common/os/main.c	Mon Feb 02 20:48:11 2009 -0800
+++ b/usr/src/uts/common/os/main.c	Mon Feb 02 21:49:19 2009 -0800
@@ -555,14 +555,6 @@
 	lgrp_main_mp_init();
 
 	/*
-	 * Initialize lib_va arenas.  Needs to be done after start_other_cpus
-	 * so that USERLIMIT32 represents the final value after any
-	 * workarounds have been applied. Also need to be done before we
-	 * create any processes so that all libs can be cached.
-	 */
-	lib_va_init();
-
-	/*
 	 * After mp_init(), number of cpus are known (this is
 	 * true for the time being, when there are actually
 	 * hot pluggable cpus then this scheme  would not do).
--- a/usr/src/uts/common/os/mmapobj.c	Mon Feb 02 20:48:11 2009 -0800
+++ b/usr/src/uts/common/os/mmapobj.c	Mon Feb 02 21:49:19 2009 -0800
@@ -33,6 +33,7 @@
 #include <sys/cmn_err.h>
 #include <sys/cred.h>
 #include <sys/vmsystm.h>
+#include <sys/machsystm.h>
 #include <sys/debug.h>
 #include <vm/as.h>
 #include <vm/seg.h>
@@ -174,6 +175,7 @@
 	uint_t	mobjs_lib_va_find_delete;
 	uint_t	mobjs_lib_va_add_delay_delete;
 	uint_t	mobjs_lib_va_add_delete;
+	uint_t	mobjs_lib_va_create_failure;
 	uint_t	mobjs_min_align;
 #if defined(__sparc)
 	uint_t	mobjs_aout_uzero_fault;
@@ -251,6 +253,9 @@
 	(LIB_VA_MATCH_ID(arg1, arg2) && LIB_VA_MATCH_TIME(arg1, arg2))
 
 /*
+ * lib_va will be used for optimized allocation of address ranges for
+ * libraries, such that subsequent mappings of the same library will attempt
+ * to use the same VA as previous mappings of that library.
  * In order to map libraries at the same VA in many processes, we need to carve
  * out our own address space for them which is unique across many processes.
  * We use different arenas for 32 bit and 64 bit libraries.
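The cache described in the comment above is keyed on the identity of the mapped file, compared with LIB_VA_MATCH_ID and LIB_VA_MATCH_TIME so that a rebuilt library does not reuse a stale address. A minimal user-space sketch of that lookup idea follows; the names (lv_key, lv_ent, lv_lookup) are hypothetical, not the kernel's:

#include <stdint.h>
#include <string.h>

/* Illustrative key: the kernel compares vnode identity and mtime. */
struct lv_key {
	uint64_t dev;		/* device holding the library */
	uint64_t ino;		/* inode number */
	int64_t  mtime;		/* last modification time */
};

struct lv_ent {
	struct lv_key	key;
	void		*base;	/* cached base VA for this library */
	int		valid;
};

#define	LV_NENT	64
static struct lv_ent lv_cache[LV_NENT];

/* Return the cached base VA for this library, or NULL on a miss. */
static void *
lv_lookup(const struct lv_key *k)
{
	int i;

	for (i = 0; i < LV_NENT; i++) {
		if (lv_cache[i].valid &&
		    memcmp(&lv_cache[i].key, k, sizeof (*k)) == 0)
			return (lv_cache[i].base);
	}
	return (NULL);
}
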
@@ -263,6 +268,8 @@
 static vmem_t *lib_va_64_arena;
 uint_t lib_threshold = 20;	/* modifiable via /etc/system */
 
+static kmutex_t lib_va_init_mutex;	/* no need to initialize */
+
 /*
  * Number of 32 bit and 64 bit libraries in lib_va hash.
  */
@@ -270,66 +277,6 @@
 static uint_t libs_mapped_64 = 0;
 
 /*
- * Initialize the VA span of the lib_va arenas to about half of the VA space
- * of a user process.  These VAs will be used for optimized allocation of
- * libraries, such that subsequent mappings of the same library will attempt
- * to use the same VA as previous mappings of that library.
- */
-void
-lib_va_init(void)
-{
-	size_t start;
-	size_t end;
-	size_t len;
-	/*
-	 * On 32 bit sparc, the user stack and /lib/ld.so.1 will both live
-	 * above the end address that we choose.  On 32bit x86 only
-	 * /lib/ld.so.1 will live above the end address that we choose
-	 * because the user stack is at the bottom of the address space.
-	 *
-	 * We estimate the size of ld.so.1 to be 512K which leaves significant
-	 * room for growth without needing to change this value. Thus it is
-	 * safe for libraries to be mapped up to that address.
-	 *
-	 * If the length of ld.so.1 were to grow beyond 512K then
-	 * a library who has a reserved address in that range would always
-	 * fail to get that address and would have to call map_addr
-	 * to get an unused address range.  On DEBUG kernels, we will check
-	 * on the first use of lib_va that our address does not overlap
-	 * ld.so.1, and if it does, then we'll print a cmn_err message.
-	 */
-#if defined(__sparc)
-	end = _userlimit32 - DFLSSIZ - (512 * 1024);
-#elif defined(__i386) || defined(__amd64)
-	end = _userlimit32 - (512 * 1024);
-#else
-#error	"no recognized machine type is defined"
-#endif
-	len = end >> 1;
-	len = P2ROUNDUP(len, PAGESIZE);
-	start = end - len;
-	lib_va_32_arena = vmem_create("lib_va_32", (void *)start, len,
-	    PAGESIZE, NULL, NULL, NULL, 0, VM_NOSLEEP | VMC_IDENTIFIER);
-
-#if defined(_LP64)
-	/*
-	 * The user stack and /lib/ld.so.1 will both live above the end address
-	 * that we choose.  We estimate the size of a mapped ld.so.1 to be 2M
-	 * which leaves significant room for growth without needing to change
-	 * this value. Thus it is safe for libraries to be mapped up to
-	 * that address.  The same considerations for the size of ld.so.1 that
-	 * were mentioned above also apply here.
-	 */
-	end = _userlimit - DFLSSIZ - (2 * 1024 * 1024);
-	len = end >> 1;
-	len = P2ROUNDUP(len, PAGESIZE);
-	start = end - len;
-	lib_va_64_arena = vmem_create("lib_va_64", (void *)start, len,
-	    PAGESIZE, NULL, NULL, NULL, 0, VM_NOSLEEP | VMC_IDENTIFIER);
-#endif
-}
-
-/*
  * Free up the resources associated with lvp as well as lvp itself.
  * We also decrement the number of libraries mapped via a lib_va
  * cached virtual address.
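For reference, the span arithmetic that the deleted lib_va_init() performed can be worked in isolation: pick an end address below the user limit, take half of it as the arena length, round to a page, and place the start below the end. The stand-alone sketch below assumes example values for USERLIMIT32, DFLSSIZ, and PAGESIZE (illustrative, not any platform's real limits) and uses the conventional P2ROUNDUP definition:

#include <stdio.h>
#include <stddef.h>

/* Assumed example values; the real ones are platform-defined. */
#define	USERLIMIT32	0xfffff000UL		/* illustrative user limit */
#define	DFLSSIZ		(8UL * 1024 * 1024)	/* illustrative default stack */
#define	PAGESIZE	4096UL
#define	P2ROUNDUP(x, a)	(-(-(x) & -(a)))	/* round x up to multiple of a */

int
main(void)
{
	/* sparc flavor: leave room for the stack and a 512K ld.so.1. */
	size_t end = USERLIMIT32 - DFLSSIZ - (512 * 1024);
	size_t len = P2ROUNDUP(end >> 1, PAGESIZE);
	size_t start = end - len;

	(void) printf("arena [0x%zx, 0x%zx), len 0x%zx\n", start, end, len);
	return (0);
}
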
@@ -761,6 +708,9 @@
 	uint_t ma_flags = _MAP_LOW32;
 	caddr_t base = NULL;
 	vmem_t *model_vmem;
+	size_t lib_va_start;
+	size_t lib_va_end;
+	size_t lib_va_len;
 
 	ASSERT(lvpp != NULL);
 
@@ -779,33 +729,90 @@
 		ma_flags |= MAP_ALIGN;
 	}
 	if (use_lib_va) {
+		/*
+		 * The first time through, we need to set up the lib_va arenas.
+		 * We call map_addr to find a suitable range of memory to map
+		 * the given library, and we will set the highest address
+		 * in our vmem arena to the end of this address range.
+		 * We allow up to half of the address space to be used
+		 * for lib_va addresses but we do not prevent any allocations
+		 * in this range from other allocation paths.
+		 */
+		if (lib_va_64_arena == NULL && model == DATAMODEL_LP64) {
+			mutex_enter(&lib_va_init_mutex);
+			if (lib_va_64_arena == NULL) {
+				base = (caddr_t)align;
+				as_rangelock(as);
+				map_addr(&base, len, 0, 1, ma_flags);
+				as_rangeunlock(as);
+				if (base == NULL) {
+					mutex_exit(&lib_va_init_mutex);
+					MOBJ_STAT_ADD(lib_va_create_failure);
+					goto nolibva;
+				}
+				lib_va_end = (size_t)base + len;
+				lib_va_len = lib_va_end >> 1;
+				lib_va_len = P2ROUNDUP(lib_va_len, PAGESIZE);
+				lib_va_start = lib_va_end - lib_va_len;
+
+				/*
+				 * Need to make sure we avoid the address hole.
+				 * We know lib_va_end is valid but we need to
+				 * make sure lib_va_start is as well.
+				 */
+				if ((lib_va_end > (size_t)hole_end) &&
+				    (lib_va_start < (size_t)hole_end)) {
+					lib_va_start = P2ROUNDUP(
+					    (size_t)hole_end, PAGESIZE);
+					lib_va_len = lib_va_end - lib_va_start;
+				}
+				lib_va_64_arena = vmem_create("lib_va_64",
+				    (void *)lib_va_start, lib_va_len, PAGESIZE,
+				    NULL, NULL, NULL, 0,
+				    VM_NOSLEEP | VMC_IDENTIFIER);
+				if (lib_va_64_arena == NULL) {
+					mutex_exit(&lib_va_init_mutex);
+					goto nolibva;
+				}
+			}
+			model_vmem = lib_va_64_arena;
+			mutex_exit(&lib_va_init_mutex);
+		} else if (lib_va_32_arena == NULL &&
+		    model == DATAMODEL_ILP32) {
+			mutex_enter(&lib_va_init_mutex);
+			if (lib_va_32_arena == NULL) {
+				base = (caddr_t)align;
+				as_rangelock(as);
+				map_addr(&base, len, 0, 1, ma_flags);
+				as_rangeunlock(as);
+				if (base == NULL) {
+					mutex_exit(&lib_va_init_mutex);
+					MOBJ_STAT_ADD(lib_va_create_failure);
+					goto nolibva;
+				}
+				lib_va_end = (size_t)base + len;
+				lib_va_len = lib_va_end >> 1;
+				lib_va_len = P2ROUNDUP(lib_va_len, PAGESIZE);
+				lib_va_start = lib_va_end - lib_va_len;
+				lib_va_32_arena = vmem_create("lib_va_32",
+				    (void *)lib_va_start, lib_va_len, PAGESIZE,
+				    NULL, NULL, NULL, 0,
+				    VM_NOSLEEP | VMC_IDENTIFIER);
+				if (lib_va_32_arena == NULL) {
+					mutex_exit(&lib_va_init_mutex);
+					goto nolibva;
+				}
+			}
+			model_vmem = lib_va_32_arena;
+			mutex_exit(&lib_va_init_mutex);
+		}
+
 		if (model == DATAMODEL_LP64 || libs_mapped_32 < lib_threshold) {
 			base = vmem_xalloc(model_vmem, len, align, 0, 0, NULL,
 			    NULL, VM_NOSLEEP | VM_ENDALLOC);
 			MOBJ_STAT_ADD(alloc_vmem);
 		}
-#ifdef DEBUG
-		/*
-		 * Check to see if we've run into ld.so.1.
-		 * If this is the first library we've mapped and we can not
-		 * use our reserved address space, then it's likely that
-		 * ld.so.1 is occupying some of this space and the
-		 * model_vmem arena bounds need to be changed.  If we've run
-		 * into something else besides ld.so.1 we'll see this message
-		 * on the first use of mmapobj and should ignore the message.
-		 */
-		if (base != NULL && libs_mapped_32 == 0 &&
-		    model == DATAMODEL_ILP32 &&
-		    as_gap(as, len, &base, &len, 0, NULL)) {
-			cmn_err(CE_NOTE,
-			    "lib_va_32_arena may not be optimized");
-		} else if (base != NULL && libs_mapped_64 == 0 &&
-		    model == DATAMODEL_LP64 &&
-		    as_gap(as, len, &base, &len, 0, NULL)) {
-			cmn_err(CE_NOTE,
-			    "lib_va_64_arena may not be optimized");
-		}
-#endif
+
 		/*
 		 * Even if the address fails to fit in our address space,
 		 * or we can't use a reserved address,
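The initialization above is a double-checked lock: a racy read of the arena pointer keeps the common path off lib_va_init_mutex, and the re-check under the mutex guarantees vmem_create() runs at most once. Below is a condensed user-space sketch of the same shape, including the hole-straddle clamp; the pthread primitives, the arena_create() stub, and the hole_end parameter stand in for the kernel's and are assumptions:

#include <pthread.h>
#include <stddef.h>

#define	PAGESIZE	4096UL
#define	P2ROUNDUP(x, a)	(-(-(x) & -(a)))

static void *arena;		/* NULL until first use */
static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for vmem_create(): just records the span it was given. */
static void *
arena_create(size_t start, size_t len)
{
	static size_t span[2];

	span[0] = start;
	span[1] = len;
	return (span);
}

static void *
arena_get(size_t map_end, size_t hole_end)
{
	/* Unlocked check is only an optimization; the re-check decides. */
	if (arena == NULL) {
		(void) pthread_mutex_lock(&init_lock);
		if (arena == NULL) {
			size_t len = P2ROUNDUP(map_end >> 1, PAGESIZE);
			size_t start = map_end - len;

			/* Clamp so the span does not straddle the hole. */
			if (map_end > hole_end && start < hole_end) {
				start = P2ROUNDUP(hole_end, PAGESIZE);
				len = map_end - start;
			}
			arena = arena_create(start, len);
		}
		(void) pthread_mutex_unlock(&init_lock);
	}
	return (arena);
}

On the failure paths the patch does not retry or block: it jumps to the new nolibva label and falls back to map_addr(), so a mapping always proceeds even when the arena cannot be created.
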
@@ -827,6 +834,7 @@
 		}
 	}
 
+nolibva:
 	as_rangelock(as);
 
 	/*
--- a/usr/src/uts/common/sys/mmapobj.h	Mon Feb 02 20:48:11 2009 -0800
+++ b/usr/src/uts/common/sys/mmapobj.h	Mon Feb 02 21:49:19 2009 -0800
@@ -38,7 +38,6 @@
 #define	LIBVA_CACHED_SEGS 3
 
 #ifdef _KERNEL
-extern void lib_va_init(void);
 extern void mmapobj_unmap(mmapobj_result_t *, int, int, ushort_t);
 #endif