changeset 21354:f03bf77d742d

IJ: OS-7753 THREAD_KPRI_RELEASE does nothing of the sort
commit aa451f1e54
Conflicts:
	usr/src/uts/common/fs/lxproc/lxpr_subr.c
author Patrick Mooney <pmooney@pfmooney.com>
date Wed, 24 Apr 2019 14:45:41 +0000
parents a351bcf0178b
children a71032f0620a
files usr/src/uts/common/brand/lx/os/lx_pid.c usr/src/uts/common/brand/lx/procfs/lx_prsubr.c usr/src/uts/common/disp/fss.c usr/src/uts/common/disp/sysdc.c usr/src/uts/common/disp/ts.c usr/src/uts/common/fs/proc/prsubr.c usr/src/uts/common/fs/ufs/ufs_directio.c usr/src/uts/common/os/bio.c usr/src/uts/common/os/condvar.c usr/src/uts/common/os/pid.c usr/src/uts/common/os/rwlock.c usr/src/uts/common/sys/fss.h usr/src/uts/common/sys/ia.h usr/src/uts/common/sys/thread.h usr/src/uts/common/sys/ts.h usr/src/uts/common/vm/page_lock.c usr/src/uts/i86pc/ml/offsets.in usr/src/uts/intel/ia32/ml/lock_prim.s usr/src/uts/intel/ia32/os/syscall.c usr/src/uts/sfmmu/vm/hat_sfmmu.c usr/src/uts/sparc/os/syscall.c usr/src/uts/sparc/v9/ml/lock_prim.s usr/src/uts/sun4/ml/offsets.in
diffstat 23 files changed, 359 insertions(+), 687 deletions(-)
--- a/usr/src/uts/common/brand/lx/os/lx_pid.c	Thu May 16 19:23:22 2019 +0000
+++ b/usr/src/uts/common/brand/lx/os/lx_pid.c	Wed Apr 24 14:45:41 2019 +0000
@@ -22,7 +22,7 @@
 /*
  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
- * Copyright (c) 2017, Joyent, Inc.
+ * Copyright 2019 Joyent, Inc.
  */
 
 #include <sys/types.h>
@@ -366,7 +366,6 @@
 			goto retry;
 		} else {
 			p->p_proc_flag |= P_PR_LOCK;
-			THREAD_KPRI_REQUEST();
 		}
 	}
 
--- a/usr/src/uts/common/brand/lx/procfs/lx_prsubr.c	Thu May 16 19:23:22 2019 +0000
+++ b/usr/src/uts/common/brand/lx/procfs/lx_prsubr.c	Wed Apr 24 14:45:41 2019 +0000
@@ -21,7 +21,7 @@
 /*
  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
- * Copyright 2017 Joyent, Inc.
+ * Copyright 2019 Joyent, Inc.
  */
 
 /*
@@ -350,7 +350,6 @@
 	cv_signal(&pr_pid_cv[p->p_slot]);
 	p->p_proc_flag &= ~P_PR_LOCK;
 	mutex_exit(&p->p_lock);
-	THREAD_KPRI_RELEASE();
 }
 
 void
--- a/usr/src/uts/common/disp/fss.c	Thu May 16 19:23:22 2019 +0000
+++ b/usr/src/uts/common/disp/fss.c	Wed Apr 24 14:45:41 2019 +0000
@@ -21,7 +21,7 @@
 
 /*
  * Copyright (c) 1994, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2013, Joyent, Inc. All rights reserved.
+ * Copyright 2019 Joyent, Inc.
  */
 
 #include <sys/types.h>
@@ -1212,9 +1212,9 @@
 				 * If there is only one zone active on the pset
 				 * the above reduces to:
 				 *
-				 * 			zone_int_shares^2
+				 *			zone_int_shares^2
 				 * shusage = usage * ---------------------
-				 * 			kpj_shares^2
+				 *			kpj_shares^2
 				 *
 				 * If there's only one project active in the
 				 * zone this formula reduces to:
@@ -1373,8 +1373,6 @@
 		 */
 		if (t->t_cid != fss_cid)
 			goto next;
-		if ((fssproc->fss_flags & FSSKPRI) != 0)
-			goto next;
 
 		fssproj = FSSPROC2FSSPROJ(fssproc);
 		if (fssproj == NULL)
@@ -1889,7 +1887,7 @@
 	cpucaps_sc_init(&cfssproc->fss_caps);
 
 	cfssproc->fss_flags =
-	    pfssproc->fss_flags & ~(FSSKPRI | FSSBACKQ | FSSRESTORE);
+	    pfssproc->fss_flags & ~(FSSBACKQ | FSSRESTORE);
 	ct->t_cldata = (void *)cfssproc;
 	ct->t_schedflag |= TS_RUNQMATCH;
 	thread_unlock(pt);
@@ -1940,7 +1938,6 @@
 	fssproc->fss_timeleft = fss_quantum;
 	t->t_pri = fssproc->fss_umdpri;
 	ASSERT(t->t_pri >= 0 && t->t_pri <= fss_maxglobpri);
-	fssproc->fss_flags &= ~FSSKPRI;
 	THREAD_TRANSITION(t);
 
 	/*
@@ -2039,11 +2036,6 @@
 	fssproc->fss_nice = nice;
 	fss_newpri(fssproc, B_FALSE);
 
-	if ((fssproc->fss_flags & FSSKPRI) != 0) {
-		thread_unlock(t);
-		return (0);
-	}
-
 	fss_change_priority(t, fssproc);
 	thread_unlock(t);
 	return (0);
@@ -2158,7 +2150,7 @@
 		time_t swapout_time;
 
 		swapout_time = (ddi_get_lbolt() - t->t_stime) / hz;
-		if (INHERITED(t) || (fssproc->fss_flags & FSSKPRI)) {
+		if (INHERITED(t)) {
 			epri = (long)DISP_PRIO(t) + swapout_time;
 		} else {
 			/*
@@ -2190,7 +2182,6 @@
 static pri_t
 fss_swapout(kthread_t *t, int flags)
 {
-	fssproc_t *fssproc = FSSPROC(t);
 	long epri = -1;
 	proc_t *pp = ttoproc(t);
 	time_t swapin_time;
@@ -2198,7 +2189,6 @@
 	ASSERT(THREAD_LOCK_HELD(t));
 
 	if (INHERITED(t) ||
-	    (fssproc->fss_flags & FSSKPRI) ||
 	    (t->t_proc_flag & TP_LWPEXIT) ||
 	    (t->t_state & (TS_ZOMB|TS_FREE|TS_STOPPED|TS_ONPROC|TS_WAIT)) ||
 	    !(t->t_schedflag & TS_LOAD) ||
@@ -2241,16 +2231,11 @@
 }
 
 /*
- * If thread is currently at a kernel mode priority (has slept) and is
- * returning to the userland we assign it the appropriate user mode priority
- * and time quantum here.  If we're lowering the thread's priority below that
- * of other runnable threads then we will set runrun via cpu_surrender() to
- * cause preemption.
+ * Run swap-out checks when returning to userspace.
  */
 static void
 fss_trapret(kthread_t *t)
 {
-	fssproc_t *fssproc = FSSPROC(t);
 	cpu_t *cp = CPU;
 
 	ASSERT(THREAD_LOCK_HELD(t));
@@ -2258,20 +2243,6 @@
 	ASSERT(cp->cpu_dispthread == t);
 	ASSERT(t->t_state == TS_ONPROC);
 
-	t->t_kpri_req = 0;
-	if (fssproc->fss_flags & FSSKPRI) {
-		/*
-		 * If thread has blocked in the kernel
-		 */
-		THREAD_CHANGE_PRI(t, fssproc->fss_umdpri);
-		cp->cpu_dispatch_pri = DISP_PRIO(t);
-		ASSERT(t->t_pri >= 0 && t->t_pri <= fss_maxglobpri);
-		fssproc->fss_flags &= ~FSSKPRI;
-
-		if (DISP_MUST_SURRENDER(t))
-			cpu_surrender(t);
-	}
-
 	/*
 	 * Swapout lwp if the swapper is waiting for this thread to reach
 	 * a safe point.
@@ -2299,19 +2270,6 @@
 	ASSERT(t->t_state == TS_ONPROC);
 
 	/*
-	 * If preempted in the kernel, make sure the thread has a kernel
-	 * priority if needed.
-	 */
-	lwp = curthread->t_lwp;
-	if (!(fssproc->fss_flags & FSSKPRI) && lwp != NULL && t->t_kpri_req) {
-		fssproc->fss_flags |= FSSKPRI;
-		THREAD_CHANGE_PRI(t, minclsyspri);
-		ASSERT(t->t_pri >= 0 && t->t_pri <= fss_maxglobpri);
-		t->t_trapret = 1;	/* so that fss_trapret will run */
-		aston(t);
-	}
-
-	/*
 	 * This thread may be placed on wait queue by CPU Caps. In this case we
 	 * do not need to do anything until it is removed from the wait queue.
 	 * Do not enforce CPU caps on threads running at a kernel priority
@@ -2320,7 +2278,7 @@
 		(void) cpucaps_charge(t, &fssproc->fss_caps,
 		    CPUCAPS_CHARGE_ENFORCE);
 
-		if (!(fssproc->fss_flags & FSSKPRI) && CPUCAPS_ENFORCE(t))
+		if (CPUCAPS_ENFORCE(t))
 			return;
 	}
 
@@ -2329,6 +2287,7 @@
 	 * cannot be holding any kernel locks.
 	 */
 	ASSERT(t->t_schedflag & TS_DONT_SWAP);
+	lwp = ttolwp(t);
 	if (lwp != NULL && lwp->lwp_state == LWP_USER)
 		t->t_schedflag &= ~TS_DONT_SWAP;
 
@@ -2346,18 +2305,16 @@
 	if (t->t_schedctl && schedctl_get_nopreempt(t)) {
 		if (fssproc->fss_timeleft > -SC_MAX_TICKS) {
 			DTRACE_SCHED1(schedctl__nopreempt, kthread_t *, t);
-			if (!(fssproc->fss_flags & FSSKPRI)) {
-				/*
-				 * If not already remembered, remember current
-				 * priority for restoration in fss_yield().
-				 */
-				if (!(fssproc->fss_flags & FSSRESTORE)) {
-					fssproc->fss_scpri = t->t_pri;
-					fssproc->fss_flags |= FSSRESTORE;
-				}
-				THREAD_CHANGE_PRI(t, fss_maxumdpri);
-				t->t_schedflag |= TS_DONT_SWAP;
+			/*
+			 * If not already remembered, remember current
+			 * priority for restoration in fss_yield().
+			 */
+			if (!(fssproc->fss_flags & FSSRESTORE)) {
+				fssproc->fss_scpri = t->t_pri;
+				fssproc->fss_flags |= FSSRESTORE;
 			}
+			THREAD_CHANGE_PRI(t, fss_maxumdpri);
+			t->t_schedflag |= TS_DONT_SWAP;
 			schedctl_set_yield(t, 1);
 			setfrontdq(t);
 			return;
@@ -2374,15 +2331,12 @@
 		}
 	}
 
-	flags = fssproc->fss_flags & (FSSBACKQ | FSSKPRI);
+	flags = fssproc->fss_flags & FSSBACKQ;
 
 	if (flags == FSSBACKQ) {
 		fssproc->fss_timeleft = fss_quantum;
 		fssproc->fss_flags &= ~FSSBACKQ;
 		setbackdq(t);
-	} else if (flags == (FSSBACKQ | FSSKPRI)) {
-		fssproc->fss_flags &= ~FSSBACKQ;
-		setbackdq(t);
 	} else {
 		setfrontdq(t);
 	}
@@ -2404,12 +2358,7 @@
 	fssproc->fss_timeleft = fss_quantum;
 
 	fssproc->fss_flags &= ~FSSBACKQ;
-	/*
-	 * If previously were running at the kernel priority then keep that
-	 * priority and the fss_timeleft doesn't matter.
-	 */
-	if ((fssproc->fss_flags & FSSKPRI) == 0)
-		THREAD_CHANGE_PRI(t, fssproc->fss_umdpri);
+	THREAD_CHANGE_PRI(t, fssproc->fss_umdpri);
 
 	if (t->t_disp_time != ddi_get_lbolt())
 		setbackdq(t);
@@ -2418,8 +2367,7 @@
 }
 
 /*
- * Prepare thread for sleep. We reset the thread priority so it will run at the
- * kernel priority level when it wakes up.
+ * Prepare thread for sleep.
  */
 static void
 fss_sleep(kthread_t *t)
@@ -2437,31 +2385,6 @@
 	(void) CPUCAPS_CHARGE(t, &fssproc->fss_caps, CPUCAPS_CHARGE_ENFORCE);
 
 	fss_inactive(t);
-
-	/*
-	 * Assign a system priority to the thread and arrange for it to be
-	 * retained when the thread is next placed on the run queue (i.e.,
-	 * when it wakes up) instead of being given a new pri.  Also arrange
-	 * for trapret processing as the thread leaves the system call so it
-	 * will drop back to normal priority range.
-	 */
-	if (t->t_kpri_req) {
-		THREAD_CHANGE_PRI(t, minclsyspri);
-		fssproc->fss_flags |= FSSKPRI;
-		t->t_trapret = 1;	/* so that fss_trapret will run */
-		aston(t);
-	} else if (fssproc->fss_flags & FSSKPRI) {
-		/*
-		 * The thread has done a THREAD_KPRI_REQUEST(), slept, then
-	 * done THREAD_KPRI_RELEASE() (so now t_kpri_req is 0 again),
-		 * then slept again all without finishing the current system
-		 * call so trapret won't have cleared FSSKPRI
-		 */
-		fssproc->fss_flags &= ~FSSKPRI;
-		THREAD_CHANGE_PRI(t, fssproc->fss_umdpri);
-		if (DISP_MUST_SURRENDER(curthread))
-			cpu_surrender(t);
-	}
 	t->t_stime = ddi_get_lbolt();	/* time stamp for the swapper */
 }
 
@@ -2503,67 +2426,56 @@
 	 * Do not surrender CPU if running in the SYS class.
 	 */
 	if (CPUCAPS_ON()) {
-		cpucaps_enforce = cpucaps_charge(t,
-		    &fssproc->fss_caps, CPUCAPS_CHARGE_ENFORCE) &&
-		    !(fssproc->fss_flags & FSSKPRI);
+		cpucaps_enforce = cpucaps_charge(t, &fssproc->fss_caps,
+		    CPUCAPS_CHARGE_ENFORCE);
 	}
 
-	/*
-	 * A thread's execution time for threads running in the SYS class
-	 * is not tracked.
-	 */
-	if ((fssproc->fss_flags & FSSKPRI) == 0) {
-		/*
-		 * If thread is not in kernel mode, decrement its fss_timeleft
-		 */
-		if (--fssproc->fss_timeleft <= 0) {
-			pri_t new_pri;
+	if (--fssproc->fss_timeleft <= 0) {
+		pri_t new_pri;
 
-			/*
-			 * If we're doing preemption control and trying to
-			 * avoid preempting this thread, just note that the
-			 * thread should yield soon and let it keep running
-			 * (unless it's been a while).
-			 */
-			if (t->t_schedctl && schedctl_get_nopreempt(t)) {
-				if (fssproc->fss_timeleft > -SC_MAX_TICKS) {
-					DTRACE_SCHED1(schedctl__nopreempt,
-					    kthread_t *, t);
-					schedctl_set_yield(t, 1);
-					thread_unlock_nopreempt(t);
-					return;
-				}
+		/*
+		 * If we're doing preemption control and trying to avoid
+		 * preempting this thread, just note that the thread should
+		 * yield soon and let it keep running (unless it's been a
+		 * while).
+		 */
+		if (t->t_schedctl && schedctl_get_nopreempt(t)) {
+			if (fssproc->fss_timeleft > -SC_MAX_TICKS) {
+				DTRACE_SCHED1(schedctl__nopreempt,
+				    kthread_t *, t);
+				schedctl_set_yield(t, 1);
+				thread_unlock_nopreempt(t);
+				return;
 			}
-			fssproc->fss_flags &= ~FSSRESTORE;
+		}
+		fssproc->fss_flags &= ~FSSRESTORE;
 
-			fss_newpri(fssproc, B_TRUE);
-			new_pri = fssproc->fss_umdpri;
-			ASSERT(new_pri >= 0 && new_pri <= fss_maxglobpri);
+		fss_newpri(fssproc, B_TRUE);
+		new_pri = fssproc->fss_umdpri;
+		ASSERT(new_pri >= 0 && new_pri <= fss_maxglobpri);
 
-			/*
-			 * When the priority of a thread is changed, it may
-			 * be necessary to adjust its position on a sleep queue
-			 * or dispatch queue. The function thread_change_pri
-			 * accomplishes this.
-			 */
-			if (thread_change_pri(t, new_pri, 0)) {
-				if ((t->t_schedflag & TS_LOAD) &&
-				    (lwp = t->t_lwp) &&
-				    lwp->lwp_state == LWP_USER)
-					t->t_schedflag &= ~TS_DONT_SWAP;
-				fssproc->fss_timeleft = fss_quantum;
-			} else {
-				call_cpu_surrender = B_TRUE;
-			}
-		} else if (t->t_state == TS_ONPROC &&
-		    t->t_pri < t->t_disp_queue->disp_maxrunpri) {
-			/*
-			 * If there is a higher-priority thread which is
-			 * waiting for a processor, then thread surrenders
-			 * the processor.
-			 */
+		/*
+		 * When the priority of a thread is changed, it may be
+		 * necessary to adjust its position on a sleep queue or
+		 * dispatch queue. The function thread_change_pri accomplishes
+		 * this.
+		 */
+		if (thread_change_pri(t, new_pri, 0)) {
+			if ((t->t_schedflag & TS_LOAD) &&
+			    (lwp = t->t_lwp) &&
+			    lwp->lwp_state == LWP_USER)
+				t->t_schedflag &= ~TS_DONT_SWAP;
+			fssproc->fss_timeleft = fss_quantum;
+		} else {
 			call_cpu_surrender = B_TRUE;
 		}
+	} else if (t->t_state == TS_ONPROC &&
+	    t->t_pri < t->t_disp_queue->disp_maxrunpri) {
+		/*
+		 * If there is a higher-priority thread which is waiting for a
+		 * processor, then thread surrenders the processor.
+		 */
+		call_cpu_surrender = B_TRUE;
 	}
 
 	if (cpucaps_enforce && 2 * fssproc->fss_timeleft > fss_quantum) {
@@ -2618,32 +2530,13 @@
 	fssproc = FSSPROC(t);
 	fssproc->fss_flags &= ~FSSBACKQ;
 
-	if (fssproc->fss_flags & FSSKPRI) {
-		/*
-		 * If we already have a kernel priority assigned, then we
-		 * just use it.
-		 */
-		setbackdq(t);
-	} else if (t->t_kpri_req) {
-		/*
-		 * Give thread a priority boost if we were asked.
-		 */
-		fssproc->fss_flags |= FSSKPRI;
-		THREAD_CHANGE_PRI(t, minclsyspri);
+	/* Recalculate the priority. */
+	if (t->t_disp_time == ddi_get_lbolt()) {
+		setfrontdq(t);
+	} else {
+		fssproc->fss_timeleft = fss_quantum;
+		THREAD_CHANGE_PRI(t, fssproc->fss_umdpri);
 		setbackdq(t);
-		t->t_trapret = 1;	/* so that fss_trapret will run */
-		aston(t);
-	} else {
-		/*
-		 * Otherwise, we recalculate the priority.
-		 */
-		if (t->t_disp_time == ddi_get_lbolt()) {
-			setfrontdq(t);
-		} else {
-			fssproc->fss_timeleft = fss_quantum;
-			THREAD_CHANGE_PRI(t, fssproc->fss_umdpri);
-			setbackdq(t);
-		}
 	}
 }
 
--- a/usr/src/uts/common/disp/sysdc.c	Thu May 16 19:23:22 2019 +0000
+++ b/usr/src/uts/common/disp/sysdc.c	Wed Apr 24 14:45:41 2019 +0000
@@ -193,32 +193,6 @@
  *	flag.  This flag currently has no effect, but marks threads which
  *	do bulk processing.
  *
- * - t_kpri_req
- *
- *	The TS and FSS scheduling classes pay attention to t_kpri_req,
- *	which provides a simple form of priority inheritance for
- *	synchronization primitives (such as rwlocks held as READER) which
- *	cannot be traced to a unique thread.  The SDC class does not honor
- *	t_kpri_req, for a few reasons:
- *
- *	1.  t_kpri_req is notoriously inaccurate.  A measure of its
- *	    inaccuracy is that it needs to be cleared every time a thread
- *	    returns to user mode, because it is frequently non-zero at that
- *	    point.  This can happen because "ownership" of synchronization
- *	    primitives that use t_kpri_req can be silently handed off,
- *	    leaving no opportunity to will the t_kpri_req inheritance.
- *
- *	2.  Unlike in TS and FSS, threads in SDC *will* eventually run at
- *	    kernel priority.  This means that even if an SDC thread
- *	    is holding a synchronization primitive and running at low
- *	    priority, its priority will eventually be raised above 60,
- *	    allowing it to drive on and release the resource.
- *
- *	3.  The first consumer of SDC uses the taskq subsystem, which holds
- *	    a reader lock for the duration of the task's execution.  This
- *	    would mean that SDC threads would never drop below kernel
- *	    priority in practice, which defeats one of the purposes of SDC.
- *
  * - Why not FSS?
  *
  *	It might seem that the existing FSS scheduling class could solve
--- a/usr/src/uts/common/disp/ts.c	Thu May 16 19:23:22 2019 +0000
+++ b/usr/src/uts/common/disp/ts.c	Wed Apr 24 14:45:41 2019 +0000
@@ -21,11 +21,11 @@
 
 /*
  * Copyright (c) 1994, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2013, Joyent, Inc. All rights reserved.
+ * Copyright 2019 Joyent, Inc.
  */
 
 /*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
-/*	  All Rights Reserved  	*/
+/*	  All Rights Reserved	*/
 
 #include <sys/types.h>
 #include <sys/param.h>
@@ -229,7 +229,6 @@
 
 static void	ts_change_priority(kthread_t *, tsproc_t *);
 
-extern pri_t	ts_maxkmdpri;	/* maximum kernel mode ts priority */
 static pri_t	ts_maxglobpri;	/* maximum global priority used by ts class */
 static kmutex_t	ts_dptblock;	/* protects time sharing dispatch table */
 static kmutex_t	ts_list_lock[TS_LISTS];	/* protects tsproc lists */
@@ -541,8 +540,8 @@
  * to specified time-sharing priority.
  */
 static int
-ts_enterclass(kthread_t *t, id_t cid, void *parmsp,
-	cred_t *reqpcredp, void *bufp)
+ts_enterclass(kthread_t *t, id_t cid, void *parmsp, cred_t *reqpcredp,
+    void *bufp)
 {
 	tsparms_t	*tsparmsp = (tsparms_t *)parmsp;
 	tsproc_t	*tspp;
@@ -703,7 +702,7 @@
 	TS_NEWUMDPRI(ctspp);
 	ctspp->ts_nice = ptspp->ts_nice;
 	ctspp->ts_dispwait = 0;
-	ctspp->ts_flags = ptspp->ts_flags & ~(TSKPRI | TSBACKQ | TSRESTORE);
+	ctspp->ts_flags = ptspp->ts_flags & ~(TSBACKQ | TSRESTORE);
 	ctspp->ts_tp = ct;
 	cpucaps_sc_init(&ctspp->ts_caps);
 	thread_unlock(t);
@@ -754,7 +753,6 @@
 	tspp->ts_dispwait = 0;
 	t->t_pri = ts_dptbl[tspp->ts_umdpri].ts_globpri;
 	ASSERT(t->t_pri >= 0 && t->t_pri <= ts_maxglobpri);
-	tspp->ts_flags &= ~TSKPRI;
 	THREAD_TRANSITION(t);
 	ts_setrun(t);
 	thread_unlock(t);
@@ -1217,11 +1215,6 @@
 	TS_NEWUMDPRI(tspp);
 	tspp->ts_nice = nice;
 
-	if ((tspp->ts_flags & TSKPRI) != 0) {
-		thread_unlock(tx);
-		return (0);
-	}
-
 	tspp->ts_dispwait = 0;
 	ts_change_priority(tx, tspp);
 	thread_unlock(tx);
@@ -1237,7 +1230,7 @@
 	proc_t		*p;
 	pid_t		pid, pgid, sid;
 	pid_t		on, off;
-	struct stdata 	*stp;
+	struct stdata	*stp;
 	int		sess_held;
 
 	/*
@@ -1373,33 +1366,20 @@
 ts_preempt(kthread_t *t)
 {
 	tsproc_t	*tspp = (tsproc_t *)(t->t_cldata);
-	klwp_t		*lwp = curthread->t_lwp;
+	klwp_t		*lwp = ttolwp(t);
 	pri_t		oldpri = t->t_pri;
 
 	ASSERT(t == curthread);
 	ASSERT(THREAD_LOCK_HELD(curthread));
 
 	/*
-	 * If preempted in the kernel, make sure the thread has
-	 * a kernel priority if needed.
-	 */
-	if (!(tspp->ts_flags & TSKPRI) && lwp != NULL && t->t_kpri_req) {
-		tspp->ts_flags |= TSKPRI;
-		THREAD_CHANGE_PRI(t, ts_kmdpris[0]);
-		ASSERT(t->t_pri >= 0 && t->t_pri <= ts_maxglobpri);
-		t->t_trapret = 1;		/* so ts_trapret will run */
-		aston(t);
-	}
-
-	/*
 	 * This thread may be placed on wait queue by CPU Caps. In this case we
 	 * do not need to do anything until it is removed from the wait queue.
-	 * Do not enforce CPU caps on threads running at a kernel priority
 	 */
 	if (CPUCAPS_ON()) {
 		(void) cpucaps_charge(t, &tspp->ts_caps,
 		    CPUCAPS_CHARGE_ENFORCE);
-		if (!(tspp->ts_flags & TSKPRI) && CPUCAPS_ENFORCE(t))
+		if (CPUCAPS_ENFORCE(t))
 			return;
 	}
 
@@ -1425,18 +1405,16 @@
 	if (t->t_schedctl && schedctl_get_nopreempt(t)) {
 		if (tspp->ts_timeleft > -SC_MAX_TICKS) {
 			DTRACE_SCHED1(schedctl__nopreempt, kthread_t *, t);
-			if (!(tspp->ts_flags & TSKPRI)) {
-				/*
-				 * If not already remembered, remember current
-				 * priority for restoration in ts_yield().
-				 */
-				if (!(tspp->ts_flags & TSRESTORE)) {
-					tspp->ts_scpri = t->t_pri;
-					tspp->ts_flags |= TSRESTORE;
-				}
-				THREAD_CHANGE_PRI(t, ts_maxumdpri);
-				t->t_schedflag |= TS_DONT_SWAP;
+			/*
+			 * If not already remembered, remember current
+			 * priority for restoration in ts_yield().
+			 */
+			if (!(tspp->ts_flags & TSRESTORE)) {
+				tspp->ts_scpri = t->t_pri;
+				tspp->ts_flags |= TSRESTORE;
 			}
+			THREAD_CHANGE_PRI(t, ts_maxumdpri);
+			t->t_schedflag |= TS_DONT_SWAP;
 			schedctl_set_yield(t, 1);
 			setfrontdq(t);
 			goto done;
@@ -1456,14 +1434,11 @@
 		}
 	}
 
-	if ((tspp->ts_flags & (TSBACKQ|TSKPRI)) == TSBACKQ) {
+	if ((tspp->ts_flags & TSBACKQ) != 0) {
 		tspp->ts_timeleft = ts_dptbl[tspp->ts_cpupri].ts_quantum;
 		tspp->ts_dispwait = 0;
 		tspp->ts_flags &= ~TSBACKQ;
 		setbackdq(t);
-	} else if ((tspp->ts_flags & (TSBACKQ|TSKPRI)) == (TSBACKQ|TSKPRI)) {
-		tspp->ts_flags &= ~TSBACKQ;
-		setbackdq(t);
 	} else {
 		setfrontdq(t);
 	}
@@ -1485,11 +1460,8 @@
 		TS_NEWUMDPRI(tspp);
 		tspp->ts_timeleft = ts_dptbl[tspp->ts_cpupri].ts_quantum;
 		tspp->ts_dispwait = 0;
-		if ((tspp->ts_flags & TSKPRI) == 0) {
-			THREAD_CHANGE_PRI(t,
-			    ts_dptbl[tspp->ts_umdpri].ts_globpri);
-			ASSERT(t->t_pri >= 0 && t->t_pri <= ts_maxglobpri);
-		}
+		THREAD_CHANGE_PRI(t, ts_dptbl[tspp->ts_umdpri].ts_globpri);
+		ASSERT(t->t_pri >= 0 && t->t_pri <= ts_maxglobpri);
 	}
 
 	tspp->ts_flags &= ~TSBACKQ;
@@ -1509,14 +1481,12 @@
 
 
 /*
- * Prepare thread for sleep. We reset the thread priority so it will
- * run at the kernel priority level when it wakes up.
+ * Prepare thread for sleep.
  */
 static void
 ts_sleep(kthread_t *t)
 {
 	tsproc_t	*tspp = (tsproc_t *)(t->t_cldata);
-	int		flags;
 	pri_t		old_pri = t->t_pri;
 
 	ASSERT(t == curthread);
@@ -1527,18 +1497,7 @@
 	 */
 	(void) CPUCAPS_CHARGE(t, &tspp->ts_caps, CPUCAPS_CHARGE_ENFORCE);
 
-	flags = tspp->ts_flags;
-	if (t->t_kpri_req) {
-		tspp->ts_flags = flags | TSKPRI;
-		THREAD_CHANGE_PRI(t, ts_kmdpris[0]);
-		ASSERT(t->t_pri >= 0 && t->t_pri <= ts_maxglobpri);
-		t->t_trapret = 1;		/* so ts_trapret will run */
-		aston(t);
-	} else if (tspp->ts_dispwait > ts_dptbl[tspp->ts_umdpri].ts_maxwait) {
-		/*
-		 * If thread has blocked in the kernel (as opposed to
-		 * being merely preempted), recompute the user mode priority.
-		 */
+	if (tspp->ts_dispwait > ts_dptbl[tspp->ts_umdpri].ts_maxwait) {
 		tspp->ts_cpupri = ts_dptbl[tspp->ts_cpupri].ts_slpret;
 		TS_NEWUMDPRI(tspp);
 		tspp->ts_timeleft = ts_dptbl[tspp->ts_cpupri].ts_quantum;
@@ -1548,16 +1507,6 @@
 		    ts_dptbl[tspp->ts_umdpri].ts_globpri);
 		ASSERT(curthread->t_pri >= 0 &&
 		    curthread->t_pri <= ts_maxglobpri);
-		tspp->ts_flags = flags & ~TSKPRI;
-
-		if (DISP_MUST_SURRENDER(curthread))
-			cpu_surrender(curthread);
-	} else if (flags & TSKPRI) {
-		THREAD_CHANGE_PRI(curthread,
-		    ts_dptbl[tspp->ts_umdpri].ts_globpri);
-		ASSERT(curthread->t_pri >= 0 &&
-		    curthread->t_pri <= ts_maxglobpri);
-		tspp->ts_flags = flags & ~TSKPRI;
 
 		if (DISP_MUST_SURRENDER(curthread))
 			cpu_surrender(curthread);
@@ -1594,9 +1543,9 @@
 		time_t swapout_time;
 
 		swapout_time = (ddi_get_lbolt() - t->t_stime) / hz;
-		if (INHERITED(t) || (tspp->ts_flags & (TSKPRI | TSIASET)))
+		if (INHERITED(t) || (tspp->ts_flags & TSIASET)) {
 			epri = (long)DISP_PRIO(t) + swapout_time;
-		else {
+		} else {
 			/*
 			 * Threads which have been out for a long time,
 			 * have high user mode priority and are associated
@@ -1648,7 +1597,7 @@
 
 	ASSERT(THREAD_LOCK_HELD(t));
 
-	if (INHERITED(t) || (tspp->ts_flags & (TSKPRI | TSIASET)) ||
+	if (INHERITED(t) || (tspp->ts_flags & TSIASET) ||
 	    (t->t_proc_flag & TP_LWPEXIT) ||
 	    (t->t_state & (TS_ZOMB | TS_FREE | TS_STOPPED |
 	    TS_ONPROC | TS_WAIT)) ||
@@ -1717,62 +1666,59 @@
 	 */
 	if (CPUCAPS_ON()) {
 		call_cpu_surrender = cpucaps_charge(t, &tspp->ts_caps,
-		    CPUCAPS_CHARGE_ENFORCE) && !(tspp->ts_flags & TSKPRI);
+		    CPUCAPS_CHARGE_ENFORCE);
 	}
 
-	if ((tspp->ts_flags & TSKPRI) == 0) {
-		if (--tspp->ts_timeleft <= 0) {
-			pri_t	new_pri;
+	if (--tspp->ts_timeleft <= 0) {
+		pri_t	new_pri;
 
-			/*
-			 * If we're doing preemption control and trying to
-			 * avoid preempting this thread, just note that
-			 * the thread should yield soon and let it keep
-			 * running (unless it's been a while).
-			 */
-			if (t->t_schedctl && schedctl_get_nopreempt(t)) {
-				if (tspp->ts_timeleft > -SC_MAX_TICKS) {
-					DTRACE_SCHED1(schedctl__nopreempt,
-					    kthread_t *, t);
-					schedctl_set_yield(t, 1);
-					thread_unlock_nopreempt(t);
-					return;
-				}
-
-				TNF_PROBE_2(schedctl_failsafe,
-				    "schedctl TS ts_tick", /* CSTYLED */,
-				    tnf_pid, pid, ttoproc(t)->p_pid,
-				    tnf_lwpid, lwpid, t->t_tid);
+		/*
+		 * If we're doing preemption control and trying to avoid
+		 * preempting this thread, just note that the thread should
+		 * yield soon and let it keep running (unless it's been a
+		 * while).
+		 */
+		if (t->t_schedctl && schedctl_get_nopreempt(t)) {
+			if (tspp->ts_timeleft > -SC_MAX_TICKS) {
+				DTRACE_SCHED1(schedctl__nopreempt,
+				    kthread_t *, t);
+				schedctl_set_yield(t, 1);
+				thread_unlock_nopreempt(t);
+				return;
 			}
-			tspp->ts_flags &= ~TSRESTORE;
-			tspp->ts_cpupri = ts_dptbl[tspp->ts_cpupri].ts_tqexp;
-			TS_NEWUMDPRI(tspp);
-			tspp->ts_dispwait = 0;
-			new_pri = ts_dptbl[tspp->ts_umdpri].ts_globpri;
-			ASSERT(new_pri >= 0 && new_pri <= ts_maxglobpri);
-			/*
-			 * When the priority of a thread is changed,
-			 * it may be necessary to adjust its position
-			 * on a sleep queue or dispatch queue.
-			 * The function thread_change_pri accomplishes
-			 * this.
-			 */
-			if (thread_change_pri(t, new_pri, 0)) {
-				if ((t->t_schedflag & TS_LOAD) &&
-				    (lwp = t->t_lwp) &&
-				    lwp->lwp_state == LWP_USER)
-					t->t_schedflag &= ~TS_DONT_SWAP;
-				tspp->ts_timeleft =
-				    ts_dptbl[tspp->ts_cpupri].ts_quantum;
-			} else {
-				call_cpu_surrender = B_TRUE;
-			}
-			TRACE_2(TR_FAC_DISP, TR_TICK,
-			    "tick:tid %p old pri %d", t, oldpri);
-		} else if (t->t_state == TS_ONPROC &&
-		    t->t_pri < t->t_disp_queue->disp_maxrunpri) {
+
+			TNF_PROBE_2(schedctl_failsafe,
+			    "schedctl TS ts_tick", /* CSTYLED */,
+			    tnf_pid, pid, ttoproc(t)->p_pid,
+			    tnf_lwpid, lwpid, t->t_tid);
+		}
+		tspp->ts_flags &= ~TSRESTORE;
+		tspp->ts_cpupri = ts_dptbl[tspp->ts_cpupri].ts_tqexp;
+		TS_NEWUMDPRI(tspp);
+		tspp->ts_dispwait = 0;
+		new_pri = ts_dptbl[tspp->ts_umdpri].ts_globpri;
+		ASSERT(new_pri >= 0 && new_pri <= ts_maxglobpri);
+		/*
+		 * When the priority of a thread is changed, it may be
+		 * necessary to adjust its position on a sleep queue or
+		 * dispatch queue.  The function thread_change_pri accomplishes
+		 * this.
+		 */
+		if (thread_change_pri(t, new_pri, 0)) {
+			if ((t->t_schedflag & TS_LOAD) &&
+			    (lwp = t->t_lwp) &&
+			    lwp->lwp_state == LWP_USER)
+				t->t_schedflag &= ~TS_DONT_SWAP;
+			tspp->ts_timeleft =
+			    ts_dptbl[tspp->ts_cpupri].ts_quantum;
+		} else {
 			call_cpu_surrender = B_TRUE;
 		}
+		TRACE_2(TR_FAC_DISP, TR_TICK,
+		    "tick:tid %p old pri %d", t, oldpri);
+	} else if (t->t_state == TS_ONPROC &&
+	    t->t_pri < t->t_disp_queue->disp_maxrunpri) {
+		call_cpu_surrender = B_TRUE;
 	}
 
 	if (call_cpu_surrender) {
@@ -1785,11 +1731,8 @@
 
 
 /*
- * If thread is currently at a kernel mode priority (has slept)
- * we assign it the appropriate user mode priority and time quantum
- * here.  If we are lowering the thread's priority below that of
- * other runnable threads we will normally set runrun via cpu_surrender() to
- * cause preemption.
+ * If we are lowering the thread's priority below that of other runnable
+ * threads we will normally set runrun via cpu_surrender() to cause preemption.
  */
 static void
 ts_trapret(kthread_t *t)
@@ -1803,7 +1746,6 @@
 	ASSERT(cp->cpu_dispthread == t);
 	ASSERT(t->t_state == TS_ONPROC);
 
-	t->t_kpri_req = 0;
 	if (tspp->ts_dispwait > ts_dptbl[tspp->ts_umdpri].ts_maxwait) {
 		tspp->ts_cpupri = ts_dptbl[tspp->ts_cpupri].ts_slpret;
 		TS_NEWUMDPRI(tspp);
@@ -1817,27 +1759,14 @@
 		THREAD_CHANGE_PRI(t, ts_dptbl[tspp->ts_umdpri].ts_globpri);
 		cp->cpu_dispatch_pri = DISP_PRIO(t);
 		ASSERT(t->t_pri >= 0 && t->t_pri <= ts_maxglobpri);
-		tspp->ts_flags &= ~TSKPRI;
-
-		if (DISP_MUST_SURRENDER(t))
-			cpu_surrender(t);
-	} else if (tspp->ts_flags & TSKPRI) {
-		/*
-		 * If thread has blocked in the kernel (as opposed to
-		 * being merely preempted), recompute the user mode priority.
-		 */
-		THREAD_CHANGE_PRI(t, ts_dptbl[tspp->ts_umdpri].ts_globpri);
-		cp->cpu_dispatch_pri = DISP_PRIO(t);
-		ASSERT(t->t_pri >= 0 && t->t_pri <= ts_maxglobpri);
-		tspp->ts_flags &= ~TSKPRI;
 
 		if (DISP_MUST_SURRENDER(t))
 			cpu_surrender(t);
 	}
 
 	/*
-	 * Swapout lwp if the swapper is waiting for this thread to
-	 * reach a safe point.
+	 * Swapout lwp if the swapper is waiting for this thread to reach a
+	 * safe point.
 	 */
 	if ((t->t_schedflag & TS_SWAPENQ) && !(tspp->ts_flags & TSIASET)) {
 		thread_unlock(t);
@@ -1931,8 +1860,6 @@
 		    tx->t_clfuncs != &ia_classfuncs.thread)
 			goto next;
 		tspp->ts_dispwait++;
-		if ((tspp->ts_flags & TSKPRI) != 0)
-			goto next;
 		if (tspp->ts_dispwait <= ts_dptbl[tspp->ts_umdpri].ts_maxwait)
 			goto next;
 		if (tx->t_schedctl && schedctl_get_nopreempt(tx))
@@ -1968,12 +1895,7 @@
 }
 
 /*
- * Processes waking up go to the back of their queue.  We don't
- * need to assign a time quantum here because thread is still
- * at a kernel mode priority and the time slicing is not done
- * for threads running in the kernel after sleeping.  The proper
- * time quantum will be assigned by ts_trapret before the thread
- * returns to user mode.
+ * Processes waking up go to the back of their queue.
  */
 static void
 ts_wakeup(kthread_t *t)
@@ -1984,46 +1906,27 @@
 
 	t->t_stime = ddi_get_lbolt();		/* time stamp for the swapper */
 
-	if (tspp->ts_flags & TSKPRI) {
-		tspp->ts_flags &= ~TSBACKQ;
+	if (tspp->ts_dispwait > ts_dptbl[tspp->ts_umdpri].ts_maxwait) {
+		tspp->ts_cpupri = ts_dptbl[tspp->ts_cpupri].ts_slpret;
+		TS_NEWUMDPRI(tspp);
+		tspp->ts_timeleft = ts_dptbl[tspp->ts_cpupri].ts_quantum;
+		tspp->ts_dispwait = 0;
+		THREAD_CHANGE_PRI(t, ts_dptbl[tspp->ts_umdpri].ts_globpri);
+		ASSERT(t->t_pri >= 0 && t->t_pri <= ts_maxglobpri);
+	}
+
+	tspp->ts_flags &= ~TSBACKQ;
+
+	if (tspp->ts_flags & TSIA) {
 		if (tspp->ts_flags & TSIASET)
 			setfrontdq(t);
 		else
 			setbackdq(t);
-	} else if (t->t_kpri_req) {
-		/*
-		 * Give thread a priority boost if we were asked.
-		 */
-		tspp->ts_flags |= TSKPRI;
-		THREAD_CHANGE_PRI(t, ts_kmdpris[0]);
-		setbackdq(t);
-		t->t_trapret = 1;	/* so that ts_trapret will run */
-		aston(t);
 	} else {
-		if (tspp->ts_dispwait > ts_dptbl[tspp->ts_umdpri].ts_maxwait) {
-			tspp->ts_cpupri = ts_dptbl[tspp->ts_cpupri].ts_slpret;
-			TS_NEWUMDPRI(tspp);
-			tspp->ts_timeleft =
-			    ts_dptbl[tspp->ts_cpupri].ts_quantum;
-			tspp->ts_dispwait = 0;
-			THREAD_CHANGE_PRI(t,
-			    ts_dptbl[tspp->ts_umdpri].ts_globpri);
-			ASSERT(t->t_pri >= 0 && t->t_pri <= ts_maxglobpri);
-		}
-
-		tspp->ts_flags &= ~TSBACKQ;
-
-		if (tspp->ts_flags & TSIA) {
-			if (tspp->ts_flags & TSIASET)
-				setfrontdq(t);
-			else
-				setbackdq(t);
-		} else {
-			if (t->t_disp_time != ddi_get_lbolt())
-				setbackdq(t);
-			else
-				setfrontdq(t);
-		}
+		if (t->t_disp_time != ddi_get_lbolt())
+			setbackdq(t);
+		else
+			setfrontdq(t);
 	}
 }
 
@@ -2179,7 +2082,7 @@
  * and background processes as non-interactive iff the session
  * leader is interactive.  This routine is called from two places:
  *	strioctl:SPGRP when a new process group gets
- * 		control of the tty.
+ *		control of the tty.
  *	ia_parmsset-when the process in question is a session leader.
  * ia_set_process_group assumes that pidlock is held by the caller,
  * either strioctl or priocntlsys.  If the caller is priocntlsys
@@ -2189,7 +2092,7 @@
 static void
 ia_set_process_group(pid_t sid, pid_t bg_pgid, pid_t fg_pgid)
 {
-	proc_t 		*leader, *fg, *bg;
+	proc_t		*leader, *fg, *bg;
 	tsproc_t	*tspp;
 	kthread_t	*tx;
 	int		plocked = 0;
@@ -2291,10 +2194,6 @@
 			tspp->ts_flags |= TSIASET;
 			tspp->ts_boost = ia_boost;
 			TS_NEWUMDPRI(tspp);
-			if ((tspp->ts_flags & TSKPRI) != 0) {
-				thread_unlock(tx);
-				continue;
-			}
 			tspp->ts_dispwait = 0;
 			ts_change_priority(tx, tspp);
 			thread_unlock(tx);
@@ -2344,10 +2243,6 @@
 			tspp->ts_flags &= ~TSIASET;
 			tspp->ts_boost = -ia_boost;
 			TS_NEWUMDPRI(tspp);
-			if ((tspp->ts_flags & TSKPRI) != 0) {
-				thread_unlock(tx);
-				continue;
-			}
 
 			tspp->ts_dispwait = 0;
 			ts_change_priority(tx, tspp);
--- a/usr/src/uts/common/fs/proc/prsubr.c	Thu May 16 19:23:22 2019 +0000
+++ b/usr/src/uts/common/fs/proc/prsubr.c	Wed Apr 24 14:45:41 2019 +0000
@@ -25,7 +25,7 @@
  */
 
 /*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
-/*	  All Rights Reserved  	*/
+/*	  All Rights Reserved	*/
 
 #include <sys/types.h>
 #include <sys/t_lock.h>
@@ -715,7 +715,6 @@
 		mutex_enter(&p->p_lock);
 	}
 	p->p_proc_flag |= P_PR_LOCK;
-	THREAD_KPRI_REQUEST();
 	return (p);
 }
 
@@ -822,7 +821,6 @@
 
 	cv_signal(&pr_pid_cv[p->p_slot]);
 	p->p_proc_flag &= ~P_PR_LOCK;
-	THREAD_KPRI_RELEASE();
 }
 
 void
@@ -2695,7 +2693,7 @@
 #define	PR_COPY_TIMESPEC(s, d, field)				\
 	TIMESPEC_TO_TIMESPEC32(&d->field, &s->field);
 
-#define	PR_COPY_BUF(s, d, field)	 			\
+#define	PR_COPY_BUF(s, d, field)				\
 	bcopy(s->field, d->field, sizeof (d->field));
 
 #define	PR_IGNORE_FIELD(s, d, field)
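
Across the /proc code (here and in pid.c below), P_PR_LOCK is now taken and
dropped without the kpri bracketing. What survives is the flag plus a per-slot
condition variable; a minimal sketch of the pairing, using the names from the
hunks above (hypothetical _sketch wrappers; the real pr_p_lock() also retries
across zombie and exec transitions):

/*
 * Sketch of the P_PR_LOCK pairing once the kpri calls are gone: waiters
 * sleep on the per-slot CV under p_lock, and the unlock side signals it.
 */
static void
prlock_sketch(proc_t *p)
{
	ASSERT(MUTEX_HELD(&p->p_lock));
	while (p->p_proc_flag & P_PR_LOCK)
		cv_wait(&pr_pid_cv[p->p_slot], &p->p_lock);
	p->p_proc_flag |= P_PR_LOCK;	/* was: + THREAD_KPRI_REQUEST() */
}

static void
prunlock_sketch(proc_t *p)
{
	cv_signal(&pr_pid_cv[p->p_slot]);
	p->p_proc_flag &= ~P_PR_LOCK;	/* was: + THREAD_KPRI_RELEASE() */
	mutex_exit(&p->p_lock);
}
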
--- a/usr/src/uts/common/fs/ufs/ufs_directio.c	Thu May 16 19:23:22 2019 +0000
+++ b/usr/src/uts/common/fs/ufs/ufs_directio.c	Wed Apr 24 14:45:41 2019 +0000
@@ -21,6 +21,7 @@
 /*
  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
+ * Copyright 2019 Joyent, Inc.
  */
 
 /* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
@@ -221,40 +222,22 @@
  * Wait for all of the direct IO operations to finish
  */
 
-uint32_t	ufs_directio_drop_kpri = 0;	/* enable kpri hack */
-
 static int
 directio_wait(struct directio_buf *tail, long *bytes_iop)
 {
 	int	error = 0, newerror;
 	struct directio_buf	*dbp;
-	uint_t	kpri_req_save;
 
 	/*
 	 * The linked list of directio buf structures is maintained
 	 * in reverse order (tail->last request->penultimate request->...)
 	 */
-	/*
-	 * This is the k_pri_req hack. Large numbers of threads
-	 * sleeping with kernel priority will cause scheduler thrashing
-	 * on an MP machine. This can be seen running Oracle using
-	 * directio to ufs files. Sleep at normal priority here to
-	 * more closely mimic physio to a device partition. This
-	 * workaround is disabled by default as a niced thread could
-	 * be starved from running while holding i_rwlock and i_contents.
-	 */
-	if (ufs_directio_drop_kpri) {
-		kpri_req_save = curthread->t_kpri_req;
-		curthread->t_kpri_req = 0;
-	}
 	while ((dbp = tail) != NULL) {
 		tail = dbp->next;
 		newerror = directio_wait_one(dbp, bytes_iop);
 		if (error == 0)
 			error = newerror;
 	}
-	if (ufs_directio_drop_kpri)
-		curthread->t_kpri_req = kpri_req_save;
 	return (error);
 }
 /*
@@ -262,8 +245,8 @@
  */
 static void
 directio_start(struct ufsvfs *ufsvfsp, struct inode *ip, size_t nbytes,
-	offset_t offset, char *addr, enum seg_rw rw, struct proc *procp,
-	struct directio_buf **tailp, page_t **pplist)
+    offset_t offset, char *addr, enum seg_rw rw, struct proc *procp,
+    struct directio_buf **tailp, page_t **pplist)
 {
 	buf_t *bp;
 	struct directio_buf *dbp;
@@ -343,7 +326,7 @@
 
 int
 ufs_directio_write(struct inode *ip, uio_t *arg_uio, int ioflag, int rewrite,
-	cred_t *cr, int *statusp)
+    cred_t *cr, int *statusp)
 {
 	long		resid, bytes_written;
 	u_offset_t	size, uoff;
@@ -414,11 +397,11 @@
 
 	/*
 	 * Synchronous, allocating writes run very slow in Direct-Mode
-	 * 	XXX - can be fixed with bmap_write changes for large writes!!!
+	 *	XXX - can be fixed with bmap_write changes for large writes!!!
 	 *	XXX - can be fixed for updates to "almost-full" files
 	 *	XXX - WARNING - system hangs if bmap_write() has to
-	 * 			allocate lots of pages since pageout
-	 * 			suspends on locked inode
+	 *			allocate lots of pages since pageout
+	 *			suspends on locked inode
 	 */
 	if (!rewrite && (ip->i_flag & ISYNC)) {
 		if ((uoff + resid) > size)
--- a/usr/src/uts/common/os/bio.c	Thu May 16 19:23:22 2019 +0000
+++ b/usr/src/uts/common/os/bio.c	Wed Apr 24 14:45:41 2019 +0000
@@ -21,7 +21,7 @@
 /*
  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
- * Copyright 2011 Joyent, Inc.  All rights reserved.
+ * Copyright 2019 Joyent, Inc.
  */
 
 /*
@@ -1380,7 +1380,6 @@
 
 	VN_HOLD(vp);
 	bp->b_vp = vp;
-	THREAD_KPRI_RELEASE_N(btopr(len)); /* release kpri from page_locks */
 
 	/*
 	 * Caller sets dev & blkno and can adjust
--- a/usr/src/uts/common/os/condvar.c	Thu May 16 19:23:22 2019 +0000
+++ b/usr/src/uts/common/os/condvar.c	Wed Apr 24 14:45:41 2019 +0000
@@ -26,6 +26,7 @@
 
 /*
  * Copyright (c) 2012 by Delphix. All rights reserved.
+ * Copyright 2019 Joyent, Inc.
  */
 
 #include <sys/thread.h>
@@ -461,7 +462,7 @@
 
 /*
  * Returns:
- * 	Function result in order of precedence:
+ *	Function result in order of precedence:
  *		 0 if a signal was received
 *		-1 if timeout occurred
  *		>0 if awakened via cv_signal() or cv_broadcast().
@@ -552,7 +553,6 @@
 	lwp->lwp_asleep = 1;
 	lwp->lwp_sysabort = 0;
 	thread_lock(t);
-	t->t_kpri_req = 0;	/* don't need kernel priority */
 	cv_block_sig(t, (condvar_impl_t *)cvp);
 	/* I can be swapped now */
 	curthread->t_schedflag &= ~TS_DONT_SWAP;
@@ -745,7 +745,7 @@
  * that a timeout occurred until the future time is passed.
  * If 'when' is a NULL pointer, no timeout will occur.
  * Returns:
- * 	Function result in order of precedence:
+ *	Function result in order of precedence:
  *		 0 if a signal was received
 *		-1 if timeout occurred
  *	        >0 if awakened via cv_signal() or cv_broadcast()
@@ -763,8 +763,8 @@
  * does not need to deal with the time changing.
  */
 int
-cv_waituntil_sig(kcondvar_t *cvp, kmutex_t *mp,
-	timestruc_t *when, int timecheck)
+cv_waituntil_sig(kcondvar_t *cvp, kmutex_t *mp, timestruc_t *when,
+    int timecheck)
 {
 	timestruc_t now;
 	timestruc_t delta;
--- a/usr/src/uts/common/os/pid.c	Thu May 16 19:23:22 2019 +0000
+++ b/usr/src/uts/common/os/pid.c	Wed Apr 24 14:45:41 2019 +0000
@@ -21,11 +21,11 @@
 
 /*
  * Copyright (c) 1989, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2015 Joyent, Inc.
+ * Copyright 2019 Joyent, Inc.
  */
 
 /*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
-/*	  All Rights Reserved  	*/
+/*	  All Rights Reserved	*/
 
 #include <sys/types.h>
 #include <sys/param.h>
@@ -430,7 +430,6 @@
 		return (1);
 
 	p->p_proc_flag |= P_PR_LOCK;
-	THREAD_KPRI_REQUEST();
 
 	return (0);
 }
@@ -515,7 +514,6 @@
 	}
 
 	p->p_proc_flag |= P_PR_LOCK;
-	THREAD_KPRI_REQUEST();
 }
 
 void
@@ -532,7 +530,6 @@
 	cv_signal(&pr_pid_cv[p->p_slot]);
 	p->p_proc_flag &= ~P_PR_LOCK;
 	mutex_exit(&p->p_lock);
-	THREAD_KPRI_RELEASE();
 }
 
 /*
@@ -546,7 +543,6 @@
 
 	cv_signal(&pr_pid_cv[p->p_slot]);
 	p->p_proc_flag &= ~P_PR_LOCK;
-	THREAD_KPRI_RELEASE();
 }
 
 void
--- a/usr/src/uts/common/os/rwlock.c	Thu May 16 19:23:22 2019 +0000
+++ b/usr/src/uts/common/os/rwlock.c	Wed Apr 24 14:45:41 2019 +0000
@@ -24,7 +24,7 @@
  */
 
 /*
- * Copyright (c) 2013, Joyent, Inc.  All rights reserved.
+ * Copyright 2019 Joyent, Inc.
  */
 
 #include <sys/param.h>
@@ -269,9 +269,6 @@
 /*
  * Full-service implementation of rw_enter() to handle all the hard cases.
  * Called from the assembly version if anything complicated is going on.
- * The only semantic difference between calling rw_enter() and calling
- * rw_enter_sleep() directly is that we assume the caller has already done
- * a THREAD_KPRI_REQUEST() in the RW_READER cases.
  */
 void
 rw_enter_sleep(rwlock_impl_t *lp, krw_t rw)
@@ -342,15 +339,13 @@
 		}
 
 		/*
-		 * We really are going to block.  Bump the stats, and drop
-		 * kpri if we're a reader.
+		 * We really are going to block, so bump the stats.
 		 */
 		ASSERT(lp->rw_wwwh & lock_wait);
 		ASSERT(lp->rw_wwwh & RW_LOCKED);
 
 		sleep_time = -gethrtime();
 		if (rw != RW_WRITER) {
-			THREAD_KPRI_RELEASE();
 			CPU_STATS_ADDQ(CPU, sys, rw_rdfails, 1);
 			(void) turnstile_block(ts, TS_READER_Q, lp,
 			    &rw_sobj_ops, NULL, NULL);
@@ -366,8 +361,8 @@
 		    old >> RW_HOLD_COUNT_SHIFT);
 
 		/*
-		 * We wake up holding the lock (and having kpri if we're
-		 * a reader) via direct handoff from the previous owner.
+		 * We wake up holding the lock via direct handoff from the
+		 * previous owner.
 		 */
 		break;
 	}
@@ -394,7 +389,6 @@
 	while (next_reader != NULL) {
 		if (DISP_PRIO(next_reader) < wpri)
 			break;
-		next_reader->t_kpri_req++;
 		next_reader = next_reader->t_link;
 		count++;
 	}
@@ -523,7 +517,6 @@
 	}
 
 	if (lock_value == RW_READ_LOCK) {
-		THREAD_KPRI_RELEASE();
 		LOCKSTAT_RECORD(LS_RW_EXIT_RELEASE, lp, RW_READER);
 	} else {
 		LOCKSTAT_RECORD(LS_RW_EXIT_RELEASE, lp, RW_WRITER);
@@ -539,11 +532,9 @@
 	if (rw != RW_WRITER) {
 		uint_t backoff = 0;
 		int loop_count = 0;
-		THREAD_KPRI_REQUEST();
 		for (;;) {
 			if ((old = lp->rw_wwwh) & (rw == RW_READER ?
 			    RW_WRITE_CLAIMED : RW_WRITE_LOCKED)) {
-				THREAD_KPRI_RELEASE();
 				return (0);
 			}
 			if (casip(&lp->rw_wwwh, old, old + RW_READ_LOCK) == old)
@@ -573,7 +564,6 @@
 {
 	rwlock_impl_t *lp = (rwlock_impl_t *)rwlp;
 
-	THREAD_KPRI_REQUEST();
 	membar_exit();
 
 	if ((lp->rw_wwwh & RW_OWNER) != (uintptr_t)curthread) {
@@ -612,7 +602,6 @@
 	} while (casip(&lp->rw_wwwh, old, new) != old);
 
 	membar_enter();
-	THREAD_KPRI_RELEASE();
 	LOCKSTAT_RECORD0(LS_RW_TRYUPGRADE_UPGRADE, lp);
 	ASSERT(rw_locked(lp, RW_WRITER));
 	return (1);
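
With the kpri request/release gone, the reader side of rw_tryenter() collapses
to a bare compare-and-swap loop on the lock word. A sketch condensed from the
hunk above (the _sketch name is hypothetical; the exponential-backoff handling
is elided):

/*
 * Condensed reader path of rw_tryenter() after kpri removal: a CAS loop
 * on rw_wwwh that fails fast if a writer holds or has claimed the lock.
 */
static int
rw_tryenter_reader_sketch(rwlock_impl_t *lp, krw_t rw)
{
	uintptr_t old;

	for (;;) {
		if ((old = lp->rw_wwwh) & (rw == RW_READER ?
		    RW_WRITE_CLAIMED : RW_WRITE_LOCKED))
			return (0);	/* writer present: fail immediately */
		if (casip(&lp->rw_wwwh, old, old + RW_READ_LOCK) == old)
			return (1);	/* hold count bumped: lock held */
	}
}
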
--- a/usr/src/uts/common/sys/fss.h	Thu May 16 19:23:22 2019 +0000
+++ b/usr/src/uts/common/sys/fss.h	Wed Apr 24 14:45:41 2019 +0000
@@ -22,7 +22,7 @@
 /*
  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
- * Copyright 2012 Joyent, Inc.  All rights reserved.
+ * Copyright 2019 Joyent, Inc.
  */
 
 #ifndef	_SYS_FSS_H
@@ -140,7 +140,7 @@
  * than one cpu partition then it will have a few of these structures.
  */
 typedef struct fsszone {
-	struct zone 	*fssz_zone;	/* ptr to our zone structure	*/
+	struct zone	*fssz_zone;	/* ptr to our zone structure	*/
 	struct fsszone	*fssz_next;	/* next fsszone_t in fsspset_t	*/
 	struct fsszone	*fssz_prev;	/* prev fsszone_t in fsspset_t	*/
 	uint32_t	fssz_shares;	/* sum of all project shares	*/
@@ -160,7 +160,7 @@
 /*
  * fss_flags
  */
-#define	FSSKPRI		0x01	/* the thread is in kernel mode	*/
+/* Formerly: FSSKPRI	0x01 - the thread is in kernel mode */
 #define	FSSBACKQ	0x02	/* thread should be placed at the back of */
 				/* the dispatch queue if preempted */
 #define	FSSRESTORE	0x04	/* thread was not preempted, due to schedctl */
--- a/usr/src/uts/common/sys/ia.h	Thu May 16 19:23:22 2019 +0000
+++ b/usr/src/uts/common/sys/ia.h	Wed Apr 24 14:45:41 2019 +0000
@@ -22,6 +22,7 @@
 /*
  * Copyright (c) 1997-1998 by Sun Microsystems, Inc.
  * All rights reserved.
+ * Copyright 2019 Joyent, Inc.
  */
 
 /*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
@@ -85,7 +86,7 @@
 
 
 /* flags */
-#define	IAKPRI	0x01	/* thread at kernel mode priority */
+/* Formerly: IAKPRI 0x01 - thread at kernel mode priority */
 #define	IABACKQ	0x02	/* thread goes to back of disp q when preempted */
 #define	IASLEPT	0x04	/* thread had long-term suspend - give new slice */
 
--- a/usr/src/uts/common/sys/thread.h	Thu May 16 19:23:22 2019 +0000
+++ b/usr/src/uts/common/sys/thread.h	Wed Apr 24 14:45:41 2019 +0000
@@ -201,16 +201,15 @@
 	 * it should be grabbed only by thread_lock().
 	 */
 	disp_lock_t	*t_lockp;	/* pointer to the dispatcher lock */
-	ushort_t 	t_oldspl;	/* spl level before dispatcher locked */
+	ushort_t	t_oldspl;	/* spl level before dispatcher locked */
 	volatile char	t_pre_sys;	/* pre-syscall work needed */
 	lock_t		t_lock_flush;	/* for lock_mutex_flush() impl */
 	struct _disp	*t_disp_queue;	/* run queue for chosen CPU */
 	clock_t		t_disp_time;	/* last time this thread was running */
-	uint_t		t_kpri_req;	/* kernel priority required */
 
 	/*
 	 * Post-syscall / post-trap flags.
-	 * 	No lock is required to set these.
+	 *	No lock is required to set these.
 	 *	These must be cleared only by the thread itself.
 	 *
 	 *	t_astflag indicates that some post-trap processing is required,
@@ -219,7 +218,7 @@
 *	t_post_sys indicates that some unusual post-system call
 	 *		handling is required, such as an error or tracing.
 	 *	t_sig_check indicates that some condition in ISSIG() must be
-	 * 		checked, but doesn't prevent returning to user.
+	 *		checked, but doesn't prevent returning to user.
 	 *	t_post_sys_ast is a way of checking whether any of these three
 	 *		flags are set.
 	 */
@@ -361,7 +360,7 @@
 /*
  * Thread flag (t_flag) definitions.
  *	These flags must be changed only for the current thread,
- * 	and not during preemption code, since the code being
+ *	and not during preemption code, since the code being
  *	preempted could be modifying the flags.
  *
  *	For the most part these flags do not need locking.
@@ -520,10 +519,10 @@
  *	convert a thread pointer to its proc pointer.
  *
  * ttoproj(x)
- * 	convert a thread pointer to its project pointer.
+ *	convert a thread pointer to its project pointer.
  *
  * ttozone(x)
- * 	convert a thread pointer to its zone pointer.
+ *	convert a thread pointer to its zone pointer.
  *
  * lwptot(x)
  *	convert a lwp pointer to its thread pointer.
@@ -617,20 +616,6 @@
 #define	THREAD_NAME_MAX	32	/* includes terminating NUL */
 
 /*
- * Macros to indicate that the thread holds resources that could be critical
- * to other kernel threads, so this thread needs to have kernel priority
- * if it blocks or is preempted.  Note that this is not necessary if the
- * resource is a mutex or a writer lock because of priority inheritance.
- *
- * The only way one thread may legally manipulate another thread's t_kpri_req
- * is to hold the target thread's thread lock while that thread is asleep.
- * (The rwlock code does this to implement direct handoff to waiting readers.)
- */
-#define	THREAD_KPRI_REQUEST()	(curthread->t_kpri_req++)
-#define	THREAD_KPRI_RELEASE()	(curthread->t_kpri_req--)
-#define	THREAD_KPRI_RELEASE_N(n) (curthread->t_kpri_req -= (n))
-
-/*
  * Macro to change a thread's priority.
  */
 #define	THREAD_CHANGE_PRI(t, pri) {					\
@@ -657,12 +642,12 @@
  * Point it at the transition lock, which is always held.
 * The previously held lock is dropped.
  */
-#define	THREAD_TRANSITION(tp) 	thread_transition(tp);
+#define	THREAD_TRANSITION(tp)	thread_transition(tp);
 /*
  * Set the thread's lock to be the transition lock, without dropping
 * previously held lock.
  */
-#define	THREAD_TRANSITION_NOLOCK(tp) 	((tp)->t_lockp = &transition_lock)
+#define	THREAD_TRANSITION_NOLOCK(tp)	((tp)->t_lockp = &transition_lock)
 
 /*
  * Put thread in run state, and set the lock pointer to the dispatcher queue
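
For context, the deleted macros were plain counters on the per-thread
t_kpri_req field, and callers bracketed critical shared resources with them;
this is the pattern every other hunk in this changeset unwinds. A sketch of
the removed usage (the hold_resource_sketch() wrapper is hypothetical):

/* The deleted interface: t_kpri_req was a bare per-thread counter. */
#define	THREAD_KPRI_REQUEST()	(curthread->t_kpri_req++)
#define	THREAD_KPRI_RELEASE()	(curthread->t_kpri_req--)

/*
 * Typical (now removed) usage: hint to TS/FSS that the caller holds a
 * resource other threads may need, so the scheduler should boost it if
 * it blocks or is preempted before the release.
 */
void
hold_resource_sketch(void)
{
	THREAD_KPRI_REQUEST();		/* ask for kernel priority */
	/* ... operate on the shared resource ... */
	THREAD_KPRI_RELEASE();		/* withdraw the request */
}
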
--- a/usr/src/uts/common/sys/ts.h	Thu May 16 19:23:22 2019 +0000
+++ b/usr/src/uts/common/sys/ts.h	Wed Apr 24 14:45:41 2019 +0000
@@ -22,6 +22,7 @@
 /*
  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
+ * Copyright 2019 Joyent, Inc.
  */
 
 /*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
@@ -78,7 +79,8 @@
 } tsproc_t;
 
 /* flags */
-#define	TSKPRI		0x01	/* thread at kernel mode priority	*/
+
+/* Formerly: TSKPRI	0x01 - thread at kernel mode priority */
 #define	TSBACKQ		0x02	/* thread goes to back of dispq if preempted */
 #define	TSIA		0x04	/* thread is interactive		*/
 #define	TSIASET		0x08	/* interactive thread is "on"		*/
--- a/usr/src/uts/common/vm/page_lock.c	Thu May 16 19:23:22 2019 +0000
+++ b/usr/src/uts/common/vm/page_lock.c	Wed Apr 24 14:45:41 2019 +0000
@@ -20,7 +20,7 @@
  */
 /*
  * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2018 Joyent, Inc.
+ * Copyright 2019 Joyent, Inc.
  */
 
 
@@ -364,7 +364,6 @@
 			retval = 0;
 		} else if ((pp->p_selock & ~SE_EWANTED) == 0) {
 			/* no reader/writer lock held */
-			THREAD_KPRI_REQUEST();
 			/* this clears our setting of the SE_EWANTED bit */
 			pp->p_selock = SE_WRITER;
 			retval = 1;
@@ -551,7 +550,6 @@
 	if (!(old & SE_EWANTED) || (es & SE_EXCL_WANTED)) {
 		if ((old & ~SE_EWANTED) == 0) {
 			/* no reader/writer lock held */
-			THREAD_KPRI_REQUEST();
 			/* this clears out our setting of the SE_EWANTED bit */
 			pp->p_selock = SE_WRITER;
 			mutex_exit(pse);
@@ -590,7 +588,6 @@
 
 	if (se == SE_EXCL) {
 		if (pp->p_selock == 0) {
-			THREAD_KPRI_REQUEST();
 			pp->p_selock = SE_WRITER;
 			mutex_exit(pse);
 			return (1);
@@ -628,7 +625,6 @@
 	} else if ((old & ~SE_EWANTED) == SE_DELETED) {
 		panic("page_unlock_nocapture: page %p is deleted", (void *)pp);
 	} else if (old < 0) {
-		THREAD_KPRI_RELEASE();
 		pp->p_selock &= SE_EWANTED;
 		if (CV_HAS_WAITERS(&pp->p_cv))
 			cv_broadcast(&pp->p_cv);
@@ -662,7 +658,6 @@
 	} else if ((old & ~SE_EWANTED) == SE_DELETED) {
 		panic("page_unlock: page %p is deleted", (void *)pp);
 	} else if (old < 0) {
-		THREAD_KPRI_RELEASE();
 		pp->p_selock &= SE_EWANTED;
 		if (CV_HAS_WAITERS(&pp->p_cv))
 			cv_broadcast(&pp->p_cv);
@@ -682,7 +677,6 @@
 		if ((pp->p_toxic & PR_CAPTURE) &&
 		    !(curthread->t_flag & T_CAPTURING) &&
 		    !PP_RETIRED(pp)) {
-			THREAD_KPRI_REQUEST();
 			pp->p_selock = SE_WRITER;
 			mutex_exit(pse);
 			page_unlock_capture(pp);
@@ -712,7 +706,6 @@
 	if (!(pp->p_selock & SE_EWANTED)) {
 		/* no threads want exclusive access, try upgrade */
 		if (pp->p_selock == SE_READER) {
-			THREAD_KPRI_REQUEST();
 			/* convert to exclusive lock */
 			pp->p_selock = SE_WRITER;
 			mutex_exit(pse);
@@ -738,7 +731,6 @@
 
 	mutex_enter(pse);
 	excl_waiting =  pp->p_selock & SE_EWANTED;
-	THREAD_KPRI_RELEASE();
 	pp->p_selock = SE_READER | excl_waiting;
 	if (CV_HAS_WAITERS(&pp->p_cv))
 		cv_broadcast(&pp->p_cv);
@@ -756,7 +748,6 @@
 	ASSERT(!PP_ISFREE(pp));
 
 	mutex_enter(pse);
-	THREAD_KPRI_RELEASE();
 	pp->p_selock = SE_DELETED;
 	if (CV_HAS_WAITERS(&pp->p_cv))
 		cv_broadcast(&pp->p_cv);
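
The page_lock.c hunks drop the kpri request on every transition of p_selock
into SE_WRITER and the release on every transition out. p_selock is 0 when
unheld, a positive reader count while shared, and negative (SE_WRITER) when
held exclusively, with SE_EWANTED flagging a waiting writer. A minimal sketch
of the exclusive fast path that remains (hypothetical _sketch name; pse is the
hashed per-page mutex used in the diff):

/*
 * Sketch of the exclusive page-lock fast path after kpri removal: the
 * hashed per-page mutex serializes the 0 -> SE_WRITER transition.
 */
static int
page_trylock_excl_sketch(page_t *pp, kmutex_t *pse)
{
	int held = 0;

	mutex_enter(pse);
	if (pp->p_selock == 0) {	/* no readers, no writer */
		pp->p_selock = SE_WRITER; /* was: + THREAD_KPRI_REQUEST() */
		held = 1;
	}
	mutex_exit(pse);
	return (held);
}
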
--- a/usr/src/uts/i86pc/ml/offsets.in	Thu May 16 19:23:22 2019 +0000
+++ b/usr/src/uts/i86pc/ml/offsets.in	Wed Apr 24 14:45:41 2019 +0000
@@ -1,7 +1,7 @@
 \
 \ Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 \ Copyright 2012 Garrett D'Amore <garrett@damore.org>.  All rights reserved.
-\ Copyright 2018 Joyent, Inc.
+\ Copyright 2019 Joyent, Inc.
 \
 \ CDDL HEADER START
 \
@@ -88,7 +88,6 @@
 	t_lockstat
 	t_lockp
 	t_lock_flush
-	t_kpri_req
 	t_oldspl
 	t_pri
 	t_pil
--- a/usr/src/uts/intel/ia32/ml/lock_prim.s	Thu May 16 19:23:22 2019 +0000
+++ b/usr/src/uts/intel/ia32/ml/lock_prim.s	Wed Apr 24 14:45:41 2019 +0000
@@ -21,10 +21,9 @@
 /*
  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
+ * Copyright 2019 Joyent, Inc.
  */
 
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
 #if defined(lint) || defined(__lint)
 #include <sys/types.h>
 #include <sys/thread.h>
@@ -322,7 +321,7 @@
 	movl	8(%esp), %eax		/* get priority level */
 	pushl	%eax
 	call	splr			/* raise priority level */
-	movl 	8(%esp), %ecx		/* ecx = lock addr */
+	movl	8(%esp), %ecx		/* ecx = lock addr */
 	movl	$-1, %edx
 	addl	$4, %esp
 	xchgb	%dl, (%ecx)		/* try to set lock */
@@ -569,7 +568,7 @@
 	ret					/* nop space for lfence */
 	nop
 	nop
-.mutex_enter_lockstat_6323525_patch_point:	/* new patch point if lfence */ 
+.mutex_enter_lockstat_6323525_patch_point:	/* new patch point if lfence */
 	nop
 #else	/* OPTERON_WORKAROUND_6323525 */
 	ret
@@ -916,10 +915,8 @@
 #if defined(__amd64)
 
 	ENTRY(rw_enter)
-	movq	%gs:CPU_THREAD, %rdx		/* rdx = thread ptr */
 	cmpl	$RW_WRITER, %esi
 	je	.rw_write_enter
-	incl	T_KPRI_REQ(%rdx)		/* THREAD_KPRI_REQUEST() */
 	movq	(%rdi), %rax			/* rax = old rw_wwwh value */
 	testl	$RW_WRITE_LOCKED|RW_WRITE_WANTED, %eax
 	jnz	rw_enter_sleep
@@ -935,6 +932,7 @@
 	movl	$RW_READER, %edx
 	jmp	lockstat_wrapper_arg
 .rw_write_enter:
+	movq	%gs:CPU_THREAD, %rdx
 	orq	$RW_WRITE_LOCKED, %rdx		/* rdx = write-locked value */
 	xorl	%eax, %eax			/* rax = unheld value */
 	lock
@@ -970,8 +968,6 @@
 	lock
 	cmpxchgq %rdx, (%rdi)			/* try to drop read lock */
 	jnz	rw_exit_wakeup
-	movq	%gs:CPU_THREAD, %rcx		/* rcx = thread ptr */
-	decl	T_KPRI_REQ(%rcx)		/* THREAD_KPRI_RELEASE() */
 .rw_read_exit_lockstat_patch_point:
 	ret
 	movq	%rdi, %rsi			/* rsi = lock ptr */
@@ -1004,11 +1000,9 @@
 #else
 
 	ENTRY(rw_enter)
-	movl	%gs:CPU_THREAD, %edx		/* edx = thread ptr */
 	movl	4(%esp), %ecx			/* ecx = lock ptr */
 	cmpl	$RW_WRITER, 8(%esp)
 	je	.rw_write_enter
-	incl	T_KPRI_REQ(%edx)		/* THREAD_KPRI_REQUEST() */
 	movl	(%ecx), %eax			/* eax = old rw_wwwh value */
 	testl	$RW_WRITE_LOCKED|RW_WRITE_WANTED, %eax
 	jnz	rw_enter_sleep
@@ -1023,6 +1017,7 @@
 	pushl	$RW_READER
 	jmp	lockstat_wrapper_arg
 .rw_write_enter:
+	movl	%gs:CPU_THREAD, %edx
 	orl	$RW_WRITE_LOCKED, %edx		/* edx = write-locked value */
 	xorl	%eax, %eax			/* eax = unheld value */
 	lock
@@ -1058,8 +1053,6 @@
 	lock
 	cmpxchgl %edx, (%ecx)			/* try to drop read lock */
 	jnz	rw_exit_wakeup
-	movl	%gs:CPU_THREAD, %edx		/* edx = thread ptr */
-	decl	T_KPRI_REQ(%edx)		/* THREAD_KPRI_RELEASE() */
 .rw_read_exit_lockstat_patch_point:
 	ret
 	movl	$LS_RW_EXIT_RELEASE, %eax
@@ -1184,7 +1177,7 @@
 	addl	%ebx, %esi;			\
 	movl	$dstaddr, %edi;			\
 	addl	%ebx, %edi;			\
-0:      					\
+0:						\
 	decl	%esi;				\
 	decl	%edi;				\
 	pushl	$1;				\
@@ -1243,7 +1236,7 @@
 	movq	$normal_instr, %rsi;		\
 	movq	$active_instr, %rdi;		\
 	leaq	lockstat_probemap(%rip), %rax;	\
-	movl 	_MUL(event, DTRACE_IDSIZE)(%rax), %eax;	\
+	movl	_MUL(event, DTRACE_IDSIZE)(%rax), %eax;	\
 	testl	%eax, %eax;			\
 	jz	9f;				\
 	movq	%rdi, %rsi;			\
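
In the assembly fast paths the only functional change is deleting the
T_KPRI_REQ increment and decrement, so the thread-pointer load now happens
only on the writer path. A C rendering of the amd64 rw_enter() fast path after
this change (hypothetical _sketch name; lockstat patch points and the
tail-call plumbing are elided):

/*
 * C rendering of the amd64 rw_enter() fast path: load the lock word,
 * fall back to rw_enter_sleep() if a writer is involved or the CAS
 * loses a race, otherwise bump the reader hold count by one.
 */
void
rw_enter_sketch(rwlock_impl_t *lp, krw_t rw)
{
	uintptr_t old;

	if (rw == RW_WRITER) {
		if (casip(&lp->rw_wwwh, 0,
		    (uintptr_t)curthread | RW_WRITE_LOCKED) != 0)
			rw_enter_sleep(lp, rw);	/* held: slow path */
		return;
	}
	old = lp->rw_wwwh;
	if ((old & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) != 0 ||
	    casip(&lp->rw_wwwh, old, old + RW_READ_LOCK) != old)
		rw_enter_sleep(lp, rw);		/* contended: slow path */
}
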
--- a/usr/src/uts/intel/ia32/os/syscall.c	Thu May 16 19:23:22 2019 +0000
+++ b/usr/src/uts/intel/ia32/os/syscall.c	Wed Apr 24 14:45:41 2019 +0000
@@ -21,6 +21,7 @@
 
 /*
  * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2019 Joyent, Inc.
  */
 
 #include <sys/param.h>
@@ -1202,7 +1203,6 @@
 	 * Try to autoload the system call if necessary
 	 */
 	module_lock = lock_syscall(se, code);
-	THREAD_KPRI_RELEASE();	/* drop priority given by rw_enter */
 
 	/*
 	 * we've locked either the loaded syscall or nosys
@@ -1234,7 +1234,6 @@
 		}
 	}
 
-	THREAD_KPRI_REQUEST();	/* regain priority from read lock */
 	rw_exit(module_lock);
 	return (rval);
 }
--- a/usr/src/uts/sfmmu/vm/hat_sfmmu.c	Thu May 16 19:23:22 2019 +0000
+++ b/usr/src/uts/sfmmu/vm/hat_sfmmu.c	Wed Apr 24 14:45:41 2019 +0000
@@ -24,7 +24,7 @@
 /*
  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
  * Copyright 2016 Gary Mills
- * Copyright 2017 Joyent, Inc.
+ * Copyright 2019 Joyent, Inc.
  */
 
 /*
@@ -112,35 +112,35 @@
 		ASSERT(_eaddr <= _rgnp->rgn_saddr + _rgnp->rgn_size);	\
 	}
 
-#define	SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid) 	 	 \
-{						 			 \
-		caddr_t _hsva;						 \
-		caddr_t _heva;						 \
-		caddr_t _rsva;					 	 \
-		caddr_t _reva;					 	 \
-		int	_ttesz = get_hblk_ttesz(hmeblkp);		 \
-		int	_flagtte;					 \
-		ASSERT((srdp)->srd_refcnt != 0);			 \
-		ASSERT((rid) < SFMMU_MAX_HME_REGIONS);			 \
-		ASSERT((rgnp)->rgn_id == rid);				 \
-		ASSERT(!((rgnp)->rgn_flags & SFMMU_REGION_FREE));	 \
-		ASSERT(((rgnp)->rgn_flags & SFMMU_REGION_TYPE_MASK) ==	 \
-		    SFMMU_REGION_HME);					 \
-		ASSERT(_ttesz <= (rgnp)->rgn_pgszc);			 \
-		_hsva = (caddr_t)get_hblk_base(hmeblkp);		 \
-		_heva = get_hblk_endaddr(hmeblkp);			 \
-		_rsva = (caddr_t)P2ALIGN(				 \
-		    (uintptr_t)(rgnp)->rgn_saddr, HBLK_MIN_BYTES);	 \
-		_reva = (caddr_t)P2ROUNDUP(				 \
-		    (uintptr_t)((rgnp)->rgn_saddr + (rgnp)->rgn_size),	 \
-		    HBLK_MIN_BYTES);					 \
-		ASSERT(_hsva >= _rsva);				 	 \
-		ASSERT(_hsva < _reva);				 	 \
-		ASSERT(_heva > _rsva);				 	 \
-		ASSERT(_heva <= _reva);				 	 \
-		_flagtte = (_ttesz < HBLK_MIN_TTESZ) ? HBLK_MIN_TTESZ :  \
-			_ttesz;						 \
-		ASSERT(rgnp->rgn_hmeflags & (0x1 << _flagtte));		 \
+#define	SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid)		\
+{									\
+		caddr_t _hsva;						\
+		caddr_t _heva;						\
+		caddr_t _rsva;						\
+		caddr_t _reva;						\
+		int	_ttesz = get_hblk_ttesz(hmeblkp);		\
+		int	_flagtte;					\
+		ASSERT((srdp)->srd_refcnt != 0);			\
+		ASSERT((rid) < SFMMU_MAX_HME_REGIONS);			\
+		ASSERT((rgnp)->rgn_id == rid);				\
+		ASSERT(!((rgnp)->rgn_flags & SFMMU_REGION_FREE));	\
+		ASSERT(((rgnp)->rgn_flags & SFMMU_REGION_TYPE_MASK) ==	\
+		    SFMMU_REGION_HME);					\
+		ASSERT(_ttesz <= (rgnp)->rgn_pgszc);			\
+		_hsva = (caddr_t)get_hblk_base(hmeblkp);		\
+		_heva = get_hblk_endaddr(hmeblkp);			\
+		_rsva = (caddr_t)P2ALIGN(				\
+		    (uintptr_t)(rgnp)->rgn_saddr, HBLK_MIN_BYTES);	\
+		_reva = (caddr_t)P2ROUNDUP(				\
+		    (uintptr_t)((rgnp)->rgn_saddr + (rgnp)->rgn_size),	\
+		    HBLK_MIN_BYTES);					\
+		ASSERT(_hsva >= _rsva);					\
+		ASSERT(_hsva < _reva);					\
+		ASSERT(_heva > _rsva);					\
+		ASSERT(_heva <= _reva);					\
+		_flagtte = (_ttesz < HBLK_MIN_TTESZ) ? HBLK_MIN_TTESZ : \
+			_ttesz;						\
+		ASSERT(rgnp->rgn_hmeflags & (0x1 << _flagtte));		\
 }
 
 #else /* DEBUG */
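
(SFMMU_VALIDATE_SHAREDHBLK, reflowed above in a whitespace-only change, asserts that the hmeblk's virtual span [_hsva, _heva) lies inside the region's span once the region bounds are widened to HBLK_MIN_BYTES alignment. The bounds arithmetic uses the power-of-two helpers from <sys/sysmacros.h>, which for reference are:

	#define	P2ALIGN(x, align)	((x) & -(align))	/* round down */
	#define	P2ROUNDUP(x, align)	(-(-(x) & -(align)))	/* round up */

so, for example, P2ALIGN(0x12345, 0x2000) is 0x12000 and P2ROUNDUP(0x12345, 0x2000) is 0x14000.)
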
@@ -199,7 +199,7 @@
 
 /*
  * Flag to disable large page support.
- * 	value of 1 => disable all large pages.
+ *	value of 1 => disable all large pages.
  *	bits 1, 2, and 3 are to disable 64K, 512K and 4M pages respectively.
  *
  * For example, use the value 0x4 to disable 512K pages.
@@ -247,7 +247,7 @@
 static struct kmem_cache *sfmmu1_cache;
 static struct kmem_cache *pa_hment_cache;
 
-static kmutex_t 	ism_mlist_lock;	/* mutex for ism mapping list */
+static kmutex_t		ism_mlist_lock;	/* mutex for ism mapping list */
 /*
  * private data for ism
  */
@@ -393,7 +393,7 @@
  * Private sfmmu routines (prototypes)
  */
 static struct hme_blk *sfmmu_shadow_hcreate(sfmmu_t *, caddr_t, int, uint_t);
-static struct 	hme_blk *sfmmu_hblk_alloc(sfmmu_t *, caddr_t,
+static struct	hme_blk *sfmmu_hblk_alloc(sfmmu_t *, caddr_t,
 			struct hmehash_bucket *, uint_t, hmeblk_tag, uint_t,
 			uint_t);
 static caddr_t	sfmmu_hblk_unload(struct hat *, struct hme_blk *, caddr_t,
@@ -463,7 +463,7 @@
 static void	sfmmu_invalidate_ctx(sfmmu_t *);
 static void	sfmmu_sync_mmustate(sfmmu_t *);
 
-static void 	sfmmu_tsbinfo_setup_phys(struct tsb_info *, pfn_t);
+static void	sfmmu_tsbinfo_setup_phys(struct tsb_info *, pfn_t);
 static int	sfmmu_tsbinfo_alloc(struct tsb_info **, int, int, uint_t,
 			sfmmu_t *);
 static void	sfmmu_tsb_free(struct tsb_info *);
@@ -561,8 +561,8 @@
 struct hmehash_bucket *khme_hash;	/* kernel hmeblk hash table */
 uint64_t	uhme_hash_pa;		/* PA of uhme_hash */
 uint64_t	khme_hash_pa;		/* PA of khme_hash */
-int 		uhmehash_num;		/* # of buckets in user hash table */
-int 		khmehash_num;		/* # of buckets in kernel hash table */
+int		uhmehash_num;		/* # of buckets in user hash table */
+int		khmehash_num;		/* # of buckets in kernel hash table */
 
 uint_t		max_mmu_ctxdoms = 0;	/* max context domains in the system */
 mmu_ctx_t	**mmu_ctxs_tbl;		/* global array of context domains */
@@ -705,7 +705,7 @@
 /*
  * Global data
  */
-sfmmu_t 	*ksfmmup;		/* kernel's hat id */
+sfmmu_t		*ksfmmup;		/* kernel's hat id */
 
 #ifdef DEBUG
 static void	chk_tte(tte_t *, tte_t *, tte_t *, struct hme_blk *);
@@ -857,7 +857,7 @@
 	if (ttesz == TTE8K || ttesz == TTE4M) {				\
 		sfmmu_unload_tsb(sfmmup, addr, ttesz);			\
 	} else {							\
-		caddr_t sva = ismhat ? addr : 				\
+		caddr_t sva = ismhat ? addr :				\
 		    (caddr_t)get_hblk_base(hmeblkp);			\
 		caddr_t eva = sva + get_hblk_span(hmeblkp);		\
 		ASSERT(addr >= sva && addr < eva);			\
@@ -1020,7 +1020,7 @@
 void
 hat_init_pagesizes()
 {
-	int 		i;
+	int		i;
 
 	mmu_exported_page_sizes = 0;
 	for (i = TTE8K; i < max_mmu_page_sizes; i++) {
@@ -1061,7 +1061,7 @@
 void
 hat_init(void)
 {
-	int 		i;
+	int		i;
 	uint_t		sz;
 	size_t		size;
 
@@ -2123,7 +2123,7 @@
 /* ARGSUSED */
 int
 hat_dup(struct hat *hat, struct hat *newhat, caddr_t addr, size_t len,
-	uint_t flag)
+    uint_t flag)
 {
 	sf_srd_t *srdp;
 	sf_scd_t *scdp;
@@ -2188,7 +2188,7 @@
 
 void
 hat_memload(struct hat *hat, caddr_t addr, struct page *pp,
-	uint_t attr, uint_t flags)
+    uint_t attr, uint_t flags)
 {
 	hat_do_memload(hat, addr, pp, attr, flags,
 	    SFMMU_INVALID_SHMERID);
@@ -2196,7 +2196,7 @@
 
 void
 hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp,
-	uint_t attr, uint_t flags, hat_region_cookie_t rcookie)
+    uint_t attr, uint_t flags, hat_region_cookie_t rcookie)
 {
 	uint_t rid;
 	if (rcookie == HAT_INVALID_REGION_COOKIE) {
@@ -2216,7 +2216,7 @@
  */
 static void
 hat_do_memload(struct hat *hat, caddr_t addr, struct page *pp,
-	uint_t attr, uint_t flags, uint_t rid)
+    uint_t attr, uint_t flags, uint_t rid)
 {
 	tte_t tte;
 
@@ -2273,7 +2273,7 @@
  */
 void
 hat_devload(struct hat *hat, caddr_t addr, size_t len, pfn_t pfn,
-	uint_t attr, int flags)
+    uint_t attr, int flags)
 {
 	tte_t tte;
 	struct page *pp = NULL;
@@ -2414,7 +2414,7 @@
 
 void
 hat_memload_array(struct hat *hat, caddr_t addr, size_t len,
-	struct page **pps, uint_t attr, uint_t flags)
+    struct page **pps, uint_t attr, uint_t flags)
 {
 	hat_do_memload_array(hat, addr, len, pps, attr, flags,
 	    SFMMU_INVALID_SHMERID);
@@ -2422,8 +2422,8 @@
 
 void
 hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len,
-	struct page **pps, uint_t attr, uint_t flags,
-	hat_region_cookie_t rcookie)
+    struct page **pps, uint_t attr, uint_t flags,
+    hat_region_cookie_t rcookie)
 {
 	uint_t rid;
 	if (rcookie == HAT_INVALID_REGION_COOKIE) {
@@ -2449,7 +2449,7 @@
  */
 static void
 hat_do_memload_array(struct hat *hat, caddr_t addr, size_t len,
-	struct page **pps, uint_t attr, uint_t flags, uint_t rid)
+    struct page **pps, uint_t attr, uint_t flags, uint_t rid)
 {
 	int  ttesz;
 	size_t mapsz;
@@ -2559,7 +2559,7 @@
  */
 static void
 sfmmu_memload_batchsmall(struct hat *hat, caddr_t vaddr, page_t **pps,
-		    uint_t attr, uint_t flags, pgcnt_t npgs, uint_t rid)
+    uint_t attr, uint_t flags, pgcnt_t npgs, uint_t rid)
 {
 	tte_t	tte;
 	page_t *pp;
@@ -2675,7 +2675,7 @@
  */
 void
 sfmmu_tteload(struct hat *sfmmup, tte_t *ttep, caddr_t vaddr, page_t *pp,
-	uint_t flags)
+    uint_t flags)
 {
 	ASSERT(sfmmup == ksfmmup);
 	(void) sfmmu_tteload_array(sfmmup, ttep, vaddr, &pp, flags,
@@ -2878,11 +2878,11 @@
  */
 static int
 sfmmu_tteload_array(sfmmu_t *sfmmup, tte_t *ttep, caddr_t vaddr,
-	page_t **pps, uint_t flags, uint_t rid)
+    page_t **pps, uint_t flags, uint_t rid)
 {
 	struct hmehash_bucket *hmebp;
 	struct hme_blk *hmeblkp;
-	int 	ret;
+	int	ret;
 	uint_t	size;
 
 	/*
@@ -2947,7 +2947,7 @@
  */
 static struct hme_blk *
 sfmmu_tteload_find_hmeblk(sfmmu_t *sfmmup, struct hmehash_bucket *hmebp,
-	caddr_t vaddr, uint_t size, uint_t flags, uint_t rid)
+    caddr_t vaddr, uint_t size, uint_t flags, uint_t rid)
 {
 	hmeblk_tag hblktag;
 	int hmeshift;
@@ -3040,7 +3040,7 @@
  */
 static int
 sfmmu_tteload_addentry(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, tte_t *ttep,
-	caddr_t vaddr, page_t **pps, uint_t flags, uint_t rid)
+    caddr_t vaddr, page_t **pps, uint_t flags, uint_t rid)
 {
 	page_t *pp = *pps;
 	int hmenum, size, remap;
@@ -3386,7 +3386,7 @@
 static int
 sfmmu_pagearray_setup(caddr_t addr, page_t **pps, tte_t *ttep, int remap)
 {
-	int 	i, index, ttesz;
+	int	i, index, ttesz;
 	pfn_t	pfnum;
 	pgcnt_t	npgs;
 	page_t *pp, *pp1;
@@ -3670,7 +3670,7 @@
  */
 static void
 sfmmu_shadow_hcleanup(sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
-	struct hmehash_bucket *hmebp)
+    struct hmehash_bucket *hmebp)
 {
 	caddr_t addr, endaddr;
 	int hashno, size;
@@ -3698,7 +3698,7 @@
 
 static void
 sfmmu_free_hblks(sfmmu_t *sfmmup, caddr_t addr, caddr_t endaddr,
-	int hashno)
+    int hashno)
 {
 	int hmeshift, shadow = 0;
 	hmeblk_tag hblktag;
@@ -4212,10 +4212,10 @@
  */
 id_t
 hat_register_callback(int key,
-	int (*prehandler)(caddr_t, uint_t, uint_t, void *),
-	int (*posthandler)(caddr_t, uint_t, uint_t, void *, pfn_t),
-	int (*errhandler)(caddr_t, uint_t, uint_t, void *),
-	int capture_cpus)
+    int (*prehandler)(caddr_t, uint_t, uint_t, void *),
+    int (*posthandler)(caddr_t, uint_t, uint_t, void *, pfn_t),
+    int (*errhandler)(caddr_t, uint_t, uint_t, void *),
+    int capture_cpus)
 {
 	id_t id;
 
@@ -4288,17 +4288,17 @@
  */
 int
 hat_add_callback(id_t callback_id, caddr_t vaddr, uint_t len, uint_t flags,
-	void *pvt, pfn_t *rpfn, void **cookiep)
-{
-	struct 		hmehash_bucket *hmebp;
-	hmeblk_tag 	hblktag;
+    void *pvt, pfn_t *rpfn, void **cookiep)
+{
+	struct		hmehash_bucket *hmebp;
+	hmeblk_tag	hblktag;
 	struct hme_blk	*hmeblkp;
-	int 		hmeshift, hashno;
-	caddr_t 	saddr, eaddr, baseaddr;
+	int		hmeshift, hashno;
+	caddr_t		saddr, eaddr, baseaddr;
 	struct pa_hment *pahmep;
 	struct sf_hment *sfhmep, *osfhmep;
 	kmutex_t	*pml;
-	tte_t   	tte;
+	tte_t		tte;
 	page_t		*pp;
 	vnode_t		*vp;
 	u_offset_t	off;
@@ -4517,7 +4517,7 @@
  */
 void
 hat_delete_callback(caddr_t vaddr, uint_t len, void *pvt, uint_t flags,
-	void *cookie)
+    void *cookie)
 {
 	struct		hmehash_bucket *hmebp;
 	hmeblk_tag	hblktag;
@@ -4835,7 +4835,7 @@
  */
 static void
 sfmmu_chgattr(struct hat *sfmmup, caddr_t addr, size_t len, uint_t attr,
-	int mode)
+    int mode)
 {
 	struct hmehash_bucket *hmebp;
 	hmeblk_tag hblktag;
@@ -4931,7 +4931,7 @@
  */
 static caddr_t
 sfmmu_hblk_chgattr(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
-	caddr_t endaddr, demap_range_t *dmrp, uint_t attr, int mode)
+    caddr_t endaddr, demap_range_t *dmrp, uint_t attr, int mode)
 {
 	tte_t tte, tteattr, tteflags, ttemod;
 	struct sf_hment *sfhmep;
@@ -5259,7 +5259,7 @@
  */
 static caddr_t
 sfmmu_hblk_chgprot(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
-	caddr_t endaddr, demap_range_t *dmrp, uint_t vprot)
+    caddr_t endaddr, demap_range_t *dmrp, uint_t vprot)
 {
 	uint_t pprot;
 	tte_t tte, ttemod;
@@ -5418,24 +5418,24 @@
 	case (PROT_EXEC):
 	case (PROT_EXEC | PROT_READ):
 		*tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT;
-		return (TTE_PRIV_INT); 		/* set prv and clr wrt */
+		return (TTE_PRIV_INT);		/* set prv and clr wrt */
 	case (PROT_WRITE):
 	case (PROT_WRITE | PROT_READ):
 	case (PROT_EXEC | PROT_WRITE):
 	case (PROT_EXEC | PROT_WRITE | PROT_READ):
 		*tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT;
-		return (TTE_PRIV_INT | TTE_WRPRM_INT); 	/* set prv and wrt */
+		return (TTE_PRIV_INT | TTE_WRPRM_INT);	/* set prv and wrt */
 	case (PROT_USER | PROT_READ):
 	case (PROT_USER | PROT_EXEC):
 	case (PROT_USER | PROT_EXEC | PROT_READ):
 		*tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT;
-		return (0); 			/* clr prv and wrt */
+		return (0);			/* clr prv and wrt */
 	case (PROT_USER | PROT_WRITE):
 	case (PROT_USER | PROT_WRITE | PROT_READ):
 	case (PROT_USER | PROT_EXEC | PROT_WRITE):
 	case (PROT_USER | PROT_EXEC | PROT_WRITE | PROT_READ):
 		*tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT;
-		return (TTE_WRPRM_INT); 	/* clr prv and set wrt */
+		return (TTE_WRPRM_INT);		/* clr prv and set wrt */
 	default:
 		panic("sfmmu_vtop_prot -- bad prot %x", vprot);
 	}
@@ -5449,12 +5449,8 @@
  * hash table to find and remove mappings.
  */
 static void
-hat_unload_large_virtual(
-	struct hat		*sfmmup,
-	caddr_t			startaddr,
-	size_t			len,
-	uint_t			flags,
-	hat_callback_t		*callback)
+hat_unload_large_virtual(struct hat *sfmmup, caddr_t startaddr, size_t len,
+    uint_t flags, hat_callback_t *callback)
 {
 	struct hmehash_bucket *hmebp;
 	struct hme_blk *hmeblkp;
@@ -5592,12 +5588,8 @@
 
 
 void
-hat_unload_callback(
-	struct hat *sfmmup,
-	caddr_t addr,
-	size_t len,
-	uint_t flags,
-	hat_callback_t *callback)
+hat_unload_callback(struct hat *sfmmup, caddr_t addr, size_t len, uint_t flags,
+    hat_callback_t *callback)
 {
 	struct hmehash_bucket *hmebp;
 	hmeblk_tag hblktag;
@@ -5920,7 +5912,7 @@
  */
 static caddr_t
 sfmmu_hblk_unload(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
-	caddr_t endaddr, demap_range_t *dmrp, uint_t flags)
+    caddr_t endaddr, demap_range_t *dmrp, uint_t flags)
 {
 	tte_t	tte, ttemod;
 	struct	sf_hment *sfhmep;
@@ -6304,7 +6296,7 @@
 
 static caddr_t
 sfmmu_hblk_sync(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
-	caddr_t endaddr, int clearflag)
+    caddr_t endaddr, int clearflag)
 {
 	tte_t	tte, ttemod;
 	struct sf_hment *sfhmep;
@@ -6379,7 +6371,7 @@
 sfmmu_ttesync(struct hat *sfmmup, caddr_t addr, tte_t *ttep, page_t *pp)
 {
 	uint_t rm = 0;
-	int   	sz;
+	int	sz;
 	pgcnt_t	npgs;
 
 	ASSERT(TTE_IS_VALID(ttep));
@@ -6744,7 +6736,7 @@
  *
  * Input:
  *
- * target : 	constituent pages are SE_EXCL locked.
+ * target :	constituent pages are SE_EXCL locked.
  * replacement:	constituent pages are SE_EXCL locked.
  *
  * Output:
@@ -7492,7 +7484,7 @@
  */
 static cpuset_t
 sfmmu_pagesync(struct page *pp, struct sf_hment *sfhme,
-	uint_t clearflag)
+    uint_t clearflag)
 {
 	caddr_t addr;
 	tte_t tte, ttemod;
@@ -8359,7 +8351,7 @@
 hat_get_mapped_size(struct hat *hat)
 {
 	size_t		assize = 0;
-	int 		i;
+	int		i;
 
 	if (hat == NULL)
 		return (0);
@@ -8459,12 +8451,12 @@
  * HATOP_SHARE()/UNSHARE() return 0
  */
 int
-hat_share(struct hat *sfmmup, caddr_t addr,
-	struct hat *ism_hatid, caddr_t sptaddr, size_t len, uint_t ismszc)
+hat_share(struct hat *sfmmup, caddr_t addr, struct hat *ism_hatid,
+    caddr_t sptaddr, size_t len, uint_t ismszc)
 {
 	ism_blk_t	*ism_blkp;
 	ism_blk_t	*new_iblk;
-	ism_map_t 	*ism_map;
+	ism_map_t	*ism_map;
 	ism_ment_t	*ism_ment;
 	int		i, added;
 	hatlock_t	*hatlockp;
@@ -8689,11 +8681,11 @@
 void
 hat_unshare(struct hat *sfmmup, caddr_t addr, size_t len, uint_t ismszc)
 {
-	ism_map_t 	*ism_map;
+	ism_map_t	*ism_map;
 	ism_ment_t	*free_ment = NULL;
 	ism_blk_t	*ism_blkp;
 	struct hat	*ism_hatid;
-	int 		found, i;
+	int		found, i;
 	hatlock_t	*hatlockp;
 	struct tsb_info	*tsbinfo;
 	uint_t		ismshift = page_get_shift(ismszc);
@@ -9299,7 +9291,7 @@
 	tte_t	tte;
 	caddr_t	vaddr;
 	int	clr_valid = 0;
-	int 	color, color1, bcolor;
+	int	color, color1, bcolor;
 	int	i, ncolors;
 
 	ASSERT(pp != NULL);
@@ -9369,7 +9361,7 @@
 
 void
 sfmmu_page_cache_array(page_t *pp, int flags, int cache_flush_flag,
-	pgcnt_t npages)
+    pgcnt_t npages)
 {
 	kmutex_t *pmtx;
 	int i, ncolors, bcolor;
@@ -10168,7 +10160,7 @@
 
 static void
 sfmmu_size_tsb(sfmmu_t *sfmmup, int growing, uint64_t tte8k_cnt,
-	uint64_t tte4m_cnt, int sectsb_thresh)
+    uint64_t tte4m_cnt, int sectsb_thresh)
 {
 	int tsb_bits;
 	uint_t tsb_szc;
@@ -10309,7 +10301,7 @@
 	ism_blk_t	*blkp, *nx_blkp;
 #ifdef	DEBUG
 	ism_map_t	*map;
-	int 		i;
+	int		i;
 #endif
 
 	ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0);
@@ -10882,7 +10874,6 @@
 {
 	hatlock_t *hatlockp;
 
-	THREAD_KPRI_REQUEST();
 	if (!hatlock_held)
 		hatlockp = sfmmu_hat_enter(sfmmup);
 	while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY))
@@ -10904,7 +10895,6 @@
 	cv_broadcast(&sfmmup->sfmmu_tsb_cv);
 	if (!hatlock_held)
 		sfmmu_hat_exit(hatlockp);
-	THREAD_KPRI_RELEASE();
 }
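
(These two routines, sfmmu_ismhat_enter()/sfmmu_ismhat_exit(), serialize ISM map/unmap operations on the HAT_ISMBUSY flag under the hat lock; the kpri request/release pair bracketing them bought nothing beyond the now-deleted priority boost, so only the flag-and-cv protocol remains. Roughly, condensed from the surrounding code and ignoring the hatlock_held parameter, with cv_wait() assumed on the hat mutex:

	/* enter: wait until nobody holds ISMBUSY, then claim it */
	hatlockp = sfmmu_hat_enter(sfmmup);
	while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY))
		cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp));
	SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY);
	sfmmu_hat_exit(hatlockp);

	/* exit: drop the flag and wake all waiters */
	hatlockp = sfmmu_hat_enter(sfmmup);
	SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY);
	cv_broadcast(&sfmmup->sfmmu_tsb_cv);
	sfmmu_hat_exit(hatlockp);
)
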
 
 /*
@@ -10916,7 +10906,7 @@
  *
  * (2) if we are allocating an hblk for mapping a slab in sfmmu_cache,
  *
- * 		(a) try to return an hblk from reserve pool of free hblks;
+ *		(a) try to return an hblk from reserve pool of free hblks;
  *		(b) if the reserve pool is empty, acquire hblk_reserve_lock
  *		    and return hblk_reserve.
  *
@@ -10933,8 +10923,8 @@
  */
 static struct hme_blk *
 sfmmu_hblk_alloc(sfmmu_t *sfmmup, caddr_t vaddr,
-	struct hmehash_bucket *hmebp, uint_t size, hmeblk_tag hblktag,
-	uint_t flags, uint_t rid)
+    struct hmehash_bucket *hmebp, uint_t size, hmeblk_tag hblktag,
+    uint_t flags, uint_t rid)
 {
 	struct hme_blk *hmeblkp = NULL;
 	struct hme_blk *newhblkp;
@@ -11441,7 +11431,7 @@
  */
 static int
 sfmmu_steal_this_hblk(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
-	uint64_t hblkpa, struct hme_blk *pr_hblk)
+    uint64_t hblkpa, struct hme_blk *pr_hblk)
 {
 	int shw_size, vshift;
 	struct hme_blk *shw_hblkp;
@@ -12066,14 +12056,14 @@
 /* ARGSUSED */
 static void
 sfmmu_ismtlbcache_demap(caddr_t addr, sfmmu_t *ism_sfmmup,
-	struct hme_blk *hmeblkp, pfn_t pfnum, int cache_flush_flag)
-{
-	cpuset_t 	cpuset;
-	caddr_t 	va;
+    struct hme_blk *hmeblkp, pfn_t pfnum, int cache_flush_flag)
+{
+	cpuset_t	cpuset;
+	caddr_t		va;
 	ism_ment_t	*ment;
 	sfmmu_t		*sfmmup;
 #ifdef VAC
-	int 		vcolor;
+	int		vcolor;
 #endif
 
 	sf_scd_t	*scdp;
@@ -12163,8 +12153,8 @@
  */
 static void
 sfmmu_tlbcache_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
-	pfn_t pfnum, int tlb_noflush, int cpu_flag, int cache_flush_flag,
-	int hat_lock_held)
+    pfn_t pfnum, int tlb_noflush, int cpu_flag, int cache_flush_flag,
+    int hat_lock_held)
 {
 #ifdef VAC
 	int vcolor;
@@ -12252,7 +12242,7 @@
  */
 static void
 sfmmu_tlb_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
-	int tlb_noflush, int hat_lock_held)
+    int tlb_noflush, int hat_lock_held)
 {
 	cpuset_t cpuset;
 	hatlock_t *hatlockp;
@@ -12449,7 +12439,7 @@
 	/* set HAT cnum invalid across all context domains. */
 	for (i = 0; i < max_mmu_ctxdoms; i++) {
 
-		cnum = 	sfmmup->sfmmu_ctxs[i].cnum;
+		cnum = sfmmup->sfmmu_ctxs[i].cnum;
 		if (cnum == INVALID_CONTEXT) {
 			continue;
 		}
@@ -12638,7 +12628,7 @@
 /* ARGSUSED */
 static int
 sfmmu_tsb_post_relocator(caddr_t va, uint_t tsbsz, uint_t flags,
-	void *tsbinfo, pfn_t newpfn)
+    void *tsbinfo, pfn_t newpfn)
 {
 	hatlock_t *hatlockp;
 	struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo;
@@ -12682,7 +12672,7 @@
  */
 static int
 sfmmu_tsbinfo_alloc(struct tsb_info **tsbinfopp, int tsb_szc, int tte_sz_mask,
-	uint_t flags, sfmmu_t *sfmmup)
+    uint_t flags, sfmmu_t *sfmmup)
 {
 	int err;
 
@@ -13812,15 +13802,9 @@
  * the ism_map structure for ism segments.
  */
 hat_region_cookie_t
-hat_join_region(struct hat *sfmmup,
-	caddr_t r_saddr,
-	size_t r_size,
-	void *r_obj,
-	u_offset_t r_objoff,
-	uchar_t r_perm,
-	uchar_t r_pgszc,
-	hat_rgn_cb_func_t r_cb_function,
-	uint_t flags)
+hat_join_region(struct hat *sfmmup, caddr_t r_saddr, size_t r_size,
+    void *r_obj, u_offset_t r_objoff, uchar_t r_perm, uchar_t r_pgszc,
+    hat_rgn_cb_func_t r_cb_function, uint_t flags)
 {
 	sf_srd_t *srdp = sfmmup->sfmmu_srdp;
 	uint_t rhash;
@@ -15384,7 +15368,7 @@
  * We cross-call to make sure that there are no threads on other cpus accessing
  * these hmblks and then complete the process of freeing them under the
  * following conditions:
- * 	The total number of pending hmeblks is greater than the threshold
+ *	The total number of pending hmeblks is greater than the threshold
  *	The reserve list has fewer than HBLK_RESERVE_CNT hmeblks
  *	It is at least 1 second since the last time we cross-called
  *
@@ -15462,7 +15446,7 @@
  */
 void
 sfmmu_hblk_hash_add(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
-	uint64_t hblkpa)
+    uint64_t hblkpa)
 {
 	ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
 #ifdef	DEBUG
@@ -15518,8 +15502,7 @@
  */
 void
 sfmmu_hblk_hash_rm(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
-    struct hme_blk *pr_hblk, struct hme_blk **listp,
-    int free_now)
+    struct hme_blk *pr_hblk, struct hme_blk **listp, int free_now)
 {
 	int shw_size, vshift;
 	struct hme_blk *shw_hblkp;
--- a/usr/src/uts/sparc/os/syscall.c	Thu May 16 19:23:22 2019 +0000
+++ b/usr/src/uts/sparc/os/syscall.c	Wed Apr 24 14:45:41 2019 +0000
@@ -21,6 +21,7 @@
 
 /*
  * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2019 Joyent, Inc.
  */
 
 #include <sys/param.h>
@@ -191,7 +192,7 @@
 #endif
 
 /*
- * 	Save the system call arguments in a safe place.
+ *	Save the system call arguments in a safe place.
  *	lwp->lwp_ap normally points to the out regs in the reg structure.
  *	If the user is going to change the out registers, g1, or the stack,
  *	and might want to get the args (for /proc tracing), it must copy
@@ -1004,7 +1005,7 @@
 /*
  * Loadable syscall support.
  *	If needed, load the module, then reserve it by holding a read
- * 	lock for the duration of the call.
+ *	lock for the duration of the call.
  *	Later, if the syscall is not unloadable, it could patch the vector.
  */
 /*ARGSUSED*/
@@ -1026,7 +1027,6 @@
 	 * Try to autoload the system call if necessary.
 	 */
 	module_lock = lock_syscall(se, code);
-	THREAD_KPRI_RELEASE();	/* drop priority given by rw_enter */
 
 	/*
 	 * we've locked either the loaded syscall or nosys
@@ -1040,7 +1040,6 @@
 		rval = syscall_ap();
 	}
 
-	THREAD_KPRI_REQUEST();	/* regain priority from read lock */
 	rw_exit(module_lock);
 	return (rval);
 }
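
(This is the SPARC twin of the loadable_syscall() change in the x86 syscall.c earlier in this changeset: the same THREAD_KPRI_RELEASE()/THREAD_KPRI_REQUEST() balancing pair around the module read lock, removed for the same reason; see the sketch after that hunk.)
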
@@ -1070,7 +1069,7 @@
 	 * Handle argument setup, unless already done in pre_syscall().
 	 */
 	if (callp->sy_narg > 5) {
-		if (save_syscall_args()) 	/* move args to LWP array */
+		if (save_syscall_args())	/* move args to LWP array */
 			return ((int64_t)set_errno(EFAULT));
 	} else if (!lwp->lwp_argsaved) {
 		long *ap;
--- a/usr/src/uts/sparc/v9/ml/lock_prim.s	Thu May 16 19:23:22 2019 +0000
+++ b/usr/src/uts/sparc/v9/ml/lock_prim.s	Wed Apr 24 14:45:41 2019 +0000
@@ -21,10 +21,9 @@
 /*
  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
+ * Copyright 2019 Joyent, Inc.
  */
 
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
 #if defined(lint)
 #include <sys/types.h>
 #include <sys/thread.h>
@@ -260,7 +259,7 @@
 
 /*
  * lock_set_spl(lp, new_pil, *old_pil_addr)
- * 	Sets pil to new_pil, grabs lp, stores old pil in *old_pil_addr.
+ *	Sets pil to new_pil, grabs lp, stores old pil in *old_pil_addr.
  */
 
 #if defined(lint)
@@ -337,7 +336,7 @@
 
 /*
  * mutex_enter() and mutex_exit().
- * 
+ *
  * These routines handle the simple cases of mutex_enter() (adaptive
  * lock, not held) and mutex_exit() (adaptive lock, held, no waiters).
  * If anything complicated is going on we punt to mutex_vector_enter().
@@ -477,7 +476,7 @@
 
 /*
  * rw_enter() and rw_exit().
- * 
+ *
  * These routines handle the simple cases of rw_enter (write-locking an unheld
  * lock or read-locking a lock that's neither write-locked nor write-wanted)
  * and rw_exit (no waiters or not the last reader).  If anything complicated
@@ -502,13 +501,10 @@
 	cmp	%o1, RW_WRITER			! entering as writer?
 	be,a,pn	%icc, 2f			! if so, go do it ...
 	or	THREAD_REG, RW_WRITE_LOCKED, %o5 ! delay: %o5 = owner
-	ld	[THREAD_REG + T_KPRI_REQ], %o3	! begin THREAD_KPRI_REQUEST()
 	ldn	[%o0], %o4			! %o4 = old lock value
-	inc	%o3				! bump kpri
-	st	%o3, [THREAD_REG + T_KPRI_REQ]	! store new kpri
 1:
 	andcc	%o4, RW_WRITE_CLAIMED, %g0	! write-locked or write-wanted?
-	bz,pt	%xcc, 3f	 		! if so, prepare to block
+	bz,pt	%xcc, 3f			! if so, prepare to block
 	add	%o4, RW_READ_LOCK, %o5		! delay: increment hold count
 	sethi	%hi(rw_enter_sleep), %o2	! load up jump
 	jmp	%o2 + %lo(rw_enter_sleep)	! jmp to rw_enter_sleep
@@ -552,15 +548,14 @@
 	bnz,pn	%xcc, 2f			! single reader, no waiters?
 	clr	%o1
 1:
-	ld	[THREAD_REG + T_KPRI_REQ], %g1	! begin THREAD_KPRI_RELEASE()
 	srl	%o4, RW_HOLD_COUNT_SHIFT, %o3	! %o3 = hold count (lockstat)
 	casx	[%o0], %o4, %o5			! try to drop lock
 	cmp	%o4, %o5			! did we succeed?
 	bne,pn	%xcc, rw_exit_wakeup		! if not, go to C
-	dec	%g1				! delay: drop kpri
+	nop					! delay: do nothing
 .rw_read_exit_lockstat_patch_point:
 	retl
-	st	%g1, [THREAD_REG + T_KPRI_REQ]	! delay: store new kpri
+	nop					! delay: do nothing
 2:
 	andcc	%o4, RW_WRITE_LOCKED, %g0	! are we a writer?
 	bnz,a,pt %xcc, 3f
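
(Two things are worth noting in the rw fast paths above. First, the read-enter path simply stops touching t_kpri_req: the load/increment/store triple disappears outright. Second, in the read-exit path the deleted kpri instructions sat in branch delay slots, after the bne and the retl, so they are replaced with nops rather than removed; deleting them would pull the following instructions into those delay slots and, in particular, move .rw_read_exit_lockstat_patch_point off the retl it patches. In C terms the surviving reader logic is roughly as below -- illustrative only, assuming the lock word is rw_wwwh as in the C rwlock implementation, with casx() standing in for the hardware compare-and-swap:

	/* rw_enter(rwlp, RW_READER) fast path */
	old = rwlp->rw_wwwh;
	if ((old & RW_WRITE_CLAIMED) != 0 ||	/* write-locked or -wanted? */
	    casx(&rwlp->rw_wwwh, old, old + RW_READ_LOCK) != old)
		rw_enter_sleep(rwlp, RW_READER);	/* slow path */

	/* rw_exit(rwlp) read-unlock fast path (single reader, no waiters) */
	old = rwlp->rw_wwwh;
	if (casx(&rwlp->rw_wwwh, old, old - RW_READ_LOCK) != old)
		rw_exit_wakeup(rwlp);			/* slow path */
)
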
@@ -689,7 +684,7 @@
  * Does not keep statistics on the lock.
  *
  * Entry:	%l6 - points to mutex
- * 		%l7 - address of call (returns to %l7+8)
+ *		%l7 - address of call (returns to %l7+8)
  * Uses:	%l6, %l5
  */
 #ifndef lint
@@ -700,7 +695,7 @@
 	tst	%l5
 	bnz	3f			! lock already held - go spin
 	nop
-2:	
+2:
 	jmp	%l7 + 8			! return
 	membar	#LoadLoad
 	!
@@ -716,7 +711,7 @@
 
 	sethi	%hi(panicstr) , %l5
 	ldn	[%l5 + %lo(panicstr)], %l5
-	tst 	%l5
+	tst	%l5
 	bnz	2b			! after panic, feign success
 	nop
 	b	4b
@@ -732,7 +727,7 @@
  * running at high level already.
  *
  * Entry:	%l6 - points to mutex
- * 		%l7 - address of call (returns to %l7+8)
+ *		%l7 - address of call (returns to %l7+8)
  * Uses:	none
  */
 #ifndef lint
--- a/usr/src/uts/sun4/ml/offsets.in	Thu May 16 19:23:22 2019 +0000
+++ b/usr/src/uts/sun4/ml/offsets.in	Wed Apr 24 14:45:41 2019 +0000
@@ -1,6 +1,7 @@
 \ offsets.in: input file to produce assym.h using the stabs program
 \ Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 \ Copyright 2012 Garrett D'Amore <garett@damore.org>.  All rights reserved.
+\ Copyright 2019 Joyent, Inc.
 \
 \ CDDL HEADER START
 \
@@ -24,44 +25,44 @@
 \
 \
 \ Guidelines:
-\ 
+\
 \ A blank line is required between structure/union/intrinsic names.
-\ 
+\
 \ The general form is:
-\ 
+\
 \	name size_define [shift_define]
 \		member_name [offset_define]
 \	{blank line}
-\ 
+\
 \ If offset_define is not specified then the member_name is
 \ converted to all caps and used instead.  If the size of an item is
 \ a power of two then an optional shift count may be output using
 \ shift_define as the name but only if shift_define was specified.
-\ 
+\
 \ Arrays cause stabs to automatically output the per-array-item increment
 \ in addition to the base address:
-\ 
+\
 \	 foo FOO_SIZE
 \		array	FOO_ARRAY
-\ 
+\
 \ results in:
-\ 
+\
 \	#define	FOO_ARRAY	0x0
 \	#define	FOO_ARRAY_INCR	0x4
-\ 
+\
 \ which allows \#define's to be used to specify array items:
-\ 
+\
 \	#define	FOO_0	(FOO_ARRAY + (0 * FOO_ARRAY_INCR))
 \	#define	FOO_1	(FOO_ARRAY + (1 * FOO_ARRAY_INCR))
 \	...
 \	#define	FOO_n	(FOO_ARRAY + (n * FOO_ARRAY_INCR))
-\ 
+\
 \ There are several examples below (search for _INCR).
-\ 
+\
 \ There is currently no manner in which to identify "anonymous"
 \ structures or unions so if they are to be used in assembly code
 \ they must be given names.
-\ 
+\
 \ When specifying the offsets of nested structures/unions each nested
 \ structure or union must be listed separately then use the
 \ "\#define" escapes to add the offsets from the base structure/union
@@ -165,7 +166,6 @@
 	_tu._ts._t_post_sys	T_POST_SYS
 	_tu._ts._t_trapret	T_TRAPRET
 	t_preempt_lk
-	t_kpri_req
 	t_lockstat
 	t_pil
 	t_intr_start
@@ -374,7 +374,7 @@
 	cpu_m.mpcb			CPU_MPCB
 	cpu_m.cpu_private		CPU_PRIVATE
 	cpu_m.cpu_mmu_idx		CPU_MMU_IDX
-	cpu_m.cpu_mmu_ctxp            	CPU_MMU_CTXP
+	cpu_m.cpu_mmu_ctxp		CPU_MMU_CTXP
 	cpu_m.ptl1_state		CPU_PTL1
 
 cpu_core_t	CPU_CORE_SIZE	CPU_CORE_SHIFT