changeset 24309:1214efd2d210

11797 i86pc: cast between incompatible function types
Reviewed by: John Levon <john.levon@joyent.com>
Approved by: Dan McDonald <danmcd@joyent.com>
author Toomas Soome <tsoome@me.com>
date Mon, 22 Oct 2018 12:33:03 +0300
parents 141c83e3abd8
children 194bdf0acf86
files usr/src/uts/common/os/cap_util.c usr/src/uts/common/vm/seg_kmem.c usr/src/uts/i86pc/io/cbe.c usr/src/uts/i86pc/os/cpupm/cpupm_throttle.c usr/src/uts/i86pc/os/cpupm/pwrnow.c usr/src/uts/i86pc/os/cpupm/speedstep.c usr/src/uts/i86pc/os/dtrace_subr.c usr/src/uts/i86pc/os/fastboot.c usr/src/uts/i86pc/os/machdep.c usr/src/uts/i86pc/os/mp_call.c usr/src/uts/i86pc/os/mp_pc.c usr/src/uts/i86pc/os/startup.c usr/src/uts/i86pc/sys/machsystm.h usr/src/uts/i86xpv/os/mp_xen.c usr/src/uts/intel/ia32/os/desctbls.c usr/src/uts/intel/kdi/kdi_idt.c
diffstat 16 files changed, 82 insertions(+), 62 deletions(-)
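GCC 8 added -Wcast-function-type, which flags casts between function pointer types with incompatible argument lists. This changeset quiets those warnings in the i86pc kernel in two ways: handlers that are only ever invoked through xc_func_t or avfunc are redeclared with the callback's exact signature (cbe_fire, mach_cpu_halt, kdi_cpu_activate, the cpupm transition functions), and casts that must remain are routed through uintptr_t to mark the mismatch as deliberate (SEGKMEM_BADOP, the fasttrap table in desctbls.c). The following is a minimal standalone sketch of both patterns; it is not part of the diff, and every name in it (xcall_t, halt_handler, legacy_tick) is hypothetical. It assumes GCC 8 or later, where -Wextra enables -Wcast-function-type.

	/*
	 * Sketch only: illustrates the two fixes used by this changeset.
	 */
	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in for the kernel's cross-call handler type (cf. xc_func_t). */
	typedef int (*xcall_t)(uintptr_t, uintptr_t, uintptr_t);

	/*
	 * Fix 1 (preferred): declare the handler with the callback's exact
	 * signature and ignore the arguments it does not need, so the call
	 * site requires no cast at all (cf. mach_cpu_halt, cbe_fire).
	 */
	static int
	halt_handler(uintptr_t arg1, uintptr_t arg2, uintptr_t arg3)
	{
		(void) arg2;
		(void) arg3;
		printf("halting: %s\n", (const char *)arg1);
		return (0);
	}

	/* A function whose signature cannot be changed to match xcall_t. */
	static void
	legacy_tick(void)
	{
		printf("tick\n");
	}

	int
	main(void)
	{
		/* No warning: the types match exactly. */
		xcall_t f = halt_handler;
		(void) f((uintptr_t)"cpu0", 0, 0);

		/*
		 * Fix 2: a direct (xcall_t)legacy_tick cast draws "cast
		 * between incompatible function types"; an intermediate
		 * integer cast marks the mismatch as intentional and
		 * silences the warning (cf. SEGKMEM_BADOP).  The pointer
		 * must still be converted back to its true type before
		 * being called; calling through the mismatched type is
		 * undefined behavior in ISO C and merely tolerated by
		 * the i86pc calling convention.
		 */
		xcall_t g = (xcall_t)(uintptr_t)legacy_tick;
		void (*tick)(void) = (void (*)(void))(uintptr_t)g;
		tick();
		return (0);
	}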
--- a/usr/src/uts/common/os/cap_util.c	Wed Oct 16 10:19:13 2019 +0100
+++ b/usr/src/uts/common/os/cap_util.c	Mon Oct 22 12:33:03 2018 +0300
@@ -1298,7 +1298,7 @@
 cu_cpu_kstat_create(pghw_t *pg, cu_cntr_info_t *cntr_info)
 {
 	kstat_t		*ks;
-	char 		*sharing = pghw_type_string(pg->pghw_hw);
+	char		*sharing = pghw_type_string(pg->pghw_hw);
 	char		name[KSTAT_STRLEN + 1];
 
 	/*
@@ -1417,7 +1417,7 @@
 	 * cpu_call() will call func on the CPU specified with given argument
 	 * and return func's return value in last argument
 	 */
-	cpu_call(cp, (cpu_call_func_t)func, arg, (uintptr_t)&error);
+	cpu_call(cp, (cpu_call_func_t)(uintptr_t)func, arg, (uintptr_t)&error);
 	return (error);
 }
 
@@ -1471,7 +1471,7 @@
 	 */
 	retval = 0;
 	if (move_to)
-		(void) cu_cpu_run(cp, (cu_cpu_func_t)kcpc_read,
+		(void) cu_cpu_run(cp, (cu_cpu_func_t)(uintptr_t)kcpc_read,
 		    (uintptr_t)cu_cpu_update_stats);
 	else {
 		retval = kcpc_read((kcpc_update_func_t)cu_cpu_update_stats);
--- a/usr/src/uts/common/vm/seg_kmem.c	Wed Oct 16 10:19:13 2019 +0100
+++ b/usr/src/uts/common/vm/seg_kmem.c	Mon Oct 22 12:33:03 2018 +0300
@@ -435,12 +435,12 @@
 	panic("segkmem_badop");
 }
 
-#define	SEGKMEM_BADOP(t)	(t(*)())segkmem_badop
+#define	SEGKMEM_BADOP(t)	(t(*)())(uintptr_t)segkmem_badop
 
 /*ARGSUSED*/
 static faultcode_t
 segkmem_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t size,
-	enum fault_type type, enum seg_rw rw)
+    enum fault_type type, enum seg_rw rw)
 {
 	pgcnt_t npages;
 	spgcnt_t pg;
@@ -677,7 +677,7 @@
 /*ARGSUSED*/
 static int
 segkmem_pagelock(struct seg *seg, caddr_t addr, size_t len,
-	page_t ***ppp, enum lock_type type, enum seg_rw rw)
+    page_t ***ppp, enum lock_type type, enum seg_rw rw)
 {
 	page_t **pplist, *pp;
 	pgcnt_t npages;
@@ -858,7 +858,7 @@
  */
 void *
 segkmem_xalloc(vmem_t *vmp, void *inaddr, size_t size, int vmflag, uint_t attr,
-	page_t *(*page_create_func)(void *, size_t, int, void *), void *pcarg)
+    page_t *(*page_create_func)(void *, size_t, int, void *), void *pcarg)
 {
 	page_t *ppl;
 	caddr_t addr = inaddr;
@@ -1222,7 +1222,7 @@
 segkmem_free_one_lp(caddr_t addr, size_t size)
 {
 	page_t		*pp, *rootpp = NULL;
-	pgcnt_t 	pgs_left = btopr(size);
+	pgcnt_t		pgs_left = btopr(size);
 
 	ASSERT(size == segkmem_lpsize);
 
@@ -1422,7 +1422,7 @@
 	pgcnt_t		nlpages = size >> segkmem_lpshift;
 	size_t		lpsize = segkmem_lpsize;
 	caddr_t		addr = inaddr;
-	pgcnt_t 	npages = btopr(size);
+	pgcnt_t		npages = btopr(size);
 	int		i;
 
 	ASSERT(vmp == heap_lp_arena);
--- a/usr/src/uts/i86pc/io/cbe.c	Wed Oct 16 10:19:13 2019 +0100
+++ b/usr/src/uts/i86pc/io/cbe.c	Mon Oct 22 12:33:03 2018 +0300
@@ -68,15 +68,15 @@
 
 void cbe_hres_tick(void);
 
-int
-cbe_softclock(void)
+uint_t
+cbe_softclock(caddr_t arg1 __unused, caddr_t arg2 __unused)
 {
 	cyclic_softint(CPU, CY_LOCK_LEVEL);
 	return (1);
 }
 
-int
-cbe_low_level(void)
+uint_t
+cbe_low_level(caddr_t arg1 __unused, caddr_t arg2 __unused)
 {
 	cpu_t *cpu = CPU;
 
@@ -90,8 +90,8 @@
  * spurious calls, it would not matter if we called cyclic_fire() in both
  * cases.
  */
-int
-cbe_fire(void)
+uint_t
+cbe_fire(caddr_t arg1 __unused, caddr_t arg2 __unused)
 {
 	cpu_t *cpu = CPU;
 	processorid_t me = cpu->cpu_id, i;
@@ -346,21 +346,21 @@
 	cyclic_init(&cbe, cbe_timer_resolution);
 	mutex_exit(&cpu_lock);
 
-	(void) add_avintr(NULL, CBE_HIGH_PIL, (avfunc)cbe_fire,
+	(void) add_avintr(NULL, CBE_HIGH_PIL, cbe_fire,
 	    "cbe_fire_master", cbe_vector, 0, NULL, NULL, NULL);
 
 	if (psm_get_ipivect != NULL) {
-		(void) add_avintr(NULL, CBE_HIGH_PIL, (avfunc)cbe_fire,
+		(void) add_avintr(NULL, CBE_HIGH_PIL, cbe_fire,
 		    "cbe_fire_slave",
 		    (*psm_get_ipivect)(CBE_HIGH_PIL, PSM_INTR_IPI_HI),
 		    0, NULL, NULL, NULL);
 	}
 
 	(void) add_avsoftintr((void *)&cbe_clock_hdl, CBE_LOCK_PIL,
-	    (avfunc)cbe_softclock, "softclock", NULL, NULL);
+	    cbe_softclock, "softclock", NULL, NULL);
 
 	(void) add_avsoftintr((void *)&cbe_low_hdl, CBE_LOW_PIL,
-	    (avfunc)cbe_low_level, "low level", NULL, NULL);
+	    cbe_low_level, "low level", NULL, NULL);
 
 	mutex_enter(&cpu_lock);
 
--- a/usr/src/uts/i86pc/os/cpupm/cpupm_throttle.c	Wed Oct 16 10:19:13 2019 +0100
+++ b/usr/src/uts/i86pc/os/cpupm/cpupm_throttle.c	Mon Oct 22 12:33:03 2018 +0300
@@ -153,9 +153,11 @@
 /*
  * Transition the current processor to the requested throttling state.
  */
-static void
-cpupm_tstate_transition(uint32_t req_state)
+static int
+cpupm_tstate_transition(xc_arg_t arg1, xc_arg_t arg2 __unused,
+    xc_arg_t arg3 __unused)
 {
+	uint32_t req_state = arg1;
 	cpupm_mach_state_t *mach_state =
 	    (cpupm_mach_state_t *)CPU->cpu_m.mcpu_pm_mach_state;
 	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
@@ -174,7 +176,7 @@
 	 */
 	ctrl = CPU_ACPI_TSTATE_CTRL(req_tstate);
 	if (write_ctrl(handle, ctrl) != 0) {
-		return;
+		return (0);
 	}
 
 	/*
@@ -182,7 +184,7 @@
 	 * no status value comparison is required.
 	 */
 	if (CPU_ACPI_TSTATE_STAT(req_tstate) == 0) {
-		return;
+		return (0);
 	}
 
 	/* Wait until switch is complete, but bound the loop just in case. */
@@ -197,11 +199,14 @@
 	if (CPU_ACPI_TSTATE_STAT(req_tstate) != stat) {
 		DTRACE_PROBE(throttle_transition_incomplete);
 	}
+	return (0);
 }
 
 static void
 cpupm_throttle(cpuset_t set,  uint32_t throtl_lvl)
 {
+	xc_arg_t xc_arg = (xc_arg_t)throtl_lvl;
+
 	/*
 	 * If thread is already running on target CPU then just
 	 * make the transition request. Otherwise, we'll need to
@@ -209,12 +214,12 @@
 	 */
 	kpreempt_disable();
 	if (CPU_IN_SET(set, CPU->cpu_id)) {
-		cpupm_tstate_transition(throtl_lvl);
+		cpupm_tstate_transition(xc_arg, 0, 0);
 		CPUSET_DEL(set, CPU->cpu_id);
 	}
 	if (!CPUSET_ISNULL(set)) {
-		xc_call((xc_arg_t)throtl_lvl, 0, 0,
-		    CPUSET2BV(set), (xc_func_t)cpupm_tstate_transition);
+		xc_call(xc_arg, 0, 0,
+		    CPUSET2BV(set), cpupm_tstate_transition);
 	}
 	kpreempt_enable();
 }
--- a/usr/src/uts/i86pc/os/cpupm/pwrnow.c	Wed Oct 16 10:19:13 2019 +0100
+++ b/usr/src/uts/i86pc/os/cpupm/pwrnow.c	Mon Oct 22 12:33:03 2018 +0300
@@ -110,9 +110,11 @@
 /*
  * Transition the current processor to the requested state.
  */
-static void
-pwrnow_pstate_transition(uint32_t req_state)
+static int
+pwrnow_pstate_transition(xc_arg_t arg1, xc_arg_t arg2 __unused,
+    xc_arg_t arg3 __unused)
 {
+	uint32_t req_state = (uint32_t)arg1;
 	cpupm_mach_state_t *mach_state =
 	    (cpupm_mach_state_t *)CPU->cpu_m.mcpu_pm_mach_state;
 	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
@@ -137,6 +139,7 @@
 
 	mach_state->ms_pstate.cma_state.pstate = req_state;
 	cpu_set_curr_clock((uint64_t)CPU_ACPI_FREQ(req_pstate) * 1000000);
+	return (0);
 }
 
 static void
@@ -149,12 +152,12 @@
 	 */
 	kpreempt_disable();
 	if (CPU_IN_SET(set, CPU->cpu_id)) {
-		pwrnow_pstate_transition(req_state);
+		(void) pwrnow_pstate_transition(req_state, 0, 0);
 		CPUSET_DEL(set, CPU->cpu_id);
 	}
 	if (!CPUSET_ISNULL(set)) {
 		xc_call((xc_arg_t)req_state, 0, 0,
-		    CPUSET2BV(set), (xc_func_t)pwrnow_pstate_transition);
+		    CPUSET2BV(set), pwrnow_pstate_transition);
 	}
 	kpreempt_enable();
 }
--- a/usr/src/uts/i86pc/os/cpupm/speedstep.c	Wed Oct 16 10:19:13 2019 +0100
+++ b/usr/src/uts/i86pc/os/cpupm/speedstep.c	Mon Oct 22 12:33:03 2018 +0300
@@ -126,9 +126,11 @@
 /*
  * Transition the current processor to the requested state.
  */
-void
-speedstep_pstate_transition(uint32_t req_state)
+int
+speedstep_pstate_transition(xc_arg_t arg1, xc_arg_t arg2 __unused,
+    xc_arg_t arg3 __unused)
 {
+	uint32_t req_state = (uint32_t)arg1;
 	cpupm_mach_state_t *mach_state =
 	    (cpupm_mach_state_t *)CPU->cpu_m.mcpu_pm_mach_state;
 	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
@@ -152,6 +154,7 @@
 
 	mach_state->ms_pstate.cma_state.pstate = req_state;
 	cpu_set_curr_clock(((uint64_t)CPU_ACPI_FREQ(req_pstate) * 1000000));
+	return (0);
 }
 
 static void
@@ -164,12 +167,12 @@
 	 */
 	kpreempt_disable();
 	if (CPU_IN_SET(set, CPU->cpu_id)) {
-		speedstep_pstate_transition(req_state);
+		(void) speedstep_pstate_transition(req_state, 0, 0);
 		CPUSET_DEL(set, CPU->cpu_id);
 	}
 	if (!CPUSET_ISNULL(set)) {
 		xc_call((xc_arg_t)req_state, 0, 0, CPUSET2BV(set),
-		    (xc_func_t)speedstep_pstate_transition);
+		    speedstep_pstate_transition);
 	}
 	kpreempt_enable();
 }
--- a/usr/src/uts/i86pc/os/dtrace_subr.c	Wed Oct 16 10:19:13 2019 +0100
+++ b/usr/src/uts/i86pc/os/dtrace_subr.c	Mon Oct 22 12:33:03 2018 +0300
@@ -134,9 +134,10 @@
 }
 
 static int
-dtrace_xcall_func(dtrace_xcall_t func, void *arg)
+dtrace_xcall_func(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3 __unused)
 {
-	(*func)(arg);
+	dtrace_xcall_t func = (dtrace_xcall_t)arg1;
+	(*func)((void *)arg2);
 
 	return (0);
 }
@@ -157,7 +158,7 @@
 
 	kpreempt_disable();
 	xc_sync((xc_arg_t)func, (xc_arg_t)arg, 0, CPUSET2BV(set),
-	    (xc_func_t)dtrace_xcall_func);
+	    dtrace_xcall_func);
 	kpreempt_enable();
 }
 
--- a/usr/src/uts/i86pc/os/fastboot.c	Wed Oct 16 10:19:13 2019 +0100
+++ b/usr/src/uts/i86pc/os/fastboot.c	Mon Oct 22 12:33:03 2018 +0300
@@ -1293,8 +1293,9 @@
 
 /* ARGSUSED */
 static int
-fastboot_xc_func(fastboot_info_t *nk, xc_arg_t unused2, xc_arg_t unused3)
+fastboot_xc_func(xc_arg_t arg1, xc_arg_t arg2 __unused, xc_arg_t arg3 __unused)
 {
+	fastboot_info_t *nk = (fastboot_info_t *)arg1;
 	void (*fastboot_func)(fastboot_info_t *);
 	fastboot_file_t	*fb = &nk->fi_files[FASTBOOT_SWTCH];
 	fastboot_func = (void (*)())(fb->fb_va);
@@ -1372,11 +1373,11 @@
 		CPUSET_ZERO(cpuset);
 		CPUSET_ADD(cpuset, bootcpuid);
 		xc_priority((xc_arg_t)&newkernel, 0, 0, CPUSET2BV(cpuset),
-		    (xc_func_t)fastboot_xc_func);
+		    fastboot_xc_func);
 
 		panic_idle();
 	} else
-		(void) fastboot_xc_func(&newkernel, 0, 0);
+		(void) fastboot_xc_func((xc_arg_t)&newkernel, 0, 0);
 }
 
 
--- a/usr/src/uts/i86pc/os/machdep.c	Wed Oct 16 10:19:13 2019 +0100
+++ b/usr/src/uts/i86pc/os/machdep.c	Mon Oct 22 12:33:03 2018 +0300
@@ -403,7 +403,7 @@
 	cpuset_t xcset;
 
 	CPUSET_ALL_BUT(xcset, CPU->cpu_id);
-	xc_priority(0, 0, 0, CPUSET2BV(xcset), (xc_func_t)mach_cpu_halt);
+	xc_priority(0, 0, 0, CPUSET2BV(xcset), mach_cpu_halt);
 	restore_int_flag(s);
 }
 
--- a/usr/src/uts/i86pc/os/mp_call.c	Wed Oct 16 10:19:13 2019 +0100
+++ b/usr/src/uts/i86pc/os/mp_call.c	Mon Oct 22 12:33:03 2018 +0300
@@ -37,7 +37,7 @@
 
 /*
  * Interrupt another CPU.
- * 	This is useful to make the other CPU go through a trap so that
+ *	This is useful to make the other CPU go through a trap so that
  *	it recognizes an address space trap (AST) for preempting a thread.
  *
  *	It is possible to be preempted here and be resumed on the CPU
@@ -87,7 +87,7 @@
 	} else {
 		CPUSET_ONLY(set, cp->cpu_id);
 		xc_call((xc_arg_t)arg1, (xc_arg_t)arg2, 0, CPUSET2BV(set),
-		    (xc_func_t)func);
+		    (xc_func_t)(uintptr_t)func);
 	}
 	kpreempt_enable();
 }
--- a/usr/src/uts/i86pc/os/mp_pc.c	Wed Oct 16 10:19:13 2019 +0100
+++ b/usr/src/uts/i86pc/os/mp_pc.c	Mon Oct 22 12:33:03 2018 +0300
@@ -440,15 +440,18 @@
 /*
  * "Enter monitor."  Called via cross-call from stop_other_cpus().
  */
-void
-mach_cpu_halt(char *msg)
+int
+mach_cpu_halt(xc_arg_t arg1, xc_arg_t arg2 __unused, xc_arg_t arg3 __unused)
 {
+	char *msg = (char *)arg1;
+
 	if (msg)
 		prom_printf("%s\n", msg);
 
 	/*CONSTANTCONDITION*/
 	while (1)
 		;
+	return (0);
 }
 
 void
--- a/usr/src/uts/i86pc/os/startup.c	Wed Oct 16 10:19:13 2019 +0100
+++ b/usr/src/uts/i86pc/os/startup.c	Mon Oct 22 12:33:03 2018 +0300
@@ -2360,7 +2360,7 @@
 	 */
 	for (i = DDI_IPL_1; i <= DDI_IPL_10; i++) {
 		(void) add_avsoftintr((void *)&softlevel_hdl[i-1], i,
-		    (avfunc)ddi_periodic_softintr, "ddi_periodic",
+		    (avfunc)(uintptr_t)ddi_periodic_softintr, "ddi_periodic",
 		    (caddr_t)(uintptr_t)i, NULL);
 	}
 
--- a/usr/src/uts/i86pc/sys/machsystm.h	Wed Oct 16 10:19:13 2019 +0100
+++ b/usr/src/uts/i86pc/sys/machsystm.h	Mon Oct 22 12:33:03 2018 +0300
@@ -70,7 +70,7 @@
 } mach_cpu_add_arg_t;
 
 extern void mach_cpu_idle(void);
-extern void mach_cpu_halt(char *);
+extern int mach_cpu_halt(xc_arg_t, xc_arg_t, xc_arg_t);
 extern int mach_cpu_start(cpu_t *, void *);
 extern int mach_cpuid_start(processorid_t, void *);
 extern int mach_cpu_stop(cpu_t *, void *);
@@ -106,8 +106,8 @@
 
 struct system_hardware {
 	int		hd_nodes;		/* number of nodes */
-	int		hd_cpus_per_node; 	/* max cpus in a node */
-	struct memconf 	hd_mem[MAXNODES];
+	int		hd_cpus_per_node;	/* max cpus in a node */
+	struct memconf	hd_mem[MAXNODES];
 						/*
 						 * memory layout for each
 						 * node.
--- a/usr/src/uts/i86xpv/os/mp_xen.c	Wed Oct 16 10:19:13 2019 +0100
+++ b/usr/src/uts/i86xpv/os/mp_xen.c	Mon Oct 22 12:33:03 2018 +0300
@@ -558,12 +558,15 @@
 	}
 }
 
-void
-mach_cpu_halt(char *msg)
+int
+mach_cpu_halt(xc_arg_t arg1, xc_arg_t arg2 __unused, xc_arg_t arg3 __unused)
 {
+	char *msg = (char *)arg1;
+
 	if (msg)
 		prom_printf("%s\n", msg);
 	(void) xen_vcpu_down(CPU->cpu_id);
+	return (0);
 }
 
 /*ARGSUSED*/
--- a/usr/src/uts/intel/ia32/os/desctbls.c	Wed Oct 16 10:19:13 2019 +0100
+++ b/usr/src/uts/intel/ia32/os/desctbls.c	Mon Oct 22 12:33:03 2018 +0300
@@ -103,7 +103,7 @@
 desctbr_t	gdt0_default_r;
 #endif
 
-gate_desc_t	*idt0; 		/* interrupt descriptor table */
+gate_desc_t	*idt0;		/* interrupt descriptor table */
 #if defined(__i386)
 desctbr_t	idt0_default_r;		/* describes idt0 in IDTR format */
 #endif
@@ -147,10 +147,10 @@
 	fast_null,			/* T_FNULL routine */
 	fast_null,			/* T_FGETFP routine (initially null) */
 	fast_null,			/* T_FSETFP routine (initially null) */
-	(void (*)())get_hrtime,		/* T_GETHRTIME */
-	(void (*)())gethrvtime,		/* T_GETHRVTIME */
-	(void (*)())get_hrestime,	/* T_GETHRESTIME */
-	(void (*)())getlgrp		/* T_GETLGRP */
+	(void (*)())(uintptr_t)get_hrtime,	/* T_GETHRTIME */
+	(void (*)())(uintptr_t)gethrvtime,	/* T_GETHRVTIME */
+	(void (*)())(uintptr_t)get_hrestime,	/* T_GETHRESTIME */
+	(void (*)())(uintptr_t)getlgrp		/* T_GETLGRP */
 };
 
 /*
@@ -1342,7 +1342,7 @@
 brand_interpositioning_enable(void)
 {
 	gate_desc_t	*idt = CPU->cpu_idt;
-	int 		i;
+	int		i;
 
 	ASSERT(curthread->t_preempt != 0 || getpil() >= DISP_LEVEL);
 
--- a/usr/src/uts/intel/kdi/kdi_idt.c	Wed Oct 16 10:19:13 2019 +0100
+++ b/usr/src/uts/intel/kdi/kdi_idt.c	Mon Oct 22 12:33:03 2018 +0300
@@ -298,7 +298,8 @@
  * loaded at boot.
  */
 static int
-kdi_cpu_activate(void)
+kdi_cpu_activate(xc_arg_t arg1 __unused, xc_arg_t arg2 __unused,
+    xc_arg_t arg3 __unused)
 {
 	kdi_idt_gates_install(KCS_SEL, KDI_IDT_SAVE);
 	return (0);
@@ -346,13 +347,13 @@
 	if (boothowto & RB_KMDB) {
 		kdi_idt_gates_install(KMDBCODE_SEL, KDI_IDT_NOSAVE);
 	} else {
-		xc_call(0, 0, 0, CPUSET2BV(cpuset),
-		    (xc_func_t)kdi_cpu_activate);
+		xc_call(0, 0, 0, CPUSET2BV(cpuset), kdi_cpu_activate);
 	}
 }
 
 static int
-kdi_cpu_deactivate(void)
+kdi_cpu_deactivate(xc_arg_t arg1 __unused, xc_arg_t arg2 __unused,
+    xc_arg_t arg3 __unused)
 {
 	kdi_idt_gates_restore();
 	return (0);
@@ -364,7 +365,7 @@
 	cpuset_t cpuset;
 	CPUSET_ALL(cpuset);
 
-	xc_call(0, 0, 0, CPUSET2BV(cpuset), (xc_func_t)kdi_cpu_deactivate);
+	xc_call(0, 0, 0, CPUSET2BV(cpuset), kdi_cpu_deactivate);
 	kdi_nmemranges = 0;
 }