git: 1e3f42b6bad5 - main - arm64: Switch the address argument to cpu_*cache* to a pointer

From: John Baldwin <jhb@FreeBSD.org>
Date: Fri, 15 Mar 2024 17:10:39 UTC
The branch main has been updated by jhb:

URL: https://cgit.FreeBSD.org/src/commit/?id=1e3f42b6bad58f001b9c88404bd818991f34d398

commit 1e3f42b6bad58f001b9c88404bd818991f34d398
Author:     John Baldwin <jhb@FreeBSD.org>
AuthorDate: 2024-03-15 17:09:49 +0000
Commit:     John Baldwin <jhb@FreeBSD.org>
CommitDate: 2024-03-15 17:09:49 +0000

    arm64: Switch the address argument to cpu_*cache* to a pointer
    
    No functional change, but this reduces diffs with CheriBSD downstream.
    
    Reviewed by:    andrew
    Sponsored by:   University of Cambridge, Google, Inc.
    Differential Revision:  https://reviews.freebsd.org/D44342
---
 sys/arm64/arm64/busdma_bounce.c        | 18 +++++++++---------
 sys/arm64/arm64/cpufunc_asm.S          | 14 +++++++-------
 sys/arm64/arm64/db_interface.c         |  2 +-
 sys/arm64/arm64/elf_machdep.c          |  2 +-
 sys/arm64/arm64/freebsd32_machdep.c    |  3 ++-
 sys/arm64/arm64/gicv3_its.c            | 17 ++++++++---------
 sys/arm64/arm64/identcpu.c             |  2 +-
 sys/arm64/arm64/pmap.c                 | 16 +++++++++-------
 sys/arm64/include/cpufunc.h            | 16 ++++++++--------
 sys/arm64/include/kdb.h                |  2 +-
 sys/cddl/dev/fbt/aarch64/fbt_isa.c     |  2 +-
 sys/cddl/dev/kinst/aarch64/kinst_isa.c |  6 ++----
 12 files changed, 50 insertions(+), 50 deletions(-)

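In outline, the change swaps the integer address argument for a pointer
across the cache-maintenance interface. The prototypes below are taken
from the cpufunc.h hunk of this commit, and the call sites are quoted
from the gicv3_its.c and busdma_bounce.c hunks:

	/* Before: ranges were named by integer virtual address. */
	void arm64_dcache_wb_range(vm_offset_t, vm_size_t);

	/* After: ranges are named by pointer.  (In CheriBSD this lets a
	 * capability pointer carry its bounds through the interface,
	 * which is presumably why it shrinks the downstream diff.) */
	void arm64_dcache_wb_range(void *, vm_size_t);

	/* Callers that already hold a pointer drop their casts: */
	cpu_dcache_wb_range(sc->sc_conf_base, LPI_CONFTAB_SIZE);

	/* Callers that hold a vm_offset_t gain one: */
	cpu_dcache_wb_range((void *)va, len);
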
diff --git a/sys/arm64/arm64/busdma_bounce.c b/sys/arm64/arm64/busdma_bounce.c
index ec2dfe76894c..e62794da2753 100644
--- a/sys/arm64/arm64/busdma_bounce.c
+++ b/sys/arm64/arm64/busdma_bounce.c
@@ -985,15 +985,15 @@ bounce_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
 }
 
 static void
-dma_preread_safe(vm_offset_t va, vm_size_t size)
+dma_preread_safe(char *va, vm_size_t size)
 {
 	/*
 	 * Write back any partial cachelines immediately before and
 	 * after the DMA region.
 	 */
-	if (va & (dcache_line_size - 1))
+	if (!__is_aligned(va, dcache_line_size))
 		cpu_dcache_wb_range(va, 1);
-	if ((va + size) & (dcache_line_size - 1))
+	if (!__is_aligned(va + size, dcache_line_size))
 		cpu_dcache_wb_range(va + size, 1);
 
 	cpu_dcache_inv_range(va, size);
@@ -1030,7 +1030,7 @@ dma_dcache_sync(struct sync_list *sl, bus_dmasync_op_t op)
 		switch (op) {
 		case BUS_DMASYNC_PREWRITE:
 		case BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD:
-			cpu_dcache_wb_range(va, len);
+			cpu_dcache_wb_range((void *)va, len);
 			break;
 		case BUS_DMASYNC_PREREAD:
 			/*
@@ -1043,11 +1043,11 @@ dma_dcache_sync(struct sync_list *sl, bus_dmasync_op_t op)
 			 * misalignment.  Buffers which are not mbufs bounce if
 			 * they are not aligned to a cacheline.
 			 */
-			dma_preread_safe(va, len);
+			dma_preread_safe((void *)va, len);
 			break;
 		case BUS_DMASYNC_POSTREAD:
 		case BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE:
-			cpu_dcache_inv_range(va, len);
+			cpu_dcache_inv_range((void *)va, len);
 			break;
 		default:
 			panic("unsupported combination of sync operations: "
@@ -1097,7 +1097,7 @@ bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
 				if (tempvaddr != 0)
 					pmap_quick_remove_page(tempvaddr);
 				if ((map->flags & DMAMAP_COHERENT) == 0)
-					cpu_dcache_wb_range(bpage->vaddr,
+					cpu_dcache_wb_range((void *)bpage->vaddr,
 					    bpage->datacount);
 				bpage = STAILQ_NEXT(bpage, links);
 			}
@@ -1105,7 +1105,7 @@ bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
 		} else if ((op & BUS_DMASYNC_PREREAD) != 0) {
 			while (bpage != NULL) {
 				if ((map->flags & DMAMAP_COHERENT) == 0)
-					cpu_dcache_wbinv_range(bpage->vaddr,
+					cpu_dcache_wbinv_range((void *)bpage->vaddr,
 					    bpage->datacount);
 				bpage = STAILQ_NEXT(bpage, links);
 			}
@@ -1114,7 +1114,7 @@ bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
 		if ((op & BUS_DMASYNC_POSTREAD) != 0) {
 			while (bpage != NULL) {
 				if ((map->flags & DMAMAP_COHERENT) == 0)
-					cpu_dcache_inv_range(bpage->vaddr,
+					cpu_dcache_inv_range((void *)bpage->vaddr,
 					    bpage->datacount);
 				tempvaddr = 0;
 				datavaddr = bpage->datavaddr;
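
The dma_preread_safe() change is behavior-preserving: __is_aligned()
from sys/sys/cdefs.h expands to essentially the same mask test the
open-coded version used, but accepts pointer operands. A sketch of the
equivalence (the macro body is paraphrased, not quoted):

	#define __is_aligned(x, y) (((__uintptr_t)(x) & ((y) - 1)) == 0)

	/* Old test, va as an integer: */
	if (va & (dcache_line_size - 1))
		cpu_dcache_wb_range(va, 1);
	/* New test, va as a pointer -- the same bits are examined: */
	if (!__is_aligned(va, dcache_line_size))
		cpu_dcache_wb_range(va, 1);

The surrounding logic is unchanged: write back any partial cache lines
at either edge of the buffer, then invalidate the whole range before
the device writes into it.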
diff --git a/sys/arm64/arm64/cpufunc_asm.S b/sys/arm64/arm64/cpufunc_asm.S
index 8163e6c3d0d0..5a668aeb542e 100644
--- a/sys/arm64/arm64/cpufunc_asm.S
+++ b/sys/arm64/arm64/cpufunc_asm.S
@@ -104,7 +104,7 @@ ENTRY(arm64_tlb_flushID)
 END(arm64_tlb_flushID)
 
 /*
- * void arm64_dcache_wb_range(vm_offset_t, vm_size_t)
+ * void arm64_dcache_wb_range(void *, vm_size_t)
  */
 ENTRY(arm64_dcache_wb_range)
 	cache_handle_range	dcop = cvac
@@ -112,7 +112,7 @@ ENTRY(arm64_dcache_wb_range)
 END(arm64_dcache_wb_range)
 
 /*
- * void arm64_dcache_wbinv_range(vm_offset_t, vm_size_t)
+ * void arm64_dcache_wbinv_range(void *, vm_size_t)
  */
 ENTRY(arm64_dcache_wbinv_range)
 	cache_handle_range	dcop = civac
@@ -120,7 +120,7 @@ ENTRY(arm64_dcache_wbinv_range)
 END(arm64_dcache_wbinv_range)
 
 /*
- * void arm64_dcache_inv_range(vm_offset_t, vm_size_t)
+ * void arm64_dcache_inv_range(void *, vm_size_t)
  *
  * Note, we must not invalidate everything.  If the range is too big we
  * must use wb-inv of the entire cache.
@@ -131,7 +131,7 @@ ENTRY(arm64_dcache_inv_range)
 END(arm64_dcache_inv_range)
 
 /*
- * void arm64_dic_idc_icache_sync_range(vm_offset_t, vm_size_t)
+ * void arm64_dic_idc_icache_sync_range(void *, vm_size_t)
  * When the CTR_EL0.IDC bit is set cleaning to PoU becomes a dsb.
  * When the CTR_EL0.DIC bit is set icache invalidation becomes an isb.
  */
@@ -142,7 +142,7 @@ ENTRY(arm64_dic_idc_icache_sync_range)
 END(arm64_dic_idc_icache_sync_range)
 
 /*
- * void arm64_idc_aliasing_icache_sync_range(vm_offset_t, vm_size_t)
+ * void arm64_idc_aliasing_icache_sync_range(void *, vm_size_t)
  * When the CTR_EL0.IDC bit is set cleaning to PoU becomes a dsb.
  */
 ENTRY(arm64_idc_aliasing_icache_sync_range)
@@ -154,7 +154,7 @@ ENTRY(arm64_idc_aliasing_icache_sync_range)
 END(arm64_idc_aliasing_icache_sync_range)
 
 /*
- * void arm64_aliasing_icache_sync_range(vm_offset_t, vm_size_t)
+ * void arm64_aliasing_icache_sync_range(void *, vm_size_t)
  */
 ENTRY(arm64_aliasing_icache_sync_range)
 	/*
@@ -170,7 +170,7 @@ ENTRY(arm64_aliasing_icache_sync_range)
 END(arm64_aliasing_icache_sync_range)
 
 /*
- * int arm64_icache_sync_range_checked(vm_offset_t, vm_size_t)
+ * int arm64_icache_sync_range_checked(void *, vm_size_t)
  */
 ENTRY(arm64_icache_sync_range_checked)
 	adr	x5, cache_maint_fault
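
All of the cpufunc_asm.S hunks are comment-only; the bodies still use
the cache_handle_range macro, which walks the range one cache line at a
time. A rough C rendering of the write-back case, illustrative only
(the real loop is assembly, and dcache_wb_sketch is not a kernel
function):

	static void
	dcache_wb_sketch(void *va, vm_size_t len)
	{
		uintptr_t cl, end;

		/* Align down to a cache-line boundary, then walk up. */
		cl = (uintptr_t)va & ~((uintptr_t)dcache_line_size - 1);
		end = (uintptr_t)va + len;
		for (; cl < end; cl += dcache_line_size)
			__asm __volatile("dc cvac, %0" :: "r" (cl));
		__asm __volatile("dsb ish");	/* wait for completion */
	}
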
diff --git a/sys/arm64/arm64/db_interface.c b/sys/arm64/arm64/db_interface.c
index 0b1c58ca88a0..1aaec9665550 100644
--- a/sys/arm64/arm64/db_interface.c
+++ b/sys/arm64/arm64/db_interface.c
@@ -175,7 +175,7 @@ db_write_bytes(vm_offset_t addr, size_t size, char *data)
 			 * Ensure the I & D cache are in sync if we wrote
 			 * to executable memory.
 			 */
-			cpu_icache_sync_range(addr, (vm_size_t)size);
+			cpu_icache_sync_range((void *)addr, (vm_size_t)size);
 		}
 	}
 	(void)kdb_jmpbuf(prev_jb);
diff --git a/sys/arm64/arm64/elf_machdep.c b/sys/arm64/arm64/elf_machdep.c
index 350651c42723..d5b420a8b519 100644
--- a/sys/arm64/arm64/elf_machdep.c
+++ b/sys/arm64/arm64/elf_machdep.c
@@ -299,7 +299,7 @@ elf_cpu_load_file(linker_file_t lf)
 {
 
 	if (lf->id != 1)
-		cpu_icache_sync_range((vm_offset_t)lf->address, lf->size);
+		cpu_icache_sync_range(lf->address, lf->size);
 	return (0);
 }
 
diff --git a/sys/arm64/arm64/freebsd32_machdep.c b/sys/arm64/arm64/freebsd32_machdep.c
index b25ebd50166d..fc979e193d1a 100644
--- a/sys/arm64/arm64/freebsd32_machdep.c
+++ b/sys/arm64/arm64/freebsd32_machdep.c
@@ -94,7 +94,8 @@ freebsd32_sysarch(struct thread *td, struct freebsd32_sysarch_args *uap)
 				return (error);
 			if ((uint64_t)args.addr + (uint64_t)args.size > 0xffffffff)
 				return (EINVAL);
-			cpu_icache_sync_range_checked(args.addr, args.size);
+			cpu_icache_sync_range_checked(
+			    (void *)(uintptr_t)args.addr, args.size);
 			return 0;
 		}
 	case ARM_GET_VFPSTATE:
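
The two-step cast in the freebsd32 path is deliberate: args.addr holds
a 32-bit compat address (note the 0xffffffff bound check above), and
converting it straight to void * on LP64 would draw an
integer-to-pointer-of-different-size warning. Casting through uintptr_t
widens the integer first, then converts; an illustrative fragment:

	uint32_t addr32 = 0x1000;		/* 32-bit compat address */
	void *p = (void *)(uintptr_t)addr32;	/* widen, then convert  */
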
diff --git a/sys/arm64/arm64/gicv3_its.c b/sys/arm64/arm64/gicv3_its.c
index 2ad5cce68704..31a0ded6c95d 100644
--- a/sys/arm64/arm64/gicv3_its.c
+++ b/sys/arm64/arm64/gicv3_its.c
@@ -744,7 +744,7 @@ gicv3_its_conftable_init(struct gicv3_its_softc *sc)
 	    LPI_CONFTAB_SIZE);
 
 	/* Flush the table to memory */
-	cpu_dcache_wb_range((vm_offset_t)sc->sc_conf_base, LPI_CONFTAB_SIZE);
+	cpu_dcache_wb_range(sc->sc_conf_base, LPI_CONFTAB_SIZE);
 }
 
 static void
@@ -761,7 +761,7 @@ gicv3_its_pendtables_init(struct gicv3_its_softc *sc)
 			    0, LPI_PENDTAB_MAX_ADDR, LPI_PENDTAB_ALIGN, 0);
 
 			/* Flush so the ITS can see the memory */
-			cpu_dcache_wb_range((vm_offset_t)sc->sc_pend_base[i],
+			cpu_dcache_wb_range(sc->sc_pend_base[i],
 			    LPI_PENDTAB_SIZE);
 		}
 	}
@@ -1158,7 +1158,7 @@ gicv3_its_disable_intr(device_t dev, struct intr_irqsrc *isrc)
 
 	if ((sc->sc_its_flags & ITS_FLAGS_LPI_CONF_FLUSH) != 0) {
 		/* Clean D-cache under command. */
-		cpu_dcache_wb_range((vm_offset_t)&conf[girq->gi_lpi], 1);
+		cpu_dcache_wb_range(&conf[girq->gi_lpi], 1);
 	} else {
 		/* DSB inner shareable, store */
 		dsb(ishst);
@@ -1182,7 +1182,7 @@ gicv3_its_enable_intr(device_t dev, struct intr_irqsrc *isrc)
 
 	if ((sc->sc_its_flags & ITS_FLAGS_LPI_CONF_FLUSH) != 0) {
 		/* Clean D-cache under command. */
-		cpu_dcache_wb_range((vm_offset_t)&conf[girq->gi_lpi], 1);
+		cpu_dcache_wb_range(&conf[girq->gi_lpi], 1);
 	} else {
 		/* DSB inner shareable, store */
 		dsb(ishst);
@@ -1396,12 +1396,11 @@ its_device_alloc(struct gicv3_its_softc *sc, int devid)
 	    ptable->ptab_page_size, 0);
 
 	if (!shareable)
-		cpu_dcache_wb_range((vm_offset_t)l2_table, ptable->ptab_l2_size);
+		cpu_dcache_wb_range(l2_table, ptable->ptab_l2_size);
 
 	table[index] = vtophys(l2_table) | GITS_BASER_VALID;
 	if (!shareable)
-		cpu_dcache_wb_range((vm_offset_t)&table[index],
-		    sizeof(table[index]));
+		cpu_dcache_wb_range(&table[index], sizeof(table[index]));
 
 	dsb(sy);
 	return (true);
@@ -1463,7 +1462,7 @@ its_device_get(device_t dev, device_t child, u_int nvecs)
 
 	/* Make sure device sees zeroed ITT. */
 	if ((sc->sc_its_flags & ITS_FLAGS_CMDQ_FLUSH) != 0)
-		cpu_dcache_wb_range((vm_offset_t)its_dev->itt, its_dev->itt_size);
+		cpu_dcache_wb_range(its_dev->itt, its_dev->itt_size);
 
 	mtx_lock_spin(&sc->sc_its_dev_lock);
 	TAILQ_INSERT_TAIL(&sc->sc_its_dev_list, its_dev, entry);
@@ -1861,7 +1860,7 @@ its_cmd_sync(struct gicv3_its_softc *sc, struct its_cmd *cmd)
 
 	if ((sc->sc_its_flags & ITS_FLAGS_CMDQ_FLUSH) != 0) {
 		/* Clean D-cache under command. */
-		cpu_dcache_wb_range((vm_offset_t)cmd, sizeof(*cmd));
+		cpu_dcache_wb_range(cmd, sizeof(*cmd));
 	} else {
 		/* DSB inner shareable, store */
 		dsb(ishst);
diff --git a/sys/arm64/arm64/identcpu.c b/sys/arm64/arm64/identcpu.c
index 7706c42f7fdb..c93b1292aca1 100644
--- a/sys/arm64/arm64/identcpu.c
+++ b/sys/arm64/arm64/identcpu.c
@@ -83,7 +83,7 @@ static void check_cpu_regs(u_int cpu, struct cpu_desc *desc,
  * The default implementation of I-cache sync assumes we have an
  * aliasing cache until we know otherwise.
  */
-void (*arm64_icache_sync_range)(vm_offset_t, vm_size_t) =
+void (*arm64_icache_sync_range)(void *, vm_size_t) =
     &arm64_aliasing_icache_sync_range;
 
 static int
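
The function pointer in identcpu.c exists because the cheaper I-cache
sync variants are only safe once CTR_EL0 has been examined; the
aliasing routine is the conservative default. A hedged sketch of the
selection (the real code lives elsewhere in identcpu.c; CTR_DIC and
CTR_IDC here stand for the bit-29 and bit-28 masks of CTR_EL0):

	uint64_t ctr = READ_SPECIALREG(ctr_el0);

	if ((ctr & CTR_DIC) != 0 && (ctr & CTR_IDC) != 0)
		/* Clean to PoU is a dsb; invalidation is an isb. */
		arm64_icache_sync_range = arm64_dic_idc_icache_sync_range;
	else if ((ctr & CTR_IDC) != 0)
		arm64_icache_sync_range = arm64_idc_aliasing_icache_sync_range;
	/* else: keep the aliasing default installed above. */
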
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
index 21912535bb6a..ba72f1dac8d0 100644
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -4738,10 +4738,11 @@ validate:
 		    m->md.pv_memattr == VM_MEMATTR_WRITE_BACK &&
 		    (opa != pa || (orig_l3 & ATTR_S1_XN))) {
 			PMAP_ASSERT_STAGE1(pmap);
-			cpu_icache_sync_range(PHYS_TO_DMAP(pa), PAGE_SIZE);
+			cpu_icache_sync_range((void *)PHYS_TO_DMAP(pa),
+			    PAGE_SIZE);
 		}
 	} else {
-		cpu_dcache_wb_range(PHYS_TO_DMAP(pa), PAGE_SIZE);
+		cpu_dcache_wb_range((void *)PHYS_TO_DMAP(pa), PAGE_SIZE);
 	}
 
 	/*
@@ -5006,7 +5007,7 @@ pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags,
 	if ((new_l2 & ATTR_S1_XN) == 0 && (PTE_TO_PHYS(new_l2) !=
 	    PTE_TO_PHYS(old_l2) || (old_l2 & ATTR_S1_XN) != 0) &&
 	    pmap != kernel_pmap && m->md.pv_memattr == VM_MEMATTR_WRITE_BACK) {
-		cpu_icache_sync_range(PHYS_TO_DMAP(PTE_TO_PHYS(new_l2)),
+		cpu_icache_sync_range((void *)PHYS_TO_DMAP(PTE_TO_PHYS(new_l2)),
 		    L2_SIZE);
 	}
 
@@ -5219,7 +5220,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	/* Sync icache before the mapping is stored to PTE */
 	if ((prot & VM_PROT_EXECUTE) && pmap != kernel_pmap &&
 	    m->md.pv_memattr == VM_MEMATTR_WRITE_BACK)
-		cpu_icache_sync_range(PHYS_TO_DMAP(pa), PAGE_SIZE);
+		cpu_icache_sync_range((void *)PHYS_TO_DMAP(pa), PAGE_SIZE);
 
 	pmap_store(l3, l3_val);
 	dsb(ishst);
@@ -6990,7 +6991,7 @@ pmap_change_props_locked(vm_offset_t va, vm_size_t size, vm_prot_t prot,
 			 * the cache.
 			 */
 			if (mode == VM_MEMATTR_UNCACHEABLE)
-				cpu_dcache_wbinv_range(tmpva, pte_size);
+				cpu_dcache_wbinv_range((void *)tmpva, pte_size);
 			tmpva += pte_size;
 		}
 	}
@@ -7673,7 +7674,7 @@ pmap_sync_icache(pmap_t pmap, vm_offset_t va, vm_size_t sz)
 	    ("%s: Address not in canonical form: %lx", __func__, va));
 
 	if (ADDR_IS_KERNEL(va)) {
-		cpu_icache_sync_range(va, sz);
+		cpu_icache_sync_range((void *)va, sz);
 	} else {
 		u_int len, offset;
 		vm_paddr_t pa;
@@ -7686,7 +7687,8 @@ pmap_sync_icache(pmap_t pmap, vm_offset_t va, vm_size_t sz)
 			/* Extract the physical address & find it in the DMAP */
 			pa = pmap_extract(pmap, va);
 			if (pa != 0)
-				cpu_icache_sync_range(PHYS_TO_DMAP(pa), len);
+				cpu_icache_sync_range((void *)PHYS_TO_DMAP(pa),
+				    len);
 
 			/* Move to the next page */
 			sz -= len;
diff --git a/sys/arm64/include/cpufunc.h b/sys/arm64/include/cpufunc.h
index 4062da996ee3..1903af965a68 100644
--- a/sys/arm64/include/cpufunc.h
+++ b/sys/arm64/include/cpufunc.h
@@ -177,20 +177,20 @@ extern int64_t dczva_line_size;
 #define	cpu_dcache_inv_range(a, s)	arm64_dcache_inv_range((a), (s))
 #define	cpu_dcache_wb_range(a, s)	arm64_dcache_wb_range((a), (s))
 
-extern void (*arm64_icache_sync_range)(vm_offset_t, vm_size_t);
+extern void (*arm64_icache_sync_range)(void *, vm_size_t);
 
 #define	cpu_icache_sync_range(a, s)	arm64_icache_sync_range((a), (s))
 #define cpu_icache_sync_range_checked(a, s) arm64_icache_sync_range_checked((a), (s))
 
 void arm64_nullop(void);
 void arm64_tlb_flushID(void);
-void arm64_dic_idc_icache_sync_range(vm_offset_t, vm_size_t);
-void arm64_idc_aliasing_icache_sync_range(vm_offset_t, vm_size_t);
-void arm64_aliasing_icache_sync_range(vm_offset_t, vm_size_t);
-int arm64_icache_sync_range_checked(vm_offset_t, vm_size_t);
-void arm64_dcache_wbinv_range(vm_offset_t, vm_size_t);
-void arm64_dcache_inv_range(vm_offset_t, vm_size_t);
-void arm64_dcache_wb_range(vm_offset_t, vm_size_t);
+void arm64_dic_idc_icache_sync_range(void *, vm_size_t);
+void arm64_idc_aliasing_icache_sync_range(void *, vm_size_t);
+void arm64_aliasing_icache_sync_range(void *, vm_size_t);
+int arm64_icache_sync_range_checked(void *, vm_size_t);
+void arm64_dcache_wbinv_range(void *, vm_size_t);
+void arm64_dcache_inv_range(void *, vm_size_t);
+void arm64_dcache_wb_range(void *, vm_size_t);
 bool arm64_get_writable_addr(vm_offset_t, vm_offset_t *);
 
 #endif	/* _KERNEL */
diff --git a/sys/arm64/include/kdb.h b/sys/arm64/include/kdb.h
index e68c81824c15..aa36e7e756f9 100644
--- a/sys/arm64/include/kdb.h
+++ b/sys/arm64/include/kdb.h
@@ -44,7 +44,7 @@ static __inline void
 kdb_cpu_sync_icache(unsigned char *addr, size_t size)
 {
 
-	cpu_icache_sync_range((vm_offset_t)addr, size);
+	cpu_icache_sync_range(addr, size);
 }
 
 static __inline void
diff --git a/sys/cddl/dev/fbt/aarch64/fbt_isa.c b/sys/cddl/dev/fbt/aarch64/fbt_isa.c
index a3dad017e8b4..30117202f8e7 100644
--- a/sys/cddl/dev/fbt/aarch64/fbt_isa.c
+++ b/sys/cddl/dev/fbt/aarch64/fbt_isa.c
@@ -77,7 +77,7 @@ fbt_patch_tracepoint(fbt_probe_t *fbt, fbt_patchval_t val)
 		panic("%s: Unable to write new instruction", __func__);
 
 	*(fbt_patchval_t *)addr = val;
-	cpu_icache_sync_range((vm_offset_t)fbt->fbtp_patchpoint, 4);
+	cpu_icache_sync_range(fbt->fbtp_patchpoint, 4);
 }
 
 int
diff --git a/sys/cddl/dev/kinst/aarch64/kinst_isa.c b/sys/cddl/dev/kinst/aarch64/kinst_isa.c
index bf3ab1d35de3..0e5d5eee2979 100644
--- a/sys/cddl/dev/kinst/aarch64/kinst_isa.c
+++ b/sys/cddl/dev/kinst/aarch64/kinst_isa.c
@@ -153,8 +153,7 @@ kinst_trampoline_populate(struct kinst_probe *kp)
 	kinst_memcpy(kp->kp_tramp, &kp->kp_savedval, INSN_SIZE);
 	kinst_memcpy(&kp->kp_tramp[INSN_SIZE], &bpt, INSN_SIZE);
 
-	cpu_icache_sync_range((vm_offset_t)kp->kp_tramp,
-	    (vm_size_t)KINST_TRAMP_SIZE);
+	cpu_icache_sync_range(kp->kp_tramp, KINST_TRAMP_SIZE);
 }
 
 /*
@@ -241,8 +240,7 @@ kinst_patch_tracepoint(struct kinst_probe *kp, kinst_patchval_t val)
 	if (!arm64_get_writable_addr((vm_offset_t)kp->kp_patchpoint, &addr))
 		panic("%s: Unable to write new instruction", __func__);
 	*(kinst_patchval_t *)addr = val;
-	cpu_icache_sync_range((vm_offset_t)kp->kp_patchpoint,
-	    (vm_size_t)INSN_SIZE);
+	cpu_icache_sync_range(kp->kp_patchpoint, INSN_SIZE);
 }
 
 static void