git: bcdc599dc2a1 - main - Revert "cpuset(9): Add CPU_FOREACH_IS(SET|CLR) and modify consumers to use it"

Mark Johnston markj at FreeBSD.org
Tue Sep 21 17:52:37 UTC 2021


The branch main has been updated by markj:

URL: https://cgit.FreeBSD.org/src/commit/?id=bcdc599dc2a187052cb13e18f22d3f0c655f95e6

commit bcdc599dc2a187052cb13e18f22d3f0c655f95e6
Author:     Mark Johnston <markj at FreeBSD.org>
AuthorDate: 2021-09-21 17:51:42 +0000
Commit:     Mark Johnston <markj at FreeBSD.org>
CommitDate: 2021-09-21 17:51:42 +0000

    Revert "cpuset(9): Add CPU_FOREACH_IS(SET|CLR) and modify consumers to use it"
    
    This reverts commit 9068f6ea697b1b28ad1326a4c7a9ba86f08b985e.
    
    The underlying macro needs to be reworked to avoid problems with control
    flow statements.
    
    Reported by:    rlibby
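
[Editorial note, not part of the commit: the control-flow problem alluded to above is typical of foreach-style bitset macros that expand to two nested loops, one over the words of the set and one over the bits of each word. With that shape, a break in the loop body only leaves the inner loop. The sketch below is purely illustrative and uses a made-up FOREACH_ISSET macro; it is not the sys/bitset.h implementation, just one way such a macro can mishandle control flow statements like break and continue.]

    /*
     * Illustrative sketch only -- NOT the real BIT_FOREACH_ISSET from
     * sys/bitset.h.  A hypothetical foreach macro that expands to two
     * nested loops, to show how break can silently misbehave.
     */
    #include <stdio.h>

    #define NWORDS          2
    #define BITS_PER_WORD   64

    /* Hypothetical nested-loop foreach over the set bits of "words". */
    #define FOREACH_ISSET(bit, words)                                   \
            for (int _w = 0; _w < NWORDS; _w++)                         \
                    for (int _b = 0; _b < BITS_PER_WORD; _b++)          \
                            if (((words)[_w] & (1ULL << _b)) != 0 &&    \
                                ((bit) = _w * BITS_PER_WORD + _b, 1))

    int
    main(void)
    {
            unsigned long long words[NWORDS] = { 0x5ULL, 0x1ULL };
            int bit;

            FOREACH_ISSET(bit, words) {
                    printf("visiting bit %d\n", bit);
                    if (bit == 0)
                            break;  /* Only exits the inner loop... */
            }
            /*
             * ...so bit 2 in the first word is skipped, yet bit 64 in the
             * second word is still visited: break did not stop the walk.
             */
            return (0);
    }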
---
 sys/amd64/amd64/mp_machdep.c | 11 ++++++++---
 sys/amd64/vmm/io/vlapic.c    | 13 ++++++++++---
 sys/amd64/vmm/vmm_lapic.c    |  4 +++-
 sys/i386/i386/mp_machdep.c   |  4 +++-
 sys/sys/cpuset.h             |  2 --
 sys/x86/x86/mp_x86.c         |  4 +++-
 6 files changed, 27 insertions(+), 11 deletions(-)

diff --git a/sys/amd64/amd64/mp_machdep.c b/sys/amd64/amd64/mp_machdep.c
index 16ec277e9c34..6c66bd622855 100644
--- a/sys/amd64/amd64/mp_machdep.c
+++ b/sys/amd64/amd64/mp_machdep.c
@@ -618,7 +618,7 @@ static void
 smp_targeted_tlb_shootdown(cpuset_t mask, pmap_t pmap, vm_offset_t addr1,
     vm_offset_t addr2, smp_invl_cb_t curcpu_cb, enum invl_op_codes op)
 {
-	cpuset_t other_cpus;
+	cpuset_t other_cpus, mask1;
 	uint32_t generation, *p_cpudone;
 	int cpu;
 	bool is_all;
@@ -662,7 +662,10 @@ smp_targeted_tlb_shootdown(cpuset_t mask, pmap_t pmap, vm_offset_t addr1,
 	/* Fence between filling smp_tlb fields and clearing scoreboard. */
 	atomic_thread_fence_rel();
 
-	CPU_FOREACH_ISSET(cpu, &mask) {
+	mask1 = mask;
+	while ((cpu = CPU_FFS(&mask1)) != 0) {
+		cpu--;
+		CPU_CLR(cpu, &mask1);
 		KASSERT(*invl_scoreboard_slot(cpu) != 0,
 		    ("IPI scoreboard is zero, initiator %d target %d",
 		    PCPU_GET(cpuid), cpu));
@@ -683,7 +686,9 @@ smp_targeted_tlb_shootdown(cpuset_t mask, pmap_t pmap, vm_offset_t addr1,
 		ipi_selected(mask, IPI_INVLOP);
 	}
 	curcpu_cb(pmap, addr1, addr2);
-	CPU_FOREACH_ISSET(cpu, &other_cpus) {
+	while ((cpu = CPU_FFS(&other_cpus)) != 0) {
+		cpu--;
+		CPU_CLR(cpu, &other_cpus);
 		p_cpudone = invl_scoreboard_slot(cpu);
 		while (atomic_load_int(p_cpudone) != generation)
 			ia32_pause();
diff --git a/sys/amd64/vmm/io/vlapic.c b/sys/amd64/vmm/io/vlapic.c
index 4e7ddbafd447..06df1c1a87e5 100644
--- a/sys/amd64/vmm/io/vlapic.c
+++ b/sys/amd64/vmm/io/vlapic.c
@@ -860,7 +860,10 @@ vlapic_calcdest(struct vm *vm, cpuset_t *dmask, uint32_t dest, bool phys,
 		 */
 		CPU_ZERO(dmask);
 		amask = vm_active_cpus(vm);
-		CPU_FOREACH_ISSET(vcpuid, &amask) {
+		while ((vcpuid = CPU_FFS(&amask)) != 0) {
+			vcpuid--;
+			CPU_CLR(vcpuid, &amask);
+
 			vlapic = vm_lapic(vm, vcpuid);
 			dfr = vlapic->apic_page->dfr;
 			ldr = vlapic->apic_page->ldr;
@@ -1000,7 +1003,9 @@ vlapic_icrlo_write_handler(struct vlapic *vlapic, bool *retu)
 			break;
 		}
 
-		CPU_FOREACH_ISSET(i, &dmask) {
+		while ((i = CPU_FFS(&dmask)) != 0) {
+			i--;
+			CPU_CLR(i, &dmask);
 			if (mode == APIC_DELMODE_FIXED) {
 				lapic_intr_edge(vlapic->vm, i, vec);
 				vmm_stat_array_incr(vlapic->vm, vlapic->vcpuid,
@@ -1549,7 +1554,9 @@ vlapic_deliver_intr(struct vm *vm, bool level, uint32_t dest, bool phys,
 	 */
 	vlapic_calcdest(vm, &dmask, dest, phys, lowprio, false);
 
-	CPU_FOREACH_ISSET(vcpuid, &dmask) {
+	while ((vcpuid = CPU_FFS(&dmask)) != 0) {
+		vcpuid--;
+		CPU_CLR(vcpuid, &dmask);
 		if (delmode == IOART_DELEXINT) {
 			vm_inject_extint(vm, vcpuid);
 		} else {
diff --git a/sys/amd64/vmm/vmm_lapic.c b/sys/amd64/vmm/vmm_lapic.c
index 8191da758100..89a1ebc8eff9 100644
--- a/sys/amd64/vmm/vmm_lapic.c
+++ b/sys/amd64/vmm/vmm_lapic.c
@@ -87,7 +87,9 @@ lapic_set_local_intr(struct vm *vm, int cpu, int vector)
 	else
 		CPU_SETOF(cpu, &dmask);
 	error = 0;
-	CPU_FOREACH_ISSET(cpu, &dmask) {
+	while ((cpu = CPU_FFS(&dmask)) != 0) {
+		cpu--;
+		CPU_CLR(cpu, &dmask);
 		vlapic = vm_lapic(vm, cpu);
 		error = vlapic_trigger_lvt(vlapic, vector);
 		if (error)
diff --git a/sys/i386/i386/mp_machdep.c b/sys/i386/i386/mp_machdep.c
index 777aefa021b3..156702118c45 100644
--- a/sys/i386/i386/mp_machdep.c
+++ b/sys/i386/i386/mp_machdep.c
@@ -598,7 +598,9 @@ smp_targeted_tlb_shootdown(cpuset_t mask, u_int vector, pmap_t pmap,
 		ipi_selected(mask, vector);
 	}
 	curcpu_cb(pmap, addr1, addr2);
-	CPU_FOREACH_ISSET(cpu, &other_cpus) {
+	while ((cpu = CPU_FFS(&other_cpus)) != 0) {
+		cpu--;
+		CPU_CLR(cpu, &other_cpus);
 		p_cpudone = &cpuid_to_pcpu[cpu]->pc_smp_tlb_done;
 		while (*p_cpudone != generation)
 			ia32_pause();
diff --git a/sys/sys/cpuset.h b/sys/sys/cpuset.h
index 9ef1a65f4506..1a96bb4766ce 100644
--- a/sys/sys/cpuset.h
+++ b/sys/sys/cpuset.h
@@ -66,8 +66,6 @@
 #define	CPU_COPY_STORE_REL(f, t)	BIT_COPY_STORE_REL(CPU_SETSIZE, f, t)
 #define	CPU_FFS(p)			BIT_FFS(CPU_SETSIZE, p)
 #define	CPU_FLS(p)			BIT_FLS(CPU_SETSIZE, p)
-#define	CPU_FOREACH_ISSET(i, p)		BIT_FOREACH_ISSET(CPU_SETSIZE, i, p)
-#define	CPU_FOREACH_ISCLR(i, p)		BIT_FOREACH_ISCLR(CPU_SETSIZE, i, p)
 #define	CPU_COUNT(p)			((int)BIT_COUNT(CPU_SETSIZE, p))
 #define	CPUSET_FSET			BITSET_FSET(_NCPUWORDS)
 #define	CPUSET_T_INITIALIZER		BITSET_T_INITIALIZER
diff --git a/sys/x86/x86/mp_x86.c b/sys/x86/x86/mp_x86.c
index 09d05d3f6de4..5e9a9735b09a 100644
--- a/sys/x86/x86/mp_x86.c
+++ b/sys/x86/x86/mp_x86.c
@@ -1290,7 +1290,9 @@ ipi_selected(cpuset_t cpus, u_int ipi)
 	if (ipi == IPI_STOP_HARD)
 		CPU_OR_ATOMIC(&ipi_stop_nmi_pending, &cpus);
 
-	CPU_FOREACH_ISSET(cpu, &cpus) {
+	while ((cpu = CPU_FFS(&cpus)) != 0) {
+		cpu--;
+		CPU_CLR(cpu, &cpus);
 		CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
 		ipi_send_cpu(cpu, ipi);
 	}
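
[Editorial note: the hunks above all restore the same long-standing cpuset(9) idiom, namely consuming a private copy of the set with CPU_FFS() and CPU_CLR(). A kernel-style sketch of that idiom follows; visit_cpu() is a hypothetical stand-in for the per-CPU work done by the real consumers (sending an IPI, polling a scoreboard slot, and so on), and the include list is a guess at a minimal set.]

    /*
     * Minimal sketch of the iteration idiom restored by this revert.
     * The mask is passed by value, so the function consumes its own
     * copy, just as the hunks above copy "mask" into "mask1" before
     * looping.  visit_cpu() is a hypothetical callback.
     */
    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/cpuset.h>

    static void
    visit_each_cpu(cpuset_t mask, void (*visit_cpu)(int))
    {
            int cpu;

            /* CPU_FFS() is 1-based; it returns 0 once the set is empty. */
            while ((cpu = CPU_FFS(&mask)) != 0) {
                    cpu--;
                    CPU_CLR(cpu, &mask);
                    visit_cpu(cpu);
            }
    }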

