svn commit: r293673 - stable/10/sys/dev/nvme
Jim Harris
jimharris at FreeBSD.org
Mon Jan 11 17:33:52 UTC 2016
Author: jimharris
Date: Mon Jan 11 17:33:51 2016
New Revision: 293673
URL: https://svnweb.freebsd.org/changeset/base/293673
Log:
MFC r293354:
nvme: replace NVME_CEILING macro with howmany()
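For reference: howmany(), defined in <sys/param.h>, computes the same rounded-up division as the removed driver-local macro for any dividend of at least 1, and mp_ncpus is always at least 1, so the substitution does not change behavior. A minimal userland sketch (not part of the commit, sample values are arbitrary) that checks the equivalence:

/*
 * Standalone sketch: verify that the removed NVME_CEILING() macro and
 * howmany() from <sys/param.h> agree for positive dividends such as
 * mp_ncpus.  The sampled inputs below are made up for illustration.
 */
#include <sys/param.h>	/* howmany() */
#include <assert.h>
#include <stdio.h>

#define NVME_CEILING(num, div)	((((num) - 1) / (div)) + 1)

int
main(void)
{
	int cpus[] = { 1, 2, 4, 7, 8, 64 };
	int queues[] = { 1, 2, 3, 8 };

	for (size_t i = 0; i < sizeof(cpus) / sizeof(cpus[0]); i++)
		for (size_t j = 0; j < sizeof(queues) / sizeof(queues[0]); j++)
			assert(NVME_CEILING(cpus[i], queues[j]) ==
			    howmany(cpus[i], queues[j]));
	printf("NVME_CEILING and howmany agree for all sampled inputs\n");
	return (0);
}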
Modified:
stable/10/sys/dev/nvme/nvme_ctrlr.c
Modified: stable/10/sys/dev/nvme/nvme_ctrlr.c
==============================================================================
--- stable/10/sys/dev/nvme/nvme_ctrlr.c Mon Jan 11 17:32:56 2016 (r293672)
+++ stable/10/sys/dev/nvme/nvme_ctrlr.c Mon Jan 11 17:33:51 2016 (r293673)
@@ -42,12 +42,6 @@ __FBSDID("$FreeBSD$");
 
 #include "nvme_private.h"
 
-/*
- * Used for calculating number of CPUs to assign to each core and number of I/O
- * queues to allocate per controller.
- */
-#define NVME_CEILING(num, div)	((((num) - 1) / (div)) + 1)
-
 static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
 					struct nvme_async_event_request *aer);
 static void nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr);
@@ -152,7 +146,7 @@ nvme_ctrlr_construct_io_qpairs(struct nv
 	 * a controller could theoretically support fewer I/O queues than
 	 * MSI-X vectors.  So calculate again here just to be safe.
 	 */
-	ctrlr->num_cpus_per_ioq = NVME_CEILING(mp_ncpus, ctrlr->num_io_queues);
+	ctrlr->num_cpus_per_ioq = howmany(mp_ncpus, ctrlr->num_io_queues);
 
 	ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair),
 	    M_NVME, M_ZERO | M_WAITOK);
@@ -1030,9 +1024,9 @@ nvme_ctrlr_setup_interrupts(struct nvme_
 	 * admin queue.
 	 */
 	ctrlr->num_cpus_per_ioq = max(min_cpus_per_ioq,
-	    NVME_CEILING(mp_ncpus, num_vectors_available - 1));
+	    howmany(mp_ncpus, num_vectors_available - 1));
 
-	ctrlr->num_io_queues = NVME_CEILING(mp_ncpus, ctrlr->num_cpus_per_ioq);
+	ctrlr->num_io_queues = howmany(mp_ncpus, ctrlr->num_cpus_per_ioq);
 
 	num_vectors_requested = ctrlr->num_io_queues + 1;
 	num_vectors_allocated = num_vectors_requested;
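The last hunk sizes the MSI-X allocation: one vector is reserved for the admin queue, the CPUs are spread over the remaining vectors with howmany(), and the resulting CPUs-per-queue figure is fed back through howmany() to get the I/O queue count. A minimal userland sketch of that arithmetic, with made-up inputs and MAX() from <sys/param.h> standing in for the kernel's max():

/*
 * Standalone sketch of the vector-sizing math in the
 * nvme_ctrlr_setup_interrupts() hunk above.  The inputs (8 CPUs,
 * 5 MSI-X vectors, minimum of 1 CPU per I/O queue) are made up for
 * illustration and are not taken from the commit.
 */
#include <sys/param.h>	/* howmany(), MAX() */
#include <stdio.h>

int
main(void)
{
	int mp_ncpus = 8;		/* CPUs in the system */
	int num_vectors_available = 5;	/* MSI-X vectors offered by the device */
	int min_cpus_per_ioq = 1;	/* lower bound applied by the driver */

	/* One vector is reserved for the admin queue, hence the "- 1". */
	int num_cpus_per_ioq = MAX(min_cpus_per_ioq,
	    howmany(mp_ncpus, num_vectors_available - 1));
	int num_io_queues = howmany(mp_ncpus, num_cpus_per_ioq);
	int num_vectors_requested = num_io_queues + 1;

	/* With these inputs: 2 CPUs per queue, 4 I/O queues, 5 vectors. */
	printf("%d CPUs per I/O queue, %d I/O queues, %d vectors requested\n",
	    num_cpus_per_ioq, num_io_queues, num_vectors_requested);
	return (0);
}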