git: f3a69bc7223a - stable/13 - bhyve nvme: Check return value of mapped memory
Date: Sat, 19 Nov 2022 18:46:14 UTC
The branch stable/13 has been updated by chuck:
URL: https://cgit.FreeBSD.org/src/commit/?id=f3a69bc7223ad3fc04e417a88e6bb878aa3bfaf2
commit f3a69bc7223ad3fc04e417a88e6bb878aa3bfaf2
Author: Chuck Tuffli <chuck@FreeBSD.org>
AuthorDate: 2022-08-14 14:45:21 +0000
Commit: Chuck Tuffli <chuck@FreeBSD.org>
CommitDate: 2022-11-20 02:21:32 +0000
bhyve nvme: Check return value of mapped memory
Fuzzing of bhyve using hyfuzz discovered a way to cause a segmentation
fault in the NVMe emulation. If a guest specifies, in either the PRP1 or
PRP2 field of a command, a physical address that cannot be mapped from
guest to host, the function paddr_guest2host() returns a NULL pointer.
The NVMe emulation did not check for this error case, which allowed the
segmentation fault to occur.
The fix is to check for a return value of NULL and indicate an error back to
the guest (Data Transfer error). While in the area, slightly refactor
the write/read blockif function to use a common error exit path.
PR: 256317,256319,256320,256321,256322
(cherry picked from commit 3d3678627c3112c94d174a8c51d8c058d02befb3)
---
usr.sbin/bhyve/pci_nvme.c | 21 +++++++++++++--------
1 file changed, 13 insertions(+), 8 deletions(-)
diff --git a/usr.sbin/bhyve/pci_nvme.c b/usr.sbin/bhyve/pci_nvme.c
index d133d4817665..6004cc91707b 100644
--- a/usr.sbin/bhyve/pci_nvme.c
+++ b/usr.sbin/bhyve/pci_nvme.c
@@ -2219,6 +2219,8 @@ pci_nvme_append_iov_req(struct pci_nvme_softc *sc, struct pci_nvme_ioreq *req,
req->io_req.br_iov[iovidx].iov_base =
paddr_guest2host(req->sc->nsc_pi->pi_vmctx,
req->prev_gpaddr, size);
+ if (req->io_req.br_iov[iovidx].iov_base == NULL)
+ return (-1);
req->prev_size += size;
req->io_req.br_resid += size;
@@ -2235,6 +2237,8 @@ pci_nvme_append_iov_req(struct pci_nvme_softc *sc, struct pci_nvme_ioreq *req,
req->io_req.br_iov[iovidx].iov_base =
paddr_guest2host(req->sc->nsc_pi->pi_vmctx,
gpaddr, size);
+ if (req->io_req.br_iov[iovidx].iov_base == NULL)
+ return (-1);
req->io_req.br_iov[iovidx].iov_len = size;
@@ -2420,8 +2424,7 @@ nvme_write_read_blockif(struct pci_nvme_softc *sc,
size = MIN(PAGE_SIZE - (prp1 % PAGE_SIZE), bytes);
if (pci_nvme_append_iov_req(sc, req, prp1,
size, is_write, offset)) {
- pci_nvme_status_genc(&status,
- NVME_SC_DATA_TRANSFER_ERROR);
+ err = -1;
goto out;
}
@@ -2434,8 +2437,7 @@ nvme_write_read_blockif(struct pci_nvme_softc *sc,
size = bytes;
if (pci_nvme_append_iov_req(sc, req, prp2,
size, is_write, offset)) {
- pci_nvme_status_genc(&status,
- NVME_SC_DATA_TRANSFER_ERROR);
+ err = -1;
goto out;
}
} else {
@@ -2451,6 +2453,10 @@ nvme_write_read_blockif(struct pci_nvme_softc *sc,
prp_list = paddr_guest2host(vmctx, prp,
PAGE_SIZE - (prp % PAGE_SIZE));
+ if (prp_list == NULL) {
+ err = -1;
+ goto out;
+ }
last = prp_list + (NVME_PRP2_ITEMS - 1);
}
@@ -2458,8 +2464,7 @@ nvme_write_read_blockif(struct pci_nvme_softc *sc,
if (pci_nvme_append_iov_req(sc, req, *prp_list,
size, is_write, offset)) {
- pci_nvme_status_genc(&status,
- NVME_SC_DATA_TRANSFER_ERROR);
+ err = -1;
goto out;
}
@@ -2474,10 +2479,10 @@ nvme_write_read_blockif(struct pci_nvme_softc *sc,
err = blockif_write(nvstore->ctx, &req->io_req);
else
err = blockif_read(nvstore->ctx, &req->io_req);
-
+out:
if (err)
pci_nvme_status_genc(&status, NVME_SC_DATA_TRANSFER_ERROR);
-out:
+
return (status);
}
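
The pattern the commit applies is worth spelling out: every pointer returned
by paddr_guest2host() must be validated before use, and every failure path
funnels through one exit label that reports NVME_SC_DATA_TRANSFER_ERROR to
the guest. The following is a minimal, self-contained sketch of that shape,
not the bhyve code itself: map_guest_addr(), guest_region, write_prps(), and
the SC_* constants are hypothetical stand-ins for paddr_guest2host(), the VM
context, nvme_write_read_blockif(), and pci_nvme_status_genc() with
NVME_SC_DATA_TRANSFER_ERROR.

	/*
	 * Sketch of the NULL-check-and-common-exit pattern from the fix.
	 * All names here are hypothetical stand-ins, not bhyve APIs.
	 */
	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define SC_SUCCESS		0x00
	#define SC_DATA_TRANSFER_ERROR	0x04	/* plays the role of NVME_SC_DATA_TRANSFER_ERROR */

	/* Fake "guest memory": only addresses in [GUEST_BASE, GUEST_BASE+GUEST_SIZE) map. */
	#define GUEST_BASE	0x1000
	#define GUEST_SIZE	0x4000
	static uint8_t guest_region[GUEST_SIZE];

	/* Stand-in for paddr_guest2host(): returns NULL when the range does not map. */
	static void *
	map_guest_addr(uint64_t gpa, size_t len)
	{
		if (gpa < GUEST_BASE || gpa + len > GUEST_BASE + GUEST_SIZE)
			return (NULL);
		return (&guest_region[gpa - GUEST_BASE]);
	}

	/*
	 * Copy a buffer to up to two guest pages named by prp1/prp2, checking
	 * every mapping and funneling failures through one exit label, as the
	 * fixed nvme_write_read_blockif() does.
	 */
	static uint16_t
	write_prps(uint64_t prp1, uint64_t prp2, const uint8_t *buf, size_t len)
	{
		uint16_t status = SC_SUCCESS;
		size_t chunk = len < 0x1000 ? len : 0x1000;
		void *p;

		p = map_guest_addr(prp1, chunk);
		if (p == NULL) {	/* the check the emulation was missing */
			status = SC_DATA_TRANSFER_ERROR;
			goto out;
		}
		memcpy(p, buf, chunk);

		if (len > chunk) {
			p = map_guest_addr(prp2, len - chunk);
			if (p == NULL) {
				status = SC_DATA_TRANSFER_ERROR;
				goto out;
			}
			memcpy(p, buf + chunk, len - chunk);
		}
	out:
		return (status);
	}

	int
	main(void)
	{
		uint8_t data[16] = { 0 };

		/* A mappable PRP succeeds... */
		printf("good: 0x%02x\n", write_prps(GUEST_BASE,
		    GUEST_BASE + 0x1000, data, sizeof(data)));
		/* ...an unmappable PRP now fails cleanly instead of crashing. */
		printf("bad:  0x%02x\n", write_prps(0xdeadbeef, 0,
		    data, sizeof(data)));
		return (0);
	}

Before the fix, the dereference of the unchecked pointer (the memcpy here,
the blockif iovec in bhyve) is where a hostile guest could trigger the
segmentation fault; with the check, the request fails with a Data Transfer
error status instead.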