PERFORCE change 133514 for review
Scott Long
scottl at FreeBSD.org
Thu Jan 17 16:02:43 PST 2008
http://perforce.freebsd.org/chv.cgi?CH=133514
Change 133514 by scottl at scottl-ix on 2008/01/18 00:02:00
Selectively bring in driver changes from xen31 that were made after the main
integration point.
Affected files ...
.. //depot/projects/xen31-xenbus/sys/dev/xen/blkfront/blkfront.c#2 integrate
.. //depot/projects/xen31-xenbus/sys/dev/xen/netfront/netfront.c#2 integrate
Differences ...
==== //depot/projects/xen31-xenbus/sys/dev/xen/blkfront/blkfront.c#2 (text+ko) ====
@@ -109,8 +109,6 @@
#define MAXIMUM_OUTSTANDING_BLOCK_REQS \
(BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE)
-static struct mtx blkif_io_block_lock;
-
static void kick_pending_request_queues(struct blkfront_info *);
static int blkif_open(struct disk *dp);
static int blkif_close(struct disk *dp);
@@ -170,7 +168,7 @@
xb_diskinfo[sc->xb_unit].device, sc->xb_unit,
sc->xb_disk->d_mediasize);
#endif
- sc->xb_disk->d_flags = DISKFLAG_NEEDSGIANT;
+ sc->xb_disk->d_flags = 0;
disk_create(sc->xb_disk, DISK_VERSION_00);
bioq_init(&sc->xb_bioq);
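
Clearing DISKFLAG_NEEDSGIANT marks the disk's strategy routine as MPSAFE, so
GEOM may call it without the Giant lock; the mutex conversions in the
following hunks are what make that claim true. A minimal sketch of the
attach-time sequence, assuming the xb_softc layout used by this driver
(the helper name is illustrative):

    #include <geom/geom_disk.h>

    static void
    xb_disk_attach(struct xb_softc *sc)
    {
            sc->xb_disk = disk_alloc();
            sc->xb_disk->d_name = "xbd";
            sc->xb_disk->d_drv1 = sc;
            sc->xb_disk->d_strategy = xb_strategy;
            /* d_mediasize, d_sectorsize, etc. come from the backend info. */
            sc->xb_disk->d_flags = 0;       /* MPSAFE: no DISKFLAG_NEEDSGIANT */
            disk_create(sc->xb_disk, DISK_VERSION_00);
            bioq_init(&sc->xb_bioq);
    }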
@@ -195,7 +193,6 @@
xb_strategy(struct bio *bp)
{
struct xb_softc *sc = (struct xb_softc *)bp->bio_disk->d_drv1;
- int flags;
/* bogus disk? */
if (sc == NULL) {
@@ -206,14 +203,14 @@
DPRINTK("");
- flags = splbio();
/*
* Place it in the queue of disk activities for this disk
*/
+ mtx_lock(&blkif_io_lock);
bioq_disksort(&sc->xb_bioq, bp);
- splx(flags);
xb_startio(sc);
+ mtx_unlock(&blkif_io_lock);
return;
bad:
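
The hunk above replaces the legacy splbio()/splx() interrupt-priority
protection with the driver-wide blkif_io_lock mutex, and pulls xb_startio()
inside the locked region so queueing a bio and kicking the ring happen
atomically. The resulting shape of the strategy routine, as a sketch with
the error path omitted:

    #include <sys/param.h>
    #include <sys/bio.h>
    #include <sys/lock.h>
    #include <sys/mutex.h>

    static struct mtx blkif_io_lock;        /* guards xb_bioq and the ring */

    static void
    xb_strategy(struct bio *bp)
    {
            struct xb_softc *sc = bp->bio_disk->d_drv1;

            mtx_lock(&blkif_io_lock);
            bioq_disksort(&sc->xb_bioq, bp);        /* sorted insert */
            xb_startio(sc);                         /* submit while locked */
            mtx_unlock(&blkif_io_lock);
    }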
@@ -235,7 +232,6 @@
{
int err, vdevice, i;
struct blkfront_info *info;
-
/* FIXME: Use dynamic device id if this is not set. */
err = xenbus_scanf(XBT_NIL, dev->nodename,
@@ -378,8 +374,8 @@
info->ring_ref = err;
err = bind_listening_port_to_irqhandler(dev->otherend_id,
- "xbd", (driver_intr_t *)blkif_int,
- info, INTR_TYPE_BIO, NULL);
+ "xbd", (driver_intr_t *)blkif_int, info,
+ INTR_TYPE_BIO | INTR_MPSAFE, NULL);
if (err <= 0) {
xenbus_dev_fatal(dev, err,
"bind_evtchn_to_irqhandler failed");
@@ -767,24 +763,20 @@
xb_startio(struct xb_softc *sc)
{
struct bio *bp;
- int flags, queued = 0;
+ int queued = 0;
struct blkfront_info *info = sc->xb_info;
DPRINTK("");
- flags = splbio();
+ mtx_assert(&blkif_io_lock, MA_OWNED);
while ((bp = bioq_takefirst(&sc->xb_bioq)) != NULL) {
if (RING_FULL(&info->ring))
goto wait;
- splx(flags);
-
if (blkif_queue_request(bp)) {
- flags = splbio();
wait:
bioq_insert_head(&sc->xb_bioq, bp);
- splx(flags);
break;
}
queued++;
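
With the spl sections gone, xb_startio() no longer toggles protection
itself; it asserts the locking contract instead. On an INVARIANTS kernel
mtx_assert() panics if the caller does not hold the lock, and it compiles
to nothing otherwise. Sketch:

    #include <sys/param.h>
    #include <sys/lock.h>
    #include <sys/mutex.h>

    static void
    xb_startio(struct xb_softc *sc)
    {
            /* Callers (xb_strategy, the interrupt path) hold the lock. */
            mtx_assert(&blkif_io_lock, MA_OWNED);
            /* ... drain sc->xb_bioq onto the shared ring ... */
    }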
@@ -968,10 +960,10 @@
mtx_unlock(&blkif_io_lock);
/* Send off requeued requests */
+ mtx_lock(&blkif_io_lock);
flush_requests(info);
/* Kick any other new requests queued since we resumed */
- mtx_lock(&blkif_io_lock);
kick_pending_request_queues(info);
mtx_unlock(&blkif_io_lock);
}
@@ -1009,7 +1001,6 @@
}
MTX_SYSINIT(ioreq, &blkif_io_lock, "BIO LOCK", MTX_NOWITNESS); /* XXX how does one enroll a lock? */
-MTX_SYSINIT(ioreq_block, &blkif_io_block_lock, "BIO BLOCK LOCK", MTX_SPIN | MTX_NOWITNESS);
SYSINIT(xbdev, SI_SUB_PSEUDO, SI_ORDER_SECOND, xenbus_init, NULL);
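
With the spin lock gone, only blkif_io_lock remains, and MTX_SYSINIT()
already answers the XXX above: it registers a SYSINIT hook that runs
mtx_init() during boot, which is how a file-scope mutex gets enrolled
without an explicit call in the attach path. Roughly equivalent by hand,
as a sketch:

    #include <sys/param.h>
    #include <sys/kernel.h>
    #include <sys/lock.h>
    #include <sys/mutex.h>

    static struct mtx blkif_io_lock;

    static void
    blkif_lock_init(void *arg __unused)
    {
            /* What MTX_SYSINIT(ioreq, ...) expands to, approximately. */
            mtx_init(&blkif_io_lock, "BIO LOCK", NULL, MTX_NOWITNESS);
    }
    SYSINIT(blkif_lock, SI_SUB_LOCK, SI_ORDER_MIDDLE, blkif_lock_init, NULL);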
==== //depot/projects/xen31-xenbus/sys/dev/xen/netfront/netfront.c#2 (text+ko) ====
@@ -144,93 +144,92 @@
* not the other way around. The size must track the free index arrays.
*/
struct xn_chain_data {
- struct mbuf *xn_tx_chain[NET_TX_RING_SIZE+1];
+ struct mbuf *xn_tx_chain[NET_TX_RING_SIZE+1];
struct mbuf *xn_rx_chain[NET_RX_RING_SIZE+1];
};
struct net_device_stats
{
- unsigned long rx_packets; /* total packets received */
- unsigned long tx_packets; /* total packets transmitted */
- unsigned long rx_bytes; /* total bytes received */
- unsigned long tx_bytes; /* total bytes transmitted */
- unsigned long rx_errors; /* bad packets received */
- unsigned long tx_errors; /* packet transmit problems */
- unsigned long rx_dropped; /* no space in linux buffers */
- unsigned long tx_dropped; /* no space available in linux */
- unsigned long multicast; /* multicast packets received */
- unsigned long collisions;
+ u_long rx_packets; /* total packets received */
+ u_long tx_packets; /* total packets transmitted */
+ u_long rx_bytes; /* total bytes received */
+ u_long tx_bytes; /* total bytes transmitted */
+ u_long rx_errors; /* bad packets received */
+ u_long tx_errors; /* packet transmit problems */
+ u_long rx_dropped; /* no space in linux buffers */
+ u_long tx_dropped; /* no space available in linux */
+ u_long multicast; /* multicast packets received */
+ u_long collisions;
/* detailed rx_errors: */
- unsigned long rx_length_errors;
- unsigned long rx_over_errors; /* receiver ring buff overflow */
- unsigned long rx_crc_errors; /* recved pkt with crc error */
- unsigned long rx_frame_errors; /* recv'd frame alignment error */
- unsigned long rx_fifo_errors; /* recv'r fifo overrun */
- unsigned long rx_missed_errors; /* receiver missed packet */
+ u_long rx_length_errors;
+ u_long rx_over_errors; /* receiver ring buff overflow */
+ u_long rx_crc_errors; /* recved pkt with crc error */
+ u_long rx_frame_errors; /* recv'd frame alignment error */
+ u_long rx_fifo_errors; /* recv'r fifo overrun */
+ u_long rx_missed_errors; /* receiver missed packet */
/* detailed tx_errors */
- unsigned long tx_aborted_errors;
- unsigned long tx_carrier_errors;
- unsigned long tx_fifo_errors;
- unsigned long tx_heartbeat_errors;
- unsigned long tx_window_errors;
+ u_long tx_aborted_errors;
+ u_long tx_carrier_errors;
+ u_long tx_fifo_errors;
+ u_long tx_heartbeat_errors;
+ u_long tx_window_errors;
/* for cslip etc */
- unsigned long rx_compressed;
- unsigned long tx_compressed;
+ u_long rx_compressed;
+ u_long tx_compressed;
};
struct netfront_info {
- struct ifnet *xn_ifp;
+ struct ifnet *xn_ifp;
+
+ struct net_device_stats stats;
+ u_int tx_full;
- struct net_device_stats stats;
- unsigned int tx_full;
-
- netif_tx_front_ring_t tx;
- netif_rx_front_ring_t rx;
+ netif_tx_front_ring_t tx;
+ netif_rx_front_ring_t rx;
- struct mtx tx_lock;
- struct mtx rx_lock;
- struct sx sc_lock;
+ struct mtx tx_lock;
+ struct mtx rx_lock;
+ struct sx sc_lock;
- unsigned int handle;
- unsigned int irq;
- unsigned int copying_receiver;
- unsigned int carrier;
+ u_int handle;
+ u_int irq;
+ u_int copying_receiver;
+ u_int carrier;
/* Receive-ring batched refills. */
#define RX_MIN_TARGET 32
#define RX_MAX_TARGET NET_RX_RING_SIZE
- int rx_min_target, rx_max_target, rx_target;
+ int rx_min_target, rx_max_target, rx_target;
- /*
- * {tx,rx}_skbs store outstanding skbuffs. The first entry in each
- * array is an index into a chain of free entries.
- */
+ /*
+ * {tx,rx}_skbs store outstanding skbuffs. The first entry in each
+ * array is an index into a chain of free entries.
+ */
- grant_ref_t gref_tx_head;
- grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1];
- grant_ref_t gref_rx_head;
- grant_ref_t grant_rx_ref[NET_TX_RING_SIZE + 1];
+ grant_ref_t gref_tx_head;
+ grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1];
+ grant_ref_t gref_rx_head;
+ grant_ref_t grant_rx_ref[NET_TX_RING_SIZE + 1];
#define TX_MAX_TARGET min(NET_RX_RING_SIZE, 256)
- struct xenbus_device *xbdev;
- int tx_ring_ref;
- int rx_ring_ref;
- uint8_t mac[ETHER_ADDR_LEN];
- struct xn_chain_data xn_cdata; /* mbufs */
- struct mbuf_head xn_rx_batch; /* head of the batch queue */
+ struct xenbus_device *xbdev;
+ int tx_ring_ref;
+ int rx_ring_ref;
+ uint8_t mac[ETHER_ADDR_LEN];
+ struct xn_chain_data xn_cdata; /* mbufs */
+ struct mbuf_head xn_rx_batch; /* head of the batch queue */
- int xn_if_flags;
- struct callout xn_stat_ch;
+ int xn_if_flags;
+ struct callout xn_stat_ch;
-
- unsigned long rx_pfn_array[NET_RX_RING_SIZE];
- multicall_entry_t rx_mcl[NET_RX_RING_SIZE+1];
- mmu_update_t rx_mmu[NET_RX_RING_SIZE];
+ u_long rx_pfn_array[NET_RX_RING_SIZE];
+ multicall_entry_t rx_mcl[NET_RX_RING_SIZE+1];
+ mmu_update_t rx_mmu[NET_RX_RING_SIZE];
};
#define rx_mbufs xn_cdata.xn_rx_chain
@@ -278,13 +277,13 @@
add_id_to_freelist(struct mbuf **list, unsigned short id)
{
list[id] = list[0];
- list[0] = (void *)(unsigned long)id;
+ list[0] = (void *)(u_long)id;
}
static inline unsigned short
get_id_from_freelist(struct mbuf **list)
{
- unsigned int id = (unsigned int)(unsigned long)list[0];
+ u_int id = (u_int)(u_long)list[0];
list[0] = list[id];
return (id);
}
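
The freelist here threads through the mbuf-pointer array itself: slot 0
holds the index of the first free entry, cast to a pointer, and each free
slot holds the cast index of the next. Small integers are distinguishable
from real mbuf pointers because kernel addresses sit at or above KERNBASE
(the test used in the next hunk). Self-contained sketch of the scheme:

    #include <sys/types.h>

    struct mbuf;                            /* opaque here */

    static __inline void
    add_id_to_freelist(struct mbuf **list, unsigned short id)
    {
            list[id] = list[0];             /* old head chains behind us */
            list[0] = (void *)(u_long)id;   /* this id becomes the head */
    }

    static __inline unsigned short
    get_id_from_freelist(struct mbuf **list)
    {
            u_int id = (u_int)(u_long)list[0];

            list[0] = list[id];             /* unlink the head entry */
            return (id);
    }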
@@ -661,24 +660,23 @@
static void
netif_release_tx_bufs(struct netfront_info *np)
{
- struct mbuf *m;
- int i;
+ struct mbuf *m;
+ int i;
- for (i = 1; i <= NET_TX_RING_SIZE; i++) {
- m = np->xn_cdata.xn_tx_chain[i];
+ for (i = 1; i <= NET_TX_RING_SIZE; i++) {
+ m = np->xn_cdata.xn_tx_chain[i];
- if (((unsigned long)m) < KERNBASE)
- continue;
- gnttab_grant_foreign_access_ref(
- np->grant_tx_ref[i], np->xbdev->otherend_id,
- virt_to_mfn(mtod(m, vm_offset_t)),
- GNTMAP_readonly);
- gnttab_release_grant_reference(
- &np->gref_tx_head, np->grant_tx_ref[i]);
- np->grant_tx_ref[i] = GRANT_INVALID_REF;
- add_id_to_freelist(np->tx_mbufs, i);
- m_freem(m);
- }
+ if (((u_long)m) < KERNBASE)
+ continue;
+ gnttab_grant_foreign_access_ref(np->grant_tx_ref[i],
+ np->xbdev->otherend_id, virt_to_mfn(mtod(m, vm_offset_t)),
+ GNTMAP_readonly);
+ gnttab_release_grant_reference(&np->gref_tx_head,
+ np->grant_tx_ref[i]);
+ np->grant_tx_ref[i] = GRANT_INVALID_REF;
+ add_id_to_freelist(np->tx_mbufs, i);
+ m_freem(m);
+ }
}
static void
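
The (u_long)m < KERNBASE test in netif_release_tx_bufs() is the other half
of that freelist encoding: a slot holding a small cast index, rather than a
real mbuf pointer, compares below the kernel's base address and is skipped.
The loop's shape, as a sketch with the reclaim steps elided:

    #include <sys/param.h>                  /* KERNBASE */

    static void
    release_tx_bufs_sketch(struct netfront_info *np)
    {
            struct mbuf *m;
            int i;

            for (i = 1; i <= NET_TX_RING_SIZE; i++) {
                    m = np->xn_cdata.xn_tx_chain[i];
                    if ((u_long)m < KERNBASE)
                            continue;       /* freelist id, not an mbuf */
                    /* reclaim grant ref, recycle id i, m_freem(m) */
            }
    }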
@@ -693,7 +691,7 @@
int nr_flips;
netif_rx_request_t *req;
vm_offset_t vaddr;
- unsigned long pfn;
+ u_long pfn;
req_prod = sc->rx.req_prod_pvt;
@@ -702,27 +700,27 @@
/*
* Allocate skbuffs greedily, even though we batch updates to the
- * receive ring. This creates a less bursty demand on the memory allocator,
- * so should reduce the chance of failed allocation requests both for
- * ourselves and for other kernel subsystems.
+ * receive ring. This creates a less bursty demand on the memory
+ * allocator, so should reduce the chance of failed allocation
+ * requests both for ourselves and for other kernel subsystems.
*/
batch_target = sc->rx_target - (req_prod - sc->rx.rsp_cons);
for (i = mbufq_len(&sc->xn_rx_batch); i < batch_target; i++) {
MGETHDR(m_new, M_DONTWAIT, MT_DATA);
if (m_new == NULL)
- goto no_mbuf;
+ goto no_mbuf;
m_cljget(m_new, M_DONTWAIT, MJUMPAGESIZE);
if ((m_new->m_flags & M_EXT) == 0) {
- m_freem(m_new);
+ m_freem(m_new);
no_mbuf:
- if (i != 0)
- goto refill;
- /*
- * XXX set timer
- */
- break;
+ if (i != 0)
+ goto refill;
+ /*
+ * XXX set timer
+ */
+ break;
}
m_new->m_len = m_new->m_pkthdr.len = MJUMPAGESIZE;
@@ -732,62 +730,63 @@
/* Is the batch large enough to be worthwhile? */
if (i < (sc->rx_target/2)) {
- if (req_prod > sc->rx.sring->req_prod)
- goto push;
- return;
+ if (req_prod > sc->rx.sring->req_prod)
+ goto push;
+ return;
}
- /* Adjust our floating fill target if we risked running out of buffers. */
+ /* Adjust floating fill target if we risked running out of buffers. */
if ( ((req_prod - sc->rx.sring->rsp_prod) < (sc->rx_target / 4)) &&
((sc->rx_target *= 2) > sc->rx_max_target) )
sc->rx_target = sc->rx_max_target;
refill:
for (nr_flips = i = 0; ; i++) {
- if ((m_new = mbufq_dequeue(&sc->xn_rx_batch)) == NULL)
- break;
+ if ((m_new = mbufq_dequeue(&sc->xn_rx_batch)) == NULL)
+ break;
+
+ m_new->m_ext.ext_args = (vm_paddr_t *)(uintptr_t)(
+ vtophys(m_new->m_ext.ext_buf) >> PAGE_SHIFT);
+
+ id = xennet_rxidx(req_prod + i);
+
+ KASSERT(sc->xn_cdata.xn_rx_chain[id] == NULL,
+ ("non-NULL xm_rx_chain"));
+ sc->xn_cdata.xn_rx_chain[id] = m_new;
+
+ ref = gnttab_claim_grant_reference(&sc->gref_rx_head);
+ KASSERT((short)ref >= 0, ("negative ref"));
+ sc->grant_rx_ref[id] = ref;
- m_new->m_ext.ext_args = (vm_paddr_t *)(uintptr_t)(vtophys(m_new->m_ext.ext_buf) >> PAGE_SHIFT);
-
- id = xennet_rxidx(req_prod + i);
-
- PANIC_IF(sc->xn_cdata.xn_rx_chain[id] != NULL);
- sc->xn_cdata.xn_rx_chain[id] = m_new;
-
- ref = gnttab_claim_grant_reference(&sc->gref_rx_head);
- PANIC_IF((signed short)ref < 0);
- sc->grant_rx_ref[id] = ref;
-
- vaddr = mtod(m_new, vm_offset_t);
- pfn = vtophys(vaddr) >> PAGE_SHIFT;
- req = RING_GET_REQUEST(&sc->rx, req_prod + i);
+ vaddr = mtod(m_new, vm_offset_t);
+ pfn = vtophys(vaddr) >> PAGE_SHIFT;
+ req = RING_GET_REQUEST(&sc->rx, req_prod + i);
- if (sc->copying_receiver == 0) {
- gnttab_grant_foreign_transfer_ref(ref,
- sc->xbdev->otherend_id,
- pfn);
- sc->rx_pfn_array[nr_flips] = PFNTOMFN(pfn);
- if (!xen_feature(XENFEAT_auto_translated_physmap)) {
- /* Remove this page before passing
- * back to Xen.
- */
- set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
- MULTI_update_va_mapping(&sc->rx_mcl[i],
- vaddr, 0, 0);
- }
- nr_flips++;
- } else {
- gnttab_grant_foreign_access_ref(ref,
- sc->xbdev->otherend_id,
- PFNTOMFN(pfn),
- 0);
+ if (sc->copying_receiver == 0) {
+ gnttab_grant_foreign_transfer_ref(ref,
+ sc->xbdev->otherend_id, pfn);
+ sc->rx_pfn_array[nr_flips] = PFNTOMFN(pfn);
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ /* Remove this page before passing
+ * back to Xen.
+ */
+ set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+ MULTI_update_va_mapping(&sc->rx_mcl[i],
+ vaddr, 0, 0);
}
- req->id = id;
- req->gref = ref;
+ nr_flips++;
+ } else {
+ gnttab_grant_foreign_access_ref(ref,
+ sc->xbdev->otherend_id,
+ PFNTOMFN(pfn), 0);
+ }
+ req->id = id;
+ req->gref = ref;
- sc->rx_pfn_array[i] = vtomach(mtod(m_new,vm_offset_t)) >> PAGE_SHIFT;
+ sc->rx_pfn_array[i] =
+ vtomach(mtod(m_new,vm_offset_t)) >> PAGE_SHIFT;
}
- KASSERT(i, ("no mbufs processed")); /* should have returned earlier */
+ KASSERT(i, ("no mbufs processed")); /* should have returned earlier */
KASSERT(mbufq_len(&sc->xn_rx_batch) == 0, ("not all mbufs processed"));
/*
* We may have allocated buffers which have entries outstanding
@@ -796,39 +795,40 @@
PT_UPDATES_FLUSH();
if (nr_flips != 0) {
#ifdef notyet
- /* Tell the balloon driver what is going on. */
- balloon_update_driver_allowance(i);
+ /* Tell the balloon driver what is going on. */
+ balloon_update_driver_allowance(i);
#endif
- set_xen_guest_handle(reservation.extent_start, sc->rx_pfn_array);
- reservation.nr_extents = i;
- reservation.extent_order = 0;
- reservation.address_bits = 0;
- reservation.domid = DOMID_SELF;
+ set_xen_guest_handle(reservation.extent_start,sc->rx_pfn_array);
+ reservation.nr_extents = i;
+ reservation.extent_order = 0;
+ reservation.address_bits = 0;
+ reservation.domid = DOMID_SELF;
- if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
- /* After all PTEs have been zapped, flush the TLB. */
- sc->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] =
- UVMF_TLB_FLUSH|UVMF_ALL;
+ /* After all PTEs have been zapped, flush the TLB. */
+ sc->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] =
+ UVMF_TLB_FLUSH|UVMF_ALL;
- /* Give away a batch of pages. */
- sc->rx_mcl[i].op = __HYPERVISOR_memory_op;
- sc->rx_mcl[i].args[0] = XENMEM_decrease_reservation;
- sc->rx_mcl[i].args[1] = (unsigned long)&reservation;
- /* Zap PTEs and give away pages in one big multicall. */
- (void)HYPERVISOR_multicall(sc->rx_mcl, i+1);
-
- /* Check return status of HYPERVISOR_dom_mem_op(). */
- if (unlikely(sc->rx_mcl[i].result != i))
- panic("Unable to reduce memory reservation\n");
+ /* Give away a batch of pages. */
+ sc->rx_mcl[i].op = __HYPERVISOR_memory_op;
+ sc->rx_mcl[i].args[0] = XENMEM_decrease_reservation;
+ sc->rx_mcl[i].args[1] = (u_long)&reservation;
+ /* Zap PTEs and give away pages in one big multicall. */
+ (void)HYPERVISOR_multicall(sc->rx_mcl, i+1);
+ /* Check return status of HYPERVISOR_dom_mem_op(). */
+ if (unlikely(sc->rx_mcl[i].result != i))
+ panic("Unable to reduce memory reservation\n");
} else {
- if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
- &reservation) != i)
- panic("Unable to reduce memory reservation\n");
- }
+ if (HYPERVISOR_memory_op(
+ XENMEM_decrease_reservation, &reservation)
+ != i)
+ panic("Unable to reduce memory "
+ "reservation\n");
+ }
} else {
- wmb();
+ wmb();
}
/* Above is a suitable barrier to ensure backend will see requests. */
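
When pages are flipped to the backend they must also leave this domain's
allocation, which is what the XENMEM_decrease_reservation hypercall does;
the multicall branch batches it with the PTE zapping, the plain branch
issues it directly. The direct form, as a sketch mirroring the call in the
hunk (header paths vary in the xen31 tree, and the helper is illustrative):

    static void
    give_pages_to_backend(struct netfront_info *sc, int n)
    {
            struct xen_memory_reservation reservation;

            set_xen_guest_handle(reservation.extent_start,
                sc->rx_pfn_array);
            reservation.nr_extents = n;     /* pages surrendered */
            reservation.extent_order = 0;   /* order-0: single pages */
            reservation.address_bits = 0;   /* no placement restriction */
            reservation.domid = DOMID_SELF;

            /* Returns the number of extents actually released. */
            if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
                &reservation) != n)
                    panic("Unable to reduce memory reservation");
    }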
@@ -836,126 +836,129 @@
push:
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->rx, notify);
if (notify)
- notify_remote_via_irq(sc->irq);
+ notify_remote_via_irq(sc->irq);
}
static void
xn_rxeof(struct netfront_info *np)
{
- struct ifnet *ifp;
- struct netfront_rx_info rinfo;
- struct netif_rx_response *rx = &rinfo.rx;
- struct netif_extra_info *extras = rinfo.extras;
- RING_IDX i, rp;
- multicall_entry_t *mcl;
- struct mbuf *m;
- struct mbuf_head rxq, errq, tmpq;
- int err, pages_flipped = 0;
+ struct ifnet *ifp;
+ struct netfront_rx_info rinfo;
+ struct netif_rx_response *rx = &rinfo.rx;
+ struct netif_extra_info *extras = rinfo.extras;
+ RING_IDX i, rp;
+ multicall_entry_t *mcl;
+ struct mbuf *m;
+ struct mbuf_head rxq, errq, tmpq;
+ int err, pages_flipped = 0;
+
+ XN_RX_LOCK_ASSERT(np);
+ if (!netfront_carrier_ok(np))
+ return;
- XN_RX_LOCK_ASSERT(np);
- if (!netfront_carrier_ok(np))
- return;
+ mbufq_init(&tmpq);
+ mbufq_init(&errq);
+ mbufq_init(&rxq);
- mbufq_init(&tmpq);
- mbufq_init(&errq);
- mbufq_init(&rxq);
-
- ifp = np->xn_ifp;
+ ifp = np->xn_ifp;
- rp = np->rx.sring->rsp_prod;
- rmb(); /* Ensure we see queued responses up to 'rp'. */
+ rp = np->rx.sring->rsp_prod;
+ rmb(); /* Ensure we see queued responses up to 'rp'. */
+
+ i = np->rx.rsp_cons;
+ while ((i != rp)) {
+ memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
+ memset(extras, 0, sizeof(rinfo.extras));
+
+ err = xennet_get_responses(np, &rinfo, rp, &tmpq,
+ &pages_flipped);
- i = np->rx.rsp_cons;
- while ((i != rp)) {
- memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
- memset(extras, 0, sizeof(rinfo.extras));
+ if (unlikely(err)) {
+ while ((m = mbufq_dequeue(&tmpq)))
+ mbufq_tail(&errq, m);
+ np->stats.rx_errors++;
+ i = np->rx.rsp_cons;
+ continue;
+ }
- err = xennet_get_responses(np, &rinfo, rp, &tmpq,
- &pages_flipped);
+ m = mbufq_dequeue(&tmpq);
- if (unlikely(err)) {
- while ((m = mbufq_dequeue(&tmpq)))
- mbufq_tail(&errq, m);
- np->stats.rx_errors++;
- i = np->rx.rsp_cons;
- continue;
- }
+ m->m_data += rx->offset;/* (rx->addr & PAGE_MASK); */
+ m->m_pkthdr.len = m->m_len = rx->status;
+ m->m_pkthdr.rcvif = ifp;
- m = mbufq_dequeue(&tmpq);
-
-
- m->m_data += rx->offset;/* (rx->addr & PAGE_MASK); */
- m->m_pkthdr.len = m->m_len = rx->status;
- m->m_pkthdr.rcvif = ifp;
-
- if ( rx->flags & NETRXF_data_validated ) {
- /* Tell the stack the checksums are okay */
- /*
- * XXX this isn't necessarily the case - need to add check
- *
- */
+ if ( rx->flags & NETRXF_data_validated ) {
+ /* Tell the stack the checksums are okay */
+ /*
+ * XXX this isn't necessarily the case - need to add
+ * check
+ */
- m->m_pkthdr.csum_flags |=
- (CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
- m->m_pkthdr.csum_data = 0xffff;
- }
+ m->m_pkthdr.csum_flags |=
+ (CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID
+ | CSUM_PSEUDO_HDR);
+ m->m_pkthdr.csum_data = 0xffff;
+ }
- np->stats.rx_packets++;
- np->stats.rx_bytes += rx->status;
+ np->stats.rx_packets++;
+ np->stats.rx_bytes += rx->status;
- mbufq_tail(&rxq, m);
- np->rx.rsp_cons = ++i;
- }
+ mbufq_tail(&rxq, m);
+ np->rx.rsp_cons = ++i;
+ }
- if (pages_flipped) {
- /* Some pages are no longer absent... */
+ if (pages_flipped) {
+ /* Some pages are no longer absent... */
#ifdef notyet
- balloon_update_driver_allowance(-pages_flipped);
+ balloon_update_driver_allowance(-pages_flipped);
#endif
- /* Do all the remapping work, and M->P updates, in one big hypercall. */
- if (!!xen_feature(XENFEAT_auto_translated_physmap)) {
- mcl = np->rx_mcl + pages_flipped;
- mcl->op = __HYPERVISOR_mmu_update;
- mcl->args[0] = (unsigned long)np->rx_mmu;
- mcl->args[1] = pages_flipped;
- mcl->args[2] = 0;
- mcl->args[3] = DOMID_SELF;
- (void)HYPERVISOR_multicall(np->rx_mcl, pages_flipped + 1);
- }
+ /* Do all the remapping work, and M->P updates, in one big
+ * hypercall.
+ */
+ if (!!xen_feature(XENFEAT_auto_translated_physmap)) {
+ mcl = np->rx_mcl + pages_flipped;
+ mcl->op = __HYPERVISOR_mmu_update;
+ mcl->args[0] = (u_long)np->rx_mmu;
+ mcl->args[1] = pages_flipped;
+ mcl->args[2] = 0;
+ mcl->args[3] = DOMID_SELF;
+ (void)HYPERVISOR_multicall(np->rx_mcl,
+ pages_flipped + 1);
}
+ }
- while ((m = mbufq_dequeue(&errq)))
- m_freem(m);
+ while ((m = mbufq_dequeue(&errq)))
+ m_freem(m);
+ /*
+ * Process all the mbufs after the remapping is complete.
+ * Break the mbuf chain first though.
+ */
+ while ((m = mbufq_dequeue(&rxq)) != NULL) {
+ ifp->if_ipackets++;
- /*
- * Process all the mbufs after the remapping is complete.
- * Break the mbuf chain first though.
+ /*
+ * Do we really need to drop the rx lock?
*/
- while ((m = mbufq_dequeue(&rxq)) != NULL) {
- ifp->if_ipackets++;
-
- /*
- * Do we really need to drop the rx lock?
- */
- XN_RX_UNLOCK(np);
- /* Pass it up. */
- (*ifp->if_input)(ifp, m);
- XN_RX_LOCK(np);
- }
+ XN_RX_UNLOCK(np);
+ /* Pass it up. */
+ (*ifp->if_input)(ifp, m);
+ XN_RX_LOCK(np);
+ }
- np->rx.rsp_cons = i;
+ np->rx.rsp_cons = i;
+
+#if 0
+ /* If we get a callback with very few responses, reduce fill target. */
+ /* NB. Note exponential increase, linear decrease. */
+ if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
+ ((3*np->rx_target) / 4)) && (--np->rx_target < np->rx_min_target))
+ np->rx_target = np->rx_min_target;
+#endif
- /* If we get a callback with very few responses, reduce fill target. */
- /* NB. Note exponential increase, linear decrease. */
- if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
- ((3*np->rx_target) / 4)) && (--np->rx_target < np->rx_min_target))
- np->rx_target = np->rx_min_target;
-
- network_alloc_rx_buffers(np);
+ network_alloc_rx_buffers(np);
- np->rx.sring->rsp_event = i + 1;
-
+ np->rx.sring->rsp_event = i + 1;
}
static void
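
Note that xn_rxeof() hands packets to the stack with the rx lock dropped:
(*ifp->if_input)() may re-enter the driver, for instance to transmit an
immediate reply, so no driver lock can be held across it. The delivery
loop's pattern, as a sketch using the driver's mbufq helpers:

    static void
    deliver_rx_sketch(struct netfront_info *np, struct mbuf_head *rxq)
    {
            struct ifnet *ifp = np->xn_ifp;
            struct mbuf *m;

            while ((m = mbufq_dequeue(rxq)) != NULL) {
                    ifp->if_ipackets++;
                    XN_RX_UNLOCK(np);       /* if_input may recurse */
                    (*ifp->if_input)(ifp, m);
                    XN_RX_LOCK(np);
            }
    }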
@@ -986,10 +989,10 @@
KASSERT(m != NULL, ("mbuf not found in xn_tx_chain"));
M_ASSERTVALID(m);
if (unlikely(gnttab_query_foreign_access(
- np->grant_tx_ref[id]) != 0)) {
- printk("network_tx_buf_gc: warning "
- "-- grant still in use by backend "
- "domain.\n");
+ np->grant_tx_ref[id]) != 0)) {
+ printf("network_tx_buf_gc: warning "
+ "-- grant still in use by backend "
+ "domain.\n");
goto out;
}
gnttab_end_foreign_access_ref(
@@ -1005,17 +1008,17 @@
np->tx.rsp_cons = prod;
/*
- * Set a new event, then check for race with update of tx_cons. Note
- * that it is essential to schedule a callback, no matter how few
- * buffers are pending. Even if there is space in the transmit ring,
- * higher layers may be blocked because too much data is outstanding:
- * in such cases notification from Xen is likely to be the only kick
+ * Set a new event, then check for race with update of
+ * tx_cons. Note that it is essential to schedule a
+ * callback, no matter how few buffers are pending. Even if
+ * there is space in the transmit ring, higher layers may
+ * be blocked because too much data is outstanding: in such
+ * cases notification from Xen is likely to be the only kick
* that we'll get.
*/
np->tx.sring->rsp_event =
- prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
-
-
+ prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
+
mb();
} while (prod != np->tx.sring->rsp_prod);
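
The rsp_event store asks the backend for the next tx-completion
notification roughly halfway through the outstanding requests, so the
frontend takes one interrupt per batch instead of one per response. Worked
example: with prod = 10 and req_prod = 18 the threshold is
10 + ((18 - 10) >> 1) + 1 = 15, i.e. notify after five more responses
complete. As an isolated sketch:

    static __inline RING_IDX
    tx_event_threshold(RING_IDX prod, RING_IDX req_prod)
    {
            /* Halfway point, and always at least one response ahead. */
            return (prod + ((req_prod - prod) >> 1) + 1);
    }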
@@ -1038,24 +1041,25 @@
struct netfront_info *np = xsc;
struct ifnet *ifp = np->xn_ifp;
-
+#if 0
if (!(np->rx.rsp_cons != np->rx.sring->rsp_prod &&
- likely(netfront_carrier_ok(np)) &&
- ifp->if_drv_flags & IFF_DRV_RUNNING))
- return;
+ likely(netfront_carrier_ok(np)) &&
+ ifp->if_drv_flags & IFF_DRV_RUNNING))
+ return;
+#endif
if (np->tx.rsp_cons != np->tx.sring->rsp_prod) {
- XN_TX_LOCK(np);
- xn_txeof(np);
- XN_TX_UNLOCK(np);
+ XN_TX_LOCK(np);
+ xn_txeof(np);
+ XN_TX_UNLOCK(np);
}
-
+
XN_RX_LOCK(np);
xn_rxeof(np);
XN_RX_UNLOCK(np);
if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
- !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
- xn_start(ifp);
+ !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+ xn_start(ifp);
}
@@ -1063,62 +1067,60 @@
xennet_move_rx_slot(struct netfront_info *np, struct mbuf *m,
grant_ref_t ref)
{
- int new = xennet_rxidx(np->rx.req_prod_pvt);
+ int new = xennet_rxidx(np->rx.req_prod_pvt);
- PANIC_IF(np->rx_mbufs[new] != NULL);
- np->rx_mbufs[new] = m;
- np->grant_rx_ref[new] = ref;
- RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
- RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
- np->rx.req_prod_pvt++;
+ KASSERT(np->rx_mbufs[new] == NULL, ("rx_mbufs != NULL"));
+ np->rx_mbufs[new] = m;
+ np->grant_rx_ref[new] = ref;
+ RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
+ RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
+ np->rx.req_prod_pvt++;
}
static int
xennet_get_extras(struct netfront_info *np,
- struct netif_extra_info *extras, RING_IDX rp)
-
+ struct netif_extra_info *extras, RING_IDX rp)
{
- struct netif_extra_info *extra;
- RING_IDX cons = np->rx.rsp_cons;
+ struct netif_extra_info *extra;
+ RING_IDX cons = np->rx.rsp_cons;
- int err = 0;
+ int err = 0;
- do {
- struct mbuf *m;
- grant_ref_t ref;
+ do {
+ struct mbuf *m;
+ grant_ref_t ref;
- if (unlikely(cons + 1 == rp)) {
+ if (unlikely(cons + 1 == rp)) {
#if 0
- if (net_ratelimit())
- WPRINTK("Missing extra info\n");
+ if (net_ratelimit())
+ WPRINTK("Missing extra info\n");
#endif
- err = -EINVAL;
- break;
- }
+ err = -EINVAL;
+ break;
+ }
- extra = (struct netif_extra_info *)
- RING_GET_RESPONSE(&np->rx, ++cons);
+ extra = (struct netif_extra_info *)
+ RING_GET_RESPONSE(&np->rx, ++cons);
- if (unlikely(!extra->type ||
- extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
+ if (unlikely(!extra->type ||
+ extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
#if 0
- if (net_ratelimit())
- WPRINTK("Invalid extra type: %d\n",
- extra->type);
+ if (net_ratelimit())
+ WPRINTK("Invalid extra type: %d\n",
+ extra->type);
#endif
- err = -EINVAL;
- } else {
- memcpy(&extras[extra->type - 1], extra,
- sizeof(*extra));
- }
+ err = -EINVAL;
+ } else {
+ memcpy(&extras[extra->type - 1], extra, sizeof(*extra));
+ }
- m = xennet_get_rx_mbuf(np, cons);
- ref = xennet_get_rx_ref(np, cons);
- xennet_move_rx_slot(np, m, ref);
- } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
+ m = xennet_get_rx_mbuf(np, cons);
+ ref = xennet_get_rx_ref(np, cons);
+ xennet_move_rx_slot(np, m, ref);
+ } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
- np->rx.rsp_cons = cons;
- return err;
+ np->rx.rsp_cons = cons;
+ return err;
}
static int
@@ -1127,124 +1129,123 @@
struct mbuf_head *list,
int *pages_flipped_p)
{
- int pages_flipped = *pages_flipped_p;
- struct mmu_update *mmu;
- struct multicall_entry *mcl;
- struct netif_rx_response *rx = &rinfo->rx;
- struct netif_extra_info *extras = rinfo->extras;
- RING_IDX cons = np->rx.rsp_cons;
- struct mbuf *m = xennet_get_rx_mbuf(np, cons);
- grant_ref_t ref = xennet_get_rx_ref(np, cons);
- int max = 24 /* MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD) */;
- int frags = 1;
- int err = 0;
- unsigned long ret;
+ int pages_flipped = *pages_flipped_p;
+ struct mmu_update *mmu;
+ struct multicall_entry *mcl;
+ struct netif_rx_response *rx = &rinfo->rx;
+ struct netif_extra_info *extras = rinfo->extras;
+ RING_IDX cons = np->rx.rsp_cons;
+ struct mbuf *m = xennet_get_rx_mbuf(np, cons);
+ grant_ref_t ref = xennet_get_rx_ref(np, cons);
+ int max = 24 /* MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD) */;
+ int frags = 1;
+ int err = 0;
+ u_long ret;
- if (rx->flags & NETRXF_extra_info) {
- err = xennet_get_extras(np, extras, rp);
- cons = np->rx.rsp_cons;
- }
+ if (rx->flags & NETRXF_extra_info) {
+ err = xennet_get_extras(np, extras, rp);
+ cons = np->rx.rsp_cons;
+ }
- for (;;) {
- unsigned long mfn;
+ for (;;) {
+ u_long mfn;
- if (unlikely(rx->status < 0 ||
>>> TRUNCATED FOR MAIL (1000 lines) <<<