PERFORCE change 134280 for review
Kip Macy
kmacy at FreeBSD.org
Sun Jan 27 21:27:37 PST 2008
http://perforce.freebsd.org/chv.cgi?CH=134280
Change 134280 by kmacy at kmacy:pandemonium:toehead on 2008/01/28 05:26:40
- define some missing ddp bits
- replace #ifdef notyet with a clearer indication of what needs to go
in in some places
Affected files ...
.. //depot/projects/toehead/sys/dev/cxgb/ulp/tom/cxgb_cpl_io.c#8 edit
.. //depot/projects/toehead/sys/dev/cxgb/ulp/tom/cxgb_cpl_socket.c#9 edit
.. //depot/projects/toehead/sys/dev/cxgb/ulp/tom/cxgb_ddp.c#3 edit
.. //depot/projects/toehead/sys/dev/cxgb/ulp/tom/cxgb_t3_ddp.h#8 edit
Differences ...
==== //depot/projects/toehead/sys/dev/cxgb/ulp/tom/cxgb_cpl_io.c#8 (text+ko) ====
@@ -509,7 +509,7 @@
static void
handle_urg_ptr(struct socket *so, uint32_t urg_seq)
{
-#ifdef notyet
+#ifdef URGENT_DATA_SUPPORTED
struct tcpcb *tp = sototcpcb(so);
urg_seq--; /* initially points past the urgent data, per BSD */
@@ -867,7 +867,7 @@
static int
t3_set_cong_control(struct socket *so, const char *name)
{
-#ifdef notyet
+#ifdef CONGESTION_CONTROL_SUPPORTED
int cong_algo;
for (cong_algo = 0; cong_algo < ARRAY_SIZE(t3_cong_ops); cong_algo++)
@@ -1512,7 +1512,7 @@
* XXX I need to revisit this
*/
if ((err = t3_set_cong_control(so, name)) == 0) {
-#ifdef notyet
+#ifdef CONGESTION_CONTROL_SUPPORTED
tp->t_cong_control = strdup(name, M_CXGB);
#endif
} else
@@ -1743,9 +1743,7 @@
"tcb_rpl_as_ddp_complete: seq 0x%x hwbuf %u lskb->len %u",
m->m_seq, q->cur_buf, m->m_pkthdr.len);
#endif
-#ifdef notyet
- __skb_queue_tail(&sk->sk_receive_queue, skb);
-#endif
+ sbappend(&so->so_rcv, m);
if (__predict_true((so->so_state & SS_NOFDREF) == 0))
sorwakeup(so);
}
@@ -1772,6 +1770,7 @@
handle_ddp_data(struct toepcb *toep, struct mbuf *m)
{
struct tcpcb *tp = toep->tp_tp;
+ struct socket *so;
struct ddp_state *q;
struct ddp_buf_state *bsp;
struct cpl_rx_data *hdr = cplhdr(m);
@@ -1802,10 +1801,11 @@
if (!(bsp->flags & DDP_BF_NOFLIP))
q->cur_buf ^= 1;
tp->t_rcvtime = ticks;
-#ifdef notyet
- __skb_queue_tail(&sk->sk_receive_queue, skb);
-#endif
- /* For now, don't re-enable DDP after a connection fell out of DDP
+
+ so = toeptoso(toep);
+ sbappend(&so->so_rcv, m);
+ /*
+ * For now, don't re-enable DDP after a connection fell out of DDP
* mode.
*/
q->ubuf_ddp_ready = 0;
@@ -1848,7 +1848,7 @@
#endif
m_adj(m, sizeof(*hdr));
-#ifdef notyet
+#ifdef URGENT_DATA_SUPPORTED
/*
* We don't handle urgent data yet
*/
@@ -2172,9 +2172,7 @@
if (!(bsp->flags & DDP_BF_NOFLIP))
q->cur_buf ^= 1;
tp->t_rcvtime = ticks;
-#ifdef notyet
- __skb_queue_tail(&sk->sk_receive_queue, skb);
-#endif
+ sbappend(&so->so_rcv, m);
if (__predict_true((so->so_state & SS_NOFDREF) == 0))
sorwakeup(so);
return (1);
@@ -3433,14 +3431,10 @@
fixup_and_send_ofo(so);
if (__predict_false(so->so_state & SS_NOFDREF)) {
-#ifdef notyet
- /*
- * XXX not clear what should be done here
- * appears to correspond to sorwakeup_locked
+ /*
+ * XXX does this even make sense?
*/
- sk->sk_state_change(sk);
- sk_wake_async(so, 0, POLL_OUT);
-#endif
+ sorwakeup(so);
}
m_free(m);
#ifdef notyet
@@ -3817,12 +3811,10 @@
sizeof(*getreq);
m = m_gethdr_nofail(wrlen);
m_set_priority(m, mkprio(CPL_PRIORITY_CONTROL, toep));
-#ifdef notyet
- wr = (struct work_request_hdr *)__skb_put(skb, wrlen);
+ wr = mtod(m, struct work_request_hdr *);
wr->wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS));
-#else
- wr = mtod(m, struct work_request_hdr *);
-#endif
+ m->m_pkthdr.len = m->m_len = sizeof(wrlen);
+
lock = (struct cpl_barrier *)(wr + 1);
mk_cpl_barrier_ulp(lock);
==== //depot/projects/toehead/sys/dev/cxgb/ulp/tom/cxgb_cpl_socket.c#9 (text+ko) ====
@@ -255,7 +255,7 @@
static int
so_should_ddp(const struct toepcb *toep, int last_recv_len)
{
- return toep->tp_ulp_mode == ULP_MODE_TCPDDP && (toep->tp_ddp_state.ubuf == NULL) &&
+ return toep->tp_ulp_mode == ULP_MODE_TCPDDP && (toep->tp_ddp_state.kbuf[0] == NULL) &&
last_recv_len > TOM_TUNABLE(toep->tp_toedev, ddp_thres) &&
toep->tp_tp->rcv_wnd >
(TOM_TUNABLE(toep->tp_toedev, ddp_copy_limit) + DDP_RSVD_WIN);
@@ -575,8 +575,8 @@
if (p->ubuf && user_ddp_ok && !user_ddp_pending &&
uio->uio_iov->iov_len > p->kbuf[0]->dgl_length &&
p->ubuf_ddp_ready) {
- user_ddp_pending =
- !t3_overlay_ubuf(so, uio, (so->so_state & SS_NBIO), flags, 1, 1);
+ user_ddp_pending =
+ !t3_overlay_ubuf(so, uio, (so->so_state & SS_NBIO), flags);
if (user_ddp_pending) {
p->kbuf_posted++;
user_ddp_ok = 0;
@@ -612,7 +612,7 @@
offset = toep->tp_copied_seq - m->m_seq;
if (offset > m->m_pkthdr.len)
panic("t3_soreceive: BUG: OFFSET > LEN seq 0x%x "
- "skb->len %d flags 0x%x", m->m_seq,
+ "pktlen %d ddp flags 0x%x", m->m_seq,
m->m_pkthdr.len, m->m_ddp_flags);
avail = m->m_pkthdr.len - offset;
if (len < avail) {
@@ -620,7 +620,7 @@
panic("bad state in t3_soreceive\n");
avail = len;
}
-#ifdef notyet
+#ifdef URGENT_DATA_SUPPORTED
/*
* Check if the data we are preparing to copy contains urgent
* data. Either stop short of urgent data or skip it if it's
@@ -658,7 +658,7 @@
uio->uio_iov->iov_len > p->kbuf[0]->dgl_length &&
p->ubuf_ddp_ready) {
user_ddp_pending =
- !t3_overlay_ubuf(so, uio, (so->so_state & SS_NBIO), flags, 1, 1);
+ !t3_overlay_ubuf(so, uio, (so->so_state & SS_NBIO), flags);
if (user_ddp_pending) {
p->kbuf_posted++;
user_ddp_ok = 0;
@@ -679,7 +679,7 @@
toep->tp_copied_seq += avail;
copied += avail;
len -= avail;
-#ifdef notyet
+#ifdef URGENT_DATA_SUPPORTED
skip_copy:
if (tp->urg_data && after(tp->copied_seq, tp->urg_seq))
tp->urg_data = 0;
@@ -704,7 +704,7 @@
}
}
sbfree(&so->so_rcv, m);
- m = so->so_rcv.sb_mb = m_free(m);
+ m = so->so_rcv.sb_mb = m_free(m); /* XXX need to clean mbuf first */
buffers_freed++;
if ((so->so_rcv.sb_mb == NULL) && got_psh)
@@ -739,6 +739,7 @@
t3_post_kbuf(so, 1);
p->kbuf_posted++;
} else if (so_should_ddp(toep, copied)) {
+ printf("entering ddp\n");
t3_enter_ddp(so, TOM_TUNABLE(TOE_DEV(so),
ddp_copy_limit), 0);
p->kbuf_posted = 1;
==== //depot/projects/toehead/sys/dev/cxgb/ulp/tom/cxgb_ddp.c#3 (text+ko) ====
@@ -125,22 +125,21 @@
unsigned int npages;
struct ddp_gather_list *p;
- if (addr >= VM_MAXUSER_ADDRESS)
- return (EINVAL);
-#if 0
- if (!access_ok(VERIFY_WRITE, addr, len))
+ /*
+ * XXX need x86 agnostic check
+ */
+ if (addr + len > VM_MAXUSER_ADDRESS)
return (EFAULT);
-#endif
+
pg_off = addr & ~PAGE_MASK;
npages = (pg_off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
p = malloc(sizeof(struct ddp_gather_list) + npages * sizeof(vm_page_t *),
M_DEVBUF, M_NOWAIT);
- if (!p)
+ if (p == NULL)
return (ENOMEM);
-
err = vm_fault_hold_user_pages(addr, p->dgl_pages, npages, VM_HOLD_WRITEABLE);
-
+ printf("held pages\n");
if (err)
goto free_gl;
@@ -157,7 +156,7 @@
p->dgl_length = len;
p->dgl_offset = pg_off;
p->dgl_nelem = npages;
-#ifdef notyet
+#ifdef NEED_BUSDMA
p->phys_addr[0] = pci_map_page(pdev, p->pages[0], pg_off,
PAGE_SIZE - pg_off,
PCI_DMA_FROMDEVICE) - pg_off;
@@ -165,7 +164,6 @@
p->phys_addr[i] = pci_map_page(pdev, p->pages[i], 0, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
#endif
-
*newgl = p;
return 0;
unpin:
@@ -180,7 +178,7 @@
static void
unmap_ddp_gl(const struct ddp_gather_list *gl)
{
-#ifdef notyet
+#ifdef NEED_BUSDMA
int i;
if (!gl->nelem)
@@ -198,15 +196,10 @@
static void
ddp_gl_free_pages(struct ddp_gather_list *gl, int dirty)
{
-#ifdef notyet
- int i;
-
- for (i = 0; i < gl->nelem; ++i) {
- if (dirty)
- set_page_dirty_lock(gl->pages[i]);
- put_page(gl->pages[i]);
- }
-#endif
+ /*
+ * XXX need to be able to
+ */
+ vm_fault_unhold_pages(gl->dgl_pages, gl->dgl_nelem);
}
void
@@ -236,9 +229,7 @@
npages = ((addr & ~PAGE_MASK) + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
nppods = min(pages2ppods(npages), MAX_PPODS);
-#ifdef notyet
- nppods = ALIGN(nppods, PPOD_CLUSTER_SIZE);
-#endif
+ nppods = roundup2(nppods, PPOD_CLUSTER_SIZE);
err = t3_alloc_ppods(d, nppods, &tag);
if (err && nppods > PPOD_CLUSTER_SIZE) {
nppods = PPOD_CLUSTER_SIZE;
@@ -255,8 +246,6 @@
return (0);
}
-
-
/*
* Reposts the kernel DDP buffer after it has been previously become full and
* invalidated. We just need to reset the offset and adjust the DDP flags.
@@ -267,7 +256,7 @@
*/
static void
t3_repost_kbuf(struct socket *so, unsigned int bufidx, int modulate,
- int activate)
+ int activate)
{
struct toepcb *toep = sototcpcb(so)->t_toe;
struct ddp_state *p = &toep->tp_ddp_state;
@@ -278,14 +267,14 @@
p->cur_buf = bufidx;
p->kbuf_idx = bufidx;
if (!bufidx)
- t3_setup_ddpbufs(toep, 0, 0, 0, 0,
+ t3_setup_ddpbufs(toep, 0, 0, 0, 0,
V_TF_DDP_PSH_NO_INVALIDATE(p->kbuf_noinval) |
V_TF_DDP_BUF0_VALID(1),
V_TF_DDP_PSH_NO_INVALIDATE(1) | V_TF_DDP_OFF(1) |
V_TF_DDP_BUF0_VALID(1) |
V_TF_DDP_ACTIVE_BUF(activate), modulate);
else
- t3_setup_ddpbufs(toep, 0, 0, 0, 0,
+ t3_setup_ddpbufs(toep, 0, 0, 0, 0,
V_TF_DDP_PSH_NO_INVALIDATE(p->kbuf_noinval) |
V_TF_DDP_BUF1_VALID(1) |
V_TF_DDP_ACTIVE_BUF(activate),
@@ -426,12 +415,13 @@
*/
int
t3_overlay_ubuf(struct socket *so, const struct uio *uio,
- int nonblock, int rcv_flags, int modulate, int post_kbuf)
+ int nonblock, int rcv_flags)
{
int err, len, ubuf_idx;
unsigned long flags;
struct toepcb *toep = sototcpcb(so)->t_toe;
struct ddp_state *p = &toep->tp_ddp_state;
+ struct ddp_buf_state *dbs;
if (p->ubuf == NULL)
return (EINVAL);
@@ -448,17 +438,15 @@
flags = select_ddp_flags(so, ubuf_idx, nonblock, rcv_flags);
- if (post_kbuf) {
- struct ddp_buf_state *dbs = &p->buf_state[ubuf_idx ^ 1];
+ dbs = &p->buf_state[ubuf_idx ^ 1];
- dbs->cur_offset = 0;
- dbs->flags = 0;
- dbs->gl = p->kbuf[ubuf_idx ^ 1];
- p->kbuf_idx ^= 1;
- flags |= p->kbuf_idx ?
- V_TF_DDP_BUF1_VALID(1) | V_TF_DDP_PUSH_DISABLE_1(0) :
- V_TF_DDP_BUF0_VALID(1) | V_TF_DDP_PUSH_DISABLE_0(0);
- }
+ dbs->cur_offset = 0;
+ dbs->flags = 0;
+ dbs->gl = p->kbuf[ubuf_idx ^ 1];
+ p->kbuf_idx ^= 1;
+ flags |= p->kbuf_idx ?
+ V_TF_DDP_BUF1_VALID(1) | V_TF_DDP_PUSH_DISABLE_1(0) :
+ V_TF_DDP_BUF0_VALID(1) | V_TF_DDP_PUSH_DISABLE_0(0);
if (ubuf_idx == 0) {
t3_overlay_ddpbuf(toep, 0, p->ubuf_tag << 6, p->kbuf_tag[1] << 6,
@@ -557,7 +545,8 @@
int
t3_enter_ddp(struct socket *so, unsigned int kbuf_size, unsigned int waitall)
{
- int err = ENOMEM;
+ int i, err = ENOMEM;
+ static vm_pindex_t color;
unsigned int nppods, kbuf_pages, idx = 0;
struct toepcb *toep = sototcpcb(so)->t_toe;
struct ddp_state *p = &toep->tp_ddp_state;
@@ -576,7 +565,7 @@
p->kbuf[idx] =
malloc(sizeof (struct ddp_gather_list) + kbuf_pages *
sizeof(vm_page_t *), M_DEVBUF, M_NOWAIT|M_ZERO);
- if (!p->kbuf[idx])
+ if (p->kbuf[idx] == NULL)
goto err;
err = t3_alloc_ppods(d, nppods, &p->kbuf_tag[idx]);
if (err)
@@ -586,20 +575,21 @@
p->kbuf[idx]->dgl_length = kbuf_size;
p->kbuf[idx]->dgl_offset = 0;
p->kbuf[idx]->dgl_nelem = kbuf_pages;
-#ifdef notyet
- p->kbuf[idx]->pages =
- (struct page **)&p->kbuf[idx]->phys_addr[kbuf_pages];
-
+
for (i = 0; i < kbuf_pages; ++i) {
-
- p->kbuf[idx]->pages[i] = alloc_page(sk->sk_allocation);
- if (!p->kbuf[idx]->pages[i]) {
- p->kbuf[idx]->nelem = i;
+ p->kbuf[idx]->dgl_pages[i] = vm_page_alloc(NULL, color,
+ VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL | VM_ALLOC_WIRED |
+ VM_ALLOC_ZERO);
+ if (p->kbuf[idx]->dgl_pages[i] == NULL) {
+ p->kbuf[idx]->dgl_nelem = i;
goto err;
}
-
}
-
+#ifdef NEED_BUSDMA
+ /*
+ * XXX we'll need this for VT-d or any platform with an iommu :-/
+ *
+ */
for (i = 0; i < kbuf_pages; ++i)
p->kbuf[idx]->phys_addr[i] =
pci_map_page(p->pdev, p->kbuf[idx]->pages[i],
==== //depot/projects/toehead/sys/dev/cxgb/ulp/tom/cxgb_t3_ddp.h#8 (text+ko) ====
@@ -160,7 +160,7 @@
int rcv_flags, int modulate, int post_kbuf);
void t3_cancel_ubuf(struct toepcb *toep);
int t3_overlay_ubuf(struct socket *so, const struct uio *uio, int nonblock,
- int rcv_flags, int modulate, int post_kbuf);
+ int rcv_flags);
int t3_enter_ddp(struct socket *so, unsigned int kbuf_size, unsigned int waitall);
void t3_cleanup_ddp(struct toepcb *toep);
void t3_release_ddp_resources(struct toepcb *toep);
More information about the p4-projects
mailing list