PERFORCE change 164811 for review

Gleb Kurtsou gk at FreeBSD.org
Sun Jun 21 14:08:44 UTC 2009


http://perforce.freebsd.org/chv.cgi?CH=164811

Change 164811 by gk at gk_h1 on 2009/06/21 14:07:54

	add rest of vops. support not block aligned read/write. handle file resize.
	rename to be more consistent, cleanup

Affected files ...

.. //depot/projects/soc2009/gk_pefs/sys/fs/pefs/pefs.h#3 edit
.. //depot/projects/soc2009/gk_pefs/sys/fs/pefs/pefs_subr.c#3 edit
.. //depot/projects/soc2009/gk_pefs/sys/fs/pefs/pefs_vfsops.c#2 edit
.. //depot/projects/soc2009/gk_pefs/sys/fs/pefs/pefs_vnops.c#3 edit
.. //depot/projects/soc2009/gk_pefs/sys/fs/pefs/pefs_xbase64.c#2 edit
.. //depot/projects/soc2009/gk_pefs/sys/modules/pefs/Makefile#3 edit

Differences ...

==== //depot/projects/soc2009/gk_pefs/sys/fs/pefs/pefs.h#3 (text+ko) ====

@@ -39,62 +39,81 @@
 
 #include <sys/uio.h>
 
-struct pe_mount {
-	struct mount	*pem_vfs;
-	struct vnode	*pem_rootvp;	/* Reference to root pe_node */
+struct pefs_mount {
+	struct mount	*pm_vfs;
+	struct vnode	*pm_rootvp;	/* Reference to root pefs_node */
 };
 
 #ifdef _KERNEL
+
+#define PEFS_BLOCK		16
+
 /*
  * A cache of vnode references
  */
-struct pe_node {
-	LIST_ENTRY(pe_node)	pe_hash;	/* Hash list */
-	struct vnode	        *pe_lowervp;	/* VREFed once */
-	struct vnode		*pe_vnode;	/* Back pointer */
+struct pefs_node {
+	LIST_ENTRY(pefs_node)	pn_hash;	/* Hash list */
+	struct vnode	        *pn_lowervp;	/* VREFed once */
+	struct vnode		*pn_vnode;	/* Back pointer */
 };
 
-struct pe_xuio {
-	struct uio xuio_uio;
-	void **xuio_bases;
+struct pefs_chunk {
+	int pc_iovcnt;
+	int pc_basescnt;
+	struct iovec *pc_iov;
+	int pc_size;
+	void **pc_bases;
+	struct uio pc_uio;
 };
 
-typedef void (*pe_tr)(void *mem, size_t size, void *arg);
+typedef void (*pefs_tr)(void *mem, size_t size, void *arg);
 
-#define	MOUNTTOPEMOUNT(mp) ((struct pe_mount *)((mp)->mnt_data))
-#define	VTOPE(vp) ((struct pe_node *)(vp)->v_data)
-#define	PETOV(xp) ((xp)->pe_vnode)
+#define	MOUNTTOPEMOUNT(mp) ((struct pefs_mount *)((mp)->mnt_data))
+#define	VTOPE(vp) ((struct pefs_node *)(vp)->v_data)
+#define	PETOV(xp) ((xp)->pn_vnode)
 
 struct vfsconf;
 struct vop_generic_args;
 
 int pefs_init(struct vfsconf *vfsp);
 int pefs_uninit(struct vfsconf *vfsp);
-int pe_nodeget(struct mount *mp, struct vnode *target, struct vnode **vpp);
-void pe_hashrem(struct pe_node *xp);
-int pe_bypass(struct vop_generic_args *ap);
-struct pe_xuio* pe_xuio_create(struct uio* uio);
-void pe_xuio_finish(struct pe_xuio* xuio, struct uio *src);
-void pe_xuio_transfrom(struct pe_xuio *xuio, struct uio *src, pe_tr tr, void *arg);
-int pe_name_encrypt(const char *plain, size_t plain_len, char *enc, size_t enc_size);
-int pe_name_decrypt(const char *enc, size_t enc_len, char *plain, size_t plain_size);
+int pefs_nodeget(struct mount *mp, struct vnode *target, struct vnode **vpp);
+void pefs_hashrem(struct pefs_node *xp);
+int pefs_bypass(struct vop_generic_args *ap);
+int pefs_name_encrypt(const char *plain, size_t plain_len, char *enc, size_t enc_size);
+int pefs_name_decrypt(const char *enc, size_t enc_len, char *plain, size_t plain_size);
+
+void pefs_xor(void *mem, size_t size);
+
+#define PEFS_NAME_NTOP_SIZE(a)		(((a) * 4 + 2)/3)
+#define PEFS_NAME_PTON_SIZE(a)		(((a) * 3)/4)
+int pefs_name_ntop(u_char const *src, size_t srclength, char *target, size_t targsize);
+int pefs_name_pton(char const *src, size_t srclen, u_char *target, size_t targsize);
 
-void pe_xor(void *mem, size_t size, void *arg);
+void pefs_xor_chunk(struct pefs_chunk *pc);
 
-int pe_b64_ntop(u_char const *src, size_t srclength, char *target, size_t targsize);
-int pe_b64_pton(char const *src, size_t srclen, u_char *target, size_t targsize);
+struct pefs_chunk* pefs_chunk_create(size_t size);
+void pefs_chunk_free(struct pefs_chunk* pc);
+struct uio* pefs_chunk_uio(struct pefs_chunk *pc, off_t uio_offset, enum uio_rw uio_rw);
+struct uio* pefs_chunk_uio_range(struct pefs_chunk *pc, size_t skip, size_t size, off_t uio_offset, enum uio_rw uio_rw);
+void* pefs_chunk_get(struct pefs_chunk *pc, size_t *size, long *_state);
+void pefs_chunk_zero(struct pefs_chunk *pc);
+int pefs_chunk_copy(struct pefs_chunk *pc, size_t skip, struct uio *uio);
+void pefs_chunk_crop(struct pefs_chunk *pc, size_t skip_begin, size_t skip_end);
+void pefs_chunk_shrink(struct pefs_chunk *pc, size_t size);
 
 #ifdef DIAGNOSTIC
-struct vnode *pe_checkvp(struct vnode *vp, char *fil, int lno);
-#define	PEVPTOLOWERVP(vp) pe_checkvp((vp), __FILE__, __LINE__)
+struct vnode *pefs_checkvp(struct vnode *vp, char *fil, int lno);
+#define	PEVPTOLOWERVP(vp) pefs_checkvp((vp), __FILE__, __LINE__)
 #else
-#define	PEVPTOLOWERVP(vp) (VTOPE(vp)->pe_lowervp)
+#define	PEVPTOLOWERVP(vp) (VTOPE(vp)->pn_lowervp)
 #endif
 
-extern struct vop_vector pe_vnodeops;
+extern struct vop_vector pefs_vnodeops;
 
 #ifdef MALLOC_DECLARE
 MALLOC_DECLARE(M_PEFSNODE);
+MALLOC_DECLARE(M_PEFSBUF);
 #endif
 
 #ifdef PEFS_DEBUG

==== //depot/projects/soc2009/gk_pefs/sys/fs/pefs/pefs_subr.c#3 (text+ko) ====

@@ -61,31 +61,27 @@
  * alias is removed the lower vnode is vrele'd.
  */
 
-#define	PE_NHASH(vp) \
-	(&pe_node_hashtbl[(((uintptr_t)vp)>>LOG2_SIZEVNODE) & pe_node_hash])
+#define	PEFS_NHASH(vp) \
+	(&pefs_node_hashtbl[(((uintptr_t)vp)>>LOG2_SIZEVNODE) & pefs_node_hash])
 
-static LIST_HEAD(pe_node_hashhead, pe_node) *pe_node_hashtbl;
-static u_long pe_node_hash;
-struct mtx pe_hashmtx;
+static LIST_HEAD(pefs_node_hashhead, pefs_node) *pefs_node_hashtbl;
+static u_long pefs_node_hash;
+struct mtx pefs_hashmtx;
 
 static MALLOC_DEFINE(M_PEFSHASH, "pefs_hash", "PEFS hash table");
 MALLOC_DEFINE(M_PEFSNODE, "pefs_node", "PEFS vnode private part");
-static MALLOC_DEFINE(M_PEFSBUF, "pefs_buf", "PEFS buffers");
+MALLOC_DEFINE(M_PEFSBUF, "pefs_buf", "PEFS buffers");
 
-static struct vnode * pe_hashget(struct mount *, struct vnode *);
-static struct vnode * pe_hashins(struct mount *, struct pe_node *);
-
 /*
  * Initialise cache headers
  */
 int
-pefs_init(vfsp)
-	struct vfsconf *vfsp;
+pefs_init(struct vfsconf *vfsp)
 {
 
 	PEFSDEBUG("pefs_init\n");		/* printed during system boot */
-	pe_node_hashtbl = hashinit(NPENODECACHE, M_PEFSHASH, &pe_node_hash);
-	mtx_init(&pe_hashmtx, "pehs", NULL, MTX_DEF);
+	pefs_node_hashtbl = hashinit(NPENODECACHE, M_PEFSHASH, &pefs_node_hash);
+	mtx_init(&pefs_hashmtx, "pehs", NULL, MTX_DEF);
 	return (0);
 }
 
@@ -94,8 +90,8 @@
 	struct vfsconf *vfsp;
 {
 
-	mtx_destroy(&pe_hashmtx);
-	free(pe_node_hashtbl, M_PEFSHASH);
+	mtx_destroy(&pefs_hashmtx);
+	free(pefs_node_hashtbl, M_PEFSHASH);
 	return (0);
 }
 
@@ -104,26 +100,24 @@
  * Lower vnode should be locked on entry and will be left locked on exit.
  */
 static struct vnode *
-pe_hashget(mp, lowervp)
-	struct mount *mp;
-	struct vnode *lowervp;
+pefs_hashget(struct mount *mp, struct vnode *lowervp)
 {
-	struct pe_node_hashhead *hd;
-	struct pe_node *a;
+	struct pefs_node_hashhead *hd;
+	struct pefs_node *a;
 	struct vnode *vp;
 
-	ASSERT_VOP_LOCKED(lowervp, "pe_hashget");
+	ASSERT_VOP_LOCKED(lowervp, "pefs_hashget");
 
 	/*
 	 * Find hash base, and then search the (two-way) linked
-	 * list looking for a pe_node structure which is referencing
-	 * the lower vnode.  If found, the increment the pe_node
+	 * list looking for a pefs_node structure which is referencing
+	 * the lower vnode.  If found, the increment the pefs_node
 	 * reference count (but NOT the lower vnode's VREF counter).
 	 */
-	hd = PE_NHASH(lowervp);
-	mtx_lock(&pe_hashmtx);
-	LIST_FOREACH(a, hd, pe_hash) {
-		if (a->pe_lowervp == lowervp && PETOV(a)->v_mount == mp) {
+	hd = PEFS_NHASH(lowervp);
+	mtx_lock(&pefs_hashmtx);
+	LIST_FOREACH(a, hd, pn_hash) {
+		if (a->pn_lowervp == lowervp && PETOV(a)->v_mount == mp) {
 			/*
 			 * Since we have the lower node locked the pefs
 			 * node can not be in the process of recycling.  If
@@ -132,49 +126,47 @@
 			 */
 			vp = PETOV(a);
 			vref(vp);
-			mtx_unlock(&pe_hashmtx);
+			mtx_unlock(&pefs_hashmtx);
 			return (vp);
 		}
 	}
-	mtx_unlock(&pe_hashmtx);
+	mtx_unlock(&pefs_hashmtx);
 	return (NULLVP);
 }
 
 /*
- * Act like pe_hashget, but add passed pe_node to hash if no existing
+ * Act like pefs_hashget, but add passed pefs_node to hash if no existing
  * node found.
  */
 static struct vnode *
-pe_hashins(mp, xp)
-	struct mount *mp;
-	struct pe_node *xp;
+pefs_hashins(struct mount *mp, struct pefs_node *xp)
 {
-	struct pe_node_hashhead *hd;
-	struct pe_node *oxp;
+	struct pefs_node_hashhead *hd;
+	struct pefs_node *oxp;
 	struct vnode *ovp;
 
-	hd = PE_NHASH(xp->pe_lowervp);
-	mtx_lock(&pe_hashmtx);
-	LIST_FOREACH(oxp, hd, pe_hash) {
-		if (oxp->pe_lowervp == xp->pe_lowervp &&
+	hd = PEFS_NHASH(xp->pn_lowervp);
+	mtx_lock(&pefs_hashmtx);
+	LIST_FOREACH(oxp, hd, pn_hash) {
+		if (oxp->pn_lowervp == xp->pn_lowervp &&
 		    PETOV(oxp)->v_mount == mp) {
 			/*
-			 * See pe_hashget for a description of this
+			 * See pefs_hashget for a description of this
 			 * operation.
 			 */
 			ovp = PETOV(oxp);
 			vref(ovp);
-			mtx_unlock(&pe_hashmtx);
+			mtx_unlock(&pefs_hashmtx);
 			return (ovp);
 		}
 	}
-	LIST_INSERT_HEAD(hd, xp, pe_hash);
-	mtx_unlock(&pe_hashmtx);
+	LIST_INSERT_HEAD(hd, xp, pn_hash);
+	mtx_unlock(&pefs_hashmtx);
 	return (NULLVP);
 }
 
 static void
-pe_insmntque_dtr(struct vnode *vp, void *xp)
+pefs_insmntque_dtr(struct vnode *vp, void *xp)
 {
 	vp->v_data = NULL;
 	vp->v_vnlock = &vp->v_lock;
@@ -194,17 +186,14 @@
  * the caller's "spare" reference to created pefs vnode.
  */
 int
-pe_nodeget(mp, lowervp, vpp)
-	struct mount *mp;
-	struct vnode *lowervp;
-	struct vnode **vpp;
+pefs_nodeget(struct mount *mp, struct vnode *lowervp, struct vnode **vpp)
 {
-	struct pe_node *xp;
+	struct pefs_node *xp;
 	struct vnode *vp;
 	int error;
 
 	/* Lookup the hash firstly */
-	*vpp = pe_hashget(mp, lowervp);
+	*vpp = pefs_hashget(mp, lowervp);
 	if (*vpp != NULL) {
 		vrele(lowervp);
 		return (0);
@@ -223,34 +212,34 @@
 	 * might cause a bogus v_data pointer to get dereferenced
 	 * elsewhere if MALLOC should block.
 	 */
-	xp = malloc(sizeof(struct pe_node),
+	xp = malloc(sizeof(struct pefs_node),
 	    M_PEFSNODE, M_WAITOK);
 
-	error = getnewvnode("null", mp, &pe_vnodeops, &vp);
+	error = getnewvnode("pefs", mp, &pefs_vnodeops, &vp);
 	if (error) {
 		free(xp, M_PEFSNODE);
 		return (error);
 	}
 
-	xp->pe_vnode = vp;
-	xp->pe_lowervp = lowervp;
+	xp->pn_vnode = vp;
+	xp->pn_lowervp = lowervp;
 	vp->v_type = lowervp->v_type;
 	vp->v_data = xp;
 	vp->v_vnlock = lowervp->v_vnlock;
 	if (vp->v_vnlock == NULL)
-		panic("pe_nodeget: Passed a NULL vnlock.\n");
-	error = insmntque1(vp, mp, pe_insmntque_dtr, xp);
+		panic("pefs_nodeget: Passed a NULL vnlock.\n");
+	error = insmntque1(vp, mp, pefs_insmntque_dtr, xp);
 	if (error != 0)
 		return (error);
 	/*
 	 * Atomically insert our new node into the hash or vget existing
 	 * if someone else has beaten us to it.
 	 */
-	*vpp = pe_hashins(mp, xp);
+	*vpp = pefs_hashins(mp, xp);
 	if (*vpp != NULL) {
 		vrele(lowervp);
 		vp->v_vnlock = &vp->v_lock;
-		xp->pe_lowervp = NULL;
+		xp->pn_lowervp = NULL;
 		vrele(vp);
 		return (0);
 	}
@@ -263,17 +252,16 @@
  * Remove node from hash.
  */
 void
-pe_hashrem(xp)
-	struct pe_node *xp;
+pefs_hashrem(struct pefs_node *xp)
 {
 
-	mtx_lock(&pe_hashmtx);
-	LIST_REMOVE(xp, pe_hash);
-	mtx_unlock(&pe_hashmtx);
+	mtx_lock(&pefs_hashmtx);
+	LIST_REMOVE(xp, pn_hash);
+	mtx_unlock(&pefs_hashmtx);
 }
 
 void
-pe_xor(void *mem, size_t size, void *arg)
+pefs_xor(void *mem, size_t size)
 {
 	char *b = (char *)mem;
 	char *e = b + size;
@@ -283,8 +271,23 @@
 	}
 }
 
+void
+pefs_xor_chunk(struct pefs_chunk *pc)
+{
+	long arg = 0;
+	char *mem;
+	size_t size;
+
+	while (1) {
+		mem = pefs_chunk_get(pc, &size, &arg);
+		if (mem == NULL)
+			break;
+		pefs_xor(mem, size);
+	}
+}
+
 int
-pe_name_encrypt(const char *plain, size_t plain_len, char *enc, size_t enc_size)
+pefs_name_encrypt(const char *plain, size_t plain_len, char *enc, size_t enc_size)
 {
 	char b_static[64];
 	char *b;
@@ -298,123 +301,236 @@
 	// FIXME
 	for (int i = 0; i < plain_len; i++)
 		b[i] ^= 0xAA;
-	r = pe_b64_ntop(b, plain_len, enc, enc_size);
-	printf("pe_name_encrypt: %d; %.*s => %.*s\n", r, plain_len, plain, r, enc);
+	r = pefs_name_ntop(b, plain_len, enc, enc_size);
+	// printf("pefs_name_encrypt: %d; %.*s => %.*s\n", r, plain_len, plain, r, enc);
 	if (b != b_static)
 		free(b, M_PEFSBUF);
 	return r;
 }
 
 int
-pe_name_decrypt(const char *enc, size_t enc_len, char *plain, size_t plain_size)
+pefs_name_decrypt(const char *enc, size_t enc_len, char *plain, size_t plain_size)
 {
 	int r;
 
-	r = pe_b64_pton(enc, enc_len, plain, plain_size);
+	r = pefs_name_pton(enc, enc_len, plain, plain_size);
 	if (r > 0) {
 		// FIXME
 		for (int i = 0; i < r; i++)
 			plain[i] ^= 0xAA;
 	}
-	printf("pe_name_decrypt: %d; %.*s => %.*s\n", r, enc_len, enc, r < 0 ? 0 : r, plain);
+	if (r >= 0 && (size_t)r < plain_size)
+		plain[r] = '\0';
+	// printf("pefs_name_decrypt: %d; %.*s => %.*s\n", r, enc_len, enc, r < 0 ? 0 : r, plain);
 	return r;
 }
 
-struct pe_xuio*
-pe_xuio_create(struct uio* uio)
+struct pefs_chunk*
+pefs_chunk_create(size_t size)
+{
+	struct pefs_chunk *pc;
+	int iovcnt;
+
+	iovcnt = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+	PEFSDEBUG("%s: length=%zu; iovcnt=%d\n", __func__, size, iovcnt);
+	pc = malloc(sizeof(struct pefs_chunk) + (sizeof(void*) + sizeof(struct iovec) * 2) * iovcnt,
+			M_PEFSBUF, M_WAITOK | M_ZERO);
+
+	pc->pc_size = size;
+	pc->pc_iovcnt = iovcnt;
+	pc->pc_basescnt = iovcnt;
+	pc->pc_bases = (void **)(pc + 1);
+	pc->pc_iov = (struct iovec *)(pc->pc_bases + iovcnt);
+	pc->pc_uio.uio_iov = (struct iovec *)(pc->pc_iov + iovcnt);
+
+	for (int i = 0; i < iovcnt && size > 0; i++) {
+		int len = imin(PAGE_SIZE, size);
+		pc->pc_iov[i].iov_len = len;
+		pc->pc_bases[i] = malloc(len, M_PEFSBUF, M_WAITOK | M_ZERO);
+		pc->pc_iov[i].iov_base = pc->pc_bases[i];
+		size -= len;
+	}
+	if (size != 0)
+		panic("pefs_chunk initialization error\n");
+
+	return (pc);
+}
+
+void
+pefs_chunk_free(struct pefs_chunk* pc)
+{
+	for (int i = 0; i < pc->pc_basescnt; i++) {
+		free(pc->pc_bases[i], M_PEFSBUF);
+	}
+	free(pc, M_PEFSBUF);
+}
+
+struct uio*
+pefs_chunk_uio(struct pefs_chunk *pc, off_t uio_offset, enum uio_rw uio_rw)
+{
+	return pefs_chunk_uio_range(pc, 0, pc->pc_size, uio_offset, uio_rw);
+}
+
+struct uio*
+pefs_chunk_uio_range(struct pefs_chunk *pc, size_t skip, size_t size, off_t uio_offset, enum uio_rw uio_rw)
 {
-	struct pe_xuio *xuio;
-	size_t len;
-	int iov_cnt;
+	int skip_iov, i;
 
-	len = 0;
-	for (int i = 0; i < uio->uio_iovcnt; i++) {
-		len += uio->uio_iov[i].iov_len;
+	KASSERT(pc->pc_size >= size + skip, ("invalid range value"));
+	/* skip */
+	for (skip_iov = 0; skip_iov < pc->pc_iovcnt && skip; skip_iov++) {
+		if (skip < pc->pc_iov[skip_iov].iov_len)
+			break;
+		else
+			skip -= pc->pc_iov[skip_iov].iov_len;
 	}
 
-	iov_cnt = (len + PAGE_SIZE - 1) / PAGE_SIZE;
-	xuio = malloc(sizeof(struct pe_xuio) + (sizeof(void*) + sizeof(struct iovec)) * iov_cnt,
-			M_PEFSBUF, M_WAITOK);
-	xuio->xuio_uio = *uio;
-	xuio->xuio_uio.uio_segflg = UIO_SYSSPACE;
-	xuio->xuio_uio.uio_iov = (struct iovec *)(xuio + 1);
-	xuio->xuio_bases = (void **)(xuio->xuio_uio.uio_iov + iov_cnt);
-	for (int i = 0; i < iov_cnt; i++) {
-		xuio->xuio_uio.uio_iov[i].iov_len = imin(PAGE_SIZE, len);
-		len -= PAGE_SIZE;
-		xuio->xuio_bases[i] = xuio->xuio_uio.uio_iov[i].iov_base =
-			malloc(xuio->xuio_uio.uio_iov[i].iov_len, M_PEFSBUF, M_WAITOK);
+	pc->pc_uio.uio_offset = uio_offset;
+	pc->pc_uio.uio_resid = size;
+	pc->pc_uio.uio_rw = uio_rw;
+	pc->pc_uio.uio_segflg = UIO_SYSSPACE;
+	pc->pc_uio.uio_td = curthread;
+
+	for (i = 0; skip_iov + i < pc->pc_iovcnt && size; i++) {
+		pc->pc_uio.uio_iov[i].iov_base = (char *)pc->pc_iov[skip_iov + i].iov_base + skip;
+		pc->pc_uio.uio_iov[i].iov_len = min(size, pc->pc_iov[skip_iov + i].iov_len - skip);
+		size -= pc->pc_uio.uio_iov[i].iov_len;
+		skip = 0;
+		printf("%s: creating iov: i=%d; len=%d; base=%p\n", __func__, i,
+				pc->pc_uio.uio_iov[i].iov_len, pc->pc_uio.uio_iov[i].iov_base);
 	}
+	pc->pc_uio.uio_iovcnt = i;
+
+	return (&pc->pc_uio);
+}
 
-	return (xuio);
+void*
+pefs_chunk_get(struct pefs_chunk *pc, size_t *size, long *state)
+{
+	void *mem;
+
+	if (!size || !state || *state < 0)
+		panic("invalid args");
+	if (*state >= pc->pc_iovcnt) {
+		*size = 0;
+		return (NULL);
+	}
+	mem = pc->pc_iov[*state].iov_base;
+	*size = pc->pc_iov[*state].iov_len;
+	(*state)++;
+	return (mem);
 }
 
 void
-pe_xuio_finish(struct pe_xuio* xuio, struct uio *src)
+pefs_chunk_zero(struct pefs_chunk *pc)
+{
+	int i;
+
+	for (i = 0; i < pc->pc_iovcnt; i++) {
+		bzero(pc->pc_iov[i].iov_base, pc->pc_iov[i].iov_len);
+	}
+}
+
+int
+pefs_chunk_copy(struct pefs_chunk *pc, size_t skip, struct uio *uio)
 {
-	src->uio_offset = xuio->xuio_uio.uio_offset;
-	src->uio_resid = xuio->xuio_uio.uio_resid;
-	for (int i = 0; i < xuio->xuio_uio.uio_iovcnt; i++) {
-		free(xuio->xuio_bases[i], M_PEFSBUF);
+	int i;
+	int error;
+
+	KASSERT(pc->pc_size >= skip, ("invalid range value"));
+	/* skip */
+	for (i = 0; i < pc->pc_iovcnt && skip; i++) {
+		if (skip < pc->pc_iov[i].iov_len)
+			break;
+		else
+			skip -= pc->pc_iov[i].iov_len;
+	}
+
+	for (; i < pc->pc_iovcnt && uio->uio_resid; i++) {
+		error = uiomove((char *)pc->pc_iov[i].iov_base + skip, pc->pc_iov[i].iov_len - skip, uio);
+		if (error)
+			return (error);
+		skip = 0;
 	}
-	free(xuio, M_PEFSBUF);
+	return (0);
 }
 
 void
-pe_xuio_transfrom(struct pe_xuio *_xuio, struct uio *src, pe_tr tr, void *arg)
+pefs_chunk_crop(struct pefs_chunk *pc, size_t skip_begin, size_t skip_end)
 {
-	struct uio *xuio = &_xuio->xuio_uio;
+
+	KASSERT(pc->pc_size >= skip_begin + skip_end, ("invalid range value"));
+
+	if (skip_begin + skip_end == 0)
+		return;
+
+	PEFSDEBUG("%s: skip_begin=%zu; skip_end=%zu; size=%d\n", __func__,
+			skip_begin, skip_end, pc->pc_size);
+
+	for (int i = 0; i < pc->pc_iovcnt && skip_begin; i++) {
+		struct iovec *iov = &pc->pc_iov[i];
 
-	KASSERT(xuio->uio_segflg == UIO_SYSSPACE && xuio->uio_rw == src->uio_rw,
-	    ("pe_uio_transfrom"));
-	for (int i = 0; i < xuio->uio_iovcnt; i++) {
-		int n;
-		void *base = xuio->uio_iov[i].iov_base;
+		if (skip_begin < iov->iov_len) {
+			pc->pc_size -= skip_begin;
+			iov->iov_base = (char *)iov->iov_base + skip_begin;
+			iov->iov_len -= skip_begin;
+			skip_begin = 0;
+			break;
+		} else {
+			pc->pc_size -= iov->iov_len;
+			skip_begin -= iov->iov_len;
+			pc->pc_iovcnt--;
+			i--;
+			pc->pc_iov++;
+		}
+	}
+	for (int i = pc->pc_iovcnt - 1; i >= 0 && skip_end; i--) {
+		struct iovec *iov = &pc->pc_iov[i];
 
-		if (xuio->uio_rw == UIO_READ) {
-			n = src->uio_iov[i].iov_len - xuio->uio_iov[i].iov_len;
-			base = ((caddr_t) base) - n;
+		if (skip_end < iov->iov_len) {
+			pc->pc_size -= skip_end;
+			iov->iov_len -= skip_end;
+			skip_end = 0;
+			break;
 		} else {
-			n = src->uio_iov[i].iov_len;
+			pc->pc_size -= iov->iov_len;
+			skip_end -= iov->iov_len;
+			pc->pc_iovcnt--;
 		}
-		if (xuio->uio_rw == UIO_WRITE)
-			uiomove(base, n, src);
-		if (tr)
-			tr(base, n, arg);
-		if (xuio->uio_rw == UIO_READ)
-			uiomove(base, n, src);
 	}
 }
 
+void
+pefs_chunk_shrink(struct pefs_chunk *pc, size_t size)
+{
+	pefs_chunk_crop(pc, 0, pc->pc_size - size);
+}
 
 #ifdef DIAGNOSTIC
 
 #ifdef KDB
-#define	pe_checkvp_barrier	1
+#define	pefs_checkvp_barrier	1
 #else
-#define	pe_checkvp_barrier	0
+#define	pefs_checkvp_barrier	0
 #endif
 
 struct vnode *
-pe_checkvp(vp, fil, lno)
-	struct vnode *vp;
-	char *fil;
-	int lno;
+pefs_checkvp(struct vnode *vp, char *fil, int lno)
 {
 	int interlock = 0;
-	struct pe_node *a = VTOPE(vp);
+	struct pefs_node *a = VTOPE(vp);
 #ifdef notyet
 	/*
 	 * Can't do this check because vop_reclaim runs
 	 * with a funny vop vector.
 	 */
-	if (vp->v_op != pe_vnodeop_p) {
-		printf ("pe_checkvp: on non-null-node\n");
-		while (pe_checkvp_barrier) /*WAIT*/ ;
-		panic("pe_checkvp");
+	if (vp->v_op != pefs_vnodeop_p) {
+		printf ("pefs_checkvp: on non-null-node\n");
+		while (pefs_checkvp_barrier) /*WAIT*/ ;
+		panic("pefs_checkvp");
 	};
 #endif
-	if (a->pe_lowervp == NULLVP) {
+	if (a->pn_lowervp == NULLVP) {
 		/* Should never happen */
 		int i; u_long *p;
 		printf("vp = %p, ZERO ptr\n", (void *)vp);
@@ -422,21 +538,21 @@
 			printf(" %lx", p[i]);
 		printf("\n");
 		/* wait for debugger */
-		while (pe_checkvp_barrier) /*WAIT*/ ;
-		panic("pe_checkvp");
+		while (pefs_checkvp_barrier) /*WAIT*/ ;
+		panic("pefs_checkvp");
 	}
 	if (mtx_owned(VI_MTX(vp)) != 0) {
 		VI_UNLOCK(vp);
 		interlock = 1;
 	}
-	if (vrefcnt(a->pe_lowervp) < 1) {
+	if (vrefcnt(a->pn_lowervp) < 1) {
 		int i; u_long *p;
 		printf("vp = %p, unref'ed lowervp\n", (void *)vp);
 		for (p = (u_long *) a, i = 0; i < 8; i++)
 			printf(" %lx", p[i]);
 		printf("\n");
 		/* wait for debugger */
-		while (pe_checkvp_barrier) /*WAIT*/ ;
+		while (pefs_checkvp_barrier) /*WAIT*/ ;
 		panic ("null with unref'ed lowervp");
 	};
 	if (interlock != 0)
@@ -444,9 +560,9 @@
 #ifdef notyet
 	printf("null %x/%d -> %x/%d [%s, %d]\n",
 	        PETOV(a), vrefcnt(PETOV(a)),
-		a->pe_lowervp, vrefcnt(a->pe_lowervp),
+		a->pn_lowervp, vrefcnt(a->pn_lowervp),
 		fil, lno);
 #endif
-	return a->pe_lowervp;
+	return a->pn_lowervp;
 }
 #endif

==== //depot/projects/soc2009/gk_pefs/sys/fs/pefs/pefs_vfsops.c#2 (text+ko) ====

@@ -38,11 +38,6 @@
  * $FreeBSD$
  */
 
-/*
- * Null Layer
- * (See pe_vnops.c for a description of what this does.)
- */
-
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/fcntl.h>
@@ -58,16 +53,6 @@
 
 static MALLOC_DEFINE(M_PEFSMNT, "pefs_mount", "PEFS mount structure");
 
-static vfs_fhtovp_t	pefs_fhtovp;
-static vfs_mount_t	pefs_mount;
-static vfs_quotactl_t	pefs_quotactl;
-static vfs_root_t	pefs_root;
-static vfs_sync_t	pefs_sync;
-static vfs_statfs_t	pefs_statfs;
-static vfs_unmount_t	pefs_unmount;
-static vfs_vget_t	pefs_vget;
-static vfs_extattrctl_t	pefs_extattrctl;
-
 /*
  * Mount null layer
  */
@@ -76,8 +61,8 @@
 {
 	int error = 0;
 	struct vnode *lowerrootvp, *vp;
-	struct vnode *pem_rootvp;
-	struct pe_mount *xmp;
+	struct vnode *pm_rootvp;
+	struct pefs_mount *xmp;
 	char *target;
 	int isvnunlocked = 0, len;
 	struct nameidata nd, *ndp = &nd;
@@ -110,7 +95,7 @@
 	 * Unlock lower node to avoid deadlock.
 	 * (XXX) VOP_ISLOCKED is needed?
 	 */
-	if ((mp->mnt_vnodecovered->v_op == &pe_vnodeops) &&
+	if ((mp->mnt_vnodecovered->v_op == &pefs_vnodeops) &&
 		VOP_ISLOCKED(mp->mnt_vnodecovered)) {
 		VOP_UNLOCK(mp->mnt_vnodecovered, 0);
 		isvnunlocked = 1;
@@ -138,25 +123,25 @@
 	/*
 	 * Check multi null mount to avoid `lock against myself' panic.
 	 */
-	if (lowerrootvp == VTOPE(mp->mnt_vnodecovered)->pe_lowervp) {
+	if (lowerrootvp == VTOPE(mp->mnt_vnodecovered)->pn_lowervp) {
 		PEFSDEBUG("pefs_mount: multi null mount?\n");
 		vput(lowerrootvp);
 		return (EDEADLK);
 	}
 
-	xmp = (struct pe_mount *) malloc(sizeof(struct pe_mount),
+	xmp = (struct pefs_mount *) malloc(sizeof(struct pefs_mount),
 				M_PEFSMNT, M_WAITOK);	/* XXX */
 
 	/*
 	 * Save reference to underlying FS
 	 */
-	xmp->pem_vfs = lowerrootvp->v_mount;
+	xmp->pm_vfs = lowerrootvp->v_mount;
 
 	/*
 	 * Save reference.  Each mount also holds
 	 * a reference on the root vnode.
 	 */
-	error = pe_nodeget(mp, lowerrootvp, &vp);
+	error = pefs_nodeget(mp, lowerrootvp, &vp);
 	/*
 	 * Make sure the node alias worked
 	 */
@@ -171,16 +156,16 @@
 	 * Keep a held reference to the root vnode.
 	 * It is vrele'd in pefs_unmount.
 	 */
-	pem_rootvp = vp;
-	pem_rootvp->v_vflag |= VV_ROOT;
-	xmp->pem_rootvp = pem_rootvp;
+	pm_rootvp = vp;
+	pm_rootvp->v_vflag |= VV_ROOT;
+	xmp->pm_rootvp = pm_rootvp;
 
 	/*
 	 * Unlock the node (either the lower or the alias)
 	 */
 	VOP_UNLOCK(vp, 0);
 
-	if (PEVPTOLOWERVP(pem_rootvp)->v_mount->mnt_flag & MNT_LOCAL) {
+	if (PEVPTOLOWERVP(pm_rootvp)->v_mount->mnt_flag & MNT_LOCAL) {
 		MNT_ILOCK(mp);
 		mp->mnt_flag |= MNT_LOCAL;
 		MNT_IUNLOCK(mp);
@@ -202,9 +187,7 @@
  * Free reference to null layer
  */
 static int
-pefs_unmount(mp, mntflags)
-	struct mount *mp;
-	int mntflags;
+pefs_unmount(struct mount *mp, int mntflags)
 {
 	void *mntdata;
 	int error;
@@ -215,13 +198,13 @@
 	if (mntflags & MNT_FORCE)
 		flags |= FORCECLOSE;
 
-	/* There is 1 extra root vnode reference (pem_rootvp). */
+	/* There is 1 extra root vnode reference (pm_rootvp). */
 	error = vflush(mp, 1, flags, curthread);
 	if (error)
 		return (error);
 
 	/*
-	 * Finally, throw away the pe_mount structure
+	 * Finally, throw away the pefs_mount structure
 	 */
 	mntdata = mp->mnt_data;
 	mp->mnt_data = 0;
@@ -230,24 +213,21 @@
 }
 
 static int
-pefs_root(mp, flags, vpp)
-	struct mount *mp;
-	int flags;
-	struct vnode **vpp;
+pefs_root(struct mount *mp, int flags, struct vnode **vpp)
 {
 	struct vnode *vp;
 
 	PEFSDEBUG("pefs_root(mp = %p, vp = %p->%p)\n", (void *)mp,
-	    (void *)MOUNTTOPEMOUNT(mp)->pem_rootvp,
-	    (void *)PEVPTOLOWERVP(MOUNTTOPEMOUNT(mp)->pem_rootvp));
+	    (void *)MOUNTTOPEMOUNT(mp)->pm_rootvp,
+	    (void *)PEVPTOLOWERVP(MOUNTTOPEMOUNT(mp)->pm_rootvp));
 
 	/*
 	 * Return locked reference to root.
 	 */
-	vp = MOUNTTOPEMOUNT(mp)->pem_rootvp;
+	vp = MOUNTTOPEMOUNT(mp)->pm_rootvp;
 	VREF(vp);
 
-#ifdef PEFS_DEBUG
+#ifdef PEFSXXX_DEBUG
 	if (VOP_ISLOCKED(vp))
 		panic("root vnode is locked.\n");
 #endif
@@ -257,30 +237,24 @@
 }
 
 static int
-pefs_quotactl(mp, cmd, uid, arg)
-	struct mount *mp;
-	int cmd;
-	uid_t uid;
-	void *arg;
+pefs_quotactl(struct mount *mp, int cmd, uid_t uid, void *arg)
 {
-	return VFS_QUOTACTL(MOUNTTOPEMOUNT(mp)->pem_vfs, cmd, uid, arg);
+	return VFS_QUOTACTL(MOUNTTOPEMOUNT(mp)->pm_vfs, cmd, uid, arg);
 }
 
 static int
-pefs_statfs(mp, sbp)
-	struct mount *mp;
-	struct statfs *sbp;
+pefs_statfs(struct mount *mp, struct statfs *sbp)
 {
 	int error;
 	struct statfs mstat;
 
 	PEFSDEBUG("pefs_statfs(mp = %p, vp = %p->%p)\n", (void *)mp,
-	    (void *)MOUNTTOPEMOUNT(mp)->pem_rootvp,
-	    (void *)PEVPTOLOWERVP(MOUNTTOPEMOUNT(mp)->pem_rootvp));
+	    (void *)MOUNTTOPEMOUNT(mp)->pm_rootvp,
+	    (void *)PEVPTOLOWERVP(MOUNTTOPEMOUNT(mp)->pm_rootvp));
 
 	bzero(&mstat, sizeof(mstat));
 
-	error = VFS_STATFS(MOUNTTOPEMOUNT(mp)->pem_vfs, &mstat);
+	error = VFS_STATFS(MOUNTTOPEMOUNT(mp)->pm_vfs, &mstat);
 	if (error)
 		return (error);
 
@@ -298,9 +272,7 @@
 }
 
 static int
-pefs_sync(mp, waitfor)
-	struct mount *mp;
-	int waitfor;
+pefs_sync(struct mount *mp, int waitfor)
 {
 	/*
 	 * XXX - Assumes no data cached at null layer.
@@ -309,48 +281,43 @@
 }
 
 static int
-pefs_vget(mp, ino, flags, vpp)
-	struct mount *mp;
-	ino_t ino;
-	int flags;
-	struct vnode **vpp;
+pefs_vget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp)
 {
 	int error;
-	error = VFS_VGET(MOUNTTOPEMOUNT(mp)->pem_vfs, ino, flags, vpp);
+	error = VFS_VGET(MOUNTTOPEMOUNT(mp)->pm_vfs, ino, flags, vpp);
 	if (error)
 		return (error);
 
-	return (pe_nodeget(mp, *vpp, vpp));
+	return (pefs_nodeget(mp, *vpp, vpp));
 }
 
 static int
-pefs_fhtovp(mp, fidp, vpp)
-	struct mount *mp;
-	struct fid *fidp;
-	struct vnode **vpp;
+pefs_fhtovp(struct mount *mp, struct fid *fidp, struct vnode **vpp)
 {
 	int error;
-	error = VFS_FHTOVP(MOUNTTOPEMOUNT(mp)->pem_vfs, fidp, vpp);
+
+	error = VFS_FHTOVP(MOUNTTOPEMOUNT(mp)->pm_vfs, fidp, vpp);
 	if (error)
 		return (error);
 
-	return (pe_nodeget(mp, *vpp, vpp));
+	error = pefs_nodeget(mp, *vpp, vpp);
+	printf("pefs_fhtovp: error=%d; vp=%p; v_object=%p\n", error,
+			!error ? *vpp : NULL, !error ? (*vpp)->v_object : NULL);
+	if (error)
+		return (error);
+	vnode_create_vobject(*vpp, 0, curthread);
+	return (error);
 }
 
-static int                        
-pefs_extattrctl(mp, cmd, filename_vp, namespace, attrname)
-	struct mount *mp;
-	int cmd;
-	struct vnode *filename_vp;
-	int namespace;
-	const char *attrname;
+static int
+pefs_extattrctl(struct mount *mp, int cmd, struct vnode *filename_vp, int namespace, const char *attrname)
 {
-	return VFS_EXTATTRCTL(MOUNTTOPEMOUNT(mp)->pem_vfs, cmd, filename_vp,
+	return VFS_EXTATTRCTL(MOUNTTOPEMOUNT(mp)->pm_vfs, cmd, filename_vp,
 	    namespace, attrname);

>>> TRUNCATED FOR MAIL (1000 lines) <<<


More information about the p4-projects mailing list