sparc64/142102: commit references a PR

dfilter service dfilter at FreeBSD.ORG
Sat Jan 9 16:00:15 UTC 2010


The following reply was made to PR sparc64/142102; it has been noted by GNATS.

From: dfilter at FreeBSD.ORG (dfilter service)
To: bug-followup at FreeBSD.org
Cc:  
Subject: Re: sparc64/142102: commit references a PR
Date: Sat,  9 Jan 2010 15:59:24 +0000 (UTC)

 Author: marius
 Date: Sat Jan  9 15:59:15 2010
 New Revision: 201899
 URL: http://svn.freebsd.org/changeset/base/201899
 
 Log:
   Some style(9) fixes in order to fabricate a commit to denote that
   the commit message for r201896 actually should have read:
   
   As nfsm_srvmtofh_xx() assumes the 4-byte alignment required by XDR
   ensure the mbuf data is aligned accordingly by calling nfs_realign()
   in fha_extract_info(). This fix is orthogonal to the problem solved
   by r199274/r199284.
   
   PR:		142102 (second part)
   MFC after:	1 week
 
 Modified:
   head/sys/nfsserver/nfs.h
   head/sys/nfsserver/nfs_fha.c
   head/sys/nfsserver/nfs_srvkrpc.c
 
 Modified: head/sys/nfsserver/nfs.h
 ==============================================================================
 --- head/sys/nfsserver/nfs.h	Sat Jan  9 15:43:47 2010	(r201898)
 +++ head/sys/nfsserver/nfs.h	Sat Jan  9 15:59:15 2010	(r201899)
 @@ -82,14 +82,13 @@
  #define IO_METASYNC	0
  #endif
  
 -
  /* NFS state flags XXX -Wunused */
  #define	NFSRV_SNDLOCK		0x01000000  /* Send socket lock */
  #define	NFSRV_WANTSND		0x02000000  /* Want above */
  
  /*
 - * Structures for the nfssvc(2) syscall. Not that anyone but nfsd and mount_nfs
 - * should ever try and use it.
 + * Structures for the nfssvc(2) syscall.  Not that anyone but nfsd and
 + * mount_nfs should ever try and use it.
   */
  
  /*
 
 Modified: head/sys/nfsserver/nfs_fha.c
 ==============================================================================
 --- head/sys/nfsserver/nfs_fha.c	Sat Jan  9 15:43:47 2010	(r201898)
 +++ head/sys/nfsserver/nfs_fha.c	Sat Jan  9 15:59:15 2010	(r201899)
 @@ -71,16 +71,17 @@ static struct fha_global {
  	u_long hashmask;
  } g_fha;
  
 -/* 
 - * These are the entries in the filehandle hash. They talk about a specific 
 - * file, requests against which are being handled by one or more nfsds. We keep
 - * a chain of nfsds against the file. We only have more than one if reads are 
 - * ongoing, and then only if the reads affect disparate regions of the file.
 +/*
 + * These are the entries in the filehandle hash.  They talk about a specific
 + * file, requests against which are being handled by one or more nfsds.  We
 + * keep a chain of nfsds against the file. We only have more than one if reads
 + * are ongoing, and then only if the reads affect disparate regions of the
 + * file.
   *
 - * In general, we want to assign a new request to an existing nfsd if it is 
 - * going to contend with work happening already on that nfsd, or if the 
 - * operation is a read and the nfsd is already handling a proximate read. We 
 - * do this to avoid jumping around in the read stream unnecessarily, and to 
 + * In general, we want to assign a new request to an existing nfsd if it is
 + * going to contend with work happening already on that nfsd, or if the
 + * operation is a read and the nfsd is already handling a proximate read.  We
 + * do this to avoid jumping around in the read stream unnecessarily, and to
   * avoid contention between threads over single files.
   */
  struct fha_hash_entry {
 @@ -101,7 +102,7 @@ struct fha_info {
  };
  
  static int fhe_stats_sysctl(SYSCTL_HANDLER_ARGS);
 - 
 +
  static void
  nfs_fha_init(void *foo)
  {
 @@ -136,7 +137,7 @@ nfs_fha_init(void *foo)
  	    &fha_ctls.max_reqs_per_nfsd, 0, "Maximum requests that "
  	    "single nfsd thread should be working on at any time");
  
 -	SYSCTL_ADD_OID(&fha_clist, SYSCTL_STATIC_CHILDREN(_vfs_nfsrv_fha), 
 +	SYSCTL_ADD_OID(&fha_clist, SYSCTL_STATIC_CHILDREN(_vfs_nfsrv_fha),
  	    OID_AUTO, "fhe_stats", CTLTYPE_STRING | CTLFLAG_RD, 0, 0,
  	    fhe_stats_sysctl, "A", "");
  }
 @@ -151,7 +152,7 @@ nfs_fha_uninit(void *foo)
  SYSINIT(nfs_fha, SI_SUB_ROOT_CONF, SI_ORDER_ANY, nfs_fha_init, NULL);
  SYSUNINIT(nfs_fha, SI_SUB_ROOT_CONF, SI_ORDER_ANY, nfs_fha_uninit, NULL);
  
 -/* 
 +/*
   * This just specifies that offsets should obey affinity when within
   * the same 1Mbyte (1<<20) chunk for the file (reads only for now).
   */
 @@ -167,18 +168,18 @@ fha_extract_info(struct svc_req *req, st
  	u_int32_t *tl;
  	rpcproc_t procnum;
  
 -	/* 
 -	 * We start off with a random fh. If we get a reasonable
 -	 * procnum, we set the fh. If there's a concept of offset 
 +	/*
 +	 * We start off with a random fh.  If we get a reasonable
 +	 * procnum, we set the fh.  If there's a concept of offset
  	 * that we're interested in, we set that.
  	 */
  	i->fh = ++random_fh;
  	i->offset = 0;
  	i->locktype = LK_EXCLUSIVE;
 -	
 +
  	/*
  	 * Extract the procnum and convert to v3 form if necessary,
 -	 * taking care to deal with out-of-range procnums. Caller will
 +	 * taking care to deal with out-of-range procnums.  Caller will
  	 * ensure that rq_vers is either 2 or 3.
  	 */
  	procnum = req->rq_proc;
 @@ -188,19 +189,19 @@ fha_extract_info(struct svc_req *req, st
  		procnum = nfsrv_nfsv3_procid[procnum];
  	}
  
 -	/* 
 -	 * We do affinity for most. However, we divide a realm of affinity 
 -	 * by file offset so as to allow for concurrent random access. We 
 -	 * only do this for reads today, but this may change when IFS supports 
 +	/*
 +	 * We do affinity for most.  However, we divide a realm of affinity
 +	 * by file offset so as to allow for concurrent random access.  We
 +	 * only do this for reads today, but this may change when IFS supports
  	 * efficient concurrent writes.
  	 */
  	if (procnum == NFSPROC_FSSTAT ||
  	    procnum == NFSPROC_FSINFO ||
  	    procnum == NFSPROC_PATHCONF ||
 -	    procnum == NFSPROC_NOOP || 
 +	    procnum == NFSPROC_NOOP ||
  	    procnum == NFSPROC_NULL)
  		goto out;
 -	
 +
  	nfs_realign(&req->rq_args);
  	md = req->rq_args;
  	dpos = mtod(md, caddr_t);
 @@ -270,8 +271,8 @@ fha_hash_entry_new(u_int64_t fh)
  	e->num_writes = 0;
  	e->num_threads = 0;
  	LIST_INIT(&e->threads);
 -	
 -	return e;
 +
 +	return (e);
  }
  
  static void
 @@ -296,10 +297,9 @@ fha_hash_entry_lookup(SVCPOOL *pool, u_i
  {
  	struct fha_hash_entry *fhe, *new_fhe;
  
 -	LIST_FOREACH(fhe, &g_fha.hashtable[fh % g_fha.hashmask], link) {
 +	LIST_FOREACH(fhe, &g_fha.hashtable[fh % g_fha.hashmask], link)
  		if (fhe->fh == fh)
  			break;
 -	}
  
  	if (!fhe) {
  		/* Allocate a new entry. */
 @@ -308,25 +308,24 @@ fha_hash_entry_lookup(SVCPOOL *pool, u_i
  		mtx_lock(&pool->sp_lock);
  
  		/* Double-check to make sure we still need the new entry. */
 -		LIST_FOREACH(fhe, &g_fha.hashtable[fh % g_fha.hashmask], link) {
 +		LIST_FOREACH(fhe, &g_fha.hashtable[fh % g_fha.hashmask], link)
  			if (fhe->fh == fh)
  				break;
 -		}
  		if (!fhe) {
  			fhe = new_fhe;
  			LIST_INSERT_HEAD(&g_fha.hashtable[fh % g_fha.hashmask],
  			    fhe, link);
 -		} else {
 +		} else
  			fha_hash_entry_destroy(new_fhe);
 -		}
  	}
  
 -	return fhe;
 +	return (fhe);
  }
  
  static void
  fha_hash_entry_add_thread(struct fha_hash_entry *fhe, SVCTHREAD *thread)
  {
 +
  	LIST_INSERT_HEAD(&fhe->threads, thread, st_alink);
  	fhe->num_threads++;
  }
 @@ -339,7 +338,7 @@ fha_hash_entry_remove_thread(struct fha_
  	fhe->num_threads--;
  }
  
 -/* 
 +/*
   * Account for an ongoing operation associated with this file.
   */
  static void
 @@ -365,7 +364,7 @@ get_idle_thread(SVCPOOL *pool)
  }
  
  
 -/* 
 +/*
   * Get the service thread currently associated with the fhe that is
   * appropriate to handle this operation.
   */
 @@ -387,15 +386,15 @@ fha_hash_entry_choose_thread(SVCPOOL *po
  		/* If there are any writes in progress, use the first thread. */
  		if (fhe->num_writes) {
  #if 0
 -			ITRACE_CURPROC(ITRACE_NFS, ITRACE_INFO, 
 +			ITRACE_CURPROC(ITRACE_NFS, ITRACE_INFO,
  			    "fha: %p(%d)w", thread, req_count);
  #endif
  			return (thread);
  		}
  
 -		/* 
 -		 * Check for read locality, making sure that we won't 
 -		 * exceed our per-thread load limit in the process. 
 +		/*
 +		 * Check for read locality, making sure that we won't
 +		 * exceed our per-thread load limit in the process.
  		 */
  		offset1 = i->offset >> fha_ctls.bin_shift;
  		offset2 = STAILQ_FIRST(&thread->st_reqs)->rq_p3
 @@ -404,21 +403,21 @@ fha_hash_entry_choose_thread(SVCPOOL *po
  			if ((fha_ctls.max_reqs_per_nfsd == 0) ||
  			    (req_count < fha_ctls.max_reqs_per_nfsd)) {
  #if 0
 -				ITRACE_CURPROC(ITRACE_NFS, ITRACE_INFO, 
 +				ITRACE_CURPROC(ITRACE_NFS, ITRACE_INFO,
  				    "fha: %p(%d)r", thread, req_count);
  #endif
  				return (thread);
  			}
  		}
  
 -		/* 
 +		/*
  		 * We don't have a locality match, so skip this thread,
 -		 * but keep track of the most attractive thread in case 
 +		 * but keep track of the most attractive thread in case
  		 * we need to come back to it later.
  		 */
  #if 0
 -		ITRACE_CURPROC(ITRACE_NFS, ITRACE_INFO, 
 -		    "fha: %p(%d)s off1 %llu off2 %llu", thread, 
 +		ITRACE_CURPROC(ITRACE_NFS, ITRACE_INFO,
 +		    "fha: %p(%d)s off1 %llu off2 %llu", thread,
  		    req_count, offset1, offset2);
  #endif
  		if ((min_thread == NULL) || (req_count < min_count)) {
 @@ -427,38 +426,38 @@ fha_hash_entry_choose_thread(SVCPOOL *po
  		}
  	}
  
 -	/* 
 -	 * We didn't find a good match yet. See if we can add 
 +	/*
 +	 * We didn't find a good match yet.  See if we can add
  	 * a new thread to this file handle entry's thread list.
  	 */
 -	if ((fha_ctls.max_nfsds_per_fh == 0) || 
 +	if ((fha_ctls.max_nfsds_per_fh == 0) ||
  	    (fhe->num_threads < fha_ctls.max_nfsds_per_fh)) {
 -		/* 
 -		 * We can add a new thread, so try for an idle thread 
 -		 * first, and fall back to this_thread if none are idle. 
 +		/*
 +		 * We can add a new thread, so try for an idle thread
 +		 * first, and fall back to this_thread if none are idle.
  		 */
  		if (STAILQ_EMPTY(&this_thread->st_reqs)) {
  			thread = this_thread;
  #if 0
 -			ITRACE_CURPROC(ITRACE_NFS, ITRACE_INFO, 
 +			ITRACE_CURPROC(ITRACE_NFS, ITRACE_INFO,
  			    "fha: %p(%d)t", thread, thread->st_reqcount);
  #endif
  		} else if ((thread = get_idle_thread(pool))) {
  #if 0
 -			ITRACE_CURPROC(ITRACE_NFS, ITRACE_INFO, 
 +			ITRACE_CURPROC(ITRACE_NFS, ITRACE_INFO,
  			    "fha: %p(%d)i", thread, thread->st_reqcount);
  #endif
 -		} else { 
 +		} else {
  			thread = this_thread;
  #if 0
 -			ITRACE_CURPROC(ITRACE_NFS, ITRACE_INFO, 
 +			ITRACE_CURPROC(ITRACE_NFS, ITRACE_INFO,
  			    "fha: %p(%d)b", thread, thread->st_reqcount);
  #endif
  		}
  		fha_hash_entry_add_thread(fhe, thread);
  	} else {
 -		/* 
 -		 * We don't want to use any more threads for this file, so 
 +		/*
 +		 * We don't want to use any more threads for this file, so
  		 * go back to the most attractive nfsd we're already using.
  		 */
  		thread = min_thread;
 @@ -467,8 +466,8 @@ fha_hash_entry_choose_thread(SVCPOOL *po
  	return (thread);
  }
  
 -/* 
 - * After getting a request, try to assign it to some thread. Usually we
 +/*
 + * After getting a request, try to assign it to some thread.  Usually we
   * handle it ourselves.
   */
  SVCTHREAD *
 @@ -491,16 +490,16 @@ fha_assign(SVCTHREAD *this_thread, struc
  	pool = req->rq_xprt->xp_pool;
  	fha_extract_info(req, &i);
  
 -	/* 
 -	 * We save the offset associated with this request for later 
 +	/*
 +	 * We save the offset associated with this request for later
  	 * nfsd matching.
  	 */
  	fhe = fha_hash_entry_lookup(pool, i.fh);
  	req->rq_p1 = fhe;
  	req->rq_p2 = i.locktype;
  	req->rq_p3 = i.offset;
 -	
 -	/* 
 +
 +	/*
  	 * Choose a thread, taking into consideration locality, thread load,
  	 * and the number of threads already working on this file.
  	 */
 @@ -511,8 +510,8 @@ fha_assign(SVCTHREAD *this_thread, struc
  	return (thread);
  }
  
 -/* 
 - * Called when we're done with an operation. The request has already
 +/*
 + * Called when we're done with an operation.  The request has already
   * been de-queued.
   */
  void
 
 Modified: head/sys/nfsserver/nfs_srvkrpc.c
 ==============================================================================
 --- head/sys/nfsserver/nfs_srvkrpc.c	Sat Jan  9 15:43:47 2010	(r201898)
 +++ head/sys/nfsserver/nfs_srvkrpc.c	Sat Jan  9 15:59:15 2010	(r201899)
 @@ -187,19 +187,18 @@ nfssvc_nfsserver(struct thread *td, stru
  		}
  		error = nfssvc_addsock(fp, td);
  		fdrop(fp, td);
 -	} else if (uap->flag & NFSSVC_OLDNFSD) {
 +	} else if (uap->flag & NFSSVC_OLDNFSD)
  		error = nfssvc_nfsd(td, NULL);
 -	} else if (uap->flag & NFSSVC_NFSD) {
 -		if (!uap->argp) 
 +	else if (uap->flag & NFSSVC_NFSD) {
 +		if (!uap->argp)
  			return (EINVAL);
  		error = copyin(uap->argp, (caddr_t)&nfsdarg,
  		    sizeof(nfsdarg));
  		if (error)
  			return (error);
  		error = nfssvc_nfsd(td, &nfsdarg);
 -	} else {
 +	} else
  		error = ENXIO;
 -	}
  	return (error);
  }
  
 @@ -447,9 +446,8 @@ nfssvc_addsock(struct file *fp, struct t
  
  	siz = sb_max_adj;
  	error = soreserve(so, siz, siz);
 -	if (error) {
 +	if (error)
  		return (error);
 -	}
  
  	/*
  	 * Steal the socket from userland so that it doesn't close
 @@ -471,7 +469,7 @@ nfssvc_addsock(struct file *fp, struct t
  }
  
  /*
 - * Called by nfssvc() for nfsds. Just loops around servicing rpc requests
 + * Called by nfssvc() for nfsds.  Just loops around servicing rpc requests
   * until it is killed by a signal.
   */
  static int
 @@ -496,9 +494,9 @@ nfssvc_nfsd(struct thread *td, struct nf
  #endif
  
  	/*
 -	 * Only the first nfsd actually does any work. The RPC code
 -	 * adds threads to it as needed. Any extra processes offered
 -	 * by nfsd just exit. If nfsd is new enough, it will call us
 +	 * Only the first nfsd actually does any work.  The RPC code
 +	 * adds threads to it as needed.  Any extra processes offered
 +	 * by nfsd just exit.  If nfsd is new enough, it will call us
  	 * once with a structure that specifies how many threads to
  	 * use.
  	 */
 @@ -522,7 +520,7 @@ nfssvc_nfsd(struct thread *td, struct nf
  			nfsrv_pool->sp_minthreads = 4;
  			nfsrv_pool->sp_maxthreads = 4;
  		}
 -			
 +
  		svc_run(nfsrv_pool);
  
  #ifdef KGSSAPI
 @@ -541,7 +539,7 @@ nfssvc_nfsd(struct thread *td, struct nf
  
  /*
   * Size the NFS server's duplicate request cache at 1/2 the
 - * nmbclusters, floating within a (64, 2048) range. This is to
 + * nmbclusters, floating within a (64, 2048) range.  This is to
   * prevent all mbuf clusters being tied up in the NFS dupreq
   * cache for small values of nmbclusters.
   */
 
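For readers following the PR: the alignment issue referenced in the corrected
log message above can be illustrated with a small, self-contained userland
sketch.  The helper names below are hypothetical and are not the kernel code;
the actual fix (r201896) simply calls nfs_realign() in fha_extract_info() so
that nfsm_srvmtofh_xx() sees 4-byte-aligned mbuf data.  The point is only that
XDR data is parsed as 32-bit words, and on a strict-alignment machine such as
sparc64 dereferencing a misaligned word pointer traps, so the data has to be
realigned (or copied) before parsing:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>		/* ntohl() */

/*
 * Fast path: assumes the 4-byte alignment that XDR requires.  Dereferencing
 * a misaligned uint32_t pointer like this is what traps on strict-alignment
 * machines such as sparc64.
 */
static uint32_t
xdr_get_uint32_aligned(const uint32_t *p)
{

	return (ntohl(*p));
}

/*
 * Safe variant: copy the word to an aligned temporary first.  Realigning the
 * buffer once up front (what nfs_realign() does for the mbuf chain) lets a
 * parser keep using the fast path throughout.
 */
static uint32_t
xdr_get_uint32_any(const void *p)
{
	uint32_t v;

	memcpy(&v, p, sizeof(v));
	return (ntohl(v));
}

int
main(void)
{
	uint32_t words[2];			/* guaranteed 4-byte aligned */
	unsigned char *buf = (unsigned char *)words;
	const unsigned char *payload;

	memset(words, 0, sizeof(words));
	buf[4] = 0x2a;				/* 42 in network byte order */
	payload = buf + 1;			/* deliberately misaligned */

	/* Safe on any machine. */
	printf("aligned:    %u\n", xdr_get_uint32_aligned(&words[0]));
	/*
	 * Calling xdr_get_uint32_aligned((const uint32_t *)payload) here
	 * instead could fault on sparc64; the copying variant cannot.
	 */
	printf("misaligned: %u\n", xdr_get_uint32_any(payload));
	return (0);
}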