svn commit: r280279 - head/sys/sys

Konstantin Belousov kostikbel at gmail.com
Mon Apr 20 11:54:03 UTC 2015


On Mon, Apr 13, 2015 at 04:04:45PM -0400, Jung-uk Kim wrote:
> Please try the attached patch.
> 
> Jung-uk Kim

> Index: sys/amd64/amd64/pmap.c
> ===================================================================
> --- sys/amd64/amd64/pmap.c	(revision 281496)
> +++ sys/amd64/amd64/pmap.c	(working copy)
> @@ -412,7 +412,7 @@ static caddr_t crashdumpmap;
>  static void	free_pv_chunk(struct pv_chunk *pc);
>  static void	free_pv_entry(pmap_t pmap, pv_entry_t pv);
>  static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp);
> -static int	popcnt_pc_map_elem_pq(uint64_t elem);
> +static int	popcnt_pc_map(uint64_t *pc_map);
>  static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp);
>  static void	reserve_pv_entries(pmap_t pmap, int needed,
>  		    struct rwlock **lockp);
> @@ -2979,7 +2979,7 @@ retry:
>  }
>  
>  /*
> - * Returns the number of one bits within the given PV chunk map element.
> + * Returns the number of one bits within the given PV chunk map.
>   *
>   * The erratas for Intel processors state that "POPCNT Instruction May
>   * Take Longer to Execute Than Expected".  It is believed that the
> @@ -2994,12 +2994,21 @@ retry:
>   * 5th Gen Core: BDM85
>   */
>  static int
> -popcnt_pc_map_elem_pq(uint64_t elem)
> +popcnt_pc_map(uint64_t *pc_map)
>  {
> -	u_long result;
> +	u_long count, result;
> +	int field;
>  
> -	__asm __volatile("xorl %k0,%k0;popcntq %1,%0"
> -	    : "=&r" (result) : "rm" (elem));
> +	result = 0;
> +	if ((cpu_feature2 & CPUID2_POPCNT) != 0)
> +		for (field = 0; field < _NPCM; field++) {
> +			__asm __volatile("xorl %k0, %k0; popcntq %1, %0"
> +			    : "=r" (count) : "m" (pc_map[field]));
> +			result += count;
> +		}
> +	else
> +		for (field = 0; field < _NPCM; field++)
> +			result += bitcount64(pc_map[field]);
>  	return (result);
>  }
>  
> @@ -3031,15 +3040,7 @@ reserve_pv_entries(pmap_t pmap, int needed, struct
>  retry:
>  	avail = 0;
>  	TAILQ_FOREACH(pc, &pmap->pm_pvchunk, pc_list) {
> -		if ((cpu_feature2 & CPUID2_POPCNT) == 0) {
> -			free = bitcount64(pc->pc_map[0]);
> -			free += bitcount64(pc->pc_map[1]);
> -			free += bitcount64(pc->pc_map[2]);
> -		} else {
> -			free = popcnt_pc_map_elem_pq(pc->pc_map[0]);
> -			free += popcnt_pc_map_elem_pq(pc->pc_map[1]);
> -			free += popcnt_pc_map_elem_pq(pc->pc_map[2]);
> -		}
> +		free = popcnt_pc_map(pc->pc_map);
>  		if (free == 0)
>  			break;
>  		avail += free;
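
For anyone reading along outside the tree, here is a minimal userland
sketch of the idiom the patch consolidates (my own illustration, amd64
only, and assuming the CPU actually has POPCNT, i.e. with no CPUID2_POPCNT
check): the leading xorl %k0, %k0 zeroes the destination register to break
the false output dependency that the quoted errata describe, and the
result can be cross-checked against the compiler builtin.

#include <stdint.h>
#include <stdio.h>

/*
 * Count the one bits in *p with POPCNT.  The xorl clears the destination
 * register first, breaking the false output dependency; "=&r" (early
 * clobber) keeps that register from also being used to address the "m"
 * operand.
 */
static int
popcnt64(const uint64_t *p)
{
	uint64_t count;

	__asm __volatile("xorl %k0, %k0; popcntq %1, %0"
	    : "=&r" (count) : "m" (*p));
	return ((int)count);
}

int
main(void)
{
	uint64_t v = 0xfffffffffffff01fULL;

	/* Both lines should print the same count. */
	printf("asm:     %d\n", popcnt64(&v));
	printf("builtin: %d\n", __builtin_popcountll(v));
	return (0);
}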

Yes, this worked for me the same way as it did for you: the argument is
taken directly from memory, without a temporary spill.  Is this due to the
silly inliner?  Whatever the reason is, I think a comment should be added
noting the subtlety.
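
For illustration only, such a comment above the asm statement in
popcnt_pc_map() might read roughly like this (my wording, not a quote from
any later commit):

	/*
	 * The source operand for POPCNT is taken directly from memory via
	 * the "m" constraint: because the chunk map is passed by pointer,
	 * the compiler does not load pc_map[field] into a register, or
	 * spill it through a temporary on the stack, before the
	 * instruction reads it.
	 */
	__asm __volatile("xorl %k0, %k0; popcntq %1, %0"
	    : "=r" (count) : "m" (pc_map[field]));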

Otherwise, looks fine.

