socsvn commit: r235762 - soc2012/vbotton/ntfs_apple

vbotton at FreeBSD.org
Mon May 14 14:04:03 UTC 2012


Author: vbotton
Date: Mon May 14 14:04:01 2012
New Revision: 235762
URL: http://svnweb.FreeBSD.org/socsvn/?view=rev&rev=235762

Log:
  Some work towards portability, essentially endianness-related functions
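
A minimal sketch, not taken from this commit, of how the Linux/OS X style
byte-order helpers used throughout these files (le16_to_cpu(), sle64_to_cpu(),
cpu_to_le32(), ...) might be expressed on FreeBSD in terms of <sys/endian.h>;
the u16/u32/u64/s64 and le16/le32/le64 typedefs are assumed to come from
ntfs_types.h:

	#include <sys/endian.h>

	/* Hypothetical mapping; the real definitions belong in ntfs_endian.h. */
	#define le16_to_cpu(x)	le16toh((u16)(x))
	#define le32_to_cpu(x)	le32toh((u32)(x))
	#define le64_to_cpu(x)	le64toh((u64)(x))
	#define sle64_to_cpu(x)	((s64)le64toh((u64)(x)))
	#define cpu_to_le16(x)	((le16)htole16((u16)(x)))
	#define cpu_to_le32(x)	((le32)htole32((u32)(x)))
	#define cpu_to_le64(x)	((le64)htole64((u64)(x)))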

Added:
  soc2012/vbotton/ntfs_apple/
  soc2012/vbotton/ntfs_apple/ntfs.h
  soc2012/vbotton/ntfs_apple/ntfs_attr.c
  soc2012/vbotton/ntfs_apple/ntfs_attr.h
  soc2012/vbotton/ntfs_apple/ntfs_attr_list.c
  soc2012/vbotton/ntfs_apple/ntfs_attr_list.h
  soc2012/vbotton/ntfs_apple/ntfs_bitmap.c
  soc2012/vbotton/ntfs_apple/ntfs_bitmap.h
  soc2012/vbotton/ntfs_apple/ntfs_collate.c
  soc2012/vbotton/ntfs_apple/ntfs_collate.h
  soc2012/vbotton/ntfs_apple/ntfs_compress.c
  soc2012/vbotton/ntfs_apple/ntfs_compress.h
  soc2012/vbotton/ntfs_apple/ntfs_debug.c
  soc2012/vbotton/ntfs_apple/ntfs_debug.h
  soc2012/vbotton/ntfs_apple/ntfs_dir.c
  soc2012/vbotton/ntfs_apple/ntfs_dir.h
  soc2012/vbotton/ntfs_apple/ntfs_endian.h
  soc2012/vbotton/ntfs_apple/ntfs_hash.c
  soc2012/vbotton/ntfs_apple/ntfs_hash.h
  soc2012/vbotton/ntfs_apple/ntfs_index.c
  soc2012/vbotton/ntfs_apple/ntfs_index.h
  soc2012/vbotton/ntfs_apple/ntfs_inode.c
  soc2012/vbotton/ntfs_apple/ntfs_inode.h
  soc2012/vbotton/ntfs_apple/ntfs_layout.h
  soc2012/vbotton/ntfs_apple/ntfs_lcnalloc.c
  soc2012/vbotton/ntfs_apple/ntfs_lcnalloc.h
  soc2012/vbotton/ntfs_apple/ntfs_logfile.c
  soc2012/vbotton/ntfs_apple/ntfs_logfile.h
  soc2012/vbotton/ntfs_apple/ntfs_mft.c
  soc2012/vbotton/ntfs_apple/ntfs_mft.h
  soc2012/vbotton/ntfs_apple/ntfs_mst.c
  soc2012/vbotton/ntfs_apple/ntfs_mst.h
  soc2012/vbotton/ntfs_apple/ntfs_page.c
  soc2012/vbotton/ntfs_apple/ntfs_page.h
  soc2012/vbotton/ntfs_apple/ntfs_quota.c
  soc2012/vbotton/ntfs_apple/ntfs_quota.h
  soc2012/vbotton/ntfs_apple/ntfs_runlist.c
  soc2012/vbotton/ntfs_apple/ntfs_runlist.h
  soc2012/vbotton/ntfs_apple/ntfs_secure.c
  soc2012/vbotton/ntfs_apple/ntfs_secure.h
  soc2012/vbotton/ntfs_apple/ntfs_sfm.c
  soc2012/vbotton/ntfs_apple/ntfs_sfm.h
  soc2012/vbotton/ntfs_apple/ntfs_time.h
  soc2012/vbotton/ntfs_apple/ntfs_types.h
  soc2012/vbotton/ntfs_apple/ntfs_unistr.c
  soc2012/vbotton/ntfs_apple/ntfs_unistr.h
  soc2012/vbotton/ntfs_apple/ntfs_usnjrnl.c
  soc2012/vbotton/ntfs_apple/ntfs_usnjrnl.h
  soc2012/vbotton/ntfs_apple/ntfs_vfsops.c
  soc2012/vbotton/ntfs_apple/ntfs_vnops.c
  soc2012/vbotton/ntfs_apple/ntfs_vnops.h
  soc2012/vbotton/ntfs_apple/ntfs_volume.h

Added: soc2012/vbotton/ntfs_apple/ntfs.h
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ soc2012/vbotton/ntfs_apple/ntfs.h	Mon May 14 14:04:01 2012	(r235762)
@@ -0,0 +1,167 @@
+/*
+ * ntfs.h - Some generic defines for the NTFS kernel driver.
+ *
+ * Copyright (c) 2006-2008 Anton Altaparmakov.  All Rights Reserved.
+ * Portions Copyright (c) 2006-2008 Apple Inc.  All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer. 
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution. 
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of its
+ *    contributors may be used to endorse or promote products derived from this
+ *    software without specific prior written permission. 
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * ALTERNATIVELY, provided that this notice and licensing terms are retained in
+ * full, this file may be redistributed and/or modified under the terms of the
+ * GNU General Public License (GPL) Version 2, in which case the provisions of
+ * that version of the GPL will apply to you instead of the license terms
+ * above.  You can obtain a copy of the GPL Version 2 at
+ * http://developer.apple.com/opensource/licenses/gpl-2.txt.
+ */
+
+#ifndef _OSX_NTFS_H
+#define _OSX_NTFS_H
+
+#ifdef KERNEL
+
+#include <sys/mount.h>
+#include <kern/locks.h>
+
+/* The email address of the NTFS developers. */
+__private_extern__ const char ntfs_dev_email[];
+__private_extern__ const char ntfs_please_email[];
+
+/*
+ * Lock group and lock attribute for de-/initialization of locks (defined
+ * in ntfs_vfsops.c).
+ */
+__private_extern__ lck_grp_t *ntfs_lock_grp;
+__private_extern__ lck_attr_t *ntfs_lock_attr;
+
+/*
+ * A tag for allocation and freeing of memory (defined in ntfs_vfsops.c).
+ */
+__private_extern__ OSMallocTag ntfs_malloc_tag;
+
+#include "ntfs_volume.h"
+
+/**
+ * NTFS_MP - return the NTFS volume given a vfs mount
+ * @mp:		VFS mount
+ *
+ * NTFS_MP() returns the NTFS volume associated with the VFS mount @mp.
+ */
+static inline ntfs_volume *NTFS_MP(mount_t mp)
+{
+	return (ntfs_volume*)vfs_fsprivate(mp);
+}
+
+__private_extern__ void ntfs_do_postponed_release(ntfs_volume *vol);
+
+#endif /* KERNEL */
+
+#include "ntfs_endian.h"
+#include "ntfs_types.h"
+
+/* Some useful constants to do with NTFS. */
+enum {
+	NTFS_BLOCK_SIZE		= 512,
+	NTFS_BLOCK_SIZE_SHIFT	= 9,
+	NTFS_MAX_NAME_LEN	= 255,
+	NTFS_MAX_ATTR_NAME_LEN	= 255,
+	NTFS_MAX_SECTOR_SIZE	= 4096,		/* 4kiB */
+	NTFS_MAX_CLUSTER_SIZE	= 64 * 1024,	/* 64kiB */
+	NTFS_ALLOC_BLOCK	= 1024,
+	NTFS_MAX_HARD_LINKS	= 65535,	/* 2^16 - 1 */
+	NTFS_MAX_ATTR_LIST_SIZE	= 256 * 1024,	/* 256kiB, corresponding to the
+						   VACB_MAPPING_GRANULARITY on
+						   Windows. */
+	NTFS_COMPRESSION_UNIT	= 4,
+};
+
+/*
+ * The maximum attribute size on NTFS is 2^63 - 1 bytes as it is stored in a
+ * signed 64 bit type (s64).
+ */
+#define NTFS_MAX_ATTRIBUTE_SIZE 0x7fffffffffffffffULL
+
+/*
+ * The maximum number of MFT records allowed on NTFS is 2^32 as described in
+ * various documentation to be found on the Microsoft web site.  This is an
+ * imposed limit rather than an inherent NTFS format limit.
+ */
+#define NTFS_MAX_NR_MFT_RECORDS 0x100000000ULL
+
+// TODO: Constants so ntfs_vfsops.c compiles for now...
+enum {
+	/* One of these must be present, default is ON_ERRORS_CONTINUE. */
+	ON_ERRORS_PANIC		= 0x01,
+	ON_ERRORS_REMOUNT_RO	= 0x02,
+	ON_ERRORS_CONTINUE	= 0x04,
+	/* Optional, can be combined with any of the above. */
+	ON_ERRORS_RECOVER	= 0x10,
+};
+
+/*
+ * The NTFS mount options header passed in from user space.
+ */
+typedef struct {
+#ifndef KERNEL
+	char *fspec;	/* Path of device to mount, consumed by mount(2). */
+#endif /* !KERNEL */
+	u8 major_ver;	/* The major version of the mount options structure. */
+	u8 minor_ver;	/* The minor version of the mount options structure. */
+} __attribute__((__packed__)) ntfs_mount_options_header;
+
+/*
+ * The NTFS mount options passed in from user space.  This follows the
+ * ntfs_mount_options_header aligned to an eight byte boundary.
+ *
+ * This is major version 0, minor version 0, which does not have any options,
+ * i.e. is empty.
+ */
+typedef struct {
+	/* Mount options version 0.0 does not have any ntfs options. */
+} __attribute__((__packed__)) ntfs_mount_options_0_0;
+
+/*
+ * The currently defined flags for the ntfs mount options structure.
+ */
+enum {
+	/* Below flag(s) appeared in mount options version 1.0. */
+	NTFS_MNT_OPT_CASE_SENSITIVE = htole32(0x00000001),
+	/* Below flag(s) appeared in mount options version x.y. */
+	// TODO: Add NTFS specific mount options flags here.
+};
+
+typedef le32 NTFS_MNT_OPTS;
+
+/*
+ * The NTFS mount options passed in from user space.  This follows the
+ * ntfs_mount_options_header aligned to an eight byte boundary.
+ *
+ * This is major version 1, minor version 0, which has only one option, a
+ * little endian, 32-bit flags option.
+ */
+typedef struct {
+	NTFS_MNT_OPTS flags;
+	// TODO: Add NTFS specific mount options here.
+} __attribute__((__packed__)) ntfs_mount_options_1_0;
+
+#endif /* !_OSX_NTFS_H */
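
The mount options structures above form a small user/kernel interface.  Purely
as an illustration (the device path, function and variable names, and the use
of an aligned member are assumptions, not taken from the commit), a user-space
caller could lay out a version 1.0 argument buffer, with the options starting
on an eight byte boundary after the header, roughly like this:

	#include <string.h>
	#include "ntfs.h"	/* ntfs_mount_options_* and NTFS_MNT_OPT_* above. */

	static void example_fill_args(char *dev)
	{
		struct {
			ntfs_mount_options_header hdr;
			/* Aligning this member pads the header to eight bytes. */
			ntfs_mount_options_1_0 opts __attribute__((__aligned__(8)));
		} args;

		memset(&args, 0, sizeof(args));
		args.hdr.fspec = dev;			/* Device to mount. */
		args.hdr.major_ver = 1;
		args.hdr.minor_ver = 0;
		args.opts.flags = NTFS_MNT_OPT_CASE_SENSITIVE;
		/* &args would then be handed to mount(2) as the fs-specific data. */
	}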

Added: soc2012/vbotton/ntfs_apple/ntfs_attr.c
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ soc2012/vbotton/ntfs_apple/ntfs_attr.c	Mon May 14 14:04:01 2012	(r235762)
@@ -0,0 +1,9096 @@
+/*
+ * ntfs_attr.c - NTFS kernel attribute operations.
+ *
+ * Copyright (c) 2006-2011 Anton Altaparmakov.  All Rights Reserved.
+ * Portions Copyright (c) 2006-2011 Apple Inc.  All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer. 
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution. 
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of its
+ *    contributors may be used to endorse or promote products derived from this
+ *    software without specific prior written permission. 
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * ALTERNATIVELY, provided that this notice and licensing terms are retained in
+ * full, this file may be redistributed and/or modified under the terms of the
+ * GNU General Public License (GPL) Version 2, in which case the provisions of
+ * that version of the GPL will apply to you instead of the license terms
+ * above.  You can obtain a copy of the GPL Version 2 at
+ * http://developer.apple.com/opensource/licenses/gpl-2.txt.
+ */
+
+#include <sys/errno.h>
+#include <sys/stat.h>
+#include <sys/ucred.h>
+/*#include <sys/ubc.h>*/
+
+#include <string.h>
+
+/*#include <libkern/libkern.h>
+#include <libkern/OSMalloc.h>*/
+
+//#include <kern/debug.h>
+//#include <kern/sched_prim.h>
+
+#include "ntfs.h"
+#include "ntfs_attr.h"
+#include "ntfs_attr_list.h"
+#include "ntfs_debug.h"
+#include "ntfs_dir.h"
+#include "ntfs_endian.h"
+#include "ntfs_index.h"
+#include "ntfs_inode.h"
+#include "ntfs_layout.h"
+#include "ntfs_lcnalloc.h"
+#include "ntfs_mft.h"
+#include "ntfs_page.h"
+#include "ntfs_runlist.h"
+#include "ntfs_time.h"
+#include "ntfs_types.h"
+#include "ntfs_unistr.h"
+
+ntfschar AT_UNNAMED[1] = { 0 };
+
+/**
+ * ntfs_attr_map_runlist - map the whole runlist of an ntfs inode
+ * @ni:		ntfs inode for which to map the whole runlist
+ *
+ * Map the whole runlist of the ntfs inode @ni.
+ *
+ * Return 0 on success and errno on error.
+ *
+ * Note this function requires the runlist not to be mapped yet at all.  This
+ * limitation is ok because we only use this function at mount time to map the
+ * runlist of some system files, thus we are guaranteed that they will not have
+ * any runlist fragments mapped yet.
+ *
+ * Note the runlist can be NULL after this function returns if the attribute
+ * has zero allocated size, i.e. there simply is no runlist.
+ */
+errno_t ntfs_attr_map_runlist(ntfs_inode *ni)
+{
+	VCN vcn, end_vcn;
+	ntfs_inode *base_ni;
+	MFT_RECORD *m;
+	ntfs_attr_search_ctx *ctx;
+	ATTR_RECORD *a;
+	errno_t err = 0;
+
+	ntfs_debug("Entering for mft_no 0x%llx, type 0x%x.",
+			(unsigned long long)ni->mft_no,
+			(unsigned)le32_to_cpu(ni->type));
+	/* If the attribute is resident there is nothing to do. */
+	if (!NInoNonResident(ni)) {
+		ntfs_debug("Done (resident, nothing to do).");
+		return 0;
+	}
+	lck_rw_lock_exclusive(&ni->rl.lock);
+	/* Verify that the runlist is not mapped yet. */
+	if (ni->rl.alloc && ni->rl.elements)
+		panic("%s(): ni->rl.alloc && ni->rl.elements\n", __FUNCTION__);
+	base_ni = ni;
+	if (NInoAttr(ni))
+		base_ni = ni->base_ni;
+	err = ntfs_mft_record_map(base_ni, &m);
+	if (err)
+		goto err;
+	ctx = ntfs_attr_search_ctx_get(base_ni, m);
+	if (!ctx) {
+		err = ENOMEM;
+		goto unm_err;
+	}
+	vcn = 0;
+	end_vcn = ni->allocated_size >> ni->vol->cluster_size_shift;
+	do {
+		err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len, vcn,
+				NULL, 0, ctx);
+		if (err) {
+			if (err == ENOENT)
+				err = EIO;
+			break;
+		}
+		a = ctx->a;
+		if (!a->non_resident) {
+corrupt_err:
+			ntfs_error(ni->vol->mp, "Inode 0x%llx contains corrupt "
+					"attribute extent, run chkdsk.",
+					(unsigned long long)base_ni->mft_no);
+			NVolSetErrors(ni->vol);
+			err = EIO;
+			break;
+		}
+		/*
+		 * If we are in the first attribute extent, verify the cached
+		 * allocated size is correct.
+		 */
+		if (!a->lowest_vcn)
+			if (sle64_to_cpu(a->allocated_size) !=
+					ni->allocated_size)
+				panic("%s(): sle64_to_cpu(a->allocated_size) "
+						"!= ni->allocated_size\n",
+						__FUNCTION__);
+		/*
+		 * Sanity check the lowest_vcn of the attribute is equal to the
+		 * vcn we looked up and that the highest_vcn of the attribute
+		 * is above the current vcn.
+		 */
+		if (sle64_to_cpu(a->lowest_vcn) != vcn || (vcn &&
+				sle64_to_cpu(a->highest_vcn) < vcn))
+			goto corrupt_err;
+		/* Determine the next vcn. */
+		vcn = sle64_to_cpu(a->highest_vcn) + 1;
+		/*
+		 * Finally, map the runlist fragment contained in this
+		 * attribute extent.
+		 */
+		err = ntfs_mapping_pairs_decompress(ni->vol, a, &ni->rl);
+	} while (!err && vcn < end_vcn);
+unm_err:
+	ntfs_attr_search_ctx_put(ctx);
+	ntfs_mft_record_unmap(base_ni);
+err:
+	lck_rw_unlock_exclusive(&ni->rl.lock);
+	if (!err)
+		ntfs_debug("Done.");
+	else
+		ntfs_error(ni->vol->mp, "Failed (error %d).", (int)err);
+	return err;
+}
+
+/**
+ * ntfs_map_runlist_nolock - map (a part of) a runlist of an ntfs inode
+ * @ni:		ntfs inode for which to map (part of) a runlist
+ * @vcn:	map runlist part containing this vcn
+ * @ctx:	active attribute search context if present or NULL if not
+ *
+ * Map the part of a runlist containing the @vcn of the ntfs inode @ni.
+ *
+ * If @ctx is specified, it is an active search context of @ni and its base mft
+ * record.  This is needed when ntfs_map_runlist_nolock() encounters unmapped
+ * runlist fragments and allows their mapping.  If you do not have the mft
+ * record mapped, you can specify @ctx as NULL and ntfs_map_runlist_nolock()
+ * will perform the necessary mapping and unmapping.
+ *
+ * Note, ntfs_map_runlist_nolock() saves the state of @ctx on entry and
+ * restores it before returning.  Thus, @ctx will be left pointing to the same
+ * attribute on return as on entry.  However, the actual pointers in @ctx may
+ * point to different memory locations on return, so you must remember to reset
+ * any cached pointers from the @ctx, i.e. after the call to
+ * ntfs_map_runlist_nolock(), you will probably want to do:
+ *	m = ctx->m;
+ *	a = ctx->a;
+ * Assuming you cache ctx->a in a variable @a of type ATTR_RECORD * and that
+ * you cache ctx->m in a variable @m of type MFT_RECORD *.
+ *
+ * Return 0 on success and errno on error.  There is one special error code
+ * which is not an error as such.  This is ENOENT.  It means that @vcn is out
+ * of bounds of the runlist.
+ *
+ * Note the runlist can be NULL after this function returns if @vcn is zero and
+ * the attribute has zero allocated size, i.e. there simply is no runlist.
+ *
+ * WARNING: If @ctx is supplied, regardless of whether success or failure is
+ *	    returned, you need to check @ctx->is_error and if 1 the @ctx is no
+ *	    longer valid, i.e. you need to either call
+ *	    ntfs_attr_search_ctx_reinit() or ntfs_attr_search_ctx_put() on it.
+ *	    In that case @ctx->error will give you the error code for why the
+ *	    mapping of the old inode failed.
+ *	    Also if @ctx is supplied and the current attribute (or the mft
+ *	    record it is in) has been modified then the caller must call
+ *	    NInoSetMrecNeedsDirtying(ctx->ni); before calling
+ *	    ntfs_map_runlist_nolock() or the changes may be lost.
+ *
+ * Locking: - The runlist described by @ni must be locked for writing on entry
+ *	      and is locked on return.  Note the runlist will be modified.
+ *	    - If @ctx is NULL, the base mft record of @ni must not be mapped on
+ *	      entry and it will be left unmapped on return.
+ *	    - If @ctx is not NULL, the base mft record must be mapped on entry
+ *	      and it will be left mapped on return.
+ */
+errno_t ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn,
+		ntfs_attr_search_ctx *ctx)
+{
+	VCN end_vcn;
+	ntfs_inode *base_ni;
+	MFT_RECORD *m;
+	ATTR_RECORD *a;
+	errno_t err = 0;
+	BOOL ctx_is_temporary, ctx_needs_reset;
+	ntfs_attr_search_ctx old_ctx = { { NULL, }, };
+
+	ntfs_debug("Entering for mft_no 0x%llx, vcn 0x%llx.",
+			(unsigned long long)ni->mft_no,
+			(unsigned long long)vcn);
+	base_ni = ni;
+	if (NInoAttr(ni))
+		base_ni = ni->base_ni;
+	if (!ctx) {
+		ctx_is_temporary = ctx_needs_reset = TRUE;
+		err = ntfs_mft_record_map(base_ni, &m);
+		if (err)
+			goto done;
+		ctx = ntfs_attr_search_ctx_get(base_ni, m);
+		if (!ctx) {
+			err = ENOMEM;
+			goto err;
+		}
+	} else {
+		VCN allocated_size_vcn;
+
+		if (ctx->is_error)
+			panic("%s(): ctx->is_error\n", __FUNCTION__);
+		a = ctx->a;
+		if (!a->non_resident)
+			panic("%s(): !a->non_resident\n", __FUNCTION__);
+		ctx_is_temporary = FALSE;
+		end_vcn = sle64_to_cpu(a->highest_vcn);
+		lck_spin_lock(&ni->size_lock);
+		allocated_size_vcn = ni->allocated_size >>
+				ni->vol->cluster_size_shift;
+		lck_spin_unlock(&ni->size_lock);
+		/*
+		 * If we already have the attribute extent containing @vcn in
+		 * @ctx, no need to look it up again.  We slightly cheat in
+		 * that if vcn exceeds the allocated size, we will refuse to
+		 * map the runlist below, so there is definitely no need to get
+		 * the right attribute extent.
+		 */
+		if (vcn >= allocated_size_vcn || (a->type == ni->type &&
+				a->name_length == ni->name_len &&
+				!bcmp((u8*)a + le16_to_cpu(a->name_offset),
+				ni->name, ni->name_len) &&
+				sle64_to_cpu(a->lowest_vcn) <= vcn &&
+				end_vcn >= vcn))
+			ctx_needs_reset = FALSE;
+		else {
+			/* Save the old search context. */
+			old_ctx = *ctx;
+			/*
+			 * Reinitialize the search context so we can lookup the
+			 * needed attribute extent.
+			 */
+			ntfs_attr_search_ctx_reinit(ctx);
+			ctx_needs_reset = TRUE;
+		}
+	}
+	if (ctx_needs_reset) {
+		err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len, vcn,
+				NULL, 0, ctx);
+		if (err) {
+			if (err == ENOENT)
+				err = EIO;
+			goto err;
+		}
+		if (!ctx->a->non_resident)
+			panic("%s(): !a->non_resident!\n", __FUNCTION__);
+	}
+	a = ctx->a;
+	/*
+	 * Only decompress the mapping pairs if @vcn is inside it.  Otherwise
+	 * we get into problems when we try to map an out of bounds vcn because
+	 * we then try to map the already mapped runlist fragment and
+	 * ntfs_mapping_pairs_decompress() fails.
+	 */
+	end_vcn = sle64_to_cpu(a->highest_vcn) + 1;
+	if (vcn && vcn >= end_vcn) {
+		err = ENOENT;
+		goto err;
+	}
+	err = ntfs_mapping_pairs_decompress(ni->vol, a, &ni->rl);
+err:
+	if (ctx_is_temporary) {
+		if (ctx)
+			ntfs_attr_search_ctx_put(ctx);
+		ntfs_mft_record_unmap(base_ni);
+	} else if (ctx_needs_reset) {
+		/*
+		 * If there is no attribute list, restoring the search context
+		 * is accomplished simply by copying the saved context back over
+		 * the caller supplied context.  If there is an attribute list,
+		 * things are more complicated as we need to deal with mapping
+		 * of mft records and resulting potential changes in pointers.
+		 */
+		if (NInoAttrList(base_ni)) {
+			/*
+			 * If the currently mapped (extent) inode is not the
+			 * one we had before, we need to unmap it and map the
+			 * old one.
+			 */
+			if (ctx->ni != old_ctx.ni) {
+				/*
+				 * If the currently mapped inode is not the
+				 * base inode, unmap it.
+				 */
+				if (ctx->base_ni && ctx->ni != ctx->base_ni) {
+					ntfs_extent_mft_record_unmap(ctx->ni);
+					ctx->m = ctx->base_m;
+					if (!ctx->m)
+						panic("%s(): !ctx->m\n",
+								__FUNCTION__);
+				}
+				/*
+				 * If the old mapped inode is not the base
+				 * inode, map it.
+				 */
+				if (old_ctx.base_ni && old_ctx.ni !=
+						old_ctx.base_ni) {
+					errno_t err2;
+retry_map:
+					err2 = ntfs_mft_record_map(old_ctx.ni,
+							&ctx->m);
+					/*
+					 * Something bad has happened.  If out
+					 * of memory retry till it succeeds.
+					 * Any other errors are fatal and we
+					 * return the error code in ctx->m.
+					 * Let the caller deal with it...  We
+					 * just need to fudge things so the
+					 * caller can reinit and/or put the
+					 * search context safely.
+					 */
+					if (err2) {
+						if (err2 == ENOMEM) {
+							(void)thread_block(
+							THREAD_CONTINUE_NULL);
+							goto retry_map;
+						}
+						ctx->is_error = 1;
+						ctx->error = err2;
+						old_ctx.ni = old_ctx.base_ni;
+					}
+				}
+			}
+			if (ctx->is_error) {
+				old_ctx.is_error = 1;
+				old_ctx.error = ctx->error;
+			} else if (ctx->m != old_ctx.m) {
+				/*
+				 * Update the changed pointers in the saved
+				 * context.
+				 */
+				old_ctx.a = (ATTR_RECORD*)((u8*)ctx->m +
+						((u8*)old_ctx.a -
+						(u8*)old_ctx.m));
+				old_ctx.m = ctx->m;
+			}
+		}
+		/* Restore the search context to the saved one. */
+		*ctx = old_ctx;
+	}
+done:
+	ntfs_debug("Done (error %d).", (int)err);
+	return err;
+}
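
/*
 * Illustrative caller pattern only, not part of the driver: per the comment
 * above ntfs_map_runlist_nolock(), a caller that passes in an active search
 * context must check @ctx->is_error after the call and re-fetch any cached
 * mft record and attribute pointers from @ctx.  The function name is made up.
 */
static errno_t ntfs_example_remap_and_refresh(ntfs_inode *ni, VCN vcn,
		ntfs_attr_search_ctx *ctx, MFT_RECORD **m, ATTR_RECORD **a)
{
	errno_t err;

	err = ntfs_map_runlist_nolock(ni, vcn, ctx);
	if (ctx->is_error) {
		/* @ctx is no longer usable; @ctx->error says why. */
		err = ctx->error;
		ntfs_attr_search_ctx_put(ctx);
		return err;
	}
	/* The mft record may have been remapped; refresh cached pointers. */
	*m = ctx->m;
	*a = ctx->a;
	return err;
}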
+
+/**
+ * ntfs_attr_vcn_to_lcn_nolock - convert a vcn into a lcn given an ntfs inode
+ * @ni:			ntfs inode of the attribute whose runlist to search
+ * @vcn:		vcn to convert
+ * @write_locked:	true if the runlist is locked for writing
+ * @clusters:		optional destination for number of contiguous clusters
+ *
+ * Find the virtual cluster number @vcn in the runlist of the ntfs attribute
+ * described by the ntfs inode @ni and return the corresponding logical cluster
+ * number (lcn).
+ *
+ * If the @vcn is not mapped yet, the attempt is made to map the attribute
+ * extent containing the @vcn and the vcn to lcn conversion is retried.
+ *
+ * If @write_locked is true the caller has locked the runlist for writing and
+ * if false for reading.
+ *
+ * If @clusters is not NULL, on success (i.e. we return >= LCN_HOLE) we return
+ * the number of contiguous clusters after the returned lcn in *@clusters.
+ *
+ * Since lcns must be >= 0, we use negative return codes with special meaning:
+ *
+ * Return code	Meaning / Description
+ * ==========================================
+ *  LCN_HOLE	Hole / not allocated on disk.
+ *  LCN_ENOENT	There is no such vcn in the runlist, i.e. @vcn is out of bounds.
+ *  LCN_ENOMEM	Not enough memory to map runlist.
+ *  LCN_EIO	Critical error (runlist/file is corrupt, i/o error, etc).
+ *
+ * Locking: - The runlist must be locked on entry and is left locked on return.
+ *	    - If @write_locked is FALSE, i.e. the runlist is locked for reading,
+ *	      the lock may be dropped inside the function so you cannot rely on
+ *	      the runlist still being the same when this function returns.
+ */
+LCN ntfs_attr_vcn_to_lcn_nolock(ntfs_inode *ni, const VCN vcn,
+		const BOOL write_locked, s64 *clusters)
+{
+	LCN lcn;
+	BOOL need_lock_switch = FALSE;
+	BOOL is_retry = FALSE;
+
+	ntfs_debug("Entering for mft_no 0x%llx, vcn 0x%llx, %s_locked.",
+			(unsigned long long)ni->mft_no,
+			(unsigned long long)vcn,
+			write_locked ? "write" : "read");
+	if (!NInoNonResident(ni))
+		panic("%s(): !NInoNonResident(ni)\n", __FUNCTION__);
+	if (vcn < 0)
+		panic("%s(): vcn < 0\n", __FUNCTION__);
+retry_remap:
+	if (!ni->rl.elements) {
+		lck_spin_lock(&ni->size_lock);
+		if (!ni->allocated_size) {
+			lck_spin_unlock(&ni->size_lock);
+			lcn = LCN_ENOENT;
+			goto lcn_enoent;
+		}
+		lck_spin_unlock(&ni->size_lock);
+		if (!is_retry)
+			goto try_to_map;
+		lcn = LCN_EIO;
+		goto lcn_eio;
+	}
+	/* Convert vcn to lcn.  If that fails map the runlist and retry once. */
+	lcn = ntfs_rl_vcn_to_lcn(ni->rl.rl, vcn, clusters);
+	if (lcn >= LCN_HOLE) {
+		if (need_lock_switch)
+			lck_rw_lock_exclusive_to_shared(&ni->rl.lock);
+		ntfs_debug("Done (lcn 0x%llx, clusters 0x%llx).",
+				(unsigned long long)lcn,
+				clusters ? (unsigned long long)*clusters : 0);
+		return lcn;
+	}
+	if (lcn != LCN_RL_NOT_MAPPED) {
+		if (lcn != LCN_ENOENT)
+			lcn = LCN_EIO;
+	} else if (!is_retry) {
+		errno_t err;
+
+try_to_map:
+		if (!write_locked && !need_lock_switch) {
+			need_lock_switch = TRUE;
+			/*
+			 * If converting the lock from shared to exclusive
+			 * fails, need to take the lock for writing and retry
+			 * in case the racing process did the mapping for us.
+			 */
+			if (!lck_rw_lock_shared_to_exclusive(&ni->rl.lock)) {
+				lck_rw_lock_exclusive(&ni->rl.lock);
+				goto retry_remap;
+			}
+		}
+		err = ntfs_map_runlist_nolock(ni, vcn, NULL);
+		if (!err) {
+			is_retry = TRUE;
+			goto retry_remap;
+		}
+		switch (err) {
+		case ENOENT:
+			lcn = LCN_ENOENT;
+			break;
+		case ENOMEM:
+			lcn = LCN_ENOMEM;
+			break;
+		default:
+			lcn = LCN_EIO;
+		}
+	}
+lcn_eio:
+	if (need_lock_switch)
+		lck_rw_lock_exclusive_to_shared(&ni->rl.lock);
+	if (lcn == LCN_ENOENT) {
+lcn_enoent:
+		ntfs_debug("Done (LCN_ENOENT).");
+	} else
+		ntfs_error(ni->vol->mp, "Failed (error %lld).", (long long)lcn);
+	return lcn;
+}
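
/*
 * Illustrative caller pattern only, not part of the driver: how the negative
 * LCN_* codes documented above translate into errno values, with holes
 * handled separately (zeroes on read, cluster allocation on write).  Assumes
 * the runlist is locked for reading as required; the function name is made up.
 */
static errno_t ntfs_example_vcn_to_lcn(ntfs_inode *ni, const VCN vcn, LCN *lcn)
{
	s64 clusters;

	*lcn = ntfs_attr_vcn_to_lcn_nolock(ni, vcn, FALSE, &clusters);
	if (*lcn >= 0)
		return 0;	/* Allocated; @clusters clusters follow *@lcn. */
	if (*lcn == LCN_HOLE)
		return 0;	/* Sparse run: supply zeroes / allocate later. */
	if (*lcn == LCN_ENOENT)
		return ENOENT;
	if (*lcn == LCN_ENOMEM)
		return ENOMEM;
	return EIO;
}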
+
+/**
+ * ntfs_attr_find_vcn_nolock - find a vcn in the runlist of an ntfs inode
+ * @ni:		ntfs inode of the attribute whose runlist to search
+ * @vcn:	vcn to find
+ * @run:	return pointer for the found runlist element
+ * @ctx:	active attribute search context if present or NULL if not
+ *
+ * Find the virtual cluster number @vcn in the runlist of the ntfs attribute
+ * described by the ntfs inode @ni and return the address of the runlist
+ * element containing the @vcn in *@run.
+ *
+ * If the @vcn is not mapped yet, the attempt is made to map the attribute
+ * extent containing the @vcn and the vcn to lcn conversion is retried.
+ *
+ * If @ctx is specified, it is an active search context of @ni and its base mft
+ * record.  This is needed when ntfs_attr_find_vcn_nolock() encounters unmapped
+ * runlist fragments and allows their mapping.  If you do not have the mft
+ * record mapped, you can specify @ctx as NULL and ntfs_attr_find_vcn_nolock()
+ * will perform the necessary mapping and unmapping.
+ *
+ * Note, ntfs_attr_find_vcn_nolock() saves the state of @ctx on entry and
+ * restores it before returning.  Thus, @ctx will be left pointing to the same
+ * attribute on return as on entry.  However, the actual pointers in @ctx may
+ * point to different memory locations on return, so you must remember to reset
+ * any cached pointers from the @ctx, i.e. after the call to
+ * ntfs_attr_find_vcn_nolock(), you will probably want to do:
+ *	m = ctx->m;
+ *	a = ctx->a;
+ * Assuming you cache ctx->a in a variable @a of type ATTR_RECORD * and that
+ * you cache ctx->m in a variable @m of type MFT_RECORD *.
+ * Note you need to distinguish between the lcn of the returned runlist element
+ * being >= 0 and LCN_HOLE.  In the latter case you have to return zeroes on
+ * read and allocate clusters on write.
+ *
+ * Return 0 on success and errno on error.
+ *
+ * The possible error return codes are:
+ *	ENOENT	- No such vcn in the runlist, i.e. @vcn is out of bounds.
+ *	ENOMEM	- Not enough memory to map runlist.
+ *	EIO	- Critical error (runlist/file is corrupt, i/o error, etc).
+ *
+ * WARNING: If @ctx is supplied, regardless of whether success or failure is
+ *	    returned, you need to check @ctx->is_error and if 1 the @ctx is no
+ *	    longer valid, i.e. you need to either call
+ *	    ntfs_attr_search_ctx_reinit() or ntfs_attr_search_ctx_put() on it.
+ *	    In that case @ctx->error will give you the error code for why the
+ *	    mapping of the old inode failed.
+ *	    Also if @ctx is supplied and the current attribute (or the mft
+ *	    record it is in) has been modified then the caller must call
+ *	    NInoSetMrecNeedsDirtying(ctx->ni); before calling
+ *	    ntfs_map_runlist_nolock() or the changes may be lost.
+ *
+ * Locking: - The runlist described by @ni must be locked for writing on entry
+ *	      and is locked on return.  Note the runlist may be modified when
+ *	      needed runlist fragments need to be mapped.
+ *	    - If @ctx is NULL, the base mft record of @ni must not be mapped on
+ *	      entry and it will be left unmapped on return.
+ *	    - If @ctx is not NULL, the base mft record must be mapped on entry
+ *	      and it will be left mapped on return.
+ */
+errno_t ntfs_attr_find_vcn_nolock(ntfs_inode *ni, const VCN vcn,
+		ntfs_rl_element **run, ntfs_attr_search_ctx *ctx)
+{
+	ntfs_rl_element *rl;
+	errno_t err = 0;
+	BOOL is_retry = FALSE;
+
+	ntfs_debug("Entering for mft_no 0x%llx, vcn 0x%llx, with%s ctx.",
+			(unsigned long long)ni->mft_no,
+			(unsigned long long)vcn, ctx ? "" : "out");
+	if (!NInoNonResident(ni))
+		panic("%s(): !NInoNonResident(ni)\n", __FUNCTION__);
+	if (vcn < 0)
+		panic("%s(): vcn < 0\n", __FUNCTION__);
+retry_remap:
+	if (!ni->rl.elements) {
+		lck_spin_lock(&ni->size_lock);
+		if (!ni->allocated_size) {
+			lck_spin_unlock(&ni->size_lock);
+			return ENOENT;
+		}
+		lck_spin_unlock(&ni->size_lock);
+		if (!is_retry)
+			goto try_to_map;
+		err = EIO;
+		goto err;
+	}
+	rl = ni->rl.rl;
+	if (vcn >= rl[0].vcn) {
+		while (rl->length) {
+			if (vcn < rl[1].vcn) {
+				if (rl->lcn >= LCN_HOLE) {
+					ntfs_debug("Done.");
+					*run = rl;
+					return 0;
+				}
+				break;
+			}
+			rl++;
+		}
+		if (rl->lcn != LCN_RL_NOT_MAPPED) {
+			if (rl->lcn == LCN_ENOENT)
+				err = ENOENT;
+			else
+				err = EIO;
+		}
+	}
+	if (!err && !is_retry) {
+		/*
+		 * If the search context is invalid we cannot map the unmapped
+		 * region.
+		 */
+		if (ctx && ctx->is_error)
+			err = ctx->error;
+		else {
+try_to_map:
+			/*
+			 * The @vcn is in an unmapped region, map the runlist
+			 * and retry.
+			 */
+			err = ntfs_map_runlist_nolock(ni, vcn, ctx);
+			if (!err) {
+				is_retry = TRUE;
+				goto retry_remap;
+			}
+		}
+		if (err == EINVAL)
+			err = EIO;
+	} else if (!err)
+		err = EIO;
+err:
+	if (err != ENOENT)
+		ntfs_error(ni->vol->mp, "Failed (error %d).", err);
+	return err;
+}
+
+/**
+ * ntfs_attr_search_ctx_reinit - reinitialize an attribute search context
+ * @ctx:	attribute search context to reinitialize
+ *
+ * Reinitialize the attribute search context @ctx, unmapping an associated
+ * extent mft record if present, and initialize the search context again.
+ *
+ * This is used when a search for a new attribute is being started to reset
+ * the search context to the beginning.
+ *
+ * Note: We preserve the content of @ctx->is_mft_locked so that reinitializing
+ * a search context can also be done when dealing with the mft itself.
+ */
+void ntfs_attr_search_ctx_reinit(ntfs_attr_search_ctx *ctx)
+{
+	const BOOL mft_is_locked = ctx->is_mft_locked;
+
+	if (!ctx->base_ni) {
+		/* No attribute list. */
+		ctx->is_first = 1;
+		ctx->is_iteration = 0;
+		/* Sanity checks are performed elsewhere. */
+		ctx->a = (ATTR_RECORD*)((u8*)ctx->m +
+				le16_to_cpu(ctx->m->attrs_offset));
+		/*
+		 * This needs resetting due to
+		 * ntfs_attr_find_in_attribute_list() which can leave it set
+		 * despite having zeroed ctx->base_ni.
+		 */
+		ctx->al_entry = NULL;
+		return;
+	}
+	/* Attribute list. */
+	if (ctx->ni != ctx->base_ni)
+		ntfs_extent_mft_record_unmap(ctx->ni);
+	ntfs_attr_search_ctx_init(ctx, ctx->base_ni, ctx->base_m);
+	if (mft_is_locked)
+		ctx->is_mft_locked = 1;
+}
+
+/**
+ * ntfs_attr_search_ctx_get - allocate and init a new attribute search context
+ * @ni:		ntfs inode with which to initialize the search context
+ * @m:		mft record with which to initialize the search context
+ *
+ * Allocate a new attribute search context, initialize it with @ni and @m, and
+ * return it.  Return NULL if allocation failed.
+ */
+ntfs_attr_search_ctx *ntfs_attr_search_ctx_get(ntfs_inode *ni, MFT_RECORD *m)
+{
+	ntfs_attr_search_ctx *ctx;
+
+	ctx = OSMalloc(sizeof(ntfs_attr_search_ctx), ntfs_malloc_tag);
+	if (ctx)
+		ntfs_attr_search_ctx_init(ctx, ni, m);
+	return ctx;
+}
+
+/**
+ * ntfs_attr_search_ctx_put - release an attribute search context
+ * @ctx:	attribute search context to free
+ *
+ * Release the attribute search context @ctx, unmapping an associated extent
+ * mft record if present.
+ */
+void ntfs_attr_search_ctx_put(ntfs_attr_search_ctx *ctx)
+{
+	if (ctx->base_ni && ctx->ni != ctx->base_ni)
+		ntfs_extent_mft_record_unmap(ctx->ni);
+	OSFree(ctx, sizeof(ntfs_attr_search_ctx), ntfs_malloc_tag);
+}
+
+/**
+ * ntfs_attr_find_in_mft_record - find (next) attribute in mft record
+ * @type:	attribute type to find
+ * @name:	attribute name to find (optional, i.e. NULL means do not care)
+ * @name_len:	attribute name length (only needed if @name present)
+ * @val:	attribute value to find (optional, resident attributes only)
+ * @val_len:	attribute value length (only needed if @val present)
+ * @ctx:	search context with mft record and attribute to search from
+ *
+ * You should not need to call this function directly.  Use ntfs_attr_lookup()
+ * instead.
+ *
+ * ntfs_attr_find_in_mft_record() takes a search context @ctx as parameter and
+ * searches the mft record specified by @ctx->m, beginning at @ctx->a, for an
+ * attribute of @type, optionally @name and @val.
+ *
+ * If the attribute is found, ntfs_attr_find_in_mft_record() returns 0 and
+ * @ctx->a is set to point to the found attribute.
+ *
+ * If the attribute is not found, ENOENT is returned and @ctx->a is set to
+ * point to the attribute before which the attribute being searched for would
+ * need to be inserted if such an action were to be desired.
+ *
+ * On actual error, ntfs_attr_find_in_mft_record() returns EIO.  In this case
+ * @ctx->a is undefined and in particular do not rely on it not having changed.
+ *
+ * If @ctx->is_first is 1, the search begins with @ctx->a itself.  If it is 0,
+ * the search begins after @ctx->a.
+ *
+ * If @ctx->is_iteration is 1 and @type is AT_UNUSED this is not a search but
+ * an iteration in which case each attribute in the mft record is returned in
+ * turn with each call to ntfs_attr_find_in_mft_record().  Note all attributes
+ * are returned, including the attribute list attribute, unlike when
+ * @ctx->is_iteration is 0, in which case the attribute list attribute is only
+ * returned if it is specifically looked for.
+ *
+ * Similarly to the above, when @ctx->is_iteration is 1 and @type is not
+ * AT_UNUSED all attributes of type @type are returned one after the other.
+ *
+ * If @name is AT_UNNAMED search for an unnamed attribute.  If @name is present
+ * but not AT_UNNAMED search for a named attribute matching @name.  Otherwise,
+ * match both named and unnamed attributes.
+ *
+ * Finally, the resident attribute value @val is looked for, if present.  If
+ * @val is not present (NULL), @val_len is ignored.
+ *
+ * ntfs_attr_find_in_mft_record() only searches the specified mft record and it
+ * ignores the presence of an attribute list attribute (unless it is the one
+ * being searched for, obviously).  If you need to take attribute lists into
+ * consideration, use ntfs_attr_lookup() instead (see below).  This also means
+ * that you cannot use ntfs_attr_find_in_mft_record() to search for extent
+ * records of non-resident attributes, as extents with lowest_vcn != 0 are
+ * usually described by the attribute list attribute only.  Note that it is
+ * possible that the first extent is only in the attribute list while the last
+ * extent is in the base mft record, so do not rely on being able to find the
+ * first extent in the base mft record.
+ *
+ * Warning: Never use @val when looking for attribute types which can be
+ *	    non-resident as this most likely will result in a crash!
+ *
+ * Note that if the volume is mounted case sensitive we treat attribute names
+ * as case sensitive, and if it is not we treat attribute names as case
+ * insensitive as well.
+ */
+errno_t ntfs_attr_find_in_mft_record(const ATTR_TYPE type,
+		const ntfschar *name, const u32 name_len,
+		const void *val, const u32 val_len, ntfs_attr_search_ctx *ctx)
+{
+	ATTR_RECORD *a;
+	ntfs_volume *vol = ctx->ni->vol;
+	const ntfschar *upcase = vol->upcase;
+	const u32 upcase_len = vol->upcase_len;
+	const BOOL case_sensitive = NVolCaseSensitive(vol);
+	const BOOL is_iteration = ctx->is_iteration;
+
+	/*
+	 * Iterate over the attributes in the mft record, starting at @ctx->a
+	 * if @ctx->is_first is true, or at the attribute following it if not.
+	 */
+	if (ctx->is_first) {
+		a = ctx->a;
+		ctx->is_first = 0;
+	} else
+		a = (ATTR_RECORD*)((u8*)ctx->a + le32_to_cpu(ctx->a->length));
+	for (;;	a = (ATTR_RECORD*)((u8*)a + le32_to_cpu(a->length))) {
+		if ((u8*)a < (u8*)ctx->m || (u8*)a > (u8*)ctx->m +
+				le32_to_cpu(ctx->m->bytes_allocated))
+			break;
+		ctx->a = a;
+		if (((!is_iteration || type != AT_UNUSED) &&
+				le32_to_cpu(a->type) > le32_to_cpu(type)) ||
+				a->type == AT_END)
+			return ENOENT;
+		if (!a->length)
+			break;
+		if (is_iteration) {
+			if (type == AT_UNUSED || type == a->type)
+				return 0;
+		}
+		if (a->type != type)

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***

