socsvn commit: r237313 - soc2012/vbotton/ntfs_apple

vbotton at FreeBSD.org
Fri Jun 8 15:21:02 UTC 2012


Author: vbotton
Date: Fri Jun  8 15:20:59 2012
New Revision: 237313
URL: http://svnweb.FreeBSD.org/socsvn/?view=rev&rev=237313

Log:
  Replace OS X/Linux style endianness conversion calls (le*_to_cpu, cpu_to_le*, sle64_to_cpu) with their FreeBSD <sys/endian.h> counterparts (le*toh, htole*); change ino64_t to ino_t.
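
For context, a minimal illustration of the renaming this commit applies
throughout the tree.  The change rewrites call sites directly rather than
adding a compatibility shim; the #define form below only shows how the
OS X/Linux style helpers map onto FreeBSD's <sys/endian.h> primitives
(the sle64toh()/le16tohp() style variants seen in the diff are driver-local
conventions rather than standard macros):

    /* Illustrative mapping only -- not part of this commit. */
    #include <sys/endian.h>

    #define le16_to_cpu(x)  le16toh(x)   /* 16-bit little-endian to host */
    #define le32_to_cpu(x)  le32toh(x)   /* 32-bit little-endian to host */
    #define le64_to_cpu(x)  le64toh(x)   /* 64-bit little-endian to host */
    #define cpu_to_le16(x)  htole16(x)   /* host to 16-bit little-endian */
    #define cpu_to_le32(x)  htole32(x)   /* host to 32-bit little-endian */
    #define cpu_to_le64(x)  htole64(x)   /* host to 64-bit little-endian */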

Modified:
  soc2012/vbotton/ntfs_apple/ntfs_attr.c
  soc2012/vbotton/ntfs_apple/ntfs_attr_list.c
  soc2012/vbotton/ntfs_apple/ntfs_collate.c
  soc2012/vbotton/ntfs_apple/ntfs_collate.h
  soc2012/vbotton/ntfs_apple/ntfs_compress.c
  soc2012/vbotton/ntfs_apple/ntfs_debug.c
  soc2012/vbotton/ntfs_apple/ntfs_dir.c
  soc2012/vbotton/ntfs_apple/ntfs_hash.c
  soc2012/vbotton/ntfs_apple/ntfs_index.c
  soc2012/vbotton/ntfs_apple/ntfs_index.h
  soc2012/vbotton/ntfs_apple/ntfs_inode.c
  soc2012/vbotton/ntfs_apple/ntfs_inode.h
  soc2012/vbotton/ntfs_apple/ntfs_layout.h
  soc2012/vbotton/ntfs_apple/ntfs_logfile.c
  soc2012/vbotton/ntfs_apple/ntfs_mft.c
  soc2012/vbotton/ntfs_apple/ntfs_mst.c
  soc2012/vbotton/ntfs_apple/ntfs_quota.c
  soc2012/vbotton/ntfs_apple/ntfs_runlist.c
  soc2012/vbotton/ntfs_apple/ntfs_secure.c
  soc2012/vbotton/ntfs_apple/ntfs_secure.h
  soc2012/vbotton/ntfs_apple/ntfs_types.h
  soc2012/vbotton/ntfs_apple/ntfs_unistr.c
  soc2012/vbotton/ntfs_apple/ntfs_usnjrnl.c
  soc2012/vbotton/ntfs_apple/ntfs_vfsops.c
  soc2012/vbotton/ntfs_apple/ntfs_vnops.c

Modified: soc2012/vbotton/ntfs_apple/ntfs_attr.c
==============================================================================
--- soc2012/vbotton/ntfs_apple/ntfs_attr.c	Fri Jun  8 15:10:13 2012	(r237312)
+++ soc2012/vbotton/ntfs_apple/ntfs_attr.c	Fri Jun  8 15:20:59 2012	(r237313)
@@ -1121,7 +1121,7 @@
 			} else {
 				/* We want an extent record. */
 				err = ntfs_extent_mft_record_map_ext(base_ni,
-						le64_to_cpu(
+						le64toh(
 						al_entry->mft_reference), &ni,
 						&ctx->m, ctx->is_mft_locked);
 				if (err) {
@@ -1493,7 +1493,7 @@
 	memmove(a, (u8*)a + le32toh(a->length),
 			new_muse - ((u8*)a - (u8*)m));
 	/* Adjust @m to reflect the change in used space. */
-	m->bytes_in_use = cpu_to_le32(new_muse);
+	m->bytes_in_use = htole32(new_muse);
 }
 
 /**
@@ -1726,7 +1726,7 @@
 	/* Move attributes starting with @a to make space of @size bytes. */
 	memmove((u8*)a + size, a, muse - ((u8*)a - (u8*)m));
 	/* Adjust @m to reflect the change in used space. */
-	m->bytes_in_use = cpu_to_le32(new_muse);
+	m->bytes_in_use = htole32(new_muse);
 	/* Clear the created space so we start with a clean slate. */
 	bzero(a, size);
 	/*
@@ -1734,7 +1734,7 @@
 	 * We do this here so that the caller does not need to worry about
 	 * rounding up the size to set the attribute length.
 	 */
-	a->length = cpu_to_le32(size);
+	a->length = htole32(size);
 	return 0;
 }
 
@@ -1775,10 +1775,10 @@
 		memmove((u8*)a + new_size, (u8*)a + old_size,
 				muse - ((u8*)a - (u8*)m) - old_size);
 		/* Adjust @m to reflect the change in used space. */
-		m->bytes_in_use = cpu_to_le32(new_muse);
+		m->bytes_in_use = htole32(new_muse);
 		/* Adjust @a to reflect the new size. */
 		if (new_size >= offsetof(ATTR_REC, length) + sizeof(a->length))
-			a->length = cpu_to_le32(new_size);
+			a->length = htole32(new_size);
 	}
 	return 0;
 }
@@ -2368,7 +2368,7 @@
 	 */
 	m->next_attr_instance = htole16(
 			(le16toh(m->next_attr_instance) + 1) & 0xffff);
-	a->value_length = cpu_to_le32(val_len);
+	a->value_length = htole32(val_len);
 	a->value_offset = htole16(val_ofs);
 	if (type == AT_FILENAME)
 		a->resident_flags = RESIDENT_ATTR_IS_INDEXED;
@@ -2590,7 +2590,7 @@
 	 */
 	m->next_attr_instance = htole16(
 			(le16toh(m->next_attr_instance) + 1) & 0xffff);
-	a->value_length = cpu_to_le32(val_len);
+	a->value_length = htole32(val_len);
 	a->value_offset = htole16(val_ofs);
 	if (type == AT_FILENAME)
 		a->resident_flags = RESIDENT_ATTR_IS_INDEXED;
@@ -2895,7 +2895,7 @@
 		bzero((u8*)a + le16toh(a->value_offset) + old_size,
 				new_size - old_size);
 	/* Finally update the length of the attribute value. */
-	a->value_length = cpu_to_le32(new_size);
+	a->value_length = htole32(new_size);
 	return 0;
 }
 
@@ -3978,7 +3978,7 @@
 							"le16toh("
 							"a->value_offset)\n",
 							__FUNCTION__);
-				a->value_length = cpu_to_le32(new_init_size);
+				a->value_length = htole32(new_init_size);
 			}
 			data_size_updated = TRUE;
 			if (ni == base_ni && !S_ISDIR(ni->mode))
@@ -4135,7 +4135,7 @@
 	 */
 	kattr = (u8*)a + le16toh(a->value_offset);
 	bzero(kattr + attr_len, new_init_size - attr_len);
-	a->value_length = cpu_to_le32((u32)new_init_size);
+	a->value_length = htole32((u32)new_init_size);
 	/* Update the sizes in the ntfs inode as well as the ubc size. */
 	mtx_lock_spin(&ni->size_lock);
 	ni->initialized_size = ni->data_size = size = new_init_size;
@@ -6131,14 +6131,14 @@
 					(u32)new_data_size - attr_len);
 			mtx_lock_spin(&ni->size_lock);
 			ni->initialized_size = ni->data_size = new_data_size;
-			a->value_length = cpu_to_le32((u32)new_data_size);
+			a->value_length = htole32((u32)new_data_size);
 		} else
 			mtx_lock_spin(&ni->size_lock);
 		ni->allocated_size = le32toh(a->length) -
 				le16toh(a->value_offset);
 		mtx_unlock_spin(&ni->size_lock);
 		if (new_data_size > attr_len)
-			a->value_length = cpu_to_le32((u32)new_data_size);
+			a->value_length = htole32((u32)new_data_size);
 		goto dirty_done;
 	}
 	/*
@@ -8544,7 +8544,7 @@
 		} else {
 			/* We want an extent mft record. */
 			err = ntfs_extent_mft_record_map(base_ni,
-					le64_to_cpu(al_entry->mft_reference),
+					le64toh(al_entry->mft_reference),
 					&eni, &m);
 			if (err) {
 				ntfs_error(vol->mp, "Failed to map extent mft "

Modified: soc2012/vbotton/ntfs_apple/ntfs_attr_list.c
==============================================================================
--- soc2012/vbotton/ntfs_apple/ntfs_attr_list.c	Fri Jun  8 15:10:13 2012	(r237312)
+++ soc2012/vbotton/ntfs_apple/ntfs_attr_list.c	Fri Jun  8 15:20:59 2012	(r237313)
@@ -92,7 +92,7 @@
 	entry = (ATTR_LIST_ENTRY*)al;
 	al_end = (u8*)al + ni->attr_list_size;
 	for (;; entry = (ATTR_LIST_ENTRY*)((u8*)entry +
-			le16_to_cpu(entry->length))) {
+			le16toh(entry->length))) {
 		/* Out of bounds check. */
 		if ((u8*)entry < al || (u8*)entry > al_end)
 			goto err;
@@ -278,7 +278,7 @@
 	mref = MK_LE_MREF(base_ni->mft_no, base_ni->seq_no);
 	/* Maximum number of usable bytes in the mft record. */
 	bytes_free = le32_to_cpu(m->bytes_allocated) -
-			le16_to_cpu(m->attrs_offset);
+			le16toh(m->attrs_offset);
 	do {
 		unsigned al_entry_len, len;
 
@@ -374,7 +374,7 @@
 		/* Copy the name if the attribute is named. */
 		if (a->name_length)
 			memcpy(&al_entry->name,
-					(u8*)a + le16_to_cpu(a->name_offset),
+					(u8*)a + le16toh(a->name_offset),
 					a->name_length * sizeof(ntfschar));
 		/* Zero the padding area at the end if it exists. */
 		if (al_entry_len - len > 0)
@@ -416,7 +416,7 @@
 			attr_len > le32_to_cpu(m->bytes_allocated) -
 			le32_to_cpu(m->bytes_in_use);
 			al_entry = (ATTR_LIST_ENTRY*)((u8*)al_entry +
-			le16_to_cpu(al_entry->length))) {
+			le16toh(al_entry->length))) {
 		/* Get the next attribute in the mft record. */
 		err = ntfs_attr_find_in_mft_record(AT_UNUSED, NULL, 0, NULL, 0,
 				&al_ctx);
@@ -528,7 +528,7 @@
 		err = ntfs_resident_attr_record_insert_internal(m, a,
 				AT_ATTRIBUTE_LIST, NULL, 0, al_size);
 		if (!err) {
-			memcpy((u8*)a + le16_to_cpu(a->value_offset), al,
+			memcpy((u8*)a + le16toh(a->value_offset), al,
 					al_size);
 			/*
 			 * If we already allocated some clusters in a previous
@@ -720,7 +720,7 @@
 		 * But first find the attribute list entry matching the
 		 * attribute record so it can be updated.
 		 */
-		a_name = (ntfschar*)((u8*)a + le16_to_cpu(a->name_offset));
+		a_name = (ntfschar*)((u8*)a + le16toh(a->name_offset));
 		al_entry = (ATTR_LIST_ENTRY*)base_ni->attr_list;
 		do {
 			/*
@@ -754,7 +754,7 @@
 			}
 			/* Go to the next attribute list entry. */
 			al_entry = (ATTR_LIST_ENTRY*)((u8*)al_entry +
-					le16_to_cpu(al_entry->length));
+					le16toh(al_entry->length));
 		} while (1);
 		err = ntfs_attr_record_move_for_attr_list_attribute(&al_ctx,
 				al_entry, ctx, NULL);
@@ -789,7 +789,7 @@
 	 * consumed the old one.
 	 */
 	m->next_attr_instance = htole16(
-			(le16_to_cpu(m->next_attr_instance) + 1) & 0xffff);
+			(le16toh(m->next_attr_instance) + 1) & 0xffff);
 	a->highest_vcn = cpu_to_sle64((attr_len / vol->cluster_size) - 1);
 	a->allocated_size = cpu_to_sle64(attr_len);
 	a->initialized_size = a->data_size = cpu_to_sle64(al_size);
@@ -888,7 +888,7 @@
 		ctx->base_ni = base_ni;
 		ctx->base_m = m;
 		ctx->base_a = (ATTR_RECORD*)((u8*)m +
-				le16_to_cpu(m->attrs_offset));
+				le16toh(m->attrs_offset));
 	}
 	ntfs_debug("Done.");
 	return 0;
@@ -926,7 +926,7 @@
 	al_end = al + al_size;
 	for (al_entry = (ATTR_LIST_ENTRY*)al; (u8*)al_entry < al_end;
 			al_entry = (ATTR_LIST_ENTRY*)((u8*)al_entry +
-			le16_to_cpu(al_entry->length))) {
+			le16toh(al_entry->length))) {
 		ntfs_inode *ni;
 		ntfschar *a_name;
 		u8 *val;
@@ -938,7 +938,7 @@
 retry_undo_map:
 		/* Find the extent record in the base ntfs inode. */
 		err2 = ntfs_extent_mft_record_map(base_ni,
-				le64_to_cpu(al_entry->mft_reference), &ni, &m);
+				le64toh(al_entry->mft_reference), &ni, &m);
 		if (err2) {
 			if (err2 == ENOMEM)
 				goto retry_undo_map;
@@ -969,7 +969,7 @@
 			continue;
 		}
 		/* Find the attribute record that needs moving back. */
-		a = (ATTR_RECORD*)((u8*)m + le16_to_cpu(m->attrs_offset));
+		a = (ATTR_RECORD*)((u8*)m + le16toh(m->attrs_offset));
 		if (a->type == AT_END)
 			panic("%s(): a->type == AT_END\n", __FUNCTION__);
 		while (a->instance != al_entry->instance) {
@@ -982,7 +982,7 @@
 		if (al_entry->type != a->type)
 			panic("%s(): al_entry->type != a->type\n",
 					__FUNCTION__);
-		a_name = (ntfschar*)((u8*)a + le16_to_cpu(a->name_offset));
+		a_name = (ntfschar*)((u8*)a + le16toh(a->name_offset));
 		if (!ntfs_are_names_equal((ntfschar*)((u8*)al_entry +
 				al_entry->name_offset), al_entry->name_length,
 				a_name, a->name_length, TRUE, vol->upcase,
@@ -998,7 +998,7 @@
 		 * attribute.
 		 */
 		if (a->type == AT_FILENAME) {
-			val = (u8*)a + le16_to_cpu(a->value_offset);
+			val = (u8*)a + le16toh(a->value_offset);
 			val_len = le32_to_cpu(a->value_length);
 		} else {
 			val = NULL;
@@ -1038,7 +1038,7 @@
 		 * Increment the next attribute instance number in the mft
 		 * record as we consumed the old one.
 		 */
-		al_ctx.m->next_attr_instance = htole16((le16_to_cpu(
+		al_ctx.m->next_attr_instance = htole16((le16toh(
 				al_ctx.m->next_attr_instance) + 1) & 0xffff);
 		/*
 		 * If this is the only attribute in the extent mft record, free
@@ -1181,7 +1181,7 @@
 update_resident:
 		/* Update the part of the attribute value that has changed. */
 		if (start_ofs < ni->attr_list_size) {
-			memcpy((u8*)a + le16_to_cpu(a->value_offset) +
+			memcpy((u8*)a + le16toh(a->value_offset) +
 					start_ofs, ni->attr_list + start_ofs,
 					ni->attr_list_size - start_ofs);
 			dirty_mft = TRUE;
@@ -1194,20 +1194,20 @@
 	 */
 	rw_wlock(&ni->attr_list_rl.lock);
 	/* Update the attribute sizes. */
-	if (ni->attr_list_size < sle64_to_cpu(a->initialized_size))
+	if (ni->attr_list_size < sle64toh(a->initialized_size))
 		a->initialized_size = cpu_to_sle64(ni->attr_list_size);
 	a->data_size = cpu_to_sle64(ni->attr_list_size);
 	dirty_mft = TRUE;
 	alloc_size = (ni->attr_list_size + vol->cluster_size_mask) &
 			~vol->cluster_size_mask;
-	if (alloc_size > sle64_to_cpu(a->allocated_size))
-		panic("%s(): alloc_size > sle64_to_cpu(a->allocated_size)\n",
+	if (alloc_size > sle64toh(a->allocated_size))
+		panic("%s(): alloc_size > sle64toh(a->allocated_size)\n",
 				__FUNCTION__);
 	/*
 	 * If the attribute allocation has not changed we are done with the
 	 * resize and go straight onto the data update.
 	 */
-	if (alloc_size == sle64_to_cpu(a->allocated_size))
+	if (alloc_size == sle64toh(a->allocated_size))
 		goto update_non_resident;
 	/*
 	 * The allocated size has shrunk by at least one cluster thus we need
@@ -1246,7 +1246,7 @@
 		goto err;
 	}
 	err = ntfs_mapping_pairs_build(vol, (s8*)a +
-			le16_to_cpu(a->mapping_pairs_offset), mp_size,
+			le16toh(a->mapping_pairs_offset), mp_size,
 			ni->attr_list_rl.rl, 0, -1, NULL);
 	if (err) {
 		ntfs_error(vol->mp, "Cannot shrink attribute list attribute "
@@ -1259,7 +1259,7 @@
 	a->highest_vcn = cpu_to_sle64((alloc_size >> vol->cluster_size_shift) -
 			1);
 	err = ntfs_attr_record_resize(ctx->m, a,
-			le16_to_cpu(a->mapping_pairs_offset) + mp_size);
+			le16toh(a->mapping_pairs_offset) + mp_size);
 	/* Shrinking the attribute record cannot fail. */
 	if (err)
 		panic("%s(): err (non-resident)\n", __FUNCTION__);
@@ -1411,7 +1411,7 @@
 		 * Update the part of the attribute list attribute record value
 		 * that has changed.
 		 */
-		memcpy((u8*)al_a + le16_to_cpu(al_a->value_offset) + al_ofs,
+		memcpy((u8*)al_a + le16toh(al_a->value_offset) + al_ofs,
 				base_ni->attr_list + al_ofs,
 				base_ni->attr_list_size - al_ofs);
 		goto done;
@@ -1429,7 +1429,7 @@
 			((le32_to_cpu(al_a->value_length) + 7) & ~7);
 	/* Maximum number of usable bytes in the mft record. */
 	bytes_free = le32_to_cpu(base_m->bytes_allocated) -
-			le16_to_cpu(base_m->attrs_offset);
+			le16toh(base_m->attrs_offset);
 	/*
 	 * If the attribute list attribute has become bigger than fits in an
 	 * mft record switch it to a non-resident attribute record.
@@ -1540,7 +1540,7 @@
 		 * But first find the attribute list entry matching the
 		 * attribute record so it can be updated.
 		 */
-		a_name = (ntfschar*)((u8*)a + le16_to_cpu(a->name_offset));
+		a_name = (ntfschar*)((u8*)a + le16toh(a->name_offset));
 		al_entry = (ATTR_LIST_ENTRY*)base_ni->attr_list;
 		al_end = base_ni->attr_list + base_ni->attr_list_size;
 		do {
@@ -1590,7 +1590,7 @@
 			}
 			/* Go to the next attribute list entry. */
 			al_entry = (ATTR_LIST_ENTRY*)((u8*)al_entry +
-					le16_to_cpu(al_entry->length));
+					le16toh(al_entry->length));
 		} while (1);
 		err = ntfs_attr_record_move_for_attr_list_attribute(&actx,
 				al_entry, ctx, &remap_needed);
@@ -1639,7 +1639,7 @@
 		 * Update the part of the attribute list attribute record value
 		 * that has changed.
 		 */
-		memcpy((u8*)al_a + le16_to_cpu(al_a->value_offset) + al_ofs,
+		memcpy((u8*)al_a + le16toh(al_a->value_offset) + al_ofs,
 				base_ni->attr_list + al_ofs,
 				base_ni->attr_list_size - al_ofs);
 		/*
@@ -1743,7 +1743,7 @@
 	if (!al_a->non_resident)
 		panic("%s(): !al_a->non_resident\n", __FUNCTION__);
 	/* Allocate more disk space if needed. */
-	if (base_ni->attr_list_size <= sle64_to_cpu(al_a->allocated_size))
+	if (base_ni->attr_list_size <= sle64toh(al_a->allocated_size))
 		goto skip_alloc;
 	/* Work out the new allocated size we need. */
 	alloc_size = (base_ni->attr_list_size + vol->cluster_size_mask) &
@@ -1771,9 +1771,9 @@
 	 */
 	runlist.rl = NULL;
 	runlist.alloc = runlist.elements = 0;
-	err = ntfs_cluster_alloc(vol, sle64_to_cpu(al_a->allocated_size) >>
+	err = ntfs_cluster_alloc(vol, sle64toh(al_a->allocated_size) >>
 			vol->cluster_size_shift, (alloc_size -
-			sle64_to_cpu(al_a->allocated_size)) >>
+			sle64toh(al_a->allocated_size)) >>
 			vol->cluster_size_shift, lcn, DATA_ZONE, TRUE,
 			&runlist);
 	if (err) {
@@ -1781,7 +1781,7 @@
 				"non-resident attribute list attribute in "
 				"base mft record 0x%llx (error %d).",
 				((alloc_size -
-				sle64_to_cpu(al_a->allocated_size)) >>
+				sle64toh(al_a->allocated_size)) >>
 				vol->cluster_size_shift) > 1 ? "s" : "",
 				(unsigned long long)base_ni->mft_no, err);
 		goto unl_done;
@@ -1809,7 +1809,7 @@
 	if (err)
 		panic("%s(): err (ntfs_get_size_for_mapping_pairs(), "
 				"non-resident)\n", __FUNCTION__);
-	mp_ofs = le16_to_cpu(al_a->mapping_pairs_offset);
+	mp_ofs = le16toh(al_a->mapping_pairs_offset);
 	old_arec_size = le32_to_cpu(al_a->length);
 	/* Extend the attribute record to fit the bigger mapping pairs array. */
 	err = ntfs_attr_record_resize(base_m, al_a, mp_ofs + mp_size);
@@ -1975,7 +1975,7 @@
 		 * But first find the attribute list entry matching the
 		 * attribute record so it can be updated.
 		 */
-		a_name = (ntfschar*)((u8*)a + le16_to_cpu(a->name_offset));
+		a_name = (ntfschar*)((u8*)a + le16toh(a->name_offset));
 		al_entry = (ATTR_LIST_ENTRY*)base_ni->attr_list;
 		al_end = base_ni->attr_list + base_ni->attr_list_size;
 		do {
@@ -2025,7 +2025,7 @@
 			}
 			/* Go to the next attribute list entry. */
 			al_entry = (ATTR_LIST_ENTRY*)((u8*)al_entry +
-					le16_to_cpu(al_entry->length));
+					le16toh(al_entry->length));
 		} while (1);
 		err = ntfs_attr_record_move_for_attr_list_attribute(&actx,
 				al_entry, ctx, &remap_needed);
@@ -2187,7 +2187,7 @@
 			"end_entry offset 0x%lx, bytes to_delete 0x%x.",
 			(unsigned long long)ni->mft_no,
 			le32_to_cpu(start_entry->type),
-			(unsigned)le16_to_cpu(start_entry->length),
+			(unsigned)le16toh(start_entry->length),
 			(unsigned long)((u8*)start_entry - ni->attr_list),
 			(unsigned long)((u8*)end_entry - ni->attr_list),
 			to_delete);

Modified: soc2012/vbotton/ntfs_apple/ntfs_collate.c
==============================================================================
--- soc2012/vbotton/ntfs_apple/ntfs_collate.c	Fri Jun  8 15:10:13 2012	(r237312)
+++ soc2012/vbotton/ntfs_apple/ntfs_collate.c	Fri Jun  8 15:20:59 2012	(r237313)
@@ -35,6 +35,8 @@
  * http://developer.apple.com/opensource/licenses/gpl-2.txt.
  */
 
+#include <sys/types.h>
+#include <sys/libkern.h>
 
 #include "ntfs_collate.h"
 #include "ntfs_debug.h"
@@ -143,8 +145,8 @@
 		min_len = data2_len;
 	min_len >>= 2;
 	for (i = 0; i < min_len; i++) {
-		const u32 u1 = le32_to_cpu(p1[i]);
-		const u32 u2 = le32_to_cpu(p2[i]);
+		const u32 u1 = le32toh(p1[i]);
+		const u32 u2 = le32toh(p2[i]);
 		if (u1 > u2) {
 			rc = 1;
 			goto out;
@@ -201,7 +203,7 @@
 		const void *data2, const int data2_len) {
 	int i;
 
-	i = le32_to_cpu(cr);
+	i = le32toh(cr);
 	ntfs_debug("Entering (collation rule 0x%x, data1_len 0x%x, data2_len "
 			"0x%x).", i, data1_len, data2_len);
 	/*

Modified: soc2012/vbotton/ntfs_apple/ntfs_collate.h
==============================================================================
--- soc2012/vbotton/ntfs_apple/ntfs_collate.h	Fri Jun  8 15:10:13 2012	(r237312)
+++ soc2012/vbotton/ntfs_apple/ntfs_collate.h	Fri Jun  8 15:20:59 2012	(r237313)
@@ -52,7 +52,7 @@
 	 */
 	if (cr == COLLATION_UNICODE_STRING)
 		return FALSE;
-	i = le32_to_cpu(cr);
+	i = le32toh(cr);
 	if (((i >= 0) && (i <= 0x02)) || ((i >= 0x10) && (i <= 0x13)))
 		return TRUE;
 	return FALSE;

Modified: soc2012/vbotton/ntfs_apple/ntfs_compress.c
==============================================================================
--- soc2012/vbotton/ntfs_apple/ntfs_compress.c	Fri Jun  8 15:10:13 2012	(r237312)
+++ soc2012/vbotton/ntfs_apple/ntfs_compress.c	Fri Jun  8 15:20:59 2012	(r237313)
@@ -37,18 +37,10 @@
 
 #include <sys/errno.h>
 #include <sys/ucred.h>
-#include <sys/ubc.h>
 #include <sys/uio.h>
 #include <sys/types.h>
 
-#include <mach/memory_object_types.h>
-
-#include <string.h>
-
-#include <libkern/OSMalloc.h>
-
-#include <kern/debug.h>
-#include <kern/locks.h>
+#include <sys/lock.h>
 
 #include "ntfs.h"
 #include "ntfs_attr.h"
@@ -378,7 +370,7 @@
 	 * position in the compression block is one byte before its end so the
 	 * first two checks do not detect it.
 	 */
-	if (cb == cb_end || !le16_to_cpup((le16*)cb) || dst == dst_end) {
+	if (cb == cb_end || !le16tohp((le16*)cb) || dst == dst_end) {
 		ntfs_debug("Done.");
 		return 0;
 	}
@@ -393,7 +385,7 @@
 		goto err;
 	/* Setup the current sub-block source pointers and validate range. */
 	cb_sb_start = cb;
-	cb_sb_end = cb + (le16_to_cpup((le16*)cb) & NTFS_SB_SIZE_MASK) + 3;
+	cb_sb_end = cb + (le16tohp((le16*)cb) & NTFS_SB_SIZE_MASK) + 3;
 	if (cb_sb_end > cb_end)
 		goto err;
 	/*
@@ -432,7 +424,7 @@
 		}
 	}
 	/* Now, we are ready to process the current sub-block. */
-	if (!(le16_to_cpup((le16*)cb) & NTFS_SB_IS_COMPRESSED)) {
+	if (!(le16tohp((le16*)cb) & NTFS_SB_IS_COMPRESSED)) {
 		/*
 		 * This sb is not compressed, just copy its data into the
 		 * destination buffer.
@@ -509,7 +501,7 @@
 		for (u = dst - dst_sb_start - 1; u >= 0x10; u >>= 1)
 			lg++;
 		/* Get the phrase token. */
-		pt = le16_to_cpup((u16*)cb);
+		pt = le16tohp((u16*)cb);
 		/*
 		 * Calculate starting position of the byte sequence in the
 		 * destination using the fact that p = (pt >> (12 - lg)) + 1
@@ -814,7 +806,7 @@
 	 */
 	ntfs_debug("Found compressed compression block.");
 	if (!cb) {
-		cb = OSMalloc(cb_size, ntfs_malloc_tag);
+		cb = malloc(cb_size);
 		if (!cb) {
 			ntfs_error(vol->mp, "Not enough memory to allocate "
 					"temporary buffer.");
@@ -881,7 +873,7 @@
 	if (uio)
 		uio_free(uio);
 	if (cb)
-		OSFree(cb, cb_size, ntfs_malloc_tag);
+		free(cb);
 	ntfs_debug("Done.");
 	return 0;
 cl_err:
@@ -900,7 +892,7 @@
 	if (uio)
 		uio_free(uio);
 	if (cb)
-		OSFree(cb, cb_size, ntfs_malloc_tag);
+		free(cb);
 	ntfs_error(vol->mp, "Failed (error %d).", err);
 	return err;
 }
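
A side note on the allocation hunks above: FreeBSD's in-kernel malloc(9)
takes a malloc type and flags, and free(9) takes the type as well, so the
plain malloc(cb_size)/free(cb) calls shown here would normally carry a
driver tag.  A minimal sketch of that idiom, assuming a hypothetical
M_NTFS_TMP tag (not something this commit defines):

    #include <sys/param.h>
    #include <sys/kernel.h>
    #include <sys/malloc.h>

    /* Driver-local allocation tag; the name is illustrative. */
    MALLOC_DEFINE(M_NTFS_TMP, "ntfs_tmp", "NTFS temporary buffers");

    /* Allocate a temporary compression-block buffer, sleeping if needed. */
    static void *
    ntfs_cb_alloc(size_t cb_size)
    {
            return (malloc(cb_size, M_NTFS_TMP, M_WAITOK));
    }

    /* Release a buffer obtained from ntfs_cb_alloc(). */
    static void
    ntfs_cb_free(void *cb)
    {
            free(cb, M_NTFS_TMP);
    }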

Modified: soc2012/vbotton/ntfs_apple/ntfs_debug.c
==============================================================================
--- soc2012/vbotton/ntfs_apple/ntfs_debug.c	Fri Jun  8 15:10:13 2012	(r237312)
+++ soc2012/vbotton/ntfs_apple/ntfs_debug.c	Fri Jun  8 15:20:59 2012	(r237313)
@@ -314,20 +314,20 @@
 	printf("NTFS-fs DEBUG: Dumping attribute list (size 0x%x):\n", size);
 	for (entry = (ATTR_LIST_ENTRY*)al, u = 1; (u8*)entry < end;
 			entry = (ATTR_LIST_ENTRY*)((u8*)entry +
-			le16_to_cpu(entry->length)), u++) {
+			le16toh(entry->length)), u++) {
 		printf("--------------- Entry %u ---------------\n", u);
 		printf("Attribute type: 0x%x\n",
 				(unsigned)le32_to_cpu(entry->type));
 		printf("Record length: 0x%x\n",
-				(unsigned)le16_to_cpu(entry->length));
+				(unsigned)le16toh(entry->length));
 		printf("Name length: 0x%x\n", (unsigned)entry->name_length);
 		printf("Name offset: 0x%x\n", (unsigned)entry->name_offset);
 		printf("Starting VCN: 0x%llx\n", (unsigned long long)
-				sle64_to_cpu(entry->lowest_vcn));
+				sle64toh(entry->lowest_vcn));
 		printf("MFT reference: 0x%llx\n", (unsigned long long)
 				MREF_LE(entry->mft_reference));
 		printf("Instance: 0x%x\n",
-				(unsigned)le16_to_cpu(entry->instance));
+				(unsigned)le16toh(entry->instance));
 	}
 	printf("--------------- End of attribute list ---------------\n");
 }

Modified: soc2012/vbotton/ntfs_apple/ntfs_dir.c
==============================================================================
--- soc2012/vbotton/ntfs_apple/ntfs_dir.c	Fri Jun  8 15:10:13 2012	(r237312)
+++ soc2012/vbotton/ntfs_apple/ntfs_dir.c	Fri Jun  8 15:20:59 2012	(r237313)
@@ -167,7 +167,7 @@
 	 * Get to the index root value (it has been verified in
 	 * ntfs_index_inode_read()).
 	 */
-	ir = (INDEX_ROOT*)((u8*)ctx->a + le16_to_cpu(ctx->a->value_offset));
+	ir = (INDEX_ROOT*)((u8*)ctx->a + le16toh(ctx->a->value_offset));
 	index_end = (u8*)&ir->index + le32_to_cpu(ir->index.index_length);
 	/* The first index entry. */
 	ie = (INDEX_ENTRY*)((u8*)&ir->index +
@@ -176,13 +176,13 @@
 	 * Loop until we exceed valid memory (corruption case) or until we
 	 * reach the last entry.
 	 */
-	for (;; ie = (INDEX_ENTRY*)((u8*)ie + le16_to_cpu(ie->length))) {
+	for (;; ie = (INDEX_ENTRY*)((u8*)ie + le16toh(ie->length))) {
 		ntfs_debug("In index root, offset 0x%x.",
 				(unsigned)((u8*)ie - (u8*)ir));
 		/* Bounds checks. */
 		if ((u8*)ie < (u8*)&ir->index || (u8*)ie +
 				sizeof(INDEX_ENTRY_HEADER) > index_end ||
-				(u8*)ie + le16_to_cpu(ie->key_length) >
+				(u8*)ie + le16toh(ie->key_length) >
 				index_end)
 			goto dir_err;
 		/*
@@ -228,7 +228,7 @@
 						goto put_err;
 					}
 				}
-				name->mref = le64_to_cpu(ie->indexed_file);
+				name->mref = le64toh(ie->indexed_file);
 				name->type = FILENAME_DOS;
 				name->len = len = ie->key.filename.
 						filename_length;
@@ -240,7 +240,7 @@
 							ntfs_malloc_tag);
 				*res_name = NULL;
 			}
-			*res_mref = le64_to_cpu(ie->indexed_file);
+			*res_mref = le64toh(ie->indexed_file);
 			ntfs_attr_search_ctx_put(ctx);
 			ntfs_mft_record_unmap(dir_ni);
 			rw_unlock(&ia_ni->lock);
@@ -279,7 +279,7 @@
 						goto put_err;
 					}
 				}
-				name->mref = le64_to_cpu(ie->indexed_file);
+				name->mref = le64toh(ie->indexed_file);
 				name->type = type;
 				name->len = len = ie->key.filename.
 						filename_length;
@@ -347,7 +347,7 @@
 		goto put_err;
 	}
 	/* Get the starting vcn of the index block holding the child node. */
-	vcn = sle64_to_cpup((sle64*)((u8*)ie + le16_to_cpu(ie->length) - 8));
+	vcn = sle64tohp((sle64*)((u8*)ie + le16toh(ie->length) - 8));
 	/*
 	 * We are done with the index root and the mft record.  Release them,
 	 * otherwise we deadlock with ntfs_page_map().
@@ -387,12 +387,12 @@
 				(unsigned long long)dir_ni->mft_no);
 		goto page_err;
 	}
-	if (sle64_to_cpu(ia->index_block_vcn) != vcn) {
+	if (sle64toh(ia->index_block_vcn) != vcn) {
 		ntfs_error(mp, "Actual VCN (0x%llx) of index buffer is "
 				"different from expected VCN (0x%llx).  "
 				"Directory inode 0x%llx is corrupt or driver "
 				"bug.", (unsigned long long)
-				sle64_to_cpu(ia->index_block_vcn),
+				sle64toh(ia->index_block_vcn),
 				(unsigned long long)vcn,
 				(unsigned long long)dir_ni->mft_no);
 		goto page_err;
@@ -436,11 +436,11 @@
 	 * loop until we exceed valid memory (corruption case) or until we
 	 * reach the last entry.
 	 */
-	for (;; ie = (INDEX_ENTRY*)((u8*)ie + le16_to_cpu(ie->length))) {
+	for (;; ie = (INDEX_ENTRY*)((u8*)ie + le16toh(ie->length))) {
 		/* Bounds check. */
 		if ((u8*)ie < (u8*)&ia->index || (u8*)ie +
 				sizeof(INDEX_ENTRY_HEADER) > index_end ||
-				(u8*)ie + le16_to_cpu(ie->key_length) >
+				(u8*)ie + le16toh(ie->key_length) >
 				index_end) {
 			ntfs_error(mp, "Index entry out of bounds in "
 					"directory inode 0x%llx.",
@@ -490,7 +490,7 @@
 						goto page_err;
 					}
 				}
-				name->mref = le64_to_cpu(ie->indexed_file);
+				name->mref = le64toh(ie->indexed_file);
 				name->type = FILENAME_DOS;
 				name->len = len = ie->key.filename.
 						filename_length;
@@ -502,7 +502,7 @@
 							ntfs_malloc_tag);
 				*res_name = NULL;
 			}
-			*res_mref = le64_to_cpu(ie->indexed_file);
+			*res_mref = le64toh(ie->indexed_file);
 			ntfs_page_unmap(ia_ni, upl, pl, FALSE);
 			rw_unlock(&ia_ni->lock);
 			(void)vnode_put(ia_vn);
@@ -543,7 +543,7 @@
 						goto page_err;
 					}
 				}
-				name->mref = le64_to_cpu(ie->indexed_file);
+				name->mref = le64toh(ie->indexed_file);
 				name->type = type;
 				name->len = len = ie->key.filename.
 						filename_length;
@@ -604,8 +604,8 @@
 		}
 		/* Child node present, descend into it. */
 		old_vcn = vcn;
-		vcn = sle64_to_cpup((sle64*)((u8*)ie +
-				le16_to_cpu(ie->length) - 8));
+		vcn = sle64tohp((sle64*)((u8*)ie +
+				le16toh(ie->length) - 8));
 		if (vcn >= 0) {
 			/*
 			 * If @vcn is in the same page cache page as @old_vcn
@@ -685,7 +685,7 @@
 static inline int ntfs_do_dirent(ntfs_volume *vol, INDEX_ENTRY *ie,
 		struct dirent *de, uio_t uio, int *entries)
 {
-	ino64_t mref;
+	ino_t mref;
 	u8 *utf8_name;
 	size_t utf8_size;
 	signed res_size, padding;
@@ -1438,7 +1438,7 @@
 		 * old name then reuse its buffer if the two are the same size
 		 * and otherwise free the old name first.
 		 */
-		size = le16_to_cpu(ictx->entry->key_length);
+		size = le16toh(ictx->entry->key_length);
 		if (dh->fn_size != size) {
 			if (dh->fn_size)
 				OSFree(dh->fn, dh->fn_size, ntfs_malloc_tag);
@@ -1581,7 +1581,7 @@
 	 * Get to the index root value (it has been verified in
 	 * ntfs_inode_read()).
 	 */
-	ir = (INDEX_ROOT*)((u8*)ctx->a + le16_to_cpu(ctx->a->value_offset));
+	ir = (INDEX_ROOT*)((u8*)ctx->a + le16toh(ctx->a->value_offset));
 	index_end = (u8*)&ir->index + le32_to_cpu(ir->index.index_length);
 	/* The first index entry. */
 	ie = (INDEX_ENTRY*)((u8*)&ir->index +
@@ -1589,7 +1589,7 @@
 	/* Bounds checks. */
 	if ((u8*)ie < (u8*)&ir->index ||
 			(u8*)ie + sizeof(INDEX_ENTRY_HEADER) > index_end ||
-			(u8*)ie + le16_to_cpu(ie->key_length) > index_end)
+			(u8*)ie + le16toh(ie->key_length) > index_end)
 		goto dir_err;
 	/*
 	 * If this is not the end node, it is a filename and thus the directory
@@ -1700,13 +1700,13 @@
 				(unsigned long long)dir_ni->mft_no, es2);
 		goto vol_err;
 	}
-	if (sle64_to_cpu(ia->index_block_vcn) != (ia_pos &
+	if (sle64toh(ia->index_block_vcn) != (ia_pos &
 			~(s64)(ia_ni->block_size - 1)) >>
 			ia_ni->vcn_size_shift) {
 		ntfs_error(vol->mp, "Actual VCN (0x%llx) of index record is "
 				"different from expected VCN (0x%llx)%s%llx%s",
 				(unsigned long long)
-				sle64_to_cpu(ia->index_block_vcn),
+				sle64toh(ia->index_block_vcn),
 				(unsigned long long)ia_pos >>
 				ia_ni->vcn_size_shift, es1,
 				(unsigned long long)dir_ni->mft_no, es2);
@@ -1719,7 +1719,7 @@
 				"(%u) differing from the directory specified "
 				"size (%u)%s%llx%s", (unsigned long long)
 				(unsigned long long)
-				sle64_to_cpu(ia->index_block_vcn),
+				sle64toh(ia->index_block_vcn),
 				(unsigned)(offsetof(INDEX_BLOCK, index) +
 				le32_to_cpu(ia->index.allocated_size)),
 				(unsigned)ia_ni->block_size, es1,
@@ -1733,7 +1733,7 @@
 				"cannot happen and points either to memory "
 				"corruption or to a driver bug.",
 				(unsigned long long)
-				sle64_to_cpu(ia->index_block_vcn),
+				sle64toh(ia->index_block_vcn),
 				(unsigned long long)dir_ni->mft_no);
 		goto vol_err;
 	}
@@ -1742,7 +1742,7 @@
 		ntfs_error(vol->mp, "Size of index block (VCN 0x%llx) "
 				"exceeds maximum size%s%llx%s",
 				(unsigned long long)
-				sle64_to_cpu(ia->index_block_vcn), es1,
+				sle64toh(ia->index_block_vcn), es1,
 				(unsigned long long)dir_ni->mft_no, es2);
 		goto vol_err;
 	}
@@ -1752,7 +1752,7 @@
 	/* Bounds checks. */
 	if ((u8*)ie < (u8*)&ia->index ||
 			(u8*)ie + sizeof(INDEX_ENTRY_HEADER) > index_end ||
-			(u8*)ie + le16_to_cpu(ie->key_length) > index_end)
+			(u8*)ie + le16toh(ie->key_length) > index_end)
 		goto dir_err;
 	/*
 	 * If this is the end node, it is not a filename so we continue to the
@@ -1919,8 +1919,8 @@
 				"parent directory (0x%llx) specified in the "
 				"matching directory index entry.  Volume is "
 				"corrupt.  Run chkdsk.", (unsigned long long)
-				le64_to_cpu(fn->parent_directory),
-				(unsigned long long)le64_to_cpu(
+				le64toh(fn->parent_directory),
+				(unsigned long long)le64toh(
 				ie->key.filename.parent_directory));
 		NVolSetErrors(vol);
 		err = EIO;
@@ -1940,7 +1940,7 @@
 				"chkdsk.", (unsigned long long)
 				MK_MREF(ni->mft_no, ni->seq_no),
 				(unsigned long long)
-				le64_to_cpu(ie->indexed_file));
+				le64toh(ie->indexed_file));
 		NVolSetErrors(vol);
 		err = EIO;
 		goto put_err;

Modified: soc2012/vbotton/ntfs_apple/ntfs_hash.c
==============================================================================
--- soc2012/vbotton/ntfs_apple/ntfs_hash.c	Fri Jun  8 15:10:13 2012	(r237312)
+++ soc2012/vbotton/ntfs_apple/ntfs_hash.c	Fri Jun  8 15:20:59 2012	(r237313)
@@ -111,7 +111,7 @@
  * volume @vol.
  */
 static inline unsigned long ntfs_inode_hash(const ntfs_volume *vol,
-		const ino64_t mft_no)
+		const ino_t mft_no)
 {
 	return (vol->dev + mft_no) & ntfs_inode_hash_mask;
 }
@@ -125,7 +125,7 @@
  * @mft_no on the volume @vol.
  */
 static inline ntfs_inode_list_head *ntfs_inode_hash_list(const ntfs_volume *vol,
-		const ino64_t mft_no)
+		const ino_t mft_no)
 {
 	return ntfs_inode_hash_table + ntfs_inode_hash(vol, mft_no);
 }

Modified: soc2012/vbotton/ntfs_apple/ntfs_index.c
==============================================================================
--- soc2012/vbotton/ntfs_apple/ntfs_index.c	Fri Jun  8 15:10:13 2012	(r237312)
+++ soc2012/vbotton/ntfs_apple/ntfs_index.c	Fri Jun  8 15:20:59 2012	(r237313)
@@ -204,7 +204,7 @@
 				le32_to_cpu(actx->m->bytes_in_use);
 		/* Get to the index root value. */
 		ictx->ir = (INDEX_ROOT*)((u8*)actx->a +
-				le16_to_cpu(actx->a->value_offset));
+				le16toh(actx->a->value_offset));
 		delta = (u8*)&ictx->ir->index - (u8*)ictx->index;
 		ictx->index = &ictx->ir->index;
 	} else {
@@ -378,14 +378,14 @@
 	 * reach the last entry and for each entry place a pointer to it into
 	 * our array of entry pointers.
 	 */
-	for (;; ie = (INDEX_ENTRY*)((u8*)ie + le16_to_cpu(ie->length))) {
+	for (;; ie = (INDEX_ENTRY*)((u8*)ie + le16toh(ie->length))) {
 		/* Bounds checks. */
 		if ((u8*)ie < (u8*)index || (u8*)ie +
 				sizeof(INDEX_ENTRY_HEADER) > index_end ||
-				(u8*)ie + le16_to_cpu(ie->length) > index_end ||
+				(u8*)ie + le16toh(ie->length) > index_end ||
 				(u32)sizeof(INDEX_ENTRY_HEADER) +
-				le16_to_cpu(ie->key_length) >
-				le16_to_cpu(ie->length))
+				le16toh(ie->key_length) >
+				le16toh(ie->length))
 			goto err;
 		/* Add this entry to the array of entry pointers. */
 		if (nr_entries >= max_entries)
@@ -396,11 +396,11 @@
 			break;
 		/* Further bounds checks for view indexes. */
 		if (is_view && ((u32)sizeof(INDEX_ENTRY_HEADER) +
-				le16_to_cpu(ie->key_length) >
-				le16_to_cpu(ie->data_offset) ||
-				(u32)le16_to_cpu(ie->data_offset) +
-				le16_to_cpu(ie->data_length) >
-				le16_to_cpu(ie->length)))
+				le16toh(ie->key_length) >
+				le16toh(ie->data_offset) ||
+				(u32)le16toh(ie->data_offset) +
+				le16toh(ie->data_length) >
+				le16toh(ie->length)))
 			goto err;
 	}
 	/* Except for the index root, leaf nodes are not allowed to be empty. */
@@ -536,7 +536,7 @@
 		unsigned max_root_entries;
 
 		max_root_entries = 1 + ((idx_ni->vol->mft_record_size -
-				le16_to_cpu(m->attrs_offset) -
+				le16toh(m->attrs_offset) -
 				offsetof(ATTR_RECORD, reservedR) -
 				sizeof(((ATTR_RECORD*)NULL)->reservedR) -
 				sizeof(INDEX_ROOT) -
@@ -549,7 +549,7 @@
 	 * Get to the index root value (it has been verified when the inode was
 	 * read in ntfs_index_inode_read()).
 	 */
-	ir = (INDEX_ROOT*)((u8*)actx->a + le16_to_cpu(actx->a->value_offset));
+	ir = (INDEX_ROOT*)((u8*)actx->a + le16toh(actx->a->value_offset));
 	ictx->index = &ir->index;
 	/*
 	 * Gather the index entry pointers and finish setting up the index
@@ -626,8 +626,8 @@
 		goto err;
 	}
 	/* Get the starting vcn of the child index block to descend into. */
-	vcn = sle64_to_cpup((sle64*)((u8*)ictx->entry +
-			le16_to_cpu(ictx->entry->length) - sizeof(VCN)));
+	vcn = sle64tohp((sle64*)((u8*)ictx->entry +
+			le16toh(ictx->entry->length) - sizeof(VCN)));
 	if (vcn < 0) {
 		ntfs_error(idx_ni->vol->mp, es, "Negative child node VCN",
 				(unsigned long long)idx_ni->mft_no);
@@ -697,12 +697,12 @@
 				(unsigned long long)idx_ni->mft_no);
 		goto unm_err;
 	}
-	if (sle64_to_cpu(ia->index_block_vcn) != vcn) {
+	if (sle64toh(ia->index_block_vcn) != vcn) {
 		ntfs_error(idx_ni->vol->mp, "Actual VCN (0x%llx) of index "
 				"buffer is different from expected VCN "
 				"(0x%llx).  Inode 0x%llx is corrupt.  Run "
 				"chkdsk.", (unsigned long long)
-				sle64_to_cpu(ia->index_block_vcn),
+				sle64toh(ia->index_block_vcn),
 				(unsigned long long)vcn,
 				(unsigned long long)idx_ni->mft_no);
 		goto unm_err;
@@ -866,7 +866,7 @@
 					NTFSCHAR_SIZE_SHIFT;
 			ie_match_key = &ie->key.filename.filename;
 		} else {
-			ie_match_key_len = le16_to_cpu(ie->key_length);
+			ie_match_key_len = le16toh(ie->key_length);
 			ie_match_key = &ie->key;
 		}
 		/*
@@ -888,7 +888,7 @@
 		 * know which way in the B+tree we have to go.
 		 */
 		rc = ntfs_collate(idx_ni->vol, idx_ni->collation_rule, key,
-				key_len, &ie->key, le16_to_cpu(ie->key_length));
+				key_len, &ie->key, le16toh(ie->key_length));
 		/*
 		 * If @key collates before the key of the current entry, need
 		 * to search on the left.
@@ -1620,7 +1620,7 @@
 	 * consumed the old one.
 	 */
 	m->next_attr_instance = htole16(
-			(le16_to_cpu(m->next_attr_instance) + 1) & 0xffff);
+			(le16toh(m->next_attr_instance) + 1) & 0xffff);
 	a->highest_vcn = cpu_to_sle64((idx_ni->allocated_size >>
 			vol->cluster_size_shift) - 1);
 	a->mapping_pairs_offset = htole16(mp_ofs);
@@ -1916,14 +1916,14 @@
 	ia->usa_ofs = htole16(sizeof(INDEX_BLOCK));
 	ia->usa_count = htole16(1 + (idx_ni->block_size / NTFS_BLOCK_SIZE));
 	/* Set the update sequence number to 1. */
-	*(le16*)((u8*)ia + le16_to_cpu(ia->usa_ofs)) = htole16(1);
+	*(le16*)((u8*)ia + le16toh(ia->usa_ofs)) = htole16(1);
 	ia->index_block_vcn = cpu_to_sle64(vcn);
 	ih = &ia->index;
 	ie_ofs = (sizeof(INDEX_HEADER) +
-			(le16_to_cpu(ia->usa_count) << 1) + 7) & ~7;
-	ih->entries_offset = cpu_to_le32(ie_ofs);
-	ih->index_length = cpu_to_le32(ie_ofs + sizeof(INDEX_ENTRY_HEADER));
-	ih->allocated_size = cpu_to_le32(idx_ni->block_size -
+			(le16toh(ia->usa_count) << 1) + 7) & ~7;
+	ih->entries_offset = htole32(ie_ofs);
+	ih->index_length = htole32(ie_ofs + sizeof(INDEX_ENTRY_HEADER));
+	ih->allocated_size = htole32(idx_ni->block_size -
 			offsetof(INDEX_BLOCK, index));
 	ih->flags = LEAF_NODE;
 	ie = (INDEX_ENTRY*)((u8*)ih + ie_ofs);
@@ -2204,14 +2204,14 @@
 			idx_ni->block_size_shift) & PAGE_MASK));
 	/* Preserve the update sequence number across the layout. */
 	usn = 0;
-	if (le16_to_cpu(ia->usa_ofs) < NTFS_BLOCK_SIZE - sizeof(u16))
-		usn = *(le16*)((u8*)ia + le16_to_cpu(ia->usa_ofs));
+	if (le16toh(ia->usa_ofs) < NTFS_BLOCK_SIZE - sizeof(u16))
+		usn = *(le16*)((u8*)ia + le16toh(ia->usa_ofs));
 	/* Calculate the index block vcn from the index block number. */
 	*dst_vcn = bmp_pos = bmp_pos << idx_ni->block_size_shift >>
 			idx_ni->vcn_size_shift;
 	ntfs_index_block_lay_out(idx_ni, bmp_pos, ia);
 	if (usn && usn != htole16(0xffff))
-		*(le16*)((u8*)ia + le16_to_cpu(ia->usa_ofs)) = usn;
+		*(le16*)((u8*)ia + le16toh(ia->usa_ofs)) = usn;
 	*dst_ia = ia;
 	*dst_upl_ofs = upl_ofs;
 	*dst_upl = upl;
@@ -2403,11 +2403,11 @@
 	ia->usa_ofs = htole16(sizeof(INDEX_BLOCK));
 	ia->usa_count = htole16(1 + (idx_ni->block_size / NTFS_BLOCK_SIZE));
 	/* Set the update sequence number to 1. */
-	*(le16*)((u8*)ia + le16_to_cpu(ia->usa_ofs)) = htole16(1);
+	*(le16*)((u8*)ia + le16toh(ia->usa_ofs)) = htole16(1);
 	ih = &ia->index;
 	ia_ie_ofs = (sizeof(INDEX_HEADER) +
-			(le16_to_cpu(ia->usa_count) << 1) + 7) & ~7;
-	ih->entries_offset = cpu_to_le32(ia_ie_ofs);

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***

