[XFS] We really suck at spulling. Thanks to Chris Pascoe for fixing all these typos.

SGI-PV: 904196
SGI-Modid: xfs-linux-melb:xfs-kern:25539a

Signed-off-by: Nathan Scott <nathans@sgi.com>
This commit is contained in:
Nathan Scott 2006-03-29 08:55:14 +10:00
parent ca9ba4471c
commit c41564b5af
42 changed files with 93 additions and 93 deletions

View file

@ -79,7 +79,7 @@ static inline void mrdemote(mrlock_t *mrp)
* Debug-only routine, without some platform-specific asm code, we can * Debug-only routine, without some platform-specific asm code, we can
* now only answer requests regarding whether we hold the lock for write * now only answer requests regarding whether we hold the lock for write
* (reader state is outside our visibility, we only track writer state). * (reader state is outside our visibility, we only track writer state).
* Note: means !ismrlocked would give false positivies, so don't do that. * Note: means !ismrlocked would give false positives, so don't do that.
*/ */
static inline int ismrlocked(mrlock_t *mrp, int type) static inline int ismrlocked(mrlock_t *mrp, int type)
{ {

View file

@ -372,7 +372,7 @@ static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
* assumes that all buffers on the page are started at the same time. * assumes that all buffers on the page are started at the same time.
* *
* The fix is two passes across the ioend list - one to start writeback on the * The fix is two passes across the ioend list - one to start writeback on the
* bufferheads, and then the second one submit them for I/O. * buffer_heads, and then submit them for I/O on the second pass.
*/ */
STATIC void STATIC void
xfs_submit_ioend( xfs_submit_ioend(
@ -699,7 +699,7 @@ xfs_convert_page(
/* /*
* page_dirty is initially a count of buffers on the page before * page_dirty is initially a count of buffers on the page before
* EOF and is decrememted as we move each into a cleanable state. * EOF and is decremented as we move each into a cleanable state.
* *
* Derivation: * Derivation:
* *
@ -842,7 +842,7 @@ xfs_cluster_write(
* page if possible. * page if possible.
* The bh->b_state's cannot know if any of the blocks or which block for * The bh->b_state's cannot know if any of the blocks or which block for
* that matter are dirty due to mmap writes, and therefore bh uptodate is * that matter are dirty due to mmap writes, and therefore bh uptodate is
* only vaild if the page itself isn't completely uptodate. Some layers * only valid if the page itself isn't completely uptodate. Some layers
* may clear the page dirty flag prior to calling write page, under the * may clear the page dirty flag prior to calling write page, under the
* assumption the entire page will be written out; by not writing out the * assumption the entire page will be written out; by not writing out the
* whole page the page can be reused before all valid dirty data is * whole page the page can be reused before all valid dirty data is
@ -892,7 +892,7 @@ xfs_page_state_convert(
/* /*
* page_dirty is initially a count of buffers on the page before * page_dirty is initially a count of buffers on the page before
* EOF and is decrememted as we move each into a cleanable state. * EOF and is decremented as we move each into a cleanable state.
* *
* Derivation: * Derivation:
* *
@ -1339,9 +1339,9 @@ xfs_end_io_direct(
/* /*
* Non-NULL private data means we need to issue a transaction to * Non-NULL private data means we need to issue a transaction to
* convert a range from unwritten to written extents. This needs * convert a range from unwritten to written extents. This needs
* to happen from process contect but aio+dio I/O completion * to happen from process context but aio+dio I/O completion
* happens from irq context so we need to defer it to a workqueue. * happens from irq context so we need to defer it to a workqueue.
* This is not nessecary for synchronous direct I/O, but we do * This is not necessary for synchronous direct I/O, but we do
* it anyway to keep the code uniform and simpler. * it anyway to keep the code uniform and simpler.
* *
* The core direct I/O code might be changed to always call the * The core direct I/O code might be changed to always call the
@ -1358,7 +1358,7 @@ xfs_end_io_direct(
} }
/* /*
* blockdev_direct_IO can return an error even afer the I/O * blockdev_direct_IO can return an error even after the I/O
* completion handler was called. Thus we need to protect * completion handler was called. Thus we need to protect
* against double-freeing. * against double-freeing.
*/ */

View file

@ -54,7 +54,7 @@
* Note, the NFS filehandle also includes an fsid portion which * Note, the NFS filehandle also includes an fsid portion which
* may have an inode number in it. That number is hardcoded to * may have an inode number in it. That number is hardcoded to
* 32bits and there is no way for XFS to intercept it. In * 32bits and there is no way for XFS to intercept it. In
* practice this means when exporting an XFS filesytem with 64bit * practice this means when exporting an XFS filesystem with 64bit
* inodes you should either export the mountpoint (rather than * inodes you should either export the mountpoint (rather than
* a subdirectory) or use the "fsid" export option. * a subdirectory) or use the "fsid" export option.
*/ */

View file

@ -681,7 +681,7 @@ start:
eventsent = 1; eventsent = 1;
/* /*
* The iolock was dropped and reaquired in XFS_SEND_DATA * The iolock was dropped and reacquired in XFS_SEND_DATA
* so we have to recheck the size when appending. * so we have to recheck the size when appending.
* We will only "goto start;" once, since having sent the * We will only "goto start;" once, since having sent the
* event prevents another call to XFS_SEND_DATA, which is * event prevents another call to XFS_SEND_DATA, which is

View file

@ -92,7 +92,7 @@ typedef enum {
#define SYNC_FSDATA 0x0020 /* flush fs data (e.g. superblocks) */ #define SYNC_FSDATA 0x0020 /* flush fs data (e.g. superblocks) */
#define SYNC_REFCACHE 0x0040 /* prune some of the nfs ref cache */ #define SYNC_REFCACHE 0x0040 /* prune some of the nfs ref cache */
#define SYNC_REMOUNT 0x0080 /* remount readonly, no dummy LRs */ #define SYNC_REMOUNT 0x0080 /* remount readonly, no dummy LRs */
#define SYNC_QUIESCE 0x0100 /* quiesce fileystem for a snapshot */ #define SYNC_QUIESCE 0x0100 /* quiesce filesystem for a snapshot */
typedef int (*vfs_mount_t)(bhv_desc_t *, typedef int (*vfs_mount_t)(bhv_desc_t *,
struct xfs_mount_args *, struct cred *); struct xfs_mount_args *, struct cred *);

View file

@ -221,7 +221,7 @@ xfs_qm_dqunpin_wait(
* as possible. * as possible.
* *
* We must not be holding the AIL_LOCK at this point. Calling incore() to * We must not be holding the AIL_LOCK at this point. Calling incore() to
* search the buffercache can be a time consuming thing, and AIL_LOCK is a * search the buffer cache can be a time consuming thing, and AIL_LOCK is a
* spinlock. * spinlock.
*/ */
STATIC void STATIC void

View file

@ -289,7 +289,7 @@ xfs_qm_rele_quotafs_ref(
/* /*
* This is called at mount time from xfs_mountfs to initialize the quotainfo * This is called at mount time from xfs_mountfs to initialize the quotainfo
* structure and start the global quotamanager (xfs_Gqm) if it hasn't done * structure and start the global quota manager (xfs_Gqm) if it hasn't done
* so already. Note that the superblock has not been read in yet. * so already. Note that the superblock has not been read in yet.
*/ */
void void
@ -807,7 +807,7 @@ xfs_qm_dqattach_one(
* Given a udquot and gdquot, attach a ptr to the group dquot in the * Given a udquot and gdquot, attach a ptr to the group dquot in the
* udquot as a hint for future lookups. The idea sounds simple, but the * udquot as a hint for future lookups. The idea sounds simple, but the
* execution isn't, because the udquot might have a group dquot attached * execution isn't, because the udquot might have a group dquot attached
* already and getting rid of that gets us into lock ordering contraints. * already and getting rid of that gets us into lock ordering constraints.
* The process is complicated more by the fact that the dquots may or may not * The process is complicated more by the fact that the dquots may or may not
* be locked on entry. * be locked on entry.
*/ */
@ -1094,10 +1094,10 @@ xfs_qm_sync(
} }
/* /*
* If we can't grab the flush lock then if the caller * If we can't grab the flush lock then if the caller
* really wanted us to give this our best shot, * really wanted us to give this our best shot, so
* see if we can give a push to the buffer before we wait * see if we can give a push to the buffer before we wait
* on the flush lock. At this point, we know that * on the flush lock. At this point, we know that
* eventhough the dquot is being flushed, * even though the dquot is being flushed,
* it has (new) dirty data. * it has (new) dirty data.
*/ */
xfs_qm_dqflock_pushbuf_wait(dqp); xfs_qm_dqflock_pushbuf_wait(dqp);
@ -1491,7 +1491,7 @@ xfs_qm_reset_dqcounts(
/* /*
* Do a sanity check, and if needed, repair the dqblk. Don't * Do a sanity check, and if needed, repair the dqblk. Don't
* output any warnings because it's perfectly possible to * output any warnings because it's perfectly possible to
* find unitialized dquot blks. See comment in xfs_qm_dqcheck. * find uninitialised dquot blks. See comment in xfs_qm_dqcheck.
*/ */
(void) xfs_qm_dqcheck(ddq, id+j, type, XFS_QMOPT_DQREPAIR, (void) xfs_qm_dqcheck(ddq, id+j, type, XFS_QMOPT_DQREPAIR,
"xfs_quotacheck"); "xfs_quotacheck");
@ -1580,7 +1580,7 @@ xfs_qm_dqiterate(
error = 0; error = 0;
/* /*
* This looks racey, but we can't keep an inode lock across a * This looks racy, but we can't keep an inode lock across a
* trans_reserve. But, this gets called during quotacheck, and that * trans_reserve. But, this gets called during quotacheck, and that
* happens only at mount time which is single threaded. * happens only at mount time which is single threaded.
*/ */
@ -1824,7 +1824,7 @@ xfs_qm_dqusage_adjust(
* we have to start from the beginning anyway. * we have to start from the beginning anyway.
* Once we're done, we'll log all the dquot bufs. * Once we're done, we'll log all the dquot bufs.
* *
* The *QUOTA_ON checks below may look pretty racey, but quotachecks * The *QUOTA_ON checks below may look pretty racy, but quotachecks
* and quotaoffs don't race. (Quotachecks happen at mount time only). * and quotaoffs don't race. (Quotachecks happen at mount time only).
*/ */
if (XFS_IS_UQUOTA_ON(mp)) { if (XFS_IS_UQUOTA_ON(mp)) {

View file

@ -912,7 +912,7 @@ xfs_qm_export_dquot(
/* /*
* Internally, we don't reset all the timers when quota enforcement * Internally, we don't reset all the timers when quota enforcement
* gets turned off. No need to confuse the userlevel code, * gets turned off. No need to confuse the user level code,
* so return zeroes in that case. * so return zeroes in that case.
*/ */
if (! XFS_IS_QUOTA_ENFORCED(mp)) { if (! XFS_IS_QUOTA_ENFORCED(mp)) {

View file

@ -804,7 +804,7 @@ xfs_trans_reserve_quota_bydquots(
} }
/* /*
* Didnt change anything critical, so, no need to log * Didn't change anything critical, so, no need to log
*/ */
return (0); return (0);
} }

View file

@ -395,7 +395,7 @@ xfs_acl_allow_set(
* The access control process to determine the access permission: * The access control process to determine the access permission:
* if uid == file owner id, use the file owner bits. * if uid == file owner id, use the file owner bits.
* if gid == file owner group id, use the file group bits. * if gid == file owner group id, use the file group bits.
* scan ACL for a maching user or group, and use matched entry * scan ACL for a matching user or group, and use matched entry
* permission. Use total permissions of all matching group entries, * permission. Use total permissions of all matching group entries,
* until all acl entries are exhausted. The final permission produced * until all acl entries are exhausted. The final permission produced
* by matching acl entry or entries needs to be & with group permission. * by matching acl entry or entries needs to be & with group permission.

View file

@ -179,7 +179,7 @@ typedef struct xfs_perag
{ {
char pagf_init; /* this agf's entry is initialized */ char pagf_init; /* this agf's entry is initialized */
char pagi_init; /* this agi's entry is initialized */ char pagi_init; /* this agi's entry is initialized */
char pagf_metadata; /* the agf is prefered to be metadata */ char pagf_metadata; /* the agf is preferred to be metadata */
char pagi_inodeok; /* The agi is ok for inodes */ char pagi_inodeok; /* The agi is ok for inodes */
__uint8_t pagf_levels[XFS_BTNUM_AGF]; __uint8_t pagf_levels[XFS_BTNUM_AGF];
/* # of levels in bno & cnt btree */ /* # of levels in bno & cnt btree */

View file

@ -511,7 +511,7 @@ STATIC void
xfs_alloc_trace_busy( xfs_alloc_trace_busy(
char *name, /* function tag string */ char *name, /* function tag string */
char *str, /* additional string */ char *str, /* additional string */
xfs_mount_t *mp, /* file system mount poing */ xfs_mount_t *mp, /* file system mount point */
xfs_agnumber_t agno, /* allocation group number */ xfs_agnumber_t agno, /* allocation group number */
xfs_agblock_t agbno, /* a.g. relative block number */ xfs_agblock_t agbno, /* a.g. relative block number */
xfs_extlen_t len, /* length of extent */ xfs_extlen_t len, /* length of extent */
@ -1843,7 +1843,7 @@ xfs_alloc_fix_freelist(
} else } else
agbp = NULL; agbp = NULL;
/* If this is a metadata prefered pag and we are user data /* If this is a metadata preferred pag and we are user data
* then try somewhere else if we are not being asked to * then try somewhere else if we are not being asked to
* try harder at this point * try harder at this point
*/ */
@ -2458,7 +2458,7 @@ error0:
/* /*
* AG Busy list management * AG Busy list management
* The busy list contains block ranges that have been freed but whose * The busy list contains block ranges that have been freed but whose
* transacations have not yet hit disk. If any block listed in a busy * transactions have not yet hit disk. If any block listed in a busy
* list is reused, the transaction that freed it must be forced to disk * list is reused, the transaction that freed it must be forced to disk
* before continuing to use the block. * before continuing to use the block.
* *

View file

@ -68,7 +68,7 @@ typedef struct xfs_alloc_arg {
xfs_alloctype_t otype; /* original allocation type */ xfs_alloctype_t otype; /* original allocation type */
char wasdel; /* set if allocation was prev delayed */ char wasdel; /* set if allocation was prev delayed */
char wasfromfl; /* set if allocation is from freelist */ char wasfromfl; /* set if allocation is from freelist */
char isfl; /* set if is freelist blocks - !actg */ char isfl; /* set if is freelist blocks - !acctg */
char userdata; /* set if this is user data */ char userdata; /* set if this is user data */
} xfs_alloc_arg_t; } xfs_alloc_arg_t;

View file

@ -294,7 +294,7 @@ xfs_attr_set_int(xfs_inode_t *dp, const char *name, int namelen,
xfs_trans_ihold(args.trans, dp); xfs_trans_ihold(args.trans, dp);
/* /*
* If the attribute list is non-existant or a shortform list, * If the attribute list is non-existent or a shortform list,
* upgrade it to a single-leaf-block attribute list. * upgrade it to a single-leaf-block attribute list.
*/ */
if ((dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) || if ((dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) ||
@ -1584,7 +1584,7 @@ out:
* Fill in the disk block numbers in the state structure for the buffers * Fill in the disk block numbers in the state structure for the buffers
* that are attached to the state structure. * that are attached to the state structure.
* This is done so that we can quickly reattach ourselves to those buffers * This is done so that we can quickly reattach ourselves to those buffers
* after some set of transaction commit's has released these buffers. * after some set of transaction commits have released these buffers.
*/ */
STATIC int STATIC int
xfs_attr_fillstate(xfs_da_state_t *state) xfs_attr_fillstate(xfs_da_state_t *state)
@ -1631,7 +1631,7 @@ xfs_attr_fillstate(xfs_da_state_t *state)
/* /*
* Reattach the buffers to the state structure based on the disk block * Reattach the buffers to the state structure based on the disk block
* numbers stored in the state structure. * numbers stored in the state structure.
* This is done after some set of transaction commit's has released those * This is done after some set of transaction commits have released those
* buffers from our grip. * buffers from our grip.
*/ */
STATIC int STATIC int

View file

@ -524,7 +524,7 @@ xfs_attr_shortform_compare(const void *a, const void *b)
/* /*
* Copy out entries of shortform attribute lists for attr_list(). * Copy out entries of shortform attribute lists for attr_list().
* Shortform atrtribute lists are not stored in hashval sorted order. * Shortform attribute lists are not stored in hashval sorted order.
* If the output buffer is not large enough to hold them all, then we * If the output buffer is not large enough to hold them all, then we
* we have to calculate each entries' hashvalue and sort them before * we have to calculate each entries' hashvalue and sort them before
* we can begin returning them to the user. * we can begin returning them to the user.
@ -1541,7 +1541,7 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action)
/* /*
* Check for the degenerate case of the block being empty. * Check for the degenerate case of the block being empty.
* If the block is empty, we'll simply delete it, no need to * If the block is empty, we'll simply delete it, no need to
* coalesce it with a sibling block. We choose (aribtrarily) * coalesce it with a sibling block. We choose (arbitrarily)
* to merge with the forward block unless it is NULL. * to merge with the forward block unless it is NULL.
*/ */
if (count == 0) { if (count == 0) {

View file

@ -31,7 +31,7 @@
* The behavior chain is ordered based on the 'position' number which * The behavior chain is ordered based on the 'position' number which
* lives in the first field of the ops vector (higher numbers first). * lives in the first field of the ops vector (higher numbers first).
* *
* Attemps to insert duplicate ops result in an EINVAL return code. * Attempts to insert duplicate ops result in an EINVAL return code.
* Otherwise, return 0 to indicate success. * Otherwise, return 0 to indicate success.
*/ */
int int
@ -84,7 +84,7 @@ bhv_insert(bhv_head_t *bhp, bhv_desc_t *bdp)
/* /*
* Remove a behavior descriptor from a position in a behavior chain; * Remove a behavior descriptor from a position in a behavior chain;
* the postition is guaranteed not to be the first position. * the position is guaranteed not to be the first position.
* Should only be called by the bhv_remove() macro. * Should only be called by the bhv_remove() macro.
*/ */
void void

View file

@ -39,7 +39,7 @@
* behaviors is synchronized with operations-in-progress (oip's) so that * behaviors is synchronized with operations-in-progress (oip's) so that
* the oip's always see a consistent view of the chain. * the oip's always see a consistent view of the chain.
* *
* The term "interpostion" is used to refer to the act of inserting * The term "interposition" is used to refer to the act of inserting
* a behavior such that it interposes on (i.e., is inserted in front * a behavior such that it interposes on (i.e., is inserted in front
* of) a particular other behavior. A key example of this is when a * of) a particular other behavior. A key example of this is when a
* system implementing distributed single system image wishes to * system implementing distributed single system image wishes to
@ -51,7 +51,7 @@
* *
* Behavior synchronization is logic which is necessary under certain * Behavior synchronization is logic which is necessary under certain
* circumstances that there is no conflict between ongoing operations * circumstances that there is no conflict between ongoing operations
* traversing the behavior chain and those dunamically modifying the * traversing the behavior chain and those dynamically modifying the
* behavior chain. Because behavior synchronization adds extra overhead * behavior chain. Because behavior synchronization adds extra overhead
* to virtual operation invocation, we want to restrict, as much as * to virtual operation invocation, we want to restrict, as much as
* we can, the requirement for this extra code, to those situations * we can, the requirement for this extra code, to those situations

View file

@ -98,12 +98,12 @@ xfs_buf_item_flush_log_debug(
} }
/* /*
* This function is called to verify that our caller's have logged * This function is called to verify that our callers have logged
* all the bytes that they changed. * all the bytes that they changed.
* *
* It does this by comparing the original copy of the buffer stored in * It does this by comparing the original copy of the buffer stored in
* the buf log item's bli_orig array to the current copy of the buffer * the buf log item's bli_orig array to the current copy of the buffer
* and ensuring that all bytes which miscompare are set in the bli_logged * and ensuring that all bytes which mismatch are set in the bli_logged
* array of the buf log item. * array of the buf log item.
*/ */
STATIC void STATIC void

View file

@ -38,7 +38,7 @@ typedef struct xfs_cap_set {
/* /*
* For Linux, we take the bitfields directly from capability.h * For Linux, we take the bitfields directly from capability.h
* and no longer attempt to keep this attribute ondisk compatible * and no longer attempt to keep this attribute ondisk compatible
* with IRIX. Since this attribute is only set on exectuables, * with IRIX. Since this attribute is only set on executables,
* it just doesn't make much sense to try. We do use a different * it just doesn't make much sense to try. We do use a different
* named attribute though, to avoid confusion. * named attribute though, to avoid confusion.
*/ */

View file

@ -840,7 +840,7 @@ xfs_da_node_toosmall(xfs_da_state_t *state, int *action)
/* /*
* Check for the degenerate case of the block being empty. * Check for the degenerate case of the block being empty.
* If the block is empty, we'll simply delete it, no need to * If the block is empty, we'll simply delete it, no need to
* coalesce it with a sibling block. We choose (aribtrarily) * coalesce it with a sibling block. We choose (arbitrarily)
* to merge with the forward block unless it is NULL. * to merge with the forward block unless it is NULL.
*/ */
if (count == 0) { if (count == 0) {

View file

@ -533,7 +533,7 @@ xfs_dir2_block_getdents(
/* /*
* Reached the end of the block. * Reached the end of the block.
* Set the offset to a nonexistent block 1 and return. * Set the offset to a non-existent block 1 and return.
*/ */
*eofp = 1; *eofp = 1;

View file

@ -515,7 +515,7 @@ xfs_dir2_leaf_addname(
ASSERT(be32_to_cpu(leaf->ents[highstale].address) == ASSERT(be32_to_cpu(leaf->ents[highstale].address) ==
XFS_DIR2_NULL_DATAPTR); XFS_DIR2_NULL_DATAPTR);
/* /*
* Copy entries down to copver the stale entry * Copy entries down to cover the stale entry
* and make room for the new entry. * and make room for the new entry.
*/ */
if (highstale - index > 0) if (highstale - index > 0)

View file

@ -830,7 +830,7 @@ xfs_dir2_leafn_rebalance(
state->inleaf = 1; state->inleaf = 1;
blk2->index = 0; blk2->index = 0;
cmn_err(CE_ALERT, cmn_err(CE_ALERT,
"xfs_dir2_leafn_rebalance: picked the wrong leaf? reverting orignal leaf: " "xfs_dir2_leafn_rebalance: picked the wrong leaf? reverting original leaf: "
"blk1->index %d\n", "blk1->index %d\n",
blk1->index); blk1->index);
} }

View file

@ -1341,7 +1341,7 @@ xfs_dir_leaf_toosmall(xfs_da_state_t *state, int *action)
/* /*
* Check for the degenerate case of the block being empty. * Check for the degenerate case of the block being empty.
* If the block is empty, we'll simply delete it, no need to * If the block is empty, we'll simply delete it, no need to
* coalesce it with a sibling block. We choose (aribtrarily) * coalesce it with a sibling block. We choose (arbitrarily)
* to merge with the forward block unless it is NULL. * to merge with the forward block unless it is NULL.
*/ */
if (count == 0) { if (count == 0) {

View file

@ -477,7 +477,7 @@ xfs_fs_counts(
* *
* xfs_reserve_blocks is called to set m_resblks * xfs_reserve_blocks is called to set m_resblks
* in the in-core mount table. The number of unused reserved blocks * in the in-core mount table. The number of unused reserved blocks
* is kept in m_resbls_avail. * is kept in m_resblks_avail.
* *
* Reserve the requested number of blocks if available. Otherwise return * Reserve the requested number of blocks if available. Otherwise return
* as many as possible to satisfy the request. The actual number * as many as possible to satisfy the request. The actual number

View file

@ -1023,7 +1023,7 @@ xfs_difree(
rec.ir_freecount++; rec.ir_freecount++;
/* /*
* When an inode cluster is free, it becomes elgible for removal * When an inode cluster is free, it becomes eligible for removal
*/ */
if ((mp->m_flags & XFS_MOUNT_IDELETE) && if ((mp->m_flags & XFS_MOUNT_IDELETE) &&
(rec.ir_freecount == XFS_IALLOC_INODES(mp))) { (rec.ir_freecount == XFS_IALLOC_INODES(mp))) {

View file

@ -509,7 +509,7 @@ retry:
} else { } else {
/* /*
* If the inode is not fully constructed due to * If the inode is not fully constructed due to
* filehandle mistmatches wait for the inode to go * filehandle mismatches wait for the inode to go
* away and try again. * away and try again.
* *
* iget_locked will call __wait_on_freeing_inode * iget_locked will call __wait_on_freeing_inode

View file

@ -160,7 +160,7 @@ xfs_inotobp(
xfs_dinode_t *dip; xfs_dinode_t *dip;
/* /*
* Call the space managment code to find the location of the * Call the space management code to find the location of the
* inode on disk. * inode on disk.
*/ */
imap.im_blkno = 0; imap.im_blkno = 0;
@ -837,7 +837,7 @@ xfs_dic2xflags(
/* /*
* Given a mount structure and an inode number, return a pointer * Given a mount structure and an inode number, return a pointer
* to a newly allocated in-core inode coresponding to the given * to a newly allocated in-core inode corresponding to the given
* inode number. * inode number.
* *
* Initialize the inode's attributes and extent pointers if it * Initialize the inode's attributes and extent pointers if it
@ -2723,7 +2723,7 @@ xfs_ipin(
/* /*
* Decrement the pin count of the given inode, and wake up * Decrement the pin count of the given inode, and wake up
* anyone in xfs_iwait_unpin() if the count goes to 0. The * anyone in xfs_iwait_unpin() if the count goes to 0. The
* inode must have been previoulsy pinned with a call to xfs_ipin(). * inode must have been previously pinned with a call to xfs_ipin().
*/ */
void void
xfs_iunpin( xfs_iunpin(
@ -3690,7 +3690,7 @@ void
xfs_iext_add( xfs_iext_add(
xfs_ifork_t *ifp, /* inode fork pointer */ xfs_ifork_t *ifp, /* inode fork pointer */
xfs_extnum_t idx, /* index to begin adding exts */ xfs_extnum_t idx, /* index to begin adding exts */
int ext_diff) /* nubmer of extents to add */ int ext_diff) /* number of extents to add */
{ {
int byte_diff; /* new bytes being added */ int byte_diff; /* new bytes being added */
int new_size; /* size of extents after adding */ int new_size; /* size of extents after adding */
@ -4038,7 +4038,7 @@ xfs_iext_remove_indirect(
xfs_extnum_t ext_diff; /* extents to remove in current list */ xfs_extnum_t ext_diff; /* extents to remove in current list */
xfs_extnum_t nex1; /* number of extents before idx */ xfs_extnum_t nex1; /* number of extents before idx */
xfs_extnum_t nex2; /* extents after idx + count */ xfs_extnum_t nex2; /* extents after idx + count */
int nlists; /* entries in indirecton array */ int nlists; /* entries in indirection array */
int page_idx = idx; /* index in target extent list */ int page_idx = idx; /* index in target extent list */
ASSERT(ifp->if_flags & XFS_IFEXTIREC); ASSERT(ifp->if_flags & XFS_IFEXTIREC);
@ -4291,9 +4291,9 @@ xfs_iext_bno_to_ext(
xfs_filblks_t blockcount = 0; /* number of blocks in extent */ xfs_filblks_t blockcount = 0; /* number of blocks in extent */
xfs_bmbt_rec_t *ep = NULL; /* pointer to target extent */ xfs_bmbt_rec_t *ep = NULL; /* pointer to target extent */
xfs_ext_irec_t *erp = NULL; /* indirection array pointer */ xfs_ext_irec_t *erp = NULL; /* indirection array pointer */
int high; /* upper boundry in search */ int high; /* upper boundary in search */
xfs_extnum_t idx = 0; /* index of target extent */ xfs_extnum_t idx = 0; /* index of target extent */
int low; /* lower boundry in search */ int low; /* lower boundary in search */
xfs_extnum_t nextents; /* number of file extents */ xfs_extnum_t nextents; /* number of file extents */
xfs_fileoff_t startoff = 0; /* start offset of extent */ xfs_fileoff_t startoff = 0; /* start offset of extent */

View file

@ -580,7 +580,7 @@ xfs_inode_item_unpin_remove(
* been or is in the process of being flushed, then (ideally) we'd like to * been or is in the process of being flushed, then (ideally) we'd like to
* see if the inode's buffer is still incore, and if so give it a nudge. * see if the inode's buffer is still incore, and if so give it a nudge.
* We delay doing so until the pushbuf routine, though, to avoid holding * We delay doing so until the pushbuf routine, though, to avoid holding
* the AIL lock across a call to the blackhole which is the buffercache. * the AIL lock across a call to the blackhole which is the buffer cache.
* Also we don't want to sleep in any device strategy routines, which can happen * Also we don't want to sleep in any device strategy routines, which can happen
* if we do the subsequent bawrite in here. * if we do the subsequent bawrite in here.
*/ */

View file

@ -272,7 +272,7 @@ xfs_bulkstat(
size_t statstruct_size, /* sizeof struct filling */ size_t statstruct_size, /* sizeof struct filling */
char __user *ubuffer, /* buffer with inode stats */ char __user *ubuffer, /* buffer with inode stats */
int flags, /* defined in xfs_itable.h */ int flags, /* defined in xfs_itable.h */
int *done) /* 1 if there're more stats to get */ int *done) /* 1 if there are more stats to get */
{ {
xfs_agblock_t agbno=0;/* allocation group block number */ xfs_agblock_t agbno=0;/* allocation group block number */
xfs_buf_t *agbp; /* agi header buffer */ xfs_buf_t *agbp; /* agi header buffer */
@ -676,7 +676,7 @@ xfs_bulkstat_single(
xfs_mount_t *mp, /* mount point for filesystem */ xfs_mount_t *mp, /* mount point for filesystem */
xfs_ino_t *lastinop, /* inode to return */ xfs_ino_t *lastinop, /* inode to return */
char __user *buffer, /* buffer with inode stats */ char __user *buffer, /* buffer with inode stats */
int *done) /* 1 if there're more stats to get */ int *done) /* 1 if there are more stats to get */
{ {
int count; /* count value for bulkstat call */ int count; /* count value for bulkstat call */
int error; /* return value */ int error; /* return value */

View file

@ -60,7 +60,7 @@ xfs_bulkstat(
size_t statstruct_size,/* sizeof struct that we're filling */ size_t statstruct_size,/* sizeof struct that we're filling */
char __user *ubuffer,/* buffer with inode stats */ char __user *ubuffer,/* buffer with inode stats */
int flags, /* flag to control access method */ int flags, /* flag to control access method */
int *done); /* 1 if there're more stats to get */ int *done); /* 1 if there are more stats to get */
int int
xfs_bulkstat_single( xfs_bulkstat_single(

View file

@ -59,7 +59,7 @@ STATIC xlog_t * xlog_alloc_log(xfs_mount_t *mp,
int num_bblks); int num_bblks);
STATIC int xlog_space_left(xlog_t *log, int cycle, int bytes); STATIC int xlog_space_left(xlog_t *log, int cycle, int bytes);
STATIC int xlog_sync(xlog_t *log, xlog_in_core_t *iclog); STATIC int xlog_sync(xlog_t *log, xlog_in_core_t *iclog);
STATIC void xlog_unalloc_log(xlog_t *log); STATIC void xlog_dealloc_log(xlog_t *log);
STATIC int xlog_write(xfs_mount_t *mp, xfs_log_iovec_t region[], STATIC int xlog_write(xfs_mount_t *mp, xfs_log_iovec_t region[],
int nentries, xfs_log_ticket_t tic, int nentries, xfs_log_ticket_t tic,
xfs_lsn_t *start_lsn, xfs_lsn_t *start_lsn,
@ -304,7 +304,7 @@ xfs_log_done(xfs_mount_t *mp,
if ((ticket->t_flags & XLOG_TIC_PERM_RESERV) == 0 || if ((ticket->t_flags & XLOG_TIC_PERM_RESERV) == 0 ||
(flags & XFS_LOG_REL_PERM_RESERV)) { (flags & XFS_LOG_REL_PERM_RESERV)) {
/* /*
* Release ticket if not permanent reservation or a specifc * Release ticket if not permanent reservation or a specific
* request has been made to release a permanent reservation. * request has been made to release a permanent reservation.
*/ */
xlog_trace_loggrant(log, ticket, "xfs_log_done: (non-permanent)"); xlog_trace_loggrant(log, ticket, "xfs_log_done: (non-permanent)");
@ -511,7 +511,7 @@ xfs_log_mount(xfs_mount_t *mp,
vfsp->vfs_flag |= VFS_RDONLY; vfsp->vfs_flag |= VFS_RDONLY;
if (error) { if (error) {
cmn_err(CE_WARN, "XFS: log mount/recovery failed: error %d", error); cmn_err(CE_WARN, "XFS: log mount/recovery failed: error %d", error);
xlog_unalloc_log(mp->m_log); xlog_dealloc_log(mp->m_log);
return error; return error;
} }
} }
@ -667,7 +667,7 @@ xfs_log_unmount_write(xfs_mount_t *mp)
* *
* Go through the motions of sync'ing and releasing * Go through the motions of sync'ing and releasing
* the iclog, even though no I/O will actually happen, * the iclog, even though no I/O will actually happen,
* we need to wait for other log I/O's that may already * we need to wait for other log I/Os that may already
* be in progress. Do this as a separate section of * be in progress. Do this as a separate section of
* code so we'll know if we ever get stuck here that * code so we'll know if we ever get stuck here that
* we're in this odd situation of trying to unmount * we're in this odd situation of trying to unmount
@ -704,7 +704,7 @@ xfs_log_unmount_write(xfs_mount_t *mp)
void void
xfs_log_unmount_dealloc(xfs_mount_t *mp) xfs_log_unmount_dealloc(xfs_mount_t *mp)
{ {
xlog_unalloc_log(mp->m_log); xlog_dealloc_log(mp->m_log);
} }
/* /*
@ -1492,7 +1492,7 @@ xlog_sync(xlog_t *log,
ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize); ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);
/* account for internal log which does't start at block #0 */ /* account for internal log which doesn't start at block #0 */
XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart); XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart);
XFS_BUF_WRITE(bp); XFS_BUF_WRITE(bp);
if ((error = XFS_bwrite(bp))) { if ((error = XFS_bwrite(bp))) {
@ -1506,10 +1506,10 @@ xlog_sync(xlog_t *log,
/* /*
* Unallocate a log structure * Deallocate a log structure
*/ */
void void
xlog_unalloc_log(xlog_t *log) xlog_dealloc_log(xlog_t *log)
{ {
xlog_in_core_t *iclog, *next_iclog; xlog_in_core_t *iclog, *next_iclog;
xlog_ticket_t *tic, *next_tic; xlog_ticket_t *tic, *next_tic;
@ -1539,7 +1539,7 @@ xlog_unalloc_log(xlog_t *log)
if ((log->l_ticket_cnt != log->l_ticket_tcnt) && if ((log->l_ticket_cnt != log->l_ticket_tcnt) &&
!XLOG_FORCED_SHUTDOWN(log)) { !XLOG_FORCED_SHUTDOWN(log)) {
xfs_fs_cmn_err(CE_WARN, log->l_mp, xfs_fs_cmn_err(CE_WARN, log->l_mp,
"xlog_unalloc_log: (cnt: %d, total: %d)", "xlog_dealloc_log: (cnt: %d, total: %d)",
log->l_ticket_cnt, log->l_ticket_tcnt); log->l_ticket_cnt, log->l_ticket_tcnt);
/* ASSERT(log->l_ticket_cnt == log->l_ticket_tcnt); */ /* ASSERT(log->l_ticket_cnt == log->l_ticket_tcnt); */
@ -1562,7 +1562,7 @@ xlog_unalloc_log(xlog_t *log)
#endif #endif
log->l_mp->m_log = NULL; log->l_mp->m_log = NULL;
kmem_free(log, sizeof(xlog_t)); kmem_free(log, sizeof(xlog_t));
} /* xlog_unalloc_log */ } /* xlog_dealloc_log */
/* /*
* Update counters atomically now that memcpy is done. * Update counters atomically now that memcpy is done.
@ -2829,7 +2829,7 @@ xlog_state_release_iclog(xlog_t *log,
/* /*
* We let the log lock go, so it's possible that we hit a log I/O * We let the log lock go, so it's possible that we hit a log I/O
* error or someother SHUTDOWN condition that marks the iclog * error or some other SHUTDOWN condition that marks the iclog
* as XLOG_STATE_IOERROR before the bwrite. However, we know that * as XLOG_STATE_IOERROR before the bwrite. However, we know that
* this iclog has consistent data, so we ignore IOERROR * this iclog has consistent data, so we ignore IOERROR
* flags after this point. * flags after this point.

View file

@ -27,7 +27,7 @@
#ifdef __KERNEL__ #ifdef __KERNEL__
/* /*
* By comparing each compnent, we don't have to worry about extra * By comparing each component, we don't have to worry about extra
* endian issues in treating two 32 bit numbers as one 64 bit number * endian issues in treating two 32 bit numbers as one 64 bit number
*/ */
static inline xfs_lsn_t _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2) static inline xfs_lsn_t _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)

View file

@ -583,7 +583,7 @@ xlog_find_head(
* x | x ... | x - 1 | x * x | x ... | x - 1 | x
* Another case that fits this picture would be * Another case that fits this picture would be
* x | x + 1 | x ... | x * x | x + 1 | x ... | x
* In this case the head really is somwhere at the end of the * In this case the head really is somewhere at the end of the
* log, as one of the latest writes at the beginning was * log, as one of the latest writes at the beginning was
* incomplete. * incomplete.
* One more case is * One more case is
@ -2799,7 +2799,7 @@ xlog_recover_do_trans(
* we don't need to worry about the block number being * we don't need to worry about the block number being
* truncated in > 1 TB buffers because in user-land, * truncated in > 1 TB buffers because in user-land,
* we're now n32 or 64-bit so xfs_daddr_t is 64-bits so * we're now n32 or 64-bit so xfs_daddr_t is 64-bits so
* the blkno's will get through the user-mode buffer * the blknos will get through the user-mode buffer
* cache properly. The only bad case is o32 kernels * cache properly. The only bad case is o32 kernels
* where xfs_daddr_t is 32-bits but mount will warn us * where xfs_daddr_t is 32-bits but mount will warn us
* off a > 1 TB filesystem before we get here. * off a > 1 TB filesystem before we get here.

View file

@ -393,7 +393,7 @@ xfs_initialize_perag(
break; break;
} }
/* This ag is prefered for inodes */ /* This ag is preferred for inodes */
pag = &mp->m_perag[index]; pag = &mp->m_perag[index];
pag->pagi_inodeok = 1; pag->pagi_inodeok = 1;
if (index < max_metadata) if (index < max_metadata)
@ -1728,7 +1728,7 @@ xfs_mount_log_sbunit(
* We cannot use the hotcpu_register() function because it does * We cannot use the hotcpu_register() function because it does
* not allow notifier instances. We need a notifier per filesystem * not allow notifier instances. We need a notifier per filesystem
* as we need to be able to identify the filesystem to balance * as we need to be able to identify the filesystem to balance
* the counters out. This is acheived by having a notifier block * the counters out. This is achieved by having a notifier block
* embedded in the xfs_mount_t and doing pointer magic to get the * embedded in the xfs_mount_t and doing pointer magic to get the
* mount pointer from the notifier block address. * mount pointer from the notifier block address.
*/ */

View file

@ -379,7 +379,7 @@ typedef struct xfs_mount {
#endif #endif
int m_dalign; /* stripe unit */ int m_dalign; /* stripe unit */
int m_swidth; /* stripe width */ int m_swidth; /* stripe width */
int m_sinoalign; /* stripe unit inode alignmnt */ int m_sinoalign; /* stripe unit inode alignment */
int m_attr_magicpct;/* 37% of the blocksize */ int m_attr_magicpct;/* 37% of the blocksize */
int m_dir_magicpct; /* 37% of the dir blocksize */ int m_dir_magicpct; /* 37% of the dir blocksize */
__uint8_t m_mk_sharedro; /* mark shared ro on unmount */ __uint8_t m_mk_sharedro; /* mark shared ro on unmount */

View file

@ -31,7 +31,7 @@
typedef __uint32_t xfs_dqid_t; typedef __uint32_t xfs_dqid_t;
/* /*
* Eventhough users may not have quota limits occupying all 64-bits, * Even though users may not have quota limits occupying all 64-bits,
* they may need 64-bit accounting. Hence, 64-bit quota-counters, * they may need 64-bit accounting. Hence, 64-bit quota-counters,
* and quota-limits. This is a waste in the common case, but hey ... * and quota-limits. This is a waste in the common case, but hey ...
*/ */
@ -246,7 +246,7 @@ typedef struct xfs_qoff_logformat {
#ifdef __KERNEL__ #ifdef __KERNEL__
/* /*
* This check is done typically without holding the inode lock; * This check is done typically without holding the inode lock;
* that may seem racey, but it is harmless in the context that it is used. * that may seem racy, but it is harmless in the context that it is used.
* The inode cannot go inactive as long a reference is kept, and * The inode cannot go inactive as long a reference is kept, and
* therefore if dquot(s) were attached, they'll stay consistent. * therefore if dquot(s) were attached, they'll stay consistent.
* If, for example, the ownership of the inode changes while * If, for example, the ownership of the inode changes while

View file

@ -490,7 +490,7 @@ xfs_trans_mod_sb(
case XFS_TRANS_SB_RES_FREXTENTS: case XFS_TRANS_SB_RES_FREXTENTS:
/* /*
* The allocation has already been applied to the * The allocation has already been applied to the
* in-core superblocks's counter. This should only * in-core superblock's counter. This should only
* be applied to the on-disk superblock. * be applied to the on-disk superblock.
*/ */
ASSERT(delta < 0); ASSERT(delta < 0);
@ -611,7 +611,7 @@ xfs_trans_apply_sb_deltas(
if (whole) if (whole)
/* /*
* Log the whole thing, the fields are discontiguous. * Log the whole thing, the fields are noncontiguous.
*/ */
xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_sb_t) - 1); xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_sb_t) - 1);
else else
@ -669,7 +669,7 @@ xfs_trans_unreserve_and_mod_sb(
/* /*
* Apply any superblock modifications to the in-core version. * Apply any superblock modifications to the in-core version.
* The t_res_fdblocks_delta and t_res_frextents_delta fields are * The t_res_fdblocks_delta and t_res_frextents_delta fields are
* explicity NOT applied to the in-core superblock. * explicitly NOT applied to the in-core superblock.
* The idea is that that has already been done. * The idea is that that has already been done.
*/ */
if (tp->t_flags & XFS_TRANS_SB_DIRTY) { if (tp->t_flags & XFS_TRANS_SB_DIRTY) {

View file

@ -354,7 +354,7 @@ typedef struct xfs_trans {
xfs_lsn_t t_commit_lsn; /* log seq num of end of xfs_lsn_t t_commit_lsn; /* log seq num of end of
* transaction. */ * transaction. */
struct xfs_mount *t_mountp; /* ptr to fs mount struct */ struct xfs_mount *t_mountp; /* ptr to fs mount struct */
struct xfs_dquot_acct *t_dqinfo; /* accting info for dquots */ struct xfs_dquot_acct *t_dqinfo; /* acctg info for dquots */
xfs_trans_callback_t t_callback; /* transaction callback */ xfs_trans_callback_t t_callback; /* transaction callback */
void *t_callarg; /* callback arg */ void *t_callarg; /* callback arg */
unsigned int t_flags; /* misc flags */ unsigned int t_flags; /* misc flags */

View file

@ -272,7 +272,7 @@ xfs_trans_log_inode(
* This is to coordinate with the xfs_iflush() and xfs_iflush_done() * This is to coordinate with the xfs_iflush() and xfs_iflush_done()
* routines in the eventual clearing of the ilf_fields bits. * routines in the eventual clearing of the ilf_fields bits.
* See the big comment in xfs_iflush() for an explanation of * See the big comment in xfs_iflush() for an explanation of
* this coorination mechanism. * this coordination mechanism.
*/ */
flags |= ip->i_itemp->ili_last_fields; flags |= ip->i_itemp->ili_last_fields;
ip->i_itemp->ili_format.ilf_fields |= flags; ip->i_itemp->ili_format.ilf_fields |= flags;

View file

@ -880,10 +880,10 @@ xfs_statvfs(
* determine if they should be flushed sync, async, or * determine if they should be flushed sync, async, or
* delwri. * delwri.
* SYNC_CLOSE - This flag is passed when the system is being * SYNC_CLOSE - This flag is passed when the system is being
* unmounted. We should sync and invalidate everthing. * unmounted. We should sync and invalidate everything.
* SYNC_FSDATA - This indicates that the caller would like to make * SYNC_FSDATA - This indicates that the caller would like to make
* sure the superblock is safe on disk. We can ensure * sure the superblock is safe on disk. We can ensure
* this by simply makeing sure the log gets flushed * this by simply making sure the log gets flushed
* if SYNC_BDFLUSH is set, and by actually writing it * if SYNC_BDFLUSH is set, and by actually writing it
* out otherwise. * out otherwise.
* *
@ -908,7 +908,7 @@ xfs_sync(
* *
* This routine supports all of the flags defined for the generic VFS_SYNC * This routine supports all of the flags defined for the generic VFS_SYNC
* interface as explained above under xfs_sync. In the interests of not * interface as explained above under xfs_sync. In the interests of not
* changing interfaces within the 6.5 family, additional internallly- * changing interfaces within the 6.5 family, additional internally-
* required functions are specified within a separate xflags parameter, * required functions are specified within a separate xflags parameter,
* only available by calling this routine. * only available by calling this routine.
* *
@ -1090,7 +1090,7 @@ xfs_sync_inodes(
* If this is just vfs_sync() or pflushd() calling * If this is just vfs_sync() or pflushd() calling
* then we can skip inodes for which it looks like * then we can skip inodes for which it looks like
* there is nothing to do. Since we don't have the * there is nothing to do. Since we don't have the
* inode locked this is racey, but these are periodic * inode locked this is racy, but these are periodic
* calls so it doesn't matter. For the others we want * calls so it doesn't matter. For the others we want
* to know for sure, so we at least try to lock them. * to know for sure, so we at least try to lock them.
*/ */
@ -1429,7 +1429,7 @@ xfs_sync_inodes(
* *
* This routine supports all of the flags defined for the generic VFS_SYNC * This routine supports all of the flags defined for the generic VFS_SYNC
* interface as explained above under xfs_sync. In the interests of not * interface as explained above under xfs_sync. In the interests of not
* changing interfaces within the 6.5 family, additional internallly- * changing interfaces within the 6.5 family, additional internally-
* required functions are specified within a separate xflags parameter, * required functions are specified within a separate xflags parameter,
* only available by calling this routine. * only available by calling this routine.
* *

View file

@ -848,7 +848,7 @@ xfs_setattr(
* If this is a synchronous mount, make sure that the * If this is a synchronous mount, make sure that the
* transaction goes to disk before returning to the user. * transaction goes to disk before returning to the user.
* This is slightly sub-optimal in that truncates require * This is slightly sub-optimal in that truncates require
* two sync transactions instead of one for wsync filesytems. * two sync transactions instead of one for wsync filesystems.
* One for the truncate and one for the timestamps since we * One for the truncate and one for the timestamps since we
* don't want to change the timestamps unless we're sure the * don't want to change the timestamps unless we're sure the
* truncate worked. Truncates are less than 1% of the laddis * truncate worked. Truncates are less than 1% of the laddis
@ -1170,7 +1170,7 @@ xfs_fsync(
/* /*
* If this inode is on the RT dev we need to flush that * If this inode is on the RT dev we need to flush that
* cache aswell. * cache as well.
*/ */
if (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) if (ip->i_d.di_flags & XFS_DIFLAG_REALTIME)
xfs_blkdev_issue_flush(ip->i_mount->m_rtdev_targp); xfs_blkdev_issue_flush(ip->i_mount->m_rtdev_targp);
@ -1380,7 +1380,7 @@ xfs_inactive_symlink_rmt(
*/ */
ntp = xfs_trans_dup(tp); ntp = xfs_trans_dup(tp);
/* /*
* Commit the transaction containing extent freeing and EFD's. * Commit the transaction containing extent freeing and EFDs.
* If we get an error on the commit here or on the reserve below, * If we get an error on the commit here or on the reserve below,
* we need to unlock the inode since the new transaction doesn't * we need to unlock the inode since the new transaction doesn't
* have the inode attached. * have the inode attached.
@ -2023,7 +2023,7 @@ xfs_create(
XFS_QM_DQRELE(mp, gdqp); XFS_QM_DQRELE(mp, gdqp);
/* /*
* Propogate the fact that the vnode changed after the * Propagate the fact that the vnode changed after the
* xfs_inode locks have been released. * xfs_inode locks have been released.
*/ */
VOP_VNODE_CHANGE(vp, VCHANGE_FLAGS_TRUNCATED, 3); VOP_VNODE_CHANGE(vp, VCHANGE_FLAGS_TRUNCATED, 3);
@ -2370,7 +2370,7 @@ xfs_remove(
* for a log reservation. Since we'll have to wait for the * for a log reservation. Since we'll have to wait for the
* inactive code to complete before returning from xfs_iget, * inactive code to complete before returning from xfs_iget,
* we need to make sure that we don't have log space reserved * we need to make sure that we don't have log space reserved
* when we call xfs_iget. Instead we get an unlocked referece * when we call xfs_iget. Instead we get an unlocked reference
* to the inode before getting our log reservation. * to the inode before getting our log reservation.
*/ */
error = xfs_get_dir_entry(dentry, &ip); error = xfs_get_dir_entry(dentry, &ip);
@ -3020,7 +3020,7 @@ xfs_rmdir(
* for a log reservation. Since we'll have to wait for the * for a log reservation. Since we'll have to wait for the
* inactive code to complete before returning from xfs_iget, * inactive code to complete before returning from xfs_iget,
* we need to make sure that we don't have log space reserved * we need to make sure that we don't have log space reserved
* when we call xfs_iget. Instead we get an unlocked referece * when we call xfs_iget. Instead we get an unlocked reference
* to the inode before getting our log reservation. * to the inode before getting our log reservation.
*/ */
error = xfs_get_dir_entry(dentry, &cdp); error = xfs_get_dir_entry(dentry, &cdp);