@@ -548,6 +548,30 @@ out:
return ret;
}
+static int spufs_gang_close(struct inode *inode, struct file *file)
+{
+	struct dentry *dir;
+	struct inode *parent;
+
+	dir = file->f_path.dentry;
+	parent = dir->d_parent->d_inode;
+
+	mutex_lock_nested(&parent->i_mutex, I_MUTEX_PARENT);
+	d_drop(dir);
+	mutex_unlock(&parent->i_mutex);
+
+	return dcache_dir_close(inode, file);
+}
+
+const struct file_operations spufs_gang_fops = {
+	.open = dcache_dir_open,
+	.release = spufs_gang_close,
+	.llseek = dcache_dir_lseek,
+	.read = generic_read_dir,
+	.readdir = dcache_readdir,
+	.fsync = simple_sync_file,
+};
+
static int spufs_gang_open(struct dentry *dentry, struct vfsmount *mnt)
{
int ret;
@@ -567,7 +591,7 @@ static int spufs_gang_open(struct dentry *dentry, struct vfsmount *mnt)
goto out;
}
-	filp->f_op = &simple_dir_operations;
+	filp->f_op = &spufs_gang_fops;
fd_install(ret, filp);
out:
return ret;
At present, there is a race between SPU gang destruction and calling dcache_readdir on the top-level directory of a spufs mount. dcache_readdir expects the parent directory's inode to be locked for any operation that removes subdirectories, but this isn't true for the spufs gang destruction path:

  sys_close
    -> gang->file_operations->dcache_dir_close()
      -> dput()

This dput() causes the gang's dentry to be unhashed without the parent directory's i_mutex held, which breaks dcache_readdir's assumption about locked dentries.

This change uses a spufs-specific file_operations->release callback for gangs, which does an explicit d_drop() with the parent's i_mutex held.

However, this does change the dentry lifetime semantics for SPU gangs. Currently, the dentry is still present for a gang that has been close()ed but still has active contexts; with this change, such a gang may be unhashed (and so no longer appear in readdir("/spu")) while it still has active contexts. I believe this behaviour is more consistent with non-gang SPU contexts, which are unhashed at close() time.

Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
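---

For illustration only (not part of the patch), a minimal user-space sketch of the readdir side of the race. It assumes spufs is mounted at /spu; gang creation and close() (via spu_create() with the SPU_CREATE_GANG flag) would happen in a separate process and is not shown here. Each pass walks the top-level spufs directory, so dcache_readdir runs in the kernel while gang dentries may be dropped concurrently.

#include <dirent.h>
#include <stdio.h>

int main(void)
{
	/* Repeatedly list the spufs mount point (path /spu assumed). */
	for (;;) {
		DIR *d = opendir("/spu");
		struct dirent *ent;

		if (!d) {
			perror("opendir /spu");
			return 1;
		}

		/* Each readdir() here ends up in dcache_readdir on the
		 * kernel side; concurrent gang destruction may unhash
		 * entries while we walk the directory. */
		while ((ent = readdir(d)) != NULL)
			printf("%s\n", ent->d_name);

		closedir(d);
	}
}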