#ifndef _FS_CEPH_SUPER_H #define _FS_CEPH_SUPER_H #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef CONFIG_CEPH_FSCACHE #include #endif /* f_type in struct statfs */ #define CEPH_SUPER_MAGIC 0x00c36400 /* large granularity for statfs utilization stats to facilitate * large volume sizes on 32-bit machines. */ #define CEPH_BLOCK_SHIFT 22 /* 4 MB */ #define CEPH_BLOCK (1 << CEPH_BLOCK_SHIFT) #define CEPH_MOUNT_OPT_DIRSTAT (1<<4) /* `cat dirname` for stats */ #define CEPH_MOUNT_OPT_RBYTES (1<<5) /* dir st_bytes = rbytes */ #define CEPH_MOUNT_OPT_NOASYNCREADDIR (1<<7) /* no dcache readdir */ #define CEPH_MOUNT_OPT_INO32 (1<<8) /* 32 bit inos */ #define CEPH_MOUNT_OPT_DCACHE (1<<9) /* use dcache for readdir etc */ #define CEPH_MOUNT_OPT_FSCACHE (1<<10) /* use fscache */ #define CEPH_MOUNT_OPT_NOPOOLPERM (1<<11) /* no pool permission check */ #define CEPH_MOUNT_OPT_DEFAULT (CEPH_MOUNT_OPT_RBYTES | \ CEPH_MOUNT_OPT_DCACHE) #define ceph_set_mount_opt(fsc, opt) \ (fsc)->mount_options->flags |= CEPH_MOUNT_OPT_##opt; #define ceph_test_mount_opt(fsc, opt) \ (!!((fsc)->mount_options->flags & CEPH_MOUNT_OPT_##opt)) #define CEPH_RSIZE_DEFAULT 0 /* max read size */ #define CEPH_RASIZE_DEFAULT (8192*1024) /* readahead */ #define CEPH_MAX_READDIR_DEFAULT 1024 #define CEPH_MAX_READDIR_BYTES_DEFAULT (512*1024) #define CEPH_SNAPDIRNAME_DEFAULT ".snap" struct ceph_mount_options { int flags; int sb_flags; int wsize; /* max write size */ int rsize; /* max read size */ int rasize; /* max readahead */ int congestion_kb; /* max writeback in flight */ int caps_wanted_delay_min, caps_wanted_delay_max; int cap_release_safety; int max_readdir; /* max readdir result (entires) */ int max_readdir_bytes; /* max readdir result (bytes) */ /* * everything above this point can be memcmp'd; everything below * is handled in compare_mount_options() */ char *snapdir_name; /* default ".snap" */ }; struct ceph_fs_client { struct 
super_block *sb; struct ceph_mount_options *mount_options; struct ceph_client *client; unsigned long mount_state; int min_caps; /* min caps i added */ struct ceph_mds_client *mdsc; /* writeback */ mempool_t *wb_pagevec_pool; struct workqueue_struct *wb_wq; struct workqueue_struct *pg_inv_wq; struct workqueue_struct *trunc_wq; atomic_long_t writeback_count; struct backing_dev_info backing_dev_info; #ifdef CONFIG_DEBUG_FS struct dentry *debugfs_dentry_lru, *debugfs_caps; struct dentry *debugfs_congestion_kb; struct dentry *debugfs_bdi; struct dentry *debugfs_mdsc, *debugfs_mdsmap; struct dentry *debugfs_mds_sessions; #endif #ifdef CONFIG_CEPH_FSCACHE struct fscache_cookie *fscache; struct workqueue_struct *revalidate_wq; #endif }; /* * File i/o capability. This tracks shared state with the metadata * server that allows us to cache or writeback attributes or to read * and write data. For any given inode, we should have one or more * capabilities, one issued by each metadata server, and our * cumulative access is the OR of all issued capabilities. * * Each cap is referenced by the inode's i_caps rbtree and by per-mds * session capability lists. */ struct ceph_cap { struct ceph_inode_info *ci; struct rb_node ci_node; /* per-ci cap tree */ struct ceph_mds_session *session; struct list_head session_caps; /* per-session caplist */ u64 cap_id; /* unique cap id (mds provided) */ union { /* in-use caps */ struct { int issued; /* latest, from the mds */ int implemented; /* implemented superset of issued (for revocation) */ int mds, mds_wanted; }; /* caps to release */ struct { u64 cap_ino; int queue_release; }; }; u32 seq, issue_seq, mseq; u32 cap_gen; /* active/stale cycle */ unsigned long last_used; struct list_head caps_item; }; #define CHECK_CAPS_NODELAY 1 /* do not delay any further */ #define CHECK_CAPS_AUTHONLY 2 /* only check auth cap */ #define CHECK_CAPS_FLUSH 4 /* flush any dirty caps */ /* * Snapped cap state that is pending flush to mds. 
When a snapshot occurs, * we first complete any in-process sync writes and writeback any dirty * data before flushing the snapped state (tracked here) back to the MDS. */ struct ceph_cap_snap { atomic_t nref; struct ceph_inode_info *ci; struct list_head ci_item, flushing_item; u64 follows, flush_tid; int issued, dirty; struct ceph_snap_context *context; umode_t mode; kuid_t uid; kgid_t gid; struct ceph_buffer *xattr_blob; u64 xattr_version; u64 size; struct timespec mtime, atime, ctime; u64 time_warp_seq; int writing; /* a sync write is still in progress */ int dirty_pages; /* dirty pages awaiting writeback */ bool inline_data; bool need_flush; }; static inline void ceph_put_cap_snap(struct ceph_cap_snap *capsnap) { if (atomic_dec_and_test(&capsnap->nref)) { if (capsnap->xattr_blob) ceph_buffer_put(capsnap->xattr_blob); kfree(capsnap); } } struct ceph_cap_flush { u64 tid; int caps; struct rb_node g_node; // global union { struct rb_node i_node; // inode struct list_head list; }; }; /* * The frag tree describes how a directory is fragmented, potentially across * multiple metadata servers. It is also used to indicate points where * metadata authority is delegated, and whether/where metadata is replicated. * * A _leaf_ frag will be present in the i_fragtree IFF there is * delegation info. That is, if mds >= 0 || ndist > 0. */ #define CEPH_MAX_DIRFRAG_REP 4 struct ceph_inode_frag { struct rb_node node; /* fragtree state */ u32 frag; int split_by; /* i.e. 2^(split_by) children */ /* delegation and replication info */ int mds; /* -1 if same authority as parent */ int ndist; /* >0 if replicated */ int dist[CEPH_MAX_DIRFRAG_REP]; }; /* * We cache inode xattrs as an encoded blob until they are first used, * at which point we parse them into an rbtree. 
*/ struct ceph_inode_xattr { struct rb_node node; const char *name; int name_len; const char *val; int val_len; int dirty; int should_free_name; int should_free_val; }; /* * Ceph dentry state */ struct ceph_dentry_info { struct ceph_mds_session *lease_session; u32 lease_gen, lease_shared_gen; u32 lease_seq; unsigned long lease_renew_after, lease_renew_from; struct list_head lru; struct dentry *dentry; u64 time; u64 offset; }; struct ceph_inode_xattrs_info { /* * (still encoded) xattr blob. we avoid the overhead of parsing * this until someone actually calls getxattr, etc. * * blob->vec.iov_len == 4 implies there are no xattrs; blob == * NULL means we don't know. */ struct ceph_buffer *blob, *prealloc_blob; struct rb_root index; bool dirty; int count; int names_size; int vals_size; u64 version, index_version; }; /* * Ceph inode. */ struct ceph_inode_info { struct ceph_vino i_vino; /* ceph ino + snap */ spinlock_t i_ceph_lock; u64 i_version; u64 i_inline_version; u32 i_time_warp_seq; unsigned i_ceph_flags; atomic64_t i_release_count; atomic64_t i_ordered_count; atomic64_t i_complete_seq[2]; struct ceph_dir_layout i_dir_layout; struct ceph_file_layout i_layout; char *i_symlink; /* for dirs */ struct timespec i_rctime; u64 i_rbytes, i_rfiles, i_rsubdirs; u64 i_files, i_subdirs; struct rb_root i_fragtree; struct mutex i_fragtree_mutex; struct ceph_inode_xattrs_info i_xattrs; /* capabilities. protected _both_ by i_ceph_lock and cap->session's * s_mutex. */ struct rb_root i_caps; /* cap list */ struct ceph_cap *i_auth_cap; /* authoritative cap, if any */ unsigned i_dirty_caps, i_flushing_caps; /* mask of dirtied fields */ struct list_head i_dirty_item, i_flushing_item; /* we need to track cap writeback on a per-cap-bit basis, to allow * overlapping, pipelined cap flushes to the mds. we can probably * reduce the tid to 8 bits if we're concerned about inode size. 
*/ struct ceph_cap_flush *i_prealloc_cap_flush; struct rb_root i_cap_flush_tree; wait_queue_head_t i_cap_wq; /* threads waiting on a capability */ unsigned long i_hold_caps_min; /* jiffies */ unsigned long i_hold_caps_max; /* jiffies */ struct list_head i_cap_delay_list; /* for delayed cap release to mds */ struct ceph_cap_reservation i_cap_migration_resv; struct list_head i_cap_snaps; /* snapped state pending flush to mds */ struct ceph_snap_context *i_head_snapc; /* set if wr_buffer_head > 0 or dirty|flushing caps */ unsigned i_snap_caps; /* cap bits for snapped files */ int i_nr_by_mode[CEPH_FILE_MODE_NUM]; /* open file counts */ struct mutex i_truncate_mutex; u32 i_truncate_seq; /* last truncate to smaller size */ u64 i_truncate_size; /* and the size we last truncated down to */ int i_truncate_pending; /* still need to call vmtruncate */ u64 i_max_size; /* max file size authorized by mds */ u64 i_reported_size; /* (max_)size reported to or requested of mds */ u64 i_wanted_max_size; /* offset we'd like to write too */ u64 i_requested_max_size; /* max_size we've requested */ /* held references to caps */ int i_pin_ref; int i_rd_ref, i_rdcache_ref, i_wr_ref, i_wb_ref; int i_wrbuffer_ref, i_wrbuffer_ref_head; u32 i_shared_gen; /* increment each time we get FILE_SHARED */ u32 i_rdcache_gen; /* incremented each time we get FILE_CACHE. 
*/ u32 i_rdcache_revoking; /* RDCACHE gen to async invalidate, if any */ struct list_head i_unsafe_writes; /* uncommitted sync writes */ struct list_head i_unsafe_dirops; /* uncommitted mds dir ops */ struct list_head i_unsafe_iops; /* uncommitted mds inode ops */ spinlock_t i_unsafe_lock; struct ceph_snap_realm *i_snap_realm; /* snap realm (if caps) */ int i_snap_realm_counter; /* snap realm (if caps) */ struct list_head i_snap_realm_item; struct list_head i_snap_flush_item; struct work_struct i_wb_work; /* writeback work */ struct work_struct i_pg_inv_work; /* page invalidation work */ struct work_struct i_vmtruncate_work; #ifdef CONFIG_CEPH_FSCACHE struct fscache_cookie *fscache; u32 i_fscache_gen; /* sequence, for delayed fscache validate */ struct work_struct i_revalidate_work; #endif struct inode vfs_inode; /* at end */ }; static inline struct ceph_inode_info *ceph_inode(struct inode *inode) { return container_of(inode, struct ceph_inode_info, vfs_inode); } static inline struct ceph_fs_client *ceph_inode_to_client(struct inode *inode) { return (struct ceph_fs_client *)inode->i_sb->s_fs_info; } static inline struct ceph_fs_client *ceph_sb_to_client(struct super_block *sb) { return (struct ceph_fs_client *)sb->s_fs_info; } static inline struct ceph_vino ceph_vino(struct inode *inode) { return ceph_inode(inode)->i_vino; } /* * ino_t is <64 bits on many architectures, blech. 
* * i_ino (kernel inode) st_ino (userspace) * i386 32 32 * x86_64+ino32 64 32 * x86_64 64 64 */ static inline u32 ceph_ino_to_ino32(__u64 vino) { u32 ino = vino & 0xffffffff; ino ^= vino >> 32; if (!ino) ino = 2; return ino; } /* * kernel i_ino value */ static inline ino_t ceph_vino_to_ino(struct ceph_vino vino) { #if BITS_PER_LONG == 32 return ceph_ino_to_ino32(vino.ino); #else return (ino_t)vino.ino; #endif } /* * user-visible ino (stat, filldir) */ #if BITS_PER_LONG == 32 static inline ino_t ceph_translate_ino(struct super_block *sb, ino_t ino) { return ino; } #else static inline ino_t ceph_translate_ino(struct super_block *sb, ino_t ino) { if (ceph_test_mount_opt(ceph_sb_to_client(sb), INO32)) ino = ceph_ino_to_ino32(ino); return ino; } #endif /* for printf-style formatting */ #define ceph_vinop(i) ceph_inode(i)->i_vino.ino, ceph_inode(i)->i_vino.snap static inline u64 ceph_ino(struct inode *inode) { return ceph_inode(inode)->i_vino.ino; } static inline u64 ceph_snap(struct inode *inode) { return ceph_inode(inode)->i_vino.snap; } static inline int ceph_ino_compare(struct inode *inode, void *data) { struct ceph_vino *pvino = (struct ceph_vino *)data; struct ceph_inode_info *ci = ceph_inode(inode); return ci->i_vino.ino == pvino->ino && ci->i_vino.snap == pvino->snap; } static inline struct inode *ceph_find_inode(struct super_block *sb, struct ceph_vino vino) { ino_t t = ceph_vino_to_ino(vino); return ilookup5(sb, t, ceph_ino_compare, &vino); } /* * Ceph inode. */ #define CEPH_I_DIR_ORDERED (1 << 0) /* dentries in dir are ordered */ #define CEPH_I_NODELAY (1 << 1) /* do not delay cap release */ #define CEPH_I_FLUSH (1 << 2) /* do not delay flush of dirty metadata */ #define CEPH_I_NOFLUSH (1 << 3) /* do not flus
/*
 * Device Tree Source for UniPhier PH1-Pro5 SoC
 *
 * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
 *
 * This file is dual-licensed: you can use it either under the terms
 * of the GPL or the X11 license, at your option. Note that this dual
 * licensing only applies to this file, and not this project as a
 * whole.
 *
 *  a) This file is free software; you can redistribute it and/or
 *     modify it under the terms of the GNU General Public License as
 *     published by the Free Software Foundation; either version 2 of the
 *     License, or (at your option) any later version.
 *
 *     This file is distributed in the hope that it will be useful,
 *     but WITHOUT ANY WARRANTY; without even the implied warranty of
 *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *     GNU General Public License for more details.
 *
 * Or, alternatively,
 *
 *  b) Permission is hereby granted, free of charge, to any person
 *     obtaining a copy of this software and associated documentation
 *     files (the "Software"), to deal in the Software without
 *     restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or
 *     sell copies of the Software, and to permit persons to whom the
 *     Software is furnished to do so, subject to the following
 *     conditions:
 *
 *     The above copyright notice and this permission notice shall be
 *     included in all copies or substantial portions of the Software.
 *
 *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 *     OTHER DEALINGS IN THE SOFTWARE.
 */

/include/ "skeleton.dtsi"

/ {
	compatible = "socionext,ph1-pro5";

	cpus {
		#address-cells = <1>;
		#size-cells = <0>;
		/* secondary core bring-up handled by the UniPhier SMP code */
		enable-method = "socionext,uniphier-smp";

		/* Two Cortex-A9 cores, both fronted by the shared L2 below */
		cpu@0 {
			device_type = "cpu";
			compatible = "arm,cortex-a9";
			reg = <0>;
			next-level-cache = <&l2>;
		};

		cpu@1 {
			device_type = "cpu";
			compatible = "arm,cortex-a9";
			reg = <1>;
			next-level-cache = <&l2>;
		};
	};

	/* Fixed-rate reference clocks for the on-chip peripherals */
	clocks {
		/* 50 MHz input to the Cortex-A9 global/private timers */
		arm_timer_clk: arm_timer_clk {
			#clock-cells = <0>;
			compatible = "fixed-clock";
			clock-frequency = <50000000>;
		};

		/* 73.728 MHz baud-rate reference shared by all four UARTs */
		uart_clk: uart_clk {
			#clock-cells = <0>;
			compatible = "fixed-clock";
			clock-frequency = <73728000>;
		};

		/* 50 MHz reference shared by all FI2C controllers */
		i2c_clk: i2c_clk {
			#clock-cells = <0>;
			compatible = "fixed-clock";
			clock-frequency = <50000000>;
		};
	};

	soc {
		compatible = "simple-bus";
		#address-cells = <1>;
		#size-cells = <1>;
		ranges;
		/* all peripheral interrupts below route to the GIC (intc) */
		interrupt-parent = <&intc>;

		/*
		 * External bus. Left empty here; devices behind it are
		 * presumably added by board-level dts files — confirm.
		 */
		extbus: extbus {
			compatible = "simple-bus";
			#address-cells = <2>;
			#size-cells = <1>;
		};

		/* Unified L2 cache: 2 MiB, 512 sets, 128-byte lines */
		l2: l2-cache@500c0000 {
			compatible = "socionext,uniphier-system-cache";
			reg = <0x500c0000 0x2000>, <0x503c0100 0x8>,
			      <0x506c0000 0x400>;
			interrupts = <0 190 4>, <0 191 4>;
			cache-unified;
			cache-size = <(2 * 1024 * 1024)>;
			cache-sets = <512>;
			cache-line-size = <128>;
			cache-level = <2>;
			next-level-cache = <&l3>;
		};

		/* Unified L3 cache: 2 MiB, 512 sets, 256-byte lines (last level) */
		l3: l3-cache@500c8000 {
			compatible = "socionext,uniphier-system-cache";
			reg = <0x500c8000 0x2000>, <0x503c8100 0x8>,
			      <0x506c8000 0x400>;
			interrupts = <0 174 4>, <0 175 4>;
			cache-unified;
			cache-size = <(2 * 1024 * 1024)>;
			cache-sets = <512>;
			cache-line-size = <256>;
			cache-level = <3>;
		};

		/*
		 * Four UART channels; all disabled here and enabled per-board.
		 * Each selects its pin group via pinctrl and runs off uart_clk.
		 */
		serial0: serial@54006800 {
			compatible = "socionext,uniphier-uart";
			status = "disabled";
			reg = <0x54006800 0x40>;
			pinctrl-names = "default";
			pinctrl-0 = <&pinctrl_uart0>;
			interrupts = <0 33 4>;
			clocks = <&uart_clk>;
		};

		serial1: serial@54006900 {
			compatible = "socionext,uniphier-uart";
			status = "disabled";
			reg = <0x54006900 0x40>;
			pinctrl-names = "default";
			pinctrl-0 = <&pinctrl_uart1>;
			interrupts = <0 35 4>;
			clocks = <&uart_clk>;
		};

		serial2: serial@54006a00 {
			compatible = "socionext,uniphier-uart";
			status = "disabled";
			reg = <0x54006a00 0x40>;
			pinctrl-names = "default";
			pinctrl-0 = <&pinctrl_uart2>;
			interrupts = <0 37 4>;
			clocks = <&uart_clk>;
		};

		serial3: serial@54006b00 {
			compatible = "socionext,uniphier-uart";
			status = "disabled";
			reg = <0x54006b00 0x40>;
			pinctrl-names = "default";
			pinctrl-0 = <&pinctrl_uart3>;
			interrupts = <0 177 4>;
			clocks = <&uart_clk>;
		};

		/*
		 * FI2C controllers i2c0-i2c3 go off-chip: pin-muxed, 100 kHz
		 * standard mode, disabled until a board enables them.
		 * i2c5/i2c6 are chip-internal links (no pinctrl, always on,
		 * 400 kHz fast mode).
		 */
		i2c0: i2c@58780000 {
			compatible = "socionext,uniphier-fi2c";
			status = "disabled";
			reg = <0x58780000 0x80>;
			#address-cells = <1>;
			#size-cells = <0>;
			pinctrl-names = "default";
			pinctrl-0 = <&pinctrl_i2c0>;
			interrupts = <0 41 4>;
			clocks = <&i2c_clk>;
			clock-frequency = <100000>;
		};

		i2c1: i2c@58781000 {
			compatible = "socionext,uniphier-fi2c";
			status = "disabled";
			reg = <0x58781000 0x80>;
			#address-cells = <1>;
			#size-cells = <0>;
			pinctrl-names = "default";
			pinctrl-0 = <&pinctrl_i2c1>;
			interrupts = <0 42 4>;
			clocks = <&i2c_clk>;
			clock-frequency = <100000>;
		};

		i2c2: i2c@58782000 {
			compatible = "socionext,uniphier-fi2c";
			status = "disabled";
			reg = <0x58782000 0x80>;
			#address-cells = <1>;
			#size-cells = <0>;
			pinctrl-names = "default";
			pinctrl-0 = <&pinctrl_i2c2>;
			interrupts = <0 43 4>;
			clocks = <&i2c_clk>;
			clock-frequency = <100000>;
		};

		i2c3: i2c@58783000 {
			compatible = "socionext,uniphier-fi2c";
			status = "disabled";
			reg = <0x58783000 0x80>;
			#address-cells = <1>;
			#size-cells = <0>;
			pinctrl-names = "default";
			pinctrl-0 = <&pinctrl_i2c3>;
			interrupts = <0 44 4>;
			clocks = <&i2c_clk>;
			clock-frequency = <100000>;
		};

		/* i2c4 does not exist */

		/* chip-internal connection for DMD */
		i2c5: i2c@58785000 {
			compatible = "socionext,uniphier-fi2c";
			reg = <0x58785000 0x80>;
			#address-cells = <1>;
			#size-cells = <0>;
			interrupts = <0 25 4>;
			clocks = <&i2c_clk>;
			clock-frequency = <400000>;
		};

		/* chip-internal connection for HDMI */
		i2c6: i2c@58786000 {
			compatible = "socionext,uniphier-fi2c";
			reg = <0x58786000 0x80>;
			#address-cells = <1>;
			#size-cells = <0>;
			interrupts = <0 26 4>;
			clocks = <&i2c_clk>;
			clock-frequency = <400000>;
		};

		/* controller for the external bus (extbus) above */
		system-bus-controller@58c00000 {
			compatible = "socionext,uniphier-system-bus-controller";
			reg = <0x58c00000 0x400>, <0x59800000 0x2000>;
		};

		/* pin controller; also exposed as a syscon regmap */
		pinctrl: pinctrl@5f801000 {
			compatible = "socionext,ph1-pro5-pinctrl", "syscon";
			reg = <0x5f801000 0xe00>;
		};

		/* Cortex-A9 global timer (PPI 11) */
		timer@60000200 {
			compatible = "arm,cortex-a9-global-timer";
			reg = <0x60000200 0x20>;
			interrupts = <1 11 0x304>;
			clocks = <&arm_timer_clk>;
		};

		/* Cortex-A9 per-cpu (TWD) timer (PPI 13) */
		timer@60000600 {
			compatible = "arm,cortex-a9-twd-timer";
			reg = <0x60000600 0x20>;
			interrupts = <1 13 0x304>;
			clocks = <&arm_timer_clk>;
		};

		/*
		 * GIC: per the arm,gic binding the first region is the
		 * distributor, the second the CPU interface.
		 */
		intc: interrupt-controller@60001000 {
			compatible = "arm,cortex-a9-gic";
			#interrupt-cells = <3>;
			interrupt-controller;
			reg = <0x60001000 0x1000>,
			      <0x60000100 0x100>;
		};
	};
};

/include/ "uniphier-pinctrl.dtsi"
ct vm_area_struct *vma); extern int ceph_uninline_data(struct file *filp, struct page *locked_page); extern int ceph_pool_perm_check(struct ceph_inode_info *ci, int need); extern void ceph_pool_perm_destroy(struct ceph_mds_client* mdsc); /* file.c */ extern const struct file_operations ceph_file_fops; extern int ceph_open(struct inode *inode, struct file *file); extern int ceph_atomic_open(struct inode *dir, struct dentry *dentry, struct file *file, unsigned flags, umode_t mode, int *opened); extern int ceph_release(struct inode *inode, struct file *filp); extern void ceph_fill_inline_data(struct inode *inode, struct page *locked_page, char *data, size_t len); /* dir.c */ extern const struct file_operations ceph_dir_fops; extern const struct file_operations ceph_snapdir_fops; extern const struct inode_operations ceph_dir_iops; extern const struct inode_operations ceph_snapdir_iops; extern const struct dentry_operations ceph_dentry_ops, ceph_snap_dentry_ops, ceph_snapdir_dentry_ops; extern int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry); extern int ceph_handle_snapdir(struct ceph_mds_request *req, struct dentry *dentry, int err); extern struct dentry *ceph_finish_lookup(struct ceph_mds_request *req, struct dentry *dentry, int err); extern void ceph_dentry_lru_add(struct dentry *dn); extern void ceph_dentry_lru_touch(struct dentry *dn); extern void ceph_dentry_lru_del(struct dentry *dn); extern void ceph_invalidate_dentry_lease(struct dentry *dentry); extern unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn); extern struct inode *ceph_get_dentry_parent_inode(struct dentry *dentry); extern void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl); /* * our d_ops vary depending on whether the inode is live, * snapshotted (read-only), or a virtual ".snap" directory. 
*/ int ceph_init_dentry(struct dentry *dentry); /* ioctl.c */ extern long ceph_ioctl(struct file *file, unsigned int cmd, unsigned long arg); /* export.c */ extern const struct export_operations ceph_export_ops; /* locks.c */ extern __init void ceph_flock_init(void); extern int ceph_lock(struct file *file, int cmd, struct file_lock *fl); extern int ceph_flock(struct file *file, int cmd, struct file_lock *fl); extern void ceph_count_locks(struct inode *inode, int *p_num, int *f_num); extern int ceph_encode_locks_to_buffer(struct inode *inode, struct ceph_filelock *flocks, int num_fcntl_locks, int num_flock_locks); extern int ceph_locks_to_pagelist(struct ceph_filelock *flocks, struct ceph_pagelist *pagelist, int num_fcntl_locks, int num_flock_locks); extern int lock_to_ceph_filelock(struct file_lock *fl, struct ceph_filelock *c); /* debugfs.c */ extern int ceph_fs_debugfs_init(struct ceph_fs_client *client); extern void ceph_fs_debugfs_cleanup(struct ceph_fs_client *client); #endif /* _FS_CEPH_SUPER_H */