4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
7 * Common Internet FileSystem (CIFS) client
9 * This library is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU Lesser General Public License as published
11 * by the Free Software Foundation; either version 2.1 of the License, or
12 * (at your option) any later version.
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17 * the GNU Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public License
20 * along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 /* Note that BB means BUGBUG (ie something to fix eventually) */
26 #include <linux/module.h>
28 #include <linux/mount.h>
29 #include <linux/slab.h>
30 #include <linux/init.h>
31 #include <linux/list.h>
32 #include <linux/seq_file.h>
33 #include <linux/vfs.h>
34 #include <linux/mempool.h>
35 #include <linux/delay.h>
36 #include <linux/kthread.h>
37 #include <linux/freezer.h>
40 #define DECLARE_GLOBALS_HERE
42 #include "cifsproto.h"
43 #include "cifs_debug.h"
44 #include "cifs_fs_sb.h"
46 #include <linux/key-type.h>
47 #include "dns_resolve.h"
48 #include "cifs_spnego.h"
49 #define CIFS_MAGIC_NUMBER 0xFF534D42 /* the first four bytes of SMB PDUs */
/*
 * Module-wide tunables and global state for the CIFS client.
 * Several of these are exported as module parameters (module_param below);
 * the rest are toggled at runtime via /proc/fs/cifs entries elsewhere.
 */
#ifdef CONFIG_CIFS_QUOTA
/* forward declaration; initializer appears after the quota handlers below */
static struct quotactl_ops cifs_quotactl_ops;
/* 1 = request oplocks so file data/metadata may be cached client-side */
unsigned int oplockEnabled = 1;
/* enables experimental code paths; off by default */
unsigned int experimEnabled = 0;
/* 1 = use CIFS Unix Extensions when the server offers them */
unsigned int linuxExtEnabled = 1;
/* 1 = cache lookup (dentry) results */
unsigned int lookupCacheEnabled = 1;
/* 1 = allow multiple users to share a mount (distinct credentials) */
unsigned int multiuser_mount = 0;
/* bitmask of allowed/required security mechanisms (CIFSSEC_* flags) */
unsigned int extended_security = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
/* 1 = sign SMB PDUs when server supports/requires signing */
unsigned int sign_CIFS_PDUs = 1;
extern struct task_struct *oplockThread; /* remove sparse warning */
/* kernel thread that flushes cached data when an oplock break arrives */
struct task_struct *oplockThread = NULL;
/* extern struct task_struct * dnotifyThread; remove sparse warning */
/* kernel thread that periodically wakes waiters stuck on dead sessions */
static struct task_struct *dnotifyThread = NULL;
static const struct super_operations cifs_super_ops;
/* network buffer size, settable at module load; clamped in
   cifs_init_request_bufs() to the 8192..130048 range noted below */
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, int, 0);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header). "
"Default: 16384 Range: 8192 to 130048");
/* minimum number of large receive buffers kept in the mempool */
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, int, 0);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
/* minimum number of small (header-sized) buffers kept in the mempool */
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, int, 0);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
/* cap on simultaneous in-flight requests per server; clamped in init_cifs() */
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, int, 0);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. "
"Default: 50 Range: 2 to 256");
/* mempools/caches defined later in this file (see the static definitions
   near cifs_alloc_inode) and shared with other CIFS translation units */
extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;
extern struct kmem_cache *cifs_oplock_cachep;
/*
 * cifs_read_super - fill in a superblock for a new CIFS mount.
 * @sb:      superblock to initialize
 * @data:    mount options string from userspace
 * @devname: UNC name of the share being mounted (e.g. //server/share)
 * @silent:  suppress error reporting when nonzero (MS_SILENT)
 *
 * Allocates the per-superblock cifs_sb_info, performs the actual network
 * mount via cifs_mount(), then instantiates the root inode and dentry.
 * Error paths unwind the mount and free cifs_sb state.
 */
cifs_read_super(struct super_block *sb, void *data,
const char *devname, int silent)
struct cifs_sb_info *cifs_sb;
/* BB should we make this contingent on mount parm? */
sb->s_flags |= MS_NODIRATIME | MS_NOATIME;
/* per-superblock CIFS private data; CIFS_SB() below just casts s_fs_info */
sb->s_fs_info = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
cifs_sb = CIFS_SB(sb);
#ifdef CONFIG_CIFS_DFS_UPCALL
/* copy mount params to sb for use in submounts */
/* BB: should we move this after the mount so we
* do not have to do the copy on failed mounts?
* BB: May be it is better to do simple copy before
* complex operation (mount), and in case of fail
* just exit instead of doing mount and attempting
* undo it if this copy fails?*/
int len = strlen(data);
cifs_sb->mountdata = kzalloc(len + 1, GFP_KERNEL);
if (cifs_sb->mountdata == NULL) {
kfree(sb->s_fs_info);
sb->s_fs_info = NULL;
/* copy then force NUL termination (strncpy alone does not guarantee it) */
strncpy(cifs_sb->mountdata, data, len + 1);
cifs_sb->mountdata[len] = '\0';
/* establish the TCP session / SMB session / tree connection */
rc = cifs_mount(sb, cifs_sb, data, devname);
("cifs_mount failed w/return code = %d", rc));
goto out_mount_failed;
sb->s_magic = CIFS_MAGIC_NUMBER;
sb->s_op = &cifs_super_ops;
/* if (cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512)
cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
#ifdef CONFIG_CIFS_QUOTA
sb->s_qcop = &cifs_quotactl_ops;
sb->s_blocksize = CIFS_MAX_MSGSIZE;
sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */
/* fetch/instantiate the root inode of the share */
inode = cifs_iget(sb, ROOT_I);
sb->s_root = d_alloc_root(inode);
#ifdef CONFIG_CIFS_EXPERIMENTAL
/* NFS export of CIFS mounts needs stable server inode numbers */
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
cFYI(1, ("export ops supported"));
sb->s_export_op = &cifs_export_ops;
#endif /* EXPERIMENTAL */
/* --- error unwind paths below --- */
cERROR(1, ("cifs_read_super: get root inode failed"));
cifs_umount(sb, cifs_sb);
#ifdef CONFIG_CIFS_DFS_UPCALL
if (cifs_sb->mountdata) {
kfree(cifs_sb->mountdata);
cifs_sb->mountdata = NULL;
if (cifs_sb->local_nls)
unload_nls(cifs_sb->local_nls);
/*
 * cifs_put_super - tear down a CIFS superblock at unmount.
 * Releases the tree connection/session via cifs_umount(), then frees the
 * DFS mount-options copy (if any) and drops the NLS charset table.
 */
cifs_put_super(struct super_block *sb)
struct cifs_sb_info *cifs_sb;
cFYI(1, ("In cifs_put_super"));
cifs_sb = CIFS_SB(sb);
/* nothing to release if fill_super failed before allocating cifs_sb */
if (cifs_sb == NULL) {
cFYI(1, ("Empty cifs superblock info passed to unmount"));
rc = cifs_umount(sb, cifs_sb);
cERROR(1, ("cifs_umount failed with return code %d", rc));
#ifdef CONFIG_CIFS_DFS_UPCALL
if (cifs_sb->mountdata) {
kfree(cifs_sb->mountdata);
cifs_sb->mountdata = NULL;
unload_nls(cifs_sb->local_nls);
/*
 * cifs_statfs - report filesystem statistics for statfs(2).
 * Tries progressively older server query levels until one succeeds:
 * POSIX QFS info, then NT-level QFSInfo, then the legacy LANMAN call.
 */
cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
struct super_block *sb = dentry->d_sb;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
struct cifsTconInfo *tcon = cifs_sb->tcon;
int rc = -EOPNOTSUPP;
buf->f_type = CIFS_MAGIC_NUMBER;
/*
 * PATH_MAX may be too long - it would presumably be total path,
 * but note that some servers (including Samba 3) have a shorter
 * Instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO.
 */
buf->f_namelen = PATH_MAX;
buf->f_files = 0;	/* undefined */
buf->f_ffree = 0;	/* unlimited */
/*
 * We could add a second check for a QFS Unix capability bit
 */
if ((tcon->ses->capabilities & CAP_UNIX) &&
(CIFS_POSIX_EXTENSIONS & le64_to_cpu(tcon->fsUnixInfo.Capability)))
rc = CIFSSMBQFSPosixInfo(xid, tcon, buf);
/*
 * Only need to call the old QFSInfo if failed on newer one,
 */
if (rc && (tcon->ses->capabilities & CAP_NT_SMBS))
rc = CIFSSMBQFSInfo(xid, tcon, buf);
/*
 * Some old Windows servers also do not support level 103, retry with
 * older level one if old server failed the previous call or we
 * bypassed it because we detected that this was an older LANMAN sess
 */
rc = SMBOldQFSInfo(xid, tcon, buf);
/*
 * cifs_permission - VFS permission check hook.
 * With the "noperm" mount flag the client skips mode-bit checking entirely
 * (the server enforces access); otherwise fall through to the generic
 * client-side mode-bit check.
 */
static int cifs_permission(struct inode *inode, int mask)
struct cifs_sb_info *cifs_sb;
cifs_sb = CIFS_SB(inode->i_sb);
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
else /* file mode might have been restricted at mount time
on the client (above and beyond ACL on servers) for
servers which do not support setting and viewing mode bits,
so allowing client to check permissions is useful */
return generic_permission(inode, mask, NULL);
/* Slab caches and mempools backing CIFS allocations; created in the
 * cifs_init_* functions below and torn down in the matching destroy calls. */
static struct kmem_cache *cifs_inode_cachep;	/* cifsInodeInfo objects */
static struct kmem_cache *cifs_req_cachep;	/* large network buffers */
static struct kmem_cache *cifs_mid_cachep;	/* mid_q_entry (multiplex ids) */
struct kmem_cache *cifs_oplock_cachep;		/* oplock_q_entry; shared */
static struct kmem_cache *cifs_sm_req_cachep;	/* small network buffers */
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;
/*
 * cifs_alloc_inode - allocate and initialize a CIFS inode.
 * Allocates a cifsInodeInfo from the slab cache and resets all per-inode
 * CIFS state; returns the embedded VFS inode.
 */
static struct inode *
cifs_alloc_inode(struct super_block *sb)
struct cifsInodeInfo *cifs_inode;
cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);
cifs_inode->cifsAttrs = 0x20;	/* default */
atomic_set(&cifs_inode->inUse, 0);
/* time == 0 marks the cached attributes as stale (needs revalidate) */
cifs_inode->time = 0;
cifs_inode->write_behind_rc = 0;
/* Until the file is open and we have gotten oplock
info back from the server, can not assume caching of
file data or metadata */
cifs_inode->clientCanCacheRead = false;
cifs_inode->clientCanCacheAll = false;
cifs_inode->delete_pending = false;
cifs_inode->vfs_inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
/* Can not set i_flags here - they get immediately overwritten
to zero by the VFS */
/* cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME;*/
INIT_LIST_HEAD(&cifs_inode->openFileList);
return &cifs_inode->vfs_inode;
/* cifs_destroy_inode - return a cifsInodeInfo to its slab cache. */
cifs_destroy_inode(struct inode *inode)
kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
/*
 * cifs_show_options() is for displaying mount options in /proc/mounts.
 * Not all settable options are displayed but most of the important
 * ones are; each option is emitted only when it differs from the default.
 */
cifs_show_options(struct seq_file *s, struct vfsmount *m)
struct cifs_sb_info *cifs_sb;
cifs_sb = CIFS_SB(m->mnt_sb);
/* BB add prepath to mount options displayed */
seq_printf(s, ",unc=%s", cifs_sb->tcon->treeName);
if (cifs_sb->tcon->ses) {
if (cifs_sb->tcon->ses->userName)
seq_printf(s, ",username=%s",
cifs_sb->tcon->ses->userName);
if (cifs_sb->tcon->ses->domainName)
seq_printf(s, ",domain=%s",
cifs_sb->tcon->ses->domainName);
/* uid/gid shown when overridden at mount or when the server
   cannot supply them (no Unix extensions) */
if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) ||
!(cifs_sb->tcon->unix_ext))
seq_printf(s, ",uid=%d", cifs_sb->mnt_uid);
if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) ||
!(cifs_sb->tcon->unix_ext))
seq_printf(s, ",gid=%d", cifs_sb->mnt_gid);
if (!cifs_sb->tcon->unix_ext) {
seq_printf(s, ",file_mode=0%o,dir_mode=0%o",
cifs_sb->mnt_file_mode,
cifs_sb->mnt_dir_mode);
if (cifs_sb->tcon->seal)
seq_printf(s, ",seal");
if (cifs_sb->tcon->nocase)
seq_printf(s, ",nocase");
if (cifs_sb->tcon->retry)
seq_printf(s, ",hard");
/* boolean per-mount flags, one option keyword each */
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
seq_printf(s, ",posixpaths");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
seq_printf(s, ",setuids");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
seq_printf(s, ",serverino");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
seq_printf(s, ",directio");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
seq_printf(s, ",nouser_xattr");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
seq_printf(s, ",mapchars");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
seq_printf(s, ",sfu");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
seq_printf(s, ",nobrl");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
seq_printf(s, ",cifsacl");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
seq_printf(s, ",dynperm");
if (m->mnt_sb->s_flags & MS_POSIXACL)
seq_printf(s, ",acl");
seq_printf(s, ",rsize=%d", cifs_sb->rsize);
seq_printf(s, ",wsize=%d", cifs_sb->wsize);
/*
 * XFS-style quota operations for CIFS. These are stubs/placeholders: each
 * handler resolves the tree connection and logs the request; actual quota
 * SMB calls are not implemented here (note the get handlers reuse the
 * "set" debug message — presumably copy/paste, harmless since only a log).
 */
#ifdef CONFIG_CIFS_QUOTA
int cifs_xquota_set(struct super_block *sb, int quota_type, qid_t qid,
struct fs_disk_quota *pdquota)
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
struct cifsTconInfo *pTcon;
pTcon = cifs_sb->tcon;
cFYI(1, ("set type: 0x%x id: %d", quota_type, qid));
int cifs_xquota_get(struct super_block *sb, int quota_type, qid_t qid,
struct fs_disk_quota *pdquota)
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
struct cifsTconInfo *pTcon;
pTcon = cifs_sb->tcon;
cFYI(1, ("set type: 0x%x id: %d", quota_type, qid));
int cifs_xstate_set(struct super_block *sb, unsigned int flags, int operation)
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
struct cifsTconInfo *pTcon;
pTcon = cifs_sb->tcon;
cFYI(1, ("flags: 0x%x operation: 0x%x", flags, operation));
int cifs_xstate_get(struct super_block *sb, struct fs_quota_stat *qstats)
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
struct cifsTconInfo *pTcon;
pTcon = cifs_sb->tcon;
cFYI(1, ("pqstats %p", qstats));
/* dispatch table wired into sb->s_qcop in cifs_read_super() */
static struct quotactl_ops cifs_quotactl_ops = {
	.set_xquota = cifs_xquota_set,
	.get_xquota = cifs_xquota_get,
	.set_xstate = cifs_xstate_set,
	.get_xstate = cifs_xstate_get,
/*
 * cifs_umount_begin - called for a forced unmount (umount -f).
 * Marks the tree connection as exiting when this is its last user and
 * wakes any tasks blocked waiting on server responses so they can
 * error out instead of hanging on a dead server.
 */
static void cifs_umount_begin(struct super_block *sb)
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
struct cifsTconInfo *tcon;
tcon = cifs_sb->tcon;
down(&tcon->tconSem);
/* only mark exiting if no other mount shares this tree connection */
if (atomic_read(&tcon->useCount) == 1)
tcon->tidStatus = CifsExiting;
/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
/* cancel_notify_requests(tcon); */
if (tcon->ses && tcon->ses->server) {
cFYI(1, ("wake up tasks now - umount begin not complete"));
wake_up_all(&tcon->ses->server->request_q);
wake_up_all(&tcon->ses->server->response_q);
msleep(1); /* yield */
/* we have to kick the requests once more */
wake_up_all(&tcon->ses->server->response_q);
/* BB FIXME - finish add checks for tidStatus BB */
#ifdef CONFIG_CIFS_STATS2
/* per-mount statistics hook for /proc/mounts (CONFIG_CIFS_STATS2 only) */
static int cifs_show_stats(struct seq_file *s, struct vfsmount *mnt)
/* cifs_remount - remount hook; CIFS only forces nodiratime on remount,
 * other options are not re-parsed here. */
static int cifs_remount(struct super_block *sb, int *flags, char *data)
*flags |= MS_NODIRATIME;
/* Superblock operations table installed on every CIFS mount. */
static const struct super_operations cifs_super_ops = {
	.put_super = cifs_put_super,
	.statfs = cifs_statfs,
	.alloc_inode = cifs_alloc_inode,
	.destroy_inode = cifs_destroy_inode,
/*	.drop_inode	    = generic_delete_inode,
	.delete_inode	= cifs_delete_inode,  */  /* Do not need above two
	functions unless later we add lazy close of inodes or unless the
	kernel forgets to call us with the same number of releases (closes)
	.show_options = cifs_show_options,
	.umount_begin   = cifs_umount_begin,
	.remount_fs = cifs_remount,
#ifdef CONFIG_CIFS_STATS2
	.show_stats = cifs_show_stats,
/*
 * cifs_get_sb - mount entry point for the cifs filesystem type.
 * Always creates a fresh anonymous superblock (no sharing between mounts)
 * and fills it via cifs_read_super(); on failure the half-built sb is
 * deactivated, on success it is marked active and attached to the vfsmount.
 */
cifs_get_sb(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data, struct vfsmount *mnt)
struct super_block *sb = sget(fs_type, NULL, set_anon_super, NULL);
cFYI(1, ("Devname: %s flags: %d ", dev_name, flags));
rc = cifs_read_super(sb, data, dev_name, flags & MS_SILENT ? 1 : 0);
up_write(&sb->s_umount);
deactivate_super(sb);
sb->s_flags |= MS_ACTIVE;
return simple_set_mnt(mnt, sb);
/*
 * cifs_file_aio_write - write path wrapper around generic aio write.
 * After the generic write, if we do not hold an exclusive oplock
 * (clientCanCacheAll) the dirty pages are pushed to the server promptly,
 * since another client could read the file.
 */
static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
written = generic_file_aio_write(iocb, iov, nr_segs, pos);
if (!CIFS_I(inode)->clientCanCacheAll)
filemap_fdatawrite(inode->i_mapping);
/*
 * cifs_llseek - llseek with server-side size revalidation for SEEK_END.
 * The cached file length may be stale, so seeking relative to EOF forces
 * a revalidate against the server first.
 */
static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
/* origin == SEEK_END => we must revalidate the cached file length */
if (origin == SEEK_END) {
/* some applications poll for the file length in this strange
way so we must seek to end on non-oplocked files by
setting the revalidate time to zero */
CIFS_I(file->f_path.dentry->d_inode)->time = 0;
retval = cifs_revalidate(file->f_path.dentry);
return (loff_t)retval;
return generic_file_llseek_unlocked(file, offset, origin);
#ifdef CONFIG_CIFS_EXPERIMENTAL
/*
 * cifs_setlease - grant a local lease only when it is safe.
 * A lease can be honored locally if the matching oplock is held (read
 * oplock for F_RDLCK, full oplock for write), or if the user asserted via
 * the local_lease mount option that no other client will change the file.
 */
static int cifs_setlease(struct file *file, long arg, struct file_lock **lease)
/* note that this is called by vfs setlease with the BKL held
although I doubt that BKL is needed here in cifs */
struct inode *inode = file->f_path.dentry->d_inode;
if (!(S_ISREG(inode->i_mode)))
/* check if file is oplocked */
if (((arg == F_RDLCK) &&
(CIFS_I(inode)->clientCanCacheRead)) ||
(CIFS_I(inode)->clientCanCacheAll)))
return generic_setlease(file, arg, lease);
else if (CIFS_SB(inode->i_sb)->tcon->local_lease &&
!CIFS_I(inode)->clientCanCacheRead)
/* If the server claims to support oplock on this
file, then we still need to check oplock even
if the local_lease mount option is set, but there
are servers which do not support oplock for which
this mount option may be useful if the user
knows that the file won't be changed on the server
*/
return generic_setlease(file, arg, lease);
/* Filesystem type registration record; registered in init_cifs(). */
struct file_system_type cifs_fs_type = {
	.owner = THIS_MODULE,
	.get_sb = cifs_get_sb,
	.kill_sb = kill_anon_super,
/* Inode operations for CIFS directories. */
const struct inode_operations cifs_dir_inode_ops = {
	.create = cifs_create,
	.lookup = cifs_lookup,
	.getattr = cifs_getattr,
	.unlink = cifs_unlink,
	.link = cifs_hardlink,
	.rename = cifs_rename,
	.permission = cifs_permission,
/*	revalidate:cifs_revalidate,   */
	.setattr = cifs_setattr,
	.symlink = cifs_symlink,
#ifdef CONFIG_CIFS_XATTR
	.setxattr = cifs_setxattr,
	.getxattr = cifs_getxattr,
	.listxattr = cifs_listxattr,
	.removexattr = cifs_removexattr,
/* Inode operations for regular CIFS files. */
const struct inode_operations cifs_file_inode_ops = {
/*	revalidate:cifs_revalidate, */
	.setattr = cifs_setattr,
	.getattr = cifs_getattr, /* do we need this anymore? */
	.rename = cifs_rename,
	.permission = cifs_permission,
#ifdef CONFIG_CIFS_XATTR
	.setxattr = cifs_setxattr,
	.getxattr = cifs_getxattr,
	.listxattr = cifs_listxattr,
	.removexattr = cifs_removexattr,
/* Inode operations for CIFS symlinks (Unix-extensions servers). */
const struct inode_operations cifs_symlink_inode_ops = {
	.readlink = generic_readlink,
	.follow_link = cifs_follow_link,
	.put_link = cifs_put_link,
	.permission = cifs_permission,
	/* BB add the following two eventually */
	/* revalidate: cifs_revalidate,
	   setattr:    cifs_notify_change, *//* BB do we need notify change */
#ifdef CONFIG_CIFS_XATTR
	.setxattr = cifs_setxattr,
	.getxattr = cifs_getxattr,
	.listxattr = cifs_listxattr,
	.removexattr = cifs_removexattr,
/* Default file operations: page-cached I/O with byte-range locking. */
const struct file_operations cifs_file_ops = {
	.read = do_sync_read,
	.write = do_sync_write,
	.aio_read = generic_file_aio_read,
	.aio_write = cifs_file_aio_write,
	.release = cifs_close,
	.mmap  = cifs_file_mmap,
	.splice_read = generic_file_splice_read,
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_POSIX
	.unlocked_ioctl	= cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
	.setlease = cifs_setlease,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
/* File operations for "directio" mounts: reads/writes bypass the page
 * cache and go straight to the server. */
const struct file_operations cifs_file_direct_ops = {
	/* no mmap, no aio, no readv -
	   BB reevaluate whether they can be done with directio, no cache */
	.read = cifs_user_read,
	.write = cifs_user_write,
	.release = cifs_close,
	.splice_read = generic_file_splice_read,
#ifdef CONFIG_CIFS_POSIX
	.unlocked_ioctl  = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
	.setlease = cifs_setlease,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
/* File operations for "nobrl" mounts: same as cifs_file_ops but with
 * byte-range locking disabled. */
const struct file_operations cifs_file_nobrl_ops = {
	.read = do_sync_read,
	.write = do_sync_write,
	.aio_read = generic_file_aio_read,
	.aio_write = cifs_file_aio_write,
	.release = cifs_close,
	.mmap  = cifs_file_mmap,
	.splice_read = generic_file_splice_read,
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_POSIX
	.unlocked_ioctl	= cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
	.setlease = cifs_setlease,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
/* File operations for mounts with both "directio" and "nobrl". */
const struct file_operations cifs_file_direct_nobrl_ops = {
	/* no mmap, no aio, no readv -
	   BB reevaluate whether they can be done with directio, no cache */
	.read = cifs_user_read,
	.write = cifs_user_write,
	.release = cifs_close,
	.splice_read = generic_file_splice_read,
#ifdef CONFIG_CIFS_POSIX
	.unlocked_ioctl  = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
	.setlease = cifs_setlease,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
/* File operations for open CIFS directories. */
const struct file_operations cifs_dir_ops = {
	.readdir = cifs_readdir,
	.release = cifs_closedir,
	.read    = generic_read_dir,
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
	.unlocked_ioctl  = cifs_ioctl,
/* Slab constructor: run once per cifsInodeInfo object when the slab page
 * is first populated; initializes the embedded VFS inode and lock list. */
cifs_init_once(void *inode)
struct cifsInodeInfo *cifsi = inode;
inode_init_once(&cifsi->vfs_inode);
INIT_LIST_HEAD(&cifsi->lockList);
/* Create the cifsInodeInfo slab cache; fails with an error when the
 * cache cannot be created. */
cifs_init_inodecache(void)
cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
sizeof(struct cifsInodeInfo),
0, (SLAB_RECLAIM_ACCOUNT|
if (cifs_inode_cachep == NULL)
/* Destroy the cifsInodeInfo slab cache at module unload. */
cifs_destroy_inodecache(void)
kmem_cache_destroy(cifs_inode_cachep);
/*
 * cifs_init_request_bufs - create the slab caches and mempools used for
 * SMB request/response buffers (one large-buffer pool, one small-buffer
 * pool). Clamps the module parameters to sane ranges first. On any
 * failure, everything created so far is torn down.
 */
cifs_init_request_bufs(void)
if (CIFSMaxBufSize < 8192) {
/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
Unicode path name has to fit in any SMB/CIFS path based frames */
CIFSMaxBufSize = 8192;
} else if (CIFSMaxBufSize > 1024*127) {
CIFSMaxBufSize = 1024 * 127;
CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
/* cERROR(1,("CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize)); */
/* large buffers: CIFSMaxBufSize payload plus SMB header */
cifs_req_cachep = kmem_cache_create("cifs_request",
MAX_CIFS_HDR_SIZE, 0,
SLAB_HWCACHE_ALIGN, NULL);
if (cifs_req_cachep == NULL)
/* clamp the large-buffer pool minimum to 1..64 */
if (cifs_min_rcv < 1)
else if (cifs_min_rcv > 64) {
cERROR(1, ("cifs_min_rcv set to maximum (64)"));
cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
if (cifs_req_poolp == NULL) {
kmem_cache_destroy(cifs_req_cachep);
/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
almost all handle based requests (but not write response, nor is it
sufficient for path based requests).  A smaller size would have
been more efficient (compacting multiple slab items on one 4k page)
for the case in which debug was on, but this larger size allows
more SMBs to use small buffer alloc and is still much more
efficient to alloc 1 per page off the slab compared to 17K (5page)
alloc of large cifs buffers even when page debugging is on */
cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
if (cifs_sm_req_cachep == NULL) {
mempool_destroy(cifs_req_poolp);
kmem_cache_destroy(cifs_req_cachep);
/* clamp the small-buffer pool minimum to 2..256 */
if (cifs_min_small < 2)
else if (cifs_min_small > 256) {
cifs_min_small = 256;
cFYI(1, ("cifs_min_small set to maximum (256)"));
cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
if (cifs_sm_req_poolp == NULL) {
mempool_destroy(cifs_req_poolp);
kmem_cache_destroy(cifs_req_cachep);
kmem_cache_destroy(cifs_sm_req_cachep);
/* Tear down the request-buffer mempools and slab caches (reverse of
 * cifs_init_request_bufs). */
cifs_destroy_request_bufs(void)
mempool_destroy(cifs_req_poolp);
kmem_cache_destroy(cifs_req_cachep);
mempool_destroy(cifs_sm_req_poolp);
kmem_cache_destroy(cifs_sm_req_cachep);
/* Body of cifs_init_mids(): creates the mid (multiplex id) cache + pool
 * used to track in-flight SMB requests, and the oplock queue-entry cache.
 * Earlier allocations are destroyed if a later one fails. */
cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
sizeof(struct mid_q_entry), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (cifs_mid_cachep == NULL)
/* 3 is a reasonable minimum number of simultaneous operations */
cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
if (cifs_mid_poolp == NULL) {
kmem_cache_destroy(cifs_mid_cachep);
cifs_oplock_cachep = kmem_cache_create("cifs_oplock_structs",
sizeof(struct oplock_q_entry), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (cifs_oplock_cachep == NULL) {
mempool_destroy(cifs_mid_poolp);
kmem_cache_destroy(cifs_mid_cachep);
/* Tear down the mid pool/cache and the oplock entry cache (reverse of
 * cifs_init_mids). */
cifs_destroy_mids(void)
mempool_destroy(cifs_mid_poolp);
kmem_cache_destroy(cifs_mid_cachep);
kmem_cache_destroy(cifs_oplock_cachep);
/*
 * cifs_oplock_thread - kernel thread servicing oplock break requests.
 * Loops until kthread_stop(): dequeues entries from GlobalOplock_Q
 * (under GlobalMid_Lock), flushes/invalidates the affected inode's page
 * cache as required by the break, and acknowledges the break to the
 * server with a LOCKING_ANDX_OPLOCK_RELEASE. Sleeps ~39s when idle.
 */
static int cifs_oplock_thread(void *dummyarg)
struct oplock_q_entry *oplock_item;
struct cifsTconInfo *pTcon;
spin_lock(&GlobalMid_Lock);
if (list_empty(&GlobalOplock_Q)) {
spin_unlock(&GlobalMid_Lock);
/* nothing queued; sleep until woken or timeout */
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(39*HZ);
oplock_item = list_entry(GlobalOplock_Q.next,
struct oplock_q_entry, qhead);
cFYI(1, ("found oplock item to write out"));
pTcon = oplock_item->tcon;
inode = oplock_item->pinode;
netfid = oplock_item->netfid;
spin_unlock(&GlobalMid_Lock);
DeleteOplockQEntry(oplock_item);
/* can not grab inode sem here since it would
deadlock when oplock received on delete
since vfs_unlink holds the i_mutex across
/* mutex_lock(&inode->i_mutex);*/
if (S_ISREG(inode->i_mode)) {
#ifdef CONFIG_CIFS_EXPERIMENTAL
/* propagate the break to any local leases held on the file */
if (CIFS_I(inode)->clientCanCacheAll == 0)
break_lease(inode, FMODE_READ);
else if (CIFS_I(inode)->clientCanCacheRead == 0)
break_lease(inode, FMODE_WRITE);
/* push dirty pages; if we lost the read oplock the cached
   pages may now be stale, so wait and invalidate them too */
rc = filemap_fdatawrite(inode->i_mapping);
if (CIFS_I(inode)->clientCanCacheRead == 0) {
waitrc = filemap_fdatawait(
invalidate_remote_inode(inode);
/* mutex_unlock(&inode->i_mutex);*/
CIFS_I(inode)->write_behind_rc = rc;
cFYI(1, ("Oplock flush inode %p rc %d",
/* releasing stale oplock after recent reconnect
of smb session using a now incorrect file
handle is not a data integrity issue but do
not bother sending an oplock release if session
to server still is disconnected since oplock
already released by the server in that case */
if (pTcon->tidStatus != CifsNeedReconnect) {
rc = CIFSSMBLock(0, pTcon, netfid,
0 /* len */ , 0 /* offset */, 0,
0, LOCKING_ANDX_OPLOCK_RELEASE,
false /* wait flag */);
cFYI(1, ("Oplock release rc = %d", rc));
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(1);  /* yield in case q were corrupt */
} while (!kthread_should_stop());
/*
 * cifs_dnotify_thread - watchdog kernel thread.
 * Every 15 seconds (freezer-aware) walks the global session list and
 * wakes any tasks waiting on responses from servers that still show
 * requests in flight, so they can notice dead connections and error out.
 */
static int cifs_dnotify_thread(void *dummyarg)
struct list_head *tmp;
struct cifsSesInfo *ses;
if (try_to_freeze())
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(15*HZ);
read_lock(&GlobalSMBSeslock);
/* check if any stuck requests that need
to be woken up and wakeq so the
thread can wake up and error out */
list_for_each(tmp, &GlobalSMBSessionList) {
ses = list_entry(tmp, struct cifsSesInfo,
if (ses->server && atomic_read(&ses->server->inFlight))
wake_up_all(&ses->server->response_q);
read_unlock(&GlobalSMBSeslock);
} while (!kthread_should_stop());
/*
 * Body of init_cifs(), the module init routine.
 * Initializes global lists/counters/locks, clamps cifs_max_pending,
 * creates the slab caches and mempools, registers the filesystem and
 * (optionally) the spnego/DNS-resolver key types, and starts the oplock
 * and dnotify kernel threads. Each failure unwinds everything done so
 * far via the chained goto labels at the bottom.
 */
/* INIT_LIST_HEAD(&GlobalServerList);*/	/* BB not implemented yet */
INIT_LIST_HEAD(&GlobalSMBSessionList);
INIT_LIST_HEAD(&GlobalTreeConnectionList);
INIT_LIST_HEAD(&GlobalOplock_Q);
#ifdef CONFIG_CIFS_EXPERIMENTAL
INIT_LIST_HEAD(&GlobalDnotifyReqList);
INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
/*
 *  Initialize Global counters
 */
atomic_set(&sesInfoAllocCount, 0);
atomic_set(&tconInfoAllocCount, 0);
atomic_set(&tcpSesAllocCount, 0);
atomic_set(&tcpSesReconnectCount, 0);
atomic_set(&tconInfoReconnectCount, 0);
atomic_set(&bufAllocCount, 0);
atomic_set(&smBufAllocCount, 0);
#ifdef CONFIG_CIFS_STATS2
atomic_set(&totBufAllocCount, 0);
atomic_set(&totSmBufAllocCount, 0);
#endif /* CONFIG_CIFS_STATS2 */
atomic_set(&midCount, 0);
GlobalCurrentXid = 0;
GlobalTotalActiveXid = 0;
GlobalMaxActiveXid = 0;
memset(Local_System_Name, 0, 15);
rwlock_init(&GlobalSMBSeslock);
spin_lock_init(&GlobalMid_Lock);
/* clamp the module parameter to its documented 2..256 range */
if (cifs_max_pending < 2) {
cifs_max_pending = 2;
cFYI(1, ("cifs_max_pending set to min of 2"));
} else if (cifs_max_pending > 256) {
cifs_max_pending = 256;
cFYI(1, ("cifs_max_pending set to max of 256"));
rc = cifs_init_inodecache();
goto out_clean_proc;
rc = cifs_init_mids();
goto out_destroy_inodecache;
rc = cifs_init_request_bufs();
goto out_destroy_mids;
rc = register_filesystem(&cifs_fs_type);
goto out_destroy_request_bufs;
#ifdef CONFIG_CIFS_UPCALL
rc = register_key_type(&cifs_spnego_key_type);
goto out_unregister_filesystem;
#ifdef CONFIG_CIFS_DFS_UPCALL
rc = register_key_type(&key_type_dns_resolver);
goto out_unregister_key_type;
oplockThread = kthread_run(cifs_oplock_thread, NULL, "cifsoplockd");
if (IS_ERR(oplockThread)) {
rc = PTR_ERR(oplockThread);
cERROR(1, ("error %d create oplock thread", rc));
goto out_unregister_dfs_key_type;
dnotifyThread = kthread_run(cifs_dnotify_thread, NULL, "cifsdnotifyd");
if (IS_ERR(dnotifyThread)) {
rc = PTR_ERR(dnotifyThread);
cERROR(1, ("error %d create dnotify thread", rc));
goto out_stop_oplock_thread;
/* --- error unwind chain: undo in reverse order of setup --- */
 out_stop_oplock_thread:
kthread_stop(oplockThread);
 out_unregister_dfs_key_type:
#ifdef CONFIG_CIFS_DFS_UPCALL
unregister_key_type(&key_type_dns_resolver);
 out_unregister_key_type:
#ifdef CONFIG_CIFS_UPCALL
unregister_key_type(&cifs_spnego_key_type);
 out_unregister_filesystem:
unregister_filesystem(&cifs_fs_type);
 out_destroy_request_bufs:
cifs_destroy_request_bufs();
cifs_destroy_mids();
 out_destroy_inodecache:
cifs_destroy_inodecache();
/*
 * Body of exit_cifs(), the module exit routine: undo everything done by
 * init_cifs() — unregister key types and the filesystem, free caches and
 * pools, and stop the two kernel threads.
 */
cFYI(DBG2, ("exit_cifs"));
#ifdef CONFIG_CIFS_DFS_UPCALL
cifs_dfs_release_automount_timer();
unregister_key_type(&key_type_dns_resolver);
#ifdef CONFIG_CIFS_UPCALL
unregister_key_type(&cifs_spnego_key_type);
unregister_filesystem(&cifs_fs_type);
cifs_destroy_inodecache();
cifs_destroy_mids();
cifs_destroy_request_bufs();
kthread_stop(oplockThread);
kthread_stop(dnotifyThread);
/* Module metadata and entry/exit point registration. */
MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
("VFS to access servers complying with the SNIA CIFS Specification "
"e.g. Samba and Windows");
MODULE_VERSION(CIFS_VERSION);
module_init(init_cifs)
module_exit(exit_cifs)