From patchwork Mon Oct 4 17:52:59 2010
X-Patchwork-Submitter: Jeff Layton
X-Patchwork-Id: 229321
From: Jeff Layton <jlayton@redhat.com>
To: linux-cifs@vger.kernel.org
Subject: [PATCH 12/14] cifs: move close processing from cifs_close to cifsFileInfo_put
Date: Mon, 4 Oct 2010 13:52:59 -0400
Message-Id: <1286214781-626-13-git-send-email-jlayton@redhat.com>
X-Mailer: git-send-email 1.7.2.3
In-Reply-To: <1286214781-626-1-git-send-email-jlayton@redhat.com>
References: <1286214781-626-1-git-send-email-jlayton@redhat.com>

diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 522cefa..06024c5 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -391,7 +391,6 @@ struct cifsFileInfo {
 	struct tcon_link *tlink;
 	struct mutex lock_mutex;
 	struct list_head llist; /* list of byte range locks we have. */
-	bool closePend:1;	/* file is marked to close */
 	bool invalidHandle:1;	/* file closed via session abend */
 	bool oplock_break_cancelled:1;
 	atomic_t count;		/* reference count */
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index c7a3e40..00c7262 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -178,7 +178,6 @@ cifs_new_fileinfo(__u16 fileHandle, struct file *file,
 	pCifsFile->dentry = dget(dentry);
 	pCifsFile->f_flags = file->f_flags;
 	pCifsFile->invalidHandle = false;
-	pCifsFile->closePend = false;
 	pCifsFile->tlink = cifs_get_tlink(tlink);
 	mutex_init(&pCifsFile->fh_mutex);
 	mutex_init(&pCifsFile->lock_mutex);
@@ -207,14 +206,58 @@ cifs_new_fileinfo(__u16 fileHandle, struct file *file,
 	return pCifsFile;
 }
 
-/* Release a reference on the file private data */
+/*
+ * Release a reference on the file private data. This may involve closing
+ * the filehandle out on the server.
+ */
 void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
 {
-	if (atomic_dec_and_test(&cifs_file->count)) {
-		cifs_put_tlink(cifs_file->tlink);
-		dput(cifs_file->dentry);
-		kfree(cifs_file);
+	struct cifsTconInfo *tcon = tlink_tcon(cifs_file->tlink);
+	struct cifsInodeInfo *cifsi = CIFS_I(cifs_file->dentry->d_inode);
+	struct cifsLockInfo *li, *tmp;
+
+	if (!atomic_dec_and_lock(&cifs_file->count, &cifs_file_list_lock))
+		return;
+
+	/* count can be incremented inside the lock, so must check again */
+	if (atomic_read(&cifs_file->count) > 0) {
+		spin_unlock(&cifs_file_list_lock);
+		return;
+	}
+
+	/* remove it from the lists */
+	list_del(&cifs_file->flist);
+	list_del(&cifs_file->tlist);
+
+	if (list_empty(&cifsi->openFileList)) {
+		cFYI(1, "closing last open instance for inode %p",
+			cifs_file->dentry->d_inode);
+		cifsi->clientCanCacheRead = false;
+		cifsi->clientCanCacheAll = false;
+	}
+	spin_unlock(&cifs_file_list_lock);
+
+	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
+		int xid, rc;
+
+		xid = GetXid();
+		rc = CIFSSMBClose(xid, tcon, cifs_file->netfid);
+		FreeXid(xid);
+	}
+
+	/* Delete any outstanding lock records. We'll lose them when the file
+	 * is closed anyway.
+	 */
+	mutex_lock(&cifs_file->lock_mutex);
+	list_for_each_entry_safe(li, tmp, &cifs_file->llist, llist) {
+		list_del(&li->llist);
+		kfree(li);
 	}
+	mutex_unlock(&cifs_file->lock_mutex);
+
+	cifs_put_tlink(cifs_file->tlink);
+	dput(cifs_file->dentry);
+	kfree(cifs_file);
 }
 
 int cifs_open(struct inode *inode, struct file *file)
@@ -544,79 +587,11 @@ reopen_error_exit:
 
 int cifs_close(struct inode *inode, struct file *file)
 {
-	int rc = 0;
-	int xid, timeout;
-	struct cifs_sb_info *cifs_sb;
-	struct cifsTconInfo *pTcon;
-	struct cifsFileInfo *pSMBFile = file->private_data;
-
-	xid = GetXid();
-
-	cifs_sb = CIFS_SB(inode->i_sb);
-	pTcon = tlink_tcon(pSMBFile->tlink);
-	if (pSMBFile) {
-		struct cifsLockInfo *li, *tmp;
-		spin_lock(&cifs_file_list_lock);
-		pSMBFile->closePend = true;
-		if (pTcon) {
-			/* no sense reconnecting to close a file that is
-			   already closed */
-			if (!pTcon->need_reconnect) {
-				spin_unlock(&cifs_file_list_lock);
-				timeout = 2;
-				while ((atomic_read(&pSMBFile->count) != 1)
-					&& (timeout <= 2048)) {
-					/* Give write a better chance to get to
-					server ahead of the close.  We do not
-					want to add a wait_q here as it would
-					increase the memory utilization as
-					the struct would be in each open file,
-					but this should give enough time to
-					clear the socket */
-					cFYI(DBG2, "close delay, write pending");
-					msleep(timeout);
-					timeout *= 4;
-				}
-				if (!pTcon->need_reconnect &&
-				    !pSMBFile->invalidHandle)
-					rc = CIFSSMBClose(xid, pTcon,
-						  pSMBFile->netfid);
-			} else
-				spin_unlock(&cifs_file_list_lock);
-		} else
-			spin_unlock(&cifs_file_list_lock);
+	cifsFileInfo_put(file->private_data);
+	file->private_data = NULL;
 
-		/* Delete any outstanding lock records.
-		   We'll lose them when the file is closed anyway. */
-		mutex_lock(&pSMBFile->lock_mutex);
-		list_for_each_entry_safe(li, tmp, &pSMBFile->llist, llist) {
-			list_del(&li->llist);
-			kfree(li);
-		}
-		mutex_unlock(&pSMBFile->lock_mutex);
-
-		spin_lock(&cifs_file_list_lock);
-		list_del(&pSMBFile->flist);
-		list_del(&pSMBFile->tlist);
-		spin_unlock(&cifs_file_list_lock);
-		cifsFileInfo_put(file->private_data);
-		file->private_data = NULL;
-	} else
-		rc = -EBADF;
-
-	spin_lock(&cifs_file_list_lock);
-	if (list_empty(&(CIFS_I(inode)->openFileList))) {
-		cFYI(1, "closing last open instance for inode %p", inode);
-		/* if the file is not open we do not know if we can cache info
-		   on this inode, much less write behind and read ahead */
-		CIFS_I(inode)->clientCanCacheRead = false;
-		CIFS_I(inode)->clientCanCacheAll = false;
-	}
-	spin_unlock(&cifs_file_list_lock);
-	if ((rc == 0) && CIFS_I(inode)->write_behind_rc)
-		rc = CIFS_I(inode)->write_behind_rc;
-	FreeXid(xid);
-	return rc;
+	/* return code from the ->release op is always ignored */
+	return 0;
 }
 
 int cifs_closedir(struct inode *inode, struct file *file)
@@ -963,13 +938,6 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data,
 			   we blocked so return what we managed to write */
 			return total_written;
 		}
-		if (open_file->closePend) {
-			FreeXid(xid);
-			if (total_written)
-				return total_written;
-			else
-				return -EBADF;
-		}
 		if (open_file->invalidHandle) {
 			/* we could deadlock if we called
 			   filemap_fdatawait from here so tell
@@ -1050,13 +1018,6 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file,
 	     total_written += bytes_written) {
 		rc = -EAGAIN;
 		while (rc == -EAGAIN) {
-			if (open_file->closePend) {
-				FreeXid(xid);
-				if (total_written)
-					return total_written;
-				else
-					return -EBADF;
-			}
 			if (open_file->invalidHandle) {
 				/* we could deadlock if we called
 				   filemap_fdatawait from here so tell
@@ -1136,8 +1097,6 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
 	   are always at the end of the list but since the first entry might
 	   have a close pending, we go through the whole list */
 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
-		if (open_file->closePend)
-			continue;
 		if (fsuid_only && open_file->uid != current_fsuid())
 			continue;
 		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
@@ -1183,8 +1142,6 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
 	spin_lock(&cifs_file_list_lock);
 refind_writable:
 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
-		if (open_file->closePend)
-			continue;
 		if (!any_available && open_file->pid != current->tgid)
 			continue;
 		if (fsuid_only && open_file->uid != current_fsuid())
@@ -1201,32 +1158,15 @@ refind_writable:
 			spin_unlock(&cifs_file_list_lock);
 			/* Had to unlock since following call can block */
 			rc = cifs_reopen_file(open_file, false);
-			if (!rc) {
-				if (!open_file->closePend)
-					return open_file;
-				else { /* start over in case this was deleted */
-					/* since the list could be modified */
-					spin_lock(&cifs_file_list_lock);
-					cifsFileInfo_put(open_file);
-					goto refind_writable;
-				}
-			}
+			if (!rc)
+				return open_file;
 
-			/* if it fails, try another handle if possible -
-			(we can not do this if closePending since
-			loop could be modified - in which case we
-			have to start at the beginning of the list
-			again. Note that it would be bad
-			to hold up writepages here (rather than
-			in caller) with continuous retries */
+			/* if it fails, try another handle if possible */
 			cFYI(1, "wp failed on reopen file");
-			spin_lock(&cifs_file_list_lock);
-			/* can not use this handle, no write
-			pending on this one after all */
 			cifsFileInfo_put(open_file);
-
-			if (open_file->closePend) /* list could have changed */
-				goto refind_writable;
+
+			spin_lock(&cifs_file_list_lock);
+
 			/* else we simply continue to the next entry. Thus
 			   we do not loop on reopen errors.  If we can not
 			   reopen the file, for example if we
@@ -1747,8 +1687,7 @@ ssize_t cifs_user_read(struct file *file, char __user *read_data,
 		smb_read_data = NULL;
 		while (rc == -EAGAIN) {
 			int buf_type = CIFS_NO_BUFFER;
-			if ((open_file->invalidHandle) &&
-			    (!open_file->closePend)) {
+			if (open_file->invalidHandle) {
 				rc = cifs_reopen_file(open_file, true);
 				if (rc != 0)
 					break;
@@ -1833,8 +1772,7 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
 		}
 		rc = -EAGAIN;
 		while (rc == -EAGAIN) {
-			if ((open_file->invalidHandle) &&
-			    (!open_file->closePend)) {
+			if (open_file->invalidHandle) {
 				rc = cifs_reopen_file(open_file, true);
 				if (rc != 0)
 					break;
@@ -1998,8 +1936,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
 				read_size, contig_pages);
 		rc = -EAGAIN;
 		while (rc == -EAGAIN) {
-			if ((open_file->invalidHandle) &&
-			    (!open_file->closePend)) {
+			if (open_file->invalidHandle) {
 				rc = cifs_reopen_file(open_file, true);
 				if (rc != 0)
 					break;
@@ -2151,8 +2088,6 @@ static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
 
 	spin_lock(&cifs_file_list_lock);
 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
-		if (open_file->closePend)
-			continue;
 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
 			spin_unlock(&cifs_file_list_lock);
 			return 1;
@@ -2311,7 +2246,7 @@ void cifs_oplock_break(struct work_struct *work)
 	 * not bother sending an oplock release if session to server still is
 	 * disconnected since oplock already released by the server
 	 */
-	if (!cfile->closePend && !cfile->oplock_break_cancelled) {
+	if (!cfile->oplock_break_cancelled) {
 		rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid, 0,
 				 0, 0, 0, LOCKING_ANDX_OPLOCK_RELEASE, false);
 		cFYI(1, "Oplock release rc = %d", rc);
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index de6073c..b86b423 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -567,16 +567,6 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
 			if (pSMB->Fid != netfile->netfid)
 				continue;
 
-			/*
-			 * don't do anything if file is about to be
-			 * closed anyway.
-			 */
-			if (netfile->closePend) {
-				spin_unlock(&cifs_file_list_lock);
-				read_unlock(&cifs_tcp_ses_lock);
-				return true;
-			}
-
 			cFYI(1, "file id match, oplock break");
 			pCifsInode = CIFS_I(netfile->dentry->d_inode);
 			pCifsInode->clientCanCacheAll = false;
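
[Editorial note, not part of the posted patch] The core of the change above is that cifsFileInfo_put() now owns the whole teardown: it uses atomic_dec_and_lock() so the drop of the final reference and the removal of the handle from the open-file lists happen atomically with respect to lookups that take a new reference under cifs_file_list_lock, and only then issues the (possibly slow) SMB close. The userspace sketch below illustrates that dec-and-lock pattern only; it is not CIFS code, the names (handle, handle_get, handle_put, dec_and_lock) are invented for illustration, and unlike the real atomic_dec_and_lock() it takes the lock unconditionally rather than only on the zero transition.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

struct handle {
	int count;		/* protected by list_lock on the 0 transition */
	struct handle *next;	/* singly linked "open file" list */
};

static struct handle *open_list;

/* lookups take a new reference while holding the list lock */
static struct handle *handle_get(void)
{
	struct handle *h;

	pthread_mutex_lock(&list_lock);
	h = open_list;
	if (h)
		h->count++;
	pthread_mutex_unlock(&list_lock);
	return h;
}

/* rough analogue of atomic_dec_and_lock(): returns 1 with list_lock held
 * only when this put dropped the last reference */
static int dec_and_lock(struct handle *h)
{
	pthread_mutex_lock(&list_lock);
	if (--h->count > 0) {
		pthread_mutex_unlock(&list_lock);
		return 0;
	}
	return 1;
}

static void handle_put(struct handle *h)
{
	struct handle **p;

	if (!dec_and_lock(h))
		return;

	/* last reference: unlink while still holding the list lock, so a
	 * concurrent handle_get() can never see a zero-count object */
	for (p = &open_list; *p; p = &(*p)->next) {
		if (*p == h) {
			*p = h->next;
			break;
		}
	}
	pthread_mutex_unlock(&list_lock);

	/* ...then do the expensive teardown (the on-the-wire close in the
	 * real code) outside the lock */
	printf("closing handle %p\n", (void *)h);
	free(h);
}

int main(void)
{
	struct handle *h = calloc(1, sizeof(*h));

	h->count = 1;				/* reference held by the "open" */
	open_list = h;

	struct handle *extra = handle_get();	/* e.g. a writeback user */
	handle_put(h);				/* "close": not the last reference */
	handle_put(extra);			/* last reference: unlink and free */
	return 0;
}

With this shape, cifs_close() in the patch can simply drop its reference and return 0, since any other holder of the handle keeps the object (and the server filehandle) alive until its own put.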