Diffstat (limited to 'fs/netfs/direct_write.c')
 fs/netfs/direct_write.c | 228 ++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 212 insertions(+), 16 deletions(-)
diff --git a/fs/netfs/direct_write.c b/fs/netfs/direct_write.c
index a9d1c3b2c084..dd1451bf7543 100644
--- a/fs/netfs/direct_write.c
+++ b/fs/netfs/direct_write.c
@@ -10,6 +10,202 @@
 #include "internal.h"
 
 /*
+ * Perform the cleanup rituals after an unbuffered write is complete.
+ */
+static void netfs_unbuffered_write_done(struct netfs_io_request *wreq)
+{
+	struct netfs_inode *ictx = netfs_inode(wreq->inode);
+
+	_enter("R=%x", wreq->debug_id);
+
+	/* Okay, declare that all I/O is complete. */
+	trace_netfs_rreq(wreq, netfs_rreq_trace_write_done);
+
+	if (!wreq->error)
+		netfs_update_i_size(ictx, &ictx->inode, wreq->start, wreq->transferred);
+
+	if (wreq->origin == NETFS_DIO_WRITE &&
+	    wreq->mapping->nrpages) {
+		/* mmap may have got underfoot and we may now have folios
+		 * locally covering the region we just wrote.  Attempt to
+		 * discard the folios, but leave in place any that are
+		 * modified locally.  ->write_iter() is prevented from
+		 * interfering by the DIO counter.
+		 */
+		pgoff_t first = wreq->start >> PAGE_SHIFT;
+		pgoff_t last = (wreq->start + wreq->transferred - 1) >> PAGE_SHIFT;
+
+		invalidate_inode_pages2_range(wreq->mapping, first, last);
+	}
+
+	if (wreq->origin == NETFS_DIO_WRITE)
+		inode_dio_end(wreq->inode);
+
+	_debug("finished");
+	netfs_wake_rreq_flag(wreq, NETFS_RREQ_IN_PROGRESS, netfs_rreq_trace_wake_ip);
+	/* As we cleared NETFS_RREQ_IN_PROGRESS, we acquired its ref. */
+
+	if (wreq->iocb) {
+		size_t written = umin(wreq->transferred, wreq->len);
+
+		wreq->iocb->ki_pos += written;
+		if (wreq->iocb->ki_complete) {
+			trace_netfs_rreq(wreq, netfs_rreq_trace_ki_complete);
+			wreq->iocb->ki_complete(wreq->iocb, wreq->error ?: written);
+		}
+		wreq->iocb = VFS_PTR_POISON;
+	}
+
+	netfs_clear_subrequests(wreq);
+}
+
+/*
+ * Collect the results of unbuffered write subrequests.
+ */
+static void netfs_unbuffered_write_collect(struct netfs_io_request *wreq,
+					   struct netfs_io_stream *stream,
+					   struct netfs_io_subrequest *subreq)
+{
+	trace_netfs_collect_sreq(wreq, subreq);
+
+	spin_lock(&wreq->lock);
+	list_del_init(&subreq->rreq_link);
+	spin_unlock(&wreq->lock);
+
+	wreq->transferred += subreq->transferred;
+	iov_iter_advance(&wreq->buffer.iter, subreq->transferred);
+
+	stream->collected_to = subreq->start + subreq->transferred;
+	wreq->collected_to = stream->collected_to;
+	netfs_put_subrequest(subreq, netfs_sreq_trace_put_done);
+
+	trace_netfs_collect_stream(wreq, stream);
+	trace_netfs_collect_state(wreq, wreq->collected_to, 0);
+}
+
+/*
+ * Write data to the server without going through the pagecache and without
+ * writing it to the local cache.  We dispatch the subrequests serially and
+ * wait for each to complete before dispatching the next, lest we leave a gap
+ * in the data written due to a failure such as ENOSPC.  We could, however,
+ * attempt to do preparation such as content encryption for the next subreq
+ * whilst the current one is in progress.
+ */
+static int netfs_unbuffered_write(struct netfs_io_request *wreq)
+{
+	struct netfs_io_subrequest *subreq = NULL;
+	struct netfs_io_stream *stream = &wreq->io_streams[0];
+	int ret = 0;
+
+	_enter("%llx", wreq->len);
+
+	if (wreq->origin == NETFS_DIO_WRITE)
+		inode_dio_begin(wreq->inode);
+
+	stream->collected_to = wreq->start;
+
+	for (;;) {
+		bool retry = false;
+
+		if (!subreq) {
+			netfs_prepare_write(wreq, stream, wreq->start + wreq->transferred);
+			subreq = stream->construct;
+			stream->construct = NULL;
+			stream->front = NULL;
+		}
+
+		/* Check if (re-)preparation failed. */
+		if (unlikely(test_bit(NETFS_SREQ_FAILED, &subreq->flags))) {
+			netfs_write_subrequest_terminated(subreq, subreq->error);
+			wreq->error = subreq->error;
+			break;
+		}
+
+		iov_iter_truncate(&subreq->io_iter, wreq->len - wreq->transferred);
+		if (!iov_iter_count(&subreq->io_iter))
+			break;
+
+		subreq->len = netfs_limit_iter(&subreq->io_iter, 0,
+					       stream->sreq_max_len,
+					       stream->sreq_max_segs);
+		iov_iter_truncate(&subreq->io_iter, subreq->len);
+		stream->submit_extendable_to = subreq->len;
+
+		trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
+		stream->issue_write(subreq);
+
+		/* Async, need to wait. */
+		netfs_wait_for_in_progress_stream(wreq, stream);
+
+		if (test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
+			retry = true;
+		} else if (test_bit(NETFS_SREQ_FAILED, &subreq->flags)) {
+			ret = subreq->error;
+			wreq->error = ret;
+			netfs_see_subrequest(subreq, netfs_sreq_trace_see_failed);
+			subreq = NULL;
+			break;
+		}
+		ret = 0;
+
+		if (!retry) {
+			netfs_unbuffered_write_collect(wreq, stream, subreq);
+			subreq = NULL;
+			if (wreq->transferred >= wreq->len)
+				break;
+			if (!wreq->iocb && signal_pending(current)) {
+				ret = wreq->transferred ? -EINTR : -ERESTARTSYS;
+				trace_netfs_rreq(wreq, netfs_rreq_trace_intr);
+				break;
+			}
+			continue;
+		}
+
+		/* We need to retry the last subrequest, so first reset the
+		 * iterator, taking into account what, if anything, we managed
+		 * to transfer.
+		 */
+		subreq->error = -EAGAIN;
+		trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
+		if (subreq->transferred > 0)
+			iov_iter_advance(&wreq->buffer.iter, subreq->transferred);
+
+		if (stream->source == NETFS_UPLOAD_TO_SERVER &&
+		    wreq->netfs_ops->retry_request)
+			wreq->netfs_ops->retry_request(wreq, stream);
+
+		__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
+		__clear_bit(NETFS_SREQ_BOUNDARY, &subreq->flags);
+		__clear_bit(NETFS_SREQ_FAILED, &subreq->flags);
+		subreq->io_iter = wreq->buffer.iter;
+		subreq->start = wreq->start + wreq->transferred;
+		subreq->len = wreq->len - wreq->transferred;
+		subreq->transferred = 0;
+		subreq->retry_count += 1;
+		stream->sreq_max_len = UINT_MAX;
+		stream->sreq_max_segs = INT_MAX;
+
+		netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
+		stream->prepare_write(subreq);
+
+		__set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
+		netfs_stat(&netfs_n_wh_retry_write_subreq);
+	}
+
+	netfs_unbuffered_write_done(wreq);
+	_leave(" = %d", ret);
+	return ret;
+}
+
+static void netfs_unbuffered_write_async(struct work_struct *work)
+{
+	struct netfs_io_request *wreq = container_of(work, struct netfs_io_request, work);
+
+	netfs_unbuffered_write(wreq);
+	netfs_put_request(wreq, netfs_rreq_trace_put_complete);
+}
+
+/*
  * Perform an unbuffered write where we may have to do an RMW operation on an
  * encrypted file.  This can also be used for direct I/O writes.
  */
@@ -70,35 +266,35 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
 		 */
 			wreq->buffer.iter = *iter;
 		}
+
+		wreq->len = iov_iter_count(&wreq->buffer.iter);
 	}
 
 	__set_bit(NETFS_RREQ_USE_IO_ITER, &wreq->flags);
-	if (async)
-		__set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &wreq->flags);
 
 	/* Copy the data into the bounce buffer and encrypt it. */
 	// TODO
 
 	/* Dispatch the write. */
 	__set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
-	if (async)
-		wreq->iocb = iocb;
-	wreq->len = iov_iter_count(&wreq->buffer.iter);
-	ret = netfs_unbuffered_write(wreq, is_sync_kiocb(iocb), wreq->len);
-	if (ret < 0) {
-		_debug("begin = %zd", ret);
-		goto out;
-	}
-
-	if (!async) {
-		ret = netfs_wait_for_write(wreq);
-		if (ret > 0)
-			iocb->ki_pos += ret;
-	} else {
+	if (async) {
+		INIT_WORK(&wreq->work, netfs_unbuffered_write_async);
+		wreq->iocb = iocb;
+		queue_work(system_dfl_wq, &wreq->work);
 		ret = -EIOCBQUEUED;
+	} else {
+		ret = netfs_unbuffered_write(wreq);
+		if (ret < 0) {
+			_debug("begin = %zd", ret);
+		} else {
+			iocb->ki_pos += wreq->transferred;
+			ret = wreq->transferred ?: wreq->error;
+		}
+
+		netfs_put_request(wreq, netfs_rreq_trace_put_complete);
 	}
 
-out:
 	netfs_put_request(wreq, netfs_rreq_trace_put_return);
 	return ret;
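
The dispatch loop in netfs_unbuffered_write() above issues subrequests strictly one at a time: prepare, clamp the iterator to what remains, submit, wait, then either collect the result and advance or rewind and resubmit the same range. Below is a minimal userspace sketch of that submit/wait/retry shape; it is not kernel code, and the names (do_chunk_write, serial_write, CHUNK_MAX) are hypothetical stand-ins for the netfs machinery.

#include <errno.h>
#include <unistd.h>

#define CHUNK_MAX 4096	/* stands in for stream->sreq_max_len */

/* Hypothetical transport call: returns bytes written or a negative errno. */
static ssize_t do_chunk_write(int fd, const char *buf, size_t len)
{
	ssize_t n = write(fd, buf, len);

	return n < 0 ? -errno : n;
}

/*
 * Issue chunks strictly in order, waiting for each before starting the
 * next, so that a mid-stream failure never leaves a gap in the file.
 */
static ssize_t serial_write(int fd, const char *buf, size_t len)
{
	size_t transferred = 0;

	while (transferred < len) {
		size_t chunk = len - transferred;
		ssize_t n;

		if (chunk > CHUNK_MAX)
			chunk = CHUNK_MAX;	/* clamp, like netfs_limit_iter() */

		n = do_chunk_write(fd, buf + transferred, chunk);
		if (n == -EINTR || n == -EAGAIN)
			continue;		/* retry the same range */
		if (n < 0)
			return transferred ? (ssize_t)transferred : n;

		transferred += n;		/* "collect" and advance */
	}
	return transferred;
}

As in the kernel loop, a transient error retries the same offset rather than skipping ahead, which is why a failure such as ENOSPC cannot leave a hole in the written range.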
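The async path is structured differently: netfs_unbuffered_write_iter_locked() queues netfs_unbuffered_write_async() on a workqueue, returns -EIOCBQUEUED at once, and the result is reported later through iocb->ki_complete from netfs_unbuffered_write_done(). A rough userspace analogue of that offload-and-callback shape, using a detached pthread in place of the workqueue and a plain function pointer in place of ki_complete (all names hypothetical):

#include <pthread.h>
#include <stdlib.h>

/* Hypothetical request, loosely mirroring the iocb/wreq pairing. */
struct async_write {
	const void *buf;
	size_t len;
	void (*complete)(struct async_write *req, long res);	/* ~ki_complete */
};

/* Worker body: the stand-in for netfs_unbuffered_write_async(). */
static void *async_write_worker(void *arg)
{
	struct async_write *req = arg;

	/* A real implementation would perform the serial write here;
	 * for illustration, report the whole length as written.
	 */
	long res = (long)req->len;

	req->complete(req, res);	/* mirrors iocb->ki_complete() */
	free(req);			/* ~netfs_put_request() */
	return NULL;
}

/* Queue the write and return immediately, like returning -EIOCBQUEUED. */
static int submit_async_write(const void *buf, size_t len,
			      void (*complete)(struct async_write *, long))
{
	struct async_write *req = malloc(sizeof(*req));
	pthread_t tid;

	if (!req)
		return -1;
	req->buf = buf;
	req->len = len;
	req->complete = complete;

	if (pthread_create(&tid, NULL, async_write_worker, req) != 0) {
		free(req);
		return -1;
	}
	pthread_detach(tid);	/* fire and forget: the queue_work() analogue */
	return 0;
}

The key design point mirrored here is ownership transfer: once queued, the request belongs to the worker, which both invokes the completion callback and drops the final reference, so the submitter must not touch it again.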
