// SPDX-License-Identifier: GPL-2.0-only
/* Network filesystem content encryption support.
 *
 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include "internal.h"

/*
 * Take subrequests off the stream's pending I/O queue and issue them once the
 * data they cover has been fully encrypted (ie. once the encrypted_to
 * watermark has reached the end of the subrequest).
 */
void netfs_issue_encrypted_subreqs(struct netfs_io_request *wreq,
				   struct netfs_io_stream *stream)
{
	struct netfs_io_subrequest *subreq;

	// TODO: Offload this loop to the filesystem if it can do vectored writes

	while ((subreq = list_first_entry_or_null(&stream->io_queue,
						  struct netfs_io_subrequest, ioq_link))) {
		_debug("issue? %llx+%zx > %llx",
		       subreq->start, subreq->len, atomic64_read(&wreq->encrypted_to));
		if (subreq->start + subreq->len > atomic64_read(&wreq->encrypted_to))
			break;
		list_del(&subreq->ioq_link);
		stream->issue_write(subreq);
	}
}

/*
 * Collect completed/failed encryption subrequests, advance the encrypted_to
 * watermark and issue any writes that are no longer blocked on encryption.
 */
void netfs_encrypt_collection_worker(struct work_struct *work)
{
	struct netfs_io_subrequest *subreq;
	struct netfs_io_request *wreq = container_of(work, struct netfs_io_request, crypto_work);
	unsigned long long encrypted_to = 0;

	netfs_see_request(wreq, netfs_rreq_trace_see_work);

	while ((subreq = list_first_entry_or_null(&wreq->enc_subrequests,
						  struct netfs_io_subrequest, rreq_link))) {
		if (test_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags))
			break;
		encrypted_to = subreq->start + subreq->len;
		trace_netfs_sreq(subreq, netfs_sreq_trace_discard);
		spin_lock(&wreq->lock);
		list_del_init(&subreq->rreq_link);
		spin_unlock(&wreq->lock);
		netfs_put_subrequest(subreq, netfs_sreq_trace_put_done);
	}

	if (encrypted_to) /* Only ever advance the watermark */
		atomic64_set(&wreq->encrypted_to, encrypted_to);
	netfs_issue_encrypted_subreqs(wreq, &wreq->io_streams[0]);
	netfs_issue_encrypted_subreqs(wreq, &wreq->io_streams[1]);
	netfs_put_request(wreq, netfs_rreq_trace_put_crypto_work);
}

/**
 * netfs_crypto_req_done - Note completion of an encryption request
 * @data: The subrequest pointer passed to ->encrypt_block()
 * @err: Error code indicating type of completion.
 *
 * Tell the netfs library that an (asynchronous) encryption request has
 * completed.  The crypto layer may invoke this callback with -EINPROGRESS
 * when a backlogged request is started; such notifications are traced and
 * otherwise ignored.
 *
 * This function may be passed to skcipher_request_set_callback() as the
 * completion function; in such a case, the subrequest pointer should be passed
 * as the data.
 */
void netfs_crypto_req_done(void *data, int err)
{
	struct netfs_io_subrequest *subreq = data;
	struct netfs_io_request *wreq = subreq->rreq;

	_enter("R=%08x[%x],%d", wreq->debug_id, subreq->debug_index, err);

	if (err == -EINPROGRESS) {
		/* Backlog notification: note the code briefly so that it shows
		 * up in the trace, then clear it as it doesn't indicate failure.
		 */
		subreq->error = err;
		trace_netfs_sreq(subreq, netfs_sreq_trace_inprogress);
		subreq->error = 0;
		return;
	}

	if (err < 0) {
		subreq->error = err;
		set_bit(NETFS_SREQ_FAILED, &subreq->flags);
		trace_netfs_failure(wreq, subreq, err, netfs_fail_encryption);
		set_bit(NETFS_RREQ_PAUSE, &wreq->flags);
		trace_netfs_rreq(wreq, netfs_rreq_trace_set_pause);
	} else {
		subreq->transferred = subreq->len;
	}

	trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);
	clear_bit_unlock(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
	wake_up_bit(&subreq->flags, NETFS_SREQ_IN_PROGRESS);

	/* The collection worker consumes completed subrequests from the head
	 * of the queue, so it only needs kicking when the head terminates;
	 * later completions behind it will be swept up then.
	 */
	if (list_is_first(&subreq->rreq_link, &wreq->enc_subrequests)) {
		netfs_get_request(wreq, netfs_rreq_trace_get_crypto_work);
		if (!queue_work(system_unbound_wq, &wreq->crypto_work))
			netfs_put_request(wreq, netfs_rreq_trace_put_crypto_work_nq);
	}

	netfs_put_subrequest(subreq, netfs_sreq_trace_put_terminated);
}
EXPORT_SYMBOL(netfs_crypto_req_done);
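
/*
 * Example (sketch only, not part of the netfs API): a filesystem's
 * ->encrypt_block() implementation might hand the block to an skcipher and
 * let netfs_crypto_req_done() finish the subrequest.  The request allocation,
 * IV derivation and request/IV lifetime management are filesystem-specific
 * and only hinted at here.
 *
 *	static void my_fs_encrypt_block(struct netfs_io_subrequest *subreq, gfp_t gfp)
 *	{
 *		struct skcipher_request *req = ...;	// fs-allocated request
 *		u8 *iv = ...;				// fs-derived IV for this block
 *		int ret;
 *
 *		skcipher_request_set_callback(req,
 *					      CRYPTO_TFM_REQ_MAY_BACKLOG |
 *					      CRYPTO_TFM_REQ_MAY_SLEEP,
 *					      netfs_crypto_req_done, subreq);
 *		skcipher_request_set_crypt(req, &subreq->src_sg, &subreq->dst_sg,
 *					   subreq->len, iv);
 *
 *		ret = crypto_skcipher_encrypt(req);
 *		if (ret != -EINPROGRESS && ret != -EBUSY)
 *			netfs_crypto_req_done(subreq, ret); // completed synchronously
 *	}
 */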

/*
 * Start asynchronous encryption of data from the source folio into the bounce
 * folio (which may be the same folio, in which case the data is encrypted in
 * place).
 */
bool netfs_encrypt_folio(struct netfs_io_request *wreq, struct folio *folio,
			 struct folio *bounce, unsigned long long start, size_t len,
			 gfp_t gfp)
{
	struct netfs_io_subrequest *subreq;
	struct netfs_inode *ictx = netfs_inode(wreq->inode);
	size_t bsize = wreq->crypto_bsize;
	int ret;

	_enter("");

	trace_netfs_rreq(wreq, netfs_rreq_trace_encrypt);

	_debug("ENCRYPT %llx-%llx", start, start + len - 1);

	do {
		ret = -ENOMEM;
		subreq = netfs_alloc_subrequest(wreq, NETFS_ENCRYPT_BLOCK);
		if (!subreq)
			goto error_failed;

		subreq->start = start;
		subreq->len = bsize;
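		/* Queue the block on the request in submission order; the
		 * collection worker relies on this ordering to advance the
		 * encrypted_to watermark contiguously.
		 */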
		spin_lock(&wreq->lock);
		list_add_tail(&subreq->rreq_link, &wreq->enc_subrequests);
		spin_unlock(&wreq->lock);

		trace_netfs_sreq_ref(wreq->debug_id, subreq->debug_index,
				     refcount_read(&subreq->ref),
				     netfs_sreq_trace_new);

		trace_netfs_sreq(subreq, netfs_sreq_trace_encrypt);

		sg_init_marker(&subreq->src_sg, 1);
		sg_init_marker(&subreq->dst_sg, 1);
		sg_set_folio(&subreq->src_sg, folio, bsize, start - folio_pos(folio));
		sg_set_folio(&subreq->dst_sg, bounce, bsize, start - folio_pos(bounce));

		ictx->ops->encrypt_block(subreq, gfp);
		start += bsize;
		len -= bsize;
	} while (len > 0);

	return true;

error_failed:
	trace_netfs_failure(wreq, NULL, ret, netfs_fail_encryption);
	wreq->error = ret;
	return false;
}

/*
 * Populate a scatterlist from the next bufferage of an I/O iterator.
 */
static int netfs_iter_to_sglist(struct iov_iter *iter, size_t len,
				struct scatterlist *sg, unsigned int n_sg)
{
	struct sg_table sgtable = { .sgl = sg };
	ssize_t ret;

	_enter("%zx/%zx", len, iov_iter_count(iter));

	sg_init_table(sg, n_sg);
	ret = extract_iter_to_sg(iter, len, &sgtable, n_sg, 0);
	if (ret < 0)
		return ret;
	if (sgtable.nents > 0)
		sg_mark_end(&sg[sgtable.nents - 1]);
	return sgtable.nents;
}

/*
 * Start asynchronous encryption of data drawn from the iterator into the
 * bounce folio.
 */
bool netfs_encrypt_to_folio(struct netfs_io_request *wreq, struct folio *bounce,
			    struct iov_iter *iter, unsigned long long start, size_t len,
			    gfp_t gfp)
{
	struct netfs_io_subrequest *subreq;
	struct netfs_inode *ictx = netfs_inode(wreq->inode);
	size_t bsize = wreq->crypto_bsize;
	int ret;

	_enter("");

	trace_netfs_rreq(wreq, netfs_rreq_trace_encrypt);

	_debug("ENCRYPT %llx-%llx", start, start + len - 1);

	do {
		ret = -ENOMEM;
		subreq = netfs_alloc_subrequest(wreq, NETFS_ENCRYPT_BLOCK);
		if (!subreq)
			goto error_failed;

		subreq->start = start;
		subreq->len = bsize;
		spin_lock(&wreq->lock);
		list_add_tail(&subreq->rreq_link, &wreq->enc_subrequests);
		spin_unlock(&wreq->lock);

		sg_init_marker(&subreq->src_sg, 1);
		sg_init_marker(&subreq->dst_sg, 1);
		sg_set_folio(&subreq->dst_sg, bounce, bsize, start - folio_pos(bounce));

		/* Extract the next block's worth of source data from the
		 * caller's iterator into the source scatterlist.
		 */
		ret = netfs_iter_to_sglist(iter, bsize, &subreq->src_sg, 1);
		if (ret < 0)
			goto error;

		ictx->ops->encrypt_block(subreq, gfp);
		start += bsize;
		len -= bsize;
	} while (len > 0);

	return true;

error_failed:
	trace_netfs_failure(wreq, NULL, ret, netfs_fail_encryption);
error:
	wreq->error = ret;
	return false;
}

/*
 * Decrypt a folio in the pagecache. We arrange the minimum folio size such
 * that the crypto block size will never be larger than the size of the folios
 * we are using.
 */
void netfs_decrypt_folio(struct netfs_io_request *rreq, struct folio *folio)
{
	struct netfs_inode *ictx = netfs_inode(rreq->inode);
	struct scatterlist sg;
	unsigned long long start = folio_pos(folio);
	size_t len = folio_size(folio);
	size_t bsize = rreq->crypto_bsize, off = 0;
	int ret;

	if (start >= rreq->i_size)
		return;
	trace_netfs_folio(folio, netfs_folio_trace_decrypt);

	len = umin(len, rreq->i_size - start);

	_debug("DECRYPT %llx-%llx", start, start + len - 1);

	do {
		sg_init_table(&sg, 1);
		sg_set_folio(&sg, folio, bsize, off);

		ret = ictx->ops->decrypt_block(rreq, start + off, bsize, &sg, 1, &sg, 1);
		if (ret < 0)
			goto error_failed;
		off += bsize;
	} while (off < len);

	return;

error_failed:
	trace_netfs_failure(rreq, NULL, ret, netfs_fail_decryption);
	rreq->error = ret;
	set_bit(NETFS_RREQ_FAILED, &rreq->flags);
	return;
}
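
/*
 * Example (sketch only): unlike ->encrypt_block(), ->decrypt_block() is
 * called synchronously and is expected to return a negative error code on
 * failure.  A filesystem might wrap a synchronous skcipher invocation like
 * this; the request allocation and IV derivation shown are hypothetical.
 *
 *	static int my_fs_decrypt_block(struct netfs_io_request *rreq,
 *				       unsigned long long pos, size_t len,
 *				       struct scatterlist *source_sg, unsigned int n_source,
 *				       struct scatterlist *dest_sg, unsigned int n_dest)
 *	{
 *		struct skcipher_request *req = ...;	// fs-allocated request
 *		u8 *iv = ...;				// fs-derived IV for the block at pos
 *		DECLARE_CRYPTO_WAIT(wait);
 *
 *		skcipher_request_set_callback(req,
 *					      CRYPTO_TFM_REQ_MAY_BACKLOG |
 *					      CRYPTO_TFM_REQ_MAY_SLEEP,
 *					      crypto_req_done, &wait);
 *		skcipher_request_set_crypt(req, source_sg, dest_sg, len, iv);
 *		return crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
 *	}
 */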

/*
 * Decrypt the result of a DIO read request.
 */
void netfs_decrypt_dio(struct netfs_io_request *rreq)
{
	struct netfs_inode *ictx = netfs_inode(rreq->inode);
	struct scatterlist source_sg[16], dest_sg[16];
	unsigned long long start = rreq->start;
	unsigned int n_source;
	size_t len, processed = 0, bsize = rreq->crypto_bsize;
	int ret;

	trace_netfs_rreq(rreq, netfs_rreq_trace_decrypt);
	if (rreq->start >= rreq->i_size)
		return;

	/* We can only decrypt whole crypto blocks that were completely
	 * retrieved.
	 */
	len = round_down(rreq->transferred, bsize);

	/* Rewind the iterators over anything already consumed by the read so
	 * that decryption starts from the beginning of the buffers.
	 */
	if (!test_bit(NETFS_RREQ_CRYPT_IN_PLACE, &rreq->flags) &&
	    rreq->buffer.iter.count < rreq->len)
		iov_iter_revert(&rreq->buffer.iter, rreq->len - rreq->buffer.iter.count);
	if (rreq->bounce.iter.count < len)
		iov_iter_revert(&rreq->bounce.iter, len - rreq->bounce.iter.count);

	_debug("DECRYPT %llx-%llx f=%lx %zx/%zx %zx/%llx",
	       start, start + len - 1, rreq->flags,
	       rreq->bounce.iter.count, len,
	       rreq->buffer.iter.count, rreq->len);

	do {
		_debug("chunk %zx/%zx", processed, len);

		ret = netfs_iter_to_sglist(&rreq->bounce.iter, bsize,
					   source_sg, ARRAY_SIZE(source_sg));
		if (ret < 0)
			goto error;
		if (ret == 0) {
			pr_err("Failed to extract bounce buffer in netfs_decrypt_dio()\n");
			ret = -EIO;
			goto error;
		}
		n_source = ret;

		if (test_bit(NETFS_RREQ_CRYPT_IN_PLACE, &rreq->flags)) {
			ret = ictx->ops->decrypt_block(rreq, start + processed, bsize,
						       source_sg, n_source,
						       source_sg, n_source);
		} else {
			ret = netfs_iter_to_sglist(&rreq->buffer.iter, bsize,
						   dest_sg, ARRAY_SIZE(dest_sg));
			if (ret < 0)
				goto error;
			ret = ictx->ops->decrypt_block(rreq, start + processed, bsize,
						       source_sg, n_source,
						       dest_sg, ret);
		}

		if (ret < 0)
			goto error_failed;
		processed += bsize;
	} while (processed < len);

	rreq->transferred = processed;
	return;

error_failed:
	trace_netfs_failure(rreq, NULL, ret, netfs_fail_decryption);
error:
	rreq->error = ret;
	__set_bit(NETFS_RREQ_FAILED, &rreq->flags);
	return;
}