blob: 78cf98068e978dfa44307108ee6b5c8a54040a76 [file] [log] [blame] [edit]
// SPDX-License-Identifier: GPL-2.0-or-later
/* Iterator helpers.
*
* Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/scatterlist.h>
#include <linux/netfs.h>
#include "internal.h"
/**
* netfs_extract_iter - Extract the pages from an iterator into a bvecq
* @orig: The original iterator
* @orig_len: The amount of iterator to copy
* @max_segs: Maximum number of contiguous segments
* @fpos: Starting file position to label the bvecq with
* @_bvecq_head: Where to cache the bvec queue
* @extraction_flags: Flags to qualify the request
*
 * Extract the page fragments from the given amount of the source iterator and
 * build a bvec queue that refers to all of those bits.  This allows the
 * original iterator to be disposed of.
*
* @extraction_flags can have ITER_ALLOW_P2PDMA set to request peer-to-peer DMA be
* allowed on the pages extracted.
*
* On success, the amount of data in the bvec is returned, the original
* iterator will have been advanced by the amount extracted.
*
 * The bvecq segments are marked with indications of how to clean up the
 * extracted fragments.
*/
ssize_t netfs_extract_iter(struct iov_iter *orig, size_t orig_len, size_t max_segs,
			   unsigned long long fpos, struct bvecq **_bvecq_head,
			   iov_iter_extraction_t extraction_flags)
{
	struct bvecq *bq_tail = NULL;
	ssize_t ret = 0;
	size_t segs_per_bq;
	size_t extracted = 0;

	_enter("{%u,%zx},%zx", orig->iter_type, orig->count, orig_len);

	/* max_segs == 0 means "no limit". */
	if (max_segs == 0)
		max_segs = ULONG_MAX;

	/* We want the biggest pow-of-2 size that has at most 255 segs and that
	 * won't exceed a 4K page.
	 */
	segs_per_bq = (4096 - sizeof(*bq_tail)) / sizeof(bq_tail->__bv[0]);
	if (segs_per_bq > 255)
		segs_per_bq = (2048 - sizeof(*bq_tail)) / sizeof(bq_tail->__bv[0]);

	do {
		struct bvecq *bq;
		size_t nr_slots = iov_iter_npages(orig, umin(segs_per_bq, max_segs));

		/* Zero pages with data still to extract means the iterator is
		 * inconsistent; npages can't legitimately exceed the cap we
		 * just passed in.  Either way, bail rather than loop forever.
		 */
		if (WARN_ON(nr_slots == 0 && extracted < orig_len) ||
		    WARN_ON(nr_slots > max_segs))
			break;
		max_segs -= nr_slots;

		bq = netfs_alloc_one_bvecq(nr_slots, GFP_NOFS);
		if (!bq) {
			ret = -ENOMEM;
			break;
		}

		/* Record how the extracted fragments must be cleaned up.
		 * NOTE(review): both flags are derived from the same iterator;
		 * confirm the cleanup path treats 'free' and 'unpin' as
		 * mutually exclusive so user-backed pages aren't both put and
		 * unpinned.
		 */
		bq->free = user_backed_iter(orig);
		bq->unpin = iov_iter_extract_will_pin(orig);

		/* Chain the new bvecq onto the tail of the queue, publishing
		 * the first one through *_bvecq_head.
		 */
		bq->prev = bq_tail;
		bq->fpos = fpos + extracted;
		if (bq_tail)
			bq_tail->next = bq;
		else
			*_bvecq_head = bq;
		bq_tail = bq;

		/* Only reachable on the first pass with orig_len == 0: we
		 * still publish one (empty) bvecq as the head.
		 */
		if (extracted >= orig_len)
			break;

		/* Put the page list at the end of the bvec list storage. bvec
		 * elements are larger than page pointers, so as long as we
		 * work 0->last, we should be fine.
		 */
		struct bio_vec *bv = bq->bv;
		struct page **pages;
		size_t bv_size = array_size(bq->max_segs, sizeof(*bv));
		size_t pg_size = array_size(bq->max_segs, sizeof(*pages));

		pages = (void *)bv + bv_size - pg_size;

		do {
			unsigned int cur_npages;
			ssize_t got;
			size_t offset;

			got = iov_iter_extract_pages(orig, &pages, orig_len - extracted,
						     bq->max_segs - bq->nr_segs,
						     extraction_flags, &offset);
			if (got < 0) {
				pr_err("Couldn't get user pages (rc=%zd)\n", got);
				ret = got;
				goto out;
			}
			if (got == 0) {
				pr_err("extract_pages gave nothing from %zu/%zu\n",
				       extracted, orig_len);
				ret = -EIO;
				goto out;
			}
			if (got > orig_len - extracted) {
				pr_err("get_pages rc=%zd more than %zu\n",
				       got, orig_len - extracted);
				/* Was a silent goto with ret still 0, which
				 * returned success when nothing had yet been
				 * extracted; report the overrun as an error.
				 */
				ret = -EIO;
				goto out;
			}
			extracted += got;

			/* Convert the extracted byte count (plus the offset
			 * into the first page) into per-page bvec segments;
			 * only the first segment carries a non-zero offset.
			 */
			got += offset;
			cur_npages = DIV_ROUND_UP(got, PAGE_SIZE);
			for (unsigned int i = 0; i < cur_npages; i++) {
				size_t len = umin(got, PAGE_SIZE);

				bvec_set_page(&bq->bv[bq->nr_segs],
					      *pages++, len - offset, offset);
				bq->nr_segs++;
				got -= len;
				offset = 0;
			}
		} while (extracted < orig_len && !bvecq_is_full(bq));
	} while (extracted < orig_len && max_segs > 0);

out:
	/* Partial success wins over an error code, matching the kerneldoc:
	 * the caller learns how much was extracted.
	 */
	return extracted ?: ret;
}
EXPORT_SYMBOL_GPL(netfs_extract_iter);