author | Wolfram Sang <wsa+renesas@sang-engineering.com> | 2024-11-18 08:35:47 +0100
committer | Wolfram Sang <wsa+renesas@sang-engineering.com> | 2024-11-18 08:35:47 +0100
commit | 1b3073291ddbe23fede7e0dd1b6f5635e370f8ba (patch)
tree | a3245db38b3389d4a63731b2c679c2d38eb24026 /fs/netfs/buffered_read.c
parent | 48730a9d04ffccda541602d722d1ff81920a85d8 (diff)
parent | 1922bc245541bd08e3282d8199c8ac703e366111 (diff)
Merge tag 'i2c-host-6.13-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/andi.shyti/linux into i2c/for-mergewindow
i2c-host updates for v6.13, part 1
Major Improvements and Refactoring:
- All controllers using the 'remove_new' callback have been
reverted to use the 'remove' callback.
- Intel SCH controller underwent significant refactoring, which
brings some love and a modern look to the driver.
- PIIX4 driver refactored to enable usage by other drivers
(e.g., AMD ASF).
- i.MX/MXC improved message handling to reduce protocol overhead:
the DMA/non-DMA read/write paths and the bus polling mechanism were
refactored to achieve this.
- ACPI documentation for PIIX4.
New Features:
- i2c-cadence added support for atomic transfers.
- Qualcomm GENI added support for a 32MHz serial engine clock.
Deprecated Features:
- Dropped outdated support for AMD756 S4882 and NFORCE2 S4985. If
somebody misses this, Jean will rewrite support using the proper
i2c mux framework.
New Hardware Support:
- Added support for:
- Intel Panther Lake (new ID)
- AMD ASF (new driver)
- S32G2/S32G3 SoCs (new ID)
- Realtek RTL I2C Controller (new driver)
- HJMC01 DesignWare ACPI HID (new ID)
- PIC64GX to Microchip Core (new ID)
- Qualcomm SDM670 to Qualcomm CCI (new ID)
Diffstat (limited to 'fs/netfs/buffered_read.c')
-rw-r--r-- | fs/netfs/buffered_read.c | 47
1 file changed, 14 insertions, 33 deletions
diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
index c40e226053ccc..af46a598f4d7c 100644
--- a/fs/netfs/buffered_read.c
+++ b/fs/netfs/buffered_read.c
@@ -67,7 +67,8 @@ static int netfs_begin_cache_read(struct netfs_io_request *rreq, struct netfs_in
  * Decant the list of folios to read into a rolling buffer.
  */
 static size_t netfs_load_buffer_from_ra(struct netfs_io_request *rreq,
-					struct folio_queue *folioq)
+					struct folio_queue *folioq,
+					struct folio_batch *put_batch)
 {
 	unsigned int order, nr;
 	size_t size = 0;
@@ -82,6 +83,9 @@ static size_t netfs_load_buffer_from_ra(struct netfs_io_request *rreq,
 		order = folio_order(folio);
 		folioq->orders[i] = order;
 		size += PAGE_SIZE << order;
+
+		if (!folio_batch_add(put_batch, folio))
+			folio_batch_release(put_batch);
 	}
 
 	for (int i = nr; i < folioq_nr_slots(folioq); i++)
@@ -120,6 +124,9 @@ static ssize_t netfs_prepare_read_iterator(struct netfs_io_subrequest *subreq)
 		 * that we will need to release later - but we don't want to do
 		 * that until after we've started the I/O.
 		 */
+		struct folio_batch put_batch;
+
+		folio_batch_init(&put_batch);
 		while (rreq->submitted < subreq->start + rsize) {
 			struct folio_queue *tail = rreq->buffer_tail, *new;
 			size_t added;
@@ -132,10 +139,11 @@ static ssize_t netfs_prepare_read_iterator(struct netfs_io_subrequest *subreq)
 			new->prev = tail;
 			tail->next = new;
 			rreq->buffer_tail = new;
-			added = netfs_load_buffer_from_ra(rreq, new);
+			added = netfs_load_buffer_from_ra(rreq, new, &put_batch);
 			rreq->iter.count += added;
 			rreq->submitted += added;
 		}
+		folio_batch_release(&put_batch);
 	}
 
 	subreq->len = rsize;
@@ -348,6 +356,7 @@ static int netfs_wait_for_read(struct netfs_io_request *rreq)
 static int netfs_prime_buffer(struct netfs_io_request *rreq)
 {
 	struct folio_queue *folioq;
+	struct folio_batch put_batch;
 	size_t added;
 
 	folioq = kmalloc(sizeof(*folioq), GFP_KERNEL);
@@ -360,39 +369,14 @@ static int netfs_prime_buffer(struct netfs_io_request *rreq)
 	rreq->submitted = rreq->start;
 	iov_iter_folio_queue(&rreq->iter, ITER_DEST, folioq, 0, 0, 0);
 
-	added = netfs_load_buffer_from_ra(rreq, folioq);
+	folio_batch_init(&put_batch);
+	added = netfs_load_buffer_from_ra(rreq, folioq, &put_batch);
+	folio_batch_release(&put_batch);
 	rreq->iter.count += added;
 	rreq->submitted += added;
 	return 0;
 }
 
-/*
- * Drop the ref on each folio that we inherited from the VM readahead code.  We
- * still have the folio locks to pin the page until we complete the I/O.
- *
- * Note that we can't just release the batch in each queue struct as we use the
- * occupancy count in other places.
- */
-static void netfs_put_ra_refs(struct folio_queue *folioq)
-{
-	struct folio_batch fbatch;
-
-	folio_batch_init(&fbatch);
-	while (folioq) {
-		for (unsigned int slot = 0; slot < folioq_count(folioq); slot++) {
-			struct folio *folio = folioq_folio(folioq, slot);
-			if (!folio)
-				continue;
-			trace_netfs_folio(folio, netfs_folio_trace_read_put);
-			if (!folio_batch_add(&fbatch, folio))
-				folio_batch_release(&fbatch);
-		}
-		folioq = folioq->next;
-	}
-
-	folio_batch_release(&fbatch);
-}
-
 /**
  * netfs_readahead - Helper to manage a read request
  * @ractl: The description of the readahead request
@@ -436,9 +420,6 @@ void netfs_readahead(struct readahead_control *ractl)
 		goto cleanup_free;
 	netfs_read_to_pagecache(rreq);
 
-	/* Release the folio refs whilst we're waiting for the I/O. */
-	netfs_put_ra_refs(rreq->buffer);
-
 	netfs_put_request(rreq, true, netfs_rreq_trace_put_return);
 	return;
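For context on the pattern the patch adopts: rather than walking the whole folio_queue after I/O submission (the deleted netfs_put_ra_refs()), the readahead folio references are now dropped as the folios are loaded into the rolling buffer, accumulated in a folio_batch that is released whenever it fills up, with one final release afterwards. The sketch below is a minimal userspace analogue of that accumulate-and-flush pattern, not kernel code: ref_batch, batch_add() and batch_release() are hypothetical stand-ins for struct folio_batch and its helpers, and struct object stands in for struct folio.

#include <stdio.h>
#include <stdlib.h>

#define BATCH_SIZE 31			/* same spirit as the kernel's fixed-size batch */

struct object { int id; };		/* stand-in for struct folio */

struct ref_batch {			/* hypothetical analogue of struct folio_batch */
	unsigned int nr;
	struct object *objs[BATCH_SIZE];
};

static void batch_init(struct ref_batch *b)
{
	b->nr = 0;
}

/* Add one object; return the number of free slots left, 0 when the batch is full. */
static unsigned int batch_add(struct ref_batch *b, struct object *obj)
{
	b->objs[b->nr++] = obj;
	return BATCH_SIZE - b->nr;
}

/* "Release" every accumulated reference in one go, then reset the batch. */
static void batch_release(struct ref_batch *b)
{
	for (unsigned int i = 0; i < b->nr; i++) {
		printf("releasing object %d\n", b->objs[i]->id);
		free(b->objs[i]);
	}
	b->nr = 0;
}

int main(void)
{
	struct ref_batch put_batch;

	batch_init(&put_batch);
	for (int i = 0; i < 100; i++) {
		struct object *obj = malloc(sizeof(*obj));
		if (!obj)
			break;
		obj->id = i;
		/* Mirror the patch: when the batch fills up, flush it and keep going. */
		if (!batch_add(&put_batch, obj))
			batch_release(&put_batch);
	}
	/* Final flush, like the folio_batch_release() after the load loop. */
	batch_release(&put_batch);
	return 0;
}

The benefit of batching here is the same as in the kernel helpers: the per-object release work is amortised, so the hot loop usually pays only for a pointer store.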