@@ -278,6 +278,46 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
 	*lenp = plen;
 }
 
+static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
+		loff_t pos)
+{
+	const struct iomap *srcmap = iomap_iter_srcmap(iter);
+
+	return srcmap->type != IOMAP_MAPPED ||
+		(srcmap->flags & IOMAP_F_NEW) ||
+		pos >= i_size_read(iter->inode);
+}
+
+/**
+ * iomap_read_inline_data - copy inline data into the page cache
+ * @iter: iteration structure
+ * @folio: folio to copy to
+ *
+ * Copy the inline data in @iter into @folio and zero out the rest of the folio.
+ * Only a single IOMAP_INLINE extent is allowed at the end of each file.
+ * Returns zero for success to complete the read, or the usual negative errno.
+ */
+static int iomap_read_inline_data(const struct iomap_iter *iter,
+		struct folio *folio)
+{
+	const struct iomap *iomap = iomap_iter_srcmap(iter);
+	size_t size = i_size_read(iter->inode) - iomap->offset;
+	size_t offset = offset_in_folio(folio, iomap->offset);
+
+	if (folio_test_uptodate(folio))
+		return 0;
+
+	if (WARN_ON_ONCE(size > iomap->length))
+		return -EIO;
+	if (offset > 0)
+		ifs_alloc(iter->inode, folio, iter->flags);
+
+	folio_fill_tail(folio, offset, iomap->inline_data, size);
+	iomap_set_range_uptodate(folio, offset, folio_size(folio) - offset);
+	return 0;
+}
+
+#ifdef CONFIG_BLOCK
 static void iomap_finish_folio_read(struct folio *folio, size_t off,
 		size_t len, int error)
 {
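Aside (not part of the patch): iomap_block_needs_zeroing(), moved above, decides when a block can simply be zero-filled in the page cache instead of being read from disk, namely when the source extent is not mapped, was freshly allocated (IOMAP_F_NEW), or the position is at or beyond EOF. A minimal, self-contained userspace sketch of that decision follows; the types and names here are simplified stand-ins, not the kernel definitions.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel types; not the real definitions. */
enum map_type { MAP_HOLE, MAP_UNWRITTEN, MAP_MAPPED };
#define MAP_F_NEW 0x01	/* extent was freshly allocated */

struct map {
	enum map_type type;
	unsigned int flags;
};

/*
 * Models the check in iomap_block_needs_zeroing(): no read is needed when
 * there cannot be valid data on disk for this position.
 */
static bool block_needs_zeroing(const struct map *srcmap, long long pos,
		long long isize)
{
	return srcmap->type != MAP_MAPPED ||
		(srcmap->flags & MAP_F_NEW) ||
		pos >= isize;
}

int main(void)
{
	struct map hole = { .type = MAP_HOLE };
	struct map fresh = { .type = MAP_MAPPED, .flags = MAP_F_NEW };
	struct map data = { .type = MAP_MAPPED };

	printf("hole:         %d\n", block_needs_zeroing(&hole, 0, 4096));
	printf("new extent:   %d\n", block_needs_zeroing(&fresh, 0, 4096));
	printf("beyond EOF:   %d\n", block_needs_zeroing(&data, 8192, 4096));
	printf("mapped data:  %d\n", block_needs_zeroing(&data, 0, 4096));
	return 0;
}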
@@ -317,45 +357,6 @@ struct iomap_readpage_ctx {
 	struct readahead_control *rac;
 };
 
-/**
- * iomap_read_inline_data - copy inline data into the page cache
- * @iter: iteration structure
- * @folio: folio to copy to
- *
- * Copy the inline data in @iter into @folio and zero out the rest of the folio.
- * Only a single IOMAP_INLINE extent is allowed at the end of each file.
- * Returns zero for success to complete the read, or the usual negative errno.
- */
-static int iomap_read_inline_data(const struct iomap_iter *iter,
-		struct folio *folio)
-{
-	const struct iomap *iomap = iomap_iter_srcmap(iter);
-	size_t size = i_size_read(iter->inode) - iomap->offset;
-	size_t offset = offset_in_folio(folio, iomap->offset);
-
-	if (folio_test_uptodate(folio))
-		return 0;
-
-	if (WARN_ON_ONCE(size > iomap->length))
-		return -EIO;
-	if (offset > 0)
-		ifs_alloc(iter->inode, folio, iter->flags);
-
-	folio_fill_tail(folio, offset, iomap->inline_data, size);
-	iomap_set_range_uptodate(folio, offset, folio_size(folio) - offset);
-	return 0;
-}
-
-static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
-		loff_t pos)
-{
-	const struct iomap *srcmap = iomap_iter_srcmap(iter);
-
-	return srcmap->type != IOMAP_MAPPED ||
-		(srcmap->flags & IOMAP_F_NEW) ||
-		pos >= i_size_read(iter->inode);
-}
-
 static int iomap_readpage_iter(struct iomap_iter *iter,
 		struct iomap_readpage_ctx *ctx)
 {
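Aside (not part of the patch): the hunk above only removes the old copies of iomap_read_inline_data() and iomap_block_needs_zeroing(); they now live ahead of the CONFIG_BLOCK guard. For illustration, a userspace model of the copy-and-zero-tail step that iomap_read_inline_data() delegates to folio_fill_tail(): copy the inline bytes at the extent's offset within the folio and zero everything after them, so the whole tail can be marked uptodate. The names below (fill_tail, the buffer sizes) are made up for the sketch, not kernel API.

#include <stdio.h>
#include <string.h>

/*
 * Userspace model of folio_fill_tail(): copy `len` bytes of inline data to
 * `dst + offset` and zero from the end of the copy to the end of the buffer.
 */
static void fill_tail(char *dst, size_t dst_size, size_t offset,
		const void *src, size_t len)
{
	memcpy(dst + offset, src, len);
	memset(dst + offset + len, 0, dst_size - offset - len);
}

int main(void)
{
	char folio[16];
	static const char inline_data[] = { 'h', 'i' };

	memset(folio, 0xaa, sizeof(folio));	/* stale contents */
	/* Inline extent starts 4 bytes into the folio and holds 2 bytes. */
	fill_tail(folio, sizeof(folio), 4, inline_data, sizeof(inline_data));

	for (size_t i = 0; i < sizeof(folio); i++)
		printf("%02x ", (unsigned char)folio[i]);
	printf("\n");
	return 0;
}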
@@ -548,6 +549,27 @@ void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
 }
 EXPORT_SYMBOL_GPL(iomap_readahead);
 
+static int iomap_read_folio_range(const struct iomap_iter *iter,
+		struct folio *folio, loff_t pos, size_t len)
+{
+	const struct iomap *srcmap = iomap_iter_srcmap(iter);
+	struct bio_vec bvec;
+	struct bio bio;
+
+	bio_init(&bio, srcmap->bdev, &bvec, 1, REQ_OP_READ);
+	bio.bi_iter.bi_sector = iomap_sector(srcmap, pos);
+	bio_add_folio_nofail(&bio, folio, len, offset_in_folio(folio, pos));
+	return submit_bio_wait(&bio);
+}
+#else
+static int iomap_read_folio_range(const struct iomap_iter *iter,
+		struct folio *folio, loff_t pos, size_t len)
+{
+	WARN_ON_ONCE(1);
+	return -EIO;
+}
+#endif /* CONFIG_BLOCK */
+
 /*
  * iomap_is_partially_uptodate checks whether blocks within a folio are
  * uptodate or not.
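Aside (not part of the patch): the synchronous read helper is now defined inside the CONFIG_BLOCK section, and configurations without block-device support get a stub that warns and fails with -EIO. A small self-contained sketch of that conditional-compilation pattern; HAVE_BLOCK and read_range() are made-up stand-ins for CONFIG_BLOCK and the kernel helper.

#include <errno.h>
#include <stdio.h>

/* HAVE_BLOCK stands in for CONFIG_BLOCK; build with -DHAVE_BLOCK to get
 * the real path. */
#ifdef HAVE_BLOCK
static int read_range(long long pos, size_t len)
{
	/* The kernel helper builds an on-stack bio and waits for it;
	 * here we only report what would be read. */
	printf("read %zu bytes at offset %lld\n", len, pos);
	return 0;
}
#else
/* Without block support this path must never be reached: warn loudly
 * and return an I/O error, mirroring the stub in the patch. */
static int read_range(long long pos, size_t len)
{
	(void)pos;
	(void)len;
	fprintf(stderr, "WARN: read_range() called without block support\n");
	return -EIO;
}
#endif

int main(void)
{
	return read_range(0, 4096) < 0;
}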
@@ -659,19 +681,6 @@ iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
 			pos + len - 1);
 }
 
-static int iomap_read_folio_range(const struct iomap_iter *iter,
-		struct folio *folio, loff_t pos, size_t len)
-{
-	const struct iomap *srcmap = iomap_iter_srcmap(iter);
-	struct bio_vec bvec;
-	struct bio bio;
-
-	bio_init(&bio, srcmap->bdev, &bvec, 1, REQ_OP_READ);
-	bio.bi_iter.bi_sector = iomap_sector(srcmap, pos);
-	bio_add_folio_nofail(&bio, folio, len, offset_in_folio(folio, pos));
-	return submit_bio_wait(&bio);
-}
-
 static int __iomap_write_begin(const struct iomap_iter *iter,
 		const struct iomap_write_ops *write_ops, size_t len,
 		struct folio *folio)