@@ -1,18 +1,20 @@
 //! The virtual memory representation of the MIR interpreter.
 
+use std::borrow::Cow;
+use std::convert::TryFrom;
+use std::iter;
+use std::ops::{Add, Deref, DerefMut, Mul, Range, Sub};
+
+use rustc_ast::ast::Mutability;
+use rustc_data_structures::sorted_map::SortedMap;
+use rustc_target::abi::HasDataLayout;
+
 use super::{
     read_target_uint, write_target_uint, AllocId, InterpResult, Pointer, Scalar, ScalarMaybeUndef,
 };
 
 use crate::ty::layout::{Align, Size};
 
-use rustc_ast::ast::Mutability;
-use rustc_data_structures::sorted_map::SortedMap;
-use rustc_target::abi::HasDataLayout;
-use std::borrow::Cow;
-use std::iter;
-use std::ops::{Deref, DerefMut, Range};
-
 // NOTE: When adding new fields, make sure to adjust the `Snapshot` impl in
 // `src/librustc_mir/interpret/snapshot.rs`.
 #[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)]
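Every hunk below follows the same two patterns: lossy `as` casts between `usize`, `u64`, and `u128` become `try_from(...)` with an explicit `unwrap()`/`expect()`, and bare `+`/`*`/`-` on `Size` becomes the overflow-checking `Size::add`/`Size::mul`/`Size::sub` in function form (hence the new `TryFrom` and `Add`/`Mul`/`Sub` imports above). As a standalone illustration of the difference, not part of the patch:

```rust
use std::convert::TryFrom;

fn main() {
    let len: u64 = u64::from(u32::MAX) + 1; // 4_294_967_296
    assert_eq!(len as u32, 0); // `as` wraps silently: a plausible-looking wrong value
    assert!(u32::try_from(len).is_err()); // `try_from` surfaces the loss as an error
    // `.unwrap()` on that `Err` trades silent miscomputation for a loud panic.
}
```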
@@ -90,7 +92,7 @@ impl<Tag> Allocation<Tag> {
     /// Creates a read-only allocation initialized by the given bytes
     pub fn from_bytes<'a>(slice: impl Into<Cow<'a, [u8]>>, align: Align) -> Self {
         let bytes = slice.into().into_owned();
-        let size = Size::from_bytes(bytes.len() as u64);
+        let size = Size::from_bytes(u64::try_from(bytes.len()).unwrap());
         Self {
             bytes,
             relocations: Relocations::new(),
@@ -107,9 +109,8 @@ impl<Tag> Allocation<Tag> {
     }
 
     pub fn undef(size: Size, align: Align) -> Self {
-        assert_eq!(size.bytes() as usize as u64, size.bytes());
         Allocation {
-            bytes: vec![0; size.bytes() as usize],
+            bytes: vec![0; usize::try_from(size.bytes()).unwrap()],
             relocations: Relocations::new(),
             undef_mask: UndefMask::new(size, false),
             size,
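The `undef` change matters when a 32-bit host interprets a 64-bit target: a `size.bytes()` above `usize::MAX` used to wrap before ever reaching `vec![0; ...]`. A scaled-down sketch of that failure mode, using `u32` as a stand-in for a 32-bit `usize`:

```rust
use std::convert::TryFrom;

fn main() {
    let alloc_size: u64 = 5_000_000_000; // bigger than a 32-bit address space
    // What `size.bytes() as usize` does on a 32-bit host: wrap quietly, so
    // `vec![0; ...]` would build a much-too-small buffer.
    assert_eq!(alloc_size as u32, 705_032_704);
    // What the patched code does instead: refuse the conversion outright.
    assert!(u32::try_from(alloc_size).is_err());
}
```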
@@ -152,7 +153,7 @@ impl Allocation<(), ()> {
 /// Raw accessors. Provide access to otherwise private bytes.
 impl<Tag, Extra> Allocation<Tag, Extra> {
     pub fn len(&self) -> usize {
-        self.size.bytes() as usize
+        usize::try_from(self.size.bytes()).unwrap()
     }
 
     /// Looks at a slice which may describe undefined bytes or describe a relocation. This differs
@@ -182,21 +183,16 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
     /// Returns the range of this allocation that was meant.
     #[inline]
     fn check_bounds(&self, offset: Size, size: Size) -> Range<usize> {
-        let end = offset + size; // This does overflow checking.
-        assert_eq!(
-            end.bytes() as usize as u64,
-            end.bytes(),
-            "cannot handle this access on this host architecture"
-        );
-        let end = end.bytes() as usize;
+        let end = Size::add(offset, size); // This does overflow checking.
+        let end = usize::try_from(end.bytes()).expect("access too big for this host architecture");
         assert!(
             end <= self.len(),
             "Out-of-bounds access at offset {}, size {} in allocation of size {}",
             offset.bytes(),
             size.bytes(),
             self.len()
         );
-        (offset.bytes() as usize)..end
+        usize::try_from(offset.bytes()).unwrap()..end
     }
 
     /// The last argument controls whether we error out when there are undefined
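`Size::add` is the `std::ops::Add` trait method called in function form; per the inline comment, `Size`'s `Add` impl checks for overflow. A minimal sketch of such a newtype, using a hypothetical `ByteSize` rather than rustc's actual `Size`:

```rust
use std::ops::Add;

#[derive(Copy, Clone, Debug, PartialEq)]
struct ByteSize(u64); // hypothetical stand-in for `Size`

impl Add for ByteSize {
    type Output = ByteSize;
    fn add(self, rhs: ByteSize) -> ByteSize {
        // Panic on overflow instead of wrapping, so `offset + size` can
        // never wrap back into bounds.
        ByteSize(self.0.checked_add(rhs.0).expect("size overflow"))
    }
}

fn main() {
    // `ByteSize::add(a, b)` is the same trait method as `a + b`, written in
    // function form, the style the patch uses for `Size::add`.
    assert_eq!(ByteSize::add(ByteSize(8), ByteSize(16)), ByteSize(24));
}
```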
@@ -294,11 +290,11 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
         cx: &impl HasDataLayout,
         ptr: Pointer<Tag>,
     ) -> InterpResult<'tcx, &[u8]> {
-        assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes());
-        let offset = ptr.offset.bytes() as usize;
+        let offset = usize::try_from(ptr.offset.bytes()).unwrap();
         Ok(match self.bytes[offset..].iter().position(|&c| c == 0) {
             Some(size) => {
-                let size_with_null = Size::from_bytes((size + 1) as u64);
+                let size_with_null =
+                    Size::from_bytes(u64::try_from(size.checked_add(1).unwrap()).unwrap());
                 // Go through `get_bytes` for checks and AllocationExtra hooks.
                 // We read the null, so we include it in the request, but we want it removed
                 // from the result, so we do subslicing.
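The new `checked_add(1)` covers the corner case where the null terminator sits at the last addressable byte, so `size + 1` itself would overflow. A standalone illustration:

```rust
fn main() {
    // If the terminating null sat at the very end of the address space,
    // `size + 1` itself would overflow.
    let null_at_end: usize = usize::MAX;
    assert_eq!(null_at_end.checked_add(1), None); // explicit, checkable failure
    assert_eq!(null_at_end.wrapping_add(1), 0); // what unchecked `+` does in release builds
}
```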
@@ -343,7 +339,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
         let (lower, upper) = src.size_hint();
         let len = upper.expect("can only write bounded iterators");
         assert_eq!(lower, len, "can only write iterators with a precise length");
-        let bytes = self.get_bytes_mut(cx, ptr, Size::from_bytes(len as u64))?;
+        let bytes = self.get_bytes_mut(cx, ptr, Size::from_bytes(u64::try_from(len).unwrap()))?;
         // `zip` would stop when the first iterator ends; we want to definitely
         // cover all of `bytes`.
         for dest in bytes {
@@ -386,7 +382,11 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
         } else {
             match self.relocations.get(&ptr.offset) {
                 Some(&(tag, alloc_id)) => {
-                    let ptr = Pointer::new_with_tag(alloc_id, Size::from_bytes(bits as u64), tag);
+                    let ptr = Pointer::new_with_tag(
+                        alloc_id,
+                        Size::from_bytes(u64::try_from(bits).unwrap()),
+                        tag,
+                    );
                     return Ok(ScalarMaybeUndef::Scalar(ptr.into()));
                 }
                 None => {}
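Here `bits` is the raw integer decoded from the allocation's bytes (a `u128` from `read_target_uint`, wide enough for any target's pointer size), narrowed to the 64-bit offset space only when a relocation shows it is a pointer. Illustrating the direction of that conversion:

```rust
use std::convert::TryFrom;

fn main() {
    let bits: u128 = 0x1000; // raw integer read out of the allocation
    assert_eq!(u64::try_from(bits).unwrap(), 0x1000);
    // A value that cannot be a 64-bit offset is rejected, not truncated:
    assert!(u64::try_from(u128::MAX).is_err());
}
```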
@@ -433,7 +433,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
         };
 
         let bytes = match val.to_bits_or_ptr(type_size, cx) {
-            Err(val) => val.offset.bytes() as u128,
+            Err(val) => u128::from(val.offset.bytes()),
             Ok(data) => data,
         };
 
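Note the asymmetry with the other hunks: widening `u64` to `u128` can never lose bits, so the infallible `u128::from` suffices and leaves no `unwrap` to audit. For example:

```rust
use std::convert::TryFrom;

fn main() {
    let offset: u64 = 42;
    let wide: u128 = u128::from(offset); // infallible widening, nothing to unwrap
    assert_eq!(u64::try_from(wide).unwrap(), offset); // narrowing back stays fallible
}
```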
@@ -479,7 +479,7 @@ impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> {
         // We have to go back `pointer_size - 1` bytes, as that one would still overlap with
         // the beginning of this range.
         let start = ptr.offset.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1);
-        let end = ptr.offset + size; // This does overflow checking.
+        let end = Size::add(ptr.offset, size); // This does overflow checking.
         self.relocations.range(Size::from_bytes(start)..end)
     }
 
@@ -524,7 +524,7 @@ impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> {
             )
         };
         let start = ptr.offset;
-        let end = start + size;
+        let end = Size::add(start, size);
 
         // Mark parts of the outermost relocations as undefined if they partially fall outside the
         // given range.
@@ -563,15 +563,15 @@ impl<'tcx, Tag, Extra> Allocation<Tag, Extra> {
     #[inline]
     fn check_defined(&self, ptr: Pointer<Tag>, size: Size) -> InterpResult<'tcx> {
         self.undef_mask
-            .is_range_defined(ptr.offset, ptr.offset + size)
+            .is_range_defined(ptr.offset, Size::add(ptr.offset, size))
             .or_else(|idx| throw_ub!(InvalidUndefBytes(Some(Pointer::new(ptr.alloc_id, idx)))))
     }
 
     pub fn mark_definedness(&mut self, ptr: Pointer<Tag>, size: Size, new_state: bool) {
         if size.bytes() == 0 {
             return;
         }
-        self.undef_mask.set_range(ptr.offset, ptr.offset + size, new_state);
+        self.undef_mask.set_range(ptr.offset, Size::add(ptr.offset, size), new_state);
     }
 }
 
@@ -616,7 +616,7 @@ impl<Tag, Extra> Allocation<Tag, Extra> {
 
         for i in 1..size.bytes() {
             // FIXME: optimize to bitshift the current undef block's bits and read the top bit.
-            if self.undef_mask.get(src.offset + Size::from_bytes(i)) == cur {
+            if self.undef_mask.get(Size::add(src.offset, Size::from_bytes(i))) == cur {
                 cur_len += 1;
             } else {
                 ranges.push(cur_len);
@@ -643,7 +643,7 @@ impl<Tag, Extra> Allocation<Tag, Extra> {
         if defined.ranges.len() <= 1 {
             self.undef_mask.set_range_inbounds(
                 dest.offset,
-                dest.offset + size * repeat,
+                Size::add(dest.offset, Size::mul(size, repeat)),
                 defined.initial,
             );
             return;
@@ -721,10 +721,10 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
         for i in 0..length {
             new_relocations.extend(relocations.iter().map(|&(offset, reloc)| {
                 // compute offset for current repetition
-                let dest_offset = dest.offset + (i * size);
+                let dest_offset = Size::add(dest.offset, Size::mul(size, i));
                 (
                     // shift offsets from source allocation to destination allocation
-                    offset + dest_offset - src.offset,
+                    Size::sub(Size::add(offset, dest_offset), src.offset),
                     reloc,
                 )
             }));
@@ -861,18 +861,18 @@ impl UndefMask {
         if amount.bytes() == 0 {
             return;
         }
-        let unused_trailing_bits = self.blocks.len() as u64 * Self::BLOCK_SIZE - self.len.bytes();
+        let unused_trailing_bits =
+            u64::try_from(self.blocks.len()).unwrap() * Self::BLOCK_SIZE - self.len.bytes();
         if amount.bytes() > unused_trailing_bits {
             let additional_blocks = amount.bytes() / Self::BLOCK_SIZE + 1;
-            assert_eq!(additional_blocks as usize as u64, additional_blocks);
             self.blocks.extend(
                 // FIXME(oli-obk): optimize this by repeating `new_state as Block`.
-                iter::repeat(0).take(additional_blocks as usize),
+                iter::repeat(0).take(usize::try_from(additional_blocks).unwrap()),
             );
         }
         let start = self.len;
         self.len += amount;
-        self.set_range_inbounds(start, start + amount, new_state);
+        self.set_range_inbounds(start, Size::add(start, amount), new_state);
     }
 }
 
@@ -881,7 +881,5 @@ fn bit_index(bits: Size) -> (usize, usize) {
     let bits = bits.bytes();
     let a = bits / UndefMask::BLOCK_SIZE;
     let b = bits % UndefMask::BLOCK_SIZE;
-    assert_eq!(a as usize as u64, a);
-    assert_eq!(b as usize as u64, b);
-    (a as usize, b as usize)
+    (usize::try_from(a).unwrap(), usize::try_from(b).unwrap())
 }
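For reference, a scaled-down sketch of what `bit_index` computes: it splits a bit offset into a block index and a bit position within that block. The `BLOCK_SIZE = 64` here is an assumption (one `u64` block) consistent with the `u64` arithmetic above, not taken from this diff:

```rust
use std::convert::TryFrom;

const BLOCK_SIZE: u64 = 64; // assumption: one u64 block holds 64 definedness bits

fn bit_index(bits: u64) -> (usize, usize) {
    let a = usize::try_from(bits / BLOCK_SIZE).unwrap(); // which block
    let b = usize::try_from(bits % BLOCK_SIZE).unwrap(); // which bit within it
    (a, b)
}

fn main() {
    assert_eq!(bit_index(0), (0, 0));
    assert_eq!(bit_index(63), (0, 63));
    assert_eq!(bit_index(64), (1, 0));
    assert_eq!(bit_index(130), (2, 2));
}
```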