1+ use crate :: backend:: Backend ;
12use ndarray:: { Array , ArrayBase , Axis , Dimension , Ix2 , OwnedRepr , RemoveAxis } ;
23use ndarray_rand:: RandomExt ;
34use ndarray_rand:: rand_distr:: Uniform ;
4- use crate :: backend:: Backend ;
55
/// Zero-sized CPU backend marker type; the actual tensor operations are
/// provided by its `Backend` trait implementation below.
#[derive(Clone)]
pub struct CPUBackend;
@@ -29,10 +29,18 @@ impl Backend for CPUBackend {
2929 tensor. mapv ( f)
3030 }
3131
32- fn add < D : Dimension > ( a : & Self :: Tensor < D > , b : & Self :: Tensor < D > ) -> Self :: Tensor < D > { a + b }
33- fn sub < D : Dimension > ( a : & Self :: Tensor < D > , b : & Self :: Tensor < D > ) -> Self :: Tensor < D > { a - b }
34- fn mul < D : Dimension > ( a : & Self :: Tensor < D > , b : & Self :: Tensor < D > ) -> Self :: Tensor < D > { a * b }
35- fn div < D : Dimension > ( a : & Self :: Tensor < D > , b : & Self :: Tensor < D > ) -> Self :: Tensor < D > { a / b }
32+ fn add < D : Dimension > ( a : & Self :: Tensor < D > , b : & Self :: Tensor < D > ) -> Self :: Tensor < D > {
33+ a + b
34+ }
35+ fn sub < D : Dimension > ( a : & Self :: Tensor < D > , b : & Self :: Tensor < D > ) -> Self :: Tensor < D > {
36+ a - b
37+ }
38+ fn mul < D : Dimension > ( a : & Self :: Tensor < D > , b : & Self :: Tensor < D > ) -> Self :: Tensor < D > {
39+ a * b
40+ }
41+ fn div < D : Dimension > ( a : & Self :: Tensor < D > , b : & Self :: Tensor < D > ) -> Self :: Tensor < D > {
42+ a / b
43+ }
3644
3745 fn scale < D : Dimension > ( tensor : & Self :: Tensor < D > , scalar : f32 ) -> Self :: Tensor < D > {
3846 tensor * scalar
@@ -71,8 +79,10 @@ impl Backend for CPUBackend {
7179 let b2 = b_dyn. view ( ) . into_dimensionality :: < Ix2 > ( ) . unwrap ( ) ;
7280 let mut result = Array :: zeros ( ( batch, m, n) ) . into_dyn ( ) ;
7381 for i in 0 ..batch {
74- let ai = a_dyn. index_axis ( Axis ( 0 ) , i)
75- . into_dimensionality :: < Ix2 > ( ) . unwrap ( ) ;
82+ let ai = a_dyn
83+ . index_axis ( Axis ( 0 ) , i)
84+ . into_dimensionality :: < Ix2 > ( )
85+ . unwrap ( ) ;
7686 result. index_axis_mut ( Axis ( 0 ) , i) . assign ( & ai. dot ( & b2) ) ;
7787 }
7888 result
@@ -82,15 +92,23 @@ impl Backend for CPUBackend {
8292 let n = b_dyn. shape ( ) [ 2 ] ;
8393 let mut result = Array :: zeros ( ( batch, m, n) ) . into_dyn ( ) ;
8494 for i in 0 ..batch {
85- let ai = a_dyn. index_axis ( Axis ( 0 ) , i)
86- . into_dimensionality :: < Ix2 > ( ) . unwrap ( ) ;
87- let bi = b_dyn. index_axis ( Axis ( 0 ) , i)
88- . into_dimensionality :: < Ix2 > ( ) . unwrap ( ) ;
95+ let ai = a_dyn
96+ . index_axis ( Axis ( 0 ) , i)
97+ . into_dimensionality :: < Ix2 > ( )
98+ . unwrap ( ) ;
99+ let bi = b_dyn
100+ . index_axis ( Axis ( 0 ) , i)
101+ . into_dimensionality :: < Ix2 > ( )
102+ . unwrap ( ) ;
89103 result. index_axis_mut ( Axis ( 0 ) , i) . assign ( & ai. dot ( & bi) ) ;
90104 }
91105 result
92106 }
93- _ => panic ! ( "matmul: unsupported shapes {:?} × {:?}" , a_dyn. shape( ) , b_dyn. shape( ) ) ,
107+ _ => panic ! (
108+ "matmul: unsupported shapes {:?} × {:?}" ,
109+ a_dyn. shape( ) ,
110+ b_dyn. shape( )
111+ ) ,
94112 } ;
95113 out. into_dimensionality :: < D1 > ( )
96114 . expect ( "matmul output rank must match left operand" )
@@ -121,7 +139,6 @@ impl Backend for CPUBackend {
121139 . expect ( "broadcast_add output rank must match left operand" )
122140 }
123141
124-
125142 fn softmax < D : Dimension > ( tensor : & Self :: Tensor < D > ) -> Self :: Tensor < D > {
126143 let shape = tensor. shape ( ) . to_vec ( ) ;
127144 let last_dim = shape[ shape. len ( ) - 1 ] ;
0 commit comments