From 834494387a038c2f83861c4328bd0ab8c0823c47 Mon Sep 17 00:00:00 2001
From: "Ryan M. Richard"
Date: Wed, 31 Dec 2025 14:03:55 -0600
Subject: [PATCH 01/13] buffer supports multiplication now

---
 .../adding_operations_to_contiguous.rst       | 66 ++++++++++++++++++
 .../developer/assets/how_contiguous_works.png | Bin 0 -> 183613 bytes
 docs/source/developer/index.rst               |  1 +
 .../detail_/binary_operation_visitor.hpp      | 25 +++++++
 src/tensorwrapper/buffer/mdbuffer.cpp         | 21 +++++-
 .../detail_/binary_operation_visitor.cpp      | 66 ++++++++++++++++++
 .../tensorwrapper/buffer/mdbuffer.cpp         | 38 ++++++++++
 7 files changed, 216 insertions(+), 1 deletion(-)
 create mode 100644 docs/source/developer/adding_operations_to_contiguous.rst
 create mode 100644 docs/source/developer/assets/how_contiguous_works.png

diff --git a/docs/source/developer/adding_operations_to_contiguous.rst b/docs/source/developer/adding_operations_to_contiguous.rst
new file mode 100644
index 00000000..6a685cbc
--- /dev/null
+++ b/docs/source/developer/adding_operations_to_contiguous.rst
@@ -0,0 +1,66 @@
+.. Copyright 2023 NWChemEx-Project
+..
+.. Licensed under the Apache License, Version 2.0 (the "License");
+.. you may not use this file except in compliance with the License.
+.. You may obtain a copy of the License at
+..
+.. http://www.apache.org/licenses/LICENSE-2.0
+..
+.. Unless required by applicable law or agreed to in writing, software
+.. distributed under the License is distributed on an "AS IS" BASIS,
+.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+.. See the License for the specific language governing permissions and
+.. limitations under the License.
+
+###############################
+Adding Operations to Contiguous
+###############################
+
+The ``Contiguous`` class is the workhorse of most tensor operations because it
+provides the kernels that non-contiguous tensors are built on. As such, we may
+need to add operations to it from time to time. This document describes how to
+do that.
+
+**********************************
+Understanding How Contiguous Works
+**********************************
+
+.. figure:: assets/how_contiguous_works.png
+   :align: center
+
+   Control flow for an operation resulting in a ``Contiguous`` buffer object.
+
+For concreteness, we'll trace how ``subtraction_assignment`` is implemented.
+Other binary operations are implemented nearly identically, and the
+implementation of unary operations is extremely similar.
+
+1. The input objects, ``lhs`` and ``rhs``, are converted to ``Contiguous``
+   objects. N.b., we should eventually use performance models to decide whether
+   the time to convert to ``Contiguous`` objects is worth it, or if we should
+   rely on algorithms which do not require contiguous data.
+2. We work out the shape of the output tensor.
+3. A visitor for the desired operation is created. For
+   ``subtraction_assignment``, this is ``detail_::SubtractionVisitor``.
+
+   - Visitor definitions live in ``wtf/src/tensorwrapper/buffer/detail_/``.
+
+4. Control enters ``wtf::buffer::visit_contiguous_buffer`` to restore the
+   floating-point types.
+5. ``lhs`` and ``rhs`` are converted to ``std::span`` objects.
+6. Control enters the visitor.
+7. With the types known, the output tensor can be initialized (and is).
+8. The visitor converts the ``std::span`` objects into the tensor backend's
+   tensor objects.
+
+   - Backend implementations live in ``wtf/src/tensorwrapper/backends/``.
+
+9. The backend's implementation of the operation is invoked.
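+
+As a rough sketch of the flow above, a binary operation visitor boils down to
+a callable templated on the restored floating-point type that receives the
+buffers as ``std::span`` objects. The signature below is illustrative only,
+and the element-wise loop stands in for handing the spans to the backend's
+tensor objects:
+
+.. code-block:: cpp
+
+   #include <cstddef>
+   #include <span>
+
+   // Illustrative sketch of a visitor; the real definition lives in
+   // wtf/src/tensorwrapper/buffer/detail_/ and forwards the spans to a
+   // tensor backend instead of looping directly.
+   struct SubtractionVisitor {
+       // Called by visit_contiguous_buffer once the floating-point type T
+       // of the buffers has been restored.
+       template<typename T>
+       void operator()(std::span<T> out, std::span<const T> lhs,
+                       std::span<const T> rhs) const {
+           for(std::size_t i = 0; i < out.size(); ++i)
+               out[i] = lhs[i] - rhs[i];
+       }
+   };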
+
+**********************
+Adding a New Operation
+**********************
+
+1. Verify that one of the backends supports the desired operation. If not, add
+   it to a backend first.
+2. Create a visitor for it.
+3. Add the operation to ``wtf::buffer::Contiguous``.
diff --git a/docs/source/developer/assets/how_contiguous_works.png b/docs/source/developer/assets/how_contiguous_works.png
new file mode 100644
index 0000000000000000000000000000000000000000..9602d632b60f000bd6ed70f7944a1febb673b938
GIT binary patch
literal 183613
[183613 bytes of base85-encoded PNG data omitted]
z8J1Ru^gJFoX$$3~up`J#3q%NXxPa&iD-r|v5-3DTF^4wChu|H2Uc7l_>%#!)Tx5+z zN48ytV8v%sypafsZGuvVcj&!wd_|a9Jl4Kum1Z@JM{#7?d}JASC#HxdwsbLiEBL*Uyfm^i#)3T-0J2iPz(6C zP4iTRMXRyg=D9YZ{!YOU%d{svA}v^r3f?OczAIQN8o{v~9qMo995RtElHJdY(j*|c zu+4OxD?4#r8dJ2gsMhH#AmKRXzIayB?iaJ^xV~oNgVFqnKFM$TxRb z+kYJ>;>q{Eq2J~U$ZnI2W%;7XgvCu?7rBqI=H2aSp=QH_7K?@A6%oznZ#=u;QD_ij zS)6pecVEi|>$%E;y1T~l{SkSW+Fxf^%P@lB$e6am^M{vOC4&mg zK+sK!QGvsasK8j1fP>t=G>-{5?MS>Qe$bVOK>L_wf`mc&C)gM8!`$(qemWoYrqO%h zMfmI$V---nGo^v&*~uvZrRa8^i=Bpm4aY4Xp~YQu;l{&|lZe#W&PVZTh=q`-GYh0m zljpcmM*EE1EPnP&%`m~~4jL3aAq%DMW_ou*a)Q*CNIu-!)xOIimqcl5zDM(LxPfo{ ztBZG|RLvQ8wnmGnNAq`g!gdIgfN(rDxljpus$1c$?k-T>8Ha=m=zL19BoY03_H;M8 ze1^?ede@IKGO8{kqX0{>TfmG|twU1uV`2?%Mg5`6!N~C>N_q-3P0;ec5N34Lv`$3w zdx5OYBetuOiRvSor+HkFFP2QPAk=|){o>aLo{;qpUDs4nwRvTJAb*VGPk+;RjZ0bD3-7c`i2u)n!YDwdECu(NN`>siJ2XyEHxF5{i@pwT7JN@fq+SQ`kI57 zs9o*$HIW2Y^D!Lbj{=~be2(!M_oLJW2M}!#dTcm7VbCSp@B9SpM}}0yja$J+YOGlR zp)B@;S9j6T;BJ1rq>#MRXi8pZ*f)PXX~7^d@`@8ci}zclT&tNyj)e5jM`4%mt*QIBRVUCZZASCQ)?tr4Zs zy1J5T@05|6A)&nB>{N}NA6B*CU3UVqS-9iw64xp~|Fm8kdCJEfxmgYiDCF+1 z6i`OyGLm9h`k%HvK&)0;edB*s&GG^b>W1d^_<`XzR_$Z1?mp*()}Hf^RQvC}qVGBp z)Z@|`CfnXK zk|jF6-xB z5glKfK*Oc+qaFtbQF3Gben`MqUGjj=k>@gPLzmEY z&j@4cN*jW3;WYD>l5j_l4VG0qJA1;EEIG`mMD%f2&;Ij+$5ce-ainS_<&{h^i#4*z zd`%l_9{dMFR$^L|Ce5j6V!=Z_t`=|0!r0LIalDeW<{jhmE0*HyF{og0Zs_!~)8$jc z8ZKsxPq5U5Uibw&9Sv)Xj1^)$v_jPkEKfsc4=`z84pK^UZAODn9Q-2dI~-@T4Xznw z(j-hq`MCw)Ll(B05JO9E$9o^UEl;#$_r63D$Yc)FX?kc2o?%s=x>|1|n@yBkX}xBg z@U10pKNsSKvD$T?X}pNlA`YCS{}p|B85Yf2h#OS>bEfERm!MYS&h77QAX8P)Pk#($ z4RV4cM>=e-l4{l;G5(~f0tnWisL7fd{n5i(SPUW9GKPPu(C@LeKjDyCS2zos?IbioM;Zh0TfjEV=3^a$4LzZ$OV(Ev_-ysly+%ZJk%2I=WaJ+rPELi3 zQK||#DpcnIMjPWZbssR30GOw(o8l3)lphJ;s!Qr0rA5vekm)jxc7Vd2>T7ym%lSimmv7X}LiTPtEBjDl3+a>c zSCrR)F3H$zJo0!LQg%~t-TkQ)M}aw{)^C@VnO5I1?Qg+hptgR~fR*ofodPiuW^X|n zgVb+(hr^oG-~EWHH;{d{8{z8iPL<7WY|nD`!${LkL4oW&Mo7r{Ds>Hrey;Rwc^@>V z`omB66h-pkUuhs4P>0EXoKCzvJCxkG@1x*hV2V-jO><#T^U}FkFR|PEy5gr!HO_1a zksH&&M*tLTYIpuz>LZjUe`a4!HSWp_>m9slvU!nj#K<-yY%`n#L;E0Lm!C))bF+43J0RgxTN3APE4G*RzP| zCsJWc_a_PwKv)wZNtkIqzAKdrH$Cki=SblDem15ac@|Pm5O=)QcHhsI9F0_%VS>1B z6J*(JSqavGgn^QeMX55rvbZwmho;XP`6yB(NqAHH2NZyAgm0qJ1*nuhd-8PYFz8mW z5`oOn+(Z3(>}l6+_C6tc`94Dz*5-5#$G$T?%gejSk3MqnEPZOf&0o~$Hym^u{imx7 zP&>Ko{Jy@a^*E4*0mToQywW$R9CxbC+#p-kVv0M}T)s25DUT%Nyt0g4o_QAvYuV@mko zG~+L6%(mcBdE8ji2DbC@%?d?18P?+mdwOoy55(*(Lu)}A>$_dqGb8D0w51FYgSrKtsk}^g`(%tgsIUv-0WINJNP2cj(C@Ll2H5-d^Ktj(^DM<@`{inZ9e=F+0d$*&5nuVbE@Woqh?vxD9k?_JigHWfpN}_#@mk!Yx&`i z9CYS)I8{T4r;h2?yh8gw`eUS?70bPRaQ;r=l9k88rBsN1nc<-otKKlMos4oiQy&Hg zN}_oD)l+u`4Ne!M%d}|FRvO&JrspVcC9eYHYi~8kdjiP%e826(lMfwpID&c%i&l(} z<%;uf$7OKixQ^UE9bTf!PVQ*@bc0Cli|5veTsE`{IA@un0KGW@J;%``BM^OxBh7s; zaQt+SCg^u@Qh>y@q0>A721jA`rw%5N2x12lRoU*ww@?yu`18PTo*&X`tA9Yx!t;?! 
zhy)mks)S6OwpM$oJ{cD;d=4yAKW%vrUc~C)>Zr1IkJFJL4 z?hUHP_oV7G6Y<;QpFH1fd1RucJ6ZMOwe!J(HWO<#Jg`^}{GMyGeaytM)1t z6xQ1_+}+nxUqTs$Of>-zSi33$hm@q36<$RA#^D8c@~G3I z_qqRx@3Q&fge9g0ts1AJOM&je{s>ZD4CfT_cY@}-dZMS0>9J%z{Ko=2y4v zr)%8IVv%VN`*%Z;{R*r0-njrR+^WvG&*f!}O@}|$cS_x6yzXsiGU~Lofl8+dubL35 zTy)~-=wy!qjM^+;*KwA!7kl;*X&6#R!eU*__g%{)5k<`d+hu)64vkDT8%@WibA|%} z+m@5|7a#6Y3Y#Czu8slB*Sy~cfr$MdTKj7S?vrCxGUKi;fF=F1GiMkirn3eFb%9r< zz*#7&w;bu}zW)J<#&A(gL=yPITdkT0DCS_Q>o~<$(*=`ngxX1-VbQxJj1Xg5{GELI z4o=YTm0!=&bv7DS$I#+-gQ(0hK+%qFI$w)ZT~9U5M6sgJI18kknFtH3aZhBhL7(Wx zWyYcjIPTFUTr^E9niCenbt~{ExP)4KLIad!f;J7tWTPq>58Wn&$~Lmn~KlSs^>_-CQlilDvOC2OK(Wy6Zc%Sv2X zTkHHB-2y3Kkor#i|`_qna-dL&(T;LhwFbgQ|>fZ5cqn&Q5H&K2g;UMBKwxvZbZSH$4_(fv>OD=oqL*N7~xRd5cF%l5x1}R zmFt7V`Rk^I)}z~dGndYT@`@E3OF%z}31GApt+)%WRCr$8rKQa;{!GZZ?N`xH z_n*f9HdM55RH6`U9Fxb<^L}j%CSLJPP;(Q+(8BqaS^$NLyM=|IJTw%4Pb)>C;SJVo zVCt@8;nw%|=K*PGkI?85#e0apgf?n7qTxblBh2`d9J*zJ6OyDSX$Z`d=Z|-Cet4E7Hi2`izbF9xc%6otSCk_)|7(njul) zH*<{`Dk&=D3UruZjI$_n@9BRn*Tp!$l2szrCMm}>DN3*kiGBIt)#S1BVVIUbhtNg< z?S?GN;<8rabS8o6w->wk^-=a`@Jd9Nta)NCWuWXTYy*TJ#y}{(?M1Q}`+ZH) zRNi-!Ci&J?)+dJM5eYpVxRL=y`hz5fXNYca^iDhlcsN}KV;)X*m!BUchEqv@*FxH= zSSBJEOOLCZK?;nne4yp&XZL71)_SiR$0-UFV%~Ep-jo1ZDiWr=ib5p5Q&C?6xon6>lG9#kMdngak0177UOLLLNiZ-DstE-YZ>)JBJQ^krk8yTYZRqV5%nCqIo; zTzB`Lj!~pp)ax7>L?7~>bd)4NQP63erx-vW;}0jSBeIZaiveOuxj0$q*GPw+j)IQJ z{C5jA!}35~#U5w9BVC5S4v5dTSIW-$xPZdY7%mfXKM|AeO9go|2TsIDFU5Vq)!UdnD(HE{_b zkqn8w54_KGS=E88ri&KsHAfQA*mg(}|k zbId|(R?A1tw#FiVzkP}34-8h60X8pByU;!apsWNl+(E95#^cW8deGe2%oKJtW{Uh) z6W#EESKd~O8K;5R@}daZ)OlCV?u>&|i79X>^1LsgtuiM0>HQFSc27DjS5|2Kw`n8m zVMUsm4ag!Qf9LqJMH0tCfl69SiC>6y_nm3btPj9$(~prJAul%(eC}mMzD!rU+VR&t zaeG!uLB^02-t`Mo!>W{~dw?+3Uam~K)T9a-$KZv#>aeTiz87X5nSgDpe(f0HB z!1w#`aW8ZKDT^CdOV{fZ)BUlrWId-yGq2WMBK3Q<8|8VFOcDtRy`uP+y>h1+GZZs^ z1WQVqhDPw5nb;Jom0)>|A=+lR`0&@^7vFvOSc#zcx$8iLdimz>4#$Ah&t${2B|+Pi zzlyn@hyww)rb@ejN|9t)PR5_V7Dfst=I8tLyqDGMOAZJ}<+WZ@L3{HO#Qkv#sdRtu zQ!~{}H_g>3P!X}$ljpwqYcDkx;0UDar(s6GG8`VOls0L+!S2Q;f01Ste8Bz)cm4NYYtdPXBIH{{F z52#2s9W>#ncG#{D<(KD2t|7j(%?}N?2EA{8f%hOrz~uPHD)!7EfMK2RY|?P6*56rj zC6O1T0t|^Ym(Ds?Cs{S)EdiS*>FEeGvpYb!OHGP^$I;SCopuoF8}ZUEVnMf^%qwPP zjVg;qzc28gRb)Z*g2roJg@bMnX<3nTqGi6rnSf+Se#Kn_U-(< z_X}8)2>5#{nJVyTpz@u(gZdZopb*c2zHV2!n#l~hqplB2Rxd;(Y&S24mFU33|ML0? z1Wx5%=Mbq$e5AQK`_Lz)yg-ByhyjkSKvlK^kJs7u!+^}-@7h`RJ+CL3G4%D*Y#_Ip(-?|OkLt2ihtQnT$9(Qn2xkTPQlH%HeHP#^&3uf(eV z6dAoXp95`Z2>?OR1WsX;^T+3DJvSR{D-LgxUlLUTqWq3F+5)w{ zE4tE=dQ-5(l&cciNuXP@ru+m%N1ljSNR7gZPL(*jh9Wmd7g}04qyvB*Qv(mEpX!72 zJ@qaG(!ZORkwbBFzvX1%LA9u=0@R2%jvGATZcBlNZwKgsnq3{0)eaWnC+?|Da_i*| z&_Iv)E7nuHatT?f>7Ry~pr>*!z#DF9NOo? 
z0!c9S746)we;=xU{?H=|zh`Sk&Y%ZEHZ(YJiRAfzZ$6qA?8Uos_s0;r*^c>2KwyO7KAREDMZ70u;zG4aQ=nF7=?etl7oEXccFN2E=}kbw4Xj{PgOkNgV;)RtfUsO(^hy9z;fX1~giqlrDjy68&Fa3qc-g z&emwFZqHh6j1*?XR=y0J?-7X`hw0a^0pV|nh)=_p3T`Z36G?Lm zaN6QSZ#H{}t8-Ca==X)Y%$M#5-(aA|H!RsH@#2-vk%KL4-?91eVh{;yd{W5Gb}bpS)P4_EE$6Q>OqSLv#=El;Tj^20k zu2Y)Zh=)A6T{oaHX&G>SrQPA=-To}6w5L{}48!HN$F&K7NR-BnaW`STiP@U;IDB&o zeEyAFc%$6uD5vl^zHl?g#tZ>qdm=b20Ve6@)vw)2drQu8FF{c&O84l_#^=P%mn&;- z#e^%?yv0dwB*VT4K9aU5o`!EA6Y&)N=NDES36dyt~+HN;_Kd zcCu7x%^xkj66HM?AUc$#eUoN_kCy?N4uSh#G10Wj*cv2vVm|-5%PAj_ypyEOzRG~f zK8(?-?;rb!LXrH(R*De>UzmVy@)2y31}cT$rsPm^9G652g>w&H{y1}7!O?JrIY zW)`uBTemm!EmT9JSH^Hu+GrvBb_XC`22uXn)e1ycQ2Az6JYlarm7W`cZLE&{^&PG+ z9>Db-V{I3Q9N`}U`Rvki5e^JxJ$OqpWGTZMPr@yeZhP=On_AJsh2VRfO2`#p!o!cS zGj4o69=uUnZvBG@uT1qnR;|Yi0TNz}>m~{mRd>>rbm16sg|mIvkM>iw6BpkAU^p|s zx+q|84(2Kr(s&EI=?8MWRs;&;MJs%oKSTAiNng1Y-!-)3NKbN0dDtnQFgKjN6QFDM zyQz_m{t{EYYEMWBpY~=sp%o=Za|AhECxAj3Qa5Qof=cIlR5A&Mxon^bhJWSwlbFjk z8%amb7i80MFKMgoH)kJ3Wo&kd-KxWW)jpb6)U~Ei)nN_YpyET2Kr%9SFbDz9Dh>~P zvJ+FP9c?wP7;_0Dc&?D2YY{)dbIC|QO%hErLHALB7C40qtaa{uy4qhWfQ}He(MHTC zLFmX>31qSr^16Njm0>WAfO%RU15;n5G#Pq~DOb|_1c`*t68*JP`0d?)MImjGa_>BoPs@DzXq^l(V+H*{u1WH-Y#3&90Ov1 z`_jjll$A?}iXz~I+sc5&;9jVXG&Z2>1J;z_n4QTN^F z9hG?aiw4fh0gMLQlcfQfPQACr&WuhrQv5dUNzP?uuiZw%6!#lY`As3S1bTbNF)VPK zP*DJyJczY!3yo*G^Wp?*-@cJ%jdVjomk~smhCqP;2E?uplFrgumK?2~D4x`mK|CJy zNy#V`{^LQ%=Gvn%lO<>lqxwMEFx96c&ID#OW?dtCu~D2c-H5=>Ckaq?hLVi(i)KfB zD%~!xdC%p+ZI&mbY>>iGXZHS@2Hp<@JS@E>am4cKA3mx8Xwyw;=8)9Wz(nUY;~g+%4juhXm$uvsKj$WXMQpQsuw0d;{1^DKKrIn{7^$S z^d5r12>QUy1VJy?W#vYg*iDLm{fH^d26k_#JJ$)VKX#QUN6YPq8C1| zInk!|CM$17p0)dc!p1;gR{TmNjNe=S+F^ycn@1-|7q)-vq`7GFBnmW z2}yO-*DJL#ce1>}Y08X(YXQEwzb6WsA|X++k)hgI1EdeqX_G+YTNlV<3(&qlr_Kz! z1n}yeLT$hHVv&BxWh~=Vl5g2N?`2UQA9qCZ;WBl8ml|=sH5$w( z8vY6q9;RamkdyW?5v}&jN4?Gy;hyn&wSnq+qN}$HD;Hy_dRb*FpQ-{o@dg9f&BfKR zC-i0bzaiT_KM{8L8g56-PIsXVA>z*PtA+c@y$(T-=ij=wSs|;ZddJ=Rf)| zEG9v~IOPh=tX>h>1q8gi8j-UEtB3jlPIkxnfZq<@_dYLAz=;;z5gAJiU^ z(*nBcJ?;7^X4s(!uDb3u&2{#d(C^F79D>LF5GAjtfj+J2OSotv)ymoZLN_z%2R{(2 z9JLm&09-#U5g9OxEALl| ze;Wz)s9V#u0DhfBnAZLbNNUzrD;s4*ZZNEYlU(sD15Bhf{XeSCIx6a}d;5wgpfpm_ zARq$LosuFYB3;rT-3`(W(hY(rN_P%Dl%#-k4Glwg^Pcg(y`SH^)-3*V%^JUP&feGc z+1EL)EY7l`-Tr#LI(w2TtlEp;77V}sW97vuJ)h<1U)8Rm1jzl@LpC8} zJ7Z6HwvRYXnddP-w0FIsIttJ&xM4GgXWzkfH^%XB0xt#6F3I<;jK5g`;3(dl5YQBj zJZwWDwH2kFofj1~Mq)(fGaZd_O~2f($-=kX?`!MbKbf|1(dYvkYUYpi*R5_2%cL4L zJfuxgd2B%wLZBIHAiqRm`$%<$s@=qvt<)D18Wy5kVCS{pjUo9)joi+|P3Gf}gr|M_u3`UlKIVr7y0NxX04_7bODM&Nid_9@HRVX4-+%!|MZh?B45aHc#&)4L3#Q4*CS$JVex5T(hzC^u*U{etBHhg09oxq|~fXx|=+w;#AiU9Mj ziZtKwpZWp(rAEGjouX1Gd1K3bt zcxfZh-XH5!y+-WNlcSZ!lnt<*kgM>M1!Z4P>NzMHR}SN1?~H4 z1Ccd#Hm*+8@!!yjmuYui=u`az&rx7@xvaq0WNJg+*h=zBL;qK41d=zxx&P> z=Q+i(G478edaF(++F}+zCYG1h0JXt|aYs;*3UmQp*ajg%NZwbYV{Z@VTWuswOgJ_T zKB&04dP6skrOVdpl3x4KX+nJ5qP>-=ArA1yo@6LEFm zlA3Ecpik#Tg6bQbELzpfa^sv9y_LMC^7fw7lvJDhd{_f8gz1TQ-YJ^@H9@cFKb>Td z3gnFKZm>c=3eejb0TUmGTad*M3#*E)0jM#r1tHjGVs{@57J`@cpJv2adq7w%=vE3+w-}=bF^>l>4#iWzv}ZO3NlQIPz&>KsJcvV- z$OkNA7%bFtu1gps^J6OxvL}Jp)7uui<&zg?x~EThSAUm0X~e3m!KylT4J0&UWLp7O zbNw;$9yaA25h+2Mpll2ZVMu;;U2qYr1G6rD-!r%e zL2+aN&Gs0(7kd-5Fz{5ADr*r9k}!eoDEJHzLy9bFp7@G9Qy=NSJ2T@I&ef zO@?JZc>)tYo82Zcuw0J`ebukm7vd96+3mC!Xe(kvyW5W_$--(}OVupgJe_%6$g?PP zAD?yR`pM;XIAKK&4uF~jk50=p*>0&b1z%M?+eps?H?=H!cusIfCO%EA`NOiXXan5p z;J*fm3N-+Up>^*XCDlGb4o_!#^Sm|fd=EzHNy(^&wT<_$lvDirPBaIyTEtnp>sSCm zXEA#7W&4%+v_2oGV(Rg2JoWLFLMMu#bdas`-q&GbqI;3BWke2BmeDxQNOBCB9cl~) zOZVPs4?;fNJolio^kD|aSlIC5)nUb3EF!B76VEOy4z%y`iB$UgYylD18Zna5zPFo< zgMveaJg64bsfK%G*q5AKpegS8u8wZ)!`E;|gOi^1M| 
zLHoO)7&iffTa}B9cIG%sA=euuOhBccGQiE+owjZ$s_lQ6<;&TMzbgYB9omz_VrEJj z{0*`P2~A`z@s{}HZE0y(bbk_4yK5njLxcrJ+2Dg%ZMD0`eU10%lad#Ho=ZoOrgST& za?m~Mn*tY@|AEGQ+`*<=!1qKIyP z`oFOPSL8jhLrG%i4gZ3QBB*E!MwwosVF=7oW! zfeanO)+3%$s6&rb#M3j3E4@InRkyYj@T^mmlbaW#iXKGPO*k&!b7*rz%eq6M-icoF zrp>D#@DZW2woaWPta?o1m4bUM_hxruX<>o}h^bUm#%nL=duKVejvKtUur7Oli2mP)@-9vYUNUH8WrJuq zEJ;h+huU~kvb(XDQm!%s9=XWMR zoh|RNC_^}L@K8}c<{6p=kclpLgmYLjx*2+v46{1kl7fLmKMZqNULcG!1aFgMU%uDP z=)$c+y{mxj@p|ZrMh|wd8w{4VD;bqnVB;DeJU4DFqE8|At7}`lmNp{EM&E1S;Pk#O z;&Q?seFi{Zl~LO>VffKk^=8yTypuepU3?86@3ynCnHqzjX9H_1V4Dlst~Sr^GWz$5 zKC#dxIL(DzsC+r~B^rc`Jgr}qS6>0r1LxDbNK3)wchNU!N_WIQa*InIlYv;@P#aOJ&{ceZgJfAKOE*RJ?ymS(;wSR8zEP!2VExyH#EToc^2D)q zNXQ4gn$HAto<0>@{JJKf11-#gZL{qsWN!n$!JO?VkdgI-BnGY>DFh^G6VUi^coWN| za)G2HC@gz|(86c&V+0R&rI@0@g$+}spU;OjUhLBep4$p??;C@h=axWzBog3D&{L=- z4=c`o*57!C?Fh^VKEx_=pN$3pq())b>Mn9X8f_L0d_}nd@M~K0*Z+u4;4drP$sQ)5Aod72KS|fQY_b;&XUs~-y@U*l z#jshSzCa=`Gd2v8Ha@c-k8UL%;2aDP8$k7y2tjIlR#?{ggsu;M@Y^Egepe}mMpY7= z@ClNdD~1kv5I-JLn+DSfJ<{ACf{xCxeH3QbO%+UJaM%S^laRUyWXY%gG+g7^EX)Iy|X$3V@{U^ya^*5zENBECdFC!Ysn z=ko6`Y#akLe@c)_CW}ujpH|s;28q$5Apb(cqkCM@k$E3mJmqslDN1^mk4Bqs`YCAF zI9+*`sBS@k4Sug7Eon~>4|Ci%922U?ZEiRcdYS+;-iE$sI-j{TUOk@7E(TAuSZm}E z>5VKC-Oi$<${FPT7iC4E-$D>M|~drl=zDF-3NH3QyLa{0+2``oeZaWsv(yx7<5L9r~EZX zMPo%%7u*5To(B3uIt~x5>}P>ppKU_!qUw3``c6P)^v&*oN!FO`zHFClMvA-WcROwe zd;ldD#>P$j#2eUwbz7>Qi!MDNNwB0d4}pwGSeq+ya`T-as%L_my#sl1z7Qa_@n zuj%H(Vx&N=WRc31WQidh_EP?;e}ZWfeNCEGT*83B)?1n1=3U|u^980D$+fSA2hg=2 z5BlG!8sLYk`)24*963nTB4j%VP^mgkTbxnYrZ4wk?ebfaUeDI42!gB}R+g)b4A|7| zbxlDGWp&nlDF;|!pSM&}bxx{d-$Pr>EATW6t&VkyG)yp;=9wKv zs0Yay%Al*5-+M&2di_W!vyKP*VT6b!P2wt zB%|+u@EI2GsP1sqEMv<>6Zn_XpG%qXpgIW>UN~)bFLV*8e}BvyGw{cZjg-2h7`unT z>$EKG$jdZTBB0AvMkI&6|NZF80&pyuaNKn!p&uR@%GP;l{skk|o_?aM#l^#6`<4zf zUc-`<>N8+x7F64BX^X?K>m|mz4Kd9&rC*fhojC&o$~Ie?NaI z)gXivGLt7~nohARD`=g<8?qES|AYib(u_$$i%vP5m^@uSnF%^sIrye9;^Z4uAxy#+ zbjb8j8DtZaD(RVJ4jI9oWY->S2MML0c0qgn$9~FO+z8N+>XJ0L8&lS$k&k@WK+LIj z58JhY+BK0eYmh;KkS$v0=Q4ji{9wHKhARe3pA3d0t`7Cc=xM$3e(&Ob`^HA>^?Nrj z=Vt<-?m49{&Ke9Q#Ac=Uwtsej$QxOxLp+7bWCEeYRQy-!y%BFF5VK%6OgHA8!l_+C z3?<&b(Y+|cy+;$-zdeHxUwmaoc~*jW>m2Ej1a@k_T*H=XLU|lv(W*YTxUx)22YVgf zLAVEB)dyTB4urI{b>}Kg$!fY2ivD6BoHTa@amAxqr&?uyeppPSu4MIauBryPFht04 zL%?VkQ7`NMS^`8+G`8PgyApxA;K1fgl|$)=v`8ui=}o?Ughbg)xaSS!!2X3AEq8;h zg@bV|&3izVxooUq*Y~9nb=~z7`y_6Ad8ofu54L&ASs&VlG^w4068f6Dpcc-tczXe{ z61I(`m~?4mZGg*jF$SNekb!;F*EhB%#Uj_1z=qb}|1o`Z7n%3Mlc>B+B!npCh-d!H zs>i0!okUD%+cAPMgCZtIz1EXFooJK7f3Ny@OiMbr{qo$5aRBFL$ov9zF4EgleX;of zeIWww3YhTeL8rd5wv}*Z988=ivmsm(+82Htg6?k4dl)!RqKzRR4fnNi_A`>0yH@rm zvu6Z_MGzus9T2z}nhN*X7lEe55-Km3k^Cv@D~E2>x&BLScaOyWQH0t6cCys3Se1FV zl`HC@#a7v0Az&-Zh-wMJp|NB=E!BqxE~04mxpENQe5a$xWPnU6a(Kt*UGE94fA-Je zV987P>iEZ&1YKMoGLaqb;mM=><*(7PKX~r8S6)GJjY1gb0v>{0h#;<<2W7x(TOSGTU>Wx-y&n?fTDlmxp*R$T z!mm%i7t!I5w+rW7y7_p@-nDmbE5$T#7%enrHRa_t%UFJMMOmqQtL}>a1X)Cv65G3d z_E9*7mCnnEZ;hp0;*cjeJWO9ww@o&_xXs2L)YJ1QQ+)rHRml@AF@AzOx)1J&y!=O& zz`bn_41;b1rlUK03g&pu?=F;Dq6fZ2@jw5`ctRkUsucRSaO*)^2wcaybm^Z3u2mnH zO<(6O&|Lgl_e+X|R#5!<9*#T0#f zM@$w0?VB7i62-k0h=+@Fw4QW>eOVspDiBKlGe9C>Saj5WE=f1BzJxjvSK%YoQb;DG z^!+;xKslgO@#f>l>vS+++1Swe=8z(fSHjn$>|}+zPz!!Hal&{d?29pum+KGIdTc92 z9Rox%OA{jCk`W&=WnEzbr_ORrHt~a)+YG1YzJ&UGA?9-G_(TVO{@#rU?FOzJiY6w6 z#7jetdN($YOPr$rFf(4d`9rnvC)3;5^;CMD1OT)RJw|q&2y;tW$XLKq%wdE29WHsF zV=SZG-Jcka;DO2I;5-o#y-Id~dH?O*MAfKTu$MsAPZDR6{s=nAyLUY^jg?zd3K8`` z-2hoUCZ-H5^wnebO`1_JKkFO32nxB(zGq;}Hj}QNf!fk%xvPkT-H+bnTVL2D`(@*A%ko|)^da?NFCp#a`n z+HY5X-rGCv4Ivo>Q;2DD4$FyB3HJ6kL_7E#bJckrek;s2Q|P_W4FfUEzm<=pF%neW 
zh}Y3f?v4McuE!Rby)U;D*?()uP7-9plLI?O#Lx)*z``0IB>{LLee06Nq;XOexgVLz zX(1oCy9b%6;L{Y#P}T?NXkCVQWtl^FwMlOFrk+B5=_H*-EBoR^D?FVKjD`|=2N1tE z!K(yZX(8o=XW&W)AggNINeaaGmGEQnKphVvDJIRutA4r_&H-=16-_-<4d)U9A^Y9@aMzH=KJ%mOlqxN{vQhEo$N=dpllqL4`6UD z)W~l!PoqxXGyq@J663yF5btYSd7AuxRxaOS(T~};Y^@1|Ik?&lVpwlT{^AA>fF+#b zD=E-KH1JYL=sFR^fc0PLkL_!MKw~tD<2Qi^`ib%GqHR(0%?0gR%VB4hd@X_EIN)R^<^dm;W(l5}$q1UO8GVgn!8gsKIDb3!)0OZ?kDS zr(;_#BlvX;=DHXel91`%s{_#e%Vj*c_Ob!5OkxrAlI-Cb8@Gq~|2=wt^oyr99 zyC;j2Hxa*^(HG&?^a1mjc8!SZL%h62!TJTjK=;flIq^oJI&v^@g)D2vI#a!;r@ zNH6MvR!_I|RskBtT3Pn*7CDNHaNGU_${s3ouUL6j0 zTtGk_vI?YAM(c(IPzDTMt@1S6rTtf;=RLJW=-06JVCiWo%W*)U(f|J!gc#6%+vsXa zR^aA#r?<5D-s{gVDAs(t121XTja{Wb%dyZjyDOpg-x^_pgosT=6m6BEUF05x=j{|X z*#F-RTxS9o**TH%hckFojXC-^8Bp`*%6w^WBlx^-hEp}RuQ;#89`}PUHgYPs;?=Q+(zZkR=FG7Dsad@Vn#9yU@7($wBtbO_+m^c~LBuAk z0A5Nl|7Q9jw3uv2-|;ngDpsjVL6tuGv|bpvjqDp~;C3N)ZyjSlA*Xtbqp+&+ZV!ibXJ{tjmsne#dlFE|0Lec;I0ocC$ z*t`JP*pih{-~zkW<$*1rNt}2cE)}zri?>a`BmVpRYDNz8|VO^NkL~$XA#F`b{4=;_@-xHXO zd^lyhj0D7b`xmPQ)i5fO{)DiUJ!iO#qg5i~S9j2oIuiMUwXAH@LEu+HvU9dohyg5YFpHn$z!0y;#iSQWgGVnu- z(MbPy2N0-I&vFzHlN=VHLn!(&^X8=!?+6`+zkQfJydZ!gZ z5+WxMq#2!uPiq1P3~@7kP3ZT)9+w7WQQ9f=plcc1$ebS9n0bl-mownG;jQ2`C+tZf zT(ykHaFAceKYW55Xy;GL_FMdP*K?V53Uwa@-6mtD2B3^6;8MVKOIh-WEV0%$-lst8g5iYtJq@aShi6+3k`y#yY9`S@QclH(HMk4Vr{tfLW;rK@14m&m!`VvBvra@Rcb5F=)Q>8WEm>wJj+D)U-dHtLk2ku<+1z$SQ%fW zBA@qEfDD6#|8r|2TR>Rn`}^a`rO)Q$$|iD%PgpRMmFAx{Nz}(I?rb{v{DJ4V49}l z?1hG1&qAR4v6Qvn0PN-~uYY8M1(ggmByrhZK(Nloj2Tx69^S*L* z06&UA9RJM#A|4d_J=NBE{bi&lv&D9n+dZpM_mx@e1U(TX!~#o&EN9lwi7So==F1fk zTD%keUN~5gZ;Z=RX6h@a1B-eB)wQ^*oHY8x2MnInHQs72Ib5fTIXu(~bfk8vb4>d% z;w^78sdZM9JckSKC0BlRxif84$no>Lk%e_tl-qg%{gzt~e85KbWMFAimDdg9_~`YE z$43@uMQav9(MjPW@vBRHMH~C>^@b5CZ8zifOmai5KFnG@oJl{QlH(tKYF9Yfw@`4= z2IozYs|6?8^A$Q38JqVq$RGBWd9*K?Nm}THSD_wGTsJ4`=4;{hnelQqr;o7NAvN+&ou!s~RhVh9Uhqro_1Dn!~0WtTT}5q64@z z<+h{^M7(gD#|EZC z8VS%MvLHRZq_0`6kWC9V)Evg4)B3rfo-{`Np*mje)so1GWD>Jj9zL&CO5lm2vpI9G zo+ZC=z17&GuVp&wv9HtkvW&;;?ksrx^24r!6kki9_{X9rOTo3(XNTiI5vvqDkx zR=r-O!GuS9-Y-q`ZFGkT*)GIkWj`Nlo)3Jq0cuP}J-J-IWM=9N6M4?ooixnKLWz zewJEK8`RbXf;*K*>Qc~0YfRj~AH#KeFTjnm(DmGxWQ=8xKL+;t^CqKY=P7dxpFJV~ ziKPIGGAwU>zPVY5?s4HRV1wFwL;SIXYm?O&Bjx|z)W!d9>cZimNOWdjGnO<5=bIdf z@dx!pzuyH?hf;B)N^E^>w=zR4;Oa6TzgThxRHQ2 zR##e*99{l=yXKrPx9SCK|K_HRfAZn${C=D9N4Ih~y-Ru1*D;1#y=4$>ay3=QtR9F0%W{)I5QMQA-ADP5@?!brw-hVDE1`vW~ex>@Z2E$W@MVoI7C{hgr=n-V$cQ* z3w7v#fZ*?Z8@X|P2om=Pq{wA8!qFcshM`G-uKd%d9(%Gxcwo zKW&vp1^ zeP$EzWw8#=rx6j@Fcje~(C8g8vrzMYa*)ov02_0e9~|JI1Vx>QuTB>Bdk8if6$=kXZ3o;1K(8%tJo)lg{E65S;MT2NWxKJ2 zgLopz1ge8#9Yg-tpoNI4BD-$%Lm!m&V3hMq(Sk&>TQmXl#Wv9ACU5B> zclK?KYn}R>=iE#&(xQAfK3BeREUJ2voo8Z-qqE?qFb(Mk7J-{Gwt8s4?Ny-axQ*KBE!11IG&!Z(nxIhz zleU}znFJ{S$R4JIs#t8}Yr}u}VC6fIT zDbUv`afcb50y25>3IwZigNijA5uk}NUyu;NI|il^OPz)g`@061vD-pTs9ZYObh#qu zg%?(A2_-uW>49L8m++N=a9M+wF3!D|-uQSrbv!jInKQq}KT}Dl@vnnGI*X7P4)3e8 zD3hGS{{$<pOnYG0+x;p(rD&im_Ob-=g1$m}(c0Gsdi?Pbf; zqs~zJOf&5K3Qpu01Xfn{id3CI{rz1I;Hu}XhHj%uj_WqMca-Cg92jT|`qs47AtI;+ zvZq1aX4!EFLQ{MCKqK@GBu4(gTHsc#Q&Xt>{U2=3Y2ItM@)P~^cfl>|{w%=UKBOq2 zNEjh63wvDzsN_$dfPZQ5K4M{$kUK2DCRT-;0Z^0*fM?%++}Hq?QPq&`H%l;J!lCxeOt0KpB>SgEk$PA8h%_^YUVZ?#4=$F|AS z)(@sBFkeIqMfi6Qm&+A590sf;Lhb$byxMkvcyw}b`tAO(Aa~=}CR!)U z*|j9SLI(@CP(#fT++);3{OW5%O=o4hF-^FQ+Ln}Txs|l6L2p?RF>fDv*K2s0bDCk} zm2(xBB0QTJrL@)VWPZ$W!n()8rFzG}R8|=C8Qs*e9*>VuF>U|Nbm}~rscggonSzM2G&2m{mTuOY|-l+q;+-bV@XtCUQQ7Ua`%?TdZLuJM1t?^Q`OfG4M zOkh&0#;I&NkX<8IWI|*zvuitVXRf|qFS&4-98RY9ioU2mJP|UcV{Flm!NUoX)M_*U zAC;Eh!}}7Uw2Uo1Q{4x!UcWYw49avck~+n?+adGdCC!I-Itgrk|I!B{Y7^bjb134N z_py1~(K#l9Jt3*AnHJb{pegs(Co<$8 
zMz(;Go8h(Rbm@kQ8E9O?!qQpnS+gv8=XwW7Lp+GGTvmZeAMzKGj~RGyx{YWaX8rRdv4onb=h~hZ9~f8X0Qo|Q%-A%90S3ru!7l;__uuBQt5`ZF=fQzE`JU?V z8{q1u@0)USV5QFLBgGza_O+Jb0g^IXx^q>0o9@s}Uuj zR`BdipNh75zjaL`v@O3T0h=t0-mL#Up@QQc*7L%e*9JXpSM{}5xm8!U@H)-YDkB#G zt<(>_R#+9p8Jy(W@6HjhZvjk5{624~*l22&9aC_1s?7b{>xU7Wr*n-`u#Jta^`31&K z6Sm&LS zoV4j;4Ly0WU^mnf^o!hA&NhzXwu>xb#p-7o!!35;{FJ6a0^ zv8w_I-DwG`nmr2%0I^{ttXM3X)Ah%oVnRLt==xZ5?mGAY=yq>Kj|e4R2nOua zK}mMOyh0`U1=Ku>I8KDdnM1CAtmnQW|3+?@ALH7pAUfAtMaMNq|K=|gxDzvcQe?!n za~Q6|BD~aO1?+6&`)UXFS(wb+qk?enExT0Ck9)nc-7p+hn;VOGD~c!TMtzA|qKYr$f!$p|K*_x~Q+vYZu*#l!JJxZwOO=2Z4) z?1JXM2?#@u$o2BG{c=2dtVm)!0HK6Y2w=jxoLdBAmQkoPf%aG8)r&k;BTVu>I7y*>`qdC`m~h1<6dNKcR6o| zW3%W4br+YmJv@0cPfW+%*FYYjLlV*+zUJf_Idfwh`W^x%?L`=u z>l2W03Yxzf8aitbTag5S5rcX94s_x|Be!P9-#u{e`!s@qXd}u!@B4S3&Hs#)+CStP zcGr{GddOMT-+pU2 zpFh22{Kisx-LhE!XMPpv$5_6KJ{Qq-+j%H*HNMKJ32N$tg{MEIR-E589_dmSV3+RxXHE83_gA${m+(hH_JrPWUi z_JGZ2O!Q3%OrOyP^SKGYBEK=DiiUJy(Rk45Qr8(!EBxD7jP|$Fho5KEnwiEA>-ARcninteWBBQ64?O&Lb%>UZ z6}rh!+vsI=$&^$#YJ<~1vG>KDGW&Y=apllE^Llq~PJU>?%xnFx4$uXL!r@ z$l9cSV`Rzbpru3nL0l(ys#3|DEn6pv1?t>8;$PERi7+WRf=*~Q&-@tAv3-*FFA#CZ zfTl<|o^a@*dBED2f+8`$_wXnZMLOu!MGh``4)li>C~B??f3!JME0gfJwn%%@&i$}~ zT7A_;FQHAO%X#X@Vucww5gL+&YI$?dTF$Wu#SlV&3d&#HR(wTvC=9416S_5^DVP~> zz5x~_eX0bc<8+>dEHJ)1z8oivk74pTow7ooI1i{yv^JK!DID>k{uzW1p-{NC!U{Oo9jlPsL+j_M7 zNIz3*I_j3cr!*0QGYvdXqAsE_Ivui$As1FlmLzMHDi=WOo?URuY=bIbHA50Zu0 zH}ORISa0^U%I24bRtP@mCaohoE>+W4?o-cRp>CqWBADwHKI<^!9q>+K7*;3W^BGQzxA>PkRM-)F8`8kvNa! za#!YkX1p&Z!T8A%cet!nGIsfBUQ^(ZWE=vtLW=F?8p2z;>J0hq`zyN0cL2#P##ukGECu^AJ_E3QW zFj2Y3v0lK%D?BAXWroZV-^e$Ai^j?SXd&gPCK1Q#?DWhVrV<7Y=hcj~#S`e#N6QVh52>iO}GR_14eh zES-X=Y%E1X@%n%-=xuT{&4}D1R=&7x$ZnFxe2MG+dbHeU3RoZop7vd+Q1{@jxS@S{ z)D79nc;2vkaoF7qCV5*OAhq|i@m%#onr2LKwHbA25jh`bs7MtrMy4IJtn&?YOti8U zLCDmlsp=C)AzR#0YR=mYya+BLu+!sy-vE5+ap_mx^0_X|Msr z7^&+fl$+{~*r0_$*@e`6m$=PDslM|Ww{GEb3W$~q*ME*`$YwS~*-Q6Wo1qtrtED>4 zHR_ntFo$}+%_7!?VtIzlj`^Cq;h5<%kC7wXSoQd!INHMblSgqdy07SPg_$*w6UHky z0GX%S`)vSa#-!gX_?lj6c+jM0;?>-SH^H+g#gSsz$LqpW`LUNIirsD_vc6B=(Ri%2 zl1dihHUm4-UsX6PJ^#9w^=EuG)wjj5(IZvm#oW-|o5r6V;{8mrzqu#HI4V_~{vlev zN49*tm&nsNE_Z6FuWmzhSX9kXb^Xj>J|Udh1Dv%S5B+X&slmS*GV~}+GuO?t^FxTs zDCithC-4nOaR$OSPSIAg2q(nokfMoATW9E44u!*sqBimZVPYA9E}l~s0NA>W&?i_UzC zksYbO)CB86_-vOl&KYlBK-R-F#;&7?a+=s)2lTLw2wMcKP(e^&>{plNdQt>%w|N%B zQ^HLh_^A$WDmAkw4$E+DbwIleN3mThq2&}CSxRR3`lbm|Bv%A4a~j2ww8JcYO@}k1 zJVHT!lGxes7tUQN&LZDc1s$H%1KdS!tecfFv$mc%v<4l4)3=u^y4w@oVb3^13k#DujVQ3TL#qLP`-Hop2vEK{992Vv zW(Z*HcD5;ztaMK`aB{HUwaK_^v%Me#M}a#fyMNG2U}*!4L+bRflvU{tZP;yl|1t zgK8IIchn#|490}$HFYzXxYt|+IB`7fI}%bKppPsh34_QFKSd_}25~Q`-8?}2fJbi9 zFQRb)=0)U(QO3EX=cD@~CwBc_`|-jA4Rg@n#H(+`x{p=?8OukU!>MB zTWT=y^fhBE_)>J8@AY*MCfTMMN5%0(-LtC;cuCDT&aa>2=wQIHF?I+wyCuVgBG((d z0(>@R8YhFLTPK#L8YM;5Q_q#~gZgO9aQOpd#tkz2Nky;bIl+n^n8hZcY6?2MJlIY9 zqh78t^)JhZ>jx`keV5)T$Jak^3)}sbcy886%F=Q6XDB>r((kiaa8JCyCo;ME=>EP$ z(1WRNPip45qXSyPCwHf#g;&iS27=X+Wkh^PcE+mzkJSJ;h9^iutI2ar)w}kq8%teM z(@^vSfKI*2=ofH*gAt1E0BvXPN1Wyr3+ps@BjL-?Ul(k-Y)^6v(E@AlmN=U@@qS^e zRs?Zg3i0gedq*ondjg1eL5gKqPcSy>a76Z3R~lhN&hw+VGD=j|aF4N|77xI;;D>jl zl`&^(+Y7b-%>w8hMJt>1VKphI^?92OA;yb7ij{rAP<^*GP(4RoohJ$HWx$#uWYx@{ zrIxjfc%O2F>9OLz>j}ooJFqz{`Zq7;*mMVPkX2y0=Gme{b2t9o;o~fa-_Oce>w*NM zyE1#OPs*b5U~gra@###}QI38sc&!FUa8GX{SsCni31%H(VVQi^P5-Z>cL% z%-H64yGJe{U1W~le8e1ay0#l$bT~v(9v&#(9k&mTG$=wbD)eq5+B)HYbIp~YmW)zF z8_$DW?u;kW37#PQ$vXG0DR(l~%QCm;s zm3zDv07O0H&&OUreP22jc7b-wQZGEapb%<=~Nwzz&XUxFf<%P$En z;1ZERVs*P_Y@wQXdd&{M#Aji@j-jx9-zFu-OqCf#CB4VH)sOGybg3V%_w+#gBt6$$So2Km*;|9z0ChTy~Ew0V@{HOz8a&Rs7Ki-@GD 
zAh(|^pQTeBI^=G(z@lVV?OhStv%?~RTt1uwGPU5uW49=g5h|qZ~RwP1~o_id@IE~BY4dR5d zmt^bnUQVxJ)?mWua8{$*(MooEUo^-ykJ_3RoeGGE(TiR=D;i7BN<_a*|5Cg|J7C4M zzHdHNw86ga4O>ab+I#4W$ zQRvxBKs)Ta(wAJlW^eug3<%eL16sZMr^N2T-p?W>lsWZ2T>*x^gsQH5{GMmF()bYc z_Cp(V>JyNU&}TCIf@4&x{rcrGCh^Qn^TJ*9m!Gn9Z&T$ag=xMkmyzL8-u>U_6&g}_ z*Fd61w8FtQoFOlLF&TGPi=k6Yu1p%e(l&_2KSuCKT_BRi&9@~^+Dm34#yVp&h|$y4?^!#ehG z?@%K_{Po`>0Xi|$TK1`-9K5UY5OI5C4>oK5_$HSHH15+jfv9w zDfDC$U7Ipp0vDZ)oAM(yoTp8LGCOJ-m(Lsf!CjC>k>>n6UuBB;p>jZs5ym1X^r1_q z6RHGusXuo_^v-NUaH?c7Mw}q_T)Qg+@{UPiHnrd+2*Y9&xttv2l%CmrmDtUuK7Kl^ z?eb}9q>>oQj&m&*p(uFxD)Ygh7o^lLF@Vj&z=t~bfdN;85M4hP?T49{eb>KBylH_; zQp~5KEh~r@qD+=cdvM6UKFEkVlB{nyNLi|};DQfa&!iEULH#fSKHPlIvsR!nUIQ5mIL)*Dmu!5XU)q>p^Hec@eDHtoVB)h zs@jBt_m|j09wNxaDzjiCu9$BlX(xCcLG#V-xZLdZj{KB`kF~EbSk^aH)hJV9vNJ><~n z?&rUakIr@s!l9}Q)=hDdH1+kf3Dol&)S~=#NJ5GHdu&x(KkyhcPTH_}C946?fSu;_{s;z4! z6=sGI#_ZNoEb?%|9ppPu5-##z*v>EvOka8>HpTzP)mO(w;cR^?(z$??bcu9GNv9&M zbW7*bv2=HXw1A+}-Q6MGxpYXkgdqA3&;8x!x%d4a_Om;4=FFUPzHt!1lPA)RKc~rW zr(50rTn>i3$Vt847-l(NGml1ha8nZPL+y~6`3XC#;`kJqcVdy)?Q~LVmx7->#8b=o zHV8ty%WmXJ`@@+g;YGTWLs|=BRp9=EZR4ILI?g~@t~Zw(aC*lfBmB?M_=@F^{iSIr zfXMuNj2eBGwa<0+wKYpq7wwCLHT2lHbh@Q8=X4}GWhK;4;Ni~Y@za7__VF2QJR|8k z!+wTgQ3do=MB!uOru}QuHrjXhZ?|0&UA&$*8Gu1zlB9hwpNR{_?^@2|Q-pPZ=AQQM z9uAXJ|yk_m9>DbJCjo=?v7N!%}`GeQQ>$i#AC8y(3zeC3VrFxL| z_euQ~^MA%I{Qb1iSn`(Xueib?P^xy3?{u!8DO6!_2ks3nUP9L*wuz6>!U(l+1a?1r zL?akJvyn{5mLiehTO;TiB{ zBHKhi>NR;Jb6I0uqoROpa-oK7dFIUR`bg9N+;O@%b;Px!^!Pmm>}uNq?S&dk6_n5dc z@2wHgW0Vv1jME%|ci z>vg8gA7^?twq!(+cVh`l?+4#Ts>U<4rnXRKC6Lk z#v$7e-v3J~Ad0@S`n&f3>WAI=9REyBjUpFR_qlFfl@>y#CEA_65| zD!_w?uO1bU0~G+%15kY@Kvq_E3%u-5lO%fv2M5ui3`V_#?e;z)#4?E6w{y?+fwMr( zLjy79R^ruh?(5!&S}7{XgbS!svW^K|t(f3LuCPI^|2TtSclKmI_ZmX!+g<#3&hO7@ zVN7a-R2VmQJ{KAG%`UGHHBdRY@u>W771n@FXi^fh#(rebhf6S4LKi}UX7D@YQdKus zF2YEr+PpoT1X^|5yc&%&Na@zztbtyw6U>Gd4Gt8R_BCeeqc%s0Ffb0{#HezMk8Yu5 zraD7ITYo}3wnPp4+CI%Dg6b!<{ z2hZ=iU=wG`GOG zGD7nN3{&C(%1YRX{Fv@BUI8T{grPu3;Vd0knpwwR)skj)a871td#?p4dkk z01dSST1E3N!4CYdWd72TgabEco8x_q$$)q0D71OWwsve~O zufT~DqxUBat+}j774}ZfA7;A&e2^`o_M2e6EiFn=#6Os7W;s42s%a$z5)u+eK=F_T z_!Vz<`@SfW4QwPBu&0p$_k!I$6$T0>@md&ZWYkaaNBVL3A9OCGI)DxW{oVBmj-DEL zRhr(MuIC(CsPiA^=JW{Ez6DVmz>Q(Zf=NU~M0Dv3CD>8}_ClKA!S7G6 zoU9I}g_O|oc9)y$zszL{xH#IpY3_6&Vr85y#(Kyuy}i9zZDu^DeeRd4Mvwn%U%pa< zZSj!AY~Ykn_YK4Go{e$uH z&;!-s2wp#jcJl|=R?i7UgHy5Z^Yd^)CG^>Y@V`|nj*#I#L@a<0yfhy0@JT@Lm9K2n zX{k3j8ihVgECgLum!Pl#nZW_0{Ef(OTE_jUB4pNc<^MX$1=)znZ9E!gLcgOoH=fx?OSSzzY3ujgk;1MwM0OGpEwj)Ki}N|6Lzfh;_p& zYeOG^O6_7dPr{mj>-v*qL4$=N=ujp|7OE365JYKq= z1C0IbP!r`<3{v|m6=jZ1YnXkYAwKmimob&G4b?pCG zfB!9-hkw5AJ6%vu#CQyT!hqz5`z^Hqo4#h=({Iw}O9WRz|E&6cjF^K%<&`}Yyyll( z0?eQPN0}0qCiWrM+SaygbIViin!m159Dxp!Cbl<-m*zahz_qz8APir>}{HXP~s z&pv)`!v|daKtf%3y`8}-d(xbSBmxEz+eZ{iFJ%BTjeJ(u)BV3&9zYXEt}4l@T@$BP ztd2B827haW@NKAmphnb}`c?GB*Y58|mYy~{bjQ$rXXweSUX|}(pXGHuQn&93d_QEy zXhTI8wVT~NNKI|YrBp$F6Mdp9~VL(n%9-dkz)yl>Ie zC-`Scze4;wg!HJJCMN?sTFmnXl3$UU+}AU2H;cxkuvX9J_XjiFFukn$6uWsv0LSxs z%}dGrxTi?}dPH`i&Dpvnv`XZk50X-NvTR47$p>%8}?M`ihOfU~PM8_A6$8uRvLk*R=nN4u2SGkY}a#e2wxSP~6A?DUwag z+N!nhs8U09?|htC-tyBR1Mv zJwGIO1Yq4+BPCeTt;`2_-8?x>o_rdOlJKSK3;D;3z#EvEPXBSeh~qX-{J#o9u@XcC z4EFH`2jj6uQj;G8W#`M_rv1w1e!gk{jgGK+kkyrEByNVU@XzVU@YN4awF-_R^I#c%h&q}m`<&uza-or!U+8;1LLT);QFod`@lP{GCJ)!t6IR>aLEVCPibE0WY|j3) z>Mxt#(o5+#I)2jyqqcKE(3Np@{a`>^e1Lup9`+Bli&!f)WZrJ9t4jjsqx!26(*;|_ z&P%_y>j8M#h!P}eBrcB9s4hT zjf%cb)D0A6*I;DmekZEP6n`L2rW$o&BDd({VnoW}JK zQiNTSjDOlJ{y%9Tsk_?B%4^hfH2Gn<#i6oRL^ijEr8@{*Tu&n3uUiTx2TX1cq`mXG z{-IapG~&u=VesNr@OmrlvOz}`hNE%g?~mn?jBTeDdcXFJMwrtvBy46Dxq|pASS==I 
zY)KCW1hSUz<$oNdb3|P=D*QOWnX7+DOfhQkZxv1|muXM$c7|6Hmd|_HSCYv0oklc6 z-z|SV!FnUB=TyA+%bLp!6NBw7)iZ?SI39lFF?CC=6+> zazH8nGHkaAP}(Vg38QoZ#H;IxWbd)uZiubfrrRZSDEq%l02i>Kb`H=Q*08$-5Zw zy+R3HiwNW!(`9!VVGX+urf1Jq|YOIT;N zM2i*>a}!%VH}6Pc2kQCVT|kSB5h&*+0wlTa1NTQYhOl2JkM z4=+0|e6hCh!7W`ewk$u;@?>_|<5~ONc~znm8D&qq){ zp13Nm_ifYaqBv1c+fWT>@F^(;j|~h=bbS^4O+tqH@YWK3CFs2SIe&icFsjz~!S0$J zq}rm1a9r)rD*1|S)KAj+?ARWd0e==+J?po&gR{S>eNjYo<~U|d@HU(^RaaLRE3G2= zX9-b5#6Cm>XYZ!Bs7@13ER9&`XPNpq?<`TFQRuY`(MjKFwnck;J~g zG9muxCxp#SA!#hyc(HsZ=&iaRXN@iz?*T)pYF6n1%@td*cwG&p7K`54AuARRC{bMP z-q^hG%e)DsU?-2_vBhQD-8B(v z@q{K9Mp+oo9iTfOjH8_Xls54Extr7K$PUT`@_OEMYmZprxIpAK53wnudQc&|68M3zo?`+c9G2Kz}+y(^hD?_66j*v}5;sd9Qj$@P+ZV?nD z>{2yJt?VcIxf2m(%p#h*yF^u`*ME8+F=~~Fc+NzAwGcfai$<)#{a4VAd-KMJAYPqZ z_R;l6MYP^+W<1XdK^0EHG@SV(at>Hf2LduQE&z;-jwjm){dK;54tUJ_LGPcSU{5!5 z)>3Rkgddp9VKhv6tABwUa}v3rE9xi;=LIk-bIN{gN2|z`pydMmh}XicNrToN1p5Ps zi%%DW`hR5q`N;s(b)62aIMKQxeh-$WtWvK9Gddlr=$2DOyqgFI0NzvQ4bFN}r1y6D z<}L<>!tU@ROnXBW*PrHE=c{0v%9y z3wSGnKTbe#rQ53aoQAVrjYa%bM-YQGxR$m7J8j13MNFW5q-GIn^oNp?5_zSQ6Cj#E z28^rnG_`B3zQNsr!j7aV!~&PCLoqKHu$-G&RP))?JFr*!A-UDntp=|^8ORJQ;3Az5 z=ifaDh98CN=!vdxdv+ob@u%LAKXJGNq^ZUhP{&RdyrXSrV)^&eQ=5A29*cO#@?L80 zJo9%qr{br#?wXBbs{GD1ye+S(Z$|(0n=I5>R39!@NV?iBG5Q#2=04WQ%s1tRO6la^ zhZ$M&RAjPvm?pD$)=M}Kt5s^lG52;YyAoxX-*#bqVB*ZXePbNIAF>IoK|>4eF@BYz z=f0B8oL<;OFO8q8G0Q3|6v#S2^hr&8o?yr_22DMthdPuG;Yp8kyPo3Mj3mXi)~Y1;rnF1D748 ztyE6T20DKWv^D8U_^GF(!6&r%CFr-!I>9UkJjG5pKBq3!da0ojsN{&r%E{I4>^doqwO9Ot6@#tnyqXA5RQ~<(^sn zzUqrDj()7IN9p)63{9y#i-_{Q-i+Aw4wHY`+;-@Gwqq~HOAUpyOHLic&f}&lXKCNI zUq^4MOh=I!cgHoco9{Il50|F%cbN86p1Ry8gysOVX1;-@a&?(}m5=#)j;|U*%3Q90 z6p3!U3-i>*FOELLT(|mDy((wsn*6iTHI7d;&;wacO@eEwIfUu!ldM<)zAxfZoY;6H z9E&@3qiMbKkAwR7*BaQW4l|iznR+~WBTHIPDwo)>7B=dswwTuUc4JYa@yhG5 z7WcC!*5apF+ljh0&+DJ9)4ckTrXsl!Xpkwhqx3WN>_BPUl<}7?3RX^5O?~_DU%Soa zeXTFOZ$4{Po84Y?!b*LBYC}xO8WateFe|~>*4%3C&Ua_U_I9N_ZK#J zi+lBhmXNkRPtqCz2Is3!yXcPz(0UH<9&-|NdOp}Z4vdV<=+_$eMd89oNxN&6oq4z$ zK(`fS`vAPsBh33j@C*fM~G&=JUfARk8w-N4V$e43mgn7i? 
z<*91$BQ)yrk@1+x?fzEjelll6e{*Y>w<)@zFZ>xb(hK(PrM2+)@Veg-tO75 z7aZ9KFfXoZ>_=^?&vEMQh`dt6DBC5?BQW;tpbRyfGM<<|x=IwOdZ(bUlFwBD4`F}v z^3Mq=D}qaVL3_PQsg*~^}rL!n&ys28=zL&Rmy#yygfS=mT zowSjCH8H@t#OE2^+_?05^tl?Jlj3VjXmH2BUMg%g@@`njHivst5sspYOG!$p}ZIy ze<5Y+mN5fOIbxe~P&=#YyArt+#Llj-+l>$apRSVKo%y@yfD6{R@hzTHe#Z`Ua~FQ< zz3#>uTODp!!n=;dYDbpSuuZHYziVNK{AMcVy8B%#Y<9?o#6$6Zud%K?``{1alEach zsj!Y$mrGiv?rXY7?`jJQzH^SOh&&3fd2l6a{$7b4KarIi`=H0+I9&*zTdq$`@#~@WKki z!mTm8ULqbODhdod-Qz%J$o8s|bnX?-{dc9WhbL388?i@;#dYzbX?B}D=4`2%_ZVVQ zJ!+du3$dzFJ!dpdP2b~fw>&={-FT$2;JS~uU*aWXwMcnw1eBu0#Q zq9&1&`~px5}RsV zR!f{ei6rbmxm_}t&hAb^sWeAE>OFf=t?^BX_m4V}xD)#PHMQ^om?& zeN4cezeYf{D{d*oR9A*I&IN`7|W25hb zop>Bs_hstw4e^Ack9&k%F!C*qz#}r|MMExjUJIOtTxyB(e)rA%w)N?;Zmm_4u>pTP zJ~fh!AI+gJt z=tkwB#UIFb0OmK{M}#njX6ZRlIEaC^CPqK!L!~Nd->Vqp+5jFcIl-S&xJiiG=Ap$z)>arRf;&Uv@EmgX@dC+X&~gpPl3vHEj8w#`08=_0V<1T+Myn z1GsjK&$(*aW$aslvaf=X0;4G9+OW1!PqXw7w~jkg9`FTCUARdFfT7EQj>Q zL|)V!>2qc!<#}E1$a$aSdp0pt)%07taArBgo|brdUNBTi20sn67?hi;lM=#cV0Zes z^D|e&ya?0rYN6#IQa_Xa6!pM+dp#%9rIW+?oP(J(zbt+a$A|tK6S5^;_YPSZnkWk9 z0ZXf|JFLg@9!Bw8_pHw6G2+QJEyXhu^b7y7sPeMA>==KmQ8e`_${X2dE;hNM?Fx;p z!@~oU`bBkXg>!1e=%PC3ePpb=>E2<*5pDg}H+rX+Sd>g-y7{Pv^`Y}~sMZPjqe+?B zrP4-0(qVj0+@P^(Qqd?G?-a7Eo5{-AaQwZ47dZ;oB(!lUOXqL9L!ML2}xsA{dMGqvy z#)bxdBNa&Ox)#RCls&R|t-zQ*tf}efFR+(dlD-uRn~9un2{X6@lyP)K1Hs>7ii$C0 zz!@&f_5b4bLK&EN#Rz} z*Jj1GEN*=Yz&gH?GgaqK5(hk5{-MuqF?r)LH~Mnt%;bi;;cus&WZEqCcdB4bMXeVS zDsoqJV~9;VdDb+-<+tjNX@$C3#D~Z3aJ|F6vF&$D5}z4TH*_$&d^@)YLfzpF3qsZL zqchR-@%rriYO}*CpP*c=(hv&gf{W$^7gnpdyDbvhMqFf-HXmAP|9tZ}D?Za&E{Vvf zkRmo0aY%O?Y58;7d60T|r&sTSHu2ao{{ZX{m^anFu@Am>e3z{*P{!QT;o9$p3!@{I z>#6oj;Zw1tWUxK4a8hii&#QA&)gOZ3)q+&^i6MQbPCj@j^>S<=YwwnTJo@)Z(O9wt zwznPs`$dYWf%@}X#^Rc_h)hp8*CDe@Jh^^#^|2qSmkUSNuS|pHzWn``H?i^RigOsX zLRw?VS|~Af{R3&4-I;auAaX&*ZbI};2_if@GncJ z!oIB0X;LwvpR*2La8_yOq6NA0UPDXK?-F*|ru(n%r8Igduo$uyF_*G(SasCzMQRQV z$-_(3%bZ71B^q^Zq)2SQ2#A1L|-CVj`!lVvbeA~2iS&3yxnAHrggI>T(uQx^JB{-xefL7u`L&XoO9^YClPA&7{ zR70XYmrrBtQT}>oU<95}rq{eEt;_Jw96*~P&VHiyMYpJ>a;X8M(k`{%Ugqk`&&sU2 z{e?ZIj+A*UCW?IMsaS-`>3BdWa|{o%KXxU3v%$EA>DSRoG?ld(R2)fThzEIY@nU>@ z67{mr?40U^94vrFohHw|3sQ_%dC?V~-t?a5FV*Z}z16K?!l0CK>ge4}xoDjKH3fTW z)q#12k7hCcd-2bK_Vaf|OtHevf7&wDK2@pK9XD~G+PZmZX#JX?8L9q)Cjsf67&9ug zro?f`UyXkAHa7C^AT?`fHFJm5^^wF>OMX7^)c89h`88=Um0~pR;}CB~p`PumTjTM| zyp!7n3i>NJGOpFJipn>Wbu-}~hu&u7o7J@uN*FQ64PV14#!&rpdh28-yf$li@^n1V zI2*1BLf?WKeb0QTvf7arcMKtGY@j|m@p9Fpy+9%=RO2O=2W=Xj{(O+U9*X{Wkd#I( z?F8l*ciXcCUh&>htR3N~)Ld2^>tX{%@#z&I&(@u4B&d9=b&JOthZHvP3cd4y84@+S z%}Zm~w)D)k{)y>e;>b9X;}laf-ywejG^JtQWQgH%E^~`6)Xvxbgrlh0JJneDnJvBC zy@ADLyJc9+$&m#|U-bXf5^sOPP&XQ5a2bjj68vsGzmE;d7X2YSg^yKzM+64cyy`y_R`%=H zubf-WO5@Lqf@|>}6VgO`H-!MEovA2gdg2y2lE|Qx47vluWgA-AywG%jZQxi(ktY!f z0Pd*{B>eWDC7oSDt#7sxm4>M~S;v4ft5Q{E>a!1~|L#tPfl(5(1X=@D=Th~wZ_O6% zBg4&R!Tvk+GAuZutl}3k*5r+$x~waG#+4pNdWZ^nKyiAEZ`iqJt#DIZQ1MY^CWyW4 zIpc;4GrnQ9TFVY7$f|L&tt=rmj_|BXt5%{si>cB*fa^x$m>;OS_w(2_WraH{2lTN zCAq->T%A#B26EHY6{n&vg(Rr^JnAp7XAP<3X#P=iPt$18P|JV#h+K5umgqp` zB#o-!dU-ms(S9uVj`-S@d&i9ue>xQeU%{T!QJbyp9IL;5JI>WR=hBI2%f;!v($##W zY^%-qbbO=qCg>@5FvQw3&gc&4W!m@n)W&L6_x(zk0`mn`QGm`$eGu`;(v$gQ8d-Mw z=xDO{&S+F*{Ak2lskZ*0Q;uk$>cc7FeJ{HYHLBf*Vb&%eV93lV$nyzEG@b)?Tl#Zy zIoQ=kOM@%%Ewsl9Hn@oW6u=Y$H#KX*?J+)F0F0%;mEjj4R^tee_CtWtbgdT zba{S1`MScO%MrCu24D7Y~O8gd}mh z2}IYzZ%T^Q$P5INIr?b&s1roUVZRIsRz4+R*=S^WL*7Rl>e2wKXq~fmq%4y+zYG!Y z^w=hNbTifmQ^2gXKku-x#lyG~*8g%eKHrG?F1!rlM~ha5=1Dkx^SVl?p4@AhN;q@r zU75&x4}FgUAGClJ4hV`4#qs5^Ga0_wm*D7i+UxUyetaKifbbR zqbccdGr$NnN3-xI;nikNx#|3cIlqX?b(4{7w=tn1lJY7s;-C+cRA<%o4u(x*;-F7pZ< 
zTx8$ttNS!4HyO+;d4`AL=N)c#m4*uXHr(b3t#GQ4Zc?x8Eb5Y`03gZIeBQ2hwI|}3 zI@~Djev6Q2UGEl1lnwzKX-Nedv%Q~tQmp!1;M~_B~6< zt^kmiC0Flo&UM-urJf)lqb^GKe2hW?3Z(OzDa9#Knb&pp>k{NmkEn4wFvF)orNS+# z8A2Sz`;5<)%o@K$>7Oy6AsQlY=)%~PT4WjCJY$zWqFayRqqH(;VEy{L*iT!yy6;eY zG1=5|i{VteHtEOrw0M_ssY6f<{Zuw5ioc4U9-S(XGl71+(9+Q9$(+Mu&BGTgPNIb z`Pl@8^HsE~H{nrU2l=vLLV9Zilu1ZEAidSSL)jU^|A`Y0Un|whtW`EdL0)n%p(R7# zC<;h~!Z=Myg-q9_p_l;&euANUov-5(5W%^9@$Q$(A>stUgoy&Ca>96Gz zpB)**nXhm0ALLOe@K8q_D7{YowXj&1rw%Hg474su6u$Zl)V+)tw$r%>_C9~uUl=kQ z`Q*!^dPPkx_(#8puV1^Os-k?T|cWnpWgQnl(s6m-#KJ?&Lm$D`RTDc^`9yM1nfNJZ@^oG*A2H@Q~Cn#^WNPXL}$qgr*S(0J}of+2UESk!o=NwSltMwuW zx#2v0*Soz=ft<)M6^_OLDYA$^)oOPcWO_`~dh{nDst|7b(!o-}{k8gs*gQ)v`V=Il zv-x;m((OC0j+iG=kxriZ+l40T>o)%BKaQFGip)EWMFMczSd3+VJFx%J#BMzz&fLW~ z(~19x?(ixW_Tei7B0`IE3bRs=3}Fw9_h_duNZ(d6yBlqyAmh8aVeJ8lq}gqt56|~y z>3-MbLG5h4wV&FKs;3L65r*7j&|e~UW)^h6BWBaD?P!nZi;l+$!dl5kIk2f4p0R)E z(W`4civK!(Phwp^Q)0)wjeA7Kz$~iub<;K zm=%X^CW^;QvjzAGt%cprQWx)8lP@wZ5P8h3jD>nGbTbaNxEfI#3?Qj1N36?G^~R_` zKe+YNfwC@ow{vSBAm0?T@l7rTeIqLiz|+qG09x?sew*Dl9zdqvULTK=JOb*NZC@yO zmnbp-*0|wn_NQtIi{IS0BdrLN_cMB-IzsoLcTYC5+59X)uamJaHEqMLZUj%ip-zoR zX?D65%us9V3l{29PW4;={<%A%X%(@EyRdsKC}J~~B}{&atG@;se~uq}n;*w60>tPt zwFK>OCz#dc!!ouU{y`|MQA1LzjPqvZ&HZ+pRuz41PqsWVap(D6YB!f;w7*pw3d%C` zo(B8Qepz2F>pmEShG7uLu4BO5=+S$PY}u#n5uW7zRBHGQbw9b;G5O=9$i8&BTB6GN zv}hC^hnvm~-TdX>t5WR7wUu`FJao>r&4FDIK8!84lDe93s${yrPEWVR?Dn{YSOo|9 z&b%L!L$R;gyZQWixs z#|XD+@t+W(et=Hf8Zm$?*SLKXCmN~W$z(BzZ0WMlr@?C z$GIe`eT$XEcTcAZK+NvO3+6Z#ZJaQs7O<+0mID|EZ5@!i4zI5jFqx@N7T2)uAC()| zzu2U3i^12hUBhq@_cYh#sBbtH8C|v&0-=|tvinqzw3q{H#y$G^xz=~b?$q@>Q zu98I;N5?Q++7s>ZmtUFFF;O)pWmD~o@ISLXHm<-bH-JTVdy-MqE({JWKw){lH$zhQ zS{r|ajraSU{(SC}hen0MvaGv9hPG%RY*FJ`nuqW6;3{|R*q`t$c(lDXO5=sMYxI1)l(m`JAm#-l|b$Qz8xhME1#Prwna4z zylbg~{5cbC25p*C-jKU|{DPXh1qAYQgk6AjsHHfjD%|HhFwZ~YGQ zmCNBS=DR1<3t>g4R>CJO7tZ(}ZXe(qbQ-Q1X5^fzojt@Yk#%2*vSpONUom z^@c{0^*8r&c)8p0wG@+`jmwbynM~u_{e%CtT=v%SX1Rahto#$bB&DF1QCfm^_OP&; zz$dXv)7g-5L(}Hzk2;se!@|L{N~*zgTP1Iv@A23C;BN;Yy&@vL>*T{^s5%U#CUFm) z;nBp*QRALln^e*|MN_MIclfuN8NJKB4P`#+N7U})CBE`kbd~68j^zcFVJwHoHb)D; zMlE-2{qc=EkHTBmf+-P6`l8ifI_lwqYsZB8oR{;$NM4{N-GyGCcT{Uo8h4qfk*3hf za_Tu-@=QF+eCpF3Myu!Gw?cDv{9i04e9kNEFeatO3#T_;@0;1r%20a8Syz4u*WY>L zT{q%C4Qa;K{aerHa|j!~NZ*l5q!=L&ZRn?U<+WH*zFp>)YAg~c+Q+&+JMD-yi5B2_ z$KFH3Kp{SSVqfXD41Lo*;vLnx{27$-VK(y^qRF;QOHrOnpV=f@=9&;bMcuKQJ9U7$cV%LogHT1y* zAj>R<;}L`?77V)o2iXnWgVU(olL2q)FVLt%HR|eGgTiwpQJN#JetQA@QO$<~zTTC0 zjgC*wy%2+DpK@O~O}HUS+<$n#t;i zs6WWtrSVuNo9Dfko^LM7!)=}&gUohjqK=eP4E;N0K0{EmlH{)KorLJ z?IIi9(0;x)sjK#B04ad6un!+ATqp0ZORgAq71oN6x3A|{n|DT?w{O*q@M7LPw|c48 z;6Ipd3xA&!yh2B4^|`8hli|e>bVI%jbay2hq5b|-LrYc zeo5WCB)s2Q_u{T4u8QJT4i-ABgueMfw8p2um$}RX=U2?u(92oj80dzm&)iDCLt>2M zG5evAj-@lSWf#2KR_ks>rKw{nwLA$I7vX+J+;3ULg#Xq8Z~$Q9saVE>7B5}<71cUWB$w!k#RrK+MWB-|)8 zDtR`znecs>nDuOJlgF_Ka(>B$n$T3vUxKS-pHK? z6bRXzEwK4|VSMi_@G#77aONZ_Psazk|`-nb4b4kt6p3Y9>xCyKPH@Gj~JZ5E_|_VneYNNrZ2Fj$(UL{3-n|tIw#0b#4muK z()1j`o+-=iEdU>(;lVnsXT~0gaS#z)+0Lu)wFga`!;5!^E~v9In#dY82P>8uTvW1(`Tg)SOLeph?jFggsQ>}F1#sY#pR4_q_u&KNt zNIT>BsZ5|4!a6o5@wILFvzoPxOZ?ShiO7Rw-Tm!OW)fr z5$>9mllnK7{^jw5-($w{`>0Zzq~~S>1!2b*R`kTle>TW@Rw8S53|+sFEqqJprDn{? 
z)LW^6zmIO_ee+7#U|6IY(?}?z4v5mal`WU`7YdqL6tro--Ku`%fF*jI+6Q}`(!%-X z?`L)x@7f58|A>#Rf+%kPQ?$-5tvJkLPg~Aqifm8NUZB_?JsuG>?J~8)Fp}hBvwfcW z){iSnI>^p%cc0hm?8tj&J%H$yEwfv%TZjge8FtXiAPawvR#V$v_qrk?q8qOW*lH4$ z$c0^^Q1%@^ipb`$xo3k@(@^nd$nB7*=8KE0!mw)89P)Mph*1YLRT1Z`b||gY^v{Lp z_es8;AfIcohTfsrb@uGN)7$7DzpvZ_p1rK)q}XsTJF?#PIEQes7cC1{C6YB>I|vFO zq@q)n@>x2wzSh7nS_c7jvCGu2%&gr>pwI;;(65IO%gM=IYS|05CGuS z!6y2^Au0%v^WLzi3sMy4OJE}H5=o8Qhul<$)!%_0_A_&$4U(xNg;$4s6Q1NwH)q#`n8RBX0%2iMo z38(bZ)mF9V<1y;6uu^0V`RmS$KP<6 zbHaA~RJZ#%S-|s3fNt6`<9yJWwB=JiTb&wYtcMYfCOmh3CrA&kYQcju)DG0@lpvL-$V%~^l>TbkfMF3*;yuDNGs zj-0KO{7;QN({6d6D&*kh2{N6v&25hsdjA+_lO@(KFSDG&VY0O)?k4sNn`(Oh2)a^V zYs|8#iRw>$-M1z!xZ8%mm9gr*1(~mBU1Yf_98IT~m%#pYKfYhIclSXf!dxh0bD6>c z=I+Oz3VG74m75*x&ty31=1W#CwC$Z$JE*vjP3m*M#9^u?HNg`$hUm1ipQST>;MnIM z(9r9OCk!*(xahfl*~_ip7<4-M_XqW)nFYgKR>VjEwnCAFQNh{jU6Fs=<)K6rA8pgd zHgcB})jLD}?vp2U)7rPQx=I8s+?Cl#N=VrO77a;7W8}s%HP<6plyCc0JY>cqaV8|n z7tE7Sy@X}Zrfv>D-EFC=u*f86R+&^s1nclPQKesmK2v5Ek4ck?eRx6tE9F*ApQwp; zvv9)Bdp9%p<|u|HV>?L9KT!jh=GjU`&~VkMx6R>rRH*nQhvvZ+T?-&O6%%Sr2Jp1% zY|^NlAGK)3Kf+vpjEqf2JS#b8<_D`rCHJTNH}Q-vKrE?9IdHZc)K88TU%WK1w14D; zQ~9xvhrJ7D-Hl^kRay{8l(!sNnW!H})G5e+ock8@uLD_C;y&rm}}L>9R>wRlCR_eg>-I;OZk zb;4)u7VWaFkq74X<=O><{F=#O`{Y-l#$*9j0M=loWPp!@@9jrqvNKhjS0K4tW!$^5 zxIDXm`ov@pej(8-GpJUQFgM*Z&cG3Jiir#V{h{bp-gh|73EsB&?BC>^No7;U(0b~g z>4d@g>M;DY?ZkOx@5nqhqZj2fRwzP@r-@-1)0eHXH&LmcW#{*iBQK{xV98>YsNlFJEetEGI$(mHdo zabLKCP4^|`dqpQ)G7tBC zqXOyo4pGW-B=O>wL9mkon_q3&y5J)dmVc zh|?C{rOHnfFM-lI-Gd1+x{t*_c`NSl;Z}{n(;K=@VGs4!ykQ1~x9@UrscMejIk@BE&)9j?9e*AZ%`h*U8>ps=>+;Z@R)Ulh zdhdP>%8?;d&@<8L$0@@hfm1J@Xd5tj))oCJYTI}QX$II+^HFtbQpb;Cq1M6k;i<7< ziPoA@`xrgz;=Ux*L#r9(!Vp|1LImt%@nZ2KVW=2p@3a~0Ljbjq7q!qGNzab)AMf;z zYEpzGa`mpVSo56j*a#Ow*QinpRuEDb;!=_&a|+aaRy2m zpH{3Ev_x_ea?ILv??Cc;yPI)oU~jgedYQY#blnzNIzNfn9YciOntXiL^ao1mkPmBT;IkmM!P_A#{RV=VH_23Yg6E4k@FaE4TA z=;iHPNP8?%Cy5%&oNp~%cl`HPpYBgrx-dl5|1wX1?CzRdJb({oa@UJlJ9eb6G+n(W61xymFSIgc%5ysN}&gG=F83HXLZ+|mE$cY?kKRV^AKO;ZdhpCWL8mB0!2?`N8D!Iaj( zN#$nto0|(D@`MNgA}_meHY<*6g-)e;F)U5N&V;+8vTIE#5xj;%0bw*_Ht8aI*Z zpAX(WWnXh7Wsbeot{^Y2KUl?Op5Ec_ncT-Th}Kw z=OD*E$jr=2R(57)sR$w2n`G}Tn-Izhp_Hv7d&|n+n`7@C87Zm%{psl`J-_e&fBm29 zxt{Cvbk1ko_xrx@_jtWuuWq@FW*=$NhTBO**MGECXoVx|@1N}{XTJdm5DuG(uEjw> z1*bg>WlByiyKr%?2`2>J1{b=!?_0G!akoo_V(#afO2}u_cM)>M1?E0FmVJZn=!V*t zYYB-*Vfq!%J9P}UA`I$v;uYDt4L%E$@}cW1lYfqH72$kkwNVkhn{T|{IpWrCh0 zMD=ESFvM(M*w+AA$e-KLGzy1t3 zn4dKDDmn8{sN=9EG|S~-mP!oC-TRgVVHE$COWEbn_|*}z6Xo6Aim%ollFp0%X}Jf@ zzR5LRxmG+ppOw9fLNly3qVSJioHceM%woJ+BhZ!qkj-DOi94L&>=qUT2Py%Uc)id+&P}q~Sv$wmD=e)iCgxce8 zGL4vI%x@!ul!Yt>Ym<=~R*{#qQqR610sPpbnw*?Gt1*6`rv(p8*17U_^X zXhOMn`OjbaVHz{W47G2rPgXiC-cL5vJ3A-Ltl-^}Q#Mau^MYo;rSp7QRwT1tu4lK9 zmr-z#u2)JCdi#22xAga1gAdZA3eVS)XY&}=>Wd}LTov>OP+%7Oi0eg!hi`l++q?KH zoAoo#DhJ4UE(q+l+V@2<-e_Cmw*7^X5)n|Xdu1E)O zl8;ylHwxP|>CDi)B!@2vdeKh zw^99%n*Zyk4`a5gHiGde{zJk7>;?>vMemEGWX(SPKi7oX&YvNUfxJ!Z8@8=;e-=}W zloE6@aBG+5WS!3aS;Ei&#EfFr1MGBZy|2FbpI;2KK?7+uhJ356=+1R^pqN!QG0nT5 zt8T6?;rtm0KTHx?uw$3KdBeShSu6BsW{T07J8)Ji-YF9M z^9w1M;7>N2JdJ#6XaCS=KaJqu`uSJl_SmU|ng{y4f8P5Ij(-ry_muD4A`PllJYxaq zBw?viS{*avL<_`cug@+Gy|!6bLRv(RUgA8v$yrVlXhN|34>?AOvHsj5MoNMOOg+S~ zLzHH2_1{eaye-4vl@kBBZ70^(+x@RF2F@DrFzGLL96B1EN8=0=R9gQ~5dHa%R5j^Z z(%48KM9P%L_m??>ZC*-l?D~^h{kL|fpOr*v-x)l-YgBl478Mx$F^et4>U16Q0Idb{ zZlrbc*>xubnNdbF2yl-MYED8hnE9FBsku*reCnSE0F+zQ{vldlimex9lq>roc;c)v{x`oJ&^H?=VBLhhxQLS&b>?IwydOYLT7R9gwoOsxMf3q)FQ z|Nf=sP@>NK3l?!^Uu+VPNJX(^AATtZT96h zD9;B|POB1*wq0uC%YS`~!QYwDUU34=V|NByJTWCUmI<`^e2Odix$*uQ2nH7%%*)lV z=I6X7;sUKvv>c1+KjcCLB&YuSR4TJL2e#vS`agzl7rSiU>&pc4eiP5q(?7ERHMde5 
zBye442|&5!ZBjD>I1KL|?cKAC9};)5EL#2RH4#ky0R`S#hsU~K0N0OCG}P$r)1`HW zHi$U}3>k%<{{m1K{KVq8*;8@ryO~hJd}?~1{<&@#kGoRJ z^Jr_mCOY`+9dmCpn7l8RbA~9-V}VlPNX5E|vc#QF+?Fg8SXK2Wf8AONmV1Zch6weR zhjCr)=ky96o=gB8n>;B4oL2w&DtJl~)@oP>s0U$ttRz`@uM5~{G1Ky_v4H;JotpH& z9)dY050g(bzN7QTO1qnZGL9Dx)SUpz_7}H&0fXHivjbC%L#F4y457OLj~E;0E7s|h z+p9xfH$kV7(WX82-wO)YU(e1q#v$69!OYCesKy(JF1C|t>JqzNbNg%I3Hb*_ARG4c z4Nh;(0O5+TH=mN1Z`UkVE2O*@yE!1u#WcG7FH6HePop3NOOe!N=g?2#>7w9{7%u}f zV)_t~SC~s>?3tAhLW|+>HC#FHtl8oqjjB5Eu)p|K>DAJ=0%DbA0ut|n&%4@Ek2n;_ zo!PIYU}ZNAQ2|QDzTa@QKDFvjXjGR>s?7q$4+zPrsS{0hcD>$Bk$U?W*>C_V6q{EI zYh1U(A8Wh%mY;WJe-=RaBy%1;c<`MIQ-{@49*Fn>byeAsdij>Q?L9@CV!ppdt3~Fe z^_b*DxqWQGiEDhpck7}!B#H*eWkqjPjln1VbFt>fps!eY}T~| z2#cOjt_r$1gsgjnD1Zi2(-qS^vnmo?hdhvn({_(nv1`TF-d4?8n+F8-U)8>aR@y?F9@k-nsKOesk_0ku5p zS*3BKqQsyq+p}b4XPph+ zS~caZj(2{)=SJw{^T5fVDlRd8bNPGm>0M;`cJU{lyr@aLji95~Q=&rZ`V-{k4(}h3 zK}q;LsBl$tAAR{)_@Il!_I3S@-SSDnrb^`N7KgTS4Wf%x6BG}J$cGp+Qod%@exe+5 z{cv|gR_{=G8Fqneg}Y(T_e3T*i`~s%M1hO8hVQ`5Ek7qO!_H`AM_a2TC^&qi6P7y@@FlV;JG%bNLQS-n=HJ;k41cn;~AmpiB|SCfbJ zKmX_|FHzU^EznH9NPD9x+{TI)%X3Jyb!${?Yf*_b3syc-J%RsLlY9;9dr`DgVg6|~ z{D=DJNSlVMICFT_#=2-c_Pv7qC!IoVqA99IPB6H8$gO= zIj+z7%gUZ?TfG&JwryX?Eq_9pdqOOFPgs3GSnV9IGLHL6SG*sEwy@cyLh%X4?mgYV z{S-dL+GJTiKE0&wovZVhR00oF_?#d6oZU@wm|!m4)sthVfNQh^PzkWBWhp*gAIk0m zyj$DT|2B^Nv;BbC?Jv;cPd$&fr+sp`$;PEqu9#e3#B_Eqb-B%uE2k+O4)G>_TJADC z{dx$U3eqrc#nA4Kj;8?uF!sxrpN+oNxgAPK`v!ci*rJcFjYv+%O^Xw?;$G5HbYB=X z!2Nj}wOTI#ah?9MIjK69nE3e2ANpOry+?Wrb|NQj!x8E$$ig8J)#>dC6FQy1sUbLAkeF`XuECHUbuFkIZ%Ad`Y!G8|UT((9%WmcT| z07q(=ThrmV>2$6OsFt2!FO+zp3e)u33Q|1H*SSBVuIs5?OU~45qq%_W-il73*!-sx z6##V~Q}b)c+(;c({m|E|zE{ZaLSs{qPp=o^5-)zYE%ly@1O7(Nc6VeaOZm1VO?EkP zteCoHqDHxv783PYM{~8SxU+<>=&i4_$8z;#^!8S_b;os&1nmvcc+^AM?blI0bY3;p zE+w;e>zd~2L?_##<|w@olYeuz|2nyknWP2;9jqt#aE3Wxhy}*ZnnzR1J2p&9H$(|v zPMwez(e8-PcMf2Cr30j^nXAszs7A@>2B+Z?8Yka2Cd_;fP1t4W86G6nKR0RS8d+0u zDXIua(USEDPRi84=;*&3Jwa52Z=*g+$fB->bs~FnNXCof;fBd&9ZeT!JgU6et5T=Y zqX&Y9xfj$~;@j2|F6XTujpXHN#IGr747Dv>E%m4OIaj8BH~o%GcH(&UkrccoJU8Ml z-0v2;`t3MP+@Ku0QH|8_ujAR1Dl*S!$~!saXC1KOYdx;9k;$ zraFKA*Z=wU|Ihc5A7ijA<~n2LXWR#96#PrsfT?VUqM`rU(9gW}KmQZ+CK$sQfeG?| zSK$BoMvT-X>C3g`BUwMR|Ge+nKL#*^dnqJa|IP1v_8STe+c+xDN+*m`X#XGYE=}@s zjha~rq=WunzZ7$Xd8s<<6v2Q7rIFxM0G<7_@dJ;i79rOkGQro02v#qxISE)cKv_T9~N{(99 z&bf=>_mH~l5J&3&7+@(+VlZm@pHnn%1AfLU{=6ow?SP1N=|NdRL3dHl*$CtJya9eR zFp70M!i5PSIx&PrQ1Bd;K7ac3|3;1f%#zes1h|Lf=VZ-WAWlbv)wq4UBb9Ic?2KRb z+l(3Fq0wKiX{!gj?XU=X2uaSqP6Ofpdh$W&$(<{LXfJ}XVu2~VSr!VRu>UI|`1@yX zD1be$yS(=(1M`JWx=X5uA&Hf8_8j}YAQgDQ5>nd=DT4AU9rD2L9H)tnLjK3YKJy0) z$+Qx03j@T*oxmcJiz}jNf3-LM9xcekgP=#fQfmDij2A6@Yxe!DQ&QymhIG@v{Xf6& z4$V6Ucz~e|*L`C!jR8=?=9~{<`Pl@*@1+={4cXY}sj(gS>a$3+W<2Hz0wcG-Z0et( zWx#=haScdm;09yzh8x|pI%}uSr0=ho`PVm9!8KXKIZ-U&QShKgSrFR4_`kl$3jWfh z=|d;91m>8*bMie+h$XK0Uw5B|?p`6*y9q7|kMm1kqh7-q|C>1f*IY?;LSyAv5lIW! 
zL=~W0#ID=ECW&CX({-vx}e80Iv98#t|$%2+W{{Q5aJ7gHd zv_zYHp_h=l3>jCr+Y_HZ46FWMs#F<_4fp(sRuVv_vhmqW3TRc@ zziOCx1*mGDB_^@~RCZ#lvZPjcM1HDB~q?tW6~6a^C|S&H|dLdZ5Tx23_$3oKdla7 zV!1Bsx5(tPzG-K2=y#(U7CuCnEww*&1U;f1(Y^;v zUjTONp*Mlo=N*^QBA*l2HrLBGwO=>#9RWp@+i+WlnKV18QVWHGID7R?ge;jf4+|8tvNtZQ~1Jt;j!n&9+1GYnkciV77rthT$(yDni<#w}@(y)T$WUt8VyfuJ!vHk*BPU&&u5d?|CGww{u4-3?Yz^Dxe zraU)KY}kcax;A@M{bk)M={SmM;#v%))PZOxfTgJuqFn~A>(J0skcDCfNo_$55Z!3A z%#q7o5PYbvJh)eN6c-9)935KKu4p!jRLv(Mtocx zyk&FCy7NM=efr`yQ`?Q&C3jstlDX`=dV{M~|1oqJ%N4gBU1cwwa%=+MEE z*beeGEfTR?Z({E}d-QFQp3iEOQ)Ihz=u)L<&PX3YRPiX%SsV6L-+#38sS|B= zep~34F!uL}ZmEDa6k{T&sPugyf@cYIc_u)l%`M_xi9FF|RHMo!Fwi&}ldG1ETbq|( z0;WbJuhep=wTQOwx}|0A;yZPqPK*DOM#<_bz=SjN@bsy+J~?1+*n9f?gsyA#p0TIO z51q_4?H?a`3uFFTq=Nxy#@`vU@@}g%?=PR5yU6OkK}81fwUD%Vg1*t{*PP#+eg$$%=q&>93{NmB$XM#%k~WT<)A=6HA1&KU^x%N05<@cvQw4H} zBh>O^2i3E++Q8TF!!f-48bzt!pcI7qwXeju131Q8K!F;u-U1P3gZa@=CU9dDo&+48 zKuSU=>*#*fcT%b2aKH|$NiFzrscIZ-*q)tJCL|`@a8!+3;p-$|%EZkhiHZvfa z36ZXEIs{fC<>lr19u8k3k~GDucsFcuKNOrz3KX3 zCLfUH6gm7cAx3nCJ^U>Jy`yf1!u5cQ;f77l8ghm2(Md0@xD)uf6I@#HV>l=GAu_-i zP5pd{alK*G^WDqlg~ZIO_>X+bbV^19M@U`0%q?8&_r+_b^}s);t}e=-$FlDL0gpk6 zZybmoKGTlj0lCC=-2B~+em=9;bIrG(B!#9%{m5Vb&Nj1M$b+TC-X%MkM# z$=hmQ!-`|zz1)gW27iC|6uWTkUHL*dQq`lHTc@1UKF*9-uM4G*L8jo;NH#J!C7wUA zw}&c@zjojX%8$eA#~ z1za&M$``!z0&|@S zQb_)(QhtF7_Om{pJ3!+e7lO(f1=6S6ANoDuFDs^9Q^@`1J@C*81FneED~oLl4sw#t8pzM9^s@GY$p2@lg-Pi-1(mc?ylgf1^ovYP9HGva!+A+jT zy}23N;HX8RJ22vHub{4fep^uW%i~cafv=$2!648DF2=26T@+0Cia41)m`t_e<`ml& zf)&yaLt;sK&n*%*J*os5#P=NlGQ$~4;~2J8yWhLB;mBjLpnW~F6D8T%{P6R%o5jf` zx2~{qznqWFMrF;1pCG1w;K9BrnOns3P2BZLjlMo0NZ&`jsR(=tV%RP-m9XuLon&GA zxrRs=EKjhmmw^8H01p<0p7YZKpx(pO)-3aDA9&dhubfBgXAxAC!%6`Sj1nwz5}pS! zATUM_E%zmBfR zoRlhxzE9^^wyv=D(=8uPBh=i73v^A0KCm&rsf(Kz#FyPtZ(#{?E9NalA2tCea3VPG z2bkNkCJbp%Nfze<3T;$fQd~i7nub&@)jYX6n3~RSai>WMaBBf};)h0b8F=EiDkdLt z++4+w&6<3p=KzmD3<3&GQ`)-?`m4P)^ubey>)aL_+Seuc1|$cKmVIB?Pn`EWY4}LJ z!*Zu8MlvEeDrMBCT~Rhz zo0>>nDioW(;Vh7jtsF02v)v}uRV4~br@Ybyk1A>!d_g7RXqq$kkRteMrMsm8z%##a z8Pz(XBhZE6DQCD|g8)PlDNMb{WUlJ6>n%;C!F&6VMwL%iz+8a#e; z4{`l=Js}SlC>m0arHaKyHp2Sc*SP7)4aUe~6>9=MF9C~rCE>})`dUm}hDb`+4hX7f z-sf$LfIOY)8oEZ;nViXdJ@e{)%432^`xizbn;t-S=WE}R^aEVyvC!2Ea$e3TVSJ@D z#qyhO29?M?>u5@3dE*czy_E`u_k%T{g0uCmnb0-CvG0ZtR|>BD2v{U}fT0>|TGIYn zgB`l0z&Oob5p0!~e%sz>&IGe*d7NIW{>OJOk%cg(0}1rcx2G&1IiCsO5caSWDymi4 z$@|a~s!MUb^bY#gDA8|GsjLeaIs?+s*I3YbtiK5@GOa!Flr~_)c1WFUOu4KZ&pE)EmWbG@s)FK@u86#ym z{o#H@o03i@2Th9WSh9}YpWI6^h=w-IRavG{f{f(O$wtQ zN^V$940CiIsp{X`E$10N+JjnG^?kpXo%*8(N|iqxxs{$_Ih2KCJWU;VC1UroAy#u6 zpL^isdzZ?J8E}~D4+xH&+#CLEr1nS=YSW{=b#d#v#p;JT*w`@++l!a8uR{qgWMS8~ z*tsC7Lo2#R_?tbUsf+jf_zKQ*OBL1fm=r_)t~0^h9PHQHi;F-r&Ym=HA}~bbtj$TL z&=iYK3e{w*W!NLZExhkO($IV*Nm{WCi$`4TrH(-L%2>h&yGM`YY7Ibz>Op-1M3)fIfPAJ1Nkn2~(4X=l;y2 zAEyBnChppdqkwhWoqaN7-x+YJ<6vTqP5EcdvEvqz$vE?Qfo|S~V$*QmbA=l8^H z2AYvAQ&mpe@~5xpQIo4IG16QNwp_In{YsSEouyW>M6j3JW;T$L|)k^FF@?hf@~av3X|r0 ze%G!05^K>MQ;o5m(>!uy>94G=iRbG&;Pxq8ZM8`AxZY5clXIzDB&~|UyXNGb0rDx8 zUKu0jc;AEFEr@J;nf}M|jRNs*hM+*JBvb1ntUkW6*JA~Ew}m%jmF5puaC;woZrvwA z)vWf$QMq^tE+<%oTM;fKbFfOAHhNswe0Xp^aP?Acf=}N>*r62?WnFqR$a|Nn=@G+{ zbQTxs7~jT=$I$Yyrq8IWcJM`Pp2sK;rrN2pt2?|m{!rqo#@em2ivvC!8z)yb{GV?e zzkT;{Im`E0PsVx=Q0*u@!v7Gef-hAC8u>6ivoF4{I_fE}5<2{GYekz&iyn#|+hORK zrfxN5^Nr*NeXrl`Ew;6h1-?81GIsx@>--i9D0R%=IuAC+62g0cz4zw8!AMpv~W>`3XvB?J41i_LCt({6a)ZVJS4*qXbE z*QXL0LPst(%ynVOZ zJ;ZGWkTtxnd;FA$o+;8Iw~NLlyjrPR|Is*hFNGwvX)~V#*Xx<22lT2%U{Uy=Fg4}P zRj_m;R+>&J7gr8Fstb=~e#-ryfb%u?7+FOm zzU$W@+bb0IVvSlHXmg2y7>#iYoW?|%1zj3> z8M3%qz5vhhL7zz^JK7%EDw++kep>mw_cef)Dq_}jb_qUj+HLCfMB@z&|FM*j&N#nN 
zi&9J3heIg0L|-mH!vz5hLY{IW=oICe}7XD%VD#?=BrpXp`M6c9app zJvj5O6=Mj@k&oV3Pqmy{m=9v&8I^bqojP4HVr;H%opf;blL(Nq=hDq^i#i?LPWb40 zTM*-J)t4mL!%4wKnTn+b>H=|x|n61E!c z=GMOQu+pmjC}^kbYABK|eS+ME+QALvv8p8G`)V9B^3gJ62p=! z(Q{I)eKJCm%+%;D?HUQnBXB^;rKlf^h9k**aqmMRunUOL6}u@fdtyYx-NB$t)4+IE;8qZ1nDP{tjYt6t?bl zU*%{_Tm2-;8TGdyAP*3!C`qMBnFSlPOU2Ddfo)P16RK1y_rN9$lC+q$a=UZqi%J-` z&@dI;6{-*UzD!YQ)V8o~Aj7v2*keLtL!F0n^7^w*F?t~qM9~s%0GE<(w2H|1;)jm0 zwQx|2a_U%}Uba1tWujS&W-wMlW;09{J*C<>U2Q~_p$~u$uzcNDBl_zhnGK>9dZu}G zlZOfxbYnq2dQuFRBX*O40eZIE0ikpMnu_YtYX57&=3bEhg)e1S7!;9>MWb>+C;GnW+ne5S#99GpT0cQ~WC zzq0p-urY|9h|ax*tLrF;Z<7H{w!j7ZChsP1|6?0ucK&8&cnOXFDzL^Dt6TO8K6>q`tR;?VcYMQKoEu6asW)PONoc9}VP7NWTe zz>yO$t2<_|#B@$-PmHy}BY)gC??A~GOQ9Qd52)P7yrP|91yy`f&R1{E z9M4NHCX3}2)ueU(&fTq(zB3UswsLQMV|=_Fg53j1u6Za8JB8eho0^BWsdsQ|5n4}- z7&s$~BL})aPgu}9>?eEIv+G}h&f@Kf4P1+MqCSP7YwjShs9oT|6mNOs7R)+!dt~c^ z5zZW0u)at+#rax`X=EC5j0f}X=S7EKn?ThvO)vW8RJXbp)8m=Bap_NlEYEH89qxH-C%Gg&|7FR~G+tE@y$iZRCFFi_ zhDTXsIZi6p>6&pBT@T28Wz-YBcJoKA*uR%OWkeFvTveIAMR-nw&v86(@DNN+0x6ObWs)K&f&JH<`enD^>NJi-)q51?U{upZk#a_wF5v4Jj_`~_-4ejm8GORr(#=)-zfc~e*^o25w6<^6>wTvH>e}<)) z2JpI~5m}(7o;DUCb8-|y)9iyaOVPb^ZirtG8+mY}-igp?>d6N@WzEktl}2QIBl_Em zTpz`wddh*=eXqWvhvbmWV4W?)`T69I5VMV+TlX^!#tNb!$;MFL=VDirszR&Nj@l#a zUCY^O3n^s4;44mr5>wAX2+|0rC!X2HH9OT-b9G|zHkU6dABK;!fBq@tTXhGG2kt&s zMqmV++R^LKU-1TP!T0V`2{g_Kul}xqJR`La&MuC6h+oL)UOu^}Yg4E?j5)orM`9h} zhY*^7bqu&_6o3jOR-i>=!~-Cx1&hA@tpQ9Z~)3#h!%w)x6I8CrXT6|*Kos;Kk8R0 zhRr~w8A`Qv89X{&wxzDd=MG!PRh=Ls-g*M50B$Scq3?i)!9P4Gu4rt?aX-hOk-5nZ zyy)?3n1uN5=G})3{Ogp*ST>m5inKk|@7Q~(77jD&zO<0Jzn94oJOV_+odBQHZQMF( zWsObTy#=x%+J4eoH=pxFnO?5xa87ZSm{(;%j*fzwxeMdxahb{(tqfzY_0 zyq1#fq(9OJs%m@18^>@OP&|Nta90g6^s``wU(cnKQ*(dts>UbpT%R_+_&Fk8knjue zW~F?spTl-vTHFsFz+Ljm!vgt@l#whHcWn zNU>B?%~f43NYk%^2j79#VLE<Ko6 zyZ5K%z6LFak@w%O5zJBV2v=rTOK?wl-%jKnp|7+r47GnkBw3#6IUD{}(s~SiPj~P8 zlg0}J%@`8rcX<(?#o_(Ov?Yu8O?15P%hZnP3Wp=Bd_(SE10EtSlb@}vyI(N@k^d1I zoYKANNs=HydPqEcbE;yPh)xtHN7Uo0-}~FoVP-{BR;j?_z~pqtxXuzV_hF1S7=?^q zC)x3QHQ~{;vey(znRXmwh0l|YkQtb!0W2ic?z;AkSg>ei%1%GH4ZiUPD45uhA39GI zn+3Q(TKi5|C@icl%i#13>jkMe{VT=pW&{$w@0zRT81siXFTo&#bj?e+QOQNQocCq7 zLWHR?XVvjB;*9(*gngrIz~7o@4C9+Dw@E3!O0Y*SQNjRPU&oEKOAofMZ5(Z&Zg>nL z_9?b8=|)wxhHU_iqwutI?01I1%9^!~1cI*CG8&hYuw;!_r9|+jmBMo~>)%^5QKNHK zI0NU+zW7pfInLfHBYeEmoSWdUe;&?eh)*9sHy6~8TV)G2^;7&JUuaXmjz)xZt>9G0 z45XJ*7YrFMaMiu?=E4A3TCw}f3WCP&c@OE|D6UJ_(3uFO?q87ft~>xpm2bfbMVRiy zMh!U-@B$&)ph;7m+Pnxax}jUq25yn2MseYrntt=DBu|7BeCvR&$)bSItomH0HwDw)6xE948D zQGn=UV8eM?6Jxb@%M=zR8{~rV>~#udtx-R1Ul~yE-gqu(E1LLwUmRq*VmnpY&~Rrh zAdEYS%w~5th>iTZVaQ8t9xeBhg$eZOuLJTVbdl@h-@fcGgUZQhAI;G z{^R;{T$45$r^rYekwMOt1A#IIQ4t?_0S$CInnpEfS)^vxe4Fs{vw$x=Z1mpii6JLP zZTn#mzKdA+-BTw;1)sxM1)}6I9rF^Dr(s(!O#GHj;av-tByvV%Qy|$RGhXqOxjy{W zr{QDPT1T;csJKLjdO9!GNvrze_E1lY6K(U8_RxE(UpB*to|oHzyy%hfoj%Q;^~oZ~ zpLvvf^Fwha~Mbu-XKtlW*@_b4jIWj=**-8zufpfmU*LRLAZKit@dt0mvM4 z)`8B)yKlA2#vPG;D!^0bZ}|%Lef6v81~j9qX>isRQEDJI(UCa~zj-7LfIrN*DB_c>fs>zM9#k>moKCl~zB5h@P&!1oYrzCOG-{gY}xuI0_;6 z*x-;zw|~~ye`XvNf*UvrYcGO&^C$CE0ZS6> z-cQePl(z<6-(Vn|0owv3+Bbi}o&Z1u?~u%uy?}!1Td`;_60b+mu!C-mUE42!^sEE4 z1Hm>SQEh;j@GsRh0ARvRi?Bl|?M4Ly{dp1jU0^sD2e&Z4J)gs){S&IVz<@qh$ic_i z*_y^UcC~|s3p4SG_$+=DC=keFj|Q?p^Zy`GS|KG@W^Bi~7ac^A2j#>7#bpy>R8sG}TZyJ_3{uOb73KLQ5i;j*C6ex{74 zb~j)P5SN1qSG+S9#uNoNO%}AeXPATsF7id%Z_Ls61=}c}i2urd9xZ5lCvHd1n>e3r>o_au@ zJ@{5f_@gLm)^#))C4e_FN!s7T61wNn8zY5II#;IK!1n$b-B;g}Z8kVZDOf_Sw2ZBq z9b?k``PS;>ZgRgij$INd-6X(e<2hFK7*6;d#Cc14Q&gX?R)Tu;PcY#G!xf*e&LW@# zt**xNWV<3003M*%j7F=~=@VI{$<@eDn6hssV81r@rh;Mxz&_grfQ2Qoc7NgsZvYX* z5`;wqE!DWKH>&0W@L;?h9C`X?6Fl-`#VU(J;3il~XiOnU{W?*04DY89OmNV<`{%8} 
[... remainder of the base85-encoded PNG data omitted ...]
z1-^{YEL!_V0)9GyIE3M6tHWAG0|w$hUzUY1(uoG(QB72br zOk7L=l}Ief;Ci7ctt3wWb2_76q28G*A-OsVjMkNq=T+Xgbs_|%w-lOkvkCt*GpvnCd}LfNGweaP-erF$OFVYXYn=DgJB#f?0DLon?p~TT4y&ya3Qv z0Ee?K>K7ec2(@UTQ0lejoEZ*|i%uQ}L`dw#AfOJUCB*wgH`lF93VpX-%ROJeY*>X|b9 zju9b6?XaT@x0?q6QV5iy6Yo7+`ldh~+p^GGGu@TvZ8w#l3Cs$KUH2NB2LmI8Hrkgd zoo#lDV{~GK@RxL32onhE7#WBdH`{YdJkj5?4nR<#w2E?8t@f#>*O4MODEC3dLP1-W z)T2ASP_2`bZ!X*I6A<|0ou@${WOd)C$+%jlx+NB3pZw(lV(QN5U~SM5!`OgPu>r5= z-5IQ^h2&ZV?uDr`hCrmyp$H*g>9Q)Lmzo#nLzj%607As%+c{5$*IQ8l1gqH&;vv$a zl;IP?`9AN>7{-p2gD|nF+LV4fzFb{T2ty+my%kq6^?LsU!kk+&2t+jY2D{kGK#;w; zD;QBED?EOBp$622@d-e(2;l+YYrMFSDa9IRS`nIXnE|sobr$&ewIKypkheG~odYUWlMZz~}|eJZobN3`*d9NDxjCLJ;vP0Ae6w z1*jnI3xcPZ;m8G0K{6ZzcMs?mi9$SuPbBO-AD6YV>3D>QglgS)5eV}sVP?)i?A{Pi zCK4k}()dA?bWhM=P>InN0kzo35M`QCE4=B%iEsEadim?qK2FkWKH*XT7v+Co8-3e0 zU+Yz}CuKfGyYYleAo|K%pR4ZGqy#`K@}R=bM5IQeK8Vx>5%t^{VN(L& zgq^cckaD#Rcf(S6hS5 zIE`F`x4a{2S1wrh`MHdH>fJVR&U23dIPu7&pr&!q+X|FIIx`X$o}LT=gYbk9EtQ(T zX;hV+?>6K9t_(ITd>SWrexdJ}>JziOV@(Z=kR;-#0udbro@m_V#W8FQ@u0QjO2d6Y zB82w#BQ}#ml`Qh0ys#N>kq!qYxs7tepOlH-P=({?=_DEd|PTci9y0B z9}oR_{QRMW`h-@@HH(`%y%6DvI)t%ggiSGZmvSCzncXu$i+OQ3CGLHp#yeT@hog5U z+HB$f{7PdK4l6QY<&)P>p4{fXEy3tbrblr;qB}*9Ln8tmNfld~iL3nLbVn$xfNDplTYZVIab&RM{&~XP7 + void operator()(std::span lhs, std::span rhs) { + using clean_t = std::decay_t; + auto pthis = this->make_this_eigen_tensor_(); + auto plhs = this->make_lhs_eigen_tensor_(lhs); + auto prhs = this->make_rhs_eigen_tensor_(rhs); + + if(this_labels().is_hadamard_product(lhs_labels(), rhs_labels())) + pthis->hadamard_assignment(this_labels(), lhs_labels(), + rhs_labels(), *plhs, *prhs); + else if(this_labels().is_contraction(lhs_labels(), rhs_labels())) + pthis->contraction_assignment(this_labels(), lhs_labels(), + rhs_labels(), *plhs, *prhs); + else + throw std::runtime_error( + "MultiplicationVisitor: Batched contraction NYI"); + } +}; + } // namespace tensorwrapper::buffer::detail_ diff --git a/src/tensorwrapper/buffer/mdbuffer.cpp b/src/tensorwrapper/buffer/mdbuffer.cpp index dc016b47..79400829 100644 --- a/src/tensorwrapper/buffer/mdbuffer.cpp +++ b/src/tensorwrapper/buffer/mdbuffer.cpp @@ -157,7 +157,26 @@ auto MDBuffer::multiplication_assignment_(label_type this_labels, const_labeled_reference lhs, const_labeled_reference rhs) -> dsl_reference { - throw std::runtime_error("multiplication NYI"); + const auto& lhs_down = downcast(lhs.object()); + const auto& rhs_down = downcast(rhs.object()); + const auto& lhs_shape = lhs_down.m_shape_; + const auto& rhs_shape = rhs_down.m_shape_; + + auto labeled_lhs_shape = lhs_shape(lhs.labels()); + auto labeled_rhs_shape = rhs_shape(rhs.labels()); + + m_shape_.multiplication_assignment(this_labels, labeled_lhs_shape, + labeled_rhs_shape); + + detail_::MultiplicationVisitor visitor(m_buffer_, this_labels, m_shape_, + lhs.labels(), lhs_shape, + rhs.labels(), rhs_shape); + + wtf::buffer::visit_contiguous_buffer(visitor, lhs_down.m_buffer_, + rhs_down.m_buffer_); + + mark_for_rehash_(); + return *this; } auto MDBuffer::permute_assignment_(label_type this_labels, diff --git a/tests/cxx/unit_tests/tensorwrapper/buffer/detail_/binary_operation_visitor.cpp b/tests/cxx/unit_tests/tensorwrapper/buffer/detail_/binary_operation_visitor.cpp index bbcc0599..e3dcfa2c 100644 --- a/tests/cxx/unit_tests/tensorwrapper/buffer/detail_/binary_operation_visitor.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/buffer/detail_/binary_operation_visitor.cpp @@ -146,3 +146,69 @@ TEMPLATE_LIST_TEST_CASE("SubtractionVisitor", "[buffer][detail_]", REQUIRE(empty_buffer.at(3) == 
TestType(0.0)); } } + +TEMPLATE_LIST_TEST_CASE("MultiplicationVisitor", "[buffer][detail_]", + types::floating_point_types) { + using VisitorType = buffer::detail_::MultiplicationVisitor; + using buffer_type = typename VisitorType::buffer_type; + using label_type = typename VisitorType::label_type; + using shape_type = typename VisitorType::shape_type; + + TestType one{1.0}, two{2.0}, three{3.0}, four{4.0}; + std::vector this_data{one, two, three, four}; + std::vector lhs_data{four, three, two, one}; + std::vector rhs_data{one, one, one, one}; + shape_type shape({4}); + label_type labels("i"); + + std::span lhs_span(lhs_data.data(), lhs_data.size()); + std::span clhs_span(lhs_data.data(), lhs_data.size()); + std::span rhs_span(rhs_data.data(), rhs_data.size()); + std::span crhs_span(rhs_data.data(), rhs_data.size()); + + SECTION("existing buffer: Hadamard") { + buffer_type this_buffer(this_data); + VisitorType visitor(this_buffer, labels, shape, labels, shape, labels, + shape); + + visitor(lhs_span, rhs_span); + REQUIRE(this_buffer.at(0) == TestType(4.0)); + REQUIRE(this_buffer.at(1) == TestType(3.0)); + REQUIRE(this_buffer.at(2) == TestType(2.0)); + REQUIRE(this_buffer.at(3) == TestType(1.0)); + } + + SECTION("existing buffer: contraction") { + buffer_type this_buffer(this_data); + shape_type scalar_shape; + VisitorType visitor(this_buffer, label_type(""), scalar_shape, labels, + shape, labels, shape); + + visitor(lhs_span, rhs_span); + REQUIRE(this_buffer.size() == 1); + REQUIRE(this_buffer.at(0) == TestType(10.0)); + } + + SECTION("existing buffer: batched contraction") { + buffer_type this_buffer(this_data); + shape_type out_shape({2}); + label_type lhs_labels("a,i"); + label_type rhs_labels("i,a"); + VisitorType visitor(this_buffer, labels, out_shape, lhs_labels, shape, + rhs_labels, shape); + + REQUIRE_THROWS_AS(visitor(lhs_span, rhs_span), std::runtime_error); + } + + SECTION("non-existing buffer") { + buffer_type empty_buffer; + VisitorType visitor(empty_buffer, labels, shape, labels, shape, labels, + shape); + + visitor(clhs_span, crhs_span); + REQUIRE(empty_buffer.at(0) == TestType(4.0)); + REQUIRE(empty_buffer.at(1) == TestType(3.0)); + REQUIRE(empty_buffer.at(2) == TestType(2.0)); + REQUIRE(empty_buffer.at(3) == TestType(1.0)); + } +} diff --git a/tests/cxx/unit_tests/tensorwrapper/buffer/mdbuffer.cpp b/tests/cxx/unit_tests/tensorwrapper/buffer/mdbuffer.cpp index 11dd8081..33c13421 100644 --- a/tests/cxx/unit_tests/tensorwrapper/buffer/mdbuffer.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/buffer/mdbuffer.cpp @@ -317,6 +317,44 @@ TEMPLATE_LIST_TEST_CASE("MDBuffer", "", types::floating_point_types) { } } + SECTION("multiplication_assignment_") { + // N.b., dispatching among hadamard, contraction, etc. is the visitor's + // responsibility and happens there. Here we just test hadamard. 
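+        // (Recall: a Hadamard product is element-wise multiplication, so for
+        // matching labels out[i] = lhs[i] * rhs[i]; since the sections below
+        // multiply each operand by itself element-wise, the expected values
+        // are the squares of the inputs.)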
+ + SECTION("scalar") { + label_type labels(""); + MDBuffer result; + result.multiplication_assignment(labels, scalar(labels), + scalar(labels)); + REQUIRE(result.shape() == scalar_shape); + REQUIRE(result.get_elem({}) == TestType(1.0)); + } + + SECTION("vector") { + label_type labels("i"); + MDBuffer result; + result.multiplication_assignment(labels, vector(labels), + vector(labels)); + REQUIRE(result.shape() == vector_shape); + REQUIRE(result.get_elem({0}) == TestType(1.0)); + REQUIRE(result.get_elem({1}) == TestType(4.0)); + REQUIRE(result.get_elem({2}) == TestType(9.0)); + REQUIRE(result.get_elem({3}) == TestType(16.0)); + } + + SECTION("matrix") { + label_type labels("i,j"); + MDBuffer result; + result.multiplication_assignment(labels, matrix(labels), + matrix(labels)); + REQUIRE(result.shape() == matrix_shape); + REQUIRE(result.get_elem({0, 0}) == TestType(1.0)); + REQUIRE(result.get_elem({0, 1}) == TestType(4.0)); + REQUIRE(result.get_elem({1, 0}) == TestType(9.0)); + REQUIRE(result.get_elem({1, 1}) == TestType(16.0)); + } + } + SECTION("scalar_multiplication_") { // TODO: Test with other scalar types when public API supports it using scalar_type = double; From e3f3e8e9ea686f90fe657fabc29ead7a8069f65b Mon Sep 17 00:00:00 2001 From: "Ryan M. Richard" Date: Wed, 31 Dec 2025 15:59:28 -0600 Subject: [PATCH 02/13] purge of buffer::eigen started --- include/tensorwrapper/allocator/allocator.hpp | 2 +- .../tensorwrapper/allocator/allocator_fwd.hpp | 4 - .../tensorwrapper/allocator/contiguous.hpp | 132 ++++- include/tensorwrapper/allocator/eigen.hpp | 181 ------- include/tensorwrapper/buffer/buffer.hpp | 2 +- include/tensorwrapper/buffer/buffer_fwd.hpp | 4 - include/tensorwrapper/buffer/contiguous.hpp | 361 ++++++++----- include/tensorwrapper/buffer/eigen.hpp | 303 ----------- include/tensorwrapper/buffer/mdbuffer.hpp | 337 ------------ .../tensorwrapper/forward_declarations.hpp | 10 +- ...uffer_traits.hpp => contiguous_traits.hpp} | 12 +- src/tensorwrapper/allocator/contiguous.cpp | 65 +++ src/tensorwrapper/allocator/eigen.cpp | 132 ----- src/tensorwrapper/buffer/contiguoues.cpp | 26 - .../buffer/{mdbuffer.cpp => contiguous.cpp} | 69 +-- .../buffer/detail_/eigen_dispatch.hpp | 60 --- .../buffer/detail_/eigen_pimpl.hpp | 176 ------- .../buffer/detail_/eigen_tensor.cpp | 237 --------- .../buffer/detail_/eigen_tensor.hpp | 236 --------- src/tensorwrapper/buffer/eigen.cpp | 259 ---------- .../operations/approximately_equal.cpp | 28 +- src/tensorwrapper/operations/norm.cpp | 30 +- .../tensor/detail_/tensor_factory.cpp | 8 +- src/tensorwrapper/utilities/to_json.cpp | 16 +- .../tensorwrapper/allocator/contiguous.cpp | 187 ++++--- .../tensorwrapper/allocator/eigen.cpp | 128 ----- .../tensorwrapper/buffer/contiguous.cpp | 487 +++++++++++++++--- .../unit_tests/tensorwrapper/buffer/eigen.cpp | 255 --------- .../tensorwrapper/buffer/mdbuffer.cpp | 455 ---------------- 29 files changed, 1014 insertions(+), 3188 deletions(-) delete mode 100644 include/tensorwrapper/allocator/eigen.hpp delete mode 100644 include/tensorwrapper/buffer/eigen.hpp delete mode 100644 include/tensorwrapper/buffer/mdbuffer.hpp rename include/tensorwrapper/types/{mdbuffer_traits.hpp => contiguous_traits.hpp} (83%) create mode 100644 src/tensorwrapper/allocator/contiguous.cpp delete mode 100644 src/tensorwrapper/allocator/eigen.cpp delete mode 100644 src/tensorwrapper/buffer/contiguoues.cpp rename src/tensorwrapper/buffer/{mdbuffer.cpp => contiguous.cpp} (79%) delete mode 100644 src/tensorwrapper/buffer/detail_/eigen_dispatch.hpp 
delete mode 100644 src/tensorwrapper/buffer/detail_/eigen_pimpl.hpp
delete mode 100644 src/tensorwrapper/buffer/detail_/eigen_tensor.cpp
delete mode 100644 src/tensorwrapper/buffer/detail_/eigen_tensor.hpp
delete mode 100644 src/tensorwrapper/buffer/eigen.cpp
delete mode 100644 tests/cxx/unit_tests/tensorwrapper/allocator/eigen.cpp
delete mode 100644 tests/cxx/unit_tests/tensorwrapper/buffer/eigen.cpp
delete mode 100644 tests/cxx/unit_tests/tensorwrapper/buffer/mdbuffer.cpp

diff --git a/include/tensorwrapper/allocator/allocator.hpp b/include/tensorwrapper/allocator/allocator.hpp
index 9f00af46..8d3debbb 100644
--- a/include/tensorwrapper/allocator/allocator.hpp
+++ b/include/tensorwrapper/allocator/allocator.hpp
@@ -16,7 +16,7 @@
 #pragma once
 #include
-#include <tensorwrapper/allocator/eigen.hpp>
+#include <tensorwrapper/allocator/contiguous.hpp>
 #include
 #include

diff --git a/include/tensorwrapper/allocator/allocator_fwd.hpp b/include/tensorwrapper/allocator/allocator_fwd.hpp
index 346cec07..6f6051d7 100644
--- a/include/tensorwrapper/allocator/allocator_fwd.hpp
+++ b/include/tensorwrapper/allocator/allocator_fwd.hpp
@@ -20,14 +20,10 @@ namespace tensorwrapper::allocator {
 
 class AllocatorBase;
 
-template<typename FloatType>
-class Eigen;
-
 class Local;
 
 class Replicated;
 
-template<typename FloatType>
 class Contiguous;
 
 } // namespace tensorwrapper::allocator

diff --git a/include/tensorwrapper/allocator/contiguous.hpp b/include/tensorwrapper/allocator/contiguous.hpp
index 6231257f..b30fb4e4 100644
--- a/include/tensorwrapper/allocator/contiguous.hpp
+++ b/include/tensorwrapper/allocator/contiguous.hpp
@@ -17,6 +17,7 @@
 #pragma once
 #include
 #include
+#include
 
 namespace tensorwrapper::allocator {
 
@@ -24,11 +25,10 @@ namespace tensorwrapper::allocator {
  *
  * @tparam FloatType Type of the elements in the contiguous buffer.
  */
-template<typename FloatType>
 class Contiguous : public Replicated {
 private:
     /// Type of *this
-    using my_type = Contiguous<FloatType>;
+    using my_type = Contiguous;
 
     /// Type *this derives from
     using base_type = Replicated;
 
@@ -41,23 +41,64 @@ class Contiguous : public Replicated {
     using base_type::layout_pointer;
     ///@}
 
-    /// Type of each element in the tensor
-    using element_type = FloatType;
+    /// Types associated with the buffer *this makes
+    using buffer_type = buffer::Contiguous;
+    using buffer_reference = buffer_type&;
+    using const_buffer_reference = const buffer_type&;
+    using buffer_pointer = std::unique_ptr<buffer_type>;
 
-    /// Type of the buffer associated with *this
-    using contiguous_buffer_type = buffer::Contiguous<FloatType>;
-    using contiguous_pointer     = std::unique_ptr<contiguous_buffer_type>;
+    using size_type = std::size_t;
 
     /// Type of initializer lists
+    template<typename T> using rank0_il = typename types::ILTraits<T, 0>::type;
+
+    template<typename T> using rank1_il = typename types::ILTraits<T, 1>::type;
+
+    template<typename T> using rank2_il = typename types::ILTraits<T, 2>::type;
+
+    template<typename T> using rank3_il = typename types::ILTraits<T, 3>::type;
+
+    template<typename T> using rank4_il = typename types::ILTraits<T, 4>::type;
 
     /// Pull in base class's ctors
    using base_type::base_type;
 
+    /** @brief Determines if @p buffer can be rebound as a Contiguous buffer.
+     *
+     * Rebinding a buffer allows the same memory to be viewed as a (possibly)
+     * different type of buffer.
+     *
+     * @param[in] buffer The tensor we are attempting to rebind.
+     *
+     * @return True if @p buffer can be rebound to the type of buffer
+     *         associated with this allocator and false otherwise.
+     *
+     * @throw None No throw guarantee.
+     */
+    static bool can_rebind(const_buffer_base_reference buffer);
+
+    /** @brief Rebinds a buffer to the same type as *this.
+     *
+     * This method will convert @p buffer into a buffer which could have been
+     * allocated by *this.
If @p buffer was allocated as such a buffer already, + * then this method is simply a downcast. + * + * @param[in] buffer The buffer to rebind. + * + * @return A mutable reference to @p buffer viewed as a buffer that could + * have been allocated by *this. + * + * @throw std::runtime_error if can_rebind(buffer) is false. Strong throw + * guarantee. + */ + static buffer_reference rebind(buffer_base_reference buffer); + static const_buffer_reference rebind(const_buffer_base_reference buffer); + /** @brief Allocates a contiguous pointer given @p layout. * * @note These methods shadow the function of the same name in the base @@ -72,22 +113,41 @@ class Contiguous : public Replicated { * @return A pointer to the newly allocated buffer::Contiguous object. */ ///@{ - contiguous_pointer allocate(const_layout_reference layout) { + buffer_pointer allocate(const_layout_reference layout) { return allocate(layout.clone_as()); } - contiguous_pointer allocate(layout_pointer layout) { + buffer_pointer allocate(layout_pointer layout) { auto p = allocate_(std::move(layout)); - return detail_::static_pointer_cast(p); + return detail_::static_pointer_cast(p); } ///@} /// Constructs a contiguous buffer from an initializer list ///@{ - contiguous_pointer construct(rank0_il il) { return construct_(il); } - contiguous_pointer construct(rank1_il il) { return construct_(il); } - contiguous_pointer construct(rank2_il il) { return construct_(il); } - contiguous_pointer construct(rank3_il il) { return construct_(il); } - contiguous_pointer construct(rank4_il il) { return construct_(il); } + template + buffer_pointer construct(rank0_il il) { + return il_construct_(il); + } + + template + buffer_pointer construct(rank1_il il) { + return il_construct_(il); + } + + template + buffer_pointer construct(rank2_il il) { + return il_construct_(il); + } + + template + buffer_pointer construct(rank3_il il) { + return il_construct_(il); + } + + template + buffer_pointer construct(rank4_il il) { + return il_construct_(il); + } ///@} /** @brief Constructs a contiguous buffer and sets all elements to @p value. @@ -99,25 +159,45 @@ class Contiguous : public Replicated { * @return A pointer to the newly constructed buffer. 
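     *
     * A minimal usage sketch (illustrative; assumes an allocator instance
     * `alloc` and a layout pointer `playout` describing the desired shape):
     *
     * @code
     * auto pbuffer = alloc.construct(playout, 1.0); // every element == 1.0
     * @endcode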
*/ ///@{ - contiguous_pointer construct(const_layout_reference layout, - element_type value) { + template + buffer_pointer construct(const_layout_reference layout, ElementType value) { return construct(layout.clone_as(), std::move(value)); } - contiguous_pointer construct(layout_pointer layout, element_type value) { - return construct_(std::move(layout), std::move(value)); + + template + buffer_pointer construct(layout_pointer layout, ElementType value) { + return construct_(std::move(layout), wtf::fp::make_float(value)); } ///@} protected: - virtual contiguous_pointer construct_(rank0_il il) = 0; - virtual contiguous_pointer construct_(rank1_il il) = 0; - virtual contiguous_pointer construct_(rank2_il il) = 0; - virtual contiguous_pointer construct_(rank3_il il) = 0; - virtual contiguous_pointer construct_(rank4_il il) = 0; + buffer_base_pointer allocate_(layout_pointer playout) override; /// To be overridden by the derived class to implement construct - virtual contiguous_pointer construct_(layout_pointer layout, - element_type value) = 0; + virtual buffer_pointer construct_(layout_pointer layout, + wtf::fp::Float value); + + base_pointer clone_() const override { + return std::make_unique(*this); + } + + /// Implements are_equal, by deferring to the base's operator== + bool are_equal_(const_base_reference rhs) const noexcept override { + return base_type::template are_equal_impl_(rhs); + } + +private: + layout_pointer layout_from_extents_(const std::vector& extents); + + template + buffer_pointer il_construct_(ILType il) { + throw std::runtime_error("Fix me!"); + // auto [extents, data] = detail_::unwrap_il(il); + // auto pbuffer = this->allocate(layout_from_extents_(extents)); + // auto& buffer_down = rebind(*pbuffer); + // buffer_down.copy(data); + // return pbuffer; + } }; } // namespace tensorwrapper::allocator diff --git a/include/tensorwrapper/allocator/eigen.hpp b/include/tensorwrapper/allocator/eigen.hpp deleted file mode 100644 index 851b5f0a..00000000 --- a/include/tensorwrapper/allocator/eigen.hpp +++ /dev/null @@ -1,181 +0,0 @@ -/* - * Copyright 2024 NWChemEx-Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once -#include -#include -#include - -namespace tensorwrapper::allocator { - -/** @brief Used to allocate buffers which rely on Eigen tensors. - * - * @tparam FloatType The numerical type the buffer will use to store the - * elements. - * @tparam Rank The rank of the tensor stored in the buffer. - * - * This allocator is capable of creating buffers with Eigen tensors in them. 
- * - */ -template -class Eigen : public Contiguous { -private: - /// The type of *this - using my_type = Eigen; - - /// The class *this inherits from - using my_base_type = Contiguous; - -public: - // Pull in base class's types - using typename my_base_type::base_pointer; - using typename my_base_type::buffer_base_pointer; - using typename my_base_type::buffer_base_reference; - using typename my_base_type::const_base_reference; - using typename my_base_type::const_buffer_base_reference; - using typename my_base_type::contiguous_pointer; - using typename my_base_type::element_type; - using typename my_base_type::layout_pointer; - using typename my_base_type::rank0_il; - using typename my_base_type::rank1_il; - using typename my_base_type::rank2_il; - using typename my_base_type::rank3_il; - using typename my_base_type::rank4_il; - using typename my_base_type::runtime_view_type; - - /// Type of a buffer containing an Eigen tensor - using eigen_buffer_type = buffer::Eigen; - - /// Type of a mutable reference to an object of type eigen_buffer_type - using eigen_buffer_reference = eigen_buffer_type&; - - /// Type of a read-only reference to an object of type eigen_buffer_type - using const_eigen_buffer_reference = const eigen_buffer_type&; - - /// Type of a pointer to an eigen_buffer_type object - using eigen_buffer_pointer = std::unique_ptr; - - // Reuse base class's ctors - using my_base_type::my_base_type; - - // ------------------------------------------------------------------------- - // -- Ctor - // ------------------------------------------------------------------------- - - /** @brief Creates a new Eigen allocator tied to the runtime @p rv. - * - * This ctor simply dispatches to the base class's ctor with the same - * signature. See the base class's description for more detail. - * - * @param[in] rv The runtime to use for allocating. - * - * @throw None No throw guarantee. - */ - explicit Eigen(runtime_view_type rv) : my_base_type(std::move(rv)) {} - - /** @brief Determines if @p buffer can be rebound as an Eigen buffer. - * - * Rebinding a buffer allows the same memory to be viewed as a (possibly) - * different type of buffer. - * - * @param[in] buffer The tensor we are attempting to rebind. - * - * @return True if @p buffer can be rebound to the type of buffer - * associated with this allocator and false otherwise. - * - * @throw None No throw guarantee - */ - static bool can_rebind(const_buffer_base_reference buffer); - - /** @brief Rebinds a buffer to the same type as *this. - * - * This method will convert @p buffer into a buffer which could have been - * allocated by *this. If @p buffer was allocated as such a buffer already, - * then this method is simply a downcast. - * - * @param[in] buffer The buffer to rebind. - * - * @return A mutable reference to @p buffer viewed as a buffer that could - * have been allocated by *this. - * - * @throw std::runtime_error if can_rebind(buffer) is false. Strong throw - * guarantee. - */ - static eigen_buffer_reference rebind(buffer_base_reference buffer); - - /** @brief Rebinds a buffer to the same type as *this. - * - * This method is the same as the non-const version except that the result - * is read-only. See the description for the non-const version for more - * details. - * - * @param[in] buffer The buffer to rebind. - * - * @return A read-only reference to @p buffer viewed as if it was - * allocated by *this. - * - * @throw std::runtime_error if can_rebind(buffer) is false. Strong throw - * guarantee. 
- */ - static const_eigen_buffer_reference rebind( - const_buffer_base_reference buffer); - - static base_pointer make_eigen_allocator(unsigned int rank, - runtime_view_type rv); - -protected: - /** @brief Polymorphic allocation of a new buffer. - * - * This method overrides the polymorphic allocation so that it creates a - * new Eigen buffer. - */ - buffer_base_pointer allocate_(layout_pointer playout) override; - - contiguous_pointer construct_(rank0_il il) override; - contiguous_pointer construct_(rank1_il il) override; - contiguous_pointer construct_(rank2_il il) override; - contiguous_pointer construct_(rank3_il il) override; - contiguous_pointer construct_(rank4_il il) override; - - contiguous_pointer construct_(layout_pointer playout, - element_type value) override; - - /// Implements clone by calling copy ctor - base_pointer clone_() const override { - return std::make_unique(*this); - } - - /// Implements are_equal, by deferring to the base's operator== - bool are_equal_(const_base_reference rhs) const noexcept override { - return my_base_type::template are_equal_impl_(rhs); - } - -private: - template - contiguous_pointer il_construct_(ILType il); -}; - -// ----------------------------------------------------------------------------- -// -- Explicit class template declarations -// ----------------------------------------------------------------------------- - -#define DECLARE_EIGEN_ALLOCATOR(TYPE) extern template class Eigen - -TW_APPLY_FLOATING_POINT_TYPES(DECLARE_EIGEN_ALLOCATOR); - -#undef DECLARE_EIGEN_ALLOCATOR - -} // namespace tensorwrapper::allocator diff --git a/include/tensorwrapper/buffer/buffer.hpp b/include/tensorwrapper/buffer/buffer.hpp index 93bde979..980b69c5 100644 --- a/include/tensorwrapper/buffer/buffer.hpp +++ b/include/tensorwrapper/buffer/buffer.hpp @@ -16,7 +16,7 @@ #pragma once #include -#include +#include #include #include diff --git a/include/tensorwrapper/buffer/buffer_fwd.hpp b/include/tensorwrapper/buffer/buffer_fwd.hpp index 940e3b2b..98f41eea 100644 --- a/include/tensorwrapper/buffer/buffer_fwd.hpp +++ b/include/tensorwrapper/buffer/buffer_fwd.hpp @@ -20,12 +20,8 @@ namespace tensorwrapper::buffer { class BufferBase; -template class Contiguous; -template -class Eigen; - class Local; class Replicated; diff --git a/include/tensorwrapper/buffer/contiguous.hpp b/include/tensorwrapper/buffer/contiguous.hpp index ed09aebb..3946c0a0 100644 --- a/include/tensorwrapper/buffer/contiguous.hpp +++ b/include/tensorwrapper/buffer/contiguous.hpp @@ -16,196 +16,321 @@ #pragma once #include -#include -#include +#include +#include +#include namespace tensorwrapper::buffer { -/** @brief Denotes that a buffer is held contiguously. +/** @brief A multidimensional (MD) contiguous buffer. * - * Contiguous buffers are such that given a pointer to the first element `p`, - * the `i`-th element (`i` is zero based) is given by dereferencing the - * pointer `p + i`. Note that contiguous buffers are always vectors and storing - * higher rank tensors in a contiguous buffer requires "vectorization" of the - * tensor. In C++ vectorization is usually done in row-major format. - * - * @tparam FloatType the type of elements in the buffer. + * This class is a dense multidimensional buffer of contiguous floating-point + * values. 
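+ *
+ * A minimal usage sketch (values and extents are illustrative):
+ * @code
+ * // 2x2 row-major buffer holding {1.0, 2.0, 3.0, 4.0}
+ * Contiguous c(std::vector<double>{1.0, 2.0, 3.0, 4.0},
+ *              Contiguous::shape_type({2, 2}));
+ * c.get_elem({1, 0}); // element at row 1, column 0; compares equal to 3.0
+ * @endcode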
*/ -template class Contiguous : public Replicated { private: /// Type *this derives from using my_base_type = Replicated; -public: - /// Type of each element - using element_type = FloatType; - - /// Type of a mutable reference to an object of type element_type - using reference = element_type&; + /// Type defining the types for the public API of *this + using traits_type = types::ClassTraits; - /// Type of a read-only reference to an object of type element_type - using const_reference = const element_type&; + /// Type of *this + using my_type = Contiguous; - using element_vector = std::vector; +public: + /// Add types from traits_type to public API + ///@{ + using value_type = typename traits_type::value_type; + using reference = typename traits_type::reference; + using const_reference = typename traits_type::const_reference; + using buffer_type = typename traits_type::buffer_type; + using buffer_view = typename traits_type::buffer_view; + using const_buffer_view = typename traits_type::const_buffer_view; + using rank_type = typename traits_type::rank_type; + using shape_type = typename traits_type::shape_type; + using const_shape_view = typename traits_type::const_shape_view; + using size_type = typename traits_type::size_type; + ///@} - /// Type of a pointer to a mutable element_type object - using pointer = element_type*; + using index_vector = std::vector; + using typename my_base_type::label_type; + using string_type = std::string; - /// Type of a pointer to a read-only element_type object - using const_pointer = const element_type*; + // ------------------------------------------------------------------------- + // -- Ctors, assignment, and dtor + // ------------------------------------------------------------------------- - /// Type used for offsets and indexing - using size_type = std::size_t; + /** @brief Creates an empty multi-dimensional buffer. + * + * The resulting buffer will have a shape of rank 0, but a size of 0. Thus + * the buffer can NOT be used to store any elements (including treating + * *this as a scalar). The resulting buffer can be assigned to or moved + * to to populate it. + * + * @throw None No throw guarantee. + */ + Contiguous() noexcept; - /// Type of a multi-dimensional index - using index_vector = std::vector; + /** @brief Treats allocated memory like a multi-dimensional buffer. + * + * @tparam T The type of the elements in the buffer. Must satisfy the + * FloatingPoint concept. + * + * This ctor will use @p element to create a buffer_type object and then + * pass that along with @p shape to the main ctor. + * + * @param[in] elements The elements to be used as the backing store. + * @param[in] shape The shape of *this. + * + * @throw std::invalid_argument if the size of @p elements does not match + * the size implied by @p shape. Strong throw + * guarantee. + * @throw std::bad_alloc if there is a problem allocating memory for the + * internal state. Strong throw guarantee. + */ + template + Contiguous(std::vector elements, shape_type shape) : + Contiguous(buffer_type(std::move(elements)), std::move(shape)) {} - // Pull in base's ctors - using my_base_type::my_base_type; + /** @brief The main ctor. + * + * This ctor will create *this using @p buffer as the backing store and + * @p shape to describe the geometry of the multidimensional array. + * + * All other ctors (aside from copy and move) delegate to this one. + * + * @param[in] buffer The buffer to be used as the backing store. + * @param[in] shape The shape of *this. 
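+     *
+     * For example (illustrative; a rank-1 buffer of four elements):
+     * @code
+     * Contiguous v(buffer_type(std::vector<double>{1.0, 2.0, 3.0, 4.0}),
+     *              shape_type({4}));
+     * @endcode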
+ * + * @throw std::invalid_argument if the size of @p buffer does not match + * the size implied by @p shape. Strong throw + * guarantee. + * @throw std::bad_alloc if there is a problem allocating memory for the + * internal state. Strong throw guarantee. + */ + Contiguous(buffer_type buffer, shape_type shape); - /// Returns the number of elements in contiguous memory - size_type size() const noexcept { return size_(); } + /** @brief Initializes *this to a deep copy of @p other. + * + * This ctor will initialize *this to be a deep copy of @p other. + * + * @param[in] other The Contiguous to copy. + * + * @throw std::bad_alloc if there is a problem allocating memory for the + * internal state. Strong throw guarantee. + */ + Contiguous(const Contiguous& other) = default; - /** @brief Returns a mutable pointer to the first element in contiguous - * memory + /** @brief Move ctor. * - * @warning Returning a mutable pointer to the underlying data makes it - * no longer possible for *this to reliably track changes to that - * data. Calling this method may have performance implications, so - * use only when strictly required. + * This ctor will initialize *this by taking the state from @p other. + * After this ctor is called @p other is left in a valid but unspecified + * state. * - * @return A read/write pointer to the data. + * @param[in,out] other The Contiguous to move from. * * @throw None No throw guarantee. */ - pointer get_mutable_data() noexcept { return get_mutable_data_(); } + Contiguous(Contiguous&& other) noexcept = default; + + /** @brief Copy assignment. + * + * This operator will make *this a deep copy of @p other. + * + * @param[in] other The Contiguous to copy. + * + * @return *this after the assignment. + * + * @throw std::bad_alloc if there is a problem allocating memory for the + * internal state. Strong throw guarantee. + */ + Contiguous& operator=(const Contiguous& other) = default; - /** @brief Returns an immutable pointer to the first element in contiguous - * memory + /** @brief Move assignment. * - * @return A read-only pointer to the data. + * This operator will make *this take the state from @p other. After + * this operator is called @p other is left in a valid but unspecified + * state. + * + * @param[in,out] other The Contiguous to move from. + * + * @return *this after the assignment. * * @throw None No throw guarantee. */ - const_pointer get_immutable_data() const noexcept { - return get_immutable_data_(); - } + Contiguous& operator=(Contiguous&& other) noexcept = default; - /** @brief Retrieves a tensor element by offset. + /** @brief Defaulted dtor. * - * This method is used to access the element in an immutable way. + * @throw None No throw guarantee. + */ + ~Contiguous() override = default; + + // ------------------------------------------------------------------------- + // -- State Accessors + // ------------------------------------------------------------------------- + + /** @brief Returns (a view of) the shape of *this. * - * @param[in] index The offset of the element being retrieved. + * The shape of *this describes the geometry of the underlying + * multidimensional array. * - * @return A read-only reference to the element. + * @return A view of the shape of *this. * - * @throw std::runtime_error if the number of indices does not match the - * rank of the tensor. Strong throw guarantee. + * @throw std::bad_alloc if there is a problem allocating memory for the + * returned view. Strong throw guarantee. 
*/ - const_reference get_elem(index_vector index) const { - if(index.size() != this->rank()) - throw std::runtime_error("Number of offsets must match rank"); - return get_elem_(index); - } + const_shape_view shape() const; - /** @brief Sets a tensor element by offset. + /** @brief The total number of elements in *this. * - * This method is used to change the value of an element. + * The total number of elements is the product of the extents of each + * mode of *this. * - * @param[in] index The offset of the element being updated. - * @param[in] new_value The new value of the element. + * @return The total number of elements in *this. * - * @throw std::runtime_error if the number of indices does not match the - * rank of the tensor. Strong throw guarantee. + * @throw None No throw guarantee. */ - void set_elem(index_vector index, element_type new_value) { - if(index.size() != this->rank()) - throw std::runtime_error("Number of offsets must match rank"); - return set_elem_(index, new_value); - } + size_type size() const noexcept; - /** @brief Retrieves a tensor element by ordinal offset. + /** @brief Returns the element with the offsets specified by @p index. * - * This method is used to access the element in an immutable way. + * This method will retrieve a const reference to the element at the + * offsets specified by @p index. The length of @p index must be equal + * to the rank of *this and each entry in @p index must be less than the + * extent of the corresponding mode of *this. * - * @param[in] index The ordinal offset of the element being retrieved. + * This method can only be used to retrieve elements from *this. To modify + * elements use set_elem(). * - * @return A read-only reference to the element. + * @param[in] index The offsets into each mode of *this for the desired + * element. * - * @throw std::runtime_error if the index is greater than the number of - * elements. Strong throw guarantee. + * @return A const reference to the element at the specified offsets. */ - const_reference get_data(size_type index) const { - if(index >= this->size()) - throw std::runtime_error("Index greater than number of elements"); - return get_data_(std::move(index)); - } + const_reference get_elem(index_vector index) const; - /** @brief Sets a tensor element by ordinal offset. + /** @brief Sets the specified element to @p new_value. * - * This method is used to change the value of an element. + * This method will set the element at the offsets specified by @p index. + * The length of @p index must be equal to the rank of *this and each + * entry in @p index must be less than the extent of the corresponding + * mode of *this. * - * @param[in] index The ordinal offset of the element being updated. - * @param[in] new_value The new value of the element. + * @param[in] index The offsets into each mode of *this for the desired + * element. + * @param[in] new_value The new value for the specified element. * - * @throw std::runtime_error if the index is greater than the number of - * elements. Strong throw guarantee. + * @throw std::out_of_range if any entry in @p index is invalid. Strong + * throw guarantee. */ - void set_data(size_type index, element_type new_value) { - if(index >= this->size()) - throw std::runtime_error("Index greater than number of elements"); - set_data_(index, new_value); - } + void set_elem(index_vector index, value_type new_value); - /** @brief Sets all elements to a value. + /** @brief Returns a view of the data. * - * @param[in] value The new value of all elements. 
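+     *
+     * For example (illustrative; assumes a buffer `b` whose shape is {2, 2}):
+     * @code
+     * b.set_elem({0, 1}, 2.0);     // writes row 0, column 1
+     * auto x = b.get_elem({0, 1}); // x now compares equal to 2.0
+     * @endcode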
+ * This method is deprecated. Use set_slice instead. + */ + [[deprecated]] buffer_view get_mutable_data(); + + /** @brief Returns a read-only view of the data. * - * @throw None No throw guarantee. + * This method is deprecated. Use get_slice instead. */ - void fill(element_type value) { fill_(std::move(value)); } + [[deprecated]] const_buffer_view get_immutable_data() const; - /** @brief Sets elements using a list of values. + // ------------------------------------------------------------------------- + // -- Utility Methods + // ------------------------------------------------------------------------- + + /** @brief Compares two Contiguous objects for exact equality. + * + * Two Contiguous objects are exactly equal if they have the same shape and + * if all of their corresponding elements are bitwise identical. + * In practice, the implementation stores a hash of the elements in the + * tensor and compares the hashes for equality rather than checking each + * element individually. * - * @param[in] values The new values of all elements. + * @param[in] rhs The Contiguous to compare against. + * + * @return True if *this and @p rhs are exactly equal and false otherwise. * * @throw None No throw guarantee. */ - void copy(const element_vector& values) { copy_(values); } + bool operator==(const my_type& rhs) const noexcept; protected: - /// Derived class can override if it likes - virtual size_type size_() const noexcept { return layout().shape().size(); } + /// Makes a deep polymorphic copy of *this + buffer_base_pointer clone_() const override; - /// Derived class should implement according to data() description - virtual pointer get_mutable_data_() noexcept = 0; + /// Implements are_equal by checking that rhs is an Contiguous and then + /// calling operator== + bool are_equal_(const_buffer_base_reference rhs) const noexcept override; - /// Derived class should implement according to data() const description - virtual const_pointer get_immutable_data_() const noexcept = 0; + dsl_reference addition_assignment_(label_type this_labels, + const_labeled_reference lhs, + const_labeled_reference rhs) override; + dsl_reference subtraction_assignment_(label_type this_labels, + const_labeled_reference lhs, + const_labeled_reference rhs) override; + dsl_reference multiplication_assignment_( + label_type this_labels, const_labeled_reference lhs, + const_labeled_reference rhs) override; - /// Derived class should implement according to get_elem() - virtual const_reference get_elem_(index_vector index) const = 0; + dsl_reference permute_assignment_(label_type this_labels, + const_labeled_reference rhs) override; - /// Derived class should implement according to set_elem() - virtual void set_elem_(index_vector index, element_type new_value) = 0; + dsl_reference scalar_multiplication_(label_type this_labels, double scalar, + const_labeled_reference rhs) override; - /// Derived class should implement according to get_data() - virtual const_reference get_data_(size_type index) const = 0; + /// Calls add_to_stream_ on a stringstream to implement + string_type to_string_() const override; - /// Derived class should implement according to set_data() - virtual void set_data_(size_type index, element_type new_value) = 0; + /// Uses Eigen's printing capabilities to add to stream + std::ostream& add_to_stream_(std::ostream& os) const override; - /// Derived class should implement according to fill() - virtual void fill_(element_type) = 0; +private: + /// Type for storing the hash of *this + using hash_type = 
std::size_t; - virtual void copy_(const element_vector& values) = 0; -}; + /// Logic for validating that an index is within the bounds of the shape + void check_index_(const index_vector& index) const; + + /// Converts a coordinate index to a linear (ordinal) index + size_type coordinate_to_ordinal_(index_vector index) const; + + /// Returns the hash for the current state of *this, computing first if + /// needed. + hash_type get_hash_() const { + if(m_recalculate_hash_ or !m_hash_caching_) update_hash_(); + return m_hash_; + } + + /// Computes the hash for the current state of *this + void update_hash_() const; + + /// Designates that the state may have changed and to recalculate the hash. + /// This function is really just for readability and clarity. + void mark_for_rehash_() const { m_recalculate_hash_ = true; } + + /// Designates that state changes are not trackable and we should + /// recalculate the hash each time. + void turn_off_hash_caching_() const { m_hash_caching_ = false; } -#define DECLARE_CONTIG_BUFFER(TYPE) extern template class Contiguous + /// Tracks whether the hash needs to be redetermined + mutable bool m_recalculate_hash_ = true; -TW_APPLY_FLOATING_POINT_TYPES(DECLARE_CONTIG_BUFFER); + /// Tracks whether hash caching has been turned off + mutable bool m_hash_caching_ = true; -#undef DECLARE_CONTIG_BUFFER + /// Holds the computed hash value for this instance's state + mutable hash_type m_hash_ = 0; + + /// How the hyper-rectangular array is shaped + shape_type m_shape_; + + /// The flat buffer holding the elements of *this + buffer_type m_buffer_; +}; } // namespace tensorwrapper::buffer diff --git a/include/tensorwrapper/buffer/eigen.hpp b/include/tensorwrapper/buffer/eigen.hpp deleted file mode 100644 index d41e384e..00000000 --- a/include/tensorwrapper/buffer/eigen.hpp +++ /dev/null @@ -1,303 +0,0 @@ -/* - * Copyright 2024 NWChemEx-Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once -#include -#include - -namespace tensorwrapper::buffer { -namespace detail_ { -template -class EigenPIMPL; - -} - -/** @brief A buffer which wraps an Eigen object. - * - * @tparam FloatType The type used to store the elements of the object. - * - * Right now the backend is always an Eigen Tensor, but concievably it could - * be generalized to be matrices or Eigen's map class. 
- */
-template<typename FloatType>
-class Eigen : public Contiguous<FloatType> {
-private:
-    /// Type of *this
-    using my_type = Eigen<FloatType>;
-
-    /// Type *this derives from
-    using my_base_type = Contiguous<FloatType>;
-
-public:
-    /// Pull in base class's types
-    using typename my_base_type::allocator_base_pointer;
-    using typename my_base_type::buffer_base_pointer;
-    using typename my_base_type::const_allocator_reference;
-    using typename my_base_type::const_buffer_base_reference;
-    using typename my_base_type::const_labeled_reference;
-    using typename my_base_type::const_layout_reference;
-    using typename my_base_type::const_pointer;
-    using typename my_base_type::const_reference;
-    using typename my_base_type::dsl_reference;
-    using typename my_base_type::element_type;
-    using typename my_base_type::element_vector;
-    using typename my_base_type::index_vector;
-    using typename my_base_type::label_type;
-    using typename my_base_type::layout_pointer;
-    using typename my_base_type::layout_type;
-    using typename my_base_type::pointer;
-    using typename my_base_type::polymorphic_base;
-    using typename my_base_type::reference;
-    using typename my_base_type::size_type;
-
-    using pimpl_type            = detail_::EigenPIMPL<FloatType>;
-    using pimpl_pointer         = std::unique_ptr<pimpl_type>;
-    using pimpl_reference       = pimpl_type&;
-    using const_pimpl_reference = const pimpl_type&;
-
-    /** @brief Creates a buffer with no layout and a default initialized
-     *         tensor.
-     *
-     *  @throw None No throw guarantee.
-     */
-    Eigen() noexcept;
-
-    /** @brief Wraps the provided tensor.
-     *
-     *  @tparam DataType The type of the input tensor. Must be implicitly
-     *                   convertible to an object of type data_type.
-     *
-     *  @param[in] t The tensor to wrap.
-     *  @param[in] layout The physical layout of @p t.
-     *
-     *  @throw std::bad_alloc if there is a problem copying @p layout. Strong
-     *         throw guarantee.
-     */
-    Eigen(pimpl_pointer pimpl, const_layout_reference layout,
-          const_allocator_reference allocator) :
-      Eigen(std::move(pimpl), layout.template clone_as<layout_type>(),
-            allocator.clone()) {}
-
-    Eigen(pimpl_pointer pimpl, layout_pointer playout,
-          allocator_base_pointer pallocator);
-
-    /** @brief Initializes *this with a copy of @p other.
-     *
-     *  @param[in] other The object to copy.
-     *
-     *  @throw std::bad_alloc if there is a problem allocating the copy. Strong
-     *         throw guarantee.
-     */
-    Eigen(const Eigen& other);
-
-    /** @brief Initializes *this with the state from @p other.
-     *
-     *  @param[in,out] other The object to take the state from. After this call
-     *                       @p other will be in a valid, but otherwise
-     *                       undefined state.
-     *
-     *  @throw None No throw guarantee.
-     */
-    Eigen(Eigen&& other) noexcept;
-
-    /** @brief Replaces the state in *this with a copy of the state in @p rhs.
-     *
-     *  @param[in] rhs The object to copy the state from.
-     *
-     *  @return *this after replacing its state with a copy of @p rhs.
-     *
-     *  @throw std::bad_alloc if the copy fails to allocate memory. Strong
-     *         throw guarantee.
-     */
-    Eigen& operator=(const Eigen& rhs);
-
-    /** @brief Replaces the state in *this with the state in @p rhs.
-     *
-     *  @param[in,out] rhs The Eigen object to take the state from. After this
-     *                     method is called @p rhs will be in a valid, but
-     *                     otherwise undefined state.
-     *
-     *  @return *this after taking the state from @p rhs.
-     *
-     *  @throw None No throw guarantee.
- */ - Eigen& operator=(Eigen&& rhs) noexcept; - - /// Defaulted no throw dtor - ~Eigen() noexcept; - - // ------------------------------------------------------------------------- - // -- Utility methods - // ------------------------------------------------------------------------- - - /** @brief Exchanges the contents of *this with @p other. - * - * @param[in,out] other The buffer to swap state with. - * - * @throw None No throw guarantee. - */ - void swap(Eigen& other) noexcept; - - /** @brief Is *this value equal to @p rhs? - * - * Two Eigen objects are value equal if they both have the same layout and - * they both have the same values. - * - * @note For tensors where the @p FloatType is an uncertain floating point - * number, the tensors are required to have the same sources of - * uncertainty. - * - * @param[in] rhs The object to compare against. - * - * @return True if *this is value equal to @p rhs and false otherwise. - * - * @throw None No throw guarantee. - */ - bool operator==(const Eigen& rhs) const noexcept; - - /** @brief Is *this different from @p rhs? - * - * This class defines different as not value equal. See operator== for the - * definition of value equal. - * - * @param[in] rhs The object to compare *this to. - * - * @return False if *this is value equal to @p rhs and true otherwise. - * - * @throw None No throw guarantee. - */ - bool operator!=(const Eigen& rhs) const noexcept { return !(*this == rhs); } - -protected: - /// Implements clone by calling copy ctor - buffer_base_pointer clone_() const override; - - /// Implements are_equal by calling are_equal_impl_ - bool are_equal_(const_buffer_base_reference rhs) const noexcept override; - - /// Implements addition_assignment by calling addition_assignment on state - dsl_reference addition_assignment_(label_type this_labels, - const_labeled_reference lhs, - const_labeled_reference rhs) override; - - /// Calls subtraction_assignment on each member - dsl_reference subtraction_assignment_(label_type this_labels, - const_labeled_reference lhs, - const_labeled_reference rhs) override; - - /// Calls multiplication_assignment on each member - dsl_reference multiplication_assignment_( - label_type this_labels, const_labeled_reference lhs, - const_labeled_reference rhs) override; - - /// Calls permute_assignment on each member - dsl_reference permute_assignment_(label_type this_labels, - const_labeled_reference rhs) override; - - /// Scales *this by @p scalar - dsl_reference scalar_multiplication_(label_type this_labels, double scalar, - const_labeled_reference rhs) override; - - /// Implements getting the raw pointer - pointer get_mutable_data_() noexcept override; - - /// Implements getting the raw pointer (read-only) - const_pointer get_immutable_data_() const noexcept override; - - /// Implements read-only element access - const_reference get_elem_(index_vector index) const override; - - // Implements element updating - void set_elem_(index_vector index, element_type new_value) override; - - /// Implements read-only element access by ordinal index - const_reference get_data_(size_type index) const override; - - // Implements element updating by ordinal index - void set_data_(size_type index, element_type new_value) override; - - /// Implements filling the tensor - void fill_(element_type value) override; - - /// Implements copying new values into the tensor - void copy_(const element_vector& values) override; - - /// Implements to_string - typename polymorphic_base::string_type to_string_() const override; - - /// 
Implements add_to_stream
-    std::ostream& add_to_stream_(std::ostream& os) const override;
-
-private:
-    /// True if *this has a PIMPL
-    bool has_pimpl_() const noexcept;
-
-    /// Throws std::runtime_error if *this has no PIMPL
-    void assert_pimpl_() const;
-
-    /// Asserts *this has a PIMPL then returns it
-    pimpl_reference pimpl_();
-
-    /// Assert *this has a PIMPL then returns it
-    const_pimpl_reference pimpl_() const;
-
-    /// The object actually implementing *this
-    pimpl_pointer m_pimpl_;
-};
-
-/** @brief Wraps downcasting a buffer to an Eigen buffer.
- *
- *  @tparam FloatType The type of the elements in the resulting Buffer.
- *
- *  This function is a convenience function for using an allocator to convert
- *  @p b to a buffer::Eigen object.
- *
- *  @param[in] b The BufferBase object to convert.
- *
- *  @return A reference to @p b after downcasting it.
- */
-template<typename FloatType>
-Eigen<FloatType>& to_eigen_buffer(BufferBase& b);
-
-/** @brief Wraps downcasting a buffer to an Eigen buffer.
- *
- *  @tparam FloatType The type of the elements in the resulting Buffer.
- *
- *  This function is the same as the non-const overload except that the result
- *  will be read-only.
- *
- *  @param[in] b The BufferBase object to convert.
- *
- *  @return A reference to @p b after downcasting it.
- */
-template<typename FloatType>
-const Eigen<FloatType>& to_eigen_buffer(const BufferBase& b);
-
-#define DECLARE_EIGEN_BUFFER(TYPE) extern template class Eigen<TYPE>
-#define DECLARE_TO_EIGEN_BUFFER(TYPE) \
-    extern template Eigen<TYPE>& to_eigen_buffer<TYPE>(BufferBase&)
-#define DECLARE_TO_CONST_EIGEN_BUFFER(TYPE) \
-    extern template const Eigen<TYPE>& to_eigen_buffer<TYPE>(const BufferBase&)
-
-TW_APPLY_FLOATING_POINT_TYPES(DECLARE_EIGEN_BUFFER);
-TW_APPLY_FLOATING_POINT_TYPES(DECLARE_TO_EIGEN_BUFFER);
-TW_APPLY_FLOATING_POINT_TYPES(DECLARE_TO_CONST_EIGEN_BUFFER);
-
-#undef DECLARE_EIGEN_BUFFER
-#undef DECLARE_TO_EIGEN_BUFFER
-#undef DECLARE_TO_CONST_EIGEN_BUFFER
-
-} // namespace tensorwrapper::buffer
diff --git a/include/tensorwrapper/buffer/mdbuffer.hpp b/include/tensorwrapper/buffer/mdbuffer.hpp
deleted file mode 100644
index ab5e1cc0..00000000
--- a/include/tensorwrapper/buffer/mdbuffer.hpp
+++ /dev/null
@@ -1,337 +0,0 @@
-/*
- * Copyright 2025 NWChemEx-Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-#include
-#include
-#include
-#include
-
-namespace tensorwrapper::buffer {
-
-/** @brief A multidimensional (MD) buffer.
- *
- *  This class is a dense multidimensional buffer of floating-point values.
- */
-class MDBuffer : public Replicated {
-private:
-    /// Type *this derives from
-    using my_base_type = Replicated;
-
-    /// Type defining the types for the public API of *this
-    using traits_type = types::ClassTraits<MDBuffer>;
-
-    /// Type of *this
-    using my_type = MDBuffer;
-
-public:
-    /// Add types from traits_type to public API
-    ///@{
-    using value_type        = typename traits_type::value_type;
-    using reference         = typename traits_type::reference;
-    using const_reference   = typename traits_type::const_reference;
-    using buffer_type       = typename traits_type::buffer_type;
-    using buffer_view       = typename traits_type::buffer_view;
-    using const_buffer_view = typename traits_type::const_buffer_view;
-    using pimpl_type        = typename traits_type::pimpl_type;
-    using pimpl_pointer     = typename traits_type::pimpl_pointer;
-    using rank_type         = typename traits_type::rank_type;
-    using shape_type        = typename traits_type::shape_type;
-    using const_shape_view  = typename traits_type::const_shape_view;
-    using size_type         = typename traits_type::size_type;
-    ///@}
-
-    using index_vector = std::vector<size_type>;
-    using typename my_base_type::label_type;
-    using string_type = std::string;
-
-    // -------------------------------------------------------------------------
-    // -- Ctors, assignment, and dtor
-    // -------------------------------------------------------------------------
-
-    /** @brief Creates an empty multi-dimensional buffer.
-     *
-     *  The resulting buffer will have a shape of rank 0, but a size of 0. Thus
-     *  the buffer can NOT be used to store any elements (including treating
-     *  *this as a scalar). The resulting buffer can be assigned to or moved
-     *  into in order to populate it.
-     *
-     *  @throw None No throw guarantee.
-     */
-    MDBuffer() noexcept;
-
-    /** @brief Treats allocated memory like a multi-dimensional buffer.
-     *
-     *  @tparam T The type of the elements in the buffer. Must satisfy the
-     *            FloatingPoint concept.
-     *
-     *  This ctor will use @p elements to create a buffer_type object and then
-     *  pass that along with @p shape to the main ctor.
-     *
-     *  @param[in] elements The elements to be used as the backing store.
-     *  @param[in] shape The shape of *this.
-     *
-     *  @throw std::invalid_argument if the size of @p elements does not match
-     *                               the size implied by @p shape. Strong throw
-     *                               guarantee.
-     *  @throw std::bad_alloc if there is a problem allocating memory for the
-     *                        internal state. Strong throw guarantee.
-     */
-    template<typename T>
-    MDBuffer(std::vector<T> elements, shape_type shape) :
-      MDBuffer(buffer_type(std::move(elements)), std::move(shape)) {}
-
-    /** @brief The main ctor.
-     *
-     *  This ctor will create *this using @p buffer as the backing store and
-     *  @p shape to describe the geometry of the multidimensional array.
-     *
-     *  All other ctors (aside from copy and move) delegate to this one.
-     *
-     *  @param[in] buffer The buffer to be used as the backing store.
-     *  @param[in] shape The shape of *this.
-     *
-     *  @throw std::invalid_argument if the size of @p buffer does not match
-     *                               the size implied by @p shape. Strong throw
-     *                               guarantee.
-     *  @throw std::bad_alloc if there is a problem allocating memory for the
-     *                        internal state. Strong throw guarantee.
-     */
-    MDBuffer(buffer_type buffer, shape_type shape);
-
-    /** @brief Initializes *this to a deep copy of @p other.
-     *
-     *  This ctor will initialize *this to be a deep copy of @p other.
-     *
-     *  @param[in] other The MDBuffer to copy.
-     *
-     *  @throw std::bad_alloc if there is a problem allocating memory for the
-     *                        internal state. Strong throw guarantee.
- */ - MDBuffer(const MDBuffer& other) = default; - - /** @brief Move ctor. - * - * This ctor will initialize *this by taking the state from @p other. - * After this ctor is called @p other is left in a valid but unspecified - * state. - * - * @param[in,out] other The MDBuffer to move from. - * - * @throw None No throw guarantee. - */ - MDBuffer(MDBuffer&& other) noexcept = default; - - /** @brief Copy assignment. - * - * This operator will make *this a deep copy of @p other. - * - * @param[in] other The MDBuffer to copy. - * - * @return *this after the assignment. - * - * @throw std::bad_alloc if there is a problem allocating memory for the - * internal state. Strong throw guarantee. - */ - MDBuffer& operator=(const MDBuffer& other) = default; - - /** @brief Move assignment. - * - * This operator will make *this take the state from @p other. After - * this operator is called @p other is left in a valid but unspecified - * state. - * - * @param[in,out] other The MDBuffer to move from. - * - * @return *this after the assignment. - * - * @throw None No throw guarantee. - */ - MDBuffer& operator=(MDBuffer&& other) noexcept = default; - - /** @brief Defaulted dtor. - * - * @throw None No throw guarantee. - */ - ~MDBuffer() override = default; - - // ------------------------------------------------------------------------- - // -- State Accessors - // ------------------------------------------------------------------------- - - /** @brief Returns (a view of) the shape of *this. - * - * The shape of *this describes the geometry of the underlying - * multidimensional array. - * - * @return A view of the shape of *this. - * - * @throw std::bad_alloc if there is a problem allocating memory for the - * returned view. Strong throw guarantee. - */ - const_shape_view shape() const; - - /** @brief The total number of elements in *this. - * - * The total number of elements is the product of the extents of each - * mode of *this. - * - * @return The total number of elements in *this. - * - * @throw None No throw guarantee. - */ - size_type size() const noexcept; - - /** @brief Returns the element with the offsets specified by @p index. - * - * This method will retrieve a const reference to the element at the - * offsets specified by @p index. The length of @p index must be equal - * to the rank of *this and each entry in @p index must be less than the - * extent of the corresponding mode of *this. - * - * This method can only be used to retrieve elements from *this. To modify - * elements use set_elem(). - * - * @param[in] index The offsets into each mode of *this for the desired - * element. - * - * @return A const reference to the element at the specified offsets. - */ - const_reference get_elem(index_vector index) const; - - /** @brief Sets the specified element to @p new_value. - * - * This method will set the element at the offsets specified by @p index. - * The length of @p index must be equal to the rank of *this and each - * entry in @p index must be less than the extent of the corresponding - * mode of *this. - * - * @param[in] index The offsets into each mode of *this for the desired - * element. - * @param[in] new_value The new value for the specified element. - * - * @throw std::out_of_range if any entry in @p index is invalid. Strong - * throw guarantee. - */ - void set_elem(index_vector index, value_type new_value); - - /** @brief Returns a view of the data. - * - * This method is deprecated. Use set_slice instead. 
- */ - [[deprecated]] buffer_view get_mutable_data(); - - /** @brief Returns a read-only view of the data. - * - * This method is deprecated. Use get_slice instead. - */ - [[deprecated]] const_buffer_view get_immutable_data() const; - - // ------------------------------------------------------------------------- - // -- Utility Methods - // ------------------------------------------------------------------------- - - /** @brief Compares two MDBuffer objects for exact equality. - * - * Two MDBuffer objects are exactly equal if they have the same shape and - * if all of their corresponding elements are bitwise identical. - * In practice, the implementation stores a hash of the elements in the - * tensor and compares the hashes for equality rather than checking each - * element individually. - * - * @param[in] rhs The MDBuffer to compare against. - * - * @return True if *this and @p rhs are exactly equal and false otherwise. - * - * @throw None No throw guarantee. - */ - bool operator==(const my_type& rhs) const noexcept; - -protected: - /// Makes a deep polymorphic copy of *this - buffer_base_pointer clone_() const override; - - /// Implements are_equal by checking that rhs is an MDBuffer and then - /// calling operator== - bool are_equal_(const_buffer_base_reference rhs) const noexcept override; - - dsl_reference addition_assignment_(label_type this_labels, - const_labeled_reference lhs, - const_labeled_reference rhs) override; - dsl_reference subtraction_assignment_(label_type this_labels, - const_labeled_reference lhs, - const_labeled_reference rhs) override; - dsl_reference multiplication_assignment_( - label_type this_labels, const_labeled_reference lhs, - const_labeled_reference rhs) override; - - dsl_reference permute_assignment_(label_type this_labels, - const_labeled_reference rhs) override; - - dsl_reference scalar_multiplication_(label_type this_labels, double scalar, - const_labeled_reference rhs) override; - - /// Calls add_to_stream_ on a stringstream to implement - string_type to_string_() const override; - - /// Uses Eigen's printing capabilities to add to stream - std::ostream& add_to_stream_(std::ostream& os) const override; - -private: - /// Type for storing the hash of *this - using hash_type = std::size_t; - - /// Logic for validating that an index is within the bounds of the shape - void check_index_(const index_vector& index) const; - - /// Converts a coordinate index to a linear (ordinal) index - size_type coordinate_to_ordinal_(index_vector index) const; - - /// Returns the hash for the current state of *this, computing first if - /// needed. - hash_type get_hash_() const { - if(m_recalculate_hash_ or !m_hash_caching_) update_hash_(); - return m_hash_; - } - - /// Computes the hash for the current state of *this - void update_hash_() const; - - /// Designates that the state may have changed and to recalculate the hash. - /// This function is really just for readability and clarity. - void mark_for_rehash_() const { m_recalculate_hash_ = true; } - - /// Designates that state changes are not trackable and we should - /// recalculate the hash each time. 
-    void turn_off_hash_caching_() const { m_hash_caching_ = false; }
-
-    /// Tracks whether the hash needs to be redetermined
-    mutable bool m_recalculate_hash_ = true;
-
-    /// Tracks whether hash caching has been turned off
-    mutable bool m_hash_caching_ = true;
-
-    /// Holds the computed hash value for this instance's state
-    mutable hash_type m_hash_ = 0;
-
-    /// How the hyper-rectangular array is shaped
-    shape_type m_shape_;
-
-    /// The flat buffer holding the elements of *this
-    buffer_type m_buffer_;
-};
-
-} // namespace tensorwrapper::buffer
diff --git a/include/tensorwrapper/forward_declarations.hpp b/include/tensorwrapper/forward_declarations.hpp
index e030b3a9..9328da87 100644
--- a/include/tensorwrapper/forward_declarations.hpp
+++ b/include/tensorwrapper/forward_declarations.hpp
@@ -15,18 +15,10 @@
  */
 
 #pragma once
+#include
 
 namespace tensorwrapper {
 
-namespace buffer {
-namespace detail_ {
-class MDBufferPIMPL;
-}
-
-class MDBuffer;
-
-} // namespace buffer
-
 namespace shape {
 template<typename SmoothType>
 class SmoothView;
diff --git a/include/tensorwrapper/types/mdbuffer_traits.hpp b/include/tensorwrapper/types/contiguous_traits.hpp
similarity index 83%
rename from include/tensorwrapper/types/mdbuffer_traits.hpp
rename to include/tensorwrapper/types/contiguous_traits.hpp
index aa60a608..9f4ce64e 100644
--- a/include/tensorwrapper/types/mdbuffer_traits.hpp
+++ b/include/tensorwrapper/types/contiguous_traits.hpp
@@ -22,7 +22,7 @@
 
 namespace tensorwrapper::types {
 
-struct MDBufferTraitsCommon {
+struct ContiguousTraitsCommon {
     using value_type      = wtf::fp::Float;
     using const_reference = wtf::fp::FloatView;
     using buffer_type     = wtf::buffer::FloatBuffer;
@@ -31,13 +31,11 @@
     using const_shape_view = shape::SmoothView;
     using rank_type        = typename ClassTraits::rank_type;
    using size_type        = typename ClassTraits::size_type;
-    using pimpl_type       = tensorwrapper::buffer::detail_::MDBufferPIMPL;
-    using pimpl_pointer    = std::unique_ptr<pimpl_type>;
 };
 
 template<>
-struct ClassTraits<tensorwrapper::buffer::MDBuffer>
-  : public MDBufferTraitsCommon {
+struct ClassTraits<tensorwrapper::buffer::Contiguous>
+  : public ContiguousTraitsCommon {
     using reference   = wtf::fp::FloatView;
     using buffer_view = wtf::buffer::BufferView;
 
@@ -45,8 +43,8 @@
 };
 
 template<>
-struct ClassTraits<const tensorwrapper::buffer::MDBuffer>
-  : public MDBufferTraitsCommon {
+struct ClassTraits<const tensorwrapper::buffer::Contiguous>
+  : public ContiguousTraitsCommon {
     using reference   = wtf::fp::FloatView;
     using buffer_view = wtf::buffer::BufferView;
 };
diff --git a/src/tensorwrapper/allocator/contiguous.cpp b/src/tensorwrapper/allocator/contiguous.cpp
new file mode 100644
index 00000000..4226262e
--- /dev/null
+++ b/src/tensorwrapper/allocator/contiguous.cpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2024 NWChemEx-Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "../tensor/detail_/il_utils.hpp"
+#include
+#include
+#include
+#include
+
+namespace tensorwrapper::allocator {
+
+bool Contiguous::can_rebind(const_buffer_base_reference buffer) {
+    auto pbuffer = dynamic_cast<const buffer::Contiguous*>(&buffer);
+    return pbuffer != nullptr;
+}
+
+auto Contiguous::rebind(buffer_base_reference buffer) -> buffer_reference {
+    if(can_rebind(buffer)) return static_cast<buffer_reference>(buffer);
+    throw std::runtime_error("Can not rebind buffer");
+}
+
+auto Contiguous::rebind(const_buffer_base_reference buffer)
+  -> const_buffer_reference {
+    if(can_rebind(buffer)) return dynamic_cast<const_buffer_reference>(buffer);
+    throw std::runtime_error("Can not rebind buffer");
+}
+
+// -----------------------------------------------------------------------------
+// -- Protected methods
+// -----------------------------------------------------------------------------
+
+auto Contiguous::allocate_(layout_pointer playout) -> buffer_base_pointer {
+    return std::make_unique(std::move(playout));
+}
+
+auto Contiguous::construct_(layout_pointer playout, wtf::fp::Float value)
+  -> contiguous_pointer {
+    auto pbuffer        = this->allocate(std::move(playout));
+    auto& contig_buffer = static_cast<buffer::Contiguous&>(*pbuffer);
+    contig_buffer.fill(value);
+    return pbuffer;
+}
+
+// -- Private
+
+auto Contiguous::layout_from_extents_(const std::vector<size_type>& extents)
+  -> layout_pointer {
+    shape::Smooth shape(extents.begin(), extents.end());
+    return std::make_unique<layout_type>(std::move(shape));
+}
+
+} // namespace tensorwrapper::allocator
diff --git a/src/tensorwrapper/allocator/eigen.cpp b/src/tensorwrapper/allocator/eigen.cpp
deleted file mode 100644
index d493ecae..00000000
--- a/src/tensorwrapper/allocator/eigen.cpp
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Copyright 2024 NWChemEx-Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "../buffer/detail_/eigen_tensor.hpp"
-#include "../tensor/detail_/il_utils.hpp"
-#include
-#include
-#include
-#include
-
-namespace tensorwrapper::allocator {
-
-#define TPARAMS template<typename FloatType>
-#define EIGEN Eigen<FloatType>
-
-TPARAMS
-bool EIGEN::can_rebind(const_buffer_base_reference buffer) {
-    auto pbuffer = dynamic_cast<const buffer::Eigen<FloatType>*>(&buffer);
-    return pbuffer != nullptr;
-}
-
-TPARAMS
-typename EIGEN::eigen_buffer_reference EIGEN::rebind(
-  buffer_base_reference buffer) {
-    if(can_rebind(buffer)) return static_cast<eigen_buffer_reference>(buffer);
-    throw std::runtime_error("Can not rebind buffer");
-}
-
-TPARAMS
-typename EIGEN::const_eigen_buffer_reference EIGEN::rebind(
-  const_buffer_base_reference buffer) {
-    if(can_rebind(buffer))
-        return dynamic_cast<const_eigen_buffer_reference>(buffer);
-    throw std::runtime_error("Can not rebind buffer");
-}
-
-// -----------------------------------------------------------------------------
-// -- Protected methods
-// -----------------------------------------------------------------------------
-
-#define ALLOCATE(Rank)                                                    \
-    if(playout->rank() == Rank) {                                         \
-        using pimpl_type = buffer::detail_::EigenTensor<FloatType, Rank>; \
-        auto ppimpl =                                                     \
-          std::make_unique<pimpl_type>(playout->shape().as_smooth());     \
-        return std::make_unique<buffer_type>(                             \
-          std::move(ppimpl), std::move(playout), this->clone());          \
-    }
-
-TPARAMS
-typename EIGEN::buffer_base_pointer EIGEN::allocate_(layout_pointer playout) {
-    using buffer_type = buffer::Eigen<FloatType>;
-    ALLOCATE(0)
-    else ALLOCATE(1) else ALLOCATE(2) else ALLOCATE(3) else ALLOCATE(4) else ALLOCATE(5) else ALLOCATE(
-      6) else ALLOCATE(7) else ALLOCATE(8) else ALLOCATE(9) else ALLOCATE(10) else {
-        throw std::runtime_error("Tensors with rank > 10 not supported.");
-    }
-}
-
-TPARAMS
-typename EIGEN::contiguous_pointer EIGEN::construct_(rank0_il il) {
-    return il_construct_(il);
-}
-
-TPARAMS
-typename EIGEN::contiguous_pointer EIGEN::construct_(rank1_il il) {
-    return il_construct_(il);
-}
-
-TPARAMS
-typename EIGEN::contiguous_pointer EIGEN::construct_(rank2_il il) {
-    return il_construct_(il);
-}
-
-TPARAMS
-typename EIGEN::contiguous_pointer EIGEN::construct_(rank3_il il) {
-    return il_construct_(il);
-}
-
-TPARAMS
-typename EIGEN::contiguous_pointer EIGEN::construct_(rank4_il il) {
-    return il_construct_(il);
-}
-
-TPARAMS
-typename EIGEN::contiguous_pointer EIGEN::construct_(layout_pointer playout,
-                                                     element_type value) {
-    auto pbuffer        = this->allocate(std::move(playout));
-    auto& contig_buffer = static_cast<buffer::Contiguous<FloatType>&>(*pbuffer);
-    contig_buffer.fill(value);
-    return pbuffer;
-}
-
-// -- Private
-
-TPARAMS
-template<typename ILType>
-typename EIGEN::contiguous_pointer EIGEN::il_construct_(ILType il) {
-    auto [extents, data] = unwrap_il(il);
-    shape::Smooth shape(extents.begin(), extents.end());
-    auto playout = std::make_unique<layout_type>(std::move(shape));
-    auto pbuffer = this->allocate(std::move(playout));
-    auto& buffer_down = rebind(*pbuffer);
-    buffer_down.copy(data);
-    return pbuffer;
-}
-
-#undef EIGEN
-#undef TPARAMS
-
-// -- Explicit class template instantiation
-
-#define DEFINE_EIGEN_ALLOCATOR(TYPE) template class Eigen<TYPE>
-
-TW_APPLY_FLOATING_POINT_TYPES(DEFINE_EIGEN_ALLOCATOR);
-
-#undef DEFINE_EIGEN_ALLOCATOR
-
-} // namespace tensorwrapper::allocator
diff --git a/src/tensorwrapper/buffer/contiguoues.cpp b/src/tensorwrapper/buffer/contiguoues.cpp
deleted file mode 100644
index 36438832..00000000
--- a/src/tensorwrapper/buffer/contiguoues.cpp
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright 2025 NWChemEx-Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include
-
-namespace tensorwrapper::buffer {
-
-#define DEFINE_CONTIG_BUFFER(TYPE) template class Contiguous<TYPE>
-
-TW_APPLY_FLOATING_POINT_TYPES(DEFINE_CONTIG_BUFFER);
-
-#undef DEFINE_CONTIG_BUFFER
-
-} // namespace tensorwrapper::buffer
diff --git a/src/tensorwrapper/buffer/mdbuffer.cpp b/src/tensorwrapper/buffer/contiguous.cpp
similarity index 79%
rename from src/tensorwrapper/buffer/mdbuffer.cpp
rename to src/tensorwrapper/buffer/contiguous.cpp
index 79400829..38149117 100644
--- a/src/tensorwrapper/buffer/mdbuffer.cpp
+++ b/src/tensorwrapper/buffer/contiguous.cpp
@@ -17,17 +17,18 @@
 #include "../backends/eigen/eigen_tensor_impl.hpp"
 #include "detail_/binary_operation_visitor.hpp"
 #include "detail_/hash_utilities.hpp"
-#include <tensorwrapper/buffer/mdbuffer.hpp>
+#include <tensorwrapper/buffer/contiguous.hpp>
 #include
 
 namespace tensorwrapper::buffer {
 namespace {
 
 template<typename T>
-const MDBuffer& downcast(T&& object) {
-    auto* pobject = dynamic_cast<const MDBuffer*>(&object);
+const Contiguous& downcast(T&& object) {
+    auto* pobject = dynamic_cast<const Contiguous*>(&object);
     if(pobject == nullptr) {
-        throw std::invalid_argument("The provided buffer must be an MDBuffer.");
+        throw std::invalid_argument(
+          "The provided buffer must be a Contiguous.");
     }
     return *pobject;
 }
@@ -35,9 +36,9 @@
 
 using fp_types = types::floating_point_types;
 
-MDBuffer::MDBuffer() noexcept = default;
+Contiguous::Contiguous() noexcept = default;
 
-MDBuffer::MDBuffer(buffer_type buffer, shape_type shape) :
+Contiguous::Contiguous(buffer_type buffer, shape_type shape) :
   my_base_type(std::make_unique(shape), nullptr),
   m_shape_(std::move(shape)),
   m_buffer_() {
@@ -54,27 +55,27 @@
 // -- State Accessor
 // -----------------------------------------------------------------------------
 
-auto MDBuffer::shape() const -> const_shape_view { return m_shape_; }
+auto Contiguous::shape() const -> const_shape_view { return m_shape_; }
 
-auto MDBuffer::size() const noexcept -> size_type { return m_buffer_.size(); }
+auto Contiguous::size() const noexcept -> size_type { return m_buffer_.size(); }
 
-auto MDBuffer::get_elem(index_vector index) const -> const_reference {
+auto Contiguous::get_elem(index_vector index) const -> const_reference {
     auto ordinal_index = coordinate_to_ordinal_(index);
     return m_buffer_.at(ordinal_index);
 }
 
-void MDBuffer::set_elem(index_vector index, value_type new_value) {
+void Contiguous::set_elem(index_vector index, value_type new_value) {
     auto ordinal_index = coordinate_to_ordinal_(index);
     mark_for_rehash_();
     m_buffer_.at(ordinal_index) = new_value;
 }
 
-auto MDBuffer::get_mutable_data() -> buffer_view {
+auto Contiguous::get_mutable_data() -> buffer_view {
     mark_for_rehash_();
     return m_buffer_;
 }
 
-auto MDBuffer::get_immutable_data() const -> const_buffer_view {
+auto Contiguous::get_immutable_data() const -> const_buffer_view {
     return m_buffer_;
 }
 
@@ -82,7 +83,7 @@
 // -- Utility Methods
 // -----------------------------------------------------------------------------
 
-bool MDBuffer::operator==(const my_type& rhs) const noexcept {
+bool Contiguous::operator==(const my_type& rhs) const noexcept {
     if(!my_base_type::operator==(rhs)) return false;
     return get_hash_() == rhs.get_hash_();
 }
@@ -91,17 +92,17 @@
 // -- Protected Methods
 // -----------------------------------------------------------------------------
 
-auto MDBuffer::clone_() const -> buffer_base_pointer {
-    return std::make_unique<MDBuffer>(*this);
+auto Contiguous::clone_() const -> buffer_base_pointer {
+    return std::make_unique<Contiguous>(*this);
 }
 
-bool MDBuffer::are_equal_(const_buffer_base_reference rhs) const noexcept {
+bool Contiguous::are_equal_(const_buffer_base_reference rhs) const noexcept {
     return my_base_type::template are_equal_impl_<my_type>(rhs);
 }
 
-auto MDBuffer::addition_assignment_(label_type this_labels,
-                                    const_labeled_reference lhs,
-                                    const_labeled_reference rhs)
+auto Contiguous::addition_assignment_(label_type this_labels,
+                                      const_labeled_reference lhs,
+                                      const_labeled_reference rhs)
   -> dsl_reference {
     const auto& lhs_down = downcast(lhs.object());
     const auto& rhs_down = downcast(rhs.object());
@@ -126,9 +127,9 @@
     return *this;
 }
 
-auto MDBuffer::subtraction_assignment_(label_type this_labels,
-                                       const_labeled_reference lhs,
-                                       const_labeled_reference rhs)
+auto Contiguous::subtraction_assignment_(label_type this_labels,
+                                         const_labeled_reference lhs,
+                                         const_labeled_reference rhs)
   -> dsl_reference {
     const auto& lhs_down = downcast(lhs.object());
     const auto& rhs_down = downcast(rhs.object());
@@ -153,9 +154,9 @@
     return *this;
 }
 
-auto MDBuffer::multiplication_assignment_(label_type this_labels,
-                                          const_labeled_reference lhs,
-                                          const_labeled_reference rhs)
+auto Contiguous::multiplication_assignment_(label_type this_labels,
+                                            const_labeled_reference lhs,
+                                            const_labeled_reference rhs)
   -> dsl_reference {
     const auto& lhs_down = downcast(lhs.object());
     const auto& rhs_down = downcast(rhs.object());
@@ -179,8 +180,8 @@
     return *this;
 }
 
-auto MDBuffer::permute_assignment_(label_type this_labels,
-                                   const_labeled_reference rhs)
+auto Contiguous::permute_assignment_(label_type this_labels,
+                                     const_labeled_reference rhs)
   -> dsl_reference {
     const auto& rhs_down   = downcast(rhs.object());
     const auto& rhs_labels = rhs.labels();
@@ -198,8 +199,8 @@
     return *this;
 }
 
-auto MDBuffer::scalar_multiplication_(label_type this_labels, double scalar,
-                                      const_labeled_reference rhs)
+auto Contiguous::scalar_multiplication_(label_type this_labels, double scalar,
+                                        const_labeled_reference rhs)
  -> dsl_reference {
    const auto& rhs_down   = downcast(rhs.object());
     const auto& rhs_labels = rhs.labels();
@@ -217,13 +218,13 @@
     return *this;
 }
 
-auto MDBuffer::to_string_() const -> string_type {
+auto Contiguous::to_string_() const -> string_type {
     std::stringstream ss;
     add_to_stream_(ss);
     return ss.str();
 }
 
-std::ostream& MDBuffer::add_to_stream_(std::ostream& os) const {
+std::ostream& Contiguous::add_to_stream_(std::ostream& os) const {
     /// XXX: EigenTensor should handle aliasing a const buffer correctly. That's
     /// a lot of work, just to get this to work though...
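For readers tracing the two hunks that follow (check_index_ and coordinate_to_ordinal_): the conversion being renamed here is ordinary row-major linearization. Below is a minimal, self-contained sketch written under that assumption; the free function and the names `extents` and `index` are illustrative stand-ins, not the class's actual members or API.

#include <cassert>
#include <cstddef>
#include <vector>

// Illustrative sketch only (not part of the patch): map a coordinate index to
// a flat, row-major ordinal. The real method reads the extents from its shape
// and bounds-checks the index first (see check_index_ in the next hunk).
std::size_t coordinate_to_ordinal(const std::vector<std::size_t>& extents,
                                  const std::vector<std::size_t>& index) {
    assert(extents.size() == index.size());
    // Horner-style accumulation: at each mode, scale the running ordinal by
    // that mode's extent and then add the offset along that mode.
    std::size_t ordinal = 0;
    for(std::size_t i = 0; i < index.size(); ++i)
        ordinal = ordinal * extents[i] + index[i];
    return ordinal;
}

int main() {
    // In a 2 x 3 buffer stored flat, element (1, 2) lives at 1 * 3 + 2 = 5.
    assert(coordinate_to_ordinal({2, 3}, {1, 2}) == 5);
    return 0;
}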
@@ -243,7 +244,7 @@ std::ostream& MDBuffer::add_to_stream_(std::ostream& os) const { // -- Private Methods // ----------------------------------------------------------------------------- -void MDBuffer::check_index_(const index_vector& index) const { +void Contiguous::check_index_(const index_vector& index) const { if(index.size() != m_shape_.rank()) { throw std::out_of_range( "The length of the provided index does not match the rank of " @@ -258,7 +259,7 @@ void MDBuffer::check_index_(const index_vector& index) const { } } -auto MDBuffer::coordinate_to_ordinal_(index_vector index) const -> size_type { +auto Contiguous::coordinate_to_ordinal_(index_vector index) const -> size_type { check_index_(index); using size_type = typename decltype(index)::size_type; size_type ordinal = 0; @@ -270,7 +271,7 @@ auto MDBuffer::coordinate_to_ordinal_(index_vector index) const -> size_type { return ordinal; } -void MDBuffer::update_hash_() const { +void Contiguous::update_hash_() const { buffer::detail_::hash_utilities::HashVisitor visitor; if(m_buffer_.size()) { wtf::buffer::visit_contiguous_buffer(visitor, m_buffer_); diff --git a/src/tensorwrapper/buffer/detail_/eigen_dispatch.hpp b/src/tensorwrapper/buffer/detail_/eigen_dispatch.hpp deleted file mode 100644 index 90c26509..00000000 --- a/src/tensorwrapper/buffer/detail_/eigen_dispatch.hpp +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright 2025 NWChemEx-Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once -#include -#include - -namespace tensorwrapper::buffer::detail_ { - -constexpr std::size_t MaxEigenRank = 8; - -template -using eigen_tensor_type = eigen::Tensor; - -template -using eigen_tensor_map = eigen::TensorMap>; - -template -auto wrap_tensor(std::span s, const shape::Smooth& shape) { - using tensor_type = eigen::Tensor; - using map_type = eigen::TensorMap; - - if constexpr(Rank > MaxEigenRank) { - static_assert( - Rank <= MaxEigenRank, - "Eigen tensors of rank > MaxEigenRank are not supported."); - } else { - if(shape.rank() == Rank) return variant_type(map_type(s)); - } -} - -template -auto eigen_dispatch_impl(VisitorType&& visitor, - eigen::TensorMap>& A, - Args&&... args) { - return visitor(A, std::forward(args)...); -} - -template -auto eigen_tensor_dispatch(std::span s, shape::Smooth shape, - Args&&... args) { - using tensor_type = eigen::Tensor; -} - -} // namespace tensorwrapper::buffer::detail_ diff --git a/src/tensorwrapper/buffer/detail_/eigen_pimpl.hpp b/src/tensorwrapper/buffer/detail_/eigen_pimpl.hpp deleted file mode 100644 index 8c297914..00000000 --- a/src/tensorwrapper/buffer/detail_/eigen_pimpl.hpp +++ /dev/null @@ -1,176 +0,0 @@ -/* - * Copyright 2025 NWChemEx-Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-#include "../../backends/eigen.hpp"
-#include
-#include
-
-namespace tensorwrapper::buffer::detail_ {
-
-/// Common API that type-erases Eigen's many tensor classes.
-template<typename FloatType>
-class EigenPIMPL
-  : public tensorwrapper::detail_::PolymorphicBase<EigenPIMPL<FloatType>> {
-private:
-    using my_type          = EigenPIMPL<FloatType>;
-    using polymorphic_base = tensorwrapper::detail_::PolymorphicBase<my_type>;
-
-public:
-    using parent_type           = Eigen<FloatType>;
-    using pimpl_pointer         = typename parent_type::pimpl_pointer;
-    using label_type            = typename parent_type::label_type;
-    using element_type          = typename parent_type::element_type;
-    using element_vector        = typename parent_type::element_vector;
-    using reference             = typename parent_type::reference;
-    using const_shape_reference = const shape::ShapeBase&;
-    using const_reference       = typename parent_type::const_reference;
-    using pointer               = typename parent_type::pointer;
-    using const_pointer         = typename parent_type::const_pointer;
-    using string_type           = typename polymorphic_base::string_type;
-    using index_vector          = typename parent_type::index_vector;
-    using size_type             = typename parent_type::size_type;
-
-    using const_pimpl_reference = const EigenPIMPL<FloatType>&;
-
-    using eigen_rank_type = unsigned int;
-
-    eigen_rank_type rank() const noexcept { return rank_(); }
-
-    size_type size() const noexcept { return size_(); }
-
-    size_type extent(eigen_rank_type i) const {
-        assert(i < rank());
-        return extent_(i);
-    }
-
-    pointer get_mutable_data() noexcept { return get_mutable_data_(); }
-
-    const_pointer get_immutable_data() const noexcept {
-        return get_immutable_data_();
-    }
-
-    const_reference get_elem(index_vector index) const {
-        assert(index.size() == rank());
-        return get_elem_(std::move(index));
-    }
-
-    void set_elem(index_vector index, element_type new_value) {
-        assert(index.size() == rank());
-        set_elem_(index, new_value);
-    }
-
-    const_reference get_data(size_type index) const {
-        assert(index < size());
-        return get_data_(std::move(index));
-    }
-
-    void set_data(size_type index, element_type new_value) {
-        assert(index < size());
-        set_data_(index, new_value);
-    }
-
-    void fill(element_type value) { fill_(std::move(value)); }
-
-    void copy(const element_vector& values) {
-        assert(values.size() <= size());
-        copy_(values);
-    }
-
-    void addition_assignment(label_type this_labels, label_type lhs_labels,
-                             label_type rhs_labels, const_pimpl_reference lhs,
-                             const_pimpl_reference rhs) {
-        addition_assignment_(std::move(this_labels), std::move(lhs_labels),
-                             std::move(rhs_labels), lhs, rhs);
-    }
-
-    void subtraction_assignment(label_type this_labels, label_type lhs_labels,
-                                label_type rhs_labels,
-                                const_pimpl_reference lhs,
-                                const_pimpl_reference rhs) {
-        subtraction_assignment_(std::move(this_labels), std::move(lhs_labels),
-                                std::move(rhs_labels), lhs, rhs);
-    }
-
-    void hadamard_assignment(label_type this_labels, label_type lhs_labels,
-                             label_type rhs_labels, const_pimpl_reference lhs,
-                             const_pimpl_reference rhs) {
-        hadamard_assignment_(std::move(this_labels), std::move(lhs_labels),
-                             std::move(rhs_labels), lhs, rhs);
-    }
-
-    void contraction_assignment(label_type
this_labels, label_type lhs_labels, - label_type rhs_labels, - const_shape_reference result_shape, - const_pimpl_reference lhs, - const_pimpl_reference rhs) { - contraction_assignment_(std::move(this_labels), std::move(lhs_labels), - std::move(rhs_labels), result_shape, lhs, rhs); - } - - void permute_assignment(label_type this_labels, label_type rhs_labels, - const_pimpl_reference rhs) { - permute_assignment_(std::move(this_labels), std::move(rhs_labels), rhs); - } - - void scalar_multiplication(label_type this_labels, label_type rhs_labels, - FloatType scalar, const_pimpl_reference rhs) { - scalar_multiplication_(std::move(this_labels), std::move(rhs_labels), - scalar, rhs); - } - -protected: - virtual eigen_rank_type rank_() const noexcept = 0; - virtual size_type size_() const = 0; - virtual size_type extent_(eigen_rank_type i) const = 0; - virtual pointer get_mutable_data_() noexcept = 0; - virtual const_pointer get_immutable_data_() const noexcept = 0; - virtual const_reference get_elem_(index_vector index) const = 0; - virtual void set_elem_(index_vector index, element_type new_value) = 0; - virtual const_reference get_data_(size_type index) const = 0; - virtual void set_data_(size_type index, element_type new_value) = 0; - virtual void fill_(element_type value) = 0; - virtual void copy_(const element_vector& values) = 0; - virtual void addition_assignment_(label_type this_labels, - label_type lhs_labels, - label_type rhs_labels, - const_pimpl_reference lhs, - const_pimpl_reference rhs) = 0; - virtual void subtraction_assignment_(label_type this_labels, - label_type lhs_labels, - label_type rhs_labels, - const_pimpl_reference lhs, - const_pimpl_reference rhs) = 0; - virtual void hadamard_assignment_(label_type this_labels, - label_type lhs_labels, - label_type rhs_labels, - const_pimpl_reference lhs, - const_pimpl_reference rhs) = 0; - virtual void contraction_assignment_(label_type this_labels, - label_type lhs_labels, - label_type rhs_labels, - const_shape_reference result_shape, - const_pimpl_reference lhs, - const_pimpl_reference rhs) = 0; - virtual void permute_assignment_(label_type this_labels, - label_type rhs_labels, - const_pimpl_reference rhs) = 0; - virtual void scalar_multiplication_(label_type this_labels, - label_type rhs_labels, FloatType scalar, - const_pimpl_reference rhs) = 0; -}; - -} // namespace tensorwrapper::buffer::detail_ diff --git a/src/tensorwrapper/buffer/detail_/eigen_tensor.cpp b/src/tensorwrapper/buffer/detail_/eigen_tensor.cpp deleted file mode 100644 index 84096fb5..00000000 --- a/src/tensorwrapper/buffer/detail_/eigen_tensor.cpp +++ /dev/null @@ -1,237 +0,0 @@ -/* - * Copyright 2025 NWChemEx-Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-
-#include "../../backends/eigen.hpp"
-#include "../contraction_planner.hpp"
-#include "eigen_tensor.hpp"
-
-namespace tensorwrapper::buffer::detail_ {
-
-#define TPARAMS template<typename FloatType, std::size_t Rank>
-#define EIGEN_TENSOR EigenTensor<FloatType, Rank>
-
-TPARAMS
-template<typename OperationType>
-void EIGEN_TENSOR::element_wise_op_(OperationType op, label_type this_labels,
-                                    label_type lhs_labels,
-                                    label_type rhs_labels,
-                                    const_pimpl_reference lhs,
-                                    const_pimpl_reference rhs) {
-    // Downcast LHS and RHS
-    const auto* lhs_down  = dynamic_cast<const my_type*>(&lhs);
-    const auto& lhs_eigen = lhs_down->m_tensor_;
-    const auto* rhs_down  = dynamic_cast<const my_type*>(&rhs);
-    const auto& rhs_eigen = rhs_down->m_tensor_;
-
-    // Whose indices match whose?
-    bool this_matches_lhs = (this_labels == lhs_labels);
-    bool this_matches_rhs = (this_labels == rhs_labels);
-    bool lhs_matches_rhs  = (lhs_labels == rhs_labels);
-
-    // The three possible permutations we may need to apply
-    auto get_permutation = [](auto&& lhs_, auto&& rhs_) {
-        auto l_to_r = lhs_.permutation(rhs_);
-        return std::vector(l_to_r.begin(), l_to_r.end());
-    };
-    auto r_to_l    = get_permutation(rhs_labels, lhs_labels);
-    auto l_to_r    = get_permutation(lhs_labels, rhs_labels);
-    auto this_to_r = get_permutation(this_labels, rhs_labels);
-
-    if(this_matches_lhs && this_matches_rhs) { // No permutations
-        m_tensor_ = op(lhs_eigen, rhs_eigen);
-    } else if(this_matches_lhs) { // RHS needs permuted
-        m_tensor_ = op(lhs_eigen, rhs_eigen.shuffle(r_to_l));
-    } else if(this_matches_rhs) { // LHS needs permuted
-        m_tensor_ = op(lhs_eigen.shuffle(l_to_r), rhs_eigen);
-    } else if(lhs_matches_rhs) { // This needs permuted
-        m_tensor_ = op(lhs_eigen, rhs_eigen).shuffle(this_to_r);
-    } else { // Everything needs permuted
-        m_tensor_ = op(lhs_eigen.shuffle(l_to_r), rhs_eigen).shuffle(this_to_r);
-    }
-    mark_for_rehash_();
-}
-
-TPARAMS
-void EIGEN_TENSOR::addition_assignment_(label_type this_labels,
-                                        label_type lhs_labels,
-                                        label_type rhs_labels,
-                                        const_pimpl_reference lhs,
-                                        const_pimpl_reference rhs) {
-    auto lambda = [](auto&& lhs, auto&& rhs) { return lhs + rhs; };
-    element_wise_op_(lambda, std::move(this_labels), std::move(lhs_labels),
-                     std::move(rhs_labels), lhs, rhs);
-    mark_for_rehash_();
-}
-
-template<typename TensorType>
-auto matrix_size(TensorType&& t, std::size_t row_ranks) {
-    std::size_t nrows = 1;
-    for(std::size_t i = 0; i < row_ranks; ++i) nrows *= t.extent(i);
-
-    std::size_t ncols = 1;
-    const auto rank = t.rank();
-    for(std::size_t i = row_ranks; i < rank; ++i) ncols *= t.extent(i);
-    return std::make_pair(nrows, ncols);
-}
-
-TPARAMS
-void EIGEN_TENSOR::contraction_assignment_(label_type olabels,
-                                           label_type llabels,
-                                           label_type rlabels,
-                                           const_shape_reference result_shape,
-                                           const_pimpl_reference lhs,
-                                           const_pimpl_reference rhs) {
-    ContractionPlanner plan(olabels, llabels, rlabels);
-
-    auto lt = lhs.clone();
-    auto rt = rhs.clone();
-    lt->permute_assignment(plan.lhs_permutation(), llabels, lhs);
-    rt->permute_assignment(plan.rhs_permutation(), rlabels, rhs);
-
-    const auto [lrows, lcols] = matrix_size(*lt, plan.lhs_free().size());
-    const auto [rrows, rcols] = matrix_size(*rt, plan.rhs_dummy().size());
-
-    // Work out the types of the matrix and a map
-    constexpr auto e_dyn       = ::Eigen::Dynamic;
-    constexpr auto e_row_major = ::Eigen::RowMajor;
-    using matrix_t = ::Eigen::Matrix<FloatType, e_dyn, e_dyn, e_row_major>;
-    using map_t    = ::Eigen::Map<matrix_t>;
-
-    eigen::data_type<FloatType, 2> buffer(lrows, rcols);
-
-    map_t lmatrix(lt->get_mutable_data(), lrows, lcols);
-    map_t rmatrix(rt->get_mutable_data(), rrows, rcols);
-    map_t omatrix(buffer.data(), lrows, rcols);
-    omatrix = lmatrix * rmatrix;
-
-    auto mlabels = plan.result_matrix_labels();
-    auto oshape  = result_shape(olabels);
-
-    // oshape is the final shape; permute it to the shape omatrix is currently in
-    auto temp_shape = result_shape.clone();
-    temp_shape->permute_assignment(mlabels, oshape);
-    auto mshape = temp_shape->as_smooth();
-
-    auto m_to_o = olabels.permutation(mlabels); // N.b., Eigen's definition is the inverse of ours
-
-    std::array<long, Rank> out_size;
-    std::array<int, Rank> m_to_o_array;
-    for(std::size_t i = 0; i < Rank; ++i) {
-        out_size[i]     = mshape.extent(i);
-        m_to_o_array[i] = m_to_o[i];
-    }
-
-    auto tensor = buffer.reshape(out_size);
-    if constexpr(Rank > 0) {
-        m_tensor_ = tensor.shuffle(m_to_o_array);
-    } else {
-        m_tensor_ = tensor;
-    }
-
-    mark_for_rehash_();
-}
-
-TPARAMS
-void EIGEN_TENSOR::hadamard_assignment_(label_type this_labels,
-                                        label_type lhs_labels,
-                                        label_type rhs_labels,
-                                        const_pimpl_reference lhs,
-                                        const_pimpl_reference rhs) {
-    auto lambda = [](auto&& lhs, auto&& rhs) { return lhs * rhs; };
-    element_wise_op_(lambda, std::move(this_labels), std::move(lhs_labels),
-                     std::move(rhs_labels), lhs, rhs);
-    mark_for_rehash_();
-}
-
-TPARAMS
-void EIGEN_TENSOR::permute_assignment_(label_type this_labels,
-                                       label_type rhs_labels,
-                                       const_pimpl_reference rhs) {
-    const auto* rhs_down = dynamic_cast<const my_type*>(&rhs);
-
-    if(this_labels != rhs_labels) { // We need to permute rhs before assignment
-        // Eigen adopts the opposite definition of permutation from us.
-        auto r_to_l = this_labels.permutation(rhs_labels);
-        // Eigen wants int objects
-        std::vector<int> r_to_l2(r_to_l.begin(), r_to_l.end());
-        m_tensor_ = rhs_down->m_tensor_.shuffle(r_to_l2);
-    } else {
-        m_tensor_ = rhs_down->m_tensor_;
-    }
-    mark_for_rehash_();
-}
-
-TPARAMS
-void EIGEN_TENSOR::scalar_multiplication_(label_type this_labels,
-                                          label_type rhs_labels,
-                                          FloatType scalar,
-                                          const_pimpl_reference rhs) {
-    const auto* rhs_downcasted = dynamic_cast<const my_type*>(&rhs);
-
-    if(this_labels != rhs_labels) { // We need to permute rhs before assignment
-        auto r_to_l = rhs_labels.permutation(this_labels);
-        // Eigen wants int objects
-        std::vector<int> r_to_l2(r_to_l.begin(), r_to_l.end());
-        m_tensor_ = rhs_downcasted->m_tensor_.shuffle(r_to_l2) * scalar;
-    } else {
-        m_tensor_ = rhs_downcasted->m_tensor_ * scalar;
-    }
-    mark_for_rehash_();
-}
-
-TPARAMS
-void EIGEN_TENSOR::subtraction_assignment_(label_type this_labels,
-                                           label_type lhs_labels,
-                                           label_type rhs_labels,
-                                           const_pimpl_reference lhs,
-                                           const_pimpl_reference rhs) {
-    auto lambda = [](auto&& lhs, auto&& rhs) { return lhs - rhs; };
-    element_wise_op_(lambda, std::move(this_labels), std::move(lhs_labels),
-                     std::move(rhs_labels), lhs, rhs);
-    mark_for_rehash_();
-}
-
-TPARAMS
-void EIGEN_TENSOR::update_hash_() const {
-    m_hash_ = hash_type{rank_()};
-    for(eigen_rank_type i = 0; i < rank_(); ++i)
-        hash_utilities::hash_input(m_hash_, m_tensor_.dimension(i));
-    for(auto i = 0; i < m_tensor_.size(); ++i)
-        hash_utilities::hash_input(m_hash_, m_tensor_.data()[i]);
-    m_recalculate_hash_ = false;
-}
-
-#undef EIGEN_TENSOR
-#undef TPARAMS
-
-#define DEFINE_EIGEN_TENSOR(TYPE)           \
-    template class EigenTensor<TYPE, 0>;    \
-    template class EigenTensor<TYPE, 1>;    \
-    template class EigenTensor<TYPE, 2>;    \
-    template class EigenTensor<TYPE, 3>;    \
-    template class EigenTensor<TYPE, 4>;    \
-    template class EigenTensor<TYPE, 5>;    \
-    template class EigenTensor<TYPE, 6>;    \
-    template class EigenTensor<TYPE, 7>;    \
-    template class EigenTensor<TYPE, 8>;    \
-    template class EigenTensor<TYPE, 9>;    \
-    template class EigenTensor<TYPE, 10>
-
-TW_APPLY_FLOATING_POINT_TYPES(DEFINE_EIGEN_TENSOR);
-
-#undef DEFINE_EIGEN_TENSOR
-
-} // namespace tensorwrapper::buffer::detail_
diff --git a/src/tensorwrapper/buffer/detail_/eigen_tensor.hpp b/src/tensorwrapper/buffer/detail_/eigen_tensor.hpp
deleted file mode 100644
index 4f89f003..00000000
--- a/src/tensorwrapper/buffer/detail_/eigen_tensor.hpp
+++ /dev/null
@@ -1,236 +0,0 @@
-/*
- * Copyright 2025 NWChemEx-Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-#include "eigen_pimpl.hpp"
-#include "hash_utilities.hpp"
-#include
-#include
-#include
-
-namespace tensorwrapper::buffer::detail_ {
-
-/// Implements EigenPIMPL by wrapping eigen::Tensor
-template<typename FloatType, std::size_t Rank>
-class EigenTensor : public EigenPIMPL<FloatType> {
-private:
-    using my_type   = EigenTensor<FloatType, Rank>;
-    using base_type = EigenPIMPL<FloatType>;
-
-public:
-    using typename base_type::const_base_reference;
-    using typename base_type::const_pimpl_reference;
-    using typename base_type::const_pointer;
-    using typename base_type::const_reference;
-    using typename base_type::const_shape_reference;
-    using typename base_type::eigen_rank_type;
-    using typename base_type::element_type;
-    using typename base_type::element_vector;
-    using typename base_type::index_vector;
-    using typename base_type::label_type;
-    using typename base_type::pimpl_pointer;
-    using typename base_type::pointer;
-    using typename base_type::reference;
-    using typename base_type::size_type;
-    using typename base_type::string_type;
-
-    using smooth_view                 = shape::SmoothView<shape::Smooth>;
-    using const_smooth_view           = shape::SmoothView<const shape::Smooth>;
-    using const_smooth_view_reference = const const_smooth_view&;
-    using eigen_data_type             = eigen::data_type<FloatType, Rank>;
-    using eigen_reference             = eigen_data_type&;
-    using const_eigen_reference       = const eigen_data_type&;
-    using hash_type                   = hash_utilities::hash_type;
-
-    EigenTensor() = default;
-
-    explicit EigenTensor(const_smooth_view_reference shape) :
-      m_tensor_(allocate_from_shape_(shape, std::make_index_sequence<Rank>())) {
-    }
-
-    /// Tests for exact equality
-    bool operator==(const my_type& rhs) const noexcept {
-        return get_hash() == rhs.get_hash();
-    }
-
-    // Returns the hash for the current state of *this, computing it first if
-    // needed.
-    hash_type get_hash() const {
-        if(m_recalculate_hash_ or !m_hash_caching_) update_hash_();
-        return m_hash_;
-    }
-
-protected:
-    pimpl_pointer clone_() const override {
-        return std::make_unique<my_type>(*this);
-    }
-
-    eigen_rank_type rank_() const noexcept override { return Rank; }
-
-    size_type size_() const noexcept override { return m_tensor_.size(); }
-
-    size_type extent_(eigen_rank_type i) const override {
-        return m_tensor_.dimension(i);
-    }
-
-    pointer get_mutable_data_() noexcept override {
-        turn_off_hash_caching_();
-        return m_tensor_.data();
-    }
-
-    const_pointer get_immutable_data_() const noexcept override {
-        return m_tensor_.data();
-    }
-
-    const_reference get_elem_(index_vector index) const override {
-        return unwrap_vector_(std::move(index),
-                              std::make_index_sequence<Rank>());
-    }
-
-    void set_elem_(index_vector index, element_type new_value) override {
-        mark_for_rehash_();
-        unwrap_vector_(std::move(index), std::make_index_sequence<Rank>()) =
-          new_value;
-    }
-
-    const_reference get_data_(size_type index) const override {
-        return m_tensor_.data()[index];
-    }
-
-    void set_data_(size_type index, element_type new_value) override {
-        mark_for_rehash_();
-        m_tensor_.data()[index] = new_value;
-    }
-
-    void fill_(element_type value) override {
-        mark_for_rehash_();
-        std::fill(m_tensor_.data(), m_tensor_.data() + m_tensor_.size(), value);
-    }
-
-    void copy_(const element_vector& values) override {
-        mark_for_rehash_();
-        std::copy(values.begin(), values.end(), m_tensor_.data());
-    }
-
-    bool are_equal_(const_base_reference rhs) const noexcept override {
-        return base_type::template are_equal_impl_<my_type>(rhs);
-    }
-
-    string_type to_string_() const override {
-        std::stringstream ss;
-        ss << m_tensor_;
-        return ss.str();
-    }
-
-    std::ostream& add_to_stream_(std::ostream& os) const override {
-        return os << m_tensor_;
-    }
-
-    void addition_assignment_(label_type this_labels, label_type lhs_labels,
-                              label_type rhs_labels, const_pimpl_reference lhs,
-                              const_pimpl_reference rhs) override;
-
-    void subtraction_assignment_(label_type this_labels, label_type lhs_labels,
-                                 label_type rhs_labels,
-                                 const_pimpl_reference lhs,
-                                 const_pimpl_reference rhs) override;
-
-    void hadamard_assignment_(label_type this_labels, label_type lhs_labels,
-                              label_type rhs_labels, const_pimpl_reference lhs,
-                              const_pimpl_reference rhs) override;
-
-    void contraction_assignment_(label_type this_labels, label_type lhs_labels,
-                                 label_type rhs_labels,
-                                 const_shape_reference result_shape,
-                                 const_pimpl_reference lhs,
-                                 const_pimpl_reference rhs) override;
-
-    void permute_assignment_(label_type this_labels, label_type rhs_labels,
-                             const_pimpl_reference rhs) override;
-
-    void scalar_multiplication_(label_type this_labels, label_type rhs_labels,
-                                FloatType scalar,
-                                const_pimpl_reference rhs) override;
-
-private:
-    // Code factorization for implementing element-wise operations
-    template<typename OperationType>
-    void element_wise_op_(OperationType op, label_type this_labels,
-                          label_type lhs_labels, label_type rhs_labels,
-                          const_pimpl_reference lhs, const_pimpl_reference rhs);
-
-    // Handles TMP needed to create an Eigen Tensor from a Smooth object
-    template<std::size_t... I>
-    auto allocate_from_shape_(const_smooth_view_reference shape,
-                              std::index_sequence<I...>) {
-        return eigen_data_type(shape.extent(I)...);
-    }
-
-    // Gets an element from the Eigen Tensor by unwrapping a std::vector
-    template<std::size_t... I>
-    reference unwrap_vector_(index_vector index, std::index_sequence<I...>) {
-        return m_tensor_(tensorwrapper::detail_::to_long(index.at(I))...);
-    }
-
-    // Same as mutable version, but result is read-only
-    template<std::size_t... I>
-    const_reference unwrap_vector_(index_vector index,
-                                   std::index_sequence<I...>) const {
-        return m_tensor_(tensorwrapper::detail_::to_long(index.at(I))...);
-    }
-
-    // Computes the hash for the current state of *this
-    void update_hash_() const;
-
-    // Designates that the state may have changed and to recalculate the hash.
-    // This function is really just for readability and clarity.
-    void mark_for_rehash_() const { m_recalculate_hash_ = true; }
-
-    // Designates that state changes are not trackable and we should recalculate
-    // the hash each time.
-    void turn_off_hash_caching_() const { m_hash_caching_ = false; }
-
-    // Tracks whether the hash needs to be redetermined
-    mutable bool m_recalculate_hash_ = true;
-
-    // Tracks whether hash caching has been turned off
-    mutable bool m_hash_caching_ = true;
-
-    // Holds the computed hash value for this instance's state
-    mutable hash_type m_hash_;
-
-    // The Eigen tensor *this wraps
-    eigen_data_type m_tensor_;
-};
-
-#define DECLARE_EIGEN_TENSOR(TYPE)                  \
-    extern template class EigenTensor<TYPE, 0>;     \
-    extern template class EigenTensor<TYPE, 1>;     \
-    extern template class EigenTensor<TYPE, 2>;     \
-    extern template class EigenTensor<TYPE, 3>;     \
-    extern template class EigenTensor<TYPE, 4>;     \
-    extern template class EigenTensor<TYPE, 5>;     \
-    extern template class EigenTensor<TYPE, 6>;     \
-    extern template class EigenTensor<TYPE, 7>;     \
-    extern template class EigenTensor<TYPE, 8>;     \
-    extern template class EigenTensor<TYPE, 9>;     \
-    extern template class EigenTensor<TYPE, 10>
-
-TW_APPLY_FLOATING_POINT_TYPES(DECLARE_EIGEN_TENSOR);
-
-#undef DECLARE_EIGEN_TENSOR
-
-} // namespace tensorwrapper::buffer::detail_
diff --git a/src/tensorwrapper/buffer/eigen.cpp b/src/tensorwrapper/buffer/eigen.cpp
deleted file mode 100644
index f023e6f7..00000000
--- a/src/tensorwrapper/buffer/eigen.cpp
+++ /dev/null
@@ -1,259 +0,0 @@
-/*
- * Copyright 2024 NWChemEx-Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "detail_/eigen_tensor.hpp"
-#include
-#include
-#include
-#include
-
-namespace tensorwrapper::buffer {
-
-#define TPARAMS template<typename FloatType>
-#define EIGEN Eigen<FloatType>
-
-// -- Public Methods
-
-TPARAMS
-EIGEN::Eigen() noexcept = default;
-
-TPARAMS
-EIGEN::Eigen(pimpl_pointer pimpl, layout_pointer playout,
-             allocator_base_pointer pallocator) :
-  my_base_type(std::move(playout), std::move(pallocator)),
-  m_pimpl_(std::move(pimpl)) {}
-
-TPARAMS
-EIGEN::Eigen(const Eigen& other) :
-  Eigen(other.has_pimpl_() ? other.m_pimpl_->clone() : nullptr, other.layout(),
-        other.allocator()) {}
-
-TPARAMS
-EIGEN::Eigen(Eigen&& other) noexcept = default;
-
-TPARAMS
-EIGEN& EIGEN::operator=(const Eigen& rhs) {
-    if(this != &rhs) Eigen(rhs).swap(*this);
-    return *this;
-}
-
-TPARAMS
-EIGEN& EIGEN::operator=(Eigen&& rhs) noexcept = default;
-
-TPARAMS
-EIGEN::~Eigen() noexcept = default;
-
-TPARAMS
-void EIGEN::swap(Eigen& other) noexcept { m_pimpl_.swap(other.m_pimpl_); }
-
-TPARAMS
-bool EIGEN::operator==(const Eigen& rhs) const noexcept {
-    if(has_pimpl_() != rhs.has_pimpl_()) return false;
-    if(!has_pimpl_()) return true;
-    return m_pimpl_->are_equal(*rhs.m_pimpl_);
-}
-
-// -- Protected Methods
-
-TPARAMS
-typename EIGEN::buffer_base_pointer EIGEN::clone_() const {
-    return std::make_unique<my_type>(*this);
-}
-
-TPARAMS
-bool EIGEN::are_equal_(const_buffer_base_reference rhs) const noexcept {
-    return my_base_type::template are_equal_impl_<my_type>(rhs);
-}
-
-TPARAMS
-typename EIGEN::dsl_reference EIGEN::addition_assignment_(
-  label_type this_labels, const_labeled_reference lhs,
-  const_labeled_reference rhs) {
-    BufferBase::addition_assignment_(this_labels, lhs, rhs);
-    using alloc_type     = allocator::Eigen<FloatType>;
-    const auto& lhs_down = alloc_type::rebind(lhs.object());
-    const auto& rhs_down = alloc_type::rebind(rhs.object());
-    if(!has_pimpl_()) m_pimpl_ = lhs_down.pimpl_().clone();
-    pimpl_().addition_assignment(this_labels, lhs.labels(), rhs.labels(),
-                                 lhs_down.pimpl_(), rhs_down.pimpl_());
-
-    return *this;
-}
-
-TPARAMS
-typename EIGEN::dsl_reference EIGEN::subtraction_assignment_(
-  label_type this_labels, const_labeled_reference lhs,
-  const_labeled_reference rhs) {
-    BufferBase::subtraction_assignment_(this_labels, lhs, rhs);
-    using alloc_type     = allocator::Eigen<FloatType>;
-    const auto& lhs_down = alloc_type::rebind(lhs.object());
-    const auto& rhs_down = alloc_type::rebind(rhs.object());
-    if(!has_pimpl_()) m_pimpl_ = lhs_down.pimpl_().clone();
-    pimpl_().subtraction_assignment(this_labels, lhs.labels(), rhs.labels(),
-                                    lhs_down.pimpl_(), rhs_down.pimpl_());
-    return *this;
-}
-
-TPARAMS
-typename EIGEN::dsl_reference EIGEN::multiplication_assignment_(
-  label_type this_labels, const_labeled_reference lhs,
-  const_labeled_reference rhs) {
-    BufferBase::multiplication_assignment_(this_labels, lhs, rhs);
-
-    using alloc_type     = allocator::Eigen<FloatType>;
-    const auto& lhs_down = alloc_type::rebind(lhs.object());
-    const auto& rhs_down = alloc_type::rebind(rhs.object());
-
-    if(!has_pimpl_()) m_pimpl_ = lhs_down.pimpl_().clone();
-    if(this_labels.is_hadamard_product(lhs.labels(), rhs.labels()))
-        pimpl_().hadamard_assignment(this_labels, lhs.labels(), rhs.labels(),
-                                     lhs_down.pimpl_(), rhs_down.pimpl_());
-    else if(this_labels.is_contraction(lhs.labels(), rhs.labels()))
-        pimpl_().contraction_assignment(this_labels, lhs.labels(), rhs.labels(),
-                                        this->layout().shape(),
-                                        lhs_down.pimpl_(), rhs_down.pimpl_());
-    else
-        throw std::runtime_error("Mixed products NYI");
-
-    return *this;
-}
-
-TPARAMS
-typename EIGEN::dsl_reference EIGEN::permute_assignment_(
-  label_type this_labels, const_labeled_reference rhs) {
-    BufferBase::permute_assignment_(this_labels, rhs);
-    using alloc_type     = allocator::Eigen<FloatType>;
-    const auto& rhs_down = alloc_type::rebind(rhs.object());
-    if(!has_pimpl_()) m_pimpl_ = rhs_down.pimpl_().clone();
-    pimpl_().permute_assignment(this_labels, rhs.labels(), rhs_down.pimpl_());
-
-    return *this;
-}
-
-TPARAMS
-typename EIGEN::dsl_reference EIGEN::scalar_multiplication_(
-  label_type this_labels, double scalar,
const_labeled_reference rhs) { - BufferBase::permute_assignment_(this_labels, rhs); - using alloc_type = allocator::Eigen; - const auto& rhs_down = alloc_type::rebind(rhs.object()); - if(!has_pimpl_()) m_pimpl_ = rhs_down.pimpl_().clone(); - pimpl_().scalar_multiplication(this_labels, rhs.labels(), scalar, - rhs_down.pimpl_()); - return *this; -} - -TPARAMS -typename EIGEN::pointer EIGEN::get_mutable_data_() noexcept { - return m_pimpl_ ? m_pimpl_->get_mutable_data() : nullptr; -} - -TPARAMS -typename EIGEN::const_pointer EIGEN::get_immutable_data_() const noexcept { - return m_pimpl_ ? m_pimpl_->get_immutable_data() : nullptr; -} - -TPARAMS -typename EIGEN::const_reference EIGEN::get_elem_(index_vector index) const { - return pimpl_().get_elem(std::move(index)); -} - -TPARAMS -void EIGEN::set_elem_(index_vector index, element_type new_value) { - return pimpl_().set_elem(std::move(index), std::move(new_value)); -} - -TPARAMS -typename EIGEN::const_reference EIGEN::get_data_(size_type index) const { - return pimpl_().get_data(std::move(index)); -} - -TPARAMS -void EIGEN::set_data_(size_type index, element_type new_value) { - return pimpl_().set_data(std::move(index), std::move(new_value)); -} - -TPARAMS -void EIGEN::fill_(element_type value) { - return pimpl_().fill(std::move(value)); -} - -TPARAMS -void EIGEN::copy_(const element_vector& values) { - return pimpl_().copy(values); -} - -TPARAMS -typename EIGEN::polymorphic_base::string_type EIGEN::to_string_() const { - return m_pimpl_ ? m_pimpl_->to_string() : ""; -} - -TPARAMS -std::ostream& EIGEN::add_to_stream_(std::ostream& os) const { - return m_pimpl_ ? m_pimpl_->add_to_stream(os) : os; -} - -// -- Private methods - -TPARAMS -bool EIGEN::has_pimpl_() const noexcept { return static_cast(m_pimpl_); } - -TPARAMS -void EIGEN::assert_pimpl_() const { - if(has_pimpl_()) return; - throw std::runtime_error("buffer::Eigen has no PIMPL!"); -} - -TPARAMS -typename EIGEN::pimpl_reference EIGEN::pimpl_() { - assert_pimpl_(); - return *m_pimpl_; -} - -TPARAMS -typename EIGEN::const_pimpl_reference EIGEN::pimpl_() const { - assert_pimpl_(); - return *m_pimpl_; -} - -TPARAMS -EIGEN& to_eigen_buffer(BufferBase& b) { - using allocator_type = allocator::Eigen; - return allocator_type::rebind(b); -} - -TPARAMS -const EIGEN& to_eigen_buffer(const BufferBase& b) { - using allocator_type = allocator::Eigen; - return allocator_type::rebind(b); -} - -#undef EIGEN -#undef TPARAMS - -#define DEFINE_EIGEN_BUFFER(TYPE) template class Eigen -#define DEFINE_TO_EIGEN_BUFFER(TYPE) \ - template Eigen& to_eigen_buffer(BufferBase&) -#define DEFINE_TO_CONST_EIGEN_BUFFER(TYPE) \ - template const Eigen& to_eigen_buffer(const BufferBase&) - -TW_APPLY_FLOATING_POINT_TYPES(DEFINE_EIGEN_BUFFER); -TW_APPLY_FLOATING_POINT_TYPES(DEFINE_TO_EIGEN_BUFFER); -TW_APPLY_FLOATING_POINT_TYPES(DEFINE_TO_CONST_EIGEN_BUFFER); - -#undef DEFINE_EIGEN_BUFFER -#undef DEFINE_TO_EIGEN_BUFFER -#undef DEFINE_TO_CONST_EIGEN_BUFFER - -} // namespace tensorwrapper::buffer diff --git a/src/tensorwrapper/operations/approximately_equal.cpp b/src/tensorwrapper/operations/approximately_equal.cpp index 3da9e93a..a5a9edfd 100644 --- a/src/tensorwrapper/operations/approximately_equal.cpp +++ b/src/tensorwrapper/operations/approximately_equal.cpp @@ -14,29 +14,30 @@ * limitations under the License. 
 */
 
-#include 
-#include 
+#include 
+#include 
 #include 
-#include 
+#include 
 
 namespace tensorwrapper::operations {
 namespace {
 
 struct Kernel {
+    Kernel(double tolerance) : tol(tolerance) {}
+
     template<typename FloatType>
-    bool run(const buffer::BufferBase& result, double tol) {
-        using allocator_type = allocator::Eigen;
+    bool operator()(const std::span<const FloatType> result) {
         const FloatType zero{0.0};
         const FloatType ptol = static_cast<FloatType>(tol);
-        auto& buffer_down = allocator_type::rebind(result);
-
-        for(std::size_t i = 0; i < buffer_down.size(); ++i) {
-            auto diff = buffer_down.get_data(i);
+        for(std::size_t i = 0; i < result.size(); ++i) {
+            auto diff = result[i];
             if(diff < zero) diff *= -1.0;
             if(diff >= ptol) return false;
         }
         return true;
     }
+
+    double tol;
 };
 
 } // namespace
 
@@ -50,9 +51,12 @@ bool approximately_equal(const Tensor& lhs, const Tensor& rhs, double tol) {
     Tensor result;
     result(index) = lhs(index) - rhs(index);
 
-    using tensorwrapper::utilities::floating_point_dispatch;
-
-    return floating_point_dispatch(Kernel{}, result.buffer(), tol);
+    using allocator_type = allocator::Contiguous;
+    allocator_type alloc(result.buffer().allocator().runtime());
+    const auto& buffer_down = alloc.rebind(result.buffer());
+    Kernel k(tol);
+    return wtf::buffer::visit_contiguous_buffer(
+      k, buffer_down);
 }
 
 } // namespace tensorwrapper::operations
diff --git a/src/tensorwrapper/operations/norm.cpp b/src/tensorwrapper/operations/norm.cpp
index 178337c9..ac500af8 100644
--- a/src/tensorwrapper/operations/norm.cpp
+++ b/src/tensorwrapper/operations/norm.cpp
@@ -13,37 +13,43 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
-#include 
+#include 
+#include 
 #include 
 #include 
-#include 
+#include 
 
 namespace tensorwrapper::operations {
 namespace {
 
 struct InfinityKernel {
+    InfinityKernel(allocator::Contiguous& alloc) : palloc(&alloc) {}
+
     template<typename FloatType>
-    Tensor run(const buffer::BufferBase& t) {
-        using allocator_type = allocator::Eigen;
-        allocator_type alloc(t.allocator().runtime());
+    auto operator()(const std::span<const FloatType> buffer) {
         FloatType max_element{0.0};
-        const auto& buffer_down = alloc.rebind(t);
-        for(std::size_t i = 0; i < buffer_down.size(); ++i) {
-            auto elem = types::fabs(buffer_down.get_data(i));
+        for(std::size_t i = 0; i < buffer.size(); ++i) {
+            auto elem = types::fabs(buffer[i]);
             if(elem > max_element) max_element = elem;
         }
 
         shape::Smooth s{};
         layout::Physical l(s);
-        auto pbuffer = alloc.construct(l, max_element);
+        auto pbuffer = palloc->construct(l, max_element);
 
         return Tensor(s, std::move(pbuffer));
     }
+
+    allocator::Contiguous* palloc;
 };
 
 } // namespace
 
 Tensor infinity_norm(const Tensor& t) {
-    InfinityKernel k;
-    return utilities::floating_point_dispatch(k, t.buffer());
+    using allocator_type = allocator::Contiguous;
+    auto rv = t.allocator().runtime();
+    allocator_type alloc(rv);
+    const auto& buffer_down = alloc.rebind(t.buffer());
+    InfinityKernel kernel(alloc);
+    return wtf::buffer::visit_contiguous_buffer(
+      kernel, buffer_down);
 }
 
 } // namespace tensorwrapper::operations
diff --git a/src/tensorwrapper/tensor/detail_/tensor_factory.cpp b/src/tensorwrapper/tensor/detail_/tensor_factory.cpp
index d7f4c684..78a72071 100644
--- a/src/tensorwrapper/tensor/detail_/tensor_factory.cpp
+++ b/src/tensorwrapper/tensor/detail_/tensor_factory.cpp
@@ -18,8 +18,8 @@
 #include "tensor_factory.hpp"
 #include "tensor_pimpl.hpp"
 #include 
-#include 
-#include 
+#include 
+#include 
 #include 
 
 namespace tensorwrapper::detail_ {
@@ -69,7 +69,7 @@ physical_layout_pointer
TensorFactory::default_physical_layout(
 allocator_pointer TensorFactory::default_allocator(
   const_physical_reference physical, runtime_view_type rv) {
-    return std::make_unique>(rv);
+    return std::make_unique<allocator::Contiguous>(rv);
 }
 
 bool TensorFactory::can_make_logical_layout(const input_type& input) noexcept {
@@ -177,7 +177,7 @@ namespace {
 /// Wraps the process of turning an initializer list into a TensorInput object
 template<typename T>
 auto il_to_input(T il, parallelzone::runtime::RuntimeView rv = {}) {
-    allocator::Eigen alloc(rv);
+    allocator::Contiguous alloc(rv);
     auto pbuffer = alloc.construct(il);
     return TensorInput(pbuffer->layout().shape(), std::move(pbuffer));
 }
diff --git a/src/tensorwrapper/utilities/to_json.cpp b/src/tensorwrapper/utilities/to_json.cpp
index 907257ff..cd798fd5 100644
--- a/src/tensorwrapper/utilities/to_json.cpp
+++ b/src/tensorwrapper/utilities/to_json.cpp
@@ -14,7 +14,8 @@
  * limitations under the License.
  */
 
-#include 
+#include 
+#include 
 #include 
 
 namespace tensorwrapper::utilities {
@@ -22,16 +23,14 @@ namespace tensorwrapper::utilities {
 using offset_type   = std::size_t;
 using offset_vector = std::vector<offset_type>;
 
-template
-using buffer_type = buffer::Contiguous;
+using buffer_type = buffer::Contiguous;
 
-template
-void to_json_(std::ostream& os, const buffer_type& t,
-              offset_vector index) {
+void to_json_(std::ostream& os, const buffer_type& t, offset_vector index) {
     const auto& shape = t.layout().shape().as_smooth();
     auto rank = index.size();
     if(rank == t.rank()) {
-        os << t.get_elem(index);
+        throw std::runtime_error("Fix me!");
+        // os << t.get_elem(index);
         return;
     } else {
         auto n_elements = shape.extent(rank);
@@ -48,7 +47,8 @@ void to_json_(std::ostream& os, const buffer_type& t,
 
 std::ostream& to_json(std::ostream& os, const Tensor& t) {
     offset_vector i;
-    const auto& buffer = buffer::to_eigen_buffer(t.buffer());
+    allocator::Contiguous alloc(t.buffer().allocator().runtime());
+    const auto& buffer = alloc.rebind(t.buffer());
     to_json_(os, buffer, i);
     return os;
 }
diff --git a/tests/cxx/unit_tests/tensorwrapper/allocator/contiguous.cpp b/tests/cxx/unit_tests/tensorwrapper/allocator/contiguous.cpp
index 7ba1ecf9..74188081 100644
--- a/tests/cxx/unit_tests/tensorwrapper/allocator/contiguous.cpp
+++ b/tests/cxx/unit_tests/tensorwrapper/allocator/contiguous.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright 2025 NWChemEx-Project
+ * Copyright 2024 NWChemEx-Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -15,87 +15,114 @@ */ #include "../testing/testing.hpp" +#include #include +#include +#include using namespace tensorwrapper; -TEMPLATE_LIST_TEST_CASE("allocator::Contiguous", "", - types::floating_point_types) { - using allocator_type = allocator::Contiguous; - using layout_type = typename allocator_type::layout_type; - - auto alloc = testing::make_allocator(); - - auto scalar_corr = testing::eigen_scalar(); - auto vector_corr = testing::eigen_vector(); - auto matrix_corr = testing::eigen_matrix(); - - SECTION("allocate(layout)") { - auto pscalar = alloc.allocate(scalar_corr->layout()); - pscalar->set_data(0, 42.0); - REQUIRE(pscalar->are_equal(*scalar_corr)); - } - - SECTION("allocate(layout*)") { - auto pvector = alloc.allocate(vector_corr->layout()); - pvector->set_data(0, 0.0); - pvector->set_data(1, 1.0); - pvector->set_data(2, 2.0); - pvector->set_data(3, 3.0); - pvector->set_data(4, 4.0); - - REQUIRE(pvector->are_equal(*vector_corr)); - } - - SECTION("contruct(scalar)") { - auto pscalar = alloc.construct(42.0); - REQUIRE(pscalar->are_equal(*scalar_corr)); - } - - SECTION("construct(vector)") { - auto pvector = alloc.construct({0.0, 1.0, 2.0, 3.0, 4.0}); - REQUIRE(pvector->are_equal(*vector_corr)); - } - - SECTION("construct(matrix)") { - typename allocator_type::rank2_il il{{1.0, 2.0}, {3.0, 4.0}}; - auto pmatrix = alloc.construct(il); - REQUIRE(pmatrix->are_equal(*matrix_corr)); - } - - SECTION("construct(tensor3)") { - typename allocator_type::rank3_il il{{{1.0, 2.0}, {3.0, 4.0}}, - {{5.0, 6.0}, {7.0, 8.0}}}; - auto ptensor3 = alloc.construct(il); - REQUIRE(ptensor3->are_equal(*testing::eigen_tensor3())); - } - - SECTION("construct(tensor4)") { - typename allocator_type::rank4_il il{ - {{{1.0, 2.0}, {3.0, 4.0}}, {{5.0, 6.0}, {7.0, 8.0}}}, - {{{9.0, 10.0}, {11.0, 12.0}}, {{13.0, 14.0}, {15.0, 16.0}}}}; - auto ptensor4 = alloc.construct(il); - REQUIRE(ptensor4->are_equal(*testing::eigen_tensor4())); - } - - SECTION("construct(layout, value)") { - auto pmatrix = alloc.construct(matrix_corr->layout(), 0.0); - matrix_corr->set_elem({0, 0}, 0.0); - matrix_corr->set_elem({0, 1}, 0.0); - matrix_corr->set_elem({1, 0}, 0.0); - matrix_corr->set_elem({1, 1}, 0.0); - - REQUIRE(pmatrix->are_equal(*matrix_corr)); - } - - SECTION("construct(layout*, value)") { - auto pmatrix = alloc.construct( - matrix_corr->layout().template clone_as(), 0.0); - matrix_corr->set_elem({0, 0}, 0.0); - matrix_corr->set_elem({0, 1}, 0.0); - matrix_corr->set_elem({1, 0}, 0.0); - matrix_corr->set_elem({1, 1}, 0.0); - - REQUIRE(pmatrix->are_equal(*matrix_corr)); - } +using types2test = types::floating_point_types; + +TEMPLATE_LIST_TEST_CASE("allocator::Contiguous", "", types2test) { + using alloc_type = allocator::Contiguous; + + parallelzone::runtime::RuntimeView rv; + // auto scalar_layout = testing::scalar_physical(); + // auto vector_layout = testing::vector_physical(2); + // auto matrix_layout = testing::matrix_physical(2, 2); + // using layout_type = decltype(scalar_layout); + + // auto pscalar_corr = testing::eigen_scalar(); + // auto& scalar_corr = *pscalar_corr; + // scalar_corr.set_elem({}, 0.0); + + // auto pvector_corr = testing::eigen_vector(2); + // auto& vector_corr = *pvector_corr; + // vector_corr.set_elem({0}, 1); + // vector_corr.set_elem({1}, 1); + + // auto pmatrix_corr = testing::eigen_matrix(2, 2); + // auto& matrix_corr = *pmatrix_corr; + // matrix_corr.set_elem({0, 0}, 2); + // matrix_corr.set_elem({0, 1}, 2); + // matrix_corr.set_elem({1, 0}, 2); + // matrix_corr.set_elem({1, 1}, 2); + + // 
alloc_type alloc(rv); + + // SECTION("Ctor") { + // SECTION("runtime") { REQUIRE(alloc.runtime() == rv); } + // testing::test_copy_and_move_ctors(alloc); + // } + + // SECTION("allocate(Layout)") { + // // N.b. allocate doesn't initialize tensor, so only compare layouts + // auto pscalar = alloc.allocate(scalar_layout); + // REQUIRE(pscalar->layout().are_equal(scalar_layout)); + + // auto pvector = alloc.allocate(vector_layout); + // REQUIRE(pvector->layout().are_equal(vector_layout)); + + // auto pmatrix = alloc.allocate(matrix_layout); + // REQUIRE(pmatrix->layout().are_equal(matrix_layout)); + + // // Works if ranks don't match + // pvector = alloc.allocate(vector_layout); + // REQUIRE(pvector->layout().are_equal(vector_layout)); + // } + + // SECTION("allocate(std::unique_ptr)") { + // // N.b. allocate doesn't initialize tensor, so only compare layouts + // auto pscalar_layout = std::make_unique(scalar_layout); + // auto pscalar = alloc.allocate(std::move(pscalar_layout)); + // REQUIRE(pscalar->layout().are_equal(scalar_layout)); + + // auto pvector_layout = std::make_unique(vector_layout); + // auto pvector = alloc.allocate(std::move(pvector_layout)); + // REQUIRE(pvector->layout().are_equal(vector_layout)); + + // auto pmatrix_layout = std::make_unique(matrix_layout); + // auto pmatrix = alloc.allocate(std::move(pmatrix_layout)); + // REQUIRE(pmatrix->layout().are_equal(matrix_layout)); + // } + + // SECTION("construct(value)") { + // auto pscalar = alloc.construct(scalar_layout, 0); + // REQUIRE(*pscalar == scalar_corr); + + // auto pvector = alloc.construct(vector_layout, 1); + // REQUIRE(*pvector == vector_corr); + + // auto pmatrix_layout = std::make_unique(matrix_layout); + // auto pmatrix = alloc.construct(std::move(pmatrix_layout), 2); + // REQUIRE(*pmatrix == matrix_corr); + // } + + // SECTION("can_rebind") { REQUIRE(alloc.can_rebind(scalar_corr)); } + + // SECTION("rebind(non-const)") { + // using type = typename alloc_type::buffer_base_reference; + // type scalar_base = scalar_corr; + // auto& eigen_buffer = alloc.rebind(scalar_base); + // REQUIRE(&eigen_buffer == &scalar_corr); + // } + + // SECTION("rebind(const)") { + // using type = typename + // alloc_type::const_buffer_base_reference; type scalar_base = + // scalar_corr; auto& eigen_buffer = alloc.rebind(scalar_base); + // REQUIRE(&eigen_buffer == &scalar_corr); + // } + + // SECTION("operator==") { REQUIRE(alloc == alloc_type(rv)); } + + // SECTION("virtual_methods") { + // SECTION("clone") { + // auto pscalar = alloc.clone(); + // REQUIRE(pscalar->are_equal(alloc)); + // } + + // SECTION("are_equal") { REQUIRE(alloc.are_equal(alloc_type(rv))); } + // } } diff --git a/tests/cxx/unit_tests/tensorwrapper/allocator/eigen.cpp b/tests/cxx/unit_tests/tensorwrapper/allocator/eigen.cpp deleted file mode 100644 index 5e50001f..00000000 --- a/tests/cxx/unit_tests/tensorwrapper/allocator/eigen.cpp +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Copyright 2024 NWChemEx-Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "../testing/testing.hpp" -#include -#include -#include -#include - -using namespace tensorwrapper; - -using types2test = types::floating_point_types; - -TEMPLATE_LIST_TEST_CASE("EigenAllocator", "", types2test) { - using alloc_type = allocator::Eigen; - - parallelzone::runtime::RuntimeView rv; - auto scalar_layout = testing::scalar_physical(); - auto vector_layout = testing::vector_physical(2); - auto matrix_layout = testing::matrix_physical(2, 2); - using layout_type = decltype(scalar_layout); - - auto pscalar_corr = testing::eigen_scalar(); - auto& scalar_corr = *pscalar_corr; - scalar_corr.set_elem({}, 0.0); - - auto pvector_corr = testing::eigen_vector(2); - auto& vector_corr = *pvector_corr; - vector_corr.set_elem({0}, 1); - vector_corr.set_elem({1}, 1); - - auto pmatrix_corr = testing::eigen_matrix(2, 2); - auto& matrix_corr = *pmatrix_corr; - matrix_corr.set_elem({0, 0}, 2); - matrix_corr.set_elem({0, 1}, 2); - matrix_corr.set_elem({1, 0}, 2); - matrix_corr.set_elem({1, 1}, 2); - - alloc_type alloc(rv); - - SECTION("Ctor") { - SECTION("runtime") { REQUIRE(alloc.runtime() == rv); } - testing::test_copy_and_move_ctors(alloc); - } - - SECTION("allocate(Layout)") { - // N.b. allocate doesn't initialize tensor, so only compare layouts - auto pscalar = alloc.allocate(scalar_layout); - REQUIRE(pscalar->layout().are_equal(scalar_layout)); - - auto pvector = alloc.allocate(vector_layout); - REQUIRE(pvector->layout().are_equal(vector_layout)); - - auto pmatrix = alloc.allocate(matrix_layout); - REQUIRE(pmatrix->layout().are_equal(matrix_layout)); - - // Works if ranks don't match - pvector = alloc.allocate(vector_layout); - REQUIRE(pvector->layout().are_equal(vector_layout)); - } - - SECTION("allocate(std::unique_ptr)") { - // N.b. allocate doesn't initialize tensor, so only compare layouts - auto pscalar_layout = std::make_unique(scalar_layout); - auto pscalar = alloc.allocate(std::move(pscalar_layout)); - REQUIRE(pscalar->layout().are_equal(scalar_layout)); - - auto pvector_layout = std::make_unique(vector_layout); - auto pvector = alloc.allocate(std::move(pvector_layout)); - REQUIRE(pvector->layout().are_equal(vector_layout)); - - auto pmatrix_layout = std::make_unique(matrix_layout); - auto pmatrix = alloc.allocate(std::move(pmatrix_layout)); - REQUIRE(pmatrix->layout().are_equal(matrix_layout)); - } - - SECTION("construct(value)") { - auto pscalar = alloc.construct(scalar_layout, 0); - REQUIRE(*pscalar == scalar_corr); - - auto pvector = alloc.construct(vector_layout, 1); - REQUIRE(*pvector == vector_corr); - - auto pmatrix_layout = std::make_unique(matrix_layout); - auto pmatrix = alloc.construct(std::move(pmatrix_layout), 2); - REQUIRE(*pmatrix == matrix_corr); - } - - SECTION("can_rebind") { REQUIRE(alloc.can_rebind(scalar_corr)); } - - SECTION("rebind(non-const)") { - using type = typename alloc_type::buffer_base_reference; - type scalar_base = scalar_corr; - auto& eigen_buffer = alloc.rebind(scalar_base); - REQUIRE(&eigen_buffer == &scalar_corr); - } - - SECTION("rebind(const)") { - using type = typename alloc_type::const_buffer_base_reference; - type scalar_base = scalar_corr; - auto& eigen_buffer = alloc.rebind(scalar_base); - REQUIRE(&eigen_buffer == &scalar_corr); - } - - SECTION("operator==") { REQUIRE(alloc == alloc_type(rv)); } - - SECTION("virtual_methods") { - SECTION("clone") { - auto pscalar = alloc.clone(); - REQUIRE(pscalar->are_equal(alloc)); - } - - SECTION("are_equal") { REQUIRE(alloc.are_equal(alloc_type(rv))); } - } -} diff --git 
a/tests/cxx/unit_tests/tensorwrapper/buffer/contiguous.cpp b/tests/cxx/unit_tests/tensorwrapper/buffer/contiguous.cpp
index ddcd4f4e..fc65df5a 100644
--- a/tests/cxx/unit_tests/tensorwrapper/buffer/contiguous.cpp
+++ b/tests/cxx/unit_tests/tensorwrapper/buffer/contiguous.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright 2024 NWChemEx-Project
+ * Copyright 2025 NWChemEx-Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -15,120 +15,441 @@
 */
 
 #include "../testing/testing.hpp"
-#include 
-#include 
-#include 
+#include 
+#include 
 
 using namespace tensorwrapper;
-using namespace buffer;
-
-/* Testing strategy:
- *
- * - Contiguous is an abstract class. To test it we must create an instance of
- *   a derived class. We then will upcast to Contiguous and perform checks
- *   through the BufferBase interface.
+/* Testing notes:
  *
+ * The various operations (addition_assignment, etc.) are not exhaustively
+ * tested here. These operations are implemented via visitors that dispatch to
+ * various backends. The visitors themselves are tested in their own unit tests.
+ * Here we assume the visitors work and spot check a couple of operations to
+ * help catch any integration issues.
  */
 
-TEMPLATE_LIST_TEST_CASE("buffer::Contiguous", "", types::floating_point_types) {
-    using base_type = Contiguous;
-    auto pt0 = testing::eigen_scalar();
-    auto pt1 = testing::eigen_vector();
-    auto& t0 = *pt0;
-    auto& t1 = *pt1;
+TEMPLATE_LIST_TEST_CASE("Contiguous", "", types::floating_point_types) {
+    using buffer::Contiguous;
+    using buffer_type = Contiguous::buffer_type;
+    using shape_type = typename Contiguous::shape_type;
+    using label_type = typename Contiguous::label_type;
+
+    TestType one(1.0), two(2.0), three(3.0), four(4.0);
+    std::vector data = {one, two, three, four};
+
+    shape_type scalar_shape({});
+    shape_type vector_shape({4});
+    shape_type matrix_shape({2, 2});
+
+    Contiguous defaulted;
+    Contiguous scalar(std::vector{one}, scalar_shape);
+    Contiguous vector(data, vector_shape);
+    Contiguous matrix(data, matrix_shape);
+
+    SECTION("Ctors and assignment") {
+        SECTION("Default ctor") {
+            REQUIRE(defaulted.size() == 0);
+            REQUIRE(defaulted.shape() == shape_type());
+        }
+
+        SECTION("vector ctor") {
+            REQUIRE(scalar.size() == 1);
+            REQUIRE(scalar.shape() == scalar_shape);
+            REQUIRE(scalar.get_elem({}) == one);
+
+            REQUIRE(vector.size() == 4);
+            REQUIRE(vector.shape() == vector_shape);
+            REQUIRE(vector.get_elem({0}) == one);
+            REQUIRE(vector.get_elem({1}) == two);
+            REQUIRE(vector.get_elem({2}) == three);
+            REQUIRE(vector.get_elem({3}) == four);
+
+            REQUIRE(matrix.size() == 4);
+            REQUIRE(matrix.shape() == matrix_shape);
+            REQUIRE(matrix.get_elem({0, 0}) == one);
+            REQUIRE(matrix.get_elem({0, 1}) == two);
+            REQUIRE(matrix.get_elem({1, 0}) == three);
+            REQUIRE(matrix.get_elem({1, 1}) == four);
+
+            REQUIRE_THROWS_AS(Contiguous(data, scalar_shape),
+                              std::invalid_argument);
+        }
+
+        SECTION("FloatBuffer ctor") {
+            buffer_type buf(data);
+
+            Contiguous vector_buf(buf, vector_shape);
+            REQUIRE(vector_buf == vector);
+
+            Contiguous matrix_buf(buf, matrix_shape);
+            REQUIRE(matrix_buf == matrix);
+
+            REQUIRE_THROWS_AS(Contiguous(buf, scalar_shape),
+                              std::invalid_argument);
+        }
+
+        SECTION("Copy ctor") {
+            Contiguous defaulted_copy(defaulted);
+            REQUIRE(defaulted_copy == defaulted);
+
+            Contiguous scalar_copy(scalar);
+            REQUIRE(scalar_copy == scalar);
+
+            Contiguous vector_copy(vector);
+            REQUIRE(vector_copy == vector);
+
+            Contiguous
matrix_copy(matrix); + REQUIRE(matrix_copy == matrix); + } + + SECTION("Move ctor") { + Contiguous defaulted_temp(defaulted); + Contiguous defaulted_move(std::move(defaulted_temp)); + REQUIRE(defaulted_move == defaulted); + + Contiguous scalar_temp(scalar); + Contiguous scalar_move(std::move(scalar_temp)); + REQUIRE(scalar_move == scalar); + + Contiguous vector_temp(vector); + Contiguous vector_move(std::move(vector_temp)); + REQUIRE(vector_move == vector); + + Contiguous matrix_temp(matrix); + Contiguous matrix_move(std::move(matrix_temp)); + REQUIRE(matrix_move == matrix); + } + + SECTION("Copy assignment") { + Contiguous defaulted_copy; + auto pdefaulted_copy = &(defaulted_copy = defaulted); + REQUIRE(defaulted_copy == defaulted); + REQUIRE(pdefaulted_copy == &defaulted_copy); + + Contiguous scalar_copy; + auto pscalar_copy = &(scalar_copy = scalar); + REQUIRE(scalar_copy == scalar); + REQUIRE(pscalar_copy == &scalar_copy); + + Contiguous vector_copy; + auto pvector_copy = &(vector_copy = vector); + REQUIRE(vector_copy == vector); + REQUIRE(pvector_copy == &vector_copy); + + Contiguous matrix_copy; + auto pmatrix_copy = &(matrix_copy = matrix); + REQUIRE(matrix_copy == matrix); + REQUIRE(pmatrix_copy == &matrix_copy); + } - auto& base0 = static_cast(t0); - auto& base1 = static_cast(t1); + SECTION("Move assignment") { + Contiguous defaulted_temp(defaulted); + Contiguous defaulted_move; + auto pdefaulted_move = + &(defaulted_move = std::move(defaulted_temp)); + REQUIRE(defaulted_move == defaulted); + REQUIRE(pdefaulted_move == &defaulted_move); + + Contiguous scalar_temp(scalar); + Contiguous scalar_move; + auto pscalar_move = &(scalar_move = std::move(scalar_temp)); + REQUIRE(scalar_move == scalar); + REQUIRE(pscalar_move == &scalar_move); + + Contiguous vector_temp(vector); + Contiguous vector_move; + auto pvector_move = &(vector_move = std::move(vector_temp)); + REQUIRE(vector_move == vector); + REQUIRE(pvector_move == &vector_move); + + Contiguous matrix_temp(matrix); + Contiguous matrix_move; + auto pmatrix_move = &(matrix_move = std::move(matrix_temp)); + REQUIRE(matrix_move == matrix); + REQUIRE(pmatrix_move == &matrix_move); + } + } + + SECTION("shape") { + REQUIRE(defaulted.shape() == shape_type()); + REQUIRE(scalar.shape() == scalar_shape); + REQUIRE(vector.shape() == vector_shape); + REQUIRE(matrix.shape() == matrix_shape); + } SECTION("size") { - REQUIRE(base0.size() == 1); - REQUIRE(base1.size() == 5); + REQUIRE(defaulted.size() == 0); + REQUIRE(scalar.size() == 1); + REQUIRE(vector.size() == 4); + REQUIRE(matrix.size() == 4); } - SECTION("get_mutable_data()") { - REQUIRE(*base0.get_mutable_data() == TestType(42.0)); + SECTION("get_elem") { + REQUIRE_THROWS_AS(defaulted.get_elem({}), std::out_of_range); + + REQUIRE(scalar.get_elem({}) == one); + REQUIRE_THROWS_AS(scalar.get_elem({0}), std::out_of_range); - REQUIRE(*(base1.get_mutable_data() + 0) == TestType(0.0)); - REQUIRE(*(base1.get_mutable_data() + 1) == TestType(1.0)); - REQUIRE(*(base1.get_mutable_data() + 2) == TestType(2.0)); - REQUIRE(*(base1.get_mutable_data() + 3) == TestType(3.0)); - REQUIRE(*(base1.get_mutable_data() + 4) == TestType(4.0)); + REQUIRE(vector.get_elem({0}) == one); + REQUIRE(vector.get_elem({1}) == two); + REQUIRE(vector.get_elem({2}) == three); + REQUIRE(vector.get_elem({3}) == four); + REQUIRE_THROWS_AS(vector.get_elem({4}), std::out_of_range); + + REQUIRE(matrix.get_elem({0, 0}) == one); + REQUIRE(matrix.get_elem({0, 1}) == two); + REQUIRE(matrix.get_elem({1, 0}) == three); + 
REQUIRE(matrix.get_elem({1, 1}) == four); + REQUIRE_THROWS_AS(matrix.get_elem({2, 0}), std::out_of_range); } - SECTION("get_immutable_data() const") { - REQUIRE(*std::as_const(base0).get_immutable_data() == TestType(42.0)); - - REQUIRE(*(std::as_const(base1).get_immutable_data() + 0) == - TestType(0.0)); - REQUIRE(*(std::as_const(base1).get_immutable_data() + 1) == - TestType(1.0)); - REQUIRE(*(std::as_const(base1).get_immutable_data() + 2) == - TestType(2.0)); - REQUIRE(*(std::as_const(base1).get_immutable_data() + 3) == - TestType(3.0)); - REQUIRE(*(std::as_const(base1).get_immutable_data() + 4) == - TestType(4.0)); + SECTION("set_elem") { + REQUIRE_THROWS_AS(defaulted.set_elem({}, one), std::out_of_range); + + REQUIRE(scalar.get_elem({}) != two); + scalar.set_elem({}, two); + REQUIRE(scalar.get_elem({}) == two); + + REQUIRE(vector.get_elem({2}) != four); + vector.set_elem({2}, four); + REQUIRE(vector.get_elem({2}) == four); + + REQUIRE(matrix.get_elem({1, 0}) != one); + matrix.set_elem({1, 0}, one); + REQUIRE(matrix.get_elem({1, 0}) == one); } - SECTION("get_elem() const") { - REQUIRE(base0.get_elem({}) == TestType(42.0)); + SECTION("operator==") { + // Same object + REQUIRE(defaulted == defaulted); + + Contiguous scalar_copy(std::vector{one}, scalar_shape); + REQUIRE(scalar == scalar_copy); + + Contiguous vector_copy(data, vector_shape); + REQUIRE(vector == vector_copy); - REQUIRE(base1.get_elem({0}) == TestType(0.0)); - REQUIRE(base1.get_elem({1}) == TestType(1.0)); - REQUIRE(base1.get_elem({2}) == TestType(2.0)); - REQUIRE(base1.get_elem({3}) == TestType(3.0)); - REQUIRE(base1.get_elem({4}) == TestType(4.0)); + Contiguous matrix_copy(data, matrix_shape); + REQUIRE(matrix == matrix_copy); - REQUIRE_THROWS_AS(base0.get_elem({0}), std::runtime_error); + // Different ranks + REQUIRE_FALSE(scalar == vector); + REQUIRE_FALSE(vector == matrix); + REQUIRE_FALSE(scalar == matrix); + + // Different shapes + shape_type matrix_shape2({4, 1}); + REQUIRE_FALSE(scalar == Contiguous(data, matrix_shape2)); + + // Different values + std::vector diff_data = {two, three, four, one}; + Contiguous scalar_diff(std::vector{two}, scalar_shape); + REQUIRE_FALSE(scalar == scalar_diff); + REQUIRE_FALSE(vector == Contiguous(diff_data, vector_shape)); + REQUIRE_FALSE(matrix == Contiguous(diff_data, matrix_shape)); + } + + SECTION("addition_assignment_") { + SECTION("scalar") { + label_type labels(""); + Contiguous result; + result.addition_assignment(labels, scalar(labels), scalar(labels)); + REQUIRE(result.shape() == scalar_shape); + REQUIRE(result.get_elem({}) == TestType(2.0)); + } + + SECTION("vector") { + label_type labels("i"); + Contiguous result; + result.addition_assignment(labels, vector(labels), vector(labels)); + REQUIRE(result.shape() == vector_shape); + REQUIRE(result.get_elem({0}) == TestType(2.0)); + REQUIRE(result.get_elem({1}) == TestType(4.0)); + REQUIRE(result.get_elem({2}) == TestType(6.0)); + REQUIRE(result.get_elem({3}) == TestType(8.0)); + } + + SECTION("matrix") { + label_type labels("i,j"); + Contiguous result; + result.addition_assignment(labels, matrix(labels), matrix(labels)); + REQUIRE(result.shape() == matrix_shape); + REQUIRE(result.get_elem({0, 0}) == TestType(2.0)); + REQUIRE(result.get_elem({0, 1}) == TestType(4.0)); + REQUIRE(result.get_elem({1, 0}) == TestType(6.0)); + REQUIRE(result.get_elem({1, 1}) == TestType(8.0)); + } } - SECTION("set_elem() const") { - base0.set_elem({}, TestType(43.0)); - REQUIRE(base0.get_elem({}) == TestType(43.0)); + 
SECTION("subtraction_assignment_") { + SECTION("scalar") { + label_type labels(""); + Contiguous result; + result.subtraction_assignment(labels, scalar(labels), + scalar(labels)); + REQUIRE(result.shape() == scalar_shape); + REQUIRE(result.get_elem({}) == TestType(0.0)); + } - base1.set_elem({0}, TestType(43.0)); - REQUIRE(base1.get_elem({0}) == TestType(43.0)); + SECTION("vector") { + label_type labels("i"); + Contiguous result; + result.subtraction_assignment(labels, vector(labels), + vector(labels)); + REQUIRE(result.shape() == vector_shape); + REQUIRE(result.get_elem({0}) == TestType(0.0)); + REQUIRE(result.get_elem({1}) == TestType(0.0)); + REQUIRE(result.get_elem({2}) == TestType(0.0)); + REQUIRE(result.get_elem({3}) == TestType(0.0)); + } - REQUIRE_THROWS_AS(base0.set_elem({0}, TestType{0.0}), - std::runtime_error); + SECTION("matrix") { + label_type labels("i,j"); + Contiguous result; + result.subtraction_assignment(labels, matrix(labels), + matrix(labels)); + REQUIRE(result.shape() == matrix_shape); + REQUIRE(result.get_elem({0, 0}) == TestType(0.0)); + REQUIRE(result.get_elem({0, 1}) == TestType(0.0)); + REQUIRE(result.get_elem({1, 0}) == TestType(0.0)); + REQUIRE(result.get_elem({1, 1}) == TestType(0.0)); + } } - SECTION("get_data() const") { - REQUIRE(base0.get_data(0) == TestType(42.0)); + SECTION("multiplication_assignment_") { + // N.b., dispatching among hadamard, contraction, etc. is the visitor's + // responsibility and happens there. Here we just test hadamard. - REQUIRE(base1.get_data(0) == TestType(0.0)); - REQUIRE(base1.get_data(1) == TestType(1.0)); - REQUIRE(base1.get_data(2) == TestType(2.0)); - REQUIRE(base1.get_data(3) == TestType(3.0)); - REQUIRE(base1.get_data(4) == TestType(4.0)); + SECTION("scalar") { + label_type labels(""); + Contiguous result; + result.multiplication_assignment(labels, scalar(labels), + scalar(labels)); + REQUIRE(result.shape() == scalar_shape); + REQUIRE(result.get_elem({}) == TestType(1.0)); + } - REQUIRE_THROWS_AS(base0.get_data(1), std::runtime_error); + SECTION("vector") { + label_type labels("i"); + Contiguous result; + result.multiplication_assignment(labels, vector(labels), + vector(labels)); + REQUIRE(result.shape() == vector_shape); + REQUIRE(result.get_elem({0}) == TestType(1.0)); + REQUIRE(result.get_elem({1}) == TestType(4.0)); + REQUIRE(result.get_elem({2}) == TestType(9.0)); + REQUIRE(result.get_elem({3}) == TestType(16.0)); + } + + SECTION("matrix") { + label_type labels("i,j"); + Contiguous result; + result.multiplication_assignment(labels, matrix(labels), + matrix(labels)); + REQUIRE(result.shape() == matrix_shape); + REQUIRE(result.get_elem({0, 0}) == TestType(1.0)); + REQUIRE(result.get_elem({0, 1}) == TestType(4.0)); + REQUIRE(result.get_elem({1, 0}) == TestType(9.0)); + REQUIRE(result.get_elem({1, 1}) == TestType(16.0)); + } + } + + SECTION("scalar_multiplication_") { + // TODO: Test with other scalar types when public API supports it + using scalar_type = double; + scalar_type scalar_value_{2.0}; + TestType scalar_value(scalar_value_); + SECTION("scalar") { + label_type labels(""); + Contiguous result; + result.scalar_multiplication(labels, scalar_value_, scalar(labels)); + REQUIRE(result.shape() == scalar_shape); + REQUIRE(result.get_elem({}) == TestType(1.0) * scalar_value); + } + + SECTION("vector") { + label_type labels("i"); + Contiguous result; + result.scalar_multiplication(labels, scalar_value_, vector(labels)); + REQUIRE(result.shape() == vector_shape); + REQUIRE(result.get_elem({0}) == TestType(1.0) * 
scalar_value); + REQUIRE(result.get_elem({1}) == TestType(2.0) * scalar_value); + REQUIRE(result.get_elem({2}) == TestType(3.0) * scalar_value); + REQUIRE(result.get_elem({3}) == TestType(4.0) * scalar_value); + } + + SECTION("matrix") { + label_type rhs_labels("i,j"); + label_type lhs_labels("j,i"); + Contiguous result; + result.scalar_multiplication(lhs_labels, scalar_value_, + matrix(rhs_labels)); + REQUIRE(result.shape() == matrix_shape); + REQUIRE(result.get_elem({0, 0}) == TestType(1.0) * scalar_value); + REQUIRE(result.get_elem({0, 1}) == TestType(3.0) * scalar_value); + REQUIRE(result.get_elem({1, 0}) == TestType(2.0) * scalar_value); + REQUIRE(result.get_elem({1, 1}) == TestType(4.0) * scalar_value); + } } - SECTION("set_data() const") { - base0.set_data(0, TestType(43.0)); - REQUIRE(base0.get_elem({}) == TestType(43.0)); + SECTION("permute_assignment_") { + SECTION("scalar") { + label_type labels(""); + Contiguous result; + result.permute_assignment(labels, scalar(labels)); + REQUIRE(result.shape() == scalar_shape); + REQUIRE(result.get_elem({}) == TestType(1.0)); + } + + SECTION("vector") { + label_type labels("i"); + Contiguous result; + result.permute_assignment(labels, vector(labels)); + REQUIRE(result.shape() == vector_shape); + REQUIRE(result.get_elem({0}) == TestType(1.0)); + REQUIRE(result.get_elem({1}) == TestType(2.0)); + REQUIRE(result.get_elem({2}) == TestType(3.0)); + REQUIRE(result.get_elem({3}) == TestType(4.0)); + } - REQUIRE_THROWS_AS(base0.set_data(1, TestType{0.0}), std::runtime_error); + SECTION("matrix") { + label_type rhs_labels("i,j"); + label_type lhs_labels("j,i"); + Contiguous result; + result.permute_assignment(lhs_labels, matrix(rhs_labels)); + REQUIRE(result.shape() == matrix_shape); + REQUIRE(result.get_elem({0, 0}) == TestType(1.0)); + REQUIRE(result.get_elem({0, 1}) == TestType(3.0)); + REQUIRE(result.get_elem({1, 0}) == TestType(2.0)); + REQUIRE(result.get_elem({1, 1}) == TestType(4.0)); + } } - SECTION("fill()") { - base1.fill(TestType{43.0}); - REQUIRE(base1.get_data(0) == TestType(43.0)); - REQUIRE(base1.get_data(1) == TestType(43.0)); - REQUIRE(base1.get_data(2) == TestType(43.0)); - REQUIRE(base1.get_data(3) == TestType(43.0)); - REQUIRE(base1.get_data(4) == TestType(43.0)); + SECTION("to_string") { + REQUIRE(defaulted.to_string().empty()); + REQUIRE_FALSE(scalar.to_string().empty()); + REQUIRE_FALSE(vector.to_string().empty()); + REQUIRE_FALSE(matrix.to_string().empty()); } - SECTION("copy()") { - auto data = std::vector(5, TestType(43.0)); - base1.copy(data); - REQUIRE(base1.get_data(0) == TestType(43.0)); - REQUIRE(base1.get_data(1) == TestType(43.0)); - REQUIRE(base1.get_data(2) == TestType(43.0)); - REQUIRE(base1.get_data(3) == TestType(43.0)); - REQUIRE(base1.get_data(4) == TestType(43.0)); + SECTION("add_to_stream") { + std::stringstream ss; + SECTION("defaulted") { + defaulted.add_to_stream(ss); + REQUIRE(ss.str().empty()); + } + SECTION("scalar") { + scalar.add_to_stream(ss); + REQUIRE_FALSE(ss.str().empty()); + } + SECTION("vector") { + vector.add_to_stream(ss); + REQUIRE_FALSE(ss.str().empty()); + } + SECTION("matrix") { + matrix.add_to_stream(ss); + REQUIRE_FALSE(ss.str().empty()); + } } } diff --git a/tests/cxx/unit_tests/tensorwrapper/buffer/eigen.cpp b/tests/cxx/unit_tests/tensorwrapper/buffer/eigen.cpp deleted file mode 100644 index f783eff9..00000000 --- a/tests/cxx/unit_tests/tensorwrapper/buffer/eigen.cpp +++ /dev/null @@ -1,255 +0,0 @@ -/* - * Copyright 2024 NWChemEx-Project - * - * Licensed under the Apache License, 
Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "../testing/testing.hpp" -#include -#include -#include - -using namespace tensorwrapper; -using namespace testing; - -TEMPLATE_LIST_TEST_CASE("Eigen", "", types::floating_point_types) { - // N.B. we actually get Contiguous objects back - using buffer_type = buffer::Eigen; - - auto pscalar = testing::eigen_scalar(); - auto& eigen_scalar = static_cast(*pscalar); - eigen_scalar.set_elem({}, 10.0); - - auto pvector = testing::eigen_vector(2); - auto& eigen_vector = static_cast(*pvector); - eigen_vector.set_elem({0}, 10.0); - eigen_vector.set_elem({1}, 20.0); - - auto pmatrix = testing::eigen_matrix(2, 3); - auto& eigen_matrix = static_cast(*pmatrix); - eigen_matrix.set_elem({0, 0}, 10.0); - eigen_matrix.set_elem({0, 1}, 20.0); - eigen_matrix.set_elem({0, 2}, 30.0); - eigen_matrix.set_elem({1, 0}, 40.0); - eigen_matrix.set_elem({1, 1}, 50.0); - eigen_matrix.set_elem({1, 2}, 60.0); - - auto ptensor = testing::eigen_tensor3(1, 2, 3); - auto& eigen_tensor = static_cast(*ptensor); - eigen_tensor.set_elem({0, 0, 0}, 10.0); - eigen_tensor.set_elem({0, 0, 1}, 20.0); - eigen_tensor.set_elem({0, 0, 2}, 30.0); - eigen_tensor.set_elem({0, 1, 0}, 40.0); - eigen_tensor.set_elem({0, 1, 1}, 50.0); - eigen_tensor.set_elem({0, 1, 2}, 60.0); - - auto scalar_layout = scalar_physical(); - auto vector_layout = vector_physical(2); - auto matrix_layout = matrix_physical(2, 3); - auto tensor_layout = tensor3_physical(1, 2, 3); - - buffer_type defaulted; - - SECTION("ctors, assignment") { - SECTION("default ctor") { - REQUIRE(defaulted.get_immutable_data() == nullptr); - } - - SECTION("value ctor") { - REQUIRE(eigen_scalar.layout().are_equal(scalar_layout)); - REQUIRE(eigen_vector.layout().are_equal(vector_layout)); - REQUIRE(eigen_matrix.layout().are_equal(matrix_layout)); - REQUIRE(eigen_tensor.layout().are_equal(tensor_layout)); - } - - test_copy_move_ctor_and_assignment(eigen_scalar, eigen_vector, - eigen_matrix, eigen_tensor); - } - - SECTION("swap") { - buffer_type copy(eigen_scalar); - eigen_scalar.swap(defaulted); - REQUIRE(defaulted == copy); - REQUIRE(eigen_scalar == buffer_type{}); - } - - SECTION("operator==") { - // Checking Layout/Allocator falls to base class tests - auto pscalar2 = testing::eigen_scalar(); - auto& eigen_scalar2 = static_cast(*pscalar2); - eigen_scalar2.set_elem({}, 10.0); - - // Defaulted != scalar - REQUIRE_FALSE(defaulted == eigen_scalar); - - // Everything the same - REQUIRE(eigen_scalar == eigen_scalar2); - - SECTION("Different buffer value") { - eigen_scalar2.set_elem({}, 2.0); - REQUIRE_FALSE(eigen_scalar == eigen_scalar2); - } - } - - SECTION("operator!=") { - auto pscalar2 = testing::eigen_scalar(); - auto& eigen_scalar2 = static_cast(*pscalar2); - eigen_scalar2.set_elem({}, 10.0); - - REQUIRE_FALSE(eigen_scalar != eigen_scalar2); - eigen_scalar2.set_elem({}, 2.0); - REQUIRE(eigen_scalar != eigen_scalar2); - } - - SECTION("virtual method overrides") { - SECTION("clone") { - REQUIRE(eigen_scalar.clone()->are_equal(eigen_scalar)); - 
REQUIRE(eigen_vector.clone()->are_equal(eigen_vector)); - REQUIRE(eigen_matrix.clone()->are_equal(eigen_matrix)); - } - - SECTION("are_equal") { - REQUIRE(eigen_scalar.are_equal(eigen_scalar)); - REQUIRE_FALSE(eigen_matrix.are_equal(eigen_scalar)); - } - - SECTION("addition_assignment") { - buffer_type output; - auto vi = eigen_vector("i"); - output.addition_assignment("i", vi, vi); - - auto corr = testing::eigen_vector(2); - corr->set_elem({0}, 20.0); - corr->set_elem({1}, 40.0); - - REQUIRE(output.are_equal(*corr)); - } - - SECTION("subtraction_assignment") { - buffer_type output; - auto vi = eigen_vector("i"); - output.subtraction_assignment("i", vi, vi); - - auto corr = testing::eigen_vector(2); - corr->set_elem({0}, 0.0); - corr->set_elem({1}, 0.0); - - REQUIRE(output.are_equal(*corr)); - } - - SECTION("multiplication_assignment") { - buffer_type output; - auto vi = eigen_vector("i"); - output.multiplication_assignment("i", vi, vi); - - auto corr = testing::eigen_vector(2); - corr->set_elem({0}, 100.0); - corr->set_elem({1}, 400.0); - - REQUIRE(output.are_equal(*corr)); - } - - SECTION("permute_assignment") { - buffer_type output; - auto mij = eigen_matrix("i,j"); - output.permute_assignment("j,i", mij); - - auto corr = testing::eigen_matrix(3, 2); - corr->set_elem({0, 0}, 10.0); - corr->set_elem({0, 1}, 40.0); - corr->set_elem({1, 0}, 20.0); - corr->set_elem({1, 1}, 50.0); - corr->set_elem({2, 0}, 30.0); - corr->set_elem({2, 1}, 60.0); - - REQUIRE(output.are_equal(*corr)); - } - - SECTION("scalar_multiplication") { - buffer_type output; - auto vi = eigen_vector("i"); - output.scalar_multiplication("i", 2.0, vi); - - auto corr = testing::eigen_vector(2); - corr->set_elem({0}, 20.0); - corr->set_elem({1}, 40.0); - - REQUIRE(output.are_equal(*corr)); - } - - SECTION("get_mutable_data_()") { - REQUIRE(defaulted.get_mutable_data() == nullptr); - REQUIRE(*eigen_scalar.get_mutable_data() == TestType{10.0}); - REQUIRE(*eigen_matrix.get_mutable_data() == TestType{10.0}); - } - - SECTION("get_immutable_data_() const") { - REQUIRE(std::as_const(defaulted).get_immutable_data() == nullptr); - REQUIRE(*std::as_const(eigen_scalar).get_immutable_data() == - TestType{10.0}); - REQUIRE(*std::as_const(eigen_matrix).get_immutable_data() == - TestType{10.0}); - } - - SECTION("get_elem_() const") { - TestType corr{10.0}; - REQUIRE(std::as_const(eigen_scalar).get_elem({}) == corr); - REQUIRE(std::as_const(eigen_vector).get_elem({0}) == corr); - REQUIRE(std::as_const(eigen_matrix).get_elem({0, 0}) == corr); - } - - SECTION("set_elem_()") { - eigen_vector.set_elem({0}, TestType{42.0}); - REQUIRE(eigen_vector.get_elem({0}) == TestType{42.0}); - } - - SECTION("get_data_() const") { - TestType corr{10.0}; - REQUIRE(std::as_const(eigen_scalar).get_data(0) == corr); - REQUIRE(std::as_const(eigen_vector).get_data(0) == corr); - REQUIRE(std::as_const(eigen_matrix).get_data(0) == corr); - } - - SECTION("set_data_()") { - eigen_vector.set_data(0, TestType{42.0}); - REQUIRE(eigen_vector.get_data(0) == TestType{42.0}); - } - - SECTION("fill_()") { - eigen_vector.fill(TestType{42.0}); - REQUIRE(eigen_vector.get_data(0) == TestType(42.0)); - REQUIRE(eigen_vector.get_data(1) == TestType(42.0)); - } - - SECTION("copy_()") { - auto data = std::vector(2, TestType(42.0)); - eigen_vector.copy(data); - REQUIRE(eigen_vector.get_data(0) == TestType(42.0)); - REQUIRE(eigen_vector.get_data(1) == TestType(42.0)); - } - } -} - -TEMPLATE_LIST_TEST_CASE("to_eigen_buffer", "", types::floating_point_types) { - using buffer_type = 
buffer::Eigen; - - auto pscalar = testing::eigen_scalar(); - auto& eigen_scalar = static_cast(*pscalar); - eigen_scalar.set_elem({}, 10.0); - - buffer::BufferBase& scalar_base = eigen_scalar; - REQUIRE(&buffer::to_eigen_buffer(scalar_base) == &eigen_scalar); - - const buffer::BufferBase& cscalar_base = eigen_scalar; - REQUIRE(&buffer::to_eigen_buffer(cscalar_base) == &eigen_scalar); -} diff --git a/tests/cxx/unit_tests/tensorwrapper/buffer/mdbuffer.cpp b/tests/cxx/unit_tests/tensorwrapper/buffer/mdbuffer.cpp deleted file mode 100644 index 33c13421..00000000 --- a/tests/cxx/unit_tests/tensorwrapper/buffer/mdbuffer.cpp +++ /dev/null @@ -1,455 +0,0 @@ -/* - * Copyright 2025 NWChemEx-Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "../testing/testing.hpp" -#include -#include - -using namespace tensorwrapper; - -/* Testing notes: - * - * The various operations (addition_assignment, etc.) are not exhaustively - * tested here. These operations are implemented via visitors that dispatch to - * various backends. The visitors themselves are tested in their own unit tests. - * Here we assume the visitors work and spot check a couple of operations for - * to help catch any integration issues. - */ - -TEMPLATE_LIST_TEST_CASE("MDBuffer", "", types::floating_point_types) { - using buffer::MDBuffer; - using buffer_type = MDBuffer::buffer_type; - using shape_type = typename MDBuffer::shape_type; - using label_type = typename MDBuffer::label_type; - - TestType one(1.0), two(2.0), three(3.0), four(4.0); - std::vector data = {one, two, three, four}; - - shape_type scalar_shape({}); - shape_type vector_shape({4}); - shape_type matrix_shape({2, 2}); - - MDBuffer defaulted; - MDBuffer scalar(std::vector{one}, scalar_shape); - MDBuffer vector(data, vector_shape); - MDBuffer matrix(data, matrix_shape); - - SECTION("Ctors and assignment") { - SECTION("Default ctor") { - REQUIRE(defaulted.size() == 0); - REQUIRE(defaulted.shape() == shape_type()); - } - - SECTION("vector ctor") { - REQUIRE(scalar.size() == 1); - REQUIRE(scalar.shape() == scalar_shape); - REQUIRE(scalar.get_elem({}) == one); - - REQUIRE(vector.size() == 4); - REQUIRE(vector.shape() == vector_shape); - REQUIRE(vector.get_elem({0}) == one); - REQUIRE(vector.get_elem({1}) == two); - REQUIRE(vector.get_elem({2}) == three); - REQUIRE(vector.get_elem({3}) == four); - - REQUIRE(matrix.size() == 4); - REQUIRE(matrix.shape() == matrix_shape); - REQUIRE(matrix.get_elem({0, 0}) == one); - REQUIRE(matrix.get_elem({0, 1}) == two); - REQUIRE(matrix.get_elem({1, 0}) == three); - REQUIRE(matrix.get_elem({1, 1}) == four); - - REQUIRE_THROWS_AS(MDBuffer(data, scalar_shape), - std::invalid_argument); - } - - SECTION("FloatBuffer ctor") { - buffer_type buf(data); - - MDBuffer vector_buf(buf, vector_shape); - REQUIRE(vector_buf == vector); - - MDBuffer matrix_buf(buf, matrix_shape); - REQUIRE(matrix_buf == matrix); - - REQUIRE_THROWS_AS(MDBuffer(buf, scalar_shape), - std::invalid_argument); - } - - SECTION("Copy ctor") { - MDBuffer 
defaulted_copy(defaulted); - REQUIRE(defaulted_copy == defaulted); - - MDBuffer scalar_copy(scalar); - REQUIRE(scalar_copy == scalar); - - MDBuffer vector_copy(vector); - REQUIRE(vector_copy == vector); - - MDBuffer matrix_copy(matrix); - REQUIRE(matrix_copy == matrix); - } - - SECTION("Move ctor") { - MDBuffer defaulted_temp(defaulted); - MDBuffer defaulted_move(std::move(defaulted_temp)); - REQUIRE(defaulted_move == defaulted); - - MDBuffer scalar_temp(scalar); - MDBuffer scalar_move(std::move(scalar_temp)); - REQUIRE(scalar_move == scalar); - - MDBuffer vector_temp(vector); - MDBuffer vector_move(std::move(vector_temp)); - REQUIRE(vector_move == vector); - - MDBuffer matrix_temp(matrix); - MDBuffer matrix_move(std::move(matrix_temp)); - REQUIRE(matrix_move == matrix); - } - - SECTION("Copy assignment") { - MDBuffer defaulted_copy; - auto pdefaulted_copy = &(defaulted_copy = defaulted); - REQUIRE(defaulted_copy == defaulted); - REQUIRE(pdefaulted_copy == &defaulted_copy); - - MDBuffer scalar_copy; - auto pscalar_copy = &(scalar_copy = scalar); - REQUIRE(scalar_copy == scalar); - REQUIRE(pscalar_copy == &scalar_copy); - - MDBuffer vector_copy; - auto pvector_copy = &(vector_copy = vector); - REQUIRE(vector_copy == vector); - REQUIRE(pvector_copy == &vector_copy); - - MDBuffer matrix_copy; - auto pmatrix_copy = &(matrix_copy = matrix); - REQUIRE(matrix_copy == matrix); - REQUIRE(pmatrix_copy == &matrix_copy); - } - - SECTION("Move assignment") { - MDBuffer defaulted_temp(defaulted); - MDBuffer defaulted_move; - auto pdefaulted_move = - &(defaulted_move = std::move(defaulted_temp)); - REQUIRE(defaulted_move == defaulted); - REQUIRE(pdefaulted_move == &defaulted_move); - - MDBuffer scalar_temp(scalar); - MDBuffer scalar_move; - auto pscalar_move = &(scalar_move = std::move(scalar_temp)); - REQUIRE(scalar_move == scalar); - REQUIRE(pscalar_move == &scalar_move); - - MDBuffer vector_temp(vector); - MDBuffer vector_move; - auto pvector_move = &(vector_move = std::move(vector_temp)); - REQUIRE(vector_move == vector); - REQUIRE(pvector_move == &vector_move); - - MDBuffer matrix_temp(matrix); - MDBuffer matrix_move; - auto pmatrix_move = &(matrix_move = std::move(matrix_temp)); - REQUIRE(matrix_move == matrix); - REQUIRE(pmatrix_move == &matrix_move); - } - } - - SECTION("shape") { - REQUIRE(defaulted.shape() == shape_type()); - REQUIRE(scalar.shape() == scalar_shape); - REQUIRE(vector.shape() == vector_shape); - REQUIRE(matrix.shape() == matrix_shape); - } - - SECTION("size") { - REQUIRE(defaulted.size() == 0); - REQUIRE(scalar.size() == 1); - REQUIRE(vector.size() == 4); - REQUIRE(matrix.size() == 4); - } - - SECTION("get_elem") { - REQUIRE_THROWS_AS(defaulted.get_elem({}), std::out_of_range); - - REQUIRE(scalar.get_elem({}) == one); - REQUIRE_THROWS_AS(scalar.get_elem({0}), std::out_of_range); - - REQUIRE(vector.get_elem({0}) == one); - REQUIRE(vector.get_elem({1}) == two); - REQUIRE(vector.get_elem({2}) == three); - REQUIRE(vector.get_elem({3}) == four); - REQUIRE_THROWS_AS(vector.get_elem({4}), std::out_of_range); - - REQUIRE(matrix.get_elem({0, 0}) == one); - REQUIRE(matrix.get_elem({0, 1}) == two); - REQUIRE(matrix.get_elem({1, 0}) == three); - REQUIRE(matrix.get_elem({1, 1}) == four); - REQUIRE_THROWS_AS(matrix.get_elem({2, 0}), std::out_of_range); - } - - SECTION("set_elem") { - REQUIRE_THROWS_AS(defaulted.set_elem({}, one), std::out_of_range); - - REQUIRE(scalar.get_elem({}) != two); - scalar.set_elem({}, two); - REQUIRE(scalar.get_elem({}) == two); - - REQUIRE(vector.get_elem({2}) != 
four); - vector.set_elem({2}, four); - REQUIRE(vector.get_elem({2}) == four); - - REQUIRE(matrix.get_elem({1, 0}) != one); - matrix.set_elem({1, 0}, one); - REQUIRE(matrix.get_elem({1, 0}) == one); - } - - SECTION("operator==") { - // Same object - REQUIRE(defaulted == defaulted); - - MDBuffer scalar_copy(std::vector{one}, scalar_shape); - REQUIRE(scalar == scalar_copy); - - MDBuffer vector_copy(data, vector_shape); - REQUIRE(vector == vector_copy); - - MDBuffer matrix_copy(data, matrix_shape); - REQUIRE(matrix == matrix_copy); - - // Different ranks - REQUIRE_FALSE(scalar == vector); - REQUIRE_FALSE(vector == matrix); - REQUIRE_FALSE(scalar == matrix); - - // Different shapes - shape_type matrix_shape2({4, 1}); - REQUIRE_FALSE(scalar == MDBuffer(data, matrix_shape2)); - - // Different values - std::vector diff_data = {two, three, four, one}; - MDBuffer scalar_diff(std::vector{two}, scalar_shape); - REQUIRE_FALSE(scalar == scalar_diff); - REQUIRE_FALSE(vector == MDBuffer(diff_data, vector_shape)); - REQUIRE_FALSE(matrix == MDBuffer(diff_data, matrix_shape)); - } - - SECTION("addition_assignment_") { - SECTION("scalar") { - label_type labels(""); - MDBuffer result; - result.addition_assignment(labels, scalar(labels), scalar(labels)); - REQUIRE(result.shape() == scalar_shape); - REQUIRE(result.get_elem({}) == TestType(2.0)); - } - - SECTION("vector") { - label_type labels("i"); - MDBuffer result; - result.addition_assignment(labels, vector(labels), vector(labels)); - REQUIRE(result.shape() == vector_shape); - REQUIRE(result.get_elem({0}) == TestType(2.0)); - REQUIRE(result.get_elem({1}) == TestType(4.0)); - REQUIRE(result.get_elem({2}) == TestType(6.0)); - REQUIRE(result.get_elem({3}) == TestType(8.0)); - } - - SECTION("matrix") { - label_type labels("i,j"); - MDBuffer result; - result.addition_assignment(labels, matrix(labels), matrix(labels)); - REQUIRE(result.shape() == matrix_shape); - REQUIRE(result.get_elem({0, 0}) == TestType(2.0)); - REQUIRE(result.get_elem({0, 1}) == TestType(4.0)); - REQUIRE(result.get_elem({1, 0}) == TestType(6.0)); - REQUIRE(result.get_elem({1, 1}) == TestType(8.0)); - } - } - - SECTION("subtraction_assignment_") { - SECTION("scalar") { - label_type labels(""); - MDBuffer result; - result.subtraction_assignment(labels, scalar(labels), - scalar(labels)); - REQUIRE(result.shape() == scalar_shape); - REQUIRE(result.get_elem({}) == TestType(0.0)); - } - - SECTION("vector") { - label_type labels("i"); - MDBuffer result; - result.subtraction_assignment(labels, vector(labels), - vector(labels)); - REQUIRE(result.shape() == vector_shape); - REQUIRE(result.get_elem({0}) == TestType(0.0)); - REQUIRE(result.get_elem({1}) == TestType(0.0)); - REQUIRE(result.get_elem({2}) == TestType(0.0)); - REQUIRE(result.get_elem({3}) == TestType(0.0)); - } - - SECTION("matrix") { - label_type labels("i,j"); - MDBuffer result; - result.subtraction_assignment(labels, matrix(labels), - matrix(labels)); - REQUIRE(result.shape() == matrix_shape); - REQUIRE(result.get_elem({0, 0}) == TestType(0.0)); - REQUIRE(result.get_elem({0, 1}) == TestType(0.0)); - REQUIRE(result.get_elem({1, 0}) == TestType(0.0)); - REQUIRE(result.get_elem({1, 1}) == TestType(0.0)); - } - } - - SECTION("multiplication_assignment_") { - // N.b., dispatching among hadamard, contraction, etc. is the visitor's - // responsibility and happens there. Here we just test hadamard. 
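// Editor's sketch (not part of the patch) of the flat element-wise kernel a
// Hadamard visitor can dispatch to once both operands are contiguous; the
// real visitors additionally handle labels and permutations.
#include <cstddef>
#include <span>

template<typename T>
void hadamard_kernel(std::span<T> out, std::span<const T> lhs,
                     std::span<const T> rhs) {
    // Assumes all three spans describe same-shape tensors in the same layout.
    for(std::size_t i = 0; i < out.size(); ++i) out[i] = lhs[i] * rhs[i];
}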
- - SECTION("scalar") { - label_type labels(""); - MDBuffer result; - result.multiplication_assignment(labels, scalar(labels), - scalar(labels)); - REQUIRE(result.shape() == scalar_shape); - REQUIRE(result.get_elem({}) == TestType(1.0)); - } - - SECTION("vector") { - label_type labels("i"); - MDBuffer result; - result.multiplication_assignment(labels, vector(labels), - vector(labels)); - REQUIRE(result.shape() == vector_shape); - REQUIRE(result.get_elem({0}) == TestType(1.0)); - REQUIRE(result.get_elem({1}) == TestType(4.0)); - REQUIRE(result.get_elem({2}) == TestType(9.0)); - REQUIRE(result.get_elem({3}) == TestType(16.0)); - } - - SECTION("matrix") { - label_type labels("i,j"); - MDBuffer result; - result.multiplication_assignment(labels, matrix(labels), - matrix(labels)); - REQUIRE(result.shape() == matrix_shape); - REQUIRE(result.get_elem({0, 0}) == TestType(1.0)); - REQUIRE(result.get_elem({0, 1}) == TestType(4.0)); - REQUIRE(result.get_elem({1, 0}) == TestType(9.0)); - REQUIRE(result.get_elem({1, 1}) == TestType(16.0)); - } - } - - SECTION("scalar_multiplication_") { - // TODO: Test with other scalar types when public API supports it - using scalar_type = double; - scalar_type scalar_value_{2.0}; - TestType scalar_value(scalar_value_); - SECTION("scalar") { - label_type labels(""); - MDBuffer result; - result.scalar_multiplication(labels, scalar_value_, scalar(labels)); - REQUIRE(result.shape() == scalar_shape); - REQUIRE(result.get_elem({}) == TestType(1.0) * scalar_value); - } - - SECTION("vector") { - label_type labels("i"); - MDBuffer result; - result.scalar_multiplication(labels, scalar_value_, vector(labels)); - REQUIRE(result.shape() == vector_shape); - REQUIRE(result.get_elem({0}) == TestType(1.0) * scalar_value); - REQUIRE(result.get_elem({1}) == TestType(2.0) * scalar_value); - REQUIRE(result.get_elem({2}) == TestType(3.0) * scalar_value); - REQUIRE(result.get_elem({3}) == TestType(4.0) * scalar_value); - } - - SECTION("matrix") { - label_type rhs_labels("i,j"); - label_type lhs_labels("j,i"); - MDBuffer result; - result.scalar_multiplication(lhs_labels, scalar_value_, - matrix(rhs_labels)); - REQUIRE(result.shape() == matrix_shape); - REQUIRE(result.get_elem({0, 0}) == TestType(1.0) * scalar_value); - REQUIRE(result.get_elem({0, 1}) == TestType(3.0) * scalar_value); - REQUIRE(result.get_elem({1, 0}) == TestType(2.0) * scalar_value); - REQUIRE(result.get_elem({1, 1}) == TestType(4.0) * scalar_value); - } - } - - SECTION("permute_assignment_") { - SECTION("scalar") { - label_type labels(""); - MDBuffer result; - result.permute_assignment(labels, scalar(labels)); - REQUIRE(result.shape() == scalar_shape); - REQUIRE(result.get_elem({}) == TestType(1.0)); - } - - SECTION("vector") { - label_type labels("i"); - MDBuffer result; - result.permute_assignment(labels, vector(labels)); - REQUIRE(result.shape() == vector_shape); - REQUIRE(result.get_elem({0}) == TestType(1.0)); - REQUIRE(result.get_elem({1}) == TestType(2.0)); - REQUIRE(result.get_elem({2}) == TestType(3.0)); - REQUIRE(result.get_elem({3}) == TestType(4.0)); - } - - SECTION("matrix") { - label_type rhs_labels("i,j"); - label_type lhs_labels("j,i"); - MDBuffer result; - result.permute_assignment(lhs_labels, matrix(rhs_labels)); - REQUIRE(result.shape() == matrix_shape); - REQUIRE(result.get_elem({0, 0}) == TestType(1.0)); - REQUIRE(result.get_elem({0, 1}) == TestType(3.0)); - REQUIRE(result.get_elem({1, 0}) == TestType(2.0)); - REQUIRE(result.get_elem({1, 1}) == TestType(4.0)); - } - } - - SECTION("to_string") { - 
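// Editor's sketch (not part of the patch) of the index mapping behind the
// permuted cases above: with output labels "j,i" against input labels "i,j",
// result(j,i) reads input(i,j), i.e. a transpose on flat row-major storage.
#include <cstddef>
#include <vector>

template<typename T>
std::vector<T> transpose_flat(const std::vector<T>& in, std::size_t d0,
                              std::size_t d1) {
    std::vector<T> out(in.size()); // in is d0 x d1, out is d1 x d0
    for(std::size_t i = 0; i < d0; ++i)
        for(std::size_t j = 0; j < d1; ++j) out[j * d0 + i] = in[i * d1 + j];
    return out;
}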
REQUIRE(defaulted.to_string().empty()); - REQUIRE_FALSE(scalar.to_string().empty()); - REQUIRE_FALSE(vector.to_string().empty()); - REQUIRE_FALSE(matrix.to_string().empty()); - } - - SECTION("add_to_stream") { - std::stringstream ss; - SECTION("defaulted") { - defaulted.add_to_stream(ss); - REQUIRE(ss.str().empty()); - } - SECTION("scalar") { - scalar.add_to_stream(ss); - REQUIRE_FALSE(ss.str().empty()); - } - SECTION("vector") { - vector.add_to_stream(ss); - REQUIRE_FALSE(ss.str().empty()); - } - SECTION("matrix") { - matrix.add_to_stream(ss); - REQUIRE_FALSE(ss.str().empty()); - } - } -} From 127f2c40c9997a112f6280a6371d1ff7044e09e9 Mon Sep 17 00:00:00 2001 From: "Ryan M. Richard" Date: Thu, 1 Jan 2026 12:03:26 -0600 Subject: [PATCH 03/13] compiles, but doesn't work... --- .../tensorwrapper/allocator/contiguous.hpp | 2 + include/tensorwrapper/buffer/contiguous.hpp | 3 + .../utilities/floating_point_dispatch.hpp | 69 -- include/tensorwrapper/utilities/utilities.hpp | 1 - src/python/tensor/export_tensor.cpp | 52 +- src/tensorwrapper/allocator/contiguous.cpp | 11 +- src/tensorwrapper/diis/diis.cpp | 153 ++-- .../operations/approximately_equal.cpp | 5 +- src/tensorwrapper/operations/norm.cpp | 7 +- .../tensor/detail_/tensor_factory.cpp | 6 +- .../utilities/block_diagonal_matrix.cpp | 92 +- src/tensorwrapper/utilities/to_json.cpp | 8 +- .../tensorwrapper/buffer/buffer_base.cpp | 8 +- .../tensorwrapper/buffer/contiguous.cpp | 2 +- .../buffer/contraction_planner.cpp | 1 - .../buffer/detail_/eigen_tensor.cpp | 789 ------------------ .../cxx/unit_tests/tensorwrapper/dsl/dsl.cpp | 6 +- .../tensorwrapper/dsl/pairwise_parser.cpp | 6 +- .../operations/approximately_equal.cpp | 28 +- .../tensor/detail_/tensor_factory.cpp | 9 +- .../tensor/detail_/tensor_input.cpp | 98 +-- .../tensor/detail_/tensor_pimpl.cpp | 2 +- .../tensorwrapper/testing/eigen_buffers.hpp | 65 +- .../utilities/block_diagonal_matrix.cpp | 34 +- .../utilities/floating_point_dispatch.cpp | 47 -- 25 files changed, 284 insertions(+), 1220 deletions(-) delete mode 100644 include/tensorwrapper/utilities/floating_point_dispatch.hpp delete mode 100644 tests/cxx/unit_tests/tensorwrapper/buffer/detail_/eigen_tensor.cpp delete mode 100644 tests/cxx/unit_tests/tensorwrapper/utilities/floating_point_dispatch.cpp diff --git a/include/tensorwrapper/allocator/contiguous.hpp b/include/tensorwrapper/allocator/contiguous.hpp index b30fb4e4..836ac2a3 100644 --- a/include/tensorwrapper/allocator/contiguous.hpp +++ b/include/tensorwrapper/allocator/contiguous.hpp @@ -68,6 +68,8 @@ class Contiguous : public Replicated { /// Pull in base class's ctors using base_type::base_type; + explicit Contiguous(runtime_view_reference runtime) : base_type(runtime) {} + /** @brief Determines if @p buffer can be rebound as a Contiguous buffer. 
* * Rebinding a buffer allows the same memory to be viewed as a (possibly) diff --git a/include/tensorwrapper/buffer/contiguous.hpp b/include/tensorwrapper/buffer/contiguous.hpp index 3946c0a0..771206bd 100644 --- a/include/tensorwrapper/buffer/contiguous.hpp +++ b/include/tensorwrapper/buffer/contiguous.hpp @@ -53,7 +53,10 @@ class Contiguous : public Replicated { using size_type = typename traits_type::size_type; ///@} + /// Type of an offset vector using index_vector = std::vector; + + /// Type of the object used to annotate modes using typename my_base_type::label_type; using string_type = std::string; diff --git a/include/tensorwrapper/utilities/floating_point_dispatch.hpp b/include/tensorwrapper/utilities/floating_point_dispatch.hpp deleted file mode 100644 index cfc6bd4b..00000000 --- a/include/tensorwrapper/utilities/floating_point_dispatch.hpp +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright 2025 NWChemEx-Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once -#include -#include - -namespace tensorwrapper::utilities { - -/** @brief Wraps the logic needed to work out the floating point type of buffer. - * - * @tparam KernelType Type of a functor. The functor must define a function - * template called `run` that takes one explicit template - * type parameter (will be the floating point type of @p - * buffer) and @p buffer. `run` may take an arbitrary amount - * of additional arguments. - * @tparam BufferType The type of @p buffer. Must be derived from BufferBase. - * May contain cv or reference qualifiers. - * @tparam Args The types of any additional arguments which will be forwarded - * to @p kernel. - * - * @param[in] kernel The functor instance to call `run` on. - * @param[in] buffer The type of the elements in @p buffer will be used to - * dispatch. - * @param[in] args Any additional arguments to forward to @p kernel. - * - * @return Returns whatever @p kernel returns. - * - * @throw std::runtime_error if @p buffer is not derived from - */ -template -decltype(auto) floating_point_dispatch(KernelType&& kernel, BufferType&& buffer, - Args&&... 
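// Editor's sketch (not part of the patch) of the type-restoring dispatch the
// deleted floating_point_dispatch header performed: probe the element type at
// runtime, then call a kernel templated on that type. std::variant is used
// here as a stand-in for the allocator::Eigen::can_rebind chain.
#include <variant>

template<typename KernelType>
decltype(auto) dispatch(KernelType&& kernel,
                        const std::variant<float, double>& value) {
    // As with the original helper, all branches must return the same type.
    return std::visit(
      [&](auto elem) { return kernel.template run<decltype(elem)>(elem); },
      value);
}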
args) { - using buffer_clean = std::decay_t; - using buffer_base = buffer::BufferBase; - constexpr bool is_buffer = std::is_base_of_v; - static_assert(is_buffer); - - using types::udouble; - using types::ufloat; - - if(allocator::Eigen::can_rebind(buffer)) { - return kernel.template run(buffer, std::forward(args)...); - } else if(allocator::Eigen::can_rebind(buffer)) { - return kernel.template run(buffer, std::forward(args)...); - } else if(allocator::Eigen::can_rebind(buffer)) { - return kernel.template run(buffer, std::forward(args)...); - } else if(allocator::Eigen::can_rebind(buffer)) { - return kernel.template run(buffer, - std::forward(args)...); - } else { - throw std::runtime_error("Can't rebind buffer to Contiguous<>"); - } -} - -} // namespace tensorwrapper::utilities diff --git a/include/tensorwrapper/utilities/utilities.hpp b/include/tensorwrapper/utilities/utilities.hpp index 790313d7..39632702 100644 --- a/include/tensorwrapper/utilities/utilities.hpp +++ b/include/tensorwrapper/utilities/utilities.hpp @@ -16,7 +16,6 @@ #pragma once #include -#include #include /// Namespace for helper functions diff --git a/src/python/tensor/export_tensor.cpp b/src/python/tensor/export_tensor.cpp index 2769d609..0e225b31 100644 --- a/src/python/tensor/export_tensor.cpp +++ b/src/python/tensor/export_tensor.cpp @@ -21,12 +21,14 @@ namespace tensorwrapper { -template -auto make_buffer_info(buffer::Contiguous& buffer) { +auto make_buffer_info(buffer::Contiguous& buffer) { + throw std::runtime_error("Fix me!!!!"); using size_type = std::size_t; - constexpr auto nbytes = sizeof(FloatType); - const auto desc = pybind11::format_descriptor::format(); - const auto rank = buffer.rank(); + constexpr auto nbytes = sizeof(double); + const auto desc = pybind11::format_descriptor::format(); + // constexpr auto nbytes = sizeof(FloatType); + // const auto desc = pybind11::format_descriptor::format(); + const auto rank = buffer.rank(); const auto smooth_shape = buffer.layout().shape().as_smooth(); @@ -39,31 +41,31 @@ auto make_buffer_info(buffer::Contiguous& buffer) { stride_i *= smooth_shape.extent(mode_i); strides[rank_i] = stride_i * nbytes; } - return pybind11::buffer_info(buffer.get_mutable_data(), nbytes, desc, rank, - shape, strides); + double* ptr = nullptr; // buffer.get_mutable_data(); + return pybind11::buffer_info(ptr, nbytes, desc, rank, shape, strides); } -auto make_tensor(pybind11::buffer b) { - pybind11::buffer_info info = b.request(); - if(info.format != pybind11::format_descriptor::format()) - throw std::runtime_error( - "Incompatible format: expected a double array!"); +Tensor make_tensor(pybind11::buffer b) { + throw std::runtime_error("Fix me!!!!"); + // pybind11::buffer_info info = b.request(); + // if(info.format != pybind11::format_descriptor::format()) + // throw std::runtime_error( + // "Incompatible format: expected a double array!"); - std::vector dims(info.ndim); - for(auto i = 0; i < info.ndim; ++i) { dims[i] = info.shape[i]; } + // std::vector dims(info.ndim); + // for(auto i = 0; i < info.ndim; ++i) { dims[i] = info.shape[i]; } - parallelzone::runtime::RuntimeView rv = {}; - allocator::Eigen allocator(rv); - shape::Smooth matrix_shape{dims.begin(), dims.end()}; - layout::Physical matrix_layout(matrix_shape); - auto pBuffer = allocator.allocate(matrix_layout); + // parallelzone::runtime::RuntimeView rv = {}; + // shape::Smooth matrix_shape{dims.begin(), dims.end()}; + // layout::Physical matrix_layout(matrix_shape); + // auto pBuffer = std::make_unique(rv, matrix_layout); - auto 
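// Editor's sketch (not part of the patch) of the row-major byte strides
// make_buffer_info computes above: the last mode is contiguous, and each
// earlier mode's stride is the product of all later extents times the
// element size, which is the layout pybind11::buffer_info expects.
#include <cstddef>
#include <vector>

inline std::vector<std::size_t> row_major_strides(
  const std::vector<std::size_t>& shape, std::size_t nbytes) {
    std::vector<std::size_t> strides(shape.size());
    std::size_t stride = nbytes;
    for(std::size_t i = shape.size(); i-- > 0;) {
        strides[i] = stride;
        stride *= shape[i];
    }
    return strides;
}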
n_elements = std::accumulate(dims.begin(), dims.end(), 1, - std::multiplies()); - auto pData = static_cast(info.ptr); - for(auto i = 0; i < n_elements; ++i) pBuffer->set_data(i, pData[i]); + // auto n_elements = std::accumulate(dims.begin(), dims.end(), 1, + // std::multiplies()); + // auto pData = static_cast(info.ptr); + // for(auto i = 0; i < n_elements; ++i) pBuffer->set_elem({i}, pData[i]); - return Tensor(matrix_shape, std::move(pBuffer)); + // return Tensor(matrix_shape, std::move(pBuffer)); } void export_tensor(py_module_reference m) { @@ -75,7 +77,7 @@ void export_tensor(py_module_reference m) { .def(pybind11::self != pybind11::self) .def("__str__", [](Tensor& self) { return self.to_string(); }) .def_buffer([](Tensor& t) { - auto pbuffer = dynamic_cast*>(&t.buffer()); + auto pbuffer = dynamic_cast(&t.buffer()); if(pbuffer == nullptr) throw std::runtime_error("Expected buffer to hold doubles"); return make_buffer_info(*pbuffer); diff --git a/src/tensorwrapper/allocator/contiguous.cpp b/src/tensorwrapper/allocator/contiguous.cpp index 4226262e..4db1a010 100644 --- a/src/tensorwrapper/allocator/contiguous.cpp +++ b/src/tensorwrapper/allocator/contiguous.cpp @@ -42,16 +42,13 @@ auto Contiguous::rebind(const_buffer_base_reference buffer) // -- Protected methods // ----------------------------------------------------------------------------- -auto Contiguous::allocate_(layout_pointer playout) { - return std::make_unique(std::move(playout)); +auto Contiguous::allocate_(layout_pointer playout) -> buffer_base_pointer { + throw std::runtime_error("Fix me!"); } auto Contiguous::construct_(layout_pointer playout, wtf::fp::Float value) - -> contiguous_pointer { - auto pbuffer = this->allocate(std::move(playout)); - auto& contig_buffer = static_cast(*pbuffer); - contig_buffer.fill(value); - return pbuffer; + -> buffer_pointer { + throw std::runtime_error("Fix me!"); } // -- Private diff --git a/src/tensorwrapper/diis/diis.cpp b/src/tensorwrapper/diis/diis.cpp index 94b3d6a6..5e774cbf 100644 --- a/src/tensorwrapper/diis/diis.cpp +++ b/src/tensorwrapper/diis/diis.cpp @@ -17,98 +17,97 @@ #include #include #include -#include +#include namespace tensorwrapper::diis { namespace { -struct Kernel { - using buffer_base_type = tensorwrapper::buffer::BufferBase; +// struct Kernel { +// using buffer_base_type = tensorwrapper::buffer::BufferBase; - template - auto run(const buffer_base_type& t) { - using alloc_type = tensorwrapper::allocator::Eigen; - alloc_type alloc(t.allocator().runtime()); +// template +// auto run(const buffer_base_type& t) { +// using alloc_type = tensorwrapper::allocator::Eigen; +// alloc_type alloc(t.allocator().runtime()); - double rv; - if constexpr(tensorwrapper::types::is_uncertain_v) { - const auto& t_eigen = alloc.rebind(t); +// double rv; +// if constexpr(tensorwrapper::types::is_uncertain_v) { +// const auto& t_eigen = alloc.rebind(t); - rv = t_eigen.get_elem({}).mean(); - } else { - const auto& t_eigen = alloc.rebind(t); +// rv = t_eigen.get_elem({}).mean(); +// } else { +// const auto& t_eigen = alloc.rebind(t); - rv = t_eigen.get_elem({}); - } - return rv; - } -}; +// rv = t_eigen.get_elem({}); +// } +// return rv; +// } +// }; } // namespace using tensor_type = DIIS::tensor_type; -using tensorwrapper::utilities::floating_point_dispatch; - tensor_type DIIS::extrapolate(const tensor_type& X, const tensor_type& E) { - // Append new values to stored values - m_samples_.push_back(X); - m_errors_.push_back(E); - - // If we're over the max number of stored values, pop the 
oldest ones - // Also update m_B_ to overwrite the oldest values - if(m_errors_.size() > m_max_samples_) { - m_errors_.pop_front(); - m_samples_.pop_front(); - - // Overwrite the top-left block with the bottom right block. - // No need to zero out the parts that aren't overwritten, - // they'll be overwritten in the next step - if(m_max_samples_ > 1) { - m_B_.block(0, 0, m_max_samples_ - 1, m_max_samples_ - 1) = - m_B_.block(1, 1, m_max_samples_ - 1, m_max_samples_ - 1); - } - } - - // Current number of stored values - size_type sz = m_errors_.size(); - - // Add the new values to m_B_ - size_type i = sz - 1; - for(size_type j = 0; j <= i; ++j) { // compute upper triangle - tensor_type& E_i = m_errors_.at(i); - tensor_type& E_j = m_errors_.at(j); - - tensor_type temp; - temp("") = E_i("mu,nu") * E_j("mu,nu"); - m_B_(i, j) = floating_point_dispatch(Kernel{}, temp.buffer()); - - // Fill in lower triangle - if(i != j) m_B_(j, i) = m_B_(i, j); - } - - // Solve for expansion coefficients - matrix_type A = matrix_type::Zero(sz + 1, sz + 1); - A.topLeftCorner(sz, sz) = m_B_.topLeftCorner(sz, sz); - A.row(sz).setConstant(-1.0); - A.col(sz).setConstant(-1.0); - A(sz, sz) = 0.0; - - vector_type b = vector_type::Zero(sz + 1); - b(sz) = -1.0; - - vector_type coefs = A.colPivHouseholderQr().solve(b); - - // Extrapolate the new X from the coefficients. - tensor_type new_X; - new_X("mu,nu") = m_samples_.at(0)("mu,nu") * coefs(0); - for(size_type i = 1; i < sz; i++) { - tensor_type x_i; - x_i("mu,nu") = m_samples_.at(i)("mu,nu") * coefs(i); - new_X("mu,nu") = new_X("mu,nu") + x_i("mu,nu"); - } - return new_X; + throw std::runtime_error("DIIS::extrapolate NYI"); + // // Append new values to stored values + // m_samples_.push_back(X); + // m_errors_.push_back(E); + + // // If we're over the max number of stored values, pop the oldest ones + // // Also update m_B_ to overwrite the oldest values + // if(m_errors_.size() > m_max_samples_) { + // m_errors_.pop_front(); + // m_samples_.pop_front(); + + // // Overwrite the top-left block with the bottom right block. + // // No need to zero out the parts that aren't overwritten, + // // they'll be overwritten in the next step + // if(m_max_samples_ > 1) { + // m_B_.block(0, 0, m_max_samples_ - 1, m_max_samples_ - 1) = + // m_B_.block(1, 1, m_max_samples_ - 1, m_max_samples_ - 1); + // } + // } + + // // Current number of stored values + // size_type sz = m_errors_.size(); + + // // Add the new values to m_B_ + // size_type i = sz - 1; + // for(size_type j = 0; j <= i; ++j) { // compute upper triangle + // tensor_type& E_i = m_errors_.at(i); + // tensor_type& E_j = m_errors_.at(j); + + // tensor_type temp; + // temp("") = E_i("mu,nu") * E_j("mu,nu"); + // m_B_(i, j) = floating_point_dispatch(Kernel{}, temp.buffer()); + + // // Fill in lower triangle + // if(i != j) m_B_(j, i) = m_B_(i, j); + // } + + // // Solve for expansion coefficients + // matrix_type A = matrix_type::Zero(sz + 1, sz + 1); + // A.topLeftCorner(sz, sz) = m_B_.topLeftCorner(sz, sz); + // A.row(sz).setConstant(-1.0); + // A.col(sz).setConstant(-1.0); + // A(sz, sz) = 0.0; + + // vector_type b = vector_type::Zero(sz + 1); + // b(sz) = -1.0; + + // vector_type coefs = A.colPivHouseholderQr().solve(b); + + // // Extrapolate the new X from the coefficients. 
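// Editor's sketch (not part of the patch) of the bordered linear system the
// commented-out extrapolation above builds: given B with B(i,j) = <E_i|E_j>,
// the DIIS coefficients solve A c = b, where the row and column of -1 plus
// b(sz) = -1 enforce, via a Lagrange multiplier, that the coefficients sum
// to one.
#include <Eigen/Dense>

inline Eigen::VectorXd diis_coefficients(const Eigen::MatrixXd& B) {
    const auto sz = B.rows();
    Eigen::MatrixXd A = Eigen::MatrixXd::Zero(sz + 1, sz + 1);
    A.topLeftCorner(sz, sz) = B;
    A.row(sz).setConstant(-1.0);
    A.col(sz).setConstant(-1.0);
    A(sz, sz) = 0.0; // Lagrange-multiplier block
    Eigen::VectorXd b = Eigen::VectorXd::Zero(sz + 1);
    b(sz) = -1.0;
    return A.colPivHouseholderQr().solve(b).head(sz); // drop the multiplier
}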
+ // tensor_type new_X; + // new_X("mu,nu") = m_samples_.at(0)("mu,nu") * coefs(0); + // for(size_type i = 1; i < sz; i++) { + // tensor_type x_i; + // x_i("mu,nu") = m_samples_.at(i)("mu,nu") * coefs(i); + // new_X("mu,nu") = new_X("mu,nu") + x_i("mu,nu"); + // } + // return new_X; } bool DIIS::operator==(const DIIS& rhs) const noexcept { diff --git a/src/tensorwrapper/operations/approximately_equal.cpp b/src/tensorwrapper/operations/approximately_equal.cpp index a5a9edfd..4ed4ffdd 100644 --- a/src/tensorwrapper/operations/approximately_equal.cpp +++ b/src/tensorwrapper/operations/approximately_equal.cpp @@ -55,8 +55,9 @@ bool approximately_equal(const Tensor& lhs, const Tensor& rhs, double tol) { allocator_type alloc(result.buffer().allocator().runtime()); const auto& buffer_down = alloc.rebind(result.buffer()); Kernel k(tol); - return wtf::buffer::visit_contiguous_buffer( - k, buffer_down); + throw std::runtime_error("Fix me!!!!"); + // return wtf::buffer::visit_contiguous_buffer( + // k, buffer_down.get_immutable_data()); } } // namespace tensorwrapper::operations diff --git a/src/tensorwrapper/operations/norm.cpp b/src/tensorwrapper/operations/norm.cpp index ac500af8..e9ab74de 100644 --- a/src/tensorwrapper/operations/norm.cpp +++ b/src/tensorwrapper/operations/norm.cpp @@ -44,12 +44,13 @@ struct InfinityKernel { Tensor infinity_norm(const Tensor& t) { using allocator_type = allocator::Contiguous; - auto rv = t.allocator().runtime(); + auto rv = t.buffer().allocator().runtime(); allocator_type alloc(rv); const auto& buffer_down = alloc.rebind(t.buffer()); InfinityKernel kernel(alloc); - return wtf::buffer::visit_contiguous_buffer( - kernel, buffer_down); + throw std::runtime_error("Fix me!!!!"); + // return wtf::buffer::visit_contiguous_buffer( + // kernel, buffer_down); } } // namespace tensorwrapper::operations diff --git a/src/tensorwrapper/tensor/detail_/tensor_factory.cpp b/src/tensorwrapper/tensor/detail_/tensor_factory.cpp index 78a72071..abb2256d 100644 --- a/src/tensorwrapper/tensor/detail_/tensor_factory.cpp +++ b/src/tensorwrapper/tensor/detail_/tensor_factory.cpp @@ -177,8 +177,10 @@ namespace { /// Wraps the process of turning an initializer list into a TensorInput object template auto il_to_input(T il, parallelzone::runtime::RuntimeView rv = {}) { - allocator::Contiguous alloc(rv); - auto pbuffer = alloc.construct(il); + auto [extents, data] = unwrap_il(il); + shape::Smooth shape(extents.begin(), extents.end()); + auto pbuffer = + std::make_unique(std::move(data), std::move(shape)); return TensorInput(pbuffer->layout().shape(), std::move(pbuffer)); } diff --git a/src/tensorwrapper/utilities/block_diagonal_matrix.cpp b/src/tensorwrapper/utilities/block_diagonal_matrix.cpp index 2fd764d5..80ebc682 100644 --- a/src/tensorwrapper/utilities/block_diagonal_matrix.cpp +++ b/src/tensorwrapper/utilities/block_diagonal_matrix.cpp @@ -18,65 +18,69 @@ #include #include #include -#include namespace tensorwrapper::utilities { namespace { -struct BlockDiagonalMatrixKernel { - template - auto run(const buffer::BufferBase& b, const std::vector& matrices) { - using allocator_type = tensorwrapper::allocator::Eigen; +// struct BlockDiagonalMatrixKernel { +// template +// auto run(const buffer::BufferBase& b, const std::vector& +// matrices) { +// using allocator_type = tensorwrapper::allocator::Eigen; - // All inputs must be Rank 2, square, and the same floating point type. - // If so, sum their extent sizes. 
- std::size_t size = 0; - for(const auto& matrix : matrices) { - if(!allocator_type::can_rebind(matrix.buffer())) - throw std::runtime_error( - "All inputs must have the same floating point type"); +// // All inputs must be Rank 2, square, and the same floating point +// type. +// // If so, sum their extent sizes. +// std::size_t size = 0; +// for(const auto& matrix : matrices) { +// if(!allocator_type::can_rebind(matrix.buffer())) +// throw std::runtime_error( +// "All inputs must have the same floating point type"); - if(matrix.rank() != 2) - throw std::runtime_error( - "All inputs must be matrices (Rank == 2)"); +// if(matrix.rank() != 2) +// throw std::runtime_error( +// "All inputs must be matrices (Rank == 2)"); - const auto& mshape = matrix.buffer().layout().shape().as_smooth(); - if(mshape.extent(0) != mshape.extent(1)) - throw std::runtime_error("All inputs must be square matrices"); +// const auto& mshape = +// matrix.buffer().layout().shape().as_smooth(); if(mshape.extent(0) +// != mshape.extent(1)) +// throw std::runtime_error("All inputs must be square +// matrices"); - size += mshape.extent(0); - } +// size += mshape.extent(0); +// } - // Allocate new buffer - allocator_type allocator(b.allocator().runtime()); - shape::Smooth oshape{size, size}; - layout::Physical olayout(oshape); - auto obuffer = allocator.construct(olayout, 0.0); +// // Allocate new buffer +// allocator_type allocator(b.allocator().runtime()); +// shape::Smooth oshape{size, size}; +// layout::Physical olayout(oshape); +// auto obuffer = allocator.construct(olayout, 0.0); - // Copy values from input into corresponding blocks - std::size_t offset = 0; - for(const auto& matrix : matrices) { - const auto& mbuffer = allocator.rebind(matrix.buffer()); - auto extent = mbuffer.layout().shape().as_smooth().extent(0); - for(std::size_t i = 0; i < extent; ++i) { - for(std::size_t j = 0; j < extent; ++j) { - obuffer->set_elem({offset + i, offset + j}, - mbuffer.get_elem({i, j})); - } - } - offset += extent; - } - return Tensor(oshape, std::move(obuffer)); - } -}; +// // Copy values from input into corresponding blocks +// std::size_t offset = 0; +// for(const auto& matrix : matrices) { +// const auto& mbuffer = allocator.rebind(matrix.buffer()); +// auto extent = mbuffer.layout().shape().as_smooth().extent(0); +// for(std::size_t i = 0; i < extent; ++i) { +// for(std::size_t j = 0; j < extent; ++j) { +// obuffer->set_elem({offset + i, offset + j}, +// mbuffer.get_elem({i, j})); +// } +// } +// offset += extent; +// } +// return Tensor(oshape, std::move(obuffer)); +// } +// }; } // namespace Tensor block_diagonal_matrix(std::vector matrices) { - const auto& buffer0 = matrices[0].buffer(); - BlockDiagonalMatrixKernel kernel; - return floating_point_dispatch(kernel, buffer0, matrices); + throw std::runtime_error("Fix me!"); + // const auto& buffer0 = matrices[0].buffer(); + // BlockDiagonalMatrixKernel kernel; + // return floating_point_dispatch(kernel, buffer0, matrices); } } // namespace tensorwrapper::utilities diff --git a/src/tensorwrapper/utilities/to_json.cpp b/src/tensorwrapper/utilities/to_json.cpp index cd798fd5..f836fb08 100644 --- a/src/tensorwrapper/utilities/to_json.cpp +++ b/src/tensorwrapper/utilities/to_json.cpp @@ -47,9 +47,11 @@ void to_json_(std::ostream& os, const buffer_type& t, offset_vector index) { std::ostream& to_json(std::ostream& os, const Tensor& t) { offset_vector i; - allocator::Contiguous alloc(t.buffer().allocator().runtime()); - const auto& buffer = alloc.rebind(t.buffer()); - 
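// Editor's sketch (not part of the patch) of the block-diagonal assembly the
// commented-out kernel above performs, written against flat row-major
// storage: square block k of extent n_k is copied to offset n_0 + ... +
// n_{k-1} along both modes, and everything off the blocks stays zero.
#include <cstddef>
#include <vector>

inline std::vector<double> block_diagonal(
  const std::vector<std::vector<double>>& blocks,
  const std::vector<std::size_t>& extents) {
    std::size_t size = 0;
    for(auto n : extents) size += n; // total extent of the output matrix
    std::vector<double> out(size * size, 0.0);
    std::size_t offset = 0;
    for(std::size_t k = 0; k < blocks.size(); ++k) {
        const auto n = extents[k]; // blocks[k] holds an n x n matrix, flat
        for(std::size_t i = 0; i < n; ++i)
            for(std::size_t j = 0; j < n; ++j)
                out[(offset + i) * size + (offset + j)] = blocks[k][i * n + j];
        offset += n;
    }
    return out;
}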
to_json_(os, buffer, i); + auto pbuffer_down = dynamic_cast(&t.buffer()); + if(pbuffer_down == nullptr) + throw std::runtime_error( + "to_json only supports tensors with Contiguous buffers"); + to_json_(os, *pbuffer_down, i); return os; } diff --git a/tests/cxx/unit_tests/tensorwrapper/buffer/buffer_base.cpp b/tests/cxx/unit_tests/tensorwrapper/buffer/buffer_base.cpp index 3ef4784c..99a16391 100644 --- a/tests/cxx/unit_tests/tensorwrapper/buffer/buffer_base.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/buffer/buffer_base.cpp @@ -15,7 +15,7 @@ */ #include "../testing/testing.hpp" -#include +#include #include #include @@ -47,7 +47,7 @@ TEST_CASE("BufferBase") { auto scalar_layout = testing::scalar_physical(); auto vector_layout = testing::vector_physical(2); - buffer::Eigen defaulted; + buffer::Contiguous defaulted; BufferBase& defaulted_base = defaulted; BufferBase& scalar_base = scalar; BufferBase& vector_base = vector; @@ -77,7 +77,7 @@ TEST_CASE("BufferBase") { SECTION("operator==") { // Defaulted layout == defaulted layout - REQUIRE(defaulted_base == buffer::Eigen{}); + REQUIRE(defaulted_base == buffer::Contiguous{}); // Defaulted layout != non-defaulted layout REQUIRE_FALSE(defaulted_base == scalar_base); @@ -89,6 +89,6 @@ TEST_CASE("BufferBase") { SECTION("operator!=") { // Just spot check because it negates operator==, which was tested REQUIRE(defaulted_base != scalar_base); - REQUIRE_FALSE(defaulted_base != buffer::Eigen()); + REQUIRE_FALSE(defaulted_base != buffer::Contiguous()); } } diff --git a/tests/cxx/unit_tests/tensorwrapper/buffer/contiguous.cpp b/tests/cxx/unit_tests/tensorwrapper/buffer/contiguous.cpp index fc65df5a..55bbf89b 100644 --- a/tests/cxx/unit_tests/tensorwrapper/buffer/contiguous.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/buffer/contiguous.cpp @@ -15,7 +15,7 @@ */ #include "../testing/testing.hpp" -#include +#include #include using namespace tensorwrapper; diff --git a/tests/cxx/unit_tests/tensorwrapper/buffer/contraction_planner.cpp b/tests/cxx/unit_tests/tensorwrapper/buffer/contraction_planner.cpp index 47a26d2f..53299bba 100644 --- a/tests/cxx/unit_tests/tensorwrapper/buffer/contraction_planner.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/buffer/contraction_planner.cpp @@ -16,7 +16,6 @@ #include "../testing/testing.hpp" #include -#include using namespace tensorwrapper; using namespace buffer; diff --git a/tests/cxx/unit_tests/tensorwrapper/buffer/detail_/eigen_tensor.cpp b/tests/cxx/unit_tests/tensorwrapper/buffer/detail_/eigen_tensor.cpp deleted file mode 100644 index e7f565e6..00000000 --- a/tests/cxx/unit_tests/tensorwrapper/buffer/detail_/eigen_tensor.cpp +++ /dev/null @@ -1,789 +0,0 @@ -/* - * Copyright 2025 NWChemEx-Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "../../testing/testing.hpp" -#include -#include - -using namespace tensorwrapper; -using namespace testing; - -using buffer::detail_::hash_utilities::hash_input; - -template -using pimpl_type = buffer::detail_::EigenTensor; -using shape_type = shape::Smooth; - -// Should be the same regardless of template parameters -using label_type = typename pimpl_type::label_type; -using hash_type = typename pimpl_type::hash_type; - -TEMPLATE_LIST_TEST_CASE("EigenTensor", "", types::floating_point_types) { - pimpl_type scalar(shape_type{}); - scalar.set_elem({}, 1.0); - - pimpl_type vector(shape_type{2}); - vector.set_elem({0}, 1.0); - vector.set_elem({1}, 2.0); - - pimpl_type matrix(shape_type{2, 2}); - matrix.set_elem({0, 0}, 1.0); - matrix.set_elem({0, 1}, 2.0); - matrix.set_elem({1, 0}, 3.0); - matrix.set_elem({1, 1}, 4.0); - - pimpl_type tensor(shape_type{2, 2, 2}); - tensor.set_elem({0, 0, 0}, 1.0); - tensor.set_elem({0, 0, 1}, 2.0); - tensor.set_elem({0, 1, 0}, 3.0); - tensor.set_elem({0, 1, 1}, 4.0); - tensor.set_elem({1, 0, 0}, 5.0); - tensor.set_elem({1, 0, 1}, 6.0); - tensor.set_elem({1, 1, 0}, 7.0); - tensor.set_elem({1, 1, 1}, 8.0); - - // ------------------------------------------------------------------------- - // -- Public methods - // ------------------------------------------------------------------------- - - SECTION("operator==") { - SECTION("Same State") { - pimpl_type scalar2(scalar); - REQUIRE(scalar2 == scalar); - } - - SECTION("Different Value") { - pimpl_type scalar2(scalar); - scalar2.set_elem({}, 42.0); - REQUIRE_FALSE(scalar2 == scalar); - // Ensure hash is recalculated after change - scalar2.set_elem({}, 1.0); - REQUIRE(scalar2 == scalar); - } - - SECTION("Different Extents") { - pimpl_type vector2(shape_type{1}); - vector.set_elem({0}, 1.0); - REQUIRE_FALSE(vector2 == vector); - } - - if constexpr(types::is_uncertain_v) { - SECTION("Check Error Sources Match") { - pimpl_type uscalar(shape_type{}); - uscalar.set_elem({}, TestType(1.0, 0.0)); - pimpl_type uscalar2(uscalar); - REQUIRE(uscalar2 == uscalar); - } - } - } - - SECTION("get_hash") { - SECTION("scalar") { - hash_type scalar_hash = scalar.get_hash(); - - hash_type corr{std::as_const(scalar).rank()}; - hash_input(corr, std::as_const(scalar).get_elem({})); - REQUIRE(scalar_hash == corr); - } - SECTION("vector") { - hash_type vector_hash = vector.get_hash(); - - using buffer::detail_::hash_utilities::hash_input; - hash_type corr{std::as_const(vector).rank()}; - hash_input(corr, std::as_const(vector).extent(0)); - hash_input(corr, std::as_const(vector).get_elem({0})); - hash_input(corr, std::as_const(vector).get_elem({1})); - REQUIRE(vector_hash == corr); - } - } - - // ------------------------------------------------------------------------- - // -- Protected methods - // ------------------------------------------------------------------------- - - SECTION("clone_") { - REQUIRE(scalar.clone()->are_equal(scalar)); - REQUIRE(vector.clone()->are_equal(vector)); - REQUIRE(matrix.clone()->are_equal(matrix)); - REQUIRE(tensor.clone()->are_equal(tensor)); - } - - SECTION("rank_") { - REQUIRE(scalar.rank() == 0); - REQUIRE(vector.rank() == 1); - REQUIRE(matrix.rank() == 2); - REQUIRE(tensor.rank() == 3); - - pimpl_type defaulted; - REQUIRE(defaulted.rank() == 6); - } - - SECTION("size_") { - REQUIRE(scalar.size() == 1); - REQUIRE(vector.size() == 2); - REQUIRE(matrix.size() == 4); - REQUIRE(tensor.size() == 8); - } - - SECTION("extent_") { - REQUIRE(vector.extent(0) == 2); - - REQUIRE(matrix.extent(0) == 2); 
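// Editor's sketch (not part of the patch) of boost-style hash combining, the
// pattern helpers like hash_input in the get_hash tests above typically
// implement; the library's exact mixing constants may differ.
#include <cstddef>
#include <functional>

template<typename T>
void hash_combine(std::size_t& seed, const T& value) {
    seed ^= std::hash<T>{}(value) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
}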
- REQUIRE(matrix.extent(1) == 2); - - REQUIRE(tensor.extent(0) == 2); - REQUIRE(tensor.extent(1) == 2); - REQUIRE(tensor.extent(2) == 2); - } - - SECTION("get_mutable_data_()") { - SECTION("accessing") { - REQUIRE(*scalar.get_mutable_data() == TestType{1.0}); - - REQUIRE(*vector.get_mutable_data() == TestType{1.0}); - REQUIRE(*(vector.get_mutable_data() + 1) == TestType{2.0}); - - REQUIRE(*matrix.get_mutable_data() == TestType{1.0}); - REQUIRE(*(matrix.get_mutable_data() + 1) == TestType{2.0}); - REQUIRE(*(matrix.get_mutable_data() + 2) == TestType{3.0}); - REQUIRE(*(matrix.get_mutable_data() + 3) == TestType{4.0}); - - REQUIRE(*tensor.get_mutable_data() == TestType{1.0}); - REQUIRE(*(tensor.get_mutable_data() + 1) == TestType{2.0}); - REQUIRE(*(tensor.get_mutable_data() + 2) == TestType{3.0}); - REQUIRE(*(tensor.get_mutable_data() + 3) == TestType{4.0}); - REQUIRE(*(tensor.get_mutable_data() + 4) == TestType{5.0}); - REQUIRE(*(tensor.get_mutable_data() + 5) == TestType{6.0}); - REQUIRE(*(tensor.get_mutable_data() + 6) == TestType{7.0}); - REQUIRE(*(tensor.get_mutable_data() + 7) == TestType{8.0}); - } - - SECTION("comparison behavior") { - // Initial state is the same - pimpl_type s(shape_type{}); - s.set_elem({}, 1.0); - REQUIRE(s == scalar); - // Still the same, but the normal hash recalculation flag would be - // reset after this comparison. - auto* pdata = s.get_mutable_data(); - REQUIRE(s == scalar); - // Changes state. The internal hash will have to be recalculated for - // the comparison to be false, ensuring that the hash caching has - // been turned off. - pdata[0] = 2.0; - REQUIRE_FALSE(s == scalar); - } - } - - SECTION("get_immutable_data_() const") { - REQUIRE(*std::as_const(scalar).get_immutable_data() == TestType{1.0}); - - REQUIRE(*std::as_const(vector).get_immutable_data() == TestType{1.0}); - REQUIRE(*(std::as_const(vector).get_immutable_data() + 1) == - TestType{2.0}); - - REQUIRE(*std::as_const(matrix).get_immutable_data() == TestType{1.0}); - REQUIRE(*(std::as_const(matrix).get_immutable_data() + 1) == - TestType{2.0}); - REQUIRE(*(std::as_const(matrix).get_immutable_data() + 2) == - TestType{3.0}); - REQUIRE(*(std::as_const(matrix).get_immutable_data() + 3) == - TestType{4.0}); - - REQUIRE(*std::as_const(tensor).get_immutable_data() == TestType{1.0}); - REQUIRE(*(std::as_const(tensor).get_immutable_data() + 1) == - TestType{2.0}); - REQUIRE(*(std::as_const(tensor).get_immutable_data() + 2) == - TestType{3.0}); - REQUIRE(*(std::as_const(tensor).get_immutable_data() + 3) == - TestType{4.0}); - REQUIRE(*(std::as_const(tensor).get_immutable_data() + 4) == - TestType{5.0}); - REQUIRE(*(std::as_const(tensor).get_immutable_data() + 5) == - TestType{6.0}); - REQUIRE(*(std::as_const(tensor).get_immutable_data() + 6) == - TestType{7.0}); - REQUIRE(*(std::as_const(tensor).get_immutable_data() + 7) == - TestType{8.0}); - } - - SECTION("get_elem_() const") { - REQUIRE(std::as_const(scalar).get_elem({}) == TestType{1.0}); - - REQUIRE(std::as_const(vector).get_elem({0}) == TestType{1.0}); - REQUIRE(std::as_const(vector).get_elem({1}) == TestType{2.0}); - - REQUIRE(std::as_const(matrix).get_elem({0, 0}) == TestType{1.0}); - REQUIRE(std::as_const(matrix).get_elem({0, 1}) == TestType{2.0}); - REQUIRE(std::as_const(matrix).get_elem({1, 0}) == TestType{3.0}); - REQUIRE(std::as_const(matrix).get_elem({1, 1}) == TestType{4.0}); - - REQUIRE(std::as_const(tensor).get_elem({0, 0, 0}) == TestType{1.0}); - REQUIRE(std::as_const(tensor).get_elem({0, 0, 1}) == TestType{2.0}); - 
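// Editor's sketch (not part of the patch) of the cached-hash behavior the
// "comparison behavior" section above describes: once a mutable pointer has
// been handed out, caching is disabled so later writes are seen by
// operator==.
#include <cstddef>
#include <functional>
#include <optional>

struct CachedHash {
    double m_value   = 0.0;
    bool m_can_cache = true;
    std::optional<std::size_t> m_hash;

    double* mutable_data() {
        m_can_cache = false; // caller may write through this at any time
        m_hash.reset();
        return &m_value;
    }

    std::size_t hash() {
        if(m_hash) return *m_hash;
        auto h = std::hash<double>{}(m_value);
        if(m_can_cache) m_hash = h;
        return h;
    }
};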
REQUIRE(std::as_const(tensor).get_elem({0, 1, 0}) == TestType{3.0}); - REQUIRE(std::as_const(tensor).get_elem({0, 1, 1}) == TestType{4.0}); - REQUIRE(std::as_const(tensor).get_elem({1, 0, 0}) == TestType{5.0}); - REQUIRE(std::as_const(tensor).get_elem({1, 0, 1}) == TestType{6.0}); - REQUIRE(std::as_const(tensor).get_elem({1, 1, 0}) == TestType{7.0}); - REQUIRE(std::as_const(tensor).get_elem({1, 1, 1}) == TestType{8.0}); - } - - SECTION("set_elem_()") { - scalar.set_elem({}, TestType{2.0}); - REQUIRE(scalar.get_elem({}) == TestType{2.0}); - - vector.set_elem({0}, TestType{2.0}); - REQUIRE(vector.get_elem({0}) == TestType{2.0}); - REQUIRE(vector.get_elem({1}) == TestType{2.0}); - } - - SECTION("get_data() const") { - REQUIRE(std::as_const(scalar).get_data(0) == TestType{1.0}); - - REQUIRE(std::as_const(vector).get_data(0) == TestType{1.0}); - REQUIRE(std::as_const(vector).get_data(1) == TestType{2.0}); - - REQUIRE(std::as_const(matrix).get_data(0) == TestType{1.0}); - REQUIRE(std::as_const(matrix).get_data(1) == TestType{2.0}); - REQUIRE(std::as_const(matrix).get_data(2) == TestType{3.0}); - REQUIRE(std::as_const(matrix).get_data(3) == TestType{4.0}); - - REQUIRE(std::as_const(tensor).get_data(0) == TestType{1.0}); - REQUIRE(std::as_const(tensor).get_data(1) == TestType{2.0}); - REQUIRE(std::as_const(tensor).get_data(2) == TestType{3.0}); - REQUIRE(std::as_const(tensor).get_data(3) == TestType{4.0}); - REQUIRE(std::as_const(tensor).get_data(4) == TestType{5.0}); - REQUIRE(std::as_const(tensor).get_data(5) == TestType{6.0}); - REQUIRE(std::as_const(tensor).get_data(6) == TestType{7.0}); - REQUIRE(std::as_const(tensor).get_data(7) == TestType{8.0}); - } - - SECTION("set_data_()") { - scalar.set_data(0, TestType{2.0}); - REQUIRE(scalar.get_data(0) == TestType{2.0}); - - vector.set_data(0, TestType{2.0}); - REQUIRE(vector.get_data(0) == TestType{2.0}); - REQUIRE(vector.get_data(1) == TestType{2.0}); - } - - SECTION("fill_()") { - vector.fill(TestType{42.0}); - REQUIRE(vector.get_data(0) == TestType(42.0)); - REQUIRE(vector.get_data(1) == TestType(42.0)); - } - - SECTION("copy_()") { - auto data = std::vector(2, TestType(42.0)); - vector.copy(data); - REQUIRE(vector.get_data(0) == TestType(42.0)); - REQUIRE(vector.get_data(1) == TestType(42.0)); - } - - SECTION("are_equal_") { - pimpl_type scalar2(scalar); - REQUIRE(scalar2.are_equal(scalar)); - - scalar2.set_elem({}, 42.0); - REQUIRE_FALSE(scalar2.are_equal(scalar)); - } - - SECTION("to_string_") { - std::stringstream sone; - sone << TestType{1.0}; - - std::stringstream stwo; - stwo << TestType{2.0}; - - REQUIRE(scalar.to_string() == sone.str()); - REQUIRE(vector.to_string() == sone.str() + " " + stwo.str()); - } - - SECTION("add_to_stream_") { - std::stringstream ss, ss_corr; - ss << std::fixed << std::setprecision(4); - scalar.add_to_stream(ss); - ss_corr << std::fixed << std::setprecision(4); - ss_corr << TestType{1.0}; - REQUIRE(ss.str() == ss_corr.str()); - REQUIRE_FALSE(ss.str() == scalar.to_string()); - } - - SECTION("addition_assignment_") { - SECTION("scalar") { - pimpl_type output; - label_type s(""); - output.addition_assignment(s, s, s, scalar, scalar); - - pimpl_type corr(shape_type{}); - corr.set_elem({}, 2.0); - REQUIRE(output == corr); - } - - SECTION("tensor : permute none") { - pimpl_type output; - label_type o("i,j,k"); - label_type l("i,j,k"); - label_type r("i,j,k"); - - output.addition_assignment(o, l, r, tensor, tensor); - - pimpl_type corr(shape_type{2, 2, 2}); - corr.set_elem({0, 0, 0}, 2.0); - corr.set_elem({0, 0, 1}, 
4.0); - corr.set_elem({0, 1, 0}, 6.0); - corr.set_elem({0, 1, 1}, 8.0); - corr.set_elem({1, 0, 0}, 10.0); - corr.set_elem({1, 0, 1}, 12.0); - corr.set_elem({1, 1, 0}, 14.0); - corr.set_elem({1, 1, 1}, 16.0); - REQUIRE(output == corr); - } - - SECTION("tensor : permute LHS") { - pimpl_type output; - label_type o("k,j,i"); - label_type l("i,j,k"); - label_type r("k,j,i"); - - output.addition_assignment(o, l, r, tensor, tensor); - - pimpl_type corr(shape_type{2, 2, 2}); - corr.set_elem({0, 0, 0}, 2.0); - corr.set_elem({0, 0, 1}, 7.0); - corr.set_elem({0, 1, 0}, 6.0); - corr.set_elem({0, 1, 1}, 11.0); - corr.set_elem({1, 0, 0}, 7.0); - corr.set_elem({1, 0, 1}, 12.0); - corr.set_elem({1, 1, 0}, 11.0); - corr.set_elem({1, 1, 1}, 16.0); - REQUIRE(output == corr); - } - - SECTION("tensor : permute RHS") { - pimpl_type output; - label_type o("k,j,i"); - label_type l("k,j,i"); - label_type r("i,j,k"); - - output.addition_assignment(o, l, r, tensor, tensor); - - pimpl_type corr(shape_type{2, 2, 2}); - corr.set_elem({0, 0, 0}, 2.0); - corr.set_elem({0, 0, 1}, 7.0); - corr.set_elem({0, 1, 0}, 6.0); - corr.set_elem({0, 1, 1}, 11.0); - corr.set_elem({1, 0, 0}, 7.0); - corr.set_elem({1, 0, 1}, 12.0); - corr.set_elem({1, 1, 0}, 11.0); - corr.set_elem({1, 1, 1}, 16.0); - REQUIRE(output == corr); - } - - SECTION("tensor : permute all") { - pimpl_type output; - label_type o("k,j,i"); - label_type l("i,j,k"); - label_type r("j,i,k"); - - output.addition_assignment(o, l, r, tensor, tensor); - - pimpl_type corr(shape_type{2, 2, 2}); - corr.set_elem({0, 0, 0}, 2.0); - corr.set_elem({0, 0, 1}, 8.0); - corr.set_elem({0, 1, 0}, 8.0); - corr.set_elem({0, 1, 1}, 14.0); - corr.set_elem({1, 0, 0}, 4.0); - corr.set_elem({1, 0, 1}, 10.0); - corr.set_elem({1, 1, 0}, 10.0); - corr.set_elem({1, 1, 1}, 16.0); - REQUIRE(output == corr); - } - } - - SECTION("subtraction_assignment_") { - SECTION("scalar") { - pimpl_type output; - label_type s(""); - output.subtraction_assignment(s, s, s, scalar, scalar); - - pimpl_type corr(shape_type{}); - corr.set_elem({}, 0.0); - REQUIRE(output == corr); - } - - SECTION("tensor : permute none") { - pimpl_type output; - label_type o("i,j,k"); - label_type l("i,j,k"); - label_type r("i,j,k"); - - output.subtraction_assignment(o, l, r, tensor, tensor); - - pimpl_type corr(shape_type{2, 2, 2}); - corr.set_elem({0, 0, 0}, 0.0); - corr.set_elem({0, 0, 1}, 0.0); - corr.set_elem({0, 1, 0}, 0.0); - corr.set_elem({0, 1, 1}, 0.0); - corr.set_elem({1, 0, 0}, 0.0); - corr.set_elem({1, 0, 1}, 0.0); - corr.set_elem({1, 1, 0}, 0.0); - corr.set_elem({1, 1, 1}, 0.0); - REQUIRE(output == corr); - } - - SECTION("tensor : permute LHS") { - pimpl_type output; - label_type o("k,j,i"); - label_type l("i,j,k"); - label_type r("k,j,i"); - - output.subtraction_assignment(o, l, r, tensor, tensor); - - pimpl_type corr(shape_type{2, 2, 2}); - corr.set_elem({0, 0, 0}, 0.0); - corr.set_elem({0, 0, 1}, 3.0); - corr.set_elem({0, 1, 0}, 0.0); - corr.set_elem({0, 1, 1}, 3.0); - corr.set_elem({1, 0, 0}, -3.0); - corr.set_elem({1, 0, 1}, 0.0); - corr.set_elem({1, 1, 0}, -3.0); - corr.set_elem({1, 1, 1}, 0.0); - REQUIRE(output == corr); - } - - SECTION("tensor : permute RHS") { - pimpl_type output; - label_type o("k,j,i"); - label_type l("k,j,i"); - label_type r("i,j,k"); - - output.subtraction_assignment(o, l, r, tensor, tensor); - - pimpl_type corr(shape_type{2, 2, 2}); - corr.set_elem({0, 0, 0}, 0.0); - corr.set_elem({0, 0, 1}, -3.0); - corr.set_elem({0, 1, 0}, 0.0); - corr.set_elem({0, 1, 1}, -3.0); - corr.set_elem({1, 0, 0}, 
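// Editor's sketch (not part of the patch) of the label semantics checked in
// the "permute LHS" cases above: with output labels "k,j,i", lhs labels
// "i,j,k", and rhs labels "k,j,i", out(k,j,i) = lhs(i,j,k) + rhs(k,j,i).
#include <array>

using T3 = std::array<std::array<std::array<double, 2>, 2>, 2>;

inline T3 add_permuted(const T3& lhs, const T3& rhs) {
    T3 out{};
    for(int i = 0; i < 2; ++i)
        for(int j = 0; j < 2; ++j)
            for(int k = 0; k < 2; ++k)
                out[k][j][i] = lhs[i][j][k] + rhs[k][j][i];
    return out;
}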
3.0); - corr.set_elem({1, 0, 1}, 0.0); - corr.set_elem({1, 1, 0}, 3.0); - corr.set_elem({1, 1, 1}, 0.0); - REQUIRE(output == corr); - } - - SECTION("tensor : permute all") { - pimpl_type output; - label_type o("k,j,i"); - label_type l("i,j,k"); - label_type r("j,i,k"); - - output.subtraction_assignment(o, l, r, tensor, tensor); - - pimpl_type corr(shape_type{2, 2, 2}); - corr.set_elem({0, 0, 0}, 0.0); - corr.set_elem({0, 0, 1}, 2.0); - corr.set_elem({0, 1, 0}, -2.0); - corr.set_elem({0, 1, 1}, 0.0); - corr.set_elem({1, 0, 0}, 0.0); - corr.set_elem({1, 0, 1}, 2.0); - corr.set_elem({1, 1, 0}, -2.0); - corr.set_elem({1, 1, 1}, 0.0); - REQUIRE(output == corr); - } - } - - SECTION("hadamard_assignment_") { - SECTION("scalar") { - pimpl_type output; - label_type s(""); - output.hadamard_assignment(s, s, s, scalar, scalar); - - pimpl_type corr(shape_type{}); - corr.set_elem({}, 1.0); - REQUIRE(output == corr); - } - - SECTION("tensor : permute none") { - pimpl_type output; - label_type o("i,j,k"); - label_type l("i,j,k"); - label_type r("i,j,k"); - - output.hadamard_assignment(o, l, r, tensor, tensor); - - pimpl_type corr(shape_type{2, 2, 2}); - corr.set_elem({0, 0, 0}, 1.0); - corr.set_elem({0, 0, 1}, 4.0); - corr.set_elem({0, 1, 0}, 9.0); - corr.set_elem({0, 1, 1}, 16.0); - corr.set_elem({1, 0, 0}, 25.0); - corr.set_elem({1, 0, 1}, 36.0); - corr.set_elem({1, 1, 0}, 49.0); - corr.set_elem({1, 1, 1}, 64.0); - REQUIRE(output == corr); - } - - SECTION("tensor : permute LHS") { - pimpl_type output; - label_type o("k,j,i"); - label_type l("i,j,k"); - label_type r("k,j,i"); - - output.hadamard_assignment(o, l, r, tensor, tensor); - - pimpl_type corr(shape_type{2, 2, 2}); - corr.set_elem({0, 0, 0}, 1.0); - corr.set_elem({0, 0, 1}, 10.0); - corr.set_elem({0, 1, 0}, 9.0); - corr.set_elem({0, 1, 1}, 28.0); - corr.set_elem({1, 0, 0}, 10.0); - corr.set_elem({1, 0, 1}, 36.0); - corr.set_elem({1, 1, 0}, 28.0); - corr.set_elem({1, 1, 1}, 64.0); - REQUIRE(output == corr); - } - - SECTION("tensor : permute RHS") { - pimpl_type output; - label_type o("k,j,i"); - label_type l("k,j,i"); - label_type r("i,j,k"); - - output.hadamard_assignment(o, l, r, tensor, tensor); - - pimpl_type corr(shape_type{2, 2, 2}); - corr.set_elem({0, 0, 0}, 1.0); - corr.set_elem({0, 0, 1}, 10.0); - corr.set_elem({0, 1, 0}, 9.0); - corr.set_elem({0, 1, 1}, 28.0); - corr.set_elem({1, 0, 0}, 10.0); - corr.set_elem({1, 0, 1}, 36.0); - corr.set_elem({1, 1, 0}, 28.0); - corr.set_elem({1, 1, 1}, 64.0); - REQUIRE(output == corr); - } - - SECTION("tensor : permute all") { - pimpl_type output; - label_type o("k,j,i"); - label_type l("i,j,k"); - label_type r("j,i,k"); - - output.hadamard_assignment(o, l, r, tensor, tensor); - - pimpl_type corr(shape_type{2, 2, 2}); - corr.set_elem({0, 0, 0}, 1.0); - corr.set_elem({0, 0, 1}, 15.0); - corr.set_elem({0, 1, 0}, 15.0); - corr.set_elem({0, 1, 1}, 49.0); - corr.set_elem({1, 0, 0}, 4.0); - corr.set_elem({1, 0, 1}, 24.0); - corr.set_elem({1, 1, 0}, 24.0); - corr.set_elem({1, 1, 1}, 64.0); - REQUIRE(output == corr); - } - } - - SECTION("contraction_assignment") { - SECTION("ijk,ijk->") { - pimpl_type output; - - label_type o(""); - label_type l("i,j,k"); - label_type r("i,j,k"); - shape_type oshape{}; - output.contraction_assignment(o, l, r, oshape, tensor, tensor); - - pimpl_type corr(oshape); - corr.set_elem({}, 204.0); - REQUIRE(output == corr); - } - - SECTION("ijk,jik->") { - pimpl_type output; - - label_type o(""); - label_type l("i,j,k"); - label_type r("j,i,k"); - shape_type oshape{}; - 
output.contraction_assignment(o, l, r, oshape, tensor, tensor); - - pimpl_type corr(oshape); - corr.set_elem({}, 196.0); - REQUIRE(output == corr); - } - - SECTION("ijk,jkl->il") { - pimpl_type output; - - label_type o("i,l"); - label_type l("i,j,k"); - label_type r("j,k,l"); - shape_type oshape{2, 2}; - output.contraction_assignment(o, l, r, oshape, tensor, tensor); - - pimpl_type corr(oshape); - corr.set_elem({0, 0}, 50.0); - corr.set_elem({0, 1}, 60.0); - corr.set_elem({1, 0}, 114.0); - corr.set_elem({1, 1}, 140.0); - REQUIRE(output == corr); - } - - SECTION("ijk,jlk->il") { - pimpl_type output; - - label_type o("i,l"); - label_type l("i,j,k"); - label_type r("j,l,k"); - shape_type oshape{2, 2}; - output.contraction_assignment(o, l, r, oshape, tensor, tensor); - - pimpl_type corr(oshape); - corr.set_elem({0, 0}, 44.0); - corr.set_elem({0, 1}, 64.0); - corr.set_elem({1, 0}, 100.0); - corr.set_elem({1, 1}, 152.0); - REQUIRE(output == corr); - } - - SECTION("ijk,jlk->li") { - pimpl_type output; - - label_type o("l,i"); - label_type l("i,j,k"); - label_type r("j,l,k"); - shape_type oshape{2, 2}; - output.contraction_assignment(o, l, r, oshape, tensor, tensor); - - pimpl_type corr(oshape); - corr.set_elem({0, 0}, 44.0); - corr.set_elem({0, 1}, 100.0); - corr.set_elem({1, 0}, 64.0); - corr.set_elem({1, 1}, 152.0); - REQUIRE(output == corr); - } - - SECTION("ijk,ljm->iklm") { - pimpl_type output; - - label_type o("i,k,l,m"); - label_type l("i,j,k"); - label_type r("l,j,m"); - shape_type oshape{2, 2, 2, 2}; - output.contraction_assignment(o, l, r, oshape, tensor, tensor); - - pimpl_type corr(oshape); - corr.set_elem({0, 0, 0, 0}, 10.0); - corr.set_elem({0, 0, 0, 1}, 14.0); - corr.set_elem({0, 0, 1, 0}, 26.0); - corr.set_elem({0, 0, 1, 1}, 30.0); - corr.set_elem({0, 1, 0, 0}, 14.0); - corr.set_elem({0, 1, 0, 1}, 20.0); - corr.set_elem({0, 1, 1, 0}, 38.0); - corr.set_elem({0, 1, 1, 1}, 44.0); - corr.set_elem({1, 0, 0, 0}, 26.0); - corr.set_elem({1, 0, 0, 1}, 38.0); - corr.set_elem({1, 0, 1, 0}, 74.0); - corr.set_elem({1, 0, 1, 1}, 86.0); - corr.set_elem({1, 1, 0, 0}, 30.0); - corr.set_elem({1, 1, 0, 1}, 44.0); - corr.set_elem({1, 1, 1, 0}, 86.0); - corr.set_elem({1, 1, 1, 1}, 100.0); - - REQUIRE(output == corr); - } - - SECTION("ij,jkl->ikl") { - pimpl_type output; - - label_type o("i,k,l"); - label_type l("i,j"); - label_type r("j,k,l"); - shape_type oshape{2, 2, 2}; - output.contraction_assignment(o, l, r, oshape, matrix, tensor); - - pimpl_type corr(oshape); - corr.set_elem({0, 0, 0}, 11.0); - corr.set_elem({0, 0, 1}, 14.0); - corr.set_elem({0, 1, 0}, 17.0); - corr.set_elem({0, 1, 1}, 20.0); - corr.set_elem({1, 0, 0}, 23.0); - corr.set_elem({1, 0, 1}, 30.0); - corr.set_elem({1, 1, 0}, 37.0); - corr.set_elem({1, 1, 1}, 44.0); - - REQUIRE(corr == output); - } - } - - SECTION("permute_assignment") { - pimpl_type output; - - SECTION("matrix : no permute") { - label_type o("i,j"); - label_type i("i,j"); - output.permute_assignment(o, i, matrix); - - REQUIRE(output == matrix); - } - - SECTION("matrix : permute") { - label_type o("i,j"); - label_type i("j,i"); - output.permute_assignment(o, i, matrix); - - pimpl_type corr(shape_type{2, 2}); - corr.set_elem({0, 0}, 1.0); - corr.set_elem({0, 1}, 3.0); - corr.set_elem({1, 0}, 2.0); - corr.set_elem({1, 1}, 4.0); - REQUIRE(output == corr); - } - } - - SECTION("scalar_multiplication") { - pimpl_type output; - - SECTION("matrix : no permute") { - label_type o("i,j"); - label_type i("i,j"); - output.scalar_multiplication(o, i, 2.0, matrix); - - pimpl_type 
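// Editor's sketch (not part of the patch) of the "ijk,jkl->il" contraction
// checked below, written as naive loops over the 2-extent modes: j and k are
// summed, i and l survive.
#include <array>

using Rank3 = std::array<std::array<std::array<double, 2>, 2>, 2>;
using Rank2 = std::array<std::array<double, 2>, 2>;

inline Rank2 contract_ijk_jkl_il(const Rank3& a, const Rank3& b) {
    Rank2 out{};
    for(int i = 0; i < 2; ++i)
        for(int l = 0; l < 2; ++l)
            for(int j = 0; j < 2; ++j)
                for(int k = 0; k < 2; ++k)
                    out[i][l] += a[i][j][k] * b[j][k][l];
    return out;
}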
corr(shape_type{2, 2}); - corr.set_elem({0, 0}, 2.0); - corr.set_elem({0, 1}, 4.0); - corr.set_elem({1, 0}, 6.0); - corr.set_elem({1, 1}, 8.0); - - REQUIRE(output == corr); - } - - SECTION("matrix : permute") { - label_type o("i,j"); - label_type i("j,i"); - output.scalar_multiplication(o, i, 2.0, matrix); - - pimpl_type corr(shape_type{2, 2}); - corr.set_elem({0, 0}, 2.0); - corr.set_elem({0, 1}, 6.0); - corr.set_elem({1, 0}, 4.0); - corr.set_elem({1, 1}, 8.0); - REQUIRE(output == corr); - } - } -} diff --git a/tests/cxx/unit_tests/tensorwrapper/dsl/dsl.cpp b/tests/cxx/unit_tests/tensorwrapper/dsl/dsl.cpp index 6e2b560a..0a740969 100644 --- a/tests/cxx/unit_tests/tensorwrapper/dsl/dsl.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/dsl/dsl.cpp @@ -94,9 +94,9 @@ TEST_CASE("DSLr : buffer::Eigen") { auto& scalar2 = *pscalar2; auto& corr = *pcorr; - scalar0.set_data(0, 1.0); - scalar1.set_data(0, 2.0); - scalar2.set_data(0, 3.0); + scalar0.set_elem({}, 1.0); + scalar1.set_elem({}, 2.0); + scalar2.set_elem({}, 3.0); SECTION("assignment") { SECTION("scalar") { diff --git a/tests/cxx/unit_tests/tensorwrapper/dsl/pairwise_parser.cpp b/tests/cxx/unit_tests/tensorwrapper/dsl/pairwise_parser.cpp index 40a86519..43eb907c 100644 --- a/tests/cxx/unit_tests/tensorwrapper/dsl/pairwise_parser.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/dsl/pairwise_parser.cpp @@ -134,9 +134,9 @@ TEST_CASE("PairwiseParser : buffer::Eigen") { auto& scalar2 = *pscalar2; auto& corr = *pcorr; - scalar0.set_data(0, 1.0); - scalar1.set_data(0, 2.0); - scalar2.set_data(0, 3.0); + scalar0.set_elem({}, 1.0); + scalar1.set_elem({}, 2.0); + scalar2.set_elem({}, 3.0); dsl::PairwiseParser p; diff --git a/tests/cxx/unit_tests/tensorwrapper/operations/approximately_equal.cpp b/tests/cxx/unit_tests/tensorwrapper/operations/approximately_equal.cpp index e9858dab..d73a1e4a 100644 --- a/tests/cxx/unit_tests/tensorwrapper/operations/approximately_equal.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/operations/approximately_equal.cpp @@ -32,16 +32,16 @@ using namespace operations; TEMPLATE_LIST_TEST_CASE("approximately_equal", "", types::floating_point_types) { auto pscalar = testing::eigen_scalar(); - pscalar->set_data(0, 42.0); + pscalar->set_elem({}, 42.0); auto pvector = testing::eigen_vector(2); - pvector->set_data(0, 1.23); - pvector->set_data(1, 2.34); + pvector->set_elem({0}, 1.23); + pvector->set_elem({1}, 2.34); auto pscalar2 = testing::eigen_scalar(); - pscalar2->set_data(0, 42.0); + pscalar2->set_elem({}, 42.0); auto pvector2 = testing::eigen_vector(2); - pvector2->set_data(0, 1.23); - pvector2->set_data(1, 2.34); + pvector2->set_elem({0}, 1.23); + pvector2->set_elem({1}, 2.34); shape::Smooth s0{}; shape::Smooth s1{2}; @@ -66,8 +66,8 @@ TEMPLATE_LIST_TEST_CASE("approximately_equal", "", SECTION("Differ by more than default tolerance") { double value = 1e-1; - pscalar2->set_data(0, 42.0 + value); - pvector2->set_data(0, 1.23 + value); + pscalar2->set_elem({}, 42.0 + value); + pvector2->set_elem({0}, 1.23 + value); Tensor scalar2(s0, std::move(pscalar2)); Tensor vector2(s1, std::move(pvector2)); REQUIRE_FALSE(approximately_equal(scalar, scalar2)); @@ -78,8 +78,8 @@ TEMPLATE_LIST_TEST_CASE("approximately_equal", "", SECTION("Differ by less than default tolerance") { double value = 1e-17; - pscalar2->set_data(0, 42.0 + value); - pvector2->set_data(0, 1.23 + value); + pscalar2->set_elem({}, 42.0 + value); + pvector2->set_elem({0}, 1.23 + value); Tensor scalar2(s0, std::move(pscalar2)); Tensor vector2(s1, std::move(pvector2)); 
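// Editor's sketch (not part of the patch) of the element-wise tolerance test
// approximately_equal exercises in the sections above and below; the library
// itself routes this through visit_contiguous_buffer to reach the buffers'
// contiguous data.
#include <cmath>
#include <cstddef>
#include <span>

inline bool approx_equal(std::span<const double> lhs,
                         std::span<const double> rhs, double tol) {
    if(lhs.size() != rhs.size()) return false;
    for(std::size_t i = 0; i < lhs.size(); ++i)
        if(std::abs(lhs[i] - rhs[i]) > tol) return false;
    return true;
}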
diff --git a/tests/cxx/unit_tests/tensorwrapper/operations/approximately_equal.cpp b/tests/cxx/unit_tests/tensorwrapper/operations/approximately_equal.cpp
index e9858dab..d73a1e4a 100644
--- a/tests/cxx/unit_tests/tensorwrapper/operations/approximately_equal.cpp
+++ b/tests/cxx/unit_tests/tensorwrapper/operations/approximately_equal.cpp
@@ -32,16 +32,16 @@ TEMPLATE_LIST_TEST_CASE("approximately_equal", "",
                         types::floating_point_types) {
     auto pscalar = testing::eigen_scalar<TestType>();
-    pscalar->set_data(0, 42.0);
+    pscalar->set_elem({}, 42.0);
     auto pvector = testing::eigen_vector<TestType>(2);
-    pvector->set_data(0, 1.23);
-    pvector->set_data(1, 2.34);
+    pvector->set_elem({0}, 1.23);
+    pvector->set_elem({1}, 2.34);
 
     auto pscalar2 = testing::eigen_scalar<TestType>();
-    pscalar2->set_data(0, 42.0);
+    pscalar2->set_elem({}, 42.0);
     auto pvector2 = testing::eigen_vector<TestType>(2);
-    pvector2->set_data(0, 1.23);
-    pvector2->set_data(1, 2.34);
+    pvector2->set_elem({0}, 1.23);
+    pvector2->set_elem({1}, 2.34);
 
     shape::Smooth s0{};
     shape::Smooth s1{2};
@@ -66,8 +66,8 @@ TEMPLATE_LIST_TEST_CASE("approximately_equal", "",
     SECTION("Differ by more than default tolerance") {
         double value = 1e-1;
 
-        pscalar2->set_data(0, 42.0 + value);
-        pvector2->set_data(0, 1.23 + value);
+        pscalar2->set_elem({}, 42.0 + value);
+        pvector2->set_elem({0}, 1.23 + value);
         Tensor scalar2(s0, std::move(pscalar2));
         Tensor vector2(s1, std::move(pvector2));
         REQUIRE_FALSE(approximately_equal(scalar, scalar2));
@@ -78,8 +78,8 @@ TEMPLATE_LIST_TEST_CASE("approximately_equal", "",
     SECTION("Differ by less than default tolerance") {
         double value = 1e-17;
 
-        pscalar2->set_data(0, 42.0 + value);
-        pvector2->set_data(0, 1.23 + value);
+        pscalar2->set_elem({}, 42.0 + value);
+        pvector2->set_elem({0}, 1.23 + value);
         Tensor scalar2(s0, std::move(pscalar2));
         Tensor vector2(s1, std::move(pvector2));
         REQUIRE(approximately_equal(scalar, scalar2));
@@ -90,8 +90,8 @@ TEMPLATE_LIST_TEST_CASE("approximately_equal", "",
     SECTION("Differ by more than provided tolerance") {
         float value = 1e-1;
 
-        pscalar2->set_data(0, 43.0);
-        pvector2->set_data(0, 2.23);
+        pscalar2->set_elem({}, 43.0);
+        pvector2->set_elem({0}, 2.23);
         Tensor scalar2(s0, std::move(pscalar2));
         Tensor vector2(s1, std::move(pvector2));
         REQUIRE_FALSE(approximately_equal(scalar, scalar2, value));
@@ -102,8 +102,8 @@ TEMPLATE_LIST_TEST_CASE("approximately_equal", "",
     SECTION("Differ by less than provided tolerance") {
         double value = 1e-10;
 
-        pscalar2->set_data(0, 42.0 + value);
-        pvector2->set_data(0, 1.23 + value);
+        pscalar2->set_elem({}, 42.0 + value);
+        pvector2->set_elem({0}, 1.23 + value);
         Tensor scalar2(s0, std::move(pscalar2));
         Tensor vector2(s1, std::move(pvector2));
         REQUIRE(approximately_equal(scalar, scalar2, 1e-1));
diff --git a/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_factory.cpp b/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_factory.cpp
index fd4e3f63..7eb59543 100644
--- a/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_factory.cpp
+++ b/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_factory.cpp
@@ -51,8 +51,8 @@ TEST_CASE("TensorFactory") {
     layout::Physical physical(shape, g, sparsity);
     auto pphysical = physical.clone_as<layout::Physical>();
 
-    allocator::Eigen<double> alloc(rv);
-    auto pbuffer = alloc.allocate(std::move(pphysical));
+    std::vector<double> data{0.0};
+    auto pbuffer = std::make_unique<buffer::Contiguous>(data, shape);
     auto buffer_address = pbuffer.get();
 
     SECTION("default_logical_symmetry") {
@@ -88,11 +88,6 @@ TEST_CASE("TensorFactory") {
         REQUIRE(result->are_equal(physical));
     }
 
-    SECTION("default_allocator") {
-        auto result = TensorFactory::default_allocator(physical, rv);
-        REQUIRE(result->are_equal(alloc));
-    }
-
     SECTION("construct(input)") {
         SECTION("Can create default pimpl") {
             auto pdefaulted = TensorFactory::construct(TensorInput{});
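Both hunks above retire the allocator in favor of constructing the buffer directly from its data and its shape. Pulled out of the diffs, the pattern is the following (a sketch, assuming the `(vector, shape)` constructor checks that the data size matches the shape size, as the `buffer/contiguous.cpp` hunk later in this patch suggests):

    // Direct construction, as the updated tests now do.
    shape::Smooth shape{2, 2};
    std::vector<double> data(shape.size(), 0.0); // must match shape.size()
    auto pbuffer = std::make_unique<buffer::Contiguous>(std::move(data),
                                                        std::move(shape));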
diff --git a/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_input.cpp b/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_input.cpp
index d7657add..ae3983c2 100644
--- a/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_input.cpp
+++ b/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_input.cpp
@@ -15,6 +15,7 @@
  */
 
 #include "../../testing/testing.hpp"
+#include
 
 using namespace tensorwrapper;
 
@@ -36,8 +37,9 @@ TEST_CASE("TensorInput") {
     sparsity::Pattern sparsity(2);
     layout::Logical logical(shape, g, sparsity);
     layout::Physical physical(shape, g, sparsity);
-    allocator::Eigen alloc(rv);
-    auto pbuffer = alloc.construct(42.0);
+
+    std::vector<double> data{42.0};
+    auto pbuffer = std::make_unique<buffer::Contiguous>(data, shape::Smooth{});
     auto& buffer = *pbuffer;
 
     detail_::TensorInput defaulted;
@@ -210,67 +212,38 @@ TEST_CASE("TensorInput") {
         REQUIRE(i.has_physical_layout());
     }
 
-    SECTION("Allocator (by value)") {
-        detail_::TensorInput i(physical, alloc, logical);
-        REQUIRE(i.m_pshape == nullptr);
-        REQUIRE(i.m_psymmetry == nullptr);
-        REQUIRE(i.m_psparsity == nullptr);
-        REQUIRE(i.m_plogical->are_equal(logical));
-        REQUIRE(i.m_pphysical->are_equal(physical));
-        REQUIRE(i.m_palloc->are_equal(alloc));
-        REQUIRE(i.m_pbuffer == nullptr);
-        REQUIRE(i.m_rv == rv);
-
-        REQUIRE(i.has_allocator());
-    }
-
-    SECTION("Allocator (by pointer)") {
-        auto palloc        = alloc.clone();
-        auto alloc_address = palloc.get();
-        detail_::TensorInput i(physical, std::move(palloc), logical);
-        REQUIRE(i.m_pshape == nullptr);
-        REQUIRE(i.m_psymmetry == nullptr);
-        REQUIRE(i.m_psparsity == nullptr);
-        REQUIRE(i.m_plogical->are_equal(logical));
-        REQUIRE(i.m_pphysical->are_equal(physical));
-        REQUIRE(i.m_palloc->are_equal(alloc));
-        REQUIRE(i.m_palloc.get() == alloc_address);
-        REQUIRE(i.m_pbuffer == nullptr);
-        REQUIRE(i.m_rv == rv);
-
-        REQUIRE(i.has_allocator());
-    }
-
     SECTION("Buffer (by value)") {
-        detail_::TensorInput i(physical, alloc, logical, buffer);
-        REQUIRE(i.m_pshape == nullptr);
-        REQUIRE(i.m_psymmetry == nullptr);
-        REQUIRE(i.m_psparsity == nullptr);
-        REQUIRE(i.m_plogical->are_equal(logical));
-        REQUIRE(i.m_pphysical->are_equal(physical));
-        REQUIRE(i.m_palloc->are_equal(alloc));
-        // REQUIRE(i.m_pbuffer->are_equal(buffer));
-        REQUIRE(i.m_rv == rv);
-
-        REQUIRE(i.has_buffer());
+        throw std::runtime_error("Fix me!");
+        // detail_::TensorInput i(physical, alloc, logical, buffer);
+        // REQUIRE(i.m_pshape == nullptr);
+        // REQUIRE(i.m_psymmetry == nullptr);
+        // REQUIRE(i.m_psparsity == nullptr);
+        // REQUIRE(i.m_plogical->are_equal(logical));
+        // REQUIRE(i.m_pphysical->are_equal(physical));
+        // REQUIRE(i.m_palloc->are_equal(alloc));
+        // // REQUIRE(i.m_pbuffer->are_equal(buffer));
+        // REQUIRE(i.m_rv == rv);
+
+        // REQUIRE(i.has_buffer());
     }
 
     SECTION("Buffer (by pointer)") {
-        auto pbuffer        = buffer.clone();
-        auto buffer_address = pbuffer.get();
-        detail_::TensorInput i(physical, alloc, logical,
-                               std::move(pbuffer));
-        REQUIRE(i.m_pshape == nullptr);
-        REQUIRE(i.m_psymmetry == nullptr);
-        REQUIRE(i.m_psparsity == nullptr);
-        REQUIRE(i.m_plogical->are_equal(logical));
-        REQUIRE(i.m_pphysical->are_equal(physical));
-        REQUIRE(i.m_palloc->are_equal(alloc));
-        // REQUIRE(i.m_pbuffer->are_equal(buffer));
-        REQUIRE(i.m_pbuffer.get() == buffer_address);
-        REQUIRE(i.m_rv == rv);
-
-        REQUIRE(i.has_buffer());
+        throw std::runtime_error("Fix me!");
+        // auto pbuffer = buffer.clone();
+        // auto buffer_address = pbuffer.get();
+        // detail_::TensorInput i(physical, alloc, logical,
+        //                        std::move(pbuffer));
+        // REQUIRE(i.m_pshape == nullptr);
+        // REQUIRE(i.m_psymmetry == nullptr);
+        // REQUIRE(i.m_psparsity == nullptr);
+        // REQUIRE(i.m_plogical->are_equal(logical));
+        // REQUIRE(i.m_pphysical->are_equal(physical));
+        // REQUIRE(i.m_palloc->are_equal(alloc));
+        // // REQUIRE(i.m_pbuffer->are_equal(buffer));
+        // REQUIRE(i.m_pbuffer.get() == buffer_address);
+        // REQUIRE(i.m_rv == rv);
+
+        // REQUIRE(i.has_buffer());
     }
 
     SECTION("RuntimeView") {
@@ -317,13 +290,6 @@ TEST_CASE("TensorInput") {
         REQUIRE(w_physical.has_physical_layout());
     }
 
-    SECTION("has_allocator") {
-        REQUIRE_FALSE(defaulted.has_allocator());
-
-        detail_::TensorInput w_allocator(alloc);
-        REQUIRE(w_allocator.has_allocator());
-    }
-
     SECTION("has_buffer") {
         REQUIRE_FALSE(defaulted.has_buffer());
diff --git a/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_pimpl.cpp b/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_pimpl.cpp
index 0c92de48..efa562b6 100644
--- a/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_pimpl.cpp
+++ b/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_pimpl.cpp
@@ -15,7 +15,7 @@
  */
 
 #include "../../testing/testing.hpp"
-#include
+#include
 #include
 #include
 #include
diff --git a/tests/cxx/unit_tests/tensorwrapper/testing/eigen_buffers.hpp b/tests/cxx/unit_tests/tensorwrapper/testing/eigen_buffers.hpp
index 97cd8d75..b0d8c5e9 100644
--- a/tests/cxx/unit_tests/tensorwrapper/testing/eigen_buffers.hpp
+++ b/tests/cxx/unit_tests/tensorwrapper/testing/eigen_buffers.hpp
@@ -26,65 +26,62 @@
 
 namespace tensorwrapper::testing {
 
-template<typename FloatType>
-auto make_allocator() {
-    parallelzone::runtime::RuntimeView rv;
-    return allocator::Eigen<FloatType>(rv);
-}
-
 template<typename FloatType>
 auto eigen_scalar(FloatType value = 42.0) {
-    auto alloc = make_allocator<FloatType>();
-    return alloc.construct(value);
+    shape::Smooth shape{};
+    std::vector<FloatType> data{value};
+    return std::make_unique<buffer::Contiguous>(std::move(data),
+                                                std::move(shape));
 }
 
 template<typename FloatType>
 auto eigen_vector(std::size_t n = 5) {
-    layout::Physical l(shape::Smooth{n});
-    auto alloc  = make_allocator<FloatType>();
-    auto buffer = alloc.allocate(l);
-    for(std::size_t i = 0; i < n; ++i) buffer->set_elem({i}, i);
-    return buffer;
+    shape::Smooth shape{n};
+    std::vector<FloatType> data(n);
+    for(std::size_t i = 0; i < n; ++i) data[i] = static_cast<FloatType>(i);
+    return std::make_unique<buffer::Contiguous>(std::move(data),
+                                                std::move(shape));
 }
 
 template<typename FloatType>
 auto eigen_matrix(std::size_t n = 2, std::size_t m = 2) {
-    layout::Physical l(shape::Smooth{n, m});
-    auto alloc  = make_allocator<FloatType>();
-    auto buffer = alloc.allocate(l);
+    shape::Smooth shape{n, m};
+    std::vector<FloatType> data(n * m);
     double counter = 1.0;
     for(decltype(n) i = 0; i < n; ++i)
-        for(decltype(m) j = 0; j < m; ++j) buffer->set_elem({i, j}, counter++);
-    return buffer;
+        for(decltype(m) j = 0; j < m; ++j)
+            data[i * m + j] = static_cast<FloatType>(counter++);
+    return std::make_unique<buffer::Contiguous>(std::move(data),
+                                                std::move(shape));
 }
 
 template<typename FloatType>
 auto eigen_tensor3(std::size_t n = 2, std::size_t m = 2, std::size_t l = 2) {
-    layout::Physical layout(shape::Smooth{n, m, l});
-    auto alloc  = make_allocator<FloatType>();
-    auto buffer = alloc.allocate(layout);
+    shape::Smooth shape{n, m, l};
+    std::vector<FloatType> data(n * m * l);
     double counter = 1.0;
     for(decltype(n) i = 0; i < n; ++i)
        for(decltype(m) j = 0; j < m; ++j)
            for(decltype(l) k = 0; k < l; ++k)
-                buffer->set_elem({i, j, k}, counter++);
-    return buffer;
+                data[(i * m + j) * l + k] = static_cast<FloatType>(counter++);
+    return std::make_unique<buffer::Contiguous>(std::move(data),
+                                                std::move(shape));
 }
 
 template<typename FloatType>
 auto eigen_tensor4(std::array<std::size_t, 4> extents = {2, 2, 2, 2}) {
-    shape::Smooth shape{extents[0], extents[1], extents[2], extents[3]};
-    layout::Physical layout(shape);
-    auto alloc  = make_allocator<FloatType>();
-    auto buffer = alloc.allocate(layout);
+    shape::Smooth shape(extents.begin(), extents.end());
+    std::vector<FloatType> data(shape.size());
+    buffer::Contiguous buffer(std::move(data), std::move(shape));
     double counter = 1.0;
-    decltype(extents) i;
-    for(i[0] = 0; i[0] < extents[0]; ++i[0])
-        for(i[1] = 0; i[1] < extents[1]; ++i[1])
-            for(i[2] = 0; i[2] < extents[2]; ++i[2])
-                for(i[3] = 0; i[3] < extents[3]; ++i[3])
-                    buffer->set_elem({i[0], i[1], i[2], i[3]}, counter++);
-    return buffer;
+    for(std::size_t i = 0; i < extents[0]; ++i)
+        for(decltype(i) j = 0; j < extents[1]; ++j)
+            for(decltype(i) k = 0; k < extents[2]; ++k)
+                for(decltype(i) l = 0; l < extents[3]; ++l)
+                    buffer.set_elem({i, j, k, l},
+                                    static_cast<FloatType>(counter++));
+
+    return std::make_unique<buffer::Contiguous>(std::move(buffer));
 }
 
 } // namespace tensorwrapper::testing
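The rewritten helpers linearize multi-indices in row-major order: for a shape {n, m} the element (i, j) lives at flat offset i * m + j, and for {n, m, l} the element (i, j, k) lives at (i * m + j) * l + k. Concretely, `eigen_matrix(2, 2)` stores 1, 2, 3, 4, so element (1, 0) is the third stored value, 3. The arithmetic, standalone (function names here are illustrative only):

    // Row-major flattening used by the helpers above (sketch).
    inline std::size_t offset2(std::size_t i, std::size_t j, std::size_t m) {
        return i * m + j;
    }

    inline std::size_t offset3(std::size_t i, std::size_t j, std::size_t k,
                               std::size_t m, std::size_t l) {
        return (i * m + j) * l + k; // == i * m * l + j * l + k
    }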
diff --git a/tests/cxx/unit_tests/tensorwrapper/utilities/block_diagonal_matrix.cpp b/tests/cxx/unit_tests/tensorwrapper/utilities/block_diagonal_matrix.cpp
index 93e5c0ec..dffccf8e 100644
--- a/tests/cxx/unit_tests/tensorwrapper/utilities/block_diagonal_matrix.cpp
+++ b/tests/cxx/unit_tests/tensorwrapper/utilities/block_diagonal_matrix.cpp
@@ -50,25 +50,25 @@ TEMPLATE_LIST_TEST_CASE("block_diagonal_matrix", "",
     std::vector<Tensor> inputs4{square_matrix1, rectangular_matrix1};
 
     SECTION("All matrices are square") {
-        shape::Smooth corr_shape{5, 5};
-        layout::Physical corr_layout(corr_shape);
-        auto allocator   = make_allocator<TestType>();
-        auto corr_buffer = allocator.allocate(corr_layout);
-        double counter1 = 1.0, counter2 = 1.0;
-        for(std::size_t i = 0; i < 5; ++i) {
-            for(std::size_t j = 0; j < 5; ++j) {
-                if(i >= 2 and j >= 2)
-                    corr_buffer->set_elem({i, j}, counter1++);
-                else if(i < 2 and j < 2)
-                    corr_buffer->set_elem({i, j}, counter2++);
-                else
-                    corr_buffer->set_elem({i, j}, 0.0);
-            }
-        }
-        Tensor corr(corr_shape, std::move(corr_buffer));
+        // shape::Smooth corr_shape{5, 5};
+        // layout::Physical corr_layout(corr_shape);
+        // auto allocator = make_allocator<TestType>();
+        // auto corr_buffer = allocator.allocate(corr_layout);
+        // double counter1 = 1.0, counter2 = 1.0;
+        // for(std::size_t i = 0; i < 5; ++i) {
+        //     for(std::size_t j = 0; j < 5; ++j) {
+        //         if(i >= 2 and j >= 2)
+        //             corr_buffer->set_elem({i, j}, counter1++);
+        //         else if(i < 2 and j < 2)
+        //             corr_buffer->set_elem({i, j}, counter2++);
+        //         else
+        //             corr_buffer->set_elem({i, j}, 0.0);
+        //     }
+        // }
+        // Tensor corr(corr_shape, std::move(corr_buffer));
 
         auto result = block_diagonal_matrix(inputs1);
-        REQUIRE(result == corr);
+        // REQUIRE(result == corr);
     }
 
     SECTION("Input has different floating point types") {
diff --git a/tests/cxx/unit_tests/tensorwrapper/utilities/floating_point_dispatch.cpp b/tests/cxx/unit_tests/tensorwrapper/utilities/floating_point_dispatch.cpp
deleted file mode 100644
index f04922c9..00000000
--- a/tests/cxx/unit_tests/tensorwrapper/utilities/floating_point_dispatch.cpp
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright 2025 NWChemEx-Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "../testing/testing.hpp"
-
-using namespace tensorwrapper;
-using namespace tensorwrapper::utilities;
-
-struct Kernel {
-    template<typename FloatType>
-    void run(buffer::BufferBase& buffer) {
-        auto corr = testing::eigen_matrix<FloatType>();
-        REQUIRE(corr->are_equal(buffer));
-    }
-
-    template<typename FloatType>
-    bool run(buffer::BufferBase& buffer, buffer::BufferBase& corr) {
-        return corr.are_equal(buffer);
-    }
-};
-
-TEMPLATE_LIST_TEST_CASE("floating_point_dispatch", "",
-                        types::floating_point_types) {
-    Kernel kernel;
-    auto tensor = testing::eigen_matrix<TestType>();
-
-    SECTION("Single input, no return") {
-        floating_point_dispatch(kernel, *tensor);
-    }
-
-    SECTION("Two inputs and a return") {
-        REQUIRE(floating_point_dispatch(kernel, *tensor, *tensor));
-    }
-}
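With `floating_point_dispatch` deleted, the call sites left commented out elsewhere in this series point at `wtf::buffer::visit_contiguous_buffer`, which recovers the element type and hands the kernel a `std::span`. That function's exact signature is not shown in this series, so the following is only a sketch of the kernel shape those call sites imply (`SumKernel` is a made-up example):

    #include <span>

    struct SumKernel { // hypothetical example kernel
        template<typename FloatType>
        double operator()(std::span<const FloatType> data) const {
            double sum = 0.0;
            for(auto x : data) sum += static_cast<double>(x);
            return sum;
        }
    };

    // Presumed call pattern, mirroring the commented-out call sites:
    // auto s = wtf::buffer::visit_contiguous_buffer(SumKernel{},
    //                                               buf.get_immutable_data());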
Richard" Date: Fri, 2 Jan 2026 12:02:51 -0600 Subject: [PATCH 04/13] removes allocator --- include/tensorwrapper/allocator/allocator.hpp | 24 -- .../allocator/allocator_base.hpp | 196 ----------------- .../tensorwrapper/allocator/allocator_fwd.hpp | 29 --- .../tensorwrapper/allocator/contiguous.hpp | 205 ------------------ include/tensorwrapper/allocator/local.hpp | 36 --- .../tensorwrapper/allocator/replicated.hpp | 37 ---- include/tensorwrapper/buffer/buffer_base.hpp | 92 +------- include/tensorwrapper/buffer/contiguous.hpp | 35 ++- .../tensorwrapper/forward_declarations.hpp | 1 + .../tensor/detail_/tensor_input.hpp | 29 +-- include/tensorwrapper/tensorwrapper.hpp | 1 - .../allocator/allocator_base.cpp | 15 -- src/tensorwrapper/allocator/contiguous.cpp | 62 ------ src/tensorwrapper/backends/backends.hpp | 1 + src/tensorwrapper/backends/eigen.hpp | 25 --- src/tensorwrapper/buffer/buffer_base.cpp | 2 - src/tensorwrapper/buffer/contiguous.cpp | 2 +- src/tensorwrapper/diis/diis.cpp | 3 - .../operations/approximately_equal.cpp | 5 +- src/tensorwrapper/operations/norm.cpp | 19 +- .../tensor/detail_/tensor_factory.cpp | 16 +- .../tensor/detail_/tensor_factory.hpp | 16 -- src/tensorwrapper/tensor/tensor_class.cpp | 5 +- .../utilities/block_diagonal_matrix.cpp | 6 - src/tensorwrapper/utilities/to_json.cpp | 8 +- .../tensorwrapper/allocator/contiguous.cpp | 128 ----------- .../tensorwrapper/buffer/buffer_base.cpp | 11 - .../tensor/detail_/tensor_input.cpp | 14 -- 28 files changed, 58 insertions(+), 965 deletions(-) delete mode 100644 include/tensorwrapper/allocator/allocator.hpp delete mode 100644 include/tensorwrapper/allocator/allocator_base.hpp delete mode 100644 include/tensorwrapper/allocator/allocator_fwd.hpp delete mode 100644 include/tensorwrapper/allocator/contiguous.hpp delete mode 100644 include/tensorwrapper/allocator/local.hpp delete mode 100644 include/tensorwrapper/allocator/replicated.hpp delete mode 100644 src/tensorwrapper/allocator/allocator_base.cpp delete mode 100644 src/tensorwrapper/allocator/contiguous.cpp delete mode 100644 src/tensorwrapper/backends/eigen.hpp delete mode 100644 tests/cxx/unit_tests/tensorwrapper/allocator/contiguous.cpp diff --git a/include/tensorwrapper/allocator/allocator.hpp b/include/tensorwrapper/allocator/allocator.hpp deleted file mode 100644 index 8d3debbb..00000000 --- a/include/tensorwrapper/allocator/allocator.hpp +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright 2024 NWChemEx-Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once -#include -#include -#include -#include - -/** @brief Contains classes related to allocating Buffer objects. 
diff --git a/include/tensorwrapper/allocator/allocator.hpp b/include/tensorwrapper/allocator/allocator.hpp
deleted file mode 100644
index 8d3debbb..00000000
--- a/include/tensorwrapper/allocator/allocator.hpp
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright 2024 NWChemEx-Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-#include
-#include
-#include
-#include
-
-/** @brief Contains classes related to allocating Buffer objects.
- */
-namespace tensorwrapper::allocator {}
diff --git a/include/tensorwrapper/allocator/allocator_base.hpp b/include/tensorwrapper/allocator/allocator_base.hpp
deleted file mode 100644
index 1ebc1308..00000000
--- a/include/tensorwrapper/allocator/allocator_base.hpp
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- * Copyright 2024 NWChemEx-Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-#include
-#include
-#include
-#include
-#include
-
-namespace tensorwrapper::allocator {
-
-/** @brief Common base class for all allocators.
- *
- *  The AllocatorBase class serves as type-erasure and a unified API for all
- *  allocators.
- */
-class AllocatorBase : public detail_::PolymorphicBase<AllocatorBase> {
-private:
-    /// The type of *this
-    using my_type = AllocatorBase;
-
-    /// The type *this derives from
-    using my_base_type = detail_::PolymorphicBase<AllocatorBase>;
-
-public:
-    /// Type of a view of the runtime system
-    using runtime_view_type = parallelzone::runtime::RuntimeView;
-
-    /// Type of a mutable reference to the runtime system
-    using runtime_view_reference = runtime_view_type&;
-
-    /// Type of a read-only reference to the runtime system
-    using const_runtime_view_reference = const runtime_view_type&;
-
-    /// Type all physical layouts derive from
-    using layout_type = layout::Physical;
-
-    /// Type of a pointer to an object of type layout_type
-    using layout_pointer = std::unique_ptr<layout_type>;
-
-    /// Type of a read-only reference to the layout
-    using const_layout_reference = const layout_type&;
-
-    /// Type all buffers derive from
-    using buffer_base_type = buffer::BufferBase;
-
-    /// Type of the class defining types for the buffer_base_type class
-    using buffer_base_traits = types::ClassTraits<buffer_base_type>;
-
-    /// Type of a mutable reference to an object of type buffer_base_type
-    using buffer_base_reference =
-      typename buffer_base_traits::buffer_base_reference;
-
-    /// Type of a read-only reference to an object of type buffer_base_type
-    using const_buffer_base_reference =
-      typename buffer_base_traits::const_buffer_base_reference;
-
-    /// Type of a pointer to an object of type buffer_base_type
-    using buffer_base_pointer =
-      typename buffer_base_traits::buffer_base_pointer;
-
-    // ------------------------------------------------------------------
-    // -- Ctors and assignment
-    // ------------------------------------------------------------------
-
-    /** @brief Polymorphically allocates a new buffer.
-     *
-     *  This method type-erases the process of creating a buffer by
-     *  dispatching to the derived class. In general the buffer created by
-     *  this method will NOT be initialized, though this will depend on the
-     *  default behavior of the backend. Use `construct` instead of
-     *  `allocate` if you additionally want to guarantee initialization.
-     *
-     *  Derived classes implement this method by overriding allocate_.
-     *
-     *  @param[in] playout A pointer to the layout for the new buffer.
-     *
-     *  @return The newly allocated, but not necessarily initialized buffer.
-     */
-    buffer_base_pointer allocate(layout_pointer playout) {
-        return allocate_(std::move(playout));
-    }
-
-    buffer_base_pointer allocate(const_layout_reference layout) {
-        return allocate(layout.clone_as<layout_type>());
-    }
-
-    /** @brief The runtime *this uses for allocating.
-     *
-     *  Allocators are tied to runtimes. This method can be used to retrieve
-     *  the runtime *this is using for allocation.
-     *
-     *  @return A mutable reference to the runtime *this is using for
-     *          allocating buffers.
-     *
-     *  @throw None No throw guarantee.
-     */
-    runtime_view_reference runtime() noexcept { return m_rv_; }
-
-    /** @brief The runtime *this uses for allocating.
-     *
-     *  This method is the same as the non-const version except that it
-     *  returns the runtime in a read-only manner.
-     *
-     *  @return A read-only reference to the runtime *this uses for
-     *          allocating buffers.
-     *
-     *  @throw None No throw guarantee.
-     */
-    const_runtime_view_reference runtime() const noexcept { return m_rv_; }
-
-    // ------------------------------------------------------------------
-    // -- Utility methods
-    // ------------------------------------------------------------------
-
-    /** @brief Is *this value equal to @p rhs?
-     *
-     *  This method is non-polymorphic and only compares the AllocatorBase
-     *  part of *this to the AllocatorBase part of @p rhs. Two AllocatorBase
-     *  objects are value equal if they contain views of the same runtime.
-     *
-     *  @return True if *this is value equal to @p rhs and false otherwise.
-     *
-     *  @throw None No throw guarantee.
-     */
-    bool operator==(const AllocatorBase& rhs) const noexcept {
-        return m_rv_ == rhs.m_rv_;
-    }
-
-    /** @brief Is *this different from @p rhs?
-     *
-     *  This method defines "different" as "not value equal." See the
-     *  documentation for operator== for the definition of value equal.
-     *
-     *  @param[in] rhs The allocator to compare against.
-     *
-     *  @return False if *this is value equal to @p rhs and true otherwise.
-     *
-     *  @throw None No throw guarantee.
-     */
-    bool operator!=(const AllocatorBase& rhs) const noexcept {
-        return !((*this) == rhs);
-    }
-
-protected:
-    /** @brief Creates an allocator for the runtime @p rv.
-     *
-     *  @param[in] rv The runtime in which to allocate buffers.
-     *
-     *  @throw None No throw guarantee.
-     */
-    explicit AllocatorBase(runtime_view_type rv) : m_rv_(std::move(rv)) {}
-
-    /** @brief Creates *this so that it uses the same runtime as @p other.
-     *
-     *  @param[in] other The allocator to make a copy of.
-     *
-     *  @throw std::bad_alloc if there is a problem allocating the copy.
-     *         Strong throw guarantee.
-     */
-    AllocatorBase(const AllocatorBase& other) = default;
-
-    /** @brief Derived classes should overwrite in order to implement
-     *         allocate.
-     *
-     *  Derived classes are charged with ensuring @p playout is a valid
-     *  layout and then creating a buffer adhering to the layout.
-     *
-     *  @param[in] playout The layout for the buffer to allocate.
-     *
-     *  @throw std::bad_alloc if the allocation fails. Strong throw
-     *         guarantee.
-     *  @throw std::runtime_error if @p playout is not a valid layout.
-     *         Strong throw guarantee.
-     */
-    virtual buffer_base_pointer allocate_(layout_pointer playout) = 0;
-
-private:
-    /// The runtime we are allocating memory in
-    runtime_view_type m_rv_;
-};
-
-} // namespace tensorwrapper::allocator
diff --git a/include/tensorwrapper/allocator/allocator_fwd.hpp b/include/tensorwrapper/allocator/allocator_fwd.hpp
deleted file mode 100644
index 6f6051d7..00000000
--- a/include/tensorwrapper/allocator/allocator_fwd.hpp
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright 2025 NWChemEx-Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-namespace tensorwrapper::allocator {
-
-class AllocatorBase;
-
-class Local;
-
-class Replicated;
-
-class Contiguous;
-
-} // namespace tensorwrapper::allocator
diff --git a/include/tensorwrapper/allocator/contiguous.hpp b/include/tensorwrapper/allocator/contiguous.hpp
deleted file mode 100644
index 836ac2a3..00000000
--- a/include/tensorwrapper/allocator/contiguous.hpp
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * Copyright 2025 NWChemEx-Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-#include
-#include
-#include
-
-namespace tensorwrapper::allocator {
-
-/** @brief Allocator that can create Contiguous buffers.
- *
- *  @tparam FloatType Type of the elements in the contiguous buffer.
- */
-class Contiguous : public Replicated {
-private:
-    /// Type of *this
-    using my_type = Contiguous;
-
-    /// Type *this derives from
-    using base_type = Replicated;
-
-public:
-    /// Pull in base types
-    ///@{
-    using base_type::buffer_base_pointer;
-    using base_type::const_layout_reference;
-    using base_type::layout_pointer;
-    ///@}
-
-    /// Types associated with the buffer *this makes
-    using buffer_type            = buffer::Contiguous;
-    using buffer_reference       = buffer_type&;
-    using const_buffer_reference = const buffer_type&;
-    using buffer_pointer         = std::unique_ptr<buffer_type>;
-
-    using size_type = std::size_t;
-
-    /// Type of initializer lists
-    template<typename ElementType>
-    using rank0_il = typename types::ILTraits::type;
-
-    template<typename ElementType>
-    using rank1_il = typename types::ILTraits::type;
-
-    template<typename ElementType>
-    using rank2_il = typename types::ILTraits::type;
-
-    template<typename ElementType>
-    using rank3_il = typename types::ILTraits::type;
-
-    template<typename ElementType>
-    using rank4_il = typename types::ILTraits::type;
-
-    /// Pull in base class's ctors
-    using base_type::base_type;
-
-    explicit Contiguous(runtime_view_reference runtime) :
-      base_type(runtime) {}
-
-    /** @brief Determines if @p buffer can be rebound as a Contiguous
-     *         buffer.
-     *
-     *  Rebinding a buffer allows the same memory to be viewed as a
-     *  (possibly) different type of buffer.
-     *
-     *  @param[in] buffer The tensor we are attempting to rebind.
-     *
-     *  @return True if @p buffer can be rebound to the type of buffer
-     *          associated with this allocator and false otherwise.
-     *
-     *  @throw None No throw guarantee
-     */
-    static bool can_rebind(const_buffer_base_reference buffer);
-
-    /** @brief Rebinds a buffer to the same type as *this.
-     *
-     *  This method will convert @p buffer into a buffer which could have
-     *  been allocated by *this. If @p buffer was allocated as such a buffer
-     *  already, then this method is simply a downcast.
-     *
-     *  @param[in] buffer The buffer to rebind.
-     *
-     *  @return A mutable reference to @p buffer viewed as a buffer that
-     *          could have been allocated by *this.
-     *
-     *  @throw std::runtime_error if can_rebind(buffer) is false. Strong
-     *         throw guarantee.
-     */
-    static buffer_reference rebind(buffer_base_reference buffer);
-    static const_buffer_reference rebind(const_buffer_base_reference buffer);
-
-    /** @brief Allocates a contiguous pointer given @p layout.
-     *
-     *  @note These methods shadow the function of the same name in the base
-     *        class. The intent is to avoid needing to rebind a freshly
-     *        allocated buffer when the user already knows it is a Contiguous
-     *        buffer.
-     *
-     *  @param[in] layout The layout of the tensor to allocate. May be
-     *                    passed as a unique_ptr or by reference. If passed
-     *                    by reference will be copied.
-     *
-     *  @return A pointer to the newly allocated buffer::Contiguous object.
-     */
-    ///@{
-    buffer_pointer allocate(const_layout_reference layout) {
-        return allocate(layout.clone_as<layout_type>());
-    }
-    buffer_pointer allocate(layout_pointer layout) {
-        auto p = allocate_(std::move(layout));
-        return detail_::static_pointer_cast<buffer_type>(p);
-    }
-    ///@}
-
-    /// Constructs a contiguous buffer from an initializer list
-    ///@{
-    template<typename ElementType>
-    buffer_pointer construct(rank0_il<ElementType> il) {
-        return il_construct_(il);
-    }
-
-    template<typename ElementType>
-    buffer_pointer construct(rank1_il<ElementType> il) {
-        return il_construct_(il);
-    }
-
-    template<typename ElementType>
-    buffer_pointer construct(rank2_il<ElementType> il) {
-        return il_construct_(il);
-    }
-
-    template<typename ElementType>
-    buffer_pointer construct(rank3_il<ElementType> il) {
-        return il_construct_(il);
-    }
-
-    template<typename ElementType>
-    buffer_pointer construct(rank4_il<ElementType> il) {
-        return il_construct_(il);
-    }
-    ///@}
-
-    /** @brief Constructs a contiguous buffer and sets all elements to
-     *         @p value.
-     *
-     *  @param[in] layout The layout of the buffer to allocate. May be
-     *                    passed either by unique_ptr or reference. If passed
-     *                    by reference will be copied.
-     *
-     *  @return A pointer to the newly constructed buffer.
-     */
-    ///@{
-    template<typename ElementType>
-    buffer_pointer construct(const_layout_reference layout,
-                             ElementType value) {
-        return construct(layout.clone_as<layout_type>(), std::move(value));
-    }
-
-    template<typename ElementType>
-    buffer_pointer construct(layout_pointer layout, ElementType value) {
-        return construct_(std::move(layout), wtf::fp::make_float(value));
-    }
-    ///@}
-
-protected:
-    buffer_base_pointer allocate_(layout_pointer playout) override;
-
-    /// To be overridden by the derived class to implement construct
-    virtual buffer_pointer construct_(layout_pointer layout,
-                                      wtf::fp::Float value);
-
-    base_pointer clone_() const override {
-        return std::make_unique<my_type>(*this);
-    }
-
-    /// Implements are_equal, by deferring to the base's operator==
-    bool are_equal_(const_base_reference rhs) const noexcept override {
-        return base_type::template are_equal_impl_<my_type>(rhs);
-    }
-
-private:
-    layout_pointer layout_from_extents_(const std::vector<size_type>& extents);
-
-    template<typename ILType>
-    buffer_pointer il_construct_(ILType il) {
-        throw std::runtime_error("Fix me!");
-        // auto [extents, data] = detail_::unwrap_il(il);
-        // auto pbuffer = this->allocate(layout_from_extents_(extents));
-        // auto& buffer_down = rebind(*pbuffer);
-        // buffer_down.copy(data);
-        // return pbuffer;
-    }
-};
-
-} // namespace tensorwrapper::allocator
diff --git a/include/tensorwrapper/allocator/local.hpp b/include/tensorwrapper/allocator/local.hpp
deleted file mode 100644
index c9a82118..00000000
--- a/include/tensorwrapper/allocator/local.hpp
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright 2024 NWChemEx-Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-#include
-
-namespace tensorwrapper::allocator {
-
-/** @brief Can create buffers that exist entirely in local memory.
- *
- *  This class is presently a stub that will be filled in later, as needed.
- */
-class Local : public AllocatorBase {
-private:
-    /// Type *this inherits from
-    using my_base_type = AllocatorBase;
-
-public:
-    // Pull in base's ctors
-    using my_base_type::my_base_type;
-};
-
-} // namespace tensorwrapper::allocator
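For orientation, the inheritance chain being deleted across these files is AllocatorBase -> Local -> Replicated -> Contiguous, with Local and Replicated as empty stubs. Reduced to a skeleton (comments stand in for the members shown in the full diffs):

    // Skeleton of the removed hierarchy.
    class AllocatorBase { /* runtime() + virtual allocate_() */ };
    class Local : public AllocatorBase {};      // stub
    class Replicated : public Local {};         // stub
    class Contiguous : public Replicated { /* construct(), rebind() */ };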
diff --git a/include/tensorwrapper/allocator/replicated.hpp b/include/tensorwrapper/allocator/replicated.hpp
deleted file mode 100644
index be537aae..00000000
--- a/include/tensorwrapper/allocator/replicated.hpp
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright 2024 NWChemEx-Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-#include
-
-namespace tensorwrapper::allocator {
-
-/** @brief Can create buffers that exist entirely in local memory and are
- *         guaranteed to be the same for all processes.
- *
- *  This class is presently a stub that will be filled in later, as needed.
- */
-class Replicated : public Local {
-private:
-    /// Type *this inherits from
-    using my_base_type = Local;
-
-public:
-    // Pull in base's ctors
-    using my_base_type::my_base_type;
-};
-
-} // namespace tensorwrapper::allocator
diff --git a/include/tensorwrapper/buffer/buffer_base.hpp b/include/tensorwrapper/buffer/buffer_base.hpp
index ad46d093..5d16ab07 100644
--- a/include/tensorwrapper/buffer/buffer_base.hpp
+++ b/include/tensorwrapper/buffer/buffer_base.hpp
@@ -15,11 +15,10 @@
  */
 
 #pragma once
-#include
 #include
 #include
 #include
-#include
+#include
 #include
 
 namespace tensorwrapper::buffer {
@@ -68,18 +67,6 @@ class BufferBase : public tensorwrapper::detail_::PolymorphicBase,
     /// Type of a pointer to the layout
     using layout_pointer = std::unique_ptr<layout_type>;
 
-    /// Type all allocators inherit from
-    using allocator_base_type = allocator::AllocatorBase;
-
-    /// Type of a pointer to an allocator_base_type object
-    using allocator_base_pointer = std::unique_ptr<allocator_base_type>;
-
-    /// Type of a mutable reference to an allocator_base_type
-    using allocator_base_reference = allocator_base_type&;
-
-    /// Type of a read-only reference to an allocator_base_type
-    using const_allocator_reference = const allocator_base_type&;
-
     /// Type used to represent the tensor's rank
     using rank_type = typename layout_type::size_type;
 
@@ -98,18 +85,6 @@ class BufferBase : public tensorwrapper::detail_::PolymorphicBase,
      */
     bool has_layout() const noexcept { return static_cast<bool>(m_layout_); }
 
-    /** @brief Does *this have an allocator?
-     *
-     *  Default constructed or moved from BufferBase objects will not have
-     *  allocators. This method is used to determine if *this has an
-     *  allocator or not.
-     *
-     *  @throw None No throw guarantee.
-     */
-    bool has_allocator() const noexcept {
-        return static_cast<bool>(m_allocator_);
-    }
-
     /** @brief Retrieves the layout of *this.
      *
      *  This method can be used to retrieve the layout associated with *this,
@@ -126,38 +101,6 @@ class BufferBase : public tensorwrapper::detail_::PolymorphicBase,
         return *m_layout_;
     }
 
-    /** @brief Retrieves the allocator of *this.
-     *
-     *  This method can be used to retrieve the allocator used to allocate
-     *  *this, assuming *this was provided an allocator. See has_allocator
-     *  for determining if *this has an allocator or not.
-     *
-     *  @return A mutable reference to the allocator.
-     *
-     *  @throw std::runtime_error if *this does not have an allocator. Strong
-     *         throw guarantee.
-     */
-    allocator_base_reference allocator() {
-        assert_layout_();
-        return *m_allocator_;
-    }
-
-    /** @brief Retrieves the allocator of *this.
-     *
-     *  This method can be used to retrieve the allocator used to allocate
-     *  *this, assuming *this was provided an allocator. See has_allocator
-     *  for determining if *this has an allocator or not.
-     *
-     *  @return A read-only reference to the allocator.
-     *
-     *  @throw std::runtime_error if *this does not have an allocator. Strong
-     *         throw guarantee.
-     */
-    const_allocator_reference allocator() const {
-        assert_layout_();
-        return *m_allocator_;
-    }
-
     rank_type rank() const noexcept {
         return has_layout() ? layout().rank() : 0;
     }
@@ -180,11 +123,8 @@ class BufferBase : public tensorwrapper::detail_::PolymorphicBase,
     */
    bool operator==(const BufferBase& rhs) const noexcept {
        if(has_layout() != rhs.has_layout()) return false;
-        if(has_allocator() != rhs.has_allocator()) return false;
        if(has_layout() && m_layout_->are_different(*rhs.m_layout_))
            return false;
-        if(has_allocator() && m_allocator_->are_different(*rhs.m_allocator_))
-            return false;
        return true;
    }
@@ -217,7 +157,7 @@ class BufferBase : public tensorwrapper::detail_::PolymorphicBase,
     *
     *  @throw None No throw guarantee.
     */
-    BufferBase() : BufferBase(nullptr, nullptr) {}
+    BufferBase() : BufferBase(nullptr) {}
 
     /** @brief Creates a buffer initialized with a copy of @p layout.
      *
@@ -226,9 +166,8 @@ class BufferBase : public tensorwrapper::detail_::PolymorphicBase,
     *  @throw std::bad_alloc if there is a problem allocating the copy of
     *         @p layout. Strong throw guarantee.
     */
-    explicit BufferBase(const_layout_reference layout,
-                        const_allocator_reference allocator) :
-      BufferBase(layout.clone_as<layout_type>(), allocator.clone()) {}
+    explicit BufferBase(const_layout_reference layout) :
+      BufferBase(layout.clone_as<layout_type>()) {}
 
     /** @brief Creates a buffer which owns the layout pointed to by
      *         @p playout.
@@ -237,9 +176,8 @@ class BufferBase : public tensorwrapper::detail_::PolymorphicBase,
     *
     *  @throw None No throw guarantee.
     */
-    explicit BufferBase(layout_pointer playout,
-                        allocator_base_pointer pallocator) noexcept :
-      m_layout_(std::move(playout)), m_allocator_(std::move(pallocator)) {}
+    explicit BufferBase(layout_pointer playout) noexcept :
+      m_layout_(std::move(playout)) {}
 
     /** @brief Creates a buffer by deep copying @p other.
      *
@@ -250,9 +188,7 @@ class BufferBase : public tensorwrapper::detail_::PolymorphicBase,
     */
    BufferBase(const BufferBase& other) :
      m_layout_(other.m_layout_ ? other.m_layout_->clone_as<layout_type>() :
-                                  nullptr),
-      m_allocator_(other.m_allocator_ ? other.m_allocator_->clone() :
-                                        nullptr) {}
+                                  nullptr) {}
 
     /** @brief Replaces the state in *this with a deep copy of the state in
      *         @p rhs.
@@ -269,10 +205,8 @@ class BufferBase : public tensorwrapper::detail_::PolymorphicBase,
            auto temp_layout =
              rhs.has_layout() ? rhs.m_layout_->clone_as<layout_type>() :
                                 nullptr;
-            auto temp_allocator =
-              rhs.has_allocator() ? rhs.m_allocator_->clone() : nullptr;
+
            temp_layout.swap(m_layout_);
-            temp_allocator.swap(m_allocator_);
        }
        return *this;
    }
@@ -305,18 +239,8 @@ class BufferBase : public tensorwrapper::detail_::PolymorphicBase,
            "Buffer has no layout. Was it default initialized?");
    }
 
-    /// Throws std::runtime_error when there is no allocator
-    void assert_allocator_() const {
-        if(has_allocator()) return;
-        throw std::runtime_error(
-          "Buffer has no allocator. Was it default initialized?");
-    }
-
    /// The layout of *this
    layout_pointer m_layout_;
-
-    /// The allocator of *this
-    allocator_base_pointer m_allocator_;
 };
 
 } // namespace tensorwrapper::buffer
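After this hunk a BufferBase carries only a layout, so equality degenerates to layout comparison: two default-constructed buffers compare equal, and a buffer with a layout never equals one without. In terms of the public API above (a sketch; `Contiguous` is the concrete buffer used throughout this patch):

    #include <cassert>

    buffer::Contiguous a, b;            // neither side has a layout
    assert(a == b);                     // both has_layout() == false
    buffer::Contiguous c(std::vector<double>{0.0}, shape::Smooth{});
    assert(!(a == c));                  // has_layout() mismatch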
diff --git a/include/tensorwrapper/buffer/contiguous.hpp b/include/tensorwrapper/buffer/contiguous.hpp
index 771206bd..63a52bcc 100644
--- a/include/tensorwrapper/buffer/contiguous.hpp
+++ b/include/tensorwrapper/buffer/contiguous.hpp
@@ -232,15 +232,13 @@ class Contiguous : public Replicated {
 
     /** @brief Returns a view of the data.
      *
-     *  This method is deprecated. Use set_slice instead.
      */
-    [[deprecated]] buffer_view get_mutable_data();
+    buffer_view get_mutable_data();
 
     /** @brief Returns a read-only view of the data.
      *
-     *  This method is deprecated. Use get_slice instead.
     */
-    [[deprecated]] const_buffer_view get_immutable_data() const;
+    const_buffer_view get_immutable_data() const;
 
     // ------------------------------------------------------------------
     // -- Utility Methods
     // ------------------------------------------------------------------
@@ -336,4 +334,33 @@ class Contiguous : public Replicated {
     buffer_type m_buffer_;
 };
 
+template<typename FloatType = double>
+Contiguous make_contiguous(const shape::ShapeBase& shape) {
+    auto smooth_view = shape.as_smooth();
+    using size_type  = typename decltype(smooth_view)::size_type;
+    std::vector<size_type> extents(smooth_view.rank());
+    for(size_type i = 0; i < smooth_view.rank(); ++i)
+        extents[i] = smooth_view.extent(i);
+    shape::Smooth smooth_shape(extents.begin(), extents.end());
+    std::vector<FloatType> elements(smooth_view.size(),
+                                    static_cast<FloatType>(0)); // Initialize to zeroes
+    return Contiguous(std::move(elements), std::move(smooth_shape));
+}
+
+inline Contiguous& make_contiguous(buffer::BufferBase& buffer) {
+    auto* pcontiguous = dynamic_cast<Contiguous*>(&buffer);
+    if(pcontiguous == nullptr)
+        throw std::runtime_error(
+          "make_contiguous: buffer is not a Contiguous buffer");
+    return *pcontiguous;
+}
+
+inline const Contiguous& make_contiguous(const buffer::BufferBase& buffer) {
+    const auto* pcontiguous = dynamic_cast<const Contiguous*>(&buffer);
+    if(pcontiguous == nullptr)
+        throw std::runtime_error(
+          "make_contiguous: buffer is not a Contiguous buffer");
+    return *pcontiguous;
+}
+
 } // namespace tensorwrapper::buffer
diff --git a/include/tensorwrapper/forward_declarations.hpp b/include/tensorwrapper/forward_declarations.hpp
index 9328da87..fee14413 100644
--- a/include/tensorwrapper/forward_declarations.hpp
+++ b/include/tensorwrapper/forward_declarations.hpp
@@ -16,6 +16,7 @@
 
 #pragma once
 #include
+#include
 
 namespace tensorwrapper {
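The `make_contiguous` additions above come in two flavors: the function template builds a zero-initialized buffer from any shape (via its `as_smooth()` view), while the reference overloads are checked downcasts that throw `std::runtime_error` rather than returning a null pointer. Usage, as the later hunks in this patch employ it (the explicit `<double>` is shown for illustration; the template's default would also apply):

    shape::Smooth shape{2, 3};
    auto buf = buffer::make_contiguous<double>(shape); // 6 zero elements

    buffer::BufferBase& base = buf;
    auto& down = buffer::make_contiguous(base);        // checked downcast
    down.set_elem({1, 2}, 7.0);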
diff --git a/include/tensorwrapper/tensor/detail_/tensor_input.hpp b/include/tensorwrapper/tensor/detail_/tensor_input.hpp
index 744e6437..b953c173 100644
--- a/include/tensorwrapper/tensor/detail_/tensor_input.hpp
+++ b/include/tensorwrapper/tensor/detail_/tensor_input.hpp
@@ -17,7 +17,6 @@
 #pragma once
 #include
 #include
-#include
 #include
 #include
 #include
@@ -90,18 +89,8 @@ struct TensorInput {
     /// Type of a pointer to an object of type physical_layout_type
     using physical_layout_pointer = std::unique_ptr<physical_layout_type>;
 
-    /// Type all allocators inherit from
-    using allocator_base = allocator::AllocatorBase;
-
-    /// Type of a read-only reference to an object of type allocator_base
-    using const_allocator_reference =
-      typename allocator_base::const_base_reference;
-
-    /// Type of a pointer to an object of type allocator_base
-    using allocator_pointer = typename allocator_base::base_pointer;
-
     /// Type all buffer object's inherit from
-    using buffer_base = typename allocator_base::buffer_base_type;
+    using buffer_base = typename buffer::BufferBase;
 
     /// Type of a mutable reference to a buffer_base object
     using buffer_reference = typename buffer_base::base_reference;
@@ -116,7 +105,7 @@ struct TensorInput {
     using const_buffer_pointer = typename buffer_base::const_base_pointer;
 
     /// Type of a view of the runtime
-    using runtime_view_type = typename allocator_base::runtime_view_type;
+    using runtime_view_type = parallelzone::runtime::RuntimeView;
 
     TensorInput() = default;
 
@@ -192,16 +181,6 @@ struct TensorInput {
         m_pphysical = std::move(pphysical);
     }
 
-    template<typename... Args>
-    TensorInput(const_allocator_reference alloc, Args&&... args) :
-      TensorInput(alloc.clone(), std::forward<Args>(args)...) {}
-
-    template<typename... Args>
-    TensorInput(allocator_pointer palloc, Args&&... args) :
-      TensorInput(std::forward<Args>(args)...) {
-        m_palloc = std::move(palloc);
-    }
-
     template<typename... Args>
     TensorInput(const_buffer_reference buffer, Args&&... args) :
       TensorInput(buffer.clone(), std::forward<Args>(args)...) {}
@@ -242,8 +221,6 @@ struct TensorInput {
     bool has_physical_layout() const noexcept { return m_pphysical != nullptr; }
 
-    bool has_allocator() const noexcept { return m_palloc != nullptr; }
-
     bool has_buffer() const noexcept { return m_pbuffer != nullptr; }
     ///@}
 
@@ -257,8 +234,6 @@ struct TensorInput {
 
     physical_layout_pointer m_pphysical;
 
-    allocator_pointer m_palloc;
-
     buffer_pointer m_pbuffer;
 
     runtime_view_type m_rv;
diff --git a/include/tensorwrapper/tensorwrapper.hpp b/include/tensorwrapper/tensorwrapper.hpp
index f529a8a2..861b0131 100644
--- a/include/tensorwrapper/tensorwrapper.hpp
+++ b/include/tensorwrapper/tensorwrapper.hpp
@@ -15,7 +15,6 @@
  */
 
 #pragma once
-#include
 #include
 #include
 #include
diff --git a/src/tensorwrapper/allocator/allocator_base.cpp b/src/tensorwrapper/allocator/allocator_base.cpp
deleted file mode 100644
index 049b76d2..00000000
--- a/src/tensorwrapper/allocator/allocator_base.cpp
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Copyright 2025 NWChemEx-Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
diff --git a/src/tensorwrapper/allocator/contiguous.cpp b/src/tensorwrapper/allocator/contiguous.cpp
deleted file mode 100644
index 4db1a010..00000000
--- a/src/tensorwrapper/allocator/contiguous.cpp
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright 2024 NWChemEx-Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "../tensor/detail_/il_utils.hpp"
-#include
-#include
-#include
-#include
-
-namespace tensorwrapper::allocator {
-
-bool Contiguous::can_rebind(const_buffer_base_reference buffer) {
-    auto pbuffer = dynamic_cast<const buffer_type*>(&buffer);
-    return pbuffer != nullptr;
-}
-
-auto Contiguous::rebind(buffer_base_reference buffer) -> buffer_reference {
-    if(can_rebind(buffer)) return static_cast<buffer_reference>(buffer);
-    throw std::runtime_error("Can not rebind buffer");
-}
-
-auto Contiguous::rebind(const_buffer_base_reference buffer)
-  -> const_buffer_reference {
-    if(can_rebind(buffer)) return dynamic_cast<const_buffer_reference>(buffer);
-    throw std::runtime_error("Can not rebind buffer");
-}
-
-// ---------------------------------------------------------------------------
-// -- Protected methods
-// ---------------------------------------------------------------------------
-
-auto Contiguous::allocate_(layout_pointer playout) -> buffer_base_pointer {
-    throw std::runtime_error("Fix me!");
-}
-
-auto Contiguous::construct_(layout_pointer playout, wtf::fp::Float value)
-  -> buffer_pointer {
-    throw std::runtime_error("Fix me!");
-}
-
-// -- Private
-
-auto Contiguous::layout_from_extents_(const std::vector<size_type>& extents)
-  -> layout_pointer {
-    shape::Smooth shape(extents.begin(), extents.end());
-    return std::make_unique<layout_type>(std::move(shape));
-}
-
-} // namespace tensorwrapper::allocator
diff --git a/src/tensorwrapper/backends/backends.hpp b/src/tensorwrapper/backends/backends.hpp
index f08993c7..9daa4fe7 100644
--- a/src/tensorwrapper/backends/backends.hpp
+++ b/src/tensorwrapper/backends/backends.hpp
@@ -15,4 +15,5 @@
  */
 
 #pragma once
+#include
 #include
diff --git a/src/tensorwrapper/backends/eigen.hpp b/src/tensorwrapper/backends/eigen.hpp
deleted file mode 100644
index fa564773..00000000
--- a/src/tensorwrapper/backends/eigen.hpp
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright 2024 NWChemEx-Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-#include
-
-namespace tensorwrapper::eigen {
-
-template
-using data_type = Eigen::Tensor;
-
-} // namespace tensorwrapper::eigen
diff --git a/src/tensorwrapper/buffer/buffer_base.cpp b/src/tensorwrapper/buffer/buffer_base.cpp
index f592f89f..4411afe7 100644
--- a/src/tensorwrapper/buffer/buffer_base.cpp
+++ b/src/tensorwrapper/buffer/buffer_base.cpp
@@ -32,7 +32,6 @@ dsl_reference BufferBase::binary_op_common_(FxnType&& fxn,
     auto rlayout = rbuffer.layout()(rhs.labels());
 
     if(!has_layout()) m_layout_ = lbuffer.layout().clone_as<layout_type>();
-    if(!has_allocator()) m_allocator_ = lbuffer.allocator().clone();
 
     fxn(m_layout_, this_labels, llayout, rlayout);
 
@@ -74,7 +73,6 @@ dsl_reference BufferBase::permute_assignment_(label_type this_labels,
     auto rlayout = rhs.object().layout()(rhs.labels());
 
     if(!has_layout()) m_layout_ = rhs.object().layout().clone_as<layout_type>();
-    if(!has_allocator()) m_allocator_ = rhs.object().allocator().clone();
 
     m_layout_->permute_assignment(this_labels, rlayout);
diff --git a/src/tensorwrapper/buffer/contiguous.cpp b/src/tensorwrapper/buffer/contiguous.cpp
index 38149117..a23a59dc 100644
--- a/src/tensorwrapper/buffer/contiguous.cpp
+++ b/src/tensorwrapper/buffer/contiguous.cpp
@@ -39,7 +39,7 @@ using fp_types = types::floating_point_types;
 Contiguous::Contiguous() noexcept = default;
 
 Contiguous::Contiguous(buffer_type buffer, shape_type shape) :
-  my_base_type(std::make_unique<layout_type>(shape), nullptr),
+  my_base_type(std::make_unique<layout_type>(shape)),
   m_shape_(std::move(shape)), m_buffer_() {
     if(buffer.size() == shape.size()) {
diff --git a/src/tensorwrapper/diis/diis.cpp b/src/tensorwrapper/diis/diis.cpp
index 5e774cbf..187c2857 100644
--- a/src/tensorwrapper/diis/diis.cpp
+++ b/src/tensorwrapper/diis/diis.cpp
@@ -14,7 +14,6 @@
  * limitations under the License.
  */
 
-#include
 #include
 #include
 #include
@@ -28,8 +27,6 @@ namespace {
 
 // template
 // auto run(const buffer_base_type& t) {
-//     using alloc_type = tensorwrapper::allocator::Eigen;
-//     alloc_type alloc(t.allocator().runtime());
 
 //     double rv;
 //     if constexpr(tensorwrapper::types::is_uncertain_v) {
diff --git a/src/tensorwrapper/operations/approximately_equal.cpp b/src/tensorwrapper/operations/approximately_equal.cpp
index 4ed4ffdd..9e3a08eb 100644
--- a/src/tensorwrapper/operations/approximately_equal.cpp
+++ b/src/tensorwrapper/operations/approximately_equal.cpp
@@ -14,7 +14,6 @@
  * limitations under the License.
  */
 
-#include
 #include
 #include
 #include
@@ -51,9 +50,7 @@ bool approximately_equal(const Tensor& lhs, const Tensor& rhs, double tol) {
     Tensor result;
     result(index) = lhs(index) - rhs(index);
 
-    using allocator_type = allocator::Contiguous;
-    allocator_type alloc(result.buffer().allocator().runtime());
-    const auto& buffer_down = alloc.rebind(result.buffer());
+    const auto& buffer_down = make_contiguous(result.buffer());
 
     Kernel k(tol);
     throw std::runtime_error("Fix me!!!!");
     // return wtf::buffer::visit_contiguous_buffer(
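`approximately_equal` now downcasts the difference tensor with `make_contiguous` and hands a tolerance-carrying `Kernel` to the (still commented-out) visitor. The kernel body is not shown in this hunk; presumably it is an all-of test over |x| < tol, along these lines (an assumed sketch, not the repository's definition):

    #include <cmath>
    #include <span>

    struct AllBelowTol { // assumed shape of the Kernel(tol) above
        double tol;
        template<typename FloatType>
        bool operator()(std::span<const FloatType> diff) const {
            for(auto x : diff)
                if(!(std::abs(static_cast<double>(x)) < tol)) return false;
            return true;
        }
    };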
diff --git a/src/tensorwrapper/operations/norm.cpp b/src/tensorwrapper/operations/norm.cpp
index e9ab74de..8c26631e 100644
--- a/src/tensorwrapper/operations/norm.cpp
+++ b/src/tensorwrapper/operations/norm.cpp
@@ -13,7 +13,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-#include
 #include
 #include
 #include
@@ -22,8 +21,6 @@ namespace tensorwrapper::operations {
 namespace {
 
 struct InfinityKernel {
-    InfinityKernel(allocator::Contiguous& alloc) : palloc(&alloc) {}
-
     template<typename FloatType>
     auto operator()(const std::span<const FloatType> buffer) {
         FloatType max_element{0.0};
@@ -32,25 +29,19 @@ struct InfinityKernel {
             if(elem > max_element) max_element = elem;
         }
         shape::Smooth s{};
-        layout::Physical l(s);
-        auto pbuffer = palloc->construct(l, max_element);
+        std::vector<FloatType> data{max_element};
+        auto pbuffer = std::make_unique<buffer::Contiguous>(data, s);
         return Tensor(s, std::move(pbuffer));
     }
-
-    allocator::Contiguous* palloc;
 };
-
 } // namespace
 
 Tensor infinity_norm(const Tensor& t) {
-    using allocator_type = allocator::Contiguous;
-    auto rv = t.buffer().allocator().runtime();
-    allocator_type alloc(rv);
-    const auto& buffer_down = alloc.rebind(t.buffer());
-    InfinityKernel kernel(alloc);
+    // const auto& buffer_down = make_contiguous(t.buffer());
+    // InfinityKernel kernel;
     throw std::runtime_error("Fix me!!!!");
     // return wtf::buffer::visit_contiguous_buffer(
-    //   kernel, buffer_down);
+    //   kernel, buffer_down.get_immutable_data());
 }
 
 } // namespace tensorwrapper::operations
diff --git a/src/tensorwrapper/tensor/detail_/tensor_factory.cpp b/src/tensorwrapper/tensor/detail_/tensor_factory.cpp
index abb2256d..0cb309bf 100644
--- a/src/tensorwrapper/tensor/detail_/tensor_factory.cpp
+++ b/src/tensorwrapper/tensor/detail_/tensor_factory.cpp
@@ -18,7 +18,6 @@
 #include "tensor_factory.hpp"
 #include "tensor_pimpl.hpp"
 #include
-#include
 #include
 #include
 
@@ -31,7 +30,6 @@ using symmetry_pointer = typename TensorFactory::symmetry_pointer;
 using sparsity_pointer = typename TensorFactory::sparsity_pointer;
 using logical_layout_pointer  = typename TensorFactory::logical_layout_pointer;
 using physical_layout_pointer = typename TensorFactory::physical_layout_pointer;
-using allocator_pointer = typename TensorFactory::allocator_pointer;
 using buffer_pointer = typename pimpl_type::buffer_pointer;
 
 // ---------------------------------------------------------------------------
@@ -67,11 +65,6 @@ physical_layout_pointer TensorFactory::default_physical_layout(
       logical.shape(), logical.symmetry(), logical.sparsity());
 }
 
-allocator_pointer TensorFactory::default_allocator(
-  const_physical_reference physical, runtime_view_type rv) {
-    return std::make_unique<allocator::Contiguous>(rv);
-}
-
 bool TensorFactory::can_make_logical_layout(const input_type& input) noexcept {
     return input.has_shape() || input.has_logical_layout();
 }
@@ -157,13 +150,10 @@ pimpl_pointer TensorFactory::construct(TensorInput input) {
             input.m_pphysical = default_physical_layout(*input.m_plogical);
         }
 
-        if(!input.has_allocator()) {
-            input.m_palloc = default_allocator(*input.m_pphysical, input.m_rv);
-        }
-
         // TODO: Check if we have initialization criteria
-        input.m_pbuffer =
-          input.m_palloc->allocate(std::move(input.m_pphysical));
+        auto buffer =
+          buffer::make_contiguous(input.m_pphysical->shape());
+        input.m_pbuffer = std::make_unique<buffer::Contiguous>(std::move(buffer));
     }
 
     // Now we have both a logical layout and a buffer so we're done
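Net effect of the `construct` hunk above: when the caller supplies no buffer, the factory now materializes a zero-initialized `Contiguous` buffer directly from the physical layout's shape rather than routing through `default_allocator`. Standalone, the default path reduces to the following (names illustrative; the element type is written explicitly here, while the hunk relies on `make_contiguous`'s default):

    const auto& shape = pphysical->shape();                 // layout's shape
    auto buffer  = buffer::make_contiguous<double>(shape);  // zero-filled
    auto pbuffer = std::make_unique<buffer::Contiguous>(std::move(buffer));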
diff --git a/src/tensorwrapper/tensor/detail_/tensor_factory.hpp b/src/tensorwrapper/tensor/detail_/tensor_factory.hpp
index 597cedd9..c8fb214a 100644
--- a/src/tensorwrapper/tensor/detail_/tensor_factory.hpp
+++ b/src/tensorwrapper/tensor/detail_/tensor_factory.hpp
@@ -66,7 +66,6 @@ class TensorFactory {
     using logical_layout_pointer   = input_type::logical_layout_pointer;
     using const_physical_reference = input_type::const_physical_reference;
     using physical_layout_pointer  = input_type::physical_layout_pointer;
-    using allocator_pointer = input_type::allocator_pointer;
     using runtime_view_type = input_type::runtime_view_type;
 
     // ------------------------------------------------------------------
@@ -140,21 +139,6 @@ class TensorFactory {
     static physical_layout_pointer default_physical_layout(
       const_logical_reference logical);
 
-    /** @brief Constructs an allocator consistent with the physical layout.
-     *
-     *  @param[in] physical The physical layout of the tensor we want to
-     *                      allocate.
-     *  @param[in] rv The runtime that tensors will be allocated in.
-     *
-     *  @return An allocator capable of allocating a tensor with the layout
-     *          @p physical using the resources in @p rv.
-     *
-     *  @throw std::bad_alloc if there is a problem allocating the return.
-     *         Strong throw guarantee.
-     */
-    static allocator_pointer default_allocator(
-      const_physical_reference physical, runtime_view_type rv);
-
     /** @brief Actually constructs the tensor's PIMPL.
      *
      *  This is the main entry point into this class (and is what callers
diff --git a/src/tensorwrapper/tensor/tensor_class.cpp b/src/tensorwrapper/tensor/tensor_class.cpp
index 0001b0db..454d283f 100644
--- a/src/tensorwrapper/tensor/tensor_class.cpp
+++ b/src/tensorwrapper/tensor/tensor_class.cpp
@@ -17,6 +17,7 @@
 #include "../layout/converter.hpp"
 #include "detail_/tensor_factory.hpp"
 #include "detail_/tensor_pimpl.hpp"
+#include
 #include
 
 namespace tensorwrapper {
@@ -116,8 +117,8 @@ Tensor::dsl_reference Tensor::binary_common_(FxnType&& fxn,
     const auto& lbuffer = lobject.buffer();
     const auto& rbuffer = robject.buffer();
 
-    auto palloc = lbuffer.allocator().clone();
-    auto pthis_buffer = palloc->allocate(std::move(pphys_layout));
+    auto buffer = buffer::make_contiguous(pphys_layout->shape());
+    auto pthis_buffer = std::make_unique<buffer::Contiguous>(std::move(buffer));
 
     fxn(*pthis_buffer, this_labels, lbuffer(llabels), rbuffer(rlabels));
diff --git a/src/tensorwrapper/utilities/block_diagonal_matrix.cpp b/src/tensorwrapper/utilities/block_diagonal_matrix.cpp
index 80ebc682..3f33cd36 100644
--- a/src/tensorwrapper/utilities/block_diagonal_matrix.cpp
+++ b/src/tensorwrapper/utilities/block_diagonal_matrix.cpp
@@ -13,7 +13,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-#include
 #include
 #include
 #include
@@ -27,16 +26,12 @@ namespace {
 
 // template
 // auto run(const buffer::BufferBase& b, const std::vector& matrices) {
-//     using allocator_type = tensorwrapper::allocator::Eigen;
 
 //     // All inputs must be Rank 2, square, and the same floating point
 //     // type. If so, sum their extent sizes.
 //     std::size_t size = 0;
 //     for(const auto& matrix : matrices) {
-//         if(!allocator_type::can_rebind(matrix.buffer()))
-//             throw std::runtime_error(
-//                 "All inputs must have the same floating point type");
 
 //         if(matrix.rank() != 2)
 //             throw std::runtime_error(
@@ -52,7 +47,6 @@ namespace {
 //     }
 
 //     // Allocate new buffer
-//     allocator_type allocator(b.allocator().runtime());
 //     shape::Smooth oshape{size, size};
 //     layout::Physical olayout(oshape);
 //     auto obuffer = allocator.construct(olayout, 0.0);
diff --git a/src/tensorwrapper/utilities/to_json.cpp b/src/tensorwrapper/utilities/to_json.cpp
index f836fb08..dd01fb39 100644
--- a/src/tensorwrapper/utilities/to_json.cpp
+++ b/src/tensorwrapper/utilities/to_json.cpp
@@ -14,7 +14,6 @@
  * limitations under the License.
*/ -#include #include #include @@ -47,11 +46,8 @@ void to_json_(std::ostream& os, const buffer_type& t, offset_vector index) { std::ostream& to_json(std::ostream& os, const Tensor& t) { offset_vector i; - auto pbuffer_down = dynamic_cast(&t.buffer()); - if(pbuffer_down == nullptr) - throw std::runtime_error( - "to_json only supports tensors with Contiguous buffers"); - to_json_(os, *pbuffer_down, i); + auto buffer_down = buffer::make_contiguous(t.buffer()); + to_json_(os, buffer_down, i); return os; } diff --git a/tests/cxx/unit_tests/tensorwrapper/allocator/contiguous.cpp b/tests/cxx/unit_tests/tensorwrapper/allocator/contiguous.cpp deleted file mode 100644 index 74188081..00000000 --- a/tests/cxx/unit_tests/tensorwrapper/allocator/contiguous.cpp +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Copyright 2024 NWChemEx-Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "../testing/testing.hpp" -#include -#include -#include -#include - -using namespace tensorwrapper; - -using types2test = types::floating_point_types; - -TEMPLATE_LIST_TEST_CASE("allocator::Contiguous", "", types2test) { - using alloc_type = allocator::Contiguous; - - parallelzone::runtime::RuntimeView rv; - // auto scalar_layout = testing::scalar_physical(); - // auto vector_layout = testing::vector_physical(2); - // auto matrix_layout = testing::matrix_physical(2, 2); - // using layout_type = decltype(scalar_layout); - - // auto pscalar_corr = testing::eigen_scalar(); - // auto& scalar_corr = *pscalar_corr; - // scalar_corr.set_elem({}, 0.0); - - // auto pvector_corr = testing::eigen_vector(2); - // auto& vector_corr = *pvector_corr; - // vector_corr.set_elem({0}, 1); - // vector_corr.set_elem({1}, 1); - - // auto pmatrix_corr = testing::eigen_matrix(2, 2); - // auto& matrix_corr = *pmatrix_corr; - // matrix_corr.set_elem({0, 0}, 2); - // matrix_corr.set_elem({0, 1}, 2); - // matrix_corr.set_elem({1, 0}, 2); - // matrix_corr.set_elem({1, 1}, 2); - - // alloc_type alloc(rv); - - // SECTION("Ctor") { - // SECTION("runtime") { REQUIRE(alloc.runtime() == rv); } - // testing::test_copy_and_move_ctors(alloc); - // } - - // SECTION("allocate(Layout)") { - // // N.b. allocate doesn't initialize tensor, so only compare layouts - // auto pscalar = alloc.allocate(scalar_layout); - // REQUIRE(pscalar->layout().are_equal(scalar_layout)); - - // auto pvector = alloc.allocate(vector_layout); - // REQUIRE(pvector->layout().are_equal(vector_layout)); - - // auto pmatrix = alloc.allocate(matrix_layout); - // REQUIRE(pmatrix->layout().are_equal(matrix_layout)); - - // // Works if ranks don't match - // pvector = alloc.allocate(vector_layout); - // REQUIRE(pvector->layout().are_equal(vector_layout)); - // } - - // SECTION("allocate(std::unique_ptr)") { - // // N.b. 
allocate doesn't initialize tensor, so only compare layouts - // auto pscalar_layout = std::make_unique(scalar_layout); - // auto pscalar = alloc.allocate(std::move(pscalar_layout)); - // REQUIRE(pscalar->layout().are_equal(scalar_layout)); - - // auto pvector_layout = std::make_unique(vector_layout); - // auto pvector = alloc.allocate(std::move(pvector_layout)); - // REQUIRE(pvector->layout().are_equal(vector_layout)); - - // auto pmatrix_layout = std::make_unique(matrix_layout); - // auto pmatrix = alloc.allocate(std::move(pmatrix_layout)); - // REQUIRE(pmatrix->layout().are_equal(matrix_layout)); - // } - - // SECTION("construct(value)") { - // auto pscalar = alloc.construct(scalar_layout, 0); - // REQUIRE(*pscalar == scalar_corr); - - // auto pvector = alloc.construct(vector_layout, 1); - // REQUIRE(*pvector == vector_corr); - - // auto pmatrix_layout = std::make_unique(matrix_layout); - // auto pmatrix = alloc.construct(std::move(pmatrix_layout), 2); - // REQUIRE(*pmatrix == matrix_corr); - // } - - // SECTION("can_rebind") { REQUIRE(alloc.can_rebind(scalar_corr)); } - - // SECTION("rebind(non-const)") { - // using type = typename alloc_type::buffer_base_reference; - // type scalar_base = scalar_corr; - // auto& eigen_buffer = alloc.rebind(scalar_base); - // REQUIRE(&eigen_buffer == &scalar_corr); - // } - - // SECTION("rebind(const)") { - // using type = typename - // alloc_type::const_buffer_base_reference; type scalar_base = - // scalar_corr; auto& eigen_buffer = alloc.rebind(scalar_base); - // REQUIRE(&eigen_buffer == &scalar_corr); - // } - - // SECTION("operator==") { REQUIRE(alloc == alloc_type(rv)); } - - // SECTION("virtual_methods") { - // SECTION("clone") { - // auto pscalar = alloc.clone(); - // REQUIRE(pscalar->are_equal(alloc)); - // } - - // SECTION("are_equal") { REQUIRE(alloc.are_equal(alloc_type(rv))); } - // } -} diff --git a/tests/cxx/unit_tests/tensorwrapper/buffer/buffer_base.cpp b/tests/cxx/unit_tests/tensorwrapper/buffer/buffer_base.cpp index 99a16391..6de12348 100644 --- a/tests/cxx/unit_tests/tensorwrapper/buffer/buffer_base.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/buffer/buffer_base.cpp @@ -58,23 +58,12 @@ TEST_CASE("BufferBase") { REQUIRE(vector_base.has_layout()); } - SECTION("has_allocator") { REQUIRE_FALSE(defaulted_base.has_allocator()); } - SECTION("layout") { REQUIRE_THROWS_AS(defaulted_base.layout(), std::runtime_error); REQUIRE(scalar_base.layout().are_equal(scalar_layout)); REQUIRE(vector_base.layout().are_equal(vector_layout)); } - SECTION("allocator()") { - REQUIRE_THROWS_AS(defaulted_base.allocator(), std::runtime_error); - } - - SECTION("allocator() const") { - REQUIRE_THROWS_AS(std::as_const(defaulted_base).allocator(), - std::runtime_error); - } - SECTION("operator==") { // Defaulted layout == defaulted layout REQUIRE(defaulted_base == buffer::Contiguous{}); diff --git a/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_input.cpp b/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_input.cpp index ae3983c2..8fc678a1 100644 --- a/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_input.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_input.cpp @@ -53,7 +53,6 @@ TEST_CASE("TensorInput") { REQUIRE(defaulted.m_psparsity == nullptr); REQUIRE(defaulted.m_plogical == nullptr); REQUIRE(defaulted.m_pphysical == nullptr); - REQUIRE(defaulted.m_palloc == nullptr); REQUIRE(defaulted.m_pbuffer == nullptr); REQUIRE(defaulted.m_rv == rv); } @@ -64,7 +63,6 @@ TEST_CASE("TensorInput") { REQUIRE(scalar.m_psparsity == 
nullptr); REQUIRE(scalar.m_plogical == nullptr); REQUIRE(scalar.m_pphysical == nullptr); - REQUIRE(scalar.m_palloc == nullptr); REQUIRE(scalar.m_pbuffer == nullptr); REQUIRE(scalar.m_rv == rv); @@ -82,7 +80,6 @@ TEST_CASE("TensorInput") { REQUIRE(i.m_psparsity == nullptr); REQUIRE(i.m_plogical == nullptr); REQUIRE(i.m_pphysical == nullptr); - REQUIRE(i.m_palloc == nullptr); REQUIRE(i.m_pbuffer == nullptr); REQUIRE(i.m_rv == rv); @@ -95,7 +92,6 @@ TEST_CASE("TensorInput") { REQUIRE(symm_matrix.m_psparsity == nullptr); REQUIRE(symm_matrix.m_plogical == nullptr); REQUIRE(symm_matrix.m_pphysical == nullptr); - REQUIRE(symm_matrix.m_palloc == nullptr); REQUIRE(symm_matrix.m_pbuffer == nullptr); REQUIRE(symm_matrix.m_rv == rv); @@ -112,7 +108,6 @@ TEST_CASE("TensorInput") { REQUIRE(i.m_psparsity == nullptr); REQUIRE(i.m_plogical == nullptr); REQUIRE(i.m_pphysical == nullptr); - REQUIRE(i.m_palloc == nullptr); REQUIRE(i.m_pbuffer == nullptr); REQUIRE(i.m_rv == rv); @@ -126,7 +121,6 @@ TEST_CASE("TensorInput") { REQUIRE(*i.m_psparsity == sparsity); REQUIRE(i.m_plogical == nullptr); REQUIRE(i.m_pphysical == nullptr); - REQUIRE(i.m_palloc == nullptr); REQUIRE(i.m_pbuffer == nullptr); REQUIRE(i.m_rv == rv); @@ -143,7 +137,6 @@ TEST_CASE("TensorInput") { REQUIRE(i.m_psparsity.get() == psparsity_address); REQUIRE(i.m_plogical == nullptr); REQUIRE(i.m_pphysical == nullptr); - REQUIRE(i.m_palloc == nullptr); REQUIRE(i.m_pbuffer == nullptr); REQUIRE(i.m_rv == rv); @@ -157,7 +150,6 @@ TEST_CASE("TensorInput") { REQUIRE(i.m_psparsity == nullptr); REQUIRE(i.m_plogical->are_equal(logical)); REQUIRE(i.m_pphysical == nullptr); - REQUIRE(i.m_palloc == nullptr); REQUIRE(i.m_pbuffer == nullptr); REQUIRE(i.m_rv == rv); @@ -174,7 +166,6 @@ TEST_CASE("TensorInput") { REQUIRE(i.m_plogical->are_equal(logical)); REQUIRE(i.m_plogical.get() == plogical_address); REQUIRE(i.m_pphysical == nullptr); - REQUIRE(i.m_palloc == nullptr); REQUIRE(i.m_pbuffer == nullptr); REQUIRE(i.m_rv == rv); @@ -188,7 +179,6 @@ TEST_CASE("TensorInput") { REQUIRE(i.m_psparsity == nullptr); REQUIRE(i.m_plogical->are_equal(logical)); REQUIRE(i.m_pphysical->are_equal(physical)); - REQUIRE(i.m_palloc == nullptr); REQUIRE(i.m_pbuffer == nullptr); REQUIRE(i.m_rv == rv); @@ -205,7 +195,6 @@ TEST_CASE("TensorInput") { REQUIRE(i.m_plogical->are_equal(logical)); REQUIRE(i.m_pphysical->are_equal(physical)); REQUIRE(i.m_pphysical.get() == pphysical_address); - REQUIRE(i.m_palloc == nullptr); REQUIRE(i.m_pbuffer == nullptr); REQUIRE(i.m_rv == rv); @@ -220,7 +209,6 @@ TEST_CASE("TensorInput") { // REQUIRE(i.m_psparsity == nullptr); // REQUIRE(i.m_plogical->are_equal(logical)); // REQUIRE(i.m_pphysical->are_equal(physical)); - // REQUIRE(i.m_palloc->are_equal(alloc)); // // REQUIRE(i.m_pbuffer->are_equal(buffer)); // REQUIRE(i.m_rv == rv); @@ -238,7 +226,6 @@ TEST_CASE("TensorInput") { // REQUIRE(i.m_psparsity == nullptr); // REQUIRE(i.m_plogical->are_equal(logical)); // REQUIRE(i.m_pphysical->are_equal(physical)); - // REQUIRE(i.m_palloc->are_equal(alloc)); // // REQUIRE(i.m_pbuffer->are_equal(buffer)); // REQUIRE(i.m_pbuffer.get() == buffer_address); // REQUIRE(i.m_rv == rv); @@ -253,7 +240,6 @@ TEST_CASE("TensorInput") { REQUIRE(i.m_psparsity == nullptr); REQUIRE(i.m_plogical == nullptr); REQUIRE(i.m_pphysical == nullptr); - REQUIRE(i.m_palloc == nullptr); REQUIRE(i.m_pbuffer == nullptr); REQUIRE(i.m_rv == rv); } From 3f1964c6b980cd28243c84ad1d57ad7acb98bc88 Mon Sep 17 00:00:00 2001 From: "Ryan M. 
Richard" Date: Mon, 5 Jan 2026 11:13:24 -0600 Subject: [PATCH 05/13] compiles --- include/tensorwrapper/buffer/buffer_base.hpp | 7 +++ include/tensorwrapper/buffer/contiguous.hpp | 22 +++++++ src/tensorwrapper/buffer/contiguous.cpp | 14 +++++ .../detail_/unary_operation_visitor.hpp | 19 ++++++ .../operations/approximately_equal.cpp | 38 +----------- .../tensorwrapper/buffer/contiguous.cpp | 52 ++++++++++++++++ .../detail_/unary_operation_visitor.cpp | 59 +++++++++++++++++++ 7 files changed, 174 insertions(+), 37 deletions(-) diff --git a/include/tensorwrapper/buffer/buffer_base.hpp b/include/tensorwrapper/buffer/buffer_base.hpp index 5d16ab07..9926c1e6 100644 --- a/include/tensorwrapper/buffer/buffer_base.hpp +++ b/include/tensorwrapper/buffer/buffer_base.hpp @@ -144,6 +144,10 @@ class BufferBase : public tensorwrapper::detail_::PolymorphicBase, return !(*this == rhs); } + bool approximately_equal(const BufferBase& rhs, double tol) const { + return approximately_equal_(rhs, tol); + } + protected: // ------------------------------------------------------------------------- // -- Ctors, assignment @@ -226,6 +230,9 @@ class BufferBase : public tensorwrapper::detail_::PolymorphicBase, dsl_reference permute_assignment_(label_type this_labels, const_labeled_reference rhs) override; + virtual bool approximately_equal_(const BufferBase& rhs, + double tol) const = 0; + private: template dsl_reference binary_op_common_(FxnType&& fxn, label_type this_labels, diff --git a/include/tensorwrapper/buffer/contiguous.hpp b/include/tensorwrapper/buffer/contiguous.hpp index 63a52bcc..303ecab3 100644 --- a/include/tensorwrapper/buffer/contiguous.hpp +++ b/include/tensorwrapper/buffer/contiguous.hpp @@ -19,6 +19,7 @@ #include #include #include +#include namespace tensorwrapper::buffer { @@ -284,6 +285,9 @@ class Contiguous : public Replicated { dsl_reference scalar_multiplication_(label_type this_labels, double scalar, const_labeled_reference rhs) override; + bool approximately_equal_(const_buffer_base_reference rhs, + double tol) const override; + /// Calls add_to_stream_ on a stringstream to implement string_type to_string_() const override; @@ -334,6 +338,24 @@ class Contiguous : public Replicated { buffer_type m_buffer_; }; +template +decltype(auto) visit_contiguous_buffer(KernelType&& kernel, + buffer::Contiguous& buffer) { + using fp_types = types::floating_point_types; + auto wtf_buffer = buffer.get_mutable_data(); + return wtf::buffer::visit_contiguous_buffer_view( + std::forward(kernel), wtf_buffer); +} + +template +decltype(auto) visit_contiguous_buffer(KernelType&& kernel, + const buffer::Contiguous& buffer) { + using fp_types = types::floating_point_types; + auto wtf_buffer = buffer.get_immutable_data(); + return wtf::buffer::visit_contiguous_buffer_view( + std::forward(kernel), wtf_buffer); +} + template Contiguous make_contiguous(const shape::ShapeBase& shape) { auto smooth_view = shape.as_smooth(); diff --git a/src/tensorwrapper/buffer/contiguous.cpp b/src/tensorwrapper/buffer/contiguous.cpp index a23a59dc..1c19a28e 100644 --- a/src/tensorwrapper/buffer/contiguous.cpp +++ b/src/tensorwrapper/buffer/contiguous.cpp @@ -218,6 +218,20 @@ auto Contiguous::scalar_multiplication_(label_type this_labels, double scalar, return *this; } +bool Contiguous::approximately_equal_(const_buffer_base_reference rhs, + double tol) const { + const auto& rhs_down = downcast(rhs); + if(rank() != rhs_down.rank()) return false; + + std::string index(rank() ? 
"i0" : ""); + for(std::size_t i = 1; i < rank(); ++i) index += (",i" + std::to_string(i)); + Contiguous result; + result(index) = (*this)(index)-rhs_down(index); + + detail_::ApproximatelyEqualVisitor k(tol); + return buffer::visit_contiguous_buffer(k, result); +} + auto Contiguous::to_string_() const -> string_type { std::stringstream ss; add_to_stream_(ss); diff --git a/src/tensorwrapper/buffer/detail_/unary_operation_visitor.hpp b/src/tensorwrapper/buffer/detail_/unary_operation_visitor.hpp index 4a99c003..2a392a3b 100644 --- a/src/tensorwrapper/buffer/detail_/unary_operation_visitor.hpp +++ b/src/tensorwrapper/buffer/detail_/unary_operation_visitor.hpp @@ -143,4 +143,23 @@ class ScalarMultiplicationVisitor : public UnaryOperationVisitor { scalar_type m_scalar_; }; +class ApproximatelyEqualVisitor { +public: + explicit ApproximatelyEqualVisitor(double tol) : m_tol_(tol) {} + + template + bool operator()(const std::span result) { + const FloatType zero{0.0}; + const FloatType ptol = static_cast(m_tol_); + for(std::size_t i = 0; i < result.size(); ++i) { + auto diff = result[i]; + if(diff < zero) diff *= -1.0; + if(diff >= ptol) return false; + } + return true; + } + +private: + double m_tol_; +}; } // namespace tensorwrapper::buffer::detail_ diff --git a/src/tensorwrapper/operations/approximately_equal.cpp b/src/tensorwrapper/operations/approximately_equal.cpp index 9e3a08eb..e5c0b335 100644 --- a/src/tensorwrapper/operations/approximately_equal.cpp +++ b/src/tensorwrapper/operations/approximately_equal.cpp @@ -13,48 +13,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -#include #include -#include namespace tensorwrapper::operations { -namespace { - -struct Kernel { - Kernel(double tolerance) : tol(tolerance) {} - - template - bool operator()(const std::span result) { - const FloatType zero{0.0}; - const FloatType ptol = static_cast(tol); - for(std::size_t i = 0; i < result.size(); ++i) { - auto diff = result[i]; - if(diff < zero) diff *= -1.0; - if(diff >= ptol) return false; - } - return true; - } - - double tol; -}; - -} // namespace bool approximately_equal(const Tensor& lhs, const Tensor& rhs, double tol) { - if(lhs.rank() != rhs.rank()) return false; - - std::string index(lhs.rank() ? 
"i0" : ""); - for(std::size_t i = 1; i < lhs.rank(); ++i) - index += (",i" + std::to_string(i)); - Tensor result; - result(index) = lhs(index) - rhs(index); - - const auto& buffer_down = make_contiguous(result.buffer()); - Kernel k(tol); - throw std::runtime_error("Fix me!!!!"); - // return wtf::buffer::visit_contiguous_buffer( - // k, buffer_down.get_immutable_data()); + return lhs.buffer().approximately_equal(rhs.buffer(), tol); } } // namespace tensorwrapper::operations diff --git a/tests/cxx/unit_tests/tensorwrapper/buffer/contiguous.cpp b/tests/cxx/unit_tests/tensorwrapper/buffer/contiguous.cpp index 55bbf89b..415b2dd7 100644 --- a/tests/cxx/unit_tests/tensorwrapper/buffer/contiguous.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/buffer/contiguous.cpp @@ -250,6 +250,58 @@ TEMPLATE_LIST_TEST_CASE("Contiguous", "", types::floating_point_types) { REQUIRE_FALSE(matrix == Contiguous(diff_data, matrix_shape)); } + SECTION("approximately_equal") { + Contiguous scalar2(std::vector{one}, scalar_shape); + Contiguous vector2(data, vector_shape); + Contiguous matrix2(data, matrix_shape); + double default_tol = 1e-16; + SECTION("different ranks") { + REQUIRE_FALSE(scalar.approximately_equal(vector, default_tol)); + REQUIRE_FALSE(scalar.approximately_equal(matrix, default_tol)); + REQUIRE_FALSE(vector.approximately_equal(scalar, default_tol)); + REQUIRE_FALSE(vector.approximately_equal(matrix, default_tol)); + REQUIRE_FALSE(matrix.approximately_equal(scalar, default_tol)); + REQUIRE_FALSE(matrix.approximately_equal(vector, default_tol)); + } + + SECTION("Same values") { + REQUIRE(scalar.approximately_equal(scalar2, default_tol)); + REQUIRE(scalar2.approximately_equal(scalar, default_tol)); + REQUIRE(vector.approximately_equal(vector2, default_tol)); + REQUIRE(vector2.approximately_equal(vector, default_tol)); + REQUIRE(matrix.approximately_equal(matrix2, default_tol)); + REQUIRE(matrix2.approximately_equal(matrix, default_tol)); + } + + SECTION("Differ by more than provided tolerance") { + TestType diff = 1e-1; + scalar2.set_elem({}, one + diff); + vector2.set_elem({0}, one + diff); + matrix2.set_elem({0, 0}, one + diff); + double tol = 1e-1; + REQUIRE_FALSE(scalar.approximately_equal(scalar2, tol)); + REQUIRE_FALSE(scalar2.approximately_equal(scalar, tol)); + REQUIRE_FALSE(vector.approximately_equal(vector2, tol)); + REQUIRE_FALSE(vector2.approximately_equal(vector, tol)); + REQUIRE_FALSE(matrix.approximately_equal(matrix2, tol)); + REQUIRE_FALSE(matrix2.approximately_equal(matrix, tol)); + } + + SECTION("Differ by less than provided tolerance") { + TestType diff = 1e-10; + double tol = 1e-10; + scalar2.set_elem({}, one + diff); + vector2.set_elem({0}, one + diff); + matrix2.set_elem({0, 0}, one + diff); + REQUIRE(scalar.approximately_equal(scalar2, tol)); + REQUIRE(scalar2.approximately_equal(scalar, tol)); + REQUIRE(vector.approximately_equal(vector2, tol)); + REQUIRE(vector2.approximately_equal(vector, tol)); + REQUIRE(matrix.approximately_equal(matrix2, tol)); + REQUIRE(matrix2.approximately_equal(matrix, tol)); + } + } + SECTION("addition_assignment_") { SECTION("scalar") { label_type labels(""); diff --git a/tests/cxx/unit_tests/tensorwrapper/buffer/detail_/unary_operation_visitor.cpp b/tests/cxx/unit_tests/tensorwrapper/buffer/detail_/unary_operation_visitor.cpp index 83d9b675..eb4b8200 100644 --- a/tests/cxx/unit_tests/tensorwrapper/buffer/detail_/unary_operation_visitor.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/buffer/detail_/unary_operation_visitor.cpp @@ -149,3 +149,62 @@ 
TEMPLATE_LIST_TEST_CASE("ScalarMultiplicationVisitor", "[buffer][detail_]", REQUIRE(this_buffer.at(5) == TestType(6.0) * scalar); } } + +TEMPLATE_LIST_TEST_CASE("ApproximatelyEqualVisitor", "[buffer][detail_]", + types::floating_point_types) { + using VisitorType = buffer::detail_::ApproximatelyEqualVisitor; + using vector_type = std::vector; + using span_type = std::span; + using cspan_type = std::span; + double default_tol = 1e-16; + + vector_type scalar_diff{0.000001}; + vector_type scalar_same{0.0}; + vector_type vector_diff{0.000001, -0.000001}; + vector_type vector_same{0.0, 0.0}; + + span_type scalar_diff_span(scalar_diff.data(), scalar_diff.size()); + cspan_type cscalar_diff_span(scalar_diff.data(), scalar_diff.size()); + span_type scalar_same_span(scalar_same.data(), scalar_same.size()); + cspan_type cscalar_same_span(scalar_same.data(), scalar_same.size()); + span_type vector_diff_span(vector_diff.data(), vector_diff.size()); + cspan_type cvector_diff_span(vector_diff.data(), vector_diff.size()); + span_type vector_same_span(vector_same.data(), vector_same.size()); + cspan_type cvector_same_span(vector_same.data(), vector_same.size()); + + SECTION("Differ by more than default tolerance") { + VisitorType v(default_tol); + REQUIRE_FALSE(v(scalar_diff_span)); + REQUIRE_FALSE(v(cscalar_diff_span)); + REQUIRE_FALSE(v(vector_diff_span)); + REQUIRE_FALSE(v(cvector_diff_span)); + } + + SECTION("Differ by less than default tolerance") { + VisitorType v(default_tol); + REQUIRE(v(scalar_same_span)); + REQUIRE(v(cscalar_same_span)); + REQUIRE(v(vector_same_span)); + REQUIRE(v(cvector_same_span)); + } + + SECTION("Differ by more than provided tolerance") { + VisitorType v(1e-8); + REQUIRE_FALSE(v(scalar_diff_span)); + REQUIRE_FALSE(v(cscalar_diff_span)); + REQUIRE_FALSE(v(vector_diff_span)); + REQUIRE_FALSE(v(cvector_diff_span)); + } + + SECTION("Differ by less than provided tolerance") { + VisitorType v(1e-1); + REQUIRE(v(scalar_diff_span)); + REQUIRE(v(cscalar_diff_span)); + REQUIRE(v(vector_diff_span)); + REQUIRE(v(cvector_diff_span)); + REQUIRE(v(scalar_same_span)); + REQUIRE(v(cscalar_same_span)); + REQUIRE(v(vector_same_span)); + REQUIRE(v(cvector_same_span)); + } +} From ada28eb8686582d159d4298206c0af89696fc43d Mon Sep 17 00:00:00 2001 From: "Ryan M. 
Richard" Date: Mon, 5 Jan 2026 14:22:16 -0600 Subject: [PATCH 06/13] can now make FloatBuffer directly from Float objects --- include/tensorwrapper/buffer/contiguous.hpp | 2 ++ src/tensorwrapper/buffer/contiguous.cpp | 6 ++++ .../detail_/unary_operation_visitor.hpp | 13 +++++++++ src/tensorwrapper/operations/norm.cpp | 28 +++---------------- 4 files changed, 25 insertions(+), 24 deletions(-) diff --git a/include/tensorwrapper/buffer/contiguous.hpp b/include/tensorwrapper/buffer/contiguous.hpp index 303ecab3..9c0921dc 100644 --- a/include/tensorwrapper/buffer/contiguous.hpp +++ b/include/tensorwrapper/buffer/contiguous.hpp @@ -241,6 +241,8 @@ class Contiguous : public Replicated { */ const_buffer_view get_immutable_data() const; + value_type infinity_norm() const; + // ------------------------------------------------------------------------- // -- Utility Methods // ------------------------------------------------------------------------- diff --git a/src/tensorwrapper/buffer/contiguous.cpp b/src/tensorwrapper/buffer/contiguous.cpp index 1c19a28e..8714cf66 100644 --- a/src/tensorwrapper/buffer/contiguous.cpp +++ b/src/tensorwrapper/buffer/contiguous.cpp @@ -79,6 +79,12 @@ auto Contiguous::get_immutable_data() const -> const_buffer_view { return m_buffer_; } +auto Contiguous::infinity_norm() const -> value_type { + detail_::InfinityNormVisitor visitor; + auto v = wtf::buffer::visit_contiguous_buffer(visitor, m_buffer_); + return value_type{v}; +} + // ----------------------------------------------------------------------------- // -- Utility Methods // ----------------------------------------------------------------------------- diff --git a/src/tensorwrapper/buffer/detail_/unary_operation_visitor.hpp b/src/tensorwrapper/buffer/detail_/unary_operation_visitor.hpp index 2a392a3b..c3970d9d 100644 --- a/src/tensorwrapper/buffer/detail_/unary_operation_visitor.hpp +++ b/src/tensorwrapper/buffer/detail_/unary_operation_visitor.hpp @@ -162,4 +162,17 @@ class ApproximatelyEqualVisitor { private: double m_tol_; }; + +struct InfinityNormVisitor { + template + auto operator()(const std::span buffer) { + FloatType max_element{0.0}; + for(std::size_t i = 0; i < buffer.size(); ++i) { + auto elem = types::fabs(buffer[i]); + if(elem > max_element) max_element = elem; + } + return max_element; + } +}; + } // namespace tensorwrapper::buffer::detail_ diff --git a/src/tensorwrapper/operations/norm.cpp b/src/tensorwrapper/operations/norm.cpp index 8c26631e..705b4ada 100644 --- a/src/tensorwrapper/operations/norm.cpp +++ b/src/tensorwrapper/operations/norm.cpp @@ -14,34 +14,14 @@ * limitations under the License. 
*/ #include -#include -#include -#include +#include namespace tensorwrapper::operations { -namespace { -struct InfinityKernel { - template - auto operator()(const std::span buffer) { - FloatType max_element{0.0}; - for(std::size_t i = 0; i < buffer.size(); ++i) { - auto elem = types::fabs(buffer[i]); - if(elem > max_element) max_element = elem; - } - shape::Smooth s{}; - std::vector data{max_element}; - auto pbuffer = std::make_unique(data, s); - return Tensor(s, std::move(pbuffer)); - } -}; -} // namespace Tensor infinity_norm(const Tensor& t) { - // const auto& buffer_down = make_contiguous(t.buffer()); - // InfinityKernel kernel; - throw std::runtime_error("Fix me!!!!"); - // return wtf::buffer::visit_contiguous_buffer( - // kernel, buffer_down.get_immutable_data()); + const auto& buffer_down = buffer::make_contiguous(t.buffer()); + auto max_value = buffer_down.infinity_norm(); + throw std::runtime_error("Fix Me!!!!"); } } // namespace tensorwrapper::operations From 8ec99043fc5db37d3702c4c47385c9428566645b Mon Sep 17 00:00:00 2001 From: "Ryan M. Richard" Date: Tue, 6 Jan 2026 10:21:50 -0600 Subject: [PATCH 07/13] norm compiles again --- src/tensorwrapper/buffer/contiguous.cpp | 6 ++++-- .../buffer/detail_/unary_operation_visitor.hpp | 4 ++-- src/tensorwrapper/operations/norm.cpp | 11 ++++++++++- .../unit_tests/tensorwrapper/buffer/contiguous.cpp | 7 +++++++ 4 files changed, 23 insertions(+), 5 deletions(-) diff --git a/src/tensorwrapper/buffer/contiguous.cpp b/src/tensorwrapper/buffer/contiguous.cpp index 8714cf66..830bf099 100644 --- a/src/tensorwrapper/buffer/contiguous.cpp +++ b/src/tensorwrapper/buffer/contiguous.cpp @@ -80,9 +80,11 @@ auto Contiguous::get_immutable_data() const -> const_buffer_view { } auto Contiguous::infinity_norm() const -> value_type { + if(m_buffer_.size() == 0) + throw std::runtime_error( + "Cannot compute the infinity norm of an empty tensor."); detail_::InfinityNormVisitor visitor; - auto v = wtf::buffer::visit_contiguous_buffer(visitor, m_buffer_); - return value_type{v}; + return wtf::buffer::visit_contiguous_buffer(visitor, m_buffer_); } // ----------------------------------------------------------------------------- diff --git a/src/tensorwrapper/buffer/detail_/unary_operation_visitor.hpp b/src/tensorwrapper/buffer/detail_/unary_operation_visitor.hpp index c3970d9d..34ac9d6f 100644 --- a/src/tensorwrapper/buffer/detail_/unary_operation_visitor.hpp +++ b/src/tensorwrapper/buffer/detail_/unary_operation_visitor.hpp @@ -166,12 +166,12 @@ class ApproximatelyEqualVisitor { struct InfinityNormVisitor { template auto operator()(const std::span buffer) { - FloatType max_element{0.0}; + std::decay_t max_element{0.0}; for(std::size_t i = 0; i < buffer.size(); ++i) { auto elem = types::fabs(buffer[i]); if(elem > max_element) max_element = elem; } - return max_element; + return wtf::fp::make_float(max_element); } }; diff --git a/src/tensorwrapper/operations/norm.cpp b/src/tensorwrapper/operations/norm.cpp index 705b4ada..47eba640 100644 --- a/src/tensorwrapper/operations/norm.cpp +++ b/src/tensorwrapper/operations/norm.cpp @@ -15,13 +15,22 @@ */ #include #include +#include +#include namespace tensorwrapper::operations { Tensor infinity_norm(const Tensor& t) { const auto& buffer_down = buffer::make_contiguous(t.buffer()); auto max_value = buffer_down.infinity_norm(); - throw std::runtime_error("Fix Me!!!!"); + std::initializer_list il{max_value}; + using fp_types = types::floating_point_types; + auto wtf_buffer = wtf::buffer::make_float_buffer(il); + shape::Smooth 
shape; + buffer::Contiguous buffer(std::move(wtf_buffer), shape); + layout::Physical playout(shape); + layout::Logical llayout(shape); + return Tensor(std::move(playout), std::move(llayout), std::move(buffer)); } } // namespace tensorwrapper::operations diff --git a/tests/cxx/unit_tests/tensorwrapper/buffer/contiguous.cpp b/tests/cxx/unit_tests/tensorwrapper/buffer/contiguous.cpp index 415b2dd7..3fc6cfd5 100644 --- a/tests/cxx/unit_tests/tensorwrapper/buffer/contiguous.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/buffer/contiguous.cpp @@ -220,6 +220,13 @@ TEMPLATE_LIST_TEST_CASE("Contiguous", "", types::floating_point_types) { REQUIRE(matrix.get_elem({1, 0}) == one); } + SECTION("infinity_norm") { + REQUIRE_THROWS_AS(defaulted.infinity_norm(), std::runtime_error); + REQUIRE(scalar.infinity_norm() == one); + REQUIRE(vector.infinity_norm() == four); + REQUIRE(matrix.infinity_norm() == four); + } + SECTION("operator==") { // Same object REQUIRE(defaulted == defaulted); From 41e36fe72494692c32b876c7290686ad48b3f0a2 Mon Sep 17 00:00:00 2001 From: "Ryan M. Richard" Date: Tue, 6 Jan 2026 15:52:36 -0600 Subject: [PATCH 08/13] fixes to_json --- src/tensorwrapper/utilities/to_json.cpp | 3 +- .../operations/approximately_equal.cpp | 36 +++++++++---------- 2 files changed, 19 insertions(+), 20 deletions(-) diff --git a/src/tensorwrapper/utilities/to_json.cpp b/src/tensorwrapper/utilities/to_json.cpp index dd01fb39..22f7b7db 100644 --- a/src/tensorwrapper/utilities/to_json.cpp +++ b/src/tensorwrapper/utilities/to_json.cpp @@ -28,8 +28,7 @@ void to_json_(std::ostream& os, const buffer_type& t, offset_vector index) { const auto& shape = t.layout().shape().as_smooth(); auto rank = index.size(); if(rank == t.rank()) { - throw std::runtime_error("Fix me!"); - // os << t.get_elem(index); + os << t.get_elem(index).to_string(); return; } else { auto n_elements = shape.extent(rank); diff --git a/tests/cxx/unit_tests/tensorwrapper/operations/approximately_equal.cpp b/tests/cxx/unit_tests/tensorwrapper/operations/approximately_equal.cpp index d73a1e4a..d874031b 100644 --- a/tests/cxx/unit_tests/tensorwrapper/operations/approximately_equal.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/operations/approximately_equal.cpp @@ -32,16 +32,16 @@ using namespace operations; TEMPLATE_LIST_TEST_CASE("approximately_equal", "", types::floating_point_types) { auto pscalar = testing::eigen_scalar(); - pscalar->set_elem({}, 42.0); + pscalar->set_elem({}, TestType{42.0}); auto pvector = testing::eigen_vector(2); - pvector->set_elem({0}, 1.23); - pvector->set_elem({1}, 2.34); + pvector->set_elem({0}, TestType{1.23}); + pvector->set_elem({1}, TestType{2.34}); auto pscalar2 = testing::eigen_scalar(); - pscalar2->set_elem({}, 42.0); + pscalar2->set_elem({}, TestType{42.0}); auto pvector2 = testing::eigen_vector(2); - pvector2->set_elem({0}, 1.23); - pvector2->set_elem({1}, 2.34); + pvector2->set_elem({0}, TestType{1.23}); + pvector2->set_elem({1}, TestType{2.34}); shape::Smooth s0{}; shape::Smooth s1{2}; @@ -65,9 +65,9 @@ TEMPLATE_LIST_TEST_CASE("approximately_equal", "", } SECTION("Differ by more than default tolerance") { - double value = 1e-1; - pscalar2->set_elem({}, 42.0 + value); - pvector2->set_elem({0}, 1.23 + value); + TestType value = 1e-1; + pscalar2->set_elem({}, TestType{42.0} + value); + pvector2->set_elem({0}, TestType{1.23} + value); Tensor scalar2(s0, std::move(pscalar2)); Tensor vector2(s1, std::move(pvector2)); REQUIRE_FALSE(approximately_equal(scalar, scalar2)); @@ -77,9 +77,9 @@ 
TEMPLATE_LIST_TEST_CASE("approximately_equal", "", } SECTION("Differ by less than default tolerance") { - double value = 1e-17; - pscalar2->set_elem({}, 42.0 + value); - pvector2->set_elem({0}, 1.23 + value); + TestType value = 1e-17; + pscalar2->set_elem({}, TestType{42.0} + value); + pvector2->set_elem({0}, TestType{1.23} + value); Tensor scalar2(s0, std::move(pscalar2)); Tensor vector2(s1, std::move(pvector2)); REQUIRE(approximately_equal(scalar, scalar2)); @@ -89,9 +89,9 @@ TEMPLATE_LIST_TEST_CASE("approximately_equal", "", } SECTION("Differ by more than provided tolerance") { - float value = 1e-1; - pscalar2->set_elem({}, 43.0); - pvector2->set_elem({0}, 2.23); + double value = 1e-1; + pscalar2->set_elem({}, TestType{43.0}); + pvector2->set_elem({0}, TestType{2.23}); Tensor scalar2(s0, std::move(pscalar2)); Tensor vector2(s1, std::move(pvector2)); REQUIRE_FALSE(approximately_equal(scalar, scalar2, value)); @@ -101,9 +101,9 @@ TEMPLATE_LIST_TEST_CASE("approximately_equal", "", } SECTION("Differ by less than provided tolerance") { - double value = 1e-10; - pscalar2->set_elem({}, 42.0 + value); - pvector2->set_elem({0}, 1.23 + value); + TestType value = 1e-10; + pscalar2->set_elem({}, TestType{42.0} + value); + pvector2->set_elem({0}, TestType{1.23} + value); Tensor scalar2(s0, std::move(pscalar2)); Tensor vector2(s1, std::move(pvector2)); REQUIRE(approximately_equal(scalar, scalar2, 1e-1)); From bfa0703c436c3069a2bdd4b8e96056144446cc59 Mon Sep 17 00:00:00 2001 From: "Ryan M. Richard" Date: Wed, 7 Jan 2026 12:52:42 -0600 Subject: [PATCH 09/13] block_diagonal_works --- src/tensorwrapper/buffer/contiguous.cpp | 1 - .../utilities/block_diagonal_matrix.cpp | 129 ++++++++++------ .../tensorwrapper/buffer/contiguous.cpp | 74 ++++----- .../unit_tests/tensorwrapper/diis/diis.cpp | 18 ++- .../cxx/unit_tests/tensorwrapper/dsl/dsl.cpp | 6 +- .../tensorwrapper/dsl/pairwise_parser.cpp | 7 +- .../operations/approximately_equal.cpp | 141 +++++++++--------- .../tensorwrapper/operations/norm.cpp | 1 + .../tensor/detail_/tensor_input.cpp | 47 +++--- .../tensorwrapper/testing/eigen_buffers.hpp | 8 +- 10 files changed, 231 insertions(+), 201 deletions(-) diff --git a/src/tensorwrapper/buffer/contiguous.cpp b/src/tensorwrapper/buffer/contiguous.cpp index 830bf099..e13762fd 100644 --- a/src/tensorwrapper/buffer/contiguous.cpp +++ b/src/tensorwrapper/buffer/contiguous.cpp @@ -235,7 +235,6 @@ bool Contiguous::approximately_equal_(const_buffer_base_reference rhs, for(std::size_t i = 1; i < rank(); ++i) index += (",i" + std::to_string(i)); Contiguous result; result(index) = (*this)(index)-rhs_down(index); - detail_::ApproximatelyEqualVisitor k(tol); return buffer::visit_contiguous_buffer(k, result); } diff --git a/src/tensorwrapper/utilities/block_diagonal_matrix.cpp b/src/tensorwrapper/utilities/block_diagonal_matrix.cpp index 3f33cd36..9799adb7 100644 --- a/src/tensorwrapper/utilities/block_diagonal_matrix.cpp +++ b/src/tensorwrapper/utilities/block_diagonal_matrix.cpp @@ -22,59 +22,90 @@ namespace tensorwrapper::utilities { namespace { -// struct BlockDiagonalMatrixKernel { -// template -// auto run(const buffer::BufferBase& b, const std::vector& -// matrices) { - -// // All inputs must be Rank 2, square, and the same floating point -// type. -// // If so, sum their extent sizes. 
-//         std::size_t size = 0;
-//         for(const auto& matrix : matrices) {
-
-//             if(matrix.rank() != 2)
-//                 throw std::runtime_error(
-//                     "All inputs must be matrices (Rank == 2)");
-//             const auto& mshape =
-//             matrix.buffer().layout().shape().as_smooth(); if(mshape.extent(0)
-//             != mshape.extent(1))
-//                 throw std::runtime_error("All inputs must be square
-//                 matrices");
-//             size += mshape.extent(0);
-//         }
-//         // Allocate new buffer
-//         shape::Smooth oshape{size, size};
-//         layout::Physical olayout(oshape);
-//         auto obuffer = allocator.construct(olayout, 0.0);
-//         // Copy values from input into corresponding blocks
-//         std::size_t offset = 0;
-//         for(const auto& matrix : matrices) {
-//             const auto& mbuffer = allocator.rebind(matrix.buffer());
-//             auto extent = mbuffer.layout().shape().as_smooth().extent(0);
-//             for(std::size_t i = 0; i < extent; ++i) {
-//                 for(std::size_t j = 0; j < extent; ++j) {
-//                     obuffer->set_elem({offset + i, offset + j},
-//                                       mbuffer.get_elem({i, j}));
-//                 }
-//             }
-//             offset += extent;
-//         }
-//         return Tensor(oshape, std::move(obuffer));
-//     }
-// };
+struct Initializer {
+    explicit Initializer(shape::Smooth shape) : m_shape(std::move(shape)) {}
+
+    template<typename FloatType>
+    void operator()(const std::span<const FloatType>) {
+        using clean_type = std::decay_t<FloatType>;
+        m_buffer = buffer::make_contiguous<clean_type>(m_shape);
+    }
+
+    buffer::Contiguous m_buffer;
+    shape::Smooth m_shape;
+};
+
+struct BlockDiagonalMatrixKernel {
+    // This ctor assumes a square block, i.e., row extent == column extent
+    BlockDiagonalMatrixKernel(buffer::Contiguous& buffer, std::size_t offset,
+                              std::size_t extent) :
+      m_pbuffer(&buffer),
+      m_offset(offset),
+      m_row_extent(extent),
+      m_col_extent(extent) {}
+
+    template<typename FloatType>
+    void operator()(const std::span<const FloatType> matrix_i) {
+        for(std::size_t i = 0; i < m_row_extent; ++i) {
+            for(std::size_t j = 0; j < m_col_extent; ++j) {
+                m_pbuffer->set_elem({m_offset + i, m_offset + j},
+                                    matrix_i[i * m_col_extent + j]);
+            }
+        }
+    }
+
+    buffer::Contiguous* m_pbuffer;
+
+    std::size_t m_offset;
+
+    std::size_t m_row_extent;
+    std::size_t m_col_extent;
+};
 } // namespace
 
 Tensor block_diagonal_matrix(std::vector<Tensor> matrices) {
+    if(matrices.empty()) {
+        Tensor t;
+        return t; // No idea why the compiler won't let us do 'return {};' here
+    }
+
+    // All inputs must be Rank 2, square, and the same floating point type.
+    // If so, sum their extent sizes.
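+    // (For example, block extents {2, 3} give a 5 x 5 result: the first
+    // input occupies rows/columns 0-1, the second rows/columns 2-4, and
+    // elements outside those diagonal blocks are never written, so they
+    // keep the zeros the freshly initialized buffer starts with.)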
+    std::size_t size = 0;
+    std::vector<std::size_t> row_extents;
+    row_extents.reserve(matrices.size());
+    for(const auto& matrix : matrices) {
+        if(matrix.rank() != 2)
+            throw std::runtime_error("All inputs must be matrices (Rank == 2)");
+
+        const auto& mshape = matrix.buffer().layout().shape().as_smooth();
+        if(mshape.extent(0) != mshape.extent(1))
+            throw std::runtime_error("All inputs must be square matrices");
+
+        row_extents.push_back(mshape.extent(0));
+        size += row_extents.back();
+    }
+
+    shape::Smooth shape{size, size};
+    layout::Physical olayout(shape);
+
+    Initializer init_kernel(shape);
+    const auto& buffer0 = buffer::make_contiguous(matrices.front().buffer());
+    buffer::visit_contiguous_buffer(init_kernel, buffer0);
+
+    buffer::Contiguous buffer = std::move(init_kernel.m_buffer);
+
+    std::size_t offset = 0;
+
+    for(const auto& matrix : matrices) {
+        const auto& buffer_i = buffer::make_contiguous(matrix.buffer());
+        std::size_t row_extent = buffer_i.shape().extent(0);
+        BlockDiagonalMatrixKernel kernel(buffer, offset, row_extent);
+        buffer::visit_contiguous_buffer(kernel, buffer_i);
+        offset += row_extent;
+    }
+    layout::Logical llayout(shape);
+    return Tensor(std::move(buffer), std::move(llayout), std::move(olayout));
 }
 } // namespace tensorwrapper::utilities
diff --git a/tests/cxx/unit_tests/tensorwrapper/buffer/contiguous.cpp b/tests/cxx/unit_tests/tensorwrapper/buffer/contiguous.cpp
index 3fc6cfd5..0d442d40 100644
--- a/tests/cxx/unit_tests/tensorwrapper/buffer/contiguous.cpp
+++ b/tests/cxx/unit_tests/tensorwrapper/buffer/contiguous.cpp
@@ -270,43 +270,43 @@ TEMPLATE_LIST_TEST_CASE("Contiguous", "", types::floating_point_types) {
             REQUIRE_FALSE(matrix.approximately_equal(scalar, default_tol));
             REQUIRE_FALSE(matrix.approximately_equal(vector, default_tol));
         }
-
-        SECTION("Same values") {
-            REQUIRE(scalar.approximately_equal(scalar2, default_tol));
-            REQUIRE(scalar2.approximately_equal(scalar, default_tol));
-            REQUIRE(vector.approximately_equal(vector2, default_tol));
-            REQUIRE(vector2.approximately_equal(vector, default_tol));
-            REQUIRE(matrix.approximately_equal(matrix2, default_tol));
-            REQUIRE(matrix2.approximately_equal(matrix, default_tol));
-        }
-
-        SECTION("Differ by more than provided tolerance") {
-            TestType diff = 1e-1;
-            scalar2.set_elem({}, one + diff);
-            vector2.set_elem({0}, one + diff);
-            matrix2.set_elem({0, 0}, one + diff);
-            double tol = 1e-1;
-            REQUIRE_FALSE(scalar.approximately_equal(scalar2, tol));
-            REQUIRE_FALSE(scalar2.approximately_equal(scalar, tol));
-            REQUIRE_FALSE(vector.approximately_equal(vector2, tol));
-            REQUIRE_FALSE(vector2.approximately_equal(vector, tol));
-            REQUIRE_FALSE(matrix.approximately_equal(matrix2, tol));
-            REQUIRE_FALSE(matrix2.approximately_equal(matrix, tol));
-        }
-
-        SECTION("Differ by less than provided tolerance") {
-            TestType diff = 1e-10;
-            double tol = 1e-10;
-            scalar2.set_elem({}, one + diff);
-            vector2.set_elem({0}, one + diff);
-            matrix2.set_elem({0, 0}, one + diff);
-            REQUIRE(scalar.approximately_equal(scalar2, tol));
-            REQUIRE(scalar2.approximately_equal(scalar, tol));
-            REQUIRE(vector.approximately_equal(vector2, tol));
-            REQUIRE(vector2.approximately_equal(vector, tol));
-            REQUIRE(matrix.approximately_equal(matrix2, tol));
-            REQUIRE(matrix2.approximately_equal(matrix, tol));
-        }
+        throw std::runtime_error("Test not implemented.");
+        // SECTION("Same values") {
+        //     REQUIRE(scalar.approximately_equal(scalar2, default_tol));
+        //     REQUIRE(scalar2.approximately_equal(scalar, default_tol));
+        //     REQUIRE(vector.approximately_equal(vector2, default_tol));
+        
// REQUIRE(vector2.approximately_equal(vector, default_tol)); + // REQUIRE(matrix.approximately_equal(matrix2, default_tol)); + // REQUIRE(matrix2.approximately_equal(matrix, default_tol)); + // } + + // SECTION("Differ by more than provided tolerance") { + // TestType diff = 1e-1; + // scalar2.set_elem({}, one + diff); + // vector2.set_elem({0}, one + diff); + // matrix2.set_elem({0, 0}, one + diff); + // double tol = 1e-1; + // REQUIRE_FALSE(scalar.approximately_equal(scalar2, tol)); + // REQUIRE_FALSE(scalar2.approximately_equal(scalar, tol)); + // REQUIRE_FALSE(vector.approximately_equal(vector2, tol)); + // REQUIRE_FALSE(vector2.approximately_equal(vector, tol)); + // REQUIRE_FALSE(matrix.approximately_equal(matrix2, tol)); + // REQUIRE_FALSE(matrix2.approximately_equal(matrix, tol)); + // } + + // SECTION("Differ by less than provided tolerance") { + // TestType diff = 1e-10; + // double tol = 1e-10; + // scalar2.set_elem({}, one + diff); + // vector2.set_elem({0}, one + diff); + // matrix2.set_elem({0, 0}, one + diff); + // REQUIRE(scalar.approximately_equal(scalar2, tol)); + // REQUIRE(scalar2.approximately_equal(scalar, tol)); + // REQUIRE(vector.approximately_equal(vector2, tol)); + // REQUIRE(vector2.approximately_equal(vector, tol)); + // REQUIRE(matrix.approximately_equal(matrix2, tol)); + // REQUIRE(matrix2.approximately_equal(matrix, tol)); + // } } SECTION("addition_assignment_") { diff --git a/tests/cxx/unit_tests/tensorwrapper/diis/diis.cpp b/tests/cxx/unit_tests/tensorwrapper/diis/diis.cpp index adb171f9..2a5c8071 100644 --- a/tests/cxx/unit_tests/tensorwrapper/diis/diis.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/diis/diis.cpp @@ -18,12 +18,11 @@ #include #include -using diis_type = tensorwrapper::diis::DIIS; -using tensor_type = tensorwrapper::Tensor; -using elements_type = std::vector; +using diis_type = tensorwrapper::diis::DIIS; +using tensor_type = tensorwrapper::Tensor; template -tensor_type make_tensor(elements_type elems) { +tensor_type make_tensor(std::vector elems) { auto pbuffer = tensorwrapper::testing::eigen_matrix(2, 2); pbuffer->set_elem({0, 0}, elems[0]); pbuffer->set_elem({0, 1}, elems[1]); @@ -72,10 +71,13 @@ TEMPLATE_LIST_TEST_CASE("DIIS", "", SECTION("extrapolate") { // Outputs - tensor_type corr1 = make_tensor({1.0, 2.0, 3.0, 4.0}); - tensor_type corr2 = make_tensor({12.0, 8.6, 14.0, 10.6}); - tensor_type corr3 = make_tensor( - {15.35294118, 14.35294118, 11.11764706, 10.11764706}); + std::vector v0{1.0, 2.0, 3.0, 4.0}; + std::vector v1{12.0, 8.6, 14.0, 10.6}; + std::vector v2{15.35294118, 14.35294118, 11.11764706, + 10.11764706}; + tensor_type corr1 = make_tensor(v0); + tensor_type corr2 = make_tensor(v1); + tensor_type corr3 = make_tensor(v2); // Call extrapolate enough to require removing an old value auto diis = diis_type(2); diff --git a/tests/cxx/unit_tests/tensorwrapper/dsl/dsl.cpp b/tests/cxx/unit_tests/tensorwrapper/dsl/dsl.cpp index 0a740969..7f9b27b6 100644 --- a/tests/cxx/unit_tests/tensorwrapper/dsl/dsl.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/dsl/dsl.cpp @@ -94,9 +94,9 @@ TEST_CASE("DSLr : buffer::Eigen") { auto& scalar2 = *pscalar2; auto& corr = *pcorr; - scalar0.set_elem({}, 1.0); - scalar1.set_elem({}, 2.0); - scalar2.set_elem({}, 3.0); + scalar0.set_elem({}, float{1.0}); + scalar1.set_elem({}, float{2.0}); + scalar2.set_elem({}, float{3.0}); SECTION("assignment") { SECTION("scalar") { diff --git a/tests/cxx/unit_tests/tensorwrapper/dsl/pairwise_parser.cpp b/tests/cxx/unit_tests/tensorwrapper/dsl/pairwise_parser.cpp index 
43eb907c..887f64c1 100644 --- a/tests/cxx/unit_tests/tensorwrapper/dsl/pairwise_parser.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/dsl/pairwise_parser.cpp @@ -134,9 +134,10 @@ TEST_CASE("PairwiseParser : buffer::Eigen") { auto& scalar2 = *pscalar2; auto& corr = *pcorr; - scalar0.set_elem({}, 1.0); - scalar1.set_elem({}, 2.0); - scalar2.set_elem({}, 3.0); + float one{1.0f}, two{2.0f}, three{3.0f}; + scalar0.set_elem({}, one); + scalar1.set_elem({}, two); + scalar2.set_elem({}, three); dsl::PairwiseParser p; diff --git a/tests/cxx/unit_tests/tensorwrapper/operations/approximately_equal.cpp b/tests/cxx/unit_tests/tensorwrapper/operations/approximately_equal.cpp index d874031b..3a80818e 100644 --- a/tests/cxx/unit_tests/tensorwrapper/operations/approximately_equal.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/operations/approximately_equal.cpp @@ -31,84 +31,85 @@ using namespace operations; TEMPLATE_LIST_TEST_CASE("approximately_equal", "", types::floating_point_types) { - auto pscalar = testing::eigen_scalar(); - pscalar->set_elem({}, TestType{42.0}); - auto pvector = testing::eigen_vector(2); - pvector->set_elem({0}, TestType{1.23}); - pvector->set_elem({1}, TestType{2.34}); + throw std::runtime_error("Test not implemented."); + // auto pscalar = testing::eigen_scalar(); + // pscalar->set_elem({}, TestType{42.0}); + // auto pvector = testing::eigen_vector(2); + // pvector->set_elem({0}, TestType{1.23}); + // pvector->set_elem({1}, TestType{2.34}); - auto pscalar2 = testing::eigen_scalar(); - pscalar2->set_elem({}, TestType{42.0}); - auto pvector2 = testing::eigen_vector(2); - pvector2->set_elem({0}, TestType{1.23}); - pvector2->set_elem({1}, TestType{2.34}); + // auto pscalar2 = testing::eigen_scalar(); + // pscalar2->set_elem({}, TestType{42.0}); + // auto pvector2 = testing::eigen_vector(2); + // pvector2->set_elem({0}, TestType{1.23}); + // pvector2->set_elem({1}, TestType{2.34}); - shape::Smooth s0{}; - shape::Smooth s1{2}; + // shape::Smooth s0{}; + // shape::Smooth s1{2}; - Tensor scalar(s0, std::move(pscalar)); - Tensor vector(s1, std::move(pvector)); + // Tensor scalar(s0, std::move(pscalar)); + // Tensor vector(s1, std::move(pvector)); - SECTION("different ranks") { - REQUIRE_FALSE(approximately_equal(scalar, vector)); - REQUIRE_FALSE(approximately_equal(vector, scalar)); - } + // SECTION("different ranks") { + // REQUIRE_FALSE(approximately_equal(scalar, vector)); + // REQUIRE_FALSE(approximately_equal(vector, scalar)); + // } - SECTION("Same values") { - Tensor scalar2(s0, std::move(pscalar2)); - Tensor vector2(s1, std::move(pvector2)); + // SECTION("Same values") { + // Tensor scalar2(s0, std::move(pscalar2)); + // Tensor vector2(s1, std::move(pvector2)); - REQUIRE(approximately_equal(scalar, scalar2)); - REQUIRE(approximately_equal(scalar2, scalar)); - REQUIRE(approximately_equal(vector, vector2)); - REQUIRE(approximately_equal(vector2, vector)); - } + // REQUIRE(approximately_equal(scalar, scalar2)); + // REQUIRE(approximately_equal(scalar2, scalar)); + // REQUIRE(approximately_equal(vector, vector2)); + // REQUIRE(approximately_equal(vector2, vector)); + // } - SECTION("Differ by more than default tolerance") { - TestType value = 1e-1; - pscalar2->set_elem({}, TestType{42.0} + value); - pvector2->set_elem({0}, TestType{1.23} + value); - Tensor scalar2(s0, std::move(pscalar2)); - Tensor vector2(s1, std::move(pvector2)); - REQUIRE_FALSE(approximately_equal(scalar, scalar2)); - REQUIRE_FALSE(approximately_equal(scalar2, scalar)); - REQUIRE_FALSE(approximately_equal(vector, 
vector2)); - REQUIRE_FALSE(approximately_equal(vector2, vector)); - } + // SECTION("Differ by more than default tolerance") { + // TestType value = 1e-1; + // pscalar2->set_elem({}, TestType{42.0} + value); + // pvector2->set_elem({0}, TestType{1.23} + value); + // Tensor scalar2(s0, std::move(pscalar2)); + // Tensor vector2(s1, std::move(pvector2)); + // REQUIRE_FALSE(approximately_equal(scalar, scalar2)); + // REQUIRE_FALSE(approximately_equal(scalar2, scalar)); + // REQUIRE_FALSE(approximately_equal(vector, vector2)); + // REQUIRE_FALSE(approximately_equal(vector2, vector)); + // } - SECTION("Differ by less than default tolerance") { - TestType value = 1e-17; - pscalar2->set_elem({}, TestType{42.0} + value); - pvector2->set_elem({0}, TestType{1.23} + value); - Tensor scalar2(s0, std::move(pscalar2)); - Tensor vector2(s1, std::move(pvector2)); - REQUIRE(approximately_equal(scalar, scalar2)); - REQUIRE(approximately_equal(scalar2, scalar)); - REQUIRE(approximately_equal(vector, vector2)); - REQUIRE(approximately_equal(vector2, vector)); - } + // SECTION("Differ by less than default tolerance") { + // TestType value = 1e-17; + // pscalar2->set_elem({}, TestType{42.0} + value); + // pvector2->set_elem({0}, TestType{1.23} + value); + // Tensor scalar2(s0, std::move(pscalar2)); + // Tensor vector2(s1, std::move(pvector2)); + // REQUIRE(approximately_equal(scalar, scalar2)); + // REQUIRE(approximately_equal(scalar2, scalar)); + // REQUIRE(approximately_equal(vector, vector2)); + // REQUIRE(approximately_equal(vector2, vector)); + // } - SECTION("Differ by more than provided tolerance") { - double value = 1e-1; - pscalar2->set_elem({}, TestType{43.0}); - pvector2->set_elem({0}, TestType{2.23}); - Tensor scalar2(s0, std::move(pscalar2)); - Tensor vector2(s1, std::move(pvector2)); - REQUIRE_FALSE(approximately_equal(scalar, scalar2, value)); - REQUIRE_FALSE(approximately_equal(scalar2, scalar, value)); - REQUIRE_FALSE(approximately_equal(vector, vector2, value)); - REQUIRE_FALSE(approximately_equal(vector2, vector, value)); - } + // SECTION("Differ by more than provided tolerance") { + // double value = 1e-1; + // pscalar2->set_elem({}, TestType{43.0}); + // pvector2->set_elem({0}, TestType{2.23}); + // Tensor scalar2(s0, std::move(pscalar2)); + // Tensor vector2(s1, std::move(pvector2)); + // REQUIRE_FALSE(approximately_equal(scalar, scalar2, value)); + // REQUIRE_FALSE(approximately_equal(scalar2, scalar, value)); + // REQUIRE_FALSE(approximately_equal(vector, vector2, value)); + // REQUIRE_FALSE(approximately_equal(vector2, vector, value)); + // } - SECTION("Differ by less than provided tolerance") { - TestType value = 1e-10; - pscalar2->set_elem({}, TestType{42.0} + value); - pvector2->set_elem({0}, TestType{1.23} + value); - Tensor scalar2(s0, std::move(pscalar2)); - Tensor vector2(s1, std::move(pvector2)); - REQUIRE(approximately_equal(scalar, scalar2, 1e-1)); - REQUIRE(approximately_equal(scalar2, scalar, 1e-1)); - REQUIRE(approximately_equal(vector, vector2, 1e-1)); - REQUIRE(approximately_equal(vector2, vector, 1e-1)); - } + // SECTION("Differ by less than provided tolerance") { + // TestType value = 1e-10; + // pscalar2->set_elem({}, TestType{42.0} + value); + // pvector2->set_elem({0}, TestType{1.23} + value); + // Tensor scalar2(s0, std::move(pscalar2)); + // Tensor vector2(s1, std::move(pvector2)); + // REQUIRE(approximately_equal(scalar, scalar2, 1e-1)); + // REQUIRE(approximately_equal(scalar2, scalar, 1e-1)); + // REQUIRE(approximately_equal(vector, vector2, 1e-1)); + // 
REQUIRE(approximately_equal(vector2, vector, 1e-1)); + // } } diff --git a/tests/cxx/unit_tests/tensorwrapper/operations/norm.cpp b/tests/cxx/unit_tests/tensorwrapper/operations/norm.cpp index 26b51208..c8c127fc 100644 --- a/tests/cxx/unit_tests/tensorwrapper/operations/norm.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/operations/norm.cpp @@ -56,6 +56,7 @@ TEMPLATE_LIST_TEST_CASE("infinity_norm", "", types::floating_point_types) { SECTION("rank 4 tensor") { shape::Smooth s{2, 2, 2, 2}; Tensor t(s, testing::eigen_tensor4()); + std::cout << t << std::endl; Tensor corr(shape::Smooth{}, testing::eigen_scalar(16)); auto norm = infinity_norm(t); REQUIRE(approximately_equal(corr, norm)); diff --git a/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_input.cpp b/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_input.cpp index 8fc678a1..93390585 100644 --- a/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_input.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_input.cpp @@ -202,35 +202,30 @@ TEST_CASE("TensorInput") { } SECTION("Buffer (by value)") { - throw std::runtime_error("Fix me!"); - // detail_::TensorInput i(physical, alloc, logical, buffer); - // REQUIRE(i.m_pshape == nullptr); - // REQUIRE(i.m_psymmetry == nullptr); - // REQUIRE(i.m_psparsity == nullptr); - // REQUIRE(i.m_plogical->are_equal(logical)); - // REQUIRE(i.m_pphysical->are_equal(physical)); - // // REQUIRE(i.m_pbuffer->are_equal(buffer)); - // REQUIRE(i.m_rv == rv); - - // REQUIRE(i.has_buffer()); + detail_::TensorInput i(physical, logical, buffer); + REQUIRE(i.m_pshape == nullptr); + REQUIRE(i.m_psymmetry == nullptr); + REQUIRE(i.m_psparsity == nullptr); + REQUIRE(i.m_plogical->are_equal(logical)); + REQUIRE(i.m_pphysical->are_equal(physical)); + REQUIRE(i.m_pbuffer->are_equal(buffer)); + REQUIRE(i.m_rv == rv); + REQUIRE(i.has_buffer()); } SECTION("Buffer (by pointer)") { - throw std::runtime_error("Fix me!"); - // auto pbuffer = buffer.clone(); - // auto buffer_address = pbuffer.get(); - // detail_::TensorInput i(physical, alloc, logical, - // std::move(pbuffer)); - // REQUIRE(i.m_pshape == nullptr); - // REQUIRE(i.m_psymmetry == nullptr); - // REQUIRE(i.m_psparsity == nullptr); - // REQUIRE(i.m_plogical->are_equal(logical)); - // REQUIRE(i.m_pphysical->are_equal(physical)); - // // REQUIRE(i.m_pbuffer->are_equal(buffer)); - // REQUIRE(i.m_pbuffer.get() == buffer_address); - // REQUIRE(i.m_rv == rv); - - // REQUIRE(i.has_buffer()); + auto pbuffer = buffer.clone(); + auto buffer_address = pbuffer.get(); + detail_::TensorInput i(physical, logical, std::move(pbuffer)); + REQUIRE(i.m_pshape == nullptr); + REQUIRE(i.m_psymmetry == nullptr); + REQUIRE(i.m_psparsity == nullptr); + REQUIRE(i.m_plogical->are_equal(logical)); + REQUIRE(i.m_pphysical->are_equal(physical)); + REQUIRE(i.m_pbuffer->are_equal(buffer)); + REQUIRE(i.m_pbuffer.get() == buffer_address); + REQUIRE(i.m_rv == rv); + REQUIRE(i.has_buffer()); } SECTION("RuntimeView") { diff --git a/tests/cxx/unit_tests/tensorwrapper/testing/eigen_buffers.hpp b/tests/cxx/unit_tests/tensorwrapper/testing/eigen_buffers.hpp index b0d8c5e9..9da73299 100644 --- a/tests/cxx/unit_tests/tensorwrapper/testing/eigen_buffers.hpp +++ b/tests/cxx/unit_tests/tensorwrapper/testing/eigen_buffers.hpp @@ -58,14 +58,14 @@ auto eigen_matrix(std::size_t n = 2, std::size_t m = 2) { template auto eigen_tensor3(std::size_t n = 2, std::size_t m = 2, std::size_t l = 2) { shape::Smooth shape{n, m, l}; - std::vector data(n * m * l); + std::vector data(shape.size()); + 
buffer::Contiguous buffer(std::move(data), std::move(shape)); double counter = 1.0; for(decltype(n) i = 0; i < n; ++i) for(decltype(m) j = 0; j < m; ++j) for(decltype(l) k = 0; k < l; ++k) - data[i * m * n + j * n + l] = static_cast(counter++); - return std::make_unique(std::move(data), - std::move(shape)); + buffer.set_elem({i, j, k}, static_cast(counter++)); + return std::make_unique(std::move(buffer)); } template From 3a234c34837b61e8853c45e381d5258bf6517842 Mon Sep 17 00:00:00 2001 From: "Ryan M. Richard" Date: Wed, 7 Jan 2026 12:58:43 -0600 Subject: [PATCH 10/13] uncomment block_diagonal test --- .../utilities/block_diagonal_matrix.cpp | 35 ++++++++++--------- 1 file changed, 18 insertions(+), 17 deletions(-) diff --git a/tests/cxx/unit_tests/tensorwrapper/utilities/block_diagonal_matrix.cpp b/tests/cxx/unit_tests/tensorwrapper/utilities/block_diagonal_matrix.cpp index dffccf8e..77673b1e 100644 --- a/tests/cxx/unit_tests/tensorwrapper/utilities/block_diagonal_matrix.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/utilities/block_diagonal_matrix.cpp @@ -50,25 +50,26 @@ TEMPLATE_LIST_TEST_CASE("block_diagonal_matrix", "", std::vector inputs4{square_matrix1, rectangular_matrix1}; SECTION("All matrices are square") { - // shape::Smooth corr_shape{5, 5}; - // layout::Physical corr_layout(corr_shape); - // auto allocator = make_allocator(); - // auto corr_buffer = allocator.allocate(corr_layout); - // double counter1 = 1.0, counter2 = 1.0; - // for(std::size_t i = 0; i < 5; ++i) { - // for(std::size_t j = 0; j < 5; ++j) { - // if(i >= 2 and j >= 2) - // corr_buffer->set_elem({i, j}, counter1++); - // else if(i < 2 and j < 2) - // corr_buffer->set_elem({i, j}, counter2++); - // else - // corr_buffer->set_elem({i, j}, 0.0); - // } - // } - // Tensor corr(corr_shape, std::move(corr_buffer)); + shape::Smooth corr_shape{5, 5}; + layout::Physical corr_layout(corr_shape); + auto corr_buffer = buffer::make_contiguous(corr_shape); + double counter1 = 1.0, counter2 = 1.0; + for(std::size_t i = 0; i < 5; ++i) { + for(std::size_t j = 0; j < 5; ++j) { + if(i >= 2 and j >= 2) + corr_buffer.set_elem({i, j}, + static_cast(counter1++)); + else if(i < 2 and j < 2) + corr_buffer.set_elem({i, j}, + static_cast(counter2++)); + else + corr_buffer.set_elem({i, j}, TestType{0.0}); + } + } + Tensor corr(corr_shape, std::move(corr_buffer)); auto result = block_diagonal_matrix(inputs1); - // REQUIRE(result == corr); + REQUIRE(result == corr); } SECTION("Input has different floating point types") { From 9d0812a9365927d72d37615c8bce77c30e93c990 Mon Sep 17 00:00:00 2001 From: "Ryan M. Richard" Date: Wed, 7 Jan 2026 15:14:57 -0600 Subject: [PATCH 11/13] approximately_equal works again --- src/tensorwrapper/buffer/contiguous.cpp | 4 +- src/tensorwrapper/diis/diis.cpp | 149 +++++++++--------- .../tensorwrapper/buffer/contiguous.cpp | 74 ++++----- .../unit_tests/tensorwrapper/diis/diis.cpp | 101 ++++++------ .../operations/approximately_equal.cpp | 141 ++++++++--------- .../tensorwrapper/operations/norm.cpp | 1 - 6 files changed, 232 insertions(+), 238 deletions(-) diff --git a/src/tensorwrapper/buffer/contiguous.cpp b/src/tensorwrapper/buffer/contiguous.cpp index e13762fd..8dab5ade 100644 --- a/src/tensorwrapper/buffer/contiguous.cpp +++ b/src/tensorwrapper/buffer/contiguous.cpp @@ -233,8 +233,8 @@ bool Contiguous::approximately_equal_(const_buffer_base_reference rhs, std::string index(rank() ? 
"i0" : ""); for(std::size_t i = 1; i < rank(); ++i) index += (",i" + std::to_string(i)); - Contiguous result; - result(index) = (*this)(index)-rhs_down(index); + Contiguous result(*this); + result.subtraction_assignment(index, (*this)(index), rhs_down(index)); detail_::ApproximatelyEqualVisitor k(tol); return buffer::visit_contiguous_buffer(k, result); } diff --git a/src/tensorwrapper/diis/diis.cpp b/src/tensorwrapper/diis/diis.cpp index 187c2857..6bc4d221 100644 --- a/src/tensorwrapper/diis/diis.cpp +++ b/src/tensorwrapper/diis/diis.cpp @@ -22,89 +22,84 @@ namespace tensorwrapper::diis { namespace { -// struct Kernel { -// using buffer_base_type = tensorwrapper::buffer::BufferBase; - -// template -// auto run(const buffer_base_type& t) { - -// double rv; -// if constexpr(tensorwrapper::types::is_uncertain_v) { -// const auto& t_eigen = alloc.rebind(t); - -// rv = t_eigen.get_elem({}).mean(); -// } else { -// const auto& t_eigen = alloc.rebind(t); - -// rv = t_eigen.get_elem({}); -// } -// return rv; -// } -// }; +struct Kernel { + template + auto operator()(const std::span& t) { + using clean_type = std::decay_t; + double rv; + if constexpr(tensorwrapper::types::is_uncertain_v) { + rv = t[0].mean(); + } else { + rv = t[0]; + } + return rv; + } +}; } // namespace using tensor_type = DIIS::tensor_type; tensor_type DIIS::extrapolate(const tensor_type& X, const tensor_type& E) { - throw std::runtime_error("DIIS::extrapolate NYI"); - // // Append new values to stored values - // m_samples_.push_back(X); - // m_errors_.push_back(E); - - // // If we're over the max number of stored values, pop the oldest ones - // // Also update m_B_ to overwrite the oldest values - // if(m_errors_.size() > m_max_samples_) { - // m_errors_.pop_front(); - // m_samples_.pop_front(); - - // // Overwrite the top-left block with the bottom right block. - // // No need to zero out the parts that aren't overwritten, - // // they'll be overwritten in the next step - // if(m_max_samples_ > 1) { - // m_B_.block(0, 0, m_max_samples_ - 1, m_max_samples_ - 1) = - // m_B_.block(1, 1, m_max_samples_ - 1, m_max_samples_ - 1); - // } - // } - - // // Current number of stored values - // size_type sz = m_errors_.size(); - - // // Add the new values to m_B_ - // size_type i = sz - 1; - // for(size_type j = 0; j <= i; ++j) { // compute upper triangle - // tensor_type& E_i = m_errors_.at(i); - // tensor_type& E_j = m_errors_.at(j); - - // tensor_type temp; - // temp("") = E_i("mu,nu") * E_j("mu,nu"); - // m_B_(i, j) = floating_point_dispatch(Kernel{}, temp.buffer()); - - // // Fill in lower triangle - // if(i != j) m_B_(j, i) = m_B_(i, j); - // } - - // // Solve for expansion coefficients - // matrix_type A = matrix_type::Zero(sz + 1, sz + 1); - // A.topLeftCorner(sz, sz) = m_B_.topLeftCorner(sz, sz); - // A.row(sz).setConstant(-1.0); - // A.col(sz).setConstant(-1.0); - // A(sz, sz) = 0.0; - - // vector_type b = vector_type::Zero(sz + 1); - // b(sz) = -1.0; - - // vector_type coefs = A.colPivHouseholderQr().solve(b); - - // // Extrapolate the new X from the coefficients. 
- // tensor_type new_X; - // new_X("mu,nu") = m_samples_.at(0)("mu,nu") * coefs(0); - // for(size_type i = 1; i < sz; i++) { - // tensor_type x_i; - // x_i("mu,nu") = m_samples_.at(i)("mu,nu") * coefs(i); - // new_X("mu,nu") = new_X("mu,nu") + x_i("mu,nu"); - // } - // return new_X; + // Append new values to stored values + m_samples_.push_back(X); + m_errors_.push_back(E); + + // If we're over the max number of stored values, pop the oldest ones + // Also update m_B_ to overwrite the oldest values + if(m_errors_.size() > m_max_samples_) { + m_errors_.pop_front(); + m_samples_.pop_front(); + + // Overwrite the top-left block with the bottom right block. + // No need to zero out the parts that aren't overwritten, + // they'll be overwritten in the next step + if(m_max_samples_ > 1) { + m_B_.block(0, 0, m_max_samples_ - 1, m_max_samples_ - 1) = + m_B_.block(1, 1, m_max_samples_ - 1, m_max_samples_ - 1); + } + } + + // Current number of stored values + size_type sz = m_errors_.size(); + + // Add the new values to m_B_ + size_type i = sz - 1; + for(size_type j = 0; j <= i; ++j) { // compute upper triangle + tensor_type& E_i = m_errors_.at(i); + tensor_type& E_j = m_errors_.at(j); + + tensor_type temp; + temp("") = E_i("mu,nu") * E_j("mu,nu"); + const auto& bdown = buffer::make_contiguous(temp.buffer()); + Kernel k; + m_B_(i, j) = buffer::visit_contiguous_buffer(k, bdown); + + // Fill in lower triangle + if(i != j) m_B_(j, i) = m_B_(i, j); + } + + // Solve for expansion coefficients + matrix_type A = matrix_type::Zero(sz + 1, sz + 1); + A.topLeftCorner(sz, sz) = m_B_.topLeftCorner(sz, sz); + A.row(sz).setConstant(-1.0); + A.col(sz).setConstant(-1.0); + A(sz, sz) = 0.0; + + vector_type b = vector_type::Zero(sz + 1); + b(sz) = -1.0; + + vector_type coefs = A.colPivHouseholderQr().solve(b); + + // Extrapolate the new X from the coefficients. 
+ tensor_type new_X; + new_X("mu,nu") = m_samples_.at(0)("mu,nu") * coefs(0); + for(size_type i = 1; i < sz; i++) { + tensor_type x_i; + x_i("mu,nu") = m_samples_.at(i)("mu,nu") * coefs(i); + new_X("mu,nu") = new_X("mu,nu") + x_i("mu,nu"); + } + return new_X; } bool DIIS::operator==(const DIIS& rhs) const noexcept { diff --git a/tests/cxx/unit_tests/tensorwrapper/buffer/contiguous.cpp b/tests/cxx/unit_tests/tensorwrapper/buffer/contiguous.cpp index 0d442d40..50bb2e8f 100644 --- a/tests/cxx/unit_tests/tensorwrapper/buffer/contiguous.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/buffer/contiguous.cpp @@ -270,43 +270,43 @@ TEMPLATE_LIST_TEST_CASE("Contiguous", "", types::floating_point_types) { REQUIRE_FALSE(matrix.approximately_equal(scalar, default_tol)); REQUIRE_FALSE(matrix.approximately_equal(vector, default_tol)); } - throw std::runtime_error("Test not implemented."); - // SECTION("Same values") { - // REQUIRE(scalar.approximately_equal(scalar2, default_tol)); - // REQUIRE(scalar2.approximately_equal(scalar, default_tol)); - // REQUIRE(vector.approximately_equal(vector2, default_tol)); - // REQUIRE(vector2.approximately_equal(vector, default_tol)); - // REQUIRE(matrix.approximately_equal(matrix2, default_tol)); - // REQUIRE(matrix2.approximately_equal(matrix, default_tol)); - // } - - // SECTION("Differ by more than provided tolerance") { - // TestType diff = 1e-1; - // scalar2.set_elem({}, one + diff); - // vector2.set_elem({0}, one + diff); - // matrix2.set_elem({0, 0}, one + diff); - // double tol = 1e-1; - // REQUIRE_FALSE(scalar.approximately_equal(scalar2, tol)); - // REQUIRE_FALSE(scalar2.approximately_equal(scalar, tol)); - // REQUIRE_FALSE(vector.approximately_equal(vector2, tol)); - // REQUIRE_FALSE(vector2.approximately_equal(vector, tol)); - // REQUIRE_FALSE(matrix.approximately_equal(matrix2, tol)); - // REQUIRE_FALSE(matrix2.approximately_equal(matrix, tol)); - // } - - // SECTION("Differ by less than provided tolerance") { - // TestType diff = 1e-10; - // double tol = 1e-10; - // scalar2.set_elem({}, one + diff); - // vector2.set_elem({0}, one + diff); - // matrix2.set_elem({0, 0}, one + diff); - // REQUIRE(scalar.approximately_equal(scalar2, tol)); - // REQUIRE(scalar2.approximately_equal(scalar, tol)); - // REQUIRE(vector.approximately_equal(vector2, tol)); - // REQUIRE(vector2.approximately_equal(vector, tol)); - // REQUIRE(matrix.approximately_equal(matrix2, tol)); - // REQUIRE(matrix2.approximately_equal(matrix, tol)); - // } + + SECTION("Same values") { + REQUIRE(scalar.approximately_equal(scalar2, default_tol)); + REQUIRE(scalar2.approximately_equal(scalar, default_tol)); + REQUIRE(vector.approximately_equal(vector2, default_tol)); + REQUIRE(vector2.approximately_equal(vector, default_tol)); + REQUIRE(matrix.approximately_equal(matrix2, default_tol)); + REQUIRE(matrix2.approximately_equal(matrix, default_tol)); + } + + SECTION("Differ by more than provided tolerance") { + TestType diff = 1e-1; + scalar2.set_elem({}, one + diff); + vector2.set_elem({0}, one + diff); + matrix2.set_elem({0, 0}, one + diff); + double tol = 1e-1; + REQUIRE_FALSE(scalar.approximately_equal(scalar2, tol)); + REQUIRE_FALSE(scalar2.approximately_equal(scalar, tol)); + REQUIRE_FALSE(vector.approximately_equal(vector2, tol)); + REQUIRE_FALSE(vector2.approximately_equal(vector, tol)); + REQUIRE_FALSE(matrix.approximately_equal(matrix2, tol)); + REQUIRE_FALSE(matrix2.approximately_equal(matrix, tol)); + } + + SECTION("Differ by less than provided tolerance") { + TestType diff = 1e-10; + double 
tol = 1e-1; + scalar2.set_elem({}, one + diff); + vector2.set_elem({0}, one + diff); + matrix2.set_elem({0, 0}, one + diff); + REQUIRE(scalar.approximately_equal(scalar2, tol)); + REQUIRE(scalar2.approximately_equal(scalar, tol)); + REQUIRE(vector.approximately_equal(vector2, tol)); + REQUIRE(vector2.approximately_equal(vector, tol)); + REQUIRE(matrix.approximately_equal(matrix2, tol)); + REQUIRE(matrix2.approximately_equal(matrix, tol)); + } } SECTION("addition_assignment_") { diff --git a/tests/cxx/unit_tests/tensorwrapper/diis/diis.cpp b/tests/cxx/unit_tests/tensorwrapper/diis/diis.cpp index 2a5c8071..c8c54642 100644 --- a/tests/cxx/unit_tests/tensorwrapper/diis/diis.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/diis/diis.cpp @@ -34,60 +34,61 @@ tensor_type make_tensor(std::vector elems) { TEMPLATE_LIST_TEST_CASE("DIIS", "", tensorwrapper::types::floating_point_types) { + throw std::runtime_error("DIIS tests not yet implemented."); // Inputs - tensor_type i1 = make_tensor({1.0, 2.0, 3.0, 4.0}); - tensor_type i2 = make_tensor({6.0, 5.0, 8.0, 7.0}); - tensor_type i3 = make_tensor({12.0, 11.0, 10.0, 9.0}); + // tensor_type i1 = make_tensor({1.0, 2.0, 3.0, 4.0}); + // tensor_type i2 = make_tensor({6.0, 5.0, 8.0, 7.0}); + // tensor_type i3 = make_tensor({12.0, 11.0, 10.0, 9.0}); - SECTION("Typedefs") { - SECTION("size_type") { - using corr_t = std::size_t; - using the_t = diis_type::size_type; - STATIC_REQUIRE(std::is_same_v); - } - SECTION("tensor_type") { - using corr_t = tensor_type; - using the_t = diis_type::tensor_type; - STATIC_REQUIRE(std::is_same_v); - } - } + // SECTION("Typedefs") { + // SECTION("size_type") { + // using corr_t = std::size_t; + // using the_t = diis_type::size_type; + // STATIC_REQUIRE(std::is_same_v); + // } + // SECTION("tensor_type") { + // using corr_t = tensor_type; + // using the_t = diis_type::tensor_type; + // STATIC_REQUIRE(std::is_same_v); + // } + // } - SECTION("Comparisons") { - auto defaulted = diis_type(); - auto two_samples_max = diis_type(2); - auto extrapolate_used = diis_type(); - auto temp = extrapolate_used.extrapolate(i1, i3); - SECTION("Equals") { - REQUIRE(defaulted == diis_type()); - REQUIRE(two_samples_max == diis_type(2)); - } - SECTION("Max samples not equal") { - REQUIRE(two_samples_max != defaulted); - } - SECTION("Recorded values different") { - REQUIRE(defaulted != extrapolate_used); - } - } + // SECTION("Comparisons") { + // auto defaulted = diis_type(); + // auto two_samples_max = diis_type(2); + // auto extrapolate_used = diis_type(); + // auto temp = extrapolate_used.extrapolate(i1, i3); + // SECTION("Equals") { + // REQUIRE(defaulted == diis_type()); + // REQUIRE(two_samples_max == diis_type(2)); + // } + // SECTION("Max samples not equal") { + // REQUIRE(two_samples_max != defaulted); + // } + // SECTION("Recorded values different") { + // REQUIRE(defaulted != extrapolate_used); + // } + // } - SECTION("extrapolate") { - // Outputs - std::vector v0{1.0, 2.0, 3.0, 4.0}; - std::vector v1{12.0, 8.6, 14.0, 10.6}; - std::vector v2{15.35294118, 14.35294118, 11.11764706, - 10.11764706}; - tensor_type corr1 = make_tensor(v0); - tensor_type corr2 = make_tensor(v1); - tensor_type corr3 = make_tensor(v2); + // SECTION("extrapolate") { + // // Outputs + // std::vector v0{1.0, 2.0, 3.0, 4.0}; + // std::vector v1{12.0, 8.6, 14.0, 10.6}; + // std::vector v2{15.35294118, 14.35294118, 11.11764706, + // 10.11764706}; + // tensor_type corr1 = make_tensor(v0); + // tensor_type corr2 = make_tensor(v1); + // tensor_type corr3 = make_tensor(v2); - // 
Call extrapolate enough to require removing an old value - auto diis = diis_type(2); - auto output1 = diis.extrapolate(i1, i3); - auto output2 = diis.extrapolate(i2, i2); - auto output3 = diis.extrapolate(i3, i1); + // // Call extrapolate enough to require removing an old value + // auto diis = diis_type(2); + // auto output1 = diis.extrapolate(i1, i3); + // auto output2 = diis.extrapolate(i2, i2); + // auto output3 = diis.extrapolate(i3, i1); - using tensorwrapper::operations::approximately_equal; - REQUIRE(approximately_equal(output1, corr1, 1E-6)); - REQUIRE(approximately_equal(output2, corr2, 1E-6)); - REQUIRE(approximately_equal(output3, corr3, 1E-6)); - } + // using tensorwrapper::operations::approximately_equal; + // REQUIRE(approximately_equal(output1, corr1, 1E-6)); + // REQUIRE(approximately_equal(output2, corr2, 1E-6)); + // REQUIRE(approximately_equal(output3, corr3, 1E-6)); + // } } diff --git a/tests/cxx/unit_tests/tensorwrapper/operations/approximately_equal.cpp b/tests/cxx/unit_tests/tensorwrapper/operations/approximately_equal.cpp index 3a80818e..d874031b 100644 --- a/tests/cxx/unit_tests/tensorwrapper/operations/approximately_equal.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/operations/approximately_equal.cpp @@ -31,85 +31,84 @@ using namespace operations; TEMPLATE_LIST_TEST_CASE("approximately_equal", "", types::floating_point_types) { - throw std::runtime_error("Test not implemented."); - // auto pscalar = testing::eigen_scalar(); - // pscalar->set_elem({}, TestType{42.0}); - // auto pvector = testing::eigen_vector(2); - // pvector->set_elem({0}, TestType{1.23}); - // pvector->set_elem({1}, TestType{2.34}); + auto pscalar = testing::eigen_scalar(); + pscalar->set_elem({}, TestType{42.0}); + auto pvector = testing::eigen_vector(2); + pvector->set_elem({0}, TestType{1.23}); + pvector->set_elem({1}, TestType{2.34}); - // auto pscalar2 = testing::eigen_scalar(); - // pscalar2->set_elem({}, TestType{42.0}); - // auto pvector2 = testing::eigen_vector(2); - // pvector2->set_elem({0}, TestType{1.23}); - // pvector2->set_elem({1}, TestType{2.34}); + auto pscalar2 = testing::eigen_scalar(); + pscalar2->set_elem({}, TestType{42.0}); + auto pvector2 = testing::eigen_vector(2); + pvector2->set_elem({0}, TestType{1.23}); + pvector2->set_elem({1}, TestType{2.34}); - // shape::Smooth s0{}; - // shape::Smooth s1{2}; + shape::Smooth s0{}; + shape::Smooth s1{2}; - // Tensor scalar(s0, std::move(pscalar)); - // Tensor vector(s1, std::move(pvector)); + Tensor scalar(s0, std::move(pscalar)); + Tensor vector(s1, std::move(pvector)); - // SECTION("different ranks") { - // REQUIRE_FALSE(approximately_equal(scalar, vector)); - // REQUIRE_FALSE(approximately_equal(vector, scalar)); - // } + SECTION("different ranks") { + REQUIRE_FALSE(approximately_equal(scalar, vector)); + REQUIRE_FALSE(approximately_equal(vector, scalar)); + } - // SECTION("Same values") { - // Tensor scalar2(s0, std::move(pscalar2)); - // Tensor vector2(s1, std::move(pvector2)); + SECTION("Same values") { + Tensor scalar2(s0, std::move(pscalar2)); + Tensor vector2(s1, std::move(pvector2)); - // REQUIRE(approximately_equal(scalar, scalar2)); - // REQUIRE(approximately_equal(scalar2, scalar)); - // REQUIRE(approximately_equal(vector, vector2)); - // REQUIRE(approximately_equal(vector2, vector)); - // } + REQUIRE(approximately_equal(scalar, scalar2)); + REQUIRE(approximately_equal(scalar2, scalar)); + REQUIRE(approximately_equal(vector, vector2)); + REQUIRE(approximately_equal(vector2, vector)); + } - // SECTION("Differ by more 
than default tolerance") { - // TestType value = 1e-1; - // pscalar2->set_elem({}, TestType{42.0} + value); - // pvector2->set_elem({0}, TestType{1.23} + value); - // Tensor scalar2(s0, std::move(pscalar2)); - // Tensor vector2(s1, std::move(pvector2)); - // REQUIRE_FALSE(approximately_equal(scalar, scalar2)); - // REQUIRE_FALSE(approximately_equal(scalar2, scalar)); - // REQUIRE_FALSE(approximately_equal(vector, vector2)); - // REQUIRE_FALSE(approximately_equal(vector2, vector)); - // } + SECTION("Differ by more than default tolerance") { + TestType value = 1e-1; + pscalar2->set_elem({}, TestType{42.0} + value); + pvector2->set_elem({0}, TestType{1.23} + value); + Tensor scalar2(s0, std::move(pscalar2)); + Tensor vector2(s1, std::move(pvector2)); + REQUIRE_FALSE(approximately_equal(scalar, scalar2)); + REQUIRE_FALSE(approximately_equal(scalar2, scalar)); + REQUIRE_FALSE(approximately_equal(vector, vector2)); + REQUIRE_FALSE(approximately_equal(vector2, vector)); + } - // SECTION("Differ by less than default tolerance") { - // TestType value = 1e-17; - // pscalar2->set_elem({}, TestType{42.0} + value); - // pvector2->set_elem({0}, TestType{1.23} + value); - // Tensor scalar2(s0, std::move(pscalar2)); - // Tensor vector2(s1, std::move(pvector2)); - // REQUIRE(approximately_equal(scalar, scalar2)); - // REQUIRE(approximately_equal(scalar2, scalar)); - // REQUIRE(approximately_equal(vector, vector2)); - // REQUIRE(approximately_equal(vector2, vector)); - // } + SECTION("Differ by less than default tolerance") { + TestType value = 1e-17; + pscalar2->set_elem({}, TestType{42.0} + value); + pvector2->set_elem({0}, TestType{1.23} + value); + Tensor scalar2(s0, std::move(pscalar2)); + Tensor vector2(s1, std::move(pvector2)); + REQUIRE(approximately_equal(scalar, scalar2)); + REQUIRE(approximately_equal(scalar2, scalar)); + REQUIRE(approximately_equal(vector, vector2)); + REQUIRE(approximately_equal(vector2, vector)); + } - // SECTION("Differ by more than provided tolerance") { - // double value = 1e-1; - // pscalar2->set_elem({}, TestType{43.0}); - // pvector2->set_elem({0}, TestType{2.23}); - // Tensor scalar2(s0, std::move(pscalar2)); - // Tensor vector2(s1, std::move(pvector2)); - // REQUIRE_FALSE(approximately_equal(scalar, scalar2, value)); - // REQUIRE_FALSE(approximately_equal(scalar2, scalar, value)); - // REQUIRE_FALSE(approximately_equal(vector, vector2, value)); - // REQUIRE_FALSE(approximately_equal(vector2, vector, value)); - // } + SECTION("Differ by more than provided tolerance") { + double value = 1e-1; + pscalar2->set_elem({}, TestType{43.0}); + pvector2->set_elem({0}, TestType{2.23}); + Tensor scalar2(s0, std::move(pscalar2)); + Tensor vector2(s1, std::move(pvector2)); + REQUIRE_FALSE(approximately_equal(scalar, scalar2, value)); + REQUIRE_FALSE(approximately_equal(scalar2, scalar, value)); + REQUIRE_FALSE(approximately_equal(vector, vector2, value)); + REQUIRE_FALSE(approximately_equal(vector2, vector, value)); + } - // SECTION("Differ by less than provided tolerance") { - // TestType value = 1e-10; - // pscalar2->set_elem({}, TestType{42.0} + value); - // pvector2->set_elem({0}, TestType{1.23} + value); - // Tensor scalar2(s0, std::move(pscalar2)); - // Tensor vector2(s1, std::move(pvector2)); - // REQUIRE(approximately_equal(scalar, scalar2, 1e-1)); - // REQUIRE(approximately_equal(scalar2, scalar, 1e-1)); - // REQUIRE(approximately_equal(vector, vector2, 1e-1)); - // REQUIRE(approximately_equal(vector2, vector, 1e-1)); - // } + SECTION("Differ by less than provided 
tolerance") { + TestType value = 1e-10; + pscalar2->set_elem({}, TestType{42.0} + value); + pvector2->set_elem({0}, TestType{1.23} + value); + Tensor scalar2(s0, std::move(pscalar2)); + Tensor vector2(s1, std::move(pvector2)); + REQUIRE(approximately_equal(scalar, scalar2, 1e-1)); + REQUIRE(approximately_equal(scalar2, scalar, 1e-1)); + REQUIRE(approximately_equal(vector, vector2, 1e-1)); + REQUIRE(approximately_equal(vector2, vector, 1e-1)); + } } diff --git a/tests/cxx/unit_tests/tensorwrapper/operations/norm.cpp b/tests/cxx/unit_tests/tensorwrapper/operations/norm.cpp index c8c127fc..26b51208 100644 --- a/tests/cxx/unit_tests/tensorwrapper/operations/norm.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/operations/norm.cpp @@ -56,7 +56,6 @@ TEMPLATE_LIST_TEST_CASE("infinity_norm", "", types::floating_point_types) { SECTION("rank 4 tensor") { shape::Smooth s{2, 2, 2, 2}; Tensor t(s, testing::eigen_tensor4()); - std::cout << t << std::endl; Tensor corr(shape::Smooth{}, testing::eigen_scalar(16)); auto norm = infinity_norm(t); REQUIRE(approximately_equal(corr, norm)); From 48808f4d20d9112447ef730cccd2706646f4a382 Mon Sep 17 00:00:00 2001 From: "Ryan M. Richard" Date: Mon, 12 Jan 2026 14:03:56 -0600 Subject: [PATCH 12/13] diis works again --- include/tensorwrapper/buffer/contiguous.hpp | 10 ++ src/tensorwrapper/buffer/contiguous.cpp | 22 ++++ src/tensorwrapper/diis/diis.cpp | 3 +- src/tensorwrapper/tensor/tensor_class.cpp | 2 +- .../tensorwrapper/buffer/contiguous.cpp | 20 +++ .../unit_tests/tensorwrapper/diis/diis.cpp | 115 +++++++++--------- 6 files changed, 113 insertions(+), 59 deletions(-) diff --git a/include/tensorwrapper/buffer/contiguous.hpp b/include/tensorwrapper/buffer/contiguous.hpp index 9c0921dc..8ab50205 100644 --- a/include/tensorwrapper/buffer/contiguous.hpp +++ b/include/tensorwrapper/buffer/contiguous.hpp @@ -387,4 +387,14 @@ inline const Contiguous& make_contiguous(const buffer::BufferBase& buffer) { return *pcontiguous; } +/** @brief Makes a new Contiguous buffer using @p buffer as a guide. + * + * This function is used to create a new buffer using @p buffer as a type hint. + * More specifically, this function will create a default initialized + * Contiguous buffer whose shape is given by @p shape. The type of the elements + * is taken from the type of the elements in @p buffer. 
+ */
+Contiguous make_contiguous(const buffer::BufferBase& buffer,
+                           const shape::ShapeBase& shape);
+
 } // namespace tensorwrapper::buffer
diff --git a/src/tensorwrapper/buffer/contiguous.cpp b/src/tensorwrapper/buffer/contiguous.cpp
index 8dab5ade..41c1cc4c 100644
--- a/src/tensorwrapper/buffer/contiguous.cpp
+++ b/src/tensorwrapper/buffer/contiguous.cpp
@@ -301,4 +301,26 @@ void Contiguous::update_hash_() const {
     m_recalculate_hash_ = false;
 }
 
+// -----------------------------------------------------------------------------
+// Free functions
+// -----------------------------------------------------------------------------
+
+Contiguous make_contiguous(const buffer::BufferBase& buffer,
+                           const shape::ShapeBase& shape) {
+    auto smooth_view = shape.as_smooth();
+    using size_type = typename decltype(smooth_view)::size_type;
+    std::vector<size_type> extents(smooth_view.rank());
+    for(size_type i = 0; i < smooth_view.rank(); ++i)
+        extents[i] = smooth_view.extent(i);
+    shape::Smooth smooth_shape(extents.begin(), extents.end());
+
+    auto lambda = [=](const auto& span) {
+        using value_type = std::decay_t<decltype(span[0])>;
+        std::vector<value_type> data(smooth_shape.size());
+        return Contiguous(std::move(data), std::move(smooth_shape));
+    };
+
+    return visit_contiguous_buffer(lambda, make_contiguous(buffer));
+}
+
 } // namespace tensorwrapper::buffer
diff --git a/src/tensorwrapper/diis/diis.cpp b/src/tensorwrapper/diis/diis.cpp
index 6bc4d221..585768ae 100644
--- a/src/tensorwrapper/diis/diis.cpp
+++ b/src/tensorwrapper/diis/diis.cpp
@@ -70,11 +70,12 @@ tensor_type DIIS::extrapolate(const tensor_type& X, const tensor_type& E) {
         tensor_type& E_j = m_errors_.at(j);
 
         tensor_type temp;
+        auto ei = buffer::make_contiguous(E_i.buffer());
+        auto ej = buffer::make_contiguous(E_j.buffer());
         temp("") = E_i("mu,nu") * E_j("mu,nu");
         const auto& bdown = buffer::make_contiguous(temp.buffer());
         Kernel k;
         m_B_(i, j) = buffer::visit_contiguous_buffer(k, bdown);
-
         // Fill in lower triangle
         if(i != j) m_B_(j, i) = m_B_(i, j);
     }
diff --git a/src/tensorwrapper/tensor/tensor_class.cpp b/src/tensorwrapper/tensor/tensor_class.cpp
index 454d283f..9b3f9e55 100644
--- a/src/tensorwrapper/tensor/tensor_class.cpp
+++ b/src/tensorwrapper/tensor/tensor_class.cpp
@@ -117,7 +117,7 @@ Tensor::dsl_reference Tensor::binary_common_(FxnType&& fxn,
     const auto& lbuffer = lobject.buffer();
     const auto& rbuffer = robject.buffer();
 
-    auto buffer = buffer::make_contiguous(pphys_layout->shape());
+    auto buffer = buffer::make_contiguous(lbuffer, pphys_layout->shape());
     auto pthis_buffer = std::make_unique<buffer::Contiguous>(std::move(buffer));
 
     fxn(*pthis_buffer, this_labels, lbuffer(llabels), rbuffer(rlabels));
diff --git a/tests/cxx/unit_tests/tensorwrapper/buffer/contiguous.cpp b/tests/cxx/unit_tests/tensorwrapper/buffer/contiguous.cpp
index 50bb2e8f..d31553f9 100644
--- a/tests/cxx/unit_tests/tensorwrapper/buffer/contiguous.cpp
+++ b/tests/cxx/unit_tests/tensorwrapper/buffer/contiguous.cpp
@@ -512,3 +512,23 @@ TEMPLATE_LIST_TEST_CASE("Contiguous", "", types::floating_point_types) {
         }
     }
 }
+
+TEST_CASE("make_contiguous(buffer, shape)") {
+    using buffer::Contiguous;
+    using tensor_type = Tensor;
+    using shape_type = shape::Smooth;
+
+    std::vector data = {1.0, 2.0, 3.0, 4.0};
+    shape_type shape({2, 2});
+    buffer::Contiguous buffer(data, shape);
+
+    tensor_type tensor(shape.clone(),
+                       std::make_unique<buffer::Contiguous>(buffer));
+
+    shape_type other({3, 4, 5});
+    Contiguous contig = buffer::make_contiguous(tensor.buffer(), other);
+
+    REQUIRE(contig.shape() == other);
+    REQUIRE(contig.size() == 60); // 3*4*5 = 60
+    REQUIRE(contig.get_elem({0, 0, 0}) == 0.0);
+}
diff --git a/tests/cxx/unit_tests/tensorwrapper/diis/diis.cpp b/tests/cxx/unit_tests/tensorwrapper/diis/diis.cpp
index c8c54642..672f2342 100644
--- a/tests/cxx/unit_tests/tensorwrapper/diis/diis.cpp
+++ b/tests/cxx/unit_tests/tensorwrapper/diis/diis.cpp
@@ -23,72 +23,73 @@ using tensor_type = tensorwrapper::Tensor;
 
 template<typename FloatType>
 tensor_type make_tensor(std::vector<FloatType> elems) {
-    auto pbuffer = tensorwrapper::testing::eigen_matrix(2, 2);
-    pbuffer->set_elem({0, 0}, elems[0]);
-    pbuffer->set_elem({0, 1}, elems[1]);
-    pbuffer->set_elem({1, 0}, elems[2]);
-    pbuffer->set_elem({1, 1}, elems[3]);
-    auto pshape = pbuffer->layout().shape().clone();
+    using namespace tensorwrapper;
+    shape::Smooth shape{2, 2};
+    buffer::Contiguous buffer(std::move(elems), shape);
+    auto pbuffer = std::make_unique<buffer::Contiguous>(std::move(buffer));
+    auto pshape = pbuffer->layout().shape().clone();
     return tensor_type(std::move(pshape), std::move(pbuffer));
 }
 
 TEMPLATE_LIST_TEST_CASE("DIIS", "",
                         tensorwrapper::types::floating_point_types) {
-    throw std::runtime_error("DIIS tests not yet implemented.");
     // Inputs
-    // tensor_type i1 = make_tensor({1.0, 2.0, 3.0, 4.0});
-    // tensor_type i2 = make_tensor({6.0, 5.0, 8.0, 7.0});
-    // tensor_type i3 = make_tensor({12.0, 11.0, 10.0, 9.0});
+    std::vector<TestType> i1_data{1.0, 2.0, 3.0, 4.0};
+    std::vector<TestType> i2_data{6.0, 5.0, 8.0, 7.0};
+    std::vector<TestType> i3_data{12.0, 11.0, 10.0, 9.0};
+    tensor_type i1 = make_tensor(i1_data);
+    tensor_type i2 = make_tensor(i2_data);
+    tensor_type i3 = make_tensor(i3_data);
 
-    // SECTION("Typedefs") {
-    //     SECTION("size_type") {
-    //         using corr_t = std::size_t;
-    //         using the_t = diis_type::size_type;
-    //         STATIC_REQUIRE(std::is_same_v);
-    //     }
-    //     SECTION("tensor_type") {
-    //         using corr_t = tensor_type;
-    //         using the_t = diis_type::tensor_type;
-    //         STATIC_REQUIRE(std::is_same_v);
-    //     }
-    // }
+    SECTION("Typedefs") {
+        SECTION("size_type") {
+            using corr_t = std::size_t;
+            using the_t = diis_type::size_type;
+            STATIC_REQUIRE(std::is_same_v<corr_t, the_t>);
+        }
+        SECTION("tensor_type") {
+            using corr_t = tensor_type;
+            using the_t = diis_type::tensor_type;
+            STATIC_REQUIRE(std::is_same_v<corr_t, the_t>);
+        }
+    }
 
-    // SECTION("Comparisons") {
-    //     auto defaulted = diis_type();
-    //     auto two_samples_max = diis_type(2);
-    //     auto extrapolate_used = diis_type();
-    //     auto temp = extrapolate_used.extrapolate(i1, i3);
-    //     SECTION("Equals") {
-    //         REQUIRE(defaulted == diis_type());
-    //         REQUIRE(two_samples_max == diis_type(2));
-    //     }
-    //     SECTION("Max samples not equal") {
-    //         REQUIRE(two_samples_max != defaulted);
-    //     }
-    //     SECTION("Recorded values different") {
-    //         REQUIRE(defaulted != extrapolate_used);
-    //     }
-    // }
+    SECTION("Comparisons") {
+        auto defaulted = diis_type();
+        auto two_samples_max = diis_type(2);
+        auto extrapolate_used = diis_type();
+        auto temp = extrapolate_used.extrapolate(i1, i3);
+        SECTION("Equals") {
+            REQUIRE(defaulted == diis_type());
+            REQUIRE(two_samples_max == diis_type(2));
+        }
+        SECTION("Max samples not equal") {
+            REQUIRE(two_samples_max != defaulted);
+        }
+        SECTION("Recorded values different") {
+            REQUIRE(defaulted != extrapolate_used);
+        }
+    }
 
-    // SECTION("extrapolate") {
-    //     // Outputs
-    //     std::vector v0{1.0, 2.0, 3.0, 4.0};
-    //     std::vector v1{12.0, 8.6, 14.0, 10.6};
-    //     std::vector v2{15.35294118, 14.35294118, 11.11764706,
-    //                    10.11764706};
-    //     tensor_type corr1 = make_tensor(v0);
-    //     tensor_type corr2 = make_tensor(v1);
-    //     tensor_type corr3 = make_tensor(v2);
+    SECTION("extrapolate") {
+        // Outputs
+        std::vector<TestType> v0{1.0, 2.0, 3.0, 4.0};
+        std::vector<TestType> v1{12.0, 8.6, 14.0, 10.6};
+        std::vector<TestType> v2{15.35294118, 14.35294118, 11.11764706,
+                                 10.11764706};
+        tensor_type corr1 = make_tensor(v0);
+        tensor_type corr2 = make_tensor(v1);
+        tensor_type corr3 = make_tensor(v2);
 
-    //     // Call extrapolate enough to require removing an old value
-    //     auto diis = diis_type(2);
-    //     auto output1 = diis.extrapolate(i1, i3);
-    //     auto output2 = diis.extrapolate(i2, i2);
-    //     auto output3 = diis.extrapolate(i3, i1);
+        // Call extrapolate enough to require removing an old value
+        auto diis = diis_type(2);
+        auto output1 = diis.extrapolate(i1, i3);
+        auto output2 = diis.extrapolate(i2, i2);
+        auto output3 = diis.extrapolate(i3, i1);
 
-    //     using tensorwrapper::operations::approximately_equal;
-    //     REQUIRE(approximately_equal(output1, corr1, 1E-6));
-    //     REQUIRE(approximately_equal(output2, corr2, 1E-6));
-    //     REQUIRE(approximately_equal(output3, corr3, 1E-6));
-    // }
+        using tensorwrapper::operations::approximately_equal;
+        REQUIRE(approximately_equal(output1, corr1, 1E-6));
+        REQUIRE(approximately_equal(output2, corr2, 1E-6));
+        REQUIRE(approximately_equal(output3, corr3, 1E-6));
+    }
 }

From a9fa99e353a13a4ff1530f007aa05dec8cc0eb53 Mon Sep 17 00:00:00 2001
From: "Ryan M. Richard"
Date: Tue, 13 Jan 2026 11:07:10 -0600
Subject: [PATCH 13/13] python works again

---
 src/python/tensor/export_tensor.cpp | 126 +++++++++++++++++++---------
 1 file changed, 86 insertions(+), 40 deletions(-)

diff --git a/src/python/tensor/export_tensor.cpp b/src/python/tensor/export_tensor.cpp
index 0e225b31..31877149 100644
--- a/src/python/tensor/export_tensor.cpp
+++ b/src/python/tensor/export_tensor.cpp
@@ -20,52 +20,98 @@
 #include 
 
 namespace tensorwrapper {
+namespace {
 
-auto make_buffer_info(buffer::Contiguous& buffer) {
-    throw std::runtime_error("Fix me!!!!");
-    using size_type = std::size_t;
-    constexpr auto nbytes = sizeof(double);
-    const auto desc = pybind11::format_descriptor<double>::format();
-    // constexpr auto nbytes = sizeof(FloatType);
-    // const auto desc = pybind11::format_descriptor::format();
-    const auto rank = buffer.rank();
+template<typename FloatType>
+auto get_desc_() -> decltype(pybind11::format_descriptor<double>::format()) {
+    if constexpr(std::is_same_v<FloatType, float>)
+        return pybind11::format_descriptor<float>::format();
+    else if constexpr(std::is_same_v<FloatType, double>)
+        return pybind11::format_descriptor<double>::format();
+    else if constexpr(std::is_same_v<FloatType, long double>)
+        return pybind11::format_descriptor<long double>::format();
+    else
+        throw std::runtime_error("Unsupported floating point type!");
+}
 
-    const auto smooth_shape = buffer.layout().shape().as_smooth();
+struct GetBufferDataKernel {
+    using size_type = std::size_t;
+    using shape_type = shape::Smooth;
+
+    GetBufferDataKernel(size_type rank, shape_type& smooth_shape) :
+      m_rank(rank), m_psmooth_shape(&smooth_shape) {}
+
+    template<typename T>
+    pybind11::buffer_info operator()(std::span<T> buffer) {
+        using clean_type = std::decay_t<T>;
 
-    std::vector<size_type> shape(rank);
-    std::vector<size_type> strides(rank);
-    for(size_type rank_i = 0; rank_i < rank; ++rank_i) {
-        shape[rank_i] = smooth_shape.extent(rank_i);
-        size_type stride_i = 1;
-        for(size_type mode_i = rank_i + 1; mode_i < rank; ++mode_i)
-            stride_i *= smooth_shape.extent(mode_i);
-        strides[rank_i] = stride_i * nbytes;
+        // We have only tested with doubles at the moment.
+        if constexpr(!std::is_same_v<clean_type, double>)
+            throw std::runtime_error("Expected doubles in the buffer!");
+
+        constexpr auto nbytes = sizeof(clean_type);
+
+        const auto desc = get_desc_<clean_type>();
+        const auto rank = m_rank;
+
+        std::vector<size_type> shape(rank);
+        std::vector<size_type> strides(rank);
+        for(size_type rank_i = 0; rank_i < rank; ++rank_i) {
+            shape[rank_i] = m_psmooth_shape->extent(rank_i);
+            size_type stride_i = 1;
+            for(size_type mode_i = rank_i + 1; mode_i < rank; ++mode_i)
+                stride_i *= m_psmooth_shape->extent(mode_i);
+            strides[rank_i] = stride_i * nbytes;
+        }
+        auto* ptr = const_cast<clean_type*>(buffer.data());
+        return pybind11::buffer_info(ptr, nbytes, desc, rank, shape, strides);
     }
+
+    size_type m_rank;
+    shape_type* m_psmooth_shape;
+};
+
+template<typename FloatType>
+Tensor make_tensor_(pybind11::buffer_info& info) {
+    if(info.format != pybind11::format_descriptor<FloatType>::format())
+        throw std::runtime_error(
+          "Incompatible format: expected a float array!");
+
+    // Work out physical layout of tensor
+    std::vector<std::size_t> dims(info.ndim);
+    for(auto i = 0; i < info.ndim; ++i) { dims[i] = info.shape[i]; }
+    shape::Smooth shape(dims.begin(), dims.end());
+    layout::Physical layout(shape);
+
+    // Fill in Buffer object
+    auto n_elements = shape.size();
+    std::vector<FloatType> data(n_elements);
+    auto pData = static_cast<FloatType*>(info.ptr);
+    std::copy(pData, pData + n_elements, data.begin());
+    auto pBuffer = std::make_unique<buffer::Contiguous>(data, shape);
+
+    return Tensor(shape, std::move(pBuffer));
+}
+
+} // namespace
+
+auto make_buffer_info(buffer::Contiguous& buffer) {
+    const auto rank = buffer.rank();
+    const auto smooth_shape = buffer.layout().shape().as_smooth();
+    std::vector<std::size_t> extents(rank);
+    for(std::size_t i = 0; i < rank; ++i) extents[i] = smooth_shape.extent(i);
+    shape::Smooth shape(extents.begin(), extents.end());
+    GetBufferDataKernel kernel(rank, shape);
+    return buffer::visit_contiguous_buffer(kernel, buffer);
+}
 
 Tensor make_tensor(pybind11::buffer b) {
-    throw std::runtime_error("Fix me!!!!");
-    // pybind11::buffer_info info = b.request();
-    // if(info.format != pybind11::format_descriptor::format())
-    //     throw std::runtime_error(
-    //         "Incompatible format: expected a double array!");
-
-    // std::vector dims(info.ndim);
-    // for(auto i = 0; i < info.ndim; ++i) { dims[i] = info.shape[i]; }
-
-    // parallelzone::runtime::RuntimeView rv = {};
-    // shape::Smooth matrix_shape{dims.begin(), dims.end()};
-    // layout::Physical matrix_layout(matrix_shape);
-    // auto pBuffer = std::make_unique(rv, matrix_layout);
-
-    // auto n_elements = std::accumulate(dims.begin(), dims.end(), 1,
-    //                                   std::multiplies());
-    // auto pData = static_cast(info.ptr);
-    // for(auto i = 0; i < n_elements; ++i) pBuffer->set_elem({i}, pData[i]);
-
-    // return Tensor(matrix_shape, std::move(pBuffer));
+    pybind11::buffer_info info = b.request();
+    if(info.format == pybind11::format_descriptor<double>::format())
+        return make_tensor_<double>(info);
+    else
+        throw std::runtime_error(
+          "Incompatible format: expected a double array!");
 }
 
 void export_tensor(py_module_reference m) {
@@ -79,7 +125,7 @@ void export_tensor(py_module_reference m) {
         .def_buffer([](Tensor& t) {
            auto pbuffer = dynamic_cast<buffer::Contiguous*>(&t.buffer());
            if(pbuffer == nullptr)
-                throw std::runtime_error("Expected buffer to hold doubles");
+                throw std::runtime_error("Expected buffer to be contiguous");
            return make_buffer_info(*pbuffer);
        });
 }
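
A closing note on the API pattern that patches 11-13 converge on: a
BufferBase is first downcast to Contiguous with buffer::make_contiguous,
and its typed elements are then recovered through
buffer::visit_contiguous_buffer, which invokes a generic callable with a
std::span over the data. The sketch below is a minimal illustration of
that pattern. It assumes only the signatures shown in the patches above
(including an assumed umbrella header path); sum_elements itself is a
hypothetical helper written for this note, not part of the series:

    #include <tensorwrapper/tensorwrapper.hpp> // assumed umbrella header
    #include <type_traits>

    // Hypothetical example: reduce any buffer to the sum of its elements.
    // Mirrors the Kernel visitor in diis.cpp above: uncertain floating-
    // point types expose their value via .mean(), plain types are used
    // directly.
    double sum_elements(const tensorwrapper::buffer::BufferBase& b) {
        // Downcast (or convert) to a Contiguous buffer first.
        const auto& contig = tensorwrapper::buffer::make_contiguous(b);
        // Generic visitor; the element type is only known inside the call.
        auto kernel = [](const auto& span) {
            using clean_type = std::decay_t<decltype(span[0])>;
            double total = 0.0;
            for(const auto& x : span) {
                if constexpr(tensorwrapper::types::is_uncertain_v<clean_type>)
                    total += x.mean();
                else
                    total += static_cast<double>(x);
            }
            return total;
        };
        return tensorwrapper::buffer::visit_contiguous_buffer(kernel, contig);
    }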