From 091084bc319f9a03dc9fd39d1c3ac223c5d91604 Mon Sep 17 00:00:00 2001
From: xkjiang-srfv <52552899+xkjiang-srfv@users.noreply.github.com>
Date: Wed, 25 Aug 2021 19:06:27 +0800
Subject: [PATCH] Add files via upload

---
 ActivationPrune.py                            | 127 ++++
 ActivationPrune.xlsx                          | Bin 0 -> 11602 bytes
 Conv2dNew.py                                  | 194 ++++++
 K_means.py                                    | 153 +++++
 WeightPrune.py                                | 179 +++++
 main.py                                       |  23 +
 model.py                                      | 636 ++++++++++++++++++
 train.py                                      | 285 ++++++++
 ...\346\230\216\346\226\207\346\241\243.docx" | Bin 0 -> 31636 bytes
 9 files changed, 1597 insertions(+)
 create mode 100644 ActivationPrune.py
 create mode 100644 ActivationPrune.xlsx
 create mode 100644 Conv2dNew.py
 create mode 100644 K_means.py
 create mode 100644 WeightPrune.py
 create mode 100644 main.py
 create mode 100644 model.py
 create mode 100644 train.py
 create mode 100644 "\350\257\264\346\230\216\346\226\207\346\241\243.docx"

diff --git a/ActivationPrune.py b/ActivationPrune.py
new file mode 100644
index 0000000..fd987d4
--- /dev/null
+++ b/ActivationPrune.py
@@ -0,0 +1,127 @@
+import copy
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.optim as optim
+from model import *
+from train import *
+from Conv2dNew import Execution
+
+class Conv2dTest(nn.Conv2d):
+    def __init__(self,
+                 ratio,
+                 in_channels,
+                 out_channels,
+                 kernel_size,
+                 stride=1,
+                 padding=0,
+                 dilation=1,
+                 groups=1,
+                 bias=True,
+                 padding_mode='zeros',
+                 ):
+        super(Conv2dTest, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups,
+                                         bias, padding_mode)
+        self.ratio = ratio
+
+    def forward(self, input):
+        E = Execution(self.ratio)
+        output = E.conv2d(input, self.weight, self.bias, self.stride, self.padding)
+        return output
+
+class LinearTest(nn.Linear):
+    def __init__(self,
+                 in_features,
+                 out_features,
+                 bias=True,
+                 ):
+        super(LinearTest, self).__init__(in_features, out_features, bias)
+
+    def forward(self, input):
+        output = F.linear(input, self.weight, self.bias)
+        return output
+
+def prepare(model, ratio, inplace=False):
+    # recursively swap every Conv2d/Linear for its activation-pruning variant
+    def addActivationPruneOp(module):
+        nonlocal layer_cnt
+        for name, child in module.named_children():
+            if isinstance(child, nn.Conv2d):
+                activationPruneConv = Conv2dTest(
+                    ratio,
+                    child.in_channels,
+                    child.out_channels, child.kernel_size, stride=child.stride, padding=child.padding,
+                    dilation=child.dilation, groups=child.groups, bias=(child.bias is not None),
+                    padding_mode=child.padding_mode
+                )
+                if child.bias is not None:
+                    activationPruneConv.bias = child.bias
+                activationPruneConv.weight = child.weight
+                module._modules[name] = activationPruneConv
+                layer_cnt += 1
+            elif isinstance(child, nn.Linear):
+                activationPruneLinear = LinearTest(
+                    child.in_features, child.out_features,
+                    bias=(child.bias is not None)
+                )
+                if child.bias is not None:
+                    activationPruneLinear.bias = child.bias
+                activationPruneLinear.weight = child.weight
+                module._modules[name] = activationPruneLinear
+                layer_cnt += 1
+            else:
+                addActivationPruneOp(child)  # recurse into containers; layers such as MaxPool stay unchanged
+    layer_cnt = 0
+    if not inplace:
+        model = copy.deepcopy(model)
+    addActivationPruneOp(model)  # attach the activation-pruning op to every layer
+    return model
+
+def getPruneModel(model_name, weight_file_path, pattern, ratio):
+    if model_name == 'LeNet':
+        model_orign = getLeNet()  # build the original model skeleton
+    elif model_name == 'AlexNet':
+        model_orign = getAlexnet()
+    elif model_name == 'VGG16':
+        model_orign = get_vgg16()
+    elif model_name == 'SqueezeNet':
+        model_orign = get_squeezenet()
+    elif model_name == 'InceptionV3':
+        model_orign = get_inception_v3()
+    elif model_name == 'ResNet':
+        model_orign = get_resnet18()
+
+    if pattern == 'test' or pattern == 'retrain':
+        model_orign.load_state_dict(torch.load(weight_file_path))  # load the trained weights into the skeleton
+    activationPruneModel = prepare(model_orign, ratio)  # attach the activation-pruning op to every conv and linear layer
+
+    return activationPruneModel
+
+def activationPruneModelOp(model_name, batch_size, img_size, pattern, ratio, epoch):
+    if model_name == 'VGG16' or model_name == 'AlexNet' or model_name == 'ResNet' or model_name == 'vgg16_thu' or model_name == 'SqueezeNet':
+        dataloaders, dataset_sizes = load_cifar10(batch_size=batch_size, pth_path='./data', img_size=img_size)  # pick the dataset
+    elif model_name == 'LeNet':
+        dataloaders, dataset_sizes = load_mnist(batch_size=batch_size, path='./data', img_size=img_size)
+
+    criterion = nn.CrossEntropyLoss()
+    if pattern == 'retrain' or pattern == 'train':
+        weight_file_path = './pth/' + model_name + '/ratio=0' + '/Activation' + '/best.pth'
+        activationPruneModel = getPruneModel(model_name, weight_file_path, pattern, ratio)
+        optimizer = optim.SGD(activationPruneModel.parameters(), lr=0.01, momentum=0.9)
+        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=2, gamma=0.8)  # learning-rate decay schedule
+        train_model_jiang(activationPruneModel, dataloaders, dataset_sizes, ratio, 'activation', criterion=criterion, optimizer=optimizer, name=model_name,
+                          scheduler=scheduler, num_epochs=epoch, rerun=False)  # train the model
+    if pattern == 'test':
+        weight_file_path = './pth/' + model_name + '/ratio=' + str(ratio) + '/Activation/' + 'best.pth'
+        activationPruneModel = getPruneModel(model_name, weight_file_path, pattern, ratio)
+        test_model(activationPruneModel, dataloaders, dataset_sizes, criterion=criterion)
+
diff --git a/ActivationPrune.xlsx b/ActivationPrune.xlsx
new file mode 100644
index 0000000000000000000000000000000000000000..e05380510ab4e69aeb3bdbcaa8d7a2ad2f7c1e8c
GIT binary patch
literal 11602
[base85-encoded binary payload omitted]
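For orientation, a minimal usage sketch of prepare() as defined above; the toy model and ratio are illustrative assumptions, not part of the patch. The swapped-in Conv2dTest layers route their forward pass through Execution.conv2d (Conv2dNew.py), which expects CUDA tensors, so only construction is shown:

    import torch.nn as nn
    from ActivationPrune import prepare

    # hypothetical toy model; any module tree with Conv2d/Linear children works
    toy = nn.Sequential(
        nn.Conv2d(3, 8, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.Flatten(),
        nn.Linear(8 * 32 * 32, 10),
    )
    pruned = prepare(toy, ratio=0.2)  # deep copy: Conv2d -> Conv2dTest, Linear -> LinearTest
    print(pruned)                     # swapped layers share the original weights and biases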
diff --git a/Conv2dNew.py b/Conv2dNew.py
new file mode 100644
[the index line and the first part of this hunk were lost inside the corrupted binary block above; the surviving tail of the file follows]
+        output = [...]',self.W_col,self.X_col) + torch.unsqueeze(self.bias,1)
+        # Reshape into (n_filters, out_height, out_width, batch_size)
+        output = output.reshape(self.output_shape() + (batch_size, ))
+        # Redistribute axes so that batch size comes first
+        return output.permute(3, 0, 1, 2)
+
+    def output_shape(self):
+        channels, height, width = self.input_shape
+        pad_h, pad_w = determine_padding(self.filter_shape, output_shape=self.padding)
+        output_height = (height + np.sum(pad_h) - self.filter_shape[0]) / self.stride[0] + 1
+        output_width = (width + np.sum(pad_w) - self.filter_shape[1]) / self.stride[1] + 1  # width uses the width stride
+        return self.n_filters, int(output_height), int(output_width)
+
+    def parameters(self):
+        return np.prod(self.W.shape) + np.prod(self.w0.shape)
+
+    def compressionRateStatistics(self, input, andSum, compareRatio):
+        pruneNumber = 0
+        zerosNumber = 0
+        for i in range(input.shape[1]):
+            if andSum[i] == 0:
+                zerosNumber += 1
+            if andSum[i] != 0 and andSum[i] <= compareRatio:
+                pruneNumber += 1
+        print('pruneNumberRatio=', pruneNumber / (input.shape[1]))
+        print('zerosNumberRatio=', zerosNumber / (input.shape[1]))
+
+    def accuracyTest(self, andSum):
+        for i in range(len(andSum)):
+            print(i, andSum[i])
+
+    def activationSlidePrune(self, input, compareRatio):
+        matrixOne = torch.ones(input.shape, device='cuda:0')
+
+        x = torch.clone(torch.detach(input))
+        andOp = torch.logical_and(matrixOne, x)  # True wherever the activation is nonzero
+        andSum = torch.sum(andOp, dim=0)         # per-column count of nonzero activations
+
+        # self.compressionRateStatistics(input, andSum, compareRatio)
+        # self.accuracyTest(andSum)
+
+        x1 = x.permute(1, 0)
+        x1[(andSum <= compareRatio), ] = 0       # zero every column at or below the threshold
+        x = x1.permute(1, 0)
+        return x
+
+# image = np.random.randint(0, 255, size=(1, 3, 32, 32)).astype(np.uint8)
+# input_shape = image.squeeze().shape
+# conv2d = Conv2D(16, (3, 3), input_shape=input_shape, padding='same', stride=1)
+# conv2d.initialize(None)
+# output = conv2d.forward_pass(image, training=True)
+# print(output.shape)
\ No newline at end of file
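The surviving activationSlidePrune tail is the heart of the activation scheme: a column is kept only if its count of nonzero activations across the batch exceeds compareRatio. A standalone CPU sketch of that rule, with a toy 4x3 batch (the patch itself runs on 'cuda:0'):

    import torch

    x = torch.tensor([[1., 0., 2.],
                      [0., 0., 3.],
                      [4., 0., 0.],
                      [0., 0., 5.]])
    # count nonzero entries per column, as logical_and(ones, x) does above
    and_sum = torch.sum(torch.logical_and(torch.ones_like(x), x), dim=0)  # tensor([2, 0, 3])
    compare_ratio = 2
    xt = x.t().clone()
    xt[and_sum <= compare_ratio] = 0  # zero the weakly populated columns
    print(xt.t())                     # columns 0 and 1 pruned, column 2 kept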
diff --git a/K_means.py b/K_means.py
new file mode 100644
index 0000000..2a754db
--- /dev/null
+++ b/K_means.py
@@ -0,0 +1,153 @@
+# Clustering algorithm
+
+import random
+import pandas as pd
+import numpy as np
+import copy
+import math
+
+
+# score points against centroids
+def Dis(dataSet, centroids, k):
+    # handle the centroids:
+    # if the previous pass produced fewer than k clusters, top up with random samples
+    if len(centroids) < k:
+        centroids = np.append(centroids, random.sample(list(dataSet), k-len(centroids)), axis=0)
+
+    # handle the data points
+    clalist = []
+    for data in dataSet:
+        # np.tile(data, (k, 1)) stacks the sample k times so it can be scored against every centroid
+        diff = np.tile(data, (k, 1))
+        mul_Diff = np.multiply(diff, centroids)
+        mul_Dist = np.sum(mul_Diff, axis=1)  # row-wise sum: inner product with each centroid
+        clalist.append(mul_Dist)
+    clalist = np.array(clalist)  # len(dataSet) x k array of point-to-centroid similarities (larger = closer)
+    return clalist
+
+
+# update the centroids
+def classify(dataSet, centroids, k):
+    # score every sample against every centroid
+    clalist = Dis(dataSet, centroids, k)
+    # group the samples and compute new centroids
+    minDistIndices = np.argmax(clalist, axis=1)  # best-matching centroid per row (similarity, hence argmax)
+    newCentroids = pd.DataFrame(dataSet).groupby(minDistIndices).mean()  # group rows by assignment, then average
+    newCentroids = newCentroids.values
+
+    # re-rank each new centroid back onto the 1..n integer scale, otherwise fractional values appear
+    for centro in newCentroids:
+        # centro is a 1-D vector
+        sorted_data = np.argsort(centro)  # ranking order
+        value = 1
+        for valueIndex in sorted_data:
+            centro[valueIndex] = value
+            value += 1
+
+    # measure the change
+    # the new clustering may have fewer than k groups
+    if len(newCentroids) != len(centroids):
+        changed = 1  # definitely changed
+    else:
+        changed = newCentroids - centroids  # may be unchanged
+
+    return changed, newCentroids
+
+
+# choose the initial centroids
+def euler_distance(point1: list, point2: list) -> float:
+    """
+    Score two points against each other; works in any dimension.
+    Despite the name, this computes an inner product (larger = more similar),
+    not the Euclidean distance of the original version:
+        distance = 0.0
+        for a, b in zip(point1, point2):
+            distance += math.pow(a - b, 2)
+        return math.sqrt(distance)
+    """
+    distance = 0.0
+    for a, b in zip(point1, point2):
+        distance += a*b
+    return distance
+
+
+def get_closest_dist(point, centroids):
+    min_dist = math.inf  # start at infinity
+    for i, centroid in enumerate(centroids):
+        dist = euler_distance(centroid, point)
+        if dist < min_dist:
+            min_dist = dist
+    return min_dist
+
+
+def kpp_centers(data_set: list, k: int) -> list:
+    """
+    Return k objects from the data set to serve as initial centroids (k-means++ seeding).
+    """
+    cluster_centers = []
+    cluster_centers.append(random.choice(data_set))
+    d = [0 for _ in range(len(data_set))]
+    for _ in range(1, k):
+        total = 0.0
+        for i, point in enumerate(data_set):
+            d[i] = get_closest_dist(point, cluster_centers)  # score against the nearest existing center
+            total += d[i]
+        total *= random.random()
+        for i, di in enumerate(d):  # roulette-wheel selection of the next center
+            total -= di
+            if total > 0:
+                continue
+            cluster_centers.append(data_set[i])
+            break
+    return cluster_centers
+
+
+# classify with k-means
+def kmeans(dataSet, k):
+    # preprocess dataSet into the importance-rank matrix the scoring uses
+    valueSet = np.zeros(dataSet.shape, dtype=int)  # rank matrix
+    for index in range(len(dataSet)):
+        data = dataSet[index]
+        value = valueSet[index]
+        sorted_data = list(map(abs, data))     # absolute values
+        sorted_data = np.argsort(sorted_data)  # ranking order
+        i = 1  # smaller magnitudes get smaller ranks
+        for valueIndex in sorted_data:
+            value[valueIndex] = i
+            i += 1
+
+    # pick the initial centroids
+    # centroids = random.sample(dataSet, k)
+    centroids = kpp_centers(valueSet, k)
+
+    # update the centroids until nothing changes (or the iteration budget runs out)
+    i = 100
+    changed, newCentroids = classify(valueSet, centroids, k)
+    # while(i):  # while np.any(changed != 0)
+    while np.any(changed != 0) and i > 0:
+        changed, newCentroids = classify(valueSet, newCentroids, k)
+        i = i-1
+        print("iteration {}".format(100-i))
+
+    centroids = sorted(newCentroids.tolist())  # tolist() converts the array; sorted() fixes the order
+
+    clalist = Dis(valueSet, centroids, k)
+    minDistIndices = np.argmax(clalist, axis=1)
+    return minDistIndices
+
+
+def getCluster(input, clusters_num):
+    # conv layers are clustered as 4-D tensors, fully connected layers as 2-D
+    if len(input.shape) == 2:  # fully connected layer
+        fcValues = input.detach().cpu().numpy()  # to numpy
+        # input.shape[1] is the number of values in each clustering unit
+        clusterIndex = kmeans(fcValues, clusters_num)  # cluster
+    elif len(input.shape) == 4:  # conv layer
+        kernel_size = input.shape[3]  # kernel size
+        preShape = input.shape[:2]  # first two dims of the 4-D tensor
+        inputCut = input.view(preShape[0]*preShape[1], kernel_size*kernel_size)  # flatten 4-D to 2-D: one row per kernel
+        convValues = inputCut.detach().cpu().numpy()  # to numpy
+        clusterIndex = kmeans(convValues, clusters_num)  # cluster
+        clusterIndex.resize(preShape)
+    else:
+        clusterIndex = None
+
+    return clusterIndex
\ No newline at end of file
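An illustrative call of getCluster() on a random conv weight tensor; the shapes are assumptions for the sketch, and kmeans prints its iteration count as it runs:

    import torch
    from K_means import getCluster

    w = torch.randn(8, 4, 3, 3)          # (out_channels, in_channels, kH, kW)
    idx = getCluster(w, clusters_num=4)  # one cluster id per 3x3 kernel
    print(idx.shape)                     # (8, 4)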
diff --git a/WeightPrune.py b/WeightPrune.py
new file mode 100644
index 0000000..d98f4e4
--- /dev/null
+++ b/WeightPrune.py
@@ -0,0 +1,179 @@
+# -*- coding: utf-8 -*-
+import copy
+import torch
+import torchvision.transforms as transforms
+import torch.optim as optim
+from torch.utils.data import DataLoader
+import torch.nn.utils.prune as prune
+import pandas as pd
+import numpy as np
+from K_means import getCluster
+import torch.nn as nn
+from model import *
+from train import *
+from ActivationPrune import Conv2dTest, LinearTest
+from torch.nn.parameter import Parameter
+
+def scp_upgrade(kernel, old_scp):
+    # accumulate |kernel| into the cluster's sparse convolution pattern (SCP)
+    old_scp += np.abs(kernel.cpu().detach().numpy())
+    return old_scp
+
+def scp_binaeryzation(scps, C):
+    # binarize each SCP: zero the C smallest-magnitude positions, keep the rest
+    if len(scps.shape) == 3:
+        for r in np.arange(0, scps.shape[0]):
+            series = pd.Series(scps[r].ravel())
+            rank_info = series.rank()
+            for i in np.arange(0, scps[r].shape[0]):
+                for j in np.arange(0, scps[r].shape[1]):
+                    index = i*scps[r].shape[0]+j
+                    if (rank_info[index] <= C):
+                        scps[r][i][j] = 0
+                    else:
+                        scps[r][i][j] = 1
+
+    elif len(scps.shape) == 2:
+        for r in np.arange(0, scps.shape[0]):
+            series = pd.Series(scps[r].ravel())
+            rank_info = series.rank()
+            for i in np.arange(0, scps[r].shape[0]):
+                index = i
+                if (rank_info[index] <= C):
+                    scps[r][i] = 0
+                else:
+                    scps[r][i] = 1
+
+class PatternPruningMethod(prune.BasePruningMethod):
+    PRUNING_TYPE = "unstructured"
+
+    def __init__(self, clusters_num, cut_num, pruning_type):
+        self.clusters_num = clusters_num
+        self.cut_num = cut_num
+        self.pruning_type = pruning_type
+        prune.BasePruningMethod.__init__(self)
+
+    def compute_mask(self, t, default_mask):
+        mask = default_mask.clone()  # clone a mask shaped like this layer's filters
+        if self.pruning_type == 'conv':
+            scps = np.zeros(self.clusters_num*default_mask.shape[-1]*default_mask.shape[-1])  # one SCP per cluster, holding that cluster's pattern
+            scps.resize(self.clusters_num, default_mask.shape[-1], default_mask.shape[-1])
+
+            clusters = getCluster(t, self.clusters_num)  # cluster this layer's kernels
+
+            print(clusters)
+
+            for i in np.arange(0, clusters.shape[0]):  # accumulate every kernel into its cluster's SCP
+                for j in np.arange(0, clusters.shape[1]):
+                    scp_upgrade(t[i][j], scps[clusters[i][j]])
+
+            scp_binaeryzation(scps, self.cut_num)  # binarize the SCPs into the actual patterns
+            print(scps)
+
+            for i in np.arange(0, clusters.shape[0]):  # build the final mask from each kernel's cluster pattern
+                for j in np.arange(0, clusters.shape[1]):
+                    mask[i][j] = torch.from_numpy(scps[clusters[i][j]])
+
+        elif self.pruning_type == 'full':
+
+            scps = np.zeros(self.clusters_num*default_mask.shape[-1])
+            scps.resize(self.clusters_num, default_mask.shape[-1])
+
+            clusters = getCluster(t, self.clusters_num)
+
+            print(clusters)
+
+            for i in np.arange(0, clusters.shape[0]):
+                scp_upgrade(t[i], scps[int(clusters[i])])
+
+            scp_binaeryzation(scps, self.cut_num)  # binarize the SCPs into the actual patterns
+            print(scps)
+
+            for i in np.arange(0, clusters.shape[0]):  # build the final mask from each row's cluster pattern
+                mask[i] = torch.from_numpy(scps[int(clusters[i])])
+
+        return mask
+
+def weightPrune(model, ratio, weightParameter, LinearParameter, inplace=False):
+    def activationWeightPruneOp(module):
+        for name, child in module.named_children():
+            if isinstance(child, nn.Conv2d):
+                print(child)
+                print(child.weight.shape)
+                print('clusters_num=6', 'cut_num=', child.weight.shape[-1] * child.weight.shape[-2] / weightParameter,
+                      'pruning_type=conv')
+                convPruning = PatternPruningMethod(clusters_num=6,
+                                                   cut_num=child.weight.shape[-1] * child.weight.shape[-2] / weightParameter,
+                                                   pruning_type='conv')
+                convPruning.apply(child, 'weight', 6, child.weight.shape[-1] * child.weight.shape[-2] / weightParameter, 'conv')
+
+                activationWeightPruneConv = Conv2dTest(
+                    ratio,
+                    child.in_channels,
+                    child.out_channels, child.kernel_size, stride=child.stride, padding=child.padding,
+                    dilation=child.dilation, groups=child.groups, bias=(child.bias is not None),
+                    padding_mode=child.padding_mode
+                )
+                if child.bias is not None:
+                    activationWeightPruneConv.bias = child.bias
+                activationWeightPruneConv.weight = Parameter(child.weight)
+                module._modules[name] = activationWeightPruneConv
+
+            elif isinstance(child, nn.Linear):
+                print(child)
+                print(child.weight.shape)
+                print('clusters_num=8', 'cut_num=', child.weight.shape[-1] / LinearParameter, 'pruning_type=full')
+                fullPruning = PatternPruningMethod(clusters_num=8, cut_num=child.weight.shape[-1] / LinearParameter,
+                                                   pruning_type='full')
+                fullPruning.apply(child, 'weight', 8, child.weight.shape[-1] / LinearParameter, 'full')
+            else:
+                activationWeightPruneOp(child)  # recurse into containers; layers such as MaxPool stay unchanged
+    if not inplace:
+        model = copy.deepcopy(model)
+    activationWeightPruneOp(model)  # attach the pruning op to every layer
+    return model
+
+
+def weightPruneModelOp(model_name, batch_size, img_size, ratio, pattern, epoch, weightParameter, LinearParameter):
+    if model_name == 'LeNet':
+        net = getLeNet()  # build the original model skeleton
+    elif model_name == 'AlexNet':
+        net = getAlexnet()
+    elif model_name == 'VGG16':
+        net = get_vgg16()
+    elif model_name == 'SqueezeNet':
+        net = get_squeezenet()
+    elif model_name == 'InceptionV3':
+        net = get_inception_v3()
+    elif model_name == 'ResNet':
+        net = get_resnet18()
+
+    if model_name == 'VGG16' or model_name == 'AlexNet' or model_name == 'ResNet' or model_name == 'vgg16_thu' or model_name == 'SqueezeNet':
+        dataloaders, dataset_sizes = load_cifar10(batch_size=batch_size, pth_path='./data',
+                                                  img_size=img_size)  # pick the dataset
+    elif model_name == 'LeNet':
+        dataloaders, dataset_sizes = load_mnist(batch_size=batch_size, path='./data', img_size=img_size)
+    criterion = nn.CrossEntropyLoss()
+
+    if pattern == 'retrain':
+        getPth = './pth/' + model_name + '/ratio=' + str(ratio) + '/Activation' + '/best.pth'
+        net.load_state_dict(torch.load(getPth))
+        net = weightPrune(net, ratio, weightParameter, LinearParameter)
+        # build the optimizer after pruning so it optimizes the pruned model's parameters
+        optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
+        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=2, gamma=0.8)  # learning-rate decay schedule
+        train_model_jiang(net, dataloaders, dataset_sizes, ratio, 'weight', criterion=criterion, optimizer=optimizer, name=model_name,
+                          scheduler=scheduler, num_epochs=epoch, rerun=False)
+
+    if pattern == 'test':
+        getPth = './pth/' + model_name + '/ratio=' + str(ratio) + '/ActivationWeight/' + 'best.pth'
+        net = weightPrune(net, ratio, weightParameter, LinearParameter)
+        net.load_state_dict(torch.load(getPth))
+        test_model(net, dataloaders, dataset_sizes, criterion=criterion)
+
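A minimal sketch of applying PatternPruningMethod to a single layer by hand through the standard torch.nn.utils.prune machinery; the cluster and cut numbers are illustrative (weightPrune() derives them per layer from weightParameter/LinearParameter):

    import torch.nn as nn
    from WeightPrune import PatternPruningMethod

    conv = nn.Conv2d(8, 16, kernel_size=3)
    # clusters_num=6, cut_num=3, pruning_type='conv', as in the conv branch above
    PatternPruningMethod.apply(conv, 'weight', 6, 3, 'conv')
    print(hasattr(conv, 'weight_mask'))  # True: prune attaches weight_orig and weight_mask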
diff --git a/main.py b/main.py
new file mode 100644
index 0000000..15de6f2
--- /dev/null
+++ b/main.py
@@ -0,0 +1,23 @@
+from ActivationPrune import *
+from WeightPrune import weightPruneModelOp
+import os
+
+if __name__ == '__main__':
+    model_name = 'LeNet'
+    batch_size = 64
+    img_size = 32
+    ratio = 0.2
+    epochA = 10
+    epochAW = 40
+    patternA = 'retrain'
+    patternW = 'retrain'
+    weightParameter = (4/3)
+    LinearParameter = 4
+    if not os.path.exists('./pth/'+model_name+'/ratio='+str(ratio)):
+        os.makedirs('./pth/'+model_name+'/ratio='+str(ratio)+'/Activation')
+        if patternA != 'train':
+            os.makedirs('./pth/' + model_name + '/ratio=' + str(ratio) + '/ActivationWeight')
+
+    activationPruneModelOp(model_name, batch_size, img_size, patternA, ratio, epochA)
+    if patternA != 'train' and not (patternA == 'test' and ratio == 0):
+        weightPruneModelOp(model_name, batch_size, img_size, ratio, patternW, epochAW, weightParameter, LinearParameter)
\ No newline at end of file
diff --git a/model.py b/model.py
new file mode 100644
index 0000000..aae0ea7
--- /dev/null
+++ b/model.py
@@ -0,0 +1,636 @@
+from collections import OrderedDict
+import torch.nn as nn
+import torch.utils.model_zoo as model_zoo
+import torch.nn.functional as F
+import math
+import torch
+
+class AlexNet(nn.Module):
+
+    def __init__(self, num_classes=10):
+        super(AlexNet, self).__init__()
+        self.features = nn.Sequential(
+            nn.Conv2d(3, 96, kernel_size=11, stride=4, padding=2),
+            nn.ReLU(inplace=True),
+            nn.MaxPool2d(kernel_size=3, stride=2),
+            nn.Conv2d(96, 256, kernel_size=5, padding=2),
+            nn.ReLU(inplace=True),
+            nn.MaxPool2d(kernel_size=3, stride=2),
+            nn.Conv2d(256, 384, kernel_size=3, padding=1),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(384, 384, kernel_size=3, padding=1),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(384, 256, kernel_size=3, padding=1),
+            nn.ReLU(inplace=True),
+            nn.MaxPool2d(kernel_size=3, stride=2),
+        )
+        self.classifier = nn.Sequential(
+            nn.Dropout(),
+            nn.Linear(256 * 6 * 6, 4096),
+            nn.ReLU(inplace=True),
+            nn.Dropout(),
+            nn.Linear(4096, 4096),
+            nn.ReLU(inplace=True),
+            nn.Linear(4096, num_classes),
+            # nn.Softmax()
+        )
+
+    def forward(self, x):
+        if hasattr(self, "first_input_prune"):
+            x = self.first_input_prune(x)
+        x = self.features(x)
+        x = x.view(x.size(0), 256 * 6 * 6)
+        x = self.classifier(x)
+        return x
+
+class LeNet(nn.Module):
+    def __init__(self, num_classes=10):
+        super(LeNet, self).__init__()
+        self.features = nn.Sequential(
+            nn.Conv2d(1, 6, kernel_size=5),
+            nn.ReLU(inplace=True),
+            nn.MaxPool2d(kernel_size=2, stride=2),
+            nn.Conv2d(6, 16, kernel_size=5),
+            nn.ReLU(inplace=True),
+            nn.MaxPool2d(kernel_size=2, stride=2),
+            nn.Conv2d(16, 120, kernel_size=5),
+            nn.ReLU(inplace=True)
+        )
+        self.classifier = nn.Sequential(
+            nn.Linear(120, 84),
+            nn.ReLU(inplace=True),
+            nn.Linear(84, num_classes)
+        )
+
+    def forward(self, x):
+        if hasattr(self, "first_input_prune"):
+            x =
self.first_input_prune(x) + x = self.features(x) + x = x.view(x.size(0), -1) + x = self.classifier(x) + return x + +class VGG(nn.Module): + + def __init__(self, features, num_classes=10): + super(VGG, self).__init__() + self.features = features + self.classifier = nn.Sequential( + nn.Linear(512 * 7 * 7, 4096), + nn.ReLU(inplace=True), + nn.Dropout(), + nn.Linear(4096, 4096), + nn.ReLU(inplace=True), + nn.Dropout(), + nn.Linear(4096, num_classes), + ) + self._initialize_weights() + + def forward(self, x): + if hasattr(self, "first_input_prune"): + x = self.first_input_prune(x) + x = self.features(x) + x = x.view(x.size(0), -1) + x = self.classifier(x) + return x + + def _initialize_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + m.weight.data.normal_(0, math.sqrt(2. / n)) + if m.bias is not None: + m.bias.data.zero_() + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + elif isinstance(m, nn.Linear): + n = m.weight.size(1) + m.weight.data.normal_(0, 0.01) + m.bias.data.zero_() + + +class ResNet(nn.Module): + def __init__(self, block, layers, num_classes=10): + self.inplanes = 64 + super(ResNet, self).__init__() + + m = OrderedDict() + m['conv1'] = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False) + m['bn1'] = nn.BatchNorm2d(64) + m['relu1'] = nn.ReLU(inplace=True) + m['maxpool'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.group1 = nn.Sequential(m) + + self.layer1 = self._make_layer(block, 64, layers[0]) + self.layer2 = self._make_layer(block, 128, layers[1], stride=2) + self.layer3 = self._make_layer(block, 256, layers[2], stride=2) + self.layer4 = self._make_layer(block, 512, layers[3], stride=2) + + self.avgpool = nn.Sequential(nn.AvgPool2d(7)) + + self.group2 = nn.Sequential( + OrderedDict([ + ('fc', nn.Linear(512 * block.expansion, num_classes)) + ]) + ) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + m.weight.data.normal_(0, math.sqrt(2. 
/ n))
+            elif isinstance(m, nn.BatchNorm2d):
+                m.weight.data.fill_(1)
+                m.bias.data.zero_()
+
+    def _make_layer(self, block, planes, blocks, stride=1):
+        downsample = None
+        if stride != 1 or self.inplanes != planes * block.expansion:
+            downsample = nn.Sequential(
+                nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
+                nn.BatchNorm2d(planes * block.expansion),
+            )
+
+        layers = []
+        layers.append(block(self.inplanes, planes, stride, downsample))
+        self.inplanes = planes * block.expansion
+        for i in range(1, blocks):
+            layers.append(block(self.inplanes, planes))
+
+        return nn.Sequential(*layers)
+
+    def forward(self, x):
+        x = self.group1(x)
+
+        x = self.layer1(x)
+        x = self.layer2(x)
+        x = self.layer3(x)
+        x = self.layer4(x)
+
+        x = self.avgpool(x)
+        x = x.view(x.size(0), -1)
+        x = self.group2(x)
+
+        return x
+
+
+class ResNetBasicBlock(nn.Module):
+    expansion = 1
+
+    def __init__(self, inplanes, planes, stride=1, downsample=None):
+        super(ResNetBasicBlock, self).__init__()
+        m = OrderedDict()
+        m['conv1'] = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
+        m['relu1'] = nn.ReLU(inplace=True)
+        m['conv2'] = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
+        self.group1 = nn.Sequential(m)
+        self.relu = nn.Sequential(nn.ReLU(inplace=True))
+        self.downsample = downsample
+
+    def forward(self, x):
+        if self.downsample is not None:
+            residual = self.downsample(x)
+        else:
+            residual = x
+        out = self.group1(x) + residual
+        out = self.relu(out)
+        return out
+
+class Fire(nn.Module):
+
+    def __init__(self, inplanes, squeeze_planes,
+                 expand1x1_planes, expand3x3_planes):
+        super(Fire, self).__init__()
+        self.inplanes = inplanes
+
+        self.group1 = nn.Sequential(
+            OrderedDict([
+                ('squeeze', nn.Conv2d(inplanes, squeeze_planes, kernel_size=1)),
+                ('squeeze_activation', nn.ReLU(inplace=True))
+            ])
+        )
+
+        self.group2 = nn.Sequential(
+            OrderedDict([
+                ('expand1x1', nn.Conv2d(squeeze_planes, expand1x1_planes, kernel_size=1)),
+                ('expand1x1_activation', nn.ReLU(inplace=True))
+            ])
+        )
+
+        self.group3 = nn.Sequential(
+            OrderedDict([
+                ('expand3x3', nn.Conv2d(squeeze_planes, expand3x3_planes, kernel_size=3, padding=1)),
+                ('expand3x3_activation', nn.ReLU(inplace=True))
+            ])
+        )
+
+    def forward(self, x):
+        x = self.group1(x)
+        return torch.cat([self.group2(x), self.group3(x)], 1)
+
+
+class SqueezeNet(nn.Module):
+
+    def __init__(self, num_classes=1000):
+        super(SqueezeNet, self).__init__()
+        self.num_classes = num_classes
+        self.features = nn.Sequential(
+            nn.Conv2d(3, 96, kernel_size=7, stride=2),
+            nn.ReLU(inplace=True),
+            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
+            Fire(96, 16, 64, 64),
+            Fire(128, 16, 64, 64),
+            Fire(128, 32, 128, 128),
+            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
+            Fire(256, 32, 128, 128),
+            Fire(256, 48, 192, 192),
+            Fire(384, 48, 192, 192),
+            Fire(384, 64, 256, 256),
+            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
+            Fire(512, 64, 256, 256),
+        )
+        # Final convolution is initialized differently from the rest
+        final_conv = nn.Conv2d(512, num_classes, kernel_size=1)
+        self.classifier = nn.Sequential(
+            nn.Dropout(p=0.5),
+            final_conv,
+            nn.ReLU(inplace=True),
+            nn.AvgPool2d(13)
+        )
+
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                gain = 2.0
+                if m is final_conv:
+                    m.weight.data.normal_(0, 0.01)
+                else:
+                    fan_in = m.kernel_size[0] * m.kernel_size[1] * m.in_channels
+                    u = math.sqrt(3.0 * gain / fan_in)
+                    m.weight.data.uniform_(-u, u)
+                if m.bias
is not None: + m.bias.data.zero_() + + def forward(self, x): + x = self.features(x) + x = self.classifier(x) + return x.view(x.size(0), self.num_classes) + + +class Inception3(nn.Module): + + def __init__(self, num_classes=1000, aux_logits=False, transform_input=False): + super(Inception3, self).__init__() + self.aux_logits = aux_logits + self.transform_input = transform_input + self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2) + self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3) + self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1) + self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1) + self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3) + self.Mixed_5b = InceptionA(192, pool_features=32) + self.Mixed_5c = InceptionA(256, pool_features=64) + self.Mixed_5d = InceptionA(288, pool_features=64) + self.Mixed_6a = InceptionB(288) + self.Mixed_6b = InceptionC(768, channels_7x7=128) + self.Mixed_6c = InceptionC(768, channels_7x7=160) + self.Mixed_6d = InceptionC(768, channels_7x7=160) + self.Mixed_6e = InceptionC(768, channels_7x7=192) + if aux_logits: + self.AuxLogits = InceptionAux(768, num_classes) + self.Mixed_7a = InceptionD(768) + self.Mixed_7b = InceptionE(1280) + self.Mixed_7c = InceptionE(2048) + self.group1 = nn.Sequential( + OrderedDict([ + ('fc', nn.Linear(2048, num_classes)) + ]) + ) + + for m in self.modules(): + if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear): + import scipy.stats as stats + stddev = m.stddev if hasattr(m, 'stddev') else 0.1 + X = stats.truncnorm(-2, 2, scale=stddev) + values = torch.Tensor(X.rvs(m.weight.data.numel())) + m.weight.data.copy_(values.reshape(m.weight.shape)) + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + + def forward(self, x): + if self.transform_input: + x = x.clone() + x[0] = x[0] * (0.229 / 0.5) + (0.485 - 0.5) / 0.5 + x[1] = x[1] * (0.224 / 0.5) + (0.456 - 0.5) / 0.5 + x[2] = x[2] * (0.225 / 0.5) + (0.406 - 0.5) / 0.5 + # 299 x 299 x 3 + x = self.Conv2d_1a_3x3(x) + # 149 x 149 x 32 + x = self.Conv2d_2a_3x3(x) + # 147 x 147 x 32 + x = self.Conv2d_2b_3x3(x) + # 147 x 147 x 64 + x = F.max_pool2d(x, kernel_size=3, stride=2) + # 73 x 73 x 64 + x = self.Conv2d_3b_1x1(x) + # 73 x 73 x 80 + x = self.Conv2d_4a_3x3(x) + # 71 x 71 x 192 + x = F.max_pool2d(x, kernel_size=3, stride=2) + # 35 x 35 x 192 + x = self.Mixed_5b(x) + # 35 x 35 x 256 + x = self.Mixed_5c(x) + # 35 x 35 x 288 + x = self.Mixed_5d(x) + # 35 x 35 x 288 + x = self.Mixed_6a(x) + # 17 x 17 x 768 + x = self.Mixed_6b(x) + # 17 x 17 x 768 + x = self.Mixed_6c(x) + # 17 x 17 x 768 + x = self.Mixed_6d(x) + # 17 x 17 x 768 + x = self.Mixed_6e(x) + # 17 x 17 x 768 + if self.training and self.aux_logits: + aux = self.AuxLogits(x) + # 17 x 17 x 768 + x = self.Mixed_7a(x) + # 8 x 8 x 1280 + x = self.Mixed_7b(x) + # 8 x 8 x 2048 + x = self.Mixed_7c(x) + # 8 x 8 x 2048 + x = F.avg_pool2d(x, kernel_size=8) + # 1 x 1 x 2048 + x = F.dropout(x, training=self.training) + # 1 x 1 x 2048 + x = x.view(x.size(0), -1) + # 2048 + x = self.group1(x) + # 1000 (num_classes) + if self.training and self.aux_logits: + return x, aux + return x + + +class InceptionA(nn.Module): + + def __init__(self, in_channels, pool_features): + super(InceptionA, self).__init__() + self.branch1x1 = BasicConv2d(in_channels, 64, kernel_size=1) + + self.branch5x5_1 = BasicConv2d(in_channels, 48, kernel_size=1) + self.branch5x5_2 = BasicConv2d(48, 64, kernel_size=5, padding=2) + + self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1) + 
self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1) + self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, padding=1) + + self.branch_pool = BasicConv2d(in_channels, pool_features, kernel_size=1) + + def forward(self, x): + branch1x1 = self.branch1x1(x) + + branch5x5 = self.branch5x5_1(x) + branch5x5 = self.branch5x5_2(branch5x5) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl) + + branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1) + branch_pool = self.branch_pool(branch_pool) + + outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool] + return torch.cat(outputs, 1) + + +class InceptionB(nn.Module): + + def __init__(self, in_channels): + super(InceptionB, self).__init__() + self.branch3x3 = BasicConv2d(in_channels, 384, kernel_size=3, stride=2) + + self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1) + self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1) + self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, stride=2) + + def forward(self, x): + branch3x3 = self.branch3x3(x) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl) + + branch_pool = F.max_pool2d(x, kernel_size=3, stride=2) + + outputs = [branch3x3, branch3x3dbl, branch_pool] + return torch.cat(outputs, 1) + + +class InceptionC(nn.Module): + + def __init__(self, in_channels, channels_7x7): + super(InceptionC, self).__init__() + self.branch1x1 = BasicConv2d(in_channels, 192, kernel_size=1) + + c7 = channels_7x7 + self.branch7x7_1 = BasicConv2d(in_channels, c7, kernel_size=1) + self.branch7x7_2 = BasicConv2d(c7, c7, kernel_size=(1, 7), padding=(0, 3)) + self.branch7x7_3 = BasicConv2d(c7, 192, kernel_size=(7, 1), padding=(3, 0)) + + self.branch7x7dbl_1 = BasicConv2d(in_channels, c7, kernel_size=1) + self.branch7x7dbl_2 = BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0)) + self.branch7x7dbl_3 = BasicConv2d(c7, c7, kernel_size=(1, 7), padding=(0, 3)) + self.branch7x7dbl_4 = BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0)) + self.branch7x7dbl_5 = BasicConv2d(c7, 192, kernel_size=(1, 7), padding=(0, 3)) + + self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1) + + def forward(self, x): + branch1x1 = self.branch1x1(x) + + branch7x7 = self.branch7x7_1(x) + branch7x7 = self.branch7x7_2(branch7x7) + branch7x7 = self.branch7x7_3(branch7x7) + + branch7x7dbl = self.branch7x7dbl_1(x) + branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl) + branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl) + branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl) + branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl) + + branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1) + branch_pool = self.branch_pool(branch_pool) + + outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool] + return torch.cat(outputs, 1) + + +class InceptionD(nn.Module): + + def __init__(self, in_channels): + super(InceptionD, self).__init__() + self.branch3x3_1 = BasicConv2d(in_channels, 192, kernel_size=1) + self.branch3x3_2 = BasicConv2d(192, 320, kernel_size=3, stride=2) + + self.branch7x7x3_1 = BasicConv2d(in_channels, 192, kernel_size=1) + self.branch7x7x3_2 = BasicConv2d(192, 192, kernel_size=(1, 7), padding=(0, 3)) + self.branch7x7x3_3 = BasicConv2d(192, 192, kernel_size=(7, 1), padding=(3, 0)) + self.branch7x7x3_4 = BasicConv2d(192, 192, kernel_size=3, stride=2) + + def forward(self, x): + branch3x3 = 
self.branch3x3_1(x) + branch3x3 = self.branch3x3_2(branch3x3) + + branch7x7x3 = self.branch7x7x3_1(x) + branch7x7x3 = self.branch7x7x3_2(branch7x7x3) + branch7x7x3 = self.branch7x7x3_3(branch7x7x3) + branch7x7x3 = self.branch7x7x3_4(branch7x7x3) + + branch_pool = F.max_pool2d(x, kernel_size=3, stride=2) + outputs = [branch3x3, branch7x7x3, branch_pool] + return torch.cat(outputs, 1) + + +class InceptionE(nn.Module): + + def __init__(self, in_channels): + super(InceptionE, self).__init__() + self.branch1x1 = BasicConv2d(in_channels, 320, kernel_size=1) + + self.branch3x3_1 = BasicConv2d(in_channels, 384, kernel_size=1) + self.branch3x3_2a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1)) + self.branch3x3_2b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0)) + + self.branch3x3dbl_1 = BasicConv2d(in_channels, 448, kernel_size=1) + self.branch3x3dbl_2 = BasicConv2d(448, 384, kernel_size=3, padding=1) + self.branch3x3dbl_3a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1)) + self.branch3x3dbl_3b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0)) + + self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1) + + def forward(self, x): + branch1x1 = self.branch1x1(x) + + branch3x3 = self.branch3x3_1(x) + branch3x3 = [ + self.branch3x3_2a(branch3x3), + self.branch3x3_2b(branch3x3), + ] + branch3x3 = torch.cat(branch3x3, 1) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = [ + self.branch3x3dbl_3a(branch3x3dbl), + self.branch3x3dbl_3b(branch3x3dbl), + ] + branch3x3dbl = torch.cat(branch3x3dbl, 1) + + branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1) + branch_pool = self.branch_pool(branch_pool) + + outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool] + return torch.cat(outputs, 1) + + +class InceptionAux(nn.Module): + + def __init__(self, in_channels, num_classes): + super(InceptionAux, self).__init__() + self.conv0 = BasicConv2d(in_channels, 128, kernel_size=1) + self.conv1 = BasicConv2d(128, 768, kernel_size=5) + self.conv1.stddev = 0.01 + + fc = nn.Linear(768, num_classes) + fc.stddev = 0.001 + + self.group1 = nn.Sequential( + OrderedDict([ + ('fc', fc) + ]) + ) + + def forward(self, x): + # 17 x 17 x 768 + x = F.avg_pool2d(x, kernel_size=5, stride=3) + # 5 x 5 x 768 + x = self.conv0(x) + # 5 x 5 x 128 + x = self.conv1(x) + # 1 x 1 x 768 + x = x.view(x.size(0), -1) + # 768 + x = self.group1(x) + # 1000 + return x + + +class BasicConv2d(nn.Module): + + def __init__(self, in_channels, out_channels, **kwargs): + super(BasicConv2d, self).__init__() + self.group1 = nn.Sequential( + OrderedDict([ + ('conv', nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)) + # ,('bn', nn.BatchNorm2d(out_channels, eps=0.001)) + ]) + ) + + def forward(self, x): + x = self.group1(x) + return F.relu(x, inplace=True) + + +def vgg_make_layers(cfg, batch_norm=False): + layers = [] + in_channels = 3 + for v in cfg: + if v == 'M': + layers += [nn.MaxPool2d(kernel_size=2, stride=2)] + else: + conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1) + if batch_norm: + layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)] + else: + layers += [conv2d, nn.ReLU(inplace=True)] + in_channels = v + return nn.Sequential(*layers) + + + + +def getLeNet(num_classes=10): + model = LeNet(num_classes) + return model + +def getAlexnet(num_classes=10): + model = AlexNet(num_classes) + return model + +def get_vgg16(num_classes=10): + vgg16_setting = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 
512, 512, 'M', 512, 512, 512, 'M'] + model = VGG(vgg_make_layers(vgg16_setting), num_classes) + return model + + +def get_resnet18(num_classes=10): + model = ResNet(ResNetBasicBlock, [2, 2, 2, 2], num_classes) + return model + + +def get_squeezenet(num_classes=10): + model = SqueezeNet(num_classes) + return model + + +def get_inception_v3(num_classes=10): + model = Inception3(num_classes) + return model + + + + diff --git a/train.py b/train.py new file mode 100644 index 0000000..9c81a3c --- /dev/null +++ b/train.py @@ -0,0 +1,285 @@ +from __future__ import print_function, division +import torch +import torch.nn as nn +import torch.optim as optim +from torch.optim import lr_scheduler +import numpy as np +import torchvision +from torchvision import datasets, models, transforms +import matplotlib.pyplot as plt +import time +import os +import copy +from tqdm import tqdm +from collections import OrderedDict + +def download_mnist(save_path): + torchvision.datasets.MNIST(root=save_path,train=True,download=True) + torchvision.datasets.MNIST(root=save_path,train=False,download=True) + return save_path + +def load_mnist(batch_size=64,path='',img_size=32): + if img_size != 32: + transform = transforms.Compose( + [transforms.Resize((img_size,img_size)), + transforms.ToTensor()]) + test_transform = transforms.Compose( + [transforms.Resize((img_size,img_size)), + transforms.ToTensor()] + ) + else: + transform = transforms.Compose( + [transforms.Resize((img_size,img_size)), + transforms.ToTensor()]) + test_transform = transforms.Compose( + [transforms.Resize((img_size,img_size)), + transforms.ToTensor()]) + trainset = torchvision.datasets.MNIST(root=path,train=True,download=False,transform=transform) + trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,shuffle=True, num_workers=2) + testset = torchvision.datasets.MNIST(root=path,train=False,download=False,transform=test_transform) + testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,shuffle=False, num_workers=2) + dataloaders = {"train":trainloader,"val":testloader} + dataset_sizes = {"train":60000,"val":10000} + return dataloaders,dataset_sizes + +def download_cifar10(save_path): + torchvision.datasets.CIFAR10(root=save_path,train=True,download=True) + torchvision.datasets.CIFAR10(root=save_path,train=False,download=True) + return save_path + +def load_cifar10(batch_size=64,pth_path='./data',img_size=32): + if img_size!=32: + transform = transforms.Compose( + [transforms.Resize((img_size,img_size)), + transforms.ToTensor()]) + test_transform = transforms.Compose([transforms.Resize((img_size,img_size)) + ,transforms.ToTensor()]) + else: + transform = transforms.Compose([transforms.Pad(padding = 4), + transforms.RandomCrop(32), + transforms.RandomHorizontalFlip(),transforms.ToTensor()]) + test_transform = transforms.Compose([transforms.ToTensor()]) + trainset = torchvision.datasets.CIFAR10(root=pth_path, train=True,download=False, transform=transform) + trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,shuffle=True, num_workers=2) + testset = torchvision.datasets.CIFAR10(root=pth_path, train=False,download=False, transform=test_transform) + testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,shuffle=False, num_workers=2) + dataloaders = {"train":trainloader,"val":testloader} + dataset_sizes = {"train":50000,"val":10000} + return dataloaders,dataset_sizes + +def download_cifar100(save_path): + torchvision.datasets.CIFAR100(root=save_path,train=True,download=True) + 
torchvision.datasets.CIFAR100(root=save_path, train=False, download=True)
+    return save_path
+
+def load_cifar100(batch_size, pth_path, img_size):
+    if img_size != 32:
+        transform = transforms.Compose(
+            [transforms.Resize((img_size, img_size)),
+             transforms.ToTensor()])
+        test_transform = transforms.Compose(
+            [transforms.Resize((img_size, img_size)),
+             transforms.ToTensor()])
+    else:
+        transform = transforms.Compose([transforms.Pad(padding=4),
+                                        transforms.RandomCrop(32),
+                                        transforms.RandomHorizontalFlip(), transforms.ToTensor()])
+        test_transform = transforms.Compose([transforms.ToTensor()])
+    trainset = torchvision.datasets.CIFAR100(root=pth_path, train=True, download=False, transform=transform)
+    trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=2)
+    testset = torchvision.datasets.CIFAR100(root=pth_path, train=False, download=False, transform=test_transform)
+    testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=2)
+    dataloaders = {"train": trainloader, "val": testloader}
+    dataset_sizes = {"train": 50000, "val": 10000}
+    return dataloaders, dataset_sizes
+
+def test_model(model, dataloaders, dataset_sizes, criterion):
+    print("validating model:")
+    phase = "val"
+    model.cuda()
+    model.eval()
+    with torch.no_grad():
+        running_loss = 0.0
+        running_acc = 0.0
+        for inputs, labels in tqdm(dataloaders[phase]):
+            inputs, labels = inputs.cuda(), labels.cuda()
+            outputs = model(inputs)
+            _, preds = torch.max(outputs, 1)
+            loss = criterion(outputs, labels)
+            running_loss += loss.item() * inputs.size(0)
+            running_acc += torch.sum(preds == labels.data)
+        epoch_loss = running_loss / dataset_sizes[phase]
+        epoch_acc = running_acc / dataset_sizes[phase]
+        epoch_acc = epoch_acc.item()
+        print('{} Loss: {:.4f} Acc: {:.4f}'.format(
+            phase, epoch_loss, epoch_acc))
+    return epoch_acc, epoch_loss
+
+def WriteAccuracy(savePath, msg):
+    full_path = savePath + '/Accuracy.txt'  # a .doc file would work here too
+    with open(full_path, 'a') as file:      # close the file after appending
+        file.write(msg)                     # msg is the accuracy string assembled by the caller
+
+def train_model_jiang(model, dataloaders, dataset_sizes, ratio, prune_type, criterion, optimizer, name, scheduler=None, num_epochs=100, rerun=False):
+    if rerun:
+        print('entered the rerun branch')
+        print(num_epochs)
+        since = time.time()
+        model.load_state_dict(torch.load('./test_20.pth'))
+        best_model_wts = copy.deepcopy(model.state_dict())
+        best_acc = 0.0
+
+        model.cuda()
+        for epoch in range(20, num_epochs):
+            print('Epoch {}/{}'.format(epoch + 1, num_epochs))
+            print('-' * 10)
+            print('epoch %d lr: %f' % (epoch + 1, optimizer.param_groups[0]['lr']))
+
+            # Each epoch has a training and validation phase
+            for phase in ['train', 'val']:
+                if phase == 'train':
+                    model.train()  # Set model to training mode
+                else:
+                    print('val stage')
+                    model.eval()   # Set model to evaluate mode
+
+                running_loss = 0.0
+                running_corrects = 0
+
+                # Iterate over data.
+                i = 0
+                loss_a = 0
+                p = 0
+                for data in dataloaders[phase]:
+                    inputs, labels = data
+                    inputs = inputs.cuda()
+                    labels = labels.cuda()
+
+                    # zero the parameter gradients
+                    optimizer.zero_grad()
+
+                    # forward
+                    # track history only if in train
+                    with torch.set_grad_enabled(phase == 'train'):
+                        outputs = model(inputs)
+                        _, preds = torch.max(outputs, 1)
+                        loss = criterion(outputs, labels)
+                        loss_a = loss.item()
+                        print('[%d ,%5d] loss:%.3f' % (epoch + 1, i + 1, loss_a))
+                        loss_a = 0
+                        i += 1
+                        # backward + optimize only if in training phase
+                        if phase == 'train':
+                            loss.backward()
+                            optimizer.step()
+
+                    # statistics
+                    running_loss += loss.item() * inputs.size(0)
+                    running_corrects += torch.sum(preds == labels.data)
+                if phase == 'train' and scheduler is not None:
+                    scheduler.step()
+
+                epoch_loss = running_loss / dataset_sizes[phase]
+                epoch_acc = running_corrects.double() / dataset_sizes[phase]
+                # epoch_loss = running_loss / p
+                # epoch_acc = running_corrects.double() / p
+
+                print('{} Loss: {:.4f} Acc: {:.4f}'.format(
+                    phase, epoch_loss, epoch_acc))
+
+                # deep copy the model
+                if phase == 'val' and epoch_acc > best_acc:
+                    best_acc = epoch_acc
+                    best_model_wts = copy.deepcopy(model.state_dict())
+                    model.load_state_dict(best_model_wts)
+                    path = './test_{}.pth'.format(epoch+1)
+                    torch.save(model.state_dict(), path)
+
+        time_elapsed = time.time() - since
+        print('Training complete in {:.0f}m {:.0f}s'.format(
+            time_elapsed // 60, time_elapsed % 60))
+        print('Best val Acc: {:.4f}'.format(best_acc))
+
+        # load best model weights
+        model.load_state_dict(best_model_wts)
+        path = './best.pth'
+        torch.save(model.state_dict(), path)
+
+    if not rerun:
+        since = time.time()
+        best_model_wts = copy.deepcopy(model.state_dict())
+        best_acc = 0.0
+        if prune_type == 'activation':
+            savePth = './pth/'+name+'/ratio='+str(ratio)+'/Activation'
+        else:
+            savePth = './pth/'+name+'/ratio='+str(ratio)+'/ActivationWeight'
+        model.cuda()
+        for epoch in range(num_epochs):
+            print('Epoch {}/{}'.format(epoch+1, num_epochs))
+            print('-' * 10)
+            print('epoch %d lr: %f' % (epoch+1, optimizer.param_groups[0]['lr']))
+            # Each epoch has a training and validation phase
+            for phase in ['train', 'val']:
+                if phase == 'train':
+                    model.train()  # Set model to training mode
+                else:
+                    print('val stage')
+                    model.eval()   # Set model to evaluate mode
+                running_loss = 0.0
+                running_corrects = 0
+                # Iterate over data.
+                i = 0
+                # loss_a = 0
+                # p = 0
+                for data in dataloaders[phase]:
+                    inputs, labels = data
+                    inputs = inputs.cuda()
+                    labels = labels.cuda()
+                    # zero the parameter gradients
+                    optimizer.zero_grad()
+                    # forward
+                    # track history only if in train
+                    with torch.set_grad_enabled(phase == 'train'):
+                        outputs = model(inputs)
+                        _, preds = torch.max(outputs, 1)
+                        loss = criterion(outputs, labels)
+                        loss_a = loss.item()
+                        print('[%d ,%5d] loss:%.3f' % (epoch+1, i+1, loss_a))
+                        # loss_a = 0
+                        i += 1
+                        # backward + optimize only if in training phase
+                        if phase == 'train':
+                            loss.backward()
+                            optimizer.step()
+                    # statistics
+                    running_loss += loss.item() * inputs.size(0)
+                    running_corrects += torch.sum(preds == labels.data)
+                if phase == 'train' and scheduler is not None:
+                    scheduler.step()
+                epoch_loss = running_loss / dataset_sizes[phase]
+                epoch_acc = running_corrects.double() / dataset_sizes[phase]
+                # epoch_loss = running_loss / p
+                # epoch_acc = running_corrects.double() / p
+                print('{} Loss: {:.4f} Acc: {:.4f}'.format(
+                    phase, epoch_loss, epoch_acc))
+
+                # deep copy the model
+                if phase == 'val' and epoch_acc > best_acc:
+                    best_acc = epoch_acc
+                    best_model_wts = copy.deepcopy(model.state_dict())
+                    model.load_state_dict(best_model_wts)
+                    path = savePth+'/test_{}.pth'.format(epoch + 1)
+                    torch.save(model.state_dict(), path)
+                    WriteAccuracy(savePth, str(round(float(epoch_acc) * 100, 2)) + '%-')
+
+        time_elapsed = time.time() - since
+        print('Training complete in {:.0f}m {:.0f}s'.format(
+            time_elapsed // 60, time_elapsed % 60))
+        print('Best val Acc: {:.4f}'.format(best_acc))
+
+        # load best model weights
+        model.load_state_dict(best_model_wts)
+        path = savePth + '/best.pth'
+        torch.save(model.state_dict(), path)
+    return model
diff --git "a/\350\257\264\346\230\216\346\226\207\346\241\243.docx" "b/\350\257\264\346\230\216\346\226\207\346\241\243.docx"
new file mode 100644
index 0000000000000000000000000000000000000000..9e693ff6b282d51c23c6f4d05b9bd8fe5f5103ec
GIT binary patch
literal 31636
[base85-encoded binary payload omitted; the filename decodes to 说明文档.docx, i.e. "documentation.docx"]
zT2epZp?^|nZ1rXgq3BkdZ0j1g2yXRz8{3o7c>j!#rht8}odPOUFYTjjl8YRmh>anzgpGOm^gNqRWen87{`xlE7<^PApEqZK<(RtKs}EXh7(LqLMvWvN zjeZWq`9aqLj+)O_%!w*niFVb?LE3wTzvg2^H*dxQZ>C&kmL`d^L>KR4({+icy}7AZiD=;? zEd-%+Tbu1t+DC=P=e#G&ChzC0v9P|asNL9asoWtUoAtr^!Wg{pilxR@o8g3IDTA9@ zexea_Im#wFHve$SFO!T070Z21R%b3f`(M^-cV2mz?Zed@U6BpN5FnAuiu7x0suQ>m z=uaFA@FhOmF1(H4p2&CDlJGAjeF8Ct@)t1Tn_F_5Tr9a;2Ic) zF<7hs%rzes;<;G!;R=Er7&yd<25hbR@{CI811fJ6AUjrt43lVR6LbKPk>?{Gh5cXu zaN=~)xAD;094(KGmj-;)23QKS_nqRG!eCMW47${~oY7>RW~rJ6+`tNqd(WvNdla6; z-~j74dvCLtkn@7bs?L9i`tvqOm zq0w?O1Kd@C`3t%UYeWf?lk9_`X^sFS)_C7WF$1zB;A1e%G1Hlf%1Sxw+g7b^ z2u}WJGm6GJhyVcz16&RsxozNbFdCcaCA%THcQg}RS(f(ybH7_4%9}6na{*@*?q4dt zu;)$`nUpj;z)ww1kS_@!w{a+MMv(&C=P(Fhz~lxf_}LZ^#G6wOjGz)jP*Z_vlqw+c zRoaly052(k5s1*bi^Kn~2R{h3playN!jV9NAjIJ(yiWyn-LMb*09%jXKX?61+I4o9 zJpt+NROfc5*xDxN*RajTG2J;7WD>Ac;EmQ+LeNaAX$>Azn?31`4a(3W~i-D(>A4;ap~yP33~ zey{O(E-D!l9`;8lBcWE8FXy0RZ&Q;>Y=2NG#M;?4Xmaj5iWC|qJ*9OR8Ik*W843$G zew~3bY&^<5UbFaKG?de>!p`=7$gtIvD|cslZCwt8>1jtT(cc&}#zxN0=mtMc|0CDG zVXI;9b{7Gz@20(gfF*qMXN9;w#XkENH2a?{y?hjCTzptn8Gk?o^d3mR0WzL?<6%UA z7*ILOMu4^Ywi>1?FFQ+tGFK+!-?`aFa#Ac?=wl;`zdt7MH?Fdm1batH2U1%Ruy|yg zW_Q;?c*uJfACJPRxbNdUl3;^;&ufmdb&|8|p|Ao2!MXr20!*%;xg64xYm&0mp3;PLJ7!nQGBi377h!WML8ke0U86*@0545GPvEu!J~nE z!io((nN0x-g#HbINjq27@v^-&$|di0KZl>aU1vr3$o~;zT>k#aSg>d7jemG)^;&3BBfWapJ%b=S`damxI9_7Pp(lB@Y{- zAkrLo2<_9_fRb|Z)mRZQ7z*0*&3KbQ5u__`j%T9O3M?GUc_s7%2Ppps2R8s5#K3;+ zI(&aSrXiBjtITVQ&I#B8iXCsIHkFRhc&IIBPQ7vTTNDJjL4O~#M?LB8;K{yI*qp1 zZOIc4liGF7)W1({k8^y{DEu-pbFPV~blrMj%f&QSkuvuSD;~}%E*Zk^?ayai9_w4L zC(Yz~U6~+fxU%1Fc302;_inZblXKI;YpPd4)iBKy%-0qI&#dc3Xn;{V&b}k8z|eD( zF3+htv5oBN{MR_X1P|Hc`WIwvL)ybC&{v(id$muPM~(PDJi*LKIGB_TJJjz>EPaWi zTNtYayuJ1OS#EG(<4vzRcf+)LSKvpU^RV+rSC@-GQBO4*&%l3RaTkW%M!v4V>-P~{=5;hM zJ0bqAkud3dCFig3-y4Jblk7qH@8xBv|8F6Yf~#!c+w`~H7-Wy517WCi9y|4OXrL}A z9*xoIVS84CVWBXJP$3RAl8oMb5=e|bHLm`FpiTP+{^T%9GRP&Rw^DNb49MSwh_`rR zN=o(kf3;)jmcxELFP0Ux^D41f1oRx8x4t_KY_A!(X)l6`fl(qAV2mA9QqE$Z{}yWf z7RnkL2f5CasHQ5zG0a{YWVcqc<%ad8^r|BPs2fjs_yZSQ@vkN#WlJ<^T3Q_^pC z9bCNlvAiu^PT1c(-M#Acr0SvJY(||FS!1ActBF5dZQ>)^5$Ix=c;9m$%UFFG;+a{| zzOoU`ct()^3QTlkHV=CTp|fSkG;UwYT6nSG<;+{+h@g!zb2R(zzGj}b0LvziTO7$I zv*e#g`CGrRxGndK5&#H>!{pOEa!bJM*m{T{f}>jX518Hlrbfg9BpB731OtqnX$pWC z{cSNo+q3MCY3n67?fr}ktaT9x{;5t4p?&*y%T7E2s$|q7ZW{(9bm}-pb%{8bf?nbb z;FDw;diJmuzjS#$4tM`CXyp7tO3JlS$qv8$(>XOC|4Eu*wUqT?u16hU#ppewEPlPU-1Ej?T{^e%`u0DG;1fjvrTd?i9T)A?}5 zMJc_~@wy%}?UuO31}e(;-RIsfrB<5y=bJvs*F2c~0rWGn#W*pmp)Voejn2Z=%=n_q zCel!bEtTZ-rk)4JBOGJ-#P|pnupuKb`~Ut#tR~a9Bl+d65AM88e7e*$#u|n7-`4m( zs8wAZK#2hhNl8QBI;3-2bSoex(tru$w46pR8EK9Rzz;Ls`P1J0uYdTjS>x;osf4I2 zFl6<4Ym}FPk$XV_;DEU%;M!5ZRbgr9B}D4N)dEIa2m9d#N#mgiE-e+fD&o9%vv}AA zpi8nRq$L*}-OCRNZ#n_K;d_@_%9I73k;*?kQofrK;4(UBU8$`V!`mieg3^w&A;c5X z0du~<4T<=z<6PcsyH~5#rxLm{nz^&xXH2l{)t70%68RWmG|B_ZafHZ)4`Kr;8PYwlc}_&qG-J zwdOa#8pi0R%JXCR(bQm(*$`v%fAVw$1Jvo zzf`d^Qm!C2a9uSgM1c8y5RlVA^ZyMr@q!u=88-w#F9WLTYmBSLD)vzdR`m}mN*-p3 zn2u%z*R$PDHH}qC^g5bP0dp!90L?hQ0DL*h$h!c)?`}-GZi#*oaY}k`Y5Un4DA;i{ zDq!Kyzmk@VsGr0b1aS|2RbqUx_yZd7x_}w=`H3Iz2B3jJkj21^^4A&+{M)i+96;Ot zWA2mY@B%Ln^#VXZ_L~BZ{$O|L$bUWfUjnyS@D9*Sv?2j>ge5@kO5Fkj)d>)=UeLC$ znyLwfuW8rK(2%srKCIOvd&S=2-+mXFX!MRu^7qJY$Q^E0Yy zNr?(slb>Xw(GmP1UW=_6{40c{rJ4TTzx=a)e?yj(WTl0Cg@QboS;kop;qEU)-K zQpxszkV+V~uE5D0CWZ|q(g13S0t7(sq}uJ+|Iyx8M%9%x?c(n44#C~s-Q6WP1a~JS zxCRaG?he5T?ykXIg1g?6%*>ma%=>*m?!UX%g|+rMyZ3%-_d2_}x~jUmYDZx#b>iYC zLw@(>ZS@Z${=OfION|_0*fK|CjGywX%_qIh`*t$bLX3!@?Be%Te>Ek9)-8@KQ%=V%h&QIG*Kj*}5KoC| z5utSO{CIhFg7BRGJyr`ext-esUih3ueM!VTnl~ayF$KVt1YowF1!%Wfayy{8MCP${ 
zucfUv6aW{O5Q@&c`S8d5udM5G&jGZ}D0QE5-Gh?mJ-{RVp{xCF@p_zYI2unFz*gs) z#MaW#6XgIt1*l0fiH(HESoIX-!{Rj<6)UYSfUt!DAxMptc>a(gSqR{tDy(pajPqv)abI)%|5)HBb~Dzv z6Oe4OTq<&l$K)QDGq~Q8*mW|$4KK`CZrmx4rk5$_vvmiob!|!fw2(ppqwfC=3n@Ju zSzrjTFM~^m-`R_I(B-S9n|2dVROaD{bj8R=M~9@!3a_#V%SGhx7*8L=Hc{hXi#h0P zE?*TY&itCYaJ3{SoE;m)oQ+)oI?EG2KU2Fj36=@)`*>u4(&8=-5KCmod$4b~+}ee0 z{dTTH9NI9bOqj}?HcC=!3K7t$0DjP)8uv6MK=t_9UV2;`IHfH1=lmfN0uX9gbIpKQ zjaiXkPc(|3qXF_<#1WtW+a$K1u}pD`K5PvF6DTn7?I7|As zDu3*DnG{j^i#b`C@8Rby+D0v^-6f2U(_tp&TA3Em$y*5YZ$w~aZI!qV8J^Zc3CXMZ zQL>lEa~8L-K9C%=acH_8h3F+)vw$Ys?8318`QE33;71uZRjM z{x;PBWRWRtAi8Vc$~Ueuw(hpaOlEMz@?@XU>!l+J`pZwa5MG`vjx^GAz%OGjT^YHf zV|@j?N}0Rt?Ibrhv5@$vV-58e9gcp3Y3!v7w!9q5n6GY*Eggy=r-(7=7Mok4=W4p2 z0jvht{NZaLYX}%NAx3B?CAf)1K|ch#nm2`#OU@5>E?}%}mf*UW^i+-gP=m%%rb>T= zfBjUnR*Fbzf8PF^`n00P@v|A#$32E$?uUNP%tKRp+6~Tygzhat)oQ-Ig}e${!#|IZ zWcgj#Y%P)Rdsmy^hC83%k5DY-C%SvTjpy7e_OLa9sMe$HwKxcbvr5JL`GTF@bspe3 zIxvh!ngb>xa;UrvKui!tX1p>rg#gMAKN?Xmz^4XK`5pVqlkfg-8}D8dB42Tys2PR_HtUOibrtk#&`in9n*PDDpV*s+{zZ^%*%m zIiZecidOzDu5h(YS+n=v2*+L;ms6eB{ZJc}PH+sf=w_l`yJ`&`)}u+tW|rw1!4)k0 z@>{$f5q~K-of|mj$D!TAOkD_E-jB$2AH7G|1&0y1GT)(vk6?cF*CRp23U5ZvMqba6 z`LG!iK$^|b-78(JN+S-`+kpN-vZ##H6ArqCbNYuGKqK=GZJw2LrF7R+KxaPH(;vetYW2_4xY$9ZQy^AW zJR7y=&aj_n*}Rt%I&k#g#}?EEo!++11Ys*qInSTj7A_tfKA=-t&dr@&E4LiYu(g5& zV)bsZIS-brwe{L2#UOX-h`Ud_JX{aqW#ohSq?A7jd4kpa+c^L zXHm}*qrqT6AmHinwBNYZA018j%xvJLXA9dSq<-=}+Tiu^IQx3Ma;?MXA`*MN#@)tR zoO9m7=ke(}e6KRY_jF_LPbuORlB?U_A9V3ahIQJ@fS ziA>@42@u(?BC$I={f`7!qTGrGFeuh%58jKrGvuj6!F3)PRy~dVQgvAJR6$ zf!ysqU=42p@65KevFjafI}rWI$6S0RTr~Nuw|j@kpJ_WWjno(l{;eDG)bW=O-qKju z4oCDg+(6iJ%z}^Ftogz^lqkN*$xtwU)(wlz#x79o#i!jO_*7#MofHX3)@H3R1$d(3 zeMH?_+wAMvq1&qw4=G*Pm>CE}6zXn8IT!^L;C%^N!V;o2hO)h-=2JzA4 zlOFSpFp^wEni8H_b!K%nl#=0hK)M;BD^!xlZbYInms@-=Hn&Xg2D@-p69gnKmiz)z z;80%zb9b;TDUVcf?Jye^lZdD>)(K%RTw%DVP9))@8*>WYWD9l$DFawB^~C^+WnECu zvLRnM>^fB&V|&`S+5;zD{@Z7Z$>n4j*de5Xm#k}8N5s?1g`12io9K_mbO|KX0Eyfq zhhh0RJo^O~gs#jaGkMEJob9!37#o!tl8^{=O-G#U#;su#$k66Jt04rC)^dR}jVSAF zA;|?NRdunFZ64v$418}w${XQxDewvDgRKmM15%g>jWs?N!|{S|OrQ!X``bIQvlkSI zjoLHr9}y<|4W^yDSXTo#ahL=dq*KCUcl`{ew$t~*g?Dpenkh|2NcI#5^6JAqf$H2x za_dB!$Gyd&w)ptJ+NTzm1c%lN9ES_dDhUru$i^wM?hRWu`Gd?hP;O+B5}X7Yk|a`< zqJyw&;4@kaf2K^EsZwf8>zuX-hKrkY^f*^lfda~i3h%aNqAy|1h?qM(*lNl5oQ0h+ zo&yJ_dgLc#4nH6(fk3(LU56%{85{d3Hr<&+JiAw;Na$1Eg7Z;nn zB5%fMV~LT>)!KGC43+B^5A*rvZKvc%k2@Zc9+`0B2xaa?xJp7IyzxtmH65oPI0*LEP+8;#DnX&~pz0a6fV>`E+@^T)x6*LtE?W%ig{2*0A|qKft{B*XI0R*^dp|;=dgN5=QF6|1Ghflc}?_ zrLDQskNL1pW7&3@6UoQG@U?5=lDolVkYk3*+eVqp2AO=>Tsnu&mQtCBibuZswf(Ek zv930m-Zpvm?noLof_czYaI_n`jZJmT$66_*4CfCRW?T$V%d!EfM`~VJb$P;Ng7GZ8 z&^MY8gbJ@uTVHKytCh1TbOgJ$n}*&96|3@{IGgBi!VIG78$+z8gu_tXp{2?P_SmR2 zI_b|!eu_}cxk)ct{tg)xq!1upRh?_0L58V&Q9ngt+^!#o*W$M-+}c;krghd@%&5|5 zEMW-4WKSWZEHkeDOpoiI|7P*U3UxWmvrTWor`Od@7Yz}k5?TRwCSXrfnlmfx0SKCh zGb$6A?~WgM%{Dv;X5?#y7QRGAYb24U_{6x>IeS$5*1rD^a&(Vft`u+NH;ub7RH=uQ z8SVW56A);fCi+S>e*QH^dPQ+!ya78{v9Qzg? 
z5XeH9^mmvQQ{qsoRhw|6G0$lF4i$KC!K3rV!Z6;Erjb-^XApLZC}aM)D!a%t6C zA#6#wi4#`59+RMpmNVaP0XYtw1C=C>+=Tr#=QNMG$W5j0g05(RD z1^4g?wFY%F>2LNp3=z56K&pafRP%z7GNvZKbsq{FuYv`MVE@`d*i{MADeLtD!{^#o zc@9H5K=O_LbZaDSF)S7k=6W0Aq%Qc_mdaA2Zw!n8<-AQ^hmY)Se(_G{q3uaH&VRLq|ot}99^^{DG#VzU)E(0#`DxBuC7-GrFNsfS|IJA@L!o`8RkoPntLWY+E5ndx34yf5rt0cANt z`$0>_CJhmZDjc$eq@`MZ$f;1=i1+1v4@T#5cL}yr>ZWwNMMGdh!9>5Isf>PKdBrn6ZvRiwG_Uv{ZvG#;3H(Bxv*dNFsge%w^ z%wnuSt1j(Jzxi(RWnp7+p9^j@y=HeI7+>5e)W%t-X;e z8O9TxUg|GVdOyJa=_|!j~i6$?Hz#?H!^evPSW=XnC6gA z$paKZiA8;|kAnVTTk7lja5eS#TRiT?>j9K^bc%5~!fy8ORt9FOrVDoUz4A(#%5??I zC}V_eqg)8EQ%n3%!hM^z!KhhRG!{3o?cahHigv@5ZS-a}oh=wZz#2CdTsm3MYhru& z!CaamFDAmFUQQ{R$#ypzY+S=h>2i*wQp{aR_+gKGwlP|YbTBNRGbT4kY!od=6@FD> z&3toi&v`e0UH+=&{6P3$1>?|7zhM&~Kkf)1Ej$|Fj-MG%-Av!A{7p6<$IIC*Gr|R( zg5D6qt>7+OZeobXfe20J2rk3xG`7%AhcZf`k&#zC9!dD+NhRSQPSdw&K21D^)L9tl zby%Q(@AL>Y>zb14CSe4Tv7Cdw*}A_ujuqp#S8)u4z$gPAuSm)i(3|yu?{$NE@A?oz z*|(=nyFdY!+Fy-{JMm3nA|Tr&Q2tV|LZ?5a#X!f4LQ#jngg60Cj#n-=0v_uY8Ad$; z&HlsN(yov4gglC(qFWqO4}9e>Q03wp+QA^Lg}!x%9w{=HK~m;#i0A@EqSZbtvWSS` zhizL3z9-nig=e6hBIz)|b#7G@&Cx(ra#Mn| z&HQd8lMC`6vO32vGp)rwPG3WBV7U0rxfx%B)rZzLmz5AvG0=zABiuLcXw;y4 z+(O|44IJ<_p+kwW+Lt*P$jIzwv+cWiR|9KJdc!Jk>O6<-Wq&?ld^L7?t=3YDv^jh? zesLVal#|ihc?T{9`hvYhkB3+8msyu9>)Yfkc;#V@Cb33GO)vXr%d8ZJ-MuZ5J%{|N zmj~zh>8O{k;kEzzrq8|EzzSf2fKnWRfDr$B(@xGF?@WJAfUW7vj?0`VL8pyYYwz<* zS70R?c0^)v%!eFe>#etXSR5(lk)XA(;`}l4ygwHj0FjXo$tk|8>gNa*s}=~jI|sEm z%QEow8JgOEf22$4!OrboD-hNcp+>U3vbf*>J*llRmy+SLtY!(B050F9_tjCb-^*4~ z|EA@1_nv<`Tv)h+?CAB)gfatwT+;x=)&yGe`*|m~WHk>-zcYB=Y0()tSbfoB%KR zf|+6)$H^W{Fgc`tXtveeSiXSzlSuN}2cKJwL|dSK&Q4;Wv>uW!CVA?<1yOcRu70B^ zYuT5y^FpQg-qXsHys3AIII+bk18I8Ooi_$vRfq)+WYjM`d+!o3nxBj8oUf}NwqG7| zOg>3OpK1}nI#|OPha@we4Lc==f4UEi5ZvYjt}@owu-95z2P zdQALuRSmb07d44Qs_ES^B__#+0H*HCfJe>Z%QEI(pp{6O`qUF27oRJ2fyc!?)vnAD z?Pa-1gK)>9|IA4nO6rG}v)9?G5(1qq%(TT5a&Grf`(6oR1RK#+&Slr#!`|kWHbuyk z*17jl`bUEOl`dIv^V|~$*7*D1t?>tpUdBM7lg|pLLe3cdzl3i&kfv-g;ihbzqoizA z!=!9EfTV0$?ps6^;^%T@n1!vQQQy>eiyLRUYkP6H-C`m4t?-e3ba0CXGWxcH?~CQ; z_DayVvXUO`;C8zTXi8_x(2=jtp)qFBf^%KeEfy{bkO23?kvE>I(j61~TKj=up6rd3 zKb2R56%m1Kc=BUwOCV=lQ1o8l=nx58^zNo@zFY}^-MTLS*$mkzvJA>sQZOt1vbsWX z;ibA=r;6qXF4M0C9}0voBr9sLg_d<|c$hJgpdGGfMPucSRsm+HA^?d zR3gg!?x-&|Tug~4`_)n$N#;0GD0Vf!I)MgbXW(#-oQ-rE1-*fQA1x{=%0Zr06(cRF z3KLhgGkwe}Y5eg8J+f$(dg;uw1zqg$%iV6-R@Up%ide}p_5He|Y;U@W83?0yQ}LW^ zE^fhMy4kRpiAKV5^{7)H!CtO(My^g6lZ{+X%6tojqtTf5!0Z{!N{&*p^(bl0e2Rog z6`NgCZ^bCZ*xp@u4Ax5xpFqa77qO~E^JbIE!G$1_(KRi5mI*%rPP}rw16`tC^@nA3}iQG3sN@|=p$knN@ zvn(jb!daJ4L!%}&R;PLY`Y>qzT0phlEETZBGP#(6N{7g`5J-e&A{{FoB59UFl}Cgv zf`8iaw5%+7wQb@gp5k%FEC~AP1rO+2TK(5OJ#TC&5zwbzBzWE*^^l? z!HYJO8asc<6%(ci`k+L$VW`T?iIo6>AWq$R#^a1rwb6LMYWVeZmdZ#}#UCP~B>fY@ ztEh$t4r7VyuA%Dpk*=bpqwS$Er<9lW(vK&3EF{(Bd@AGgiOuB0Gr(^|Ci3JLu zqL68066-J^1Hy+nxqY{(FLQ)p_NM@(bidpqLI$?_F5^Ivu8o{KNYr08`6PVi`o+b}3HTAS_uAHyg({N~{%sF`5wzSrhc6=I9 zhI&(8wF3FB^fK;YFs`A>IwM3)9TSaT)`9te@L2mw{cQ4NeN^@>%|(+r8Sn1k$-pSB z2hBy@kCWw$Ph*8TpY(k1dait|ML54(2^`^#Pv6P6XisuCU03PBTjDRyfhhn^6V-Aq zIU6)!v$&smx1vwY3H*5b#C&1;>4DONjARf~Npm=JGx#iqiuUAmB*4*Ou_4)Sf zXw)i}?ey#qUlr#8vAKh366I-9udo_AZ4n&=UTz$|qNl$7(fcdF_sDIYBzBx7G8ZbI zw{n1H`JrgRTXlQU&TDl>f_a^;yO>oNWns@!;94MooLlG4XhB?bk_mBqMwK&CMRD*A zA@M-8IwfKd8`TiN75Pqk@xXJ=-0nN1Tw&=DC1UVD+5w0_sGqmZbT+qf@D3Ek1h7+! 
zW<|mvTjYMZ-vr9-c283>@&+Ktd;We+7;LEs>HUveBVf8eM8Hs4a5;fS|lpl@$8fwLc&9vp*#N>f)>(EuNt&Rx}HB1R@i4-bXBS<^S~U56F+xcqJkC zm|wmBvSjWOI^~x74D35Q?bYx5Dx;y6?Q6$Bv?40^kb9_nul1ZMc2_jxGxM;%(X)g5 zO!PJpW+$ryuH~rAI6=DfR%A)(A};lBLy|T>hU>| zBB|SJ_yp=9sPRl=l# z;K4!fdf;{L1k9t7k+MjtQ9c4kNv3}S4oW~C#Ao|rVrosR5STa|*9gsGVqh8<_r2pc zU{dgm8_FW8RWnHF6l{1yRmvhD>Q<7S(tP%UH*dT1gNh6RI9V&s=C9K5&_8i(Wd8&W z#7_PNf`=AT29<3B(4u7%b&>k6uGEkxPW26{U#=VHeG_mqa>!8{?t9=xWm0ZN9V;Y| zTJj>baxQuutFYZSS8C%5`!uDg0HgN5I4Xr^^8BQXT;(kr&g(U-!fsSm`#*Q2ZkC zztaCn0MHrzzaCz!0)RW)2!nOeWEEgfi;3R0H_0S2z_35rP>peXURDt)M@(%3z`D0%eKdI?&{ zm7K}-+0`zsLnvOF&2(rpDhLx(6UAh&h=Ll%Vvr@?I+RmuXFmBL?5Qt&6jp7id%cSFTd4xl#MUP42#xvV(8I&5qdp{JG3KJA*^NC1V~I%MP>PX<-FxNaI+e5$vwy zp(?**rHx9&deNYQj?_k)8JOIGZ*6z7y#^<_%dauftwl8fGMuZJ{yPU>p_5TPyz@E@orqXHg%6BqL8o$0h#Xz0W7TvA zc*X2W1O1|Tjf=PIipcVcfIsXmUaNJhj!?rhd{NFS2ioEyw|rZmTvx5$i-p_pJ~kzp zG|#$%Mo;-jIo()nF>2ReMA+5=I?>UJ$`;&*jXEce%se-5=_m(v9Zm$8T+-T}5%w&( zK1;^QA?xUgNX7^k9CI?8Yf6GtysF|z3*V(^S)h|}cyIosq&=ow15Po~9z(|nb^yg2 zLC@sIK*z9wAv!cX8N!Mt-3C12Ug`LzMkL75`VJxG%hb1~4XIs5gdof8GOs>;Pi1>& zp-83nBnZtK-vX1Y$Q~Q zHTWI%YfFSwR9cSSp~RCFG-$q|Z)2TUgVV4H6fO=faYK1%My5k1GtBsLv>~TsHvhFM7W|Sp;rQ4wGv81*S3aL;mvDxTu^t4ckn(NjC(e`W- zA-Q|D%S4WCP9=v||(w3a7Z^Y|J`S zY+;Zyn*26rr&d7m=QJjHMGF3lW_ct(Jqkn+P9ZJU&`ru^DA})6Mv9^a=)5TE=pgT> z`=Bc17ts3cwR5U68n)|N>ivq2^N&`#eIF_5-0@!m2SWm70xT0*=DDJJ^%+y>bMl7l zbHXSnFv$lF@j}rng0ZEWDdjj|NkmYjwLM20na-v`#Pigsx zNnqR<)R$7+QmyA`iO7LCLF~JW%p{YrdTn9&yD#wN!&18z$Ys9}x2PJtxg5^fc{7$c zh(PxR?z>0QqqPS-N9U6G%A>3#gVSy!xU{Ggk^iD*1fU zIC(D=ZD6^nh+M|UFJ$R$3gep-fxAeFdQpKB>yA}1=+9a~_x{FL)Jc+PDbgb+FulSh zSY1F@_y*HEa9Vv@SKyWQPc>5Z&1TifXBH3PY2iD%)$F8Fx)o=TpzDz!rJqAJ97#~3 zyNhiTdvdmK<(9GGK^bm|Z>t@1s!*jViDuW)f{}%mlc38mpVUX64f)l#1cG-gBFwdz z(!94Wkd+2i5N=uLA*T|+b1rq`h5BQxudSHY_(8;LRVyMaDB3;~3wgJ&8szR(@{w7c zgy1A@;07J+k>)Ig#R4*Yi}7tT=@`F&n{O`2$3Y6PliPu~WwwFeEMykMryeWe=n5Zb zh$HchLS=Bv6O3Y-Q*uEw5v^T!Xxqn0>Rw9c6@6pc={UeKCNOU>%Lc*U`|>B>_xW~ zEhf=bzw0_!U~P$GmlK>Jq_LunmS~lt6L`06UH9z)N71xi zoCF4>Y-kTKkY2vw?cE8n;`z~8v8;vLT`sYr#dcYaehp24qgzAke~v#}lp(ZNITWd2 z=}vKHXyD;@{AmM6cl`MpgczM+&DC~*{NEb^(#bm-T43ulVj)`FKDF7|2b}mVa5Tw( z`Su8Q%iqn*I=VLKkV`0ujp5LIFSiG+2kzvm4I)o%(8Y;mCTw*?qf@ zq3e|&Do<^!ldU>0EY;=J4>#JsrJWxi$Nd>#R?Y*sRR1IG%mCFkRgK=h`#CLtiqlo{^h{IF*1sB5c&`; zvZV|PW;-=GIt@}^pN~4!^&7X5bJP}eSBSM4yRZh=%adXn9iwB$A$)@PCEjWCSB>G@ zQ~KpH)+HXW>m}2$ou-@i!JL8W6q-Ydf(@L2{OT~zg(Yg`8Q&Fl^O2$;&uR?iJD+%@ z*iB&LHDM-GDy$B+cco1?+59!SAd?mv;eC;HH*|G#3v$NrTVq1w(l@+ff&7M>i^!7D zW|1uUnSv}|BSBz>j8W93`9XIJfn!MphkABd#km)h`dz#udqp_$GeTwR; zo$20#%@OS*fZN}mBIqF4fz~G%z|;_Y+(cNp#PDGbJB*o7w84ueHkUAsk^&(mS7Cm` z@)^3!jPnphORjtizf6IwO;?$G+gH_QJxGw0w^YJMqI}Vwxr zAMTo?o)2=cS(I*A7dgmD&zAdazSu*GjPuc`6osCf4;cr8Yzn=4?|n(r%Ss;SbGQ`C z=T{Sy2g;JA*NxGqaa%ap#wP#={F^_(KNrAy0{CMLptS*FMSp2+M)vmqUugpjsXzbH zl!t#5t!pJcM^ri~M+KuRX%H_$-|UpJvKUWnlcL32u7S_-8xInM+7y>>sa-uE>9N0F zO+JPfnHj6NbkyI*);@@D80Ge>ur*8az=*4o#xi$ECuG{yc)9}12)M+G?fZ-z7VgL;%0;m6^hkmXeid@y5V;Zc3b6q#G$D^W3cR>4#5>CM^?4iEHsANiEeT-hNDrlhjRa`o;9OwJC#4a(7!sBxl$2nA>R080dYLR;!XYLD-g}Oi< z8>uexs%#=lAy%Qg8=19*E_s*_xG?_n-J)POpq+g#`kFb$#$w(K=ZhM)%jW0x7r1Fn zy!$gJtubWTv`NFoYlBd(A9F}RbAyombslKCt}rJmY!rPm?|Jr7zafstC53wkt(lvw zVPO-l!M_52`9Js-7=#Y69Q5C72mkec{`&kcm4p>#{z>4UHBJ8l{`f2g1p@k8h11`G z|19?SciB?{^BnFCzV-0BrbI2mZ69 z^gH;U=ISpf5YUV@5YWGj*Wcm4TM@qqc-#K@?0?u4zvF+Omw(}{02LN~^3p$M>EH4H kl-s}1KtN$Gf9w9AQe0669KfM~G7o@%Q2>*3yZ<=*Kjt|*egFUf literal 0 HcmV?d00001