12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
752785279528052815282528352845285528652875288528952905291 |
- {
- "cells": [
- {
- "cell_type": "code",
- "execution_count": 1,
- "id": "98abe12e",
- "metadata": {},
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "C:\\Users\\saeed\\Desktop\\Master\\bci\\lib\\site-packages\\tqdm\\auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
- " from .autonotebook import tqdm as notebook_tqdm\n"
- ]
- }
- ],
- "source": [
- "import torch\n",
- "import torch.nn as nn\n",
- "import torch.nn.functional as F\n",
- "from sklearn.model_selection import train_test_split\n",
- "from sklearn.model_selection import KFold, StratifiedKFold\n",
- "import librosa\n",
- "import librosa.display\n",
- "import IPython.display as ipd\n",
- "import matplotlib.pyplot as plt\n",
- "import numpy as np\n",
- "import scipy.io\n",
- "from tqdm import tqdm\n",
- "import glob\n",
- "import os\n",
- "import json\n",
- "import pickle\n",
- "from einops import rearrange\n",
- "from captum.attr import DeepLift, Saliency\n",
- "from captum.attr import visualization as viz"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 2,
- "id": "cd5442b9",
- "metadata": {},
- "outputs": [],
- "source": [
- "with open('bilab_10mfc_train.pkl', 'rb') as f:\n",
- " mfc_train = pickle.load(f)\n",
- "with open('bilab_10mfc_test.pkl', 'rb') as f:\n",
- " mfc_test = pickle.load(f)\n",
- "with open(\"data/bilabial/y_bilabial_train.pkl\", \"rb\") as f:\n",
- " y__train = pickle.load(f)\n",
- "with open(\"data/bilabial/y_bilabial_test.pkl\", \"rb\") as f:\n",
- " y__test = pickle.load(f)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "498285d5",
- "metadata": {},
- "outputs": [],
- "source": []
- },
- {
- "cell_type": "code",
- "execution_count": 3,
- "id": "01f70ae5",
- "metadata": {},
- "outputs": [],
- "source": [
- "trials = []\n",
- "for trial in mfc_train:\n",
- " pic = np.zeros((7,9,10,11))\n",
- " pic[0,2] = trial[3]\n",
- " pic[0,3] = trial[0]\n",
- " pic[0,4] = trial[1]\n",
- " pic[0,5] = trial[2]\n",
- " pic[0,6] = trial[4]\n",
- " pic[1,:] = trial[5:14]\n",
- " pic[2,:] = trial[14:23]\n",
- " pic[3,:] = trial[23:32]\n",
- " pic[4,:] = trial[32:41]\n",
- " pic[5,:] = trial[41:50]\n",
- " pic[6,0] = trial[50]\n",
- " pic[6,1] = trial[51]\n",
- " pic[6,2] = trial[52]\n",
- " pic[6,3] = trial[58]\n",
- " pic[6,4] = trial[53]\n",
- " pic[6,5] = trial[60]\n",
- " pic[6,6] = trial[54]\n",
- " pic[6,7] = trial[55]\n",
- " pic[6,8] = trial[56]\n",
- " trials.append(pic)\n",
- "picture_data_train = np.array(trials)\n",
- "trials = []\n",
- "for trial in mfc_test:\n",
- " pic = np.zeros((7,9,10,11))\n",
- " pic[0,2] = trial[3]\n",
- " pic[0,3] = trial[0]\n",
- " pic[0,4] = trial[1]\n",
- " pic[0,5] = trial[2]\n",
- " pic[0,6] = trial[4]\n",
- " pic[1,:] = trial[5:14]\n",
- " pic[2,:] = trial[14:23]\n",
- " pic[3,:] = trial[23:32]\n",
- " pic[4,:] = trial[32:41]\n",
- " pic[5,:] = trial[41:50]\n",
- " pic[6,0] = trial[50]\n",
- " pic[6,1] = trial[51]\n",
- " pic[6,2] = trial[52]\n",
- " pic[6,3] = trial[58]\n",
- " pic[6,4] = trial[53]\n",
- " pic[6,5] = trial[60]\n",
- " pic[6,6] = trial[54]\n",
- " pic[6,7] = trial[55]\n",
- " pic[6,8] = trial[56]\n",
- " trials.append(pic) \n",
- "picture_data_test = np.array(trials)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 5,
- "id": "c6e5d06e",
- "metadata": {},
- "outputs": [
- {
- "data": {
- "text/plain": [
- "(1913, 7, 9, 10, 11)"
- ]
- },
- "execution_count": 5,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "dataset = np.vstack((picture_data_train, picture_data_test))\n",
- "dataset.shape"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 32,
- "id": "c0cd1a50",
- "metadata": {},
- "outputs": [],
- "source": [
- "labels = y__train + y__test"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 2,
- "id": "19f34131",
- "metadata": {},
- "outputs": [],
- "source": [
- "#model\n",
- "\n",
- "class CNN_RNN(nn.Module):\n",
- " def __init__(self):\n",
- " super().__init__()\n",
- " self.conv1 = nn.Conv2d(20, 16, 3)\n",
- " #torch.nn.init.xavier_normal_(self.conv1.weight)\n",
- " self.pool = nn.MaxPool2d(2, 1)\n",
- " self.conv2 = nn.Conv2d(16, 32, 3)\n",
- " #torch.nn.init.xavier_normal_(self.conv2.weight)\n",
- " self.lstm = nn.LSTM(input_size=256, hidden_size=128, num_layers=2, batch_first=True)\n",
- " self.fc = nn.Linear(128, 2)\n",
- " #torch.nn.init.xavier_normal_(self.fc.weight)\n",
- " self.batch1 = nn.BatchNorm2d(16)\n",
- " self.batch2 = nn.BatchNorm2d(32)\n",
- " self.relu1 = nn.ReLU()\n",
- " self.relu2 = nn.ReLU()\n",
- " \n",
- " \n",
- " def forward(self, x):\n",
- " hidden = torch.zeros(2, x.shape[0], 128), torch.zeros(2, x.shape[0], 128)\n",
- " # (batch, heigth, width, feature, time)\n",
- " #print(x.shape)\n",
- " x = rearrange(x, 'batch heigth width feature time -> (batch time) feature heigth width')\n",
- " #print(x.shape)\n",
- " out = self.pool(self.relu1(self.batch1(self.conv1(x))))\n",
- " #print(out.shape)\n",
- " out = self.relu2(self.batch2(self.conv2(out)))\n",
- " #print(out.shape)\n",
- " out = rearrange(out, '(batch time) channel heigth width -> batch time (channel heigth width)', time=11)\n",
- " #print(out.shape)\n",
- " out, hidden = self.lstm(out, hidden) \n",
- " out = out[:,-1,:]\n",
- " out = self.fc(out)\n",
- " return out\n",
- " \n",
- "class FC(nn.Module):\n",
- " def __init__(self, hidden1=500):\n",
- " super(FC, self).__init__()\n",
- " self.fc1 = nn.Linear(6820, hidden1)\n",
- " torch.nn.init.xavier_normal(self.fc1.weight)\n",
- " self.fc2 = nn.Linear(hidden1, 1)\n",
- " torch.nn.init.xavier_normal(self.fc2.weight)\n",
- " self.dropout = nn.Dropout(0.3)\n",
- " \n",
- " def forward(self, x):\n",
- " x = x.view(-1, 6820)\n",
- " x = F.relu(self.fc1(x))\n",
- " #x = self.dropout(x)\n",
- " x = F.sigmoid(self.fc2(x))\n",
- " return x\n",
- " \n",
- "class cnn3d(nn.Module):\n",
- " def __init__(self):\n",
- " super().__init__()\n",
- " self.conv1 = nn.Conv3d(20, 16, kernel_size=(3, 3, 3), padding=1)\n",
- " self.conv2 = nn.Conv3d(16, 32, kernel_size=(3, 3, 3), padding=0)\n",
- " self.pool = nn.MaxPool3d((2, 2, 2), stride=2)\n",
- " self.fc1 = nn.Linear(192, 128)\n",
- " self.fc2 = nn.Linear(128, 1)\n",
- " self.drop = nn.Dropout(0.25)\n",
- " self.batch1 = nn.BatchNorm3d(16)\n",
- " self.batch2 = nn.BatchNorm3d(32)\n",
- " self.batch3 = nn.BatchNorm1d(128)\n",
- " \n",
- " def forward(self, x):\n",
- " x = rearrange(x, 'n h w m t -> n m t h w')\n",
- " out = self.pool(F.relu(self.batch1(self.conv1(x))))\n",
- " out = F.relu(self.batch2(self.conv2(out)))\n",
- " out = out.view(out.size(0), -1)\n",
- " out = self.drop(F.relu(self.batch3(self.fc1(out))))\n",
- " out = F.sigmoid(self.fc2(out))\n",
- " return out"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 85,
- "id": "fbe27fb8",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "(1913, 62, 20, 11) 1913\n"
- ]
- }
- ],
- "source": [
- "with open(\"data/normal_all_data.pkl\", \"rb\") as f:\n",
- " all_data = pickle.load(f)\n",
- "with open(\"data/11_20mfc.pkl\", \"rb\") as f:\n",
- " data = pickle.load(f)\n",
- "with open(\"data/all_label.pkl\", \"rb\") as f:\n",
- " labels = pickle.load(f)\n",
- "with open(\"data/vowel_label.pkl\", \"rb\") as f:\n",
- " vowel_label = pickle.load(f)\n",
- "with open(\"data/bilab_label.pkl\", \"rb\") as f:\n",
- " bilab_label = pickle.load(f)\n",
- "with open(\"data/nasal_label.pkl\", \"rb\") as f:\n",
- " nasal_label = pickle.load(f)\n",
- "with open(\"data/iy_label.pkl\", \"rb\") as f:\n",
- " iy_label = pickle.load(f)\n",
- "with open(\"data/uw_label.pkl\", \"rb\") as f:\n",
- " uw_label = pickle.load(f)\n",
- "\n",
- "print(all_data.shape, len(uw_label))"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 86,
- "id": "d79522e8",
- "metadata": {},
- "outputs": [],
- "source": [
- "trials = []\n",
- "for trial in all_data:\n",
- " pic = np.zeros((7,9,20,11))\n",
- " pic[0,2] = trial[3]\n",
- " pic[0,3] = trial[0]\n",
- " pic[0,4] = trial[1]\n",
- " pic[0,5] = trial[2]\n",
- " pic[0,6] = trial[4]\n",
- " pic[1,:] = trial[5:14]\n",
- " pic[2,:] = trial[14:23]\n",
- " pic[3,:] = trial[23:32]\n",
- " pic[4,:] = trial[32:41]\n",
- " pic[5,:] = trial[41:50]\n",
- " pic[6,0] = trial[50]\n",
- " pic[6,1] = trial[51]\n",
- " pic[6,2] = trial[52]\n",
- " pic[6,3] = trial[58]\n",
- " pic[6,4] = trial[53]\n",
- " pic[6,5] = trial[60]\n",
- " pic[6,6] = trial[54]\n",
- " pic[6,7] = trial[55]\n",
- " pic[6,8] = trial[56]\n",
- " trials.append(pic)\n",
- "picture_data_train = np.array(trials)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 87,
- "id": "b2c4df69",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "(1913, 7, 9, 20, 11) 1913\n"
- ]
- }
- ],
- "source": [
- "dataset = picture_data_train\n",
- "labels = nasal_label\n",
- "print(dataset.shape, len(labels))"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 253,
- "id": "ba72e9fb",
- "metadata": {},
- "outputs": [
- {
- "data": {
- "text/plain": [
- "(1913, 7, 9, 20, 11)"
- ]
- },
- "execution_count": 253,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "Max = np.max(dataset, axis=(0,1,2,4), keepdims=True)\n",
- "Min = np.min(dataset, axis=(0,1,2,4), keepdims=True)\n",
- "dataset = (dataset-Min)/(Max-Min)\n",
- "dataset.shape"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 256,
- "id": "45d5b88c",
- "metadata": {
- "scrolled": true
- },
- "outputs": [
- {
- "data": {
- "text/plain": [
- "<All keys matched successfully>"
- ]
- },
- "execution_count": 256,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "model = CNN_RNN().float()\n",
- "model.load_state_dict(torch.load('train/model0_acc.pt'))"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 85,
- "id": "509b8e17",
- "metadata": {},
- "outputs": [],
- "source": [
- "label = []\n",
- "for l in labels:\n",
- " label.append(l)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 257,
- "id": "d2a30b5f",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "torch.Size([1913, 7, 9, 20, 11])\n"
- ]
- }
- ],
- "source": [
- "sample = torch.from_numpy(dataset)\n",
- "#data = torch.rand((7,9,20,11))\n",
- "sample = sample.float()\n",
- "sample.requires_grad = True\n",
- "print(sample.shape)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 258,
- "id": "75a7a952",
- "metadata": {},
- "outputs": [
- {
- "data": {
- "text/plain": [
- "CNN_RNN(\n",
- " (conv1): Conv2d(20, 16, kernel_size=(3, 3), stride=(1, 1))\n",
- " (pool): MaxPool2d(kernel_size=2, stride=1, padding=0, dilation=1, ceil_mode=False)\n",
- " (conv2): Conv2d(16, 32, kernel_size=(3, 3), stride=(1, 1))\n",
- " (lstm): LSTM(256, 128, num_layers=2, batch_first=True)\n",
- " (fc): Linear(in_features=128, out_features=2, bias=True)\n",
- " (batch1): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
- " (batch2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
- " (relu1): ReLU()\n",
- " (relu2): ReLU()\n",
- ")"
- ]
- },
- "execution_count": 258,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "model.eval()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 259,
- "id": "81b5c2ab",
- "metadata": {},
- "outputs": [],
- "source": [
- "def attribute_image_features(algorithm, data, **kwargs):\n",
- " model.zero_grad()\n",
- " tensor_attributions = algorithm.attribute(data,\n",
- " target=labels,\n",
- " **kwargs\n",
- " )\n",
- " \n",
- " return tensor_attributions"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 260,
- "id": "e1c1a53c",
- "metadata": {
- "scrolled": true
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "torch.Size([1913, 7, 9, 20, 11])\n"
- ]
- }
- ],
- "source": [
- "dl = DeepLift(model)\n",
- "#attr_dl = dl.attribute(data, labels)\n",
- "attr_dl = attribute_image_features(dl, sample, baselines=sample * 0)\n",
- "\n",
- "print(attr_dl.shape)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 261,
- "id": "05a94327",
- "metadata": {},
- "outputs": [],
- "source": [
- "attr_dl = attr_dl.detach().numpy()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 262,
- "id": "8d88cd0d",
- "metadata": {},
- "outputs": [],
- "source": [
- "with open('sal_nasal.pkl', 'wb') as f:\n",
- " pickle.dump(attr_dl, f)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 217,
- "id": "4608f5dd",
- "metadata": {},
- "outputs": [],
- "source": [
- "with open('sal_vowel.pkl', 'rb') as f:\n",
- " attr_dl = pickle.load(f)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 218,
- "id": "8a4afb3d",
- "metadata": {},
- "outputs": [
- {
- "data": {
- "text/plain": [
- "(1913, 7, 9, 20, 11)"
- ]
- },
- "execution_count": 218,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "attr_dl.shape"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 219,
- "id": "184f5964",
- "metadata": {
- "scrolled": false
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "(1913, 7, 9, 20, 11)\n",
- "(7, 9, 20)\n",
- "(20, 63)\n",
- "(array([ 2, 3, 9, 2, 16], dtype=int64), array([21, 32, 32, 22, 20], dtype=int64))\n"
- ]
- },
- {
- "data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAABwMAAAItCAYAAADVKtVTAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8qNh9FAAAACXBIWXMAAAsTAAALEwEAmpwYAACApUlEQVR4nOzdebhlV1kn/u9b85ChKgkkQAhJCAJB5lKhQQmgTI2gDSIKKijEoVVsxW5pUFBQobEBBRGittiAUwB/ICKDEBKG0BomSZgSSMKUOZWp5rp3/f64p/B6c6vqVNVep05uPp/nOc+pu8/a61377H323ueseteq1loAAAAAAACApWfZ4W4AAAAAAAAA0IfOQAAAAAAAAFiidAYCAAAAAADAEqUzEAAAAAAAAJYonYEAAAAAAACwRK043A0AAAAAAABg6XjsI9e3666fmUisT/7bjve11h43kWC3UToDAQAAAAAAGMx118/kX9530kRiLb/TxcdNJNBtmM5AAAAAAAAABtOSzGb2cDeDEXMGAgAAAAAAwBKlMxAAAAZQVR+uqrZg2RlV1arqJQdQz1FV9UdVdVlV7R6t/4Ch28v0qKpnjfbzsxYsv6yqLjs8rQIAADgULTNtdiIP9k9nIAAATJf/leSXknwuye8n+e0kV04icFW9ZNQpdcYk4t1W7O99qao3jV4/eaING8DeOiIBAABYOswZCAAA0+WJSb7cWvvBw90QJubvk3wiyRWHuyEAAAAsPToDAQBgutw5yXmHuxFMTmvtxiQ3Hu52AAAADKUlmU3bbzkmwzChAACwiNHwiW+vqq9W1baquqmqPlZVz+wUb8+cg5XkEaOhG1tVfXhBucdW1Xuq6tqq2lFVX6mqV1bVhkXqfGRVnVVVnx+1f1tVXVhVL66qNQvKXpbkxaM/z5kXv80rc6t5Eee9ts9570ZzIb5q9O9d8+dRrKp7jYba/HpV7ayqq6rqr6rqnovEOb6q/qCqvlRVW6rqhtG/31RVp+7zTe7wvoyef2r0+qXzXr9s4ftWVauq6rdG7d1RVW/a13s3b/2jq+p1VfXNqto+avcvV1UtKLfPOSoXzkE4Orb+YvTnX8zftvlDnlbViqr6har6xOj92lpVn66qX6wq3ykBAACmnMxAAABY3J8kuShzWXpXJDk2yROSvLmq7tla+82B470pyYcz1/F0+ejvJLlsT4GqenGSlyS5Psm7k1yd5H5Jnp/kCVX10NbaTfPq/B9J7pXk40n+McmaJA8b1XFGVX1/a21mVPY1SX4oySOS/OX8uANYleRDSY5J8v4kNyW5dLRNj0vyjiQrk/xDkkuSnJjkvyT5z1X1yNbap0Zl1yX5WJK7J/nAqHwluVuSJyd5W5KvjtGeId+X3x69fv8kf5jkhtHyG3Jrb0/yXUn+Kcn/l7n9tz+rkvxzkg1J/mb091NGse6Z5L+OUcfevGnUzicneWeSz8x77YYkqao9++WxSb6U5K+SbE/yyCSvTfI9SX7iENoAAAAsUbOZPdxNYERnIAAALO47W2tfmb+gqlZlriPnN6rqDa21bw4VrLX2plGMFye5rLX2kgWxH5m5zqrzkzyhtXbDvNeelbkMr99O8t/mrfYLSS5trf2HbL6qemmSFyV5apK/HcV/zSi78BFJ3tRa+/BAm5Ykd0ry+SSPaK1tmdeOjUn+OsnWJN/XWvv8vNe+M3Pz6P1ZkgeNFj86cx2Br2mtzd/OPftm9ZjtGex9aa29ZJRFd/9Ruy7bR9y7Ze64unbMdiZz791XR+vtGLXzxUn+NckvVNXfttYOaljZ1tqbRsmFT07y/+05Bhd4YeY6Al+X5Ff2dJJW1fIkZyX56ap6W2vtnQfTBgAAAPozpAsAACxiYUfgaNnOJH+cuf9U9+gJN+mXR8/Pnd8ROGrXmzKX1fWMBcu/urDDa+TVo+fHDtvEffq1+R2BIz+ZuYy3F8/vCEyS1tqFSf40yQOr6vQF
621bWHlrbWdr7eZxGnIY35ffPMCOwD1esKcjMElaa9cneenoz2cP0rJFjIYA/aUkVyb5b/OyJTP6969lbiqQZyxeAwAAcHvV0jLTJvNg/2QGAgDAIqrqpMwNJ/noJCclWbugyF0m3KSHJtmV5Eeq6kcWeX1VkjtU1bGtteuSpKrWJ3lekh9O8h1JjszcsJp7TGobtif5t0WWP3T0fP+9zHP3HaPne2cus/DcJN/MXGbmg5K8J3PDhn5mfkfV/hzG9+VfDmKd3ZkbznShD4+eH3jQrdm/78jc0K4XJ3nRgikK99iWuf0DAADAlNIZCAAAC1TVqZnruNmY5COZm+fuxiQzSU5O8lMZf0jKoRybufv3F++n3BFJrhvN9fahJN+d5MLMDXt5TeY6FDOqZ1LbcPVeMvGOHT0/dz/rH5EkrbWbquohmRsO9Un59wy+a6vq9Ule1lrbtZc6knx7DrzD9b5ceRDrXLuXjs49dR19CO3Znz375x7Z93F3RMc2AAAAt1GzkbU3LXQGAgDArf1q5jpCnr1wHrWq+rHMdQZO2o1JlrXWjhmz/JMz1+H1ptbafxhKsqrulP13Ki5mdrT+itba7gWvbdjHenv7Bnjj6Pn+rbXFMgdvXVFr30jyMzWXpnZ6kkcl+a9Jfitz0yD85n6q6PG+jGUvHaL7c1xVLV+kQ/CE0fON85bNjp739j1vQ5IbDiD2nrr/vrX2Xw5gPQAAAKaIOQMBAODWThs9v32R1x4xyYbM84kkG6vqPmOW37MN71jktb1tw54Op+V7eX3z6Pmui7y2acx2zfeJ0fP3HuiKbc5FrbXXJvmB0eIfGmPVHu/L/l4/FCuS/KdFlp8xev70vGV73T9VdVoWzyLcV9u/mLnOw4eMMioBAADG0pLMpE3kwf7pDAQAgFu7bPR8xvyFVfXYJM+ZdGNGXj16/tOquvPCF6tq/WgIzT0uGz2fsaDcqUlesZcY142eT9rL63vmvPsPw3pW1aOT/Nhe1tmXv8hcZ9OLq+q7F75YVcuq6ox5f9+nqo5fpJ49y7aOEfOy0fMZ8xce4vuyv9cP1e9X1beHLq2qY5K8aPTnX8wr98UkNyV5clXdcV75tUn+aC9177Xto+zP1ya5U5I/GtXzH1TVnarq9APYFgAAACbMMKEAAHBrr0/y7CRnV9XbknwryXcmeVySv0vyo5NuUGvtg1X1G0l+P8nFVfWeJJdmbr62u2Uuq+2jozYmyT8kuSTJr1bVfTOXQXZSkicm+ccs3nF1TuaGmvz9qvrOjDLNWmsvG73+F0l+PckLqur+ST6f5DuSPD7J3yd5ygFu03VV9dTRup+oqg8muShz/4n0rkkemrnhWteMVvmBJK+sqvOTfDnJ1UlOzNzQn7NJXjlG2B7vywcz9778aVW9PcnNSW5orb1uzLdiX67I3ByGF1bVu5KsTPLUzHXQvb61dt6egq21XVX1h5kbKvXTVfX3mfvO9wOZO4a/tUj952euE/VXqurY/PtchK9trd2Y5KVJ7p/k55L8YFV9KMk3k9wxc3MJPizJCzN3LAAAAHybOQOnh85AAABYoLX2b1X1yCQvS/KfM3ff/Nkk/yVzmWwT7wwctesVVfWxJL+c5OGZ6wS7MXOdM2cl+at5ZbdU1aOSvDxzWXDfm+SrmevceVUW2YbW2heq6qeSPD/JL+TfO+FeNnr96qp6ROY63b4vcx2QF2Sus+mUHGBn4KjOD1bV/UYxHztq587MdVx9KP9xqNb3Za6z7vtG235U5jrLPpDkVa21j48Rr8f78r6q+rXMZUz+SpJVSS5PMkRn4M4k35/k95I8Pclxo/a+PHNZewu9OHOde89NcmbmOvf+JslLskiHXWttc1U9ZbTes5KsH730liQ3jjoYfyjJM0evPzFzHdDXZK4z+jeTvPVQNxIAAIB+6uDmsAcAAAAAAIBbu//9V7X3vee4icS604lXfLK1djDz2N9umDMQAAAAAAAAlijDhAIAAAAA
ADCo2cPdAL5NZiAAAAAAAAAsUToDAQAAAAAAYImaqmFCl69f31ZuPOZwN+PQre6b/Npmqmv9SVK7+8do/UNMRLXD3YJDVzP9Y8yunsAbNTuBg2oCm9H7mJrEZ68mMQbA0tjd3TdjxdbOAZLsOrr/O7V86ySufd1DZHZl/xi9zU7i7nEC/11tEte+tqLzZ2MS171l/T/fk7jvXArX72QC1/Clco8wAW35BGJ03h9L4XtMMpnrd5vAdan1vr4ulf09iXPt8iXw3bX3PUiS2rE0ftSZxPl8Msdt3/prV9/6k0zkO8AkzufLVvX/ojG7o+8OXwq/6ezafH1mtmxZGieq27CWlpmlchOyBExVZ+DKjcfkxF/+b4e7GYesTtnStf6dN6zuWn+SrLyu/6Exic6hSXR6LN9527+urLyx/zZsPbX/neOyWybxq0z/EMu3990f3X9oSLJsR/8Yk9iOiXScdj6mjvtM/4P2yifs7B7jqE+u6R5jzfX9v/FsPb7vt89JfLndflz/Y2pmXf99sfLG/m/WzuP6/hCw/Jb+2zBzRP99sfrq/tfvSXQ4Lp/Ata/3fyiYWdP/873ylgn8544J/IC1Y+MEzoWdvy8tXyI/5K++rv927F7fPUR2buh74C7b1f99mp1AB9SKbf23Y+fG/j/kL9ve9xo+u6F/L/naS1d1jzGJiaZ2buwfZNkEfjfafVTf7Vh9Tf/7tZm1t/1ra5KsP+XG7jG2fPXorvVP4n5ttvMp5Bt/9Oq+AeA2aKo6AwEAAAAAALiNa8mMxMCpYc5AAAAAAAAAWKJkBgIAAAAAADCYlomM+MyYZAYCAAAAAADAEiUzEAAAAAAAgAFVZlKHuxGMDJ4ZWFV3raq3VdWNVXVTVb2jqk4aOg4AAAAAAACwb4NmBlbVuiQfSrIjyU9lbljYlyU5p6ru11rbMmQ8AAAAAAAApktLMtsOdyvYY+hhQp+b5NQk92ytXZIkVfVvSS5O8rNJXjVwPAAAAAAAAGAvhh4m9ElJPrGnIzBJWmuXJvlYkicPHAsAAAAAAIApNDOaN7D3g/0bujPwPkkuXGT5RUlOHzgWAAAAAAAAsA9DDxN6TJLNiyy/PsnGxVaoqjOTnJkkKzYsWgQAAAAAAIDbiJbI2psiQ2cGHrDW2lmttU2ttU3L168/3M0BAAAAAACAJWPozMDNWTwDcG8ZgwAAAAAAACwxs01m4LQYOjPwoszNG7jQ6Uk+P3AsAAAAAAAAYB+G7gx8V5KHVNWpexZU1clJHjZ6DQAAAAAAAJiQoYcJ/dMkv5jknVX1oszNEfnSJF9P8saBYwEAAAAAADBlWpKZGCZ0WgyaGdha25LkUUm+nOTNSd6a5NIkj2qt3TJkLAAAAAAAAGDfhs4MTGvta0meMnS9AAAAAAAATL+WyszgM9VxsOwJAAAAAAAAWKIGzwwEAAAAAADg9m22mTNwWsgMBAAAAAAAgCVKZiAAAAAAAACDaUlmIjNwWugM7GDn9Wu61r/+sv67bcspu7vHWLa9f2JqzXQPsSTs3ND6B5lAiOXb+l9cTnvo5d1jXHL+3brWv+6K/u/T7MruIbLz6P4HVZvEVbLzZmz45FV9AyS56iHHd48xO4F9ceTXd3SPsWt933uEm07tWn2S5Iiv9T+HbD92efcYO++xrXuM3Nz3ZLj2qv73Uo8845PdY3z0zzZ1j7H5Qbu6xzj6wv4Xv12dd/mynf0/37vXdg+R3ev63yNs/Hz3ENlxTN8dPrO6a/VJkp1H9d8Xk/gu0yYwplJb2Xc7VmzuvxE77jDbPcbMBL5Xrvt6/xvPZZ0vS1sn8HPftrv0/91oxY397wlb/xCpSfwesrXvZ3x2xQTOtRPoL1i+vX+Qmzev6x5jWedT+iTuEdryvseUkSnh1nQGAgAAAAAAMKDKzCT+VxVjsScAAAAAAABgiZIZCAAAAAAAwGBakln5aFPDngAAAAAAAIAlSmYgAAAAAAAAg5pJHe4mMCIzEAAA
AAAAAJYomYEAAAAAAAAMprXKTJOPNi0G3RNVdWJVvbaqzq+qrVXVqurkIWMAAAAAAAAA4xm6W/a0JE9LsjnJRwauGwAAAAAAADgAQw8Tel5r7fgkqarnJHnMwPUDAAAAAAAw5WZTh7sJjAyaGdhamx2yPgAAAAAAAODgDZ0ZCAAAAAAAwO1YSzIz+Ex1HKzDvieq6syquqCqLpjZsuVwNwcAAAAAAACWjMOeGdhaOyvJWUmy5sS7tsPcHAAAAAAAAA5JZaYd9nw0RuwJAAAAAAAAWKIOe2YgAAAAAAAAS0dLMisfbWrYEwAAAAAAALBEDZ4ZWFVPHf3zwaPnx1fVNUmuaa2dO3Q8AAAAAAAApstMq8PdBEZ6DBN69oK/Xz96PjfJGR3iAQAAAAAAAIsYvDOwNV29AAAAAAAAt1ctlRkz1U0NewIAAAAAAACWqB7DhAIAAAAAAHA7Ntvko00LewIAAAAAAACWKJmBAAAAAAAADKYl5gycIjoDOzj2U8u71r9jY9fqkyQnnNv/Q3rTKf1j1Gz3ENm5ofUP0tnqzdU9xrorVvaPcVX/Hb7jg3fqHuOOv35V1/pXvPbYrvUnydUP6L+/j/ha9xC5/kG7u8dYtqXvNePiM0/oWn+SrLy5e4j86plv6x7jseu+2j3GI97y613rn7nTjq71J8m2U3Z1j5EvHtE9RC2fwPW7863OriP7b8M/XPDA7jHOeNaF3WPsmu17rk2Sz3/q3t1jbP3OvtelZbf0f59mj+5/bV1xTf/7kGv/0wS24/q+X9cf/+gLutafJO+9+PTuMZZ/fn33GNvvONM9RlvV97vMzLr+37+P+bf+Ma7b1P+zt3Vt/++VK2/se7497jP9v+Nff3r/a8ay/rs7y7f3P27biv73bCtu6bvPl03gK0Db2T/G9rv3/7608opV3WPsXtf3mFqxrf85ZLZzr8QkfhOG2xrdsgAAAAAAALBEyQwEAAAAAABgMC2VmdY/05TxyAwEAAAAAACAJUpmIAAAAAAAAIOalY82NewJAAAAAAAAWKJkBgIAAAAAADCY1pKZJh9tWtgTAAAAAAAAsEQN2hlYVU+tqrdX1eVVta2qvlRVv19VRw4ZBwAAAAAAgGlVmZ3Qg/0bOjPw+UlmkvzPJI9L8idJfj7JB6pKFiIAAAAAAABM0NAddD/YWntaa+2trbVzW2uvSfLLSb4nyRkDxwIAAAAAAGDKtMzNGTiJxziq6q5V9baqurGqbqqqd1TVSWOuu6aqXllVV4xGxTy/qr5vkXLLquoFVXVZVW2vqs9W1VP2U/d/qqrZqmpVtWKsjTkIg3YGttauWWTxv46e7zJkLAAAAAAAANiXqlqX5ENJ7pXkp5L8RJJ7JDmnqtaPUcWfJ3lukt9K8sQkVyR5X1U9YEG5lyZ5SZLXJXl8kk8kObuqnrCXdq1M8sYkVx3YFh24br2M8zxi9PyFCcQCAAAAAADgMJsZfHDKg/bcJKcmuWdr7ZIkqap/S3Jxkp9N8qq9rVhV90/y40l+urX2F6Nl5ya5KMnvJHnSaNkdMzeV3stba38wWv2cqjotycuTvGeR6n89SSX5P5mbfq+brnuiqu6SuTfjn1trF+ylzJlVdUFVXTCzZUvP5gAAAAAAAHD78qQkn9jTEZgkrbVLk3wsyZPHWHdXkr+dt+7uJH+T5LFVtXq0+LFJViV5y4L135LkvlV1yvyFVXX3JC9K8guj+rvq1hlYVUckeWeS3UmevbdyrbWzWmubWmublq8fJxsTAAAAAACAadVSmW2TeYzhPkkuXGT5RUlOH2PdS1trWxdZd1WS0+aV25HkkkXKZZE4b0hydmvtvP3EH0SXYUKram2Sf8hc2uUjWmvf6BEHAAAAAACA27Xjqmr+6JRntdbOmvf3MUk2L7Le9Uk27qfufa275/U9zze01tp+yqWqnpnkwUmesZ/Ygxm8M3A04eHbkmxK8gOttc8NHQMAAAAAAIDpNcE5A69trW2aVLBD
UVXHZG6Owv/ZWrt6UnEH3RNVtSzJW5M8KskPtdY+MWT9AAAAAAAAcAA2Z/EMwL1l/Y27bvLvmX+bk2yoqoXjli4s97IkVyT5u6raUFUbkqwZvXZ0VXWZT2/ozMA/TvIjSX43yZaqesi8175huFAAAAAAAAAm6KLMzem30OlJPj/Guj9cVesWzBt4epKd+fc5Ai9KsjrJ3fMf5w3cM1fg5+f9fb8k1y0S69ok70zyQ/tp0wEbOkfz8aPnFyY5f8HjOQPHAgAAAAAAYMq0JLNt2UQeY3hXkodU1al7FlTVyUkeNnptX/4hycrMJcLtWXdFkh9N8v7W2o7R4vcm2ZVbzwP4zCQXttYuHf39K0keueDxl6PXvj/Ji8bZoAM1aGZga+3kIesDAAAAAACAQ/CnSX4xyTur6kWZ66t8aZKvJ3njnkJVdbckX0nyO62130mS1tqnq+pvk7ymqlYmuTTJzyc5JfM6/lprV1fVq5K8oKpuTvKpzHUYPirJk+aV+8zCxlXVGaN/ntta2z3MJv9HQw8TCgAAAAAAwO1aZSYLp887PFprW6rqUUleneTNSSrJB5P8SmvtlnlFK8ny3HpUzWdnbnq8lyXZkOSzSR7XWvvUgnIvTHJLkuclOSHJl5I8rbX27kE36CDoDAQAAAAAAGDJaq19LclT9lPmsuTWPZittW1JfnX02Nf6M5nrMHzZAbbtJUleciDrHCidgQAAAAAAAAxmz5yBTAd7AgAAAAAAAJYomYEdXPeQXV3rX//lVV3rT5JbnnZT9xgrztvQPcbN99/RPcaKK/vvj952HdG6x2jVf3zozffuH+PaB/bf3+vfc0LX+m/+/tmu9SdJ29j/s7f9hP6XsJWb+8fYddRM1/pnJ/DffnYf3f8c8jvv+y/dY7ziuv5v1rrNfeu/aX3/c9TaL63pHmPn0d1DZOVn1nWPsfq6vp+N6793Z9f6k2TVN/ofUx+//JTuMV54//d0j/H/Tr539xjLtizvW//OCczncWP/a+vG+17bPca6lX2/8yXJ5bvu2LX+D//Vd3WtP0nWfF/nC1+Sm++yunuMFTf1/ewlye7Vfe/RV97Y//N9/cO3d4+Rmf73a6uu7X+e2r2+7z3CtT/Q//tYru3/2cuy/sftzNq+38eSZMXN/Y/bXUf1PYdMIrFn3ZUTCHLTBH4KP2VL9xDLvtn3u8zOzsdTktRM38+3ZLTpMS1zBiIzEAAAAAAAAJYsmYEAAAAAAAAMprUyZ+AUsScAAAAAAABgiZIZCAAAAAAAwKBmZAZODXsCAAAAAAAAliiZgQAAAAAAAAymJZlNHe5mMCIzEAAAAAAAAJaoQTsDq+qxVfWhqrqyqnZU1Teq6u+q6vQh4wAAAAAAADCtKjNt2UQe7N/Qw4Qek+STSV6f5JokJyX5jSSfqKr7ttYuHzgeAAAAAAAAsBeDdga21v46yV/PX1ZV/5Lki0memuR/DxkPAAAAAACA6dKSzDZzBk6LSeRPXjd63j2BWAAAAAAAAMBIl87AqlpeVauq6h5J3pjkyizIGAQAAAAAAAD6GnrOwD3+X5IHj/59SZJHtdauXqxgVZ2Z5MwkWbFhY6fmAAAAAAAAMCkzExmcknH02hM/keQhSX48yU1JPlBVJy9WsLV2VmttU2tt0/L16zs1BwAAAAAAAG5/umQGtta+MPrn/6uqf0pyWZLfSPJzPeIBAAAAAAAwHVoqs60OdzMY6Z6j2Vq7IXNDhZ7WOxYAAAAAAADw73rNGfhtVXV8knsleWvvWAAAAAAAABx+s+YMnBqDdgZW1d8n+VSSf8vcXIHfkeS/Jdmd5H8PGQsAAAAAAADYt6EzAz+R5GlJfi3JqiRfT/LhJL/fWrts4FgAAAAAAABMmdaSGXMGTo1BOwNba69I8ooh6wQAAAAAAAAOTvc5AwEAAAAAALh9mZUZODXM3ggAAAAAAABLlMxAAAAAAAAABtNSmW3y0aaFzsAOVl69smv9Ox94S9f6k2TZ
ruXdY+w8ZaZ7jCxr/WMsATuP7b8vVt3Q/3Rzj8d9tXuMS//+7t1jrLt6tmv9Nz14Z9f6k2Tjx1d3j3HLSd1DZOeddnWPsezGvp+N6ns4zTlhR/cQx5x4Q/cY199hffcYx72t72fjxvv13+E3PGAC14yrJ3CLOoGRSrbeb3vX+pd/Y03X+pPkrv/c//P9wCd8rnuM91//nd1jTOJ8u/Kmvl+kawK359uP3d09xs1b+382dn7kDt1jrLpj3+8yd37i5V3rT5Kb/+TE7jGWndD/u+vWO/f/Xrnyur7Xvu13mMBJ6oZV3UOsua7/D4qrHrS5e4wtlxzdtf6Zrf3vpVYc3/c+J0nat/qfz1fc0v+Yqgn8NLV8W9+b250n9L9+1zf774sjv9L/mnHTEf3PhTm67/5YPoHf8Lrfd/pJGG5FZyAAAAAAAACDmpnE/8RlLHI0AQAAAAAAYImSGQgAAAAAAMBgWpLZJjNwWsgMBAAAAAAAgCVKZyAAAAAAAAAsUYYJBQAAAAAAYECV2SYfbVrYEwAAAAAAALBEyQwEAAAAAABgULOpw90ERrpmBlbVe6uqVdXLesYBAAAAAAAAbq1bZmBV/ViS+/eqHwAAAAAAgOnTWjLTZAZOiy6ZgVW1Mcmrk/xqj/oBAAAAAACA/euVGfiKJBe21v66qv6qUwwAAAAAAACm0GzrOlMdB2DwzsCqeniSn4whQgEAAAAAAOCwGrQzsKpWJXljkj9orX1pzHXOTHJmkqzYsHHI5gAAAAAAADBhLZVZcwZOjaFzNP97krVJfnfcFVprZ7XWNrXWNi1fv37g5gAAAAAAAMDt12CZgVV1UpIXJnlOktVVtXrey6urakOSm1trM0PFBAAAAAAAYPrMRmbgtBgyM/DUJGuSvCXJ5nmPJHn+6N/3HTAeAAAAAAAAsA9Dzhn4mSSPXGT5OZnrIPzzJJcMGA8AAAAAAIAp0xJzBk6RwToDW2s3JPnwwuVVlSSXt9Zu9RoAAAAAAADQz5CZgQAAAAAAAJDZNuRMdRyK7p2BrckDBQAAAAAAgMNBtywAAAAAAAAsUYYJBQAAAAAAYDitMmvgyKkhMxAAAAAAAACWKJmBHcysaV3rX/uZI7rWnyRb7zrTPUbN9P9fAasvXtM9xu71fff3JKz9xgROBRP4TyBfOO/U7jF2nzTbPcYdL9jetf4jP72ua/1JMjuBQ2rN1f1jrLx5VfcYy3f0rX/15v7nqHX33dw9xuWX3LF7jGXb+/8fqd2r+9a/8VP9P3xHXb67e4xvPLJ7iKy5rv+F6aQ39P38XX961+qTJF99Vv/36Wtfvm/3GMu+vL57jNm13UOkLe97TK28pf/+Xn/Jyu4xjv5q//P51d/V//q6uvN5at2KnV3rT5I7/drnu8c475LTuseY3TWB/0e9fXnX6mtX/8/3URf33YYkmen/FT87P7Oxe4yffer7utb/l19+SNf6k2T3547uH2N9/+/fu4/oH2PZjv6fvxVbO18zvtr/+n3zPXd1j7Hi+gn8WLFjAteMlX2P20n8ZjuJ3wk5/FqSWTt7asgMBAAAAAAAgCVKZiAAAAAAAACDMmfg9JAZCAAAAAAAAEuUzEAAAAAAAAAG0yIzcJrIDAQAAAAAAIAlSmYgAAAAAAAAg5IZOD1kBgIAAAAAAMASNWhmYFWdkeScRV66sbW2YchYAAAAAAAATJ+Wkhk4RXoNE/rLSf513t+7O8UBAAAAAAAA9qJXZ+AXWmuf6FQ3AAAAAAAAU2w2MgOnhTkDAQAAAAAAYInq1Rn41qqaqarrquqvquqkTnEAAAAAAACYJi2ZbTWRB/s39DChNyb530nOTXJTkgcm+Z9Jzq+qB7bWrl64QlWdmeTMJFmxYePAzQEAAAAAAIDbr0E7A1trn07y6XmLzq2q85L8S5JfTvKiRdY5K8lZSbLmxLu2IdsDAAAAAAAAt2dDZwbeSmvtU1X15STf1TsWAAAAAAAAh1dLDOE5RXrNGbgY
WX8AAAAAAAAwQd0zA6tqU5J7Jnlb71gAAAAAAAAcfjIDp8egnYFV9dYklyb5VJIbkjwwyQuSfDPJHw0ZCwAAAAAAANi3oTMDL0zyY0l+Kcm6JFcmeUeSF7fWrh04FgAAAAAAAFOmpWQGTpFBOwNba7+f5PeHrBMAAAAAAAA4ON3nDAQAAAAAAOD2pckMnBrLDncDAAAAAAAAgD5kBgIAAAAAADCo2cgMnBYyAwEAAAAAAGCJkhnYwbIdfXu7t9x9V9f6k+Soi1Z2j3HTvXZ3j1Ez+rvHsXt96x/jyO4hktn+Ie7wqf4xrnj4uq71z6zqWn2SZOc9tnWPsfqLa7vHWNF/M3LTPfueC1fcuLxr/Uly88fu3D3GvV99YfcYX3rJ6d1j7H7G9V3r3/LpY7vWnyQ3fW//+5BK/xjbVq7pHmPHb97Ytf7d7+17vUiSNZes7h5j+x3633eu3t3/f6Nu+HL/+6ktd+67Hdvu3P/+fN3X+38FveIR3UNk/eX9j6lbTum7Py6/cWPX+pPk+muO6h7jiC/0v7ndvb57iOxe2/8c0ltN4PvYLXfvf56axP3zGz7zfV3rn72l/7V1zQT29yQs39b/d6Pqf9hmtvPltfofUll1Vf97hEn8/rXi5v7nkBVb+75Xu9dM4JokWex2obVk1pyBU0NPCQAAAAAAACxRMgMBAAAAAAAYVJMZODVkBgIAAAAAAMASJTMQAAAAAACAAZU5A6eIzEAAAAAAAABYonQGAgAAAAAAwBJlmFAAAAAAAAAG1QwTOjW6ZAZW1ROq6ryquqWqbqqqC6rqUT1iAQAAAAAAAIsbPDOwqn42yetGj5dmrsPxAUnWDR0LAAAAAACA6dKSzMoMnBqDZgZW1clJXpPk11tr/6219oHW2vtaa69orb17yFgAAAAAAACwP1V116p6W1XdOBrR8h1VddKY666pqldW1RVVta2qzq+q71uk3LKqekFVXVZV26vqs1X1lAVl7lRVvz8aUfOGqrqmqj64WH1DGnqY0J9OMpvkDQPXCwAAAAAAwG1BS9qEHvtTVeuSfCjJvZL8VJKfSHKPJOdU1foxtubPkzw3yW8leWKSK5K8r6oesKDcS5O8JHMjZz4+ySeSnF1VT5hX5sFJfjTJO5P8SJJnJdme5MNV9cQx2nJQhh4m9OFJvpjk6VX1m0nuluSyJK9urf3xwLEAAAAAAABgX56b5NQk92ytXZIkVfVvSS5O8rNJXrW3Favq/kl+PMlPt9b+YrTs3CQXJfmdJE8aLbtjkucneXlr7Q9Gq59TVacleXmS94yWfTTJd7TWds+L8b5Rff89SZdRNofODLxz5npTX5m5jXtMkg8keV1VPW+xFarqzFE65AUzW7YM3BwAAAAAAAAmbTY1kccYnpTkE3s6ApOktXZpko8lefIY6+5K8rfz1t2d5G+SPLaqVo8WPzbJqiRvWbD+W5Lct6pOGa17w/yOwHn1fSbJXcbZmIMxdGfgsiRHJvnZ1tqfttY+1Fr7+STvTfKCqrrVXmmtndVa29Ra27R8/TjZmAAAAAAAADCW+yS5cJHlFyU5fYx1L22tbV1k3VVJTptXbkeSSxYpl33FqapVSR6a5Av7actBG7oz8LrR8wcWLH9/kuOT3GngeAAAAAAAAEyRlqS1msgjyXF7RqAcPc5c0JxjkmxepJnXJ9m4n03Z17p7Xt/zfENrt5rFcGG5xbwkyYlJXrGfthy0oecMvCjJQ/bx+uzA8QAAAAAAALj9ura1tulwN+JgVNWPJ/mNJC9trX2kV5yhMwP/fvT82AXLH5fkG621KweOBwAAAAAAwFSpzLbJPMawOYtnAO4t62/cdZN/z/zbnGTDItPlLSz3bVX1g0nelOTPW2sv3k87DsnQmYHvSXJOkjdW1XFJvprkR5I8JsmzB44FAAAAAAAA+3JR5ub0W+j0JJ8fY90frqp1C+YNPD3Jzvz7HIEXJVmd5O75j/MG7pkr8D/EqapHJzk7c0l2
PzvGNhySQTMDR2Oh/lCSv0ny20neneR7kjyjtfamIWMBAAAAAAAwnVqbzGMM70rykKo6dc+Cqjo5ycNGr+3LPyRZmbnEtz3rrkjyo0ne31rbMVr83iS7kjxjwfrPTHJha+3Sees/NMk7k3wwyTNba92n2Bs6MzCttZuS/NfRAwAAAAAAAA6XP03yi0neWVUvStKSvDTJ15O8cU+hqrpbkq8k+Z3W2u8kSWvt01X1t0leU1Urk1ya5OeTnJJ5HX+ttaur6lVJXlBVNyf5VOY6DB+V5EnzYtwryT8muTbJK5M8eP7Ioq21Twy+9enQGQgAAAAAAMDtWxtvPr/uWmtbqupRSV6d5M1JKnNZeb/SWrtlXtFKsjy3HlXz2Ul+N8nLkmxI8tkkj2utfWpBuRcmuSXJ85KckORLSZ7WWnv3vDIPydwchBszN+3eQl3eNJ2BAAAAAAAALFmtta8lecp+ylyWRTrjWmvbkvzq6LGv9Wcy12H4sn2UeVOSN+2vvUMbdM5AAAAAAAAAYHrIDAQAAAAAAGAwrU3PMKHoDOyi9+F9wjnLO0dItm/oHiJrv97/8Fu2q3uIJWHXCf3fqFXfXNk9xpGXdQ+Rq753pnuMFZv7nkVmjpjtWn+SLLtqdfcY2+/Uf1/Urv43LMtv7ntOn1nTutafJEd/uXuIXPZL39k9xoZ7XNs9xu5/Oq5r/Tvvu7tr/Uly1Lod3WNs27aqe4zq/1blqn85oWv9s8f3/3yf8g/buse4+Gf63xPuvkP/Hb51dl33GDP32tK1/trRf1/sOqr/d5m2ov9nY8vd+t+HLN/adyCfG794bNf6k2TVSX2P2SS55e4TGPCo+h9Ttb3vZ2PVDf3fpxvu0/9cW2v7x5hZ1z/G2ovWdq1/9/r+x2zva1KSzF6/pnuM5bf0/85X/S8Z3ff5mmv7v0+3nNL/t4pJfMdf1fk3nUlY1v92La3zbedtfy/A8HQGAgAAAAAAMKhZmYFTw5yBAAAAAAAAsETJDAQAAAAAAGBQrf/I1YxJZiAAAAAAAAAsUTIDAQAAAAAAGFQzZ+DUkBkIAAAAAAAAS5TMQAAAAAAAAAbTUjIDp8igmYFV9eGqant5vHfIWAAAAAAAAMC+DZ0Z+AtJjlqw7KFJXpXkXQPHAgAAAAAAYAq1w90Avm3QzsDW2ucXLquq5ybZmeRvhowFAAAAAAAA7FvXOQOral2SH0nyD62163vGAgAAAAAAYAq0mDNwigw6Z+AifjjJkUn+snMcAAAAAAAAYIGumYFJfjLJ1Un+aW8FqurMJGcmyYoNGzs3BwAAAAAAgO5MGjg1umUGVtWdk3x/kre21nbvrVxr7azW2qbW2qbl69f3ag4AAAAAAADc7vQcJvSZo/oNEQoAAAAAAACHQc9hQn8qyWdba5/tGAMAAAAAAIAp01od7iYw0iUzsKo2JTk9sgIBAAAAAADgsOmVGfiTSXYneWun+gEAAAAAAJhSrR3uFrDH4JmBVbUyyY8leW9r7eqh6wcAAAAAAADGM3hmYGttV5I7DF0vAAAAAAAA06/FnIHTpMucgQAAAAAAAMDh12vOQAAAAAAAAG6PWhKZgVNDZiAAAAAAAAAsUTIDAQAAAAAAGFRrh7sF7KEzsIPdd9rRtf6r16zqWn+SHHFZ9xBZ+5Bru8e46aJju8dYConOK69e2T3GrqNnu8eYWb28e4wTPtw/xjUP7nuVXPut/tuwe23/K/3q6/snt+84tv9xe9RX+tb/6DP/X98ASY44o+91L0k+8dAN3WPsfNdx3WMsu3Bb1/rv+Mef7lp/klz82u/pHqM27uwe44hv9j+H3OnDN3at/0u/uLZr/Uny1f/aPURO/bP+14xrf7n/eWp2x7ruMXZ+s+8+Xz7b/852ZlX//X3aW3d1j3H9vdf0j/Ggma71H/WF/j8HbNu1vnuMdbf0P253fufW7jFmdva9Lu244+6u9SfJhov6H1NbHtb/fD5zVf/r686Nfc+Fsyv7
n2vrG/2ve1nT//tYWz6BX6mX9T9Prbqp7zlk613674sVW/rfny/f3n9fbD++/3uVzodtTWAT0vm+08iUcGs6AwEAAAAAABiWzMCpYc5AAAAAAAAAWKJkBgIAAAAAADCgSjNm69SQGQgAAAAAAABLlMxAAAAAAAAAhmXOwKkhMxAAAAAAAACWKJ2BAAAAAAAAsEQN3hlYVQ+rqvdX1dVVdXNVfaqqfnroOAAAAAAAAEyhlrRWE3mwf4N2BlbV/ZL8c5KVSZ6b5L8k+dckf15VPz9kLAAAAAAAAGDfVgxc39OTLE/yg621W0bLPjDqJPzJJH8ycDwAAAAAAACmTTvcDWCPoYcJXZVkV5JtC5bf2CEWAAAAAAAAsA9Dd9C9afT8R1V156raUFXPTfLoJK8eOBYAAAAAAABTqSb0YH8GHSa0tXZhVZ2R5O+T/MJo8a4kP9da+5vF1qmqM5OcmSQrNmwcsjkAAAAAAABwuzZoZ2BV3SPJ25NclOTnMjdc6JOTvKGqtrfW3rpwndbaWUnOSpI1J97VCLIAAAAAAAC3dXp8psagnYFJfi9zmYBPbK3tGi37YFUdm+QPq+qvW2uzA8cEAAAAAAAAFjH0nIH3TfLZeR2Be/xLkmOT3HHgeAAAAAAAAEybNqEH+zV0Z+CVSR5QVasWLP+eJNuTXD9wPAAAAAAAAGAvhh4m9HVJzk7yD1X1+szNGfikJD+W5NWttZ0DxwMAAAAAAGCatCStDncrGBk0M7C19rYkT0iyOsmfJXl7kocn+a9Jfn3IWAAAAAAAAMC+DZ0ZmNbaPyX5p6HrBQAAAAAA4Lahmc9vagw9ZyAAAAAAAAAwJQbPDAQAAAAAAOB2Tmbg1JAZCAAAAAAAAEuUzkAAAAAAAABYogwT2kHb3bePde1V/ftwb7rPru4xVnz+2O4xahJpyDWBGJ2tvr7/RqzYsrx7jO3HdQ+RNTf0j9H7mNp+7219AySpK9d0j7HrqNnuMZb1PxVmy1361n/cylv6BkiybtnO7jGu+bFHdI+xYuc13WPsev6WrvVv/pkHda0/SdZ9fgLn8+39zyE33XN39xhbjz+qb4CZ/ttwxPnruse4+pdu6B7jJaf/Y/cYL6sndI9x5PuO6Vr/1jv3v3meWd09RK554NruMY78xkz3GHVB3/PtTaf139/V/zQ1kaGnZmcm8KXv6L43niu/2f/Dd/Pd+u+M5V/tf13aeFn3ENl8787v1XE7+tafZPnl/e/XZidwzZhd0/+4XbG1/zlkZlXf7Vg+gW1YtnsC59pJ/E44gc/f7Pa+9wi1tf93vmU7lsAPqoyn2dfTQmYgAAAAAAAALFEyAwEAAAAAABjUREbuYywyAwEAAAAAAGCJkhkIAAAAAADAcFomM1cnY5EZCAAAAAAAAEuUzEAAAAAAAAAGVEmrw90IRmQGAgAAAAAAwBI1eGdgVT2yqj5aVduq6vqqenNVHT90HAAAAAAAAKZUm9CD/Rq0M7CqvjfJ+5PckOQpSZ6X5PuSfLCqVg8ZCwAAAAAAANi3oecMfHGSy5P8UGttd5JU1ReS/GuSn0ny+oHjAQAAAAAAMG1k7U2NoYcJfUiSD+zpCEyS1toFSa5L8sMDxwIAAAAAAAD2YejMwJkkOxdZviPJdw4cCwAAAAAAgGkkM3BqDN0Z+KXMZQd+W1XdLcmdkuxabIWqOjPJmUmyYsPGgZsDAAAAAAAAt19DDxP6h0m+u6peVlV3rKp7JXlzktnR41Zaa2e11ja11jYtX79+4OYAAAAAAAAwUS1Jq8k82K9BOwNba29N8rIkv5bkqiSfT/LNJO9JcsWQsQAAAAAAAIB9GzozMK2130xyXJL7JblTa+3HktwjyUeHjgUAAAAAAADs3dBzBiZJWmtbknwuSarqcUnuleRnesQCAAAAAABgulQ73C1gj0E7A6vqgUken+RTo0UPT/LrSf5Xa+3jQ8YCAAAAAAAA9m3ozMCdSZ6Q5L8nWZ3kC0l+rrX2
FwPHAQAAAAAAYFrJDJwag3YGttYuylw2IAAAAAAAAHCYLTvcDQAAAAAAAAD60BkIAAAAAAAAS9TQcwYCAAAAAABwO1fmDJwaOgM7WHXFyq71bz1lV9f6k+SIi/tuQ5LMrOoeInnATd1D7L7kyO4xepv9nhu7x1j14aO7x9j6sFu6x7jq2HXdY7Qjd3et/2F3v7Rr/Uny2SPu3D3G7s9u6B5jxx1museYObpvjLf85Q90rT9Jjr60//u068TqHmPzl4/rHuO7vuvLXev/Sv+3KXd6/Le6x/ji+ad0j5Gbl/ePcdK2rtVv/MjarvUnyQkfurp7jOtuvEP3GM+/6undY2z8TP9javuxfetfvq3/SWQSMR7+E5/sHuP9l9yre4xdW/p+J1t91I6u9SdJu/iI7jF2buz/C9Pai/qfb3du6Lsdy/r/jJBdG/vfE07i+n3dgyewHb3d0v83nd3r+3/2lm/tP6DZ7Or+2zGJ37+W9f0ZIbtO6n/NWPn11d1jrLqh/33I9i39f26v3X0/GzU7gS+Wy/QQwaTpDAQAAAAAAGBYbQKdy4zFnIEAAAAAAACwRMkMBAAAAAAAYDht9GAqyAwEAAAAAACAJUpmIAAAAAAAAMOSGTg1ZAYCAAAAAADAEiUzEAAAAAAAgEGVzMCpMVZmYFWdWFWvrarzq2prVbWqOnmRcmuq6pVVdUVVbRuV/77BWw0AAAAAAADs17jDhJ6W5GlJNif5yD7K/XmS5yb5rSRPTHJFkvdV1QMOoY0AAAAAAADclrQJPdivcYcJPa+1dnySVNVzkjxmYYGqun+SH0/y0621vxgtOzfJRUl+J8mTBmkxAAAAAAAAMJaxMgNba7NjFHtSkl1J/nbeeruT/E2Sx1bV6oNqIQAAAAAAABykqrprVb2tqm6sqpuq6h1VddKY6441RV5VLauqF1TVZVW1vao+W1VP2Uudz62qL1bVjqr6UlX93KFu476MO0zoOO6T5NLW2tYFyy9KsipzQ40CAAAAAACw1E3JMKFVtS7Jh5LcK8lPJfmJJPdIck5VrR9jS8adIu+lSV6S5HVJHp/kE0nOrqonLGjPc5O8McnbkzwuydlJXl9VPz9GWw7KuMOEjuOYzM0puND1816/lao6M8mZSbJiw8YBmwMAAAAAAMDt3HOTnJrknq21S5Kkqv4tycVJfjbJq/a24rhT5FXVHZM8P8nLW2t/MFr9nKo6LcnLk7xnVG5Fkt9N8ubW2gvnlbtzkpdW1Z+11nYNtuUjQ2YGHpTW2lmttU2ttU3L14/TAQsAAAAAAMC0qja5xxielOQTezoCk6S1dmmSjyV58hjrjjNF3mMzN0rmWxas/5Yk962qU0Z/PzTJHRYp9+YkxyZ5+FhbdICG7AzcnGSx1L49GYHXL/IaAAAAAAAA9HKfJBcusvyiJKePse44U+TdJ8mOJJcsUi7z4txn9LywPQvLDWrIzsCLkpwyGnt1vtOT7Myt3wAAAAAAAACWolaTeSTHVdUF8x5nLmjJvqa529/8deNOkXdMkhtaawtzFRcrl0Xq3OeUe4dqyM7Af0iyMsmP7FkwGvv0R5O8v7W2Y8BYAAAAAAAAcO2e6ehGj7MOd4OmzYpxC1bVU0f/fPDo+fFVdU2Sa1pr57bWPl1Vf5vkNVW1MsmlSX4+ySlJnjFkowEAAAAAAJhi483nNwn7muZusay/hevebS/rJv+e0bc5yYaqqgXZgYuVy6g9V+yj3KDG7gxMcvaCv18/ej43yRmjfz87ye8meVmSDUk+m+RxrbVPHXwTAQAAAAAA4KBclH+fq2++05N8fox1f7iq1i2YN3DhFHkXJVmd5O75j9Pm7ZkD8PPzymXUniv2UW5QYw8T2lqrvTzOmFdmW2vtV1trJ7TW1rTWvqe19uEeDQcAAAAAAGA6VZvMYwzvSvKQqjr1222rOjnJw0av7cu4U+S9N8mu3HqkzGcmubC1duno7/OTXLuXctcn+dhYW3SADiQzEAAA
AAAAAG5L/jTJLyZ5Z1W9KHMDmL40ydeTvHFPoaq6W5KvJPmd1trvJMm4U+S11q6uqlcleUFV3ZzkU5nrMHxUkifNK7erqn4zyeur6ptJ/nlU5qeT/FJrbWePN0BnIAAAAAAAAMOakjkDW2tbqupRSV6d5M1JKskHk/xKa+2WeUUryfLcelTNcafIe2GSW5I8L8kJSb6U5GmttXcvaM8bqqol+bUkv57ka0l+sbX2+nSiMxAAAAAAAIAlq7X2tSRP2U+ZyzLXIbhw+bYkvzp67Gv9mcx1GL5sjPa8MfOyEnvTGQgAAAAAAMBwxp/PjwnQGdhBzdyq43hQq67qv9u2HzuBT+kEQuy+dl33GCu7R+hvx6VHdo9xhx/8VvcY28+/c/cYx17W/8A945c+2bX+v//Q93StP0nawkT6Ho6a7R6idvU9nyfJuq/3PYusv7L/+3TPX7+oe4yv/Y97dI/x8z/7/u4xXvneH+xa/8n/uKtr/Uly1V2O7R5j1/f13471F6/qHuOYj/aNceXTt3atP0nytP4xdp7d/1x7zKeXd4+xYgK745a79b0P2b2u/33O8h399/f7P/ig7jGOOP367jEefOolXes/57z7da0/SY78Zv/9PbO2e4isurH/Z2PZzr7v1bbjJ7ANW/t/CVi+o3uIzKzvH2P5lr7XpWP6355n9wQ+ezXTP8aOY/oft7uOvO3/xrb2i2v6BkjSJvAr9Y5j+u+LVVf335AV2/peM2ZWd60+SdKW9d0X1f+nELjN0RkIAAAAAADAsGQGTo1J5G4AAAAAAAAAh4HOQAAAAAAAAFiiDBMKAAAAAADAsAwTOjVkBgIAAAAAAMASJTMQAAAAAACAQZXMwKkhMxAAAAAAAACWqLE6A6vqxKp6bVWdX1Vbq6pV1cmLlPu9qnp/VV03KvOsoRsMAAAAAAAAjGfczMDTkjwtyeYkH9lHuV9KsjbJuw+xXQAAAAAAAMAhGnfOwPNaa8cnSVU9J8lj9lLu6NbabFWdluQnh2ggAAAAAAAAtzHmDJwaY2UGttZmhywHAAAAAAAA9DduZiAAAAAAAADsX0tKZuDUGHfOwG6q6syquqCqLpjZsuVwNwcAAAAAAACWjMPeGdhaO6u1tqm1tmn5+vWHuzkAAAAAAAAcqjahB/t12DsDAQAAAAAAgD7MGQgAAAAAAMCwZO1NDZmBAAAAAAAAsESNnRlYVU8d/fPBo+fHV9U1Sa5prZ07KvOIJHdIcsKozKaquiVJWmtvG6bJAAAAAAAATKtKUjIDp8aBDBN69oK/Xz96PjfJGaN//3aSR8wr819Hj2Ru3wMAAAAAAAATMnZnYGttv515rbUzDqk1AAAAAAAAwGAOJDMQAAAAAAAA9s8woVNj2eFuAAAAAAAAANCHzEAAAAAAAACG05KSGTg1ZAYCAAAAAADAEiUzsINdR852rf+Ir/fvw92yaVv3GDPbl3ePsfarq7rH2H3kEvjvDXfa0T3Ejjed0D3Gzsf0344tu1Z3j/H3H3xI1/o3fqFr9UmSHRure4zda7qHyLaTdvWP0fmjUTP9rxnnfPI+3WOs+d7+14w//osnd4+xuvNl6dGv/mjfAEnOfsOju8c47uMru8fY/Oj+9zo37F7btf47vq3/ufZbdz65e4wbH9D/XDsJtb3/+bat7HvfueKm/ufa3cfs7h5jEv/9uP7xmO4xPn7MsV3rP/o/Xdu1/iTZevNx3WMsm8Ahdd13T+K47Vv9iuv6//wze8RM9xi7jusfY/Wl/b9o7Dip73fXax/Z/zzYJvCbzvIj+n/2Zm7uf9+58vr+71Xr/D1jx5r+x9TRF3cPkRu/o/92LL/r1u4xtl25rmv9k7i29p5HrkmBmh5L4KfzpcLHAgAAAAAAAJYomYEAAAAAAAAMS2bg1JAZCAAAAAAAAEuUzEAAAAAAAAAGNYHpvBmTzEAAAAAAAABYomQGAgAAAAAAMCyZgVNDZiAAAAAAAAAsUWN1BlbViVX1
2qo6v6q2VlWrqpMXlNlUVWdV1RdHZb5WVW+tqlO6tBwAAAAAAIDp0yb4YL/GzQw8LcnTkmxO8pG9lHl6kvsk+aMkj0/yG0kelOSCqrrrIbYTAAAAAAAAOEDjzhl4Xmvt+CSpquckecwiZV7RWrtm/oKq+liSS5M8N8lvHUpDAQAAAAAAuG0oWXtTY6zMwNba7Bhlrllk2eVJrklylwNvGgAAAAAAAHAoxh0m9KBU1b2T3DHJF3rGAQAAAAAAAG5t3GFCD1hVrUjyhsxlBv75PsqdmeTMJFmxYWOv5gAAAAAAADAphgmdGj0zA1+X5D8leWZrbfPeCrXWzmqtbWqtbVq+fn3H5gAAAAAAAMDtS5fMwKp6eeay/X6qtfb+HjEAAAAAAACYTiUzcGoM3hlYVS9M8j+S/FJr7c1D1w8AAAAAAACMZ9DOwKr65SQvS/LC1trrhqwbAAAAAACA2wiZgVNj7M7Aqnrq6J8PHj0/vqquSXJNa+3cqnp6ktckeW+SD1XVQ+atflNr7fNDNBgAAAAAAAAYz4FkBp694O/Xj57PTXJGksclqdHz4xaU3VMGAAAAAACApaxFZuAUGbszsLVW+3n9WUmedYjtAQAAAAAAAAYy6JyBAAAAAAAA3L7V6MF0WHa4GwAAAAAAAAD0ITMQAAAAAACAYZkzcGrIDAQAAAAAAIAlSmZgB6s39+1jvem+O7vWnyTLrlrdPcaqrf1HDF53Vf//enDTkd1DdDd73aruMbYe3///Hqy5uP9xu+2uu7vHuNM5fd+rax7U/7N3xNe7h8iu9f1jLNu2vHuM5Z3Phff4wYu71p8kX/jQPbrHOPors91j7Di6/2djVefN+D+f+099AyRZe0T3EMmy/vti+aVruse45f7b+wZo/beh+n/0csTFK7vHWH9F/w254Tv6H7drr+4bY/sxXatPksyu7H9tbSv7fwdY1v8rWdZf0Xc7lv1N/x2+8oTuIdImMBHN8hsn8NPJsr77e3bVBD4XW/t/vldf3v+a0Sawu1d9o+938F0b+1/3VnX+7StJZm7pf0wtm0CaxCSOqd7XpUkcUzef2n9nLNvV/6Kx68p13WMs29W3/jaBz0XNmknu9qJkBk4NmYEAAAAAAACwRMkMBAAAAAAAYFgyA6eGzEAAAAAAAABYonQGAgAAAAAAwBJlmFAAAAAAAACGZZjQqSEzEAAAAAAAAJYomYEAAAAAAAAMpyUlM3BqjJUZWFUnVtVrq+r8qtpaVa2qTl5Q5m5V9c6quryqtlXVtVV1blU9oUvLAQAAAAAAgH0ad5jQ05I8LcnmJB/ZS5kjklyb5EVJnpDkZ5LcnOQfq+q/HGI7AQAAAAAAuK1oE3qwX+MOE3pea+34JKmq5yR5zMICrbWLMtcB+G1V9Y9JLk3y7CTvOLSmAgAAAAAAAAdirM7A1trswVTeWttdVTcm2X0w6wMAAAAAAHDbY87A6TFuZuDYqmpZ5oYfPS7JmUm+I8nzho4DAAAAAAAA7NvgnYFJ/leSXxv9+5YkT2+tfXBvhavqzMx1GmbFho0dmgMAAAAAAMBEyQycGss61PmaJN+V5AeT/FOSv6qqJ+6tcGvtrNbaptbapuXr13doDgAAAAAAANw+DZ4Z2Fr7RpJvjP58d1V9OMkfJHn30LEAAAAAAACYPuYMnB49MgMXuiDJaROIAwAAAAAAAMzTY87Ab6uqZUkenuQrPeMAAAAAAAAwJVrMGThFxu4MrKqnjv754NHz46vqmiTXtNbOraqXJDkmyceSXJnkhCQ/k+S7k/z4YC0GAAAAAAAAxnIgmYFnL/j79aPnc5OckeRTSX4lydOTHJ25DsHPJvne1trHDqmVAAAAAAAA3HbIDJwaY3cGttZqP6+/K8m7DrlFAAAAAAAAwCCWHe4GAAAAAAAAAH0cyDChAAAAAAAAsE+VpAwTOjVkBgIAAAAAAMASJTMQAAAAAACAYckMnBo6AztYeXPf+nesnOkbIMkRX1/V
Pcau9d1D5IZ79j/bLOu/O7pbeXP/JOHtx/TfF2uuq+4xdt7S/726elPf7ZhEev6WE/sHucOn+se4+cT++/vme+7qWv9F553Wtf4kOemcbd1j1M7Z7jGu/aF13WNsuM91Xetfd85xXetPkp3f0/lGJ8m2r/e/SVh5U//P9+qvrula/8332N21/iRZvrX/+1S7+1+/7/TR/uepLXfuf9zuPKpv/TWB+9pVE/jsbb9j/8/GzY+/pXuMdR86omv9s0f13xe713YPkTXX9b8nXHF5//PUzXfrHGDlBL4E9L9dm8j4VrMrJvA7Qudr35orl3etP0naBPbFshsn8B1/wwQO3En8EN75rVq2vf++aCf1v19b9bn+3/lqAofUrrvs7Fp/29r/HLJsuwELYdJ0BgIAAAAAADCoalIDp4UueAAAAAAAAFiiZAYCAAAAAAAwnBZzBk4RmYEAAAAAAACwRMkMBAAAAAAAYFAlM3BqyAwEAAAAAACAJUpmIAAAAAAAAMOSGTg1xsoMrKoTq+q1VXV+VW2tqlZVJ+9nnd8YlfvoIC0FAAAAAAAADsi4w4SeluRpSTYn+cj+ClfVqUlelOTqg28aAAAAAAAAt0XVJvNg/8btDDyvtXZ8a+0JSc4eo/yfJHlrki8cdMsAAAAAAACAQzJWZ2BrbXbcCqvqx5M8KMkLDrZRAAAAAAAA3Ia1CT06qaplVfWCqrqsqrZX1Wer6ikHsP4PVdWnR+teXlUvqqrli5R7eFV9vKq2VdWVVfWqqlq7oMxzq+o9VfXNqtpSVRdW1a9X1apx2jJuZuBYqmpjklcn+e+tteuHrBsAAAAAAAAm5KVJXpLkdUken+QTSc6uqifsb8WqemyStyf519G6f5i56fV+b0G5+yX5QOam3XviqMyzk7xpQZW/leTKJM8blfvbUfveOs6GrBin0AF4ZZIv59aN3KuqOjPJmUmyYsPGgZsDAAAAAAAA46uqOyZ5fpKXt9b+YLT4nKo6LcnLk7xnP1W8PMlHW2tnzlv3iCQvqqpXt9auHC3/7STfSPIjrbVdo9g7k/xlVb2itfapUbkHtdaumVf/OVVVSX67qk5trX11X40ZLDOwqr43yU8m+fnW2tiJma21s1prm1prm5avXz9UcwAAAAAAADgcWlITenTy2CSrkrxlwfK3JLlvVZ2ytxWr6q5JHrDIum9OsjJzmYKpqpVJHpfk7/Z0BI78XZKdSZ68Z8GCjsA9/nX0fJf9bMugw4S+McmfJ/lGVW2oqg2ZyzxcPvp79YCxAAAAAAAAoIf7JNmR5JIFyy8aPZ++n3WT5ML5C1trlybZOm/duydZs0i57Um+sp8YSfKIJLOZG7Fzn4YcJvTeo8fPLfLa5iT/LclrBowHAAAAAADANOqXtbfQcVV1wby/z2qtnXWIdR6T5IZFRsK8ft7r+1o3mesbW2jzvNf3Ve76fcUYzTX4vCT/p7V21T7akmTYzsBHLrLsNUmWJ/ml3Lr3FAAAAAAAAA7Fta21TfsqUFXfn+QDY9R1bmvtjEFa1UlV3SnJOzOXPfir46wzdmdgVT119M8Hj54fX1XXJLmmtXZua+3Di6xzQ5IVi70GAAAAAADA0lPpOp/fwfh45ka33J+to+fNSTZUVS3IDtyTrXd99m5Ppt/GRV7bOG/dfZU7Jv8+JOm3VdWxmevUrCSPba3dvI92fNuBZAaeveDv14+ez01yxgHUAwAAAAAAABPRWtua5IsHsMpFSVZnbl6/+SNf7pnH7/P7WTeZmzvw/D0Lq+rkJOvmrfuVzM1LeJ/5K1fVmiSnZkG/XFUdleR9SY5N8r2ttW+OuzHLxi3YWqu9PM7YxzpntNYePm4MAAAAAAAAloDWJvPo471JdiV5xoLlz0xyYWvt0r1vdvtaks/uZd1dSf5pVG7nKM7Tqmp+8t5TM9cR+a49C6pqXZJ/THJKkse01g5oar4h5wwEAAAAAACA27TW2tVV9aokL6iqm5N8KsmPJnlU
kifNL1tVH0xyt9baafMW/88k766qNyb56yQPTPKiJH/YWrtyXrmXJPlEkr+rqj9OcnKSVyZ5W2vtk/PKvT3Jw5I8L8n6qnrIvNe+0lq7Zl/bozMQAAAAAACAQU3ZnIEH44VJbslcB9wJSb6U5GmttXcvKLc8C/rbWmvvqaqnJnlxkmcluSrJ7yX53QXlPlNVj0nyisxl/t2Y5P9mrjNxvseNnv9okXY+O8mb9rUhOgMBAAAAAABgntbaTJKXjR77KnfGXpa/I8k7xohzXpKH7qdM7a+efdEZCAAAAAAAwHDa6MFU0BnYwc6j+tZ/l7et7BsgyY6jJvEpPaSO7LGs2NY/xvY73PbPaBu/0H8bth6/rHuMnQ+7uXuMJ5zyxe4x3n3h/brWv/Ibq7rWnyQ7j9/dPcYVD+9/TK28uf9nY+03+p7T7/auzV3rT5Ktdz2ye4ya6b8vjrmoe4jc74xvdq3/Ds/sf476m89+V/cYy3dP4Pp98o7uMY7+9Oqu9a++vv+t/K4juofI9uNmu8f46i/1P6ZWf657iNSmG7vWv31L/3uEZd9a0z3G6muXd4+x9vP9r33Ld/S99m25a/9r664N/T/f2+7aP0atnekeY9WlfT8bdWj/kXwsK27pHiK33HNX9xgnvav/e3XN/ftew2f7/2yUlVv6x9jd/5KRNddO4LeKSfzG1vlUOLO+/7l25eVru8fYdmL/8/mKm/ofU0d8ru/3jEl89mZX9/1cVP9DFm5zdAYCAAAAAAAwKB2z06P/f1UAAAAAAAAADguZgQAAAAAAAAzrtj/D1pIhMxAAAAAAAACWKJ2BAAAAAAAAsEQZJhQAAAAAAIBBlWFCp8ZYmYFVdWJVvbaqzq+qrVXVqurkRcq1vTweMHTDAQAAAAAAgH0bNzPwtCRPS/LJJB9J8ph9lH1TkjcuWPblA24ZAAAAAAAAtz0tSZMaOC3G7Qw8r7V2fJJU1XOy787Ab7bWPnHILQMAAAAAAAAOyVidga212d4NAQAAAAAAYGkwZ+D0GGvOwAP081W1YzS34Ieq6ns7xAAAAAAAAAD2Y+jOwLck+YUk35/kzCTHJvlQVZ2xtxWq6syquqCqLpjZsmXg5gAAAAAAADBxbUIP9mvcOQPH0lr7iXl/fqSq3pnkwiQvS/LwvaxzVpKzkmTNiXe12wAAAAAAAGAgg3YGLtRau7mq/jHJz/SMAwAAAAAAwHSomDNwmvSYM3AxdjkAAAAAAABMWNfMwKo6KskTk/xLzzgAAAAAAABMidbmHkyFsTsDq+qpo38+ePT8+Kq6Jsk1rbVzq+r5Se6Z5Jwk30pytyTPT3JCkmcM12QAAAAAAABgHAeSGXj2gr9fP3o+N8kZSb6U5IdHj6OT3JTkY0l+prUmMxAAAAAAAOB2wpyB02PszsDWWu3n9X9I8g+H3CIAAAAAAABgEF3nDAQAAAAAAOB2SGbg1Fh2uBsAAAAAAAAA9KEzEAAAAAAAAJYow4QCAAAAAAAwqDJM6NSQGQgAAAAAAABLlMzADlZv7lv/Nffvv9t2bpjtHmPFlu4hsmvjBLbjltt+n/qu9dU9Rnv4Dd1jfN9dLu0e4wPv/q7uMdZv7Vv/tjv0/y85x328/3nqugf0346a6R4iy7f3rf/yJ23sGyDJ53/+9d1j3PusX+ge44iv9T+mPvmm+3Wtvz2u801IkpXfWNU9xiQsX9X/HmHXEX3r33Z8/2242+lXdI9x2cXHd4+x7Oo13WNsu3fnE3qSdvX6rvUv29b/vnZ2Vf9z7RFf678dN2za0T1GdvbdjiO/vLJr/UnSTu3/uZiEmSvXdo+x+4jO5/QJ/K/8ZTv6f/ZqZf9r37ce1v9eZ821fetvE/iZYmYCt4Rtef8YO47qf0wt39b/N5eZzrc66y/r/x1/xzETOFHt7r8vJnG+ne28O3av778RrfM2TOI8yBhaklmpgdPCxwIAAAAAAACWKJmBAAAAAAAADEti4NSQGQgAAAAAAABL
lMxAAAAAAAAABlUyA6eGzEAAAAAAAABYomQGAgAAAAAAMKwmNXBajJUZWFUnVtVrq+r8qtpaVa2qTt5L2XtX1dlVdW1VbauqL1XV8wZtNQAAAAAAALBf42YGnpbkaUk+meQjSR6zWKGq2pTkQ0k+nOQ5SW5Mco8kRxxqQwEAAAAAALhtMGfg9Bi3M/C81trxSVJVz8kinYFVtSzJ/03ywdbaD8976ZxDbiUAAAAAAABwwMbqDGytzY5R7Iwk907ys4fSIAAAAAAAAG7D2ujBVBhrzsAxPXz0vKaqPlFVu6rq6qr6o6paO2AcAAAAAAAAYAzjDhM6jjuPnv82yeuS/EaSTUl+J8ldk/zwYitV1ZlJzkySFRs2DtgcAAAAAAAAJq2SVJMaOC2G7Azck2X4ltbab43+/eGqWp7k5VV179baFxau1Fo7K8lZSbLmxLs6MgAAAAAAAGAgQw4Tet3o+QMLlr9/9PzAAWMBAAAAAAAA+zFkZuBF+3l9dsBYAAAAAAAATCu9QlNjyMzAf0qyI8ljFyx/3Oj5ggFjAQAAAAAAAPsxdmZgVT119M8Hj54fX1XXJLmmtXZua+26qvr9JL9ZVTcl+VCSTUl+K8lfttYuGbLhAAAAAAAATKdq7XA3gZEDGSb07AV/v370fG6SM0b//p0kNyf5hSTPT3JFklcmeenBNxEAAAAAAAA4GGN3BrbWaowyLcmrRg8AAAAAAABub9rowVQYcs5AAAAAAAAAYIocyDChAAAAAAAAsB8tMWfg1JAZCAAAAAAAAEuUzEAAAAAAAAAGVRIDp4bOwA7u9fQvdq3/i39zr671J8mO4/p/SnedtLN7jLZjefcYSyHB9uZHbuke425H3dw9xkf/vwd2j7HjXtu7x1j16TVd6589fkfX+pPkujtU9xhrLl7dPcbs6v7nwpnvualr/RuP2Nq1/iS5+4ee3T1GHTnbPca1m/rHOOUdM13r/+aJG7vWnyQ7T9jVPUZm+p9Dsr3/PcK2e/a9ZrRd/e9Bdr3xhO4x7ri2/3as+8lvdY9x1Xl36R5jdmXf69KO4/qeo5Jk9bUT+Owd3//6vfaS/vchM2v6bsfsBH4N2LW5//u0fGv/c0ibwD1h72vf8q39r63b79j/XmrZtSu7x9h9dP9z4bIr+34A13+z/zG77fj+x9Ty/l+/05b3345lu/vHqG199/nWO/X/fGdF/+N23df734fU7u4huv8UuWJb/2N299q++1sHFNyazkAAAAAAAACGZc7AqXHbT2kCAAAAAAAAFiUzEAAAAAAAgOG0pCYwyjDjkRkIAAAAAAAAS5TMQAAAAAAAAIZlzsCpITMQAAAAAAAAliiZgQAAAAAAAAxLYuDUGCszsKpOrKrXVtX5VbW1qlpVnbygzEtGyxd7bO/SegAAAAAAAGCvxs0MPC3J05J8MslHkjxmkTJ/luS9C5atHy1718E2EAAAAAAAADg443YGntdaOz5Jquo5WaQzsLX2jSTfmL+sqn5iFOMvD7GdAAAAAAAA3EZUM07otBhrmNDW2uxB1v9TSa5K8r6DXB8AAAAAAAA4SONmBh6wqrprkkcmeU1rbXevOAAAAAAAAEwZmYFTY6zMwIP0zFH9+xwitKrOrKoLquqCmS1bOjYHAAAAAAAAbl+6ZQYm+ckkn26t/du+CrXWzkpyVpKsOfGuuokBAAAAAABuy1qSg52AjsF1yQysqu9Ocq/sJysQAAAAAAAA6KdXZuBPJdmV5K861Q8AAAAAAMAUqrSUOQOnxuCZgVW1KsnTk/xTa+2aoesHAAAAAAAAxjN2ZmBVPXX0zwePnh9fVdckuaa1du68ok9MckwMEQoAAAAAAHD7JDNwahzIMKFnL/j79aPnc5OcMW/5TyW5Psm7D75ZAAAAAAAAwKEauzOwtVZjlnvywTcHAAAAAACA2zyZgVNj8DkDAQAAAAAAgOlwIMOEAgAAAAAAwL61JLOHuxHsITMQAAAAAAAAliiZgQAA
AAAAAAyqzBk4NXQGdvCVP79n1/p33rlr9UmSNVct7x6jrugfYxJ2HX3bP6Gt+dcjuse4/A7ru8c4YnP/fXHEP6/uHqOefnXX+nd/5I5d60+S7XfoPwbAtlN2do+xfPPK7jE2vPfIrvV/67vXdq0/SR7/wM91j3HOxQ/qHuN+3/WV7jE+M3tq1/rXfqu61p8kO/uHSO3uP3jFkV/sfx8y8703dq1/yw39P99HXHpL9xiX/+eju8e4/pP9b6B332V39xi1ZqZr/Wsu63+fs2Nj/3uEE+91VfcY3/rc8d1jLNvd94S79S59j6ckWXbUru4x1lze/1y4c0P/7xmt8/V1tv9tbWaP6H9MZVn/fbHm8lXdY9x8St9zYR2zo2v9SZJr+18zVm1eGgOa7V7b/7idXdU3xurr+++LNoHdvf3Y/vtiWf9LX5Zv73zRWBofPWABH20AAAAAAABYomQGAgAAAAAAMCzDhE4NmYEAAAAAAACwRMkMBAAAAAAAYEBNZuAUkRkIAAAAAAAAS5TMQAAAAAAAAIbTIjNwisgMBAAAAAAAgCVqrM7Aqjqxql5bVedX1daqalV18iLlTqqqv6yqr1XVtqr6clW9rKrWD95yAAAAAAAAptPshB7s17jDhJ6W5GlJPpnkI0kes7DAqMPvn5OsTPKbSb6W5LuS/HaSeyT50QHaCwAAAAAAAIxp3M7A81prxydJVT0ni3QGJnlY5jr9Httae/9o2TlVdUyS51fVutba1kNuMQAAAAAAAFOtzBk4NcYaJrS1Nk6i5arR800Llt8wilPjNwsAAAAAAAAOj6paVlUvqKrLqmp7VX22qp5yAOv/UFV9erTu5VX1oqpavki5h1fVx0fT711ZVa+qqrX7qHdlVX1uNKXfc8Zpy1idgWP65yQXJ3lFVZ1eVUdU1aOSPC/JG1prWwaMBQAAAAAAwLRqbTKPfl6a5CVJXpfk8Uk+keTsqnrC/lasqscmeXuSfx2t+4dJXpTk9xaUu1+SDyS5OskTR2WeneRN+6j++UmOO5ANGXeY0P1qrW2vqodnbuMumvfSnyX5xb2tV1VnJjkzSVZs2DhUcwAAAAAAAOCAVdUdM9fp9vLW2h+MFp9TVacleXmS9+ynipcn+Whr7cx56x6R5EVV9erW2pWj5b+d5BtJfqS1tmsUe2eSv6yqV7TWPrWgXadmrsPwuUneOu72DJYZWFVrkvxtkjsm+Ykkj0jy60l+NMkf72291tpZrbVNrbVNy9evH6o5AAAAAAAAHA4tyWybzKOPx2Zuery3LFj+liT3rapT9rZiVd01yQMWWffNSVZmLlMwVbUyyeOS/N2ejsCRv0uyM8mTF6n+T5L8TZKPj7shyYCZgUl+JskZSU5rrX1ltOy8qroxyVlV9YbW2mcHjAcAAAAAAABDu0+SHUkuWbB8z8iYpye5dB/rJsmF8xe21i6tqq2jdZPk7knWLFJue1V9ZV65JElVPSPJpiTPSHLE2FuSYecMvG+SzfM6Avf4l9HzvQeMBQAAAAAAwFSa0HyBc3MGHldVF8x7nLm/1o3hmCQ3tHarSQmvn/f6vtZNks2LvLZ53uv7Knf9/BhVtTHJq5L8j9batfuIvaghMwOvTLKxqk5rrc3vKf2e0fM3B4wFAAAAAAAA17bWNu2rQFV9f5IPjFHXua21MwZp1bBemeQrSf78YFYeuzOwqp46+ueDR8+Pr6prklzTWjs3yZuS/GqS91TV7yb5WubSFX8zySeTfOxgGggAAAAAAACH4OMZbwTLraPnzUk2VFUtyA7ck613ffZuT6bfxkVe2zhv3X2VOyajIUmr6nuSPDvJo5IcXVVJctSo3Nqq2pDkxkWyGL/tQDIDz17w9+tHz+cmOaO1dllVPSTJS5K8LMlxSb6e5Kwkv9tamz2AWAAAAAAAANxW7b1vauJaa1uTfPEAVrkoyerMzes3fzTMPfP4fX4/6yZzcweev2dhVZ2cZN28db+SuXkJ7zN/5apa
k+TU/Hu/3L0zN+3fhxeJ9Uejx8YkN+ytQWN3BrbWaowyn0/ytHHrBAAAAAAAgCnz3iS7kjwjyW/PW/7MJBe21i7d24qtta9V1WdH6/7ZgnV3JfmnUbmdVfXeJE+rqpe01naPyj01cx2R75rXlkcuCHNCkr9O8gdJ/jHJLfvamCHnDAQAAAAAAICpygw8UK21q6vqVUleUFU3J/lUkh/N3FCdT5pftqo+mORurbXT5i3+n0neXVVvzFyn3QOTvCjJH7bWrpxX7iVJPpHk76rqj5OcnLn5Ad/WWvvkqC1XJpm/zp4swyT5Umvtw/vbHp2BAAAAAAAA8B+9MHMZd8/LXCbel5I8rbX27gXllmdBf1tr7T1V9dQkL07yrCRXJfm9JL+7oNxnquoxSV6RuQy/G5P838x1Jg5GZyAAAAAAAADDaUlmb7uZgUnSWptJ8rLRY1/lztjL8nckeccYcc5L8tADbNtlSfY7vd8eyw6kcgAAAAAAAOC2Q2ZgBzc9dkvX+ttX1netP0l2HzHbPUaO3tU9xPqL1nSPsRTsnsDbtPaqsf+TwkHb/uibuse4+ZIju8eY+cqxXeu/64W791/oEH39R/rHyK7+/59l5qj+23HDPfteitd+bWXX+pPkvTvu3z3Gyefv7B7j4q336B4j37m9a/Xrv9n/c7Hxy8u7x7ju9P4xbjmp/73OER85umv9y79ra9f6k+Trgw5Ksrhj/rb/vlh100z3GJvv0f98e8vJfT/js/fe53zzg2ib+994XvHpE7rHOP2hl3aP8bW3n9q1/jv/8+au9SfJV368731tksz2/+hl9eb+32W2Hd/3XDg7gV9/alv/+5C2qn92wYr+l9fc5by+97Zf/en+x+zKrf1jtP63hJlZM4GMlf5vVXcza/u/T7uP7H9PeNQX+x9UW+88gWOq8+m2+v9km6yaQAymQEvaBPoZGIvMQAAAAAAAAFiiZAYCAAAAAAAwrHbbnjNwKZEZCAAAAAAAAEuUzEAAAAAAAACG05LMygycFjIDAQAAAAAAYImSGQgAAAAAAMCwzBk4NWQGAgAAAAAAwBI1VmdgVZ1YVa+tqvOramtVtao6eZFyp1TV26rqhqraUlXnVNWmwVsNAAAAAADA9GptMg/2a9zMwNOSPC3J5iQfWaxAVR2b5KNJvjPJzyZ5+uilc6rq3ofYTgAAAAAAAOAAjTtn4HmtteOTpKqek+Qxi5T5+STHJ/m+1tpXRmU/lOSrSX47c52JAAAAAAAAwISM1RnYWpsdo9hDkly8pyNwtN6WqvpIkidW1YrW2u6DbCcAAAAAAAC3CYbwnCbjDhM6jpkkOxdZviPJ2iR3HzAWAAAAAAAAsB/jDhM6ji8l+YGqOra1dl2SVNWyJN89ev2YxVaqqjOTnJkkKzZsHLA5AAAAAAAATFxLMjvOoJNMwpCZgW8Y1fd/q+ruVXWnJH+U5JTR64vu9dbaWa21Ta21TcvXrx+wOQAAAAAAAHD7NlhnYGvtq0mekeTBSS5J8q0kD03y6lGRK4aKBQAAAAAAwBRrbTIP9mvIzMC01t6e5C5JTk9yWmvtwUmOSPL11trXhowFAAAAAAAA7NuQcwYmSVprM0m+kCRVdeckP5rklUPHAQAAAAAAYErJ2psaY3cGVtVTR/988Oj58VV1TZJrWmvnVtXKJP8ryblJbkpynyQvSHJRkv89XJMBAAAAAACAcRxIZuDZC/5+/ej53CRnJGlJ7pHkx5NsSPKNJP8nye+11nYeUisBAAAAAAC4jWjJrMzAaTF2Z2Brrfbz+u4kTzzkFgEAAAAAAACDGHzOQAAAAAAAAG7HWtLa7OFuBSPLDncDAAAAAAAAgD5kBgIAAAAAADAscwZODZmBAAAAAAAAsERVa9PTM1tV1yS5/ABWOS7JtZ2aI8Z01S/GdMVYCtsgxvTUL8Z0xVgK2yDG9NQvxnTFWArbIMb01C/GdMVYCtsgxvTUL8Z0xVgK2yDG9NQvxnTFWArb
MK0x7tZau0OvxjCeo1fcoT30yCdPJNb7bvjzT7bWNk0k2G3UVA0TeqAf0Kq6oPcOFmM66hdjumIshW0QY3rqF2O6YiyFbRBjeuoXY7piLIVtEGN66hdjumIshW0QY3rqF2O6YiyFbRBjeuoXY7piLIVtWEoxYKkzTCgAAAAAAAAsUVOVGQgAAAAAAMBtXGvJ7OzhbgUjt/XMwLPEmJoYS2EbxJie+sWYrhhLYRvEmJ76xZiuGEthG8SYnvrFmK4YS2EbxJie+sWYrhhLYRvEmJ76xZiuGEthG8SYnvrFAL6tWmuHuw0AAAAAAAAsEUcvP649dP0PTiTW+25+0yfNK7lvt/XMQAAAAAAAAGAvzBkIAAAAAADAoJo5A6fGbS4zsKruWlVvq6obq+qmqnpHVZ00cIwTq+q1VXV+VW2tqlZVJw9Y/1Or6u1VdXlVbauqL1XV71fVkQPGeGxVfaiqrqyqHVX1jar6u6o6fagYi8R87+i9etlA9Z0xqm/h44Yh6l8Q6wlVdV5V3TI6ri6oqkcNVPeH97IdrareO0SMUZyHVdX7q+rqqrq5qj5VVT89YP2PrKqPjo7Z66vqzVV1/CHUN9bnrKrWVNUrq+qKUezzq+r7Bqz/90bv23WjMs8achuqalNVnVVVXxyV+VpVvbWqThkwxt2q6p3zzinXVtW5VfWEoWIsss5vjMp9dMgY+/isPGCobaiqe1fV2aP3ac85+HlDbENVvWQf27B9iBijcidV1V+OjqdtVfXlqnpZVa0fMMYpNXe9vaGqtlTVOVW13+EWasxrXFVtrKo/G+2HLVX1z1V13/3VP26Mqjqyqv6g5s7BN42284xx6j+AGI+uqrdU1VdGZb5SVX9SVXccMMaDa+76+s2q2l5z1/X3VNVDh6h/kXXeMHqv3jLgNpy8j8/FhiG3o6oeMnq/9hy3n6uqpw+0HW/ax3Z8cYhtqKr71Ny97bdG7b+oqp5fVfv9D3wHEOMBo/dozz3Pu6rqtP3VP1p3rPvLOoT79XFi1CHeq48Z45Du18eMcdD36wezbh3gvfqY23BI9+sHsh11kPfrY27HId2v///tnXvcbmOZx78XexOxHRunsIkKEdk5TAwqJMZMR6diMpp0GJWSTJRBJ52NCZX56DOolEMUtXOqUQ5thrI1tLHZu6IdNiKnXPPHtV4eT8+z1nUfnmd7d9f383k+7/uudz3X77nXWte6fs9a97rvhNzI8usJ8XeSun594DEjBTW8K74U1m+nRlH9dmpk12+vxoB1kmq4sx1FNTylHZJZwx1tyK7fKW2QghqeoJFVw8V5ri7JbY9GaX47NUr9uUejxJ8n183U3Ha2odSfu9uRm9vOdhTld0JuZOV3Qvxsf94To9UnSYXa3aYhlep3h0bV+h0Ef41MqicDRWRZ4FLgUeAAQIHjgMtEZDNVfaiS1AbAm4Frgf8BdqkUd4IPAncC/wbMB7YAjgZ2EpG/VdUat8tXxj7/l4EFwDrAh4GrRGRTVb2jgsZTiMg+wEtrxuzhEODnPX8/UTO4iLwDOLF5HYvdJN8cWLaSxLuAaX3LtgU+D5xfQ0BENgMuBq4C3g48DLwROFVEllbVkwrjbw/MBH4IvAFYBcu9S0RkS1V9NCOsN89OBXYHDgNuA94N/FBEtlXV6yvE/1fgeuB7wP5JLfBp7A1sApwAzAbWAo4CZonI5qo6r4LGcsAfgCOxc8o07Dj4voi8QVXPqaDxFCKyfqP1+464uRqnAaf0LbulRnyxm1mXApcDBwH3Axti27ALj8bXgP6Lhs9tlnnyvVND7IbfxcBU7Fi6E3g58O9YW/aqoLEKcAXwIPAO7JxyKFZvt1LVX7XE76xxIiLABcB0LAfvA45o4m+uqvM72uCpo6sABwLXAT8CXt8RM0fjYOzYOQ47P22I7YddG1/yxwoaKwJzsLz4HfA3wPuBH4vIdqp6TWH8pxCRVwBvAR7o+Ny5Gp/kL/PgwVoaIrI7cC5wJrAv
8BiwMfCcShrHAif3vW868I0B7UqOLyJrYuem3wDvw87rrwKOB54HHF5BY0Ms728E9sO+C3wM+EmTe13n9U5/WcGvezxsqVf3aJT6dY9GiV9Pem+mV0/RyPXrLo1Cv+7RKPXrntwo8eue+FX9+rBjpkINb41Pef32aJTWb4/GiuTXb69G7zo5NTxFI7eGuzQKa3hX/JL67dKoUMM9GqU1HFrO1bVyu02DevndplErv9s0VqQ8v111szC3PRqlud2qUSO3OzRq5XdbbtTI77b4xbnd5ZNq5LfDixXnt0OjWv0OxomC6qL+EMEEqjppXsB7gT8DG/QsWw87iR5aUWeJnt8Pwi5iTK8Y/3kDlu3f6LxyhNvvRY3GByrHXQm4C9iniX9cpbg7NvFePcJtMh34E/C+UWkM0T0Vu0i2cqV4n8BM1XJ9y68ErqwQ/2LM7E7pWTaj2T/vyozZmWfYlyAF3tazbApwM3B+afze9bALiwr8U+U2DMr3dYEngWNqaAx53xRgHnBBbQ3sItMpmCG+ota2av6XdQ5x7oslgJuAc0d1zA5531ubdXev1I5dmuW79C3/FFYPl62gcWQT6wU9y54L3A2c1RG/s8YB/9D8vVPPOisA9wInOLaTR0N6/vfq5n87Juxvj8agdf6uWefAGhpD3rc8VkP+o1Z87ObyjdiXwrnA6RW30/Tm74O82z9DY3msg8IXR6Ux5H1HNetsUqEN/9L8/cK+9b4J/K7SdvoasBBYsWed5wOPAMdnbrtn+EtG4NcHaFT36gM0qvv1fo3cdVLfS0WvPmA77Uhlvz5AYzqV/bpzXxT59QHtqOrXB8Sv5tfbjhkKa7gjflH9dmoU1W+PxpD1XfU7VYPMGu7cVtMpqOFOjaIanrkvXPU7oQ1FNdypkV3DcZyrS3PbqVHqzz0apf68U2PI+7z+3B0/N7ed26kot50apf48d1+489vZjuz8dsYv8uc4fFKF/PZolOa3R6NK/Y7XeF/TllhZd1n2rWN5AbMWdXuf7a/JNkzonsBVqjpnYoGq3g78FDuxVUHrPJnXFn/BgMUTPUTWGqH0Pc3Pqk/WAZ8GblTVb1SOOw4OxG7K9PckGhlNj/k3YTdp7q0Udingcaxw9nI/dYYD3gb4kao+deyo6izsmHpdTkBnnu2JtetbPe97AjNdu4rI0oXxi/Ld895B+a7WA30BjnzP/XzNdrofR76naIjIvsDLsC8kKZ9n1OdVT/wdgY2wXv6j0hjEAdhNtB9W0liq+dnfO3Qhlu9SQWMb4NeqemvP+x7CeizuIS3DoThr3J7Ab1X1sp733Y/1Vuys5x4NVdWuOBU0iup5wfsfwi42tOZ3YvzDgCWBz7bFLNTIwqnxJqxn7udGqDGI/YFrVXV2hfhdud2KU2Mb7KbDwp73zccuNGXVc/7SX47Crz9DY0Q1pV9jFMe2x4uX+PVh763p1Uf1faJNYxR+vbUdlfx6v0Ztv94fv6Zfbztmimp4V/zS+u3UqJXfqbnlqt8ZGlk1PFGjlDaNohruiD8IV/1O0Ciq4U6NUdTwXmrkdisV87tNY1Fdb8vJ7y5Kc3tRUyO3c8jJ7zZq5fcwSnPb45NK87tTo0J+ezQWVX4HJSjwpI7nFXQy2W4GboKdDPuZjT1mPpnZofnZNvRaMiKypIgs1Tx2fgrWy6yayReR7bBC++5aMQdwhoj8WWxOtzOl7hyR2wH/B+wtNtb0EyIyR0RG2Z7XYT2kvl4x5mnNzxNEZE0RWVFE3o4NXfCFCvH/jPVk7udR4CUV4g9jE+B2VX24b/lszJAljaH+bEFENsKGEqmd70uIyBQRWV1EPgq8EBteoVb8lbDj6UMVb2QP4p1i8+E8LDY/zvaV4m7X/HyOiFwlIo+LzdlzgogsU0njGYjI2sBOwBm9F+cKuRj4NfBpEdlYRJYTG0P/vcDJWmfI7LacXwZ4QWK8/hrXVs/XERHPsK1dGqPAo1H6OQa+v8nvqU0NnMjrr9aI
LzYfxZHYkyOPZ8Ts1Gj4ZFNn7xebByN5fqkWje2wnq2bis1D8oSIzBORj4nIkpU0nkEzbNMG5Nfz/vjfxoYdOlFszs5pIvI67Oni3Iso/Rptuf0CEXEN2dThL6v49VF72EyN5Pz2aJS0teu9Nby68/MV+fUOjSp+PXE7Z/n1Do3Tmp/Zfr0jfhW/7jhmimr4OL4/Zmok5bdXo6R+ezRKa3jCtsqu4Q6Nohqeur9z6rdDo7iGOzRq1PC2c3Utfz7K6ze5Gjn+vFOj0J+3xq/kzz3bqdSft2nU8ufu/Z2T3w6NGh69LX5pbnt8Uml+j+Paaa7GOK4DBMFiw6SaMxCbK+G+AcvvxYZTmJSIyFrAMcDFTe/NmlwNbNn8Pgcb1ihlnq+hiMhS2JfQz6rqzTVi9nE/Vlh/jPXA2QKbt+VKEdmiUjvWbF6faWLfivVeOlFEpqjqlypo9LM/NlTCRbUCquqNYhPznovNeQLW8/hgVf1mBYmbsd5KTyEi6wJrNDqjoi3nJ/4/qRB7oupk7MnAUyuHPx74QPP7H4G9VfWSivE/g83dd1rFmP2cjs3f+FtsONXDgEtFZGdVvbww9prNz29hX9Y+jA2fdQywNnV61PbzFqzjTbWb/6r6SHOx4GzMvE/wNeA9lWRuBnYWkVVU9R6wL7vAVs3/3bk3pMatjA11089Ebq+EHcMlGlXxaIjI8sAXsS8i51XWOAubAwqshrxWVW+qFP8k4JzenqK5DNF4FPMLM7Fz34uxmvsz6Z6D0quxJjaXxJnY/BLXYsPTHIXN6/L+Chr97I/VwOQbVIPiq+rdIrIt8F1sDgywfpRHq+rxNTSw3P5bEZk6cWGpOW43wZ4qXgmb+6aLNn9Zy6+PzMPmaBScZzwaJW0d+t6KXr3t89Xy620atfx6ynbO9etDNSr59bY2FPt15zGTXcPH8P0xSyO1fidqZNXvBI3sGu7UKKrhTo3sGp55TCXVb49GaQ13tqOkhnvO1aX+fBzXb5I1Mvx5ikZOfnvjl/hzj0apP/dolPrznGMq1Z93ahTmt6cNpf7c45NK83sc106TNUq/fwdjZLSDhQUp6LNgrFLvC+sp8akBy48DnhiRZvU5A/viLwfMwi58P38E8TcCtsbGnL8WmF+rLVgvpduAZXqWKZXmDByi+TJs2IVa8xLe0nzm1/ctvwjraSs1dHriron1+vl85bgbAndiwxDugfUwPgEzQftViL/fxL7Fnmh7MfCTZl/8qUL8YfOWzcSGGutff2L88e1L4vetkzxnYKpGs97JzX7ZpbYGNq78jOYYOAsbY36PSvti++Yc/JKeZZfjnDMwZ1s16y4P3JGi09KGrzTLT+hbfnizfKMR7O9fAdfVPKawydYvw740vAUbI/+D2JeLkypprN8cP9/HngJcA7uB+kSz/tbO+ANrHHbu/WbL51k7oQ2ddZSCOYcSNKZgN7IfBDarrdHsk5djE7Ffgg1LM6M0fnMMLQRW61k2l8T5hrzbqWfdtZtjNnVeo2HtmNns40P71j8JO3etUHl/Pwe74XVOre2EDaP0C+Aa7MLSjtjNp8eAwytpbNdsp1OxoXTWBb7Tk9urOeMP9ZdU8uttGn3rZXv1BI1sv+7R8H6OjH1Rxaunfj4y/HpHO6r49YT9ne3XO9pR7Nc74hf7dc8xQ0ENTz0myZtzKFUjuX6naJBZv537oqiGp26rnnXcNdzZjuwanrG/k+u3sw1FNdypUaWG98R7xrmaiv58mEbf/4r8uVOjyJ93aeTmt2NfVPPnnu3Us06WP29pRzV/7tzf2f68ox3VPPqQ+EW5jcMnlea3R6NveU79TtWokt/xGv1rmqysuyy971hexJyBna/JNkzofQzuUTysB/KzGrFh6S7ADMSuamNCV0VVf6WqV6uNOf8q7GLGh0vjNo+0fwTr0bO02DA3Kzb/nvg7d1iuoajqdViBeHmlkBNzbfyob/lMYDXsAnhNqj8l1PAJ7ELCHqr6PVW9
RFUPwW4Ifal5oicbVT0Du7DwAWzus5uA3wAX4nuCIJe2nIenezFNCkTkU9jk0weq6sza8VV1vqrOao6BNwNXUW9+gVMwczq/J9+nAEs2fw+dv7EEVX0QuyFVI+fb8h2sl141RGQr7EJc7Xz/Z+wLyGtV9XRV/YmqfhbLz4NF5KWlAqp6G3ZRcUvsyYPfAtvy9DBmnXnfUeO6cttV08dRRz0azTn269iXnn9U1V/U1lDV21T156p6DrAb1vv4uJL4zXAwn8fmp3m0J7eXAKY2f0+t1Ya+9swDriAhtzs02vJ7KtaztlSjlz2xHs2pQwi2xf8QML1ZfraqXq6qH8V6xx4rIquWaqjqFdjwY2/EbiLMBVZo2vEYzrra4S+r+PVRedhUjdLzjEejpK3D3lvTq6d+vhy/3qFRxa8ntCPbr3doFPv1tvilfj3hmMmq4eP4/piqkVO/UzVy6rdTYwUKanjJ/vDW8ASNrBqe2Yak+p2gkV3DvRq1avgEA87VVfx5h0Z1hmmU+nOPRok/Hxa/pj/3tKFvnWR/3qFRxZ93aPSS5c8dGlU8+rD4FXLb45NK83sc107dGjXzOxg9CuiTOpZX0M1kuxk4m8HFYmPsy86koSne38Ge4nmtqv5y1Jpqk9HOoc48a+tjvW5Ox4rGxAvsCZX7gJK5gLqoleFdEwrXfo75AOAGVb2hctxNm7j9QwBdA6yC9Q4uQlWPAlYFNgPWUNV9sB7OV5TGbmE2sJ6ILNu3fGPMFM0ZoXZVROQj2BNoh6jqf49Jdhb15lXcCDiYZ+b7K7DhqO4D3llJZxg1cn5R5Pvj2LAoNdkUuE9Vb+1bfk3zc6MaIqp6NtYzcWNgA1XdErvoOE9V72x7r6PGtdXzO1W1c4jQcdTRBI2Tgb3IGJo3px2q+hjWO7Uzvzvir4r1dP0Ez8zttYE3N7/vPoo29DbHs5LzmGqjM78T23EANnfIhV1xE+JvCsxR1f4v49dgF0xK9zcAqvplzBe8BFhHVXfGnoS6eoCP6GSAv6zu1yt7WLdG7fOMpx0lbe1770i8euLny6rdQ46pNpLrd0c7qvj1ARpV/fqgNhT6de8xk1vDx/H9MVUjp35ntyOhfns01qOshtfYH105nnJMtTEsx3PakFq/vRolNdzdjto1vGFiPxb7c4fGKOnXyPbnCRpP/yPBn3fEr+LPOzRqrON5/yi/fw/6jMn+3KlR7NE74pfmtmc7l+b3OK6lpGiMIr+D4K+CyXYz8HxgGxFZf2KBiEzHLkifv6g+VCpND4YzgFdiPRiuGpPuathTKv0XkHO4HthpwAvMxO7ECG7UiMgM4EU8fdG7lHObn7v2LX8NMF9V76qkM/HZN6b+U0Jgj8xvLjbXQC9bY0P9VXmCTlUfUtVfqo2Z/hrseDq5RuwhXICZqzdNLBCbc28vYKaqPjpC7WqIyCFYD8GPqOqJY9JcAhtuoka+w+B8vwGbhHon7GJpdURkGjaUVo2cvwibG2FQvoPdPK1Ck4t7Axep6oJacRvuAlYSm1S+l62bn7+pJaSqf26eQrhVRNbEcu+ktvc4a9z5wFoiskPP+6YBf4+jno+jjno1RORz2PAqb1PV80ahMeB9y2I3J1rz2xH/Lgbn9t3Axc3vrReQC9qwDnaO6sxtp8Z5zc9B+f0Idq4q1ZhYd7VG50zvhTdn/LuADUSkv9euK7dT2qCqj6rqbFWdJyKbYr1qW3O7RbffX1b365U9rEtjFOcZTztK2tr33usZgVd3tqHIrw/QqO7Xh7Wjpl8foFHVrw9rQ4Ffvx7fMZNbw73xS3BrFNRvt0Y/3vqdoFFSw0va4a3hXo3zmmWpNTypDTn1O0GjpIYntaNWDR9wri7y506N6gzSKPHnXo0B63jzuyt+sT93aAxbx+3PnRrnNT+z/LlTY2J5Tn57NYo8uiM+UJTbHp9Umt/juHbq0qid38EYUAV9cjyvoJMpi/oDJPJV4D3Ad0XkSKwXxbHAPGwIu2qI
yBubXycmaN9NRBYAC1T1x4Xh/xO7ufFx4CER6Z3ofb5WGOZMRM4FrsN6Jj0AvBCbnPcJbPLaIpqeqJcP0AW4Q1X/4n+piMgZwO1YOxZiQ/gdgRXaE0rjN1yIzb11SvNo/23YvtkFeFsljQn2x7b/GZXjgs3l9W3gAhH5MvAnbIiEfYAvNL3UshGRLbBhL65rFm0HHAYcr6o/K4jbmmeq+r8i8i3gi03v/NuxJ9DWw4YwLIrfrLMD1vNu9WadGSLyRwBV7bzB1aUhIntjExr/ALi0L98f0O4Jxj0aR2PDO/wUM6qrY0NJbgXs2xXfozEop0VkITDFm++OdnwQM8WXYcNSrov1hl2dCvtbVe8RkU8CR4nIA8Cl2Be2jwJfV9XOC1AJtWEPbJ8kX0x0aJwGHApcKCIfx+YfmoENLXQtdhwUaTT5djxPT3S+CXb+nU13DfHUuPOBK4HTReQwrIfrEdh8Bl2TsHs1EJHdgOfydK/wHZpz/UOqelGphogcju2L/wJ+3bfOggFPb+ZonIJdIJ6F9XRdF/NCawBvLY3P4Fr+CHC3M7c9bfgc1gHtSmABludHYL07P15DQ1VvFJHTgGOamzjXYV+gDwKOdfR2TfFm+wFLkpbfnvgnN7FnishnsKFydsTOg+eqDd1UpCEiz8fq6M+wzhEzsH1xjtqwg604/WWRX/d62BKv7tQo8usejRK/3vXeGl7d2YYiv+7cBkV+PXE7Z/l1p0a2X3fuiyK/7j1mRCSrhqcck7n1O6EN2fU7QSO7fidsq0HruGp4Qjuya3hCO7JqeMZ5Lrl+J2yn7BqeoJFdw53n6iJ/7q0HJf7co1Hqz50a2fndFV9VH6HQnzvbUOTPPRql/jzRY+T4c69Gdn4790WRP8fnk0q/f7u8WOH3706NCt+/gyDQZ8HEhSkvYB3gbOxL0INYT5PpI9DRIa/LK8Se2xL/6Eqf/3DsovBC4GHgZuwCTPVtNWC7tU44nhDrCOwL7/3YUHvzgK9gQ97U/MzTsAs+d2NDT/4C2LeyxlTMYF0wwm2/G2YaFzS5cT3wLmDJCrE3wXqfLcQuXFyH9cKpcby05hmwDDZm/l1Y77GrcU5C7Ix/+bD1amhgN26KzicOjT2xG1u/x8zjHZjZe0XNfTHgPZcDV9TSwHql/RT7QvU4ZrLPB7aquL8FM49zsHy/A5v8e2rN7QR8t/n8S40oLzbG5hiah+XkLdj8kCtV2hdTsMm4726OqVuxp1uXdcSe2xL/6J71VsZM/L1YnboEeKnz83s1hq03t4YGLecP4LRKGgdiXwzvwc6Bt2JDz25aazsNed/ptfZF04afY186H8fO52cCL6q8v5dqjtN5WH7fAry3pkaz7g3ALxPz2tuGbXh6fq+HsBvwRwLLVNoXq2G9yv+A5fZN2PxiU5ztcPlLCvx6gobrfJyrkXJMFGhk+/Xc9zaf3+XVnW0o8usJ+zvbrydoZPv1BI0sv+7cF6P068f1Lcuu4c74c8ms3x4NCuu3UyO7fqdsqwHrzMVZwxPakV3DE/Z5dg33bicy6ndiG7JruHNfZNdwnOfqktxO0MjOb49GaX47NUr8eVbdTMnthDaU+HPv/i7x5+5tRWZ+J7QjK7+d+6LInzcxOn0ShbXbqTGXgvrdpcEI6ne8Rv9anpV05yl7jeUFzFrU7X22v0RVCYIgCIIgCIIgCIIgCIIgCIIgCIIaTJOVdesldh6L1sVPnnWtqs4Yi9gkZbLNGRgEQRAEQRAEQRAEQRAEQRAEQRAEgZPJNmdgEARBEARBEARBEARBEARBEARB8GxHn1zUnyBoiCcDgyAIgiAIgiAIgiAIgiAIgiAIgmAxJeYMDIIgCIIgCIIgCIIgCIIgCIIgCKohIj8AVh2T3B9U9TVj0pqUxM3AIAiCIAiCIAiCIAiCIAiCIAiCIFhMiWFCgyAIgiAIgiAIgiAIgiAIgiAIgmAxJW4GBkEQBEEQBEEQBEEQBEEQBEEQBMFiStwM
DIIgCIIgCIIgCIIgCIIgCIIgCILFlLgZGARBEARBEARBEARBEARBEARBEASLKXEzMAiCIAiCIAiCIAiCIAiCIAiCIAgWU/4fvhBhOsRDPkkAAAAASUVORK5CYII=\n",
- "text/plain": [
- "<Figure size 2520x720 with 2 Axes>"
- ]
- },
- "metadata": {
- "needs_background": "light"
- },
- "output_type": "display_data"
- },
- {
- "data": {
- "text/plain": [
- "\"for i in range(attr.shape[-1]):\\n final = attr[:,:,i]\\n plt.imshow(final, cmap='viridis', interpolation='nearest')\\n plt.colorbar()\\n plt.title('MFC'+str(i+1))\\n plt.show()\""
- ]
- },
- "execution_count": 219,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "#origin = datashape[0,:,:,0,0]\n",
- "print(attr_dl.shape)\n",
- "attr = np.mean(attr_dl, axis=(0, 4))\n",
- "print(attr.shape)\n",
- "'''with open('vowel_electrode.pkl', 'wb') as f:\n",
- " pickle.dump(attr, f)'''\n",
- "\n",
- "'''_ = viz.visualize_image_attr(final, original, method=\"blended_heat_map\",sign=\"all\",show_colorbar=True, \n",
- " title=\"Overlayed DeepLift\")'''\n",
- "'''plt.figure(figsize=(8, 8))\n",
- "plt.imshow(attr, cmap='viridis', interpolation='nearest')\n",
- "plt.colorbar()\n",
- "plt.title('electrode attribute')\n",
- "plt.show()'''\n",
- "#plt.savefig('electode_uw.png', dpi=300)\n",
- "\n",
- "'''plt.figure(figsize=(12, 5))\n",
- "ax = plt.gca()\n",
- "ax.set_facecolor((0.95,0.95,0.95))\n",
- "plt.grid()\n",
- "plt.bar(x=range(20), height=attr, tick_label=range(20))\n",
- "plt.xticks(fontsize=16)\n",
- "plt.yticks(fontsize=16)\n",
- "plt.title('MFCC attribute')\n",
- "plt.show()'''\n",
- "All=[]\n",
- "for i in range(20):\n",
- " All.append(attr[:,:,i])\n",
- "All = np.stack(All)\n",
- "All = All.reshape((20,63))\n",
- "print(All.shape)\n",
- "print(np.unravel_index(np.argsort(All, axis=None)[-5:], All.shape))\n",
- "plt.figure(figsize=(35, 10))\n",
- "ax = plt.gca()\n",
- "ax.set_xticks(range(63))\n",
- "ax.set_yticks(range(20))\n",
- "plt.xticks(fontsize=16)\n",
- "plt.yticks(fontsize=16)\n",
- "plt.imshow(All, cmap='viridis', interpolation='nearest')\n",
- "plt.title('all features attribute', fontsize=20)\n",
- "cbar = plt.colorbar()\n",
- "for t in cbar.ax.get_yticklabels():\n",
- " t.set_fontsize(16)\n",
- "plt.show()\n",
- "'''for i in range(attr.shape[-1]):\n",
- " final = attr[:,:,i]\n",
- " plt.imshow(final, cmap='viridis', interpolation='nearest')\n",
- " plt.colorbar()\n",
- " plt.title('MFC'+str(i+1))\n",
- " plt.show()'''"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 83,
- "id": "a07ed621",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "0.3645833333333333\n"
- ]
- }
- ],
- "source": [
- "print(sum(y__test)/(len(y__test)))"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 271,
- "id": "c2174d74",
- "metadata": {},
- "outputs": [],
- "source": [
- "class cnn(nn.Module):\n",
- " def __init__(self):\n",
- " super().__init__()\n",
- " self.conv1 = nn.Conv2d(20, 16, 3)\n",
- " #torch.nn.init.xavier_normal_(self.conv1.weight)\n",
- " self.pool = nn.MaxPool2d(2, 1)\n",
- " self.conv2 = nn.Conv2d(16, 32, 3)\n",
- " #torch.nn.init.xavier_normal_(self.conv2.weight)\n",
- " self.fc1 = nn.Linear(256, 128)\n",
- " self.fc2 = nn.Linear(128, 1)\n",
- " #torch.nn.init.xavier_normal_(self.fc.weight)\n",
- " self.batch1 = nn.BatchNorm2d(16)\n",
- " self.batch2 = nn.BatchNorm2d(32)\n",
- " \n",
- " def forward(self, x):\n",
- " # (batch, heigth, width, feature)\n",
- " #print(x.shape)\n",
- " x = rearrange(x, 'batch heigth width feature -> batch feature heigth width')\n",
- " #print(x.shape)\n",
- " out = self.pool(F.relu(self.batch1(self.conv1(x))))\n",
- " #print(out.shape)\n",
- " out = F.relu(self.batch2(self.conv2(out)))\n",
- " #print(out.shape)\n",
- " out = rearrange(out, 'batch channel heigth width -> batch (channel heigth width)')\n",
- " #print(out.shape)\n",
- " out = F.relu(self.fc1(out))\n",
- " out = F.sigmoid(self.fc2(out))\n",
- " return out"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 15,
- "id": "2c1032f9",
- "metadata": {},
- "outputs": [
- {
- "data": {
- "text/plain": [
- "(1913, 7, 9, 20, 11)"
- ]
- },
- "execution_count": 15,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "with open('sal_nasal.pkl', 'rb') as f:\n",
- " dataset = pickle.load(f)\n",
- "dataset.shape"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 16,
- "id": "a86f1e03",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "(1913, 7, 9, 20) 1913\n"
- ]
- }
- ],
- "source": [
- "data = np.mean(dataset, axis=4)\n",
- "labels = nasal_label\n",
- "print(data.shape, len(labels))"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 283,
- "id": "649e822c",
- "metadata": {},
- "outputs": [
- {
- "data": {
- "text/plain": [
- "1913"
- ]
- },
- "execution_count": 283,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "data1 = []\n",
- "for sample in data:\n",
- " data1.append(sample)\n",
- "len(data1)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 284,
- "id": "075addc9",
- "metadata": {},
- "outputs": [],
- "source": [
- "#config\n",
- "val_size = 0.15\n",
- "n_epochs = 100\n",
- "batch_size = 128\n",
- "print_every = 10\n",
- "k = 10\n",
- "skf=StratifiedKFold(n_splits=k, shuffle=True, random_state=42)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 285,
- "id": "21e68d7f",
- "metadata": {
- "scrolled": true
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "-----------------------------Fold 1---------------\n",
- "preparing dataloaders...\n",
- "(1913, 7, 9, 20)\n",
- "coef when 0 > 1 1\n",
- "creating model...\n",
- "calculating total steps...\n",
- "epoch: 1\n",
- "validation loss decreased (inf ---> 0.698440), val_acc = 0.3953488372093023\n",
- "validation acc increased (0.000000 ---> 0.395349)\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "C:\\Users\\saeed\\Desktop\\Master\\bci\\lib\\site-packages\\torch\\nn\\functional.py:1960: UserWarning: nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.\n",
- " warnings.warn(\"nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.\")\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "validation acc increased (0.395349 ---> 0.395349)\n",
- "epoch 1: train loss = 0.6869507828848291, l1loss = 0.13827660727062954, train acc = 0.5616507297433316,\n",
- "val_loss = 0.71086893987286, val_acc = 0.3953488372093023\n",
- "\n",
- "epoch: 2\n",
- "validation acc increased (0.395349 ---> 0.395349)\n",
- "validation acc increased (0.395349 ---> 0.395349)\n",
- "epoch 2: train loss = 0.6654115830706062, l1loss = 0.13782025073482934, train acc = 0.6205334675390035,\n",
- "val_loss = 0.7223108600276386, val_acc = 0.3953488372093023\n",
- "\n",
- "epoch: 3\n",
- "validation acc increased (0.395349 ---> 0.395349)\n",
- "validation acc increased (0.395349 ---> 0.430233)\n",
- "epoch 3: train loss = 0.6485615584738721, l1loss = 0.13727900934057374, train acc = 0.6396577755410167,\n",
- "val_loss = 0.7128145556117214, val_acc = 0.5\n",
- "\n",
- "epoch: 4\n",
- "validation acc increased (0.430233 ---> 0.503876)\n",
- "validation loss decreased (0.698440 ---> 0.692173), val_acc = 0.5310077519379846\n",
- "validation acc increased (0.503876 ---> 0.531008)\n",
- "epoch 4: train loss = 0.6330915434791266, l1loss = 0.13658334071844982, train acc = 0.6431806743834927,\n",
- "val_loss = 0.6788588637529418, val_acc = 0.5348837209302325\n",
- "\n",
- "epoch: 5\n",
- "validation loss decreased (0.692173 ---> 0.676339), val_acc = 0.5310077519379846\n",
- "validation acc increased (0.531008 ---> 0.531008)\n",
- "validation loss decreased (0.676339 ---> 0.651838), val_acc = 0.5387596899224806\n",
- "validation acc increased (0.531008 ---> 0.538760)\n",
- "epoch 5: train loss = 0.6172819093477204, l1loss = 0.1356638338647419, train acc = 0.6492199295420231,\n",
- "val_loss = 0.6452844336975453, val_acc = 0.5426356589147286\n",
- "\n",
- "epoch: 6\n",
- "validation loss decreased (0.651838 ---> 0.643711), val_acc = 0.5387596899224806\n",
- "validation acc increased (0.538760 ---> 0.538760)\n",
- "validation loss decreased (0.643711 ---> 0.636844), val_acc = 0.5426356589147286\n",
- "validation acc increased (0.538760 ---> 0.542636)\n",
- "epoch 6: train loss = 0.6055080684574033, l1loss = 0.13447358302873286, train acc = 0.6607951685958732,\n",
- "val_loss = 0.6349219661350398, val_acc = 0.5426356589147286\n",
- "\n",
- "epoch: 7\n",
- "validation loss decreased (0.636844 ---> 0.635237), val_acc = 0.5426356589147286\n",
- "validation acc increased (0.542636 ---> 0.542636)\n",
- "validation loss decreased (0.635237 ---> 0.631333), val_acc = 0.5348837209302325\n",
- "epoch 7: train loss = 0.5956864868448677, l1loss = 0.1329548779029491, train acc = 0.6623049823855058,\n",
- "val_loss = 0.6336472632811051, val_acc = 0.5465116279069767\n",
- "\n",
- "epoch: 8\n",
- "validation acc increased (0.542636 ---> 0.542636)\n",
- "epoch 8: train loss = 0.5867841974778216, l1loss = 0.13107547391942834, train acc = 0.6673376950176145,\n",
- "val_loss = 0.6482481244922609, val_acc = 0.5310077519379846\n",
- "\n",
- "epoch: 9\n",
- "epoch 9: train loss = 0.5799472323001541, l1loss = 0.1288494519630628, train acc = 0.6738802214393558,\n",
- "val_loss = 0.6537467005640961, val_acc = 0.5310077519379846\n",
- "\n",
- "epoch: 10\n",
- "epoch 10: train loss = 0.5720185998341638, l1loss = 0.1262706755376906, train acc = 0.6809260191243081,\n",
- "val_loss = 0.660815857177557, val_acc = 0.5348837209302325\n",
- "\n",
- "epoch: 11\n",
- "epoch 11: train loss = 0.5634220912340123, l1loss = 0.12343447363715587, train acc = 0.688475088072471,\n",
- "val_loss = 0.6865861693093943, val_acc = 0.5271317829457365\n",
- "\n",
- "epoch: 12\n",
- "epoch 12: train loss = 0.5516697056160299, l1loss = 0.12035546596169051, train acc = 0.6985405133366884,\n",
- "val_loss = 0.6893619465273481, val_acc = 0.5310077519379846\n",
- "\n",
- "epoch: 13\n",
- "epoch 13: train loss = 0.5383751470057522, l1loss = 0.11719332186942971, train acc = 0.7075993960744842,\n",
- "val_loss = 0.7262142046477443, val_acc = 0.5193798449612403\n",
- "\n",
- "epoch: 14\n",
- "epoch 14: train loss = 0.5219575696154701, l1loss = 0.11402586181603548, train acc = 0.7166582788122798,\n",
- "val_loss = 0.8209437387165173, val_acc = 0.5310077519379846\n",
- "\n",
- "epoch: 15\n",
- "epoch 15: train loss = 0.5082864454930069, l1loss = 0.11104010005781983, train acc = 0.727730246602919,\n",
- "val_loss = 0.790199874907501, val_acc = 0.5387596899224806\n",
- "\n",
- "epoch: 16\n",
- "epoch 16: train loss = 0.4823058645930432, l1loss = 0.10828750046597213, train acc = 0.7518872672370408,\n",
- "val_loss = 0.8347636539806691, val_acc = 0.5348837209302325\n",
- "\n",
- "epoch: 17\n",
- "validation acc increased (0.542636 ---> 0.542636)\n",
- "epoch 17: train loss = 0.44870099950268694, l1loss = 0.10601656717844926, train acc = 0.7790639154504277,\n",
- "val_loss = 1.0818783419076787, val_acc = 0.5426356589147286\n",
- "\n",
- "epoch: 18\n",
- "epoch 18: train loss = 0.4243430009905753, l1loss = 0.10426574212390691, train acc = 0.7911424257674887,\n",
- "val_loss = 0.8771759863047637, val_acc = 0.5658914728682171\n",
- "\n",
- "epoch: 19\n",
- "validation acc increased (0.542636 ---> 0.562016)\n",
- "validation acc increased (0.562016 ---> 0.569767)\n",
- "epoch 19: train loss = 0.3871670792645165, l1loss = 0.10277385458117702, train acc = 0.8208354302969301,\n",
- "val_loss = 0.9480718586796014, val_acc = 0.5465116279069767\n",
- "\n",
- "epoch: 20\n",
- "epoch 20: train loss = 0.3737996664698783, l1loss = 0.1016839612097253, train acc = 0.8102667337695018,\n",
- "val_loss = 1.400591679321703, val_acc = 0.5387596899224806\n",
- "\n",
- "epoch: 21\n",
- "validation acc increased (0.569767 ---> 0.596899)\n",
- "epoch 21: train loss = 0.3521438902445516, l1loss = 0.10036715933754027, train acc = 0.8324106693507801,\n",
- "val_loss = 0.9109365958576054, val_acc = 0.5775193798449613\n",
- "\n",
- "epoch: 22\n",
- "epoch 22: train loss = 0.319912125056991, l1loss = 0.0992567646230473, train acc = 0.8580775037745345,\n",
- "val_loss = 1.5151436608429103, val_acc = 0.5310077519379846\n",
- "\n",
- "epoch: 23\n",
- "epoch 23: train loss = 0.3122329044474029, l1loss = 0.09838688995634784, train acc = 0.8575742325113236,\n",
- "val_loss = 3.091815226076012, val_acc = 0.5155038759689923\n",
- "\n",
- "epoch: 24\n",
- "epoch 24: train loss = 0.28910951249612654, l1loss = 0.09807651147628826, train acc = 0.879214896829391,\n",
- "val_loss = 1.2312677035960116, val_acc = 0.5581395348837209\n",
- "\n",
- "epoch: 25\n",
- "epoch 25: train loss = 0.2668642541332706, l1loss = 0.09742016104909122, train acc = 0.8842476094614997,\n",
- "val_loss = 1.9600338353667148, val_acc = 0.5852713178294574\n",
- "\n",
- "epoch: 26\n",
- "epoch 26: train loss = 0.2474498063415874, l1loss = 0.09673020170526284, train acc = 0.8933064921992954,\n",
- "val_loss = 1.8068715759025988, val_acc = 0.5775193798449613\n",
- "\n",
- "epoch: 27\n",
- "epoch 27: train loss = 0.1902359421446987, l1loss = 0.09593981095727029, train acc = 0.9315551082033215,\n",
- "val_loss = 1.3357789331628371, val_acc = 0.5581395348837209\n",
- "\n",
- "epoch: 28\n",
- "epoch 28: train loss = 0.15691894562057232, l1loss = 0.0954245226574892, train acc = 0.9436336185203825,\n",
- "val_loss = 2.6843640092731444, val_acc = 0.5348837209302325\n",
- "\n",
- "epoch: 29\n",
- "epoch 29: train loss = 0.1504599494989877, l1loss = 0.09509522278868618, train acc = 0.9431303472571716,\n",
- "val_loss = 2.4894338860068213, val_acc = 0.5968992248062015\n",
- "\n",
- "epoch: 30\n",
- "epoch 30: train loss = 0.12966379567523306, l1loss = 0.09448714830660015, train acc = 0.9542023150478107,\n",
- "val_loss = 1.8916605646296065, val_acc = 0.5968992248062015\n",
- "\n",
- "epoch: 31\n",
- "epoch 31: train loss = 0.15563943400926547, l1loss = 0.09434211563092114, train acc = 0.9471565173628586,\n",
- "val_loss = 4.939729660980461, val_acc = 0.5348837209302325\n",
- "\n",
- "epoch: 32\n",
- "epoch 32: train loss = 0.16452017606474612, l1loss = 0.09465074112097942, train acc = 0.9406139909411173,\n",
- "val_loss = 3.636755084344583, val_acc = 0.5775193798449613\n",
- "\n",
- "epoch: 33\n",
- "epoch 33: train loss = 0.12137126438525915, l1loss = 0.09421853995242188, train acc = 0.9592350276799195,\n",
- "val_loss = 3.1257032387016355, val_acc = 0.5542635658914729\n",
- "\n",
- "epoch: 34\n",
- "epoch 34: train loss = 0.11817072889855236, l1loss = 0.0940876242596418, train acc = 0.9572219426270759,\n",
- "val_loss = 3.6307011426881304, val_acc = 0.5852713178294574\n",
- "\n",
- "epoch: 35\n",
- "epoch 35: train loss = 0.09768068436092628, l1loss = 0.09356706718073818, train acc = 0.96527428283845,\n",
- "val_loss = 4.066986708678017, val_acc = 0.5852713178294574\n",
- "\n",
- "epoch: 36\n",
- "epoch 36: train loss = 0.09975038621823992, l1loss = 0.09309212238751408, train acc = 0.9622546552591847,\n",
- "val_loss = 3.2775008031564163, val_acc = 0.5503875968992248\n",
- "\n",
- "epoch: 37\n",
- "epoch 37: train loss = 0.08334749282366316, l1loss = 0.092504221824137, train acc = 0.9763462506290891,\n",
- "val_loss = 3.8003816779183093, val_acc = 0.5310077519379846\n",
- "\n",
- "epoch: 38\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "epoch 38: train loss = 0.07791504205383123, l1loss = 0.09236309195530733, train acc = 0.9793658782083543,\n",
- "val_loss = 3.233410909194355, val_acc = 0.5736434108527132\n",
- "\n",
- "epoch: 39\n",
- "epoch 39: train loss = 0.06921992062487432, l1loss = 0.09197287779689495, train acc = 0.9854051333668847,\n",
- "val_loss = 3.561995010505351, val_acc = 0.5852713178294574\n",
- "\n",
- "epoch: 40\n",
- "epoch 40: train loss = 0.09455262519377947, l1loss = 0.09194954407083131, train acc = 0.9703069954705587,\n",
- "val_loss = 3.140381110104725, val_acc = 0.562015503875969\n",
- "\n",
- "epoch: 41\n",
- "epoch 41: train loss = 0.04864857653104365, l1loss = 0.09150668548229776, train acc = 0.9899345747357826,\n",
- "val_loss = 3.4616599368552365, val_acc = 0.5426356589147286\n",
- "\n",
- "epoch: 42\n",
- "epoch 42: train loss = 0.03882463321757185, l1loss = 0.09062009955703403, train acc = 0.9924509310518369,\n",
- "val_loss = 3.4793882855149203, val_acc = 0.5891472868217055\n",
- "\n",
- "epoch: 43\n",
- "epoch 43: train loss = 0.028278966656936192, l1loss = 0.08959808966653936, train acc = 0.995973829894313,\n",
- "val_loss = 3.8493388558543007, val_acc = 0.5736434108527132\n",
- "\n",
- "epoch: 44\n",
- "epoch 44: train loss = 0.02344076073516177, l1loss = 0.08865111783919288, train acc = 0.9954705586311021,\n",
- "val_loss = 3.879825139115023, val_acc = 0.5581395348837209\n",
- "\n",
- "epoch: 45\n",
- "epoch 45: train loss = 0.01661977870085159, l1loss = 0.0877494524450055, train acc = 0.9989934574735783,\n",
- "val_loss = 4.2667010209357095, val_acc = 0.5658914728682171\n",
- "\n",
- "epoch: 46\n",
- "epoch 46: train loss = 0.013619207225413736, l1loss = 0.086835880546817, train acc = 0.9994967287367891,\n",
- "val_loss = 4.457068901653438, val_acc = 0.5852713178294574\n",
- "\n",
- "epoch: 47\n",
- "epoch 47: train loss = 0.015068223528380243, l1loss = 0.0860258729338166, train acc = 1.0,\n",
- "val_loss = 3.7533517517727013, val_acc = 0.5775193798449613\n",
- "\n",
- "!!! overfitted !!!\n",
- "[1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1]\n",
- "[0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1]\n",
- "early stoping results:\n",
- "\t [0.6041666666666666]\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0]\n",
- "label = tensor([0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0,\n",
- " 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0,\n",
- " 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0,\n",
- " 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1,\n",
- " 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0,\n",
- " 0, 1, 0, 1, 1, 0, 0, 0])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0,\n",
- " 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1,\n",
- " 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1,\n",
- " 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1,\n",
- " 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1,\n",
- " 0, 0, 0, 1, 0, 1, 1, 1])\n",
- "output = [0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0]\n",
- "label = tensor([0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0,\n",
- " 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0,\n",
- " 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1,\n",
- " 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1,\n",
- " 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1,\n",
- " 1, 1, 0, 1, 1, 1, 0, 1])\n",
- "output = [1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0,\n",
- " 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1,\n",
- " 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0,\n",
- " 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0,\n",
- " 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0,\n",
- " 1, 1, 0, 0, 1, 0, 1, 1])\n",
- "output = [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0]\n",
- "label = tensor([0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0,\n",
- " 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0,\n",
- " 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0,\n",
- " 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1,\n",
- " 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1,\n",
- " 1, 0, 0, 0, 1, 0, 0, 0])\n",
- "output = [1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0]\n",
- "label = tensor([1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0,\n",
- " 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1,\n",
- " 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0,\n",
- " 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
- " 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0,\n",
- " 0, 1, 1, 0, 1, 0, 0, 1])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1,\n",
- " 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1,\n",
- " 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0,\n",
- " 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0,\n",
- " 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1,\n",
- " 1, 0, 0, 1, 0, 1, 0, 1])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1,\n",
- " 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1,\n",
- " 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1,\n",
- " 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1,\n",
- " 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0,\n",
- " 0, 0, 1, 0, 1, 0, 0, 0])\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "output = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1,\n",
- " 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1,\n",
- " 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0,\n",
- " 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1,\n",
- " 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0,\n",
- " 0, 1, 1, 1, 1, 1, 1, 1])\n",
- "output = [1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0]\n",
- "label = tensor([0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1,\n",
- " 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1,\n",
- " 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1,\n",
- " 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1,\n",
- " 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0,\n",
- " 0, 1, 0, 0, 0, 1, 1, 0])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0,\n",
- " 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0,\n",
- " 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1,\n",
- " 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0,\n",
- " 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1,\n",
- " 1, 1, 0, 1, 1, 1, 0, 1])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1,\n",
- " 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1,\n",
- " 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0,\n",
- " 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0,\n",
- " 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1,\n",
- " 0, 0, 1, 0, 0, 1, 1, 0])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0]\n",
- "label = tensor([0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,\n",
- " 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1,\n",
- " 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0,\n",
- " 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,\n",
- " 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1,\n",
- " 0, 0, 0, 0, 1, 1, 0, 0])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1,\n",
- " 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1,\n",
- " 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1,\n",
- " 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1,\n",
- " 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0,\n",
- " 0, 1, 0, 0, 1, 1, 0, 1])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0]\n",
- "label = tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0,\n",
- " 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0,\n",
- " 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0,\n",
- " 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0,\n",
- " 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1,\n",
- " 0, 1, 0, 1, 1, 0, 0, 1])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1,\n",
- " 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1,\n",
- " 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1])\n",
- "\t [0.6673376950176145]\n",
- "[1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1]\n",
- "[1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0]\n",
- "full train results:\n",
- "\t [0.59375]\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "\t [0.9949672873678913]\n",
- "[1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1]\n",
- "[0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1]\n",
- "best accs results:\n",
- "\t [0.59375]\n",
- "\t [0.8037242073477604]\n",
- "[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]\n",
- "-----------------------------Fold 2---------------\n",
- "preparing dataloaders...\n",
- "torch.Size([67, 7, 9, 20])\n",
- "coef when 0 > 1 1\n",
- "creating model...\n",
- "calculating total steps...\n",
- "epoch: 1\n",
- "validation loss decreased (inf ---> 0.693442), val_acc = 0.437984496124031\n",
- "validation acc increased (0.000000 ---> 0.437984)\n",
- "epoch 1: train loss = 0.7009523889089279, l1loss = 0.13856501084980288, train acc = 0.4914400805639476,\n",
- "val_loss = 0.7006198250970175, val_acc = 0.3992248062015504\n",
- "\n",
- "epoch: 2\n",
- "epoch 2: train loss = 0.6762631435888892, l1loss = 0.13815073134735872, train acc = 0.607754279959718,\n",
- "val_loss = 0.7034130031748336, val_acc = 0.4108527131782946\n",
- "\n",
- "epoch: 3\n",
- "validation acc increased (0.437984 ---> 0.492248)\n",
- "epoch 3: train loss = 0.6556627195767406, l1loss = 0.13765930575726615, train acc = 0.6299093655589124,\n",
- "val_loss = 0.6913710462954618, val_acc = 0.5193798449612403\n",
- "\n",
- "epoch: 4\n",
- "validation loss decreased (0.693442 ---> 0.689858), val_acc = 0.5116279069767442\n",
- "validation acc increased (0.492248 ---> 0.511628)\n",
- "validation loss decreased (0.689858 ---> 0.666388), val_acc = 0.562015503875969\n",
- "validation acc increased (0.511628 ---> 0.562016)\n",
- "epoch 4: train loss = 0.6381383219514368, l1loss = 0.13702793940076416, train acc = 0.6374622356495468,\n",
- "val_loss = 0.6515760264655416, val_acc = 0.5697674418604651\n",
- "\n",
- "epoch: 5\n",
- "validation loss decreased (0.666388 ---> 0.648785), val_acc = 0.5775193798449613\n",
- "validation acc increased (0.562016 ---> 0.577519)\n",
- "validation loss decreased (0.648785 ---> 0.622154), val_acc = 0.6085271317829457\n",
- "validation acc increased (0.577519 ---> 0.608527)\n",
- "epoch 5: train loss = 0.6235407856776035, l1loss = 0.13619495138961143, train acc = 0.6414904330312186,\n",
- "val_loss = 0.6117342756700146, val_acc = 0.6085271317829457\n",
- "\n",
- "epoch: 6\n",
- "validation loss decreased (0.622154 ---> 0.609880), val_acc = 0.6085271317829457\n",
- "validation acc increased (0.608527 ---> 0.608527)\n",
- "validation loss decreased (0.609880 ---> 0.601803), val_acc = 0.6124031007751938\n",
- "validation acc increased (0.608527 ---> 0.612403)\n",
- "epoch 6: train loss = 0.6092025198244977, l1loss = 0.13509638041768068, train acc = 0.649043303121853,\n",
- "val_loss = 0.597300708062889, val_acc = 0.6162790697674418\n",
- "\n",
- "epoch: 7\n",
- "validation loss decreased (0.601803 ---> 0.597826), val_acc = 0.6085271317829457\n",
- "validation loss decreased (0.597826 ---> 0.594267), val_acc = 0.6085271317829457\n",
- "epoch 7: train loss = 0.5984585170539364, l1loss = 0.13366857624246034, train acc = 0.6560926485397784,\n",
- "val_loss = 0.5917771292287249, val_acc = 0.6085271317829457\n",
- "\n",
- "epoch: 8\n",
- "validation loss decreased (0.594267 ---> 0.591831), val_acc = 0.6124031007751938\n",
- "validation acc increased (0.612403 ---> 0.612403)\n",
- "validation loss decreased (0.591831 ---> 0.585725), val_acc = 0.6124031007751938\n",
- "validation acc increased (0.612403 ---> 0.612403)\n",
- "epoch 8: train loss = 0.5909301571375414, l1loss = 0.13186680199156356, train acc = 0.6591137965760322,\n",
- "val_loss = 0.5863324623237285, val_acc = 0.6046511627906976\n",
- "\n",
- "epoch: 9\n",
- "validation loss decreased (0.585725 ---> 0.582507), val_acc = 0.6046511627906976\n",
- "epoch 9: train loss = 0.5829210489057942, l1loss = 0.12968964685666237, train acc = 0.6636455186304129,\n",
- "val_loss = 0.5822689447681918, val_acc = 0.6124031007751938\n",
- "\n",
- "epoch: 10\n",
- "validation loss decreased (0.582507 ---> 0.582276), val_acc = 0.6124031007751938\n",
- "validation acc increased (0.612403 ---> 0.612403)\n",
- "validation acc increased (0.612403 ---> 0.612403)\n",
- "epoch 10: train loss = 0.575366021463158, l1loss = 0.12713940261533974, train acc = 0.6717019133937563,\n",
- "val_loss = 0.5861175392949304, val_acc = 0.6046511627906976\n",
- "\n",
- "epoch: 11\n",
- "epoch 11: train loss = 0.5670345990439316, l1loss = 0.12424162386617392, train acc = 0.6782477341389728,\n",
- "val_loss = 0.588328478872314, val_acc = 0.5968992248062015\n",
- "\n",
- "epoch: 12\n",
- "epoch 12: train loss = 0.5552928300062335, l1loss = 0.1211286605770014, train acc = 0.6888217522658611,\n",
- "val_loss = 0.5963715642921684, val_acc = 0.5968992248062015\n",
- "\n",
- "epoch: 13\n",
- "epoch 13: train loss = 0.5412835172055831, l1loss = 0.11785069293335966, train acc = 0.6978851963746223,\n",
- "val_loss = 0.6184392266495283, val_acc = 0.5968992248062015\n",
- "\n",
- "epoch: 14\n",
- "epoch 14: train loss = 0.5228479005178777, l1loss = 0.1145535701471273, train acc = 0.7235649546827795,\n",
- "val_loss = 0.6210187313168548, val_acc = 0.5930232558139535\n",
- "\n",
- "epoch: 15\n",
- "epoch 15: train loss = 0.5053458828460294, l1loss = 0.11146547943953901, train acc = 0.7240684793554885,\n",
- "val_loss = 0.6982138563496197, val_acc = 0.6046511627906976\n",
- "\n",
- "epoch: 16\n",
- "epoch 16: train loss = 0.48290507022707846, l1loss = 0.10866142951199656, train acc = 0.7452165156092648,\n",
- "val_loss = 0.6597347740055055, val_acc = 0.5852713178294574\n",
- "\n",
- "epoch: 17\n",
- "epoch 17: train loss = 0.4580553521562559, l1loss = 0.10626861951262329, train acc = 0.7678751258811681,\n",
- "val_loss = 0.6594978039578874, val_acc = 0.5503875968992248\n",
- "\n",
- "epoch: 18\n",
- "epoch 18: train loss = 0.42439078243237366, l1loss = 0.10417765665570534, train acc = 0.7809667673716012,\n",
- "val_loss = 0.8379030893015307, val_acc = 0.6007751937984496\n",
- "\n",
- "epoch: 19\n",
- "epoch 19: train loss = 0.4111362245207707, l1loss = 0.10249360746067217, train acc = 0.7990936555891238,\n",
- "val_loss = 0.7785166943258093, val_acc = 0.5775193798449613\n",
- "\n",
- "epoch: 20\n",
- "epoch 20: train loss = 0.3810017136828777, l1loss = 0.10114079596022704, train acc = 0.8227593152064451,\n",
- "val_loss = 1.1790409819042498, val_acc = 0.5968992248062015\n",
- "\n",
- "epoch: 21\n",
- "epoch 21: train loss = 0.3430937575129464, l1loss = 0.09981364857364157, train acc = 0.8423967774420946,\n",
- "val_loss = 2.173583840214929, val_acc = 0.5852713178294574\n",
- "\n",
- "epoch: 22\n",
- "epoch 22: train loss = 0.3137253760571206, l1loss = 0.09864820635991034, train acc = 0.8600201409869084,\n",
- "val_loss = 1.0921057191236998, val_acc = 0.6085271317829457\n",
- "\n",
- "epoch: 23\n",
- "epoch 23: train loss = 0.2892607558830027, l1loss = 0.09758137509391868, train acc = 0.8746223564954683,\n",
- "val_loss = 3.513900268909543, val_acc = 0.6007751937984496\n",
- "\n",
- "epoch: 24\n",
- "epoch 24: train loss = 0.26403892289355685, l1loss = 0.09678075590132466, train acc = 0.8856998992950654,\n",
- "val_loss = 2.315718826740374, val_acc = 0.5813953488372093\n",
- "\n",
- "epoch: 25\n",
- "epoch 25: train loss = 0.2428219530577626, l1loss = 0.09631269205312834, train acc = 0.9033232628398792,\n",
- "val_loss = 1.3863746112631272, val_acc = 0.5581395348837209\n",
- "\n",
- "epoch: 26\n",
- "epoch 26: train loss = 0.23106987370705195, l1loss = 0.0956531882075985, train acc = 0.9078549848942599,\n",
- "val_loss = 3.2332160620726356, val_acc = 0.6007751937984496\n",
- "\n",
- "epoch: 27\n",
- "epoch 27: train loss = 0.24449666369657622, l1loss = 0.09556730201867412, train acc = 0.8942598187311178,\n",
- "val_loss = 1.645380326943804, val_acc = 0.5813953488372093\n",
- "\n",
- "epoch: 28\n",
- "epoch 28: train loss = 0.2848308266169595, l1loss = 0.09535873357923129, train acc = 0.8690835850956697,\n",
- "val_loss = 1.1068245080091648, val_acc = 0.5658914728682171\n",
- "\n",
- "epoch: 29\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "epoch 29: train loss = 0.21259009558988842, l1loss = 0.09493379782573934, train acc = 0.9154078549848943,\n",
- "val_loss = 1.3550787757533465, val_acc = 0.5930232558139535\n",
- "\n",
- "epoch: 30\n",
- "epoch 30: train loss = 0.17986376305961416, l1loss = 0.0943629225155138, train acc = 0.93202416918429,\n",
- "val_loss = 3.399730042824269, val_acc = 0.5813953488372093\n",
- "\n",
- "epoch: 31\n",
- "epoch 31: train loss = 0.1377756273278297, l1loss = 0.09381341631829078, train acc = 0.9587109768378651,\n",
- "val_loss = 1.4636259065117947, val_acc = 0.5930232558139535\n",
- "\n",
- "epoch: 32\n",
- "epoch 32: train loss = 0.15627087596409991, l1loss = 0.09361198741083539, train acc = 0.9370594159113796,\n",
- "val_loss = 2.010465736427144, val_acc = 0.5930232558139535\n",
- "\n",
- "epoch: 33\n",
- "epoch 33: train loss = 0.21454838335454043, l1loss = 0.09376692507052109, train acc = 0.904833836858006,\n",
- "val_loss = 3.555855639821258, val_acc = 0.5813953488372093\n",
- "\n",
- "epoch: 34\n",
- "epoch 34: train loss = 0.1433841212908667, l1loss = 0.09360743821237502, train acc = 0.9516616314199395,\n",
- "val_loss = 2.7130774370459623, val_acc = 0.6201550387596899\n",
- "\n",
- "epoch: 35\n",
- "validation acc increased (0.612403 ---> 0.620155)\n",
- "epoch 35: train loss = 0.09808508254970308, l1loss = 0.09324432826954553, train acc = 0.9712990936555891,\n",
- "val_loss = 2.107295219288316, val_acc = 0.5968992248062015\n",
- "\n",
- "epoch: 36\n",
- "epoch 36: train loss = 0.09397425880695037, l1loss = 0.09265048068095551, train acc = 0.9707955689828801,\n",
- "val_loss = 2.3170659689940223, val_acc = 0.6124031007751938\n",
- "\n",
- "epoch: 37\n",
- "epoch 37: train loss = 0.07624247869756166, l1loss = 0.09214130119764793, train acc = 0.9788519637462235,\n",
- "val_loss = 2.4360666565015885, val_acc = 0.6007751937984496\n",
- "\n",
- "epoch: 38\n",
- "epoch 38: train loss = 0.08142898870258773, l1loss = 0.09167566879728409, train acc = 0.972306143001007,\n",
- "val_loss = 3.0442896406779916, val_acc = 0.5775193798449613\n",
- "\n",
- "epoch: 39\n",
- "epoch 39: train loss = 0.05564044175476346, l1loss = 0.09119880020858538, train acc = 0.9914400805639476,\n",
- "val_loss = 3.227812499038933, val_acc = 0.6124031007751938\n",
- "\n",
- "epoch: 40\n",
- "validation acc increased (0.620155 ---> 0.627907)\n",
- "epoch 40: train loss = 0.047954427309086674, l1loss = 0.09070139868261831, train acc = 0.9894259818731118,\n",
- "val_loss = 2.6779214356296746, val_acc = 0.6085271317829457\n",
- "\n",
- "epoch: 41\n",
- "epoch 41: train loss = 0.0805706052561661, l1loss = 0.09069800027247522, train acc = 0.972306143001007,\n",
- "val_loss = 3.348146224206732, val_acc = 0.5775193798449613\n",
- "\n",
- "epoch: 42\n",
- "epoch 42: train loss = 0.120132422931905, l1loss = 0.09100891725504387, train acc = 0.9561933534743202,\n",
- "val_loss = 3.8875221688618034, val_acc = 0.5775193798449613\n",
- "\n",
- "epoch: 43\n",
- "validation acc increased (0.627907 ---> 0.627907)\n",
- "epoch 43: train loss = 0.08399603084435035, l1loss = 0.09161909364261416, train acc = 0.972809667673716,\n",
- "val_loss = 3.6390174350073172, val_acc = 0.5813953488372093\n",
- "\n",
- "epoch: 44\n",
- "epoch 44: train loss = 0.08419710318187452, l1loss = 0.09146056805192404, train acc = 0.9788519637462235,\n",
- "val_loss = 3.222931582105252, val_acc = 0.6201550387596899\n",
- "\n",
- "epoch: 45\n",
- "epoch 45: train loss = 0.06798528590261156, l1loss = 0.09109198656176992, train acc = 0.9783484390735147,\n",
- "val_loss = 4.210132499073827, val_acc = 0.5891472868217055\n",
- "\n",
- "epoch: 46\n",
- "epoch 46: train loss = 0.04261230672114085, l1loss = 0.09061521228706729, train acc = 0.9924471299093656,\n",
- "val_loss = 3.675780127214831, val_acc = 0.5852713178294574\n",
- "\n",
- "epoch: 47\n",
- "epoch 47: train loss = 0.030354493255431558, l1loss = 0.09003987193317692, train acc = 0.9969788519637462,\n",
- "val_loss = 3.3861651013987935, val_acc = 0.6201550387596899\n",
- "\n",
- "epoch: 48\n",
- "epoch 48: train loss = 0.02647563922973978, l1loss = 0.08927071901846749, train acc = 0.9974823766364552,\n",
- "val_loss = 3.631073840828829, val_acc = 0.5968992248062015\n",
- "\n",
- "epoch: 49\n",
- "epoch 49: train loss = 0.021390184417764645, l1loss = 0.08851830925406046, train acc = 0.9984894259818731,\n",
- "val_loss = 3.7224400486623823, val_acc = 0.6007751937984496\n",
- "\n",
- "epoch: 50\n",
- "epoch 50: train loss = 0.019483359864456175, l1loss = 0.08783999447826173, train acc = 0.998992950654582,\n",
- "val_loss = 3.5347149741742037, val_acc = 0.6201550387596899\n",
- "\n",
- "epoch: 51\n",
- "epoch 51: train loss = 0.01563782924795589, l1loss = 0.08718703778728858, train acc = 0.998992950654582,\n",
- "val_loss = 3.792635572049045, val_acc = 0.6085271317829457\n",
- "\n",
- "epoch: 52\n",
- "epoch 52: train loss = 0.015153491843307967, l1loss = 0.0865459504189328, train acc = 0.999496475327291,\n",
- "val_loss = 3.765355708987214, val_acc = 0.6162790697674418\n",
- "\n",
- "epoch: 53\n",
- "epoch 53: train loss = 0.014703800348416797, l1loss = 0.08596928344152245, train acc = 1.0,\n",
- "val_loss = 3.4551806544610697, val_acc = 0.6085271317829457\n",
- "\n",
- "!!! overfitted !!!\n",
- "[1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0]\n",
- "[1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1]\n",
- "early stoping results:\n",
- "\t [0.6041666666666666, 0.5572916666666666]\n",
- "output = [1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1,\n",
- " 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0,\n",
- " 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0,\n",
- " 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0,\n",
- " 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1,\n",
- " 0, 0, 1, 1, 1, 0, 1, 1])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1,\n",
- " 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0,\n",
- " 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1,\n",
- " 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1,\n",
- " 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0,\n",
- " 0, 0, 1, 1, 0, 0, 1, 1])\n",
- "output = [0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1,\n",
- " 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0,\n",
- " 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1,\n",
- " 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1,\n",
- " 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1,\n",
- " 1, 1, 1, 0, 1, 1, 1, 1])\n",
- "output = [1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0]\n",
- "label = tensor([1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0,\n",
- " 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0,\n",
- " 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1,\n",
- " 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0,\n",
- " 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0,\n",
- " 1, 0, 0, 0, 1, 1, 0, 0])\n",
- "output = [1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0,\n",
- " 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1,\n",
- " 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1,\n",
- " 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1,\n",
- " 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0,\n",
- " 1, 0, 1, 1, 0, 1, 0, 1])\n",
- "output = [1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n",
- " 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0,\n",
- " 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1,\n",
- " 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0,\n",
- " 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0,\n",
- " 0, 0, 1, 1, 1, 1, 0, 0])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1,\n",
- " 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0,\n",
- " 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0,\n",
- " 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1,\n",
- " 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1,\n",
- " 1, 0, 0, 0, 0, 1, 1, 1])\n",
- "output = [1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1,\n",
- " 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0,\n",
- " 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0,\n",
- " 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0,\n",
- " 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1,\n",
- " 1, 1, 1, 0, 1, 1, 1, 1])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0,\n",
- " 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0,\n",
- " 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1,\n",
- " 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1,\n",
- " 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0,\n",
- " 0, 0, 1, 1, 0, 1, 0, 1])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1,\n",
- " 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1,\n",
- " 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1,\n",
- " 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1,\n",
- " 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0,\n",
- " 0, 1, 0, 1, 1, 1, 1, 0])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0]\n",
- "label = tensor([1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0,\n",
- " 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0,\n",
- " 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1,\n",
- " 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0,\n",
- " 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
- " 0, 0, 1, 1, 1, 0, 0, 0])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1,\n",
- " 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1,\n",
- " 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0,\n",
- " 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0,\n",
- " 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0,\n",
- " 0, 0, 1, 1, 1, 1, 1, 0])\n",
- "output = [0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0]\n",
- "label = tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1,\n",
- " 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0,\n",
- " 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1,\n",
- " 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1,\n",
- " 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0,\n",
- " 1, 0, 1, 1, 1, 1, 0, 0])\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "output = [0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0]\n",
- "label = tensor([0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0,\n",
- " 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0,\n",
- " 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0,\n",
- " 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1,\n",
- " 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0,\n",
- " 1, 1, 1, 1, 0, 1, 1, 0])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1,\n",
- " 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1,\n",
- " 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1,\n",
- " 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0,\n",
- " 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1,\n",
- " 0, 1, 0, 1, 1, 1, 0, 1])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1,\n",
- " 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0,\n",
- " 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1])\n",
- "\t [0.6673376950176145, 0.6722054380664653]\n",
- "[1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0]\n",
- "[0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1]\n",
- "full train results:\n",
- "\t [0.59375, 0.5885416666666666]\n",
- "\t [0.9949672873678913, 0.9974823766364552]\n",
- "[1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1]\n",
- "[1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1]\n",
- "best accs results:\n",
- "\t [0.59375, 0.5625]\n",
- "\t [0.8037242073477604, 0.8670694864048338]\n",
- "[0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1]\n",
- "-----------------------------Fold 3---------------\n",
- "preparing dataloaders...\n",
- "torch.Size([66, 7, 9, 20])\n",
- "coef when 0 > 1 1\n",
- "creating model...\n",
- "calculating total steps...\n",
- "epoch: 1\n",
- "validation loss decreased (inf ---> 0.694551), val_acc = 0.3643410852713178\n",
- "validation acc increased (0.000000 ---> 0.364341)\n",
- "validation acc increased (0.364341 ---> 0.364341)\n",
- "epoch 1: train loss = 0.6969524957183608, l1loss = 0.1381219726307948, train acc = 0.5654135338345865,\n",
- "val_loss = 0.7013817683670872, val_acc = 0.3643410852713178\n",
- "\n",
- "epoch: 2\n",
- "validation acc increased (0.364341 ---> 0.364341)\n",
- "validation acc increased (0.364341 ---> 0.364341)\n",
- "epoch 2: train loss = 0.6680379967940481, l1loss = 0.1377297131937548, train acc = 0.6476190476190476,\n",
- "val_loss = 0.7116472231325253, val_acc = 0.3643410852713178\n",
- "\n",
- "epoch: 3\n",
- "validation acc increased (0.364341 ---> 0.364341)\n",
- "validation acc increased (0.364341 ---> 0.395349)\n",
- "epoch 3: train loss = 0.6447332068194721, l1loss = 0.1372586890644298, train acc = 0.656641604010025,\n",
- "val_loss = 0.7110485115716624, val_acc = 0.42248062015503873\n",
- "\n",
- "epoch: 4\n",
- "validation acc increased (0.395349 ---> 0.426357)\n",
- "validation acc increased (0.426357 ---> 0.480620)\n",
- "epoch 4: train loss = 0.6260023004130313, l1loss = 0.1366433581687454, train acc = 0.6551378446115288,\n",
- "val_loss = 0.6881170402201571, val_acc = 0.4883720930232558\n",
- "\n",
- "epoch: 5\n",
- "validation loss decreased (0.694551 ---> 0.686318), val_acc = 0.49224806201550386\n",
- "validation acc increased (0.480620 ---> 0.492248)\n",
- "validation loss decreased (0.686318 ---> 0.665753), val_acc = 0.49612403100775193\n",
- "validation acc increased (0.492248 ---> 0.496124)\n",
- "epoch 5: train loss = 0.6107026895783599, l1loss = 0.13582232229990468, train acc = 0.6636591478696742,\n",
- "val_loss = 0.6599389855251756, val_acc = 0.5\n",
- "\n",
- "epoch: 6\n",
- "validation loss decreased (0.665753 ---> 0.659603), val_acc = 0.49612403100775193\n",
- "validation acc increased (0.496124 ---> 0.496124)\n",
- "validation loss decreased (0.659603 ---> 0.657801), val_acc = 0.49612403100775193\n",
- "validation acc increased (0.496124 ---> 0.496124)\n",
- "epoch 6: train loss = 0.5962554595524208, l1loss = 0.13474145999229642, train acc = 0.6731829573934837,\n",
- "val_loss = 0.6608283524365388, val_acc = 0.4883720930232558\n",
- "\n",
- "epoch: 7\n",
- "epoch 7: train loss = 0.5844666071702962, l1loss = 0.13333625524563897, train acc = 0.6807017543859649,\n",
- "val_loss = 0.6693935172502384, val_acc = 0.5\n",
- "\n",
- "epoch: 8\n",
- "validation acc increased (0.496124 ---> 0.500000)\n",
- "validation acc increased (0.500000 ---> 0.500000)\n",
- "epoch 8: train loss = 0.5756398552342465, l1loss = 0.13154963165297545, train acc = 0.6832080200501253,\n",
- "val_loss = 0.6796527236931084, val_acc = 0.49612403100775193\n",
- "\n",
- "epoch: 9\n",
- "epoch 9: train loss = 0.5669913336149135, l1loss = 0.12937707920421035, train acc = 0.6907268170426065,\n",
- "val_loss = 0.691935848820117, val_acc = 0.49612403100775193\n",
- "\n",
- "epoch: 10\n",
- "epoch 10: train loss = 0.5586488504756364, l1loss = 0.1268213682380834, train acc = 0.6952380952380952,\n",
- "val_loss = 0.7135535478591919, val_acc = 0.49612403100775193\n",
- "\n",
- "epoch: 11\n",
- "validation acc increased (0.500000 ---> 0.500000)\n",
- "epoch 11: train loss = 0.547169718527256, l1loss = 0.12398374975474556, train acc = 0.7007518796992481,\n",
- "val_loss = 0.762653996778089, val_acc = 0.5\n",
- "\n",
- "epoch: 12\n",
- "validation acc increased (0.500000 ---> 0.500000)\n",
- "epoch 12: train loss = 0.5339804249001028, l1loss = 0.12088241919613721, train acc = 0.7132832080200501,\n",
- "val_loss = 0.748571188874947, val_acc = 0.49224806201550386\n",
- "\n",
- "epoch: 13\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "epoch 13: train loss = 0.520279674302964, l1loss = 0.11765421265572833, train acc = 0.7127819548872181,\n",
- "val_loss = 0.8685980189678281, val_acc = 0.49224806201550386\n",
- "\n",
- "epoch: 14\n",
- "validation acc increased (0.500000 ---> 0.500000)\n",
- "epoch 14: train loss = 0.5055005631948772, l1loss = 0.11437528439631738, train acc = 0.7298245614035088,\n",
- "val_loss = 1.0051480910574744, val_acc = 0.5\n",
- "\n",
- "epoch: 15\n",
- "validation acc increased (0.500000 ---> 0.500000)\n",
- "validation acc increased (0.500000 ---> 0.500000)\n",
- "epoch 15: train loss = 0.48474925627983306, l1loss = 0.11120888095601161, train acc = 0.7518796992481203,\n",
- "val_loss = 0.7568337228871131, val_acc = 0.5155038759689923\n",
- "\n",
- "epoch: 16\n",
- "validation acc increased (0.500000 ---> 0.500000)\n",
- "validation acc increased (0.500000 ---> 0.635659)\n",
- "epoch 16: train loss = 0.47231552145236116, l1loss = 0.10832016670091409, train acc = 0.7543859649122807,\n",
- "val_loss = 0.8228741385215936, val_acc = 0.5\n",
- "\n",
- "epoch: 17\n",
- "epoch 17: train loss = 0.4447837758333163, l1loss = 0.10566314655661882, train acc = 0.7729323308270677,\n",
- "val_loss = 0.7647813073424405, val_acc = 0.5\n",
- "\n",
- "epoch: 18\n",
- "epoch 18: train loss = 0.4166216857750015, l1loss = 0.10333552387423982, train acc = 0.793984962406015,\n",
- "val_loss = 1.1814187527164932, val_acc = 0.49612403100775193\n",
- "\n",
- "epoch: 19\n",
- "epoch 19: train loss = 0.4013006369571638, l1loss = 0.1015192489128065, train acc = 0.7984962406015037,\n",
- "val_loss = 1.049088865287544, val_acc = 0.5077519379844961\n",
- "\n",
- "epoch: 20\n",
- "epoch 20: train loss = 0.38358660407532424, l1loss = 0.10005261027872712, train acc = 0.8030075187969925,\n",
- "val_loss = 0.9182334223458933, val_acc = 0.6085271317829457\n",
- "\n",
- "epoch: 21\n",
- "epoch 21: train loss = 0.3649053702826488, l1loss = 0.09899659350626451, train acc = 0.8180451127819549,\n",
- "val_loss = 1.0533229842666507, val_acc = 0.5232558139534884\n",
- "\n",
- "epoch: 22\n",
- "epoch 22: train loss = 0.3366708062645188, l1loss = 0.09801618972219023, train acc = 0.837593984962406,\n",
- "val_loss = 1.0070983062880907, val_acc = 0.5542635658914729\n",
- "\n",
- "epoch: 23\n",
- "epoch 23: train loss = 0.30011755532788154, l1loss = 0.097101370052885, train acc = 0.8736842105263158,\n",
- "val_loss = 1.9908723738766456, val_acc = 0.5\n",
- "\n",
- "epoch: 24\n",
- "epoch 24: train loss = 0.2893361300005949, l1loss = 0.09641273515789132, train acc = 0.8671679197994987,\n",
- "val_loss = 4.135273530501728, val_acc = 0.46124031007751937\n",
- "\n",
- "epoch: 25\n",
- "epoch 25: train loss = 0.2674656217408957, l1loss = 0.09561551359288376, train acc = 0.87468671679198,\n",
- "val_loss = 3.555174014365026, val_acc = 0.4689922480620155\n",
- "\n",
- "epoch: 26\n",
- "epoch 26: train loss = 0.25737038730529316, l1loss = 0.09514911164690677, train acc = 0.8927318295739348,\n",
- "val_loss = 1.4680318331071573, val_acc = 0.6007751937984496\n",
- "\n",
- "epoch: 27\n",
- "epoch 27: train loss = 0.19562739423641884, l1loss = 0.09470970924560887, train acc = 0.9263157894736842,\n",
- "val_loss = 1.4795843288879986, val_acc = 0.5697674418604651\n",
- "\n",
- "epoch: 28\n",
- "epoch 28: train loss = 0.21110729103100329, l1loss = 0.09441207906506713, train acc = 0.9132832080200501,\n",
- "val_loss = 2.8698120837987857, val_acc = 0.5465116279069767\n",
- "\n",
- "epoch: 29\n",
- "epoch 29: train loss = 0.2995036028082807, l1loss = 0.09422946139087056, train acc = 0.8756892230576441,\n",
- "val_loss = 1.910594179854519, val_acc = 0.562015503875969\n",
- "\n",
- "epoch: 30\n",
- "epoch 30: train loss = 0.29151671891821956, l1loss = 0.09452762886097557, train acc = 0.8696741854636592,\n",
- "val_loss = 2.842216599819272, val_acc = 0.5193798449612403\n",
- "\n",
- "epoch: 31\n",
- "epoch 31: train loss = 0.24369466431756368, l1loss = 0.09408135806632818, train acc = 0.8897243107769424,\n",
- "val_loss = 1.2342111413302117, val_acc = 0.5775193798449613\n",
- "\n",
- "epoch: 32\n",
- "epoch 32: train loss = 0.177748836042887, l1loss = 0.09305703179131175, train acc = 0.9273182957393483,\n",
- "val_loss = 1.935332564247209, val_acc = 0.5155038759689923\n",
- "\n",
- "epoch: 33\n",
- "epoch 33: train loss = 0.1304508766285459, l1loss = 0.09235037882674607, train acc = 0.9604010025062657,\n",
- "val_loss = 1.6885591052299322, val_acc = 0.5581395348837209\n",
- "\n",
- "epoch: 34\n",
- "epoch 34: train loss = 0.1040880726020139, l1loss = 0.09180786986846971, train acc = 0.968421052631579,\n",
- "val_loss = 1.8644648585901704, val_acc = 0.5581395348837209\n",
- "\n",
- "epoch: 35\n",
- "epoch 35: train loss = 0.11854673838406278, l1loss = 0.09142765101036034, train acc = 0.9508771929824561,\n",
- "val_loss = 4.638857999517503, val_acc = 0.49612403100775193\n",
- "\n",
- "epoch: 36\n",
- "epoch 36: train loss = 0.10307383279789958, l1loss = 0.09096615721557971, train acc = 0.9664160401002506,\n",
- "val_loss = 2.192981764212135, val_acc = 0.5232558139534884\n",
- "\n",
- "epoch: 37\n",
- "epoch 37: train loss = 0.09253215106731669, l1loss = 0.09045285032805345, train acc = 0.9654135338345865,\n",
- "val_loss = 1.9693481386169907, val_acc = 0.5736434108527132\n",
- "\n",
- "epoch: 38\n",
- "epoch 38: train loss = 0.08359189911519077, l1loss = 0.09000530017721922, train acc = 0.9764411027568922,\n",
- "val_loss = 2.151853931042575, val_acc = 0.5426356589147286\n",
- "\n",
- "epoch: 39\n",
- "epoch 39: train loss = 0.06199482042613185, l1loss = 0.08960390809484592, train acc = 0.9824561403508771,\n",
- "val_loss = 2.2035991822102274, val_acc = 0.5891472868217055\n",
- "\n",
- "epoch: 40\n",
- "epoch 40: train loss = 0.07736399561951035, l1loss = 0.08935502176744896, train acc = 0.9709273182957393,\n",
- "val_loss = 2.0890348573872286, val_acc = 0.5736434108527132\n",
- "\n",
- "epoch: 41\n",
- "epoch 41: train loss = 0.07827905084106856, l1loss = 0.08891892515701758, train acc = 0.9724310776942355,\n",
- "val_loss = 4.512386254561964, val_acc = 0.5038759689922481\n",
- "\n",
- "epoch: 42\n",
- "epoch 42: train loss = 0.046724244664635574, l1loss = 0.08847408577762451, train acc = 0.9899749373433584,\n",
- "val_loss = 2.2274649688439774, val_acc = 0.5658914728682171\n",
- "\n",
- "epoch: 43\n",
- "epoch 43: train loss = 0.04698878815747742, l1loss = 0.08795604082874786, train acc = 0.9904761904761905,\n",
- "val_loss = 3.3725712779880492, val_acc = 0.6085271317829457\n",
- "\n",
- "epoch: 44\n",
- "epoch 44: train loss = 0.044069078596388186, l1loss = 0.08755256260621518, train acc = 0.9884711779448622,\n",
- "val_loss = 2.6858827642692154, val_acc = 0.6085271317829457\n",
- "\n",
- "epoch: 45\n",
- "epoch 45: train loss = 0.03322243625134753, l1loss = 0.08706357124679369, train acc = 0.993483709273183,\n",
- "val_loss = 3.174153364906015, val_acc = 0.5232558139534884\n",
- "\n",
- "epoch: 46\n",
- "epoch 46: train loss = 0.0319087140971706, l1loss = 0.08656990248383137, train acc = 0.9949874686716792,\n",
- "val_loss = 3.085727780595306, val_acc = 0.6007751937984496\n",
- "\n",
- "epoch: 47\n",
- "epoch 47: train loss = 0.025276731471892886, l1loss = 0.0862220882547828, train acc = 0.9979949874686717,\n",
- "val_loss = 2.4670777454856756, val_acc = 0.562015503875969\n",
- "\n",
- "epoch: 48\n",
- "epoch 48: train loss = 0.03616303299976172, l1loss = 0.08582206025234142, train acc = 0.9909774436090225,\n",
- "val_loss = 2.59104063915623, val_acc = 0.5891472868217055\n",
- "\n",
- "epoch: 49\n",
- "epoch 49: train loss = 0.022464091186983545, l1loss = 0.08538708544165867, train acc = 0.9984962406015038,\n",
- "val_loss = 2.7770460093206215, val_acc = 0.5930232558139535\n",
- "\n",
- "epoch: 50\n",
- "epoch 50: train loss = 0.01979252443389785, l1loss = 0.08483099960593651, train acc = 0.9984962406015038,\n",
- "val_loss = 2.9248241084490636, val_acc = 0.5387596899224806\n",
- "\n",
- "epoch: 51\n",
- "epoch 51: train loss = 0.015581658314493365, l1loss = 0.08423851138368287, train acc = 0.9994987468671679,\n",
- "val_loss = 2.912791069625884, val_acc = 0.5387596899224806\n",
- "\n",
- "epoch: 52\n",
- "epoch 52: train loss = 0.014949842383688254, l1loss = 0.08364893044146679, train acc = 1.0,\n",
- "val_loss = 2.928498700261116, val_acc = 0.5891472868217055\n",
- "\n",
- "!!! overfitted !!!\n",
- "[1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1]\n",
- "[0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1]\n",
- "early stoping results:\n",
- "\t [0.6041666666666666, 0.5572916666666666, 0.5208333333333334]\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0]\n",
- "label = tensor([0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0,\n",
- " 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0,\n",
- " 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1,\n",
- " 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,\n",
- " 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1,\n",
- " 1, 1, 0, 1, 1, 0, 0, 0])\n",
- "output = [1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1,\n",
- " 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0,\n",
- " 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,\n",
- " 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1,\n",
- " 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0,\n",
- " 1, 1, 0, 1, 1, 1, 1, 0])\n",
- "output = [1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1,\n",
- " 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1,\n",
- " 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1,\n",
- " 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1,\n",
- " 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0,\n",
- " 1, 1, 1, 1, 1, 1, 0, 1])\n",
- "output = [1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0]\n",
- "label = tensor([0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1,\n",
- " 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1,\n",
- " 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1,\n",
- " 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0,\n",
- " 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0,\n",
- " 0, 1, 1, 1, 0, 1, 1, 0])\n",
- "output = [1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1,\n",
- " 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1,\n",
- " 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1,\n",
- " 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0,\n",
- " 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0,\n",
- " 0, 1, 0, 1, 1, 1, 0, 1])\n",
- "output = [0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0]\n",
- "label = tensor([0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1,\n",
- " 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1,\n",
- " 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,\n",
- " 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0,\n",
- " 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0,\n",
- " 0, 1, 1, 0, 1, 1, 1, 0])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0]\n",
- "label = tensor([0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1,\n",
- " 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0,\n",
- " 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0,\n",
- " 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0,\n",
- " 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0,\n",
- " 0, 0, 0, 1, 0, 1, 0, 1])\n",
- "output = [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1,\n",
- " 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1,\n",
- " 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1,\n",
- " 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
- " 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1,\n",
- " 0, 0, 0, 0, 1, 1, 1, 0])\n",
- "output = [1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0,\n",
- " 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1,\n",
- " 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1,\n",
- " 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0,\n",
- " 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0,\n",
- " 0, 1, 0, 0, 0, 0, 1, 1])\n",
- "output = [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0]\n",
- "label = tensor([0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1,\n",
- " 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0,\n",
- " 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1,\n",
- " 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0,\n",
- " 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1,\n",
- " 0, 1, 0, 0, 0, 0, 0, 1])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1,\n",
- " 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1,\n",
- " 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0,\n",
- " 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1,\n",
- " 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0,\n",
- " 0, 0, 0, 1, 0, 1, 1, 0])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0]\n",
- "label = tensor([0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1,\n",
- " 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1,\n",
- " 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n",
- " 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1,\n",
- " 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0,\n",
- " 0, 0, 1, 0, 0, 1, 0, 1])\n",
- "output = [1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0,\n",
- " 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1,\n",
- " 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0,\n",
- " 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0,\n",
- " 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0,\n",
- " 1, 1, 0, 0, 0, 1, 0, 0])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0]\n",
- "label = tensor([0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0,\n",
- " 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1,\n",
- " 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1,\n",
- " 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1,\n",
- " 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0,\n",
- " 1, 0, 0, 0, 1, 1, 1, 0])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1,\n",
- " 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1,\n",
- " 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0,\n",
- " 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0,\n",
- " 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,\n",
- " 1, 1, 0, 1, 1, 1, 1, 0])\n",
- "output = [1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0]\n",
- "label = tensor([1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1,\n",
- " 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1,\n",
- " 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1,\n",
- " 0, 1, 0])\n",
- "\t [0.6673376950176145, 0.6722054380664653, 0.6802005012531328]\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "[1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0]\n",
- "[1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0]\n",
- "full train results:\n",
- "\t [0.59375, 0.5885416666666666, 0.6354166666666666]\n",
- "\t [0.9949672873678913, 0.9974823766364552, 0.8666666666666667]\n",
- "[0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1]\n",
- "[1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1]\n",
- "best accs results:\n",
- "\t [0.59375, 0.5625, 0.6354166666666666]\n",
- "\t [0.8037242073477604, 0.8670694864048338, 0.4967418546365915]\n",
- "[1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1]\n",
- "-----------------------------Fold 4---------------\n",
- "preparing dataloaders...\n",
- "torch.Size([75, 7, 9, 20])\n",
- "coef when 0 > 1 1\n",
- "creating model...\n",
- "calculating total steps...\n",
- "epoch: 1\n",
- "validation loss decreased (inf ---> 0.683370), val_acc = 0.6666666666666666\n",
- "validation acc increased (0.000000 ---> 0.666667)\n",
- "epoch 1: train loss = 0.6846315076727115, l1loss = 0.13866792857171056, train acc = 0.5518962075848304,\n",
- "val_loss = 0.7129394467486891, val_acc = 0.3333333333333333\n",
- "\n",
- "epoch: 2\n",
- "epoch 2: train loss = 0.6627097059628683, l1loss = 0.13824024070760685, train acc = 0.6147704590818364,\n",
- "val_loss = 0.7328977205956629, val_acc = 0.34108527131782945\n",
- "\n",
- "epoch: 3\n",
- "epoch 3: train loss = 0.6461570380690569, l1loss = 0.13772706633913304, train acc = 0.6382235528942116,\n",
- "val_loss = 0.7231047513873078, val_acc = 0.46124031007751937\n",
- "\n",
- "epoch: 4\n",
- "epoch 4: train loss = 0.6311211969086272, l1loss = 0.13707065026084345, train acc = 0.6482035928143712,\n",
- "val_loss = 0.6812371556148973, val_acc = 0.5\n",
- "\n",
- "epoch: 5\n",
- "validation loss decreased (0.683370 ---> 0.677676), val_acc = 0.5116279069767442\n",
- "validation loss decreased (0.677676 ---> 0.642738), val_acc = 0.5271317829457365\n",
- "epoch 5: train loss = 0.6152418267703104, l1loss = 0.1361936211526513, train acc = 0.6596806387225549,\n",
- "val_loss = 0.6299803982409395, val_acc = 0.5348837209302325\n",
- "\n",
- "epoch: 6\n",
- "validation loss decreased (0.642738 ---> 0.628161), val_acc = 0.5310077519379846\n",
- "validation loss decreased (0.628161 ---> 0.618165), val_acc = 0.5271317829457365\n",
- "epoch 6: train loss = 0.6014872402726057, l1loss = 0.1350108287529555, train acc = 0.6641716566866267,\n",
- "val_loss = 0.6179831277492435, val_acc = 0.5271317829457365\n",
- "\n",
- "epoch: 7\n",
- "validation loss decreased (0.618165 ---> 0.617216), val_acc = 0.5271317829457365\n",
- "validation loss decreased (0.617216 ---> 0.614287), val_acc = 0.5193798449612403\n",
- "epoch 7: train loss = 0.5905261646487756, l1loss = 0.1334880442319516, train acc = 0.6711576846307385,\n",
- "val_loss = 0.6132445769716602, val_acc = 0.5193798449612403\n",
- "\n",
- "epoch: 8\n",
- "validation loss decreased (0.614287 ---> 0.612300), val_acc = 0.5193798449612403\n",
- "validation loss decreased (0.612300 ---> 0.611189), val_acc = 0.5271317829457365\n",
- "epoch 8: train loss = 0.5812796850404339, l1loss = 0.13158048356245616, train acc = 0.6781437125748503,\n",
- "val_loss = 0.6176805500836335, val_acc = 0.5310077519379846\n",
- "\n",
- "epoch: 9\n",
- "epoch 9: train loss = 0.5716513974699907, l1loss = 0.12926983946335768, train acc = 0.687125748502994,\n",
- "val_loss = 0.6314459124276804, val_acc = 0.5310077519379846\n",
- "\n",
- "epoch: 10\n",
- "epoch 10: train loss = 0.5630688287778767, l1loss = 0.12655608326256157, train acc = 0.6976047904191617,\n",
- "val_loss = 0.6434678920479708, val_acc = 0.5310077519379846\n",
- "\n",
- "epoch: 11\n",
- "epoch 11: train loss = 0.5509181600844789, l1loss = 0.12356667217618215, train acc = 0.7065868263473054,\n",
- "val_loss = 0.6670152029787847, val_acc = 0.5271317829457365\n",
- "\n",
- "epoch: 12\n",
- "epoch 12: train loss = 0.5384525743549218, l1loss = 0.12038559576648913, train acc = 0.7180638722554891,\n",
- "val_loss = 0.7214980153150337, val_acc = 0.5387596899224806\n",
- "\n",
- "epoch: 13\n",
- "epoch 13: train loss = 0.5237833831957477, l1loss = 0.11706482975961205, train acc = 0.7250499001996008,\n",
- "val_loss = 0.642303595550416, val_acc = 0.5348837209302325\n",
- "\n",
- "epoch: 14\n",
- "epoch 14: train loss = 0.49995520455156733, l1loss = 0.11376702059766727, train acc = 0.7380239520958084,\n",
- "val_loss = 0.7001118160957513, val_acc = 0.5387596899224806\n",
- "\n",
- "epoch: 15\n",
- "epoch 15: train loss = 0.4798619277225045, l1loss = 0.11074621205082434, train acc = 0.751996007984032,\n",
- "val_loss = 0.6432332724563835, val_acc = 0.5465116279069767\n",
- "\n",
- "epoch: 16\n",
- "epoch 16: train loss = 0.450496327377365, l1loss = 0.10813732176067349, train acc = 0.7639720558882236,\n",
- "val_loss = 0.6254766467929811, val_acc = 0.5465116279069767\n",
- "\n",
- "epoch: 17\n",
- "epoch 17: train loss = 0.42610586504498404, l1loss = 0.1061172228076025, train acc = 0.7869261477045908,\n",
- "val_loss = 0.8937256091324858, val_acc = 0.6666666666666666\n",
- "\n",
- "epoch: 18\n",
- "validation acc increased (0.666667 ---> 0.666667)\n",
- "validation acc increased (0.666667 ---> 0.666667)\n",
- "epoch 18: train loss = 0.4125394836633267, l1loss = 0.10447883528625655, train acc = 0.7999001996007984,\n",
- "val_loss = 1.3210663388865862, val_acc = 0.6666666666666666\n",
- "\n",
- "epoch: 19\n",
- "validation acc increased (0.666667 ---> 0.666667)\n",
- "epoch 19: train loss = 0.36414110577273034, l1loss = 0.10303020968051728, train acc = 0.8313373253493014,\n",
- "val_loss = 1.6127898642006366, val_acc = 0.6666666666666666\n",
- "\n",
- "epoch: 20\n",
- "validation acc increased (0.666667 ---> 0.666667)\n",
- "validation acc increased (0.666667 ---> 0.666667)\n",
- "epoch 20: train loss = 0.3496626854061842, l1loss = 0.10169063459434909, train acc = 0.8383233532934131,\n",
- "val_loss = 1.0074581005141254, val_acc = 0.6550387596899225\n",
- "\n",
- "epoch: 21\n",
- "epoch 21: train loss = 0.32243358922337817, l1loss = 0.10068233941724439, train acc = 0.8502994011976048,\n",
- "val_loss = 0.9281984200311262, val_acc = 0.5503875968992248\n",
- "\n",
- "epoch: 22\n",
- "epoch 22: train loss = 0.31491396836415975, l1loss = 0.09971560896216276, train acc = 0.8507984031936128,\n",
- "val_loss = 1.5095452051754146, val_acc = 0.6589147286821705\n",
- "\n",
- "epoch: 23\n",
- "epoch 23: train loss = 0.29515863696258227, l1loss = 0.09860645926105761, train acc = 0.874251497005988,\n",
- "val_loss = 0.887832756074824, val_acc = 0.5775193798449613\n",
- "\n",
- "epoch: 24\n",
- "epoch 24: train loss = 0.24699413461123637, l1loss = 0.09801728431574123, train acc = 0.8917165668662674,\n",
- "val_loss = 1.674672379974247, val_acc = 0.6550387596899225\n",
- "\n",
- "epoch: 25\n",
- "epoch 25: train loss = 0.255139184093285, l1loss = 0.09737496454261259, train acc = 0.8782435129740519,\n",
- "val_loss = 0.9508669986281302, val_acc = 0.5968992248062015\n",
- "\n",
- "epoch: 26\n",
- "epoch 26: train loss = 0.21437893561022486, l1loss = 0.09652904412168228, train acc = 0.9131736526946108,\n",
- "val_loss = 2.068134845689286, val_acc = 0.6511627906976745\n",
- "\n",
- "epoch: 27\n",
- "validation acc increased (0.666667 ---> 0.666667)\n",
- "epoch 27: train loss = 0.23168334174536898, l1loss = 0.09616398560013362, train acc = 0.905688622754491,\n",
- "val_loss = 1.2540726026361302, val_acc = 0.5697674418604651\n",
- "\n",
- "epoch: 28\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "epoch 28: train loss = 0.19921719834595145, l1loss = 0.09572086222990306, train acc = 0.9171656686626747,\n",
- "val_loss = 1.3161783601886543, val_acc = 0.5697674418604651\n",
- "\n",
- "epoch: 29\n",
- "epoch 29: train loss = 0.17246595051831115, l1loss = 0.09554041276672881, train acc = 0.937624750499002,\n",
- "val_loss = 3.398609378820582, val_acc = 0.6589147286821705\n",
- "\n",
- "epoch: 30\n",
- "epoch 30: train loss = 0.19262273220007053, l1loss = 0.09535936463973717, train acc = 0.9166666666666666,\n",
- "val_loss = 3.561779273572818, val_acc = 0.6550387596899225\n",
- "\n",
- "epoch: 31\n",
- "epoch 31: train loss = 0.13978799877171508, l1loss = 0.09458633950965371, train acc = 0.9545908183632734,\n",
- "val_loss = 3.981414251124248, val_acc = 0.6511627906976745\n",
- "\n",
- "epoch: 32\n",
- "epoch 32: train loss = 0.13305879046698055, l1loss = 0.09435142839918594, train acc = 0.9491017964071856,\n",
- "val_loss = 1.737026299617087, val_acc = 0.5852713178294574\n",
- "\n",
- "epoch: 33\n",
- "epoch 33: train loss = 0.15574632034806196, l1loss = 0.09403544885967068, train acc = 0.9406187624750499,\n",
- "val_loss = 3.2701686964940735, val_acc = 0.6434108527131783\n",
- "\n",
- "epoch: 34\n",
- "epoch 34: train loss = 0.12506161606359387, l1loss = 0.09349807686434534, train acc = 0.9545908183632734,\n",
- "val_loss = 1.6769787791336692, val_acc = 0.5542635658914729\n",
- "\n",
- "epoch: 35\n",
- "epoch 35: train loss = 0.0993102289914728, l1loss = 0.0933545645988154, train acc = 0.9715568862275449,\n",
- "val_loss = 2.9960278762403383, val_acc = 0.5658914728682171\n",
- "\n",
- "epoch: 36\n",
- "epoch 36: train loss = 0.1006979171684878, l1loss = 0.09339527790239471, train acc = 0.9645708582834331,\n",
- "val_loss = 3.6225777300753337, val_acc = 0.6589147286821705\n",
- "\n",
- "epoch: 37\n",
- "validation acc increased (0.666667 ---> 0.666667)\n",
- "epoch 37: train loss = 0.20696525356012904, l1loss = 0.09307418661024756, train acc = 0.9436127744510978,\n",
- "val_loss = 3.3196238436440164, val_acc = 0.6550387596899225\n",
- "\n",
- "epoch: 38\n",
- "epoch 38: train loss = 0.16276606584261516, l1loss = 0.094022158347442, train acc = 0.9416167664670658,\n",
- "val_loss = 1.4645525244779365, val_acc = 0.562015503875969\n",
- "\n",
- "epoch: 39\n",
- "epoch 39: train loss = 0.13700719901961006, l1loss = 0.09361297870705465, train acc = 0.9500998003992016,\n",
- "val_loss = 1.8845138701126558, val_acc = 0.6162790697674418\n",
- "\n",
- "epoch: 40\n",
- "epoch 40: train loss = 0.10037804332500447, l1loss = 0.09339838315864761, train acc = 0.9655688622754491,\n",
- "val_loss = 2.1819226367529048, val_acc = 0.5736434108527132\n",
- "\n",
- "epoch: 41\n",
- "epoch 41: train loss = 0.06094860051295715, l1loss = 0.09271240844103153, train acc = 0.9860279441117764,\n",
- "val_loss = 2.0809006154999254, val_acc = 0.5775193798449613\n",
- "\n",
- "epoch: 42\n",
- "epoch 42: train loss = 0.058166408848143863, l1loss = 0.09190802524487178, train acc = 0.9840319361277445,\n",
- "val_loss = 3.20259105789569, val_acc = 0.6356589147286822\n",
- "\n",
- "epoch: 43\n",
- "epoch 43: train loss = 0.04276541528290141, l1loss = 0.09118846676009382, train acc = 0.9955089820359282,\n",
- "val_loss = 2.6667797657870507, val_acc = 0.5542635658914729\n",
- "\n",
- "epoch: 44\n",
- "epoch 44: train loss = 0.03647657026520033, l1loss = 0.09060218744351717, train acc = 0.9945109780439122,\n",
- "val_loss = 2.7449685954308327, val_acc = 0.5736434108527132\n",
- "\n",
- "epoch: 45\n",
- "epoch 45: train loss = 0.03098047074830223, l1loss = 0.09009029960144542, train acc = 0.9965069860279441,\n",
- "val_loss = 3.183011844176655, val_acc = 0.6356589147286822\n",
- "\n",
- "epoch: 46\n",
- "epoch 46: train loss = 0.031913511436469776, l1loss = 0.08947685778795364, train acc = 0.9945109780439122,\n",
- "val_loss = 3.353361038852108, val_acc = 0.5581395348837209\n",
- "\n",
- "epoch: 47\n",
- "epoch 47: train loss = 0.02452494901513863, l1loss = 0.08888289796378085, train acc = 0.9970059880239521,\n",
- "val_loss = 2.6891871090083157, val_acc = 0.5930232558139535\n",
- "\n",
- "epoch: 48\n",
- "epoch 48: train loss = 0.05274008282882249, l1loss = 0.08846809942565279, train acc = 0.9810379241516967,\n",
- "val_loss = 4.0638800591461415, val_acc = 0.5658914728682171\n",
- "\n",
- "epoch: 49\n",
- "epoch 49: train loss = 0.048649915469858696, l1loss = 0.08838037115906527, train acc = 0.9865269461077845,\n",
- "val_loss = 3.1181407630905627, val_acc = 0.5658914728682171\n",
- "\n",
- "epoch: 50\n",
- "epoch 50: train loss = 0.051724245299598175, l1loss = 0.08826220857942414, train acc = 0.9810379241516967,\n",
- "val_loss = 2.9276230390681777, val_acc = 0.562015503875969\n",
- "\n",
- "epoch: 51\n",
- "epoch 51: train loss = 0.03112137069304784, l1loss = 0.08755336800021325, train acc = 0.9915169660678643,\n",
- "val_loss = 3.8795810669891595, val_acc = 0.6744186046511628\n",
- "\n",
- "epoch: 52\n",
- "validation acc increased (0.666667 ---> 0.678295)\n",
- "epoch 52: train loss = 0.019947556626356527, l1loss = 0.08692338591266772, train acc = 0.998003992015968,\n",
- "val_loss = 2.994923972091529, val_acc = 0.5775193798449613\n",
- "\n",
- "epoch: 53\n",
- "epoch 53: train loss = 0.016029105731797312, l1loss = 0.08628454102489524, train acc = 1.0,\n",
- "val_loss = 3.339036170826402, val_acc = 0.5930232558139535\n",
- "\n",
- "!!! overfitted !!!\n",
- "[0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1]\n",
- "[0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1]\n",
- "early stoping results:\n",
- "\t [0.6041666666666666, 0.5572916666666666, 0.5208333333333334, 0.5078534031413613]\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0]\n",
- "label = tensor([1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1,\n",
- " 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1,\n",
- " 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1,\n",
- " 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0,\n",
- " 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0,\n",
- " 0, 0, 0, 0, 1, 0, 0, 1])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0,\n",
- " 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1,\n",
- " 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0,\n",
- " 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0,\n",
- " 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0,\n",
- " 0, 1, 0, 1, 1, 1, 1, 0])\n",
- "output = [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0]\n",
- "label = tensor([0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0,\n",
- " 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1,\n",
- " 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1,\n",
- " 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0,\n",
- " 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0,\n",
- " 1, 0, 0, 1, 1, 0, 0, 1])\n",
- "output = [1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1,\n",
- " 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1,\n",
- " 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1,\n",
- " 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0,\n",
- " 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0,\n",
- " 0, 0, 1, 1, 0, 0, 1, 0])\n",
- "output = [0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0,\n",
- " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1,\n",
- " 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0,\n",
- " 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1,\n",
- " 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0,\n",
- " 0, 1, 0, 1, 0, 1, 1, 1])\n",
- "output = [1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0,\n",
- " 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1,\n",
- " 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0,\n",
- " 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0,\n",
- " 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1,\n",
- " 1, 0, 0, 1, 0, 1, 1, 0])\n",
- "output = [1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1,\n",
- " 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1,\n",
- " 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0,\n",
- " 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1,\n",
- " 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1,\n",
- " 0, 1, 1, 1, 1, 0, 0, 0])\n",
- "output = [1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0,\n",
- " 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1,\n",
- " 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0,\n",
- " 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0,\n",
- " 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1,\n",
- " 1, 0, 1, 1, 0, 1, 0, 0])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1,\n",
- " 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1,\n",
- " 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1,\n",
- " 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1,\n",
- " 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1,\n",
- " 1, 0, 1, 1, 1, 0, 1, 0])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0]\n",
- "label = tensor([1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1,\n",
- " 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0,\n",
- " 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1,\n",
- " 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0,\n",
- " 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0,\n",
- " 0, 1, 0, 0, 0, 1, 1, 1])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0,\n",
- " 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1,\n",
- " 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0,\n",
- " 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0,\n",
- " 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1,\n",
- " 1, 1, 0, 1, 1, 0, 1, 1])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]\n",
- "label = tensor([0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0,\n",
- " 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0,\n",
- " 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1,\n",
- " 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0,\n",
- " 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0,\n",
- " 1, 1, 0, 1, 0, 0, 0, 1])\n",
- "output = [1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0]\n",
- "label = tensor([1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0,\n",
- " 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1,\n",
- " 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0,\n",
- " 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1,\n",
- " 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0,\n",
- " 0, 0, 1, 1, 0, 0, 0, 0])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1,\n",
- " 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1,\n",
- " 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1,\n",
- " 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0,\n",
- " 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0,\n",
- " 1, 0, 1, 0, 1, 0, 1, 1])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0]\n",
- "label = tensor([1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0,\n",
- " 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1,\n",
- " 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0,\n",
- " 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0,\n",
- " 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0,\n",
- " 0, 0, 0, 0, 1, 0, 1, 1])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0,\n",
- " 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1,\n",
- " 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0,\n",
- " 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1])\n",
- "\t [0.6673376950176145, 0.6722054380664653, 0.6802005012531328, 0.6826347305389222]\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "[0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1]\n",
- "[1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0]\n",
- "full train results:\n",
- "\t [0.59375, 0.5885416666666666, 0.6354166666666666, 0.612565445026178]\n",
- "\t [0.9949672873678913, 0.9974823766364552, 0.8666666666666667, 0.8393213572854291]\n",
- "[1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1]\n",
- "[1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1]\n",
- "best accs results:\n",
- "\t [0.59375, 0.5625, 0.6354166666666666, 0.6282722513089005]\n",
- "\t [0.8037242073477604, 0.8670694864048338, 0.4967418546365915, 0.5489021956087824]\n",
- "[0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1]\n",
- "-----------------------------Fold 5---------------\n",
- "preparing dataloaders...\n",
- "torch.Size([84, 7, 9, 20])\n",
- "coef when 0 > 1 1\n",
- "creating model...\n",
- "calculating total steps...\n",
- "epoch: 1\n",
- "validation loss decreased (inf ---> 0.711296), val_acc = 0.36046511627906974\n",
- "validation acc increased (0.000000 ---> 0.360465)\n",
- "validation acc increased (0.360465 ---> 0.360465)\n",
- "epoch 1: train loss = 0.6870066553995976, l1loss = 0.13839889542723394, train acc = 0.5608412618928392,\n",
- "val_loss = 0.7490144917207171, val_acc = 0.36046511627906974\n",
- "\n",
- "epoch: 2\n",
- "validation acc increased (0.360465 ---> 0.360465)\n",
- "validation acc increased (0.360465 ---> 0.360465)\n",
- "epoch 2: train loss = 0.6565808814110371, l1loss = 0.13801056421961136, train acc = 0.629444166249374,\n",
- "val_loss = 0.765150977659595, val_acc = 0.36046511627906974\n",
- "\n",
- "epoch: 3\n",
- "validation acc increased (0.360465 ---> 0.360465)\n",
- "validation acc increased (0.360465 ---> 0.375969)\n",
- "epoch 3: train loss = 0.6360432937793512, l1loss = 0.13752871652543694, train acc = 0.6459689534301453,\n",
- "val_loss = 0.7466173269027887, val_acc = 0.42248062015503873\n",
- "\n",
- "epoch: 4\n",
- "validation acc increased (0.375969 ---> 0.434109)\n",
- "validation acc increased (0.434109 ---> 0.492248)\n",
- "epoch 4: train loss = 0.622315076511385, l1loss = 0.13690243397256882, train acc = 0.6439659489233851,\n",
- "val_loss = 0.6967740377714468, val_acc = 0.5116279069767442\n",
- "\n",
- "epoch: 5\n",
- "validation loss decreased (0.711296 ---> 0.693885), val_acc = 0.5116279069767442\n",
- "validation acc increased (0.492248 ---> 0.511628)\n",
- "validation loss decreased (0.693885 ---> 0.664049), val_acc = 0.5116279069767442\n",
- "validation acc increased (0.511628 ---> 0.511628)\n",
- "epoch 5: train loss = 0.6108160970147753, l1loss = 0.13607064582702214, train acc = 0.6539809714571858,\n",
- "val_loss = 0.6510321973830231, val_acc = 0.5116279069767442\n",
- "\n",
- "epoch: 6\n",
- "validation loss decreased (0.664049 ---> 0.649357), val_acc = 0.5155038759689923\n",
- "validation acc increased (0.511628 ---> 0.515504)\n",
- "validation loss decreased (0.649357 ---> 0.643178), val_acc = 0.5271317829457365\n",
- "validation acc increased (0.515504 ---> 0.527132)\n",
- "epoch 6: train loss = 0.598267633048188, l1loss = 0.13498037214063083, train acc = 0.6675012518778167,\n",
- "val_loss = 0.6432933178982994, val_acc = 0.5271317829457365\n",
- "\n",
- "epoch: 7\n",
- "validation loss decreased (0.643178 ---> 0.642807), val_acc = 0.5310077519379846\n",
- "validation acc increased (0.527132 ---> 0.531008)\n",
- "validation loss decreased (0.642807 ---> 0.641224), val_acc = 0.5271317829457365\n",
- "epoch 7: train loss = 0.5881321117707234, l1loss = 0.13360911191375363, train acc = 0.671507260891337,\n",
- "val_loss = 0.6395671737286471, val_acc = 0.5310077519379846\n",
- "\n",
- "epoch: 8\n",
- "validation loss decreased (0.641224 ---> 0.639952), val_acc = 0.5310077519379846\n",
- "validation acc increased (0.531008 ---> 0.531008)\n",
- "validation loss decreased (0.639952 ---> 0.637798), val_acc = 0.5310077519379846\n",
- "validation acc increased (0.531008 ---> 0.531008)\n",
- "epoch 8: train loss = 0.5801561990557877, l1loss = 0.13193928131355187, train acc = 0.6760140210315473,\n",
- "val_loss = 0.6369561816370765, val_acc = 0.5271317829457365\n",
- "\n",
- "epoch: 9\n",
- "validation loss decreased (0.637798 ---> 0.637255), val_acc = 0.5271317829457365\n",
- "validation loss decreased (0.637255 ---> 0.634658), val_acc = 0.5348837209302325\n",
- "validation acc increased (0.531008 ---> 0.534884)\n",
- "epoch 9: train loss = 0.5717252453208029, l1loss = 0.1299565265819796, train acc = 0.6815222834251377,\n",
- "val_loss = 0.637611803403774, val_acc = 0.5310077519379846\n",
- "\n",
- "epoch: 10\n",
- "validation acc increased (0.534884 ---> 0.534884)\n",
- "epoch 10: train loss = 0.5626098037840548, l1loss = 0.12765505166727123, train acc = 0.6920380570856285,\n",
- "val_loss = 0.656070447707361, val_acc = 0.5310077519379846\n",
- "\n",
- "epoch: 11\n",
- "validation acc increased (0.534884 ---> 0.534884)\n",
- "epoch 11: train loss = 0.5535151342123105, l1loss = 0.12510810005602147, train acc = 0.6955433149724587,\n",
- "val_loss = 0.6719910063484843, val_acc = 0.5310077519379846\n",
- "\n",
- "epoch: 12\n",
- "epoch 12: train loss = 0.5409535019172091, l1loss = 0.1223971828236303, train acc = 0.7090635953930896,\n",
- "val_loss = 0.7268788130708443, val_acc = 0.5232558139534884\n",
- "\n",
- "epoch: 13\n",
- "validation acc increased (0.534884 ---> 0.538760)\n",
- "epoch 13: train loss = 0.5258218407123519, l1loss = 0.11959482016089444, train acc = 0.7250876314471708,\n",
- "val_loss = 0.6585291873577029, val_acc = 0.5503875968992248\n",
- "\n",
- "epoch: 14\n",
- "epoch 14: train loss = 0.5086835484893906, l1loss = 0.1168519536823288, train acc = 0.7336004006009014,\n",
- "val_loss = 0.6720465666563936, val_acc = 0.5348837209302325\n",
- "\n",
- "epoch: 15\n",
- "validation acc increased (0.538760 ---> 0.538760)\n",
- "epoch 15: train loss = 0.48642194915784853, l1loss = 0.11419245105238635, train acc = 0.756634952428643,\n",
- "val_loss = 0.6040798605874528, val_acc = 0.5736434108527132\n",
- "\n",
- "epoch: 16\n",
- "validation loss decreased (0.634658 ---> 0.629792), val_acc = 0.5581395348837209\n",
- "validation acc increased (0.538760 ---> 0.558140)\n",
- "epoch 16: train loss = 0.4577500793297528, l1loss = 0.11169700107397768, train acc = 0.7791687531296946,\n",
- "val_loss = 0.9656664484231047, val_acc = 0.5\n",
- "\n",
- "epoch: 17\n",
- "validation acc increased (0.558140 ---> 0.596899)\n",
- "epoch 17: train loss = 0.42826039845130415, l1loss = 0.10948925794782673, train acc = 0.7926890335503255,\n",
- "val_loss = 0.8292172735975695, val_acc = 0.5348837209302325\n",
- "\n",
- "epoch: 18\n",
- "epoch 18: train loss = 0.39249560578680065, l1loss = 0.10765625636429564, train acc = 0.8232348522784176,\n",
- "val_loss = 0.8668296665184257, val_acc = 0.6511627906976745\n",
- "\n",
- "epoch: 19\n",
- "validation acc increased (0.596899 ---> 0.643411)\n",
- "epoch 19: train loss = 0.3674749865732494, l1loss = 0.10613021715677078, train acc = 0.8312468703054582,\n",
- "val_loss = 1.4543597975731357, val_acc = 0.5193798449612403\n",
- "\n",
- "epoch: 20\n",
- "epoch 20: train loss = 0.347654858826994, l1loss = 0.10473605919523005, train acc = 0.8357536304456685,\n",
- "val_loss = 2.475660421223156, val_acc = 0.6395348837209303\n",
- "\n",
- "epoch: 21\n",
- "epoch 21: train loss = 0.3268627472441974, l1loss = 0.10326415842094001, train acc = 0.85678517776665,\n",
- "val_loss = 0.8104829621869464, val_acc = 0.5813953488372093\n",
- "\n",
- "epoch: 22\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "epoch 22: train loss = 0.35131114540723307, l1loss = 0.10210657698411493, train acc = 0.8307461191787682,\n",
- "val_loss = 1.7831758935322133, val_acc = 0.6356589147286822\n",
- "\n",
- "epoch: 23\n",
- "epoch 23: train loss = 0.3259180520974103, l1loss = 0.10085294403146969, train acc = 0.8487731597396094,\n",
- "val_loss = 1.6435921807275262, val_acc = 0.5155038759689923\n",
- "\n",
- "epoch: 24\n",
- "epoch 24: train loss = 0.2837501486421049, l1loss = 0.09979496815306697, train acc = 0.871807711567351,\n",
- "val_loss = 2.8983472193683344, val_acc = 0.6395348837209303\n",
- "\n",
- "epoch: 25\n",
- "validation acc increased (0.643411 ---> 0.643411)\n",
- "epoch 25: train loss = 0.24991583432414619, l1loss = 0.09866795351968506, train acc = 0.8953430145217827,\n",
- "val_loss = 1.3234346984892853, val_acc = 0.5348837209302325\n",
- "\n",
- "epoch: 26\n",
- "epoch 26: train loss = 0.23671170943787412, l1loss = 0.09776017363437367, train acc = 0.901352028042063,\n",
- "val_loss = 1.7247427341549895, val_acc = 0.5658914728682171\n",
- "\n",
- "epoch: 27\n",
- "epoch 27: train loss = 0.233533194886486, l1loss = 0.09673886006706407, train acc = 0.9018527791687532,\n",
- "val_loss = 1.1423584624778393, val_acc = 0.5930232558139535\n",
- "\n",
- "epoch: 28\n",
- "epoch 28: train loss = 0.20981159080967166, l1loss = 0.09592169590768183, train acc = 0.9148723084626941,\n",
- "val_loss = 5.035286263902058, val_acc = 0.6395348837209303\n",
- "\n",
- "epoch: 29\n",
- "validation acc increased (0.643411 ---> 0.651163)\n",
- "epoch 29: train loss = 0.18721183538287653, l1loss = 0.09532376013520842, train acc = 0.9303955933900852,\n",
- "val_loss = 1.7854539109754932, val_acc = 0.624031007751938\n",
- "\n",
- "epoch: 30\n",
- "epoch 30: train loss = 0.21521277558552843, l1loss = 0.09483509715208126, train acc = 0.9048572859288934,\n",
- "val_loss = 2.0886588960654975, val_acc = 0.5387596899224806\n",
- "\n",
- "epoch: 31\n",
- "epoch 31: train loss = 0.16117875964479203, l1loss = 0.09430676077492069, train acc = 0.9409113670505759,\n",
- "val_loss = 1.3893336746119713, val_acc = 0.6007751937984496\n",
- "\n",
- "epoch: 32\n",
- "epoch 32: train loss = 0.18729717622832412, l1loss = 0.0941365569135996, train acc = 0.9243865798698047,\n",
- "val_loss = 5.178411402443583, val_acc = 0.5116279069767442\n",
- "\n",
- "epoch: 33\n",
- "epoch 33: train loss = 0.15735476534546883, l1loss = 0.0937146420292575, train acc = 0.9419128693039559,\n",
- "val_loss = 2.165926408398059, val_acc = 0.627906976744186\n",
- "\n",
- "epoch: 34\n",
- "epoch 34: train loss = 0.13561190845343132, l1loss = 0.09294714458924744, train acc = 0.9489233850776164,\n",
- "val_loss = 2.862183544986932, val_acc = 0.5465116279069767\n",
- "\n",
- "epoch: 35\n",
- "epoch 35: train loss = 0.11557932168946722, l1loss = 0.09219135233594229, train acc = 0.9584376564847271,\n",
- "val_loss = 3.1061169494109504, val_acc = 0.627906976744186\n",
- "\n",
- "epoch: 36\n",
- "epoch 36: train loss = 0.11341282006019225, l1loss = 0.09179794447730646, train acc = 0.9614421632448673,\n",
- "val_loss = 2.973599456843122, val_acc = 0.562015503875969\n",
- "\n",
- "epoch: 37\n",
- "epoch 37: train loss = 0.13424175273688602, l1loss = 0.09140255465230526, train acc = 0.9439158738107161,\n",
- "val_loss = 1.4640861449315565, val_acc = 0.5852713178294574\n",
- "\n",
- "epoch: 38\n",
- "epoch 38: train loss = 0.09078391378350896, l1loss = 0.09077151980051472, train acc = 0.9699549323985979,\n",
- "val_loss = 1.923348656458448, val_acc = 0.6201550387596899\n",
- "\n",
- "epoch: 39\n",
- "epoch 39: train loss = 0.1380670597001679, l1loss = 0.09045900325731569, train acc = 0.9444166249374061,\n",
- "val_loss = 3.3878318514934804, val_acc = 0.5465116279069767\n",
- "\n",
- "epoch: 40\n",
- "epoch 40: train loss = 0.12143602777733227, l1loss = 0.09066373266514743, train acc = 0.9579369053580371,\n",
- "val_loss = 2.142883213915566, val_acc = 0.5038759689922481\n",
- "\n",
- "epoch: 41\n",
- "epoch 41: train loss = 0.11649664227641339, l1loss = 0.09069241947854945, train acc = 0.9584376564847271,\n",
- "val_loss = 2.8424580928891205, val_acc = 0.6317829457364341\n",
- "\n",
- "epoch: 42\n",
- "epoch 42: train loss = 0.11953155570288092, l1loss = 0.09055208012230348, train acc = 0.9534301452178268,\n",
- "val_loss = 3.844284035438715, val_acc = 0.5503875968992248\n",
- "\n",
- "epoch: 43\n",
- "epoch 43: train loss = 0.1391850914218513, l1loss = 0.09037555076282623, train acc = 0.9514271407110666,\n",
- "val_loss = 3.0129795486843864, val_acc = 0.5503875968992248\n",
- "\n",
- "epoch: 44\n",
- "epoch 44: train loss = 0.11035888379791348, l1loss = 0.09016522343569418, train acc = 0.9589384076114171,\n",
- "val_loss = 2.5781839115675145, val_acc = 0.5658914728682171\n",
- "\n",
- "epoch: 45\n",
- "epoch 45: train loss = 0.09197489871015295, l1loss = 0.08964754919753291, train acc = 0.9669504256384577,\n",
- "val_loss = 2.4371593719304996, val_acc = 0.562015503875969\n",
- "\n",
- "epoch: 46\n",
- "epoch 46: train loss = 0.0579324397978988, l1loss = 0.08879983621758106, train acc = 0.9859789684526791,\n",
- "val_loss = 2.2613389057706494, val_acc = 0.5465116279069767\n",
- "\n",
- "epoch: 47\n",
- "epoch 47: train loss = 0.0461813348439197, l1loss = 0.08810112920473621, train acc = 0.9864797195793691,\n",
- "val_loss = 3.979649772939756, val_acc = 0.6317829457364341\n",
- "\n",
- "epoch: 48\n",
- "epoch 48: train loss = 0.03998827998063298, l1loss = 0.0878447880879843, train acc = 0.9929894842263395,\n",
- "val_loss = 5.175023500309434, val_acc = 0.627906976744186\n",
- "\n",
- "epoch: 49\n",
- "epoch 49: train loss = 0.04049086723706098, l1loss = 0.08717421617995039, train acc = 0.9934902353530295,\n",
- "val_loss = 3.0130414075629655, val_acc = 0.6434108527131783\n",
- "\n",
- "epoch: 50\n",
- "validation acc increased (0.651163 ---> 0.655039)\n",
- "epoch 50: train loss = 0.03793664645537936, l1loss = 0.08660745584537939, train acc = 0.9914872308462694,\n",
- "val_loss = 2.4097132146820543, val_acc = 0.5697674418604651\n",
- "\n",
- "epoch: 51\n",
- "epoch 51: train loss = 0.034598207665522455, l1loss = 0.0860587003667413, train acc = 0.9924887330996495,\n",
- "val_loss = 2.3104088583657907, val_acc = 0.6472868217054264\n",
- "\n",
- "epoch: 52\n",
- "epoch 52: train loss = 0.029838849054010557, l1loss = 0.08562057309736892, train acc = 0.9964947421131698,\n",
- "val_loss = 5.1767294721085895, val_acc = 0.624031007751938\n",
- "\n",
- "epoch: 53\n",
- "epoch 53: train loss = 0.023908328355425706, l1loss = 0.08518237430689989, train acc = 0.9964947421131698,\n",
- "val_loss = 3.105967559093653, val_acc = 0.6356589147286822\n",
- "\n",
- "epoch: 54\n",
- "epoch 54: train loss = 0.023712221452846013, l1loss = 0.08467714752280002, train acc = 0.9959939909864797,\n",
- "val_loss = 2.627571163944496, val_acc = 0.6511627906976745\n",
- "\n",
- "epoch: 55\n",
- "epoch 55: train loss = 0.02119624899009137, l1loss = 0.084207254313175, train acc = 0.9979969954932398,\n",
- "val_loss = 3.670362725738407, val_acc = 0.6317829457364341\n",
- "\n",
- "epoch: 56\n",
- "epoch 56: train loss = 0.02971064273695171, l1loss = 0.08382122342847381, train acc = 0.9934902353530295,\n",
- "val_loss = 4.768853254883384, val_acc = 0.6317829457364341\n",
- "\n",
- "epoch: 57\n",
- "epoch 57: train loss = 0.026361721558016006, l1loss = 0.08356228320447576, train acc = 0.9959939909864797,\n",
- "val_loss = 3.9932817562605982, val_acc = 0.627906976744186\n",
- "\n",
- "epoch: 58\n",
- "epoch 58: train loss = 0.018343641952941223, l1loss = 0.08313654303461179, train acc = 0.9984977466199298,\n",
- "val_loss = 2.416079253147811, val_acc = 0.5930232558139535\n",
- "\n",
- "epoch: 59\n",
- "epoch 59: train loss = 0.0166573378317614, l1loss = 0.08268285322604206, train acc = 0.9984977466199298,\n",
- "val_loss = 3.361119598383408, val_acc = 0.627906976744186\n",
- "\n",
- "epoch: 60\n",
- "epoch 60: train loss = 0.018886090959545875, l1loss = 0.08225083055649032, train acc = 0.9979969954932398,\n",
- "val_loss = 4.036783265050992, val_acc = 0.624031007751938\n",
- "\n",
- "epoch: 61\n",
- "epoch 61: train loss = 0.015808429777935618, l1loss = 0.08185882754082911, train acc = 0.99899849774662,\n",
- "val_loss = 2.6130907313768255, val_acc = 0.6434108527131783\n",
- "\n",
- "epoch: 62\n",
- "epoch 62: train loss = 0.014593462708313345, l1loss = 0.08146914296527474, train acc = 0.99899849774662,\n",
- "val_loss = 2.4723181946333064, val_acc = 0.6124031007751938\n",
- "\n",
- "epoch: 63\n",
- "epoch 63: train loss = 0.016414859970592235, l1loss = 0.08114846524188801, train acc = 0.9979969954932398,\n",
- "val_loss = 3.099476133206094, val_acc = 0.5813953488372093\n",
- "\n",
- "epoch: 64\n",
- "epoch 64: train loss = 0.018514045871398748, l1loss = 0.08086007390251504, train acc = 0.9969954932398598,\n",
- "val_loss = 4.376173152480015, val_acc = 0.624031007751938\n",
- "\n",
- "epoch: 65\n",
- "epoch 65: train loss = 0.0247556148073601, l1loss = 0.08061432335889512, train acc = 0.9954932398597897,\n",
- "val_loss = 2.659250307452771, val_acc = 0.6356589147286822\n",
- "\n",
- "epoch: 66\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "epoch 66: train loss = 0.020378888247494526, l1loss = 0.08030128979688891, train acc = 0.9954932398597897,\n",
- "val_loss = 3.7070143610932105, val_acc = 0.562015503875969\n",
- "\n",
- "epoch: 67\n",
- "epoch 67: train loss = 0.012092748920829163, l1loss = 0.07994780938701147, train acc = 0.99899849774662,\n",
- "val_loss = 3.0426118189050246, val_acc = 0.5581395348837209\n",
- "\n",
- "epoch: 68\n",
- "epoch 68: train loss = 0.011227833565080171, l1loss = 0.07961520851852301, train acc = 1.0,\n",
- "val_loss = 2.48265065576217, val_acc = 0.6201550387596899\n",
- "\n",
- "!!! overfitted !!!\n",
- "[1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1]\n",
- "[0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0]\n",
- "early stoping results:\n",
- "\t [0.6041666666666666, 0.5572916666666666, 0.5208333333333334, 0.5078534031413613, 0.5445026178010471]\n",
- "output = [0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0,\n",
- " 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0,\n",
- " 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0,\n",
- " 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1,\n",
- " 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0,\n",
- " 0, 0, 0, 0, 1, 0, 0, 1])\n",
- "output = [1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,\n",
- " 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1,\n",
- " 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0,\n",
- " 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0,\n",
- " 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0,\n",
- " 0, 0, 1, 1, 1, 0, 1, 0])\n",
- "output = [1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0]\n",
- "label = tensor([1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1,\n",
- " 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1,\n",
- " 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1,\n",
- " 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0,\n",
- " 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0,\n",
- " 1, 0, 0, 1, 0, 0, 1, 1])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0]\n",
- "label = tensor([1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1,\n",
- " 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0,\n",
- " 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0,\n",
- " 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0,\n",
- " 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1,\n",
- " 1, 0, 1, 0, 0, 0, 0, 0])\n",
- "output = [1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0]\n",
- "label = tensor([1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0,\n",
- " 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0,\n",
- " 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1,\n",
- " 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1,\n",
- " 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0,\n",
- " 1, 1, 0, 0, 1, 1, 1, 0])\n",
- "output = [1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0,\n",
- " 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0,\n",
- " 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1,\n",
- " 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1,\n",
- " 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1,\n",
- " 1, 1, 0, 0, 1, 1, 0, 1])\n",
- "output = [0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0]\n",
- "label = tensor([0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1,\n",
- " 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1,\n",
- " 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1,\n",
- " 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0,\n",
- " 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1,\n",
- " 1, 1, 0, 0, 1, 0, 1, 0])\n",
- "output = [1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1,\n",
- " 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1,\n",
- " 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,\n",
- " 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1,\n",
- " 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1,\n",
- " 0, 1, 0, 1, 0, 0, 1, 1])\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0]\n",
- "label = tensor([1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1,\n",
- " 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1,\n",
- " 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0,\n",
- " 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1,\n",
- " 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1,\n",
- " 0, 1, 0, 1, 0, 0, 0, 1])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0,\n",
- " 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0,\n",
- " 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1,\n",
- " 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0,\n",
- " 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1,\n",
- " 0, 1, 1, 0, 1, 1, 0, 0])\n",
- "output = [0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0,\n",
- " 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1,\n",
- " 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0,\n",
- " 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0,\n",
- " 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1,\n",
- " 1, 1, 1, 0, 0, 1, 0, 0])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1,\n",
- " 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1,\n",
- " 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1,\n",
- " 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1,\n",
- " 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0,\n",
- " 1, 1, 1, 0, 0, 1, 1, 0])\n",
- "output = [1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0]\n",
- "label = tensor([1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0,\n",
- " 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1,\n",
- " 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0,\n",
- " 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1,\n",
- " 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1,\n",
- " 0, 0, 0, 1, 1, 0, 0, 1])\n",
- "output = [1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1,\n",
- " 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1,\n",
- " 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0,\n",
- " 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1,\n",
- " 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1,\n",
- " 0, 1, 0, 1, 0, 1, 1, 1])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0,\n",
- " 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0,\n",
- " 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0,\n",
- " 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1,\n",
- " 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0,\n",
- " 0, 0, 1, 1, 1, 0, 0, 1])\n",
- "output = [1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0,\n",
- " 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0,\n",
- " 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0,\n",
- " 0, 1, 1, 1, 1])\n",
- "\t [0.6673376950176145, 0.6722054380664653, 0.6802005012531328, 0.6826347305389222, 0.7250876314471708]\n",
- "[0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1]\n",
- "[0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1]\n",
- "full train results:\n",
- "\t [0.59375, 0.5885416666666666, 0.6354166666666666, 0.612565445026178, 0.6387434554973822]\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "\t [0.9949672873678913, 0.9974823766364552, 0.8666666666666667, 0.8393213572854291, 0.99949924887331]\n",
- "[0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0]\n",
- "[1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0]\n",
- "best accs results:\n",
- "\t [0.59375, 0.5625, 0.6354166666666666, 0.6282722513089005, 0.6020942408376964]\n",
- "\t [0.8037242073477604, 0.8670694864048338, 0.4967418546365915, 0.5489021956087824, 0.9744616925388082]\n",
- "[0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1]\n",
- "-----------------------------Fold 6---------------\n",
- "preparing dataloaders...\n",
- "torch.Size([77, 7, 9, 20])\n",
- "coef when 0 > 1 1\n",
- "creating model...\n",
- "calculating total steps...\n",
- "epoch: 1\n",
- "validation loss decreased (inf ---> 0.691554), val_acc = 0.6511627906976745\n",
- "validation acc increased (0.000000 ---> 0.651163)\n",
- "validation loss decreased (0.691554 ---> 0.690849), val_acc = 0.6511627906976745\n",
- "validation acc increased (0.651163 ---> 0.651163)\n",
- "epoch 1: train loss = 0.6948271975517273, l1loss = 0.13827931582927705, train acc = 0.4485,\n",
- "val_loss = 0.6928248095882031, val_acc = 0.5968992248062015\n",
- "\n",
- "epoch: 2\n",
- "epoch 2: train loss = 0.672557541847229, l1loss = 0.137843541264534, train acc = 0.589,\n",
- "val_loss = 0.6987676075262617, val_acc = 0.3643410852713178\n",
- "\n",
- "epoch: 3\n",
- "epoch 3: train loss = 0.6535676913261413, l1loss = 0.13731942212581635, train acc = 0.636,\n",
- "val_loss = 0.6965285705965619, val_acc = 0.4728682170542636\n",
- "\n",
- "epoch: 4\n",
- "validation loss decreased (0.690849 ---> 0.677476), val_acc = 0.4883720930232558\n",
- "epoch 4: train loss = 0.6372407693862915, l1loss = 0.13664585983753205, train acc = 0.65,\n",
- "val_loss = 0.6638829569483913, val_acc = 0.5038759689922481\n",
- "\n",
- "epoch: 5\n",
- "validation loss decreased (0.677476 ---> 0.661241), val_acc = 0.5\n",
- "validation loss decreased (0.661241 ---> 0.639020), val_acc = 0.5116279069767442\n",
- "epoch 5: train loss = 0.6211547412872315, l1loss = 0.13576398611068727, train acc = 0.6585,\n",
- "val_loss = 0.6298631182474683, val_acc = 0.5116279069767442\n",
- "\n",
- "epoch: 6\n",
- "validation loss decreased (0.639020 ---> 0.628438), val_acc = 0.5116279069767442\n",
- "validation loss decreased (0.628438 ---> 0.621201), val_acc = 0.5077519379844961\n",
- "epoch 6: train loss = 0.6071342725753784, l1loss = 0.1346161276102066, train acc = 0.661,\n",
- "val_loss = 0.6213997648667919, val_acc = 0.5116279069767442\n",
- "\n",
- "epoch: 7\n",
- "validation loss decreased (0.621201 ---> 0.621017), val_acc = 0.5193798449612403\n",
- "epoch 7: train loss = 0.5958632040023804, l1loss = 0.13315785372257233, train acc = 0.6715,\n",
- "val_loss = 0.6230281388112741, val_acc = 0.5232558139534884\n",
- "\n",
- "epoch: 8\n",
- "epoch 8: train loss = 0.5865020289421081, l1loss = 0.13134343349933625, train acc = 0.6755,\n",
- "val_loss = 0.6317427439810058, val_acc = 0.5232558139534884\n",
- "\n",
- "epoch: 9\n",
- "epoch 9: train loss = 0.578163959980011, l1loss = 0.12913685393333435, train acc = 0.683,\n",
- "val_loss = 0.6439248508261156, val_acc = 0.5232558139534884\n",
- "\n",
- "epoch: 10\n",
- "epoch 10: train loss = 0.5690374765396118, l1loss = 0.12657931053638458, train acc = 0.687,\n",
- "val_loss = 0.6405774336452632, val_acc = 0.5271317829457365\n",
- "\n",
- "epoch: 11\n",
- "epoch 11: train loss = 0.5578733282089233, l1loss = 0.12375886642932891, train acc = 0.6915,\n",
- "val_loss = 0.6485154487827952, val_acc = 0.5271317829457365\n",
- "\n",
- "epoch: 12\n",
- "epoch 12: train loss = 0.547102264881134, l1loss = 0.12075013303756714, train acc = 0.7035,\n",
- "val_loss = 0.5815267077712125, val_acc = 0.5968992248062015\n",
- "\n",
- "epoch: 13\n",
- "validation loss decreased (0.621017 ---> 0.578556), val_acc = 0.6201550387596899\n",
- "epoch 13: train loss = 0.5293013830184936, l1loss = 0.11762005990743638, train acc = 0.715,\n",
- "val_loss = 0.622708785210469, val_acc = 0.5310077519379846\n",
- "\n",
- "epoch: 14\n",
- "epoch 14: train loss = 0.5110176424980164, l1loss = 0.11452219623327255, train acc = 0.736,\n",
- "val_loss = 0.5994160191495289, val_acc = 0.6124031007751938\n",
- "\n",
- "epoch: 15\n",
- "validation acc increased (0.651163 ---> 0.651163)\n",
- "epoch 15: train loss = 0.47669880533218384, l1loss = 0.11159368234872818, train acc = 0.7645,\n",
- "val_loss = 0.7053935582092566, val_acc = 0.6550387596899225\n",
- "\n",
- "epoch: 16\n",
- "validation acc increased (0.651163 ---> 0.655039)\n",
- "epoch 16: train loss = 0.4439049696922302, l1loss = 0.10900945609807969, train acc = 0.7855,\n",
- "val_loss = 0.8016450599182484, val_acc = 0.6317829457364341\n",
- "\n",
- "epoch: 17\n",
- "epoch 17: train loss = 0.4089318103790283, l1loss = 0.10682100701332092, train acc = 0.808,\n",
- "val_loss = 1.34718641343476, val_acc = 0.6472868217054264\n",
- "\n",
- "epoch: 18\n",
- "validation acc increased (0.655039 ---> 0.658915)\n",
- "epoch 18: train loss = 0.38175707817077636, l1loss = 0.10503107064962387, train acc = 0.819,\n",
- "val_loss = 1.51498044184012, val_acc = 0.6472868217054264\n",
- "\n",
- "epoch: 19\n",
- "epoch 19: train loss = 0.3507974019050598, l1loss = 0.10347136110067368, train acc = 0.843,\n",
- "val_loss = 1.8761492145153904, val_acc = 0.6472868217054264\n",
- "\n",
- "epoch: 20\n",
- "epoch 20: train loss = 0.3973570125102997, l1loss = 0.10228700506687165, train acc = 0.7975,\n",
- "val_loss = 3.359361020169517, val_acc = 0.6550387596899225\n",
- "\n",
- "epoch: 21\n",
- "epoch 21: train loss = 0.3766449837684631, l1loss = 0.10100592476129532, train acc = 0.8235,\n",
- "val_loss = 0.8532644796741101, val_acc = 0.6085271317829457\n",
- "\n",
- "epoch: 22\n",
- "epoch 22: train loss = 0.30714254689216614, l1loss = 0.09982918077707291, train acc = 0.868,\n",
- "val_loss = 4.533395921132823, val_acc = 0.4689922480620155\n",
- "\n",
- "epoch: 23\n",
- "epoch 23: train loss = 0.2730036299228668, l1loss = 0.09869706439971924, train acc = 0.8785,\n",
- "val_loss = 1.673116033391435, val_acc = 0.5348837209302325\n",
- "\n",
- "epoch: 24\n",
- "epoch 24: train loss = 0.252084813952446, l1loss = 0.09814584302902221, train acc = 0.894,\n",
- "val_loss = 2.988402714100919, val_acc = 0.6511627906976745\n",
- "\n",
- "epoch: 25\n",
- "epoch 25: train loss = 0.2553533761501312, l1loss = 0.09725052165985107, train acc = 0.8885,\n",
- "val_loss = 4.158237235490666, val_acc = 0.6472868217054264\n",
- "\n",
- "epoch: 26\n",
- "epoch 26: train loss = 0.22806144893169403, l1loss = 0.09632847493886948, train acc = 0.9095,\n",
- "val_loss = 1.619036548709598, val_acc = 0.6201550387596899\n",
- "\n",
- "epoch: 27\n",
- "epoch 27: train loss = 0.20052375197410582, l1loss = 0.0954943853020668, train acc = 0.9185,\n",
- "val_loss = 1.5913666817106942, val_acc = 0.5852713178294574\n",
- "\n",
- "epoch: 28\n",
- "epoch 28: train loss = 0.19252619695663453, l1loss = 0.09492724579572677, train acc = 0.92,\n",
- "val_loss = 2.3363786150318706, val_acc = 0.6162790697674418\n",
- "\n",
- "epoch: 29\n",
- "epoch 29: train loss = 0.19474881601333618, l1loss = 0.09441870188713074, train acc = 0.9265,\n",
- "val_loss = 3.430836339329564, val_acc = 0.6511627906976745\n",
- "\n",
- "epoch: 30\n",
- "epoch 30: train loss = 0.21158030927181243, l1loss = 0.09452930903434753, train acc = 0.907,\n",
- "val_loss = 8.181018297062364, val_acc = 0.4496124031007752\n",
- "\n",
- "epoch: 31\n",
- "epoch 31: train loss = 0.15486617839336395, l1loss = 0.0938582792878151, train acc = 0.9455,\n",
- "val_loss = 1.8784414095471995, val_acc = 0.562015503875969\n",
- "\n",
- "epoch: 32\n",
- "epoch 32: train loss = 0.12229575896263123, l1loss = 0.09318001997470855, train acc = 0.96,\n",
- "val_loss = 5.165876115015311, val_acc = 0.6472868217054264\n",
- "\n",
- "epoch: 33\n",
- "epoch 33: train loss = 0.11896066904067994, l1loss = 0.09262535566091537, train acc = 0.962,\n",
- "val_loss = 3.3326926933702574, val_acc = 0.6472868217054264\n",
- "\n",
- "epoch: 34\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "epoch 34: train loss = 0.1430732717514038, l1loss = 0.09208029317855836, train acc = 0.947,\n",
- "val_loss = 3.7897773609461005, val_acc = 0.6434108527131783\n",
- "\n",
- "epoch: 35\n",
- "epoch 35: train loss = 0.10793969309329987, l1loss = 0.09178146147727967, train acc = 0.9635,\n",
- "val_loss = 3.7946822282897408, val_acc = 0.6434108527131783\n",
- "\n",
- "epoch: 36\n",
- "epoch 36: train loss = 0.1086569909453392, l1loss = 0.09210293853282929, train acc = 0.964,\n",
- "val_loss = 3.237369472117618, val_acc = 0.6472868217054264\n",
- "\n",
- "epoch: 37\n",
- "epoch 37: train loss = 0.09874134624004365, l1loss = 0.09163604235649109, train acc = 0.959,\n",
- "val_loss = 10.602950192237085, val_acc = 0.4573643410852713\n",
- "\n",
- "epoch: 38\n",
- "epoch 38: train loss = 0.10966753149032593, l1loss = 0.09086926692724227, train acc = 0.957,\n",
- "val_loss = 6.996053880499315, val_acc = 0.6472868217054264\n",
- "\n",
- "epoch: 39\n",
- "epoch 39: train loss = 0.08259383499622346, l1loss = 0.09028866285085678, train acc = 0.977,\n",
- "val_loss = 2.8690995652546256, val_acc = 0.6395348837209303\n",
- "\n",
- "epoch: 40\n",
- "epoch 40: train loss = 0.07870484691858291, l1loss = 0.08984918028116226, train acc = 0.971,\n",
- "val_loss = 2.2550083565157513, val_acc = 0.6317829457364341\n",
- "\n",
- "epoch: 41\n",
- "epoch 41: train loss = 0.08104722368717193, l1loss = 0.08948963952064515, train acc = 0.9775,\n",
- "val_loss = 2.7512668971867527, val_acc = 0.624031007751938\n",
- "\n",
- "epoch: 42\n",
- "epoch 42: train loss = 0.061936515539884565, l1loss = 0.08921354949474335, train acc = 0.9805,\n",
- "val_loss = 3.8835250869277838, val_acc = 0.6395348837209303\n",
- "\n",
- "epoch: 43\n",
- "epoch 43: train loss = 0.04977551221847534, l1loss = 0.08895368778705597, train acc = 0.9865,\n",
- "val_loss = 3.912529797517052, val_acc = 0.5387596899224806\n",
- "\n",
- "epoch: 44\n",
- "epoch 44: train loss = 0.03967177218198776, l1loss = 0.08825107955932618, train acc = 0.9905,\n",
- "val_loss = 3.231899913891341, val_acc = 0.6085271317829457\n",
- "\n",
- "epoch: 45\n",
- "epoch 45: train loss = 0.048251408934593204, l1loss = 0.08825813591480255, train acc = 0.9865,\n",
- "val_loss = 3.3065699320430904, val_acc = 0.6434108527131783\n",
- "\n",
- "epoch: 46\n",
- "epoch 46: train loss = 0.04396019262075424, l1loss = 0.0874557437300682, train acc = 0.987,\n",
- "val_loss = 2.2364588347024488, val_acc = 0.6124031007751938\n",
- "\n",
- "epoch: 47\n",
- "epoch 47: train loss = 0.02501585677266121, l1loss = 0.08677183699607849, train acc = 0.998,\n",
- "val_loss = 3.7413160227989968, val_acc = 0.5387596899224806\n",
- "\n",
- "epoch: 48\n",
- "epoch 48: train loss = 0.024205303311347962, l1loss = 0.08615065145492554, train acc = 0.9965,\n",
- "val_loss = 4.811114255772081, val_acc = 0.6434108527131783\n",
- "\n",
- "epoch: 49\n",
- "epoch 49: train loss = 0.018718741066753863, l1loss = 0.0853805913925171, train acc = 0.9985,\n",
- "val_loss = 3.1942446878714157, val_acc = 0.6201550387596899\n",
- "\n",
- "epoch: 50\n",
- "epoch 50: train loss = 0.01704993227124214, l1loss = 0.0846337440609932, train acc = 0.9975,\n",
- "val_loss = 2.7979257494904273, val_acc = 0.5968992248062015\n",
- "\n",
- "epoch: 51\n",
- "epoch 51: train loss = 0.029849595367908478, l1loss = 0.08401931124925613, train acc = 0.9905,\n",
- "val_loss = 3.343561839687732, val_acc = 0.5387596899224806\n",
- "\n",
- "epoch: 52\n",
- "epoch 52: train loss = 0.02539513537287712, l1loss = 0.08352306187152862, train acc = 0.995,\n",
- "val_loss = 4.478786712469057, val_acc = 0.6472868217054264\n",
- "\n",
- "epoch: 53\n",
- "validation acc increased (0.658915 ---> 0.658915)\n",
- "epoch 53: train loss = 0.02735452988743782, l1loss = 0.08342606168985367, train acc = 0.9925,\n",
- "val_loss = 3.296960321271272, val_acc = 0.6162790697674418\n",
- "\n",
- "epoch: 54\n",
- "epoch 54: train loss = 0.03272683323174715, l1loss = 0.08329540795087814, train acc = 0.994,\n",
- "val_loss = 17.770323502000913, val_acc = 0.5\n",
- "\n",
- "epoch: 55\n",
- "epoch 55: train loss = 0.029803431689739226, l1loss = 0.08343729907274246, train acc = 0.996,\n",
- "val_loss = 7.326728362445683, val_acc = 0.6472868217054264\n",
- "\n",
- "epoch: 56\n",
- "epoch 56: train loss = 0.02619129529595375, l1loss = 0.08289635759592057, train acc = 0.996,\n",
- "val_loss = 3.209116557714089, val_acc = 0.562015503875969\n",
- "\n",
- "epoch: 57\n",
- "epoch 57: train loss = 0.023176433131098746, l1loss = 0.082220523416996, train acc = 0.9955,\n",
- "val_loss = 3.3258880940518636, val_acc = 0.5387596899224806\n",
- "\n",
- "epoch: 58\n",
- "epoch 58: train loss = 0.014486284106969833, l1loss = 0.08156805974245071, train acc = 0.9995,\n",
- "val_loss = 4.771162561667982, val_acc = 0.5193798449612403\n",
- "\n",
- "epoch: 59\n",
- "epoch 59: train loss = 0.018196347802877426, l1loss = 0.08102932274341583, train acc = 0.9965,\n",
- "val_loss = 3.726594481357308, val_acc = 0.6317829457364341\n",
- "\n",
- "epoch: 60\n",
- "epoch 60: train loss = 0.012295396901667118, l1loss = 0.0804902771115303, train acc = 0.9995,\n",
- "val_loss = 3.2824748639073915, val_acc = 0.6124031007751938\n",
- "\n",
- "epoch: 61\n",
- "epoch 61: train loss = 0.014604092702269554, l1loss = 0.08002961206436157, train acc = 0.9985,\n",
- "val_loss = 3.5945028610412004, val_acc = 0.627906976744186\n",
- "\n",
- "epoch: 62\n",
- "epoch 62: train loss = 0.011242052119225264, l1loss = 0.07957109451293945, train acc = 1.0,\n",
- "val_loss = 4.122840467349503, val_acc = 0.6395348837209303\n",
- "\n",
- "!!! overfitted !!!\n",
- "[1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1]\n",
- "[1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1]\n",
- "early stoping results:\n",
- "\t [0.6041666666666666, 0.5572916666666666, 0.5208333333333334, 0.5078534031413613, 0.5445026178010471, 0.6073298429319371]\n",
- "output = [0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0]\n",
- "label = tensor([0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1,\n",
- " 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0,\n",
- " 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0,\n",
- " 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1,\n",
- " 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0,\n",
- " 1, 1, 0, 1, 1, 0, 0, 1])\n",
- "output = [0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0]\n",
- "label = tensor([0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1,\n",
- " 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0,\n",
- " 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1,\n",
- " 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1,\n",
- " 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1,\n",
- " 0, 0, 1, 1, 0, 0, 0, 0])\n",
- "output = [0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0]\n",
- "label = tensor([0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1,\n",
- " 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0,\n",
- " 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1,\n",
- " 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0,\n",
- " 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1,\n",
- " 0, 1, 1, 0, 1, 1, 1, 0])\n",
- "output = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0]\n",
- "label = tensor([0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0,\n",
- " 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0,\n",
- " 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1,\n",
- " 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,\n",
- " 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1,\n",
- " 1, 0, 0, 1, 1, 0, 1, 1])\n",
- "output = [1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1,\n",
- " 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1,\n",
- " 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0,\n",
- " 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1,\n",
- " 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,\n",
- " 0, 1, 0, 1, 1, 1, 0, 0])\n",
- "output = [1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0]\n",
- "label = tensor([1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0,\n",
- " 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0,\n",
- " 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0,\n",
- " 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1,\n",
- " 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1,\n",
- " 0, 0, 0, 1, 0, 1, 0, 1])\n",
- "output = [0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0,\n",
- " 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1,\n",
- " 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1,\n",
- " 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,\n",
- " 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1,\n",
- " 1, 0, 0, 0, 0, 1, 0, 1])\n",
- "output = [0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0]\n",
- "label = tensor([0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0,\n",
- " 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0,\n",
- " 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1,\n",
- " 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1,\n",
- " 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1,\n",
- " 0, 0, 1, 1, 1, 1, 1, 0])\n",
- "output = [0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0]\n",
- "label = tensor([0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0,\n",
- " 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1,\n",
- " 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1,\n",
- " 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1,\n",
- " 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0,\n",
- " 1, 0, 1, 0, 0, 1, 1, 1])\n",
- "output = [1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0]\n",
- "label = tensor([1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0,\n",
- " 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0,\n",
- " 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0,\n",
- " 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0,\n",
- " 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1,\n",
- " 1, 0, 1, 0, 0, 0, 0, 1])\n",
- "output = [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0]\n",
- "label = tensor([0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0,\n",
- " 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0,\n",
- " 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0,\n",
- " 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0,\n",
- " 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0,\n",
- " 0, 0, 1, 1, 1, 0, 0, 1])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0]\n",
- "label = tensor([0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1,\n",
- " 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1,\n",
- " 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0,\n",
- " 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0,\n",
- " 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0,\n",
- " 0, 1, 1, 1, 0, 1, 0, 1])\n",
- "output = [0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0]\n",
- "label = tensor([0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0,\n",
- " 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0,\n",
- " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1,\n",
- " 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1,\n",
- " 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0,\n",
- " 0, 0, 1, 0, 1, 0, 0, 1])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0]\n",
- "label = tensor([1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1,\n",
- " 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0,\n",
- " 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1,\n",
- " 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0,\n",
- " 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1,\n",
- " 1, 1, 0, 1, 0, 0, 0, 1])\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "output = [0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0]\n",
- "label = tensor([1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1,\n",
- " 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0,\n",
- " 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0,\n",
- " 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1,\n",
- " 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1,\n",
- " 0, 1, 0, 1, 0, 1, 0, 1])\n",
- "output = [0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0]\n",
- "label = tensor([0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1,\n",
- " 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1,\n",
- " 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0,\n",
- " 1, 1, 1, 1, 0, 1, 1, 0])\n",
- "\t [0.6673376950176145, 0.6722054380664653, 0.6802005012531328, 0.6826347305389222, 0.7250876314471708, 0.7]\n",
- "[1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1]\n",
- "[0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0]\n",
- "full train results:\n",
- "\t [0.59375, 0.5885416666666666, 0.6354166666666666, 0.612565445026178, 0.6387434554973822, 0.6492146596858639]\n",
- "\t [0.9949672873678913, 0.9974823766364552, 0.8666666666666667, 0.8393213572854291, 0.99949924887331, 0.638]\n",
- "[0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1]\n",
- "[0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0]\n",
- "best accs results:\n",
- "\t [0.59375, 0.5625, 0.6354166666666666, 0.6282722513089005, 0.6020942408376964, 0.6387434554973822]\n",
- "\t [0.8037242073477604, 0.8670694864048338, 0.4967418546365915, 0.5489021956087824, 0.9744616925388082, 0.519]\n",
- "[0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0]\n",
- "-----------------------------Fold 7---------------\n",
- "preparing dataloaders...\n",
- "torch.Size([80, 7, 9, 20])\n",
- "coef when 0 > 1 1\n",
- "creating model...\n",
- "calculating total steps...\n",
- "epoch: 1\n",
- "validation loss decreased (inf ---> 0.698563), val_acc = 0.3488372093023256\n",
- "validation acc increased (0.000000 ---> 0.348837)\n",
- "validation acc increased (0.348837 ---> 0.348837)\n",
- "epoch 1: train loss = 0.6735606533417041, l1loss = 0.13814079643100097, train acc = 0.6146926536731634,\n",
- "val_loss = 0.7325391187224277, val_acc = 0.3488372093023256\n",
- "\n",
- "epoch: 2\n",
- "validation acc increased (0.348837 ---> 0.348837)\n",
- "validation acc increased (0.348837 ---> 0.348837)\n",
- "epoch 2: train loss = 0.6563243215111481, l1loss = 0.13766097700012023, train acc = 0.6366816591704147,\n",
- "val_loss = 0.7539332402768986, val_acc = 0.3488372093023256\n",
- "\n",
- "epoch: 3\n",
- "validation acc increased (0.348837 ---> 0.348837)\n",
- "validation acc increased (0.348837 ---> 0.379845)\n",
- "epoch 3: train loss = 0.6414987822880094, l1loss = 0.13709656230692027, train acc = 0.6391804097951025,\n",
- "val_loss = 0.7422034661899242, val_acc = 0.43410852713178294\n",
- "\n",
- "epoch: 4\n",
- "validation acc increased (0.379845 ---> 0.441860)\n",
- "validation acc increased (0.441860 ---> 0.500000)\n",
- "epoch 4: train loss = 0.6271164830120607, l1loss = 0.13637894998187725, train acc = 0.6456771614192903,\n",
- "val_loss = 0.68995659388313, val_acc = 0.5271317829457365\n",
- "\n",
- "epoch: 5\n",
- "validation loss decreased (0.698563 ---> 0.686751), val_acc = 0.5271317829457365\n",
- "validation acc increased (0.500000 ---> 0.527132)\n",
- "validation loss decreased (0.686751 ---> 0.653364), val_acc = 0.5387596899224806\n",
- "validation acc increased (0.527132 ---> 0.538760)\n",
- "epoch 5: train loss = 0.6123478256542048, l1loss = 0.13543122615860678, train acc = 0.6561719140429785,\n",
- "val_loss = 0.6406823590744374, val_acc = 0.5426356589147286\n",
- "\n",
- "epoch: 6\n",
- "validation loss decreased (0.653364 ---> 0.639413), val_acc = 0.5426356589147286\n",
- "validation acc increased (0.538760 ---> 0.542636)\n",
- "validation loss decreased (0.639413 ---> 0.626855), val_acc = 0.5426356589147286\n",
- "validation acc increased (0.542636 ---> 0.542636)\n",
- "epoch 6: train loss = 0.5990132123216995, l1loss = 0.1341949236550848, train acc = 0.6686656671664168,\n",
- "val_loss = 0.6231413797814717, val_acc = 0.5426356589147286\n",
- "\n",
- "epoch: 7\n",
- "validation loss decreased (0.626855 ---> 0.624045), val_acc = 0.5503875968992248\n",
- "validation acc increased (0.542636 ---> 0.550388)\n",
- "validation loss decreased (0.624045 ---> 0.620769), val_acc = 0.5426356589147286\n",
- "epoch 7: train loss = 0.5881006238402158, l1loss = 0.13262152082767562, train acc = 0.6771614192903548,\n",
- "val_loss = 0.6177397381312163, val_acc = 0.5503875968992248\n",
- "\n",
- "epoch: 8\n",
- "validation loss decreased (0.620769 ---> 0.617393), val_acc = 0.5503875968992248\n",
- "validation acc increased (0.550388 ---> 0.550388)\n",
- "validation acc increased (0.550388 ---> 0.554264)\n",
- "epoch 8: train loss = 0.5790939586332952, l1loss = 0.13068488615563606, train acc = 0.6836581709145427,\n",
- "val_loss = 0.6233009136924448, val_acc = 0.562015503875969\n",
- "\n",
- "epoch: 9\n",
- "validation acc increased (0.554264 ---> 0.562016)\n",
- "validation acc increased (0.562016 ---> 0.565891)\n",
- "epoch 9: train loss = 0.5705378363574523, l1loss = 0.12834613378914161, train acc = 0.6906546726636682,\n",
- "val_loss = 0.6323157320188921, val_acc = 0.562015503875969\n",
- "\n",
- "epoch: 10\n",
- "epoch 10: train loss = 0.5613774951251371, l1loss = 0.125647922248944, train acc = 0.6946526736631684,\n",
- "val_loss = 0.6595740812693456, val_acc = 0.5503875968992248\n",
- "\n",
- "epoch: 11\n",
- "epoch 11: train loss = 0.5492115492584824, l1loss = 0.12263813326651904, train acc = 0.7066466766616691,\n",
- "val_loss = 0.6838388831116432, val_acc = 0.5503875968992248\n",
- "\n",
- "epoch: 12\n",
- "epoch 12: train loss = 0.5365634637287651, l1loss = 0.11940663491470227, train acc = 0.712143928035982,\n",
- "val_loss = 0.7116500216630078, val_acc = 0.5542635658914729\n",
- "\n",
- "epoch: 13\n",
- "epoch 13: train loss = 0.5217770957636988, l1loss = 0.11611300713937918, train acc = 0.719640179910045,\n",
- "val_loss = 0.8096898549286894, val_acc = 0.5426356589147286\n",
- "\n",
- "epoch: 14\n",
- "epoch 14: train loss = 0.49729873053077933, l1loss = 0.1128313705094274, train acc = 0.7466266866566716,\n",
- "val_loss = 0.7668287199597026, val_acc = 0.5465116279069767\n",
- "\n",
- "epoch: 15\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "epoch 15: train loss = 0.47340078341371117, l1loss = 0.1097236432168616, train acc = 0.7606196901549226,\n",
- "val_loss = 0.7797975073489107, val_acc = 0.5581395348837209\n",
- "\n",
- "epoch: 16\n",
- "validation acc increased (0.565891 ---> 0.577519)\n",
- "epoch 16: train loss = 0.44012472370098615, l1loss = 0.10696254843014708, train acc = 0.7786106946526736,\n",
- "val_loss = 0.738024488900059, val_acc = 0.6395348837209303\n",
- "\n",
- "epoch: 17\n",
- "validation acc increased (0.577519 ---> 0.643411)\n",
- "epoch 17: train loss = 0.41693725505809315, l1loss = 0.10464524865329176, train acc = 0.783608195902049,\n",
- "val_loss = 0.7353155968725219, val_acc = 0.5968992248062015\n",
- "\n",
- "epoch: 18\n",
- "epoch 18: train loss = 0.4004883180702406, l1loss = 0.10272827999166463, train acc = 0.8055972013993004,\n",
- "val_loss = 1.211284273354582, val_acc = 0.5387596899224806\n",
- "\n",
- "epoch: 19\n",
- "epoch 19: train loss = 0.36191707039284504, l1loss = 0.10097880754945637, train acc = 0.8250874562718641,\n",
- "val_loss = 2.0748814187308615, val_acc = 0.6511627906976745\n",
- "\n",
- "epoch: 20\n",
- "validation acc increased (0.643411 ---> 0.651163)\n",
- "epoch 20: train loss = 0.34419407603563157, l1loss = 0.09956587383012543, train acc = 0.8425787106446777,\n",
- "val_loss = 1.1840626740640448, val_acc = 0.6124031007751938\n",
- "\n",
- "epoch: 21\n",
- "epoch 21: train loss = 0.3332295252912227, l1loss = 0.09810652255803451, train acc = 0.8430784607696152,\n",
- "val_loss = 3.1099504592806793, val_acc = 0.4806201550387597\n",
- "\n",
- "epoch: 22\n",
- "epoch 22: train loss = 0.2923158032485451, l1loss = 0.09699563946070401, train acc = 0.8785607196401799,\n",
- "val_loss = 1.7239803001862164, val_acc = 0.5542635658914729\n",
- "\n",
- "epoch: 23\n",
- "epoch 23: train loss = 0.3031199698088349, l1loss = 0.0963727720063189, train acc = 0.8640679660169915,\n",
- "val_loss = 4.811507051220423, val_acc = 0.4728682170542636\n",
- "\n",
- "epoch: 24\n",
- "epoch 24: train loss = 0.2679102631255068, l1loss = 0.09541694263274643, train acc = 0.8835582208895553,\n",
- "val_loss = 4.7253117746160935, val_acc = 0.47674418604651164\n",
- "\n",
- "epoch: 25\n",
- "epoch 25: train loss = 0.26991559321555064, l1loss = 0.0947239384509396, train acc = 0.88055972013993,\n",
- "val_loss = 1.9052141544430754, val_acc = 0.6317829457364341\n",
- "\n",
- "epoch: 26\n",
- "epoch 26: train loss = 0.2462208094089285, l1loss = 0.09422039279322336, train acc = 0.8970514742628686,\n",
- "val_loss = 2.0839634255845416, val_acc = 0.6162790697674418\n",
- "\n",
- "epoch: 27\n",
- "epoch 27: train loss = 0.27374583970511457, l1loss = 0.09358507321439107, train acc = 0.8685657171414293,\n",
- "val_loss = 4.70158687118412, val_acc = 0.4883720930232558\n",
- "\n",
- "epoch: 28\n",
- "epoch 28: train loss = 0.20702115298509002, l1loss = 0.09327136474600559, train acc = 0.9190404797601199,\n",
- "val_loss = 1.790774514393289, val_acc = 0.6124031007751938\n",
- "\n",
- "epoch: 29\n",
- "epoch 29: train loss = 0.23099662097676404, l1loss = 0.09279883265316576, train acc = 0.8985507246376812,\n",
- "val_loss = 5.580494865908952, val_acc = 0.5193798449612403\n",
- "\n",
- "epoch: 30\n",
- "epoch 30: train loss = 0.18276370340946135, l1loss = 0.09199970285559463, train acc = 0.9270364817591205,\n",
- "val_loss = 2.8631812488974266, val_acc = 0.627906976744186\n",
- "\n",
- "epoch: 31\n",
- "epoch 31: train loss = 0.15527670064578944, l1loss = 0.0914434359698877, train acc = 0.9470264867566217,\n",
- "val_loss = 4.503482984384912, val_acc = 0.5348837209302325\n",
- "\n",
- "epoch: 32\n",
- "epoch 32: train loss = 0.16465340346202204, l1loss = 0.0909644259528063, train acc = 0.9370314842578711,\n",
- "val_loss = 1.5456568664589594, val_acc = 0.5775193798449613\n",
- "\n",
- "epoch: 33\n",
- "epoch 33: train loss = 0.1859049220969235, l1loss = 0.09095004587054908, train acc = 0.9315342328835582,\n",
- "val_loss = 14.714020802993183, val_acc = 0.46511627906976744\n",
- "\n",
- "epoch: 34\n",
- "epoch 34: train loss = 0.14878231415088508, l1loss = 0.09091311531222385, train acc = 0.9445277361319341,\n",
- "val_loss = 4.930351785911146, val_acc = 0.6395348837209303\n",
- "\n",
- "epoch: 35\n",
- "epoch 35: train loss = 0.15755604538156176, l1loss = 0.09075812100693918, train acc = 0.9325337331334332,\n",
- "val_loss = 1.786901553471883, val_acc = 0.6162790697674418\n",
- "\n",
- "epoch: 36\n",
- "epoch 36: train loss = 0.13036989256076847, l1loss = 0.09029308312955586, train acc = 0.9450274862568716,\n",
- "val_loss = 4.942993589149889, val_acc = 0.6395348837209303\n",
- "\n",
- "epoch: 37\n",
- "epoch 37: train loss = 0.11117354914851334, l1loss = 0.08992485350546153, train acc = 0.9615192403798101,\n",
- "val_loss = 4.3989028816929, val_acc = 0.6356589147286822\n",
- "\n",
- "epoch: 38\n",
- "epoch 38: train loss = 0.08873243333919831, l1loss = 0.08950450801211915, train acc = 0.9720139930034982,\n",
- "val_loss = 5.96283143804979, val_acc = 0.5310077519379846\n",
- "\n",
- "epoch: 39\n",
- "epoch 39: train loss = 0.07572829289951068, l1loss = 0.08899277692538747, train acc = 0.9775112443778111,\n",
- "val_loss = 1.866163623887439, val_acc = 0.6007751937984496\n",
- "\n",
- "epoch: 40\n",
- "epoch 40: train loss = 0.07354736827622944, l1loss = 0.08869233787402339, train acc = 0.9790104947526237,\n",
- "val_loss = 2.5748086192811184, val_acc = 0.5775193798449613\n",
- "\n",
- "epoch: 41\n",
- "epoch 41: train loss = 0.07472476078525953, l1loss = 0.08826551713730799, train acc = 0.9735132433783108,\n",
- "val_loss = 3.9040491326800963, val_acc = 0.6124031007751938\n",
- "\n",
- "epoch: 42\n",
- "epoch 42: train loss = 0.06888431988690032, l1loss = 0.0877874091989931, train acc = 0.9765117441279361,\n",
- "val_loss = 2.331109551496284, val_acc = 0.5658914728682171\n",
- "\n",
- "epoch: 43\n",
- "epoch 43: train loss = 0.05748968711425399, l1loss = 0.08741870758192113, train acc = 0.9875062468765617,\n",
- "val_loss = 2.812307975088903, val_acc = 0.5852713178294574\n",
- "\n",
- "epoch: 44\n",
- "epoch 44: train loss = 0.057824458753776696, l1loss = 0.08714396681176728, train acc = 0.9835082458770614,\n",
- "val_loss = 2.8747254637784736, val_acc = 0.5968992248062015\n",
- "\n",
- "epoch: 45\n",
- "epoch 45: train loss = 0.0759028259409719, l1loss = 0.08678379254615766, train acc = 0.9740129935032483,\n",
- "val_loss = 4.5999353874561395, val_acc = 0.5503875968992248\n",
- "\n",
- "epoch: 46\n",
- "epoch 46: train loss = 0.04053982199988384, l1loss = 0.08636881607672621, train acc = 0.993503248375812,\n",
- "val_loss = 3.032141833342323, val_acc = 0.5310077519379846\n",
- "\n",
- "epoch: 47\n",
- "epoch 47: train loss = 0.03172164383823278, l1loss = 0.08597096050160935, train acc = 0.9945027486256871,\n",
- "val_loss = 5.050457126410433, val_acc = 0.5503875968992248\n",
- "\n",
- "epoch: 48\n",
- "epoch 48: train loss = 0.030161406717326925, l1loss = 0.08547273422705418, train acc = 0.9940029985007496,\n",
- "val_loss = 4.39157882217289, val_acc = 0.5465116279069767\n",
- "\n",
- "epoch: 49\n",
- "epoch 49: train loss = 0.026073823958962992, l1loss = 0.08494537337489988, train acc = 0.9975012493753124,\n",
- "val_loss = 6.5615896919947305, val_acc = 0.5155038759689923\n",
- "\n",
- "epoch: 50\n",
- "epoch 50: train loss = 0.031470798644824126, l1loss = 0.08458039399119152, train acc = 0.993503248375812,\n",
- "val_loss = 3.8240739467532134, val_acc = 0.5658914728682171\n",
- "\n",
- "epoch: 51\n",
- "epoch 51: train loss = 0.030669868893470842, l1loss = 0.08433086723312624, train acc = 0.993503248375812,\n",
- "val_loss = 4.175465990984162, val_acc = 0.6046511627906976\n",
- "\n",
- "epoch: 52\n",
- "epoch 52: train loss = 0.02038246937382823, l1loss = 0.08382471429667195, train acc = 0.9970014992503748,\n",
- "val_loss = 3.628271795987953, val_acc = 0.5852713178294574\n",
- "\n",
- "epoch: 53\n",
- "epoch 53: train loss = 0.018496526570907834, l1loss = 0.08326089246266488, train acc = 0.9995002498750625,\n",
- "val_loss = 3.4422422157701598, val_acc = 0.5426356589147286\n",
- "\n",
- "epoch: 54\n",
- "epoch 54: train loss = 0.02122760796430765, l1loss = 0.08280747008883674, train acc = 0.9970014992503748,\n",
- "val_loss = 3.4909615479698477, val_acc = 0.5271317829457365\n",
- "\n",
- "epoch: 55\n",
- "epoch 55: train loss = 0.025122589815681662, l1loss = 0.08247958504963017, train acc = 0.992503748125937,\n",
- "val_loss = 3.1251121426737587, val_acc = 0.5775193798449613\n",
- "\n",
- "epoch: 56\n",
- "epoch 56: train loss = 0.018375528820868077, l1loss = 0.08209296630538147, train acc = 0.9995002498750625,\n",
- "val_loss = 3.3797425502954526, val_acc = 0.5775193798449613\n",
- "\n",
- "epoch: 57\n",
- "epoch 57: train loss = 0.01760085413966892, l1loss = 0.08163161436165647, train acc = 0.9960019990004998,\n",
- "val_loss = 3.5595659359480982, val_acc = 0.5193798449612403\n",
- "\n",
- "epoch: 58\n",
- "epoch 58: train loss = 0.01351920877006659, l1loss = 0.08108949671680483, train acc = 0.9995002498750625,\n",
- "val_loss = 3.2694953966510387, val_acc = 0.5697674418604651\n",
- "\n",
- "epoch: 59\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "epoch 59: train loss = 0.019704249462690848, l1loss = 0.08070969545486746, train acc = 0.9960019990004998,\n",
- "val_loss = 3.709986501207301, val_acc = 0.5852713178294574\n",
- "\n",
- "epoch: 60\n",
- "epoch 60: train loss = 0.023627269952588233, l1loss = 0.08051739055951913, train acc = 0.9940029985007496,\n",
- "val_loss = 4.3342692463897, val_acc = 0.5891472868217055\n",
- "\n",
- "epoch: 61\n",
- "epoch 61: train loss = 0.03571600694002687, l1loss = 0.08089180287347919, train acc = 0.9900049975012494,\n",
- "val_loss = 3.6718489259712457, val_acc = 0.5465116279069767\n",
- "\n",
- "epoch: 62\n",
- "epoch 62: train loss = 0.04068389410960085, l1loss = 0.0812820299514111, train acc = 0.9910044977511244,\n",
- "val_loss = 22.92412723866544, val_acc = 0.4806201550387597\n",
- "\n",
- "epoch: 63\n",
- "epoch 63: train loss = 0.03855133038023482, l1loss = 0.08120857460656088, train acc = 0.9930034982508745,\n",
- "val_loss = 2.7737932450087497, val_acc = 0.5503875968992248\n",
- "\n",
- "epoch: 64\n",
- "epoch 64: train loss = 0.04269264168341061, l1loss = 0.0811207454906649, train acc = 0.9875062468765617,\n",
- "val_loss = 5.258781780568204, val_acc = 0.6201550387596899\n",
- "\n",
- "epoch: 65\n",
- "epoch 65: train loss = 0.028871186090626756, l1loss = 0.08079952534394286, train acc = 0.9950024987506247,\n",
- "val_loss = 4.771393295406371, val_acc = 0.6162790697674418\n",
- "\n",
- "epoch: 66\n",
- "epoch 66: train loss = 0.016369871543846032, l1loss = 0.08030458839088007, train acc = 0.999000499750125,\n",
- "val_loss = 4.057015269301658, val_acc = 0.5465116279069767\n",
- "\n",
- "epoch: 67\n",
- "epoch 67: train loss = 0.013506758245762841, l1loss = 0.07978588878467999, train acc = 0.9995002498750625,\n",
- "val_loss = 3.792802465054416, val_acc = 0.5465116279069767\n",
- "\n",
- "epoch: 68\n",
- "epoch 68: train loss = 0.011354195068533095, l1loss = 0.07937957041118218, train acc = 0.9995002498750625,\n",
- "val_loss = 3.2458120704621307, val_acc = 0.5891472868217055\n",
- "\n",
- "epoch: 69\n",
- "epoch 69: train loss = 0.014591847184760639, l1loss = 0.07902957535516852, train acc = 0.999000499750125,\n",
- "val_loss = 3.9379879207343094, val_acc = 0.5968992248062015\n",
- "\n",
- "epoch: 70\n",
- "epoch 70: train loss = 0.01913830605863795, l1loss = 0.07874485357352223, train acc = 0.9960019990004998,\n",
- "val_loss = 3.228848675251469, val_acc = 0.5930232558139535\n",
- "\n",
- "epoch: 71\n",
- "epoch 71: train loss = 0.013023115446449428, l1loss = 0.0784264379541675, train acc = 0.999000499750125,\n",
- "val_loss = 3.240803785102312, val_acc = 0.5930232558139535\n",
- "\n",
- "epoch: 72\n",
- "epoch 72: train loss = 0.011582904643770577, l1loss = 0.0781244968903297, train acc = 0.9995002498750625,\n",
- "val_loss = 3.635261055110961, val_acc = 0.5852713178294574\n",
- "\n",
- "epoch: 73\n",
- "epoch 73: train loss = 0.008814260559524226, l1loss = 0.07784299137650699, train acc = 1.0,\n",
- "val_loss = 3.3489851297796234, val_acc = 0.5930232558139535\n",
- "\n",
- "!!! overfitted !!!\n",
- "[1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1]\n",
- "[0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1]\n",
- "early stoping results:\n",
- "\t [0.6041666666666666, 0.5572916666666666, 0.5208333333333334, 0.5078534031413613, 0.5445026178010471, 0.6073298429319371, 0.5602094240837696]\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1,\n",
- " 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1,\n",
- " 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0,\n",
- " 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1,\n",
- " 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1,\n",
- " 1, 1, 0, 1, 0, 1, 1, 1])\n",
- "output = [1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0]\n",
- "label = tensor([1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1,\n",
- " 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1,\n",
- " 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1,\n",
- " 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0,\n",
- " 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0,\n",
- " 0, 1, 0, 0, 1, 0, 1, 1])\n",
- "output = [1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0,\n",
- " 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1,\n",
- " 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1,\n",
- " 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,\n",
- " 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0,\n",
- " 1, 0, 1, 0, 0, 0, 1, 1])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0]\n",
- "label = tensor([1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1,\n",
- " 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1,\n",
- " 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0,\n",
- " 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0,\n",
- " 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1,\n",
- " 1, 0, 0, 0, 0, 0, 0, 1])\n",
- "output = [1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0]\n",
- "label = tensor([0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1,\n",
- " 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1,\n",
- " 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1,\n",
- " 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0,\n",
- " 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1,\n",
- " 1, 0, 1, 0, 1, 0, 1, 0])\n",
- "output = [1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1,\n",
- " 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0,\n",
- " 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1,\n",
- " 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0,\n",
- " 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0,\n",
- " 0, 1, 0, 0, 0, 1, 1, 0])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0,\n",
- " 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0,\n",
- " 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0,\n",
- " 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1,\n",
- " 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0,\n",
- " 1, 0, 0, 1, 0, 1, 1, 0])\n",
- "output = [1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0]\n",
- "label = tensor([1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1,\n",
- " 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0,\n",
- " 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0,\n",
- " 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0,\n",
- " 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1,\n",
- " 0, 0, 0, 1, 0, 1, 1, 0])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0]\n",
- "label = tensor([1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0,\n",
- " 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0,\n",
- " 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0,\n",
- " 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1,\n",
- " 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0,\n",
- " 0, 0, 1, 0, 1, 0, 1, 1])\n",
- "output = [1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0,\n",
- " 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0,\n",
- " 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1,\n",
- " 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1,\n",
- " 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0,\n",
- " 0, 1, 0, 0, 1, 1, 1, 1])\n",
- "output = [1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0,\n",
- " 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0,\n",
- " 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1,\n",
- " 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1,\n",
- " 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0,\n",
- " 0, 0, 1, 1, 0, 1, 1, 0])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0]\n",
- "label = tensor([1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1,\n",
- " 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1,\n",
- " 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0,\n",
- " 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1,\n",
- " 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1,\n",
- " 1, 0, 1, 0, 0, 1, 0, 1])\n",
- "output = [1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0,\n",
- " 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1,\n",
- " 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0,\n",
- " 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0,\n",
- " 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1,\n",
- " 0, 1, 0, 1, 1, 1, 0, 0])\n",
- "output = [0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0,\n",
- " 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0,\n",
- " 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,\n",
- " 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1,\n",
- " 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0,\n",
- " 0, 1, 1, 0, 1, 1, 1, 1])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1,\n",
- " 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1,\n",
- " 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1,\n",
- " 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1,\n",
- " 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0,\n",
- " 0, 0, 1, 0, 1, 1, 1, 1])\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1,\n",
- " 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1,\n",
- " 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0,\n",
- " 1, 0, 0, 1, 0, 1, 0, 1, 1])\n",
- "\t [0.6673376950176145, 0.6722054380664653, 0.6802005012531328, 0.6826347305389222, 0.7250876314471708, 0.7, 0.6841579210394803]\n",
- "[0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1]\n",
- "[0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1]\n",
- "full train results:\n",
- "\t [0.59375, 0.5885416666666666, 0.6354166666666666, 0.612565445026178, 0.6387434554973822, 0.6492146596858639, 0.6335078534031413]\n",
- "\t [0.9949672873678913, 0.9974823766364552, 0.8666666666666667, 0.8393213572854291, 0.99949924887331, 0.638, 0.999000499750125]\n",
- "[1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0]\n",
- "[1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1]\n",
- "best accs results:\n",
- "\t [0.59375, 0.5625, 0.6354166666666666, 0.6282722513089005, 0.6020942408376964, 0.6387434554973822, 0.643979057591623]\n",
- "\t [0.8037242073477604, 0.8670694864048338, 0.4967418546365915, 0.5489021956087824, 0.9744616925388082, 0.519, 0.47226386806596704]\n",
- "[1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1]\n",
- "-----------------------------Fold 8---------------\n",
- "preparing dataloaders...\n",
- "torch.Size([81, 7, 9, 20])\n",
- "coef when 0 > 1 1\n",
- "creating model...\n",
- "calculating total steps...\n",
- "epoch: 1\n",
- "validation loss decreased (inf ---> 0.690483), val_acc = 0.6162790697674418\n",
- "validation acc increased (0.000000 ---> 0.616279)\n",
- "validation acc increased (0.616279 ---> 0.616279)\n",
- "epoch 1: train loss = 0.681558296144248, l1loss = 0.13785075955840959, train acc = 0.47540160642570284,\n",
- "val_loss = 0.6908809380937916, val_acc = 0.6201550387596899\n",
- "\n",
- "epoch: 2\n",
- "validation acc increased (0.616279 ---> 0.616279)\n",
- "epoch 2: train loss = 0.6601414388442135, l1loss = 0.13744411087897887, train acc = 0.5727911646586346,\n",
- "val_loss = 0.6930313258208045, val_acc = 0.5038759689922481\n",
- "\n",
- "epoch: 3\n",
- "validation loss decreased (0.690483 ---> 0.688592), val_acc = 0.5465116279069767\n",
- "epoch 3: train loss = 0.6413024204323091, l1loss = 0.1369472207673582, train acc = 0.6360441767068273,\n",
- "val_loss = 0.682868339294611, val_acc = 0.5348837209302325\n",
- "\n",
- "epoch: 4\n",
- "validation loss decreased (0.688592 ---> 0.681418), val_acc = 0.5387596899224806\n",
- "validation loss decreased (0.681418 ---> 0.657421), val_acc = 0.5542635658914729\n",
- "epoch 4: train loss = 0.6253758257172672, l1loss = 0.13630323943842845, train acc = 0.6521084337349398,\n",
- "val_loss = 0.6426847808120787, val_acc = 0.562015503875969\n",
- "\n",
- "epoch: 5\n",
- "validation loss decreased (0.657421 ---> 0.639681), val_acc = 0.562015503875969\n",
- "validation loss decreased (0.639681 ---> 0.615633), val_acc = 0.5697674418604651\n",
- "epoch 5: train loss = 0.6111369527966143, l1loss = 0.13545335679647913, train acc = 0.6556224899598394,\n",
- "val_loss = 0.6082370059434757, val_acc = 0.5813953488372093\n",
- "\n",
- "epoch: 6\n",
- "validation loss decreased (0.615633 ---> 0.607422), val_acc = 0.5775193798449613\n",
- "validation loss decreased (0.607422 ---> 0.601718), val_acc = 0.5775193798449613\n",
- "epoch 6: train loss = 0.599387328548125, l1loss = 0.13432514200727624, train acc = 0.6606425702811245,\n",
- "val_loss = 0.6018130197081455, val_acc = 0.5813953488372093\n",
- "\n",
- "epoch: 7\n",
- "epoch 7: train loss = 0.5901381636719149, l1loss = 0.13286473970097232, train acc = 0.6686746987951807,\n",
- "val_loss = 0.6033216104951016, val_acc = 0.5813953488372093\n",
- "\n",
- "epoch: 8\n",
- "epoch 8: train loss = 0.5820648083725128, l1loss = 0.13100589835739518, train acc = 0.6746987951807228,\n",
- "val_loss = 0.6082159008166587, val_acc = 0.5813953488372093\n",
- "\n",
- "epoch: 9\n",
- "epoch 9: train loss = 0.5762511060898563, l1loss = 0.12873437647599292, train acc = 0.6757028112449799,\n",
- "val_loss = 0.6154433927794759, val_acc = 0.5697674418604651\n",
- "\n",
- "epoch: 10\n",
- "epoch 10: train loss = 0.5673032103772144, l1loss = 0.12611201866324168, train acc = 0.6827309236947792,\n",
- "val_loss = 0.6142219150251196, val_acc = 0.5736434108527132\n",
- "\n",
- "epoch: 11\n",
- "epoch 11: train loss = 0.5576656654656652, l1loss = 0.12317741024925048, train acc = 0.6907630522088354,\n",
- "val_loss = 0.6357968532761862, val_acc = 0.5775193798449613\n",
- "\n",
- "epoch: 12\n",
- "epoch 12: train loss = 0.544440203881168, l1loss = 0.12002360970380316, train acc = 0.7043172690763052,\n",
- "val_loss = 0.6717778880928837, val_acc = 0.5736434108527132\n",
- "\n",
- "epoch: 13\n",
- "epoch 13: train loss = 0.5326818835783196, l1loss = 0.11672397023702721, train acc = 0.7178714859437751,\n",
- "val_loss = 0.6902429789535759, val_acc = 0.5697674418604651\n",
- "\n",
- "epoch: 14\n",
- "epoch 14: train loss = 0.5085236329630197, l1loss = 0.1134706104555762, train acc = 0.7339357429718876,\n",
- "val_loss = 0.658633234195931, val_acc = 0.5930232558139535\n",
- "\n",
- "epoch: 15\n",
- "epoch 15: train loss = 0.48935657404512767, l1loss = 0.11044141605316875, train acc = 0.7459839357429718,\n",
- "val_loss = 0.6858358637307042, val_acc = 0.5930232558139535\n",
- "\n",
- "epoch: 16\n",
- "epoch 16: train loss = 0.45904517054079047, l1loss = 0.1077926407556936, train acc = 0.7740963855421686,\n",
- "val_loss = 0.8338234337550307, val_acc = 0.5852713178294574\n",
- "\n",
- "epoch: 17\n",
- "epoch 17: train loss = 0.426940765964937, l1loss = 0.1055830192254729, train acc = 0.7911646586345381,\n",
- "val_loss = 1.16572989049808, val_acc = 0.562015503875969\n",
- "\n",
- "epoch: 18\n",
- "epoch 18: train loss = 0.4012284585271016, l1loss = 0.10380807138949513, train acc = 0.8092369477911646,\n",
- "val_loss = 1.6921033988627352, val_acc = 0.5658914728682171\n",
- "\n",
- "epoch: 19\n",
- "epoch 19: train loss = 0.3755404464451663, l1loss = 0.10228538163096072, train acc = 0.8273092369477911,\n",
- "val_loss = 1.4203764996671862, val_acc = 0.5503875968992248\n",
- "\n",
- "epoch: 20\n",
- "validation acc increased (0.616279 ---> 0.616279)\n",
- "epoch 20: train loss = 0.3591645778901127, l1loss = 0.10097318358090987, train acc = 0.8288152610441767,\n",
- "val_loss = 1.1321186076763063, val_acc = 0.6162790697674418\n",
- "\n",
- "epoch: 21\n",
- "validation acc increased (0.616279 ---> 0.620155)\n",
- "epoch 21: train loss = 0.32908376967571823, l1loss = 0.09975837880229375, train acc = 0.8473895582329317,\n",
- "val_loss = 0.9505737890568815, val_acc = 0.6085271317829457\n",
- "\n",
- "epoch: 22\n",
- "epoch 22: train loss = 0.29188525078287086, l1loss = 0.09870235529170937, train acc = 0.8699799196787149,\n",
- "val_loss = 0.92200702197792, val_acc = 0.5775193798449613\n",
- "\n",
- "epoch: 23\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "epoch 23: train loss = 0.26653667099504585, l1loss = 0.09784682783257051, train acc = 0.8910642570281124,\n",
- "val_loss = 1.039930680001429, val_acc = 0.5736434108527132\n",
- "\n",
- "epoch: 24\n",
- "validation acc increased (0.620155 ---> 0.627907)\n",
- "epoch 24: train loss = 0.29164418386647023, l1loss = 0.09723315346073434, train acc = 0.857429718875502,\n",
- "val_loss = 1.7529087842896927, val_acc = 0.624031007751938\n",
- "\n",
- "epoch: 25\n",
- "validation acc increased (0.627907 ---> 0.631783)\n",
- "validation acc increased (0.631783 ---> 0.631783)\n",
- "epoch 25: train loss = 0.2822917654332387, l1loss = 0.09672807504134963, train acc = 0.8815261044176707,\n",
- "val_loss = 3.657799425051194, val_acc = 0.624031007751938\n",
- "\n",
- "epoch: 26\n",
- "epoch 26: train loss = 0.26554612988927756, l1loss = 0.09617703916677031, train acc = 0.8790160642570282,\n",
- "val_loss = 2.485792124918265, val_acc = 0.5775193798449613\n",
- "\n",
- "epoch: 27\n",
- "epoch 27: train loss = 0.254531506015115, l1loss = 0.09513618028068159, train acc = 0.8895582329317269,\n",
- "val_loss = 2.655083281125209, val_acc = 0.624031007751938\n",
- "\n",
- "epoch: 28\n",
- "epoch 28: train loss = 0.24327343211595312, l1loss = 0.09413622313713932, train acc = 0.8845381526104418,\n",
- "val_loss = 1.9870535994684972, val_acc = 0.5852713178294574\n",
- "\n",
- "epoch: 29\n",
- "epoch 29: train loss = 0.19294745029215832, l1loss = 0.09322106197895295, train acc = 0.9312248995983936,\n",
- "val_loss = 1.6232661962971207, val_acc = 0.6124031007751938\n",
- "\n",
- "epoch: 30\n",
- "epoch 30: train loss = 0.17906709673653645, l1loss = 0.09269103892596371, train acc = 0.9201807228915663,\n",
- "val_loss = 1.2391475744025653, val_acc = 0.627906976744186\n",
- "\n",
- "epoch: 31\n",
- "epoch 31: train loss = 0.16923785616595102, l1loss = 0.09213589386169212, train acc = 0.9412650602409639,\n",
- "val_loss = 2.774588699488677, val_acc = 0.562015503875969\n",
- "\n",
- "epoch: 32\n",
- "epoch 32: train loss = 0.18963111925077247, l1loss = 0.09187775111222363, train acc = 0.9181726907630522,\n",
- "val_loss = 3.4193982689879663, val_acc = 0.6085271317829457\n",
- "\n",
- "epoch: 33\n",
- "epoch 33: train loss = 0.21511334994710593, l1loss = 0.0914924276700939, train acc = 0.8945783132530121,\n",
- "val_loss = 1.4032248768580051, val_acc = 0.6046511627906976\n",
- "\n",
- "epoch: 34\n",
- "validation acc increased (0.631783 ---> 0.635659)\n",
- "epoch 34: train loss = 0.1589769302123043, l1loss = 0.09112457344570313, train acc = 0.9392570281124498,\n",
- "val_loss = 1.755115381969038, val_acc = 0.6085271317829457\n",
- "\n",
- "epoch: 35\n",
- "epoch 35: train loss = 0.14009647250893603, l1loss = 0.0910465270280838, train acc = 0.9513052208835341,\n",
- "val_loss = 1.4941087914991749, val_acc = 0.624031007751938\n",
- "\n",
- "epoch: 36\n",
- "epoch 36: train loss = 0.12341947081577347, l1loss = 0.0905273190763102, train acc = 0.9573293172690763,\n",
- "val_loss = 2.225553594818411, val_acc = 0.6317829457364341\n",
- "\n",
- "epoch: 37\n",
- "validation acc increased (0.635659 ---> 0.639535)\n",
- "epoch 37: train loss = 0.09714583913425365, l1loss = 0.08966618833173232, train acc = 0.9698795180722891,\n",
- "val_loss = 2.304292928340823, val_acc = 0.5852713178294574\n",
- "\n",
- "epoch: 38\n",
- "epoch 38: train loss = 0.11655990438288953, l1loss = 0.0893175832657929, train acc = 0.9568273092369478,\n",
- "val_loss = 2.1228176657716897, val_acc = 0.6007751937984496\n",
- "\n",
- "epoch: 39\n",
- "epoch 39: train loss = 0.09807442242361934, l1loss = 0.08927847062009406, train acc = 0.964859437751004,\n",
- "val_loss = 4.155446446450879, val_acc = 0.6317829457364341\n",
- "\n",
- "epoch: 40\n",
- "epoch 40: train loss = 0.0872385489294328, l1loss = 0.08882969411381755, train acc = 0.9774096385542169,\n",
- "val_loss = 1.9643646534099541, val_acc = 0.5968992248062015\n",
- "\n",
- "epoch: 41\n",
- "epoch 41: train loss = 0.07711015104589214, l1loss = 0.08862146217540565, train acc = 0.9754016064257028,\n",
- "val_loss = 3.4088204403718314, val_acc = 0.6046511627906976\n",
- "\n",
- "epoch: 42\n",
- "epoch 42: train loss = 0.07405169415904815, l1loss = 0.08791267791067262, train acc = 0.9759036144578314,\n",
- "val_loss = 2.432727573453918, val_acc = 0.5930232558139535\n",
- "\n",
- "epoch: 43\n",
- "epoch 43: train loss = 0.1035525700054973, l1loss = 0.08760496441379609, train acc = 0.9583333333333334,\n",
- "val_loss = 4.112110699794089, val_acc = 0.5775193798449613\n",
- "\n",
- "epoch: 44\n",
- "epoch 44: train loss = 0.07531021014753594, l1loss = 0.08808258729407108, train acc = 0.9799196787148594,\n",
- "val_loss = 4.904226697272487, val_acc = 0.624031007751938\n",
- "\n",
- "epoch: 45\n",
- "epoch 45: train loss = 0.08077501409862893, l1loss = 0.0878749838076442, train acc = 0.9693775100401606,\n",
- "val_loss = 2.515559282413749, val_acc = 0.6046511627906976\n",
- "\n",
- "epoch: 46\n",
- "epoch 46: train loss = 0.07068757228104465, l1loss = 0.08739253621264155, train acc = 0.9804216867469879,\n",
- "val_loss = 4.238404756368593, val_acc = 0.6085271317829457\n",
- "\n",
- "epoch: 47\n",
- "epoch 47: train loss = 0.056927473190797856, l1loss = 0.0867786297956145, train acc = 0.9844377510040161,\n",
- "val_loss = 2.2621954614801925, val_acc = 0.5930232558139535\n",
- "\n",
- "epoch: 48\n",
- "epoch 48: train loss = 0.07424461281293607, l1loss = 0.08710216084517629, train acc = 0.9844377510040161,\n",
- "val_loss = 8.002929717071297, val_acc = 0.6124031007751938\n",
- "\n",
- "epoch: 49\n",
- "epoch 49: train loss = 0.08703712670199842, l1loss = 0.08769192863779375, train acc = 0.9688755020080321,\n",
- "val_loss = 3.551590413086174, val_acc = 0.6085271317829457\n",
- "\n",
- "epoch: 50\n",
- "epoch 50: train loss = 0.07011409313324465, l1loss = 0.08753819362705491, train acc = 0.9844377510040161,\n",
- "val_loss = 3.0541449490241535, val_acc = 0.5852713178294574\n",
- "\n",
- "epoch: 51\n",
- "epoch 51: train loss = 0.04092977885680505, l1loss = 0.08705330301958873, train acc = 0.9944779116465864,\n",
- "val_loss = 3.317158439362696, val_acc = 0.5891472868217055\n",
- "\n",
- "epoch: 52\n",
- "epoch 52: train loss = 0.03851072603170891, l1loss = 0.08622222872503311, train acc = 0.9914658634538153,\n",
- "val_loss = 2.734001413796299, val_acc = 0.6124031007751938\n",
- "\n",
- "epoch: 53\n",
- "epoch 53: train loss = 0.03234308925139377, l1loss = 0.08536305176804822, train acc = 0.9954819277108434,\n",
- "val_loss = 3.4601848698401634, val_acc = 0.6046511627906976\n",
- "\n",
- "epoch: 54\n",
- "epoch 54: train loss = 0.026902885224864666, l1loss = 0.08468511724567797, train acc = 0.9964859437751004,\n",
- "val_loss = 3.1516257589177568, val_acc = 0.5968992248062015\n",
- "\n",
- "epoch: 55\n",
- "epoch 55: train loss = 0.02750487214171264, l1loss = 0.08413356647314317, train acc = 0.9959839357429718,\n",
- "val_loss = 3.372754837696751, val_acc = 0.5813953488372093\n",
- "\n",
- "epoch: 56\n",
- "epoch 56: train loss = 0.02194071931382978, l1loss = 0.08356957251288326, train acc = 0.9969879518072289,\n",
- "val_loss = 3.443614445915518, val_acc = 0.5968992248062015\n",
- "\n",
- "epoch: 57\n",
- "epoch 57: train loss = 0.02077264787561922, l1loss = 0.0830162249834662, train acc = 0.9979919678714859,\n",
- "val_loss = 3.4553179718958313, val_acc = 0.6007751937984496\n",
- "\n",
- "epoch: 58\n",
- "epoch 58: train loss = 0.02582511185372929, l1loss = 0.08288956104392507, train acc = 0.9959839357429718,\n",
- "val_loss = 3.5070084046947865, val_acc = 0.5968992248062015\n",
- "\n",
- "epoch: 59\n",
- "epoch 59: train loss = 0.017465517126831663, l1loss = 0.08231255358242127, train acc = 0.9974899598393574,\n",
- "val_loss = 3.79313347598379, val_acc = 0.5968992248062015\n",
- "\n",
- "epoch: 60\n",
- "epoch 60: train loss = 0.014348378337650415, l1loss = 0.08186340673142169, train acc = 0.9994979919678715,\n",
- "val_loss = 4.290767019109208, val_acc = 0.6356589147286822\n",
- "\n",
- "epoch: 61\n",
- "epoch 61: train loss = 0.014340946001999349, l1loss = 0.08129345541857333, train acc = 1.0,\n",
- "val_loss = 3.978316806668286, val_acc = 0.5930232558139535\n",
- "\n",
- "!!! overfitted !!!\n",
- "[1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1]\n",
- "[0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1]\n",
- "early stoping results:\n",
- "\t [0.6041666666666666, 0.5572916666666666, 0.5208333333333334, 0.5078534031413613, 0.5445026178010471, 0.6073298429319371, 0.5602094240837696, 0.5340314136125655]\n",
- "output = [1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1,\n",
- " 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1,\n",
- " 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1,\n",
- " 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1,\n",
- " 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1,\n",
- " 1, 1, 0, 1, 1, 0, 1, 1])\n",
- "output = [1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0]\n",
- "label = tensor([1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0,\n",
- " 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0,\n",
- " 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0,\n",
- " 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1,\n",
- " 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1,\n",
- " 1, 1, 1, 0, 1, 0, 1, 0])\n",
- "output = [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1,\n",
- " 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1,\n",
- " 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0,\n",
- " 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0,\n",
- " 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0,\n",
- " 0, 0, 1, 0, 1, 1, 1, 1])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1,\n",
- " 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0,\n",
- " 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1,\n",
- " 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1,\n",
- " 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0,\n",
- " 0, 0, 1, 1, 1, 0, 1, 1])\n",
- "output = [1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0,\n",
- " 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0,\n",
- " 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1,\n",
- " 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0,\n",
- " 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0,\n",
- " 0, 0, 1, 1, 1, 0, 1, 1])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1,\n",
- " 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0,\n",
- " 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1,\n",
- " 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1,\n",
- " 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0,\n",
- " 1, 0, 0, 1, 1, 0, 1, 1])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1,\n",
- " 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0,\n",
- " 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0,\n",
- " 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0,\n",
- " 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1,\n",
- " 1, 0, 1, 0, 0, 1, 1, 1])\n",
- "output = [1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1,\n",
- " 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0,\n",
- " 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1,\n",
- " 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1,\n",
- " 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0,\n",
- " 1, 0, 1, 0, 1, 1, 0, 1])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0]\n",
- "label = tensor([0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1,\n",
- " 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0,\n",
- " 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0,\n",
- " 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0,\n",
- " 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1,\n",
- " 1, 1, 0, 0, 1, 0, 1, 1])\n",
- "output = [1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0]\n",
- "label = tensor([1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n",
- " 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0,\n",
- " 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0,\n",
- " 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0,\n",
- " 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0,\n",
- " 1, 1, 0, 1, 0, 0, 0, 0])\n",
- "output = [0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0]\n",
- "label = tensor([0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1,\n",
- " 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1,\n",
- " 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0,\n",
- " 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0,\n",
- " 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1,\n",
- " 0, 1, 1, 1, 1, 0, 1, 1])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0]\n",
- "label = tensor([1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0,\n",
- " 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0,\n",
- " 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1,\n",
- " 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n",
- " 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0,\n",
- " 1, 1, 1, 1, 1, 1, 0, 0])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0]\n",
- "label = tensor([1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0,\n",
- " 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1,\n",
- " 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1,\n",
- " 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0,\n",
- " 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1,\n",
- " 0, 0, 1, 1, 0, 1, 1, 0])\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "output = [1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0]\n",
- "label = tensor([0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0,\n",
- " 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1,\n",
- " 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0,\n",
- " 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0,\n",
- " 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1,\n",
- " 1, 0, 1, 1, 1, 0, 0, 0])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0]\n",
- "label = tensor([0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1,\n",
- " 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1,\n",
- " 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1,\n",
- " 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1,\n",
- " 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1,\n",
- " 0, 1, 1, 0, 0, 0, 0, 1])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0]\n",
- "label = tensor([1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0,\n",
- " 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1,\n",
- " 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0])\n",
- "\t [0.6673376950176145, 0.6722054380664653, 0.6802005012531328, 0.6826347305389222, 0.7250876314471708, 0.7, 0.6841579210394803, 0.6651606425702812]\n",
- "[0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1]\n",
- "[1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1]\n",
- "full train results:\n",
- "\t [0.59375, 0.5885416666666666, 0.6354166666666666, 0.612565445026178, 0.6387434554973822, 0.6492146596858639, 0.6335078534031413, 0.6335078534031413]\n",
- "\t [0.9949672873678913, 0.9974823766364552, 0.8666666666666667, 0.8393213572854291, 0.99949924887331, 0.638, 0.999000499750125, 0.9949799196787149]\n",
- "[1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1]\n",
- "[0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0]\n",
- "best accs results:\n",
- "\t [0.59375, 0.5625, 0.6354166666666666, 0.6282722513089005, 0.6020942408376964, 0.6387434554973822, 0.643979057591623, 0.6387434554973822]\n",
- "\t [0.8037242073477604, 0.8670694864048338, 0.4967418546365915, 0.5489021956087824, 0.9744616925388082, 0.519, 0.47226386806596704, 0.7761044176706827]\n",
- "[1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0]\n",
- "-----------------------------Fold 9---------------\n",
- "preparing dataloaders...\n",
- "torch.Size([72, 7, 9, 20])\n",
- "coef when 0 > 1 1\n",
- "creating model...\n",
- "calculating total steps...\n",
- "epoch: 1\n",
- "validation loss decreased (inf ---> 0.682980), val_acc = 0.627906976744186\n",
- "validation acc increased (0.000000 ---> 0.627907)\n",
- "validation acc increased (0.627907 ---> 0.627907)\n",
- "epoch 1: train loss = 0.6777922638376853, l1loss = 0.13775174053838676, train acc = 0.48370927318295737,\n",
- "val_loss = 0.687888598257257, val_acc = 0.627906976744186\n",
- "\n",
- "epoch: 2\n",
- "validation acc increased (0.627907 ---> 0.627907)\n",
- "epoch 2: train loss = 0.6605406909060657, l1loss = 0.13724787228537683, train acc = 0.6210526315789474,\n",
- "val_loss = 0.7003788606140965, val_acc = 0.42248062015503873\n",
- "\n",
- "epoch: 3\n",
- "epoch 3: train loss = 0.6454371491171662, l1loss = 0.13666182353831174, train acc = 0.6416040100250626,\n",
- "val_loss = 0.7020500177560851, val_acc = 0.4844961240310077\n",
- "\n",
- "epoch: 4\n",
- "epoch 4: train loss = 0.6296094764444164, l1loss = 0.13591576377700146, train acc = 0.6571428571428571,\n",
- "val_loss = 0.6741203370020371, val_acc = 0.5271317829457365\n",
- "\n",
- "epoch: 5\n",
- "validation loss decreased (0.682980 ---> 0.672345), val_acc = 0.5271317829457365\n",
- "validation loss decreased (0.672345 ---> 0.651531), val_acc = 0.5271317829457365\n",
- "epoch 5: train loss = 0.6174353536507838, l1loss = 0.13493877780019192, train acc = 0.6571428571428571,\n",
- "val_loss = 0.64598062149314, val_acc = 0.5310077519379846\n",
- "\n",
- "epoch: 6\n",
- "validation loss decreased (0.651531 ---> 0.644699), val_acc = 0.5310077519379846\n",
- "validation loss decreased (0.644699 ---> 0.633400), val_acc = 0.5310077519379846\n",
- "epoch 6: train loss = 0.6050603376295334, l1loss = 0.13365735947189475, train acc = 0.6621553884711779,\n",
- "val_loss = 0.6314658402472504, val_acc = 0.5348837209302325\n",
- "\n",
- "epoch: 7\n",
- "validation loss decreased (0.633400 ---> 0.632519), val_acc = 0.5348837209302325\n",
- "validation loss decreased (0.632519 ---> 0.631722), val_acc = 0.5348837209302325\n",
- "epoch 7: train loss = 0.5956596527183265, l1loss = 0.13198910928459692, train acc = 0.6656641604010025,\n",
- "val_loss = 0.6331861458083455, val_acc = 0.5348837209302325\n",
- "\n",
- "epoch: 8\n",
- "epoch 8: train loss = 0.5859128608141926, l1loss = 0.1298997061070344, train acc = 0.6716791979949874,\n",
- "val_loss = 0.6339161365993263, val_acc = 0.5310077519379846\n",
- "\n",
- "epoch: 9\n",
- "epoch 9: train loss = 0.5775983289967205, l1loss = 0.12739512194368177, train acc = 0.6796992481203008,\n",
- "val_loss = 0.6418065218500388, val_acc = 0.5348837209302325\n",
- "\n",
- "epoch: 10\n",
- "epoch 10: train loss = 0.5680718240283784, l1loss = 0.12452510148286819, train acc = 0.6907268170426065,\n",
- "val_loss = 0.6507704207139422, val_acc = 0.5348837209302325\n",
- "\n",
- "epoch: 11\n",
- "epoch 11: train loss = 0.5571945123803944, l1loss = 0.1213724056954372, train acc = 0.6962406015037594,\n",
- "val_loss = 0.6841371244238329, val_acc = 0.5310077519379846\n",
- "\n",
- "epoch: 12\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "epoch 12: train loss = 0.5443704637369715, l1loss = 0.11801029294729233, train acc = 0.7072681704260652,\n",
- "val_loss = 0.7370998619138732, val_acc = 0.5232558139534884\n",
- "\n",
- "epoch: 13\n",
- "epoch 13: train loss = 0.5282434655012642, l1loss = 0.11461027522657748, train acc = 0.7263157894736842,\n",
- "val_loss = 0.7001163677651753, val_acc = 0.5426356589147286\n",
- "\n",
- "epoch: 14\n",
- "epoch 14: train loss = 0.5089050915008201, l1loss = 0.11128778580884288, train acc = 0.7413533834586467,\n",
- "val_loss = 0.7310682345730389, val_acc = 0.5348837209302325\n",
- "\n",
- "epoch: 15\n",
- "epoch 15: train loss = 0.4865566873311399, l1loss = 0.10820232349305524, train acc = 0.7533834586466165,\n",
- "val_loss = 0.9019363850586174, val_acc = 0.5310077519379846\n",
- "\n",
- "epoch: 16\n",
- "epoch 16: train loss = 0.45884659435217245, l1loss = 0.10554263851322924, train acc = 0.7789473684210526,\n",
- "val_loss = 0.9704440212989038, val_acc = 0.5232558139534884\n",
- "\n",
- "epoch: 17\n",
- "epoch 17: train loss = 0.4202355332541884, l1loss = 0.10339735007674473, train acc = 0.8070175438596491,\n",
- "val_loss = 0.9909111167802367, val_acc = 0.6124031007751938\n",
- "\n",
- "epoch: 18\n",
- "epoch 18: train loss = 0.39302192711292355, l1loss = 0.1019542952826746, train acc = 0.8120300751879699,\n",
- "val_loss = 1.3908450917680135, val_acc = 0.5232558139534884\n",
- "\n",
- "epoch: 19\n",
- "epoch 19: train loss = 0.3705816494641746, l1loss = 0.10042775794303507, train acc = 0.8165413533834587,\n",
- "val_loss = 0.8365242425785508, val_acc = 0.5697674418604651\n",
- "\n",
- "epoch: 20\n",
- "epoch 20: train loss = 0.35236290118150543, l1loss = 0.0989852717533745, train acc = 0.8370927318295739,\n",
- "val_loss = 1.175207398658575, val_acc = 0.5271317829457365\n",
- "\n",
- "epoch: 21\n",
- "epoch 21: train loss = 0.3512345191828888, l1loss = 0.09775472859242805, train acc = 0.8335839598997494,\n",
- "val_loss = 1.720713703958101, val_acc = 0.5116279069767442\n",
- "\n",
- "epoch: 22\n",
- "epoch 22: train loss = 0.32802224415436126, l1loss = 0.09652294553162759, train acc = 0.8526315789473684,\n",
- "val_loss = 1.0351900666259055, val_acc = 0.5387596899224806\n",
- "\n",
- "epoch: 23\n",
- "epoch 23: train loss = 0.2810892618987196, l1loss = 0.09586043051236255, train acc = 0.8847117794486216,\n",
- "val_loss = 1.7488028725912406, val_acc = 0.5193798449612403\n",
- "\n",
- "epoch: 24\n",
- "epoch 24: train loss = 0.29594317181666097, l1loss = 0.09554791460957443, train acc = 0.8736842105263158,\n",
- "val_loss = 1.383325032485548, val_acc = 0.5387596899224806\n",
- "\n",
- "epoch: 25\n",
- "epoch 25: train loss = 0.2908367050545556, l1loss = 0.09524397529605635, train acc = 0.8741854636591478,\n",
- "val_loss = 1.0075195944586466, val_acc = 0.6085271317829457\n",
- "\n",
- "epoch: 26\n",
- "epoch 26: train loss = 0.23876858004053733, l1loss = 0.09456468856050855, train acc = 0.9042606516290727,\n",
- "val_loss = 3.7633727769731307, val_acc = 0.627906976744186\n",
- "\n",
- "epoch: 27\n",
- "validation acc increased (0.627907 ---> 0.631783)\n",
- "epoch 27: train loss = 0.24321200991035405, l1loss = 0.09383204626409632, train acc = 0.8897243107769424,\n",
- "val_loss = 2.632583130237668, val_acc = 0.5155038759689923\n",
- "\n",
- "epoch: 28\n",
- "epoch 28: train loss = 0.19356993710188042, l1loss = 0.0934341524291158, train acc = 0.9238095238095239,\n",
- "val_loss = 1.41927497885948, val_acc = 0.5348837209302325\n",
- "\n",
- "epoch: 29\n",
- "epoch 29: train loss = 0.19538922365147965, l1loss = 0.09319527589512946, train acc = 0.9167919799498747,\n",
- "val_loss = 3.835814361424409, val_acc = 0.624031007751938\n",
- "\n",
- "epoch: 30\n",
- "epoch 30: train loss = 0.17916062683389897, l1loss = 0.09250437699837194, train acc = 0.9303258145363409,\n",
- "val_loss = 1.621931373026193, val_acc = 0.6085271317829457\n",
- "\n",
- "epoch: 31\n",
- "epoch 31: train loss = 0.21077537299098825, l1loss = 0.09211045807614959, train acc = 0.9087719298245615,\n",
- "val_loss = 1.3185462252807247, val_acc = 0.5930232558139535\n",
- "\n",
- "epoch: 32\n",
- "epoch 32: train loss = 0.23887639734380525, l1loss = 0.09190204342579782, train acc = 0.8952380952380953,\n",
- "val_loss = 1.9735888972762943, val_acc = 0.5232558139534884\n",
- "\n",
- "epoch: 33\n",
- "epoch 33: train loss = 0.14902917222122203, l1loss = 0.09144701687315651, train acc = 0.9478696741854636,\n",
- "val_loss = 1.734970021617505, val_acc = 0.5813953488372093\n",
- "\n",
- "epoch: 34\n",
- "epoch 34: train loss = 0.1352653970246327, l1loss = 0.09123586411389492, train acc = 0.9609022556390977,\n",
- "val_loss = 1.6061633298563402, val_acc = 0.5348837209302325\n",
- "\n",
- "epoch: 35\n",
- "epoch 35: train loss = 0.12018011457713923, l1loss = 0.0913496767742592, train acc = 0.9654135338345865,\n",
- "val_loss = 1.4085034085798633, val_acc = 0.5852713178294574\n",
- "\n",
- "epoch: 36\n",
- "epoch 36: train loss = 0.12157767949845259, l1loss = 0.09087181371405609, train acc = 0.9568922305764411,\n",
- "val_loss = 12.18474845738374, val_acc = 0.627906976744186\n",
- "\n",
- "epoch: 37\n",
- "epoch 37: train loss = 0.11420494081980005, l1loss = 0.09056822333419533, train acc = 0.9588972431077695,\n",
- "val_loss = 7.133128994195036, val_acc = 0.627906976744186\n",
- "\n",
- "epoch: 38\n",
- "epoch 38: train loss = 0.08377009170844142, l1loss = 0.08998052694444968, train acc = 0.9769423558897243,\n",
- "val_loss = 1.5790407207123405, val_acc = 0.5348837209302325\n",
- "\n",
- "epoch: 39\n",
- "epoch 39: train loss = 0.07196493960711592, l1loss = 0.08941901628310818, train acc = 0.9824561403508771,\n",
- "val_loss = 1.7764077741046285, val_acc = 0.5426356589147286\n",
- "\n",
- "epoch: 40\n",
- "epoch 40: train loss = 0.06884303719813663, l1loss = 0.08907348288778673, train acc = 0.981453634085213,\n",
- "val_loss = 2.0529281263665635, val_acc = 0.5658914728682171\n",
- "\n",
- "epoch: 41\n",
- "epoch 41: train loss = 0.08752110526152422, l1loss = 0.08877328430351458, train acc = 0.9669172932330827,\n",
- "val_loss = 3.8814146389332853, val_acc = 0.624031007751938\n",
- "\n",
- "epoch: 42\n",
- "epoch 42: train loss = 0.0534191582846761, l1loss = 0.08847359084619914, train acc = 0.9899749373433584,\n",
- "val_loss = 1.7713334769472595, val_acc = 0.5581395348837209\n",
- "\n",
- "epoch: 43\n",
- "epoch 43: train loss = 0.053076960512420585, l1loss = 0.08797531594011121, train acc = 0.9854636591478697,\n",
- "val_loss = 3.003138168837673, val_acc = 0.5310077519379846\n",
- "\n",
- "epoch: 44\n",
- "epoch 44: train loss = 0.03749572625173662, l1loss = 0.08741736609610101, train acc = 0.9954887218045113,\n",
- "val_loss = 2.302085431047188, val_acc = 0.5658914728682171\n",
- "\n",
- "epoch: 45\n",
- "epoch 45: train loss = 0.03470267659067211, l1loss = 0.086931109906438, train acc = 0.9944862155388471,\n",
- "val_loss = 3.2026045303936153, val_acc = 0.5465116279069767\n",
- "\n",
- "epoch: 46\n",
- "epoch 46: train loss = 0.03238022409398155, l1loss = 0.08665003874397517, train acc = 0.9949874686716792,\n",
- "val_loss = 2.3425228115200074, val_acc = 0.5697674418604651\n",
- "\n",
- "epoch: 47\n",
- "epoch 47: train loss = 0.026900880723108624, l1loss = 0.08609663713396641, train acc = 0.9979949874686717,\n",
- "val_loss = 2.393414729440859, val_acc = 0.5387596899224806\n",
- "\n",
- "epoch: 48\n",
- "epoch 48: train loss = 0.023233257417093242, l1loss = 0.08549760701064776, train acc = 0.9984962406015038,\n",
- "val_loss = 2.7446321505446765, val_acc = 0.5348837209302325\n",
- "\n",
- "epoch: 49\n",
- "epoch 49: train loss = 0.024097669448582153, l1loss = 0.08505817094168866, train acc = 0.9974937343358395,\n",
- "val_loss = 2.4446980241657226, val_acc = 0.5387596899224806\n",
- "\n",
- "epoch: 50\n",
- "epoch 50: train loss = 0.01827189443740331, l1loss = 0.08443690010330133, train acc = 0.9989974937343359,\n",
- "val_loss = 2.4035656581553377, val_acc = 0.5232558139534884\n",
- "\n",
- "epoch: 51\n",
- "epoch 51: train loss = 0.016350781231334335, l1loss = 0.08388166891452961, train acc = 1.0,\n",
- "val_loss = 2.443206698395485, val_acc = 0.5155038759689923\n",
- "\n",
- "!!! overfitted !!!\n",
- "[1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1]\n",
- "[1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1]\n",
- "early stoping results:\n",
- "\t [0.6041666666666666, 0.5572916666666666, 0.5208333333333334, 0.5078534031413613, 0.5445026178010471, 0.6073298429319371, 0.5602094240837696, 0.5340314136125655, 0.5654450261780105]\n",
- "output = [0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0,\n",
- " 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0,\n",
- " 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1,\n",
- " 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0,\n",
- " 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0,\n",
- " 0, 1, 1, 0, 0, 1, 1, 0])\n",
- "output = [1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0,\n",
- " 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1,\n",
- " 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1,\n",
- " 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0,\n",
- " 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1,\n",
- " 1, 0, 0, 0, 1, 1, 1, 1])\n",
- "output = [1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0,\n",
- " 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1,\n",
- " 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1,\n",
- " 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0,\n",
- " 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0,\n",
- " 0, 1, 1, 1, 1, 1, 1, 1])\n",
- "output = [1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]\n",
- "label = tensor([1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0,\n",
- " 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0,\n",
- " 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0,\n",
- " 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0,\n",
- " 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1,\n",
- " 0, 0, 0, 0, 0, 0, 1, 0])\n",
- "output = [1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1,\n",
- " 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1,\n",
- " 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0,\n",
- " 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1,\n",
- " 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0,\n",
- " 1, 1, 1, 1, 1, 1, 0, 1])\n",
- "output = [1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0]\n",
- "label = tensor([1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0,\n",
- " 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0,\n",
- " 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0,\n",
- " 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1,\n",
- " 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0,\n",
- " 1, 0, 1, 1, 0, 0, 1, 0])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1,\n",
- " 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0,\n",
- " 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1,\n",
- " 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0,\n",
- " 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1,\n",
- " 0, 1, 1, 1, 1, 1, 0, 1])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0,\n",
- " 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1,\n",
- " 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0,\n",
- " 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1,\n",
- " 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0,\n",
- " 0, 0, 1, 1, 1, 1, 0, 1])\n",
- "output = [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0]\n",
- "label = tensor([0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1,\n",
- " 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0,\n",
- " 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1,\n",
- " 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0,\n",
- " 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n",
- " 0, 1, 0, 1, 1, 0, 0, 1])\n",
- "output = [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0,\n",
- " 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0,\n",
- " 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0,\n",
- " 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0,\n",
- " 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1,\n",
- " 1, 0, 1, 0, 0, 1, 1, 1])\n",
- "output = [1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1,\n",
- " 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0,\n",
- " 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0,\n",
- " 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1,\n",
- " 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0,\n",
- " 1, 1, 0, 0, 1, 0, 0, 1])\n",
- "output = [1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1,\n",
- " 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0,\n",
- " 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1,\n",
- " 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1,\n",
- " 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0,\n",
- " 1, 0, 0, 1, 0, 1, 1, 0])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0]\n",
- "label = tensor([0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1,\n",
- " 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0,\n",
- " 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
- " 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0,\n",
- " 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1,\n",
- " 1, 0, 1, 1, 0, 0, 0, 1])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0,\n",
- " 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0,\n",
- " 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1,\n",
- " 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1,\n",
- " 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0,\n",
- " 0, 0, 0, 0, 0, 1, 0, 1])\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "output = [1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0,\n",
- " 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1,\n",
- " 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0,\n",
- " 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1,\n",
- " 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1,\n",
- " 1, 0, 0, 1, 0, 1, 1, 0])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0,\n",
- " 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0,\n",
- " 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1,\n",
- " 1, 1, 1])\n",
- "\t [0.6673376950176145, 0.6722054380664653, 0.6802005012531328, 0.6826347305389222, 0.7250876314471708, 0.7, 0.6841579210394803, 0.6651606425702812, 0.668671679197995]\n",
- "[1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1]\n",
- "[1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1]\n",
- "full train results:\n",
- "\t [0.59375, 0.5885416666666666, 0.6354166666666666, 0.612565445026178, 0.6387434554973822, 0.6492146596858639, 0.6335078534031413, 0.6335078534031413, 0.612565445026178]\n",
- "\t [0.9949672873678913, 0.9974823766364552, 0.8666666666666667, 0.8393213572854291, 0.99949924887331, 0.638, 0.999000499750125, 0.9949799196787149, 0.9508771929824561]\n",
- "[1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0]\n",
- "[1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1]\n",
- "best accs results:\n",
- "\t [0.59375, 0.5625, 0.6354166666666666, 0.6282722513089005, 0.6020942408376964, 0.6387434554973822, 0.643979057591623, 0.6387434554973822, 0.643979057591623]\n",
- "\t [0.8037242073477604, 0.8670694864048338, 0.4967418546365915, 0.5489021956087824, 0.9744616925388082, 0.519, 0.47226386806596704, 0.7761044176706827, 0.49473684210526314]\n",
- "[1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0]\n",
- "-----------------------------Fold 10---------------\n",
- "preparing dataloaders...\n",
- "torch.Size([75, 7, 9, 20])\n",
- "coef when 0 > 1 1\n",
- "creating model...\n",
- "calculating total steps...\n",
- "epoch: 1\n",
- "validation loss decreased (inf ---> 0.695332), val_acc = 0.39147286821705424\n",
- "validation acc increased (0.000000 ---> 0.391473)\n",
- "validation acc increased (0.391473 ---> 0.391473)\n",
- "epoch 1: train loss = 0.6719437708207711, l1loss = 0.13840212986696904, train acc = 0.6165829145728643,\n",
- "val_loss = 0.7145830023196317, val_acc = 0.39147286821705424\n",
- "\n",
- "epoch: 2\n",
- "validation acc increased (0.391473 ---> 0.391473)\n",
- "validation acc increased (0.391473 ---> 0.391473)\n",
- "epoch 2: train loss = 0.6526712950150572, l1loss = 0.13800431019696757, train acc = 0.6326633165829145,\n",
- "val_loss = 0.7263189609660659, val_acc = 0.39147286821705424\n",
- "\n",
- "epoch: 3\n",
- "validation acc increased (0.391473 ---> 0.391473)\n",
- "validation acc increased (0.391473 ---> 0.406977)\n",
- "epoch 3: train loss = 0.6384344094362691, l1loss = 0.13752041265892623, train acc = 0.6432160804020101,\n",
- "val_loss = 0.719265153241712, val_acc = 0.45348837209302323\n",
- "\n",
- "epoch: 4\n",
- "validation acc increased (0.406977 ---> 0.457364)\n",
- "validation loss decreased (0.695332 ---> 0.691640), val_acc = 0.5310077519379846\n",
- "validation acc increased (0.457364 ---> 0.531008)\n",
- "epoch 4: train loss = 0.6233027500123834, l1loss = 0.13689041630407076, train acc = 0.6472361809045226,\n",
- "val_loss = 0.674999581981999, val_acc = 0.5465116279069767\n",
- "\n",
- "epoch: 5\n",
- "validation loss decreased (0.691640 ---> 0.671344), val_acc = 0.5465116279069767\n",
- "validation acc increased (0.531008 ---> 0.546512)\n",
- "validation loss decreased (0.671344 ---> 0.640276), val_acc = 0.5542635658914729\n",
- "validation acc increased (0.546512 ---> 0.554264)\n",
- "epoch 5: train loss = 0.6108603898604311, l1loss = 0.1360374202231067, train acc = 0.650251256281407,\n",
- "val_loss = 0.6320977502090986, val_acc = 0.5581395348837209\n",
- "\n",
- "epoch: 6\n",
- "validation loss decreased (0.640276 ---> 0.630428), val_acc = 0.5581395348837209\n",
- "validation acc increased (0.554264 ---> 0.558140)\n",
- "validation loss decreased (0.630428 ---> 0.619005), val_acc = 0.5658914728682171\n",
- "validation acc increased (0.558140 ---> 0.565891)\n",
- "epoch 6: train loss = 0.6001691482773978, l1loss = 0.13489290275166382, train acc = 0.657788944723618,\n",
- "val_loss = 0.6185085505478142, val_acc = 0.5658914728682171\n",
- "\n",
- "epoch: 7\n",
- "validation loss decreased (0.619005 ---> 0.617354), val_acc = 0.5658914728682171\n",
- "validation acc increased (0.565891 ---> 0.565891)\n",
- "validation loss decreased (0.617354 ---> 0.615356), val_acc = 0.5658914728682171\n",
- "validation acc increased (0.565891 ---> 0.565891)\n",
- "epoch 7: train loss = 0.5907169395355722, l1loss = 0.13343299792040533, train acc = 0.6693467336683417,\n",
- "val_loss = 0.6119359452133031, val_acc = 0.5736434108527132\n",
- "\n",
- "epoch: 8\n",
- "validation loss decreased (0.615356 ---> 0.612685), val_acc = 0.5736434108527132\n",
- "validation acc increased (0.565891 ---> 0.573643)\n",
- "validation loss decreased (0.612685 ---> 0.608328), val_acc = 0.5736434108527132\n",
- "validation acc increased (0.573643 ---> 0.573643)\n",
- "epoch 8: train loss = 0.5808460336234701, l1loss = 0.13162351006539025, train acc = 0.6733668341708543,\n",
- "val_loss = 0.6133937794108724, val_acc = 0.5736434108527132\n",
- "\n",
- "epoch: 9\n",
- "validation acc increased (0.573643 ---> 0.573643)\n",
- "validation acc increased (0.573643 ---> 0.577519)\n",
- "epoch 9: train loss = 0.5721211679017724, l1loss = 0.12944562336308274, train acc = 0.6798994974874372,\n",
- "val_loss = 0.6197530188301738, val_acc = 0.5775193798449613\n",
- "\n",
- "epoch: 10\n",
- "validation acc increased (0.577519 ---> 0.577519)\n",
- "validation acc increased (0.577519 ---> 0.581395)\n",
- "epoch 10: train loss = 0.5629525616540382, l1loss = 0.12692890809708504, train acc = 0.6889447236180904,\n",
- "val_loss = 0.6338218380083409, val_acc = 0.5775193798449613\n",
- "\n",
- "epoch: 11\n",
- "epoch 11: train loss = 0.5500448384476666, l1loss = 0.12408753369920816, train acc = 0.699497487437186,\n",
- "val_loss = 0.6388333800227143, val_acc = 0.5736434108527132\n",
- "\n",
- "epoch: 12\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "epoch 12: train loss = 0.5367419248250261, l1loss = 0.12101081039887576, train acc = 0.7075376884422111,\n",
- "val_loss = 0.6473393957744273, val_acc = 0.5736434108527132\n",
- "\n",
- "epoch: 13\n",
- "epoch 13: train loss = 0.5212755465627316, l1loss = 0.11783038612136888, train acc = 0.7266331658291457,\n",
- "val_loss = 0.6885562600322472, val_acc = 0.5658914728682171\n",
- "\n",
- "epoch: 14\n",
- "epoch 14: train loss = 0.5001930103230117, l1loss = 0.11466055483973805, train acc = 0.735678391959799,\n",
- "val_loss = 0.7727257928182912, val_acc = 0.5736434108527132\n",
- "\n",
- "epoch: 15\n",
- "epoch 15: train loss = 0.4774602065134288, l1loss = 0.11163033707507292, train acc = 0.7597989949748744,\n",
- "val_loss = 0.9347423278084097, val_acc = 0.5697674418604651\n",
- "\n",
- "epoch: 16\n",
- "validation acc increased (0.581395 ---> 0.596899)\n",
- "epoch 16: train loss = 0.45671038232257016, l1loss = 0.10889189650664977, train acc = 0.7688442211055276,\n",
- "val_loss = 0.6646126000456107, val_acc = 0.5658914728682171\n",
- "\n",
- "epoch: 17\n",
- "epoch 17: train loss = 0.4233617796969773, l1loss = 0.10659007389641287, train acc = 0.7919597989949749,\n",
- "val_loss = 0.7088340003361073, val_acc = 0.5542635658914729\n",
- "\n",
- "epoch: 18\n",
- "epoch 18: train loss = 0.39575232902363916, l1loss = 0.10463322919966588, train acc = 0.807035175879397,\n",
- "val_loss = 0.7800197698349176, val_acc = 0.5775193798449613\n",
- "\n",
- "epoch: 19\n",
- "epoch 19: train loss = 0.3720384899695315, l1loss = 0.10311959861361202, train acc = 0.8321608040201005,\n",
- "val_loss = 1.0530221295911213, val_acc = 0.5852713178294574\n",
- "\n",
- "epoch: 20\n",
- "epoch 20: train loss = 0.35293013776966076, l1loss = 0.10156447164077854, train acc = 0.8321608040201005,\n",
- "val_loss = 0.8265702262405277, val_acc = 0.5503875968992248\n",
- "\n",
- "epoch: 21\n",
- "epoch 21: train loss = 0.3344811164853561, l1loss = 0.10016776281385566, train acc = 0.8361809045226131,\n",
- "val_loss = 1.0450448842011681, val_acc = 0.5775193798449613\n",
- "\n",
- "epoch: 22\n",
- "epoch 22: train loss = 0.2859235891145677, l1loss = 0.09900103884575955, train acc = 0.8763819095477386,\n",
- "val_loss = 1.4950751149377157, val_acc = 0.5736434108527132\n",
- "\n",
- "epoch: 23\n",
- "epoch 23: train loss = 0.27330368340913974, l1loss = 0.09814592776586063, train acc = 0.8748743718592965,\n",
- "val_loss = 1.6195507289827331, val_acc = 0.6085271317829457\n",
- "\n",
- "epoch: 24\n",
- "validation acc increased (0.596899 ---> 0.608527)\n",
- "epoch 24: train loss = 0.2554657350233452, l1loss = 0.09719441908238521, train acc = 0.8904522613065327,\n",
- "val_loss = 1.7599091363507648, val_acc = 0.5697674418604651\n",
- "\n",
- "epoch: 25\n",
- "validation acc increased (0.608527 ---> 0.612403)\n",
- "epoch 25: train loss = 0.26244540217533785, l1loss = 0.09658051813068103, train acc = 0.8854271356783919,\n",
- "val_loss = 1.328415922416273, val_acc = 0.5852713178294574\n",
- "\n",
- "epoch: 26\n",
- "epoch 26: train loss = 0.2488035890025709, l1loss = 0.09598230193907292, train acc = 0.9025125628140703,\n",
- "val_loss = 3.3820917366086976, val_acc = 0.5581395348837209\n",
- "\n",
- "epoch: 27\n",
- "epoch 27: train loss = 0.24395602191213386, l1loss = 0.09520171582698822, train acc = 0.8814070351758794,\n",
- "val_loss = 3.163267457207968, val_acc = 0.5581395348837209\n",
- "\n",
- "epoch: 28\n",
- "epoch 28: train loss = 0.2519955996891961, l1loss = 0.09456676208793219, train acc = 0.8798994974874372,\n",
- "val_loss = 2.3792159464932228, val_acc = 0.5581395348837209\n",
- "\n",
- "epoch: 29\n",
- "epoch 29: train loss = 0.20381304789428137, l1loss = 0.09389582407384661, train acc = 0.9130653266331659,\n",
- "val_loss = 1.6001185594603073, val_acc = 0.5581395348837209\n",
- "\n",
- "epoch: 30\n",
- "epoch 30: train loss = 0.17334565265693858, l1loss = 0.09341333552670839, train acc = 0.9341708542713568,\n",
- "val_loss = 1.851118143214736, val_acc = 0.5930232558139535\n",
- "\n",
- "epoch: 31\n",
- "validation acc increased (0.612403 ---> 0.620155)\n",
- "epoch 31: train loss = 0.14162640643479238, l1loss = 0.09275954460828148, train acc = 0.9552763819095478,\n",
- "val_loss = 2.0247100628623667, val_acc = 0.5697674418604651\n",
- "\n",
- "epoch: 32\n",
- "epoch 32: train loss = 0.15501669086703104, l1loss = 0.09234665035452674, train acc = 0.9371859296482412,\n",
- "val_loss = 2.2792202677837636, val_acc = 0.5852713178294574\n",
- "\n",
- "epoch: 33\n",
- "epoch 33: train loss = 0.12440619978248774, l1loss = 0.09191003219416394, train acc = 0.9582914572864322,\n",
- "val_loss = 2.0286328884982323, val_acc = 0.5968992248062015\n",
- "\n",
- "epoch: 34\n",
- "epoch 34: train loss = 0.12274269695258021, l1loss = 0.09157498074536348, train acc = 0.9547738693467337,\n",
- "val_loss = 2.5109643936157227, val_acc = 0.562015503875969\n",
- "\n",
- "epoch: 35\n",
- "epoch 35: train loss = 0.09087972700895376, l1loss = 0.09097352933943571, train acc = 0.9753768844221106,\n",
- "val_loss = 2.107085255227348, val_acc = 0.5697674418604651\n",
- "\n",
- "epoch: 36\n",
- "epoch 36: train loss = 0.09877167490109727, l1loss = 0.09090764223780465, train acc = 0.9693467336683417,\n",
- "val_loss = 2.899521066236866, val_acc = 0.5775193798449613\n",
- "\n",
- "epoch: 37\n",
- "epoch 37: train loss = 0.12718479042526465, l1loss = 0.09116000884292114, train acc = 0.9567839195979899,\n",
- "val_loss = 2.9109171711850563, val_acc = 0.5697674418604651\n",
- "\n",
- "epoch: 38\n",
- "epoch 38: train loss = 0.15343157828752718, l1loss = 0.09161678471008138, train acc = 0.9492462311557789,\n",
- "val_loss = 3.6789348217868065, val_acc = 0.5736434108527132\n",
- "\n",
- "epoch: 39\n",
- "epoch 39: train loss = 0.17688065675934356, l1loss = 0.09161391867015829, train acc = 0.9256281407035176,\n",
- "val_loss = 2.8331502167753473, val_acc = 0.562015503875969\n",
- "\n",
- "epoch: 40\n",
- "epoch 40: train loss = 0.12294016767985856, l1loss = 0.09109236833887484, train acc = 0.957286432160804,\n",
- "val_loss = 3.345622025718985, val_acc = 0.562015503875969\n",
- "\n",
- "epoch: 41\n",
- "epoch 41: train loss = 0.09555761652750586, l1loss = 0.0903443555211901, train acc = 0.9708542713567839,\n",
- "val_loss = 2.0581300794616224, val_acc = 0.6046511627906976\n",
- "\n",
- "epoch: 42\n",
- "epoch 42: train loss = 0.07512064163559046, l1loss = 0.08976309295724984, train acc = 0.9748743718592965,\n",
- "val_loss = 3.453665764756905, val_acc = 0.5852713178294574\n",
- "\n",
- "epoch: 43\n",
- "epoch 43: train loss = 0.04777320917677041, l1loss = 0.0891012175897857, train acc = 0.9909547738693467,\n",
- "val_loss = 2.352656833885252, val_acc = 0.5930232558139535\n",
- "\n",
- "epoch: 44\n",
- "epoch 44: train loss = 0.049835165397231304, l1loss = 0.08854852927539816, train acc = 0.9889447236180905,\n",
- "val_loss = 2.9295913517013075, val_acc = 0.5658914728682171\n",
- "\n",
- "epoch: 45\n",
- "epoch 45: train loss = 0.04242832651689424, l1loss = 0.08794202994761155, train acc = 0.9919597989949749,\n",
- "val_loss = 2.413541268932727, val_acc = 0.5736434108527132\n",
- "\n",
- "epoch: 46\n",
- "epoch 46: train loss = 0.03263931775549848, l1loss = 0.08736478100769483, train acc = 0.9939698492462311,\n",
- "val_loss = 2.5269698264048426, val_acc = 0.5813953488372093\n",
- "\n",
- "epoch: 47\n",
- "epoch 47: train loss = 0.026620606537065914, l1loss = 0.08684112468556543, train acc = 0.9969849246231156,\n",
- "val_loss = 3.387900699940763, val_acc = 0.5813953488372093\n",
- "\n",
- "epoch: 48\n",
- "epoch 48: train loss = 0.02269124810457529, l1loss = 0.08632632712622983, train acc = 0.9979899497487437,\n",
- "val_loss = 2.787818894451737, val_acc = 0.5736434108527132\n",
- "\n",
- "epoch: 49\n",
- "epoch 49: train loss = 0.022012453552466543, l1loss = 0.08572817495719871, train acc = 0.9964824120603015,\n",
- "val_loss = 2.598147943962452, val_acc = 0.5891472868217055\n",
- "\n",
- "epoch: 50\n",
- "epoch 50: train loss = 0.02431201655885682, l1loss = 0.0853168428168824, train acc = 0.9979899497487437,\n",
- "val_loss = 2.618208586574034, val_acc = 0.5852713178294574\n",
- "\n",
- "epoch: 51\n",
- "epoch 51: train loss = 0.042026931474256755, l1loss = 0.08488889236995323, train acc = 0.9869346733668342,\n",
- "val_loss = 5.366614888804827, val_acc = 0.5465116279069767\n",
- "\n",
- "epoch: 52\n",
- "epoch 52: train loss = 0.05808617710767679, l1loss = 0.08540894663214084, train acc = 0.9819095477386934,\n",
- "val_loss = 2.219164011090301, val_acc = 0.6046511627906976\n",
- "\n",
- "epoch: 53\n",
- "epoch 53: train loss = 0.04891583280796981, l1loss = 0.0857906321185318, train acc = 0.9894472361809046,\n",
- "val_loss = 2.5789132524830425, val_acc = 0.5736434108527132\n",
- "\n",
- "epoch: 54\n",
- "epoch 54: train loss = 0.04095752583301846, l1loss = 0.08530921218682773, train acc = 0.9899497487437185,\n",
- "val_loss = 2.9466089703315914, val_acc = 0.562015503875969\n",
- "\n",
- "epoch: 55\n",
- "epoch 55: train loss = 0.01827716198960441, l1loss = 0.08464146351544702, train acc = 0.9994974874371859,\n",
- "val_loss = 2.840330068455186, val_acc = 0.562015503875969\n",
- "\n",
- "epoch: 56\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "epoch 56: train loss = 0.01295484382724717, l1loss = 0.08385968863514799, train acc = 1.0,\n",
- "val_loss = 3.1219512177992237, val_acc = 0.5658914728682171\n",
- "\n",
- "!!! overfitted !!!\n",
- "[0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1]\n",
- "[1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1]\n",
- "early stoping results:\n",
- "\t [0.6041666666666666, 0.5572916666666666, 0.5208333333333334, 0.5078534031413613, 0.5445026178010471, 0.6073298429319371, 0.5602094240837696, 0.5340314136125655, 0.5654450261780105, 0.5287958115183246]\n",
- "output = [1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1,\n",
- " 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0,\n",
- " 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1,\n",
- " 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0,\n",
- " 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1,\n",
- " 0, 1, 1, 1, 1, 0, 0, 1])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0]\n",
- "label = tensor([0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0,\n",
- " 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0,\n",
- " 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,\n",
- " 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1,\n",
- " 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0,\n",
- " 0, 0, 0, 0, 0, 1, 0, 1])\n",
- "output = [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0,\n",
- " 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0,\n",
- " 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1,\n",
- " 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0,\n",
- " 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n",
- " 0, 0, 1, 0, 1, 1, 1, 0])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0,\n",
- " 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1,\n",
- " 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0,\n",
- " 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1,\n",
- " 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1,\n",
- " 1, 0, 0, 0, 1, 0, 0, 1])\n",
- "output = [1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1,\n",
- " 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1,\n",
- " 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1,\n",
- " 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0,\n",
- " 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0,\n",
- " 0, 0, 0, 0, 1, 0, 0, 1])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0]\n",
- "label = tensor([0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1,\n",
- " 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1,\n",
- " 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1,\n",
- " 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0,\n",
- " 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1,\n",
- " 1, 1, 0, 0, 1, 1, 1, 0])\n",
- "output = [1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0,\n",
- " 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1,\n",
- " 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1,\n",
- " 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0,\n",
- " 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0,\n",
- " 1, 1, 0, 1, 1, 1, 1, 1])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1,\n",
- " 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1,\n",
- " 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1,\n",
- " 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0,\n",
- " 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0,\n",
- " 0, 0, 1, 0, 1, 1, 0, 0])\n",
- "output = [0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1,\n",
- " 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1,\n",
- " 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0,\n",
- " 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0,\n",
- " 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1,\n",
- " 0, 1, 0, 1, 0, 0, 1, 0])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0,\n",
- " 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1,\n",
- " 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1,\n",
- " 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1,\n",
- " 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1,\n",
- " 0, 0, 1, 1, 0, 1, 1, 1])\n",
- "output = [1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0]\n",
- "label = tensor([1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0,\n",
- " 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0,\n",
- " 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1,\n",
- " 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0,\n",
- " 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0,\n",
- " 1, 0, 1, 0, 0, 0, 1, 1])\n",
- "output = [1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1,\n",
- " 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1,\n",
- " 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0,\n",
- " 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1,\n",
- " 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0,\n",
- " 1, 1, 0, 1, 1, 0, 1, 1])\n",
- "output = [1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1,\n",
- " 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1,\n",
- " 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1,\n",
- " 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1,\n",
- " 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0,\n",
- " 0, 0, 0, 1, 1, 1, 1, 0])\n",
- "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0]\n",
- "label = tensor([0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,\n",
- " 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1,\n",
- " 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0,\n",
- " 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1,\n",
- " 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0,\n",
- " 0, 1, 0, 1, 0, 1, 0, 0])\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "output = [1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
- "label = tensor([1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1,\n",
- " 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1,\n",
- " 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1,\n",
- " 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1,\n",
- " 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0,\n",
- " 1, 0, 0, 1, 1, 0, 0, 1])\n",
- "output = [1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0]\n",
- "label = tensor([0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0,\n",
- " 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0,\n",
- " 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1])\n",
- "\t [0.6673376950176145, 0.6722054380664653, 0.6802005012531328, 0.6826347305389222, 0.7250876314471708, 0.7, 0.6841579210394803, 0.6651606425702812, 0.668671679197995, 0.6798994974874372]\n",
- "[1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1]\n",
- "[0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1]\n",
- "full train results:\n",
- "\t [0.59375, 0.5885416666666666, 0.6354166666666666, 0.612565445026178, 0.6387434554973822, 0.6492146596858639, 0.6335078534031413, 0.6335078534031413, 0.612565445026178, 0.6335078534031413]\n",
- "\t [0.9949672873678913, 0.9974823766364552, 0.8666666666666667, 0.8393213572854291, 0.99949924887331, 0.638, 0.999000499750125, 0.9949799196787149, 0.9508771929824561, 0.9798994974874372]\n",
- "[0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1]\n",
- "[0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1]\n",
- "best accs results:\n",
- "\t [0.59375, 0.5625, 0.6354166666666666, 0.6282722513089005, 0.6020942408376964, 0.6387434554973822, 0.643979057591623, 0.6387434554973822, 0.643979057591623, 0.6230366492146597]\n",
- "\t [0.8037242073477604, 0.8670694864048338, 0.4967418546365915, 0.5489021956087824, 0.9744616925388082, 0.519, 0.47226386806596704, 0.7761044176706827, 0.49473684210526314, 0.91356783919598]\n",
- "[1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0]\n"
- ]
- }
- ],
- "source": [
- "train_accs, test_accs = [], []\n",
- "train_accs_over, test_accs_over = [], []\n",
- "train_accs_acc, test_accs_acc = [], []\n",
- "\n",
- "for fold, (train_val_idx, test_idx) in enumerate(skf.split(data1, labels)):\n",
- " \n",
- " print('-----------------------------Fold {}---------------'.format(fold + 1))\n",
- "\n",
- " \n",
- " print('preparing dataloaders...')\n",
- "    print(data1.shape)\n",
- " train_val_data = np.stack([data1[index] for index in train_val_idx])\n",
- " train_val_label = [labels[index] for index in train_val_idx]\n",
- " test_data = np.stack([data1[index] for index in test_idx])\n",
- " test_label = [labels[index] for index in test_idx]\n",
- " \n",
- " \n",
- " Max = np.max(train_val_data, axis=(0,1,2), keepdims=True)\n",
- " Min = np.min(train_val_data, axis=(0,1,2), keepdims=True)\n",
- " train_val_data = (train_val_data-Min)/(Max-Min)\n",
- " \n",
- " Max_test = np.max(test_data, axis=(0,1,2), keepdims=True)\n",
- " Min_test = np.min(test_data, axis=(0,1,2), keepdims=True)\n",
- " test_data = (test_data-Min)/(Max-Min)\n",
- " \n",
- " \n",
- " train_val = [[train_val_data[i], train_val_label[i]] for i in range(len(train_val_data))]\n",
- " test = [[test_data[i], test_label[i]] for i in range(len(test_data))]\n",
- " \n",
- " num_train_val = len(train_val)\n",
- " indices = list(range(num_train_val))\n",
- " np.random.shuffle(indices)\n",
- " split = int(np.floor(val_size*num_train_val))\n",
- " train, val = [train_val[i] for i in indices[split:]] ,[train_val[i] for i in indices[:split]]\n",
- " \n",
- "    train_labels = [item[1] for item in train]\n",
- " \n",
- " oversample = 1\n",
- " _, counts = np.unique(train_labels, return_counts=True)\n",
- " if oversample==1:\n",
- " if counts[1]>counts[0]:\n",
- " label0 = [data for data in train if data[1]==0]\n",
- " coef = int(counts[1]/counts[0])\n",
- " print('coef when 1 > 0', coef)\n",
- " for i in range(coef):\n",
- " train = train + label0\n",
- " elif counts[1]<counts[0]:\n",
- " label1 = [data for data in train if data[1]==1]\n",
- " coef = int(counts[0]/counts[1])\n",
- " print('coef when 0 > 1', coef)\n",
- " for i in range(coef):\n",
- " train = train + label1\n",
- " \n",
- "\n",
- " train_loader = torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=True)\n",
- " val_loader = torch.utils.data.DataLoader(val, batch_size=batch_size, shuffle=True)\n",
- " test_loader = torch.utils.data.DataLoader(test, batch_size=batch_size, shuffle=True)\n",
- " \n",
- " print('creating model...')\n",
- " model = cnn().float()\n",
- " optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)\n",
- " criterion = nn.BCELoss()\n",
- " \n",
- " print('calculating total steps...')\n",
- " steps = 0\n",
- " for epoch in range(n_epochs):\n",
- " for data, label in train_loader:\n",
- " steps += 1\n",
- "\n",
- " scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, total_steps=steps, max_lr=0.001)\n",
- " scheduler1 = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer)\n",
- " l1_lambda = 0.0001\n",
- " \n",
- " min_val_loss = np.inf\n",
- " max_val_acc = 0\n",
- " \n",
- " for epoch in range(n_epochs):\n",
- " print('epoch: ', epoch+1)\n",
- " train_loss = 0\n",
- " l1_loss = 0\n",
- " train_correct = 0\n",
- " model.train()\n",
- " '''for name, param in model.named_parameters():\n",
- " print(name, param.data)\n",
- " break'''\n",
- " for iteration, (data,label) in enumerate(train_loader):\n",
- " #print('\\ndata = ', torch.amax(data, axis=(0,1,2,4)), torch.amin(data, axis=(0,1,2,4)))\n",
- " optimizer.zero_grad()\n",
- " output = model(data.float())\n",
- " label = torch.reshape(label, (-1,1))\n",
- " label = label.float()\n",
- " loss = criterion(output, label)\n",
- " add_loss = loss\n",
- " ex_loss = 0\n",
- " for W in model.parameters():\n",
- " ex_loss += l1_lambda*W.norm(1)\n",
- " loss = loss + l1_lambda*W.norm(1) \n",
- " loss.backward()\n",
- " optimizer.step()\n",
- " scheduler.step()\n",
- " targets = [1 if output[i].round()==label[i] else 0 for i in range(len(label))]\n",
- " train_correct += sum(targets)\n",
- " train_loss += add_loss.item()*data.shape[0]\n",
- " l1_loss += ex_loss.item()*data.shape[0]\n",
- " \n",
- " if iteration % print_every == 0:\n",
- " is_training = True\n",
- " val_loss = 0\n",
- " val_correct = 0\n",
- " model.eval()\n",
- " for data, label in val_loader:\n",
- " output = model(data.float())\n",
- " label = torch.reshape(label, (-1,1))\n",
- " label = label.float()\n",
- " loss = criterion(output, label) \n",
- " val_loss += loss.item()*data.shape[0]\n",
- " targets = [1 if output[i].round()==label[i] else 0 for i in range(len(label))]\n",
- " val_correct += sum(targets)\n",
- " val_loss = val_loss/len(val_loader.sampler)\n",
- " val_acc = val_correct/len(val_loader.sampler)\n",
- "\n",
- " if val_loss <= min_val_loss:\n",
- " print(\"validation loss decreased ({:.6f} ---> {:.6f}), val_acc = {}\".format(min_val_loss, val_loss, val_acc))\n",
- " torch.save(model.state_dict(), 'sal/model'+str(fold)+'.pt')\n",
- " min_val_loss = val_loss\n",
- " if val_acc >= max_val_acc:\n",
- " print(\"validation acc increased ({:.6f} ---> {:.6f})\".format(max_val_acc, val_acc))\n",
- " torch.save(model.state_dict(), 'sal/model'+str(fold)+'_acc.pt')\n",
- " max_val_acc = val_acc\n",
- " torch.save(model.state_dict(), 'sal/last_model'+str(fold)+'.pt')\n",
- " model.train(mode=is_training)\n",
- " \n",
- " train_acc = train_correct/len(train_loader.sampler) \n",
- " train_loss = train_loss/len(train_loader.sampler)\n",
- " loss1 = l1_loss/len(train_loader.sampler)\n",
- " \n",
- " val_loss = 0\n",
- " val_correct = 0\n",
- " model.eval()\n",
- " for data, label in val_loader:\n",
- " output = model(data.float())\n",
- " label = torch.reshape(label, (-1,1))\n",
- " label = label.float()\n",
- " loss = criterion(output, label) \n",
- " val_loss += loss.item()*data.shape[0]\n",
- " targets = [1 if output[i].round()==label[i] else 0 for i in range(len(label))]\n",
- " val_correct += sum(targets)\n",
- " \n",
- " val_loss = val_loss/len(val_loader.sampler)\n",
- " val_acc = val_correct/len(val_loader.sampler)\n",
- " \n",
- " print('epoch {}: train loss = {}, l1loss = {}, train acc = {},\\nval_loss = {}, val_acc = {}\\n'\n",
- " .format(epoch+1, train_loss, loss1, train_acc, val_loss, val_acc))\n",
- " if int(train_acc)==1:\n",
- " print('!!! overfitted !!!')\n",
- " break\n",
- " model.train()\n",
- " #scheduler1.step(val_loss)\n",
- " \n",
- " model =cnn().float()\n",
- " model.load_state_dict(torch.load('sal/model'+str(fold)+'.pt'))\n",
- " \n",
- " n_correct = 0\n",
- " model.eval()\n",
- " for data, label in test_loader:\n",
- " output = model(data.float())\n",
- " targets = [1 if output[i].round()==label[i] else 0 for i in range(len(label))]\n",
- " print(targets)\n",
- " n_correct += sum(targets)\n",
- " \n",
- " test_accs.append(n_correct/len(test_loader.sampler))\n",
- "    print('early stopping results:\\n\\t', test_accs)\n",
- " \n",
- " n_correct = 0\n",
- " model.eval()\n",
- " for data, label in train_loader:\n",
- " output = model(data.float())\n",
- " print('output = ', [output[i].round().item() for i in range(len(label))])\n",
- " print('label = ', label)\n",
- " targets = [1 if output[i].round()==label[i] else 0 for i in range(len(label))]\n",
- " n_correct += sum(targets)\n",
- " \n",
- " train_accs.append(n_correct/len(train_loader.sampler))\n",
- " print('\\t', train_accs)\n",
- " \n",
- " model = cnn().float()\n",
- " model.load_state_dict(torch.load('sal/last_model'+str(fold)+'.pt'))\n",
- " \n",
- " n_correct = 0\n",
- " model.eval()\n",
- " for data, label in test_loader:\n",
- " output = model(data.float())\n",
- " targets = [1 if output[i].round()==label[i] else 0 for i in range(len(label))]\n",
- " print(targets)\n",
- " n_correct += sum(targets)\n",
- " test_accs_over.append(n_correct/len(test_loader.sampler))\n",
- " print('full train results:\\n\\t', test_accs_over)\n",
- " \n",
- " n_correct = 0\n",
- " model.eval()\n",
- " for data, label in train_loader:\n",
- " output = model(data.float())\n",
- " targets = [1 if output[i].round()==label[i] else 0 for i in range(len(label))]\n",
- " n_correct += sum(targets)\n",
- " train_accs_over.append(n_correct/len(train_loader.sampler))\n",
- " print('\\t', train_accs_over)\n",
- " \n",
- " model = cnn().float()\n",
- " model.load_state_dict(torch.load('sal/model'+str(fold)+'_acc.pt'))\n",
- " \n",
- " n_correct = 0\n",
- " model.eval()\n",
- " for data, label in test_loader:\n",
- " output = model(data.float())\n",
- " targets = [1 if output[i].round()==label[i] else 0 for i in range(len(label))]\n",
- " print(targets)\n",
- " n_correct += sum(targets)\n",
- " test_accs_acc.append(n_correct/len(test_loader.sampler))\n",
- " print('best accs results:\\n\\t', test_accs_acc)\n",
- " \n",
- " n_correct = 0\n",
- " model.eval()\n",
- " for data, label in train_loader:\n",
- " output = model(data.float())\n",
- " targets = [1 if output[i].round()==label[i] else 0 for i in range(len(label))]\n",
- " n_correct += sum(targets)\n",
- " train_accs_acc.append(n_correct/len(train_loader.sampler))\n",
- " print('\\t', train_accs_acc)\n",
- " print(test_label)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 286,
- "id": "27ca56ee",
- "metadata": {},
- "outputs": [
- {
- "data": {
- "text/plain": [
- "0.6210514834205934"
- ]
- },
- "execution_count": 286,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "sum(test_accs_acc)/len(test_accs_acc)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 26,
- "id": "4043da8a",
- "metadata": {},
- "outputs": [],
- "source": [
- "from sklearn.linear_model import Perceptron"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 189,
- "id": "468a56e4",
- "metadata": {},
- "outputs": [
- {
- "data": {
- "text/plain": [
- "(1913, 7, 9, 20, 11)"
- ]
- },
- "execution_count": 189,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "dataset = picture_data_train\n",
- "dataset.shape"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 220,
- "id": "d4dc1a51",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "(1913, 7, 9, 20) 1913\n"
- ]
- }
- ],
- "source": [
- "mean_data = np.mean(dataset, axis=(4))\n",
- "labels = vowel_label\n",
- "print(mean_data.shape, len(labels))"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 221,
- "id": "904a96d9",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "(1913, 5) (1913, 5)\n"
- ]
- }
- ],
- "source": [
- "data1 = mean_data[:,[2,2,3,3,2],[2,4,5,5,3],[16,2,9,3,2]]\n",
- "X = data1.reshape((-1,5))\n",
- "print(data1.shape, X.shape)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 222,
- "id": "fd76f71a",
- "metadata": {},
- "outputs": [],
- "source": [
- "X_train, X_test, y_train, y_test = train_test_split(X, labels, test_size=0.1, random_state=42)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 223,
- "id": "c9ddc846",
- "metadata": {},
- "outputs": [],
- "source": [
- "clf = Perceptron(tol=1e-3, random_state=0)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 224,
- "id": "d3b2a352",
- "metadata": {},
- "outputs": [
- {
- "data": {
- "text/html": [
- "<style>#sk-container-id-28 {color: black;background-color: white;}#sk-container-id-28 pre{padding: 0;}#sk-container-id-28 div.sk-toggleable {background-color: white;}#sk-container-id-28 label.sk-toggleable__label {cursor: pointer;display: block;width: 100%;margin-bottom: 0;padding: 0.3em;box-sizing: border-box;text-align: center;}#sk-container-id-28 label.sk-toggleable__label-arrow:before {content: \"▸\";float: left;margin-right: 0.25em;color: #696969;}#sk-container-id-28 label.sk-toggleable__label-arrow:hover:before {color: black;}#sk-container-id-28 div.sk-estimator:hover label.sk-toggleable__label-arrow:before {color: black;}#sk-container-id-28 div.sk-toggleable__content {max-height: 0;max-width: 0;overflow: hidden;text-align: left;background-color: #f0f8ff;}#sk-container-id-28 div.sk-toggleable__content pre {margin: 0.2em;color: black;border-radius: 0.25em;background-color: #f0f8ff;}#sk-container-id-28 input.sk-toggleable__control:checked~div.sk-toggleable__content {max-height: 200px;max-width: 100%;overflow: auto;}#sk-container-id-28 input.sk-toggleable__control:checked~label.sk-toggleable__label-arrow:before {content: \"▾\";}#sk-container-id-28 div.sk-estimator input.sk-toggleable__control:checked~label.sk-toggleable__label {background-color: #d4ebff;}#sk-container-id-28 div.sk-label input.sk-toggleable__control:checked~label.sk-toggleable__label {background-color: #d4ebff;}#sk-container-id-28 input.sk-hidden--visually {border: 0;clip: rect(1px 1px 1px 1px);clip: rect(1px, 1px, 1px, 1px);height: 1px;margin: -1px;overflow: hidden;padding: 0;position: absolute;width: 1px;}#sk-container-id-28 div.sk-estimator {font-family: monospace;background-color: #f0f8ff;border: 1px dotted black;border-radius: 0.25em;box-sizing: border-box;margin-bottom: 0.5em;}#sk-container-id-28 div.sk-estimator:hover {background-color: #d4ebff;}#sk-container-id-28 div.sk-parallel-item::after {content: \"\";width: 100%;border-bottom: 1px solid gray;flex-grow: 1;}#sk-container-id-28 
div.sk-label:hover label.sk-toggleable__label {background-color: #d4ebff;}#sk-container-id-28 div.sk-serial::before {content: \"\";position: absolute;border-left: 1px solid gray;box-sizing: border-box;top: 0;bottom: 0;left: 50%;z-index: 0;}#sk-container-id-28 div.sk-serial {display: flex;flex-direction: column;align-items: center;background-color: white;padding-right: 0.2em;padding-left: 0.2em;position: relative;}#sk-container-id-28 div.sk-item {position: relative;z-index: 1;}#sk-container-id-28 div.sk-parallel {display: flex;align-items: stretch;justify-content: center;background-color: white;position: relative;}#sk-container-id-28 div.sk-item::before, #sk-container-id-28 div.sk-parallel-item::before {content: \"\";position: absolute;border-left: 1px solid gray;box-sizing: border-box;top: 0;bottom: 0;left: 50%;z-index: -1;}#sk-container-id-28 div.sk-parallel-item {display: flex;flex-direction: column;z-index: 1;position: relative;background-color: white;}#sk-container-id-28 div.sk-parallel-item:first-child::after {align-self: flex-end;width: 50%;}#sk-container-id-28 div.sk-parallel-item:last-child::after {align-self: flex-start;width: 50%;}#sk-container-id-28 div.sk-parallel-item:only-child::after {width: 0;}#sk-container-id-28 div.sk-dashed-wrapped {border: 1px dashed gray;margin: 0 0.4em 0.5em 0.4em;box-sizing: border-box;padding-bottom: 0.4em;background-color: white;}#sk-container-id-28 div.sk-label label {font-family: monospace;font-weight: bold;display: inline-block;line-height: 1.2em;}#sk-container-id-28 div.sk-label-container {text-align: center;}#sk-container-id-28 div.sk-container {/* jupyter's `normalize.less` sets `[hidden] { display: none; }` but bootstrap.min.css set `[hidden] { display: none !important; }` so we also need the `!important` here to be able to override the default hidden behavior on the sphinx rendered scikit-learn.org. 
See: https://github.com/scikit-learn/scikit-learn/issues/21755 */display: inline-block !important;position: relative;}#sk-container-id-28 div.sk-text-repr-fallback {display: none;}</style><div id=\"sk-container-id-28\" class=\"sk-top-container\"><div class=\"sk-text-repr-fallback\"><pre>Perceptron()</pre><b>In a Jupyter environment, please rerun this cell to show the HTML representation or trust the notebook. <br />On GitHub, the HTML representation is unable to render, please try loading this page with nbviewer.org.</b></div><div class=\"sk-container\" hidden><div class=\"sk-item\"><div class=\"sk-estimator sk-toggleable\"><input class=\"sk-toggleable__control sk-hidden--visually\" id=\"sk-estimator-id-28\" type=\"checkbox\" checked><label for=\"sk-estimator-id-28\" class=\"sk-toggleable__label sk-toggleable__label-arrow\">Perceptron</label><div class=\"sk-toggleable__content\"><pre>Perceptron()</pre></div></div></div></div></div>"
- ],
- "text/plain": [
- "Perceptron()"
- ]
- },
- "execution_count": 224,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "clf.fit(X_train, y_train)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 225,
- "id": "6c07a732",
- "metadata": {},
- "outputs": [
- {
- "data": {
- "text/plain": [
- "0.8128994770482277"
- ]
- },
- "execution_count": 225,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "clf.score(X_train, y_train)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 226,
- "id": "472916ef",
- "metadata": {},
- "outputs": [
- {
- "data": {
- "text/plain": [
- "0.8697916666666666"
- ]
- },
- "execution_count": 226,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "clf.score(X_test, y_test)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 21,
- "id": "83916158",
- "metadata": {},
- "outputs": [
- {
- "data": {
- "text/plain": [
- "{'alpha': 0.0001,\n",
- " 'class_weight': None,\n",
- " 'early_stopping': False,\n",
- " 'eta0': 1.0,\n",
- " 'fit_intercept': True,\n",
- " 'l1_ratio': 0.15,\n",
- " 'max_iter': 1000,\n",
- " 'n_iter_no_change': 5,\n",
- " 'n_jobs': None,\n",
- " 'penalty': None,\n",
- " 'random_state': 0,\n",
- " 'shuffle': True,\n",
- " 'tol': 0.001,\n",
- " 'validation_fraction': 0.1,\n",
- " 'verbose': 0,\n",
- " 'warm_start': False}"
- ]
- },
- "execution_count": 21,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "clf.get_params()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "19bffc72",
- "metadata": {},
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3 (ipykernel)",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.9.7"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 5
- }
|