
captum.ipynb

{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "98abe12e",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"C:\\Users\\saeed\\Desktop\\Master\\bci\\lib\\site-packages\\tqdm\\auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
"  from .autonotebook import tqdm as notebook_tqdm\n"
]
}
],
"source": [
"import torch\n",
"import torch.nn as nn\n",
"import torch.nn.functional as F\n",
"from sklearn.model_selection import train_test_split\n",
"from sklearn.model_selection import KFold, StratifiedKFold\n",
"import librosa\n",
"import librosa.display\n",
"import IPython.display as ipd\n",
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"import scipy.io\n",
"from tqdm import tqdm\n",
"import glob\n",
"import os\n",
"import json\n",
"import pickle\n",
"from einops import rearrange\n",
"from captum.attr import DeepLift, Saliency\n",
"from captum.attr import visualization as viz"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "cd5442b9",
"metadata": {},
"outputs": [],
"source": [
"with open('bilab_10mfc_train.pkl', 'rb') as f:\n",
"    mfc_train = pickle.load(f)\n",
"with open('bilab_10mfc_test.pkl', 'rb') as f:\n",
"    mfc_test = pickle.load(f)\n",
"with open(\"data/bilabial/y_bilabial_train.pkl\", \"rb\") as f:\n",
"    y__train = pickle.load(f)\n",
"with open(\"data/bilabial/y_bilabial_test.pkl\", \"rb\") as f:\n",
"    y__test = pickle.load(f)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "498285d5",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": 3,
"id": "01f70ae5",
"metadata": {},
"outputs": [],
"source": [
"trials = []\n",
"for trial in mfc_train:\n",
"    pic = np.zeros((7,9,10,11))\n",
"    pic[0,2] = trial[3]\n",
"    pic[0,3] = trial[0]\n",
"    pic[0,4] = trial[1]\n",
"    pic[0,5] = trial[2]\n",
"    pic[0,6] = trial[4]\n",
"    pic[1,:] = trial[5:14]\n",
"    pic[2,:] = trial[14:23]\n",
"    pic[3,:] = trial[23:32]\n",
"    pic[4,:] = trial[32:41]\n",
"    pic[5,:] = trial[41:50]\n",
"    pic[6,0] = trial[50]\n",
"    pic[6,1] = trial[51]\n",
"    pic[6,2] = trial[52]\n",
"    pic[6,3] = trial[58]\n",
"    pic[6,4] = trial[53]\n",
"    pic[6,5] = trial[60]\n",
"    pic[6,6] = trial[54]\n",
"    pic[6,7] = trial[55]\n",
"    pic[6,8] = trial[56]\n",
"    trials.append(pic)\n",
"picture_data_train = np.array(trials)\n",
"trials = []\n",
"for trial in mfc_test:\n",
"    pic = np.zeros((7,9,10,11))\n",
"    pic[0,2] = trial[3]\n",
"    pic[0,3] = trial[0]\n",
"    pic[0,4] = trial[1]\n",
"    pic[0,5] = trial[2]\n",
"    pic[0,6] = trial[4]\n",
"    pic[1,:] = trial[5:14]\n",
"    pic[2,:] = trial[14:23]\n",
"    pic[3,:] = trial[23:32]\n",
"    pic[4,:] = trial[32:41]\n",
"    pic[5,:] = trial[41:50]\n",
"    pic[6,0] = trial[50]\n",
"    pic[6,1] = trial[51]\n",
"    pic[6,2] = trial[52]\n",
"    pic[6,3] = trial[58]\n",
"    pic[6,4] = trial[53]\n",
"    pic[6,5] = trial[60]\n",
"    pic[6,6] = trial[54]\n",
"    pic[6,7] = trial[55]\n",
"    pic[6,8] = trial[56]\n",
"    trials.append(pic)\n",
"picture_data_test = np.array(trials)"
]
},
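{
"cell_type": "code",
"execution_count": null,
"id": "3f0c2b1a",
"metadata": {},
"outputs": [],
"source": [
"# Optional refactor (a sketch we added, not part of the original run): the train and\n",
"# test loops above apply the same channel-to-grid mapping twice. The helper below\n",
"# factors it out; the name `channels_to_grid` and the `n_mfcc`/`n_time` parameters\n",
"# are our own additions.\n",
"def channels_to_grid(data, n_mfcc=10, n_time=11):\n",
"    # data: (trials, channels, n_mfcc, n_time) -> (trials, 7, 9, n_mfcc, n_time)\n",
"    grid_index = {(0,2): 3, (0,3): 0, (0,4): 1, (0,5): 2, (0,6): 4,\n",
"                  (6,0): 50, (6,1): 51, (6,2): 52, (6,3): 58, (6,4): 53,\n",
"                  (6,5): 60, (6,6): 54, (6,7): 55, (6,8): 56}\n",
"    out = np.zeros((len(data), 7, 9, n_mfcc, n_time))\n",
"    for i, trial in enumerate(data):\n",
"        for row in range(1, 6):  # rows 1-5 are contiguous blocks of 9 channels\n",
"            out[i, row, :] = trial[9 * row - 4 : 9 * row + 5]\n",
"        for (r, c), ch in grid_index.items():  # irregular top and bottom rows\n",
"            out[i, r, c] = trial[ch]\n",
"    return out\n",
"\n",
"# picture_data_train = channels_to_grid(mfc_train)\n",
"# picture_data_test = channels_to_grid(mfc_test)"
]
},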
{
"cell_type": "code",
"execution_count": 5,
"id": "c6e5d06e",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(1913, 7, 9, 10, 11)"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"dataset = np.vstack((picture_data_train, picture_data_test))\n",
"dataset.shape"
]
},
{
"cell_type": "code",
"execution_count": 32,
"id": "c0cd1a50",
"metadata": {},
"outputs": [],
"source": [
"labels = y__train + y__test"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "19f34131",
"metadata": {},
"outputs": [],
"source": [
"#model\n",
"\n",
"class CNN_RNN(nn.Module):\n",
"    def __init__(self):\n",
"        super().__init__()\n",
"        self.conv1 = nn.Conv2d(20, 16, 3)\n",
"        #torch.nn.init.xavier_normal_(self.conv1.weight)\n",
"        self.pool = nn.MaxPool2d(2, 1)\n",
"        self.conv2 = nn.Conv2d(16, 32, 3)\n",
"        #torch.nn.init.xavier_normal_(self.conv2.weight)\n",
"        self.lstm = nn.LSTM(input_size=256, hidden_size=128, num_layers=2, batch_first=True)\n",
"        self.fc = nn.Linear(128, 2)\n",
"        #torch.nn.init.xavier_normal_(self.fc.weight)\n",
"        self.batch1 = nn.BatchNorm2d(16)\n",
"        self.batch2 = nn.BatchNorm2d(32)\n",
"        self.relu1 = nn.ReLU()\n",
"        self.relu2 = nn.ReLU()\n",
"\n",
"    def forward(self, x):\n",
"        hidden = torch.zeros(2, x.shape[0], 128), torch.zeros(2, x.shape[0], 128)\n",
"        # x: (batch, height, width, feature, time)\n",
"        #print(x.shape)\n",
"        x = rearrange(x, 'batch height width feature time -> (batch time) feature height width')\n",
"        #print(x.shape)\n",
"        out = self.pool(self.relu1(self.batch1(self.conv1(x))))\n",
"        #print(out.shape)\n",
"        out = self.relu2(self.batch2(self.conv2(out)))\n",
"        #print(out.shape)\n",
"        out = rearrange(out, '(batch time) channel height width -> batch time (channel height width)', time=11)\n",
"        #print(out.shape)\n",
"        out, hidden = self.lstm(out, hidden)\n",
"        out = out[:,-1,:]\n",
"        out = self.fc(out)\n",
"        return out\n",
"\n",
"class FC(nn.Module):\n",
"    def __init__(self, hidden1=500):\n",
"        super(FC, self).__init__()\n",
"        self.fc1 = nn.Linear(6820, hidden1)\n",
"        torch.nn.init.xavier_normal_(self.fc1.weight)\n",
"        self.fc2 = nn.Linear(hidden1, 1)\n",
"        torch.nn.init.xavier_normal_(self.fc2.weight)\n",
"        self.dropout = nn.Dropout(0.3)\n",
"\n",
"    def forward(self, x):\n",
"        x = x.view(-1, 6820)\n",
"        x = F.relu(self.fc1(x))\n",
"        #x = self.dropout(x)\n",
"        x = torch.sigmoid(self.fc2(x))\n",
"        return x\n",
"\n",
"class cnn3d(nn.Module):\n",
"    def __init__(self):\n",
"        super().__init__()\n",
"        self.conv1 = nn.Conv3d(20, 16, kernel_size=(3, 3, 3), padding=1)\n",
"        self.conv2 = nn.Conv3d(16, 32, kernel_size=(3, 3, 3), padding=0)\n",
"        self.pool = nn.MaxPool3d((2, 2, 2), stride=2)\n",
"        self.fc1 = nn.Linear(192, 128)\n",
"        self.fc2 = nn.Linear(128, 1)\n",
"        self.drop = nn.Dropout(0.25)\n",
"        self.batch1 = nn.BatchNorm3d(16)\n",
"        self.batch2 = nn.BatchNorm3d(32)\n",
"        self.batch3 = nn.BatchNorm1d(128)\n",
"\n",
"    def forward(self, x):\n",
"        x = rearrange(x, 'n h w m t -> n m t h w')\n",
"        out = self.pool(F.relu(self.batch1(self.conv1(x))))\n",
"        out = F.relu(self.batch2(self.conv2(out)))\n",
"        out = out.view(out.size(0), -1)\n",
"        out = self.drop(F.relu(self.batch3(self.fc1(out))))\n",
"        out = torch.sigmoid(self.fc2(out))\n",
"        return out"
]
},
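{
"cell_type": "code",
"execution_count": null,
"id": "5d8e41c7",
"metadata": {},
"outputs": [],
"source": [
"# Quick sanity check (our addition, not from the original run): CNN_RNN expects\n",
"# (batch, height=7, width=9, feature=20, time=11). Each of the 11 time steps is run\n",
"# through the 2D CNN (7x9 -> 5x7 after conv1 -> 4x6 after pool -> 2x4 after conv2,\n",
"# 32 channels x 2 x 4 = 256 features), then the 11-step sequence feeds the LSTM.\n",
"dummy = torch.rand(4, 7, 9, 20, 11)\n",
"with torch.no_grad():\n",
"    logits = CNN_RNN().float().eval()(dummy)\n",
"print(logits.shape)  # expected: torch.Size([4, 2])"
]
},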
{
"cell_type": "code",
"execution_count": 85,
"id": "fbe27fb8",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"(1913, 62, 20, 11) 1913\n"
]
}
],
"source": [
"with open(\"data/normal_all_data.pkl\", \"rb\") as f:\n",
"    all_data = pickle.load(f)\n",
"with open(\"data/11_20mfc.pkl\", \"rb\") as f:\n",
"    data = pickle.load(f)\n",
"with open(\"data/all_label.pkl\", \"rb\") as f:\n",
"    labels = pickle.load(f)\n",
"with open(\"data/vowel_label.pkl\", \"rb\") as f:\n",
"    vowel_label = pickle.load(f)\n",
"with open(\"data/bilab_label.pkl\", \"rb\") as f:\n",
"    bilab_label = pickle.load(f)\n",
"with open(\"data/nasal_label.pkl\", \"rb\") as f:\n",
"    nasal_label = pickle.load(f)\n",
"with open(\"data/iy_label.pkl\", \"rb\") as f:\n",
"    iy_label = pickle.load(f)\n",
"with open(\"data/uw_label.pkl\", \"rb\") as f:\n",
"    uw_label = pickle.load(f)\n",
"\n",
"print(all_data.shape, len(uw_label))"
]
},
{
"cell_type": "code",
"execution_count": 86,
"id": "d79522e8",
"metadata": {},
"outputs": [],
"source": [
"trials = []\n",
"for trial in all_data:\n",
"    pic = np.zeros((7,9,20,11))\n",
"    pic[0,2] = trial[3]\n",
"    pic[0,3] = trial[0]\n",
"    pic[0,4] = trial[1]\n",
"    pic[0,5] = trial[2]\n",
"    pic[0,6] = trial[4]\n",
"    pic[1,:] = trial[5:14]\n",
"    pic[2,:] = trial[14:23]\n",
"    pic[3,:] = trial[23:32]\n",
"    pic[4,:] = trial[32:41]\n",
"    pic[5,:] = trial[41:50]\n",
"    pic[6,0] = trial[50]\n",
"    pic[6,1] = trial[51]\n",
"    pic[6,2] = trial[52]\n",
"    pic[6,3] = trial[58]\n",
"    pic[6,4] = trial[53]\n",
"    pic[6,5] = trial[60]\n",
"    pic[6,6] = trial[54]\n",
"    pic[6,7] = trial[55]\n",
"    pic[6,8] = trial[56]\n",
"    trials.append(pic)\n",
"picture_data_train = np.array(trials)"
]
},
{
"cell_type": "code",
"execution_count": 87,
"id": "b2c4df69",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"(1913, 7, 9, 20, 11) 1913\n"
]
}
],
"source": [
"dataset = picture_data_train\n",
"labels = nasal_label\n",
"print(dataset.shape, len(labels))"
]
},
{
"cell_type": "code",
"execution_count": 253,
"id": "ba72e9fb",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(1913, 7, 9, 20, 11)"
]
},
"execution_count": 253,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"Max = np.max(dataset, axis=(0,1,2,4), keepdims=True)\n",
"Min = np.min(dataset, axis=(0,1,2,4), keepdims=True)\n",
"dataset = (dataset-Min)/(Max-Min)\n",
"dataset.shape"
]
},
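{
"cell_type": "code",
"execution_count": null,
"id": "9c4e7a12",
"metadata": {},
"outputs": [],
"source": [
"# Note (our addition): reducing over axes (0,1,2,4) but not axis 3 makes the cell\n",
"# above a per-MFCC-coefficient min-max scaling, shared across trials, grid\n",
"# positions and time steps. A quick check that every coefficient now spans [0, 1]:\n",
"assert dataset.min() >= 0.0 and dataset.max() <= 1.0\n",
"print(dataset.min(axis=(0,1,2,4)), dataset.max(axis=(0,1,2,4)))"
]
},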
{
"cell_type": "code",
"execution_count": 256,
"id": "45d5b88c",
"metadata": {
"scrolled": true
},
"outputs": [
{
"data": {
"text/plain": [
"<All keys matched successfully>"
]
},
"execution_count": 256,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"model = CNN_RNN().float()\n",
"model.load_state_dict(torch.load('train/model0_acc.pt'))"
]
},
{
"cell_type": "code",
"execution_count": 85,
"id": "509b8e17",
"metadata": {},
"outputs": [],
"source": [
"label = []\n",
"for l in labels:\n",
"    label.append(l)"
]
},
{
"cell_type": "code",
"execution_count": 257,
"id": "d2a30b5f",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"torch.Size([1913, 7, 9, 20, 11])\n"
]
}
],
"source": [
"sample = torch.from_numpy(dataset)\n",
"#data = torch.rand((7,9,20,11))\n",
"sample = sample.float()\n",
"sample.requires_grad = True\n",
"print(sample.shape)"
]
},
{
"cell_type": "code",
"execution_count": 258,
"id": "75a7a952",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"CNN_RNN(\n",
"  (conv1): Conv2d(20, 16, kernel_size=(3, 3), stride=(1, 1))\n",
"  (pool): MaxPool2d(kernel_size=2, stride=1, padding=0, dilation=1, ceil_mode=False)\n",
"  (conv2): Conv2d(16, 32, kernel_size=(3, 3), stride=(1, 1))\n",
"  (lstm): LSTM(256, 128, num_layers=2, batch_first=True)\n",
"  (fc): Linear(in_features=128, out_features=2, bias=True)\n",
"  (batch1): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
"  (batch2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
"  (relu1): ReLU()\n",
"  (relu2): ReLU()\n",
")"
]
},
"execution_count": 258,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"model.eval()"
]
},
{
"cell_type": "code",
"execution_count": 259,
"id": "81b5c2ab",
"metadata": {},
"outputs": [],
"source": [
"def attribute_image_features(algorithm, data, **kwargs):\n",
"    model.zero_grad()\n",
"    tensor_attributions = algorithm.attribute(data,\n",
"                                              target=labels,\n",
"                                              **kwargs)\n",
"    return tensor_attributions"
]
},
{
"cell_type": "code",
"execution_count": 260,
"id": "e1c1a53c",
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"torch.Size([1913, 7, 9, 20, 11])\n"
]
}
],
"source": [
"dl = DeepLift(model)\n",
"#attr_dl = dl.attribute(data, labels)\n",
"attr_dl = attribute_image_features(dl, sample, baselines=sample * 0)\n",
"\n",
"print(attr_dl.shape)"
]
},
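{
"cell_type": "code",
"execution_count": null,
"id": "b7d2f0e4",
"metadata": {},
"outputs": [],
"source": [
"# Saliency is imported above but never used; this cell (our addition) sketches the\n",
"# same attribution pass with plain gradient saliency, reusing the helper. Unlike\n",
"# DeepLift, Saliency returns absolute input gradients and takes no baselines.\n",
"sal = Saliency(model)\n",
"attr_sal = attribute_image_features(sal, sample)\n",
"print(attr_sal.shape)  # expected: torch.Size([1913, 7, 9, 20, 11])"
]
},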
{
"cell_type": "code",
"execution_count": 261,
"id": "05a94327",
"metadata": {},
"outputs": [],
"source": [
"attr_dl = attr_dl.detach().numpy()"
]
},
{
"cell_type": "code",
"execution_count": 262,
"id": "8d88cd0d",
"metadata": {},
"outputs": [],
"source": [
"with open('sal_nasal.pkl', 'wb') as f:\n",
"    pickle.dump(attr_dl, f)"
]
},
{
"cell_type": "code",
"execution_count": 217,
"id": "4608f5dd",
"metadata": {},
"outputs": [],
"source": [
"with open('sal_vowel.pkl', 'rb') as f:\n",
"    attr_dl = pickle.load(f)"
]
},
{
"cell_type": "code",
"execution_count": 218,
"id": "8a4afb3d",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(1913, 7, 9, 20, 11)"
]
},
"execution_count": 218,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"attr_dl.shape"
]
},
{
"cell_type": "code",
"execution_count": 219,
"id": "184f5964",
"metadata": {
"scrolled": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"(1913, 7, 9, 20, 11)\n",
"(7, 9, 20)\n",
"(20, 63)\n",
"(array([ 2,  3,  9,  2, 16], dtype=int64), array([21, 32, 32, 22, 20], dtype=int64))\n"
]
},
{
"data": {
  551. "image/png": "iVBORw0KGgoAAAANSUhEUgAABwMAAAItCAYAAADVKtVTAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8qNh9FAAAACXBIWXMAAAsTAAALEwEAmpwYAACApUlEQVR4nOzdebhlV1kn/u9b85ChKgkkQAhJCAJB5lKhQQmgTI2gDSIKKijEoVVsxW5pUFBQobEBBRGittiAUwB/ICKDEBKG0BomSZgSSMKUOZWp5rp3/f64p/B6c6vqVNVep05uPp/nOc+pu8/a61377H323ueseteq1loAAAAAAACApWfZ4W4AAAAAAAAA0IfOQAAAAAAAAFiidAYCAAAAAADAEqUzEAAAAAAAAJYonYEAAAAAAACwRK043A0AAAAAAABg6XjsI9e3666fmUisT/7bjve11h43kWC3UToDAQAAAAAAGMx118/kX9530kRiLb/TxcdNJNBtmM5AAAAAAAAABtOSzGb2cDeDEXMGAgAAAAAAwBKlMxAAAAZQVR+uqrZg2RlV1arqJQdQz1FV9UdVdVlV7R6t/4Ch28v0qKpnjfbzsxYsv6yqLjs8rQIAADgULTNtdiIP9k9nIAAATJf/leSXknwuye8n+e0kV04icFW9ZNQpdcYk4t1W7O99qao3jV4/eaING8DeOiIBAABYOswZCAAA0+WJSb7cWvvBw90QJubvk3wiyRWHuyEAAAAsPToDAQBgutw5yXmHuxFMTmvtxiQ3Hu52AAAADKUlmU3bbzkmwzChAACwiNHwiW+vqq9W1baquqmqPlZVz+wUb8+cg5XkEaOhG1tVfXhBucdW1Xuq6tqq2lFVX6mqV1bVhkXqfGRVnVVVnx+1f1tVXVhVL66qNQvKXpbkxaM/z5kXv80rc6t5Eee9ts9570ZzIb5q9O9d8+dRrKp7jYba/HpV7ayqq6rqr6rqnovEOb6q/qCqvlRVW6rqhtG/31RVp+7zTe7wvoyef2r0+qXzXr9s4ftWVauq6rdG7d1RVW/a13s3b/2jq+p1VfXNqto+avcvV1UtKLfPOSoXzkE4Orb+YvTnX8zftvlDnlbViqr6har6xOj92lpVn66qX6wq3ykBAACmnMxAAABY3J8kuShzWXpXJDk2yROSvLmq7tla+82B470pyYcz1/F0+ejvJLlsT4GqenGSlyS5Psm7k1yd5H5Jnp/kCVX10NbaTfPq/B9J7pXk40n+McmaJA8b1XFGVX1/a21mVPY1SX4oySOS/OX8uANYleRDSY5J8v4kNyW5dLRNj0vyjiQrk/xDkkuSnJjkvyT5z1X1yNbap0Zl1yX5WJK7J/nAqHwluVuSJyd5W5KvjtGeId+X3x69fv8kf5jkhtHyG3Jrb0/yXUn+Kcn/l7n9tz+rkvxzkg1J/mb091NGse6Z5L+OUcfevGnUzicneWeSz8x77YYkqao9++WxSb6U5K+SbE/yyCSvTfI9SX7iENoAAAAsUbOZPdxNYERnIAAALO47W2tfmb+gqlZlriPnN6rqDa21bw4VrLX2plGMFye5rLX2kgWxH5m5zqrzkzyhtXbDvNeelbkMr99O8t/mrfYLSS5trf2HbL6qemmSFyV5apK/HcV/zSi78BFJ3tRa+/BAm5Ykd0ry+SSPaK1tmdeOjUn+OsnWJN/XWvv8vNe+M3Pz6P1ZkgeNFj86cx2Br2mtzd/OPftm9ZjtGex9aa29ZJRFd/9Ruy7bR9y7Ze64unbMdiZz791XR+vtGLXzxUn+NckvVNXfttYOaljZ1tqbRsmFT07y/+05Bhd4YeY6Al+X5Ff2dJJW1fIkZyX56ap6W2vtnQfTBgAAAPozpAsAACxiYUfgaNnOJH+cuf9U9+gJN+mXR8/Pnd8ROGrXmzKX1fWMBcu/urDDa+TVo+fHDtvEffq1+R2BIz+ZuYy3F8/vCEyS1tqFSf40yQOr6vQF621bWHlrbWdr7eZxGnIY35ffPMCOwD1esKcjMElaa9cneenoz2cP0rJFjIYA/aUkVyb5b/OyJTP6969lbiqQZyxeAwAAcHvV0jLTJvNg/2QGAgDAIqrqpMwNJ/noJCclWbugyF0m3KSHJtmV5Eeq6kcWeX1VkjtU1bGtteuSpKrWJ3lekh9O8h1JjszcsJp7TGobtif5t0WWP3T0fP+9zHP3HaPne2cus/DcJN/MXGbmg5K8J3PDhn5mfkfV/hzG9+VfDmKd3ZkbznShD4+eH3jQrdm/78jc0K4XJ3nRgikK99iWuf0DAADAlNIZCAAAC1TVqZnruNmY5COZm+fuxiQzSU5O8lMZf0jKoRybufv3F++n3BFJrhvN9fahJN+d5MLMDXt5TeY6FDOqZ1LbcPVeMvGOHT0/dz/rH5EkrbWbquohmRsO9Un59wy+a6vq9Ule1lrbtZc6knx7DrzD9b5ceRDrXLuXjs49dR19CO3Znz375x7Z93F3RMc2AAAAt1GzkbU3LXQGAgDArf1q5jpCnr1wHrWq+rHMdQZO2o1JlrXWjhmz/JMz1+H1ptbafxhKsqrulP13Ki5mdrT+itba7gWvbdjHenv7Bnjj6Pn+rbXFMgdvXVFr30jyMzWXpnZ6kkcl+a9Jfitz0yD85n6q6PG+jGUvHaL7c1xVLV+kQ/CE0fON85bNjp739j1vQ5IbDiD2nrr/vrX2Xw5gPQAAAKaIOQMBAODWThs9v32R1x4xyYbM84kkG6vqPmOW37MN71jktb1tw54Op+V7eX3z6Pmui7y2acx2zfeJ0fP3HuiKbc5FrbXXJvmB0eIfGmPVHu/L/l4/FCuS/KdFlp8xev70vGV73T9VdVoWzyLcV9u/mLnOw4eMMioBAADG0pLMpE3kwf7pDAQAgFu7bPR8xvyFVfXYJM+ZdGNGXj16/tOquvPCF6tq/WgIzT0uGz2fsaDcqUlesZcY142eT9rL63vmvPsPw3pW1aOT/Nhe1tmXv8hcZ9OLq+q7F75YVcuq6ox5f9+nqo5fpJ49y7aOEfOy0fMZ8xce4vuyv9cP1e9X1beHLq2qY5K8aPTnX8wr98UkNyV5clXdcV75tUn+aC9177Xto+zP1ya5U5I/GtXzH1TVnarq9APYFgAAACbMMKEAAHBrr0/y7CRnV9XbknwryXcmeVySv0vyo5NuUGvtg1X1G0l+P8nFVfWeJJdmbr62u2Uuq+2jozYmyT8kuSTJr1bVfTOXQXZSkicm+ccs3nF1TuaGmvz9qvrOjDLNWmsvG73+F0l+PckLqur+ST6f5DuSPD7J3yd5ygFu03VV9dTRup+oqg8muShz/4n0rkkemrnhWteMVvmBJK+sqvOTfDnJ1UlOzNzQn7NJXjlG2B7vywcz9778aVW9PcnNSW5orb1uzLdiX67I3ByGF1bVu5KsTPLUzHXQvb61dt6egq21XVX1h5kbKvXTVfX3mfvO9wOZO4a/tUj952euE/VXqurY/PtchK9trd2Y5KVJ7p/k55L8YFV9KMk3k9wxc3MJPizJCz
N3LAAAAHybOQOnh85AAABYoLX2b1X1yCQvS/KfM3ff/Nkk/yVzmWwT7wwctesVVfWxJL+c5OGZ6wS7MXOdM2cl+at5ZbdU1aOSvDxzWXDfm+SrmevceVUW2YbW2heq6qeSPD/JL+TfO+FeNnr96qp6ROY63b4vcx2QF2Sus+mUHGBn4KjOD1bV/UYxHztq587MdVx9KP9xqNb3Za6z7vtG235U5jrLPpDkVa21j48Rr8f78r6q+rXMZUz+SpJVSS5PMkRn4M4k35/k95I8Pclxo/a+PHNZewu9OHOde89NcmbmOvf+JslLskiHXWttc1U9ZbTes5KsH730liQ3jjoYfyjJM0evPzFzHdDXZK4z+jeTvPVQNxIAAIB+6uDmsAcAAAAAAIBbu//9V7X3vee4icS604lXfLK1djDz2N9umDMQAAAAAAAAlijDhAIAAAAAADCo2cPdAL5NZiAAAAAAAAAsUToDAQAAAAAAYImaqmFCl69f31ZuPOZwN+PQre6b/Npmqmv9SVK7+8do/UNMRLXD3YJDVzP9Y8yunsAbNTuBg2oCm9H7mJrEZ68mMQbA0tjd3TdjxdbOAZLsOrr/O7V86ySufd1DZHZl/xi9zU7i7nEC/11tEte+tqLzZ2MS171l/T/fk7jvXArX72QC1/Clco8wAW35BGJ03h9L4XtMMpnrd5vAdan1vr4ulf09iXPt8iXw3bX3PUiS2rE0ftSZxPl8Msdt3/prV9/6k0zkO8AkzufLVvX/ojG7o+8OXwq/6ezafH1mtmxZGieq27CWlpmlchOyBExVZ+DKjcfkxF/+b4e7GYesTtnStf6dN6zuWn+SrLyu/6Exic6hSXR6LN9527+urLyx/zZsPbX/neOyWybxq0z/EMu3990f3X9oSLJsR/8Yk9iOiXScdj6mjvtM/4P2yifs7B7jqE+u6R5jzfX9v/FsPb7vt89JfLndflz/Y2pmXf99sfLG/m/WzuP6/hCw/Jb+2zBzRP99sfrq/tfvSXQ4Lp/Ata/3fyiYWdP/873ylgn8544J/IC1Y+MEzoWdvy8tXyI/5K++rv927F7fPUR2buh74C7b1f99mp1AB9SKbf23Y+fG/j/kL9ve9xo+u6F/L/naS1d1jzGJiaZ2buwfZNkEfjfafVTf7Vh9Tf/7tZm1t/1ra5KsP+XG7jG2fPXorvVP4n5ttvMp5Bt/9Oq+AeA2aKo6AwEAAAAAALiNa8mMxMCpYc5AAAAAAAAAWKJkBgIAAAAAADCYlomM+MyYZAYCAAAAAADAEiUzEAAAAAAAgAFVZlKHuxGMDJ4ZWFV3raq3VdWNVXVTVb2jqk4aOg4AAAAAAACwb4NmBlbVuiQfSrIjyU9lbljYlyU5p6ru11rbMmQ8AAAAAAAApktLMtsOdyvYY+hhQp+b5NQk92ytXZIkVfVvSS5O8rNJXjVwPAAAAAAAAGAvhh4m9ElJPrGnIzBJWmuXJvlYkicPHAsAAAAAAIApNDOaN7D3g/0bujPwPkkuXGT5RUlOHzgWAAAAAAAAsA9DDxN6TJLNiyy/PsnGxVaoqjOTnJkkKzYsWgQAAAAAAIDbiJbI2psiQ2cGHrDW2lmttU2ttU3L168/3M0BAAAAAACAJWPozMDNWTwDcG8ZgwAAAAAAACwxs01m4LQYOjPwoszNG7jQ6Uk+P3AsAAAAAAAAYB+G7gx8V5KHVNWpexZU1clJHjZ6DQAAAAAAAJiQoYcJ/dMkv5jknVX1oszNEfnSJF9P8saBYwEAAAAAADBlWpKZGCZ0WgyaGdha25LkUUm+nOTNSd6a5NIkj2qt3TJkLAAAAAAAAGDfhs4MTGvta0meMnS9AAAAAAAATL+WyszgM9VxsOwJAAAAAAAAWKIGzwwEAAAAAADg9m22mTNwWsgMBAAAAAAAgCVKZiAAAAAAAACDaUlmIjNwWugM7GDn9Wu61r/+sv67bcspu7vHWLa9f2JqzXQPsSTs3ND6B5lAiOXb+l9cTnvo5d1jXHL+3brWv+6K/u/T7MruIbLz6P4HVZvEVbLzZmz45FV9AyS56iHHd48xO4F9ceTXd3SPsWt933uEm07tWn2S5Iiv9T+HbD92efcYO++xrXuM3Nz3ZLj2qv73Uo8845PdY3z0zzZ1j7H5Qbu6xzj6wv4Xv12dd/mynf0/37vXdg+R3ev63yNs/Hz3ENlxTN8dPrO6a/VJkp1H9d8Xk/gu0yYwplJb2Xc7VmzuvxE77jDbPcbMBL5Xrvt6/xvPZZ0vS1sn8HPftrv0/91oxY397wlb/xCpSfwesrXvZ3x2xQTOtRPoL1i+vX+Qmzev6x5jWedT+iTuEdryvseUkSnh1nQGAgAAAAAAMKDKzCT+VxVjsScAAAAAAABgiZIZCAAAAAAAwGBakln5aFPDngAAAAAAAIAlSmYgAAAAAAAAg5pJHe4mMCIzEAAAAAAAAJYomYEAAAAAAAAMprXKTJOPNi0G3RNVdWJVvbaqzq+qrVXVqurkIWMAAAAAAAAA4xm6W/a0JE9LsjnJRwauGwAAAAAAADgAQw8Tel5r7fgkqarnJHnMwPUDAAAAAAAw5WZTh7sJjAyaGdhamx2yPgAAAAAAAODgDZ0ZCAAAAAAAwO1YSzIz+Ex1HKzDvieq6syquqCqLpjZsuVwNwcAAAAAAACWjMOeGdhaOyvJWUmy5sS7tsPcHAAAAAAAAA5JZaYd9nw0RuwJAAAAAAAAWKIOe2YgAAAAAAAAS0dLMisfbWrYEwAAAAAAALBEDZ4ZWFVPHf3zwaPnx1fVNUmuaa2dO3Q8AAAAAAAApstMq8PdBEZ6DBN69oK/Xz96PjfJGR3iAQAAAAAAAIsYvDOwNV29AAAAAAAAt1ctlRkz1U0NewIAAAAAAACWqB7DhAIAAAAAAHA7Ntvko00LewIAAAAAAACWKJmBAAAAAAAADKYl5gycIjoDOzj2U8u71r9jY9fqkyQnnNv/Q3rTKf1j1Gz3ENm5ofUP0tnqzdU9xrorVvaPcVX/Hb7jg3fqHuOOv35V1/pXvPbYrvUnydUP6L+/j/ha9xC5/kG7u8dYtqXvNePiM0/oWn+SrLy5e4j86plv6x7jseu+2j3GI97y613rn7nTjq71J8m2U3Z1j5EvHtE9RC2fwPW7863OriP7b8M/XPDA7jHOeNaF3WPsmu17rk2Sz3/q3t1jbP3OvtelZbf0f59mj+5/bV1xTf/7kGv/0wS24/q+X9cf/+gLutafJO+9+PTuMZZ/fn33GNvvONM9RlvV97vMzLr+37+P+bf+Ma7b1P+zt3Vt/++VK2/se7497jP9v+Nff3r/a8ay/rs7y7f3P27biv73bCtu6bvPl03gK0Db2T/G9rv3/7608opV3WPsXtf3mFqxrf85ZLZzr8QkfhOG2xrdsgAAAAAAALBEyQwEAAAAAABgMC2VmdY/05TxyAwEAAAAAACAJUpmIAAAAAAAAIOalY82NewJAAAAAAAAWKJkBgIAAAAAADCY1pKZJh9tWtgTAAAAAAAAsEQN2hlYVU+tqrdX1eVVta2qvlRVv
19VRw4ZBwAAAAAAgGlVmZ3Qg/0bOjPw+UlmkvzPJI9L8idJfj7JB6pKFiIAAAAAAABM0NAddD/YWntaa+2trbVzW2uvSfLLSb4nyRkDxwIAAAAAAGDKtMzNGTiJxziq6q5V9baqurGqbqqqd1TVSWOuu6aqXllVV4xGxTy/qr5vkXLLquoFVXVZVW2vqs9W1VP2U/d/qqrZqmpVtWKsjTkIg3YGttauWWTxv46e7zJkLAAAAAAAANiXqlqX5ENJ7pXkp5L8RJJ7JDmnqtaPUcWfJ3lukt9K8sQkVyR5X1U9YEG5lyZ5SZLXJXl8kk8kObuqnrCXdq1M8sYkVx3YFh24br2M8zxi9PyFCcQCAAAAAADgMJsZfHDKg/bcJKcmuWdr7ZIkqap/S3Jxkp9N8qq9rVhV90/y40l+urX2F6Nl5ya5KMnvJHnSaNkdMzeV3stba38wWv2cqjotycuTvGeR6n89SSX5P5mbfq+brnuiqu6SuTfjn1trF+ylzJlVdUFVXTCzZUvP5gAAAAAAAHD78qQkn9jTEZgkrbVLk3wsyZPHWHdXkr+dt+7uJH+T5LFVtXq0+LFJViV5y4L135LkvlV1yvyFVXX3JC9K8guj+rvq1hlYVUckeWeS3UmevbdyrbWzWmubWmublq8fJxsTAAAAAACAadVSmW2TeYzhPkkuXGT5RUlOH2PdS1trWxdZd1WS0+aV25HkkkXKZZE4b0hydmvtvP3EH0SXYUKram2Sf8hc2uUjWmvf6BEHAAAAAACA27Xjqmr+6JRntdbOmvf3MUk2L7Le9Uk27qfufa275/U9zze01tp+yqWqnpnkwUmesZ/Ygxm8M3A04eHbkmxK8gOttc8NHQMAAAAAAIDpNcE5A69trW2aVLBDUVXHZG6Owv/ZWrt6UnEH3RNVtSzJW5M8KskPtdY+MWT9AAAAAAAAcAA2Z/EMwL1l/Y27bvLvmX+bk2yoqoXjli4s97IkVyT5u6raUFUbkqwZvXZ0VXWZT2/ozMA/TvIjSX43yZaqesi8175huFAAAAAAAAAm6KLMzem30OlJPj/Guj9cVesWzBt4epKd+fc5Ai9KsjrJ3fMf5w3cM1fg5+f9fb8k1y0S69ok70zyQ/tp0wEbOkfz8aPnFyY5f8HjOQPHAgAAAAAAYMq0JLNt2UQeY3hXkodU1al7FlTVyUkeNnptX/4hycrMJcLtWXdFkh9N8v7W2o7R4vcm2ZVbzwP4zCQXttYuHf39K0keueDxl6PXvj/Ji8bZoAM1aGZga+3kIesDAAAAAACAQ/CnSX4xyTur6kWZ66t8aZKvJ3njnkJVdbckX0nyO62130mS1tqnq+pvk7ymqlYmuTTJzyc5JfM6/lprV1fVq5K8oKpuTvKpzHUYPirJk+aV+8zCxlXVGaN/ntta2z3MJv9HQw8TCgAAAAAAwO1aZSYLp887PFprW6rqUUleneTNSSrJB5P8SmvtlnlFK8ny3HpUzWdnbnq8lyXZkOSzSR7XWvvUgnIvTHJLkuclOSHJl5I8rbX27kE36CDoDAQAAAAAAGDJaq19LclT9lPmsuTWPZittW1JfnX02Nf6M5nrMHzZAbbtJUleciDrHCidgQAAAAAAAAxmz5yBTAd7AgAAAAAAAJYomYEdXPeQXV3rX//lVV3rT5JbnnZT9xgrztvQPcbN99/RPcaKK/vvj952HdG6x2jVf3zozffuH+PaB/bf3+vfc0LX+m/+/tmu9SdJ29j/s7f9hP6XsJWb+8fYddRM1/pnJ/DffnYf3f8c8jvv+y/dY7ziuv5v1rrNfeu/aX3/c9TaL63pHmPn0d1DZOVn1nWPsfq6vp+N6793Z9f6k2TVN/ofUx+//JTuMV54//d0j/H/Tr539xjLtizvW//OCczncWP/a+vG+17bPca6lX2/8yXJ5bvu2LX+D//Vd3WtP0nWfF/nC1+Sm++yunuMFTf1/ewlye7Vfe/RV97Y//N9/cO3d4+Rmf73a6uu7X+e2r2+7z3CtT/Q//tYru3/2cuy/sftzNq+38eSZMXN/Y/bXUf1PYdMIrFn3ZUTCHLTBH4KP2VL9xDLvtn3u8zOzsdTktRM38+3ZLTpMS1zBiIzEAAAAAAAAJYsmYEAAAAAAAAMprUyZ+AUsScAAAAAAABgiZIZCAAAAAAAwKBmZAZODXsCAAAAAAAAliiZgQAAAAAAAAymJZlNHe5mMCIzEAAAAAAAAJaoQTsDq+qxVfWhqrqyqnZU1Teq6u+q6vQh4wAAAAAAADCtKjNt2UQe7N/Qw4Qek+STSV6f5JokJyX5jSSfqKr7ttYuHzgeAAAAAAAAsBeDdga21v46yV/PX1ZV/5Lki0memuR/DxkPAAAAAACA6dKSzDZzBk6LSeRPXjd63j2BWAAAAAAAAMBIl87AqlpeVauq6h5J3pjkyizIGAQAAAAAAAD6GnrOwD3+X5IHj/59SZJHtdauXqxgVZ2Z5MwkWbFhY6fmAAAAAAAAMCkzExmcknH02hM/keQhSX48yU1JPlBVJy9WsLV2VmttU2tt0/L16zs1BwAAAAAAAG5/umQGtta+MPrn/6uqf0pyWZLfSPJzPeIBAAAAAAAwHVoqs60OdzMY6Z6j2Vq7IXNDhZ7WOxYAAAAAAADw73rNGfhtVXV8knsleWvvWAAAAAAAABx+s+YMnBqDdgZW1d8n+VSSf8vcXIHfkeS/Jdmd5H8PGQsAAAAAAADYt6EzAz+R5GlJfi3JqiRfT/LhJL/fWrts4FgAAAAAAABMmdaSGXMGTo1BOwNba69I8ooh6wQAAAAAAAAOTvc5AwEAAAAAALh9mZUZODXM3ggAAAAAAABLlMxAAAAAAAAABtNSmW3y0aaFzsAOVl69smv9Ox94S9f6k2TZruXdY+w8ZaZ7jCxr/WMsATuP7b8vVt3Q/3Rzj8d9tXuMS//+7t1jrLt6tmv9Nz14Z9f6k2Tjx1d3j3HLSd1DZOeddnWPsezGvp+N6ns4zTlhR/cQx5x4Q/cY199hffcYx72t72fjxvv13+E3PGAC14yrJ3CLOoGRSrbeb3vX+pd/Y03X+pPkrv/c//P9wCd8rnuM91//nd1jTOJ8u/Kmvl+kawK359uP3d09xs1b+382dn7kDt1jrLpj3+8yd37i5V3rT5Kb/+TE7jGWndD/u+vWO/f/Xrnyur7Xvu13mMBJ6oZV3UOsua7/D4qrHrS5e4wtlxzdtf6Zrf3vpVYc3/c+J0nat/qfz1fc0v+Yqgn8NLV8W9+b250n9L9+1zf774sjv9L/mnHTEf3PhTm67/5YPoHf8Lrfd/pJGG5FZyAAAAAAAACDmpnE/8RlLHI0AQAAAAAAYImSGQgAAAAAAMBgWpLZJjNwWsgMBAAAAAAAgCVKZyAAAAAAAAAsUYYJBQAAAAAAYECV2SYfbVrYEwAAAAAAALBEyQwEAAAAAABgULOpw90ERrpmBlbVe6uqVdXLesYBAAAAAAAAbq1bZmBV/ViS+/eqHwAAAAAAgOnTWjLTZAZOiy6ZgVW1Mcmrk/xqj/oBAAAAAACA/euV
GfiKJBe21v66qv6qUwwAAAAAAACm0GzrOlMdB2DwzsCqeniSn4whQgEAAAAAAOCwGrQzsKpWJXljkj9orX1pzHXOTHJmkqzYsHHI5gAAAAAAADBhLZVZcwZOjaFzNP97krVJfnfcFVprZ7XWNrXWNi1fv37g5gAAAAAAAMDt12CZgVV1UpIXJnlOktVVtXrey6urakOSm1trM0PFBAAAAAAAYPrMRmbgtBgyM/DUJGuSvCXJ5nmPJHn+6N/3HTAeAAAAAAAAsA9Dzhn4mSSPXGT5OZnrIPzzJJcMGA8AAAAAAIAp0xJzBk6RwToDW2s3JPnwwuVVlSSXt9Zu9RoAAAAAAADQz5CZgQAAAAAAAJDZNuRMdRyK7p2BrckDBQAAAAAAgMNBtywAAAAAAAAsUYYJBQAAAAAAYDitMmvgyKkhMxAAAAAAAACWKJmBHcysaV3rX/uZI7rWnyRb7zrTPUbN9P9fAasvXtM9xu71fff3JKz9xgROBRP4TyBfOO/U7jF2nzTbPcYdL9jetf4jP72ua/1JMjuBQ2rN1f1jrLx5VfcYy3f0rX/15v7nqHX33dw9xuWX3LF7jGXb+/8fqd2r+9a/8VP9P3xHXb67e4xvPLJ7iKy5rv+F6aQ39P38XX961+qTJF99Vv/36Wtfvm/3GMu+vL57jNm13UOkLe97TK28pf/+Xn/Jyu4xjv5q//P51d/V//q6uvN5at2KnV3rT5I7/drnu8c475LTuseY3TWB/0e9fXnX6mtX/8/3URf33YYkmen/FT87P7Oxe4yffer7utb/l19+SNf6k2T3547uH2N9/+/fu4/oH2PZjv6fvxVbO18zvtr/+n3zPXd1j7Hi+gn8WLFjAteMlX2P20n8ZjuJ3wk5/FqSWTt7asgMBAAAAAAAgCVKZiAAAAAAAACDMmfg9JAZCAAAAAAAAEuUzEAAAAAAAAAG0yIzcJrIDAQAAAAAAIAlSmYgAAAAAAAAg5IZOD1kBgIAAAAAAMASNWhmYFWdkeScRV66sbW2YchYAAAAAAAATJ+Wkhk4RXoNE/rLSf513t+7O8UBAAAAAAAA9qJXZ+AXWmuf6FQ3AAAAAAAAU2w2MgOnhTkDAQAAAAAAYInq1Rn41qqaqarrquqvquqkTnEAAAAAAACYJi2ZbTWRB/s39DChNyb530nOTXJTkgcm+Z9Jzq+qB7bWrl64QlWdmeTMJFmxYePAzQEAAAAAAIDbr0E7A1trn07y6XmLzq2q85L8S5JfTvKiRdY5K8lZSbLmxLu2IdsDAAAAAAAAt2dDZwbeSmvtU1X15STf1TsWAAAAAAAAh1dLDOE5RXrNGbgYWX8AAAAAAAAwQd0zA6tqU5J7Jnlb71gAAAAAAAAcfjIDp8egnYFV9dYklyb5VJIbkjwwyQuSfDPJHw0ZCwAAAAAAANi3oTMDL0zyY0l+Kcm6JFcmeUeSF7fWrh04FgAAAAAAAFOmpWQGTpFBOwNba7+f5PeHrBMAAAAAAAA4ON3nDAQAAAAAAOD2pckMnBrLDncDAAAAAAAAgD5kBgIAAAAAADCo2cgMnBYyAwEAAAAAAGCJkhnYwbIdfXu7t9x9V9f6k+Soi1Z2j3HTvXZ3j1Ez+rvHsXt96x/jyO4hktn+Ie7wqf4xrnj4uq71z6zqWn2SZOc9tnWPsfqLa7vHWNF/M3LTPfueC1fcuLxr/Uly88fu3D3GvV99YfcYX3rJ6d1j7H7G9V3r3/LpY7vWnyQ3fW//+5BK/xjbVq7pHmPHb97Ytf7d7+17vUiSNZes7h5j+x3633eu3t3/f6Nu+HL/+6ktd+67Hdvu3P/+fN3X+38FveIR3UNk/eX9j6lbTum7Py6/cWPX+pPk+muO6h7jiC/0v7ndvb57iOxe2/8c0ltN4PvYLXfvf56axP3zGz7zfV3rn72l/7V1zQT29yQs39b/d6Pqf9hmtvPltfofUll1Vf97hEn8/rXi5v7nkBVb+75Xu9dM4JokWex2obVk1pyBU0NPCQAAAAAAACxRMgMBAAAAAAAYVJMZODVkBgIAAAAAAMASJTMQAAAAAACAAZU5A6eIzEAAAAAAAABYonQGAgAAAAAAwBJlmFAAAAAAAAAG1QwTOjW6ZAZW1ROq6ryquqWqbqqqC6rqUT1iAQAAAAAAAIsbPDOwqn42yetGj5dmrsPxAUnWDR0LAAAAAACA6dKSzMoMnBqDZgZW1clJXpPk11tr/6219oHW2vtaa69orb17yFgAAAAAAACwP1V116p6W1XdOBrR8h1VddKY666pqldW1RVVta2qzq+q71uk3LKqekFVXVZV26vqs1X1lAVl7lRVvz8aUfOGqrqmqj64WH1DGnqY0J9OMpvkDQPXCwAAAAAAwG1BS9qEHvtTVeuSfCjJvZL8VJKfSHKPJOdU1foxtubPkzw3yW8leWKSK5K8r6oesKDcS5O8JHMjZz4+ySeSnF1VT5hX5sFJfjTJO5P8SJJnJdme5MNV9cQx2nJQhh4m9OFJvpjk6VX1m0nuluSyJK9urf3xwLEAAAAAAABgX56b5NQk92ytXZIkVfVvSS5O8rNJXrW3Favq/kl+PMlPt9b+YrTs3CQXJfmdJE8aLbtjkucneXlr7Q9Gq59TVacleXmS94yWfTTJd7TWds+L8b5Rff89SZdRNofODLxz5npTX5m5jXtMkg8keV1VPW+xFarqzFE65AUzW7YM3BwAAAAAAAAmbTY1kccYnpTkE3s6ApOktXZpko8lefIY6+5K8rfz1t2d5G+SPLaqVo8WPzbJqiRvWbD+W5Lct6pOGa17w/yOwHn1fSbJXcbZmIMxdGfgsiRHJvnZ1tqfttY+1Fr7+STvTfKCqrrVXmmtndVa29Ra27R8/TjZmAAAAAAAADCW+yS5cJHlFyU5fYx1L22tbV1k3VVJTptXbkeSSxYpl33FqapVSR6a5Av7actBG7oz8LrR8wcWLH9/kuOT3GngeAAAAAAAAEyRlqS1msgjyXF7RqAcPc5c0JxjkmxepJnXJ9m4n03Z17p7Xt/zfENrt5rFcGG5xbwkyYlJXrGfthy0oecMvCjJQ/bx+uzA8QAAAAAAALj9ura1tulwN+JgVNWPJ/mNJC9trX2kV5yhMwP/fvT82AXLH5fkG621KweOBwAAAAAAwFSpzLbJPMawOYtnAO4t62/cdZN/z/zbnGTDItPlLSz3bVX1g0nelOTPW2sv3k87DsnQmYHvSXJOkjdW1XFJvprkR5I8JsmzB44FAAAAAAAA+3JR5ub0W+j0JJ8fY90frqp1C+YNPD3Jzvz7HIEXJVmd5O75j/MG7pkr8D/EqapHJzk7c0l2PzvGNhySQTMDR2Oh/lCSv0ny20neneR7kjyjtfamIWMBAAAAAAAwnVqbzGMM70rykKo6dc+Cqjo5ycNGr+3LPyRZmbnEtz3rrkjyo0ne31rbMVr83iS7kjxjwfrPTHJha+3Sees/NMk7k3wwyTNba92n2Bs6MzCttZuS/NfRAwAAAAAAAA6XP03yi0neWVUvStKSvDTJ15O8cU+hqrpbkq8
k+Z3W2u8kSWvt01X1t0leU1Urk1ya5OeTnJJ5HX+ttaur6lVJXlBVNyf5VOY6DB+V5EnzYtwryT8muTbJK5M8eP7Ioq21Twy+9enQGQgAAAAAAMDtWxtvPr/uWmtbqupRSV6d5M1JKnNZeb/SWrtlXtFKsjy3HlXz2Ul+N8nLkmxI8tkkj2utfWpBuRcmuSXJ85KckORLSZ7WWnv3vDIPydwchBszN+3eQl3eNJ2BAAAAAAAALFmtta8lecp+ylyWRTrjWmvbkvzq6LGv9Wcy12H4sn2UeVOSN+2vvUMbdM5AAAAAAAAAYHrIDAQAAAAAAGAwrU3PMKHoDOyi9+F9wjnLO0dItm/oHiJrv97/8Fu2q3uIJWHXCf3fqFXfXNk9xpGXdQ+Rq753pnuMFZv7nkVmjpjtWn+SLLtqdfcY2+/Uf1/Urv43LMtv7ntOn1nTutafJEd/uXuIXPZL39k9xoZ7XNs9xu5/Oq5r/Tvvu7tr/Uly1Lod3WNs27aqe4zq/1blqn85oWv9s8f3/3yf8g/buse4+Gf63xPuvkP/Hb51dl33GDP32tK1/trRf1/sOqr/d5m2ov9nY8vd+t+HLN/adyCfG794bNf6k2TVSX2P2SS55e4TGPCo+h9Ttb3vZ2PVDf3fpxvu0/9cW2v7x5hZ1z/G2ovWdq1/9/r+x2zva1KSzF6/pnuM5bf0/85X/S8Z3ff5mmv7v0+3nNL/t4pJfMdf1fk3nUlY1v92La3zbedtfy/A8HQGAgAAAAAAMKhZmYFTw5yBAAAAAAAAsETJDAQAAAAAAGBQrf/I1YxJZiAAAAAAAAAsUTIDAQAAAAAAGFQzZ+DUkBkIAAAAAAAAS5TMQAAAAAAAAAbTUjIDp8igmYFV9eGqant5vHfIWAAAAAAAAMC+DZ0Z+AtJjlqw7KFJXpXkXQPHAgAAAAAAYAq1w90Avm3QzsDW2ucXLquq5ybZmeRvhowFAAAAAAAA7FvXOQOral2SH0nyD62163vGAgAAAAAAYAq0mDNwigw6Z+AifjjJkUn+snMcAAAAAAAAYIGumYFJfjLJ1Un+aW8FqurMJGcmyYoNGzs3BwAAAAAAgO5MGjg1umUGVtWdk3x/kre21nbvrVxr7azW2qbW2qbl69f3ag4AAAAAAADc7vQcJvSZo/oNEQoAAAAAAACHQc9hQn8qyWdba5/tGAMAAAAAAIAp01od7iYw0iUzsKo2JTk9sgIBAAAAAADgsOmVGfiTSXYneWun+gEAAAAAAJhSrR3uFrDH4JmBVbUyyY8leW9r7eqh6wcAAAAAAADGM3hmYGttV5I7DF0vAAAAAAAA06/FnIHTpMucgQAAAAAAAMDh12vOQAAAAAAAAG6PWhKZgVNDZiAAAAAAAAAsUTIDAQAAAAAAGFRrh7sF7KEzsIPdd9rRtf6r16zqWn+SHHFZ9xBZ+5Bru8e46aJju8dYConOK69e2T3GrqNnu8eYWb28e4wTPtw/xjUP7nuVXPut/tuwe23/K/3q6/snt+84tv9xe9RX+tb/6DP/X98ASY44o+91L0k+8dAN3WPsfNdx3WMsu3Bb1/rv+Mef7lp/klz82u/pHqM27uwe44hv9j+H3OnDN3at/0u/uLZr/Uny1f/aPURO/bP+14xrf7n/eWp2x7ruMXZ+s+8+Xz7b/852ZlX//X3aW3d1j3H9vdf0j/Ggma71H/WF/j8HbNu1vnuMdbf0P253fufW7jFmdva9Lu244+6u9SfJhov6H1NbHtb/fD5zVf/r686Nfc+Fsyv7n2vrG/2ve1nT//tYWz6BX6mX9T9Prbqp7zlk613674sVW/rfny/f3n9fbD++/3uVzodtTWAT0vm+08iUcGs6AwEAAAAAABiWzMCpYc5AAAAAAAAAWKJkBgIAAAAAADCgSjNm69SQGQgAAAAAAABLlMxAAAAAAAAAhmXOwKkhMxAAAAAAAACWKJ2BAAAAAAAAsEQN3hlYVQ+rqvdX1dVVdXNVfaqqfnroOAAAAAAAAEyhlrRWE3mwf4N2BlbV/ZL8c5KVSZ6b5L8k+dckf15VPz9kLAAAAAAAAGDfVgxc39OTLE/yg621W0bLPjDqJPzJJH8ycDwAAAAAAACmTTvcDWCPoYcJXZVkV5JtC5bf2CEWAAAAAAAAsA9Dd9C9afT8R1V156raUFXPTfLoJK8eOBYAAAAAAABTqSb0YH8GHSa0tXZhVZ2R5O+T/MJo8a4kP9da+5vF1qmqM5OcmSQrNmwcsjkAAAAAAABwuzZoZ2BV3SPJ25NclOTnMjdc6JOTvKGqtrfW3rpwndbaWUnOSpI1J97VCLIAAAAAAAC3dXp8psagnYFJfi9zmYBPbK3tGi37YFUdm+QPq+qvW2uzA8cEAAAAAAAAFjH0nIH3TfLZeR2Be/xLkmOT3HHgeAAAAAAAAEybNqEH+zV0Z+CVSR5QVasWLP+eJNuTXD9wPAAAAAAAAGAvhh4m9HVJzk7yD1X1+szNGfikJD+W5NWttZ0DxwMAAAAAAGCatCStDncrGBk0M7C19rYkT0iyOsmfJXl7kocn+a9Jfn3IWAAAAAAAAMC+DZ0ZmNbaPyX5p6HrBQAAAAAA4Lahmc9vagw9ZyAAAAAAAAAwJQbPDAQAAAAAAOB2Tmbg1JAZCAAAAAAAAEuUzkAAAAAAAABYogwT2kHb3bePde1V/ftwb7rPru4xVnz+2O4xahJpyDWBGJ2tvr7/RqzYsrx7jO3HdQ+RNTf0j9H7mNp+7219AySpK9d0j7HrqNnuMZb1PxVmy1361n/cylv6BkiybtnO7jGu+bFHdI+xYuc13WPsev6WrvVv/pkHda0/SdZ9fgLn8+39zyE33XN39xhbjz+qb4CZ/ttwxPnruse4+pdu6B7jJaf/Y/cYL6sndI9x5PuO6Vr/1jv3v3meWd09RK554NruMY78xkz3GHVB3/PtTaf139/V/zQ1kaGnZmcm8KXv6L43niu/2f/Dd/Pd+u+M5V/tf13aeFn3ENl8787v1XE7+tafZPnl/e/XZidwzZhd0/+4XbG1/zlkZlXf7Vg+gW1YtnsC59pJ/E44gc/f7Pa+9wi1tf93vmU7lsAPqoyn2dfTQmYgAAAAAAAALFEyAwEAAAAAABjUREbuYywyAwEAAAAAAGCJkhkIAAAAAADAcFomM1cnY5EZCAAAAAAAAEuUzEAAAAAAAAAGVEmrw90IRmQGAgAAAAAAwBI1eGdgVT2yqj5aVduq6vqqenNVHT90HAAAAAAAAKZUm9CD/Rq0M7CqvjfJ+5PckOQpSZ6X5PuSfLCqVg8ZCwAAAAAAANi3oecMfHGSy5P8UGttd5JU1ReS/GuSn0ny+oHjAQAAAAAAMG1k7U2NoYcJfUiSD+zpCEyS1toFSa5L8sMDxwIAAAAAAAD2YejMwJkkOxdZviPJdw4cCwAAAAAAgGkkM3BqDN0Z+KXMZQd+W1XdLcmdkuxabIWqOjPJmUmyYsPGgZsDAAAAAAAAt19DDxP6h0m+u6peVlV3rKp7JXlzktnR41Zaa2e11ja11jYtX79+4O
YAAAAAAAAwUS1Jq8k82K9BOwNba29N8rIkv5bkqiSfT/LNJO9JcsWQsQAAAAAAAIB9GzozMK2130xyXJL7JblTa+3HktwjyUeHjgUAAAAAAADs3dBzBiZJWmtbknwuSarqcUnuleRnesQCAAAAAABgulQ73C1gj0E7A6vqgUken+RTo0UPT/LrSf5Xa+3jQ8YCAAAAAAAA9m3ozMCdSZ6Q5L8nWZ3kC0l+rrX2FwPHAQAAAAAAYFrJDJwag3YGttYuylw2IAAAAAAAAHCYLTvcDQAAAAAAAAD60BkIAAAAAAAAS9TQcwYCAAAAAABwO1fmDJwaOgM7WHXFyq71bz1lV9f6k+SIi/tuQ5LMrOoeInnATd1D7L7kyO4xepv9nhu7x1j14aO7x9j6sFu6x7jq2HXdY7Qjd3et/2F3v7Rr/Uny2SPu3D3G7s9u6B5jxx1museYObpvjLf85Q90rT9Jjr60//u068TqHmPzl4/rHuO7vuvLXev/Sv+3KXd6/Le6x/ji+ad0j5Gbl/ePcdK2rtVv/MjarvUnyQkfurp7jOtuvEP3GM+/6undY2z8TP9javuxfetfvq3/SWQSMR7+E5/sHuP9l9yre4xdW/p+J1t91I6u9SdJu/iI7jF2buz/C9Pai/qfb3du6Lsdy/r/jJBdG/vfE07i+n3dgyewHb3d0v83nd3r+3/2lm/tP6DZ7Or+2zGJ37+W9f0ZIbtO6n/NWPn11d1jrLqh/33I9i39f26v3X0/GzU7gS+Wy/QQwaTpDAQAAAAAAGBYbQKdy4zFnIEAAAAAAACwRMkMBAAAAAAAYDht9GAqyAwEAAAAAACAJUpmIAAAAAAAAMOSGTg1ZAYCAAAAAADAEiUzEAAAAAAAgEGVzMCpMVZmYFWdWFWvrarzq2prVbWqOnmRcmuq6pVVdUVVbRuV/77BWw0AAAAAAADs17jDhJ6W5GlJNif5yD7K/XmS5yb5rSRPTHJFkvdV1QMOoY0AAAAAAADclrQJPdivcYcJPa+1dnySVNVzkjxmYYGqun+SH0/y0621vxgtOzfJRUl+J8mTBmkxAAAAAAAAMJaxMgNba7NjFHtSkl1J/nbeeruT/E2Sx1bV6oNqIQAAAAAAABykqrprVb2tqm6sqpuq6h1VddKY6441RV5VLauqF1TVZVW1vao+W1VP2Uudz62qL1bVjqr6UlX93KFu476MO0zoOO6T5NLW2tYFyy9KsipzQ40CAAAAAACw1E3JMKFVtS7Jh5LcK8lPJfmJJPdIck5VrR9jS8adIu+lSV6S5HVJHp/kE0nOrqonLGjPc5O8McnbkzwuydlJXl9VPz9GWw7KuMOEjuOYzM0puND1816/lao6M8mZSbJiw8YBmwMAAAAAAMDt3HOTnJrknq21S5Kkqv4tycVJfjbJq/a24rhT5FXVHZM8P8nLW2t/MFr9nKo6LcnLk7xnVG5Fkt9N8ubW2gvnlbtzkpdW1Z+11nYNtuUjQ2YGHpTW2lmttU2ttU3L14/TAQsAAAAAAMC0qja5xxielOQTezoCk6S1dmmSjyV58hjrjjNF3mMzN0rmWxas/5Yk962qU0Z/PzTJHRYp9+YkxyZ5+FhbdICG7AzcnGSx1L49GYHXL/IaAAAAAAAA9HKfJBcusvyiJKePse44U+TdJ8mOJJcsUi7z4txn9LywPQvLDWrIzsCLkpwyGnt1vtOT7Myt3wAAAAAAAACWolaTeSTHVdUF8x5nLmjJvqa529/8deNOkXdMkhtaawtzFRcrl0Xq3OeUe4dqyM7Af0iyMsmP7FkwGvv0R5O8v7W2Y8BYAAAAAAAAcO2e6ehGj7MOd4OmzYpxC1bVU0f/fPDo+fFVdU2Sa1pr57bWPl1Vf5vkNVW1MsmlSX4+ySlJnjFkowEAAAAAAJhi483nNwn7muZusay/hevebS/rJv+e0bc5yYaqqgXZgYuVy6g9V+yj3KDG7gxMcvaCv18/ej43yRmjfz87ye8meVmSDUk+m+RxrbVPHXwTAQAAAAAA4KBclH+fq2++05N8fox1f7iq1i2YN3DhFHkXJVmd5O75j9Pm7ZkD8PPzymXUniv2UW5QYw8T2lqrvTzOmFdmW2vtV1trJ7TW1rTWvqe19uEeDQcAAAAAAGA6VZvMYwzvSvKQqjr1222rOjnJw0av7cu4U+S9N8mu3HqkzGcmubC1duno7/OTXLuXctcn+dhYW3SADiQzEAAAAAAAAG5L/jTJLyZ5Z1W9KHMDmL40ydeTvHFPoaq6W5KvJPmd1trvJMm4U+S11q6uqlcleUFV3ZzkU5nrMHxUkifNK7erqn4zyeur6ptJ/nlU5qeT/FJrbWePN0BnIAAAAAAAAMOakjkDW2tbqupRSV6d5M1JKskHk/xKa+2WeUUryfLcelTNcafIe2GSW5I8L8kJSb6U5GmttXcvaM8bqqol+bUkv57ka0l+sbX2+nSiMxAAAAAAAIAlq7X2tSRP2U+ZyzLXIbhw+bYkvzp67Gv9mcx1GL5sjPa8MfOyEnvTGQgAAAAAAMBwxp/PjwnQGdhBzdyq43hQq67qv9u2HzuBT+kEQuy+dl33GCu7R+hvx6VHdo9xhx/8VvcY28+/c/cYx17W/8A945c+2bX+v//Q93StP0nawkT6Ho6a7R6idvU9nyfJuq/3PYusv7L/+3TPX7+oe4yv/Y97dI/x8z/7/u4xXvneH+xa/8n/uKtr/Uly1V2O7R5j1/f13471F6/qHuOYj/aNceXTt3atP0nytP4xdp7d/1x7zKeXd4+xYgK745a79b0P2b2u/33O8h399/f7P/ig7jGOOP367jEefOolXes/57z7da0/SY78Zv/9PbO2e4isurH/Z2PZzr7v1bbjJ7ANW/t/CVi+o3uIzKzvH2P5lr7XpWP6355n9wQ+ezXTP8aOY/oft7uOvO3/xrb2i2v6BkjSJvAr9Y5j+u+LVVf335AV2/peM2ZWd60+SdKW9d0X1f+nELjN0RkIAAAAAADAsGQGTo1J5G4AAAAAAAAAh4HOQAAAAAAAAFiiDBMKAAAAAADAsAwTOjVkBgIAAAAAAMASJTMQAAAAAACAQZXMwKkhMxAAAAAAAACWqLE6A6vqxKp6bVWdX1Vbq6pV1cmLlPu9qnp/VV03KvOsoRsMAAAAAAAAjGfczMDTkjwtyeYkH9lHuV9KsjbJuw+xXQAAAAAAAMAhGnfOwPNaa8cnSVU9J8lj9lLu6NbabFWdluQnh2ggAAAAAAAAtzHmDJwaY2UGttZmhywHAAAAAAAA9DduZiAAAAAAAADsX0tKZuDUGHfOwG6q6syquqCqLpjZsuVwNwcAAAAAAACWjMPeGdhaO6u1tqm1tmn5+vWHuzkAAAAAAAAcqjahB/t12DsDAQAAAAAAgD7MGQgAAAAAAMCwZO1NDZmBAAAAAAAAsESNnRlYVU8d/fPBo+fHV9U1Sa5prZ07KvOIJHdIcsKozKaquiVJWmtvG6bJAAAAAAAATKtKUjIDp8aBDBN69oK/Xz96PjfJGaN//3aSR8wr819Hj2Ru3wMAA
AAAAAATMnZnYGttv515rbUzDqk1AAAAAAAAwGAOJDMQAAAAAAAA9s8woVNj2eFuAAAAAAAAANCHzEAAAAAAAACG05KSGTg1ZAYCAAAAAADAEiUzsINdR852rf+Ir/fvw92yaVv3GDPbl3ePsfarq7rH2H3kEvjvDXfa0T3Ejjed0D3Gzsf0344tu1Z3j/H3H3xI1/o3fqFr9UmSHRure4zda7qHyLaTdvWP0fmjUTP9rxnnfPI+3WOs+d7+14w//osnd4+xuvNl6dGv/mjfAEnOfsOju8c47uMru8fY/Oj+9zo37F7btf47vq3/ufZbdz65e4wbH9D/XDsJtb3/+bat7HvfueKm/ufa3cfs7h5jEv/9uP7xmO4xPn7MsV3rP/o/Xdu1/iTZevNx3WMsm8Ahdd13T+K47Vv9iuv6//wze8RM9xi7jusfY/Wl/b9o7Dip73fXax/Z/zzYJvCbzvIj+n/2Zm7uf9+58vr+71Xr/D1jx5r+x9TRF3cPkRu/o/92LL/r1u4xtl25rmv9k7i29p5HrkmBmh5L4KfzpcLHAgAAAAAAAJYomYEAAAAAAAAMS2bg1JAZCAAAAAAAAEuUzEAAAAAAAAAGNYHpvBmTzEAAAAAAAABYomQGAgAAAAAAMCyZgVNDZiAAAAAAAAAsUWN1BlbViVX12qo6v6q2VlWrqpMXlNlUVWdV1RdHZb5WVW+tqlO6tBwAAAAAAIDp0yb4YL/GzQw8LcnTkmxO8pG9lHl6kvsk+aMkj0/yG0kelOSCqrrrIbYTAAAAAAAAOEDjzhl4Xmvt+CSpquckecwiZV7RWrtm/oKq+liSS5M8N8lvHUpDAQAAAAAAuG0oWXtTY6zMwNba7Bhlrllk2eVJrklylwNvGgAAAAAAAHAoxh0m9KBU1b2T3DHJF3rGAQAAAAAAAG5t3GFCD1hVrUjyhsxlBv75PsqdmeTMJFmxYWOv5gAAAAAAADAphgmdGj0zA1+X5D8leWZrbfPeCrXWzmqtbWqtbVq+fn3H5gAAAAAAAMDtS5fMwKp6eeay/X6qtfb+HjEAAAAAAACYTiUzcGoM3hlYVS9M8j+S/FJr7c1D1w8AAAAAAACMZ9DOwKr65SQvS/LC1trrhqwbAAAAAACA2wiZgVNj7M7Aqnrq6J8PHj0/vqquSXJNa+3cqnp6ktckeW+SD1XVQ+atflNr7fNDNBgAAAAAAAAYz4FkBp694O/Xj57PTXJGksclqdHz4xaU3VMGAAAAAACApaxFZuAUGbszsLVW+3n9WUmedYjtAQAAAAAAAAYy6JyBAAAAAAAA3L7V6MF0WHa4GwAAAAAAAAD0ITMQAAAAAACAYZkzcGrIDAQAAAAAAIAlSmZgB6s39+1jvem+O7vWnyTLrlrdPcaqrf1HDF53Vf//enDTkd1DdDd73aruMbYe3///Hqy5uP9xu+2uu7vHuNM5fd+rax7U/7N3xNe7h8iu9f1jLNu2vHuM5Z3Phff4wYu71p8kX/jQPbrHOPors91j7Di6/2djVefN+D+f+099AyRZe0T3EMmy/vti+aVruse45f7b+wZo/beh+n/0csTFK7vHWH9F/w254Tv6H7drr+4bY/sxXatPksyu7H9tbSv7fwdY1v8rWdZf0Xc7lv1N/x2+8oTuIdImMBHN8hsn8NPJsr77e3bVBD4XW/t/vldf3v+a0Sawu1d9o+938F0b+1/3VnX+7StJZm7pf0wtm0CaxCSOqd7XpUkcUzef2n9nLNvV/6Kx68p13WMs29W3/jaBz0XNmknu9qJkBk4NmYEAAAAAAACwRMkMBAAAAAAAYFgyA6eGzEAAAAAAAABYonQGAgAAAAAAwBJlmFAAAAAAAACGZZjQqSEzEAAAAAAAAJYomYEAAAAAAAAMpyUlM3BqjJUZWFUnVtVrq+r8qtpaVa2qTl5Q5m5V9c6quryqtlXVtVV1blU9oUvLAQAAAAAAgH0ad5jQ05I8LcnmJB/ZS5kjklyb5EVJnpDkZ5LcnOQfq+q/HGI7AQAAAAAAuK1oE3qwX+MOE3pea+34JKmq5yR5zMICrbWLMtcB+G1V9Y9JLk3y7CTvOLSmAgAAAAAAAAdirM7A1trswVTeWttdVTcm2X0w6wMAAAAAAHDbY87A6TFuZuDYqmpZ5oYfPS7JmUm+I8nzho4DAAAAAAAA7NvgnYFJ/leSXxv9+5YkT2+tfXBvhavqzMx1GmbFho0dmgMAAAAAAMBEyQycGss61PmaJN+V5AeT/FOSv6qqJ+6tcGvtrNbaptbapuXr13doDgAAAAAAANw+DZ4Z2Fr7RpJvjP58d1V9OMkfJHn30LEAAAAAAACYPuYMnB49MgMXuiDJaROIAwAAAAAAAMzTY87Ab6uqZUkenuQrPeMAAAAAAAAwJVrMGThFxu4MrKqnjv754NHz46vqmiTXtNbOraqXJDkmyceSXJnkhCQ/k+S7k/z4YC0GAAAAAAAAxnIgmYFnL/j79aPnc5OckeRTSX4lydOTHJ25DsHPJvne1trHDqmVAAAAAAAA3HbIDJwaY3cGttZqP6+/K8m7DrlFAAAAAAAAwCCWHe4GAAAAAAAAAH0cyDChAAAAAAAAsE+VpAwTOjVkBgIAAAAAAMASJTMQAAAAAACAYckMnBo6AztYeXPf+nesnOkbIMkRX1/VPcau9d1D5IZ79j/bLOu/O7pbeXP/JOHtx/TfF2uuq+4xdt7S/726elPf7ZhEev6WE/sHucOn+se4+cT++/vme+7qWv9F553Wtf4kOemcbd1j1M7Z7jGu/aF13WNsuM91Xetfd85xXetPkp3f0/lGJ8m2r/e/SVh5U//P9+qvrula/8332N21/iRZvrX/+1S7+1+/7/TR/uepLXfuf9zuPKpv/TWB+9pVE/jsbb9j/8/GzY+/pXuMdR86omv9s0f13xe713YPkTXX9b8nXHF5//PUzXfrHGDlBL4E9L9dm8j4VrMrJvA7Qudr35orl3etP0naBPbFshsn8B1/wwQO3En8EN75rVq2vf++aCf1v19b9bn+3/lqAofUrrvs7Fp/29r/HLJsuwELYdJ0BgIAAAAAADCoalIDp4UueAAAAAAAAFiiZAYCAAAAAAAwnBZzBk4RmYEAAAAAAACwRMkMBAAAAAAAYFAlM3BqyAwEAAAAAACAJUpmIAAAAAAAAMOSGTg1xsoMrKoTq+q1VXV+VW2tqlZVJ+9nnd8YlfvoIC0FAAAAAAAADsi4w4SeluRpSTYn+cj+ClfVqUlelOTqg28aAAAAAAAAt0XVJvNg/8btDDyvtXZ8a+0JSc4eo/yfJHlrki8cdMsAAAAAAACAQzJWZ2BrbXbcCqvqx5M8KMkLDrZRAAAAAAAA3Ia1CT06qaplVfWCqrqsqrZX1Wer6ikHsP4PVdWnR+teXlUvqqrli5R7eFV9vKq2VdWVVfWqqlq7oMxzq+o9VfXNqtpSVRdW1a9X1apx2jJuZuBYqmpjklcn+e+tteuHrBsAAAAAAAAm5KVJXpLkdUken+QTSc6uqifsb8WqemyStyf519G6f5i5
6fV+b0G5+yX5QOam3XviqMyzk7xpQZW/leTKJM8blfvbUfveOs6GrBin0AF4ZZIv59aN3KuqOjPJmUmyYsPGgZsDAAAAAAAA46uqOyZ5fpKXt9b+YLT4nKo6LcnLk7xnP1W8PMlHW2tnzlv3iCQvqqpXt9auHC3/7STfSPIjrbVdo9g7k/xlVb2itfapUbkHtdaumVf/OVVVSX67qk5trX11X40ZLDOwqr43yU8m+fnW2tiJma21s1prm1prm5avXz9UcwAAAAAAADgcWlITenTy2CSrkrxlwfK3JLlvVZ2ytxWr6q5JHrDIum9OsjJzmYKpqpVJHpfk7/Z0BI78XZKdSZ68Z8GCjsA9/nX0fJf9bMugw4S+McmfJ/lGVW2oqg2ZyzxcPvp79YCxAAAAAAAAoIf7JNmR5JIFyy8aPZ++n3WT5ML5C1trlybZOm/duydZs0i57Um+sp8YSfKIJLOZG7Fzn4YcJvTeo8fPLfLa5iT/LclrBowHAAAAAADANOqXtbfQcVV1wby/z2qtnXWIdR6T5IZFRsK8ft7r+1o3mesbW2jzvNf3Ve76fcUYzTX4vCT/p7V21T7akmTYzsBHLrLsNUmWJ/ml3Lr3FAAAAAAAAA7Fta21TfsqUFXfn+QDY9R1bmvtjEFa1UlV3SnJOzOXPfir46wzdmdgVT119M8Hj54fX1XXJLmmtXZua+3Di6xzQ5IVi70GAAAAAADA0lPpOp/fwfh45ka33J+to+fNSTZUVS3IDtyTrXd99m5Ppt/GRV7bOG/dfZU7Jv8+JOm3VdWxmevUrCSPba3dvI92fNuBZAaeveDv14+ez01yxgHUAwAAAAAAABPRWtua5IsHsMpFSVZnbl6/+SNf7pnH7/P7WTeZmzvw/D0Lq+rkJOvmrfuVzM1LeJ/5K1fVmiSnZkG/XFUdleR9SY5N8r2ttW+OuzHLxi3YWqu9PM7YxzpntNYePm4MAAAAAAAAloDWJvPo471JdiV5xoLlz0xyYWvt0r1vdvtaks/uZd1dSf5pVG7nKM7Tqmp+8t5TM9cR+a49C6pqXZJ/THJKkse01g5oar4h5wwEAAAAAACA27TW2tVV9aokL6iqm5N8KsmPJnlUkifNL1tVH0xyt9baafMW/88k766qNyb56yQPTPKiJH/YWrtyXrmXJPlEkr+rqj9OcnKSVyZ5W2vtk/PKvT3Jw5I8L8n6qnrIvNe+0lq7Zl/bozMQAAAAAACAQU3ZnIEH44VJbslcB9wJSb6U5GmttXcvKLc8C/rbWmvvqaqnJnlxkmcluSrJ7yX53QXlPlNVj0nyisxl/t2Y5P9mrjNxvseNnv9okXY+O8mb9rUhOgMBAAAAAABgntbaTJKXjR77KnfGXpa/I8k7xohzXpKH7qdM7a+efdEZCAAAAAAAwHDa6MFU0BnYwc6j+tZ/l7et7BsgyY6jJvEpPaSO7LGs2NY/xvY73PbPaBu/0H8bth6/rHuMnQ+7uXuMJ5zyxe4x3n3h/brWv/Ibq7rWnyQ7j9/dPcYVD+9/TK28uf9nY+03+p7T7/auzV3rT5Ktdz2ye4ya6b8vjrmoe4jc74xvdq3/Ds/sf476m89+V/cYy3dP4Pp98o7uMY7+9Oqu9a++vv+t/K4juofI9uNmu8f46i/1P6ZWf657iNSmG7vWv31L/3uEZd9a0z3G6muXd4+x9vP9r33Ld/S99m25a/9r664N/T/f2+7aP0atnekeY9WlfT8bdWj/kXwsK27pHiK33HNX9xgnvav/e3XN/ftew2f7/2yUlVv6x9jd/5KRNddO4LeKSfzG1vlUOLO+/7l25eVru8fYdmL/8/mKm/ofU0d8ru/3jEl89mZX9/1cVP9DFm5zdAYCAAAAAAAwKB2z06P/f1UAAAAAAAAADguZgQAAAAAAAAzrtj/D1pIhMxAAAAAAAACWKJ2BAAAAAAAAsEQZJhQAAAAAAIBBlWFCp8ZYmYFVdWJVvbaqzq+qrVXVqurkRcq1vTweMHTDAQAAAAAAgH0bNzPwtCRPS/LJJB9J8ph9lH1TkjcuWPblA24ZAAAAAAAAtz0tSZMaOC3G7Qw8r7V2fJJU1XOy787Ab7bWPnHILQMAAAAAAAAOyVidga212d4NAQAAAAAAYGkwZ+D0GGvOwAP081W1YzS34Ieq6ns7xAAAAAAAAAD2Y+jOwLck+YUk35/kzCTHJvlQVZ2xtxWq6syquqCqLpjZsmXg5gAAAAAAADBxbUIP9mvcOQPH0lr7iXl/fqSq3pnkwiQvS/LwvaxzVpKzkmTNiXe12wAAAAAAAGAgg3YGLtRau7mq/jHJz/SMAwAAAAAAwHSomDNwmvSYM3AxdjkAAAAAAABMWNfMwKo6KskTk/xLzzgAAAAAAABMidbmHkyFsTsDq+qpo38+ePT8+Kq6Jsk1rbVzq+r5Se6Z5Jwk30pytyTPT3JCkmcM12QAAAAAAABgHAeSGXj2gr9fP3o+N8kZSb6U5IdHj6OT3JTkY0l+prUmMxAAAAAAAOB2wpyB02PszsDWWu3n9X9I8g+H3CIAAAAAAABgEF3nDAQAAAAAAOB2SGbg1Fh2uBsAAAAAAAAA9KEzEAAAAAAAAJYow4QCAAAAAAAwqDJM6NSQGQgAAAAAAABLlMzADlZv7lv/Nffvv9t2bpjtHmPFlu4hsmvjBLbjltt+n/qu9dU9Rnv4Dd1jfN9dLu0e4wPv/q7uMdZv7Vv/tjv0/y85x328/3nqugf0346a6R4iy7f3rf/yJ23sGyDJ53/+9d1j3PusX+ge44iv9T+mPvmm+3Wtvz2u801IkpXfWNU9xiQsX9X/HmHXEX3r33Z8/2242+lXdI9x2cXHd4+x7Oo13WNsu3fnE3qSdvX6rvUv29b/vnZ2Vf9z7RFf678dN2za0T1GdvbdjiO/vLJr/UnSTu3/uZiEmSvXdo+x+4jO5/QJ/K/8ZTv6f/ZqZf9r37ce1v9eZ821fetvE/iZYmYCt4Rtef8YO47qf0wt39b/N5eZzrc66y/r/x1/xzETOFHt7r8vJnG+ne28O3av778RrfM2TOI8yBhaklmpgdPCxwIAAAAAAACWKJmBAAAAAAAADEti4NSQGQgAAAAAAABLlMxAAAAAAAAABlUyA6eGzEAAAAAAAABYomQGAgAAAAAAMKwmNXBajJUZWFUnVtVrq+r8qtpaVa2qTt5L2XtX1dlVdW1VbauqL1XV8wZtNQAAAAAAALBf42YGnpbkaUk+meQjSR6zWKGq2pTkQ0k+nOQ5SW5Mco8kRxxqQwEAAAAAALhtMGfg9Bi3M/C81trxSVJVz8kinYFVtSzJ/03ywdbaD8976ZxDbiUAAAAAAABwwMbqDGytzY5R7Iwk907ys4fSIAAAAAAAAG7D2ujBVBhrzsAxPXz0vKaqPlFVu6rq6qr6o6paO2AcAAAAAAAAYAzjDhM6jjuPnv82yeuS/EaSTUl+J8ldk/zwYitV1ZlJzkySFRs2DtgcAAAAAAAAJq2SVJMaOC2G7Azck2X4ltbab43+/eGqWp7k5VV179baFxau1Fo
7K8lZSbLmxLs6MgAAAAAAAGAgQw4Tet3o+QMLlr9/9PzAAWMBAAAAAAAA+zFkZuBF+3l9dsBYAAAAAAAATCu9QlNjyMzAf0qyI8ljFyx/3Oj5ggFjAQAAAAAAAPsxdmZgVT119M8Hj54fX1XXJLmmtXZua+26qvr9JL9ZVTcl+VCSTUl+K8lfttYuGbLhAAAAAAAATKdq7XA3gZEDGSb07AV/v370fG6SM0b//p0kNyf5hSTPT3JFklcmeenBNxEAAAAAAAA4GGN3BrbWaowyLcmrRg8AAAAAAABub9rowVQYcs5AAAAAAAAAYIocyDChAAAAAAAAsB8tMWfg1JAZCAAAAAAAAEuUzEAAAAAAAAAGVRIDp4bOwA7u9fQvdq3/i39zr671J8mO4/p/SnedtLN7jLZjefcYSyHB9uZHbuke425H3dw9xkf/vwd2j7HjXtu7x1j16TVd6589fkfX+pPkujtU9xhrLl7dPcbs6v7nwpnvualr/RuP2Nq1/iS5+4ee3T1GHTnbPca1m/rHOOUdM13r/+aJG7vWnyQ7T9jVPUZm+p9Dsr3/PcK2e/a9ZrRd/e9Bdr3xhO4x7ri2/3as+8lvdY9x1Xl36R5jdmXf69KO4/qeo5Jk9bUT+Owd3//6vfaS/vchM2v6bsfsBH4N2LW5//u0fGv/c0ibwD1h72vf8q39r63b79j/XmrZtSu7x9h9dP9z4bIr+34A13+z/zG77fj+x9Ty/l+/05b3345lu/vHqG199/nWO/X/fGdF/+N23df734fU7u4huv8UuWJb/2N299q++1sHFNyazkAAAAAAAACGZc7AqXHbT2kCAAAAAAAAFiUzEAAAAAAAgOG0pCYwyjDjkRkIAAAAAAAAS5TMQAAAAAAAAIZlzsCpITMQAAAAAAAAliiZgQAAAAAAAAxLYuDUGCszsKpOrKrXVtX5VbW1qlpVnbygzEtGyxd7bO/SegAAAAAAAGCvxs0MPC3J05J8MslHkjxmkTJ/luS9C5atHy1718E2EAAAAAAAADg443YGntdaOz5Jquo5WaQzsLX2jSTfmL+sqn5iFOMvD7GdAAAAAAAA3EZUM07otBhrmNDW2uxB1v9TSa5K8r6DXB8AAAAAAAA4SONmBh6wqrprkkcmeU1rbXevOAAAAAAAAEwZmYFTY6zMwIP0zFH9+xwitKrOrKoLquqCmS1bOjYHAAAAAAAAbl+6ZQYm+ckkn26t/du+CrXWzkpyVpKsOfGuuokBAAAAAABuy1qSg52AjsF1yQysqu9Ocq/sJysQAAAAAAAA6KdXZuBPJdmV5K861Q8AAAAAAMAUqrSUOQOnxuCZgVW1KsnTk/xTa+2aoesHAAAAAAAAxjN2ZmBVPXX0zwePnh9fVdckuaa1du68ok9MckwMEQoAAAAAAHD7JDNwahzIMKFnL/j79aPnc5OcMW/5TyW5Psm7D75ZAAAAAAAAwKEauzOwtVZjlnvywTcHAAAAAACA2zyZgVNj8DkDAQAAAAAAgOlwIMOEAgAAAAAAwL61JLOHuxHsITMQAAAAAAAAliiZgQAAAAAAAAyqzBk4NXQGdvCVP79n1/p33rlr9UmSNVct7x6jrugfYxJ2HX3bP6Gt+dcjuse4/A7ru8c4YnP/fXHEP6/uHqOefnXX+nd/5I5d60+S7XfoPwbAtlN2do+xfPPK7jE2vPfIrvV/67vXdq0/SR7/wM91j3HOxQ/qHuN+3/WV7jE+M3tq1/rXfqu61p8kO/uHSO3uP3jFkV/sfx8y8703dq1/yw39P99HXHpL9xiX/+eju8e4/pP9b6B332V39xi1ZqZr/Wsu63+fs2Nj/3uEE+91VfcY3/rc8d1jLNvd94S79S59j6ckWXbUru4x1lze/1y4c0P/7xmt8/V1tv9tbWaP6H9MZVn/fbHm8lXdY9x8St9zYR2zo2v9SZJr+18zVm1eGgOa7V7b/7idXdU3xurr+++LNoHdvf3Y/vtiWf9LX5Zv73zRWBofPWABH20AAAAAAABYomQGAgAAAAAAMCzDhE4NmYEAAAAAAACwRMkMBAAAAAAAYEBNZuAUkRkIAAAAAAAAS5TMQAAAAAAAAIbTIjNwisgMBAAAAAAAgCVqrM7Aqjqxql5bVedX1daqalV18iLlTqqqv6yqr1XVtqr6clW9rKrWD95yAAAAAAAAptPshB7s17jDhJ6W5GlJPpnkI0kes7DAqMPvn5OsTPKbSb6W5LuS/HaSeyT50QHaCwAAAAAAAIxp3M7A81prxydJVT0ni3QGJnlY5jr9Httae/9o2TlVdUyS51fVutba1kNuMQAAAAAAAFOtzBk4NcYaJrS1Nk6i5arR800Llt8wilPjNwsAAAAAAAAOj6paVlUvqKrLqmp7VX22qp5yAOv/UFV9erTu5VX1oqpavki5h1fVx0fT711ZVa+qqrX7qHdlVX1uNKXfc8Zpy1idgWP65yQXJ3lFVZ1eVUdU1aOSPC/JG1prWwaMBQAAAAAAwLRqbTKPfl6a5CVJXpfk8Uk+keTsqnrC/lasqscmeXuSfx2t+4dJXpTk9xaUu1+SDyS5OskTR2WeneRN+6j++UmOO5ANGXeY0P1qrW2vqodnbuMumvfSnyX5xb2tV1VnJjkzSVZs2DhUcwAAAAAAAOCAVdUdM9fp9vLW2h+MFp9TVacleXmS9+ynipcn+Whr7cx56x6R5EVV9erW2pWj5b+d5BtJfqS1tmsUe2eSv6yqV7TWPrWgXadmrsPwuUneOu72DJYZWFVrkvxtkjsm+Ykkj0jy60l+NMkf72291tpZrbVNrbVNy9evH6o5AAAAAAAAHA4tyWybzKOPx2Zuery3LFj+liT3rapT9rZiVd01yQMWWffNSVZmLlMwVbUyyeOS/N2ejsCRv0uyM8mTF6n+T5L8TZKPj7shyYCZgUl+JskZSU5rrX1ltOy8qroxyVlV9YbW2mcHjAcAAAAAAABDu0+SHUkuWbB8z8iYpye5dB/rJsmF8xe21i6tqq2jdZPk7knWLFJue1V9ZV65JElVPSPJpiTPSHLE2FuSYecMvG+SzfM6Avf4l9HzvQeMBQAAAAAAwFSa0HyBc3MGHldVF8x7nLm/1o3hmCQ3tHarSQmvn/f6vtZNks2LvLZ53uv7Knf9/BhVtTHJq5L8j9batfuIvaghMwOvTLKxqk5rrc3vKf2e0fM3B4wFAAAAAAAA17bWNu2rQFV9f5IPjFHXua21MwZp1bBemeQrSf78YFYeuzOwqp46+ueDR8+Pr6prklzTWjs3yZuS/GqS91TV7yb5WubSFX8zySeTfOxgGggAAAAAAACH4OMZbwTLraPnzUk2VFUtyA7ck613ffZuT6bfxkVe2zhv3X2VOyajIUmr6nuSPDvJo5IcXVVJctSo3Nqq2pDkxkWyGL/tQDIDz17w9+tHz+cmOaO1dllVPSTJS5K8LMlxSb6e5Kwkv9tamz2AWAAAAAAAANxW7b1vauJaa1uTfPEAVrkoyerMzes3fzTMPfP4fX4/6yZzcweev2dhVZ2cZN28db+SuX
kJ7zN/5apak+TU/Hu/3L0zN+3fhxeJ9Uejx8YkN+ytQWN3BrbWaowyn0/ytHHrBAAAAAAAgCnz3iS7kjwjyW/PW/7MJBe21i7d24qtta9V1WdH6/7ZgnV3JfmnUbmdVfXeJE+rqpe01naPyj01cx2R75rXlkcuCHNCkr9O8gdJ/jHJLfvamCHnDAQAAAAAAICpygw8UK21q6vqVUleUFU3J/lUkh/N3FCdT5pftqo+mORurbXT5i3+n0neXVVvzFyn3QOTvCjJH7bWrpxX7iVJPpHk76rqj5OcnLn5Ad/WWvvkqC1XJpm/zp4swyT5Umvtw/vbHp2BAAAAAAAA8B+9MHMZd8/LXCbel5I8rbX27gXllmdBf1tr7T1V9dQkL07yrCRXJfm9JL+7oNxnquoxSV6RuQy/G5P838x1Jg5GZyAAAAAAAADDaUlmb7uZgUnSWptJ8rLRY1/lztjL8nckeccYcc5L8tADbNtlSfY7vd8eyw6kcgAAAAAAAOC2Q2ZgBzc9dkvX+ttX1netP0l2HzHbPUaO3tU9xPqL1nSPsRTsnsDbtPaqsf+TwkHb/uibuse4+ZIju8eY+cqxXeu/64W791/oEH39R/rHyK7+/59l5qj+23HDPfteitd+bWXX+pPkvTvu3z3Gyefv7B7j4q336B4j37m9a/Xrv9n/c7Hxy8u7x7ju9P4xbjmp/73OER85umv9y79ra9f6k+Trgw5Ksrhj/rb/vlh100z3GJvv0f98e8vJfT/js/fe53zzg2ib+994XvHpE7rHOP2hl3aP8bW3n9q1/jv/8+au9SfJV368731tksz2/+hl9eb+32W2Hd/3XDg7gV9/alv/+5C2qn92wYr+l9fc5by+97Zf/en+x+zKrf1jtP63hJlZM4GMlf5vVXcza/u/T7uP7H9PeNQX+x9UW+88gWOq8+m2+v9km6yaQAymQEvaBPoZGIvMQAAAAAAAAFiiZAYCAAAAAAAwrHbbnjNwKZEZCAAAAAAAAEuUzEAAAAAAAACG05LMygycFjIDAQAAAAAAYImSGQgAAAAAAMCwzBk4NWQGAgAAAAAAwBI1VmdgVZ1YVa+tqvOramtVtao6eZFyp1TV26rqhqraUlXnVNWmwVsNAAAAAADA9GptMg/2a9zMwNOSPC3J5iQfWaxAVR2b5KNJvjPJzyZ5+uilc6rq3ofYTgAAAAAAAOAAjTtn4HmtteOTpKqek+Qxi5T5+STHJ/m+1tpXRmU/lOSrSX47c52JAAAAAAAAwISM1RnYWpsdo9hDkly8pyNwtN6WqvpIkidW1YrW2u6DbCcAAAAAAAC3CYbwnCbjDhM6jpkkOxdZviPJ2iR3HzAWAAAAAAAAsB/jDhM6ji8l+YGqOra1dl2SVNWyJN89ev2YxVaqqjOTnJkkKzZsHLA5AAAAAAAATFxLMjvOoJNMwpCZgW8Y1fd/q+ruVXWnJH+U5JTR64vu9dbaWa21Ta21TcvXrx+wOQAAAAAAAHD7NlhnYGvtq0mekeTBSS5J8q0kD03y6lGRK4aKBQAAAAAAwBRrbTIP9mvIzMC01t6e5C5JTk9yWmvtwUmOSPL11trXhowFAAAAAAAA7NuQcwYmSVprM0m+kCRVdeckP5rklUPHAQAAAAAAYErJ2psaY3cGVtVTR/988Oj58VV1TZJrWmvnVtXKJP8ryblJbkpynyQvSHJRkv89XJMBAAAAAACAcRxIZuDZC/5+/ej53CRnJGlJ7pHkx5NsSPKNJP8nye+11nYeUisBAAAAAAC4jWjJrMzAaTF2Z2Brrfbz+u4kTzzkFgEAAAAAAACDGHzOQAAAAAAAAG7HWtLa7OFuBSPLDncDAAAAAAAAgD5kBgIAAAAAADAscwZODZmBAAAAAAAAsERVa9PTM1tV1yS5/ABWOS7JtZ2aI8Z01S/GdMVYCtsgxvTUL8Z0xVgK2yDG9NQvxnTFWArbIMb01C/GdMVYCtsgxvTUL8Z0xVgK2yDG9NQvxnTFWArbMK0x7tZau0OvxjCeo1fcoT30yCdPJNb7bvjzT7bWNk0k2G3UVA0TeqAf0Kq6oPcOFmM66hdjumIshW0QY3rqF2O6YiyFbRBjeuoXY7piLIVtEGN66hdjumIshW0QY3rqF2O6YiyFbRBjeuoXY7piLIVtWEoxYKkzTCgAAAAAAAAsUVOVGQgAAAAAAMBtXGvJ7OzhbgUjt/XMwLPEmJoYS2EbxJie+sWYrhhLYRvEmJ76xZiuGEthG8SYnvrFmK4YS2EbxJie+sWYrhhLYRvEmJ76xZiuGEthG8SYnvrFAL6tWmuHuw0AAAAAAAAsEUcvP649dP0PTiTW+25+0yfNK7lvt/XMQAAAAAAAAGAvzBkIAAAAAADAoJo5A6fGbS4zsKruWlVvq6obq+qmqnpHVZ00cIwTq+q1VXV+VW2tqlZVJw9Y/1Or6u1VdXlVbauqL1XV71fVkQPGeGxVfaiqrqyqHVX1jar6u6o6fagYi8R87+i9etlA9Z0xqm/h44Yh6l8Q6wlVdV5V3TI6ri6oqkcNVPeH97IdrareO0SMUZyHVdX7q+rqqrq5qj5VVT89YP2PrKqPjo7Z66vqzVV1/CHUN9bnrKrWVNUrq+qKUezzq+r7Bqz/90bv23WjMs8achuqalNVnVVVXxyV+VpVvbWqThkwxt2q6p3zzinXVtW5VfWEoWIsss5vjMp9dMgY+/isPGCobaiqe1fV2aP3ac85+HlDbENVvWQf27B9iBijcidV1V+OjqdtVfXlqnpZVa0fMMYpNXe9vaGqtlTVOVW13+EWasxrXFVtrKo/G+2HLVX1z1V13/3VP26Mqjqyqv6g5s7BN42284xx6j+AGI+uqrdU1VdGZb5SVX9SVXccMMaDa+76+s2q2l5z1/X3VNVDh6h/kXXeMHqv3jLgNpy8j8/FhiG3o6oeMnq/9hy3n6uqpw+0HW/ax3Z8cYhtqKr71Ny97bdG7b+oqp5fVfv9D3wHEOMBo/dozz3Pu6rqtP3VP1p3rPvLOoT79XFi1CHeq48Z45Du18eMcdD36wezbh3gvfqY23BI9+sHsh11kPfrY27HId2v///tnXvcbmOZx78XexOxHRunsIkKEdk5TAwqJMZMR6diMpp0GJWSTJRBJ52NCZX56DOolEMUtXOqUQ5thrI1tLHZu6IdNiKnXPPHtV4eT8+z1nUfnmd7d9f383k+7/uudz3X77nXWte6fs9a97rvhNzI8usJ8XeSun594DEjBTW8K74U1m+nRlH9dmpk12+vxoB1kmq4sx1FNTylHZJZwx1tyK7fKW2QghqeoJFVw8V5ri7JbY9GaX47NUr9uUejxJ8n183U3Ha2odSfu9uRm9vOdhTld0JuZOV3Qvxsf94To9UnSYXa3aYhlep3h0bV+h0Ef41MqicDRWRZ4FLgUeAAQIHjgMtEZDNVfaiS1AbAm4Frgf8BdqkUd4IPAncC/wbMB7YAjgZ2EpG/VdUat8tXxj7/l4EFwDrAh4GrRGRTVb2jgsZTiMg+wEtrxuzhEODnPX8/UTO4iLwDO
LF5HYvdJN8cWLaSxLuAaX3LtgU+D5xfQ0BENgMuBq4C3g48DLwROFVEllbVkwrjbw/MBH4IvAFYBcu9S0RkS1V9NCOsN89OBXYHDgNuA94N/FBEtlXV6yvE/1fgeuB7wP5JLfBp7A1sApwAzAbWAo4CZonI5qo6r4LGcsAfgCOxc8o07Dj4voi8QVXPqaDxFCKyfqP1+464uRqnAaf0LbulRnyxm1mXApcDBwH3Axti27ALj8bXgP6Lhs9tlnnyvVND7IbfxcBU7Fi6E3g58O9YW/aqoLEKcAXwIPAO7JxyKFZvt1LVX7XE76xxIiLABcB0LAfvA45o4m+uqvM72uCpo6sABwLXAT8CXt8RM0fjYOzYOQ47P22I7YddG1/yxwoaKwJzsLz4HfA3wPuBH4vIdqp6TWH8pxCRVwBvAR7o+Ny5Gp/kL/PgwVoaIrI7cC5wJrAv8BiwMfCcShrHAif3vW868I0B7UqOLyJrYuem3wDvw87rrwKOB54HHF5BY0Ms728E9sO+C3wM+EmTe13n9U5/WcGvezxsqVf3aJT6dY9GiV9Pem+mV0/RyPXrLo1Cv+7RKPXrntwo8eue+FX9+rBjpkINb41Pef32aJTWb4/GiuTXb69G7zo5NTxFI7eGuzQKa3hX/JL67dKoUMM9GqU1HFrO1bVyu02DevndplErv9s0VqQ8v111szC3PRqlud2qUSO3OzRq5XdbbtTI77b4xbnd5ZNq5LfDixXnt0OjWv0OxomC6qL+EMEEqjppXsB7gT8DG/QsWw87iR5aUWeJnt8Pwi5iTK8Y/3kDlu3f6LxyhNvvRY3GByrHXQm4C9iniX9cpbg7NvFePcJtMh34E/C+UWkM0T0Vu0i2cqV4n8BM1XJ9y68ErqwQ/2LM7E7pWTaj2T/vyozZmWfYlyAF3tazbApwM3B+afze9bALiwr8U+U2DMr3dYEngWNqaAx53xRgHnBBbQ3sItMpmCG+ota2av6XdQ5x7oslgJuAc0d1zA5531ubdXev1I5dmuW79C3/FFYPl62gcWQT6wU9y54L3A2c1RG/s8YB/9D8vVPPOisA9wInOLaTR0N6/vfq5n87Juxvj8agdf6uWefAGhpD3rc8VkP+o1Z87ObyjdiXwrnA6RW30/Tm74O82z9DY3msg8IXR6Ux5H1HNetsUqEN/9L8/cK+9b4J/K7SdvoasBBYsWed5wOPAMdnbrtn+EtG4NcHaFT36gM0qvv1fo3cdVLfS0WvPmA77Uhlvz5AYzqV/bpzXxT59QHtqOrXB8Sv5tfbjhkKa7gjflH9dmoU1W+PxpD1XfU7VYPMGu7cVtMpqOFOjaIanrkvXPU7oQ1FNdypkV3DcZyrS3PbqVHqzz0apf68U2PI+7z+3B0/N7ed26kot50apf48d1+489vZjuz8dsYv8uc4fFKF/PZolOa3R6NK/Y7XeF/TllhZd1n2rWN5AbMWdXuf7a/JNkzonsBVqjpnYoGq3g78FDuxVUHrPJnXFn/BgMUTPUTWGqH0Pc3Pqk/WAZ8GblTVb1SOOw4OxG7K9PckGhlNj/k3YTdp7q0Udingcaxw9nI/dYYD3gb4kao+deyo6izsmHpdTkBnnu2JtetbPe97AjNdu4rI0oXxi/Ld895B+a7WA30BjnzP/XzNdrofR76naIjIvsDLsC8kKZ9n1OdVT/wdgY2wXv6j0hjEAdhNtB9W0liq+dnfO3Qhlu9SQWMb4NeqemvP+x7CeizuIS3DoThr3J7Ab1X1sp733Y/1Vuys5x4NVdWuOBU0iup5wfsfwi42tOZ3YvzDgCWBz7bFLNTIwqnxJqxn7udGqDGI/YFrVXV2hfhdud2KU2Mb7KbDwp73zccuNGXVc/7SX47Crz9DY0Q1pV9jFMe2x4uX+PVh763p1Uf1faJNYxR+vbUdlfx6v0Ztv94fv6Zfbztmimp4V/zS+u3UqJXfqbnlqt8ZGlk1PFGjlDaNohruiD8IV/1O0Ciq4U6NUdTwXmrkdisV87tNY1Fdb8vJ7y5Kc3tRUyO3c8jJ7zZq5fcwSnPb45NK87tTo0J+ezQWVX4HJSjwpI7nFXQy2W4GboKdDPuZjT1mPpnZofnZNvRaMiKypIgs1Tx2fgrWy6yayReR7bBC++5aMQdwhoj8WWxOtzOl7hyR2wH/B+wtNtb0EyIyR0RG2Z7XYT2kvl4x5mnNzxNEZE0RWVFE3o4NXfCFCvH/jPVk7udR4CUV4g9jE+B2VX24b/lszJAljaH+bEFENsKGEqmd70uIyBQRWV1EPgq8EBteoVb8lbDj6UMVb2QP4p1i8+E8LDY/zvaV4m7X/HyOiFwlIo+LzdlzgogsU0njGYjI2sBOwBm9F+cKuRj4NfBpEdlYRJYTG0P/vcDJWmfI7LacXwZ4QWK8/hrXVs/XERHPsK1dGqPAo1H6OQa+v8nvqU0NnMjrr9aILzYfxZHYkyOPZ8Ts1Gj4ZFNn7xebByN5fqkWje2wnq2bis1D8oSIzBORj4nIkpU0nkEzbNMG5Nfz/vjfxoYdOlFszs5pIvI67Oni3Iso/Rptuf0CEXEN2dThL6v49VF72EyN5Pz2aJS0teu9Nby68/MV+fUOjSp+PXE7Z/n1Do3Tmp/Zfr0jfhW/7jhmimr4OL4/Zmok5bdXo6R+ezRKa3jCtsqu4Q6Nohqeur9z6rdDo7iGOzRq1PC2c3Utfz7K6ze5Gjn+vFOj0J+3xq/kzz3bqdSft2nU8ufu/Z2T3w6NGh69LX5pbnt8Uml+j+Paaa7GOK4DBMFiw6SaMxCbK+G+AcvvxYZTmJSIyFrAMcDFTe/NmlwNbNn8Pgcb1ihlnq+hiMhS2JfQz6rqzTVi9nE/Vlh/jPXA2QKbt+VKEdmiUjvWbF6faWLfivVeOlFEpqjqlypo9LM/NlTCRbUCquqNYhPznovNeQLW8/hgVf1mBYmbsd5KTyEi6wJrNDqjoi3nJ/4/qRB7oupk7MnAUyuHPx74QPP7H4G9VfWSivE/g83dd1rFmP2cjs3f+FtsONXDgEtFZGdVvbww9prNz29hX9Y+jA2fdQywNnV61PbzFqzjTbWb/6r6SHOx4GzMvE/wNeA9lWRuBnYWkVVU9R6wL7vAVs3/3bk3pMatjA11089Ebq+EHcMlGlXxaIjI8sAXsS8i51XWOAubAwqshrxWVW+qFP8k4JzenqK5DNF4FPMLM7Fz34uxmvsz6Z6D0quxJjaXxJnY/BLXYsPTHIXN6/L+Chr97I/VwOQbVIPiq+rdIrIt8F1sDgywfpRHq+rxNTSw3P5bEZk6cWGpOW43wZ4qXgmb+6aLNn9Zy6+PzMPmaBScZzwaJW0d+t6KXr3t89Xy620atfx6ynbO9etDNSr59bY2FPt15zGTXcPH8P0xSyO1fidqZNXvBI3sGu7UKKrhTo3sGp55TCXVb49GaQ13tqOkhnvO1aX+fBzXb5I1Mvx5ikZOfnvjl/hzj0apP/dolPrznGMq1Z93ahTmt6cNpf7c45NK83sc106TNUq/fwdjZLSDhQUp6LNgrFLvC+sp8akB
y48DnhiRZvU5A/viLwfMwi58P38E8TcCtsbGnL8WmF+rLVgvpduAZXqWKZXmDByi+TJs2IVa8xLe0nzm1/ctvwjraSs1dHriron1+vl85bgbAndiwxDugfUwPgEzQftViL/fxL7Fnmh7MfCTZl/8qUL8YfOWzcSGGutff2L88e1L4vetkzxnYKpGs97JzX7ZpbYGNq78jOYYOAsbY36PSvti++Yc/JKeZZfjnDMwZ1s16y4P3JGi09KGrzTLT+hbfnizfKMR7O9fAdfVPKawydYvw740vAUbI/+D2JeLkypprN8cP9/HngJcA7uB+kSz/tbO+ANrHHbu/WbL51k7oQ2ddZSCOYcSNKZgN7IfBDarrdHsk5djE7Ffgg1LM6M0fnMMLQRW61k2l8T5hrzbqWfdtZtjNnVeo2HtmNns40P71j8JO3etUHl/Pwe74XVOre2EDaP0C+Aa7MLSjtjNp8eAwytpbNdsp1OxoXTWBb7Tk9urOeMP9ZdU8uttGn3rZXv1BI1sv+7R8H6OjH1Rxaunfj4y/HpHO6r49YT9ne3XO9pR7Nc74hf7dc8xQ0ENTz0myZtzKFUjuX6naJBZv537oqiGp26rnnXcNdzZjuwanrG/k+u3sw1FNdypUaWG98R7xrmaiv58mEbf/4r8uVOjyJ93aeTmt2NfVPPnnu3Us06WP29pRzV/7tzf2f68ox3VPPqQ+EW5jcMnlea3R6NveU79TtWokt/xGv1rmqysuyy971hexJyBna/JNkzofQzuUTysB/KzGrFh6S7ADMSuamNCV0VVf6WqV6uNOf8q7GLGh0vjNo+0fwTr0bO02DA3Kzb/nvg7d1iuoajqdViBeHmlkBNzbfyob/lMYDXsAnhNqj8l1PAJ7ELCHqr6PVW9RFUPwW4Ifal5oicbVT0Du7DwAWzus5uA3wAX4nuCIJe2nIenezFNCkTkU9jk0weq6sza8VV1vqrOao6BNwNXUW9+gVMwczq/J9+nAEs2fw+dv7EEVX0QuyFVI+fb8h2sl141RGQr7EJc7Xz/Z+wLyGtV9XRV/YmqfhbLz4NF5KWlAqp6G3ZRcUvsyYPfAtvy9DBmnXnfUeO6cttV08dRRz0azTn269iXnn9U1V/U1lDV21T156p6DrAb1vv4uJL4zXAwn8fmp3m0J7eXAKY2f0+t1Ya+9swDriAhtzs02vJ7KtaztlSjlz2xHs2pQwi2xf8QML1ZfraqXq6qH8V6xx4rIquWaqjqFdjwY2/EbiLMBVZo2vEYzrra4S+r+PVRedhUjdLzjEejpK3D3lvTq6d+vhy/3qFRxa8ntCPbr3doFPv1tvilfj3hmMmq4eP4/piqkVO/UzVy6rdTYwUKanjJ/vDW8ASNrBqe2Yak+p2gkV3DvRq1avgEA87VVfx5h0Z1hmmU+nOPRok/Hxa/pj/3tKFvnWR/3qFRxZ93aPSS5c8dGlU8+rD4FXLb45NK83sc107dGjXzOxg9CuiTOpZX0M1kuxk4m8HFYmPsy86koSne38Ge4nmtqv5y1Jpqk9HOoc48a+tjvW5Ox4rGxAvsCZX7gJK5gLqoleFdEwrXfo75AOAGVb2hctxNm7j9QwBdA6yC9Q4uQlWPAlYFNgPWUNV9sB7OV5TGbmE2sJ6ILNu3fGPMFM0ZoXZVROQj2BNoh6jqf49Jdhb15lXcCDiYZ+b7K7DhqO4D3llJZxg1cn5R5Pvj2LAoNdkUuE9Vb+1bfk3zc6MaIqp6NtYzcWNgA1XdErvoOE9V72x7r6PGtdXzO1W1c4jQcdTRBI2Tgb3IGJo3px2q+hjWO7Uzvzvir4r1dP0Ez8zttYE3N7/vPoo29DbHs5LzmGqjM78T23EANnfIhV1xE+JvCsxR1f4v49dgF0xK9zcAqvplzBe8BFhHVXfGnoS6eoCP6GSAv6zu1yt7WLdG7fOMpx0lbe1770i8euLny6rdQ46pNpLrd0c7qvj1ARpV/fqgNhT6de8xk1vDx/H9MVUjp35ntyOhfns01qOshtfYH105nnJMtTEsx3PakFq/vRolNdzdjto1vGFiPxb7c4fGKOnXyPbnCRpP/yPBn3fEr+LPOzRqrON5/yi/fw/6jMn+3KlR7NE74pfmtmc7l+b3OK6lpGiMIr+D4K+CyXYz8HxgGxFZf2KBiEzHLkifv6g+VCpND4YzgFdiPRiuGpPuathTKv0XkHO4HthpwAvMxO7ECG7UiMgM4EU8fdG7lHObn7v2LX8NMF9V76qkM/HZN6b+U0Jgj8xvLjbXQC9bY0P9VXmCTlUfUtVfqo2Z/hrseDq5RuwhXICZqzdNLBCbc28vYKaqPjpC7WqIyCFYD8GPqOqJY9JcAhtuoka+w+B8vwGbhHon7GJpdURkGjaUVo2cvwibG2FQvoPdPK1Ck4t7Axep6oJacRvuAlYSm1S+l62bn7+pJaSqf26eQrhVRNbEcu+ktvc4a9z5wFoiskPP+6YBf4+jno+jjno1RORz2PAqb1PV80ahMeB9y2I3J1rz2xH/Lgbn9t3Axc3vrReQC9qwDnaO6sxtp8Z5zc9B+f0Idq4q1ZhYd7VG50zvhTdn/LuADUSkv9euK7dT2qCqj6rqbFWdJyKbYr1qW3O7RbffX1b365U9rEtjFOcZTztK2tr33usZgVd3tqHIrw/QqO7Xh7Wjpl8foFHVrw9rQ4Ffvx7fMZNbw73xS3BrFNRvt0Y/3vqdoFFSw0va4a3hXo3zmmWpNTypDTn1O0GjpIYntaNWDR9wri7y506N6gzSKPHnXo0B63jzuyt+sT93aAxbx+3PnRrnNT+z/LlTY2J5Tn57NYo8uiM+UJTbHp9Umt/juHbq0qid38EYUAV9cjyvoJMpi/oDJPJV4D3Ad0XkSKwXxbHAPGwIu2qIyBubXycmaN9NRBYAC1T1x4Xh/xO7ufFx4CER6Z3ofb5WGOZMRM4FrsN6Jj0AvBCbnPcJbPLaIpqeqJcP0AW4Q1X/4n+piMgZwO1YOxZiQ/gdgRXaE0rjN1yIzb11SvNo/23YvtkFeFsljQn2x7b/GZXjgs3l9W3gAhH5MvAnbIiEfYAvNL3UshGRLbBhL65rFm0HHAYcr6o/K4jbmmeq+r8i8i3gi03v/NuxJ9DWw4YwLIrfrLMD1vNu9WadGSLyRwBV7bzB1aUhIntjExr/ALi0L98f0O4Jxj0aR2PDO/wUM6qrY0NJbgXs2xXfozEop0VkITDFm++OdnwQM8WXYcNSrov1hl2dCvtbVe8RkU8CR4nIA8Cl2Be2jwJfV9XOC1AJtWEPbJ8kX0x0aJwGHApcKCIfx+YfmoENLXQtdhwUaTT5djxPT3S+CXb+nU13DfHUuPOBK4HTReQwrIfrEdh8Bl2TsHs1EJHdgOfydK/wHZpz/UOqelGphogcju2L/wJ+3bfOggFPb+ZonIJdIJ6F9XRdF/NCawBvLY3P4Fr+CHC3M7c9bfgc1gHtSmABludHYL07P15DQ1VvFJHTgGOamzjXYV+gDwKOdfR2TfFm+wFLkpbfnvgnN7FnishnsKFydsTOg+eqDd1
UpCEiz8fq6M+wzhEzsH1xjtqwg604/WWRX/d62BKv7tQo8usejRK/3vXeGl7d2YYiv+7cBkV+PXE7Z/l1p0a2X3fuiyK/7j1mRCSrhqcck7n1O6EN2fU7QSO7fidsq0HruGp4Qjuya3hCO7JqeMZ5Lrl+J2yn7BqeoJFdw53n6iJ/7q0HJf7co1Hqz50a2fndFV9VH6HQnzvbUOTPPRql/jzRY+T4c69Gdn4790WRP8fnk0q/f7u8WOH3706NCt+/gyDQZ8HEhSkvYB3gbOxL0INYT5PpI9DRIa/LK8Se2xL/6Eqf/3DsovBC4GHgZuwCTPVtNWC7tU44nhDrCOwL7/3YUHvzgK9gQ97U/MzTsAs+d2NDT/4C2LeyxlTMYF0wwm2/G2YaFzS5cT3wLmDJCrE3wXqfLcQuXFyH9cKpcby05hmwDDZm/l1Y77GrcU5C7Ix/+bD1amhgN26KzicOjT2xG1u/x8zjHZjZe0XNfTHgPZcDV9TSwHql/RT7QvU4ZrLPB7aquL8FM49zsHy/A5v8e2rN7QR8t/n8S40oLzbG5hiah+XkLdj8kCtV2hdTsMm4726OqVuxp1uXdcSe2xL/6J71VsZM/L1YnboEeKnz83s1hq03t4YGLecP4LRKGgdiXwzvwc6Bt2JDz25aazsNed/ptfZF04afY186H8fO52cCL6q8v5dqjtN5WH7fAry3pkaz7g3ALxPz2tuGbXh6fq+HsBvwRwLLVNoXq2G9yv+A5fZN2PxiU5ztcPlLCvx6gobrfJyrkXJMFGhk+/Xc9zaf3+XVnW0o8usJ+zvbrydoZPv1BI0sv+7cF6P068f1Lcuu4c74c8ms3x4NCuu3UyO7fqdsqwHrzMVZwxPakV3DE/Z5dg33bicy6ndiG7JruHNfZNdwnOfqktxO0MjOb49GaX47NUr8eVbdTMnthDaU+HPv/i7x5+5tRWZ+J7QjK7+d+6LInzcxOn0ShbXbqTGXgvrdpcEI6ne8Rv9anpV05yl7jeUFzFrU7X22v0RVCYIgCIIgCIIgCIIgCIIgCIIgCIIaTJOVdesldh6L1sVPnnWtqs4Yi9gkZbLNGRgEQRAEQRAEQRAEQRAEQRAEQRAEgZPJNmdgEARBEARBEARBEARBEARBEARB8GxHn1zUnyBoiCcDgyAIgiAIgiAIgiAIgiAIgiAIgmAxJeYMDIIgCIIgCIIgCIIgCIIgCIIgCKohIj8AVh2T3B9U9TVj0pqUxM3AIAiCIAiCIAiCIAiCIAiCIAiCIFhMiWFCgyAIgiAIgiAIgiAIgiAIgiAIgmAxJW4GBkEQBEEQBEEQBEEQBEEQBEEQBMFiStwMDIIgCIIgCIIgCIIgCIIgCIIgCILFlLgZGARBEARBEARBEARBEARBEARBEASLKXEzMAiCIAiCIAiCIAiCIAiCIAiCIAgWU/4fvhBhOsRDPkkAAAAASUVORK5CYII=\n",
  552. "text/plain": [
  553. "<Figure size 2520x720 with 2 Axes>"
  554. ]
  555. },
  556. "metadata": {
  557. "needs_background": "light"
  558. },
  559. "output_type": "display_data"
  560. },
  561. {
  562. "data": {
  563. "text/plain": [
  564. "\"for i in range(attr.shape[-1]):\\n final = attr[:,:,i]\\n plt.imshow(final, cmap='viridis', interpolation='nearest')\\n plt.colorbar()\\n plt.title('MFC'+str(i+1))\\n plt.show()\""
  565. ]
  566. },
  567. "execution_count": 219,
  568. "metadata": {},
  569. "output_type": "execute_result"
  570. }
  571. ],
  572. "source": [
  573. "#origin = datashape[0,:,:,0,0]\n",
  574. "print(attr_dl.shape)\n",
  575. "attr = np.mean(attr_dl, axis=(0, 4))\n",
  576. "print(attr.shape)\n",
  577. "'''with open('vowel_electrode.pkl', 'wb') as f:\n",
  578. " pickle.dump(attr, f)'''\n",
  579. "\n",
  580. "'''_ = viz.visualize_image_attr(final, original, method=\"blended_heat_map\",sign=\"all\",show_colorbar=True, \n",
  581. " title=\"Overlayed DeepLift\")'''\n",
  582. "'''plt.figure(figsize=(8, 8))\n",
  583. "plt.imshow(attr, cmap='viridis', interpolation='nearest')\n",
  584. "plt.colorbar()\n",
  585. "plt.title('electrode attribute')\n",
  586. "plt.show()'''\n",
  587. "#plt.savefig('electode_uw.png', dpi=300)\n",
  588. "\n",
  589. "'''plt.figure(figsize=(12, 5))\n",
  590. "ax = plt.gca()\n",
  591. "ax.set_facecolor((0.95,0.95,0.95))\n",
  592. "plt.grid()\n",
  593. "plt.bar(x=range(20), height=attr, tick_label=range(20))\n",
  594. "plt.xticks(fontsize=16)\n",
  595. "plt.yticks(fontsize=16)\n",
  596. "plt.title('MFCC attribute')\n",
  597. "plt.show()'''\n",
  598. "All=[]\n",
  599. "for i in range(20):\n",
  600. " All.append(attr[:,:,i])\n",
  601. "All = np.stack(All)\n",
  602. "All = All.reshape((20,63))\n",
  603. "print(All.shape)\n",
  604. "print(np.unravel_index(np.argsort(All, axis=None)[-5:], All.shape))\n",
  605. "plt.figure(figsize=(35, 10))\n",
  606. "ax = plt.gca()\n",
  607. "ax.set_xticks(range(63))\n",
  608. "ax.set_yticks(range(20))\n",
  609. "plt.xticks(fontsize=16)\n",
  610. "plt.yticks(fontsize=16)\n",
  611. "plt.imshow(All, cmap='viridis', interpolation='nearest')\n",
  612. "plt.title('all features attribute', fontsize=20)\n",
  613. "cbar = plt.colorbar()\n",
  614. "for t in cbar.ax.get_yticklabels():\n",
  615. " t.set_fontsize(16)\n",
  616. "plt.show()\n",
  617. "'''for i in range(attr.shape[-1]):\n",
  618. " final = attr[:,:,i]\n",
  619. " plt.imshow(final, cmap='viridis', interpolation='nearest')\n",
  620. " plt.colorbar()\n",
  621. " plt.title('MFC'+str(i+1))\n",
  622. " plt.show()'''"
  623. ]
  624. },
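The cell above ranks attributions by flattening the (20, 63) array with np.argsort and mapping the flat indices back to 2-D coordinates with np.unravel_index. A minimal standalone sketch of that top-5 lookup, assuming numpy is imported as np and All is the stacked attribution array from the cell:

idx_flat = np.argsort(All, axis=None)[-5:]          # five largest values, ascending order
rows, cols = np.unravel_index(idx_flat, All.shape)  # back to (MFCC index, position index)
for r, c in zip(rows, cols):
    print(f"MFCC {r + 1}, position {c}: attribution = {All[r, c]:.4f}")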
  625. {
  626. "cell_type": "code",
  627. "execution_count": 83,
  628. "id": "a07ed621",
  629. "metadata": {},
  630. "outputs": [
  631. {
  632. "name": "stdout",
  633. "output_type": "stream",
  634. "text": [
  635. "0.3645833333333333\n"
  636. ]
  637. }
  638. ],
  639. "source": [
  640. "print(sum(y__test)/(len(y__test)))"
  641. ]
  642. },
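The printed value (~0.365) is the positive-class rate of y__test, so a classifier that always predicts the majority (negative) class already scores about 0.635 accuracy; that baseline is worth keeping in mind when reading the fold accuracies below. A quick sketch, assuming y__test holds 0/1 labels:

pos_rate = sum(y__test) / len(y__test)   # 0.3646 in the output above
baseline = max(pos_rate, 1 - pos_rate)   # ~0.635: always predict the majority class
print(baseline)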
  643. {
  644. "cell_type": "code",
  645. "execution_count": 271,
  646. "id": "c2174d74",
  647. "metadata": {},
  648. "outputs": [],
  649. "source": [
  650. "class cnn(nn.Module):\n",
  651. " def __init__(self):\n",
  652. " super().__init__()\n",
  653. " self.conv1 = nn.Conv2d(20, 16, 3)\n",
  654. " #torch.nn.init.xavier_normal_(self.conv1.weight)\n",
  655. " self.pool = nn.MaxPool2d(2, 1)\n",
  656. " self.conv2 = nn.Conv2d(16, 32, 3)\n",
  657. " #torch.nn.init.xavier_normal_(self.conv2.weight)\n",
  658. " self.fc1 = nn.Linear(256, 128)\n",
  659. " self.fc2 = nn.Linear(128, 1)\n",
  660. " #torch.nn.init.xavier_normal_(self.fc.weight)\n",
  661. " self.batch1 = nn.BatchNorm2d(16)\n",
  662. " self.batch2 = nn.BatchNorm2d(32)\n",
  663. " \n",
  664. " def forward(self, x):\n",
  665. " # (batch, heigth, width, feature)\n",
  666. " #print(x.shape)\n",
  667. " x = rearrange(x, 'batch heigth width feature -> batch feature heigth width')\n",
  668. " #print(x.shape)\n",
  669. " out = self.pool(F.relu(self.batch1(self.conv1(x))))\n",
  670. " #print(out.shape)\n",
  671. " out = F.relu(self.batch2(self.conv2(out)))\n",
  672. " #print(out.shape)\n",
  673. " out = rearrange(out, 'batch channel heigth width -> batch (channel heigth width)')\n",
  674. " #print(out.shape)\n",
  675. " out = F.relu(self.fc1(out))\n",
  676. " out = F.sigmoid(self.fc2(out))\n",
  677. " return out"
  678. ]
  679. },
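A quick forward-pass trace explains fc1's in_features of 256: with the (batch, 7, 9, 20) inputs loaded below, the rearrange gives (batch, 20, 7, 9); conv1 (3x3) gives (16, 5, 7); the 2x2 stride-1 max-pool gives (16, 4, 6); conv2 (3x3) gives (32, 2, 4); and 32*2*4 = 256 after flattening. A small check, assuming torch and the cnn class above are in scope:

model = cnn()
x = torch.randn(4, 7, 9, 20)   # (batch, height, width, feature), matching data.shape below
out = model(x)
print(out.shape)               # torch.Size([4, 1])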
  680. {
  681. "cell_type": "code",
  682. "execution_count": 15,
  683. "id": "2c1032f9",
  684. "metadata": {},
  685. "outputs": [
  686. {
  687. "data": {
  688. "text/plain": [
  689. "(1913, 7, 9, 20, 11)"
  690. ]
  691. },
  692. "execution_count": 15,
  693. "metadata": {},
  694. "output_type": "execute_result"
  695. }
  696. ],
  697. "source": [
  698. "with open('sal_nasal.pkl', 'rb') as f:\n",
  699. " dataset = pickle.load(f)\n",
  700. "dataset.shape"
  701. ]
  702. },
  703. {
  704. "cell_type": "code",
  705. "execution_count": 16,
  706. "id": "a86f1e03",
  707. "metadata": {},
  708. "outputs": [
  709. {
  710. "name": "stdout",
  711. "output_type": "stream",
  712. "text": [
  713. "(1913, 7, 9, 20) 1913\n"
  714. ]
  715. }
  716. ],
  717. "source": [
  718. "data = np.mean(dataset, axis=4)\n",
  719. "labels = nasal_label\n",
  720. "print(data.shape, len(labels))"
  721. ]
  722. },
  723. {
  724. "cell_type": "code",
  725. "execution_count": 283,
  726. "id": "649e822c",
  727. "metadata": {},
  728. "outputs": [
  729. {
  730. "data": {
  731. "text/plain": [
  732. "1913"
  733. ]
  734. },
  735. "execution_count": 283,
  736. "metadata": {},
  737. "output_type": "execute_result"
  738. }
  739. ],
  740. "source": [
  741. "data1 = []\n",
  742. "for sample in data:\n",
  743. " data1.append(sample)\n",
  744. "len(data1)"
  745. ]
  746. },
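The loop above is just an element-wise copy, presumably kept as a list so the fold indices below can select samples one at a time; a one-line equivalent:

data1 = list(data)   # same result: a length-1913 list of (7, 9, 20) arrays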
  747. {
  748. "cell_type": "code",
  749. "execution_count": 284,
  750. "id": "075addc9",
  751. "metadata": {},
  752. "outputs": [],
  753. "source": [
  754. "#config\n",
  755. "val_size = 0.15\n",
  756. "n_epochs = 100\n",
  757. "batch_size = 128\n",
  758. "print_every = 10\n",
  759. "k = 10\n",
  760. "skf=StratifiedKFold(n_splits=k, shuffle=True, random_state=42)"
  761. ]
  762. },
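These settings feed the stratified 10-fold loop whose logs follow. The loop itself is defined elsewhere in the notebook, but its outline is roughly as below (a hedged sketch, assuming data1 and labels from above; the DataLoader construction and training call stand in for the notebook's actual helpers):

for fold, (train_idx, test_idx) in enumerate(skf.split(data1, labels), start=1):
    print(f"-----------------------------Fold {fold}---------------")
    X_train = [data1[i] for i in train_idx]
    y_train = [labels[i] for i in train_idx]
    X_test = [data1[i] for i in test_idx]
    y_test = [labels[i] for i in test_idx]
    # a further split of X_train by val_size yields the validation set behind the
    # "validation loss decreased" / early-stopping messages printed in the logs below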
  763. {
  764. "cell_type": "code",
  765. "execution_count": 285,
  766. "id": "21e68d7f",
  767. "metadata": {
  768. "scrolled": true
  769. },
  770. "outputs": [
  771. {
  772. "name": "stdout",
  773. "output_type": "stream",
  774. "text": [
  775. "-----------------------------Fold 1---------------\n",
  776. "preparing dataloaders...\n",
  777. "(1913, 7, 9, 20)\n",
  778. "coef when 0 > 1 1\n",
  779. "creating model...\n",
  780. "calculating total steps...\n",
  781. "epoch: 1\n",
  782. "validation loss decreased (inf ---> 0.698440), val_acc = 0.3953488372093023\n",
  783. "validation acc increased (0.000000 ---> 0.395349)\n"
  784. ]
  785. },
  786. {
  787. "name": "stderr",
  788. "output_type": "stream",
  789. "text": [
  790. "C:\\Users\\saeed\\Desktop\\Master\\bci\\lib\\site-packages\\torch\\nn\\functional.py:1960: UserWarning: nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.\n",
  791. " warnings.warn(\"nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.\")\n"
  792. ]
  793. },
  794. {
  795. "name": "stdout",
  796. "output_type": "stream",
  797. "text": [
  798. "validation acc increased (0.395349 ---> 0.395349)\n",
  799. "epoch 1: train loss = 0.6869507828848291, l1loss = 0.13827660727062954, train acc = 0.5616507297433316,\n",
  800. "val_loss = 0.71086893987286, val_acc = 0.3953488372093023\n",
  801. "\n",
  802. "epoch: 2\n",
  803. "validation acc increased (0.395349 ---> 0.395349)\n",
  804. "validation acc increased (0.395349 ---> 0.395349)\n",
  805. "epoch 2: train loss = 0.6654115830706062, l1loss = 0.13782025073482934, train acc = 0.6205334675390035,\n",
  806. "val_loss = 0.7223108600276386, val_acc = 0.3953488372093023\n",
  807. "\n",
  808. "epoch: 3\n",
  809. "validation acc increased (0.395349 ---> 0.395349)\n",
  810. "validation acc increased (0.395349 ---> 0.430233)\n",
  811. "epoch 3: train loss = 0.6485615584738721, l1loss = 0.13727900934057374, train acc = 0.6396577755410167,\n",
  812. "val_loss = 0.7128145556117214, val_acc = 0.5\n",
  813. "\n",
  814. "epoch: 4\n",
  815. "validation acc increased (0.430233 ---> 0.503876)\n",
  816. "validation loss decreased (0.698440 ---> 0.692173), val_acc = 0.5310077519379846\n",
  817. "validation acc increased (0.503876 ---> 0.531008)\n",
  818. "epoch 4: train loss = 0.6330915434791266, l1loss = 0.13658334071844982, train acc = 0.6431806743834927,\n",
  819. "val_loss = 0.6788588637529418, val_acc = 0.5348837209302325\n",
  820. "\n",
  821. "epoch: 5\n",
  822. "validation loss decreased (0.692173 ---> 0.676339), val_acc = 0.5310077519379846\n",
  823. "validation acc increased (0.531008 ---> 0.531008)\n",
  824. "validation loss decreased (0.676339 ---> 0.651838), val_acc = 0.5387596899224806\n",
  825. "validation acc increased (0.531008 ---> 0.538760)\n",
  826. "epoch 5: train loss = 0.6172819093477204, l1loss = 0.1356638338647419, train acc = 0.6492199295420231,\n",
  827. "val_loss = 0.6452844336975453, val_acc = 0.5426356589147286\n",
  828. "\n",
  829. "epoch: 6\n",
  830. "validation loss decreased (0.651838 ---> 0.643711), val_acc = 0.5387596899224806\n",
  831. "validation acc increased (0.538760 ---> 0.538760)\n",
  832. "validation loss decreased (0.643711 ---> 0.636844), val_acc = 0.5426356589147286\n",
  833. "validation acc increased (0.538760 ---> 0.542636)\n",
  834. "epoch 6: train loss = 0.6055080684574033, l1loss = 0.13447358302873286, train acc = 0.6607951685958732,\n",
  835. "val_loss = 0.6349219661350398, val_acc = 0.5426356589147286\n",
  836. "\n",
  837. "epoch: 7\n",
  838. "validation loss decreased (0.636844 ---> 0.635237), val_acc = 0.5426356589147286\n",
  839. "validation acc increased (0.542636 ---> 0.542636)\n",
  840. "validation loss decreased (0.635237 ---> 0.631333), val_acc = 0.5348837209302325\n",
  841. "epoch 7: train loss = 0.5956864868448677, l1loss = 0.1329548779029491, train acc = 0.6623049823855058,\n",
  842. "val_loss = 0.6336472632811051, val_acc = 0.5465116279069767\n",
  843. "\n",
  844. "epoch: 8\n",
  845. "validation acc increased (0.542636 ---> 0.542636)\n",
  846. "epoch 8: train loss = 0.5867841974778216, l1loss = 0.13107547391942834, train acc = 0.6673376950176145,\n",
  847. "val_loss = 0.6482481244922609, val_acc = 0.5310077519379846\n",
  848. "\n",
  849. "epoch: 9\n",
  850. "epoch 9: train loss = 0.5799472323001541, l1loss = 0.1288494519630628, train acc = 0.6738802214393558,\n",
  851. "val_loss = 0.6537467005640961, val_acc = 0.5310077519379846\n",
  852. "\n",
  853. "epoch: 10\n",
  854. "epoch 10: train loss = 0.5720185998341638, l1loss = 0.1262706755376906, train acc = 0.6809260191243081,\n",
  855. "val_loss = 0.660815857177557, val_acc = 0.5348837209302325\n",
  856. "\n",
  857. "epoch: 11\n",
  858. "epoch 11: train loss = 0.5634220912340123, l1loss = 0.12343447363715587, train acc = 0.688475088072471,\n",
  859. "val_loss = 0.6865861693093943, val_acc = 0.5271317829457365\n",
  860. "\n",
  861. "epoch: 12\n",
  862. "epoch 12: train loss = 0.5516697056160299, l1loss = 0.12035546596169051, train acc = 0.6985405133366884,\n",
  863. "val_loss = 0.6893619465273481, val_acc = 0.5310077519379846\n",
  864. "\n",
  865. "epoch: 13\n",
  866. "epoch 13: train loss = 0.5383751470057522, l1loss = 0.11719332186942971, train acc = 0.7075993960744842,\n",
  867. "val_loss = 0.7262142046477443, val_acc = 0.5193798449612403\n",
  868. "\n",
  869. "epoch: 14\n",
  870. "epoch 14: train loss = 0.5219575696154701, l1loss = 0.11402586181603548, train acc = 0.7166582788122798,\n",
  871. "val_loss = 0.8209437387165173, val_acc = 0.5310077519379846\n",
  872. "\n",
  873. "epoch: 15\n",
  874. "epoch 15: train loss = 0.5082864454930069, l1loss = 0.11104010005781983, train acc = 0.727730246602919,\n",
  875. "val_loss = 0.790199874907501, val_acc = 0.5387596899224806\n",
  876. "\n",
  877. "epoch: 16\n",
  878. "epoch 16: train loss = 0.4823058645930432, l1loss = 0.10828750046597213, train acc = 0.7518872672370408,\n",
  879. "val_loss = 0.8347636539806691, val_acc = 0.5348837209302325\n",
  880. "\n",
  881. "epoch: 17\n",
  882. "validation acc increased (0.542636 ---> 0.542636)\n",
  883. "epoch 17: train loss = 0.44870099950268694, l1loss = 0.10601656717844926, train acc = 0.7790639154504277,\n",
  884. "val_loss = 1.0818783419076787, val_acc = 0.5426356589147286\n",
  885. "\n",
  886. "epoch: 18\n",
  887. "epoch 18: train loss = 0.4243430009905753, l1loss = 0.10426574212390691, train acc = 0.7911424257674887,\n",
  888. "val_loss = 0.8771759863047637, val_acc = 0.5658914728682171\n",
  889. "\n",
  890. "epoch: 19\n",
  891. "validation acc increased (0.542636 ---> 0.562016)\n",
  892. "validation acc increased (0.562016 ---> 0.569767)\n",
  893. "epoch 19: train loss = 0.3871670792645165, l1loss = 0.10277385458117702, train acc = 0.8208354302969301,\n",
  894. "val_loss = 0.9480718586796014, val_acc = 0.5465116279069767\n",
  895. "\n",
  896. "epoch: 20\n",
  897. "epoch 20: train loss = 0.3737996664698783, l1loss = 0.1016839612097253, train acc = 0.8102667337695018,\n",
  898. "val_loss = 1.400591679321703, val_acc = 0.5387596899224806\n",
  899. "\n",
  900. "epoch: 21\n",
  901. "validation acc increased (0.569767 ---> 0.596899)\n",
  902. "epoch 21: train loss = 0.3521438902445516, l1loss = 0.10036715933754027, train acc = 0.8324106693507801,\n",
  903. "val_loss = 0.9109365958576054, val_acc = 0.5775193798449613\n",
  904. "\n",
  905. "epoch: 22\n",
  906. "epoch 22: train loss = 0.319912125056991, l1loss = 0.0992567646230473, train acc = 0.8580775037745345,\n",
  907. "val_loss = 1.5151436608429103, val_acc = 0.5310077519379846\n",
  908. "\n",
  909. "epoch: 23\n",
  910. "epoch 23: train loss = 0.3122329044474029, l1loss = 0.09838688995634784, train acc = 0.8575742325113236,\n",
  911. "val_loss = 3.091815226076012, val_acc = 0.5155038759689923\n",
  912. "\n",
  913. "epoch: 24\n",
  914. "epoch 24: train loss = 0.28910951249612654, l1loss = 0.09807651147628826, train acc = 0.879214896829391,\n",
  915. "val_loss = 1.2312677035960116, val_acc = 0.5581395348837209\n",
  916. "\n",
  917. "epoch: 25\n",
  918. "epoch 25: train loss = 0.2668642541332706, l1loss = 0.09742016104909122, train acc = 0.8842476094614997,\n",
  919. "val_loss = 1.9600338353667148, val_acc = 0.5852713178294574\n",
  920. "\n",
  921. "epoch: 26\n",
  922. "epoch 26: train loss = 0.2474498063415874, l1loss = 0.09673020170526284, train acc = 0.8933064921992954,\n",
  923. "val_loss = 1.8068715759025988, val_acc = 0.5775193798449613\n",
  924. "\n",
  925. "epoch: 27\n",
  926. "epoch 27: train loss = 0.1902359421446987, l1loss = 0.09593981095727029, train acc = 0.9315551082033215,\n",
  927. "val_loss = 1.3357789331628371, val_acc = 0.5581395348837209\n",
  928. "\n",
  929. "epoch: 28\n",
  930. "epoch 28: train loss = 0.15691894562057232, l1loss = 0.0954245226574892, train acc = 0.9436336185203825,\n",
  931. "val_loss = 2.6843640092731444, val_acc = 0.5348837209302325\n",
  932. "\n",
  933. "epoch: 29\n",
  934. "epoch 29: train loss = 0.1504599494989877, l1loss = 0.09509522278868618, train acc = 0.9431303472571716,\n",
  935. "val_loss = 2.4894338860068213, val_acc = 0.5968992248062015\n",
  936. "\n",
  937. "epoch: 30\n",
  938. "epoch 30: train loss = 0.12966379567523306, l1loss = 0.09448714830660015, train acc = 0.9542023150478107,\n",
  939. "val_loss = 1.8916605646296065, val_acc = 0.5968992248062015\n",
  940. "\n",
  941. "epoch: 31\n",
  942. "epoch 31: train loss = 0.15563943400926547, l1loss = 0.09434211563092114, train acc = 0.9471565173628586,\n",
  943. "val_loss = 4.939729660980461, val_acc = 0.5348837209302325\n",
  944. "\n",
  945. "epoch: 32\n",
  946. "epoch 32: train loss = 0.16452017606474612, l1loss = 0.09465074112097942, train acc = 0.9406139909411173,\n",
  947. "val_loss = 3.636755084344583, val_acc = 0.5775193798449613\n",
  948. "\n",
  949. "epoch: 33\n",
  950. "epoch 33: train loss = 0.12137126438525915, l1loss = 0.09421853995242188, train acc = 0.9592350276799195,\n",
  951. "val_loss = 3.1257032387016355, val_acc = 0.5542635658914729\n",
  952. "\n",
  953. "epoch: 34\n",
  954. "epoch 34: train loss = 0.11817072889855236, l1loss = 0.0940876242596418, train acc = 0.9572219426270759,\n",
  955. "val_loss = 3.6307011426881304, val_acc = 0.5852713178294574\n",
  956. "\n",
  957. "epoch: 35\n",
  958. "epoch 35: train loss = 0.09768068436092628, l1loss = 0.09356706718073818, train acc = 0.96527428283845,\n",
  959. "val_loss = 4.066986708678017, val_acc = 0.5852713178294574\n",
  960. "\n",
  961. "epoch: 36\n",
  962. "epoch 36: train loss = 0.09975038621823992, l1loss = 0.09309212238751408, train acc = 0.9622546552591847,\n",
  963. "val_loss = 3.2775008031564163, val_acc = 0.5503875968992248\n",
  964. "\n",
  965. "epoch: 37\n",
  966. "epoch 37: train loss = 0.08334749282366316, l1loss = 0.092504221824137, train acc = 0.9763462506290891,\n",
  967. "val_loss = 3.8003816779183093, val_acc = 0.5310077519379846\n",
  968. "\n",
  969. "epoch: 38\n"
  970. ]
  971. },
  972. {
  973. "name": "stdout",
  974. "output_type": "stream",
  975. "text": [
  976. "epoch 38: train loss = 0.07791504205383123, l1loss = 0.09236309195530733, train acc = 0.9793658782083543,\n",
  977. "val_loss = 3.233410909194355, val_acc = 0.5736434108527132\n",
  978. "\n",
  979. "epoch: 39\n",
  980. "epoch 39: train loss = 0.06921992062487432, l1loss = 0.09197287779689495, train acc = 0.9854051333668847,\n",
  981. "val_loss = 3.561995010505351, val_acc = 0.5852713178294574\n",
  982. "\n",
  983. "epoch: 40\n",
  984. "epoch 40: train loss = 0.09455262519377947, l1loss = 0.09194954407083131, train acc = 0.9703069954705587,\n",
  985. "val_loss = 3.140381110104725, val_acc = 0.562015503875969\n",
  986. "\n",
  987. "epoch: 41\n",
  988. "epoch 41: train loss = 0.04864857653104365, l1loss = 0.09150668548229776, train acc = 0.9899345747357826,\n",
  989. "val_loss = 3.4616599368552365, val_acc = 0.5426356589147286\n",
  990. "\n",
  991. "epoch: 42\n",
  992. "epoch 42: train loss = 0.03882463321757185, l1loss = 0.09062009955703403, train acc = 0.9924509310518369,\n",
  993. "val_loss = 3.4793882855149203, val_acc = 0.5891472868217055\n",
  994. "\n",
  995. "epoch: 43\n",
  996. "epoch 43: train loss = 0.028278966656936192, l1loss = 0.08959808966653936, train acc = 0.995973829894313,\n",
  997. "val_loss = 3.8493388558543007, val_acc = 0.5736434108527132\n",
  998. "\n",
  999. "epoch: 44\n",
  1000. "epoch 44: train loss = 0.02344076073516177, l1loss = 0.08865111783919288, train acc = 0.9954705586311021,\n",
  1001. "val_loss = 3.879825139115023, val_acc = 0.5581395348837209\n",
  1002. "\n",
  1003. "epoch: 45\n",
  1004. "epoch 45: train loss = 0.01661977870085159, l1loss = 0.0877494524450055, train acc = 0.9989934574735783,\n",
  1005. "val_loss = 4.2667010209357095, val_acc = 0.5658914728682171\n",
  1006. "\n",
  1007. "epoch: 46\n",
  1008. "epoch 46: train loss = 0.013619207225413736, l1loss = 0.086835880546817, train acc = 0.9994967287367891,\n",
  1009. "val_loss = 4.457068901653438, val_acc = 0.5852713178294574\n",
  1010. "\n",
  1011. "epoch: 47\n",
  1012. "epoch 47: train loss = 0.015068223528380243, l1loss = 0.0860258729338166, train acc = 1.0,\n",
  1013. "val_loss = 3.7533517517727013, val_acc = 0.5775193798449613\n",
  1014. "\n",
  1015. "!!! overfitted !!!\n",
  1016. "[1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1]\n",
  1017. "[0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1]\n",
  1018. "early stoping results:\n",
  1019. "\t [0.6041666666666666]\n",
  1020. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0]\n",
  1021. "label = tensor([0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0,\n",
  1022. " 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0,\n",
  1023. " 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0,\n",
  1024. " 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1,\n",
  1025. " 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0,\n",
  1026. " 0, 1, 0, 1, 1, 0, 0, 0])\n",
  1027. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  1028. "label = tensor([1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0,\n",
  1029. " 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1,\n",
  1030. " 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1,\n",
  1031. " 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1,\n",
  1032. " 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1,\n",
  1033. " 0, 0, 0, 1, 0, 1, 1, 1])\n",
  1034. "output = [0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0]\n",
  1035. "label = tensor([0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0,\n",
  1036. " 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0,\n",
  1037. " 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1,\n",
  1038. " 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1,\n",
  1039. " 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1,\n",
  1040. " 1, 1, 0, 1, 1, 1, 0, 1])\n",
  1041. "output = [1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  1042. "label = tensor([1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0,\n",
  1043. " 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1,\n",
  1044. " 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0,\n",
  1045. " 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0,\n",
  1046. " 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0,\n",
  1047. " 1, 1, 0, 0, 1, 0, 1, 1])\n",
  1048. "output = [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0]\n",
  1049. "label = tensor([0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0,\n",
  1050. " 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0,\n",
  1051. " 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0,\n",
  1052. " 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1,\n",
  1053. " 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1,\n",
  1054. " 1, 0, 0, 0, 1, 0, 0, 0])\n",
  1055. "output = [1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0]\n",
  1056. "label = tensor([1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0,\n",
  1057. " 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1,\n",
  1058. " 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0,\n",
  1059. " 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
  1060. " 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0,\n",
  1061. " 0, 1, 1, 0, 1, 0, 0, 1])\n",
  1062. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  1063. "label = tensor([1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1,\n",
  1064. " 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1,\n",
  1065. " 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0,\n",
  1066. " 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0,\n",
  1067. " 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1,\n",
  1068. " 1, 0, 0, 1, 0, 1, 0, 1])\n",
  1069. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0]\n",
  1070. "label = tensor([1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1,\n",
  1071. " 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1,\n",
  1072. " 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1,\n",
  1073. " 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1,\n",
  1074. " 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0,\n",
  1075. " 0, 0, 1, 0, 1, 0, 0, 0])\n"
  1076. ]
  1077. },
  1078. {
  1079. "name": "stdout",
  1080. "output_type": "stream",
  1081. "text": [
  1082. "output = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  1083. "label = tensor([1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1,\n",
  1084. " 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1,\n",
  1085. " 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0,\n",
  1086. " 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1,\n",
  1087. " 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0,\n",
  1088. " 0, 1, 1, 1, 1, 1, 1, 1])\n",
  1089. "output = [1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0]\n",
  1090. "label = tensor([0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1,\n",
  1091. " 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1,\n",
  1092. " 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1,\n",
  1093. " 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1,\n",
  1094. " 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0,\n",
  1095. " 0, 1, 0, 0, 0, 1, 1, 0])\n",
  1096. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  1097. "label = tensor([1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0,\n",
  1098. " 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0,\n",
  1099. " 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1,\n",
  1100. " 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0,\n",
  1101. " 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1,\n",
  1102. " 1, 1, 0, 1, 1, 1, 0, 1])\n",
  1103. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  1104. "label = tensor([0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1,\n",
  1105. " 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1,\n",
  1106. " 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0,\n",
  1107. " 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0,\n",
  1108. " 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1,\n",
  1109. " 0, 0, 1, 0, 0, 1, 1, 0])\n",
  1110. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0]\n",
  1111. "label = tensor([0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,\n",
  1112. " 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1,\n",
  1113. " 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0,\n",
  1114. " 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,\n",
  1115. " 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1,\n",
  1116. " 0, 0, 0, 0, 1, 1, 0, 0])\n",
  1117. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  1118. "label = tensor([0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1,\n",
  1119. " 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1,\n",
  1120. " 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1,\n",
  1121. " 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1,\n",
  1122. " 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0,\n",
  1123. " 0, 1, 0, 0, 1, 1, 0, 1])\n",
  1124. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0]\n",
  1125. "label = tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0,\n",
  1126. " 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0,\n",
  1127. " 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0,\n",
  1128. " 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0,\n",
  1129. " 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1,\n",
  1130. " 0, 1, 0, 1, 1, 0, 0, 1])\n",
  1131. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  1132. "label = tensor([0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1,\n",
  1133. " 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1,\n",
  1134. " 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1])\n",
  1135. "\t [0.6673376950176145]\n",
  1136. "[1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1]\n",
  1137. "[1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0]\n",
  1138. "full train results:\n",
  1139. "\t [0.59375]\n"
]
},
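The bracketed numbers under `full train results:` above are fractions of correct 0/1 predictions over a split. A minimal sketch of how such a score could be derived from the printed `output`/`label` pairs (the helper name is hypothetical, not from the notebook):

```python
import torch

def binary_accuracy(outputs, labels):
    """Fraction of positions where the 0.0/1.0 outputs match the 0/1 labels."""
    preds = torch.as_tensor(outputs)
    labels = torch.as_tensor(labels, dtype=preds.dtype)
    return (preds == labels).float().mean().item()

# binary_accuracy([1.0, 0.0, 1.0, 1.0], [1, 0, 0, 0])  ->  0.5
```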
{
"name": "stdout",
"output_type": "stream",
"text": [
  1146. "\t [0.9949672873678913]\n",
  1147. "[1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1]\n",
  1148. "[0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1]\n",
  1149. "best accs results:\n",
  1150. "\t [0.59375]\n",
  1151. "\t [0.8037242073477604]\n",
  1152. "[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]\n",
  1153. "-----------------------------Fold 2---------------\n",
  1154. "preparing dataloaders...\n",
  1155. "torch.Size([67, 7, 9, 20])\n",
  1156. "coef when 0 > 1 1\n",
  1157. "creating model...\n",
  1158. "calculating total steps...\n",
  1159. "epoch: 1\n",
  1160. "validation loss decreased (inf ---> 0.693442), val_acc = 0.437984496124031\n",
  1161. "validation acc increased (0.000000 ---> 0.437984)\n",
  1162. "epoch 1: train loss = 0.7009523889089279, l1loss = 0.13856501084980288, train acc = 0.4914400805639476,\n",
  1163. "val_loss = 0.7006198250970175, val_acc = 0.3992248062015504\n",
  1164. "\n",
  1165. "epoch: 2\n",
  1166. "epoch 2: train loss = 0.6762631435888892, l1loss = 0.13815073134735872, train acc = 0.607754279959718,\n",
  1167. "val_loss = 0.7034130031748336, val_acc = 0.4108527131782946\n",
  1168. "\n",
  1169. "epoch: 3\n",
  1170. "validation acc increased (0.437984 ---> 0.492248)\n",
  1171. "epoch 3: train loss = 0.6556627195767406, l1loss = 0.13765930575726615, train acc = 0.6299093655589124,\n",
  1172. "val_loss = 0.6913710462954618, val_acc = 0.5193798449612403\n",
  1173. "\n",
  1174. "epoch: 4\n",
  1175. "validation loss decreased (0.693442 ---> 0.689858), val_acc = 0.5116279069767442\n",
  1176. "validation acc increased (0.492248 ---> 0.511628)\n",
  1177. "validation loss decreased (0.689858 ---> 0.666388), val_acc = 0.562015503875969\n",
  1178. "validation acc increased (0.511628 ---> 0.562016)\n",
  1179. "epoch 4: train loss = 0.6381383219514368, l1loss = 0.13702793940076416, train acc = 0.6374622356495468,\n",
  1180. "val_loss = 0.6515760264655416, val_acc = 0.5697674418604651\n",
  1181. "\n",
  1182. "epoch: 5\n",
  1183. "validation loss decreased (0.666388 ---> 0.648785), val_acc = 0.5775193798449613\n",
  1184. "validation acc increased (0.562016 ---> 0.577519)\n",
  1185. "validation loss decreased (0.648785 ---> 0.622154), val_acc = 0.6085271317829457\n",
  1186. "validation acc increased (0.577519 ---> 0.608527)\n",
  1187. "epoch 5: train loss = 0.6235407856776035, l1loss = 0.13619495138961143, train acc = 0.6414904330312186,\n",
  1188. "val_loss = 0.6117342756700146, val_acc = 0.6085271317829457\n",
  1189. "\n",
  1190. "epoch: 6\n",
  1191. "validation loss decreased (0.622154 ---> 0.609880), val_acc = 0.6085271317829457\n",
  1192. "validation acc increased (0.608527 ---> 0.608527)\n",
  1193. "validation loss decreased (0.609880 ---> 0.601803), val_acc = 0.6124031007751938\n",
  1194. "validation acc increased (0.608527 ---> 0.612403)\n",
  1195. "epoch 6: train loss = 0.6092025198244977, l1loss = 0.13509638041768068, train acc = 0.649043303121853,\n",
  1196. "val_loss = 0.597300708062889, val_acc = 0.6162790697674418\n",
  1197. "\n",
  1198. "epoch: 7\n",
  1199. "validation loss decreased (0.601803 ---> 0.597826), val_acc = 0.6085271317829457\n",
  1200. "validation loss decreased (0.597826 ---> 0.594267), val_acc = 0.6085271317829457\n",
  1201. "epoch 7: train loss = 0.5984585170539364, l1loss = 0.13366857624246034, train acc = 0.6560926485397784,\n",
  1202. "val_loss = 0.5917771292287249, val_acc = 0.6085271317829457\n",
  1203. "\n",
  1204. "epoch: 8\n",
  1205. "validation loss decreased (0.594267 ---> 0.591831), val_acc = 0.6124031007751938\n",
  1206. "validation acc increased (0.612403 ---> 0.612403)\n",
  1207. "validation loss decreased (0.591831 ---> 0.585725), val_acc = 0.6124031007751938\n",
  1208. "validation acc increased (0.612403 ---> 0.612403)\n",
  1209. "epoch 8: train loss = 0.5909301571375414, l1loss = 0.13186680199156356, train acc = 0.6591137965760322,\n",
  1210. "val_loss = 0.5863324623237285, val_acc = 0.6046511627906976\n",
  1211. "\n",
  1212. "epoch: 9\n",
  1213. "validation loss decreased (0.585725 ---> 0.582507), val_acc = 0.6046511627906976\n",
  1214. "epoch 9: train loss = 0.5829210489057942, l1loss = 0.12968964685666237, train acc = 0.6636455186304129,\n",
  1215. "val_loss = 0.5822689447681918, val_acc = 0.6124031007751938\n",
  1216. "\n",
  1217. "epoch: 10\n",
  1218. "validation loss decreased (0.582507 ---> 0.582276), val_acc = 0.6124031007751938\n",
  1219. "validation acc increased (0.612403 ---> 0.612403)\n",
  1220. "validation acc increased (0.612403 ---> 0.612403)\n",
  1221. "epoch 10: train loss = 0.575366021463158, l1loss = 0.12713940261533974, train acc = 0.6717019133937563,\n",
  1222. "val_loss = 0.5861175392949304, val_acc = 0.6046511627906976\n",
  1223. "\n",
  1224. "epoch: 11\n",
  1225. "epoch 11: train loss = 0.5670345990439316, l1loss = 0.12424162386617392, train acc = 0.6782477341389728,\n",
  1226. "val_loss = 0.588328478872314, val_acc = 0.5968992248062015\n",
  1227. "\n",
  1228. "epoch: 12\n",
  1229. "epoch 12: train loss = 0.5552928300062335, l1loss = 0.1211286605770014, train acc = 0.6888217522658611,\n",
  1230. "val_loss = 0.5963715642921684, val_acc = 0.5968992248062015\n",
  1231. "\n",
  1232. "epoch: 13\n",
  1233. "epoch 13: train loss = 0.5412835172055831, l1loss = 0.11785069293335966, train acc = 0.6978851963746223,\n",
  1234. "val_loss = 0.6184392266495283, val_acc = 0.5968992248062015\n",
  1235. "\n",
  1236. "epoch: 14\n",
  1237. "epoch 14: train loss = 0.5228479005178777, l1loss = 0.1145535701471273, train acc = 0.7235649546827795,\n",
  1238. "val_loss = 0.6210187313168548, val_acc = 0.5930232558139535\n",
  1239. "\n",
  1240. "epoch: 15\n",
  1241. "epoch 15: train loss = 0.5053458828460294, l1loss = 0.11146547943953901, train acc = 0.7240684793554885,\n",
  1242. "val_loss = 0.6982138563496197, val_acc = 0.6046511627906976\n",
  1243. "\n",
  1244. "epoch: 16\n",
  1245. "epoch 16: train loss = 0.48290507022707846, l1loss = 0.10866142951199656, train acc = 0.7452165156092648,\n",
  1246. "val_loss = 0.6597347740055055, val_acc = 0.5852713178294574\n",
  1247. "\n",
  1248. "epoch: 17\n",
  1249. "epoch 17: train loss = 0.4580553521562559, l1loss = 0.10626861951262329, train acc = 0.7678751258811681,\n",
  1250. "val_loss = 0.6594978039578874, val_acc = 0.5503875968992248\n",
  1251. "\n",
  1252. "epoch: 18\n",
  1253. "epoch 18: train loss = 0.42439078243237366, l1loss = 0.10417765665570534, train acc = 0.7809667673716012,\n",
  1254. "val_loss = 0.8379030893015307, val_acc = 0.6007751937984496\n",
  1255. "\n",
  1256. "epoch: 19\n",
  1257. "epoch 19: train loss = 0.4111362245207707, l1loss = 0.10249360746067217, train acc = 0.7990936555891238,\n",
  1258. "val_loss = 0.7785166943258093, val_acc = 0.5775193798449613\n",
  1259. "\n",
  1260. "epoch: 20\n",
  1261. "epoch 20: train loss = 0.3810017136828777, l1loss = 0.10114079596022704, train acc = 0.8227593152064451,\n",
  1262. "val_loss = 1.1790409819042498, val_acc = 0.5968992248062015\n",
  1263. "\n",
  1264. "epoch: 21\n",
  1265. "epoch 21: train loss = 0.3430937575129464, l1loss = 0.09981364857364157, train acc = 0.8423967774420946,\n",
  1266. "val_loss = 2.173583840214929, val_acc = 0.5852713178294574\n",
  1267. "\n",
  1268. "epoch: 22\n",
  1269. "epoch 22: train loss = 0.3137253760571206, l1loss = 0.09864820635991034, train acc = 0.8600201409869084,\n",
  1270. "val_loss = 1.0921057191236998, val_acc = 0.6085271317829457\n",
  1271. "\n",
  1272. "epoch: 23\n",
  1273. "epoch 23: train loss = 0.2892607558830027, l1loss = 0.09758137509391868, train acc = 0.8746223564954683,\n",
  1274. "val_loss = 3.513900268909543, val_acc = 0.6007751937984496\n",
  1275. "\n",
  1276. "epoch: 24\n",
  1277. "epoch 24: train loss = 0.26403892289355685, l1loss = 0.09678075590132466, train acc = 0.8856998992950654,\n",
  1278. "val_loss = 2.315718826740374, val_acc = 0.5813953488372093\n",
  1279. "\n",
  1280. "epoch: 25\n",
  1281. "epoch 25: train loss = 0.2428219530577626, l1loss = 0.09631269205312834, train acc = 0.9033232628398792,\n",
  1282. "val_loss = 1.3863746112631272, val_acc = 0.5581395348837209\n",
  1283. "\n",
  1284. "epoch: 26\n",
  1285. "epoch 26: train loss = 0.23106987370705195, l1loss = 0.0956531882075985, train acc = 0.9078549848942599,\n",
  1286. "val_loss = 3.2332160620726356, val_acc = 0.6007751937984496\n",
  1287. "\n",
  1288. "epoch: 27\n",
  1289. "epoch 27: train loss = 0.24449666369657622, l1loss = 0.09556730201867412, train acc = 0.8942598187311178,\n",
  1290. "val_loss = 1.645380326943804, val_acc = 0.5813953488372093\n",
  1291. "\n",
  1292. "epoch: 28\n",
  1293. "epoch 28: train loss = 0.2848308266169595, l1loss = 0.09535873357923129, train acc = 0.8690835850956697,\n",
  1294. "val_loss = 1.1068245080091648, val_acc = 0.5658914728682171\n",
  1295. "\n",
  1296. "epoch: 29\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
  1303. "epoch 29: train loss = 0.21259009558988842, l1loss = 0.09493379782573934, train acc = 0.9154078549848943,\n",
  1304. "val_loss = 1.3550787757533465, val_acc = 0.5930232558139535\n",
  1305. "\n",
  1306. "epoch: 30\n",
  1307. "epoch 30: train loss = 0.17986376305961416, l1loss = 0.0943629225155138, train acc = 0.93202416918429,\n",
  1308. "val_loss = 3.399730042824269, val_acc = 0.5813953488372093\n",
  1309. "\n",
  1310. "epoch: 31\n",
  1311. "epoch 31: train loss = 0.1377756273278297, l1loss = 0.09381341631829078, train acc = 0.9587109768378651,\n",
  1312. "val_loss = 1.4636259065117947, val_acc = 0.5930232558139535\n",
  1313. "\n",
  1314. "epoch: 32\n",
  1315. "epoch 32: train loss = 0.15627087596409991, l1loss = 0.09361198741083539, train acc = 0.9370594159113796,\n",
  1316. "val_loss = 2.010465736427144, val_acc = 0.5930232558139535\n",
  1317. "\n",
  1318. "epoch: 33\n",
  1319. "epoch 33: train loss = 0.21454838335454043, l1loss = 0.09376692507052109, train acc = 0.904833836858006,\n",
  1320. "val_loss = 3.555855639821258, val_acc = 0.5813953488372093\n",
  1321. "\n",
  1322. "epoch: 34\n",
  1323. "epoch 34: train loss = 0.1433841212908667, l1loss = 0.09360743821237502, train acc = 0.9516616314199395,\n",
  1324. "val_loss = 2.7130774370459623, val_acc = 0.6201550387596899\n",
  1325. "\n",
  1326. "epoch: 35\n",
  1327. "validation acc increased (0.612403 ---> 0.620155)\n",
  1328. "epoch 35: train loss = 0.09808508254970308, l1loss = 0.09324432826954553, train acc = 0.9712990936555891,\n",
  1329. "val_loss = 2.107295219288316, val_acc = 0.5968992248062015\n",
  1330. "\n",
  1331. "epoch: 36\n",
  1332. "epoch 36: train loss = 0.09397425880695037, l1loss = 0.09265048068095551, train acc = 0.9707955689828801,\n",
  1333. "val_loss = 2.3170659689940223, val_acc = 0.6124031007751938\n",
  1334. "\n",
  1335. "epoch: 37\n",
  1336. "epoch 37: train loss = 0.07624247869756166, l1loss = 0.09214130119764793, train acc = 0.9788519637462235,\n",
  1337. "val_loss = 2.4360666565015885, val_acc = 0.6007751937984496\n",
  1338. "\n",
  1339. "epoch: 38\n",
  1340. "epoch 38: train loss = 0.08142898870258773, l1loss = 0.09167566879728409, train acc = 0.972306143001007,\n",
  1341. "val_loss = 3.0442896406779916, val_acc = 0.5775193798449613\n",
  1342. "\n",
  1343. "epoch: 39\n",
  1344. "epoch 39: train loss = 0.05564044175476346, l1loss = 0.09119880020858538, train acc = 0.9914400805639476,\n",
  1345. "val_loss = 3.227812499038933, val_acc = 0.6124031007751938\n",
  1346. "\n",
  1347. "epoch: 40\n",
  1348. "validation acc increased (0.620155 ---> 0.627907)\n",
  1349. "epoch 40: train loss = 0.047954427309086674, l1loss = 0.09070139868261831, train acc = 0.9894259818731118,\n",
  1350. "val_loss = 2.6779214356296746, val_acc = 0.6085271317829457\n",
  1351. "\n",
  1352. "epoch: 41\n",
  1353. "epoch 41: train loss = 0.0805706052561661, l1loss = 0.09069800027247522, train acc = 0.972306143001007,\n",
  1354. "val_loss = 3.348146224206732, val_acc = 0.5775193798449613\n",
  1355. "\n",
  1356. "epoch: 42\n",
  1357. "epoch 42: train loss = 0.120132422931905, l1loss = 0.09100891725504387, train acc = 0.9561933534743202,\n",
  1358. "val_loss = 3.8875221688618034, val_acc = 0.5775193798449613\n",
  1359. "\n",
  1360. "epoch: 43\n",
  1361. "validation acc increased (0.627907 ---> 0.627907)\n",
  1362. "epoch 43: train loss = 0.08399603084435035, l1loss = 0.09161909364261416, train acc = 0.972809667673716,\n",
  1363. "val_loss = 3.6390174350073172, val_acc = 0.5813953488372093\n",
  1364. "\n",
  1365. "epoch: 44\n",
  1366. "epoch 44: train loss = 0.08419710318187452, l1loss = 0.09146056805192404, train acc = 0.9788519637462235,\n",
  1367. "val_loss = 3.222931582105252, val_acc = 0.6201550387596899\n",
  1368. "\n",
  1369. "epoch: 45\n",
  1370. "epoch 45: train loss = 0.06798528590261156, l1loss = 0.09109198656176992, train acc = 0.9783484390735147,\n",
  1371. "val_loss = 4.210132499073827, val_acc = 0.5891472868217055\n",
  1372. "\n",
  1373. "epoch: 46\n",
  1374. "epoch 46: train loss = 0.04261230672114085, l1loss = 0.09061521228706729, train acc = 0.9924471299093656,\n",
  1375. "val_loss = 3.675780127214831, val_acc = 0.5852713178294574\n",
  1376. "\n",
  1377. "epoch: 47\n",
  1378. "epoch 47: train loss = 0.030354493255431558, l1loss = 0.09003987193317692, train acc = 0.9969788519637462,\n",
  1379. "val_loss = 3.3861651013987935, val_acc = 0.6201550387596899\n",
  1380. "\n",
  1381. "epoch: 48\n",
  1382. "epoch 48: train loss = 0.02647563922973978, l1loss = 0.08927071901846749, train acc = 0.9974823766364552,\n",
  1383. "val_loss = 3.631073840828829, val_acc = 0.5968992248062015\n",
  1384. "\n",
  1385. "epoch: 49\n",
  1386. "epoch 49: train loss = 0.021390184417764645, l1loss = 0.08851830925406046, train acc = 0.9984894259818731,\n",
  1387. "val_loss = 3.7224400486623823, val_acc = 0.6007751937984496\n",
  1388. "\n",
  1389. "epoch: 50\n",
  1390. "epoch 50: train loss = 0.019483359864456175, l1loss = 0.08783999447826173, train acc = 0.998992950654582,\n",
  1391. "val_loss = 3.5347149741742037, val_acc = 0.6201550387596899\n",
  1392. "\n",
  1393. "epoch: 51\n",
  1394. "epoch 51: train loss = 0.01563782924795589, l1loss = 0.08718703778728858, train acc = 0.998992950654582,\n",
  1395. "val_loss = 3.792635572049045, val_acc = 0.6085271317829457\n",
  1396. "\n",
  1397. "epoch: 52\n",
  1398. "epoch 52: train loss = 0.015153491843307967, l1loss = 0.0865459504189328, train acc = 0.999496475327291,\n",
  1399. "val_loss = 3.765355708987214, val_acc = 0.6162790697674418\n",
  1400. "\n",
  1401. "epoch: 53\n",
  1402. "epoch 53: train loss = 0.014703800348416797, l1loss = 0.08596928344152245, train acc = 1.0,\n",
  1403. "val_loss = 3.4551806544610697, val_acc = 0.6085271317829457\n",
  1404. "\n",
  1405. "!!! overfitted !!!\n",
  1406. "[1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0]\n",
  1407. "[1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1]\n",
  1408. "early stoping results:\n",
  1409. "\t [0.6041666666666666, 0.5572916666666666]\n",
  1410. "output = [1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  1411. "label = tensor([0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1,\n",
  1412. " 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0,\n",
  1413. " 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0,\n",
  1414. " 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0,\n",
  1415. " 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1,\n",
  1416. " 0, 0, 1, 1, 1, 0, 1, 1])\n",
  1417. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  1418. "label = tensor([1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1,\n",
  1419. " 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0,\n",
  1420. " 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1,\n",
  1421. " 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1,\n",
  1422. " 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0,\n",
  1423. " 0, 0, 1, 1, 0, 0, 1, 1])\n",
  1424. "output = [0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  1425. "label = tensor([0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1,\n",
  1426. " 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0,\n",
  1427. " 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1,\n",
  1428. " 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1,\n",
  1429. " 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1,\n",
  1430. " 1, 1, 1, 0, 1, 1, 1, 1])\n",
  1431. "output = [1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0]\n",
  1432. "label = tensor([1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0,\n",
  1433. " 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0,\n",
  1434. " 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1,\n",
  1435. " 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0,\n",
  1436. " 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0,\n",
  1437. " 1, 0, 0, 0, 1, 1, 0, 0])\n",
  1438. "output = [1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  1439. "label = tensor([0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0,\n",
  1440. " 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1,\n",
  1441. " 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1,\n",
  1442. " 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1,\n",
  1443. " 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0,\n",
  1444. " 1, 0, 1, 1, 0, 1, 0, 1])\n",
  1445. "output = [1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  1446. "label = tensor([1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n",
  1447. " 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0,\n",
  1448. " 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1,\n",
  1449. " 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0,\n",
  1450. " 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0,\n",
  1451. " 0, 0, 1, 1, 1, 1, 0, 0])\n",
  1452. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  1453. "label = tensor([1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1,\n",
  1454. " 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0,\n",
  1455. " 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0,\n",
  1456. " 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1,\n",
  1457. " 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1,\n",
  1458. " 1, 0, 0, 0, 0, 1, 1, 1])\n",
  1459. "output = [1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  1460. "label = tensor([0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1,\n",
  1461. " 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0,\n",
  1462. " 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0,\n",
  1463. " 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0,\n",
  1464. " 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1,\n",
  1465. " 1, 1, 1, 0, 1, 1, 1, 1])\n",
  1466. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  1467. "label = tensor([1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0,\n",
  1468. " 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0,\n",
  1469. " 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1,\n",
  1470. " 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1,\n",
  1471. " 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0,\n",
  1472. " 0, 0, 1, 1, 0, 1, 0, 1])\n",
  1473. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  1474. "label = tensor([1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1,\n",
  1475. " 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1,\n",
  1476. " 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1,\n",
  1477. " 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1,\n",
  1478. " 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0,\n",
  1479. " 0, 1, 0, 1, 1, 1, 1, 0])\n",
  1480. "output = [1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0]\n",
  1481. "label = tensor([1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0,\n",
  1482. " 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0,\n",
  1483. " 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1,\n",
  1484. " 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0,\n",
  1485. " 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
  1486. " 0, 0, 1, 1, 1, 0, 0, 0])\n",
  1487. "output = [1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  1488. "label = tensor([0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1,\n",
  1489. " 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1,\n",
  1490. " 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0,\n",
  1491. " 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0,\n",
  1492. " 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0,\n",
  1493. " 0, 0, 1, 1, 1, 1, 1, 0])\n",
  1494. "output = [0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0]\n",
  1495. "label = tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1,\n",
  1496. " 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0,\n",
  1497. " 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1,\n",
  1498. " 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1,\n",
  1499. " 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0,\n",
  1500. " 1, 0, 1, 1, 1, 1, 0, 0])\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
  1507. "output = [0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0]\n",
  1508. "label = tensor([0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0,\n",
  1509. " 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0,\n",
  1510. " 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0,\n",
  1511. " 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1,\n",
  1512. " 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0,\n",
  1513. " 1, 1, 1, 1, 0, 1, 1, 0])\n",
  1514. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  1515. "label = tensor([0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1,\n",
  1516. " 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1,\n",
  1517. " 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1,\n",
  1518. " 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0,\n",
  1519. " 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1,\n",
  1520. " 0, 1, 0, 1, 1, 1, 0, 1])\n",
  1521. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0]\n",
  1522. "label = tensor([1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1,\n",
  1523. " 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0,\n",
  1524. " 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1])\n",
  1525. "\t [0.6673376950176145, 0.6722054380664653]\n",
  1526. "[1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0]\n",
  1527. "[0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1]\n",
  1528. "full train results:\n",
  1529. "\t [0.59375, 0.5885416666666666]\n",
  1530. "\t [0.9949672873678913, 0.9974823766364552]\n",
  1531. "[1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1]\n",
  1532. "[1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1]\n",
  1533. "best accs results:\n",
  1534. "\t [0.59375, 0.5625]\n",
  1535. "\t [0.8037242073477604, 0.8670694864048338]\n",
  1536. "[0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1]\n",
  1537. "-----------------------------Fold 3---------------\n",
  1538. "preparing dataloaders...\n",
  1539. "torch.Size([66, 7, 9, 20])\n",
  1540. "coef when 0 > 1 1\n",
  1541. "creating model...\n",
  1542. "calculating total steps...\n",
  1543. "epoch: 1\n",
  1544. "validation loss decreased (inf ---> 0.694551), val_acc = 0.3643410852713178\n",
  1545. "validation acc increased (0.000000 ---> 0.364341)\n",
  1546. "validation acc increased (0.364341 ---> 0.364341)\n",
  1547. "epoch 1: train loss = 0.6969524957183608, l1loss = 0.1381219726307948, train acc = 0.5654135338345865,\n",
  1548. "val_loss = 0.7013817683670872, val_acc = 0.3643410852713178\n",
  1549. "\n",
  1550. "epoch: 2\n",
  1551. "validation acc increased (0.364341 ---> 0.364341)\n",
  1552. "validation acc increased (0.364341 ---> 0.364341)\n",
  1553. "epoch 2: train loss = 0.6680379967940481, l1loss = 0.1377297131937548, train acc = 0.6476190476190476,\n",
  1554. "val_loss = 0.7116472231325253, val_acc = 0.3643410852713178\n",
  1555. "\n",
  1556. "epoch: 3\n",
  1557. "validation acc increased (0.364341 ---> 0.364341)\n",
  1558. "validation acc increased (0.364341 ---> 0.395349)\n",
  1559. "epoch 3: train loss = 0.6447332068194721, l1loss = 0.1372586890644298, train acc = 0.656641604010025,\n",
  1560. "val_loss = 0.7110485115716624, val_acc = 0.42248062015503873\n",
  1561. "\n",
  1562. "epoch: 4\n",
  1563. "validation acc increased (0.395349 ---> 0.426357)\n",
  1564. "validation acc increased (0.426357 ---> 0.480620)\n",
  1565. "epoch 4: train loss = 0.6260023004130313, l1loss = 0.1366433581687454, train acc = 0.6551378446115288,\n",
  1566. "val_loss = 0.6881170402201571, val_acc = 0.4883720930232558\n",
  1567. "\n",
  1568. "epoch: 5\n",
  1569. "validation loss decreased (0.694551 ---> 0.686318), val_acc = 0.49224806201550386\n",
  1570. "validation acc increased (0.480620 ---> 0.492248)\n",
  1571. "validation loss decreased (0.686318 ---> 0.665753), val_acc = 0.49612403100775193\n",
  1572. "validation acc increased (0.492248 ---> 0.496124)\n",
  1573. "epoch 5: train loss = 0.6107026895783599, l1loss = 0.13582232229990468, train acc = 0.6636591478696742,\n",
  1574. "val_loss = 0.6599389855251756, val_acc = 0.5\n",
  1575. "\n",
  1576. "epoch: 6\n",
  1577. "validation loss decreased (0.665753 ---> 0.659603), val_acc = 0.49612403100775193\n",
  1578. "validation acc increased (0.496124 ---> 0.496124)\n",
  1579. "validation loss decreased (0.659603 ---> 0.657801), val_acc = 0.49612403100775193\n",
  1580. "validation acc increased (0.496124 ---> 0.496124)\n",
  1581. "epoch 6: train loss = 0.5962554595524208, l1loss = 0.13474145999229642, train acc = 0.6731829573934837,\n",
  1582. "val_loss = 0.6608283524365388, val_acc = 0.4883720930232558\n",
  1583. "\n",
  1584. "epoch: 7\n",
  1585. "epoch 7: train loss = 0.5844666071702962, l1loss = 0.13333625524563897, train acc = 0.6807017543859649,\n",
  1586. "val_loss = 0.6693935172502384, val_acc = 0.5\n",
  1587. "\n",
  1588. "epoch: 8\n",
  1589. "validation acc increased (0.496124 ---> 0.500000)\n",
  1590. "validation acc increased (0.500000 ---> 0.500000)\n",
  1591. "epoch 8: train loss = 0.5756398552342465, l1loss = 0.13154963165297545, train acc = 0.6832080200501253,\n",
  1592. "val_loss = 0.6796527236931084, val_acc = 0.49612403100775193\n",
  1593. "\n",
  1594. "epoch: 9\n",
  1595. "epoch 9: train loss = 0.5669913336149135, l1loss = 0.12937707920421035, train acc = 0.6907268170426065,\n",
  1596. "val_loss = 0.691935848820117, val_acc = 0.49612403100775193\n",
  1597. "\n",
  1598. "epoch: 10\n",
  1599. "epoch 10: train loss = 0.5586488504756364, l1loss = 0.1268213682380834, train acc = 0.6952380952380952,\n",
  1600. "val_loss = 0.7135535478591919, val_acc = 0.49612403100775193\n",
  1601. "\n",
  1602. "epoch: 11\n",
  1603. "validation acc increased (0.500000 ---> 0.500000)\n",
  1604. "epoch 11: train loss = 0.547169718527256, l1loss = 0.12398374975474556, train acc = 0.7007518796992481,\n",
  1605. "val_loss = 0.762653996778089, val_acc = 0.5\n",
  1606. "\n",
  1607. "epoch: 12\n",
  1608. "validation acc increased (0.500000 ---> 0.500000)\n",
  1609. "epoch 12: train loss = 0.5339804249001028, l1loss = 0.12088241919613721, train acc = 0.7132832080200501,\n",
  1610. "val_loss = 0.748571188874947, val_acc = 0.49224806201550386\n",
  1611. "\n",
  1612. "epoch: 13\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
  1619. "epoch 13: train loss = 0.520279674302964, l1loss = 0.11765421265572833, train acc = 0.7127819548872181,\n",
  1620. "val_loss = 0.8685980189678281, val_acc = 0.49224806201550386\n",
  1621. "\n",
  1622. "epoch: 14\n",
  1623. "validation acc increased (0.500000 ---> 0.500000)\n",
  1624. "epoch 14: train loss = 0.5055005631948772, l1loss = 0.11437528439631738, train acc = 0.7298245614035088,\n",
  1625. "val_loss = 1.0051480910574744, val_acc = 0.5\n",
  1626. "\n",
  1627. "epoch: 15\n",
  1628. "validation acc increased (0.500000 ---> 0.500000)\n",
  1629. "validation acc increased (0.500000 ---> 0.500000)\n",
  1630. "epoch 15: train loss = 0.48474925627983306, l1loss = 0.11120888095601161, train acc = 0.7518796992481203,\n",
  1631. "val_loss = 0.7568337228871131, val_acc = 0.5155038759689923\n",
  1632. "\n",
  1633. "epoch: 16\n",
  1634. "validation acc increased (0.500000 ---> 0.500000)\n",
  1635. "validation acc increased (0.500000 ---> 0.635659)\n",
  1636. "epoch 16: train loss = 0.47231552145236116, l1loss = 0.10832016670091409, train acc = 0.7543859649122807,\n",
  1637. "val_loss = 0.8228741385215936, val_acc = 0.5\n",
  1638. "\n",
  1639. "epoch: 17\n",
  1640. "epoch 17: train loss = 0.4447837758333163, l1loss = 0.10566314655661882, train acc = 0.7729323308270677,\n",
  1641. "val_loss = 0.7647813073424405, val_acc = 0.5\n",
  1642. "\n",
  1643. "epoch: 18\n",
  1644. "epoch 18: train loss = 0.4166216857750015, l1loss = 0.10333552387423982, train acc = 0.793984962406015,\n",
  1645. "val_loss = 1.1814187527164932, val_acc = 0.49612403100775193\n",
  1646. "\n",
  1647. "epoch: 19\n",
  1648. "epoch 19: train loss = 0.4013006369571638, l1loss = 0.1015192489128065, train acc = 0.7984962406015037,\n",
  1649. "val_loss = 1.049088865287544, val_acc = 0.5077519379844961\n",
  1650. "\n",
  1651. "epoch: 20\n",
  1652. "epoch 20: train loss = 0.38358660407532424, l1loss = 0.10005261027872712, train acc = 0.8030075187969925,\n",
  1653. "val_loss = 0.9182334223458933, val_acc = 0.6085271317829457\n",
  1654. "\n",
  1655. "epoch: 21\n",
  1656. "epoch 21: train loss = 0.3649053702826488, l1loss = 0.09899659350626451, train acc = 0.8180451127819549,\n",
  1657. "val_loss = 1.0533229842666507, val_acc = 0.5232558139534884\n",
  1658. "\n",
  1659. "epoch: 22\n",
  1660. "epoch 22: train loss = 0.3366708062645188, l1loss = 0.09801618972219023, train acc = 0.837593984962406,\n",
  1661. "val_loss = 1.0070983062880907, val_acc = 0.5542635658914729\n",
  1662. "\n",
  1663. "epoch: 23\n",
  1664. "epoch 23: train loss = 0.30011755532788154, l1loss = 0.097101370052885, train acc = 0.8736842105263158,\n",
  1665. "val_loss = 1.9908723738766456, val_acc = 0.5\n",
  1666. "\n",
  1667. "epoch: 24\n",
  1668. "epoch 24: train loss = 0.2893361300005949, l1loss = 0.09641273515789132, train acc = 0.8671679197994987,\n",
  1669. "val_loss = 4.135273530501728, val_acc = 0.46124031007751937\n",
  1670. "\n",
  1671. "epoch: 25\n",
  1672. "epoch 25: train loss = 0.2674656217408957, l1loss = 0.09561551359288376, train acc = 0.87468671679198,\n",
  1673. "val_loss = 3.555174014365026, val_acc = 0.4689922480620155\n",
  1674. "\n",
  1675. "epoch: 26\n",
  1676. "epoch 26: train loss = 0.25737038730529316, l1loss = 0.09514911164690677, train acc = 0.8927318295739348,\n",
  1677. "val_loss = 1.4680318331071573, val_acc = 0.6007751937984496\n",
  1678. "\n",
  1679. "epoch: 27\n",
  1680. "epoch 27: train loss = 0.19562739423641884, l1loss = 0.09470970924560887, train acc = 0.9263157894736842,\n",
  1681. "val_loss = 1.4795843288879986, val_acc = 0.5697674418604651\n",
  1682. "\n",
  1683. "epoch: 28\n",
  1684. "epoch 28: train loss = 0.21110729103100329, l1loss = 0.09441207906506713, train acc = 0.9132832080200501,\n",
  1685. "val_loss = 2.8698120837987857, val_acc = 0.5465116279069767\n",
  1686. "\n",
  1687. "epoch: 29\n",
  1688. "epoch 29: train loss = 0.2995036028082807, l1loss = 0.09422946139087056, train acc = 0.8756892230576441,\n",
  1689. "val_loss = 1.910594179854519, val_acc = 0.562015503875969\n",
  1690. "\n",
  1691. "epoch: 30\n",
  1692. "epoch 30: train loss = 0.29151671891821956, l1loss = 0.09452762886097557, train acc = 0.8696741854636592,\n",
  1693. "val_loss = 2.842216599819272, val_acc = 0.5193798449612403\n",
  1694. "\n",
  1695. "epoch: 31\n",
  1696. "epoch 31: train loss = 0.24369466431756368, l1loss = 0.09408135806632818, train acc = 0.8897243107769424,\n",
  1697. "val_loss = 1.2342111413302117, val_acc = 0.5775193798449613\n",
  1698. "\n",
  1699. "epoch: 32\n",
  1700. "epoch 32: train loss = 0.177748836042887, l1loss = 0.09305703179131175, train acc = 0.9273182957393483,\n",
  1701. "val_loss = 1.935332564247209, val_acc = 0.5155038759689923\n",
  1702. "\n",
  1703. "epoch: 33\n",
  1704. "epoch 33: train loss = 0.1304508766285459, l1loss = 0.09235037882674607, train acc = 0.9604010025062657,\n",
  1705. "val_loss = 1.6885591052299322, val_acc = 0.5581395348837209\n",
  1706. "\n",
  1707. "epoch: 34\n",
  1708. "epoch 34: train loss = 0.1040880726020139, l1loss = 0.09180786986846971, train acc = 0.968421052631579,\n",
  1709. "val_loss = 1.8644648585901704, val_acc = 0.5581395348837209\n",
  1710. "\n",
  1711. "epoch: 35\n",
  1712. "epoch 35: train loss = 0.11854673838406278, l1loss = 0.09142765101036034, train acc = 0.9508771929824561,\n",
  1713. "val_loss = 4.638857999517503, val_acc = 0.49612403100775193\n",
  1714. "\n",
  1715. "epoch: 36\n",
  1716. "epoch 36: train loss = 0.10307383279789958, l1loss = 0.09096615721557971, train acc = 0.9664160401002506,\n",
  1717. "val_loss = 2.192981764212135, val_acc = 0.5232558139534884\n",
  1718. "\n",
  1719. "epoch: 37\n",
  1720. "epoch 37: train loss = 0.09253215106731669, l1loss = 0.09045285032805345, train acc = 0.9654135338345865,\n",
  1721. "val_loss = 1.9693481386169907, val_acc = 0.5736434108527132\n",
  1722. "\n",
  1723. "epoch: 38\n",
  1724. "epoch 38: train loss = 0.08359189911519077, l1loss = 0.09000530017721922, train acc = 0.9764411027568922,\n",
  1725. "val_loss = 2.151853931042575, val_acc = 0.5426356589147286\n",
  1726. "\n",
  1727. "epoch: 39\n",
  1728. "epoch 39: train loss = 0.06199482042613185, l1loss = 0.08960390809484592, train acc = 0.9824561403508771,\n",
  1729. "val_loss = 2.2035991822102274, val_acc = 0.5891472868217055\n",
  1730. "\n",
  1731. "epoch: 40\n",
  1732. "epoch 40: train loss = 0.07736399561951035, l1loss = 0.08935502176744896, train acc = 0.9709273182957393,\n",
  1733. "val_loss = 2.0890348573872286, val_acc = 0.5736434108527132\n",
  1734. "\n",
  1735. "epoch: 41\n",
  1736. "epoch 41: train loss = 0.07827905084106856, l1loss = 0.08891892515701758, train acc = 0.9724310776942355,\n",
  1737. "val_loss = 4.512386254561964, val_acc = 0.5038759689922481\n",
  1738. "\n",
  1739. "epoch: 42\n",
  1740. "epoch 42: train loss = 0.046724244664635574, l1loss = 0.08847408577762451, train acc = 0.9899749373433584,\n",
  1741. "val_loss = 2.2274649688439774, val_acc = 0.5658914728682171\n",
  1742. "\n",
  1743. "epoch: 43\n",
  1744. "epoch 43: train loss = 0.04698878815747742, l1loss = 0.08795604082874786, train acc = 0.9904761904761905,\n",
  1745. "val_loss = 3.3725712779880492, val_acc = 0.6085271317829457\n",
  1746. "\n",
  1747. "epoch: 44\n",
  1748. "epoch 44: train loss = 0.044069078596388186, l1loss = 0.08755256260621518, train acc = 0.9884711779448622,\n",
  1749. "val_loss = 2.6858827642692154, val_acc = 0.6085271317829457\n",
  1750. "\n",
  1751. "epoch: 45\n",
  1752. "epoch 45: train loss = 0.03322243625134753, l1loss = 0.08706357124679369, train acc = 0.993483709273183,\n",
  1753. "val_loss = 3.174153364906015, val_acc = 0.5232558139534884\n",
  1754. "\n",
  1755. "epoch: 46\n",
  1756. "epoch 46: train loss = 0.0319087140971706, l1loss = 0.08656990248383137, train acc = 0.9949874686716792,\n",
  1757. "val_loss = 3.085727780595306, val_acc = 0.6007751937984496\n",
  1758. "\n",
  1759. "epoch: 47\n",
  1760. "epoch 47: train loss = 0.025276731471892886, l1loss = 0.0862220882547828, train acc = 0.9979949874686717,\n",
  1761. "val_loss = 2.4670777454856756, val_acc = 0.562015503875969\n",
  1762. "\n",
  1763. "epoch: 48\n",
  1764. "epoch 48: train loss = 0.03616303299976172, l1loss = 0.08582206025234142, train acc = 0.9909774436090225,\n",
  1765. "val_loss = 2.59104063915623, val_acc = 0.5891472868217055\n",
  1766. "\n",
  1767. "epoch: 49\n",
  1768. "epoch 49: train loss = 0.022464091186983545, l1loss = 0.08538708544165867, train acc = 0.9984962406015038,\n",
  1769. "val_loss = 2.7770460093206215, val_acc = 0.5930232558139535\n",
  1770. "\n",
  1771. "epoch: 50\n",
  1772. "epoch 50: train loss = 0.01979252443389785, l1loss = 0.08483099960593651, train acc = 0.9984962406015038,\n",
  1773. "val_loss = 2.9248241084490636, val_acc = 0.5387596899224806\n",
  1774. "\n",
  1775. "epoch: 51\n",
  1776. "epoch 51: train loss = 0.015581658314493365, l1loss = 0.08423851138368287, train acc = 0.9994987468671679,\n",
  1777. "val_loss = 2.912791069625884, val_acc = 0.5387596899224806\n",
  1778. "\n",
  1779. "epoch: 52\n",
  1780. "epoch 52: train loss = 0.014949842383688254, l1loss = 0.08364893044146679, train acc = 1.0,\n",
  1781. "val_loss = 2.928498700261116, val_acc = 0.5891472868217055\n",
  1782. "\n",
  1783. "!!! overfitted !!!\n",
  1784. "[1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1]\n",
  1785. "[0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1]\n",
  1786. "early stoping results:\n",
  1787. "\t [0.6041666666666666, 0.5572916666666666, 0.5208333333333334]\n",
  1788. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0]\n",
  1789. "label = tensor([0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0,\n",
  1790. " 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0,\n",
  1791. " 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1,\n",
  1792. " 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,\n",
  1793. " 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1,\n",
  1794. " 1, 1, 0, 1, 1, 0, 0, 0])\n",
  1795. "output = [1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  1796. "label = tensor([1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1,\n",
  1797. " 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0,\n",
  1798. " 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,\n",
  1799. " 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1,\n",
  1800. " 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0,\n",
  1801. " 1, 1, 0, 1, 1, 1, 1, 0])\n",
  1802. "output = [1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  1803. "label = tensor([0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1,\n",
  1804. " 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1,\n",
  1805. " 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1,\n",
  1806. " 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1,\n",
  1807. " 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0,\n",
  1808. " 1, 1, 1, 1, 1, 1, 0, 1])\n",
  1809. "output = [1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0]\n",
  1810. "label = tensor([0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1,\n",
  1811. " 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1,\n",
  1812. " 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1,\n",
  1813. " 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0,\n",
  1814. " 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0,\n",
  1815. " 0, 1, 1, 1, 0, 1, 1, 0])\n",
  1816. "output = [1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  1817. "label = tensor([1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1,\n",
  1818. " 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1,\n",
  1819. " 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1,\n",
  1820. " 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0,\n",
  1821. " 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0,\n",
  1822. " 0, 1, 0, 1, 1, 1, 0, 1])\n",
  1823. "output = [0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0]\n",
  1824. "label = tensor([0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1,\n",
  1825. " 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1,\n",
  1826. " 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,\n",
  1827. " 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0,\n",
  1828. " 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0,\n",
  1829. " 0, 1, 1, 0, 1, 1, 1, 0])\n",
  1830. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0]\n",
  1831. "label = tensor([0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1,\n",
  1832. " 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0,\n",
  1833. " 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0,\n",
  1834. " 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0,\n",
  1835. " 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0,\n",
  1836. " 0, 0, 0, 1, 0, 1, 0, 1])\n",
  1837. "output = [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  1838. "label = tensor([0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1,\n",
  1839. " 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1,\n",
  1840. " 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1,\n",
  1841. " 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
  1842. " 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1,\n",
  1843. " 0, 0, 0, 0, 1, 1, 1, 0])\n",
  1844. "output = [1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0]\n",
  1845. "label = tensor([0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0,\n",
  1846. " 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1,\n",
  1847. " 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1,\n",
  1848. " 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0,\n",
  1849. " 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0,\n",
  1850. " 0, 1, 0, 0, 0, 0, 1, 1])\n",
  1851. "output = [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0]\n",
  1852. "label = tensor([0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1,\n",
  1853. " 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0,\n",
  1854. " 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1,\n",
  1855. " 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0,\n",
  1856. " 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1,\n",
  1857. " 0, 1, 0, 0, 0, 0, 0, 1])\n",
  1858. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  1859. "label = tensor([1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1,\n",
  1860. " 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1,\n",
  1861. " 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0,\n",
  1862. " 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1,\n",
  1863. " 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0,\n",
  1864. " 0, 0, 0, 1, 0, 1, 1, 0])\n",
  1865. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0]\n",
  1866. "label = tensor([0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1,\n",
  1867. " 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1,\n",
  1868. " 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n",
  1869. " 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1,\n",
  1870. " 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0,\n",
  1871. " 0, 0, 1, 0, 0, 1, 0, 1])\n",
  1872. "output = [1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]\n",
  1873. "label = tensor([1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0,\n",
  1874. " 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1,\n",
  1875. " 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0,\n",
  1876. " 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0,\n",
  1877. " 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0,\n",
  1878. " 1, 1, 0, 0, 0, 1, 0, 0])\n",
  1879. "output = [1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0]\n",
  1880. "label = tensor([0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0,\n",
  1881. " 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1,\n",
  1882. " 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1,\n",
  1883. " 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1,\n",
  1884. " 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0,\n",
  1885. " 1, 0, 0, 0, 1, 1, 1, 0])\n",
  1886. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  1887. "label = tensor([0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1,\n",
  1888. " 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1,\n",
  1889. " 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0,\n",
  1890. " 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0,\n",
  1891. " 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,\n",
  1892. " 1, 1, 0, 1, 1, 1, 1, 0])\n",
  1893. "output = [1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0]\n",
  1894. "label = tensor([1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1,\n",
  1895. " 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1,\n",
  1896. " 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1,\n",
  1897. " 0, 1, 0])\n",
  1898. "\t [0.6673376950176145, 0.6722054380664653, 0.6802005012531328]\n"
  1899. ]
  1900. },
  1901. {
  1902. "name": "stdout",
  1903. "output_type": "stream",
  1904. "text": [
  1905. "[1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0]\n",
  1906. "[1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0]\n",
  1907. "full train results:\n",
  1908. "\t [0.59375, 0.5885416666666666, 0.6354166666666666]\n",
  1909. "\t [0.9949672873678913, 0.9974823766364552, 0.8666666666666667]\n",
  1910. "[0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1]\n",
  1911. "[1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1]\n",
  1912. "best accs results:\n",
  1913. "\t [0.59375, 0.5625, 0.6354166666666666]\n",
  1914. "\t [0.8037242073477604, 0.8670694864048338, 0.4967418546365915]\n",
  1915. "[1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1]\n",
  1916. "-----------------------------Fold 4---------------\n",
  1917. "preparing dataloaders...\n",
  1918. "torch.Size([75, 7, 9, 20])\n",
  1919. "coef when 0 > 1 1\n",
  1920. "creating model...\n",
  1921. "calculating total steps...\n",
  1922. "epoch: 1\n",
  1923. "validation loss decreased (inf ---> 0.683370), val_acc = 0.6666666666666666\n",
  1924. "validation acc increased (0.000000 ---> 0.666667)\n",
  1925. "epoch 1: train loss = 0.6846315076727115, l1loss = 0.13866792857171056, train acc = 0.5518962075848304,\n",
  1926. "val_loss = 0.7129394467486891, val_acc = 0.3333333333333333\n",
  1927. "\n",
  1928. "epoch: 2\n",
  1929. "epoch 2: train loss = 0.6627097059628683, l1loss = 0.13824024070760685, train acc = 0.6147704590818364,\n",
  1930. "val_loss = 0.7328977205956629, val_acc = 0.34108527131782945\n",
  1931. "\n",
  1932. "epoch: 3\n",
  1933. "epoch 3: train loss = 0.6461570380690569, l1loss = 0.13772706633913304, train acc = 0.6382235528942116,\n",
  1934. "val_loss = 0.7231047513873078, val_acc = 0.46124031007751937\n",
  1935. "\n",
  1936. "epoch: 4\n",
  1937. "epoch 4: train loss = 0.6311211969086272, l1loss = 0.13707065026084345, train acc = 0.6482035928143712,\n",
  1938. "val_loss = 0.6812371556148973, val_acc = 0.5\n",
  1939. "\n",
  1940. "epoch: 5\n",
  1941. "validation loss decreased (0.683370 ---> 0.677676), val_acc = 0.5116279069767442\n",
  1942. "validation loss decreased (0.677676 ---> 0.642738), val_acc = 0.5271317829457365\n",
  1943. "epoch 5: train loss = 0.6152418267703104, l1loss = 0.1361936211526513, train acc = 0.6596806387225549,\n",
  1944. "val_loss = 0.6299803982409395, val_acc = 0.5348837209302325\n",
  1945. "\n",
  1946. "epoch: 6\n",
  1947. "validation loss decreased (0.642738 ---> 0.628161), val_acc = 0.5310077519379846\n",
  1948. "validation loss decreased (0.628161 ---> 0.618165), val_acc = 0.5271317829457365\n",
  1949. "epoch 6: train loss = 0.6014872402726057, l1loss = 0.1350108287529555, train acc = 0.6641716566866267,\n",
  1950. "val_loss = 0.6179831277492435, val_acc = 0.5271317829457365\n",
  1951. "\n",
  1952. "epoch: 7\n",
  1953. "validation loss decreased (0.618165 ---> 0.617216), val_acc = 0.5271317829457365\n",
  1954. "validation loss decreased (0.617216 ---> 0.614287), val_acc = 0.5193798449612403\n",
  1955. "epoch 7: train loss = 0.5905261646487756, l1loss = 0.1334880442319516, train acc = 0.6711576846307385,\n",
  1956. "val_loss = 0.6132445769716602, val_acc = 0.5193798449612403\n",
  1957. "\n",
  1958. "epoch: 8\n",
  1959. "validation loss decreased (0.614287 ---> 0.612300), val_acc = 0.5193798449612403\n",
  1960. "validation loss decreased (0.612300 ---> 0.611189), val_acc = 0.5271317829457365\n",
  1961. "epoch 8: train loss = 0.5812796850404339, l1loss = 0.13158048356245616, train acc = 0.6781437125748503,\n",
  1962. "val_loss = 0.6176805500836335, val_acc = 0.5310077519379846\n",
  1963. "\n",
  1964. "epoch: 9\n",
  1965. "epoch 9: train loss = 0.5716513974699907, l1loss = 0.12926983946335768, train acc = 0.687125748502994,\n",
  1966. "val_loss = 0.6314459124276804, val_acc = 0.5310077519379846\n",
  1967. "\n",
  1968. "epoch: 10\n",
  1969. "epoch 10: train loss = 0.5630688287778767, l1loss = 0.12655608326256157, train acc = 0.6976047904191617,\n",
  1970. "val_loss = 0.6434678920479708, val_acc = 0.5310077519379846\n",
  1971. "\n",
  1972. "epoch: 11\n",
  1973. "epoch 11: train loss = 0.5509181600844789, l1loss = 0.12356667217618215, train acc = 0.7065868263473054,\n",
  1974. "val_loss = 0.6670152029787847, val_acc = 0.5271317829457365\n",
  1975. "\n",
  1976. "epoch: 12\n",
  1977. "epoch 12: train loss = 0.5384525743549218, l1loss = 0.12038559576648913, train acc = 0.7180638722554891,\n",
  1978. "val_loss = 0.7214980153150337, val_acc = 0.5387596899224806\n",
  1979. "\n",
  1980. "epoch: 13\n",
  1981. "epoch 13: train loss = 0.5237833831957477, l1loss = 0.11706482975961205, train acc = 0.7250499001996008,\n",
  1982. "val_loss = 0.642303595550416, val_acc = 0.5348837209302325\n",
  1983. "\n",
  1984. "epoch: 14\n",
  1985. "epoch 14: train loss = 0.49995520455156733, l1loss = 0.11376702059766727, train acc = 0.7380239520958084,\n",
  1986. "val_loss = 0.7001118160957513, val_acc = 0.5387596899224806\n",
  1987. "\n",
  1988. "epoch: 15\n",
  1989. "epoch 15: train loss = 0.4798619277225045, l1loss = 0.11074621205082434, train acc = 0.751996007984032,\n",
  1990. "val_loss = 0.6432332724563835, val_acc = 0.5465116279069767\n",
  1991. "\n",
  1992. "epoch: 16\n",
  1993. "epoch 16: train loss = 0.450496327377365, l1loss = 0.10813732176067349, train acc = 0.7639720558882236,\n",
  1994. "val_loss = 0.6254766467929811, val_acc = 0.5465116279069767\n",
  1995. "\n",
  1996. "epoch: 17\n",
  1997. "epoch 17: train loss = 0.42610586504498404, l1loss = 0.1061172228076025, train acc = 0.7869261477045908,\n",
  1998. "val_loss = 0.8937256091324858, val_acc = 0.6666666666666666\n",
  1999. "\n",
  2000. "epoch: 18\n",
  2001. "validation acc increased (0.666667 ---> 0.666667)\n",
  2002. "validation acc increased (0.666667 ---> 0.666667)\n",
  2003. "epoch 18: train loss = 0.4125394836633267, l1loss = 0.10447883528625655, train acc = 0.7999001996007984,\n",
  2004. "val_loss = 1.3210663388865862, val_acc = 0.6666666666666666\n",
  2005. "\n",
  2006. "epoch: 19\n",
  2007. "validation acc increased (0.666667 ---> 0.666667)\n",
  2008. "epoch 19: train loss = 0.36414110577273034, l1loss = 0.10303020968051728, train acc = 0.8313373253493014,\n",
  2009. "val_loss = 1.6127898642006366, val_acc = 0.6666666666666666\n",
  2010. "\n",
  2011. "epoch: 20\n",
  2012. "validation acc increased (0.666667 ---> 0.666667)\n",
  2013. "validation acc increased (0.666667 ---> 0.666667)\n",
  2014. "epoch 20: train loss = 0.3496626854061842, l1loss = 0.10169063459434909, train acc = 0.8383233532934131,\n",
  2015. "val_loss = 1.0074581005141254, val_acc = 0.6550387596899225\n",
  2016. "\n",
  2017. "epoch: 21\n",
  2018. "epoch 21: train loss = 0.32243358922337817, l1loss = 0.10068233941724439, train acc = 0.8502994011976048,\n",
  2019. "val_loss = 0.9281984200311262, val_acc = 0.5503875968992248\n",
  2020. "\n",
  2021. "epoch: 22\n",
  2022. "epoch 22: train loss = 0.31491396836415975, l1loss = 0.09971560896216276, train acc = 0.8507984031936128,\n",
  2023. "val_loss = 1.5095452051754146, val_acc = 0.6589147286821705\n",
  2024. "\n",
  2025. "epoch: 23\n",
  2026. "epoch 23: train loss = 0.29515863696258227, l1loss = 0.09860645926105761, train acc = 0.874251497005988,\n",
  2027. "val_loss = 0.887832756074824, val_acc = 0.5775193798449613\n",
  2028. "\n",
  2029. "epoch: 24\n",
  2030. "epoch 24: train loss = 0.24699413461123637, l1loss = 0.09801728431574123, train acc = 0.8917165668662674,\n",
  2031. "val_loss = 1.674672379974247, val_acc = 0.6550387596899225\n",
  2032. "\n",
  2033. "epoch: 25\n",
  2034. "epoch 25: train loss = 0.255139184093285, l1loss = 0.09737496454261259, train acc = 0.8782435129740519,\n",
  2035. "val_loss = 0.9508669986281302, val_acc = 0.5968992248062015\n",
  2036. "\n",
  2037. "epoch: 26\n",
  2038. "epoch 26: train loss = 0.21437893561022486, l1loss = 0.09652904412168228, train acc = 0.9131736526946108,\n",
  2039. "val_loss = 2.068134845689286, val_acc = 0.6511627906976745\n",
  2040. "\n",
  2041. "epoch: 27\n",
  2042. "validation acc increased (0.666667 ---> 0.666667)\n",
  2043. "epoch 27: train loss = 0.23168334174536898, l1loss = 0.09616398560013362, train acc = 0.905688622754491,\n",
  2044. "val_loss = 1.2540726026361302, val_acc = 0.5697674418604651\n",
  2045. "\n",
  2046. "epoch: 28\n"
  2047. ]
  2048. },
  2049. {
  2050. "name": "stdout",
  2051. "output_type": "stream",
  2052. "text": [
  2053. "epoch 28: train loss = 0.19921719834595145, l1loss = 0.09572086222990306, train acc = 0.9171656686626747,\n",
  2054. "val_loss = 1.3161783601886543, val_acc = 0.5697674418604651\n",
  2055. "\n",
  2056. "epoch: 29\n",
  2057. "epoch 29: train loss = 0.17246595051831115, l1loss = 0.09554041276672881, train acc = 0.937624750499002,\n",
  2058. "val_loss = 3.398609378820582, val_acc = 0.6589147286821705\n",
  2059. "\n",
  2060. "epoch: 30\n",
  2061. "epoch 30: train loss = 0.19262273220007053, l1loss = 0.09535936463973717, train acc = 0.9166666666666666,\n",
  2062. "val_loss = 3.561779273572818, val_acc = 0.6550387596899225\n",
  2063. "\n",
  2064. "epoch: 31\n",
  2065. "epoch 31: train loss = 0.13978799877171508, l1loss = 0.09458633950965371, train acc = 0.9545908183632734,\n",
  2066. "val_loss = 3.981414251124248, val_acc = 0.6511627906976745\n",
  2067. "\n",
  2068. "epoch: 32\n",
  2069. "epoch 32: train loss = 0.13305879046698055, l1loss = 0.09435142839918594, train acc = 0.9491017964071856,\n",
  2070. "val_loss = 1.737026299617087, val_acc = 0.5852713178294574\n",
  2071. "\n",
  2072. "epoch: 33\n",
  2073. "epoch 33: train loss = 0.15574632034806196, l1loss = 0.09403544885967068, train acc = 0.9406187624750499,\n",
  2074. "val_loss = 3.2701686964940735, val_acc = 0.6434108527131783\n",
  2075. "\n",
  2076. "epoch: 34\n",
  2077. "epoch 34: train loss = 0.12506161606359387, l1loss = 0.09349807686434534, train acc = 0.9545908183632734,\n",
  2078. "val_loss = 1.6769787791336692, val_acc = 0.5542635658914729\n",
  2079. "\n",
  2080. "epoch: 35\n",
  2081. "epoch 35: train loss = 0.0993102289914728, l1loss = 0.0933545645988154, train acc = 0.9715568862275449,\n",
  2082. "val_loss = 2.9960278762403383, val_acc = 0.5658914728682171\n",
  2083. "\n",
  2084. "epoch: 36\n",
  2085. "epoch 36: train loss = 0.1006979171684878, l1loss = 0.09339527790239471, train acc = 0.9645708582834331,\n",
  2086. "val_loss = 3.6225777300753337, val_acc = 0.6589147286821705\n",
  2087. "\n",
  2088. "epoch: 37\n",
  2089. "validation acc increased (0.666667 ---> 0.666667)\n",
  2090. "epoch 37: train loss = 0.20696525356012904, l1loss = 0.09307418661024756, train acc = 0.9436127744510978,\n",
  2091. "val_loss = 3.3196238436440164, val_acc = 0.6550387596899225\n",
  2092. "\n",
  2093. "epoch: 38\n",
  2094. "epoch 38: train loss = 0.16276606584261516, l1loss = 0.094022158347442, train acc = 0.9416167664670658,\n",
  2095. "val_loss = 1.4645525244779365, val_acc = 0.562015503875969\n",
  2096. "\n",
  2097. "epoch: 39\n",
  2098. "epoch 39: train loss = 0.13700719901961006, l1loss = 0.09361297870705465, train acc = 0.9500998003992016,\n",
  2099. "val_loss = 1.8845138701126558, val_acc = 0.6162790697674418\n",
  2100. "\n",
  2101. "epoch: 40\n",
  2102. "epoch 40: train loss = 0.10037804332500447, l1loss = 0.09339838315864761, train acc = 0.9655688622754491,\n",
  2103. "val_loss = 2.1819226367529048, val_acc = 0.5736434108527132\n",
  2104. "\n",
  2105. "epoch: 41\n",
  2106. "epoch 41: train loss = 0.06094860051295715, l1loss = 0.09271240844103153, train acc = 0.9860279441117764,\n",
  2107. "val_loss = 2.0809006154999254, val_acc = 0.5775193798449613\n",
  2108. "\n",
  2109. "epoch: 42\n",
  2110. "epoch 42: train loss = 0.058166408848143863, l1loss = 0.09190802524487178, train acc = 0.9840319361277445,\n",
  2111. "val_loss = 3.20259105789569, val_acc = 0.6356589147286822\n",
  2112. "\n",
  2113. "epoch: 43\n",
  2114. "epoch 43: train loss = 0.04276541528290141, l1loss = 0.09118846676009382, train acc = 0.9955089820359282,\n",
  2115. "val_loss = 2.6667797657870507, val_acc = 0.5542635658914729\n",
  2116. "\n",
  2117. "epoch: 44\n",
  2118. "epoch 44: train loss = 0.03647657026520033, l1loss = 0.09060218744351717, train acc = 0.9945109780439122,\n",
  2119. "val_loss = 2.7449685954308327, val_acc = 0.5736434108527132\n",
  2120. "\n",
  2121. "epoch: 45\n",
  2122. "epoch 45: train loss = 0.03098047074830223, l1loss = 0.09009029960144542, train acc = 0.9965069860279441,\n",
  2123. "val_loss = 3.183011844176655, val_acc = 0.6356589147286822\n",
  2124. "\n",
  2125. "epoch: 46\n",
  2126. "epoch 46: train loss = 0.031913511436469776, l1loss = 0.08947685778795364, train acc = 0.9945109780439122,\n",
  2127. "val_loss = 3.353361038852108, val_acc = 0.5581395348837209\n",
  2128. "\n",
  2129. "epoch: 47\n",
  2130. "epoch 47: train loss = 0.02452494901513863, l1loss = 0.08888289796378085, train acc = 0.9970059880239521,\n",
  2131. "val_loss = 2.6891871090083157, val_acc = 0.5930232558139535\n",
  2132. "\n",
  2133. "epoch: 48\n",
  2134. "epoch 48: train loss = 0.05274008282882249, l1loss = 0.08846809942565279, train acc = 0.9810379241516967,\n",
  2135. "val_loss = 4.0638800591461415, val_acc = 0.5658914728682171\n",
  2136. "\n",
  2137. "epoch: 49\n",
  2138. "epoch 49: train loss = 0.048649915469858696, l1loss = 0.08838037115906527, train acc = 0.9865269461077845,\n",
  2139. "val_loss = 3.1181407630905627, val_acc = 0.5658914728682171\n",
  2140. "\n",
  2141. "epoch: 50\n",
  2142. "epoch 50: train loss = 0.051724245299598175, l1loss = 0.08826220857942414, train acc = 0.9810379241516967,\n",
  2143. "val_loss = 2.9276230390681777, val_acc = 0.562015503875969\n",
  2144. "\n",
  2145. "epoch: 51\n",
  2146. "epoch 51: train loss = 0.03112137069304784, l1loss = 0.08755336800021325, train acc = 0.9915169660678643,\n",
  2147. "val_loss = 3.8795810669891595, val_acc = 0.6744186046511628\n",
  2148. "\n",
  2149. "epoch: 52\n",
  2150. "validation acc increased (0.666667 ---> 0.678295)\n",
  2151. "epoch 52: train loss = 0.019947556626356527, l1loss = 0.08692338591266772, train acc = 0.998003992015968,\n",
  2152. "val_loss = 2.994923972091529, val_acc = 0.5775193798449613\n",
  2153. "\n",
  2154. "epoch: 53\n",
  2155. "epoch 53: train loss = 0.016029105731797312, l1loss = 0.08628454102489524, train acc = 1.0,\n",
  2156. "val_loss = 3.339036170826402, val_acc = 0.5930232558139535\n",
  2157. "\n",
  2158. "!!! overfitted !!!\n",
  2159. "[0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1]\n",
  2160. "[0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1]\n",
  2161. "early stoping results:\n",
  2162. "\t [0.6041666666666666, 0.5572916666666666, 0.5208333333333334, 0.5078534031413613]\n",
  2163. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0]\n",
  2164. "label = tensor([1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1,\n",
  2165. " 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1,\n",
  2166. " 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1,\n",
  2167. " 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0,\n",
  2168. " 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0,\n",
  2169. " 0, 0, 0, 0, 1, 0, 0, 1])\n",
  2170. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  2171. "label = tensor([0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0,\n",
  2172. " 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1,\n",
  2173. " 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0,\n",
  2174. " 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0,\n",
  2175. " 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0,\n",
  2176. " 0, 1, 0, 1, 1, 1, 1, 0])\n",
  2177. "output = [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0]\n",
  2178. "label = tensor([0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0,\n",
  2179. " 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1,\n",
  2180. " 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1,\n",
  2181. " 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0,\n",
  2182. " 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0,\n",
  2183. " 1, 0, 0, 1, 1, 0, 0, 1])\n",
  2184. "output = [1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  2185. "label = tensor([0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1,\n",
  2186. " 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1,\n",
  2187. " 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1,\n",
  2188. " 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0,\n",
  2189. " 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0,\n",
  2190. " 0, 0, 1, 1, 0, 0, 1, 0])\n",
  2191. "output = [0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  2192. "label = tensor([1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0,\n",
  2193. " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1,\n",
  2194. " 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0,\n",
  2195. " 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1,\n",
  2196. " 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0,\n",
  2197. " 0, 1, 0, 1, 0, 1, 1, 1])\n",
  2198. "output = [1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0]\n",
  2199. "label = tensor([1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0,\n",
  2200. " 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1,\n",
  2201. " 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0,\n",
  2202. " 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0,\n",
  2203. " 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1,\n",
  2204. " 1, 0, 0, 1, 0, 1, 1, 0])\n",
  2205. "output = [1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  2206. "label = tensor([1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1,\n",
  2207. " 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1,\n",
  2208. " 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0,\n",
  2209. " 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1,\n",
  2210. " 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1,\n",
  2211. " 0, 1, 1, 1, 1, 0, 0, 0])\n",
  2212. "output = [1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  2213. "label = tensor([1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0,\n",
  2214. " 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1,\n",
  2215. " 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0,\n",
  2216. " 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0,\n",
  2217. " 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1,\n",
  2218. " 1, 0, 1, 1, 0, 1, 0, 0])\n",
  2219. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  2220. "label = tensor([0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1,\n",
  2221. " 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1,\n",
  2222. " 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1,\n",
  2223. " 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1,\n",
  2224. " 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1,\n",
  2225. " 1, 0, 1, 1, 1, 0, 1, 0])\n",
  2226. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0]\n",
  2227. "label = tensor([1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1,\n",
  2228. " 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0,\n",
  2229. " 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1,\n",
  2230. " 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0,\n",
  2231. " 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0,\n",
  2232. " 0, 1, 0, 0, 0, 1, 1, 1])\n",
  2233. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  2234. "label = tensor([0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0,\n",
  2235. " 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1,\n",
  2236. " 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0,\n",
  2237. " 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0,\n",
  2238. " 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1,\n",
  2239. " 1, 1, 0, 1, 1, 0, 1, 1])\n",
  2240. "output = [1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]\n",
  2241. "label = tensor([0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0,\n",
  2242. " 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0,\n",
  2243. " 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1,\n",
  2244. " 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0,\n",
  2245. " 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0,\n",
  2246. " 1, 1, 0, 1, 0, 0, 0, 1])\n",
  2247. "output = [1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0]\n",
  2248. "label = tensor([1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0,\n",
  2249. " 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1,\n",
  2250. " 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0,\n",
  2251. " 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1,\n",
  2252. " 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0,\n",
  2253. " 0, 0, 1, 1, 0, 0, 0, 0])\n",
  2254. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0]\n",
  2255. "label = tensor([0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1,\n",
  2256. " 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1,\n",
  2257. " 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1,\n",
  2258. " 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0,\n",
  2259. " 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0,\n",
  2260. " 1, 0, 1, 0, 1, 0, 1, 1])\n",
  2261. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0]\n",
  2262. "label = tensor([1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0,\n",
  2263. " 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1,\n",
  2264. " 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0,\n",
  2265. " 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0,\n",
  2266. " 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0,\n",
  2267. " 0, 0, 0, 0, 1, 0, 1, 1])\n",
  2268. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  2269. "label = tensor([1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0,\n",
  2270. " 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1,\n",
  2271. " 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0,\n",
  2272. " 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1])\n",
  2273. "\t [0.6673376950176145, 0.6722054380664653, 0.6802005012531328, 0.6826347305389222]\n"
  2274. ]
  2275. },
  2276. {
  2277. "name": "stdout",
  2278. "output_type": "stream",
  2279. "text": [
  2280. "[0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1]\n",
  2281. "[1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0]\n",
  2282. "full train results:\n",
  2283. "\t [0.59375, 0.5885416666666666, 0.6354166666666666, 0.612565445026178]\n",
  2284. "\t [0.9949672873678913, 0.9974823766364552, 0.8666666666666667, 0.8393213572854291]\n",
  2285. "[1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1]\n",
  2286. "[1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1]\n",
  2287. "best accs results:\n",
  2288. "\t [0.59375, 0.5625, 0.6354166666666666, 0.6282722513089005]\n",
  2289. "\t [0.8037242073477604, 0.8670694864048338, 0.4967418546365915, 0.5489021956087824]\n",
  2290. "[0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1]\n",
  2291. "-----------------------------Fold 5---------------\n",
  2292. "preparing dataloaders...\n",
  2293. "torch.Size([84, 7, 9, 20])\n",
  2294. "coef when 0 > 1 1\n",
  2295. "creating model...\n",
  2296. "calculating total steps...\n",
  2297. "epoch: 1\n",
  2298. "validation loss decreased (inf ---> 0.711296), val_acc = 0.36046511627906974\n",
  2299. "validation acc increased (0.000000 ---> 0.360465)\n",
  2300. "validation acc increased (0.360465 ---> 0.360465)\n",
  2301. "epoch 1: train loss = 0.6870066553995976, l1loss = 0.13839889542723394, train acc = 0.5608412618928392,\n",
  2302. "val_loss = 0.7490144917207171, val_acc = 0.36046511627906974\n",
  2303. "\n",
  2304. "epoch: 2\n",
  2305. "validation acc increased (0.360465 ---> 0.360465)\n",
  2306. "validation acc increased (0.360465 ---> 0.360465)\n",
  2307. "epoch 2: train loss = 0.6565808814110371, l1loss = 0.13801056421961136, train acc = 0.629444166249374,\n",
  2308. "val_loss = 0.765150977659595, val_acc = 0.36046511627906974\n",
  2309. "\n",
  2310. "epoch: 3\n",
  2311. "validation acc increased (0.360465 ---> 0.360465)\n",
  2312. "validation acc increased (0.360465 ---> 0.375969)\n",
  2313. "epoch 3: train loss = 0.6360432937793512, l1loss = 0.13752871652543694, train acc = 0.6459689534301453,\n",
  2314. "val_loss = 0.7466173269027887, val_acc = 0.42248062015503873\n",
  2315. "\n",
  2316. "epoch: 4\n",
  2317. "validation acc increased (0.375969 ---> 0.434109)\n",
  2318. "validation acc increased (0.434109 ---> 0.492248)\n",
  2319. "epoch 4: train loss = 0.622315076511385, l1loss = 0.13690243397256882, train acc = 0.6439659489233851,\n",
  2320. "val_loss = 0.6967740377714468, val_acc = 0.5116279069767442\n",
  2321. "\n",
  2322. "epoch: 5\n",
  2323. "validation loss decreased (0.711296 ---> 0.693885), val_acc = 0.5116279069767442\n",
  2324. "validation acc increased (0.492248 ---> 0.511628)\n",
  2325. "validation loss decreased (0.693885 ---> 0.664049), val_acc = 0.5116279069767442\n",
  2326. "validation acc increased (0.511628 ---> 0.511628)\n",
  2327. "epoch 5: train loss = 0.6108160970147753, l1loss = 0.13607064582702214, train acc = 0.6539809714571858,\n",
  2328. "val_loss = 0.6510321973830231, val_acc = 0.5116279069767442\n",
  2329. "\n",
  2330. "epoch: 6\n",
  2331. "validation loss decreased (0.664049 ---> 0.649357), val_acc = 0.5155038759689923\n",
  2332. "validation acc increased (0.511628 ---> 0.515504)\n",
  2333. "validation loss decreased (0.649357 ---> 0.643178), val_acc = 0.5271317829457365\n",
  2334. "validation acc increased (0.515504 ---> 0.527132)\n",
  2335. "epoch 6: train loss = 0.598267633048188, l1loss = 0.13498037214063083, train acc = 0.6675012518778167,\n",
  2336. "val_loss = 0.6432933178982994, val_acc = 0.5271317829457365\n",
  2337. "\n",
  2338. "epoch: 7\n",
  2339. "validation loss decreased (0.643178 ---> 0.642807), val_acc = 0.5310077519379846\n",
  2340. "validation acc increased (0.527132 ---> 0.531008)\n",
  2341. "validation loss decreased (0.642807 ---> 0.641224), val_acc = 0.5271317829457365\n",
  2342. "epoch 7: train loss = 0.5881321117707234, l1loss = 0.13360911191375363, train acc = 0.671507260891337,\n",
  2343. "val_loss = 0.6395671737286471, val_acc = 0.5310077519379846\n",
  2344. "\n",
  2345. "epoch: 8\n",
  2346. "validation loss decreased (0.641224 ---> 0.639952), val_acc = 0.5310077519379846\n",
  2347. "validation acc increased (0.531008 ---> 0.531008)\n",
  2348. "validation loss decreased (0.639952 ---> 0.637798), val_acc = 0.5310077519379846\n",
  2349. "validation acc increased (0.531008 ---> 0.531008)\n",
  2350. "epoch 8: train loss = 0.5801561990557877, l1loss = 0.13193928131355187, train acc = 0.6760140210315473,\n",
  2351. "val_loss = 0.6369561816370765, val_acc = 0.5271317829457365\n",
  2352. "\n",
  2353. "epoch: 9\n",
  2354. "validation loss decreased (0.637798 ---> 0.637255), val_acc = 0.5271317829457365\n",
  2355. "validation loss decreased (0.637255 ---> 0.634658), val_acc = 0.5348837209302325\n",
  2356. "validation acc increased (0.531008 ---> 0.534884)\n",
  2357. "epoch 9: train loss = 0.5717252453208029, l1loss = 0.1299565265819796, train acc = 0.6815222834251377,\n",
  2358. "val_loss = 0.637611803403774, val_acc = 0.5310077519379846\n",
  2359. "\n",
  2360. "epoch: 10\n",
  2361. "validation acc increased (0.534884 ---> 0.534884)\n",
  2362. "epoch 10: train loss = 0.5626098037840548, l1loss = 0.12765505166727123, train acc = 0.6920380570856285,\n",
  2363. "val_loss = 0.656070447707361, val_acc = 0.5310077519379846\n",
  2364. "\n",
  2365. "epoch: 11\n",
  2366. "validation acc increased (0.534884 ---> 0.534884)\n",
  2367. "epoch 11: train loss = 0.5535151342123105, l1loss = 0.12510810005602147, train acc = 0.6955433149724587,\n",
  2368. "val_loss = 0.6719910063484843, val_acc = 0.5310077519379846\n",
  2369. "\n",
  2370. "epoch: 12\n",
  2371. "epoch 12: train loss = 0.5409535019172091, l1loss = 0.1223971828236303, train acc = 0.7090635953930896,\n",
  2372. "val_loss = 0.7268788130708443, val_acc = 0.5232558139534884\n",
  2373. "\n",
  2374. "epoch: 13\n",
  2375. "validation acc increased (0.534884 ---> 0.538760)\n",
  2376. "epoch 13: train loss = 0.5258218407123519, l1loss = 0.11959482016089444, train acc = 0.7250876314471708,\n",
  2377. "val_loss = 0.6585291873577029, val_acc = 0.5503875968992248\n",
  2378. "\n",
  2379. "epoch: 14\n",
  2380. "epoch 14: train loss = 0.5086835484893906, l1loss = 0.1168519536823288, train acc = 0.7336004006009014,\n",
  2381. "val_loss = 0.6720465666563936, val_acc = 0.5348837209302325\n",
  2382. "\n",
  2383. "epoch: 15\n",
  2384. "validation acc increased (0.538760 ---> 0.538760)\n",
  2385. "epoch 15: train loss = 0.48642194915784853, l1loss = 0.11419245105238635, train acc = 0.756634952428643,\n",
  2386. "val_loss = 0.6040798605874528, val_acc = 0.5736434108527132\n",
  2387. "\n",
  2388. "epoch: 16\n",
  2389. "validation loss decreased (0.634658 ---> 0.629792), val_acc = 0.5581395348837209\n",
  2390. "validation acc increased (0.538760 ---> 0.558140)\n",
  2391. "epoch 16: train loss = 0.4577500793297528, l1loss = 0.11169700107397768, train acc = 0.7791687531296946,\n",
  2392. "val_loss = 0.9656664484231047, val_acc = 0.5\n",
  2393. "\n",
  2394. "epoch: 17\n",
  2395. "validation acc increased (0.558140 ---> 0.596899)\n",
  2396. "epoch 17: train loss = 0.42826039845130415, l1loss = 0.10948925794782673, train acc = 0.7926890335503255,\n",
  2397. "val_loss = 0.8292172735975695, val_acc = 0.5348837209302325\n",
  2398. "\n",
  2399. "epoch: 18\n",
  2400. "epoch 18: train loss = 0.39249560578680065, l1loss = 0.10765625636429564, train acc = 0.8232348522784176,\n",
  2401. "val_loss = 0.8668296665184257, val_acc = 0.6511627906976745\n",
  2402. "\n",
  2403. "epoch: 19\n",
  2404. "validation acc increased (0.596899 ---> 0.643411)\n",
  2405. "epoch 19: train loss = 0.3674749865732494, l1loss = 0.10613021715677078, train acc = 0.8312468703054582,\n",
  2406. "val_loss = 1.4543597975731357, val_acc = 0.5193798449612403\n",
  2407. "\n",
  2408. "epoch: 20\n",
  2409. "epoch 20: train loss = 0.347654858826994, l1loss = 0.10473605919523005, train acc = 0.8357536304456685,\n",
  2410. "val_loss = 2.475660421223156, val_acc = 0.6395348837209303\n",
  2411. "\n",
  2412. "epoch: 21\n",
  2413. "epoch 21: train loss = 0.3268627472441974, l1loss = 0.10326415842094001, train acc = 0.85678517776665,\n",
  2414. "val_loss = 0.8104829621869464, val_acc = 0.5813953488372093\n",
  2415. "\n",
  2416. "epoch: 22\n"
  2417. ]
  2418. },
  2419. {
  2420. "name": "stdout",
  2421. "output_type": "stream",
  2422. "text": [
  2423. "epoch 22: train loss = 0.35131114540723307, l1loss = 0.10210657698411493, train acc = 0.8307461191787682,\n",
  2424. "val_loss = 1.7831758935322133, val_acc = 0.6356589147286822\n",
  2425. "\n",
  2426. "epoch: 23\n",
  2427. "epoch 23: train loss = 0.3259180520974103, l1loss = 0.10085294403146969, train acc = 0.8487731597396094,\n",
  2428. "val_loss = 1.6435921807275262, val_acc = 0.5155038759689923\n",
  2429. "\n",
  2430. "epoch: 24\n",
  2431. "epoch 24: train loss = 0.2837501486421049, l1loss = 0.09979496815306697, train acc = 0.871807711567351,\n",
  2432. "val_loss = 2.8983472193683344, val_acc = 0.6395348837209303\n",
  2433. "\n",
  2434. "epoch: 25\n",
  2435. "validation acc increased (0.643411 ---> 0.643411)\n",
  2436. "epoch 25: train loss = 0.24991583432414619, l1loss = 0.09866795351968506, train acc = 0.8953430145217827,\n",
  2437. "val_loss = 1.3234346984892853, val_acc = 0.5348837209302325\n",
  2438. "\n",
  2439. "epoch: 26\n",
  2440. "epoch 26: train loss = 0.23671170943787412, l1loss = 0.09776017363437367, train acc = 0.901352028042063,\n",
  2441. "val_loss = 1.7247427341549895, val_acc = 0.5658914728682171\n",
  2442. "\n",
  2443. "epoch: 27\n",
  2444. "epoch 27: train loss = 0.233533194886486, l1loss = 0.09673886006706407, train acc = 0.9018527791687532,\n",
  2445. "val_loss = 1.1423584624778393, val_acc = 0.5930232558139535\n",
  2446. "\n",
  2447. "epoch: 28\n",
  2448. "epoch 28: train loss = 0.20981159080967166, l1loss = 0.09592169590768183, train acc = 0.9148723084626941,\n",
  2449. "val_loss = 5.035286263902058, val_acc = 0.6395348837209303\n",
  2450. "\n",
  2451. "epoch: 29\n",
  2452. "validation acc increased (0.643411 ---> 0.651163)\n",
  2453. "epoch 29: train loss = 0.18721183538287653, l1loss = 0.09532376013520842, train acc = 0.9303955933900852,\n",
  2454. "val_loss = 1.7854539109754932, val_acc = 0.624031007751938\n",
  2455. "\n",
  2456. "epoch: 30\n",
  2457. "epoch 30: train loss = 0.21521277558552843, l1loss = 0.09483509715208126, train acc = 0.9048572859288934,\n",
  2458. "val_loss = 2.0886588960654975, val_acc = 0.5387596899224806\n",
  2459. "\n",
  2460. "epoch: 31\n",
  2461. "epoch 31: train loss = 0.16117875964479203, l1loss = 0.09430676077492069, train acc = 0.9409113670505759,\n",
  2462. "val_loss = 1.3893336746119713, val_acc = 0.6007751937984496\n",
  2463. "\n",
  2464. "epoch: 32\n",
  2465. "epoch 32: train loss = 0.18729717622832412, l1loss = 0.0941365569135996, train acc = 0.9243865798698047,\n",
  2466. "val_loss = 5.178411402443583, val_acc = 0.5116279069767442\n",
  2467. "\n",
  2468. "epoch: 33\n",
  2469. "epoch 33: train loss = 0.15735476534546883, l1loss = 0.0937146420292575, train acc = 0.9419128693039559,\n",
  2470. "val_loss = 2.165926408398059, val_acc = 0.627906976744186\n",
  2471. "\n",
  2472. "epoch: 34\n",
  2473. "epoch 34: train loss = 0.13561190845343132, l1loss = 0.09294714458924744, train acc = 0.9489233850776164,\n",
  2474. "val_loss = 2.862183544986932, val_acc = 0.5465116279069767\n",
  2475. "\n",
  2476. "epoch: 35\n",
  2477. "epoch 35: train loss = 0.11557932168946722, l1loss = 0.09219135233594229, train acc = 0.9584376564847271,\n",
  2478. "val_loss = 3.1061169494109504, val_acc = 0.627906976744186\n",
  2479. "\n",
  2480. "epoch: 36\n",
  2481. "epoch 36: train loss = 0.11341282006019225, l1loss = 0.09179794447730646, train acc = 0.9614421632448673,\n",
  2482. "val_loss = 2.973599456843122, val_acc = 0.562015503875969\n",
  2483. "\n",
  2484. "epoch: 37\n",
  2485. "epoch 37: train loss = 0.13424175273688602, l1loss = 0.09140255465230526, train acc = 0.9439158738107161,\n",
  2486. "val_loss = 1.4640861449315565, val_acc = 0.5852713178294574\n",
  2487. "\n",
  2488. "epoch: 38\n",
  2489. "epoch 38: train loss = 0.09078391378350896, l1loss = 0.09077151980051472, train acc = 0.9699549323985979,\n",
  2490. "val_loss = 1.923348656458448, val_acc = 0.6201550387596899\n",
  2491. "\n",
  2492. "epoch: 39\n",
  2493. "epoch 39: train loss = 0.1380670597001679, l1loss = 0.09045900325731569, train acc = 0.9444166249374061,\n",
  2494. "val_loss = 3.3878318514934804, val_acc = 0.5465116279069767\n",
  2495. "\n",
  2496. "epoch: 40\n",
  2497. "epoch 40: train loss = 0.12143602777733227, l1loss = 0.09066373266514743, train acc = 0.9579369053580371,\n",
  2498. "val_loss = 2.142883213915566, val_acc = 0.5038759689922481\n",
  2499. "\n",
  2500. "epoch: 41\n",
  2501. "epoch 41: train loss = 0.11649664227641339, l1loss = 0.09069241947854945, train acc = 0.9584376564847271,\n",
  2502. "val_loss = 2.8424580928891205, val_acc = 0.6317829457364341\n",
  2503. "\n",
  2504. "epoch: 42\n",
  2505. "epoch 42: train loss = 0.11953155570288092, l1loss = 0.09055208012230348, train acc = 0.9534301452178268,\n",
  2506. "val_loss = 3.844284035438715, val_acc = 0.5503875968992248\n",
  2507. "\n",
  2508. "epoch: 43\n",
  2509. "epoch 43: train loss = 0.1391850914218513, l1loss = 0.09037555076282623, train acc = 0.9514271407110666,\n",
  2510. "val_loss = 3.0129795486843864, val_acc = 0.5503875968992248\n",
  2511. "\n",
  2512. "epoch: 44\n",
  2513. "epoch 44: train loss = 0.11035888379791348, l1loss = 0.09016522343569418, train acc = 0.9589384076114171,\n",
  2514. "val_loss = 2.5781839115675145, val_acc = 0.5658914728682171\n",
  2515. "\n",
  2516. "epoch: 45\n",
  2517. "epoch 45: train loss = 0.09197489871015295, l1loss = 0.08964754919753291, train acc = 0.9669504256384577,\n",
  2518. "val_loss = 2.4371593719304996, val_acc = 0.562015503875969\n",
  2519. "\n",
  2520. "epoch: 46\n",
  2521. "epoch 46: train loss = 0.0579324397978988, l1loss = 0.08879983621758106, train acc = 0.9859789684526791,\n",
  2522. "val_loss = 2.2613389057706494, val_acc = 0.5465116279069767\n",
  2523. "\n",
  2524. "epoch: 47\n",
  2525. "epoch 47: train loss = 0.0461813348439197, l1loss = 0.08810112920473621, train acc = 0.9864797195793691,\n",
  2526. "val_loss = 3.979649772939756, val_acc = 0.6317829457364341\n",
  2527. "\n",
  2528. "epoch: 48\n",
  2529. "epoch 48: train loss = 0.03998827998063298, l1loss = 0.0878447880879843, train acc = 0.9929894842263395,\n",
  2530. "val_loss = 5.175023500309434, val_acc = 0.627906976744186\n",
  2531. "\n",
  2532. "epoch: 49\n",
  2533. "epoch 49: train loss = 0.04049086723706098, l1loss = 0.08717421617995039, train acc = 0.9934902353530295,\n",
  2534. "val_loss = 3.0130414075629655, val_acc = 0.6434108527131783\n",
  2535. "\n",
  2536. "epoch: 50\n",
  2537. "validation acc increased (0.651163 ---> 0.655039)\n",
  2538. "epoch 50: train loss = 0.03793664645537936, l1loss = 0.08660745584537939, train acc = 0.9914872308462694,\n",
  2539. "val_loss = 2.4097132146820543, val_acc = 0.5697674418604651\n",
  2540. "\n",
  2541. "epoch: 51\n",
  2542. "epoch 51: train loss = 0.034598207665522455, l1loss = 0.0860587003667413, train acc = 0.9924887330996495,\n",
  2543. "val_loss = 2.3104088583657907, val_acc = 0.6472868217054264\n",
  2544. "\n",
  2545. "epoch: 52\n",
  2546. "epoch 52: train loss = 0.029838849054010557, l1loss = 0.08562057309736892, train acc = 0.9964947421131698,\n",
  2547. "val_loss = 5.1767294721085895, val_acc = 0.624031007751938\n",
  2548. "\n",
  2549. "epoch: 53\n",
  2550. "epoch 53: train loss = 0.023908328355425706, l1loss = 0.08518237430689989, train acc = 0.9964947421131698,\n",
  2551. "val_loss = 3.105967559093653, val_acc = 0.6356589147286822\n",
  2552. "\n",
  2553. "epoch: 54\n",
  2554. "epoch 54: train loss = 0.023712221452846013, l1loss = 0.08467714752280002, train acc = 0.9959939909864797,\n",
  2555. "val_loss = 2.627571163944496, val_acc = 0.6511627906976745\n",
  2556. "\n",
  2557. "epoch: 55\n",
  2558. "epoch 55: train loss = 0.02119624899009137, l1loss = 0.084207254313175, train acc = 0.9979969954932398,\n",
  2559. "val_loss = 3.670362725738407, val_acc = 0.6317829457364341\n",
  2560. "\n",
  2561. "epoch: 56\n",
  2562. "epoch 56: train loss = 0.02971064273695171, l1loss = 0.08382122342847381, train acc = 0.9934902353530295,\n",
  2563. "val_loss = 4.768853254883384, val_acc = 0.6317829457364341\n",
  2564. "\n",
  2565. "epoch: 57\n",
  2566. "epoch 57: train loss = 0.026361721558016006, l1loss = 0.08356228320447576, train acc = 0.9959939909864797,\n",
  2567. "val_loss = 3.9932817562605982, val_acc = 0.627906976744186\n",
  2568. "\n",
  2569. "epoch: 58\n",
  2570. "epoch 58: train loss = 0.018343641952941223, l1loss = 0.08313654303461179, train acc = 0.9984977466199298,\n",
  2571. "val_loss = 2.416079253147811, val_acc = 0.5930232558139535\n",
  2572. "\n",
  2573. "epoch: 59\n",
  2574. "epoch 59: train loss = 0.0166573378317614, l1loss = 0.08268285322604206, train acc = 0.9984977466199298,\n",
  2575. "val_loss = 3.361119598383408, val_acc = 0.627906976744186\n",
  2576. "\n",
  2577. "epoch: 60\n",
  2578. "epoch 60: train loss = 0.018886090959545875, l1loss = 0.08225083055649032, train acc = 0.9979969954932398,\n",
  2579. "val_loss = 4.036783265050992, val_acc = 0.624031007751938\n",
  2580. "\n",
  2581. "epoch: 61\n",
  2582. "epoch 61: train loss = 0.015808429777935618, l1loss = 0.08185882754082911, train acc = 0.99899849774662,\n",
  2583. "val_loss = 2.6130907313768255, val_acc = 0.6434108527131783\n",
  2584. "\n",
  2585. "epoch: 62\n",
  2586. "epoch 62: train loss = 0.014593462708313345, l1loss = 0.08146914296527474, train acc = 0.99899849774662,\n",
  2587. "val_loss = 2.4723181946333064, val_acc = 0.6124031007751938\n",
  2588. "\n",
  2589. "epoch: 63\n",
  2590. "epoch 63: train loss = 0.016414859970592235, l1loss = 0.08114846524188801, train acc = 0.9979969954932398,\n",
  2591. "val_loss = 3.099476133206094, val_acc = 0.5813953488372093\n",
  2592. "\n",
  2593. "epoch: 64\n",
  2594. "epoch 64: train loss = 0.018514045871398748, l1loss = 0.08086007390251504, train acc = 0.9969954932398598,\n",
  2595. "val_loss = 4.376173152480015, val_acc = 0.624031007751938\n",
  2596. "\n",
  2597. "epoch: 65\n",
  2598. "epoch 65: train loss = 0.0247556148073601, l1loss = 0.08061432335889512, train acc = 0.9954932398597897,\n",
  2599. "val_loss = 2.659250307452771, val_acc = 0.6356589147286822\n",
  2600. "\n",
  2601. "epoch: 66\n"
  2602. ]
  2603. },
  2604. {
  2605. "name": "stdout",
  2606. "output_type": "stream",
  2607. "text": [
  2608. "epoch 66: train loss = 0.020378888247494526, l1loss = 0.08030128979688891, train acc = 0.9954932398597897,\n",
  2609. "val_loss = 3.7070143610932105, val_acc = 0.562015503875969\n",
  2610. "\n",
  2611. "epoch: 67\n",
  2612. "epoch 67: train loss = 0.012092748920829163, l1loss = 0.07994780938701147, train acc = 0.99899849774662,\n",
  2613. "val_loss = 3.0426118189050246, val_acc = 0.5581395348837209\n",
  2614. "\n",
  2615. "epoch: 68\n",
  2616. "epoch 68: train loss = 0.011227833565080171, l1loss = 0.07961520851852301, train acc = 1.0,\n",
  2617. "val_loss = 2.48265065576217, val_acc = 0.6201550387596899\n",
  2618. "\n",
  2619. "!!! overfitted !!!\n",
  2620. "[1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1]\n",
  2621. "[0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0]\n",
  2622. "early stoping results:\n",
  2623. "\t [0.6041666666666666, 0.5572916666666666, 0.5208333333333334, 0.5078534031413613, 0.5445026178010471]\n",
  2624. "output = [0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  2625. "label = tensor([0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0,\n",
  2626. " 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0,\n",
  2627. " 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0,\n",
  2628. " 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1,\n",
  2629. " 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0,\n",
  2630. " 0, 0, 0, 0, 1, 0, 0, 1])\n",
  2631. "output = [1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  2632. "label = tensor([1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,\n",
  2633. " 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1,\n",
  2634. " 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0,\n",
  2635. " 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0,\n",
  2636. " 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0,\n",
  2637. " 0, 0, 1, 1, 1, 0, 1, 0])\n",
  2638. "output = [1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0]\n",
  2639. "label = tensor([1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1,\n",
  2640. " 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1,\n",
  2641. " 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1,\n",
  2642. " 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0,\n",
  2643. " 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0,\n",
  2644. " 1, 0, 0, 1, 0, 0, 1, 1])\n",
  2645. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0]\n",
  2646. "label = tensor([1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1,\n",
  2647. " 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0,\n",
  2648. " 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0,\n",
  2649. " 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0,\n",
  2650. " 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1,\n",
  2651. " 1, 0, 1, 0, 0, 0, 0, 0])\n",
  2652. "output = [1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0]\n",
  2653. "label = tensor([1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0,\n",
  2654. " 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0,\n",
  2655. " 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1,\n",
  2656. " 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1,\n",
  2657. " 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0,\n",
  2658. " 1, 1, 0, 0, 1, 1, 1, 0])\n",
  2659. "output = [1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  2660. "label = tensor([1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0,\n",
  2661. " 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0,\n",
  2662. " 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1,\n",
  2663. " 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1,\n",
  2664. " 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1,\n",
  2665. " 1, 1, 0, 0, 1, 1, 0, 1])\n",
  2666. "output = [0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0]\n",
  2667. "label = tensor([0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1,\n",
  2668. " 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1,\n",
  2669. " 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1,\n",
  2670. " 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0,\n",
  2671. " 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1,\n",
  2672. " 1, 1, 0, 0, 1, 0, 1, 0])\n",
  2673. "output = [1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  2674. "label = tensor([1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1,\n",
  2675. " 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1,\n",
  2676. " 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,\n",
  2677. " 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1,\n",
  2678. " 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1,\n",
  2679. " 0, 1, 0, 1, 0, 0, 1, 1])\n"
  2680. ]
  2681. },
  2682. {
  2683. "name": "stdout",
  2684. "output_type": "stream",
  2685. "text": [
  2686. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0]\n",
  2687. "label = tensor([1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1,\n",
  2688. " 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1,\n",
  2689. " 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0,\n",
  2690. " 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1,\n",
  2691. " 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1,\n",
  2692. " 0, 1, 0, 1, 0, 0, 0, 1])\n",
  2693. "output = [1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0]\n",
  2694. "label = tensor([1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0,\n",
  2695. " 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0,\n",
  2696. " 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1,\n",
  2697. " 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0,\n",
  2698. " 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1,\n",
  2699. " 0, 1, 1, 0, 1, 1, 0, 0])\n",
  2700. "output = [0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0]\n",
  2701. "label = tensor([0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0,\n",
  2702. " 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1,\n",
  2703. " 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0,\n",
  2704. " 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0,\n",
  2705. " 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1,\n",
  2706. " 1, 1, 1, 0, 0, 1, 0, 0])\n",
  2707. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0]\n",
  2708. "label = tensor([0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1,\n",
  2709. " 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1,\n",
  2710. " 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1,\n",
  2711. " 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1,\n",
  2712. " 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0,\n",
  2713. " 1, 1, 1, 0, 0, 1, 1, 0])\n",
  2714. "output = [1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0]\n",
  2715. "label = tensor([1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0,\n",
  2716. " 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1,\n",
  2717. " 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0,\n",
  2718. " 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1,\n",
  2719. " 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1,\n",
  2720. " 0, 0, 0, 1, 1, 0, 0, 1])\n",
  2721. "output = [1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  2722. "label = tensor([0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1,\n",
  2723. " 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1,\n",
  2724. " 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0,\n",
  2725. " 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1,\n",
  2726. " 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1,\n",
  2727. " 0, 1, 0, 1, 0, 1, 1, 1])\n",
  2728. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  2729. "label = tensor([1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0,\n",
  2730. " 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0,\n",
  2731. " 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0,\n",
  2732. " 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1,\n",
  2733. " 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0,\n",
  2734. " 0, 0, 1, 1, 1, 0, 0, 1])\n",
  2735. "output = [1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  2736. "label = tensor([1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0,\n",
  2737. " 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0,\n",
  2738. " 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0,\n",
  2739. " 0, 1, 1, 1, 1])\n",
  2740. "\t [0.6673376950176145, 0.6722054380664653, 0.6802005012531328, 0.6826347305389222, 0.7250876314471708]\n",
  2741. "[0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1]\n",
  2742. "[0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1]\n",
  2743. "full train results:\n",
  2744. "\t [0.59375, 0.5885416666666666, 0.6354166666666666, 0.612565445026178, 0.6387434554973822]\n"
  2745. ]
  2746. },
  2747. {
  2748. "name": "stdout",
  2749. "output_type": "stream",
  2750. "text": [
  2751. "\t [0.9949672873678913, 0.9974823766364552, 0.8666666666666667, 0.8393213572854291, 0.99949924887331]\n",
  2752. "[0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0]\n",
  2753. "[1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0]\n",
  2754. "best accs results:\n",
  2755. "\t [0.59375, 0.5625, 0.6354166666666666, 0.6282722513089005, 0.6020942408376964]\n",
  2756. "\t [0.8037242073477604, 0.8670694864048338, 0.4967418546365915, 0.5489021956087824, 0.9744616925388082]\n",
  2757. "[0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1]\n",
  2758. "-----------------------------Fold 6---------------\n",
  2759. "preparing dataloaders...\n",
  2760. "torch.Size([77, 7, 9, 20])\n",
  2761. "coef when 0 > 1 1\n",
  2762. "creating model...\n",
  2763. "calculating total steps...\n",
  2764. "epoch: 1\n",
  2765. "validation loss decreased (inf ---> 0.691554), val_acc = 0.6511627906976745\n",
  2766. "validation acc increased (0.000000 ---> 0.651163)\n",
  2767. "validation loss decreased (0.691554 ---> 0.690849), val_acc = 0.6511627906976745\n",
  2768. "validation acc increased (0.651163 ---> 0.651163)\n",
  2769. "epoch 1: train loss = 0.6948271975517273, l1loss = 0.13827931582927705, train acc = 0.4485,\n",
  2770. "val_loss = 0.6928248095882031, val_acc = 0.5968992248062015\n",
  2771. "\n",
  2772. "epoch: 2\n",
  2773. "epoch 2: train loss = 0.672557541847229, l1loss = 0.137843541264534, train acc = 0.589,\n",
  2774. "val_loss = 0.6987676075262617, val_acc = 0.3643410852713178\n",
  2775. "\n",
  2776. "epoch: 3\n",
  2777. "epoch 3: train loss = 0.6535676913261413, l1loss = 0.13731942212581635, train acc = 0.636,\n",
  2778. "val_loss = 0.6965285705965619, val_acc = 0.4728682170542636\n",
  2779. "\n",
  2780. "epoch: 4\n",
  2781. "validation loss decreased (0.690849 ---> 0.677476), val_acc = 0.4883720930232558\n",
  2782. "epoch 4: train loss = 0.6372407693862915, l1loss = 0.13664585983753205, train acc = 0.65,\n",
  2783. "val_loss = 0.6638829569483913, val_acc = 0.5038759689922481\n",
  2784. "\n",
  2785. "epoch: 5\n",
  2786. "validation loss decreased (0.677476 ---> 0.661241), val_acc = 0.5\n",
  2787. "validation loss decreased (0.661241 ---> 0.639020), val_acc = 0.5116279069767442\n",
  2788. "epoch 5: train loss = 0.6211547412872315, l1loss = 0.13576398611068727, train acc = 0.6585,\n",
  2789. "val_loss = 0.6298631182474683, val_acc = 0.5116279069767442\n",
  2790. "\n",
  2791. "epoch: 6\n",
  2792. "validation loss decreased (0.639020 ---> 0.628438), val_acc = 0.5116279069767442\n",
  2793. "validation loss decreased (0.628438 ---> 0.621201), val_acc = 0.5077519379844961\n",
  2794. "epoch 6: train loss = 0.6071342725753784, l1loss = 0.1346161276102066, train acc = 0.661,\n",
  2795. "val_loss = 0.6213997648667919, val_acc = 0.5116279069767442\n",
  2796. "\n",
  2797. "epoch: 7\n",
  2798. "validation loss decreased (0.621201 ---> 0.621017), val_acc = 0.5193798449612403\n",
  2799. "epoch 7: train loss = 0.5958632040023804, l1loss = 0.13315785372257233, train acc = 0.6715,\n",
  2800. "val_loss = 0.6230281388112741, val_acc = 0.5232558139534884\n",
  2801. "\n",
  2802. "epoch: 8\n",
  2803. "epoch 8: train loss = 0.5865020289421081, l1loss = 0.13134343349933625, train acc = 0.6755,\n",
  2804. "val_loss = 0.6317427439810058, val_acc = 0.5232558139534884\n",
  2805. "\n",
  2806. "epoch: 9\n",
  2807. "epoch 9: train loss = 0.578163959980011, l1loss = 0.12913685393333435, train acc = 0.683,\n",
  2808. "val_loss = 0.6439248508261156, val_acc = 0.5232558139534884\n",
  2809. "\n",
  2810. "epoch: 10\n",
  2811. "epoch 10: train loss = 0.5690374765396118, l1loss = 0.12657931053638458, train acc = 0.687,\n",
  2812. "val_loss = 0.6405774336452632, val_acc = 0.5271317829457365\n",
  2813. "\n",
  2814. "epoch: 11\n",
  2815. "epoch 11: train loss = 0.5578733282089233, l1loss = 0.12375886642932891, train acc = 0.6915,\n",
  2816. "val_loss = 0.6485154487827952, val_acc = 0.5271317829457365\n",
  2817. "\n",
  2818. "epoch: 12\n",
  2819. "epoch 12: train loss = 0.547102264881134, l1loss = 0.12075013303756714, train acc = 0.7035,\n",
  2820. "val_loss = 0.5815267077712125, val_acc = 0.5968992248062015\n",
  2821. "\n",
  2822. "epoch: 13\n",
  2823. "validation loss decreased (0.621017 ---> 0.578556), val_acc = 0.6201550387596899\n",
  2824. "epoch 13: train loss = 0.5293013830184936, l1loss = 0.11762005990743638, train acc = 0.715,\n",
  2825. "val_loss = 0.622708785210469, val_acc = 0.5310077519379846\n",
  2826. "\n",
  2827. "epoch: 14\n",
  2828. "epoch 14: train loss = 0.5110176424980164, l1loss = 0.11452219623327255, train acc = 0.736,\n",
  2829. "val_loss = 0.5994160191495289, val_acc = 0.6124031007751938\n",
  2830. "\n",
  2831. "epoch: 15\n",
  2832. "validation acc increased (0.651163 ---> 0.651163)\n",
  2833. "epoch 15: train loss = 0.47669880533218384, l1loss = 0.11159368234872818, train acc = 0.7645,\n",
  2834. "val_loss = 0.7053935582092566, val_acc = 0.6550387596899225\n",
  2835. "\n",
  2836. "epoch: 16\n",
  2837. "validation acc increased (0.651163 ---> 0.655039)\n",
  2838. "epoch 16: train loss = 0.4439049696922302, l1loss = 0.10900945609807969, train acc = 0.7855,\n",
  2839. "val_loss = 0.8016450599182484, val_acc = 0.6317829457364341\n",
  2840. "\n",
  2841. "epoch: 17\n",
  2842. "epoch 17: train loss = 0.4089318103790283, l1loss = 0.10682100701332092, train acc = 0.808,\n",
  2843. "val_loss = 1.34718641343476, val_acc = 0.6472868217054264\n",
  2844. "\n",
  2845. "epoch: 18\n",
  2846. "validation acc increased (0.655039 ---> 0.658915)\n",
  2847. "epoch 18: train loss = 0.38175707817077636, l1loss = 0.10503107064962387, train acc = 0.819,\n",
  2848. "val_loss = 1.51498044184012, val_acc = 0.6472868217054264\n",
  2849. "\n",
  2850. "epoch: 19\n",
  2851. "epoch 19: train loss = 0.3507974019050598, l1loss = 0.10347136110067368, train acc = 0.843,\n",
  2852. "val_loss = 1.8761492145153904, val_acc = 0.6472868217054264\n",
  2853. "\n",
  2854. "epoch: 20\n",
  2855. "epoch 20: train loss = 0.3973570125102997, l1loss = 0.10228700506687165, train acc = 0.7975,\n",
  2856. "val_loss = 3.359361020169517, val_acc = 0.6550387596899225\n",
  2857. "\n",
  2858. "epoch: 21\n",
  2859. "epoch 21: train loss = 0.3766449837684631, l1loss = 0.10100592476129532, train acc = 0.8235,\n",
  2860. "val_loss = 0.8532644796741101, val_acc = 0.6085271317829457\n",
  2861. "\n",
  2862. "epoch: 22\n",
  2863. "epoch 22: train loss = 0.30714254689216614, l1loss = 0.09982918077707291, train acc = 0.868,\n",
  2864. "val_loss = 4.533395921132823, val_acc = 0.4689922480620155\n",
  2865. "\n",
  2866. "epoch: 23\n",
  2867. "epoch 23: train loss = 0.2730036299228668, l1loss = 0.09869706439971924, train acc = 0.8785,\n",
  2868. "val_loss = 1.673116033391435, val_acc = 0.5348837209302325\n",
  2869. "\n",
  2870. "epoch: 24\n",
  2871. "epoch 24: train loss = 0.252084813952446, l1loss = 0.09814584302902221, train acc = 0.894,\n",
  2872. "val_loss = 2.988402714100919, val_acc = 0.6511627906976745\n",
  2873. "\n",
  2874. "epoch: 25\n",
  2875. "epoch 25: train loss = 0.2553533761501312, l1loss = 0.09725052165985107, train acc = 0.8885,\n",
  2876. "val_loss = 4.158237235490666, val_acc = 0.6472868217054264\n",
  2877. "\n",
  2878. "epoch: 26\n",
  2879. "epoch 26: train loss = 0.22806144893169403, l1loss = 0.09632847493886948, train acc = 0.9095,\n",
  2880. "val_loss = 1.619036548709598, val_acc = 0.6201550387596899\n",
  2881. "\n",
  2882. "epoch: 27\n",
  2883. "epoch 27: train loss = 0.20052375197410582, l1loss = 0.0954943853020668, train acc = 0.9185,\n",
  2884. "val_loss = 1.5913666817106942, val_acc = 0.5852713178294574\n",
  2885. "\n",
  2886. "epoch: 28\n",
  2887. "epoch 28: train loss = 0.19252619695663453, l1loss = 0.09492724579572677, train acc = 0.92,\n",
  2888. "val_loss = 2.3363786150318706, val_acc = 0.6162790697674418\n",
  2889. "\n",
  2890. "epoch: 29\n",
  2891. "epoch 29: train loss = 0.19474881601333618, l1loss = 0.09441870188713074, train acc = 0.9265,\n",
  2892. "val_loss = 3.430836339329564, val_acc = 0.6511627906976745\n",
  2893. "\n",
  2894. "epoch: 30\n",
  2895. "epoch 30: train loss = 0.21158030927181243, l1loss = 0.09452930903434753, train acc = 0.907,\n",
  2896. "val_loss = 8.181018297062364, val_acc = 0.4496124031007752\n",
  2897. "\n",
  2898. "epoch: 31\n",
  2899. "epoch 31: train loss = 0.15486617839336395, l1loss = 0.0938582792878151, train acc = 0.9455,\n",
  2900. "val_loss = 1.8784414095471995, val_acc = 0.562015503875969\n",
  2901. "\n",
  2902. "epoch: 32\n",
  2903. "epoch 32: train loss = 0.12229575896263123, l1loss = 0.09318001997470855, train acc = 0.96,\n",
  2904. "val_loss = 5.165876115015311, val_acc = 0.6472868217054264\n",
  2905. "\n",
  2906. "epoch: 33\n",
  2907. "epoch 33: train loss = 0.11896066904067994, l1loss = 0.09262535566091537, train acc = 0.962,\n",
  2908. "val_loss = 3.3326926933702574, val_acc = 0.6472868217054264\n",
  2909. "\n",
  2910. "epoch: 34\n"
  2911. ]
  2912. },
  2913. {
  2914. "name": "stdout",
  2915. "output_type": "stream",
  2916. "text": [
  2917. "epoch 34: train loss = 0.1430732717514038, l1loss = 0.09208029317855836, train acc = 0.947,\n",
  2918. "val_loss = 3.7897773609461005, val_acc = 0.6434108527131783\n",
  2919. "\n",
  2920. "epoch: 35\n",
  2921. "epoch 35: train loss = 0.10793969309329987, l1loss = 0.09178146147727967, train acc = 0.9635,\n",
  2922. "val_loss = 3.7946822282897408, val_acc = 0.6434108527131783\n",
  2923. "\n",
  2924. "epoch: 36\n",
  2925. "epoch 36: train loss = 0.1086569909453392, l1loss = 0.09210293853282929, train acc = 0.964,\n",
  2926. "val_loss = 3.237369472117618, val_acc = 0.6472868217054264\n",
  2927. "\n",
  2928. "epoch: 37\n",
  2929. "epoch 37: train loss = 0.09874134624004365, l1loss = 0.09163604235649109, train acc = 0.959,\n",
  2930. "val_loss = 10.602950192237085, val_acc = 0.4573643410852713\n",
  2931. "\n",
  2932. "epoch: 38\n",
  2933. "epoch 38: train loss = 0.10966753149032593, l1loss = 0.09086926692724227, train acc = 0.957,\n",
  2934. "val_loss = 6.996053880499315, val_acc = 0.6472868217054264\n",
  2935. "\n",
  2936. "epoch: 39\n",
  2937. "epoch 39: train loss = 0.08259383499622346, l1loss = 0.09028866285085678, train acc = 0.977,\n",
  2938. "val_loss = 2.8690995652546256, val_acc = 0.6395348837209303\n",
  2939. "\n",
  2940. "epoch: 40\n",
  2941. "epoch 40: train loss = 0.07870484691858291, l1loss = 0.08984918028116226, train acc = 0.971,\n",
  2942. "val_loss = 2.2550083565157513, val_acc = 0.6317829457364341\n",
  2943. "\n",
  2944. "epoch: 41\n",
  2945. "epoch 41: train loss = 0.08104722368717193, l1loss = 0.08948963952064515, train acc = 0.9775,\n",
  2946. "val_loss = 2.7512668971867527, val_acc = 0.624031007751938\n",
  2947. "\n",
  2948. "epoch: 42\n",
  2949. "epoch 42: train loss = 0.061936515539884565, l1loss = 0.08921354949474335, train acc = 0.9805,\n",
  2950. "val_loss = 3.8835250869277838, val_acc = 0.6395348837209303\n",
  2951. "\n",
  2952. "epoch: 43\n",
  2953. "epoch 43: train loss = 0.04977551221847534, l1loss = 0.08895368778705597, train acc = 0.9865,\n",
  2954. "val_loss = 3.912529797517052, val_acc = 0.5387596899224806\n",
  2955. "\n",
  2956. "epoch: 44\n",
  2957. "epoch 44: train loss = 0.03967177218198776, l1loss = 0.08825107955932618, train acc = 0.9905,\n",
  2958. "val_loss = 3.231899913891341, val_acc = 0.6085271317829457\n",
  2959. "\n",
  2960. "epoch: 45\n",
  2961. "epoch 45: train loss = 0.048251408934593204, l1loss = 0.08825813591480255, train acc = 0.9865,\n",
  2962. "val_loss = 3.3065699320430904, val_acc = 0.6434108527131783\n",
  2963. "\n",
  2964. "epoch: 46\n",
  2965. "epoch 46: train loss = 0.04396019262075424, l1loss = 0.0874557437300682, train acc = 0.987,\n",
  2966. "val_loss = 2.2364588347024488, val_acc = 0.6124031007751938\n",
  2967. "\n",
  2968. "epoch: 47\n",
  2969. "epoch 47: train loss = 0.02501585677266121, l1loss = 0.08677183699607849, train acc = 0.998,\n",
  2970. "val_loss = 3.7413160227989968, val_acc = 0.5387596899224806\n",
  2971. "\n",
  2972. "epoch: 48\n",
  2973. "epoch 48: train loss = 0.024205303311347962, l1loss = 0.08615065145492554, train acc = 0.9965,\n",
  2974. "val_loss = 4.811114255772081, val_acc = 0.6434108527131783\n",
  2975. "\n",
  2976. "epoch: 49\n",
  2977. "epoch 49: train loss = 0.018718741066753863, l1loss = 0.0853805913925171, train acc = 0.9985,\n",
  2978. "val_loss = 3.1942446878714157, val_acc = 0.6201550387596899\n",
  2979. "\n",
  2980. "epoch: 50\n",
  2981. "epoch 50: train loss = 0.01704993227124214, l1loss = 0.0846337440609932, train acc = 0.9975,\n",
  2982. "val_loss = 2.7979257494904273, val_acc = 0.5968992248062015\n",
  2983. "\n",
  2984. "epoch: 51\n",
  2985. "epoch 51: train loss = 0.029849595367908478, l1loss = 0.08401931124925613, train acc = 0.9905,\n",
  2986. "val_loss = 3.343561839687732, val_acc = 0.5387596899224806\n",
  2987. "\n",
  2988. "epoch: 52\n",
  2989. "epoch 52: train loss = 0.02539513537287712, l1loss = 0.08352306187152862, train acc = 0.995,\n",
  2990. "val_loss = 4.478786712469057, val_acc = 0.6472868217054264\n",
  2991. "\n",
  2992. "epoch: 53\n",
  2993. "validation acc increased (0.658915 ---> 0.658915)\n",
  2994. "epoch 53: train loss = 0.02735452988743782, l1loss = 0.08342606168985367, train acc = 0.9925,\n",
  2995. "val_loss = 3.296960321271272, val_acc = 0.6162790697674418\n",
  2996. "\n",
  2997. "epoch: 54\n",
  2998. "epoch 54: train loss = 0.03272683323174715, l1loss = 0.08329540795087814, train acc = 0.994,\n",
  2999. "val_loss = 17.770323502000913, val_acc = 0.5\n",
  3000. "\n",
  3001. "epoch: 55\n",
  3002. "epoch 55: train loss = 0.029803431689739226, l1loss = 0.08343729907274246, train acc = 0.996,\n",
  3003. "val_loss = 7.326728362445683, val_acc = 0.6472868217054264\n",
  3004. "\n",
  3005. "epoch: 56\n",
  3006. "epoch 56: train loss = 0.02619129529595375, l1loss = 0.08289635759592057, train acc = 0.996,\n",
  3007. "val_loss = 3.209116557714089, val_acc = 0.562015503875969\n",
  3008. "\n",
  3009. "epoch: 57\n",
  3010. "epoch 57: train loss = 0.023176433131098746, l1loss = 0.082220523416996, train acc = 0.9955,\n",
  3011. "val_loss = 3.3258880940518636, val_acc = 0.5387596899224806\n",
  3012. "\n",
  3013. "epoch: 58\n",
  3014. "epoch 58: train loss = 0.014486284106969833, l1loss = 0.08156805974245071, train acc = 0.9995,\n",
  3015. "val_loss = 4.771162561667982, val_acc = 0.5193798449612403\n",
  3016. "\n",
  3017. "epoch: 59\n",
  3018. "epoch 59: train loss = 0.018196347802877426, l1loss = 0.08102932274341583, train acc = 0.9965,\n",
  3019. "val_loss = 3.726594481357308, val_acc = 0.6317829457364341\n",
  3020. "\n",
  3021. "epoch: 60\n",
  3022. "epoch 60: train loss = 0.012295396901667118, l1loss = 0.0804902771115303, train acc = 0.9995,\n",
  3023. "val_loss = 3.2824748639073915, val_acc = 0.6124031007751938\n",
  3024. "\n",
  3025. "epoch: 61\n",
  3026. "epoch 61: train loss = 0.014604092702269554, l1loss = 0.08002961206436157, train acc = 0.9985,\n",
  3027. "val_loss = 3.5945028610412004, val_acc = 0.627906976744186\n",
  3028. "\n",
  3029. "epoch: 62\n",
  3030. "epoch 62: train loss = 0.011242052119225264, l1loss = 0.07957109451293945, train acc = 1.0,\n",
  3031. "val_loss = 4.122840467349503, val_acc = 0.6395348837209303\n",
  3032. "\n",
  3033. "!!! overfitted !!!\n",
  3034. "[1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1]\n",
  3035. "[1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1]\n",
  3036. "early stoping results:\n",
  3037. "\t [0.6041666666666666, 0.5572916666666666, 0.5208333333333334, 0.5078534031413613, 0.5445026178010471, 0.6073298429319371]\n",
  3038. "output = [0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0]\n",
  3039. "label = tensor([0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1,\n",
  3040. " 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0,\n",
  3041. " 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0,\n",
  3042. " 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1,\n",
  3043. " 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0,\n",
  3044. " 1, 1, 0, 1, 1, 0, 0, 1])\n",
  3045. "output = [0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0]\n",
  3046. "label = tensor([0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1,\n",
  3047. " 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0,\n",
  3048. " 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1,\n",
  3049. " 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1,\n",
  3050. " 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1,\n",
  3051. " 0, 0, 1, 1, 0, 0, 0, 0])\n",
  3052. "output = [0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0]\n",
  3053. "label = tensor([0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1,\n",
  3054. " 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0,\n",
  3055. " 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1,\n",
  3056. " 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0,\n",
  3057. " 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1,\n",
  3058. " 0, 1, 1, 0, 1, 1, 1, 0])\n",
  3059. "output = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0]\n",
  3060. "label = tensor([0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0,\n",
  3061. " 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0,\n",
  3062. " 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1,\n",
  3063. " 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,\n",
  3064. " 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1,\n",
  3065. " 1, 0, 0, 1, 1, 0, 1, 1])\n",
  3066. "output = [1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  3067. "label = tensor([0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1,\n",
  3068. " 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1,\n",
  3069. " 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0,\n",
  3070. " 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1,\n",
  3071. " 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,\n",
  3072. " 0, 1, 0, 1, 1, 1, 0, 0])\n",
  3073. "output = [1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0]\n",
  3074. "label = tensor([1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0,\n",
  3075. " 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0,\n",
  3076. " 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0,\n",
  3077. " 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1,\n",
  3078. " 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1,\n",
  3079. " 0, 0, 0, 1, 0, 1, 0, 1])\n",
  3080. "output = [0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0]\n",
  3081. "label = tensor([0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0,\n",
  3082. " 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1,\n",
  3083. " 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1,\n",
  3084. " 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,\n",
  3085. " 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1,\n",
  3086. " 1, 0, 0, 0, 0, 1, 0, 1])\n",
  3087. "output = [0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0]\n",
  3088. "label = tensor([0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0,\n",
  3089. " 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0,\n",
  3090. " 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1,\n",
  3091. " 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1,\n",
  3092. " 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1,\n",
  3093. " 0, 0, 1, 1, 1, 1, 1, 0])\n",
  3094. "output = [0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0]\n",
  3095. "label = tensor([0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0,\n",
  3096. " 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1,\n",
  3097. " 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1,\n",
  3098. " 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1,\n",
  3099. " 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0,\n",
  3100. " 1, 0, 1, 0, 0, 1, 1, 1])\n",
  3101. "output = [1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0]\n",
  3102. "label = tensor([1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0,\n",
  3103. " 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0,\n",
  3104. " 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0,\n",
  3105. " 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0,\n",
  3106. " 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1,\n",
  3107. " 1, 0, 1, 0, 0, 0, 0, 1])\n",
  3108. "output = [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0]\n",
  3109. "label = tensor([0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0,\n",
  3110. " 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0,\n",
  3111. " 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0,\n",
  3112. " 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0,\n",
  3113. " 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0,\n",
  3114. " 0, 0, 1, 1, 1, 0, 0, 1])\n",
  3115. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0]\n",
  3116. "label = tensor([0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1,\n",
  3117. " 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1,\n",
  3118. " 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0,\n",
  3119. " 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0,\n",
  3120. " 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0,\n",
  3121. " 0, 1, 1, 1, 0, 1, 0, 1])\n",
  3122. "output = [0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0]\n",
  3123. "label = tensor([0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0,\n",
  3124. " 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0,\n",
  3125. " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1,\n",
  3126. " 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1,\n",
  3127. " 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0,\n",
  3128. " 0, 0, 1, 0, 1, 0, 0, 1])\n",
  3129. "output = [1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0]\n",
  3130. "label = tensor([1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1,\n",
  3131. " 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0,\n",
  3132. " 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1,\n",
  3133. " 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0,\n",
  3134. " 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1,\n",
  3135. " 1, 1, 0, 1, 0, 0, 0, 1])\n"
  3136. ]
  3137. },
  3138. {
  3139. "name": "stdout",
  3140. "output_type": "stream",
  3141. "text": [
  3142. "output = [0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0]\n",
  3143. "label = tensor([1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1,\n",
  3144. " 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0,\n",
  3145. " 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0,\n",
  3146. " 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1,\n",
  3147. " 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1,\n",
  3148. " 0, 1, 0, 1, 0, 1, 0, 1])\n",
  3149. "output = [0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0]\n",
  3150. "label = tensor([0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1,\n",
  3151. " 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1,\n",
  3152. " 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0,\n",
  3153. " 1, 1, 1, 1, 0, 1, 1, 0])\n",
  3154. "\t [0.6673376950176145, 0.6722054380664653, 0.6802005012531328, 0.6826347305389222, 0.7250876314471708, 0.7]\n",
  3155. "[1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1]\n",
  3156. "[0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0]\n",
  3157. "full train results:\n",
  3158. "\t [0.59375, 0.5885416666666666, 0.6354166666666666, 0.612565445026178, 0.6387434554973822, 0.6492146596858639]\n",
  3159. "\t [0.9949672873678913, 0.9974823766364552, 0.8666666666666667, 0.8393213572854291, 0.99949924887331, 0.638]\n",
  3160. "[0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1]\n",
  3161. "[0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0]\n",
  3162. "best accs results:\n",
  3163. "\t [0.59375, 0.5625, 0.6354166666666666, 0.6282722513089005, 0.6020942408376964, 0.6387434554973822]\n",
  3164. "\t [0.8037242073477604, 0.8670694864048338, 0.4967418546365915, 0.5489021956087824, 0.9744616925388082, 0.519]\n",
  3165. "[0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0]\n",
  3166. "-----------------------------Fold 7---------------\n",
  3167. "preparing dataloaders...\n",
  3168. "torch.Size([80, 7, 9, 20])\n",
  3169. "coef when 0 > 1 1\n",
  3170. "creating model...\n",
  3171. "calculating total steps...\n",
  3172. "epoch: 1\n",
  3173. "validation loss decreased (inf ---> 0.698563), val_acc = 0.3488372093023256\n",
  3174. "validation acc increased (0.000000 ---> 0.348837)\n",
  3175. "validation acc increased (0.348837 ---> 0.348837)\n",
  3176. "epoch 1: train loss = 0.6735606533417041, l1loss = 0.13814079643100097, train acc = 0.6146926536731634,\n",
  3177. "val_loss = 0.7325391187224277, val_acc = 0.3488372093023256\n",
  3178. "\n",
  3179. "epoch: 2\n",
  3180. "validation acc increased (0.348837 ---> 0.348837)\n",
  3181. "validation acc increased (0.348837 ---> 0.348837)\n",
  3182. "epoch 2: train loss = 0.6563243215111481, l1loss = 0.13766097700012023, train acc = 0.6366816591704147,\n",
  3183. "val_loss = 0.7539332402768986, val_acc = 0.3488372093023256\n",
  3184. "\n",
  3185. "epoch: 3\n",
  3186. "validation acc increased (0.348837 ---> 0.348837)\n",
  3187. "validation acc increased (0.348837 ---> 0.379845)\n",
  3188. "epoch 3: train loss = 0.6414987822880094, l1loss = 0.13709656230692027, train acc = 0.6391804097951025,\n",
  3189. "val_loss = 0.7422034661899242, val_acc = 0.43410852713178294\n",
  3190. "\n",
  3191. "epoch: 4\n",
  3192. "validation acc increased (0.379845 ---> 0.441860)\n",
  3193. "validation acc increased (0.441860 ---> 0.500000)\n",
  3194. "epoch 4: train loss = 0.6271164830120607, l1loss = 0.13637894998187725, train acc = 0.6456771614192903,\n",
  3195. "val_loss = 0.68995659388313, val_acc = 0.5271317829457365\n",
  3196. "\n",
  3197. "epoch: 5\n",
  3198. "validation loss decreased (0.698563 ---> 0.686751), val_acc = 0.5271317829457365\n",
  3199. "validation acc increased (0.500000 ---> 0.527132)\n",
  3200. "validation loss decreased (0.686751 ---> 0.653364), val_acc = 0.5387596899224806\n",
  3201. "validation acc increased (0.527132 ---> 0.538760)\n",
  3202. "epoch 5: train loss = 0.6123478256542048, l1loss = 0.13543122615860678, train acc = 0.6561719140429785,\n",
  3203. "val_loss = 0.6406823590744374, val_acc = 0.5426356589147286\n",
  3204. "\n",
  3205. "epoch: 6\n",
  3206. "validation loss decreased (0.653364 ---> 0.639413), val_acc = 0.5426356589147286\n",
  3207. "validation acc increased (0.538760 ---> 0.542636)\n",
  3208. "validation loss decreased (0.639413 ---> 0.626855), val_acc = 0.5426356589147286\n",
  3209. "validation acc increased (0.542636 ---> 0.542636)\n",
  3210. "epoch 6: train loss = 0.5990132123216995, l1loss = 0.1341949236550848, train acc = 0.6686656671664168,\n",
  3211. "val_loss = 0.6231413797814717, val_acc = 0.5426356589147286\n",
  3212. "\n",
  3213. "epoch: 7\n",
  3214. "validation loss decreased (0.626855 ---> 0.624045), val_acc = 0.5503875968992248\n",
  3215. "validation acc increased (0.542636 ---> 0.550388)\n",
  3216. "validation loss decreased (0.624045 ---> 0.620769), val_acc = 0.5426356589147286\n",
  3217. "epoch 7: train loss = 0.5881006238402158, l1loss = 0.13262152082767562, train acc = 0.6771614192903548,\n",
  3218. "val_loss = 0.6177397381312163, val_acc = 0.5503875968992248\n",
  3219. "\n",
  3220. "epoch: 8\n",
  3221. "validation loss decreased (0.620769 ---> 0.617393), val_acc = 0.5503875968992248\n",
  3222. "validation acc increased (0.550388 ---> 0.550388)\n",
  3223. "validation acc increased (0.550388 ---> 0.554264)\n",
  3224. "epoch 8: train loss = 0.5790939586332952, l1loss = 0.13068488615563606, train acc = 0.6836581709145427,\n",
  3225. "val_loss = 0.6233009136924448, val_acc = 0.562015503875969\n",
  3226. "\n",
  3227. "epoch: 9\n",
  3228. "validation acc increased (0.554264 ---> 0.562016)\n",
  3229. "validation acc increased (0.562016 ---> 0.565891)\n",
  3230. "epoch 9: train loss = 0.5705378363574523, l1loss = 0.12834613378914161, train acc = 0.6906546726636682,\n",
  3231. "val_loss = 0.6323157320188921, val_acc = 0.562015503875969\n",
  3232. "\n",
  3233. "epoch: 10\n",
  3234. "epoch 10: train loss = 0.5613774951251371, l1loss = 0.125647922248944, train acc = 0.6946526736631684,\n",
  3235. "val_loss = 0.6595740812693456, val_acc = 0.5503875968992248\n",
  3236. "\n",
  3237. "epoch: 11\n",
  3238. "epoch 11: train loss = 0.5492115492584824, l1loss = 0.12263813326651904, train acc = 0.7066466766616691,\n",
  3239. "val_loss = 0.6838388831116432, val_acc = 0.5503875968992248\n",
  3240. "\n",
  3241. "epoch: 12\n",
  3242. "epoch 12: train loss = 0.5365634637287651, l1loss = 0.11940663491470227, train acc = 0.712143928035982,\n",
  3243. "val_loss = 0.7116500216630078, val_acc = 0.5542635658914729\n",
  3244. "\n",
  3245. "epoch: 13\n",
  3246. "epoch 13: train loss = 0.5217770957636988, l1loss = 0.11611300713937918, train acc = 0.719640179910045,\n",
  3247. "val_loss = 0.8096898549286894, val_acc = 0.5426356589147286\n",
  3248. "\n",
  3249. "epoch: 14\n",
  3250. "epoch 14: train loss = 0.49729873053077933, l1loss = 0.1128313705094274, train acc = 0.7466266866566716,\n",
  3251. "val_loss = 0.7668287199597026, val_acc = 0.5465116279069767\n",
  3252. "\n",
  3253. "epoch: 15\n"
  3254. ]
  3255. },
  3256. {
  3257. "name": "stdout",
  3258. "output_type": "stream",
  3259. "text": [
  3260. "epoch 15: train loss = 0.47340078341371117, l1loss = 0.1097236432168616, train acc = 0.7606196901549226,\n",
  3261. "val_loss = 0.7797975073489107, val_acc = 0.5581395348837209\n",
  3262. "\n",
  3263. "epoch: 16\n",
  3264. "validation acc increased (0.565891 ---> 0.577519)\n",
  3265. "epoch 16: train loss = 0.44012472370098615, l1loss = 0.10696254843014708, train acc = 0.7786106946526736,\n",
  3266. "val_loss = 0.738024488900059, val_acc = 0.6395348837209303\n",
  3267. "\n",
  3268. "epoch: 17\n",
  3269. "validation acc increased (0.577519 ---> 0.643411)\n",
  3270. "epoch 17: train loss = 0.41693725505809315, l1loss = 0.10464524865329176, train acc = 0.783608195902049,\n",
  3271. "val_loss = 0.7353155968725219, val_acc = 0.5968992248062015\n",
  3272. "\n",
  3273. "epoch: 18\n",
  3274. "epoch 18: train loss = 0.4004883180702406, l1loss = 0.10272827999166463, train acc = 0.8055972013993004,\n",
  3275. "val_loss = 1.211284273354582, val_acc = 0.5387596899224806\n",
  3276. "\n",
  3277. "epoch: 19\n",
  3278. "epoch 19: train loss = 0.36191707039284504, l1loss = 0.10097880754945637, train acc = 0.8250874562718641,\n",
  3279. "val_loss = 2.0748814187308615, val_acc = 0.6511627906976745\n",
  3280. "\n",
  3281. "epoch: 20\n",
  3282. "validation acc increased (0.643411 ---> 0.651163)\n",
  3283. "epoch 20: train loss = 0.34419407603563157, l1loss = 0.09956587383012543, train acc = 0.8425787106446777,\n",
  3284. "val_loss = 1.1840626740640448, val_acc = 0.6124031007751938\n",
  3285. "\n",
  3286. "epoch: 21\n",
  3287. "epoch 21: train loss = 0.3332295252912227, l1loss = 0.09810652255803451, train acc = 0.8430784607696152,\n",
  3288. "val_loss = 3.1099504592806793, val_acc = 0.4806201550387597\n",
  3289. "\n",
  3290. "epoch: 22\n",
  3291. "epoch 22: train loss = 0.2923158032485451, l1loss = 0.09699563946070401, train acc = 0.8785607196401799,\n",
  3292. "val_loss = 1.7239803001862164, val_acc = 0.5542635658914729\n",
  3293. "\n",
  3294. "epoch: 23\n",
  3295. "epoch 23: train loss = 0.3031199698088349, l1loss = 0.0963727720063189, train acc = 0.8640679660169915,\n",
  3296. "val_loss = 4.811507051220423, val_acc = 0.4728682170542636\n",
  3297. "\n",
  3298. "epoch: 24\n",
  3299. "epoch 24: train loss = 0.2679102631255068, l1loss = 0.09541694263274643, train acc = 0.8835582208895553,\n",
  3300. "val_loss = 4.7253117746160935, val_acc = 0.47674418604651164\n",
  3301. "\n",
  3302. "epoch: 25\n",
  3303. "epoch 25: train loss = 0.26991559321555064, l1loss = 0.0947239384509396, train acc = 0.88055972013993,\n",
  3304. "val_loss = 1.9052141544430754, val_acc = 0.6317829457364341\n",
  3305. "\n",
  3306. "epoch: 26\n",
  3307. "epoch 26: train loss = 0.2462208094089285, l1loss = 0.09422039279322336, train acc = 0.8970514742628686,\n",
  3308. "val_loss = 2.0839634255845416, val_acc = 0.6162790697674418\n",
  3309. "\n",
  3310. "epoch: 27\n",
  3311. "epoch 27: train loss = 0.27374583970511457, l1loss = 0.09358507321439107, train acc = 0.8685657171414293,\n",
  3312. "val_loss = 4.70158687118412, val_acc = 0.4883720930232558\n",
  3313. "\n",
  3314. "epoch: 28\n",
  3315. "epoch 28: train loss = 0.20702115298509002, l1loss = 0.09327136474600559, train acc = 0.9190404797601199,\n",
  3316. "val_loss = 1.790774514393289, val_acc = 0.6124031007751938\n",
  3317. "\n",
  3318. "epoch: 29\n",
  3319. "epoch 29: train loss = 0.23099662097676404, l1loss = 0.09279883265316576, train acc = 0.8985507246376812,\n",
  3320. "val_loss = 5.580494865908952, val_acc = 0.5193798449612403\n",
  3321. "\n",
  3322. "epoch: 30\n",
  3323. "epoch 30: train loss = 0.18276370340946135, l1loss = 0.09199970285559463, train acc = 0.9270364817591205,\n",
  3324. "val_loss = 2.8631812488974266, val_acc = 0.627906976744186\n",
  3325. "\n",
  3326. "epoch: 31\n",
  3327. "epoch 31: train loss = 0.15527670064578944, l1loss = 0.0914434359698877, train acc = 0.9470264867566217,\n",
  3328. "val_loss = 4.503482984384912, val_acc = 0.5348837209302325\n",
  3329. "\n",
  3330. "epoch: 32\n",
  3331. "epoch 32: train loss = 0.16465340346202204, l1loss = 0.0909644259528063, train acc = 0.9370314842578711,\n",
  3332. "val_loss = 1.5456568664589594, val_acc = 0.5775193798449613\n",
  3333. "\n",
  3334. "epoch: 33\n",
  3335. "epoch 33: train loss = 0.1859049220969235, l1loss = 0.09095004587054908, train acc = 0.9315342328835582,\n",
  3336. "val_loss = 14.714020802993183, val_acc = 0.46511627906976744\n",
  3337. "\n",
  3338. "epoch: 34\n",
  3339. "epoch 34: train loss = 0.14878231415088508, l1loss = 0.09091311531222385, train acc = 0.9445277361319341,\n",
  3340. "val_loss = 4.930351785911146, val_acc = 0.6395348837209303\n",
  3341. "\n",
  3342. "epoch: 35\n",
  3343. "epoch 35: train loss = 0.15755604538156176, l1loss = 0.09075812100693918, train acc = 0.9325337331334332,\n",
  3344. "val_loss = 1.786901553471883, val_acc = 0.6162790697674418\n",
  3345. "\n",
  3346. "epoch: 36\n",
  3347. "epoch 36: train loss = 0.13036989256076847, l1loss = 0.09029308312955586, train acc = 0.9450274862568716,\n",
  3348. "val_loss = 4.942993589149889, val_acc = 0.6395348837209303\n",
  3349. "\n",
  3350. "epoch: 37\n",
  3351. "epoch 37: train loss = 0.11117354914851334, l1loss = 0.08992485350546153, train acc = 0.9615192403798101,\n",
  3352. "val_loss = 4.3989028816929, val_acc = 0.6356589147286822\n",
  3353. "\n",
  3354. "epoch: 38\n",
  3355. "epoch 38: train loss = 0.08873243333919831, l1loss = 0.08950450801211915, train acc = 0.9720139930034982,\n",
  3356. "val_loss = 5.96283143804979, val_acc = 0.5310077519379846\n",
  3357. "\n",
  3358. "epoch: 39\n",
  3359. "epoch 39: train loss = 0.07572829289951068, l1loss = 0.08899277692538747, train acc = 0.9775112443778111,\n",
  3360. "val_loss = 1.866163623887439, val_acc = 0.6007751937984496\n",
  3361. "\n",
  3362. "epoch: 40\n",
  3363. "epoch 40: train loss = 0.07354736827622944, l1loss = 0.08869233787402339, train acc = 0.9790104947526237,\n",
  3364. "val_loss = 2.5748086192811184, val_acc = 0.5775193798449613\n",
  3365. "\n",
  3366. "epoch: 41\n",
  3367. "epoch 41: train loss = 0.07472476078525953, l1loss = 0.08826551713730799, train acc = 0.9735132433783108,\n",
  3368. "val_loss = 3.9040491326800963, val_acc = 0.6124031007751938\n",
  3369. "\n",
  3370. "epoch: 42\n",
  3371. "epoch 42: train loss = 0.06888431988690032, l1loss = 0.0877874091989931, train acc = 0.9765117441279361,\n",
  3372. "val_loss = 2.331109551496284, val_acc = 0.5658914728682171\n",
  3373. "\n",
  3374. "epoch: 43\n",
  3375. "epoch 43: train loss = 0.05748968711425399, l1loss = 0.08741870758192113, train acc = 0.9875062468765617,\n",
  3376. "val_loss = 2.812307975088903, val_acc = 0.5852713178294574\n",
  3377. "\n",
  3378. "epoch: 44\n",
  3379. "epoch 44: train loss = 0.057824458753776696, l1loss = 0.08714396681176728, train acc = 0.9835082458770614,\n",
  3380. "val_loss = 2.8747254637784736, val_acc = 0.5968992248062015\n",
  3381. "\n",
  3382. "epoch: 45\n",
  3383. "epoch 45: train loss = 0.0759028259409719, l1loss = 0.08678379254615766, train acc = 0.9740129935032483,\n",
  3384. "val_loss = 4.5999353874561395, val_acc = 0.5503875968992248\n",
  3385. "\n",
  3386. "epoch: 46\n",
  3387. "epoch 46: train loss = 0.04053982199988384, l1loss = 0.08636881607672621, train acc = 0.993503248375812,\n",
  3388. "val_loss = 3.032141833342323, val_acc = 0.5310077519379846\n",
  3389. "\n",
  3390. "epoch: 47\n",
  3391. "epoch 47: train loss = 0.03172164383823278, l1loss = 0.08597096050160935, train acc = 0.9945027486256871,\n",
  3392. "val_loss = 5.050457126410433, val_acc = 0.5503875968992248\n",
  3393. "\n",
  3394. "epoch: 48\n",
  3395. "epoch 48: train loss = 0.030161406717326925, l1loss = 0.08547273422705418, train acc = 0.9940029985007496,\n",
  3396. "val_loss = 4.39157882217289, val_acc = 0.5465116279069767\n",
  3397. "\n",
  3398. "epoch: 49\n",
  3399. "epoch 49: train loss = 0.026073823958962992, l1loss = 0.08494537337489988, train acc = 0.9975012493753124,\n",
  3400. "val_loss = 6.5615896919947305, val_acc = 0.5155038759689923\n",
  3401. "\n",
  3402. "epoch: 50\n",
  3403. "epoch 50: train loss = 0.031470798644824126, l1loss = 0.08458039399119152, train acc = 0.993503248375812,\n",
  3404. "val_loss = 3.8240739467532134, val_acc = 0.5658914728682171\n",
  3405. "\n",
  3406. "epoch: 51\n",
  3407. "epoch 51: train loss = 0.030669868893470842, l1loss = 0.08433086723312624, train acc = 0.993503248375812,\n",
  3408. "val_loss = 4.175465990984162, val_acc = 0.6046511627906976\n",
  3409. "\n",
  3410. "epoch: 52\n",
  3411. "epoch 52: train loss = 0.02038246937382823, l1loss = 0.08382471429667195, train acc = 0.9970014992503748,\n",
  3412. "val_loss = 3.628271795987953, val_acc = 0.5852713178294574\n",
  3413. "\n",
  3414. "epoch: 53\n",
  3415. "epoch 53: train loss = 0.018496526570907834, l1loss = 0.08326089246266488, train acc = 0.9995002498750625,\n",
  3416. "val_loss = 3.4422422157701598, val_acc = 0.5426356589147286\n",
  3417. "\n",
  3418. "epoch: 54\n",
  3419. "epoch 54: train loss = 0.02122760796430765, l1loss = 0.08280747008883674, train acc = 0.9970014992503748,\n",
  3420. "val_loss = 3.4909615479698477, val_acc = 0.5271317829457365\n",
  3421. "\n",
  3422. "epoch: 55\n",
  3423. "epoch 55: train loss = 0.025122589815681662, l1loss = 0.08247958504963017, train acc = 0.992503748125937,\n",
  3424. "val_loss = 3.1251121426737587, val_acc = 0.5775193798449613\n",
  3425. "\n",
  3426. "epoch: 56\n",
  3427. "epoch 56: train loss = 0.018375528820868077, l1loss = 0.08209296630538147, train acc = 0.9995002498750625,\n",
  3428. "val_loss = 3.3797425502954526, val_acc = 0.5775193798449613\n",
  3429. "\n",
  3430. "epoch: 57\n",
  3431. "epoch 57: train loss = 0.01760085413966892, l1loss = 0.08163161436165647, train acc = 0.9960019990004998,\n",
  3432. "val_loss = 3.5595659359480982, val_acc = 0.5193798449612403\n",
  3433. "\n",
  3434. "epoch: 58\n",
  3435. "epoch 58: train loss = 0.01351920877006659, l1loss = 0.08108949671680483, train acc = 0.9995002498750625,\n",
  3436. "val_loss = 3.2694953966510387, val_acc = 0.5697674418604651\n",
  3437. "\n",
  3438. "epoch: 59\n"
  3439. ]
  3440. },
  3441. {
  3442. "name": "stdout",
  3443. "output_type": "stream",
  3444. "text": [
  3445. "epoch 59: train loss = 0.019704249462690848, l1loss = 0.08070969545486746, train acc = 0.9960019990004998,\n",
  3446. "val_loss = 3.709986501207301, val_acc = 0.5852713178294574\n",
  3447. "\n",
  3448. "epoch: 60\n",
  3449. "epoch 60: train loss = 0.023627269952588233, l1loss = 0.08051739055951913, train acc = 0.9940029985007496,\n",
  3450. "val_loss = 4.3342692463897, val_acc = 0.5891472868217055\n",
  3451. "\n",
  3452. "epoch: 61\n",
  3453. "epoch 61: train loss = 0.03571600694002687, l1loss = 0.08089180287347919, train acc = 0.9900049975012494,\n",
  3454. "val_loss = 3.6718489259712457, val_acc = 0.5465116279069767\n",
  3455. "\n",
  3456. "epoch: 62\n",
  3457. "epoch 62: train loss = 0.04068389410960085, l1loss = 0.0812820299514111, train acc = 0.9910044977511244,\n",
  3458. "val_loss = 22.92412723866544, val_acc = 0.4806201550387597\n",
  3459. "\n",
  3460. "epoch: 63\n",
  3461. "epoch 63: train loss = 0.03855133038023482, l1loss = 0.08120857460656088, train acc = 0.9930034982508745,\n",
  3462. "val_loss = 2.7737932450087497, val_acc = 0.5503875968992248\n",
  3463. "\n",
  3464. "epoch: 64\n",
  3465. "epoch 64: train loss = 0.04269264168341061, l1loss = 0.0811207454906649, train acc = 0.9875062468765617,\n",
  3466. "val_loss = 5.258781780568204, val_acc = 0.6201550387596899\n",
  3467. "\n",
  3468. "epoch: 65\n",
  3469. "epoch 65: train loss = 0.028871186090626756, l1loss = 0.08079952534394286, train acc = 0.9950024987506247,\n",
  3470. "val_loss = 4.771393295406371, val_acc = 0.6162790697674418\n",
  3471. "\n",
  3472. "epoch: 66\n",
  3473. "epoch 66: train loss = 0.016369871543846032, l1loss = 0.08030458839088007, train acc = 0.999000499750125,\n",
  3474. "val_loss = 4.057015269301658, val_acc = 0.5465116279069767\n",
  3475. "\n",
  3476. "epoch: 67\n",
  3477. "epoch 67: train loss = 0.013506758245762841, l1loss = 0.07978588878467999, train acc = 0.9995002498750625,\n",
  3478. "val_loss = 3.792802465054416, val_acc = 0.5465116279069767\n",
  3479. "\n",
  3480. "epoch: 68\n",
  3481. "epoch 68: train loss = 0.011354195068533095, l1loss = 0.07937957041118218, train acc = 0.9995002498750625,\n",
  3482. "val_loss = 3.2458120704621307, val_acc = 0.5891472868217055\n",
  3483. "\n",
  3484. "epoch: 69\n",
  3485. "epoch 69: train loss = 0.014591847184760639, l1loss = 0.07902957535516852, train acc = 0.999000499750125,\n",
  3486. "val_loss = 3.9379879207343094, val_acc = 0.5968992248062015\n",
  3487. "\n",
  3488. "epoch: 70\n",
  3489. "epoch 70: train loss = 0.01913830605863795, l1loss = 0.07874485357352223, train acc = 0.9960019990004998,\n",
  3490. "val_loss = 3.228848675251469, val_acc = 0.5930232558139535\n",
  3491. "\n",
  3492. "epoch: 71\n",
  3493. "epoch 71: train loss = 0.013023115446449428, l1loss = 0.0784264379541675, train acc = 0.999000499750125,\n",
  3494. "val_loss = 3.240803785102312, val_acc = 0.5930232558139535\n",
  3495. "\n",
  3496. "epoch: 72\n",
  3497. "epoch 72: train loss = 0.011582904643770577, l1loss = 0.0781244968903297, train acc = 0.9995002498750625,\n",
  3498. "val_loss = 3.635261055110961, val_acc = 0.5852713178294574\n",
  3499. "\n",
  3500. "epoch: 73\n",
  3501. "epoch 73: train loss = 0.008814260559524226, l1loss = 0.07784299137650699, train acc = 1.0,\n",
  3502. "val_loss = 3.3489851297796234, val_acc = 0.5930232558139535\n",
  3503. "\n",
  3504. "!!! overfitted !!!\n",
  3505. "[1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1]\n",
  3506. "[0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1]\n",
  3507. "early stoping results:\n",
  3508. "\t [0.6041666666666666, 0.5572916666666666, 0.5208333333333334, 0.5078534031413613, 0.5445026178010471, 0.6073298429319371, 0.5602094240837696]\n",
  3509. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  3510. "label = tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1,\n",
  3511. " 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1,\n",
  3512. " 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0,\n",
  3513. " 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1,\n",
  3514. " 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1,\n",
  3515. " 1, 1, 0, 1, 0, 1, 1, 1])\n",
  3516. "output = [1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0]\n",
  3517. "label = tensor([1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1,\n",
  3518. " 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1,\n",
  3519. " 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1,\n",
  3520. " 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0,\n",
  3521. " 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0,\n",
  3522. " 0, 1, 0, 0, 1, 0, 1, 1])\n",
  3523. "output = [1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0]\n",
  3524. "label = tensor([1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0,\n",
  3525. " 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1,\n",
  3526. " 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1,\n",
  3527. " 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,\n",
  3528. " 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0,\n",
  3529. " 1, 0, 1, 0, 0, 0, 1, 1])\n",
  3530. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0]\n",
  3531. "label = tensor([1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1,\n",
  3532. " 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1,\n",
  3533. " 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0,\n",
  3534. " 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0,\n",
  3535. " 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1,\n",
  3536. " 1, 0, 0, 0, 0, 0, 0, 1])\n",
  3537. "output = [1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0]\n",
  3538. "label = tensor([0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1,\n",
  3539. " 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1,\n",
  3540. " 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1,\n",
  3541. " 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0,\n",
  3542. " 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1,\n",
  3543. " 1, 0, 1, 0, 1, 0, 1, 0])\n",
  3544. "output = [1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]\n",
  3545. "label = tensor([1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1,\n",
  3546. " 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0,\n",
  3547. " 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1,\n",
  3548. " 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0,\n",
  3549. " 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0,\n",
  3550. " 0, 1, 0, 0, 0, 1, 1, 0])\n",
  3551. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0]\n",
  3552. "label = tensor([1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0,\n",
  3553. " 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0,\n",
  3554. " 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0,\n",
  3555. " 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1,\n",
  3556. " 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0,\n",
  3557. " 1, 0, 0, 1, 0, 1, 1, 0])\n",
  3558. "output = [1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0]\n",
  3559. "label = tensor([1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1,\n",
  3560. " 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0,\n",
  3561. " 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0,\n",
  3562. " 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0,\n",
  3563. " 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1,\n",
  3564. " 0, 0, 0, 1, 0, 1, 1, 0])\n",
  3565. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0]\n",
  3566. "label = tensor([1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0,\n",
  3567. " 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0,\n",
  3568. " 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0,\n",
  3569. " 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1,\n",
  3570. " 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0,\n",
  3571. " 0, 0, 1, 0, 1, 0, 1, 1])\n",
  3572. "output = [1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  3573. "label = tensor([1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0,\n",
  3574. " 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0,\n",
  3575. " 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1,\n",
  3576. " 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1,\n",
  3577. " 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0,\n",
  3578. " 0, 1, 0, 0, 1, 1, 1, 1])\n",
  3579. "output = [1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  3580. "label = tensor([1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0,\n",
  3581. " 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0,\n",
  3582. " 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1,\n",
  3583. " 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1,\n",
  3584. " 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0,\n",
  3585. " 0, 0, 1, 1, 0, 1, 1, 0])\n",
  3586. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0]\n",
  3587. "label = tensor([1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1,\n",
  3588. " 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1,\n",
  3589. " 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0,\n",
  3590. " 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1,\n",
  3591. " 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1,\n",
  3592. " 1, 0, 1, 0, 0, 1, 0, 1])\n",
  3593. "output = [1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  3594. "label = tensor([1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0,\n",
  3595. " 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1,\n",
  3596. " 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0,\n",
  3597. " 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0,\n",
  3598. " 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1,\n",
  3599. " 0, 1, 0, 1, 1, 1, 0, 0])\n",
  3600. "output = [0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  3601. "label = tensor([0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0,\n",
  3602. " 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0,\n",
  3603. " 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,\n",
  3604. " 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1,\n",
  3605. " 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0,\n",
  3606. " 0, 1, 1, 0, 1, 1, 1, 1])\n",
  3607. "output = [1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0]\n",
  3608. "label = tensor([0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1,\n",
  3609. " 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1,\n",
  3610. " 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1,\n",
  3611. " 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1,\n",
  3612. " 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0,\n",
  3613. " 0, 0, 1, 0, 1, 1, 1, 1])\n"
  3614. ]
  3615. },
  3616. {
  3617. "name": "stdout",
  3618. "output_type": "stream",
  3619. "text": [
  3620. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  3621. "label = tensor([1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1,\n",
  3622. " 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1,\n",
  3623. " 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0,\n",
  3624. " 1, 0, 0, 1, 0, 1, 0, 1, 1])\n",
  3625. "\t [0.6673376950176145, 0.6722054380664653, 0.6802005012531328, 0.6826347305389222, 0.7250876314471708, 0.7, 0.6841579210394803]\n",
  3626. "[0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1]\n",
  3627. "[0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1]\n",
  3628. "full train results:\n",
  3629. "\t [0.59375, 0.5885416666666666, 0.6354166666666666, 0.612565445026178, 0.6387434554973822, 0.6492146596858639, 0.6335078534031413]\n",
  3630. "\t [0.9949672873678913, 0.9974823766364552, 0.8666666666666667, 0.8393213572854291, 0.99949924887331, 0.638, 0.999000499750125]\n",
  3631. "[1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0]\n",
  3632. "[1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1]\n",
  3633. "best accs results:\n",
  3634. "\t [0.59375, 0.5625, 0.6354166666666666, 0.6282722513089005, 0.6020942408376964, 0.6387434554973822, 0.643979057591623]\n",
  3635. "\t [0.8037242073477604, 0.8670694864048338, 0.4967418546365915, 0.5489021956087824, 0.9744616925388082, 0.519, 0.47226386806596704]\n",
  3636. "[1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1]\n",
  3637. "-----------------------------Fold 8---------------\n",
  3638. "preparing dataloaders...\n",
  3639. "torch.Size([81, 7, 9, 20])\n",
  3640. "coef when 0 > 1 1\n",
  3641. "creating model...\n",
  3642. "calculating total steps...\n",
  3643. "epoch: 1\n",
  3644. "validation loss decreased (inf ---> 0.690483), val_acc = 0.6162790697674418\n",
  3645. "validation acc increased (0.000000 ---> 0.616279)\n",
  3646. "validation acc increased (0.616279 ---> 0.616279)\n",
  3647. "epoch 1: train loss = 0.681558296144248, l1loss = 0.13785075955840959, train acc = 0.47540160642570284,\n",
  3648. "val_loss = 0.6908809380937916, val_acc = 0.6201550387596899\n",
  3649. "\n",
  3650. "epoch: 2\n",
  3651. "validation acc increased (0.616279 ---> 0.616279)\n",
  3652. "epoch 2: train loss = 0.6601414388442135, l1loss = 0.13744411087897887, train acc = 0.5727911646586346,\n",
  3653. "val_loss = 0.6930313258208045, val_acc = 0.5038759689922481\n",
  3654. "\n",
  3655. "epoch: 3\n",
  3656. "validation loss decreased (0.690483 ---> 0.688592), val_acc = 0.5465116279069767\n",
  3657. "epoch 3: train loss = 0.6413024204323091, l1loss = 0.1369472207673582, train acc = 0.6360441767068273,\n",
  3658. "val_loss = 0.682868339294611, val_acc = 0.5348837209302325\n",
  3659. "\n",
  3660. "epoch: 4\n",
  3661. "validation loss decreased (0.688592 ---> 0.681418), val_acc = 0.5387596899224806\n",
  3662. "validation loss decreased (0.681418 ---> 0.657421), val_acc = 0.5542635658914729\n",
  3663. "epoch 4: train loss = 0.6253758257172672, l1loss = 0.13630323943842845, train acc = 0.6521084337349398,\n",
  3664. "val_loss = 0.6426847808120787, val_acc = 0.562015503875969\n",
  3665. "\n",
  3666. "epoch: 5\n",
  3667. "validation loss decreased (0.657421 ---> 0.639681), val_acc = 0.562015503875969\n",
  3668. "validation loss decreased (0.639681 ---> 0.615633), val_acc = 0.5697674418604651\n",
  3669. "epoch 5: train loss = 0.6111369527966143, l1loss = 0.13545335679647913, train acc = 0.6556224899598394,\n",
  3670. "val_loss = 0.6082370059434757, val_acc = 0.5813953488372093\n",
  3671. "\n",
  3672. "epoch: 6\n",
  3673. "validation loss decreased (0.615633 ---> 0.607422), val_acc = 0.5775193798449613\n",
  3674. "validation loss decreased (0.607422 ---> 0.601718), val_acc = 0.5775193798449613\n",
  3675. "epoch 6: train loss = 0.599387328548125, l1loss = 0.13432514200727624, train acc = 0.6606425702811245,\n",
  3676. "val_loss = 0.6018130197081455, val_acc = 0.5813953488372093\n",
  3677. "\n",
  3678. "epoch: 7\n",
  3679. "epoch 7: train loss = 0.5901381636719149, l1loss = 0.13286473970097232, train acc = 0.6686746987951807,\n",
  3680. "val_loss = 0.6033216104951016, val_acc = 0.5813953488372093\n",
  3681. "\n",
  3682. "epoch: 8\n",
  3683. "epoch 8: train loss = 0.5820648083725128, l1loss = 0.13100589835739518, train acc = 0.6746987951807228,\n",
  3684. "val_loss = 0.6082159008166587, val_acc = 0.5813953488372093\n",
  3685. "\n",
  3686. "epoch: 9\n",
  3687. "epoch 9: train loss = 0.5762511060898563, l1loss = 0.12873437647599292, train acc = 0.6757028112449799,\n",
  3688. "val_loss = 0.6154433927794759, val_acc = 0.5697674418604651\n",
  3689. "\n",
  3690. "epoch: 10\n",
  3691. "epoch 10: train loss = 0.5673032103772144, l1loss = 0.12611201866324168, train acc = 0.6827309236947792,\n",
  3692. "val_loss = 0.6142219150251196, val_acc = 0.5736434108527132\n",
  3693. "\n",
  3694. "epoch: 11\n",
  3695. "epoch 11: train loss = 0.5576656654656652, l1loss = 0.12317741024925048, train acc = 0.6907630522088354,\n",
  3696. "val_loss = 0.6357968532761862, val_acc = 0.5775193798449613\n",
  3697. "\n",
  3698. "epoch: 12\n",
  3699. "epoch 12: train loss = 0.544440203881168, l1loss = 0.12002360970380316, train acc = 0.7043172690763052,\n",
  3700. "val_loss = 0.6717778880928837, val_acc = 0.5736434108527132\n",
  3701. "\n",
  3702. "epoch: 13\n",
  3703. "epoch 13: train loss = 0.5326818835783196, l1loss = 0.11672397023702721, train acc = 0.7178714859437751,\n",
  3704. "val_loss = 0.6902429789535759, val_acc = 0.5697674418604651\n",
  3705. "\n",
  3706. "epoch: 14\n",
  3707. "epoch 14: train loss = 0.5085236329630197, l1loss = 0.1134706104555762, train acc = 0.7339357429718876,\n",
  3708. "val_loss = 0.658633234195931, val_acc = 0.5930232558139535\n",
  3709. "\n",
  3710. "epoch: 15\n",
  3711. "epoch 15: train loss = 0.48935657404512767, l1loss = 0.11044141605316875, train acc = 0.7459839357429718,\n",
  3712. "val_loss = 0.6858358637307042, val_acc = 0.5930232558139535\n",
  3713. "\n",
  3714. "epoch: 16\n",
  3715. "epoch 16: train loss = 0.45904517054079047, l1loss = 0.1077926407556936, train acc = 0.7740963855421686,\n",
  3716. "val_loss = 0.8338234337550307, val_acc = 0.5852713178294574\n",
  3717. "\n",
  3718. "epoch: 17\n",
  3719. "epoch 17: train loss = 0.426940765964937, l1loss = 0.1055830192254729, train acc = 0.7911646586345381,\n",
  3720. "val_loss = 1.16572989049808, val_acc = 0.562015503875969\n",
  3721. "\n",
  3722. "epoch: 18\n",
  3723. "epoch 18: train loss = 0.4012284585271016, l1loss = 0.10380807138949513, train acc = 0.8092369477911646,\n",
  3724. "val_loss = 1.6921033988627352, val_acc = 0.5658914728682171\n",
  3725. "\n",
  3726. "epoch: 19\n",
  3727. "epoch 19: train loss = 0.3755404464451663, l1loss = 0.10228538163096072, train acc = 0.8273092369477911,\n",
  3728. "val_loss = 1.4203764996671862, val_acc = 0.5503875968992248\n",
  3729. "\n",
  3730. "epoch: 20\n",
  3731. "validation acc increased (0.616279 ---> 0.616279)\n",
  3732. "epoch 20: train loss = 0.3591645778901127, l1loss = 0.10097318358090987, train acc = 0.8288152610441767,\n",
  3733. "val_loss = 1.1321186076763063, val_acc = 0.6162790697674418\n",
  3734. "\n",
  3735. "epoch: 21\n",
  3736. "validation acc increased (0.616279 ---> 0.620155)\n",
  3737. "epoch 21: train loss = 0.32908376967571823, l1loss = 0.09975837880229375, train acc = 0.8473895582329317,\n",
  3738. "val_loss = 0.9505737890568815, val_acc = 0.6085271317829457\n",
  3739. "\n",
  3740. "epoch: 22\n",
  3741. "epoch 22: train loss = 0.29188525078287086, l1loss = 0.09870235529170937, train acc = 0.8699799196787149,\n",
  3742. "val_loss = 0.92200702197792, val_acc = 0.5775193798449613\n",
  3743. "\n",
  3744. "epoch: 23\n"
  3745. ]
  3746. },
  3747. {
  3748. "name": "stdout",
  3749. "output_type": "stream",
  3750. "text": [
  3751. "epoch 23: train loss = 0.26653667099504585, l1loss = 0.09784682783257051, train acc = 0.8910642570281124,\n",
  3752. "val_loss = 1.039930680001429, val_acc = 0.5736434108527132\n",
  3753. "\n",
  3754. "epoch: 24\n",
  3755. "validation acc increased (0.620155 ---> 0.627907)\n",
  3756. "epoch 24: train loss = 0.29164418386647023, l1loss = 0.09723315346073434, train acc = 0.857429718875502,\n",
  3757. "val_loss = 1.7529087842896927, val_acc = 0.624031007751938\n",
  3758. "\n",
  3759. "epoch: 25\n",
  3760. "validation acc increased (0.627907 ---> 0.631783)\n",
  3761. "validation acc increased (0.631783 ---> 0.631783)\n",
  3762. "epoch 25: train loss = 0.2822917654332387, l1loss = 0.09672807504134963, train acc = 0.8815261044176707,\n",
  3763. "val_loss = 3.657799425051194, val_acc = 0.624031007751938\n",
  3764. "\n",
  3765. "epoch: 26\n",
  3766. "epoch 26: train loss = 0.26554612988927756, l1loss = 0.09617703916677031, train acc = 0.8790160642570282,\n",
  3767. "val_loss = 2.485792124918265, val_acc = 0.5775193798449613\n",
  3768. "\n",
  3769. "epoch: 27\n",
  3770. "epoch 27: train loss = 0.254531506015115, l1loss = 0.09513618028068159, train acc = 0.8895582329317269,\n",
  3771. "val_loss = 2.655083281125209, val_acc = 0.624031007751938\n",
  3772. "\n",
  3773. "epoch: 28\n",
  3774. "epoch 28: train loss = 0.24327343211595312, l1loss = 0.09413622313713932, train acc = 0.8845381526104418,\n",
  3775. "val_loss = 1.9870535994684972, val_acc = 0.5852713178294574\n",
  3776. "\n",
  3777. "epoch: 29\n",
  3778. "epoch 29: train loss = 0.19294745029215832, l1loss = 0.09322106197895295, train acc = 0.9312248995983936,\n",
  3779. "val_loss = 1.6232661962971207, val_acc = 0.6124031007751938\n",
  3780. "\n",
  3781. "epoch: 30\n",
  3782. "epoch 30: train loss = 0.17906709673653645, l1loss = 0.09269103892596371, train acc = 0.9201807228915663,\n",
  3783. "val_loss = 1.2391475744025653, val_acc = 0.627906976744186\n",
  3784. "\n",
  3785. "epoch: 31\n",
  3786. "epoch 31: train loss = 0.16923785616595102, l1loss = 0.09213589386169212, train acc = 0.9412650602409639,\n",
  3787. "val_loss = 2.774588699488677, val_acc = 0.562015503875969\n",
  3788. "\n",
  3789. "epoch: 32\n",
  3790. "epoch 32: train loss = 0.18963111925077247, l1loss = 0.09187775111222363, train acc = 0.9181726907630522,\n",
  3791. "val_loss = 3.4193982689879663, val_acc = 0.6085271317829457\n",
  3792. "\n",
  3793. "epoch: 33\n",
  3794. "epoch 33: train loss = 0.21511334994710593, l1loss = 0.0914924276700939, train acc = 0.8945783132530121,\n",
  3795. "val_loss = 1.4032248768580051, val_acc = 0.6046511627906976\n",
  3796. "\n",
  3797. "epoch: 34\n",
  3798. "validation acc increased (0.631783 ---> 0.635659)\n",
  3799. "epoch 34: train loss = 0.1589769302123043, l1loss = 0.09112457344570313, train acc = 0.9392570281124498,\n",
  3800. "val_loss = 1.755115381969038, val_acc = 0.6085271317829457\n",
  3801. "\n",
  3802. "epoch: 35\n",
  3803. "epoch 35: train loss = 0.14009647250893603, l1loss = 0.0910465270280838, train acc = 0.9513052208835341,\n",
  3804. "val_loss = 1.4941087914991749, val_acc = 0.624031007751938\n",
  3805. "\n",
  3806. "epoch: 36\n",
  3807. "epoch 36: train loss = 0.12341947081577347, l1loss = 0.0905273190763102, train acc = 0.9573293172690763,\n",
  3808. "val_loss = 2.225553594818411, val_acc = 0.6317829457364341\n",
  3809. "\n",
  3810. "epoch: 37\n",
  3811. "validation acc increased (0.635659 ---> 0.639535)\n",
  3812. "epoch 37: train loss = 0.09714583913425365, l1loss = 0.08966618833173232, train acc = 0.9698795180722891,\n",
  3813. "val_loss = 2.304292928340823, val_acc = 0.5852713178294574\n",
  3814. "\n",
  3815. "epoch: 38\n",
  3816. "epoch 38: train loss = 0.11655990438288953, l1loss = 0.0893175832657929, train acc = 0.9568273092369478,\n",
  3817. "val_loss = 2.1228176657716897, val_acc = 0.6007751937984496\n",
  3818. "\n",
  3819. "epoch: 39\n",
  3820. "epoch 39: train loss = 0.09807442242361934, l1loss = 0.08927847062009406, train acc = 0.964859437751004,\n",
  3821. "val_loss = 4.155446446450879, val_acc = 0.6317829457364341\n",
  3822. "\n",
  3823. "epoch: 40\n",
  3824. "epoch 40: train loss = 0.0872385489294328, l1loss = 0.08882969411381755, train acc = 0.9774096385542169,\n",
  3825. "val_loss = 1.9643646534099541, val_acc = 0.5968992248062015\n",
  3826. "\n",
  3827. "epoch: 41\n",
  3828. "epoch 41: train loss = 0.07711015104589214, l1loss = 0.08862146217540565, train acc = 0.9754016064257028,\n",
  3829. "val_loss = 3.4088204403718314, val_acc = 0.6046511627906976\n",
  3830. "\n",
  3831. "epoch: 42\n",
  3832. "epoch 42: train loss = 0.07405169415904815, l1loss = 0.08791267791067262, train acc = 0.9759036144578314,\n",
  3833. "val_loss = 2.432727573453918, val_acc = 0.5930232558139535\n",
  3834. "\n",
  3835. "epoch: 43\n",
  3836. "epoch 43: train loss = 0.1035525700054973, l1loss = 0.08760496441379609, train acc = 0.9583333333333334,\n",
  3837. "val_loss = 4.112110699794089, val_acc = 0.5775193798449613\n",
  3838. "\n",
  3839. "epoch: 44\n",
  3840. "epoch 44: train loss = 0.07531021014753594, l1loss = 0.08808258729407108, train acc = 0.9799196787148594,\n",
  3841. "val_loss = 4.904226697272487, val_acc = 0.624031007751938\n",
  3842. "\n",
  3843. "epoch: 45\n",
  3844. "epoch 45: train loss = 0.08077501409862893, l1loss = 0.0878749838076442, train acc = 0.9693775100401606,\n",
  3845. "val_loss = 2.515559282413749, val_acc = 0.6046511627906976\n",
  3846. "\n",
  3847. "epoch: 46\n",
  3848. "epoch 46: train loss = 0.07068757228104465, l1loss = 0.08739253621264155, train acc = 0.9804216867469879,\n",
  3849. "val_loss = 4.238404756368593, val_acc = 0.6085271317829457\n",
  3850. "\n",
  3851. "epoch: 47\n",
  3852. "epoch 47: train loss = 0.056927473190797856, l1loss = 0.0867786297956145, train acc = 0.9844377510040161,\n",
  3853. "val_loss = 2.2621954614801925, val_acc = 0.5930232558139535\n",
  3854. "\n",
  3855. "epoch: 48\n",
  3856. "epoch 48: train loss = 0.07424461281293607, l1loss = 0.08710216084517629, train acc = 0.9844377510040161,\n",
  3857. "val_loss = 8.002929717071297, val_acc = 0.6124031007751938\n",
  3858. "\n",
  3859. "epoch: 49\n",
  3860. "epoch 49: train loss = 0.08703712670199842, l1loss = 0.08769192863779375, train acc = 0.9688755020080321,\n",
  3861. "val_loss = 3.551590413086174, val_acc = 0.6085271317829457\n",
  3862. "\n",
  3863. "epoch: 50\n",
  3864. "epoch 50: train loss = 0.07011409313324465, l1loss = 0.08753819362705491, train acc = 0.9844377510040161,\n",
  3865. "val_loss = 3.0541449490241535, val_acc = 0.5852713178294574\n",
  3866. "\n",
  3867. "epoch: 51\n",
  3868. "epoch 51: train loss = 0.04092977885680505, l1loss = 0.08705330301958873, train acc = 0.9944779116465864,\n",
  3869. "val_loss = 3.317158439362696, val_acc = 0.5891472868217055\n",
  3870. "\n",
  3871. "epoch: 52\n",
  3872. "epoch 52: train loss = 0.03851072603170891, l1loss = 0.08622222872503311, train acc = 0.9914658634538153,\n",
  3873. "val_loss = 2.734001413796299, val_acc = 0.6124031007751938\n",
  3874. "\n",
  3875. "epoch: 53\n",
  3876. "epoch 53: train loss = 0.03234308925139377, l1loss = 0.08536305176804822, train acc = 0.9954819277108434,\n",
  3877. "val_loss = 3.4601848698401634, val_acc = 0.6046511627906976\n",
  3878. "\n",
  3879. "epoch: 54\n",
  3880. "epoch 54: train loss = 0.026902885224864666, l1loss = 0.08468511724567797, train acc = 0.9964859437751004,\n",
  3881. "val_loss = 3.1516257589177568, val_acc = 0.5968992248062015\n",
  3882. "\n",
  3883. "epoch: 55\n",
  3884. "epoch 55: train loss = 0.02750487214171264, l1loss = 0.08413356647314317, train acc = 0.9959839357429718,\n",
  3885. "val_loss = 3.372754837696751, val_acc = 0.5813953488372093\n",
  3886. "\n",
  3887. "epoch: 56\n",
  3888. "epoch 56: train loss = 0.02194071931382978, l1loss = 0.08356957251288326, train acc = 0.9969879518072289,\n",
  3889. "val_loss = 3.443614445915518, val_acc = 0.5968992248062015\n",
  3890. "\n",
  3891. "epoch: 57\n",
  3892. "epoch 57: train loss = 0.02077264787561922, l1loss = 0.0830162249834662, train acc = 0.9979919678714859,\n",
  3893. "val_loss = 3.4553179718958313, val_acc = 0.6007751937984496\n",
  3894. "\n",
  3895. "epoch: 58\n",
  3896. "epoch 58: train loss = 0.02582511185372929, l1loss = 0.08288956104392507, train acc = 0.9959839357429718,\n",
  3897. "val_loss = 3.5070084046947865, val_acc = 0.5968992248062015\n",
  3898. "\n",
  3899. "epoch: 59\n",
  3900. "epoch 59: train loss = 0.017465517126831663, l1loss = 0.08231255358242127, train acc = 0.9974899598393574,\n",
  3901. "val_loss = 3.79313347598379, val_acc = 0.5968992248062015\n",
  3902. "\n",
  3903. "epoch: 60\n",
  3904. "epoch 60: train loss = 0.014348378337650415, l1loss = 0.08186340673142169, train acc = 0.9994979919678715,\n",
  3905. "val_loss = 4.290767019109208, val_acc = 0.6356589147286822\n",
  3906. "\n",
  3907. "epoch: 61\n",
  3908. "epoch 61: train loss = 0.014340946001999349, l1loss = 0.08129345541857333, train acc = 1.0,\n",
  3909. "val_loss = 3.978316806668286, val_acc = 0.5930232558139535\n",
  3910. "\n",
  3911. "!!! overfitted !!!\n",
  3912. "[1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1]\n",
  3913. "[0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1]\n",
  3914. "early stoping results:\n",
  3915. "\t [0.6041666666666666, 0.5572916666666666, 0.5208333333333334, 0.5078534031413613, 0.5445026178010471, 0.6073298429319371, 0.5602094240837696, 0.5340314136125655]\n",
  3916. "output = [1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  3917. "label = tensor([0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1,\n",
  3918. " 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1,\n",
  3919. " 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1,\n",
  3920. " 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1,\n",
  3921. " 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1,\n",
  3922. " 1, 1, 0, 1, 1, 0, 1, 1])\n",
  3923. "output = [1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0]\n",
  3924. "label = tensor([1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0,\n",
  3925. " 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0,\n",
  3926. " 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0,\n",
  3927. " 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1,\n",
  3928. " 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1,\n",
  3929. " 1, 1, 1, 0, 1, 0, 1, 0])\n",
  3930. "output = [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  3931. "label = tensor([0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1,\n",
  3932. " 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1,\n",
  3933. " 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0,\n",
  3934. " 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0,\n",
  3935. " 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0,\n",
  3936. " 0, 0, 1, 0, 1, 1, 1, 1])\n",
  3937. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  3938. "label = tensor([0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1,\n",
  3939. " 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0,\n",
  3940. " 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1,\n",
  3941. " 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1,\n",
  3942. " 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0,\n",
  3943. " 0, 0, 1, 1, 1, 0, 1, 1])\n",
  3944. "output = [1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  3945. "label = tensor([1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0,\n",
  3946. " 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0,\n",
  3947. " 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1,\n",
  3948. " 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0,\n",
  3949. " 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0,\n",
  3950. " 0, 0, 1, 1, 1, 0, 1, 1])\n",
  3951. "output = [1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  3952. "label = tensor([0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1,\n",
  3953. " 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0,\n",
  3954. " 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1,\n",
  3955. " 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1,\n",
  3956. " 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0,\n",
  3957. " 1, 0, 0, 1, 1, 0, 1, 1])\n",
  3958. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0]\n",
  3959. "label = tensor([1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1,\n",
  3960. " 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0,\n",
  3961. " 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0,\n",
  3962. " 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0,\n",
  3963. " 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1,\n",
  3964. " 1, 0, 1, 0, 0, 1, 1, 1])\n",
  3965. "output = [1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  3966. "label = tensor([0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1,\n",
  3967. " 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0,\n",
  3968. " 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1,\n",
  3969. " 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1,\n",
  3970. " 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0,\n",
  3971. " 1, 0, 1, 0, 1, 1, 0, 1])\n",
  3972. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0]\n",
  3973. "label = tensor([0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1,\n",
  3974. " 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0,\n",
  3975. " 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0,\n",
  3976. " 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0,\n",
  3977. " 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1,\n",
  3978. " 1, 1, 0, 0, 1, 0, 1, 1])\n",
  3979. "output = [1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0]\n",
  3980. "label = tensor([1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n",
  3981. " 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0,\n",
  3982. " 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0,\n",
  3983. " 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0,\n",
  3984. " 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0,\n",
  3985. " 1, 1, 0, 1, 0, 0, 0, 0])\n",
  3986. "output = [0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0]\n",
  3987. "label = tensor([0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1,\n",
  3988. " 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1,\n",
  3989. " 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0,\n",
  3990. " 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0,\n",
  3991. " 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1,\n",
  3992. " 0, 1, 1, 1, 1, 0, 1, 1])\n",
  3993. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0]\n",
  3994. "label = tensor([1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0,\n",
  3995. " 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0,\n",
  3996. " 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1,\n",
  3997. " 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n",
  3998. " 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0,\n",
  3999. " 1, 1, 1, 1, 1, 1, 0, 0])\n",
  4000. "output = [1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0]\n",
  4001. "label = tensor([1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0,\n",
  4002. " 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1,\n",
  4003. " 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1,\n",
  4004. " 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0,\n",
  4005. " 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1,\n",
  4006. " 0, 0, 1, 1, 0, 1, 1, 0])\n"
  4007. ]
  4008. },
  4009. {
  4010. "name": "stdout",
  4011. "output_type": "stream",
  4012. "text": [
  4013. "output = [1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0]\n",
  4014. "label = tensor([0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0,\n",
  4015. " 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1,\n",
  4016. " 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0,\n",
  4017. " 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0,\n",
  4018. " 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1,\n",
  4019. " 1, 0, 1, 1, 1, 0, 0, 0])\n",
  4020. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0]\n",
  4021. "label = tensor([0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1,\n",
  4022. " 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1,\n",
  4023. " 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1,\n",
  4024. " 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1,\n",
  4025. " 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1,\n",
  4026. " 0, 1, 1, 0, 0, 0, 0, 1])\n",
  4027. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0]\n",
  4028. "label = tensor([1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0,\n",
  4029. " 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1,\n",
  4030. " 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0])\n",
  4031. "\t [0.6673376950176145, 0.6722054380664653, 0.6802005012531328, 0.6826347305389222, 0.7250876314471708, 0.7, 0.6841579210394803, 0.6651606425702812]\n",
  4032. "[0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1]\n",
  4033. "[1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1]\n",
  4034. "full train results:\n",
  4035. "\t [0.59375, 0.5885416666666666, 0.6354166666666666, 0.612565445026178, 0.6387434554973822, 0.6492146596858639, 0.6335078534031413, 0.6335078534031413]\n",
  4036. "\t [0.9949672873678913, 0.9974823766364552, 0.8666666666666667, 0.8393213572854291, 0.99949924887331, 0.638, 0.999000499750125, 0.9949799196787149]\n",
  4037. "[1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1]\n",
  4038. "[0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0]\n",
  4039. "best accs results:\n",
  4040. "\t [0.59375, 0.5625, 0.6354166666666666, 0.6282722513089005, 0.6020942408376964, 0.6387434554973822, 0.643979057591623, 0.6387434554973822]\n",
  4041. "\t [0.8037242073477604, 0.8670694864048338, 0.4967418546365915, 0.5489021956087824, 0.9744616925388082, 0.519, 0.47226386806596704, 0.7761044176706827]\n",
  4042. "[1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0]\n",
  4043. "-----------------------------Fold 9---------------\n",
  4044. "preparing dataloaders...\n",
  4045. "torch.Size([72, 7, 9, 20])\n",
  4046. "coef when 0 > 1 1\n",
  4047. "creating model...\n",
  4048. "calculating total steps...\n",
  4049. "epoch: 1\n",
  4050. "validation loss decreased (inf ---> 0.682980), val_acc = 0.627906976744186\n",
  4051. "validation acc increased (0.000000 ---> 0.627907)\n",
  4052. "validation acc increased (0.627907 ---> 0.627907)\n",
  4053. "epoch 1: train loss = 0.6777922638376853, l1loss = 0.13775174053838676, train acc = 0.48370927318295737,\n",
  4054. "val_loss = 0.687888598257257, val_acc = 0.627906976744186\n",
  4055. "\n",
  4056. "epoch: 2\n",
  4057. "validation acc increased (0.627907 ---> 0.627907)\n",
  4058. "epoch 2: train loss = 0.6605406909060657, l1loss = 0.13724787228537683, train acc = 0.6210526315789474,\n",
  4059. "val_loss = 0.7003788606140965, val_acc = 0.42248062015503873\n",
  4060. "\n",
  4061. "epoch: 3\n",
  4062. "epoch 3: train loss = 0.6454371491171662, l1loss = 0.13666182353831174, train acc = 0.6416040100250626,\n",
  4063. "val_loss = 0.7020500177560851, val_acc = 0.4844961240310077\n",
  4064. "\n",
  4065. "epoch: 4\n",
  4066. "epoch 4: train loss = 0.6296094764444164, l1loss = 0.13591576377700146, train acc = 0.6571428571428571,\n",
  4067. "val_loss = 0.6741203370020371, val_acc = 0.5271317829457365\n",
  4068. "\n",
  4069. "epoch: 5\n",
  4070. "validation loss decreased (0.682980 ---> 0.672345), val_acc = 0.5271317829457365\n",
  4071. "validation loss decreased (0.672345 ---> 0.651531), val_acc = 0.5271317829457365\n",
  4072. "epoch 5: train loss = 0.6174353536507838, l1loss = 0.13493877780019192, train acc = 0.6571428571428571,\n",
  4073. "val_loss = 0.64598062149314, val_acc = 0.5310077519379846\n",
  4074. "\n",
  4075. "epoch: 6\n",
  4076. "validation loss decreased (0.651531 ---> 0.644699), val_acc = 0.5310077519379846\n",
  4077. "validation loss decreased (0.644699 ---> 0.633400), val_acc = 0.5310077519379846\n",
  4078. "epoch 6: train loss = 0.6050603376295334, l1loss = 0.13365735947189475, train acc = 0.6621553884711779,\n",
  4079. "val_loss = 0.6314658402472504, val_acc = 0.5348837209302325\n",
  4080. "\n",
  4081. "epoch: 7\n",
  4082. "validation loss decreased (0.633400 ---> 0.632519), val_acc = 0.5348837209302325\n",
  4083. "validation loss decreased (0.632519 ---> 0.631722), val_acc = 0.5348837209302325\n",
  4084. "epoch 7: train loss = 0.5956596527183265, l1loss = 0.13198910928459692, train acc = 0.6656641604010025,\n",
  4085. "val_loss = 0.6331861458083455, val_acc = 0.5348837209302325\n",
  4086. "\n",
  4087. "epoch: 8\n",
  4088. "epoch 8: train loss = 0.5859128608141926, l1loss = 0.1298997061070344, train acc = 0.6716791979949874,\n",
  4089. "val_loss = 0.6339161365993263, val_acc = 0.5310077519379846\n",
  4090. "\n",
  4091. "epoch: 9\n",
  4092. "epoch 9: train loss = 0.5775983289967205, l1loss = 0.12739512194368177, train acc = 0.6796992481203008,\n",
  4093. "val_loss = 0.6418065218500388, val_acc = 0.5348837209302325\n",
  4094. "\n",
  4095. "epoch: 10\n",
  4096. "epoch 10: train loss = 0.5680718240283784, l1loss = 0.12452510148286819, train acc = 0.6907268170426065,\n",
  4097. "val_loss = 0.6507704207139422, val_acc = 0.5348837209302325\n",
  4098. "\n",
  4099. "epoch: 11\n",
  4100. "epoch 11: train loss = 0.5571945123803944, l1loss = 0.1213724056954372, train acc = 0.6962406015037594,\n",
  4101. "val_loss = 0.6841371244238329, val_acc = 0.5310077519379846\n",
  4102. "\n",
  4103. "epoch: 12\n"
  4104. ]
  4105. },
  4106. {
  4107. "name": "stdout",
  4108. "output_type": "stream",
  4109. "text": [
  4110. "epoch 12: train loss = 0.5443704637369715, l1loss = 0.11801029294729233, train acc = 0.7072681704260652,\n",
  4111. "val_loss = 0.7370998619138732, val_acc = 0.5232558139534884\n",
  4112. "\n",
  4113. "epoch: 13\n",
  4114. "epoch 13: train loss = 0.5282434655012642, l1loss = 0.11461027522657748, train acc = 0.7263157894736842,\n",
  4115. "val_loss = 0.7001163677651753, val_acc = 0.5426356589147286\n",
  4116. "\n",
  4117. "epoch: 14\n",
  4118. "epoch 14: train loss = 0.5089050915008201, l1loss = 0.11128778580884288, train acc = 0.7413533834586467,\n",
  4119. "val_loss = 0.7310682345730389, val_acc = 0.5348837209302325\n",
  4120. "\n",
  4121. "epoch: 15\n",
  4122. "epoch 15: train loss = 0.4865566873311399, l1loss = 0.10820232349305524, train acc = 0.7533834586466165,\n",
  4123. "val_loss = 0.9019363850586174, val_acc = 0.5310077519379846\n",
  4124. "\n",
  4125. "epoch: 16\n",
  4126. "epoch 16: train loss = 0.45884659435217245, l1loss = 0.10554263851322924, train acc = 0.7789473684210526,\n",
  4127. "val_loss = 0.9704440212989038, val_acc = 0.5232558139534884\n",
  4128. "\n",
  4129. "epoch: 17\n",
  4130. "epoch 17: train loss = 0.4202355332541884, l1loss = 0.10339735007674473, train acc = 0.8070175438596491,\n",
  4131. "val_loss = 0.9909111167802367, val_acc = 0.6124031007751938\n",
  4132. "\n",
  4133. "epoch: 18\n",
  4134. "epoch 18: train loss = 0.39302192711292355, l1loss = 0.1019542952826746, train acc = 0.8120300751879699,\n",
  4135. "val_loss = 1.3908450917680135, val_acc = 0.5232558139534884\n",
  4136. "\n",
  4137. "epoch: 19\n",
  4138. "epoch 19: train loss = 0.3705816494641746, l1loss = 0.10042775794303507, train acc = 0.8165413533834587,\n",
  4139. "val_loss = 0.8365242425785508, val_acc = 0.5697674418604651\n",
  4140. "\n",
  4141. "epoch: 20\n",
  4142. "epoch 20: train loss = 0.35236290118150543, l1loss = 0.0989852717533745, train acc = 0.8370927318295739,\n",
  4143. "val_loss = 1.175207398658575, val_acc = 0.5271317829457365\n",
  4144. "\n",
  4145. "epoch: 21\n",
  4146. "epoch 21: train loss = 0.3512345191828888, l1loss = 0.09775472859242805, train acc = 0.8335839598997494,\n",
  4147. "val_loss = 1.720713703958101, val_acc = 0.5116279069767442\n",
  4148. "\n",
  4149. "epoch: 22\n",
  4150. "epoch 22: train loss = 0.32802224415436126, l1loss = 0.09652294553162759, train acc = 0.8526315789473684,\n",
  4151. "val_loss = 1.0351900666259055, val_acc = 0.5387596899224806\n",
  4152. "\n",
  4153. "epoch: 23\n",
  4154. "epoch 23: train loss = 0.2810892618987196, l1loss = 0.09586043051236255, train acc = 0.8847117794486216,\n",
  4155. "val_loss = 1.7488028725912406, val_acc = 0.5193798449612403\n",
  4156. "\n",
  4157. "epoch: 24\n",
  4158. "epoch 24: train loss = 0.29594317181666097, l1loss = 0.09554791460957443, train acc = 0.8736842105263158,\n",
  4159. "val_loss = 1.383325032485548, val_acc = 0.5387596899224806\n",
  4160. "\n",
  4161. "epoch: 25\n",
  4162. "epoch 25: train loss = 0.2908367050545556, l1loss = 0.09524397529605635, train acc = 0.8741854636591478,\n",
  4163. "val_loss = 1.0075195944586466, val_acc = 0.6085271317829457\n",
  4164. "\n",
  4165. "epoch: 26\n",
  4166. "epoch 26: train loss = 0.23876858004053733, l1loss = 0.09456468856050855, train acc = 0.9042606516290727,\n",
  4167. "val_loss = 3.7633727769731307, val_acc = 0.627906976744186\n",
  4168. "\n",
  4169. "epoch: 27\n",
  4170. "validation acc increased (0.627907 ---> 0.631783)\n",
  4171. "epoch 27: train loss = 0.24321200991035405, l1loss = 0.09383204626409632, train acc = 0.8897243107769424,\n",
  4172. "val_loss = 2.632583130237668, val_acc = 0.5155038759689923\n",
  4173. "\n",
  4174. "epoch: 28\n",
  4175. "epoch 28: train loss = 0.19356993710188042, l1loss = 0.0934341524291158, train acc = 0.9238095238095239,\n",
  4176. "val_loss = 1.41927497885948, val_acc = 0.5348837209302325\n",
  4177. "\n",
  4178. "epoch: 29\n",
  4179. "epoch 29: train loss = 0.19538922365147965, l1loss = 0.09319527589512946, train acc = 0.9167919799498747,\n",
  4180. "val_loss = 3.835814361424409, val_acc = 0.624031007751938\n",
  4181. "\n",
  4182. "epoch: 30\n",
  4183. "epoch 30: train loss = 0.17916062683389897, l1loss = 0.09250437699837194, train acc = 0.9303258145363409,\n",
  4184. "val_loss = 1.621931373026193, val_acc = 0.6085271317829457\n",
  4185. "\n",
  4186. "epoch: 31\n",
  4187. "epoch 31: train loss = 0.21077537299098825, l1loss = 0.09211045807614959, train acc = 0.9087719298245615,\n",
  4188. "val_loss = 1.3185462252807247, val_acc = 0.5930232558139535\n",
  4189. "\n",
  4190. "epoch: 32\n",
  4191. "epoch 32: train loss = 0.23887639734380525, l1loss = 0.09190204342579782, train acc = 0.8952380952380953,\n",
  4192. "val_loss = 1.9735888972762943, val_acc = 0.5232558139534884\n",
  4193. "\n",
  4194. "epoch: 33\n",
  4195. "epoch 33: train loss = 0.14902917222122203, l1loss = 0.09144701687315651, train acc = 0.9478696741854636,\n",
  4196. "val_loss = 1.734970021617505, val_acc = 0.5813953488372093\n",
  4197. "\n",
  4198. "epoch: 34\n",
  4199. "epoch 34: train loss = 0.1352653970246327, l1loss = 0.09123586411389492, train acc = 0.9609022556390977,\n",
  4200. "val_loss = 1.6061633298563402, val_acc = 0.5348837209302325\n",
  4201. "\n",
  4202. "epoch: 35\n",
  4203. "epoch 35: train loss = 0.12018011457713923, l1loss = 0.0913496767742592, train acc = 0.9654135338345865,\n",
  4204. "val_loss = 1.4085034085798633, val_acc = 0.5852713178294574\n",
  4205. "\n",
  4206. "epoch: 36\n",
  4207. "epoch 36: train loss = 0.12157767949845259, l1loss = 0.09087181371405609, train acc = 0.9568922305764411,\n",
  4208. "val_loss = 12.18474845738374, val_acc = 0.627906976744186\n",
  4209. "\n",
  4210. "epoch: 37\n",
  4211. "epoch 37: train loss = 0.11420494081980005, l1loss = 0.09056822333419533, train acc = 0.9588972431077695,\n",
  4212. "val_loss = 7.133128994195036, val_acc = 0.627906976744186\n",
  4213. "\n",
  4214. "epoch: 38\n",
  4215. "epoch 38: train loss = 0.08377009170844142, l1loss = 0.08998052694444968, train acc = 0.9769423558897243,\n",
  4216. "val_loss = 1.5790407207123405, val_acc = 0.5348837209302325\n",
  4217. "\n",
  4218. "epoch: 39\n",
  4219. "epoch 39: train loss = 0.07196493960711592, l1loss = 0.08941901628310818, train acc = 0.9824561403508771,\n",
  4220. "val_loss = 1.7764077741046285, val_acc = 0.5426356589147286\n",
  4221. "\n",
  4222. "epoch: 40\n",
  4223. "epoch 40: train loss = 0.06884303719813663, l1loss = 0.08907348288778673, train acc = 0.981453634085213,\n",
  4224. "val_loss = 2.0529281263665635, val_acc = 0.5658914728682171\n",
  4225. "\n",
  4226. "epoch: 41\n",
  4227. "epoch 41: train loss = 0.08752110526152422, l1loss = 0.08877328430351458, train acc = 0.9669172932330827,\n",
  4228. "val_loss = 3.8814146389332853, val_acc = 0.624031007751938\n",
  4229. "\n",
  4230. "epoch: 42\n",
  4231. "epoch 42: train loss = 0.0534191582846761, l1loss = 0.08847359084619914, train acc = 0.9899749373433584,\n",
  4232. "val_loss = 1.7713334769472595, val_acc = 0.5581395348837209\n",
  4233. "\n",
  4234. "epoch: 43\n",
  4235. "epoch 43: train loss = 0.053076960512420585, l1loss = 0.08797531594011121, train acc = 0.9854636591478697,\n",
  4236. "val_loss = 3.003138168837673, val_acc = 0.5310077519379846\n",
  4237. "\n",
  4238. "epoch: 44\n",
  4239. "epoch 44: train loss = 0.03749572625173662, l1loss = 0.08741736609610101, train acc = 0.9954887218045113,\n",
  4240. "val_loss = 2.302085431047188, val_acc = 0.5658914728682171\n",
  4241. "\n",
  4242. "epoch: 45\n",
  4243. "epoch 45: train loss = 0.03470267659067211, l1loss = 0.086931109906438, train acc = 0.9944862155388471,\n",
  4244. "val_loss = 3.2026045303936153, val_acc = 0.5465116279069767\n",
  4245. "\n",
  4246. "epoch: 46\n",
  4247. "epoch 46: train loss = 0.03238022409398155, l1loss = 0.08665003874397517, train acc = 0.9949874686716792,\n",
  4248. "val_loss = 2.3425228115200074, val_acc = 0.5697674418604651\n",
  4249. "\n",
  4250. "epoch: 47\n",
  4251. "epoch 47: train loss = 0.026900880723108624, l1loss = 0.08609663713396641, train acc = 0.9979949874686717,\n",
  4252. "val_loss = 2.393414729440859, val_acc = 0.5387596899224806\n",
  4253. "\n",
  4254. "epoch: 48\n",
  4255. "epoch 48: train loss = 0.023233257417093242, l1loss = 0.08549760701064776, train acc = 0.9984962406015038,\n",
  4256. "val_loss = 2.7446321505446765, val_acc = 0.5348837209302325\n",
  4257. "\n",
  4258. "epoch: 49\n",
  4259. "epoch 49: train loss = 0.024097669448582153, l1loss = 0.08505817094168866, train acc = 0.9974937343358395,\n",
  4260. "val_loss = 2.4446980241657226, val_acc = 0.5387596899224806\n",
  4261. "\n",
  4262. "epoch: 50\n",
  4263. "epoch 50: train loss = 0.01827189443740331, l1loss = 0.08443690010330133, train acc = 0.9989974937343359,\n",
  4264. "val_loss = 2.4035656581553377, val_acc = 0.5232558139534884\n",
  4265. "\n",
  4266. "epoch: 51\n",
  4267. "epoch 51: train loss = 0.016350781231334335, l1loss = 0.08388166891452961, train acc = 1.0,\n",
  4268. "val_loss = 2.443206698395485, val_acc = 0.5155038759689923\n",
  4269. "\n",
  4270. "!!! overfitted !!!\n",
  4271. "[1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1]\n",
  4272. "[1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1]\n",
  4273. "early stoping results:\n",
  4274. "\t [0.6041666666666666, 0.5572916666666666, 0.5208333333333334, 0.5078534031413613, 0.5445026178010471, 0.6073298429319371, 0.5602094240837696, 0.5340314136125655, 0.5654450261780105]\n",
  4275. "output = [0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  4276. "label = tensor([0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0,\n",
  4277. " 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0,\n",
  4278. " 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1,\n",
  4279. " 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0,\n",
  4280. " 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0,\n",
  4281. " 0, 1, 1, 0, 0, 1, 1, 0])\n",
  4282. "output = [1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0]\n",
  4283. "label = tensor([1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0,\n",
  4284. " 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1,\n",
  4285. " 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1,\n",
  4286. " 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0,\n",
  4287. " 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1,\n",
  4288. " 1, 0, 0, 0, 1, 1, 1, 1])\n",
  4289. "output = [1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  4290. "label = tensor([1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0,\n",
  4291. " 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1,\n",
  4292. " 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1,\n",
  4293. " 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0,\n",
  4294. " 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0,\n",
  4295. " 0, 1, 1, 1, 1, 1, 1, 1])\n",
  4296. "output = [1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]\n",
  4297. "label = tensor([1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0,\n",
  4298. " 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0,\n",
  4299. " 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0,\n",
  4300. " 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0,\n",
  4301. " 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1,\n",
  4302. " 0, 0, 0, 0, 0, 0, 1, 0])\n",
  4303. "output = [1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  4304. "label = tensor([1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1,\n",
  4305. " 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1,\n",
  4306. " 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0,\n",
  4307. " 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1,\n",
  4308. " 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0,\n",
  4309. " 1, 1, 1, 1, 1, 1, 0, 1])\n",
  4310. "output = [1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0]\n",
  4311. "label = tensor([1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0,\n",
  4312. " 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0,\n",
  4313. " 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0,\n",
  4314. " 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1,\n",
  4315. " 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0,\n",
  4316. " 1, 0, 1, 1, 0, 0, 1, 0])\n",
  4317. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  4318. "label = tensor([1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1,\n",
  4319. " 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0,\n",
  4320. " 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1,\n",
  4321. " 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0,\n",
  4322. " 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1,\n",
  4323. " 0, 1, 1, 1, 1, 1, 0, 1])\n",
  4324. "output = [1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  4325. "label = tensor([0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0,\n",
  4326. " 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1,\n",
  4327. " 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0,\n",
  4328. " 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1,\n",
  4329. " 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0,\n",
  4330. " 0, 0, 1, 1, 1, 1, 0, 1])\n",
  4331. "output = [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0]\n",
  4332. "label = tensor([0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1,\n",
  4333. " 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0,\n",
  4334. " 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1,\n",
  4335. " 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0,\n",
  4336. " 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n",
  4337. " 0, 1, 0, 1, 1, 0, 0, 1])\n",
  4338. "output = [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0]\n",
  4339. "label = tensor([0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0,\n",
  4340. " 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0,\n",
  4341. " 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0,\n",
  4342. " 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0,\n",
  4343. " 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1,\n",
  4344. " 1, 0, 1, 0, 0, 1, 1, 1])\n",
  4345. "output = [1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  4346. "label = tensor([0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1,\n",
  4347. " 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0,\n",
  4348. " 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0,\n",
  4349. " 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1,\n",
  4350. " 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0,\n",
  4351. " 1, 1, 0, 0, 1, 0, 0, 1])\n",
  4352. "output = [1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  4353. "label = tensor([1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1,\n",
  4354. " 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0,\n",
  4355. " 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1,\n",
  4356. " 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1,\n",
  4357. " 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0,\n",
  4358. " 1, 0, 0, 1, 0, 1, 1, 0])\n",
  4359. "output = [1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0]\n",
  4360. "label = tensor([0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1,\n",
  4361. " 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0,\n",
  4362. " 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
  4363. " 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0,\n",
  4364. " 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1,\n",
  4365. " 1, 0, 1, 1, 0, 0, 0, 1])\n",
  4366. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  4367. "label = tensor([1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0,\n",
  4368. " 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0,\n",
  4369. " 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1,\n",
  4370. " 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1,\n",
  4371. " 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0,\n",
  4372. " 0, 0, 0, 0, 0, 1, 0, 1])\n"
  4373. ]
  4374. },
  4375. {
  4376. "name": "stdout",
  4377. "output_type": "stream",
  4378. "text": [
  4379. "output = [1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0]\n",
  4380. "label = tensor([1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0,\n",
  4381. " 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1,\n",
  4382. " 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0,\n",
  4383. " 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1,\n",
  4384. " 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1,\n",
  4385. " 1, 0, 0, 1, 0, 1, 1, 0])\n",
  4386. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0]\n",
  4387. "label = tensor([1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0,\n",
  4388. " 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0,\n",
  4389. " 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1,\n",
  4390. " 1, 1, 1])\n",
  4391. "\t [0.6673376950176145, 0.6722054380664653, 0.6802005012531328, 0.6826347305389222, 0.7250876314471708, 0.7, 0.6841579210394803, 0.6651606425702812, 0.668671679197995]\n",
  4392. "[1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1]\n",
  4393. "[1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1]\n",
  4394. "full train results:\n",
  4395. "\t [0.59375, 0.5885416666666666, 0.6354166666666666, 0.612565445026178, 0.6387434554973822, 0.6492146596858639, 0.6335078534031413, 0.6335078534031413, 0.612565445026178]\n",
  4396. "\t [0.9949672873678913, 0.9974823766364552, 0.8666666666666667, 0.8393213572854291, 0.99949924887331, 0.638, 0.999000499750125, 0.9949799196787149, 0.9508771929824561]\n",
  4397. "[1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0]\n",
  4398. "[1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1]\n",
  4399. "best accs results:\n",
  4400. "\t [0.59375, 0.5625, 0.6354166666666666, 0.6282722513089005, 0.6020942408376964, 0.6387434554973822, 0.643979057591623, 0.6387434554973822, 0.643979057591623]\n",
  4401. "\t [0.8037242073477604, 0.8670694864048338, 0.4967418546365915, 0.5489021956087824, 0.9744616925388082, 0.519, 0.47226386806596704, 0.7761044176706827, 0.49473684210526314]\n",
  4402. "[1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0]\n",
  4403. "-----------------------------Fold 10---------------\n",
  4404. "preparing dataloaders...\n",
  4405. "torch.Size([75, 7, 9, 20])\n",
  4406. "coef when 0 > 1 1\n",
  4407. "creating model...\n",
  4408. "calculating total steps...\n",
  4409. "epoch: 1\n",
  4410. "validation loss decreased (inf ---> 0.695332), val_acc = 0.39147286821705424\n",
  4411. "validation acc increased (0.000000 ---> 0.391473)\n",
  4412. "validation acc increased (0.391473 ---> 0.391473)\n",
  4413. "epoch 1: train loss = 0.6719437708207711, l1loss = 0.13840212986696904, train acc = 0.6165829145728643,\n",
  4414. "val_loss = 0.7145830023196317, val_acc = 0.39147286821705424\n",
  4415. "\n",
  4416. "epoch: 2\n",
  4417. "validation acc increased (0.391473 ---> 0.391473)\n",
  4418. "validation acc increased (0.391473 ---> 0.391473)\n",
  4419. "epoch 2: train loss = 0.6526712950150572, l1loss = 0.13800431019696757, train acc = 0.6326633165829145,\n",
  4420. "val_loss = 0.7263189609660659, val_acc = 0.39147286821705424\n",
  4421. "\n",
  4422. "epoch: 3\n",
  4423. "validation acc increased (0.391473 ---> 0.391473)\n",
  4424. "validation acc increased (0.391473 ---> 0.406977)\n",
  4425. "epoch 3: train loss = 0.6384344094362691, l1loss = 0.13752041265892623, train acc = 0.6432160804020101,\n",
  4426. "val_loss = 0.719265153241712, val_acc = 0.45348837209302323\n",
  4427. "\n",
  4428. "epoch: 4\n",
  4429. "validation acc increased (0.406977 ---> 0.457364)\n",
  4430. "validation loss decreased (0.695332 ---> 0.691640), val_acc = 0.5310077519379846\n",
  4431. "validation acc increased (0.457364 ---> 0.531008)\n",
  4432. "epoch 4: train loss = 0.6233027500123834, l1loss = 0.13689041630407076, train acc = 0.6472361809045226,\n",
  4433. "val_loss = 0.674999581981999, val_acc = 0.5465116279069767\n",
  4434. "\n",
  4435. "epoch: 5\n",
  4436. "validation loss decreased (0.691640 ---> 0.671344), val_acc = 0.5465116279069767\n",
  4437. "validation acc increased (0.531008 ---> 0.546512)\n",
  4438. "validation loss decreased (0.671344 ---> 0.640276), val_acc = 0.5542635658914729\n",
  4439. "validation acc increased (0.546512 ---> 0.554264)\n",
  4440. "epoch 5: train loss = 0.6108603898604311, l1loss = 0.1360374202231067, train acc = 0.650251256281407,\n",
  4441. "val_loss = 0.6320977502090986, val_acc = 0.5581395348837209\n",
  4442. "\n",
  4443. "epoch: 6\n",
  4444. "validation loss decreased (0.640276 ---> 0.630428), val_acc = 0.5581395348837209\n",
  4445. "validation acc increased (0.554264 ---> 0.558140)\n",
  4446. "validation loss decreased (0.630428 ---> 0.619005), val_acc = 0.5658914728682171\n",
  4447. "validation acc increased (0.558140 ---> 0.565891)\n",
  4448. "epoch 6: train loss = 0.6001691482773978, l1loss = 0.13489290275166382, train acc = 0.657788944723618,\n",
  4449. "val_loss = 0.6185085505478142, val_acc = 0.5658914728682171\n",
  4450. "\n",
  4451. "epoch: 7\n",
  4452. "validation loss decreased (0.619005 ---> 0.617354), val_acc = 0.5658914728682171\n",
  4453. "validation acc increased (0.565891 ---> 0.565891)\n",
  4454. "validation loss decreased (0.617354 ---> 0.615356), val_acc = 0.5658914728682171\n",
  4455. "validation acc increased (0.565891 ---> 0.565891)\n",
  4456. "epoch 7: train loss = 0.5907169395355722, l1loss = 0.13343299792040533, train acc = 0.6693467336683417,\n",
  4457. "val_loss = 0.6119359452133031, val_acc = 0.5736434108527132\n",
  4458. "\n",
  4459. "epoch: 8\n",
  4460. "validation loss decreased (0.615356 ---> 0.612685), val_acc = 0.5736434108527132\n",
  4461. "validation acc increased (0.565891 ---> 0.573643)\n",
  4462. "validation loss decreased (0.612685 ---> 0.608328), val_acc = 0.5736434108527132\n",
  4463. "validation acc increased (0.573643 ---> 0.573643)\n",
  4464. "epoch 8: train loss = 0.5808460336234701, l1loss = 0.13162351006539025, train acc = 0.6733668341708543,\n",
  4465. "val_loss = 0.6133937794108724, val_acc = 0.5736434108527132\n",
  4466. "\n",
  4467. "epoch: 9\n",
  4468. "validation acc increased (0.573643 ---> 0.573643)\n",
  4469. "validation acc increased (0.573643 ---> 0.577519)\n",
  4470. "epoch 9: train loss = 0.5721211679017724, l1loss = 0.12944562336308274, train acc = 0.6798994974874372,\n",
  4471. "val_loss = 0.6197530188301738, val_acc = 0.5775193798449613\n",
  4472. "\n",
  4473. "epoch: 10\n",
  4474. "validation acc increased (0.577519 ---> 0.577519)\n",
  4475. "validation acc increased (0.577519 ---> 0.581395)\n",
  4476. "epoch 10: train loss = 0.5629525616540382, l1loss = 0.12692890809708504, train acc = 0.6889447236180904,\n",
  4477. "val_loss = 0.6338218380083409, val_acc = 0.5775193798449613\n",
  4478. "\n",
  4479. "epoch: 11\n",
  4480. "epoch 11: train loss = 0.5500448384476666, l1loss = 0.12408753369920816, train acc = 0.699497487437186,\n",
  4481. "val_loss = 0.6388333800227143, val_acc = 0.5736434108527132\n",
  4482. "\n",
  4483. "epoch: 12\n"
  4484. ]
  4485. },
  4486. {
  4487. "name": "stdout",
  4488. "output_type": "stream",
  4489. "text": [
  4490. "epoch 12: train loss = 0.5367419248250261, l1loss = 0.12101081039887576, train acc = 0.7075376884422111,\n",
  4491. "val_loss = 0.6473393957744273, val_acc = 0.5736434108527132\n",
  4492. "\n",
  4493. "epoch: 13\n",
  4494. "epoch 13: train loss = 0.5212755465627316, l1loss = 0.11783038612136888, train acc = 0.7266331658291457,\n",
  4495. "val_loss = 0.6885562600322472, val_acc = 0.5658914728682171\n",
  4496. "\n",
  4497. "epoch: 14\n",
  4498. "epoch 14: train loss = 0.5001930103230117, l1loss = 0.11466055483973805, train acc = 0.735678391959799,\n",
  4499. "val_loss = 0.7727257928182912, val_acc = 0.5736434108527132\n",
  4500. "\n",
  4501. "epoch: 15\n",
  4502. "epoch 15: train loss = 0.4774602065134288, l1loss = 0.11163033707507292, train acc = 0.7597989949748744,\n",
  4503. "val_loss = 0.9347423278084097, val_acc = 0.5697674418604651\n",
  4504. "\n",
  4505. "epoch: 16\n",
  4506. "validation acc increased (0.581395 ---> 0.596899)\n",
  4507. "epoch 16: train loss = 0.45671038232257016, l1loss = 0.10889189650664977, train acc = 0.7688442211055276,\n",
  4508. "val_loss = 0.6646126000456107, val_acc = 0.5658914728682171\n",
  4509. "\n",
  4510. "epoch: 17\n",
  4511. "epoch 17: train loss = 0.4233617796969773, l1loss = 0.10659007389641287, train acc = 0.7919597989949749,\n",
  4512. "val_loss = 0.7088340003361073, val_acc = 0.5542635658914729\n",
  4513. "\n",
  4514. "epoch: 18\n",
  4515. "epoch 18: train loss = 0.39575232902363916, l1loss = 0.10463322919966588, train acc = 0.807035175879397,\n",
  4516. "val_loss = 0.7800197698349176, val_acc = 0.5775193798449613\n",
  4517. "\n",
  4518. "epoch: 19\n",
  4519. "epoch 19: train loss = 0.3720384899695315, l1loss = 0.10311959861361202, train acc = 0.8321608040201005,\n",
  4520. "val_loss = 1.0530221295911213, val_acc = 0.5852713178294574\n",
  4521. "\n",
  4522. "epoch: 20\n",
  4523. "epoch 20: train loss = 0.35293013776966076, l1loss = 0.10156447164077854, train acc = 0.8321608040201005,\n",
  4524. "val_loss = 0.8265702262405277, val_acc = 0.5503875968992248\n",
  4525. "\n",
  4526. "epoch: 21\n",
  4527. "epoch 21: train loss = 0.3344811164853561, l1loss = 0.10016776281385566, train acc = 0.8361809045226131,\n",
  4528. "val_loss = 1.0450448842011681, val_acc = 0.5775193798449613\n",
  4529. "\n",
  4530. "epoch: 22\n",
  4531. "epoch 22: train loss = 0.2859235891145677, l1loss = 0.09900103884575955, train acc = 0.8763819095477386,\n",
  4532. "val_loss = 1.4950751149377157, val_acc = 0.5736434108527132\n",
  4533. "\n",
  4534. "epoch: 23\n",
  4535. "epoch 23: train loss = 0.27330368340913974, l1loss = 0.09814592776586063, train acc = 0.8748743718592965,\n",
  4536. "val_loss = 1.6195507289827331, val_acc = 0.6085271317829457\n",
  4537. "\n",
  4538. "epoch: 24\n",
  4539. "validation acc increased (0.596899 ---> 0.608527)\n",
  4540. "epoch 24: train loss = 0.2554657350233452, l1loss = 0.09719441908238521, train acc = 0.8904522613065327,\n",
  4541. "val_loss = 1.7599091363507648, val_acc = 0.5697674418604651\n",
  4542. "\n",
  4543. "epoch: 25\n",
  4544. "validation acc increased (0.608527 ---> 0.612403)\n",
  4545. "epoch 25: train loss = 0.26244540217533785, l1loss = 0.09658051813068103, train acc = 0.8854271356783919,\n",
  4546. "val_loss = 1.328415922416273, val_acc = 0.5852713178294574\n",
  4547. "\n",
  4548. "epoch: 26\n",
  4549. "epoch 26: train loss = 0.2488035890025709, l1loss = 0.09598230193907292, train acc = 0.9025125628140703,\n",
  4550. "val_loss = 3.3820917366086976, val_acc = 0.5581395348837209\n",
  4551. "\n",
  4552. "epoch: 27\n",
  4553. "epoch 27: train loss = 0.24395602191213386, l1loss = 0.09520171582698822, train acc = 0.8814070351758794,\n",
  4554. "val_loss = 3.163267457207968, val_acc = 0.5581395348837209\n",
  4555. "\n",
  4556. "epoch: 28\n",
  4557. "epoch 28: train loss = 0.2519955996891961, l1loss = 0.09456676208793219, train acc = 0.8798994974874372,\n",
  4558. "val_loss = 2.3792159464932228, val_acc = 0.5581395348837209\n",
  4559. "\n",
  4560. "epoch: 29\n",
  4561. "epoch 29: train loss = 0.20381304789428137, l1loss = 0.09389582407384661, train acc = 0.9130653266331659,\n",
  4562. "val_loss = 1.6001185594603073, val_acc = 0.5581395348837209\n",
  4563. "\n",
  4564. "epoch: 30\n",
  4565. "epoch 30: train loss = 0.17334565265693858, l1loss = 0.09341333552670839, train acc = 0.9341708542713568,\n",
  4566. "val_loss = 1.851118143214736, val_acc = 0.5930232558139535\n",
  4567. "\n",
  4568. "epoch: 31\n",
  4569. "validation acc increased (0.612403 ---> 0.620155)\n",
  4570. "epoch 31: train loss = 0.14162640643479238, l1loss = 0.09275954460828148, train acc = 0.9552763819095478,\n",
  4571. "val_loss = 2.0247100628623667, val_acc = 0.5697674418604651\n",
  4572. "\n",
  4573. "epoch: 32\n",
  4574. "epoch 32: train loss = 0.15501669086703104, l1loss = 0.09234665035452674, train acc = 0.9371859296482412,\n",
  4575. "val_loss = 2.2792202677837636, val_acc = 0.5852713178294574\n",
  4576. "\n",
  4577. "epoch: 33\n",
  4578. "epoch 33: train loss = 0.12440619978248774, l1loss = 0.09191003219416394, train acc = 0.9582914572864322,\n",
  4579. "val_loss = 2.0286328884982323, val_acc = 0.5968992248062015\n",
  4580. "\n",
  4581. "epoch: 34\n",
  4582. "epoch 34: train loss = 0.12274269695258021, l1loss = 0.09157498074536348, train acc = 0.9547738693467337,\n",
  4583. "val_loss = 2.5109643936157227, val_acc = 0.562015503875969\n",
  4584. "\n",
  4585. "epoch: 35\n",
  4586. "epoch 35: train loss = 0.09087972700895376, l1loss = 0.09097352933943571, train acc = 0.9753768844221106,\n",
  4587. "val_loss = 2.107085255227348, val_acc = 0.5697674418604651\n",
  4588. "\n",
  4589. "epoch: 36\n",
  4590. "epoch 36: train loss = 0.09877167490109727, l1loss = 0.09090764223780465, train acc = 0.9693467336683417,\n",
  4591. "val_loss = 2.899521066236866, val_acc = 0.5775193798449613\n",
  4592. "\n",
  4593. "epoch: 37\n",
  4594. "epoch 37: train loss = 0.12718479042526465, l1loss = 0.09116000884292114, train acc = 0.9567839195979899,\n",
  4595. "val_loss = 2.9109171711850563, val_acc = 0.5697674418604651\n",
  4596. "\n",
  4597. "epoch: 38\n",
  4598. "epoch 38: train loss = 0.15343157828752718, l1loss = 0.09161678471008138, train acc = 0.9492462311557789,\n",
  4599. "val_loss = 3.6789348217868065, val_acc = 0.5736434108527132\n",
  4600. "\n",
  4601. "epoch: 39\n",
  4602. "epoch 39: train loss = 0.17688065675934356, l1loss = 0.09161391867015829, train acc = 0.9256281407035176,\n",
  4603. "val_loss = 2.8331502167753473, val_acc = 0.562015503875969\n",
  4604. "\n",
  4605. "epoch: 40\n",
  4606. "epoch 40: train loss = 0.12294016767985856, l1loss = 0.09109236833887484, train acc = 0.957286432160804,\n",
  4607. "val_loss = 3.345622025718985, val_acc = 0.562015503875969\n",
  4608. "\n",
  4609. "epoch: 41\n",
  4610. "epoch 41: train loss = 0.09555761652750586, l1loss = 0.0903443555211901, train acc = 0.9708542713567839,\n",
  4611. "val_loss = 2.0581300794616224, val_acc = 0.6046511627906976\n",
  4612. "\n",
  4613. "epoch: 42\n",
  4614. "epoch 42: train loss = 0.07512064163559046, l1loss = 0.08976309295724984, train acc = 0.9748743718592965,\n",
  4615. "val_loss = 3.453665764756905, val_acc = 0.5852713178294574\n",
  4616. "\n",
  4617. "epoch: 43\n",
  4618. "epoch 43: train loss = 0.04777320917677041, l1loss = 0.0891012175897857, train acc = 0.9909547738693467,\n",
  4619. "val_loss = 2.352656833885252, val_acc = 0.5930232558139535\n",
  4620. "\n",
  4621. "epoch: 44\n",
  4622. "epoch 44: train loss = 0.049835165397231304, l1loss = 0.08854852927539816, train acc = 0.9889447236180905,\n",
  4623. "val_loss = 2.9295913517013075, val_acc = 0.5658914728682171\n",
  4624. "\n",
  4625. "epoch: 45\n",
  4626. "epoch 45: train loss = 0.04242832651689424, l1loss = 0.08794202994761155, train acc = 0.9919597989949749,\n",
  4627. "val_loss = 2.413541268932727, val_acc = 0.5736434108527132\n",
  4628. "\n",
  4629. "epoch: 46\n",
  4630. "epoch 46: train loss = 0.03263931775549848, l1loss = 0.08736478100769483, train acc = 0.9939698492462311,\n",
  4631. "val_loss = 2.5269698264048426, val_acc = 0.5813953488372093\n",
  4632. "\n",
  4633. "epoch: 47\n",
  4634. "epoch 47: train loss = 0.026620606537065914, l1loss = 0.08684112468556543, train acc = 0.9969849246231156,\n",
  4635. "val_loss = 3.387900699940763, val_acc = 0.5813953488372093\n",
  4636. "\n",
  4637. "epoch: 48\n",
  4638. "epoch 48: train loss = 0.02269124810457529, l1loss = 0.08632632712622983, train acc = 0.9979899497487437,\n",
  4639. "val_loss = 2.787818894451737, val_acc = 0.5736434108527132\n",
  4640. "\n",
  4641. "epoch: 49\n",
  4642. "epoch 49: train loss = 0.022012453552466543, l1loss = 0.08572817495719871, train acc = 0.9964824120603015,\n",
  4643. "val_loss = 2.598147943962452, val_acc = 0.5891472868217055\n",
  4644. "\n",
  4645. "epoch: 50\n",
  4646. "epoch 50: train loss = 0.02431201655885682, l1loss = 0.0853168428168824, train acc = 0.9979899497487437,\n",
  4647. "val_loss = 2.618208586574034, val_acc = 0.5852713178294574\n",
  4648. "\n",
  4649. "epoch: 51\n",
  4650. "epoch 51: train loss = 0.042026931474256755, l1loss = 0.08488889236995323, train acc = 0.9869346733668342,\n",
  4651. "val_loss = 5.366614888804827, val_acc = 0.5465116279069767\n",
  4652. "\n",
  4653. "epoch: 52\n",
  4654. "epoch 52: train loss = 0.05808617710767679, l1loss = 0.08540894663214084, train acc = 0.9819095477386934,\n",
  4655. "val_loss = 2.219164011090301, val_acc = 0.6046511627906976\n",
  4656. "\n",
  4657. "epoch: 53\n",
  4658. "epoch 53: train loss = 0.04891583280796981, l1loss = 0.0857906321185318, train acc = 0.9894472361809046,\n",
  4659. "val_loss = 2.5789132524830425, val_acc = 0.5736434108527132\n",
  4660. "\n",
  4661. "epoch: 54\n",
  4662. "epoch 54: train loss = 0.04095752583301846, l1loss = 0.08530921218682773, train acc = 0.9899497487437185,\n",
  4663. "val_loss = 2.9466089703315914, val_acc = 0.562015503875969\n",
  4664. "\n",
  4665. "epoch: 55\n",
  4666. "epoch 55: train loss = 0.01827716198960441, l1loss = 0.08464146351544702, train acc = 0.9994974874371859,\n",
  4667. "val_loss = 2.840330068455186, val_acc = 0.562015503875969\n",
  4668. "\n",
  4669. "epoch: 56\n"
  4670. ]
  4671. },
  4672. {
  4673. "name": "stdout",
  4674. "output_type": "stream",
  4675. "text": [
  4676. "epoch 56: train loss = 0.01295484382724717, l1loss = 0.08385968863514799, train acc = 1.0,\n",
  4677. "val_loss = 3.1219512177992237, val_acc = 0.5658914728682171\n",
  4678. "\n",
  4679. "!!! overfitted !!!\n",
  4680. "[0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1]\n",
  4681. "[1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1]\n",
  4682. "early stoping results:\n",
  4683. "\t [0.6041666666666666, 0.5572916666666666, 0.5208333333333334, 0.5078534031413613, 0.5445026178010471, 0.6073298429319371, 0.5602094240837696, 0.5340314136125655, 0.5654450261780105, 0.5287958115183246]\n",
  4684. "output = [1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  4685. "label = tensor([0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1,\n",
  4686. " 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0,\n",
  4687. " 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1,\n",
  4688. " 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0,\n",
  4689. " 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1,\n",
  4690. " 0, 1, 1, 1, 1, 0, 0, 1])\n",
  4691. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0]\n",
  4692. "label = tensor([0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0,\n",
  4693. " 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0,\n",
  4694. " 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,\n",
  4695. " 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1,\n",
  4696. " 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0,\n",
  4697. " 0, 0, 0, 0, 0, 1, 0, 1])\n",
  4698. "output = [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0]\n",
  4699. "label = tensor([0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0,\n",
  4700. " 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0,\n",
  4701. " 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1,\n",
  4702. " 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0,\n",
  4703. " 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n",
  4704. " 0, 0, 1, 0, 1, 1, 1, 0])\n",
  4705. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  4706. "label = tensor([0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0,\n",
  4707. " 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1,\n",
  4708. " 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0,\n",
  4709. " 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1,\n",
  4710. " 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1,\n",
  4711. " 1, 0, 0, 0, 1, 0, 0, 1])\n",
  4712. "output = [1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  4713. "label = tensor([1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1,\n",
  4714. " 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1,\n",
  4715. " 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1,\n",
  4716. " 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0,\n",
  4717. " 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0,\n",
  4718. " 0, 0, 0, 0, 1, 0, 0, 1])\n",
  4719. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0]\n",
  4720. "label = tensor([0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1,\n",
  4721. " 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1,\n",
  4722. " 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1,\n",
  4723. " 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0,\n",
  4724. " 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1,\n",
  4725. " 1, 1, 0, 0, 1, 1, 1, 0])\n",
  4726. "output = [1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  4727. "label = tensor([1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0,\n",
  4728. " 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1,\n",
  4729. " 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1,\n",
  4730. " 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0,\n",
  4731. " 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0,\n",
  4732. " 1, 1, 0, 1, 1, 1, 1, 1])\n",
  4733. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  4734. "label = tensor([1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1,\n",
  4735. " 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1,\n",
  4736. " 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1,\n",
  4737. " 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0,\n",
  4738. " 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0,\n",
  4739. " 0, 0, 1, 0, 1, 1, 0, 0])\n",
  4740. "output = [0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  4741. "label = tensor([0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1,\n",
  4742. " 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1,\n",
  4743. " 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0,\n",
  4744. " 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0,\n",
  4745. " 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1,\n",
  4746. " 0, 1, 0, 1, 0, 0, 1, 0])\n",
  4747. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0]\n",
  4748. "label = tensor([1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0,\n",
  4749. " 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1,\n",
  4750. " 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1,\n",
  4751. " 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1,\n",
  4752. " 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1,\n",
  4753. " 0, 0, 1, 1, 0, 1, 1, 1])\n",
  4754. "output = [1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0]\n",
  4755. "label = tensor([1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0,\n",
  4756. " 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0,\n",
  4757. " 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1,\n",
  4758. " 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0,\n",
  4759. " 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0,\n",
  4760. " 1, 0, 1, 0, 0, 0, 1, 1])\n",
  4761. "output = [1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  4762. "label = tensor([0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1,\n",
  4763. " 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1,\n",
  4764. " 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0,\n",
  4765. " 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1,\n",
  4766. " 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0,\n",
  4767. " 1, 1, 0, 1, 1, 0, 1, 1])\n",
  4768. "output = [1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  4769. "label = tensor([1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1,\n",
  4770. " 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1,\n",
  4771. " 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1,\n",
  4772. " 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1,\n",
  4773. " 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0,\n",
  4774. " 0, 0, 0, 1, 1, 1, 1, 0])\n",
  4775. "output = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0]\n",
  4776. "label = tensor([0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,\n",
  4777. " 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1,\n",
  4778. " 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0,\n",
  4779. " 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1,\n",
  4780. " 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0,\n",
  4781. " 0, 1, 0, 1, 0, 1, 0, 0])\n"
  4782. ]
  4783. },
  4784. {
  4785. "name": "stdout",
  4786. "output_type": "stream",
  4787. "text": [
  4788. "output = [1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n",
  4789. "label = tensor([1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1,\n",
  4790. " 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1,\n",
  4791. " 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1,\n",
  4792. " 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1,\n",
  4793. " 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0,\n",
  4794. " 1, 0, 0, 1, 1, 0, 0, 1])\n",
  4795. "output = [1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0]\n",
  4796. "label = tensor([0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0,\n",
  4797. " 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0,\n",
  4798. " 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1])\n",
  4799. "\t [0.6673376950176145, 0.6722054380664653, 0.6802005012531328, 0.6826347305389222, 0.7250876314471708, 0.7, 0.6841579210394803, 0.6651606425702812, 0.668671679197995, 0.6798994974874372]\n",
  4800. "[1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1]\n",
  4801. "[0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1]\n",
  4802. "full train results:\n",
  4803. "\t [0.59375, 0.5885416666666666, 0.6354166666666666, 0.612565445026178, 0.6387434554973822, 0.6492146596858639, 0.6335078534031413, 0.6335078534031413, 0.612565445026178, 0.6335078534031413]\n",
  4804. "\t [0.9949672873678913, 0.9974823766364552, 0.8666666666666667, 0.8393213572854291, 0.99949924887331, 0.638, 0.999000499750125, 0.9949799196787149, 0.9508771929824561, 0.9798994974874372]\n",
  4805. "[0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1]\n",
  4806. "[0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1]\n",
  4807. "best accs results:\n",
  4808. "\t [0.59375, 0.5625, 0.6354166666666666, 0.6282722513089005, 0.6020942408376964, 0.6387434554973822, 0.643979057591623, 0.6387434554973822, 0.643979057591623, 0.6230366492146597]\n",
  4809. "\t [0.8037242073477604, 0.8670694864048338, 0.4967418546365915, 0.5489021956087824, 0.9744616925388082, 0.519, 0.47226386806596704, 0.7761044176706827, 0.49473684210526314, 0.91356783919598]\n",
  4810. "[1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0]\n"
  4811. ]
  4812. }
  4813. ],
  4814. "source": [
  4815. "train_accs, test_accs = [], []\n",
  4816. "train_accs_over, test_accs_over = [], []\n",
  4817. "train_accs_acc, test_accs_acc = [], []\n",
  4818. "\n",
  4819. "for fold, (train_val_idx, test_idx) in enumerate(skf.split(data1, labels)):\n",
  4820. " \n",
  4821. " print('-----------------------------Fold {}---------------'.format(fold + 1))\n",
  4822. "\n",
  4823. " \n",
  4824. " print('preparing dataloaders...')\n",
  4825. " print(data.shape)\n",
  4826. " train_val_data = np.stack([data1[index] for index in train_val_idx])\n",
  4827. " train_val_label = [labels[index] for index in train_val_idx]\n",
  4828. " test_data = np.stack([data1[index] for index in test_idx])\n",
  4829. " test_label = [labels[index] for index in test_idx]\n",
  4830. " \n",
  4831. " \n",
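" # min-max statistics per entry of the trailing axis, reduced over the\n",
" # first three axes and computed on the training folds only\n",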
  4832. " Max = np.max(train_val_data, axis=(0,1,2), keepdims=True)\n",
  4833. " Min = np.min(train_val_data, axis=(0,1,2), keepdims=True)\n",
  4834. " train_val_data = (train_val_data-Min)/(Max-Min)\n",
  4835. " \n",
  4836. " Max_test = np.max(test_data, axis=(0,1,2), keepdims=True)\n",
  4837. " Min_test = np.min(test_data, axis=(0,1,2), keepdims=True)\n",
  4838. " test_data = (test_data-Min)/(Max-Min)\n",
  4839. " \n",
  4840. " \n",
  4841. " train_val = [[train_val_data[i], train_val_label[i]] for i in range(len(train_val_data))]\n",
  4842. " test = [[test_data[i], test_label[i]] for i in range(len(test_data))]\n",
  4843. " \n",
  4844. " num_train_val = len(train_val)\n",
  4845. " indices = list(range(num_train_val))\n",
  4846. " np.random.shuffle(indices)\n",
  4847. " split = int(np.floor(val_size*num_train_val))\n",
  4848. " train, val = [train_val[i] for i in indices[split:]] ,[train_val[i] for i in indices[:split]]\n",
  4849. " \n",
  4850. " train_labels = [data[1] for data in train]\n",
  4851. " \n",
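" # class balancing by duplication: copies of the minority class are\n",
" # appended until the two labels are roughly even in the training split\n",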
  4852. " oversample = 1\n",
  4853. " _, counts = np.unique(train_labels, return_counts=True)\n",
  4854. " if oversample==1:\n",
  4855. " if counts[1]>counts[0]:\n",
  4856. " label0 = [data for data in train if data[1]==0]\n",
  4857. " coef = int(counts[1]/counts[0])\n",
  4858. " print('coef when 1 > 0', coef)\n",
  4859. " for i in range(coef):\n",
  4860. " train = train + label0\n",
  4861. " elif counts[1]<counts[0]:\n",
  4862. " label1 = [data for data in train if data[1]==1]\n",
  4863. " coef = int(counts[0]/counts[1])\n",
  4864. " print('coef when 0 > 1', coef)\n",
  4865. " for i in range(coef):\n",
  4866. " train = train + label1\n",
  4867. " \n",
  4868. "\n",
  4869. " train_loader = torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=True)\n",
  4870. " val_loader = torch.utils.data.DataLoader(val, batch_size=batch_size, shuffle=True)\n",
  4871. " test_loader = torch.utils.data.DataLoader(test, batch_size=batch_size, shuffle=True)\n",
  4872. " \n",
  4873. " print('creating model...')\n",
  4874. " model = cnn().float()\n",
  4875. " optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)\n",
  4876. " criterion = nn.BCELoss()\n",
  4877. " \n",
  4878. " print('calculating total steps...')\n",
  4879. " steps = 0\n",
  4880. " for epoch in range(n_epochs):\n",
  4881. " for data, label in train_loader:\n",
  4882. " steps += 1\n",
  4883. "\n",
  4884. " scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, total_steps=steps, max_lr=0.001)\n",
  4885. " scheduler1 = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer)\n",
  4886. " l1_lambda = 0.0001\n",
  4887. " \n",
  4888. " min_val_loss = np.inf\n",
  4889. " max_val_acc = 0\n",
  4890. " \n",
  4891. " for epoch in range(n_epochs):\n",
  4892. " print('epoch: ', epoch+1)\n",
  4893. " train_loss = 0\n",
  4894. " l1_loss = 0\n",
  4895. " train_correct = 0\n",
  4896. " model.train()\n",
  4897. " '''for name, param in model.named_parameters():\n",
  4898. " print(name, param.data)\n",
  4899. " break'''\n",
  4900. " for iteration, (data,label) in enumerate(train_loader):\n",
  4901. " #print('\\ndata = ', torch.amax(data, axis=(0,1,2,4)), torch.amin(data, axis=(0,1,2,4)))\n",
  4902. " optimizer.zero_grad()\n",
  4903. " output = model(data.float())\n",
  4904. " label = torch.reshape(label, (-1,1))\n",
  4905. " label = label.float()\n",
  4906. " loss = criterion(output, label)\n",
  4907. " add_loss = loss\n",
  4908. " ex_loss = 0\n",
  4909. " for W in model.parameters():\n",
  4910. " ex_loss += l1_lambda*W.norm(1)\n",
  4911. " loss = loss + l1_lambda*W.norm(1) \n",
  4912. " loss.backward()\n",
  4913. " optimizer.step()\n",
  4914. " scheduler.step()\n",
  4915. " targets = [1 if output[i].round()==label[i] else 0 for i in range(len(label))]\n",
  4916. " train_correct += sum(targets)\n",
  4917. " train_loss += add_loss.item()*data.shape[0]\n",
  4918. " l1_loss += ex_loss.item()*data.shape[0]\n",
  4919. " \n",
  4920. " if iteration % print_every == 0:\n",
  4921. " is_training = True\n",
  4922. " val_loss = 0\n",
  4923. " val_correct = 0\n",
  4924. " model.eval()\n",
  4925. " for data, label in val_loader:\n",
  4926. " output = model(data.float())\n",
  4927. " label = torch.reshape(label, (-1,1))\n",
  4928. " label = label.float()\n",
  4929. " loss = criterion(output, label) \n",
  4930. " val_loss += loss.item()*data.shape[0]\n",
  4931. " targets = [1 if output[i].round()==label[i] else 0 for i in range(len(label))]\n",
  4932. " val_correct += sum(targets)\n",
  4933. " val_loss = val_loss/len(val_loader.sampler)\n",
  4934. " val_acc = val_correct/len(val_loader.sampler)\n",
  4935. "\n",
  4936. " if val_loss <= min_val_loss:\n",
  4937. " print(\"validation loss decreased ({:.6f} ---> {:.6f}), val_acc = {}\".format(min_val_loss, val_loss, val_acc))\n",
  4938. " torch.save(model.state_dict(), 'sal/model'+str(fold)+'.pt')\n",
  4939. " min_val_loss = val_loss\n",
  4940. " if val_acc >= max_val_acc:\n",
  4941. " print(\"validation acc increased ({:.6f} ---> {:.6f})\".format(max_val_acc, val_acc))\n",
  4942. " torch.save(model.state_dict(), 'sal/model'+str(fold)+'_acc.pt')\n",
  4943. " max_val_acc = val_acc\n",
  4944. " torch.save(model.state_dict(), 'sal/last_model'+str(fold)+'.pt')\n",
  4945. " model.train(mode=is_training)\n",
  4946. " \n",
  4947. " train_acc = train_correct/len(train_loader.sampler) \n",
  4948. " train_loss = train_loss/len(train_loader.sampler)\n",
  4949. " loss1 = l1_loss/len(train_loader.sampler)\n",
  4950. " \n",
  4951. " val_loss = 0\n",
  4952. " val_correct = 0\n",
  4953. " model.eval()\n",
  4954. " for data, label in val_loader:\n",
  4955. " output = model(data.float())\n",
  4956. " label = torch.reshape(label, (-1,1))\n",
  4957. " label = label.float()\n",
  4958. " loss = criterion(output, label) \n",
  4959. " val_loss += loss.item()*data.shape[0]\n",
  4960. " targets = [1 if output[i].round()==label[i] else 0 for i in range(len(label))]\n",
  4961. " val_correct += sum(targets)\n",
  4962. " \n",
  4963. " val_loss = val_loss/len(val_loader.sampler)\n",
  4964. " val_acc = val_correct/len(val_loader.sampler)\n",
  4965. " \n",
  4966. " print('epoch {}: train loss = {}, l1loss = {}, train acc = {},\\nval_loss = {}, val_acc = {}\\n'\n",
  4967. " .format(epoch+1, train_loss, loss1, train_acc, val_loss, val_acc))\n",
  4968. " if int(train_acc)==1:\n",
  4969. " print('!!! overfitted !!!')\n",
  4970. " break\n",
  4971. " model.train()\n",
  4972. " #scheduler1.step(val_loss)\n",
  4973. " \n",
  4974. " model =cnn().float()\n",
  4975. " model.load_state_dict(torch.load('sal/model'+str(fold)+'.pt'))\n",
  4976. " \n",
  4977. " n_correct = 0\n",
  4978. " model.eval()\n",
  4979. " for data, label in test_loader:\n",
  4980. " output = model(data.float())\n",
  4981. " targets = [1 if output[i].round()==label[i] else 0 for i in range(len(label))]\n",
  4982. " print(targets)\n",
  4983. " n_correct += sum(targets)\n",
  4984. " \n",
  4985. " test_accs.append(n_correct/len(test_loader.sampler))\n",
  4986. " print('early stoping results:\\n\\t', test_accs)\n",
  4987. " \n",
  4988. " n_correct = 0\n",
  4989. " model.eval()\n",
  4990. " for data, label in train_loader:\n",
  4991. " output = model(data.float())\n",
  4992. " print('output = ', [output[i].round().item() for i in range(len(label))])\n",
  4993. " print('label = ', label)\n",
  4994. " targets = [1 if output[i].round()==label[i] else 0 for i in range(len(label))]\n",
  4995. " n_correct += sum(targets)\n",
  4996. " \n",
  4997. " train_accs.append(n_correct/len(train_loader.sampler))\n",
  4998. " print('\\t', train_accs)\n",
  4999. " \n",
  5000. " model = cnn().float()\n",
  5001. " model.load_state_dict(torch.load('sal/last_model'+str(fold)+'.pt'))\n",
  5002. " \n",
  5003. " n_correct = 0\n",
  5004. " model.eval()\n",
  5005. " for data, label in test_loader:\n",
  5006. " output = model(data.float())\n",
  5007. " targets = [1 if output[i].round()==label[i] else 0 for i in range(len(label))]\n",
  5008. " print(targets)\n",
  5009. " n_correct += sum(targets)\n",
  5010. " test_accs_over.append(n_correct/len(test_loader.sampler))\n",
  5011. " print('full train results:\\n\\t', test_accs_over)\n",
  5012. " \n",
  5013. " n_correct = 0\n",
  5014. " model.eval()\n",
  5015. " for data, label in train_loader:\n",
  5016. " output = model(data.float())\n",
  5017. " targets = [1 if output[i].round()==label[i] else 0 for i in range(len(label))]\n",
  5018. " n_correct += sum(targets)\n",
  5019. " train_accs_over.append(n_correct/len(train_loader.sampler))\n",
  5020. " print('\\t', train_accs_over)\n",
  5021. " \n",
  5022. " model = cnn().float()\n",
  5023. " model.load_state_dict(torch.load('sal/model'+str(fold)+'_acc.pt'))\n",
  5024. " \n",
  5025. " n_correct = 0\n",
  5026. " model.eval()\n",
  5027. " for data, label in test_loader:\n",
  5028. " output = model(data.float())\n",
  5029. " targets = [1 if output[i].round()==label[i] else 0 for i in range(len(label))]\n",
  5030. " print(targets)\n",
  5031. " n_correct += sum(targets)\n",
  5032. " test_accs_acc.append(n_correct/len(test_loader.sampler))\n",
  5033. " print('best accs results:\\n\\t', test_accs_acc)\n",
  5034. " \n",
  5035. " n_correct = 0\n",
  5036. " model.eval()\n",
  5037. " for data, label in train_loader:\n",
  5038. " output = model(data.float())\n",
  5039. " targets = [1 if output[i].round()==label[i] else 0 for i in range(len(label))]\n",
  5040. " n_correct += sum(targets)\n",
  5041. " train_accs_acc.append(n_correct/len(train_loader.sampler))\n",
  5042. " print('\\t', train_accs_acc)\n",
  5043. " print(test_label)"
  5044. ]
  5045. },
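{
"cell_type": "markdown",
"id": "balancing-note",
"metadata": {},
"source": [
"A possible alternative to the duplication-based oversampling in the cell above: draw a class-balanced stream with `torch.utils.data.WeightedRandomSampler` instead of copying minority-class items. This is only a sketch, not part of the original run; it assumes `train` is the list of `(data, label)` pairs and `batch_size` the value defined earlier."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "balancing-sketch",
"metadata": {},
"outputs": [],
"source": [
"# Sketch (not used above): class balancing via weighted sampling instead of\n",
"# duplicating minority-class samples. Assumes `train` and `batch_size` exist.\n",
"import numpy as np\n",
"import torch\n",
"\n",
"lab = np.array([y for _, y in train])\n",
"class_counts = np.bincount(lab)\n",
"sample_weights = 1.0/class_counts[lab]  # rarer class -> larger weight\n",
"sampler = torch.utils.data.WeightedRandomSampler(\n",
"    weights=torch.as_tensor(sample_weights, dtype=torch.double),\n",
"    num_samples=len(train), replacement=True)\n",
"balanced_loader = torch.utils.data.DataLoader(train, batch_size=batch_size, sampler=sampler)"
]
},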
{
"cell_type": "code",
"execution_count": 286,
"id": "27ca56ee",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"0.6210514834205934"
]
},
"execution_count": 286,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"sum(test_accs_acc)/10"
]
},
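{
"cell_type": "markdown",
"id": "fold-summary-note",
"metadata": {},
"source": [
"The `/10` above hard-codes the number of folds. A sketch that summarizes all three checkpoint variants directly from the recorded lists, whatever their length:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "fold-summary-sketch",
"metadata": {},
"outputs": [],
"source": [
"# Sketch: mean/std test accuracy per checkpointing strategy over the folds run so far.\n",
"import numpy as np\n",
"\n",
"for name, accs in [('early stopping', test_accs),\n",
"                   ('last epoch', test_accs_over),\n",
"                   ('best val acc', test_accs_acc)]:\n",
"    a = np.asarray(accs, dtype=float)\n",
"    print('{}: mean={:.4f}, std={:.4f}, n={}'.format(name, a.mean(), a.std(), len(a)))"
]
},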
{
"cell_type": "code",
"execution_count": 26,
"id": "4043da8a",
"metadata": {},
"outputs": [],
"source": [
"from sklearn.linear_model import Perceptron"
]
},
{
"cell_type": "code",
"execution_count": 189,
"id": "468a56e4",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(1913, 7, 9, 20, 11)"
]
},
"execution_count": 189,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"dataset = picture_data_train\n",
"dataset.shape"
]
},
{
"cell_type": "code",
"execution_count": 220,
"id": "d4dc1a51",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"(1913, 7, 9, 20) 1913\n"
]
}
],
"source": [
"mean_data = np.mean(dataset, axis=4)\n",
"labels = vowel_label\n",
"print(mean_data.shape, len(labels))"
]
},
{
"cell_type": "code",
"execution_count": 221,
"id": "904a96d9",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"(1913, 5) (1913, 5)\n"
]
}
],
"source": [
"data1 = mean_data[:,[2,2,3,3,2],[2,4,5,5,3],[16,2,9,3,2]]\n",
"X = data1.reshape((-1,5))\n",
"print(data1.shape, X.shape)"
]
},
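{
"cell_type": "markdown",
"id": "indexing-note",
"metadata": {},
"source": [
"The fancy indexing above selects five fixed `(dim1, dim2, dim3)` coordinates per sample, so `data1` is already `(n_samples, 5)` and the `reshape` is a no-op. An equivalent explicit construction (a sketch, for clarity only; assumes `mean_data` and `X` from the cell above):"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "indexing-sketch",
"metadata": {},
"outputs": [],
"source": [
"# Sketch: the same five hand-picked features, built coordinate by coordinate.\n",
"import numpy as np\n",
"\n",
"coords = [(2, 2, 16), (2, 4, 2), (3, 5, 9), (3, 5, 3), (2, 3, 2)]\n",
"X_check = np.stack([mean_data[:, a, b, c] for a, b, c in coords], axis=1)\n",
"assert np.array_equal(X_check, X)"
]
},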
{
"cell_type": "code",
"execution_count": 222,
"id": "fd76f71a",
"metadata": {},
"outputs": [],
"source": [
"X_train, X_test, y_train, y_test = train_test_split(X, labels, test_size=0.1, random_state=42)"
]
},
{
"cell_type": "code",
"execution_count": 223,
"id": "c9ddc846",
"metadata": {},
"outputs": [],
"source": [
"clf = Perceptron(tol=1e-3, random_state=0)"
]
},
{
"cell_type": "code",
"execution_count": 224,
"id": "d3b2a352",
"metadata": {},
"outputs": [
{
"data": {
  5167. "text/html": [
  5168. "<style>#sk-container-id-28 {color: black;background-color: white;}#sk-container-id-28 pre{padding: 0;}#sk-container-id-28 div.sk-toggleable {background-color: white;}#sk-container-id-28 label.sk-toggleable__label {cursor: pointer;display: block;width: 100%;margin-bottom: 0;padding: 0.3em;box-sizing: border-box;text-align: center;}#sk-container-id-28 label.sk-toggleable__label-arrow:before {content: \"▸\";float: left;margin-right: 0.25em;color: #696969;}#sk-container-id-28 label.sk-toggleable__label-arrow:hover:before {color: black;}#sk-container-id-28 div.sk-estimator:hover label.sk-toggleable__label-arrow:before {color: black;}#sk-container-id-28 div.sk-toggleable__content {max-height: 0;max-width: 0;overflow: hidden;text-align: left;background-color: #f0f8ff;}#sk-container-id-28 div.sk-toggleable__content pre {margin: 0.2em;color: black;border-radius: 0.25em;background-color: #f0f8ff;}#sk-container-id-28 input.sk-toggleable__control:checked~div.sk-toggleable__content {max-height: 200px;max-width: 100%;overflow: auto;}#sk-container-id-28 input.sk-toggleable__control:checked~label.sk-toggleable__label-arrow:before {content: \"▾\";}#sk-container-id-28 div.sk-estimator input.sk-toggleable__control:checked~label.sk-toggleable__label {background-color: #d4ebff;}#sk-container-id-28 div.sk-label input.sk-toggleable__control:checked~label.sk-toggleable__label {background-color: #d4ebff;}#sk-container-id-28 input.sk-hidden--visually {border: 0;clip: rect(1px 1px 1px 1px);clip: rect(1px, 1px, 1px, 1px);height: 1px;margin: -1px;overflow: hidden;padding: 0;position: absolute;width: 1px;}#sk-container-id-28 div.sk-estimator {font-family: monospace;background-color: #f0f8ff;border: 1px dotted black;border-radius: 0.25em;box-sizing: border-box;margin-bottom: 0.5em;}#sk-container-id-28 div.sk-estimator:hover {background-color: #d4ebff;}#sk-container-id-28 div.sk-parallel-item::after {content: \"\";width: 100%;border-bottom: 1px solid gray;flex-grow: 1;}#sk-container-id-28 div.sk-label:hover label.sk-toggleable__label {background-color: #d4ebff;}#sk-container-id-28 div.sk-serial::before {content: \"\";position: absolute;border-left: 1px solid gray;box-sizing: border-box;top: 0;bottom: 0;left: 50%;z-index: 0;}#sk-container-id-28 div.sk-serial {display: flex;flex-direction: column;align-items: center;background-color: white;padding-right: 0.2em;padding-left: 0.2em;position: relative;}#sk-container-id-28 div.sk-item {position: relative;z-index: 1;}#sk-container-id-28 div.sk-parallel {display: flex;align-items: stretch;justify-content: center;background-color: white;position: relative;}#sk-container-id-28 div.sk-item::before, #sk-container-id-28 div.sk-parallel-item::before {content: \"\";position: absolute;border-left: 1px solid gray;box-sizing: border-box;top: 0;bottom: 0;left: 50%;z-index: -1;}#sk-container-id-28 div.sk-parallel-item {display: flex;flex-direction: column;z-index: 1;position: relative;background-color: white;}#sk-container-id-28 div.sk-parallel-item:first-child::after {align-self: flex-end;width: 50%;}#sk-container-id-28 div.sk-parallel-item:last-child::after {align-self: flex-start;width: 50%;}#sk-container-id-28 div.sk-parallel-item:only-child::after {width: 0;}#sk-container-id-28 div.sk-dashed-wrapped {border: 1px dashed gray;margin: 0 0.4em 0.5em 0.4em;box-sizing: border-box;padding-bottom: 0.4em;background-color: white;}#sk-container-id-28 div.sk-label label {font-family: monospace;font-weight: bold;display: inline-block;line-height: 1.2em;}#sk-container-id-28 
div.sk-label-container {text-align: center;}#sk-container-id-28 div.sk-container {/* jupyter's `normalize.less` sets `[hidden] { display: none; }` but bootstrap.min.css set `[hidden] { display: none !important; }` so we also need the `!important` here to be able to override the default hidden behavior on the sphinx rendered scikit-learn.org. See: https://github.com/scikit-learn/scikit-learn/issues/21755 */display: inline-block !important;position: relative;}#sk-container-id-28 div.sk-text-repr-fallback {display: none;}</style><div id=\"sk-container-id-28\" class=\"sk-top-container\"><div class=\"sk-text-repr-fallback\"><pre>Perceptron()</pre><b>In a Jupyter environment, please rerun this cell to show the HTML representation or trust the notebook. <br />On GitHub, the HTML representation is unable to render, please try loading this page with nbviewer.org.</b></div><div class=\"sk-container\" hidden><div class=\"sk-item\"><div class=\"sk-estimator sk-toggleable\"><input class=\"sk-toggleable__control sk-hidden--visually\" id=\"sk-estimator-id-28\" type=\"checkbox\" checked><label for=\"sk-estimator-id-28\" class=\"sk-toggleable__label sk-toggleable__label-arrow\">Perceptron</label><div class=\"sk-toggleable__content\"><pre>Perceptron()</pre></div></div></div></div></div>"
  5169. ],
  5170. "text/plain": [
  5171. "Perceptron()"
  5172. ]
  5173. },
  5174. "execution_count": 224,
  5175. "metadata": {},
  5176. "output_type": "execute_result"
  5177. }
  5178. ],
  5179. "source": [
  5180. "clf.fit(X_train, y_train)"
  5181. ]
  5182. },
  5183. {
  5184. "cell_type": "code",
  5185. "execution_count": 225,
  5186. "id": "6c07a732",
  5187. "metadata": {},
  5188. "outputs": [
  5189. {
  5190. "data": {
  5191. "text/plain": [
  5192. "0.8128994770482277"
  5193. ]
  5194. },
  5195. "execution_count": 225,
  5196. "metadata": {},
  5197. "output_type": "execute_result"
  5198. }
  5199. ],
  5200. "source": [
  5201. "clf.score(X_train, y_train)"
  5202. ]
  5203. },
  5204. {
  5205. "cell_type": "code",
  5206. "execution_count": 226,
  5207. "id": "472916ef",
  5208. "metadata": {},
  5209. "outputs": [
  5210. {
  5211. "data": {
  5212. "text/plain": [
  5213. "0.8697916666666666"
  5214. ]
  5215. },
  5216. "execution_count": 226,
  5217. "metadata": {},
  5218. "output_type": "execute_result"
  5219. }
  5220. ],
  5221. "source": [
  5222. "clf.score(X_test, y_test)"
  5223. ]
  5224. },
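{
"cell_type": "markdown",
"id": "cv-note",
"metadata": {},
"source": [
"The scores above come from a single 90/10 split. A sketch (not part of the original run) that cross-validates the same perceptron baseline on `X` and `labels` to get a variance estimate:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "cv-sketch",
"metadata": {},
"outputs": [],
"source": [
"# Sketch: 10-fold cross-validated accuracy for the perceptron baseline.\n",
"from sklearn.linear_model import Perceptron\n",
"from sklearn.model_selection import cross_val_score\n",
"\n",
"scores = cross_val_score(Perceptron(tol=1e-3, random_state=0), X, labels, cv=10)\n",
"print('mean = {:.4f}, std = {:.4f}'.format(scores.mean(), scores.std()))"
]
},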
{
"cell_type": "code",
"execution_count": 21,
"id": "83916158",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'alpha': 0.0001,\n",
" 'class_weight': None,\n",
" 'early_stopping': False,\n",
" 'eta0': 1.0,\n",
" 'fit_intercept': True,\n",
" 'l1_ratio': 0.15,\n",
" 'max_iter': 1000,\n",
" 'n_iter_no_change': 5,\n",
" 'n_jobs': None,\n",
" 'penalty': None,\n",
" 'random_state': 0,\n",
" 'shuffle': True,\n",
" 'tol': 0.001,\n",
" 'validation_fraction': 0.1,\n",
" 'verbose': 0,\n",
" 'warm_start': False}"
]
},
"execution_count": 21,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"clf.get_params()"
]
},
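{
"cell_type": "markdown",
"id": "gridsearch-note",
"metadata": {},
"source": [
"`get_params()` shows the defaults actually used (`penalty=None`, `alpha=0.0001`). A sketch of a small grid search over those two knobs, assuming `X_train`/`y_train` from the earlier split:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "gridsearch-sketch",
"metadata": {},
"outputs": [],
"source": [
"# Sketch: tune penalty/alpha for the perceptron with 5-fold grid search.\n",
"from sklearn.linear_model import Perceptron\n",
"from sklearn.model_selection import GridSearchCV\n",
"\n",
"grid = GridSearchCV(Perceptron(tol=1e-3, random_state=0),\n",
"                    param_grid={'penalty': [None, 'l2', 'l1'],\n",
"                                'alpha': [1e-5, 1e-4, 1e-3]},\n",
"                    cv=5)\n",
"grid.fit(X_train, y_train)\n",
"print(grid.best_params_, grid.best_score_)"
]
},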
{
"cell_type": "code",
"execution_count": null,
"id": "19bffc72",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.7"
}
},
"nbformat": 4,
"nbformat_minor": 5
}