{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "import csv\n", "import numpy as np\n", "\n", "from src.dataset import PandemicDataset\n", "from src.problem import SIRProblem\n", "from src.dinn import DINN, Scheduler\n", "from src.plotter import Plotter" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Load Data" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "covid_data = np.genfromtxt('./datasets/SIR_data.csv', delimiter=',')\n", "dataset = PandemicDataset('Synthetic_SIR', ['S', 'I', 'R'], 7.6e6, *covid_data)" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "def get_error(y, y_ref):\n", " err = []\n", " for i in range(len(y)):\n", " diff = y[i] - y_ref\n", " error = 1/3 * (np.linalg.norm(diff[0]) / np.linalg.norm(y_ref[0]) + \n", " np.linalg.norm(diff[1]) / np.linalg.norm(y_ref[1]) + \n", " np.linalg.norm(diff[2]) / np.linalg.norm(y_ref[2]))\n", " err.append(error)\n", " return np.array(err).mean(axis=0)" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "Learning Rate:\t0.001\n", "Optimizer:\tADAM\n", "Scheduler:\tPOLYNOMIAL\n", "\n", "torch seed: 7344232523737245676\n", "\n", "Epoch 0 | LR 0.0009999\n", "physics loss:\t\t0.00852774945598735\n", "observation loss:\t1.0632901729211592\n", "loss:\t\t\t1.0718179223771467\n", "---------------------------------\n", "alpha:\t\t\t0.5791704654693604\n", "beta:\t\t\t0.24064970016479492\n", "#################################\n", "\n", "Epoch 1000 | LR 0.0008998999999999944\n", "physics loss:\t\t0.00010428958311113062\n", "observation loss:\t0.0028886066543031457\n", "loss:\t\t\t0.0029928962374142763\n", "---------------------------------\n", "alpha:\t\t\t0.32289087772369385\n", "beta:\t\t\t0.48989078402519226\n", "#################################\n", "\n", "Epoch 2000 | LR 
0.0007998999999999963\n", "physics loss:\t\t6.593978560736111e-05\n", "observation loss:\t0.0004082090084230652\n", "loss:\t\t\t0.0004741487940304263\n", "---------------------------------\n", "alpha:\t\t\t0.3232259750366211\n", "beta:\t\t\t0.48726335167884827\n", "#################################\n", "\n", "Epoch 3000 | LR 0.000699900000000002\n", "physics loss:\t\t4.197694725904885e-05\n", "observation loss:\t0.00048559308675607095\n", "loss:\t\t\t0.0005275700340151198\n", "---------------------------------\n", "alpha:\t\t\t0.3279867470264435\n", "beta:\t\t\t0.49321258068084717\n", "#################################\n", "\n", "Epoch 4000 | LR 0.0005999000000000103\n", "physics loss:\t\t4.537836438963431e-05\n", "observation loss:\t0.00015905737477480653\n", "loss:\t\t\t0.00020443573916444085\n", "---------------------------------\n", "alpha:\t\t\t0.3300480246543884\n", "beta:\t\t\t0.4962349534034729\n", "#################################\n", "\n", "Epoch 5000 | LR 0.0004999000000000169\n", "physics loss:\t\t3.552936728462211e-05\n", "observation loss:\t0.00023845128850423744\n", "loss:\t\t\t0.0002739806557888595\n", "---------------------------------\n", "alpha:\t\t\t0.3312479555606842\n", "beta:\t\t\t0.49585384130477905\n", "#################################\n", "\n", "Epoch 6000 | LR 0.0003999000000000128\n", "physics loss:\t\t3.650966616089921e-05\n", "observation loss:\t9.427198384875194e-05\n", "loss:\t\t\t0.00013078165000965115\n", "---------------------------------\n", "alpha:\t\t\t0.33050960302352905\n", "beta:\t\t\t0.4955793619155884\n", "#################################\n", "\n", "Epoch 7000 | LR 0.00029990000000000366\n", "physics loss:\t\t3.936485758982795e-05\n", "observation loss:\t4.089197864410413e-05\n", "loss:\t\t\t8.025683623393207e-05\n", "---------------------------------\n", "alpha:\t\t\t0.330422967672348\n", "beta:\t\t\t0.4947073459625244\n", "#################################\n", "\n", "Epoch 8000 | LR 0.0001998999999999976\n", "physics 
loss:\t\t3.886132087888541e-05\n", "observation loss:\t3.799331993572665e-05\n", "loss:\t\t\t7.685464081461206e-05\n", "---------------------------------\n", "alpha:\t\t\t0.33085641264915466\n", "beta:\t\t\t0.4954530894756317\n", "#################################\n", "\n", "Epoch 9000 | LR 9.989999999999895e-05\n", "physics loss:\t\t3.8973161015748567e-05\n", "observation loss:\t3.600976938692644e-05\n", "loss:\t\t\t7.498293040267501e-05\n", "---------------------------------\n", "alpha:\t\t\t0.3308243453502655\n", "beta:\t\t\t0.4953617453575134\n", "#################################\n", "\n", "Learning Rate:\t0.001\n", "Optimizer:\tADAM\n", "Scheduler:\tPOLYNOMIAL\n", "\n", "torch seed: 13974148678280920715\n", "\n", "Epoch 0 | LR 0.0009999\n", "physics loss:\t\t0.0001287503355102872\n", "observation loss:\t0.882694273065505\n", "loss:\t\t\t0.8828230234010153\n", "---------------------------------\n", "alpha:\t\t\t0.3728388547897339\n", "beta:\t\t\t0.2401047796010971\n", "#################################\n", "\n", "Epoch 1000 | LR 0.0008998999999999944\n", "physics loss:\t\t2.8628702184206652e-05\n", "observation loss:\t0.0013679976169214065\n", "loss:\t\t\t0.001396626319105613\n", "---------------------------------\n", "alpha:\t\t\t0.29736149311065674\n", "beta:\t\t\t0.4456057548522949\n", "#################################\n", "\n", "Epoch 2000 | LR 0.0007998999999999963\n", "physics loss:\t\t2.223625981374804e-05\n", "observation loss:\t0.0002483236033214982\n", "loss:\t\t\t0.0002705598631352462\n", "---------------------------------\n", "alpha:\t\t\t0.32920563220977783\n", "beta:\t\t\t0.49372440576553345\n", "#################################\n", "\n", "Epoch 3000 | LR 0.000699900000000002\n", "physics loss:\t\t1.862663826185662e-05\n", "observation loss:\t0.0003833632851072574\n", "loss:\t\t\t0.00040198992336911403\n", "---------------------------------\n", "alpha:\t\t\t0.33170199394226074\n", "beta:\t\t\t0.4977753460407257\n", 
"#################################\n", "\n", "Epoch 4000 | LR 0.0005999000000000103\n", "physics loss:\t\t1.8544045149478127e-05\n", "observation loss:\t7.41022319790498e-05\n", "loss:\t\t\t9.264627712852792e-05\n", "---------------------------------\n", "alpha:\t\t\t0.33231687545776367\n", "beta:\t\t\t0.4981408715248108\n", "#################################\n", "\n", "Epoch 5000 | LR 0.0004999000000000169\n", "physics loss:\t\t1.6913371438059464e-05\n", "observation loss:\t0.00016788857434989194\n", "loss:\t\t\t0.0001848019457879514\n", "---------------------------------\n", "alpha:\t\t\t0.33255624771118164\n", "beta:\t\t\t0.4986303448677063\n", "#################################\n", "\n", "Epoch 6000 | LR 0.0003999000000000128\n", "physics loss:\t\t1.6275042092549277e-05\n", "observation loss:\t4.126435081256429e-05\n", "loss:\t\t\t5.753939290511357e-05\n", "---------------------------------\n", "alpha:\t\t\t0.33230409026145935\n", "beta:\t\t\t0.4990338981151581\n", "#################################\n", "\n", "Epoch 7000 | LR 0.00029990000000000366\n", "physics loss:\t\t1.512132602401294e-05\n", "observation loss:\t3.1907056151394045e-05\n", "loss:\t\t\t4.702838217540699e-05\n", "---------------------------------\n", "alpha:\t\t\t0.3328112065792084\n", "beta:\t\t\t0.49896669387817383\n", "#################################\n", "\n", "Epoch 8000 | LR 0.0001998999999999976\n", "physics loss:\t\t1.4224605287698235e-05\n", "observation loss:\t8.703364808445997e-06\n", "loss:\t\t\t2.2927970096144232e-05\n", "---------------------------------\n", "alpha:\t\t\t0.33194372057914734\n", "beta:\t\t\t0.4985193610191345\n", "#################################\n", "\n", "Epoch 9000 | LR 9.989999999999895e-05\n", "physics loss:\t\t1.414798103407193e-05\n", "observation loss:\t8.35025403971122e-06\n", "loss:\t\t\t2.249823507378315e-05\n", "---------------------------------\n", "alpha:\t\t\t0.33212387561798096\n", "beta:\t\t\t0.49900490045547485\n", 
"#################################\n", "\n", "Learning Rate:\t0.001\n", "Optimizer:\tADAM\n", "Scheduler:\tPOLYNOMIAL\n", "\n", "torch seed: 8856708383668657878\n", "\n", "Epoch 0 | LR 0.0009999\n", "physics loss:\t\t6.661209286171919e-07\n", "observation loss:\t1.1365172227899003\n", "loss:\t\t\t1.136517888910829\n", "---------------------------------\n", "alpha:\t\t\t0.6347366571426392\n", "beta:\t\t\t0.4883585274219513\n", "#################################\n", "\n", "Epoch 1000 | LR 0.0008998999999999944\n", "physics loss:\t\t5.6530584487254415e-05\n", "observation loss:\t0.0015439946074771679\n", "loss:\t\t\t0.0016005251919644222\n", "---------------------------------\n", "alpha:\t\t\t0.4070371091365814\n", "beta:\t\t\t0.6095771193504333\n", "#################################\n", "\n", "Epoch 2000 | LR 0.0007998999999999963\n", "physics loss:\t\t2.4105003715825452e-05\n", "observation loss:\t4.93280866562888e-05\n", "loss:\t\t\t7.343309037211425e-05\n", "---------------------------------\n", "alpha:\t\t\t0.3665112257003784\n", "beta:\t\t\t0.5489121675491333\n", "#################################\n", "\n", "Epoch 3000 | LR 0.000699900000000002\n", "physics loss:\t\t1.7371882545842878e-05\n", "observation loss:\t0.0004989551008049493\n", "loss:\t\t\t0.0005163269833507922\n", "---------------------------------\n", "alpha:\t\t\t0.34096625447273254\n", "beta:\t\t\t0.5113551020622253\n", "#################################\n", "\n", "Epoch 4000 | LR 0.0005999000000000103\n", "physics loss:\t\t1.589542198950243e-05\n", "observation loss:\t6.463586450712957e-05\n", "loss:\t\t\t8.0531286496632e-05\n", "---------------------------------\n", "alpha:\t\t\t0.3332453966140747\n", "beta:\t\t\t0.5013622045516968\n", "#################################\n", "\n", "Epoch 5000 | LR 0.0004999000000000169\n", "physics loss:\t\t1.4947803951786882e-05\n", "observation loss:\t5.773791614768732e-05\n", "loss:\t\t\t7.26857200994742e-05\n", "---------------------------------\n", 
"alpha:\t\t\t0.3330983519554138\n", "beta:\t\t\t0.5000171661376953\n", "#################################\n", "\n", "Epoch 6000 | LR 0.0003999000000000128\n", "physics loss:\t\t1.4707677597146236e-05\n", "observation loss:\t6.865935617780972e-05\n", "loss:\t\t\t8.336703377495595e-05\n", "---------------------------------\n", "alpha:\t\t\t0.3329768478870392\n", "beta:\t\t\t0.500525951385498\n", "#################################\n", "\n", "Epoch 7000 | LR 0.00029990000000000366\n", "physics loss:\t\t1.352202309908259e-05\n", "observation loss:\t9.50498857498595e-06\n", "loss:\t\t\t2.302701167406854e-05\n", "---------------------------------\n", "alpha:\t\t\t0.33351173996925354\n", "beta:\t\t\t0.5006612539291382\n", "#################################\n", "\n", "Epoch 8000 | LR 0.0001998999999999976\n", "physics loss:\t\t1.3968532328067627e-05\n", "observation loss:\t7.045591099654748e-06\n", "loss:\t\t\t2.1014123427722374e-05\n", "---------------------------------\n", "alpha:\t\t\t0.33390048146247864\n", "beta:\t\t\t0.501221239566803\n", "#################################\n", "\n", "Epoch 9000 | LR 9.989999999999895e-05\n", "physics loss:\t\t1.3257197473722147e-05\n", "observation loss:\t6.324658097780928e-06\n", "loss:\t\t\t1.9581855571503074e-05\n", "---------------------------------\n", "alpha:\t\t\t0.3345104157924652\n", "beta:\t\t\t0.5019903182983398\n", "#################################\n", "\n", "Learning Rate:\t0.001\n", "Optimizer:\tADAM\n", "Scheduler:\tPOLYNOMIAL\n", "\n", "torch seed: 322168311612641462\n", "\n", "Epoch 0 | LR 0.0009999\n", "physics loss:\t\t0.004353428856084084\n", "observation loss:\t1.0260840090518966\n", "loss:\t\t\t1.0304374379079806\n", "---------------------------------\n", "alpha:\t\t\t0.6746350526809692\n", "beta:\t\t\t0.04816503822803497\n", "#################################\n", "\n", "Epoch 1000 | LR 0.0008998999999999944\n", "physics loss:\t\t3.9179068226871085e-05\n", "observation loss:\t0.002654112009126786\n", 
"loss:\t\t\t0.002693291077353657\n", "---------------------------------\n", "alpha:\t\t\t0.3079822063446045\n", "beta:\t\t\t0.4635222554206848\n", "#################################\n", "\n", "Epoch 2000 | LR 0.0007998999999999963\n", "physics loss:\t\t3.8540852835034296e-05\n", "observation loss:\t0.00013981131784172578\n", "loss:\t\t\t0.00017835217067676008\n", "---------------------------------\n", "alpha:\t\t\t0.3143658936023712\n", "beta:\t\t\t0.47145596146583557\n", "#################################\n", "\n", "Epoch 3000 | LR 0.000699900000000002\n", "physics loss:\t\t3.96539337886877e-05\n", "observation loss:\t0.00017606467951664112\n", "loss:\t\t\t0.00021571861330532883\n", "---------------------------------\n", "alpha:\t\t\t0.3206492066383362\n", "beta:\t\t\t0.4807424545288086\n", "#################################\n", "\n", "Epoch 4000 | LR 0.0005999000000000103\n", "physics loss:\t\t3.6208046313102357e-05\n", "observation loss:\t2.9052013833495302e-05\n", "loss:\t\t\t6.526006014659766e-05\n", "---------------------------------\n", "alpha:\t\t\t0.32617223262786865\n", "beta:\t\t\t0.48839297890663147\n", "#################################\n", "\n", "Epoch 5000 | LR 0.0004999000000000169\n", "physics loss:\t\t3.606110511323864e-05\n", "observation loss:\t6.80326679142837e-05\n", "loss:\t\t\t0.00010409377302752235\n", "---------------------------------\n", "alpha:\t\t\t0.3291519284248352\n", "beta:\t\t\t0.49368634819984436\n", "#################################\n", "\n", "Epoch 6000 | LR 0.0003999000000000128\n", "physics loss:\t\t3.35749884995912e-05\n", "observation loss:\t1.5032820240048677e-05\n", "loss:\t\t\t4.860780873963988e-05\n", "---------------------------------\n", "alpha:\t\t\t0.33155524730682373\n", "beta:\t\t\t0.49695876240730286\n", "#################################\n", "\n", "Epoch 7000 | LR 0.00029990000000000366\n", "physics loss:\t\t3.293874649611295e-05\n", "observation loss:\t1.2791444045549445e-05\n", 
"loss:\t\t\t4.57301905416624e-05\n", "---------------------------------\n", "alpha:\t\t\t0.3327440917491913\n", "beta:\t\t\t0.4990793764591217\n", "#################################\n", "\n", "Epoch 8000 | LR 0.0001998999999999976\n", "physics loss:\t\t3.0445056059998848e-05\n", "observation loss:\t1.1837779554203423e-05\n", "loss:\t\t\t4.228283561420227e-05\n", "---------------------------------\n", "alpha:\t\t\t0.33326423168182373\n", "beta:\t\t\t0.4986100196838379\n", "#################################\n", "\n", "Epoch 9000 | LR 9.989999999999895e-05\n", "physics loss:\t\t3.4224290355370304e-05\n", "observation loss:\t1.1164133278788439e-05\n", "loss:\t\t\t4.5388423634158745e-05\n", "---------------------------------\n", "alpha:\t\t\t0.3327571749687195\n", "beta:\t\t\t0.49790158867836\n", "#################################\n", "\n", "Learning Rate:\t0.001\n", "Optimizer:\tADAM\n", "Scheduler:\tPOLYNOMIAL\n", "\n", "torch seed: 18082426015990684677\n", "\n", "Epoch 0 | LR 0.0009999\n", "physics loss:\t\t0.002102045101239928\n", "observation loss:\t1.0074584153782482\n", "loss:\t\t\t1.009560460479488\n", "---------------------------------\n", "alpha:\t\t\t0.4305820167064667\n", "beta:\t\t\t0.0871502012014389\n", "#################################\n", "\n", "Epoch 1000 | LR 0.0008998999999999944\n", "physics loss:\t\t0.00010882294992238554\n", "observation loss:\t0.0003838087128523165\n", "loss:\t\t\t0.000492631662774702\n", "---------------------------------\n", "alpha:\t\t\t0.25254976749420166\n", "beta:\t\t\t0.37877026200294495\n", "#################################\n", "\n", "Epoch 2000 | LR 0.0007998999999999963\n", "physics loss:\t\t4.470430183524086e-05\n", "observation loss:\t8.520396575061276e-05\n", "loss:\t\t\t0.00012990826758585363\n", "---------------------------------\n", "alpha:\t\t\t0.3076499104499817\n", "beta:\t\t\t0.4608965218067169\n", "#################################\n", "\n", "Epoch 3000 | LR 0.000699900000000002\n", "physics 
loss:\t\t3.952150725784859e-05\n", "observation loss:\t0.00031915133040270665\n", "loss:\t\t\t0.00035867283766055527\n", "---------------------------------\n", "alpha:\t\t\t0.32767054438591003\n", "beta:\t\t\t0.49167829751968384\n", "#################################\n", "\n", "Epoch 4000 | LR 0.0005999000000000103\n", "physics loss:\t\t3.805555074621392e-05\n", "observation loss:\t0.0002909003780581857\n", "loss:\t\t\t0.0003289559288043996\n", "---------------------------------\n", "alpha:\t\t\t0.3325372040271759\n", "beta:\t\t\t0.4990527927875519\n", "#################################\n", "\n", "Epoch 5000 | LR 0.0004999000000000169\n", "physics loss:\t\t3.51913248610847e-05\n", "observation loss:\t0.00016142023601472237\n", "loss:\t\t\t0.00019661156087580708\n", "---------------------------------\n", "alpha:\t\t\t0.33315038681030273\n", "beta:\t\t\t0.49953320622444153\n", "#################################\n", "\n", "Epoch 6000 | LR 0.0003999000000000128\n", "physics loss:\t\t3.46449333293224e-05\n", "observation loss:\t8.326499355363324e-05\n", "loss:\t\t\t0.00011790992688295564\n", "---------------------------------\n", "alpha:\t\t\t0.33181366324424744\n", "beta:\t\t\t0.49867501854896545\n", "#################################\n", "\n", "Epoch 7000 | LR 0.00029990000000000366\n", "physics loss:\t\t3.2408479982490445e-05\n", "observation loss:\t3.4671786218861615e-05\n", "loss:\t\t\t6.708026620135206e-05\n", "---------------------------------\n", "alpha:\t\t\t0.3313221335411072\n", "beta:\t\t\t0.4974777400493622\n", "#################################\n", "\n", "Epoch 8000 | LR 0.0001998999999999976\n", "physics loss:\t\t3.173604840422672e-05\n", "observation loss:\t3.083526984996649e-05\n", "loss:\t\t\t6.257131825419322e-05\n", "---------------------------------\n", "alpha:\t\t\t0.33108678460121155\n", "beta:\t\t\t0.49706247448921204\n", "#################################\n", "\n", "Epoch 9000 | LR 9.989999999999895e-05\n", "physics 
loss:\t\t3.152055695069558e-05\n", "observation loss:\t2.9993030278595605e-05\n", "loss:\t\t\t6.151358722929119e-05\n", "---------------------------------\n", "alpha:\t\t\t0.3311203718185425\n", "beta:\t\t\t0.49699151515960693\n", "#################################\n", "synthetic & 0.0043\n" ] } ], "source": [ "# Train the DINN five times (a fresh torch seed is reported per run) and\n", "# report the run-averaged relative error of the recovered trajectories.\n", "params = []\n", "predictions = []\n", "for iteration in range(5):\n", "    problem = SIRProblem(dataset)\n", "    plotter = Plotter()\n", "\n", "    dinn = DINN(3, dataset, ['alpha', 'beta'], problem, plotter)\n", "    dinn.configure_training(1e-3, 10000, scheduler_class=Scheduler.POLYNOMIAL, verbose=True)\n", "    dinn.train(verbose=True)\n", "    #dinn.save_training_process('Synthetic_parameters')\n", "\n", "    # Learned SIR parameters (alpha, beta) of this run.\n", "    params.append((dinn.get_regulated_param('alpha').item(), dinn.get_regulated_param('beta').item()))\n", "    pred = (dinn.get_output(0),\n", "            dinn.get_output(1),\n", "            dinn.get_output(2))\n", "    predictions.append([d.detach().cpu().numpy() for d in dataset.get_denormalized_data(pred)])\n", "# NOTE(review): covid_data[0] is presumably the time axis, with rows 1: the\n", "# S, I, R reference trajectories (consistent with the PandemicDataset call).\n", "print(\"synthetic\", \"&\", '{0:.4f}'.format(get_error(np.array(predictions), np.array(covid_data[1:]))))" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "\n", "\n", "#param_matrix = np.array(params)\n", "#with open(f'./results/synthetic_parameters.csv', 'w', newline='') as csvfile:\n", "# writer = csv.writer(csvfile, delimiter=',')\n", "# for row in param_matrix:\n", "# writer.writerow(row)" ] } ], "metadata": { "kernelspec": { "display_name": "PINN", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.11.7" } }, "nbformat": 4, "nbformat_minor": 2 }