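"""Train DINN models on COVID-19 incidence data to infer the time-dependent
reproduction number R_t via a reduced SIR model.

The script either fits synthetic incidence data (DO_SYNTHETIC) or the RKI
(Robert Koch Institute) case data of the 16 German federal states (DO_STATES).
Pass "1" on the command line to process the second half of the state list, so
the workload can be split across two processes.
"""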
import csv
import sys

import numpy as np
import torch

from src.dataset import PandemicDataset, Norms
from src.problem import ReducedSIRProblem
from src.dinn import DINN, Scheduler, Activation
ALPHA = [1 / 14, 1 / 5]  # recovery rates, i.e. infectious periods of 14 and 5 days
DO_STATES = True
DO_SYNTHETIC = False
ITERATIONS = 13  # number of repeated training runs per configuration
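# Running the script with "1" as a command-line argument starts at the ninth
# state, so two processes can split the 16 states between them.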
state_starting_index = 0
if "1" in sys.argv:
    state_starting_index = 8
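# Population of each German federal state.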
STATE_LOOKUP = {'Schleswig_Holstein': 2897000,
                'Hamburg': 1841000,
                'Niedersachsen': 7982000,
                'Bremen': 569352,
                'Nordrhein_Westfalen': 17930000,
                'Hessen': 6266000,
                'Rheinland_Pfalz': 4085000,
                'Baden_Wuerttemberg': 11070000,
                'Bayern': 13080000,
                'Saarland': 990509,
                'Berlin': 3645000,
                'Brandenburg': 2641000,
                'Mecklenburg_Vorpommern': 1610000,
                'Sachsen': 4078000,
                'Sachsen_Anhalt': 2208000,
                'Thueringen': 2143000}
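# Training on synthetic incidence data with a known recovery rate (alpha = 1/3).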
if DO_SYNTHETIC:
    alpha = 1 / 3
    covid_data = np.genfromtxt('./datasets/I_data.csv', delimiter=',')
    for i in range(ITERATIONS):
        dataset = PandemicDataset('Synthetic I',
                                  ['I'],
                                  7.6e6,
                                  *covid_data,
                                  norm_name=Norms.CONSTANT,
                                  use_scaled_time=True)
        problem = ReducedSIRProblem(dataset, alpha)
        dinn = DINN(2,
                    dataset,
                    [],
                    problem,
                    None,
                    state_variables=['R_t'],
                    hidden_size=100,
                    hidden_layers=4,
                    activation_layer=torch.nn.Tanh(),
                    activation_output=Activation.POWER)
        dinn.configure_training(1e-3,
                                20000,
                                scheduler_class=Scheduler.POLYNOMIAL,
                                lambda_physics=1e-6,
                                verbose=True)
        dinn.train(verbose=True, do_split_training=True)
        dinn.save_training_process(f'synthetic_{i}')
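        # Optional export of the inferred R_t for the synthetic runs (currently disabled):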
        # r_t = dinn.get_output(1).detach().cpu().numpy()
        # with open(f'./results/synthetic_{i}.csv', 'w', newline='') as csvfile:
        #     writer = csv.writer(csvfile, delimiter=',')
        #     writer.writerow(r_t)
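# Training on the real per-state RKI data. Iterations 0-2 (and iteration 3 for
# the first three states) were completed in earlier runs and are skipped here.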
for iteration in range(ITERATIONS):
    if iteration <= 2:
        print('skipping the first three iterations, as they were already done')
        continue
    if DO_STATES:
        for state_idx in range(state_starting_index, state_starting_index + 8):
            state = list(STATE_LOOKUP.keys())[state_idx]
            exclude = ['Schleswig_Holstein', 'Hamburg', 'Niedersachsen']
            if iteration == 3 and state in exclude:
                print(f'skipping {state} in the third iteration, as it was already done')
                continue
            for i, alpha in enumerate(ALPHA):
                print(f'training for {state} ({state_idx}), alpha: {alpha}, iter: {iteration}')
                covid_data = np.genfromtxt(f'./datasets/I_RKI_{state}_1_{int(1/alpha)}.csv', delimiter=',')
                dataset = PandemicDataset(state,
                                          ['I'],
                                          STATE_LOOKUP[state],
                                          *covid_data,
                                          norm_name=Norms.CONSTANT,
                                          use_scaled_time=True)
                problem = ReducedSIRProblem(dataset, alpha)
                dinn = DINN(2,
                            dataset,
                            [],
                            problem,
                            None,
                            state_variables=['R_t'],
                            hidden_size=100,
                            hidden_layers=4,
                            activation_layer=torch.nn.Tanh(),
                            activation_output=Activation.POWER)
                dinn.configure_training(1e-3,
                                        25000,
                                        scheduler_class=Scheduler.POLYNOMIAL,
                                        lambda_obs=1e2,  # weight of the data-fit loss term
                                        lambda_physics=1e-6,  # weight of the ODE residual loss term
                                        verbose=True)
                dinn.train(verbose=True, do_split_training=True)
                dinn.save_training_process(f'{state}_{i}_{iteration}')
                # Write the inferred R_t trajectory to a CSV, one run per file.
                r_t = dinn.get_output(1).detach().cpu().numpy()
                with open(f'./results/{state}_{i}_{iteration}.csv', 'w', newline='') as csvfile:
                    writer = csv.writer(csvfile, delimiter=',')
                    writer.writerow(r_t)