{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "import pandas as pd\n", "from skimage import io\n", "import numpy as np\n", "from PIL import Image\n", "import os\n", "import pickle as pkl\n", "import time\n", "import random\n", "import tqdm\n", "import datetime" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Datastream Creation\n", "This notebook was used to create the datasteams and look at distributions. \n", "Pleas also see the data/README.md file for further detail on how the data was used. \n" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "images_root = '/home/AMMOD_data/camera_traps/BayerWald/G-Fallen/MDcrops/'\n", "global data_dir_path\n", "data_dir_path = os.path.join(os.getcwd(), '../../data/')\n", "train_stream_file = data_dir_path+'data_stream_files/cv0_expsize128_crop_train_stream.pkl'\n", "test_stream_file = data_dir_path+'data_stream_files/cv0_expsize128_crop_test_stream.pkl'\n", "label_dict_path = data_dir_path+'label_dictionaries/BIRDS_11_Species.pkl'\n", "label_dict_path_cal = data_dir_path+'label_dictionaries/CALTECH_LABEL_DICT.pkl'\n", "\n", "with open(label_dict_path, 'rb') as p:\n", " label_dict = pkl.load(p)\n", "with open(label_dict_path_cal, 'rb') as p:\n", " label_dict_cal = pkl.load(p)\n" ] }, { "cell_type": "code", "execution_count": 21, "metadata": {}, "outputs": [], "source": [ "def filter_bw_data(md_threshold=0.9, only_species=False, crops=False):\n", " # This is the function that filters the raw data from the \n", " # all_data_MD.csv file using the megadetector max cofidence levels and other columns \n", " # and returns a dataframe with all data which is then further used to create the cross-validation splits\n", " \n", " #all_data_MD_path = data_dir_path+ 'raw_data_csv_files/all_data_MD.csv'\n", " all_data_MD_path = '/home/boehlke/AMMOD/cam_trap_classification/data/csv_files/all_data_MD.csv'\n", " all_data_MD = pd.read_csv(all_data_MD_path, low_memory=False)\n", " all_data_MD = all_data_MD.drop(all_data_MD[all_data_MD.broken==1].index)\n", " data_w_animal = all_data_MD.drop(all_data_MD[all_data_MD.series_w_animal==0].index)\n", " data_w_animal = data_w_animal.drop(data_w_animal[data_w_animal.label==-1].index)\n", " data_w_animal = data_w_animal.drop(data_w_animal[data_w_animal.MD_category!=1].index)\n", " data_w_animal = data_w_animal.drop(data_w_animal[data_w_animal.MD_max_conf<=md_threshold].index)\n", " \n", " #later on we decided to take out these two classes because they had very few instances. 
\n", " data_w_animal = data_w_animal.drop(data_w_animal[data_w_animal.label==4].index) #dog\n", " data_w_animal = data_w_animal.drop(data_w_animal[data_w_animal.label==7].index) #badger\n", " #data_w_animal = data_w_animal.drop(data_w_animal[data_w_animal.label==5].index) #lynx\n", " #lynx is also a small class but was kept in the current experiments \n", " \n", " # the labels had to be adjusted such that the labels for the used data range from 0 to 10\n", " data_w_animal.label = data_w_animal.label.replace({12 :7, 11:4})\n", "\n", " \n", " \n", " data = data_w_animal\n", " if not only_species:\n", " # originaly several experiments were planned investigating how empty images (images without animals)\n", " # should best be handled\n", " # in this section sequences without animals in them (based on original annotaions) were filtered\n", " # this was later ignored as we focised on species classificaion only\n", " # when using this data the label dict should be edited\n", " \n", " \n", " data_wo_animal = all_data_MD.drop(all_data_MD[all_data_MD.series_w_animal==1].index)\n", " empty_wo_detection = data_wo_animal.drop(data_wo_animal[data_wo_animal.MD_category!=-1].index)\n", " empty_w_detection = data_wo_animal.drop(data_wo_animal[data_wo_animal.MD_category==-1].index)\n", " empty_w_detection = empty_w_detection.drop(empty_w_detection[empty_w_detection.MD_max_conf>=0.5].index)\n", " empty_data = pd.concat([empty_wo_detection, empty_w_detection], ignore_index=True)\n", " empty_data['label']=13\n", " humans = data_wo_animal.drop(data_wo_animal[data_wo_animal.MD_category!=2].index)\n", " humans = humans.drop(humans[humans.MD_max_conf<=md_threshold].index)\n", " humans['label']=14\n", " vehicle = data_wo_animal.drop(data_wo_animal[data_wo_animal.MD_category!=3].index)\n", " vehicle = vehicle.drop(vehicle[vehicle.MD_max_conf<=md_threshold].index)\n", " vehicle['label']=15\n", " #print('vehicle', vehicle.shape, unique_series_count(vehicle))\n", " data = pd.concat([data_w_animal, empty_data,humans,vehicle ], ignore_index=True)\n", " \n", " # only the megadetector bbox with the highest confidence was used in our experiments\n", " # meaning if multiple individuals were detected by the megadetector only one might be used in the data\n", " data['crop_file'] =data['file'].str.replace('.JPG', '.JPG___crop00_mdv4.1.jpg')\n", " \n", " return data" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "def unique_series_count(data):\n", " data_seq_wise = data.drop_duplicates(subset='series_code', ignore_index=True)\n", " return ' '+str(data_seq_wise.shape[0])" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [], "source": [ "def filter_caltech_data(all_data_MD):\n", " # this is a similar function to the one above for filtering the caltech cameratrap \n", " # dataset based on a csv file created to be contain similar columns as the one for the BW dataset\n", " \n", " all_data_MD = all_data_MD.drop(all_data_MD[all_data_MD.confidence<0.9].index)\n", " all_data_MD = all_data_MD.drop(all_data_MD[all_data_MD.datetime =='11 11'].index)\n", " all_data_MD_seq_wise = all_data_MD.drop_duplicates(subset='series_code', ignore_index=True)\n", " unique_label, label_counts = np.unique(all_data_MD_seq_wise['label'], return_counts=True)\n", " unique_label = unique_label[label_counts>30]\n", " all_data_MD = all_data_MD.drop(all_data_MD[np.logical_not(np.isin(all_data_MD.label, unique_label))].index)\n", " unique_label, label_counts = 
"cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "def unique_series_count(data):\n", " data_seq_wise = data.drop_duplicates(subset='series_code', ignore_index=True)\n", " return ' '+str(data_seq_wise.shape[0])" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [], "source": [ "def filter_caltech_data(all_data_MD):\n", " # this is a similar function to the one above for filtering the caltech cameratrap \n", " # dataset based on a csv file created to contain similar columns as the one for the BW dataset\n", " \n", " all_data_MD = all_data_MD.drop(all_data_MD[all_data_MD.confidence<0.9].index)\n", " all_data_MD = all_data_MD.drop(all_data_MD[all_data_MD.datetime =='11 11'].index)\n", " all_data_MD_seq_wise = all_data_MD.drop_duplicates(subset='series_code', ignore_index=True)\n", " unique_label, label_counts = np.unique(all_data_MD_seq_wise['label'], return_counts=True)\n", " unique_label = unique_label[label_counts>30]\n", " all_data_MD = all_data_MD.drop(all_data_MD[np.logical_not(np.isin(all_data_MD.label, unique_label))].index)\n", " unique_label, label_counts = np.unique(all_data_MD['label'], return_counts=True)\n", "\n", " return all_data_MD\n" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [], "source": [ "def flattened(list_w_sublists):\n", " # flattens a list with sublists\n", " flattened = []\n", " for item in list_w_sublists:\n", " if isinstance(item, list):\n", " for val in item:\n", " flattened.append(val)\n", " else:\n", " flattened.append(item)\n", " return flattened" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [], "source": [ "# helper that maps a series_code to an integer index, appending codes that have not been seen before\n", "series_code_list = np.array([])\n", "def get_int_series_code(series_code):\n", " global series_code_list\n", " idx = np.argwhere(series_code_list==series_code)\n", " print(idx)\n", " if idx.size==0:\n", " idx = series_code_list.shape[0]\n", " series_code_list = np.concatenate([series_code_list, [series_code]])\n", " print(series_code_list)\n", " return idx\n", " " ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [], "source": [ "def get_cv_exp_stream_dict_from_cls_data_dict(cls_data_dict, exp_size=128, cv_splits=5, seed=1, caltech=False):\n", " \n", " # this function uses the cls_data_dict with {cls_int_label: cls_dataframe} entries \n", " # and creates cross validation splits as defined by the number cv_splits\n", " # all data is returned in form of a dictionary that has keys in the range(cv_splits) \n", " # with the data as values\n", " \n", " \n", " train_split_data_dict = {}\n", " test_split_data_dict = {}\n", " val_split_data_dict = {}\n", " \n", " for i in range(cv_splits):\n", " train_split_data_dict[i] = pd.DataFrame(columns = cls_data_dict[0].columns)\n", " test_split_data_dict[i] = pd.DataFrame(columns = cls_data_dict[0].columns)\n", " val_split_data_dict[i] = pd.DataFrame(columns = cls_data_dict[0].columns)\n", " \n", " for label, cls_data in cls_data_dict.items():\n", " cls_series_codes = np.unique(cls_data.series_code)\n", " np.random.seed(seed)\n", " np.random.shuffle(cls_series_codes)\n", " nr_of_series = cls_series_codes.shape[0]\n", " nr_series_in_split = int((1/cv_splits)*nr_of_series)\n", " \n", " for i in range(cv_splits):\n", " \n", " if i==cv_splits-1:\n", " split_series_codes_val = cls_series_codes[:nr_series_in_split]\n", " split_series_codes_test = cls_series_codes[-nr_series_in_split:]\n", " split_series_codes = np.concatenate([split_series_codes_val, split_series_codes_test])\n", "\n", " else:\n", " split_series_codes = cls_series_codes[i*nr_series_in_split:(i+2)*nr_series_in_split] \n", " split_series_codes_test = split_series_codes[:nr_series_in_split]\n", " split_series_codes_val = split_series_codes[nr_series_in_split:]\n", "\n", " split_test_data = cls_data.drop(cls_data[np.logical_not(cls_data.series_code.isin(split_series_codes_test.tolist()))].index)\n", " split_val_data = cls_data.drop(cls_data[np.logical_not(cls_data.series_code.isin(split_series_codes_val.tolist()))].index)\n", " split_train_data = cls_data.drop(cls_data[cls_data.series_code.isin(split_series_codes.tolist())].index)\n", "\n", " train_split_data_dict[i] = pd.concat([train_split_data_dict[i], split_train_data], ignore_index=True)\n", " test_split_data_dict[i] = pd.concat([test_split_data_dict[i], split_test_data], ignore_index=True)\n", " val_split_data_dict[i] = pd.concat([val_split_data_dict[i], split_val_data], ignore_index=True)\n", "\n", " total_data = 0\n", "\n", " total_data = train_split_data_dict[i].shape[0]+test_split_data_dict[i].shape[0]+val_split_data_dict[i].shape[0]\n", " cv_train_streams = {}\n", " 
cv_train_streams_crop = {}\n", " cv_test_data_winter = {}\n", " cv_val_data_winter = {}\n", " cv_test_data_crop_winter = {}\n", " cv_val_data_crop_winter = {}\n", " \n", " cv_test_data_summer = {}\n", " cv_val_data_summer = {}\n", " cv_test_data_crop_summer = {}\n", " cv_val_data_crop_summer = {}\n", " \n", " cv_summer_exp_list = {}\n", " cv_winter_exp_list = {}\n", " summer_start = datetime.datetime.strptime('04-21', '%m-%d')\n", " summer_end = datetime.datetime.strptime('09-21', '%m-%d')\n", "\n", " for i in range(cv_splits):\n", " summer_exp_list, winter_exp_list, train_stream, train_stream_crop = get_train_stream(train_split_data_dict[i], exp_size, caltech)\n", " test_data_winter = []\n", " val_data_winter = []\n", " test_data_crop_winter = []\n", " val_data_crop_winter = []\n", " test_data_summer = []\n", " val_data_summer = []\n", " test_data_crop_summer = []\n", " val_data_crop_summer = []\n", " \n", " for j, row in test_split_data_dict[i].iterrows():\n", " \n", " img_path = row.file\n", " img_path_crop = row.crop_file\n", " if not caltech: \n", " img_path = row.station+'/'+row.file\n", " img_path_crop = row.station+'/'+row.crop_file\n", " \n", " \n", " tupel = (img_path, row.label, row.series_code)\n", " tupel_crop = (img_path_crop, row.label, row.series_code)\n", " \n", " try:\n", " row_date = datetime.datetime.strptime(row.datetime[5:10], '%m-%d')\n", " \n", " if row_date > summer_start and row_date < summer_end:\n", " test_data_summer.append(tupel)\n", " test_data_crop_summer.append(tupel_crop)\n", " else: \n", " test_data_winter.append(tupel)\n", " test_data_crop_winter.append(tupel_crop)\n", " \n", " except ValueError: #handles weird case where 29.02 is not accepted as a valid date\n", " test_data_winter.append(tupel)\n", " test_data_crop_winter.append(tupel_crop)\n", " \n", " \n", " \n", " for j, row in val_split_data_dict[i].iterrows():\n", " img_path = row.file\n", " img_path_crop = row.crop_file\n", " if not caltech: \n", " img_path = row.station+'/'+row.file\n", " img_path_crop = row.station+'/'+row.crop_file\n", " \n", " tupel = (img_path, row.label, row.series_code)\n", " tupel_crop = (img_path_crop, row.label, row.series_code)\n", " try:\n", " row_date = datetime.datetime.strptime(row.datetime[5:10], '%m-%d')\n", " if row_date > summer_start and row_date < summer_end:\n", " val_data_summer.append(tupel)\n", " val_data_crop_summer.append(tupel_crop)\n", " else: \n", " val_data_winter.append(tupel)\n", " val_data_crop_winter.append(tupel_crop)\n", " \n", " except ValueError:\n", " val_data_winter.append(tupel)\n", " val_data_crop_winter.append(tupel_crop)\n", " \n", "\n", " cv_train_streams[i] = train_stream \n", " cv_train_streams_crop[i] = train_stream_crop\n", "\n", " cv_test_data_summer[i] = test_data_summer \n", " cv_val_data_summer[i] = val_data_summer\n", " cv_test_data_crop_summer[i] = test_data_crop_summer \n", " cv_val_data_crop_summer[i] = val_data_crop_summer\n", "\n", " cv_test_data_winter[i] = test_data_winter \n", " cv_val_data_winter[i] = val_data_winter\n", " cv_test_data_crop_winter[i] = test_data_crop_winter \n", " cv_val_data_crop_winter[i] = val_data_crop_winter\n", "\n", " cv_summer_exp_list[i] = summer_exp_list\n", " cv_winter_exp_list[i] = winter_exp_list\n", " \n", " return cv_summer_exp_list, cv_winter_exp_list, cv_train_streams, cv_train_streams_crop, cv_test_data_winter, cv_val_data_winter, cv_test_data_crop_winter, cv_val_data_crop_winter, cv_test_data_summer, cv_val_data_summer, cv_test_data_crop_summer, cv_val_data_crop_summer" ] }, { 
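"cell_type": "markdown", "metadata": {}, "source": [ "The train streams built below are lists of experiences, where each experience is a list of exp_size tuples of the form (image_path, int_label, series_code). The next cell is a small helper sketch that is not part of the original pipeline; it summarises such a stream, e.g. summarise_stream(cv_train_stream[0]) once the splits have been created further down." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def summarise_stream(train_stream):\n", "    # sketch: print basic statistics of a train stream\n", "    # (a list of experiences, each a list of (image_path, int_label, series_code) tuples)\n", "    print('number of experiences:', len(train_stream))\n", "    exp_lengths = [len(exp) for exp in train_stream]\n", "    print('experience sizes:', np.unique(exp_lengths))\n", "    labels = [label for exp in train_stream for (_, label, _) in exp]\n", "    uniques, counts = np.unique(labels, return_counts=True)\n", "    for lab, cnt in zip(uniques, counts):\n", "        print('label', lab, ':', cnt, 'images')" ] }, { 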
"cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [], "source": [ "def get_train_stream(train_data, exp_size, caltech=False):\n", " # this function takes all the data in the pandas dataframe train_data which is usally a subset of the \n", " # entire all_data_MD and creats a stream of data, i.e. a list of sublist with experience size 128 \n", " # which are ordered by their datetime stamp\n", " # each element in the sublist is a tuple containing the (path_to_image, int_label, seq_id)\n", " # the returned summer_exp_list and winter_exp_list contain the indexes of sublists that contain either only \n", " # summer data or only winter data\n", " \n", " train_data = train_data.sort_values('label', ascending=True)\n", " nr_images_lost = train_data.shape[0]%exp_size\n", " train_data = train_data.iloc[:-nr_images_lost,:]\n", " remaining_train_data = train_data\n", " seq_codes_and_dates = remaining_train_data.drop_duplicates('series_code',keep='first', ignore_index=True)\n", " seq_codes_and_dates = seq_codes_and_dates.sort_values('datetime')\n", " sequences = seq_codes_and_dates.series_code.tolist()\n", " summer_exp_list = []\n", " winter_exp_list = []\n", " train_stream = []\n", " train_stream_crop = []\n", " exp = []\n", " exp_crop = []\n", " summer_falg = 0\n", " winter_flag = 0\n", " \n", " # we defined three months before and after the summer solstice as summer\n", " summer_start = datetime.datetime.strptime('04-21', '%m-%d')\n", " summer_end = datetime.datetime.strptime('09-21', '%m-%d')\n", " for series_code in sequences:\n", " seq_data = remaining_train_data.drop(remaining_train_data[remaining_train_data.series_code!=series_code].index)\n", " \n", " if len(exp)+seq_data.shape[0]<=exp_size:\n", " for i, row in seq_data.iterrows():\n", " img_path = row.file\n", " img_path_crop = row.crop_file\n", " if not caltech: \n", " img_path = row.station+'/'+row.file\n", " img_path_crop = row.station+'/'+row.crop_file\n", " try:\n", " \n", " row_date = datetime.datetime.strptime(row.datetime[5:10], '%m-%d')\n", " if row_date > summer_start and row_date < summer_end:\n", " summer_flag = 1\n", " else: \n", " winter_flag = 1\n", " except ValueError:\n", " winter_flag = 1\n", " tupel = (img_path, row.label, row.series_code)\n", " tupel_crop = (img_path_crop, row.label, row.series_code)\n", " exp.append(tupel)\n", " exp_crop.append(tupel_crop)\n", " \n", " if len(exp)==exp_size:\n", " \n", " \n", " if summer_flag*winter_flag == 0:\n", " train_stream.append(exp)\n", " train_stream_crop.append(exp_crop)\n", " exp_nr = len(train_stream)-1\n", " if summer_flag == 1:\n", " summer_exp_list.append(exp_nr)\n", " else:\n", " winter_exp_list.append(exp_nr)\n", " else:\n", " print('experience lost')\n", " print(exp_nr)\n", " print(summer_flag)\n", " print(winter_flag)\n", " \n", " exp = []\n", " exp_crop = []\n", " summer_flag = 0\n", " winter_flag = 0\n", " \n", " else: \n", " needed = exp_size-len(exp)\n", " for i, row in seq_data.iloc[:needed,:].iterrows():\n", " img_path = row.file\n", " img_path_crop = row.crop_file\n", " if not caltech: \n", " img_path = row.station+'/'+row.file\n", " img_path_crop = row.station+'/'+row.crop_file\n", "\n", " try:\n", " \n", " row_date = datetime.datetime.strptime(row.datetime[5:10], '%m-%d')\n", " if row_date > summer_start and row_date < summer_end:\n", " summer_flag = 1\n", " else: \n", " winter_flag = 1\n", " except ValueError:\n", " winter_flag = 1\n", " \n", " tupel = (img_path, row.label, row.series_code)\n", " tupel_crop = (img_path_crop, row.label, 
row.series_code)\n", " exp.append(tupel)\n", " exp_crop.append(tupel_crop)\n", " \n", " if summer_flag*winter_flag == 0:\n", " train_stream.append(exp)\n", " train_stream_crop.append(exp_crop)\n", " exp_nr = len(train_stream)-1\n", " if summer_flag == 1:\n", " summer_exp_list.append(exp_nr)\n", " else:\n", " winter_exp_list.append(exp_nr)\n", " else:\n", " #experiences with summer and winter data are removed\n", " \n", " print('experience lost')\n", " print(exp_nr)\n", " print(summer_flag)\n", " print(winter_flag)\n", " \n", " exp=[]\n", " exp_crop = []\n", " summer_flag = 0\n", " winter_flag = 0\n", " \n", " for i, row in seq_data.iloc[needed:,:].iterrows():\n", " img_path = row.file\n", " img_path_crop = row.crop_file\n", " if not caltech: \n", " img_path = row.station+'/'+row.file\n", " img_path_crop = row.station+'/'+row.crop_file\n", " try:\n", " row_date = datetime.datetime.strptime(row.datetime[5:10], '%m-%d')\n", " if row_date > summer_start and row_date < summer_end:\n", " summer_flag = 1\n", " else: \n", " winter_flag = 1\n", " except ValueError:\n", " winter_flag = 1\n", " tupel = (img_path, row.label, row.series_code)\n", " tupel_crop = (img_path_crop, row.label, row.series_code)\n", " exp.append(tupel)\n", " exp_crop.append(tupel_crop)\n", " remaining_train_data = remaining_train_data.drop(remaining_train_data[remaining_train_data.series_code==series_code].index)\n", " return summer_exp_list, winter_exp_list, train_stream, train_stream_crop\n" ] }, { "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [], "source": [ "def get_undersampled_datasets(max_number_of_seqs, all_data, seed=1):\n", " # creates a dictionary with integer class labels as keys \n", " # and a pandas dataframe as with all data for that class as the value\n", " # this is used to split the data classwise such that the distribution of classes\n", " # is roughly the same for each train-validation-test data\n", " \n", " cls_data_dict ={}\n", " nr_classes = np.unique(all_data['label']).shape[0]\n", " \n", " for i in np.unique(all_data['label']):\n", " cls_data = all_data.drop(all_data[all_data.label!=i].index)\n", " seqs = np.unique(cls_data.series_code)\n", " if seqs.shape[0] <= max_number_of_seqs:\n", " cls_data_dict[i]=cls_data\n", " else: \n", " np.random.seed(seed)\n", " selected_seqs = np.random.choice(seqs, max_number_of_seqs, replace=False)\n", " bool_selector = np.logical_not(cls_data.series_code.isin(list(selected_seqs)))\n", " cls_data = cls_data.drop(cls_data[bool_selector].index)\n", " cls_data_dict[i]=cls_data\n", "\n", " full_data = cls_data_dict[0]\n", " for i in range(1,nr_classes):\n", " full_data = pd.concat([full_data,cls_data_dict[i] ],ignore_index=True)\n", " return full_data, cls_data_dict" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Calling functions to create stream files" ] }, { "cell_type": "code", "execution_count": 23, "metadata": { "scrolled": true }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ ":52: FutureWarning: The default value of regex will change from True to False in a future version.\n", " data['crop_file'] =data['file'].str.replace('.JPG', '.JPG___crop00_mdv4.1.jpg')\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "experience lost\n", "98\n", "1\n", "1\n", "experience lost\n", "194\n", "1\n", "1\n", "experience lost\n", "94\n", "1\n", "1\n", "experience lost\n", "190\n", "1\n", "1\n", "experience lost\n", "96\n", "1\n", "1\n", "experience lost\n", "190\n", "1\n", "1\n", "experience lost\n", "97\n", 
"1\n", "1\n", "experience lost\n", "193\n", "1\n", "1\n", "experience lost\n", "100\n", "1\n", "1\n", "experience lost\n", "194\n", "1\n", "1\n" ] } ], "source": [ "# Here the five-fold cross validation splits where generated. \n", "cv_splits=5\n", "exp_size=128\n", "\n", "# -- For Caltech Cameratrap Data use this Block ---\n", "dest_path = data_dir_path+ 'data_stream_files/Caltech_stream_files/'\n", "all_data_MD = pd.read_csv( data_dir_path+ 'raw_data_csv_files/all_data_Caltech.csv')\n", "all_data_MD = filter_bw_data(md_threshold=0.9, only_species=True)\n", "all_data_MD_seq_wise = all_data_MD.drop_duplicates(subset='series_code', ignore_index=True)\n", "caltech=True\n", "\n", "# -- For Bavarian Forest Data use this Block ---\n", "#dest_path = data_dir_path+ 'data_stream_files/BW_stream_files/'\n", "#all_data_MD = pd.read_csv( data_dir_path+ 'raw_data_csv_files/all_data_MD.csv')\n", "#all_data_MD = filter_bw_data(md_threshold=0.9, only_species=True)\n", "#all_data_MD_seq_wise = all_data_MD.drop_duplicates(subset='series_code', ignore_index=True)\n", "#caltech=False\n", "\n", " \n", "nr_cut_off_classes = 0 \n", "# nr of classes that should be undersampled, if nr_cut_off_classes is one for example, \n", "# then the class with the most images will be undersampled to have as many images as the second largest class\n", "# this was used to create different scenarios with different levels of class imbalance \n", "\n", "uniques = np.unique(all_data_MD_seq_wise['label'], return_counts=True)\n", "counts = uniques[1][np.argsort(uniques[1])]\n", "max_number_of_seqs = counts[-(nr_cut_off_classes+1)]\n", "\n", "\n", "# all data is split class wise ensuring that the distribution of classes\n", "# is roughly the same for each train-validation-test data \n", "_, cls_data_dict = get_undersampled_datasets(max_number_of_seqs, all_data_MD)\n", "\n", "# all data is split into winter and summer data in order to use this information to evaluate \n", "# the accuracy when there is a domain shift in form of seasonal changes\n", "# all variables containing train_stream in the name are a list of lists where each element in the \n", "# sublists is a tupel tuple containing the (path_to_image, int_label, seq_id) information for each image\n", "# the test and validation data are a simple list with these tupel. 
\n", "# Further all data has a normal and a '_crop' version where the paths\n", "# in the paths in the tuple differ, pointing to crops of the original image\n", "# preliminary experiments have shown that using the crops of the images is beneficial to the predictive accuracy\n", "cv_summer_exp_list, cv_winter_exp_list, cv_train_stream, cv_train_stream_crop, cv_test_data_winter, cv_val_data_winter, cv_test_data_crop_winter, cv_val_data_crop_winter, cv_test_data_summer, cv_val_data_summer, cv_test_data_crop_summer, cv_val_data_crop_summer = get_cv_exp_stream_dict_from_cls_data_dict(cls_data_dict, cv_splits=cv_splits, exp_size=exp_size, caltech=caltech)\n", "# set the caltech variable to False wehrn woreking with the Bavarian Forest data (from all_data_MD)\n", "\n", "for i in range(cv_splits):\n", " file_name = 'cv'+str(i)+'_expsize'+str(exp_size)\n", " file_name2 = 'cv'+str(i)+'_expsize'+str(exp_size*3)\n", " # we wanted to investigate how the experience size influences continuous learning\n", " # for this two streams were created, both containing the exact data\n", " # for the larger experience size stream three experiences were concatenated\n", " # both training streams correspond to the same validation and test data \n", " nr_exp_lost = len(cv_train_stream[i])%3\n", " if nr_exp_lost !=0:\n", " cv_train_stream[i] = cv_train_stream[i][:-nr_exp_lost]\n", " cv_train_stream_crop[i] = cv_train_stream_crop[i][:-nr_exp_lost]\n", " \n", " train_stream_larger_exp_size = [cv_train_stream[i][j]+cv_train_stream[i][j+1]+cv_train_stream[i][j+2] for j in range(0, len(cv_train_stream[i])-2, 3)]\n", " train_stream_larger_exp_size_crop = [cv_train_stream_crop[i][j]+cv_train_stream_crop[i][j+1]+cv_train_stream_crop[i][j+2] for j in range(0, len(cv_train_stream_crop[i])-2, 3)]\n", " \n", "\n", " test_data_i = cv_test_data_winter[i]+ cv_test_data_summer[i]\n", " test_data_i_label = np.array(test_data_i)[:,1]\n", " val_data_i = cv_val_data_winter[i]+ cv_val_data_summer[i]\n", " val_data_i_label = np.array(val_data_i)[:,1]\n", "\n", " test_data_crop_i = cv_test_data_crop_winter[i]+ cv_test_data_crop_summer[i]\n", " val_data_crop_i = cv_val_data_crop_winter[i]+ cv_val_data_crop_summer[i]\n", " season_split = {'summer' : cv_summer_exp_list[i], 'winter': cv_winter_exp_list[i]}\n", "\n", "\n", "\n", " \n", " with open(dest_path+file_name+'_train_stream.pkl', 'wb') as handle:\n", " pkl.dump(cv_train_stream[i], handle, protocol=pkl.HIGHEST_PROTOCOL)\n", " with open(dest_path+file_name+'_crop_train_stream.pkl', 'wb') as handle:\n", " pkl.dump(cv_train_stream_crop[i], handle, protocol=pkl.HIGHEST_PROTOCOL)\n", " with open(dest_path+file_name2+'_train_stream.pkl', 'wb') as handle:\n", " pkl.dump(train_stream_larger_exp_size, handle, protocol=pkl.HIGHEST_PROTOCOL)\n", " with open(dest_path+file_name2+'_crop_train_stream.pkl', 'wb') as handle:\n", " pkl.dump(train_stream_larger_exp_size_crop, handle, protocol=pkl.HIGHEST_PROTOCOL)\n", " with open(dest_path+file_name+'_exp_season_split_dict.pkl', 'wb') as handle:\n", " pkl.dump(season_split, handle, protocol=pkl.HIGHEST_PROTOCOL)\n", " \n", " with open(dest_path+file_name+'_test_stream.pkl', 'wb') as handle:\n", " pkl.dump(test_data_i, handle, protocol=pkl.HIGHEST_PROTOCOL)\n", " with open(dest_path+file_name+'_val_stream.pkl', 'wb') as handle:\n", " pkl.dump(val_data_i, handle, protocol=pkl.HIGHEST_PROTOCOL)\n", " \n", " with open(dest_path+file_name+'_crop_test_stream.pkl', 'wb') as handle:\n", " pkl.dump(test_data_crop_i, handle, protocol=pkl.HIGHEST_PROTOCOL)\n", " with 
open(dest_path+file_name+'_crop_val_stream.pkl', 'wb') as handle:\n", " pkl.dump(val_data_crop_i, handle, protocol=pkl.HIGHEST_PROTOCOL)\n", " \n", " \n", " with open(dest_path+file_name+'_winter_test_stream.pkl', 'wb') as handle:\n", " pkl.dump(cv_test_data_winter[i], handle, protocol=pkl.HIGHEST_PROTOCOL)\n", " with open(dest_path+file_name+'_winter_val_stream.pkl', 'wb') as handle:\n", " pkl.dump(cv_val_data_winter[i], handle, protocol=pkl.HIGHEST_PROTOCOL)\n", " \n", " with open(dest_path+file_name+'_summer_test_stream.pkl', 'wb') as handle:\n", " pkl.dump(cv_test_data_summer[i], handle, protocol=pkl.HIGHEST_PROTOCOL)\n", " with open(dest_path+file_name+'_summer_val_stream.pkl', 'wb') as handle:\n", " pkl.dump(cv_val_data_summer[i], handle, protocol=pkl.HIGHEST_PROTOCOL)\n", " \n", " with open(dest_path+file_name+'_winter_crop_test_stream.pkl', 'wb') as handle:\n", " pkl.dump(cv_test_data_crop_winter[i], handle, protocol=pkl.HIGHEST_PROTOCOL)\n", " with open(dest_path+file_name+'_winter_crop_val_stream.pkl', 'wb') as handle:\n", " pkl.dump(cv_val_data_crop_winter[i], handle, protocol=pkl.HIGHEST_PROTOCOL)\n", " \n", " with open(dest_path+file_name+'_summer_crop_test_stream.pkl', 'wb') as handle:\n", " pkl.dump(cv_test_data_crop_summer[i], handle, protocol=pkl.HIGHEST_PROTOCOL)\n", " with open(dest_path+file_name+'_summer_crop_val_stream.pkl', 'wb') as handle:\n", " pkl.dump(cv_val_data_crop_summer[i], handle, protocol=pkl.HIGHEST_PROTOCOL)\n", " " ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Visualising Distributions" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def get_data_dist_given_lablels(label_vector, title='Number of Images per Class where MD is', hline_val=None, label_dict=label_dict):\n", " all_data_classes = label_vector\n", " uniques = np.unique(all_data_classes, return_counts=True)\n", " counts = uniques[1][np.argsort(uniques[1])]\n", " print(counts)\n", " label = uniques[0][np.argsort(uniques[1])]\n", " x_vals = range(0,label.shape[0])\n", " #x_vals = x_vals[np.argsort(uniques[1])]\n", " label_list= []\n", " for count in counts:\n", " label_list.append(str(count))\n", " print(label_dict.keys())\n", " named_label =[]\n", " for lab in label:\n", " name = label_dict[lab]\n", " named_label.append(name)\n", "\n", " barWidth = 0.9\n", " plt.figure(figsize=(15,10))\n", " plt.title(title)\n", " plt.bar(x_vals, counts, width = barWidth)\n", " if hline_val is not None:\n", " for val in hline_val:\n", " plt.axhline(y=val, color='r', linestyle='--')\n", " plt.xticks([r for r in range(len(named_label))], named_label, rotation=90)\n", " plt.ylabel('Number of Images')\n", " # text on top of each bar\n", " for i in range(len(label_list)):\n", " plt.text(x = x_vals[i]-0.25 , y = counts[i]+0.1, s = label_list[i], size = 10)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "md_threshold=0.9\n", "all_data_MD = filter_bw_data(md_threshold=md_threshold, only_species=False)\n", "all_data_MD_seq_wise = all_data_MD.drop_duplicates(subset='series_code', ignore_index=True)\n", "get_data_dist_given_lablels(all_data_MD_seq_wise['label'], title='Number of Images per Class: Non-Species Classes '+ 'MD>'+str(md_threshold), hline_val=[4595,1529,25031])" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "get_data_dist_given_lablels(all_data_MD_seq_wise['label'], title='Number of Sequences per Class Caltech', label_dict=label_dict_cal)" ] }, { 
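"cell_type": "markdown", "metadata": {}, "source": [ "As an additional sanity check (a sketch that is not part of the original pipeline), the next cell reloads one of the saved train streams and prints the label distribution of its first experience. It assumes the pickle at train_stream_file, defined at the top of the notebook, has already been written by the cells above." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# sketch: reload a saved train stream and inspect it\n", "# assumes train_stream_file points to a stream pickle created by the cells above\n", "with open(train_stream_file, 'rb') as handle:\n", "    reloaded_stream = pkl.load(handle)\n", "print('experiences:', len(reloaded_stream), '| images in first experience:', len(reloaded_stream[0]))\n", "first_exp_labels = [label for (_, label, _) in reloaded_stream[0]]\n", "print(np.unique(first_exp_labels, return_counts=True))" ] }, { 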
"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "md_threshold=0.9\n", "all_data_MD = filter_bw_data(md_threshold=md_threshold, only_species=True)\n", "all_data_MD_seq_wise = all_data_MD.drop_duplicates(subset='series_code', ignore_index=True)\n", "get_data_dist_given_lablels(all_data_MD_seq_wise['label'], title='Number of Sequences per Class: Species Only '+ 'MD>'+str(md_threshold), hline_val=[261,1529,4595])" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "#distribution of sequence lengths in two datasets\n", "\n", "all_data_MD = filter_bw_data(md_threshold=0.9, only_species=True)\n", "all_data_MD_seq_wise = all_data_MD.drop_duplicates(subset='series_code', ignore_index=True)\n", "_, counts = np.unique(all_data_MD.series_code, return_counts=True)\n", "seq_len , seq_len_count = np.unique(counts, return_counts=True)\n", "seq_len_counts_more_than_5 = np.concatenate((seq_len_count[:4], np.sum(seq_len_count[4:]).reshape(1,)))\n", "\n", "\n", "all_data_MD = pd.read_csv('all_data_Caltech.csv')\n", "print(all_data_MD.shape)\n", "all_data_MD = filter_caltech_seqs(all_data_MD)\n", "_, counts = np.unique(all_data_MD.series_code, return_counts=True)\n", "seq_len , seq_len_count_cal = np.unique(counts, return_counts=True)\n", "\n", "\n", "fig, axs = plt.subplots(1,2)\n", "axs = axs.flatten()\n", "axs[0].bar(np.arange(1,6), seq_len_count_cal/np.sum(seq_len_count_cal))\n", "axs[1].bar(np.arange(1,6), seq_len_counts_more_than_5/np.sum(seq_len_counts_more_than_5))\n", "axs[0].set_xticks(np.arange(1,6))\n", "axs[1].set_xticks(np.arange(1,6))\n", "axs[1].set_xticklabels(labels=['1', '2', '3', '4', '>5'])\n", "axs[0].set_title('Caltech CT')\n", "axs[1].set_title('Bayerwald')\n", "fig.suptitle('Distribution of Seq Lengths')" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "#creating new label dict with cls_id -> cls_name as key -> value\n", "new_birds_11_species_dict = {}\n", "for key, val in label_dict.items():\n", " if val ==12:\n", " new_birds_11_species_dict[7]=key.replace('_', ' ').title()\n", " elif val ==11:\n", " new_birds_11_species_dict[4] = key.replace('_', ' ').title()\n", " elif val in range(11) and val != 4 and val != 7:\n", " new_birds_11_species_dict[val] = key.title().replace('_', ' ')\n", " \n", "new_birds_11_species_dict[1] = 'Bird'\n", "new_birds_11_species_dict.items()\n", "with open('/home/boehlke/AMMOD/cam_trap_classification/data/csv_files/BIRDS_11_Species.pkl', 'wb') as handle:\n", " pkl.dump(new_birds_11_species_dict, handle, protocol=pkl.HIGHEST_PROTOCOL)" ] } ], "metadata": { "kernelspec": { "display_name": "ava-cl2", "language": "python", "name": "ava-cl2" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.10" } }, "nbformat": 4, "nbformat_minor": 2 }