From e6b110aaaeca7eaca339fb0babbf31d5422852e4 Mon Sep 17 00:00:00 2001 From: Ari Azarafrooz Date: Sat, 5 Nov 2022 14:59:40 -0700 Subject: [PATCH] dnc-demon impl --- Dataset/.DS_Store | Bin 0 -> 6148 bytes Dataset/Bitmap/.DS_Store | Bin 0 -> 6148 bytes Dataset/Bitmap/AssociativeRecall.py | 72 ++ Dataset/Bitmap/BitmapTask.py | 82 ++ Dataset/Bitmap/BitmapTaskRepeater.py | 56 ++ Dataset/Bitmap/CopyTask.py | 49 ++ Dataset/Bitmap/KeyValue.py | 81 ++ Dataset/Bitmap/KeyValue2Way.py | 89 +++ Dataset/Bitmap/__init__.py | 0 Dataset/NLP/.DS_Store | Bin 0 -> 6148 bytes Dataset/NLP/.gitignore | 1 + Dataset/NLP/NLPTask.py | 153 ++++ Dataset/NLP/Vocabulary.py | 52 ++ Dataset/NLP/__init__.py | 0 Dataset/NLP/bAbi.py | 297 +++++++ Dataset/__init__.py | 1 + Models/.DS_Store | Bin 0 -> 6148 bytes Models/DNCA.py | 965 +++++++++++++++++++++++ Models/Demon.py | 140 ++++ Models/Information_Agents.py | 238 ++++++ Utils/.DS_Store | Bin 0 -> 6148 bytes Utils/ArgumentParser.py | 167 ++++ Utils/Collate.py | 75 ++ Utils/Debug.py | 133 ++++ Utils/Helpers.py | 24 + Utils/Index.py | 20 + Utils/Process.py | 51 ++ Utils/Profile.py | 48 ++ Utils/Saver.py | 233 ++++++ Utils/Seed.py | 31 + Utils/Visdom.py | 323 ++++++++ Utils/download.py | 189 +++++ Utils/gpu_allocator.py | 111 +++ Utils/lockfile.py | 41 + Utils/timer.py | 54 ++ Utils/universal.py | 263 +++++++ Visualize/.DS_Store | Bin 0 -> 6148 bytes Visualize/BitmapTask.py | 73 ++ Visualize/__init__.py | 1 + Visualize/preview.py | 64 ++ assets/demon.png | Bin 0 -> 92114 bytes memory_demon.py | 1070 ++++++++++++++++++++++++++ requirements.txt | 8 + 43 files changed, 5255 insertions(+) create mode 100644 Dataset/.DS_Store create mode 100644 Dataset/Bitmap/.DS_Store create mode 100644 Dataset/Bitmap/AssociativeRecall.py create mode 100644 Dataset/Bitmap/BitmapTask.py create mode 100644 Dataset/Bitmap/BitmapTaskRepeater.py create mode 100644 Dataset/Bitmap/CopyTask.py create mode 100644 Dataset/Bitmap/KeyValue.py create mode 100644 
Dataset/Bitmap/KeyValue2Way.py create mode 100644 Dataset/Bitmap/__init__.py create mode 100644 Dataset/NLP/.DS_Store create mode 100644 Dataset/NLP/.gitignore create mode 100644 Dataset/NLP/NLPTask.py create mode 100644 Dataset/NLP/Vocabulary.py create mode 100644 Dataset/NLP/__init__.py create mode 100644 Dataset/NLP/bAbi.py create mode 100644 Dataset/__init__.py create mode 100644 Models/.DS_Store create mode 100644 Models/DNCA.py create mode 100644 Models/Demon.py create mode 100644 Models/Information_Agents.py create mode 100644 Utils/.DS_Store create mode 100644 Utils/ArgumentParser.py create mode 100644 Utils/Collate.py create mode 100644 Utils/Debug.py create mode 100644 Utils/Helpers.py create mode 100644 Utils/Index.py create mode 100644 Utils/Process.py create mode 100644 Utils/Profile.py create mode 100644 Utils/Saver.py create mode 100644 Utils/Seed.py create mode 100644 Utils/Visdom.py create mode 100644 Utils/download.py create mode 100644 Utils/gpu_allocator.py create mode 100644 Utils/lockfile.py create mode 100644 Utils/timer.py create mode 100644 Utils/universal.py create mode 100644 Visualize/.DS_Store create mode 100644 Visualize/BitmapTask.py create mode 100644 Visualize/__init__.py create mode 100644 Visualize/preview.py create mode 100644 assets/demon.png create mode 100644 memory_demon.py create mode 100644 requirements.txt diff --git a/Dataset/.DS_Store b/Dataset/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..3f2c1654600a10121ec7da0bbda33594bf5bff12 GIT binary patch literal 6148 zcmeHK%}T>S5T0$TO({YT3VK`cTCfoX#Y>3wCL%`kpi&cBG#Im`P3@r+a@QB~MSLD- zcDG`UUd5jonEiHVXAbU@tgq08lYoOadNSEIB$)UkotaB5Di(^#8KHr>sp z+}holHs!>1-Bwd>yF1fq%~@IB*gNUmCHJX%G)xLSbXt}y&fp0rb4fh}gEUd;4caND z3`cMX$M25nuTIsb^C8I@8H~&TGr$ZK8L-!i@yl{r`Lrub2U5;7>6iD(#@{V@kHR7Lub{D^ageNyslZ m_!fc_U5e3{O7SYH7PL#6AbJ*agJ?nF9|27R56r-~GVltM>xQEM literal 0 HcmV?d00001 diff --git a/Dataset/Bitmap/.DS_Store b/Dataset/Bitmap/.DS_Store new file mode 100644 
index 0000000000000000000000000000000000000000..5008ddfcf53c02e82d7eee2e57c38e5672ef89f6 GIT binary patch literal 6148 zcmeH~Jr2S!425mzP>H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0to2xRh;%z)>hNBH`ckn8E`62 zC8+JqW=*$Qmxsr#d0oz2*KO40k=vTjE6&dT!O3~>kUXa9#c(R{_tUavaRIMr{II+y zf0QOFy~l_}_9Bas8DIvOfi-8q9(zu8&DY7xV+NRk-!nkxgG43tEanFF(SZ$JA8EWq zNP;%KB?zTO&th&6M^J=GMKr0xJ~4zzN58ajp2ggtNe7{4#_!mfg?*t2Jv;iP4hP{G z9lz7YCE41#vN)=>67>$1gyM38pDAeQ hQjD=wig!`9pkI=K=vmASq6dX90-6SHn1MfK-~)$;PA~uf literal 0 HcmV?d00001 diff --git a/Dataset/NLP/.gitignore b/Dataset/NLP/.gitignore new file mode 100644 index 0000000..06cf653 --- /dev/null +++ b/Dataset/NLP/.gitignore @@ -0,0 +1 @@ +cache diff --git a/Dataset/NLP/NLPTask.py b/Dataset/NLP/NLPTask.py new file mode 100644 index 0000000..419347e --- /dev/null +++ b/Dataset/NLP/NLPTask.py @@ -0,0 +1,153 @@ +# Copyright 2017 Robert Csordas. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# ============================================================================== + +import torch +import torch.nn.functional as F +import os +from .Vocabulary import Vocabulary +from Utils import Visdom +from Utils import universal as U + +import numpy as np + + +class NLPTask(torch.utils.data.Dataset): + def __init__(self): + super(NLPTask, self).__init__() + + self.my_dir = os.path.abspath(os.path.dirname(__file__)) + self.cache_dir = os.path.join(self.my_dir, "cache") + + if not os.path.isdir(self.cache_dir): + os.makedirs(self.cache_dir) + + self.vocabulary = self._load_vocabulary() + + self._preview = None + + def _load_vocabulary(self): + cache_file = os.path.join(self.cache_dir, "vocabulary.pth") + if not os.path.isfile(cache_file): + print("WARNING: Vocabulary not found. Removing cached files.") + for f in os.listdir(self.cache_dir): + f = os.path.join(self.cache_dir, f) + if f.endswith(".pth"): + print(" " + f) + os.remove(f) + return Vocabulary() + else: + return torch.load(cache_file) + + def save_vocabulary(self): + cache_file = os.path.join(self.cache_dir, "vocabulary.pth") + if os.path.isfile(cache_file): + os.remove(cache_file) + torch.save(self.vocabulary, cache_file) + + def loss(self, net_output, target): + s = list(net_output.size()) + return ( + F.cross_entropy( + net_output.view([s[0] * s[1], s[2]]), + target.view([-1]), + ignore_index=0, + reduction="sum", + ) + / s[0] + ) + + + def demon_loss(self, net_output, target, saved_actions, device): + """ + computes the loss for the demon + :param net_output: + :param target: + :param saved_actions: + :return: + """ + net_output = net_output.detach() + s = list(net_output.size()) + loss = F.cross_entropy( + net_output.view([s[0] * s[1], s[2]]), + target.view([-1]), + ignore_index=0, + reduction="none", + ).view(s[0], s[1]) + + policy_losses = [] # list to save actor (policy) loss + + discount_factor = 0.99 + for i in range(0, loss.size(1)): # computing expected total reward + discount_vector = 
torch.from_numpy( + np.array([np.power(discount_factor, i) for i in range(loss.size(1) - i)])).to(device) + policy_losses.append(((saved_actions[i].log_prob).squeeze(1) * (discount_vector * loss[:, i:]).mean(dim=1))) + + demon_loss = torch.stack(policy_losses).mean(dim=0) + + return demon_loss + + + def generate_preview_text(self, data, net_output): + input = U.to_numpy(data["input"][0]) + reference = U.to_numpy(data["output"][0]) + net_out = U.argmax(net_output[0], -1) + net_out = U.to_numpy(net_out) + + res = "" + start_index = 0 + + for i in range(input.shape[0]): + if reference[i] != 0: + if start_index < i: + end_index = i + while end_index > start_index and input[end_index] == 0: + end_index -= 1 + + if end_index > start_index: + sentence = ( + " ".join( + self.vocabulary.indices_to_sentence( + input[start_index:i].tolist() + ) + ) + .replace(" .", ".") + .replace(" ,", ",") + .replace(" ?", "?") + .split(". ") + ) + sentence = ". ".join([s.capitalize() for s in sentence]) + res += sentence + "
" + + start_index = i + 1 + + match = reference[i] == net_out[i] + res += '%s [%s]
' % ( + "green" if match else "red", + self.vocabulary.indices_to_sentence([net_out[i]])[0], + self.vocabulary.indices_to_sentence([reference[i]])[0], + ) + return res + + def visualize_preview(self, data, net_output): + res = self.generate_preview_text(data, net_output) + + if self._preview is None: + self._preview = Visdom.Text("Preview") + + self._preview.set(res) + + def set_dump_dir(self, dir): + pass diff --git a/Dataset/NLP/Vocabulary.py b/Dataset/NLP/Vocabulary.py new file mode 100644 index 0000000..5b77fdc --- /dev/null +++ b/Dataset/NLP/Vocabulary.py @@ -0,0 +1,52 @@ +# Copyright 2017 Robert Csordas. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# ============================================================================== + + +class Vocabulary: + def __init__(self): + self.words = {"-": 0, "?": 1, "": 2} + self.inv_words = {0: "-", 1: "?", 2: ""} + self.next_id = 3 + self.punctations = [".", "?", ","] + + def _process_word(self, w, add_words): + if not w.isalpha() and w not in self.punctations: + print("WARNING: word with unknown characters: %s", w) + w = "" + + if w not in self.words: + if add_words: + self.words[w] = self.next_id + self.inv_words[self.next_id] = w + self.next_id += 1 + else: + w = "" + + return self.words[w] + + def sentence_to_indices(self, sentence, add_words=True): + for p in self.punctations: + sentence = sentence.replace(p, " %s " % p) + + return [ + self._process_word(w, add_words) for w in sentence.lower().split(" ") if w + ] + + def indices_to_sentence(self, indices): + return [self.inv_words[i] for i in indices] + + def __len__(self): + return len(self.words) diff --git a/Dataset/NLP/__init__.py b/Dataset/NLP/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/Dataset/NLP/bAbi.py b/Dataset/NLP/bAbi.py new file mode 100644 index 0000000..013775f --- /dev/null +++ b/Dataset/NLP/bAbi.py @@ -0,0 +1,297 @@ +# Copyright 2017 Robert Csordas. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# ============================================================================== + +import os +import glob +import torch +from collections import namedtuple +import numpy as np +from .NLPTask import NLPTask +from Utils import Visdom + +Sentence = namedtuple("Sentence", ["sentence", "answer", "supporting_facts"]) + + +class bAbiDataset(NLPTask): + URL = "http://www.thespermwhale.com/jaseweston/babi/tasks_1-20_v1-2.tar.gz" + DIR_NAME = "tasks_1-20_v1-2" + + def __init__( + self, dirs=["en-10k"], sets=None, think_steps=0, dir_name=None, name=None + ): + super(bAbiDataset, self).__init__() + + self._test_res_win = None + self._test_plot_win = None + self._think_steps = think_steps + + if dir_name is None: + self._download() + dir_name = os.path.join(self.cache_dir, self.DIR_NAME) + + self.data = {} + for d in dirs: + self.data[d] = self._load_or_create(os.path.join(dir_name, d)) + + self.all_tasks = None + self.name = name + self.use(sets=sets) + + def _make_active_list(self, tasks, sets, dirs): + def verify(name, checker): + if checker is None: + return True + + if callable(checker): + return checker(name) + elif isinstance(checker, list): + return name in checker + else: + return name == checker + + res = [] + for dirname, setlist in self.data.items(): + if not verify(dirname, dirs): + continue + + for sname, tasklist in setlist.items(): + if not verify(sname, sets): + continue + + for task, data in tasklist.items(): + name = task.split("_")[0][2:] + if not verify(name, tasks): + continue + + res += [(d, dirname, task, sname) for d in data] + + return res + + def use(self, tasks=None, sets=None, dirs=None): + self.all_tasks = self._make_active_list(tasks=tasks, sets=sets, dirs=dirs) + + def __len__(self): + return len(self.all_tasks) + + def _get_seq(self, index): + return self.all_tasks[index] + + def _seq_to_nn_input(self, seq): + in_arr = [] + out_arr = [] + hasAnswer = False + for sentence in seq[0]: + in_arr += sentence.sentence + out_arr += [0] * 
len(sentence.sentence) + if sentence.answer is not None: + in_arr += [0] * (len(sentence.answer) + self._think_steps) + out_arr += [0] * self._think_steps + sentence.answer + hasAnswer = True + + in_arr = np.asarray(in_arr, np.int64) + out_arr = np.asarray(out_arr, np.int64) + + return { + "input": in_arr, + "output": out_arr, + "meta": {"dir": seq[1], "task": seq[2], "set": seq[3]}, + } + + def __getitem__(self, item): + seq = self._get_seq(item) + return self._seq_to_nn_input(seq) + + def _load_or_create(self, directory): + cache_name = directory.replace("/", "_") + cache_file = os.path.join(self.cache_dir, cache_name + ".pth") + if not os.path.isfile(cache_file): + print("bAbI: Loading %s" % directory) + res = self._load_dir(directory) + print("Write: ", cache_file) + self.save_vocabulary() + torch.save(res, cache_file) + else: + res = torch.load(cache_file) + return res + + def _download(self): + if not os.path.isdir(os.path.join(self.cache_dir, self.DIR_NAME)): + print(self.URL) + print("bAbi data not found. 
Downloading...") + import requests, tarfile, io + + request = requests.get( + self.URL, + headers={ + "User-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.80 Safari/537.36" + }, + ) + + decompressed_file = tarfile.open( + fileobj=io.BytesIO(request.content), mode="r|gz" + ) + decompressed_file.extractall(self.cache_dir) + print("Done") + + def _load_dir( + self, + directory, + parse_name=lambda x: x.split(".")[0], + parse_set=lambda x: x.split(".")[0].split("_")[-1], + ): + res = {} + for f in glob.glob(os.path.join(directory, "**", "*.txt"), recursive=True): + basename = os.path.basename(f) + task_name = parse_name(basename) + set = parse_set(basename) + print("Loading", f) + + s = res.get(set) + if s is None: + s = {} + res[set] = s + s[task_name] = self._load_task(f, task_name) + + return res + + def _load_task(self, filename, task_name): + task = [] + currTask = [] + + nextIndex = 1 + with open(filename, "r") as f: + for line in f: + line = [f.strip() for f in line.split("\t")] + line[0] = line[0].split(" ") + i = int(line[0][0]) + line[0] = " ".join(line[0][1:]) + + if i != nextIndex: + nextIndex = i + task.append(currTask) + currTask = [] + + isQuestion = len(line) > 1 + currTask.append( + Sentence( + self.vocabulary.sentence_to_indices(line[0]), + self.vocabulary.sentence_to_indices(line[1].replace(",", " ")) + if isQuestion + else None, + [int(f) for f in line[2].split(" ")] if isQuestion else None, + ) + ) + + nextIndex += 1 + return task + + def start_test(self): + return {} + + def veify_result(self, test, data, net_output): + _, net_output = net_output.max(-1) + + ref = data["output"] + + mask = 1.0 - ref.eq(0).float() + + correct = (torch.eq(net_output, ref).float() * mask).sum(-1) + total = mask.sum(-1) + + correct = correct.data.cpu().numpy() + total = total.data.cpu().numpy() + + for i in range(correct.shape[0]): + task = data["meta"][i]["task"] + if task not in test: + test[task] = {"total": 0, 
"correct": 0} + + d = test[task] + d["total"] += total[i] + d["correct"] += correct[i] + + def _ensure_test_wins_exists(self, legend=None): + if self._test_res_win is None: + n = ("[" + self.name + "]") if self.name is not None else "" + self._test_res_win = Visdom.Text("Test results" + n) + self._test_plot_win = Visdom.Plot2D("Test results" + n, legend=legend) + elif self._test_plot_win.legend is None: + self._test_plot_win.set_legend(legend=legend) + + def show_test_results(self, iteration, test): + res = {k: v["correct"] / v["total"] for k, v in test.items()} + + t = "" + + all_keys = list(res.keys()) + + num_keys = [k for k in all_keys if k.startswith("qa")] + tmp = [ + i[0] + for i in sorted( + enumerate(num_keys), key=lambda x: int(x[1][2:].split("_")[0]) + ) + ] + num_keys = [num_keys[j] for j in tmp] + + all_keys = num_keys + sorted([k for k in all_keys if not k.startswith("qa")]) + + err_precent = [(1.0 - res[k]) * 100.0 for k in all_keys] + + n_passed = sum([int(p <= 5) for p in err_precent]) + n_total = len(err_precent) + err_precent = err_precent + [sum(err_precent) / len(err_precent)] + all_keys += ["mean"] + + for i, k in enumerate(all_keys): + t += '%s: %.2f%%
' % ( + "green" if err_precent[i] <= 5 else "red", + k, + err_precent[i], + ) + + t += "
Total: %d of %d passed." % (n_passed, n_total) + + self._ensure_test_wins_exists( + legend=[i.split("_")[0] if i.startswith("qa") else i for i in all_keys] + ) + + self._test_res_win.set(t) + self._test_plot_win.add_point(iteration, err_precent) + + def state_dict(self): + if self._test_res_win is not None: + return { + "_test_res_win": self._test_res_win.state_dict(), + "_test_plot_win": self._test_plot_win.state_dict(), + } + else: + return {} + + def load_state_dict(self, state): + if state: + self._ensure_test_wins_exists() + self._test_res_win.load_state_dict(state["_test_res_win"]) + self._test_plot_win.load_state_dict(state["_test_plot_win"]) + self._test_plot_win.legend = None + + def visualize_preview(self, data, net_output): + res = self.generate_preview_text(data, net_output) + res = ("%s
" % data["meta"][0]["task"]) + res + if self._preview is None: + self._preview = Visdom.Text("Preview") + + self._preview.set(res) diff --git a/Dataset/__init__.py b/Dataset/__init__.py new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/Dataset/__init__.py @@ -0,0 +1 @@ + diff --git a/Models/.DS_Store b/Models/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..e98ece8ad9660db3ba541c5e16b02d198063c874 GIT binary patch literal 6148 zcmeHK%}N6?5T3NvZYe?!3VI88E!Zsz6)#JzFW`zERMy?L*u`~I`r{&{uy=hSU&QBe zCP|f2J$Ml*GcfrklbH?qvSbnf5bbHt1*idlgGyMaVDpX8IO&QMtcOtOZ*(DqE)1X# z!Adkc{v!jlcU9c20EY04d-t_}L6nV$QKBM@c#DQfJf1Wf@1jsFZETjEvQu?#{ejB- zNjRB~+u`V%dKXG1(ZUX+%ebEfwe3@tPQp0tk90!Z>to2xRh;%z)>hNBH`ckn8E`62 zC8+JqW=*$Qmxsr#d0oz2*KO40k=vTjE6&dT!O3~>kUXa9#c(R{_tUavaRIMr{II+y zf0QOFy~l_}_9Bas8DIvOfi-8q9(zu8&DY7xV+NRk-!nkxgG43tEanFF(SZ$JA8EWq zNP;%KB?zTO&th&6M^J=GMKr0xJ~4zzN58ajp2ggtNe7{4#_!mfg?*t2Jv;iP4hP{G z9lz7YCE41#vN)=>67>$1gyM38pDAeQ hQjD=wig!`9pkI=K=vmASq6dX90-6SHn1MfK-~)$;PA~uf literal 0 HcmV?d00001 diff --git a/Models/DNCA.py b/Models/DNCA.py new file mode 100644 index 0000000..6dd7a30 --- /dev/null +++ b/Models/DNCA.py @@ -0,0 +1,965 @@ +# The Initial DNC Copyright 2017 Robert Csordas. All Rights Reserved. +# The modification of the initial DNC implementation by Ari Azarafrooz. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# ============================================================================== + + +import torch +import torch.utils.data +import torch.nn.functional as F +import torch.nn.init as init +import functools +import math + + +def oneplus(t): + return F.softplus(t, 1, 20) + 1.0 + + +def get_next_tensor_part(src, dims, prev_pos=0): + if not isinstance(dims, list): + dims = [dims] + n = functools.reduce(lambda x, y: x * y, dims) + data = src.narrow(-1, prev_pos, n) + return ( + data.contiguous().view(list(data.size())[:-1] + dims) + if len(dims) > 1 + else data, + prev_pos + n, + ) + + +def split_tensor(src, shapes): + pos = 0 + res = [] + for s in shapes: + d, pos = get_next_tensor_part(src, s, pos) + res.append(d) + return res + + +def dict_get(dict, name): + return dict.get(name) if dict is not None else None + + +def dict_append(dict, name, val): + if dict is not None: + l = dict.get(name) + if not l: + l = [] + dict[name] = l + l.append(val) + + +def init_debug(debug, initial): + if debug is not None and not debug: + debug.update(initial) + + +def merge_debug_tensors(d, dim): + if d is not None: + for k, v in d.items(): + if isinstance(v, dict): + merge_debug_tensors(v, dim) + elif isinstance(v, list): + d[k] = torch.stack(v, dim) + + +def linear_reset(module, gain=1.0): + assert isinstance(module, torch.nn.Linear) + init.xavier_uniform_(module.weight, gain=gain) + s = module.weight.size(1) + if module.bias is not None: + module.bias.data.zero_() + + +_EPS = 1e-6 + + +class AllocationManager(torch.nn.Module): + def __init__(self): + super(AllocationManager, self).__init__() + self.usages = None + self.zero_usages = None + self.debug_sequ_init = False + self.one = None + + def _init_sequence(self, prev_read_distributions): + # prev_read_distributions size is [batch, n_heads, cell count] + s = prev_read_distributions.size() + if self.zero_usages is None or list(self.zero_usages.size()) != [s[0], s[-1]]: + self.zero_usages = torch.zeros( + s[0], s[-1], 
device=prev_read_distributions.device + ) + if self.debug_sequ_init: + self.zero_usages += torch.arange(0, s[-1]).unsqueeze(0) * 1e-10 + + self.usages = self.zero_usages + + def _init_consts(self, device): + if self.one is None: + self.one = torch.ones(1, device=device) + + def new_sequence(self): + self.usages = None + + def update_usages( + self, prev_write_distribution, prev_read_distributions, free_gates + ): + # Read distributions shape: [batch, n_heads, cell count] + # Free gates shape: [batch, n_heads] + + self._init_consts(prev_read_distributions.device) + phi = torch.addcmul( + self.one, -1, free_gates.unsqueeze(-1), prev_read_distributions + ).prod(-2) + # Phi is the free tensor, sized [batch, cell count] + + # If memory usage counter if doesn't exists + if self.usages is None: + self._init_sequence(prev_read_distributions) + # in first timestep nothing is written or read yet, so we don't need any further processing + else: + self.usages = ( + torch.addcmul( + self.usages, 1, prev_write_distribution.detach(), (1 - self.usages) + ) + * phi + ) + + return phi + + def forward(self, prev_write_distribution, prev_read_distributions, free_gates): + phi = self.update_usages( + prev_write_distribution, prev_read_distributions, free_gates + ) + sorted_usage, free_list = (self.usages * (1.0 - _EPS) + _EPS).sort(-1) + + u_prod = sorted_usage.cumprod(-1) + one_minus_usage = 1.0 - sorted_usage + sorted_scores = torch.cat( + [one_minus_usage[..., 0:1], one_minus_usage[..., 1:] * u_prod[..., :-1]], + dim=-1, + ) + + return sorted_scores.clone().scatter_(-1, free_list, sorted_scores), phi + + +class ContentAddressGenerator(torch.nn.Module): + def __init__( + self, disable_content_norm=False, mask_min=0.0, disable_key_masking=False + ): + super(ContentAddressGenerator, self).__init__() + self.disable_content_norm = disable_content_norm + self.mask_min = mask_min + self.disable_key_masking = disable_key_masking + + def forward(self, memory, keys, betas, mask=None): + # 
Memory shape [batch, cell count, word length] + # Key shape [batch, n heads*, word length] + # Betas shape [batch, n heads] + if mask is not None and self.mask_min != 0: + mask = mask * (1.0 - self.mask_min) + self.mask_min + + single_head = keys.dim() == 2 + if single_head: + # Single head + keys = keys.unsqueeze(1) + if mask is not None: + mask = mask.unsqueeze(1) + + memory = memory.unsqueeze(1) + keys = keys.unsqueeze(-2) + + if mask is not None: + mask = mask.unsqueeze(-2) + memory = memory * mask + if not self.disable_key_masking: + keys = keys * mask + + # Shape [batch, n heads, cell count] + norm = keys.norm(dim=-1) + if not self.disable_content_norm: + norm = norm * memory.norm(dim=-1) + + scores = (memory * keys).sum(-1) / (norm + _EPS) + scores *= betas.unsqueeze(-1) + + res = F.softmax(scores, scores.dim() - 1) + return res.squeeze(1) if single_head else res + + +class WriteHead(torch.nn.Module): + @staticmethod + def create_write_archive(write_dist, erase_vector, write_vector, phi): + return dict( + write_dist=write_dist, + erase_vector=erase_vector, + write_vector=write_vector, + phi=phi, + ) + + def __init__( + self, + dealloc_content=True, + disable_content_norm=False, + mask_min=0.0, + disable_key_masking=False, + ): + super(WriteHead, self).__init__() + self.write_content_generator = ContentAddressGenerator( + disable_content_norm, + mask_min=mask_min, + disable_key_masking=disable_key_masking, + ) + self.allocation_manager = AllocationManager() + self.last_write = None + self.dealloc_content = dealloc_content + self.new_sequence() + + def new_sequence(self): + self.last_write = None + self.allocation_manager.new_sequence() + + @staticmethod + def mem_update(memory, write_dist, erase_vector, write_vector, phi): + # In original paper the memory content is NOT deallocated, which makes content based addressing basically + # unusable when multiple similar steps should be done. 
The reason for this is that the memory contents are + # still there, so the lookup will find them, unless an allocation clears it before the next search, which is + # completely random. So I'm arguing that erase matrix should also take in account the free gates (multiply it + # with phi) + write_dist = write_dist.unsqueeze(-1) + + erase_matrix = 1.0 - write_dist * erase_vector.unsqueeze(-2) + if phi is not None: + erase_matrix = erase_matrix * phi.unsqueeze(-1) + + update_matrix = write_dist * write_vector.unsqueeze(-2) + return memory * erase_matrix + update_matrix + + def forward( + self, + demon_action, + memory, + write_content_key, + write_beta, + erase_vector, + write_vector, + alloc_gate, + write_gate, + free_gates, + prev_read_dist, + write_mask=None, + debug=None, + ): + last_w_dist = ( + self.last_write["write_dist"] if self.last_write is not None else None + ) + + content_dist = self.write_content_generator( + memory, write_content_key, write_beta, mask=write_mask + ) + alloc_dist, phi = self.allocation_manager( + last_w_dist, prev_read_dist, free_gates + ) + + # Shape [batch, cell count] + write_dist = write_gate * ( + alloc_gate * alloc_dist + (1 - alloc_gate) * content_dist + ) + self.last_write = WriteHead.create_write_archive( + write_dist, + erase_vector, + write_vector, + phi if self.dealloc_content else None, + ) + + dict_append(debug, "alloc_dist", alloc_dist) + dict_append(debug, "write_dist", write_dist) + dict_append(debug, "mem_usages", self.allocation_manager.usages) + dict_append(debug, "free_gates", free_gates) + dict_append(debug, "write_betas", write_beta) + dict_append(debug, "write_gate", write_gate) + dict_append(debug, "write_vector", write_vector) + dict_append(debug, "alloc_gate", alloc_gate) + dict_append(debug, "erase_vector", erase_vector) + if write_mask is not None: + dict_append(debug, "write_mask", write_mask) + + return WriteHead.mem_update(memory, **self.last_write) + + +class RawWriteHead(torch.nn.Module): + def 
__init__( + self, + n_read_heads, + word_length, + use_mask=False, + dealloc_content=True, + disable_content_norm=False, + mask_min=0.0, + disable_key_masking=False, + ): + super(RawWriteHead, self).__init__() + self.write_head = WriteHead( + dealloc_content=dealloc_content, + disable_content_norm=disable_content_norm, + mask_min=mask_min, + disable_key_masking=disable_key_masking, + ) + self.word_length = word_length + self.n_read_heads = n_read_heads + self.use_mask = use_mask + self.input_size = ( + 3 * self.word_length + + self.n_read_heads + + 3 + + (self.word_length if use_mask else 0) + ) + + def new_sequence(self): + self.write_head.new_sequence() + + def get_prev_write(self): + return self.write_head.last_write + + def forward(self, demon_action, memory, nn_output, prev_read_dist, debug): + shapes = ( + [[self.word_length]] * (4 if self.use_mask else 3) + + [[self.n_read_heads]] + + [[1]] * 3 + ) + tensors = split_tensor(nn_output, shapes) + + if self.use_mask: + write_mask = torch.sigmoid(tensors[0]) + tensors = tensors[1:] + else: + write_mask = None + + ( + write_content_key, + erase_vector, + write_vector, + free_gates, + write_beta, + alloc_gate, + write_gate, + ) = tensors + + erase_vector = torch.sigmoid(erase_vector) + free_gates = torch.sigmoid(free_gates) + write_beta = oneplus(write_beta) + alloc_gate = torch.sigmoid(alloc_gate) + write_gate = torch.sigmoid(write_gate) + + return self.write_head( + demon_action, + memory, + write_content_key, + write_beta, + erase_vector, + write_vector, + alloc_gate, + write_gate, + free_gates, + prev_read_dist, + debug=debug, + write_mask=write_mask, + ) + + def get_neural_input_size(self): + return self.input_size + + +class TemporalMemoryLinkage(torch.nn.Module): + def __init__(self): + super(TemporalMemoryLinkage, self).__init__() + self.temp_link_mat = None + self.precedence_weighting = None + self.diag_mask = None + + self.initial_temp_link_mat = None + self.initial_precedence_weighting = None + 
self.initial_diag_mask = None + self.initial_shape = None + + def new_sequence(self): + self.temp_link_mat = None + self.precedence_weighting = None + self.diag_mask = None + + def _init_link(self, w_dist): + s = list(w_dist.size()) + if self.initial_shape is None or s != self.initial_shape: + self.initial_temp_link_mat = torch.zeros(s[0], s[-1], s[-1]).to( + w_dist.device + ) + self.initial_precedence_weighting = torch.zeros(s[0], s[-1]).to( + w_dist.device + ) + self.initial_diag_mask = ( + 1.0 - torch.eye(s[-1]).unsqueeze(0).to(w_dist) + ).detach() + + self.temp_link_mat = self.initial_temp_link_mat + self.precedence_weighting = self.initial_precedence_weighting + self.diag_mask = self.initial_diag_mask + + def _update_precedence(self, w_dist): + # w_dist shape: [ batch, cell count ] + self.precedence_weighting = ( + 1.0 - w_dist.sum(-1, keepdim=True) + ) * self.precedence_weighting + w_dist + + def _update_links(self, w_dist): + if self.temp_link_mat is None: + self._init_link(w_dist) + + wt_i = w_dist.unsqueeze(-1) + wt_j = w_dist.unsqueeze(-2) + pt_j = self.precedence_weighting.unsqueeze(-2) + + self.temp_link_mat = ( + (1 - wt_i - wt_j) * self.temp_link_mat + wt_i * pt_j + ) * self.diag_mask + + def forward(self, w_dist, prev_r_dists, debug=None): + self._update_links(w_dist) + self._update_precedence(w_dist) + + # prev_r_dists shape: [ batch, n heads, cell count ] + # Emulate matrix-vector multiplication by broadcast and sum. 
This way we don't need to transpose the matrix + tlm_multi_head = self.temp_link_mat.unsqueeze(1) + + forward_dist = (tlm_multi_head * prev_r_dists.unsqueeze(-2)).sum(-1) + backward_dist = (tlm_multi_head * prev_r_dists.unsqueeze(-1)).sum(-2) + + dict_append(debug, "forward_dists", forward_dist) + dict_append(debug, "backward_dists", backward_dist) + dict_append(debug, "precedence_weights", self.precedence_weighting) + + # output shapes [ batch, n_heads, cell_count ] + return forward_dist, backward_dist + + +class ReadHead(torch.nn.Module): + def __init__( + self, disable_content_norm=False, mask_min=0.0, disable_key_masking=False + ): + super(ReadHead, self).__init__() + self.content_addr_generator = ContentAddressGenerator( + disable_content_norm=disable_content_norm, + mask_min=mask_min, + disable_key_masking=disable_key_masking, + ) + self.read_dist = None + self.read_data = None + self.new_sequence() + + def new_sequence(self): + self.read_dist = None + self.read_data = None + + def forward( + self, + memory, + read_content_keys, + read_betas, + forward_dist, + backward_dist, + gates, + read_mask=None, + debug=None, + ): + content_dist = self.content_addr_generator( + memory, read_content_keys, read_betas, mask=read_mask + ) + + self.read_dist = ( + backward_dist * gates[..., 0:1] + + content_dist * gates[..., 1:2] + + forward_dist * gates[..., 2:] + ) + + # memory shape: [ batch, cell count, word_length ] + # read_dist shape: [ batch, n heads, cell count ] + # result shape: [ batch, n_heads, word_length ] + self.read_data = (memory.unsqueeze(1) * self.read_dist.unsqueeze(-1)).sum(-2) + + dict_append(debug, "content_dist", content_dist) + dict_append(debug, "balance", gates) + dict_append(debug, "read_dist", self.read_dist) + dict_append(debug, "read_content_keys", read_content_keys) + if read_mask is not None: + dict_append(debug, "read_mask", read_mask) + dict_append(debug, "read_betas", read_betas.unsqueeze(-2)) + if read_mask is not None: + 
dict_append(debug, "read_mask", read_mask) + + return self.read_data + + +class RawReadHead(torch.nn.Module): + def __init__( + self, + n_heads, + word_length, + use_mask=False, + disable_content_norm=False, + mask_min=0.0, + disable_key_masking=False, + ): + super(RawReadHead, self).__init__() + self.read_head = ReadHead( + disable_content_norm=disable_content_norm, + mask_min=mask_min, + disable_key_masking=disable_key_masking, + ) + self.n_heads = n_heads + self.word_length = word_length + self.use_mask = use_mask + self.input_size = self.n_heads * ( + self.word_length * (2 if use_mask else 1) + 3 + 1 + ) + + def get_prev_dist(self, memory): + if self.read_head.read_dist is not None: + return self.read_head.read_dist + else: + m_shape = memory.size() + return torch.zeros(m_shape[0], self.n_heads, m_shape[1]).to(memory) + + def get_prev_data(self, memory): + if self.read_head.read_data is not None: + return self.read_head.read_data + else: + m_shape = memory.size() + return torch.zeros(m_shape[0], self.n_heads, m_shape[-1]).to(memory) + + def new_sequence(self): + self.read_head.new_sequence() + + def forward(self, memory, nn_output, forward_dist, backward_dist, debug): + shapes = [[self.n_heads, self.word_length]] * (2 if self.use_mask else 1) + [ + [self.n_heads], + [self.n_heads, 3], + ] + tensors = split_tensor(nn_output, shapes) + + if self.use_mask: + read_mask = torch.sigmoid(tensors[0]) + tensors = tensors[1:] + else: + read_mask = None + + keys, betas, gates = tensors + + betas = oneplus(betas) + gates = F.softmax(gates, gates.dim() - 1) + + return self.read_head( + memory, + keys, + betas, + forward_dist, + backward_dist, + gates, + debug=debug, + read_mask=read_mask, + ) + + def get_neural_input_size(self): + return self.input_size + + +class DistSharpnessEnhancer(torch.nn.Module): + def __init__(self, n_heads): + super(DistSharpnessEnhancer, self).__init__() + self.n_heads = n_heads if isinstance(n_heads, list) else [n_heads] + self.n_data = 
sum(self.n_heads) + + def forward(self, nn_input, *dists): + assert len(dists) == len(self.n_heads) + nn_input = oneplus(nn_input[..., : self.n_data]) + factors = split_tensor(nn_input, self.n_heads) + + res = [] + for i, d in enumerate(dists): + s = list(d.size()) + ndim = d.dim() + f = factors[i] + if ndim == 2: + assert self.n_heads[i] == 1 + elif ndim == 3: + f = f.unsqueeze(-1) + else: + assert False + + d += _EPS + d = d / d.max(dim=-1, keepdim=True)[0] + d = d.pow(f) + d = d / d.sum(dim=-1, keepdim=True) + res.append(d) + return res + + def get_neural_input_size(self): + return self.n_data + + +class DNC(torch.nn.Module): + def __init__( + self, + input_size, + output_size, + word_length, + cell_count, + n_read_heads, + controller, + batch_first=False, + clip_controller=20, + bias=True, + mask=False, + dealloc_content=True, + link_sharpness_control=True, + disable_content_norm=False, + mask_min=0.0, + disable_key_masking=False, + ): + super(DNC, self).__init__() + + self.clip_controller = clip_controller + + self.read_head = RawReadHead( + n_read_heads, + word_length, + use_mask=mask, + disable_content_norm=disable_content_norm, + mask_min=mask_min, + disable_key_masking=disable_key_masking, + ) + self.write_head = RawWriteHead( + n_read_heads, + word_length, + use_mask=mask, + dealloc_content=dealloc_content, + disable_content_norm=disable_content_norm, + mask_min=mask_min, + disable_key_masking=disable_key_masking, + ) + self.temporal_link = TemporalMemoryLinkage() + self.sharpness_control = ( + DistSharpnessEnhancer([n_read_heads, n_read_heads]) + if link_sharpness_control + else None + ) + + in_size = input_size + n_read_heads * word_length + control_channels = ( + self.read_head.get_neural_input_size() + + self.write_head.get_neural_input_size() + + ( + self.sharpness_control.get_neural_input_size() + if self.sharpness_control is not None + else 0 + ) + ) + + self.controller = controller + controller.init(in_size) + self.controller_to_controls = 
torch.nn.Linear( + controller.get_output_size(), control_channels, bias=bias + ) + self.controller_to_out = torch.nn.Linear( + controller.get_output_size(), output_size, bias=bias + ) + self.read_to_out = torch.nn.Linear( + word_length * n_read_heads, output_size, bias=bias + ) + + self.cell_count = cell_count + self.word_length = word_length + + self.memory = None + self.reset_parameters() + + self.batch_first = batch_first + self.zero_mem_tensor = None + + self.mem_state = None + + self.device = ( + torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") + ) + + def reset_parameters(self): + linear_reset(self.controller_to_controls) + linear_reset(self.controller_to_out) + linear_reset(self.read_to_out) + self.controller.reset_parameters() + + def _step(self, in_data, debug, demon, rollout_storage): + init_debug(debug, {"read_head": {}, "write_head": {}, "temporal_links": {}}) + + # input shape: [ batch, channels ] + batch_size = in_data.size(0) + + # # run the demon if it is used + if demon: + # Running policy_old: + demon_action = demon.select_action( + torch.cat([in_data, self.memory.view(batch_size, -1)], -1), + rollout_storage, + ) + in_data = in_data + demon_action + + demon_action = None + + # run the controller + prev_read_data = self.read_head.get_prev_data(self.memory).view( + [batch_size, -1] + ) + + control_data = self.controller(torch.cat([in_data, prev_read_data], -1)) + + # memory ops + controls = self.controller_to_controls(control_data).contiguous() + controls = ( + controls.clamp(-self.clip_controller, self.clip_controller) + if self.clip_controller is not None + else controls + ) + + shapes = [ + [self.write_head.get_neural_input_size()], + [self.read_head.get_neural_input_size()], + ] + if self.sharpness_control is not None: + shapes.append(self.sharpness_control.get_neural_input_size()) + + tensors = split_tensor(controls, shapes) + + write_head_control, read_head_control = tensors[:2] + tensors = tensors[2:] + + 
prev_read_dist = self.read_head.get_prev_dist(self.memory) + + self.memory = self.write_head( + demon_action, + self.memory, + write_head_control, + prev_read_dist, + debug=dict_get(debug, "write_head"), + ) + + prev_write = self.write_head.get_prev_write() + forward_dist, backward_dist = self.temporal_link( + prev_write["write_dist"] if prev_write is not None else None, + prev_read_dist, + debug=dict_get(debug, "temporal_links"), + ) + + if self.sharpness_control is not None: + forward_dist, backward_dist = self.sharpness_control( + tensors[0], forward_dist, backward_dist + ) + + read_data = self.read_head( + self.memory, + read_head_control, + forward_dist, + backward_dist, + debug=dict_get(debug, "read_head"), + ) + + # output: + return self.controller_to_out(control_data) + self.read_to_out( + read_data.view(batch_size, -1) + ) + + def _mem_init(self, batch_size, device): + if self.zero_mem_tensor is None or self.zero_mem_tensor.size(0) != batch_size: + self.zero_mem_tensor = torch.zeros( + batch_size, self.cell_count, self.word_length + ).to(device) + + self.memory = self.zero_mem_tensor + + if self.mem_state is None: + self.mem_state = [] + + def forward(self, in_data, debug=None, demon=None, rollout_storage=None): + self.write_head.new_sequence() + self.read_head.new_sequence() + self.temporal_link.new_sequence() + self.controller.new_sequence() + + self._mem_init(in_data.size(0 if self.batch_first else 1), in_data.device) + + out_tsteps = [] + + if self.batch_first: + # input format: batch, time, channels + for t in range(in_data.size(1)): + out_tsteps.append( + self._step(in_data[:, t], debug, demon, rollout_storage) + ) + self.mem_state.append(self.memory.view(in_data.size(0), -1)) + else: + # input format: time, batch, channels + for t in range(in_data.size(0)): + out_tsteps.append(self._step(in_data[t], debug, demon, rollout_storage)) + self.mem_state.append(self.memory.view(-1, in_data.size(0))) + + merge_debug_tensors(debug, dim=1 if self.batch_first 
else 0) + return torch.stack(out_tsteps, dim=1 if self.batch_first else 0) + + +class LSTMController(torch.nn.Module): + def __init__(self, layer_sizes, out_from_all_layers=True): + super(LSTMController, self).__init__() + self.out_from_all_layers = out_from_all_layers + self.layer_sizes = layer_sizes + self.states = None + self.outputs = None + + def new_sequence(self): + self.states = [None] * len(self.layer_sizes) + self.outputs = [None] * len(self.layer_sizes) + + def reset_parameters(self): + def init_layer(l, index): + size = self.layer_sizes[index] + # Initialize all matrices to sigmoid, just data input to tanh + a = math.sqrt(3.0) * self.stdevs[i] + l.weight.data[0:-size].uniform_(-a, a) + a *= init.calculate_gain("tanh") + l.weight.data[-size:].uniform_(-a, a) + if l.bias is not None: + l.bias.data[self.layer_sizes[i] :].fill_(0) + # init forget gate to large number. + l.bias.data[: self.layer_sizes[i]].fill_(1) + + # xavier init merged input weights + for i in range(len(self.layer_sizes)): + init_layer(self.in_to_all[i], i) + init_layer(self.out_to_all[i], i) + if i > 0: + init_layer(self.prev_to_all[i - 1], i) + + def _add_modules(self, name, m_list): + for i, m in enumerate(m_list): + self.add_module("%s_%d" % (name, i), m) + + def init(self, input_size): + self.layer_sizes = self.layer_sizes + + # Xavier init: input to all gates is layers_sizes[i-1] + layer_sizes[i] + input_size -> layer_size big. + # So use xavier init according to this. 
+ self.input_sizes = [ + (self.layer_sizes[i - 1] if i > 0 else 0) + self.layer_sizes[i] + input_size + for i in range(len(self.layer_sizes)) + ] + self.stdevs = [ + math.sqrt(2.0 / (self.layer_sizes[i] + self.input_sizes[i])) + for i in range(len(self.layer_sizes)) + ] + self.in_to_all = [ + torch.nn.Linear(input_size, 4 * self.layer_sizes[i]) + for i in range(len(self.layer_sizes)) + ] + self.out_to_all = [ + torch.nn.Linear(self.layer_sizes[i], 4 * self.layer_sizes[i], bias=False) + for i in range(len(self.layer_sizes)) + ] + self.prev_to_all = [ + torch.nn.Linear( + self.layer_sizes[i - 1], 4 * self.layer_sizes[i], bias=False + ) + for i in range(1, len(self.layer_sizes)) + ] + + self._add_modules("in_to_all", self.in_to_all) + self._add_modules("out_to_all", self.out_to_all) + self._add_modules("prev_to_all", self.prev_to_all) + + self.reset_parameters() + + def get_output_size(self): + return ( + sum(self.layer_sizes) if self.out_from_all_layers else self.layer_sizes[-1] + ) + + def forward(self, data): + for i, size in enumerate(self.layer_sizes): + d = self.in_to_all[i](data) + if self.outputs[i] is not None: + d += self.out_to_all[i](self.outputs[i]) + if i > 0: + d += self.prev_to_all[i - 1](self.outputs[i - 1]) + + input_data = torch.tanh(d[..., -size:]) + forget_gate, input_gate, output_gate = torch.sigmoid(d[..., :-size]).chunk( + 3, dim=-1 + ) + + state_update = input_gate * input_data + + if self.states[i] is not None: + self.states[i] = self.states[i] * forget_gate + state_update + else: + self.states[i] = state_update + + self.outputs[i] = output_gate * torch.tanh(self.states[i]) + + return ( + torch.cat(self.outputs, -1) + if self.out_from_all_layers + else self.outputs[-1] + ) + + +class FeedforwardController(torch.nn.Module): + def __init__(self, layer_sizes=[]): + super(FeedforwardController, self).__init__() + self.layer_sizes = layer_sizes + + def new_sequence(self): + pass + + def reset_parameters(self): + for module in self.model: + if 
isinstance(module, torch.nn.Linear): + linear_reset(module, gain=init.calculate_gain("relu")) + + def get_output_size(self): + return self.layer_sizes[-1] + + def init(self, input_size): + self.layer_sizes = self.layer_sizes + + # Xavier init: input to all gates is layers_sizes[i-1] + layer_sizes[i] + input_size -> layer_size big. + # So use xavier init according to this. + self.input_sizes = [input_size] + self.layer_sizes[:-1] + + layers = [] + for i, size in enumerate(self.layer_sizes): + layers.append(torch.nn.Linear(self.input_sizes[i], self.layer_sizes[i])) + layers.append(torch.nn.ReLU()) + self.model = torch.nn.Sequential(*layers) + self.reset_parameters() + + def forward(self, data): + return self.model(data) diff --git a/Models/Demon.py b/Models/Demon.py new file mode 100644 index 0000000..13ded38 --- /dev/null +++ b/Models/Demon.py @@ -0,0 +1,140 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.nn.init as init + +from torch.distributions import Normal + +from collections import namedtuple + +LOG_SIG_MAX = 2 +LOG_SIG_MIN = -20 +EPSILON = 1e-6 + +SavedAction = namedtuple("SavedAction", ["action", "log_prob", "mean"]) + + +def linear_reset(module, gain=1.0): + assert isinstance(module, torch.nn.Linear) + init.xavier_uniform_(module.weight, gain=gain) + s = module.weight.size(1) + if module.bias is not None: + module.bias.data.zero_() + + +class ZNet(nn.Module): + def __init__(self): + super(ZNet, self).__init__() + + def reset_parameters(self): + for module in self.lstm: + if isinstance(module, torch.nn.Linear): + linear_reset(module, gain=init.calculate_gain("relu")) + + for module in self.hidden2z: + if isinstance(module, torch.nn.Linear): + linear_reset(module, gain=init.calculate_gain("relu")) + + def init(self, input_size): + self.lstm = nn.Sequential(nn.LSTM(input_size, 32, batch_first=True)) + self.hidden2z = nn.Sequential(nn.Linear(32, 1)) + self.reset_parameters() + + def forward(self, data): + output, (hn, cn) 
= self.lstm(data) + zvals = self.hidden2z(output) + return F.softplus(zvals) + + +class FNet(nn.Module): + def __init__(self): + super(FNet, self).__init__() + + def reset_parameters(self): + for module in self.lstm: + if isinstance(module, torch.nn.Linear): + linear_reset(module, gain=init.calculate_gain("relu")) + + for module in self.hidden2z: + if isinstance(module, torch.nn.Linear): + linear_reset(module, gain=init.calculate_gain("relu")) + + def init(self, input_size): + self.lstm = nn.Sequential(nn.LSTM(input_size, 32, batch_first=True)) + self.hidden2z = nn.Sequential(nn.Linear(32, 1)) + self.reset_parameters() + + def forward(self, data): + output, (hn, cn) = self.lstm(data) + output = F.elu(output) + fvals = self.hidden2z(output) + return fvals + + +class Demon(torch.nn.Module): + """ + Demon manipulates the external memory of DNC. + """ + + def __init__(self, layer_sizes=[]): + super(Demon, self).__init__() + self.layer_sizes = layer_sizes + self.action_scale = torch.tensor(1) + self.action_bias = torch.tensor(0.0) + self.saved_actions = [] + + def get_output_size(self): + return self.layer_sizes[-1] + + def reset_parameters(self): + for module in self.model: + if isinstance(module, torch.nn.Linear): + linear_reset(module, gain=init.calculate_gain("relu")) + linear_reset(self.embed_mean, gain=init.calculate_gain("relu")) + linear_reset(self.embed_log_std, gain=init.calculate_gain("relu")) + + def init(self, input_size, output_size): + # Xavier init: input to all gates is layers_sizes[i-1] + layer_sizes[i] + input_size -> layer_size big. + # So use xavier init according to this. 
+ self.input_sizes = [input_size] + self.layer_sizes[:-1] + layers = [] + for i, size in enumerate(self.layer_sizes): + layers.append(nn.Linear(self.input_sizes[i], self.layer_sizes[i])) + layers.append(nn.ReLU()) + + self.model = nn.Sequential(*layers) + self.embed_mean = nn.Linear(self.layer_sizes[-1], output_size) + self.embed_log_std = nn.Linear(self.layer_sizes[-1], output_size) + + self.reset_parameters() + + def forward(self, data): + x = self.model(data) + x = F.relu(x) + mean, log_std = self.embed_mean(x), self.embed_log_std(x) + log_std = torch.clamp(log_std, min=LOG_SIG_MIN, max=LOG_SIG_MAX) + std = torch.exp(log_std) + return mean, std + + def act(self, data): + """ + pathwise derivative estimator for taking actions. + :param data: + :return: + """ + mean, std = self.forward(data) + normal = Normal(mean, std) + x = normal.rsample() + + y = torch.softmax(x, dim=1) + + action = y * self.action_scale + self.action_bias + log_prob = normal.log_prob(action) + # Enforcing Action Bound + log_prob -= torch.log(self.action_scale * (1 - y.pow(2)) + EPSILON) + log_prob = log_prob.sum(1, keepdim=True) + + mean = torch.softmax(mean, dim=1) * self.action_scale + self.action_bias + self.saved_actions.append(SavedAction(action, log_prob, mean)) + + return mean diff --git a/Models/Information_Agents.py b/Models/Information_Agents.py new file mode 100644 index 0000000..2abbbd4 --- /dev/null +++ b/Models/Information_Agents.py @@ -0,0 +1,238 @@ +import torch +import torch.nn as nn +from torch.distributions import MultivariateNormal +import torch.nn.functional as F +import torch.nn.init as init + +import numpy as np + +device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + + +class RolloutStorage: + def __init__(self): + self.actions = [] + self.states = [] + self.logprobs = [] + self.rewards = [] + self.is_terminals = [] + + def clear_storage(self): + del self.actions[:] + del self.states[:] + del self.logprobs[:] + del self.rewards[:] + del 
self.is_terminals[:] + + +class ActorCritic(nn.Module): + def __init__(self, state_dim, action_dim, action_std): + super(ActorCritic, self).__init__() + self.actor = nn.Sequential( + nn.Linear(state_dim, 64), + nn.Tanh(), + nn.Linear(64, 32), + nn.Tanh(), + nn.Linear(32, action_dim), + nn.Softmax(dim=1), + ) + + # critic + self.critic = nn.Sequential( + nn.Linear(state_dim, 64), + nn.Tanh(), + nn.Linear(64, 32), + nn.Tanh(), + nn.Linear(32, 1), + ) + self.action_var = torch.full((action_dim,), action_std * action_std).to(device) + + def forward(self): + raise NotImplementedError + + def act(self, state, rollout_storage): + action_mean = self.actor(state) + cov_mat = torch.diag(self.action_var).to(device) + + dist = MultivariateNormal(action_mean, cov_mat) + action = dist.sample() + action_logprob = dist.log_prob(action) + + if rollout_storage: + rollout_storage.states.append(state) + rollout_storage.actions.append(action) + rollout_storage.logprobs.append(action_logprob) + + return action.detach() + + def evaluate(self, state, action): + action_mean = self.actor(state) + + action_var = self.action_var.expand_as(action_mean) + cov_mat = torch.diag_embed(action_var).to(device) + + dist = MultivariateNormal(action_mean, cov_mat) + + action_logprobs = dist.log_prob(action) + dist_entropy = dist.entropy() + state_value = self.critic(state) + + return action_logprobs, torch.squeeze(state_value), dist_entropy + + +class Demon: + def __init__( + self, state_dim, action_dim, action_std, lr, betas, gamma, K_epochs, eps_clip + ): + self.lr = lr + self.betas = betas + self.gamma = gamma + self.eps_clip = eps_clip + self.K_epochs = K_epochs + + self.policy = ActorCritic(state_dim, action_dim, action_std).to(device) + self.optimizer = torch.optim.Adam(self.policy.parameters(), lr=lr, betas=betas) + + self.policy_old = ActorCritic(state_dim, action_dim, action_std).to(device) + self.policy_old.load_state_dict(self.policy.state_dict()) + + self.MseLoss = nn.MSELoss() + + def 
select_action(self, state, rollout_storage): + return self.policy_old.act(state, rollout_storage) + + def update(self, rollout_storage): + # Monte Carlo estimate of rewards: + rewards = [] + discounted_reward = 0 + for reward in reversed(rollout_storage.rewards): + discounted_reward = reward + (self.gamma * discounted_reward) + rewards.insert(0, discounted_reward) + + # Normalizing the rewards: + rewards = torch.stack(rewards) + rewards = (rewards - rewards.mean()) / (rewards.std() + 1e-5) + rewards = rewards.squeeze(-1) + + # convert list to tensor + old_states = torch.squeeze( + torch.stack(rollout_storage.states).to(device), 1 + ).detach() + old_actions = torch.squeeze( + torch.stack(rollout_storage.actions).to(device), 1 + ).detach() + old_logprobs = ( + torch.squeeze(torch.stack(rollout_storage.logprobs), 1).to(device).detach() + ) + + # Optimize policy for K epochs: + for _ in range(self.K_epochs): + # Evaluating old actions and values : + logprobs, state_values, dist_entropy = self.policy.evaluate( + old_states, old_actions + ) + + # Finding the ratio (pi_theta / pi_theta__old): + ratios = torch.exp(logprobs - old_logprobs.detach()) + + try: + state_values = state_values[ + :-1, : + ] # reward is computed as the mutual info between consequenct mem state, + # therefore n-1 values only. + ratios = ratios[:-1, :] # the same for ratio + dist_entropy = dist_entropy[:-1, :] # the same for entropy + advantages = rewards - state_values.detach() + surr1 = ratios * advantages + surr2 = ( + torch.clamp(ratios, 1 - self.eps_clip, 1 + self.eps_clip) + * advantages + ) + # Finding Surrogate Loss: + loss = ( + -torch.min(surr1, surr2) + + 0.5 * self.MseLoss(state_values, rewards) + - 0.01 * dist_entropy + ) + # take gradient step + self.optimizer.zero_grad() + loss.mean().backward() + torch.nn.utils.clip_grad_norm_(self.policy.parameters(), 0.5) + self.optimizer.step() + except Exception: + # Do thing for the sequences of lentgh 1. 
+ loss = torch.zeros_like(rewards).to(device) + continue + + # Copy new weights into old policy: + self.policy_old.load_state_dict(self.policy.state_dict()) + return loss + + +############################################ +# Mutual information Estimator Network###### +############################################ + + +def linear_reset(module, gain=1.0): + assert isinstance(module, torch.nn.Linear) + init.xavier_uniform_(module.weight, gain=gain) + s = module.weight.size(1) + if module.bias is not None: + module.bias.data.zero_() + + +class FNet(nn.Module): + """ + Monte-Carlo estimators for Mutual Information Known as MINE. + Mine produces estimates that are neither an upper or lower bound on MI. + Other ZNet can be Introduced to address the problem of building bounds with finite samples (unlike Monte Carlo) + """ + + def __init__(self): + super(FNet, self).__init__() + + def reset_parameters(self): + for module in self.lstm: + if isinstance(module, torch.nn.Linear): + linear_reset(module, gain=init.calculate_gain("relu")) + + for module in self.hidden2f: + if isinstance(module, torch.nn.Linear): + linear_reset(module, gain=init.calculate_gain("relu")) + + def init(self, input_size): + self.lstm = nn.Sequential(nn.LSTM(input_size, 32, batch_first=True)) + self.hidden2f = nn.Sequential(nn.Linear(32, 1)) + self.reset_parameters() + + def forward(self, data): + output, (hn, cn) = self.lstm(data) + output = F.elu(output) + fvals = self.hidden2f(output) + return fvals + + +class ZNet(nn.Module): + def __init__(self): + super(ZNet, self).__init__() + + def reset_parameters(self): + for module in self.lstm: + if isinstance(module, torch.nn.Linear): + linear_reset(module, gain=init.calculate_gain("relu")) + + for module in self.hidden2z: + if isinstance(module, torch.nn.Linear): + linear_reset(module, gain=init.calculate_gain("relu")) + + def init(self, input_size): + self.lstm = nn.Sequential(nn.LSTM(input_size, 32, batch_first=True)) + self.hidden2z = 
nn.Sequential(nn.Linear(32, 1)) + self.reset_parameters() + + def forward(self, data): + output, (hn, cn) = self.lstm(data) + output = F.elu(output) + zvals = self.hidden2z(output) + return F.softplus(zvals) diff --git a/Utils/.DS_Store b/Utils/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..5008ddfcf53c02e82d7eee2e57c38e5672ef89f6 GIT binary patch literal 6148 zcmeH~Jr2S!425mzP>H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0max: + print(t) + assert False + + +def assert_dist(t, use_lower_limit=True): + if not enableDebug: + return + + assert_range(t) + + if t.sum(-1).max().cpu().data.numpy()>1.001: + print("MAT:", t) + print("SUM:", t.sum(-1)) + assert False + + if use_lower_limit and t.sum(-1).max().cpu().data.numpy()<0.999: + print(t) + print("SUM:", t.sum(-1)) + assert False + + +def print_stat(name, t): + if not enableDebug: + return + + min = t.min().cpu().data.numpy() + max = t.max().cpu().data.numpy() + mean = t.mean().cpu().data.numpy() + + print("%s: min: %g, mean: %g, max: %g" % (name, min, mean, max)) + + +def dbg_print(*things): + if not enableDebug: + return + print(*things) + +class GradPrinter(torch.autograd.Function): + @staticmethod + def forward(ctx, a): + return a + + @staticmethod + def backward(ctx, g): + print("Grad (print_grad): ", g[0]) + return g + +def print_grad(t): + return GradPrinter.apply(t) + +def assert_equal(t1, ref, limit=1e-5, force=True): + if not (enableDebug or force): + return + + assert t1.shape==ref.shape, "Tensor shapes differ: got %s, ref %s" % (t1.shape, ref.shape) + norm = ref.abs().sum() / ref.nonzero().sum().float() + threshold = norm * limit + + errcnt = ((t1 - ref).abs() > threshold).sum() + if errcnt > 0: + print("Tensors differ. (max difference: %g, norm %f). 
No of errors: %d of %d" % + ((t1 - ref).abs().max().item(), norm, errcnt, t1.numel())) + print("---------------------------------------------Out-----------------------------------------------") + print(t1) + print("---------------------------------------------Ref-----------------------------------------------") + print(ref) + print("-----------------------------------------------------------------------------------------------") + assert False \ No newline at end of file diff --git a/Utils/Helpers.py b/Utils/Helpers.py new file mode 100644 index 0000000..4868150 --- /dev/null +++ b/Utils/Helpers.py @@ -0,0 +1,24 @@ +# Copyright 2017 Robert Csordas. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ============================================================================== + +import torch +import torch.autograd + +def as_numpy(data): + if isinstance(data, (torch.Tensor, torch.autograd.Variable)): + return data.detach().cpu().numpy() + else: + return data diff --git a/Utils/Index.py b/Utils/Index.py new file mode 100644 index 0000000..3813caf --- /dev/null +++ b/Utils/Index.py @@ -0,0 +1,20 @@ +# Copyright 2017 Robert Csordas. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ============================================================================== + +def index_by_dim(arr, dim, i_start, i_end=None): + if dim<0: + dim += arr.ndim + return arr[tuple([slice(None,None)] * dim + [slice(i_start, i_end) if i_end is not None else i_start])] \ No newline at end of file diff --git a/Utils/Process.py b/Utils/Process.py new file mode 100644 index 0000000..12fe617 --- /dev/null +++ b/Utils/Process.py @@ -0,0 +1,51 @@ +# Copyright 2017 Robert Csordas. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ============================================================================== + +import sys +import ctypes +import subprocess +import os + +def run(cmd, hide_stderr = True): + libc_search_dirs = ["/lib", "/lib/x86_64-linux-gnu", "/lib/powerpc64le-linux-gnu"] + + if sys.platform=="linux" : + found = None + for d in libc_search_dirs: + file = os.path.join(d, "libc.so.6") + if os.path.isfile(file): + found = file + break + + if not found: + print("WARNING: Cannot find libc.so.6. 
Cannot kill process when parent dies.") + killer = None + else: + libc = ctypes.CDLL(found) + PR_SET_PDEATHSIG = 1 + KILL = 9 + killer = lambda: libc.prctl(PR_SET_PDEATHSIG, KILL) + else: + print("WARNING: OS not linux. Cannot kill process when parent dies.") + killer = None + + + if hide_stderr: + stderr = open(os.devnull,'w') + else: + stderr = None + + return subprocess.Popen(cmd.split(" "), stderr=stderr, preexec_fn=killer) diff --git a/Utils/Profile.py b/Utils/Profile.py new file mode 100644 index 0000000..60eb546 --- /dev/null +++ b/Utils/Profile.py @@ -0,0 +1,48 @@ +# Copyright 2017 Robert Csordas. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ============================================================================== + +import atexit + +ENABLED=False + +_profiler = None + + +def construct(): + global _profiler + if not ENABLED: + return + + if _profiler is None: + from line_profiler import LineProfiler + _profiler = LineProfiler() + + +def do_profile(follow=[]): + construct() + def inner(func): + if _profiler is not None: + _profiler.add_function(func) + for f in follow: + _profiler.add_function(f) + _profiler.enable_by_count() + return func + return inner + +@atexit.register +def print_prof(): + if _profiler is not None: + _profiler.print_stats() diff --git a/Utils/Saver.py b/Utils/Saver.py new file mode 100644 index 0000000..99f6984 --- /dev/null +++ b/Utils/Saver.py @@ -0,0 +1,233 @@ +# Copyright 2017 Robert Csordas. 
class SaverElement:
    """Interface for a single persistable component of a checkpoint."""

    def save(self):
        """Return a picklable state object for this element."""
        raise NotImplementedError

    def load(self, saved_state):
        """Restore this element from a state previously returned by save()."""
        raise NotImplementedError


class CallbackSaver(SaverElement):
    """SaverElement assembled from explicit save/load callables."""

    def __init__(self, save_fn, load_fn):
        super().__init__()
        self.save = save_fn
        self.load = load_fn


class StateSaver(SaverElement):
    """SaverElement wrapping anything exposing state_dict()/load_state_dict()
    (torch modules, optimizers, LR schedulers, ...)."""

    def __init__(self, model):
        super().__init__()
        self._model = model

    def load(self, state):
        """Restore `state` into the wrapped object.

        On failure, print a diagnostic comparing the model's parameter names
        against the names present in the loaded state, then re-raise — except
        for optimizers, whose state mismatch is only reported as a warning.
        """
        try:
            self._model.load_state_dict(state)
        except Exception as e:
            if hasattr(self._model, "named_parameters"):
                names = set(n for n, _ in self._model.named_parameters())
                # BUG FIX: compare against the *loaded* state's keys. The
                # original used self._model.keys(), but torch modules have no
                # .keys(); the checkpointed state dict does.
                loaded = set(state.keys())
                if names != loaded:
                    d = loaded.difference(names)
                    if d:
                        print("Loaded, but not in model: %s" % list(d))
                    d = names.difference(loaded)
                    if d:
                        print("In model, but not loaded: %s" % list(d))
            if isinstance(self._model, torch.optim.Optimizer):
                # Optimizer state is best-effort: training can continue with a
                # freshly initialized optimizer.
                print("WARNING: optimizer parameters not loaded!")
            else:
                raise e

    def save(self):
        """Return the wrapped object's state_dict."""
        return self._model.state_dict()
save(self): + return self._vars[self._name] + + +class PyObjectSaver(SaverElement): + def __init__(self, obj): + self._obj = obj + + def load(self, state): + def _load(target, state): + if isinstance(target, dict): + for k, v in state.items(): + target[k] = _load(target.get(k), v) + elif isinstance(target, list): + if len(target)!=len(state): + target.clear() + for v in state: + target.append(v) + else: + for i, v in enumerate(state): + target[i] = _load(target[i], v) + + elif hasattr(target, "__dict__"): + _load(target.__dict__, state) + else: + return state + return target + + _load(self._obj, state) + + def save(self): + def _save(target): + if isinstance(target, dict): + res = {k: _save(v) for k, v in target.items()} + elif isinstance(target, list): + res = [_save(v) for v in target] + elif hasattr(target, "__dict__"): + res = {k: _save(v) for k, v in target.__dict__.items()} + else: + res = target + + return res + + return _save(self._obj) + + @staticmethod + def obj_supported(obj): + return isinstance(obj, (list, dict)) or hasattr(obj, "__dict__") + + +class Saver: + def __init__(self, dir, short_interval, keep_every_n_hours=4): + self.savers = {} + self.short_interval = short_interval + os.makedirs(dir, exist_ok=True) + self.dir = dir + self._keep_every_n_seconds = keep_every_n_hours * 3600 + + def register(self, name, saver): + assert name not in self.savers, "Saver %s already registered" % name + + if isinstance(saver, SaverElement): + self.savers[name] = saver + elif hasattr(saver, "state_dict") and callable(saver.state_dict): + self.savers[name] = StateSaver(saver) + elif PyObjectSaver.obj_supported(saver): + self.savers[name] = PyObjectSaver(saver) + else: + assert "Unsupported thing to save: %s" % type(saver) + + def __setitem__(self, key, value): + self.register(key, value) + + def write(self, iter): + fname = os.path.join(self.dir, self.model_name_from_index(iter)) + print("Saving %s" % fname) + + state = {} + for name, fns in self.savers.items(): + 
state[name] = fns.save() + + torch.save(state, fname) + print("Saved.") + + self._cleanup() + + def tick(self, iter): + if iter % self.short_interval != 0: + return + + self.write(iter) + + @staticmethod + def model_name_from_index(index): + return "model-%d.pth" % index + + @staticmethod + def get_checkpoint_index_list(dir): + return list(reversed(sorted( + [int(fn.split(".")[0].split("-")[-1]) for fn in os.listdir(dir) if fn.split(".")[-1] == "pth"]))) + + @staticmethod + def get_ckpts_in_time_window(dir, time_window_s, index_list=None): + if index_list is None: + index_list = Saver.get_checkpoint_index_list(dir) + + + now = time.time() + + res = [] + for i in index_list: + name = Saver.model_name_from_index(i) + mtime = os.path.getmtime(os.path.join(dir, name)) + if now - mtime > time_window_s: + break + + res.append(name) + + return res + + @staticmethod + def load_last_checkpoint(dir): + last_checkpoint = Saver.get_checkpoint_index_list(dir) + + if last_checkpoint: + for index in last_checkpoint: + fname = Saver.model_name_from_index(index) + try: + print("Loading %s" % fname) + data = torch.load(os.path.join(dir, fname)) + except: + print("WARNING: Loading %s failed. Maybe file is corrupted?" % fname) + continue + return data + return None + + def _cleanup(self): + index_list = self.get_checkpoint_index_list(self.dir) + new_files = self.get_ckpts_in_time_window(self.dir, self._keep_every_n_seconds, index_list[2:]) + new_files = new_files[:-1] + + for f in new_files: + os.remove(os.path.join(self.dir, f)) + + def load(self, fname=None): + if fname is None: + state = self.load_last_checkpoint(self.dir) + if not state: + return False + else: + state = torch.load(fname) + + for k,s in state.items(): + if k not in self.savers: + print("WARNING: failed to load state of %s. It doesn't exists." 
# Global seed; None means "unseeded" (fresh entropy on every get_randstate()).
seed = None


def fix():
    """Fix the global seed so subsequent get_randstate() calls are reproducible."""
    global seed
    seed = 0xB0C1FA52


def get_randstate():
    """Return a numpy RandomState: deterministic after fix(), otherwise random.

    BUG FIX: test `seed is not None` instead of truthiness — the original
    would silently treat a seed of 0 as "unseeded".
    """
    if seed is not None:
        return np.random.RandomState(seed)
    else:
        return np.random.RandomState()
def port_used(port):
    """Return True if a TCP connection to 127.0.0.1:`port` succeeds.

    BUG FIX: the original closed the socket only when the connection
    succeeded, leaking a file descriptor on every probe of a free port
    (alloc_port probes many). The socket is now closed on all paths.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        return sock.connect_ex(('127.0.0.1', port)) == 0
    finally:
        sock.close()
def save_heatmap(dir, title, img):
    """Dump `img` to <dir>/<title with spaces replaced by underscores>.npy.

    No-op when `dir` is None; intermediate directories are created as needed.
    """
    if dir is None:
        return
    target = os.path.join(dir, title.replace(" ", "_") + ".npy")
    os.makedirs(os.path.dirname(target), exist_ok=True)
    np.save(target, img)
+ need_to_add = (len(y) - len(self.curr_accu)) + + self.curr_accu += [0.0] * need_to_add + count = len(self.x) + if count>0: + self.replot = True + if not isinstance(self.x[0], list): + self.x = [[x] for x in self.x] + self.y = [[y] for y in self.y] + + nan = float("nan") + for a in self.x: + a += [nan] * need_to_add + for a in self.y: + a += [nan] * need_to_add + elif len(self.curr_accu) > len(y): + y = y[:] + [float("nan")] * (len(self.curr_accu) - len(y)) + + self.curr_accu = [self.curr_accu[i] + y[i] for i in range(len(y))] + self.curr_cnt += 1 + if self.curr_cnt == self.store_interval: + if len(y) > 1: + self.x.append([x] * len(y)) + self.y.append([a / self.curr_cnt for a in self.curr_accu]) + else: + self.x.append(x) + self.y.append(self.curr_accu[0] / self.curr_cnt) + + self.curr_accu = [0.0] * len(y) + self.curr_cnt = 0 + + self._send_update() + + def state_dict(self): + s = {k: self.__dict__[k] for k in self.TO_SAVE} + return s + + def load_state_dict(self, state): + if self.legend is not None: + # Load legend only if not given in the constructor. 
+ state["legend"] = self.legend + self.__dict__.update(state) + self.last_vis_update_pos = 0 + + # Read old format + if not isinstance(self.curr_accu, list) and self.curr_accu is not None: + self.curr_accu = [self.curr_accu] + + self._send_update() + + +class Image: + def __init__(self, title, dumpdir=None): + _start_if_not_running() + + self.win = None + self.opts = dict(title=title) + self.dumpdir = dumpdir + + def set_dump_dir(self, dumpdir): + self.dumpdir = dumpdir + + def draw(self, img): + if vis is None: + return + + if isinstance(img, list): + if self.win is None: + self.win = vis.images(img, opts=self.opts) + else: + vis.images(img, win=self.win, opts=self.opts) + else: + if len(img.shape)==2: + img = np.expand_dims(img, 0) + elif img.shape[-1] in [1,3] and img.shape[0] not in [1,3]: + # cv2 image + img = img.transpose(2,0,1) + img = img[::-1] + + if img.dtype==np.uint8: + img = img.astype(np.float32)/255 + + self.opts["width"] = img.shape[2] + self.opts["height"] = img.shape[1] + + save_heatmap(self.dumpdir, self.opts["title"], img) + if self.win is None: + self.win = vis.image(img, opts=self.opts) + else: + vis.image(img, win=self.win, opts=self.opts) + + def __call__(self, img): + self.draw(img) + + +class Text: + def __init__(self, title): + _start_if_not_running() + + self.win = None + self.title = title + self.curr_text = "" + + def set(self, text): + self.curr_text = text + + if vis is None: + return + + if self.win is None: + self.win = vis.text(text, opts=dict( + title=self.title + )) + else: + vis.text(text, win=self.win) + + def state_dict(self): + return {"text": self.curr_text} + + def load_state_dict(self, state): + self.set(state["text"]) + + def __call__(self, text): + self.set(text) + + +class Heatmap: + def __init__(self, title, min=None, max=None, xlabel=None, ylabel=None, colormap='Viridis', dumpdir=None): + _start_if_not_running() + + self.win = None + self.opt = dict(title=title, colormap=colormap) + self.dumpdir = dumpdir + if min 
    def draw(self, img):
        """Render `img` as a visdom heatmap (no-op if the server isn't running).

        The color range falls back to the image's own min/max unless fixed via
        the constructor's min/max; the image is also dumped through
        save_heatmap when a dump directory is set.
        """
        if vis is None:
            return

        # Copy so per-image min/max never leak into self.opt.
        o = self.opt.copy()
        if "xmin" not in o:
            o["xmin"] = float(img.min())

        if "xmax" not in o:
            o["xmax"] = float(img.max())

        save_heatmap(self.dumpdir, o["title"], img)

        if self.win is None:
            self.win = vis.heatmap(img, opts=o)
        else:
            vis.heatmap(img, win=self.win, opts=o)

    def __call__(self, img):
        self.draw(img)
+# +# ============================================================================== + +import requests, tarfile, io, os, zipfile + +from io import BytesIO, SEEK_SET, SEEK_END + + +class UrlStream: + def __init__(self, url): + self._url = url + headers = requests.head(url).headers + headers = {k.lower(): v for k,v in headers.items()} + self._seek_supported = headers.get('accept-ranges')=='bytes' and 'content-length' in headers + if self._seek_supported: + self._size = int(headers['content-length']) + self._curr_pos = 0 + self._buf_start_pos = 0 + self._iter = None + self._buffer = None + self._buf_size = 0 + self._loaded_all = False + + def seekable(self): + return self._seek_supported + + def _load_all(self): + if self._loaded_all: + return + self._make_request() + old_buf_pos = self._buffer.tell() + self._buffer.seek(0, SEEK_END) + for chunk in self._iter: + self._buffer.write(chunk) + self._buf_size = self._buffer.tell() + self._buffer.seek(old_buf_pos, SEEK_SET) + self._loaded_all = True + + def seek(self, position, whence=SEEK_SET): + if whence == SEEK_END: + assert position<=0 + if self._seek_supported: + self.seek(self._size + position) + else: + self._load_all() + self._buffer.seek(position, SEEK_END) + self._curr_pos = self._buffer.tell() + elif whence==SEEK_SET: + if self._curr_pos != position: + self._curr_pos = position + if self._seek_supported: + self._iter = None + self._buffer = None + else: + self._load_until(position) + self._buffer.seek(position) + self._curr_pos = position + else: + assert "Invalid whence %s" % whence + + return self.tell() + + def tell(self): + return self._curr_pos + + def _load_until(self, goal_position): + self._make_request() + old_buf_pos = self._buffer.tell() + current_position = self._buffer.seek(0, SEEK_END) + + goal_position = goal_position - self._buf_start_pos + while current_position < goal_position: + try: + d = next(self._iter) + self._buffer.write(d) + current_position += len(d) + except StopIteration: + break + 
self._buf_size = current_position + self._buffer.seek(old_buf_pos, SEEK_SET) + + def _new_buffer(self): + remaining = self._buffer.read() if self._buffer is not None else None + self._buffer = BytesIO() + if remaining is not None: + self._buffer.write(remaining) + self._buf_start_pos = self._curr_pos + self._buf_size = 0 if remaining is None else len(remaining) + self._buffer.seek(0, SEEK_SET) + self._loaded_all = False + + def _make_request(self): + if self._iter is None: + h = { + "User-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.80 Safari/537.36", + } + if self._seek_supported: + h["Range"] = "bytes=%d-%d" % (self._curr_pos, self._size - 1) + + r = requests.get(self._url, headers=h, stream=True) + + self._iter = r.iter_content(1024 * 1024) + self._new_buffer() + elif self._seek_supported and self._buf_size > 128 * 1024 * 1024: + self._new_buffer() + + def size(self): + if self._seek_supported: + return self._size + else: + self._load_all() + return self._buf_size + + def read(self, size=None): + if size is None: + size = self.size() + + self._load_until(self._curr_pos + size) + if self._seek_supported: + self._curr_pos = min(self._curr_pos+size, self._size) + + return self._buffer.read(size) + + def iter_content(self, block_size): + while True: + d = self.read(block_size) + if not len(d): + break + yield d + + +def download(url, dest=None, extract=True, ignore_if_exists=False): + """ + Download a file from the internet. + + Args: + url: the url to download + dest: destination file if extract=False, or destionation dir if extract=True. If None, it will be the last part of URL. + extract: extract a tar.gz or zip file? + ignore_if_exists: don't do anything if file exists + + Returns: + the destination filename. 
+ """ + + base_url = url.split("?")[0] + + if dest is None: + dest = [f for f in base_url.split("/") if f][-1] + + if os.path.exists(dest) and ignore_if_exists: + return dest + + stream = UrlStream(url) + extension = base_url.split(".")[-1].lower() + + if extract and extension in ['gz', 'bz2', 'zip']: + os.makedirs(dest, exist_ok=True) + + if extension in ['gz', 'bz2']: + decompressed_file = tarfile.open(fileobj=stream, mode='r|'+extension) + elif extension=='zip': + decompressed_file = zipfile.ZipFile(stream, mode='r') + else: + assert False, "Invalid extension: %s" % extension + + decompressed_file.extractall(dest) + else: + try: + with open(dest, 'wb') as f: + for d in stream.iter_content(1024*1024): + f.write(d) + except: + os.remove(dest) + raise + return dest diff --git a/Utils/gpu_allocator.py b/Utils/gpu_allocator.py new file mode 100644 index 0000000..be88455 --- /dev/null +++ b/Utils/gpu_allocator.py @@ -0,0 +1,111 @@ +# Copyright 2017 Robert Csordas. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
def get_free_gpus():
    """Return the indices of GPUs that currently run no compute process.

    Cross-references the GPU UUIDs of running compute apps against the
    index/UUID table, both queried via nvidia-smi. Returns None when
    nvidia-smi is unavailable or its output cannot be parsed.
    """
    try:
        free = []
        proc = subprocess.Popen("nvidia-smi --query-compute-apps=gpu_uuid --format=csv,noheader,nounits".split(" "),
                                stdout=subprocess.PIPE)
        # UUIDs of GPUs that have at least one compute app running.
        uuids = [s.strip() for s in proc.communicate()[0].decode().split("\n") if s]

        proc = subprocess.Popen("nvidia-smi --query-gpu=index,uuid --format=csv,noheader,nounits".split(" "),
                                stdout=subprocess.PIPE)

        # Each line is "<index>, <uuid>".
        id_uid_pair = [s.strip().split(", ") for s in proc.communicate()[0].decode().split("\n") if s]
        for i in id_uid_pair:
            id, uid = i

            if uid not in uuids:
                free.append(int(id))

        return free
    except:
        # Best-effort probe: any failure (no driver, no nvidia-smi) means
        # "unknown", reported as None.
        return None
def use_gpu(gpu="auto", n_autoalloc=1):
    """Select which GPUs this process may use.

    "auto" (or "") auto-allocates `n_autoalloc` free devices; any other string
    is written to CUDA_VISIBLE_DEVICES verbatim (lowercased).
    """
    _fix_order()

    choice = gpu.lower()
    if choice not in ["auto", ""]:
        os.environ["CUDA_VISIBLE_DEVICES"] = choice
    else:
        allocate(n_autoalloc)
class LockFile:
    """Inter-process mutex backed by an fcntl exclusive lock on `fname`.

    Usable either via explicit acquire()/release() or as a context manager.
    """

    def __init__(self, fname):
        self._fname = fname
        self._fd = None

    def acquire(self):
        """Open/create the lock file and block until the exclusive lock is held."""
        self._fd = open(self._fname, "w")
        # World-writable so different users can share the same lock file.
        os.chmod(self._fname, 0o777)

        fcntl.lockf(self._fd, fcntl.LOCK_EX)

    def release(self):
        """Unlock and close the lock file."""
        fcntl.lockf(self._fd, fcntl.LOCK_UN)
        self._fd.close()
        self._fd = None

    def __enter__(self):
        self.acquire()
        # BUG FIX: return self so `with LockFile(...) as lock:` binds the lock
        # object (the original implicitly returned None).
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.release()
class OnceEvery:
    """Rate limiter: calling the instance yields True at most once per
    `interval` seconds (wall clock), False otherwise."""

    def __init__(self, interval):
        self._interval = interval
        self._last_check = 0

    def __call__(self):
        now = time.time()
        due = (now - self._last_check) >= self._interval
        if due:
            self._last_check = now
        return due
def apply_recursive(d, fn, filter=None):
    """Apply `fn` to every leaf of a nested list/tuple/dict structure.

    Containers are rebuilt with the same shape. A leaf is transformed only if
    `filter` is None or `filter(leaf)` is truthy; otherwise it is returned
    unchanged.

    BUG FIX: `filter` is now propagated into recursive calls — previously it
    was dropped for any nested container, so filtered-out leaves below the
    top level were transformed anyway.
    """
    if isinstance(d, list):
        return [apply_recursive(da, fn, filter) for da in d]
    elif isinstance(d, tuple):
        return tuple(apply_recursive(list(d), fn, filter))
    elif isinstance(d, dict):
        return {k: apply_recursive(v, fn, filter) for k, v in d.items()}
    else:
        if filter is None or filter(d):
            return fn(d)
        else:
            return d
def pad(t, pad):
    """Zero-pad the last two (spatial) dims of a 4D tensor/array.

    `pad` follows the torch.nn.functional.pad convention:
    (left, right, top, bottom) — the last dimension is padded first.

    BUG FIX: the numpy branch did `assert np.pad(...)` instead of returning
    the result (so it returned None), and its pad-width ordering did not
    match the torch convention — numpy lists pad widths outermost-axis
    first, so rows take (top, bottom) and columns take (left, right).
    """
    if torch.is_tensor(t):
        assert t.dim() == 4
        return F.pad(t, pad)
    else:
        assert t.ndim == 4
        return np.pad(t, [[0, 0], [0, 0], list(pad[2:4]), list(pad[0:2])])
def lin_combine(d1, w1, d2, w2, bcast_begin=False):
    """Element-wise weighted sum w1*d1 + w2*d2 over nested lists/tuples/dicts
    of tensors/arrays.

    Args:
        d1, d2: structurally matching nested containers of leaves.
        w1, w2: weights; if bcast_begin, they are reshaped to broadcast
            against the leading dims of the leaves.

    Returns:
        A structure of the same shape whose leaves are d1*w1 + d2*w2.
    """
    if isinstance(d1, (list, tuple)):
        assert len(d1) == len(d2)
        # NOTE(review): bcast_begin is not propagated into recursive calls
        # (preserved from the original) — confirm whether that is intended.
        res = [lin_combine(d1[i], w1, d2[i], w2) for i in range(len(d1))]
        if isinstance(d1, tuple):
            # BUG FIX: the original returned tuple(d1) here, discarding the
            # combined values for tuple inputs.
            res = tuple(res)
    elif isinstance(d1, dict):
        res = {k: lin_combine(v, w1, d2[k], w2) for k, v in d1.items()}
    else:
        if bcast_begin:
            w1 = broadcast_to_beginning(w1, d1)
            w2 = broadcast_to_beginning(w2, d2)

        res = d1 * w1 + d2 * w2

    return res
t.dtype + continue + + assert t.ndim ==len(max_size), "Can only concatenate tensors with same ndim." + assert t.dtype == dtype, "Tensors must have the same type" + max_size = [max(max_size[i], s[i]) for i in range(len(max_size))] + + res = np.zeros([len(tensors)] + max_size, dtype=dtype) + for i, t in enumerate(tensors): + res[i][tuple([slice(0,t.shape[i]) for i in range(t.ndim)])] = t + return res + + + diff --git a/Visualize/__init__.py b/Visualize/__init__.py new file mode 100644 index 0000000..06f5289 --- /dev/null +++ b/Visualize/__init__.py @@ -0,0 +1 @@ +from .BitmapTask import * diff --git a/Visualize/preview.py b/Visualize/preview.py new file mode 100644 index 0000000..ea70521 --- /dev/null +++ b/Visualize/preview.py @@ -0,0 +1,64 @@ +# Copyright 2017 Robert Csordas. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# ============================================================================== + +import time +import threading +import traceback +import sys +from Utils import universal as U + + +def preview(vis_interval=None, to_numpy=True, debatch=False): + def decorator(func): + state = { + "last_vis_time": 0, + "thread_running": False + } + + def wrapper_function(*args, **kwargs): + if state["thread_running"]: + return + + if vis_interval is not None: + now = time.time() + if now - state["last_vis_time"] < vis_interval: + return + state["last_vis_time"] = now + + state["thread_running"] = True + + if debatch: + args = U.apply_recursive(args, U.first_batch) + kwargs = U.apply_recursive(kwargs, U.first_batch) + + if to_numpy: + args = U.apply_recursive(args, U.to_numpy) + kwargs = U.apply_recursive(kwargs, U.to_numpy) + + def run(): + try: + func(*args, **kwargs) + except Exception: + traceback.print_exc() + sys.exit(-1) + + state["thread_running"] = False + + download_thread = threading.Thread(target=run) + download_thread.start() + + return wrapper_function + return decorator diff --git a/assets/demon.png b/assets/demon.png new file mode 100644 index 0000000000000000000000000000000000000000..bad4372b949d0897fcdb70822898e3f45afcfc85 GIT binary patch literal 92114 zcmbrl1yEew(k_g9a0%`X!GgQHyTd>rxWnM??n!V-(4fH~1Q>Cs!6euXDH}Y~)8ch{tqy%Pd8w*pjCl)kuD^=nCAo>b{e)svDZix=NM2hyiqJ+3DGd? 
zvJJsd|}oWh`t~hXoIGh0t@NmLoKqZjWTUpiwiFDQno3;fsmmDEC--(PwgKjvhsLWE`yFW{I#WB|Bp5QtJmxKDZmk4bCM@J#*#4BEv}avDcP-~9LyXVKUx+fcxi$w|tT zk@BtjVhyS z$~@5%ue8Kz(#u?LAq3as_uB@wY!X;~VouH6RLrv3F8dhi>`#=WpQ%g;JKtEE7SIUk zIgn>%FrvoJ?GImWOFn=Ur9h5x5fvy>(O5)+Z4W})&oW7dVc4FNNu&jS8q;d;V9e!8 z1-vRH8RRFoV?E?iL$-d$QB@LN0Nd@M}lU_4-$s`4YeS)yt=fEnm0{t9)xH~fr=CyE5rGoTAG_Gcn(et z?KO-B?sCJst6ZIuA?#AMN8@G&-m+W;%`5WLr; z=Vq>(@S@olk-`u!#bKHGm8)1DjWz<4I?7y1b{P)i*ULEr_)W;lo4Y>$h{BgbRj|@N zjSz(qwL6xhG67qPe<}v5ROq{XN~Kh{zB&Fa&+&3ZvbZP7r*WQ9h;k6B2)YkwD|oyK zG0pXTGIRZ(+1oYO`|?8~;TUH4uW|Ws>LfpN%1(+;IP9hMr>dEY6Sys!tFkweonG92 zmj8fSrS6=8WSAk=aLpqhlX$+hLtr_6I~!sQNnr2>;|XhgFQjz3CnLhZe^=8b#0keutp08T)l#orPDIR~IU+llpLMn)%z| z9khAwAF5$d^dMD2Tr2$gySS!|p~nddip$iI@&e@f_-W^4?hLaPO%`xV6n z^VcAtvy=0#BR`8_BEjQ`vB<%dnGsdiAyruuenQ?23eU$)3r#m8ErrSI;;F$ohJF&~ z;zLsjGVOvNhpq}3ZAzhSBNhuR)SS}dWeTJB zwcewS$i_}!yYm}F&la{#&|Sm%KzBrV7L1JtpqUxG#i@rFu{*_u_JVBm=;(TO%|GbD z>%?+{zMuc`gFou_slDUFvR-FdhOb&j5<=eFdbk(zBQyy=gJhj+-s3%stVRaXjU#`9 zvkqYkR{SJ2K=qp@4Vwu`5D5{vWMgSs87xVI{aMPlkid^vGkO8C0Qmrkhjc);AS#=Z zAkk7bKMa59x(lo%kV&Z)5sYU}9Uagg z-0h$JAY3eJXjz0gd03oTR9?(%$U9PVxA&MAm~xNtU~)&_H<~<{`MSS&;Og$(dgWf> zfpM@pellmVA3qa2GdyMZtx54LdsWjv+ym)eRNrd}nAe~-e6O_WY}!rf|h-Sz1MiEXw;`%v@3eHF6frX}Zu<+nk))G0ui zZ)6_hw-Z^2TLQPwzLl1x+O!|oPdR)eP<|TuJ<>f=Y(@NA<;%?%u948+^&@j%$hpsI z&Gg{(qF84QNb2uHKMa8CUG>u|H7i9FHHVQD!*_IsJ%^#0eJf}xq;-71pBiX>@A%I3 zea@gs|87BVUTW!gtxe6tN(Si2fw|7u$+z$JdrpUO%nGQ@s;%T!=GN#5^n~yvi0&RK z7U>sh5LuBRCF|5r(jT8ds#2O7nL1Fmc#y(%cznR(xw^m1AHzOmoo1pJwc;l zfnwn)J8JRAX{Y<0UTFD$n9AbIj;G)k)T z>F0DDd@j7kK0X*l#CUct#t99-+N7lYo%(r*eVh+vhq8~8vNNF zA3UP-QoZ=3A+YnP^;9OUIi}3@IYaPdz&+=n{Qz+i`GE1DZBq0S%W{hlE{;ca8KCUJzl727kDRsMh+{9nY@4+A1 z!r0<_HQ;^V9ZB<;LNZuJxXTq|<*tzP_3LDn-`casgMXde0y;lc9+MLzdl2z$EDB=p zUCdKBbofm8nHAlbkXo@W4Pz_cu+8kGzG_TQOkj5W*zTBPmIc4{M)s52f?Lz55GH2XNk z+l)+pX(EOvW^+PWoNivQcYD~yFoYO7I=)$3)Uwkxtc`E}?!2^hGPpGMy#HeHf}Tpc zBPcQ>f7t5TcP6BLrfu3e>^>KR@I!oJyxw={G(nis*MP{FsOj{>DeD^IE#vLWh32E` 
z+@0*Hli<34{)P2Yo*g%Pmyx)WI^sr{Y3Q|q=z+1H+uq6Rm=y@}!AOjRS~mdhjU(;3t` zP?d*?YHPq%UmCG7FFbGF>QKkc=3Vz1q9a?gWV5EH?ww<=@RV=EAJ^x()pui6)OV7n zrhZB1>$j4xJ=Tfai6WkvLf=HIz@Urb*RrLl9$|A~(mS$~D9=u7}PE6wW0RgHE zFG#hnt1CD@G&HZV$IJ=ZS;*A?rxp*D5V1XBQE|fQ-ulw5Q(lfyf6ODE8WcVGu+pf?nh5Lf{C>hQtcoC54% z@8~7~7N+{AhXC;XZ!;Se#Xnu#?S!fHl+`FCom@XqaKC&1j-5&bg@S@Y$kozHKtoFA zUzY>FgsDEdyE_Z8v3YrUz4PLF=j3Y5#=+0e&&JNl#>vSF^k8-Kc62ufvpTv_|JP0a zeIKb0ZWgXK&h9o&jue0IYYuYqa2KYc`a95nzy9kvKY(riXCz0re@zRRAlu(FY#i^{ z+5Y?9z@~Y=0;49~1g7Y5mg*h)V=Ti0!{cFM>jk)C$CYP$V`|s#?Gi=I_z~uL|JJ@UJ8A z-8W4Pk$e~mN(@R~N?Z#JeQXR5#?_q@p^*wQ>k@7j?2W}Qnf}PA5tGLBbu}Q+xK!|K zuVQRl+U8fa<&I{2MdzllXKNw)V zpo16ct*2@&`b!O)9MkyScH7utguhZHT_f!asrtm-pv44HIU@$6DIA{$6&Mc2idE=* z|JW4*|HElbgh{{7@-o+G>>Cmd4vfH9D)L9+c&dCPBp8KbcdBJ>xa)AQx%@dCdg{L? zvFwGlzle!_U0z+0q4iwdDEZzYCT0pY2`W+6d;@gQ9Qi&O{0$S%eFW@9oi^_snb&v= z18v;?|J0!|_}4^{?7`#B(S@z&!DRUCcSG60mm2~+W*wH*i+1lTjYj*0&67FO7%>GY zu|02vc4mQZ(@{a#f@h~+fRFR3SsqLzG_>M@0XbrB3td7XFuU;8Z?0OeLtbf$aH{+( zq43qWq&nr2V$nHXO-jJWy^S3)(hs}21jm(Dyy{(G=!4&%xu)~QVNsBgqrKWqI|HU0 z9hSgP9+6Z@*jVGMV`5LFiErJTiDG0hJGYBw`6~7VW3}|PlfiY&*Bszj< zBmm*b!&YLq6?bX-HUtO#Jhu-j*47aIE4&xhX;e8XIIW?a_rI8G6^V=*bcvDnQOExG zTUg47&Bb{IG1p$9i#nXZ!?c1spj5NI5y83VmV7b%3adWT6IwiORnXw>a^U+$YyY2se$%Z{MA8wqydWtSZZ*%(9&YS!S5dl|I+$D zv@5i9z-s=N82{<|uPZdr0HxCS{e3)`0S3^>e58L4>!d(ydylFi^S>JzVW1;>#EE(A z^F?3pLmM4e6tpXJz9x!1%Jt#n;YqIrJh8GGx2Bl{JbbX7D3p$QTaO;HpBMN_xh=C+ z_W>1;MK8(cdS6^J0*$328n1C(ht@FSQFi_h<#eI%PdweTU>{rTP{CZ=8f z!%$km@6VE@dU`GfEw1p!Z9eH_f$ccjrD|Q7CAaegWGQo1#zo#&yA#!&uL`=}PnTbZ zwW^G~eacnIQBY8*iXns|_HViE_3}wv+yU+pS)E6!J%i3U&_kn%2?ZH#`op~oq--_D!JyFFb{ zo~t%3>JE=pi~rKVKt+|^=(HrGoW)Zh6Hi1q0`j*SND795fnl)di@UtMySq#D5aKX> zXWUBD3GRH@C2sY(ArhYG5R9~Bluk}gmbk668e!UtwWNCNBd(sxH1!j|Ih;xLkLJ&7 zYzh6fGn{#V+Q;Rz+I~H2=yc)N{Eh~FGP2dW5iZ2u-TM{`2gEzqQP_ zpR7VzwE<%1=V(29rhzZC8u>F7y2JH9>=)|fJlm9Nbx!@V|F7I;$ve|t!zr?3j2CK^jLba-sduE(lO~&zD;LP4kI&&yYq8!M!~gjr zlFH^9zU)1*uFnc0W6uwpsr{QdQ~LHK3LEgvjPAkyM3Gjp9G0e@v(vXr|1yoQG0Qkp 
z3XYzK&!ZG;}s#rhERNpRIfLF%^_L7m3ijv~dAq_yXZ5c*7FLbLJn7Pw z4ec@_skCxs=Wwkl9|RN>eW4#4Wh(iynhWu0pVTzq;j6`HHa|KY{kI#x+N;gpSl5I| zGM|Ai9F@NCi!Uz^4-fqJJL_ihUzS9#w;ubMR2~udnsdZmK=`WwL+i@CMyh->R_LZst#5MnkMu#uw|!`(-m6%V#@){ zC+ZQs6O654771f9)A{E$%b?L-8bkP+eP<{qQYANT-Ly3ec@7p?O-$@}H9g(>d(gcz zlXiJn)YNCcECpnL2i&YAI|Wn^z;;#dmfaGKh6*Gjr8w<8jgMx(r+P;V(5vRAF(olb zc3U_7H6{nIs^SPA=ai4j&9!KF8l&*#d!|~{YRn7T@SIksRlb;#8m!%ueCqxdOyI?7FV+;RLQ<|va0>od4ZIZtuuDB z(Ld1hlIDepN7!e+&}BYkY8B=a1o06}kv3IlM?X zdaUgXP)KK@<*UzhWg#i><~k+J%7q9-uz}}l7-L`!Sc}MhCK7Qo=@`pt{wSt^)4N=6 zH)rv$At78UNRA5}}-+oYhpPaEmJYaEV7U@`cw+DTk-0)^&!c zgb3fGFClCFEuV9K>F|Y^JL|7#+DKQ7d4^eUq`PhkMtj_&Wv9k29?Jy4l!y4|BgQHf zTIcV&5Xt%GVE=UAfJgKQ`e4~O06E+l=)=l$ch(}D`_;9{2(LPSH;aK<5)qs~oo~Sx zn_tF5Puw)yW3%>e)bP22?M|IQkL95ZT_bc z8cV_DdlaF3Js(=KGJ7{!hs7Tp=-xNTY-C^^aV?BUXk75|uTP{Z7;m{yL^$3(x0GAE zpgm!Z7W+M1-rE}7o~|eYW?jvv{`O>ozA-)^KH|C1%xNYCPExCMm10vSj;{2ZFdm)C zkEH6zlLN329t0=)%1No7wk3CN^AD7iY53->))r#*F z2QuG|t6{X8tt?0&6^tQxSA|N}g@hW~l5B#KO)KEOPeF$Oct@;s-fsS8=5qw8w?w$> z8JOCYqi4lgYkHl{=<|q6{C+vGm)7V7) z37320?l24)r&O~<1tQARtFB74{HojGcR7hU5{&s*Gw35T`*5{3OgXI%8 z<17JL^*s}&^x})u7VoUQw1R1hGT=J+K4`8jwqIE2I8iBTH97BM!JxRyGW{}0=IU_X z%8k-)($k|Vmoyeh<`-l;6JujX5=qa#c|vK4h|l<^kP{sXRNG~j84t2~eSR2y=|ceR z)`kXMzwzxX$qO(*e|f$d1BoJ>l>xiMkX}5G%?-{KB8Cv07W``s`I{}X9F`BXI)a!L zM$y_U7w^`HK_ypN+x_v?drfS|^EzFwinM5au=*%ZiF6Kf+a65q@9J!n`aQ(q$RgW^%~l4R#aRzi zt^Knn;S0Cj7gCjLk*LTA^Nn?=81#UsC|fMDitz{Hz;l&ej_kF=#3DPvfQKCd#45tv zuOBU*3&_EUSE$CXHSzv@7>dU%Pe9;O0>-_79X+|ZVAGS`>Qg4k^Sdmwm{?Pkx_R&P zDj}O_(m~>Vxr>*6>U-9Cc5i*)TKknt$o!(ke9Nl8W`mlfy8W^o*5rp0wnffSSxIin z#Rc1{-qS^R^ku%a{J7Ugf7{O z*rq9@TkE<0A^Hy0PEx6VOhdS=pOQ~0acQCKsQ=}WgFTGC;zFsX?J}Ie1oNo{8Wv$d zIEX0&o>S&VFE6S)=t=*(L4)3tac3zc0ZRe-uMdZKgz_@Xi_}p;STVnEWfBV&tYhsA zhv+#a$%qRhAQP#U>a9q&BFwOZ=r|T!73S2U9pGsEQM3}0nkn{Kiqq`fc8^1mNj_M! 
zY08;a)oi1_tYf4QgBPiGP(&+1IRtxBzm(+~)cPBs=u4d^$UqKq!9?%dGll!UL^U0) zcKCiA&o})@U9uU>)Na^xdJWzirP6mlLG)i2aagX&Wzu0#W!1Zk5HUt=ccE11=g%+X ziK&Pr(u%v6#&4o{CDR&@T0p>yvJ-EoF4dwS4R_~a#kWikisYuLKUr!bm>~^ODH3aq zHEsB$+8p^D+KUVGn=rs^s5Yz|6p!u^KtJ+)+uOl$njFw46O==`?Tr?_5`{&*52&)c zju@s<@z)UgKl8N~(c#de7JUf0{Yg}{IoTiK^ZDiMCvGe{0*pAqXQ2bx6(!t}apjCD zS;OgmYhm|$V8UDMj~9G9Lq34}W2B$!NY4fnov!vP6{iV3FxzC%eLTr?osY(dgnjZy z0_sf$6Ni=Lrqu`B;n;Han4elohVm6aQn4e zR0*=nqGf-YZy{3{Zya@68+8ggtME;hdRG|u*tDR*`RhcOU@TC8((h(S!b!;|XYQn+ z--$w#6Bj*+uq5%syctJ(rt}i%7d3Kn&>La8OvEUHOuFSPOe%2Ue#I8D{XY)4NF^iC ztufYOBmyYW$1>SEw=8tV-WmSnBrAg*G;FslD|HII-#|ocAKlCVVncoo>mt29^9&w- zv^!6p7(BdpRUxN=bKis+ z4ow^?S8-5#a_R8`zRKQ$L8hw?qH}5x(cwl5CpHu5J zxy=Ydg(k+6qu9bfhQ&hUwF7?;9%L81LcX|xG5M<* z{wZ#&MtK^PAm*Bcn6rkQx7KVp=;HL-I{fq|ET};Jmm5==qK~oY&tK^?$CqS5K~M%P z?aU(sIg9fbOPt*JJ!Gh>wF6%Xn|>w0@W_-}TSiA2_Rp<-o|l_N-9C6eX6j5CE^%1~1^JV=dzwc&pB!=@E>!TI%(sN@j^t2V*ew3|OtSXVKMTBBC(hAe zyAsjrc|zy#nW9es{`l_xt5m(!i+IZd{^mi+Q`C3eN`0B&X4jj)@QkF~d**%^0_fQ@`LLkyf~eTlQA0Z;`V947U^RgOd#g0wxVM zI?znHcBGcxC_+dWuDU43b2PG|r9?VQ+TL3B>F%T?8i&4S zOA^6*fI^0J^KpAPQ}q#+FpLX9BCclTGvp;0njKA@_uM*wA>5Z6eo4tzTS8UgKkqCT_Z@0K9LZ%?I-?MB`BAFj{vz%zMk#F_! 
z;!p#TIbz`TkAOUmJ2E!n-rKnlX|&0hZhhu#596bK&&gy!qr9wiLA$&L%(ZQy<_e1v zEbninGRadZyeUxb3l1voOPX~60xuTrywY)_q)R-=Mh_g($nG4iV*ouwPH z?=Kmd)i~N(L9bIG-zOo(iG+^^5a^H!PRi}y#P8ku+z+TZIb%A^QKMurmx80b4&`ni zrHy#AxfJE1z@@pH%_88aw>#pa4+aSxAFRh-Q!UO>YFU=iiURh~gHimh>O-d8q|a!K-LM;+7tLv%Jx3-Y!hMWZ%7yz5ws zpcW)#78II=q>sLa?g75BW=g)aowe8jsY78%QT2x3UVeC~%dX&2N^M(_fNg?Ryt}pGQGa9F!WMuc&;)DP8_I=$Z~~aAt?7u z^ez6l{9c9uy$j;3iFN(MSir&v80gwDXg!hpS$cm!L4TaDDUd6l*Y6^%N9S=!0J_aA!)YLZ zydN%UDN`5fKa?EVUwPgh6DAZih0)a=LSNje5gbPB<(@H1;cUqmMzx5OG&w9OXnvN; z5%Et!z#yZt@y;(-jtRM-7{NFXjR!63X(ovfS|(iaYFupgrh?v2?1t1G>lefx5FUmC z?U;PV#)v`!`}+FiBr#NvO{;j}ed>ly&GK;$B=^(IG}0nVFHY#wRSlVlpPYR!#fBsb z_jP254}-(}yLV|0JG0e>%qnlbKRmU2yV=kvQo95cgKS37F&-Qdsb$vs_={m*BhUCm zm(9l_mi{zn8rW+))D>N3S(Z|m zfi{Ru#iYh37gXs&B0!m{_7E~oSd5M}CO%Fv(j$T^H*c28?!IqEX%D2;zwX+*@ns5I zav+r}b=^yZIz+>s!Bl#U;Ai;KH@+U>5q-rUTRcygDx3q0 z`0K6feeb>1aoqfb=p6Sx-jt_jgC7JcEMKHPpnCb0I`i)p%864VOMl@EHCt#AaXJ$F z5JhtbWbdK8bRb2d1ZfHW87bh8QTPg$|OrZWzYlZm<8#47)z+Z9={xv`Z_#6G07 zc|y7U5<@sXBr3}O15>4AK0{a_MQ$R88HI^*{2A@wb6djtGCMZJ@jw)`jSlJ$j*HgZ zU>~ed^yPCpYHhHiwWhQ`%9mjs+p`S}M5O%C=N;!3v`ry2kSOXmrqLNL(GAok*vysJ zrDHJZL-{dSj_1}%W{e}_Mx+^J`Babkn2@*q^1?@!X^Y2Q@Y zl)=F-RO+Of@E10C$P z(`S_-D@utjzXfplWZlhVkwrW2a}AP~M_#W~lV@FGcxpRCjaP&ljxbY9L*qt4PK%2T zXUS#>a%32j?$1)qrMmrbWYTS3XXYtH)Zasg@cM4myh7e?`kcy$+lzp4C4KZVzZQAf zk$7z_raom0bfyx!2%8#6h|Q@EwPgVXt5sonb?OiZ(&66<>!j`IG1Oit@$p6sCWYVv zIvTtNP1xT4P}L>3?KE~boP6%*m|5Xk-C5Tc&EMm1Tghuj@Gu&W0vsa}E z<@X-8-dYQp`Zp#47vGGV97#`B+A>d$VIcLmWQ~7n+-q25)z6lis^JJC=4`|pD=M_g z>cGRf>-Rnc?~IC3_r@kv8n>SA)=pA33X%JV?IsGt z{QIT|^6p_)tXvppI#jJBXqlAXHBCEBA42zrNe%8XT>U*Kwb;l)qp{$HPWT}g1B2dl ziE|~pT&^H!?x{>f>&HhdI$H~%Q2a3cRZ(|nbOAsA+m2r~EZ3deuU87rK zouqV&>ki+^9E*GlUYCQJVx(EG+6ft7jp=kX9+OsnUwoF_a%+A*MGJjWL@*7Cx5X7V z=IjB93Eai?`aE=^9vtUv3J=!!!}lvehtcEh7A9j+MyD5sTE*O!Ji1Ys6u1yXYJJF9 zC=u?Y5M(@*fWA(Qf`T@iYk}nQe_Zo`)Gkf>hq?%{9&J8p!s9ixED=nOIE(XjQ>oTlq`f}r zxb{H49Kt#?5;F`9IP$7qbh3e=TBkJ=x?=raRD6#0@kB~(xC3`=!JnEG;3VwVQKk(e 
zez&O9$m9_U>Lj#9@TXiB2ef8-6F3j3~KFMwYw2tLQ;c34y@cKh>OuSet8;Ug;jaJ91s zb`>mTjMOgEtHl-+kV5%HkuK=@thm)5k4F#7C%OI%kBXm>->3OaDP=wYk4XntkX!Dq z(e2a-yvSoSDHXzxh-i(Ch-}#$1Y#3W@ub6 z>cnj6-buAi2n}SnI4(cD0iczX<`Xs?fU?qP-Pm8(e%RnR%-4u_CyN=!x|`Naz)rZP z&-cb#WX9J^Ut56)zA&eSBD3WqWDB4j>XoX#L0^c?9YT z$X$~Da%=2#LPne)23b-dbd?FLIxI?@d}8TPh-hhjN{=Y`EHqn^0=247fl5FHR-ge` zjDcLO$-o05=$y5K$J?V`-{_z!ayIB66GqWFZ<2l}Y-0VQlJTjkKbDTY2B|G15c4KX zc{s#MA7lCvP|B>Qw}LM~gThNivZ^kgZ%4}w=NE2{?yzUfs&z0qa7&devWYmOnD8$4 z#ct_TbCpu(uehnGmif;qbl@d(V%^frWl{J*mR2cVamWnC0q4!qU_8ioiwuSrDoyy& zm)EB==2|zZGU6)0Z+S{fHsJ+_Q34Lj9K7)TJ7n~ohQSy;5=ERWPzkn~ES9g0B0b(o z62$j*Og@3$K1-Gr(6Afb5eU)De0jN}I-G1E_H7lRGS@aE-}`a`b4!}^2*^EGg#%d# z%LNEzrKI~HrJ*SJl$y44RjgeJ;z-FKK#-`7v@BsK!ONDIS~mm~nvkT+kz8TLQQ=^> z;liA95Iot-9s(dd$6phYF+qrM?k`}dfn9T2PoCgO+n?f4s01y#Sc;?Cc&NVZ82nQ( zG*$y{PsG--P*_Lz`xZaKLeY8@8+Z`89EZCiFPJgP7pbIC1h;m(Bnfz2)Gk8OjN8G% zJ-NpP6}nY*T(S&%KUfFFV`8JyP8kxRTg)=q3}tTk?+#%}GTBUj`bq4I9jfI|zT6jK z@RKeexb%tf5%B4KREJvzz{?V;fR7TEb0V<5clYzc<^PtS-V0Tz51!4Ba_tJ4HUB$O zWeY0Z>Ht{V;e#$`G$wU8wtyI6Yw_v~S9^g{%`auT=~)}x-cbw7>D|vge&0wRnT+)qk>cWMMN+}KBBedQkoIhSdp-r_`HK(N!dkcJo7{=# z>GI%)HjgIRD6Qd732EednCKj=Omqz~J{OAVK`YPA`F8JA%UV9?b*K+p_uxOkA3J(i zM{hRsSHn{gLue#l1l{-5Y-cM!%fT2Q9W+vzMF7vlrRAPSWrhJ5>aFvomSS`ceb{xg zDi-ie$;UsDeZNvMoHt)=9TY^8?Vc%1A0lM60TXu~-*}gDHACT4(;)d+doWdkOex8O z3~mr0naF0cCwQs1jOH_WO90KL&!^BnU!4bDpgL!7w;jvoXLUJQSGwrWtN%!9y|DiZ z!MR3XdZC=JtDqU*R}{qyZU&S-AvP*N7w(uk|EOO?=hT*|)-5uD?1~@86aUoNGvodt z(-e|h)$>A1qcphO;>|nmO2)pwXs5EC- zRCi%_>YDf~!;tkDdi|c~cyB#wKLjZrTN9zHLqgw8?6L7?X$i3SnBJo`k~| zn!o9iY@$NsmGNhd2{Hy8z>>7W5hKh6*7yw$c`D>s9hCWY`iF8MR!y68=i?!BwEi7~ zZ!ZEI++!)uTPc|ZD-19Q<_`ccAZJu%u`G%Rr5rqtBeJiv`Ui{4mM#* z)*Jw)(-M!CfjU-HBmn)uNi?>R*lki>-{P@jgL1;#{GU9LK_qYA(uhX`^<`#4+HcA^ zL*{DVVg&FTopS_)f&9rS3uFvQC+;;8owS?dFarhJoz(hl)&CQNhyuUsF3t4Q@zkpl zg3}~WG^8m|&5xKVL;{BbX5chF)p$S(Si#TF&$GyC- zO2@4+)+|xPLPplVKhc>~A<4zzngd0&O*MMFY?tG8+P04qrq@T+)ccqfPVpKwX5z@{ zA=&lkpQNF-0XD~^q0>)B9$$q+wBqY2VqlGZLvtII&j25PF@gq=q=-ox2(pmo^_wXi 
zWISW&^;crhaGgQ(4R!?aq+DOnN#@6LD@yp$hPZup7pxC}GHiwt)?1jUmBX0|n}K!l zAJ$`zd!+)-p3wm7Op&DR&#R!)n%@oTU#&fn3wuB+AXaM(4bo2m`)eJ6Zcmg{f4{~iz20QA?7D%nT(N^mX`lg3s6}! zcZSk?%U}}lki?#6GEMn%7*KE-CYLg1U1}p&BRtl}P)_L9$Of^4LEv6URy$wbH z{$HoCL@^#Nv?Aaw^dwUAVF6MNT&B(~J|Ja+$riSs_aQT7of>mhE2Y{;Jgx?i>?~fd z#$sYY&*M+Wk}D|Vdy;#?eDSLTmif@SCCz*B2A@evc>pfcRqIv${W6o_@$vdXx zHxGPrpi$1@3?A>vtexO_{ zI~>>PatrE-6>$fpB*Fxc_9+9XA=dy3(kzmRf8QQG;#)E!^w);CWDF#+YJeal5swpj zsT%Um9Bkvjh9x)9ox^Sp<6)sOSE_D-EP-aoCwKC? z7C^TQ=|iZJz`z>QkO|IZC>$G2jGxfR{yOcJ-i-!0b!xngwRUq=9dW1-baU0`=VypI zjNk?f^uY=MBBfg+DSE`kyEeJoY^LZIhjVLtySt0YF8d=ccwmUEk&z2*G)vSda5@2c zk~)wV;v+Lb&$!>|aH!bJ+}7Jio+JabY-&|_hFL)bc!GNbyT2Jdb>G@(VDmuTMBqO5 z%04}+?H8CRdPXD`tcR)d+8M%Ap}oyX;^s39W4FnAZv(?>RFiaiHXa;9!1k`uomMe} zN^VyS9qS!DzZIKttJq&|j5DL!*6wIdR$cuH=_A7^O$}6kXT3nv0@&U3QATXf>+$Cw zpPN+xv-Jx;$IU7nqtRS*>0{Q^I|DbMaXFTWdrNuik?0bE?6r+jK6lpnq$Ba^dOfI@ z+itcPNG41GOgw2;R0-WycN=7nkXX0^SZ^t44u~g^1dI;$I-YkCAjG0v6Nefz$Z)$g#v2@}LoCA?63Z=2w|Avr+Os%R|HNgH&?#t(0{uMHc zBjK|QxD)+mfZUf&xJmrDm8eg0y6qAzxo3kJrOFGsXE&&`td*M`%3zgku$wdQkM?0H zLoML8)0z4fPb|He5j*pp!X8+P;<}xoS{!M9^i*sJ9Gik1Sri!x5``RDU+6qo?csE( zO_zl8a(9HfY#^zBHtE%ysEADU>2LPu=}xbj{Qz_PW{cQ9ef|MK_N12%fD`Y!GsQrW z(`Lr=R#jFb24udGVJDF2Es6Qdond+v%+H)3Qkp$|KgV^oz?4CfxEhr{b4v7~lZz-> z^No_yxT@9blo@D`h6{9cby4-Y(Rx#Wtw*yrYWbu#y*BS%Y$uCrulH26k6q3UC`SO> z5FA-n=tyb{0z|VGR0SF3<+Eku!2MwW`Yvwl48_fEGGvuAwsFnQ`c_+#C#WucWIxZi(<%4YW>(1$g(FyMEj|?RXalu0X5mPV=dJ$v!8IY^|XF!)pr&JplVh9&<$E&|~F z(8ZGp8+0eWw_X19)q0k(cl{iSm>-TN!uGvm@DN^ssu=;V*)J%R4T1ZBY>Tx9$0eQV zYHz-GIpD$7INUv?IT3U+AqMof%31G|(Qx?^@uh$Lowil!zKHZMvcEunW4kN^TTm70 zzy*kUyf$6|uFsEH&5#Dil?X2R?M4#VVhBW47Q)-Ns9$qaJ~gbWts%wF8-^DUCH zi4P=y;gxuZZncSI%hh%|MHfN~Cox(AWzKwIIBFSC($;EGz#GIlG9ZAwVT8yekWhwI zX;;i@HP~|3X1bj$%_Brhc6lYqU@BsEe@ddCvuHQxvi1}XcVvbM0bp|uQjFLpFk~wD z*vJqF)PQtF>fa%~kbMiNGK_3gUedYkc$zF=HVkS(ElGmg5z@9b&G2wQCG8xKY(6qR8ERS>UTkbNCV-3^3;|+9`iuFNM&N%0F%gdR|HOy@&vzl#gL!68%lP4npoRn|YAzTS@e|zX zH=v+;Zt9X;`I#F^y@z}?-phSB@>Quv5Z;+A<5=8we|q!`P&>E7gt{ 
z5j%Zn)Jzc^O~CR=)nT5$2po+K?8rbUgUhMg?2M(}S?Ksj^+}UkXKA)nJqMj!Kyv%exn)z`wpO@Xn}u-s^8z@dm(O|B7V1Q16NoMX*c40CcVy90Je((`d5z zFl5Duuj0d=+atT&^B1>wi$dO)@yvI|xF7(4dJp{`#UglTlCMFlr~&XB%glQsB=#wK zrwWnu{syHegg|%-{tKtAQ|1F9om)@1M2{y^=RRcp?7&`LwM{7rduNpMEC*8ogG ztm=j8pN}bIfzK{J5t>mzki^WWQtkiE95Bt2jjW2yjO~Bq^uPFe>#(Y#uUnW->F$v3 zQ0mY~BOu*z00E`r&`7tGbV`FDaA-JmBM3+yq@<({snT&bzxR9J=RWuQzWcu(_S$o; zxyBrGjJXH@xAz1TjLaFy`enyj(yCadD0Opfi3B2A8&K+-NFSb=Wb^JoRa!p6a~2jB zZoqUBu30Fd%HMM*G}2McwRH+FGWfsd({~%SM$lBCMWba7^Zu0OuBr%-QxS8=2lL!c z?(u1$^oY9&}%Puvb|X2dbjS=1hE>NdnO;$4Y}`SjN|sQ`hO+w{)J z<}@WmFjZnZF!JkaBre5LIbYg5l<5VKm?eU4NTnWEJOK}l_?vm^qrbXDZqYz0dLm~O zd!>N*t!&v`RuY3?DvnZxj8@HV0|Nt*@cFN%{HjNX3Go8`8M6ME`&N2-X?dbzVp(ij zIVymRzrtrdD4JT-rKSLi;c&B01ndZUrWu^L5LVz9d5To0k%9hpm^ybre8_zNDqzO& zK3<75@i;b6`JSpn_8x-A=+EdUr-5M8pw#NQmk5hph0S#Z_m-)1EJQ~sI=Sg0riF6DldB7zIf(q9XU3i^l+;9Gws`NfI zm;)re*a2Xbs|5d>s{boT14IXqYwyRhcg%Kuq>eohuId7R<-Yfbw~IFSJw^*!%?&-Z^D9UHb(;n1JGaGI+|ubaz#0*vV_>iu|Hk&ApqkjwQhLUqiAym-;?2_u79j@c(0j>|g3jI|XQLX|kJ`sN{lJi(w=O)+JEm0^QuLdrO zM~;Ciedq7nW;-4W^Cl5Z(WlfZ+G@i80{cJcRr`O?EAUf*JV>txV2>9cf&tjjXDY0313FcURE4tugvke)4S2R8LwF>$!KFq`>~-{5!l?uLF4Sf&)Es)Bd|G zF~Fs8M?6)a{rz#eni=qPK654TH`YuN^cXswgpc&XIptcpYU|LZT0pOz#;Q)9Ygj%{ ziu|4i1l)-rQsgPDz1u!vCCEDtXk&Ql!Wq@mV)tjto`4!)cw@!nbQuyi!)2*1efDJ? 
z_=z8)>w&Mi4S6b<6r$WTx)Kpoxl8towRB}cPnqjJpvExE>pz*R<$upyK)KXn_ow0x z9Y_0#vLPC2#C`TmO#~CC(lq4={O9+_7r#c3Y5mLC{~fR9$}1>1TVxyPqSdfMBzn9d z94G}u)0g4@M;<@kcm%B4Cq0F=7BHoU;D@=+>mZ(Hb&Fe(N*mXeHo3x}pM`+!WpoDS z%5sNO>SscV40_5_z#`wEO+4kPM0NQ~AWw0CP1d=eE(R3R%e^J_7}At*R{LB|F0Qz6 zRKtuFs73xGC?f_ET+lTPZV+HHV%8J|wCY9lQwq z^j5Cv=l?F)PMh3L+II_#HWZ(o+cgHX&ObwmmX@Qo*2UC+udSWjNLLE6pDHMsk~!4E zqY?QGcy>`CY zCF=U*JQZ_*JzaB`=*IFwBj(+OjJiuJ502bB@ zXn_#vH32VF3wXkdh-7+0w3??E`th}VBG5_2GBH_vkJbqQSF@||7gLF^2rOzjwCjpL z9|86x2f+GCN=T^k#9jb&RQ3gW)#|z3I1B0!Kuob88127$}<8-VReIKKX-K_iB_3EQ5MgaPhM&O&5lWFbaENy0Y3i#$Zls6s1g##E+|ya6EI z*+ja>8NiLGc%Q>>8O+sNq;Ga~2=?x^WGtf*@=cDAuCnOzmrQqFxd8wo&=2;MB~p=m z%4tdY@6}l$wGMlb)aS`rk+`WCVJ%^(m~|%Y@?jX^dwpj`PD>toMPPJ;DstP{JWC!& zdKPcDWU;_K#%fES8NfBDxWPVP-25|){$ws2AQ%oW{N2~?F)wC+JmTvn(Mw4#_$+7B zYfwCem!j2}UnurJIl}6Hazw0o8wI~bX|{0rEMS+~o5-GJE(`B?5paP>`-E))(C?Ly zBMD~xaR9NXoL-#B$S&4e*hR-yH`$Go|AhI}5%%zz!#oUPN1m8EdSyJ-U#;tbih!ez zwL+Zx2-HimrW1F`z8rBzu4?1r~oM{ZI$fwIT#~FEX)E5Ta z9<@gL_PxbVl?;rEh`=He&SU4Z?A8GMy3aq-il_c;c|YYiP92}057TC=pEYfT;N^pS zG5{Yn`-Pt>erf^ZJV394=GP?Xd~u$(ldDVEe*-o~L0pP=aKQ2XqbMA6|A#VY2HFD%@AZp@8e^$uycH;msv=|94ie zLX(dAg##f-e6W;MSV)VK1y{s{d&wF+QyM$hq)PH`rA-j{XId{lJOu966WTu^r6;n@ zp9SL3^x-l1=5#BG^KKCPi^10p!u}fL?*j$0VBh_jPr}=Ok&r-oPqP466CE${f-aWq z_1Zn`?5{|7k-QX$+CDxNWH|rtf{YYd1ui=(eZ5VBO^S$xQ7I*XZt~K#@{T137`(&usJ*jCf@;NHM_ua1o z1eIx$^TkS}io0WZ?*BF~5t6fTkKT$Hn3G}a@s+Qe? 
zfJEIOUJ?R*IGhUoHsz+;_iVEPS4aBJZX)Z4PDfPqDA1#MrJl)G(#_wk!m5^*CK6RT zy+>0>$;|=*%w0vbh&S<{hk>Le3|v&XMa8$I$N|7b1&#hxVFE}MfutA|)eEd_4|dW` z5=$3`5s(U(L%$iB$fniv74Et_A7|!OxCKskx#p!Y}8Ox@jyVAT=i;92awgpFS{Rrg-qKl{sKluN8ou=Z7#wu*C~=r2bcz$ z3tTE(pYG0++d9J&Iv@je4oo#S6N3z2c!5-jWD6Dmqoo?0h-cHjz)#5u@hgpRaTp&} zPImKbYgh-s5>FP~Oab=O;CxZn0?#}E{{Eh3)r!SLMB&k}NRAaq!G_3B<_#@Nfb!k= zizZVuji?LllRZ|i%7g4+r9Iy9K%g>04s8+GJQh&DsvsD|Lwc7H*dkWIV;#QuA69~* zwTu|n7E}7;Bw3@Ob#ddPZEQSru@gwhnvH> z8e?Hp^i4g0Q^b~=}Sj=VT%07!Lv%%3`e zTUPLTf5MnXLd`e1?PxV9ng)O(kDBaDw`2!10F63xf4M#d-KPGbVbH4dD$k1;;BjSD zQ7;bb23Q->O1&8U-Ey)(KDF?E>%1qGy;}MgFaFCnfR~pE!dSUGRk-_55qr>N*+bE; zNj6@@L{3l3X;qT>(iFnxR~P%`r3>mW@9eUK@eIB)lrN>C7~7D@R9rjQmOy zLshCDEl#4a+~QnCp#Bw6x-Fy`cTr+21WNfy zOOA*)PS^PXcWTAI-NFy#x1A)k#Vj_%NcaLPiS)tSL|i7NmA>D=6O7(hg9A946hH-E zs+lCkmd%#no)vVw)@4d7EaJMRp>^in?6i~vH0`LlYw1)@fa*Ik;`eAl`pw0Wx{7)E z&hb;jX!mk8+s|>UrXzUf;s$7Ri!4L||G@^V1hoGWI_@+dZEgjI?aqtW2`UH!l+9ghT=^Xn(c@!Bp_s z>vQJg;BsXW9@|)K5M4!oj8w%mxlXh2u<**jYaG2Ue>|olMl(jQ%sl`+olHDJzw zpxk?=FBZ1NZibMfpNgw69|};xeHMSdiW{iIJuGLON zofP!tt1ubWA@#J+RZiTkF00h;!6EO+iA6x@T#Nq>MW@e5MjZRE&dLYzk>jTdWt2GR=};iG1Yw9AJ%4(F6BnY(G`5 zlPBBx?v95}K!~QELKL;YN6r^B35-in2Q(`|Jo1=AXs{P>cu@fiWhUZQ!xY{>K<8p(^nuy9l~tjr1&g1MeY3k?e6W>7BchquHq3_R-Tg#B3u zl0uTc5bnm-cl3~CxAf1}ce|7MC4gUuSg&y?j*Mg^5wYYMljeS2ie20ti3jNG%tPZS z#jQ5pEPqreZ+bUBtg0s$vmx1Ff613eNTr6AKI=lEkNVoZAW?pY}f$c2dsEwgKF&g2REvghsv?$)JAYdi~8u> ztUrZ`_GA_BLs{^?=u{fXX~-u0`iZ{@Hc-ptdkysV#ig!(g`YW%{(24i*!9}Fb+N-4 zByqk@WH7E-Tu^h4=-Y2GD|_dDi@~Yz45nYbn9Pu5BlL+)IMy7%`1^p2MsvzgWo24~ znOUBenc0ZhI9@kezs87j;mS?wmI}4>;Q)}L`mS^|h7N?g0HYp=@pq^}ik+wOmKlT# zK2iSJQny-cnTCFkxbku7b3a+UhLMZY!x5`;RV?^|>%qQ2^Q^1RgPs38tS)(X0}QzhBGWa2eLp#>;kG6fLVJ0n#DL1RzHDX6o_w05>kZm_!WI>&{{F3wuYr-%c1v zSy0cYgr$q3{~U|1KZ{W>n#ntCkyFy{$3nAgL$o$qa!os~9ZEAIflAnjE?0ka8fZ#Z zF)wd3t>x)EwE`=tPjkVMlW589(xx!21T#TVITb^Gq26A`lAs?jn^Tr10diu}eG^tT zGYsg!0B$1lY3b1<#3RX#lGJBL!t546MBJJr&PM0(ugfIzzV~-`O9gJvzgo5$blS=Z zN%_rnY*s8kY0G^Qn&be1`L2Qg-WwpBnE}~M=lTF+BYRMA{j)TBA8HCJRUdl{VW}4u 
z9M=PrIYNuH^luz@^LU|5&$ewO%lPP~6*>Wn>ukl;XC0m;koN)0eb*kpe}c_0lYO=E zHDFDHk9#wX6u#%*&?#>We`TicRJW$3`WstiA4s~YA0~4YfpJr$Ac?=&{fhu0Ks+i2 z%!Q~WvL#y#`)h-FeRKQQPc4scVmKs7`w#OvqTYI-3cRB>w_9aD7>uv;UAKbS=zC|J zN$a~Iw1%j>DL~{@C<%c1h3kAtVP5BgryTuXtMZTX;gI*{F{%&2cR#fv9Z(=EWR$*| z$0){N##ALQeHViltw=y@M{4VE1&myYoTHqP+A>&%14n>zP4KV( zN{3bOJ|OSI-@PAAPS+klb%BhpBKfK7oy`aof4d5yiY?8DSfLpH!O9E5Px8% zK*~JjhWo<3>MW|96v zqXjMsc|4lAT`|afUIFy&{a*L>f62JFbDbSeG=PM?`;@Tb7KQYZwY!+xp0seU>(F%g z;Nx<<@zeClznIdnOZ|aYOnjn~{9NVGBH-xdSZ{Ky)SDyH?Nsx@CEkn1(i*314`OX! zfD|fDJ(?~GZ^T8r1hA>}kyWulnVMbBx6tSm<9>ZGCNSQVWJB|F;(fGdmu8;W8y2S- zq9me`Tgm%pFDuu3RV|nt!?#990aFFZYKPh{+sfkaIGz3*+3`pdT5cNB5&V743UQb^ zhRIR^rp@oFs8}a@VYs(m!JptG2Y21LpZM|ebbx@FKW)P^l&E*!z@6}yWh4U>cvWu} z%TzW{9QBjzLp_;ki&HjyNsc;lnodO&gN#jxL)Lk@-dZ8$Ezk*mAr}*3#QcHVY4?d< zDN?|Q|G3Ux7|48f1Xxkr)XDjanhzJ=5FLLQxcPUc6W5nr1Qa2NA3seIkBr8qO%5ru ziG0GAm@$lq4WK*4aqIq-T}x@2nl19M?IhFUK-pO=3u-J!xarz+uZcq5Y6t7M)7YG* zC)QBmO`;dq;1yd|dV_x!c5iYl{hwUEes7%RuM9M@?rRxXAyq!BaZPvy$c=Chui2K=M9Scv#kG?m)K~854O;rCGcYO%K`wjra~kEA<*sk;M$+`&k2Hm~xG( zre$S;O9EZfbs?e5Nzidnz+iBPM%sxZ0Hcs8S#|K2r#|Y|PoSuHPRV3o{^vI@$BP%F zu*hDy)b`AL&72qAnQ9qaJ0iC6FJ?ZBMIo_t{pX@uj!?H@pZWJGc%9%WXyB-ODq1?m zAXGVp(1==~5SKsxycQuvSLzpIiOESn${=5m1|~-3q{5;X;%BtT-7regRVzja>g`%z&Ov?4T;dnHoj9Byy!i z$)ZBlr&bBzvJr*N1Wy6!;pD&4gD5#)=W9T`q6&=0%M??5@~ooV^jqMif=bb!SShqk z^fA=4rS2e@7<2C-=_|qmbxTr}q2UCofhAT0&)?Db#cXsUUNM7%0b3tMD*=LtSblq) zzlT)lKn=mD{|X^VC?ggqOB~%ho;)F!gE?k1Wzkhd7p*SWDTRpM#tTgv(V|pFrn*-= zH)>YXNVX-E@jaT%yT$~Df(>MIaY>*ugwH({f8_Vg+H*a3w`6ROXG7StM!wwD4Uo~~@QoMu;BONe4!x^P}_%();&OP`Seqo@q zE5VV>nXCvOy&u^hrVlPHfscKp6ijdd=yl9!`F%L_xqVDlZ{oA{;Z*pBgzVn0!8c#@ z98DS_c$*aJO#fMOM_QT(^A(p!Nq2ao+VceN zxGa7dwT=4xtk7a$;sr=n(h;Oe9=T-M?Npd7HHOxzt1&t?8I@v^=Y)7=>_`F?D-}B< zUHH)oKo)?BKq`wlFI_c(ZBR5+GXuz*Ql)gS1%Igs338xq6_*Y|AX3 zT(F(YkCm9v&9s5)2h6C#uXbT56{>NGLdA^v<$kfL`1YeRywRB57AMlhX>ZuY5%z>rG`9~NKt8c+0WvSq7*c#LD?@#!SA(kW<~#7{Bz$GT-{jJt!9=&ee$NsNk?`49 zbL$sSp{vDvg@;dwig7nuq?V&wXVLZQ1_RdV+I^j&RzFzxreqk+k?v>cUtaSY0=nxH 
z8Ay|FRKHjNI(Ut0lGF#6U#jk@bMGCTJN0b!Jtokn+V=tIg2qO99S6DytWZqOJR(_> z&;F4waK6A{=8Wy(!b5ZDj1ehQnJOd?shdYQtDyJy$<$u&5YoJ4y zWYXl&cd$7w|IA$qFw&$Z5F;ap^5l22W@3Ud$JRbu23wo<7tgi2TFQ%2REF8RO9)kl z0jK;qJ~<<#G70dj>AZ=`SFuXMLP@9>(AH8Qt6@2DnaTt0E&}GJ+exSCuMy1ueNAAS z^Kw>7kCGs!Ec|xJ;$mVx18p4L2c2((a^-}*S5Kn>Id#hkiL*1gEko((HD~?tIS^_H zD;vzD)h|x!hB^6kgd#h?zta?Rq3`!M_bW}-ll{G%;&_F30FSMKC>93Yb~6fIOM@G7 z#XZR{E}YqQWab1}g5c79``_HUNhIkB`x20;hOoCJ)Me$kh^DC`2z0cQ<#>HC0m6c6 zTRC>}1Sb5bb88Hc7XLY$WOWwFWXY_RjA!D(cfP>nJDoTfbLCqCdP$XPxP=QqDE*z9 zDXU5BqK$`dhzU6`HZMiSXTCC5-)4-EY9C!Ex+z7nkC#<@;X5>}vLF;wO6-mhasJ6M zN%#oO@fNgiuLLF`>040nI3nO%xBzm*|r31Vso(|LpSDcda2y1n$)bM(d(K zF!H_%;vEB!I*<#;WDaL0ss8&h)9Y-Wj|tBeJXcO6t%yzYSBCT%$BN(Nf};Ka zG3McXk((EK8cMr43&$7(`JJ<1s&=y+tmT6sI7JgVFC!W~n$nFz7&FBxGPdOa z=zUPIjwccHIuX=UwfQ>d8EvAF5DTg~e_E)E-Nt~R`^eV(bMidQ0xxaCN4uEK0fR8F zHHP%DBo}le{h0OUWvM)K0qUxXRj=J&HV=z_ftlCfn&BkAEBHr*WXr_e&}*p#`|;hn z&4I(>Lw4`F!a+?WR!FQOUu2Vll~po}7#CxN3Z3Q>x=@-9ZVdTMX3))k1z#fUJ+-QtZ_9iq?9P3wG@K zY_%}x*y0O_{6T53A%yJ_*4ilgW=WzKcxYRpjnutrdd^)ubXy{{&IU2?b|GL@Xqq28 zP9Q7gl}LVU-{&qGk3}^PUJI^2FRn>$!Jl3ER7h`uLWJ2 zVPEz3;fSBjreW-sZEnDfoi7?)0Wyj^Vsw=K z8Exd1#+dkbvE`cCJj7z~^ttV1F80*XgViTL7iL>FR6+&AeK%LMnFf%b6YOp| z0~(8jAG$LBl4nmHo7F$JYXOf*#keNX=ESk)1{rWNYoXs!@K8x~K`1VceQZ7rc%)}6 z3l9A|g-SJ$KpsmoA;?^&D&U^wVytSA z510w^84|PH0(2Ep82gZ!SSbkW0KP(X4L(92pfO6+X~aZ#`zxGask}CVQ2+TMS}i(o z6-NW!3ib<^loU}#VoaDgp-iPuiVb8`jVT`H@%-y`{X zgtryB4z>z6O6{w0T+Rzs@Q3(kXCz2xYjUV9h13~zSF>G zZQp`ISxm@kx2&q;X~g{=ZefSd2KjPO_yqU2MKU6jR=2y0!<-3WCxHqG0bm+emCwD= zBr5@CpeDk@?i>AWTGk%ldz4(E%a7<*w8D_#Ak}vPLnjxs;+{wHh<37F)7oE7bAB@% zIUSB9*Pd~Lmtb8+Ra>uR?aL5Gu?pT1g(czss8o_u55bRDGhDaMBB@2wWzRkS=~VFsqP zj;uCwHkuhX(t?vtAAD%!&d4N9&3u{#`9D)KB}N7gUm;#+bPrU zAj=ZfKy%5dwbEpd_nSj=g70IMaPSMTJXKC(Y1<(;;{u2KwP@p6QaShv*;-1(i;$N7 zb#|iSpOszz-wGsS)Hm@EWyEyG2Nv}F#PWnJdjs?~^m91Q)kvic@4<#e5d$HJ+v$g1 zE!kk=Vmr*g5;hh+h=1>>YKg_d1pPEW;6Po7v;ZGw$0-nSZ-o*JHP|C?8XyD8cR70Q7aei_Ljo%YTjRlegkxFjo13&jb7g)Ir=G{ 
z;NeH~6|WmJC2Fb07rZ9MbE%>VU`)=1otX#Hu|)Lp{0-)-(t0TF+g8lBjNZGbVF0f^db6Ewc!k|hByM((eD<&oE9Z48lku-kOfH174EuDTz~Bt2)0~; zw}YbT)nQmHKro0JwapAc3uxWz4}&Ek3}<0qJac>Go!M?b-e4RLUoIC;ip(74i~M}R zCx_A}JEHXnFHX`3_x}}6D;ttxPb%3rZG96$1IF}7Ib)WE@=>|GdedTj%Xzw|n1N!{ zS-Nz43=Z$~DnqZ%Sy4emz$1mBB(eF|;%C(|)H%9IgU6IFzjoz@MZkx8FbHAKl|81< z%j{N&Vb4TMIRBi{kor#I&2AQ1@Qx1F5>_`YO>!*c3Gsz5zGQ*48qMsbz|FUmLtwMe zG~T^YvTrT_IxAaTWSA3#$Jq5o-(=q#gEMnkAv{qH95uG(2Z={|EAA`jya&4suXpk7 zDuTx2ax8Pz*XM)ZS;xcie!9u1BPLDN5tPGjY>*y2E3i*wEZsVP<0jCtW`?Xt?%9&s ziBI$n7vj&nRGHv4cr@qAedDym6HB)^>3<{{8zz9!WY|II_|<%;{h0h-QcL zyFw6yrow70Phv}}M~h1l_W{ZV(f+=UWd}B7dk!)-A^|;P-7;>m0S!!vVP$U>3cW?^ zWOxU6m@pLemgNwRR31)-irM%C+pWm%@loK$K$kq}W4c297zR^MCPs>D$_5{7V0hvha1c!3)&Y(J7gHUIgEdwZhjJ4K-jg)=wPK==&2{f;m51 zrCxRUBg!U>Wf7Z6mMjnrgpx4SbT10N@iPK0xF2z_v}rKban=*#J)V19gLy?=Y;3Z7 ztc;g0>?gksH;bl?V-ZGfC!xzov)~$#Njrzm%^lFM z)OP+OSzm&SPu0=vV;~ojz|680s3dDhB{iAkV_)7So=|6y>MT%oKAy{X=a4E!`U z@I^||VX4P%bp1Ptc??~TfHf}bEa(B-PNvlRSWo)l0iVnK9Bsa?H8a+nv`2f(5;bP^ z?9pR7hn01I8~Z(&o%ku(lYLfW9cH|g6T~p940tvP8IL|82t*8mQ+5P#0!irVh25HO z@?G6&+bLy?A)H+BJ!AfR-O$*4Z_1CRKOW|4hDE+;BO+M+&IxRZJ9$XH-ggj+ucWZk zE9~?+I}yQ@zDgJWdSte7BHsEa^)~v>%EqydtV)5mvdntjw}@EyErHD+`$}6E<2)@$ zqd1s_TerC3TzHApuy$ycbB{Ea(WzTWPJ^7cjT6KnUg=m;^7j;Q0fR^WP26nldt~TC zb>F6!msJB|J58|Ru?V!W!+<`QVcFrve8=5nzCGw({(z_a@Q6#Om2u};=dW_%Zax(W z{>+KJ0{B@w%2Wedo-@+-z8LZM7%j$~W)b?6MS+5Ev&1o#n~s2a)_iVzZ&8R&rJ&=) zT%I&_v=io%oi4b)uXHcOE>~;a14bD`&6Xvk%{_IYqH@5D^U1J98NxrmBkOKSQ z3(tKK=)>xkJ6d)sE{@ECuf{!Q^mZmDoR_@3jhjBF6Nxbwv(S)y{Hi%#fm9XV)b}N% zDV>W<0ReVGY%VJ1lRVm0G;1I<+nFH2HF4j)5-fk&uadytGeRPThbmhpER;G4Y?P+; zeRPPEv1PJC+f!Oz3BTbb^$7d2y{eFn;&OVI^daaf*7x3kkXObse^NOrYXLd9)kP3m z-Sd$vJ)xdO`+?{5*0q3v_X9V#IPyb{466hV5w`c)_wevbM_4z6_6~G9g6du)&p$Q9np!|SIf_rCd!OnmqM)i70bR~falHn`H z;nSj0Pg>moYgo?nSZ1SZe3(sl!Bc-LCy^tdq-4M^LKs2^X|+(3HUU zRqyT~Uz%mD(iJ9?hh(%I+>90YT;ofnVjufdbvg^TpVAm4Lj<2TChx=#L*bFs}KLICDek1v*2==#M)l6e!tERB!YYBU^B$%`9FkF=!<# zmxy9-_{XeMs8x_fPj3RL5f8uJ#8k0-{3Z*eKo7bijasKLC)$-=d%yKIch#)~-)%Qo 
z{$lm12=(i+&fQCdTd&<(CZ_ysm*OwHc@uw(Pn#o3=%2!YF+1R90eY&d!c6T~+b zvyI{bI>?u5()<=|2+x6s#?)X!m*0Hjg@$v9kW_=^5H%LR;jYSH!?0MjqbS7O#;_mr zog3t5n^=M-!<42QVT|FGS@7a>&TW9!tMnom+}XT_Pjbts@9<>dXvv8gU_l!z!yLJY zLBoCdKn2Yz#A?iJT=5Y6Ep;ddZf@;55 zsoMK<>k&J)bu4ikZ;Les-*fUEc(3M5?IDwS8}P2!P)o%K<+1~}7Do4eXiW*fK{5x! zgy%%O9D*+#?w1UIqeBSudnqFb2jP#z1)MIWANC4wNx;#v6ZCOU z6YNT(SDAl#f+@c7FM>J@B&om#c%^Q|u{$S%%eA*QE&0zQ#a?l}2THt2Npa8XCeOGm zXRBi=sReo{ZbW-Qu8+00HcPX^dqUWitK%R8Ds;K`3w$1==HCRxmF_EgX+-w^Ujr$4 zgqTI^3w*Qed8NSsDk-IHEsBELN{qsi!=HWui2%u5PH6Zy;rFNd9hn0rdR&k}Jt2(1 z(7S{1GROI%1}`bDm}}WQZ+{qZf49mrX>K65%kG6`zfkeTZW88frJSmNx1m=uVErw1 z6%T6TIwSt4dU{_N2uFAnsFG(ZawOKceaMiMymydKxF?qde}7Jjrmoe}tf@YUq_h`W#Xm4qCJ!i8eP$0~cU50n<9<*PFUe4wN?$l6V^ zJlD^NY`ooFD&6^{3UCiA`L9rimNG$xb&3B_puLWv#t5B<1nAIsF2i?f>=A9|^X-<) z6+U;97H6^ud5_uI66NJoI68E(5QYNwv0VPDr_BO+JDAw);}pi=j{;Jn}zl6i`jiA@sImIX^LJ`MU1p)lm5x_9vj=1s*m`XefUSIJkv_xdL z6Kp?xUP(dR9(OeshztK&_57WOTyN^(TxS-rt~$ntCN{%!-qb=%g*~=>f(rka8@fuW z2mSalx~!;{Vm7JZGaBS266CP4NI6&i+c?)HqR6s;RW+VAVB+}m)xz)y%dPDMK0XQxKr=_qV>?z@O5ip~aCJob-mofsLFEi5=Hd@{1X zlp`6?F7*81e(s0_A}z$I*IjjH;$2p?%KR|lKJ9Ep@DhJ)F&vL*X#LAs#|H=QOSmGy zGoa|k^4;>W-DAcc__o`1?V(<&dj|nUZlG- z;aP?ZCStHe$;O(0KcF}M(@c{3iNFd_5~9F-9-2+~{=TUwtDwNMpmqf@|Kpy@4W&cE z4qff2vEDA`eMzOkVTlL*+X--nRaLhB1mw6W@ASKAT6*NiY8^tkwxN$cZ zB}BDkgo#2ELP?BS^v)8QLafznv1kYL_f#+qef;QgBDEu^5zFA~E9IOJu4rULoJ`-E z`ju#@T`ixjY<Qi1%i(ho(gVeO zRdf2tFjs9dknPI?_;RQe#CiPai;+?I+d{QUFrKhXB>gui7rB>>>|*2ypDH9RQTuf@ z5fZ!Er@_XJ4SGX!7rmj3&C$veCozsz3r~}PJf2x-Oe=&hvlRkoh5S8xiL5U3#g^=~ zH(oC@)!Dm|%l0KQ-6>&5)8p!+oUw%!f%7MDpVI%VyP8>cD4|gB&Y1 zhIAT8AYpO+_YX)`i)bT@${(yTA)=Po^{*s;&HZc|EP3?&Qepc~Ym4}E?;A{8S)~rR zILL?z6K0*IQ2b6J^}T%JAC@^P9AT*oOh_>Euj)IaW07wy6wg&I+enY7 zBmRzIAj<8|^1~Z81MUrC=|atKxzjz46%f!_!|%ut*2I*URxa|TQYNUb^7`wDb6M8?$m$MDQ*uDB9uSaY`Z74-=A> z!@}w`HWZD(uQlK@P88dg`YASzC#ru-;l8R~32(NsiXK1|(3ztmuZw&cDaG@l%cq ze=4NZRK#=ursI1or$w|vckOcG+wADYi3n`C76 zg5!D#_Q;t5D8w++M4C`@9k5TE ziHQ~xJlmISCu(ZA?9>JYL>;n+t*nnIhDF(4qf#6Z!@4ybKXl7Zc8v=+=}>3gki7ft 
ztJtC=uBF%in4o`@;Pe|0{#sq`y#}(8>7r#=kDLjc*5W*@E9V#IRjcy?7X#A3xSYxL!RrWu);@8`yqtljJf-I!0x z+$*%;Z8Ec7?=ac-Xoi+l?#ZEyqzVfoNH?gnw3~XIkYISvut~%8J zlvYR*(C>cl4e4%`mvV^zK0mm^+@v*^od$=yRD(qQlLq<;kJp-jPn)lXby{Edy~f;` z!~o9Xkqyl9Ll60fe?l+7aGMe4`g+d!634T*w3R1dxY(qWdU@|Gb8V%}bDD)#bqhXy zsOQ#OtHKQx(qWRv2-qk3KlC6?`93+7>OGLwtn`9)pd`oPLen^4@*-UQT%e(Mk#yVo zt>vC~=Ax7iF4eKcWUMDwvEX@F+Wu-)f3RYV= zsAVh`exd<}w%y|EkN?mF)yI2{l-l|*=%sz`A%e&1In$=+oDiVqfy)=CGnFLWeRISw zM8zh8x6b`5*7^`i!xR*wqfvGqRUJqXKWsBpKFgX@_fF#8Rq-LsQZy&3ETBKrR7ly* z#a2OJ80>_*E?Gt)JrF2uNMi*fyzbL9BJaTIZrXWKgD#K4()H>y-yhGiIiF(4(u0z& zt-$U+MCT%I?5^aAl=mn1k!r6785MCkyfE=ghIhfALe2z z%ZCgfQb*Qzn-8T%M|ecrk=%8z&ReCxi@288zNVM^AR!o`v(#FjnbD9|?4!4g|1rr` z`%+zO8i%aDAO!0N_tbF7a2v~sUC%eAVz5AcjV>5!^sRI)84n^3B9tmv1^W8`L5JCq z5Vek(mQq+&71JTo^o7!}lf{`djQW7>u4r59?1!dQ+hc?zr7|Vznl9L9(;*vi3rB{O zx1mA2|E+oq@aCI@40)kNJ=Y#0hSMF^G^ikiSdiAE#9a&1eq9Ul-s=aSf_3hBRd7B$ z+ZW`K^j6|a$iyB<937&(L4Nig3o%tImzU;T9=2$I?_pul27>cjcMclXJXK^7a^+^A5KG8og{8A_jRq- zLe0-;t)hp}|G8h~>;9Vni~m_MZlfCexXWYS`Db=&dVl%|*`b!=vy0Z*#4o0YP-V@iH0C+t>z@>lBp^8Fbmj zLHrOx0;P>~d82(&G7-dg5XY|{6Sn(^+=Sm($y(v5gCaI=KtsGU#5M8dt*AL;;+edL z^=Et9-s6R8knJHV@8^o>K^KYy_KRkfLstYbOu5B1HInQ!Xw+u!*=wtNHs_(D?j}NM zY^%$C<#8m)3b{EzarRA@&MjPi`}Yx|j~I1$g^vVpW~iGpQ3HwFt5`e9zPtU&!Y}P) zz#e|$S%Mown+D0zTMVXS3o;T+JK}1KF=#fB3Qo7o`iC&G< z`ekr@0G0|0@*h!c3BoBf8Qm}S1@KXFUZVfia!Br-tVRE2M9W&wUH_U( zld14w3=_iUaXZ!ZpF>spS%@jr^X+rBElRU>@63FzlyfshwRX`VVX4bm5b0_0ABybY z`!{~>Sx)-n^()4*)M0Ho}D{w~(f>(#3_Zjxzs8N5CD&2Zc_K$|8iYR=J5U?Ok=FF`V ztI5wsi=G&H#hnEwFX&->IbtH^S9Dj!+u+O}vZ@QQy8Lw6qOyZRsU=JH-UXTZ?H7{e z1Rwl*?sQZJ6gZU|5{wt=aPMc7MSE=uF({~94O|hjO1!@(fC?(NM5#VpUTFCiHX}&6 zb@0aq7PTed8&cw+_ThCX@meus2%X|YR4*@qy+6)Qc$cBdVjwceU|KD(b0k{NEug~I zOWxA;#a?JmJBxaGw+Z`fB>d4Hhf&S}YeqFNE4wOmuu}IKyOzfGlAKKZ(xp8EJbUy= z=Cb?=7jE9ERp@z$y`}dF)i(Po0^bl{t6Ge`DSObwSCQ$`>M_3CVuf-KD-ZoH?+H*x zAR*3k$^1xYE89aJIqZbgin7rDcs`@q*qZaVTFycouXdB zne~Ie#)4MMw}UF-F{a(O8y4*%EqRt~-=)083STDZ1qyN3OR6~UdCj9Cuw=0iY%@p@ 
zIkCjaV|l#qMza6LUNMzsurZEt6}zU*O*PoZ7uU;LYBc=hKQMt9D=bR;!B7ze7f6s3 z*OV{kl_-i%t(-;{tf8w_B}fp4QcOgxv!pdTqyz`fn}HucnAq!LZQN7S)AE{}G?6-# zZkco(>zfLrQyI-yKU{0Uv|Rl9<$SjKKWW279`x>`kA3^9OO!wPoBJ$A4vc2s)O>jK zs_mrek>k_MoUh?-d8zI+FL&4X29dsC6#_n_uW=YW8#Q9V&>Y zHnGDy%G3UsgfOx0>J!s-E;px(TXyWptL*m6S?(~PBHk%p52+Z$?{!(0Lgl5AY;by)DQA!&DtX9N zda^eEBth$Vmu8&H7fqun>;6c1mVZcaR{-{ic&~to2*nOk3^_rTXBc z{MTgaU+MLKh?{FWQq9|ZMwX=+-wFdzWjgZ9nvn(Zvh(kyeQ~h`u_)ahkMsYFv$uY0 z!h8S6=`Lx65lTo)#|SBDkdThSNa-5g2ukK{-F z#fWJw*6V+B<`EcpOcp||tDae19H!m=b|m z7;Uf>|MS&x(&OjcSwqWmNvp(xQpD$;kpbXsh<6!r$RxgXoe}8>wS;Wp8&n{J| z$o}DF=h=XuUM5%J^K*8+4|k$AL3&ceK|uSkDTQ2W`3mi!QF+}jE?jCCx$2afF>fv- zKkEHXpON_p-P06yeIe2R7Z2UPu~Z8;_T@-wBbmgdh7D*`{ekb|($dkHsEk*zD zZ6#Uu(j1wxxg$`j+t`tMD5rW@k3vUk7;8=@q0iK4|9A=zON(pPAF;PM^EA7gad_-B z^f`xoi6H$r@SSUUZRg)q5{AXd;OQl{SfHh)z+3!S^=D+=i%t}Ne3VJ!&ABbY4i-Mv zHjT*v9#xasqeteBY7-qA=iQ2&k;zts&Cl(lB@Jf@pb>JMNq!#|@a&0)b`m7q(s?Tv zHK{L5_Rik44*1zzfABq66OUHL4v0zR6+HWlO~htSDqJ|l!& zsO|LdFnIrxMCh!S2zw7tr_IWQJ$)*WnqH|;Vf-}*1ogn_Vkh|WwOz@*C!p6t^s1TN zbiEM}KZX}F@Mmb`CXNe(1Osx7jh%k6iK}qHL$3MkzipNT?PYQUd9N%YEvXR4zO#}1 zSBi@bSY4D+mW%Jd8)V9}cpFE20B4M2qt%KxuQch0Ebkw#&-NGBynod&gRY(-+}kCP zr&4-7W^s+|TY=tZ2#V$VMqCty6b5?0`ey|P0jVVR3ke0-kvs7?-0k`H_fj?tV0Yu? 
zCT3#6^o#KZS-z}+2@v$v-~0UGkzKkL!f}DSP4*=z7I$f^r2d@zSh-#v&V~I)`LL$>=QW z0G?TNsVXbqB7AkQr}tMhXp%s5?_OEq!Ja$O&_Tv_G|9&*625R?x=di8R|=CYxGg!I zjyadZve2e{K{ax%I^r8CEpXS^Y?{qw_ZX@|g^AOdzui3$J{+CZxK&z;5zX4mk0*_z zVMwbs)vTeO9dm)DBpm~*e{%gr3!BO-1o=YsX@-7)Fy@On8Xg(}dEKCR49Ko(E!OO` zeKZE%W)mTV>-*I5sJ=j_&&IE)!m%_gKtOS(kMNV!cdwAi5(E(Jflu)jW%w%@lvori z1z1O8AturnMixUHCy5MxRsyD8Z(ycwkYw=o-?O8gRVwa{lt#VROE;h$!UMwD9+nr+ZOr(E6Y3#yWrTx~4Et zIsuV|oGjgRKaXO)?PJ$(qIDgLZiahs1k|vi$sja4;FBm%#00M7!|aURZ>z12P3)4lUqBU0fC-SK3q7Brk0#osOLtEuI8U=pAenRv=kOG)WmNS%6lfca@B zGwmk-_nB=V<(IgJVDzONv9`H|0ld$bCw!E}=<;)MRQHEsqTVD*mQ;a75oDH)dE#`m zoig0Ks8pA1=G4JCr-KPN6q$oAnf2n>7>T}VjSm@nqX+Zhskt<&hc(&1%vuaXS9nvJu|bboLodhxsB4bwU4n>co zkiKJIe(g+mlVhV6`)wT$<0}@Qpkmz)Ps959g}+_F7TMVPH5d`~6I3V=yvcV^nzl>e z)tW*_KpFsbiH))fNd#4SyjgbomS6yKY($pk*EUxx)tiyP@@-+r3ilvyXT{3w{%1Hj z-tSz)opIQrCH%9;beVGmFO;k0_`6=mu|xz{l^Y9;* zS#0bER?BBzlca3FCh8Fdef607t!AjsZQ-cGxIuT8zT=t4XUIq+O&hqm$YTUwfgu>T zcqXzvaVX$ZNuNF?$>rE3HjVd`!TGl2)7^;5l~fk6GpxxJHSMYD0@e>|*F5VY^&VET z!CsdYjS`(Wna76+trkmJFynU*BYzwg_Z)TI2pW%q1^=YiU4;)_%s$iy{gJ49Q}wJ* zG4sW@G=5zR&DTyF%92el=37|#AVna2Q$KQCWR?Os>A{nLA+qQ}_dYfTqeb@unLZ&% za0)i+vYc0(tZ|8bq&dFa(tGslb77CNWn0xyDO^~vANCOP2}<;4iztu9l*!uJhfjkb zqa16VWju7&_G)=WV%)w71@hkyG4aL)ns>~LWm+fQHk(|niQWl8>e=ts<(09*abS#9 zwL2lXU1H<#uNx#`t}GTV1yn<%adgR@yRlyPfyu<4VIjui$ztbqoV`Os85-nT;XFJ; zn5eKXe2`WlNS}Dx*_zXLQo%5Z(1rc-jU{nJV#4cK&{DDxq%}9FAeTWV#f}eT9OYia zJ1rKJOM_CrbYNH$UcEwsNhVc`}c_5W|JlZt#l=&uAAub9k{XoXzMz%&wa3PUDR>cst>IG z+S4||o`%GRGDjizh39jIFg8vzRFrrEpv;@o!JmUaBB&5_D1fwVJt-mK#oxc4`}*-6R2Dv0$4L3AjCNWNi? 
zQYjLiIgm3jq6J2*?Hwcepg|FZc`{y)@1!D4;X%%l_V&j9lzx4OsS)H5Bf@Jse08jF zYM3UJG#+IZZO8$hcoq7*y2J9LyPMv|joP4kf6AY6a>_Sgl2Qq6Z3v84kcEq-hXvHO zv`V8HI(|gK{YAL<-{%k?l_A%A_f#;zcmVO+u@F-qZp60vHKJ?JA7%a~|{yU7YWN$mG>2DR>jf`H228o73@ z#U8jOQ$OCj@dHYIdCXsvt1-R{F6%7l-8fEpuD>2uj0*f270*@aN`$eU@cld$$j z+I^m1`ICUx#^Pg#xvF+A9JGBtL`=?vuOFl}cDze(L^|7$=$*feZutjNB+@Z0;t{T$%rCYL3Aj&b2MCXkT4lW0;CwT1@ExN3+GK8 z2K9S|P;5rA?xEcy1!o&ixi(qkrhb)dZdN)!TlBQ_NpK5KV~u z>#URnYwbm`Jl65|Rt`}ak#ff!hyHsguJ%J|fl<_ao}6~F=7g&Ez8@)3Wq$NE(L|-` z4=f+!U*D@hHzh)kpT-A8?%Xn%Hv8$DZsqo0Zw`4nF7w`T^URu~+E<5)x_8b_exSqD z(ZBpG;jW;LR+FvGYG2amY2WJ&2LcvcdTV1iRdy>G!8}FsRT3Ta@^Y@Iy@-VBL4Qgr zwDb`Zn!O<%Vk%hq5a|gS?V=n1LE2@%l=2&lq76M;S@f==YCRyvKK;p-!p`~B98gTf zY*VuGR3iyL3Hy+8@B@efQ2MEwm$RJF>=SD&{G*w@UztQE?a1<&tz$vCfb6HiLxaRm<4D061Ema_!*&+mD7gev7GZeSdfjN2*6U8h+PeW42J) zI5tvmL1aPlR;JuAB#k7%#xZV6?rN)w(bH#hR6zup@3-1uzYOD4+;7B{?{;i@OXR*#FViiKu>LxWsn3CIvw$bv`xel@*d=7|VW~KBi8f)>Iw!RL3d*kY&15s5}d5W9O3{a0B6w zbM(36^Fd(kPWJEFbFIRd9Hue<#2c1gN4tECdf7RQloy9-@v!$#pDjtH|Ft71$BMTI zUbZmp@@hjppdkru2p$OkxK;Xt@H+N4`r?vt_(vmEAqDCfU_RJPJ<9UQvmAGtLrNKP z*;V3~&q7}HwvcVi-z14V!LUrN97ebgk1!-Y4wTKkj?=_BqNi=wfd&8AGz`?T*BC5! z4~PUzyt}UcrD+>%Z`VlnP8=4}u9DcipSAlc?W?$NS~H#$at77hV5RN3cz(y!`5^JQ zZ;*%VfeJ>hvcLQXiW_>nK($D+NOzGhjetM3R@J?!viOm*#}jhdz+z3f%P)XFFNc{! 
z`{84}7?7(yJjmU9Zm@R6>c76fCajDWWewYZ8ZDuc>p( z%|GXmima=A-P*DAGQ_#wMnoJ)Y$q7A%rQ;5l_96N^p>VbcUU%2mJlG zbHT?OyK5?Jg>k6FT%!SH`j8S~xgVmhK{k~6cC^`nsYYj5@;hEJt$y~aiRZ>q-|;qqdw#1x%UKlFc1J!*pTI^i1hn zaV1LvT!uo5wT=CGO@_IshqHA?;q&fL!hruS8?q!`zs38gjfT0rQbGjo9+MT%o1unV zT2)s09zprS2hqZYN?``ez#Zl5lkUf3H#nHciWcZfZU}4A#!3>6JDDiV;?JVa&aLOx zJ18Q2InCH?$lmi;wvP<#5l;@$7~Rkj-CxeFIE!s-M83r`nWb?Cbx5AW!OuElU1@>7 ze%Vh@AhxEC%@b2@vIRw)cETjEzncV}huGkdI-eWkXMa)VV0ph;3M`XTZ`oO{H16Ax2bvH@j-W+8zc($^?bS zh;jlnNsVQ19ew44Fclj$t{(_AM*)UBy>nSsE=>$`J!O}WO_?Mqu`8}SM3C3Kwmigg zhaV2(TU;4NI@H|PnKm?B0_`K)=VlQeBpr7Qag|y1^Vat~3 zhwP2=P4sw@ghG2jm#?yH)hEF{u-8iCExBfUr;4-ZjpD9?O;59xjD~bQ3-(-LE(#5k zO;BXbeP**ZxLwM6CYwq`b+lc~5tcxQqaTQ5Fmepyhp=_rr!w@@v%4e}6W$vVq2wN- z2Tt(sf;%$ix-{&@v8gKHPq$J( z0o}DGTG81;E$elRy62im-yx*w*T6(+8>EJ9K-8 zn^Ihv9Q3%P>(TvE@?kD8rxO`Cyfft292}Kde7|sQKlK0S)9*e{7LiQb)qAs$vIDog z-fl1H~tXa%`UWXa=2k`=i96R)XWkLtv%)h{@@zTUSyzzeJ}c@xeiCNu~gid zUiS(1^0E!ynuA)Ddb6DGnEE*q$}xaBR$4Zm)@wP1OX7pM_SO8f=L6z~_ABNuf%FD3 ze)eS{zvTB8n}qi){TyHewa7X11pD!ZA&o8gGh*b9=2pD^0kg_c&d(>5ioyAxJS~%O z^7x6>z>3NWrjgtD3Q?THXLOCc*`TT}1q)*kU3NX;<*C7U+ljX@q?ZvY9Esl|c<1X5 zs^Wu)3#g8vt zP9%U8JqB?w8ECtW1>xkil@89VAn5XpkvtfEBcT5lO@QJ`0Mecq*196-tODsvwbo?e_NNoxl!rmLfqUWgWH$m;-jC^Ceo*C_f!I}K=M;dWQkDd zJkFx1zYB# zI7ychMCzPr3^af3|Bc0Db2mewYg`mepokRp11_VkNk-eXZyHhP#$uky5h+HNHAt2X z%r_XYtyA!PVzi2IX-N+!VNuJ5`&_^8ZrpAH2zzr}a>Oo3t;}$-bT0m&&X%ys;dbMv zm9;fd9vL_x%}-D$G1gk)nR{LE8tBmdO`j3`0-TBmv`MS<{*w{g1 z9q{iJ?k`Lk!ayDpvP`<4e3P4l?T1$9TeoML2f9u8|Lr_q`1v`@X!#1-G8x>G8s(X? 
z3OS|CeOWe&0qHgw;AK5Z(aMBg_1O+D)8bVj8#Df`0&q7xjZbbJlFpz6O*G9Q7DM3(rWZsDqJ6^(CEH>5l zLU%_5pokQ|om`73EZ{Xjh|pd3c`2ZUAZAE^k3!;Qvtj?1s#=j*@x*75`>6hL%r1hV z6dOT(fXB1KyGuz#y+V94qOp|M;EU~Kd$+p7+RU>J{dos%vuhZ#Fi_+fFkD>gDJf_e zPh5{8b0{HB{mSXMqQc)RVr~Ak#e1f|T;FiI4i`v6wevxYgJaYW^FqCk=y~IxqEh`P z%tgs;bGkQ?mXW^NUqnC6?u9P+VE|Dw@cSYr#H72Lp(R2=08twla=@f#We-1(0(>e( zBnOw9G1(-p6+#jKv8)CCl2cHf0q;14h{>hP2*X^X+pij8J)$2~el=sfKLdiGnJ~8w zs!x-qWm3^=5wm{-Q@8qI&EdZe`5QxS7))jgX{!Zee7>#357@T0*ngP61bzKtB*khR z0f=;;^AY(3xV`k^DduWdJ?rI`WN)1jFj}1JH z$`TFU(l2RL@tR(W{Rae%!@u^qH->Ci$)~rNd&7_lf)ivQx`Q--i`KV8Xqy#*Spn(O z=)vO&^hWs!!nT%V@O<+5Ag(6;-D|>ZRvcTlT%c)&oMQ9E$|1BnTm%z_p3>=S7x85& z+O5k>$b5+ef>G9*L){zrnuyG{L8K5&^Li)$)X`1&=pI11;qVvd6CX5%&9%iNR_L-; zKQHBVGcCGZqopV%$dP%f65;M$S8Ha)p@NqHXxafxb~fW`T%Pzjq#nnv(tW zTnnB;pxp+aRXtnx9ns-1^&pHIBZQF%_3%CC-@^*iaP ze0AtK_gVQTU61>4vRk=T_G$5xT+%h53lP%C0I9_8r1juI2UFpe*HQZ{Vk$O;E6XCv zhcM>`ZjxbS3)a54TuQx3{$TRem=HuZk0@P0W}B9HgcOFHC`Mlce=hq3l)N7{-fMuS z`|3$(f%cKF)R_n(3*X{N;}z)^4`D?2|lG5Z*f0W@+?((JdDaGo_a?U^To@L3RW zQYs1a*yBR11)+y~)0PC+F4#>LH1B6c>~n$u&heR-X6LPeM5xxEF6+G6=J6borHIXW zVRS!tvn$ut0JW$$&<{Cm>!e>0$t6x+G2e>3Jz(a-okntjqmW$jPX@q)9Dmw4X1O5G zs5{?ri|@3g;P$E@ud68l=6QRJ?%TE>k8O{GJtD7@XmDZY$~cS4(^Z_GeVtasa5i_; zUJ8Um7HCbSu|bY7q>yaXTJj~46k}5)UmGb=9`5p>M|4xb3qs**$)3jA}`!Q+^YGAL50=(rdG=^iAv!5Wgx! 
z_rC$72q5FxEE$S5gJfsQ61i*)S?}Z`e5Tssx93Hi+DBzN5inY`!?7%QbEGz%M5bHV z9=;M(EP>?k!R&H)q4Xt>{QCsA;BdiU0&6g^=_454gs%^RqKWO(YP0LE1t10l-~fx} z@Zd)r%ePKskx?N}X;0Xw+%aOx+t&A4u_t49AhtzRDlY`Dia8am;$zS&LB&$ht7luItpVXqqsc>v5HRf3fAw(LK+8Jgdsz1RF!?2cDOFo1ZX_zz7| zQ+es9_vm`~|IQRhV!CvUyNR&@cB5B%O$@!<`xu-woI9Da&c$W?+C%nQYzZ)G7>_w@ zkJH71ff5h&9aOwWirM?jsA;*kdk2r%N>I2`BL~mz#~-{JZT5U`w+W*F#x$jaI_Cn$ zApB`FJk5pK3iM62$pNoEJ6%H46N%6wokX`;Q;q4Ks}ju`KOR0klaM6>h>E56%-Mi& za8MmsTY}(utUbeK&IEL}6|>ao`o-u`nqDA4AH#d$Jy^Ad1*w1i#ALTfLAGF^hCL<*dK>@D9r z?qRu~2)}LV=DjQ<4>909|Nad@Eu~G(%IZB-4c$V7uK7$R4l~rh>!5NqY6w*$&_bGG z!ESG}n$aQy-mW&wO@k~>!Wsl0)g1J}bN-6p(wt1}sQKAjX$x;I-2i_7vY~`@YyQ#q zn2?WG1cBHY}4CaX5Ueh5mLe+Q+xu5hj|k=EzsPU58=Kpx!jIE;O58d z_%PlV`{^7BDWheBTsCaJe~_Q?bSznsADM!gH52ZfK75mgXMQ9^#nPz_mz7eo#H+lu z$r^)<*&45M^JX(=IwB>|E(Vbkos*m2bP3sdszln%jtvY17UV z_ob-6*Iq8M+p*(M?aW3xmWde()FC2G4UD*tzShMo;$-z9 z#HaEe7Bjay)%x!*yta0fJ$8I3Gz}C%3Na=Zi;N~NT}M5$*dkUd%zI!@oBGbiWv|SZ zEqWGEyMEojR%sU!ZjDv+A2#G=6z4B=etM|p>dPHu@KA3Z+e?&jI-F7*_0-tN!|T5j zH94Ie88_dZ61sPLhJ5Wi=pcr`Rs{dcolq$g>Ij?AVG#L20`W3IwH`BYdAJwvl0r;g zE;zuSqZcp8f|g&Z;!nH^&87+b++$mPq8qmDTe@N}Ckb0=`rK@htewMC$5HWvA%O_J z${vI9<-%Y*TFsVbRrJC9jQHjBio4Ht60C?8a)x+X@OOIXvd1Db9AM=t-gompjZQm+ z+%K?F8O8=zAc#o{j3q*DD&7@4Cm~vC@pjhE$;)p2XXKB(h{;S-p5sCP`pjXg^#$Dq z0zdhit`)n*n6!va+Up|^*%y9d6F)(!BWGAZIS&N4zx~KyU?dg`8zcl9$tD(!v_PLE znicy_bfwbxi5~*oRZ}I|;_+jlCm1fpXTyag+Izl8Acl0KFSBuL`liy3tH#OGC$FFF z$D_x`@>!YiD%9#$1=sQk4ii3{wCptA|8?R;?kI3Y=U9I_tQVNeEH-0*dMj%VC~`aj zDPrxxd{c+Pp`mTvC=;J!566P_nIji#c1P!!{)-glZ6qcQQ~h|tnR>Ci?EYBzU9z& zk`8!ed~`<+dP1FH#!TlpK=I2~9o?~XkVGCD@w7&$W`jfB*^sjM|0%E3$c0fXTnH-u zEm3M#-uc4`7BGx5X~ZRC)=Ej9PoDz`2o!oY73wFW?)geCmzlR0;vx?-cykrbRP8fR zD7F5K<+=^O;fhb79HY{&OlLukw@AgIv~$81@>;^if8VjIUFXJ<$4{cp zCFj$Ofi|Kix7tb@9nbb{0~uaPTqnAeGC?^v!i?CHFR z#(R!4#YgoWiVx>d`AknqcKNPbtLu5kWBs$6e{DTnDQISmdVk$Mdy)5$VU{!?#8MUG z%~B7zDv{X{4=CW@=qJGkS&>1I3Eq1$9%3#%9e?B1riDYVLu=B0mh;`*9lwivBd1o; zB&yKBW!132TE`0c3E|#~`izNmjxhKS5j{awPBojO2uLd5zsr3-ju3)87y9t>-Tw1& 
zoUZ5vhXjD*eR8oG7qM|+YV}ia)f3vY@&+zs>96uKH~lAvuLBA{VL(ix{~&g~1OIXn z11es1a{6C)*}w;~Z8tz~G>AZ-4cDkoCmwNGvn}#T}dI z;=f^UUoOA`njEDwgCuIU3%-W0lI6act{hrDn8nE+Ng$=#@pp94bnJ(%5iHkQM2kB40rKI41M9vsyF?n1vl zfh_ z+#_U|5q0uF+G1vPrilHf)ZLquX5@Agz@4UKIHWYuq4gzZo!as`5eKref48hD6_+7v)%&n?jY?xJ!Z4O z*sPZEM8YI}v+6%`6}4t7QD?t0G`FYWPhM+UU|Ft!9Sbt9p@o@Fa5;Xv&)cbg#D>rv z(iz@mG)CZ}VAlH&_X9g=ssh3#EIvM~V$kns)6S<)BLsf#Cc5p>GI&%d0ynFSNCFmA z?C+Kcfc3Z1Et8Ue9R`$NjzqC#JE?(mWNC}M7zcWk>C6Q_ctDdwYNTnWt?5J5k9O)s z4Ae>Fw{y(o!B=K(#Mra4c#3QZ2MLv#FZ5@)F+&gxfK4vo8AofINf11#a8!sG7a~=w zJ>tPa20ig+3_ne=Qn)+n!AbnTCrM6efn!Mnnk#ZtHtWnJd7*#(w!}>G`AJX~%IZws4RlBWIW$ zF5)v**J6oG(4(T1_cRGa{gaq?SoimyjZ9_D?!L*@cfdcnX&Vr3RSEcnZCm)%o9i{~ zaeP|o@x8;EUG$_>|8d#k(BAL)qC7{32m{{L4UUUmTGSV_PyenKC~NVN6+avA|B(GL z{~mi_Caoo1&%oP_4DA2yb-+S41?Z$uY-sj3c12U&B4o(VpH;`n6S;)S6D5#A}bNAQMwh6BQ4yrLv%&pPmVbj1Ye0>i3O5lH6fs2%y zRN?DEp1ynQhV|GkHY*DWyFRNr3ReGjx)wVQEiRW|vc%S0TMMf8X~y3gCo4)NfBdCD zQXqcOz{>uXI05>@f;Bf2Zi&#}&*!dohh;5KF8NIefHCAr`2~?x@jq;g2DiwAC#goWz2xDnh9Gjt3v5k?_gm;LuuBXy{j8|HH1; zcJptE3{~(lIfT1b4Gk*n>X)Ka2$Bo}-dh^|$_RpsL;_+E^}e{N*{UOsB0Td%35B?* zm+fKek)>xRPU$p-Gk9bkis}DDx5R{XYlpBu&6uX3kwz>d8=B?pPDp_m(dzmJm z3VES%{x#J%MUjq&H*s=9kLt4AGKmtM9d9bVtM%-DVW_+G#`iQ2A48ahK9=KKy@^55 zXYV(h%wH-M-R~;MA)Tsww>zBwkv~sFvNjv}4I4Bbys`>ui0I}u4K1}SeNs#vV&87u z{nOyP+ZreDdcv2vE`L*OrU_W!oRd8{gj^4m3+Apb1$20qG%Y9`W128(ZEYG_maB19vSmffGA z+WaMZwC26+poNd@CIv|lfvTSHuH)9hkg|6pwo)PANFW+y6_)x^jVf6CoD1>PSR{0y z@~iPFH5**c*X@4KHSe@sOgy?W|9VGTbrWNvE}58kdM&$%Fq=Cnp7*$27yR|+GUm@^ zv%dE{_Pl~OU>+ad46Sy69H*XeY}O)~hLCzyu5Z5OgXkOTO~zbjN2EJ!u9+G1mW#}k zCHSNMVnQpHL+lD_SHzS$gZ~EgA(%92>xc14W7+5d0vrR^=`Tjx1b+9Q(anGRbmFV! 
zH~6j?I9v8TZx0*w+MExf#!If5K@xgaLd`UkX*?jz;re-V=lyKJwd+Tyw>11G+_f3W z4V%MPzin(o{Ik`Fqg+05>E#tyVtUxX)%cZy4f#Sb4GAMWt($~$KIpQzxKx;X<9+(C z(m9$d_I$y9Woxn4Vr;!z9XGrlXSL34{kSIm8<ff3UGsj~4hHDW+3|0e1f~2}3$_>4NqMYr$;k(^H(tPiqFT&B-x*Qv7Bc4iZ&3B%HV5us#|?rQ$35wo{sFI> zk%6BR(cXxaqrK5VpE^)nB0)w+#M-wf7v5|7K&01FOr7fRs3uZliU2aA`(sldwo$8m zd8Odi8?_a4#5eRBB=p<}rw;|nfEeDUp!@Fp+}Wke)p$-CBwT zVxq9(GTyH*xuf1xT&d?wBBBeUIB{)Zv1Meq*SB}R2z#5goeR0anbC~gDDNBR#!xoN zN5id;RIR4)`2j+VE*I$UEF0PcK%YAhaZKY{u>_biM?~ z9WDw3OOSP0E=#{j$x_^s`8{RZxZ`*{%@h)679*F?${eVV0u48gSdcNiPy2Bf1TUHBP*+YfNlcG z(GZgJuzLu18pBew%Ias^ZZoxVDi^O&_PA26L}7L6pWc2U$Nq%z zH;7w%t+BGLSU6?ALGqZ?OA>qK2+~dLgI#-|opf!d1MDd4e2;iLttxx;nv)pGrX9BW z-uzT}hOT&7noLD_!9#)|JQOyb85KutG(`vYw*h>r=Mz{2izIyszHhZu|PU^!i)AHkvF=SE>i$=LRMYZvEy!>zdP{ z89Gz}XoqdRS&wdPDe=m3b=a%XOqIaB8Ti?cBInl$0B9dK?|$YH?QzDA0Seuf-hfTF zq4@dmDVuFnE&GSZSvJ+}FE*;5sBc^0wadv3aqcl)y`hn=DM@|PQuU-Sr#nFY2YhqzQE4PY>2ymAU3&F?IZ>VW&yfbV= zt*ws(j*H&!9MeSrzXm%?x#VNKImH6Tygx)wUQ~HXss9VE@+Rt9yU*mS?Tan}*#Pu= zTIQHeDNFo&IItBgs`x|9gluQ44b>F2wivy0Hn*+~4IPd{tFq2ic356SO2#EUtHEQi zRVD7a{5(ZA6Q*11$-EE^cdI7dp)d8w$PK3h7iZGZdtJV)=y!mwp1(~gP7~;`4T_jy z&o%B`>Q~{^@Gmr9*afNKD+qq=;%p~s4+lzHFb8>6JapK73O)Lpd)AJLqEGa^+98hU zO0oHFnKb-t2xatr!ZvP`oEDjcg{;H)ubz=Id=**Ph_o@rBc*?W(x5^9hT3QR2Km=k zqBBVs3=xH#{9_W4f8}6j5h1@ec^G2F$Sy7jGIDJL&lZOFMw)_$)JUXXzF~Yq;|%A$ z9CGm=&-{3Sm*b15FD-qwDEFVBG7^(OrHXbFcLJPIQCTdS*Qtv9$ID69aElAUYf6~r zfBKQ#HdB@GDXz8&_~9;NChTPTV(jZNp4Z)fcO&YCsy`aa)x|>zV zL_m5d`Y^LhPk}U9DUk2vRpG8M=X=NBv_P{%vnouK#tGpTPoP8g3w(aahKL0%E~e;? zttc)e$Ko{nCwz$rvT~I*I>lVTd7F~kmmKJcV%g~14T{Hv{By(tiof9ixCwtL)gcO! 
z)bs-d1&pfcvc9N_O-gTd(%Y>8fffV!oc!z1o{|Vd-f=^%WO&!_?4-t!WrhXM^Zb`9 z?_uZPE<=^Jp(z$+Eg1H(;d^%YC`D6Cc&^PN#j#v={!X%HDaEm{!;BH>DAVVjRl!R;<2<^8_E|R^+ZnqGfc(cYD-MI|q%C(N0DO?ndJ{S6 zsI_1yb1Kjn0|iTg9S6sJSz0LMT7SIu4fGpOGs!*oJN)37)EVO#K3a%xzxtKa6~9`K zEYrjkl}EJwaa}NNh_iOsww?H}1`Bu$uF>SVzzF0faj`7=ae3$}ocB&~=WTq(eQ@P8b4*^6;I4f%pCx%-bX z{W<)%YXuv}GgWP)633xdW{#%WN@@$aGRLhTOrWAQ&I%KaP5yJnL<*Fd0~Si-^WV?k zM8bb*Cs_XDQ<$-Z57-VY@kw)p`%@tV;yv@4+h3iw-eYvdp+hU^-b_5n_MDQ*T3y>v=o}hG5`1K+RTRKr31+b!DVbs#zd&h5OE z-`S%HjQnWnOq~kX`w^RWn!|+wj2}wcYQwT}ZW6P~YRQJeei>iDdmN3 zDB-rif6OT*n$FOe_qc)%vQ^JOlhUyLbTA|mZ?sho_l8H%`3UPU&im4xC%{72r_s!Bo$E{J>e$sq!0NjdAt#4Top{?-XCPYYC7=C zDfF-&N5dwNUeGb4!r09TQum$-%y zQ`6 zsfme4$86eUb(VU8_tRR=zXd$I@$b^zu=xo;E(S*cY#SeHQVcBng!RMxeme?J-YHpQ z(Q!t~6CpcPSQDi*wD>DuZ>fgje!+fwaB_oapwcn@YskulyCe@7{?CzFB45BC{AUe~ z+wWH(BTGf*&{DVh#mcZ!mp~I;ZDL#ZH_ZyTQW0ofbixOV4C#EEno8Gq%)}S*&$00V z&ZpuysXkZ=e!)n)WzgtJymi;O(p2+R>nVE7{c$6uF9*5n3x@Px1MqNl$b@M@o*^R0 z(Yt*}Y(7(LWXP3(4$}0Q-91rOQzDkeA=xU;<-QcD|p~ne-awh9s7H#~_ zw;MdhHExjNk@1FJ!o>$^n_x6OE||+v{HTSlX!Qha3#4|F&#;gxlW6xVY_TP!EgE32 zQQcJUkG~v;u|KE^ey1&-!93ebm67tJGPYp3{pr0Ym##(1DAo8&JOe6ab)gTYFr2O` z5FBV#Q!LZht{$-#ybDfTLq{H|gDJU03H39&UJf0v=uSbQWS)tI%&3CcDq|wFVUWP( zw{yC_eR&vr{8>ey1GV2^*=oCThnHIHLVt?kmPPOM7C1h);W4Rq^rsKf! 
ztA3^}TN{RWINV(LjbCQ1T#S>AQ!IzMBYTh}TUfS_U+?~TkY20w+bp*qbLUF?*;Sg+ zr__1IwcS68!@%WFWUK`H+_+%R@v34PxyK>h>GXQfPTy~_OHm`ru+^1qwwMO+jtNuX zrq|WYv4K=NL#b3tW}2RwFEj-?7)LR(U)VsKCeUW;(rJ!33`h$k!E)&YCOYwcP={+Q zG3uw>yH1(wRUN|je4sc{$T;6pYBl zxQzjz{#hY%1;q?Rb!wfJ7qhHGS>}H!{@qJpdHhn_<6CLrdrD$lQ`<{E?R{1I(|d1$ zMsu2{TEQ6g8C{6(-7R(6E@+n!(G=8V%zgo55OfbF_aAceW@LY)s;FwFC;jlbRAB({ zx&yA(Fu`#i-5-8`=#|wx8h)0eLFKQa8VvGiGYwO2=NU)citKFv>-8~1YtAe5z>#P3@$WN4IzI_=t%lxU{hC}er*-Crxs0K` ziktz_fV!MkM`eZ=36^h@K@x+maIo36$;$sn(^rNy;k|#;U4nEA(xFI?MmnWKV004($tDML(o{xp$vYZ(XaCB--*?+~MEEWe zIy5$CC8Z+7`9Xw)HK=9BiBQK!?WPdqZ8j2iwJcO^BB_#tzQ{j(=^i{QeGwXr{~nBm z!;Da#p6(*66#s#2?`a?QQRp~bwUT{|RD)&Y-J!PspXw|a8wJRdr_a|#Pt}6sgwx36 z`jGQ!yTPMFiHw7Ae!#9((N9>xMECuogp4#?)^vR>sxgD^uWw*Ng!vU;h4tsWO}Z?;;TlTG*;Q{j!*0!o9QD} zQ8mwRi-sI*L0gD-TnsSzeO)|cWy*lG8T3XsUY9r5Ruc~_nLbItTs89Dw8L$=)3UQ3 zKQIm?etT#+a{UtSn;5G;{!KmYCkNy0kP=G5XWjZmBIf6ghwIPfwl57FDfcBFS%mO+ z#_59sxZu+Flw^5hNloHL4gO@kbswxMo);7^7@RzZ8)B8)e@Yn`LkDsEgPjw(S7-bs z;j^+eDbAMgn6N@bu(7w3gQo;^EI$3EB5LINgb^sttYk4`@a+3w3F+%F3pSXx7R$Ga zAT6#aF)9&Bg_ORY73a(8(uOeQn%RNOY_oDQDK&5pVM-5&oDyLm#OLOYJT%(F8F563J0E;zv9f=! 
z?Z5m)kWkyp9k+IX;cO~2!Go#X<9Wzu3S|O1nj*ijkV#V*uV~n3{UHs9dUMiwFVAJC zaV`&;7srUnnLg;4ATBWwKcrzKnH{2G7Nlr86*hb_viY$s1u4HjR)Ej3WXk@czk zcFhd#k;Z2rDN)S8D@eqvi4Kd~K+Ri;SOcP0L;Is0dAxALJvOW+)CFVk;e78|5QL=r(`&sLUYv<82dj)1=H>A5{ zx;H=LrH(pg1wpXIzlH*+#k0`*Ud~D*mFT;fOQpnaiM~8j9_?5{#Z{+fqnRD>fuh#)4Rt}L#)dX{y9OQ~ zoy~)qok2yM+i1_ITkSMdu*+$jSsDHB4%5>I-HdDBYPS|| zEuykq7JfigGF6XLf2xkB(&z=R#4fD^qXWwZr<(l!M)s`@_juIjZ&{GuFCpdSQ|#Dl zr4>go8|q-Xm2|_c0YU^7^B>pIL5Js*D@}&FM5+-K#^y?Qz>7b zu1Hg0n-OaV*gyWL>(~LSu4l2{ffIjP{WOgV?a6Xc7N4qAaze=fdh-A6Cs?$Oj9&VZ zZ+>?3xCxLq72e?$ssUz5UfrIvY$4Tfk@G~_Nz@|0+<9e`!EY)UczeYQln$n# zqua)8P#T+o4*%~>c%U~_3nPPKjOnWAea;viz^fsOxm~`?co9=XtOmpwVHj<<_?s>` zM_2QEPrx~QfcM<QJ_c@@G>SK}D^kn1g7(D3fN%;p0RfVR9E3T0r9B77!*M_5K=IK!zpDY;aq?Hfj|DjS;g# zhxxiOeD84X-Ms)kM-c^TqTM~M|Badbw-26t$V8WMfsuxffb}#<59yE zqf4kfsffY4wii z?*$Yc-1eM2D8qCo`32MxAz#$O?u~TTIp(5^CO$n~yri4ZQ8nyW4LBSjFsHA6(fM(g z!SpUSINav51v)GdN=EZW$tBBD!d&L5J0;2KeNem1(&&EL#PY|Hw(yV_N#MouHT z!tz`2)vpu0hHvk?hTxH}2wQhNs58+a5sXlZmG3GDU^FDEd?mN+pt2p1zc`O9dFPyG zCGfh2+X)l8i&9l8#i4;Bv7jM-Su%%_g#{~c-jBnc{ZSp`C@b^EX}xAX;f<3jDjM}SyCdqcol>zn4)uW5{nkbgI)!8iX7pBpGu%L;yMS4wQ| zL<1ko!izjwy)G<-8u0Eu?`<-z;Y z{2=#d6E7szBe5fh%d(?$(j&8D-Z?D|ex`zQYfO!dnJ22kZBPx*#Rj6%!fHo}8HfBJ zH0ZZjR1nJe>m0~!emgIfq1Q1zgs#W|zp>^$l5b&*t{ z%+>V*{xIShEDGMvv}xVwRx_%RB;2;xh_>5H!;egDuw5Z*-~UBTE8k1})MAJRHSRFm7grLYn3$^S<&@ zE&Zk?JL>Ee;3&rhpkO4}tDtIKB;)pr0~!9mXwbHFWF;;gwdDxEv6jVvumaWXA6v5$v=jioq}aog`wmjjlGo+5_y5qF z(QGB{ms=WiySlz{p?aJ-R%Uf#apUSEt~px|*AKh$Ti0i$E#lf0vV!iIa1l~0z>uzo z*b&C2+k^H^PZ(4)r=lT>dFbGa?b+kP-HxrXF?!-eTnST+VCuB2h!tod9#*5SH6BU- z4(NxH!Y>WJKyzJ9-*vQG_!#}u!!y3XJhyuzh=ytk!Ji>^lE>w%f{IyKdO5z~`~JJt zZ}k5(3XpKd2BebW61|UX&+S-hz*|BkG$=+kR4Iq-F?u2CxOR!5SK*Jh2KK8qanZ1z z!;Z-Y|BwVoIH^rlRu^vcxMEUoYITae!>m{htOg|t@;$@4yp@jBT3_kvt*a3FM)ms9 zCeh;;lPs^`LXi6Pv1IofTSypU^^I9!C^q0xcZ8}!ZW7Ivhi;b8T7G9X;$wDid}N?R z;=AqVaIF}1-l0Km-dX|k=K}RzDzWP|nFryLT0DuyOb6Ve3~Om|oazk6f-V_!@)80* zOMRscUSHR2h7BU5)$rjRZ#~UDYy7Te{f5;3EqbYW9m76EyNpebn7mV^GbZ(hY3uva 
zCKOxxx4lzuEnZDbXVXG6{ICa%X_EF)e$d$7W>Ba+!ZMTzWA$NW zIozWHCh$DXPjFchpm1^sh4fW7tQekG{h7Wcg9B@?eyCtuTs=C`%T|yH3+4+Yedbc+Ap-Nu)x(L zY*1T|oH2%NqfPdyi_=Xq2P>;3d7n0QRGw`&7iO#Y_JlHuwj2I4!`JHsf$n~v3`6Hj zlK(r;t{S(7TtgN7-~u6Y@24+5ttd6_o@RNWL#Lj@?T3(|nq&u614FgRqr2481M3Z9 zyLRnt^IiT&%w^JhumVibO@r%+Q@|FbqxJQpgnhoe5z3RFLTfc+9cFPFPK6)%-*sJQ z!Y}xZnn@<;tiuj;`Yea6c9cu?sXr*pLeVi2@iAXuAIhtX^^jIP==K!@`oqAu=27{8 z9vkefLYP*%`dd2S?Ecr#$Z10P4|7pKjZ5fXPD=(HctY}9NB|LAk)xe^!SkL^saQwC zK1=w&r35q67e&&_0;hBzP{5J+O3xuG|4RIS`Bx}*BC{B;#-PS#s^0sHo9R54ABCT# zcL&uoa&(5!%x=NL|K7v@wovcka;?^}pF{h&K7)FV2bHePTDwA_2fub_o%I=Xl6pux zY>`6C1lXY7wodoakQ@3i#-7_Z0u}n~aK?{3nS!0&a+iz5Z$2|-p>x;t(b;gmQXLtP z8DSwqd&_F&n5-?C7lLc@!uZn+WdRI}+@i|qflJ^dHO@_nuhD2eH=g|{q@`2xooJU0 zt!?z(_Ggq22a)CsN!9&Ju*^l4*8lK2etB<#is)=h=(!6hWzsWZx78i(?ulo<~3i5P+k(JVr_uV`x*In9i5K z)F9EvSd6@(0IzOkbuh&3jw6Fhca`iRZzv~Dje(BbGK!H$9cBK;(hjd|zmoSi{>I88 zi9dGDi#v6;mLf_xFp*VPmqaD9B#nH&X&(CQsd?~qyZmhS&D^emh@+<~x#EW-1o;Qf zmY_q=V;VrLG?vMLK+BMnp)nuFV)u3PPrWY4TS8F$l-qBA{9Y0ilciuvq{ac{UoL1B;BI{&kS#+%R9mu!wlTgg@}no@H8 z`Bm?)U5s!&^j?G;465JVoW5b-98Xejv7S!w`gW&OoY0NEJv=ztVRJK@W<1jB-eoF~ zCn}*BwWlazepw|mUO)&s8RXaxZJZzhp4HsOAt zFenguo)y=`A(7JyPNXiyMvq~Ooz_rlS|DKcE!g#{Yt&0%6}H>=;eB?b_fMZXvFpow zz{H|?rOZF_1Lb3M8dNtssNc)opgFXHbsDP|sUEhLPj~Qn-G-XzYAjrgcjuIuzd5tu z%6srn+z!Oo(iQ>a|KZlV=*$Gf6_59Zwi0=CBri4mfq0+Yb61#W0$UxYxdLv=B?vBj z8jM4NNYi?+&x9r36Xv_5e*X1?eP$KEinj0D>YBH*>aQR8vvFOXzGEY|?+oU-d8rvw za`dONy$rUQU0ufP`iGpe9|*y3OqTILaj!gWXnX5aWqC7mzysErv9A;C4Xw#ysG!s* zbOH;Q_Bk_N3x{iO}aZbg_bpfx9K>R{{Qd1dO zr~|se{9U#ua3{E-;uE7K;;`7=a70PTE zTY6M&HB9pXnk1SvvcH~vf>M>{`VXUnXdE4}0X#T#PI(%zqjEm8YY~Y=Y=Q;r$z}&a zc~&p-Oo*j`%m;M>v05rFfhO}AaWd)oa9;#WibLf94^*mI`>G;4He;+lfNGISaY-}L z7P0O-nAcz<+B+3cdqJAl0RPCB)-3yFa4Ox3XO_~BFLT%uB~>;-?anUo7Ei0Qj$EPc zG=Ud3eR7wYM}(2keKfxW#(g`%4jZe0*DB!guu-2JA(B{0I{S&Ge!ZE6SjW07LFmFU zud>FU817U&a3S%0&|(rQk_#A(-CSZL zxN68fY&q-=^9K}^4kl)HENXC;@KPOP(J}ab9PCs)tE;~1%}u-o=rZs)aLx1f^qQ

urZW z*^$On>E6|lB%bA3vX+=zJI#Amu=#-oRJoa5#AW&Wq_9ip5mXW!Z(-;P2?`)Y zS*mjRL92?*VZeRJ2_J5CBXssbd6rW0Yl|o`C~E92I4*zq(zq7%ZFIP;IOxNkn@Qq;dQ5B`*j{2HXUv~6 z_oOTc{g4^-c;3z*sW85lHY~%R*Ro#$s|z)IINOhVCg*_V^PVFm(`L;@Fm%#A;gn{V{mT;&piJ_YYcf(X$; z8kxo@!+l4pM94D~#iP-;!$SCZt$HJ7?5h}fgFn$X5#<)*xJ-)zDNHvu|&itkw1S^yplYdj=V(U`C?FU3wS2$|{03 zm|LG@3mGV8fwuZ_joT{tR_SZh@3Vd0%dufW=_tq2RyQyaE-8DZ(<~%`QEFT!ox9I)Ns0;xmq4)BI)XvxZt-%nr&D4eW#DP z{w+qzO-%;^ys}dxh=Z81cJGX#`_&QSPS>weH}~2Ac*(JV`>|)rCxjdj62EH=g>E8F z?|+27$0b1M!PfS@zCcA`YgdJj-=%H`@M7G6W^OkbQYGKM=hP9Y(9i=~Dh$A+;=XH7C5^@rpDa(K!>5QfWfO#p>#G0-v35LrluU{i5;q4# zh2>um+a5Jj!#%|n?^4t-)wU*yrm^XN zUJWiBsLm2-=bNlm+j-;J1qBl@nvvy554eA1Mu+6scW8$)dt4 zN`(YSI^rJrA9V-cjH#QN&${nSWYy76p#s}*YGGzr`eL5xbEdTI;E7R3Wv_5sK9y98Z)XIsc;`@ z4jq3}gsu@)I~rkpG+&22xY|8t>>;TpJxciv-1BUJ91-iu#-Z~CI4JgV^fjm_IH>T$ zI5r{#fr4q&Zz`x+{^R+-ouYm%>$-S`8#lO|$aWN3F zHi!aawi_=_!Xvd%-x_BxCWGf}%J$}&+s1-%*KJ?#Gz4yq5s3?rG_yIVdkH^$gYpi9 zS;S!g@C+yrq?pj|-wsUWz1pS-wEB)aMJ!Z~haX6_1a;M{(pqcjBPW9DEhMxG1N=)* zBf_mQo!U08G75z3oy8n198j;j_|Y~YazesmTtac{_NhPkUk>=X)aw`u=vNmokdJ#C zOf&tAyKljbKF{WCRKVt4dn~ZJemLd$YtWq^%8sm~7QQC8^}4q2x0|T~IPW7S)Dq!L z1P*i-0xg2WEea=0K?-}k;L<_ur_IB0M(Y|22wuMj_JQc`EEk6g*wu+E`7RWyKsS}w zn`i;Sg0@=iqtMOF?x)cD|D4n5gHypP-@1AI`va`9_@n})kLlc34!txiDpZWJLw+^Z z{RfY|lSL)Eg{C_)t$g?pcB6o@U#}W6bRs)9s{lFLy=wq3X78(`IGs_+kYaO40`t(>S$lRPfU^PQc_LA+0{R%fJA6>=_Av@2^nH_G5~hCL*zG=h0M01ZQ&6$Nuqy;V7Jfq(M#D3DW<& z3khK7<#Px0*1?sq4{k*em*L_~0~zja!{4SSrulfu!Y=k^C6ZC`)rHoP;clF^>SOnIHr$tO zLjpTCUQcPjR$oRKvJ4lFeneFx!-~yB1&uL7vX?x3AML#2DkfRZJX|<22}e-~CwFV= z60$Tv+;5>fCG8EicXl<6&Qn>D{@Y?t@Or+`F-~7!-!r&v(m&{6YASjAb|T>R=1^PI zZ8c^$^myG&&NUwuat@l2Tu8kPnSW`m=k;WhxLu(-1I;r z15cArykOSt_;=N-1HB351!0yLWiT(SyQ%O$!sONXoD<|YV`|IVImeg_A|wre#)H9# zUlsJw`PTLCi$8+jx&dLKsBm%p=a`83_@CVx$P`QvJUiI4QDli>5hv$-!Oc zCbl1)++}jc(VG#Q5b2c)MRZl~jQU=VQru+!yQXFip|u?|i?@Ji67%+t@J%d}&=Mx$ z%Onq4oBR{?QJz1TRHG*F^ZOSK2;pNNy?MVfv-zmfi;R2@Kjfe;>3l!zhE(?%w8Q}O zikc&?X(DK_M)(IO{0JUYawFjq9CoHUQxH_&fF&u|8)~zGT%aTbn+GzZ+A7$6>RpfQ 
zhr9zGdUYK0ADo%jy{Dzm_Cp`{G2I#}aG5i*<=wo0S%r2tLf42T;gmWDD|TytTzo^h`OJ{%gDj=X?Teig9z1+5Rdf6+zwH0J@*L-i z1UD}611|mc1m1ksxk`@7hgyGW;T3IEPyyQw`MHHc-%GNIwXK*xm34+34bPuV*p-#k zsupgDm`{n?CaIz-n>_cwx*&fp34Xf$LOhvX-rR6jpu*0ua6(Bhm%Z)CYri%{t2YPp z?G+}*GZSg_IKa+ngi31!j7L=jC3Que^==@~#tpadi zO_O>OrHYj~q@V+KHHo?hvI>i=yVzd(8j`PjlXz73^jm5s5K&UA`R}NHguJn}ILo3Q zr(hZ{AVVePDGH9q)AEy-j!te8yk_#I;!6|FA8=su!UMKK-VlMkk*OYjN_A@u;ML#@ z*I-*@r9KMsI(+i&MW&9U@_r$_pt(KYC0q zV&>3fN0#*Daxug4w)RPQxjnFbHVG^1%vxy=;gO~f^b>f|b2lg?k)$-KC6Kt^(U&Hf?KRs-^1!4kas-1 zujysRZ-7}Z$*!8XFcJA;;xj}iFS{ELuVZHo#jds&h$}Fvlm9!A?Z?y9?zb^gS1ZJD z0ric~++cdTjE+1N%;IA62g<2;(0Qvq9lSZ6o(CD&+QjOno7J3t!{hs+-Nfm-l5*X` z01BYrItIvuq^y#M1RNpKhXrz~{LU}SR(&GQw}tV!U~bcqDi>%RNqJwiLO^KUMT>y{ zGdH1_Mn_1p`xgQ$aOKR-Bwt5??X7bLc!=(t74QP!2_+Zp+vE~Ie}tm{roln%V~Mu55-$7`JPtpbw7!pn|~Wv`v|9; z%CyP!MSS*`VZPne5VED?15p9FFyNCHXn;fr;ETn}b;Jw!zxJ;$smFIqWz*)q#G3gG zhx{%&NqZ{lx>S+a)qO@vXhNT)HE$KID#DapSq6@*OqdA96jTEd;)X;7o-;`t$Kdk z-gbSeS5GqLMOC%`n)Fb!o&OzCgaca}MR+q*o2ZZagd0JYFrc)Bfe5qm=O=QegJ$uZ zZ604xExL5b0L?xD5_>wZrc`jz1*6{KHTgWBX#5K&dZ`~q>K(Q$Gc9B4KZR6lq)MWlkQ z-->TyQHZ%O|9)-scSWuA8|~KqFwsJaw#-ArZqZ#`o2lieAYz8+`b^*OlV^pmCbG7( zNP1b%(j%Q59_>Ui0KG4Ur2TrXiC}AO2-q5wQgH03l%&lT%8Q1`?0T(@4oJh&V&rZC za46jYPaRF<{1f+La)x)}b>zwI(LnFxFVW%rp%ml*G(g&sF*;O{Kh@Zc%<-i&*^hyx z3d+CZ4wAT3z?v$n?s?4qr#?shD#4blyrCxJe-|YN1!gFc;H;J#&uU8$;gQ z!I}WRyQ?}#EB(?4W4~<_gknRITP*#z9C9R^4Yarqr*8mw<8#3pYz;@|5$}owlmb^L zpaRKC^~*YiEMVxaKCj(G3@IrBg+^_eG>L6}l%#}1lp+Wn#u1hq${YVSTe}us469e~ zB$_{QH06F|AWQjPSe#Q*f3HjP3JvCVc{fXe)RrWMCp6wbG*k~u1RAysI{>dwAlq@@ z?S1h=zh5?P1X0z*v+{FS?vr#Mlf6&kyFR0$4N;Fg4KhRGa0$kZY?A_$*6`y7NKk~i z10n_6*q>hK8YYkdCoU4;HGkr2g z8m}N~t!kfgB5afUwdW zBRAAv-)JD_3uQ2%1OPB0#{8~<+?d0X?XUTIITxKnYa9?7krMVhiO|bHAuL$$+e?;9 zmOti}EbdR;^}_jRXlg3L2!Z9F1pN`QIbE!fxXm@_`Wgh<3YxLa(}86d z7@MYX@>8)%9_Eq~tXAkU8~GlQ=#%B^+c+RQ0b5ZWYJB>tL-PF{&7uQG^pBVrj%bHGsY`O z(dF$uL};z6Quc-2XZUCfpD<{{u6L$oNdiaJkOa 
zj|r?%n(pn~BMb>JG;4l-r<#;i-^Q(Ab6sr({L-mBuuzx6X-L@#)Fi;O-Uft36ESBJRD;%bipTLrxKfftlqRTql}>BSkDRAUrLh2IRc>_k4w zbrHbJYpXtPy7hT{Y`GZmtLbOz6{aL@qc09nw}X`4hn2vG z-P&JK^bhcq17S_POlyY2YFAf`VY^o}#&_$QC$W4K5ts-XzWT$`ndc)UTRaThIQ@8- zdErWFFl6Ij3Gtse2nJaYX}>&Va!rll2fT{nYD{S#9ghCWB`VBC;0i8%H-NR&=^1lW zT}2vib$&gpV&hmE@q~zYS9~761~#4Asgq+=&Z+ieZLJt;?^y#uOMBCYw2DIhouj%E z4=WD-?(iCqW;Rfo5NW|kkSaW$kK1R5;+LaK(1QluTf-`wxiH2#vek!+^W;@~e57c) zK!&??8`Cg_?`!oL4BbJhXK;BJM@*2SL)Lf`JqafKFo6^GG1Gu$xk&vsLhB`@zfFVG zM7k=LUy9}pCcr_l<0nd*k{zdREggF++*G`~C0{wa*l+q#;;sVSOaPqceR(J3#1d9u z{ckE9>krFi=f_72F=uf%4||2L7V>olMbfHf9q4dcBX@Pgl%!*IJQHrUiDE9pbwA+J zMM3n51{i+F&A)ZwzRn)!MCocc*ysP7!(=eAsUm!o~55|572rRA{zNv$78SLfyO|`tUN9Aa13Y0r$ z!alVZk*XVc^Zd2dV4aeSs;5cweDN)1JH_B5PApS*idFTdZHOFbYaY#BOW*1AmM z>v+>&wO{YV|M?wt-F5ysLY=#;mrqj7x9S0{1mw`Lb(>TTm8Gu=0iLHm+j|~aCtfUr zo>%a8PoxNzteT*N$WrH z5WxNoW)Ulj(U>DKd9fmgohG`@GySOzz$({v=rFvdMwsON<_iWHkgul2dA&3M;8Mf; zb*)3@EKdau@!<}ltK}-LG9}>}fB$zpi-mtCMn&8O%MYFVtMJI*-+J6c)*4}7If`$- zHVAm_c=)jvXr1W$lFUla0bJZUTwWZ#PSboA3lpddD|pE&h|rb9LYiw{?`z<3p1nFltt9tDz+sSqaSWjBXi>+pKstB>TOkJs{|7fd>B5 ztvuJvAu2WXb`B6eJ@h-fKOZ=%w$~`fhKDzy zI&?ETUl~^FHSlw&+w3=dTt4^~Cd^F!ePb=8M3YgN^-tB~zU}dPN@yr3;QJ4p$r0yR zjov^*y|yl#?ngozz}VZRMen#O+aZ_mEB z_ZK_xdBZ?(av-5a*EgumCc?17NY%SlOIsiw$@~d1zH|e@ z1k)4Cf?J3N!Ho-iyWK*A1NunA*Y+4Su$8{id_=o z;1_;63SOJPmPADl7TKQnzVpU$E^uM`tHBT@fK}K6Yb^~`xj8?O13JMh>|bhr(fy@So-*p_xcMb> zb}E620ci~VBSqjN3Fc*xfociut8e++dsyU&A5-CQ{SGJ!swsS9;UXiJRJ2i1G)^e` z81R`KI+mgQmC;vZ7q!dBp&^K7na=1G$@*%-dM9xML}MB`IH$yU{H%Pt;<>)Pj`H)* z-$WiJZfdtVRl@kfO*P_vudffd~MyO#_b28FPzSI_yZ9p5w_>%qRzhMPc#3A}&u-jrhA2R&- z9N7CO6?Zfm{S~;oV;L-Udd=;HFrltU`J>ddb}BVDw}(uAMgjIKRLn0Q=sNF)8h7uvD8xi*vZ7Dm51uVeQ!30Czh~13{7Uo)K!C%W_ z%c+4&VQbW~;%(!*xXMb6ss5(XxIMUB8N$z~l5%7m^3YP3cb(Dir3zI9Ki~S z=baSrUuM+N)h~nT#4aOLkw8V?nb9|r0_sBn}^zFElSpD?qW9MdNk;GG} zKgL5O=CFef{1h9Xf0X3731s=^iRQ7TQ%GYNp+0EVJydV}o?I9~cE2)exwV*L?CEyQRZ& zNca0niGY?$YH+pLsgh~aYWHxW3_-ow-wq=Kur-_ts&=UnbUYU;Ejrk8H~N 
z*BDbH79-Cp*~La38PV|4A4Dw0ZKQ~Qtncv5c;QYJ_8CRyl6 zHTV}J5<>Noi+}4=nJEK9aip}nYm~;)Px4Z_B*&4nHZV|{U_R3BhHWo%wDN|N-I&(Z zj-!k)Q;$Zcv(cf_Ad}J=y&@IfyEa;L11Ng}qMoLM`{;I*81Kx(Ei6nP(SgR9*kF2e zUlSA>YCohx@sZGRQy;Oy_ukEhBuZ!x&)&;da`fc@1U| zBASk^Lt)8sqB@e3j-8rWd?@V2?-YhACbRhJn=bD}r`|(@Nla_FEguo70{UTx3{NB! z=FVQ1=r(#kZnS30I~uyD4^t0MLW38qaU_Cd_g3*g_XKQ!?B~e#qvjps&(8trr-%D9 zB?<2DW%M+EOKta*&3!W#M16DUXrC1V1PH$}5_9_7Ws0)o=I!DV3#3`IC316{li`kS zygE0c-EfZ;)%di-#w#{9T@-{JL}K`o9i*?bB$)Oc6K)-v%{^lqA}&~mtvjJu?CWG~ zZ2I)#63+OA84y7#Y$${8VER7W;i4+qZsHK!r&RF4@slphjhP)(%>rQq&mmBi(`z~V znDDJiPW^&1AEp>2H)Na~ULfNhO?35N(|qK+iY@+D-&UIpF|aLbxH8%|j*LS5s~$%tEr#7L?iFWy3c&BW$Q~ z{Wc8~(Pk+B3;QiEFj`-&0Rz+_G<%yS0*;vfNq{gBSf-YUC#A1cN)Rjx4l7Z7$POCb zWUZA(PGiM7{rUCc^r!z-O9&dAQXuk2ATNO^siz^L!5CGHJNv7rPzqay99WS}-#PA@ z*QEBKjAi8b^6R!!-xIP+{a4gZ@-Gw!a)SAH+PhjXi%!OpUfc-1QOo2s#=Mcm3x_OL09{6c7O7q27t5J zZ$dYSU?I=NXXP*4be2L?s`L~M>kJr2(~J=AUg)8L1c?=23=F8o?*O>!hFT?_^|@pH zA+)zc`+T7Y?r1v(l#mi(P)G8F0_#xyZGj6ORRaOJ96)2VuWL($Eu*lKrq~7JvBzh2 zyYinM(%=giaOq~JCTgb~g=-SdwRpVbNY(==pA@;KNVb@mCM!80Q*ml2J|J z1zTqvqX2@hYfq^D3`#Gi%dDtk?Un>|b;ObKb9wBVK%Fug45t}MJiRtg{~{XFA+-Ll zpQ(X9&Df2RN@yeZ-2+pqN>B1s&kurtV!s&-`8mNpRAR%g?T1Ha@>^`{^HEn>Q3%PC zRO>A{8gv(Kiz*H*lvyA_yfUR`>g71Y`D>)Ih=7IV|84K#psYqcpH@C*jn+7`NtXGB zlFbL7IFeM_GQ2b+fES<;7DvXRM?S5{m~WIP-D01{hC<_$_c-z=lFh2j#dB9a6Yu>j zWc?sKY%@@DijEn+)>egUF9oxBtA-FFd;tca3MNxvQ!gO;Ft>HoT>1KHeg)ea%99C0 zj_%_lIvhl~X#3d)9i{_xhWg|^qqEXd`^$wQ<1OKJ`9l;;KTN2!2n`c}Z_15o9(;*s zgU4hm2?M#YF= zC&Pbk6sv5GdsxM=I!nb%gA4}EUQykw=~Gt!zyJdXfGUPHUwvYXVQyuNaPF@bR%p-^ zDUqQWf8_`{&5P1qZQww0o2+`MUhc2w&n`T59KfQ&481 z%tLEY(c8Wv9a;6zA`9P?57ieKAf>Yl?+UZE0$pr#-J^U7XETqsFfr;qRc}v@uorE2 zqtw)62(^C7=#D3{`Lzd)u#N=i8W%R=j1`h68Cay5ix0#_s~{A%ghL^%o&OMcUvt7x zeGsRv2s(qf)Nb|)ALRM#T4&AP648(`^c*~oeQdz zFWm__y>CLIQgusGVi6?v{DN}d5nD+lloRtq>9^3xK0BOECF`zL3 zPOtHrgGO6V+&A=x9jn4&0tUw=vDlvd$&!1Fbr}qK0|H_yxwn)W-dFeYJWW@Y!0eon zUuE@-a8#RsG-!v7f)v81V)Lt#NG|>m5Ak=)Xd4}YA-=~~(WLM>P~+yeNhd4&tHe*L 
zT&*+9q7IfFNEt=ikR}~;cLE<7FnxeqVC|0QlpZoe2XD2_rVK|+K!mXf+Ot+2dwza* z@#cK`+*Kg;kGf6tw5=A)F(WS8&G1}>1WD{%tXCr$d^Zpsz#cXEr4gR2GKbN-*0TW$ z$lFOQMgn>;Dg&X6^RG29x=Fbpd7m;F;mtOJ)9VRcI|)kSrH!MA(;n>12nf>Sjr_lq z?-p)vG9*W4Lced7;-i7mz6YQRU0$9o@}L3YctlfI__m{%Q;{#5-Tu)Us|!G{>Ts{C zQ&wLwyl^aoxtge^ph%tg>YHz+1g(U*Lf5XEY zc6FWeO5R4Rr^%)d)Zs9HZIK4CxLo^7Usm=S;>CM3G0Qj^^M+Y`ko($n5ys}61U$4h z*TzO1AP4#y?dVi&qCd1KyzXWT)PaRnJ|q5E{p}&jx47uE4?M4hEY}7&hr&+-jtn5r z0~nE8OdQ#TuxQHI7rpmb0@-uB^9WG8xn(~1#C)(?+?bmU7##|oYZ*zp;7%sSmcamu z4OPAK{|>x$dQz#&Sa z3lq#Xor6J*uSn~bNydr_>Q`9Bghw8idZV@B>AQvAD*{)qxcb1?1##wGDlB!1P7`W%Gh=;O{MY$ z*qi|9_`BLJL>*BBBk7!NWe!{8PllropP1-XmSd)WzEt?x=54)xxCNey*E@o2QR!Cw znEMbso___yud`W{MH5KHtx1SZFrU3lcrO!lAPkJg!L=^)KFV$_w@TW6^=h`*RX7Vr zir`vJ{r_hHEc|?7+Wh2Gm5|fR9llmpb+_AzjRq1=>BmASpn>Y|T`CbK_&~=-Bb7D5 zuA5R)omV|ylK8P@6o3_(RI2aW{z`~OkT`AEgeAutH)3$&h|tX^e46mbzW9%L(Ec2X zd>j+oiOk|pwCfEqFcc*rF+HUTx3J6!PAaf5IIB=8Zes7n-bgk;2bd&iPvXEo9*1Of9L0bKs3B(R*3m#kQ2T-%boimLmmT*@6a*6p8s!EZl8lX0!J9ub zSysqZN`rI<&B7UJYBVlle3Fon&$!TfsFviHDPvTX>?-H6(UF~>3y2NfLXx>7Sj^34 zN+Qnl*8YT!f~K9DxRc#Ee`7jLDPe)&3SpuL z7~yMU%vTwV8i7k2+(JE_$HpBm?!}8RO#cTzSGND5OzRbinDOlxcci_lA}mMUIU8zc z(|&F}`h|gm7zkKut%u69KeSDlfSUFcuQtF|PFk*Lo2K^WX0xf~sYIEIC@^aS2XQd0 zZN>T$1l?-7Y|c_P4QGTT|PMGI%f@OrU3`CiWxxniy(-z0w20v`?$bwa=VdU;@7s zZB(ibztG;<@7))?!kNLOEj&=jxN+4`9=&rX*8D$~zB-WZ|NEb-+jO((Zl>#PYP$Qn znC`i1=9+DW>29XR#ARY!ac#QCa7|Bhwdwf1_WAz)zyI9Vv(9u9n(;tyu zJx*bUdo*3w2ShGz6oy$p({02=VCz*5rsA>Y@NR5C!P1zDJPMJ_jjh8EO$N;!7N7g_ zYlit5Sv(IB5nIM99J~PBD8!0PNRh!oQEBQ_y+!siueF*ztup5pv@S@Syt)d%Uj1sW z6*kCell67I&}S(mS{^akImpbd)II&yp-G=!nGinr!oI@t#YC!rj$joFg&YogY=Pf+ zOvD=KmG(nvkp~+;XB41mpb!uu?e%YOEH)b39F!`Pax14WM@{(@O%}bdDE_SIkkwBE z3nrwuz(>hVfww)i*tP5u(`{9#3Km7vg!JE>(3+$XBX<{2nSkN~MpuZlL~&!+_UTwQ z$2b;BT^?qu!=gy{8PtpoLhKQs)A*#s1uJwxh|#^9ffVj?hO#nm>X+DP&X7PDGt65m z4DG&OWxJ3yx_0dFu zhQ*ExDIHM>%qqWZCy(L4nF{l!#T#iw9K8%sJ}H{;*{JbajQp_Q#z}2kKbhXl&N#|7 zh`3zwU-=_vdEdHc9kgO0is(sTykcMIITT~2pCGT+x=aG&Qa0CAOM&}e<|vyA+_mc# 
zHkp_I`5>0sBb4w`uH`H}P^S-2xRv=1`BgU^dwy^Mm9F$>+|WqGYfy0|H!Fn`{(Ad_ zYNZ}lV8*dqV9$@GF|vWJTBlJ zY~Z(#eTVzea+M^o$Y79@ZXp);7*B_c$nSH)ZtZ(!d(Z~vv^f60X?TIZ$Vs$v3ZWd26{a}$( zixGl1Y zVFuh!FG$cu?ujObZy;oudJNpcokvm&a1^EqtUsppqdQN-R*}`!dXSX%>v}EfxUU-Z zJ|3sLiYF&`mKbuN0#F#@@>s>7-wVH)-1GY*4{Bd>uemqMVCj3bn*VirSV|RYM2s5# zgO|6id%|V!8nfWl3y;*BYF$XCWq$5|UP=QsqogR?lMrEWCI);s6Br+)#^(u4?oz8@ zNtz>qYE3}oRe*f?Z(7e7e$9I2BXvt3hJF>^cj!l^ep0T$LcG#MD~+lE4>4XcP6m zp}ZX4a3c)3nwrt;B9b)h<3^%f!W1t;-v0Xc&q_44an(ls2sBjqiJpEMYN>o`t{G-* z06>L{JoVz(=lq&ryd)p*^wf~m()xzWUflwuUIiTC?z4i&L}?XjYoS+|LRSTIo!3pOAb$x0VClm; zKTa+q3ivkq!PFi_8ae;~7y$1-=u@B|%&mc3U5PMby@FRd@Q#vPSZALMcm3t{^{3i? z2{sqGmWO-oDzCcwzW$;B^&0=Zw#X$%o>X~vE2Nt@*Hgi|lT7lkaJ*j^rh|qby~l4b znyb!V*-cR9XyIx#?-wZ+cVMDm0?K1v&A_kN0&$$QUp7_0u@-BOP6`!a_S0sU>CPS0 z4?lxc{2FO)n=pHW!x{e-UC(adA@_(ae&#?Q^aCSr3AcqgQ<-ui=mhI$cx^2Y_ag!= z^vB2)o^J1#wd*-Wu>3Ev)AHtq+vfNweIg%m+P`zAVxr;wz&l9&g(iZr!{U*s2K&R$ z7TYEqXO1T*{Y}moHCgotn8!HsTEJ9HNl8_cve{7MbvXefaq;cfqFCE)Of*=&{-D>e zA@Bzqsa=3_;QLwBW3s!W%8s`g4Hl$IpWYZwwy2#qv;HGi)eGkZ*unBD7@71(hS8#y zuLblAuI^q2k~UN(*BmqL#8H|)%QNG{#L{%S64VU3YIGFyBDcM=kd18Y>=8PRzZ9J_ zE}Du2Y#R9~ePtw7zn*RZbAXehx~8_I>z*npxouAA_bUa3pYAod3Yymcp0kQ4*1K=k zy?y>48$~S)L3uDOigVXW;AL1 zo31obwDYyGa_U#nf&p9OEP_p7itTOTx$dMPoGpc0W#JQ(4`QK#4L4Y1 zKS|dp1DZiQ3Mrb3kK8LSdT9p8ihsW#_?}r5>BJ}39r%XARfm-dE=P$xfFO>s??`Ha z3x7s>ADOV0yf(VW`MLgCOi9@nLX>k!?7NqQE@gINth_8lQR0p$x4JWB&f%e zdicB%;>r!Fb$xQG3m-ImO8qcf*TqFUR%d{e5jPEcz649(SGtu0_#3T%{7tKu??r!Q z=kqJIj1Jj1f1tQ>>!W@PriI_v}s<-U+#Fq`(yT?f?uzFv!6@3+nsNitwVs z3cIFCv2=t2)oRVP&=6?;MlyFd&N2IjIGxX|nA(;XY+V`vW5(-JoTwQpb-UTPmI z{&;R7_#wNGgeIp=Yhav^uJutH|CEH|l;X!H#LF~%nILFzVY4DRN*73qBlu!5MgRR} zkWutS<7U~<6FY89xdSo|)HG?aQz12gGRwWR_@VT?&-LVIX=PZM3+pRi(Nr$-`I%XD zr>VY2lLkJDA#ZSuj4Db4uRodW7^36uX6rnYh{kJrqBG0Cr+qd!Oy}*owFW-9M&uYC z)N#zmCo(5n5-S-C^XHBuh9UFX?QPKq{jQHO3*VK37X1+NH&xPb_5nB!1NK{}skWZ( z^KY8Y*iQPe7)k{{WJn!I5Z{5N!BY1M_kk1DlH_#k*qT z*Y1y`m7qDzBzU-x^C`+sN#cMI$5W&OuPkV3tQYcs 
zsZyNv+=-X|0tD2U_~1fY+y@RuWam4E#{iK3^=p(smZV;*-0_KH=1Qj`qrd7Rwyq$~ zCbadeI89fE5Xw$G%XUrviDQia$TqaY=MDFHw;Dv+yH72ajH5p?__Wn$O&8uFYTT44x`-#@h zN8N_#7f6o}+{{$@@~~JVdLtv}nb-U@<;9>2-)x`}%W``f@<;5tUL2qmwI-$_ek4Pp zM_;4yX>t%4%7*IMTK*LT+nHEs&u7GXlSD7{lV~%LD)s6VBWVjhJH6JW#?>fB)X$9% znQIPuS&uJ0iMqXXT;4VZxnV-9xtZd$N-GEXcs+3(rD?WwBv@aJWE8=elbf<7f5{Qg zIO?c*EsV`p#WWH|F^9k9`wplSea-cTSCU7Rm69t3g?OQ+anr36X_}@Zz-jS6T$1VfTY*w zw*iMPoloKv&Q%Q;skX|`q+Sm*NnGIo8grex;fbG3R$cy18!s$=)I_|7RfoF2@efF{ zB%EVG*4ozy+N@{xp-Jk(A!$gvqN5TcpAw;i@}P`;GnLl18XMgWcOxMJK&Kz?0u|C z6cf5$P(HRJM&gwij=VNFQDEnVV<`>Wb+!T+Zs?0G`P;t7{59g2NiOT5F*5aE+j4?| z=6Scb0`!aV7^I_qbC!{d^^T zd7n@%iiO~TEv7^^T+?gL9sR8%vbhL7uZhB0lAB~j)|#@{m;5T~l;x;XQZiVwkf%cG z+0)?Z^XmnFppCBC?eqj1aDNFDTnM(E#9lx3w^|{Af~T&-xfJUkPUwBh3v64y0%}I_ zJnFq~KZAG2#n&&31vsDvj~KC7Hb6Yz{aK`Bn|88-a$pU~auSmw0fzr1jy2^V zgla`paaDWddXaFP^Moo?b!2{xh`cdS8a`8i*?#V%pQ%8VgOdp~P=WQAc`Es6 z{`L!o<8*g1S%x=jt9n2lcv53s9z(KgArt$NOAgppwaw=P2%Ni&wXDMAScy}l=&WTt zs&~hYp#@p%i=t34ic}QOl<|zcU%Z`&_+yNW`rilc7gA2AD_uWYpJnQ=`O$|RbkITw zp{wls%Gf`{#s057QN(yyxY-fLM0vgK5*7Ca9wHjXXx_n>^nm_R4cHJAh*>)po;pQw zyO6wb9_&YG)jCW`|50=4p(4Zh7ctCIId!4_N!6CvF}4E&;coAUh_xBLZoc6cfRZFo z%2YSCzuvRRKGuL#>{*JDZt$c#u$S7I5frm?mf=+J`qFGu)MhT>P#c(LyJAp3C50*2 zENkRgjq)kJc}I7S`gxJkhP{nXShc+i@fu_BHlcb&rdSmI+j z%aQuPORjd<`4p#qEgoErKs%pl$PCg?oKbYH14zo&@|fTB2({qCp2|E6RBy%&@a6gn zkgg$8u+Kw%dpi;_%!C~F<_Y6CM?E?s9iebOOxu=G0L>#Er^?Dtu`2lJur66jpe(_P zYSvgs5uDAMR2mQSZzSecovwuw1{?jW+w3S%yp@a^0?%~ip8fb0&w5~^JOefQ>{L}h zCa!l(iVwKB&gAItTQ8>UM+i|(R4*{m-b1R8ixW1>y9MmQ$o>#7j40Yni2*m4{iK5q zF!h)0iB|i%N?w7(E@^}2E~Sa!o=3yxVMX|T0(+o~dOx2GYFCG!?wI(;{uRZg+EPVt zpxCRaf;Nu=#xJN=Y6*%(lCmr@8)(`);pKbq>>c3s}{Z;~To_|UphfL^7tBS8D+-@Qh&~V-ZUcHO% z%dO6FK<(joMyc{FAlX)-M*%t;?%YAl=sf2LNQ!^c;tlcZ>!z^DkL7|AvkA={AH7wH z;x%adxu760y6AiFcH=w2WnRTU3{nMem7TA*eGiacV@dEzHyH$(d^Yqy2PU*-zq&65 z!jS(0rQy>BXG?5eho|q&%u;?Yu~RsW#DH!sX@Vy|zjpmEC)BX2f}7@srV`*BR|M;G zl_FBwv*M#5&dk+yQ*@sxHs|sHB^n{vmGTq)S>P;6THi__ zjFB3yw*1{B%yK|0Mi{t`c|5|XTd=k6=ZI>txmy1EI}_oXqx 
zhkZdX*{CL6imJ~S*wA{#-pR>n+*oD430u@H-{oZ5@H)@Ju zn^6*TuD8qNI-?8<jGH70jF6PLg(A>6mv#E6* z&+-XlGTWHbaXt_3dDlp9l(%_cVL}q8F#H6*_L@}~tbsRj*6V^z=PE~*aL5Fn6}Zx1 zWClm~im-Fs%%X%@0WP0c-L9azc#Kz_#S6^P-lb{Detw<|zgCX1Up#u{hE|g-sx|ze zr;R4O2>SvN#(;+%&q+IPEXPPQGl;}sY!g99Dk>mx(T)z{ZJ0t z`OabFwW^fo;^LVh~9d;+5RcIavLnZSHgViTg3Y5-V-X>?LV2mZgOuUsmZ959C%oVaqy{0H z35LY=WrLXbg2`7NxpI`qSo~6b&$#7x(waM_yZx`o#(7w{h{?r3FMZE0pb<4@dXIx* zbpob}$(i_370!_q$??x%kK7(B-jzB}V8En^24Dk1X>e9g{b_UdUvLszcC(2-lD~Gy z1R{Hko3Hrj=ep(;a(OymY$ROUHlJ}SKl6#PCC(_b*bcd`OfaGQ@e=PRs>{Gi-ejfx zylvlb3IoG-sW|lOTk;s*=}Klp9^H_XFgHEqwPo4WJ4K-a}>gRu3)F50xUmnf){a$3DrjkG}_vwfD8$#4hZsAB&(1J2G zXLluU5k_7}`cyE8FZ44Di83u&k)FVJygoJzYMAIJ1KX7a8(VIf4av)E(NYIRbYR@Y zA=mLi#@e?mQgxn!CaT_`EH#msL_u#a_&ebmk}^UW={B91)I z#1&Ai^$zlNujbKFm7KCZ$Gxo6g=ln|QF@DY3f9a+0A{l*;dZL=FHWXR3b2*-P6B&$Oq=juXDS6$KzZ{^1)VTT@Gpd~y>8c^KLiB9>VEIQ4mJO== zj0(Zetr!I`K;F+ON^UeEo<4h!!En!FS(wytThaL$rqdh;6kAeIL}2Avh;?v_ z7y~_WHkm`^rgaRkE3H&xx>4G8QZ2z_D{3N|dMe-<$g|D(+iA?u?Qj;zb*x}enIOAd@1Pu&s!G^yIEIFX18g?j5Xhs1aIOvAaf9aNQ zJe8Xeueu>^{r{gS47AYCUjvVZ=cZfRf)0GrRWRigf8zWjpfB%#U$-a7{Qh&&Xa7X$ z+u2mZIx8dkK(8nzmZd+2qFj=|(_(^KNczF4?xU=rkl*MzXYJhRPbSN&=Sm?v4uGfe z7~7(ys7R$xAN9VZ*S$D?blvau!GG4LP8xqT=S<~5Moh*(*gn8GgiP=z`W`zFVknx7 zSN4j1l}(QeTeDgrtCL(NhGkC>p&qfl=kudEP+{uAg%#sGcIA(>_0$Zyl=$(;Go$GG z-MdM;304rIJ$z%kWV81hy=dXD_#GkLyT^iJKQX)#^ITItA3S<#7F5I-iGD z#W~&Z{dx(E6pb%5iy2!J7V;s%~-}Si>a0tV$KiyDLq#bagKLK`vGp{oC7;|dYG_+l zcQq?Yr?4$xLYps0>oJsViwF;gaJdFqTPJFAdVD{dXrbXj=NX8W;k0Z?%_@&BJgW{@Yo1vvndR;X6t( zZ|^T@XHmzn7ZhAKy%^{j`DyD6#ixC{{k0bpp{?fy7tRA5nhz3jd>1E2;G0Z4I=-N@9lKr~NM^t%Gk<8B zc2TDxS<=KWY?}F=Ze+FCku04sv&G2TTW&dSd~7_7o{ z%+}YSw#}h#omLgE#6dm7)Q}~Oo0WyLu9nW7!mjGQ6A_vsT{6XB*S_kO zEqrX{xm8zufcv?r|;i$QvR}TQ3W<8U`VcS*x*Fb?bDG zI8T_ZLNy-d(eomSp-$=+~T*qk1L#%hJWwS(%P?hhvC z4E(w{i$+Sz7V6Q;U>C{%FjoU0aFBv8ew76q)sGu?3s*WNv1T@=3uO7(TA^Ea=(mF`pOCc<3r$P;>(3VveDP z9@atvcs!Sv2RvJ$Rok}lVWhQTj2O*UKi^hq!E#x*>JLMX`GT~`!^U6O2OHF0qU{Fx zMKI*jObNgjXC)<0qg9X1uKB)Nk^LJ5;u7WPI9C__J 
z;Fk0Hf(G>cqi0Rjw8;f(4C^O82WgmVC8#D$HMeTdpQt#e$yjFYIYB^4*25Yfq@V2!V1RB0d*06Dv%lB9YKL|?$ked z>5KzgWM_BU=Li0lN91qE3Ln~?h2J^slw5v^Y`Z;v5wa{#JQ{mI4V!~@4PiQ>9Q)b( z5KC=pBIWB1R@P*Xy=Cy}-ck++5UT9E+m_(-5OA^gkKa|WGnUB-utdf)tZX3w^6_sjhX%lcZ! z7rx|j0rtD_>wt^wFx`j(tIQh#$+6KbyG4mg;P+F7i17uF>(gIt3oey@-ijBy+G;MXyLQ->&!G?QThQ zY#FOhazOAQfV@I|CE7vNB|PsH!N^OM)i^E;ga%?bwu?j>OHQ*Wl+-IN?r=fy4io*- zrMC@vFzx)qX@96biOqH55o2UCPoc9U3z9SxsjToTfF=e|XA_n+O&mCmU_vd*NQ%oi zc~bGwwePAzeDi5gKb4Ds0giPM*+Pe$k@>F&)j5GBi1T-Nooe%H26tx;%Pjl-1?Agq zpTYB;6Ym`n*Z=-D;S|M^Fx6%pG2TosK(;@{c3zfWolH6=q0;ZWGl(_5k0!f`ugzh~ z0oy{X00d3fH8bdbDWmIqXB6Gx@_t1dUE5w}GS=Gu=;q<XL2*J9iBvl#x4l#qY9=^g1mRy{b2Rt^rTr7K!14%Zrt6{ByTL*9 zc1se$uit;}r%tG0wE@J9r@93=1b+`fl4I;}5FU3FK=c3rw@Db9tnm<#A`M4`eI-K^ zH=dWsCH@cV;1cMe73ZEjS5H3BHCf?{u?}=#@*%&h(_Zbnx}r8sS;v$KzfRWAS+ebk z6J6JBe}0e~o-FX%iJAjSuH*{h{)_3zxK0!yQ)MC|6fddebyGwg*N=~8+2N|Y9XAjl zRrYNI{uAPS2u9Ed*Xq6d(o#$tfSX|SOtA6uNVUExNsO(B z)+1Q{VpgOJV$w8**8n2z_Wg0|`EM^V6kEOH15f=;VV9mqDvI8!4B#<;)0-J-d$&0z zt}iY}7{8BKVI{$NhshOjwIzBo`Ni?^emAI2D-$2P%zDa@-ttOnlTG(cnjTrfwVUNF ziow$Ex*`oVGVd9=c<~x2n-muEv#KG-uLE{h*%&G0tfxLVCzwv_tciXT6~zY4+eLly zzpLrrxZQ*ND?FAW5C->p;FgO2>SBAtiUXGCXjzlrNi!wM13RD!LaXr9W~K%M{xz94 zJn$ApZzp@L4wl|JvGwn6iZ0b@`8IPD6X<(r)qB9?_7t;IlD49B;O<$A_9GJLD7vOu ztM90;jEA${`sP)={B*2y=69D4mS;~$_xiwO_7oppeh ze|(`4IHaJf*Dk&-HU(dNeGipwJ*?07X0^Xo2fVK99jwAxM1w>A1VkLroVhHsygROE z8v$K!aoe?gQ3{$tOqcT)zk90t4jqu;Dj7Pn ziU=+L<&1^q8(m3v_ER#~Iwsgz+)GNXZR`=+VaW^-_Ew$umBr0>_E06|$g*DMOS_4X zu^>Ibg-_Azv+=Ku+Xb5lbzQN+nER4mJY=)i6AgWhz|$9Iby}D#%IUuSwdyfsIDVXU z7=Az<280ECx`s5ka_7>lmIx(IT2M|wAZ%n(-&rAFD~mY7H7lz*pySk>k<1?jh#l22 z5r-YzrvSszd(VPcXFL*Sef#e$@<{rDT61G1U37hG^=!mcPO`BePjTfE@cys8q#RX+ zv}|6aIxQdcVp!qc5z>ib*I_SmVO>6|OA=L5Ug@mgd=IO>)+rXav-Lr0iqq{#%5o7! 
z05c49-#v<);gcdjXAo!p9o|O@p+5YW-3ecA#BUJBhLSv$jG{m^8cxrc;-K4twE=Hl zMds@U0YLw%C(AMW${#_@@M+nxf&-ea60)M7gmNZA6CbK$%4wp#AI`1Cf&tBFzvYj8 zdwk?UBAISddR@HPlIY7yeDqjWvyS(}_ZMjpJZR!lKu7!Kgnx+Sb))sW^R~^wIkA1F zyeFzb-0v093R%L-H!-#ljgN3Zcrf z79YQG#q1JYpu7O}+UhcKl3G~Y-+7`W8N5b?(@&26gYg=G-cfV3pd%uy+FEu3A8=*d zY8LOb?l`^!#`JF5G|?h!e;M{Nyl3pTc`QqT+cwz|U}}F-$OUWrmdv2{#B2=*9sCjf z+l6rB*gi4y#Va&DFwz&|z^vrs&~c6>+~lPQ*C~4+5z+9!gJ+0LP#elteA;f5>7v2V z|Kg-o{4DQREyqy&#c8Ux;@$5n?&||_ju_zjY_lQtrX!ifs)P7XM?E^Rq6*#$mNb9M8tx@X=e-g@a7h9$jPe5fg_4kKDeDsnfZL-pg2n z)ssl7z`uMk%&^4(GU6ZogdET|+7+V|VSHA0K=1ZE>O~JVe4d-uCg^#a^0g~PV|~t( zBPe#%kQry#Gw|#mQM{zmB_On9uYfnHKSGqI%3=idKDM&FSy`<}MVXR;TfC1A zLc(`=H2-_fh83rvM+f9!)K~NfedQs}hpnscmT9y&Qp6ywCMV!e)26BGyNy^6vzPKn zyS`#VDY;lP`0M6ZG130(#moB17t$Bca$k}JRRx~UWjW!Ov{RnA)>A*BKmP=%><|C7pr*mM zpHas{u9ekcI0DXiP)Yda8KHZ3{kHpHCeWRl{uhKF+x?eQ3Tq(m!`7eSuKaOhVUF>3s zJ+z5wqJ7_HV&0^hsB2T_0($LwBdoul+JVPQUwu8MFu~x)yY*}bw8*}AU!nj%cqfA! zq-F7@F1CrhKVn_$p8!W2(%(9;>K(+e3JeB4_O4E?-KTZQTQ55Lw?Y?f?JBQbf1i-? 
ztDTlWE`|(^O!r98d~0*s=n>Ns66#Hk$b6Y>a#VT~IY71y+L+mbFJvPZCx1Qz(_&*V zDxJYcl$;smw;Z)5wK@%6m4AXyG?t&>7NqPl)P77 zi|JAn(KD<0iwkXey+odt0aJPrWxP8ELd2;i-{nnl%S zng`G8Qg$wDx20Pfs?!-Ivxi(l3f%+iSozu7I9#Z5%e;6j(^*`re!@)KL zaj;|=^?ePXb1gvvSt7MBt&oqNEd|zn3_SERRZjehw|1Erc6bt|{kyrofMRPa$RwSW z`&zvNwz$skd>ET-@f>j5Jd4~&Dhj~?3H`|Vonfkl7THI5&Y!bw;Fc2`6&xj@RyFJY zj+3U42q(PFCAh2V{VF%Ea06esy<#@fl%{+?UH-`Z_lX5lmAx{LWTz2Bl(n`q0Oq*9 zZLfbr6Sq0z!U;IvdH^T$KqKL2$m*8Iyj;gf;~&V!DGmzxi(mL?F!g+gVdu3t2IA*~ zJh%l=zFx%M?v=X8QHbx)nrWhOl_4&{l0)Q>hcTPh!!PXhbrfOZ1PL!OPaQth6Xa5k z?!MS0jv^};s!okNc$bXxOt3BGK*D9S5xT-zYN2K8SVQS+*FM-rv0X`Or zB;6v)(ugif;_jUFF!RcMwPfTORhs2O1a;hb6ktpH@82;=tuu<^4_xr`(z@G+J?%Te zEcl#VdZ(xme(hsh@T*~Rs^a1N)MfHE0_*NmZlTer#2sZeHuyrD21&|fRm=fhCk|)p zn!FybP1e>_EWcHSC6V6dn{iyAm=oV&HjYpr-tQ*0*MIKveC79HW)wSG3tz}|OtHnp zyFDb4iY~z)GHu9F`&LcZE>OV}3rdEC>iF}xUhNOIINcledN~4c%OD7C5ba)v`Kz8_ zj0J()`nMp+X~p*qi!E6_s*M6MT_Q^yl(CAdL5;BXzH4adNkDoN!4_?SAPof|7sn2H za0goma*X-Kf(QTVPrBn>t`H(*vkr_I^R58QeCo9U24LZxvfcYw5KQ8Y#ZWvkFf*2 zqK1LW?B;BEomC$!edRva0tq&wKoDx8JyQd#AAg|ayw=>Qg=kKQ8S|M_u71mtI{ZA+ zS^9gM@gp%So^16vnHJHRgRBa^pE2`od&jjwgyqF0e$7`$(q!`Ae&?+k8>iBTdLX%k z%n_u5O8ODe%-iQ}RD`JXje$5a;&kZ@64#sBAprtqLcFS$O*Q)$%HI-91ujt8@!ULD z)ut4Pn1)d-RK2K_2jGgqTZ>suLem2(H+z;5NFwt`?gk);uM+8P4NMW#$3wXV>jcIK zXkcYGw~Br(d46Jui5U4U+eeQ2`RNyK2uks{^ZZ+5r!TPPtr#n1iKnu_@BUU z8R$(7ghLjlhBS5Ax?Hf(Sxp2LsoTzL`#3G1662GepO|KWNHk{@AEos_gu^6evpJy) zvB&X+Op&S1bP;r~2-*Uv&i_W{>kvJJp8U@)zTu8H?Dw`(i$Hpp1#rc>b3cm(1$xVC z=!II@<_&#}xV>eSzE+o3+Ub0?FA12pm59Lo@5;p-0KtTmXEn(n`$c?m$Km;8=!$tQ z*NN47`xofo_iJ4?V}W7NG{PPI!ffNRKKGTTqMyQ8tYn*9x80Xm znEcTff`-3dn&_8dSc?q==!LM+X-pt!2XKMDpdJo*{cv9Kvmta^vut|9B$dULI&8NR zvBBTs)yr;O^F%KF_-zZ{dALO{X%CAWJ6Kumyi3@BoJ0Ww4GwXyF`wE_(}VYNW1#`V zM|WXO#jA1-DDU5G@AMuZMRU=ZJHh$*Obn2Ydhl8QX!wdlfiOe#cw2lnd-i@89x+CW zW?CcAs0O<;&(Hi)#p`Vs$J0hbE=B6g2-l7>Boj9VN|bilN2fPKbC2Hz{FohH|46i2 z+dIIfXU#QY>*byH&{F{eXcQ1Qh{WYRl)M6&dpaKv{SP|}oHF>$^>dhHVyn`Ge*%|M z1f#fS zEObBem`Smg)h*48dC{DQ?DpkH^xTs1;hAK%B%Vd;uKgn4_n<=%Vtg57S}i-IMUwR~ 
zHupCNw8l#QS&Xf-dM$9T(}jfp6+zbz^L!FiwH#q64SfDz%;M#7^m^5o)qZdCYKUZR z^3%5-fSh6Vp@3~(dfv2tY+=GIyf2;z-AM?RMtzDUQ5m>I3cnxvof-XE_f}TbfLhLR z=5^}^;DRCJW(+{lx0x*gdhdM7j*r}%Lu~x;AA^+hAb%e-qC*@3y1Z$>9;nbhYDr5Jh=kdS#=X@IP?$AkVuXqqkv)ibixrW+>94*rNv z>dYK#SaYV{OJHE3Ei6(Ilb`)5(yQuaL@St*Vb+nXE(f0%%Crj_kU3K*Q6CE$ z6n$D6zCFwCyi6(w_fXFd-KhL(M~TV@gf$?jd!p?o3POpG=1b$7!9}Z{+!{M>{FVGs z(?=RvNhXZ4EyWg&XqP=hnilM=H$-Ttlp3T0maix%6XNtuukRhBe55T)r-^@{u>2gp zqWgQkvn#yEoXZry3YJGo7k1~Gd~9eIm{L;;uA!pLoKSvD^!O4?vQ~d`fNybMrVFXirzgZ)f31l&qeOKx7+w^lANQcc zPs$Dta98&YUihr=%~`cR@09I+ebN(4yQQ6JWufpQrQ40?Pa+Hc5NpJwE(5%7vNm@5 zVl;l^Rx4)FEpvKcCC0q z_KABG#||HgRdIwgmZtvboF~q1slxSBKYJ60fdwH$m)q!I zm*Yl4AjG%{c$vkvyi9e^x69pze?V^lA84e4&64jGciWDif3)b^?l8OzvUkxR%#$*8 z^k0Cp@u1JvX_xbMfRd+1cQj>UhsLlP1idoowL|C&-QS7=Nv}Nz-S!C?;c|dSq{i7k zF!D#4c%-efX|4R-pJ2-thqut>!uj_L?s?``@7PC(kqM5HOXQgrIIjYJ;^Dq_!EVg& zL_b-4L(%grz!AKqcI5P4Z9x~HZc9s56eDir#K0nXrEdKHi5j>m-ETPnX~Np*v&x*@ zC^4^1mDZiyB?<)lyQd)_Kr*gp;pv_e+jTuMTy|Dt<2>xX%e$Ji@9E%y!%LRYy%!&N|)#wOgv}9OUvSI68i{zva4_jSiGw_C4 z)E3hzx)TkDn|d+|-sYxO0L}n8p;|uhkuN9!W|NZL{gcVW$ukJ$2md_9<6BbDYIFEc zyVR#7N(R<&J66&b-`zDlP+*uco{ zzL;4CHQVscv(?|6TynO3`u#^yX3Jlu4S0OBE`Mn5E)1b;K!_`gcTz(y4CSFPIAW2#jXZL}5A0slT=)3rQo3~})^`5om9fRQD@@;Ut#y97g8LFU7Ri)0V;{Uj1qSJ_i9!xnV!MruyC#9py`F1_ zB>nq+ux^F{V;ab|Z1;c;mq{~8TJ`#`@&k^jNc-=>-vNtTXz#QsCEdO@T9ycP=eUDy ziTDY;U+JO*<~;&{m4`g+h(eO&%;Jk%NEp@hh?HBhDvcL%tU93#U&xS@6)GqdrgO2L)SoA+xyXGq*V@?LDpyX%*_dT1IbP<~o%R%z_J1 zo4BR*qK~_-&5GNP#bCB359NP&T#%^GueL6l3*UkG#E7S!fBmip^$KT_v7rJ}<8?oLvNIy*qRX6zJ(lS^BoEJ2mC{ zA*8Ftv(Adoo~}iJQ|k1*t(6E9&D6pi`YkDYE2=Na1{?Ynz!5qU!_eg2U5(0M^Vb}( z0Ba(rePe}cr1$sic>lGFDyQvWkeseVDZQtgU7M0U%dNV(+Xq9bjO>Jx^034#n?URI zk5N63;3>#IG^zKVg(oroM@bz=5A8p-8adh>OG~ zF{ihY-*^(|iD;D4l9m^zj6b~$O3_Mal>T!!m5B1=1~MBkJ0j5&k6;9TqKrzP#+pZ-}b@7i8{TMmvG z#iyFnG{{#nc1R&%p;O`c$F%ljN>H1@6wf>>foX6*DUkQ?Qykl*g}yAI9re*houExeucYt)=<0%!eRUow`dO&{x-{s{zudK`fLl z88`?VIrehXt^c>uf^b7F5n9o~5Qs4_#Xt#y;MS*PKH!V1uqvl40C2qaEs#Gy6`I6n 
zjSmIWCV7WDHXJp(XNTxcMdLxY(P=4M*=Cah>Rc6LMR*}R?`tysVEV>v`VthV@UK9c z`qTG$LP?!Cy)|Oq?j7s2zOX&49qUjL0YvkL&UdzrUt1FW*T}Kwj1Mu={|j;B@=1tY zm#^jW)Z6}G94FzEGCrTPt?2m2$qG=WQwi1BFoZwfp$(e{>Z&P4P9^!9Cm}n>e^ZDg z^F&uHJqlBxic_4}HPmo%I(-oxQu&pO9m~geH>@oO%yG}y8xD^!Dm_N<9!{OzK^%dR z|F5a9j*9Aw+8!FDh7JK~kdzqdlul`BL6i^}a_A1}?pC@6hK2zp2BZXOq`Mp%Y2+J! z@A}^L-apSe=j?sX*>~^fp1aqxZwFZGHvua6gFEPOe*b0s`BN1b<@TilM|)9DJ>71a zG|b^kXGcUgUoAAPFj)`)-!ulmtHjyXHHLCrM_^!O6C0O7m(VxQH9f2=S(*NXJ6I1@ zu(S0}3W!ilJIw|J4S8I3&T2zst*>reN0##zewU*mi5|&9%rQyfjnImW-+{8RHo(Xx zReUvTFHuJA$1k!L0afsF#L;6S>fVgroLI4ROIANrCzz+Nn(cR7AxV;S7;Jp3BvEmt z|6aiJ=#&EaX&#|4# zzTS2(oaj{cKkdUu{{n1}46OB;YgXS0n7Z={?9P|Mrrr4g3xcO|zR}eYtZL3TO&+C~ z;)g^}_-dF6&{0XPhydB~-m_&sIcpR&3hJ!|7shFLv-0xY$se0FZv zD26}0ogtB+f=1~vP>Qo7rNeY~L?jwm+iSt13HQEL)D+e>WT58AO(cHF_l8G;c(Owh z3px2w?XOZq$Uwyw>gUwrIktGcK*v}qC851pZ&8Alrjlxf7b6o0F*;bmWMh)!H~S~m zd`5j+sMx4PYXADY@tktF2lOcUFWlEefX*uYIt^g4ff#SEno!9L&())si;Yq2*Z1`( z&f8wPlfOsbF&Z`}iiFeq?21`_&G3KiSFxyG@>j0S6KFM-k33bPEPK$q{W846IFLtS-N(Y3Jr)c|<+Pi={m@V^?jc#5R-i2vR$ieW4((tC06-nFQZ zqdqq<|9&lH(JQU^24`ui$sG@0Q#=HwPS^>3j9P z7tG@KWBg8(f3Y;O2oF6&%(X_-IE`8FKI~hX_Ah;$rwRF@Lf@dtzsQ{KnYz;0kch1E zHqWZc;hKcxIxuEj`#eRTr zM)6CEb@P4i`0l*lN5WM9@dSl?u?>*fbM;O+ks+ao-SpavcSczruWOjNEm>Gy7mtVs zXYbrg_rvA-wJzmD0&YR-u2lYMG~F^OR@r#60Xf{BGb+=n$TIsIoP@KGWgPbWLXv+s zLZAAZU)h<$fU$Q3fZk|}_?xY6?ZWGa`@`7v?|Ff+cw(=&wHMFQ=9m38|C}wI@sdW$ z2vIf>v9ksGUVn*(L`{BA z2iW6E$-iq0_Dt`a*mes%o|#1li$7Zb>Z&Tzn{g|9GNk+2MLsgoDQVxmB4(zXm4?>A zZ7YB7$@}qHf0n(H?fnwtc(-$@8Nq2A3VgTbHNbo+@%B)k4EH4_By-h*w-FjL(-dME z-NogKK03C8Vi7lcXq|mrTuBEQ>*TE$E?+^6GqykUSER931V=GOm4?3Fi%}*`SI}`m z6DWu{jmuENl6%M56snu1jFsG1R9adRPJvEsOu>WIOK5B@OC(n8*JFXy`ulD5^2PDt zXy?_e&mFX`8e*w^wJIfL`8f5Ud9}LiuPx>b-j$$1v-X+}L}(RQFajqj@r+XU^~S`R z=%dW#4b>;fxmplfD}oO@Zyug7ME~SXo&ooaB5T73xXE! 
zIcdZ%wz4}#5ryCB+jvM}sp}SXpW_p`ZoSH?7$n(vk2AtP_CYko?tJuhiSEy`{8*zPr|deL z={^cOsE^to#rp)}N>v0M40jxd#{l79GYzJW3)cI|y69UZ))6uDybhYNA0jy*A~I%U zh_L_!w23M8uU=+fxxQ!~RnHZ^k7 zm>_ET4gPMDQJm#7pD-S9u7_GkrW!5FDGpTQ+w;VW?!@%ZI?o(LI)C5J-ek{fwr@gA zIzprKNB+KdKqP4Exp>z2(sZ^vh(48I@*HrrAUTLg-SuFmHDeIKmZ<|jCwwldVeF`G zBSA4Dc;~hI(}){Bx&F+1dd!(C1I%)VX>{^rnv73$h-JHFXq}=>tg~Bwz|b)dC^1kp z^Qe{{P!z9Vjf1zMm1U|HcsOs-$!UwlyV^BPx-c!#@Bp5UY}HNY-Dz{kOZbA%g^_4O z!zVF`3xj)mgEW1km65luWgJcgp3 z1fE%xVfw7?PcP3}AHc`iUNURc7=RsFm*pT`jHP0jPnMyiNSUuRr9XaWq-9h^ z0@vHGm1csx_aFUb5q7vdzcFSQAo@CLdEncv-9sLoxGN$AU?QaDCKF|zV)QmI_E-#- zI%WU|s5S-|VAzaJ7MTpuPvQUpfg0Dm;=1s_&xVAHt2}=zvfO^x>R;?D4Uf6i!%|mb z8fF*o%-Pf3khuv3gJFsvaBtX>O0cFqnd3jz^lpZWGRRM>#fz z_WG$$U1;6~yKoRx*N6i&-<=h;|2a1`WQ|}}hC@IR=}pp?C6hLFve-W`CM^>3s4RW6 zzS{Yq^&AZ0QK*Z4N5cd$-MO>dL}!O6%OP{#h65Pi$}wUiph>yS-T3@YQJvR2;9}(< zvEfotwmzzm0o5GE65`$;p~=DN(CY{`$i2?#y;JK{Ly+UT0j$M5Tz;~m=1Uy?9PB8k)fdK~@9VTX11o@|8#ug9L2KY@MZ;$VT8&Ry^2 z%e0oJ@H!&ELKNJ0EuQQ44ZX_TcHx4K;GD$pK`9acLdD-G0u1vv$Z@Q6qN{nzmB@{E zG6u-zOxUkWtzx?ADUO1ug2Gkr;bZ2$ zNCjDt?3SlX!DDURN5$nkqp1^BkjIW;qeF;6g?R%X6&FPZN_q8b}9HcP_(&5pIqqJ~}5-b6o!ZuuUGe+np`e zqoyMR;rgODEeUuAEuCml4RZO$fX~S^Re6S@vf7rG11J%X8JHUuyo=!b_)}C-MJqX3 zwwxzo%Bpv1%V16~-VmL|ZPrDIaI#z~a6Kx2Z)G|pY;P)pduTPrIO*LnV!>8Kka0M( zTdT|%0|6BzfQ{g)BRlFJrs%)bm5>yBhrQ2DmLYtFcPwDoyZHK~;|)FeMQ-*?3=z2y z%r8B9`EvTuWh#1ppZKIGrlmb<3$Ai$;4z}zbikVU&EWhQn9s&NQ#J{4?Q<|UK1_@E z*<+Mbm01>A=)ha;N;IDCJHcEsfUK}Zw&^!VLHdU?j$`)=XNa zB7_#f-r{9td8f1nR^#F469E|ajV(8qZ>bdh(gH1FjDbBI9gPuyX+nT@9KFo8Y4GXG zZOj1CE8_m#0O3#Ef*ew6#c`~Y2vou%#S-ewgz9}onH zpP$G7YP5xy$+4p3$ubt;7S+ab-0r)B_p)0(<4NF!J?Z=%d&~=+K8?dxfA4Sf-@Mt& zTCC71-rG!+F+4R!xrKM%iNsNr5b3r_(Eh2dLXP=!mQ78w1R_>s%P>>dDk%`)R$W&o zA`U@v<~CM*qq6j$I9<=a$aZ6vfgs-~8!G_NP%9?ILu>Mk)_+=QO1YQq<;l(Qv>SvG z()@7_JGuGp2tNKYGvMx$hF;v`g?HLmTF`OVmE(I7-2WsB|C4AEU-H4H$aZChJIY)L z=6}?_8;8p3qz=O-)D1t3eCk~P!yff{r1q~Yrk)!O4b1@T-S_`yunqwrjjTf)ZnvUa z_yP{Z3$&j|of8EB;UBDhhS)Nxi6u{hds^IM>i_(nt5emg`}Vh^N4cD)FyJy<%t^xg 
zWR2Yo7l6(i61-ROk{);t5b-+yA(Go5xNyJhwb~xd!WTgzYns`co-3P(Ep-kRltt!* zY$vlQ6VUjWc6!53f4(BFBzlcJtkDveHwJN^&NA(w%686jVaovRNHE`in`C}bzNGnc zw$v=0eG5Zwyt9@PrQ2G&%es8u@zvAq?d^dP{%?*6j;Lz86-y;#s4c3PXtdSdyguC= zI3Je*5BuM2XC`>n)YgIw>nxl!3Z<7hxwr(i@jN(uJ>`#67-bz-EJ0Fu6 z9~>IeDGvSI(1wjZ!3?-dw<8JbWL$f1+8Xp2xOq@r_i|C)iSlQuI@P=74o`q&Jjsdf zc+Cq2N$;QYZLY;2PTqS)h>l@n+<@od!i@KN1eN|U=;Le_;mq0g2qBYKt!b;_mIkJp zx;kOuc)1ONVSH>T4LUezP4Mu?#MszAFW~Z*1L$zFRQ)i8i2zq24rK<+kxxS7alL=d zs#;{4L$V!qojdl~-`!uIDy@DE_|YQ7&p$rj?#@xK{R*>lT8P`cmg9t_D`?4nx>)N6zC#R$f=zC}s=G-R%`8j(TSY;(CHWAf?^0{PfXJ z&jP8*A9`?}Rk$$u3r3*fVCICO)W`{=okS_|BUfa6@W&6(Y@H<-o!e<)x>E1Qa%|u#OKVnT^+Al?o&R)E()94p+fKtS5f+jF3JIZjd4`P2d8(3h zBo7f8s$}1!{4V$2o*Si#s2Uj>eu#`!R=Q&~n~eQIPY1LkLWeb2{(^he33weX?U3Wy zrvT9S7dxJUJ14Tl+&`q2Q@VNeU{O`PJj(e9wI@vso>uNwm#T(BUor$-=&(pgEF!(O z#h3U1m63u`=4l|VyV+&PqLW6aS)e-382In68wo5F#uXc*Xg zMMahmGbFy_j7(+#G%C!_7E3XgwO|4JN!A(fGZI-oFC~3J7L;jlK>BQ0I zLyQm-gIAMy{a#a)dp{RGqf;P03ebMA`nR(71kl`rt?*az(~*zP@Nc{jzQ9RLip37sIB# zJV{aX--~atQ1|BeZdy{W%=PC{vOIq@z;f)q zc~Bx0u_}mPLGvqD_>$E9NnZ<_&q)uC<{8IR>OQm(JK)J=V85>=P=cHGL<9#-fUNCe zQhlaHOH>9fhRYmJkAAh}o_HYi>TvncMh`@Q;2Q&R=ej?E<-V{AbyB_1uc<*qV4zPh z0=e%fo?}Fr3xm*%B{0i0b3Y809a_f{;9z#j%ksZ5YjMmrGG;fIW`wvx1-qs#DOKmi zy1eCIo`?T88vl+WAn5;tHI2+>`>6p=K0aqHw-Pk;7Tmo=V}SrtJ&xbjN{g7!X>qNo zc4NA(R{Un7mLsSv>`Y1VGD)QNOV)#BOK=ejT2ByslR3d=oao$!HLt&OY$)WvwsCoI zC&?1BO*r8HMEuNoaEQ!pLrXjbZivoGhSMg>RPQjuwQ)`s?Q|GrbOO|F&tbEIlWok@ z%YTi2`ijj~sTTutBdU8!TSn7g7jU|Pw=omv%7f>euYp@*4$pvgU7Gm*u5FF-BE;qD zXmU@Kx}-ieUaXQLh(TZhqX1|Ywc4Hb;_=i1q27H#8>yOmx4-KW8Q(iZ2haDnOaBVa zx*mLS`}giRUB00<-}np473HFW9^VFGQnYq0qXo^!fgY3s(SRs($qDT-Kk!0Rw6_4U z6siWe$JMeIjuhszS*=bP&WDpe;wGD+lp;|xK!^&-8qJG86Ww~S3zTCfExaeUpG-@^ zfT;%`q+x>xlbF&{Q(u|3IvW&wNXBV68Y%lLJPG)Fvx|62&LOL+gUJY#1t5rWP!`9= z#wP5s_j;F@NX{T$RBs;VXP;IVQ3 zX=wMneVB<91*p6*Wsys#_7iVb%C&xRqXJ|UHZeoqtR)@q6G>-E5Nf>6t4Pd$*eQe( zg}I=LZwEAkWgVDapd3(+l;2BKzD?sWGor$ zl1}id^n0otVdCJ>|1hO4HL4#u)J&2VctRV(QQ#d)$!nr;cXce%D7_gcl(yd;iiOW9 
zA~Hwt3Fm)iHeo=G$oNj@g^B*ZKu%Tzg%V@&Y6i*wr_X=LkT(%^d?W7?B>ofhUz02& cJMR6nupf_~t*AJA0jNh+QA+_T|2Fjh0sO?4!T 0: + Visdom.start(opt.visport) + + Visdom.Text("Name").set(opt.name) + + class LengthHackSampler: + def __init__(self, batch_size, length): + self.length = length + self.batch_size = batch_size + + def __iter__(self): + while True: + len = self.length() if callable(self.length) else self.length + yield [len] * self.batch_size + + def __len__(self): + return 0x7FFFFFFF + + embedding = None + test_set = None + curriculum = None + loader_reset = False + if opt.task == "copy": + dataset = CopyData(bit_w=opt.bit_w) + in_size = opt.bit_w + 1 + out_size = in_size + elif opt.task == "recall": + dataset = AssociativeRecall(bit_w=opt.bit_w, block_w=opt.block_w) + in_size = opt.bit_w + 2 + out_size = in_size + elif opt.task == "keyvalue": + assert opt.bit_w % 2 == 0, "Key-value datasets works only with even bit_w" + dataset = KeyValue(bit_w=opt.bit_w) + in_size = opt.bit_w + 1 + out_size = opt.bit_w // 2 + elif opt.task == "keyvalue2way": + assert opt.bit_w % 2 == 0, "Key-value datasets works only with even bit_w" + dataset = KeyValue2Way(bit_w=opt.bit_w) + in_size = opt.bit_w + 2 + out_size = opt.bit_w // 2 + elif opt.task == "babi": + dataset = bAbiDataset(think_steps=opt.think_steps, dir_name=opt.dataset_path) + test_set = bAbiDataset( + think_steps=opt.think_steps, dir_name=opt.dataset_path, name="test" + ) + dataset.use(opt.babi_train_tasks, opt.babi_train_sets) + in_size = opt.embedding_size + print("bAbi: loaded total of %d sequences." 
% len(dataset)) + test_set.use(opt.babi_test_tasks, opt.babi_test_sets) + out_size = len(dataset.vocabulary) + print( + "bAbi: using %d sequences for training, %d for testing" + % (len(dataset), len(test_set)) + ) + else: + assert False, "Invalid task: %s" % opt.task + + if opt.task in ["babi"]: + data_loader = torch.utils.data.DataLoader( + dataset, + batch_size=opt.batch_size, + num_workers=4, + pin_memory=True, + shuffle=True, + collate_fn=MetaCollate(), + ) + test_loader = ( + torch.utils.data.DataLoader( + test_set, + batch_size=opt.test_batch_size, + num_workers=opt.test_batch_size, + pin_memory=True, + shuffle=False, + collate_fn=MetaCollate(), + ) + if test_set is not None + else None + ) + else: + dataset = BitmapTaskRepeater(dataset) + data_loader = torch.utils.data.DataLoader( + dataset, + batch_sampler=LengthHackSampler( + opt.batch_size, BitmapTaskRepeater.key_sampler(opt.len, opt.repeat) + ), + num_workers=1, + pin_memory=True, + ) + + if opt.controller_type == "lstm": + controller_constructor = functools.partial( + LSTMController, out_from_all_layers=opt.lstm_use_all_outputs + ) + elif opt.controller_type == "linear": + controller_constructor = FeedforwardController + else: + assert False, "Invalid controller: %s" % opt.controller_type + + parity_size = 0 + + model = DNC( + in_size + parity_size, + out_size, + opt.data_word_size, + opt.mem_count, + opt.n_read_heads, + controller_constructor(opt.layer_sizes), + batch_first=True, + mask=opt.masked_lookup, + dealloc_content=opt.dealloc_content, + link_sharpness_control=opt.sharpness_control, + mask_min=opt.mask_min, + clip_controller=opt.clip_controller, + ) + + # model.load_state_dict(torch.load(model_dir, map_location="cpu")["model"]) + + print("data_word_size: {}".format(opt.data_word_size)) + rollout_storage = RolloutStorage() + demon_state_dim = ( + in_size + opt.mem_count * opt.data_word_size + ) #:TODO opt.mem_count * opt.data_word_size + demon_action_dim = in_size + + demon = Demon( + 
demon_state_dim, + demon_action_dim, + action_std, + lr, + betas, + gamma, + K_epochs, + eps_clip, + ) + + fnet = FNet() + fnet.init(2 * opt.mem_count * opt.data_word_size) + #fnet.load_state_dict(torch.load(model_dir, map_location="cpu")["FNet"]) + + znet = ZNet() + znet.init(opt.mem_count * opt.data_word_size) + #znet.load_state_dict(torch.load(model_dir, map_location="cpu")["ZNet"]) + + params = [ + {"params": [p for n, p in model.named_parameters() if not n.endswith(".bias")]}, + { + "params": [p for n, p in model.named_parameters() if n.endswith(".bias")], + "weight_decay": 0, + }, + ] + + device = ( + torch.device("cuda") + if opt.gpu != "none" and torch.cuda.is_available() + else torch.device("cpu") + ) + print("DEVICE: ", device) + + if isinstance(dataset, NLPTask): + embedding = torch.nn.Embedding(len(dataset.vocabulary), opt.embedding_size).to( + device + ) + params.append({"params": embedding.parameters(), "weight_decay": 0}) + # embedding.load_state_dict( + # torch.load(model_dir, map_location="cpu")["word_embeddings"] + # ) + + if opt.optimizer == "sgd": + optimizer = torch.optim.SGD( + params, lr=opt.lr, weight_decay=opt.wd, momentum=opt.momentum + ) + elif opt.optimizer == "adam": + optimizer = torch.optim.Adam(params, lr=opt.lr, weight_decay=opt.wd) + elif opt.optimizer == "rmsprop": + optimizer = torch.optim.RMSprop( + params, lr=opt.lr, weight_decay=opt.wd, momentum=opt.momentum, eps=1e-10 + ) + else: + assert "Invalid optimizer: %s" % opt.optimizer + + n_params = sum([sum([t.numel() for t in d["params"]]) for d in params]) + print("Number of parameters: %d" % n_params) + + model = model.to(device) + fnet = fnet.to(device) + znet = znet.to(device) + znet_optim = torch.optim.Adam(znet.parameters(), lr=0.001) + fnet_optim = torch.optim.Adam(fnet.parameters(), lr=0.001) + + if embedding is not None and hasattr(embedding, "to"): + embedding = embedding.to(device) + + i = 0 + loss_sum = 0 + + loss_plot = Visdom.Plot2D( + "loss", 
store_interval=opt.info_interval, xlabel="iterations", ylabel="loss" + ) + + if curriculum is not None: + curriculum_plot = Visdom.Plot2D( + "curriculum lesson" + + ( + " (last %d)" % (curriculum.n_lessons - 1) + if curriculum.n_lessons is not None + else "" + ), + xlabel="iterations", + ylabel="lesson", + ) + curriculum_accuracy = Visdom.Plot2D( + "curriculum accuracy", xlabel="iterations", ylabel="accuracy" + ) + + saver = Saver(os.path.join(opt.name, "save"), short_interval=opt.save_interval) + saver.register("model", StateSaver(model)) + saver.register("optimizer", StateSaver(optimizer)) + saver.register("i", GlobalVarSaver("i")) + saver.register("loss_sum", GlobalVarSaver("loss_sum")) + saver.register("loss_plot", StateSaver(loss_plot)) + saver.register("dataset", StateSaver(dataset)) + if test_set: + saver.register("test_set", StateSaver(test_set)) + + if curriculum is not None: + saver.register("curriculum", StateSaver(curriculum)) + saver.register("curriculum_plot", StateSaver(curriculum_plot)) + saver.register("curriculum_accuracy", StateSaver(curriculum_accuracy)) + + if isinstance(dataset, NLPTask): + saver.register("word_embeddings", StateSaver(embedding)) + elif embedding is not None: + saver.register("embeddings", StateSaver(embedding)) + + visualizers = {} + + debug_schemas = { + "read_head": {"list_dim": 2}, + "temporal_links/forward_dists": {"list_dim": 2}, + "temporal_links/backward_dists": {"list_dim": 2}, + } + + def plot_debug(debug, prefix="", schema={}): + if debug is None: + return + + for k, v in debug.items(): + curr_name = prefix + k + if curr_name in debug_schemas: + curr_schema = schema.copy() + curr_schema.update(debug_schemas[curr_name]) + else: + curr_schema = schema + + if isinstance(v, dict): + plot_debug(v, curr_name + "/", curr_schema) + continue + + data = v[0] + + if curr_schema.get("list_dim", -1) > 0: + if data.ndim != 3: + print( + "WARNING: unknown data shape for array display: %s, tensor %s" + % (data.shape, curr_name) + ) 
+ continue + + n_steps = data.shape[curr_schema["list_dim"] - 1] + if curr_name not in visualizers: + visualizers[curr_name] = [ + Visdom.Heatmap( + curr_name + "_%d" % i, + dumpdir=os.path.join(opt.name, "preview") + if opt.dump_heatmaps + else None, + ) + for i in range(n_steps) + ] + + for i in range(n_steps): + visualizers[curr_name][i].draw( + index_by_dim(data, curr_schema["list_dim"] - 1, i) + ) + else: + if data.ndim != 2: + print( + "WARNING: unknown data shape for simple display: %s, tensor %s" + % (data.shape, curr_name) + ) + continue + + if curr_name not in visualizers: + visualizers[curr_name] = Visdom.Heatmap( + curr_name, + dumpdir=os.path.join(opt.name, "preview") + if opt.dump_heatmaps + else None, + ) + + visualizers[curr_name].draw(data) + + def run_model(input, debug=None, demon=None, rollout_storage=None): + if isinstance(dataset, NLPTask): + input = embedding(input["input"]) + else: + input = input["input"] * 2.0 - 1.0 + + return model(input, debug=debug, demon=demon, rollout_storage=rollout_storage) + + def run_znet(mem_state): + mem_state = mem_state[:, 1:, :] + return znet(mem_state) + + def run_fnet(mem_state, marginal=False): + shuffled_mem_state = None + if not marginal: + input = torch.cat((mem_state[:, :-1, :], mem_state[:, 1:, :]), dim=2) + else: + shuffled_indx = torch.randperm(mem_state.size(0)).to( + device + ) # random index for shuffling the elements of batch + shuffled_mem_state = mem_state.index_select(0, shuffled_indx) + input = torch.cat( + (mem_state[:, :-1, :], shuffled_mem_state[:, 1:, :]), dim=2 + ) + + return fnet(input), shuffled_mem_state + + def multiply_grads(params, mul): + if mul == 1: + return + + for pa in params: + for p in pa["params"]: + p.grad.data *= mul + + def test(): + if test_set is None: + return + + print("TESTING...") + start_time = time.time() + t = test_set.start_test() + with torch.no_grad(): + for data in tqdm(test_loader): + data = { + k: v.to(device) if torch.is_tensor(v) else v + for k, v in 
data.items() + } + if hasattr(dataset, "prepare"): + data = dataset.prepare(data) + + net_out = run_model(data, demon=demon) + test_set.veify_result(t, data, net_out) + + test_set.show_test_results(i, t) + print("Test done in %gs" % (time.time() - start_time)) + + if opt.test_on_start.lower() in ["on", "1", "true", "quit"]: + test() + if opt.test_on_start.lower() == "quit": + saver.write(i) + sys.exit(-1) + + if opt.print_test: + model.eval() + total = 0 + correct = 0 + with torch.no_grad(): + for data in tqdm(test_loader): + if not running: + return + + data = { + k: v.to(device) if torch.is_tensor(v) else v + for k, v in data.items() + } + if hasattr(test_set, "prepare"): + data = test_set.prepare(data) + + net_out = run_model(data, demon) + + c, t = test_set.curriculum_measure(net_out, data["output"]) + total += t + correct += c + + print( + "Test result: %2.f%% (%d out of %d correct)" + % (100.0 * correct / total, correct, total) + ) + model.train() + return + + iter_start_time = time.time() if i % opt.info_interval == 0 else None + data_load_total_time = 0 + + start_i = i + + if opt.dump_profile: + profiler = torch.autograd.profiler.profile(use_cuda=True) + + if opt.dump_heatmaps: + dataset.set_dump_dir(os.path.join(opt.name, "preview")) + + @preview() + def do_visualize(raw_data, output, pos_map, debug): + if pos_map is not None: + output = embedding.backmap_output( + output, pos_map, raw_data["output"].shape[1] + ) + dataset.visualize_preview(raw_data, output) + + if debug is not None: + plot_debug(debug) + + preview_timer = OnceEvery(opt.preview_interval) + + pos_map = None + start_iter = i + + if curriculum is not None: + curriculum.init() + + ma_et = 1.0 + while running: + data_load_timer = time.time() + for data in data_loader: + if not running: + break + + if loader_reset: + print("Loader reset requested. 
Resetting...") + loader_reset = False + if curriculum is not None: + curriculum.lesson_started() + break + + if opt.dump_profile: + if i == start_i + 1: + print("Starting profiler") + profiler.__enter__() + elif i == start_i + 5 + 1: + print("Stopping profiler") + profiler.__exit__(None, None, None) + print("Average stats") + print(profiler.key_averages().table("cpu_time_total")) + print("Writing trace to file") + profiler.export_chrome_trace(opt.dump_profile) + print("Done.") + sys.exit(0) + else: + print("Step %d out of 5" % (i - start_i)) + + debug.dbg_print("-------------------------------------") + raw_data = data + + data = { + k: v.to(device) if torch.is_tensor(v) else v for k, v in data.items() + } + if hasattr(dataset, "prepare"): + data = dataset.prepare(data) + + data_load_total_time += time.time() - data_load_timer + + need_preview = preview_timer() + debug_data = {} if opt.debug and need_preview else None + + optimizer.zero_grad() + # demon_optim.zero_grad() + # znet_optim.zero_grad() + # fnet_optim.zero_grad() + + if opt.n_subbatch == "auto": + n_subbatch = math.ceil( + data["input"].numel() / opt.max_input_count_per_batch + ) + else: + n_subbatch = int(opt.n_subbatch) + + real_batch = max(math.floor(opt.batch_size / n_subbatch), 1) + n_subbatch = math.ceil(opt.batch_size / real_batch) + remaning_batch = opt.batch_size % real_batch + + for subbatch in range(n_subbatch): + if not running: + break + input = data["input"] + target = data["output"] + + if n_subbatch != 1: + input = input[subbatch * real_batch : (subbatch + 1) * real_batch] + target = target[subbatch * real_batch : (subbatch + 1) * real_batch] + + f2 = data.copy() + f2["input"] = input + + # Demon modifies the memory before DNC model + output = run_model( + f2, + debug=debug_data if subbatch == n_subbatch - 1 else None, + demon=demon, + rollout_storage=rollout_storage, + ) + l = dataset.loss(output, target) + l.backward() + + mem_state = torch.stack(model.mem_state, dim=1) + t, _ = 
run_fnet(mem_state.detach()) + z = run_znet(mem_state.detach()) + + et, shuffled_mem = run_fnet(mem_state.detach(), marginal=True) + et = torch.exp(et) + mi_lb = t - (torch.mean(et) / z + torch.log(z) - 1) + + demon_rewards = mi_lb + info_loss = -mi_lb.mean() + + info_loss.backward() + + fnet_optim.step() + fnet_optim.zero_grad() + + znet_optim.step() + znet_optim.zero_grad() + + del model.mem_state[:] # reset the mem state + + for j in range(0, demon_rewards.size(1)): + rollout_storage.rewards.append(demon_rewards[:, j].detach()) + + # update if its time + if i % 1 == 0: # TODO demon_update_timestep = C + demon_loss = demon.update(rollout_storage) + rollout_storage.clear_storage() + + debug.nan_check(l, force=True) + + if curriculum is not None: + curriculum.update(*dataset.curriculum_measure(output, target)) + + if remaning_batch != 0 and subbatch == n_subbatch - 2: + multiply_grads(params, real_batch / remaning_batch) + + if n_subbatch != 1: + if remaning_batch == 0: + multiply_grads(params, 1 / n_subbatch) + else: + multiply_grads(params, remaning_batch / opt.batch_size) + + for p in params: + torch.nn.utils.clip_grad_norm_(p["params"], opt.grad_clip) + + optimizer.step() + + i += 1 + + curr_loss = l.data.item() + loss_plot.add_point(i, curr_loss) + + # writer.add_scalar("associative-recall-loss/dnc-md", curr_loss, i) + # writer.add_scalar("associative-recall-mutual_info/dnc-md", info_loss, i) + # writer.add_scalar( + # "associative-recall-demon_loss/dnc-md", demon_loss.mean(), i + # ) + + loss_sum += curr_loss + + if i % opt.info_interval == 0: + tim = time.time() + loss_avg = loss_sum / opt.info_interval + + if curriculum is not None: + curriculum_accuracy.add_point(i, curriculum.get_accuracy()) + curriculum_plot.add_point(i, curriculum.step) + + message = "Iteration %d, loss: %.4f" % (i, loss_avg) + if iter_start_time is not None: + message += ( + " (%.2f ms/iter, load time %.2g ms/iter, visport: %s)" + % ( + (tim - iter_start_time) / opt.info_interval * 
1000.0, + data_load_total_time / opt.info_interval * 1000.0, + Visdom.port, + ) + ) + print(message) + iter_start_time = tim + loss_sum = 0 + data_load_total_time = 0 + + debug.dbg_print("Iteration %d, loss %g" % (i, curr_loss)) + + if need_preview: + do_visualize(raw_data, output, pos_map, debug_data) + + if i % opt.test_interval == 0: + test() + + saver.tick(i) + + if opt.demo and opt.exit_after is None: + running = False + input("Press enter to quit.") + + if opt.exit_after is not None and (i - start_iter) >= opt.exit_after: + running = False + + data_load_timer = time.time() + + +if __name__ == "__main__": + #writer = SummaryWriter() + global running + running = True + + def signal_handler(signal, frame): + global running + print("You pressed Ctrl+C!") + running = False + + signal.signal(signal.SIGINT, signal_handler) + + main() diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..3caa71c --- /dev/null +++ b/requirements.txt @@ -0,0 +1,8 @@ +tqdm +torch +visdom +numpy +tensorboard + + +