Tests for no memory pass

This commit is contained in:
ixaxaar 2017-11-21 17:28:49 +05:30
parent e3b4513730
commit 1a465dee0c
3 changed files with 196 additions and 3 deletions

View File

@ -17,6 +17,8 @@ import math
import time import time
sys.path.insert(0, '.') sys.path.insert(0, '.')
import functools
from dnc import DNC from dnc import DNC
from test_utils import generate_data, criterion from test_utils import generate_data, criterion
@ -128,6 +130,69 @@ def test_rnn_n():
optimizer.step() optimizer.step()
assert target_output.size() == T.Size([27, 10, 100]) assert target_output.size() == T.Size([27, 10, 100])
assert chx[1].size() == T.Size([1,10,100]) assert chx[1].size() == T.Size([num_hidden_layers,10,100])
assert mhx['memory'].size() == T.Size([10,12,17]) assert mhx['memory'].size() == T.Size([10,12,17])
assert rv.size() == T.Size([10, 51]) assert rv.size() == T.Size([10, 51])
def test_rnn_no_memory_pass():
    """GRU-backed DNC with memory pass disabled.

    Runs several forward/backward steps with ``pass_through_memory=False``
    and checks that hidden state and memory keep their expected shapes
    while no read vectors are produced (``rv`` stays ``None``).
    """
    T.manual_seed(1111)

    input_size = 100
    hidden_size = 100
    rnn_type = 'gru'
    num_layers = 3
    num_hidden_layers = 5
    dropout = 0.2
    nr_cells = 12
    cell_size = 17
    read_heads = 3
    gpu_id = -1  # CPU
    debug = True
    lr = 0.001
    batch_size = 10
    cuda = gpu_id
    clip = 20
    length = 13

    rnn = DNC(
        input_size=input_size,
        hidden_size=hidden_size,
        rnn_type=rnn_type,
        num_layers=num_layers,
        num_hidden_layers=num_hidden_layers,
        dropout=dropout,
        nr_cells=nr_cells,
        cell_size=cell_size,
        read_heads=read_heads,
        gpu_id=gpu_id,
        debug=debug
    )

    optimizer = optim.Adam(rnn.parameters(), lr=lr)
    optimizer.zero_grad()

    input_data, target_output = generate_data(batch_size, length, input_size, cuda)
    target_output = target_output.transpose(0, 1).contiguous()

    # Repeatedly feed the same input, threading the returned state back in,
    # with the memory pass disabled on every step.
    (chx, mhx, rv) = (None, None, None)
    outputs = []
    for _ in range(6):
        output, (chx, mhx, rv), _debug = rnn(
            input_data, (chx, mhx, rv), pass_through_memory=False)
        output = output.transpose(0, 1)
        outputs.append(output)

    # Sum all step outputs so every step contributes to the loss.
    output = functools.reduce(lambda acc, o: acc + o, outputs)
    loss = criterion(output, target_output)
    loss.backward()

    T.nn.utils.clip_grad_norm(rnn.parameters(), clip)
    optimizer.step()

    assert target_output.size() == T.Size([27, 10, 100])
    assert chx[0].size() == T.Size([num_hidden_layers, 10, 100])
    assert mhx['memory'].size() == T.Size([10, 12, 17])
    # Identity check: with pass_through_memory=False no read vectors exist.
    assert rv is None

View File

@ -15,6 +15,7 @@ import sys
import os import os
import math import math
import time import time
import functools
sys.path.insert(0, '.') sys.path.insert(0, '.')
from dnc import DNC from dnc import DNC
@ -128,6 +129,68 @@ def test_rnn_n():
optimizer.step() optimizer.step()
assert target_output.size() == T.Size([27, 10, 100]) assert target_output.size() == T.Size([27, 10, 100])
assert chx[0][0].size() == T.Size([1,10,100]) assert chx[0][0].size() == T.Size([num_hidden_layers,10,100])
assert mhx['memory'].size() == T.Size([10,12,17]) assert mhx['memory'].size() == T.Size([10,12,17])
assert rv.size() == T.Size([10, 51]) assert rv.size() == T.Size([10, 51])
def test_rnn_no_memory_pass():
    """LSTM-backed DNC with memory pass disabled.

    Runs several forward/backward steps with ``pass_through_memory=False``
    and checks that hidden state and memory keep their expected shapes
    while no read vectors are produced (``rv`` stays ``None``).
    """
    T.manual_seed(1111)

    input_size = 100
    hidden_size = 100
    rnn_type = 'lstm'
    num_layers = 3
    num_hidden_layers = 5
    dropout = 0.2
    nr_cells = 12
    cell_size = 17
    read_heads = 3
    gpu_id = -1  # CPU
    debug = True
    lr = 0.001
    batch_size = 10
    cuda = gpu_id
    clip = 20
    length = 13

    rnn = DNC(
        input_size=input_size,
        hidden_size=hidden_size,
        rnn_type=rnn_type,
        num_layers=num_layers,
        num_hidden_layers=num_hidden_layers,
        dropout=dropout,
        nr_cells=nr_cells,
        cell_size=cell_size,
        read_heads=read_heads,
        gpu_id=gpu_id,
        debug=debug
    )

    optimizer = optim.Adam(rnn.parameters(), lr=lr)
    optimizer.zero_grad()

    input_data, target_output = generate_data(batch_size, length, input_size, cuda)
    target_output = target_output.transpose(0, 1).contiguous()

    # Repeatedly feed the same input, threading the returned state back in,
    # with the memory pass disabled on every step.
    (chx, mhx, rv) = (None, None, None)
    outputs = []
    for _ in range(6):
        output, (chx, mhx, rv), _debug = rnn(
            input_data, (chx, mhx, rv), pass_through_memory=False)
        output = output.transpose(0, 1)
        outputs.append(output)

    # Sum all step outputs so every step contributes to the loss.
    output = functools.reduce(lambda acc, o: acc + o, outputs)
    loss = criterion(output, target_output)
    loss.backward()

    T.nn.utils.clip_grad_norm(rnn.parameters(), clip)
    optimizer.step()

    assert target_output.size() == T.Size([27, 10, 100])
    # LSTM state is an (h, c) tuple per layer, hence the extra [0] index.
    assert chx[0][0].size() == T.Size([num_hidden_layers, 10, 100])
    assert mhx['memory'].size() == T.Size([10, 12, 17])
    # Identity check: with pass_through_memory=False no read vectors exist.
    assert rv is None

View File

@ -17,6 +17,8 @@ import math
import time import time
sys.path.insert(0, '.') sys.path.insert(0, '.')
import functools
from dnc import DNC from dnc import DNC
from test_utils import generate_data, criterion from test_utils import generate_data, criterion
@ -128,6 +130,69 @@ def test_rnn_n():
optimizer.step() optimizer.step()
assert target_output.size() == T.Size([27, 10, 100]) assert target_output.size() == T.Size([27, 10, 100])
assert chx[1].size() == T.Size([1,10,100]) assert chx[1].size() == T.Size([num_hidden_layers,10,100])
assert mhx['memory'].size() == T.Size([10,12,17]) assert mhx['memory'].size() == T.Size([10,12,17])
assert rv.size() == T.Size([10, 51]) assert rv.size() == T.Size([10, 51])
def test_rnn_no_memory_pass():
    """Vanilla-RNN-backed DNC with memory pass disabled.

    Runs several forward/backward steps with ``pass_through_memory=False``
    and checks that hidden state and memory keep their expected shapes
    while no read vectors are produced (``rv`` stays ``None``).
    """
    T.manual_seed(1111)

    input_size = 100
    hidden_size = 100
    rnn_type = 'rnn'
    num_layers = 3
    num_hidden_layers = 5
    dropout = 0.2
    nr_cells = 12
    cell_size = 17
    read_heads = 3
    gpu_id = -1  # CPU
    debug = True
    lr = 0.001
    batch_size = 10
    cuda = gpu_id
    clip = 20
    length = 13

    rnn = DNC(
        input_size=input_size,
        hidden_size=hidden_size,
        rnn_type=rnn_type,
        num_layers=num_layers,
        num_hidden_layers=num_hidden_layers,
        dropout=dropout,
        nr_cells=nr_cells,
        cell_size=cell_size,
        read_heads=read_heads,
        gpu_id=gpu_id,
        debug=debug
    )

    optimizer = optim.Adam(rnn.parameters(), lr=lr)
    optimizer.zero_grad()

    input_data, target_output = generate_data(batch_size, length, input_size, cuda)
    target_output = target_output.transpose(0, 1).contiguous()

    # Repeatedly feed the same input, threading the returned state back in,
    # with the memory pass disabled on every step.
    (chx, mhx, rv) = (None, None, None)
    outputs = []
    for _ in range(6):
        output, (chx, mhx, rv), _debug = rnn(
            input_data, (chx, mhx, rv), pass_through_memory=False)
        output = output.transpose(0, 1)
        outputs.append(output)

    # Sum all step outputs so every step contributes to the loss.
    output = functools.reduce(lambda acc, o: acc + o, outputs)
    loss = criterion(output, target_output)
    loss.backward()

    T.nn.utils.clip_grad_norm(rnn.parameters(), clip)
    optimizer.step()

    assert target_output.size() == T.Size([27, 10, 100])
    assert chx[1].size() == T.Size([num_hidden_layers, 10, 100])
    assert mhx['memory'].size() == T.Size([10, 12, 17])
    # Identity check: with pass_through_memory=False no read vectors exist.
    assert rv is None