Modify how sequence max length is scaled for generalization
This commit is contained in:
parent 7719698fff
commit c74defd78b

README.md (17 changed lines)
@@ -367,7 +367,7 @@ Memory vectors returned by forward pass (`np.ndarray`):

 ## Tasks

-### Copy task
+### Copy task (with curriculum and generalization)

 The copy task, as described in the original paper, is included in the repo.

@@ -375,13 +375,13 @@ From the project root:
 ```bash
 python ./tasks/copy_task.py -cuda 0 -optim rmsprop -batch_size 32 -mem_slot 64 # (like original implementation)

-python3 ./tasks/copy_task.py -cuda 0 -lr 0.001 -rnn_type lstm -nlayer 1 -nhlayer 2 -dropout 0 -mem_slot 32 -batch_size 1000 -optim adam -sequence_max_length 8 # (faster convergence)
+python ./tasks/copy_task.py -cuda 0 -lr 0.001 -rnn_type lstm -nlayer 1 -nhlayer 2 -dropout 0 -mem_slot 32 -batch_size 1000 -optim adam -sequence_max_length 8 # (faster convergence)

 For SDNCs:
-python3 -B ./tasks/copy_task.py -cuda 0 -lr 0.001 -rnn_type lstm -memory_type sdnc -nlayer 1 -nhlayer 2 -dropout 0 -mem_slot 100 -mem_size 10 -read_heads 1 -sparse_reads 10 -batch_size 20 -optim adam -sequence_max_length 10
+python ./tasks/copy_task.py -cuda 0 -lr 0.001 -rnn_type lstm -memory_type sdnc -nlayer 1 -nhlayer 2 -dropout 0 -mem_slot 100 -mem_size 10 -read_heads 1 -sparse_reads 10 -batch_size 20 -optim adam -sequence_max_length 10

 and for curriculum learning for SDNCs:
-python3 -B ./tasks/copy_task.py -cuda 0 -lr 0.001 -rnn_type lstm -memory_type sdnc -nlayer 1 -nhlayer 2 -dropout 0 -mem_slot 100 -mem_size 10 -read_heads 1 -sparse_reads 4 -temporal_reads 4 -batch_size 20 -optim adam -sequence_max_length 4 -curriculum_increment 2 -curriculum_freq 10000
+python ./tasks/copy_task.py -cuda 0 -lr 0.001 -rnn_type lstm -memory_type sdnc -nlayer 1 -nhlayer 2 -dropout 0 -mem_slot 100 -mem_size 10 -read_heads 1 -sparse_reads 4 -temporal_reads 4 -batch_size 20 -optim adam -sequence_max_length 4 -curriculum_increment 2 -curriculum_freq 10000
 ```

 For the full set of options, see:
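The curriculum flags in the last command above (`-curriculum_increment`, `-curriculum_freq`) are not documented in this hunk. A plausible reading, sketched below with the values from that command, is that the cap on training sequence length grows by the increment every `curriculum_freq` iterations; this is only an illustration of that schedule, not the exact logic in `tasks/copy_task.py`.

```python
# Hypothetical sketch of the curriculum schedule suggested by the flags above;
# the real implementation lives in tasks/copy_task.py and may differ in detail.
import numpy as np

sequence_max_length = 4    # -sequence_max_length: initial length cap
curriculum_increment = 2   # -curriculum_increment: how much the cap grows
curriculum_freq = 10000    # -curriculum_freq: grow the cap every N iterations

for iteration in range(1, 50001):
    if iteration % curriculum_freq == 0:
        sequence_max_length += curriculum_increment
    # each training example is drawn with a random length up to the current cap
    length = np.random.randint(1, sequence_max_length + 1)
```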
@@ -419,18 +419,19 @@ This task
 The task first trains the network for sentences of size ~100, and then tests if the network generalizes for lengths ~1000.

 ```bash
-python3 -B ./tasks/adding_task.py -cuda 0 -lr 0.0001 -rnn_type lstm -memory_type sam -nlayer 1 -nhlayer 1 -nhid 100 -dropout 0 -mem_slot 1000 -mem_size 32 -read_heads 1 -sparse_reads 4 -batch_size 20 -optim rmsprop -input_size 3 -sequence_max_length 1000
+python ./tasks/adding_task.py -cuda 0 -lr 0.0001 -rnn_type lstm -memory_type sam -nlayer 1 -nhlayer 1 -nhid 100 -dropout 0 -mem_slot 1000 -mem_size 32 -read_heads 1 -sparse_reads 4 -batch_size 20 -optim rmsprop -input_size 3 -sequence_max_length 100
 ```
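The drop from `-sequence_max_length 1000` to `100` matches the code change in this commit: training now samples lengths up to the flag value, and the generalization test stretches to ten times that (see the hunks further down). In numbers, with the command above:

```python
# How the flag relates to the lengths actually used, following the
# random_length changes in this commit (see the hunks below).
import numpy as np

sequence_max_length = 100  # value passed on the command line above

train_length = np.random.randint(2, sequence_max_length + 1)       # 2..100
test_length = np.random.randint(2, sequence_max_length * 10 + 1)   # 2..1000
```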

-### Generalizing Addition task v2
+### Generalizing Argmax task

-The second adding task is similar to the first one, except that the network's output at the last time step is used for loss calculation, forcing the network to learn to add.
+The second adding task is similar to the first one, except that the network's output at the last time step is expected to be the argmax of the input.

 ```bash
-python3 -B ./tasks/adding_task_v2.py -cuda 0 -lr 0.0001 -rnn_type lstm -memory_type sam -nlayer 1 -nhlayer 1 -nhid 100 -dropout 0 -mem_slot 1000 -mem_size 32 -read_heads 1 -sparse_reads 4 -batch_size 20 -optim rmsprop -input_size 3 -sequence_max_length 1000
+python ./tasks/argmax_task.py -cuda 0 -lr 0.0001 -rnn_type lstm -memory_type dnc -nlayer 1 -nhlayer 1 -nhid 100 -dropout 0 -mem_slot 100 -mem_size 10 -read_heads 2 -batch_size 1 -optim rmsprop -sequence_max_length 15 -input_size 10 -iterations 10000
 ```
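What "the argmax of the input" looks like concretely is not shown in this diff; the sketch below is only a guess at one way such a sample could be built, and the real `generate_data` in `tasks/argmax_task.py` may encode the input differently.

```python
# Hypothetical example of an argmax-style sample; illustrative only, not the
# actual data generation in tasks/argmax_task.py.
import numpy as np

def toy_argmax_sample(length=15, input_size=10):
    values = np.random.randint(0, input_size, length)  # one value per time step
    target = int(np.argmax(values))                     # position of the maximum
    return values, target
```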


 ## Code Structure

 1. DNCs:
@@ -206,7 +206,7 @@ if __name__ == '__main__':
     llprint("\rIteration {ep}/{tot}".format(ep=epoch, tot=iterations))
     optimizer.zero_grad()
     # We use for training just (sequence_max_length / 10) examples
-    random_length = np.random.randint(2, (sequence_max_length / 10) + 1)
+    random_length = np.random.randint(2, (sequence_max_length) + 1)
     input_data, target_output, sums_text = generate_data(random_length, input_size)

     if rnn.debug:
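This hunk (apparently from the adding task script that the README commands invoke) widens the training distribution: training lengths were previously capped at a tenth of `sequence_max_length`, and now span the whole range. A quick numeric check, assuming the README's `-sequence_max_length 100`:

```python
# Effect of the change above for sequence_max_length = 100 (assumed from the README).
import numpy as np

sequence_max_length = 100
old_length = np.random.randint(2, int(sequence_max_length / 10) + 1)  # 2..10
new_length = np.random.randint(2, sequence_max_length + 1)            # 2..100
```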
@@ -226,12 +226,12 @@ if __name__ == '__main__':
     # detach memory from graph
     mhx = { k : (v.detach() if isinstance(v, var) else v) for k, v in mhx.items() }

-    summerize = (epoch % summarize_freq == 0)
+    summarize = (epoch % summarize_freq == 0)
     take_checkpoint = (epoch != 0) and (epoch % iterations == 0)

     last_100_losses.append(loss_value)

-    if summerize:
+    if summarize:
       llprint("\rIteration %d/%d" % (epoch, iterations))
       llprint("\nAvg. Logistic Loss: %.4f\n" % (np.mean(last_100_losses)))
       output = output.data.cpu().numpy()
@@ -250,10 +250,10 @@ if __name__ == '__main__':

   rnn.eval()

-  for i in range(int(iterations + 1 / 10)):
+  for i in range(int((iterations + 1) / 10)):
     llprint("\nIteration %d/%d" % (i, iterations))
     # We test now the learned generalization using sequence_max_length examples
-    random_length = np.random.randint(2, int(sequence_max_length) + 1)
+    random_length = np.random.randint(2, int(sequence_max_length) * 10 + 1)
     input_data, target_output, sums_text = generate_data(random_length, input_size)

     if rnn.debug:
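Two things change in the test loop above: the loop count gains the parentheses it was missing (operator precedence made the old expression run the full number of iterations), and the test lengths now stretch ten times beyond the training cap. Concretely, with the script's default `-iterations 2000` and the README's `-sequence_max_length 100`:

```python
# Concrete effect of the two fixes above.
iterations = 2000
old_count = int(iterations + 1 / 10)    # 2000 -- only 1/10 was divided
new_count = int((iterations + 1) / 10)  # 200  -- a tenth of the iterations

sequence_max_length = 100
old_test_high = int(sequence_max_length) + 1        # test lengths 2..100
new_test_high = int(sequence_max_length) * 10 + 1   # test lengths 2..1000
```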
@@ -1,276 +0,0 @@
```python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import warnings
warnings.filterwarnings('ignore')

import numpy as np
import getopt
import sys
import os
import math
import time
import argparse
from visdom import Visdom

sys.path.insert(0, os.path.join('..', '..'))

import torch as T
from torch.autograd import Variable as var
import torch.nn.functional as F
import torch.optim as optim

from torch.nn.utils import clip_grad_norm

from dnc.dnc import DNC
from dnc.sdnc import SDNC
from dnc.sam import SAM
from dnc.util import *

parser = argparse.ArgumentParser(description='PyTorch Differentiable Neural Computer')
parser.add_argument('-input_size', type=int, default=6, help='dimension of input feature')
parser.add_argument('-rnn_type', type=str, default='lstm', help='type of recurrent cells to use for the controller')
parser.add_argument('-nhid', type=int, default=100, help='number of hidden units of the inner nn')
parser.add_argument('-dropout', type=float, default=0, help='controller dropout')
parser.add_argument('-memory_type', type=str, default='dnc', help='dense or sparse memory: dnc | sdnc | sam')

parser.add_argument('-nlayer', type=int, default=1, help='number of layers')
parser.add_argument('-nhlayer', type=int, default=2, help='number of hidden layers')
parser.add_argument('-lr', type=float, default=1e-4, help='initial learning rate')
parser.add_argument('-optim', type=str, default='adam', help='learning rule, supports adam|rmsprop')
parser.add_argument('-clip', type=float, default=50, help='gradient clipping')

parser.add_argument('-batch_size', type=int, default=100, metavar='N', help='batch size')
parser.add_argument('-mem_size', type=int, default=20, help='memory dimension')
parser.add_argument('-mem_slot', type=int, default=16, help='number of memory slots')
parser.add_argument('-read_heads', type=int, default=4, help='number of read heads')
parser.add_argument('-sparse_reads', type=int, default=10, help='number of sparse reads per read head')
parser.add_argument('-temporal_reads', type=int, default=2, help='number of temporal reads')

parser.add_argument('-sequence_max_length', type=int, default=4, metavar='N', help='sequence_max_length')
parser.add_argument('-cuda', type=int, default=-1, help='Cuda GPU ID, -1 for CPU')

parser.add_argument('-iterations', type=int, default=2000, metavar='N', help='total number of iteration')
parser.add_argument('-summarize_freq', type=int, default=100, metavar='N', help='summarize frequency')
parser.add_argument('-check_freq', type=int, default=100, metavar='N', help='check point frequency')
parser.add_argument('-visdom', action='store_true', help='plot memory content on visdom per -summarize_freq steps')

args = parser.parse_args()
print(args)

viz = Visdom()
# assert viz.check_connection()

if args.cuda != -1:
  print('Using CUDA.')
  T.manual_seed(1111)
else:
  print('Using CPU.')


def llprint(message):
  sys.stdout.write(message)
  sys.stdout.flush()


def onehot(x, n):
  ret = np.zeros(n).astype(np.float32)
  ret[x] = 1.0
  return ret


def generate_data(length, size):

  content = np.random.randint(0, size - 1, length)

  seqlen = length + 1
  x_seq_list = [float('nan')] * seqlen
  sums = 0.0
  sums_text = ""
  for i in range(seqlen):
    if (i < length):
      x_seq_list[i] = onehot(content[i], size)
      sums += content[i]
      sums_text += str(content[i]) + " + "
    else:
      x_seq_list[i] = onehot(size - 1, size)

  x_seq_list = np.array(x_seq_list)
  x_seq_list = x_seq_list.reshape((1,) + x_seq_list.shape)
  x_seq_list = np.reshape(x_seq_list, (1, -1, size))

  target_output = np.zeros((1, 1, seqlen), dtype=np.float32)
  target_output[:, -1, -1] = sums
  target_output = np.reshape(target_output, (1, -1, 1))

  weights_vec = np.zeros((1, 1, seqlen), dtype=np.float32)
  weights_vec[:, -1, -1] = 1.0
  weights_vec = np.reshape(weights_vec, (1, -1, 1))

  return cudavec(x_seq_list, gpu_id=args.cuda).float(), \
    cudavec(target_output, gpu_id=args.cuda).float(), sums_text, \
    cudavec(weights_vec, gpu_id=args.cuda)

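# Layout of one sample produced by generate_data above:
#   input   (1, length + 1, size): `length` one-hot digits followed by a
#           one-hot end marker (index size - 1) at the final step
#   target  (1, length + 1, 1):    all zeros except the final step, which
#           holds the sum of the digits
#   weights (1, length + 1, 1):    1.0 only at the final step, used later to
#           mask the loss so only the last prediction counts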


if __name__ == '__main__':

  dirname = os.path.dirname(__file__)
  ckpts_dir = os.path.join(dirname, 'checkpoints')

  input_size = args.input_size
  memory_type = args.memory_type
  lr = args.lr
  clip = args.clip
  batch_size = args.batch_size
  sequence_max_length = args.sequence_max_length
  cuda = args.cuda
  iterations = args.iterations
  summarize_freq = args.summarize_freq
  check_freq = args.check_freq
  visdom = args.visdom

  from_checkpoint = None

  if args.memory_type == 'dnc':
    rnn = DNC(
      input_size=args.input_size,
      hidden_size=args.nhid,
      rnn_type=args.rnn_type,
      num_layers=args.nlayer,
      num_hidden_layers=args.nhlayer,
      dropout=args.dropout,
      nr_cells=args.mem_slot,
      cell_size=args.mem_size,
      read_heads=args.read_heads,
      gpu_id=args.cuda,
      debug=args.visdom,
      batch_first=True,
      independent_linears=True
    )
  elif args.memory_type == 'sdnc':
    rnn = SDNC(
      input_size=args.input_size,
      hidden_size=args.nhid,
      rnn_type=args.rnn_type,
      num_layers=args.nlayer,
      num_hidden_layers=args.nhlayer,
      dropout=args.dropout,
      nr_cells=args.mem_slot,
      cell_size=args.mem_size,
      sparse_reads=args.sparse_reads,
      temporal_reads=args.temporal_reads,
      read_heads=args.read_heads,
      gpu_id=args.cuda,
      debug=args.visdom,
      batch_first=True,
      independent_linears=False
    )
  elif args.memory_type == 'sam':
    rnn = SAM(
      input_size=args.input_size,
      hidden_size=args.nhid,
      rnn_type=args.rnn_type,
      num_layers=args.nlayer,
      num_hidden_layers=args.nhlayer,
      dropout=args.dropout,
      nr_cells=args.mem_slot,
      cell_size=args.mem_size,
      sparse_reads=args.sparse_reads,
      read_heads=args.read_heads,
      gpu_id=args.cuda,
      debug=args.visdom,
      batch_first=True,
      independent_linears=False
    )
  else:
    raise Exception('Not recognized type of memory')

  if args.cuda != -1:
    rnn = rnn.cuda(args.cuda)

  print(rnn)

  last_save_losses = []

  if args.optim == 'adam':
    optimizer = optim.Adam(rnn.parameters(), lr=args.lr, eps=1e-9, betas=[0.9, 0.98]) # 0.0001
  elif args.optim == 'adamax':
    optimizer = optim.Adamax(rnn.parameters(), lr=args.lr, eps=1e-9, betas=[0.9, 0.98]) # 0.0001
  elif args.optim == 'rmsprop':
    optimizer = optim.RMSprop(rnn.parameters(), lr=args.lr, momentum=0.9, eps=1e-10) # 0.0001
  elif args.optim == 'sgd':
    optimizer = optim.SGD(rnn.parameters(), lr=args.lr) # 0.01
  elif args.optim == 'adagrad':
    optimizer = optim.Adagrad(rnn.parameters(), lr=args.lr)
  elif args.optim == 'adadelta':
    optimizer = optim.Adadelta(rnn.parameters(), lr=args.lr)

  last_100_losses = []

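  # controller hidden state (chx), memory state (mhx) and last read vectors (rv);
  # starting from None lets the network initialise them on the first forward pass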
  (chx, mhx, rv) = (None, None, None)
  for epoch in range(iterations + 1):
    llprint("\rIteration {ep}/{tot}".format(ep=epoch, tot=iterations))
    optimizer.zero_grad()

    # We use for training just (sequence_max_length / 10) examples
    random_length = np.random.randint(2, (sequence_max_length / 10) + 1)
    input_data, target_output, sums_text, loss_weights = generate_data(random_length, input_size)

    if rnn.debug:
      output, (chx, mhx, rv), v = rnn(input_data, (None, mhx, None), reset_experience=True, pass_through_memory=True)
    else:
      output, (chx, mhx, rv) = rnn(input_data, (None, mhx, None), reset_experience=True, pass_through_memory=True)

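    # masked MSE: loss_weights is 1.0 only at the last time step, so only the
    # final prediction (the running sum) contributes to the loss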
    loss = T.mean(((loss_weights * output).sum(-1, keepdim=True) - target_output) ** 2)

    loss.backward()

    T.nn.utils.clip_grad_norm(rnn.parameters(), args.clip)
    optimizer.step()
    loss_value = loss.data[0]

    # detach memory from graph
    mhx = { k : (v.detach() if isinstance(v, var) else v) for k, v in mhx.items() }

    summerize = (epoch % summarize_freq == 0)
    take_checkpoint = (epoch != 0) and (epoch % iterations == 0)

    last_100_losses.append(loss_value)

    if summerize:
      output = output[:, -1, :].sum().data.cpu().numpy()[0]
      target_output = target_output.sum().data.cpu().numpy()

      llprint("\rIteration %d/%d" % (epoch, iterations))
      llprint("\nAvg. Logistic Loss: %.4f\n" % (np.mean(last_100_losses)))
      print(target_output)
      print("Real value: ", ' = ' + str(int(target_output[0])))
      print("Predicted: ", ' = ' + str(int(output // 1)) + " [" + str(output) + "]")
      last_100_losses = []

    if take_checkpoint:
      llprint("\nSaving Checkpoint ... "),
      check_ptr = os.path.join(ckpts_dir, 'step_{}.pth'.format(epoch))
      cur_weights = rnn.state_dict()
      T.save(cur_weights, check_ptr)
      llprint("Done!\n")

  llprint("\nTesting generalization...\n")

  rnn.eval()

  for i in range(int(iterations + 1 / 10)):
    llprint("\nIteration %d/%d" % (i, iterations))
    # We test now the learned generalization using sequence_max_length examples
    random_length = np.random.randint(2, sequence_max_length + 1)
    input_data, target_output, sums_text, loss_weights = generate_data(random_length, input_size)

    if rnn.debug:
      output, (chx, mhx, rv), v = rnn(input_data, (None, mhx, None), reset_experience=True, pass_through_memory=True)
    else:
      output, (chx, mhx, rv) = rnn(input_data, (None, mhx, None), reset_experience=True, pass_through_memory=True)

    output = output[:, -1, :].sum().data.cpu().numpy()[0]
    target_output = target_output.sum().data.cpu().numpy()

    print("\nReal value: ", ' = ' + str(int(target_output[0])))
    print("Predicted: ", ' = ' + str(int(output // 1)) + " [" + str(output) + "]")
```