Use FAISS instead of FLANN
parent 7b1b53d4c5, commit 379d719c76

README.md (63 changed lines)
@@ -1,8 +1,9 @@
-# Differentiable Neural Computer, for Pytorch
+# Differentiable Neural Computers and Sparse Differentiable Neural Computers, for Pytorch

[![Build Status](https://travis-ci.org/ixaxaar/pytorch-dnc.svg?branch=master)](https://travis-ci.org/ixaxaar/pytorch-dnc) [![PyPI version](https://badge.fury.io/py/dnc.svg)](https://badge.fury.io/py/dnc)

This is an implementation of [Differentiable Neural Computers](http://people.idsia.ch/~rupesh/rnnsymposium2016/slides/graves.pdf), described in the paper [Hybrid computing using a neural network with dynamic external memory, Graves et al.](https://www.nature.com/articles/nature20101)
+and the Sparse version of the DNC (the SDNC) described in [Scaling Memory-Augmented Neural Networks with Sparse Reads and Writes](http://papers.nips.cc/paper/6298-scaling-memory-augmented-neural-networks-with-sparse-reads-and-writes.pdf).

## Install
@@ -12,20 +13,64 @@ pip install dnc

For using sparse DNCs, additional libraries are required:

-### FLANN
+### FAISS
+
+SDNCs require an additional library: [facebookresearch/faiss](https://github.com/facebookresearch/faiss).
+A compiled version of the library with Intel SSE + CUDA 8 support ships with this package.
+If that does not work, faiss may need to be compiled manually, as detailed below:
+
+#### Installing FAISS
+
+FAISS needs `libopenblas.so` in `/usr/lib/`.
+
+These steps have been tested on Arch Linux; other distributions may use a different libopenblas path, CUDA root directory, or numpy include directory.

```bash
-git clone https://github.com/mariusmuja/flann.git
-cd flann
-git checkout 1.9.1
-mkdir build
-cd build
-cmake -DBUILD_CUDA_LIB=ON -DCMAKE_C_COMPILER=/opt/cuda/bin/gcc -DCMAKE_CXX_COMPILER=/opt/cuda/bin/g++ ..
+git clone https://github.com/facebookresearch/faiss.git
+cd faiss
+cp ./example_makefiles/makefile.inc.Linux ./makefile.inc
+# change the libopenblas path
+sed -i "s/lib64\/libopenblas\.so\.0/lib\/libopenblas\.so/g" ./makefile.inc
+# add an option so that nvcc works properly with g++ > 5
+sed -i "s/std c++11 \-lineinfo/std c++11 \-lineinfo \-Xcompiler \-D__CORRECT_ISO_CPP11_MATH_H_PROTO/g" ./makefile.inc
+# change the CUDA root
+sed -i "s/CUDAROOT=\/usr\/local\/cuda-8.0\//CUDAROOT=\/opt\/cuda\//g" ./makefile.inc
+# change the numpy include files (for python 3.6)
+sed -i "s/PYTHONCFLAGS=\-I\/usr\/include\/python2.7\/ \-I\/usr\/lib64\/python2.7\/site\-packages\/numpy\/core\/include\//PYTHONCFLAGS=\-I\/usr\/include\/python3.6m\/ \-I\/usr\/lib\/python3.6\/site\-packages\/numpy\/core\/include/g" ./makefile.inc
+
+# build the CPU library, the GPU library, and both sets of python bindings
+make
+sudo make install
+cd gpu
+make
+cd ..
+make py
+cd gpu
+make py
+cd ..
+
+# collect the built artefacts into a single directory
+mkdir /tmp/faiss
+find -name "*.so" -exec cp {} /tmp/faiss \;
+find -name "*.a" -exec cp {} /tmp/faiss \;
+find -name "*.py" -exec cp {} /tmp/faiss \;
+mv /tmp/faiss .
+cd faiss
+
+# convert the python bindings to python3
+2to3 -w ./*.py
+rm -rf *.bak
+
+# fix relative imports
+for i in *.py; do
+  filename=`echo $i | cut -d "." -f 1`
+  echo $filename
+  find -name "*.py" -exec sed -i "s/import $filename/import \.$filename/g" {} \;
+  find -name "*.py" -exec sed -i "s/from $filename import/from \.$filename import/g" {} \;
+done
```

## Architecture

<img src="./docs/dnc.png" height="600" />
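For orientation on what the FAISS dependency is used for: the sparse memory fetches the nearest stored vectors for each read query, and FAISS provides index objects for exactly that kind of (approximate) nearest-neighbour search. Below is a minimal, self-contained sketch of such a lookup, using made-up dimensions and a plain flat L2 index; it is not code taken from this repository.

```python
import numpy as np
import faiss  # built and installed as described in the README section above

d, n, k = 64, 10000, 4                                # vector width, stored slots, neighbours per query (illustrative)
memory = np.random.random((n, d)).astype('float32')   # stand-in for the sparse memory contents
queries = np.random.random((5, d)).astype('float32')  # stand-in for read queries

index = faiss.IndexFlatL2(d)                 # exact L2 index; IVF/PQ variants trade accuracy for speed
index.add(memory)                            # register the stored vectors
distances, ids = index.search(queries, k)    # k nearest slots per query, returned as (distances, indices)
print(ids.shape)                             # (5, k)
```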
@@ -6,10 +6,10 @@ from torch.autograd import Variable as var
import torch.nn.functional as F
import numpy as np

-from pyflann import FLANN
+# from faiss import faiss

from .util import *

import time

class SparseMemory(nn.Module):
@@ -56,6 +56,7 @@ class SparseMemory(nn.Module):

  def rebuild_indexes(self, hidden, force=False):
    b = hidden['sparse'].shape[0]
+    t = time.time()

    # if self.rebuild_indexes_after == self.index_reset_ctr or 'indexes' not in hidden:
    #   self.index_reset_ctr = 0
@@ -64,6 +65,7 @@ class SparseMemory(nn.Module):
        x.build_index(hidden['sparse'][n], algorithm='kdtree', trees=self.num_kdtrees, checks=self.index_checks)
        for n, x in enumerate(hidden['indexes'])
    ]
+    print(time.time() - t)
    # self.index_reset_ctr += 1
    return hidden
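The `rebuild_indexes` change above only adds timing around the per-batch index rebuild, which still goes through pyflann's kd-tree API even though the FAISS switch is underway. For reference, here is a minimal side-by-side sketch of the two lookup APIs involved; the array shapes and `k` are illustrative and not taken from this code.

```python
import numpy as np
from pyflann import FLANN
import faiss

data = np.random.random((1000, 32)).astype('float32')   # stand-in for one batch's sparse memory rows
query = np.random.random((4, 32)).astype('float32')     # stand-in for read queries
k = 8

# FLANN: build a randomized kd-tree forest, then query it
flann = FLANN()
flann.build_index(data, algorithm='kdtree', trees=4, checks=32)
flann_ids, flann_dists = flann.nn_index(query, num_neighbors=k)

# FAISS: exact L2 index; note the reversed return order (distances first, then ids)
index = faiss.IndexFlatL2(data.shape[1])
index.add(data)
faiss_dists, faiss_ids = index.search(query, k)
```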
@@ -92,7 +94,7 @@ class SparseMemory(nn.Module):
      hidden['read_vectors'] = hidden['read_vectors'].clone()

    if erase:
-      hidden = self.rebuild_indexes(hidden)
+      # hidden = self.rebuild_indexes(hidden)
      hidden['sparse'].fill(0)
      # hidden['memory'].data.fill_(δ)
      hidden['read_weights'].data.fill_(δ)
@@ -162,6 +164,7 @@ class SparseMemory(nn.Module):
    return hidden['read_vectors'][:, :-1, :].contiguous(), hidden

  def forward(self, ξ, hidden):
+    t = time.time()

    # ξ = ξ.detach()
    m = self.mem_size
@@ -189,5 +192,6 @@ class SparseMemory(nn.Module):
    # write gate (b * 1)
    write_gate = F.sigmoid(ξ[:, -1].contiguous()).unsqueeze(1).view(b, 1)

+    print(time.time() - t, "-----------------")
    hidden = self.write(interpolation_gate, write_vector, write_gate, hidden)
    return self.read(read_query, hidden)
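The instrumentation added in this commit sprinkles bare `time.time()` and `print` calls through `rebuild_indexes` and `forward`. If the timing is meant to stay around, a small context manager keeps it in one place; this is only a sketch of one possible approach, not code from the repository.

```python
import time
from contextlib import contextmanager

@contextmanager
def timed(label):
  """Print the wall-clock time spent inside the `with` block."""
  start = time.time()
  try:
    yield
  finally:
    print('%s took %.4fs' % (label, time.time() - start))

# Hypothetical usage inside SparseMemory.forward:
#   with timed('sparse memory forward'):
#     hidden = self.write(interpolation_gate, write_vector, write_gate, hidden)
```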
setup.py (3 changed lines)
@@ -55,6 +55,9 @@ setup(
  keywords='differentiable neural computer dnc memory network',

  packages=find_packages(exclude=['contrib', 'docs', 'tests', 'tasks']),
+  package_data={
+    'libs': ['faiss/libfaiss.a', 'faiss/libgpufaiss.a', 'faiss/_swigfaiss_gpu.so', 'faiss/_swigfaiss.so'],
+  },

  install_requires=['torch', 'numpy'],
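The `package_data` entry above is what bundles the prebuilt FAISS artefacts mentioned in the README. How the package then chooses between the bundled build and a system-wide FAISS is not part of this diff; the sketch below shows one common fallback pattern, with the module path `dnc.faiss.swigfaiss` assumed purely for illustration.

```python
# Hypothetical import fallback. 'dnc.faiss.swigfaiss' is an assumed location for
# the bundled SWIG wrapper shipped via package_data; it is not confirmed by this commit.
try:
  from dnc.faiss import swigfaiss as faiss_backend   # prebuilt binaries shipped with the package
except ImportError:
  import faiss as faiss_backend                      # system-wide build (see the README steps above)
```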