Compare commits

...

5 Commits
45 ... master

Author SHA1 Message Date
ixaxaar
33e35326db
bump version 2021-01-05 15:54:53 +00:00
ixaxaar
d57776c45a
Merge pull request #57 from rfeinman/master
fix bug in function \theta for batchwise cosine similarity
2020-11-24 14:51:39 +00:00
rfeinman
a660434d21 fix bug in function \theta for batchwise cosine similarity 2020-11-23 08:22:24 -05:00
ixaxaar
00bfa63bc5
Merge pull request #52 from bionicles/patch-1
FLANN link fix
2020-07-06 01:18:04 +05:30
bion howard
be40616920
FLANN link fix
the flann link didn't work so i put a new one, to the git repo for the project
2020-07-01 11:30:09 -04:00
3 changed files with 11 additions and 17 deletions

View File

@@ -456,7 +456,7 @@ python ./tasks/argmax_task.py -cuda 0 -lr 0.0001 -rnn_type lstm -memory_type dnc
## General noteworthy stuff
1. SDNCs use the [FLANN approximate nearest neighbour library](https://www.cs.ubc.ca/research/flann/), with its python binding [pyflann3](https://github.com/primetang/pyflann) and [FAISS](https://github.com/facebookresearch/faiss).
1. SDNCs use the [FLANN approximate nearest neighbour library](https://github.com/mariusmuja/flann), with its python binding [pyflann3](https://github.com/primetang/pyflann) and [FAISS](https://github.com/facebookresearch/faiss).
FLANN can be installed either from pip (automatically as a dependency), or from source (e.g. for multithreading via OpenMP):

View File

@@ -56,29 +56,23 @@ def cudalong(x, grad=False, gpu_id=-1):
return t
def θ(a, b, dimA=2, dimB=2, normBy=2):
"""Batchwise Cosine distance
def θ(a, b, normBy=2):
"""Batchwise Cosine similarity
Cosine distance
Cosine similarity
Arguments:
a {Tensor} -- A 3D Tensor (b * m * w)
b {Tensor} -- A 3D Tensor (b * r * w)
Keyword Arguments:
dimA {number} -- exponent value of the norm for `a` (default: {2})
dimB {number} -- exponent value of the norm for `b` (default: {1})
Returns:
Tensor -- Batchwise cosine distance (b * r * m)
Tensor -- Batchwise cosine similarity (b * r * m)
"""
a_norm = T.norm(a, normBy, dimA, keepdim=True).expand_as(a) + δ
b_norm = T.norm(b, normBy, dimB, keepdim=True).expand_as(b) + δ
x = T.bmm(a, b.transpose(1, 2)).transpose(1, 2) / (
T.bmm(a_norm, b_norm.transpose(1, 2)).transpose(1, 2) + δ)
# apply_dict(locals())
return x
dot = T.bmm(a, b.transpose(1,2))
a_norm = T.norm(a, normBy, dim=2).unsqueeze(2)
b_norm = T.norm(b, normBy, dim=2).unsqueeze(1)
cos = dot / (a_norm * b_norm + δ)
return cos.transpose(1,2).contiguous()
def σ(input, axis=1):

View File

@@ -22,7 +22,7 @@ with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
setup(
name='dnc',
version='1.0.3',
version='1.1.0',
description='Differentiable Neural Computer, for Pytorch',
long_description=long_description,