transform

old TransE-like models
git clone https://esimon.eu/repos/transform.git
Log | Files | Refs | README

commit 194510b0a3c0718ad8137de758e2646d5a4a93e7
parent f3629a2ce1f606baa6447e1d7fb4ee25a4cc58ff
Author: Étienne Simon <esimon@esimon.eu>
Date:   Fri, 11 Apr 2014 19:30:02 +0200

Add class for Translations

Diffstat:
A relations/translation.py | 53+++++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 53 insertions(+), 0 deletions(-)

#!/usr/bin/env python2

import numpy
import theano
import theano.tensor as T
import theano.sparse as S

class Translations(object):
    """ Translations class.

    Models relations as additive translation vectors (TransE-style).

    This class has one parameter:
    R -- the translations, shape (number, dimension)
    """
    def __init__(self, rng, number, dimension, tag):
        """ Initialise the parameter.

        Keyword arguments:
        rng -- module for random number generation
        number -- number of relations
        dimension -- dimension of the embeddings
        tag -- name of the relations for parameter declaration
        """

        self.number = number
        self.dimension = dimension

        # Glorot-style uniform initialisation bound.
        R_bound = numpy.sqrt(6. / dimension)
        R_values = rng.uniform(low=-R_bound, high=R_bound, size=(number, dimension))
        # Normalise each relation vector to unit L2 norm.
        # Fixed: the row norms have shape (number,); without the newaxis the
        # division does not broadcast against the (number, dimension) matrix.
        R_values = R_values / numpy.sqrt(numpy.sum(R_values ** 2, axis=1))[:, numpy.newaxis]
        self.R = theano.shared(name=tag, value=numpy.asarray(R_values, dtype=theano.config.floatX))

        # Fixed: was [R], which raised a NameError (the local name is self.R).
        self.params = [self.R]

    def L1_norm(self):
        """ Compute the L1-norm of the relations parameter. """
        # Fixed: Theano exposes elementwise absolute value as T.abs_, not T.abs.
        return T.sum(T.abs_(self.R))

    def sqrL2_norm(self):
        """ Compute the squared L2-norm of the relations parameter. """
        return T.sum(T.sqr(self.R))

    def apply(self, input, relations):
        """ Apply the given relations to a given input.

        Keyword arguments:
        input -- embeddings to translate
        relations -- sparse one-hot selector of the relations to apply
        """
        # Fixed: the body referenced `inputs`, which does not exist; the
        # parameter is named `input`.
        return S.dot(relations, self.R) + input

    def sgd_updates(self, cost, learning_rate):
        """ Compute the updates to perform a SGD step w.r.t. a given cost.

        Keyword arguments:
        cost -- The cost to optimise.
        learning_rate -- The learning rate used for gradient descent.
        """
        return [(self.R, self.R - learning_rate * T.grad(cost=cost, wrt=self.R))]