taxi

Winning entry to the Kaggle taxi competition
git clone https://esimon.eu/repos/taxi.git

commit 5cff3a0d1ce5ae114d9090e41bccef294bfd0015
parent 9933cafdf3b218a509c2efd6e6e6ba91ae87aa9c
Author: Étienne Simon <esimon@esimon.eu>
Date:   Mon, 27 Jul 2015 13:41:08 -0400

Add bigger memory networks

Diffstat:
A config/memory_network_bidir_2.py | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
A config/memory_network_mlp_2.py   | 53 +++++++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 107 insertions(+), 0 deletions(-)

diff --git a/config/memory_network_bidir_2.py b/config/memory_network_bidir_2.py
@@ -0,0 +1,54 @@
+from blocks.initialization import IsotropicGaussian, Constant
+
+from blocks.bricks import Tanh
+
+import data
+from model.memory_network_bidir import Model, Stream
+
+
+dim_embeddings = [
+    ('origin_call', data.origin_call_train_size, 10),
+    ('origin_stand', data.stands_size, 10),
+    ('week_of_year', 52, 10),
+    ('day_of_week', 7, 10),
+    ('qhour_of_day', 24 * 4, 10),
+    ('day_type', 3, 10),
+]
+
+embed_weights_init = IsotropicGaussian(0.001)
+
+
+class RNNConfig(object):
+    __slots__ = ('rec_state_dim', 'dim_embeddings', 'embed_weights_init',
+                 'dim_hidden', 'weights_init', 'biases_init')
+
+prefix_encoder = RNNConfig()
+prefix_encoder.dim_embeddings = dim_embeddings
+prefix_encoder.embed_weights_init = embed_weights_init
+prefix_encoder.rec_state_dim = 100
+prefix_encoder.dim_hidden = [100, 100]
+prefix_encoder.weights_init = IsotropicGaussian(0.01)
+prefix_encoder.biases_init = Constant(0.001)
+
+candidate_encoder = RNNConfig()
+candidate_encoder.dim_embeddings = dim_embeddings
+candidate_encoder.embed_weights_init = embed_weights_init
+candidate_encoder.rec_state_dim = 100
+candidate_encoder.dim_hidden = [100, 100]
+candidate_encoder.weights_init = IsotropicGaussian(0.01)
+candidate_encoder.biases_init = Constant(0.001)
+
+representation_size = 100
+representation_activation = Tanh
+
+normalize_representation = True
+
+
+batch_size = 100
+batch_sort_size = 20
+
+max_splits = 100
+
+train_candidate_size = 1000
+valid_candidate_size = 1000
+test_candidate_size = 1000
diff --git a/config/memory_network_mlp_2.py b/config/memory_network_mlp_2.py
@@ -0,0 +1,53 @@
+from blocks.initialization import IsotropicGaussian, Constant
+
+from blocks.bricks import Tanh
+
+import data
+from model.memory_network_mlp import Model, Stream
+
+n_begin_end_pts = 5
+
+dim_embeddings = [
+    ('origin_call', data.origin_call_train_size, 10),
+    ('origin_stand', data.stands_size, 10),
+    ('week_of_year', 52, 10),
+    ('day_of_week', 7, 10),
+    ('qhour_of_day', 24 * 4, 10),
+    ('day_type', 3, 10),
+]
+
+embed_weights_init = IsotropicGaussian(0.001)
+
+class MLPConfig(object):
+    __slots__ = ('dim_input', 'dim_hidden', 'dim_output', 'weights_init', 'biases_init', 'embed_weights_init', 'dim_embeddings')
+
+prefix_encoder = MLPConfig()
+prefix_encoder.dim_input = n_begin_end_pts * 2 * 2 + sum(x for (_, _, x) in dim_embeddings)
+prefix_encoder.dim_hidden = [100, 100]
+prefix_encoder.weights_init = IsotropicGaussian(0.01)
+prefix_encoder.biases_init = Constant(0.001)
+prefix_encoder.embed_weights_init = embed_weights_init
+prefix_encoder.dim_embeddings = dim_embeddings
+
+candidate_encoder = MLPConfig()
+candidate_encoder.dim_input = n_begin_end_pts * 2 * 2 + sum(x for (_, _, x) in dim_embeddings)
+candidate_encoder.dim_hidden = [100, 100]
+candidate_encoder.weights_init = IsotropicGaussian(0.01)
+candidate_encoder.biases_init = Constant(0.001)
+candidate_encoder.embed_weights_init = embed_weights_init
+candidate_encoder.dim_embeddings = dim_embeddings
+
+representation_size = 100
+representation_activation = Tanh
+
+normalize_representation = True
+
+
+batch_size = 100
+batch_sort_size = 20
+
+max_splits = 100
+
+train_candidate_size = 1000
+valid_candidate_size = 1000
+test_candidate_size = 1000
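
Note on the dim_input expression in memory_network_mlp_2.py: it packs the trip prefix into one flat vector, namely 5 first and 5 last GPS points with 2 coordinates each (n_begin_end_pts * 2 * 2 = 20), plus the 6 embedding widths (6 * 10 = 60), for 80 MLP inputs. Below is a minimal standalone sketch of that arithmetic in plain Python; the vocabulary sizes data.origin_call_train_size and data.stands_size come from the repo's data module and are replaced by a 0 placeholder here, since only the embedding widths enter the sum.

    # Reproduces the dim_input arithmetic from memory_network_mlp_2.py.
    n_begin_end_pts = 5  # GPS points kept from the start and end of the prefix

    # (feature, vocabulary size, embedding width); the first two vocabulary
    # sizes are placeholders for values defined in the repo's data module.
    dim_embeddings = [
        ('origin_call', 0, 10),
        ('origin_stand', 0, 10),
        ('week_of_year', 52, 10),
        ('day_of_week', 7, 10),
        ('qhour_of_day', 24 * 4, 10),
        ('day_type', 3, 10),
    ]

    # points * 2 coordinates * 2 (prefix start and end) + embedding widths
    dim_input = n_begin_end_pts * 2 * 2 + sum(x for (_, _, x) in dim_embeddings)
    print(dim_input)  # 20 + 60 = 80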