mi024

Master 1 (M1) college project "Projet IAD"
git clone https://esimon.eu/repos/mi024.git
Log | Files | Refs | README

commit 35c84f50b54e7a53c79e2d7f8e925fce35e5b4d6
parent 03d6fa940defadf627d717af03ec7a3b11adba10
Author: Étienne Simon <etienne.jl.simon@gmail.com>
Date:   Tue, 19 Mar 2013 15:12:05 +0100

Ajout d'opérateurs de `serialization' des classes de nmlp et de tests unitaires pour ceux-ci.

Diffstat:
MCMakeLists.txt | 20++++++++++++++++++++
Adoc/specifications.tex.orig | 194+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Asrc/CMakeLists.txt | 5+++++
Asrc/nmlp_base_iostream.hpp | 86+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Asrc/nmlp_base_iostream.ipp | 151++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Asrc/nmlp_criterion_iostream.hpp | 41+++++++++++++++++++++++++++++++++++++++++
Asrc/nmlp_criterion_iostream.ipp | 35+++++++++++++++++++++++++++++++++++
Asrc/nmlp_module_iostream.hpp | 84+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Asrc/nmlp_module_iostream.ipp | 148+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Asrc/robber.hpp | 36++++++++++++++++++++++++++++++++++++
Asrc/test/serialization.cpp | 209+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
11 files changed, 1009 insertions(+), 0 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt @@ -1,6 +1,26 @@ cmake_minimum_required(VERSION 2.8) project(MI014S7) +find_package(CUDA REQUIRED) +find_package(Boost COMPONENTS unit_test_framework) +find_package(Boost COMPONENTS serialization REQUIRED) +find_package(Doxygen) + +set(WALFU_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../WALFU" CACHE PATH "Path to the WALFU directory") +set(CUBLAS_LIBRARIES "/usr/lib/x86_64-linux-gnu/libcublas.so" CACHE FILEPATH "Path to the cublas shared library.") + set(CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake" ${CMAKE_MODULE_PATH}) +set(WALFU_INCLUDE_DIRS ${WALFU_DIR} ${WALFU_DIR}/Core ${WALFU_DIR}/nmlp) +set(WALFU_LIBRARY_DIRS ${WALFU_DIR}/bin/Core ${WALFU_DIR}/bin/nmlp) + +set(CUDA_PROPAGATE_HOST_FLAGS OFF) +if(CMAKE_COMPILER_IS_GNUCXX) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++98 -pedantic-errors -Wno-non-template-friend") +endif(CMAKE_COMPILER_IS_GNUCXX) add_subdirectory(doc) +add_subdirectory(src) + +enable_testing() +add_test(Serialization ${CMAKE_CURRENT_BINARY_DIR}/src/test_serialization) +add_custom_command(TARGET Serialization POST_BUILD COMMAND ${CMAKE_COMMAND} -E remove ${CMAKE_CURRENT_BINARY_DIR}/test_serialization_tmp) diff --git a/doc/specifications.tex.orig b/doc/specifications.tex.orig @@ -0,0 +1,194 @@ +\documentclass[11pt,a4paper]{report} +\usepackage{amsfonts} +\usepackage{setspace} +\usepackage{graphicx} +\usepackage{url} +\usepackage{epstopdf} +\usepackage[T1]{fontenc} +\usepackage[utf8]{inputenc} +\usepackage[french]{babel} +\usepackage{tikz} +\usepackage{tikz-uml} +\usetikzlibrary{positioning,chains,fit,shapes,calc,arrows,decorations.text} + +\newcommand{\nmlp}{\textsc{nmlp}} +\renewcommand{\thesection}{\arabic{section}} + +\begin{document} +\begin{titlepage} +\begin{center} + \begin{minipage}{0.58\textwidth} + \includegraphics[width=\textwidth]{images/UPMC_logotype} + \end{minipage} + \begin{minipage}{0.4\textwidth} + \begin{center} + \texttt{M I 0 2 4}\\ + \vspace{2ex} + \textbf{\Large Projet IAD} + 
\end{center} + \end{minipage} + \vfill + \begin{spacing}{2} + \textbf{\textsc{\huge Vers une Machine d'apprentissage persistante}} + \end{spacing} + \vspace{0.1\textheight} + {\LARGE Cahier des charges}\\ + \vspace{0.1\textheight} + {\Large Auteur~: \hfill Encadrant~: \\ + Étienne Simon \hfill Ludovic Denoyer} + \vfill + \today +\end{center} +\end{titlepage} + +\tableofcontents +\pagebreak + +\section{Objectif du PIAD} +\subsection{Présentation} +Ce PIAD est intitulé «~Vers une Machine d'apprentissage persistante.~» +L'objectif sera d'écrire une plateforme pour l'apprentissage et le raisonnement sur des données hétérogènes. +Les modèles ainsi appris devront être conservés dans une base de données en vue de leur réutilisation. + +\subsection{Cadre} +Le projet sera utilisé dans le cadre de la classification dans des réseaux hétérogènes. +Cela fera en projetant les données de différents types dans un espace latente (commun). + +\begin{figure}[h] + \centering + \begin{tikzpicture} + [coder/.style={->,ultra thick,postaction={decorate,decoration={raise=1ex,text along path,text align=center,text={|\sffamily|#1}}}}] + + \node [draw,rectangle,rounded corners,minimum size=3cm] (latent space) {Espace latent \(Z\)}; + \path (latent space.south)+(-4,-4) node [draw,rectangle,minimum size=2cm] (view 1) {Vue \(X_1\)}; + \path (latent space.south)+(0,-4) node [draw,rectangle,minimum size=2cm] (view 2) {Vue \(X_2\)}; + \path (latent space.south)+(4,-4) node [draw,rectangle,minimum size=2cm] (view 3) {Vue \(X_3\)}; + \draw [coder=encodeur] (view 1.95) to [bend left=15] (latent space.-125); + \draw [coder=d{é}codeur] (latent space.-115) to [bend left=15] (view 1.85); + \draw [coder=encodeur] (view 2.95) to [bend left=15] (latent space.-95); + \draw [coder=d{é}codeur] (latent space.-85) to [bend left=15] (view 2.85); + \draw [coder=encodeur] (view 3.95) to [bend left=15] (latent space.-65); + \draw [coder=d{é}codeur] (latent space.-55) to [bend left=15] (view 3.85); + \draw (latent 
space.center)+(0,1) node {\(\times z_1\)}; + \draw (latent space.center)+(1,0.5) node {\(\times z_2\)}; + \draw (latent space.center)+(-0.5,-1) node {\(\times z_3\)}; + \draw (view 1.center)+(0.5,0.5) node {\scriptsize\(\times x_1^1\)}; + \draw (view 1.center)+(-0.5,-0.5) node {\scriptsize\(\times x_1^2\)}; + \draw (view 2.center)+(-0.5,0.5) node {\scriptsize\(\times x_2^3\)}; + \draw (view 2.center)+(0,-0.5) node {\scriptsize\(\times x_2^2\)}; + \draw (view 3.center)+(0.5,0.5) node {\scriptsize\(\times x_3^2\)}; + \end{tikzpicture} + \caption{Relation entre les différents espaces} + \label{fig:latent_space_presentation} +\end{figure} + +La figure \ref{fig:latent_space_presentation} présente les différents espaces entrant en jeu~: +\begin{itemize} + \item Les vues sont les espaces d'observation, il y en a autant que de type de donnée. + Par exemple la vue 1 peut être un espace d'observation textuel et la vue 2 un espace d'observation visuel, au quel cas le vecteur \(x_1^2\) correspond à un mot du type «~chat~» et le vecteur \(x_2^2\) à l'image d'un chat. + \item L'espace latent contient les concepts associés aux observations. En conservant l'exemple précèdent, \(z_2\) correspond au concept «~chat.~» +\end{itemize} + +Pour faire le lien entre les vecteurs des espaces d'observation et le vecteur de l'espace latent, des encodeur et décodeur sont utilisés, il s'agit de fonctions \( e_i: X_i \mapsto Z \) et \( d_i: Z \mapsto X_i \). +Celles-ci sont apprises par des perceptrons multicouches (MLP) selon les observations faites. + +Par ailleurs, des relations sont définis sur les concepts. +Les relations sont des sous ensemble de \(Z^2\) elles aussi apprises par des MLP (qui apprendront plutôt des métriques du type \(Z^2 \mapsto \mathbb{R}\).) 
+Un exemple de relation est «~auteur de~» dans le cadre d'un réseau avec des nœuds «~article~» et «~personne.~» + +\subsection{Objectifs principaux} +\begin{figure}[ht] + \centering + \begin{tikzpicture}[scale=0.75, every node/.style={transform shape}] + \begin{umlsystem}[x=5]{Solution logicielle} + \umlusecase[y=-1,name=add_observation,width=3cm]{Ajout d'observation} + \umlusecase[y=-3,name=add_relation,width=3cm]{Ajout d'une relation} + \umlusecase[x=4,name=add_concept,width=3cm]{Ajout d'un concept} + \umlusecase[x=4,y=-2,name=add_relation_type,width=3cm]{Ajout d'un type de relation} + \umlusecase[x=4,y=-4,name=config_supervisor,width=3cm]{Configuration du superviseur} + \end{umlsystem} + \umlactor[y=-2]{Utilisateur} + \umlactor[x=14,y=-2]{Administrateur} + + \umlassoc{Utilisateur}{add_observation} + \umlassoc{Utilisateur}{add_relation} + \umlassoc{Administrateur}{add_concept} + \umlassoc{Administrateur}{add_relation_type} + \umlassoc{Administrateur}{config_supervisor} + \end{tikzpicture} + \caption{Diagramme des cas d'utilisation} + \label{fig:use_case_diagram} +\end{figure} + +L'objectif principal est d'écrire une plateforme d'apprentissage de représentations latentes. +Les objets manipulés doivent être persistant, ainsi, on peut distinguer deux bases de données~: +\begin{itemize} + \item Une base contenant les données d'apprentissage. + \item Une base contenant les modèles appris. +\end{itemize} + +Les modèles sont appris sous l'égide d'un superviseur, se sont des MLP et il sont décomposables en trois catégories~: +\begin{itemize} + \item Les encodeurs qui permettent de projeter un vecteur observé dans l'espace latent. + \item Les décodeurs qui permettent de projeter un vecteur concept de l'espace latent dans un espace d'observation. + \item Les métriques qui permettent d'établir des relations entre les concepts de l'espace latent. +\end{itemize} + +\subsection{Contraintes techniques} +\nmlp{} est une bibliothèque permettant l'apprentissage de MLP. 
+Les MLP sont traités comme des ensembles de modules, qui sont appris sous la gouvernance d'un critère. +La plateforme développée utilisera \nmlp{} pour la représentation et l'apprentissage des modèles. + +Le développement se fera en C++03 (ISO/IEC 14882:2003). +De plus, le livrable devra se limiter autant que possible aux dépendances de \nmlp{}, c'est à dire les bibliothèques Boost et CUDA. +Par ailleurs, le code devra fonctionner sous Windows et sous les divers *nix, n'ayant à disposition que des environnements FreeBSD et OpenBSD avec GCC et Clang, l'encadrant devra se charger de vérifier le fonctionnement du code sous Windows. +Le livrable devra être compilable avec le moteur de production CMake utilisé par \nmlp{}. + +Pour s'assurer d'un suivi en temps réel du travail effectué, l'ensemble du livrable sera maintenue par un logiciel de gestion de versions, en l'occurrence un dépôt Mercurial sur Bitbucket~:\\ +\indent\indent\url{https://bitbucket.org/ejls/mi024} + +\section{Description de la solution demandée} +\begin{figure}[h] + \centering + \begin{tikzpicture} + [action/.style={->,ultra thick,postaction={decorate,decoration={raise=1ex,text along path,text align=center,text={|\sffamily|#1}}}}] + \node [draw, cylinder, shape border rotate=90, minimum height=4cm, minimum width=3.5cm, aspect=2.5, label={[above]Bases de données}] (database) {}; + \path (database.center)+(0,0.75) node [draw] (model) {Modèles}; + \path (database.center)+(0,-0.75) node [draw,align=center] (data) {Données \\ d'apprentissage}; + \path (database.west)+(-3,0) node [draw, rectangle, rounded corners] (supervisor) {Superviseur}; + \draw [<->, ultra thick] (model) to (supervisor); + \draw [->, ultra thick] (data) to (supervisor); + \umlactor[x=-5,y=-1.5]{Utilisateur} + \umlactor[x=-8,y=1.8]{Administrateur} + \draw [action=remplit] (Utilisateur) to (data); + \draw [action=initialise] (Administrateur) to (model); + \draw [action=configure] (Administrateur) to (supervisor); + \end{tikzpicture} + 
\caption{Diagramme (informel) présentant l'architecture de la solution logicielle.} + \label{fig:architecture} +\end{figure} +La figure~\ref{fig:architecture} présente les principaux composant de l'architecture logicielle. +La majeure partie du travail s'effectuera sur le superviseur. +À noté, qu'une fonctionnalité intéressante qui pourra être ajouté est le lancement en parallèle de plusieurs superviseurs (chacun sur un modèle différent), il pourra alors être intéressant de construire un composant pour gérer un ensemble de superviseur (qui enverrait les mêmes données à des superviseurs paramétrés différemment par exemple.) + +Une description plus détaillée avec une liste des activités et un calendrier sont présents dans le plan de développement. + +\section{Composition du livrable} +\begin{itemize} + \item Un script de compilation CMake + \item Des binaires superviseurs + \item Une interface d'ajout de données d'apprentissage + \item Un contrôleur (serveur) gérant un ensemble de superviseur + \item La documentation du projet + \begin{itemize} + \item Le cahier des charges + \item Le plan de développement + \item Le dossier d'analyse et de conception + \item La documentation développeur + \item Le rapport des tests + \item Les manpages relatives aux différents binaires + \item Les README et INSTALL usuels + \item Le rapport final + \end{itemize} +\end{itemize} +\end{document} diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt @@ -0,0 +1,5 @@ +include_directories(${Boost_INCLUDE_DIRS} ${WALFU_INCLUDE_DIRS}) +link_directories(${Boost_LIBRARY_DIRS} ${WALFU_LIBRARY_DIRS}) + +cuda_add_executable(test_serialization ${WALFU_DIR}/nmlp/Kernels_nmlp.cu test/serialization.cpp) +target_link_libraries(test_serialization lib_nmlp lib_core ${CUBLAS_LIBRARIES} ${CUDA_LIBRARIES} ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY} ${Boost_SERIALIZATION_LIBRARY}) diff --git a/src/nmlp_base_iostream.hpp b/src/nmlp_base_iostream.hpp @@ -0,0 +1,86 @@ +#ifndef NMLP_BASE_IOSTREAM_HPP_INCLUDED +#define 
NMLP_BASE_IOSTREAM_HPP_INCLUDED + +#include <boost/serialization/export.hpp> +#include <boost/serialization/split_free.hpp> + +#include "nmlp/CPUMatrix.h" +#include "nmlp/CPUSparseMatrix.h" +#include "nmlp/GPUMatrix.h" +#include "nmlp/Tensor.h" + +/** @brief Serialization save operator of @ref CPUMatrix. */ +template<class Archive> +void save(Archive &ar, CPUMatrix const &rhs, unsigned const version); +/** @brief Serialization pre-save operator of @ref CPUMatrix. */ +template<class Archive> +void save_construct_data(Archive &ar, CPUMatrix const *rhs, unsigned const version); + +/** @brief Serialization load operator of @ref CPUMatrix. */ +template<class Archive> +void load(Archive &ar, CPUMatrix &rhs, unsigned const version); +/** @brief Serialization pre-load operator of @ref CPUMatrix. */ +template<class Archive> +void load_construct_data(Archive &ar, CPUMatrix *rhs, unsigned const version); + +/** @brief Serialization save operator of @ref GPUMatrix. */ +template<class Archive> +void save(Archive &ar, GPUMatrix const &rhs, unsigned const version); +/** @brief Serialization pre-save operator of @ref GPUMatrix. */ +template<class Archive> +void save_construct_data(Archive &ar, GPUMatrix const *rhs, unsigned const version); + +/** @brief Serialization load operator of @ref GPUMatrix. */ +template<class Archive> +void load(Archive &ar, GPUMatrix &rhs, unsigned const version); +/** @brief Serialization pre-load operator of @ref GPUMatrix. */ +template<class Archive> +void load_construct_data(Archive &ar, GPUMatrix *rhs, unsigned const version); + +/** + * @brief Serialization save operator of @ref CPUSparseMatrix. + * + * @attention @ref CPUSparseMatrix is accessed through a member iterator, it is set to end after this function is called. + */ +template<class Archive> +void save(Archive &ar, CPUSparseMatrix const &rhs, unsigned const version); +/** @brief Serialization pre-save operator of @ref CPUSparseMatrix. 
*/ +template<class Archive> +void save_construct_data(Archive &ar, CPUSparseMatrix const *rhs, unsigned const version); + +/** @brief Serialization load operator of @ref CPUSparseMatrix. + * + * @attention @ref CPUSparseMatrix is accessed through a member iterator, it is set to end after this function is called. + */ +template<class Archive> +void load(Archive &ar, CPUSparseMatrix &rhs, unsigned const version); +/** @brief Serialization pre-load operator of @ref CPUSparseMatrix. */ +template<class Archive> +void load_construct_data(Archive &ar, CPUSparseMatrix *rhs, unsigned const version); + +/** @brief Serialization save operator of @ref Tensor. */ +template<class Archive> +void save(Archive &ar, Tensor const &rhs, unsigned const version); +/** @brief Serialization pre-save operator of @ref Tensor. */ +template<class Archive> +void save_construct_data(Archive &ar, Tensor const *rhs, unsigned const version); + +/** @brief Serialization load operator of @ref Tensor. */ +template<class Archive> +void load(Archive &ar, Tensor &rhs, unsigned const version); +/** @brief Serialization pre-load operator of @ref Tensor. 
*/ +template<class Archive> +void load_construct_data(Archive &ar, Tensor *rhs, unsigned const version); + +BOOST_SERIALIZATION_SPLIT_FREE(CPUMatrix) +BOOST_SERIALIZATION_SPLIT_FREE(GPUMatrix) +BOOST_SERIALIZATION_SPLIT_FREE(CPUSparseMatrix) +BOOST_SERIALIZATION_SPLIT_FREE(Tensor) +BOOST_CLASS_EXPORT_GUID(CPUMatrix, "CPUMatrix") +BOOST_CLASS_EXPORT_GUID(GPUMatrix, "GPUMatrix") +BOOST_CLASS_EXPORT_GUID(CPUSparseMatrix, "CPUSparseMatrix") +BOOST_CLASS_EXPORT_GUID(Tensor, "Tensor") + +#include "nmlp_base_iostream.ipp" + +#endif diff --git a/src/nmlp_base_iostream.ipp b/src/nmlp_base_iostream.ipp @@ -0,0 +1,151 @@ +#include <cstddef> + +#include <boost/serialization/shared_ptr.hpp> + +#include "nmlp/CPUMatrix.h" +#include "nmlp/CPUSparseMatrix.h" +#include "nmlp/GPUMatrix.h" +#include "nmlp/Matrix.h" +#include "nmlp/Tensor.h" + +#include "nmlp_base_iostream.hpp" + +template<class Archive, class Matrix_type> +void save_dense_matrix(Archive &ar, Matrix_type const &rhs, unsigned const){ + Matrix_type &nonconst_rhs=const_cast<Matrix_type&>(rhs); // The interface of nmlp is const-inconsistent. 
+ for(std::size_t y=0; y<nonconst_rhs.getNumberOfRows(); ++y) + for(std::size_t x=0; x<nonconst_rhs.getNumberOfColumns(); ++x){ + float val=nonconst_rhs.getValue(y, x); + ar << val; + } +} + +template<class Archive, class Matrix_type> +void load_dense_matrix(Archive &ar, Matrix_type &rhs, unsigned const){ + for(std::size_t y=0; y<rhs.getNumberOfRows(); ++y) + for(std::size_t x=0; x<rhs.getNumberOfColumns(); ++x){ + float value; + ar >> value; + rhs.setValue(y, x, value); + } +} + +template<class Archive, class Matrix_type> +void load_construct_matrix_data(Archive &ar, Matrix_type *rhs, unsigned const){ + int r, c; + ar >> r >> c; + new(rhs) Matrix_type(r, c); +} + +template<class Archive, class Matrix_type> +void save_construct_matrix_data(Archive &ar, Matrix_type const *rhs, unsigned const){ + Matrix_type *nonconst_rhs=const_cast<Matrix_type*>(rhs); // The interface of nmlp is const-inconsistent. + int row=nonconst_rhs->getNumberOfRows(), column=nonconst_rhs->getNumberOfColumns(); + ar << row << column; +} + +template<class Archive> +void save_construct_data(Archive &ar, CPUMatrix const *rhs, unsigned const version) + { save_construct_matrix_data(ar, rhs, version); } + +template<class Archive> +void save(Archive &ar, CPUMatrix const &rhs, unsigned const version){ + boost::serialization::void_cast_register<CPUMatrix, Matrix>(); + save_dense_matrix(ar, rhs, version); +} + +template<class Archive> +void load_construct_data(Archive &ar, CPUMatrix *rhs, unsigned const version) + { load_construct_matrix_data(ar, rhs, version); } + +template<class Archive> +void load(Archive &ar, CPUMatrix &rhs, unsigned const version){ + boost::serialization::void_cast_register<CPUMatrix, Matrix>(); + load_dense_matrix(ar, rhs, version); +} + + +template<class Archive> +void save_construct_data(Archive &ar, GPUMatrix const *rhs, unsigned const version) + { save_construct_matrix_data(ar, rhs, version); } + +template<class Archive> +void save(Archive &ar, GPUMatrix const &rhs, unsigned 
const version){ + boost::serialization::void_cast_register<GPUMatrix, Matrix>(); + save_dense_matrix(ar, rhs, version); +} + +template<class Archive> +void load_construct_data(Archive &ar, GPUMatrix *rhs, unsigned const version) + { load_construct_matrix_data(ar, rhs, version); } + +template<class Archive> +void load(Archive &ar, GPUMatrix &rhs, unsigned const version){ + boost::serialization::void_cast_register<GPUMatrix, Matrix>(); + load_dense_matrix(ar, rhs, version); +} + +template<class Archive> +void save_construct_data(Archive &ar, CPUSparseMatrix const *rhs, unsigned const version) + { save_construct_matrix_data(ar, rhs, version); } + +template<class Archive> +void save(Archive &ar, CPUSparseMatrix const &rhs, unsigned const){ + boost::serialization::void_cast_register<CPUSparseMatrix, Matrix>(); + CPUSparseMatrix &nonconst_rhs=const_cast<CPUSparseMatrix&>(rhs); // The interface of nmlp is const-inconsistent. + int row, col, end_of_data=-1; + float val; + // This is the only way to do it with nmlp... 
+ for(nonconst_rhs.initIterator(), nonconst_rhs.nextIterator(&row, &col, &val); nonconst_rhs.hasNextIterator(); nonconst_rhs.nextIterator(&row, &col, &val)) + if(val) + ar << row << col << val; + if(val) + ar << row << col << val; + ar << end_of_data; +} + +template<class Archive> +void load_construct_data(Archive &ar, CPUSparseMatrix *rhs, unsigned const version) + { load_construct_matrix_data(ar, rhs, version); } + +template<class Archive> +void load(Archive &ar, CPUSparseMatrix &rhs, unsigned const){ + boost::serialization::void_cast_register<CPUSparseMatrix, Matrix>(); + int row, col; + float val; + while(ar >> row, row!=-1){ + ar >> col >> val; + rhs.setValue(row, col, val); + } +} + +template<class Archive> +void save_construct_data(Archive &ar, Tensor const *rhs, unsigned const version){ + int size=const_cast<Tensor*>(rhs)->getNumberOfMatrices(); + ar << size; +} + +template<class Archive> +void save(Archive &ar, Tensor const &rhs, unsigned const){ + Tensor &nonconst_rhs=const_cast<Tensor&>(rhs); // The interface of nmlp is const-inconsistent. 
+ for(std::size_t i=0; i<nonconst_rhs.getNumberOfMatrices(); ++i){ + boost::shared_ptr<Matrix> matrix=nonconst_rhs.getMatrix(i); + ar << matrix; + } +} + +template<class Archive> +void load_construct_data(Archive &ar, Tensor *rhs, unsigned const version){ + int n; + ar >> n; + new(rhs) Tensor(n); +} + +template<class Archive> +void load(Archive &ar, Tensor &rhs, unsigned const){ + for(std::size_t i=0; i<rhs.getNumberOfMatrices(); ++i){ + boost::shared_ptr<Matrix> m; + ar >> m; + rhs.setMatrix(i, m); + } +} diff --git a/src/nmlp_criterion_iostream.hpp b/src/nmlp_criterion_iostream.hpp @@ -0,0 +1,41 @@ +#ifndef NMLP_CRITERION_IOSTREAM_HPP_INCLUDED +#define NMLP_CRITERION_IOSTREAM_HPP_INCLUDED + +#include <boost/serialization/export.hpp> +#include <boost/serialization/split_free.hpp> + +#include "nmlp/criterions/CPUHingeLoss.h" +#include "nmlp/criterions/CPUMaxLoss.h" +#include "nmlp/criterions/CPUSquareLoss.h" +#include "nmlp/criterions/GPUHingeLoss.h" +#include "nmlp/criterions/GPUSquareLoss.h" + +/** @brief Serialization operator of @ref CPUMaxLoss. */ +template<class Archive> +void serialize(Archive &ar, CPUMaxLoss &rhs, unsigned const version); + +#define DECLARE_CRITERION_IO( NAME ) \ + /** @brief Serialization save operator of @ref NAME. */ \ + template<class Archive> \ + void save(Archive &ar, NAME const &rhs, unsigned const version); \ + /** @brief Serialization pre-save operator of @ref NAME. */ \ + template<class Archive> \ + void save_construct_data(Archive &ar, NAME const *rhs, unsigned const version); \ + /** @brief Serialization load operator of @ref NAME. */ \ + template<class Archive> \ + void load(Archive &ar, NAME &rhs, unsigned const version); \ + /** @brief Serialization pre-load operator of @ref NAME. 
*/ \ + template<class Archive> \ + void save_construct_data(Archive &ar, NAME *rhs, unsigned const version); \ + BOOST_SERIALIZATION_SPLIT_FREE(NAME) \ + BOOST_CLASS_EXPORT_GUID(NAME, #NAME) + +DECLARE_CRITERION_IO( CPUHingeLoss ) +DECLARE_CRITERION_IO( CPUSquareLoss ) +DECLARE_CRITERION_IO( GPUHingeLoss ) +DECLARE_CRITERION_IO( GPUSquareLoss ) +BOOST_CLASS_EXPORT_GUID(CPUMaxLoss, "CPUMaxLoss") + +#include "nmlp_criterion_iostream.ipp" + +#endif diff --git a/src/nmlp_criterion_iostream.ipp b/src/nmlp_criterion_iostream.ipp @@ -0,0 +1,35 @@ +#include "nmlp_base_iostream.hpp" +#include "nmlp_criterion_iostream.hpp" +#include "robber.hpp" + +template<class Archive> +void serialize(Archive&,CPUMaxLoss&,unsigned const){ + boost::serialization::void_cast_register<CPUMaxLoss, Criterion>(); +} + +#define DEFINE_FUNCTION_CRITERION_IO( NAME ) \ + struct NAME ## _size_robber_tag: Tag_base<NAME ## _size_robber_tag, int NAME::*>{}; \ + template struct Rob<NAME ## _size_robber_tag, &NAME::size>; \ + template<class Archive> \ + void save_construct_data(Archive &ar, NAME const *rhs, unsigned const){ \ + ar << const_cast<NAME*>(rhs)->*get(NAME ## _size_robber_tag()); \ + } \ + template<class Archive> \ + void load_construct_data(Archive &ar, NAME *rhs, unsigned const){ \ + int size; \ + ar >> size; \ + new(rhs) NAME(size); \ + } \ + template<class Archive> \ + void save(Archive&,NAME const&,unsigned const){ \ + boost::serialization::void_cast_register<NAME, Criterion>(); \ + } \ + template<class Archive> \ + void load(Archive&,NAME&,unsigned const){ \ + boost::serialization::void_cast_register<NAME, Criterion>(); \ + } + +DEFINE_FUNCTION_CRITERION_IO( CPUSquareLoss ) +DEFINE_FUNCTION_CRITERION_IO( CPUHingeLoss ) +DEFINE_FUNCTION_CRITERION_IO( GPUSquareLoss ) +DEFINE_FUNCTION_CRITERION_IO( GPUHingeLoss ) diff --git a/src/nmlp_module_iostream.hpp b/src/nmlp_module_iostream.hpp @@ -0,0 +1,84 @@ +#ifndef NMLP_MODULE_IOSTREAM_HPP_INCLUDED +#define NMLP_MODULE_IOSTREAM_HPP_INCLUDED + 
+#include <boost/serialization/export.hpp> +#include <boost/serialization/split_free.hpp> + +#include "nmlp/modules/CPUConcatenation.h" +#include "nmlp/modules/CPUIdentity.h" +#include "nmlp/modules/CPULinear.h" +#include "nmlp/modules/CPULogistic.h" +#include "nmlp/modules/CPUPositiveShrink.h" +#include "nmlp/modules/CPUSparseLinear.h" +#include "nmlp/modules/CPUTanH.h" +#include "nmlp/modules/CPUUnique.h" +#include "nmlp/modules/GPUIdentity.h" +#include "nmlp/modules/GPULinear.h" +#include "nmlp/modules/GPUPositiveShrink.h" +#include "nmlp/modules/GPUTanH.h" +#include "nmlp/modules/GPUUnique.h" +#include "nmlp/modules/SequentialModule.h" +#include "nmlp/modules/TableModule.h" + +/** + * @note There is an error in nmlp/modules/GPUShrink.h and nmlp/modules/CPUShrink.h. + */ + +/** @brief Serialization operator of @ref TableModule. */ +template<class Archive> +void serialize(Archive &ar, TableModule &rhs, unsigned const version); + +/** @brief Serialization operator of @ref SequentialModule. */ +template<class Archive> +void serialize(Archive &ar, SequentialModule &rhs, unsigned const version); + + +#define DECLARE_MODULE_IO( NAME ) \ + /** @brief Serialization load operator of @ref NAME. */ \ + template<class Archive> \ + void serialize(Archive &ar, NAME &rhs, unsigned const version); \ + /** @brief Serialization pre-save operator of @ref NAME. */ \ + template<class Archive> \ + void save_construct_data(Archive &ar, NAME const *rhs, unsigned const version); \ + /** @brief Serialization pre-load operator of @ref NAME. */ \ + template<class Archive> \ + void save_construct_data(Archive &ar, NAME *rhs, unsigned const version); \ + BOOST_CLASS_EXPORT_GUID(NAME, #NAME) + +#define DECLARE_SPLIT_MODULE_IO( NAME ) \ + /** @brief Serialization save operator of @ref NAME. */ \ + template<class Archive> \ + void save(Archive &ar, NAME const &rhs, unsigned const version); \ + /** @brief Serialization pre-save operator of @ref NAME. 
*/ \ + template<class Archive> \ + void save_construct_data(Archive &ar, NAME const *rhs, unsigned const version); \ + /** @brief Serialization load operator of @ref NAME. */ \ + template<class Archive> \ + void load(Archive &ar, NAME &rhs, unsigned const version); \ + /** @brief Serialization pre-load operator of @ref NAME. */ \ + template<class Archive> \ + void save_construct_data(Archive &ar, NAME *rhs, unsigned const version); \ + BOOST_SERIALIZATION_SPLIT_FREE(NAME) \ + BOOST_CLASS_EXPORT_GUID(NAME, #NAME) + +DECLARE_SPLIT_MODULE_IO(CPUConcatenation) +DECLARE_SPLIT_MODULE_IO(CPUIdentity) +DECLARE_SPLIT_MODULE_IO(CPULogistic) +DECLARE_SPLIT_MODULE_IO(CPUPositiveShrink) +DECLARE_SPLIT_MODULE_IO(CPUTanH) +DECLARE_SPLIT_MODULE_IO(GPUIdentity) +DECLARE_SPLIT_MODULE_IO(GPUPositiveShrink) +DECLARE_SPLIT_MODULE_IO(GPUTanH) + +DECLARE_MODULE_IO(CPUUnique) +DECLARE_MODULE_IO(GPULinear) +DECLARE_MODULE_IO(CPULinear) +DECLARE_MODULE_IO(GPUUnique) +DECLARE_MODULE_IO(CPUSparseLinear) + +BOOST_CLASS_EXPORT_GUID(SequentialModule, "SequentialModule") +BOOST_CLASS_EXPORT_GUID(TableModule, "TableModule") + +#include "nmlp_module_iostream.ipp" + +#endif diff --git a/src/nmlp_module_iostream.ipp b/src/nmlp_module_iostream.ipp @@ -0,0 +1,148 @@ +#include <cstddef> + +#include <boost/serialization/vector.hpp> +#include <boost/serialization/shared_ptr.hpp> + +#include "nmlp/modules/TensorModule.h" + +#include "nmlp_base_iostream.hpp" +#include "nmlp_module_iostream.hpp" +#include "robber.hpp" + +struct TableModule_modules_robber_tag: Tag_base<TableModule_modules_robber_tag, std::vector<boost::shared_ptr<Module> > TableModule::*>{}; +template struct Rob<TableModule_modules_robber_tag, &TableModule::modules>; + +template<class Archive> +void serialize(Archive &ar, TableModule &rhs, unsigned const){ + boost::serialization::void_cast_register<TableModule, Module>(); + ar & rhs.*get(TableModule_modules_robber_tag()); +} + +struct SequentialModule_modules_robber_tag: 
Tag_base<SequentialModule_modules_robber_tag, std::vector<boost::shared_ptr<Module> > SequentialModule::*>{}; +template struct Rob<SequentialModule_modules_robber_tag, &SequentialModule::modules>; + +template<class Archive> +void serialize(Archive &ar, SequentialModule &rhs, unsigned const){ + boost::serialization::void_cast_register<SequentialModule, Module>(); + ar & rhs.*get(SequentialModule_modules_robber_tag()); +} + +struct CPUUnique_parameters_robber_tag: Tag_base<CPUUnique_parameters_robber_tag, boost::shared_ptr<CPUMatrix> CPUUnique::*>{}; +template struct Rob<CPUUnique_parameters_robber_tag, &CPUUnique::parameters>; +template<class Archive> +void save_construct_data(Archive &ar, CPUUnique const *rhs, unsigned const){ + int nc=((const_cast<CPUUnique*>(rhs)->*get(CPUUnique_parameters_robber_tag()))->getNumberOfColumns()); + ar << nc; +} +template<class Archive> +void load_construct_data(Archive &ar, CPUUnique *rhs, unsigned const){ + int c; + ar >> c; + new(rhs) CPUUnique(c); +} +template<class Archive> +void serialize(Archive &ar, CPUUnique &rhs, unsigned const){ + boost::serialization::void_cast_register<CPUUnique, Module>(); + ar & (rhs.*get(CPUUnique_parameters_robber_tag())); +} + +struct GPUUnique_parameters_robber_tag: Tag_base<GPUUnique_parameters_robber_tag, boost::shared_ptr<GPUMatrix> GPUUnique::*>{}; +template struct Rob<GPUUnique_parameters_robber_tag, &GPUUnique::parameters>; +template<class Archive> +void save_construct_data(Archive &ar, GPUUnique const *rhs, unsigned const){ + int nc=((const_cast<GPUUnique*>(rhs)->*get(GPUUnique_parameters_robber_tag()))->getNumberOfColumns()); + ar << nc; +} +template<class Archive> +void load_construct_data(Archive &ar, GPUUnique *rhs, unsigned const){ + int c; + ar >> c; + new(rhs) GPUUnique(c); +} +template<class Archive> +void serialize(Archive &ar, GPUUnique &rhs, unsigned const){ + boost::serialization::void_cast_register<GPUUnique, Module>(); + ar & (rhs.*get(GPUUnique_parameters_robber_tag())); +} + 
+struct CPUConcatenation_size1_robber_tag: Tag_base<CPUConcatenation_size1_robber_tag, int CPUConcatenation::*>{}; +template struct Rob<CPUConcatenation_size1_robber_tag, &CPUConcatenation::size1>; +struct CPUConcatenation_size2_robber_tag: Tag_base<CPUConcatenation_size2_robber_tag, int CPUConcatenation::*>{}; +template struct Rob<CPUConcatenation_size2_robber_tag, &CPUConcatenation::size2>; + +template<class Archive> +void save_construct_data(Archive &ar, CPUConcatenation const *rhs, unsigned const){ + ar << (const_cast<CPUConcatenation*>(rhs)->*get(CPUConcatenation_size1_robber_tag())) << (const_cast<CPUConcatenation*>(rhs)->*get(CPUConcatenation_size2_robber_tag())); +} + +template<class Archive> +void load_construct_data(Archive &ar, CPUConcatenation *rhs, unsigned const){ + int size1, size2; + ar >> size1 >> size2; + new(rhs) CPUConcatenation(size1, size2); +} + +template<class Archive> +void save(Archive&,CPUConcatenation const&,unsigned const){ + boost::serialization::void_cast_register<CPUConcatenation, Module>(); +} +template<class Archive> +void load(Archive&,CPUConcatenation&,unsigned const){ + boost::serialization::void_cast_register<CPUConcatenation, Module>(); +} + +struct TensorModule_parameters_robber_tag: Tag_base<TensorModule_parameters_robber_tag, boost::shared_ptr<Matrix> TensorModule::*>{}; +template struct Rob<TensorModule_parameters_robber_tag, &TensorModule::parameters>; + +#define DEFINE_FUNCTION_MODULE_LINEAR_IO( NAME ) \ + template<class Archive> \ + void save_construct_data(Archive &ar, NAME const *rhs, unsigned const){ \ + boost::shared_ptr<Matrix> matrix=const_cast<NAME*>(rhs)->*get(TensorModule_parameters_robber_tag()); \ + int row=matrix->getNumberOfRows(), column=matrix->getNumberOfColumns(); \ + ar << row << column; \ + } \ + template<class Archive> \ + void load_construct_data(Archive &ar, NAME *rhs, unsigned const){ \ + int row, column; \ + ar >> row >> column; \ + new(rhs) NAME(row, column); \ + } \ + template<class Archive> \ 
+ void serialize(Archive &ar, NAME &rhs, unsigned const){ \ + boost::serialization::void_cast_register<NAME, TensorModule>(); \ + ar & rhs.*get(TensorModule_parameters_robber_tag()); \ + } + +DEFINE_FUNCTION_MODULE_LINEAR_IO( CPUSparseLinear ) +DEFINE_FUNCTION_MODULE_LINEAR_IO( CPULinear ) +DEFINE_FUNCTION_MODULE_LINEAR_IO( GPULinear ) + +#define DEFINE_FUNCTION_MODULE_IO( NAME ) \ + struct NAME ## _size_robber_tag: Tag_base<NAME ## _size_robber_tag, int NAME::*>{}; \ + template struct Rob<NAME ## _size_robber_tag, &NAME::size>; \ + template<class Archive> \ + void save_construct_data(Archive &ar, NAME const *rhs, unsigned const){ \ + ar << const_cast<NAME*>(rhs)->*get(NAME ## _size_robber_tag()); \ + } \ + template<class Archive> \ + void load_construct_data(Archive &ar, NAME *rhs, unsigned const){ \ + int size; \ + ar >> size; \ + new(rhs) NAME(size); \ + } \ + template<class Archive> \ + void save(Archive&,NAME const&,unsigned const){ \ + boost::serialization::void_cast_register<NAME, Module>(); \ + } \ + template<class Archive> \ + void load(Archive&,NAME&,unsigned const){ \ + boost::serialization::void_cast_register<NAME, Module>(); \ + } + +DEFINE_FUNCTION_MODULE_IO(CPUIdentity) +DEFINE_FUNCTION_MODULE_IO(CPULogistic) +DEFINE_FUNCTION_MODULE_IO(CPUPositiveShrink) +DEFINE_FUNCTION_MODULE_IO(CPUTanH) +DEFINE_FUNCTION_MODULE_IO(GPUIdentity) +DEFINE_FUNCTION_MODULE_IO(GPUPositiveShrink) +DEFINE_FUNCTION_MODULE_IO(GPUTanH) diff --git a/src/robber.hpp b/src/robber.hpp @@ -0,0 +1,36 @@ +#ifndef ROBBER_HPP_INCLUDED +#define ROBBER_HPP_INCLUDED + +/** + * @brief Accessor to non-public member. + * + * In order to access a non-public member of a class, you have to specialize this template with a tag (see @ref Tag_base) and the member to access. + * This trick is from Johannes "litb" Schaub. + * + * @tparam Tag The tag of the class to rob. + * @tparam M The address of the member to access. 
+ * + * @bug When called on virtual function, only the final override can be accessed. + */ +template<typename Tag, typename Tag::type M> +struct Rob{ + friend typename Tag::type get(Tag){ + return M; + } +}; + +/** + * @brief Class template to simplify the definition of robber tags. + * + * This templated can be used alongside the @ref Rob class template with a CRTP. + * + * @tparam Tag The tag itself. + * @tparam Member The type of the member to access. + */ +template<typename Tag, typename Member> +struct Tag_base{ + typedef Member type; + friend type get(Tag); +}; + +#endif diff --git a/src/test/serialization.cpp b/src/test/serialization.cpp @@ -0,0 +1,209 @@ +#include <cctype> +#include <fstream> +#include <ios> + +#define BOOST_TEST_DYN_LINK +#define BOOST_TEST_MODULE serialization +#include <boost/archive/text_iarchive.hpp> +#include <boost/archive/text_oarchive.hpp> +#include <boost/make_shared.hpp> +#include <boost/shared_ptr.hpp> +#include <boost/test/unit_test.hpp> + +#include "../nmlp_base_iostream.hpp" +#include "../nmlp_criterion_iostream.hpp" +#include "../nmlp_module_iostream.hpp" + + +template<class T> +void write_and_read(T &out, T &in){ + { + std::ofstream ofs("test_serialization_tmp"); + boost::archive::text_oarchive oa(ofs); + oa << out; + } + { + std::ifstream ifs("test_serialization_tmp"); + boost::archive::text_iarchive ia(ifs); + ia >> in; + } +} + +#define TEST_MATRIX( NAME ) \ + BOOST_AUTO_TEST_CASE( NAME ## _test ){ \ + boost::shared_ptr<NAME> out=boost::make_shared<NAME>(12, 42), in; \ + for(std::size_t y=0; y<12; ++y) \ + for(std::size_t x=0; x<42; ++x) \ + out->setValue(y, x, y*3+x*7); \ + write_and_read(out, in); \ + BOOST_CHECK_EQUAL(in->getNumberOfRows(), 12); \ + BOOST_CHECK_EQUAL(in->getNumberOfColumns(), 42); \ + for(std::size_t y=0; y<12; ++y) \ + for(std::size_t x=0; x<42; ++x) \ + BOOST_CHECK_CLOSE(in->getValue(y, x), y*3+x*7, 1e-5); \ + } + +TEST_MATRIX(CPUMatrix) +TEST_MATRIX(GPUMatrix) +TEST_MATRIX(CPUSparseMatrix) + 
+BOOST_AUTO_TEST_CASE( Tensor_test ){ + boost::shared_ptr<CPUMatrix> m0_out=boost::make_shared<CPUMatrix>(12, 42), m0_in; + boost::shared_ptr<GPUMatrix> m1_out=boost::make_shared<GPUMatrix>(12, 42), m1_in; + boost::shared_ptr<CPUSparseMatrix> m2_out=boost::make_shared<CPUSparseMatrix>(12, 42), m2_in; + for(std::size_t y=0; y<12; ++y) + for(std::size_t x=0; x<42; ++x){ + m0_out->setValue(y, x, y*2+x*3); + m1_out->setValue(y, x, y*5+x*7); + m2_out->setValue(y, x, y*3+x*5); + } + boost::shared_ptr<Tensor> out=boost::make_shared<Tensor>(3), in; + out->setMatrix(0, m0_out); + out->setMatrix(1, m1_out); + out->setMatrix(2, m2_out); + + write_and_read(out, in); + BOOST_CHECK_EQUAL(in->getNumberOfMatrices(), 3); + m0_in=boost::dynamic_pointer_cast<CPUMatrix>(in->getMatrix(0)); + m1_in=boost::dynamic_pointer_cast<GPUMatrix>(in->getMatrix(1)); + m2_in=boost::dynamic_pointer_cast<CPUSparseMatrix>(in->getMatrix(2)); + BOOST_CHECK_NE(m0_in, boost::shared_ptr<CPUMatrix>()); + BOOST_CHECK_NE(m1_in, boost::shared_ptr<GPUMatrix>()); + BOOST_CHECK_NE(m2_in, boost::shared_ptr<CPUSparseMatrix>()); + for(std::size_t y=0; y<12; ++y) + for(std::size_t x=0; x<42; ++x){ + BOOST_CHECK_CLOSE(m0_in->getValue(y, x), y*2+x*3, 1e-5); + BOOST_CHECK_CLOSE(m1_in->getValue(y, x), y*5+x*7, 1e-5); + BOOST_CHECK_CLOSE(m2_in->getValue(y, x), y*3+x*5, 1e-5); + } +} + +#define TEST_SIZE_CLASS( NAME, PARENT ) \ + BOOST_AUTO_TEST_CASE( NAME ## _test ){ \ + boost::shared_ptr<PARENT> out=boost::make_shared<NAME>(42), in; \ + write_and_read(out, in); \ + boost::shared_ptr<NAME> conv=boost::dynamic_pointer_cast<NAME>(in); \ + BOOST_CHECK_NE(conv, boost::shared_ptr<NAME>()); \ + BOOST_CHECK_EQUAL(conv.get()->*get(NAME ## _size_robber_tag()), 42); \ + } + +TEST_SIZE_CLASS(CPUSquareLoss, Criterion) +TEST_SIZE_CLASS(CPUHingeLoss, Criterion) +TEST_SIZE_CLASS(GPUSquareLoss, Criterion) +TEST_SIZE_CLASS(GPUHingeLoss, Criterion) + +BOOST_AUTO_TEST_CASE( CPUMaxLoss_test ){ + boost::shared_ptr<Criterion> 
out=boost::make_shared<CPUMaxLoss>(), in; + write_and_read(out, in); + boost::shared_ptr<CPUMaxLoss> conv=boost::dynamic_pointer_cast<CPUMaxLoss>(in); + BOOST_CHECK_NE(conv, boost::shared_ptr<CPUMaxLoss>()); +} + +TEST_SIZE_CLASS(CPUIdentity, Module) +TEST_SIZE_CLASS(CPULogistic, Module) +TEST_SIZE_CLASS(CPUPositiveShrink, Module) +TEST_SIZE_CLASS(CPUTanH, Module) +TEST_SIZE_CLASS(GPUIdentity, Module) +TEST_SIZE_CLASS(GPUPositiveShrink, Module) +TEST_SIZE_CLASS(GPUTanH, Module) + +BOOST_AUTO_TEST_CASE( CPUConcatenation_test ){ + boost::shared_ptr<Module> out=boost::make_shared<CPUConcatenation>(42, 12), in; + write_and_read(out, in); + boost::shared_ptr<CPUConcatenation> conv=boost::dynamic_pointer_cast<CPUConcatenation>(in); + BOOST_CHECK_NE(conv, boost::shared_ptr<CPUConcatenation>()); + BOOST_CHECK_EQUAL(conv.get()->*get(CPUConcatenation_size1_robber_tag()), 42); + BOOST_CHECK_EQUAL(conv.get()->*get(CPUConcatenation_size2_robber_tag()), 12); +} + +BOOST_AUTO_TEST_CASE( CPUUnique_test ){ + boost::shared_ptr<CPUUnique> out=boost::make_shared<CPUUnique>(12), in; + for(std::size_t i=0; i<12; ++i) + out->setValue(0, i, i*42); + write_and_read(out, in); + BOOST_CHECK_EQUAL((in.get()->*get(CPUUnique_parameters_robber_tag()))->getNumberOfColumns(), 12); + for(std::size_t i=0; i<12; ++i) + BOOST_CHECK_CLOSE((in.get()->*get(CPUUnique_parameters_robber_tag()))->getValue(0, i), i*42, 1e-5); +} + +BOOST_AUTO_TEST_CASE( GPUUnique_test ){ + boost::shared_ptr<GPUUnique> out=boost::make_shared<GPUUnique>(12), in; + for(std::size_t i=0; i<12; ++i) + (out.get()->*get(GPUUnique_parameters_robber_tag()))->setValue(0, i, i*42); + write_and_read(out, in); + BOOST_CHECK_EQUAL((in.get()->*get(GPUUnique_parameters_robber_tag()))->getNumberOfColumns(), 12); + for(std::size_t i=0; i<12; ++i) + BOOST_CHECK_CLOSE((in.get()->*get(GPUUnique_parameters_robber_tag()))->getValue(0, i), i*42, 1e-5); +} + +#define TEST_LINEAR_MODULE( NAME ) \ +BOOST_AUTO_TEST_CASE( NAME ## _test ){ \ + 
boost::shared_ptr<NAME> out=boost::make_shared<NAME>(4, 2), in; \
+	for(std::size_t i=0; i<8; ++i) \
+		(out.get()->*get(TensorModule_parameters_robber_tag()))->setValue(i>>1, i&1, i*3); \
+	write_and_read(out, in); \
+	for(std::size_t i=0; i<8; ++i) \
+		BOOST_CHECK_CLOSE((in.get()->*get(TensorModule_parameters_robber_tag()))->getValue(i>>1, i&1), i*3, 1e-5); \
+	}

+TEST_LINEAR_MODULE(CPULinear)
+TEST_LINEAR_MODULE(GPULinear)
+TEST_LINEAR_MODULE(CPUSparseLinear)
+
+BOOST_AUTO_TEST_CASE( TableModule_test ){
+	boost::shared_ptr<CPUUnique> m0_out=boost::make_shared<CPUUnique>(12), m1_out=boost::make_shared<CPUUnique>(12), m2_out=boost::make_shared<CPUUnique>(12), m0_in, m1_in, m2_in;
+	boost::shared_ptr<TableModule> out=boost::make_shared<TableModule>(), in;
+	for(std::size_t i=0; i<12; ++i){
+		m0_out->setValue(0, i, i*3);
+		m1_out->setValue(0, i, i*5);
+		m2_out->setValue(0, i, i*7);
+	}
+	out->addModule(m0_out);
+	out->addModule(m1_out);
+	out->addModule(m2_out);
+
+	write_and_read(out, in);
+
+	BOOST_CHECK_EQUAL((in.get()->*get(TableModule_modules_robber_tag())).size(), 3);
+	m0_in=boost::dynamic_pointer_cast<CPUUnique>((in.get()->*get(TableModule_modules_robber_tag()))[0]);
+	m1_in=boost::dynamic_pointer_cast<CPUUnique>((in.get()->*get(TableModule_modules_robber_tag()))[1]);
+	m2_in=boost::dynamic_pointer_cast<CPUUnique>((in.get()->*get(TableModule_modules_robber_tag()))[2]);
+	BOOST_CHECK_NE(m0_in, boost::shared_ptr<CPUUnique>());
+	BOOST_CHECK_NE(m1_in, boost::shared_ptr<CPUUnique>());
+	BOOST_CHECK_NE(m2_in, boost::shared_ptr<CPUUnique>());
+
+	for(std::size_t i=0; i<12; ++i){
+		BOOST_CHECK_CLOSE((m0_in.get()->*get(CPUUnique_parameters_robber_tag()))->getValue(0, i), i*3, 1e-5);
+		BOOST_CHECK_CLOSE((m1_in.get()->*get(CPUUnique_parameters_robber_tag()))->getValue(0, i), i*5, 1e-5);
+		BOOST_CHECK_CLOSE((m2_in.get()->*get(CPUUnique_parameters_robber_tag()))->getValue(0, i), i*7, 1e-5);
+	}
+}
+
+BOOST_AUTO_TEST_CASE( SequentialModule_test ){
+	
boost::shared_ptr<CPUUnique> m0_out=boost::make_shared<CPUUnique>(12), m1_out=boost::make_shared<CPUUnique>(12), m2_out=boost::make_shared<CPUUnique>(12), m0_in, m1_in, m2_in;
+	boost::shared_ptr<SequentialModule> out=boost::make_shared<SequentialModule>(), in;
+	for(std::size_t i=0; i<12; ++i){
+		m0_out->setValue(0, i, i*3);
+		m1_out->setValue(0, i, i*5);
+		m2_out->setValue(0, i, i*7);
+	}
+	out->addModule(m0_out);
+	out->addModule(m1_out);
+	out->addModule(m2_out);
+
+	write_and_read(out, in);
+
+	BOOST_CHECK_EQUAL((in.get()->*get(SequentialModule_modules_robber_tag())).size(), 3);
+	m0_in=boost::dynamic_pointer_cast<CPUUnique>((in.get()->*get(SequentialModule_modules_robber_tag()))[0]);
+	m1_in=boost::dynamic_pointer_cast<CPUUnique>((in.get()->*get(SequentialModule_modules_robber_tag()))[1]);
+	m2_in=boost::dynamic_pointer_cast<CPUUnique>((in.get()->*get(SequentialModule_modules_robber_tag()))[2]);
+	BOOST_CHECK_NE(m0_in, boost::shared_ptr<CPUUnique>());
+	BOOST_CHECK_NE(m1_in, boost::shared_ptr<CPUUnique>());
+	BOOST_CHECK_NE(m2_in, boost::shared_ptr<CPUUnique>());
+
+	for(std::size_t i=0; i<12; ++i){
+		BOOST_CHECK_CLOSE((m0_in.get()->*get(CPUUnique_parameters_robber_tag()))->getValue(0, i), i*3, 1e-5);
+		BOOST_CHECK_CLOSE((m1_in.get()->*get(CPUUnique_parameters_robber_tag()))->getValue(0, i), i*5, 1e-5);
+		BOOST_CHECK_CLOSE((m2_in.get()->*get(CPUUnique_parameters_robber_tag()))->getValue(0, i), i*7, 1e-5);
+	}
+}