###
'''
Similar to M1 from https://github.com/dpkingma/nips14-ssl
Original Author: S. Saemundsson
Edited by: Z. Rahaie
'''
###

from vae import VariationalAutoencoder

import numpy as np
import data.asd as asd

if __name__ == '__main__':

    # Training settings. The zero values below are placeholders and must be
    # set to positive values before training.
    num_batches = 0
    dim_z = 0
    epochs = 0

    learning_rate = 1e-4
    l2_loss = 1e-5
    seed = 12345

    asd_path = ['nds/AutDB_ASD_cnv_dataset.txt']

    # Uses the anglepy module from Kingma's GitHub repository (linked above)
    # to split the dataset into train/validation/test partitions.
    train_x, train_y, valid_x, valid_y, test_x, test_y = asd.load_numpy(asd_path, binarize_y=True)

    x_train, y_train = train_x.T, train_y.T
    x_valid, y_valid = valid_x.T, valid_y.T
    x_test, y_test = test_x.T, test_y.T

    dim_x = x_train.shape[1]
    dim_y = y_train.shape[1]

    # Each hidden layer is the square root of the size of the layer before
    # it, starting from the input dimension; the decoder (px) and the
    # encoder (qz) use the same layer sizes.
    hidden_layers_px = [int(np.sqrt(dim_x))]
    hidden_layers_px.append(int(np.sqrt(hidden_layers_px[0])))
    hidden_layers_px.append(int(np.sqrt(hidden_layers_px[1])))
    hidden_layers_qz = list(hidden_layers_px)

    VAE = VariationalAutoencoder(   dim_x = dim_x,
                                    dim_z = dim_z,
                                    hidden_layers_px = hidden_layers_px,
                                    hidden_layers_qz = hidden_layers_qz,
                                    l2_loss = l2_loss )

    # draw_img: draw samples every n iterations (set to 0 to disable)
    VAE.train(  x = x_train,
                x_valid = x_valid,
                epochs = epochs,
                num_batches = num_batches,
                learning_rate = learning_rate,
                seed = seed,
                stop_iter = 30,
                print_every = 10,
                draw_img = 0 )

    weights_as_numpy = VAE.get_weights()
    output_weights_to_label = weights_as_numpy[6]

    # Chain the layer weight matrices from the input genes to the label
    # output; the matrix product yields one score per input gene (this
    # assumes the matrices at indices 0-2 and 6 of get_weights() chain
    # together shape-wise).
    scores = weights_as_numpy[0].dot(weights_as_numpy[1]).dot(weights_as_numpy[2]).dot(output_weights_to_label)

    # Indices of genes with a positive score.
    genes_index = [i for i, v in enumerate(scores) if v > 0]
    print(genes_index)
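
# ---------------------------------------------------------------------------
# Minimal sketch (not part of the original pipeline) of how the chained
# matrix product above collapses the per-layer weights into one score per
# input gene. The shapes are invented for illustration and do not come from
# VariationalAutoencoder.get_weights():
#
#   w0 = np.random.randn(100, 10)     # input genes -> hidden layer 1
#   w1 = np.random.randn(10, 3)       # hidden layer 1 -> hidden layer 2
#   w2 = np.random.randn(3, 2)        # hidden layer 2 -> latent z
#   w_out = np.random.randn(2, 1)     # latent z -> label
#   s = w0.dot(w1).dot(w2).dot(w_out)                    # shape (100, 1)
#   positive = [i for i, v in enumerate(s.ravel()) if v > 0]
# ---------------------------------------------------------------------------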