You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

GroupLASSONN.py 2.9KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879
  1. import tensorflow as tf
  2. import numpy as np
  3. import pandas as pd
  4. from pylab import rcParams
  5. import matplotlib.pyplot as plt
  6. import warnings
  7. from mlxtend.plotting import plot_decision_regions
  8. from matplotlib.colors import ListedColormap
  9. from tensorflow.keras.models import Sequential
  10. from tensorflow.keras.layers import Dense
  11. from sklearn.model_selection import train_test_split
  12. from group_lasso import BaseGroupLasso, GroupLasso
  13. from tensorflow import keras
  14. import math
  15. #using https://github.com/yngvem/group-lasso/
  16. warnings.filterwarnings('ignore')
  17. nd_path = ['nds/allnds_case.txt', 'nds/allnds_control.txt']
  18. X, y = load_dataset(nd_path)
  19. X_train, X_test, y_train, y_test = train_test_split(X, y,
  20. test_size=0.33,
  21. random_state=42)
  22. reg_model = Sequential()
  23. reg_model.add(Dense(math.sqrt(X_train.shape[1]), input_dim= X_train.shape[1], activation='relu'))
  24. reg_model.add(Dense(1, activation='sigmoid'))
  25. reg_model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
  26. reg_history = reg_model.fit(X_train, y_train,
  27. validation_data=(X_test, y_test),
  28. epochs=4000, verbose=1)
  29. # serialize model to JSON
  30. model_json = model.to_json()
  31. with open('model.json', 'w') as json_file:
  32. json_file.write(model_json)
  33. # serialize weights to HDF5
  34. model.save_weights('model.h5')
  35. print('Saved model to disk')
  36. # load json and create model
  37. json_file = open('model.json', 'r')
  38. loaded_model_json = json_file.read()
  39. json_file.close()
  40. loaded_model = model_from_json(loaded_model_json)
  41. # load weights into new model
  42. loaded_model.load_weights("model.h5")
  43. print("Loaded model from disk")
  44. # evaluate loaded model on test data
  45. # loaded_model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
  46. asd_path = ['nds/asd_case.txt', 'nds/asd_control.txt']
  47. X, y = load_dataset(asd_path)
  48. #https://github.com/bhattbhavesh91/regularization-neural-networks/blob/master/regularization-notebook.ipynb
  49. X_train, X_test, y_train, y_test = train_test_split(X, y,
  50. test_size=0.33,
  51. random_state=42)
  52. reg_model = Sequential()
  53. model.layers[0].set_weights(loaded_model[0])
  54. reg_model.add(Dense(math.sqrt(X_train.shape[1]), input_dim= X_train.shape[1], activation='relu', kernel_regularizer='group_lasso'))
  55. reg_model.add(Dense(1, activation='ReLU'))
  56. reg_model.add(Dense(1, activation='sigmoid'))
  57. reg_model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
  58. reg_history = reg_model.fit(X_train, y_train,
  59. validation_data=(X_test, y_test),
  60. epochs=4000, verbose=1)
  61. model = keras.models.load_model('path/to/saved/model')
  62. weights = model.get_layer('input').get_weights()
  63. values = weights[0]
  64. for indx, v in enumerate(values):
  65. if v!=0:
  66. print('nonzero input', indx)