import numpy as np

import sparsenet.nn_utils as nn
import sparsenet.convnet as convnet
import sparsenet.dataset as dataset
#We use Gnumpy as the matrix backend.
nn.set_backend("gnumpy",board=0,conv_backend="cudaconvnet")
#Load MNIST as numpy arrays: inputs, targets and their test/label counterparts.
X,T,X_test,T_test,X_labels,T_labels=dataset.MNIST.load("numpy")
#In-RAM data provider. Note T=X: this is an autoencoder, so the
#reconstruction target is the input itself.
dp = nn.dp_ram(X=X,T=X,X_test=X_test,T_test=T_test,T_train_labels=None,T_labels=T_labels,
train_range = [0,6],
test_range = [0,1])
#Preview the first 25 training digits in a 5x5 grid.
nn.show_images(X[:25,:,:,:],(5,5))
#Fully-connected WTA autoencoder config: lifetime sparsity on, no dropout,
#untied encoder/decoder weights.
cfg = convnet.cn.NeuralNetCfg(want_lifetime_sparsity=True,want_dropout = False,want_tied= False)
#Input layer: flattened 28x28 MNIST digits (784 pixels).
cfg.input_dense(shape=784,dropout = None)
#Lifetime sparsity of 5%
cfg.dense(num_filters=1000,activation=nn.relu,lifetime_sparsity=5,dropout = None)
#Linear output layer reconstructing the 784 input pixels.
cfg.output_dense(num_filters=784,activation=nn.linear)
#Euclidean (squared-error) reconstruction cost.
cfg.cost("euclidean")
#We are training a dense autoencoder on MNIST.
cfg.params(arch='dense',learning='auto',dataset='mnist')
cfg.info()
#Checkpoints are saved under this name.
cfg.save_location('fc-wta')
AE=convnet.NeuralNet(cfg)
#Train with SGD + momentum. learn_params is left empty here (no scheduled
#learning-rate drops). Interval values are presumably in epochs — confirm
#against the trainer implementation.
AE.train(dp,
mini_batch=100,
num_epochs = 3000,
initial_weights=.01,
momentum=.9,
learning_rate=0.01,learn_params={'epoch_1':[],'epoch_2':[]},
want_visual=True,visual_params={'interval':10,'save':False},
want_test=False,test_params={'interval':10},
want_log=True,log_params={'interval':1},
want_weights=True,weights_params={'interval':100,'want_last':True})
#Visualize the filters of previously trained models, one checkpoint per
#lifetime-sparsity setting (1%, 2%, 3%, 5%, 10%).
for pct in (1, 2, 3, 5, 10):
    AE.load("/media/gpu3/work/save/mnist_auto_%d/mnist_auto_%d_last" % (pct, pct))
    AE.show_filters()
#Tying weights improves the classification performance.
cfg = convnet.cn.NeuralNetCfg(want_lifetime_sparsity=True,want_dropout = False,want_tied= True)
cfg.input_dense(shape=784,dropout = None)
#lifetime_sparsity of 5%
cfg.dense(num_filters=2000,activation=nn.relu,lifetime_sparsity=5,dropout = None)
#Linear reconstruction layer back to 784 pixels.
cfg.output_dense(num_filters=784,activation=nn.linear)
cfg.cost("euclidean")
cfg.params(arch='dense',learning='auto',dataset='mnist')
cfg.info()
AE=convnet.NeuralNet(cfg)
#Load a pre-trained tied-weight model (2000 hidden units, 5% sparsity).
AE.load("/media/gpu3/work/save/mnist_auto_2000_5/mnist_auto_2000_5")
#At test time, we turn off the sparsity constraint
AE.test_mode = True
#We create a new dataset by passing all the training points through the network and obtaining their representations:
#(fix: the loop bodies below were originally not indented, which is a
#SyntaxError in Python; numpy is imported at the top of the file as np)
H_train = np.zeros((60000,2000))
for i in range(300):
    #Feed one mini-batch of 200 examples forward and store the hidden-layer
    #activations (AE.H[1]) as the learned feature vectors.
    AE.feedforward(nn.garray(dp.X[i*200:(i+1)*200]))
    H_train[i*200:(i+1)*200] = AE.H[1].as_numpy_array()
H_test = np.zeros((10000,2000))
for i in range(50):
    AE.feedforward(nn.garray(dp.X_test[i*200:(i+1)*200]))
    H_test[i*200:(i+1)*200] = AE.H[1].as_numpy_array()
#Re-enable the sparsity constraint after feature extraction.
AE.test_mode = False
#We train a logistic regression on top of the unsupervised features.
LR_cfg = convnet.cn.NeuralNetCfg()
#Input: the 2000-dimensional hidden representations extracted above.
LR_cfg.input_dense(shape=2000)
#Softmax over the 10 MNIST classes.
LR_cfg.output_dense(num_filters=10,activation=nn.softmax)
LR_cfg.cost("cross-entropy")
#Discriminative (supervised) learning mode.
LR_cfg.params(arch='dense',learning='disc',dataset='mnist')
LR=convnet.NeuralNet(LR_cfg)
#Data Provider for Logistic Regression: extracted features as inputs,
#original MNIST targets T/T_test as labels.
dp = nn.dp_ram(X=H_train,T=T,X_test=H_test,T_test=T_test,T_train_labels=None,T_labels=None,
train_range = [0,6],
test_range = [0,1])
#Train the classifier for 20 epochs with a scheduled learning-rate decay:
#.1 initially, dropped to .01 at epoch 15 and .001 at epoch 18.
#Interval values are presumably in epochs — confirm against the trainer.
LR.train(dp,
mini_batch=100,
num_epochs = 20,
initial_weights=.01,
momentum=.9,
learning_rate=.1,learn_params={'epoch_1':[15,.01],'epoch_2':[18,.001]},
want_visual=False,visual_params={'interval':3,'save':False},
want_test=True,test_params={'interval':10},
want_log=False,log_params={'interval':1},
want_weights=False,weights_params={'interval':100})