Hot-keys on this page
r m x p toggle line displays
j k next/prev highlighted chunk
0 (zero) top of page
1 (one) first highlighted chunk
1from torch import nn
2import torchapp as ta
3from torchapp.examples.logistic_regression import LogisticRegressionApp
class MultilayerPerceptronApp(LogisticRegressionApp):
    """A torchapp example that trains a multilayer perceptron (MLP).

    Inherits data handling and training from LogisticRegressionApp and
    overrides only the model construction.
    """

    def model(
        self,
        hidden_layers: int = ta.Param(default=1, tune_min=0, tune_max=4, tune=True, help="The number of hidden layers"),
        hidden_size: int = ta.Param(default=64, tune_min=8, tune_max=256, tune=True, tune_log=True, help="The size of the hidden layers"),
        hidden_bias: bool = ta.Param(default=True, tune=True, help="Whether or not the hidden layers have bias"),
    ) -> nn.Module:
        """Build an MLP with ``hidden_layers`` hidden layers of width ``hidden_size``.

        Args:
            hidden_layers: Number of hidden Linear layers (0 gives a plain linear model).
            hidden_size: Output width of each hidden layer.
            hidden_bias: Whether hidden layers include a bias term.

        Returns:
            nn.Module: a Sequential of Linear+ReLU pairs followed by a
            single-output Linear layer.
        """
        # NOTE(review): assumes a single scalar input feature, matching the
        # parent LogisticRegressionApp — TODO confirm against its dataloader.
        in_features = 1

        layer_list = []
        for _ in range(hidden_layers):
            layer_list.append(nn.Linear(in_features=in_features, out_features=hidden_size, bias=hidden_bias))
            # BUG FIX: without a non-linearity between hidden layers, the
            # stacked Linear layers collapse into a single affine map and the
            # network is not a true multilayer perceptron.
            layer_list.append(nn.ReLU())
            in_features = hidden_size

        # Final layer maps to a single output; always keeps its bias.
        layer_list.append(nn.Linear(in_features=in_features, out_features=1, bias=True))

        return nn.Sequential(*layer_list)
if __name__ == "__main__":
    # Script entry point: launch the torchapp command-line interface for this app.
    MultilayerPerceptronApp.main()