# NAM LSTM Configuration
# Dataset configuration: input/output reamp WAV pairs plus alignment info.
data_config = {
    # Training pair. "ny" is presumably the number of samples per training
    # segment (48000 = 1 s at 48 kHz) — TODO confirm against the NAM docs.
    "train": dict(
        x_path="drive/MyDrive/x_train.wav",
        y_path="drive/MyDrive/y_train.wav",
        ny=48000,
    ),
    # Validation pair. ny=None presumably means "use the whole file".
    "validation": dict(
        x_path="drive/MyDrive/x_test.wav",
        y_path="drive/MyDrive/y_test.wav",
        ny=None,
    ),
    # Asks the user for the reamp latency (in samples) at definition time so
    # the x/y files can be time-aligned.
    "common": dict(
        delay=int(input("What is the latency (in samples) of your reamp? "))
    ),
}
# Model settings: 2-layer LSTM network, MSE validation loss, Adam-style LR
# with exponential decay. "#!#" notes record earlier values that were tried.
model_config = dict(
    net=dict(
        name="LSTM",
        config=dict(
            num_layers=2,
            hidden_size=14,
            train_burn_in=64,       #!# Much larger originally
            train_truncate=20480,   #!# Much smaller originally
        ),
    ),
    loss=dict(
        val_loss="mse",
        mask_first=64,              #!# 4096
        # Optional pre-emphasis loss terms, currently disabled:
        # pre_emph_weight=1.0,
        # pre_emph_coef=0.90,
    ),
    optimizer=dict(lr=0.005),       #!# 0.004
    # "class" is a Python keyword, so this nested dict stays a literal.
    lr_scheduler={"class": "ExponentialLR", "kwargs": {"gamma": 0.997}},
)
# DataLoader and trainer settings.
# FIX: the original text was truncated here — the "trainer" dict, the
# "train_dataloader"-level dict, and the learning_config assignment were
# never closed. Closing braces restored to match the parallel block below.
learning_config = {
    "train_dataloader": {
        "batch_size": 8,        #!# 16
        "shuffle": True,
        "pin_memory": True,
        "drop_last": False,     #!# True
        "num_workers": 8,       #!# 0
    },
    # Empty dict: validation loader uses the library defaults.
    "val_dataloader": {},
    "trainer": {
        "accelerator": "gpu",
        "devices": 1,
        "max_epochs": 1000,     #!# 500-650 is usually enough
    },
}
# ---- Old/earlier and less effective settings (kept for reference): ----
# NOTE(review): duplicate of the data_config defined above — if both cells
# are run, this assignment overwrites the earlier one. Appears to be an
# archived older version kept for reference.
data_config = {
    # Training reamp pair; "ny" is presumably samples per training segment
    # (48000 = 1 s at 48 kHz) — TODO confirm against the NAM docs.
    "train": {
        "x_path": "drive/MyDrive/x_train.wav",
        "y_path": "drive/MyDrive/y_train.wav",
        "ny": 48000
    },
    # Validation pair; "ny": None presumably means "use the whole file".
    "validation": {
        "x_path": "drive/MyDrive/x_test.wav",
        "y_path": "drive/MyDrive/y_test.wav",
        "ny": None
    },
    # Prompts the user for the reamp latency (in samples) at definition time.
    "common": {
        "delay": int(input("What is the latency (in samples) of your reamp? "))
    },
}
# Earlier (superseded) model settings, kept for reference: pre-emphasis loss
# enabled, larger mask_first, and a higher learning rate than the current cell.
model_config = dict(
    net=dict(
        name="LSTM",
        config=dict(
            num_layers=2,           #!# 1 Sometimes creates artifacts
            hidden_size=14,         #!# Try 28 for really difficult/complex models
            train_burn_in=256,      #!# Careful!
            train_truncate=20480,   #!# Careful!
        ),
    ),
    loss=dict(
        val_loss="mse",
        mask_first=4096,
        pre_emph_weight=1.0,
        pre_emph_coef=0.90,
    ),
    optimizer=dict(lr=0.01),
    # "class" is a Python keyword, so this nested dict stays a literal.
    lr_scheduler={"class": "ExponentialLR", "kwargs": {"gamma": 0.997}},
)
# Earlier (superseded) DataLoader and trainer settings, kept for reference.
# FIX: the original text was truncated here — the "trainer" dict, the
# "train_dataloader"-level dict, and the learning_config assignment were
# never closed. Closing braces restored to match the structure above.
learning_config = {
    "train_dataloader": {
        "batch_size": 8,
        "shuffle": True,
        "pin_memory": False,
        "drop_last": False,
        "num_workers": 8,   #!# With this input and test file 8 works. Otherwise try 0
    },
    # Empty dict: validation loader uses the library defaults.
    "val_dataloader": {},
    "trainer": {
        "accelerator": "gpu",
        "devices": 1,
        "max_epochs": 2000,  #!# Choose how many you want. 500 is usually enough
    },
}
# (C)(R) 2024-06-22, R Gerthsson