You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
MKL_NUM_THREADS=1 CUDA_VISIBLE_DEVICES=1 python run.py +experiment=speaker_ecapa_tdnn tune_model=True data/module=dogbark trainer.auto_lr_find=auto_lr_find tune_iterations=5000
/home/arthur/.conda/envs/w2v2/lib/python3.8/site-packages/pytorch_lightning/core/decorators.py:65: LightningDeprecationWarning: The @auto_move_data decorator is deprecated in v1.3 and will be removed in v1.5. Please use trainer.predict instead for inference. The decorator was applied to forward
rank_zero_deprecation(
/home/arthur/.conda/envs/w2v2/lib/python3.8/site-packages/pytorch_lightning/core/decorators.py:65: LightningDeprecationWarning: The @auto_move_data decorator is deprecated in v1.3 and will be removed in v1.5. Please use trainer.predict instead for inference. The decorator was applied to forward
rank_zero_deprecation(
data_folder: ${oc.env:DATA_FOLDER}
temp_folder: ${oc.env:TEMP_FOLDER}
log_folder: ${oc.env:LOG_FOLDER}
seed: 42133724
tune_model: true
tune_iterations: 5000
verify_model: false
fit_model: true
eval_model: true
load_network_from_checkpoint: null
use_cometml: ${oc.decode:${oc.env:USE_COMET_ML}}
gpus: ${oc.decode:${oc.env:NUM_GPUS}}
project_name: ecapa-tdnn
experiment_name: ${random_uuid:}
tag: ${now:%Y-%m-%d}
callbacks:
to_add:
3.9.0
pytorch_lightning.__version__='1.4.5'
torch.__version__='1.8.2+cu102'
[2023-03-27 20:54:04,608][pytorch_lightning.utilities.seed][INFO] - Global seed set to 42133724
Error executing job with overrides: ['+experiment=speaker_ecapa_tdnn', 'tune_model=True', 'data/module=dogbark', 'trainer.auto_lr_find=auto_lr_find', 'tune_iterations=5000']
Traceback (most recent call last):
File "/home/arthur/.conda/envs/w2v2/lib/python3.8/site-packages/hydra/_internal/instantiate/_instantiate2.py", line 62, in _call_target
return target(*args, **kwargs)
File "<string>", line 33, in __init__
File "/home/arthur/dog_verification/w2v2-speaker-master/src/config_util.py", line 26, in __post_init__
post_init_type_cast(self)
File "/home/arthur/dog_verification/w2v2-speaker-master/src/config_util.py", line 41, in post_init_type_cast
elif isinstance(value, typehint_cls):
TypeError: isinstance() arg 2 must be a type or tuple of types
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/arthur/.conda/envs/w2v2/lib/python3.8/site-packages/hydra/_internal/utils.py", line 211, in run_and_report
return func()
File "/home/arthur/.conda/envs/w2v2/lib/python3.8/site-packages/hydra/_internal/utils.py", line 368, in <lambda>
lambda: hydra.run(
File "/home/arthur/.conda/envs/w2v2/lib/python3.8/site-packages/hydra/_internal/hydra.py", line 110, in run
_ = ret.return_value
File "/home/arthur/.conda/envs/w2v2/lib/python3.8/site-packages/hydra/core/utils.py", line 233, in return_value
raise self._return_value
File "/home/arthur/.conda/envs/w2v2/lib/python3.8/site-packages/hydra/core/utils.py", line 160, in run_job
ret.return_value = task_function(task_cfg)
File "run.py", line 38, in run
return run_train_eval_script(cfg)
File "/home/arthur/dog_verification/w2v2-speaker-master/src/main.py", line 429, in run_train_eval_script
dm = construct_data_module(cfg)
File "/home/arthur/dog_verification/w2v2-speaker-master/src/main.py", line 127, in construct_data_module
dm_cfg = hydra.utils.instantiate(cfg.data.module)
File "/home/arthur/.conda/envs/w2v2/lib/python3.8/site-packages/hydra/_internal/instantiate/_instantiate2.py", line 180, in instantiate
return instantiate_node(config, *args, recursive=recursive, convert=convert)
File "/home/arthur/.conda/envs/w2v2/lib/python3.8/site-packages/hydra/_internal/instantiate/_instantiate2.py", line 249, in instantiate_node
return _call_target(target, *args, **kwargs)
File "/home/arthur/.conda/envs/w2v2/lib/python3.8/site-packages/hydra/_internal/instantiate/_instantiate2.py", line 64, in _call_target
raise type(e)(
File "/home/arthur/.conda/envs/w2v2/lib/python3.8/site-packages/hydra/_internal/instantiate/_instantiate2.py", line 62, in _call_target
return target(*args, **kwargs)
File "<string>", line 33, in __init__
File "/home/arthur/dog_verification/w2v2-speaker-master/src/config_util.py", line 26, in __post_init__
post_init_type_cast(self)
File "/home/arthur/dog_verification/w2v2-speaker-master/src/config_util.py", line 41, in post_init_type_cast
elif isinstance(value, typehint_cls):
TypeError: Error instantiating 'src.data.modules.speaker.voxceleb.VoxCelebDataModuleConfig' : isinstance() arg 2 must be a type or tuple of types
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "run.py", line 48, in <module>
run()
File "/home/arthur/.conda/envs/w2v2/lib/python3.8/site-packages/hydra/main.py", line 49, in decorated_main
_run_hydra(
File "/home/arthur/.conda/envs/w2v2/lib/python3.8/site-packages/hydra/_internal/utils.py", line 367, in _run_hydra
run_and_report(
File "/home/arthur/.conda/envs/w2v2/lib/python3.8/site-packages/hydra/_internal/utils.py", line 251, in run_and_report
assert mdl is not None
AssertionError
The text was updated successfully, but these errors were encountered:
MKL_NUM_THREADS=1 CUDA_VISIBLE_DEVICES=1 python run.py +experiment=speaker_ecapa_tdnn tune_model=True data/module=dogbark trainer.auto_lr_find=auto_lr_find tune_iterations=5000
/home/arthur/.conda/envs/w2v2/lib/python3.8/site-packages/pytorch_lightning/core/decorators.py:65: LightningDeprecationWarning: The @auto_move_data decorator is deprecated in v1.3 and will be removed in v1.5. Please use trainer.predict instead for inference. The decorator was applied to forward
rank_zero_deprecation(
/home/arthur/.conda/envs/w2v2/lib/python3.8/site-packages/pytorch_lightning/core/decorators.py:65: LightningDeprecationWarning: The @auto_move_data decorator is deprecated in v1.3 and will be removed in v1.5. Please use trainer.predict instead for inference. The decorator was applied to forward
rank_zero_deprecation(
data_folder: ${oc.env:DATA_FOLDER}
temp_folder: ${oc.env:TEMP_FOLDER}
log_folder: ${oc.env:LOG_FOLDER}
seed: 42133724
tune_model: true
tune_iterations: 5000
verify_model: false
fit_model: true
eval_model: true
load_network_from_checkpoint: null
use_cometml: ${oc.decode:${oc.env:USE_COMET_ML}}
gpus: ${oc.decode:${oc.env:NUM_GPUS}}
project_name: ecapa-tdnn
experiment_name: ${random_uuid:}
tag: ${now:%Y-%m-%d}
callbacks:
to_add:
lr_monitor:
_target_: pytorch_lightning.callbacks.LearningRateMonitor
ram_monitor:
_target_: src.callbacks.memory_monitor.RamMemoryMonitor
frequency: 100
checkpoint:
_target_: pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint
monitor: val_eer
save_top_k: 1
mode: min
filename: '{epoch}.{step}.{val_eer:.4f}.best'
save_last: true
every_n_val_epochs: 1
last_checkpoint_pattern: '{epoch}.{step}.{val_eer:.4f}.last'
data:
module:
_target_: src.data.modules.speaker.voxceleb.VoxCelebDataModuleConfig
use_voxceleb1_dev: true
use_voxceleb1_test: true
use_voxceleb2_dev: false
use_voxceleb2_test: false
all_voxceleb1_is_test_set: false
has_train: true
has_val: false
has_test: true
test_split_file_path: ${data_folder}/veri_test.txt
shards_folder: ${data_folder}/dog_barking_shards
extraction_folder: ${temp_folder}/dog_barking
split_mode: equal
train_val_ratio: 0.97
num_val_speakers: -1
eer_validation_pairs: 10000
sequential_same_speaker_samples: 1
min_unique_speakers_per_shard: 500
discard_partial_shards: true
voxceleb1_train_zip_path: ${data_folder}/voxceleb_archives/vox1_dev_wav.zip
voxceleb1_test_zip_path: ${data_folder}/voxceleb_archives/vox1_test_wav.zip
voxceleb2_train_zip_path: ${data_folder}/voxceleb_archives/vox2_dev_wav.zip
voxceleb2_test_zip_path: ${data_folder}/voxceleb_archives/vox2_test_wav.zip
train_collate_fn: pad_right
val_collate_fn: default
test_collate_fn: default
add_batch_debug_info: false
limit_samples: -1
batch_processing_mode: categorical
pipeline:
train_pipeline:
val_pipeline:
test_pipeline:
selector_train:
_target_: src.data.preprocess.random_chunks.AudioChunkSelector
selection_strategy: random
desired_chunk_length_sec: 3
selector_val:
_target_: src.data.preprocess.random_chunks.AudioChunkSelector
selection_strategy: start
desired_chunk_length_sec: 3
filterbank:
_target_: src.data.preprocess.audio_features.FilterBank
n_mels: 40
normalizer:
_target_: src.data.preprocess.input_normalisation.InputNormalizer2D
normalize_over_channels: true
shards:
_target_: src.data.common.WebDataSetShardConfig
samples_per_shard: 500
use_gzip_compression: true
shuffle_shards: true
queue_size: 1024
dataloader:
_target_: src.data.common.SpeakerDataLoaderConfig
train_batch_size: 32
val_batch_size: ${data.dataloader.train_batch_size}
test_batch_size: 1
num_workers: 2
pin_memory: true
evaluator:
_target_: src.evaluation.speaker.cosine_distance.CosineDistanceEvaluator
center_before_scoring: false
length_norm_before_scoring: false
max_num_training_samples: 0
network:
_target_: src.lightning_modules.speaker.ecapa_tdnn.EcapaTDNNModuleConfig
input_mel_coefficients: ${data.pipeline.filterbank.n_mels}
lin_neurons: 192
channels:
kernel_sizes:
dilations:
attention_channels: 128
res2net_scale: 8
se_channels: 128
global_context: true
pretrained_weights_path: null
explicit_stat_pool_embedding_size: null
explicit_num_speakers: null
tokenizer:
_target_: src.tokenizer.tokenizer_wav2vec2.Wav2vec2TokenizerConfig
tokenizer_huggingface_id: facebook/wav2vec2-base-960h
optim:
algo:
_target_: torch.optim.Adam
lr: 0.0001
weight_decay: 0
betas:
eps: 1.0e-08
amsgrad: false
schedule:
scheduler:
_target_: torch.optim.lr_scheduler.OneCycleLR
max_lr: ${optim.algo.lr}
total_steps: ${trainer.max_steps}
div_factor: 25
monitor: null
interval: step
frequency: null
name: null
loss:
_target_: src.optim.loss.aam_softmax.AngularAdditiveMarginSoftMaxLoss
input_features: 192
output_features: 5994
margin: 0.2
scale: 30
output_features: 48
trainer:
_target_: pytorch_lightning.Trainer
gpus: ${gpus}
accelerator: null
num_nodes: 1
min_epochs: null
max_epochs: null
min_steps: null
max_steps: 100000
val_check_interval: 5000
accumulate_grad_batches: 1
progress_bar_refresh_rate: 500
deterministic: false
limit_train_batches: 1.0
limit_val_batches: 1.0
limit_test_batches: 1.0
fast_dev_run: false
precision: 32
num_sanity_val_steps: 2
auto_lr_find: auto_lr_find
gradient_clip_val: 0
3.9.0
pytorch_lightning.__version__='1.4.5'
torch.__version__='1.8.2+cu102'
[2023-03-27 20:54:04,608][pytorch_lightning.utilities.seed][INFO] - Global seed set to 42133724
Error executing job with overrides: ['+experiment=speaker_ecapa_tdnn', 'tune_model=True', 'data/module=dogbark', 'trainer.auto_lr_find=auto_lr_find', 'tune_iterations=5000']
Traceback (most recent call last):
File "/home/arthur/.conda/envs/w2v2/lib/python3.8/site-packages/hydra/_internal/instantiate/_instantiate2.py", line 62, in _call_target
return target(*args, **kwargs)
File "<string>", line 33, in __init__
File "/home/arthur/dog_verification/w2v2-speaker-master/src/config_util.py", line 26, in __post_init__
post_init_type_cast(self)
File "/home/arthur/dog_verification/w2v2-speaker-master/src/config_util.py", line 41, in post_init_type_cast
elif isinstance(value, typehint_cls):
TypeError: isinstance() arg 2 must be a type or tuple of types
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/arthur/.conda/envs/w2v2/lib/python3.8/site-packages/hydra/_internal/utils.py", line 211, in run_and_report
return func()
File "/home/arthur/.conda/envs/w2v2/lib/python3.8/site-packages/hydra/_internal/utils.py", line 368, in <lambda>
lambda: hydra.run(
File "/home/arthur/.conda/envs/w2v2/lib/python3.8/site-packages/hydra/_internal/hydra.py", line 110, in run
_ = ret.return_value
File "/home/arthur/.conda/envs/w2v2/lib/python3.8/site-packages/hydra/core/utils.py", line 233, in return_value
raise self._return_value
File "/home/arthur/.conda/envs/w2v2/lib/python3.8/site-packages/hydra/core/utils.py", line 160, in run_job
ret.return_value = task_function(task_cfg)
File "run.py", line 38, in run
return run_train_eval_script(cfg)
File "/home/arthur/dog_verification/w2v2-speaker-master/src/main.py", line 429, in run_train_eval_script
dm = construct_data_module(cfg)
File "/home/arthur/dog_verification/w2v2-speaker-master/src/main.py", line 127, in construct_data_module
dm_cfg = hydra.utils.instantiate(cfg.data.module)
File "/home/arthur/.conda/envs/w2v2/lib/python3.8/site-packages/hydra/_internal/instantiate/_instantiate2.py", line 180, in instantiate
return instantiate_node(config, *args, recursive=recursive, convert=convert)
File "/home/arthur/.conda/envs/w2v2/lib/python3.8/site-packages/hydra/_internal/instantiate/_instantiate2.py", line 249, in instantiate_node
return _call_target(target, *args, **kwargs)
File "/home/arthur/.conda/envs/w2v2/lib/python3.8/site-packages/hydra/_internal/instantiate/_instantiate2.py", line 64, in _call_target
raise type(e)(
File "/home/arthur/.conda/envs/w2v2/lib/python3.8/site-packages/hydra/_internal/instantiate/_instantiate2.py", line 62, in _call_target
return target(*args, **kwargs)
File "<string>", line 33, in __init__
File "/home/arthur/dog_verification/w2v2-speaker-master/src/config_util.py", line 26, in __post_init__
post_init_type_cast(self)
File "/home/arthur/dog_verification/w2v2-speaker-master/src/config_util.py", line 41, in post_init_type_cast
elif isinstance(value, typehint_cls):
TypeError: Error instantiating 'src.data.modules.speaker.voxceleb.VoxCelebDataModuleConfig' : isinstance() arg 2 must be a type or tuple of types
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "run.py", line 48, in <module>
run()
File "/home/arthur/.conda/envs/w2v2/lib/python3.8/site-packages/hydra/main.py", line 49, in decorated_main
_run_hydra(
File "/home/arthur/.conda/envs/w2v2/lib/python3.8/site-packages/hydra/_internal/utils.py", line 367, in _run_hydra
run_and_report(
File "/home/arthur/.conda/envs/w2v2/lib/python3.8/site-packages/hydra/_internal/utils.py", line 251, in run_and_report
assert mdl is not None
AssertionError
The text was updated successfully, but these errors were encountered: