slslslrhfem committed · Commit 617e256 · 1 parent: 035eb8a

fix some model

Files changed:
- inference.py +0 -1
- model.py +0 -1
inference.py CHANGED
@@ -213,7 +213,6 @@ def inference(audio_path):
         checkpoint_path = 'checkpoints/EmbeddingModel_MERT_768-epoch=0073-val_loss=0.1058-val_acc=0.9585-val_f1=0.9366-val_precision=0.9936-val_recall=0.8857.ckpt',
         input_dim=input_dim,
         #emb_model=backbone_model
-        is_emb = True,
         )


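For context, this hunk removes the is_emb keyword from the classifier call inside inference(), so only the checkpoint path and input dimension (plus the commented-out emb_model) are still forwarded. Below is a minimal sketch of the post-commit call site; the load_classifier wrapper, the use of PyTorch Lightning's load_from_checkpoint, and the origin of input_dim are assumptions, since the hunk shows only the keyword arguments.

# Hedged sketch of the updated call site after this commit. The wrapper name,
# the load_from_checkpoint loader, and input_dim's origin are assumptions;
# only the keyword arguments mirror the diff above.
from model import MusicAudioClassifier

def load_classifier(input_dim: int) -> MusicAudioClassifier:
    # is_emb is no longer passed; the remaining kwargs match the diff.
    model = MusicAudioClassifier.load_from_checkpoint(
        checkpoint_path='checkpoints/EmbeddingModel_MERT_768-epoch=0073-val_loss=0.1058-val_acc=0.9585-val_f1=0.9366-val_precision=0.9936-val_recall=0.8857.ckpt',
        input_dim=input_dim,
        # emb_model=backbone_model  (still commented out, as in the diff)
    )
    model.eval()
    return model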
model.py CHANGED
@@ -17,7 +17,6 @@ class MusicAudioClassifier(pl.LightningModule):
                  hidden_dim: int = 256,
                  learning_rate: float = 1e-4,
                  emb_model: Optional[nn.Module] = None,
-                 is_emb: bool = False,
                  backbone: str = 'segment_transformer',
                  num_classes: int = 2):
         super().__init__()
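On the model side, the constructor simply drops the is_emb parameter, matching the call-site change in inference.py. A minimal sketch of the resulting signature is below, reconstructed from the parameters visible in this hunk; the leading input_dim parameter and the save_hyperparameters call are assumptions added only to keep the sketch self-contained.

# Sketch of MusicAudioClassifier.__init__ after this commit. Only the
# parameters shown in the diff are taken from the source; input_dim and
# save_hyperparameters are assumptions, not part of the hunk.
from typing import Optional

import pytorch_lightning as pl
import torch.nn as nn

class MusicAudioClassifier(pl.LightningModule):
    def __init__(self,
                 input_dim: int,                      # assumed leading parameter
                 hidden_dim: int = 256,
                 learning_rate: float = 1e-4,
                 emb_model: Optional[nn.Module] = None,
                 backbone: str = 'segment_transformer',
                 num_classes: int = 2):
        super().__init__()
        self.save_hyperparameters(ignore=['emb_model'])  # assumed; not in the hunk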