Origin: https://github.com/open-mmlab/Amphion
Archived: 09 September 2024, 06:46:44 UTC
Branches (2): refs/heads/main, refs/heads/revert-154-FACodec-readme
Releases (3): v0.1.1-alpha, v0.1.0-alpha, v0.1.0
Path: 1e97ea0 / models / base / base_inference.py
Take a new snapshot of a software origin

If the archived software origin currently browsed is not synchronized with its upstream version (for instance, when new commits have been issued), you can explicitly request that Software Heritage take a new snapshot of it. Once a request has been submitted and accepted, it is processed as soon as possible, and its processing state can then be checked on a dedicated page.
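A snapshot request can also be submitted programmatically. The sketch below is a minimal illustration that assumes the public "Save code now" Web API endpoint and the third-party requests package; it is not an official client, and some origins may require authentication.

import requests

# Minimal sketch, assuming the public "Save code now" Web API endpoint
# /api/1/origin/save/<visit_type>/url/<origin_url>/ on archive.softwareheritage.org.
origin_url = "https://github.com/open-mmlab/Amphion"
api = f"https://archive.softwareheritage.org/api/1/origin/save/git/url/{origin_url}/"

# POST submits the snapshot request; a later GET on the same URL reports its state.
resp = requests.post(api)
resp.raise_for_status()
print(resp.json().get("save_request_status"))  # e.g. "accepted" or "pending"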

To reference or cite objects in the Software Heritage archive, permalinks based on SoftWare Hash IDentifiers (SWHIDs) must be used. Each type of object currently browsed has its own SWHID and permalink, listed below.

  • content: swh:1:cnt:2713f19a0d61f06bca1f01de5ccd8a3b4d2cc02f
  • directory: swh:1:dir:3d94ed501da5d3fbbeecc0047c44e7dfbdf81979
  • revision: swh:1:rev:6f47d3a8cab69b1dfdb354456257b5cf88412c59
  • snapshot: swh:1:snp:bef780d851faeac80aef6db569e51e66f505bf34
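A SWHID can also be resolved programmatically. The sketch below is a minimal illustration assuming the resolve endpoint of the public Software Heritage Web API and the requests package; the response field names shown are assumptions.

import requests

# Minimal sketch, assuming the /api/1/resolve/<swhid>/ endpoint of the public
# Software Heritage Web API; response field names may differ.
swhid = "swh:1:cnt:2713f19a0d61f06bca1f01de5ccd8a3b4d2cc02f"
resp = requests.get(f"https://archive.softwareheritage.org/api/1/resolve/{swhid}/")
resp.raise_for_status()
info = resp.json()
print(info.get("object_type"))  # kind of object the SWHID points to
print(info.get("browse_url"))   # URL to browse the object in the archive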

This interface can generate software citations, provided that the root directory of the browsed objects contains a citation.cff or codemeta.json file (see the sketch below). A citation can be generated for each type of object currently browsed.

  • content
  • directory
  • revision
  • snapshot
Citations are generated in BibTeX format and require the biblatex-software LaTeX package.
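As a hedged illustration of the metadata such a file might contain, the sketch below writes a minimal codemeta.json; the field values are placeholders and are not taken from the Amphion repository.

import json

# Minimal sketch of a codemeta.json at the repository root, which enables
# citation generation; all field values below are illustrative placeholders.
codemeta = {
    "@context": "https://doi.org/10.5063/schema/codemeta-2.0",
    "@type": "SoftwareSourceCode",
    "name": "Amphion",
    "codeRepository": "https://github.com/open-mmlab/Amphion",
    "license": "https://spdx.org/licenses/MIT",
}

with open("codemeta.json", "w", encoding="utf-8") as f:
    json.dump(codemeta, f, indent=2)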
Tip revision 6f47d3a8cab69b1dfdb354456257b5cf88412c59, authored by Xueyao Zhang on 12 March 2024, 11:52:50 UTC: Revert "fix a typo in NS3 readme"
base_inference.py
# Copyright (c) 2023 Amphion.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import argparse
import os
import re
import time
from pathlib import Path

import torch
from torch.utils.data import DataLoader
from tqdm import tqdm

from models.vocoders.vocoder_inference import synthesis
from utils.util import load_config, set_all_random_seed


def parse_vocoder(vocoder_dir):
    r"""Parse vocoder config"""
    vocoder_dir = os.path.abspath(vocoder_dir)
    ckpt_list = [ckpt for ckpt in Path(vocoder_dir).glob("*.pt")]
    ckpt_list.sort(key=lambda x: int(x.stem), reverse=True)
    ckpt_path = str(ckpt_list[0])
    vocoder_cfg = load_config(os.path.join(vocoder_dir, "args.json"), lowercase=True)
    vocoder_cfg.model.bigvgan = vocoder_cfg.vocoder
    return vocoder_cfg, ckpt_path


class BaseInference(object):
    def __init__(self, cfg, args):
        self.cfg = cfg
        self.args = args
        self.model_type = cfg.model_type
        self.avg_rtf = list()
        set_all_random_seed(10086)
        os.makedirs(args.output_dir, exist_ok=True)

        if torch.cuda.is_available():
            self.device = torch.device("cuda")
        else:
            self.device = torch.device("cpu")
            torch.set_num_threads(10)  # use up to 10 CPU threads for CPU-only inference

        # Load acoustic model
        self.model = self.create_model().to(self.device)
        state_dict = self.load_state_dict()
        self.load_model(state_dict)
        self.model.eval()

        # Load vocoder model if necessary
        if self.args.checkpoint_dir_vocoder is not None:
            self.get_vocoder_info()

    def create_model(self):
        raise NotImplementedError

    def load_state_dict(self):
        self.checkpoint_file = self.args.checkpoint_file
        if self.checkpoint_file is None:
            assert self.args.checkpoint_dir is not None
            # The "checkpoint" index file lists checkpoint filenames; use the last (latest) entry
            checkpoint_path = os.path.join(self.args.checkpoint_dir, "checkpoint")
            checkpoint_filename = open(checkpoint_path).readlines()[-1].strip()
            self.checkpoint_file = os.path.join(
                self.args.checkpoint_dir, checkpoint_filename
            )

        self.checkpoint_dir = os.path.split(self.checkpoint_file)[0]

        print("Restore acoustic model from {}".format(self.checkpoint_file))
        raw_state_dict = torch.load(self.checkpoint_file, map_location=self.device)
        # Extract the training step from a filename like ".../step-XXXX_loss-YYYY.pt"
        self.am_restore_step = re.findall(r"step-(.+?)_loss", self.checkpoint_file)[0]

        return raw_state_dict

    def load_model(self, model):
        raise NotImplementedError

    def get_vocoder_info(self):
        self.checkpoint_dir_vocoder = self.args.checkpoint_dir_vocoder
        self.vocoder_cfg = os.path.join(
            os.path.dirname(self.checkpoint_dir_vocoder), "args.json"
        )
        self.cfg.vocoder = load_config(self.vocoder_cfg, lowercase=True)
        self.vocoder_tag = self.checkpoint_dir_vocoder.split("/")[-2].split(":")[-1]
        self.vocoder_steps = self.checkpoint_dir_vocoder.split("/")[-1].split(".")[0]

    def build_test_utt_data(self):
        raise NotImplementedError

    def build_testdata_loader(self, args, target_speaker=None):
        datasets, collate = self.build_test_dataset()
        self.test_dataset = datasets(self.cfg, args, target_speaker)
        self.test_collate = collate(self.cfg)
        self.test_batch_size = min(
            self.cfg.train.batch_size, len(self.test_dataset.metadata)
        )
        test_loader = DataLoader(
            self.test_dataset,
            collate_fn=self.test_collate,
            num_workers=self.args.num_workers,
            batch_size=self.test_batch_size,
            shuffle=False,
        )
        return test_loader

    def inference_each_batch(self, batch_data):
        raise NotImplementedError

    def inference_for_batches(self, args, target_speaker=None):
        ###### Construct test_batch ######
        loader = self.build_testdata_loader(args, target_speaker)

        n_batch = len(loader)
        now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
        print(
            "Model eval time: {}, batch_size = {}, n_batch = {}".format(
                now, self.test_batch_size, n_batch
            )
        )
        self.model.eval()

        ###### Inference for each batch ######
        pred_res = []
        with torch.no_grad():
            for i, batch_data in enumerate(loader if n_batch == 1 else tqdm(loader)):
                # Move every tensor in the batch to the target device
                for k, v in batch_data.items():
                    batch_data[k] = v.to(self.device)

                y_pred, stats = self.inference_each_batch(batch_data)

                pred_res += y_pred

        return pred_res

    def inference(self, feature):
        raise NotImplementedError

    def synthesis_by_vocoder(self, pred):
        audios_pred = synthesis(
            self.vocoder_cfg,
            self.checkpoint_dir_vocoder,
            len(pred),
            pred,
        )
        return audios_pred

    def __call__(self, utt):
        feature = self.build_test_utt_data(utt)
        start_time = time.time()
        with torch.no_grad():
            outputs = self.inference(feature)[0]
        time_used = time.time() - start_time
        rtf = time_used / (
            outputs.shape[1]
            * self.cfg.preprocess.hop_size
            / self.cfg.preprocess.sample_rate
        )
        print("Time used: {:.3f}, RTF: {:.4f}".format(time_used, rtf))
        self.avg_rtf.append(rtf)
        audios = outputs.cpu().squeeze().numpy().reshape(-1, 1)
        return audios


def base_parser():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--config", default="config.json", help="json files for configurations."
    )
    parser.add_argument("--use_ddp_inference", default=False)
    parser.add_argument("--n_workers", default=1, type=int)
    parser.add_argument("--local_rank", default=-1, type=int)
    parser.add_argument(
        "--batch_size", default=1, type=int, help="Batch size for inference"
    )
    parser.add_argument(
        "--num_workers",
        default=1,
        type=int,
        help="Worker number for inference dataloader",
    )
    parser.add_argument(
        "--checkpoint_dir",
        type=str,
        default=None,
        help="Checkpoint dir including model file and configuration",
    )
    parser.add_argument(
        "--checkpoint_file", help="checkpoint file", type=str, default=None
    )
    parser.add_argument(
        "--test_list", help="test utterance list for testing", type=str, default=None
    )
    parser.add_argument(
        "--checkpoint_dir_vocoder",
        help="Vocoder's checkpoint dir including model file and configuration",
        type=str,
        default=None,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=None,
        help="Output dir for saving generated results",
    )
    return parser


if __name__ == "__main__":
    parser = base_parser()
    args = parser.parse_args()
    cfg = load_config(args.config)

    # Build inference. BaseInference is abstract; a concrete subclass must
    # implement create_model, load_model, build_test_utt_data and inference.
    inference = BaseInference(cfg, args)
    # __call__ expects a single utterance; passing args.test_list here is an assumption.
    inference(args.test_list)
