{"payload":{"pageCount":2,"repositories":[{"type":"Public","name":"Accented-TTS-MLVAE-ADV","owner":"AMAAI-Lab","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-03T06:07:31.826Z"}},{"type":"Public","name":"CM-HRNN","owner":"AMAAI-Lab","isFork":true,"description":"Hierarchical Recurrent Neural Networks for Conditional Melody Generation with Long-term Structure","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":2,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-31T14:52:00.892Z"}},{"type":"Public","name":"DisfluencySpeech","owner":"AMAAI-Lab","isFork":false,"description":"Resources for DisfluencySpeech","allTopics":[],"primaryLanguage":null,"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-30T12:04:05.635Z"}},{"type":"Public","name":"MidiCaps","owner":"AMAAI-Lab","isFork":false,"description":"A large-scale dataset of caption-annotated MIDI files. ","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":3,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-23T08:58:12.615Z"}},{"type":"Public","name":"Video2Music","owner":"AMAAI-Lab","isFork":false,"description":"Video2Music: Suitable Music Generation from Videos using an Affective Multimodal Transformer model","allTopics":["ai","deep-learning","music-generation","affective-computing","multimodal"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":126,"forksCount":18,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-04-20T04:31:43.984Z"}},{"type":"Public","name":"awesome-MER","owner":"AMAAI-Lab","isFork":false,"description":"A curated list of Datasets, Models and Papers for Music Emotion Recognition (MER)","allTopics":[],"primaryLanguage":null,"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-04-13T09:45:34.129Z"}},{"type":"Public","name":"mustango","owner":"AMAAI-Lab","isFork":false,"description":"Mustango: Toward Controllable Text-to-Music Generation","allTopics":["diffusion-models","text-to-audio","text-to-music","large-language-models"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":3,"starsCount":292,"forksCount":24,"license":"MIT License","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,86,8,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,4,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-03-26T06:26:23.683Z"}},{"type":"Public","name":"emotionweb","owner":"AMAAI-Lab","isFork":false,"description":"Website emotion guidance","allTopics":[],"primaryLanguage":{"name":"JavaScript","color":"#f1e05a"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-03-14T04:18:10.161Z"}},{"type":"Public","name":"genmusic_demo_list","owner":"AMAAI-Lab","isFork":true,"description":"a list of demo websites for automatic music generation research","allTopics":[],"primaryLanguage":null,"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":37,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-11-15T07:44:34.989Z"}},{"type":"Public","name":"ai-audio-datasets-list","owner":"AMAAI-Lab","isFork":true,"description":"This is a list of datasets consisting of speech, music, and sound effects, which can provide training data for Generative AI, AIGC, AI model training, intelligent audio tool development, and audio applications. It is mainly used for speech recognition, speech synthesis, singing voice synthesis, music information retrieval, music generation, etc.","allTopics":[],"primaryLanguage":null,"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":27,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-10-31T05:15:14.607Z"}},{"type":"Public","name":"kylo-ren-app","owner":"AMAAI-Lab","isFork":true,"description":"Web interface for AI music generation models","allTopics":[],"primaryLanguage":{"name":"JavaScript","color":"#f1e05a"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":2,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-10-19T04:03:07.538Z"}},{"type":"Public","name":"PreBit","owner":"AMAAI-Lab","isFork":false,"description":"This is the repo accompanying the paper: \"A multimodal model with Twitter FinBERT embeddings for extreme price movement prediction of Bitcoin\"","allTopics":[],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":0,"issueCount":0,"starsCount":3,"forksCount":4,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-10-19T03:55:26.466Z"}},{"type":"Public","name":"singapore-music-classifier","owner":"AMAAI-Lab","isFork":false,"description":"Code for paper A dataset and classification model for Malay, Hindi, Tamil and Chinese music","allTopics":["music","classification","singapore","ismir"],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-10-19T03:49:05.932Z"}},{"type":"Public","name":"FundamentalMusicEmbedding","owner":"AMAAI-Lab","isFork":true,"description":"Fundamental Music Embedding, FME","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":5,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-10-16T12:39:59.666Z"}},{"type":"Public","name":"MuVi","owner":"AMAAI-Lab","isFork":false,"description":"Predicting emotion from music videos: exploring the relative contribution of visual and auditory information on affective responses","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":13,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-10-03T13:21:22.284Z"}},{"type":"Public","name":"nnAudio","owner":"AMAAI-Lab","isFork":true,"description":"Audio processing by using pytorch 1D convolution network","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":90,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-09-04T22:14:55.853Z"}},{"type":"Public","name":"ReconVAT","owner":"AMAAI-Lab","isFork":true,"description":"ReconVAT: a semi-supervised automatic music transcription (AMT) model ","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":4,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-08-29T22:44:33.476Z"}},{"type":"Public","name":"AudioLoader","owner":"AMAAI-Lab","isFork":true,"description":"PyTorch Dataset for Speech and Music audio","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":10,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-08-18T11:32:25.630Z"}},{"type":"Public","name":"Jointist","owner":"AMAAI-Lab","isFork":true,"description":"Official Implementation of Jointist","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":1,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-07-26T00:31:00.330Z"}},{"type":"Public","name":"demucs_lightning","owner":"AMAAI-Lab","isFork":true,"description":"Demucs Lightning: A PyTorch lightning version of Demucs with Hydra and Tensorboard features","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":10,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-05-03T06:28:50.706Z"}},{"type":"Public","name":"DiffRoll","owner":"AMAAI-Lab","isFork":true,"description":"PyTorch implementation of DiffRoll, a diffusion-based generative automatic music transcription (AMT) model","allTopics":[],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":9,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-04-24T09:07:46.699Z"}},{"type":"Public","name":"IJCNN2020_music_emotion","owner":"AMAAI-Lab","isFork":true,"description":"Regression-based Music Emotion Prediction using Triplet Neural Networks","allTopics":[],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":5,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-03-24T22:10:17.012Z"}},{"type":"Public","name":"LeadSheetGen_Valence","owner":"AMAAI-Lab","isFork":true,"description":"A novel seq2seq framework where high-level musicalities (such us the valence of the chord progression) are fed to the Encoder, and they are \"translated\" to lead sheet events in the Decoder. For further details please read and cite our paper:","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":0,"starsCount":0,"forksCount":3,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-01-04T01:45:47.232Z"}},{"type":"Public","name":"Conditional-Drums-Generation-using-Compound-Word-Representations","owner":"AMAAI-Lab","isFork":true,"description":"Conditional Drums Generation using Compound Word Representations","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":4,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-07-29T16:12:04.073Z"}},{"type":"Public","name":"MusIAC","owner":"AMAAI-Lab","isFork":true,"description":"music inpainting control","allTopics":[],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":1,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-02-08T15:17:11.114Z"}},{"type":"Public","name":"AMAAI-guidebook","owner":"AMAAI-Lab","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"TeX","color":"#3D6117"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":1,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-01-21T06:05:14.541Z"}},{"type":"Public","name":"HEAR_2021_NeurIPS_Challenge_SUTD_AMAAI","owner":"AMAAI-Lab","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2021-10-31T09:27:27.997Z"}},{"type":"Public","name":"datasets_emotion","owner":"AMAAI-Lab","isFork":true,"description":"This repository collects information about different data sets for Music Emotion Recognition.","allTopics":[],"primaryLanguage":null,"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":23,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2021-09-08T16:18:57.298Z"}},{"type":"Public","name":"nnAudio-hear","owner":"AMAAI-Lab","isFork":false,"description":"","allTopics":[],"primaryLanguage":null,"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2021-06-07T03:51:47.783Z"}},{"type":"Public","name":"AMAAI-Lab.github.io","owner":"AMAAI-Lab","isFork":true,"description":"Allan Lab website","allTopics":[],"primaryLanguage":{"name":"HTML","color":"#e34c26"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":1107,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2021-04-29T06:27:29.846Z"}}],"repositoryCount":34,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"Repositories"}