{"payload":{"pageCount":1,"repositories":[{"type":"Public","name":"ExplainableVQA","owner":"VQAssessment","isFork":false,"description":"[ACMMM Oral, 2023] \"Towards Explainable In-the-wild Video Quality Assessment: A Database and a Language-Prompted Approach\"","topicNames":["maxwell","explainable-ai","video-quality-assessment","blind-video-quality-assessment","technical-quality","aesthetic-quality","endeavor-project"],"topicsNotShown":0,"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":3,"starsCount":53,"forksCount":6,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-11-21T14:08:28.659Z"}},{"type":"Public","name":"BVQI","owner":"VQAssessment","isFork":false,"description":"[ICME 2023 Oral, Extended to TIP (UR)] The best zero-shot VQA approach that even outperforms several fully-supervised methods. ","topicNames":["quality-assessment","video-quality-assessment","blind-video-quality-assessment"],"topicsNotShown":0,"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":34,"forksCount":4,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-07-11T03:49:17.280Z"}}],"repositoryCount":2,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"mirror","text":"Mirrors"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"Repositories"}