{"payload":{"pageCount":1,"repositories":[{"type":"Public","name":"tokenize-anything","owner":"baaivision","isFork":false,"description":"Tokenize Anything via Prompting","allTopics":["representation-learning","multimodal","foundation-models","promptable"],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":0,"issueCount":6,"starsCount":455,"forksCount":16,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-28T03:15:40.410Z"}},{"type":"Public","name":"Emu","owner":"baaivision","isFork":false,"description":"Emu Series: Generative Multimodal Models from BAAI","allTopics":["foundation-models","in-context-learning","multimodal-pretraining","instruct-tuning","multimodal-generalist","generative-pretraining-in-multimodality"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":38,"starsCount":1520,"forksCount":79,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-03-08T07:09:57.165Z"}},{"type":"Public","name":"EVA","owner":"baaivision","isFork":false,"description":"EVA Series: Visual Representation Fantasies from BAAI","allTopics":["representation-learning","vision-transformer","foundation-models"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":59,"starsCount":2015,"forksCount":146,"license":"MIT License","participation":[0,7,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-03-08T06:28:16.967Z"}},{"type":"Public","name":"CapsFusion","owner":"baaivision","isFork":false,"description":"[CVPR 2024] CapsFusion: Rethinking Image-Text Data at Scale","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":5,"starsCount":180,"forksCount":4,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-02-27T11:43:43.509Z"}},{"type":"Public","name":"GeoDream","owner":"baaivision","isFork":false,"description":"GeoDream: Disentangling 2D and Geometric Priors for High-Fidelity and Consistent 3D Generation","allTopics":["3d","3d-generation","text-to-3d","3d-aigc"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":2,"issueCount":3,"starsCount":560,"forksCount":12,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-01-22T02:11:45.574Z"}},{"type":"Public","name":"Uni3D","owner":"baaivision","isFork":false,"description":"[ICLR'24 Spotlight] Uni3D: 3D Visual Representation from BAAI","allTopics":["3d-representation-learning","vision-transformers","foundation-models"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":13,"starsCount":410,"forksCount":23,"license":"MIT License","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,1,0,2,0,4,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-01-17T06:37:34.243Z"}},{"type":"Public","name":"emu2","owner":"baaivision","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"JavaScript","color":"#f1e05a"},"pullRequestCount":0,"issueCount":0,"starsCount":4,"forksCount":0,"license":null,"participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,11,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-12-21T12:29:29.574Z"}},{"type":"Public","name":"JudgeLM","owner":"baaivision","isFork":false,"description":"An open-sourced LLM judge for evaluating LLM-generated answers.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":6,"starsCount":268,"forksCount":18,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-11-02T11:39:13.737Z"}},{"type":"Public","name":"Painter","owner":"baaivision","isFork":false,"description":"Painter & SegGPT Series: Vision Foundation Models from BAAI","allTopics":["in-context-learning","cvpr2023","generalist-model","in-context-visual-learning","generalist-painter","seggpt","segmentation-foundation-model"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":5,"issueCount":49,"starsCount":2442,"forksCount":159,"license":"MIT License","participation":[0,0,0,0,2,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-10-31T10:22:54.367Z"}},{"type":"Public","name":"vid2vid-zero","owner":"baaivision","isFork":false,"description":"Zero-Shot Video Editing Using Off-The-Shelf Image Diffusion Models","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":8,"starsCount":326,"forksCount":22,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-07-04T08:25:45.385Z"}},{"type":"Public","name":"MUSE-Pytorch","owner":"baaivision","isFork":false,"description":"An in-context conditioning version of MUSE with pre-trained checkpoints.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":5,"starsCount":94,"forksCount":2,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-06-04T08:27:46.672Z"}}],"repositoryCount":11,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"Repositories"}