{"payload":{"pageCount":1,"repositories":[{"type":"Public","name":"open_lm","owner":"mlfoundations","isFork":false,"description":"A repository for research on medium sized language models.","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":27,"issueCount":26,"starsCount":316,"forksCount":40,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-24T04:54:43.930Z"}},{"type":"Public","name":"open_flamingo","owner":"mlfoundations","isFork":false,"description":"An open-source framework for training large multimodal models.","topicNames":["computer-vision","language-model","flamingo","multimodal-learning","in-context-learning","deep-learning","pytorch"],"topicsNotShown":0,"allTopics":["computer-vision","language-model","flamingo","multimodal-learning","in-context-learning","deep-learning","pytorch"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":5,"issueCount":37,"starsCount":3504,"forksCount":263,"license":"MIT License","participation":[0,4,0,0,4,8,1,0,1,5,6,1,2,4,0,0,0,1,1,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-09T22:45:23.880Z"}},{"type":"Public","name":"dataset2metadata","owner":"mlfoundations","isFork":false,"description":"","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":17,"forksCount":6,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-03-21T20:52:35.500Z"}},{"type":"Public","name":"tableshift","owner":"mlfoundations","isFork":false,"description":"A benchmark for distribution shift in tabular data","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":8,"starsCount":28,"forksCount":5,"license":"MIT License","participation":[0,12,2,0,0,0,0,0,0,0,10,8,21,4,0,0,0,0,0,0,0,0,0,0,0,2,0,0,3,4,4,5,5,1,3,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-02-14T05:38:17.871Z"}},{"type":"Public","name":"task_vectors","owner":"mlfoundations","isFork":false,"description":"Editing Models with Task Arithmetic","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":4,"starsCount":357,"forksCount":27,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-01-11T17:11:00.311Z"}},{"type":"Public","name":"datacomp","owner":"mlfoundations","isFork":false,"description":"DataComp: In search of the next generation of multimodal 
datasets","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":3,"issueCount":17,"starsCount":557,"forksCount":46,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-01-02T01:55:13.446Z"}},{"type":"Public","name":"VisIT-Bench","owner":"mlfoundations","isFork":false,"description":"","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":44,"forksCount":2,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-10-29T09:43:35.583Z"}},{"type":"Public","name":"patching","owner":"mlfoundations","isFork":false,"description":"Patching open-vocabulary models by interpolating weights","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":83,"forksCount":7,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-09-28T13:11:02.427Z"}},{"type":"Public","name":"imagenet-applications-transfer","owner":"mlfoundations","isFork":false,"description":"","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":2,"forksCount":2,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-08-22T05:21:35.843Z"}},{"type":"Public","name":"llm-foundry","owner":"mlfoundations","isFork":true,"description":"LLM training code for MosaicML foundation models","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":485,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-08-10T16:58:13.480Z"}},{"type":"Public","name":"open-diffusion","owner":"mlfoundations","isFork":false,"description":"Simple large-scale training of stable diffusion with multi-node support.","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":2,"starsCount":113,"forksCount":6,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-05-08T23:15:52.900Z"}},{"type":"Public","name":"webdataset-resharder","owner":"mlfoundations","isFork":false,"description":"Efficiently process webdatasets","topicNames":["webdataset","webdataset-format"],"topicsNotShown":0,"allTopics":["webdataset","webdataset-format"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":4,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-04-05T07:55:47.107Z"}},{"type":"Public","name":"clip_quality_not_quantity","owner":"mlfoundations","isFork":false,"description":"","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":24,"forksCount":1,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-10-18T23:45:16.554Z"}},{"type":"Public","name":"model-soups","owner":"mlfoundations","isFork":false,"description":"Model soups: averaging weights of multiple fine-tuned models improves accuracy without increasing inference 
time","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":8,"starsCount":377,"forksCount":35,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-08-28T02:44:16.676Z"}},{"type":"Public","name":"wise-ft","owner":"mlfoundations","isFork":false,"description":"Robust fine-tuning of zero-shot models","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":5,"starsCount":575,"forksCount":59,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-04-29T20:54:27.937Z"}}],"repositoryCount":15,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"Repositories"}