{"payload":{"pageCount":3,"repositories":[{"type":"Public","name":"megfile","owner":"megvii-research","isFork":false,"description":"Megvii FILE Library - Working with Files in Python same as the standard library","topicNames":["python","streaming","oss","s3","sftp","file","hdfs"],"topicsNotShown":0,"allTopics":["python","streaming","oss","s3","sftp","file","hdfs"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":2,"issueCount":5,"starsCount":110,"forksCount":15,"license":"Apache License 2.0","participation":[0,0,10,1,4,10,4,3,3,8,3,7,4,2,4,5,11,0,0,5,9,5,7,3,1,0,2,0,2,0,0,3,0,1,5,0,0,0,0,0,2,0,0,0,0,1,1,5,0,3,3,1],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-24T07:55:56.587Z"}},{"type":"Public","name":"NAFNet","owner":"megvii-research","isFork":false,"description":"The state-of-the-art image restoration model without nonlinear activation functions.","topicNames":["image-denoising","image-restoration","image-deblurring","denoise","low-level-vision","deblur","eccv2022","stereo-super-resolution","pytorch"],"topicsNotShown":0,"allTopics":["image-denoising","image-restoration","image-deblurring","denoise","low-level-vision","deblur","eccv2022","stereo-super-resolution","pytorch"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":3,"issueCount":93,"starsCount":2032,"forksCount":250,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-04-22T18:08:24.747Z"}},{"type":"Public","name":"DRConv","owner":"megvii-research","isFork":false,"description":"","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":1,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-01-15T12:50:00.134Z"}},{"type":"Public","name":"SCSC","owner":"megvii-research","isFork":false,"description":"","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":1,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-01-15T12:47:03.397Z"}},{"type":"Public","name":"MOTR","owner":"megvii-research","isFork":false,"description":"[ECCV2022] MOTR: End-to-End Multiple-Object Tracking with TRansformer","topicNames":["end-to-end","transformer","multi-object-tracking","pytorch"],"topicsNotShown":0,"allTopics":["end-to-end","transformer","multi-object-tracking","pytorch"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":28,"starsCount":560,"forksCount":88,"license":"Other","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-01-15T07:10:16.176Z"}},{"type":"Public","name":"Sparsebit","owner":"megvii-research","isFork":false,"description":"A model compression and acceleration toolbox based on pytorch.","topicNames":["deep-learning","sparse","pruning","quantization","tensorrt","quantization-aware-training","post-training-quantization"],"topicsNotShown":0,"allTopics":["deep-learning","sparse","pruning","quantization","tensorrt","quantization-aware-training","post-training-quantization"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":13,"issueCount":8,"starsCount":321,"forksCount":39,"license":"Apache License 
2.0","participation":[0,0,0,0,0,0,0,0,0,8,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-01-12T01:08:59.595Z"}},{"type":"Public","name":"PMN","owner":"megvii-research","isFork":false,"description":"[TPAMI 2023 / ACMMM 2022 Best Paper Runner-Up Award] Learnability Enhancement for Low-light Raw Denoising: Where Paired Real Data Meets Noise Modeling (a Data Perspective)","topicNames":["pytorch","raw","data-augmentation","denoising","low-light","paired-data","noise-modeling"],"topicsNotShown":0,"allTopics":["pytorch","raw","data-augmentation","denoising","low-light","paired-data","noise-modeling"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":120,"forksCount":14,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-12-14T13:05:20.049Z"}},{"type":"Public","name":"protoclip","owner":"megvii-research","isFork":false,"description":"📍 Official pytorch implementation of paper \"ProtoCLIP: Prototypical Contrastive Language Image Pretraining\" (IEEE TNNLS)","topicNames":["self-supervised-learning","contrastive-learning","vision-language-pretraining"],"topicsNotShown":0,"allTopics":["self-supervised-learning","contrastive-learning","vision-language-pretraining"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":43,"forksCount":0,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-11-08T02:41:38.543Z"}},{"type":"Public","name":"mdistiller","owner":"megvii-research","isFork":false,"description":"The official implementation of [CVPR2022] Decoupled Knowledge Distillation https://arxiv.org/abs/2203.08679 and [ICCV2023] DOT: A Distillation-Oriented Trainer https://openaccess.thecvf.com/content/ICCV2023/papers/Zhao_DOT_A_Distillation-Oriented_Trainer_ICCV_2023_paper.pdf","topicNames":["computer-vision","deep-learning","pytorch","imagenet","coco","cifar","knowledge-distillation","cvpr2022","iccv2023"],"topicsNotShown":0,"allTopics":["computer-vision","deep-learning","pytorch","imagenet","coco","cifar","knowledge-distillation","cvpr2022","iccv2023"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":20,"starsCount":750,"forksCount":114,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-11-05T09:23:59.509Z"}},{"type":"Public","name":"PETR","owner":"megvii-research","isFork":false,"description":"[ECCV2022] PETR: Position Embedding Transformation for Multi-View 3D Object Detection & [ICCV2023] PETRv2: A Unified Framework for 3D Perception from Multi-Camera Images","topicNames":["segmentation","object-detection","multi-task-learning","multi-camera","3d-position-embedding"],"topicsNotShown":0,"allTopics":["segmentation","object-detection","multi-task-learning","multi-camera","3d-position-embedding"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":70,"starsCount":791,"forksCount":122,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-10-11T05:59:40.574Z"}},{"type":"Public","name":"RevCol","owner":"megvii-research","isFork":false,"description":"Official Code of Paper \"Reversible Column Networks\" 
\"RevColv2\"","topicNames":["computer-vision","cnn","transformer","vit","mae","iclr2023","pytorch"],"topicsNotShown":0,"allTopics":["computer-vision","cnn","transformer","vit","mae","iclr2023","pytorch"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":5,"starsCount":244,"forksCount":10,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-09-06T03:46:38.358Z"}},{"type":"Public","name":"AAAI2023-PVD","owner":"megvii-research","isFork":false,"description":"Official Implementation of PVD and PVDAL: http://sk-fun.fun/PVD-AL/","topicNames":["computer-vision","nerf","3d","3d-reconstruction","3d-graphics","rendering-3d-volumes","nerfs","neural-rendering","neuralradiance-fields"],"topicsNotShown":0,"allTopics":["computer-vision","nerf","3d","3d-reconstruction","3d-graphics","rendering-3d-volumes","nerfs","neural-rendering","neuralradiance-fields"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":182,"forksCount":5,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-08-08T11:56:00.121Z"}},{"type":"Public","name":"TPS-CVPR2023","owner":"megvii-research","isFork":false,"description":"","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":3,"starsCount":38,"forksCount":0,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-08-07T12:16:35.052Z"}},{"type":"Public","name":"CVPR2023-UniDistill","owner":"megvii-research","isFork":false,"description":"CVPR2023 (highlight) - UniDistill: A Universal Cross-Modality Knowledge Distillation Framework for 3D Object Detection in Bird's-Eye View","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":8,"starsCount":99,"forksCount":10,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-08-05T04:06:37.762Z"}},{"type":"Public","name":"video_analyst","owner":"megvii-research","isFork":false,"description":"A series of basic algorithms that are useful for video understanding, including Single Object Tracking (SOT), Video Object Segmentation (VOS) and so on.","topicNames":["video","segmentation","sot","vos"],"topicsNotShown":0,"allTopics":["video","segmentation","sot","vos"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":10,"issueCount":20,"starsCount":814,"forksCount":175,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-08-03T16:40:06.843Z"}},{"type":"Public","name":"IntLLaMA","owner":"megvii-research","isFork":false,"description":"IntLLaMA: A fast and light quantization solution for LLaMA","topicNames":["llama","quantization","llms"],"topicsNotShown":0,"allTopics":["llama","quantization","llms"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":2,"issueCount":0,"starsCount":21,"forksCount":0,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-07-21T04:49:46.936Z"}},{"type":"Public","name":"FullMatch","owner":"megvii-research","isFork":false,"description":"Official implementation of FullMatch 
(CVPR2023)","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":2,"starsCount":38,"forksCount":2,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-06-14T06:53:54.418Z"}},{"type":"Public","name":"TLC","owner":"megvii-research","isFork":false,"description":"Test-time Local Converter","topicNames":["deep-learning","neural-network"],"topicsNotShown":0,"allTopics":["deep-learning","neural-network"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":23,"starsCount":217,"forksCount":10,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-06-09T03:06:51.433Z"}},{"type":"Public","name":"DCLS-SR","owner":"megvii-research","isFork":false,"description":"Official PyTorch implementation of the paper \"Deep Constrained Least Squares for Blind Image Super-Resolution\", CVPR 2022.","topicNames":["deblurring","blind-super-resolution","deep-learning","pytorch","super-resolution"],"topicsNotShown":0,"allTopics":["deblurring","blind-super-resolution","deep-learning","pytorch","super-resolution"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":34,"starsCount":216,"forksCount":18,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-05-13T19:02:25.311Z"}},{"type":"Public","name":"SimpleDG","owner":"megvii-research","isFork":false,"description":"This is the training and test code for ECCV2022 workshop NICO challenge","topicNames":["domain-generalization"],"topicsNotShown":0,"allTopics":["domain-generalization"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":7,"forksCount":0,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-04-26T02:58:19.601Z"}},{"type":"Public","name":"FQ-ViT","owner":"megvii-research","isFork":false,"description":"[IJCAI 2022] FQ-ViT: Post-Training Quantization for Fully Quantized Vision Transformer","topicNames":["imagenet","quantization","post-training-quantization","vision-transformer","pytorch"],"topicsNotShown":0,"allTopics":["imagenet","quantization","post-training-quantization","vision-transformer","pytorch"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":8,"starsCount":281,"forksCount":46,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-04-11T06:18:11.413Z"}},{"type":"Public","name":"OccDepth","owner":"megvii-research","isFork":false,"description":"Maybe the first academic open work on stereo 3D SSC method with vision-only input.","topicNames":["occupancy","stereo-camera","camera-based","semantic-scene-completion"],"topicsNotShown":0,"allTopics":["occupancy","stereo-camera","camera-based","semantic-scene-completion"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":9,"starsCount":266,"forksCount":23,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-04-11T06:17:59.340Z"}},{"type":"Public","name":"LBHomo","owner":"megvii-research","isFork":false,"description":"This is the official PyTorch implementation of Semi-supervised Deep Large-baseline Homography Estimation with Progressive Equivalence Constraint, AAAI 
2023","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":8,"starsCount":14,"forksCount":3,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-03-19T01:45:03.336Z"}},{"type":"Public","name":"DVN","owner":"megvii-research","isFork":false,"description":"","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":5,"starsCount":12,"forksCount":2,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-03-16T03:37:28.664Z"}},{"type":"Public","name":"US3L-CVPR2023","owner":"megvii-research","isFork":false,"description":"PyTorch implementation of US3L (Accepted to CVPR2023)","topicNames":["pytorch","pruning","self-supervised-learning","universally-slimmable"],"topicsNotShown":0,"allTopics":["pytorch","pruning","self-supervised-learning","universally-slimmable"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":32,"forksCount":1,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-03-15T12:13:53.955Z"}},{"type":"Public","name":"SSQL-ECCV2022","owner":"megvii-research","isFork":false,"description":"PyTorch implementation of SSQL (Accepted to ECCV2022 oral presentation)","topicNames":["quantization","deep-learning","pytorch","self-supervised-learning"],"topicsNotShown":0,"allTopics":["quantization","deep-learning","pytorch","self-supervised-learning"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":75,"forksCount":6,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-03-15T01:57:14.815Z"}},{"type":"Public","name":"basecls","owner":"megvii-research","isFork":false,"description":"A codebase & model zoo for pretrained backbone based on MegEngine.","topicNames":["classification","pretrained-models","imagenet-classifier","distributed-training","megengine"],"topicsNotShown":0,"allTopics":["classification","pretrained-models","imagenet-classifier","distributed-training","megengine"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":9,"issueCount":0,"starsCount":32,"forksCount":3,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-03-06T23:35:52.922Z"}},{"type":"Public","name":"MOTRv2","owner":"megvii-research","isFork":false,"description":"[CVPR2023] MOTRv2: Bootstrapping End-to-End Multi-Object Tracking by Pretrained Object Detectors","topicNames":["end-to-end","transformer","multi-object-tracking","pytorch"],"topicsNotShown":0,"allTopics":["end-to-end","transformer","multi-object-tracking","pytorch"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":42,"starsCount":335,"forksCount":45,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-02-28T02:18:21.426Z"}},{"type":"Public","name":"CREStereo","owner":"megvii-research","isFork":false,"description":"Official MegEngine implementation of CREStereo(CVPR 2022 
Oral).","topicNames":["computer-vision","stereo","cvpr","stereo-vision","stereo-matching","megengine","deep-learning","dataset"],"topicsNotShown":0,"allTopics":["computer-vision","stereo","cvpr","stereo-vision","stereo-matching","megengine","deep-learning","dataset"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":2,"issueCount":42,"starsCount":450,"forksCount":56,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-02-10T06:58:10.078Z"}},{"type":"Public","name":"TransMVSNet","owner":"megvii-research","isFork":false,"description":"(CVPR 2022) TransMVSNet: Global Context-aware Multi-view Stereo Network with Transformers.","topicNames":["transformer","multi-view-stereo"],"topicsNotShown":0,"allTopics":["transformer","multi-view-stereo"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":23,"starsCount":261,"forksCount":25,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-02-06T12:25:40.310Z"}}],"repositoryCount":74,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"Repositories"}