{"payload":{"pageCount":2,"repositories":[{"type":"Public","name":"SparseOcc","owner":"MCG-NJU","isFork":false,"description":"Fully Sparse 3D Occupancy Prediction & RayIoU Evaluation Metric","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":7,"starsCount":108,"forksCount":11,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,4,2,0,0,0,0,3],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-31T00:56:33.609Z"}},{"type":"Public","name":"MOTIP","owner":"MCG-NJU","isFork":false,"description":"Multiple Object Tracking as ID Prediction","allTopics":["tracking","computer","multi-object-tracking","multiple-object-tracking"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":3,"starsCount":46,"forksCount":5,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,30,0,6,0,6,2,13,1,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-15T16:05:40.066Z"}},{"type":"Public","name":"MeMOTR","owner":"MCG-NJU","isFork":false,"description":"[ICCV 2023] MeMOTR: Long-Term Memory-Augmented Transformer for Multi-Object Tracking","allTopics":["tracking","computer-vision","deep-learning","multi-object-tracking"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":3,"starsCount":129,"forksCount":5,"license":"MIT License","participation":[0,0,0,0,0,0,0,0,1,21,0,5,0,2,2,0,0,0,2,5,1,0,8,3,1,4,0,8,1,5,0,1,0,0,4,2,0,0,1,0,0,2,0,0,0,0,1,0,1,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-09T09:05:12.410Z"}},{"type":"Public","name":"MixFormerV2","owner":"MCG-NJU","isFork":false,"description":"[NeurIPS 2023] MixFormerV2: Efficient Fully Transformer Tracking","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":22,"starsCount":122,"forksCount":18,"license":"MIT License","participation":[1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,5,6,0,0,0,0,1,1,3,0,0,0,0,1,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-04-20T11:16:56.913Z"}},{"type":"Public","name":"BIVDiff","owner":"MCG-NJU","isFork":false,"description":"[CVPR 2024] BIVDiff: A Training-free Framework for General-Purpose Video Synthesis via Bridging Image and Video Diffusion Models","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":26,"forksCount":0,"license":null,"participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,5,2,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-04-16T03:46:53.656Z"}},{"type":"Public","name":"SGM-VFI","owner":"MCG-NJU","isFork":false,"description":"[CVPR 2024] Sparse Global Matching for Video Frame Interpolation with Large Motion","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":34,"forksCount":4,"license":null,"participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,8,2,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-04-15T13:55:14.546Z"}},{"type":"Public","name":"LogN","owner":"MCG-NJU","isFork":false,"description":"This repo is an official implementation of our IJCV paper: Logit Normalization for Long-Tail Object Detection, which was published in 08 January 
2024.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":2,"forksCount":0,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-04-14T06:32:46.298Z"}},{"type":"Public","name":"SparseBEV","owner":"MCG-NJU","isFork":false,"description":"[ICCV 2023] SparseBEV: High-Performance Sparse 3D Object Detection from Multi-Camera Videos","allTopics":["transformer","autonomous-driving","3d-object-detection","bev-perception"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":14,"starsCount":290,"forksCount":19,"license":"MIT License","participation":[0,0,0,0,0,0,0,0,1,0,0,1,2,4,0,1,3,0,0,0,1,0,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-03-31T16:01:21.015Z"}},{"type":"Public","name":"CoMAE","owner":"MCG-NJU","isFork":false,"description":"[AAAI 2023] CoMAE: Single Model Hybrid Pre-training on Small-Scale RGB-D Datasets","allTopics":["rgb-d","pre-training","aaai2023"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":3,"starsCount":30,"forksCount":2,"license":null,"participation":[0,0,0,0,0,0,0,0,0,0,0,2,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-03-28T03:51:26.023Z"}},{"type":"Public","name":"MixFormer","owner":"MCG-NJU","isFork":false,"description":"[CVPR 2022 Oral & TPAMI 2024] MixFormer: End-to-End Tracking with Iterative Mixed Attention ","allTopics":["tracking","vot","cvpr2022"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":38,"starsCount":429,"forksCount":73,"license":"MIT License","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-02-28T09:50:34.314Z"}},{"type":"Public","name":"VLG","owner":"MCG-NJU","isFork":false,"description":"VLG: General Video Recognition with Web Textual Knowledge (https://arxiv.org/abs/2212.01638)","allTopics":["action-recognition","few-shot-recognition","open-set-recognition","long-tailed-recognition","video-language"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":8,"forksCount":0,"license":null,"participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-01-18T07:23:38.330Z"}},{"type":"Public","name":"StageInteractor","owner":"MCG-NJU","isFork":false,"description":"[ICCV 2023] StageInteractor: Query-based Object Detector with Cross-stage Interaction","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":9,"forksCount":1,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-12-22T13:48:13.179Z"}},{"type":"Public","name":"VideoMAE","owner":"MCG-NJU","isFork":false,"description":"[NeurIPS 2022 Spotlight] VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video 
Pre-Training","allTopics":["transformer","video-understanding","mae","video-analysis","video-representation-learning","self-supervised-learning","masked-autoencoder","vision-transformer","video-transformer","neurips-2022","pytorch","action-recognition"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":41,"starsCount":1239,"forksCount":122,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-12-08T13:44:48.183Z"}},{"type":"Public","name":"DGN","owner":"MCG-NJU","isFork":false,"description":"[IJCV 2023] Dual Graph Networks for Pose Estimation in Crowded Scenes","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":7,"forksCount":1,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-12-05T07:27:30.896Z"}},{"type":"Public","name":"PointTAD","owner":"MCG-NJU","isFork":false,"description":"[NeurIPS 2022] PointTAD: Multi-Label Temporal Action Detection with Learnable Query Points","allTopics":["video-understanding","action-detection","temporal-action-detection","neurips-2022"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":4,"starsCount":36,"forksCount":1,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-11-24T07:53:22.838Z"}},{"type":"Public","name":"DEQDet","owner":"MCG-NJU","isFork":false,"description":"[ICCV 2023] Deep Equilibrium Object Detection","allTopics":[],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":0,"issueCount":1,"starsCount":20,"forksCount":1,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-11-14T17:17:18.948Z"}},{"type":"Public","name":"CamLiFlow","owner":"MCG-NJU","isFork":false,"description":"[CVPR 2022 Oral & TPAMI 2023] Learning Optical Flow and Scene Flow with Bidirectional Camera-LiDAR Fusion","allTopics":["point-cloud","optical-flow","multimodal","scene-flow","cvpr2022"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":208,"forksCount":20,"license":null,"participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-11-05T02:38:26.276Z"}},{"type":"Public","name":"MGMAE","owner":"MCG-NJU","isFork":false,"description":"[ICCV 2023] MGMAE: Motion Guided Masking for Video Masked Autoencoding","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":2,"starsCount":17,"forksCount":0,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-10-16T06:54:18.255Z"}},{"type":"Public","name":"EVAD","owner":"MCG-NJU","isFork":false,"description":"[ICCV 2023] Efficient Video Action Detection with Token Dropout and Context Refinement","allTopics":["pytorch","transformer","action-recognition","action-detection","efficient-transformers"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":4,"starsCount":19,"forksCount":3,"license":"Other","participation":[0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-09-27T08:30:23.710Z"}},{"type":"Public","name":"PDPP","owner":"MCG-NJU","isFork":false,"description":"[CVPR 2023 Hightlight] PDPP: 
Projected Diffusion for Procedure Planning in Instructional Videos","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":4,"starsCount":26,"forksCount":0,"license":null,"participation":[0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-08-30T15:19:16.623Z"}},{"type":"Public","name":"TemporalPerceiver","owner":"MCG-NJU","isFork":false,"description":"[T-PAMI 2023] Temporal Perceiver: A General Architecture for Arbitrary Boundary Detection","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":32,"forksCount":1,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-08-29T07:17:30.663Z"}},{"type":"Public","name":"MixSort","owner":"MCG-NJU","isFork":false,"description":"[ICCV2023] MixSort: The Customized Tracker in SportsMOT ","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":7,"starsCount":60,"forksCount":7,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-08-21T02:36:11.659Z"}},{"type":"Public","name":"MultiSports","owner":"MCG-NJU","isFork":false,"description":"[ICCV 2021] MultiSports: A Multi-Person Video Dataset of Spatio-Temporally Localized Sports Actions","allTopics":["sports-data","action-detection","sports-analytics"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":20,"starsCount":103,"forksCount":7,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-08-04T06:39:43.665Z"}},{"type":"Public","name":"SportsMOT","owner":"MCG-NJU","isFork":false,"description":"[ICCV 2023] SportsMOT: A Large Multi-Object Tracking Dataset in Multiple Sports Scenes","allTopics":["competition","dataset","multi-object-tracking","mot"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":6,"starsCount":121,"forksCount":4,"license":null,"participation":[0,0,0,0,0,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-07-24T02:13:51.411Z"}},{"type":"Public","name":"LinK","owner":"MCG-NJU","isFork":false,"description":"[CVPR 2023] LinK: Linear Kernel for LiDAR-based 3D Perception","allTopics":["detection","point-cloud","segmentation","autonomous-driving","large-kernels","nuscenes","semantickitti"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":3,"starsCount":77,"forksCount":6,"license":null,"participation":[0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-07-12T03:06:39.652Z"}},{"type":"Public","name":"BFRNet","owner":"MCG-NJU","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":6,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-06-20T15:24:13.615Z"}},{"type":"Public","name":"BasicTAD","owner":"MCG-NJU","isFork":false,"description":"BasicTAD: an Astounding RGB-Only Baselinefor Temporal Action 
Detection","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":46,"forksCount":6,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-06-10T06:41:49.281Z"}},{"type":"Public","name":"EMA-VFI","owner":"MCG-NJU","isFork":false,"description":"[CVPR 2023] Extracting Motion and Appearance via Inter-Frame Attention for Efficient Video Frame Interpolatio","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":16,"starsCount":321,"forksCount":39,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-05-29T08:21:56.513Z"}},{"type":"Public","name":"STMixer","owner":"MCG-NJU","isFork":false,"description":"[CVPR 2023] STMixer: A One-Stage Sparse Action Detector","allTopics":["transformer","action-recognition","action-detection","one-stage-detector"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":3,"starsCount":46,"forksCount":3,"license":null,"participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-05-18T09:39:31.579Z"}},{"type":"Public","name":"APP-Net","owner":"MCG-NJU","isFork":false,"description":"[TIP] APP-Net: Auxiliary-point-based Push and Pull Operations for Efficient Point Cloud Recognition","allTopics":["point-cloud","classification","efficient-algorithm"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":10,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-05-15T09:12:13.519Z"}}],"repositoryCount":54,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"Repositories"}