{"payload":{"pageCount":2,"repositories":[{"type":"Public","name":"triton_cli","owner":"triton-inference-server","isFork":false,"description":"Triton CLI is an open source command line interface that enables users to create, deploy, and profile models served by the Triton Inference Server.","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":3,"issueCount":2,"starsCount":18,"forksCount":1,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-08T02:58:15.804Z"}},{"type":"Public","name":"tutorials","owner":"triton-inference-server","isFork":false,"description":"This repository contains tutorials and examples for Triton Inference Server","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":11,"issueCount":8,"starsCount":412,"forksCount":74,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-08T02:45:27.610Z"}},{"type":"Public","name":"onnxruntime_backend","owner":"triton-inference-server","isFork":false,"description":"The Triton backend for the ONNX Runtime.","topicNames":["inference","backend","triton-inference-server","onnx-runtime"],"topicsNotShown":0,"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":2,"issueCount":63,"starsCount":113,"forksCount":52,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[0,0,2,1,0,0,1,0,4,0,0,0,1,0,0,1,0,0,0,1,1,0,1,0,0,0,0,0,1,0,2,2,1,0,0,1,0,0,1,1,1,0,0,2,1,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-08T01:53:22.989Z"}},{"type":"Public","name":"core","owner":"triton-inference-server","isFork":false,"description":"The core library and APIs implementing the Triton Inference Server. ","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":20,"issueCount":0,"starsCount":91,"forksCount":87,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-08T01:03:40.366Z"}},{"type":"Public","name":"client","owner":"triton-inference-server","isFork":false,"description":"Triton Python, C++ and Java client libraries, and GRPC-generated client examples for go, java and scala.","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":24,"issueCount":5,"starsCount":487,"forksCount":215,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[2,1,1,0,5,1,3,1,3,3,5,2,1,4,1,0,1,1,2,3,4,8,3,3,4,1,1,5,2,3,2,1,0,0,4,4,1,0,1,0,1,3,8,40,15,22,4,5,11,13,7,8],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-08T00:58:04.358Z"}},{"type":"Public","name":"server","owner":"triton-inference-server","isFork":false,"description":"The Triton Inference Server provides an optimized cloud and edge inferencing solution. 
","topicNames":["machine-learning","cloud","deep-learning","gpu","inference","edge","datacenter"],"topicsNotShown":0,"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":51,"issueCount":409,"starsCount":7390,"forksCount":1375,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-08T00:48:19.241Z"}},{"type":"Public","name":"tensorrt_backend","owner":"triton-inference-server","isFork":false,"description":"The Triton backend for TensorRT.","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":1,"issueCount":0,"starsCount":45,"forksCount":26,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[0,0,1,0,0,0,1,0,1,0,1,0,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-07T22:45:24.645Z"}},{"type":"Public","name":"pytorch_backend","owner":"triton-inference-server","isFork":false,"description":"The Triton backend for the PyTorch TorchScript models.","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":3,"issueCount":0,"starsCount":103,"forksCount":40,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[1,0,3,2,2,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,2,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,3],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-07T19:25:48.817Z"}},{"type":"Public","name":"model_navigator","owner":"triton-inference-server","isFork":false,"description":"Triton Model Navigator is an inference toolkit designed for optimizing and deploying Deep Learning models with a focus on NVIDIA GPUs.","topicNames":["deep-learning","gpu","inference"],"topicsNotShown":0,"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":3,"starsCount":157,"forksCount":24,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-07T18:17:29.722Z"}},{"type":"Public","name":"model_analyzer","owner":"triton-inference-server","isFork":false,"description":"Triton Model Analyzer is a CLI tool to help with better understanding of the compute and memory requirements of the Triton Inference Server models.","topicNames":["deep-learning","gpu","inference","performance-analysis"],"topicsNotShown":0,"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":3,"issueCount":10,"starsCount":378,"forksCount":73,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-07T16:57:12.365Z"}},{"type":"Public","name":"dali_backend","owner":"triton-inference-server","isFork":false,"description":"The Triton backend that allows running GPU-accelerated data pre-processing pipelines implemented in DALI's python API.","topicNames":["python","deep-learning","gpu","image-processing","dali","data-preprocessing","nvidia-dali","fast-data-pipeline"],"topicsNotShown":0,"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":7,"issueCount":19,"starsCount":117,"forksCount":26,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-07T15:40:47.154Z"}},{"type":"Public","name":"tensorrtllm_backend","owner":"triton-inference-server","isFork":false,"description":"The Triton TensorRT-LLM 
Backend","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":12,"issueCount":190,"starsCount":492,"forksCount":64,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-07T15:37:19.297Z"}},{"type":"Public","name":"python_backend","owner":"triton-inference-server","isFork":false,"description":"Triton backend that enables pre-process, post-processing and other logic to be implemented in Python.","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":9,"issueCount":0,"starsCount":477,"forksCount":137,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-07T02:34:09.176Z"}},{"type":"Public","name":"vllm_backend","owner":"triton-inference-server","isFork":false,"description":"","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":0,"starsCount":111,"forksCount":13,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-06T23:41:47.161Z"}},{"type":"Public","name":"third_party","owner":"triton-inference-server","isFork":false,"description":"Third-party source packages that are modified for use in Triton.","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"C","color":"#555555"},"pullRequestCount":5,"issueCount":0,"starsCount":7,"forksCount":45,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[0,0,1,2,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-06T03:49:47.812Z"}},{"type":"Public","name":"fil_backend","owner":"triton-inference-server","isFork":false,"description":"FIL backend for the Triton Inference Server","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":4,"issueCount":46,"starsCount":64,"forksCount":34,"license":"Apache License 2.0","participation":[0,0,0,1,0,0,0,1,1,0,0,1,0,0,0,0,1,0,0,0,0,2,0,0,0,1,0,0,0,0,0,0,0,0,5,1,0,0,0,1,0,0,0,1,0,0,2,0,0,0,0,2],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-04T03:42:03.120Z"}},{"type":"Public","name":"developer_tools","owner":"triton-inference-server","isFork":false,"description":"","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":5,"issueCount":0,"starsCount":15,"forksCount":9,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-02T22:46:17.120Z"}},{"type":"Public","name":"openvino_backend","owner":"triton-inference-server","isFork":false,"description":"OpenVINO backend for Triton.","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":3,"issueCount":3,"starsCount":25,"forksCount":14,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[1,0,0,0,0,0,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,2,0,0,3,1,0,0,0,1,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-04-30T20:18:04.679Z"}},{"type":"Public","name":"common","owner":"triton-inference-server","isFork":false,"description":"Common source, scripts and utilities shared across all Triton 
repositories.","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":4,"issueCount":0,"starsCount":56,"forksCount":71,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-04-29T09:55:41.813Z"}},{"type":"Public","name":"pytriton","owner":"triton-inference-server","isFork":false,"description":"PyTriton is a Flask/FastAPI-like interface that simplifies Triton's deployment in Python environments.","topicNames":["gpu","deep-learning","inference"],"topicsNotShown":0,"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":11,"starsCount":666,"forksCount":44,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-04-15T07:07:02.438Z"}},{"type":"Public","name":"tensorflow_backend","owner":"triton-inference-server","isFork":false,"description":"The Triton backend for TensorFlow.","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":0,"issueCount":0,"starsCount":39,"forksCount":17,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-04-11T00:08:38.617Z"}},{"type":"Public","name":"square_backend","owner":"triton-inference-server","isFork":false,"description":"Simple Triton backend used for testing.","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":0,"issueCount":0,"starsCount":2,"forksCount":4,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-04-11T00:08:32.150Z"}},{"type":"Public","name":"repeat_backend","owner":"triton-inference-server","isFork":false,"description":"An example Triton backend that demonstrates sending zero, one, or multiple responses for each request. 
","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":0,"issueCount":0,"starsCount":5,"forksCount":7,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-04-11T00:08:19.795Z"}},{"type":"Public","name":"redis_cache","owner":"triton-inference-server","isFork":false,"description":"TRITONCACHE implementation of a Redis cache","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":0,"issueCount":2,"starsCount":5,"forksCount":4,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-04-11T00:08:12.110Z"}},{"type":"Public","name":"local_cache","owner":"triton-inference-server","isFork":false,"description":"Implementation of a local in-memory cache for Triton Inference Server's TRITONCACHE API","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":0,"issueCount":1,"starsCount":2,"forksCount":1,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[0,0,0,0,1,0,2,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-04-11T00:07:34.089Z"}},{"type":"Public","name":"identity_backend","owner":"triton-inference-server","isFork":false,"description":"Example Triton backend that demonstrates most of the Triton Backend API.","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":0,"issueCount":0,"starsCount":6,"forksCount":13,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-04-11T00:07:28.131Z"}},{"type":"Public","name":"checksum_repository_agent","owner":"triton-inference-server","isFork":false,"description":"The Triton repository agent that verifies model checksums.","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":0,"issueCount":0,"starsCount":7,"forksCount":6,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-04-11T00:06:50.209Z"}},{"type":"Public","name":"backend","owner":"triton-inference-server","isFork":false,"description":"Common source, scripts and utilities for creating Triton backends.","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":1,"issueCount":0,"starsCount":259,"forksCount":78,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-04-11T00:06:44.501Z"}},{"type":"Public","name":"contrib","owner":"triton-inference-server","isFork":false,"description":"Community contributions to Triton that are not officially supported or maintained by the Triton project.","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":0,"starsCount":8,"forksCount":7,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-02-16T23:07:29.062Z"}},{"type":"Public","name":"stateful_backend","owner":"triton-inference-server","isFork":false,"description":"Triton backend 
- **contrib** (Python · 8 stars · 7 forks · BSD-3-Clause · updated 2024-02-16): Community contributions to Triton that are not officially supported or maintained by the Triton project.
- **stateful_backend** (C++ · 9 stars · 4 forks · MIT · updated 2024-02-12): Triton backend for managing model state tensors automatically in the sequence batcher. Topics: backend, triton, stateful.