{"payload":{"header_redesign_enabled":false,"results":[{"id":"726349745","archived":false,"color":"#3572A5","followers":169,"has_funding_file":false,"hl_name":"WisconsinAIVision/ViP-LLaVA","hl_trunc_description":"[CVPR2024] ViP-LLaVA: Making Large Multimodal Models Understand Arbitrary Visual Prompts","language":"Python","mirror":false,"owned_by_organization":true,"public":true,"repo":{"repository":{"id":726349745,"name":"ViP-LLaVA","owner_id":166878028,"owner_login":"WisconsinAIVision","updated_at":"2024-05-01T18:02:37.307Z","has_issues":true}},"sponsorable":false,"topics":["chatbot","llama","multi-modal","clip","vision-language","gpt-4","foundation-models","visual-prompting","llava","llama2","cvpr2024","gpt-4-vision"],"type":"Public","help_wanted_issues_count":0,"good_first_issue_issues_count":0,"starred_by_current_user":false}],"type":"repositories","page":1,"page_count":1,"elapsed_millis":72,"errors":[],"result_count":1,"facets":[],"protected_org_logins":[],"topics":null,"query_id":"","logged_in":false,"sign_up_path":"/signup?source=code_search_results","sign_in_path":"/login?return_to=https%3A%2F%2Fgithub.com%2Fsearch%3Fq%3Drepo%253AWisconsinAIVision%252FViP-LLaVA%2B%2Blanguage%253APython","metadata":null,"csrf_tokens":{"/WisconsinAIVision/ViP-LLaVA/star":{"post":"eupkHWbVqzxIirhT0WIhYlsYZ6T0WGvKbdY-YhOgHDQuNXCU-z6mUSZXOzZ1OBPV-nEzAxzgXxeEc9pybUEtcQ"},"/WisconsinAIVision/ViP-LLaVA/unstar":{"post":"N590WL2HGlPpudgcij0e72_CTVoUyTa8QK9nT8KZ3wndBJxmx9XBmQnfHFYHvgPd28AYD8Pz1b_z49dt0bhiUQ"},"/sponsors/batch_deferred_sponsor_buttons":{"post":"DJJ0kpe0fQ8gF6g2XYxFzduz5_3Y-GBawM8TKQnyJS355otiL33KsCNlnAqOSNCEMNzojOJx2ylyqXj82QNBDA"}}},"title":"Repository search results"}