{"payload":{"header_redesign_enabled":false,"results":[{"id":"738776129","archived":false,"color":"#3572A5","followers":81,"has_funding_file":false,"hl_name":"intel/auto-round","hl_trunc_description":"SOTA Weight-only Quantization Algorithm for LLMs. This is official implementation of \"Optimize Weight Rounding via Signed Gradient Descen…","language":"Python","mirror":false,"owned_by_organization":true,"public":true,"repo":{"repository":{"id":738776129,"name":"auto-round","owner_id":17888862,"owner_login":"intel","updated_at":"2024-05-27T13:47:40.575Z","has_issues":true}},"sponsorable":false,"topics":["rounding","quantization","awq","int4","gptq","neural-compressor","weight-only"],"type":"Public","help_wanted_issues_count":0,"good_first_issue_issues_count":0,"starred_by_current_user":false}],"type":"repositories","page":1,"page_count":1,"elapsed_millis":87,"errors":[],"result_count":1,"facets":[],"protected_org_logins":[],"topics":null,"query_id":"","logged_in":false,"sign_up_path":"/signup?source=code_search_results","sign_in_path":"/login?return_to=https%3A%2F%2Fgithub.com%2Fsearch%3Fq%3Drepo%253Aintel%252Fauto-round%2B%2Blanguage%253APython","metadata":null,"csrf_tokens":{"/intel/auto-round/star":{"post":"wwQ9KSGc0PzpWFucSl-NLwA5br7btSSmEvJp4j6fICRvDezaFyOINZo1n1P8PtkFLls4ppOCc7ZabRekFo5PTw"},"/intel/auto-round/unstar":{"post":"2JYZx3EwTXmkEM0LVI8VXgWBfVnchhOMqPwZvD7CMIA4A2VbIckXHA41kUlfjYGpbbnpxug77O0UzJ8LBtqslQ"},"/sponsors/batch_deferred_sponsor_buttons":{"post":"ixLM52aQ1IH0G-NdupAiCed2Nr6gZy7uKoosfuxq-Ww2Npk7BrnRGsvH8dxldKZC4CRMcZ-CShhzYTwaXeo1Ag"}}},"title":"Repository search results"}