{"payload":{"header_redesign_enabled":false,"results":[{"id":"461904687","archived":false,"color":"#DA5B0B","followers":409,"has_funding_file":false,"hl_name":"audeering/w2v2-how-to","hl_trunc_description":"How to use our public wav2vec2 dimensional emotion model","language":"Jupyter Notebook","mirror":false,"owned_by_organization":true,"public":true,"repo":{"repository":{"id":461904687,"name":"w2v2-how-to","owner_id":30039206,"owner_login":"audeering","updated_at":"2023-05-22T13:00:31.194Z","has_issues":true}},"sponsorable":false,"topics":["deep-learning","valence","arousal","onnx","speech-emotion-recognition","dominance","transformer-models","wav2vec2","msp-podcast"],"type":"Public","help_wanted_issues_count":0,"good_first_issue_issues_count":0,"starred_by_current_user":false}],"type":"repositories","page":1,"page_count":1,"elapsed_millis":70,"errors":[],"result_count":1,"facets":[],"protected_org_logins":[],"topics":null,"query_id":"","logged_in":false,"sign_up_path":"/signup?source=code_search_results","sign_in_path":"/login?return_to=https%3A%2F%2Fgithub.com%2Fsearch%3Fq%3Drepo%253Aaudeering%252Fw2v2-how-to%2B%2Blanguage%253A%2522Jupyter%2BNotebook%2522","metadata":null,"csrf_tokens":{"/audeering/w2v2-how-to/star":{"post":"5bTqqfMSIqHXC_okojfJ_Bzg4qFYQ6xgzbw1phVBaP-xOSYK9N6Ac0PDIhYb96fAXHGZNucvBVPnAloDLs7mAw"},"/audeering/w2v2-how-to/unstar":{"post":"QABuuAn79v_WC5xFGPH-yaDiFYkMjf-gwCXAFJJYLPn0RfmOmOU_YiZbBNBjfkyhVvPPWBB5-C147ktSmSU3-w"},"/sponsors/batch_deferred_sponsor_buttons":{"post":"EuGSzti6xRLypMfX6FNR7K9-vUXh0uh1URNVRpB-s0_FB4OD8rkRqV1M4VzvrF3qHyICcgpn7DLMP6yyaHy6Zg"}}},"title":"Repository search results"}