From c70ac065e8435dc566e707a618940d8cb8bfc767 Mon Sep 17 00:00:00 2001 From: Jeff MAURY Date: Tue, 26 Mar 2024 09:46:09 +0100 Subject: [PATCH] fix: update recipe catalog - Use latest version of the recipes - Update preferred models for code generation - Skip empty categories Fixes #481 Signed-off-by: Jeff MAURY --- packages/backend/src/assets/ai.json | 10 +++++----- packages/frontend/src/pages/Recipes.svelte | 13 +++++++------ 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/packages/backend/src/assets/ai.json b/packages/backend/src/assets/ai.json index e0ff1a56b..3b1b0d312 100644 --- a/packages/backend/src/assets/ai.json +++ b/packages/backend/src/assets/ai.json @@ -5,7 +5,7 @@ "description" : "This is a Streamlit chat demo application.", "name" : "ChatBot", "repository": "https://github.com/redhat-et/locallm", - "ref": "6795ba1", + "ref": "00cd386", "icon": "natural-language-processing", "categories": [ "natural-language-processing" @@ -29,7 +29,7 @@ "description" : "This is a Streamlit demo application for summarizing text.", "name" : "Summarizer", "repository": "https://github.com/redhat-et/locallm", - "ref": "10bc46e", + "ref": "00cd386", "icon": "natural-language-processing", "categories": [ "natural-language-processing" @@ -53,7 +53,7 @@ "description" : "This is a code-generation demo application.", "name" : "Code Generation", "repository": "https://github.com/redhat-et/locallm", - "ref": "a5e830d", + "ref": "00cd386", "icon": "generator", "categories": [ "natural-language-processing" @@ -61,11 +61,11 @@ "config": "code-generation/ai-studio.yaml", "readme": "# Code Generation\n\nThis example will deploy a local code-gen application using a llama.cpp model server and a python app built with langchain. 
\n\n### Download Model\n\n- **codellama**\n\n - Download URL: `wget https://huggingface.co/TheBloke/CodeLlama-7B-Instruct-GGUF/resolve/main/codellama-7b-instruct.Q4_K_M.gguf` \n\n```\n\ncd ../models\n\nwget \n\ncd ../\n\n```\n\n### Deploy Model Service\n\nTo start the model service, refer to [the playground model-service document](../playground/README.md). Deploy the LLM server and volumn mount the model of choice.\n\n```\n\npodman run --rm -it -d \\ \n\n -p 8001:8001 \\ \n\n -v Local/path/to/locallm/models:/locallm/models:ro,Z \\ \n\n -e MODEL_PATH=models/ \\ \n\n -e HOST=0.0.0.0 \\ \n\n -e PORT=8001 \\ \n\n playground:image\n\n```\n\n### Build Container Image\n\nOnce the model service is deployed, then follow the instruction below to build your container image and run it locally. \n\n- `podman build -t codegen-app code-generation -f code-generation/builds/Containerfile`\n\n- `podman run -it -p 8501:8501 codegen-app -- -m http://10.88.0.1:8001/v1` ", "models": [ + "hf.TheBloke.mistral-7b-code-16k-qlora.Q4_K_M", + "hf.TheBloke.mistral-7b-codealpaca-lora.Q4_K_M", "hf.TheBloke.mistral-7b-instruct-v0.1.Q4_K_M", "hf.NousResearch.Hermes-2-Pro-Mistral-7B.Q4_K_M", "hf.ibm.merlinite-7b-Q4_K_M", - "hf.TheBloke.mistral-7b-codealpaca-lora.Q4_K_M", - "hf.TheBloke.mistral-7b-code-16k-qlora.Q4_K_M", "hf.froggeric.Cerebrum-1.0-7b-Q4_KS", "hf.TheBloke.openchat-3.5-0106.Q4_K_M", "hf.TheBloke.mistral-7b-openorca.Q4_K_M", diff --git a/packages/frontend/src/pages/Recipes.svelte b/packages/frontend/src/pages/Recipes.svelte index b81eb0c93..0bbb61223 100644 --- a/packages/frontend/src/pages/Recipes.svelte +++ b/packages/frontend/src/pages/Recipes.svelte @@ -18,12 +18,13 @@ $: categories = $catalog.categories; displayDescription="{false}" /> {#each categories as category} - - + {#if $catalog.recipes.some(r => r.categories.includes(category.id))} + + {/if} {/each}