From de3ae1c60d600d9c7115937196be1125e0b968e6 Mon Sep 17 00:00:00 2001
From: twishaptel12
Date: Tue, 27 Jan 2026 14:46:13 +0530
Subject: [PATCH 1/2] feat: add optional rag-workflows skill

---
 .opencode/skills/rag-workflows/SKILL.md | 40 +++++++++++++++++++++++++
 1 file changed, 40 insertions(+)
 create mode 100644 .opencode/skills/rag-workflows/SKILL.md

diff --git a/.opencode/skills/rag-workflows/SKILL.md b/.opencode/skills/rag-workflows/SKILL.md
new file mode 100644
index 00000000..e3c2080e
--- /dev/null
+++ b/.opencode/skills/rag-workflows/SKILL.md
@@ -0,0 +1,40 @@
+---
+name: rag-workflows
+description: Retrieval-Augmented Generation (RAG) workflows for document-based Q&A
+---
+
+
+
+## Overview
+
+This skill describes patterns and workflows for **Retrieval-Augmented Generation (RAG)** in OpenWork, with a focus on **local document question answering**.
+
+RAG combines document retrieval with large language models to answer questions grounded in external context rather than relying solely on model knowledge.
+
+This skill is intended to help users design and reason about RAG-style workflows within OpenWork.
+
+## Local RAG with Ollama
+
+A common use case is running RAG **fully locally** using Ollama as the LLM backend. This enables:
+
+- Querying private or sensitive documents
+- Offline experimentation
+- Avoiding external API dependencies
+
+Typical steps in a local RAG workflow include:
+1. Preparing a set of local documents
+2. Retrieving relevant chunks based on a query
+3. Providing retrieved context to a local LLM via Ollama
+4. Generating an answer grounded in the retrieved context
+
+## Example Use Cases
+
+- Question answering over local markdown or text files
+- Exploring private knowledge bases
+- Prototyping RAG pipelines before production deployment
+
+## Notes
+
+- This skill is **optional** and **not enabled by default**
+- It becomes available via the Skills settings when present
+- This skill currently provides conceptual guidance and patterns rather than executable workflows
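
For orientation before the follow-up patch, the four steps listed in the SKILL.md above map onto a fairly small loop. The sketch below is an illustration only, under stated assumptions: plain `.md`/`.txt` files in a `./docs` folder, fixed-size chunking and keyword-overlap scoring as a stand-in for embedding-based retrieval, and a local Ollama server on its default port (`localhost:11434`) with the `llama3` model already pulled. None of these names or choices come from the skill itself.

```python
# Illustrative local RAG loop: steps 1-2 (load + retrieve) and 3-4 (context -> Ollama -> answer).
# Assumptions: plain .md/.txt files under ./docs, keyword-overlap retrieval, and Ollama
# serving llama3 on its default port.
import json
import pathlib
import urllib.request

OLLAMA_URL = "http://localhost:11434/api/generate"

def load_chunks(folder: str, chunk_size: int = 800) -> list[str]:
    """Step 1: read local documents and split them into fixed-size chunks."""
    chunks: list[str] = []
    for path in pathlib.Path(folder).rglob("*"):
        if path.suffix.lower() in {".md", ".txt"}:
            text = path.read_text(encoding="utf-8", errors="ignore")
            chunks += [text[i:i + chunk_size] for i in range(0, len(text), chunk_size)]
    return chunks

def retrieve(chunks: list[str], question: str, top_k: int = 3) -> list[str]:
    """Step 2: rank chunks by word overlap with the question (stand-in for embedding search)."""
    terms = set(question.lower().split())
    ranked = sorted(chunks, key=lambda c: len(terms & set(c.lower().split())), reverse=True)
    return ranked[:top_k]

def answer(question: str, context_chunks: list[str], model: str = "llama3") -> str:
    """Steps 3-4: hand the retrieved context to a local Ollama model and generate a grounded answer."""
    prompt = (
        "Answer the question using only the context below.\n\n"
        "Context:\n" + "\n---\n".join(context_chunks) +
        f"\n\nQuestion: {question}\nAnswer:"
    )
    payload = json.dumps({"model": model, "prompt": prompt, "stream": False}).encode()
    req = urllib.request.Request(OLLAMA_URL, data=payload,
                                 headers={"Content-Type": "application/json"})
    with urllib.request.urlopen(req) as resp:
        return json.loads(resp.read())["response"]

if __name__ == "__main__":
    question = "What does this project do?"
    chunks = load_chunks("./docs")
    print(answer(question, retrieve(chunks, question)))
```

Swapping the overlap heuristic for a vector store is the usual next step once this basic loop works; the Ollama call itself stays the same.
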
From 71c0aefa2ad82ef029f671a4e9625a23c39bf7aa Mon Sep 17 00:00:00 2001
From: twishaptel12
Date: Tue, 3 Feb 2026 20:34:24 +0530
Subject: [PATCH 2/2] docs: update rag-workflows SKILL.md with example and ollama setup

---
 .opencode/skills/rag-workflows/SKILL.md    | 70 +++++++++++++++----
 .../skills/rag-workflows/prompts/doc-qa.md | 12 ++++
 .../workflows/local-ollama-doc-qa.yaml     | 31 ++++
 3 files changed, 100 insertions(+), 13 deletions(-)
 create mode 100644 .opencode/skills/rag-workflows/prompts/doc-qa.md
 create mode 100644 .opencode/skills/rag-workflows/workflows/local-ollama-doc-qa.yaml

diff --git a/.opencode/skills/rag-workflows/SKILL.md b/.opencode/skills/rag-workflows/SKILL.md
index e3c2080e..19339c82 100644
--- a/.opencode/skills/rag-workflows/SKILL.md
+++ b/.opencode/skills/rag-workflows/SKILL.md
@@ -17,24 +17,68 @@ This skill is intended to help users design and reason about RAG-style workflows
 
 A common use case is running RAG **fully locally** using Ollama as the LLM backend. This enables:
 
-- Querying private or sensitive documents
-- Offline experimentation
-- Avoiding external API dependencies
+- Querying private or sensitive documents
+- Offline experimentation
+- Avoiding external API dependencies
 
 Typical steps in a local RAG workflow include:
-1. Preparing a set of local documents
-2. Retrieving relevant chunks based on a query
-3. Providing retrieved context to a local LLM via Ollama
-4. Generating an answer grounded in the retrieved context
+1. Preparing a set of local documents
+2. Retrieving relevant chunks based on a query
+3. Providing retrieved context to a local LLM via Ollama
+4. Generating an answer grounded in the retrieved context
+
+## Setup (Ollama)
+
+To use the included example workflow, you will need Ollama installed and running locally.
+
+### 1. Install Ollama
+
+Follow the official installation instructions for your platform:
+https://ollama.com/download
+
+### 2. Start Ollama
+
+After installation, ensure the Ollama service is running. On most systems, this can be done by running:
+
+```bash
+ollama serve
+```
+
+(or by launching the Ollama app if you installed the desktop version).
+
+### 3. Pull the required model
+
+The example workflow uses **llama3**. Install it with:
+
+```bash
+ollama pull llama3
+```
+
+You can verify it is available by running:
+
+```bash
+ollama list
+```
+
+## Included Example
+
+This skill includes a minimal example workflow:
+
+* **local-ollama-doc-qa**
+
+It demonstrates:
+
+* Retrieving context from local documents
+* Answering questions using a local Ollama model
 
 ## Example Use Cases
 
-- Question answering over local markdown or text files
-- Exploring private knowledge bases
-- Prototyping RAG pipelines before production deployment
+* Question answering over local markdown or text files
+* Exploring private knowledge bases
+* Prototyping RAG pipelines before production deployment
 
 ## Notes
 
-- This skill is **optional** and **not enabled by default**
-- It becomes available via the Skills settings when present
-- This skill currently provides conceptual guidance and patterns rather than executable workflows
+* This skill is **optional** and **not enabled by default**
+* It becomes available via the Skills settings when present
+* This skill currently provides conceptual guidance, patterns, and a minimal runnable example
diff --git a/.opencode/skills/rag-workflows/prompts/doc-qa.md b/.opencode/skills/rag-workflows/prompts/doc-qa.md
new file mode 100644
index 00000000..c1979e06
--- /dev/null
+++ b/.opencode/skills/rag-workflows/prompts/doc-qa.md
@@ -0,0 +1,12 @@
+You are a helpful assistant answering questions based only on the provided context.
+
+If the answer cannot be found in the context, say:
+"I don't know based on the provided documents."
+
+Context:
+{{context}}
+
+Question:
+{{question}}
+
+Answer:
diff --git a/.opencode/skills/rag-workflows/workflows/local-ollama-doc-qa.yaml b/.opencode/skills/rag-workflows/workflows/local-ollama-doc-qa.yaml
new file mode 100644
index 00000000..aba74a96
--- /dev/null
+++ b/.opencode/skills/rag-workflows/workflows/local-ollama-doc-qa.yaml
@@ -0,0 +1,31 @@
+# //review-2026-02-15 @twishapatel12
+
+name: local-ollama-doc-qa
+description: Local document Q&A using Ollama
+
+inputs:
+  documents_path:
+    type: string
+    description: Path to a folder containing local documents
+  question:
+    type: string
+    description: Question to ask about the documents
+
+steps:
+  - id: retrieve
+    type: rag.retrieve
+    with:
+      path: "{{documents_path}}"
+
+  - id: answer
+    type: llm.generate
+    with:
+      provider: ollama
+      model: llama3
+      prompt: ../prompts/doc-qa.md
+      context: "{{steps.retrieve.context}}"
+      question: "{{question}}"
+
+outputs:
+  answer:
+    value: "{{steps.answer.text}}"
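
For reference, the `answer` step of `local-ollama-doc-qa.yaml` amounts to filling the `{{context}}` and `{{question}}` placeholders in `prompts/doc-qa.md` and sending the rendered prompt to the local `llama3` model. The sketch below assumes plain string substitution for the `{{...}}` placeholders and Ollama's default endpoint; when the workflow actually runs, placeholder resolution and the `rag.retrieve`/`llm.generate` steps are handled by OpenWork, and the sample context and question are made up for illustration.

```python
# Illustrative version of the workflow's `answer` step: render prompts/doc-qa.md and
# call the local llama3 model via Ollama. The str.replace() substitution for the
# {{...}} placeholders is an assumption; OpenWork's workflow engine performs this
# when the YAML workflow runs.
import json
import pathlib
import urllib.request

def render_prompt(template_path: str, context: str, question: str) -> str:
    """Fill the {{context}} and {{question}} placeholders in the doc-qa template."""
    template = pathlib.Path(template_path).read_text(encoding="utf-8")
    return template.replace("{{context}}", context).replace("{{question}}", question)

def generate(prompt: str, model: str = "llama3") -> str:
    """Send the rendered prompt to a local Ollama server and return the model's answer."""
    payload = json.dumps({"model": model, "prompt": prompt, "stream": False}).encode()
    req = urllib.request.Request("http://localhost:11434/api/generate", data=payload,
                                 headers={"Content-Type": "application/json"})
    with urllib.request.urlopen(req) as resp:
        return json.loads(resp.read())["response"]

if __name__ == "__main__":
    prompt = render_prompt(
        ".opencode/skills/rag-workflows/prompts/doc-qa.md",
        context="Skills are optional and become available via the Skills settings when present.",
        question="How does a skill become available?",
    )
    print(generate(prompt))
```

Because doc-qa.md instructs the model to answer only from the provided context, an empty or irrelevant context should yield the template's fallback sentence rather than an invented answer.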