@@ -10,11 +10,15 @@ check: fmt
1010check_fmt :
1111 cargo +nightly fmt -- --check
1212
13- fmt_yaml :
13+ yaml_fmt :
1414 yamlfmt lefthook.yml
1515 yamlfmt -dstar .github/**/*.{yaml,yml}
1616
17- fmt : fmt_yaml
17+ md_fmt :
18+ markdown-fmt -m 80 CONTRIBUTING.md
19+ markdown-fmt -m 80 README.md
20+
21+ fmt : yaml_fmt
1822 cargo +nightly fmt
1923
2024test :
@@ -31,41 +35,3 @@ release: check
3135
3236download_test_video :
3337 wget -O "video.mp4" "https://commondatastorage.googleapis.com/gtv-videos-bucket/sample/ElephantsDream.mp4"
34-
35- llm_ctx :
36- dircat -b -e rs -o ctx.md .
37-
38- llm_grammar_check : llm_ctx
39- gemma-cli -model=gemma-3-12b-it -prompt=.llms/prompts/grammar_check.md -input=ctx.md -output=.llms/grammar_check.md
40-
41- llm_non_idiomatic : llm_ctx
42- gemma-cli -model=gemini-2.5-pro -prompt=.llms/prompts/non_idiomatic.md -input=ctx.md -output=.llms/non_idiomatic.md
43-
44- llm_improve_comments : llm_ctx
45- gemma-cli -model=gemma-3-12b-it -prompt=.llms/prompts/improve_comments.md -input=ctx.md -output=.llms/improve_comments.md
46-
47- llm_llama_grammar_check : llm_ctx
48- python3 .llms/inference/hf.py --model "meta-llama/Llama-4-Scout-17B-16E-Instruct:cerebras" --prompt=.llms/prompts/grammar_check.md --input=ctx.md --output=.llms/llama_grammar_check.md
49-
50- llm_maverick_tests_enhancement : llm_ctx
51- python3 .llms/inference/hf.py --model "meta-llama/Llama-4-Maverick-17B-128E-Instruct:cerebras" --max_tokens 65000 --prompt=.llms/prompts/enhance_tests.md --input=ctx.md --output=.llms/maverick_tests_enhancement.md
52-
53- llm_maverick_enhance_readme : llm_ctx
54- echo " " >> ctx.md
55- echo " Source of of README file:" >> ctx.md
56- echo " \`\`\` markdown" >> ctx.md
57- cat README.md >> ctx.md
58- echo " \`\`\` " >> ctx.md
59- python3 .llms/ inference/ hf.py --model " meta-llama/Llama-4-Maverick-17B-128E-Instruct:cerebras" --max_tokens 8000 --prompt=.llms/ prompts/ enhance_readme.md --input=ctx.md --output=.llms/ maverick_readme.md
60-
61- llm_qwen3_coder_non_idiomatic : llm_ctx
62- python3 .llms/inference/hf.py --model "Qwen/Qwen3-Coder-480B-A35B-Instruct:novita" --prompt=.llms/prompts/non_idiomatic.md --input=ctx.md --output=.llms/qwen3_non_idiomatic.md
63-
64- llm_qwen3_coder_improve_comments : llm_ctx
65- python3 .llms/inference/hf.py --model "Qwen/Qwen3-Coder-480B-A35B-Instruct:novita" --prompt=.llms/prompts/improve_comments.md --input=ctx.md --output=.llms/qwen3_improve_comments.md
66-
67- llm_qwen3_code_review : llm_ctx
68- python3 .llms/inference/hf.py --model "Qwen/Qwen3-Coder-480B-A35B-Instruct:novita" --prompt=.llms/prompts/code_review.md --input=ctx.md --output=.llms/qwen3_code_review.md
69-
70- llm_glm45air_code_review : llm_ctx
71- python3 .llms/inference/hf.py --model "zai-org/GLM-4.5-Air-FP8:together" --max_tokens 96000 --prompt=.llms/prompts/code_review.md --input=ctx.md --output=.llms/glm45air_code_review.md
0 commit comments