# Absolute directory containing this script, so sibling files (the LM Studio
# AppImage, lm-studio.desktop) can be referenced regardless of the caller's cwd.
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"

# -----
# Dependencies
# -----

1115# Nix is required for llama.cpp
1216if ! command -v nix & > /dev/null; then
1317 echo " Installing Nix."
@@ -16,15 +20,21 @@ if ! command -v nix &> /dev/null; then
1620 return 1
1721fi
1822
# -----
# Local AI
# -----

# LM Studio takes care of these

# if ! command -v ollama &> /dev/null; then
#   echo "Installing Ollama."
#   curl -fsSL https://ollama.com/install.sh | sh
# fi

# if ! command -v llama-server &> /dev/null; then
#   echo "Installing llama.cpp."
#   nix profile install nixpkgs#llama-cpp --extra-experimental-features nix-command --extra-experimental-features flakes
# fi

# if [ ! -f "${SCRIPT_DIR}/LM-Studio-"* ]; then
#   echo "Downloading LM Studio."
#   chmod +x "${SCRIPT_DIR}/LM-Studio-"*
# fi

# Make the downloaded LM Studio AppImage executable and register its desktop
# launcher. A glob loop is used instead of `[ -f "...LM-Studio-"* ]` because
# that test errors out when the glob matches more than one file and compares
# a literal pattern when it matches none.
for lm_studio_appimage in "${SCRIPT_DIR}/LM-Studio-"*.appimage; do
  # An unmatched glob stays literal; skip that case instead of chmod-ing it.
  [ -e "${lm_studio_appimage}" ] || continue

  echo "Ensuring that LM Studio is executable."
  chmod +x "${lm_studio_appimage}"

  echo "Creating LM Studio shortcut symlink."
  # Ensure the target directory exists before linking (fresh systems lack it).
  mkdir -p "${HOME}/.local/share/applications"
  ln -f -s "${SCRIPT_DIR}/lm-studio.desktop" "${HOME}/.local/share/applications/lm-studio.desktop"
done
# Report the Ollama version when it is installed; silently skip otherwise
# (installation is handled by LM Studio — see the commented section above).
if command -v ollama &> /dev/null; then
  ollama --version
fi
# Report llama.cpp tooling (server, CLI, bench devices) when it is installed.
# `command -v` replaces `which`: it is a builtin and portable.
if command -v llama-server &> /dev/null; then
  command -v llama-server
  llama-server --version

  command -v llama-cli
  # llama-cli -hf Qwen/Qwen2.5-7B-Instruct-GGUF

  llama-bench --list-devices
  # llama-bench --model "${HOME}/.cache/llama.cpp/Qwen_Qwen2.5-7B-Instruct-GGUF_qwen2.5-7b-instruct-q2_k.gguf"
fi
69+ # -----
70+ # Cloud AI
71+ # -----
72+ curl -fsSL https://claude.ai/install.sh | bash
0 commit comments