
Commit 17662d5

feat(lm): prefer dspy.LM per dspy.ai; keep legacy/openai and ollama fallbacks

1 parent: 408acf8

1 file changed

orbit_agent/config.py

Lines changed: 51 additions & 21 deletions
@@ -195,43 +195,73 @@ def get_config() -> AppConfig:
 
 
 def configure_lm() -> AppConfig:
-    """Configure the language model and return config"""
+    """Configure the language model per dspy.ai and return config.
+
+    Priority:
+    1) Use dspy.LM with the `model` string (preferred per https://dspy.ai/)
+    2) If dspy.LM is unavailable or fails and provider is OpenAI, fall back to dsp.GPT3
+    3) If provider is Ollama and dspy.LM fails, fall back to a minimal Ollama adapter
+    """
     config = get_config()
 
+    model = config.lm.model
+    temperature = config.lm.temperature
+    max_tokens = config.lm.max_tokens
+
+    # 1) Preferred: use modern dspy.LM API (as in dspy.ai)
     try:
-        # Use custom lightweight LM adapters compatible with dsp/dspy Predict
-        from .lm_providers import OpenAIChatLM, OllamaLM
+        lm = dspy.LM(
+            model=model,
+            temperature=temperature,
+            max_tokens=max_tokens,
+        )
+        dspy.configure(lm=lm)
+        logger.info(f"Configured LM via dspy.LM: {model}")
+        return config
+    except Exception as e:
+        logger.warning(f"dspy.LM unavailable or failed: {e}")
 
-        model = config.lm.model
-        lm_impl = None
+    # 2) OpenAI fallback via dsp.GPT3 (legacy installs)
+    try:
         if model.startswith("openai/"):
+            import importlib
+
+            dsp = importlib.import_module("dsp")
             mname = model.replace("openai/", "")
-            lm_impl = OpenAIChatLM(
+            lm_impl = dsp.GPT3(
                 model=mname,
                 api_key=config.lm.api_key or os.getenv("OPENAI_API_KEY"),
-                temperature=config.lm.temperature,
-                max_tokens=config.lm.max_tokens,
                 api_base=os.getenv("OPENAI_BASE_URL"),
+                temperature=temperature,
+                max_tokens=max_tokens,
             )
-        elif model.startswith("ollama_chat/") or model.startswith("ollama/"):
+            dspy.configure(lm=lm_impl)
+            logger.info(f"Configured LM via dsp.GPT3: {model}")
+            return config
+    except Exception as e:
+        logger.warning(f"dsp.GPT3 fallback unavailable: {e}")
+
+    # 3) Ollama fallback via minimal adapter
+    try:
+        if model.startswith("ollama_chat/") or model.startswith("ollama/"):
+            from .lm_providers import OllamaLM
+
             mname = model.replace("ollama_chat/", "").replace("ollama/", "")
             lm_impl = OllamaLM(
                 model=mname,
                 base_url=config.lm.api_base or os.getenv("OLLAMA_API_BASE", "http://localhost:11434"),
-                temperature=config.lm.temperature,
-                max_tokens=config.lm.max_tokens,
+                temperature=temperature,
+                max_tokens=max_tokens,
             )
-        else:
-            raise ValueError(f"Unsupported model provider for '{model}'. Use openai/ or ollama_chat/.")
-
-        dspy.configure(lm=lm_impl)
-        logger.info(f"Configured LM: {config.lm.model}")
-        return config
+            dspy.configure(lm=lm_impl)
+            logger.info(f"Configured LM via Ollama adapter: {model}")
+            return config
     except Exception as e:
-        logger.error(f"Failed to configure LM: {e}")
-        logger.info("Continuing without LM configuration for testing")
-        return config
+        logger.warning(f"Ollama adapter fallback unavailable: {e}")
+
+    # Final: no LM configured, allow non-LLM commands to work
+    logger.error("No usable LM configuration found. Proceeding without LM.")
+    return config
 
 
 def reload_config():
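
For reference, the preferred path in this commit follows the current dspy.ai convention: build one dspy.LM object and register it globally with dspy.configure. A minimal usage sketch, where "openai/gpt-4o-mini", the sampling values, and the example question are illustrative assumptions rather than this repository's defaults:

import dspy

# Preferred path per https://dspy.ai/: one LM object, one global configure call.
# dspy.LM accepts "provider/model" identifiers and routes them via LiteLLM,
# reading credentials such as OPENAI_API_KEY from the environment.
lm = dspy.LM("openai/gpt-4o-mini", temperature=0.7, max_tokens=1024)
dspy.configure(lm=lm)

# Any DSPy module now picks up the configured LM implicitly.
qa = dspy.Predict("question -> answer")
print(qa(question="Which port does Ollama listen on by default?").answer)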

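The minimal adapter imported from .lm_providers in step 3 is not part of this diff. Below is a sketch of what such an adapter could look like, assuming Ollama's standard /api/generate endpoint; the constructor signature mirrors the OllamaLM(...) call site above, but the class body is an assumption, not the repository's actual implementation:

import requests


class OllamaLM:
    """Hypothetical minimal Ollama adapter matching the OllamaLM(...) call site above."""

    def __init__(self, model, base_url="http://localhost:11434", temperature=0.7, max_tokens=1024):
        self.model = model
        self.base_url = base_url.rstrip("/")
        self.temperature = temperature
        self.max_tokens = max_tokens

    def __call__(self, prompt, **kwargs):
        # Ollama's non-streaming completion endpoint; num_predict caps output tokens.
        resp = requests.post(
            f"{self.base_url}/api/generate",
            json={
                "model": self.model,
                "prompt": prompt,
                "stream": False,
                "options": {"temperature": self.temperature, "num_predict": self.max_tokens},
            },
            timeout=120,
        )
        resp.raise_for_status()
        # DSPy-style LM callables conventionally return a list of completion strings.
        return [resp.json()["response"]]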