import asyncio

from sentimatrix import Sentimatrix
from sentimatrix.config import SentimatrixConfig, LLMConfig

# Point the client at a local LM Studio OpenAI-compatible endpoint.
config = SentimatrixConfig(
    llm=LLMConfig(
        provider="lmstudio",
        model="local-model",  # Model loaded in LM Studio
        api_base="http://localhost:1234/v1",
    )
)


async def main() -> None:
    """Summarize reviews using the locally hosted LM Studio model."""
    # NOTE(review): `reviews` is not defined in this snippet — it must be
    # supplied by the surrounding example/caller.
    async with Sentimatrix(config) as sm:
        summary = await sm.summarize_reviews(reviews)


# `async with` / `await` are only valid inside an async function, so the
# example is wrapped in main() and driven by asyncio.run().
if __name__ == "__main__":
    asyncio.run(main())
# Full LM Studio configuration showing every tunable option.
# NOTE(review): the original mangled line placed "# LM Studio server" so that
# it commented out temperature/max_tokens/timeout — restored here as keyword
# arguments with the comment kept inline.
LLMConfig(
    provider="lmstudio",
    model="local-model",
    api_base="http://localhost:1234/v1",  # LM Studio server
    temperature=0.7,
    max_tokens=4096,
    timeout=60,  # Local inference may be slower
)
import asyncio

# Perfect for sensitive data that can't leave your network
config = SentimatrixConfig(
    llm=LLMConfig(provider="lmstudio", api_base="http://localhost:1234/v1")
)


async def analyze_private_reviews() -> None:
    """Batch-analyze sensitive reviews entirely on the local network."""
    async with Sentimatrix(config) as sm:
        # All processing happens locally
        # NOTE(review): `load_private_data` is not defined in this snippet —
        # it must be provided by the surrounding example/caller.
        sensitive_reviews = load_private_data()
        results = await sm.analyze_batch(sensitive_reviews)


# `async with` / `await` require an async function; run the example with:
#     asyncio.run(analyze_private_reviews())