class LLMTextCompletionProgram(BasePydanticProgram[Model]):
    """
    LLM Text Completion Program.

    Uses generic LLM text completion + an output parser to generate a
    structured output (an instance of ``output_cls``).
    """

    def __init__(
        self,
        output_parser: BaseOutputParser,
        output_cls: Type[Model],
        prompt: BasePromptTemplate,
        llm: LLM,
        verbose: bool = False,
    ) -> None:
        """Init params.

        Args:
            output_parser: Parses raw LLM text into an ``output_cls`` instance.
            output_cls: Target structured-output class for this program.
            prompt: Prompt template formatted with call-time kwargs.
            llm: LLM used to run the completion (chat vs. text is decided
                per-call from ``llm.metadata.is_chat_model``).
            verbose: Verbosity flag (stored; not otherwise read in this class).
        """
        self._output_parser = output_parser
        self._output_cls = output_cls
        self._llm = llm
        self._prompt = prompt
        self._verbose = verbose

        # Attach the parser to the prompt so it can inject format instructions.
        self._prompt.output_parser = output_parser

    @classmethod
    def from_defaults(
        cls,
        output_parser: Optional[BaseOutputParser] = None,
        output_cls: Optional[Type[Model]] = None,
        prompt_template_str: Optional[str] = None,
        prompt: Optional[BasePromptTemplate] = None,
        llm: Optional[LLM] = None,
        verbose: bool = False,
        **kwargs: Any,
    ) -> "LLMTextCompletionProgram[Model]":
        """Build a program, filling in defaults.

        Exactly one of ``prompt`` or ``prompt_template_str`` must be given.
        If ``output_cls`` is omitted it is taken from the (required)
        ``PydanticOutputParser``; if ``output_parser`` is omitted a
        ``PydanticOutputParser`` is created from ``output_cls``.

        Raises:
            ValueError: If neither or both of ``prompt`` /
                ``prompt_template_str`` are provided, or if ``output_cls``
                is omitted and ``output_parser`` is not a
                ``PydanticOutputParser``.
        """
        llm = llm or Settings.llm
        if prompt is None and prompt_template_str is None:
            raise ValueError("Must provide either prompt or prompt_template_str.")
        if prompt is not None and prompt_template_str is not None:
            # Fixed: previously reused the "either" message, which is
            # misleading when the caller supplied both arguments.
            raise ValueError(
                "Must provide only one of prompt or prompt_template_str."
            )
        if prompt_template_str is not None:
            prompt = PromptTemplate(prompt_template_str)

        # Decide default output class if not set.
        if output_cls is None:
            if not isinstance(output_parser, PydanticOutputParser):
                raise ValueError("Output parser must be PydanticOutputParser.")
            output_cls = output_parser.output_cls
        else:
            if output_parser is None:
                output_parser = PydanticOutputParser(output_cls=output_cls)

        return cls(
            output_parser,
            output_cls,
            prompt=cast(PromptTemplate, prompt),
            llm=llm,
            verbose=verbose,
        )

    @property
    def output_cls(self) -> Type[Model]:
        """Structured-output class produced by this program."""
        return self._output_cls

    @property
    def prompt(self) -> BasePromptTemplate:
        """Prompt template used to format LLM requests."""
        return self._prompt

    @prompt.setter
    def prompt(self, prompt: BasePromptTemplate) -> None:
        self._prompt = prompt

    def _parse_output(self, raw_output: str) -> Model:
        """Parse raw LLM text and verify the result is an ``output_cls``.

        Shared by ``__call__`` and ``acall`` (previously duplicated inline).

        Raises:
            ValueError: If the parser returns a non-``output_cls`` object.
        """
        output = self._output_parser.parse(raw_output)
        if not isinstance(output, self._output_cls):
            raise ValueError(
                f"Output parser returned {type(output)} but expected {self._output_cls}"
            )
        return output

    def __call__(
        self,
        llm_kwargs: Optional[Dict[str, Any]] = None,
        *args: Any,
        **kwargs: Any,
    ) -> Model:
        """Run the program synchronously.

        Formats the prompt with ``kwargs``, runs chat or text completion
        depending on the LLM's metadata, and parses the raw text output.
        """
        llm_kwargs = llm_kwargs or {}
        if self._llm.metadata.is_chat_model:
            messages = self._prompt.format_messages(llm=self._llm, **kwargs)
            messages = self._llm._extend_messages(messages)
            chat_response = self._llm.chat(messages, **llm_kwargs)
            # Chat message content may be None; normalize to empty string.
            raw_output = chat_response.message.content or ""
        else:
            formatted_prompt = self._prompt.format(llm=self._llm, **kwargs)
            response = self._llm.complete(formatted_prompt, **llm_kwargs)
            raw_output = response.text

        return self._parse_output(raw_output)

    async def acall(
        self,
        llm_kwargs: Optional[Dict[str, Any]] = None,
        *args: Any,
        **kwargs: Any,
    ) -> Model:
        """Async variant of ``__call__`` (uses ``achat`` / ``acomplete``)."""
        llm_kwargs = llm_kwargs or {}
        if self._llm.metadata.is_chat_model:
            messages = self._prompt.format_messages(llm=self._llm, **kwargs)
            messages = self._llm._extend_messages(messages)
            chat_response = await self._llm.achat(messages, **llm_kwargs)
            # Chat message content may be None; normalize to empty string.
            raw_output = chat_response.message.content or ""
        else:
            formatted_prompt = self._prompt.format(llm=self._llm, **kwargs)
            response = await self._llm.acomplete(formatted_prompt, **llm_kwargs)
            raw_output = response.text

        return self._parse_output(raw_output)