@deprecated("Please use `FunctionCallingProgram` instead.")classOpenAIPydanticProgram(BaseLLMFunctionProgram[LLM]):""" An OpenAI-based function that returns a pydantic model. Note: this interface is not yet stable. """def__init__(self,output_cls:Type[Model],llm:LLM,prompt:BasePromptTemplate,tool_choice:Union[str,Dict[str,Any]],allow_multiple:bool=False,verbose:bool=False,)->None:"""Init params."""self._output_cls=output_clsself._llm=llmself._prompt=promptself._verbose=verboseself._allow_multiple=allow_multipleself._tool_choice=tool_choice@classmethoddeffrom_defaults(cls,output_cls:Type[Model],prompt_template_str:Optional[str]=None,prompt:Optional[PromptTemplate]=None,llm:Optional[LLM]=None,verbose:bool=False,allow_multiple:bool=False,tool_choice:Optional[Union[str,Dict[str,Any]]]=None,**kwargs:Any,)->"OpenAIPydanticProgram":llm=llmorSettings.llmifnotisinstance(llm,OpenAI):raiseValueError("OpenAIPydanticProgram only supports OpenAI LLMs. "f"Got: {type(llm)}")ifnotllm.metadata.is_function_calling_model:raiseValueError(f"Model name {llm.metadata.model_name} does not support ""function calling API. ")ifpromptisNoneandprompt_template_strisNone:raiseValueError("Must provide either prompt or prompt_template_str.")ifpromptisnotNoneandprompt_template_strisnotNone:raiseValueError("Must provide either prompt or prompt_template_str.")ifprompt_template_strisnotNone:prompt=PromptTemplate(prompt_template_str)tool_choice=tool_choiceor_default_tool_choice(output_cls,allow_multiple)returncls(output_cls=output_cls,llm=llm,prompt=cast(PromptTemplate,prompt),tool_choice=tool_choice,allow_multiple=allow_multiple,verbose=verbose,)@propertydefoutput_cls(self)->Type[Model]:returnself._output_cls@propertydefprompt(self)->BasePromptTemplate:returnself._prompt@prompt.setterdefprompt(self,prompt:BasePromptTemplate)->None:self._prompt=promptdef__call__(self,llm_kwargs:Optional[Dict[str,Any]]=None,*args:Any,**kwargs:Any,)->Union[Model,List[Model]]:llm_kwargs=llm_kwargsor{}description=self._description_eval(**kwargs)openai_fn_spec=to_openai_tool(self._output_cls,description=description)messages=self._prompt.format_messages(llm=self._llm,**kwargs)if"tool_choice"notinllm_kwargs:llm_kwargs["tool_choice"]=self._tool_choicechat_response=self._llm.chat(messages=messages,tools=[openai_fn_spec],**llm_kwargs,)message=chat_response.messageif"tool_calls"notinmessage.additional_kwargs:raiseValueError("Expected tool_calls in ai_message.additional_kwargs, ""but none found.")tool_calls=message.additional_kwargs["tool_calls"]return_parse_tool_calls(tool_calls,output_cls=self.output_cls,allow_multiple=self._allow_multiple,verbose=self._verbose,)asyncdefacall(self,llm_kwargs:Optional[Dict[str,Any]]=None,*args:Any,**kwargs:Any,)->Union[Model,List[Model]]:llm_kwargs=llm_kwargsor{}description=self._description_eval(**kwargs)openai_fn_spec=to_openai_tool(self._output_cls,description=description)messages=self._prompt.format_messages(llm=self._llm,**kwargs)if"tool_choice"notinllm_kwargs:llm_kwargs["tool_choice"]=self._tool_choicechat_response=awaitself._llm.achat(messages=messages,tools=[openai_fn_spec],**llm_kwargs,)message=chat_response.messageif"tool_calls"notinmessage.additional_kwargs:raiseValueError("Expected function call in ai_message.additional_kwargs, ""but none 
found.")tool_calls=message.additional_kwargs["tool_calls"]return_parse_tool_calls(tool_calls,output_cls=self.output_cls,allow_multiple=self._allow_multiple,verbose=self._verbose,)defstream_list(self,llm_kwargs:Optional[Dict[str,Any]]=None,*args:Any,**kwargs:Any,)->Generator[Model,None,None]:"""Streams a list of objects."""llm_kwargs=llm_kwargsor{}messages=self._prompt.format_messages(llm=self._llm,**kwargs)description=self._description_eval(**kwargs)list_output_cls=create_list_model(self._output_cls)openai_fn_spec=to_openai_tool(list_output_cls,description=description)if"tool_choice"notinllm_kwargs:llm_kwargs["tool_choice"]=_default_tool_choice(list_output_cls)chat_response_gen=self._llm.stream_chat(messages=messages,tools=[openai_fn_spec],**llm_kwargs,)# extract function call arguments# obj_start_idx finds start position (before a new "{" in JSON)obj_start_idx:int=-1# NOTE: uninitializedforstream_respinchat_response_gen:kwargs=stream_resp.message.additional_kwargstool_calls=kwargs["tool_calls"]iflen(tool_calls)==0:continue# NOTE: right now assume only one tool call# TODO: handle parallel tool calls in streaming settingfn_args=kwargs["tool_calls"][0].function.arguments# this is inspired by `get_object` from `MultiTaskBase` in# the openai_function_call repoiffn_args.find("[")!=-1:ifobj_start_idx==-1:obj_start_idx=fn_args.find("[")+1else:# keep going until we find the start positioncontinuenew_obj_json_str,obj_start_idx=_get_json_str(fn_args,obj_start_idx)ifnew_obj_json_strisnotNone:obj_json_str=new_obj_json_strobj=self._output_cls.parse_raw(obj_json_str)ifself._verbose:print(f"Extracted object: {obj.json()}")yieldobjdefstream_partial_objects(self,llm_kwargs:Optional[Dict[str,Any]]=None,*args:Any,**kwargs:Any,)->Generator[Model,None,None]:"""Streams the intermediate partial object."""llm_kwargs=llm_kwargsor{}messages=self._prompt.format_messages(llm=self._llm,**kwargs)description=self._description_eval(**kwargs)openai_fn_spec=to_openai_tool(self._output_cls,description=description)if"tool_choice"notinllm_kwargs:llm_kwargs["tool_choice"]=_default_tool_choice(self._output_cls)chat_response_gen=self._llm.stream_chat(messages=messages,tools=[openai_fn_spec],**llm_kwargs,)forpartial_respinchat_response_gen:kwargs=partial_resp.message.additional_kwargstool_calls=kwargs["tool_calls"]iflen(tool_calls)==0:continuefn_args=kwargs["tool_calls"][0].function.argumentstry:partial_object=parse_partial_json(fn_args)yieldself._output_cls.parse_obj(partial_object)except(ValidationError,ValueError):continuedef_description_eval(self,**kwargs:Any)->Optional[str]:description=kwargs.get("description",None)## __doc__ checks if docstring is provided in the Pydantic Modelifnot(self._output_cls.__doc__ordescription):raiseValueError("Must provide description for your Pydantic Model. Either provide a docstring or add `description=<your_description>` to the method. Required to convert Pydantic Model to OpenAI Function.")## If both docstring and description are provided, raise errorifself._output_cls.__doc__anddescription:raiseValueError("Must provide either a docstring or a description, not both.")returndescription
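
# Streaming sketch (also hypothetical, same assumptions as above):
# `stream_list` wraps the output class in a list model and yields each element
# as soon as its JSON closes, while `stream_partial_objects` yields
# successively more complete parses of a single object via
# `parse_partial_json`. Kept as an unused helper so it does not run on import.
def _demo_streaming() -> None:
    from pydantic import BaseModel

    from llama_index.llms.openai import OpenAI as OpenAILLM

    class Song(BaseModel):
        """A song, with a title and a length in seconds."""

        title: str
        length_seconds: int

    program = OpenAIPydanticProgram.from_defaults(
        output_cls=Song,
        prompt_template_str="Write three songs about {topic}.",
        llm=OpenAILLM(model="gpt-4o-mini"),
    )

    # Each fully-formed Song is yielded incrementally as the tool-call
    # argument stream completes another list element.
    for song in program.stream_list(topic="the sea"):
        print("complete object:", song)

    # Partial parses of a single object, refined as tokens arrive; invalid
    # intermediate JSON is skipped.
    for partial in program.stream_partial_objects(topic="the sea"):
        print("partial object:", partial)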