@@ -56,7 +56,7 @@ client.prompts.log(
5656 messages = [{"role": "user", "content": "What really happened at Roswell?"}],
5757 inputs = {"person": "Trump"},
5858 created_at = datetime.datetime.fromisoformat(
59- "2024-07-19 00:29:35.178000+00:00",
59+ "2024-07-18 23:29:35.178000+00:00",
6060 ),
6161 provider_latency = 6.5931549072265625 ,
6262 output_message = {
@@ -643,64 +643,12 @@ in the case where you are storing or deriving your Prompt details in code.
643643<dd >
644644
645645``` python
646- import datetime
647-
648646from humanloop import Humanloop
649647
650648client = Humanloop(
651649 api_key = " YOUR_API_KEY" ,
652650)
653- response = client.prompts.call_stream(
654- version_id = " string" ,
655- environment = " string" ,
656- path = " string" ,
657- id = " string" ,
658- messages = [
659- {
660- " content" : " string" ,
661- " name" : " string" ,
662- " tool_call_id" : " string" ,
663- " role" : " user" ,
664- " tool_calls" : [
665- {
666- " id" : " string" ,
667- " type" : " function" ,
668- " function" : {" name" : " string" },
669- }
670- ],
671- }
672- ],
673- prompt = {" model" : " string" },
674- inputs = {" string" : {" key" : " value" }},
675- source = " string" ,
676- metadata = {" string" : {" key" : " value" }},
677- start_time = datetime.datetime.fromisoformat(
678- " 2024-01-15 09:30:00+00:00" ,
679- ),
680- end_time = datetime.datetime.fromisoformat(
681- " 2024-01-15 09:30:00+00:00" ,
682- ),
683- source_datapoint_id = " string" ,
684- trace_parent_id = " string" ,
685- user = " string" ,
686- prompts_call_stream_request_environment = " string" ,
687- save = True ,
688- log_id = " string" ,
689- provider_api_keys = {
690- " openai" : " string" ,
691- " ai_21" : " string" ,
692- " mock" : " string" ,
693- " anthropic" : " string" ,
694- " bedrock" : " string" ,
695- " cohere" : " string" ,
696- " openai_azure" : " string" ,
697- " openai_azure_endpoint" : " string" ,
698- },
699- num_samples = 1 ,
700- return_inputs = True ,
701- logprobs = 1 ,
702- suffix = " string" ,
703- )
651+ response = client.prompts.call_stream()
704652for chunk in response:
705653 yield chunk
706654
@@ -1528,6 +1476,30 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
15281476<dl >
15291477<dd >
15301478
1479+ ** description:** ` typing.Optional[str] ` — Description of the Prompt.
1480+
1481+ </dd >
1482+ </dl >
1483+
1484+ <dl >
1485+ <dd >
1486+
1487+ ** tags:** ` typing.Optional[typing.Sequence[str]] ` — List of tags associated with this prompt.
1488+
1489+ </dd >
1490+ </dl >
1491+
1492+ <dl >
1493+ <dd >
1494+
1495+ ** readme:** ` typing.Optional[str] ` — Long description of the Prompt.
1496+
1497+ </dd >
1498+ </dl >
1499+
1500+ <dl >
1501+ <dd >
1502+
15311503** request_options:** ` typing.Optional[RequestOptions] ` — Request-specific configuration.
15321504
15331505</dd >
@@ -4059,7 +4031,7 @@ By default, the new Dataset version will be set to the list of Datapoints provid
40594031the request. You can also create a new version by adding or removing Datapoints from an existing version
40604032by specifying ` action ` as ` add ` or ` remove ` respectively. In this case, you may specify
40614033the ` version_id ` or ` environment ` query parameters to identify the existing version to base
4062- the new version on. If neither is provided, the default deployed version will be used.
4034+ the new version on. If neither is provided, the latest created version will be used.
40634035
40644036If you provide a commit message, then the new version will be committed;
40654037otherwise it will be uncommitted. If you try to commit an already committed version,
@@ -5436,6 +5408,14 @@ client.evaluators.log(
54365408<dl >
54375409<dd >
54385410
5411+ ** output_message:** ` typing.Optional[ChatMessageParams] ` — The message returned by the LLM. Only populated for LLM Evaluator Logs.
5412+
5413+ </dd >
5414+ </dl >
5415+
5416+ <dl >
5417+ <dd >
5418+
54395419** judgment:** ` typing.Optional[CreateEvaluatorLogRequestJudgmentParams] ` — Evaluator assessment of the Log.
54405420
54415421</dd >
@@ -6592,10 +6572,10 @@ client.flows.log(
65926572 output = " The patient is likely experiencing a myocardial infarction. Immediate medical attention is required." ,
65936573 trace_status = " incomplete" ,
65946574 start_time = datetime.datetime.fromisoformat(
6595- "2024-07-08 22:40:35+00:00",
6575+ "2024-07-08 21:40:35+00:00",
65966576 ),
65976577 end_time = datetime.datetime.fromisoformat(
6598- "2024-07-08 22:40:39+00:00",
6578+ "2024-07-08 21:40:39+00:00",
65996579 ),
66006580)
66016581
@@ -8410,7 +8390,7 @@ client.directories.update(
84108390</details >
84118391
84128392## Files
8413- <details ><summary ><code >client.files.<a href =" src/humanloop/files/client.py " >list </a >(...)</code ></summary >
8393+ <details ><summary ><code >client.files.<a href =" src/humanloop/files/client.py " >list_files </a >(...)</code ></summary >
84148394<dl >
84158395<dd >
84168396
@@ -8442,7 +8422,7 @@ from humanloop import Humanloop
84428422client = Humanloop(
84438423 api_key = " YOUR_API_KEY" ,
84448424)
8445- client.files.list ()
8425+ client.files.list_files ()
84468426
84478427```
84488428</dd >
@@ -8482,6 +8462,14 @@ client.files.list()
84828462<dl >
84838463<dd >
84848464
8465+ ** template:** ` typing.Optional[bool] ` — Filter to include only template files.
8466+
8467+ </dd >
8468+ </dl >
8469+
8470+ <dl >
8471+ <dd >
8472+
84858473** type:** ` typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] ` — List of file types to filter for.
84868474
84878475</dd >
@@ -8522,6 +8510,84 @@ client.files.list()
85228510</dl >
85238511
85248512
8513+ </dd >
8514+ </dl >
8515+ </details >
8516+
8517+ <details ><summary ><code >client.files.<a href =" src/humanloop/files/client.py " >retrieve_by_path</a >(...)</code ></summary >
8518+ <dl >
8519+ <dd >
8520+
8521+ #### 📝 Description
8522+
8523+ <dl >
8524+ <dd >
8525+
8526+ <dl >
8527+ <dd >
8528+
8529+ Retrieve a File by path.
8530+ </dd >
8531+ </dl >
8532+ </dd >
8533+ </dl >
8534+
8535+ #### 🔌 Usage
8536+
8537+ <dl >
8538+ <dd >
8539+
8540+ <dl >
8541+ <dd >
8542+
8543+ ``` python
8544+ from humanloop import Humanloop
8545+
8546+ client = Humanloop(
8547+ api_key = " YOUR_API_KEY" ,
8548+ )
8549+ client.files.retrieve_by_path(
8550+ path = " path" ,
8551+ )
8552+
8553+ ```
8554+ </dd >
8555+ </dl >
8556+ </dd >
8557+ </dl >
8558+
8559+ #### ⚙️ Parameters
8560+
8561+ <dl >
8562+ <dd >
8563+
8564+ <dl >
8565+ <dd >
8566+
8567+ ** path:** ` str ` — Path of the File to retrieve.
8568+
8569+ </dd >
8570+ </dl >
8571+
8572+ <dl >
8573+ <dd >
8574+
8575+ ** environment:** ` typing.Optional[str] ` — Name of the Environment to retrieve a deployed Version from.
8576+
8577+ </dd >
8578+ </dl >
8579+
8580+ <dl >
8581+ <dd >
8582+
8583+ ** request_options:** ` typing.Optional[RequestOptions] ` — Request-specific configuration.
8584+
8585+ </dd >
8586+ </dl >
8587+ </dd >
8588+ </dl >
8589+
8590+
85258591</dd >
85268592</dl >
85278593</details >
@@ -9812,6 +9878,14 @@ for page in response.iter_pages():
98129878<dl >
98139879<dd >
98149880
9881+ ** id:** ` typing.Optional[typing.Union[str, typing.Sequence[str]]] ` — If provided, returns Logs whose IDs contain any of the specified values as substrings.
9882+
9883+ </dd >
9884+ </dl >
9885+
9886+ <dl >
9887+ <dd >
9888+
98159889** search:** ` typing.Optional[str] ` — If provided, only Logs that contain the provided string in its inputs and output will be returned.
98169890
98179891</dd >
@@ -9912,9 +9986,7 @@ from humanloop import Humanloop
99129986client = Humanloop(
99139987 api_key = " YOUR_API_KEY" ,
99149988)
9915- client.logs.delete(
9916- id = " string" ,
9917- )
9989+ client.logs.delete()
99189990
99199991```
99209992</dd >
0 commit comments