-
-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathaction.yml
More file actions
73 lines (69 loc) · 2.66 KB
/
action.yml
File metadata and controls
73 lines (69 loc) · 2.66 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
# action.yml — metadata for the "LLM Action" GitHub Action.
# Sends a prompt to an OpenAI-compatible chat-completion API and exposes the
# model response plus token-usage counters as step outputs.
name: 'LLM Action'
description: 'GitHub Action to interact with OpenAI Compatible LLM services'
author: 'appleboy'

# Marketplace listing appearance.
branding:
  icon: 'message-square'
  color: 'blue'

# NOTE: GitHub Actions inputs are always strings, so the boolean/numeric
# defaults below ('false', '0.7', '1000') are intentionally quoted.
inputs:
  base_url:
    description: 'Base URL for OpenAI Compatible API endpoint'
    required: false
    default: 'https://api.openai.com/v1'
  api_key:
    # Secret — supply via `secrets.*` in the workflow; never hard-code it.
    description: 'API Key for authentication'
    required: true
  model:
    description: 'Model name to use'
    required: false
    default: 'gpt-4o'
  skip_ssl_verify:
    description: 'Skip SSL certificate verification'
    required: false
    default: 'false'
  system_prompt:
    description: 'System prompt to set the context. Supports plain text, file path, or URL (http://, https://). For files, use absolute/relative path or file:// prefix. Supports Go templates with environment variables (e.g., {{.GITHUB_REPOSITORY}}, {{.MODEL}}).'
    required: false
    default: ''
  input_prompt:
    description: 'User input prompt for the LLM. Supports plain text, file path, or URL (http://, https://). For files, use absolute/relative path or file:// prefix. Supports Go templates with environment variables (e.g., {{.GITHUB_REPOSITORY}}, {{.MODEL}}).'
    required: true
  temperature:
    description: 'Temperature for response randomness (0.0-2.0)'
    required: false
    default: '0.7'
  max_tokens:
    description: 'Maximum tokens in the response'
    required: false
    default: '1000'
  tool_schema:
    description: 'JSON schema for structured output via function calling. Supports plain text, file path, or URL. Supports Go templates with environment variables (e.g., {{.GITHUB_REPOSITORY}}).'
    required: false
    default: ''
  debug:
    description: 'Enable debug mode to print all parameters'
    required: false
    default: 'false'
  headers:
    description: 'Custom HTTP headers to include in API requests. Format: "Header1:Value1,Header2:Value2" or multiline with one header per line. Useful for log analysis or custom authentication.'
    required: false
    default: ''

# Step outputs exposed to later workflow steps.
outputs:
  response:
    description: 'The response from the LLM'
  prompt_tokens:
    description: 'Number of tokens in the prompt'
  completion_tokens:
    description: 'Number of tokens in the completion'
  total_tokens:
    description: 'Total number of tokens used'
  prompt_cached_tokens:
    description: 'Number of cached tokens in the prompt (cost saving)'
  completion_reasoning_tokens:
    description: 'Number of reasoning tokens (for o1/o3 models)'
  completion_accepted_prediction_tokens:
    description: 'Number of accepted prediction tokens'
  completion_rejected_prediction_tokens:
    description: 'Number of rejected prediction tokens'

# Docker container action: GitHub builds the repository's Dockerfile and
# runs the resulting image for each invocation.
runs:
  using: 'docker'
  image: 'Dockerfile'