Skip to content

Core Functions Reference

Runtime

Core runtime engine for LLM tool calling.

ToolRuntime

Runtime engine for LLM tool calling.

Supports both:
- Custom callable: `def my_llm(system_prompt: str, user_prompt: str) -> str`
- LangChain models: any `BaseChatModel` instance

Example

from langchain_google_genai import ChatGoogleGenerativeAI llm = ChatGoogleGenerativeAI(model="gemini-1.5-flash") runtime = ToolRuntime(llm)

@runtime.tool def add(a: int, b: int) -> int: ... return a + b

runtime.run("What is 5 + 3?") 'The result is 8.'

Source code in llm_tool_runtime/runtime.py
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
class ToolRuntime:
    """
    Runtime engine for LLM tool calling.

    Supports both:
    - Custom callable: def my_llm(system_prompt: str, user_prompt: str) -> str
    - LangChain models: Any BaseChatModel instance

    Example:
        >>> from langchain_google_genai import ChatGoogleGenerativeAI
        >>> llm = ChatGoogleGenerativeAI(model="gemini-1.5-flash")
        >>> runtime = ToolRuntime(llm)
        >>>
        >>> @runtime.tool
        >>> def add(a: int, b: int) -> int:
        ...     return a + b
        >>>
        >>> runtime.run("What is 5 + 3?")
        'The result is 8.'
    """

    def __init__(
        self,
        llm: Union[Callable[[str, str], str], Any],
        max_steps: int = 5,
        max_retries: Optional[int] = None,
        verbose: bool = False
    ):
        """
        Initialize the tool runtime.

        Args:
            llm: Either a callable (system, user) -> str, or a LangChain model
            max_steps: Maximum number of steps (tool calls) in a chain. Defaults to 5.
            max_retries: Legacy parameter, alias for max_steps.
            verbose: If True, print debug information

        Raises:
            ValueError: If llm is None or invalid
        """
        if llm is None:
            raise ValueError("LLM cannot be None. Provide a LangChain model or callable.")

        if not callable(llm) and not self._check_langchain_model(llm):
            raise ValueError(
                "LLM must be either a callable(system, user) -> str or a LangChain BaseChatModel"
            )

        self.llm = llm
        self.registry = ToolRegistry()
        # Use max_retries if provided (backward compatibility), else max_steps;
        # clamp to at least one step so run() always makes one LLM call.
        self.max_steps = max(1, max_retries if max_retries is not None else max_steps)
        self.verbose = verbose
        self._is_langchain = self._check_langchain_model(llm)
        self._use_combined_prompt = False  # Track if we need to skip system messages

    def _check_langchain_model(self, llm: Any) -> bool:
        """Check if the provided LLM is a LangChain model."""
        if not LANGCHAIN_AVAILABLE:
            return False
        if BaseChatModel is None:
            return False
        return isinstance(llm, BaseChatModel)

    def _handle_api_error(self, error: Exception) -> None:
        """Convert common API errors to our custom exceptions.

        Matching is substring-based on the stringified error, so it is
        best-effort; unrecognized errors pass through (the caller wraps
        them in LLMConnectionError).
        """
        error_str = str(error).lower()

        # Check for API key errors
        if any(phrase in error_str for phrase in [
            'api key', 'invalid key', 'unauthorized', 'authentication', 
            'api_key', 'invalid_api_key', '401', 'forbidden'
        ]):
            raise InvalidAPIKeyError() from error

        # Check for rate limit errors
        if any(phrase in error_str for phrase in [
            'rate limit', 'rate_limit', 'too many requests', '429', 
            'quota exceeded', 'quota_exceeded'
        ]):
            raise RateLimitError() from error

        # Check for connection errors
        if any(phrase in error_str for phrase in [
            'connection', 'timeout', 'network', 'unreachable', 
            'dns', 'ssl', 'certificate'
        ]):
            raise LLMConnectionError(f"Failed to connect to LLM: {error}", error) from error

    def _invoke_combined(self, system_prompt: str, user_prompt: str) -> str:
        """Invoke the LangChain model with system+user merged into one human message.

        Used for models that reject system/developer instructions.
        """
        combined_prompt = f"{system_prompt}\n\n---\n\nUser: {user_prompt}"
        response = self.llm.invoke([HumanMessage(content=combined_prompt)])
        return response.content

    def _call_llm(self, system_prompt: str, user_prompt: str) -> str:
        """
        Call the LLM with system and user prompts.

        Handles models that don't support system instructions by automatically
        falling back to combined prompts.
        """
        if self._is_langchain:
            try:
                # If we know this model doesn't support system messages, skip trying
                if self._use_combined_prompt:
                    return self._invoke_combined(system_prompt, user_prompt)

                # Try with system message first
                messages = [
                    SystemMessage(content=system_prompt),
                    HumanMessage(content=user_prompt)
                ]
                response = self.llm.invoke(messages)
                return response.content

            except Exception as e:
                error_str = str(e)

                # If system message not supported, switch to combined prompt mode.
                # (Parentheses make the intended or/and grouping explicit.)
                if "Developer instruction is not enabled" in error_str or \
                   ("system" in error_str.lower() and "not supported" in error_str.lower()):
                    if self.verbose:
                        print("System instructions not supported, using combined prompt...")
                    self._use_combined_prompt = True
                    return self._invoke_combined(system_prompt, user_prompt)

                # Handle other API errors
                self._handle_api_error(e)
                raise LLMConnectionError(f"LLM call failed: {e}", e) from e
        else:
            # Custom callable
            try:
                result = self.llm(system_prompt, user_prompt)
                if result is None:
                    raise ValueError("LLM callable returned None")
                return str(result)
            except Exception as e:
                self._handle_api_error(e)
                raise LLMConnectionError(f"Custom LLM call failed: {e}", e) from e

    def tool(self, fn: Optional[Callable] = None, *, description: Optional[str] = None):
        """
        Decorator to register a function as a tool.

        Usage:
            @runtime.tool
            def my_tool(arg1: str) -> str:
                return "result"

            @runtime.tool(description="Custom description")
            def another_tool(x: int) -> int:
                return x * 2
        """
        return self.registry.register(fn, description=description)

    def run(self, user_prompt: str) -> str:
        """
        Run the tool calling loop for a user prompt.

        Args:
            user_prompt: The user's input/question

        Returns:
            The final LLM response after any tool calls

        Raises:
            ValueError: If user_prompt is empty
            MaxRetriesExceededError: If tool calling fails after max retries
            InvalidAPIKeyError: If API key is invalid or missing
            RateLimitError: If API rate limit is exceeded
            LLMConnectionError: If connection to LLM fails
        """
        # Validate input
        if not user_prompt or not user_prompt.strip():
            raise ValueError("User prompt cannot be empty")

        # Check if any tools are registered
        if not self.registry.tools:
            if self.verbose:
                print("Warning: No tools registered. LLM will respond without tool access.")

        system_prompt = build_system_prompt(self.registry.tools)
        # We start with the user prompt
        current_conversation = f"User: {user_prompt.strip()}"
        # Most recent failure; attached to MaxRetriesExceededError below.
        last_error = None

        for step in range(self.max_steps):
            if self.verbose:
                print(f"\n[Step {step + 1}/{self.max_steps}]")

            try:
                # For models that need system instructions, we pass them separately
                # For our internal history, we just append to the string
                output = self._call_llm(system_prompt, current_conversation)
            except (InvalidAPIKeyError, RateLimitError, LLMConnectionError):
                # Known, unrecoverable API failures propagate untouched.
                raise
            except Exception as e:
                last_error = str(e)
                if self.verbose:
                    print(f"LLM call error: {e}")
                if step == self.max_steps - 1:
                    raise LLMConnectionError(f"LLM call failed after {self.max_steps} steps: {e}", e)
                continue

            if self.verbose:
                print(f"LLM output: {output[:200]}...")

            call = parse_tool_call(output)

            if not call:
                # No tool call means the LLM is done and giving a final answer
                if self.verbose:
                    print("No tool call detected, returning response")
                return output

            # We found a tool call!
            tool_name = call["name"]
            tool_args = call["arguments"]

            if self.verbose:
                print(f"Tool call: {tool_name}({tool_args})")

            # Append LLM's thought/tool call to conversation context
            # (Note: In a more advanced implementation, we'd distinguish between
            # thought trace and exact tool call syntax, but for text-only
            # runtime, we just append the output)
            current_conversation += f"\n\nAssistant: {output}"

            try:
                # Execute the tool
                tool = self.registry.get(tool_name)
                result = tool.call(tool_args)

                if self.verbose:
                    print(f"Tool result: {result}")

                # Append result to conversation
                current_conversation += f"\n\nTool '{tool_name}' result:\n{result}"

                # Now loop back to let LLM see the result and decide next step
                continue

            except ToolNotFoundError as e:
                if self.verbose:
                    print(f"Tool not found: {e}")
                available = self.registry.list_tools()
                error_msg = (
                    f"Error: Tool '{tool_name}' does not exist. "
                    f"Available tools: {', '.join(available) if available else 'none'}."
                )
                current_conversation += f"\n\nSystem: {error_msg}"
                last_error = str(e)

            except ToolRuntimeError as e:
                if self.verbose:
                    print(f"Tool error: {e}")
                current_conversation += f"\n\nSystem: Error calling tool '{tool_name}': {e}"
                last_error = str(e)

            except Exception as e:
                if self.verbose:
                    print(f"Unexpected tool error: {e}")
                current_conversation += f"\n\nSystem: Unexpected error with tool '{tool_name}': {e}"
                last_error = str(e)

        # If we exit the loop, we ran out of steps
        raise MaxRetriesExceededError(self.max_steps, last_error)

    def run_safe(self, user_prompt: str, default: str = "I encountered an error processing your request.") -> str:
        """
        Run the tool calling loop with automatic error handling.

        This method never raises exceptions - it returns a default message on error.
        Useful for production environments where you want graceful degradation.

        Args:
            user_prompt: The user's input/question
            default: Default message to return on error

        Returns:
            The LLM response or the default message on error
        """
        try:
            return self.run(user_prompt)
        except InvalidAPIKeyError:
            return "Service configuration error. Please contact support."
        except RateLimitError:
            return "Service is temporarily busy. Please try again in a moment."
        except LLMConnectionError:
            return "Unable to connect to the AI service. Please check your connection."
        except MaxRetriesExceededError:
            return "Unable to complete the request. Please try rephrasing your question."
        except Exception as e:
            if self.verbose:
                print(f"Unexpected error in run_safe: {e}")
            return default

    def run_with_history(self, user_prompt: str, history: Optional[list] = None) -> tuple[str, list]:
        """
        Run with conversation history support.

        Args:
            user_prompt: The user's input
            history: List of previous (user, assistant) message tuples.
                Mutated in place when provided; a new list is created otherwise.

        Returns:
            Tuple of (response, updated_history)

        Raises:
            ValueError: If a history item is not a 2-element pair
        """
        # Explicit None check: `history or []` would silently replace a
        # caller-supplied empty list, so the caller's list was never updated.
        if history is None:
            history = []

        # Validate history format
        for item in history:
            if not isinstance(item, (list, tuple)) or len(item) != 2:
                raise ValueError("History must be a list of (user_message, assistant_message) tuples")

        # Build context from history (most recent exchanges only, to bound prompt size)
        context_parts = []
        for user_msg, assistant_msg in history[-5:]:  # Last 5 exchanges
            context_parts.append(f"User: {user_msg}")
            context_parts.append(f"Assistant: {assistant_msg}")

        if context_parts:
            full_prompt = "\n".join(context_parts) + f"\n\nUser: {user_prompt}"
        else:
            full_prompt = user_prompt

        response = self.run(full_prompt)
        history.append((user_prompt, response))

        return response, history

__init__(llm, max_steps=5, max_retries=None, verbose=False)

Initialize the tool runtime.

Parameters:

Name Type Description Default
llm Union[Callable[[str, str], str], Any]

Either a callable (system, user) -> str, or a LangChain model

required
max_steps int

Maximum number of steps (tool calls) in a chain. Defaults to 5.

5
max_retries Optional[int]

Legacy parameter, alias for max_steps.

None
verbose bool

If True, print debug information

False

Raises:

Type Description
ValueError

If llm is None or invalid

Source code in llm_tool_runtime/runtime.py
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
def __init__(
    self,
    llm: Union[Callable[[str, str], str], Any],
    max_steps: int = 5,
    max_retries: Optional[int] = None,
    verbose: bool = False
):
    """
    Set up the runtime around an LLM backend.

    Args:
        llm: Either a callable (system, user) -> str, or a LangChain model
        max_steps: Maximum number of steps (tool calls) in a chain. Defaults to 5.
        max_retries: Legacy parameter, alias for max_steps.
        verbose: If True, print debug information

    Raises:
        ValueError: If llm is None or invalid
    """
    if llm is None:
        raise ValueError("LLM cannot be None. Provide a LangChain model or callable.")

    if not callable(llm) and not self._check_langchain_model(llm):
        raise ValueError(
            "LLM must be either a callable(system, user) -> str or a LangChain BaseChatModel"
        )

    # The legacy max_retries name, when supplied, overrides max_steps;
    # clamp to at least one step either way.
    step_budget = max_steps if max_retries is None else max_retries

    self.llm = llm
    self.registry = ToolRegistry()
    self.max_steps = max(1, step_budget)
    self.verbose = verbose
    self._is_langchain = self._check_langchain_model(llm)
    self._use_combined_prompt = False  # flips to True once system messages fail

run(user_prompt)

Run the tool calling loop for a user prompt.

Parameters:

Name Type Description Default
user_prompt str

The user's input/question

required

Returns:

Type Description
str

The final LLM response after any tool calls

Raises:

Type Description
ValueError

If user_prompt is empty

MaxRetriesExceededError

If tool calling fails after max retries

InvalidAPIKeyError

If API key is invalid or missing

RateLimitError

If API rate limit is exceeded

LLMConnectionError

If connection to LLM fails

Source code in llm_tool_runtime/runtime.py
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
def run(self, user_prompt: str) -> str:
    """
    Run the tool calling loop for a user prompt.

    Args:
        user_prompt: The user's input/question

    Returns:
        The final LLM response after any tool calls

    Raises:
        ValueError: If user_prompt is empty
        MaxRetriesExceededError: If tool calling fails after max retries
        InvalidAPIKeyError: If API key is invalid or missing
        RateLimitError: If API rate limit is exceeded
        LLMConnectionError: If connection to LLM fails
    """
    # Validate input
    if not user_prompt or not user_prompt.strip():
        raise ValueError("User prompt cannot be empty")

    # Check if any tools are registered
    if not self.registry.tools:
        if self.verbose:
            print("Warning: No tools registered. LLM will respond without tool access.")

    system_prompt = build_system_prompt(self.registry.tools)
    # We start with the user prompt
    current_conversation = f"User: {user_prompt.strip()}"
    # Most recent failure message; reported via MaxRetriesExceededError below.
    last_error = None

    for step in range(self.max_steps):
        if self.verbose:
            print(f"\n[Step {step + 1}/{self.max_steps}]")
            # print(f"Context length: {len(current_conversation)} chars")

        try:
            # For models that need system instructions, we pass them separately
            # For our internal history, we just append to the string
            output = self._call_llm(system_prompt, current_conversation)
        except (InvalidAPIKeyError, RateLimitError, LLMConnectionError):
            # Known, unrecoverable API failures propagate to the caller untouched.
            raise
        except Exception as e:
            last_error = str(e)
            if self.verbose:
                print(f"LLM call error: {e}")
            # Out of retries: surface the last transport failure.
            if step == self.max_steps - 1:
                raise LLMConnectionError(f"LLM call failed after {self.max_steps} steps: {e}", e)
            continue

        if self.verbose:
            print(f"LLM output: {output[:200]}...")

        call = parse_tool_call(output)

        if not call:
            # No tool call means the LLM is done and giving a final answer
            if self.verbose:
                print("No tool call detected, returning response")
            return output

        # We found a tool call!
        tool_name = call["name"]
        tool_args = call["arguments"]

        if self.verbose:
            print(f"Tool call: {tool_name}({tool_args})")

        # Append LLM's thought/tool call to conversation context
        # (Note: In a more advanced implementation, we'd distinguish between
        # thought trace and exact tool call syntax, but for text-only 
        # runtime, we just append the output)
        current_conversation += f"\n\nAssistant: {output}"

        try:
            # Execute the tool
            tool = self.registry.get(tool_name)
            result = tool.call(tool_args)

            if self.verbose:
                print(f"Tool result: {result}")

            # Append result to conversation
            current_conversation += f"\n\nTool '{tool_name}' result:\n{result}"

            # Now loop back to let LLM see the result and decide next step
            continue

        # Tool errors are fed back into the conversation so the LLM can
        # correct itself on the next step, rather than aborting the run.
        except ToolNotFoundError as e:
            if self.verbose:
                print(f"Tool not found: {e}")
            available = self.registry.list_tools()
            error_msg = (
                f"Error: Tool '{tool_name}' does not exist. "
                f"Available tools: {', '.join(available) if available else 'none'}."
            )
            current_conversation += f"\n\nSystem: {error_msg}"
            last_error = str(e)

        except ToolRuntimeError as e:
            if self.verbose:
                print(f"Tool error: {e}")
            current_conversation += f"\n\nSystem: Error calling tool '{tool_name}': {e}"
            last_error = str(e)

        except Exception as e:
            if self.verbose:
                print(f"Unexpected tool error: {e}")
            current_conversation += f"\n\nSystem: Unexpected error with tool '{tool_name}': {e}"
            last_error = str(e)

    # If we exit the loop, we ran out of steps
    raise MaxRetriesExceededError(self.max_steps, last_error)

run_safe(user_prompt, default='I encountered an error processing your request.')

Run the tool calling loop with automatic error handling.

This method never raises exceptions - it returns a default message on error. Useful for production environments where you want graceful degradation.

Parameters:

Name Type Description Default
user_prompt str

The user's input/question

required
default str

Default message to return on error

'I encountered an error processing your request.'

Returns:

Type Description
str

The LLM response or the default message on error

Source code in llm_tool_runtime/runtime.py
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
def run_safe(self, user_prompt: str, default: str = "I encountered an error processing your request.") -> str:
    """
    Execute run() but translate every failure into a user-friendly message.

    Nothing raised by run() escapes this method, which makes it suitable
    for production paths that prefer graceful degradation over tracebacks.

    Args:
        user_prompt: The user's input/question
        default: Default message to return on error

    Returns:
        The LLM response or the default message on error
    """
    try:
        return self.run(user_prompt)
    # Each known failure mode maps to a fixed, support-friendly message.
    except InvalidAPIKeyError:
        return "Service configuration error. Please contact support."
    except RateLimitError:
        return "Service is temporarily busy. Please try again in a moment."
    except LLMConnectionError:
        return "Unable to connect to the AI service. Please check your connection."
    except MaxRetriesExceededError:
        return "Unable to complete the request. Please try rephrasing your question."
    except Exception as e:
        # Anything unexpected falls back to the caller-supplied default.
        if self.verbose:
            print(f"Unexpected error in run_safe: {e}")
        return default

run_with_history(user_prompt, history=None)

Run with conversation history support.

Parameters:

Name Type Description Default
user_prompt str

The user's input

required
history list

List of previous (user, assistant) message tuples

None

Returns:

Type Description
tuple[str, list]

Tuple of (response, updated_history)

Source code in llm_tool_runtime/runtime.py
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
def run_with_history(self, user_prompt: str, history: Optional[list] = None) -> tuple[str, list]:
    """
    Run with conversation history support.

    Args:
        user_prompt: The user's input
        history: List of previous (user, assistant) message tuples.
            Mutated in place when provided; a new list is created otherwise.

    Returns:
        Tuple of (response, updated_history)

    Raises:
        ValueError: If a history item is not a 2-element (user, assistant) pair
    """
    # Explicit None check: `history or []` would silently replace a
    # caller-supplied empty list, so the caller's list was never updated.
    if history is None:
        history = []

    # Validate history format
    for item in history:
        if not isinstance(item, (list, tuple)) or len(item) != 2:
            raise ValueError("History must be a list of (user_message, assistant_message) tuples")

    # Build context from history (most recent exchanges only, to bound prompt size)
    context_parts = []
    for user_msg, assistant_msg in history[-5:]:  # Last 5 exchanges
        context_parts.append(f"User: {user_msg}")
        context_parts.append(f"Assistant: {assistant_msg}")

    if context_parts:
        full_prompt = "\n".join(context_parts) + f"\n\nUser: {user_prompt}"
    else:
        full_prompt = user_prompt

    response = self.run(full_prompt)
    history.append((user_prompt, response))

    return response, history

tool(fn=None, *, description=None)

Decorator to register a function as a tool.

Usage

@runtime.tool def my_tool(arg1: str) -> str: return "result"

@runtime.tool(description="Custom description") def another_tool(x: int) -> int: return x * 2

Source code in llm_tool_runtime/runtime.py
167
168
169
170
171
172
173
174
175
176
177
178
179
180
def tool(self, fn: Optional[Callable] = None, *, description: Optional[str] = None):
    """
    Decorator that registers a function as a tool on this runtime.

    Works bare or with keyword arguments:

        @runtime.tool
        def my_tool(arg1: str) -> str:
            return "result"

        @runtime.tool(description="Custom description")
        def another_tool(x: int) -> int:
            return x * 2
    """
    # All registration logic lives on the registry; this is a thin passthrough.
    return self.registry.register(fn, description=description)

Registry

Tool registry for managing registered functions.

Tool

Wrapper for a callable function registered as a tool.

Source code in llm_tool_runtime/registry.py
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
class Tool:
    """Wrapper for a callable function registered as a tool."""

    def __init__(self, fn: Callable, description: Optional[str] = None):
        """
        Wrap *fn* as a tool.

        Args:
            fn: The callable to expose to the LLM.
            description: Optional human-readable description; falls back to the
                function's docstring, then to a generic "Tool: <name>" placeholder.
        """
        self.fn = fn
        self.name = fn.__name__
        self.signature: Signature = signature(fn)
        self.description = description or fn.__doc__ or f"Tool: {self.name}"

    def call(self, args: Dict[str, Any]) -> Any:
        """
        Execute the tool with the given arguments.

        Argument values are coerced to the parameter's annotated type when
        possible; argument names not in the signature are silently dropped.

        Raises:
            ToolExecutionError: If the wrapped function raises.
        """
        # Convert argument types based on signature annotations
        converted_args = {}
        for param_name, param in self.signature.parameters.items():
            if param_name not in args:
                continue
            value = args[param_name]
            if param.annotation is param.empty:
                converted_args[param_name] = value
                continue
            # bool("false") is True, so string booleans from the LLM need
            # a real parse instead of the bool() constructor.
            if param.annotation is bool and isinstance(value, str):
                converted_args[param_name] = value.strip().lower() in ("true", "1", "yes", "on")
                continue
            try:
                converted_args[param_name] = param.annotation(value)
            except (ValueError, TypeError):
                # Annotation is not a simple constructor (e.g. a typing
                # generic); pass the raw value through unchanged.
                converted_args[param_name] = value

        try:
            return self.fn(**converted_args)
        except Exception as e:
            raise ToolExecutionError(self.name, e)

    def get_schema(self) -> Dict[str, Any]:
        """Get the tool schema for prompt building."""
        params = {}
        for name, param in self.signature.parameters.items():
            # Un-annotated parameters are reported as "any".
            param_type = "any"
            if param.annotation != param.empty:
                param_type = getattr(param.annotation, "__name__", str(param.annotation))
            params[name] = param_type

        return {
            "name": self.name,
            "description": self.description,
            "parameters": params
        }

call(args)

Execute the tool with the given arguments.

Source code in llm_tool_runtime/registry.py
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
def call(self, args: Dict[str, Any]) -> Any:
    """
    Execute the tool with the given arguments.

    Argument values are coerced to the parameter's annotated type when
    possible; argument names not in the signature are silently dropped.

    Raises:
        ToolExecutionError: If the wrapped function raises.
    """
    # Convert argument types based on signature annotations
    converted_args = {}
    for param_name, param in self.signature.parameters.items():
        if param_name not in args:
            continue
        value = args[param_name]
        if param.annotation is param.empty:
            converted_args[param_name] = value
            continue
        # bool("false") is True, so string booleans from the LLM need
        # a real parse instead of the bool() constructor.
        if param.annotation is bool and isinstance(value, str):
            converted_args[param_name] = value.strip().lower() in ("true", "1", "yes", "on")
            continue
        try:
            converted_args[param_name] = param.annotation(value)
        except (ValueError, TypeError):
            # Annotation is not a simple constructor (e.g. a typing
            # generic); pass the raw value through unchanged.
            converted_args[param_name] = value

    try:
        return self.fn(**converted_args)
    except Exception as e:
        raise ToolExecutionError(self.name, e)

get_schema()

Get the tool schema for prompt building.

Source code in llm_tool_runtime/registry.py
38
39
40
41
42
43
44
45
46
47
48
49
50
51
def get_schema(self) -> Dict[str, Any]:
    """Build a prompt-friendly schema: name, description, and parameter types."""
    def _type_name(param) -> str:
        # Un-annotated parameters are reported as "any".
        if param.annotation == param.empty:
            return "any"
        return getattr(param.annotation, "__name__", str(param.annotation))

    return {
        "name": self.name,
        "description": self.description,
        "parameters": {
            pname: _type_name(p) for pname, p in self.signature.parameters.items()
        }
    }

ToolRegistry

Registry for managing tool functions.

Source code in llm_tool_runtime/registry.py
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
class ToolRegistry:
    """Registry for managing tool functions."""

    def __init__(self):
        # Maps tool name -> Tool wrapper, in registration order.
        self.tools: Dict[str, Tool] = {}

    def register(self, fn: Optional[Callable] = None, *, description: Optional[str] = None):
        """
        Register a function as a tool.

        Usable as a bare decorator or as a decorator factory:

            @registry.register
            def my_tool(): ...

            @registry.register(description="My tool description")
            def my_tool(): ...
        """
        def decorator(func: Callable) -> Callable:
            wrapped = Tool(func, description=description)
            self.tools[wrapped.name] = wrapped
            # Return the original function so it stays directly callable.
            return func

        # Bare @register usage supplies the function immediately.
        return decorator(fn) if fn is not None else decorator

    def get(self, name: str) -> Tool:
        """Get a tool by name, raises ToolNotFoundError if not found."""
        if name in self.tools:
            return self.tools[name]
        raise ToolNotFoundError(name, available_tools=self.list_tools())

    def list_tools(self) -> list:
        """List all registered tool names."""
        return [*self.tools]

    def get_all_schemas(self) -> list:
        """Get schemas for all registered tools."""
        return [entry.get_schema() for entry in self.tools.values()]

get(name)

Get a tool by name, raises ToolNotFoundError if not found.

Source code in llm_tool_runtime/registry.py
80
81
82
83
84
def get(self, name: str) -> Tool:
    """Return the registered tool called *name*.

    Raises:
        ToolNotFoundError: If no tool with that name is registered.
    """
    if name in self.tools:
        return self.tools[name]
    raise ToolNotFoundError(name, available_tools=self.list_tools())

get_all_schemas()

Get schemas for all registered tools.

Source code in llm_tool_runtime/registry.py
90
91
92
def get_all_schemas(self) -> list:
    """Collect the prompt schema of every registered tool."""
    return [entry.get_schema() for entry in self.tools.values()]

list_tools()

List all registered tool names.

Source code in llm_tool_runtime/registry.py
86
87
88
def list_tools(self) -> list:
    """Return the names of all registered tools, in registration order."""
    return [*self.tools]

register(fn=None, *, description=None)

Register a function as a tool.

Can be used as a decorator with or without arguments

@registry.register def my_tool(): ...

@registry.register(description="My tool description") def my_tool(): ...

Source code in llm_tool_runtime/registry.py
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
def register(self, fn: Optional[Callable] = None, *, description: Optional[str] = None):
    """
    Register a function as a tool.

    Usable as a bare decorator or as a decorator factory:

        @registry.register
        def my_tool(): ...

        @registry.register(description="My tool description")
        def my_tool(): ...
    """
    def decorator(func: Callable) -> Callable:
        wrapped = Tool(func, description=description)
        self.tools[wrapped.name] = wrapped
        # Return the original function so it stays directly callable.
        return func

    # Bare @register usage supplies the function immediately.
    return decorator(fn) if fn is not None else decorator

Parser

Parser for extracting tool calls from LLM output.

extract_all_tool_calls(text)

Extract all tool calls from LLM output (for future multi-tool support).

Parameters:

Name Type Description Default
text str

The raw LLM output text

required

Returns:

Type Description
list[ToolCall]

List of ToolCall dicts found in the text

Source code in llm_tool_runtime/parser.py
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
def extract_all_tool_calls(text: str) -> list[ToolCall]:
    """
    Extract all tool calls from LLM output (for future multi-tool support).

    Malformed JSON matches are skipped rather than raising.

    Args:
        text: The raw LLM output text

    Returns:
        List of ToolCall dicts found in the text
    """
    found: list[ToolCall] = []
    for candidate in TOOL_CALL_PATTERN.finditer(text):
        try:
            payload = json.loads(candidate.group(1))
        except json.JSONDecodeError:
            continue
        # Only well-formed objects that carry a tool name count.
        if isinstance(payload, dict) and "name" in payload:
            found.append(
                ToolCall(name=payload["name"], arguments=payload.get("arguments", {}))
            )
    return found

parse_tool_call(text)

Parse a tool call from LLM output text.

Parameters:

Name Type Description Default
text str

The raw LLM output text

required

Returns:

Type Description
Optional[ToolCall]

ToolCall dict with 'name' and 'arguments' if found, None otherwise

Source code in llm_tool_runtime/parser.py
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
def parse_tool_call(text: str) -> Optional[ToolCall]:
    """
    Parse a tool call from LLM output text.

    Args:
        text: The raw LLM output text

    Returns:
        ToolCall dict with 'name' and 'arguments' if found, None otherwise
    """
    if not text:
        return None

    found = TOOL_CALL_PATTERN.search(text)
    if found is None:
        return None

    try:
        payload = json.loads(found.group(1))
    except json.JSONDecodeError:
        return None

    # Must be a JSON object carrying a tool "name".
    if not isinstance(payload, dict) or "name" not in payload:
        return None

    # "arguments" defaults to an empty dict, but when present it must
    # itself be an object.
    arguments = payload.get("arguments", {})
    if not isinstance(arguments, dict):
        return None

    return ToolCall(
        name=payload["name"],
        arguments=arguments
    )