token_counter( model="gpt-3.5-turbo", messages=[ {"role": "user", "content": "Hello, how are you?"} ])
13
If we set `return_cache_key=True`, the function is not executed; only the cache key is returned.
# This will not execute the function, but only return the cache key.
cache_key = token_counter(
    model="gpt-4o", text="Hello, how are you?", return_cache_key=True
)
assert not is_in_cache(cache_key)

# This will cache the result.
token_counter(model="gpt-4o", text="Hello, how are you?")
assert is_in_cache(cache_key)

clear_cache_key(cache_key)