🪙 Tokenizer Completions
Ever wanted to see how many tokens a piece of text uses for a given model? Here you go.
Models can be found here.
import requests
# Define the URL for the Shard AI API endpoint.
api_url = "https://api.shard-ai.xyz/v1/tokenizer/completions"
# Define the data to be sent in the request.
request_data = {
"model": "gpt-3.5-turbo", # Specify the model to be used.
"input": 'hey how are you?' # Provide the input text.
}
# Provide the authorization token for accessing the API.
authorization_token = "Bearer shard-xxx"
# Prepare the request headers.
headers = {
"Authorization": authorization_token
}
# Send the HTTP POST request to the API endpoint.
response = requests.post(api_url, headers=headers, json=request_data)
# Print the response received from the API.
print(response.text)
package main
import (
	"fmt"
	"log"
	"time"

	"github.com/valyala/fasthttp"
)
func main() {
// Define the URL for the Shard AI API endpoint.
apiURL := "https://api.shard-ai.xyz/v1/tokenizer/completions"
// Define the data to be sent in the request.
requestData := `{
"model": "gpt-3.5-turbo",
"input": "hey how are you?"
}`
// Provide the authorization token for accessing the API.
authorizationToken := "Bearer shard-xxx"
// Prepare the request.
req := fasthttp.AcquireRequest()
defer fasthttp.ReleaseRequest(req) // Don't forget to release
req.Header.SetContentType("application/json")
req.Header.SetMethod("POST")
req.Header.Set("Authorization", authorizationToken)
req.SetRequestURI(apiURL)
req.SetBodyString(requestData)
// Prepare the response.
resp := fasthttp.AcquireResponse()
defer fasthttp.ReleaseResponse(resp) // Don't forget to release
// Send the request.
if err := fasthttp.Do(req, resp); err != nil {
log.Fatalf("Error while making request: %s", err)
}
// Print the response received from the API.
fmt.Println(string(resp.Body()))
}
const fetch = require('node-fetch');
// Define the URL for the Shard AI API endpoint.
const apiUrl = "https://api.shard-ai.xyz/v1/tokenizer/completions";
// Define the data to be sent in the request.
const requestData = {
model: "gpt-3.5-turbo", // Specify the model to be used.
input: "hey how are you?" // Provide the input text.
};
// Provide the authorization token for accessing the API.
const authToken = "Bearer shard-xxx";
// Define the options for the HTTP request.
const options = {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Authorization': authToken
},
body: JSON.stringify(requestData)
};
// Send the HTTP POST request to the API endpoint.
fetch(apiUrl, options)
.then(response => response.json())
.then(data => console.log(data))
.catch(error => console.error('Error:', error));
Responses
{
"total_tokens": 5,
"input": "hey how are you?",
"model": "gpt-3.5-turbo",
"created": 1714017655
}
The error below means you have exceeded your rate limit. For basic users, the rate limit is a global limit of 15 requests per minute. Upgrade to premium to get more.
{
"error": {
"message": "Something went wrong, try again later.",
"type": "error",
"param": None,
"code": None
}
}
// Sorry, something went wrong, you can try contacting us @ discord.gg/ligma!
Last updated