HoneyHive Go SDK

HoneyHive is a model observability and evaluation platform that helps you continuously improve your models in production. We help you evaluate, deploy, monitor, and fine-tune both closed and open-source large language models for production use cases, so you can optimize model performance and align your models with your users' preferences.

SDK Installation

go get github.com/honeyhive-ai/honeyhive-go-sdk
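Once installed, the SDK is imported under the module path, with the request and response models living under pkg/models (these are the same import paths used in the examples below):

import (
    "github.com/honeyhive-ai/honeyhive-go-sdk"
    "github.com/honeyhive-ai/honeyhive-go-sdk/pkg/models/operations"
    "github.com/honeyhive-ai/honeyhive-go-sdk/pkg/models/shared"
)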

Authentication

After signing up on the app, you can find your API key on the Settings page. The key is passed as a bearer token when the client is constructed, as in the examples below.
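
One way to avoid hard-coding the key is to read it from the environment. A minimal sketch, assuming an environment variable named HONEYHIVE_API_KEY (the variable name is illustrative, not an SDK requirement):

package main

import (
    "os"

    "github.com/honeyhive-ai/honeyhive-go-sdk"
    "github.com/honeyhive-ai/honeyhive-go-sdk/pkg/models/shared"
)

func main() {
    // HONEYHIVE_API_KEY is an illustrative variable name; use whatever
    // secret management your deployment already has.
    apiKey := os.Getenv("HONEYHIVE_API_KEY")

    s := honeyhive.New(honeyhive.WithSecurity(
        shared.Security{
            BearerAuth: shared.SchemeBearerAuth{
                Authorization: "Bearer " + apiKey,
            },
        },
    ))
    _ = s // the client is used in the examples below
}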

SDK Example Usage

Ingesting a single generation

package main

import (
    "context"
    "log"

    "github.com/honeyhive-ai/honeyhive-go-sdk"
    "github.com/honeyhive-ai/honeyhive-go-sdk/pkg/models/operations"
    "github.com/honeyhive-ai/honeyhive-go-sdk/pkg/models/shared"
)

func main() {
    s := honeyhive.New(honeyhive.WithSecurity(
        shared.Security{
            BearerAuth: shared.SchemeBearerAuth{
                Authorization: "Bearer <YOUR API KEY>",
            },
        },
    ))
    
    // project name in the HoneyHive platform
    taskName := "<YOUR PROJECT NAME>"

    // example prompt looks like "Say this is a {{var}}"
    prompt := "<YOUR PROMPT TEMPLATE>"

    // the model used to generate the response, ex. "text-davinci-003"
    model := "<MODEL ENGINE>"

    // the input variable that was inserted into the prompt 
    // ex. map[string]interface{}{"var": "test"}
    inputs := map[string]interface{}{
        // <THE INPUT INSERTED INTO THE PROMPT>
    }

    // the hyperparameters used to generate the response
    temp := 0.5
    topP := 1.0
    freqPenalty := 0.0
    presPenalty := 0.0
    maxTokens := 100
    hyperparameters := shared.HyperParameters{
        FrequencyPenalty: &freqPenalty,
        MaxTokens:        &maxTokens,
        PresencePenalty:  &presPenalty,
        Temperature:      &temp,
        TopP:             &topP,
    }

    // the full response object returned by the model provider, for example:
    // "id": "cmpl-6oIrGYG5V1GJi57QhsoGWOs1AHwb7",
    // "object": "text_completion",
    // "created": 1677446910,
    // "model": "text-davinci-003",
    // "choices": []map[string]interface{}{
    // 	{
    // 		"text": "\n\nThis is indeed a test",
    // 		"index": 0,
    // 		"logprobs": nil,
    // 		"finish_reason": "length",
    // 	},
    // },
    // "usage": map[string]interface{}{
    // 	"prompt_tokens": 5,
    // 	"completion_tokens": 7,
    // 	"total_tokens": 12,
    // },
    response := map[string]interface{}{
        // <THE RESPONSE OBJECT FROM THE MODEL PROVIDER>
    }

    // the final text generated by the model, like resp.Choices[0].Text for OpenAI
    generation := "<FINAL GENERATION>"

    // the source of the generation
    // ideally the saved version name in the HoneyHive platform, ex. "curie-writer"
    source := "<PROMPT VERSION NAME>"

    // in case you are tracking latency, you can send that as well
    latency := 1000.23

    req := operations.IngestSingleGenerationRequest{
        Request: shared.SingleGenerationInput{
            Task: &taskName,
            Prompt: &prompt,
            Model: &model,
            Inputs: inputs,
            Response: response,
            Source: &source,
            Hyperparameters: &hyperparameters,
            Generation: &generation,
            Latency: &latency,
        },
    }

    ctx := context.Background()
    res, err := s.Generation.IngestSingleGeneration(ctx, req)
    if err != nil {
        log.Fatal(err)
    }

    if res.SingleGenerationOutput != nil {
        // the generation id can be extracted from the response
        // and used later, e.g. when attaching feedback
        generationID := *res.SingleGenerationOutput.GenerationID
        log.Printf("ingested generation %s", generationID)
    }
}
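
For reference, the example completion spelled out in the comments above corresponds to a response map like the following (the values are the illustrative ones from the comment, not real model output), and it can be dropped in place of the empty response placeholder:

    response := map[string]interface{}{
        "id":      "cmpl-6oIrGYG5V1GJi57QhsoGWOs1AHwb7",
        "object":  "text_completion",
        "created": 1677446910,
        "model":   "text-davinci-003",
        "choices": []map[string]interface{}{
            {
                "text":          "\n\nThis is indeed a test",
                "index":         0,
                "logprobs":      nil,
                "finish_reason": "length",
            },
        },
        "usage": map[string]interface{}{
            "prompt_tokens":     5,
            "completion_tokens": 7,
            "total_tokens":      12,
        },
    }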

Ingesting feedback for a generation

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/honeyhive-ai/honeyhive-go-sdk"
    "github.com/honeyhive-ai/honeyhive-go-sdk/pkg/models/operations"
    "github.com/honeyhive-ai/honeyhive-go-sdk/pkg/models/shared"
)

func main() {
    s := honeyhive.New(honeyhive.WithSecurity(
        shared.Security{
            BearerAuth: shared.SchemeBearerAuth{
                Authorization: "Bearer <YOUR API KEY>",
            },
        },
    ))
    
    taskName := "<YOUR PROJECT NAME>"
    generationID := "<GENERATION ID>"

    // the feedback JSON can include generation feedback as well as user properties, ex.
    // map[string]interface{}{
    //     "feedback_provided": true,
    //     "accepted": true,
    //     "correction": "This is indeed a test",
    //     "user_country": "US",
    //     "user_tenant": "honeyhive",
    // }
    feedbackJSON := map[string]interface{}{
        // <YOUR FEEDBACK JSON>
    }
    
    req := operations.CreateFeedbackRequest{
        Request: shared.Feedback{
            Task: &taskName,
            GenerationID: &generationID,
            FeedbackJSON: feedbackJSON,
        },
    }

    ctx := context.Background()
    res, err := s.Feedback.CreateFeedback(ctx, req)
    if err != nil {
        log.Fatal(err)
    }

    if res.Feedback != nil {
        // handle response
        fmt.Println(*res.Feedback.GenerationID)
    }
}
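
Since feedback is keyed by the generation ID, a common pattern is to hold on to the ID returned by IngestSingleGeneration and attach feedback to the same generation later. A minimal end-to-end sketch reusing only the calls shown above; the ingestion request is trimmed to a few fields for brevity, so see the first example for the full set:

package main

import (
    "context"
    "log"

    "github.com/honeyhive-ai/honeyhive-go-sdk"
    "github.com/honeyhive-ai/honeyhive-go-sdk/pkg/models/operations"
    "github.com/honeyhive-ai/honeyhive-go-sdk/pkg/models/shared"
)

func main() {
    s := honeyhive.New(honeyhive.WithSecurity(
        shared.Security{
            BearerAuth: shared.SchemeBearerAuth{
                Authorization: "Bearer <YOUR API KEY>",
            },
        },
    ))
    ctx := context.Background()

    taskName := "<YOUR PROJECT NAME>"
    model := "<MODEL ENGINE>"
    generation := "<FINAL GENERATION>"

    // ingest the generation and keep the returned ID
    ingestRes, err := s.Generation.IngestSingleGeneration(ctx, operations.IngestSingleGenerationRequest{
        Request: shared.SingleGenerationInput{
            Task:       &taskName,
            Model:      &model,
            Generation: &generation,
        },
    })
    if err != nil {
        log.Fatal(err)
    }
    if ingestRes.SingleGenerationOutput == nil || ingestRes.SingleGenerationOutput.GenerationID == nil {
        log.Fatal("no generation id returned")
    }
    generationID := *ingestRes.SingleGenerationOutput.GenerationID

    // later, once user feedback arrives, attach it to the same generation
    _, err = s.Feedback.CreateFeedback(ctx, operations.CreateFeedbackRequest{
        Request: shared.Feedback{
            Task:         &taskName,
            GenerationID: &generationID,
            FeedbackJSON: map[string]interface{}{"accepted": true},
        },
    })
    if err != nil {
        log.Fatal(err)
    }
}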

SDK Available Operations

Dataset

  • CreateDataset - Create Dataset
  • DeleteDataset - Delete Datasets
  • FindDatasets - Get Datasets

Feedback

  • CreateFeedback - Create Feedback

FineTunedModel

  • FindFineTunedModels - This endpoint gets the fine-tuned models being managed by the user.

Generation

  • GetGeneration - Get Generations
  • IngestGenerations - Create Generation for Task
  • IngestSingleGeneration - Ingest Single Generation
  • ModelPromptCreateGeneration - Create Generation for Model and Prompt
  • TaskCreateGeneration - Create Generation for Task

Metric

  • CreateMetric - Create Metric
  • FindMetrics - Get Metrics

Prompt

  • ChangePrompt - Update Prompts
  • CreatePrompt - Create Prompt
  • DeletePrompt - Delete Prompts
  • FindPrompts - Get Prompts

Task

  • CreateTask - Create a new task
  • FindAllTasks - Find all Tasks
  • UpdateTask - Update an existing task

SDK Generated by Speakeasy