Class: OpenAI

OpenAI LLM implementation

Implements

LLM

Constructors

constructor

new OpenAI(init?)

Parameters

Name   Type
init?  Partial<OpenAI> & { azure?: AzureOpenAIConfig }

Defined in

packages/core/src/llm/LLM.ts:152
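
A minimal usage sketch. The "llamaindex" package name and reading the key from process.env.OPENAI_API_KEY are assumptions about your setup; the fields follow the properties documented below.

```typescript
import { OpenAI } from "llamaindex";

// Illustrative values only; every field is optional and has a default.
const llm = new OpenAI({
  model: "gpt-4",
  temperature: 0.1,
  maxTokens: 512,
  // When omitted, the key is typically picked up from the environment instead.
  apiKey: process.env.OPENAI_API_KEY,
});
```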

Properties

additionalChatOptions

Optional additionalChatOptions: Omit<Partial<ChatCompletionCreateParams>, "model" | "temperature" | "max_tokens" | "messages" | "top_p" | "streaming">

Defined in

packages/core/src/llm/LLM.ts:135
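
As a sketch, extra request fields that this class does not manage itself can be forwarded here; the user field below is just one ChatCompletionCreateParams option that is not excluded by the Omit above.

```typescript
import { OpenAI } from "llamaindex";

const llm = new OpenAI({
  model: "gpt-4-1106-preview",
  additionalChatOptions: {
    // Illustrative extra field passed through to the chat completion request.
    user: "docs-example",
  },
});
```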


additionalSessionOptions

Optional additionalSessionOptions: Omit<Partial<ClientOptions>, "apiKey" | "timeout" | "maxRetries">

Defined in

packages/core/src/llm/LLM.ts:145
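
A hedged sketch of forwarding client options to the underlying OpenAI Node session; baseURL is a ClientOptions field of the OpenAI Node SDK, but the endpoint below is purely hypothetical.

```typescript
import { OpenAI } from "llamaindex";

const llm = new OpenAI({
  additionalSessionOptions: {
    // Point the client at an OpenAI-compatible proxy (hypothetical URL).
    baseURL: "https://example.com/v1",
  },
});
```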


apiKey

Optional apiKey: string = undefined

Defined in

packages/core/src/llm/LLM.ts:141


callbackManager

Optional callbackManager: CallbackManager

Defined in

packages/core/src/llm/LLM.ts:150


hasStreaming

hasStreaming: boolean = true

Implementation of

LLM.hasStreaming

Defined in

packages/core/src/llm/LLM.ts:128


maxRetries

maxRetries: number

Defined in

packages/core/src/llm/LLM.ts:142


maxTokens

Optional maxTokens: number

Defined in

packages/core/src/llm/LLM.ts:134


model

model: "gpt-3.5-turbo" | "gpt-3.5-turbo-1106" | "gpt-3.5-turbo-16k" | "gpt-4" | "gpt-4-32k" | "gpt-4-1106-preview" | "gpt-4-vision-preview"

Defined in

packages/core/src/llm/LLM.ts:131


session

session: OpenAISession

Defined in

packages/core/src/llm/LLM.ts:144


temperature

temperature: number

Defined in

packages/core/src/llm/LLM.ts:132


timeout

Optional timeout: number

Defined in

packages/core/src/llm/LLM.ts:143


topP

topP: number

Defined in

packages/core/src/llm/LLM.ts:133

Accessors

metadata

get metadata(): Object

Returns

Object

Name           Type
contextWindow  number
maxTokens      undefined | number
model          "gpt-3.5-turbo" | "gpt-3.5-turbo-1106" | "gpt-3.5-turbo-16k" | "gpt-4" | "gpt-4-32k" | "gpt-4-1106-preview" | "gpt-4-vision-preview"
temperature    number
tokenizer      CL100K_BASE
topP           number

Implementation of

LLM.metadata

Defined in

packages/core/src/llm/LLM.ts:206
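
A short sketch of reading the accessor, assuming an llm instance created as in the constructor example; using contextWindow to budget prompt size is an illustrative pattern, not part of this API.

```typescript
const { contextWindow, maxTokens, model } = llm.metadata;

// Illustrative: leave room for the completion when packing prompt context.
const promptBudget = contextWindow - (maxTokens ?? 0);
console.log(`${model} leaves roughly ${promptBudget} tokens for the prompt`);
```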

Methods

chat

chat<T, R>(messages, parentEvent?, streaming?): Promise<R>

Get a chat response from the LLM

Type parameters

Name  Type
T     extends undefined | boolean = undefined
R     T extends true ? AsyncGenerator<string, void, unknown> : ChatResponse

Parameters

Name          Type           Description
messages      ChatMessage[]  -
parentEvent?  Event          -
streaming?    T              Sets the return type of chat() and complete(): when true, an AsyncGenerator is returned instead of a ChatResponse.

Returns

Promise<R>

Implementation of

LLM.chat

Defined in

packages/core/src/llm/LLM.ts:249
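
A sketch of both call styles, assuming ChatMessage is exported by the same "llamaindex" package and that the code runs in an async (or top-level await) context; the response shape follows the ChatResponse return type named above.

```typescript
import { OpenAI, ChatMessage } from "llamaindex";

const llm = new OpenAI({ model: "gpt-3.5-turbo" });

const messages: ChatMessage[] = [
  { role: "system", content: "You are a concise assistant." },
  { role: "user", content: "Explain vector indexes in one sentence." },
];

// streaming omitted (or false): resolves to a ChatResponse.
const response = await llm.chat(messages);
console.log(response.message.content);

// streaming set to true: resolves to an AsyncGenerator of string deltas.
const stream = await llm.chat(messages, undefined, true);
for await (const delta of stream) {
  process.stdout.write(delta);
}
```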


complete

complete<T, R>(prompt, parentEvent?, streaming?): Promise<R>

Get a prompt completion from the LLM

Type parameters

Name  Type
T     extends undefined | boolean = undefined
R     T extends true ? AsyncGenerator<string, void, unknown> : ChatResponse

Parameters

Name          Type    Description
prompt        string  The prompt to complete.
parentEvent?  Event   -
streaming?    T       -

Returns

Promise<R>

Implementation of

LLM.complete

Defined in

packages/core/src/llm/LLM.ts:286
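
A sketch mirroring the chat() example, assuming the same llm instance; the non-streaming result has the same ChatResponse shape as chat().

```typescript
// Non-streaming: resolves to a ChatResponse.
const completion = await llm.complete("Write a haiku about embeddings.");
console.log(completion.message.content);

// Streaming: pass true as the third argument to receive string deltas.
const stream = await llm.complete("Write a haiku about embeddings.", undefined, true);
for await (const delta of stream) {
  process.stdout.write(delta);
}
```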


mapMessageType

mapMessageType(messageType): "function" | "user" | "assistant" | "system"

Parameters

Name         Type
messageType  MessageType

Returns

"function" | "user" | "assistant" | "system"

Defined in

packages/core/src/llm/LLM.ts:232
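
An illustrative call, assuming the same llm instance; for roles that MessageType shares with the OpenAI API, the mapping is presumably the identity.

```typescript
// Maps a LlamaIndex MessageType onto the role string the OpenAI API expects.
const role = llm.mapMessageType("system"); // presumably "system"
```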


streamChat

Protected streamChat(messages, parentEvent?): AsyncGenerator<string, void, unknown>

Parameters

Name          Type
messages      ChatMessage[]
parentEvent?  Event

Returns

AsyncGenerator<string, void, unknown>

Defined in

packages/core/src/llm/LLM.ts:300


streamComplete

Protected streamComplete(query, parentEvent?): AsyncGenerator<string, void, unknown>

Parameters

Name          Type
query         string
parentEvent?  Event

Returns

AsyncGenerator<string, void, unknown>

Defined in

packages/core/src/llm/LLM.ts:364


tokens

tokens(messages): number

Calculates the number of tokens needed for the given chat messages

Parameters

Name      Type
messages  ChatMessage[]

Returns

number

Implementation of

LLM.tokens

Defined in

packages/core/src/llm/LLM.ts:217
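
A sketch of estimating message size before sending, assuming the same llm instance as above; the count is based on the CL100K_BASE tokenizer named in metadata.

```typescript
const count = llm.tokens([
  { role: "user", content: "How many tokens does this message use?" },
]);
console.log(`Approximately ${count} tokens`);
```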