feat: use marka api in all apis
@@ -25,10 +25,12 @@ function extractListFromResponse(response?: string): string[] {
     .filter((line) => line.length > 2);
 }
 
+const model = "gpt-4.1-mini";
+
 export async function summarize(content: string) {
   if (!openAI) return;
   const chatCompletion = await openAI.chat.completions.create({
-    model: "gpt-3.5-turbo",
+    model: model,
     messages: [
       {
         role: "user",
@@ -44,7 +46,7 @@ export async function summarize(content: string) {
 export async function shortenTitle(content: string) {
   if (!openAI) return;
   const chatCompletion = await openAI.chat.completions.create({
-    model: "gpt-3.5-turbo",
+    model: model,
     messages: [
       {
         role: "system",
@@ -64,7 +66,7 @@ export async function shortenTitle(content: string) {
 export async function extractAuthorName(content: string) {
   if (!openAI) return;
   const chatCompletion = await openAI.chat.completions.create({
-    model: "gpt-3.5-turbo",
+    model: model,
     messages: [
       {
         role: "system",
@@ -95,7 +97,7 @@ export async function createGenres(
 ) {
   if (!openAI) return;
   const chatCompletion = await openAI.chat.completions.create({
-    model: "gpt-3.5-turbo",
+    model: model,
     messages: [
       {
         role: "system",
@@ -123,7 +125,7 @@ export async function createKeywords(
 ) {
   if (!openAI) return;
   const chatCompletion = await openAI.chat.completions.create({
-    model: "gpt-3.5-turbo",
+    model: model,
     messages: [
       {
         "role": "system",
@@ -155,7 +157,7 @@ export const getMovieRecommendations = async (
   if (cache.has(cacheId)) return cache.get(cacheId);
 
   const chatCompletion = await openAI.chat.completions.create({
-    model: "gpt-3.5-turbo",
+    model: model,
     messages: [
       {
         role: "user",
@@ -193,7 +195,7 @@ respond with a plain unordered list each item starting with the year the movie w
 export async function createTags(content: string) {
   if (!openAI) return;
   const chatCompletion = await openAI.chat.completions.create({
-    model: "gpt-3.5-turbo",
+    model: model,
     messages: [
       {
         role: "system",
@@ -212,7 +214,7 @@ export async function createTags(content: string) {
 export async function extractRecipe(content: string) {
   if (!openAI) return;
   const completion = await openAI.beta.chat.completions.parse({
-    model: "gpt-4o-2024-08-06",
+    model: model,
     temperature: 0.1,
     messages: [
       {
@@ -230,7 +232,7 @@ export async function extractRecipe(content: string) {
 export async function extractArticleMetadata(content: string) {
   if (!openAI) return;
   const completion = await openAI.beta.chat.completions.parse({
-    model: "gpt-4o-2024-08-06",
+    model: model,
     temperature: 0.1,
     messages: [
       {
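For context, the pattern this commit converges on looks roughly like the sketch below: one module-level model constant that every completion helper reads, so switching models is a one-line change. Only the model constant and the model: model usage come from the diff itself; the client initialization and the prompt text are assumptions, since they sit outside the changed lines.

// Minimal sketch of the resulting pattern (TypeScript, openai v4+ client).
// The openAI initialization and the prompt below are assumed, not part of
// this commit; the shared `model` constant and its use are from the diff.
import OpenAI from "openai";

const openAI = process.env.OPENAI_API_KEY
  ? new OpenAI({ apiKey: process.env.OPENAI_API_KEY })
  : undefined;

// Single place to change the model for every helper in this module.
const model = "gpt-4.1-mini";

export async function summarize(content: string) {
  if (!openAI) return;
  const chatCompletion = await openAI.chat.completions.create({
    model: model,
    messages: [{ role: "user", content: `Summarize the following:\n${content}` }],
  });
  return chatCompletion.choices[0]?.message?.content ?? undefined;
}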