Configuration
Configuration Details
{
  "id": "io.infomaker.wingman",
  "name": "im-wingman",
  "style": "https://plugins.writer.infomaker.io/v1/infomaker/im-wingman/1.2.0/style.css",
  "url": "https://plugins.writer.infomaker.io/v1/infomaker/im-wingman/1.2.0/index.js",
  "mandatory": false,
  "enabled": true,
  "data": {
    "host": "https://ai-eu-west-1.saas-prod.infomaker.io",
    "contextLimit": 2,
    "encryptedKeyPhrase": "somePhrase",
    "widgets": [
      "generic",
      "headline",
      "summary"
    ],
    "widgetConfig": {
      "headline": {
        "digital": {
          "preText": "Generate a headline",
          "creativity": 5,
          "headlineCount": 10,
          "digitalHeadlineWordCount": 20,
          "checkBoxDefault": false,
          "providerAccessToken": "someToken",
          "serviceProvider": "openai",
          "modelId": "gpt-3.5-turbo"
        },
        "print": {
          "preText": "Suggest headlines for the article provided in XML tag. If the article has a strong local connection, reflect this in the headline.",
          "creativity": 5,
          "headlineCount": 10,
          "printHeadlineWordCount": 8,
          "checkBoxDefault": false,
          "providerAccessToken": "",
          "serviceProvider": "Bedrock",
          "modelId": "anthropic.claude-3-sonnet-20240229-v1:0"
        }
      },
      "summary": {
        "digital": {
          "preText": "Generate a summary",
          "creativity": 5,
          "summaryCount": 5,
          "digitalSummaryWordCount": 40,
          "checkBoxDefault": false,
          "providerAccessToken": "someToken",
          "serviceProvider": "openai",
          "modelId": "gpt-3.5-turbo"
        },
        "print": {
          "preText": "Suggest summaries for the article provided in XML tag. Act as a news editor and your task is to suggest a summary for the article.",
          "creativity": 5,
          "summaryCount": 5,
          "printSummaryWordCount": 100,
          "checkBoxDefault": false,
          "providerAccessToken": "",
          "serviceProvider": "Bedrock",
          "modelId": "anthropic.claude-v2:1"
        }
      },
      "generic": {
        "digital": {
          "preText": "Generate a headline",
          "creativity": 5,
          "checkBoxDefault": false,
          "providerAccessToken": "someToken",
          "serviceProvider": "openai",
          "modelId": "gpt-3.5-turbo"
        },
        "print": {
          "preText": "You are a News Editor of a News Firm and your task is to suggest headlines for the article provided in XML tag.\n\nPlease use professional tone while generating headlines.",
          "creativity": 5,
          "checkBoxDefault": false,
          "providerAccessToken": "",
          "serviceProvider": "Bedrock",
          "modelId": "anthropic.claude-3-sonnet-20240229-v1:0"
        }
      }
    }
  }
}

contextLimit: Maximum number of context tags that can be sent in the prompt.
encryptedKeyPhrase: Encryption key phrase used to send the API key in encrypted form.
widgets: The widgets that should be enabled in the Wingman plugin; see the minimal example below.
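Each name listed in widgets should have a matching entry under widgetConfig. As a minimal sketch, a configuration that enables only the headline widget could look like the example below; the endpoints and somePhrase are carried over from the sample above, the token values are placeholders, and keeping both digital and print blocks per widget follows the pattern of the sample rather than a documented rule.

{
  "id": "io.infomaker.wingman",
  "name": "im-wingman",
  "style": "https://plugins.writer.infomaker.io/v1/infomaker/im-wingman/1.2.0/style.css",
  "url": "https://plugins.writer.infomaker.io/v1/infomaker/im-wingman/1.2.0/index.js",
  "mandatory": false,
  "enabled": true,
  "data": {
    "host": "https://ai-eu-west-1.saas-prod.infomaker.io",
    "contextLimit": 2,
    "encryptedKeyPhrase": "somePhrase",
    "widgets": [
      "headline"
    ],
    "widgetConfig": {
      "headline": {
        "digital": {
          "preText": "Generate a headline",
          "creativity": 5,
          "headlineCount": 10,
          "digitalHeadlineWordCount": 20,
          "checkBoxDefault": false,
          "providerAccessToken": "someToken",
          "serviceProvider": "openai",
          "modelId": "gpt-3.5-turbo"
        },
        "print": {
          "preText": "Suggest headlines for the article provided in XML tag.",
          "creativity": 5,
          "headlineCount": 10,
          "printHeadlineWordCount": 8,
          "checkBoxDefault": false,
          "providerAccessToken": "",
          "serviceProvider": "Bedrock",
          "modelId": "anthropic.claude-3-sonnet-20240229-v1:0"
        }
      }
    }
  }
}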
Headline Configuration Details

"widgetConfig": {
  "headline": {
    "digital": {
      "preText": "Generate a headline",
      "creativity": 5,
      "headlineCount": 10,
      "digitalHeadlineWordCount": 20,
      "checkBoxDefault": false,
      "providerAccessToken": "someToken",
      "serviceProvider": "openai",
      "modelId": "gpt-3.5-turbo"
    },
    "print": {
      "preText": "Suggest headlines for the article provided in XML tag. If the article has a strong local connection, reflect this in the headline.",
      "creativity": 5,
      "headlineCount": 10,
      "printHeadlineWordCount": 8,
      "checkBoxDefault": false,
      "providerAccessToken": "",
      "serviceProvider": "Bedrock",
      "modelId": "anthropic.claude-3-sonnet-20240229-v1:0"
    }
  },
  ...
}

preText: The custom prompt that is sent to generate the headline.
creativity: Determines the temperature, on a scale of 1–5 (1 = least randomness, 5 = most).
headlineCount: Total headline results to generate.
digitalHeadlineWordCount: Approx. word count for digital headlines.
printHeadlineWordCount: Approx. word count for print headlines.
checkBoxDefault: Default state of the checkbox (false = unchecked by default).
providerAccessToken: Currently applicable to ChatGPT, for organizations that want to use their own API key instead of Naviga's (if available); see the sketch below.
serviceProvider: AI provider for prompt ("openai" for ChatGPT, "Bedrock" for AWS).
modelId: Model to be used for the prompt.
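For instance, an organization that wants ChatGPT headline suggestions under its own API key, with fewer and shorter digital headlines, might adjust only the headline entry roughly as follows. This is a sketch, not a verified configuration: the token string is a placeholder, the shortened print preText is illustrative, and the print variant stays on Bedrock as in the sample.

"headline": {
  "digital": {
    "preText": "Generate a headline",
    "creativity": 3,
    "headlineCount": 5,
    "digitalHeadlineWordCount": 12,
    "checkBoxDefault": false,
    "providerAccessToken": "<your organization's OpenAI API key>",
    "serviceProvider": "openai",
    "modelId": "gpt-3.5-turbo"
  },
  "print": {
    "preText": "Suggest headlines for the article provided in XML tag.",
    "creativity": 3,
    "headlineCount": 5,
    "printHeadlineWordCount": 8,
    "checkBoxDefault": false,
    "providerAccessToken": "",
    "serviceProvider": "Bedrock",
    "modelId": "anthropic.claude-3-sonnet-20240229-v1:0"
  }
}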
Summary Configuration Details

"summary": {
  "digital": {
    "preText": "Generate a summary",
    "creativity": 5,
    "summaryCount": 5,
    "digitalSummaryWordCount": 40,
    "checkBoxDefault": false,
    "providerAccessToken": "someToken",
    "serviceProvider": "openai",
    "modelId": "gpt-3.5-turbo"
  },
  "print": {
    "preText": "Suggest summaries for the article provided in XML tag. Act as a news editor and your task is to suggest a summary for the article.",
    "creativity": 5,
    "summaryCount": 5,
    "printSummaryWordCount": 100,
    "checkBoxDefault": false,
    "providerAccessToken": "",
    "serviceProvider": "Bedrock",
    "modelId": "anthropic.claude-v2:1"
  }
}

preText: The custom prompt that is sent to generate the summary.
creativity: Determines the temperature, on a scale of 1–5 (1 = least randomness, 5 = most).
summaryCount: Total summary results to generate.
digitalSummaryWordCount: Approx. word count for digital summaries.
printSummaryWordCount: Approx. word count for print summaries.
displayCount: Number of results displayed before a "show more" button.
checkBoxDefault: Default state of the checkbox (false = unchecked by default).
providerAccessToken: Currently applicable to ChatGPT, for organizations that want to use their own API key instead of Naviga's (if available).
serviceProvider: AI provider for prompt ("openai" for ChatGPT, "Bedrock" for AWS).
modelId: Model to be used for the prompt.
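As another sketch, a summary configuration tuned for fewer but longer suggestions might look like the following; the values are illustrative, both variants are kept on Bedrock, and the displayCount field described above is not present in the sample configuration, so its placement is not shown here.

"summary": {
  "digital": {
    "preText": "Generate a summary",
    "creativity": 3,
    "summaryCount": 3,
    "digitalSummaryWordCount": 60,
    "checkBoxDefault": false,
    "providerAccessToken": "",
    "serviceProvider": "Bedrock",
    "modelId": "anthropic.claude-v2:1"
  },
  "print": {
    "preText": "Act as a news editor and suggest a summary for the article provided in XML tag.",
    "creativity": 3,
    "summaryCount": 3,
    "printSummaryWordCount": 150,
    "checkBoxDefault": false,
    "providerAccessToken": "",
    "serviceProvider": "Bedrock",
    "modelId": "anthropic.claude-v2:1"
  }
}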
Generic Configuration Details

"generic": {
  "digital": {
    "preText": "Generate a headline",
    "creativity": 5,
    "checkBoxDefault": false,
    "providerAccessToken": "someToken",
    "serviceProvider": "openai",
    "modelId": "gpt-3.5-turbo"
  },
  "print": {
    "preText": "You are a News Editor of a News Firm and your task is to suggest headlines for the article provided in XML tag.\n\nPlease use professional tone while generating headlines.",
    "creativity": 5,
    "checkBoxDefault": false,
    "providerAccessToken": "",
    "serviceProvider": "Bedrock",
    "modelId": "anthropic.claude-3-sonnet-20240229-v1:0"
  }
}

preText: The custom prompt that is sent to generate generic results.
creativity: Determines the temperature, on a scale of 1–5 (1 = least randomness, 5 = most).
checkBoxDefault: Default state of the checkbox (false = unchecked by default).
providerAccessToken: Currently applicable to ChatGPT, for organizations that want to use their own API key instead of Naviga's (if available); see the sketch below.
serviceProvider: AI provider for prompt ("openai" for ChatGPT, "Bedrock" for AWS).
modelId: Model to be used for the prompt.
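Unlike the headline and summary widgets, the generic entry carries no count or word-count fields, only the prompt, creativity, checkbox, and provider settings. As a sketch, pointing both variants of the generic widget at ChatGPT with the organization's own key could look like this; the token string is a placeholder, and whether OpenAI is supported for the print variant is not confirmed by the sample (which uses Bedrock for print), so treat that part as an assumption.

"generic": {
  "digital": {
    "preText": "Generate a headline",
    "creativity": 4,
    "checkBoxDefault": false,
    "providerAccessToken": "<your organization's OpenAI API key>",
    "serviceProvider": "openai",
    "modelId": "gpt-3.5-turbo"
  },
  "print": {
    "preText": "You are a News Editor of a News Firm and your task is to suggest headlines for the article provided in XML tag.",
    "creativity": 4,
    "checkBoxDefault": false,
    "providerAccessToken": "<your organization's OpenAI API key>",
    "serviceProvider": "openai",
    "modelId": "gpt-3.5-turbo"
  }
}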