ref: Split LLM suggestion finding and title rewriting
commit 3762f0d265
parent b487f01aae

1 changed file with 15 additions and 5 deletions
@@ -14,7 +14,7 @@ class LLMClient:
         self.config_ai = config_ai if config_ai else AiConfig.from_env()
         self.client = client if client else Groq(api_key=self.config_ai.API_KEY)

-    def rewrite_title(self, original_content: str) -> str:
+    def get_alternative_title_suggestions(self, original_content: str) -> str:
         suggestions = self.client.chat.completions.create(
             messages=[
                 {
@@ -31,16 +31,22 @@ class LLMClient:
         suggestions_str = suggestions.choices[0].message.content
         if not suggestions_str:
             raise ValueError
         print("Suggestions: ", suggestions_str)
+        return suggestions_str
+
+    def rewrite_title(
+        self, original_content: str, suggestions: str | None = None
+    ) -> str:
+        if not suggestions:
+            suggestions = self.get_alternative_title_suggestions(original_content)
         winner = self.client.chat.completions.create(
             messages=[
                 {
                     "role": "system",
-                    "content": "You are an editor at a satirical newspaper. Improve on the following satirical headline. For a given headline, you diligently evaluate: (1) Whether the headline is funny; (2) Whether the headline follows a clear satirical goal; (3) Whether the headline has sufficient substance and bite. Based on the outcomes of your review, you pick your favorite headline from the given suggestions and you make targeted revisions to it. Your output consists solely of the revised headline.",
+                    "content": "You are an editor at a satirical newspaper. Improve on the following satirical headline. For a given headline, you diligently evaluate: (1) Whether the headline is funny; (2) Whether the headline follows a clear satirical goal; (3) Whether the headline has sufficient substance and bite. Based on the outcomes of your review, you pick your favorite headline from the given suggestions and you make targeted revisions to it. Keep the length roughly to that of the original suggestions. Your output consists solely of the revised headline.",
                 },
                 {
                     "role": "user",
-                    "content": suggestions_str,
+                    "content": suggestions,
                 },
             ],
             model="llama-3.3-70b-versatile",
@@ -52,11 +58,15 @@ class LLMClient:
         return winner_str.strip(" \"'")

     def rewrite_summary(self, orig: Original, improved_title: str) -> str:
+        no_shocking_turn: bool = True
         summary = self.client.chat.completions.create(
             messages=[
                 {
                     "role": "user",
-                    "content": f"Below there is an original title and an original summary. Then follows an improved title. Write an improved summary based on the original summary which fits to the improved title. Only output the improved summary.\n\nTitle:{orig.title}\nSummary:{orig.summary}\n---\nTitle:{improved_title}\nSummary:",
+                    "content": f"""
+Below there is an original title and an original summary. Then follows an improved title. Write an improved summary based on the original summary which fits to the improved title.
+{"Do not use the phrase: 'in a surprising turn of events' or 'in a shocking turn of events.'" if no_shocking_turn else ""}
+Only output the improved summary.\n\nTitle:{orig.title}\nSummary:{orig.summary}\n---\nTitle:{improved_title}\nSummary:""",
                 }
             ],
             model="llama-3.3-70b-versatile",
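With the two stages split, a caller can inspect or reuse the brainstormed headlines before one is picked. A minimal usage sketch follows; it is illustrative only and not part of this commit, and the import path and the article_text placeholder are assumptions:

# Illustrative call site; the module path and article_text are assumed, not shown in this diff.
from llm_client import LLMClient

client = LLMClient()  # by default reads the Groq API key via AiConfig.from_env()
article_text = "..."  # raw article body (placeholder)

# Run the stages separately, e.g. to log the suggestions first ...
suggestions = client.get_alternative_title_suggestions(article_text)
improved_title = client.rewrite_title(article_text, suggestions)

# ... or let rewrite_title() fetch its own suggestions when none are passed.
improved_title = client.rewrite_title(article_text)

The suggestions: str | None = None default presumably keeps the previous one-call flow working while allowing the suggestion step to be reused or tested on its own.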