Prompting techniques using instructor

Nabil Wasti
8 min read · Jun 17, 2024

--

Let’s apply the prompting techniques described in “The Prompt Report: A Systematic Survey of Prompting Techniques” using the instructor library.

1. Text-Based Prompting Techniques

In-Context Learning (ICL)

Definition and Implementation

In-Context Learning (ICL) refers to providing examples within the prompt to guide the model’s responses. This technique can be particularly effective for teaching the model about new tasks or domains.

Examples: Few-Shot and Zero-Shot Prompting Techniques

Here’s how to implement ICL with the Instructor library:

from openai import OpenAI
import instructor
from pydantic import BaseModel

# Apply the patch to the OpenAI client: after patching,
# chat.completions.create accepts a `response_model` argument and
# returns a validated Pydantic instance instead of a raw completion.
client = instructor.from_openai(OpenAI())

class ClassificationResult(BaseModel):
    # Sentiment label produced by the classifier (e.g. "Positive").
    label: str


def few_shot_prompting(examples: list, query: str) -> ClassificationResult:
    """Classify *query* via few-shot in-context learning.

    Args:
        examples: Dicts with "input" and "output" keys; each pair is
            replayed as a prior user/assistant exchange so the model
            can infer the task from the demonstrations.
        query: The new classification request.

    Returns:
        A ClassificationResult validated by instructor.
    """
    messages = [
        {"role": "system", "content": "You are an expert text classifier."},
    ]

    # Replay each demonstration as a prior conversational turn.
    for example in examples:
        messages.append({"role": "user", "content": example["input"]})
        messages.append({"role": "assistant", "content": example["output"]})

    messages.append({"role": "user", "content": query})

    return client.chat.completions.create(
        model="gpt-3.5-turbo",
        response_model=ClassificationResult,
        messages=messages,
    )


# Example usage
examples = [
    {"input": "Classify the following text: 'I love this product!'", "output": "Positive"},
    {"input": "Classify the following text: 'This is the worst experience ever.'", "output": "Negative"},
]
query = "Classify the following text: 'I am not sure if I like it or not.'"
result = few_shot_prompting(examples, query)
print(result.label)

Zero-Shot Prompting

Definition and Applications

Zero-shot prompting involves giving the model a task without any examples. This is useful when you want the model to generalize from its training data.

Role Prompting and its Impact on Model Output

Role prompting defines the role of the assistant to guide the model’s response.

class ZeroShotResult(BaseModel):
    # Classification label returned by the model.
    label: str


def zero_shot_prompting(query: str) -> ZeroShotResult:
    """Classify *query* with no in-context examples.

    The system message assigns a persona ("world-class text
    classifier") to steer the model; the task itself is carried
    entirely by *query*.

    Returns:
        A ZeroShotResult validated by instructor.
    """
    messages = [
        {"role": "system", "content": "You are a world-class text classifier."},
        {"role": "user", "content": query},
    ]

    return client.chat.completions.create(
        model="gpt-3.5-turbo",
        response_model=ZeroShotResult,
        messages=messages,
    )


# Example usage
query = "Classify the following text: 'This is an average product.'"
result = zero_shot_prompting(query)
print(result.label)

Thought Generation

Techniques to Stimulate Thought Processes in Models

Thought generation techniques can help models elaborate on ideas or generate creative content.

Practical Examples and Use Cases

class ThoughtGenerationResult(BaseModel):
    # Free-form idea text generated by the model.
    thought: str


def generate_thoughts(prompt: str) -> ThoughtGenerationResult:
    """Generate thoughts based on the given prompt.

    Args:
        prompt: Open-ended question or topic to ideate on.

    Returns:
        A ThoughtGenerationResult validated by instructor.
    """
    messages = [
        {"role": "system", "content": "You are a creative thinker."},
        {"role": "user", "content": prompt},
    ]

    return client.chat.completions.create(
        model="gpt-3.5-turbo",
        response_model=ThoughtGenerationResult,
        messages=messages,
    )


# Example usage
prompt = "What are some innovative uses of artificial intelligence in education?"
result = generate_thoughts(prompt)
print(result.thought)

Decomposition

Breaking Down Complex Tasks into Simpler Prompts

Decomposition involves breaking a complex task into smaller, manageable prompts.

Step-by-Step Guide and Examples

class DecompositionResult(BaseModel):
    # Ordered sub-steps of the task.  NOTE(review): `list` is untyped,
    # so pydantic accepts any element type — consider `list[str]`.
    steps: list


def decompose_task(task: str) -> DecompositionResult:
    """Decompose a complex task into smaller steps.

    Args:
        task: Description of the complex task.

    Returns:
        A DecompositionResult whose `steps` holds the sub-steps.
    """
    messages = [
        {"role": "system", "content": "You are an expert at breaking down complex tasks into smaller steps."},
        {"role": "user", "content": f"Break down the following task into smaller steps: {task}"},
    ]

    return client.chat.completions.create(
        model="gpt-3.5-turbo",
        response_model=DecompositionResult,
        messages=messages,
    )


# Example usage
task = "Plan a project to develop a new software application."
result = decompose_task(task)
print(result.steps)

Ensembling

Combining Multiple Prompts to Enhance Performance

Ensembling involves using multiple prompts and combining their outputs to achieve better results.

Methods and Examples

class EnsembleResult(BaseModel):
    # Concatenation of the per-prompt answers, one per line.
    combined_result: str


def ensemble_prompting(prompts: list) -> EnsembleResult:
    """Run each prompt independently and merge the answers.

    Each prompt is sent as its own single-turn conversation; the
    per-prompt answers are then joined with newlines into one result.

    Args:
        prompts: Independent questions to ask the model.

    Returns:
        An EnsembleResult whose `combined_result` holds all answers.
    """
    outputs = []

    for prompt in prompts:
        messages = [
            {"role": "system", "content": "You are a knowledgeable assistant."},
            {"role": "user", "content": prompt},
        ]
        result = client.chat.completions.create(
            model="gpt-3.5-turbo",
            response_model=EnsembleResult,
            messages=messages,
        )
        outputs.append(result.combined_result)

    # join() avoids the quadratic string concatenation of `+=` in a loop.
    return EnsembleResult(combined_result="\n".join(outputs).strip())


# Example usage
prompts = [
    "Explain the concept of machine learning.",
    "Describe the benefits of using machine learning in healthcare.",
    "What are the challenges of implementing machine learning models?"
]
result = ensemble_prompting(prompts)
print(result.combined_result)

Self-Criticism

Techniques for Generating Self-Critical Responses

Self-criticism techniques help the model generate responses that critique its own output.

Applications and Examples

class SelfCriticismResult(BaseModel):
    # The model's critique of the supplied text.
    critique: str


def generate_self_criticism(prompt: str) -> SelfCriticismResult:
    """Generate a self-critical response based on the given prompt.

    Args:
        prompt: Text (plus instructions) the model should critique.

    Returns:
        A SelfCriticismResult validated by instructor.
    """
    messages = [
        {"role": "system", "content": "You are a reflective thinker."},
        {"role": "user", "content": prompt},
    ]

    return client.chat.completions.create(
        model="gpt-3.5-turbo",
        response_model=SelfCriticismResult,
        messages=messages,
    )


# Example usage
prompt = "Review the following paragraph for any potential flaws: 'Artificial intelligence is the future of technology. It will solve all our problems.'"
result = generate_self_criticism(prompt)
print(result.critique)

2. Task-Oriented Prompting Techniques

Chain of Thought (CoT)

Definition and Implementation

Chain of Thought (CoT) prompting helps the model generate step-by-step solutions for complex problems by breaking down the reasoning process into smaller, logical steps.

Practical Example

from pydantic import BaseModel


class CoTResult(BaseModel):
    # Ordered reasoning steps.  NOTE(review): untyped `list`;
    # `list[str]` would give stricter validation.
    steps: list


def chain_of_thought(prompt: str) -> CoTResult:
    """Generate a step-by-step solution for the given prompt.

    Args:
        prompt: Problem statement to reason through.

    Returns:
        A CoTResult whose `steps` holds the reasoning chain.
    """
    messages = [
        {"role": "system", "content": "You are a logical thinker."},
        {"role": "user", "content": f"Break down the following problem into a step-by-step solution: {prompt}"},
    ]

    return client.chat.completions.create(
        model="gpt-3.5-turbo",
        response_model=CoTResult,
        messages=messages,
    )


# Example usage
prompt = "Calculate the total cost of a shopping list with items priced at $10, $20, and $30, including a 10% tax."
result = chain_of_thought(prompt)
print(result.steps)

Self-Consistency

Definition and Implementation

Self-consistency involves generating multiple answers for the same prompt and then choosing the most consistent answer.

Practical Example

from typing import List
from pydantic import BaseModel


class SelfConsistencyResult(BaseModel):
    # Majority answer across the sampled generations.
    consistent_answer: str


def self_consistency(prompt: str, n: int = 5) -> SelfConsistencyResult:
    """Sample the model *n* times and return the most frequent answer.

    Args:
        prompt: Question to ask repeatedly.
        n: Number of independent samples to draw (default 5).

    Returns:
        A SelfConsistencyResult holding the majority answer; ties are
        broken in favor of the answer seen first.
    """
    from collections import Counter  # local import keeps the snippet self-contained

    answers = []
    for _ in range(n):
        messages = [
            {"role": "system", "content": "You are a consistent thinker."},
            {"role": "user", "content": prompt},
        ]
        result = client.chat.completions.create(
            model="gpt-3.5-turbo",
            response_model=SelfConsistencyResult,
            messages=messages,
        )
        answers.append(result.consistent_answer)

    # Counter.most_common is O(n) and breaks ties deterministically by
    # first occurrence; max(set(answers), key=answers.count) was O(n^2)
    # with an arbitrary, set-order-dependent tie-break.
    most_consistent = Counter(answers).most_common(1)[0][0]
    return SelfConsistencyResult(consistent_answer=most_consistent)


# Example usage
prompt = "What is the capital of France?"
result = self_consistency(prompt)
print(result.consistent_answer)

Planning

Techniques for Planning Complex Tasks

Planning techniques help models break down complex tasks into a sequence of actions or steps.

Practical Example

class PlanningResult(BaseModel):
    # Ordered plan steps.  NOTE(review): untyped `list`; consider `list[str]`.
    plan: list


def plan_task(prompt: str) -> PlanningResult:
    """Generate a plan for the given task.

    Args:
        prompt: Description of the task to plan.

    Returns:
        A PlanningResult whose `plan` holds the sequence of actions.
    """
    messages = [
        {"role": "system", "content": "You are a strategic planner."},
        {"role": "user", "content": f"Plan the following task step by step: {prompt}"},
    ]

    return client.chat.completions.create(
        model="gpt-3.5-turbo",
        response_model=PlanningResult,
        messages=messages,
    )


# Example usage
prompt = "Organize a conference on artificial intelligence."
result = plan_task(prompt)
print(result.plan)

Tool Use

Integrating External Tools for Enhanced Prompting

Tool use involves integrating external tools to enhance the capabilities of the model, such as calculators, databases, or APIs.

Practical Example

class ToolUseResult(BaseModel):
    # The model's answer, grounded in the tool's output.
    result: str


def use_tool(prompt: str, tool_function) -> ToolUseResult:
    """Answer *prompt* with the help of an external tool.

    Args:
        prompt: The user's question.
        tool_function: Callable invoked with *prompt*; its return value
            is passed to the model as grounding context.

    Returns:
        A ToolUseResult validated by instructor.
    """
    tool_output = tool_function(prompt)
    messages = [
        {"role": "system", "content": "You are an assistant that uses external tools to provide accurate answers."},
        # Bug fix: the original message contained only the tool output,
        # so the model never saw the actual question being asked.
        {"role": "user", "content": f"Question: {prompt}\nTool output: {tool_output}\nUse the tool output to answer the question."},
    ]

    return client.chat.completions.create(
        model="gpt-3.5-turbo",
        response_model=ToolUseResult,
        messages=messages,
    )

# Stand-in for a real external tool (calculator, database, API, ...).
def example_tool_function(prompt):
    """Echo a canned "processed" response for *prompt*."""
    tagged = "Processed result for: {}".format(prompt)
    return tagged

# Example usage: route the prompt through the stub tool, then have the
# model answer from the tool's output.
prompt = "Calculate the distance between New York and Los Angeles."
result = use_tool(prompt, example_tool_function)
print(result.result)

Role Play

Definition and Applications

Role play involves setting a role for the model to guide its responses, making it behave as an expert in a specific domain.

Practical Example

class RolePlayResult(BaseModel):
    # Expert advice produced under the assigned persona.
    advice: str


def role_play(prompt: str, role: str) -> RolePlayResult:
    """Perform role play by setting a persona for the model.

    Args:
        prompt: The question to ask.
        role: Domain expertise to assign (e.g. "software engineer");
            interpolated into the system message.

    Returns:
        A RolePlayResult validated by instructor.
    """
    messages = [
        {"role": "system", "content": f"You are an expert {role}."},
        {"role": "user", "content": prompt},
    ]

    return client.chat.completions.create(
        model="gpt-3.5-turbo",
        response_model=RolePlayResult,
        messages=messages,
    )


# Example usage
prompt = "What are the best practices for software development?"
role = "software engineer"
result = role_play(prompt, role)
print(result.advice)

Interactive Prompting

Techniques for Interactive Dialogues

Interactive prompting involves creating dialogues where the model and the user engage in a back-and-forth conversation to refine the response.

Practical Example

class InteractiveResult(BaseModel):
    # The model's reply to the follow-up turn.
    response: str


def interactive_prompting(initial_prompt: str, follow_up: str) -> InteractiveResult:
    """Create a two-turn interactive dialogue with the model.

    Args:
        initial_prompt: The user's opening question.
        follow_up: The user's follow-up question.

    Returns:
        An InteractiveResult validated by instructor.
    """
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": initial_prompt},
        # Bug fix: the follow-up is a USER question; it was originally
        # sent with role "assistant", telling the model it had already
        # said it instead of asking the model to answer it.
        {"role": "user", "content": follow_up},
    ]

    return client.chat.completions.create(
        model="gpt-3.5-turbo",
        response_model=InteractiveResult,
        messages=messages,
    )


# Example usage
initial_prompt = "Explain the concept of blockchain technology."
follow_up = "How does it differ from traditional databases?"
result = interactive_prompting(initial_prompt, follow_up)
print(result.response)

3. Advanced Prompting Techniques

Prompt Chaining

Definition and Implementation

Prompt chaining involves using the output of one prompt as the input for the next, creating a chain of prompts.

Practical Example

class PromptChainingResult(BaseModel):
    # The output of the final link in the chain.
    final_result: str


def prompt_chaining(initial_prompt: str, chain_prompts: list) -> PromptChainingResult:
    """Feed each prompt the output of the previous one.

    Args:
        initial_prompt: Seed text for the first link of the chain.
        chain_prompts: Follow-on prompts, each appended to the previous
            model output before being sent.

    Returns:
        A PromptChainingResult holding the last model output.
    """
    current_result = initial_prompt
    for prompt in chain_prompts:
        messages = [
            {"role": "system", "content": "You are an assistant that builds on previous responses."},
            # Previous output and next prompt are concatenated into one
            # user turn, so each call sees the accumulated context.
            {"role": "user", "content": f"{current_result} {prompt}"},
        ]
        result = client.chat.completions.create(
            model="gpt-3.5-turbo",
            response_model=PromptChainingResult,
            messages=messages,
        )
        current_result = result.final_result

    return PromptChainingResult(final_result=current_result)


# Example usage
initial_prompt = "Describe the process of photosynthesis."
chain_prompts = [
    "Explain the role of sunlight in this process.",
    "Describe the role of chlorophyll.",
    "What are the end products of photosynthesis?"
]
result = prompt_chaining(initial_prompt, chain_prompts)
print(result.final_result)

Conditional Prompting

Definition and Use Cases

Conditional prompting involves altering the prompt based on specific conditions or previous responses to guide the model towards the desired outcome.

Practical Example

class ConditionalPromptingResult(BaseModel):
    # Answer tailored to the stated condition.
    response: str


def conditional_prompting(prompt: str, condition: str) -> ConditionalPromptingResult:
    """Answer *prompt* under a constraint stated in the system message.

    Args:
        prompt: The user's request.
        condition: Constraint the answer must respect (e.g. a user
            attribute); interpolated into the system message.

    Returns:
        A ConditionalPromptingResult validated by instructor.
    """
    messages = [
        {"role": "system", "content": f"You are an assistant that responds based on the following condition: {condition}"},
        {"role": "user", "content": prompt},
    ]

    return client.chat.completions.create(
        model="gpt-3.5-turbo",
        response_model=ConditionalPromptingResult,
        messages=messages,
    )


# Example usage
prompt = "Provide health advice."
condition = "The user is a diabetic."
result = conditional_prompting(prompt, condition)
print(result.response)

Hypothetical Prompting

Techniques and Applications

Hypothetical prompting involves asking the model to consider hypothetical scenarios to explore different possibilities or outcomes.

Practical Example

class HypotheticalPromptingResult(BaseModel):
    # The model's exploration of the hypothetical outcome.
    response: str


def hypothetical_prompting(prompt: str, scenario: str) -> HypotheticalPromptingResult:
    """Ask the model to answer *prompt* under a hypothetical *scenario*.

    Args:
        prompt: The question to answer.
        scenario: The counterfactual premise to assume first.

    Returns:
        A HypotheticalPromptingResult validated by instructor.
    """
    messages = [
        {"role": "system", "content": "You are an assistant that explores hypothetical scenarios."},
        # The scenario is sent as its own user turn before the question.
        {"role": "user", "content": f"Consider the following hypothetical scenario: {scenario}"},
        {"role": "user", "content": prompt},
    ]

    return client.chat.completions.create(
        model="gpt-3.5-turbo",
        response_model=HypotheticalPromptingResult,
        messages=messages,
    )


# Example usage
prompt = "What would be the impact on global trade?"
scenario = "If all countries abolished tariffs."
result = hypothetical_prompting(prompt, scenario)
print(result.response)

Context-Aware Prompting

Definition and Examples

Context-aware prompting involves providing the model with relevant context to improve the accuracy and relevance of its responses.

Practical Example

class ContextAwarePromptingResult(BaseModel):
    # Answer grounded in the supplied context.
    response: str


def context_aware_prompting(prompt: str, context: str) -> ContextAwarePromptingResult:
    """Answer *prompt* using caller-supplied background *context*.

    Args:
        prompt: The user's request.
        context: Background facts the model should draw on.

    Returns:
        A ContextAwarePromptingResult validated by instructor.
    """
    messages = [
        {"role": "system", "content": "You are an assistant that uses provided context to enhance responses."},
        # Context precedes the question as a separate user turn.
        {"role": "user", "content": f"Context: {context}"},
        {"role": "user", "content": prompt},
    ]

    return client.chat.completions.create(
        model="gpt-3.5-turbo",
        response_model=ContextAwarePromptingResult,
        messages=messages,
    )


# Example usage
prompt = "Summarize the recent advancements in AI."
context = "Recent advancements include GPT-4, reinforcement learning breakthroughs, and improved natural language understanding."
result = context_aware_prompting(prompt, context)
print(result.response)

Dynamic Prompting

Techniques for Dynamic Adjustments

Dynamic prompting involves adjusting the prompt dynamically based on the interaction or changing conditions during the conversation.

Practical Example

class DynamicPromptingResult(BaseModel):
    # The model's reply to the adjusted prompt.
    response: str


def dynamic_prompting(initial_prompt: str, follow_up: str) -> DynamicPromptingResult:
    """Adjust the conversation with a follow-up user turn.

    Args:
        initial_prompt: The user's opening question.
        follow_up: The user's refinement/follow-up question.

    Returns:
        A DynamicPromptingResult validated by instructor.
    """
    messages = [
        {"role": "system", "content": "You are an assistant that dynamically adjusts prompts based on interactions."},
        {"role": "user", "content": initial_prompt},
        # Bug fix: the follow-up is a USER question; sending it with
        # role "assistant" told the model it had already said it.
        {"role": "user", "content": follow_up},
    ]

    return client.chat.completions.create(
        model="gpt-3.5-turbo",
        response_model=DynamicPromptingResult,
        messages=messages,
    )


# Example usage
initial_prompt = "Explain the benefits of cloud computing."
follow_up = "How does it improve scalability?"
result = dynamic_prompting(initial_prompt, follow_up)
print(result.response)

Prompting techniques adapted from “The Prompt Report: A Systematic Survey of Prompting Techniques”.

--

--