Build an AI-powered web application that extracts action items from developer notes.
构建一个AI驱动的Web应用,从开发者笔记中提取行动项。
This week, you'll build a complete web application with both rule-based and LLM-powered extraction capabilities. The application allows users to:
import json

from ollama import chat

from app.config import settings
def extract_action_items_llm(text: str) -> list[str]:
    """Extract action items from free-form text using an LLM.

    Args:
        text: Input text to extract from.

    Returns:
        List of extracted action-item strings (empty if none found or if
        the model's output cannot be parsed).

    Raises:
        ValueError: If ``text`` is empty or longer than
            ``settings.max_text_length`` characters.
    """
    # Input validation: reject empty or oversized input up front.
    if not text or len(text) > settings.max_text_length:
        raise ValueError(f"Text must be 1-{settings.max_text_length} characters")

    # Build the prompt. The doubled braces render a literal {"items": []}
    # example in the final string (f-string escaping).
    prompt = f"""Extract action items from the following text.
An action item is a task that needs to be done. Look for:
- Bullet points (-, *, •, or numbered lists)
- Keywords (todo:, action:, next:)
- Imperative sentences starting with verbs like: fix, implement, create, update
- Checkbox markers ([ ], [todo])
Text:
{text}
Return a JSON object with a single key "items" containing an array of action item strings.
If no action items are found, return {{"items": []}}."""

    # Call the LLM; format="json" constrains the model to emit valid JSON.
    # A low temperature (from settings) keeps extraction deterministic.
    response = chat(
        model=settings.ollama_model,
        messages=[{"role": "user", "content": prompt}],
        format="json",  # Enable structured output
        options={"temperature": settings.extraction_temperature},
    )

    # Parse the model output. format="json" makes malformed JSON unlikely
    # but not impossible; treat it as "no items" rather than crashing.
    try:
        result = json.loads(response["message"]["content"])
    except json.JSONDecodeError:
        return []

    # Normalise the response shapes models actually return. The isinstance
    # guard matters: `"items" in result` would raise TypeError if the model
    # returned a bare number or null instead of an object.
    if isinstance(result, list):
        return result
    if isinstance(result, dict):
        if "items" in result:
            return result["items"]
        if "actionItems" in result:
            return [item["description"] for item in result["actionItems"]]
    return []
| Test Class | Tests | Coverage |
|---|---|---|
| TestExtractActionItems | 5 tests | Bullets, keywords, empty, no items, imperatives |
| TestExtractActionItemsLLM | 6 tests | LLM extraction scenarios |
def test_extract_bullets():
    """All three bullet styles (-, *, •) are recognised as action items."""
    sample = """
    - Fix the login bug
    * Update the documentation
    • Review PR #123
    """
    extracted = extract_action_items(sample)
    assert len(extracted) == 3
    assert "fix the login bug" in extracted[0].lower()
def test_extract_empty():
    """An empty string yields no action items."""
    result = extract_action_items("")
    assert not result
# Run all tests
cd week2
pytest tests/ -v
# Run with coverage
# (HTML report is written to htmlcov/; open htmlcov/index.html to view)
pytest tests/ --cov=app --cov-report=html
# Result: ======================== 11 passed in 14.30s =========================
from pydantic import BaseModel, Field, validator
class NoteCreate(BaseModel):
    """Request model for creating a note.

    Both fields are required; the length bounds reject empty and
    oversized payloads at the validation layer.
    """

    # Short human-readable title, 1-200 characters.
    title: str = Field(..., min_length=1, max_length=200)
    # Note body, 1-10000 characters (matches the extractor's input cap).
    content: str = Field(..., min_length=1, max_length=10000)
class NoteResponse(BaseModel):
    """Response model for a note as returned by the API."""

    id: int  # database primary key
    title: str
    content: str
    # NOTE(review): these fields require `from datetime import datetime`,
    # which is not visible in this snippet — confirm it exists at file top.
    created_at: datetime
    updated_at: datetime

    class Config:
        # Allow building the model from ORM objects via attribute access
        # (instead of only from dicts).
        from_attributes = True  # Pydantic v2
from pydantic_settings import BaseSettings, SettingsConfigDict
class Settings(BaseSettings):
    """Application configuration, loaded from the environment / .env file."""

    model_config = SettingsConfigDict(
        env_file=".env",  # local overrides are read from .env
        case_sensitive=False  # env var names match fields case-insensitively
    )

    app_name: str = "Action Item Extractor"
    # Ollama model tag used for LLM extraction.
    ollama_model: str = "llama3.1:8b"
    # Upper bound on input size accepted by the extractors.
    max_text_length: int = 10000
    # Low temperature keeps extraction output mostly deterministic.
    extraction_temperature: float = 0.3
# Usage: import the shared settings instance and read a value from it.
from app.config import settings

model = settings.ollama_model
POST /extract-llm - Extract action items using LLM
GET /notes - List all notes
POST /notes - Create a new note
<!-- Trigger rule-based extraction -->
<button onclick="extractWithRules()">Extract (Rules)</button>

<!-- Trigger LLM-based extraction -->
<button onclick="extractWithLLM()">Extract LLM</button>

<!-- Fetch and display all saved notes -->
<button onclick="listNotes()">List Notes</button>
Prompt Engineering:
API Calling:
- format="json" for structured output / 使用结构化输出
- Set an appropriate temperature / 设置适当的温度
Type Safety:
Error Handling:
Separation of Concerns:
Configuration Management: