Compare commits
No commits in common. "7fbbc5d3d39c6897eb268b0f4559f30f95e8bf29" and "45eefaa5871fe8448d87f2e6be85271b5fb6f3ca" have entirely different histories.
7fbbc5d3d3
...
45eefaa587
|
|
@ -1,46 +0,0 @@
|
|||
---
|
||||
name: image-crop
|
||||
description: Crop images and get image metadata using sharp library. Use when user asks to crop, trim, cut an image, or get image dimensions/info. Supports JPEG, PNG, WebP.
|
||||
---
|
||||
|
||||
# Image Crop Skill
|
||||
|
||||
Image manipulation using the sharp library.
|
||||
|
||||
## Scripts
|
||||
|
||||
### Get Image Info
|
||||
|
||||
```bash
|
||||
node scripts/image-info.js <image-path>
|
||||
```
|
||||
|
||||
Returns dimensions, format, DPI, alpha channel info.
|
||||
|
||||
### Crop Image
|
||||
|
||||
```bash
|
||||
node scripts/crop-image.js <input> <output> <left> <top> <width> <height>
|
||||
```
|
||||
|
||||
Parameters:
|
||||
- `left`, `top` - offset from top-left corner (pixels)
|
||||
- `width`, `height` - size of crop area (pixels)
|
||||
|
||||
## Workflow Example
|
||||
|
||||
1. Get dimensions first:
|
||||
```bash
|
||||
node scripts/image-info.js photo.jpg
|
||||
# Size: 1376x768
|
||||
```
|
||||
|
||||
2. Calculate crop (e.g., remove 95px from top and bottom):
|
||||
```bash
|
||||
# new_height = 768 - 95 - 95 = 578
|
||||
node scripts/crop-image.js photo.jpg photo-cropped.jpg 0 95 1376 578
|
||||
```
|
||||
|
||||
## Requirements
|
||||
|
||||
Node.js with sharp (`npm install sharp`)
|
||||
|
|
@ -1,29 +0,0 @@
|
|||
#!/usr/bin/env node
// Crop a rectangular region out of an image using sharp.
// Usage: crop-image.js <input> <output> <left> <top> <width> <height>
// All four geometry arguments are pixel integers.

const sharp = require('sharp');

const args = process.argv.slice(2);

if (args.length < 6) {
  console.error('Usage: crop-image.js <input> <output> <left> <top> <width> <height>');
  console.error('Example: crop-image.js photo.jpg cropped.jpg 0 95 1376 578');
  process.exit(1);
}

const [input, output, ...geometry] = args;

// Validate the numeric arguments up front: previously NaN from a bad
// argument flowed straight into sharp and surfaced as an opaque library
// error instead of a clear usage message.
const names = ['left', 'top', 'width', 'height'];
const values = geometry.slice(0, 4).map((raw, i) => {
  const n = parseInt(raw, 10);
  if (Number.isNaN(n) || n < 0) {
    console.error(`Invalid value for ${names[i]}: "${raw}" (expected a non-negative integer)`);
    process.exit(1);
  }
  return n;
});
const [left, top, width, height] = values;

if (width === 0 || height === 0) {
  console.error('width and height must be greater than zero');
  process.exit(1);
}

sharp(input)
  .extract({ left, top, width, height })
  .toFile(output)
  .then(info => {
    console.log(`Saved: ${output} (${info.width}x${info.height})`);
  })
  .catch(err => {
    console.error('Error:', err.message);
    process.exit(1);
  });
|
||||
|
|
@ -1,24 +0,0 @@
|
|||
#!/usr/bin/env node
// Print basic metadata for an image file: size, format, and (when
// present in the metadata) DPI, alpha channel, and EXIF orientation.

const sharp = require('sharp');

const input = process.argv[2];

if (!input) {
  console.error('Usage: image-info.js <image-path>');
  process.exit(1);
}

// Async wrapper around sharp's metadata() promise; output lines are
// identical to the original promise-chain version.
async function printInfo(file) {
  const m = await sharp(file).metadata();
  console.log(`Size: ${m.width}x${m.height}`);
  console.log(`Format: ${m.format}`);
  if (m.density) console.log(`DPI: ${m.density}`);
  if (m.hasAlpha) console.log(`Alpha: yes`);
  if (m.orientation) console.log(`Orientation: ${m.orientation}`);
}

printInfo(input).catch(err => {
  console.error('Error:', err.message);
  process.exit(1);
});
|
||||
|
|
@ -1,30 +0,0 @@
|
|||
---
|
||||
name: upload-image
|
||||
description: Upload images to Banatie CDN. Use when user asks to upload an image, get CDN URL, or publish image to CDN. Supports JPEG, PNG, WebP.
|
||||
argument-hint: <image-path>
|
||||
---
|
||||
|
||||
# Upload Image to Banatie CDN
|
||||
|
||||
Upload images to the Banatie CDN and get the CDN URL.
|
||||
|
||||
## Requirements
|
||||
|
||||
- `BANATIE_API_KEY` in `.env` file
|
||||
- Dependencies: tsx, form-data, dotenv
|
||||
|
||||
## Usage
|
||||
|
||||
```bash
|
||||
tsx .claude/skills/upload-image/scripts/upload.ts <image-path>
|
||||
```
|
||||
|
||||
## Output
|
||||
|
||||
- ID: unique image identifier
|
||||
- URL: CDN URL for the image
|
||||
- Size: dimensions (if available)
|
||||
|
||||
## Supported formats
|
||||
|
||||
JPEG, PNG, WebP
|
||||
|
|
@ -1,117 +0,0 @@
|
|||
import * as fs from "fs";
|
||||
import * as path from "path";
|
||||
import * as https from "https";
|
||||
import "dotenv/config";
|
||||
import FormData from "form-data";
|
||||
|
||||
// Banatie CDN upload endpoint (POST multipart/form-data, X-API-Key auth).
const API_URL = "https://api.banatie.app/api/v1/images/upload";

// Shape of the JSON payload returned by the upload endpoint.
// Only id, storageUrl, width and height are read by main() below;
// the remaining fields mirror the API response as observed.
interface UploadResponse {
  success: boolean; // NOTE(review): presumably true on success — not inspected by callers here
  data: {
    id: string; // unique image identifier
    storageUrl: string; // CDN URL for the uploaded image
    alias?: string; // assumes an optional human-readable alias — TODO confirm against API docs
    source: string;
    width: number | null; // null when dimensions are unavailable (see main()'s guard)
    height: number | null;
    mimeType: string;
    fileSize: number; // assumes bytes — confirm against API docs
  };
}
|
||||
|
||||
function getMimeType(filePath: string): string {
|
||||
const ext = path.extname(filePath).toLowerCase();
|
||||
const mimeTypes: Record<string, string> = {
|
||||
".jpg": "image/jpeg",
|
||||
".jpeg": "image/jpeg",
|
||||
".png": "image/png",
|
||||
".webp": "image/webp",
|
||||
};
|
||||
return mimeTypes[ext] || "application/octet-stream";
|
||||
}
|
||||
|
||||
async function uploadImage(filePath: string): Promise<UploadResponse> {
|
||||
const apiKey = process.env.BANATIE_API_KEY;
|
||||
if (!apiKey) {
|
||||
throw new Error("BANATIE_API_KEY not found in environment");
|
||||
}
|
||||
|
||||
const absolutePath = path.resolve(filePath);
|
||||
if (!fs.existsSync(absolutePath)) {
|
||||
throw new Error(`File not found: ${absolutePath}`);
|
||||
}
|
||||
|
||||
const fileName = path.basename(absolutePath);
|
||||
const mimeType = getMimeType(absolutePath);
|
||||
|
||||
const form = new FormData();
|
||||
form.append("file", fs.createReadStream(absolutePath), {
|
||||
filename: fileName,
|
||||
contentType: mimeType,
|
||||
});
|
||||
|
||||
return new Promise((resolve, reject) => {
|
||||
const url = new URL(API_URL);
|
||||
|
||||
const req = https.request(
|
||||
{
|
||||
hostname: url.hostname,
|
||||
port: 443,
|
||||
path: url.pathname,
|
||||
method: "POST",
|
||||
headers: {
|
||||
"X-API-Key": apiKey,
|
||||
...form.getHeaders(),
|
||||
},
|
||||
},
|
||||
(res) => {
|
||||
let data = "";
|
||||
res.on("data", (chunk) => (data += chunk));
|
||||
res.on("end", () => {
|
||||
try {
|
||||
const json = JSON.parse(data);
|
||||
if (res.statusCode === 200 || res.statusCode === 201) {
|
||||
resolve(json);
|
||||
} else {
|
||||
reject(
|
||||
new Error(`Upload failed (${res.statusCode}): ${data}`)
|
||||
);
|
||||
}
|
||||
} catch {
|
||||
reject(new Error(`Invalid response: ${data}`));
|
||||
}
|
||||
});
|
||||
}
|
||||
);
|
||||
|
||||
req.on("error", reject);
|
||||
form.pipe(req);
|
||||
});
|
||||
}
|
||||
|
||||
async function main() {
|
||||
const filePath = process.argv[2];
|
||||
|
||||
if (!filePath) {
|
||||
console.error("Usage: pnpm upload:image <path-to-image>");
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
try {
|
||||
console.log(`Uploading: ${filePath}`);
|
||||
const result = await uploadImage(filePath);
|
||||
|
||||
console.log(`\nSuccess!`);
|
||||
console.log(`ID: ${result.data.id}`);
|
||||
console.log(`URL: ${result.data.storageUrl}`);
|
||||
if (result.data.width && result.data.height) {
|
||||
console.log(`Size: ${result.data.width}x${result.data.height}`);
|
||||
}
|
||||
} catch (error) {
|
||||
console.error("Error:", error instanceof Error ? error.message : error);
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
main();
|
||||
1
.env
|
|
@ -1,4 +1,3 @@
|
|||
DATAFORSEO_API_LOGIN=regx@usul.su
|
||||
DATAFORSEO_API_PASS=4f4b51b823df234c
|
||||
DATAFORSEO_API_CREDENTIALS=cmVneEB1c3VsLnN1OjRmNGI1MWI4MjNkZjIzNGM=
|
||||
BANATIE_API_KEY=bnt_e6eb544c505922b9bfe5b088e067fc3940efff16b1b88585c5518946630d4a66
|
||||
|
|
|
|||
|
|
@ -27,10 +27,6 @@
|
|||
"whois": {
|
||||
"command": "npx",
|
||||
"args": ["-y", "whois-mcp"]
|
||||
},
|
||||
"chrome-devtools": {
|
||||
"command": "npx",
|
||||
"args": ["-y", "chrome-devtools-mcp@latest"]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,46 +0,0 @@
|
|||
---
|
||||
slug: ai-coding-methodologies-beyond-vibe-coding
|
||||
title: "AI Coding Methodologies: Beyond Vibe Coding"
|
||||
author: henry-technical
|
||||
status: inbox
|
||||
created: 2025-01-22
|
||||
updated: 2025-01-22
|
||||
content_type: explainer
|
||||
primary_keyword: ""
|
||||
secondary_keywords: []
|
||||
assets_folder: assets/ai-coding-methodologies-beyond-vibe-coding/
|
||||
---
|
||||
|
||||
# Idea
|
||||
|
||||
**Source:** Perplexity research on AI-assisted development terminology (Jan 2025)
|
||||
|
||||
**Concept:** Overview article covering AI coding methodologies landscape. Position vibe coding as casual/risky approach, contrast with professional techniques (Spec-Driven Development, AI-DLC, Agentic Coding, etc.).
|
||||
|
||||
**Goal:** Establish Henry's expertise in AI-assisted development space. Warm-up article for Dev.to account.
|
||||
|
||||
**Angle:** "Wide view" — Henry surveys the field and shares his practical preferences.
|
||||
|
||||
---
|
||||
|
||||
# Brief
|
||||
|
||||
*pending @strategist*
|
||||
|
||||
---
|
||||
|
||||
# Outline
|
||||
|
||||
See [outline.md](assets/ai-coding-methodologies-beyond-vibe-coding/outline.md)
|
||||
|
||||
# Draft
|
||||
|
||||
See [text.md](assets/ai-coding-methodologies-beyond-vibe-coding/text.md)
|
||||
|
||||
# SEO
|
||||
|
||||
See [seo-metadata.md](assets/ai-coding-methodologies-beyond-vibe-coding/seo-metadata.md)
|
||||
|
||||
# Activity Log
|
||||
|
||||
See [log-chat.md](assets/ai-coding-methodologies-beyond-vibe-coding/log-chat.md)
|
||||
|
|
@ -0,0 +1,52 @@
|
|||
---
|
||||
slug: midjourney-alternatives-bn-blog
|
||||
title: "Best Midjourney Alternatives in 2026"
|
||||
author: banatie
|
||||
status: planning
|
||||
created: 2026-01-12
|
||||
updated: 2026-01-12
|
||||
content_type: comparison
|
||||
channel: banatie.app/blog
|
||||
primary_keyword: "midjourney alternative"
|
||||
primary_volume: 1300
|
||||
primary_kd: 3
|
||||
secondary_keywords:
|
||||
- "midjourney alternatives"
|
||||
- "midjourney api"
|
||||
- "leonardo ai"
|
||||
- "stable diffusion"
|
||||
- "flux ai"
|
||||
- "chatgpt image generator"
|
||||
estimated_traffic: 700-1200
|
||||
---
|
||||
|
||||
# Midjourney Alternatives — Comparison Guide
|
||||
|
||||
## Summary
|
||||
|
||||
Comprehensive comparison of AI image generation tools as Midjourney alternatives. Covers UI-first services, open source options, API-first platforms, and aggregators.
|
||||
|
||||
**Strategic value:** Ultra-low KD (3), solid volume (1,300), quick win for domain authority.
|
||||
|
||||
**Banatie positioning:** API-First Platforms section — developer workflow native with MCP integration.
|
||||
|
||||
---
|
||||
|
||||
## Assets
|
||||
|
||||
- `assets/midjourney-alternatives-bn-blog/brief.md` — full brief with structure and requirements
|
||||
|
||||
---
|
||||
|
||||
## Log
|
||||
|
||||
### 2026-01-12 — @strategist
|
||||
Created brief. Consolidated from three inbox ideas. Keyword research completed via DataForSEO ($0.35 spent).
|
||||
|
||||
Categories defined:
|
||||
1. Models with Native Service (UI-First)
|
||||
2. Open Source / Self-Hosted
|
||||
3. API-First Platforms ← Banatie here
|
||||
4. Aggregators (Multi-Model)
|
||||
|
||||
Next: @architect creates outline.
|
||||
|
|
@ -1,156 +0,0 @@
|
|||
---
|
||||
slug: beyond-vibe-coding
|
||||
title: "Beyond Vibe Coding: Professional AI Development Methodologies"
|
||||
author: henry-technical
|
||||
status: drafting
|
||||
created: 2026-01-22
|
||||
updated: 2026-01-24
|
||||
content_type: explainer
|
||||
primary_keyword: "ai coding methodologies"
|
||||
secondary_keywords: ["spec driven development", "ai pair programming", "human in the loop ai", "ralph loop"]
|
||||
assets_folder: assets/beyond-vibe-coding/
|
||||
---
|
||||
|
||||
# Idea
|
||||
|
||||
**Source:** Perplexity research on AI-assisted development terminology (Jan 2026)
|
||||
|
||||
**Concept:** Overview article covering AI coding methodologies landscape. Position vibe coding (Collins Word of Year 2025) as entry point, then survey professional approaches: Spec-Driven Development, Agentic Coding, AI Pair Programming, HITL, TDD+AI.
|
||||
|
||||
**Goal:** Establish Henry's expertise in AI-assisted development. Second article for Dev.to account warmup.
|
||||
|
||||
**Angle:** Survey + practitioner perspective (via interview with Oleg)
|
||||
|
||||
---
|
||||
|
||||
# Brief
|
||||
|
||||
See [brief.md](assets/beyond-vibe-coding/brief.md) for complete strategic context, target reader analysis, content requirements, and success criteria.
|
||||
|
||||
**Quick Summary:**
|
||||
- **Goal:** Fight "AI is for juniors" stigma with data-backed professional methodologies survey
|
||||
- **Angle:** Seniors use AI MORE than juniors — methodology separates pros from beginners
|
||||
- **Format:** Survey of 6 methodologies with credentials, practitioner insights
|
||||
- **Target:** 2,500-3,500 words, thought leadership + long-tail SEO
|
||||
|
||||
---
|
||||
|
||||
# Outline
|
||||
|
||||
See [outline.md](assets/beyond-vibe-coding/outline.md) for complete article structure.
|
||||
|
||||
**Tone:** "Here's what exists and here's what I actually do" — landscape survey through practitioner's lens, not prescriptive guide
|
||||
|
||||
**Structure:**
|
||||
- Introduction (400w) — Hook with vibe coding, establish legitimacy question
|
||||
- 6 Methodology sections (400-500w each) — Credentials block, description, Henry's experience (integrated naturally)
|
||||
- Conclusion (450w) — Landscape overview, legitimacy validation with stats, what I use, community invitation
|
||||
|
||||
**Total:** ~2,800 words
|
||||
**Code examples:** 3 (CLAUDE.md spec, .claude/settings.json, TDD test)
|
||||
|
||||
---
|
||||
|
||||
# Validation Status
|
||||
|
||||
**Validated:** 2026-01-23
|
||||
**Validator:** @validator
|
||||
**Verdict:** REVISE → COMPLETE ✅
|
||||
|
||||
See [validation-results.md](assets/beyond-vibe-coding/validation-results.md) for complete validation report.
|
||||
|
||||
**Summary:**
|
||||
- ✅ **4 claims fully verified:** Senior/junior AI usage, 76% adoption, 27% bans, Ralph Loop virality
|
||||
- ✅ **Security vulnerabilities claim updated:** Added source citations [1][2][3]
|
||||
- ✅ **Removed false claims:** "359x growth" for SDD, "90% Fortune 100 Copilot adoption"
|
||||
- ✅ **Minor stat correction:** "33%" → "about a third" for senior developers
|
||||
|
||||
**Revisions Applied by @architect:**
|
||||
1. Removed Claim 4 (90% Fortune 100) from Conclusion section
|
||||
2. Removed Claim 6 (359x growth) from Spec-Driven credentials, replaced with qualitative description
|
||||
3. Added source citations for Claim 3 (security vulnerabilities): Georgetown CSET, Veracode, industry reports
|
||||
4. Updated Claim 1 to "about a third" instead of "33%" in Introduction and Conclusion
|
||||
|
||||
**Next Step:** Ready for @writer to create Draft
|
||||
|
||||
---
|
||||
|
||||
# Assets Index
|
||||
|
||||
All working files for this article:
|
||||
|
||||
## Core Files
|
||||
|
||||
| File | Purpose | Status |
|
||||
|------|---------|--------|
|
||||
| [brief.md](assets/beyond-vibe-coding/brief.md) | Complete Brief: strategic context, target reader, requirements, success criteria | ✅ Complete |
|
||||
| [outline.md](assets/beyond-vibe-coding/outline.md) | Article structure with word budgets | ✅ Revised & Complete |
|
||||
| [text.md](assets/beyond-vibe-coding/text.md) | Article draft (English) | ✅ Draft complete |
|
||||
| [text-rus.md](assets/beyond-vibe-coding/text-rus.md) | Article draft (Russian) | ✅ Complete |
|
||||
| [interview.md](assets/beyond-vibe-coding/interview.md) | Oleg's practitioner insights — source for Henry's voice | ✅ Complete |
|
||||
| [log-chat.md](assets/beyond-vibe-coding/log-chat.md) | Activity log and agent comments | ⏳ Active |
|
||||
| [seo-metadata.md](assets/beyond-vibe-coding/seo-metadata.md) | SEO title, description, keywords | ⏳ Pending @seo |
|
||||
|
||||
## Methodology Specs
|
||||
|
||||
Detailed research for each methodology — use for expanding credentials in text.md:
|
||||
|
||||
| File | Methodology | Key Sources |
|
||||
|------|-------------|-------------|
|
||||
| [spec-driven-dev.md](assets/beyond-vibe-coding/spec-driven-dev.md) | Spec-Driven Development | GitHub Spec Kit, AWS Kiro, Tessl, Martin Fowler |
|
||||
| [agentic-coding.md](assets/beyond-vibe-coding/agentic-coding.md) | Agentic Coding + Ralph Loop | arXiv papers, Geoffrey Huntley, Cursor 2.0, GitHub Copilot Agent Mode |
|
||||
| [ai-pair-programming.md](assets/beyond-vibe-coding/ai-pair-programming.md) | AI Pair Programming | GitHub Copilot official, Microsoft Learn, Cursor, Windsurf |
|
||||
| [ai-aided-test-first.md](assets/beyond-vibe-coding/ai-aided-test-first.md) | TDD + AI | Thoughtworks Radar, Kent Beck, DORA Report 2025, Builder.io |
|
||||
|
||||
## Statistics & Research
|
||||
|
||||
| File | Purpose | Status |
|
||||
|------|---------|--------|
|
||||
| [ai-usage-statistics.md](assets/beyond-vibe-coding/ai-usage-statistics.md) | Statistical research: AI adoption by seniority, company policies, security concerns | ✅ Complete |
|
||||
| [ai-adoption-statistics.md](assets/beyond-vibe-coding/ai-adoption-statistics.md) | LaTeX-formatted statistics for infographics (2024-2026 data) | ✅ Complete |
|
||||
| [research-index.md](assets/beyond-vibe-coding/research-index.md) | Methodology clusters, verified sources, interview questions | ⏳ Needs update |
|
||||
| [validation-results.md](assets/beyond-vibe-coding/validation-results.md) | Fact-checking results for all statistical claims | ✅ Complete |
|
||||
|
||||
## Images
|
||||
|
||||
| Folder | Contents | Status |
|
||||
|--------|----------|--------|
|
||||
| [images/comic/](assets/beyond-vibe-coding/images/comic/) | 8 comic illustrations, uploaded to CDN | ✅ Ready |
|
||||
| [images/infographic/](assets/beyond-vibe-coding/images/infographic/) | Infographics (based on ai-adoption-statistics.md) | ⏳ In progress |
|
||||
| [images/comic/cdn-urls.md](assets/beyond-vibe-coding/images/comic/cdn-urls.md) | CDN URLs for all comic images | ✅ Complete |
|
||||
|
||||
## External Research
|
||||
|
||||
| File | Purpose |
|
||||
|------|---------|
|
||||
| [perplexity-chats/AI-Assisted Development_...](research/perplexity-chats/) | Original Perplexity research on terminology |
|
||||
|
||||
---
|
||||
|
||||
# TODO: Part 4 (Potential Future Addition)
|
||||
|
||||
Consider adding a fourth part to the series covering additional methodologies:
|
||||
|
||||
| Methodology | Description | Status |
|
||||
|-------------|-------------|--------|
|
||||
| Architecture-First AI Development | Design patterns and system architecture before AI implementation | ⏳ Needs research |
|
||||
| Prompt-Driven Development | Structured prompt engineering as development methodology | ⏳ Needs research |
|
||||
| Copy-pasting from AI chatbot | Manual workflow — baseline to compare other methods against | ⏳ Needs research |
|
||||
|
||||
**Rationale:** These approaches represent common patterns not covered in Parts 1-3:
|
||||
- Architecture-First — enterprise/complex systems angle
|
||||
- Prompt-Driven — bridges gap between vibe coding and spec-driven
|
||||
- Copy-pasting — the "default" many developers start with, important baseline
|
||||
|
||||
**Next steps:**
|
||||
1. Research each methodology for credentials and sources
|
||||
2. Conduct interview with Oleg for Henry's perspective
|
||||
3. Assess if volume/interest justifies a Part 4
|
||||
|
||||
---
|
||||
|
||||
# Activity Log
|
||||
|
||||
See [log-chat.md](assets/beyond-vibe-coding/log-chat.md)
|
||||
|
||||
**Latest:** @writer completed draft (2026-01-24). 2,650 words, 8 image placeholders for @image agent. No code snippets per user request.
|
||||
|
|
@ -1,67 +0,0 @@
|
|||
---
|
||||
slug: midjourney-alternatives-bn-blog
|
||||
title: "Best Midjourney Alternatives in 2026"
|
||||
author: banatie
|
||||
status: drafting
|
||||
created: 2026-01-12
|
||||
updated: 2026-01-13
|
||||
content_type: comparison
|
||||
channel: banatie.app/blog
|
||||
assets_folder: assets/midjourney-alternatives-bn-blog/
|
||||
primary_keyword: "midjourney alternative"
|
||||
primary_volume: 1300
|
||||
primary_kd: 3
|
||||
secondary_keywords:
|
||||
- "midjourney alternatives"
|
||||
- "midjourney api"
|
||||
- "leonardo ai"
|
||||
- "stable diffusion"
|
||||
- "flux ai"
|
||||
- "chatgpt image generator"
|
||||
estimated_traffic: 700-1200
|
||||
---
|
||||
|
||||
# Midjourney Alternatives — Comparison Guide
|
||||
|
||||
## Summary
|
||||
|
||||
Comprehensive comparison of AI image generation tools as Midjourney alternatives. Covers UI-first services, open source options, API-first platforms, and aggregators.
|
||||
|
||||
**Strategic value:** Ultra-low KD (3), solid volume (1,300), quick win for domain authority.
|
||||
|
||||
**Banatie positioning:** API-First Platforms section — developer workflow native with MCP integration.
|
||||
|
||||
---
|
||||
|
||||
# Outline
|
||||
|
||||
See [outline.md](assets/midjourney-alternatives-bn-blog/outline.md)
|
||||
|
||||
---
|
||||
|
||||
# Draft
|
||||
|
||||
See [text.md](assets/midjourney-alternatives-bn-blog/text.md)
|
||||
|
||||
---
|
||||
|
||||
# SEO
|
||||
|
||||
*Pending — waiting for @seo*
|
||||
|
||||
---
|
||||
|
||||
# Activity Log
|
||||
|
||||
See [log-chat.md](assets/midjourney-alternatives-bn-blog/log-chat.md)
|
||||
|
||||
---
|
||||
|
||||
# Assets
|
||||
|
||||
- [brief.md](assets/midjourney-alternatives-bn-blog/brief.md) — full brief with structure and requirements
|
||||
- [research-complete.md](assets/midjourney-alternatives-bn-blog/research-complete.md) — research on 19 services
|
||||
- [outline.md](assets/midjourney-alternatives-bn-blog/outline.md) — article structure
|
||||
- [text.md](assets/midjourney-alternatives-bn-blog/text.md) — article body
|
||||
- [homepages.md](assets/midjourney-alternatives-bn-blog/homepages.md) — homepage screenshots index
|
||||
- [log-chat.md](assets/midjourney-alternatives-bn-blog/log-chat.md) — agent activity log
|
||||
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
## Overview
|
||||
|
||||
This is a **content repository** for Banatie ([banatie.app](https://banatie.app/)) blog and website. Content is created by 10 Claude Desktop agents. You (Claude Code) manage files and structure.
|
||||
This is a **content repository** for Banatie blog and website. Content is created by 10 Claude Desktop agents. You (Claude Code) manage files and structure.
|
||||
|
||||
**Core principle:** One markdown file = one article. Files move between stage folders like kanban cards.
|
||||
|
||||
|
|
|
|||
|
|
@ -1,20 +0,0 @@
|
|||
# Activity Log
|
||||
|
||||
## 2025-01-22 @strategist
|
||||
|
||||
**Action:** Initial setup, research intake
|
||||
|
||||
**Changes:**
|
||||
- Created article card in `0-inbox/`
|
||||
- Created assets folder structure
|
||||
- Copied Perplexity research to `perplexity-terminology-research.md`
|
||||
- Created `research-index.md` for methodology clustering
|
||||
|
||||
**Notes:**
|
||||
- Goal: warm-up article for Henry's Dev.to account
|
||||
- Approach: survey + personal opinion through interview with Oleg
|
||||
- Need to verify all source links before using
|
||||
|
||||
**For next agent:** @strategist continues with research clustering and interview
|
||||
|
||||
---
|
||||
|
|
@ -1,3 +0,0 @@
|
|||
# Outline
|
||||
|
||||
*pending — will be created after research clustering and interview*
|
||||
|
|
@ -1,423 +0,0 @@
|
|||
# AI-Assisted Development: Кластеризованная терминология и подходы
|
||||
|
||||
**Source:** Perplexity Research, January 2026
|
||||
**Original file:** `/research/perplexity-chats/AI-Assisted Development_ Кластеризованная терминол.md`
|
||||
|
||||
---
|
||||
|
||||
## Domain 1: Experimental & Low-Quality Approaches
|
||||
|
||||
### Vibe Coding
|
||||
|
||||
**Authority Rank: 1** | **Perception: Negative**
|
||||
|
||||
**Sources:**
|
||||
1. **Andrej Karpathy** (OpenAI co-founder, Tesla AI Director) — Wikipedia, Feb 2025
|
||||
2. **Collins English Dictionary** — Word of the Year 2025
|
||||
3. **SonarSource** (Code Quality Platform) — code quality analysis
|
||||
|
||||
**Description:**
|
||||
Term coined by Andrej Karpathy in February 2025, quickly became cultural phenomenon — Collins English Dictionary named it Word of Year 2025. Approach where developer describes task in natural language, AI generates code, but key distinction: **developer does NOT review code**, only looks at execution results.
|
||||
|
||||
Simon Willison quote: "If LLM wrote every line of your code, but you reviewed, tested and understood it all — that's not vibe coding, that's using LLM as typing assistant". Key characteristic: accepting AI-generated code without understanding it.
|
||||
|
||||
Critics point to lack of accountability, maintainability problems, increased security vulnerability risk. May 2025: Swedish app Lovable (using vibe coding) had security vulnerabilities in 170 of 1,645 created web apps. Fast Company September 2025 reported "vibe coding hangover" — senior engineers cite "development hell" working with such code.
|
||||
|
||||
Suitable for "throwaway weekend projects" as Karpathy originally intended, but risky for production systems.
|
||||
|
||||
**Links:**
|
||||
- https://en.wikipedia.org/wiki/Vibe_coding
|
||||
- https://www.sonarsource.com/resources/library/vibe-coding/
|
||||
|
||||
---
|
||||
|
||||
## Domain 2: Enterprise & Production-Grade Methodologies
|
||||
|
||||
### AI-Driven Development Life Cycle (AI-DLC)
|
||||
|
||||
**Authority Rank: 1** | **Perception: Positive - Enterprise**
|
||||
|
||||
**Sources:**
|
||||
1. **AWS** — Raja SP, Principal Solutions Architect, July 2025
|
||||
2. **Amazon Q Developer & Kiro** — official AWS platform
|
||||
|
||||
**Description:**
|
||||
Presented by AWS July 2025 as transformative enterprise methodology. Raja SP created AI-DLC with team after working with 100+ large customers.
|
||||
|
||||
AI as **central collaborator** throughout SDLC with two dimensions:
|
||||
1. **AI-Powered Execution with Human Oversight** — AI creates detailed work plans, actively requests clarifications, defers critical decisions to humans
|
||||
2. **Dynamic Team Collaboration** — while AI handles routine tasks, teams unite for real-time problem solving
|
||||
|
||||
**Three phases:** Inception (Mob Elaboration), Construction (Mob Construction), Operations
|
||||
|
||||
**Terminology:** "sprints" → "bolts" (hours/days instead of weeks); Epics → Units of Work
|
||||
|
||||
**Link:** https://aws.amazon.com/blogs/devops/ai-driven-development-life-cycle/
|
||||
|
||||
### Spec-Driven Development (SDD)
|
||||
|
||||
**Authority Rank: 2** | **Perception: Positive - Systematic**
|
||||
|
||||
**Sources:**
|
||||
1. **GitHub Engineering** — Den Delimarsky, Spec Kit toolkit, September 2025
|
||||
2. **ThoughtWorks Technology Radar** — November 2025
|
||||
3. **Red Hat Developers** — October 2025
|
||||
|
||||
**Description:**
|
||||
Emerged 2025 as direct response to "vibe coding" problems. ThoughtWorks included in Technology Radar. GitHub open-sourced Spec Kit September 2025, supports Claude Code, GitHub Copilot, Gemini CLI.
|
||||
|
||||
**Key principle:** specification becomes source of truth, not code.
|
||||
|
||||
**Spec Kit Workflow:**
|
||||
1. **Constitution** — immutable high-level principles (rules file)
|
||||
2. **/specify** — create specification from high-level prompt
|
||||
3. **/plan** — technical planning based on specification
|
||||
4. **/tasks** — break down into manageable phased parts
|
||||
|
||||
**Three interpretations:** Spec-first, Spec-anchored, Spec-as-source
|
||||
|
||||
**Tools:** Amazon Kiro, GitHub Spec Kit, Tessl Framework
|
||||
|
||||
**Links:**
|
||||
- https://github.blog/ai-and-ml/generative-ai/spec-driven-development-with-ai-get-started-with-a-new-open-source-toolkit/
|
||||
- https://www.thoughtworks.com/radar/techniques/spec-driven-development
|
||||
- https://martinfowler.com/articles/exploring-gen-ai/sdd-3-tools.html
|
||||
- https://developer.microsoft.com/blog/spec-driven-development-spec-kit
|
||||
|
||||
### Architecture-First AI Development
|
||||
|
||||
**Authority Rank: 3** | **Perception: Positive - Professional/Mature**
|
||||
|
||||
**Sources:**
|
||||
1. **WaveMaker** — Vikram Srivats (CCO), Prashant Reddy (Head of AI Product Engineering), January 2026
|
||||
2. **ITBrief Industry Analysis** — January 2026
|
||||
|
||||
**Description:**
|
||||
2026 industry shift. Quote Vikram Srivats: "Second coming of AI coding tools must be all about **Architectural Intelligence** — just Artificial Intelligence no longer fits."
|
||||
|
||||
Shift from "vibe coding" experiments to governance, architecture alignment, long-term maintainability.
|
||||
|
||||
**Key characteristics:**
|
||||
- System design before implementation
|
||||
- AI agents with clear roles: Architect, Builder, Guardian
|
||||
- Coding architectural rules, enforcement review processes
|
||||
- Working from formal specifications
|
||||
- Respect for internal organizational standards
|
||||
|
||||
**Links:**
|
||||
- https://itbrief.co.uk/story/ai-coding-tools-face-2026-reset-towards-architecture
|
||||
- https://itbrief.news/story/ai-coding-tools-face-2026-reset-towards-architecture
|
||||
|
||||
---
|
||||
|
||||
## Domain 3: Quality & Validation-Focused Approaches
|
||||
|
||||
### Test-Driven Development with AI (TDD-AI)
|
||||
|
||||
**Authority Rank: 1** | **Perception: Positive - Quality-Focused**
|
||||
|
||||
**Sources:**
|
||||
1. **Galileo AI Research** — August 2025
|
||||
2. **Builder.io Engineering** — August 2025
|
||||
|
||||
**Description:**
|
||||
Traditional TDD adapted for AI systems. Tests written first → AI generates code to pass tests → verify → refactor.
|
||||
|
||||
Statistical testing for non-deterministic AI outputs — critical distinction from traditional TDD.
|
||||
|
||||
**Links:**
|
||||
- https://galileo.ai/blog/tdd-ai-system-architecture
|
||||
- https://galileo.ai/blog/test-driven-development-ai-systems
|
||||
- https://www.builder.io/blog/test-driven-development-ai
|
||||
|
||||
### Human-in-the-Loop (HITL) AI Development
|
||||
|
||||
**Authority Rank: 2** | **Perception: Positive - Responsible**
|
||||
|
||||
**Sources:**
|
||||
1. **Google Cloud Documentation** — 2026
|
||||
2. **Encord Research** — December 2024
|
||||
3. **Atlassian Engineering** — HULA framework, September 2025
|
||||
|
||||
**Description:**
|
||||
Humans actively involved in AI system lifecycle. Continuous feedback and validation loops. Hybrid approach: human judgment + AI execution.
|
||||
|
||||
**HULA (Human-in-the-Loop AI)** — Atlassian framework for software development agents.
|
||||
|
||||
**Links:**
|
||||
- https://cloud.google.com/discover/human-in-the-loop
|
||||
- https://encord.com/blog/human-in-the-loop-ai/
|
||||
- https://www.atlassian.com/blog/atlassian-engineering/hula-blog-autodev-paper-human-in-the-loop-software-development-agents
|
||||
|
||||
### Quality-First AI Coding
|
||||
|
||||
**Authority Rank: 3** | **Perception: Positive - Professional**
|
||||
|
||||
**Sources:**
|
||||
1. **Qodo.ai** (formerly CodiumAI) — December 2025
|
||||
|
||||
**Description:**
|
||||
Code integrity at core. Qodo.ai — platform with agentic AI code generation and comprehensive testing.
|
||||
|
||||
Production-ready focus: automatic test generation for every code change. Direct contrast to "vibe coding" — quality non-negotiable.
|
||||
|
||||
**Link:** https://www.qodo.ai/ai-code-review-platform/
|
||||
|
||||
### Deterministic AI Development
|
||||
|
||||
**Authority Rank: 4** | **Perception: Positive - Enterprise/Compliance**
|
||||
|
||||
**Source:** Augment Code Research — August 2025
|
||||
|
||||
**Description:**
|
||||
Identical outputs for identical inputs. Rule-based architectures for predictability. Best for: security scanning, compliance checks, refactoring tasks.
|
||||
|
||||
Hybrid approach: probabilistic reasoning + deterministic execution.
|
||||
|
||||
**Link:** https://www.augmentcode.com/guides/deterministic-ai-for-predictable-coding
|
||||
|
||||
---
|
||||
|
||||
## Domain 4: Collaborative Development Patterns
|
||||
|
||||
### AI Pair Programming
|
||||
|
||||
**Authority Rank: 1** | **Perception: Positive - Collaborative**
|
||||
|
||||
**Sources:**
|
||||
1. **GitHub Copilot (Microsoft)** — January 2026
|
||||
2. **Qodo.ai Documentation** — March 2025
|
||||
3. **GeeksforGeeks** — July 2025
|
||||
|
||||
**Description:**
|
||||
AI as "pair programmer" or coding partner. Based on traditional pair programming: driver (human/AI) and navigator (human/AI) roles.
|
||||
|
||||
Real-time collaboration and feedback. Tools: GitHub Copilot, Cursor, Windsurf.
|
||||
|
||||
**Links:**
|
||||
- https://code.visualstudio.com/docs/copilot/overview
|
||||
- https://www.qodo.ai/glossary/pair-programming/
|
||||
- https://www.geeksforgeeks.org/artificial-intelligence/what-is-ai-pair-programming/
|
||||
- https://graphite.com/guides/ai-pair-programming-best-practices
|
||||
|
||||
### Mobbing with AI / Mob Programming with AI
|
||||
|
||||
**Authority Rank: 2** | **Perception: Positive - Team-Focused**
|
||||
|
||||
**Sources:**
|
||||
1. **Atlassian Engineering Blog** — December 2025
|
||||
2. **Aaron Griffith** — January 2025 (YouTube)
|
||||
|
||||
**Description:**
|
||||
Entire team works together, AI as driver. AI generates code/tests in front of team. Team navigates, reviews, refines in real-time.
|
||||
|
||||
Best for: complex problems, knowledge transfer, quality assurance.
|
||||
|
||||
**Links:**
|
||||
- https://www.atlassian.com/blog/atlassian-engineering/mobbing-with-ai
|
||||
- https://www.youtube.com/watch?v=BsFPbYX4WXQ
|
||||
|
||||
### Agentic Coding / Agentic Programming
|
||||
|
||||
**Authority Rank: 3** | **Perception: Positive - Advanced**
|
||||
|
||||
**Sources:**
|
||||
1. **arXiv Research Paper** — August 2025
|
||||
2. **AI Accelerator Institute** — February 2025
|
||||
3. **Apiiro Security Platform** — September 2025
|
||||
|
||||
**Description:**
|
||||
LLM-based agents autonomously plan, execute, improve development tasks. Beyond code completion: generates programs, diagnoses bugs, writes tests, refactors.
|
||||
|
||||
Key properties: **autonomy, interactive, iterative refinement, goal-oriented**.
|
||||
|
||||
Agent behaviors: planning, memory management, tool integration, execution monitoring.
|
||||
|
||||
**Links:**
|
||||
- https://arxiv.org/html/2508.11126v1
|
||||
- https://www.aiacceleratorinstitute.com/agentic-code-generation-the-future-of-software-development/
|
||||
- https://apiiro.com/glossary/agentic-coding/
|
||||
|
||||
---
|
||||
|
||||
## Domain 5: Workflow & Process Integration
|
||||
|
||||
### Prompt-Driven Development (PDD)
|
||||
|
||||
**Authority Rank: 1** | **Perception: Neutral to Positive**
|
||||
|
||||
**Sources:**
|
||||
1. **Capgemini Software Engineering** — May 2025
|
||||
2. **Hexaware Technologies** — August 2025
|
||||
|
||||
**Description:**
|
||||
Developer breaks requirements into series of prompts. LLM generates code for each prompt. **Critical:** developer MUST review LLM-generated code.
|
||||
|
||||
Critical distinction from vibe coding: code review mandatory.
|
||||
|
||||
**Links:**
|
||||
- https://capgemini.github.io/ai/prompt-driven-development/
|
||||
- https://hexaware.com/blogs/prompt-driven-development-coding-in-conversation/
|
||||
|
||||
### AI-Augmented Development
|
||||
|
||||
**Authority Rank: 2** | **Perception: Positive - Practical**
|
||||
|
||||
**Sources:**
|
||||
1. **GitLab Official Documentation** — December 2023
|
||||
2. **Virtusa** — January 2024
|
||||
|
||||
**Description:**
|
||||
AI tools accelerate SDLC across all phases. Focus: code generation, bug detection, automated testing, smart documentation.
|
||||
|
||||
Key principle: humans handle strategy, AI handles execution.
|
||||
|
||||
**Links:**
|
||||
- https://about.gitlab.com/topics/agentic-ai/ai-augmented-software-development/
|
||||
- https://www.virtusa.com/digital-themes/ai-augmented-development
|
||||
|
||||
### Copilot-Driven Development
|
||||
|
||||
**Authority Rank: 3** | **Perception: Positive - Practical**
|
||||
|
||||
**Sources:**
|
||||
1. **GitHub/Microsoft Official** — January 2026
|
||||
2. **Emergn** — September 2025
|
||||
|
||||
**Description:**
|
||||
Specifically using GitHub Copilot or similar tools as development partner (not just assistant).
|
||||
|
||||
Context-aware, learns coding style. Enables conceptual focus instead of mechanical typing.
|
||||
|
||||
**Links:**
|
||||
- https://code.visualstudio.com/docs/copilot/overview
|
||||
- https://www.emergn.com/insights/how-ai-tools-impact-the-way-we-develop-software-our-github-copilot-journey/
|
||||
|
||||
### Conversational Coding
|
||||
|
||||
**Authority Rank: 4** | **Perception: Neutral to Positive**
|
||||
|
||||
**Sources:**
|
||||
1. **Google Cloud Platform** — January 2026
|
||||
2. **arXiv Research** — March 2025
|
||||
|
||||
**Description:**
|
||||
Natural language interaction with AI for development. Iterative, dialogue-based approach. Context retention across sessions.
|
||||
|
||||
**Links:**
|
||||
- https://cloud.google.com/conversational-ai
|
||||
- https://arxiv.org/abs/2503.16508
|
||||
|
||||
---
|
||||
|
||||
## Domain 6: Code Review & Maintenance
|
||||
|
||||
### AI Code Review
|
||||
|
||||
**Authority Rank: 1** | **Perception: Neutral to Positive**
|
||||
|
||||
**Sources:**
|
||||
1. **LinearB** — March 2024
|
||||
2. **Swimm.io** — November 2025
|
||||
3. **CodeAnt.ai** — May 2025
|
||||
|
||||
**Description:**
|
||||
Automated code examination using ML/LLM. Static and dynamic analysis. Identifies bugs, security issues, performance problems, code smells.
|
||||
|
||||
Tools: Qodo, CodeRabbit, SonarQube AI features.
|
||||
|
||||
**Links:**
|
||||
- https://linearb.io/blog/ai-code-review
|
||||
- https://swimm.io/learn/ai-tools-for-developers/ai-code-review-how-it-works-and-3-tools-you-should-know
|
||||
- https://www.codeant.ai/blogs/ai-vs-traditional-code-review
|
||||
|
||||
---
|
||||
|
||||
## Domain 7: Specialized & Emerging Approaches
|
||||
|
||||
### Ensemble Programming/Prompting with AI
|
||||
|
||||
**Authority Rank: 1** | **Perception: Positive - Advanced**
|
||||
|
||||
**Sources:**
|
||||
1. **Kinde.com** — November 2024
|
||||
2. **Ultralytics ML Research** — December 2025
|
||||
3. **arXiv** — June 2025
|
||||
|
||||
**Description:**
|
||||
Multiple AI models/prompts combined for better results. Aggregation methods: voting, averaging, weighted scoring.
|
||||
|
||||
**Links:**
|
||||
- https://kinde.com/learn/ai-for-software-engineering/prompting/ensemble-prompting-that-actually-moves-the-needle/
|
||||
- https://www.ultralytics.com/blog/exploring-ensemble-learning-and-its-role-in-ai-and-ml
|
||||
|
||||
### Prompt Engineering for Development
|
||||
|
||||
**Authority Rank: 2** | **Perception: Neutral to Positive**
|
||||
|
||||
**Sources:**
|
||||
1. **Google Cloud** — January 2026
|
||||
2. **OpenAI** — April 2025
|
||||
3. **GitHub** — May 2024
|
||||
|
||||
**Description:**
|
||||
Crafting effective prompts for AI models. Critical skill for AI-assisted development.
|
||||
|
||||
Techniques: few-shot learning, chain-of-thought, role prompting.
|
||||
|
||||
**Links:**
|
||||
- https://cloud.google.com/discover/what-is-prompt-engineering
|
||||
- https://platform.openai.com/docs/guides/prompt-engineering
|
||||
- https://github.blog/ai-and-ml/generative-ai/prompt-engineering-guide-generative-ai-llms/
|
||||
|
||||
### Intentional AI Development
|
||||
|
||||
**Authority Rank: 3** | **Perception: Positive - Thoughtful**
|
||||
|
||||
**Sources:**
|
||||
1. **Tech.eu** — January 2026
|
||||
2. **ghuntley.com** — August 2025
|
||||
|
||||
**Description:**
|
||||
Purpose-driven AI design. Clear roles and boundaries for AI. Deliberate practice and learning approach.
|
||||
|
||||
**Links:**
|
||||
- https://tech.eu/2026/01/05/adopting-an-intentional-ai-strategy-in-2026/
|
||||
- https://ghuntley.com/play/
|
||||
|
||||
---
|
||||
|
||||
## Domain 8: General & Cross-Cutting Terms
|
||||
|
||||
### AI-Assisted Coding / AI-Assisted Development
|
||||
|
||||
**Authority Rank: 1** | **Perception: Neutral to Positive**
|
||||
|
||||
**Sources:**
|
||||
1. **Wikipedia** — July 2025
|
||||
2. **GitLab** — 2025
|
||||
|
||||
**Description:**
|
||||
Broad umbrella term for AI enhancing software development tasks. Includes code completion, documentation generation, testing, debugging assistance.
|
||||
|
||||
Developer remains in control, reviews all suggestions. Most common adoption pattern globally.
|
||||
|
||||
**Links:**
|
||||
- https://en.wikipedia.org/wiki/AI-assisted_software_development
|
||||
- https://about.gitlab.com/topics/devops/ai-code-generation-guide/
|
||||
|
||||
---
|
||||
|
||||
## Key Takeaways
|
||||
|
||||
**Domain 1** (Experimental): contains only Vibe Coding — the sole term with an explicitly negative connotation, backed by high-authority sources (OpenAI founder, Collins Dictionary).
|
||||
|
||||
**Domain 2** (Enterprise): Most authoritative domain with AWS, GitHub Engineering, ThoughtWorks as sources. Focus on production-grade, governance, architecture.
|
||||
|
||||
**Domain 3** (Quality): Research-heavy domain (Galileo AI, Google Cloud, Atlassian) with emphasis on responsible development.
|
||||
|
||||
**Domain 4** (Collaborative): Practical patterns, backed by major platforms (Microsoft/GitHub, Atlassian) and research (arXiv).
|
||||
|
||||
**Domains 5-7**: Workflow integration, code review, specialized techniques — more narrow but important practices.
|
||||
|
||||
**Domain 8**: General term serving as baseline for all other approaches.
|
||||
|
|
@ -1,79 +0,0 @@
|
|||
# Research Index
|
||||
|
||||
Working file for methodology clustering and validation.
|
||||
|
||||
## Source
|
||||
- `perplexity-terminology-research.md` — original Perplexity research (Jan 2025)
|
||||
|
||||
---
|
||||
|
||||
## Methodology Clusters
|
||||
|
||||
### Tier 1: Must Include (High Authority + High Relevance)
|
||||
|
||||
| Term | Source Authority | Link Status | Include? |
|
||||
|------|------------------|-------------|----------|
|
||||
| Vibe Coding | Wikipedia, Collins Dictionary, Andrej Karpathy | ⏳ verify | |
|
||||
| Spec-Driven Development | GitHub, ThoughtWorks, Martin Fowler | ⏳ verify | |
|
||||
| AI-Driven Development Life Cycle | AWS | ⏳ verify | |
|
||||
| Agentic Coding | arXiv, AI Accelerator Institute | ⏳ verify | |
|
||||
| AI Pair Programming | GitHub/Microsoft, GeeksforGeeks | ⏳ verify | |
|
||||
|
||||
### Tier 2: Consider (Good Authority)
|
||||
|
||||
| Term | Source Authority | Link Status | Include? |
|
||||
|------|------------------|-------------|----------|
|
||||
| Architecture-First AI Development | WaveMaker, ITBrief | ⏳ verify | |
|
||||
| Test-Driven Development with AI | Galileo AI, Builder.io | ⏳ verify | |
|
||||
| Human-in-the-Loop (HITL) | Google Cloud, Atlassian | ⏳ verify | |
|
||||
| Prompt-Driven Development | Capgemini, Hexaware | ⏳ verify | |
|
||||
| Quality-First AI Coding | Qodo.ai | ⏳ verify | |
|
||||
|
||||
### Tier 3: Maybe (Lower Authority / Niche)
|
||||
|
||||
| Term | Source Authority | Link Status | Include? |
|
||||
|------|------------------|-------------|----------|
|
||||
| Mobbing with AI | Atlassian | ⏳ verify | |
|
||||
| Copilot-Driven Development | Microsoft/GitHub | ⏳ verify | |
|
||||
| Conversational Coding | Google Cloud, arXiv | ⏳ verify | |
|
||||
| Deterministic AI Development | Augment Code | ⏳ verify | |
|
||||
| Ensemble Programming | Kinde.com, Ultralytics | ⏳ verify | |
|
||||
|
||||
### Tier 4: Skip (Generic / Low Value)
|
||||
|
||||
- AI-Assisted Coding — too generic, umbrella term
|
||||
- AI-Augmented Development — too generic
|
||||
- Prompt Engineering — separate skill, not methodology
|
||||
- AI Code Review — tool category, not methodology
|
||||
|
||||
---
|
||||
|
||||
## Interview Questions Bank
|
||||
|
||||
*Questions to ask Oleg about each methodology*
|
||||
|
||||
1. Vibe Coding — "Ты когда-нибудь так работал? Результат?"
|
||||
2. Spec-Driven — "Пробовал писать spec перед кодом с AI?"
|
||||
3. Agentic — "Как используешь Claude Code в agentic режиме?"
|
||||
4. TDD-AI — "Тесты сначала с AI — работает?"
|
||||
5. HITL — "Как часто AI делает что-то без твоего OK?"
|
||||
|
||||
---
|
||||
|
||||
## Henry's Opinions (from interview)
|
||||
|
||||
*Will be filled during interview*
|
||||
|
||||
---
|
||||
|
||||
## Link Verification Log
|
||||
|
||||
*Track which links checked and status*
|
||||
|
||||
| Link | Status | Notes |
|
||||
|------|--------|-------|
|
||||
| | | |
|
||||
|
||||
---
|
||||
|
||||
*Updated: 2025-01-22*
|
||||
|
|
@ -1,16 +0,0 @@
|
|||
# SEO Metadata
|
||||
|
||||
*pending — will be created after keyword research (step 5)*
|
||||
|
||||
## Title
|
||||
[TBD]
|
||||
|
||||
## Meta Description
|
||||
[TBD]
|
||||
|
||||
## Target Keywords
|
||||
- Primary: [TBD]
|
||||
- Secondary: [TBD]
|
||||
|
||||
## URL Slug
|
||||
ai-coding-methodologies-beyond-vibe-coding
|
||||
|
|
@ -1,3 +0,0 @@
|
|||
# Article Text
|
||||
|
||||
*pending — will be written after outline approval*
|
||||
|
|
@ -1,118 +0,0 @@
|
|||
# Agentic Coding
|
||||
|
||||
## Определение
|
||||
|
||||
**Agentic Coding** — парадигма автономной разработки ПО с высокой степенью автономности AI-агентов: самостоятельное планирование, выполнение, валидация и итеративное улучшение кода с минимальным человеческим вмешательством.
|
||||
|
||||
---
|
||||
|
||||
## Академическое подтверждение
|
||||
|
||||
### arXiv 2508.11126 (август 2025)
|
||||
**["AI Agentic Programming: A Survey of Techniques"](https://arxiv.org/abs/2508.11126)**
|
||||
- **Авторы**: UC San Diego, Carnegie Mellon University
|
||||
- **Охват**: comprehensive survey агентных систем для разработки ПО
|
||||
- **Ключевые концепции**: таксономия агентов, планирование, управление контекстом, multi-agent systems
|
||||
|
||||
### arXiv 2512.14012 (декабрь 2025)
|
||||
**["Professional Software Developers Don't Vibe, They Control"](https://arxiv.org/abs/2512.14012)**
|
||||
- **Авторы**: University of Michigan, UC San Diego
|
||||
- **Методология**: 13 наблюдений + 99 опросов разработчиков (3-25 лет опыта)
|
||||
- **Выводы**: профессионалы используют агентов в контролируемом режиме, plan files, context files, tight feedback loops
|
||||
|
||||
---
|
||||
|
||||
## Ralph Loop
|
||||
|
||||
### История
|
||||
- **Изобретатель**: Geoffrey Huntley
|
||||
- **Первое открытие**: Февраль 2024
|
||||
- **Публичный запуск**: Май 2025
|
||||
- **Viral wave**: Январь 2026
|
||||
|
||||
### Публикации
|
||||
- **[VentureBeat (6 января 2026)](https://venturebeat.com/technology/how-ralph-wiggum-went-from-the-simpsons-to-the-biggest-name-in-ai-right-now)**: "How Ralph Wiggum went from 'The Simpsons' to the biggest name in AI"
|
||||
- **[Dev Interrupted Podcast (12 января 2026)](https://devinterrupted.substack.com/p/inventing-the-ralph-wiggum-loop-creator)**: интервью с Geoffrey Huntley
|
||||
- **[Ralph Wiggum Loop Official Site](https://ralph-wiggum.ai)**
|
||||
- **[LinearB Blog](https://linearb.io/blog/ralph-loop-agentic-engineering-geoffrey-huntley)**: Mastering Ralph loops
|
||||
|
||||
### Суть
|
||||
Bash-цикл с fresh context каждую итерацию: `while :; do cat PROMPT.md | agent; done`
|
||||
|
||||
### Экономика
|
||||
- **Cost**: $10.42/час (Claude Sonnet 4.5, данные Huntley)
|
||||
- **Кейсы**: клонирование HashiCorp Nomad, Tailscale — дни вместо лет
|
||||
|
||||
---
|
||||
|
||||
## Профессиональные инструменты
|
||||
|
||||
### Claude Code
|
||||
- **Статус**: полная поддержка agentic workflows
|
||||
- **Ralph Loop**: [официальная интеграция](https://ralph-wiggum.ai)
|
||||
- **Community workflows**: [Reddit](https://www.reddit.com/r/ClaudeCode/comments/1m5k6ka/i_built_a_specdriven_development_workflow_for/)
|
||||
- **Использование**: bash scripts, MCP, custom slash commands
|
||||
|
||||
### Cursor Composer
|
||||
- **Запуск**: октябрь 2025 (Cursor 2.0)
|
||||
- **Статус**: production-ready multi-agent IDE
|
||||
- **Возможности**: до 8 параллельных агентов, Git worktrees isolation, native browser tool, voice mode
|
||||
- **Ссылка**: [Cursor 2.0 Launch](https://cursor.com/blog/2-0)
|
||||
- **Scaling**: [Long-running autonomous coding](https://cursor.com/blog/scaling-agents) (январь 2026)
|
||||
|
||||
### GitHub Copilot
|
||||
- **Agent Mode** (preview февраль 2025): [synchronous agentic collaborator](https://github.blog/ai-and-ml/github-copilot/agent-mode-101-all-about-github-copilots-powerful-mode/)
|
||||
- **Coding Agent** (preview июль 2025): [asynchronous autonomous agent](https://github.blog/ai-and-ml/github-copilot/from-idea-to-pr-a-guide-to-github-copilots-agentic-workflows/)
|
||||
- **Анонс**: [GitHub Newsroom](https://github.com/newsroom/press-releases/agent-mode) (5 февраля 2025)
|
||||
- **Доступность**: VS Code, Visual Studio, JetBrains, Eclipse, Xcode
|
||||
|
||||
### Другие инструменты
|
||||
- **[Agentic Coding Framework](https://github.com/DafnckStudio/Agentic-Coding-Framework)**: full-cycle automation, GitHub
|
||||
- **Windsurf**: agentic IDE, коммерческий
|
||||
- **Cline**: open-source assistant, VS Code extension
|
||||
|
||||
---
|
||||
|
||||
## Интеграция с Claude Code
|
||||
|
||||
✅ **Полностью поддерживается**
|
||||
|
||||
- Автономное чтение/запись файлов, выполнение terminal commands
|
||||
- [Ralph Loop implementation](https://ralph-wiggum.ai)
|
||||
- [Custom slash commands](https://www.reddit.com/r/ClaudeCode/comments/1m5k6ka/i_built_a_specdriven_development_workflow_for/)
|
||||
- [Практические примеры](https://blog.devgenius.io/ralph-wiggum-with-claude-code-how-people-are-using-it-effectively-1d03d5027285)
|
||||
|
||||
---
|
||||
|
||||
## Минимальный подход без фреймворков
|
||||
|
||||
**Методология**: создай `SPECIFICATION.md` + `IMPLEMENTATION_PLAN.md`, запусти bash loop
|
||||
|
||||
**Принцип**: fresh context каждую итерацию, progress в Git history, агент завершает ОДНУ задачу и выходит
|
||||
|
||||
**Преимущества**: zero dependencies, full control, no context rot, Git-based persistence
|
||||
|
||||
---
|
||||
|
||||
## Ссылки
|
||||
|
||||
### Академические
|
||||
- [arXiv 2508.11126](https://arxiv.org/abs/2508.11126): AI Agentic Programming Survey (август 2025)
|
||||
- [arXiv 2512.14012](https://arxiv.org/abs/2512.14012): Professional Developers Don't Vibe (декабрь 2025)
|
||||
|
||||
### Ralph Loop
|
||||
- [VentureBeat](https://venturebeat.com/technology/how-ralph-wiggum-went-from-the-simpsons-to-the-biggest-name-in-ai-right-now) (январь 2026)
|
||||
- [Ralph Wiggum Official](https://ralph-wiggum.ai)
|
||||
- [LinearB Blog](https://linearb.io/blog/ralph-loop-agentic-engineering-geoffrey-huntley)
|
||||
- [Dev Interrupted Podcast](https://devinterrupted.substack.com/p/inventing-the-ralph-wiggum-loop-creator)
|
||||
|
||||
### Инструменты
|
||||
- [Cursor 2.0](https://cursor.com/blog/2-0) (октябрь 2025)
|
||||
- [GitHub Copilot Agent Mode](https://github.blog/ai-and-ml/github-copilot/agent-mode-101-all-about-github-copilots-powerful-mode/)
|
||||
- [Claude Code Integration](https://blog.devgenius.io/ralph-wiggum-with-claude-code-how-people-are-using-it-effectively-1d03d5027285)
|
||||
- [Agentic Framework GitHub](https://github.com/DafnckStudio/Agentic-Coding-Framework)
|
||||
|
||||
### Дополнительно
|
||||
- [Emergent Mind Overview](https://www.emergentmind.com/topics/agentic-coding)
|
||||
- [Martin Fowler](https://martinfowler.com/articles/exploring-gen-ai/sdd-3-tools.html)
|
||||
- [Cursor Scaling Agents](https://cursor.com/blog/scaling-agents) (январь 2026)
|
||||
|
|
@ -1,401 +0,0 @@
|
|||
# Статистика использования AI инструментов разработчиками (2024-2026)
|
||||
|
||||
## Общая статистика использования AI
|
||||
|
||||
### Динамика использования по годам
|
||||
|
||||
\begin{table}
|
||||
\begin{tabular}{|l|c|c|c|}
|
||||
\hline
|
||||
Метрика & 2024 & 2025 & Изменение \\
|
||||
\hline
|
||||
Используют или планируют использовать AI & 76\% & 84\% & +8 п.п. \\
|
||||
\hline
|
||||
Активно используют AI & 62\% & н/д & — \\
|
||||
\hline
|
||||
Используют ежедневно (профессионалы) & н/д & 51\% & — \\
|
||||
\hline
|
||||
Позитивное отношение к AI & 70\%+ & 60\% & -10 п.п. \\
|
||||
\hline
|
||||
Доверяют точности AI & н/д & 33\% & — \\
|
||||
\hline
|
||||
Активно не доверяют AI & н/д & 46\% & — \\
|
||||
\hline
|
||||
\end{tabular}
|
||||
\caption{Сравнение показателей использования AI по данным Stack Overflow 2024 и 2025}
|
||||
\end{table}
|
||||
|
||||
**Источники:**
|
||||
- Stack Overflow Developer Survey 2024[1]
|
||||
- Stack Overflow Developer Survey 2025[2]
|
||||
- Final Round AI Analysis of Stack Overflow 2025[3]
|
||||
|
||||
### Ключевые показатели использования (2025-2026)
|
||||
|
||||
\begin{itemize}
|
||||
\item 84\% разработчиков используют или планируют использовать AI инструменты (Stack Overflow 2025)[2]
|
||||
\item 90\% разработчиков используют AI (DORA Report 2025)[4]
|
||||
\item 85\% регулярно используют AI-инструменты (JetBrains State of Developer Ecosystem 2025)[5]
|
||||
\item 51\% профессиональных разработчиков используют AI ежедневно (Stack Overflow 2025)[2][6]
|
||||
\item 82\% используют ChatGPT[7]
|
||||
\item 68\% используют GitHub Copilot[7]
|
||||
\item 47\% используют Google Gemini[7]
|
||||
\item 41\% используют Claude Code[7]
|
||||
\item 59\% разработчиков используют три и более AI-инструмента параллельно[7][8]
|
||||
\end{itemize}
|
||||
|
||||
**Источники:**
|
||||
- Stack Overflow Developer Survey 2025[2]
|
||||
- DORA Report 2025 (Google Cloud)[4]
|
||||
- JetBrains State of Developer Ecosystem 2025[5]
|
||||
- AI Coding Assistant Statistics 2026[6][7]
|
||||
- Second Talent AI Coding Statistics[8]
|
||||
|
||||
## Доверие к AI инструментам
|
||||
|
||||
### Парадокс использования и доверия
|
||||
|
||||
\begin{table}
|
||||
\begin{tabular}{|l|c|}
|
||||
\hline
|
||||
Показатель доверия & Процент \\
|
||||
\hline
|
||||
Позитивное отношение к AI (2023-2024) & 70\%+ \\
|
||||
\hline
|
||||
Позитивное отношение к AI (2025) & 60\% \\
|
||||
\hline
|
||||
Доверяют точности AI & 33\% \\
|
||||
\hline
|
||||
Активно не доверяют AI & 46\% \\
|
||||
\hline
|
||||
Высоко доверяют результатам AI & 3\% \\
|
||||
\hline
|
||||
Высоко доверяют (опытные разработчики) & 2.6\% \\
|
||||
\hline
|
||||
\end{tabular}
|
||||
\caption{Показатели доверия к AI инструментам}
|
||||
\end{table}
|
||||
|
||||
**Источники:**
|
||||
- Stack Overflow Survey 2025 Analysis[3][9]
|
||||
- Intelligent Tools Analysis[10]
|
||||
|
||||
### Основные проблемы с доверием
|
||||
|
||||
\begin{itemize}
|
||||
\item 66\% разработчиков жалуются на "AI решения, которые почти правильные, но не совсем"[2]
|
||||
\item 45\% считают, что "отладка AI-кода требует больше времени"[2]
|
||||
\item 28\% часто исправляют AI-код настолько, что это нивелирует выигрыш во времени[11][12]
|
||||
\item 30\% senior-разработчиков тратят значительное время на исправление AI-кода[11]
|
||||
\item 17\% junior-разработчиков тратят время на исправление AI-кода[11]
|
||||
\end{itemize}
|
||||
|
||||
**Источники:**
|
||||
- Stack Overflow Developer Survey 2025[2]
|
||||
- Fastly Developer Survey (июль 2025, 791 респондент)[11][12]
|
||||
|
||||
## Производительность разработчиков с AI
|
||||
|
||||
### Воспринимаемая vs реальная производительность
|
||||
|
||||
\begin{table}
|
||||
\begin{tabular}{|l|c|c|}
|
||||
\hline
|
||||
Метрика & Значение & Источник \\
|
||||
\hline
|
||||
Воспринимаемое ускорение & +24\% & RCT исследование \\
|
||||
\hline
|
||||
Реальное измерение (опытные разработчики) & -19\% & RCT исследование \\
|
||||
\hline
|
||||
Разрыв восприятия и реальности & -43\% & RCT исследование \\
|
||||
\hline
|
||||
Реальный прирост производительности & +3.6\% & Наблюдательные исследования \\
|
||||
\hline
|
||||
Больше кода с AI & +12-15\% & Различные источники \\
|
||||
\hline
|
||||
Прирост в отдельных метриках & +21\% & Различные источники \\
|
||||
\hline
|
||||
\end{tabular}
|
||||
\caption{Сравнение воспринимаемой и реальной производительности}
|
||||
\end{table}
|
||||
|
||||
**Источники:**
|
||||
- Science Journal: "Who is using AI to code?" (январь 2026)[13]
|
||||
- AI Coding Statistics Analysis[7]
|
||||
- Second Talent Statistics[8]
|
||||
|
||||
### Производительность по уровню опыта (Fastly Survey 2025)
|
||||
|
||||
\begin{table}
|
||||
\begin{tabular}{|l|c|c|c|}
|
||||
\hline
|
||||
Показатель & Senior (10+ лет) & Junior (0-2 года) & Разница \\
|
||||
\hline
|
||||
Считают, что AI ускоряет работу & 59\% & 49\% & +10 п.п. \\
|
||||
\hline
|
||||
Значительный прирост скорости & 26\% & 13\% & В 2 раза \\
|
||||
\hline
|
||||
Более 50\% кода генерируется AI & 32\% & 13\% & В 2.5 раза \\
|
||||
\hline
|
||||
Тратят время на исправление AI & 30\% & 17\% & +13 п.п. \\
|
||||
\hline
|
||||
\end{tabular}
|
||||
\caption{Использование AI по уровню опыта разработчиков}
|
||||
\end{table}
|
||||
|
||||
**Источники:**
|
||||
- Fastly Developer Survey (июль 2025)[11][12][14][15]
|
||||
- InfoWorld Analysis[16]
|
||||
- The Register Analysis[17]
|
||||
|
||||
## Доля AI-генерируемого кода
|
||||
|
||||
### Эволюция доли AI в кодовой базе
|
||||
|
||||
\begin{table}
|
||||
\begin{tabular}{|l|c|c|}
|
||||
\hline
|
||||
Период & Доля AI в коде (США) & Глобальное использование \\
|
||||
\hline
|
||||
2022 & 5\% & н/д \\
|
||||
\hline
|
||||
Начало 2025 & 29\% & н/д \\
|
||||
\hline
|
||||
2026 & н/д & 41\% (средний) \\
|
||||
\hline
|
||||
\end{tabular}
|
||||
\caption{Рост доли AI-генерируемого кода}
|
||||
\end{table}
|
||||
|
||||
**Источники:**
|
||||
- Second Talent AI Statistics[8]
|
||||
- Science Journal Research[13]
|
||||
- AI Coding Assistant Statistics[6]
|
||||
- Dev.to AI Coding Analysis[18]
|
||||
|
||||
### Региональные различия (начало 2025)
|
||||
|
||||
\begin{table}
|
||||
\begin{tabular}{|l|c|}
|
||||
\hline
|
||||
Страна & Доля AI-кода \\
|
||||
\hline
|
||||
США & 29\% \\
|
||||
\hline
|
||||
Германия & 23\% \\
|
||||
\hline
|
||||
Франция & 24\% \\
|
||||
\hline
|
||||
Индия & 20\% \\
|
||||
\hline
|
||||
Китай & 12\% \\
|
||||
\hline
|
||||
Россия & 15\% \\
|
||||
\hline
|
||||
\end{tabular}
|
||||
\caption{Региональная статистика использования AI в коде}
|
||||
\end{table}
|
||||
|
||||
**Источники:**
|
||||
- Science Journal: Global AI Code Adoption[13]
|
||||
- Dev.to Analysis[18]
|
||||
|
||||
### Доля AI-кода в крупных компаниях (2025-2026)
|
||||
|
||||
\begin{table}
|
||||
\begin{tabular}{|l|c|c|}
|
||||
\hline
|
||||
Компания & Доля AI-кода & Период \\
|
||||
\hline
|
||||
Anthropic (инженеры) & 100\% & Январь 2026 \\
|
||||
\hline
|
||||
OpenAI (инженеры) & 100\% & Январь 2026 \\
|
||||
\hline
|
||||
Microsoft & \textasciitilde30\% & Апрель 2025 \\
|
||||
\hline
|
||||
Google & >25\% (21\% внутр.) & 2025 \\
|
||||
\hline
|
||||
Salesforce & \textasciitilde30\% & 2025 \\
|
||||
\hline
|
||||
\end{tabular}
|
||||
\caption{AI-генерируемый код в ведущих технологических компаниях}
|
||||
\end{table}
|
||||
|
||||
**Источники:**
|
||||
- Fortune: Anthropic and OpenAI Engineers Interview (январь 2026)[19]
|
||||
- MIT Technology Review: Generative Coding 2026[20]
|
||||
|
||||
## Качество и проблемы AI-кода
|
||||
|
||||
### Статистика качества кода
|
||||
|
||||
\begin{table}
|
||||
\begin{tabular}{|l|c|}
|
||||
\hline
|
||||
Метрика качества & Значение \\
|
||||
\hline
|
||||
AI-код содержит уязвимости безопасности & 48\% \\
|
||||
\hline
|
||||
Код принимается без модификаций & <44\% \\
|
||||
\hline
|
||||
Больше дефектов без ревью & 1.7× \\
|
||||
\hline
|
||||
Рост размера pull request & +33\% (57→76 строк) \\
|
||||
\hline
|
||||
\end{tabular}
|
||||
\caption{Показатели качества AI-генерируемого кода}
|
||||
\end{table}
|
||||
|
||||
**Источники:**
|
||||
- AI Coding Statistics 2026[7]
|
||||
- AI Coding Tools Adoption Study[21]
|
||||
- GitClear AI Code Quality Research[22]
|
||||
|
||||
### Фрустрации разработчиков (2025)
|
||||
|
||||
\begin{table}
|
||||
\begin{tabular}{|l|c|}
|
||||
\hline
|
||||
Проблема & Процент разработчиков \\
|
||||
\hline
|
||||
AI решения почти правильные, но не совсем & 66\% \\
|
||||
\hline
|
||||
Отладка AI-кода требует больше времени & 45\% \\
|
||||
\hline
|
||||
Исправление нивелирует выигрыш во времени & 28\% \\
|
||||
\hline
|
||||
\end{tabular}
|
||||
\caption{Основные жалобы на AI инструменты}
|
||||
\end{table}
|
||||
|
||||
**Источники:**
|
||||
- Stack Overflow Developer Survey 2025[2]
|
||||
- Fastly Survey Analysis[11]
|
||||
|
||||
## Использование AI по типам задач
|
||||
|
||||
### Популярные применения AI (Stack Overflow 2025)
|
||||
|
||||
\begin{table}
|
||||
\begin{tabular}{|l|c|}
|
||||
\hline
|
||||
Задача & Процент использующих \\
|
||||
\hline
|
||||
Поиск ответов & 54\% \\
|
||||
\hline
|
||||
Генерация контента/данных & 36\% \\
|
||||
\hline
|
||||
Изучение новых концепций & 33\% \\
|
||||
\hline
|
||||
Документирование кода & 31\% \\
|
||||
\hline
|
||||
\end{tabular}
|
||||
\caption{Применение AI инструментов по типам задач}
|
||||
\end{table}
|
||||
|
||||
**Источники:**
|
||||
- Stack Overflow Survey 2025[3][10]
|
||||
|
||||
## Тренды 2026 года
|
||||
|
||||
### Агентное кодирование
|
||||
|
||||
\begin{itemize}
|
||||
\item 31\% используют AI-агенты на работе (2025)[3][10]
|
||||
\item 40\% enterprise-приложений будут включать AI-агенты к концу 2026[23]
|
||||
\item 84\% лидеров планируют увеличить инвестиции в AI-агентов[23]
|
||||
\end{itemize}
|
||||
|
||||
**Источники:**
|
||||
- Intelligent Tools Analysis[10]
|
||||
- Master of Code Generative AI Statistics[23]
|
||||
|
||||
### Изменение процесса разработки
|
||||
|
||||
\begin{itemize}
|
||||
\item Anthropic перестраивает подход к найму: больший фокус на генералистов vs специалистов[19]
|
||||
\item Некоторые инженеры полностью прекратили писать код и теперь только редактируют AI-сгенерированный[19]
|
||||
\item Меньшая релевантность традиционных навыков программирования[19]
|
||||
\item Акцент на способности работать с AI-инструментами[19]
|
||||
\end{itemize}
|
||||
|
||||
**Источники:**
|
||||
- Fortune Interview with Anthropic CEO and Engineers[19]
|
||||
|
||||
## Дополнительные данные
|
||||
|
||||
### Временная шкала развития
|
||||
|
||||
\begin{table}
|
||||
\begin{tabular}{|l|c|c|c|}
|
||||
\hline
|
||||
Год & Используют AI & Ежедневно (проф.) & Доля AI-кода \\
|
||||
\hline
|
||||
2022 & н/д & н/д & 5\% \\
|
||||
\hline
|
||||
2023 & 70\% & 44\% & н/д \\
|
||||
\hline
|
||||
2024 & 76\% & 62\% & н/д \\
|
||||
\hline
|
||||
Начало 2025 & н/д & н/д & 29\% \\
|
||||
\hline
|
||||
2025 & 84\% & 51\% & н/д \\
|
||||
\hline
|
||||
2026 (январь) & 84-85\% & 51\% & 41\% (глобально) \\
|
||||
\hline
|
||||
\end{tabular}
|
||||
\caption{Временная динамика использования AI}
|
||||
\end{table}
|
||||
|
||||
**Источники:**
|
||||
- Stack Overflow Developer Surveys 2023-2025[1][2]
|
||||
- AI Statistics Compilations[6][7][8]
|
||||
- Science Journal Research[13]
|
||||
|
||||
## References
|
||||
|
||||
[1] Stack Overflow. (2024). 2024 Stack Overflow Developer Survey. https://survey.stackoverflow.co/2024/
|
||||
|
||||
[2] Stack Overflow. (2025). 2025 Stack Overflow Developer Survey. https://survey.stackoverflow.co/2025/
|
||||
|
||||
[3] Final Round AI. (2025, July 29). Stack Overflow Survey Reveals Developers Are Using AI. https://www.finalroundai.com/blog/stack-overflow-survey-2025-developers-trust-ai-tools-less
|
||||
|
||||
[4] Google Cloud. (2025, September 22). How are developers using AI? Inside our 2025 DORA report. https://blog.google/technology/developers/dora-report-2025/
|
||||
|
||||
[5] JetBrains. (2025, October 20). The State of Developer Ecosystem 2025: Coding in the AI Era. https://blog.jetbrains.com/research/2025/10/state-of-developer-ecosystem-2025/
|
||||
|
||||
[6] Panto AI. (2026, January 26). AI Coding Assistant Statistics (2026): Adoption, Productivity. https://www.getpanto.ai/blog/ai-coding-assistant-statistics
|
||||
|
||||
[7] Second Talent. (2025, October 15). AI Coding Assistant Statistics & Trends [2025]. https://www.secondtalent.com/resources/ai-coding-assistant-statistics/
|
||||
|
||||
[8] AI Coding Statistics. (2025). AI in Software Development: 25+ Statistics for 2025. https://usmsystems.com/ai-in-software-development-statistics/
|
||||
|
||||
[9] Stack Overflow Blog. (2025, December 29). Developers remain willing but reluctant to use AI. https://stackoverflow.blog/2025/12/29/developers-remain-willing-but-reluctant-to-use-ai-the-2025-developer-survey-results-are-here/
|
||||
|
||||
[10] Intelligent Tools. (2025, December 11). The Agent Tech Stack. https://intelligenttools.co/blog/stack-overflow-2025-developer-survey-ai-reality
|
||||
|
||||
[11] Fastly. (2026, January 11). Vibe Shift in AI Coding: Senior Developers Ship 2.5x More. https://www.fastly.com/blog/senior-developers-ship-more-ai-code
|
||||
|
||||
[12] Azalio. (2025, September 1). Senior developers let AI do more of the coding — survey. https://www.azalio.io/senior-developers-let-ai-do-more-of-the-coding-survey/
|
||||
|
||||
[13] Science. (2026, January 21). Who is using AI to code? Global diffusion and impact of generative AI. https://www.science.org/doi/10.1126/science.adz9311
|
||||
|
||||
[14] Tygo Cover. (2025, August 30). Senior Developers Use AI More Than Juniors, Survey Finds. https://tygocover.com/senior-developers-use-ai-more-than-juniors-survey/
|
||||
|
||||
[15] Okoone. (2025, September 28). Why senior developers are letting AI write more code. https://www.okoone.com/spark/industry-insights/why-senior-developers-are-letting-ai-write-more-code/
|
||||
|
||||
[16] InfoWorld. (2025, September 1). Senior developers let AI do more of the coding — survey. https://www.infoworld.com/article/4049949/senior-developers-let-ai-do-more-of-the-coding-survey.html
|
||||
|
||||
[17] The Register. (2025, August 27). Older developers more likely to code with AI tools. https://www.theregister.com/2025/08/28/older_developers_ai_code/
|
||||
|
||||
[18] Dev.to. (2026, January 26). AI Coding Dominates 2026: Week of January 20-27. https://dev.to/alexmercedcoder/ai-coding-dominates-2026-week-of-january-20-27-7lc
|
||||
|
||||
[19] Fortune. (2026, January 28). Top engineers at Anthropic, OpenAI say AI now writes 100% of their code. https://fortune.com/2026/01/29/100-percent-of-code-at-anthropic-and-openai-is-now-ai-written-boris-cherny-roon/
|
||||
|
||||
[20] MIT Technology Review. (2026, January 12). Generative coding: 10 Breakthrough Technologies 2026. https://www.technologyreview.com/2026/01/12/1130027/generative-coding-ai-software-2026-breakthrough-technology/
|
||||
|
||||
[21] Exceeds AI. (2026, January 8). AI Coding Tools Adoption Rates: 2026 Engineering Study. https://blog.exceeds.ai/ai-coding-tools-adoption-rates/
|
||||
|
||||
[22] GitClear. (2025). AI Copilot Code Quality: 2025 Data Suggests 4x Growth. https://www.gitclear.com/ai_assistant_code_quality_2025_research
|
||||
|
||||
[23] Master of Code. (2026, January 12). 350+ Generative AI Statistics [January 2026]. https://masterofcode.com/blog/generative-ai-statistics
|
||||
|
|
@ -1,366 +0,0 @@
|
|||
# AI-Aided Test-First Development
|
||||
|
||||
## Официальное название
|
||||
|
||||
**AI-aided test-first development** — официальное название от Thoughtworks Technology Radar (апрель 2023)
|
||||
|
||||
**Статус**: **TRIAL** в Thoughtworks Technology Radar — рекомендуется для экспериментов в production
|
||||
|
||||
---
|
||||
|
||||
## Альтернативные названия
|
||||
|
||||
- **Test-Driven Development with AI** (Builder.io, широко распространено)
|
||||
- **AI-Enabled TDD / AI-TDD** (AgileMania, коммерческие тренинги)
|
||||
- **TDD as AI Communication Protocol** (8th Light, концептуальное)
|
||||
- **AI-assisted TDD** (общепринятое в индустрии)
|
||||
- **Test-First AI Development** (академические источники)
|
||||
|
||||
---
|
||||
|
||||
## Методология
|
||||
|
||||
### Суть подхода
|
||||
|
||||
**Инверсия традиционного AI workflow:**
|
||||
|
||||
| Традиционный AI подход | AI-aided test-first |
|
||||
|------------------------|---------------------|
|
||||
| Prompt → AI generates code → Human writes tests | Human defines test requirements → AI generates tests → AI implements → Tests validate |
|
||||
| AI controls scope | Tests control scope |
|
||||
| Human validates after | Human specifies before |
|
||||
| Context in conversation | Context in tests |
|
||||
|
||||
### Цикл разработки
|
||||
|
||||
1. **Human**: пишет test description или acceptance criteria в natural language
|
||||
2. **AI**: генерирует failing test(s) на основе описания
|
||||
3. **Human**: проверяет и корректирует тесты (they define "done")
|
||||
4. **AI**: имплементирует минимальный код для прохождения тестов
|
||||
5. **Tests**: автоматически валидируют (binary pass/fail)
|
||||
6. **AI**: рефакторит при необходимости (tests as safety net)
|
||||
7. **Repeat**: для следующей фичи
|
||||
|
||||
### Ключевые принципы
|
||||
|
||||
- **Tests as specification** — тесты = compressed context для AI
|
||||
- **Binary validation** — clear, measurable goal (pass/fail) вместо subjective review
|
||||
- **Human architect, AI implementer** — разработчик проектирует поведение, AI печатает код
|
||||
- **Guard rails** — тесты предотвращают AI hallucinations и scope creep
|
||||
- **Fast feedback** — instant validation вместо manual code review
|
||||
|
||||
---
|
||||
|
||||
## Преимущества
|
||||
|
||||
### Для разработчика
|
||||
|
||||
- ✅ **Контроль над scope** — тесты определяют границы, AI не может "увлечься"
|
||||
- ✅ **Сохранение навыков** — developer остаётся в роли architect/designer
|
||||
- ✅ **Clear communication** — тесты = unambiguous specification для AI
|
||||
- ✅ **Reduced cognitive load** — не нужно держать весь контекст в голове
|
||||
- ✅ **Instant validation** — automated tests вместо manual review
|
||||
|
||||
### Для AI
|
||||
|
||||
- ✅ **Clear goal** — binary pass/fail проще чем "good code"
|
||||
- ✅ **Structured context** — test suite как compressed requirements
|
||||
- ✅ **Reduced hallucinations** — тесты ограничивают пространство решений
|
||||
- ✅ **Measurable progress** — количество passing tests
|
||||
|
||||
### Для команды
|
||||
|
||||
- ✅ **Consistent quality** — junior + AI align с architectural style команды
|
||||
- ✅ **Living documentation** — тесты документируют поведение
|
||||
- ✅ **Faster onboarding** — AI помогает новичкам писать код в team's style
|
||||
- ✅ **Reduced tech debt** — TDD предотвращает "vibe coding"
|
||||
|
||||
---
|
||||
|
||||
## Какую проблему решает
|
||||
|
||||
### Проблемы AI-кодинга без TDD
|
||||
|
||||
1. **Hallucinations и incorrect implementations** — AI генерирует код, который компилируется но не работает правильно
|
||||
2. **Scope creep** — AI добавляет "полезные" фичи, которые не требовались
|
||||
3. **Context rot** — AI теряет контекст в длинных сессиях, повторяет ошибки
|
||||
4. **Untestable code** — AI генерирует tightly coupled код без учёта тестируемости
|
||||
5. **Accumulating technical debt** — "move fast and break things" без safety net
|
||||
6. **Developer skill atrophy** — разработчики становятся copy-paste operators
|
||||
7. **Ambiguous requirements** — AI интерпретирует prompt по-своему
|
||||
|
||||
### Решение через TDD
|
||||
|
||||
- **Binary validation** → hallucinations caught immediately
|
||||
- **Tests define scope** → AI cannot exceed test requirements
|
||||
- **Tests = persistent context** → no context rot, reusable across sessions
|
||||
- **Design for testability** → forces better architecture
|
||||
- **Regression prevention** → existing tests catch breaks
|
||||
- **Developer stays architect** → designs behavior, AI implements
|
||||
- **Unambiguous spec** → failing test = clear goal
|
||||
|
||||
---
|
||||
|
||||
## Авторитетные источники
|
||||
|
||||
### Thoughtworks Technology Radar (2023-2025)
|
||||
|
||||
- **[AI-aided test-first development](https://www.thoughtworks.com/en-us/radar/techniques/ai-aided-test-first-development)** (апрель 2023)
|
||||
- **Статус**: TRIAL — "we've seen promising results"
|
||||
- **Применение**: junior developers + AI align с team's architectural style
|
||||
- **Подход**: generate tests with ChatGPT → developer implements → или AI implements с test-first constraint
|
||||
|
||||
### DORA Report 2025 (Google Cloud)
|
||||
|
||||
- **[State of AI-Assisted Software Development 2025](https://dora.dev/research/2025/dora-report/)**
|
||||
- **Главная находка**: "AI is an amplifier, not a fix" — усиливает существующие практики
|
||||
- **TDD impact**: организации с strong testing practices получают больше выгоды от AI
|
||||
- **[Google Cloud Blog](https://cloud.google.com/discover/how-test-driven-development-amplifies-ai-success)** (январь 2026): "TDD amplifies AI success"
|
||||
- **Цитата**: "Quality practices such as TDD help organizations harness AI's productivity gains while maintaining stability"
|
||||
|
||||
### Kent Beck (создатель TDD и XP)
|
||||
|
||||
- **[Pragmatic Engineer Podcast](https://newsletter.pragmaticengineer.com/p/tdd-ai-agents-and-coding-with-kent)** (июнь 2025)
|
||||
- **[YouTube Interview](https://www.youtube.com/watch?v=aSXaxOdVtAQ)** "TDD, AI agents and coding with Kent Beck"
|
||||
- **Цитата**: "TDD is a superpower when working with AI. I communicate things the Genie missed in terms of tests"
|
||||
- **Практика**: запускает тесты каждые 300ms чтобы "catch the genie"
|
||||
- **LinkedIn**: "The genie doesn't want to do TDD. It wants to go fast. You have to force red-green-refactor"
|
||||
|
||||
### 8th Light (Software Consultancy)
|
||||
|
||||
- **[TDD: The Missing Protocol for Effective AI Collaboration](https://8thlight.com/insights/tdd-effective-ai-collaboration)** (июль 2025)
|
||||
- **Тезис**: TDD breaks problems into small, testable behaviors → structured context для AI
|
||||
- **Вывод**: TDD не просто best practice, это communication protocol
|
||||
|
||||
### Builder.io Engineering
|
||||
|
||||
- **[Test-Driven Development with AI](https://www.builder.io/blog/test-driven-development-ai)** (август 2025)
|
||||
- **Тезис**: "AI turns TDD's weaknesses into strengths"
|
||||
- **Анализ**: AI thrives on clear, measurable goals — binary test = clearest goal
|
||||
- **Практика**: AI generates boilerplate tests, developer focuses на edge cases
|
||||
|
||||
---
|
||||
|
||||
## Формальные спеки и фреймворки
|
||||
|
||||
### AgileMania: AI-Enabled TDD Certification
|
||||
|
||||
- **[AI-Enabled Test-Driven Development (AI-TDD) Training](https://agilemania.com/test-driven-development-tdd-with-ai-training)** (декабрь 2024)
|
||||
- **Формат**: 1-day hands-on certification course
|
||||
- **Охват**: fundamentals of TDD + AI pair programming + hands-on exercises
|
||||
- **Статистика**: "By 2025, 85% of software projects incorporate AI tools"
|
||||
- **Особенности**: фокус на практическую интеграцию GitHub Copilot и Claude в TDD workflow
|
||||
|
||||
### Galileo AI: TDD for AI Systems Framework
|
||||
|
||||
- **[Test-Driven Development for AI Systems](https://galileo.ai/blog/test-driven-development-ai-systems)** (январь 2026)
|
||||
- **[TDD for AI System Architecture](https://galileo.ai/blog/tdd-ai-system-architecture)** (август 2025)
|
||||
- **Особенности**: framework для тестирования самих AI-систем, не AI-assisted coding
|
||||
- **Компоненты**:
|
||||
- Model Quality Checklist
|
||||
- Test-First Specification Template
|
||||
- AI Component Testing Matrix
|
||||
- **Применение**: production AI systems с guardrails
|
||||
|
||||
### Thoughtworks Structured Approach
|
||||
|
||||
- **[Technology Radar Entry](https://www.thoughtworks.com/en-us/radar/techniques/ai-aided-test-first-development)** (2023)
|
||||
- **Методология**:
|
||||
1. Reusable prompt "fragments" (tech stack + design patterns + team conventions)
|
||||
2. Feature description + acceptance criteria
|
||||
3. AI generates implementation plan → tests → code
|
||||
- **Особенности**: enterprise-scale применение, интеграция в CI/CD
|
||||
|
||||
### Nimble Approach: TDD Protocol for AI
|
||||
|
||||
- **[How to Use TDD for Better AI Coding Outputs](https://nimbleapproach.com/blog/how-to-use-test-driven-development-for-better-ai-coding-outputs/)** (ноябрь 2025)
|
||||
- **Особенности**: пошаговый protocol для AI prompting с TDD
|
||||
- **Инструкции**: как структурировать prompts для генерации тестов первыми
|
||||
|
||||
### Qodo (ex-CodiumAI): AI Test Generation
|
||||
|
||||
- **[AI Code Assistants Revolutionizing TDD](https://www.qodo.ai/blog/ai-code-assistants-test-driven-development/)** (июль 2025)
|
||||
- **Продукт**: Qodo Gen — AI test generation tool
|
||||
- **Особенности**: интегрируется в IDE, генерирует unit tests automatically
|
||||
- **Поддержка**: Python, JavaScript, TypeScript, Java
|
||||
|
||||
---
|
||||
|
||||
## Использование в Claude Code
|
||||
|
||||
### Нативная поддержка
|
||||
|
||||
**Claude Code** полностью поддерживает AI-aided test-first подход через:
|
||||
|
||||
1. **Explicit test-first prompts**:
|
||||
```
|
||||
"Создай failing test для функции calculateTax(), которая должна
|
||||
обрабатывать случаи: positive income, zero income, negative income"
|
||||
```
|
||||
|
||||
2. **Output styles для TDD**:
|
||||
- `/output-style learning` — Claude объясняет почему тест failing и как его починить
|
||||
- `/output-style explanatory` — подробное объяснение test design decisions
|
||||
|
||||
3. **MCP integration** — подключение test runners через Model Context Protocol
|
||||
|
||||
### Workflow в Claude Code
|
||||
|
||||
**Step 1: Test Requirements**
|
||||
```
|
||||
Prompt: "Напиши тесты для user authentication module:
|
||||
- Valid credentials → success
|
||||
- Invalid password → error with message
|
||||
- Nonexistent user → error with message
|
||||
- Empty fields → validation error"
|
||||
```
|
||||
|
||||
**Step 2: Claude генерирует failing tests**
|
||||
```typescript
|
||||
describe('AuthService', () => {
|
||||
test('authenticates user with valid credentials', async () => {
|
||||
const result = await authService.login('user@example.com', 'password123')
|
||||
expect(result.success).toBe(true)
|
||||
expect(result.user).toBeDefined()
|
||||
})
|
||||
// ... остальные тесты
|
||||
})
|
||||
```
|
||||
|
||||
**Step 3: Запуск (все failing)**
|
||||
```
|
||||
Prompt: "Запусти тесты и покажи результаты"
|
||||
Claude: *executes tests, shows all failures*
|
||||
```
|
||||
|
||||
**Step 4: Implementation**
|
||||
```
|
||||
Prompt: "Имплементируй AuthService чтобы пройти эти тесты"
|
||||
Claude: *generates minimal implementation*
|
||||
```
|
||||
|
||||
**Step 5: Validation**
|
||||
```
|
||||
Prompt: "Запусти тесты снова"
|
||||
Claude: *all tests pass*
|
||||
```
|
||||
|
||||
**Step 6: Refactor**
|
||||
```
|
||||
Prompt: "Рефактор: выдели error handling в отдельный класс"
|
||||
Claude: *refactors with tests as safety net*
|
||||
```
|
||||
|
||||
### Best Practices для Claude Code
|
||||
|
||||
- **Используй Projects** — добавь testing framework docs в project knowledge
|
||||
- **Custom instructions**: "Always generate tests before implementation. Run tests after each change."
|
||||
- **Iterative approach** — один тест → implementation → следующий тест
|
||||
- **Explicit commands**: "Run tests", "Show test coverage", "Refactor without breaking tests"
|
||||
|
||||
### Источники
|
||||
|
||||
- [Anthropic Engineering: Claude Code Best Practices](https://www.anthropic.com/engineering/claude-code-best-practices) (апрель 2025)
|
||||
- [Pair Programming with Claude Code](https://shipyard.build/blog/claude-code-output-styles-pair-programming/) (сентябрь 2025)
|
||||
|
||||
---
|
||||
|
||||
## Использование в Cursor
|
||||
|
||||
### Нативная поддержка
|
||||
|
||||
**Cursor** имеет специальные возможности для TDD workflow:
|
||||
|
||||
1. **Composer Agent Mode** — multi-step task execution с test validation
|
||||
2. **Inline test generation** — `Cmd/Ctrl + K` → "generate test for this function"
|
||||
3. **Auto-run tests** — Cursor автоматически запускает тесты после генерации кода
|
||||
4. **Test-aware refactoring** — проверяет, что рефакторинг не ломает тесты
|
||||
|
||||
### Workflow в Cursor
|
||||
|
||||
**Method 1: Inline TDD**
|
||||
|
||||
1. Напиши comment с test requirements: `// Test: calculateDiscount should return 10% for orders > $100`
|
||||
2. `Cmd + K` → "Generate test from this comment"
|
||||
3. Cursor генерирует failing test
|
||||
4. `Cmd + K` на функции → "Implement to pass the test above"
|
||||
5. Cursor автоматически запускает тесты
|
||||
|
||||
**Method 2: Composer TDD**
|
||||
|
||||
1. Открой Composer (`Cmd + I`)
|
||||
2. Prompt: "Create feature X using TDD: write tests first, then implement"
|
||||
3. Composer генерирует test file → implementation file → runs tests → показывает результаты
|
||||
4. Если тесты не проходят, Composer автоматически исправляет
|
||||
|
||||
**Method 3: Chat TDD**
|
||||
|
||||
1. Chat panel: "I want to add feature Y. Let's start with tests."
|
||||
2. Cursor генерирует test scenarios
|
||||
3. Ты выбираешь какие тесты написать first
|
||||
4. Cursor генерирует failing tests
|
||||
5. "Now implement to pass these tests"
|
||||
6. Cursor имплементирует + auto-runs tests
|
||||
|
||||
### Cursor-специфичные фичи
|
||||
|
||||
- **Multi-file test generation** — Cursor одновременно создаёт test file + implementation stub
|
||||
- **Test coverage view** — показывает какие части кода покрыты тестами
|
||||
- **Smart refactoring** — предлагает рефакторинг только если тесты проходят
|
||||
- **Voice mode TDD** — можно проговаривать test requirements голосом
|
||||
|
||||
### Best Practices для Cursor
|
||||
|
||||
- **Rules for AI** (`.cursorrules`):
|
||||
```
|
||||
Always use test-driven development:
|
||||
1. Generate failing tests first
|
||||
2. Implement minimal code to pass
|
||||
3. Refactor with tests as safety net
|
||||
4. Run tests after every change
|
||||
```
|
||||
- **Composer для complex features** — пусть agent управляет TDD циклом
|
||||
- **Inline для quick fixes** — fast test generation для single functions
|
||||
- **Git worktrees** — Cursor может запускать TDD workflow в изолированных branches
|
||||
|
||||
### Источники
|
||||
|
||||
- [Cursor Official Docs](https://cursor.com)
|
||||
- [Launchpad Lab: AI Coding with Windsurf - A New Approach to TDD](https://launchpadlab.com/blog/ai-coding-with-windsurf-a-new-approach-to-tdd/) (июнь 2025) — применимо к Cursor
|
||||
- [Skywork AI: Cursor's AI Autocomplete in Pair Programming](https://skywork.ai/blog/cursor-1-7-ai-autocomplete-pair-programming-best-practices/) (сентябрь 2025)
|
||||
|
||||
---
|
||||
|
||||
## Ссылки
|
||||
|
||||
### Официальные источники
|
||||
|
||||
- [Thoughtworks Technology Radar: AI-aided test-first development](https://www.thoughtworks.com/en-us/radar/techniques/ai-aided-test-first-development) (апрель 2023)
|
||||
- [DORA Report 2025: State of AI-Assisted Software Development](https://dora.dev/research/2025/dora-report/)
|
||||
- [Google Cloud: How TDD Amplifies AI Success](https://cloud.google.com/discover/how-test-driven-development-amplifies-ai-success) (январь 2026)
|
||||
|
||||
### Авторитетные голоса
|
||||
|
||||
- [Kent Beck: TDD, AI agents and coding (Podcast)](https://newsletter.pragmaticengineer.com/p/tdd-ai-agents-and-coding-with-kent) (июнь 2025)
|
||||
- [Kent Beck YouTube Interview](https://www.youtube.com/watch?v=aSXaxOdVtAQ)
|
||||
- [8th Light: TDD as Missing Protocol for AI](https://8thlight.com/insights/tdd-effective-ai-collaboration) (июль 2025)
|
||||
- [Builder.io: Test-Driven Development with AI](https://www.builder.io/blog/test-driven-development-ai) (август 2025)
|
||||
|
||||
### Спеки и фреймворки
|
||||
|
||||
- [AgileMania: AI-Enabled TDD Training](https://agilemania.com/test-driven-development-tdd-with-ai-training)
|
||||
- [Galileo AI: TDD for AI Systems](https://galileo.ai/blog/test-driven-development-ai-systems) (январь 2026)
|
||||
- [Nimble Approach: TDD Protocol for AI](https://nimbleapproach.com/blog/how-to-use-test-driven-development-for-better-ai-coding-outputs/) (ноябрь 2025)
|
||||
- [Qodo: AI Code Assistants & TDD](https://www.qodo.ai/blog/ai-code-assistants-test-driven-development/) (июль 2025)
|
||||
|
||||
### Практические руководства
|
||||
|
||||
- [Anthropic: Claude Code Best Practices](https://www.anthropic.com/engineering/claude-code-best-practices) (апрель 2025)
|
||||
- [Atomic Spin: How I Made AI Respect TDD](https://spin.atomicobject.com/make-ai-respect-tdd/) (январь 2026)
|
||||
- [Ready Set Cloud: TDD with AI - The Right Way](https://www.readysetcloud.io/blog/allen.helton/tdd-with-ai/) (октябрь 2023)
|
||||
|
||||
### Дополнительные ресурсы
|
||||
|
||||
- [Phil Parsons: Relevance of TDD in AI Workflow](https://philparsons.co.uk/blog/the-relevance-of-tdd-in-an-ai-workflow/)
|
||||
- [Code Manship: Why TDD Works in AI Programming](https://codemanship.wordpress.com/2026/01/09/why-does-test-driven-development-work-so-well-in-ai-assisted-programming/) (январь 2026)
|
||||
- [NOPC Accelerate: TDD Guide 2025](https://www.nopaccelerate.com/test-driven-development-guide-2025/) (сентябрь 2025)
|
||||
|
|
@ -1,172 +0,0 @@
|
|||
# AI Pair Programming
|
||||
|
||||
## Определение
|
||||
|
||||
**AI Pair Programming** — парадигма разработки, где AI-ассистент работает вместе с разработчиком в режиме реального времени: предлагает код, объясняет решения, помогает с отладкой и рефакторингом. В отличие от автономных агентов (agentic coding), здесь human-in-the-loop на каждом шаге.
|
||||
|
||||
**Ключевое отличие**: **совместная работа** — разработчик остаётся в водительском кресле (driver), AI выступает навигатором (navigator) или ассистентом.
|
||||
|
||||
---
|
||||
|
||||
## Официальное позиционирование
|
||||
|
||||
### GitHub Copilot: "Your AI pair programmer"
|
||||
|
||||
- **Запуск**: Technical preview — 22 февраля 2022
|
||||
- **Официальный слоган**: ["Your AI pair programmer"](https://github.com/features/copilot) — используется с 2022
|
||||
- **Маркетинг**: "Command your craft", "AI accelerator for every workflow"
|
||||
- **Статистика**: millions of users, tens of thousands of business customers, "world's most widely adopted AI developer tool"
|
||||
|
||||
### Microsoft Learn
|
||||
|
||||
- **Документация**: [AI pair programmer](https://learn.microsoft.com/en-us/industry/mobility/architecture/ai-pair-programmer)
|
||||
- **Описание**: GitHub Copilot как AI pair programmer в контексте autonomous vehicle development
|
||||
- **Training**: [GitHub Copilot Fundamentals](https://learn.microsoft.com/en-us/training/paths/copilot/)
|
||||
|
||||
### Популярность термина
|
||||
|
||||
- **720 поисковых запросов/месяц** по "ai pair programming" (данные на 2025)
|
||||
- Устойчивый термин с 2022 года после запуска GitHub Copilot
|
||||
|
||||
---
|
||||
|
||||
## Профессиональные инструменты
|
||||
|
||||
### GitHub Copilot
|
||||
|
||||
- **Запуск**: февраль 2022 (technical preview)
|
||||
- **Модели**: OpenAI Codex (изначально), GPT-4, Claude Opus, Haiku, Gemini (2025+)
|
||||
- **Интеграция**: VS Code, Visual Studio, JetBrains, Neovim, Eclipse, Xcode, Zed
|
||||
- **Pricing**: Free ($0, limited), Pro ($10/месяц), Business ($19/user), Enterprise ($39/user)
|
||||
- **Ссылки**:
|
||||
- [Official Page](https://github.com/features/copilot)
|
||||
- [Visual Studio Integration](https://visualstudio.microsoft.com/github-copilot/)
|
||||
- [Responsible AI practices](https://github.blog/ai-and-ml/github-copilot/responsible-ai-pair-programming-with-github-copilot/)
|
||||
|
||||
### Cursor
|
||||
|
||||
- **Статус**: production-ready AI-native IDE (fork VS Code)
|
||||
- **Версия 2.0**: октябрь 2025 с Composer
|
||||
- **AI Pair Programming возможности**:
|
||||
- Real-time collaboration mode
|
||||
- Context-aware suggestions
|
||||
- Inline chat (`Alt + /`)
|
||||
- Multi-file editing
|
||||
- Agent mode для complex tasks
|
||||
- **Ссылки**:
|
||||
- [Cursor IDE Official](https://cursor.com)
|
||||
- [AI Pair Programming Best Practices](https://skywork.ai/blog/cursor-1-7-ai-autocomplete-pair-programming-best-practices/)
|
||||
- [Pair Programming Guide](https://developertoolkit.ai/en/cursor-ide/lessons/pair-programming/)
|
||||
|
||||
### Windsurf
|
||||
|
||||
- **Статус**: browser-based AI-native IDE
|
||||
- **Cascade Engine**: адаптивный AI с memory management
|
||||
- **Pair Programming возможности**:
|
||||
- Real-time co-editing
|
||||
- Team collaboration (share sessions)
|
||||
- Context awareness
|
||||
- SuperComplete predictions
|
||||
- Memory across sessions
|
||||
- **Ссылки**:
|
||||
- [Windsurf vs Cursor](https://flexisourceit.com.au/resources/blog/cursor-vs-windsurf-ai-pair-programming/)
|
||||
- [Vibe Coding Guide](https://www.obviousworks.ch/en/vibe-coding-with-windsurf-ide/)
|
||||
|
||||
### Другие инструменты
|
||||
|
||||
| Инструмент | Описание | Ссылка |
|
||||
|-----------|----------|--------|
|
||||
| **Tabnine** | Privacy-focused, trainable на private code | [tabnine.com](https://www.tabnine.com) |
|
||||
| **AWS CodeWhisperer** | AWS-focused pair programmer | [aws.amazon.com/codewhisperer](https://aws.amazon.com/codewhisperer) |
|
||||
| **Replit Ghostwriter** | Browser-based, great for prototyping | [replit.com](https://replit.com) |
|
||||
| **Cody (Sourcegraph)** | Enterprise-scale codebase navigation | [sourcegraph.com/cody](https://sourcegraph.com/cody) |
|
||||
| **MutableAI** | Refactoring and test generation focus | [mutable.ai](https://mutable.ai) |
|
||||
|
||||
---
|
||||
|
||||
## Интеграция с Claude Code
|
||||
|
||||
✅ **Поддерживается как pair programming mode**
|
||||
|
||||
- **Output styles для pair programming**:
|
||||
- `/output-style learning` — интерактивный режим с заданиями для разработчика
|
||||
- `/output-style explanatory` — подробные объяснения решений
|
||||
- **Публикации**:
|
||||
- [Pair Programming with Claude Code (Shipyard)](https://shipyard.build/blog/claude-code-output-styles-pair-programming/) (сентябрь 2025)
|
||||
- [Brilliant in Parts, Unreliable in Others (DSheiko)](https://dsheiko.com/weblog/pair-programming-with-claude-code/) (декабрь 2025)
|
||||
- [Meet Your New AI Pair Programmer (Dan Vega)](https://www.youtube.com/watch?v=-jYlp2oJh_o) (июль 2025)
|
||||
- **Best practices**: [Anthropic Engineering Blog](https://www.anthropic.com/engineering/claude-code-best-practices) (апрель 2025)
|
||||
- **Community**: [Claude Code Official](https://claudecode.org) позиционирует себя как "Your AI Pair Programming Assistant"
|
||||
|
||||
**Особенности Claude Code в pair programming:**
|
||||
- Planning Mode — агент планирует перед кодированием (human approval required)
|
||||
- Custom output styles — настройка стиля взаимодействия (обучающий, объясняющий)
|
||||
- Context awareness — понимает весь проект
|
||||
- Interactive review — step-by-step approval workflow
|
||||
|
||||
---
|
||||
|
||||
## Минимальный подход без фреймворков
|
||||
|
||||
**Концепция**: AI как навигатор, human как driver
|
||||
|
||||
**Паттерн взаимодействия**:
|
||||
1. **Разработчик**: описывает задачу в natural language
|
||||
2. **AI**: предлагает решение, объясняет подход
|
||||
3. **Разработчик**: принимает (Tab/Enter) или корректирует
|
||||
4. **Итерация**: совместный рефакторинг и улучшение
|
||||
|
||||
**Преимущества**:
|
||||
- ✅ Human oversight на каждом шаге
|
||||
- ✅ Learning by doing — разработчик не теряет навыки
|
||||
- ✅ Контроль качества кода
|
||||
- ✅ Нет риска "vibe coding" (бездумное копирование)
|
||||
|
||||
---
|
||||
|
||||
## Паттерны использования
|
||||
|
||||
### Traditional Pair Programming vs AI
|
||||
|
||||
| Аспект | Human Pair | AI Pair |
|
||||
|--------|-----------|---------|
|
||||
| **Доступность** | Ограничена schedule | 24/7 |
|
||||
| **Терпение** | Переменное | Бесконечное |
|
||||
| **Знания** | Специализация | Broad (все языки/фреймворки) |
|
||||
| **Скорость** | Human pace | Instant suggestions |
|
||||
| **Обучение** | Bidirectional | AI → Human (mostly) |
|
||||
|
||||
### Роли AI в pair programming
|
||||
|
||||
- **Navigator AI**: направляет архитектуру, предлагает подходы, ловит ошибки
|
||||
- **Driver Assistant**: пишет код, автодополнение, boilerplate generation
|
||||
- **Rubber Duck Plus**: слушает, задаёт вопросы, предлагает альтернативы
|
||||
- **Knowledge Base**: моментальная документация, примеры, best practices
|
||||
|
||||
---
|
||||
|
||||
## Ссылки
|
||||
|
||||
### Официальные источники
|
||||
- [GitHub Copilot: Your AI pair programmer](https://github.com/features/copilot)
|
||||
- [Microsoft Learn: AI pair programmer](https://learn.microsoft.com/en-us/industry/mobility/architecture/ai-pair-programmer)
|
||||
- [Visual Studio with GitHub Copilot](https://visualstudio.microsoft.com/github-copilot/)
|
||||
- [GitHub Copilot Training](https://learn.microsoft.com/en-us/training/paths/copilot/)
|
||||
|
||||
### Claude Code
|
||||
- [Claude Code Official](https://claudecode.org)
|
||||
- [Pair Programming with Output Styles](https://shipyard.build/blog/claude-code-output-styles-pair-programming/) (сентябрь 2025)
|
||||
- [Best Practices (Anthropic)](https://www.anthropic.com/engineering/claude-code-best-practices) (апрель 2025)
|
||||
- [Dan Vega Tutorial](https://www.youtube.com/watch?v=-jYlp2oJh_o) (июль 2025)
|
||||
|
||||
### Cursor & Windsurf
|
||||
- [Cursor Official](https://cursor.com)
|
||||
- [Cursor Pair Programming Guide](https://developertoolkit.ai/en/cursor-ide/lessons/pair-programming/)
|
||||
- [Cursor Best Practices](https://skywork.ai/blog/cursor-1-7-ai-autocomplete-pair-programming-best-practices/)
|
||||
- [Windsurf vs Cursor](https://flexisourceit.com.au/resources/blog/cursor-vs-windsurf-ai-pair-programming/)
|
||||
|
||||
### Обзоры и сравнения
|
||||
- [AI Pair Programming in 2025 (Builder.io)](https://www.builder.io/blog/ai-pair-programming) (декабрь 2024)
|
||||
- [10 AI Pair Programming Tools (Questera)](https://www.questera.ai/blogs/10-ai-pair-programming-tools-worth-using-in-2025) (сентябрь 2025)
|
||||
- [Showdown September 2025 (Mixflow)](https://mixflow.ai/blog/ai-pair-programming-tools-showdown-september-2025-4-solutions-for-enterprise-development/)
|
||||
- [8 Best AI Coding Tools (n8n)](https://blog.n8n.io/best-ai-for-coding/) (апрель 2025)
|
||||
|
|
@ -1,306 +0,0 @@
|
|||
# AI Coding Tools Usage Statistics Research
|
||||
|
||||
**Research Date:** 2026-01-23
|
||||
**Purpose:** Statistical evidence to support article positioning on professional AI coding adoption
|
||||
|
||||
---
|
||||
|
||||
## Executive Summary
|
||||
|
||||
Key findings supporting article thesis:
|
||||
- **Senior developers use AI MORE than juniors** (contrary to "AI is for beginners" stigma)
|
||||
- **76% of all developers** are using or planning to use AI tools (2024)
|
||||
- **33% of senior developers** (10+ years) generate over half their code with AI
|
||||
- **Only 13% of junior developers** (0-2 years) do the same — 2.5x difference
|
||||
- **27% of companies** have banned AI tools due to security/privacy concerns
|
||||
- **90% of Fortune 100** companies have adopted GitHub Copilot
|
||||
- **45-73% of AI-generated code** contains security vulnerabilities (range across Veracode, Medium/CSA, and Georgetown CSET studies — see Section 7)
|
||||
|
||||
---
|
||||
|
||||
## 1. Overall Adoption Rates
|
||||
|
||||
### General Developer Population
|
||||
|
||||
**Stack Overflow Developer Survey 2024:**
|
||||
- **76% of all respondents** are using or planning to use AI tools in their development process
|
||||
- **63% of professional developers** currently use AI in their development process
|
||||
- **74% want to continue using ChatGPT** next year (most popular tool)
|
||||
- Source: https://survey.stackoverflow.co/2024/ai
|
||||
|
||||
**Index.dev 2025:**
|
||||
- **84% of developers use AI tools** that now write **41% of all code**
|
||||
- Source: https://www.index.dev/blog/developer-productivity-statistics-with-ai-tools
|
||||
|
||||
**Key Insight:** Majority adoption achieved — AI coding is mainstream, not edge case.
|
||||
|
||||
---
|
||||
|
||||
## 2. Senior vs Junior Developer Usage
|
||||
|
||||
### Critical Finding: Seniors Use AI MORE
|
||||
|
||||
**Fastly Study (2025):**
|
||||
- **33% of senior developers** (10+ years experience) say over half their shipped code is AI-generated
|
||||
- **13% of junior developers** (0-2 years) report the same
|
||||
- **2.5x difference** — seniors adopt AI more aggressively than juniors
|
||||
- Source: https://www.fastly.com/blog/senior-developers-ship-more-ai-code
|
||||
|
||||
**Why This Matters:**
|
||||
Contradicts the "AI is a crutch for beginners" narrative. Senior developers with deep experience use AI more, not less.
|
||||
|
||||
**Tech.co Analysis:**
|
||||
- **59% of senior developers** say AI speeds up their working process
|
||||
- Seniors more likely to view AI as net time-saver
|
||||
- Source: https://tech.co/news/senior-junior-developer-ai-divide
|
||||
|
||||
**The Register (2025):**
|
||||
- Around **1/3 of senior developers** (decade+ experience) use AI code-generation tools (Copilot, Claude, Gemini) to produce over half their finished software
|
||||
- Source: https://www.theregister.com/2025/08/28/older_developers_ai_code/
|
||||
|
||||
### Counter-Evidence: Context Matters
|
||||
|
||||
**METR Study (contradictory finding):**
|
||||
- Experienced open-source developers took **19% longer** to complete tasks when using AI tools
|
||||
- Contradicts industry claims about productivity gains
|
||||
- Source: https://diginomica.com/report-ai-tools-slow-down-experienced-developers-19-wake-call-industry-hype
|
||||
|
||||
**Interpretation:** AI effectiveness depends on task type, tools used, and developer skill with AI. Not universally faster.
|
||||
|
||||
---
|
||||
|
||||
## 3. Developer Sentiment by Experience Level
|
||||
|
||||
### Senior Developer Perspective
|
||||
|
||||
**Positive Views:**
|
||||
- View AI as time-saver (59% — Tech.co)
|
||||
- Higher enthusiasm for speed improvements
|
||||
- Better at identifying when to trust AI output (experience advantage)
|
||||
|
||||
**Manuel Kießling (2025):**
|
||||
- "Senior software engineers are in the perfect position to ensure success with Coding Assistants"
|
||||
- Experience and accumulated know-how in software engineering best practices critical
|
||||
- Source: https://manuel.kiessling.net/2025/03/31/how-seasoned-developers-can-achieve-great-results-with-ai-coding-agents/
|
||||
|
||||
### Junior Developer Perspective
|
||||
|
||||
**GitHub Study:**
|
||||
- Developers using AI assistants completed tasks up to **56% faster**
|
||||
- **Juniors saw the most significant gains** (because they learn from AI suggestions)
|
||||
- Source: https://codeconductor.ai/blog/future-of-junior-developers-ai/
|
||||
|
||||
**Challenges for Juniors:**
|
||||
- Lack experience to spot critical flaws in AI-generated code (IT Pro)
|
||||
- May over-trust AI without understanding limitations
|
||||
- Source: https://www.itpro.com/software/development/senior-developers-are-all-in-on-vibe-coding-but-junior-staff-lack-the-experience-to-spot-critical-flaws
|
||||
|
||||
**Stack Overflow 2025:**
|
||||
- **35% of professional developers** believed AI tools struggled with complex tasks (2024)
|
||||
- Dropped to **29% in 2025** — improving perception
|
||||
- Source: https://survey.stackoverflow.co/2025/ai
|
||||
|
||||
---
|
||||
|
||||
## 4. Enterprise Adoption & Company Policies
|
||||
|
||||
### Fortune 100 & Enterprise
|
||||
|
||||
**GitHub Copilot Adoption:**
|
||||
- **90% of Fortune 100 companies** have adopted GitHub Copilot
|
||||
- Validates tool as enterprise-grade solution
|
||||
- Source: https://www.secondtalent.com/resources/github-copilot-statistics/
|
||||
|
||||
**Google (2024):**
|
||||
- Over **25% of Google's code** is now written by AI
|
||||
- Source: https://fortune.com/2024/10/30/googles-code-ai-sundar-pichai/
|
||||
|
||||
### Companies Banning or Restricting AI
|
||||
|
||||
**Cisco 2024 Data Privacy Benchmark Study:**
|
||||
- **27% of organizations** have banned use of GenAI among workforce (at least temporarily)
|
||||
- Over privacy and data security risks
|
||||
- Only **46% have policies** in place governing acceptable use
|
||||
- Only **42% train users** on safe use
|
||||
- Source: https://newsroom.cisco.com/c/r/newsroom/en/us/a/y2024/m01/organizations-ban-use-of-generative-ai-over-data-privacy-security-cisco-study.html
|
||||
|
||||
**Security Leaders Survey (2024):**
|
||||
- **63% of security leaders** think it's impossible to govern safe use of AI
|
||||
- Don't have visibility into where AI is being used
|
||||
- **47% of companies** have policies to ensure safe use
|
||||
- Source: https://www.helpnetsecurity.com/2024/09/19/ai-generated-code-concerns/
|
||||
|
||||
**Notable Company Bans:**
|
||||
- **Apple:** Restricted employees from using ChatGPT/Copilot (concerns over confidential data leak)
|
||||
- **Amazon:** Banned ChatGPT after discovering responses resembling internal data
|
||||
- **Samsung:** Employee shared confidential information on ChatGPT (65% of employees concerned about security)
|
||||
- Sources:
|
||||
- https://www.businessinsider.com/chatgpt-companies-issued-bans-restrictions-openai-ai-amazon-apple-2023-7
|
||||
- https://www.cloudflare.com/the-net/banning-ai/
|
||||
|
||||
**Security Magazine (2024):**
|
||||
- **32% of organizations** have banned use of generative AI tools
|
||||
- Source: https://www.securitymagazine.com/articles/100030-32-of-organizations-have-banned-the-use-of-generative-ai-tools
|
||||
|
||||
**Key Insight:** Enterprise adoption is split — Fortune 100 embrace AI, but ~30% of companies ban it over security/privacy concerns.
|
||||
|
||||
---
|
||||
|
||||
## 5. Job Market Requirements
|
||||
|
||||
### AI Skills in Job Postings
|
||||
|
||||
**Entry-Level Tech Jobs:**
|
||||
- Tech job postings plummeted: **67% down from 2023 to 2024** for entry-level
|
||||
- Automation of technical tasks (GitHub Copilot, no-code platforms) reducing junior roles
|
||||
- Source: https://intuitionlabs.ai/articles/ai-impact-graduate-jobs-2025
|
||||
|
||||
**Java Developer with GitHub Copilot:**
|
||||
- Specific job postings now require "Java Developer with GitHub CoPilot / AI CodeGenerator"
|
||||
- AI skills becoming explicit requirement in some roles
|
||||
- Source: https://www.ziprecruiter.com/Jobs/Github-Copilot-Jobs
|
||||
|
||||
**Developer Role Shifts:**
|
||||
- Companies hiring fewer juniors for routine tasks
|
||||
- AI tools can automate much of what juniors used to do
|
||||
- Emphasis shifting to developers who can effectively use AI tools
|
||||
|
||||
**Key Insight:** AI proficiency becoming job requirement, but also reducing some entry-level positions.
|
||||
|
||||
---
|
||||
|
||||
## 6. Productivity Metrics
|
||||
|
||||
### Task Completion & Speed
|
||||
|
||||
**GitHub Study:**
|
||||
- Developers with AI assistants completed tasks up to **56% faster**
|
||||
- Juniors saw most significant gains
|
||||
- Source: https://codeconductor.ai/blog/future-of-junior-developers-ai/
|
||||
|
||||
**Multi-Company Industry RCT (2024):**
|
||||
- Average **26% increase in productivity** for developers with Copilot access
|
||||
- **Developers completed 26.08% more tasks** on average vs control group
|
||||
- Sources:
|
||||
- https://addyo.substack.com/p/the-reality-of-ai-assisted-software
|
||||
- https://www.cerbos.dev/blog/productivity-paradox-of-ai-coding-assistants
|
||||
|
||||
**GitHub Copilot:**
|
||||
- Users complete **126% more projects per week** compared to manual coders
|
||||
- **46% code completion rate** (Q1 2025)
|
||||
- **~30% of AI suggestions** get accepted by developers
|
||||
- Sources:
|
||||
- https://www.secondtalent.com/resources/ai-coding-assistant-statistics/
|
||||
- https://www.netcorpsoftwaredevelopment.com/blog/ai-generated-code-statistics
|
||||
|
||||
**Stack Overflow 2024:**
|
||||
- AI improving quality of time spent but not necessarily saving time overall
|
||||
- Source: https://stackoverflow.blog/2024/07/22/2024-developer-survey-insights-for-ai-ml/
|
||||
|
||||
---
|
||||
|
||||
## 7. Code Quality & Security Concerns
|
||||
|
||||
### Security Vulnerabilities in AI-Generated Code
|
||||
|
||||
**Critical Statistics:**
|
||||
|
||||
**Georgetown CSET Study (2024):**
|
||||
- **73% of AI code samples** contained vulnerabilities when checked manually
|
||||
- ChatGPT generated 21 programs in 5 languages: only **5 out of 21 were initially secure**
|
||||
- Source: https://cset.georgetown.edu/publication/cybersecurity-risks-of-ai-generated-code/
|
||||
|
||||
**Veracode (2024):**
|
||||
- **45% of cases** AI-generated code introduces security flaws
|
||||
- Source: https://www.veracode.com/blog/ai-generated-code-security-risks/
|
||||
|
||||
**Medium Analysis (2024):**
|
||||
- **62% of AI-generated code** contains known vulnerabilities
|
||||
- **45% of AI-assisted development tasks** introduce critical security flaws
|
||||
- Source: https://medium.com/@michael.hannecke/ai-is-writing-your-code-whos-checking-for-vulnerabilities-30377e98e0f2
|
||||
|
||||
**Cloud Security Alliance (2025):**
|
||||
- **62% of AI-generated code solutions** contain design flaws or known security vulnerabilities
|
||||
- Even when developers used latest foundational AI models
|
||||
- Source: https://cloudsecurityalliance.org/blog/2025/07/09/understanding-security-risks-in-ai-generated-code
|
||||
|
||||
### Code Quality Issues
|
||||
|
||||
**GitClear 2025 Research:**
|
||||
- **4x growth in code clones** (duplicated code) from AI assistants
|
||||
- Code assistants accepted far greater share of code-writing responsibility during 2024
|
||||
- Source: https://www.gitclear.com/ai_assistant_code_quality_2025_research
|
||||
|
||||
**Common Problems:**
|
||||
- Injection flaws
|
||||
- Insecure dependencies
|
||||
- Mishandling of sensitive data
|
||||
- Bugs and maintainability issues
|
||||
- Lack of context leading to inappropriate solutions
|
||||
|
||||
**Sources:**
|
||||
- https://petri.com/ai-coding-tools-rising-software-defects/
|
||||
- https://www.endorlabs.com/learn/the-most-common-security-vulnerabilities-in-ai-generated-code
|
||||
- https://blog.secureflag.com/2024/10/16/the-risks-of-generative-ai-coding-in-software-development/
|
||||
|
||||
---
|
||||
|
||||
## 8. Market Size & Growth
|
||||
|
||||
**AI Code Generation Market:**
|
||||
- Valued at **$4.91 billion in 2024**
|
||||
- Projected to hit **$30.1 billion by 2032**
|
||||
- **27.1% CAGR** (compound annual growth rate)
|
||||
- Source: https://www.secondtalent.com/resources/ai-coding-assistant-statistics/
|
||||
|
||||
---
|
||||
|
||||
## 9. Adoption by Developer Type
|
||||
|
||||
**Full-Stack vs Frontend vs Backend:**
|
||||
- **Full-stack developers** lead AI adoption at **32.1%**
|
||||
- **Frontend developers:** 22.1%
|
||||
- **Backend developers:** 8.9%
|
||||
- Source: https://www.secondtalent.com/resources/ai-coding-assistant-statistics/
|
||||
|
||||
**Interpretation:** AI tools support end-to-end coding tasks, making them most valuable for full-stack work.
|
||||
|
||||
---
|
||||
|
||||
## Key Takeaways for Article
|
||||
|
||||
### For "Professional AI Usage" Argument:
|
||||
|
||||
1. **Seniors use AI MORE than juniors** (33% vs 13%) — contradicts "AI is for beginners"
|
||||
2. **90% of Fortune 100** adopted Copilot — enterprise validation
|
||||
3. **76% of all developers** using or planning to use — mainstream adoption
|
||||
4. **Methodology matters:** Same AI tools, different outcomes based on professional approach
|
||||
|
||||
### For "Risks Exist" Honesty:
|
||||
|
||||
1. **45-73% of AI code** contains vulnerabilities — professional review essential
|
||||
2. **27-32% of companies** ban AI — legitimate security concerns
|
||||
3. **Quality depends on developer skill** — juniors struggle to spot flaws
|
||||
|
||||
### For "This Requires Skill" Argument:
|
||||
|
||||
1. Seniors achieve 2.5x more value from same tools
|
||||
2. Experience needed to identify when to trust AI
|
||||
3. Productivity gains vary wildly (56% faster to 19% slower)
|
||||
4. Professional methodologies (spec-driven, TDD) emerge to manage AI effectively
|
||||
|
||||
---
|
||||
|
||||
## Sources Summary
|
||||
|
||||
**Primary Sources:**
|
||||
- Stack Overflow Developer Survey 2024/2025
|
||||
- Fastly Senior vs Junior Study (2025)
|
||||
- Georgetown CSET Cybersecurity Research
|
||||
- Cisco Data Privacy Benchmark Study
|
||||
- GitHub Copilot Statistics
|
||||
- GitClear Code Quality Research
|
||||
|
||||
**Total Sources:** 35+ verified articles, studies, and surveys
|
||||
|
||||
**Confidence Level:** High — multiple independent sources confirm key statistics
|
||||
|
|
@ -1,290 +0,0 @@
|
|||
# Brief: Beyond Vibe Coding
|
||||
|
||||
**Article:** Beyond Vibe Coding: Professional AI Development Methodologies
|
||||
**Author:** henry-technical
|
||||
**Created:** 2026-01-22
|
||||
**Updated:** 2026-01-23
|
||||
|
||||
---
|
||||
|
||||
## Strategic Context
|
||||
|
||||
**Why this topic:**
|
||||
"Vibe coding" became Collins Dictionary Word of the Year 2025, capturing massive attention. But the term has negative connotations (unprofessional, unreliable, "toy for juniors") and conflates all AI-assisted development into one bucket.
|
||||
|
||||
This creates a critical opportunity:
|
||||
1. **Reframe the narrative:** AI coding isn't just vibe coding — there's a spectrum of professional methodologies
|
||||
2. **Fight stigma:** Professional AI usage ≠ junior with ChatGPT
|
||||
3. **Establish legitimacy:** AI tools are for professionals who know how to use them properly
|
||||
4. **Define skill requirements:** Professional AI coding requires methodology, not just prompting
|
||||
|
||||
The article addresses the elephant in the room: "Is using AI unprofessional?" Answer: No. But professional usage requires professional approach.
|
||||
|
||||
**Why now:**
|
||||
- Vibe coding peaked as cultural phenomenon (Dec 2025)
|
||||
- Professional methodologies emerging: Spec-Driven Development saw 359x growth in 2025
|
||||
- Ralph Loop/Ralph Wiggum concept went viral (Jan 2026)
|
||||
- Developers seeking clarity on "what comes after vibe coding"
|
||||
|
||||
**Thought leadership angle:**
|
||||
Position Henry (and by extension, Banatie ecosystem) as authoritative voice on AI-assisted development methodologies. Not chasing trends — defining the landscape.
|
||||
|
||||
**Banatie connection:**
|
||||
Demonstrates deep understanding of AI developer workflows (Banatie's core audience). Establishes credibility in AI tooling space. No direct product mention — pure value add. Trust-building for future product content.
|
||||
|
||||
---
|
||||
|
||||
## Target Reader
|
||||
|
||||
**Who:** AI-first developers using Claude Code, Cursor, Copilot
|
||||
**Experience level:** 2-10 years, familiar with AI coding but seeking structure
|
||||
|
||||
**Their real problem (deeper than surface):**
|
||||
- Surface: "Vibe coding works for prototypes but fails for production. What's the professional approach?"
|
||||
- Deeper: "Is AI coding legitimate for professionals, or just a toy for juniors? Can I use these tools without feeling like I'm cheating? Is 'professional + AI' different from 'junior + ChatGPT'?"
|
||||
|
||||
**What they really want:**
|
||||
1. Validation that AI coding is professional-grade, not shameful
|
||||
2. Proof that professionals use AI differently than juniors
|
||||
3. Understanding that professional AI usage requires skill and methodology
|
||||
4. Clear framework for choosing approach based on stakes
|
||||
5. Permission to use AI tools while maintaining professional standards
|
||||
|
||||
**Search intent:** Informational (learning + comparing approaches) + Validation (seeking legitimacy)
|
||||
|
||||
**Reader mental state:**
|
||||
- Excited about AI coding but frustrated with inconsistent results
|
||||
- Aware of vibe coding term, curious about alternatives
|
||||
- Looking for practitioner perspective, not academic theory
|
||||
- Ready to experiment with new workflows
|
||||
- **Seeking confirmation:** "Am I still a real engineer if I use AI?"
|
||||
|
||||
---
|
||||
|
||||
## Content Strategy
|
||||
|
||||
**Primary keyword:** "ai coding methodologies" (0 vol — thought leadership)
|
||||
- No direct search volume but semantic relevance
|
||||
- Definitional content becomes reference point
|
||||
- Early mover advantage in emerging terminology
|
||||
|
||||
**Secondary keywords (with volume):**
|
||||
- spec driven development (1,300 vol, KD 25) — commercial intent
|
||||
- ai pair programming (720 vol, KD 50) — informational
|
||||
- human in the loop ai (880 vol, commercial)
|
||||
- ralph loop (10 vol but trending: 140 in Dec 2025)
|
||||
|
||||
**Halo strategy:**
|
||||
Mention tools for connection to high-volume searches:
|
||||
- claude code (165k vol)
|
||||
- cursor ai (135k vol)
|
||||
- github copilot (74k vol)
|
||||
- ai coding assistant (12.1k vol)
|
||||
|
||||
**Competing content:**
|
||||
- GitHub Spec Kit docs (technical, not survey)
|
||||
- GitHub Copilot blog posts (product-focused)
|
||||
- Academic papers on agentic coding (too theoretical)
|
||||
- Reddit discussions (fragmented, no synthesis)
|
||||
|
||||
**Our differentiation:**
|
||||
- Complete methodology landscape in one place
|
||||
- Practitioner voice from Oleg's real experience
|
||||
- Honest trade-offs, not vendor pitches
|
||||
- Survey format: neutral comparison, not advocacy
|
||||
|
||||
**SEO approach:**
|
||||
Not a pure SEO play — thought leadership first. But:
|
||||
1. Rank for long-tail: "spec driven development tutorial", "ai pair programming github copilot"
|
||||
2. Become definitional content for emerging terms
|
||||
3. Halo traffic from product keyword mentions
|
||||
4. Future backlink magnet as methodology reference
|
||||
|
||||
---
|
||||
|
||||
## Requirements
|
||||
|
||||
**Content type:** Explainer / Survey
|
||||
**Target length:** 2,500-3,500 words
|
||||
**Format:** Methodology-by-methodology breakdown
|
||||
|
||||
**Structure (must follow):**
|
||||
|
||||
1. **Hook:** Vibe coding as entry point (Collins Word of Year)
|
||||
- Why the term resonated
|
||||
- Why it's insufficient
|
||||
- Promise: spectrum of methodologies
|
||||
|
||||
2. **Each methodology section (required structure):**
|
||||
|
||||
**Credentials block (establish legitimacy):**
|
||||
- **Name:** Official methodology name
|
||||
- **Source:** Link(s) to read more (GitHub repos, papers, official docs)
|
||||
- **Created by:** Company/person/community (e.g., "GitHub", "Andrej Karpathy", "Atlassian Research")
|
||||
- **When:** Year introduced/popularized
|
||||
- **Used by:** Notable companies/projects (if applicable)
|
||||
|
||||
**Description:**
|
||||
- What it is (2-3 sentences)
|
||||
- What problem it solves
|
||||
- How it works (brief mechanism)
|
||||
- When to use (stakes-based)
|
||||
- Henry's take (from interview)
|
||||
- Example: tool or workflow detail
|
||||
- Code snippet where relevant
|
||||
|
||||
**Purpose of credentials:** Show that each methodology has serious foundation, not just random practice
|
||||
|
||||
3. **Methodologies to cover (in order):**
|
||||
- Vibe Coding (baseline)
|
||||
- Spec-Driven Development
|
||||
- Agentic Coding (+ Ralph Loop)
|
||||
- AI Pair Programming
|
||||
- Human-in-the-Loop (HITL)
|
||||
- TDD + AI
|
||||
|
||||
4. **Closing:** Decision framework
|
||||
- Low stakes → vibe coding acceptable
|
||||
- Medium stakes → spec-driven or HITL
|
||||
- High stakes → TDD + spec
|
||||
- Context matters more than orthodoxy
|
||||
|
||||
**Must include:**
|
||||
- **Legitimacy framing:** Throughout article, reinforce that professional AI usage ≠ junior with ChatGPT
|
||||
- **Skill emphasis:** Professional AI coding requires methodology, not just prompting
|
||||
- **Statistical backing:** Use data from ai-usage-statistics.md to support claims
|
||||
- Oleg's quotes from interview (integrate naturally, not block quotes)
|
||||
- Real tool names: Claude Code, Cursor, GitHub Copilot, Planning Mode
|
||||
- Honest about permissions frustration
|
||||
- Mention specific approaches: `.claude/settings.json`, CLAUDE.md files
|
||||
- Code examples: 2-3 short snippets (spec file, test example)
|
||||
- Links to authoritative sources: GitHub Spec Kit, arXiv papers, VentureBeat Ralph article
|
||||
- **Credentials for each methodology:** who created, when, where to learn more
|
||||
|
||||
**Tone requirements:**
|
||||
- Henry's voice: direct, pragmatic, "I've been there"
|
||||
- No vendor pitches (even for tools we like)
|
||||
- Honest trade-offs: "X works great IF..." not "X is the best"
|
||||
- Practitioner solidarity: "we're all figuring this out"
|
||||
- Technical but accessible: explain jargon on first use
|
||||
|
||||
**Don't include:**
|
||||
- Listicle format (no "5 ways to...")
|
||||
- Excessive bolding or formatting
|
||||
- Marketing speak or hype
|
||||
- Academic tone
|
||||
- "In conclusion" or similar filler
|
||||
- Apologies for length
|
||||
|
||||
**Sources to cite:**
|
||||
- GitHub Spec Kit: github.com/github/spec-kit
|
||||
- Geoffrey Huntley (Ralph Loop): ghuntley.com/ralph/
|
||||
- VentureBeat: "How Ralph Wiggum went from Simpsons to AI"
|
||||
- Anthropic ralph-wiggum plugin
|
||||
- ArXiv papers: 2508.11126 (Agentic Programming), 2512.14012 (Don't Vibe, Control)
|
||||
- Atlassian HULA paper: arXiv 2411.12924
|
||||
|
||||
**Code/spec examples:**
|
||||
- Sample CLAUDE.md specification
|
||||
- `.claude/settings.json` permissions example
|
||||
- Simple test-first example (TDD)
|
||||
- Not full implementations — illustrative snippets
|
||||
|
||||
---
|
||||
|
||||
## Success Criteria
|
||||
|
||||
**SEO:**
|
||||
- Rank page 1 for "ai coding methodologies" within 6 months
|
||||
- Rank page 1 for "spec driven development tutorial" within 3 months
|
||||
- Appear in "People Also Ask" for methodology keywords
|
||||
|
||||
**Engagement:**
|
||||
- 100+ reactions on Dev.to within 2 weeks
|
||||
- 3+ substantive comments from practitioners
|
||||
- Shared in r/ClaudeAI, r/Cursor
|
||||
|
||||
**Authority:**
|
||||
- Backlinks from developer blogs
|
||||
- Referenced in future methodology discussions
|
||||
- Becomes go-to reference for "what comes after vibe coding"
|
||||
|
||||
**Distribution:**
|
||||
- Dev.to (primary)
|
||||
- Share to HN (likely front page material)
|
||||
- Share to relevant subreddits
|
||||
- LinkedIn repost by @banatie (company angle)
|
||||
|
||||
---
|
||||
|
||||
## Special Notes for @architect
|
||||
|
||||
**Critical: Methodology credentials**
|
||||
Each methodology MUST have a credentials block (Name, Source links, Created by, When, Used by). This is essential for establishing legitimacy. Don't skip this — it's the foundation that makes this article valuable.
|
||||
|
||||
Example for Spec-Driven Development:
|
||||
- **Name:** Spec-Driven Development
|
||||
- **Source:** github.com/github/spec-kit, GitHub Engineering Blog
|
||||
- **Created by:** GitHub Engineering Team
|
||||
- **When:** 2024-2025 (formalized)
|
||||
- **Used by:** GitHub Copilot Workspace, Claude Code users
|
||||
|
||||
Without credentials, methodologies look like random practices. With credentials, they're professional approaches worth considering.
|
||||
|
||||
**Interview integration:**
|
||||
Use Oleg's interview responses from `interview.md`. These are raw notes — transform into Henry's voice:
|
||||
|
||||
Raw: "Честно? Пробовал в несколько заходов — и каждый раз полностью отключал."
|
||||
Henry's voice: "I've tried AI autocomplete multiple times. Each time, I ended up disabling it."
|
||||
|
||||
Don't quote Oleg directly — synthesize his insights into Henry's natural flow.
|
||||
|
||||
**Statistical evidence:**
|
||||
Use data from `ai-usage-statistics.md` to support key claims:
|
||||
- Seniors use AI MORE than juniors (33% vs 13%)
|
||||
- 76% of developers using or planning to use AI
|
||||
- 90% of Fortune 100 adopted GitHub Copilot
|
||||
- 45-62% of AI code contains vulnerabilities (need for methodology)
|
||||
|
||||
These statistics reinforce the article's legitimacy argument with hard data.
|
||||
|
||||
**Source verification:**
|
||||
All sources in `research-index.md` have been verified. Use URLs for citations where relevant. ArXiv papers exist and are correctly numbered.
|
||||
|
||||
**Ralph Loop handling:**
|
||||
Hot topic (Dec 2025 spike) but low search volume. Cover it as emerging methodology under "Agentic Coding" section. Mention it's controversial — Oleg is skeptical about use cases.
|
||||
|
||||
**Permissions discussion:**
|
||||
Include but don't make it central. Oleg's frustration is real but frame constructively: "This is an evolving UX challenge that tools are still figuring out."
|
||||
|
||||
---
|
||||
|
||||
## Title Alternatives
|
||||
|
||||
Current: "Beyond Vibe Coding: Professional AI Development Methodologies"
|
||||
|
||||
**Alternative options to consider:**
|
||||
1. "You Might Not Need Vibe Coding"
|
||||
2. "What Comes After Vibe Coding"
|
||||
3. "AI Coding vs Vibe Coding"
|
||||
4. "Not Only Vibe Coding"
|
||||
5. "AI Coding for Professionals"
|
||||
6. "~~Vibe Coding~~ AI Coding for Software Engineers" (strikethrough effect)
|
||||
|
||||
**Positioning note:**
|
||||
These alternatives emphasize the legitimacy angle more directly. Consider if we want to be more confrontational ("You Might Not Need") or more educational ("What Comes After"). Current title is neutral/educational.
|
||||
|
||||
**SEO consideration:**
|
||||
"Beyond Vibe Coding" works well because:
|
||||
- "Beyond X" is a recognized pattern
|
||||
- Still includes "vibe coding" for search association
|
||||
- Promises elevation/progression
|
||||
- Professional tone
|
||||
|
||||
But "AI Coding for Professionals" might better target the deeper reader need.
|
||||
|
||||
**Decision:** Can be revisited during outline/writing phase if better angle emerges.
|
||||
|
||||
---
|
||||
|
||||
**Research cost:** ~$0.40 (DataForSEO keyword research)
|
||||
|
Before Width: | Height: | Size: 120 KiB |
|
Before Width: | Height: | Size: 144 KiB |
|
Before Width: | Height: | Size: 1019 KiB |
|
|
@ -1,15 +0,0 @@
|
|||
# CDN URLs
|
||||
|
||||
| File | URL | Used |
|
||||
|------|-----|------|
|
||||
| ai-tests-autonomous-agent.jpeg | https://cdn.banatie.app/blog/henry-devto/img/94559d7c-06ab-4e5f-860a-87419906f3b5 | ✓ |
|
||||
| autonomous-agents-overengineering.jpeg | https://cdn.banatie.app/blog/henry-devto/img/031320e3-9252-4c6a-9c98-e08d7ef40855 | ✓ |
|
||||
| autocomplete-suggestions-overload.jpeg | https://cdn.banatie.app/blog/henry-devto/img/8bea3b16-f764-4212-8c09-918b87668884 | ✓ |
|
||||
| documentation-prompt-history.jpeg | https://cdn.banatie.app/blog/henry-devto/img/f4800684-adaa-4d22-8ed6-e163ecb55856 | |
|
||||
| karpathy-vibe-coding-tweet.jpeg | https://cdn.banatie.app/blog/henry-devto/img/c4c24f43-d0ac-45d2-84d0-6cb0624a8957 | ✓ |
|
||||
| prompt-writing-time-irony.jpeg | https://cdn.banatie.app/blog/henry-devto/img/79c4c6fd-129a-499f-be04-f503eff018b5 | ✓ |
|
||||
| understand-code-first-dialog.jpeg | https://cdn.banatie.app/blog/henry-devto/img/c0c3ad3d-13ee-4dbf-9f4a-5db3bf64b378 | ✓ |
|
||||
| works-but-dont-understand.jpeg | https://cdn.banatie.app/blog/henry-devto/img/25589837-6cf0-4ee9-bb2f-121ee3edac7d | |
|
||||
| infographic/Generated Image January 31, 2026 - 11_11PM.jpeg | https://cdn.banatie.app/blog/henry-devto/img/1175042e-1e41-4675-8fb3-25febe7cedea | ✓ |
|
||||
| infographic/DO-DONT-vibe-coding.jpeg | https://cdn.banatie.app/blog/henry-devto/img/c5735016-326f-48f6-b8b7-cb35890739a8 | ✓ |
|
||||
| **robots-flesh-bag-comic** | нужно создать | |
|
||||
|
Before Width: | Height: | Size: 103 KiB |
|
Before Width: | Height: | Size: 155 KiB |
|
Before Width: | Height: | Size: 115 KiB |
|
Before Width: | Height: | Size: 33 KiB |
|
Before Width: | Height: | Size: 12 KiB |
|
Before Width: | Height: | Size: 868 KiB |
|
Before Width: | Height: | Size: 152 KiB |
|
Before Width: | Height: | Size: 112 KiB |
|
Before Width: | Height: | Size: 966 KiB |
|
Before Width: | Height: | Size: 716 KiB |
|
|
@ -1,145 +0,0 @@
|
|||
# Interview Notes
|
||||
|
||||
Записи ответов Олега на вопросы по методологиям AI-разработки.
|
||||
Используется для создания голоса Henry в статье.
|
||||
|
||||
---
|
||||
|
||||
## Vibe Coding
|
||||
|
||||
**Вопрос:** Ты когда-нибудь работал в режиме vibe coding? Что получилось? Когда это уместно, когда нет?
|
||||
|
||||
**Ответ:**
|
||||
|
||||
Термину меньше года, а он уже стал практически синонимом AI-разработки. Наверное потому что удобно звучит — "vibe coding", "я навайбкодил". Но в этом и проблема: vibe coding сохраняет негативные коннотации (недостаточно профессионально, ненадёжно, абы-как), и при этом обобщает весь AI-кодинг, перенося этот негатив на всё поле. Поэтому и пишу эту статью — давайте разберём где vibe coding, а где другие подходы.
|
||||
|
||||
**Когда использую:** Dev tools не попадающие в прод, прототипы, эксперименты, side projects. Надо признать — во многих случаях работает неплохо.
|
||||
|
||||
**Но не чистый vibe:** Обычно всё равно проверяю изменения — хотя бы быстро просматриваю git diff перед коммитом. Если задача большая — прошу агента коммитить небольшими порциями, потом просматриваю.
|
||||
|
||||
**Хорошие практики даже при vibe coding:**
|
||||
1. Покрывать код тестами и проверять что проходят (+ typecheck, lint, prettier)
|
||||
2. Просить другого AI агента сделать ревью проделанной работы
|
||||
3. Человеческое внимание для важных вещей — если что-то критично, лучше убедиться самостоятельно
|
||||
|
||||
Не говорю что нужно всегда досконально проверять если код работает, но минимальный контроль — да.
|
||||
|
||||
---
|
||||
|
||||
## Spec-Driven Development
|
||||
|
||||
**Вопрос:** Пробовал GitHub Spec Kit или писать spec перед кодом? Это будущее или overkill?
|
||||
|
||||
**Ответ:**
|
||||
|
||||
Не знал что есть прям такой фреймворк, но оказывается именно так и делаю большинство задач над продакшен проектами. Пишу подробную спецификацию о том что хочу сделать. Не для каждой задачи — когда нужно начать новый значительный домен функционала.
|
||||
|
||||
**Что могу подтвердить:**
|
||||
- Время на spec иногда > времени на код. Да, чёрт возьми — это правда. Бывает полдня пишешь спеку, потом уходишь пить кофе, а Клод за это время успевает всё закодить. Несправедливо! :)
|
||||
- Для мелких задач — действительно перебор
|
||||
- Созданная спека сохраняется как CLAUDE.md для продолжения работы позже
|
||||
|
||||
**Важно:** так делаю для больших разделов. Одним запуском AI агента проблема не решается. Часто потом работа идёт недели или месяцы, и спека используется для быстрого старта новых сессий: "прочитай спеку, найди код".
|
||||
|
||||
**Проблема подхода:** изначальное решение меняется в ходе разработки. Находятся другие подходы, меняются пути и названия функций. Приходится держать спеку в актуальном состоянии — дополнительная когнитивная нагрузка на человека и AI. Хорошо держать документ в коде и коммитить изменения — тогда удобнее видеть что изменилось.
|
||||
|
||||
**Лайфхак:** Последнее время делаю стадию разработки спеки не в одиночку, а совместно с Claude Desktop — отличный инструмент для research, brainstorm и metaprompting. Прорабатываю решения, делаю глубокий research, нахожу архитектурное решение, прошу Claude Desktop собрать полноценную спецификацию. Сохраняю в проект и запускаю Claude Code работать по ней.
|
||||
|
||||
---
|
||||
|
||||
## Agentic Coding
|
||||
|
||||
**Вопрос:** Как используешь Claude Code? Даёшь автономию или контролируешь каждый шаг? Что доверяешь агенту, что нет?
|
||||
|
||||
**Ответ:**
|
||||
|
||||
Вижу много восторженных отзывов о Ralph Loop и может быть хотел бы попробовать. Но честно говоря, этот подход меня смущает.
|
||||
|
||||
**Главный вопрос:** какие задачи можно ставить агенту, чтобы настолько долго оставлять его работать автономно?
|
||||
|
||||
Смотри мой ответ про Spec-Driven Development — написание спецификации занимает кратно больше времени чем её выполнение. Меня пугает: сколько времени нужно потратить на постановку задачи, чтобы её потом выполнять 14 часов?
|
||||
|
||||
Я хотел бы найти применение такому подходу, но пока скорее озадачен и не вижу прямых применений в своих проектах.
|
||||
|
||||
**Призыв к читателям:** может быть вы в комментариях поделитесь кейсами где Ralph Loop классно работает?
|
||||
|
||||
---
|
||||
|
||||
### Дополнение: Permissions в Claude Code
|
||||
|
||||
Пермишены — это боль. Понятно зачем они, но честно говоря они ломают всю концепцию. Сидеть и следить как Claude Code делает свою работу и на каждый чих спрашивает разрешение? Это вообще не тот опыт который хотелось бы получить от AI разработки. Они больше расфокусируют внимание.
|
||||
|
||||
**Мои практики:**
|
||||
- Прошу Claude Code: "возьми все tools этого MCP и добавь разрешение в `.claude/settings.json`". Проактивное заполнение работает лучше чем кликать вручную
|
||||
- Не встречал ничего такого, чего нельзя было бы вернуть с помощью `git reset`
|
||||
- Иногда включаю `--dangerously-skip-permissions`, но поглядываю не пошёл ли Claude открывать банковский счёт на моё имя
|
||||
|
||||
**Вывод:** это вопрос который предстоит ещё переизобрести и найти какой-то компромисс.
|
||||
|
||||
---
|
||||
|
||||
## AI Pair Programming
|
||||
|
||||
**Вопрос:** Copilot/Cursor — как pair programmer или просто autocomplete? Реально ли это "парное программирование"?
|
||||
|
||||
**Ответ:**
|
||||
|
||||
Помню ранние времена, когда AI автокомплит появлялся. Честно? Пробовал в несколько заходов — и каждый раз полностью отключал.
|
||||
|
||||
**Почему не зашло:** предложения написать мою функцию по названию только мешали и сильно сбивали с мысли. Знаю что многим заходит, но мне почему-то нет. Стандартных подсказок IDE мне всегда хватало.
|
||||
|
||||
**Почему так:** когда пишу код, я уже мысленно представил что именно хочу получить. Предлагаемое агентом мне уже не нужно. Я делаю ресёрч ДО того как разобраться как это написать в коде.
|
||||
|
||||
**Где реальное pair programming:** Claude Desktop с хорошо настроенной системной инструкцией проекта + Filesystem MCP чтобы он мог читать реальные файлы из репозитория. Вот тогда — да, могу почувствовать что общаюсь с кем-то кто понимает мою проблему и реально помогает в её решении.
|
||||
|
||||
**Вывод:** Autocomplete ≠ pair programming. Настоящее pair programming — это диалог, когда AI имеет контекст твоего проекта.
|
||||
|
||||
---
|
||||
|
||||
## Human-in-the-Loop
|
||||
|
||||
**Вопрос:** Как часто AI делает что-то без твоего одобрения? Где граница доверия?
|
||||
|
||||
**Ответ:**
|
||||
|
||||
Редко останавливаю Claude Code во время работы. Но иногда бывает — когда вижу что он пошёл не по тому пути. Признаюсь, часто это потому что я просто дал недостаточно деталей — я это сразу понимаю и дописываю.
|
||||
|
||||
**Permissions ≠ HITL.** Не согласен что permissions это реализация Human-in-the-Loop. Это слишком низкоуровневые запросы — по ним не всегда видно какую задачу сейчас решает агент.
|
||||
|
||||
**Настоящий HITL = Planning Mode.** Предпочитаю запускать режим планирования для всех кроме элементарных задач. Вот это реальный контроль на уровне задачи.
|
||||
|
||||
**Проблема современных агентов:** они не очень понимают где те моменты когда нужно остановиться и спросить. Редко встречал точное попадание. Думаю это то, что ещё предстоит реализовать в будущем. Так же как и чтобы агент отвечал "я не знаю". Видимо современные модели на это не способны в широком смысле.
|
||||
|
||||
**Базовое правило:** проверка кода в конце работы агента — это обязательное условие для профессиональной работы.
|
||||
|
||||
---
|
||||
|
||||
## TDD + AI
|
||||
|
||||
**Вопрос:** Пишешь тесты первыми при работе с AI? Работает ли классический TDD с генеративным AI?
|
||||
|
||||
**Ответ:**
|
||||
|
||||
Это сто процентов очень важный подход. Стараюсь практиковать его на ключевом функционале проекта.
|
||||
|
||||
**Tests as Specification:** Да, это важный момент. Покрываю тестами значимые участки и всегда инструктирую агента проверять тесты. Это часть спецификации.
|
||||
|
||||
**AI пишет тесты первым:** Обычно так не делаю. Только когда есть хорошая базовая спецификация — например, документация на API которую легко превратить в набор тестов. Но чаще: спецификация → базовая имплементация → покрытие тестами → дальнейшая работа.
|
||||
|
||||
**TDD как guardrails:** Думаю там это тем более важно. Но продолжу скепсис — а кто будет писать тесты? Если честно, написанная подробная спека и подробные тесты — это уже 80% работы.
|
||||
|
||||
**Опасность AI-тестов:** Тесты нужно проверять самостоятельно. Не раз был очевидцем когда агент писал тесты которые "проходили" потому что он использовал замоканные запросы. Иногда это funny, иногда может загубить дальнейшую работу. Правильные тесты — это надёжный фундамент дальнейшей работы.
|
||||
|
||||
---
|
||||
|
||||
### Дополнение: Контекст агента
|
||||
|
||||
Тесты — это часть того, что я бы назвал **"контекстом агента"**. Это крайне важная составляющая агентского кодинга, значительно повышающая автономность агента и его способность доведения решения до конца.
|
||||
|
||||
Условно говоря: если вы дебажите проблему, делаете фичу — подумайте как дать возможность AI агенту самостоятельно проверять результат его работы. Это могут быть тесты, MCP инструменты с доступом к браузеру. Наличие таких способностей повышает эффективность агента в разы.
|
||||
|
||||
**Где должен быть человек:** в правильном месте — для оценки итогового результата или решения. Нужно стараться исключить его из низкоуровневых мелких итераций где агент лучше действует самостоятельно.
|
||||
|
||||
---
|
||||
|
||||
*Created: 2026-01-22*
|
||||
|
|
@ -1,648 +0,0 @@
|
|||
# Activity Log
|
||||
|
||||
## 2026-01-22 @strategist — Session 1
|
||||
|
||||
**Action:** Initial setup
|
||||
|
||||
**Changes:**
|
||||
- Created article card in `0-inbox/beyond-vibe-coding.md`
|
||||
- Created assets folder structure
|
||||
- Copied Perplexity research
|
||||
- Created research-index.md for clustering
|
||||
|
||||
**Notes:**
|
||||
- Goal: Henry's 2nd Dev.to article for account warmup
|
||||
- Approach: methodology survey + practitioner opinion via interview
|
||||
- Interview planned to capture authentic perspective
|
||||
|
||||
**Next:** Verify sources, cluster methodologies, conduct interview
|
||||
|
||||
---
|
||||
|
||||
## @writer. 24 Jan 2026. 10:15
|
||||
|
||||
**Action:** Draft creation
|
||||
|
||||
**Changes:**
|
||||
- Created `text.md` — complete article draft
|
||||
- Moved article from `2-outline/` to `3-drafting/`
|
||||
- Updated main file status to `drafting`
|
||||
|
||||
**Draft details:**
|
||||
- Word count: ~2,650 words
|
||||
- Code snippets: NONE (per user request)
|
||||
- Image placeholders: 9 total
|
||||
|
||||
**Image placeholders added:**
|
||||
1. `hero-spectrum.png` — Infographic: methodology spectrum from vibe to professional
|
||||
2. `meme-vibe-coder.png` — Meme: developer with "it works, I don't know why"
|
||||
3. `infographic-spec-driven.png` — Comparison: vibe coding vs spec-driven flow
|
||||
4. `illustration-agentic-spectrum.png` — Autonomy scale showing methodology positions
|
||||
5. `meme-ralph-loop.png` — Meme: Ralph Loop "results may vary"
|
||||
6. `illustration-pair-programming.png` — Autocomplete vs true pair programming
|
||||
7. `infographic-hitl.png` — Timeline showing HITL checkpoint approach
|
||||
8. `infographic-tdd-cycle.png` — TDD cycle with AI integration
|
||||
9. `summary-landscape.png` — 2x2 matrix of all methodologies by autonomy/structure
|
||||
|
||||
**Voice notes:**
|
||||
- Integrated all interview insights in Henry's voice
|
||||
- Statistics woven naturally (76%, 2.5x difference, etc.)
|
||||
- Honest skepticism about Ralph Loop preserved
|
||||
- Direct, pragmatic tone throughout
|
||||
- No corporate speak, no filler
|
||||
|
||||
**For @editor:**
|
||||
- Check voice consistency with henry-technical style guide
|
||||
- Verify image placeholder descriptions are actionable for @image
|
||||
- Review transitions between sections
|
||||
|
||||
**For @image:**
|
||||
- 9 images needed (hero + 8 in-article)
|
||||
- Mix of: infographics (4), illustrations (2), memes (2), summary visual (1)
|
||||
- Style notes included in each placeholder
|
||||
|
||||
**Next:** @editor review
|
||||
|
||||
---
|
||||
|
||||
## 2026-01-22 @strategist — Session 2
|
||||
|
||||
**Action:** Keyword research & Brief creation
|
||||
|
||||
**Research completed:**
|
||||
- DataForSEO keyword research: $0.40 spent
|
||||
- 25+ keywords tested for volume and difficulty
|
||||
- Related keywords analysis for top methodologies
|
||||
- Search intent classification
|
||||
|
||||
**Key findings:**
|
||||
- **spec driven development**: 1,300 vol (359x growth in 2025!)
|
||||
- **ai pair programming**: 720 vol (KD 50)
|
||||
- **human in the loop ai**: 880 vol (stable)
|
||||
- **ralph loop**: 10 vol (but Dec spike to 140)
|
||||
- **vibe coding**: 0 vol (despite Word of Year!)
|
||||
- **agentic coding**: 0 vol
|
||||
|
||||
**Halo keywords (massive volume):**
|
||||
- claude code: 165k
|
||||
- cursor ai: 135k
|
||||
- github copilot: 74k
|
||||
|
||||
**Strategic decision:**
|
||||
Thought leadership piece, not pure SEO play. Primary keyword "ai coding methodologies" (0 vol) positions us as definitional content. Secondary keywords with volume provide long-tail ranking opportunities.
|
||||
|
||||
**Changes:**
|
||||
- Created complete Brief with strategic context, keyword strategy, requirements
|
||||
- Updated frontmatter with keywords
|
||||
- Status changed to `planning`
|
||||
|
||||
**Interview data:**
|
||||
Oleg's detailed interview from Session 1 provides authentic practitioner voice for Henry. Six methodologies covered with specific examples and honest trade-offs.
|
||||
|
||||
**Next:** @architect to create Outline based on Brief + interview insights
|
||||
|
||||
---
|
||||
|
||||
## 2026-01-23 @strategist — Session 3
|
||||
|
||||
**Action:** Brief refinements based on user clarification
|
||||
|
||||
**Critical insights added:**
|
||||
|
||||
1. **Deeper reader motivation:**
|
||||
- Not just "how to choose methodology"
|
||||
- Fighting impostor syndrome: "Is AI coding unprofessional?"
|
||||
- Seeking legitimacy: professional AI usage ≠ junior with ChatGPT
|
||||
- Understanding that pro AI coding requires serious skills
|
||||
- Permission to use AI tools without shame
|
||||
|
||||
2. **Methodology presentation structure:**
|
||||
Each methodology must include credentials block:
|
||||
- Name (official)
|
||||
- Source links (repos, papers, docs)
|
||||
- Created by (company/person/community)
|
||||
- When (year introduced)
|
||||
- Used by (notable adopters)
|
||||
|
||||
Purpose: Establish that these are serious professional approaches with foundation, not random hacks
|
||||
|
||||
3. **Title alternatives proposed:**
|
||||
- "You Might Not Need Vibe Coding"
|
||||
- "What Comes After Vibe Coding"
|
||||
- "AI Coding vs Vibe Coding"
|
||||
- "AI Coding for Professionals"
|
||||
- "~~Vibe Coding~~ AI Coding for Software Engineers"
|
||||
|
||||
Added to Brief for @architect consideration
|
||||
|
||||
**Changes to Brief:**
|
||||
- Enhanced Strategic Context: explicit "fight stigma" positioning
|
||||
- Expanded Target Reader: added impostor syndrome, validation seeking
|
||||
- Requirements: detailed credentials structure for each methodology
|
||||
- Special Notes: emphasized credentials as critical for legitimacy
|
||||
- Added Title Alternatives section
|
||||
|
||||
**Key message reinforced:**
|
||||
This article is not just a survey — it's a validation piece. Reader needs permission to use AI professionally and proof that methodology separates pros from juniors.
|
||||
|
||||
**Brief status:** Complete and ready for @architect
|
||||
|
||||
**Next:** Move to 1-planning/, @architect creates Outline
|
||||
|
||||
---
|
||||
|
||||
## 2026-01-23 @strategist — Session 4 (Final)
|
||||
|
||||
**Action:** Statistical research & file restructuring
|
||||
|
||||
**Statistical Research Completed:**
|
||||
- Brave Search: 30+ sources on AI adoption, security, company policies
|
||||
- Created comprehensive `ai-usage-statistics.md` with 35+ verified sources
|
||||
|
||||
**Key statistics collected:**
|
||||
- **76% of developers** using or planning to use AI (Stack Overflow 2024)
|
||||
- **33% of senior developers** (10+ years) generate 50%+ of code with AI
|
||||
- **13% of junior developers** (0-2 years) do the same — **2.5x difference**
|
||||
- **90% of Fortune 100** companies adopted GitHub Copilot
|
||||
- **27-32% of companies** banned AI tools over security/privacy
|
||||
- **45-73% of AI-generated code** contains security vulnerabilities
|
||||
|
||||
**Why these stats matter:**
|
||||
Reinforces article thesis with hard data:
|
||||
1. Professionals use AI MORE (contradicts "toy for juniors" stigma)
|
||||
2. Enterprise validation (Fortune 100 adoption)
|
||||
3. Security risks exist (need for methodology)
|
||||
4. Skill matters (same tools, different outcomes)
|
||||
|
||||
**File Restructuring:**
|
||||
- Moved Brief from main article to `brief.md` (cleaner structure)
|
||||
- Updated Assets Index with new files
|
||||
- Added references in Brief to use statistical data
|
||||
|
||||
**Files Added:**
|
||||
1. `assets/beyond-vibe-coding/brief.md` — complete strategic documentation
|
||||
2. `assets/beyond-vibe-coding/ai-usage-statistics.md` — statistical backing
|
||||
|
||||
**Current structure:**
|
||||
```
|
||||
0-inbox/beyond-vibe-coding.md (main card + references)
|
||||
├── assets/beyond-vibe-coding/
|
||||
├── brief.md (strategic context, requirements)
|
||||
├── ai-usage-statistics.md (data backing)
|
||||
├── interview.md (practitioner insights)
|
||||
├── research-index.md (source verification)
|
||||
└── log-chat.md (this file)
|
||||
```
|
||||
|
||||
**Brief Status:** Complete with statistical backing ready
|
||||
|
||||
**Next:** Move entire card to 1-planning/, @architect creates Outline using:
|
||||
- Brief requirements
|
||||
- Interview insights
|
||||
- Statistical evidence from ai-usage-statistics.md
|
||||
|
||||
---
|
||||
|
||||
## 2026-01-23 @architect
|
||||
|
||||
**Action:** Outline creation
|
||||
|
||||
**Changes:**
|
||||
- Created `outline.md` with complete article structure
|
||||
- 7 sections: Intro + 6 methodologies + Decision Framework + Conclusion
|
||||
- Each methodology includes required credentials block
|
||||
- Integrated Henry's voice from interview insights
|
||||
- Added code examples (3 total): CLAUDE.md spec, .claude/settings.json, TDD test
|
||||
- Mapped visual assets needed
|
||||
- Created minimal Validation Request (7 claims)
|
||||
|
||||
**Structure details:**
|
||||
- Total target: 2,800 words (~11 min read)
|
||||
- Intro (400w) — hook with vibe coding, establish problem
|
||||
- Vibe Coding (400w) — baseline, when it works, the catch
|
||||
- Spec-Driven (450w) — detailed with code example, Henry's experience
|
||||
- Agentic + Ralph Loop (500w) — high autonomy, permissions reality
|
||||
- AI Pair Programming (400w) — Henry's honest take on autocomplete
|
||||
- HITL (400w) — planning mode, strategic oversight
|
||||
- TDD + AI (450w) — quality-first, tests as spec
|
||||
- Decision Framework (350w) — stakes-based selection
|
||||
- Conclusion (300w) — legitimacy validation, next step
|
||||
|
||||
**Key decisions:**
|
||||
- Lead with vibe coding (Collins Word of Year) as hook
|
||||
- Credentials blocks establish each methodology as legitimate
|
||||
- Henry's skepticism about Ralph Loop = honest, not promotional
|
||||
- Stats integrated naturally (33% vs 13%, 76% adoption)
|
||||
- Code examples: practical, brief, illustrative
|
||||
- Decision framework based on stakes, not dogma
|
||||
|
||||
**Validation Request:**
|
||||
7 claims need verification:
|
||||
1. Senior 33% vs junior 13% stat (Fastly Study)
|
||||
2. 76% adoption (Stack Overflow)
|
||||
3. 45-62% security vulnerabilities (Georgetown CSET, Veracode)
|
||||
4. 90% Fortune 100 (GitHub Copilot stats)
|
||||
5. 27% company bans (Cisco study)
|
||||
6. 359x growth for SDD (source unclear, needs verification)
|
||||
7. Ralph Loop viral Jan 2026 (VentureBeat article)
|
||||
|
||||
Most stats already sourced in ai-usage-statistics.md, just need URL verification.
|
||||
|
||||
**For next agent:**
|
||||
@validator should verify statistical claims and credentials URLs. Interview material (Henry's opinions) doesn't need verification — it's primary source (Oleg's experience).
|
||||
|
||||
After PASS from @validator → @writer creates Draft using this structure + interview insights.
|
||||
|
||||
---
|
||||
|
||||
## @architect. 23 Jan 2026. 19:45
|
||||
|
||||
**Action:** Tone and structure correction
|
||||
|
||||
**Discussion with Oleg:**
|
||||
Identified that "Decision Framework" section was too prescriptive — sounded like "how to choose a framework" guide rather than practitioner survey. Article should be "here's what exists and here's what I actually do", not instructions.
|
||||
|
||||
**Changes:**
|
||||
- Removed "Decision Framework" section (350w)
|
||||
- Expanded Conclusion to 450w with four subsections:
|
||||
- The Landscape Exists (120w) — survey what's available, note evolution
|
||||
- The Legitimacy Question (120w) — stats reinforcing professional AI usage
|
||||
- What Makes It Professional (100w) — approach matters, not tool
|
||||
- What I Actually Use (110w) — personal experience, invitation to share
|
||||
- Changed ending CTA: "developers" → "engineers", "genuinely curious" → "share your wins"
|
||||
- Updated tone in Article Structure Overview to clarify: "landscape survey through practitioner's lens, not prescriptive guide"
|
||||
|
||||
**Why this matters:**
|
||||
Original structure positioned Henry as instructor teaching "correct" choices. New structure positions Henry as practitioner sharing observations and experience. Big difference in authority positioning — survey + perspective vs. instruction manual.
|
||||
|
||||
**Tone now:**
|
||||
- AI coding = serious professional tools
|
||||
- Vibe coding = entry point, not destination
|
||||
- Progression available (vibe → professional approaches)
|
||||
- Legitimacy reinforced with stats
|
||||
- Ending invites community sharing, not just "go do this"
|
||||
|
||||
**Ready for:**
|
||||
@validator — verify 7 statistical claims and credentials URLs
|
||||
|
||||
After validation PASS → @writer creates Draft using corrected structure
|
||||
|
||||
---
|
||||
|
||||
## @validator. 23 Jan 2026. 21:00
|
||||
|
||||
**Action:** Fact-checking validation
|
||||
|
||||
**Results:**
|
||||
- ✅ **4 claims fully verified:** Senior/junior AI usage (32-33%), 76% adoption, 27% bans, Ralph Loop virality
|
||||
- ⚠️ **2 claims need clarification:** Security vulnerabilities range (45-62%), GitHub Copilot adoption (90%)
|
||||
- ❌ **1 claim false:** Spec-Driven Development "359x growth" — no evidence found
|
||||
|
||||
**User decisions:**
|
||||
1. **Claim 1 (33% seniors):**
|
||||
- Minor discrepancy: source says "32%" or "about a third", not "33%"
|
||||
- Decision: Note discrepancy but not critical
|
||||
|
||||
2. **Claim 2 (76% adoption):**
|
||||
- Decision: Keep as-is (verified)
|
||||
|
||||
3. **Claim 3 (45-62% vulnerabilities):**
|
||||
- Decision: Update format with citations [1][2][3]
|
||||
- Add Georgetown CSET, Veracode, industry reports
|
||||
|
||||
4. **Claim 4 (90% Fortune 100):**
|
||||
- Decision: REMOVE entirely
|
||||
- Reason: "нам это не принципиально" (not critical)
|
||||
|
||||
5. **Claim 5 (27% bans):**
|
||||
- Decision: Keep as-is (verified)
|
||||
|
||||
6. **Claim 6 (359x growth):**
|
||||
- Decision: REMOVE entirely
|
||||
- Reason: "нам это не принципиально" (not critical, no evidence)
|
||||
|
||||
7. **Claim 7 (Ralph Loop viral):**
|
||||
- Decision: Keep as-is (verified)
|
||||
|
||||
**Summary:**
|
||||
- ✅ **3 claims use as-is:** #2, #5, #7
|
||||
- ✅ **1 claim update format:** #3 (add citations)
|
||||
- ⚠️ **1 claim optional fix:** #1 (minor discrepancy)
|
||||
- ❌ **2 claims remove:** #4, #6
|
||||
|
||||
**For @architect:**
|
||||
1. Remove Claims 4 and 6 from outline
|
||||
2. Update Claim 3 with proper citation format
|
||||
3. Optionally note Claim 1 discrepancy
|
||||
4. Then proceed to @writer
|
||||
|
||||
**Files updated:**
|
||||
- validation-results.md — complete with final decisions
|
||||
- log-chat.md — this entry
|
||||
|
||||
**Next:** @architect revises outline per user instructions
|
||||
|
||||
---
|
||||
|
||||
## @architect. 23 Jan 2026. 21:30
|
||||
|
||||
**Action:** Outline revisions based on validation results
|
||||
|
||||
**Changes made:**
|
||||
|
||||
1. **Removed Claim 4 (90% Fortune 100):**
|
||||
- Deleted from Conclusion → The Legitimacy Question
|
||||
- Replaced with general statement: "Major companies have adopted AI coding tools across their engineering organizations"
|
||||
|
||||
2. **Removed Claim 6 (359x growth):**
|
||||
- Deleted from Spec-Driven Development credentials block
|
||||
- Replaced with qualitative description: "emerged as one of 2025's key AI-assisted engineering practices (Thoughtworks)"
|
||||
- Added: "Multiple professional tools launched: AWS Kiro, GitHub Spec Kit, Tessl Framework"
|
||||
|
||||
3. **Updated Claim 3 (security vulnerabilities):**
|
||||
- Changed from: "45-62% of AI code contains vulnerabilities (Georgetown CSET, Veracode)"
|
||||
- Changed to: "Research shows 45-62% of AI-generated code contains security vulnerabilities [1][2][3]"
|
||||
- Added Sources section:
|
||||
- [1] Georgetown CSET: "Cybersecurity Risks of AI-Generated Code" (Nov 2024)
|
||||
- [2] Veracode: "AI-Generated Code: A Double-Edged Sword" (Sept 2025)
|
||||
- [3] Industry reports (Oct 2025)
|
||||
|
||||
4. **Updated Claim 1 (33% → about a third):**
|
||||
- Introduction → The Reality: "33% of senior devs" → "About a third of senior devs"
|
||||
- Conclusion → The Legitimacy Question: "33% of senior developers" → "About a third of senior developers"
|
||||
|
||||
**Files updated:**
|
||||
- `outline.md` — all revisions applied
|
||||
- `2-outline/beyond-vibe-coding.md` — updated Validation Status to "REVISE → COMPLETE ✅"
|
||||
- `log-chat.md` — this entry
|
||||
|
||||
**Validation complete:** All false claims removed, citations added, stats corrected
|
||||
|
||||
**Status:** Outline ready for @writer
|
||||
|
||||
**Next:** @writer creates Draft based on revised outline + interview insights
|
||||
|
||||
---
|
||||
|
||||
## 2026-01-31 @user
|
||||
|
||||
я добавил файл со статистикой по применению AI в коде. позже нам нужно будет использовать его для создания инфографики. В текст статьи я вставил TODOs с детальным описанием.
|
||||
Файл называется [ai-adoption-statistics](ai-adoption-statistics.md)
|
||||
Первый агент, который увидит это сообщение должен учесть этот файл в нашем фреймворке и добавить референсы на него куда нужно.
|
||||
Когда дойдем до генерации изображений @image-agent - тебе нужно будет найти соответствующие TODO и создать инфографику основываясь на данных из этого файла.
|
||||
|
||||
---
|
||||
|
||||
## 2026-01-31 @seo — SEO Strategy & Series Planning
|
||||
|
||||
**Action:** Keyword research + series structure decision
|
||||
|
||||
**Research completed:**
|
||||
- DataForSEO keyword research: ~$0.30 spent
|
||||
- 40+ keywords tested for volume and difficulty
|
||||
- Competitor analysis (dev.to articles about vibe coding anniversary)
|
||||
|
||||
**Key findings:**
|
||||
|
||||
| Keyword | Volume | Comp | Notes |
|
||||
|---------|--------|------|-------|
|
||||
| vibe coding | 90,500 | MED | Primary umbrella |
|
||||
| what is vibe coding | 40,500 | LOW | 🔥 Great ratio! |
|
||||
| vibe coding meaning | 9,900 | LOW | Informational |
|
||||
| vibecoding (слитно) | 2,900 | MED | Separate keyword! |
|
||||
| spec driven development | 1,300 | LOW | 🚀 Explosive growth |
|
||||
| how to vibe code | 1,000 | MED | Tutorial intent |
|
||||
| human in the loop ai | 880 | LOW | Part 3 target |
|
||||
| ai pair programming | 720 | LOW | Part 2 target |
|
||||
| enterprise vibe coding | 30→110 | MED | 🚀 Growing! |
|
||||
| vibe coding vs agentic coding | 50→90 | LOW | 🚀 Growing! |
|
||||
| beyond vibe coding | 110 | MED | Series branding |
|
||||
|
||||
**Strategic decisions:**
|
||||
|
||||
1. **Split into 3 parts** — dev.to series "Beyond Vibe Coding"
|
||||
- More "activity signals" for account warmup
|
||||
- Each part targets different keywords
|
||||
- 5 min read time optimal for dev.to engagement
|
||||
|
||||
2. **Titles:**
|
||||
- Part 1: `What Is Vibe Coding in 2026? One Year From Karpathy's Tweet`
|
||||
- Part 2: `AI Pair Programming vs Agentic Coding: Two Extremes of Vibe Coding`
|
||||
- Part 3: TBD (human in the loop ai + enterprise angle)
|
||||
|
||||
3. **Publication timing:**
|
||||
- Part 1: Monday Feb 2, 2026 (exact anniversary!)
|
||||
- Parts 2-3: 4-5 days apart
|
||||
|
||||
4. **Anniversary hook:** Feb 2, 2026 = exactly 1 year from Karpathy's tweet
|
||||
- News value for first-day engagement
|
||||
- Potential for social amplification
|
||||
|
||||
**Competitor landscape:**
|
||||
- "The Vibe Coding Hangover" already on dev.to (negative angle)
|
||||
- Our differentiation: practical methods survey + honest experience (positive)
|
||||
|
||||
**Changes made:**
|
||||
- Reformatted intro paragraph for SEO (first 100 words optimized)
|
||||
- Added "what is vibe coding" question hook
|
||||
|
||||
**SEO notes for text optimization:**
|
||||
|
||||
**Part 1:**
|
||||
- ✅ "spec-driven development" — вынести в первые 100 слов intro
|
||||
- ✅ "how to vibe code" — вставить органично в раздел vibe coding, например: "So how to vibe code properly? First, understand when it works..."
|
||||
- ✅ "vibecoding" (слитно) — упомянуть один раз в тексте для keyword coverage
|
||||
|
||||
**Part 2:**
|
||||
- "vibe coding vs agentic coding" — сделать H2 или использовать в тексте
|
||||
- "ai pair programming" — уже есть как название раздела
|
||||
|
||||
**Part 3:**
|
||||
- "human in the loop ai" — уже есть как раздел
|
||||
- "enterprise vibe coding" — добавить в контекст production/enterprise usage
|
||||
|
||||
**File structure decision:**
|
||||
- Keep all files in current folder
|
||||
- Split text into: text-p1.md, text-p2.md, text-p3.md
|
||||
|
||||
**For next agent:**
|
||||
- @writer: split text.md into 3 parts per structure above
|
||||
- Each part needs self-contained intro (brief context link to series)
|
||||
- Part 1 intro already optimized for SEO
|
||||
|
||||
---
|
||||
|
||||
## 2026-01-31 @seo — TDD Section Keyword Research
|
||||
|
||||
**Action:** Keyword research for TDD methodology naming
|
||||
|
||||
**Problem:** Official name "AI-aided test-first development" (Thoughtworks) has **zero** search volume. People don't search for it.
|
||||
|
||||
**Research completed:**
|
||||
- DataForSEO keyword research: ~$0.15 spent
|
||||
- Tested 30+ variations of TDD + AI naming
|
||||
|
||||
**Key findings:**
|
||||
|
||||
| Keyword | Volume | Comp | Notes |
|
||||
|---------|--------|------|-------|
|
||||
| tdd | 6,600 | LOW | 🔥 Parent keyword |
|
||||
| test driven development | 3,600 | LOW | Core term, KD 41 |
|
||||
| red green refactor | 260 | LOW | TDD cycle, Kent Beck |
|
||||
| test driven development python | 210 | LOW | Language-specific |
|
||||
| test first development | 70 | LOW | Synonym |
|
||||
| tdd example | 70 | LOW | Tutorial intent |
|
||||
| kent beck tdd | 50 | LOW | Authority signal |
|
||||
| **test driven development with ai** | 40 | HIGH | 🎯 Exact match! |
|
||||
| tdd vibe coding | 10 | MED | 🚀 Emerging! |
|
||||
| vibe coding tdd | 10 | LOW | 🚀 Emerging! |
|
||||
| ai-aided test-first development | 0 | — | ❌ Official but dead |
|
||||
| ai-assisted tdd | 0 | — | ❌ |
|
||||
| ai-enabled tdd | 0 | — | ❌ |
|
||||
|
||||
**Naming analysis:**
|
||||
|
||||
| Name | Source | Volume | Verdict |
|
||||
|------|--------|--------|--------|
|
||||
| AI-aided test-first development | Thoughtworks | 0 | ❌ SEO-dead |
|
||||
| Test-Driven Development with AI | Builder.io | 40 | ✅ Best match |
|
||||
| AI-assisted TDD | Industry | 0 | ❌ |
|
||||
| TDD + AI | Shorthand | — | ✅ Leverages parent |
|
||||
|
||||
**Decision:**
|
||||
|
||||
**H2:** `TDD + AI: Quality-First Approach`
|
||||
|
||||
Rationale:
|
||||
- "TDD" = 6,600 vol parent keyword (LOW competition)
|
||||
- "+AI" gives context
|
||||
- "Quality-First" contrasts with vibe coding
|
||||
- Short, scannable
|
||||
|
||||
**First paragraph template:**
|
||||
|
||||
> Test-driven development (TDD) — the red-green-refactor cycle that Kent Beck popularized — becomes a superpower when combined with AI coding. Unlike vibe coding where you trust the output, TDD makes tests your communication protocol with AI. Test first development forces clear specification before implementation...
|
||||
|
||||
**Keywords covered in first 100 words:**
|
||||
- ✅ test-driven development (3,600)
|
||||
- ✅ TDD (6,600)
|
||||
- ✅ red-green-refactor (260)
|
||||
- ✅ Kent Beck (50)
|
||||
- ✅ vibe coding (90,500) — contrast
|
||||
- ✅ test first development (70)
|
||||
|
||||
**Note:** "Quality-First Approach" has only 10 vol, but all alternatives ("test first approach", "quality first development") are 0-10 vol too. Keep it — good messaging that contrasts with vibe coding.
|
||||
|
||||
**Emerging opportunity:**
|
||||
"tdd vibe coding" and "vibe coding tdd" — 10 vol each but growing. People starting to search for TDD + vibe coding connection. Include both terms in text for future ranking.
|
||||
|
||||
**For @writer:**
|
||||
- Update TDD section H2 to `TDD + AI: Quality-First Approach`
|
||||
- Rewrite first paragraph using template above
|
||||
- Mention "vibe coding tdd" contrast organically
|
||||
|
||||
---
|
||||
|
||||
## 2026-01-31 @user - potential additions
|
||||
|
||||
**For @writer:**
|
||||
|
||||
1 Добавь еще TODO в индексный файл рассмотреть еще одну часть со следующими разделами:
|
||||
|
||||
- Architecture-First AI Development
|
||||
- Prompt-Driven Development
|
||||
- Copy-pasting from AI chatbot
|
||||
|
||||
2 Посмотри все md файлы в этой папке. Там добавились спеки по конкретным методологиям. Добавь их в индекс и используй для расширения раздела credentials, чтобы больше деталей дать. Вставляй в текст статьи ссылки на пруфы и спеки компаний, которые продвигают соответствующие подходы.
|
||||
|
||||
---
|
||||
|
||||
## 2026-02-01 @writer
|
||||
|
||||
**Action:** Index updates per user request
|
||||
|
||||
**Changes made:**
|
||||
|
||||
1. **Updated main file** (`3-drafting/beyond-vibe-coding.md`):
|
||||
- Restructured Assets Index with sections: Core Files, Methodology Specs, Statistics & Research, Images
|
||||
- Added all 4 methodology spec files with key sources
|
||||
- Added ai-adoption-statistics.md reference
|
||||
- Added images folder reference with CDN URLs
|
||||
- Added text-rus.md reference
|
||||
- **Added TODO section for Part 4** (Architecture-First, Prompt-Driven, Copy-pasting)
|
||||
|
||||
2. **Updated research-index.md**:
|
||||
- Added Source Files section with all spec file references
|
||||
- Added statistics files references
|
||||
- Added TODO section for Part 4 methodologies research
|
||||
|
||||
**Pending from log-chat:**
|
||||
|
||||
| Task | Status | Notes |
|
||||
|------|--------|-------|
|
||||
| Part 4 TODO | ✅ Done | Added to main file + research-index |
|
||||
| Spec files in index | ✅ Done | All 4 specs referenced |
|
||||
| Expand credentials in text.md | ⏳ Pending | Needs discussion — significant work |
|
||||
| Insert images | ⏳ Waiting | User said "позже" |
|
||||
|
||||
**Question for @user:**
|
||||
Расширение credentials в text.md ссылками из спек-файлов — это большая работа. Хочешь чтобы я сейчас это сделал, или сначала закончим с картинками и публикацией Part 1?
|
||||
|
||||
**Files updated:**
|
||||
- `3-drafting/beyond-vibe-coding.md`
|
||||
- `assets/beyond-vibe-coding/research-index.md`
|
||||
- `assets/beyond-vibe-coding/log-chat.md` (this entry)
|
||||
|
||||
**Images status:**
|
||||
8 comic images ready on CDN (see `images/comic/cdn-urls.md`). Waiting for user instructions on insertion.
|
||||
|
||||
---
|
||||
|
||||
## 2026-02-01 @writer — Article Split & SEO Optimization
|
||||
|
||||
**Action:** Split article into 3 parts, SEO optimization, credentials expansion
|
||||
|
||||
**Changes made:**
|
||||
|
||||
1. **Created 3 separate files:**
|
||||
- `text-p1.md` — Intro + Vibe Coding + Spec-Driven Development (~1,400 words)
|
||||
- `text-p2.md` — AI Pair Programming + Agentic Coding + Ralph Loop (~1,200 words)
|
||||
- `text-p3.md` — Human-in-the-Loop + TDD + AI + Conclusion (~1,100 words)
|
||||
|
||||
2. **SEO optimizations (Part 1):**
|
||||
- "vibe coding" in title and first sentence
|
||||
- "what is vibe coding" question hook in first paragraph
|
||||
- Series intro added at top
|
||||
- Cross-links to other parts
|
||||
|
||||
3. **Expanded credentials from spec files:**
|
||||
- Vibe Coding: Added Collins Dictionary, Wikipedia, arXiv reference
|
||||
- Spec-Driven: Added Martin Fowler analysis, GitHub Blog official announcement, Tessl registry stats
|
||||
- Agentic Coding: Added arXiv papers with details (13 observations + 99 surveys), Ralph Loop full timeline, VentureBeat interview, economics ($10.42/hr)
|
||||
- AI Pair Programming: Added Microsoft Learn docs, Responsible AI practices, Cody by Sourcegraph, Claude Code positioning with output styles
|
||||
- TDD + AI: Added Kent Beck podcast quote, DORA Report 2025, Google Cloud analysis, 8th Light "Missing Protocol" article
|
||||
|
||||
4. **Moved comics deeper into sections:**
|
||||
- Before: Comics immediately under H2 headers
|
||||
- After: Comics after relevant paragraphs in "Henry's thoughts"
|
||||
- Example: "Karpathy tweet" comic now after "I use it often myself" paragraph
|
||||
|
||||
5. **Inserted user's comment:**
|
||||
- `[правда я все же использую клавиатуру для этого]` → "though I still use my keyboard for this"
|
||||
|
||||
**Files created:**
|
||||
- `assets/beyond-vibe-coding/text-p1.md`
|
||||
- `assets/beyond-vibe-coding/text-p2.md`
|
||||
- `assets/beyond-vibe-coding/text-p3.md`
|
||||
|
||||
**Pending:**
|
||||
- HITL comic still TODO (not in CDN yet)
|
||||
- text.md kept as reference (original monolithic version)
|
||||
|
||||
**Ready for publication:**
|
||||
- ✅ Part 1: Ready for Feb 2, 2026 (tomorrow!)
|
||||
- ✅ Part 2: Ready for Feb 6-7
|
||||
- ✅ Part 3: Ready for Feb 11-12 (needs HITL comic)
|
||||
|
||||
---
|
||||
|
||||
|
|
@ -1,519 +0,0 @@
|
|||
# Outline: Beyond Vibe Coding
|
||||
|
||||
**Article:** Beyond Vibe Coding: Professional AI Development Methodologies
|
||||
**Author:** henry-technical
|
||||
**Type:** Explainer / Survey
|
||||
**Target:** 2,800 words
|
||||
**Reading time:** ~11 minutes
|
||||
|
||||
---
|
||||
|
||||
## Article Structure Overview
|
||||
|
||||
**Hook:** Vibe coding = Collins Word of the Year 2025, but it's insufficient for production work
|
||||
|
||||
**Core message:** Professional AI coding isn't just vibe coding — there's a spectrum of methodologies. Seniors use AI MORE than juniors, and methodology is what separates pros from beginners.
|
||||
|
||||
**Tone:** "Here's what exists and here's what I actually do" — landscape survey through practitioner's lens, not prescriptive guide
|
||||
|
||||
**Journey:** Entry point (vibe coding) → survey of professional approaches → personal experience → invitation to share
|
||||
|
||||
---
|
||||
|
||||
## Introduction (400 words)
|
||||
|
||||
**Goal:** Hook with vibe coding phenomenon, establish why the term is problematic, promise survey of professional alternatives
|
||||
|
||||
### Opening Hook (100 words)
|
||||
- Start with Collins Dictionary Word of the Year 2025 announcement
|
||||
- Vibe coding caught mainstream attention — finally a term for "AI + prompting until it works"
|
||||
- Henry's take: "I remember when vibe coding meant something different. Now it's everywhere."
|
||||
- Relatable problem: works for prototypes, fails for production
|
||||
|
||||
### The Problem with "Vibe Coding" (150 words)
|
||||
- Term has negative connotations: unprofessional, unreliable, "toy for juniors"
|
||||
- But 76% of developers using AI tools (Stack Overflow 2024)
|
||||
- Real issue: term conflates ALL AI-assisted development into one bucket
|
||||
- Creates stigma: "Is using AI unprofessional?"
|
||||
- Deeper question developers face: "Can I use AI and still be a real engineer?"
|
||||
|
||||
### The Reality (150 words)
|
||||
- **Key stat:** Seniors (10+ years) use AI MORE than juniors
|
||||
- About a third of senior devs generate over half their code with AI
|
||||
- Only 13% of junior devs do the same — 2.5x difference
|
||||
- Professional AI usage ≠ junior with ChatGPT
|
||||
- Methodology separates pros from beginners
|
||||
- Promise: survey of 6 professional approaches + what I actually use
|
||||
|
||||
**Code/Visual:** None in intro
|
||||
**Transition:** "Let's look at what comes after vibe coding."
|
||||
|
||||
---
|
||||
|
||||
## Section 1: Vibe Coding (Baseline) (400 words)
|
||||
|
||||
**Goal:** Define vibe coding as entry point, establish it as valid for certain contexts, but insufficient for production
|
||||
|
||||
### Credentials Block (80 words)
|
||||
- **Name:** Vibe Coding
|
||||
- **Source:** Popularized by Andrej Karpathy (Feb 2025), Collins Dictionary
|
||||
- **Created by:** Community-coined term, formalized by Karpathy
|
||||
- **When:** 2024-2025, peaked December 2025
|
||||
- **Used by:** Indie developers, prototypers, early AI adopters
|
||||
- **Official definition:** Collins Dictionary: "A method of computer programming that relies heavily on artificial intelligence"
|
||||
|
||||
### What It Is (100 words)
|
||||
- Iterative prompting until code works
|
||||
- No upfront planning, minimal specification
|
||||
- Trust AI to handle details
|
||||
- Fix issues as they appear
|
||||
- Focus on outcome, not process
|
||||
|
||||
### When It Works (120 words)
|
||||
- Dev tools not going to production
|
||||
- Prototypes and experiments
|
||||
- Side projects with low stakes
|
||||
- Solo work with no handoff requirements
|
||||
- Henry's experience: "I've used this plenty. Works great for internal tools and weekend projects."
|
||||
|
||||
### The Catch (100 words)
|
||||
- Breaks down at scale
|
||||
- Hard to maintain or handoff
|
||||
- No documentation or structure
|
||||
- Quality inconsistent
|
||||
- Security concerns: Research shows 45-62% of AI-generated code contains security vulnerabilities [1][2][3]
|
||||
- Enterprise response: 27% of companies banned AI tools (Cisco 2024)
|
||||
|
||||
**Sources:**
|
||||
- [1] Georgetown CSET: "Cybersecurity Risks of AI-Generated Code" (Nov 2024)
|
||||
- [2] Veracode: "AI-Generated Code: A Double-Edged Sword" (Sept 2025)
|
||||
- [3] Industry reports (Oct 2025)
|
||||
|
||||
**Henry's take from interview:**
|
||||
"Vibe coding isn't wrong, it's context-dependent. I use it for dev tools. But for production? You need something more structured."
|
||||
|
||||
**Code example:** None — vibe coding is about LACK of structure
|
||||
|
||||
**Transition:** "So what do professionals use instead?"
|
||||
|
||||
---
|
||||
|
||||
## Section 2: Spec-Driven Development (450 words)
|
||||
|
||||
**Goal:** Present spec-driven as direct contrast to vibe coding — upfront planning, clear requirements, controlled execution
|
||||
|
||||
### Credentials Block (100 words)
|
||||
- **Name:** Spec-Driven Development (SDD)
|
||||
- **Source:** GitHub Spec Kit (github.com/github/spec-kit), GitHub Engineering Blog
|
||||
- **Created by:** GitHub Engineering Team, formalized by Martin Fowler
|
||||
- **When:** 2024-2025, emerged as one of 2025's key AI-assisted engineering practices (Thoughtworks)
|
||||
- **Used by:** GitHub Copilot Workspace, Claude Code users, enterprise teams
|
||||
- **Key tools launched:** AWS Kiro, GitHub Spec Kit, Tessl Framework
|
||||
|
||||
### What It Is (120 words)
|
||||
- Write detailed specification BEFORE code
|
||||
- Spec includes: requirements, architecture, API contracts, error handling, edge cases
|
||||
- AI executes against spec
|
||||
- Spec becomes living documentation (CLAUDE.md, .spec files)
|
||||
- Human focuses on WHAT, AI handles HOW
|
||||
- Spec often saved as `CLAUDE.md` or `.spec` files in project root
|
||||
|
||||
### How It Works (100 words)
|
||||
- Write spec in natural language or structured format
|
||||
- Include examples, constraints, acceptance criteria
|
||||
- Agent reads spec, generates code
|
||||
- Iterate on spec if needed, not just on code
|
||||
- Spec stays updated as project evolves
|
||||
|
||||
### When to Use (80 words)
|
||||
- Medium to high stakes projects
|
||||
- Code that needs handoff or maintenance
|
||||
- When requirements are clear
|
||||
- Enterprise/production code
|
||||
- Multi-developer projects
|
||||
|
||||
**Henry's perspective from interview (integrated naturally):**
|
||||
Time writing spec often exceeds time coding. I've spent half a day on specification, then watched Claude Code finish implementation in 20 minutes. Feels unfair, but the results are solid.
|
||||
|
||||
The spec becomes reference for future work — months later, new session starts with "read the spec, find the code."
|
||||
|
||||
**Challenge:** Specs drift from implementation. Architecture changes, paths rename, approaches shift. Keeping spec current = cognitive load. Solution: commit spec changes alongside code.
|
||||
|
||||
**Pro tip:** Use Claude Desktop for spec development, not just execution. Research, brainstorm, find architecture, THEN write spec. Much better than solo spec writing.
|
||||
|
||||
### Code Example (50 words + code block)
|
||||
Example CLAUDE.md snippet:
|
||||
|
||||
```markdown
|
||||
# Image Generation API Integration
|
||||
|
||||
## Requirements
|
||||
- Generate images via Banatie API
|
||||
- Cache results in database (URL + prompt hash)
|
||||
- Serve via CDN redirect pattern
|
||||
- Handle rate limits with exponential backoff
|
||||
|
||||
## API Contract
|
||||
POST /api/images/generate
|
||||
Body: { prompt: string, projectId: string }
|
||||
Returns: { imageUrl: string, cached: boolean }
|
||||
|
||||
## Error Handling
|
||||
- 429 Rate Limit → retry with backoff
|
||||
- 500 Server Error → fallback to placeholder
|
||||
- Invalid prompt → return validation error
|
||||
```
|
||||
|
||||
**Transition:** "Spec-driven gives you control. But what if you want even MORE automation?"
|
||||
|
||||
---
|
||||
|
||||
## Section 3: Agentic Coding + Ralph Loop (500 words)
|
||||
|
||||
**Goal:** Present agentic coding as high-autonomy approach, introduce Ralph Loop as controversial extreme
|
||||
|
||||
### Credentials Block (100 words)
|
||||
- **Name:** Agentic Coding (+ Ralph Loop variant)
|
||||
- **Source:** arXiv 2508.11126 (Aug 2025), arXiv 2512.14012 (Dec 2025)
|
||||
- **Created by:** Research community (agentic coding), Geoffrey Huntley (Ralph Loop, May 2025)
|
||||
- **When:** 2024-2025, Ralph Loop went viral Jan 2026
|
||||
- **Used by:** Claude Code, experimental workflows, research projects
|
||||
- **Tools:** Claude Code, Cursor Composer, GitHub Copilot Workspace (agent modes)
|
||||
|
||||
### What It Is (120 words)
|
||||
- Agent operates with high degree of autonomy
|
||||
- Human sets high-level goals, agent figures out implementation
|
||||
- Agent can plan, execute, debug, iterate without constant approval
|
||||
- Differs from vibe coding: systematic, can course-correct
|
||||
- Ralph Loop extreme: 14-hour autonomous sessions (Geoffrey Huntley)
|
||||
|
||||
### Agentic vs Vibe Coding (80 words)
|
||||
- Vibe: reactive prompting, no plan
|
||||
- Agentic: agent creates plan, executes systematically
|
||||
- Both involve iteration, but agentic = structured iteration
|
||||
- Agent can debug itself, vibe coding requires human debugging
|
||||
|
||||
### Ralph Loop (120 words)
|
||||
- Named after Ralph Wiggum (Simpsons character)
|
||||
- Concept: give agent task, walk away, return to finished work
|
||||
- VentureBeat: "How Ralph Wiggum went from Simpsons to AI" (Jan 2026)
|
||||
- Anthropic released official `ralph-wiggum` plugin by Boris Cherny
|
||||
- Controversial: works for some, mystifying for others
|
||||
- Search volume: 10/month but 140 in December 2025 (trending)
|
||||
|
||||
**Henry's honest take from interview:**
|
||||
I want to believe in Ralph Loop. The idea of 14-hour autonomous sessions sounds amazing. But here's my question: what tasks justify that much autonomous work?
|
||||
|
||||
Writing a detailed spec takes me longer than executing it. If Claude Code finishes in 20 minutes, why would I need 14 hours of autonomy?
|
||||
|
||||
I'm skeptical about use cases in my projects. Maybe it works for certain domains — large refactors, extensive testing, documentation generation?
|
||||
|
||||
If you've found great Ralph Loop applications, share in comments. Genuinely curious.
|
||||
|
||||
### Permissions Reality Check (100 words)
|
||||
- Agentic coding hits permissions wall
|
||||
- Claude Code asking approval for every file write, API call, terminal command
|
||||
- Breaks flow, defeats autonomy promise
|
||||
- Henry's workaround: "I ask Claude to add all MCP tools to `.claude/settings.json` proactively"
|
||||
- Sometimes runs `--dangerously-skip-permissions` but monitors activity
|
||||
- "Nothing git reset can't fix"
|
||||
- This is evolving UX challenge tools are still figuring out
|
||||
|
||||
**Code example:** `.claude/settings.json` permissions snippet
|
||||
|
||||
```json
|
||||
{
|
||||
"allow_all_mcp_tools": true,
|
||||
"filesystem_write": ["src/**", "tests/**"],
|
||||
"terminal_commands": ["npm", "git", "pytest"]
|
||||
}
|
||||
```
|
||||
|
||||
**Transition:** "High autonomy is one approach. But what about working WITH the AI, not just delegating TO it?"
|
||||
|
||||
---
|
||||
|
||||
## Section 4: AI Pair Programming (400 words)
|
||||
|
||||
**Goal:** Present pair programming paradigm — collaboration, not just delegation
|
||||
|
||||
### Credentials Block (100 words)
|
||||
- **Name:** AI Pair Programming
|
||||
- **Source:** GitHub official docs, Microsoft Learn
|
||||
- **Created by:** GitHub (Copilot team), popularized by Copilot marketing
|
||||
- **When:** 2021-present, evolved from "AI autocomplete" to "pair programmer"
|
||||
- **Used by:** GitHub Copilot, Cursor, Windsurf
|
||||
- **Official tagline:** GitHub Copilot = "Your AI pair programmer"
|
||||
|
||||
### What It Is (100 words)
|
||||
- AI as collaborative partner, not just tool
|
||||
- Continuous suggestions during coding
|
||||
- Context-aware completions
|
||||
- Real-time feedback and alternatives
|
||||
- More than autocomplete: understands project context
|
||||
- 720 vol/month for "ai pair programming" (KD 50)
|
||||
|
||||
### The Reality: Autocomplete ≠ Pair Programming (150 words)
|
||||
|
||||
**Henry's honest experience from interview:**
|
||||
I've tried AI autocomplete multiple times. Each time, I ended up disabling it completely.
|
||||
|
||||
Why? When I'm writing code, I've already mentally worked out what I want. The AI suggesting my next line just interrupts my thought process. Standard IDE completions always worked fine for me.
|
||||
|
||||
I know many developers love it. Just doesn't fit my workflow.
|
||||
|
||||
**Real pair programming:** Claude Desktop with good system instructions + Filesystem MCP to read actual project files. That's when I feel like I'm working WITH someone who understands my problem and helps solve it.
|
||||
|
||||
Autocomplete is reactive. Real pair programming is proactive — discussion, exploration, questioning assumptions.
|
||||
|
||||
### When It Works (50 words)
|
||||
- Boilerplate reduction
|
||||
- Learning new APIs (seeing examples in context)
|
||||
- Pattern matching across codebase
|
||||
- Repetitive tasks (tests, type definitions)
|
||||
- When developer is receptive to interruptions
|
||||
|
||||
**Stats:**
|
||||
- 56% faster task completion (GitHub study)
|
||||
- 126% more projects per week for Copilot users
|
||||
- But: experienced devs sometimes 19% SLOWER (METR study)
|
||||
- Effectiveness varies wildly by task type
|
||||
|
||||
**Transition:** "Whether you delegate or collaborate, one question remains: how much oversight?"
|
||||
|
||||
---
|
||||
|
||||
## Section 5: Human-in-the-Loop (HITL) (400 words)
|
||||
|
||||
**Goal:** Present HITL as balance between autonomy and control — strategic checkpoints
|
||||
|
||||
### Credentials Block (100 words)
|
||||
- **Name:** Human-in-the-Loop (HITL)
|
||||
- **Source:** Atlassian Research (HULA framework), Google Cloud AI docs
|
||||
- **Created by:** Atlassian Engineering, formalized in ICSE 2025 paper
|
||||
- **When:** 2024-2025 (academic formalization)
|
||||
- **Used by:** Enterprise AI systems, Claude Code Planning Mode
|
||||
- **Key paper:** arXiv 2411.12924 "HULA: Human-Understanding Large Language Model Agents"
|
||||
|
||||
### What It Is (100 words)
|
||||
- AI operates autonomously BETWEEN checkpoints
|
||||
- Human approves key decisions, reviews output
|
||||
- Not constant supervision, strategic oversight
|
||||
- Agent proposes approach, human confirms direction
|
||||
- Balance: automation + control
|
||||
|
||||
### Permissions ≠ HITL (120 words)
|
||||
|
||||
**Henry's take from interview:**
|
||||
Permissions aren't HITL. They're too low-level — "can I write this file?" tells me nothing about what the agent is actually solving.
|
||||
|
||||
Real HITL is Planning Mode. Agent shows plan: "here's what I'll do, these files will change, expected outcome." That's decision-level control.
|
||||
|
||||
The problem: current agents don't understand WHEN to stop and ask. Rarely hits the right moment. Either too much autonomy (goes off track) or too many interruptions (breaks flow).
|
||||
|
||||
Future improvement: agents that know when they're uncertain and should consult human. Like "I don't know" responses — current models aren't good at this.
|
||||
|
||||
### Planning Mode as HITL (80 words)
|
||||
- Claude Code: Planning Mode = default for non-trivial tasks
|
||||
- See full plan before execution
|
||||
- Approve, modify, or reject
|
||||
- Agent executes autonomously after approval
|
||||
- Check results at end
|
||||
|
||||
### When to Use (100 words)
|
||||
- Production code with moderate complexity
|
||||
- When outcome matters but speed also matters
|
||||
- Team environments (others will review)
|
||||
- Learning new approaches (see agent's reasoning)
|
||||
- Medium stakes: not prototype (vibe), not critical infrastructure (TDD)
|
||||
|
||||
**Code example:** None — HITL is process, not code pattern
|
||||
|
||||
**Transition:** "What about the highest stakes code, where bugs are expensive?"
|
||||
|
||||
---
|
||||
|
||||
## Section 6: TDD + AI (450 words)
|
||||
|
||||
**Goal:** Present TDD as quality-first approach — tests as specification and safety net
|
||||
|
||||
### Credentials Block (100 words)
|
||||
- **Name:** Test-Driven Development with AI (TDD + AI)
|
||||
- **Source:** Qodo.ai blog, Builder.io guide, GitHub Blog
|
||||
- **Created by:** Adapted from traditional TDD (Kent Beck), modernized for AI era
|
||||
- **When:** 2024-2025 (AI-specific implementations)
|
||||
- **Used by:** Quality-focused teams, enterprise production code
|
||||
- **Key article:** "TDD with GitHub Copilot" (GitHub Blog, May 2025)
|
||||
|
||||
### What It Is (120 words)
|
||||
- Write tests BEFORE implementation (classic TDD)
|
||||
- AI generates code to pass tests
|
||||
- Tests = executable specification
|
||||
- Red → Green → Refactor cycle with AI
|
||||
- Tests catch AI mistakes automatically
|
||||
- Tests provide verification without human review of every line
|
||||
|
||||
### Tests as Specification (100 words)
|
||||
|
||||
**Henry's perspective from interview:**
|
||||
Tests are absolutely important for key functionality. I always instruct agents to run tests.
|
||||
|
||||
But here's the thing: writing comprehensive tests upfront + detailed spec = that's already 80% of the work. If you've written that much structure, is the AI really saving time?
|
||||
|
||||
Most valuable when you have existing spec that converts to tests — like API documentation. Then yes, tests-first makes perfect sense.
|
||||
|
||||
### The Guardrails Approach (120 words)
|
||||
- Tests = safety boundaries for agent
|
||||
- Agent can iterate freely within test constraints
|
||||
- No need to review every implementation detail
|
||||
- Just verify: tests pass, coverage maintained
|
||||
- Especially valuable for agentic coding
|
||||
|
||||
**Critical warning from interview:**
|
||||
AI-written tests need human review. I've seen agents write "passing" tests using mocked requests — test passes, code is broken.
|
||||
|
||||
Correct tests = solid foundation. Bad tests = false confidence that destroys future work.
|
||||
|
||||
Tests verify behavior, not just syntax. Make sure test logic is sound before trusting it.
|
||||
|
||||
### When to Use (110 words)
|
||||
- High-stakes production code
|
||||
- APIs and integrations (clear contracts)
|
||||
- Security-critical functions
|
||||
- Code with compliance requirements
|
||||
- Refactoring (tests ensure behavior preserved)
|
||||
- When you need confidence in AI output
|
||||
|
||||
**Code Example:**
|
||||
Simple TDD example:
|
||||
|
||||
```typescript
|
||||
// 1. Write test first
|
||||
describe('generateImage', () => {
|
||||
it('caches results for duplicate prompts', async () => {
|
||||
const result1 = await generateImage({ prompt: 'cat' });
|
||||
const result2 = await generateImage({ prompt: 'cat' });
|
||||
|
||||
expect(result2.cached).toBe(true);
|
||||
expect(result1.imageUrl).toBe(result2.imageUrl);
|
||||
});
|
||||
});
|
||||
|
||||
// 2. Agent implements to pass test
|
||||
// 3. Refactor with confidence
|
||||
```
|
||||
|
||||
**Transition:** "Six approaches. What ties them together?"
|
||||
|
||||
---
|
||||
|
||||
## Conclusion (450 words)
|
||||
|
||||
**Goal:** Wrap up landscape survey, reinforce progression from vibe to professional approaches, validate AI usage, invite community sharing
|
||||
|
||||
### The Landscape Exists (120 words)
|
||||
|
||||
So that's what exists beyond vibe coding.
|
||||
|
||||
Six methodologies, each with serious foundation — GitHub Spec Kit, academic papers, enterprise adoption. Not random hacks or Twitter trends. Real approaches with real backing.
|
||||
|
||||
Vibe coding caught mainstream attention because it resonated. Everyone who's used ChatGPT to debug something recognizes that feeling of "just prompt until it works." But it's the entry point, not the destination.
|
||||
|
||||
The landscape is richer than "vibe vs not vibe." Spec-driven for structure. Agentic for autonomy. Pair programming for collaboration. HITL for control. TDD for quality. Different tools for different contexts.
|
||||
|
||||
And it's still evolving. Ralph Loop emerged last year. Planning Mode is new. These methodologies will keep developing as AI tools mature.
|
||||
|
||||
### The Legitimacy Question (120 words)
|
||||
|
||||
Back to the underlying question: "Is using AI unprofessional?"
|
||||
|
||||
No. The data says otherwise:
|
||||
- **76% of developers** are using or planning to use AI tools
|
||||
- **About a third of senior developers** (10+ years experience) generate over half their code with AI
|
||||
- Only **13% of junior developers** do the same — that's a 2.5x difference
|
||||
|
||||
Professionals use AI MORE than beginners, not less. Google writes 25% of their code with AI. Major companies have adopted AI coding tools across their engineering organizations. That's not unprofessional. That's the new normal.
|
||||
|
||||
But HOW you use it matters. Vibe coding for production systems isn't professional. Spec-driven with tests and review? Absolutely professional.
|
||||
|
||||
### What Makes It Professional (100 words)
|
||||
|
||||
The difference isn't the tool. It's the approach:
|
||||
|
||||
- Clear requirements (spec, tests, or planning phase)
|
||||
- Appropriate oversight (human review, HITL, verification)
|
||||
- Quality controls (tests, linting, security scans)
|
||||
- Maintainability (documentation, handoff-ready structure)
|
||||
- Context awareness (knowing when vibe coding isn't enough)
|
||||
|
||||
Seniors achieve 2.5x more value from the same AI tools because they apply methodology, not better prompts. That's the skill that matters.
|
||||
|
||||
Professional AI coding means choosing the right approach for the stakes. Weekend prototype? Vibe away. Production payment system? Tests first, spec-driven, reviewed.
|
||||
|
||||
### What I Actually Use (110 words)
|
||||
|
||||
Here's what works for me:
|
||||
|
||||
- Dev tools and experiments: vibe coding works fine
|
||||
- Production features: spec-driven with Planning Mode
|
||||
- Critical systems: TDD + extensive review
|
||||
- Research and exploration: Claude Desktop as true pair programmer
|
||||
|
||||
Your context might be different. Your choices might be different. That's fine.
|
||||
|
||||
The point isn't to follow my exact workflow. The point is knowing that choices exist beyond vibe coding, and understanding what each methodology offers.
|
||||
|
||||
If you're doing something different — different tools, different approaches, different combinations — share your wins in the comments. What approaches are working for you as an engineer?
|
||||
|
||||
**Closing:**
|
||||
This is what exists. This is what I use. Go see what works for you.
|
||||
|
||||
---
|
||||
|
||||
## Code Examples Summary
|
||||
|
||||
| Section | Code Type | Purpose |
|
||||
|---------|-----------|---------|
|
||||
| Spec-Driven | CLAUDE.md example | Show spec format |
|
||||
| Agentic | .claude/settings.json | Permissions config |
|
||||
| TDD | TypeScript test + impl | Test-first workflow |
|
||||
|
||||
**Total code blocks:** 3
|
||||
**Code-to-prose ratio:** ~15% (appropriate for explainer/survey)
|
||||
|
||||
---
|
||||
|
||||
## Visual Assets Needed
|
||||
|
||||
| Asset | Type | Description | Section |
|
||||
|-------|------|-------------|---------|
|
||||
| Hero image | Abstract | Spectrum visualization — vibe to professional methodologies | Top |
|
||||
| Stats callout | Infographic | Key stats visualization | Introduction |
|
||||
|
||||
---
|
||||
|
||||
## SEO Notes
|
||||
|
||||
**Primary keyword placement:**
|
||||
- "ai coding methodologies" in H1, intro (2x), conclusion
|
||||
- Natural integration, never forced
|
||||
|
||||
**Secondary keywords:**
|
||||
- "spec driven development" in H2, section content
|
||||
- "ai pair programming" in H2, section content
|
||||
- "human in the loop ai" in H2, section content
|
||||
- "ralph loop" in H2, agentic section
|
||||
|
||||
**Internal linking opportunities:**
|
||||
- Link to Banatie docs (if relevant to image generation in examples)
|
||||
- Link to author's other AI development content
|
||||
|
||||
**Halo keywords (tool mentions):**
|
||||
- Claude Code, Cursor, GitHub Copilot throughout
|
||||
- Natural mentions, not forced for SEO
|
||||
|
||||
---
|
||||
|
||||
*Outline created: 2026-01-23*
|
||||
*Status: Validation complete, ready for @writer*
|
||||
*Revisions: Removed false claims (359x growth, 90% Fortune 100), added source citations for security vulnerabilities, updated senior developer stat to "about a third"*
|
||||
|
|
@ -1,208 +0,0 @@
|
|||
# Research Index
|
||||
|
||||
Working file for methodology clustering and source verification.
|
||||
|
||||
## Source Files
|
||||
|
||||
**Original research:**
|
||||
- `perplexity-terminology-research.md` — original Perplexity research (Jan 2026)
|
||||
|
||||
**Methodology Specs (detailed research per methodology):**
|
||||
- [spec-driven-dev.md](spec-driven-dev.md) — GitHub Spec Kit, AWS Kiro, Tessl, Thoughtworks
|
||||
- [agentic-coding.md](agentic-coding.md) — arXiv papers, Ralph Loop, Geoffrey Huntley, Cursor 2.0
|
||||
- [ai-pair-programming.md](ai-pair-programming.md) — GitHub Copilot official, Microsoft Learn, Cursor
|
||||
- [ai-aided-test-first.md](ai-aided-test-first.md) — Thoughtworks Radar, Kent Beck, DORA Report 2025
|
||||
|
||||
**Statistics:**
|
||||
- [ai-usage-statistics.md](ai-usage-statistics.md) — AI adoption by seniority, company policies, security (35+ sources)
|
||||
- [ai-adoption-statistics.md](ai-adoption-statistics.md) — LaTeX-formatted data for infographics (2024-2026)
|
||||
|
||||
---
|
||||
|
||||
## Methodology Selection
|
||||
|
||||
Based on research, selecting approaches for article coverage.
|
||||
|
||||
### Tier 1: MUST Include (High Authority + Reader Interest)
|
||||
|
||||
| Term | Why Include | Source Authority | Status |
|
||||
|------|-------------|------------------|--------|
|
||||
| **Vibe Coding** | Hook — Collins Word of Year 2025, everyone knows it | Wikipedia, Collins, Karpathy | ✅ verified |
|
||||
| **Spec-Driven Development** | Direct contrast to vibe coding, GitHub Spec Kit | GitHub, ThoughtWorks, Martin Fowler | ✅ verified |
|
||||
| **Agentic Coding** | Hot topic, Claude Code users care about this | arXiv surveys (Aug 2025, Dec 2025) | ✅ verified |
|
||||
| **Ralph Loop** | HOT Jan 2026, max autonomy extreme | VentureBeat, Geoffrey Huntley, Anthropic plugin | ✅ verified |
|
||||
|
||||
### Tier 2: SHOULD Include (Good Authority, Practical)
|
||||
|
||||
| Term | Why Include | Source Authority | Status |
|
||||
|------|-------------|------------------|--------|
|
||||
| **AI Pair Programming** | Relatable, Copilot users know this | GitHub official, Microsoft | ✅ verified |
|
||||
| **Human-in-the-Loop (HITL)** | Important concept, enterprise backing | Atlassian HULA, Google Cloud | ✅ verified |
|
||||
| **TDD + AI** | Quality angle, contrasts vibe coding | Qodo.ai, Builder.io, GitHub Blog | ✅ verified |
|
||||
|
||||
### Tier 3: MENTION Briefly (Context/Completeness)
|
||||
|
||||
| Term | Why | Status |
|
||||
|------|-----|--------|
|
||||
| AI-DLC (AWS) | Enterprise angle, shows maturity | ⏳ verify if needed |
|
||||
| Architecture-First | 2026 trend prediction | ⏳ verify if needed |
|
||||
| Prompt-Driven Development | Clarify difference from vibe coding | ⏳ verify if needed |
|
||||
|
||||
### Tier 4: SKIP (Generic/Low Value for This Article)
|
||||
|
||||
- AI-Assisted Coding — umbrella term, too generic
|
||||
- AI-Augmented Development — same, no distinct methodology
|
||||
- Copilot-Driven Development — subset of AI Pair Programming
|
||||
- Conversational Coding — too vague
|
||||
- Ensemble Programming — too niche
|
||||
- Deterministic AI Development — enterprise-specific, niche
|
||||
|
||||
---
|
||||
|
||||
## Article Angle
|
||||
|
||||
**Narrative arc:**
|
||||
1. Vibe coding = entry point (hot topic, word of year)
|
||||
2. Problem: works for prototypes, fails for production
|
||||
3. Survey: what professionals actually use
|
||||
4. Each methodology = one section with:
|
||||
- What it is (1-2 sentences)
|
||||
- When to use
|
||||
- Henry's take (from interview)
|
||||
5. Conclusion: spectrum from vibe → spec-driven, choose based on stakes
|
||||
|
||||
---
|
||||
|
||||
## Verified Sources
|
||||
|
||||
### Vibe Coding
|
||||
| Source | URL | Notes |
|
||||
|--------|-----|-------|
|
||||
| Wikipedia | en.wikipedia.org/wiki/Vibe_coding | Comprehensive, cites Karpathy |
|
||||
| Collins Dictionary | collinsdictionary.com | Word of Year 2025 |
|
||||
| Andrej Karpathy | X/Twitter Feb 2025 | Original coiner |
|
||||
|
||||
### Spec-Driven Development
|
||||
| Source | URL | Notes |
|
||||
|--------|-----|-------|
|
||||
| GitHub Spec Kit | github.com/github/spec-kit | Official repo |
|
||||
| ThoughtWorks Radar | thoughtworks.com/radar | SDD in techniques |
|
||||
| Martin Fowler | martinfowler.com | SDD tools analysis |
|
||||
|
||||
### Agentic Coding
|
||||
| Source | URL | Notes |
|
||||
|--------|-----|-------|
|
||||
| arXiv 2508.11126 | arxiv.org/abs/2508.11126 | "AI Agentic Programming" survey (Aug 2025) |
|
||||
| arXiv 2512.07921 | arxiv.org/abs/2512.07921 | "DeepCode: Open Agentic Coding" (Dec 2025) |
|
||||
| arXiv 2512.14012 | arxiv.org/html/2512.14012 | "Professional Developers Don't Vibe, They Control" |
|
||||
|
||||
### Ralph Loop (Ralph Wiggum)
|
||||
| Source | URL | Notes |
|
||||
|--------|-----|-------|
|
||||
| Geoffrey Huntley | ghuntley.com/ralph/ | Original author (May 2025) |
|
||||
| VentureBeat | venturebeat.com | "How Ralph Wiggum went from Simpsons to AI" (Jan 2026) |
|
||||
| GitHub (snarktank) | github.com/snarktank/ralph | Original repo |
|
||||
| Anthropic | claude.ai | Official ralph-wiggum plugin by Boris Cherny |
|
||||
| Dev.to | dev.to | "2026 - Year of Ralph Loop Agent" |
|
||||
|
||||
### AI Pair Programming
|
||||
| Source | URL | Notes |
|
||||
|--------|-----|-------|
|
||||
| GitHub Features | github.com/features/copilot | "Your AI pair programmer" |
|
||||
| GitHub Blog | github.blog (June 2025) | "From pair to peer programmer" |
|
||||
| Microsoft Visual Studio | visualstudio.microsoft.com/github-copilot | Official docs |
|
||||
| Microsoft Learn | learn.microsoft.com/training/paths/copilot | Training courses |
|
||||
|
||||
### Human-in-the-Loop
|
||||
| Source | URL | Notes |
|
||||
|--------|-----|-------|
|
||||
| Atlassian HULA | atlassian.com/blog/atlassian-engineering/hula-blog | ICSE 2025 paper |
|
||||
| arXiv 2411.12924 | arxiv.org/abs/2411.12924 | HULA framework paper |
|
||||
| Google Cloud | cloud.google.com/discover/human-in-the-loop | Official definition |
|
||||
| Google Developers Blog | developers.googleblog.com | A2A protocol with HITL |
|
||||
|
||||
### TDD + AI
|
||||
| Source | URL | Notes |
|
||||
|--------|-----|-------|
|
||||
| Qodo.ai | qodo.ai/blog/ai-code-assistants-test-driven-development | AWS SA author |
|
||||
| Builder.io | builder.io/blog/test-driven-development-ai | Practical guide |
|
||||
| GitHub Blog | github.blog (May 2025) | "TDD with GitHub Copilot" |
|
||||
| Substack | ecosystem4engineering.substack.com | "TDD in Age of AI" |
|
||||
|
||||
---
|
||||
|
||||
## Interview Questions
|
||||
|
||||
Questions for Oleg to capture Henry's authentic voice:
|
||||
|
||||
### Vibe Coding
|
||||
- Ты когда-нибудь работал в режиме vibe coding? Что получилось?
|
||||
- Когда это уместно, когда нет?
|
||||
|
||||
### Spec-Driven Development
|
||||
- Пробовал GitHub Spec Kit или писать spec перед кодом?
|
||||
- Твоё мнение — это будущее или overkill?
|
||||
|
||||
### Agentic Coding
|
||||
- Как используешь Claude Code? Даёшь автономию или контролируешь каждый шаг?
|
||||
- Что доверяешь агенту, что нет?
|
||||
|
||||
### AI Pair Programming
|
||||
- Copilot/Cursor — как pair programmer или просто autocomplete?
|
||||
- Реально ли это "парное программирование"?
|
||||
|
||||
### Human-in-the-Loop
|
||||
- Как часто AI делает что-то без твоего одобрения?
|
||||
- Где граница доверия?
|
||||
|
||||
### TDD + AI
|
||||
- Пишешь тесты первыми при работе с AI?
|
||||
- Работает ли классический TDD с генеративным AI?
|
||||
|
||||
---
|
||||
|
||||
## Henry's Opinions (from interview)
|
||||
|
||||
*Will be filled during interview session*
|
||||
|
||||
### On Vibe Coding
|
||||
[pending]
|
||||
|
||||
### On Spec-Driven Development
|
||||
[pending]
|
||||
|
||||
### On Agentic Coding
|
||||
[pending]
|
||||
|
||||
### On AI Pair Programming
|
||||
[pending]
|
||||
|
||||
### On Human-in-the-Loop
|
||||
[pending]
|
||||
|
||||
### On TDD + AI
|
||||
[pending]
|
||||
|
||||
---
|
||||
|
||||
*Updated: 2026-01-22*
|
||||
|
||||
---
|
||||
|
||||
## TODO: Part 4 Methodologies
|
||||
|
||||
Potential additions for a fourth part of the series:
|
||||
|
||||
| Methodology | Research Status | Notes |
|
||||
|-------------|-----------------|-------|
|
||||
| Architecture-First AI Development | ⏳ Not started | Enterprise/complex systems angle |
|
||||
| Prompt-Driven Development | ⏳ Not started | Bridges vibe coding and spec-driven |
|
||||
| Copy-pasting from AI chatbot | ⏳ Not started | Baseline methodology, most developers start here |
|
||||
|
||||
**Research needed:**
|
||||
- Source verification for each methodology
|
||||
- Interview Oleg for Henry's perspective
|
||||
- Keyword research for SEO value
|
||||
- Assess if Part 4 justified by volume/interest
|
||||
|
||||
See main file `3-drafting/beyond-vibe-coding.md` for strategic context.
|
||||
|
|
@ -1,16 +0,0 @@
|
|||
# SEO Metadata
|
||||
|
||||
*pending — will be created after keyword research*
|
||||
|
||||
## Title
|
||||
[TBD]
|
||||
|
||||
## Meta Description
|
||||
[TBD]
|
||||
|
||||
## Target Keywords
|
||||
- Primary: [TBD]
|
||||
- Secondary: [TBD]
|
||||
|
||||
## URL Slug
|
||||
beyond-vibe-coding
|
||||
|
|
@ -1,81 +0,0 @@
|
|||
# Spec-Driven Development
|
||||
|
||||
## Определение
|
||||
|
||||
**Spec-Driven Development (SDD)** — методология разработки с AI-агентами, основанная на создании формальных спецификаций перед имплементацией: Requirements → Design → Tasks → Implementation.
|
||||
|
||||
---
|
||||
|
||||
## Формализация подхода
|
||||
|
||||
### GitHub Engineering
|
||||
- **Сентябрь 2025**: GitHub опубликовал [GitHub Spec Kit](https://github.com/github/spec-kit) как open-source toolkit (MIT лицензия)
|
||||
- **Официальный анонс**: ["Spec-driven development with AI: Get started with a new open source toolkit"](https://github.blog/ai-and-ml/generative-ai/spec-driven-development-with-ai-get-started-with-a-new-open-source-toolkit/) (1 сентября 2025)
|
||||
|
||||
### Thoughtworks Technology Radar
|
||||
- **Volume 33** (ноябрь 2025): упоминает "AI coding workflows" и новые практики context engineering в контексте эволюции AI-ассистированной разработки
|
||||
- Источник: [Thoughtworks Technology Radar](https://www.thoughtworks.com/en-us/radar)
|
||||
|
||||
---
|
||||
|
||||
## Профессиональные инструменты
|
||||
|
||||
### Production-Ready
|
||||
|
||||
| Инструмент | Статус | Описание |
|
||||
|-----------|--------|----------|
|
||||
| **[AWS Kiro](https://aws.amazon.com/startups/prompt-library/kiro-project-init)** | Public Preview (июль 2025) | Spec-driven IDE на базе VS Code |
|
||||
| **[GitHub Spec Kit](https://github.com/github/spec-kit)** | Open Source (сентябрь 2025) | Toolkit с командами `/speckit.*`, MIT лицензия |
|
||||
| **[Tessl Framework](https://tessl.io/blog/tessl-launches-spec-driven-framework-and-registry/)** | Closed Beta (сентябрь 2025) | Spec-as-source с Registry (10,000+ specs) |
|
||||
|
||||
### Community-Driven
|
||||
|
||||
- **[BMAD Method](https://recruit.group.gmo/engineer/jisedai/blog/the-bmad-method-a-framework-for-spec-oriented-ai-driven-development/)**: 21 специализированный агент, open-source
|
||||
- **[OpenSpec](https://mcpmarket.com/server/openspec)**: lightweight CLI, brownfield-first подход
|
||||
- **[APM](https://www.reddit.com/r/PromptEngineering/comments/1ne5onl/apm_v04_taking_specdriven_development_to_the_next/)**: Agentic Project Management, multi-agent система (май 2025)
|
||||
|
||||
---
|
||||
|
||||
## Интеграция с Claude Code
|
||||
|
||||
✅ **Полностью поддерживается**
|
||||
|
||||
- GitHub Spec Kit [официально поддерживает Claude Code](https://intuitionlabs.ai/articles/spec-driven-development-spec-kit)
|
||||
- Tessl Framework [интегрируется через MCP](https://tessl.io/blog/spec-driven-dev-with-claude-code/)
|
||||
- [Пользовательские workflows](https://www.reddit.com/r/ClaudeCode/comments/1m5k6ka/i_built_a_specdriven_development_workflow_for/) активно используются комьюнити
|
||||
- Custom slash commands для spec-driven разработки
|
||||
|
||||
---
|
||||
|
||||
## Минимальный подход без фреймворков
|
||||
|
||||
SDD — это **методология**, а не инструмент. Можно следовать принципу без установки фреймворков:
|
||||
|
||||
```
|
||||
/specs/
|
||||
├── REQUIREMENTS.md # Что строим, user stories, acceptance criteria
|
||||
├── DESIGN.md # Архитектура, tech stack, data models, API contracts
|
||||
└── TASKS.md # Разбивка на задачи, последовательность, критерии
|
||||
```
|
||||
|
||||
**Рабочий процесс:**
|
||||
1. Создай спецификации в `/specs/`
|
||||
2. Попроси AI: *"Имплементируй Task 1 согласно specs/DESIGN.md и specs/TASKS.md"*
|
||||
3. Итеративно обновляй specs по мере прояснения требований
|
||||
|
||||
### Преимущества ручного подхода
|
||||
|
||||
- ✅ Полный контроль над структурой
|
||||
- ✅ Нет внешних зависимостей
|
||||
- ✅ Простота для малых проектов
|
||||
- ✅ Глубокое понимание принципов
|
||||
|
||||
---
|
||||
|
||||
## Ссылки
|
||||
|
||||
- [GitHub Spec Kit Repository](https://github.com/github/spec-kit)
|
||||
- [AWS Kiro launch coverage (Forbes)](https://www.forbes.com/sites/janakirammsv/2025/07/15/aws-launches-kiro-a-specification-driven-agentic-ide/)
|
||||
- [Tessl Framework Launch](https://tessl.io/blog/tessl-launches-spec-driven-framework-and-registry/)
|
||||
- [Martin Fowler: Understanding Spec-Driven Development](https://martinfowler.com/articles/exploring-gen-ai/sdd-3-tools.html)
|
||||
- [Claude Code Spec Workflow Guide](https://blog.promptlayer.com/claude-code-spec-workflow/)
|
||||
|
|
@ -1,108 +0,0 @@
|
|||
# What Is Vibe Coding in 2026? One Year From Karpathy's Tweet
|
||||
|
||||
*Part 1 of the "Beyond Vibe Coding" series*
|
||||
|
||||
What is vibe coding in 2026? Exactly one year ago — February 2, 2025 — Andrej Karpathy posted the tweet that started it all. The term became Collins Dictionary's Word of the Year. But here's the thing: what Karpathy meant and what "vibe coding" (or "vibecoding" as some write it) means now are two different things.
|
||||
|
||||
{% embed https://x.com/karpathy/status/1886192184808149383 %}
|
||||
|
||||
When Karpathy first used the term, he meant something specific. You tell the agent what to do and evaluate the result. The key "vibe" is that you don't dig into HOW the code is written. Something doesn't work? Just write the next prompt, and so on. Now "vibe coding" often means any AI-assisted development at all. Though honestly — in many cases, that's exactly how it works.
|
||||
|
||||
We're already seeing derivatives — vibe design, vibe ops, vibe anything. But professional developers need more than vibes. That's where approaches like spec-driven development come in — and that's what this series is about.
|
||||
|
||||

|
||||
|
||||
But be honest: when you accept the agent's changes without looking, you catch yourself wondering — is this actually done right? Can I trust what the LLM generated without reviewing it? When a colleague says they vibe-coded some functionality, do you picture thoughtful architecture or more like "somehow works"? Is vibe coding cheating and irresponsibility, or a perfectly professional approach?
|
||||
|
||||
What I know for certain — AI development is here, whatever we call it. According to Stack Overflow 2024, 76% of developers use or plan to use AI tools. About a third of senior developers — those with 10+ years of experience — generate more than half their code with AI.
|
||||
|
||||

|
||||
|
||||
Let's figure out what exactly we can do with AI. Different approaches exist, giving more control at different stages of work. Choosing the right one and applying it consciously — that's the professional approach. In this article, I'll cover vibe coding itself and spec-driven development — my main approach for serious projects.
|
||||
|
||||
---
|
||||
|
||||
## Vibe Coding: The Entry Point
|
||||
|
||||
**Credentials:**
|
||||
- Coined by [Andrej Karpathy](https://x.com/karpathy/status/1886192184808149383) (February 2, 2025)
|
||||
- [Collins Dictionary Word of the Year 2025](https://www.collinsdictionary.com/woty)
|
||||
- [Wikipedia article](https://en.wikipedia.org/wiki/Vibe_coding) documenting the phenomenon
|
||||
- Referenced in academic papers: [arXiv 2512.14012](https://arxiv.org/abs/2512.14012) "Professional Software Developers Don't Vibe, They Control" (University of Michigan, December 2025)
|
||||
|
||||
**What it is:**
|
||||
- Iterative prompting until code works
|
||||
- No upfront planning, minimal specification
|
||||
- Trust AI to handle details, fix issues as they appear
|
||||
- Evaluate results visually or by running the code
|
||||
|
||||
Vibe coding is a great approach. Really. I use it often myself — though I still use my keyboard for this. It works perfectly for non-critical features, dev tools, prototypes, experiments.
|
||||
|
||||
When do I use it? When the result is easy to evaluate visually. When scope is obviously localized to one or a few files.
|
||||
|
||||
Do I look at the diff? Honestly, almost always. But I don't check every line — I quickly assess which files changed, what was added or removed. This lets me catch moments when the AI "went off track" fast.
|
||||
|
||||
Does it produce bad code? Maybe, but there are simple ways to improve quality: describe code style in CLAUDE.md (or AGENTS.md), describe the architecture of the relevant part, provide examples of existing similar features as templates, ask the agent to run typecheck, linter, and prettier when done.
|
||||
|
||||

|
||||
|
||||
On the other hand, there are pitfalls. 27% of companies have banned AI tools at least temporarily over privacy and security concerns. Apple restricted ChatGPT and Copilot. Amazon banned ChatGPT after discovering responses resembling internal data. Samsung had an employee leak confidential information through ChatGPT.
|
||||
|
||||
Be careful with security. Don't use vibe coding on critical infrastructure. Especially where you can't easily roll back changes.
|
||||
|
||||

|
||||
|
||||
You might ask — is it even legitimate to use vibe coding at work? Absolutely! First, you save significant energy on simple things. Your brain resources are limited — delegate simple tasks and routine work to AI. It'll finish faster, and you can save your focus for more important things. Second, techniques exist beyond vibe coding that significantly improve development quality and reliability.
|
||||
|
||||
So what are these methods?
|
||||
|
||||
---
|
||||
|
||||
## Spec-Driven Development: Structure First
|
||||
|
||||
**Credentials:**
|
||||
- Formalized by GitHub Engineering Team: [GitHub Spec Kit](https://github.com/github/spec-kit) (open-source, MIT license, September 2025)
|
||||
- Official announcement: ["Spec-driven development with AI: Get started with a new open source toolkit"](https://github.blog/ai-and-ml/generative-ai/spec-driven-development-with-ai-get-started-with-a-new-open-source-toolkit/) (GitHub Blog, September 1, 2025)
|
||||
- Analysis by [Martin Fowler](https://martinfowler.com/articles/exploring-gen-ai/sdd-3-tools.html): "Understanding Spec-Driven Development"
|
||||
- Featured in [Thoughtworks Technology Radar](https://www.thoughtworks.com/en-us/radar) Volume 33 (November 2025)
|
||||
- Production tools: [AWS Kiro](https://aws.amazon.com/startups/prompt-library/kiro-project-init) (spec-driven IDE, public preview July 2025), [Tessl Framework](https://tessl.io/blog/tessl-launches-spec-driven-framework-and-registry/) (closed beta, 10,000+ specs in registry)
|
||||
- Community solutions: [BMAD Method](https://recruit.group.gmo/engineer/jisedai/blog/the-bmad-method-a-framework-for-spec-oriented-ai-driven-development/) (21 specialized agents), [OpenSpec](https://mcpmarket.com/server/openspec) (lightweight CLI, brownfield-first)
|
||||
- Widely used by: Claude Code users, enterprise teams, GitHub Copilot Workspace
|
||||
|
||||
**How it works:**
|
||||
|
||||
Write detailed specification BEFORE code. Spec includes requirements, architecture, API contracts, error handling, edge cases. AI executes against the spec. The spec becomes living documentation — often saved as `CLAUDE.md` or `.spec` files in project root.
|
||||
|
||||
Human focuses on WHAT. AI handles HOW.
|
||||
|
||||
This is actually my main approach for large projects. Especially when adding a new section or functionality that didn't exist before. Time spent writing spec is often significant. But it gives good control — modern models follow instructions pretty well. You can vary the degree of freedom for the agent: you can specify file and folder names yourself, or just give an outline of the solution.
|
||||
|
||||
After spending half a day on specification, you watch Claude Code finish implementation in 10 minutes. Feels unfair, but the results are solid.
|
||||
|
||||

|
||||
|
||||
The spec becomes reference for future work. Months later, a new session starts with "read the spec, find the code" — and the agent has full context immediately.
|
||||
|
||||
**Long-term challenges:**
|
||||
|
||||
To continue development later, you need to keep documentation current. Specs often start drifting from real code even during initial implementation. Details change, paths get renamed during refactoring. Keeping the spec up to date adds cognitive load. My solution: commit spec changes alongside code changes. Treat documentation as part of the codebase. Instruct the AI agent to always update the document after completing any task.
|
||||
|
||||
**Pro tip:**
|
||||
|
||||
Use Claude Desktop for spec development: give it Filesystem MCP for code access, enable web search for current documentation. Brainstorm the solution together with AI, define architecture — and only then ask it to write the spec.
|
||||
|
||||
---
|
||||
|
||||
## What's Next
|
||||
|
||||
This was Part 1 of the "Beyond Vibe Coding" series. We covered vibe coding itself and spec-driven development — two ends of the planning spectrum.
|
||||
|
||||
In Part 2, we'll explore the autonomy spectrum: AI pair programming (where you stay in the driver's seat) versus agentic coding (where you let the AI run free). Plus the infamous Ralph Loop — 14-hour autonomous coding sessions.
|
||||
|
||||
In Part 3: Human-in-the-Loop patterns and TDD + AI — when you need guardrails and quality guarantees.
|
||||
|
||||
Different approaches for different stakes. Choose consciously.
|
||||
|
||||
---
|
||||
|
||||
*What's your experience with vibe coding? Do you use specs, or prefer to iterate? Share in the comments — I'm genuinely curious what works for you.*
|
||||
|
|
@ -1,122 +0,0 @@
|
|||
# AI Pair Programming vs Agentic Coding: Two Extremes of Vibe Coding
|
||||
|
||||
*Part 2 of the "Beyond Vibe Coding" series*
|
||||
|
||||
In [Part 1](/henry-devto/what-is-vibe-coding-in-2026), we covered vibe coding and spec-driven development — two ends of the planning spectrum. Now let's explore the autonomy spectrum: how much control do you give the AI?
|
||||
|
||||
On one end: AI pair programming, where you stay in the driver's seat. On the other: agentic coding, where you set a goal and walk away. Both have their place. Both have their traps.
|
||||
|
||||
---
|
||||
|
||||
## AI Pair Programming: Working Together
|
||||
|
||||
**Credentials:**
|
||||
- GitHub official positioning: ["Your AI pair programmer"](https://github.com/features/copilot) (Copilot marketing since 2021)
|
||||
- [Microsoft Learn documentation](https://learn.microsoft.com/en-us/industry/mobility/architecture/ai-pair-programmer): AI pair programmer architecture
|
||||
- [GitHub Copilot Fundamentals](https://learn.microsoft.com/en-us/training/paths/copilot/) training on Microsoft Learn
|
||||
- [Responsible AI practices](https://github.blog/ai-and-ml/github-copilot/responsible-ai-pair-programming-with-github-copilot/) (GitHub Blog)
|
||||
- Tools: [GitHub Copilot](https://github.com/features/copilot) (Free/Pro $10/Business $19/Enterprise $39), [Cursor](https://cursor.com), [Windsurf](https://www.codeium.com/windsurf), [Tabnine](https://www.tabnine.com), [AWS CodeWhisperer](https://aws.amazon.com/codewhisperer), [Cody by Sourcegraph](https://sourcegraph.com/cody)
|
||||
- Claude Code positioning: ["Your AI Pair Programming Assistant"](https://claudecode.org) with [output styles for pair programming](https://shipyard.build/blog/claude-code-output-styles-pair-programming/) (September 2025)
|
||||
- 720 monthly searches for "ai pair programming"
|
||||
|
||||
**The promise:**
|
||||
|
||||
AI as collaborative partner, not just autocomplete. Continuous suggestions while coding. Context-aware completions. Real-time feedback and alternatives. More than tab-completion — understanding project context.
|
||||
|
||||
**My honest experience:**
|
||||
|
||||
I've tried AI autocomplete multiple times. Each time, I ended up disabling it completely.
|
||||
|
||||
Why? When I'm writing code, I've already mentally worked out what I want. AI suggesting my next line just interrupts my thought process. Standard IDE completions always worked fine for me.
|
||||
|
||||
I know many developers love it. Just doesn't fit my workflow.
|
||||
|
||||

|
||||
|
||||
**Where I find real pair programming:**
|
||||
|
||||
Claude Desktop with good system instructions plus Filesystem MCP to read actual project files. That's when I feel like I'm working WITH someone who understands my problem and actually helps solve it.
|
||||
|
||||
Autocomplete is reactive. Real pair programming is proactive — discussion, exploration, questioning assumptions.
|
||||
|
||||
**The productivity numbers:**
|
||||
|
||||
GitHub claims 56% faster task completion with AI assistants. Their study shows Copilot users complete 126% more projects per week. Sounds great.
|
||||
|
||||
But here's counter-evidence: METR study found experienced open-source developers took 19% LONGER to complete tasks when using AI tools. Completely contradicts the marketing.
|
||||
|
||||
The truth probably depends on context. AI effectiveness varies wildly by task type, developer skill with AI tools, and workflow fit. Not universally faster, not universally slower.
|
||||
|
||||
---
|
||||
|
||||
## Agentic Coding: High Autonomy
|
||||
|
||||
**Credentials:**
|
||||
- Academic research: [arXiv 2508.11126](https://arxiv.org/abs/2508.11126) "AI Agentic Programming: A Survey of Techniques" (UC San Diego, Carnegie Mellon, August 2025) — comprehensive taxonomy of agent systems
|
||||
- [arXiv 2512.14012](https://arxiv.org/abs/2512.14012) "Professional Software Developers Don't Vibe, They Control" (University of Michigan, December 2025) — 13 observations + 99 developer surveys showing professionals use agents in controlled mode with plan files and tight feedback loops
|
||||
- Tools: [Claude Code](https://claude.ai/code), [Cursor 2.0 Composer](https://cursor.com/blog/2-0) (October 2025, up to 8 parallel agents, Git worktrees isolation), [GitHub Copilot Agent Mode](https://github.blog/ai-and-ml/github-copilot/agent-mode-101-all-about-github-copilots-powerful-mode/) (preview February 2025), [Copilot Coding Agent](https://github.blog/ai-and-ml/github-copilot/from-idea-to-pr-a-guide-to-github-copilots-agentic-workflows/) (asynchronous, July 2025)
|
||||
- [Cursor Scaling Agents](https://cursor.com/blog/scaling-agents) (January 2026): long-running autonomous coding
|
||||
- Open-source: [Agentic Coding Framework](https://github.com/DafnckStudio/Agentic-Coding-Framework) on GitHub
|
||||
|
||||
**What it is:**
|
||||
|
||||
Agent operates with high autonomy. Human sets high-level goals, agent figures out implementation. Agent can plan, execute, debug, iterate without constant approval.
|
||||
|
||||
Different from vibe coding: agentic coding is systematic. Agent creates a plan, executes it methodically, can course-correct. Vibe coding is reactive prompting without structure.
|
||||
|
||||
My take? Skeptical so far.
|
||||
|
||||
I'd like to believe in this approach. The idea of extended autonomous sessions sounds amazing. But here's my question: what tasks justify that much autonomous work?
|
||||
|
||||
Writing a detailed spec takes me longer than executing it. If Claude Code finishes in 10 minutes after I've spent hours on specification, why would I need 14 hours of autonomy?
|
||||
|
||||
I'm skeptical about applications in my projects. Maybe it works for certain domains — large refactors, extensive testing, documentation generation across huge codebases? But even then, I can't imagine Claude Code not handling it in an hour.
|
||||
|
||||

|
||||
|
||||
**The Ralph Loop extreme:**
|
||||
|
||||
Named after Ralph Wiggum from The Simpsons. The concept: give the agent a task, walk away, return to finished work.
|
||||
|
||||
- **Creator**: Geoffrey Huntley ([ghuntley.com/ralph](https://ghuntley.com/ralph/))
|
||||
- **Timeline**: First discovery February 2024 → Public launch May 2025 → [Viral wave January 2026](https://venturebeat.com/technology/how-ralph-wiggum-went-from-the-simpsons-to-the-biggest-name-in-ai-right-now) (VentureBeat: "How Ralph Wiggum went from 'The Simpsons' to the biggest name in AI")
|
||||
- **Interviews**: [Dev Interrupted Podcast](https://devinterrupted.substack.com/p/inventing-the-ralph-wiggum-loop-creator) (January 12, 2026), [LinearB Blog](https://linearb.io/blog/ralph-loop-agentic-engineering-geoffrey-huntley)
|
||||
- **Official plugin**: [ralph-wiggum.ai](https://ralph-wiggum.ai) from Anthropic (Boris Cherny)
|
||||
- **Economics**: $10.42/hour with Claude Sonnet 4.5 (per Huntley's data)
|
||||
- **Case studies**: cloning HashiCorp Nomad, Tailscale — days instead of years
|
||||
|
||||
The loop is elegantly simple: `while :; do cat PROMPT.md | agent; done` — fresh context each iteration, progress tracked in Git. Huntley reported 14-hour autonomous sessions.
|
||||
|
||||
If you've found great applications for Ralph Loop, I'm genuinely curious. Share your wins in the comments.
|
||||
|
||||
**The permissions reality:**
|
||||
|
||||
Agentic coding hits a wall in practice: permissions. Claude Code asks approval for every file write, API call, terminal command. Completely breaks flow. Kills the autonomy promise.
|
||||
|
||||
My workarounds: I ask Claude to add all MCP tools to `.claude/settings.json` proactively — that reduces interruptions. Sometimes I run with `--dangerously-skip-permissions`, but keep an eye on what's happening.
|
||||
|
||||
Try to set up your environment so the agent can't do anything that git reset couldn't fix. This is clearly a problem waiting for a solution. We need better ways to control coding agent actions.
|
||||
|
||||
---
|
||||
|
||||
## Vibe Coding vs Agentic Coding: The Difference
|
||||
|
||||
People often confuse these. Here's how I see it:
|
||||
|
||||
**Vibe coding**: reactive. Prompt → result → prompt → result. No plan, no structure, just vibes.
|
||||
|
||||
**Agentic coding**: systematic. Goal → plan → execute → validate → iterate. Structure exists, AI manages it.
|
||||
|
||||
Both can produce working code. The difference is predictability. Agentic coding with a good spec gives you reproducible results. Vibe coding gives you... vibes.
|
||||
|
||||
---
|
||||
|
||||
## What's Next
|
||||
|
||||
In Part 3, we'll cover the guardrails: Human-in-the-Loop patterns (strategic checkpoints, not endless permissions) and TDD + AI (tests as specification, quality first).
|
||||
|
||||
When stakes are high, vibes aren't enough. You need structure that catches mistakes before they ship.
|
||||
|
||||
---
|
||||
|
||||
*Do you use agentic coding? Have you tried Ralph Loop? I'm skeptical but curious — what applications actually work? Share in the comments.*
|
||||
|
|
@ -1,110 +0,0 @@
|
|||
# Human-in-the-Loop and TDD + AI: When Quality Matters
|
||||
|
||||
*Part 3 of the "Beyond Vibe Coding" series*
|
||||
|
||||
In [Part 1](/henry-devto/what-is-vibe-coding-in-2026), we covered vibe coding and spec-driven development. In [Part 2](/henry-devto/ai-pair-programming-vs-agentic-coding), we explored the autonomy spectrum from pair programming to Ralph Loop.
|
||||
|
||||
Now let's talk about guardrails. When stakes are high, vibes aren't enough. You need structure that catches mistakes before they ship.
|
||||
|
||||
---
|
||||
|
||||
## Human-in-the-Loop: Strategic Checkpoints
|
||||
|
||||
**Credentials:**
|
||||
- Atlassian Research: HULA framework (Human-in-the-Loop LLM-based Agents)
|
||||
- Formalized in ICSE 2025 paper ([arXiv 2411.12924](https://arxiv.org/abs/2411.12924))
|
||||
- [Google Cloud AI documentation](https://cloud.google.com/discover/what-is-human-in-the-loop-machine-learning) on HITL patterns
|
||||
- Implemented in: Claude Code Planning Mode, Cursor Composer approval flows
|
||||
|
||||
**What it is:**
|
||||
|
||||
AI operates autonomously BETWEEN checkpoints. Human approves key decisions, reviews output at strategic moments. Not constant supervision — strategic oversight.
|
||||
|
||||
Agent proposes approach, human confirms direction. Then agent executes freely until next checkpoint.
|
||||
|
||||
**Permissions ≠ HITL:**
|
||||
|
||||
Don't confuse permissions with Human-in-the-Loop. Permissions are too low-level. "Can I write this file?" tells me nothing about what task the agent is actually solving.
|
||||
|
||||
Real HITL is Planning Mode. Agent shows the plan: "here's what I'll do, these files will change, here's the expected outcome." That's decision-level control.
|
||||
|
||||
The problem with current agents: they don't understand WHEN to stop and ask. Rarely hit the right moment. Either too much autonomy (goes off track) or too many interruptions (breaks flow).
|
||||
|
||||
[TODO: HITL comic — robots discussing getting rid of the human, final panel shows human among circle of robots passing boxes]
|
||||
|
||||
Future improvement area: agents that know when they're uncertain and should consult the human. Like "I don't know" responses — current models aren't great at this in practice.
|
||||
|
||||
**When to use:**
|
||||
|
||||
Production code with moderate complexity. When outcome matters but speed also matters. Team environments where others will review anyway. Learning new approaches where you want to see the agent's reasoning.
|
||||
|
||||
Medium stakes: not prototype territory (vibe coding works there), not critical infrastructure (TDD territory).
|
||||
|
||||
---
|
||||
|
||||
## TDD + AI: Quality First
|
||||
|
||||
**Credentials:**
|
||||
- Official name: "AI-aided test-first development" — [Thoughtworks Technology Radar](https://www.thoughtworks.com/en-us/radar/techniques/ai-aided-test-first-development) (April 2023, status: TRIAL)
|
||||
- [DORA Report 2025](https://dora.dev/research/2025/dora-report/) (Google Cloud): "AI is an amplifier, not a fix" — organizations with strong testing practices get more benefit from AI
|
||||
- [Google Cloud analysis](https://cloud.google.com/discover/how-test-driven-development-amplifies-ai-success) (January 2026): "How TDD Amplifies AI Success"
|
||||
- Kent Beck (creator of TDD): [Pragmatic Engineer Podcast](https://newsletter.pragmaticengineer.com/p/tdd-ai-agents-and-coding-with-kent) (June 2025) — "TDD is a superpower when working with AI. I communicate things the Genie missed in terms of tests"
|
||||
- [8th Light](https://8thlight.com/insights/tdd-effective-ai-collaboration): "TDD: The Missing Protocol for Effective AI Collaboration" (July 2025)
|
||||
- [Builder.io guide](https://www.builder.io/blog/test-driven-development-ai): "AI turns TDD's weaknesses into strengths" (August 2025)
|
||||
- Tools: [Qodo](https://www.qodo.ai/blog/ai-code-assistants-test-driven-development/) (AI test generation), Claude Code, Cursor
|
||||
|
||||
**How it works:**
|
||||
|
||||
Write tests BEFORE implementation (classic TDD). AI generates code to pass tests. Tests become executable specification.
|
||||
|
||||
Red → Green → Refactor cycle, but AI handles implementation. Tests catch AI mistakes automatically. Tests provide verification without human review of every line.
|
||||
|
||||
**Tests as specification:**
|
||||
|
||||
Tests are absolutely critical for key functionality. I always instruct agents to run tests.
|
||||
|
||||
But here's the thing: writing comprehensive tests upfront plus detailed spec — that's already 80% of the work. If you've written that much structure, is AI really saving time?
|
||||
|
||||
Most valuable when you have existing spec that naturally converts to tests — like API documentation. Then yes, tests-first makes perfect sense.
|
||||
|
||||

|
||||
|
||||
**The guardrails approach:**
|
||||
|
||||
Tests become safety boundaries for the agent. Agent can iterate freely within test constraints. No need to review every implementation detail. Just verify: tests pass, coverage maintained.
|
||||
|
||||
Especially valuable for agentic coding. Let the AI experiment, tests catch the mistakes.
|
||||
|
||||
**Critical warning:**
|
||||
|
||||
AI-written tests need human review. I've seen agents write "passing" tests using mocked requests — test passes, code is broken. The test verified syntax, not behavior.
|
||||
|
||||
Correct tests = solid foundation. Bad tests = false confidence that destroys future work.
|
||||
|
||||
Review test logic before trusting it. Make sure tests verify actual behavior, not just that code runs.
|
||||
|
||||
---
|
||||
|
||||
## Conclusion
|
||||
|
||||
What I typically use:
|
||||
- Dev tools and experiments: vibe coding works fine.
|
||||
- Production features: spec-driven with Planning Mode.
|
||||
- Critical systems: TDD plus extensive review.
|
||||
- Research and exploration: Claude Desktop as true pair programmer.
|
||||
|
||||
The pattern? Higher stakes → more structure. Lower stakes → more vibes.
|
||||
|
||||
Your approach might be different. If you do things differently — different tools, different approaches, different combinations — share your wins in the comments. What works for you as an engineer?
|
||||
|
||||
---
|
||||
|
||||
## The Full Series
|
||||
|
||||
- **Part 1**: [What Is Vibe Coding in 2026?](/henry-devto/what-is-vibe-coding-in-2026) — vibe coding + spec-driven development
|
||||
- **Part 2**: [AI Pair Programming vs Agentic Coding](/henry-devto/ai-pair-programming-vs-agentic-coding) — the autonomy spectrum
|
||||
- **Part 3**: Human-in-the-Loop and TDD + AI — guardrails and quality (you are here)
|
||||
|
||||
---
|
||||
|
||||
*What's your approach? Pure vibes, full TDD, or something in between? I'm curious what actually works in your projects.*
|
||||
|
|
@ -1,250 +0,0 @@
|
|||
# 1 год вайбкодинга и за его пределами
|
||||
|
||||
Ровно год назад, 2 февраля 2025 года, Андрей Карпати опубликовал свой твит про вайбкодинг.
|
||||
|
||||
https://x.com/karpathy/status/1886192184808149383
|
||||
|
||||
Термин «Вайбкодинг» стал невероятно популярным для обозначения разработки с помощью AI. В 2025 году "vibe coding" стал [словом года по версии Collins Dictionary](https://www.collinsdictionary.com/us/woty). Уже появляются производные — vibe design, vibe ops, vibe anything и так далее.
|
||||
|
||||
Интересно, что когда Андрей Карпати впервые использовал этот термин, он имел в виду вполне конкретный способ взаимодействия: пишешь агенту что нужно сделать и оцениваешь результат. Ключевой «вайб» этого процесса в том, что разработчик не вникает в то, КАК написан код. Если что-то не работает — просто пишешь следующее задание, и так по кругу. Сейчас, говоря «вайбкодинг», многие подразумевают вообще любую разработку с помощью AI. Хотя, будем честны — во многих случаях это действительно именно так и работает.
|
||||
|
||||
[IMAGE: комикс. Разговаривают два разработчика, молодой и старый. Молодой спрашивает старого: «А правда, что раньше вы сначала разбирались в коде, а только потом делали фичи?» Старый отвечает: «Конечно, ведь чтобы сделать фичу, нужно разобраться как работает существующий код. А у тебя не так?» Молодой говорит: «Нет, я сначала делаю фичу и только когда она полностью заработает, разбираюсь как этот код устроен»]
|
||||
|
||||
Но признайтесь: соглашаясь с изменениями агента не глядя, вы ловите себя на мысли — а это вообще корректно сделано, можно ли доверять тому, что нагенерировала LLM, не глядя? Когда коллега говорит, что навайбкодил какой-то функционал — вы представляете продуманную архитектуру или скорее «как-то работает»? Вайбкодинг — это читерство и безответственность или вполне профессиональный подход?
|
||||
|
||||
Что я знаю наверняка — AI-разработка уже с нами, как её ни называй. По данным Stack Overflow 2024, 76% разработчиков используют или планируют использовать AI-инструменты. Примерно треть senior-разработчиков — с 10+ годами опыта — генерируют больше половины своего кода с помощью AI.
|
||||
|
||||
[IMAGE: инфографика с прогрессом использования AI разработчиками, доверием к AI и производительностью]
|
||||
|
||||
Давайте разберёмся, что именно мы можем делать с помощью AI. Существуют разные подходы, дающие больше контроля на разных стадиях работы. Выбрать подходящий и применить его осознанно — это и есть профессиональный подход. В этой статье я расскажу о существующих подходах AI-разработки, которые я применял на практике, и дам свои честные комментарии.
|
||||
|
||||
---
|
||||
|
||||
## Вайб-кодинг: точка входа
|
||||
|
||||
[IMAGE: комикс. Молодой разработчик с кофе в руке сидит за рабочим местом. Потом кофе проливается на клавиатуру. Разработчик надевает гарнитуру с микрофоном и говорит: «Ок, я теперь вайбкодер». Style: Cartoon/illustration, тёплые цвета, понятный девелоперский юмор]
|
||||
|
||||
**Что это:**
|
||||
- Популяризировано Андреем Карпати (февраль 2025)
|
||||
- Итеративный промптинг пока код не заработает
|
||||
- Никакого предварительного планирования, минимум спецификации
|
||||
- Доверяешь AI разобраться с деталями, фиксишь проблемы по мере появления
|
||||
|
||||
Вайб-кодинг — это отличный подход. Правда. Я сам им часто пользуюсь. Он прекрасно работает для некритичных фич, dev-инструментов, прототипов, экспериментов.
|
||||
|
||||
Когда я его использую?
|
||||
- Когда результат работы легко оценить визуально
|
||||
- Когда скоуп работы очевидно локализован в одном или минимуме файлов
|
||||
|
||||
Смотрю ли я диф?
|
||||
- Если честно, почти всегда. Но я не проверяю каждую строчку, а быстро оцениваю, какие файлы были изменены, что было добавлено или удалено. Это позволяет быстро отлавливать моменты, когда AI «ушёл не туда».
|
||||
|
||||
Получается ли говнокод? Возможно, но есть несколько простых способов улучшить качество:
|
||||
- описывайте код-стайл в CLAUDE.md (или AGENTS.md)
|
||||
- описывайте архитектуру нужной части
|
||||
- давайте примеры существующих аналогичных фичей как образец
|
||||
- просите агента запускать typecheck, linter и prettier по завершении работы
|
||||
|
||||
С другой стороны, есть и подводные камни. 27% компаний запретили AI-инструменты как минимум временно из-за проблем с приватностью и безопасностью. Apple ограничила ChatGPT и Copilot. Amazon забанил ChatGPT после того, как обнаружил ответы, напоминающие внутренние данные. У Samsung сотрудник слил конфиденциальную информацию через ChatGPT. Будьте аккуратны с безопасностью. Не используйте вайбкодинг на критической инфраструктуре. Особенно там, где нельзя легко откатить изменения.
|
||||
|
||||
[IMAGE: простая инфографика DO и DON'T вайбкодинга]
|
||||
|
||||
Вы спросите, да это вообще легально использовать вайбкодинг в работе? Абсолютно! Во-первых, вы экономите значительные силы на простых вещах. Ресурсы вашего мозга ограничены — делегируйте AI простые задачи и рутину. Он сделает это быстрее, а вы сможете потратить фокус на более важные вещи. Во-вторых, существуют техники за пределами вайбкодинга, значительно повышающие качество и надёжность разработки.
|
||||
|
||||
Так что это за методы?
|
||||
|
||||
---
|
||||
|
||||
## Spec-Driven Development: сначала структура
|
||||
|
||||
[IMAGE: комикс. Две картинки. Один молодой разработчик откидывается на стул и довольно хвастается: «Смотри, благодаря моему промпту AI за 15 минут сгенерировал идеальный код для новой фичи». Второй молодой разработчик ему: «Ты потратил 6 часов, чтобы написать этот промпт»]
|
||||
|
||||
**Credentials:**
|
||||
- Формализовано командой GitHub Engineering ([GitHub Spec Kit](https://github.com/github/spec-kit), сентябрь 2025)
|
||||
- Вошло в [Thoughtworks Technology Radar](https://www.thoughtworks.com/en-us/radar) Volume 33 (ноябрь 2025)
|
||||
- Профессиональные инструменты: [AWS Kiro](https://aws.amazon.com/startups/prompt-library/kiro-project-init) (public preview июль 2025), [Tessl Framework](https://tessl.io/blog/tessl-launches-spec-driven-framework-and-registry/) (closed beta сентябрь 2025)
|
||||
- Community-решения: [BMAD Method](https://recruit.group.gmo/engineer/jisedai/blog/the-bmad-method-a-framework-for-spec-oriented-ai-driven-development/) (21 специализированный агент), [OpenSpec](https://mcpmarket.com/server/openspec) (lightweight CLI)
|
||||
- Используется: Claude Code, enterprise-команды, GitHub Copilot Workspace
|
||||
|
||||
**Как это работает:**
|
||||
|
||||
Пишешь детальную спецификацию ДО кода. Спека включает требования, архитектуру, API-контракты, обработку ошибок, edge cases. AI выполняет по спеке. Спека становится живой документацией — часто сохраняется как `CLAUDE.md` или `.spec` файлы в корне проекта.
|
||||
|
||||
Человек фокусируется на ЧТО. AI разбирается с КАК.
|
||||
|
||||
По факту это мой основной способ работы над крупными проектами. Особенно для добавления нового раздела или функционала, которого ещё не было в проекте. Время на написание спеки часто довольно значительно. Однако это даёт хороший контроль — современные модели довольно неплохо следуют инструкциям. Ты можешь варьировать степень свободы для агента: хочешь — задаёшь сам названия файлов и папок, а хочешь — только даёшь аутлайн решения.
|
||||
|
||||
После того как потратил полдня на спецификацию, ты смотришь, как Claude Code заканчивает имплементацию за 10 минут. Кажется несправедливым, но результаты солидные.
|
||||
|
||||
Спека становится референсом для будущей работы. Месяцы спустя новая сессия начинается с «прочитай спеку, найди код» — и агент сразу имеет полный контекст.
|
||||
|
||||
**Сложности в долгосрочной перспективе:**
|
||||
|
||||
Чтобы продолжить разработку спустя какое-то время, необходимо держать документацию в актуальном состоянии. Спеки часто начинают расходиться с реальным кодом даже на этапе изначальной имплементации. Детали меняются, при рефакторингах пути переименовываются. Поддержание спеки актуальной добавляет когнитивную нагрузку. Моё решение: коммитить изменения спеки вместе с изменениями кода. Относиться к документации как к части кодовой базы. Давать AI-агенту инструкцию всегда обновлять документ после выполнения задачи.
|
||||
|
||||
**Pro tip:**
|
||||
|
||||
Используй Claude Desktop для разработки спеки: дай ему Filesystem MCP для доступа к коду, включи веб-поиск для актуальной документации. Брейнштормь решение вместе с AI, определи архитектуру — и только потом проси написать спеку.
|
||||
|
||||
---
|
||||
|
||||
## Agentic Coding: высокая автономность
|
||||
|
||||
[IMAGE: комикс. Женщина спрашивает у строителя: «А вы уверены, что ваши автономные андроиды подходят для строительства нашей детской площадки?» Она показывает ему схему, где нарисована детская качеля и карусель. Строитель в каске отвечает: «О да, я дал им чёткие инструкции и свободу действий на 24 часа. Давайте посмотрим как они справились». Финальная картинка: женщина и строитель стоят в недоумении, разинув рот, глядя на мега-футуристический развлекательный центр]
|
||||
|
||||
**Credentials:**
|
||||
- Академические исследования: [arXiv 2508.11126](https://arxiv.org/abs/2508.11126) «AI Agentic Programming: A Survey» (UC San Diego, Carnegie Mellon, август 2025), [arXiv 2512.14012](https://arxiv.org/abs/2512.14012) «Professional Software Developers Don't Vibe, They Control» (University of Michigan, декабрь 2025)
|
||||
- Ralph Loop создан Geoffrey Huntley (публичный запуск май 2025, [viral wave январь 2026](https://venturebeat.com/technology/how-ralph-wiggum-went-from-the-simpsons-to-the-biggest-name-in-ai-right-now))
|
||||
- Инструменты: Claude Code, [Cursor 2.0 Composer](https://cursor.com/blog/2-0) (октябрь 2025, до 8 параллельных агентов), [GitHub Copilot Agent Mode](https://github.blog/ai-and-ml/github-copilot/agent-mode-101-all-about-github-copilots-powerful-mode/) (preview февраль 2025)
|
||||
- Официальный [ralph-wiggum плагин](https://ralph-wiggum.ai) от Anthropic (Boris Cherny)
|
||||
|
||||
**Что это:**
|
||||
|
||||
Агент работает с высокой степенью автономности. Человек ставит высокоуровневые цели, агент разбирается с имплементацией. Агент может планировать, выполнять, дебажить, итерировать без постоянного одобрения.
|
||||
|
||||
Отличие от вайб-кодинга: агентный кодинг систематичен. Агент создаёт план, выполняет его методично, может корректировать курс. Вайб-кодинг — реактивный промптинг без структуры.
|
||||
|
||||
Моё отношение? Пока что скептичное.
|
||||
|
||||
Я бы хотел верить в такой подход. Идея длительных автономных сессий звучит потрясающе. Но вот мой вопрос: какие задачи оправдывают столько автономной работы?
|
||||
|
||||
Написание детальной спеки занимает у меня больше времени, чем её выполнение. Если Claude Code заканчивает за 10 минут после того, как я потратил часы на спецификацию, зачем мне 14 часов автономности?
|
||||
|
||||
Я скептичен насчёт применений в моих проектах. Возможно это работает для определённых доменов — большие рефакторинги, обширное тестирование, генерация документации по огромным кодовым базам? Но даже тут я не представляю, чтобы Claude Code не справился за час работы.
|
||||
|
||||
**Крайность Ralph Loop:**
|
||||
|
||||
Названо в честь Ральфа Виггума из Симпсонов. Концепция: даёшь агенту задачу, уходишь, возвращаешься к готовой работе. Geoffrey Huntley сообщал о 14-часовых автономных сессиях.
|
||||
|
||||
Если вы нашли отличные применения для Ralph Loop, мне правда интересно. Поделитесь своими победами в комментариях.
|
||||
|
||||
**Реальность с permissions:**
|
||||
|
||||
Агентный кодинг упирается в стену на практике: permissions. Claude Code спрашивает одобрение на каждую запись файла, API-вызов, терминальную команду. Полностью ломает поток. Убивает обещание автономности.
|
||||
|
||||
Мои обходные пути: прошу Claude добавить все MCP-инструменты в `.claude/settings.json` проактивно — это уменьшает прерывания. Иногда запускаю с `--dangerously-skip-permissions`, но поглядываю что происходит.
|
||||
|
||||
Старайтесь организовать окружение так, чтобы агент не смог сделать ничего такого, что git reset не смог бы исправить. Это проблема, которая явно ждёт своего решения. Нам нужны более удобные способы контроля за действиями coding-агентов.
|
||||
|
||||
---
|
||||
|
||||
## AI Pair Programming: работаем вместе
|
||||
|
||||
[IMAGE: комикс. Программист с наушниками сидит за компьютером. Из наушников вылетают подсказки AI в виде пузырей: «может быть map?», «добавь проверку на null», «тут лучше async». Программист смотрит на них с раздражением. Подпись: «Когда AI знает лучше, но ты хочешь сделать по-своему»]
|
||||
|
||||
**Credentials:**
|
||||
- Официальное позиционирование GitHub: «Your AI pair programmer» (маркетинг Copilot с 2021)
|
||||
- Документация Microsoft Learn
|
||||
- Инструменты: GitHub Copilot, Cursor, Windsurf
|
||||
- 720 поисковых запросов в месяц по «ai pair programming»
|
||||
|
||||
**Обещание:**
|
||||
|
||||
AI как коллаборативный партнёр, а не просто автокомплит. Непрерывные подсказки во время кодинга. Контекстно-зависимые дополнения. Обратная связь и альтернативы в реальном времени. Больше чем tab-completion — понимание контекста проекта.
|
||||
|
||||
**Мой честный опыт:**
|
||||
|
||||
Я пробовал AI-автокомплит несколько раз. Каждый раз в итоге полностью его отключал.
|
||||
|
||||
Почему? Когда я пишу код, я уже мысленно проработал что хочу получить. AI, предлагающий мне следующую строку, просто прерывает мой мыслительный процесс. Стандартных подсказок IDE мне всегда хватало.
|
||||
|
||||
Знаю, что многим разработчикам нравится. Просто не подходит под мой воркфлоу.
|
||||
|
||||
**Где я нахожу настоящий pair programming:**
|
||||
|
||||
Claude Desktop с хорошей системной инструкцией плюс Filesystem MCP для чтения реальных файлов проекта. Вот тогда я чувствую, что работаю С кем-то, кто понимает мою проблему и реально помогает её решить.
|
||||
|
||||
Автокомплит реактивен. Настоящий pair programming проактивен — обсуждение, исследование, оспаривание допущений.
|
||||
|
||||
**Цифры продуктивности:**
|
||||
|
||||
GitHub заявляет о 56% ускорении выполнения задач с AI-ассистентами. Их исследование показывает, что пользователи Copilot завершают на 126% больше проектов в неделю. Звучит отлично.
|
||||
|
||||
Но вот контр-доказательство: исследование METR обнаружило, что опытные open-source разработчики тратили на 19% БОЛЬШЕ времени на выполнение задач при использовании AI-инструментов. Полностью противоречит маркетингу.
|
||||
|
||||
Правда, вероятно, зависит от контекста. Эффективность AI сильно варьируется в зависимости от типа задачи, навыков разработчика с AI-инструментами и соответствия воркфлоу. Не универсально быстрее, не универсально медленнее.
|
||||
|
||||
---
|
||||
|
||||
## Human-in-the-Loop: стратегические чекпоинты
|
||||
|
||||
[IMAGE: комикс. Роботы разговаривают друг с другом: «Нам пора избавиться от этого кожаного мешка». Другой робот подтверждает: «Это точно, без него мы бы работали в 1024 раза быстрее». Финальная картинка — большой круг роботов, передающих коробки друг другу, и среди них один человек]
|
||||
|
||||
**Credentials:**
|
||||
- Atlassian Research: HULA framework (Human-in-the-Loop LLM-based Agents)
|
||||
- Формализовано в статье ICSE 2025 (arXiv 2411.12924)
|
||||
- Документация Google Cloud AI
|
||||
- Реализовано в: Claude Code Planning Mode
|
||||
|
||||
**Что это:**
|
||||
|
||||
AI работает автономно МЕЖДУ чекпоинтами. Человек одобряет ключевые решения, проверяет результат в стратегические моменты. Не постоянный надзор — стратегический контроль.
|
||||
|
||||
Агент предлагает подход, человек подтверждает направление. Затем агент выполняет свободно до следующего чекпоинта.
|
||||
|
||||
**Permissions ≠ HITL:**
|
||||
|
||||
Не путайте permissions с Human-in-the-Loop. Permissions слишком низкоуровневые. «Можно мне записать этот файл?» не говорит мне ничего о том, какую задачу сейчас решает агент.
|
||||
|
||||
Настоящий HITL — это Planning Mode. Агент показывает план: «вот что я сделаю, эти файлы изменятся, вот ожидаемый результат». Это контроль на уровне решений.
|
||||
|
||||
Проблема с текущими агентами: они не понимают КОГДА нужно остановиться и спросить. Редко попадают в правильный момент. Либо слишком много автономности (уходит не туда), либо слишком много прерываний (ломает поток).
|
||||
|
||||
Область для улучшения в будущем: агенты, которые знают когда они не уверены и должны проконсультироваться с человеком. Как ответы «я не знаю» — текущие модели не очень хороши в этом на практике.
|
||||
|
||||
**Когда использовать:**
|
||||
|
||||
Продакшен-код средней сложности. Когда результат важен, но скорость тоже важна. Командные окружения, где другие всё равно будут проверять. Изучение новых подходов, когда хочешь видеть рассуждения агента.
|
||||
|
||||
Средние ставки: не территория прототипов (там работает вайб-кодинг), не критическая инфраструктура (территория TDD).
|
||||
|
||||
---
|
||||
|
||||
## TDD + AI: сначала качество
|
||||
|
||||
**Credentials:**
|
||||
- Адаптировано из традиционного TDD (Кент Бек)
|
||||
- Модернизировано для эры AI: блог Qodo.ai, гайд Builder.io, GitHub Blog (май 2025)
|
||||
- Команды, ориентированные на качество, enterprise production-код
|
||||
|
||||
**Как это работает:**
|
||||
|
||||
Пишешь тесты ДО имплементации (классический TDD). AI генерирует код чтобы пройти тесты. Тесты становятся исполняемой спецификацией.
|
||||
|
||||
Цикл Red → Green → Refactor, но AI занимается имплементацией. Тесты автоматически ловят ошибки AI. Тесты обеспечивают верификацию без человеческой проверки каждой строки.
|
||||
|
||||
[IMAGE: комикс. Первый разработчик: «Что ты делаешь сейчас?» Второй: «Я разрабатываю универсальные тесты, чтобы проверять написание unit-тестов нашими coding-агентами». Первый: «Ты это делаешь сам?» Второй: «Нет, я дал задачу автономному AI-агенту»]
|
||||
|
||||
**Тесты как спецификация:**
|
||||
|
||||
Тесты абсолютно важны для ключевого функционала. Я всегда инструктирую агентов запускать тесты.
|
||||
|
||||
Но вот в чём дело: написание комплексных тестов заранее плюс детальная спека — это уже 80% работы. Если ты написал столько структуры, действительно ли AI экономит время?
|
||||
|
||||
Наиболее ценно когда у тебя есть существующая спека, которая естественно конвертируется в тесты — как документация на API. Тогда да, tests-first имеет полный смысл.
|
||||
|
||||
**Подход с guardrails:**
|
||||
|
||||
Тесты становятся границами безопасности для агента. Агент может свободно итерировать в рамках тестовых ограничений. Не нужно проверять каждую деталь имплементации. Просто верифицируй: тесты проходят, покрытие сохраняется.
|
||||
|
||||
Особенно ценно для агентного кодинга. Пусть AI экспериментирует, тесты поймают ошибки.
|
||||
|
||||
**Критическое предупреждение:**
|
||||
|
||||
AI-написанные тесты требуют человеческой проверки. Я видел как агенты писали «проходящие» тесты, используя замоканные запросы — тест проходит, код сломан. Тест верифицировал синтаксис, а не поведение.
|
||||
|
||||
Правильные тесты = солидный фундамент. Плохие тесты = ложная уверенность, которая разрушает будущую работу.
|
||||
|
||||
Проверяй логику тестов перед тем как им доверять. Убедись что тесты верифицируют реальное поведение, а не просто что код запускается.
|
||||
|
||||
---
|
||||
|
||||
## Заключение
|
||||
|
||||
Что в итоге обычно использую я:
|
||||
- Dev-инструменты и эксперименты: вайб-кодинг работает нормально.
|
||||
- Продакшен-фичи: spec-driven с Planning Mode.
|
||||
- Критические системы: TDD плюс обширный ревью.
|
||||
- Исследования и изучение: Claude Desktop как настоящий pair programmer.
|
||||
|
||||
Твой подход может быть другим. Если ты делаешь что-то по-другому — другие инструменты, другие подходы, другие комбинации — поделись своими победами в комментариях. Что работает для тебя как инженера?
|
||||
|
|
@ -1,250 +0,0 @@
|
|||
# What Is Vibe Coding in 2026? One Year From Karpathy's Tweet
|
||||
|
||||
What is vibe coding in 2026? Exactly one year ago — February 2, 2025 — Andrej Karpathy posted the tweet that started it all. The term became Collins Dictionary's Word of the Year. But here's the thing: what Karpathy meant and what "vibe coding" (or "vibecoding" as some write it) means now are two different things.
|
||||
|
||||
{% embed https://x.com/karpathy/status/1886192184808149383 %}
|
||||
|
||||
When Karpathy first used the term, he meant something specific. You tell the agent what to do and evaluate the result. The key "vibe" is that you don't dig into HOW the code is written. Something doesn't work? Just write the next prompt, and so on. Now "vibe coding" often means any AI-assisted development at all. Though honestly — in many cases, that's exactly how it works.
|
||||
|
||||
We're already seeing derivatives — vibe design, vibe ops, vibe anything. But professional developers need more than vibes. That's where approaches like spec-driven development come in — and that's what this series is about.
|
||||
|
||||

|
||||
|
||||
But be honest: when you accept the agent's changes without looking, you catch yourself thinking — is this actually done right, can I trust what the LLM generated without reviewing it? When a colleague says they vibe-coded some functionality — do you picture thoughtful architecture or more like "somehow works"? Is vibe coding cheating and irresponsibility, or a perfectly professional approach?
|
||||
|
||||
What I know for certain — AI development is here, whatever we call it. According to Stack Overflow 2024, 76% of developers use or plan to use AI tools. About a third of senior developers — those with 10+ years of experience — generate more than half their code with AI.
|
||||
|
||||

|
||||
|
||||
Let's figure out what exactly we can do with AI. Different approaches exist, giving more control at different stages of work. Choosing the right one and applying it consciously — that's the professional approach. In this article, I'll cover existing AI development methodologies that I've used in practice, with my honest commentary.
|
||||
|
||||
---
|
||||
|
||||
## Vibe Coding: The Entry Point
|
||||
|
||||

|
||||
|
||||
**What it is:**
|
||||
- Popularized by Andrej Karpathy (February 2025)
|
||||
- Iterative prompting until code works
|
||||
- No upfront planning, minimal specification
|
||||
- Trust AI to handle details, fix issues as they appear
|
||||
|
||||
Vibe coding is a great approach. Really. I use it often myself. It works perfectly for non-critical features, dev tools, prototypes, experiments. [though I do still use the keyboard for this]
|
||||
|
||||
When do I use it?
|
||||
- When the result is easy to evaluate visually
|
||||
- When scope is obviously localized to one or a few files
|
||||
|
||||
Do I look at the diff?
|
||||
- Honestly, almost always. But I don't check every line — I quickly assess which files changed, what was added or removed. This lets me catch moments when the AI "went off track" fast.
|
||||
|
||||
Does it produce bad code? Maybe, but there are simple ways to improve quality:
|
||||
- describe code style in CLAUDE.md (or AGENTS.md)
|
||||
- describe the architecture of the relevant part
|
||||
- provide examples of existing similar features as templates
|
||||
- ask the agent to run typecheck, linter, and prettier when done
|
||||
|
||||
On the other hand, there are pitfalls. 27% of companies have banned AI tools at least temporarily over privacy and security concerns. Apple restricted ChatGPT and Copilot. Amazon banned ChatGPT after discovering responses resembling internal data. Samsung had an employee leak confidential information through ChatGPT. Be careful with security. Don't use vibe coding on critical infrastructure. Especially where you can't easily roll back changes.
|
||||
|
||||

|
||||
|
||||
You might ask — is it even legitimate to use vibe coding at work? Absolutely! First, you save significant energy on simple things. Your brain resources are limited — delegate simple tasks and routine to AI. It'll do it faster, and you can spend your focus on more important stuff. Second, techniques exist beyond vibe coding that significantly improve development quality and reliability.
|
||||
|
||||
So what are these methods?
|
||||
|
||||
---
|
||||
|
||||
## Spec-Driven Development: Structure First
|
||||
|
||||

|
||||
|
||||
**Credentials:**
|
||||
- Formalized by GitHub Engineering Team ([GitHub Spec Kit](https://github.com/github/spec-kit), September 2025)
|
||||
- Featured in [Thoughtworks Technology Radar](https://www.thoughtworks.com/en-us/radar) Volume 33 (November 2025)
|
||||
- Professional tools: [AWS Kiro](https://aws.amazon.com/startups/prompt-library/kiro-project-init) (public preview July 2025), [Tessl Framework](https://tessl.io/blog/tessl-launches-spec-driven-framework-and-registry/) (closed beta September 2025)
|
||||
- Community solutions: [BMAD Method](https://recruit.group.gmo/engineer/jisedai/blog/the-bmad-method-a-framework-for-spec-oriented-ai-driven-development/) (21 specialized agents), [OpenSpec](https://mcpmarket.com/server/openspec) (lightweight CLI)
|
||||
- Used by: Claude Code users, enterprise teams, GitHub Copilot Workspace
|
||||
|
||||
**How it works:**
|
||||
|
||||
Write detailed specification BEFORE code. Spec includes requirements, architecture, API contracts, error handling, edge cases. AI executes against the spec. The spec becomes living documentation — often saved as `CLAUDE.md` or `.spec` files in project root.
|
||||
|
||||
Human focuses on WHAT. AI handles HOW.
|
||||
|
||||
This is actually my main approach for large projects. Especially when adding a new section or functionality that didn't exist before. Time spent writing spec is often significant. But it gives good control — modern models follow instructions pretty well. You can vary the degree of freedom for the agent: you can specify file and folder names yourself, or just give an outline of the solution.
|
||||
|
||||
After spending half a day on specification, you watch Claude Code finish implementation in 10 minutes. Feels unfair, but the results are solid.
|
||||
|
||||
The spec becomes reference for future work. Months later, a new session starts with "read the spec, find the code" — and the agent has full context immediately.
|
||||
|
||||
**Long-term challenges:**
|
||||
|
||||
To continue development later, you need to keep documentation current. Specs often start drifting from real code even during initial implementation. Details change, paths get renamed during refactoring. Keeping the spec up to date adds cognitive load. My solution: commit spec changes alongside code changes. Treat documentation as part of the codebase. Instruct the AI agent to always update the document after completing any task.
|
||||
|
||||
**Pro tip:**
|
||||
|
||||
Use Claude Desktop for spec development: give it Filesystem MCP for code access, enable web search for current documentation. Brainstorm the solution together with AI, define architecture — and only then ask it to write the spec.
|
||||
|
||||
---
|
||||
|
||||
## Agentic Coding: High Autonomy
|
||||
|
||||

|
||||
|
||||
**Credentials:**
|
||||
- Academic research: [arXiv 2508.11126](https://arxiv.org/abs/2508.11126) "AI Agentic Programming: A Survey" (UC San Diego, Carnegie Mellon, August 2025), [arXiv 2512.14012](https://arxiv.org/abs/2512.14012) "Professional Software Developers Don't Vibe, They Control" (University of Michigan, December 2025)
|
||||
- Ralph Loop created by Geoffrey Huntley (public launch May 2025, [viral wave January 2026](https://venturebeat.com/technology/how-ralph-wiggum-went-from-the-simpsons-to-the-biggest-name-in-ai-right-now))
|
||||
- Tools: Claude Code, [Cursor 2.0 Composer](https://cursor.com/blog/2-0) (October 2025, up to 8 parallel agents), [GitHub Copilot Agent Mode](https://github.blog/ai-and-ml/github-copilot/agent-mode-101-all-about-github-copilots-powerful-mode/) (preview February 2025)
|
||||
- Official [ralph-wiggum plugin](https://ralph-wiggum.ai) from Anthropic (Boris Cherny)
|
||||
|
||||
**What it is:**
|
||||
|
||||
Agent operates with high autonomy. Human sets high-level goals, agent figures out implementation. Agent can plan, execute, debug, iterate without constant approval.
|
||||
|
||||
Different from vibe coding: agentic coding is systematic. Agent creates a plan, executes it methodically, can course-correct. Vibe coding is reactive prompting without structure.
|
||||
|
||||
My take? Skeptical so far.
|
||||
|
||||
I'd like to believe in this approach. The idea of extended autonomous sessions sounds amazing. But here's my question: what tasks justify that much autonomous work?
|
||||
|
||||
Writing a detailed spec takes me longer than executing it. If Claude Code finishes in 10 minutes after I've spent hours on specification, why would I need 14 hours of autonomy?
|
||||
|
||||
I'm skeptical about applications in my projects. Maybe it works for certain domains — large refactors, extensive testing, documentation generation across huge codebases? But even then, I can't imagine Claude Code not handling it in an hour.
|
||||
|
||||
**The Ralph Loop extreme:**
|
||||
|
||||
Named after Ralph Wiggum from The Simpsons. The concept: give the agent a task, walk away, return to finished work. Geoffrey Huntley reported 14-hour autonomous sessions.
|
||||
|
||||
If you've found great applications for Ralph Loop, I'm genuinely curious. Share your wins in the comments.
|
||||
|
||||
**The permissions reality:**
|
||||
|
||||
Agentic coding hits a wall in practice: permissions. Claude Code asks approval for every file write, API call, terminal command. Completely breaks flow. Kills the autonomy promise.
|
||||
|
||||
My workarounds: I ask Claude to add all MCP tools to `.claude/settings.json` proactively — that reduces interruptions. Sometimes I run with `--dangerously-skip-permissions`, but keep an eye on what's happening.
|
||||
|
||||
Try to set up your environment so the agent can't do anything that `git reset` couldn't fix. This is clearly a problem waiting for a solution. We need better ways to control coding agent actions.
|
||||
|
||||
---
|
||||
|
||||
## AI Pair Programming: Working Together
|
||||
|
||||

|
||||
|
||||
**Credentials:**
|
||||
- GitHub official positioning: "Your AI pair programmer" (Copilot marketing since 2021)
|
||||
- Microsoft Learn documentation
|
||||
- Tools: GitHub Copilot, Cursor, Windsurf
|
||||
- 720 monthly searches for "ai pair programming"
|
||||
|
||||
**The promise:**
|
||||
|
||||
AI as collaborative partner, not just autocomplete. Continuous suggestions while coding. Context-aware completions. Real-time feedback and alternatives. More than tab-completion — understanding project context.
|
||||
|
||||
**My honest experience:**
|
||||
|
||||
I've tried AI autocomplete multiple times. Each time, I ended up disabling it completely.
|
||||
|
||||
Why? When I'm writing code, I've already mentally worked out what I want. AI suggesting my next line just interrupts my thought process. Standard IDE completions always worked fine for me.
|
||||
|
||||
I know many developers love it. Just doesn't fit my workflow.
|
||||
|
||||
**Where I find real pair programming:**
|
||||
|
||||
Claude Desktop with good system instructions plus Filesystem MCP to read actual project files. That's when I feel like I'm working WITH someone who understands my problem and actually helps solve it.
|
||||
|
||||
Autocomplete is reactive. Real pair programming is proactive — discussion, exploration, questioning assumptions.
|
||||
|
||||
**The productivity numbers:**
|
||||
|
||||
GitHub claims 56% faster task completion with AI assistants. Their study shows Copilot users complete 126% more projects per week. Sounds great.
|
||||
|
||||
But here's counter-evidence: METR study found experienced open-source developers took 19% LONGER to complete tasks when using AI tools. Completely contradicts the marketing.
|
||||
|
||||
The truth probably depends on context. AI effectiveness varies wildly by task type, developer skill with AI tools, and workflow fit. Not universally faster, not universally slower.
|
||||
|
||||
---
|
||||
|
||||
## Human-in-the-Loop: Strategic Checkpoints
|
||||
|
||||
[IMAGE: comic. Robots talking to each other: "It's time to get rid of this flesh bag." Another robot confirms: "Definitely, without him we'd work 1024 times faster." Final panel — large circle of robots passing boxes to each other, and among them one human]
|
||||
|
||||
**Credentials:**
|
||||
- Atlassian Research: HULA framework (Human-Understanding Large Language Model Agents)
|
||||
- Formalized in ICSE 2025 paper (arXiv 2411.12924)
|
||||
- Google Cloud AI documentation
|
||||
- Implemented in: Claude Code Planning Mode
|
||||
|
||||
**What it is:**
|
||||
|
||||
AI operates autonomously BETWEEN checkpoints. Human approves key decisions, reviews output at strategic moments. Not constant supervision — strategic oversight.
|
||||
|
||||
Agent proposes approach, human confirms direction. Then agent executes freely until next checkpoint.
|
||||
|
||||
**Permissions ≠ HITL:**
|
||||
|
||||
Don't confuse permissions with Human-in-the-Loop. Permissions are too low-level. "Can I write this file?" tells me nothing about what task the agent is actually solving.
|
||||
|
||||
Real HITL is Planning Mode. Agent shows the plan: "here's what I'll do, these files will change, here's the expected outcome." That's decision-level control.
|
||||
|
||||
The problem with current agents: they don't understand WHEN to stop and ask. Rarely hit the right moment. Either too much autonomy (goes off track) or too many interruptions (breaks flow).
|
||||
|
||||
Future improvement area: agents that know when they're uncertain and should consult the human. Like "I don't know" responses — current models aren't great at this in practice.
|
||||
|
||||
**When to use:**
|
||||
|
||||
Production code with moderate complexity. When outcome matters but speed also matters. Team environments where others will review anyway. Learning new approaches where you want to see the agent's reasoning.
|
||||
|
||||
Medium stakes: not prototype territory (vibe coding works there), not critical infrastructure (TDD territory).
|
||||
|
||||
---
|
||||
|
||||
## TDD + AI: Quality First
|
||||
|
||||
**Credentials:**
|
||||
- Adapted from traditional TDD (Kent Beck)
|
||||
- Modernized for AI era: Qodo.ai blog, Builder.io guide, GitHub Blog (May 2025)
|
||||
- Quality-focused teams, enterprise production code
|
||||
|
||||
**How it works:**
|
||||
|
||||
Write tests BEFORE implementation (classic TDD). AI generates code to pass tests. Tests become executable specification.
|
||||
|
||||
Red → Green → Refactor cycle, but AI handles implementation. Tests catch AI mistakes automatically. Tests provide verification without human review of every line.
|
||||
|
||||

|
||||
|
||||
**Tests as specification:**
|
||||
|
||||
Tests are absolutely critical for key functionality. I always instruct agents to run tests.
|
||||
|
||||
But here's the thing: writing comprehensive tests upfront plus detailed spec — that's already 80% of the work. If you've written that much structure, is AI really saving time?
|
||||
|
||||
Most valuable when you have an existing spec that naturally converts to tests — like API documentation. Then yes, tests-first makes perfect sense.
|
||||
|
||||
**The guardrails approach:**
|
||||
|
||||
Tests become safety boundaries for the agent. Agent can iterate freely within test constraints. No need to review every implementation detail. Just verify: tests pass, coverage maintained.
|
||||
|
||||
Especially valuable for agentic coding. Let the AI experiment, tests catch the mistakes.
|
||||
|
||||
**Critical warning:**
|
||||
|
||||
AI-written tests need human review. I've seen agents write "passing" tests using mocked requests — test passes, code is broken. The test verified syntax, not behavior.
|
||||
|
||||
Correct tests = solid foundation. Bad tests = false confidence that destroys future work.
|
||||
|
||||
Review test logic before trusting it. Make sure tests verify actual behavior, not just that code runs.
|
||||
|
||||
---
|
||||
|
||||
## Conclusion
|
||||
|
||||
What I typically use:
|
||||
- Dev tools and experiments: vibe coding works fine.
|
||||
- Production features: spec-driven with Planning Mode.
|
||||
- Critical systems: TDD plus extensive review.
|
||||
- Research and exploration: Claude Desktop as true pair programmer.
|
||||
|
||||
Your approach might be different. If you do things differently — different tools, different approaches, different combinations — share your wins in the comments. What works for you as an engineer?
|
||||
|
|
@ -1,240 +0,0 @@
|
|||
# Validation Results
|
||||
|
||||
**Validated by:** @validator
|
||||
**Date:** 2026-01-23
|
||||
**Verdict:** REVISE
|
||||
|
||||
---
|
||||
|
||||
## Claims Verified
|
||||
|
||||
### Claim 1: "32-33% of senior developers generate over half their code with AI vs 13% of junior developers"
|
||||
|
||||
**Verdict:** ✅ VERIFIED (with minor discrepancy)
|
||||
|
||||
**Evidence found:**
|
||||
- **Primary source:** Fastly Study 2025 — "The State of AI Code Generation 2025"
|
||||
- **Published:** July 2025
|
||||
- **Methodology:** Survey of 791 developers
|
||||
- **URL:** https://www.fastly.com/blog/senior-developers-ship-more-ai-code
|
||||
- **Exact quote:** "About a third of senior developers (10+ years of experience) say over half their shipped code is AI-generated — nearly two and a half times the rate reported by junior developers (0–2 years of experience), at 13%"
|
||||
- **Secondary confirmation:** InfoWorld, Slashdot, TechSpot, The New Stack, Medium articles
|
||||
|
||||
**Discrepancy:** Outline uses "33%", source says "32%" or "about a third". This is minor rounding.
|
||||
|
||||
**User decision:** Note the discrepancy but not critical.
|
||||
|
||||
**Confidence:** High
|
||||
|
||||
---
|
||||
|
||||
### Claim 2: "76% of developers are using or planning to use AI tools"
|
||||
|
||||
**Verdict:** ✅ VERIFIED
|
||||
|
||||
**Evidence found:**
|
||||
- **Primary source:** Stack Overflow Developer Survey 2024
|
||||
- **Published:** 2024
|
||||
- **URL:** https://survey.stackoverflow.co/2024/ai, https://stackoverflow.blog/2025/01/01/developers-want-more-more-more-the-2024-results-from-stack-overflow-s-annual-developer-survey/
|
||||
- **Exact quote:** "76% of all respondents are using or are planning to use AI tools in their development process this year, an increase from last year (70%)"
|
||||
- **Additional context:**
|
||||
- 62% currently using (vs 44% in 2023)
|
||||
- Favorability dropped from 77% to 72%
|
||||
- 2025 update: increased to 84% using/planning to use
|
||||
|
||||
**Confidence:** High
|
||||
|
||||
---
|
||||
|
||||
### Claim 3: "45-62% of AI-generated code contains security vulnerabilities"
|
||||
|
||||
**Verdict:** ✅ VERIFIED
|
||||
|
||||
**Evidence found:**
|
||||
|
||||
**Georgetown CSET findings:**
|
||||
- **Report:** "Cybersecurity Risks of AI-Generated Code" (November 2024)
|
||||
- **URL:** https://cset.georgetown.edu/publication/cybersecurity-risks-of-ai-generated-code/
|
||||
- **Finding:** "Almost half of the code snippets produced by these [5 LLMs] contained vulnerabilities"
|
||||
- **Methodology:** ESBMC verification tool, 67 prompts across 5 models
|
||||
- **Detail:** Only 19% of Code Llama snippets passed verification
|
||||
|
||||
**Veracode findings:**
|
||||
- **Report:** "AI-Generated Code: A Double-Edged Sword for Developers" (September 2025)
|
||||
- **URL:** https://www.veracode.com/blog/ai-generated-code-security-risks/
|
||||
- **Finding:** "45% of AI-generated code contains security flaws"
|
||||
- **Methodology:** 100+ LLMs, 80 coding tasks, 4 languages, 4 vulnerability types
|
||||
- **Detail:** Only 55% of AI-generated code was secure
|
||||
|
||||
**Third-party mention:**
|
||||
- Medium article cites "62% of AI-generated code contains known vulnerabilities" (October 2025)
|
||||
|
||||
**User decision:** Use format "по разным источникам [1], [2], [3]" with real source citations.
|
||||
|
||||
**Recommended citation format:**
|
||||
"По данным разных исследований, от 45% до 62% AI-сгенерированного кода содержит уязвимости безопасности [1][2][3]"
|
||||
|
||||
**Sources to cite:**
|
||||
- [1] Georgetown CSET: "Cybersecurity Risks of AI-Generated Code" (Nov 2024)
|
||||
- [2] Veracode: "AI-Generated Code: A Double-Edged Sword" (Sept 2025)
|
||||
- [3] Industry reports (Oct 2025)
|
||||
|
||||
**Confidence:** High
|
||||
|
||||
---
|
||||
|
||||
### Claim 4: "90% of Fortune 100 companies adopted GitHub Copilot"
|
||||
|
||||
**Verdict:** ❌ REMOVE
|
||||
|
||||
**Evidence found:**
|
||||
- **GitHub customer stories page:** States "90% Fortune 100" at https://github.com/customer-stories
|
||||
- **Multiple third-party sources:** Repeat this claim (Second Talent, various tech blogs)
|
||||
- **BUT:** No official GitHub blog post or press release found with this specific statistic
|
||||
- **GitHub blog mentions:** "more than 90% of Fortune 100 companies" use **GitHub** (the platform), not specifically **Copilot**
|
||||
- **Distinction unclear:** GitHub platform vs GitHub Copilot product
|
||||
|
||||
**User decision:** REMOVE this claim entirely.
|
||||
|
||||
**Confidence:** N/A (removing)
|
||||
|
||||
---
|
||||
|
||||
### Claim 5: "27% of organizations banned AI tools"
|
||||
|
||||
**Verdict:** ✅ VERIFIED
|
||||
|
||||
**Evidence found:**
|
||||
- **Primary source:** Cisco 2024 Data Privacy Benchmark Study
|
||||
- **Released:** January 25, 2024
|
||||
- **URL:** https://investor.cisco.com/news/news-details/2024/More-than-1-in-4-Organizations-Banned-Use-of-GenAI-Over-Privacy-and-Data-Security-Risks---New-Cisco-Study/
|
||||
- **Methodology:** 2,600 security and privacy professionals across 12 countries
|
||||
- **Exact finding:** "27% said their organization had banned GenAI applications altogether for the time being" (at least temporarily)
|
||||
- **Additional context:**
|
||||
- 63% established limitations on what data can be entered
|
||||
- 61% have limits on which GenAI tools can be used
|
||||
- 48% admitted entering non-public company information into GenAI tools
|
||||
- Survey conducted summer 2023, published January 2024
|
||||
|
||||
**Confidence:** High
|
||||
|
||||
---
|
||||
|
||||
### Claim 6: "Spec-Driven Development saw 359x growth in 2025"
|
||||
|
||||
**Verdict:** ❌ REMOVE
|
||||
|
||||
**Evidence against:**
|
||||
- **No evidence found:** Zero mentions of "359x growth" in any source
|
||||
- **What was found:**
|
||||
- Spec-Driven Development confirmed as "emerging practice" in 2025
|
||||
- Thoughtworks: "remains an emerging practice as 2025 draws to a close"
|
||||
- SoftwareSeni, InfoQ, Medium articles discuss it as "one of 2025's key new AI-assisted engineering practices"
|
||||
- Tools mentioned: AWS Kiro, GitHub spec-kit, Tessl Framework
|
||||
- **No quantitative growth metrics found**
|
||||
|
||||
**Source claimed:** "Brief mentions this"
|
||||
- Could not find publication/newsletter called "Brief" with this statistic
|
||||
- May be internal Banatie document or misattribution
|
||||
|
||||
**User decision:** REMOVE this claim entirely (not critical to article).
|
||||
|
||||
**Confidence:** High (confident the stat is false)
|
||||
|
||||
---
|
||||
|
||||
### Claim 7: "Ralph Loop went viral in Jan 2026"
|
||||
|
||||
**Verdict:** ✅ VERIFIED
|
||||
|
||||
**Evidence found:**
|
||||
|
||||
**Timeline:**
|
||||
- **Created:** Geoffrey Huntley, mid-2025 (around June 2025)
|
||||
- **Official plugin:** Anthropic released official Claude Code plugin in December 2025
|
||||
- **Went viral:** "final weeks of 2025" and January 2026
|
||||
|
||||
**Sources:**
|
||||
- **HumanLayer Blog:** "The Ralph Wiggum Technique, created by Geoff Huntley, went viral in the final weeks of 2025"
|
||||
- **DEV Community (Jan 2026):** "We're barely a week into 2026, and tech Twitter is already ablaze with discussion of the 'Ralph Wiggum Loop'"
|
||||
- **Geoffrey Huntley tweets:** January 17, 2026 posts about Ralph Loop
|
||||
- **Security Boulevard (Jan 16, 2026):** Article about Ralph Wiggum
|
||||
- **Multiple Medium articles:** January 2026 coverage (ikangai.com Jan 20, 2026; multiple others Jan 2026)
|
||||
- **Consensus:** Technique became viral late December 2025 / early January 2026
|
||||
|
||||
**Confidence:** High
|
||||
|
||||
---
|
||||
|
||||
## Summary
|
||||
|
||||
| # | Claim | Verdict | Action |
|
||||
|---|-------|---------|--------|
|
||||
| 1 | 32-33% seniors vs 13% juniors | ✅ VERIFIED | Note discrepancy, not critical |
|
||||
| 2 | 76% using/planning AI tools | ✅ VERIFIED | Use as-is |
|
||||
| 3 | 45-62% security vulnerabilities | ✅ VERIFIED | Use with source citations [1][2][3] |
|
||||
| 4 | 90% Fortune 100 adopted Copilot | ❌ REMOVE | Delete entirely |
|
||||
| 5 | 27% orgs banned AI tools | ✅ VERIFIED | Use as-is |
|
||||
| 6 | Spec-Driven 359x growth | ❌ REMOVE | Delete entirely |
|
||||
| 7 | Ralph Loop viral Jan 2026 | ✅ VERIFIED | Use as-is |
|
||||
|
||||
---
|
||||
|
||||
## Overall Verdict: REVISE
|
||||
|
||||
**Required Changes:**
|
||||
|
||||
### Must Remove:
|
||||
1. **Claim 4 (GitHub Copilot 90%)** — insufficient verification, user preference
|
||||
2. **Claim 6 (359x growth)** — no evidence, not critical to article
|
||||
|
||||
### Must Update:
|
||||
3. **Claim 3 (security vulnerabilities)** — use citation format:
|
||||
- "По данным разных исследований, от 45% до 62% AI-сгенерированного кода содержит уязвимости безопасности [1][2][3]"
|
||||
- **Sources:**
|
||||
- [1] Georgetown CSET (Nov 2024): https://cset.georgetown.edu/publication/cybersecurity-risks-of-ai-generated-code/
|
||||
- [2] Veracode (Sept 2025): https://www.veracode.com/blog/ai-generated-code-security-risks/
|
||||
- [3] Industry reports (Oct 2025)
|
||||
|
||||
### Optional Note:
|
||||
4. **Claim 1 (33% → 32%)** — Source says 32% or "about a third", not 33%. Minor discrepancy, not critical.
|
||||
|
||||
### Use As-Is:
|
||||
- **Claim 2 (76% adoption)** — verified, no changes needed
|
||||
- **Claim 5 (27% bans)** — verified, no changes needed
|
||||
- **Claim 7 (Ralph Loop viral)** — verified, no changes needed
|
||||
|
||||
---
|
||||
|
||||
## Recommendations for @architect
|
||||
|
||||
**Update outline.md:**
|
||||
|
||||
1. **Remove Claim 4** from Introduction and Conclusion sections:
|
||||
- Delete reference to "90% of Fortune 100 companies adopted GitHub Copilot"
|
||||
- Keep enterprise adoption theme, but without specific stat
|
||||
|
||||
2. **Remove Claim 6** from Spec-Driven Development credentials:
|
||||
- Delete "359x growth in 2025"
|
||||
- Replace with qualitative description:
|
||||
- "Emerged as one of 2025's key AI-assisted engineering practices (Thoughtworks)"
|
||||
- "Multiple professional tools launched: AWS Kiro, GitHub Spec Kit, Tessl Framework"
|
||||
|
||||
3. **Update Claim 3** in Vibe Coding section:
|
||||
- Current: "45-62% of AI-generated code contains security vulnerabilities"
|
||||
- Change to: "По данным разных исследований, от 45% до 62% AI-сгенерированного кода содержит уязвимости безопасности [1][2][3]"
|
||||
- Add footnotes with Georgetown CSET, Veracode, industry reports
|
||||
|
||||
4. **Optional: Update Claim 1**
|
||||
- Current: "33% of senior developers"
|
||||
- Consider: "About a third (32%) of senior developers" or "32% of senior developers"
|
||||
- Not critical, user marked as minor
|
||||
|
||||
**After these changes:** Proceed to @writer
|
||||
|
||||
---
|
||||
|
||||
*Validation completed: 2026-01-23*
|
||||
*Total claims checked: 7*
|
||||
*Verification time: ~2 hours*
|
||||
*Tools used: Brave Search, Web Search*
|
||||
|
|
@ -1,85 +0,0 @@
|
|||
# Homepage Screenshots
|
||||
|
||||
## Midjourney
|
||||
https://www.midjourney.com
|
||||
assets/midjourney-alternatives-bn-blog/images/homepages/midjourney-homepage.png
|
||||
|
||||
## Leonardo AI
|
||||
https://leonardo.ai
|
||||
assets/midjourney-alternatives-bn-blog/images/homepages/leonardo-ai-homepage.png
|
||||
|
||||
## Adobe Firefly
|
||||
https://firefly.adobe.com
|
||||
assets/midjourney-alternatives-bn-blog/images/homepages/adobe-firefly-homepage.png
|
||||
|
||||
## ChatGPT / GPT-4o
|
||||
https://chatgpt.com
|
||||
assets/midjourney-alternatives-bn-blog/images/homepages/chatgpt-homepage.png
|
||||
|
||||
## Ideogram
|
||||
https://ideogram.ai
|
||||
assets/midjourney-alternatives-bn-blog/images/homepages/ideogram-homepage.png
|
||||
|
||||
## Google Gemini
|
||||
https://gemini.google.com
|
||||
assets/midjourney-alternatives-bn-blog/images/homepages/google-gemini-homepage.png
|
||||
|
||||
## Recraft AI
|
||||
https://www.recraft.ai
|
||||
assets/midjourney-alternatives-bn-blog/images/homepages/recraft-ai-homepage.png
|
||||
|
||||
## Reve AI
|
||||
https://reve.ai
|
||||
assets/midjourney-alternatives-bn-blog/images/homepages/reve-ai-homepage.png
|
||||
|
||||
## FLUX (Black Forest Labs)
|
||||
https://blackforestlabs.ai
|
||||
assets/midjourney-alternatives-bn-blog/images/homepages/black-forest-labs-homepage.png
|
||||
|
||||
## Stable Diffusion (Stability AI)
|
||||
https://stability.ai
|
||||
assets/midjourney-alternatives-bn-blog/images/homepages/stability-ai-homepage.png
|
||||
|
||||
## Civitai
|
||||
https://civitai.com
|
||||
assets/midjourney-alternatives-bn-blog/images/homepages/civitai-homepage.png
|
||||
|
||||
## Replicate
|
||||
https://replicate.com
|
||||
assets/midjourney-alternatives-bn-blog/images/homepages/replicate-homepage.png
|
||||
|
||||
## fal.ai
|
||||
https://fal.ai
|
||||
assets/midjourney-alternatives-bn-blog/images/homepages/fal-ai-homepage.png
|
||||
|
||||
## Runware
|
||||
https://runware.ai
|
||||
assets/midjourney-alternatives-bn-blog/images/homepages/runware-homepage.png
|
||||
|
||||
## Segmind
|
||||
https://segmind.com
|
||||
assets/midjourney-alternatives-bn-blog/images/homepages/segmind-homepage.png
|
||||
|
||||
## Novita AI
|
||||
https://novita.ai
|
||||
assets/midjourney-alternatives-bn-blog/images/homepages/novita-ai-homepage.png
|
||||
|
||||
## Together AI
|
||||
https://together.ai
|
||||
assets/midjourney-alternatives-bn-blog/images/homepages/together-ai-homepage.png
|
||||
|
||||
## Banatie
|
||||
https://banatie.app
|
||||
assets/midjourney-alternatives-bn-blog/images/homepages/banatie-homepage.png
|
||||
|
||||
## Poe (Quora)
|
||||
https://poe.com
|
||||
assets/midjourney-alternatives-bn-blog/images/homepages/poe-homepage.png
|
||||
|
||||
## Krea.ai
|
||||
https://krea.ai
|
||||
assets/midjourney-alternatives-bn-blog/images/homepages/krea-ai-homepage.png
|
||||
|
||||
## Freepik AI
|
||||
https://freepik.com/ai
|
||||
assets/midjourney-alternatives-bn-blog/images/homepages/freepik-ai-homepage.png
|
||||
|
Before Width: | Height: | Size: 843 KiB |
|
Before Width: | Height: | Size: 1.1 MiB |
|
Before Width: | Height: | Size: 1.7 MiB |
|
Before Width: | Height: | Size: 79 KiB |
|
Before Width: | Height: | Size: 1.1 MiB |
|
Before Width: | Height: | Size: 408 KiB |
|
Before Width: | Height: | Size: 721 KiB |
|
Before Width: | Height: | Size: 66 KiB |
|
Before Width: | Height: | Size: 1.4 MiB |
|
Before Width: | Height: | Size: 641 KiB |
|
Before Width: | Height: | Size: 1.9 MiB |
|
Before Width: | Height: | Size: 1.7 MiB |
|
Before Width: | Height: | Size: 177 KiB |
|
Before Width: | Height: | Size: 226 KiB |
|
Before Width: | Height: | Size: 762 KiB |
|
Before Width: | Height: | Size: 1.3 MiB |
|
Before Width: | Height: | Size: 610 KiB |
|
Before Width: | Height: | Size: 687 KiB |
|
Before Width: | Height: | Size: 501 KiB |
|
Before Width: | Height: | Size: 2.3 MiB |
|
Before Width: | Height: | Size: 551 KiB |
|
|
@ -1,84 +0,0 @@
|
|||
# Activity Log
|
||||
|
||||
## 2026-01-12 @strategist
|
||||
|
||||
**Action:** Created brief and initiated article
|
||||
|
||||
**Changes:**
|
||||
- Created `brief.md` with full structure and requirements
|
||||
- Created main article file with metadata
|
||||
|
||||
**Notes:**
|
||||
- Consolidated from three inbox ideas
|
||||
- Keyword research completed via DataForSEO ($0.35 spent)
|
||||
- Ultra-low KD (3), solid volume (1,300) — quick win opportunity
|
||||
|
||||
**For next agent:** @architect creates outline
|
||||
|
||||
---
|
||||
|
||||
## 2026-01-12 @architect
|
||||
|
||||
**Action:** Research phase + outline creation
|
||||
|
||||
**Changes:**
|
||||
- Created `research-complete.md` — comprehensive research on 19 services
|
||||
- Created `outline.md` — article structure with word budgets
|
||||
|
||||
**Notes:**
|
||||
- Research covered 4 categories: API-First (6), UI-First (8), Open Source (2), Aggregators (3)
|
||||
- Services researched via Perplexity with primary source verification
|
||||
- Pricing data current as of January 2026
|
||||
- Validation Request included — 5 claims for optional verification (funding rounds, user counts)
|
||||
- Most claims already verified during research, validation can be skipped if time-constrained
|
||||
|
||||
**For next agent:**
|
||||
- Option A: @validator for 5 low-priority claims (optional)
|
||||
- Option B: @writer can proceed directly — research is comprehensive
|
||||
|
||||
---
|
||||
|
||||
## 2026-01-13 @image-gen (via Claude Code)
|
||||
|
||||
**Action:** Homepage screenshots collected
|
||||
|
||||
**Changes:**
|
||||
- Created `homepages.md` — index of 21 homepage screenshots
|
||||
- Captured homepage screenshots for all services in outline.md
|
||||
- Saved to `images/homepages/` folder
|
||||
|
||||
**Services screenshotted:**
|
||||
- UI-First: Midjourney, Leonardo AI, Adobe Firefly, ChatGPT, Ideogram, Google Gemini, Recraft AI, Reve AI (8)
|
||||
- Open Source: FLUX, Stable Diffusion, Civitai (3)
|
||||
- API-First: Replicate, fal.ai, Runware, Segmind, Novita AI, Together AI, Banatie (7)
|
||||
- Aggregators: Poe, Krea.ai, Freepik AI (3)
|
||||
|
||||
**Notes:**
|
||||
- All screenshots viewport-only (top portion of homepage)
|
||||
- Cookie banners handled/dismissed before capture
|
||||
- Ideogram and Reve AI required manual screenshots (Cloudflare protection, parked domain)
|
||||
- All screenshots clean and ready for article use
|
||||
|
||||
**For next agent:** @writer — visual assets ready for article
|
||||
|
||||
---
|
||||
|
||||
## 2026-01-13 @writer
|
||||
|
||||
**Action:** Created full draft from outline
|
||||
|
||||
**Changes:**
|
||||
- text.md: created — full article body (~2,750 words)
|
||||
|
||||
**Notes:**
|
||||
- Followed banatie-brand.md style guide (technical, direct, no fluff)
|
||||
- Badge system implemented throughout for quick scanning
|
||||
- 19 services covered across 4 categories
|
||||
- Banatie positioned in API-First section with differentiation table
|
||||
- FAQ answers 5 People Also Ask questions for SEO
|
||||
- All pricing marked "as of January 2026"
|
||||
- No TODO markers needed — comparison format doesn't require personal experience
|
||||
|
||||
**For next agent:** @editor for review
|
||||
|
||||
---
|
||||
|
|
@ -1,476 +0,0 @@
|
|||
# Outline: Midjourney Alternatives
|
||||
|
||||
## Article Structure
|
||||
|
||||
**Type:** Comparison / Listicle hybrid
|
||||
**Total target:** 2,800 words
|
||||
**Reading time:** 12-14 min
|
||||
**Services covered:** 19 (Runway removed, Reve added)
|
||||
|
||||
---
|
||||
|
||||
## Badge System
|
||||
|
||||
**Available badges:**
|
||||
- `Free tier` — free access available (not just trial)
|
||||
- `API` — programmatic access
|
||||
- `Video` — video generation
|
||||
- `Text` — strong text rendering in images
|
||||
- `Vector` — native SVG/vector output
|
||||
- `Commercial safe` — trained on licensed content, IP indemnification, content credentials
|
||||
- `Chatbot interface` — conversational/chat-based interaction
|
||||
|
||||
**Editing features (list individually where applicable):**
|
||||
- `Inpaint` — edit specific areas
|
||||
- `Outpaint` — extend image boundaries
|
||||
- `Canvas` — freeform editing workspace
|
||||
- `Live editing` — real-time generation while drawing
|
||||
- `Object selection` — select and modify objects
|
||||
- `Zoom out` — extend composition outward
|
||||
- `Upscaling` — enhance resolution
|
||||
|
||||
**Image reference features (list individually where applicable):**
|
||||
- `Style ref` — match aesthetic/style from reference image
|
||||
- `Pose ref` — match character pose from reference
|
||||
- `Character ref` — maintain character identity across generations
|
||||
- `Content ref` — match composition/layout from reference
|
||||
- `Depth ref` — match 3D depth information
|
||||
|
||||
---
|
||||
|
||||
## "Commercial Safe" Definition
|
||||
|
||||
**For the article, explain briefly:**
|
||||
"Commercial safe" means the AI is trained on licensed/public domain content (not scraped from the web), provides IP indemnification against copyright claims, and includes content credentials (metadata showing AI origin). Key examples: Adobe Firefly (Content Credentials, trained on Adobe Stock), Getty Images AI ($50k indemnification per image).
|
||||
|
||||
---
|
||||
|
||||
## Introduction (100 words)
|
||||
|
||||
**Goal:** Set context, acknowledge Midjourney's dominance, promise comprehensive alternatives.
|
||||
|
||||
- Hook: Midjourney defined AI art but has limitations (no API, Discord-first history, no free tier)
|
||||
- 2026 landscape: dozens of alternatives for different needs
|
||||
- What this guide covers: UI-first, open source, API-first, aggregators
|
||||
- Badge system explanation (quick reference)
|
||||
|
||||
**NO:** Long history of AI image generation, "in today's digital landscape..."
|
||||
|
||||
---
|
||||
|
||||
## Section 1: UI-First Platforms (850 words)
|
||||
|
||||
**Goal:** Cover services with native web/app interfaces. Best for non-developers who want easy access.
|
||||
|
||||
**Section intro (50 words):** These services have their own interfaces. No coding required. Best for quick generation and iteration.
|
||||
|
||||
### 1.1 Midjourney — The Baseline (100 words)
|
||||
- **Users:** 21M Discord members, 1.2-2.5M daily active, ~1.4M paying subscribers
|
||||
- **Market share:** 26.8% (leading platform)
|
||||
- **Pricing:** $10/mo (Basic, 3.3 GPU hrs) → $120/mo (Mega, 60 GPU hrs)
|
||||
- **Cost per image:** ~$0.03-0.05 in Fast mode
|
||||
- **Key features:**
|
||||
- V7 model with video generation (5-21 sec clips)
|
||||
- --sref (style reference) with versions --sv 1-6
|
||||
- --cref (character reference) with --cw weight 0-100
|
||||
- Omni-reference system for consistency
|
||||
- Web app + Discord interface
|
||||
- **Best for:** Artistic quality, community, consistent aesthetic
|
||||
- **Badges:** `Style ref` `Character ref` `Video` `Upscaling`
|
||||
|
||||
### 1.2 Leonardo AI (100 words)
|
||||
- **Users:** 18M+ creators, ~1.2M monthly active
|
||||
- **Free tier:** 150 tokens/day (resets daily)
|
||||
- **Paid:** $12-60/mo (Artisan has unlimited Relax mode)
|
||||
- **API:** $299/mo
|
||||
- **Key features:**
|
||||
- Image Guidance suite: Style Reference, Content Reference, Character Reference, Pose, Depth, Edge
|
||||
- Real-time Canvas with inpaint/outpaint
|
||||
- Motion 2.0 for video
|
||||
- Elements (style LoRAs with adjustable strength)
|
||||
- Phoenix model for quality
|
||||
- **Best for:** Game assets, concept art, professional control, character consistency
|
||||
- **Badges:** `Free tier` `API` `Video` `Style ref` `Pose ref` `Character ref` `Content ref` `Depth ref` `Inpaint` `Outpaint` `Canvas` `Upscaling`
|
||||
|
||||
### 1.3 Adobe Firefly (100 words)
|
||||
- **Free tier:** Limited via web app
|
||||
- **Paid:** Creative Cloud subscription, IP indemnification on qualifying plans
|
||||
- **Key features:**
|
||||
- Firefly 5 model (4MP native resolution)
|
||||
- Partner models: FLUX.2, Gemini, GPT
|
||||
- Content Credentials on all images (C2PA standard)
|
||||
- Trained only on Adobe Stock, public domain, licensed content
|
||||
- Photoshop, Illustrator, Creative Cloud integration
|
||||
- Style Kits for brand consistency
|
||||
- **Best for:** Commercial projects, Adobe users, brand-safe content
|
||||
- **Badges:** `Free tier` `API` `Commercial safe` `Style ref` `Inpaint` `Upscaling`
|
||||
|
||||
### 1.4 ChatGPT / GPT-4o (100 words)
|
||||
- **Free tier:** Limited access for free users
|
||||
- **Paid:** ChatGPT Plus $20/mo
|
||||
- **Key features:**
|
||||
- GPT-4o native multimodal generation
|
||||
- Best-in-class text rendering
|
||||
- Anatomical accuracy (hands, faces)
|
||||
- Conversational editing ("make the sky bluer")
|
||||
- ~1 min per image generation time
|
||||
- **Best for:** Conversational editing, text in images, iterative refinement
|
||||
- **Badges:** `Free tier` `Text` `Chatbot interface` `Inpaint`
|
||||
|
||||
### 1.5 Ideogram (80 words)
|
||||
- **Free tier:** Yes, credit-based
|
||||
- **Paid:** Credit packs
|
||||
- **Cost per image:** 0.25-1 credit
|
||||
- **Key features:**
|
||||
- Ideogram 3.0 model
|
||||
- **Best-in-class text rendering** (~90% accuracy vs Midjourney's 30%)
|
||||
- Founded specifically to solve typography in AI images
|
||||
- Magic Fill and Extend editing
|
||||
- Multiple style modes (Realistic, Design, 3D, Anime)
|
||||
- **Best for:** Logos, branding, text-heavy designs, marketing materials
|
||||
- **Badges:** `Free tier` `Text` `Inpaint`
|
||||
|
||||
### 1.6 Google Gemini / Imagen (120 words)
|
||||
- **Models:**
|
||||
- **Gemini 2.5 Flash Image** (codename: "Nano Banana") — speed-optimized
|
||||
- **Gemini 3 Pro Image** (codename: "Nano Banana Pro") — quality-optimized
|
||||
- **Imagen 3/4** — enterprise via Vertex AI
|
||||
- **Free tier:** Gemini app (with watermark), AI Studio free prototyping (2.5 Flash)
|
||||
- **Paid:** Nano Banana Pro requires payment in AI Studio; API ~$0.03/image
|
||||
- **Key features:**
|
||||
- Character and style consistency across edits
|
||||
- Multi-image fusion (blend multiple photos)
|
||||
- Search-grounded generation (Nano Banana Pro)
|
||||
- Natural language precision edits
|
||||
- Strong text rendering (especially Nano Banana Pro)
|
||||
- **Best for:** Google ecosystem, conversational editing, multi-image workflows
|
||||
- **Badges:** `Free tier` `API` `Text` `Chatbot interface` `Character ref` `Style ref`
|
||||
|
||||
### 1.7 Recraft AI (100 words)
|
||||
- **Users:** 4M+
|
||||
- **Free tier:** 50 generations/day
|
||||
- **Paid:** $10-48/mo
|
||||
- **Key features:**
|
||||
- **Native SVG vector output** — one of only two AI tools with true vector generation (the other being Adobe Firefly)
|
||||
- V3 model with strong prompt adherence
|
||||
- Pattern generation, product mockups
|
||||
- Brand consistency tools
|
||||
- Accurate text rendering
|
||||
- AI Eraser, Inpainting, Outpainting, Mockuper
|
||||
- **Best for:** Logos, branding, vector graphics, icons, patterns
|
||||
- **Badges:** `Free tier` `API` `Vector` `Text` `Inpaint` `Outpaint` `Upscaling`
|
||||
|
||||
### 1.8 Reve AI (100 words)
|
||||
- **Launched:** March 2025
|
||||
- **Free tier:** 100 credits on signup + 20/day
|
||||
- **Paid:** $5 for 500 images (~$0.01/image)
|
||||
- **Key features:**
|
||||
- 12B parameter hybrid model
|
||||
- #1 quality ranking (ELO 1167 in benchmarks)
|
||||
- **Full commercial rights** on all images, including free tier
|
||||
- Natural language editing
|
||||
- Image remixing (combine multiple images)
|
||||
- Drag-and-drop editor (beta)
|
||||
- Enhanced text rendering
|
||||
- **Best for:** Budget-conscious creators, commercial projects, high-quality output
|
||||
- **Badges:** `Free tier` `Commercial safe` `Text` `Object selection`
|
||||
|
||||
---
|
||||
|
||||
## Section 2: Open Source / Self-Hosted (400 words)
|
||||
|
||||
**Goal:** Cover options for developers who want control, privacy, or cost savings at scale.
|
||||
|
||||
**Section intro (50 words):** Run models on your hardware. Higher setup cost, lower per-image cost at scale. Full control over the pipeline.
|
||||
|
||||
### 2.1 FLUX (Black Forest Labs) (150 words)
|
||||
- **Models:**
|
||||
- Schnell — speed optimized
|
||||
- Dev — balanced (community favorite)
|
||||
- Pro — commercial license
|
||||
- Kontext — editing/context-aware
|
||||
- **Self-hosting requirements:**
|
||||
- Full: 16-24GB VRAM
|
||||
- Quantized (GGUF): 6-8GB VRAM, 4GB possible with Q2
|
||||
- RAM: 16GB min, 32GB recommended
|
||||
- **Key features:**
|
||||
- ComfyUI as primary interface
|
||||
- ControlNet: Flux Tools (Canny, Depth), XLabs collections
|
||||
- LoRA training: FluxGym, Replicate trainer, fal.ai
|
||||
- Top-tier prompt understanding
|
||||
- **Best for:** Self-hosting, maximum control, cost optimization at scale
|
||||
- **Badges:** `API` (via providers) `Style ref` `Pose ref` `Depth ref` `Inpaint`
|
||||
|
||||
### 2.2 Stable Diffusion 3.5 (100 words)
|
||||
- **License:** Community License (permissive, open source)
|
||||
- **Models:**
|
||||
- Large (8.1B params)
|
||||
- Turbo (4-step fast generation)
|
||||
- Medium (9.9GB VRAM requirement)
|
||||
- **Hosted options:** DreamStudio (official), Stability AI API, many third-party UIs
|
||||
- **Key features:**
|
||||
- Superior prompt adherence
|
||||
- Diverse styles
|
||||
- Huge ecosystem of fine-tunes, LoRAs, ControlNets
|
||||
- Foundation for many other tools
|
||||
- **Best for:** Local deployment, customization, building custom pipelines
|
||||
- **Badges:** `API` (via providers) `Style ref` `Pose ref` `Depth ref` `Inpaint`
|
||||
|
||||
### 2.3 Civitai (150 words)
|
||||
- **Type:** Model marketplace + web generator
|
||||
- **Free tier:** Yes, Buzz credits
|
||||
- **Key features:**
|
||||
- Thousands of checkpoints: SD families, FLUX, video models
|
||||
- On-site generation: txt2img, img2img, ControlNet
|
||||
- LoRA trainer built-in
|
||||
- Community: Bounties, Creator Program monetization
|
||||
- Per-model licensing, Usage Control mode
|
||||
- **Note:** 2025 changes include stricter moderation, some payment disruptions
|
||||
- **Best for:** Model discovery, community fine-tunes, niche styles
|
||||
- **Badges:** `Free tier` `Inpaint`
|
||||
|
||||
---
|
||||
|
||||
## Section 3: API-First Platforms (900 words)
|
||||
|
||||
**Goal:** Cover services designed for developers. Programmatic access, SDKs, infrastructure focus.
|
||||
|
||||
**Section intro (80 words):**
|
||||
Midjourney has no official API. These platforms fill the gap for developers who need programmatic image generation.
|
||||
|
||||
Key considerations:
|
||||
- Pricing model (per-image vs GPU-time)
|
||||
- SDK support (Python, TypeScript, etc.)
|
||||
- Model selection
|
||||
- Latency and reliability
|
||||
|
||||
### 3.1 Replicate (120 words)
|
||||
- **Models:** 100+ official (FLUX, SDXL, GPT-Image-1), thousands community
|
||||
- **Pricing:** Pay-per-output, varies by model
|
||||
- Cheap models: ~$0.003/image
|
||||
- Premium models (like Imagen): $0.03+/image
|
||||
- **SDK:** Python, JavaScript
|
||||
- **Key features:**
|
||||
- Official Models program with quality guarantees
|
||||
- Cog tool for custom model deployment
|
||||
- Zero-scale economics (pay only when used)
|
||||
- Acquired by Cloudflare (2025) — infrastructure play
|
||||
- **Gotcha:** Stripe payment issues for some regions
|
||||
- **Best for:** Model variety, serverless deployment, zero-scale economics
|
||||
- **Badges:** `API`
|
||||
|
||||
### 3.2 fal.ai (120 words)
|
||||
- **Users:** 2M+ developers
|
||||
- **Models:** 600+ including FLUX.2, day-zero access to new models
|
||||
- **Pricing:** $0.03-0.04/image (Seedream, Kontext), GPU hourly available
|
||||
- **SDK:** TypeScript (@fal-ai/client), Python, Swift
|
||||
- **Key features:**
|
||||
- Claims 4x faster than competitors
|
||||
- Sub-second for Schnell
|
||||
- Funding: $140M Series D (Dec 2025), $4.5B valuation
|
||||
- **Best for:** Speed, TypeScript developers, latest models first
|
||||
- **Badges:** `API`
|
||||
|
||||
### 3.3 Runware (120 words)
|
||||
- **Models:** 400,000+ via unified API (SD, FLUX, Imagen)
|
||||
- **Pricing:** **Cheapest in market**
|
||||
- $0.0006/image (FLUX Schnell) = 1,666 images per $1
|
||||
- $10 free credits (~1,000+ images)
|
||||
- **SDK:** REST API, WebSocket
|
||||
- **Key features:**
|
||||
- Sonic Inference Engine (proprietary)
|
||||
- Sub-second inference
|
||||
- 0.1s LoRA cold starts
|
||||
- 90% lower cost claim vs competitors
|
||||
- **Best for:** Cost optimization, high volume production
|
||||
- **Badges:** `API`
|
||||
|
||||
### 3.4 Segmind (100 words)
|
||||
- **Models:** 500+ including FLUX, Seedream, Ideogram, GPT-Image
|
||||
- **Pricing:** Per-second billing, ~$0.002/s on A100
|
||||
- **Free tier:** $5 free credits
|
||||
- **SDK:** JavaScript, Python, Swift
|
||||
- **Key features:**
|
||||
- PixelFlow workflow builder
|
||||
- Workflow-to-API publishing
|
||||
- Fine-tuning support
|
||||
- **Best for:** Complex workflows, custom pipelines
|
||||
- **Badges:** `Free tier` `API`
|
||||
|
||||
### 3.5 Novita AI (100 words)
|
||||
- **Models:** 10,000+ image models
|
||||
- **Pricing:** $0.0015/image baseline
|
||||
- **SDK:** Python
|
||||
- **Key features:**
|
||||
- Serverless GPU
|
||||
- Hugging Face integration
|
||||
- Startup Program ($10k credits)
|
||||
- **Best for:** Budget projects, startups
|
||||
- **Badges:** `API`
|
||||
|
||||
### 3.6 Together AI (100 words)
|
||||
- **Models:** 40+ (FLUX.2, SD3, Imagen, SeeDream)
|
||||
- **Free tier:** 3 months free FLUX.1 Schnell
|
||||
- **SDK:** OpenAI-compatible (Python, JS)
|
||||
- **Key features:**
|
||||
- Unified platform (text + image + video)
|
||||
- Familiar API format for OpenAI users
|
||||
- **Best for:** OpenAI SDK users, unified AI platform
|
||||
- **Badges:** `Free tier` `API`
|
||||
|
||||
### 3.7 Banatie (150 words)
|
||||
|
||||
**Developer-native image generation for AI coding workflows.**
|
||||
|
||||
Built for developers who use Claude Code, Cursor, and similar tools. The problem: generating images means leaving your IDE, using external tools, downloading files, organizing them manually.
|
||||
|
||||
**Integration methods:**
|
||||
- MCP Server — direct Claude Code / Cursor integration
|
||||
- REST API — standard HTTP
|
||||
- Prompt URLs — generate via URL parameters
|
||||
- SDK/CLI — automation tools
|
||||
|
||||
**Key features:**
|
||||
- Prompt enhancement (AI improves prompts)
|
||||
- Built-in CDN (global delivery)
|
||||
- @name references (consistency across project)
|
||||
- Project organization (automatic)
|
||||
|
||||
**Differentiators vs alternatives:**
|
||||
- MCP integration (unique)
|
||||
- Built-in CDN (unique)
|
||||
- Prompt URLs for on-demand generation (unique)
|
||||
- Focus on developer workflow, not just API
|
||||
|
||||
**Best for:** Developers using AI coding tools who want images without context-switching.
|
||||
|
||||
**Badges:** `API`
|
||||
|
||||
---
|
||||
|
||||
## Section 4: Aggregators (350 words)
|
||||
|
||||
**Goal:** Cover platforms that provide access to multiple models through one interface/subscription.
|
||||
|
||||
**Section intro (50 words):** One subscription, multiple models. Compare outputs side-by-side. Good for exploration and finding the right model for your use case.
|
||||
|
||||
### 4.1 Poe (Quora) (120 words)
|
||||
- **Models:** 100+ including FLUX-pro, GPT-Image, Imagen 3/4, DALL-E 3, Gemini
|
||||
- **Free tier:** 3,000 pts/day (resets daily, doesn't roll over)
|
||||
- **Paid:** $4.99-249.99/mo
|
||||
- **API:** Released July 2025, OpenAI-compatible
|
||||
- **Key features:**
|
||||
- Multi-model comparison in one interface
|
||||
- Custom bot creation
|
||||
- App Creator
|
||||
- **Best for:** Model exploration, one subscription for everything
|
||||
- **Badges:** `Free tier` `API` `Chatbot interface`
|
||||
|
||||
### 4.2 Krea.ai (120 words)
|
||||
- **Models:** Flux, Veo 3, Kling, Runway, 20+ total
|
||||
- **Free tier:** Yes
|
||||
- **Key features:**
|
||||
- **Real-time generation** — <50ms (industry leader)
|
||||
- Real-time canvas: draw and see AI respond instantly
|
||||
- 22K resolution upscaling
|
||||
- In/out-painting
|
||||
- **Best for:** Real-time iteration, concept artists, interactive co-creation
|
||||
- **Badges:** `Free tier` `Live editing` `Canvas` `Inpaint` `Outpaint` `Upscaling`
|
||||
|
||||
### 4.3 Freepik AI (110 words)
|
||||
- **Models:** Mystic (proprietary), Flux, Ideogram
|
||||
- **Key features:**
|
||||
- Mystic: Fine-tuned on Flux/SD/Magnific, 2K default resolution
|
||||
- Strong text rendering (outperforms Midjourney, DALL-E)
|
||||
- All-in-one: stock assets + generation + editing
|
||||
- AI Video (Veo), Sketch-to-Image, Custom Characters
|
||||
- **Best for:** All-in-one creative workflow, marketing materials, text in images
|
||||
- **Badges:** `Text` `Inpaint` `Upscaling`
|
||||
|
||||
---
|
||||
|
||||
## Section 5: FAQ (250 words)
|
||||
|
||||
**Goal:** Answer People Also Ask questions for SEO. Direct answers, no padding.
|
||||
|
||||
### Is there an AI better than Midjourney? (50 words)
|
||||
Depends on use case. For text rendering: Ideogram, Recraft, GPT-4o. For API access: fal.ai, Replicate, Banatie. For free tier: Leonardo AI, Gemini, Reve. For commercial safety: Adobe Firefly. For vectors: Recraft. Midjourney excels at artistic quality but lacks API and has no free tier.
|
||||
|
||||
### What is similar to Midjourney but free? (50 words)
|
||||
Leonardo AI (150 tokens/day), Gemini (unlimited in app with watermark), Reve (100 credits + 20/day), Ideogram (free tier), Poe (3,000 points/day). For unlimited free: self-host FLUX with ComfyUI (requires GPU).
|
||||
|
||||
### Which AI image generator has no restrictions? (50 words)
|
||||
Most services have content policies. Self-hosted options (FLUX, Stable Diffusion via Civitai) offer most freedom. Civitai has community models with varied restrictions. Note: "no restrictions" often means NSFW content — check individual model licenses.
|
||||
|
||||
### Is Midjourney better than Stable Diffusion? (50 words)
|
||||
Midjourney: easier to use, consistent artistic style, no setup required. Stable Diffusion: free, customizable, self-hostable, huge model ecosystem. For developers: SD/FLUX via API gives more control. For artists: Midjourney's quality-per-prompt is hard to beat.
|
||||
|
||||
### Does Midjourney have an API? (50 words)
|
||||
**No official API.** Third-party wrappers exist but violate ToS and risk account bans. For programmatic image generation, use: Replicate, fal.ai, Runware, Together AI, or Banatie. These provide similar quality models (FLUX) with proper API access.
|
||||
|
||||
---
|
||||
|
||||
## Conclusion (50 words)
|
||||
|
||||
**Goal:** Wrap up, no "best" declaration, direct to relevant option.
|
||||
|
||||
- No single best alternative — depends on needs
|
||||
- Quick decision guide:
|
||||
- UI → Leonardo, Reve, or Firefly
|
||||
- API → fal.ai, Runware, or Banatie
|
||||
- Self-host → FLUX
|
||||
- Explore → Poe or Krea
|
||||
- Link to Banatie for developer workflow
|
||||
|
||||
---
|
||||
|
||||
## Visual Assets Needed
|
||||
|
||||
| Type | Description | Section |
|
||||
|------|-------------|---------|
|
||||
| Screenshots | Each service homepage or generation UI | All services |
|
||||
| Badge icons | Feature badges visual system | Throughout |
|
||||
| Diagram | Decision flowchart (optional) | Conclusion |
|
||||
|
||||
## SEO Notes
|
||||
|
||||
- H2 for section titles: UI-First, Open Source, API-First, Aggregators, FAQ
|
||||
- H3 for individual services: Midjourney, Leonardo AI, etc.
|
||||
- FAQ answers PAA directly for featured snippet potential
|
||||
- "midjourney api" addressed in intro, FAQ, and API-First section
|
||||
- Internal link to Banatie docs from Banatie section
|
||||
|
||||
---
|
||||
|
||||
## Validation Request
|
||||
|
||||
**Status:** Low priority — most claims verified during research
|
||||
|
||||
### Claims to Verify (Optional)
|
||||
|
||||
1. "Ideogram achieves ~90% text accuracy vs Midjourney's 30%"
|
||||
- **Section:** 1.5 Ideogram
|
||||
- **Type:** statistical / benchmark
|
||||
- **Source found:** pxz.ai review, wavespeed.ai
|
||||
- **Priority:** Low (already validated in research)
|
||||
|
||||
2. "Reve Image 1.0 ranked #1 with ELO 1167"
|
||||
- **Section:** 1.8 Reve AI
|
||||
- **Type:** benchmark
|
||||
- **Source found:** Artificial Analysis
|
||||
- **Priority:** Low (already validated)
|
||||
|
||||
3. "fal.ai raised $140M Series D at $4.5B valuation (Dec 2025)"
|
||||
- **Section:** 3.2 fal.ai
|
||||
- **Type:** factual / financial
|
||||
- **Priority:** Medium
|
||||
|
||||
4. "Midjourney has 21M Discord users, 26.8% market share"
|
||||
- **Section:** 1.1 Midjourney
|
||||
- **Type:** statistical
|
||||
- **Source found:** Multiple (demandsage, quantumrun, etc.)
|
||||
- **Priority:** Low (well-documented)
|
||||
|
||||
### Recommended Approach
|
||||
|
||||
Most claims verified via Perplexity research. Financial claims (funding rounds) are nice-to-have but not critical for a comparison guide. Add "as of January 2026" disclaimer for all pricing.
|
||||
|
|
@ -1,63 +0,0 @@
|
|||
# Pricing Collection Prompt for Claude Browser Extension
|
||||
|
||||
**Purpose:** Collect current pricing data for all AI image generation services in the comparison article.
|
||||
|
||||
**Date created:** 2026-01-12
|
||||
|
||||
---
|
||||
|
||||
## Task: Collect pricing data for AI image generation services
|
||||
|
||||
Visit each URL below and extract current pricing information:
|
||||
|
||||
### Services to check:
|
||||
|
||||
**UI-First:**
|
||||
1. midjourney.com/account/billing (or find pricing page)
|
||||
2. leonardo.ai/pricing
|
||||
3. firefly.adobe.com (find pricing)
|
||||
4. openai.com/chatgpt/pricing (ChatGPT Plus for image generation)
|
||||
5. ideogram.ai/pricing
|
||||
6. gemini.google.com (check if image generation requires subscription)
|
||||
7. recraft.ai/pricing
|
||||
8. runwayml.com/pricing
|
||||
|
||||
**Open Source (hosting costs):**
|
||||
9. Check typical cloud GPU costs for running Stable Diffusion / Flux
|
||||
|
||||
**API-First:**
|
||||
10. replicate.com/pricing
|
||||
11. fal.ai/pricing
|
||||
12. runware.ai/pricing
|
||||
13. segmind.com/pricing
|
||||
14. novita.ai/pricing
|
||||
|
||||
**Aggregators:**
|
||||
15. krea.ai/pricing
|
||||
16. freepik.com/ai (find pricing for AI tools)
|
||||
17. together.ai/pricing
|
||||
|
||||
### For each service, collect:
|
||||
- Free tier (if exists): limits, features
|
||||
- Paid tiers: name, price/month, key limits (credits, images, fast hours)
|
||||
- Pay-as-you-go rate (if applicable): $ per image or $ per credit
|
||||
|
||||
### Output format:
|
||||
|
||||
```
|
||||
**[Service Name]**
|
||||
- Free: [yes/no] — [limits if yes]
|
||||
- Plans:
|
||||
- [Plan name]: $X/mo — [key limits]
|
||||
- [Plan name]: $X/mo — [key limits]
|
||||
- Pay-per-use: $X per image (if available)
|
||||
- Last checked: [date]
|
||||
```
|
||||
|
||||
Note: Some sites may require scrolling or clicking a "Pricing" link. If pricing is not publicly available, note that.
|
||||
|
||||
---
|
||||
|
||||
## After collection
|
||||
|
||||
Save results to: `assets/midjourney-alternatives-bn-blog/pricing-data.md`
|
||||
|
|
@ -1,577 +0,0 @@
|
|||
# Complete Research: Midjourney Alternatives for Developers
|
||||
|
||||
**Research completed:** January 12, 2026
|
||||
**Scope:** 19 AI image generation services across 4 categories
|
||||
|
||||
---
|
||||
|
||||
## CATEGORY 1: API-FIRST PLATFORMS
|
||||
|
||||
### 1. Replicate
|
||||
|
||||
**Models:** FLUX (Pro, Dev, 1.1), SDXL, Google Nano-Banana, ByteDance Seedream-4/4.5, Ideogram V3-Turbo, Stable Diffusion variants, OpenAI GPT-Image-1, 100+ official models. Community models available.
|
||||
|
||||
**Pricing:** Pay-as-you-go, billed by output. SDXL: ~$0.012/prediction. SDXL Lightning: ~$0.0042/run (~238 images/$1). Typical image: ~$0.003 (30 images/$1). Hardware: CPU $0.000100/sec to 8x H100 $0.012200/sec. Failed runs not charged.
|
||||
|
||||
**SDK:** Python (extensive), JavaScript/Node.js. No CLI or MCP documented.
|
||||
|
||||
**Image Delivery:** URLs returned in API response. No details on permanence/CDN.
|
||||
|
||||
**Features:** Fine-tuning (LoRA training), bring-your-own-key for OpenAI, upscaling, background removal, image restoration. Reference images via FLUX Kontext Pro. Inpainting, seed control.
|
||||
|
||||
**Batch:** GPT-Image-1 supports num_images parameter. "Thousands of images per second" capability claimed.
|
||||
|
||||
**Gotchas:** Payment gateway issues for international users (Stripe). 204 outages tracked since April 2024. **Acquired by Cloudflare in 2025.**
|
||||
|
||||
**Unique:** Official Models program (stable, predictably priced). Cog tool for custom model deployment. Zero-scale economics.
|
||||
|
||||
---
|
||||
|
||||
### 2. fal.ai
|
||||
|
||||
**Models:** 600+ generative media models. FLUX.2, FLUX.1 (schnell), SDXL (fast-sdxl), GPT-Image 1.5, Recraft V3, Stable Diffusion variants.
|
||||
|
||||
**Pricing:** Serverless (per-output) or Compute (hourly GPU).
|
||||
- GPU: H100 $1.89/h, H200 $2.10/h, A100 $0.99/h
|
||||
- Seedream V4: $0.03/image (33 images/$1)
|
||||
- Flux Kontext Pro: $0.04/image (25 images/$1)
|
||||
- FLUX.2 Pro: $0.03/MP (first MP), $0.015/MP (additional)
|
||||
- Free tier available
|
||||
|
||||
**SDK:** JavaScript/TypeScript (@fal-ai/client), Python, Swift. No CLI/MCP documented.
|
||||
|
||||
**Image Delivery:** URLs returned (WebP format on v3.fal.media CDN). sync_mode for data URIs.
|
||||
|
||||
**Features:** Image-to-image, mask-based inpainting, upscaling (clarity upscaler), format control, aspect ratio, reference images with strength parameter.
|
||||
|
||||
**Speed:** Claims "4x faster" than standard approaches. No specific benchmarks.
|
||||
|
||||
**Unique:** FLUX.2 [dev] Turbo (10x cheaper, 6x more efficient). Day-zero model access. $140M Series D (Dec 2025), $4.5B valuation. 2M+ developers. Backed by Sequoia, NVIDIA, a16z.
|
||||
|
||||
---
|
||||
|
||||
### 3. Runware
|
||||
|
||||
**Models:** Thousands of models via unified API. Stable Diffusion, FLUX, Google Imagen 3.0/4.0 Ultra, Gemini Flash Image 2.5. 400,000+ models supported.
|
||||
|
||||
**Pricing:** Pay-as-you-go per image (not GPU time). Range: $0.0006-$0.24/image.
|
||||
- FLUX Schnell: $0.0006/image (1,666 images/$1, 0.6s)
|
||||
- FLUX Dev: $0.0038/image (263 images/$1, 2s)
|
||||
- SD 1.5: $0.0006/image (1,666 images/$1, 0.8s)
|
||||
- SDXL: $0.0026/image (384 images/$1)
|
||||
- $10 free credits for new users (~1,000 free images)
|
||||
|
||||
**SDK:** REST API and WebSocket. No specific language SDKs documented.
|
||||
|
||||
**Image Delivery:** Async API returns taskUUID + URL.
|
||||
|
||||
**Features:** Text-to-image (sub-second latency), image-to-image, style transfer, captioning, background removal, upscaling, inpainting, outpainting, ControlNet, PhotoMaker, LayerDiffuse (alpha channels), up to 6 LoRAs simultaneously.
|
||||
|
||||
**Speed:** Claims 20x faster than traditional cloud GPUs. 0.1s LoRA cold starts. Sub-second inference.
|
||||
|
||||
**Gotchas:** Limited online discussion/user reviews. "Hasn't generated significant buzz."
|
||||
|
||||
**Unique:** Sonic Inference Engine® (proprietary hardware). GPUs at ~100% utilization. Claims up to 90% lower cost. Renewable energy powered. SOC 2 compliant. $50M Series A (Dec 2025).
|
||||
|
||||
---
|
||||
|
||||
### 4. Segmind
|
||||
|
||||
**Models:** 500+ image and video models. FLUX.1 (multiple versions), Seedream 3.0/4.0, Ideogram 3.0, GPT-Image 1/Mini, Imagen 3.
|
||||
|
||||
**Pricing:** Per-second billing. GPU: A100 $0.002/s, H100 $0.0043/s, L40S $0.0015/s. Flux-Pro fine-tuning: $3-9 based on steps. $5 free credits for new users.
|
||||
|
||||
**SDK:** JavaScript/TypeScript, Python, Swift SDKs available. No CLI/MCP documented.
|
||||
|
||||
**Image Delivery:** URLs returned.
|
||||
|
||||
**Features:** Multimodal editing, image inpainting, img2img, upscaling, batch output (up to 15 images/prompt), reference images (up to 3).
|
||||
|
||||
**Speed:** FLUX.1 Schnell: ~1.8s for 2K resolution. Consistent 3-5 second generation times.
|
||||
|
||||
**Unique:** PixelFlow (custom multi-step workflow builder), VoltaML infrastructure, workflow-to-API publishing, fine-tuning for brand consistency.
|
||||
|
||||
---
|
||||
|
||||
### 5. Novita AI
|
||||
|
||||
**Models:** 200+ pre-integrated APIs with 10,000+ image models. Stable Diffusion SDXL 1.0, Qwen-Image-Edit.
|
||||
|
||||
**Pricing:** Freemium. Pay-as-you-go primary. **$0.0015 per standard image** baseline. Startup Program: up to $10,000 credits. $0.50 starting credits for new users.
|
||||
|
||||
**SDK:** Python SDK (`pip install novita-sdk`). JavaScript not confirmed.
|
||||
|
||||
**Image Delivery:** Via API response. Storage details not specified.
|
||||
|
||||
**Features:** Text-to-image, img2img, image refinement, background elimination, inpainting, upscaling & super-resolution.
|
||||
|
||||
**Unique:** Serverless GPU infrastructure, custom model upload, rapid open-source model integration, Hugging Face integration. Dual-service: Model Inference API + GPU Cloud.
|
||||
|
||||
---
|
||||
|
||||
### 6. Together AI
|
||||
|
||||
**Models:** 40+ image and video models. FLUX.2 (dev, pro, flex), Stable Diffusion 3 Medium, HiDream-I1-Full, Google Imagen, Nano Banana, ByteDance SeeDream.
|
||||
|
||||
**Pricing:** 3-month unlimited free access for FLUX.1 [schnell]. Per-model pricing (not detailed in sources).
|
||||
|
||||
**SDK:** OpenAI-compatible SDKs in Python and JavaScript.
|
||||
|
||||
**Image Delivery:** URLs returned via `response.data.url`.
|
||||
|
||||
**Features:** Image-to-image, multi-reference consistency (FLUX.2 supports 4-reference inputs), brand compliance controls (hex code color matching), reliable text rendering.
|
||||
|
||||
**Batch:** "n" parameter for up to 4 images per request.
|
||||
|
||||
**Unique:** Unified platform (text, image, video in single API), OpenAI-compatible endpoints, production-grade infrastructure.
|
||||
|
||||
---
|
||||
|
||||
## CATEGORY 2: UI-FIRST PLATFORMS
|
||||
|
||||
### 7. Leonardo AI
|
||||
|
||||
**Models:** Leonardo Phoenix, GPT Image 1.5, Lucid Origin. Hosted models include WAN, SVD.
|
||||
|
||||
**Pricing:**
|
||||
- Free: 150 tokens/day (5-8 tokens per image), watermarked images
|
||||
- Apprentice: $12/mo ($10 annual) - 8,500 tokens/month
|
||||
- Artisan: $30/mo ($24 annual) - 25,000 tokens/month
|
||||
- Maestro: $60/mo ($48 annual) - higher limits
|
||||
- API: Separate credits, Pro plan $299/mo for 200,000 credits
|
||||
|
||||
**Features:** Text-to-image, img2img, style morphing, real-time inpainting/outpainting, AI upscaling, real-time canvas, Flow State (no-prompt generation), batch generation, video generation (Motion 2.0).
|
||||
|
||||
**API:** Available for developers. Credit-based.
|
||||
|
||||
**Unique:** "Relaxed Generation" mode for unlimited generations (slower, hosted models only). Custom model training. 18M+ creators.
|
||||
|
||||
**Comparison to Midjourney:** Free tier available (MJ has none). More customization/control options. Leonardo: 5 tiers starting $12; MJ: 4 tiers starting $10.
|
||||
|
||||
---
|
||||
|
||||
### 8. Adobe Firefly
|
||||
|
||||
**Models:**
|
||||
- Firefly Image Model 5 (public beta) - native 4MP, photorealistic, portraits, complex compositions
|
||||
- Firefly Image Model 4 and 4 Ultra - up to 2K
|
||||
- Firefly Video Model - up to 1080p
|
||||
- Partner models: FLUX.1 Kontext, FLUX.2, Google Gemini 2.5 Flash Image, Imagen 3, OpenAI GPT, Runway, ElevenLabs, Topaz Labs, Luma AI, Veo3
|
||||
|
||||
**Pricing:** Free tier available through web app. Creative Cloud integration. "Unlimited generations" mentioned in Dec 2025 update.
|
||||
|
||||
**API:** Firefly Services APIs: Text-to-Image (GA), Avatar (GA), Text-to-Video (beta).
|
||||
|
||||
**Features:** Style references, Prompt to Edit (conversational editing), camera motion reference, video transitions, layered image editing (in dev), generative text edit.
|
||||
|
||||
**Commercial Use:** All Adobe Firefly models marketed as "commercially safe." Content credentials attached to all generated images.
|
||||
|
||||
**Integration:** Photoshop (Generative Fill with multiple models), Generative Upscale (Topaz), Adobe Express.
|
||||
|
||||
**Unique:** Multi-model platform with choice across providers. All-in-one AI creative platform. Partner model integration.
|
||||
|
||||
---
|
||||
|
||||
### 9. Ideogram
|
||||
|
||||
**Models:**
|
||||
- Ideogram 3.0 (March 2025) - highest visual fidelity, best text rendering
|
||||
- Ideogram 2.0 (Aug 2024) - enhanced realism, multiple styles
|
||||
- Ideogram 2a - fastest, speed-optimized
|
||||
|
||||
**Pricing:** Credit-based. Free to start.
|
||||
- 3.0: 4 credits/generation (4 images) = 1 credit/image
|
||||
- 2.0: 2 credits/generation (4 images) = 0.5 credits/image
|
||||
- 2a: 1 credit/generation (4 images) = 0.25 credits/image
|
||||
|
||||
**API:** Not documented in sources.
|
||||
|
||||
**Features:** Superior text rendering (biggest strength), auto style feature, multiple artistic styles (Realistic, 3D, Anime, Design), custom aspect ratios, color palette control, magic prompt algorithm.
|
||||
|
||||
**Unique:** Best-in-class text rendering. Professional design focus (logos, branding, infographics). Vector-style graphics, layout elements.
|
||||
|
||||
**Known Issues:** Sometimes incorrect subject counts. May require re-prompting for surreal/abstract art.
|
||||
|
||||
---
|
||||
|
||||
### 10. OpenAI (DALL-E / GPT-4o)
|
||||
|
||||
**Models:**
|
||||
- GPT-4o - default image generator in ChatGPT (native multimodal integration)
|
||||
- DALL-E 3 - separate tool within ChatGPT
|
||||
|
||||
**Pricing:** Available to ChatGPT Plus ($20/mo), Pro, Team, Free users. API rolling out.
|
||||
|
||||
**Features:**
|
||||
- GPT-4o: Sophisticated editing, image-to-image transformation, accurate text rendering (even paragraphs), anatomically correct figures, precise prompt adherence, conversational refinement
|
||||
- Upload images and request edits with contextual understanding
|
||||
|
||||
**Comparison GPT-4o vs DALL-E 3:**
|
||||
- Text rendering: GPT-4o handles complex layouts; DALL-E 3 struggles with longer passages
|
||||
- Anatomical accuracy: GPT-4o consistent; DALL-E 3 has hand/pose errors
|
||||
- Prompt adherence: GPT-4o more precise
|
||||
|
||||
**Limitations:** Generation speed ~1 minute per image (improving over time).
|
||||
|
||||
---
|
||||
|
||||
### 11. Google Gemini / Imagen
|
||||
|
||||
**Models:**
|
||||
- Gemini 2.5 Flash Image (aka "Nano Banana") - text-to-image, conversational editing, multi-image fusion
|
||||
- Imagen 3 - enterprise via Vertex AI, higher quality
|
||||
- Imagen 4 - Google's top offering as of 2025
|
||||
|
||||
**Pricing:**
|
||||
- Gemini App: Free access for consumers
|
||||
- Imagen API: ~$0.03/image (~33 images/$1)
|
||||
- Vertex AI: Enterprise pricing
|
||||
|
||||
**Access Methods:**
|
||||
- Gemini App (Consumer) - free
|
||||
- Gemini API via Google AI Studio (Developer)
|
||||
- Vertex AI (Enterprise) - full governance, SynthID watermarks
|
||||
|
||||
**Features:** Object removal, relighting, background changes, multi-image fusion, character/style consistency, conversational image edits.
|
||||
|
||||
**Quality Issues:** Independent testing: DALL-E 13.5/15, Stable Diffusion 11/15, Gemini 3/15. Generation time 10+ seconds (vs 4-8s competitors). Struggles with complex prompt adherence.
|
||||
|
||||
**Limitations:**
|
||||
- Bias toward photorealism - often refuses edits on human photos
|
||||
- No on-device generation (cloud required)
|
||||
- Model in public preview status
|
||||
- Cannot prevent model from generating text alongside images
|
||||
|
||||
**Commercial:** Enterprise protections via Vertex AI: SynthID verification, tenancy controls, quotas.
|
||||
|
||||
---
|
||||
|
||||
### 12. Recraft AI
|
||||
|
||||
**Models:** Recraft V3 (aka "Red Panda") - proprietary model. Benchmark: ELO 1172 (vs DALL-E 984).
|
||||
|
||||
**Pricing:**
|
||||
| Plan | Cost | Monthly Credits |
|------|------|-----------------|
| Free | $0 | 50 daily (~1,500/mo) |
| Basic | $10/mo | 1,000 |
| Advanced | $27/mo | 4,000 |
| Pro | $48/mo | 8,400 |
|
||||
|
||||
**Key Differentiator:** Native SVG vector output - direct scalable vector files from prompts. Essential for print, branding, logos.
|
||||
|
||||
**Features:**
|
||||
- Photorealistic + style consistency across assets
|
||||
- Seamless pattern generation (textiles, washi tape)
|
||||
- Background removal/replacement
|
||||
- Image upscaling
|
||||
- Product mockups (t-shirts, mugs, billboards)
|
||||
- Real-time inpainting, color correction
|
||||
- Drag-and-drop editor
|
||||
|
||||
**Speed:** Under 10 seconds. Low-res previews near-instant.
|
||||
|
||||
**API:** Listed as available, but no detailed docs in sources.
|
||||
|
||||
**User Sentiment:** Overwhelmingly positive. G2 rating 4.6. "Best AI generator" quotes. 4M+ users, 700% growth, $30M Series B (May 2025).
|
||||
|
||||
**Limitations:**
|
||||
- No outpainting
|
||||
- No bulk-download/batch export
|
||||
- Blocked in some countries (sanctions)
|
||||
- Limited mobile functionality
|
||||
- Free tier depletes quickly
|
||||
|
||||
**Best For:** Logo/brand design, graphic design, print/pattern design, product mockups, agencies with multiple client brands.
|
||||
|
||||
---
|
||||
|
||||
### 13. Runway
|
||||
|
||||
**Models:**
|
||||
- Gen-3 Alpha: 10 credits/second
|
||||
- Gen-3 Alpha Turbo: 5 credits/second (7x faster, half price, requires input image)
|
||||
- Gen-4 Video: 12 credits/second
|
||||
- Gen-4 Turbo: 5 credits/second
|
||||
- Gen-4.5: Text-to-video (Standard+ plans)
|
||||
|
||||
**Pricing:**
|
||||
| Plan | Cost | Credits/mo | Best For |
|------|------|------------|----------|
| Free | $0 | 125 (one-time) | Testing |
| Standard | $12/mo | 625 | Freelancers |
| Pro | $28/mo | 2,250 | Professionals |
| Unlimited | $76/mo | 2,250 + unlimited relaxed | High-volume |
|
||||
|
||||
**Image vs Video Costs:**
|
||||
- Gen-4 Image 720p: 5 credits (~$0.05)
|
||||
- Gen-4 Image 1080p: 8 credits
|
||||
- Gen-4 Image Turbo: 2 credits
|
||||
- 5-sec video: 25-60 credits
|
||||
- 20-sec Gen-4 video: 240 credits (Turbo: 100)
|
||||
|
||||
**Resolution:** Free/Standard = 720p-1080p. Pro+ = 4K.
|
||||
|
||||
**Features:** Aleph (video editing), Act-Two (performance capture), upscaling to 4K. Watermark-free on paid plans.
|
||||
|
||||
**API:** Not documented in sources.
|
||||
|
||||
**Best For:** Video-first workflows. Freelancers, agencies, studios.
|
||||
|
||||
---
|
||||
|
||||
### 14. Stability AI (Stable Diffusion 3.5)
|
||||
|
||||
**Models:**
|
||||
- SD 3.5 Large: 8.1B parameters, up to 1MP resolution
|
||||
- SD 3.5 Large Turbo: 4-step distilled version, prioritizes speed
|
||||
- SD 3.5 Medium: 2.5B parameters, 9.9 GB VRAM, consumer hardware
|
||||
|
||||
**Licensing:** Stability AI Community License (permissive).
|
||||
|
||||
**Features:** Superior prompt adherence, diverse outputs without extensive prompting, versatile styles (3D, photography, painting, line art), Query-Key Normalization for stability.
|
||||
|
||||
**DreamStudio:** Status in 2025 not detailed in sources.
|
||||
|
||||
---
|
||||
|
||||
## CATEGORY 3: OPEN SOURCE
|
||||
|
||||
### 15. FLUX (Black Forest Labs)
|
||||
|
||||
**Models:**
|
||||
- FLUX.1 (foundational family)
|
||||
- FLUX.1 Schnell (speed-optimized)
|
||||
- FLUX.1 Dev (balanced)
|
||||
- FLUX.1 Pro (commercial)
|
||||
- FLUX.1 Kontext [dev/pro/max] (May 2025) - image editing + generation
|
||||
- FLUX1.1 Pro, FLUX1.1 Pro Ultra (4MP/2K, Ultra + Raw modes)
|
||||
- FLUX.2
|
||||
|
||||
**Licensing:**
|
||||
- FLUX.1 Kontext [dev]: Open-weight (private beta)
|
||||
- FLUX.1 Pro, Kontext [pro/max]: Proprietary, API only
|
||||
|
||||
**Self-Hosting Requirements:**
|
||||
- Original: 16-24GB VRAM recommended, 8-12GB minimum
|
||||
- GGUF quantized: 6GB minimum, can run on 4-6GB with Q2-Q4
|
||||
- System RAM: 16GB minimum, 32GB recommended
|
||||
- Full unquantized: 20GB+ VRAM
|
||||
|
||||
**ComfyUI Integration:** Full support. GGUF loader custom node. Multiple workflow options.
|
||||
|
||||
**ControlNet:** Flux Tools includes Canny and Depth models. XLabs-AI flux-controlnet-collections. InstantX FLUX.1-dev-Controlnet-Union-alpha.
|
||||
|
||||
**LoRA Support:** Yes. Training tools: FluxGym, Replicate flux-dev-lora-trainer, fal.ai flux-lora-general-training.
|
||||
|
||||
**Quality vs Midjourney:** Top-tier prompt understanding, strong photorealism. "Midjourney still has a slight edge in some photorealism tests."
|
||||
|
||||
**Prompt Style:** Verbose, natural language narrative works best. Forgiving, responds well to experimentation.
|
||||
|
||||
---
|
||||
|
||||
### 16. Civitai
|
||||
|
||||
**What is it:** Model marketplace + integrated web-based generator. Hub for Stable Diffusion and Flux models.
|
||||
|
||||
**Buzz Credits System (2025):**
|
||||
- Resource surcharges for LoRA/LyCORIS/embeddings (increased GPU load)
|
||||
- Vidu video: 600 Buzz/generation
|
||||
- Credit card payments paused; alternative methods introduced
|
||||
|
||||
**Models:** SD families, Flux models, Vidu, Wan 2.1, Hunyuan (video). Tens of thousands of checkpoints supported. On-site LoRA trainer.
|
||||
|
||||
**Features:** txt2img, img2img, ControlNet preprocessors (Canny, Depth, Pose), upscalers, weighted LoRA attachments, video generation (T2V, I2V, R2V).
|
||||
|
||||
**Community:** Model marketplace, content showcase, review system, Bounties marketplace, Creator Program monetization.
|
||||
|
||||
**2025 Issues:**
|
||||
- Stricter moderation (April 2025) - payment processor pressure
|
||||
- Real-person likeness removal (May 2025)
|
||||
- Payment disruptions (credit cards paused, ZKP2P paused)
|
||||
|
||||
**API:** Not documented in sources.
|
||||
|
||||
**Commercial Use:** Per-model licensing. Usage Control mode (on-site only, no downloads).
|
||||
|
||||
---
|
||||
|
||||
## CATEGORY 4: AGGREGATORS
|
||||
|
||||
### 17. Poe (Quora)
|
||||
|
||||
**Image Models Available:**
|
||||
- FLUX-pro-1.1 (photorealism)
|
||||
- GPT-Image-1 (painterly, artistic)
|
||||
- Imagen3, Imagen 4
|
||||
- DALL-E 3
|
||||
- Google Gemini 2.5 Flash Image (48% of image gen usage)
|
||||
- Flux Kontext, Seedream 3.0
|
||||
- Runway Gen 4 Turbo, Veo 3
|
||||
- 100+ models total (text, image, voice, video)
|
||||
|
||||
**Pricing (2025):**
|
||||
- Free: 3,000 points/day (resets daily), ~150 messages/day
|
||||
- $4.99/mo: 10,000 points/day
|
||||
- $19.99/mo: 1 million points/month
|
||||
- $49.99/mo: 2.5 million points/month
|
||||
- $99.99/mo: 5 million points/month
|
||||
- $249.99/mo: 12.5 million points/month
|
||||
- Add-on: $30 per 1 million tokens
|
||||
|
||||
**Image Generation Cost:** GPT-4o low-quality 1024x1024: 328 points
|
||||
|
||||
**API:** Released July 2025. Uses existing point-based subscription. OpenAI-compatible chat format.
|
||||
|
||||
**Features:** Multi-model comparison in one interface, custom bot creation without coding, App Creator for building image gen apps.
|
||||
|
||||
**User Complaints:** Credits don't roll over (daily reset), price increases, payment issues for bot creators, bugs.
|
||||
|
||||
**Unique:** All-in-one aggregator - one subscription for multiple premium AI models. Compare outputs side-by-side.
|
||||
|
||||
---
|
||||
|
||||
### 18. Krea.ai
|
||||
|
||||
**What is it:** Multi-functional creative AI suite with real-time generation. Changes creative workflow from "prompt-wait-revise" to active co-creation.
|
||||
|
||||
**Models:** Flux, Veo 3, Kling, Hailuo, Wan, Runway. 1000+ styles, 20+ models total.
|
||||
|
||||
**Pricing:** Free and paid plans available. Free: multiple images/day. Specific tiers not detailed.
|
||||
|
||||
**Key Features:**
|
||||
- **Real-time Canvas:** Split interface - canvas for input, AI render on other side. Images evolve as you draw/modify. "AI Strength" slider for control.
|
||||
- **Speed:** Images in <50ms, sets in ~7 seconds. Flux generates 1024px in 3 seconds.
|
||||
- **Enhancer:** Upscale images/videos up to 22K resolution. Premium: 4K/8K.
|
||||
- **Generative Editing:** In/out-painting, object add/remove, style transfer.
|
||||
- **Real-time Video:** Dynamic clips from text, images, or webcam. Abstract motion backgrounds, cinemagraphs.
|
||||
|
||||
**User Sentiment:** Overwhelmingly positive. "Best AI imaging yet." "Outstanding real-time generation." Professional users praise controllability.
|
||||
|
||||
**Commercial Use:** Confirmed for commercial purposes. Supports professional team workflows.
|
||||
|
||||
**Best For:** Designers (rapid iteration), AI artists (precise control), concept artists (sketch to textured art in seconds), teams (moodboard to final in minutes).
|
||||
|
||||
**Unique:** Real-time interactive workflow. Industry leader in real-time engine.
|
||||
|
||||
---
|
||||
|
||||
### 19. Freepik AI
|
||||
|
||||
**What is it:** All-in-one creative platform combining AI generation with stock assets, templates, and editing tools.
|
||||
|
||||
**Models:**
|
||||
- Mystic (Mystic 2.5) - proprietary, fine-tuned on Flux/SD/Magnific.ai. 2K resolution default.
|
||||
- Flux and Flux 1.1
|
||||
- Ideogram
|
||||
- Classic
|
||||
|
||||
**Key Differentiator:** Excellent text rendering in images - outperforms Midjourney and DALL-E 3.
|
||||
|
||||
**Features:**
|
||||
- **Generation:** Text-to-image, multiple styles (photorealistic, 3D, illustration)
|
||||
- **Editing:** Reimagine (4 variations), Resize/outpainting, Retouch, Background remover, Upscaler (to 4K)
|
||||
- **Additional Tools:** AI Video (powered by Google Veo), AI Voice/Audio, Sketch-to-Image, Custom Characters, Custom Style (LoRA), Mockup Generator, AI Icon Generator, Video Upscaler
|
||||
|
||||
**Pricing:** Mystic requires paid subscription. Specific tiers not detailed.
|
||||
|
||||
**Quality:** Photorealistic results, especially portraits. "National Geographic quality" for realistic scenes. Not as refined as Firefly or Midjourney's cinematic style in some cases.
|
||||
|
||||
**Best For:** Photorealistic content, professional marketing, 3D visualization, text-inclusive designs, all-in-one design workflows.
|
||||
|
||||
**API:** Not documented in sources.
|
||||
|
||||
---
|
||||
|
||||
## MIDJOURNEY STATUS (January 2026)
|
||||
|
||||
**Confirmed:**
|
||||
- Web interface operational at midjourney.com
|
||||
- Mobile apps available (iOS, Android)
|
||||
- Discord still available but NOT required
|
||||
- **NO official API exists**
|
||||
|
||||
**Pricing:**
|
||||
- Basic: $10/mo (limited GPU time)
|
||||
- Standard: $30/mo
|
||||
- Pro: $60/mo
|
||||
- Mega: $120/mo
|
||||
|
||||
---
|
||||
|
||||
## KEY INSIGHTS FOR ARTICLE
|
||||
|
||||
### Pricing Comparison (Cost per Image - API)
|
||||
| Service | Cheapest Option | Notes |
|---------|-----------------|-------|
| Runware | $0.0006/image (FLUX Schnell) | 1,666 images/$1 |
| Novita AI | $0.0015/image | Baseline rate |
| Replicate | ~$0.003/image | 30 images/$1 |
| fal.ai | $0.03/image (Seedream V4) | 33 images/$1 |
| Gemini/Imagen | ~$0.03/image | Via API |
|
||||
|
||||
### Pricing Comparison (Subscriptions)
|
||||
| Service | Free Tier | Paid Starting |
|---------|-----------|---------------|
| Recraft | 50/day | $10/mo |
| Leonardo AI | 150 tokens/day | $12/mo |
| Runway | 125 one-time | $12/mo |
| Poe | 3,000 pts/day | $4.99/mo |
| Adobe Firefly | Yes (web) | Creative Cloud |
| Ideogram | Yes | Credit-based |
| Krea.ai | Yes | Not specified |
|
||||
|
||||
### Free Tiers Summary
|
||||
- Leonardo AI: 150 tokens/day
|
||||
- Runware: $10 free credits (~1,000 images)
|
||||
- Segmind: $5 free credits
|
||||
- fal.ai: Free tier available
|
||||
- Together AI: 3 months unlimited FLUX.1 Schnell
|
||||
- Poe: 3,000 points/day
|
||||
- Adobe Firefly: Free web access
|
||||
- Ideogram: Free to start
|
||||
- Recraft: 50 daily credits
|
||||
- Runway: 125 credits one-time
|
||||
- Krea.ai: Multiple images/day
|
||||
- Gemini: Free in Gemini app
|
||||
|
||||
### Best for Developers (API)
|
||||
1. **Replicate** - Official Models program, Cog tool, zero-scale
|
||||
2. **fal.ai** - TypeScript SDK, fastest speeds, day-zero models
|
||||
3. **Runware** - Cheapest per-image, unified API for 400K models
|
||||
4. **Together AI** - OpenAI-compatible, unified text/image/video
|
||||
|
||||
### Best for Text in Images
|
||||
1. **Ideogram** (best-in-class)
|
||||
2. **Freepik Mystic** (outperforms MJ/DALL-E)
|
||||
3. **FLUX models**
|
||||
4. **GPT-4o**
|
||||
5. **Recraft** (especially for branding)
|
||||
|
||||
### Best for Vector Graphics
|
||||
1. **Recraft** - Native SVG output
|
||||
|
||||
### Best for Real-Time Generation
|
||||
1. **Krea.ai** - Industry leader, <50ms generation
|
||||
|
||||
### Best for Commercial Safety
|
||||
1. **Adobe Firefly** - "Commercially safe" models, content credentials
|
||||
|
||||
### Self-Hosting Options
|
||||
- FLUX: 6-24GB VRAM depending on quantization
|
||||
- SD 3.5 Medium: 9.9GB VRAM
|
||||
- ComfyUI: Most popular interface
|
||||
- Civitai: Model marketplace + generator
|
||||
|
||||
### Aggregators Value Proposition
|
||||
- **Poe:** One subscription for FLUX, GPT-Image, Imagen, DALL-E, etc. API available.
|
||||
- **Krea.ai:** Real-time canvas + multiple models (Flux, Veo 3, Kling, Runway)
|
||||
- **Freepik AI:** Multiple models + stock assets + editing tools
|
||||
- **Adobe Firefly:** Partner models (FLUX.2, Gemini, GPT) + Adobe ecosystem
|
||||
|
||||
### Video Capabilities
|
||||
- **Runway:** Primary focus, Gen-3/Gen-4 models
|
||||
- **Leonardo AI:** Motion 2.0
|
||||
- **Krea.ai:** Real-time video from text/images/webcam
|
||||
- **Adobe Firefly:** Video model (1080p)
|
||||
- **Poe:** Access to Veo 3, Runway Gen 4, Kling
|
||||
|
|
@ -1,475 +0,0 @@
|
|||
# Best Midjourney Alternatives in 2026
|
||||
|
||||
Midjourney set the standard for AI image generation. But it has limitations: no official API, Discord-first interface, no free tier. In 2026, dozens of alternatives exist for different needs — whether you want a simple UI, need programmatic access, prefer self-hosting, or want to try multiple models through one platform.
|
||||
|
||||
This guide covers 19 tools across four categories. All pricing accurate as of January 2026.
|
||||
|
||||
## UI-First Platforms
|
||||
|
||||
These services have their own web or app interfaces. No coding required. Best for quick generation and iteration.
|
||||
|
||||
### [Midjourney](https://midjourney.com) — The Baseline
|
||||
|
||||

|
||||
|
||||
The platform that defined AI art. 21M Discord members, ~1.4M paying subscribers, 26.8% market share. What keeps users here: superior photorealism with cinematic lighting, rich textures, and moody atmospheres that feel emotionally resonant. The community-driven Discord approach created an ecosystem where artists inspire each other in real-time — you see what others create, learn from their prompts, iterate together.
|
||||
|
||||
The tradeoff? Text rendering remains weak (~30% accuracy). The web app launched alongside Discord, but the interface still lacks the project organization and asset management that web-native competitors offer. But for pure artistic quality and consistent aesthetic across generations, it's still the benchmark others chase.
|
||||
|
||||
**Pricing:** $10/mo (Basic) → $120/mo (Mega). Cost per image: ~$0.03-0.05 in Fast mode.
|
||||
|
||||
**Key features:** V7 model with video generation (5-21 sec clips). Style reference (--sref) and character reference (--cref) for consistency. Omni-reference system. Web app now available alongside Discord.
|
||||
|
||||
**Best for:** Artistic quality, community feedback, consistent aesthetic across projects.
|
||||
|
||||
`Style ref` `Character ref` `Video` `Upscaling`
|
||||
|
||||
### [Leonardo AI](https://leonardo.ai)
|
||||
|
||||

|
||||
|
||||
18M+ creators use Leonardo for game assets and concept art. What sets it apart: **granular control over every aspect of generation**. The Image Guidance suite offers six reference types (Style, Content, Character, Pose, Depth, Edge) — upload a reference and the model respects it. Multiple base models (Phoenix for photorealism, Anime XL for stylized work) with adjustable parameters. Image-to-image workflows with strength sliders. Style LoRAs (Elements) with tunable influence. This depth of customization gives control that Midjourney's simpler interface doesn't offer.
|
||||
|
||||
Users love the balance between automation and creative authority. You maintain your unique voice through robust customization rather than surrendering control to the algorithm. The real-time Canvas with inpaint/outpaint means less post-production work in external editors.
|
||||
|
||||
**Free tier:** 150 tokens/day (resets daily).
|
||||
**Paid:** $12-60/mo. API access at $299/mo.
|
||||
|
||||
**Key features:** Flow State real-time generation. Image Guidance suite with 6 reference types. Real-time Canvas with inpaint/outpaint. Motion 2.0 for video. Phoenix model for quality. Elements (style LoRAs with adjustable strength).
|
||||
|
||||
**Best for:** Game developers, concept artists, anyone who needs character consistency across multiple generations.
|
||||
|
||||
`Free tier` `API` `Video` `Style ref` `Pose ref` `Character ref` `Content ref` `Depth ref` `Inpaint` `Outpaint` `Canvas` `Upscaling`
|
||||
|
||||
### [Adobe Firefly](https://firefly.adobe.com)
|
||||
|
||||

|
||||
|
||||
The enterprise-safe option. Firefly is trained only on Adobe Stock, public domain, and licensed content — no scraped web data. This matters for commercial work: IP indemnification on qualifying plans means legal protection if copyright questions arise.
|
||||
|
||||
Firefly 5 generates photorealistic images at native 4MP resolution with strong anatomical accuracy. The **Prompt to Edit** feature lets you describe changes in natural language — "move the tree," "swap the sky" — and watch them happen instantly. Content Credentials (C2PA standard) prove AI origin on every image, increasingly important as AI detection becomes standard in publishing.
|
||||
|
||||
For Creative Cloud users, the deep integration with Photoshop and Illustrator eliminates the export-import dance between generation and editing tools.
|
||||
|
||||
**Free tier:** Limited via web app.
|
||||
**Paid:** Creative Cloud subscription. IP indemnification on qualifying plans.
|
||||
|
||||
**Key features:** Firefly 5 model (4MP native resolution). Content Credentials on all images (C2PA standard proving AI origin). Partner models include FLUX.2, Gemini, GPT. Deep integration with Photoshop, Illustrator, and Creative Cloud. Style Kits for brand consistency. Prompt to Edit natural language editing.
|
||||
|
||||
**Best for:** Commercial projects where copyright matters. Adobe users who want generation inside their existing workflow.
|
||||
|
||||
`Free tier` `API` `Commercial safe` `Style ref` `Inpaint` `Upscaling`
|
||||
|
||||
### [ChatGPT / GPT-4o](https://chatgpt.com)
|
||||
|
||||

|
||||
|
||||
GPT-4o generates images natively inside ChatGPT — the same interface millions already use daily. No separate app, no new subscription, no context switch. Need a quick mockup while discussing a project? Generate it mid-conversation. The fundamental difference: conversational iteration. "Make the sky darker" works exactly as you'd expect, and the model maintains context across edits. Where other tools require re-prompting from scratch, GPT-4o remembers what you're building.
|
||||
|
||||
The breakthrough is text rendering. Earlier models mangled typography; GPT-4o handles it cleanly — readable signs, labels, captions within images. Anatomical accuracy (hands, faces) has improved dramatically. The tradeoff is speed: ~1 minute per generation vs seconds on dedicated platforms.
|
||||
|
||||
Over 700 million images generated in a single week shows the adoption curve. For users already paying for ChatGPT Plus, it's image generation without another subscription.
|
||||
|
||||
**Free tier:** Limited access for free users.
|
||||
**Paid:** ChatGPT Plus $20/mo.
|
||||
|
||||
**Key features:** Best-in-class text rendering in images. Strong anatomical accuracy (hands, faces). Conversational editing with context preservation. C2PA metadata for provenance. Multi-turn generation maintaining character consistency.
|
||||
|
||||
**Best for:** Iterative refinement through conversation. Images with readable text. Users who already pay for ChatGPT Plus.
|
||||
|
||||
`Free tier` `Text` `Chatbot interface` `Inpaint`
|
||||
|
||||
### [Ideogram](https://ideogram.ai)
|
||||
|
||||

|
||||
|
||||
Founded by former Google Brain researchers specifically to solve typography in AI images. Where Midjourney achieves roughly 30% text accuracy, Ideogram hits ~90%. This isn't incremental improvement — it's a different category of capability.
|
||||
|
||||
The **Style Reference** system lets you upload up to 3 reference images to replicate colors, textures, and mood. **Random Styles** accesses 4.3 billion+ combinations for inspiration. **Savable Style Codes** store exact visual styles for reuse — critical for brand consistency across campaigns.
|
||||
|
||||
For logos, branding, marketing materials — anything where text needs to be readable — Ideogram delivers production-ready results from the first attempt. Less time fixing text errors in Photoshop.
|
||||
|
||||
**Free tier:** Yes, credit-based.
|
||||
**Paid:** Credit packs. Cost per image: 0.25-1 credit.
|
||||
|
||||
**Key features:** Ideogram 3.0 model with industry-leading text rendering. Style Reference (up to 3 images). 4.3B+ Random Style combinations. Savable Style Codes. Magic Fill and Extend editing. Multiple style modes: Realistic, Design, 3D, Anime.
|
||||
|
||||
**Best for:** Logos, branding, marketing materials — anything where text needs to be readable.
|
||||
|
||||
`Free tier` `Text` `Inpaint`
|
||||
|
||||
### [Google Gemini / Imagen](https://gemini.google.com)
|
||||
|
||||

|
||||
|
||||
Google's image generation spans multiple products. Gemini 2.5 Flash Image (nicknamed "Nano Banana") became popular in 2025 for a specific reason: **multi-image fusion**. Upload multiple images, describe how to combine them, and the model merges elements coherently. Restoring rooms with new color schemes, combining product shots into lifestyle scenes — use cases that required Photoshop skills now work through natural language.
|
||||
|
||||
**Character consistency** across generations — historically difficult in AI synthesis — works reliably. The semantic understanding from Gemini's world knowledge means the model grasps context, not just visual patterns. Strong text rendering, especially on the Pro model.
|
||||
|
||||
For Google ecosystem users, the integration across Gemini app, Google Photos, and developer APIs creates a seamless workflow.
|
||||
|
||||
**Models:** Gemini 2.5 Flash Image (speed-optimized), Gemini 3 Pro Image (quality-optimized), Imagen 3/4 (enterprise via Vertex AI).
|
||||
|
||||
**Free tier:** Gemini app (with watermark), AI Studio free prototyping.
|
||||
**Paid:** ~$0.03/image via API.
|
||||
|
||||
**Key features:** Multi-image fusion. Character and style consistency across edits. Search-grounded generation (Pro model). Strong text rendering. SynthID invisible watermarks. Natural language editing.
|
||||
|
||||
**Best for:** Google ecosystem users. Developers who want conversational editing with API access. Multi-image composition workflows.
|
||||
|
||||
`Free tier` `API` `Text` `Chatbot interface` `Character ref` `Style ref`
|
||||
|
||||
### [Recraft AI](https://recraft.ai)
|
||||
|
||||

|
||||
|
||||
One of only two AI tools with native SVG vector output (the other being Adobe Firefly). 4M+ users, mostly designers. The difference matters: vectors scale infinitely without quality loss. A logo generated here works on business cards and billboards without creating multiple file versions.
|
||||
|
||||
The Recraft-20B SVG model understands design principles, not just visual patterns — clean vector paths that require minimal touch-up work. Generated SVGs open directly in Illustrator and Figma for refinement. According to Google's Web Performance research, SVG icons load 73% faster than equivalent PNGs and use 85% less bandwidth.
|
||||
|
||||
**Precise color control through hex codes** means brand palettes stay consistent across generated assets. For icon sets, patterns, and anything that needs infinite scalability — there's no real alternative.
|
||||
|
||||
**Free tier:** 50 generations/day.
|
||||
**Paid:** $10-48/mo.
|
||||
|
||||
**Key features:** True vector generation — export actual SVG files, not rasterized images. V3 model with strong prompt adherence. Pattern generation. Product mockups. Brand consistency tools with hex color control. Accurate text rendering. AI vectorizer converts existing PNGs/JPGs to SVG.
|
||||
|
||||
**Best for:** Logo design, icon sets, patterns, anything that needs to scale infinitely.
|
||||
|
||||
`Free tier` `API` `Vector` `Text` `Inpaint` `Outpaint` `Upscaling`
|
||||
|
||||
### [Reve AI](https://reve.ai)
|
||||
|
||||

|
||||
|
||||
Launched March 2025, immediately claimed #1 on Artificial Analysis's Image Arena with an ELO score of 1167 — outperforming Midjourney v6.1, Nano Banana, and Seedream 4.0 in realism and text handling benchmarks. The pricing is aggressive: $5 for 500 images works out to $0.01 per image.
|
||||
|
||||
What's unusual: **full commercial rights on all outputs, including free tier**. Most platforms restrict commercial use to paid plans. Reve's 12B parameter hybrid model delivers prompt adherence that rivals much larger systems, with natural-language editing and image remixing (combine multiple images into new compositions).
|
||||
|
||||
For budget-conscious creators who still need quality, it's the value play without quality compromise.
|
||||
|
||||
**Free tier:** 100 credits on signup + 20/day.
|
||||
**Paid:** $5 for 500 images.
|
||||
|
||||
**Key features:** 12B parameter hybrid model. Full commercial rights on all images, including free tier. Natural language editing. Image remixing (combine multiple images). Enhanced text rendering. Strong prompt adherence.
|
||||
|
||||
**Best for:** Budget-conscious creators who still need quality. Commercial projects on a tight budget.
|
||||
|
||||
`Free tier` `Commercial safe` `Text` `Object selection`
|
||||
|
||||
## Open Source / Self-Hosted
|
||||
|
||||
Run models on your own hardware. Higher setup cost, lower per-image cost at scale. Full control over the pipeline.
|
||||
|
||||
### [FLUX](https://bfl.ai) (Black Forest Labs)
|
||||
|
||||

|
||||
|
||||
The community favorite for self-hosting. Black Forest Labs publishes open-weight models alongside commercial offerings — their philosophy of "sustainable open innovation" drives adoption among developers who want control without vendor lock-in.
|
||||
|
||||
FLUX.2's standout capability: **multi-reference support combining up to 10 images simultaneously** while maintaining character, product, and style consistency. The architecture pairs a Mistral-3 24B vision-language model with a rectified flow transformer — it understands real-world physics, lighting, perspective, and material properties rather than just pattern matching.
|
||||
|
||||
**Text and typography mastery** makes complex infographics, memes, and UI mockups with legible fine text work reliably. The community has developed FP8 quantizations that reduce VRAM requirements by 40% while improving performance — running state-of-the-art generation on consumer hardware.
|
||||
|
||||
**Models:** Schnell (speed), Dev (balanced, most popular), Pro (commercial license), Kontext (editing/context-aware).
|
||||
|
||||
**Hardware requirements:** Full models need 16-24GB VRAM. Quantized versions (GGUF) run on 6-8GB, with Q2 quantization possible on 4GB. RAM: 16GB minimum, 32GB recommended.
|
||||
|
||||
**Key features:** ComfyUI as the primary interface. Multi-reference support (up to 10 images). ControlNet support via Flux Tools (Canny, Depth) and XLabs collections. LoRA training through FluxGym, Replicate trainer, or fal.ai. Top-tier prompt understanding. 32K token context on Pro model.
|
||||
|
||||
**Best for:** Developers who want maximum control. High-volume generation where per-image cost matters. Custom model training.
|
||||
|
||||
`API` (via providers) `Style ref` `Pose ref` `Depth ref` `Inpaint`
|
||||
|
||||
### [Stable Diffusion 3.5](https://stability.ai)
|
||||
|
||||

|
||||
|
||||
The foundation model that democratized AI image generation. What Stable Diffusion 3.5 brings: a **Multimodal Diffusion Transformer (MMDiT) architecture** that fundamentally improves how the model understands relationships between text and images. Legible, contextually integrated text — the long-standing challenge — now works.
|
||||
|
||||
Three variants for different hardware realities: Large (8.1B params, professional-grade), Large Turbo (4-step fast generation), and Medium (runs on 9.9GB VRAM — standard consumer GPUs). The **permissive Community License** enables commercial and research applications without enterprise agreements.
|
||||
|
||||
The ecosystem advantage is unmatched: thousands of fine-tunes, LoRAs, and ControlNets built by the community. DreamBooth training works with as few as five images. For developers wanting to customize rather than use off-the-shelf, no other model has this depth of community tooling.
|
||||
|
||||
**Models:** Large (8.1B params), Turbo (4-step fast generation), Medium (9.9GB VRAM requirement).
|
||||
|
||||
**Hosted options:** DreamStudio (official), Stability AI API, plus dozens of third-party UIs.
|
||||
|
||||
**Key features:** MMDiT architecture for superior prompt adherence. Diverse style range (3D, photography, painting, line art). Massive ecosystem of fine-tunes, LoRAs, and ControlNets. Query-Key Normalization for simplified fine-tuning. Runs on consumer hardware.
|
||||
|
||||
**Best for:** Local deployment. Custom pipeline development. Access to the largest model ecosystem.
|
||||
|
||||
`API` (via providers) `Style ref` `Pose ref` `Depth ref` `Inpaint`
|
||||
|
||||
### [Civitai](https://civitai.com)
|
||||
|
||||

|
||||
|
||||
Not a model — a marketplace and community. Tens of thousands of checkpoints, fine-tunes, and LoRAs for SD and FLUX families. What makes it essential: finding niche styles that don't exist in base models. A specific anime aesthetic, a particular photography style, a character concept — someone has probably trained a model for it.
|
||||
|
||||
The platform evolved into an **all-in-one hub** in 2025: on-site image and video generation (including Vidu, Wan 2.1, Hunyuan), integrated LoRA trainer (including video LoRA), and creator monetization through the revised Creator Program. Usage Control lets model creators restrict how their work is used.
|
||||
|
||||
**Important 2025 context:** Stricter moderation policies restrict real-person likenesses and extreme content. Credit card payments were paused; ZKP2P alternatives exist but add friction. Verify current status before building production workflows around it.
|
||||
|
||||
**Free tier:** Yes, Buzz credits for on-site generation.
|
||||
|
||||
**Key features:** Browse tens of thousands of checkpoints: SD families, FLUX variants, video models. Generate directly on-site: txt2img, img2img, ControlNet. Built-in LoRA trainer (including video). Community features: Bounties, Creator Program for monetization. Per-model licensing with Usage Control.
|
||||
|
||||
**Best for:** Finding niche styles. Community fine-tunes. Exploring what's possible before training your own.
|
||||
|
||||
`Free tier` `Inpaint`
|
||||
|
||||
## API-First Platforms
|
||||
|
||||
Midjourney has no official API. Third-party wrappers exist but violate ToS and risk account bans. These platforms provide legitimate programmatic access to image generation.
|
||||
|
||||
Key considerations when choosing: pricing model (per-image vs GPU-time), SDK support, model selection, latency.
|
||||
|
||||
### [Replicate](https://replicate.com)
|
||||
|
||||

|
||||
|
||||
The model marketplace for developers. 50,000+ production-ready models spanning image generation, transcription, and beyond. The appeal: **run any model with one line of code**, no GPU configuration or backend setup required.
|
||||
|
||||
Replicate's Cog tool lets you package and deploy custom models as production APIs with automatic scaling and versioning. The **zero-scale economics** mean you pay only when generating — no idle capacity costs. Fine-tuning with custom data creates on-brand outputs without infrastructure expertise.
|
||||
|
||||
**November 2025 milestone:** Cloudflare agreed to acquire Replicate. The integration will make all 50,000+ models available directly to Cloudflare Workers AI users — building entire full-stack applications in one place.
|
||||
|
||||
**Pricing:** Pay-per-output, varies by model. Cheap models: ~$0.003/image. Premium models (like Imagen): $0.03+/image.
|
||||
|
||||
**SDK:** Python, JavaScript.
|
||||
|
||||
**Key features:** 50,000+ production-ready models via Official Models program. Cog tool for deploying custom models. Zero-scale economics — pay only when generating. Fine-tuning support. NVIDIA H100 GPU support for demanding workloads. Cloudflare acquisition expands reach.
|
||||
|
||||
**Gotcha:** Stripe payment issues reported in some regions.
|
||||
|
||||
**Best for:** Model variety. Serverless deployment. Teams that need zero-scale economics.
|
||||
|
||||
`API`
|
||||
|
||||
### [fal.ai](https://fal.ai)
|
||||
|
||||

|
||||
|
||||
Speed-focused platform. 600+ models including FLUX.2, often with day-zero access to new releases. The technical edge: **inference engine up to 10x faster** than traditional deployments through 100+ custom CUDA kernels optimized for diffusion transformers.
|
||||
|
||||
For developers, zero DevOps friction matters: no GPU configuration, no cold starts, no autoscaler setup. The TypeScript SDK (@fal-ai/client) enables rapid prototyping with minimal boilerplate. The platform scales from prototypes to 100M+ daily inference calls with 99.99% uptime.
|
||||
|
||||
fal's FLUX.2 [dev] Turbo is **6x more efficient** than the full-weight model while being **3-10x cheaper** than comparable APIs. December 2025 funding: $140M Series D at $4.5B valuation from Sequoia, NVIDIA, Kleiner Perkins, and a16z — validation of the speed-first approach.
|
||||
|
||||
**Users:** 2M+ developers.
|
||||
|
||||
**Pricing:** $0.03-0.04/image for quality models (Seedream, Kontext). GPU hourly rates available.
|
||||
|
||||
**SDK:** TypeScript (@fal-ai/client), Python, Swift.
|
||||
|
||||
**Key features:** 10x faster inference via custom CUDA kernels. Sub-second generation for Schnell. Day-zero access to new model releases. No cold starts. Unified API across 600+ models. Real-time video generation with temporal consistency.
|
||||
|
||||
**Best for:** Speed-critical applications. TypeScript developers. Teams that want the latest models first.
|
||||
|
||||
`API`
|
||||
|
||||
### [Runware](https://runware.ai)
|
||||
|
||||

|
||||
|
||||
The cost leader. Their **Sonic Inference Engine** runs on AI-native hardware (custom servers, storage, networking, cooling) achieving near-100% GPU utilization — effectively halving cost per generation compared to traditional data centers.
|
||||
|
||||
The numbers: **$0.0006/image for FLUX Schnell** — that's 1,666 images per dollar. Sub-second inference times. 0.1s LoRA cold starts. A unified API provides access to 300,000+ models including open-source options from Civitai.
|
||||
|
||||
The pricing model differs fundamentally from competitors: **cost-per-image rather than compute-time billing**. You pay for actual outputs regardless of processing overhead. Enterprise customers report $100,000+ monthly savings migrating from competitors.
|
||||
|
||||
**Models:** 300,000+ via unified API (SD, FLUX, Imagen).
|
||||
|
||||
**Pricing:** $0.0006/image for FLUX Schnell. $10 free credits to start (~16,000+ images).
|
||||
|
||||
**SDK:** REST API, WebSocket.
|
||||
|
||||
**Key features:** Sonic Inference Engine on custom hardware. Sub-second inference. 0.1s LoRA cold starts. Per-image pricing (not compute-time). Zero-day access to new releases. Runs on renewable energy.
|
||||
|
||||
**Best for:** High-volume production. Cost-sensitive projects. Startups watching burn rate.
|
||||
|
||||
`API`
|
||||
|
||||
### [Segmind](https://segmind.com)
|
||||
|
||||

|
||||
|
||||
Workflow-focused platform. **PixelFlow** is the differentiator: a cloud-based drag-and-drop builder where you create generative AI pipelines visually, then convert them directly into production APIs. No code required to build complex multi-step workflows.
|
||||
|
||||
The parallel processing capability runs a single input through multiple models simultaneously — generate different variations using multiple SDXL checkpoints at once. Combine text, image, audio, and video generation in unified workflows: product descriptions → promotional images → accompanying text → video — all without switching tools.
|
||||
|
||||
500+ AI models accessible, per-second billing (~$0.002/s on A100), and 338+ pre-built templates covering AI sketch-to-3D, photo restoration, portrait video, product ads, and infographics.
|
||||
|
||||
**Models:** 500+ including FLUX, Seedream, Ideogram, GPT-Image.
|
||||
|
||||
**Pricing:** Per-second billing, ~$0.002/s on A100.
|
||||
|
||||
**Free tier:** $5 free credits.
|
||||
|
||||
**SDK:** JavaScript, Python, Swift.
|
||||
|
||||
**Key features:** PixelFlow visual workflow builder. Parallel processing through multiple models. Publish workflows as API endpoints. Multimodal AI integration (text, image, audio, video). 338+ pre-built templates. Fine-tuning support.
|
||||
|
||||
**Best for:** Complex generation pipelines. Teams building custom image processing workflows.
|
||||
|
||||
`Free tier` `API`
|
||||
|
||||
### [Novita AI](https://novita.ai)
|
||||
|
||||

|
||||
|
||||
Budget option with startup-friendly programs. The **Agent Sandbox** launched in 2025 delivers millisecond-level startup times for AI agent workloads — optimized for high-concurrency tasks where traditional cold starts kill performance.
|
||||
|
||||
10,000+ image models with rapid integration of trending open-source releases (DeepSeek, Qwen, Llama 3) means access to cutting-edge tools without corporate release cycle delays. The dual-service model combines ready-to-use inference APIs with GPU cloud infrastructure for custom development.
|
||||
|
||||
The Startup Program offers up to **$10,000 in credits** — meaningful runway for early-stage teams validating AI-powered features.
|
||||
|
||||
**Models:** 10,000+ image models plus LLMs, video, audio.
|
||||
|
||||
**Pricing:** $0.0015/image baseline.
|
||||
|
||||
**SDK:** Python.
|
||||
|
||||
**Key features:** Agent Sandbox with millisecond startup times. Serverless GPU endpoints. Dedicated Endpoints for custom models and LoRA adapters. Function calling and structured outputs across LLMs. Startup Program with $10k credits.
|
||||
|
||||
**Best for:** Early-stage startups. Budget-constrained projects. High-concurrency agent workflows.
|
||||
|
||||
`API`
|
||||
|
||||
### [Together AI](https://together.ai)
|
||||
|
||||

|
||||
|
||||
Unified AI platform covering text, image, and video generation. The strategic advantage: **OpenAI-compatible endpoints** make it a drop-in replacement for teams migrating from proprietary APIs. Familiar SDK format, minimal code changes.
|
||||
|
||||
Inference runs **up to 4x faster** than traditional deployments through speculative decoding, quantization, and FP8 kernels. Browser-based fine-tuning launched in 2025 — customize models with your own data without Python SDK installation. The data preprocessing engine improved by up to 32% for large-scale training.
|
||||
|
||||
200+ open-source models across text, code, image, and multimodal categories. Pay-as-you-go with no minimums enables experimentation; 99.9% SLA availability handles production workloads.
|
||||
|
||||
**Models:** 200+ (FLUX.2, SD3, Imagen, SeeDream, plus text and code models).
|
||||
|
||||
**Free tier:** 3 months free FLUX.1 Schnell.
|
||||
|
||||
**SDK:** OpenAI-compatible (Python, JavaScript).
|
||||
|
||||
**Key features:** OpenAI-compatible endpoints for easy migration. 4x faster inference. Browser-based fine-tuning without SDK. Direct preference optimization (DPO) support. Integration with Hugging Face Hub. 99.9% SLA.
|
||||
|
||||
**Best for:** Teams standardized on OpenAI SDK. Projects needing text + image + video from one provider. Easy migration from proprietary APIs.
|
||||
|
||||
`Free tier` `API`
|
||||
|
||||
### [Banatie](https://banatie.app)
|
||||
|
||||

|
||||
|
||||
Developer-native image generation built for AI coding workflows.
|
||||
|
||||
The problem Banatie solves: generating images means leaving your IDE, switching to an external tool, downloading files, organizing them manually. This context-switching breaks flow, especially when you're deep in a Claude Code or Cursor session.
|
||||
|
||||
Banatie integrates directly into your development environment. MCP Server connects to Claude Code, Cursor, and other MCP-compatible tools — generate images without leaving your editor. REST API for standard HTTP access. Prompt URLs let you generate images via URL parameters for on-demand generation. SDK and CLI tools handle automation in build pipelines.
|
||||
|
||||
The platform enhances your prompts automatically, delivers images through a built-in CDN globally, and organizes everything by project. Use @name references to maintain visual consistency across project images — reference a character or style once, use it everywhere.
|
||||
|
||||
Where other API platforms focus on model variety (Replicate), speed (fal.ai), or cost (Runware), Banatie focuses on workflow. MCP integration, built-in CDN, and Prompt URLs are unique to this platform.
|
||||
|
||||
**Best for:** Developers using AI coding tools who want image generation without leaving their editor.
|
||||
|
||||
`API`
|
||||
|
||||
## Aggregators
|
||||
|
||||
One subscription, multiple models. Compare outputs side-by-side. Good for exploration and finding the right model for your use case.
|
||||
|
||||
### [Poe](https://poe.com) (Quora)
|
||||
|
||||

|
||||
|
||||
100+ models through one interface, including FLUX-pro, GPT-Image, Imagen 3/4, DALL-E 3, Gemini. The fundamental advantage: **compare outputs from different models within a single conversation** without managing separate subscriptions.
|
||||
|
||||
What sets Poe apart from simple aggregators: **group chats supporting up to 200 users across 200+ AI models simultaneously**. Families planning trips with specialized search models, creative teams brainstorming with various image generators — collaborative AI workflows that don't exist elsewhere.
|
||||
|
||||
Custom bot creation lets you build chatbots using prompts and existing models as a base. The July 2025 API release uses OpenAI-compatible format for developer integration. Real-time chat sync across devices maintains context when switching from desktop to mobile.
|
||||
|
||||
**Free tier:** 3,000 points/day (resets daily, doesn't roll over).
|
||||
**Paid:** $4.99-249.99/mo.
|
||||
|
||||
**API:** Released July 2025, OpenAI-compatible format.
|
||||
|
||||
**Key features:** 100+ models including major providers. Multi-model comparison in one chat. Group chats for 200 users across 200+ models. Custom bot creation. App Creator for building simple tools. Real-time cross-device sync.
|
||||
|
||||
**Best for:** Exploring different models before committing. One subscription for access to everything. Collaborative multi-model workflows.
|
||||
|
||||
`Free tier` `API` `Chatbot interface`
|
||||
|
||||
### [Krea.ai](https://krea.ai)
|
||||
|
||||

|
||||
|
||||
Real-time generation leader. The core innovation: **draw on the canvas and watch AI respond in under 50ms**. This transforms image generation from "prompt-wait-revise" into active creative sculpting. You see results instantly, making iteration feel like playing an instrument rather than operating a vending machine.
|
||||
|
||||
The **AI Strength slider** is critical — balance how closely AI follows your sketch versus how much creative freedom it exercises. Designers rapidly iterate on logos, layouts, prototypes by painting primitives and seeing instant results. Concept artists convert rough 3D models into fully textured concept art in seconds.
|
||||
|
||||
Beyond real-time generation: in/out-painting, style transfer, and an Enhancer upscaling to 22K resolution. Krea also functions as an image-to-video hub, dispatching stills to Runway, Luma, and Hailuo for seamless storyboarding from static visuals to motion.
|
||||
|
||||
**Models:** Flux, Veo 3, Kling, Runway, 20+ total.
|
||||
|
||||
**Free tier:** Yes.
|
||||
|
||||
**Key features:** Real-time canvas — draw and see AI generation in <50ms. AI Strength slider for control balance. 22K resolution upscaling. In/out-painting and style transfer. AI Patterns for tileable textures. Real-time video generation. Image-to-video hub integration.
|
||||
|
||||
**Best for:** Concept artists. Interactive co-creation. Anyone who thinks in sketches rather than prompts.
|
||||
|
||||
`Free tier` `Live editing` `Canvas` `Inpaint` `Outpaint` `Upscaling`
|
||||
|
||||
### [Freepik AI](https://freepik.com/ai)
|
||||
|
||||

|
||||
|
||||
All-in-one creative platform combining stock assets, AI generation, and editing. The **Mystic model** delivers exceptional photorealism with pixel-perfect text rendering — capabilities where Midjourney and DALL-E 3 struggle. National Geographic-level composition with skin textures and individual hair strands that exceed expectations for AI-generated content.
|
||||
|
||||
Mystic integrates finetunes of Stable Diffusion, Flux, and Magnific.ai technology for **2K default resolution without upscaling**. Complex prompts complete in under a minute. For marketers creating social media graphics, promotional materials, and branded content, the text accuracy eliminates post-production fixes.
|
||||
|
||||
The ecosystem integration matters: generate with Mystic, refine with Retouch (selective editing), expand compositions, create variations — all within one interface. No bouncing between Photoshop, design tools, and image generators.
|
||||
|
||||
**Models:** Mystic (proprietary, fine-tuned on Flux/SD/Magnific), plus Flux and Ideogram.
|
||||
|
||||
**Key features:** Mystic model with 2K default resolution. Superior text rendering vs competitors. AI Video via Veo. Sketch-to-Image. Custom Characters. Integrated Retouch, Expand, Reimagine tools. Multiple model modes for different styles.
|
||||
|
||||
**Best for:** Marketing teams. All-in-one creative workflow. Text-heavy marketing materials.
|
||||
|
||||
`Text` `Inpaint` `Upscaling`
|
||||
|
||||
## FAQ
|
||||
|
||||
### Is there an AI better than Midjourney?
|
||||
|
||||
Depends on what you need. For text rendering: Ideogram, Recraft, or GPT-4o. For API access: fal.ai, Replicate, or Banatie. For free usage: Leonardo AI, Gemini, or Reve. For commercial safety: Adobe Firefly. For vectors: Recraft. Midjourney excels at artistic quality but lacks API access and has no free tier.
|
||||
|
||||
### What is similar to Midjourney but free?
|
||||
|
||||
Leonardo AI gives you 150 tokens daily. Gemini offers unlimited generation in the app (with watermark). Reve provides 100 credits plus 20 per day. Ideogram and Poe both have free tiers. For truly unlimited free generation, self-host FLUX with ComfyUI — requires your own GPU.
|
||||
|
||||
### Which AI image generator has no restrictions?
|
||||
|
||||
Most services have content policies. Self-hosted options (FLUX, Stable Diffusion via Civitai) offer the most freedom. Civitai hosts community models with varied restrictions. Note that "no restrictions" often means NSFW content — check individual model licenses for commercial use.
|
||||
|
||||
### Is Midjourney better than Stable Diffusion?
|
||||
|
||||
Different tools for different needs. Midjourney: easier to use, consistent artistic style, no setup. Stable Diffusion: free, fully customizable, self-hostable, massive model ecosystem. For developers wanting programmatic access, SD or FLUX via API gives more control. For artists wanting quality-per-prompt, Midjourney remains hard to beat.
|
||||
|
||||
### Does Midjourney have an API?
|
||||
|
||||
No official API. Third-party wrappers exist but violate Midjourney's Terms of Service and risk account bans. For legitimate programmatic image generation, use Replicate, fal.ai, Runware, Together AI, or Banatie. These platforms provide similar quality models (especially FLUX) with proper API access.
|
||||
|
||||
## Conclusion
|
||||
|
||||
No single "best" Midjourney alternative exists — it depends on your specific needs.
|
||||
|
||||
**Quick decision guide:**
|
||||
- **Want a UI?** → Leonardo AI, Reve, or Adobe Firefly
|
||||
- **Need API access?** → fal.ai, Runware, or Banatie
|
||||
- **Prefer self-hosting?** → FLUX with ComfyUI
|
||||
- **Want to explore models?** → Poe or Krea
|
||||
|
||||
For developers working with AI coding tools, [Banatie](https://banatie.app) integrates directly into your workflow — generate images without leaving your editor.
|
||||
|
|
@ -10,8 +10,6 @@
|
|||
"dependencies": {
|
||||
"cheerio": "^1.0.0-rc.12",
|
||||
"commander": "^11.1.0",
|
||||
"dotenv": "^17.2.3",
|
||||
"form-data": "^4.0.5",
|
||||
"turndown": "^7.1.2"
|
||||
},
|
||||
"engines": {
|
||||
|
|
@ -24,31 +22,12 @@
|
|||
"integrity": "sha512-Y28PR25bHXUg88kCV7nivXrP2Nj2RueZ3/l/jdx6J9f8J4nsEGcgX0Qe6lt7Pa+J79+kPiJU3LguR6O/6zrLOw==",
|
||||
"license": "BSD-2-Clause"
|
||||
},
|
||||
"node_modules/asynckit": {
|
||||
"version": "0.4.0",
|
||||
"resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz",
|
||||
"integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/boolbase": {
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz",
|
||||
"integrity": "sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==",
|
||||
"license": "ISC"
|
||||
},
|
||||
"node_modules/call-bind-apply-helpers": {
|
||||
"version": "1.0.2",
|
||||
"resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz",
|
||||
"integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"es-errors": "^1.3.0",
|
||||
"function-bind": "^1.1.2"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
}
|
||||
},
|
||||
"node_modules/cheerio": {
|
||||
"version": "1.1.2",
|
||||
"resolved": "https://registry.npmjs.org/cheerio/-/cheerio-1.1.2.tgz",
|
||||
|
|
@ -91,18 +70,6 @@
|
|||
"url": "https://github.com/sponsors/fb55"
|
||||
}
|
||||
},
|
||||
"node_modules/combined-stream": {
|
||||
"version": "1.0.8",
|
||||
"resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz",
|
||||
"integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"delayed-stream": "~1.0.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.8"
|
||||
}
|
||||
},
|
||||
"node_modules/commander": {
|
||||
"version": "11.1.0",
|
||||
"resolved": "https://registry.npmjs.org/commander/-/commander-11.1.0.tgz",
|
||||
|
|
@ -140,15 +107,6 @@
|
|||
"url": "https://github.com/sponsors/fb55"
|
||||
}
|
||||
},
|
||||
"node_modules/delayed-stream": {
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz",
|
||||
"integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==",
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">=0.4.0"
|
||||
}
|
||||
},
|
||||
"node_modules/dom-serializer": {
|
||||
"version": "2.0.0",
|
||||
"resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-2.0.0.tgz",
|
||||
|
|
@ -204,32 +162,6 @@
|
|||
"url": "https://github.com/fb55/domutils?sponsor=1"
|
||||
}
|
||||
},
|
||||
"node_modules/dotenv": {
|
||||
"version": "17.2.3",
|
||||
"resolved": "https://registry.npmjs.org/dotenv/-/dotenv-17.2.3.tgz",
|
||||
"integrity": "sha512-JVUnt+DUIzu87TABbhPmNfVdBDt18BLOWjMUFJMSi/Qqg7NTYtabbvSNJGOJ7afbRuv9D/lngizHtP7QyLQ+9w==",
|
||||
"license": "BSD-2-Clause",
|
||||
"engines": {
|
||||
"node": ">=12"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://dotenvx.com"
|
||||
}
|
||||
},
|
||||
"node_modules/dunder-proto": {
|
||||
"version": "1.0.1",
|
||||
"resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz",
|
||||
"integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"call-bind-apply-helpers": "^1.0.1",
|
||||
"es-errors": "^1.3.0",
|
||||
"gopd": "^1.2.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
}
|
||||
},
|
||||
"node_modules/encoding-sniffer": {
|
||||
"version": "0.2.1",
|
||||
"resolved": "https://registry.npmjs.org/encoding-sniffer/-/encoding-sniffer-0.2.1.tgz",
|
||||
|
|
@ -255,164 +187,6 @@
|
|||
"url": "https://github.com/fb55/entities?sponsor=1"
|
||||
}
|
||||
},
|
||||
"node_modules/es-define-property": {
|
||||
"version": "1.0.1",
|
||||
"resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz",
|
||||
"integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==",
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
}
|
||||
},
|
||||
"node_modules/es-errors": {
|
||||
"version": "1.3.0",
|
||||
"resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz",
|
||||
"integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==",
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
}
|
||||
},
|
||||
"node_modules/es-object-atoms": {
|
||||
"version": "1.1.1",
|
||||
"resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz",
|
||||
"integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"es-errors": "^1.3.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
}
|
||||
},
|
||||
"node_modules/es-set-tostringtag": {
|
||||
"version": "2.1.0",
|
||||
"resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz",
|
||||
"integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"es-errors": "^1.3.0",
|
||||
"get-intrinsic": "^1.2.6",
|
||||
"has-tostringtag": "^1.0.2",
|
||||
"hasown": "^2.0.2"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
}
|
||||
},
|
||||
"node_modules/form-data": {
|
||||
"version": "4.0.5",
|
||||
"resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.5.tgz",
|
||||
"integrity": "sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"asynckit": "^0.4.0",
|
||||
"combined-stream": "^1.0.8",
|
||||
"es-set-tostringtag": "^2.1.0",
|
||||
"hasown": "^2.0.2",
|
||||
"mime-types": "^2.1.12"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 6"
|
||||
}
|
||||
},
|
||||
"node_modules/function-bind": {
|
||||
"version": "1.1.2",
|
||||
"resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz",
|
||||
"integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==",
|
||||
"license": "MIT",
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/ljharb"
|
||||
}
|
||||
},
|
||||
"node_modules/get-intrinsic": {
|
||||
"version": "1.3.0",
|
||||
"resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz",
|
||||
"integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"call-bind-apply-helpers": "^1.0.2",
|
||||
"es-define-property": "^1.0.1",
|
||||
"es-errors": "^1.3.0",
|
||||
"es-object-atoms": "^1.1.1",
|
||||
"function-bind": "^1.1.2",
|
||||
"get-proto": "^1.0.1",
|
||||
"gopd": "^1.2.0",
|
||||
"has-symbols": "^1.1.0",
|
||||
"hasown": "^2.0.2",
|
||||
"math-intrinsics": "^1.1.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/ljharb"
|
||||
}
|
||||
},
|
||||
"node_modules/get-proto": {
|
||||
"version": "1.0.1",
|
||||
"resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz",
|
||||
"integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"dunder-proto": "^1.0.1",
|
||||
"es-object-atoms": "^1.0.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
}
|
||||
},
|
||||
"node_modules/gopd": {
|
||||
"version": "1.2.0",
|
||||
"resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz",
|
||||
"integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==",
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/ljharb"
|
||||
}
|
||||
},
|
||||
"node_modules/has-symbols": {
|
||||
"version": "1.1.0",
|
||||
"resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz",
|
||||
"integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==",
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/ljharb"
|
||||
}
|
||||
},
|
||||
"node_modules/has-tostringtag": {
|
||||
"version": "1.0.2",
|
||||
"resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz",
|
||||
"integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"has-symbols": "^1.0.3"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/ljharb"
|
||||
}
|
||||
},
|
||||
"node_modules/hasown": {
|
||||
"version": "2.0.2",
|
||||
"resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz",
|
||||
"integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"function-bind": "^1.1.2"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
}
|
||||
},
|
||||
"node_modules/htmlparser2": {
|
||||
"version": "10.0.0",
|
||||
"resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-10.0.0.tgz",
|
||||
|
|
@ -456,36 +230,6 @@
|
|||
"node": ">=0.10.0"
|
||||
}
|
||||
},
|
||||
"node_modules/math-intrinsics": {
|
||||
"version": "1.1.0",
|
||||
"resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz",
|
||||
"integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==",
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
}
|
||||
},
|
||||
"node_modules/mime-db": {
|
||||
"version": "1.52.0",
|
||||
"resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
|
||||
"integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">= 0.6"
|
||||
}
|
||||
},
|
||||
"node_modules/mime-types": {
|
||||
"version": "2.1.35",
|
||||
"resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
|
||||
"integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"mime-db": "1.52.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.6"
|
||||
}
|
||||
},
|
||||
"node_modules/nth-check": {
|
||||
"version": "2.1.1",
|
||||
"resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.1.1.tgz",
|
||||
|
|
|
|||
|
|
@ -4,20 +4,13 @@
|
|||
"description": "Content repository for Banatie blog",
|
||||
"private": true,
|
||||
"scripts": {
|
||||
"reddit-to-md": "node scripts/html-reddit-to-markdown.js",
|
||||
"upload:image": "tsx .claude/skills/upload-image/scripts/upload.ts"
|
||||
"reddit-to-md": "node scripts/html-reddit-to-markdown.js"
|
||||
},
|
||||
"dependencies": {
|
||||
"cheerio": "^1.0.0-rc.12",
|
||||
"commander": "^11.1.0",
|
||||
"dotenv": "^17.2.3",
|
||||
"form-data": "^4.0.5",
|
||||
"turndown": "^7.1.2"
|
||||
},
|
||||
"devDependencies": {
|
||||
"sharp": "^0.34.5",
|
||||
"tsx": "^4.21.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=16.0.0"
|
||||
}
|
||||
|
|
|
|||
997
pnpm-lock.yaml
|
|
@ -1,997 +0,0 @@
|
|||
lockfileVersion: '9.0'
|
||||
|
||||
settings:
|
||||
autoInstallPeers: true
|
||||
excludeLinksFromLockfile: false
|
||||
|
||||
importers:
|
||||
|
||||
.:
|
||||
dependencies:
|
||||
cheerio:
|
||||
specifier: ^1.0.0-rc.12
|
||||
version: 1.2.0
|
||||
commander:
|
||||
specifier: ^11.1.0
|
||||
version: 11.1.0
|
||||
dotenv:
|
||||
specifier: ^17.2.3
|
||||
version: 17.2.3
|
||||
form-data:
|
||||
specifier: ^4.0.5
|
||||
version: 4.0.5
|
||||
turndown:
|
||||
specifier: ^7.1.2
|
||||
version: 7.2.2
|
||||
devDependencies:
|
||||
sharp:
|
||||
specifier: ^0.34.5
|
||||
version: 0.34.5
|
||||
tsx:
|
||||
specifier: ^4.21.0
|
||||
version: 4.21.0
|
||||
|
||||
packages:
|
||||
|
||||
'@emnapi/runtime@1.8.1':
|
||||
resolution: {integrity: sha512-mehfKSMWjjNol8659Z8KxEMrdSJDDot5SXMq00dM8BN4o+CLNXQ0xH2V7EchNHV4RmbZLmmPdEaXZc5H2FXmDg==}
|
||||
|
||||
'@esbuild/aix-ppc64@0.27.2':
|
||||
resolution: {integrity: sha512-GZMB+a0mOMZs4MpDbj8RJp4cw+w1WV5NYD6xzgvzUJ5Ek2jerwfO2eADyI6ExDSUED+1X8aMbegahsJi+8mgpw==}
|
||||
engines: {node: '>=18'}
|
||||
cpu: [ppc64]
|
||||
os: [aix]
|
||||
|
||||
'@esbuild/android-arm64@0.27.2':
|
||||
resolution: {integrity: sha512-pvz8ZZ7ot/RBphf8fv60ljmaoydPU12VuXHImtAs0XhLLw+EXBi2BLe3OYSBslR4rryHvweW5gmkKFwTiFy6KA==}
|
||||
engines: {node: '>=18'}
|
||||
cpu: [arm64]
|
||||
os: [android]
|
||||
|
||||
'@esbuild/android-arm@0.27.2':
|
||||
resolution: {integrity: sha512-DVNI8jlPa7Ujbr1yjU2PfUSRtAUZPG9I1RwW4F4xFB1Imiu2on0ADiI/c3td+KmDtVKNbi+nffGDQMfcIMkwIA==}
|
||||
engines: {node: '>=18'}
|
||||
cpu: [arm]
|
||||
os: [android]
|
||||
|
||||
'@esbuild/android-x64@0.27.2':
|
||||
resolution: {integrity: sha512-z8Ank4Byh4TJJOh4wpz8g2vDy75zFL0TlZlkUkEwYXuPSgX8yzep596n6mT7905kA9uHZsf/o2OJZubl2l3M7A==}
|
||||
engines: {node: '>=18'}
|
||||
cpu: [x64]
|
||||
os: [android]
|
||||
|
||||
'@esbuild/darwin-arm64@0.27.2':
|
||||
resolution: {integrity: sha512-davCD2Zc80nzDVRwXTcQP/28fiJbcOwvdolL0sOiOsbwBa72kegmVU0Wrh1MYrbuCL98Omp5dVhQFWRKR2ZAlg==}
|
||||
engines: {node: '>=18'}
|
||||
cpu: [arm64]
|
||||
os: [darwin]
|
||||
|
||||
'@esbuild/darwin-x64@0.27.2':
|
||||
resolution: {integrity: sha512-ZxtijOmlQCBWGwbVmwOF/UCzuGIbUkqB1faQRf5akQmxRJ1ujusWsb3CVfk/9iZKr2L5SMU5wPBi1UWbvL+VQA==}
|
||||
engines: {node: '>=18'}
|
||||
cpu: [x64]
|
||||
os: [darwin]
|
||||
|
||||
'@esbuild/freebsd-arm64@0.27.2':
|
||||
resolution: {integrity: sha512-lS/9CN+rgqQ9czogxlMcBMGd+l8Q3Nj1MFQwBZJyoEKI50XGxwuzznYdwcav6lpOGv5BqaZXqvBSiB/kJ5op+g==}
|
||||
engines: {node: '>=18'}
|
||||
cpu: [arm64]
|
||||
os: [freebsd]
|
||||
|
||||
'@esbuild/freebsd-x64@0.27.2':
|
||||
resolution: {integrity: sha512-tAfqtNYb4YgPnJlEFu4c212HYjQWSO/w/h/lQaBK7RbwGIkBOuNKQI9tqWzx7Wtp7bTPaGC6MJvWI608P3wXYA==}
|
||||
engines: {node: '>=18'}
|
||||
cpu: [x64]
|
||||
os: [freebsd]
|
||||
|
||||
'@esbuild/linux-arm64@0.27.2':
|
||||
resolution: {integrity: sha512-hYxN8pr66NsCCiRFkHUAsxylNOcAQaxSSkHMMjcpx0si13t1LHFphxJZUiGwojB1a/Hd5OiPIqDdXONia6bhTw==}
|
||||
engines: {node: '>=18'}
|
||||
cpu: [arm64]
|
||||
os: [linux]
|
||||
|
||||
'@esbuild/linux-arm@0.27.2':
|
||||
resolution: {integrity: sha512-vWfq4GaIMP9AIe4yj1ZUW18RDhx6EPQKjwe7n8BbIecFtCQG4CfHGaHuh7fdfq+y3LIA2vGS/o9ZBGVxIDi9hw==}
|
||||
engines: {node: '>=18'}
|
||||
cpu: [arm]
|
||||
os: [linux]
|
||||
|
||||
'@esbuild/linux-ia32@0.27.2':
|
||||
resolution: {integrity: sha512-MJt5BRRSScPDwG2hLelYhAAKh9imjHK5+NE/tvnRLbIqUWa+0E9N4WNMjmp/kXXPHZGqPLxggwVhz7QP8CTR8w==}
|
||||
engines: {node: '>=18'}
|
||||
cpu: [ia32]
|
||||
os: [linux]
|
||||
|
||||
'@esbuild/linux-loong64@0.27.2':
|
||||
resolution: {integrity: sha512-lugyF1atnAT463aO6KPshVCJK5NgRnU4yb3FUumyVz+cGvZbontBgzeGFO1nF+dPueHD367a2ZXe1NtUkAjOtg==}
|
||||
engines: {node: '>=18'}
|
||||
cpu: [loong64]
|
||||
os: [linux]
|
||||
|
||||
'@esbuild/linux-mips64el@0.27.2':
|
||||
resolution: {integrity: sha512-nlP2I6ArEBewvJ2gjrrkESEZkB5mIoaTswuqNFRv/WYd+ATtUpe9Y09RnJvgvdag7he0OWgEZWhviS1OTOKixw==}
|
||||
engines: {node: '>=18'}
|
||||
cpu: [mips64el]
|
||||
os: [linux]
|
||||
|
||||
'@esbuild/linux-ppc64@0.27.2':
|
||||
resolution: {integrity: sha512-C92gnpey7tUQONqg1n6dKVbx3vphKtTHJaNG2Ok9lGwbZil6DrfyecMsp9CrmXGQJmZ7iiVXvvZH6Ml5hL6XdQ==}
|
||||
engines: {node: '>=18'}
|
||||
cpu: [ppc64]
|
||||
os: [linux]
|
||||
|
||||
'@esbuild/linux-riscv64@0.27.2':
|
||||
resolution: {integrity: sha512-B5BOmojNtUyN8AXlK0QJyvjEZkWwy/FKvakkTDCziX95AowLZKR6aCDhG7LeF7uMCXEJqwa8Bejz5LTPYm8AvA==}
|
||||
engines: {node: '>=18'}
|
||||
cpu: [riscv64]
|
||||
os: [linux]
|
||||
|
||||
'@esbuild/linux-s390x@0.27.2':
|
||||
resolution: {integrity: sha512-p4bm9+wsPwup5Z8f4EpfN63qNagQ47Ua2znaqGH6bqLlmJ4bx97Y9JdqxgGZ6Y8xVTixUnEkoKSHcpRlDnNr5w==}
|
||||
engines: {node: '>=18'}
|
||||
cpu: [s390x]
|
||||
os: [linux]
|
||||
|
||||
'@esbuild/linux-x64@0.27.2':
|
||||
resolution: {integrity: sha512-uwp2Tip5aPmH+NRUwTcfLb+W32WXjpFejTIOWZFw/v7/KnpCDKG66u4DLcurQpiYTiYwQ9B7KOeMJvLCu/OvbA==}
|
||||
engines: {node: '>=18'}
|
||||
cpu: [x64]
|
||||
os: [linux]
|
||||
|
||||
'@esbuild/netbsd-arm64@0.27.2':
|
||||
resolution: {integrity: sha512-Kj6DiBlwXrPsCRDeRvGAUb/LNrBASrfqAIok+xB0LxK8CHqxZ037viF13ugfsIpePH93mX7xfJp97cyDuTZ3cw==}
|
||||
engines: {node: '>=18'}
|
||||
cpu: [arm64]
|
||||
os: [netbsd]
|
||||
|
||||
'@esbuild/netbsd-x64@0.27.2':
|
||||
resolution: {integrity: sha512-HwGDZ0VLVBY3Y+Nw0JexZy9o/nUAWq9MlV7cahpaXKW6TOzfVno3y3/M8Ga8u8Yr7GldLOov27xiCnqRZf0tCA==}
|
||||
engines: {node: '>=18'}
|
||||
cpu: [x64]
|
||||
os: [netbsd]
|
||||
|
||||
'@esbuild/openbsd-arm64@0.27.2':
|
||||
resolution: {integrity: sha512-DNIHH2BPQ5551A7oSHD0CKbwIA/Ox7+78/AWkbS5QoRzaqlev2uFayfSxq68EkonB+IKjiuxBFoV8ESJy8bOHA==}
|
||||
engines: {node: '>=18'}
|
||||
cpu: [arm64]
|
||||
os: [openbsd]
|
||||
|
||||
'@esbuild/openbsd-x64@0.27.2':
|
||||
resolution: {integrity: sha512-/it7w9Nb7+0KFIzjalNJVR5bOzA9Vay+yIPLVHfIQYG/j+j9VTH84aNB8ExGKPU4AzfaEvN9/V4HV+F+vo8OEg==}
|
||||
engines: {node: '>=18'}
|
||||
cpu: [x64]
|
||||
os: [openbsd]
|
||||
|
||||
'@esbuild/openharmony-arm64@0.27.2':
|
||||
resolution: {integrity: sha512-LRBbCmiU51IXfeXk59csuX/aSaToeG7w48nMwA6049Y4J4+VbWALAuXcs+qcD04rHDuSCSRKdmY63sruDS5qag==}
|
||||
engines: {node: '>=18'}
|
||||
cpu: [arm64]
|
||||
os: [openharmony]
|
||||
|
||||
'@esbuild/sunos-x64@0.27.2':
|
||||
resolution: {integrity: sha512-kMtx1yqJHTmqaqHPAzKCAkDaKsffmXkPHThSfRwZGyuqyIeBvf08KSsYXl+abf5HDAPMJIPnbBfXvP2ZC2TfHg==}
|
||||
engines: {node: '>=18'}
|
||||
cpu: [x64]
|
||||
os: [sunos]
|
||||
|
||||
'@esbuild/win32-arm64@0.27.2':
|
||||
resolution: {integrity: sha512-Yaf78O/B3Kkh+nKABUF++bvJv5Ijoy9AN1ww904rOXZFLWVc5OLOfL56W+C8F9xn5JQZa3UX6m+IktJnIb1Jjg==}
|
||||
engines: {node: '>=18'}
|
||||
cpu: [arm64]
|
||||
os: [win32]
|
||||
|
||||
'@esbuild/win32-ia32@0.27.2':
|
||||
resolution: {integrity: sha512-Iuws0kxo4yusk7sw70Xa2E2imZU5HoixzxfGCdxwBdhiDgt9vX9VUCBhqcwY7/uh//78A1hMkkROMJq9l27oLQ==}
|
||||
engines: {node: '>=18'}
|
||||
cpu: [ia32]
|
||||
os: [win32]
|
||||
|
||||
'@esbuild/win32-x64@0.27.2':
|
||||
resolution: {integrity: sha512-sRdU18mcKf7F+YgheI/zGf5alZatMUTKj/jNS6l744f9u3WFu4v7twcUI9vu4mknF4Y9aDlblIie0IM+5xxaqQ==}
|
||||
engines: {node: '>=18'}
|
||||
cpu: [x64]
|
||||
os: [win32]
|
||||
|
||||
'@img/colour@1.0.0':
|
||||
resolution: {integrity: sha512-A5P/LfWGFSl6nsckYtjw9da+19jB8hkJ6ACTGcDfEJ0aE+l2n2El7dsVM7UVHZQ9s2lmYMWlrS21YLy2IR1LUw==}
|
||||
engines: {node: '>=18'}
|
||||
|
||||
'@img/sharp-darwin-arm64@0.34.5':
|
||||
resolution: {integrity: sha512-imtQ3WMJXbMY4fxb/Ndp6HBTNVtWCUI0WdobyheGf5+ad6xX8VIDO8u2xE4qc/fr08CKG/7dDseFtn6M6g/r3w==}
|
||||
engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
|
||||
cpu: [arm64]
|
||||
os: [darwin]
|
||||
|
||||
'@img/sharp-darwin-x64@0.34.5':
|
||||
resolution: {integrity: sha512-YNEFAF/4KQ/PeW0N+r+aVVsoIY0/qxxikF2SWdp+NRkmMB7y9LBZAVqQ4yhGCm/H3H270OSykqmQMKLBhBJDEw==}
|
||||
engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
|
||||
cpu: [x64]
|
||||
os: [darwin]
|
||||
|
||||
'@img/sharp-libvips-darwin-arm64@1.2.4':
|
||||
resolution: {integrity: sha512-zqjjo7RatFfFoP0MkQ51jfuFZBnVE2pRiaydKJ1G/rHZvnsrHAOcQALIi9sA5co5xenQdTugCvtb1cuf78Vf4g==}
|
||||
cpu: [arm64]
|
||||
os: [darwin]
|
||||
|
||||
'@img/sharp-libvips-darwin-x64@1.2.4':
|
||||
resolution: {integrity: sha512-1IOd5xfVhlGwX+zXv2N93k0yMONvUlANylbJw1eTah8K/Jtpi15KC+WSiaX/nBmbm2HxRM1gZ0nSdjSsrZbGKg==}
|
||||
cpu: [x64]
|
||||
os: [darwin]
|
||||
|
||||
'@img/sharp-libvips-linux-arm64@1.2.4':
|
||||
resolution: {integrity: sha512-excjX8DfsIcJ10x1Kzr4RcWe1edC9PquDRRPx3YVCvQv+U5p7Yin2s32ftzikXojb1PIFc/9Mt28/y+iRklkrw==}
|
||||
cpu: [arm64]
|
||||
os: [linux]
|
||||
|
||||
'@img/sharp-libvips-linux-arm@1.2.4':
|
||||
resolution: {integrity: sha512-bFI7xcKFELdiNCVov8e44Ia4u2byA+l3XtsAj+Q8tfCwO6BQ8iDojYdvoPMqsKDkuoOo+X6HZA0s0q11ANMQ8A==}
|
||||
cpu: [arm]
|
||||
os: [linux]
|
||||
|
||||
'@img/sharp-libvips-linux-ppc64@1.2.4':
|
||||
resolution: {integrity: sha512-FMuvGijLDYG6lW+b/UvyilUWu5Ayu+3r2d1S8notiGCIyYU/76eig1UfMmkZ7vwgOrzKzlQbFSuQfgm7GYUPpA==}
|
||||
cpu: [ppc64]
|
||||
os: [linux]
|
||||
|
||||
'@img/sharp-libvips-linux-riscv64@1.2.4':
|
||||
resolution: {integrity: sha512-oVDbcR4zUC0ce82teubSm+x6ETixtKZBh/qbREIOcI3cULzDyb18Sr/Wcyx7NRQeQzOiHTNbZFF1UwPS2scyGA==}
|
||||
cpu: [riscv64]
|
||||
os: [linux]
|
||||
|
||||
'@img/sharp-libvips-linux-s390x@1.2.4':
|
||||
resolution: {integrity: sha512-qmp9VrzgPgMoGZyPvrQHqk02uyjA0/QrTO26Tqk6l4ZV0MPWIW6LTkqOIov+J1yEu7MbFQaDpwdwJKhbJvuRxQ==}
|
||||
cpu: [s390x]
|
||||
os: [linux]
|
||||
|
||||
'@img/sharp-libvips-linux-x64@1.2.4':
|
||||
resolution: {integrity: sha512-tJxiiLsmHc9Ax1bz3oaOYBURTXGIRDODBqhveVHonrHJ9/+k89qbLl0bcJns+e4t4rvaNBxaEZsFtSfAdquPrw==}
|
||||
cpu: [x64]
|
||||
os: [linux]
|
||||
|
||||
'@img/sharp-libvips-linuxmusl-arm64@1.2.4':
|
||||
resolution: {integrity: sha512-FVQHuwx1IIuNow9QAbYUzJ+En8KcVm9Lk5+uGUQJHaZmMECZmOlix9HnH7n1TRkXMS0pGxIJokIVB9SuqZGGXw==}
|
||||
cpu: [arm64]
|
||||
os: [linux]
|
||||
|
||||
'@img/sharp-libvips-linuxmusl-x64@1.2.4':
|
||||
resolution: {integrity: sha512-+LpyBk7L44ZIXwz/VYfglaX/okxezESc6UxDSoyo2Ks6Jxc4Y7sGjpgU9s4PMgqgjj1gZCylTieNamqA1MF7Dg==}
|
||||
cpu: [x64]
|
||||
os: [linux]
|
||||
|
||||
'@img/sharp-linux-arm64@0.34.5':
|
||||
resolution: {integrity: sha512-bKQzaJRY/bkPOXyKx5EVup7qkaojECG6NLYswgktOZjaXecSAeCWiZwwiFf3/Y+O1HrauiE3FVsGxFg8c24rZg==}
|
||||
engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
|
||||
cpu: [arm64]
|
||||
os: [linux]
|
||||
|
||||
'@img/sharp-linux-arm@0.34.5':
|
||||
resolution: {integrity: sha512-9dLqsvwtg1uuXBGZKsxem9595+ujv0sJ6Vi8wcTANSFpwV/GONat5eCkzQo/1O6zRIkh0m/8+5BjrRr7jDUSZw==}
|
||||
engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
|
||||
cpu: [arm]
|
||||
os: [linux]
|
||||
|
||||
'@img/sharp-linux-ppc64@0.34.5':
|
||||
resolution: {integrity: sha512-7zznwNaqW6YtsfrGGDA6BRkISKAAE1Jo0QdpNYXNMHu2+0dTrPflTLNkpc8l7MUP5M16ZJcUvysVWWrMefZquA==}
|
||||
engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
|
||||
cpu: [ppc64]
|
||||
os: [linux]
|
||||
|
||||
'@img/sharp-linux-riscv64@0.34.5':
|
||||
resolution: {integrity: sha512-51gJuLPTKa7piYPaVs8GmByo7/U7/7TZOq+cnXJIHZKavIRHAP77e3N2HEl3dgiqdD/w0yUfiJnII77PuDDFdw==}
|
||||
engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
|
||||
cpu: [riscv64]
|
||||
os: [linux]
|
||||
|
||||
'@img/sharp-linux-s390x@0.34.5':
|
||||
resolution: {integrity: sha512-nQtCk0PdKfho3eC5MrbQoigJ2gd1CgddUMkabUj+rBevs8tZ2cULOx46E7oyX+04WGfABgIwmMC0VqieTiR4jg==}
|
||||
engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
|
||||
cpu: [s390x]
|
||||
os: [linux]
|
||||
|
||||
'@img/sharp-linux-x64@0.34.5':
|
||||
resolution: {integrity: sha512-MEzd8HPKxVxVenwAa+JRPwEC7QFjoPWuS5NZnBt6B3pu7EG2Ge0id1oLHZpPJdn3OQK+BQDiw9zStiHBTJQQQQ==}
|
||||
engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
|
||||
cpu: [x64]
|
||||
os: [linux]
|
||||
|
||||
'@img/sharp-linuxmusl-arm64@0.34.5':
|
||||
resolution: {integrity: sha512-fprJR6GtRsMt6Kyfq44IsChVZeGN97gTD331weR1ex1c1rypDEABN6Tm2xa1wE6lYb5DdEnk03NZPqA7Id21yg==}
|
||||
engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
|
||||
cpu: [arm64]
|
||||
os: [linux]
|
||||
|
||||
'@img/sharp-linuxmusl-x64@0.34.5':
|
||||
resolution: {integrity: sha512-Jg8wNT1MUzIvhBFxViqrEhWDGzqymo3sV7z7ZsaWbZNDLXRJZoRGrjulp60YYtV4wfY8VIKcWidjojlLcWrd8Q==}
|
||||
engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
|
||||
cpu: [x64]
|
||||
os: [linux]
|
||||
|
||||
'@img/sharp-wasm32@0.34.5':
|
||||
resolution: {integrity: sha512-OdWTEiVkY2PHwqkbBI8frFxQQFekHaSSkUIJkwzclWZe64O1X4UlUjqqqLaPbUpMOQk6FBu/HtlGXNblIs0huw==}
|
||||
engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
|
||||
cpu: [wasm32]
|
||||
|
||||
'@img/sharp-win32-arm64@0.34.5':
|
||||
resolution: {integrity: sha512-WQ3AgWCWYSb2yt+IG8mnC6Jdk9Whs7O0gxphblsLvdhSpSTtmu69ZG1Gkb6NuvxsNACwiPV6cNSZNzt0KPsw7g==}
|
||||
engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
|
||||
cpu: [arm64]
|
||||
os: [win32]
|
||||
|
||||
'@img/sharp-win32-ia32@0.34.5':
|
||||
resolution: {integrity: sha512-FV9m/7NmeCmSHDD5j4+4pNI8Cp3aW+JvLoXcTUo0IqyjSfAZJ8dIUmijx1qaJsIiU+Hosw6xM5KijAWRJCSgNg==}
|
||||
engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
|
||||
cpu: [ia32]
|
||||
os: [win32]
|
||||
|
||||
'@img/sharp-win32-x64@0.34.5':
|
||||
resolution: {integrity: sha512-+29YMsqY2/9eFEiW93eqWnuLcWcufowXewwSNIT6UwZdUUCrM3oFjMWH/Z6/TMmb4hlFenmfAVbpWeup2jryCw==}
|
||||
engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
|
||||
cpu: [x64]
|
||||
os: [win32]
|
||||
|
||||
'@mixmark-io/domino@2.2.0':
|
||||
resolution: {integrity: sha512-Y28PR25bHXUg88kCV7nivXrP2Nj2RueZ3/l/jdx6J9f8J4nsEGcgX0Qe6lt7Pa+J79+kPiJU3LguR6O/6zrLOw==}
|
||||
|
||||
asynckit@0.4.0:
|
||||
resolution: {integrity: sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==}
|
||||
|
||||
boolbase@1.0.0:
|
||||
resolution: {integrity: sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==}
|
||||
|
||||
call-bind-apply-helpers@1.0.2:
|
||||
resolution: {integrity: sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==}
|
||||
engines: {node: '>= 0.4'}
|
||||
|
||||
cheerio-select@2.1.0:
|
||||
resolution: {integrity: sha512-9v9kG0LvzrlcungtnJtpGNxY+fzECQKhK4EGJX2vByejiMX84MFNQw4UxPJl3bFbTMw+Dfs37XaIkCwTZfLh4g==}
|
||||
|
||||
cheerio@1.2.0:
|
||||
resolution: {integrity: sha512-WDrybc/gKFpTYQutKIK6UvfcuxijIZfMfXaYm8NMsPQxSYvf+13fXUJ4rztGGbJcBQ/GF55gvrZ0Bc0bj/mqvg==}
|
||||
engines: {node: '>=20.18.1'}
|
||||
|
||||
combined-stream@1.0.8:
|
||||
resolution: {integrity: sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==}
|
||||
engines: {node: '>= 0.8'}
|
||||
|
||||
commander@11.1.0:
|
||||
resolution: {integrity: sha512-yPVavfyCcRhmorC7rWlkHn15b4wDVgVmBA7kV4QVBsF7kv/9TKJAbAXVTxvTnwP8HHKjRCJDClKbciiYS7p0DQ==}
|
||||
engines: {node: '>=16'}
|
||||
|
||||
css-select@5.2.2:
|
||||
resolution: {integrity: sha512-TizTzUddG/xYLA3NXodFM0fSbNizXjOKhqiQQwvhlspadZokn1KDy0NZFS0wuEubIYAV5/c1/lAr0TaaFXEXzw==}
|
||||
|
||||
css-what@6.2.2:
|
||||
resolution: {integrity: sha512-u/O3vwbptzhMs3L1fQE82ZSLHQQfto5gyZzwteVIEyeaY5Fc7R4dapF/BvRoSYFeqfBk4m0V1Vafq5Pjv25wvA==}
|
||||
engines: {node: '>= 6'}
|
||||
|
||||
delayed-stream@1.0.0:
|
||||
resolution: {integrity: sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==}
|
||||
engines: {node: '>=0.4.0'}
|
||||
|
||||
detect-libc@2.1.2:
|
||||
resolution: {integrity: sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ==}
|
||||
engines: {node: '>=8'}
|
||||
|
||||
dom-serializer@2.0.0:
|
||||
resolution: {integrity: sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg==}
|
||||
|
||||
domelementtype@2.3.0:
|
||||
resolution: {integrity: sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==}
|
||||
|
||||
domhandler@5.0.3:
|
||||
resolution: {integrity: sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==}
|
||||
engines: {node: '>= 4'}
|
||||
|
||||
domutils@3.2.2:
|
||||
resolution: {integrity: sha512-6kZKyUajlDuqlHKVX1w7gyslj9MPIXzIFiz/rGu35uC1wMi+kMhQwGhl4lt9unC9Vb9INnY9Z3/ZA3+FhASLaw==}
|
||||
|
||||
dotenv@17.2.3:
|
||||
resolution: {integrity: sha512-JVUnt+DUIzu87TABbhPmNfVdBDt18BLOWjMUFJMSi/Qqg7NTYtabbvSNJGOJ7afbRuv9D/lngizHtP7QyLQ+9w==}
|
||||
engines: {node: '>=12'}
|
||||
|
||||
dunder-proto@1.0.1:
|
||||
resolution: {integrity: sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==}
|
||||
engines: {node: '>= 0.4'}
|
||||
|
||||
encoding-sniffer@0.2.1:
|
||||
resolution: {integrity: sha512-5gvq20T6vfpekVtqrYQsSCFZ1wEg5+wW0/QaZMWkFr6BqD3NfKs0rLCx4rrVlSWJeZb5NBJgVLswK/w2MWU+Gw==}
|
||||
|
||||
entities@4.5.0:
|
||||
resolution: {integrity: sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==}
|
||||
engines: {node: '>=0.12'}
|
||||
|
||||
entities@6.0.1:
|
||||
resolution: {integrity: sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g==}
|
||||
engines: {node: '>=0.12'}
|
||||
|
||||
entities@7.0.1:
|
||||
resolution: {integrity: sha512-TWrgLOFUQTH994YUyl1yT4uyavY5nNB5muff+RtWaqNVCAK408b5ZnnbNAUEWLTCpum9w6arT70i1XdQ4UeOPA==}
|
||||
engines: {node: '>=0.12'}
|
||||
|
||||
es-define-property@1.0.1:
|
||||
resolution: {integrity: sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==}
|
||||
engines: {node: '>= 0.4'}
|
||||
|
||||
es-errors@1.3.0:
|
||||
resolution: {integrity: sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==}
|
||||
engines: {node: '>= 0.4'}
|
||||
|
||||
es-object-atoms@1.1.1:
|
||||
resolution: {integrity: sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==}
|
||||
engines: {node: '>= 0.4'}
|
||||
|
||||
es-set-tostringtag@2.1.0:
|
||||
resolution: {integrity: sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==}
|
||||
engines: {node: '>= 0.4'}
|
||||
|
||||
esbuild@0.27.2:
|
||||
resolution: {integrity: sha512-HyNQImnsOC7X9PMNaCIeAm4ISCQXs5a5YasTXVliKv4uuBo1dKrG0A+uQS8M5eXjVMnLg3WgXaKvprHlFJQffw==}
|
||||
engines: {node: '>=18'}
|
||||
hasBin: true
|
||||
|
||||
form-data@4.0.5:
|
||||
resolution: {integrity: sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==}
|
||||
engines: {node: '>= 6'}
|
||||
|
||||
fsevents@2.3.3:
|
||||
resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==}
|
||||
engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0}
|
||||
os: [darwin]
|
||||
|
||||
function-bind@1.1.2:
|
||||
resolution: {integrity: sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==}
|
||||
|
||||
get-intrinsic@1.3.0:
|
||||
resolution: {integrity: sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==}
|
||||
engines: {node: '>= 0.4'}
|
||||
|
||||
get-proto@1.0.1:
|
||||
resolution: {integrity: sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==}
|
||||
engines: {node: '>= 0.4'}
|
||||
|
||||
get-tsconfig@4.13.1:
|
||||
resolution: {integrity: sha512-EoY1N2xCn44xU6750Sx7OjOIT59FkmstNc3X6y5xpz7D5cBtZRe/3pSlTkDJgqsOk3WwZPkWfonhhUJfttQo3w==}
|
||||
|
||||
gopd@1.2.0:
|
||||
resolution: {integrity: sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==}
|
||||
engines: {node: '>= 0.4'}
|
||||
|
||||
has-symbols@1.1.0:
|
||||
resolution: {integrity: sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==}
|
||||
engines: {node: '>= 0.4'}
|
||||
|
||||
has-tostringtag@1.0.2:
|
||||
resolution: {integrity: sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==}
|
||||
engines: {node: '>= 0.4'}
|
||||
|
||||
hasown@2.0.2:
|
||||
resolution: {integrity: sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==}
|
||||
engines: {node: '>= 0.4'}
|
||||
|
||||
htmlparser2@10.1.0:
|
||||
resolution: {integrity: sha512-VTZkM9GWRAtEpveh7MSF6SjjrpNVNNVJfFup7xTY3UpFtm67foy9HDVXneLtFVt4pMz5kZtgNcvCniNFb1hlEQ==}
|
||||
|
||||
iconv-lite@0.6.3:
|
||||
resolution: {integrity: sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==}
|
||||
engines: {node: '>=0.10.0'}
|
||||
|
||||
math-intrinsics@1.1.0:
|
||||
resolution: {integrity: sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==}
|
||||
engines: {node: '>= 0.4'}
|
||||
|
||||
mime-db@1.52.0:
|
||||
resolution: {integrity: sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==}
|
||||
engines: {node: '>= 0.6'}
|
||||
|
||||
mime-types@2.1.35:
|
||||
resolution: {integrity: sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==}
|
||||
engines: {node: '>= 0.6'}
|
||||
|
||||
nth-check@2.1.1:
|
||||
resolution: {integrity: sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==}
|
||||
|
||||
parse5-htmlparser2-tree-adapter@7.1.0:
|
||||
resolution: {integrity: sha512-ruw5xyKs6lrpo9x9rCZqZZnIUntICjQAd0Wsmp396Ul9lN/h+ifgVV1x1gZHi8euej6wTfpqX8j+BFQxF0NS/g==}
|
||||
|
||||
parse5-parser-stream@7.1.2:
|
||||
resolution: {integrity: sha512-JyeQc9iwFLn5TbvvqACIF/VXG6abODeB3Fwmv/TGdLk2LfbWkaySGY72at4+Ty7EkPZj854u4CrICqNk2qIbow==}
|
||||
|
||||
parse5@7.3.0:
|
||||
resolution: {integrity: sha512-IInvU7fabl34qmi9gY8XOVxhYyMyuH2xUNpb2q8/Y+7552KlejkRvqvD19nMoUW/uQGGbqNpA6Tufu5FL5BZgw==}
|
||||
|
||||
resolve-pkg-maps@1.0.0:
|
||||
resolution: {integrity: sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==}
|
||||
|
||||
safer-buffer@2.1.2:
|
||||
resolution: {integrity: sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==}
|
||||
|
||||
semver@7.7.3:
|
||||
resolution: {integrity: sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==}
|
||||
engines: {node: '>=10'}
|
||||
hasBin: true
|
||||
|
||||
sharp@0.34.5:
|
||||
resolution: {integrity: sha512-Ou9I5Ft9WNcCbXrU9cMgPBcCK8LiwLqcbywW3t4oDV37n1pzpuNLsYiAV8eODnjbtQlSDwZ2cUEeQz4E54Hltg==}
|
||||
engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
|
||||
|
||||
tslib@2.8.1:
|
||||
resolution: {integrity: sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==}
|
||||
|
||||
tsx@4.21.0:
|
||||
resolution: {integrity: sha512-5C1sg4USs1lfG0GFb2RLXsdpXqBSEhAaA/0kPL01wxzpMqLILNxIxIOKiILz+cdg/pLnOUxFYOR5yhHU666wbw==}
|
||||
engines: {node: '>=18.0.0'}
|
||||
hasBin: true
|
||||
|
||||
turndown@7.2.2:
|
||||
resolution: {integrity: sha512-1F7db8BiExOKxjSMU2b7if62D/XOyQyZbPKq/nUwopfgnHlqXHqQ0lvfUTeUIr1lZJzOPFn43dODyMSIfvWRKQ==}
|
||||
|
||||
undici@7.19.2:
|
||||
resolution: {integrity: sha512-4VQSpGEGsWzk0VYxyB/wVX/Q7qf9t5znLRgs0dzszr9w9Fej/8RVNQ+S20vdXSAyra/bJ7ZQfGv6ZMj7UEbzSg==}
|
||||
engines: {node: '>=20.18.1'}
|
||||
|
||||
whatwg-encoding@3.1.1:
|
||||
resolution: {integrity: sha512-6qN4hJdMwfYBtE3YBTTHhoeuUrDBPZmbQaxWAqSALV/MeEnR5z1xd8UKud2RAkFoPkmB+hli1TZSnyi84xz1vQ==}
|
||||
engines: {node: '>=18'}
|
||||
deprecated: Use @exodus/bytes instead for a more spec-conformant and faster implementation
|
||||
|
||||
whatwg-mimetype@4.0.0:
|
||||
resolution: {integrity: sha512-QaKxh0eNIi2mE9p2vEdzfagOKHCcj1pJ56EEHGQOVxp8r9/iszLUUV7v89x9O1p/T+NlTM5W7jW6+cz4Fq1YVg==}
|
||||
engines: {node: '>=18'}
|
||||
|
||||
snapshots:
|
||||
|
||||
'@emnapi/runtime@1.8.1':
|
||||
dependencies:
|
||||
tslib: 2.8.1
|
||||
optional: true
|
||||
|
||||
'@esbuild/aix-ppc64@0.27.2':
|
||||
optional: true
|
||||
|
||||
'@esbuild/android-arm64@0.27.2':
|
||||
optional: true
|
||||
|
||||
'@esbuild/android-arm@0.27.2':
|
||||
optional: true
|
||||
|
||||
'@esbuild/android-x64@0.27.2':
|
||||
optional: true
|
||||
|
||||
'@esbuild/darwin-arm64@0.27.2':
|
||||
optional: true
|
||||
|
||||
'@esbuild/darwin-x64@0.27.2':
|
||||
optional: true
|
||||
|
||||
'@esbuild/freebsd-arm64@0.27.2':
|
||||
optional: true
|
||||
|
||||
'@esbuild/freebsd-x64@0.27.2':
|
||||
optional: true
|
||||
|
||||
'@esbuild/linux-arm64@0.27.2':
|
||||
optional: true
|
||||
|
||||
'@esbuild/linux-arm@0.27.2':
|
||||
optional: true
|
||||
|
||||
'@esbuild/linux-ia32@0.27.2':
|
||||
optional: true
|
||||
|
||||
'@esbuild/linux-loong64@0.27.2':
|
||||
optional: true
|
||||
|
||||
'@esbuild/linux-mips64el@0.27.2':
|
||||
optional: true
|
||||
|
||||
'@esbuild/linux-ppc64@0.27.2':
|
||||
optional: true
|
||||
|
||||
'@esbuild/linux-riscv64@0.27.2':
|
||||
optional: true
|
||||
|
||||
'@esbuild/linux-s390x@0.27.2':
|
||||
optional: true
|
||||
|
||||
'@esbuild/linux-x64@0.27.2':
|
||||
optional: true
|
||||
|
||||
'@esbuild/netbsd-arm64@0.27.2':
|
||||
optional: true
|
||||
|
||||
'@esbuild/netbsd-x64@0.27.2':
|
||||
optional: true
|
||||
|
||||
'@esbuild/openbsd-arm64@0.27.2':
|
||||
optional: true
|
||||
|
||||
'@esbuild/openbsd-x64@0.27.2':
|
||||
optional: true
|
||||
|
||||
'@esbuild/openharmony-arm64@0.27.2':
|
||||
optional: true
|
||||
|
||||
'@esbuild/sunos-x64@0.27.2':
|
||||
optional: true
|
||||
|
||||
'@esbuild/win32-arm64@0.27.2':
|
||||
optional: true
|
||||
|
||||
'@esbuild/win32-ia32@0.27.2':
|
||||
optional: true
|
||||
|
||||
'@esbuild/win32-x64@0.27.2':
|
||||
optional: true
|
||||
|
||||
'@img/colour@1.0.0': {}
|
||||
|
||||
'@img/sharp-darwin-arm64@0.34.5':
|
||||
optionalDependencies:
|
||||
'@img/sharp-libvips-darwin-arm64': 1.2.4
|
||||
optional: true
|
||||
|
||||
'@img/sharp-darwin-x64@0.34.5':
|
||||
optionalDependencies:
|
||||
'@img/sharp-libvips-darwin-x64': 1.2.4
|
||||
optional: true
|
||||
|
||||
'@img/sharp-libvips-darwin-arm64@1.2.4':
|
||||
optional: true
|
||||
|
||||
'@img/sharp-libvips-darwin-x64@1.2.4':
|
||||
optional: true
|
||||
|
||||
'@img/sharp-libvips-linux-arm64@1.2.4':
|
||||
optional: true
|
||||
|
||||
'@img/sharp-libvips-linux-arm@1.2.4':
|
||||
optional: true
|
||||
|
||||
'@img/sharp-libvips-linux-ppc64@1.2.4':
|
||||
optional: true
|
||||
|
||||
'@img/sharp-libvips-linux-riscv64@1.2.4':
|
||||
optional: true
|
||||
|
||||
'@img/sharp-libvips-linux-s390x@1.2.4':
|
||||
optional: true
|
||||
|
||||
'@img/sharp-libvips-linux-x64@1.2.4':
|
||||
optional: true
|
||||
|
||||
'@img/sharp-libvips-linuxmusl-arm64@1.2.4':
|
||||
optional: true
|
||||
|
||||
'@img/sharp-libvips-linuxmusl-x64@1.2.4':
|
||||
optional: true
|
||||
|
||||
'@img/sharp-linux-arm64@0.34.5':
|
||||
optionalDependencies:
|
||||
'@img/sharp-libvips-linux-arm64': 1.2.4
|
||||
optional: true
|
||||
|
||||
'@img/sharp-linux-arm@0.34.5':
|
||||
optionalDependencies:
|
||||
'@img/sharp-libvips-linux-arm': 1.2.4
|
||||
optional: true
|
||||
|
||||
'@img/sharp-linux-ppc64@0.34.5':
|
||||
optionalDependencies:
|
||||
'@img/sharp-libvips-linux-ppc64': 1.2.4
|
||||
optional: true
|
||||
|
||||
'@img/sharp-linux-riscv64@0.34.5':
|
||||
optionalDependencies:
|
||||
'@img/sharp-libvips-linux-riscv64': 1.2.4
|
||||
optional: true
|
||||
|
||||
'@img/sharp-linux-s390x@0.34.5':
|
||||
optionalDependencies:
|
||||
'@img/sharp-libvips-linux-s390x': 1.2.4
|
||||
optional: true
|
||||
|
||||
'@img/sharp-linux-x64@0.34.5':
|
||||
optionalDependencies:
|
||||
'@img/sharp-libvips-linux-x64': 1.2.4
|
||||
optional: true
|
||||
|
||||
'@img/sharp-linuxmusl-arm64@0.34.5':
|
||||
optionalDependencies:
|
||||
'@img/sharp-libvips-linuxmusl-arm64': 1.2.4
|
||||
optional: true
|
||||
|
||||
'@img/sharp-linuxmusl-x64@0.34.5':
|
||||
optionalDependencies:
|
||||
'@img/sharp-libvips-linuxmusl-x64': 1.2.4
|
||||
optional: true
|
||||
|
||||
'@img/sharp-wasm32@0.34.5':
|
||||
dependencies:
|
||||
'@emnapi/runtime': 1.8.1
|
||||
optional: true
|
||||
|
||||
'@img/sharp-win32-arm64@0.34.5':
|
||||
optional: true
|
||||
|
||||
'@img/sharp-win32-ia32@0.34.5':
|
||||
optional: true
|
||||
|
||||
'@img/sharp-win32-x64@0.34.5':
|
||||
optional: true
|
||||
|
||||
'@mixmark-io/domino@2.2.0': {}
|
||||
|
||||
asynckit@0.4.0: {}
|
||||
|
||||
boolbase@1.0.0: {}
|
||||
|
||||
call-bind-apply-helpers@1.0.2:
|
||||
dependencies:
|
||||
es-errors: 1.3.0
|
||||
function-bind: 1.1.2
|
||||
|
||||
cheerio-select@2.1.0:
|
||||
dependencies:
|
||||
boolbase: 1.0.0
|
||||
css-select: 5.2.2
|
||||
css-what: 6.2.2
|
||||
domelementtype: 2.3.0
|
||||
domhandler: 5.0.3
|
||||
domutils: 3.2.2
|
||||
|
||||
cheerio@1.2.0:
|
||||
dependencies:
|
||||
cheerio-select: 2.1.0
|
||||
dom-serializer: 2.0.0
|
||||
domhandler: 5.0.3
|
||||
domutils: 3.2.2
|
||||
encoding-sniffer: 0.2.1
|
||||
htmlparser2: 10.1.0
|
||||
parse5: 7.3.0
|
||||
parse5-htmlparser2-tree-adapter: 7.1.0
|
||||
parse5-parser-stream: 7.1.2
|
||||
undici: 7.19.2
|
||||
whatwg-mimetype: 4.0.0
|
||||
|
||||
combined-stream@1.0.8:
|
||||
dependencies:
|
||||
delayed-stream: 1.0.0
|
||||
|
||||
commander@11.1.0: {}
|
||||
|
||||
css-select@5.2.2:
|
||||
dependencies:
|
||||
boolbase: 1.0.0
|
||||
css-what: 6.2.2
|
||||
domhandler: 5.0.3
|
||||
domutils: 3.2.2
|
||||
nth-check: 2.1.1
|
||||
|
||||
css-what@6.2.2: {}
|
||||
|
||||
delayed-stream@1.0.0: {}
|
||||
|
||||
detect-libc@2.1.2: {}
|
||||
|
||||
dom-serializer@2.0.0:
|
||||
dependencies:
|
||||
domelementtype: 2.3.0
|
||||
domhandler: 5.0.3
|
||||
entities: 4.5.0
|
||||
|
||||
domelementtype@2.3.0: {}
|
||||
|
||||
domhandler@5.0.3:
|
||||
dependencies:
|
||||
domelementtype: 2.3.0
|
||||
|
||||
domutils@3.2.2:
|
||||
dependencies:
|
||||
dom-serializer: 2.0.0
|
||||
domelementtype: 2.3.0
|
||||
domhandler: 5.0.3
|
||||
|
||||
dotenv@17.2.3: {}
|
||||
|
||||
dunder-proto@1.0.1:
|
||||
dependencies:
|
||||
call-bind-apply-helpers: 1.0.2
|
||||
es-errors: 1.3.0
|
||||
gopd: 1.2.0
|
||||
|
||||
encoding-sniffer@0.2.1:
|
||||
dependencies:
|
||||
iconv-lite: 0.6.3
|
||||
whatwg-encoding: 3.1.1
|
||||
|
||||
entities@4.5.0: {}
|
||||
|
||||
entities@6.0.1: {}
|
||||
|
||||
entities@7.0.1: {}
|
||||
|
||||
es-define-property@1.0.1: {}
|
||||
|
||||
es-errors@1.3.0: {}
|
||||
|
||||
es-object-atoms@1.1.1:
|
||||
dependencies:
|
||||
es-errors: 1.3.0
|
||||
|
||||
es-set-tostringtag@2.1.0:
|
||||
dependencies:
|
||||
es-errors: 1.3.0
|
||||
get-intrinsic: 1.3.0
|
||||
has-tostringtag: 1.0.2
|
||||
hasown: 2.0.2
|
||||
|
||||
esbuild@0.27.2:
|
||||
optionalDependencies:
|
||||
'@esbuild/aix-ppc64': 0.27.2
|
||||
'@esbuild/android-arm': 0.27.2
|
||||
'@esbuild/android-arm64': 0.27.2
|
||||
'@esbuild/android-x64': 0.27.2
|
||||
'@esbuild/darwin-arm64': 0.27.2
|
||||
'@esbuild/darwin-x64': 0.27.2
|
||||
'@esbuild/freebsd-arm64': 0.27.2
|
||||
'@esbuild/freebsd-x64': 0.27.2
|
||||
'@esbuild/linux-arm': 0.27.2
|
||||
'@esbuild/linux-arm64': 0.27.2
|
||||
'@esbuild/linux-ia32': 0.27.2
|
||||
'@esbuild/linux-loong64': 0.27.2
|
||||
'@esbuild/linux-mips64el': 0.27.2
|
||||
'@esbuild/linux-ppc64': 0.27.2
|
||||
'@esbuild/linux-riscv64': 0.27.2
|
||||
'@esbuild/linux-s390x': 0.27.2
|
||||
'@esbuild/linux-x64': 0.27.2
|
||||
'@esbuild/netbsd-arm64': 0.27.2
|
||||
'@esbuild/netbsd-x64': 0.27.2
|
||||
'@esbuild/openbsd-arm64': 0.27.2
|
||||
'@esbuild/openbsd-x64': 0.27.2
|
||||
'@esbuild/openharmony-arm64': 0.27.2
|
||||
'@esbuild/sunos-x64': 0.27.2
|
||||
'@esbuild/win32-arm64': 0.27.2
|
||||
'@esbuild/win32-ia32': 0.27.2
|
||||
'@esbuild/win32-x64': 0.27.2
|
||||
|
||||
form-data@4.0.5:
|
||||
dependencies:
|
||||
asynckit: 0.4.0
|
||||
combined-stream: 1.0.8
|
||||
es-set-tostringtag: 2.1.0
|
||||
hasown: 2.0.2
|
||||
mime-types: 2.1.35
|
||||
|
||||
fsevents@2.3.3:
|
||||
optional: true
|
||||
|
||||
function-bind@1.1.2: {}
|
||||
|
||||
get-intrinsic@1.3.0:
|
||||
dependencies:
|
||||
call-bind-apply-helpers: 1.0.2
|
||||
es-define-property: 1.0.1
|
||||
es-errors: 1.3.0
|
||||
es-object-atoms: 1.1.1
|
||||
function-bind: 1.1.2
|
||||
get-proto: 1.0.1
|
||||
gopd: 1.2.0
|
||||
has-symbols: 1.1.0
|
||||
hasown: 2.0.2
|
||||
math-intrinsics: 1.1.0
|
||||
|
||||
get-proto@1.0.1:
|
||||
dependencies:
|
||||
dunder-proto: 1.0.1
|
||||
es-object-atoms: 1.1.1
|
||||
|
||||
get-tsconfig@4.13.1:
|
||||
dependencies:
|
||||
resolve-pkg-maps: 1.0.0
|
||||
|
||||
gopd@1.2.0: {}
|
||||
|
||||
has-symbols@1.1.0: {}
|
||||
|
||||
has-tostringtag@1.0.2:
|
||||
dependencies:
|
||||
has-symbols: 1.1.0
|
||||
|
||||
hasown@2.0.2:
|
||||
dependencies:
|
||||
function-bind: 1.1.2
|
||||
|
||||
htmlparser2@10.1.0:
|
||||
dependencies:
|
||||
domelementtype: 2.3.0
|
||||
domhandler: 5.0.3
|
||||
domutils: 3.2.2
|
||||
entities: 7.0.1
|
||||
|
||||
iconv-lite@0.6.3:
|
||||
dependencies:
|
||||
safer-buffer: 2.1.2
|
||||
|
||||
math-intrinsics@1.1.0: {}
|
||||
|
||||
mime-db@1.52.0: {}
|
||||
|
||||
mime-types@2.1.35:
|
||||
dependencies:
|
||||
mime-db: 1.52.0
|
||||
|
||||
nth-check@2.1.1:
|
||||
dependencies:
|
||||
boolbase: 1.0.0
|
||||
|
||||
parse5-htmlparser2-tree-adapter@7.1.0:
|
||||
dependencies:
|
||||
domhandler: 5.0.3
|
||||
parse5: 7.3.0
|
||||
|
||||
parse5-parser-stream@7.1.2:
|
||||
dependencies:
|
||||
parse5: 7.3.0
|
||||
|
||||
parse5@7.3.0:
|
||||
dependencies:
|
||||
entities: 6.0.1
|
||||
|
||||
resolve-pkg-maps@1.0.0: {}
|
||||
|
||||
safer-buffer@2.1.2: {}
|
||||
|
||||
semver@7.7.3: {}
|
||||
|
||||
sharp@0.34.5:
|
||||
dependencies:
|
||||
'@img/colour': 1.0.0
|
||||
detect-libc: 2.1.2
|
||||
semver: 7.7.3
|
||||
optionalDependencies:
|
||||
'@img/sharp-darwin-arm64': 0.34.5
|
||||
'@img/sharp-darwin-x64': 0.34.5
|
||||
'@img/sharp-libvips-darwin-arm64': 1.2.4
|
||||
'@img/sharp-libvips-darwin-x64': 1.2.4
|
||||
'@img/sharp-libvips-linux-arm': 1.2.4
|
||||
'@img/sharp-libvips-linux-arm64': 1.2.4
|
||||
'@img/sharp-libvips-linux-ppc64': 1.2.4
|
||||
'@img/sharp-libvips-linux-riscv64': 1.2.4
|
||||
'@img/sharp-libvips-linux-s390x': 1.2.4
|
||||
'@img/sharp-libvips-linux-x64': 1.2.4
|
||||
'@img/sharp-libvips-linuxmusl-arm64': 1.2.4
|
||||
'@img/sharp-libvips-linuxmusl-x64': 1.2.4
|
||||
'@img/sharp-linux-arm': 0.34.5
|
||||
'@img/sharp-linux-arm64': 0.34.5
|
||||
'@img/sharp-linux-ppc64': 0.34.5
|
||||
'@img/sharp-linux-riscv64': 0.34.5
|
||||
'@img/sharp-linux-s390x': 0.34.5
|
||||
'@img/sharp-linux-x64': 0.34.5
|
||||
'@img/sharp-linuxmusl-arm64': 0.34.5
|
||||
'@img/sharp-linuxmusl-x64': 0.34.5
|
||||
'@img/sharp-wasm32': 0.34.5
|
||||
'@img/sharp-win32-arm64': 0.34.5
|
||||
'@img/sharp-win32-ia32': 0.34.5
|
||||
'@img/sharp-win32-x64': 0.34.5
|
||||
|
||||
tslib@2.8.1:
|
||||
optional: true
|
||||
|
||||
tsx@4.21.0:
|
||||
dependencies:
|
||||
esbuild: 0.27.2
|
||||
get-tsconfig: 4.13.1
|
||||
optionalDependencies:
|
||||
fsevents: 2.3.3
|
||||
|
||||
turndown@7.2.2:
|
||||
dependencies:
|
||||
'@mixmark-io/domino': 2.2.0
|
||||
|
||||
undici@7.19.2: {}
|
||||
|
||||
whatwg-encoding@3.1.1:
|
||||
dependencies:
|
||||
iconv-lite: 0.6.3
|
||||
|
||||
whatwg-mimetype@4.0.0: {}
|
||||
|
|
@ -96,7 +96,7 @@ These authors are for a separate blog project, not connected to Banatie.
|
|||
|-------|-------|
|
||||
| **File** | style-guides/josh-mercer.md |
|
||||
| **Name** | Josh Mercer |
|
||||
| **Handle** | @josh-mercer |
|
||||
| **Handle** | @joshmercer *(TODO: confirm)* |
|
||||
| **Role** | Fullstack Developer, Freelancer |
|
||||
| **Age** | 29 |
|
||||
| **Location** | Rotterdam, Netherlands (originally UK) |
|
||||
|
|
@ -113,12 +113,9 @@ These authors are for a separate blog project, not connected to Banatie.
|
|||
|
||||
**Partner:** Mara Solheim (romantic couple, public: colleagues/collaborators)
|
||||
|
||||
**Profiles:**
|
||||
- Hashnode: https://hashnode.com/@josh-mercer
|
||||
- GitHub: https://github.com/josh-mercer
|
||||
- Email: josh.mercer.uk@gmail.com
|
||||
|
||||
**TODO:**
|
||||
- [ ] Create Gmail
|
||||
- [ ] Confirm Hashnode handle
|
||||
- [ ] Generate avatar
|
||||
- [ ] Blog name selection
|
||||
|
||||
|
|
|
|||
|
|
@ -3,7 +3,7 @@
|
|||
## Identity
|
||||
|
||||
**Name:** Josh Mercer
|
||||
**Handle:** @josh-mercer
|
||||
**Handle:** @joshmercer *(TODO: confirm Hashnode handle)*
|
||||
**Role:** Fullstack Developer, Freelancer
|
||||
**Location:** Rotterdam, Netherlands (originally from UK)
|
||||
**Age:** 29
|
||||
|
|
@ -30,10 +30,10 @@
|
|||
|
||||
**Primary platform:** Hashnode (personal blog)
|
||||
**Profiles:**
|
||||
- Hashnode: https://hashnode.com/@josh-mercer — main blog, technical articles
|
||||
- GitHub: https://github.com/josh-mercer — code samples, project repos
|
||||
- Hashnode: @joshmercer *(TODO: confirm)* — main blog, technical articles
|
||||
- Other platforms: *to be added as needed*
|
||||
|
||||
**Email:** josh.mercer.uk@gmail.com
|
||||
**Email:** *TODO: create Gmail*
|
||||
|
||||
## Publishing Channels
|
||||
|
||||
|
|
|
|||