Compare commits
1 Commits
master
...
chore/remo
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
9f421b0bfa |
@@ -3,6 +3,6 @@
|
||||
"enabled": false
|
||||
},
|
||||
"_variables": {
|
||||
"lastUpdateCheck": 1763378528944
|
||||
"lastUpdateCheck": 1758909687006
|
||||
}
|
||||
}
|
||||
1
.astro/types.d.ts
vendored
@@ -1,2 +1 @@
|
||||
/// <reference types="astro/client" />
|
||||
/// <reference path="content.d.ts" />
|
||||
80
.github/workflows/cleanup-orphaned-content.yml
vendored
@@ -1,80 +0,0 @@
|
||||
name: Cleanup Orphaned Content
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
roadmap_slug:
|
||||
description: "The ID of the roadmap to clean up"
|
||||
required: true
|
||||
|
||||
jobs:
|
||||
cleanup-content:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Setup pnpm@v9
|
||||
uses: pnpm/action-setup@v4
|
||||
with:
|
||||
version: 9
|
||||
run_install: false
|
||||
|
||||
- name: Setup Node.js Version 20 (LTS)
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 20
|
||||
cache: 'pnpm'
|
||||
|
||||
- name: Install Dependencies and Run Cleanup
|
||||
run: |
|
||||
echo "Installing Dependencies"
|
||||
pnpm install
|
||||
echo "Running Orphaned Content Cleanup"
|
||||
npm run cleanup:orphaned-content -- --roadmap-slug=${{ inputs.roadmap_slug }}
|
||||
|
||||
- name: Read cleanup summary
|
||||
id: read-summary
|
||||
run: |
|
||||
if [ -f .cleanup-summary.md ]; then
|
||||
{
|
||||
echo 'summary<<EOF'
|
||||
cat .cleanup-summary.md
|
||||
echo 'EOF'
|
||||
} >> $GITHUB_OUTPUT
|
||||
fi
|
||||
|
||||
- name: Check for changes
|
||||
id: verify-changed-files
|
||||
run: |
|
||||
if [ -n "$(git status --porcelain)" ]; then
|
||||
echo "changed=true" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "changed=false" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
|
||||
- name: Delete summary file
|
||||
if: steps.verify-changed-files.outputs.changed == 'true'
|
||||
run: rm -f .cleanup-summary.md
|
||||
|
||||
- name: Create PR
|
||||
if: steps.verify-changed-files.outputs.changed == 'true'
|
||||
uses: peter-evans/create-pull-request@v7
|
||||
with:
|
||||
delete-branch: false
|
||||
branch: "chore/cleanup-orphaned-content-${{ inputs.roadmap_slug }}"
|
||||
base: "master"
|
||||
labels: |
|
||||
automated pr
|
||||
reviewers: jcanalesluna,kamranahmedse
|
||||
commit-message: "chore: cleanup orphaned content files"
|
||||
title: "chore: cleanup orphaned content - ${{ inputs.roadmap_slug }}"
|
||||
body: |
|
||||
${{ steps.read-summary.outputs.summary }}
|
||||
|
||||
> [!IMPORTANT]
|
||||
> This PR removes orphaned/duplicate content files for: ${{ inputs.roadmap_slug }}
|
||||
>
|
||||
> Commit: ${{ github.sha }}
|
||||
> Workflow Path: ${{ github.workflow_ref }}
|
||||
|
||||
**Please review the changes and merge the PR if everything looks correct.**
|
||||
66
.github/workflows/sync-repo-to-database.yml
vendored
@@ -1,17 +1,20 @@
|
||||
name: Sync Repo to Database
|
||||
name: Sync on Roadmap Changes
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
roadmap_slug:
|
||||
description: "The slug of the roadmap to sync (e.g., frontend, backend)"
|
||||
required: true
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
paths:
|
||||
- 'src/data/roadmaps/**'
|
||||
|
||||
jobs:
|
||||
sync-roadmap:
|
||||
sync-on-changes:
|
||||
runs-on: ubuntu-latest
|
||||
if: github.actor != 'github-actions[bot]' && github.actor != 'dependabot[bot]'
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 2 # Fetch previous commit to compare changes
|
||||
|
||||
- name: Setup pnpm@v9
|
||||
uses: pnpm/action-setup@v4
|
||||
@@ -25,33 +28,40 @@ jobs:
|
||||
node-version: 20
|
||||
cache: 'pnpm'
|
||||
|
||||
- name: Get all roadmap files
|
||||
id: roadmap-files
|
||||
- name: Get changed files
|
||||
id: changed-files
|
||||
run: |
|
||||
ROADMAP_DIR="src/data/roadmaps/${{ inputs.roadmap_slug }}"
|
||||
|
||||
if [ ! -d "$ROADMAP_DIR" ]; then
|
||||
echo "Error: Roadmap directory '$ROADMAP_DIR' does not exist"
|
||||
exit 1
|
||||
echo "Getting changed files in /src/data/roadmaps/"
|
||||
|
||||
# Get changed files between HEAD and previous commit
|
||||
CHANGED_FILES=$(git diff --name-only HEAD~1 HEAD -- src/data/roadmaps/)
|
||||
|
||||
if [ -z "$CHANGED_FILES" ]; then
|
||||
echo "No changes found in roadmaps directory"
|
||||
echo "has_changes=false" >> $GITHUB_OUTPUT
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo "Getting all files in $ROADMAP_DIR"
|
||||
|
||||
ALL_FILES=$(find "$ROADMAP_DIR" -type f | tr '\n' ',')
|
||||
|
||||
echo "Files to sync:"
|
||||
echo "$ALL_FILES"
|
||||
|
||||
echo "files=$ALL_FILES" >> $GITHUB_OUTPUT
|
||||
|
||||
echo "Changed files:"
|
||||
echo "$CHANGED_FILES"
|
||||
|
||||
# Convert to space-separated list for the script
|
||||
CHANGED_FILES_LIST=$(echo "$CHANGED_FILES" | tr '\n' ',')
|
||||
|
||||
echo "has_changes=true" >> $GITHUB_OUTPUT
|
||||
echo "changed_files=$CHANGED_FILES_LIST" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Install Dependencies
|
||||
if: steps.changed-files.outputs.has_changes == 'true'
|
||||
run: |
|
||||
echo "Installing Dependencies"
|
||||
pnpm install
|
||||
|
||||
- name: Run sync script
|
||||
- name: Run sync script with changed files
|
||||
if: steps.changed-files.outputs.has_changes == 'true'
|
||||
run: |
|
||||
echo "Running sync script for roadmap: ${{ inputs.roadmap_slug }}"
|
||||
echo "Files: ${{ steps.roadmap-files.outputs.files }}"
|
||||
|
||||
npm run sync:repo-to-database -- --files="${{ steps.roadmap-files.outputs.files }}" --secret=${{ secrets.GH_SYNC_SECRET }}
|
||||
echo "Running sync script for changed roadmap files"
|
||||
echo "Changed files: ${{ steps.changed-files.outputs.changed_files }}"
|
||||
|
||||
# Run your script with the changed file paths
|
||||
npm run sync:repo-to-database -- --files="${{ steps.changed-files.outputs.changed_files }}" --secret=${{ secrets.GH_SYNC_SECRET }}
|
||||
|
||||
@@ -20,29 +20,10 @@ export default defineConfig({
|
||||
status: 301,
|
||||
destination: '/ai',
|
||||
},
|
||||
'/best-practices': {
|
||||
status: 301,
|
||||
destination: '/roadmaps',
|
||||
},
|
||||
'/best-practices/aws': {
|
||||
status: 301,
|
||||
destination: '/aws-best-practices',
|
||||
},
|
||||
'/best-practices/backend-performance': {
|
||||
status: 301,
|
||||
destination: '/backend-performance-best-practices',
|
||||
},
|
||||
'/best-practices/frontend-performance': {
|
||||
status: 301,
|
||||
destination: '/frontend-performance-best-practices',
|
||||
},
|
||||
'/best-practices/api-security': {
|
||||
status: 301,
|
||||
destination: '/api-security-best-practices',
|
||||
},
|
||||
'/best-practices/code-review': {
|
||||
status: 301,
|
||||
destination: '/code-review-best-practices',
|
||||
},
|
||||
vite: {
|
||||
server: {
|
||||
allowedHosts: ['roadmap.sh', 'port3k.kamranahmed.info'],
|
||||
},
|
||||
},
|
||||
markdown: {
|
||||
@@ -91,8 +72,5 @@ export default defineConfig({
|
||||
ssr: {
|
||||
noExternal: [/^@roadmapsh\/editor.*$/],
|
||||
},
|
||||
server: {
|
||||
allowedHosts: ['roadmap.sh', 'port3k.kamranahmed.info'],
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
@@ -23,7 +23,7 @@ For new roadmaps, you can either:
|
||||
For the existing roadmaps, please follow the details listed for the nature of contribution:
|
||||
|
||||
- **Fixing Typos** — Make your changes in the [roadmap markdown file](https://github.com/kamranahmedse/developer-roadmap/tree/master/src/data/roadmaps) and submit a [PR](https://github.com/kamranahmedse/developer-roadmap/pulls).
|
||||
- **Adding/Removing Nodes and Modifying Node Titles** — Please open an [issue](https://github.com/kamranahmedse/developer-roadmap/issues) with your suggestion.
|
||||
- **Adding or Removing Nodes** — Please open an [issue](https://github.com/kamranahmedse/developer-roadmap/issues) with your suggestion.
|
||||
|
||||
**Note:** Please note that our goal is **not to have the biggest list of items**. Our goal is to list items or skills most relevant today.
|
||||
|
||||
|
||||
@@ -31,9 +31,7 @@
|
||||
"migrate:editor-roadmaps": "tsx ./scripts/migrate-editor-roadmap.ts",
|
||||
"sync:content-to-repo": "tsx ./scripts/sync-content-to-repo.ts",
|
||||
"sync:repo-to-database": "tsx ./scripts/sync-repo-to-database.ts",
|
||||
"sync:roadmap": "tsx ./scripts/sync-roadmap-to-database.ts",
|
||||
"migrate:content-repo-to-database": "tsx ./scripts/migrate-content-repo-to-database.ts",
|
||||
"cleanup:orphaned-content": "tsx ./scripts/cleanup-orphaned-content.ts",
|
||||
"official:roadmap-assets": "tsx ./scripts/official-roadmap-assets.ts",
|
||||
"test:e2e": "playwright test"
|
||||
},
|
||||
|
||||
|
Before Width: | Height: | Size: 284 KiB |
|
Before Width: | Height: | Size: 276 KiB |
|
Before Width: | Height: | Size: 333 KiB |
|
Before Width: | Height: | Size: 550 KiB |
|
Before Width: | Height: | Size: 223 KiB |
|
Before Width: | Height: | Size: 362 KiB |
|
Before Width: | Height: | Size: 416 KiB |
21
readme.md
@@ -35,7 +35,6 @@ Here is the list of available roadmaps with more being actively worked upon.
|
||||
- [Frontend Roadmap](https://roadmap.sh/frontend) / [Frontend Beginner Roadmap](https://roadmap.sh/frontend?r=frontend-beginner)
|
||||
- [Backend Roadmap](https://roadmap.sh/backend) / [Backend Beginner Roadmap](https://roadmap.sh/backend?r=backend-beginner)
|
||||
- [DevOps Roadmap](https://roadmap.sh/devops) / [DevOps Beginner Roadmap](https://roadmap.sh/devops?r=devops-beginner)
|
||||
- [DevSecOps Roadmap](https://roadmap.sh/devsecops)
|
||||
- [Full Stack Roadmap](https://roadmap.sh/full-stack)
|
||||
- [HTML Roadmap](https://roadmap.sh/html)
|
||||
- [CSS Roadmap](https://roadmap.sh/css)
|
||||
@@ -59,8 +58,7 @@ Here is the list of available roadmaps with more being actively worked upon.
|
||||
- [Product Manager Roadmap](https://roadmap.sh/product-manager)
|
||||
- [Engineering Manager Roadmap](https://roadmap.sh/engineering-manager)
|
||||
- [QA Roadmap](https://roadmap.sh/qa)
|
||||
- [Python Roadmap](https://roadmap.sh/python)
|
||||
- [Django Roadmap](https://roadmap.sh/django)
|
||||
- [Python Roadmap](https://roadmap.sh/python)
|
||||
- [Software Architect Roadmap](https://roadmap.sh/software-architect)
|
||||
- [Game Developer Roadmap](https://roadmap.sh/game-developer) / [Server Side Game Developer](https://roadmap.sh/server-side-game-developer)
|
||||
- [Software Design and Architecture Roadmap](https://roadmap.sh/software-design-architecture)
|
||||
@@ -72,12 +70,9 @@ Here is the list of available roadmaps with more being actively worked upon.
|
||||
- [Angular Roadmap](https://roadmap.sh/angular)
|
||||
- [Node.js Roadmap](https://roadmap.sh/nodejs)
|
||||
- [PHP Roadmap](https://roadmap.sh/php)
|
||||
- [Wordpress Roadmap](https://roadmap.sh/wordpress)
|
||||
- [Laravel Roadmap](https://roadmap.sh/laravel)
|
||||
- [GraphQL Roadmap](https://roadmap.sh/graphql)
|
||||
- [Android Roadmap](https://roadmap.sh/android)
|
||||
- [iOS Roadmap](https://roadmap.sh/ios)
|
||||
- [Swift/Swift UI Roadmap](https://roadmap.sh/swift-ui)
|
||||
- [Flutter Roadmap](https://roadmap.sh/flutter)
|
||||
- [Go Roadmap](https://roadmap.sh/golang)
|
||||
- [Rust Roadmap](https://roadmap.sh/rust)
|
||||
@@ -86,7 +81,6 @@ Here is the list of available roadmaps with more being actively worked upon.
|
||||
- [Spring Boot Roadmap](https://roadmap.sh/spring-boot)
|
||||
- [Design System Roadmap](https://roadmap.sh/design-system)
|
||||
- [PostgreSQL Roadmap](https://roadmap.sh/postgresql-dba)
|
||||
- [ElasticSearch Roadmap](https://roadmap.sh/elasticsearch)
|
||||
- [SQL Roadmap](https://roadmap.sh/sql)
|
||||
- [Redis Roadmap](https://roadmap.sh/redis)
|
||||
- [Blockchain Roadmap](https://roadmap.sh/blockchain)
|
||||
@@ -102,7 +96,6 @@ Here is the list of available roadmaps with more being actively worked upon.
|
||||
- [DevRel Engineer Roadmap](https://roadmap.sh/devrel)
|
||||
- [AI Red Teaming Roadmap](https://roadmap.sh/ai-red-teaming)
|
||||
- [AI Agents Roadmap](https://roadmap.sh/ai-agents)
|
||||
- [Bash/Shell Roadmap](https://roadmap.sh/shell-bash)
|
||||
|
||||
There are also interactive best practices:
|
||||
|
||||
@@ -137,16 +130,16 @@ Please consider sharing a post about [roadmap.sh](https://roadmap.sh) and the va
|
||||
Clone the repository, install the dependencies and start the application
|
||||
|
||||
```bash
|
||||
git clone git@github.com:kamranahmedse/developer-roadmap.git --depth 1
|
||||
git clone git@github.com:kamranahmedse/developer-roadmap.git
|
||||
cd developer-roadmap
|
||||
pnpm add @roadmapsh/editor@npm:@roadmapsh/dummy-editor -w
|
||||
pnpm install
|
||||
npm install
|
||||
npm run dev
|
||||
```
|
||||
|
||||
Run the development server with:
|
||||
Note: use the `depth` parameter to reduce the clone size and speed up the clone.
|
||||
|
||||
```bash
|
||||
pnpm dev
|
||||
```sh
|
||||
git clone --depth=1 https://github.com/kamranahmedse/developer-roadmap.git
|
||||
```
|
||||
|
||||
## Contribution
|
||||
|
||||
@@ -1,259 +0,0 @@
|
||||
import type { Node } from '@roadmapsh/editor';
|
||||
import matter from 'gray-matter';
|
||||
import fs from 'node:fs/promises';
|
||||
import path from 'node:path';
|
||||
import { fileURLToPath } from 'node:url';
|
||||
import { slugify } from '../src/lib/slugger';
|
||||
|
||||
const __filename = fileURLToPath(import.meta.url);
|
||||
const __dirname = path.dirname(__filename);
|
||||
|
||||
const ROADMAP_CONTENT_DIR = path.join(__dirname, '../src/data/roadmaps');
|
||||
|
||||
const args = process.argv.slice(2);
|
||||
const roadmapSlug = args?.[0]?.replace('--roadmap-slug=', '');
|
||||
|
||||
if (!roadmapSlug) {
|
||||
console.error('Usage: tsx scripts/cleanup-orphaned-content.ts --roadmap-slug=<slug|__all__>');
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
interface OrphanEntry {
|
||||
file: string;
|
||||
reason: string;
|
||||
duplicateOf: string;
|
||||
action: 'deleted' | 'renamed';
|
||||
renamedTo?: string;
|
||||
}
|
||||
|
||||
async function fetchRoadmapJson(slug: string): Promise<{ nodes: Node[] }> {
|
||||
try {
|
||||
const response = await fetch(
|
||||
`https://roadmap.sh/api/v1-official-roadmap/${slug}`,
|
||||
);
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(`HTTP ${response.status}`);
|
||||
}
|
||||
|
||||
const data = await response.json();
|
||||
if (data.error) {
|
||||
throw new Error(data.error);
|
||||
}
|
||||
|
||||
return data;
|
||||
} catch (err) {
|
||||
console.log(` API fetch failed for ${slug}, falling back to local JSON`);
|
||||
const localPath = path.join(ROADMAP_CONTENT_DIR, slug, `${slug}.json`);
|
||||
const raw = await fs.readFile(localPath, 'utf-8');
|
||||
return JSON.parse(raw);
|
||||
}
|
||||
}
|
||||
|
||||
async function isEditorRoadmap(slug: string): Promise<boolean> {
|
||||
const mdPath = path.join(ROADMAP_CONTENT_DIR, slug, `${slug}.md`);
|
||||
try {
|
||||
const raw = await fs.readFile(mdPath, 'utf-8');
|
||||
const { data } = matter(raw);
|
||||
return data.renderer === 'editor';
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
async function getEditorRoadmapSlugs(): Promise<string[]> {
|
||||
const allDirs = await fs.readdir(ROADMAP_CONTENT_DIR);
|
||||
const results: string[] = [];
|
||||
|
||||
for (const dir of allDirs) {
|
||||
const stat = await fs.stat(path.join(ROADMAP_CONTENT_DIR, dir)).catch(() => null);
|
||||
if (!stat?.isDirectory()) {
|
||||
continue;
|
||||
}
|
||||
if (await isEditorRoadmap(dir)) {
|
||||
results.push(dir);
|
||||
}
|
||||
}
|
||||
|
||||
return results;
|
||||
}
|
||||
|
||||
function parseContentFilename(filename: string): { slug: string; nodeId: string } | null {
|
||||
const match = filename.match(/^(.+)@([^.]+)\.md$/);
|
||||
if (!match) {
|
||||
return null;
|
||||
}
|
||||
return { slug: match[1], nodeId: match[2] };
|
||||
}
|
||||
|
||||
async function cleanupRoadmap(slug: string): Promise<OrphanEntry[]> {
|
||||
console.log(`\nProcessing: ${slug}`);
|
||||
|
||||
const contentDir = path.join(ROADMAP_CONTENT_DIR, slug, 'content');
|
||||
const stat = await fs.stat(contentDir).catch(() => null);
|
||||
if (!stat?.isDirectory()) {
|
||||
console.log(` No content directory found, skipping`);
|
||||
return [];
|
||||
}
|
||||
|
||||
const roadmapData = await fetchRoadmapJson(slug);
|
||||
if (!roadmapData?.nodes) {
|
||||
console.log(` No nodes found in roadmap JSON, skipping`);
|
||||
return [];
|
||||
}
|
||||
|
||||
const topicNodes = roadmapData.nodes.filter(
|
||||
(node) =>
|
||||
node?.type &&
|
||||
['topic', 'subtopic'].includes(node.type) &&
|
||||
node.data?.label,
|
||||
);
|
||||
|
||||
const validNodeIds = new Set<string>();
|
||||
const nodeIdToExpectedSlug = new Map<string, string>();
|
||||
|
||||
for (const node of topicNodes) {
|
||||
validNodeIds.add(node.id);
|
||||
nodeIdToExpectedSlug.set(node.id, slugify(node.data.label as string));
|
||||
}
|
||||
|
||||
const files = await fs.readdir(contentDir);
|
||||
const orphans: OrphanEntry[] = [];
|
||||
|
||||
const validFilesBySlug = new Map<string, string>();
|
||||
for (const file of files) {
|
||||
const parsed = parseContentFilename(file);
|
||||
if (!parsed) {
|
||||
continue;
|
||||
}
|
||||
if (validNodeIds.has(parsed.nodeId) && nodeIdToExpectedSlug.get(parsed.nodeId) === parsed.slug) {
|
||||
validFilesBySlug.set(parsed.slug, file);
|
||||
}
|
||||
}
|
||||
|
||||
for (const file of files) {
|
||||
const parsed = parseContentFilename(file);
|
||||
if (!parsed) {
|
||||
continue;
|
||||
}
|
||||
|
||||
const { slug: fileSlug, nodeId } = parsed;
|
||||
|
||||
if (validNodeIds.has(nodeId)) {
|
||||
const expectedSlug = nodeIdToExpectedSlug.get(nodeId)!;
|
||||
if (fileSlug === expectedSlug) {
|
||||
continue;
|
||||
}
|
||||
|
||||
const correctFile = `${expectedSlug}@${nodeId}.md`;
|
||||
const correctFileExists = files.includes(correctFile);
|
||||
|
||||
if (correctFileExists) {
|
||||
orphans.push({
|
||||
file,
|
||||
reason: 'Same nodeId, old slug',
|
||||
duplicateOf: correctFile,
|
||||
action: 'deleted',
|
||||
});
|
||||
} else {
|
||||
orphans.push({
|
||||
file,
|
||||
reason: 'Same nodeId, old slug',
|
||||
duplicateOf: correctFile,
|
||||
action: 'renamed',
|
||||
renamedTo: correctFile,
|
||||
});
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
const validFile = validFilesBySlug.get(fileSlug);
|
||||
if (validFile) {
|
||||
orphans.push({
|
||||
file,
|
||||
reason: 'Same slug, old nodeId',
|
||||
duplicateOf: validFile,
|
||||
action: 'deleted',
|
||||
});
|
||||
} else {
|
||||
orphans.push({
|
||||
file,
|
||||
reason: 'Topic removed from roadmap',
|
||||
duplicateOf: 'N/A',
|
||||
action: 'deleted',
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
for (const orphan of orphans) {
|
||||
const filePath = path.join(contentDir, orphan.file);
|
||||
if (orphan.action === 'renamed') {
|
||||
const newPath = path.join(contentDir, orphan.renamedTo!);
|
||||
await fs.rename(filePath, newPath);
|
||||
console.log(` Renamed: ${orphan.file} -> ${orphan.renamedTo} (${orphan.reason})`);
|
||||
} else {
|
||||
await fs.unlink(filePath);
|
||||
console.log(` Deleted: ${orphan.file} (${orphan.reason})`);
|
||||
}
|
||||
}
|
||||
|
||||
if (orphans.length === 0) {
|
||||
console.log(` No orphans found`);
|
||||
}
|
||||
|
||||
return orphans;
|
||||
}
|
||||
|
||||
async function main() {
|
||||
const slugs =
|
||||
roadmapSlug === '__all__'
|
||||
? await getEditorRoadmapSlugs()
|
||||
: [roadmapSlug];
|
||||
|
||||
if (roadmapSlug !== '__all__') {
|
||||
if (!(await isEditorRoadmap(roadmapSlug))) {
|
||||
console.error(`${roadmapSlug} is not an editor-rendered roadmap`);
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
console.log(`Processing ${slugs.length} roadmap(s)...`);
|
||||
|
||||
const allOrphans = new Map<string, OrphanEntry[]>();
|
||||
let totalOrphans = 0;
|
||||
|
||||
for (const slug of slugs) {
|
||||
const orphans = await cleanupRoadmap(slug);
|
||||
if (orphans.length > 0) {
|
||||
allOrphans.set(slug, orphans);
|
||||
totalOrphans += orphans.length;
|
||||
}
|
||||
}
|
||||
|
||||
const roadmapsAffected = allOrphans.size;
|
||||
|
||||
let summary = `## Orphaned Content Cleanup\n\n`;
|
||||
summary += `Cleaned up **${totalOrphans}** orphaned content file(s) across **${roadmapsAffected}** roadmap(s).\n\n`;
|
||||
|
||||
for (const [slug, orphans] of allOrphans) {
|
||||
summary += `### ${slug}\n\n`;
|
||||
summary += `| File | Action | Reason | Duplicate Of |\n`;
|
||||
summary += `|---|---|---|---|\n`;
|
||||
for (const orphan of orphans) {
|
||||
const action = orphan.action === 'renamed' ? `Renamed to \`${orphan.renamedTo}\`` : 'Deleted';
|
||||
const dupOf = orphan.duplicateOf === 'N/A' ? 'N/A' : `\`${orphan.duplicateOf}\``;
|
||||
summary += `| \`${orphan.file}\` | ${action} | ${orphan.reason} | ${dupOf} |\n`;
|
||||
}
|
||||
summary += `\n`;
|
||||
}
|
||||
|
||||
const summaryPath = path.join(__dirname, '..', '.cleanup-summary.md');
|
||||
await fs.writeFile(summaryPath, summary);
|
||||
console.log(`\nSummary written to .cleanup-summary.md`);
|
||||
console.log(`Total: ${totalOrphans} orphaned file(s) cleaned up across ${roadmapsAffected} roadmap(s)`);
|
||||
}
|
||||
|
||||
main().catch((err) => {
|
||||
console.error(err);
|
||||
process.exit(1);
|
||||
});
|
||||
@@ -36,11 +36,6 @@ export async function fetchRoadmapJson(
|
||||
|
||||
const response = await fetch(
|
||||
`https://roadmap.sh/api/v1-official-roadmap/${roadmapId}`,
|
||||
{
|
||||
headers: {
|
||||
'User-Agent': 'Mozilla/5.0 (compatible; roadmap-sync/1.0)',
|
||||
},
|
||||
},
|
||||
);
|
||||
|
||||
if (!response.ok) {
|
||||
@@ -69,7 +64,6 @@ export async function syncContentToDatabase(
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'User-Agent': 'Mozilla/5.0 (compatible; roadmap-sync/1.0)',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
topics,
|
||||
@@ -78,21 +72,14 @@ export async function syncContentToDatabase(
|
||||
},
|
||||
);
|
||||
|
||||
const responseText = await response.text();
|
||||
|
||||
if (!response.ok) {
|
||||
const error = await response.json();
|
||||
throw new Error(
|
||||
`Failed to sync content to database: ${response.status} ${response.statusText}\n${responseText}`,
|
||||
`Failed to sync content to database: ${response.statusText} ${JSON.stringify(error, null, 2)}`,
|
||||
);
|
||||
}
|
||||
|
||||
try {
|
||||
return JSON.parse(responseText);
|
||||
} catch {
|
||||
throw new Error(
|
||||
`Failed to parse response as JSON: ${responseText.substring(0, 500)}`,
|
||||
);
|
||||
}
|
||||
return response.json();
|
||||
}
|
||||
|
||||
const files =
|
||||
@@ -244,15 +231,8 @@ try {
|
||||
});
|
||||
}
|
||||
|
||||
console.log(`📤 Syncing ${topics.length} topics to database...`);
|
||||
await syncContentToDatabase(topics);
|
||||
console.log(`✅ Successfully synced ${topics.length} topics`);
|
||||
} catch (error) {
|
||||
console.error('❌ Sync failed with error:');
|
||||
console.error(error);
|
||||
if (error instanceof Error) {
|
||||
console.error('\nError message:', error.message);
|
||||
console.error('\nStack trace:', error.stack);
|
||||
}
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
@@ -1,82 +0,0 @@
|
||||
import { execSync } from 'node:child_process';
|
||||
import fs from 'node:fs';
|
||||
import path from 'node:path';
|
||||
import { fileURLToPath } from 'node:url';
|
||||
|
||||
const __filename = fileURLToPath(import.meta.url);
|
||||
const __dirname = path.dirname(__filename);
|
||||
|
||||
const args = process.argv.slice(2);
|
||||
const roadmapSlug = args
|
||||
.find((arg) => arg.startsWith('--roadmap='))
|
||||
?.replace('--roadmap=', '');
|
||||
const secret = args
|
||||
.find((arg) => arg.startsWith('--secret='))
|
||||
?.replace('--secret=', '');
|
||||
|
||||
if (!roadmapSlug) {
|
||||
console.error('❌ Roadmap slug is required. Use --roadmap=<slug>');
|
||||
console.error(' Example: npm run sync:roadmap -- --roadmap=frontend --secret=<secret>');
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
if (!secret) {
|
||||
console.error('❌ Secret is required. Use --secret=<secret>');
|
||||
console.error(' Example: npm run sync:roadmap -- --roadmap=frontend --secret=<secret>');
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
const roadmapDir = path.join(__dirname, '../src/data/roadmaps', roadmapSlug);
|
||||
|
||||
if (!fs.existsSync(roadmapDir)) {
|
||||
console.error(`❌ Roadmap directory not found: ${roadmapDir}`);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
console.log(`🔍 Finding all content files in: ${roadmapDir}`);
|
||||
|
||||
function getAllFiles(dir: string): string[] {
|
||||
const files: string[] = [];
|
||||
const entries = fs.readdirSync(dir, { withFileTypes: true });
|
||||
|
||||
for (const entry of entries) {
|
||||
const fullPath = path.join(dir, entry.name);
|
||||
if (entry.isDirectory()) {
|
||||
files.push(...getAllFiles(fullPath));
|
||||
} else {
|
||||
files.push(fullPath);
|
||||
}
|
||||
}
|
||||
|
||||
return files;
|
||||
}
|
||||
|
||||
const allFiles = getAllFiles(roadmapDir);
|
||||
const relativeFiles = allFiles.map((file) =>
|
||||
file.replace(path.join(__dirname, '../'), ''),
|
||||
);
|
||||
|
||||
console.log(`📁 Found ${relativeFiles.length} files`);
|
||||
|
||||
if (relativeFiles.length === 0) {
|
||||
console.log('⚠️ No files found to sync');
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
const filesArg = relativeFiles.join(',');
|
||||
|
||||
console.log(`🚀 Syncing roadmap "${roadmapSlug}" to database...`);
|
||||
|
||||
try {
|
||||
execSync(
|
||||
`npx tsx ./scripts/sync-repo-to-database.ts --files="${filesArg}" --secret=${secret}`,
|
||||
{
|
||||
cwd: path.join(__dirname, '..'),
|
||||
stdio: 'inherit',
|
||||
},
|
||||
);
|
||||
console.log(`✅ Successfully synced roadmap "${roadmapSlug}" to database`);
|
||||
} catch (error) {
|
||||
console.error(`❌ Failed to sync roadmap "${roadmapSlug}" to database`);
|
||||
process.exit(1);
|
||||
}
|
||||
@@ -78,6 +78,13 @@ const defaultPages: PageType[] = [
|
||||
icon: <RoadmapIcon className="mr-2 h-4 w-4 stroke-2" />,
|
||||
isProtected: true,
|
||||
},
|
||||
{
|
||||
id: 'best-practices',
|
||||
url: '/best-practices',
|
||||
title: 'Best Practices',
|
||||
group: 'Pages',
|
||||
icon: <BestPracticesIcon className="mr-2 h-4 w-4 stroke-2" />,
|
||||
},
|
||||
{
|
||||
id: 'questions',
|
||||
url: '/questions',
|
||||
|
||||
@@ -453,12 +453,12 @@ export function PersonalDashboard(props: PersonalDashboardProps) {
|
||||
<HeroRoadmap
|
||||
key={roadmap.id}
|
||||
resourceId={roadmap.id}
|
||||
resourceType="roadmap"
|
||||
resourceType="best-practice"
|
||||
resourceTitle={roadmap.title}
|
||||
isFavorite={roadmap.isFavorite}
|
||||
percentageDone={percentageDone}
|
||||
isNew={roadmap.isNew}
|
||||
url={`/${roadmap.id}`}
|
||||
url={`/best-practices/${roadmap.id}`}
|
||||
/>
|
||||
);
|
||||
})}
|
||||
@@ -470,9 +470,7 @@ export function PersonalDashboard(props: PersonalDashboardProps) {
|
||||
<div className="grid grid-cols-1 gap-5 bg-gray-50 px-4 py-5 sm:gap-16 sm:px-0 sm:py-16">
|
||||
<FeaturedGuideList
|
||||
heading="Guides"
|
||||
guides={guides
|
||||
.filter((guide) => guide.roadmapId !== 'questions')
|
||||
.slice(0, 15)}
|
||||
guides={guides.slice(0, 15)}
|
||||
questions={guides
|
||||
.filter((guide) => guide.roadmapId === 'questions')
|
||||
.slice(0, 15)}
|
||||
|
||||
@@ -176,51 +176,6 @@ export function EditorRoadmapRenderer(props: RoadmapRendererProps) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (nodeType === 'checklist-item' && (target.tagName === 'text' || target.tagName === 'tspan')) {
|
||||
e.preventDefault();
|
||||
|
||||
const textElement = target.tagName === 'tspan' ? (target.closest('text') as SVGTextElement) : target;
|
||||
const clickedText = textElement?.textContent?.trim();
|
||||
if (!clickedText) {
|
||||
return;
|
||||
}
|
||||
|
||||
const parentChecklistId = targetGroup?.dataset?.parentId;
|
||||
if (!parentChecklistId) {
|
||||
return;
|
||||
}
|
||||
|
||||
const parentChecklistGroup = roadmapRef.current?.querySelector(
|
||||
`g[data-node-id="${parentChecklistId}"][data-type="checklist"]`
|
||||
);
|
||||
if (!parentChecklistGroup) {
|
||||
return;
|
||||
}
|
||||
|
||||
const labelGroup = parentChecklistGroup.querySelector(
|
||||
'g[data-type="checklist-label"]'
|
||||
);
|
||||
if (!labelGroup) {
|
||||
return;
|
||||
}
|
||||
|
||||
const labelText = labelGroup.querySelector('text')?.textContent?.trim();
|
||||
if (!labelText) {
|
||||
return;
|
||||
}
|
||||
|
||||
window.dispatchEvent(
|
||||
new CustomEvent('roadmap.checklist.click', {
|
||||
detail: {
|
||||
roadmapId: resourceId,
|
||||
labelText,
|
||||
clickedText,
|
||||
},
|
||||
}),
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
// we don't have the topic popup for checklist-item
|
||||
if (nodeType === 'checklist-item') {
|
||||
return;
|
||||
|
||||
@@ -306,53 +306,6 @@ export function RoadmapFloatingChat(props: RoadmapChatProps) {
|
||||
};
|
||||
}, [isOpen, isPersonalizeOpen]);
|
||||
|
||||
useEffect(() => {
|
||||
const handleChecklistClick = (e: CustomEvent) => {
|
||||
const { roadmapId: eventRoadmapId, labelText, clickedText } = e.detail;
|
||||
|
||||
if (eventRoadmapId !== roadmapId) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (!isLoggedIn()) {
|
||||
setIsOpen(false);
|
||||
showLoginPopup();
|
||||
return;
|
||||
}
|
||||
|
||||
const roadmapTitle = roadmapDetail?.json?.title?.page || roadmapDetail?.json?.title?.card || 'this roadmap';
|
||||
const message = `Explain the '${roadmapTitle}' best practice '${labelText} > ${clickedText}'`;
|
||||
|
||||
flushSync(() => {
|
||||
setIsOpen(true);
|
||||
});
|
||||
|
||||
setTimeout(() => {
|
||||
sendMessage(
|
||||
{ text: message, metadata: { json: textToJSON(message) } },
|
||||
{
|
||||
body: {
|
||||
roadmapId,
|
||||
...(activeChatHistoryId
|
||||
? { chatHistoryId: activeChatHistoryId }
|
||||
: {}),
|
||||
},
|
||||
},
|
||||
);
|
||||
|
||||
setTimeout(() => {
|
||||
scrollToBottom('smooth');
|
||||
inputRef.current?.focus();
|
||||
}, 0);
|
||||
}, 100);
|
||||
};
|
||||
|
||||
window.addEventListener('roadmap.checklist.click', handleChecklistClick as EventListener);
|
||||
return () => {
|
||||
window.removeEventListener('roadmap.checklist.click', handleChecklistClick as EventListener);
|
||||
};
|
||||
}, [roadmapId, roadmapDetail, sendMessage, activeChatHistoryId, scrollToBottom]);
|
||||
|
||||
function textToJSON(text: string): JSONContent {
|
||||
return {
|
||||
type: 'doc',
|
||||
|
||||
@@ -81,6 +81,9 @@ export function AccountDropdownList(props: AccountDropdownListProps) {
|
||||
<SquareUserRound className="h-4 w-4 stroke-[2.5px] text-slate-400 group-hover:text-white" />
|
||||
My Profile
|
||||
</span>
|
||||
<span className="rounded-xs bg-yellow-300 px-1 text-xs tracking-wide text-black uppercase">
|
||||
New
|
||||
</span>
|
||||
</a>
|
||||
</li>
|
||||
<li className="px-1">
|
||||
|
||||
@@ -5,7 +5,7 @@ export const paidFeaturesList = [
|
||||
'AI tutor on Roadmaps',
|
||||
'Custom Roadmaps (up to 100)',
|
||||
'AI-Powered Roadmap Editor',
|
||||
'Personal AI Coach',
|
||||
'Peronsal AI Coach',
|
||||
'and more...',
|
||||
];
|
||||
|
||||
|
||||
@@ -14,7 +14,6 @@ import { ScheduleButton } from './Schedule/ScheduleButton';
|
||||
import { ShareRoadmapButton } from './ShareRoadmapButton';
|
||||
import { TabLink } from './TabLink';
|
||||
import { PersonalizedRoadmap } from './PersonalizedRoadmap/PersonalizedRoadmap';
|
||||
import RoadmapFeedbackAlert from './Roadmaps/RoadmapFeedbackAlert.astro';
|
||||
|
||||
export interface Props {
|
||||
title: string;
|
||||
@@ -30,7 +29,6 @@ export interface Props {
|
||||
coursesCount?: number;
|
||||
hasAIChat?: boolean;
|
||||
isForkable?: boolean;
|
||||
isNew?: boolean;
|
||||
activeTab?: 'roadmap' | 'projects' | 'courses';
|
||||
}
|
||||
|
||||
@@ -43,7 +41,6 @@ const {
|
||||
projectCount = 0,
|
||||
activeTab = 'roadmap',
|
||||
coursesCount = 0,
|
||||
isNew = false,
|
||||
} = Astro.props;
|
||||
|
||||
const hasCourses = coursesCount > 0;
|
||||
@@ -68,8 +65,6 @@ const hasProjects = projectCount > 0;
|
||||
)
|
||||
}
|
||||
|
||||
{isNew && <RoadmapFeedbackAlert roadmapId={roadmapId} title={title} />}
|
||||
|
||||
<div
|
||||
class='relative rounded-none border bg-white px-5 pt-4 pb-0 sm:rounded-lg'
|
||||
>
|
||||
|
||||
@@ -1,37 +0,0 @@
|
||||
---
|
||||
import { Bell } from 'lucide-react';
|
||||
|
||||
export interface Props {
|
||||
roadmapId: string;
|
||||
title: string;
|
||||
}
|
||||
|
||||
const { roadmapId, title } = Astro.props;
|
||||
|
||||
const issueTitle = encodeURIComponent(`[New Roadmap Feedback] ${title}`);
|
||||
const roadmapUrl = encodeURIComponent(`https://roadmap.sh/${roadmapId}`);
|
||||
const feedbackUrl = `https://github.com/kamranahmedse/developer-roadmap/issues/new?template=01-suggest-changes.yml&title=${issueTitle}&url=${roadmapUrl}`;
|
||||
---
|
||||
|
||||
<div
|
||||
class='hidden rounded-md border border-yellow-400 bg-gradient-to-r from-yellow-100 via-yellow-50 to-amber-100 px-2 py-2 sm:block'
|
||||
>
|
||||
<p class='flex items-center gap-2.5 text-sm'>
|
||||
<span
|
||||
class='inline-flex items-center gap-1 rounded-sm bg-gradient-to-r from-yellow-600 to-yellow-700 px-2.5 py-1 text-xs font-bold text-white'
|
||||
>
|
||||
<Bell className='h-3.5 w-3.5' />
|
||||
Feedback Wanted
|
||||
</span>
|
||||
<span class='font-medium text-yellow-900'>
|
||||
This is a new roadmap, help us improve it with
|
||||
<a
|
||||
href={feedbackUrl}
|
||||
target='_blank'
|
||||
class='font-bold text-yellow-700 underline decoration-yellow-500 decoration-2 underline-offset-2 transition-all hover:text-yellow-900 hover:decoration-yellow-700'
|
||||
>
|
||||
your feedback
|
||||
</a>
|
||||
</span>
|
||||
</p>
|
||||
</div>
|
||||
@@ -55,7 +55,7 @@ export type GroupType = {
|
||||
roadmaps: {
|
||||
title: string;
|
||||
link: string;
|
||||
type: 'role' | 'skill' | 'best-practice';
|
||||
type: 'role' | 'skill';
|
||||
otherGroups?: AllowGroupNames[];
|
||||
}[];
|
||||
};
|
||||
@@ -169,12 +169,6 @@ const groups: GroupType[] = [
|
||||
type: 'skill',
|
||||
otherGroups: ['Web Development'],
|
||||
},
|
||||
{
|
||||
title: 'Laravel',
|
||||
link: '/laravel',
|
||||
type: 'skill',
|
||||
otherGroups: ['Web Development'],
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
@@ -196,12 +190,7 @@ const groups: GroupType[] = [
|
||||
title: 'JavaScript',
|
||||
link: '/javascript',
|
||||
type: 'skill',
|
||||
otherGroups: [
|
||||
'Web Development',
|
||||
'DevOps',
|
||||
'Mobile Development',
|
||||
'Absolute Beginners',
|
||||
],
|
||||
otherGroups: ['Web Development', 'DevOps', 'Mobile Development', 'Absolute Beginners'],
|
||||
},
|
||||
{
|
||||
title: 'Kotlin',
|
||||
@@ -209,12 +198,6 @@ const groups: GroupType[] = [
|
||||
type: 'skill',
|
||||
otherGroups: ['Web Development', 'DevOps', 'Mobile Development'],
|
||||
},
|
||||
{
|
||||
title: 'Swift & Swift-UI',
|
||||
link: '/swift-ui',
|
||||
type: 'skill',
|
||||
otherGroups: ['Mobile Development'],
|
||||
},
|
||||
{
|
||||
title: 'TypeScript',
|
||||
link: '/typescript',
|
||||
@@ -268,12 +251,6 @@ const groups: GroupType[] = [
|
||||
type: 'skill',
|
||||
otherGroups: ['Web Development', 'Databases', 'DevOps'],
|
||||
},
|
||||
{
|
||||
title: 'Shell / Bash',
|
||||
link: '/shell-bash',
|
||||
type: 'skill',
|
||||
otherGroups: ['Web Development', 'DevOps'],
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
@@ -543,41 +520,6 @@ const groups: GroupType[] = [
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
group: 'Best Practices',
|
||||
roadmaps: [
|
||||
{
|
||||
title: 'Backend Performance',
|
||||
link: '/backend-performance-best-practices',
|
||||
type: 'best-practice',
|
||||
otherGroups: ['Web Development'],
|
||||
},
|
||||
{
|
||||
title: 'Frontend Performance',
|
||||
link: '/frontend-performance-best-practices',
|
||||
type: 'best-practice',
|
||||
otherGroups: ['Web Development'],
|
||||
},
|
||||
{
|
||||
title: 'Code Review',
|
||||
link: '/code-review-best-practices',
|
||||
type: 'best-practice',
|
||||
otherGroups: ['Web Development'],
|
||||
},
|
||||
{
|
||||
title: 'AWS',
|
||||
link: '/aws-best-practices',
|
||||
type: 'best-practice',
|
||||
otherGroups: ['Web Development', 'DevOps'],
|
||||
},
|
||||
{
|
||||
title: 'API Security',
|
||||
link: '/api-security-best-practices',
|
||||
type: 'best-practice',
|
||||
otherGroups: ['Web Development'],
|
||||
},
|
||||
],
|
||||
},
|
||||
];
|
||||
|
||||
const roleRoadmaps = groups.flatMap((group) =>
|
||||
@@ -586,9 +528,6 @@ const roleRoadmaps = groups.flatMap((group) =>
|
||||
const skillRoadmaps = groups.flatMap((group) =>
|
||||
group.roadmaps.filter((roadmap) => roadmap.type === 'skill'),
|
||||
);
|
||||
const bestPracticeRoadmaps = groups.flatMap((group) =>
|
||||
group.roadmaps.filter((roadmap) => roadmap.type === 'best-practice'),
|
||||
);
|
||||
|
||||
const allGroups = [
|
||||
{
|
||||
@@ -599,10 +538,6 @@ const allGroups = [
|
||||
group: 'Skill Based Roadmaps',
|
||||
roadmaps: skillRoadmaps,
|
||||
},
|
||||
{
|
||||
group: 'Best Practices',
|
||||
roadmaps: bestPracticeRoadmaps,
|
||||
},
|
||||
];
|
||||
|
||||
export function RoadmapsPage() {
|
||||
|
||||
@@ -5,4 +5,4 @@ Acting, also called tool invocation, is the step where the AI chooses a tool and
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@What are Tools in AI Agents?](https://huggingface.co/learn/agents-course/en/unit1/tools)
|
||||
- [@article@What is Tool Calling in Agents?](https://www.useparagon.com/blog/ai-building-blocks-what-is-tool-calling-a-guide-for-pms)
|
||||
- [@article@What is Tool Calling in Agents?](https://www.useparagon.com/blog/ai-building-blocks-what-is-tool-calling-a-guide-for-pms)
|
||||
|
||||
@@ -5,4 +5,4 @@ An agent loop is the cycle that lets an AI agent keep working toward a goal. Fir
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@What is an Agent Loop?](https://huggingface.co/learn/agents-course/en/unit1/agent-steps-and-structure)
|
||||
- [@article@Let's Build your Own Agentic Loop](https://www.reddit.com/r/AI_Agents/comments/1js1xjz/lets_build_our_own_agentic_loop_running_in_our/)
|
||||
- [@article@Let's Build your Own Agentic Loop](https://www.reddit.com/r/AI_Agents/comments/1js1xjz/lets_build_our_own_agentic_loop_running_in_our/)
|
||||
|
||||
@@ -4,4 +4,4 @@ Anthropic Tool Use lets you connect a Claude model to real software functions so
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@Anthropic Tool Use](https://docs.anthropic.com/en/docs/build-with-claude/tool-use/overview)
|
||||
- [@official@Anthropic Tool Use](https://docs.anthropic.com/en/docs/build-with-claude/tool-use/overview)
|
||||
|
||||
@@ -5,4 +5,4 @@ API requests let an AI agent ask another service for data or for an action. The
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@Introduction to APIs - MDN Web Docs](https://developer.mozilla.org/en-US/docs/Learn_web_development/Extensions/Client-side_APIs/Introduction)
|
||||
- [@article@How APIs Power AI Agents: A Comprehensive Guide](https://blog.treblle.com/api-guide-for-ai-agents/)
|
||||
- [@article@How APIs Power AI Agents: A Comprehensive Guide](https://blog.treblle.com/api-guide-for-ai-agents/)
|
||||
|
||||
@@ -6,4 +6,4 @@ Visit the following resources to learn more:
|
||||
|
||||
- [@article@Introduction to the server-side](https://developer.mozilla.org/en-US/docs/Learn/Server-side/First_steps/Introduction)
|
||||
- [@article@What is a REST API? - Red Hat](https://www.redhat.com/en/topics/api/what-is-a-rest-api)
|
||||
- [@article@What is a Database? - Oracle](https://www.oracle.com/database/what-is-database/)
|
||||
- [@article@What is a Database? - Oracle](https://www.oracle.com/database/what-is-database/)
|
||||
|
||||
@@ -5,4 +5,4 @@ Chain of Thought (CoT) is a way for an AI agent to think out loud. Before giving
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@Chain-of-Thought Prompting Elicits Reasoning in Large Language Models](https://arxiv.org/abs/2201.11903)
|
||||
- [@article@Evoking Chain of Thought Reasoning in LLMs - Prompting Guide](https://www.promptingguide.ai/techniques/cot)
|
||||
- [@article@Evoking Chain of Thought Reasoning in LLMs - Prompting Guide](https://www.promptingguide.ai/techniques/cot)
|
||||
|
||||
@@ -4,8 +4,8 @@ Closed-weight models are AI systems whose trained parameters—the numbers that
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@Open-Source LLMs vs Closed LLMs](https://hatchworks.com/blog/gen-ai/open-source-vs-closed-llms-guide/)
|
||||
- [@article@2024 Comparison of Open-Source Vs Closed-Source LLMs](https://blog.spheron.network/choosing-the-right-llm-2024-comparison-of-open-source-vs-closed-source-llms)
|
||||
- [@official@Open AI's GPT-4](https://openai.com/gpt-4)
|
||||
- [@official@Claude](https://www.anthropic.com/claude)
|
||||
- [@official@Gemini](https://deepmind.google/technologies/gemini/)
|
||||
- [@article@Open-Source LLMs vs Closed LLMs](https://hatchworks.com/blog/gen-ai/open-source-vs-closed-llms-guide/)
|
||||
- [@article@2024 Comparison of Open-Source Vs Closed-Source LLMs](https://blog.spheron.network/choosing-the-right-llm-2024-comparison-of-open-source-vs-closed-source-llms)
|
||||
@@ -7,4 +7,4 @@ Visit the following resources to learn more:
|
||||
- [@article@What is a REPL?](https://docs.replit.com/getting-started/intro-replit)
|
||||
- [@article@Code Execution AI Agent](https://docs.praison.ai/features/codeagent)
|
||||
- [@article@Building an AI Agent's Code Execution Environment](https://murraycole.com/posts/ai-code-execution-environment)
|
||||
- [@article@Python Code Tool](https://python.langchain.com/docs/integrations/tools/python/)
|
||||
- [@article@Python Code Tool](https://python.langchain.com/docs/integrations/tools/python/)
|
||||
|
||||
@@ -4,6 +4,6 @@ Code-generation agents take a plain language request, understand the goal, and t
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@GitHub Copilot](https://github.com/features/copilot)
|
||||
- [@article@Multi-Agent-based Code Generation](https://arxiv.org/abs/2312.13010)
|
||||
- [@article@From Prompt to Production: GitHub Blog](https://github.blog/ai-and-ml/github-copilot/from-prompt-to-production-building-a-landing-page-with-copilot-agent-mode/)
|
||||
- [@article@From Prompt to Production: GitHub Blog](https://github.blog/ai-and-ml/github-copilot/from-prompt-to-production-building-a-landing-page-with-copilot-agent-mode/)
|
||||
- [@official@GitHub Copilot](https://github.com/features/copilot)
|
||||
@@ -8,4 +8,4 @@ Visit the following resources to learn more:
|
||||
|
||||
- [@article@What is a Context Window in AI?](https://www.ibm.com/think/topics/context-window)
|
||||
- [@article@Scaling Language Models with Retrieval-Augmented Generation (RAG)](https://arxiv.org/abs/2005.11401)
|
||||
- [@article@Long Context in Language Models - Anthropic's Claude 3](https://www.anthropic.com/news/claude-3-family)
|
||||
- [@article@Long Context in Language Models - Anthropic's Claude 3](https://www.anthropic.com/news/claude-3-family)
|
||||
|
||||
@@ -5,4 +5,4 @@ An MCP server stores and shares conversation data for AI agents using the Model
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@Model Context Protocol (MCP) Specification](https://www.anthropic.com/news/model-context-protocol)
|
||||
- [@article@How to Build and Host Your Own MCP Servers in Easy Steps?](https://collabnix.com/how-to-build-and-host-your-own-mcp-servers-in-easy-steps/)
|
||||
- [@article@How to Build and Host Your Own MCP Servers in Easy Steps?](https://collabnix.com/how-to-build-and-host-your-own-mcp-servers-in-easy-steps/)
|
||||
|
||||
@@ -6,4 +6,4 @@ Visit the following resources to learn more:
|
||||
|
||||
- [@official@Airflow: Directed Acyclic Graphs Documentation](https://airflow.apache.org/docs/apache-airflow/stable/concepts/dags.html)
|
||||
- [@article@What are DAGs in AI Systems?](https://www.restack.io/p/version-control-for-ai-answer-what-is-dag-in-ai-cat-ai)
|
||||
- [@video@DAGs Explained Simply](https://www.youtube.com/watch?v=1Yh5S-S6wsI)
|
||||
- [@video@DAGs Explained Simply](https://www.youtube.com/watch?v=1Yh5S-S6wsI)
|
||||
|
||||
@@ -5,4 +5,4 @@ AI agents can automate data analysis by pulling information from files, database
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@How AI Will Transform Data Analysis in 2025](https://www.devfi.com/ai-transform-data-analysis-2025/)
|
||||
- [@article@How AI Has Changed The World Of Analytics And Data Science](https://www.forbes.com/councils/forbestechcouncil/2025/01/28/how-ai-has-changed-the-world-of-analytics-and-data-science/k)
|
||||
- [@article@How AI Has Changed The World Of Analytics And Data Science](https://www.forbes.com/councils/forbestechcouncil/2025/01/28/how-ai-has-changed-the-world-of-analytics-and-data-science/k)
|
||||
|
||||
@@ -6,4 +6,4 @@ Visit the following resources to learn more:
|
||||
|
||||
- [@official@GDPR Compliance Overview](https://gdpr.eu/)
|
||||
- [@article@Protect Sensitive Data with PII Redaction Software](https://redactor.ai/blog/pii-redaction-software-guide)
|
||||
- [@article@A Complete Guide on PII Redaction](https://enthu.ai/blog/what-is-pii-redaction/)
|
||||
- [@article@A Complete Guide on PII Redaction](https://enthu.ai/blog/what-is-pii-redaction/)
|
||||
|
||||
@@ -4,4 +4,4 @@ Database queries let an AI agent fetch, add, change, or remove data stored in a
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@Building Your Own Database Agent](https://www.deeplearning.ai/short-courses/building-your-own-database-agent/)
|
||||
- [@article@Building Your Own Database Agent](https://www.deeplearning.ai/short-courses/building-your-own-database-agent/)
|
||||
|
||||
@@ -7,4 +7,4 @@ Visit the following resources to learn more:
|
||||
- [@official@DeepEval - The Open-Source LLM Evaluation Framework](https://www.deepeval.com/)
|
||||
- [@opensource@DeepEval GitHub Repository](https://github.com/confident-ai/deepeval)
|
||||
- [@article@Evaluate LLMs Effectively Using DeepEval: A Pratical Guide](https://www.datacamp.com/tutorial/deepeval)
|
||||
- [@video@DeepEval - LLM Evaluation Framework](https://www.youtube.com/watch?v=ZNs2dCXHlfo)
|
||||
- [@video@DeepEval - LLM Evaluation Framework](https://www.youtube.com/watch?v=ZNs2dCXHlfo)
|
||||
|
||||
@@ -5,4 +5,4 @@ Email, Slack, and SMS are message channels an AI agent can use to act on tasks a
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@Twilio Messaging API](https://www.twilio.com/docs/usage/api)
|
||||
- [@official@Slack AI Agents](https://slack.com/ai-agents)
|
||||
- [@official@Slack AI Agents](https://slack.com/ai-agents)
|
||||
|
||||
@@ -5,4 +5,4 @@ Embeddings turn words, pictures, or other data into lists of numbers called vect
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@OpenAI Embeddings API Documentation](https://platform.openai.com/docs/guides/embeddings/what-are-embeddings)
|
||||
- [@article@Understanding Embeddings and Vector Search (Pinecone Blog)](https://www.pinecone.io/learn/vector-embeddings/)
|
||||
- [@article@Understanding Embeddings and Vector Search (Pinecone Blog)](https://www.pinecone.io/learn/vector-embeddings/)
|
||||
|
||||
@@ -6,4 +6,4 @@ Visit the following resources to learn more:
|
||||
|
||||
- [@article@What Is AI Agent Memory? - IBM](https://www.ibm.com/think/topics/ai-agent-memory)
|
||||
- [@article@Episodic Memory vs. Semantic Memory: The Key Differences](https://www.magneticmemorymethod.com/episodic-vs-semantic-memory/)
|
||||
- [@article@Memory Systems in LangChain](https://python.langchain.com/docs/how_to/chatbots_memory/)
|
||||
- [@article@Memory Systems in LangChain](https://python.langchain.com/docs/how_to/chatbots_memory/)
|
||||
|
||||
@@ -7,4 +7,4 @@ Visit the following resources to learn more:
|
||||
- [@article@Filesystem MCP server for AI Agents](https://playbooks.com/mcp/mateicanavra-filesystem)
|
||||
- [@article@File System Access API](https://developer.mozilla.org/en-US/docs/Web/API/File_System_Access_API)
|
||||
- [@article@Understanding File Permissions and Security](https://linuxize.com/post/understanding-linux-file-permissions/)
|
||||
- [@video@How File Systems Work?](https://www.youtube.com/watch?v=KN8YgJnShPM)
|
||||
- [@video@How File Systems Work?](https://www.youtube.com/watch?v=KN8YgJnShPM)
|
||||
|
||||
@@ -7,4 +7,4 @@ Visit the following resources to learn more:
|
||||
- [@article@OpenAI Fine Tuning](https://platform.openai.com/docs/guides/fine-tuning)
|
||||
- [@article@Prompt Engineering Guide](https://www.promptingguide.ai/)
|
||||
- [@article@Prompt Engineering vs Prompt Tuning: A Detailed Explanation](https://medium.com/@aabhi02/prompt-engineering-vs-prompt-tuning-a-detailed-explanation-19ea8ce62ac4)
|
||||
- [@video@RAG vs Fine-Tuning vs Prompt Engineering: Optimizing AI Models](https://youtu.be/zYGDpG-pTho?si=pFeWqbjSN1RM4WiZ)
|
||||
- [@video@RAG vs Fine-Tuning vs Prompt Engineering: Optimizing AI Models](https://youtu.be/zYGDpG-pTho?si=pFeWqbjSN1RM4WiZ)
|
||||
|
||||
@@ -6,4 +6,4 @@ Visit the following resources to learn more:
|
||||
|
||||
- [@official@Git Basics](https://git-scm.com/doc)
|
||||
- [@official@Introduction to the Terminal](https://ubuntu.com/tutorials/command-line-for-beginners#1-overview)
|
||||
- [@video@Git and Terminal Basics Crash Course (YouTube)](https://www.youtube.com/watch?v=HVsySz-h9r4)
|
||||
- [@video@Git and Terminal Basics Crash Course (YouTube)](https://www.youtube.com/watch?v=HVsySz-h9r4)
|
||||
|
||||
@@ -6,4 +6,4 @@ Visit the following resources to learn more:
|
||||
|
||||
- [@official@Haystack](https://haystack.deepset.ai/)
|
||||
- [@official@Haystack Overview](https://docs.haystack.deepset.ai/docs/intro)
|
||||
- [@opensource@deepset-ai/haystack](https://github.com/deepset-ai/haystack)
|
||||
- [@opensource@deepset-ai/haystack](https://github.com/deepset-ai/haystack)
|
||||
|
||||
@@ -6,4 +6,4 @@ Visit the following resources to learn more:
|
||||
|
||||
- [@official@Helicone](https://www.helicone.ai/)
|
||||
- [@official@Helicone OSS LLM Observability](https://docs.helicone.ai/getting-started/quick-start)
|
||||
- [@opensource@Helicone/helicone](https://github.com/Helicone/helicone)
|
||||
- [@opensource@Helicone/helicone](https://github.com/Helicone/helicone)
|
||||
|
||||
@@ -7,4 +7,4 @@ Visit the following resources to learn more:
|
||||
- [@article@Human in the Loop · Cloudflare Agents](https://developers.cloudflare.com/agents/concepts/human-in-the-loop/)
|
||||
- [@article@What is Human-in-the-Loop: A Guide](https://logifusion.com/what-is-human-in-the-loop-htil/)
|
||||
- [@article@Human-in-the-Loop ML](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-human-review-workflow.html)
|
||||
- [@article@The Importance of Human Feedback in AI (Hugging Face Blog)](https://huggingface.co/blog/rlhf)
|
||||
- [@article@The Importance of Human Feedback in AI (Hugging Face Blog)](https://huggingface.co/blog/rlhf)
|
||||
|
||||
@@ -4,6 +4,6 @@ After you write a first prompt, treat it as a draft, not the final version. Run
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@course@Prompt Engineering Best Practices](https://www.deeplearning.ai/short-courses/chatgpt-prompt-engineering-for-developers/)
|
||||
- [@article@Master Iterative Prompting: A Guide](https://blogs.vreamer.space/master-iterative-prompting-a-guide-to-more-effective-interactions-with-ai-50a736eaec38)
|
||||
- [@video@Prompt Engineering: The Iterative Process](https://www.youtube.com/watch?v=dOxUroR57xs)
|
||||
- [@course@Prompt Engineering Best Practices](https://www.deeplearning.ai/short-courses/chatgpt-prompt-engineering-for-developers/)
|
||||
- [@video@Prompt Engineering: The Iterative Process](https://www.youtube.com/watch?v=dOxUroR57xs)
|
||||
|
||||
@@ -1,10 +1,11 @@
|
||||
# LangChain
|
||||
|
||||
LangChain is a framework designed to simplify the creation of applications using large language models (LLMs). It provides tools and abstractions to connect LLMs to various data sources, create chains of calls to LLMs or other utilities, and build agents that can interact with their environment. Essentially, it helps developers structure, chain, and orchestrate different AI components to build more complex and capable AI applications.
|
||||
LangChain is a Python and JavaScript library that helps you put large language models to work in real products. It gives ready-made parts for common agent tasks such as talking to many tools, keeping short-term memory, and calling an external API when the model needs fresh data. You combine these parts like Lego blocks: pick a model, add a prompt template, chain the steps, then wrap the chain in an “agent” that can choose what step to run next. Built-in connectors link to OpenAI, Hugging Face, vector stores, and SQL databases, so you can search documents or pull company data without writing a lot of glue code. This lets you move fast from idea to working bot, while still letting you swap out parts if your needs change.
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@LangChain Documentation](https://python.langchain.com/docs/introduction/)
|
||||
- [@opensource@langchain-ai/langchain](https://github.com/langchain-ai/langchain)
|
||||
- [@article@Building Applications with LLMs using LangChain](https://www.pinecone.io/learn/series/langchain/)
|
||||
- [@article@AI Agents with LangChain and LangGraph](https://www.udacity.com/course/ai-agents-with-langchain-and-langgraph--cd13764)
|
||||
- [@video@LangChain Crash Course - Build LLM Apps Fast (YouTube)](https://www.youtube.com/watch?v=nAmC7SoVLd8)
|
||||
- [@video@LangChain Crash Course - Build LLM Apps Fast (YouTube)](https://www.youtube.com/watch?v=nAmC7SoVLd8)
|
||||
|
||||
@@ -7,4 +7,4 @@ Visit the following resources to learn more:
|
||||
- [@official@LangFuse](https://langfuse.com/)
|
||||
- [@official@LangFuse Documentation](https://langfuse.com/docs)
|
||||
- [@opensource@langfuse/langfuse](https://github.com/langfuse/langfuse)
|
||||
- [@article@Langfuse: Open Source LLM Engineering Platform](https://www.ycombinator.com/companies/langfuse)
|
||||
- [@article@Langfuse: Open Source LLM Engineering Platform](https://www.ycombinator.com/companies/langfuse)
|
||||
|
||||
@@ -7,4 +7,4 @@ Visit the following resources to learn more:
|
||||
- [@official@LangSmith](https://smith.langchain.com/)
|
||||
- [@official@LangSmith Documentation](https://docs.smith.langchain.com/)
|
||||
- [@official@Harden your application with LangSmith Evaluation](https://www.langchain.com/evaluation)
|
||||
- [@article@What is LangSmith and Why should I care as a developer?](https://medium.com/around-the-prompt/what-is-langsmith-and-why-should-i-care-as-a-developer-e5921deb54b5)
|
||||
- [@article@What is LangSmith and Why should I care as a developer?](https://medium.com/around-the-prompt/what-is-langsmith-and-why-should-i-care-as-a-developer-e5921deb54b5)
|
||||
|
||||
@@ -7,4 +7,4 @@ Visit the following resources to learn more:
|
||||
- [@official@LangSmith](https://smith.langchain.com/)
|
||||
- [@official@LangSmith Documentation](https://docs.smith.langchain.com/)
|
||||
- [@official@Harden your application with LangSmith Evaluation](https://www.langchain.com/evaluation)
|
||||
- [@article@What is LangSmith and Why should I care as a developer?](https://medium.com/around-the-prompt/what-is-langsmith-and-why-should-i-care-as-a-developer-e5921deb54b5)
|
||||
- [@article@What is LangSmith and Why should I care as a developer?](https://medium.com/around-the-prompt/what-is-langsmith-and-why-should-i-care-as-a-developer-e5921deb54b5)
|
||||
|
||||
@@ -6,5 +6,5 @@ Visit the following resources to learn more:
|
||||
|
||||
- [@article@Build a Simple Local MCP Server](https://blog.stackademic.com/build-simple-local-mcp-server-5434d19572a4)
|
||||
- [@article@How to Build and Host Your Own MCP Servers in Easy Steps](https://collabnix.com/how-to-build-and-host-your-own-mcp-servers-in-easy-steps/)
|
||||
- [@article@Expose localhost to Internet](https://ngrok.com/docs)
|
||||
- [@video@Run a Local Server on Your Machine](https://www.youtube.com/watch?v=ldGl6L4Vktk)
|
||||
- [@article@Expose localhost to Internet](https://ngrok.com/docs)
|
||||
- [@video@Run a Local Server on Your Machine](https://www.youtube.com/watch?v=ldGl6L4Vktk)
|
||||
|
||||
@@ -5,7 +5,7 @@ Long term memory in an AI agent stores important information for future use, lik
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@Long Term Memory in AI Agents](https://medium.com/@alozie_igbokwe/ai-101-long-term-memory-in-ai-agents-35f87f2d0ce0)
|
||||
- [@article@Memory Management in AI Agents](https://python.langchain.com/docs/how_to/chatbots_memory/)
|
||||
- [@article@Storing and Retrieving Knowledge for Agents](https://www.pinecone.io/learn/langchain-retrieval-augmentation/)
|
||||
- [@article@Memory Management in AI Agents](https://python.langchain.com/docs/how_to/chatbots_memory/)
|
||||
- [@article@Storing and Retrieving Knowledge for Agents](https://www.pinecone.io/learn/langchain-retrieval-augmentation/)
|
||||
- [@article@Short-Term vs Long-Term Memory in AI Agents](https://adasci.org/short-term-vs-long-term-memory-in-ai-agents/)
|
||||
- [@video@Building Brain-Like Memory for AI Agents](https://www.youtube.com/watch?v=VKPngyO0iKg)
|
||||
- [@video@Building Brain-Like Memory for AI Agents](https://www.youtube.com/watch?v=VKPngyO0iKg)
|
||||
|
||||
@@ -4,7 +4,7 @@ Building an AI agent from scratch means writing every part of the system yoursel
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@A Step-by-Step Guide to Building an AI Agent From Scratch](https://www.neurond.com/blog/how-to-build-an-ai-agent)
|
||||
- [@article@How to Build AI Agents](https://wotnot.io/blog/build-ai-agents)
|
||||
- [@article@A Step-by-Step Guide to Building an AI Agent From Scratch](https://www.neurond.com/blog/how-to-build-an-ai-agent)
|
||||
- [@article@How to Build AI Agents](https://wotnot.io/blog/build-ai-agents)
|
||||
- [@article@Build Your Own AI Agent from Scratch in 30 Minutes](https://medium.com/@gurpartap.sandhu3/build-you-own-ai-agent-from-scratch-in-30-mins-using-simple-python-1458f8099da0)
|
||||
- [@video@Building an AI Agent From Scratch](https://www.youtube.com/watch?v=bTMPwUgLZf0)
|
||||
- [@video@Building an AI Agent From Scratch](https://www.youtube.com/watch?v=bTMPwUgLZf0)
|
||||
|
||||
@@ -4,9 +4,9 @@ Max Length sets the maximum number of tokens a language model can generate in on
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@OpenAI Token Usage](https://platform.openai.com/docs/guides/gpt/managing-tokens)
|
||||
- [@official@Size and Max Token Limits](https://docs.anthropic.com/claude/docs/size-and-token-limits)
|
||||
- [@article@Utilising Max Token Context Window of Anthropic Claude](https://medium.com/@nampreetsingh/utilising-max-token-context-window-of-anthropic-claude-on-amazon-bedrock-7377d94b2dfa)
|
||||
- [@article@Controlling the Length of OpenAI Model Responses](https://help.openai.com/en/articles/5072518-controlling-the-length-of-openai-model-responses)
|
||||
- [@official@OpenAI Token Usage](https://platform.openai.com/docs/guides/gpt/managing-tokens)
|
||||
- [@official@Size and Max Token Limits](https://docs.anthropic.com/claude/docs/size-and-token-limits)
|
||||
- [@article@Utilising Max Token Context Window of Anthropic Claude](https://medium.com/@nampreetsingh/utilising-max-token-context-window-of-anthropic-claude-on-amazon-bedrock-7377d94b2dfa)
|
||||
- [@article@Controlling the Length of OpenAI Model Responses](https://help.openai.com/en/articles/5072518-controlling-the-length-of-openai-model-responses)
|
||||
- [@article@Max Model Length in AI](https://www.restack.io/p/ai-model-answer-max-model-length-cat-ai)
|
||||
- [@video@Understanding ChatGPT/OpenAI Tokens](https://youtu.be/Mo3NV5n1yZk)
|
||||
@@ -4,7 +4,7 @@ The MCP Client is the part of an AI agent that talks to the language model API.
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@opensource@Model Context Protocol](https://github.com/modelcontextprotocol/modelcontextprotocol)
|
||||
- [@official@Model Context Protocol](https://modelcontextprotocol.io/introduction)
|
||||
- [@official@OpenAI API Reference](https://platform.openai.com/docs/api-reference)
|
||||
- [@official@Anthropic API Documentation](https://docs.anthropic.com/claude/reference)
|
||||
- [@opensource@Model Context Protocol](https://github.com/modelcontextprotocol/modelcontextprotocol)
|
||||
- [@official@OpenAI API Reference](https://platform.openai.com/docs/api-reference)
|
||||
- [@official@Anthropic API Documentation](https://docs.anthropic.com/claude/reference)
|
||||
|
||||
@@ -4,7 +4,7 @@ MCP Hosts are computers or services that run the Model Context Protocol. They ha
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@Vercel Serverless Hosting](https://vercel.com/docs)
|
||||
- [@opensource@punkeye/awesome-mcp-servers](https://github.com/punkpeye/awesome-mcp-servers)
|
||||
- [@official@Vercel Serverless Hosting](https://vercel.com/docs)
|
||||
- [@article@The Ultimate Guide to MCP](https://guangzhengli.com/blog/en/model-context-protocol)
|
||||
- [@article@AWS MCP Servers for Code Assistants](https://aws.amazon.com/blogs/machine-learning/introducing-aws-mcp-servers-for-code-assistants-part-1/)
|
||||
- [@article@AWS MCP Servers for Code Assistants](https://aws.amazon.com/blogs/machine-learning/introducing-aws-mcp-servers-for-code-assistants-part-1/)
|
||||
- [@opensource@punkeye/awesome-mcp-servers](https://github.com/punkpeye/awesome-mcp-servers)
|
||||
@@ -4,7 +4,7 @@ An MCP Server is the main machine or cloud service that runs the Model Context P
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@opensource@punkeye/awesome-mcp-servers](https://github.com/punkpeye/awesome-mcp-servers)
|
||||
- [@article@Introducing the Azure MCP Server ](https://devblogs.microsoft.com/azure-sdk/introducing-the-azure-mcp-server/)
|
||||
- [@article@Introducing the Azure MCP Server ](https://devblogs.microsoft.com/azure-sdk/introducing-the-azure-mcp-server/)
|
||||
- [@article@The Ultimate Guide to MCP](https://guangzhengli.com/blog/en/model-context-protocol)
|
||||
- [@article@AWS MCP Servers for Code Assistants](https://aws.amazon.com/blogs/machine-learning/introducing-aws-mcp-servers-for-code-assistants-part-1/)
|
||||
- [@article@AWS MCP Servers for Code Assistants](https://aws.amazon.com/blogs/machine-learning/introducing-aws-mcp-servers-for-code-assistants-part-1/)
|
||||
- [@opensource@punkeye/awesome-mcp-servers](https://github.com/punkpeye/awesome-mcp-servers)
|
||||
@@ -5,6 +5,6 @@ To judge how well an AI agent works, you need clear numbers. Track accuracy, pre
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@Robustness Testing for AI](https://mitibmwatsonailab.mit.edu/category/robustness/)
|
||||
- [@article@Complete Guide to Machine Learning Evaluation Metrics](https://medium.com/analytics-vidhya/complete-guide-to-machine-learning-evaluation-metrics-615c2864d916)
|
||||
- [@article@Measuring Model Performance](https://developers.google.com/machine-learning/crash-course/classification/accuracy)
|
||||
- [@article@A Practical Framework for (Gen)AI Value Measurement](https://medium.com/google-cloud/a-practical-framework-for-gen-ai-value-measurement-5fccf3b66c43)
|
||||
- [@article@Complete Guide to Machine Learning Evaluation Metrics](https://medium.com/analytics-vidhya/complete-guide-to-machine-learning-evaluation-metrics-615c2864d916)
|
||||
- [@article@Measuring Model Performance](https://developers.google.com/machine-learning/crash-course/classification/accuracy)
|
||||
- [@article@A Practical Framework for (Gen)AI Value Measurement](https://medium.com/google-cloud/a-practical-framework-for-gen-ai-value-measurement-5fccf3b66c43)
|
||||
|
||||
@@ -4,8 +4,8 @@ Model Context Protocol (MCP) is a rulebook that tells an AI agent how to pack ba
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@course@MCP: Build Rich-Context AI Apps with Anthropic](https://www.deeplearning.ai/short-courses/mcp-build-rich-context-ai-apps-with-anthropic/)
|
||||
- [@official@Model Context Protocol](https://modelcontextprotocol.io/introduction)
|
||||
- [@opensource@Model Context Protocol](https://github.com/modelcontextprotocol/modelcontextprotocol)
|
||||
- [@article@Introducing the Azure MCP Server](https://devblogs.microsoft.com/azure-sdk/introducing-the-azure-mcp-server/)
|
||||
- [@article@The Ultimate Guide to MCP](https://guangzhengli.com/blog/en/model-context-protocol)
|
||||
- [@official@Model Context Protocol](https://modelcontextprotocol.io/introduction)
|
||||
- [@article@Introducing the Azure MCP Server](https://devblogs.microsoft.com/azure-sdk/introducing-the-azure-mcp-server/)
|
||||
- [@article@The Ultimate Guide to MCP](https://guangzhengli.com/blog/en/model-context-protocol)
|
||||
- [@course@MCP: Build Rich-Context AI Apps with Anthropic](https://www.deeplearning.ai/short-courses/mcp-build-rich-context-ai-apps-with-anthropic/)
|
||||
|
||||
@@ -4,5 +4,5 @@ Game studios use AI agents to control non-player characters (NPCs). The agent ob
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@Unreal Engine – Artificial Intelligence](https://dev.epicgames.com/documentation/en-us/unreal-engine/artificial-intelligence-in-unreal-engine?application_version=5.3)
|
||||
- [@official@Unreal Engine – Artificial Intelligence](https://dev.epicgames.com/documentation/en-us/unreal-engine/artificial-intelligence-in-unreal-engine?application_version=5.3)
|
||||
- [@article@AI-Driven NPCs: The Future of Gaming Explained](https://www.capermint.com/blog/everything-you-need-to-know-about-non-player-character-npc/)
|
||||
@@ -4,5 +4,5 @@ Observation and reflection form the thinking pause in an AI agent’s loop. Firs
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@Best Practices for Prompting and Self-checking](https://platform.openai.com/docs/guides/prompt-engineering)
|
||||
- [@article@Self-Reflective AI: Building Agents That Learn by Observing Themselves](https://arxiv.org/abs/2302.14045)
|
||||
- [@official@Best Practices for Prompting and Self-checking](https://platform.openai.com/docs/guides/prompt-engineering)
|
||||
- [@article@Self-Reflective AI: Building Agents That Learn by Observing Themselves](https://arxiv.org/abs/2302.14045)
|
||||
|
||||
@@ -4,8 +4,8 @@ Open-weight models are neural networks whose trained parameters, also called wei
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@BLOOM BigScience](https://bigscience.huggingface.co/)
|
||||
- [@official@Falcon LLM – Technology Innovation Institute (TII)](https://falconllm.tii.ae/)
|
||||
- [@official@Llama 2 – Meta's Official Announcement](https://ai.meta.com/llama/)
|
||||
- [@official@Hugging Face – Open LLM Leaderboard (Top Open Models)](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
|
||||
- [@official@EleutherAI – Open Research Collective (GPT-Neo, GPT-J, etc.)](https://www.eleuther.ai/)
|
||||
- [@official@BLOOM BigScience](https://bigscience.huggingface.co/)
|
||||
- [@official@Falcon LLM – Technology Innovation Institute (TII)](https://falconllm.tii.ae/)
|
||||
- [@official@Llama 2 – Meta's Official Announcement](https://ai.meta.com/llama/)
|
||||
- [@official@Hugging Face – Open LLM Leaderboard (Top Open Models)](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
|
||||
- [@official@EleutherAI – Open Research Collective (GPT-Neo, GPT-J, etc.)](https://www.eleuther.ai/)
|
||||
|
||||
@@ -4,7 +4,7 @@ The OpenAI Assistants API lets you add clear, task-specific actions to a chat wi
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@OpenAI Documentation – Assistants API Overview](https://platform.openai.com/docs/assistants/overview)
|
||||
- [@official@OpenAI Blog – Introducing the Assistants API](https://openai.com/blog/assistants-api)
|
||||
- [@official@OpenAI Cookbook – Assistants API Example](https://github.com/openai/openai-cookbook/blob/main/examples/Assistants_API_overview_python.ipynb)
|
||||
- [@official@OpenAI API Reference – Assistants Endpoints](https://platform.openai.com/docs/api-reference/assistants)
|
||||
- [@official@OpenAI Documentation – Assistants API Overview](https://platform.openai.com/docs/assistants/overview)
|
||||
- [@official@OpenAI Blog – Introducing the Assistants API](https://openai.com/blog/assistants-api)
|
||||
- [@official@OpenAI Cookbook – Assistants API Example](https://github.com/openai/openai-cookbook/blob/main/examples/Assistants_API_overview_python.ipynb)
|
||||
- [@official@OpenAI API Reference – Assistants Endpoints](https://platform.openai.com/docs/api-reference/assistants)
|
||||
|
||||
@@ -4,8 +4,8 @@ OpenAI Function Calling lets you give a language model a list of tools and have
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@OpenAI Documentation – Function Calling](https://platform.openai.com/docs/guides/function-calling)
|
||||
- [@official@OpenAI Cookbook – Using Functions with GPT Models](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_call_functions_with_chat_models.ipynb)
|
||||
- [@official@OpenAI Blog – Announcing Function Calling and Other Updates](https://openai.com/blog/function-calling-and-other-api-updates)
|
||||
- [@official@OpenAI API Reference – Functions Section](https://platform.openai.com/docs/api-reference/chat/create#functions)
|
||||
- [@official@OpenAI Community – Discussions and Examples on Function Calling](https://community.openai.com/tag/function-calling)
|
||||
- [@official@OpenAI Documentation – Function Calling](https://platform.openai.com/docs/guides/function-calling)
|
||||
- [@official@OpenAI Cookbook – Using Functions with GPT Models](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_call_functions_with_chat_models.ipynb)
|
||||
- [@official@OpenAI Blog – Announcing Function Calling and Other Updates](https://openai.com/blog/function-calling-and-other-api-updates)
|
||||
- [@official@OpenAI API Reference – Functions Section](https://platform.openai.com/docs/api-reference/chat/create#functions)
|
||||
- [@official@OpenAI Community – Discussions and Examples on Function Calling](https://community.openai.com/tag/function-calling)
|
||||
|
||||
@@ -4,7 +4,7 @@ openllmetry is a small Python library that makes it easy to watch what your AI a
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@OpenLLMetry Announcement – Traceloop Blog](https://www.traceloop.com/blog/openllmetry)
|
||||
- [@official@What is OpenLLMetry? - traceloop](https://www.traceloop.com/docs/openllmetry/introduction)
|
||||
- [@official@OpenLLMetry Announcement – Traceloop Blog](https://www.traceloop.com/blog/openllmetry)
|
||||
- [@official@What is OpenLLMetry? - traceloop](https://www.traceloop.com/docs/openllmetry/introduction)
|
||||
- [@official@Use Traceloop with Python](https://www.traceloop.com/docs/openllmetry/getting-started-python)
|
||||
- [@opensource@traceloop/openllmetry](https://github.com/traceloop/openllmetry)
|
||||
@@ -5,4 +5,4 @@ Perception, also called user input, is the first step in an agent loop. The agen
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@Perception in AI: Understanding Its Types and Importance](https://marktalks.com/perception-in-ai-understanding-its-types-and-importance/)
|
||||
- [@article@What Is AI Agent Perception? - IBM](https://www.ibm.com/think/topics/ai-agent-perception)
|
||||
- [@article@What Is AI Agent Perception? - IBM](https://www.ibm.com/think/topics/ai-agent-perception)
|
||||
|
||||
@@ -5,4 +5,4 @@ A personal assistant AI agent is a smart program that helps one person manage da
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@A Complete Guide on AI-powered Personal Assistants](https://medium.com/@alexander_clifford/a-complete-guide-on-ai-powered-personal-assistants-with-examples-2f5cd894d566)
|
||||
- [@article@9 Best AI Personal Assistants for Work, Chat and Home](https://saner.ai/best-ai-personal-assistants/)
|
||||
- [@article@9 Best AI Personal Assistants for Work, Chat and Home](https://saner.ai/best-ai-personal-assistants/)
|
||||
|
||||
@@ -6,4 +6,4 @@ Visit the following resources to learn more:
|
||||
|
||||
- [@official@OpenAI Pricing](https://openai.com/api/pricing/)
|
||||
- [@article@Executive Guide To AI Agent Pricing](https://www.forbes.com/councils/forbesbusinesscouncil/2025/01/28/executive-guide-to-ai-agent-pricing-winning-strategies-and-models-to-drive-growth/)
|
||||
- [@article@AI Pricing: How Much Does Artificial Intelligence Cost In 2025?](https://www.internetsearchinc.com/ai-pricing-how-much-does-artificial-intelligence-cost/)
|
||||
- [@article@AI Pricing: How Much Does Artificial Intelligence Cost In 2025?](https://www.internetsearchinc.com/ai-pricing-how-much-does-artificial-intelligence-cost/)
|
||||
|
||||
@@ -6,4 +6,4 @@ Visit the following resources to learn more:
|
||||
|
||||
- [@article@Prompt Injection vs. Jailbreaking: What's the Difference?](https://learnprompting.org/blog/injection_jailbreaking)
|
||||
- [@article@Prompt Injection vs Prompt Jailbreak](https://codoid.com/ai/prompt-injection-vs-prompt-jailbreak-a-detailed-comparison/)
|
||||
- [@article@How Prompt Attacks Exploit GenAI and How to Fight Back](https://unit42.paloaltonetworks.com/new-frontier-of-genai-threats-a-comprehensive-guide-to-prompt-attacks/)
|
||||
- [@article@How Prompt Attacks Exploit GenAI and How to Fight Back](https://unit42.paloaltonetworks.com/new-frontier-of-genai-threats-a-comprehensive-guide-to-prompt-attacks/)
|
||||
|
||||
@@ -6,4 +6,4 @@ Visit the following resources to learn more:
|
||||
|
||||
- [@article@What is Context in Prompt Engineering?](https://www.godofprompt.ai/blog/what-is-context-in-prompt-engineering)
|
||||
- [@article@The Importance of Context for Reliable AI Systems](https://medium.com/mathco-ai/the-importance-of-context-for-reliable-ai-systems-and-how-to-provide-context-009bd1ac7189/)
|
||||
- [@article@Context Engineering: Why Feeding AI the Right Context Matters](https://inspirednonsense.com/context-engineering-why-feeding-ai-the-right-context-matters-353e8f87d6d3)
|
||||
- [@article@Context Engineering: Why Feeding AI the Right Context Matters](https://inspirednonsense.com/context-engineering-why-feeding-ai-the-right-context-matters-353e8f87d6d3)
|
||||
|
||||
@@ -5,4 +5,4 @@ A RAG (Retrieval-Augmented Generation) agent mixes search with language generati
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@What is RAG? - Retrieval-Augmented Generation AI Explained](https://aws.amazon.com/what-is/retrieval-augmented-generation/)
|
||||
- [@article@What Is Retrieval-Augmented Generation, aka RAG?](https://blogs.nvidia.com/blog/what-is-retrieval-augmented-generation/)
|
||||
- [@article@What Is Retrieval-Augmented Generation, aka RAG?](https://blogs.nvidia.com/blog/what-is-retrieval-augmented-generation/)
|
||||
|
||||
@@ -6,4 +6,4 @@ Visit the following resources to learn more:
|
||||
|
||||
- [@article@Understanding Retrieval-Augmented Generation (RAG) and Vector Databases](https://pureai.com/Articles/2025/03/03/Understanding-RAG.aspx)
|
||||
- [@article@Build Advanced Retrieval-Augmented Generation Systems](https://learn.microsoft.com/en-us/azure/developer/ai/advanced-retrieval-augmented-generation)
|
||||
- [@article@What Is Retrieval-Augmented Generation, aka RAG?](https://blogs.nvidia.com/blog/what-is-retrieval-augmented-generation/)
|
||||
- [@article@What Is Retrieval-Augmented Generation, aka RAG?](https://blogs.nvidia.com/blog/what-is-retrieval-augmented-generation/)
|
||||
|
||||
@@ -5,5 +5,5 @@ Ragas is an open-source tool used to check how well a Retrieval-Augmented Genera
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@Ragas Documentation](https://docs.ragas.io/en/latest/)
|
||||
- [@article@Evaluating RAG Applications with RAGAs](https://towardsdatascience.com/evaluating-rag-applications-with-ragas-81d67b0ee31a/)
|
||||
- [@opensource@explodinggradients/ragas](https://github.com/explodinggradients/ragas)
|
||||
- [@article@Evaluating RAG Applications with RAGAs](https://towardsdatascience.com/evaluating-rag-applications-with-ragas-81d67b0ee31a/)
|
||||
@@ -5,4 +5,4 @@ ReAct is an agent pattern that makes a model alternate between two simple steps:
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@ReAct: Synergizing Reasoning and Acting in Language Models](https://react-lm.github.io/)
|
||||
- [@article@ReAct Systems: Enhancing LLMs with Reasoning and Action](https://learnprompting.org/docs/agents/react)
|
||||
- [@article@ReAct Systems: Enhancing LLMs with Reasoning and Action](https://learnprompting.org/docs/agents/react)
|
||||
|
||||
@@ -5,4 +5,4 @@ Reason and Plan is the moment when an AI agent thinks before it acts. The agent
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@ReAct: Synergizing Reasoning and Acting in Language Models](https://react-lm.github.io/)
|
||||
- [@article@ReAct Systems: Enhancing LLMs with Reasoning and Action](https://learnprompting.org/docs/agents/react)
|
||||
- [@article@ReAct Systems: Enhancing LLMs with Reasoning and Action](https://learnprompting.org/docs/agents/react)
|
||||
|
||||
@@ -5,4 +5,4 @@ Reasoning models break a task into clear steps and follow a line of logic, while
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@ReAct: Synergizing Reasoning and Acting in Language Models](https://react-lm.github.io/)
|
||||
- [@article@ReAct Systems: Enhancing LLMs with Reasoning and Action](https://learnprompting.org/docs/agents/react)
|
||||
- [@article@ReAct Systems: Enhancing LLMs with Reasoning and Action](https://learnprompting.org/docs/agents/react)
|
||||
|
||||
@@ -5,4 +5,4 @@ A **REST API** (Representational State Transfer) is an architectural style for d
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@What is RESTful API? - RESTful API Explained - AWS](https://aws.amazon.com/what-is/restful-api/)
|
||||
- [@article@What Is a REST API? Examples, Uses & Challenges](https://blog.postman.com/rest-api-examples/)
|
||||
- [@article@What Is a REST API? Examples, Uses & Challenges](https://blog.postman.com/rest-api-examples/)
|
||||
|
||||
@@ -2,25 +2,26 @@
|
||||
|
||||
Short term memory are the facts which are passed as a part of the prompt to the LLM e.g. there might be a prompt like below:
|
||||
|
||||
Users Profile:
|
||||
- name: {name}
|
||||
- age: {age}
|
||||
- expertise: {expertise}
|
||||
|
||||
User is currently learning about {current_topic}. User has some goals in mind which are:
|
||||
- {goal_1}
|
||||
- {goal_2}
|
||||
- {goal_3}
|
||||
|
||||
Help the user achieve the goals.
|
||||
|
||||
```text
|
||||
Users Profile:
|
||||
- name: {name}
|
||||
- age: {age}
|
||||
- expertise: {expertise}
|
||||
|
||||
User is currently learning about {current_topic}. User has some goals in mind which are:
|
||||
- {goal_1}
|
||||
- {goal_2}
|
||||
- {goal_3}
|
||||
|
||||
Help the user achieve the goals.
|
||||
```
|
||||
|
||||
Notice how we injected the user's profile, current topic and goals in the prompt. These are all short term memories.
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@Memory Management in AI Agents](https://python.langchain.com/docs/how_to/chatbots_memory/)
|
||||
- [@article@Memory Management in AI Agents](https://python.langchain.com/docs/how_to/chatbots_memory/)
|
||||
- [@article@Build Smarter AI Agents: Manage Short-term and Long-term Memory](https://redis.io/blog/build-smarter-ai-agents-manage-short-term-and-long-term-memory-with-redis/)
|
||||
- [@article@Storing and Retrieving Knowledge for Agents](https://www.pinecone.io/learn/langchain-retrieval-augmentation/)
|
||||
- [@article@Storing and Retrieving Knowledge for Agents](https://www.pinecone.io/learn/langchain-retrieval-augmentation/)
|
||||
- [@article@Short-Term vs Long-Term Memory in AI Agents](https://adasci.org/short-term-vs-long-term-memory-in-ai-agents/)
|
||||
- [@video@Building Brain-Like Memory for AI Agents](https://www.youtube.com/watch?v=VKPngyO0iKg)
|
||||
- [@video@Building Brain-Like Memory for AI Agents](https://www.youtube.com/watch?v=VKPngyO0iKg)
|
||||
|
||||
@@ -4,6 +4,6 @@ Smol Depot is an open-source kit that lets you bundle all the parts of a small A
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@smol.ai - Continuous Fine-tuning Platform for AI Engineers](https://smol.candycode.dev/)
|
||||
- [@official@smol.ai - Continuous Fine-tuning Platform for AI Engineers](https://smol.candycode.dev/)
|
||||
- [@article@5-min Smol AI Tutorial](https://www.ai-jason.com/learning-ai/smol-ai-tutorial)
|
||||
- [@video@Smol AI Full Beginner Course](https://www.youtube.com/watch?v=d7qFVrpLh34)
|
||||
- [@video@Smol AI Full Beginner Course](https://www.youtube.com/watch?v=d7qFVrpLh34)
|
||||
|
||||
@@ -4,5 +4,5 @@ When you give a task to an AI, make clear how long the answer should be and what
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@Mastering Prompt Engineering: Format, Length, and Audience](https://techlasi.com/savvy/mastering-prompt-engineering-format-length-and-audience-examples-for-2024/)
|
||||
- [@article@Ultimate Guide to Prompt Engineering](https://promptdrive.ai/prompt-engineering/)
|
||||
- [@article@Mastering Prompt Engineering: Format, Length, and Audience](https://techlasi.com/savvy/mastering-prompt-engineering-format-length-and-audience-examples-for-2024/)
|
||||
- [@article@Ultimate Guide to Prompt Engineering](https://promptdrive.ai/prompt-engineering/)
|
||||
|
||||
@@ -5,4 +5,4 @@ Stopping criteria tell the language model when to stop writing more text. Withou
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@Defining Stopping Criteria in Large Language Models](https://www.metriccoders.com/post/defining-stopping-criteria-in-large-language-models-a-practical-guide)
|
||||
- [@article@Stopping Criteria for Decision Tree Algorithm and Tree Plots](https://aieagle.in/stopping-criteria-for-decision-tree-algorithm-and-tree-plots/)
|
||||
- [@article@Stopping Criteria for Decision Tree Algorithm and Tree Plots](https://aieagle.in/stopping-criteria-for-decision-tree-algorithm-and-tree-plots/)
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Structured Logging & Tracing
|
||||
|
||||
Structured logging and tracing are ways to record what an AI agent does so you can find and fix problems fast. Instead of dumping plain text, the agent writes logs in a fixed key-value format, such as time, user\_id, step, and message. Because every entry follows the same shape, search tools can filter, sort, and count events with ease. Tracing links those log lines into a chain that follows one request or task across many functions, threads, or microservices. By adding a unique trace ID to each step, you can see how long each part took and where errors happened. Together, structured logs and traces offer clear, machine-readable data that helps developers spot slow code paths, unusual behavior, and hidden bugs without endless manual scans.
|
||||
Structured logging and tracing are ways to record what an AI agent does so you can find and fix problems fast. Instead of dumping plain text, the agent writes logs in a fixed key-value format, such as time, user_id, step, and message. Because every entry follows the same shape, search tools can filter, sort, and count events with ease. Tracing links those log lines into a chain that follows one request or task across many functions, threads, or microservices. By adding a unique trace ID to each step, you can see how long each part took and where errors happened. Together, structured logs and traces offer clear, machine-readable data that helps developers spot slow code paths, unusual behavior, and hidden bugs without endless manual scans.
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
|
||||
@@ -5,4 +5,4 @@ Summarization or compression lets an AI agent keep the gist of past chats withou
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@Evaluating LLMs for Text Summarization](https://insights.sei.cmu.edu/blog/evaluating-llms-for-text-summarization-introduction/)
|
||||
- [@article@The Ultimate Guide to AI Document Summarization](https://www.documentllm.com/blog/ai-document-summarization-guide)
|
||||
- [@article@The Ultimate Guide to AI Document Summarization](https://www.documentllm.com/blog/ai-document-summarization-guide)
|
||||
|
||||
@@ -7,4 +7,4 @@ Visit the following resources to learn more:
|
||||
- [@article@What Temperature Means in Natural Language Processing and AI](https://thenewstack.io/what-temperature-means-in-natural-language-processing-and-ai/)
|
||||
- [@article@LLM Temperature: How It Works and When You Should Use It](https://www.vellum.ai/llm-parameters/temperature)
|
||||
- [@article@What is LLM Temperature? - IBM](https://www.ibm.com/think/topics/llm-temperature)
|
||||
- [@article@How Temperature Settings Transform Your AI Agent's Responses](https://docsbot.ai/article/how-temperature-settings-transform-your-ai-agents-responses)
|
||||
- [@article@How Temperature Settings Transform Your AI Agent's Responses](https://docsbot.ai/article/how-temperature-settings-transform-your-ai-agents-responses)
|
||||
|
||||
@@ -5,4 +5,4 @@ Tokenization is the step where raw text is broken into small pieces called token
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@Explaining Tokens — the Language and Currency of AI](https://blogs.nvidia.com/blog/ai-tokens-explained/)
|
||||
- [@article@What is Tokenization? Types, Use Cases, Implementation](https://www.datacamp.com/blog/what-is-tokenization)
|
||||
- [@article@What is Tokenization? Types, Use Cases, Implementation](https://www.datacamp.com/blog/what-is-tokenization)
|
||||
|
||||
@@ -5,4 +5,4 @@ A tool is any skill or function that an AI agent can call to get a job done. It
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@Understanding the Agent Function in AI: Key Roles and Responsibilities](https://pingax.com/ai/agent/function/understanding-the-agent-function-in-ai-key-roles-and-responsibilities/)
|
||||
- [@article@What is an AI Tool?](https://www.synthesia.io/glossary/ai-tool)
|
||||
- [@article@What is an AI Tool?](https://www.synthesia.io/glossary/ai-tool)
|
||||
|
||||