mirror of
https://github.com/kamranahmedse/developer-roadmap.git
synced 2026-03-12 17:51:53 +08:00
Compare commits
79 Commits
6d8b8d8400
...
4db8a11713
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
4db8a11713 | ||
|
|
94d4e06415 | ||
|
|
e1561c5bb6 | ||
|
|
65fb30f540 | ||
|
|
682d35eee8 | ||
|
|
969b25ce4b | ||
|
|
7815f61d8a | ||
|
|
b788750ef1 | ||
|
|
4e6b7b6b32 | ||
|
|
db1d88ba4a | ||
|
|
e7369b6e27 | ||
|
|
71097bb430 | ||
|
|
6c9265a44e | ||
|
|
2e45440cc6 | ||
|
|
3300a6c191 | ||
|
|
88337e084e | ||
|
|
afb2e53715 | ||
|
|
6d86637f1f | ||
|
|
bbc4bbe00e | ||
|
|
e8fb4dba40 | ||
|
|
205e1fdde7 | ||
|
|
b230e3cfa3 | ||
|
|
4f887ead7c | ||
|
|
3fdad9804a | ||
|
|
a28bbbeb14 | ||
|
|
e4c1e1e4d5 | ||
|
|
b0878c3481 | ||
|
|
a27d607e79 | ||
|
|
ab9a60827e | ||
|
|
42e0ae3d38 | ||
|
|
9a6154af9c | ||
|
|
6702946b80 | ||
|
|
2c28823466 | ||
|
|
b034c1d89a | ||
|
|
8661342bb1 | ||
|
|
4c6625bb4f | ||
|
|
e15a0e3330 | ||
|
|
0bfef16728 | ||
|
|
ad1204b5d8 | ||
|
|
58e95d3b84 | ||
|
|
e93ff5d674 | ||
|
|
f886dbea4f | ||
|
|
2d8bd99240 | ||
|
|
72ab2e5861 | ||
|
|
abfdcb1520 | ||
|
|
e76f34c504 | ||
|
|
6eb7fe2e4c | ||
|
|
e092941e83 | ||
|
|
15f99c1ca6 | ||
|
|
c4a32d80b3 | ||
|
|
89df813ca6 | ||
|
|
dcb15f5a2b | ||
|
|
04c4f5c792 | ||
|
|
76f07790c8 | ||
|
|
68f8ee08ad | ||
|
|
d09bfb0ef1 | ||
|
|
4ae63e69fa | ||
|
|
de1ba25adc | ||
|
|
3b283d4273 | ||
|
|
36fae29426 | ||
|
|
b761d319f7 | ||
|
|
102d565028 | ||
|
|
3bd5767dbe | ||
|
|
818b06be4f | ||
|
|
67889f2176 | ||
|
|
24537898ea | ||
|
|
1a33f23cf5 | ||
|
|
d1211e7680 | ||
|
|
0f5818451b | ||
|
|
0b6a6cad47 | ||
|
|
dfa35b63c2 | ||
|
|
8287b7a96e | ||
|
|
462d1dac6b | ||
|
|
e6f7bd44e1 | ||
|
|
72d6abed1b | ||
|
|
fecf1daf07 | ||
|
|
3b580515d5 | ||
|
|
e8017f3e85 | ||
|
|
9a7e557276 |
80
.github/workflows/cleanup-orphaned-content.yml
vendored
Normal file
80
.github/workflows/cleanup-orphaned-content.yml
vendored
Normal file
@@ -0,0 +1,80 @@
|
||||
name: Cleanup Orphaned Content
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
roadmap_slug:
|
||||
description: "The ID of the roadmap to clean up"
|
||||
required: true
|
||||
|
||||
jobs:
|
||||
cleanup-content:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Setup pnpm@v9
|
||||
uses: pnpm/action-setup@v4
|
||||
with:
|
||||
version: 9
|
||||
run_install: false
|
||||
|
||||
- name: Setup Node.js Version 20 (LTS)
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 20
|
||||
cache: 'pnpm'
|
||||
|
||||
- name: Install Dependencies and Run Cleanup
|
||||
run: |
|
||||
echo "Installing Dependencies"
|
||||
pnpm install
|
||||
echo "Running Orphaned Content Cleanup"
|
||||
npm run cleanup:orphaned-content -- --roadmap-slug=${{ inputs.roadmap_slug }}
|
||||
|
||||
- name: Read cleanup summary
|
||||
id: read-summary
|
||||
run: |
|
||||
if [ -f .cleanup-summary.md ]; then
|
||||
{
|
||||
echo 'summary<<EOF'
|
||||
cat .cleanup-summary.md
|
||||
echo 'EOF'
|
||||
} >> $GITHUB_OUTPUT
|
||||
fi
|
||||
|
||||
- name: Check for changes
|
||||
id: verify-changed-files
|
||||
run: |
|
||||
if [ -n "$(git status --porcelain)" ]; then
|
||||
echo "changed=true" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "changed=false" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
|
||||
- name: Delete summary file
|
||||
if: steps.verify-changed-files.outputs.changed == 'true'
|
||||
run: rm -f .cleanup-summary.md
|
||||
|
||||
- name: Create PR
|
||||
if: steps.verify-changed-files.outputs.changed == 'true'
|
||||
uses: peter-evans/create-pull-request@v7
|
||||
with:
|
||||
delete-branch: false
|
||||
branch: "chore/cleanup-orphaned-content-${{ inputs.roadmap_slug }}"
|
||||
base: "master"
|
||||
labels: |
|
||||
automated pr
|
||||
reviewers: jcanalesluna,kamranahmedse
|
||||
commit-message: "chore: cleanup orphaned content files"
|
||||
title: "chore: cleanup orphaned content - ${{ inputs.roadmap_slug }}"
|
||||
body: |
|
||||
${{ steps.read-summary.outputs.summary }}
|
||||
|
||||
> [!IMPORTANT]
|
||||
> This PR removes orphaned/duplicate content files for: ${{ inputs.roadmap_slug }}
|
||||
>
|
||||
> Commit: ${{ github.sha }}
|
||||
> Workflow Path: ${{ github.workflow_ref }}
|
||||
|
||||
**Please review the changes and merge the PR if everything looks correct.**
|
||||
@@ -33,6 +33,7 @@
|
||||
"sync:repo-to-database": "tsx ./scripts/sync-repo-to-database.ts",
|
||||
"sync:roadmap": "tsx ./scripts/sync-roadmap-to-database.ts",
|
||||
"migrate:content-repo-to-database": "tsx ./scripts/migrate-content-repo-to-database.ts",
|
||||
"cleanup:orphaned-content": "tsx ./scripts/cleanup-orphaned-content.ts",
|
||||
"official:roadmap-assets": "tsx ./scripts/official-roadmap-assets.ts",
|
||||
"test:e2e": "playwright test"
|
||||
},
|
||||
|
||||
259
scripts/cleanup-orphaned-content.ts
Normal file
259
scripts/cleanup-orphaned-content.ts
Normal file
@@ -0,0 +1,259 @@
|
||||
import type { Node } from '@roadmapsh/editor';
|
||||
import matter from 'gray-matter';
|
||||
import fs from 'node:fs/promises';
|
||||
import path from 'node:path';
|
||||
import { fileURLToPath } from 'node:url';
|
||||
import { slugify } from '../src/lib/slugger';
|
||||
|
||||
const __filename = fileURLToPath(import.meta.url);
|
||||
const __dirname = path.dirname(__filename);
|
||||
|
||||
const ROADMAP_CONTENT_DIR = path.join(__dirname, '../src/data/roadmaps');
|
||||
|
||||
const args = process.argv.slice(2);
|
||||
const roadmapSlug = args?.[0]?.replace('--roadmap-slug=', '');
|
||||
|
||||
if (!roadmapSlug) {
|
||||
console.error('Usage: tsx scripts/cleanup-orphaned-content.ts --roadmap-slug=<slug|__all__>');
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
interface OrphanEntry {
|
||||
file: string;
|
||||
reason: string;
|
||||
duplicateOf: string;
|
||||
action: 'deleted' | 'renamed';
|
||||
renamedTo?: string;
|
||||
}
|
||||
|
||||
async function fetchRoadmapJson(slug: string): Promise<{ nodes: Node[] }> {
|
||||
try {
|
||||
const response = await fetch(
|
||||
`https://roadmap.sh/api/v1-official-roadmap/${slug}`,
|
||||
);
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(`HTTP ${response.status}`);
|
||||
}
|
||||
|
||||
const data = await response.json();
|
||||
if (data.error) {
|
||||
throw new Error(data.error);
|
||||
}
|
||||
|
||||
return data;
|
||||
} catch (err) {
|
||||
console.log(` API fetch failed for ${slug}, falling back to local JSON`);
|
||||
const localPath = path.join(ROADMAP_CONTENT_DIR, slug, `${slug}.json`);
|
||||
const raw = await fs.readFile(localPath, 'utf-8');
|
||||
return JSON.parse(raw);
|
||||
}
|
||||
}
|
||||
|
||||
async function isEditorRoadmap(slug: string): Promise<boolean> {
|
||||
const mdPath = path.join(ROADMAP_CONTENT_DIR, slug, `${slug}.md`);
|
||||
try {
|
||||
const raw = await fs.readFile(mdPath, 'utf-8');
|
||||
const { data } = matter(raw);
|
||||
return data.renderer === 'editor';
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
async function getEditorRoadmapSlugs(): Promise<string[]> {
|
||||
const allDirs = await fs.readdir(ROADMAP_CONTENT_DIR);
|
||||
const results: string[] = [];
|
||||
|
||||
for (const dir of allDirs) {
|
||||
const stat = await fs.stat(path.join(ROADMAP_CONTENT_DIR, dir)).catch(() => null);
|
||||
if (!stat?.isDirectory()) {
|
||||
continue;
|
||||
}
|
||||
if (await isEditorRoadmap(dir)) {
|
||||
results.push(dir);
|
||||
}
|
||||
}
|
||||
|
||||
return results;
|
||||
}
|
||||
|
||||
function parseContentFilename(filename: string): { slug: string; nodeId: string } | null {
|
||||
const match = filename.match(/^(.+)@([^.]+)\.md$/);
|
||||
if (!match) {
|
||||
return null;
|
||||
}
|
||||
return { slug: match[1], nodeId: match[2] };
|
||||
}
|
||||
|
||||
async function cleanupRoadmap(slug: string): Promise<OrphanEntry[]> {
|
||||
console.log(`\nProcessing: ${slug}`);
|
||||
|
||||
const contentDir = path.join(ROADMAP_CONTENT_DIR, slug, 'content');
|
||||
const stat = await fs.stat(contentDir).catch(() => null);
|
||||
if (!stat?.isDirectory()) {
|
||||
console.log(` No content directory found, skipping`);
|
||||
return [];
|
||||
}
|
||||
|
||||
const roadmapData = await fetchRoadmapJson(slug);
|
||||
if (!roadmapData?.nodes) {
|
||||
console.log(` No nodes found in roadmap JSON, skipping`);
|
||||
return [];
|
||||
}
|
||||
|
||||
const topicNodes = roadmapData.nodes.filter(
|
||||
(node) =>
|
||||
node?.type &&
|
||||
['topic', 'subtopic'].includes(node.type) &&
|
||||
node.data?.label,
|
||||
);
|
||||
|
||||
const validNodeIds = new Set<string>();
|
||||
const nodeIdToExpectedSlug = new Map<string, string>();
|
||||
|
||||
for (const node of topicNodes) {
|
||||
validNodeIds.add(node.id);
|
||||
nodeIdToExpectedSlug.set(node.id, slugify(node.data.label as string));
|
||||
}
|
||||
|
||||
const files = await fs.readdir(contentDir);
|
||||
const orphans: OrphanEntry[] = [];
|
||||
|
||||
const validFilesBySlug = new Map<string, string>();
|
||||
for (const file of files) {
|
||||
const parsed = parseContentFilename(file);
|
||||
if (!parsed) {
|
||||
continue;
|
||||
}
|
||||
if (validNodeIds.has(parsed.nodeId) && nodeIdToExpectedSlug.get(parsed.nodeId) === parsed.slug) {
|
||||
validFilesBySlug.set(parsed.slug, file);
|
||||
}
|
||||
}
|
||||
|
||||
for (const file of files) {
|
||||
const parsed = parseContentFilename(file);
|
||||
if (!parsed) {
|
||||
continue;
|
||||
}
|
||||
|
||||
const { slug: fileSlug, nodeId } = parsed;
|
||||
|
||||
if (validNodeIds.has(nodeId)) {
|
||||
const expectedSlug = nodeIdToExpectedSlug.get(nodeId)!;
|
||||
if (fileSlug === expectedSlug) {
|
||||
continue;
|
||||
}
|
||||
|
||||
const correctFile = `${expectedSlug}@${nodeId}.md`;
|
||||
const correctFileExists = files.includes(correctFile);
|
||||
|
||||
if (correctFileExists) {
|
||||
orphans.push({
|
||||
file,
|
||||
reason: 'Same nodeId, old slug',
|
||||
duplicateOf: correctFile,
|
||||
action: 'deleted',
|
||||
});
|
||||
} else {
|
||||
orphans.push({
|
||||
file,
|
||||
reason: 'Same nodeId, old slug',
|
||||
duplicateOf: correctFile,
|
||||
action: 'renamed',
|
||||
renamedTo: correctFile,
|
||||
});
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
const validFile = validFilesBySlug.get(fileSlug);
|
||||
if (validFile) {
|
||||
orphans.push({
|
||||
file,
|
||||
reason: 'Same slug, old nodeId',
|
||||
duplicateOf: validFile,
|
||||
action: 'deleted',
|
||||
});
|
||||
} else {
|
||||
orphans.push({
|
||||
file,
|
||||
reason: 'Topic removed from roadmap',
|
||||
duplicateOf: 'N/A',
|
||||
action: 'deleted',
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
for (const orphan of orphans) {
|
||||
const filePath = path.join(contentDir, orphan.file);
|
||||
if (orphan.action === 'renamed') {
|
||||
const newPath = path.join(contentDir, orphan.renamedTo!);
|
||||
await fs.rename(filePath, newPath);
|
||||
console.log(` Renamed: ${orphan.file} -> ${orphan.renamedTo} (${orphan.reason})`);
|
||||
} else {
|
||||
await fs.unlink(filePath);
|
||||
console.log(` Deleted: ${orphan.file} (${orphan.reason})`);
|
||||
}
|
||||
}
|
||||
|
||||
if (orphans.length === 0) {
|
||||
console.log(` No orphans found`);
|
||||
}
|
||||
|
||||
return orphans;
|
||||
}
|
||||
|
||||
async function main() {
|
||||
const slugs =
|
||||
roadmapSlug === '__all__'
|
||||
? await getEditorRoadmapSlugs()
|
||||
: [roadmapSlug];
|
||||
|
||||
if (roadmapSlug !== '__all__') {
|
||||
if (!(await isEditorRoadmap(roadmapSlug))) {
|
||||
console.error(`${roadmapSlug} is not an editor-rendered roadmap`);
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
console.log(`Processing ${slugs.length} roadmap(s)...`);
|
||||
|
||||
const allOrphans = new Map<string, OrphanEntry[]>();
|
||||
let totalOrphans = 0;
|
||||
|
||||
for (const slug of slugs) {
|
||||
const orphans = await cleanupRoadmap(slug);
|
||||
if (orphans.length > 0) {
|
||||
allOrphans.set(slug, orphans);
|
||||
totalOrphans += orphans.length;
|
||||
}
|
||||
}
|
||||
|
||||
const roadmapsAffected = allOrphans.size;
|
||||
|
||||
let summary = `## Orphaned Content Cleanup\n\n`;
|
||||
summary += `Cleaned up **${totalOrphans}** orphaned content file(s) across **${roadmapsAffected}** roadmap(s).\n\n`;
|
||||
|
||||
for (const [slug, orphans] of allOrphans) {
|
||||
summary += `### ${slug}\n\n`;
|
||||
summary += `| File | Action | Reason | Duplicate Of |\n`;
|
||||
summary += `|---|---|---|---|\n`;
|
||||
for (const orphan of orphans) {
|
||||
const action = orphan.action === 'renamed' ? `Renamed to \`${orphan.renamedTo}\`` : 'Deleted';
|
||||
const dupOf = orphan.duplicateOf === 'N/A' ? 'N/A' : `\`${orphan.duplicateOf}\``;
|
||||
summary += `| \`${orphan.file}\` | ${action} | ${orphan.reason} | ${dupOf} |\n`;
|
||||
}
|
||||
summary += `\n`;
|
||||
}
|
||||
|
||||
const summaryPath = path.join(__dirname, '..', '.cleanup-summary.md');
|
||||
await fs.writeFile(summaryPath, summary);
|
||||
console.log(`\nSummary written to .cleanup-summary.md`);
|
||||
console.log(`Total: ${totalOrphans} orphaned file(s) cleaned up across ${roadmapsAffected} roadmap(s)`);
|
||||
}
|
||||
|
||||
main().catch((err) => {
|
||||
console.error(err);
|
||||
process.exit(1);
|
||||
});
|
||||
@@ -5,4 +5,4 @@ Acting, also called tool invocation, is the step where the AI chooses a tool and
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@What are Tools in AI Agents?](https://huggingface.co/learn/agents-course/en/unit1/tools)
|
||||
- [@article@What is Tool Calling in Agents?](https://www.useparagon.com/blog/ai-building-blocks-what-is-tool-calling-a-guide-for-pms)
|
||||
- [@article@What is Tool Calling in Agents?](https://www.useparagon.com/blog/ai-building-blocks-what-is-tool-calling-a-guide-for-pms)
|
||||
@@ -5,4 +5,4 @@ An agent loop is the cycle that lets an AI agent keep working toward a goal. Fir
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@What is an Agent Loop?](https://huggingface.co/learn/agents-course/en/unit1/agent-steps-and-structure)
|
||||
- [@article@Let's Build your Own Agentic Loop](https://www.reddit.com/r/AI_Agents/comments/1js1xjz/lets_build_our_own_agentic_loop_running_in_our/)
|
||||
- [@article@Let's Build your Own Agentic Loop](https://www.reddit.com/r/AI_Agents/comments/1js1xjz/lets_build_our_own_agentic_loop_running_in_our/)
|
||||
@@ -4,4 +4,4 @@ Anthropic Tool Use lets you connect a Claude model to real software functions so
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@Anthropic Tool Use](https://docs.anthropic.com/en/docs/build-with-claude/tool-use/overview)
|
||||
- [@official@Anthropic Tool Use](https://docs.anthropic.com/en/docs/build-with-claude/tool-use/overview)
|
||||
@@ -5,4 +5,4 @@ API requests let an AI agent ask another service for data or for an action. The
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@Introduction to APIs - MDN Web Docs](https://developer.mozilla.org/en-US/docs/Learn_web_development/Extensions/Client-side_APIs/Introduction)
|
||||
- [@article@How APIs Power AI Agents: A Comprehensive Guide](https://blog.treblle.com/api-guide-for-ai-agents/)
|
||||
- [@article@How APIs Power AI Agents: A Comprehensive Guide](https://blog.treblle.com/api-guide-for-ai-agents/)
|
||||
@@ -6,4 +6,4 @@ Visit the following resources to learn more:
|
||||
|
||||
- [@article@Introduction to the server-side](https://developer.mozilla.org/en-US/docs/Learn/Server-side/First_steps/Introduction)
|
||||
- [@article@What is a REST API? - Red Hat](https://www.redhat.com/en/topics/api/what-is-a-rest-api)
|
||||
- [@article@What is a Database? - Oracle](https://www.oracle.com/database/what-is-database/)
|
||||
- [@article@What is a Database? - Oracle](https://www.oracle.com/database/what-is-database/)
|
||||
@@ -5,4 +5,4 @@ Chain of Thought (CoT) is a way for an AI agent to think out loud. Before giving
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@Chain-of-Thought Prompting Elicits Reasoning in Large Language Models](https://arxiv.org/abs/2201.11903)
|
||||
- [@article@Evoking Chain of Thought Reasoning in LLMs - Prompting Guide](https://www.promptingguide.ai/techniques/cot)
|
||||
- [@article@Evoking Chain of Thought Reasoning in LLMs - Prompting Guide](https://www.promptingguide.ai/techniques/cot)
|
||||
@@ -4,8 +4,8 @@ Closed-weight models are AI systems whose trained parameters—the numbers that
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@Open-Source LLMs vs Closed LLMs](https://hatchworks.com/blog/gen-ai/open-source-vs-closed-llms-guide/)
|
||||
- [@article@2024 Comparison of Open-Source Vs Closed-Source LLMs](https://blog.spheron.network/choosing-the-right-llm-2024-comparison-of-open-source-vs-closed-source-llms)
|
||||
- [@official@Open AI's GPT-4](https://openai.com/gpt-4)
|
||||
- [@official@Claude](https://www.anthropic.com/claude)
|
||||
- [@official@Gemini](https://deepmind.google/technologies/gemini/)
|
||||
- [@article@Open-Source LLMs vs Closed LLMs](https://hatchworks.com/blog/gen-ai/open-source-vs-closed-llms-guide/)
|
||||
- [@article@2024 Comparison of Open-Source Vs Closed-Source LLMs](https://blog.spheron.network/choosing-the-right-llm-2024-comparison-of-open-source-vs-closed-source-llms)
|
||||
@@ -7,4 +7,4 @@ Visit the following resources to learn more:
|
||||
- [@article@What is a REPL?](https://docs.replit.com/getting-started/intro-replit)
|
||||
- [@article@Code Execution AI Agent](https://docs.praison.ai/features/codeagent)
|
||||
- [@article@Building an AI Agent's Code Execution Environment](https://murraycole.com/posts/ai-code-execution-environment)
|
||||
- [@article@Python Code Tool](https://python.langchain.com/docs/integrations/tools/python/)
|
||||
- [@article@Python Code Tool](https://python.langchain.com/docs/integrations/tools/python/)
|
||||
@@ -4,6 +4,6 @@ Code-generation agents take a plain language request, understand the goal, and t
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@GitHub Copilot](https://github.com/features/copilot)
|
||||
- [@article@Multi-Agent-based Code Generation](https://arxiv.org/abs/2312.13010)
|
||||
- [@article@From Prompt to Production: GitHub Blog](https://github.blog/ai-and-ml/github-copilot/from-prompt-to-production-building-a-landing-page-with-copilot-agent-mode/)
|
||||
- [@official@GitHub Copilot](https://github.com/features/copilot)
|
||||
- [@article@From Prompt to Production: GitHub Blog](https://github.blog/ai-and-ml/github-copilot/from-prompt-to-production-building-a-landing-page-with-copilot-agent-mode/)
|
||||
@@ -8,4 +8,4 @@ Visit the following resources to learn more:
|
||||
|
||||
- [@article@What is a Context Window in AI?](https://www.ibm.com/think/topics/context-window)
|
||||
- [@article@Scaling Language Models with Retrieval-Augmented Generation (RAG)](https://arxiv.org/abs/2005.11401)
|
||||
- [@article@Long Context in Language Models - Anthropic's Claude 3](https://www.anthropic.com/news/claude-3-family)
|
||||
- [@article@Long Context in Language Models - Anthropic's Claude 3](https://www.anthropic.com/news/claude-3-family)
|
||||
@@ -5,4 +5,4 @@ An MCP server stores and shares conversation data for AI agents using the Model
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@Model Context Protocol (MCP) Specification](https://www.anthropic.com/news/model-context-protocol)
|
||||
- [@article@How to Build and Host Your Own MCP Servers in Easy Steps?](https://collabnix.com/how-to-build-and-host-your-own-mcp-servers-in-easy-steps/)
|
||||
- [@article@How to Build and Host Your Own MCP Servers in Easy Steps?](https://collabnix.com/how-to-build-and-host-your-own-mcp-servers-in-easy-steps/)
|
||||
@@ -6,4 +6,4 @@ Visit the following resources to learn more:
|
||||
|
||||
- [@official@Airflow: Directed Acyclic Graphs Documentation](https://airflow.apache.org/docs/apache-airflow/stable/concepts/dags.html)
|
||||
- [@article@What are DAGs in AI Systems?](https://www.restack.io/p/version-control-for-ai-answer-what-is-dag-in-ai-cat-ai)
|
||||
- [@video@DAGs Explained Simply](https://www.youtube.com/watch?v=1Yh5S-S6wsI)
|
||||
- [@video@DAGs Explained Simply](https://www.youtube.com/watch?v=1Yh5S-S6wsI)
|
||||
@@ -5,4 +5,4 @@ AI agents can automate data analysis by pulling information from files, database
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@How AI Will Transform Data Analysis in 2025](https://www.devfi.com/ai-transform-data-analysis-2025/)
|
||||
- [@article@How AI Has Changed The World Of Analytics And Data Science](https://www.forbes.com/councils/forbestechcouncil/2025/01/28/how-ai-has-changed-the-world-of-analytics-and-data-science/k)
|
||||
- [@article@How AI Has Changed The World Of Analytics And Data Science](https://www.forbes.com/councils/forbestechcouncil/2025/01/28/how-ai-has-changed-the-world-of-analytics-and-data-science/k)
|
||||
@@ -6,4 +6,4 @@ Visit the following resources to learn more:
|
||||
|
||||
- [@official@GDPR Compliance Overview](https://gdpr.eu/)
|
||||
- [@article@Protect Sensitive Data with PII Redaction Software](https://redactor.ai/blog/pii-redaction-software-guide)
|
||||
- [@article@A Complete Guide on PII Redaction](https://enthu.ai/blog/what-is-pii-redaction/)
|
||||
- [@article@A Complete Guide on PII Redaction](https://enthu.ai/blog/what-is-pii-redaction/)
|
||||
@@ -4,4 +4,4 @@ Database queries let an AI agent fetch, add, change, or remove data stored in a
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@Building Your Own Database Agent](https://www.deeplearning.ai/short-courses/building-your-own-database-agent/)
|
||||
- [@article@Building Your Own Database Agent](https://www.deeplearning.ai/short-courses/building-your-own-database-agent/)
|
||||
@@ -7,4 +7,4 @@ Visit the following resources to learn more:
|
||||
- [@official@DeepEval - The Open-Source LLM Evaluation Framework](https://www.deepeval.com/)
|
||||
- [@opensource@DeepEval GitHub Repository](https://github.com/confident-ai/deepeval)
|
||||
- [@article@Evaluate LLMs Effectively Using DeepEval: A Pratical Guide](https://www.datacamp.com/tutorial/deepeval)
|
||||
- [@video@DeepEval - LLM Evaluation Framework](https://www.youtube.com/watch?v=ZNs2dCXHlfo)
|
||||
- [@video@DeepEval - LLM Evaluation Framework](https://www.youtube.com/watch?v=ZNs2dCXHlfo)
|
||||
@@ -5,4 +5,4 @@ Email, Slack, and SMS are message channels an AI agent can use to act on tasks a
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@Twilio Messaging API](https://www.twilio.com/docs/usage/api)
|
||||
- [@official@Slack AI Agents](https://slack.com/ai-agents)
|
||||
- [@official@Slack AI Agents](https://slack.com/ai-agents)
|
||||
@@ -5,7 +5,4 @@ Embeddings turn words, pictures, or other data into lists of numbers called vect
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@OpenAI Embeddings API Documentation](https://platform.openai.com/docs/guides/embeddings/what-are-embeddings)
|
||||
- [@article@Understanding Embeddings and Vector Search (Pinecone Blog)](https://www.pinecone.io/learn/vector-embeddings/)
|
||||
- [@video@What are Word Embeddings?](https://youtu.be/wgfSDrqYMJ4?si=8bS9_cVChpTzl2z6)
|
||||
- [@video@What is a Vector Database? Powering Semantic Search & AI Applications](https://youtu.be/gl1r1XV0SLw?si=StU9dl8yQTNxdDaI)
|
||||
- [@video@What is a Vector Database?](https://youtu.be/t9IDoenf-lo?si=QG0WD3di9zIliBPC)
|
||||
- [@article@Understanding Embeddings and Vector Search (Pinecone Blog)](https://www.pinecone.io/learn/vector-embeddings/)
|
||||
@@ -6,4 +6,4 @@ Visit the following resources to learn more:
|
||||
|
||||
- [@article@What Is AI Agent Memory? - IBM](https://www.ibm.com/think/topics/ai-agent-memory)
|
||||
- [@article@Episodic Memory vs. Semantic Memory: The Key Differences](https://www.magneticmemorymethod.com/episodic-vs-semantic-memory/)
|
||||
- [@article@Memory Systems in LangChain](https://python.langchain.com/docs/how_to/chatbots_memory/)
|
||||
- [@article@Memory Systems in LangChain](https://python.langchain.com/docs/how_to/chatbots_memory/)
|
||||
@@ -7,4 +7,4 @@ Visit the following resources to learn more:
|
||||
- [@article@Filesystem MCP server for AI Agents](https://playbooks.com/mcp/mateicanavra-filesystem)
|
||||
- [@article@File System Access API](https://developer.mozilla.org/en-US/docs/Web/API/File_System_Access_API)
|
||||
- [@article@Understanding File Permissions and Security](https://linuxize.com/post/understanding-linux-file-permissions/)
|
||||
- [@video@How File Systems Work?](https://www.youtube.com/watch?v=KN8YgJnShPM)
|
||||
- [@video@How File Systems Work?](https://www.youtube.com/watch?v=KN8YgJnShPM)
|
||||
@@ -7,4 +7,4 @@ Visit the following resources to learn more:
|
||||
- [@article@OpenAI Fine Tuning](https://platform.openai.com/docs/guides/fine-tuning)
|
||||
- [@article@Prompt Engineering Guide](https://www.promptingguide.ai/)
|
||||
- [@article@Prompt Engineering vs Prompt Tuning: A Detailed Explanation](https://medium.com/@aabhi02/prompt-engineering-vs-prompt-tuning-a-detailed-explanation-19ea8ce62ac4)
|
||||
- [@video@RAG vs Fine-Tuning vs Prompt Engineering: Optimizing AI Models](https://youtu.be/zYGDpG-pTho?si=pFeWqbjSN1RM4WiZ)
|
||||
- [@video@RAG vs Fine-Tuning vs Prompt Engineering: Optimizing AI Models](https://youtu.be/zYGDpG-pTho?si=pFeWqbjSN1RM4WiZ)
|
||||
@@ -6,4 +6,4 @@ Visit the following resources to learn more:
|
||||
|
||||
- [@official@Git Basics](https://git-scm.com/doc)
|
||||
- [@official@Introduction to the Terminal](https://ubuntu.com/tutorials/command-line-for-beginners#1-overview)
|
||||
- [@video@Git and Terminal Basics Crash Course (YouTube)](https://www.youtube.com/watch?v=HVsySz-h9r4)
|
||||
- [@video@Git and Terminal Basics Crash Course (YouTube)](https://www.youtube.com/watch?v=HVsySz-h9r4)
|
||||
@@ -6,4 +6,4 @@ Visit the following resources to learn more:
|
||||
|
||||
- [@official@Haystack](https://haystack.deepset.ai/)
|
||||
- [@official@Haystack Overview](https://docs.haystack.deepset.ai/docs/intro)
|
||||
- [@opensource@deepset-ai/haystack](https://github.com/deepset-ai/haystack)
|
||||
- [@opensource@deepset-ai/haystack](https://github.com/deepset-ai/haystack)
|
||||
@@ -6,4 +6,4 @@ Visit the following resources to learn more:
|
||||
|
||||
- [@official@Helicone](https://www.helicone.ai/)
|
||||
- [@official@Helicone OSS LLM Observability](https://docs.helicone.ai/getting-started/quick-start)
|
||||
- [@opensource@Helicone/helicone](https://github.com/Helicone/helicone)
|
||||
- [@opensource@Helicone/helicone](https://github.com/Helicone/helicone)
|
||||
@@ -7,4 +7,4 @@ Visit the following resources to learn more:
|
||||
- [@article@Human in the Loop · Cloudflare Agents](https://developers.cloudflare.com/agents/concepts/human-in-the-loop/)
|
||||
- [@article@What is Human-in-the-Loop: A Guide](https://logifusion.com/what-is-human-in-the-loop-htil/)
|
||||
- [@article@Human-in-the-Loop ML](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-human-review-workflow.html)
|
||||
- [@article@The Importance of Human Feedback in AI (Hugging Face Blog)](https://huggingface.co/blog/rlhf)
|
||||
- [@article@The Importance of Human Feedback in AI (Hugging Face Blog)](https://huggingface.co/blog/rlhf)
|
||||
@@ -4,6 +4,6 @@ After you write a first prompt, treat it as a draft, not the final version. Run
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@Master Iterative Prompting: A Guide](https://blogs.vreamer.space/master-iterative-prompting-a-guide-to-more-effective-interactions-with-ai-50a736eaec38)
|
||||
- [@course@Prompt Engineering Best Practices](https://www.deeplearning.ai/short-courses/chatgpt-prompt-engineering-for-developers/)
|
||||
- [@video@Prompt Engineering: The Iterative Process](https://www.youtube.com/watch?v=dOxUroR57xs)
|
||||
- [@article@Master Iterative Prompting: A Guide](https://blogs.vreamer.space/master-iterative-prompting-a-guide-to-more-effective-interactions-with-ai-50a736eaec38)
|
||||
- [@video@Prompt Engineering: The Iterative Process](https://www.youtube.com/watch?v=dOxUroR57xs)
|
||||
@@ -1,11 +1,10 @@
|
||||
# LangChain
|
||||
|
||||
LangChain is a Python and JavaScript library that helps you put large language models to work in real products. It gives ready-made parts for common agent tasks such as talking to many tools, keeping short-term memory, and calling an external API when the model needs fresh data. You combine these parts like Lego blocks: pick a model, add a prompt template, chain the steps, then wrap the chain in an “agent” that can choose what step to run next. Built-in connectors link to OpenAI, Hugging Face, vector stores, and SQL databases, so you can search documents or pull company data without writing a lot of glue code. This lets you move fast from idea to working bot, while still letting you swap out parts if your needs change.
|
||||
LangChain is a framework designed to simplify the creation of applications using large language models (LLMs). It provides tools and abstractions to connect LLMs to various data sources, create chains of calls to LLMs or other utilities, and build agents that can interact with their environment. Essentially, it helps developers structure, chain, and orchestrate different AI components to build more complex and capable AI applications.
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@LangChain Documentation](https://python.langchain.com/docs/introduction/)
|
||||
- [@opensource@langchain-ai/langchain](https://github.com/langchain-ai/langchain)
|
||||
- [@article@Building Applications with LLMs using LangChain](https://www.pinecone.io/learn/series/langchain/)
|
||||
- [@article@AI Agents with LangChain and LangGraph](https://www.udacity.com/course/ai-agents-with-langchain-and-langgraph--cd13764)
|
||||
- [@video@LangChain Crash Course - Build LLM Apps Fast (YouTube)](https://www.youtube.com/watch?v=nAmC7SoVLd8)
|
||||
- [@video@LangChain Crash Course - Build LLM Apps Fast (YouTube)](https://www.youtube.com/watch?v=nAmC7SoVLd8)
|
||||
@@ -7,4 +7,4 @@ Visit the following resources to learn more:
|
||||
- [@official@LangFuse](https://langfuse.com/)
|
||||
- [@official@LangFuse Documentation](https://langfuse.com/docs)
|
||||
- [@opensource@langfuse/langfuse](https://github.com/langfuse/langfuse)
|
||||
- [@article@Langfuse: Open Source LLM Engineering Platform](https://www.ycombinator.com/companies/langfuse)
|
||||
- [@article@Langfuse: Open Source LLM Engineering Platform](https://www.ycombinator.com/companies/langfuse)
|
||||
@@ -7,4 +7,4 @@ Visit the following resources to learn more:
|
||||
- [@official@LangSmith](https://smith.langchain.com/)
|
||||
- [@official@LangSmith Documentation](https://docs.smith.langchain.com/)
|
||||
- [@official@Harden your application with LangSmith Evaluation](https://www.langchain.com/evaluation)
|
||||
- [@article@What is LangSmith and Why should I care as a developer?](https://medium.com/around-the-prompt/what-is-langsmith-and-why-should-i-care-as-a-developer-e5921deb54b5)
|
||||
- [@article@What is LangSmith and Why should I care as a developer?](https://medium.com/around-the-prompt/what-is-langsmith-and-why-should-i-care-as-a-developer-e5921deb54b5)
|
||||
@@ -7,4 +7,4 @@ Visit the following resources to learn more:
|
||||
- [@official@LangSmith](https://smith.langchain.com/)
|
||||
- [@official@LangSmith Documentation](https://docs.smith.langchain.com/)
|
||||
- [@official@Harden your application with LangSmith Evaluation](https://www.langchain.com/evaluation)
|
||||
- [@article@What is LangSmith and Why should I care as a developer?](https://medium.com/around-the-prompt/what-is-langsmith-and-why-should-i-care-as-a-developer-e5921deb54b5)
|
||||
- [@article@What is LangSmith and Why should I care as a developer?](https://medium.com/around-the-prompt/what-is-langsmith-and-why-should-i-care-as-a-developer-e5921deb54b5)
|
||||
@@ -6,5 +6,5 @@ Visit the following resources to learn more:
|
||||
|
||||
- [@article@Build a Simple Local MCP Server](https://blog.stackademic.com/build-simple-local-mcp-server-5434d19572a4)
|
||||
- [@article@How to Build and Host Your Own MCP Servers in Easy Steps](https://collabnix.com/how-to-build-and-host-your-own-mcp-servers-in-easy-steps/)
|
||||
- [@article@Expose localhost to Internet](https://ngrok.com/docs)
|
||||
- [@video@Run a Local Server on Your Machine](https://www.youtube.com/watch?v=ldGl6L4Vktk)
|
||||
- [@article@Expose localhost to Internet](https://ngrok.com/docs)
|
||||
- [@video@Run a Local Server on Your Machine](https://www.youtube.com/watch?v=ldGl6L4Vktk)
|
||||
@@ -5,7 +5,7 @@ Long term memory in an AI agent stores important information for future use, lik
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@Long Term Memory in AI Agents](https://medium.com/@alozie_igbokwe/ai-101-long-term-memory-in-ai-agents-35f87f2d0ce0)
|
||||
- [@article@Memory Management in AI Agents](https://python.langchain.com/docs/how_to/chatbots_memory/)
|
||||
- [@article@Storing and Retrieving Knowledge for Agents](https://www.pinecone.io/learn/langchain-retrieval-augmentation/)
|
||||
- [@article@Memory Management in AI Agents](https://python.langchain.com/docs/how_to/chatbots_memory/)
|
||||
- [@article@Storing and Retrieving Knowledge for Agents](https://www.pinecone.io/learn/langchain-retrieval-augmentation/)
|
||||
- [@article@Short-Term vs Long-Term Memory in AI Agents](https://adasci.org/short-term-vs-long-term-memory-in-ai-agents/)
|
||||
- [@video@Building Brain-Like Memory for AI Agents](https://www.youtube.com/watch?v=VKPngyO0iKg)
|
||||
- [@video@Building Brain-Like Memory for AI Agents](https://www.youtube.com/watch?v=VKPngyO0iKg)
|
||||
@@ -4,7 +4,7 @@ Building an AI agent from scratch means writing every part of the system yoursel
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@A Step-by-Step Guide to Building an AI Agent From Scratch](https://www.neurond.com/blog/how-to-build-an-ai-agent)
|
||||
- [@article@How to Build AI Agents](https://wotnot.io/blog/build-ai-agents)
|
||||
- [@article@A Step-by-Step Guide to Building an AI Agent From Scratch](https://www.neurond.com/blog/how-to-build-an-ai-agent)
|
||||
- [@article@How to Build AI Agents](https://wotnot.io/blog/build-ai-agents)
|
||||
- [@article@Build Your Own AI Agent from Scratch in 30 Minutes](https://medium.com/@gurpartap.sandhu3/build-you-own-ai-agent-from-scratch-in-30-mins-using-simple-python-1458f8099da0)
|
||||
- [@video@Building an AI Agent From Scratch](https://www.youtube.com/watch?v=bTMPwUgLZf0)
|
||||
- [@video@Building an AI Agent From Scratch](https://www.youtube.com/watch?v=bTMPwUgLZf0)
|
||||
@@ -4,9 +4,9 @@ Max Length sets the maximum number of tokens a language model can generate in on
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@OpenAI Token Usage](https://platform.openai.com/docs/guides/gpt/managing-tokens)
|
||||
- [@official@Size and Max Token Limits](https://docs.anthropic.com/claude/docs/size-and-token-limits)
|
||||
- [@article@Utilising Max Token Context Window of Anthropic Claude](https://medium.com/@nampreetsingh/utilising-max-token-context-window-of-anthropic-claude-on-amazon-bedrock-7377d94b2dfa)
|
||||
- [@article@Controlling the Length of OpenAI Model Responses](https://help.openai.com/en/articles/5072518-controlling-the-length-of-openai-model-responses)
|
||||
- [@official@OpenAI Token Usage](https://platform.openai.com/docs/guides/gpt/managing-tokens)
|
||||
- [@official@Size and Max Token Limits](https://docs.anthropic.com/claude/docs/size-and-token-limits)
|
||||
- [@article@Utilising Max Token Context Window of Anthropic Claude](https://medium.com/@nampreetsingh/utilising-max-token-context-window-of-anthropic-claude-on-amazon-bedrock-7377d94b2dfa)
|
||||
- [@article@Controlling the Length of OpenAI Model Responses](https://help.openai.com/en/articles/5072518-controlling-the-length-of-openai-model-responses)
|
||||
- [@article@Max Model Length in AI](https://www.restack.io/p/ai-model-answer-max-model-length-cat-ai)
|
||||
- [@video@Understanding ChatGPT/OpenAI Tokens](https://youtu.be/Mo3NV5n1yZk)
|
||||
@@ -4,7 +4,7 @@ The MCP Client is the part of an AI agent that talks to the language model API.
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@opensource@Model Context Protocol](https://github.com/modelcontextprotocol/modelcontextprotocol)
|
||||
- [@official@Model Context Protocol](https://modelcontextprotocol.io/introduction)
|
||||
- [@official@OpenAI API Reference](https://platform.openai.com/docs/api-reference)
|
||||
- [@official@Anthropic API Documentation](https://docs.anthropic.com/claude/reference)
|
||||
- [@official@OpenAI API Reference](https://platform.openai.com/docs/api-reference)
|
||||
- [@official@Anthropic API Documentation](https://docs.anthropic.com/claude/reference)
|
||||
- [@opensource@Model Context Protocol](https://github.com/modelcontextprotocol/modelcontextprotocol)
|
||||
@@ -4,7 +4,7 @@ MCP Hosts are computers or services that run the Model Context Protocol. They ha
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@Vercel Serverless Hosting](https://vercel.com/docs)
|
||||
- [@official@Vercel Serverless Hosting](https://vercel.com/docs)
|
||||
- [@opensource@punkeye/awesome-mcp-servers](https://github.com/punkpeye/awesome-mcp-servers)
|
||||
- [@article@The Ultimate Guide to MCP](https://guangzhengli.com/blog/en/model-context-protocol)
|
||||
- [@article@AWS MCP Servers for Code Assistants](https://aws.amazon.com/blogs/machine-learning/introducing-aws-mcp-servers-for-code-assistants-part-1/)
|
||||
- [@opensource@punkeye/awesome-mcp-servers](https://github.com/punkpeye/awesome-mcp-servers)
|
||||
- [@article@AWS MCP Servers for Code Assistants](https://aws.amazon.com/blogs/machine-learning/introducing-aws-mcp-servers-for-code-assistants-part-1/)
|
||||
@@ -4,7 +4,7 @@ An MCP Server is the main machine or cloud service that runs the Model Context P
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@Introducing the Azure MCP Server](https://devblogs.microsoft.com/azure-sdk/introducing-the-azure-mcp-server/)
|
||||
- [@opensource@punkeye/awesome-mcp-servers](https://github.com/punkpeye/awesome-mcp-servers)
|
||||
- [@article@Introducing the Azure MCP Server](https://devblogs.microsoft.com/azure-sdk/introducing-the-azure-mcp-server/)
|
||||
- [@article@The Ultimate Guide to MCP](https://guangzhengli.com/blog/en/model-context-protocol)
|
||||
- [@article@AWS MCP Servers for Code Assistants](https://aws.amazon.com/blogs/machine-learning/introducing-aws-mcp-servers-for-code-assistants-part-1/)
|
||||
- [@opensource@punkeye/awesome-mcp-servers](https://github.com/punkpeye/awesome-mcp-servers)
|
||||
- [@article@AWS MCP Servers for Code Assistants](https://aws.amazon.com/blogs/machine-learning/introducing-aws-mcp-servers-for-code-assistants-part-1/)
|
||||
@@ -5,6 +5,6 @@ To judge how well an AI agent works, you need clear numbers. Track accuracy, pre
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@Robustness Testing for AI](https://mitibmwatsonailab.mit.edu/category/robustness/)
|
||||
- [@article@Complete Guide to Machine Learning Evaluation Metrics](https://medium.com/analytics-vidhya/complete-guide-to-machine-learning-evaluation-metrics-615c2864d916)
|
||||
- [@article@Measuring Model Performance](https://developers.google.com/machine-learning/crash-course/classification/accuracy)
|
||||
- [@article@A Practical Framework for (Gen)AI Value Measurement](https://medium.com/google-cloud/a-practical-framework-for-gen-ai-value-measurement-5fccf3b66c43)
|
||||
- [@article@Complete Guide to Machine Learning Evaluation Metrics](https://medium.com/analytics-vidhya/complete-guide-to-machine-learning-evaluation-metrics-615c2864d916)
|
||||
- [@article@Measuring Model Performance](https://developers.google.com/machine-learning/crash-course/classification/accuracy)
|
||||
- [@article@A Practical Framework for (Gen)AI Value Measurement](https://medium.com/google-cloud/a-practical-framework-for-gen-ai-value-measurement-5fccf3b66c43)
|
||||
@@ -4,8 +4,8 @@ Model Context Protocol (MCP) is a rulebook that tells an AI agent how to pack ba
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@opensource@Model Context Protocol](https://github.com/modelcontextprotocol/modelcontextprotocol)
|
||||
- [@official@Model Context Protocol](https://modelcontextprotocol.io/introduction)
|
||||
- [@article@Introducing the Azure MCP Server](https://devblogs.microsoft.com/azure-sdk/introducing-the-azure-mcp-server/)
|
||||
- [@article@The Ultimate Guide to MCP](https://guangzhengli.com/blog/en/model-context-protocol)
|
||||
- [@course@MCP: Build Rich-Context AI Apps with Anthropic](https://www.deeplearning.ai/short-courses/mcp-build-rich-context-ai-apps-with-anthropic/)
|
||||
- [@official@Model Context Protocol](https://modelcontextprotocol.io/introduction)
|
||||
- [@opensource@Model Context Protocol](https://github.com/modelcontextprotocol/modelcontextprotocol)
|
||||
- [@article@Introducing the Azure MCP Server](https://devblogs.microsoft.com/azure-sdk/introducing-the-azure-mcp-server/)
|
||||
- [@article@The Ultimate Guide to MCP](https://guangzhengli.com/blog/en/model-context-protocol)
|
||||
@@ -4,5 +4,5 @@ Game studios use AI agents to control non-player characters (NPCs). The agent ob
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@Unreal Engine – AI for NPCs](https://dev.epicgames.com/documentation/en-us/unreal-engine/artificial-intelligence-in-unreal-engine?application_version=5.3)
|
||||
- [@official@Unreal Engine – AI for NPCs](https://dev.epicgames.com/documentation/en-us/unreal-engine/artificial-intelligence-in-unreal-engine?application_version=5.3)
|
||||
- [@article@AI-Driven NPCs: The Future of Gaming Explained](https://www.capermint.com/blog/everything-you-need-to-know-about-non-player-character-npc/)
|
||||
@@ -4,5 +4,5 @@ Observation and reflection form the thinking pause in an AI agent’s loop. Firs
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@Best Practices for Prompting and Self-checking](https://platform.openai.com/docs/guides/prompt-engineering)
|
||||
- [@article@Self-Reflective AI: Building Agents That Learn by Observing Themselves](https://arxiv.org/abs/2302.14045)
|
||||
- [@official@Best Practices for Prompting and Self-checking](https://platform.openai.com/docs/guides/prompt-engineering)
|
||||
- [@article@Self-Reflective AI: Building Agents That Learn by Observing Themselves](https://arxiv.org/abs/2302.14045)
|
||||
@@ -4,8 +4,8 @@ Open-weight models are neural networks whose trained parameters, also called wei
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@BLOOM BigScience](https://bigscience.huggingface.co/)
|
||||
- [@official@Falcon LLM – Technology Innovation Institute (TII)](https://falconllm.tii.ae/)
|
||||
- [@official@Llama 2 – Meta's Official Announcement](https://ai.meta.com/llama/)
|
||||
- [@official@Hugging Face – Open LLM Leaderboard (Top Open Models)](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
|
||||
- [@official@EleutherAI – Open Research Collective (GPT-Neo, GPT-J, etc.)](https://www.eleuther.ai/)
|
||||
- [@official@BLOOM BigScience](https://bigscience.huggingface.co/)
|
||||
- [@official@Falcon LLM – Technology Innovation Institute (TII)](https://falconllm.tii.ae/)
|
||||
- [@official@Llama 2 – Meta's Official Announcement](https://ai.meta.com/llama/)
|
||||
- [@official@Hugging Face – Open LLM Leaderboard (Top Open Models)](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
|
||||
- [@official@EleutherAI – Open Research Collective (GPT-Neo, GPT-J, etc.)](https://www.eleuther.ai/)
|
||||
@@ -4,7 +4,7 @@ The OpenAI Assistants API lets you add clear, task-specific actions to a chat wi
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@OpenAI Documentation – Assistants API Overview](https://platform.openai.com/docs/assistants/overview)
|
||||
- [@official@OpenAI Blog – Introducing the Assistants API](https://openai.com/blog/assistants-api)
|
||||
- [@official@OpenAI Cookbook – Assistants API Example](https://github.com/openai/openai-cookbook/blob/main/examples/Assistants_API_overview_python.ipynb)
|
||||
- [@official@OpenAI API Reference – Assistants Endpoints](https://platform.openai.com/docs/api-reference/assistants)
|
||||
- [@official@OpenAI Documentation – Assistants API Overview](https://platform.openai.com/docs/assistants/overview)
|
||||
- [@official@OpenAI Blog – Introducing the Assistants API](https://openai.com/blog/assistants-api)
|
||||
- [@official@OpenAI Cookbook – Assistants API Example](https://github.com/openai/openai-cookbook/blob/main/examples/Assistants_API_overview_python.ipynb)
|
||||
- [@official@OpenAI API Reference – Assistants Endpoints](https://platform.openai.com/docs/api-reference/assistants)
|
||||
@@ -4,8 +4,8 @@ OpenAI Function Calling lets you give a language model a list of tools and have
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@OpenAI Documentation – Function Calling](https://platform.openai.com/docs/guides/function-calling)
|
||||
- [@official@OpenAI Cookbook – Using Functions with GPT Models](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_call_functions_with_chat_models.ipynb)
|
||||
- [@official@OpenAI Blog – Announcing Function Calling and Other Updates](https://openai.com/blog/function-calling-and-other-api-updates)
|
||||
- [@official@OpenAI API Reference – Functions Section](https://platform.openai.com/docs/api-reference/chat/create#functions)
|
||||
- [@official@OpenAI Community – Discussions and Examples on Function Calling](https://community.openai.com/tag/function-calling)
|
||||
- [@official@OpenAI Documentation – Function Calling](https://platform.openai.com/docs/guides/function-calling)
|
||||
- [@official@OpenAI Cookbook – Using Functions with GPT Models](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_call_functions_with_chat_models.ipynb)
|
||||
- [@official@OpenAI Blog – Announcing Function Calling and Other Updates](https://openai.com/blog/function-calling-and-other-api-updates)
|
||||
- [@official@OpenAI API Reference – Functions Section](https://platform.openai.com/docs/api-reference/chat/create#functions)
|
||||
- [@official@OpenAI Community – Discussions and Examples on Function Calling](https://community.openai.com/tag/function-calling)
|
||||
@@ -4,7 +4,7 @@ openllmetry is a small Python library that makes it easy to watch what your AI a
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@OpenTelemetry Documentation](https://www.traceloop.com/blog/openllmetry)
|
||||
- [@official@What is OpenLLMetry? - traceloop](https://www.traceloop.com/docs/openllmetry/introduction)
|
||||
- [@official@OpenTelemetry Documentation](https://www.traceloop.com/blog/openllmetry)
|
||||
- [@official@What is OpenLLMetry? - traceloop](https://www.traceloop.com/docs/openllmetry/introduction)
|
||||
- [@official@Use Traceloop with Python](https://www.traceloop.com/docs/openllmetry/getting-started-python)
|
||||
- [@opensource@traceloop/openllmetry](https://github.com/traceloop/openllmetry)
|
||||
@@ -5,4 +5,4 @@ Perception, also called user input, is the first step in an agent loop. The agen
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@Perception in AI: Understanding Its Types and Importance](https://marktalks.com/perception-in-ai-understanding-its-types-and-importance/)
|
||||
- [@article@What Is AI Agent Perception? - IBM](https://www.ibm.com/think/topics/ai-agent-perception)
|
||||
- [@article@What Is AI Agent Perception? - IBM](https://www.ibm.com/think/topics/ai-agent-perception)
|
||||
@@ -5,4 +5,4 @@ A personal assistant AI agent is a smart program that helps one person manage da
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@A Complete Guide on AI-powered Personal Assistants](https://medium.com/@alexander_clifford/a-complete-guide-on-ai-powered-personal-assistants-with-examples-2f5cd894d566)
|
||||
- [@article@9 Best AI Personal Assistants for Work, Chat and Home](https://saner.ai/best-ai-personal-assistants/)
|
||||
- [@article@9 Best AI Personal Assistants for Work, Chat and Home](https://saner.ai/best-ai-personal-assistants/)
|
||||
@@ -6,4 +6,4 @@ Visit the following resources to learn more:
|
||||
|
||||
- [@official@OpenAI Pricing](https://openai.com/api/pricing/)
|
||||
- [@article@Executive Guide To AI Agent Pricing](https://www.forbes.com/councils/forbesbusinesscouncil/2025/01/28/executive-guide-to-ai-agent-pricing-winning-strategies-and-models-to-drive-growth/)
|
||||
- [@article@AI Pricing: How Much Does Artificial Intelligence Cost In 2025?](https://www.internetsearchinc.com/ai-pricing-how-much-does-artificial-intelligence-cost/)
|
||||
- [@article@AI Pricing: How Much Does Artificial Intelligence Cost In 2025?](https://www.internetsearchinc.com/ai-pricing-how-much-does-artificial-intelligence-cost/)
|
||||
@@ -6,4 +6,4 @@ Visit the following resources to learn more:
|
||||
|
||||
- [@article@Prompt Injection vs. Jailbreaking: What's the Difference?](https://learnprompting.org/blog/injection_jailbreaking)
|
||||
- [@article@Prompt Injection vs Prompt Jailbreak](https://codoid.com/ai/prompt-injection-vs-prompt-jailbreak-a-detailed-comparison/)
|
||||
- [@article@How Prompt Attacks Exploit GenAI and How to Fight Back](https://unit42.paloaltonetworks.com/new-frontier-of-genai-threats-a-comprehensive-guide-to-prompt-attacks/)
|
||||
- [@article@How Prompt Attacks Exploit GenAI and How to Fight Back](https://unit42.paloaltonetworks.com/new-frontier-of-genai-threats-a-comprehensive-guide-to-prompt-attacks/)
|
||||
@@ -6,4 +6,4 @@ Visit the following resources to learn more:
|
||||
|
||||
- [@article@What is Context in Prompt Engineering?](https://www.godofprompt.ai/blog/what-is-context-in-prompt-engineering)
|
||||
- [@article@The Importance of Context for Reliable AI Systems](https://medium.com/mathco-ai/the-importance-of-context-for-reliable-ai-systems-and-how-to-provide-context-009bd1ac7189/)
|
||||
- [@article@Context Engineering: Why Feeding AI the Right Context Matters](https://inspirednonsense.com/context-engineering-why-feeding-ai-the-right-context-matters-353e8f87d6d3)
|
||||
- [@article@Context Engineering: Why Feeding AI the Right Context Matters](https://inspirednonsense.com/context-engineering-why-feeding-ai-the-right-context-matters-353e8f87d6d3)
|
||||
@@ -5,4 +5,4 @@ A RAG (Retrieval-Augmented Generation) agent mixes search with language generati
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@What is RAG? - Retrieval-Augmented Generation AI Explained](https://aws.amazon.com/what-is/retrieval-augmented-generation/)
|
||||
- [@article@What Is Retrieval-Augmented Generation, aka RAG?](https://blogs.nvidia.com/blog/what-is-retrieval-augmented-generation/)
|
||||
- [@article@What Is Retrieval-Augmented Generation, aka RAG?](https://blogs.nvidia.com/blog/what-is-retrieval-augmented-generation/)
|
||||
@@ -6,4 +6,4 @@ Visit the following resources to learn more:
|
||||
|
||||
- [@article@Understanding Retrieval-Augmented Generation (RAG) and Vector Databases](https://pureai.com/Articles/2025/03/03/Understanding-RAG.aspx)
|
||||
- [@article@Build Advanced Retrieval-Augmented Generation Systems](https://learn.microsoft.com/en-us/azure/developer/ai/advanced-retrieval-augmented-generation)
|
||||
- [@article@What Is Retrieval-Augmented Generation, aka RAG?](https://blogs.nvidia.com/blog/what-is-retrieval-augmented-generation/)
|
||||
- [@article@What Is Retrieval-Augmented Generation, aka RAG?](https://blogs.nvidia.com/blog/what-is-retrieval-augmented-generation/)
|
||||
@@ -5,5 +5,5 @@ Ragas is an open-source tool used to check how well a Retrieval-Augmented Genera
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@Ragas Documentation](https://docs.ragas.io/en/latest/)
|
||||
- [@article@Evaluating RAG Applications with RAGAs](https://towardsdatascience.com/evaluating-rag-applications-with-ragas-81d67b0ee31a/)
|
||||
- [@opensource@explodinggradients/ragas](https://github.com/explodinggradients/ragas)
|
||||
- [@article@Evaluating RAG Applications with RAGAs](https://towardsdatascience.com/evaluating-rag-applications-with-ragas-81d67b0ee31a/)
|
||||
@@ -5,4 +5,4 @@ ReAct is an agent pattern that makes a model alternate between two simple steps:
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@ReAct: Synergizing Reasoning and Acting in Language Models](https://react-lm.github.io/)
|
||||
- [@article@ReAct Systems: Enhancing LLMs with Reasoning and Action](https://learnprompting.org/docs/agents/react)
|
||||
- [@article@ReAct Systems: Enhancing LLMs with Reasoning and Action](https://learnprompting.org/docs/agents/react)
|
||||
@@ -5,4 +5,4 @@ Reason and Plan is the moment when an AI agent thinks before it acts. The agent
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@ReAct: Synergizing Reasoning and Acting in Language Models](https://react-lm.github.io/)
|
||||
- [@article@ReAct Systems: Enhancing LLMs with Reasoning and Action](https://learnprompting.org/docs/agents/react)
|
||||
- [@article@ReAct Systems: Enhancing LLMs with Reasoning and Action](https://learnprompting.org/docs/agents/react)
|
||||
@@ -5,4 +5,4 @@ Reasoning models break a task into clear steps and follow a line of logic, while
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@ReAct: Synergizing Reasoning and Acting in Language Models](https://react-lm.github.io/)
|
||||
- [@article@ReAct Systems: Enhancing LLMs with Reasoning and Action](https://learnprompting.org/docs/agents/react)
|
||||
- [@article@ReAct Systems: Enhancing LLMs with Reasoning and Action](https://learnprompting.org/docs/agents/react)
|
||||
@@ -5,4 +5,4 @@ A **REST API** (Representational State Transfer) is an architectural style for d
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@What is RESTful API? - RESTful API Explained - AWS](https://aws.amazon.com/what-is/restful-api/)
|
||||
- [@article@What Is a REST API? Examples, Uses & Challenges ](https://blog.postman.com/rest-api-examples/)
|
||||
- [@article@What Is a REST API? Examples, Uses & Challenges ](https://blog.postman.com/rest-api-examples/)
|
||||
@@ -2,26 +2,25 @@
|
||||
|
||||
Short term memory are the facts which are passed as a part of the prompt to the LLM e.g. there might be a prompt like below:
|
||||
|
||||
```text
|
||||
Users Profile:
|
||||
- name: {name}
|
||||
- age: {age}
|
||||
- expertise: {expertise}
|
||||
|
||||
User is currently learning about {current_topic}. User has some goals in mind which are:
|
||||
- {goal_1}
|
||||
- {goal_2}
|
||||
- {goal_3}
|
||||
|
||||
Help the user achieve the goals.
|
||||
```
|
||||
Users Profile:
|
||||
- name: {name}
|
||||
- age: {age}
|
||||
- expertise: {expertise}
|
||||
|
||||
User is currently learning about {current_topic}. User has some goals in mind which are:
|
||||
- {goal_1}
|
||||
- {goal_2}
|
||||
- {goal_3}
|
||||
|
||||
Help the user achieve the goals.
|
||||
|
||||
|
||||
Notice how we injected the user's profile, current topic and goals in the prompt. These are all short term memories.
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@Memory Management in AI Agents](https://python.langchain.com/docs/how_to/chatbots_memory/)
|
||||
- [@article@Memory Management in AI Agents](https://python.langchain.com/docs/how_to/chatbots_memory/)
|
||||
- [@article@Build Smarter AI Agents: Manage Short-term and Long-term Memory](https://redis.io/blog/build-smarter-ai-agents-manage-short-term-and-long-term-memory-with-redis/)
|
||||
- [@article@Storing and Retrieving Knowledge for Agents](https://www.pinecone.io/learn/langchain-retrieval-augmentation/)
|
||||
- [@article@Storing and Retrieving Knowledge for Agents](https://www.pinecone.io/learn/langchain-retrieval-augmentation/)
|
||||
- [@article@Short-Term vs Long-Term Memory in AI Agents](https://adasci.org/short-term-vs-long-term-memory-in-ai-agents/)
|
||||
- [@video@Building Brain-Like Memory for AI Agents](https://www.youtube.com/watch?v=VKPngyO0iKg)
|
||||
- [@video@Building Brain-Like Memory for AI Agents](https://www.youtube.com/watch?v=VKPngyO0iKg)
|
||||
@@ -4,6 +4,6 @@ Smol Depot is an open-source kit that lets you bundle all the parts of a small A
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@smol.ai - Continuous Fine-tuning Platform for AI Engineers](https://smol.candycode.dev/)
|
||||
- [@official@smol.ai - Continuous Fine-tuning Platform for AI Engineers](https://smol.candycode.dev/)
|
||||
- [@article@5-min Smol AI Tutorial](https://www.ai-jason.com/learning-ai/smol-ai-tutorial)
|
||||
- [@video@Smol AI Full Beginner Course](https://www.youtube.com/watch?v=d7qFVrpLh34)
|
||||
- [@video@Smol AI Full Beginner Course](https://www.youtube.com/watch?v=d7qFVrpLh34)
|
||||
@@ -4,5 +4,5 @@ When you give a task to an AI, make clear how long the answer should be and what
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@Mastering Prompt Engineering: Format, Length, and Audience](https://techlasi.com/savvy/mastering-prompt-engineering-format-length-and-audience-examples-for-2024/)
|
||||
- [@article@Ultimate Guide to Prompt Engineering](https://promptdrive.ai/prompt-engineering/)
|
||||
- [@article@Mastering Prompt Engineering: Format, Length, and Audience](https://techlasi.com/savvy/mastering-prompt-engineering-format-length-and-audience-examples-for-2024/)
|
||||
- [@article@Ultimate Guide to Prompt Engineering](https://promptdrive.ai/prompt-engineering/)
|
||||
@@ -5,4 +5,4 @@ Stopping criteria tell the language model when to stop writing more text. Withou
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@Defining Stopping Criteria in Large Language Models](https://www.metriccoders.com/post/defining-stopping-criteria-in-large-language-models-a-practical-guide)
|
||||
- [@article@Stopping Criteria for Decision Tree Algorithm and Tree Plots](https://aieagle.in/stopping-criteria-for-decision-tree-algorithm-and-tree-plots/)
|
||||
- [@article@Stopping Criteria for Decision Tree Algorithm and Tree Plots](https://aieagle.in/stopping-criteria-for-decision-tree-algorithm-and-tree-plots/)
|
||||
@@ -1,6 +1,6 @@
|
||||
# Structured Logging & Tracing
|
||||
|
||||
Structured logging and tracing are ways to record what an AI agent does so you can find and fix problems fast. Instead of dumping plain text, the agent writes logs in a fixed key-value format, such as time, user_id, step, and message. Because every entry follows the same shape, search tools can filter, sort, and count events with ease. Tracing links those log lines into a chain that follows one request or task across many functions, threads, or microservices. By adding a unique trace ID to each step, you can see how long each part took and where errors happened. Together, structured logs and traces offer clear, machine-readable data that helps developers spot slow code paths, unusual behavior, and hidden bugs without endless manual scans.
|
||||
Structured logging and tracing are ways to record what an AI agent does so you can find and fix problems fast. Instead of dumping plain text, the agent writes logs in a fixed key-value format, such as time, user\_id, step, and message. Because every entry follows the same shape, search tools can filter, sort, and count events with ease. Tracing links those log lines into a chain that follows one request or task across many functions, threads, or microservices. By adding a unique trace ID to each step, you can see how long each part took and where errors happened. Together, structured logs and traces offer clear, machine-readable data that helps developers spot slow code paths, unusual behavior, and hidden bugs without endless manual scans.
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
|
||||
@@ -5,4 +5,4 @@ Summarization or compression lets an AI agent keep the gist of past chats withou
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@Evaluating LLMs for Text Summarization](https://insights.sei.cmu.edu/blog/evaluating-llms-for-text-summarization-introduction/)
|
||||
- [@article@The Ultimate Guide to AI Document Summarization](https://www.documentllm.com/blog/ai-document-summarization-guide)
|
||||
- [@article@The Ultimate Guide to AI Document Summarization](https://www.documentllm.com/blog/ai-document-summarization-guide)
|
||||
@@ -7,4 +7,4 @@ Visit the following resources to learn more:
|
||||
- [@article@What Temperature Means in Natural Language Processing and AI](https://thenewstack.io/what-temperature-means-in-natural-language-processing-and-ai/)
|
||||
- [@article@LLM Temperature: How It Works and When You Should Use It](https://www.vellum.ai/llm-parameters/temperature)
|
||||
- [@article@What is LLM Temperature? - IBM](https://www.ibm.com/think/topics/llm-temperature)
|
||||
- [@article@How Temperature Settings Transform Your AI Agent's Responses](https://docsbot.ai/article/how-temperature-settings-transform-your-ai-agents-responses)
|
||||
- [@article@How Temperature Settings Transform Your AI Agent's Responses](https://docsbot.ai/article/how-temperature-settings-transform-your-ai-agents-responses)
|
||||
@@ -5,4 +5,4 @@ Tokenization is the step where raw text is broken into small pieces called token
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@Explaining Tokens — the Language and Currency of AI](https://blogs.nvidia.com/blog/ai-tokens-explained/)
|
||||
- [@article@What is Tokenization? Types, Use Cases, Implementation](https://www.datacamp.com/blog/what-is-tokenization)
|
||||
- [@article@What is Tokenization? Types, Use Cases, Implementation](https://www.datacamp.com/blog/what-is-tokenization)
|
||||
@@ -5,4 +5,4 @@ A tool is any skill or function that an AI agent can call to get a job done. It
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@Understanding the Agent Function in AI: Key Roles and Responsibilities](https://pingax.com/ai/agent/function/understanding-the-agent-function-in-ai-key-roles-and-responsibilities/)
|
||||
- [@article@What is an AI Tool?](https://www.synthesia.io/glossary/ai-tool)
|
||||
- [@article@What is an AI Tool?](https://www.synthesia.io/glossary/ai-tool)
|
||||
@@ -6,4 +6,4 @@ Visit the following resources to learn more:
|
||||
|
||||
- [@article@AI Sandbox | Harvard University Information Technology](https://www.huit.harvard.edu/ai-sandbox)
|
||||
- [@article@How to Set Up AI Sandboxes to Maximize Adoption](https://medium.com/@emilholmegaard/how-to-set-up-ai-sandboxes-to-maximize-adoption-without-compromising-ethics-and-values-637c70626130)
|
||||
- [@article@Sandboxes for AI - The Datasphere Initiative](https://www.thedatasphere.org/datasphere-publish/sandboxes-for-ai/)
|
||||
- [@article@Sandboxes for AI - The Datasphere Initiative](https://www.thedatasphere.org/datasphere-publish/sandboxes-for-ai/)
|
||||
@@ -6,4 +6,4 @@ Visit the following resources to learn more:
|
||||
|
||||
- [@article@Nucleus Sampling](https://nn.labml.ai/sampling/nucleus.html)
|
||||
- [@article@Sampling Techniques in Large Language Models (LLMs)](https://medium.com/@shashankag14/understanding-sampling-techniques-in-large-language-models-llms-dfc28b93f518)
|
||||
- [@article@Temperature, top_p and top_k for chatbot responses](https://community.openai.com/t/temperature-top-p-and-top-k-for-chatbot-responses/295542)
|
||||
- [@article@Temperature, top_p and top_k for chatbot responses](https://community.openai.com/t/temperature-top-p-and-top-k-for-chatbot-responses/295542)
|
||||
@@ -5,4 +5,4 @@ Transformer models are a type of neural network that read input data—like word
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@Exploring Open Source AI Models: LLMs and Transformer Architectures](https://llmmodels.org/blog/exploring-open-source-ai-models-llms-and-transformer-architectures/)
|
||||
- [@article@How Transformer LLMs Work](https://www.deeplearning.ai/short-courses/how-transformer-llms-work/)
|
||||
- [@article@How Transformer LLMs Work](https://www.deeplearning.ai/short-courses/how-transformer-llms-work/)
|
||||
@@ -6,4 +6,4 @@ Visit the following resources to learn more:
|
||||
|
||||
- [@article@Tree of Thoughts (ToT) | Prompt Engineering Guide](https://www.promptingguide.ai/techniques/tot)
|
||||
- [@article@What is tree-of-thoughts? - IBM](https://www.ibm.com/think/topics/tree-of-thoughts)
|
||||
- [@article@The Revolutionary Approach of Tree-of-Thought Prompting in AI](https://medium.com/@WeavePlatform/the-revolutionary-approach-of-tree-of-thought-prompting-in-ai-eb7c0872247b)
|
||||
- [@article@The Revolutionary Approach of Tree-of-Thought Prompting in AI](https://medium.com/@WeavePlatform/the-revolutionary-approach-of-tree-of-thought-prompting-in-ai-eb7c0872247b)
|
||||
@@ -7,4 +7,4 @@ Visit the following resources to learn more:
|
||||
- [@article@What Is RAG in AI and How to Use It?](https://www.v7labs.com/blog/what-is-rag)
|
||||
- [@article@An Introduction to RAG and Simple & Complex RAG](https://medium.com/enterprise-rag/an-introduction-to-rag-and-simple-complex-rag-9c3aa9bd017b)
|
||||
- [@video@Learn RAG From Scratch](https://www.youtube.com/watch?v=sVcwVQRHIc8)
|
||||
- [@video@What is Retrieval-Augmented Generation (RAG)?](https://www.youtube.com/watch?v=T-D1OfcDW1M)
|
||||
- [@video@What is Retrieval-Augmented Generation (RAG)?](https://www.youtube.com/watch?v=T-D1OfcDW1M)
|
||||
@@ -5,4 +5,4 @@ User profile storage is the part of an AI agent’s memory that holds stable fac
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@Storage Technology Explained: AI and Data Storage](https://www.computerweekly.com/feature/Storage-technology-explained-AI-and-the-data-storage-it-needs)
|
||||
- [@partner@The Architect's Guide to Storage for AI - The New Stack](https://thenewstack.io/the-architects-guide-to-storage-for-ai/)
|
||||
- [@article@The Architect's Guide to Storage for AI - The New Stack](https://thenewstack.io/the-architects-guide-to-storage-for-ai/)
|
||||
@@ -5,4 +5,4 @@ Tools are extra skills or resources that an AI agent can call on to finish a job
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@Compare 50+ AI Agent Tools in 2025 - AIMultiple](https://research.aimultiple.com/ai-agent-tools/)
|
||||
- [@article@AI Agents Explained in Simple Terms for Beginners](https://www.geeky-gadgets.com/ai-agents-explained-for-beginners/)
|
||||
- [@article@AI Agents Explained in Simple Terms for Beginners](https://www.geeky-gadgets.com/ai-agents-explained-for-beginners/)
|
||||
@@ -5,7 +5,7 @@ Agent memory is the part of an AI agent that keeps track of what has already hap
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@Agentic Memory for LLM Agents](https://arxiv.org/abs/2502.12110)
|
||||
- [@article@Memory Management in AI Agents](https://python.langchain.com/docs/how_to/chatbots_memory/)
|
||||
- [@article@Storing and Retrieving Knowledge for Agents](https://www.pinecone.io/learn/langchain-retrieval-augmentation/)
|
||||
- [@article@Memory Management in AI Agents](https://python.langchain.com/docs/how_to/chatbots_memory/)
|
||||
- [@article@Storing and Retrieving Knowledge for Agents](https://www.pinecone.io/learn/langchain-retrieval-augmentation/)
|
||||
- [@article@Short-Term vs Long-Term Memory in AI Agents](https://adasci.org/short-term-vs-long-term-memory-in-ai-agents/)
|
||||
- [@video@Building Brain-Like Memory for AI Agents](https://www.youtube.com/watch?v=VKPngyO0iKg)
|
||||
- [@video@Building Brain-Like Memory for AI Agents](https://www.youtube.com/watch?v=VKPngyO0iKg)
|
||||
@@ -6,4 +6,4 @@ Visit the following resources to learn more:
|
||||
|
||||
- [@roadmap@Visit Dedicated Prompt Engineering Roadmap](https://roadmap.sh/prompt-engineering)
|
||||
- [@article@What is Prompt Engineering? - AI Prompt Engineering Explained - AWS](https://aws.amazon.com/what-is/prompt-engineering/)
|
||||
- [@article@What is Prompt Engineering? A Detailed Guide For 2025](https://www.datacamp.com/blog/what-is-prompt-engineering-the-future-of-ai-communication)
|
||||
- [@article@What is Prompt Engineering? A Detailed Guide For 2025](https://www.datacamp.com/blog/what-is-prompt-engineering-the-future-of-ai-communication)
|
||||
@@ -1,9 +1,10 @@
|
||||
# Agents Usecases
|
||||
# Agents Use Cases
|
||||
|
||||
AI Agents have a variety of usecases ranging from customer support, workflow automation, cybersecurity, finance, marketing and sales, and more.
|
||||
AI Agents have a variety of use cases ranging from customer support, workflow automation, cybersecurity, finance, marketing, and sales, and more.
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@Top 15 Use Cases Of AI Agents In Business](https://www.ampcome.com/post/15-use-cases-of-ai-agents-in-business)
|
||||
- [@article@A Brief Guide on AI Agents: Benefits and Use Cases](https://www.codica.com/blog/brief-guide-on-ai-agents/)
|
||||
- [@article@How to Build Effective AI Agents to Process Millions of Requests](https://towardsdatascience.com/how-to-build-effective-ai-agents-to-process-millions-of-requests/?utm_source=roadmap&utm_medium=Referral&utm_campaign=TDS+roadmap+integration)
|
||||
- [@video@The Complete Guide to Building AI Agents for Beginners](https://youtu.be/MOyl58VF2ak?si=-QjRD_5y3iViprJX)
|
||||
@@ -6,4 +6,5 @@ Visit the following resources to learn more:
|
||||
|
||||
- [@article@Building an AI Agent Tutorial - LangChain](https://python.langchain.com/docs/tutorials/agents/)
|
||||
- [@article@AI Agents and Their Types](https://play.ht/blog/ai-agents-use-cases/)
|
||||
- [@article@How to Design My First AI Agent](https://towardsdatascience.com/how-to-design-my-first-ai-agent/?utm_source=roadmap&utm_medium=Referral&utm_campaign=TDS+roadmap+integration)
|
||||
- [@video@The Complete Guide to Building AI Agents for Beginners](https://youtu.be/MOyl58VF2ak?si=-QjRD_5y3iViprJX)
|
||||
@@ -1,9 +0,0 @@
|
||||
# AI Agents
|
||||
|
||||
In AI engineering, "agents" refer to autonomous systems or components that can perceive their environment, make decisions, and take actions to achieve specific goals. Agents often interact with external systems, users, or other agents to carry out complex tasks. They can vary in complexity, from simple rule-based bots to sophisticated AI-powered agents that leverage machine learning models, natural language processing, and reinforcement learning.
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@Building an AI Agent Tutorial - LangChain](https://python.langchain.com/docs/tutorials/agents/)
|
||||
- [@article@AI agents and their types](https://play.ht/blog/ai-agents-use-cases/)
|
||||
- [@video@The Complete Guide to Building AI Agents for Beginners](https://youtu.be/MOyl58VF2ak?si=-QjRD_5y3iViprJX)
|
||||
@@ -1,14 +0,0 @@
|
||||
# AI Code Editors
|
||||
|
||||
AI code editors are development tools that leverage artificial intelligence to assist software developers in writing, debugging, and optimizing code. These editors go beyond traditional syntax highlighting and code completion by incorporating machine learning models, natural language processing, and data analysis to understand code context, generate suggestions, and even automate portions of the software development process.
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@Gemini CLI - Google's AI coding assistant for command line](https://github.com/google-gemini/gemini-cli)
|
||||
- [@official@OpenAI Codex - AI code generation via API and CLI](https://openai.com/codex/)
|
||||
- [@article@Cursor - The AI Code Editor](https://www.cursor.com/)
|
||||
- [@article@PearAI - The Open Source, Extendable AI Code Editor](https://trypear.ai/)
|
||||
- [@article@Bolt - Prompt, run, edit, and deploy full-stack web apps](https://bolt.new)
|
||||
- [@article@Replit - Build Apps using AI](https://replit.com/ai)
|
||||
- [@article@v0 - Build Apps with AI](https://v0.dev)
|
||||
- [@article@Claude Code - AI coding assistant in terminal](https://www.claude.com/product/claude-code)
|
||||
@@ -6,4 +6,5 @@ Visit the following resources to learn more:
|
||||
|
||||
- [@article@What does an AI Engineer do?](https://www.codecademy.com/resources/blog/what-does-an-ai-engineer-do/)
|
||||
- [@article@What is an ML Engineer?](https://www.coursera.org/articles/what-is-machine-learning-engineer)
|
||||
- [@article@Machine Learning vs AI Engineer: What Are the Differences?](https://towardsdatascience.com/machine-learning-vs-ai-engineer-no-confusing-jargon/?utm_source=roadmap&utm_medium=Referral&utm_campaign=TDS+roadmap+integration)
|
||||
- [@video@AI vs ML](https://www.youtube.com/watch?v=4RixMPF4xis)
|
||||
@@ -5,4 +5,5 @@ AI safety and ethics involve establishing guidelines and best practices to ensur
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@Understanding Artificial Intelligence Ethics and Safety](https://www.turing.ac.uk/news/publications/understanding-artificial-intelligence-ethics-and-safety)
|
||||
- [@article@The Hidden Security Risks of LLMs](https://towardsdatascience.com/the-hidden-security-risks-of-llms/?utm_source=roadmap&utm_medium=Referral&utm_campaign=TDS+roadmap+integration)
|
||||
- [@video@What is AI Ethics?](https://www.youtube.com/watch?v=aGwYtUzMQUk)
|
||||
@@ -1,8 +1,9 @@
|
||||
# AI vs AGI
|
||||
|
||||
AI (Artificial Intelligence) refers to systems designed to perform specific tasks by mimicking aspects of human intelligence, such as pattern recognition, decision-making, and language processing. These systems, known as "narrow AI," are highly specialized, excelling in defined areas like image classification or recommendation algorithms but lacking broader cognitive abilities. In contrast, AGI (Artificial General Intelligence) represents a theoretical form of intelligence that possesses the ability to understand, learn, and apply knowledge across a wide range of tasks at a human-like level. AGI would have the capacity for abstract thinking, reasoning, and adaptability similar to human cognitive abilities, making it far more versatile than today’s AI systems. While current AI technology is powerful, AGI remains a distant goal and presents complex challenges in safety, ethics, and technical feasibility.
|
||||
AI (Artificial Intelligence) refers to systems designed to perform specific tasks by mimicking aspects of human intelligence, such as pattern recognition, decision-making, and language processing. These systems, known as "narrow AI," are highly specialized, excelling in specific areas such as image classification or recommender algorithms but lacking broader cognitive abilities. In contrast, AGI (Artificial General Intelligence) is a theoretical form of intelligence that can understand, learn, and apply knowledge across a wide range of tasks at a human-like level. AGI would have the capacity for abstract thinking, reasoning, and adaptability similar to human cognitive abilities, making it far more versatile than today’s AI systems. While current AI technology is powerful, AGI remains a distant goal and presents complex challenges in safety, ethics, and technical feasibility.
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@What is AGI?](https://aws.amazon.com/what-is/artificial-general-intelligence/)
|
||||
- [@article@The crucial difference between AI and AGI](https://www.forbes.com/sites/bernardmarr/2024/05/20/the-crucial-difference-between-ai-and-agi/)
|
||||
- [@article@The crucial difference between AI and AGI](https://www.forbes.com/sites/bernardmarr/2024/05/20/the-crucial-difference-between-ai-and-agi/)
|
||||
- [@article@Stop Worrying about AGI: The Immediate Danger is Reduced General Intelligence (RGI)](https://towardsdatascience.com/stop-worrying-about-agi-the-immediate-danger-is-reduced-general-intelligence-rgi/?utm_source=roadmap&utm_medium=Referral&utm_campaign=TDS+roadmap+integration)
|
||||
@@ -4,4 +4,5 @@ Anomaly detection with embeddings works by transforming data, such as text, imag
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@Anomaly in Embeddings](https://ai.google.dev/gemini-api/tutorials/anomaly_detection)
|
||||
- [@article@Anomaly in Embeddings](https://ai.google.dev/gemini-api/tutorials/anomaly_detection)
|
||||
- [@article@Boosting Your Anomaly Detection With LLMs](https://towardsdatascience.com/boosting-your-anomaly-detection-with-llms/?utm_source=roadmap&utm_medium=Referral&utm_campaign=TDS+roadmap+integration)
|
||||
@@ -1,8 +1,10 @@
|
||||
# Anthropic's Claude
|
||||
|
||||
Anthropic's Claude is an AI language model designed to facilitate safe and scalable AI systems. Named after Claude Shannon, the father of information theory, Claude focuses on responsible AI use, emphasizing safety, alignment with human intentions, and minimizing harmful outputs. Built as a competitor to models like OpenAI's GPT, Claude is designed to handle natural language tasks such as generating text, answering questions, and supporting conversations, with a strong focus on aligning AI behavior with user goals while maintaining transparency and avoiding harmful biases.
|
||||
Anthropic's Claude is an AI language model designed to facilitate safe and scalable AI systems. Named after Claude Shannon, the father of information theory, Claude focuses on responsible AI use, emphasizing safety, alignment with human intentions, and minimizing harmful outputs.
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@course@Claude 101](https://anthropic.skilljar.com/claude-101)
|
||||
- [@official@Claude](https://claude.ai)
|
||||
- [@video@How To Use Claude Pro For Beginners](https://www.youtube.com/watch?v=J3X_JWQkvo8)
|
||||
- [@video@How To Use Claude Pro For Beginners](https://www.youtube.com/watch?v=J3X_JWQkvo8)
|
||||
- [@video@Claude FULL COURSE 1 HOUR (Build & Automate Anything)](https://www.youtube.com/watch?v=KrKhfm2Xuho)
|
||||
@@ -1,8 +0,0 @@
|
||||
# AWS SageMaker
|
||||
|
||||
AWS SageMaker is a fully managed machine learning service from Amazon Web Services that enables developers and data scientists to build, train, and deploy machine learning models at scale. It provides an integrated development environment, simplifying the entire ML workflow, from data preparation and model development to training, tuning, and inference. SageMaker supports popular ML frameworks like TensorFlow, PyTorch, and Scikit-learn, and offers features like automated model tuning, model monitoring, and one-click deployment. It's designed to make machine learning more accessible and scalable, even for large enterprise applications.
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@AWS SageMaker](https://aws.amazon.com/sagemaker/)
|
||||
- [@video@Introduction to Amazon SageMaker](https://www.youtube.com/watch?v=Qv_Tr_BCFCQ)
|
||||
@@ -1,8 +0,0 @@
|
||||
# Azure AI
|
||||
|
||||
Azure AI is a suite of AI services and tools provided by Microsoft through its Azure cloud platform. It includes pre-built AI models for natural language processing, computer vision, and speech, as well as tools for developing custom machine learning models using services like Azure Machine Learning. Azure AI enables developers to integrate AI capabilities into applications with APIs for tasks like sentiment analysis, image recognition, and language translation. It also supports responsible AI development with features for model monitoring, explainability, and fairness, aiming to make AI accessible, scalable, and secure across industries.
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@Azure AI](https://azure.microsoft.com/en-gb/solutions/ai)
|
||||
- [@video@How to Choose the Right Models for Your Apps](https://www.youtube.com/watch?v=sx_uGylH8eg)
|
||||
@@ -1,8 +0,0 @@
|
||||
# Benefits of Pre-trained Models
|
||||
|
||||
Pre-trained models offer several benefits in AI engineering by significantly reducing development time and computational resources because these models are trained on large datasets and can be fine-tuned for specific tasks, which enables quicker deployment and better performance with less data. They help overcome the challenge of needing vast amounts of labeled data and computational power for training from scratch. Additionally, pre-trained models often demonstrate improved accuracy, generalization, and robustness across different tasks, making them ideal for applications in natural language processing, computer vision, and other AI domains.
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@Why Pre-Trained Models Matter For Machine Learning](https://www.ahead.com/resources/why-pre-trained-models-matter-for-machine-learning/)
|
||||
- [@article@Why You Should Use Pre-Trained Models Versus Building Your Own](https://cohere.com/blog/pre-trained-vs-in-house-nlp-models)
|
||||
@@ -1,8 +0,0 @@
|
||||
# Capabilities / Context Length
|
||||
|
||||
A key aspect of the OpenAI models is their context length, which refers to the amount of input text the model can process at once. Earlier models like GPT-3 had a context length of up to 4,096 tokens (words or word pieces), while more recent models like GPT-4 can handle significantly larger context lengths, some supporting up to 32,768 tokens. This extended context length enables the models to handle more complex tasks, such as maintaining long conversations or processing lengthy documents, which enhances their utility in real-world applications like legal document analysis or code generation.
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@Managing Context](https://platform.openai.com/docs/guides/conversation-state?api-mode=responses#managing-context-for-text-generation)
|
||||
- [@official@Capabilities](https://platform.openai.com/docs/guides/text-generation)
|
||||
@@ -1,8 +0,0 @@
|
||||
# Chat Completions API
|
||||
|
||||
The OpenAI Chat Completions API is a powerful interface that allows developers to integrate conversational AI into applications by utilizing models like GPT-3.5 and GPT-4. It is designed to manage multi-turn conversations, keeping context across interactions, making it ideal for chatbots, virtual assistants, and interactive AI systems. With the API, users can structure conversations by providing messages in a specific format, where each message has a role (e.g., "system" to guide the model, "user" for input, and "assistant" for responses).
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@Create Chat Completions](https://platform.openai.com/docs/api-reference/chat/create)
|
||||
- [@article@Getting Started with the Chat Completions API](https://medium.com/the-ai-archives/getting-started-with-openais-chat-completions-api-in-2024-462aae00bf0a)
|
||||
@@ -0,0 +1,8 @@
|
||||
# Choosing the Right Model
|
||||
|
||||
Selecting the appropriate large language model (LLM) involves considering factors like task complexity, budget, and desired performance. Simpler tasks may only require smaller, more efficient models, while more complex problems benefit from larger models with greater capacity. Cost is also a crucial factor, as larger models generally require more computational resources. You'll also need to assess the model's accuracy, speed, and ability to generalize to new, unseen data. Consider fine-tuning existing models on your specific dataset if you need specialized performance.
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@Choosing the right model](https://bentoml.com/llm/getting-started/choosing-the-right-model)
|
||||
- [@article@Beyond vibes: How to properly select the right LLM for the right task](https://aws.amazon.com/blogs/machine-learning/beyond-vibes-how-to-properly-select-the-right-llm-for-the-right-task/)
|
||||
@@ -0,0 +1,10 @@
|
||||
# Claude Agent SDK
|
||||
|
||||
The Claude Agent SDK provides tools and libraries to build autonomous AI agents powered by Anthropic's Claude models. It simplifies the development process by offering pre-built components and abstractions for tasks such as planning, tool usage, memory management, and human interaction. This allows developers to focus on defining the agent's behavior and capabilities rather than the underlying infrastructure.
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@Agent SDK overview](https://platform.claude.com/docs/en/agent-sdk/overview)
|
||||
- [@article@Getting Started with the Claude Agent SDK](https://www.kdnuggets.com/getting-started-with-the-claude-agent-sdk)
|
||||
- [@video@Building Custom AI Agents Just Got EASIER - Claude Agent SDK](https://www.youtube.com/watch?v=NsROagHaKxA)
|
||||
- [@video@Claude Agents SDK BEATS all Agent Framework! (Beginners Guide)](https://www.youtube.com/watch?v=i6N8oQQ0tUE)
|
||||
@@ -0,0 +1,10 @@
|
||||
# Claude Code
|
||||
|
||||
Claude Code refers to the code-generation capabilities of Anthropic's Claude AI model. It's designed to assist developers by understanding natural language prompts and translating them into functional code across various programming languages. This allows developers to automate repetitive coding tasks, generate code snippets, and even create entire functions or modules based on descriptive instructions.
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@roadmap@Visit the Dedicated Claude Code Roadmap](https://roadmap.sh/claude-code)
|
||||
- [@course@Claude Code in Action](https://anthropic.skilljar.com/claude-code-in-action)
|
||||
- [@official@Claude Code Overview](https://code.claude.com/docs/en/overview)
|
||||
- [@video@Introducing Claude Code](https://www.youtube.com/watch?v=AJpK3YTTKZ4)
|
||||
@@ -0,0 +1,9 @@
|
||||
# Claude Messages API
|
||||
|
||||
The Claude Messages API provides a structured way to interact with the Claude AI model. It allows developers to send a series of messages to Claude, mimicking a conversation. These messages can include text, images, and even structured data, enabling you to build complex interactions and extract specific insights from the model's responses through structured inputs and outputs.
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@Messages API](https://platform.claude.com/docs/en/api/messages)
|
||||
- [@official@Using the Messages API](https://platform.claude.com/docs/en/build-with-claude/working-with-messages)
|
||||
- [@article@Claude API: How to get a key and use the API](https://zapier.com/blog/claude-api/)
|
||||
@@ -4,5 +4,6 @@ Open-source models are freely available for customization and collaboration, pro
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@article@OpenAI vs. Open Source LLM](https://ubiops.com/openai-vs-open-source-llm/)
|
||||
- [@article@Open Source vs Closed Source LLMs: A Guide](https://hatchworks.com/blog/gen-ai/open-source-vs-closed-llms-guide/)
|
||||
- [@video@Open Source vs Closed AI: LLMs, Agents & the AI Stack Explained](https://www.youtube.com/watch?v=_QfxGZGITGw)
|
||||
- [@video@Open-Source vs Closed-Source LLMs](https://www.youtube.com/watch?v=710PDpuLwOc)
|
||||
@@ -1,10 +0,0 @@
|
||||
# Code Completion Tools
|
||||
|
||||
Code completion tools are AI-powered development assistants designed to enhance productivity by automatically suggesting code snippets, functions, and entire blocks of code as developers type. These tools, such as GitHub Copilot and Tabnine, leverage machine learning models trained on vast code repositories to predict and generate contextually relevant code. They help reduce repetitive coding tasks, minimize errors, and accelerate the development process by offering real-time, intelligent suggestions.
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@GitHub Copilot](https://github.com/features/copilot)
|
||||
- [@official@Codeium](https://codeium.com/)
|
||||
- [@official@Supermaven](https://supermaven.com/)
|
||||
- [@official@Tabnine](https://www.tabnine.com/)
|
||||
@@ -0,0 +1,8 @@
|
||||
# Codex
|
||||
|
||||
Codex is an AI model created by OpenAI that translates natural language into code. It's designed to understand and generate code in a variety of programming languages, including Python, JavaScript, and more. Codex is particularly adept at interpreting comments and instructions to produce functional code snippets, making it a powerful tool for automating and accelerating the software development process.
|
||||
|
||||
Visit the following resources to learn more:
|
||||
|
||||
- [@official@Codex - Official Website](https://chatgpt.com/codex)
|
||||
- [@video@Getting started with Codex](https://www.youtube.com/watch?v=px7XlbYgk7I)
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user