From 8661342bb1ae3b75f0268c4a6f489735fe7b277a Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 2 Mar 2026 18:51:33 +0100 Subject: [PATCH] chore: sync content to repo (#9663) Co-authored-by: kamranahmedse <4921183+kamranahmedse@users.noreply.github.com> --- .../content/agents-usecases@778HsQzTuJ_3c9OSn5DmH.md | 4 ++-- .../content/ai-agents@Uffu609uQbIzDl88Ddccv.md | 9 +++++++++ .../content/anthropic-claude@hy6EyKiNxk1x84J63dhez.md | 10 ++++++++++ .../choosing-the-right-model@zeWoTtAFEpVXDQzWNsha4.md | 8 ++++++++ .../content/claude-agent-sdk@xXLyuUNrKEc32XLQxMjgT.md | 10 ++++++++++ .../content/claude-code@wr5ddjutC-fX_ixysTHaT.md | 3 +++ .../claude-messages-api@nxwMVla0-PNG8nlocKK5v.md | 9 +++++++++ ...osed-vs-open-source-models@RBwGsq9DngUsl8PrrCbqx.md | 9 +++++++++ .../ai-engineer/content/codex@XY2l96sry3WyLzzo3KUeU.md | 3 +++ .../content/cohere@a7qsvoauFe5u953I699ps.md | 3 ++- .../content/cohere@y0qD5Kb4Pf-ymIwW-tvhX.md | 8 ++++++++ .../content/constrains@PT3uDiUjiKhO8laOkCmgP.md | 7 +++++++ .../content-moderation-apis@ljZLa3yjQpegiZWwtnn_q.md | 9 +++++++++ .../context-compaction@9XCxilAQ7FRet7lHQr1gE.md | 8 ++++++++ .../context-engineering@kCiHNaZ9CgnS9uksIQ_SY.md | 9 +++++++++ .../content/context-isolation@9JwWIK0Z2MK8-6EQQJsCO.md | 8 ++++++++ .../content/context@9oUpeEnaMWctQLTobbmY7.md | 3 +++ .../content/context@vvpYkmycH0_W030E-L12f.md | 10 ++++++++++ .../ai-engineer/content/cot@nyBgEHvUhwF-NANMwkRJW.md | 10 ++++++++++ .../content/cursor@MWhoqhNnBaoeCdN_8i15k.md | 3 +++ .../content/deepseek@UGYYh2W1XnnbgYNY8L8Hb.md | 9 +++++++++ .../content/embedding-models@fr0UOXlVVctkk1K84Z8Of.md | 8 ++++++++ .../content/embeddings@XyEp6jnBSpCxMGwALnYfT.md | 2 +- .../content/external-memory@KWjD4xEPhOOYS51dvRLd2.md | 8 ++++++++ .../content/few-shot@DZPM9zjCbYYWBPLmQImxQ.md | 10 ++++++++++ .../content/fine-tuning@zTvsCNS3ucsZmvy1tHyeI.md | 9 +++++++++ .../content/function-calling@wFfjHkGLrcCyLyXV4BiFM.md | 10 ++++++++++ .../content/gemini-embedding@4GArjDYipit4SLqKZAWDf.md | 9 +++++++++ .../content/gemini@hzeEo8COf2l07iu5EdlFo.md | 3 +++ .../content/gemma2@MNtaY1_kOJHeoWuM-abb4.md | 8 ++++++++ .../content/google-adk@mbp2NoL-VZ5hZIIblNBXt.md | 6 +++--- .../content/google-gemini-api@TsG_I7FL-cOCSw8gvZH3r.md | 9 +++++++++ .../content/google-gemini@oe8E6ZIQWuYvHVbYJHUc1.md | 10 ++++++++++ .../content/how-llms-work@zdeuA4GbdBl2DwKgiOA4G.md | 7 +++++++ .../content/hugging-face-hub@YLOdOvLXa5Fa7_mmuvKEi.md | 6 +++--- ...hugging-face-inference-sdk@3kRTzlLNBnXdTsAEXVu_M.md | 8 ++++++++ .../content/hugging-face@v99C5Bml2a6148LCJ9gy9.md | 2 +- .../content/inference@4NtUD9V64gkp8SFudj_ai.md | 8 ++++++++ .../content/input-format@LCHse57rXf3sl8ml1ow0T.md | 8 ++++++++ .../ai-engineer/content/jina@apVYIV4EyejPft25oAvdI.md | 8 ++++++++ .../large-language-model-llm@wf2BSyUekr1S1q6l8kyq6.md | 10 ++++++++++ .../content/lm-studio@a_3SabylVqzzOyw3tZN5f.md | 8 ++++++++ .../manual-implementation@6xaRB34_g0HGt-y1dGYXR.md | 8 ++++---- .../content/meta-llama@OkYO-aSPiuVYuLXHswBCn.md | 8 ++++++++ .../content/mistral@n-Ud2dXkqIzK37jlKItN4.md | 8 ++++++++ .../models-on-hugging-face@dLEg4IA3F5jgc44Bst9if.md | 2 +- .../content/multi-agents@kG1bkF2oY21CJOm9zhdpn.md | 9 +++++++++ .../content/nanobanana-api@6y73FLjshnqxV8BTGUeiu.md | 7 +++++++ .../open-ai-embeddings-api@l6priWeJhbdUD5tJ7uHyG.md | 2 ++ .../content/openai-agentkit@Sm0Ne5Nx72hcZCdAcC0C2.md | 9 +++++++++ .../openai-compatible-apis@vnXtUupJUlyU_uCbZBbnk.md | 8 ++++++++ 
.../openai-gpt-o-series@3PQVZbcr4neNMRr6CuNzS.md | 9 +++++++++ .../openai-response-api@ro3vY_sp6xMQ-hfzO-rc1.md | 9 +++++++++ .../content/openrouter@1GlpMjmdAWor0X_BnISGg.md | 9 +++++++++ .../content/prompt-caching@bqqY0gsZkBpcHMZw1hcZ5.md | 8 ++++++++ .../prompt-engineering@VjXmSCdzi2ACv-W85Sy9D.md | 9 +++++++++ ...mpt-vs-context-engineering@ozrR8IvjNFbHd44kZrExX.md | 10 ++++++++++ .../ai-engineer/content/qwen@c0RPhpD00VIUgF4HJgN2T.md | 8 ++++++++ .../rag-and-dynamic-filters@LnQ2AatMWpExUHcZhDIPd.md | 8 ++++++++ .../ai-engineer/content/rag@IX1BJWGwGmB4L063g0Frf.md | 8 ++++++++ .../ai-engineer/content/react@Waox7xR_yUeSnOtQFzU4c.md | 8 ++++++++ .../repetition-penalties@0_pa739kMPWHfuSQV-VO7.md | 8 ++++++++ .../content/replit@Ubk4GN0Z4XlDJ3EbRXdxg.md | 3 +++ .../content/role--behavior@N3TzWYxU0jgv1l99Ts58n.md | 3 +++ .../sampling-parameters@LbB2PeytxRSuU07Bk0KlJ.md | 3 +++ .../self-hosted-models@_qqITQ8O0Q0RWUeZsUXnY.md | 8 ++++++++ .../streaming-responses@MUDBYjR7uCUZQ-kQxi2K_.md | 9 +++++++++ .../content/structured-output@zqhmLzHsmDlrTFVHzhq6-.md | 9 +++++++++ .../content/system-prompting@S46Vaq8hYq6Ee1Id_-fSQ.md | 8 ++++++++ .../content/temperature@_bPTciEA1GT1JwfXim19z.md | 10 ++++++++++ .../content/tokens@2WbVpRLqwi3Oeqk1JPui4.md | 9 +++++++++ .../tools--function-calling@eOqCBgBTKM8CmY3nsWjre.md | 10 ++++++++++ .../ai-engineer/content/top-k@qzvp6YxWDiGakA2mtspfh.md | 8 ++++++++ .../ai-engineer/content/top-p@FjV3oD7G2Ocq5HhUC17iH.md | 8 ++++++++ .../content/type-of-models@2X0NDcq2ojBJ0RxY_U6bl.md | 3 +++ .../using-sdks-directly@WZVW8FQu6LyspSKm1C_sl.md | 2 +- .../content/vector-dbs@dzPKW_fn82lY1OOVrggk3.md | 8 ++++++++ .../vertex-ai-agent-builder@AxzTGDCC2Ftp4G66U4Uqr.md | 9 +++++++++ .../what-are-embeddings@--ig0Ume_BnXb9K2U7HJN.md | 5 +++-- .../content/windsurf@Xsl8mx6J182TxPPtNP471.md | 3 +++ .../content/zero-shot@15XOFdVp0IC-kLYPXUJWh.md | 10 ++++++++++ 81 files changed, 573 insertions(+), 19 deletions(-) create mode 100644 src/data/roadmaps/ai-engineer/content/ai-agents@Uffu609uQbIzDl88Ddccv.md create mode 100644 src/data/roadmaps/ai-engineer/content/anthropic-claude@hy6EyKiNxk1x84J63dhez.md create mode 100644 src/data/roadmaps/ai-engineer/content/choosing-the-right-model@zeWoTtAFEpVXDQzWNsha4.md create mode 100644 src/data/roadmaps/ai-engineer/content/claude-agent-sdk@xXLyuUNrKEc32XLQxMjgT.md create mode 100644 src/data/roadmaps/ai-engineer/content/claude-code@wr5ddjutC-fX_ixysTHaT.md create mode 100644 src/data/roadmaps/ai-engineer/content/claude-messages-api@nxwMVla0-PNG8nlocKK5v.md create mode 100644 src/data/roadmaps/ai-engineer/content/closed-vs-open-source-models@RBwGsq9DngUsl8PrrCbqx.md create mode 100644 src/data/roadmaps/ai-engineer/content/codex@XY2l96sry3WyLzzo3KUeU.md create mode 100644 src/data/roadmaps/ai-engineer/content/cohere@y0qD5Kb4Pf-ymIwW-tvhX.md create mode 100644 src/data/roadmaps/ai-engineer/content/constrains@PT3uDiUjiKhO8laOkCmgP.md create mode 100644 src/data/roadmaps/ai-engineer/content/content-moderation-apis@ljZLa3yjQpegiZWwtnn_q.md create mode 100644 src/data/roadmaps/ai-engineer/content/context-compaction@9XCxilAQ7FRet7lHQr1gE.md create mode 100644 src/data/roadmaps/ai-engineer/content/context-engineering@kCiHNaZ9CgnS9uksIQ_SY.md create mode 100644 src/data/roadmaps/ai-engineer/content/context-isolation@9JwWIK0Z2MK8-6EQQJsCO.md create mode 100644 src/data/roadmaps/ai-engineer/content/context@9oUpeEnaMWctQLTobbmY7.md create mode 100644 src/data/roadmaps/ai-engineer/content/context@vvpYkmycH0_W030E-L12f.md create mode 100644 
src/data/roadmaps/ai-engineer/content/cot@nyBgEHvUhwF-NANMwkRJW.md create mode 100644 src/data/roadmaps/ai-engineer/content/cursor@MWhoqhNnBaoeCdN_8i15k.md create mode 100644 src/data/roadmaps/ai-engineer/content/deepseek@UGYYh2W1XnnbgYNY8L8Hb.md create mode 100644 src/data/roadmaps/ai-engineer/content/embedding-models@fr0UOXlVVctkk1K84Z8Of.md create mode 100644 src/data/roadmaps/ai-engineer/content/external-memory@KWjD4xEPhOOYS51dvRLd2.md create mode 100644 src/data/roadmaps/ai-engineer/content/few-shot@DZPM9zjCbYYWBPLmQImxQ.md create mode 100644 src/data/roadmaps/ai-engineer/content/fine-tuning@zTvsCNS3ucsZmvy1tHyeI.md create mode 100644 src/data/roadmaps/ai-engineer/content/function-calling@wFfjHkGLrcCyLyXV4BiFM.md create mode 100644 src/data/roadmaps/ai-engineer/content/gemini-embedding@4GArjDYipit4SLqKZAWDf.md create mode 100644 src/data/roadmaps/ai-engineer/content/gemini@hzeEo8COf2l07iu5EdlFo.md create mode 100644 src/data/roadmaps/ai-engineer/content/gemma2@MNtaY1_kOJHeoWuM-abb4.md create mode 100644 src/data/roadmaps/ai-engineer/content/google-gemini-api@TsG_I7FL-cOCSw8gvZH3r.md create mode 100644 src/data/roadmaps/ai-engineer/content/google-gemini@oe8E6ZIQWuYvHVbYJHUc1.md create mode 100644 src/data/roadmaps/ai-engineer/content/how-llms-work@zdeuA4GbdBl2DwKgiOA4G.md create mode 100644 src/data/roadmaps/ai-engineer/content/hugging-face-inference-sdk@3kRTzlLNBnXdTsAEXVu_M.md create mode 100644 src/data/roadmaps/ai-engineer/content/inference@4NtUD9V64gkp8SFudj_ai.md create mode 100644 src/data/roadmaps/ai-engineer/content/input-format@LCHse57rXf3sl8ml1ow0T.md create mode 100644 src/data/roadmaps/ai-engineer/content/jina@apVYIV4EyejPft25oAvdI.md create mode 100644 src/data/roadmaps/ai-engineer/content/large-language-model-llm@wf2BSyUekr1S1q6l8kyq6.md create mode 100644 src/data/roadmaps/ai-engineer/content/lm-studio@a_3SabylVqzzOyw3tZN5f.md create mode 100644 src/data/roadmaps/ai-engineer/content/meta-llama@OkYO-aSPiuVYuLXHswBCn.md create mode 100644 src/data/roadmaps/ai-engineer/content/mistral@n-Ud2dXkqIzK37jlKItN4.md create mode 100644 src/data/roadmaps/ai-engineer/content/multi-agents@kG1bkF2oY21CJOm9zhdpn.md create mode 100644 src/data/roadmaps/ai-engineer/content/nanobanana-api@6y73FLjshnqxV8BTGUeiu.md create mode 100644 src/data/roadmaps/ai-engineer/content/openai-agentkit@Sm0Ne5Nx72hcZCdAcC0C2.md create mode 100644 src/data/roadmaps/ai-engineer/content/openai-compatible-apis@vnXtUupJUlyU_uCbZBbnk.md create mode 100644 src/data/roadmaps/ai-engineer/content/openai-gpt-o-series@3PQVZbcr4neNMRr6CuNzS.md create mode 100644 src/data/roadmaps/ai-engineer/content/openai-response-api@ro3vY_sp6xMQ-hfzO-rc1.md create mode 100644 src/data/roadmaps/ai-engineer/content/openrouter@1GlpMjmdAWor0X_BnISGg.md create mode 100644 src/data/roadmaps/ai-engineer/content/prompt-caching@bqqY0gsZkBpcHMZw1hcZ5.md create mode 100644 src/data/roadmaps/ai-engineer/content/prompt-engineering@VjXmSCdzi2ACv-W85Sy9D.md create mode 100644 src/data/roadmaps/ai-engineer/content/prompt-vs-context-engineering@ozrR8IvjNFbHd44kZrExX.md create mode 100644 src/data/roadmaps/ai-engineer/content/qwen@c0RPhpD00VIUgF4HJgN2T.md create mode 100644 src/data/roadmaps/ai-engineer/content/rag-and-dynamic-filters@LnQ2AatMWpExUHcZhDIPd.md create mode 100644 src/data/roadmaps/ai-engineer/content/rag@IX1BJWGwGmB4L063g0Frf.md create mode 100644 src/data/roadmaps/ai-engineer/content/react@Waox7xR_yUeSnOtQFzU4c.md create mode 100644 src/data/roadmaps/ai-engineer/content/repetition-penalties@0_pa739kMPWHfuSQV-VO7.md create mode 100644 
src/data/roadmaps/ai-engineer/content/replit@Ubk4GN0Z4XlDJ3EbRXdxg.md create mode 100644 src/data/roadmaps/ai-engineer/content/role--behavior@N3TzWYxU0jgv1l99Ts58n.md create mode 100644 src/data/roadmaps/ai-engineer/content/sampling-parameters@LbB2PeytxRSuU07Bk0KlJ.md create mode 100644 src/data/roadmaps/ai-engineer/content/self-hosted-models@_qqITQ8O0Q0RWUeZsUXnY.md create mode 100644 src/data/roadmaps/ai-engineer/content/streaming-responses@MUDBYjR7uCUZQ-kQxi2K_.md create mode 100644 src/data/roadmaps/ai-engineer/content/structured-output@zqhmLzHsmDlrTFVHzhq6-.md create mode 100644 src/data/roadmaps/ai-engineer/content/system-prompting@S46Vaq8hYq6Ee1Id_-fSQ.md create mode 100644 src/data/roadmaps/ai-engineer/content/temperature@_bPTciEA1GT1JwfXim19z.md create mode 100644 src/data/roadmaps/ai-engineer/content/tokens@2WbVpRLqwi3Oeqk1JPui4.md create mode 100644 src/data/roadmaps/ai-engineer/content/tools--function-calling@eOqCBgBTKM8CmY3nsWjre.md create mode 100644 src/data/roadmaps/ai-engineer/content/top-k@qzvp6YxWDiGakA2mtspfh.md create mode 100644 src/data/roadmaps/ai-engineer/content/top-p@FjV3oD7G2Ocq5HhUC17iH.md create mode 100644 src/data/roadmaps/ai-engineer/content/type-of-models@2X0NDcq2ojBJ0RxY_U6bl.md create mode 100644 src/data/roadmaps/ai-engineer/content/vector-dbs@dzPKW_fn82lY1OOVrggk3.md create mode 100644 src/data/roadmaps/ai-engineer/content/vertex-ai-agent-builder@AxzTGDCC2Ftp4G66U4Uqr.md create mode 100644 src/data/roadmaps/ai-engineer/content/windsurf@Xsl8mx6J182TxPPtNP471.md create mode 100644 src/data/roadmaps/ai-engineer/content/zero-shot@15XOFdVp0IC-kLYPXUJWh.md diff --git a/src/data/roadmaps/ai-engineer/content/agents-usecases@778HsQzTuJ_3c9OSn5DmH.md b/src/data/roadmaps/ai-engineer/content/agents-usecases@778HsQzTuJ_3c9OSn5DmH.md index 1b9663b93..030ae1200 100644 --- a/src/data/roadmaps/ai-engineer/content/agents-usecases@778HsQzTuJ_3c9OSn5DmH.md +++ b/src/data/roadmaps/ai-engineer/content/agents-usecases@778HsQzTuJ_3c9OSn5DmH.md @@ -1,6 +1,6 @@ -# Agents Usecases +# Agents Use Cases -AI Agents have a variety of usecases ranging from customer support, workflow automation, cybersecurity, finance, marketing and sales, and more. +AI Agents have a variety of use cases, ranging from customer support, workflow automation, and cybersecurity to finance, marketing, sales, and more. Visit the following resources to learn more: diff --git a/src/data/roadmaps/ai-engineer/content/ai-agents@Uffu609uQbIzDl88Ddccv.md b/src/data/roadmaps/ai-engineer/content/ai-agents@Uffu609uQbIzDl88Ddccv.md new file mode 100644 index 000000000..81514b712 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/ai-agents@Uffu609uQbIzDl88Ddccv.md @@ -0,0 +1,9 @@ +# AI Agents + +In AI engineering, "agents" refer to autonomous systems or components that can perceive their environment, make decisions, and take actions to achieve specific goals. Agents often interact with external systems, users, or other agents to carry out complex tasks. They can vary in complexity, from simple rule-based bots to sophisticated AI-powered agents that leverage machine learning models, natural language processing, and reinforcement learning.
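+
+The loop below is a minimal, framework-free sketch of the perceive-decide-act cycle described above, using a rule-based thermostat as the "simple bot" end of the spectrum. All names are illustrative; an LLM-powered agent would replace the hand-written `decide` step with model reasoning:
+
+```python
+class ReflexAgent:
+    """A simple rule-based agent: perceive -> decide -> act."""
+
+    def __init__(self, target_temp: float):
+        self.target_temp = target_temp  # the agent's goal
+
+    def perceive(self, environment: dict) -> float:
+        # Observe the part of the environment the agent can see.
+        return environment["temperature"]
+
+    def decide(self, temperature: float) -> str:
+        # Rule-based policy; sophisticated agents reason with an LLM here.
+        return "heat_on" if temperature < self.target_temp else "heat_off"
+
+    def act(self, action: str, environment: dict) -> None:
+        # Actions change the environment the agent operates in.
+        environment["heater"] = (action == "heat_on")
+
+agent = ReflexAgent(target_temp=21.0)
+env = {"temperature": 18.5, "heater": False}
+agent.act(agent.decide(agent.perceive(env)), env)
+print(env["heater"])  # True: the agent acted to move toward its goal
+```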
+ +Visit the following resources to learn more: + +- [@article@Building an AI Agent Tutorial - LangChain](https://python.langchain.com/docs/tutorials/agents/) +- [@article@AI Agents and Their Types](https://www.digitalocean.com/resources/articles/types-of-ai-agents) +- [@video@The Complete Guide to Building AI Agents for Beginners](https://youtu.be/MOyl58VF2ak?si=-QjRD_5y3iViprJX) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/anthropic-claude@hy6EyKiNxk1x84J63dhez.md b/src/data/roadmaps/ai-engineer/content/anthropic-claude@hy6EyKiNxk1x84J63dhez.md new file mode 100644 index 000000000..8d9c6327d --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/anthropic-claude@hy6EyKiNxk1x84J63dhez.md @@ -0,0 +1,10 @@ +# Anthropic's Claude + +Anthropic's Claude is an AI language model designed to facilitate safe and scalable AI systems. Named after Claude Shannon, the father of information theory, Claude focuses on responsible AI use, emphasizing safety, alignment with human intentions, and minimizing harmful outputs. + +Visit the following resources to learn more: + +- [@official@Claude](https://claude.ai) +- [@course@Claude 101](https://anthropic.skilljar.com/claude-101) +- [@video@How To Use Claude Pro For Beginners](https://www.youtube.com/watch?v=J3X_JWQkvo8) +- [@video@Claude FULL COURSE 1 HOUR (Build & Automate Anything)](https://www.youtube.com/watch?v=KrKhfm2Xuho) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/choosing-the-right-model@zeWoTtAFEpVXDQzWNsha4.md b/src/data/roadmaps/ai-engineer/content/choosing-the-right-model@zeWoTtAFEpVXDQzWNsha4.md new file mode 100644 index 000000000..8f07c146e --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/choosing-the-right-model@zeWoTtAFEpVXDQzWNsha4.md @@ -0,0 +1,8 @@ +# Choosing the Right Model + +Selecting the appropriate large language model (LLM) involves considering factors like task complexity, budget, and desired performance. Simpler tasks may only require smaller, more efficient models, while more complex problems benefit from larger models with greater capacity. Cost is also a crucial factor, as larger models generally require more computational resources. You'll also need to assess the model's accuracy, speed, and ability to generalize to new, unseen data. Consider fine-tuning existing models on your specific dataset if you need specialized performance. + +Visit the following resources to learn more: + +- [@article@Choosing the right model](https://bentoml.com/llm/getting-started/choosing-the-right-model) +- [@article@Beyond vibes: How to properly select the right LLM for the right task](https://aws.amazon.com/blogs/machine-learning/beyond-vibes-how-to-properly-select-the-right-llm-for-the-right-task/) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/claude-agent-sdk@xXLyuUNrKEc32XLQxMjgT.md b/src/data/roadmaps/ai-engineer/content/claude-agent-sdk@xXLyuUNrKEc32XLQxMjgT.md new file mode 100644 index 000000000..c3ddf551f --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/claude-agent-sdk@xXLyuUNrKEc32XLQxMjgT.md @@ -0,0 +1,10 @@ +# Claude Agent SDK + +The Claude Agent SDK provides tools and libraries to build autonomous AI agents powered by Anthropic's Claude models. It simplifies the development process by offering pre-built components and abstractions for tasks such as planning, tool usage, memory management, and human interaction. 
This allows developers to focus on defining the agent's behavior and capabilities rather than the underlying infrastructure. + +Visit the following resources to learn more: + +- [@official@Agent SDK overview](https://platform.claude.com/docs/en/agent-sdk/overview) +- [@article@Getting Started with the Claude Agent SDK](https://www.kdnuggets.com/getting-started-with-the-claude-agent-sdk) +- [@video@Building Custom AI Agents Just Got EASIER - Claude Agent SDK](https://www.youtube.com/watch?v=NsROagHaKxA) +- [@video@Claude Agents SDK BEATS all Agent Framework! (Beginners Guide)](https://www.youtube.com/watch?v=i6N8oQQ0tUE) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/claude-code@wr5ddjutC-fX_ixysTHaT.md b/src/data/roadmaps/ai-engineer/content/claude-code@wr5ddjutC-fX_ixysTHaT.md new file mode 100644 index 000000000..6ffb1f263 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/claude-code@wr5ddjutC-fX_ixysTHaT.md @@ -0,0 +1,3 @@ +# Claude Code + +Claude Code is Anthropic's agentic coding tool, built on the Claude models. It runs in the terminal and in IDE integrations, understands natural language prompts, and can read, edit, and run code directly in a project. This allows developers to automate repetitive coding tasks, generate code snippets, fix bugs, and even build entire functions or modules from descriptive instructions. \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/claude-messages-api@nxwMVla0-PNG8nlocKK5v.md b/src/data/roadmaps/ai-engineer/content/claude-messages-api@nxwMVla0-PNG8nlocKK5v.md new file mode 100644 index 000000000..ed600c857 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/claude-messages-api@nxwMVla0-PNG8nlocKK5v.md @@ -0,0 +1,9 @@ +# Claude Messages API + +The Claude Messages API provides a structured way to interact with the Claude AI model. It allows developers to send a series of messages to Claude, mimicking a conversation. These messages can include text, images, and even structured data, enabling you to build complex interactions and extract specific insights from the model's responses through structured inputs and outputs. + +Visit the following resources to learn more: + +- [@official@Messages API](https://platform.claude.com/docs/en/api/messages) +- [@official@Using the Messages API](https://platform.claude.com/docs/en/build-with-claude/working-with-messages) +- [@article@Claude API: How to get a key and use the API](https://zapier.com/blog/claude-api/) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/closed-vs-open-source-models@RBwGsq9DngUsl8PrrCbqx.md b/src/data/roadmaps/ai-engineer/content/closed-vs-open-source-models@RBwGsq9DngUsl8PrrCbqx.md new file mode 100644 index 000000000..af74f72c6 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/closed-vs-open-source-models@RBwGsq9DngUsl8PrrCbqx.md @@ -0,0 +1,9 @@ +# Open vs Closed Source Models + +Open-source models are freely available for customization and collaboration, promoting transparency and flexibility, while closed-source models are proprietary, offering ease of use but limiting modification and transparency.
+ +Visit the following resources to learn more: + +- [@article@Open-Source LLMs vs Closed: Unbiased Guide for Innovative Companies [2026]](https://hatchworks.com/blog/gen-ai/open-source-vs-closed-llms-guide/) +- [@video@Open Source vs Closed AI: LLMs, Agents & the AI Stack Explained](https://www.youtube.com/watch?v=_QfxGZGITGw) +- [@video@Open-Source vs Closed-Source LLMs](https://www.youtube.com/watch?v=710PDpuLwOc) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/codex@XY2l96sry3WyLzzo3KUeU.md b/src/data/roadmaps/ai-engineer/content/codex@XY2l96sry3WyLzzo3KUeU.md new file mode 100644 index 000000000..1af441e33 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/codex@XY2l96sry3WyLzzo3KUeU.md @@ -0,0 +1,3 @@ +# Codex + +Codex is an AI model created by OpenAI that translates natural language into code. It's designed to understand and generate code in a variety of programming languages, including Python, JavaScript, and more. Codex is particularly adept at interpreting comments and instructions to produce functional code snippets, making it a powerful tool for automating and accelerating the software development process. \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/cohere@a7qsvoauFe5u953I699ps.md b/src/data/roadmaps/ai-engineer/content/cohere@a7qsvoauFe5u953I699ps.md index 6744b867a..321924b70 100644 --- a/src/data/roadmaps/ai-engineer/content/cohere@a7qsvoauFe5u953I699ps.md +++ b/src/data/roadmaps/ai-engineer/content/cohere@a7qsvoauFe5u953I699ps.md @@ -1,8 +1,9 @@ # Cohere -Cohere is an AI platform that specializes in natural language processing (NLP) by providing large language models designed to help developers build and deploy text-based applications. Cohere’s models are used for tasks such as text classification, language generation, semantic search, and sentiment analysis. Unlike some other providers, Cohere emphasizes simplicity and scalability, offering an easy-to-use API that allows developers to fine-tune models on custom data for specific use cases. Additionally, Cohere provides robust multilingual support and focuses on ensuring that its NLP solutions are both accessible and enterprise-ready, catering to a wide range of industries. +Cohere offers a suite of closed large language models (LLMs) designed for various natural language processing tasks. These models are accessible through an API and are trained and maintained by Cohere, meaning users don't have to worry about the complexities of training and fine-tuning their own models from scratch. Cohere focuses on providing enterprise-grade NLP solutions emphasizing safety, reliability, and ease of integration. Visit the following resources to learn more: - [@official@Cohere](https://cohere.com/) +- [@official@LLM University - Cohere](https://cohere.com/llmu) - [@article@What Does Cohere Do?](https://medium.com/geekculture/what-does-cohere-do-cdadf6d70435) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/cohere@y0qD5Kb4Pf-ymIwW-tvhX.md b/src/data/roadmaps/ai-engineer/content/cohere@y0qD5Kb4Pf-ymIwW-tvhX.md new file mode 100644 index 000000000..fdaf144a9 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/cohere@y0qD5Kb4Pf-ymIwW-tvhX.md @@ -0,0 +1,8 @@ +# Cohere Embeddings + +Cohere offers powerful text embeddings, which are numerical representations of text data. 
These embeddings capture the semantic meaning of words, sentences, and documents, allowing AI models to understand relationships and perform tasks like similarity search, clustering, and information retrieval. Cohere's embedding models are designed for high accuracy and performance across various natural language processing applications. + +Visit the following resources to learn more: + +- [@official@Introduction to Embeddings at Cohere](https://docs.cohere.com/docs/embeddings) +- [@official@What are embedding models? Benefits and best practices](https://cohere.com/blog/embedding-models) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/constrains@PT3uDiUjiKhO8laOkCmgP.md b/src/data/roadmaps/ai-engineer/content/constrains@PT3uDiUjiKhO8laOkCmgP.md new file mode 100644 index 000000000..3e989bc0e --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/constrains@PT3uDiUjiKhO8laOkCmgP.md @@ -0,0 +1,7 @@ +# Constraining Prompts + +Constraining system prompts involves explicitly defining boundaries and limitations within the instructions given to a large language model (LLM). This technique focuses on guiding the LLM to operate within a specific scope, preventing it from generating irrelevant, harmful, or factually incorrect responses by setting rules and restrictions on its behavior and output format. + +Visit the following resources to learn more: + +- [@article@Introduction: The Power of Clear Instructions](https://codesignal.com/learn/courses/prompting-foundations/lessons/defining-constraints-and-requirements-for-effective-prompts) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/content-moderation-apis@ljZLa3yjQpegiZWwtnn_q.md b/src/data/roadmaps/ai-engineer/content/content-moderation-apis@ljZLa3yjQpegiZWwtnn_q.md new file mode 100644 index 000000000..d5d5c04e1 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/content-moderation-apis@ljZLa3yjQpegiZWwtnn_q.md @@ -0,0 +1,9 @@ +# Content Moderation APIs + +Content Moderation APIs are tools that automatically analyze text, images, video, and audio to detect potentially harmful or inappropriate content. These APIs use machine learning models to identify violations of predefined policies related to areas like hate speech, violence, self-harm, and sexually suggestive material. The results allow developers to filter or take action against problematic user-generated content. + +Visit the following resources to learn more: + +- [@official@Moderation API](https://platform.openai.com/docs/guides/moderation) +- [@article@How to use the moderation API](https://cookbook.openai.com/examples/how_to_use_moderation) +- [@article@Content moderation: What it is, how it works, and the best APIs](https://www.assemblyai.com/blog/content-moderation-what-it-is-how-it-works-best-apis-2) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/context-compaction@9XCxilAQ7FRet7lHQr1gE.md b/src/data/roadmaps/ai-engineer/content/context-compaction@9XCxilAQ7FRet7lHQr1gE.md new file mode 100644 index 000000000..08a969e00 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/context-compaction@9XCxilAQ7FRet7lHQr1gE.md @@ -0,0 +1,8 @@ +# Context Compaction + +Context compaction is a technique used to reduce the length of the context provided to a large language model (LLM) without sacrificing relevant information. 
This process aims to remove redundant, irrelevant, or less important information from the context window to make room for more data or improve the efficiency and effectiveness of the LLM's processing. Compaction can involve techniques like summarization, filtering, or re-ranking of context information. + +Visit the following resources to learn more: + +- [@article@Context Engineering](https://blog.langchain.com/context-engineering-for-agents/) +- [@opensource@Context Compaction](https://gist.github.com/badlogic/cd2ef65b0697c4dbe2d13fbecb0a0a5f) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/context-engineering@kCiHNaZ9CgnS9uksIQ_SY.md b/src/data/roadmaps/ai-engineer/content/context-engineering@kCiHNaZ9CgnS9uksIQ_SY.md new file mode 100644 index 000000000..aecac8810 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/context-engineering@kCiHNaZ9CgnS9uksIQ_SY.md @@ -0,0 +1,9 @@ +# Context Engineering + +Context engineering is the practice of carefully designing and organizing the information you give to an AI model so it can do its job better. Think of it like preparing everything an AI needs before it starts working (including providing the right instructions, examples, background knowledge, and conversation history), all put together smartly so the model gives you the best possible answer. Instead of just asking a question and hoping for the best, you are building the perfect "environment" of information around the AI to guide it toward success. + +Visit the following resources to learn more: + +- [@article@Context Engineering Guide](https://www.promptingguide.ai/guides/context-engineering-guide) +- [@article@Effective context engineering for AI agents](https://www.anthropic.com/engineering/effective-context-engineering-for-ai-agents) +- [@video@Context Engineering vs. Prompt Engineering: Smarter AI with RAG & Agents](https://www.youtube.com/watch?v=vD0E3EUb8-8) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/context-isolation@9JwWIK0Z2MK8-6EQQJsCO.md b/src/data/roadmaps/ai-engineer/content/context-isolation@9JwWIK0Z2MK8-6EQQJsCO.md new file mode 100644 index 000000000..09a2b5b03 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/context-isolation@9JwWIK0Z2MK8-6EQQJsCO.md @@ -0,0 +1,8 @@ +# Context Isolation + +Context isolation is about keeping different tasks or areas of knowledge separate when working with large language models (LLMs). Think of it like giving each task its own dedicated space. Instead of one big LLM trying to handle everything at once, you use multiple, smaller "agents" that are each focused on a specific job and given only the context relevant to that job. This keeps unrelated information from bleeding between tasks, leading to more accurate and reliable results.
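+
+A toy sketch of the pattern, assuming a generic chat-completion callable (`call_llm` here is a placeholder, not a real API): each specialist keeps its own message history, so one task's context never leaks into another's.
+
+```python
+def call_llm(messages: list[dict]) -> str:
+    # Placeholder: a real implementation would call a chat-completion API.
+    return f"<answer derived from {len(messages)} isolated messages>"
+
+class SpecialistAgent:
+    def __init__(self, role: str):
+        # Each agent starts from its own narrow system prompt.
+        self.history = [{"role": "system", "content": role}]
+
+    def ask(self, question: str) -> str:
+        self.history.append({"role": "user", "content": question})
+        answer = call_llm(self.history)  # sees only its own history
+        self.history.append({"role": "assistant", "content": answer})
+        return answer
+
+billing = SpecialistAgent("You answer only billing questions.")
+legal = SpecialistAgent("You answer only contract-law questions.")
+
+billing.ask("Why was I charged twice?")   # the legal agent never sees this
+legal.ask("Is this clause enforceable?")  # billing context stays untouched
+```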
+ +Visit the following resources to learn more: + +- [@article@4 context engineering strategies every AI engineer needs to know](https://newsletter.owainlewis.com/p/4-context-engineering-strategies) +- [@article@Context Engineering](https://blog.langchain.com/context-engineering-for-agents/) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/context@9oUpeEnaMWctQLTobbmY7.md b/src/data/roadmaps/ai-engineer/content/context@9oUpeEnaMWctQLTobbmY7.md new file mode 100644 index 000000000..c10afdaf1 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/context@9oUpeEnaMWctQLTobbmY7.md @@ -0,0 +1,3 @@ +# Contextual prompting + +Contextual prompting provides specific background information or situational details relevant to the current task, helping LLMs understand nuances and tailor responses accordingly. Unlike system or role prompts, contextual prompts supply immediate, task-specific information that's dynamic and changes based on the situation. For example: "Context: You are writing for a blog about retro 80's arcade video games. Suggest 3 topics to write articles about." This technique ensures responses are relevant, accurate, and appropriately framed for the specific context provided. \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/context@vvpYkmycH0_W030E-L12f.md b/src/data/roadmaps/ai-engineer/content/context@vvpYkmycH0_W030E-L12f.md new file mode 100644 index 000000000..4a60bac37 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/context@vvpYkmycH0_W030E-L12f.md @@ -0,0 +1,10 @@ +# Context + +Context, in the realm of Large Language Models (LLMs), refers to the information provided to the model alongside the prompt, which it uses to generate a relevant and coherent response. It encompasses the user's query, any supporting text, previous turns in a conversation, or any other data that helps the LLM understand the desired output. Essentially, it's the background knowledge and instructions that guide the LLM's reasoning and generation process. + +Visit the following resources to learn more: + +- [@article@What is a Context Window in AI?](https://www.ibm.com/think/topics/context-window) +- [@article@What Is an AI Context Window?](https://www.coursera.org/articles/context-window) +- [@article@Cutting Through the Noise: Smarter Context Management for LLM-Powered Agents](https://blog.jetbrains.com/research/2025/12/efficient-context-management/) +- [@video@What is a Context Window? Unlocking LLM Secrets](https://www.youtube.com/watch?v=-QVoIxEpFkM) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/cot@nyBgEHvUhwF-NANMwkRJW.md b/src/data/roadmaps/ai-engineer/content/cot@nyBgEHvUhwF-NANMwkRJW.md new file mode 100644 index 000000000..e8c7af383 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/cot@nyBgEHvUhwF-NANMwkRJW.md @@ -0,0 +1,10 @@ +# Chain-of-Thought Prompting + +Chain of Thought (CoT) is a way for an AI agent to think out loud. Before giving its final answer, the agent writes short notes that show each step it takes. These notes can list facts, name sub-tasks, or do small bits of math. By seeing the steps, the agent stays organized and is less likely to make a mistake. People who read the answer can also check the logic and spot any weak points. The same written steps can be fed back into the agent so it can plan, reflect, or fix itself. Because it is easy to use and boosts trust, CoT is one of the most common designs for language-based agents today. 
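+
+A small sketch of the idea: the prompt explicitly asks the model to write out its steps before the final answer. The question and instruction wording are illustrative, and the commented-out call assumes an OpenAI-compatible chat client:
+
+```python
+question = "A cafe sells 14 coffees per hour. How many does it sell in a 6-hour shift?"
+
+# Build a prompt that elicits intermediate reasoning steps.
+cot_prompt = (
+    "Answer the question below. Think step by step: list the known facts, "
+    "do the arithmetic one step at a time, then give the final answer on "
+    "its own line prefixed with 'Answer:'.\n\n"
+    f"Question: {question}"
+)
+print(cot_prompt)
+
+# With an OpenAI-compatible client this would run as:
+# response = client.chat.completions.create(
+#     model="gpt-4o-mini",
+#     messages=[{"role": "user", "content": cot_prompt}],
+# )
+```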
+ +Visit the following resources to learn more: + +- [@article@What is chain of thought (CoT) prompting?](https://www.ibm.com/think/topics/chain-of-thoughts) +- [@article@Chain-of-Thought Prompting Elicits Reasoning in Large Language Models](https://arxiv.org/abs/2201.11903) +- [@article@Evoking Chain of Thought Reasoning in LLMs - Prompting Guide](https://www.promptingguide.ai/techniques/cot) +- [@video@ChatGPT Prompt Engineering Principles: Chain of Thought Prompting](https://www.youtube.com/watch?v=Kar2qfLDQ2c) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/cursor@MWhoqhNnBaoeCdN_8i15k.md b/src/data/roadmaps/ai-engineer/content/cursor@MWhoqhNnBaoeCdN_8i15k.md new file mode 100644 index 000000000..077cf1b44 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/cursor@MWhoqhNnBaoeCdN_8i15k.md @@ -0,0 +1,3 @@ +# Cursor + +Cursor is an AI-powered code editor designed to enhance developer productivity. It leverages large language models to provide features such as code generation, intelligent autocompletion, and code refactoring suggestions, all within a familiar editor environment. Cursor aims to streamline the coding process and accelerate software development. \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/deepseek@UGYYh2W1XnnbgYNY8L8Hb.md b/src/data/roadmaps/ai-engineer/content/deepseek@UGYYh2W1XnnbgYNY8L8Hb.md new file mode 100644 index 000000000..378d1852d --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/deepseek@UGYYh2W1XnnbgYNY8L8Hb.md @@ -0,0 +1,9 @@ +# DeepSeek + +DeepSeek refers to a family of large language models (LLMs) developed by DeepSeek AI. These models are designed to be powerful and efficient, capable of handling various natural language processing tasks such as text generation, translation, and question answering. They are made available with open weights, allowing developers and researchers to use, study, and modify the model architecture and parameters. + +Visit the following resources to learn more: + +- [@official@Deepseek](https://www.deepseek.com/en/) +- [@article@DeepSeek explained: Everything you need to know](https://www.techtarget.com/whatis/feature/DeepSeek-explained-Everything-you-need-to-know) +- [@video@What is DeepSeek? AI Model Basics Explained](https://www.youtube.com/watch?v=KTonvXhsxpc) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/embedding-models@fr0UOXlVVctkk1K84Z8Of.md b/src/data/roadmaps/ai-engineer/content/embedding-models@fr0UOXlVVctkk1K84Z8Of.md new file mode 100644 index 000000000..f0117bd0b --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/embedding-models@fr0UOXlVVctkk1K84Z8Of.md @@ -0,0 +1,8 @@ +# Embedding Models + +Embedding models transform data, like text or images, into numerical representations called embeddings. These embeddings capture the semantic meaning and relationships within the data in a vector space. By representing data as vectors, we can perform mathematical operations to determine similarity, cluster related items, and feed the data into machine learning models. + +Visit the following resources to learn more: + +- [@article@What are Embedding Models? 
An Overview](https://www.couchbase.com/blog/embedding-models/) +- [@article@Best Open-Source Embedding Models Benchmarked and Ranked](https://supermemory.ai/blog/best-open-source-embedding-models-benchmarked-and-ranked/) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/embeddings@XyEp6jnBSpCxMGwALnYfT.md b/src/data/roadmaps/ai-engineer/content/embeddings@XyEp6jnBSpCxMGwALnYfT.md index 4f3d348dc..cd9703f79 100644 --- a/src/data/roadmaps/ai-engineer/content/embeddings@XyEp6jnBSpCxMGwALnYfT.md +++ b/src/data/roadmaps/ai-engineer/content/embeddings@XyEp6jnBSpCxMGwALnYfT.md @@ -1,6 +1,6 @@ # Embeddings -Embeddings are dense, continuous vector representations of data, such as words, sentences, or images, in a lower-dimensional space. They capture the semantic relationships and patterns in the data, where similar items are placed closer together in the vector space. In machine learning, embeddings are used to convert complex data into numerical form that models can process more easily. For example, word embeddings represent words based on their meanings and contexts, allowing models to understand relationships like synonyms or analogies. Embeddings are widely used in tasks like natural language processing, recommendation systems, and image recognition to improve model performance and efficiency. +Embeddings are dense, continuous vector representations of data, such as words, sentences, or images, in a lower-dimensional space. They capture the semantic relationships and patterns in the data, where similar items are placed closer together in the vector space. In machine learning, embeddings are used to convert complex data into a numerical form that models can process more easily. For example, word embeddings represent words based on their meanings and contexts, allowing models to understand relationships like synonyms or analogies. Embeddings are widely used in tasks like natural language processing, recommendation systems, and image recognition to improve model performance and efficiency. Visit the following resources to learn more: diff --git a/src/data/roadmaps/ai-engineer/content/external-memory@KWjD4xEPhOOYS51dvRLd2.md b/src/data/roadmaps/ai-engineer/content/external-memory@KWjD4xEPhOOYS51dvRLd2.md new file mode 100644 index 000000000..7b9095a19 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/external-memory@KWjD4xEPhOOYS51dvRLd2.md @@ -0,0 +1,8 @@ +# External Memory for LLMs + +External memory refers to the techniques used to provide Large Language Models (LLMs) with access to information that is not stored directly within their parameters. This allows LLMs to access and utilize a much broader and more up-to-date knowledge base than what was available during their training. 
By using external memory, LLMs can overcome limitations related to knowledge cut-off, hallucination, and the inability to incorporate new information, leading to more accurate, reliable, and contextually relevant responses. + +Visit the following resources to learn more: + +- [@article@Context Engineering - LLM Memory and Retrieval for AI Agents](https://weaviate.io/blog/context-engineering) +- [@article@4 context engineering strategies every AI engineer needs to know](https://newsletter.owainlewis.com/i/180013006/1-write-external-memory) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/few-shot@DZPM9zjCbYYWBPLmQImxQ.md b/src/data/roadmaps/ai-engineer/content/few-shot@DZPM9zjCbYYWBPLmQImxQ.md new file mode 100644 index 000000000..aaea4d3fe --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/few-shot@DZPM9zjCbYYWBPLmQImxQ.md @@ -0,0 +1,10 @@ +# Few-Shot Prompting + +Few-shot prompting is a technique used with large language models (LLMs) where you provide a small number of example input-output pairs in the prompt itself. These examples demonstrate the desired behavior of the LLM for a specific task. By seeing these "few shots" of correct answers, the LLM can better understand the pattern and generate accurate and relevant responses for new, unseen inputs. + +Visit the following resources to learn more: + +- [@article@Few-Shot Prompting](https://www.promptingguide.ai/techniques/fewshot) +- [@article@Technique #3: Examples in Prompts: From Zero-Shot to Few-Shot](https://learnprompting.org/docs/basics/few_shot?srsltid=AfmBOooXYnhXZxh3YDocIxmsft0KBwCcuKQjaU5gCnBxSJdSvjBgYDDR) +- [@article@What is few shot prompting?](https://www.ibm.com/think/topics/few-shot-prompting) +- [@video@Discover Few-Shot Prompting | Google AI Essentials](https://www.youtube.com/watch?v=9qdgEBVkWR4) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/fine-tuning@zTvsCNS3ucsZmvy1tHyeI.md b/src/data/roadmaps/ai-engineer/content/fine-tuning@zTvsCNS3ucsZmvy1tHyeI.md new file mode 100644 index 000000000..78b26f143 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/fine-tuning@zTvsCNS3ucsZmvy1tHyeI.md @@ -0,0 +1,9 @@ +# Fine-tuning + +Fine-tuning involves taking a pre-trained large language model (LLM) and further training it on a smaller, task-specific dataset. This adapts the LLM to perform better on a particular task or domain. However, fine-tuning can be resource-intensive and may not always be the most efficient approach. Prompt engineering, retrieval-augmented generation (RAG), or using smaller, specialized models can sometimes achieve comparable or even better results with less computational overhead and data requirements. + +Visit the following resources to learn more: + +- [@article@What is fine-tuning?](https://www.ibm.com/think/topics/fine-tuning) +- [@article@What is fine-tuning? A guide to fine-tuning LLMs](https://cohere.com/blog/fine-tuning) +- [@video@RAG vs Fine-Tuning vs Prompt Engineering: Optimizing AI Models](https://www.youtube.com/watch?v=zYGDpG-pTho) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/function-calling@wFfjHkGLrcCyLyXV4BiFM.md b/src/data/roadmaps/ai-engineer/content/function-calling@wFfjHkGLrcCyLyXV4BiFM.md new file mode 100644 index 000000000..ac4958a34 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/function-calling@wFfjHkGLrcCyLyXV4BiFM.md @@ -0,0 +1,10 @@ +# Function Calling + +Function calling allows Large Language Models (LLMs) to interact with external tools and APIs.
Instead of just generating text, the LLM can be instructed to recognize when a specific function should be called based on the user's input, and then output the arguments needed to execute that function. This enables the LLM to perform actions beyond its training data, such as retrieving real-time information or automating tasks. + +Visit the following resources to learn more: + +- [@article@A Comprehensive Guide to Function Calling in LLMs](https://thenewstack.io/a-comprehensive-guide-to-function-calling-in-llms/) +- [@article@Function Calling with LLMs | Prompt Engineering Guide](https://www.promptingguide.ai/applications/function_calling) +- [@article@Function Calling with Open-Source LLMs](https://medium.com/@rushing_andrei/function-calling-with-open-source-llms-594aa5b3a304) +- [@video@LLM Function Calling - AI Tools Deep Dive](https://www.youtube.com/watch?v=gMeTK6zzaO4) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/gemini-embedding@4GArjDYipit4SLqKZAWDf.md b/src/data/roadmaps/ai-engineer/content/gemini-embedding@4GArjDYipit4SLqKZAWDf.md new file mode 100644 index 000000000..26eb7f1ee --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/gemini-embedding@4GArjDYipit4SLqKZAWDf.md @@ -0,0 +1,9 @@ +# Gemini Embedding + +The Gemini API offers methods to convert text, images, or other data types into numerical vector representations called embeddings. These embeddings capture the semantic meaning and relationships between different pieces of information, allowing for efficient similarity search, clustering, and other machine learning tasks. These proprietary models are offered as a service, often through an API, and require payment or subscription. + +Visit the following resources to learn more: + +- [@official@Embeddings](https://ai.google.dev/gemini-api/docs/embeddings) +- [@official@Gemini Embedding now generally available in the Gemini API](https://developers.googleblog.com/gemini-embedding-available-gemini-api/) +- [@video@What are text embeddings?](https://www.youtube.com/watch?v=vlcQV4j2kTo&t=117s) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/gemini@hzeEo8COf2l07iu5EdlFo.md b/src/data/roadmaps/ai-engineer/content/gemini@hzeEo8COf2l07iu5EdlFo.md new file mode 100644 index 000000000..95751fcaf --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/gemini@hzeEo8COf2l07iu5EdlFo.md @@ -0,0 +1,3 @@ +# Gemini + +Gemini is a multimodal AI model developed by Google. It's designed to understand and reason across different types of information, including text, code, audio, images, and video. This allows Gemini to solve complex problems and potentially generate new types of content, offering a more holistic approach compared to models focused on a single modality. \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/gemma2@MNtaY1_kOJHeoWuM-abb4.md b/src/data/roadmaps/ai-engineer/content/gemma2@MNtaY1_kOJHeoWuM-abb4.md new file mode 100644 index 000000000..00939796e --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/gemma2@MNtaY1_kOJHeoWuM-abb4.md @@ -0,0 +1,8 @@ +# Gemma2 + +Gemma2 is a family of open-source large language models (LLMs) developed by Google. These models are designed to be lightweight and high-performing, making them suitable for a variety of tasks, including text generation, question answering, and code completion. 
Gemma models are available in different sizes, allowing developers to select the best model for their specific resource constraints and performance requirements. + +Visit the following resources to learn more: + +- [@official@Gemma](https://deepmind.google/models/gemma/) +- [@official@Gemma explained: What’s new in Gemma 2](https://developers.googleblog.com/gemma-explained-new-in-gemma-2/) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/google-adk@mbp2NoL-VZ5hZIIblNBXt.md b/src/data/roadmaps/ai-engineer/content/google-adk@mbp2NoL-VZ5hZIIblNBXt.md index 6dcf8d822..401669d06 100644 --- a/src/data/roadmaps/ai-engineer/content/google-adk@mbp2NoL-VZ5hZIIblNBXt.md +++ b/src/data/roadmaps/ai-engineer/content/google-adk@mbp2NoL-VZ5hZIIblNBXt.md @@ -1,9 +1,9 @@ # Google ADK -The Google AI Agent Definition Kit (ADK) provides tools and frameworks for defining, building, and deploying AI agents. It helps streamline the agent development process by offering structure for agent design, tools for interacting with external APIs and data sources, and infrastructure for deploying agents in various environments. In essence, the ADK simplifies how developers create agents that can perform tasks, interact with users, and integrate with other systems. +The Google Agent Development Kit (ADK) is a framework designed to help developers build, test, and deploy AI agents. It provides tools and libraries that streamline the agent development process, offering features like agent orchestration, tool integration, and evaluation metrics. ADK aims to simplify the complexities of creating sophisticated AI agents capable of interacting with the real world. Visit the following resources to learn more: -- [@official@Overview of Agent Development Kit](https://docs.cloud.google.com/agent-builder/agent-development-kit/overview) - [@official@Agent Development Kit](https://google.github.io/adk-docs/) -- [@official@Agent Development Kit: Making it easy to build multi-agent applications](https://developers.googleblog.com/en/agent-development-kit-easy-to-build-multi-agent-applications/) \ No newline at end of file +- [@official@Overview of Agent Development Kit](https://docs.cloud.google.com/agent-builder/agent-development-kit/overview) +- [@course@ADK Crash Course - From Beginner To Expert](https://codelabs.developers.google.com/onramp/instructions#0) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/google-gemini-api@TsG_I7FL-cOCSw8gvZH3r.md b/src/data/roadmaps/ai-engineer/content/google-gemini-api@TsG_I7FL-cOCSw8gvZH3r.md new file mode 100644 index 000000000..ca01cd6b6 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/google-gemini-api@TsG_I7FL-cOCSw8gvZH3r.md @@ -0,0 +1,9 @@ +# Google Gemini API + +The Google Gemini API provides developers with programmatic access to Google's Gemini family of multimodal AI models. It allows applications to understand and generate content across text, images, audio, and video. Developers can use this API to build features like conversational interfaces, content creation tools, and systems that can analyze and respond to complex multi-sensory data. 
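+
+A minimal text-generation call, assuming the `google-genai` Python SDK (`pip install google-genai`) and an API key from Google AI Studio; the model name is an example and changes over time:
+
+```python
+from google import genai
+
+client = genai.Client(api_key="YOUR_API_KEY")  # assumes an AI Studio key
+
+response = client.models.generate_content(
+    model="gemini-2.5-flash",  # example model id; pick a current one
+    contents="Explain in two sentences what a multimodal model is.",
+)
+print(response.text)
+```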
+ +Visit the following resources to learn more: + +- [@official@Gemini API](https://ai.google.dev/gemini-api/docs) +- [@article@Everything you need to know about the Gemini API as a developer in less than 5 minutes](https://medium.com/around-the-prompt/everything-you-need-to-know-about-the-gemini-api-as-a-developer-in-less-than-5-minutes-5e75343ccff9) +- [@video@Introduction to Gemini APIs and AI Studio](https://www.youtube.com/watch?v=4oyqd7CB09c) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/google-gemini@oe8E6ZIQWuYvHVbYJHUc1.md b/src/data/roadmaps/ai-engineer/content/google-gemini@oe8E6ZIQWuYvHVbYJHUc1.md new file mode 100644 index 000000000..1aafc8887 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/google-gemini@oe8E6ZIQWuYvHVbYJHUc1.md @@ -0,0 +1,10 @@ +# Google Gemini + +Google Gemini is a family of multimodal large language models (LLMs) developed by Google AI. It's designed to understand and generate content across various modalities, including text, images, audio, and video. Gemini comes in different sizes and capabilities, allowing developers to choose the best model for their specific needs and resource constraints. + +Visit the following resources to learn more: + +- [@official@Google Gemini](https://gemini.google.com/) +- [@official@Google's Gemini Documentation](https://workspace.google.com/solutions/ai/) +- [@course@Google AI Training](https://grow.google/ai/) +- [@video@Welcome to the Gemini era](https://www.youtube.com/watch?v=_fuimO6ErKI) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/how-llms-work@zdeuA4GbdBl2DwKgiOA4G.md b/src/data/roadmaps/ai-engineer/content/how-llms-work@zdeuA4GbdBl2DwKgiOA4G.md new file mode 100644 index 000000000..c9b8368ba --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/how-llms-work@zdeuA4GbdBl2DwKgiOA4G.md @@ -0,0 +1,7 @@ +# OpenAI API + +The OpenAI API provides access to powerful AI models like GPT, Codex, DALL-E, and Whisper, enabling developers to integrate capabilities such as text generation, code assistance, image creation, and speech recognition into their applications via a simple, scalable interface. + +Visit the following resources to learn more: + +- [@official@OpenAI API](https://openai.com/api/) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/hugging-face-hub@YLOdOvLXa5Fa7_mmuvKEi.md b/src/data/roadmaps/ai-engineer/content/hugging-face-hub@YLOdOvLXa5Fa7_mmuvKEi.md index 2c5e33f7d..a6f46f398 100644 --- a/src/data/roadmaps/ai-engineer/content/hugging-face-hub@YLOdOvLXa5Fa7_mmuvKEi.md +++ b/src/data/roadmaps/ai-engineer/content/hugging-face-hub@YLOdOvLXa5Fa7_mmuvKEi.md @@ -1,8 +1,8 @@ # Hugging Face Hub -The Hugging Face Hub is a comprehensive platform that hosts over 900,000 machine learning models, 200,000 datasets, and 300,000 demo applications, facilitating collaboration and sharing within the AI community. It serves as a central repository where users can discover, upload, and experiment with various models and datasets across multiple domains, including natural language processing, computer vision, and audio tasks. It also supports version control. +The Hugging Face Hub is a central platform where users can discover, share, and collaborate on pre-trained models, datasets, and demos for machine learning. It hosts a vast repository of resources contributed by the community, allowing users to easily access and integrate these assets into their AI projects. 
The Hub provides tools for version control, model card documentation, and even web-based deployment, simplifying the process of leveraging existing AI models and contributing back to the open-source community. Visit the following resources to learn more: -- [@course@nlp-official](https://huggingface.co/learn/nlp-course/en/chapter4/1) -- [@official@Hugging Face Documentation](https://huggingface.co/docs/hub/en/index) \ No newline at end of file +- [@official@Hugging Face Documentation](https://huggingface.co/docs/hub/en/index) +- [@course@The Hugging Face Hub (LLM Course)](https://huggingface.co/learn/nlp-course/en/chapter4/1) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/hugging-face-inference-sdk@3kRTzlLNBnXdTsAEXVu_M.md b/src/data/roadmaps/ai-engineer/content/hugging-face-inference-sdk@3kRTzlLNBnXdTsAEXVu_M.md new file mode 100644 index 000000000..ce0216a98 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/hugging-face-inference-sdk@3kRTzlLNBnXdTsAEXVu_M.md @@ -0,0 +1,8 @@ +# Inference SDK + +The Hugging Face Inference SDK is a powerful tool that allows developers to easily integrate and run inference on large language models hosted on the Hugging Face Hub. By using the `InferenceClient`, users can make API calls to various models for tasks such as text generation, image creation, and more. The SDK supports both synchronous and asynchronous operations, making it easy to fit into existing workflows. + +Visit the following resources to learn more: + +- [@official@Inference](https://huggingface.co/docs/huggingface_hub/en/package_reference/inference_client) +- [@article@Endpoint Setup](https://www.npmjs.com/package/@huggingface/inference) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/hugging-face@v99C5Bml2a6148LCJ9gy9.md b/src/data/roadmaps/ai-engineer/content/hugging-face@v99C5Bml2a6148LCJ9gy9.md index 7781bd257..7a8f184ed 100644 --- a/src/data/roadmaps/ai-engineer/content/hugging-face@v99C5Bml2a6148LCJ9gy9.md +++ b/src/data/roadmaps/ai-engineer/content/hugging-face@v99C5Bml2a6148LCJ9gy9.md @@ -4,6 +4,6 @@ Hugging Face is a leading AI company and open-source platform that provides tool Visit the following resources to learn more: -- [@course@Hugging Face Official Video Course](https://www.youtube.com/watch?v=00GKzGyWFEs&list=PLo2EIpI_JMQvWfQndUesu0nPBAtZ9gP1o) - [@official@Hugging Face](https://huggingface.co) +- [@course@Hugging Face Official Video Course](https://www.youtube.com/watch?v=00GKzGyWFEs&list=PLo2EIpI_JMQvWfQndUesu0nPBAtZ9gP1o) - [@video@What is Hugging Face? - Machine Learning Hub Explained](https://www.youtube.com/watch?v=1AUjKfpRZVo) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/inference@4NtUD9V64gkp8SFudj_ai.md b/src/data/roadmaps/ai-engineer/content/inference@4NtUD9V64gkp8SFudj_ai.md new file mode 100644 index 000000000..52787f851 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/inference@4NtUD9V64gkp8SFudj_ai.md @@ -0,0 +1,8 @@ +# Inference + +In artificial intelligence (AI), inference refers to the process by which a trained machine learning model makes predictions or draws conclusions from new, unseen data. Unlike training, inference involves the model applying what it has learned to make decisions without needing examples of the exact result. In essence, inference is the AI model actively functioning. For example, a self-driving car recognizing a stop sign on a road it has never encountered before demonstrates inference.
The model identifies the stop sign in a new setting, using its learned knowledge to make a decision in real-time. + +Visit the following resources to learn more: + +- [@article@Inference vs Training](https://www.cloudflare.com/learning/ai/inference-vs-training/) +- [@article@What is Machine Learning Inference?](https://hazelcast.com/glossary/machine-learning-inference/) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/input-format@LCHse57rXf3sl8ml1ow0T.md b/src/data/roadmaps/ai-engineer/content/input-format@LCHse57rXf3sl8ml1ow0T.md new file mode 100644 index 000000000..70d7d0c1d --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/input-format@LCHse57rXf3sl8ml1ow0T.md @@ -0,0 +1,8 @@ +# Input Format + +Input format in prompt engineering deals with how you structure and present your prompt to a large language model (LLM). This includes the specific words used, the overall layout, and the inclusion of any examples or instructions. A well-defined input format guides the LLM towards generating the desired output, improving the relevance, accuracy, and consistency of its responses. Effective input formatting is key to unlocking the full potential of LLMs for specific tasks. + +Visit the following resources to learn more: + +- [@article@Elements of a Prompt](https://www.promptingguide.ai/introduction/elements) +- [@article@Does Prompt Formatting Have Any Impact on LLM Performance?](https://arxiv.org/html/2411.10541v1) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/jina@apVYIV4EyejPft25oAvdI.md b/src/data/roadmaps/ai-engineer/content/jina@apVYIV4EyejPft25oAvdI.md new file mode 100644 index 000000000..1051db89a --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/jina@apVYIV4EyejPft25oAvdI.md @@ -0,0 +1,8 @@ +# Jina AI + +Jina Embeddings are a family of high-performance, open-source, multilingual, and multimodal embedding models developed by Jina AI. They transform text and visual data (like images and charts) into dense numerical vectors for semantic search, RAG, and AI applications. Key features include supporting long contexts (up to 32k tokens), multi-task capabilities, and specialized versions for text-to-image and code retrieval. + +Visit the following resources to learn more: + +- [@official@Jina Embeddings](https://jina.ai/en-US/embeddings/) +- [@official@Jina Embeddings v5 Text: Distilling 4B Quality into Sub-1B Multilingual Embeddings](https://jina.ai/news/jina-embeddings-v5-text-distilling-4b-quality-into-sub-1b-multilingual-embeddings/) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/large-language-model-llm@wf2BSyUekr1S1q6l8kyq6.md b/src/data/roadmaps/ai-engineer/content/large-language-model-llm@wf2BSyUekr1S1q6l8kyq6.md new file mode 100644 index 000000000..226679800 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/large-language-model-llm@wf2BSyUekr1S1q6l8kyq6.md @@ -0,0 +1,10 @@ +# LLMs + +LLMs, or Large Language Models, are advanced AI models trained on vast datasets to understand and generate human-like text. They can perform a wide range of natural language processing tasks, such as text generation, translation, summarization, and question answering. Examples include GPT-5, BERT, and DeepSeek. LLMs are capable of understanding context, handling complex queries, and generating coherent responses, making them useful for applications like chatbots, content creation, and automated support.
However, they require significant computational resources and may carry biases from their training data. + +Visit the following resources to learn more: + +- [@article@What is a large language model (LLM)?](https://www.cloudflare.com/en-gb/learning/ai/what-is-large-language-model/) +- [@article@Understanding AI: Everything you need to know about language models](https://leerob.com/ai) +- [@video@How Large Language Models Work](https://www.youtube.com/watch?v=5sLYAQS9sWQ) +- [@video@Large Language Models (LLMs) - Everything You NEED To Know](https://www.youtube.com/watch?v=osKyvYJ3PRM) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/lm-studio@a_3SabylVqzzOyw3tZN5f.md b/src/data/roadmaps/ai-engineer/content/lm-studio@a_3SabylVqzzOyw3tZN5f.md new file mode 100644 index 000000000..52fdd3242 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/lm-studio@a_3SabylVqzzOyw3tZN5f.md @@ -0,0 +1,8 @@ +# LM Studio + +LM Studio is a desktop application designed for discovering, downloading, and running large language models (LLMs) locally on your computer. It provides a user-friendly interface to experiment with different open-source LLMs without needing extensive technical knowledge or cloud infrastructure. Essentially, it helps you manage and interact with LLMs directly on your machine. + +Visit the following resources to learn more: + +- [@official@LM Studio](https://lmstudio.ai/) +- [@video@LM Studio Tutorial: Run Large Language Models (LLM) on Your Laptop](https://www.youtube.com/watch?v=ygUEbCpOOLg) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/manual-implementation@6xaRB34_g0HGt-y1dGYXR.md b/src/data/roadmaps/ai-engineer/content/manual-implementation@6xaRB34_g0HGt-y1dGYXR.md index 69e31441c..a7411cf49 100644 --- a/src/data/roadmaps/ai-engineer/content/manual-implementation@6xaRB34_g0HGt-y1dGYXR.md +++ b/src/data/roadmaps/ai-engineer/content/manual-implementation@6xaRB34_g0HGt-y1dGYXR.md @@ -1,8 +1,8 @@ -# Manual Implementation +# Manual Implementation of AI Agents -Services like Open AI functions and Tools or Vercel's AI SDK make it really easy to make SDK agents however it is a good idea to learn how these tools work under the hood. You can also create fully custom implementation of agents using by implementing custom loop. +Manually implementing AI agents involves crafting their logic and behavior from the ground up, without relying heavily on pre-built frameworks or automated tools. This approach requires writing code to define how the agent perceives its environment, makes decisions, and takes actions, often involving explicit programming of algorithms for tasks like perception, planning, and learning. It offers maximum control and customization but demands a deep understanding of AI principles and software engineering.
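+As an illustration, here is a minimal, self-contained sketch of such a custom loop. Everything in it is a hypothetical stand-in: `llm_decide` stubs out the LLM call that would plan the next step, and `calculator` is the only tool.
+
+```python
+# Minimal custom agent loop (illustrative; no framework, stubbed LLM).
+def llm_decide(goal: str, observations: list[str]) -> dict:
+    # Stand-in for a real LLM call that picks the next action.
+    if any("42" in o for o in observations):
+        return {"action": "finish", "answer": "The answer is 42."}
+    return {"action": "calculator", "args": {"expression": "6 * 7"}}
+
+def calculator(expression: str) -> str:
+    # A deliberately restricted tool: simple arithmetic only.
+    if not set(expression) <= set("0123456789+-*/(). "):
+        raise ValueError("unsupported expression")
+    return str(eval(expression))
+
+def run_agent(goal: str, max_steps: int = 5) -> str:
+    observations: list[str] = []
+    for _ in range(max_steps):                     # the custom loop itself
+        decision = llm_decide(goal, observations)  # decide on the next step
+        if decision["action"] == "finish":
+            return decision["answer"]
+        observations.append(calculator(**decision["args"]))  # act, observe
+    return "Gave up after max_steps."
+
+print(run_agent("What is 6 times 7?"))  # -> The answer is 42.
+```
+
+A real implementation would replace `llm_decide` with a model call that returns either a final answer or a tool invocation, but the control flow stays this simple.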
Visit the following resources to learn more: -- [@official@OpenAI Function Calling](https://platform.openai.com/docs/guides/function-calling) -- [@official@Vercel AI SDK](https://sdk.vercel.ai/docs/foundations/tools) \ No newline at end of file +- [@official@A practical guide to building agents - OpenAI](https://cdn.openai.com/business-guides-and-resources/a-practical-guide-to-building-agents.pdf) +- [@official@Create custom subagents - Claude](https://code.claude.com/docs/en/sub-agents) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/meta-llama@OkYO-aSPiuVYuLXHswBCn.md b/src/data/roadmaps/ai-engineer/content/meta-llama@OkYO-aSPiuVYuLXHswBCn.md new file mode 100644 index 000000000..e425cba29 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/meta-llama@OkYO-aSPiuVYuLXHswBCn.md @@ -0,0 +1,8 @@ +# Meta Llama + +Meta Llama is a family of large language models (LLMs) developed by Meta AI. These models are designed to be openly available and accessible, allowing researchers and developers to build and experiment with LLMs without the restrictions of proprietary models. Llama models have different versions and sizes, and they aim to provide a capable and efficient base for various natural language processing tasks. + +Visit the following resources to learn more: + +- [@official@Llama](https://www.llama.com/) +- [@course@Building with Llama 4](https://www.deeplearning.ai/short-courses/building-with-llama-4/) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/mistral@n-Ud2dXkqIzK37jlKItN4.md b/src/data/roadmaps/ai-engineer/content/mistral@n-Ud2dXkqIzK37jlKItN4.md new file mode 100644 index 000000000..a037c054e --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/mistral@n-Ud2dXkqIzK37jlKItN4.md @@ -0,0 +1,8 @@ +# Mistral AI + +Mistral AI is a company focused on developing open-weight large language models (LLMs) to provide high-performance AI solutions. Mistral aims to create models that are both efficient and versatile, making them suitable for a wide range of natural language processing tasks, including text generation, translation, and summarization. By releasing open-weight models, Mistral promotes transparency and accessibility, allowing developers to customize and deploy AI solutions more flexibly compared to proprietary models. + +Visit the following resources to learn more: + +- [@official@Mistral AI](https://mistral.ai/) +- [@video@Mistral AI: The Gen AI Start-up you did not know existed](https://www.youtube.com/watch?v=vzrRGd18tAg) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/models-on-hugging-face@dLEg4IA3F5jgc44Bst9if.md b/src/data/roadmaps/ai-engineer/content/models-on-hugging-face@dLEg4IA3F5jgc44Bst9if.md index 738147d65..a223f8f20 100644 --- a/src/data/roadmaps/ai-engineer/content/models-on-hugging-face@dLEg4IA3F5jgc44Bst9if.md +++ b/src/data/roadmaps/ai-engineer/content/models-on-hugging-face@dLEg4IA3F5jgc44Bst9if.md @@ -1,6 +1,6 @@ # Models on Hugging Face -Embedding models are used to convert raw data like text, code, or images into high-dimensional vectors that capture semantic meaning. These vector representations allow AI systems to compare, cluster, and retrieve information based on similarity rather than exact matches.
Hugging Face provides a wide range of pretrained embedding models such as `all-MiniLM-L6-v2`, `gte-base`, `Qwen3-Embedding-8B` and `bge-base` which are commonly used for tasks like semantic search, recommendation systems, duplicate detection, and retrieval-augmented generation (RAG). These models can be accessed through libraries like transformers or sentence-transformers, making it easy to generate high-quality embeddings for both general-purpose and task-specific applications. +Embedding models are used to convert raw data like text, code, or images into high-dimensional vectors that capture semantic meaning. These vector representations allow AI systems to compare, cluster, and retrieve information based on similarity rather than exact matches. Hugging Face provides a wide range of pretrained embedding models, which are commonly used for tasks like semantic search, recommendation systems, duplicate detection, and retrieval-augmented generation (RAG). These models can be accessed through libraries like `transformers` or `sentence-transformers`, making it easy to generate high-quality embeddings for both general-purpose and task-specific applications. Visit the following resources to learn more: diff --git a/src/data/roadmaps/ai-engineer/content/multi-agents@kG1bkF2oY21CJOm9zhdpn.md b/src/data/roadmaps/ai-engineer/content/multi-agents@kG1bkF2oY21CJOm9zhdpn.md new file mode 100644 index 000000000..043109ad3 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/multi-agents@kG1bkF2oY21CJOm9zhdpn.md @@ -0,0 +1,9 @@ +# Multi-Agent Systems + +Multi-agent systems involve the coordinated behavior of multiple intelligent agents within an environment. These agents interact with each other, potentially cooperating or competing, to achieve individual or collective goals. The complexity arises from the need for agents to reason about the actions, intentions, and knowledge of other agents to make informed decisions and effectively navigate the environment. + +Visit the following resources to learn more: + +- [@article@What is a multi-agent system?](https://www.ibm.com/think/topics/multiagent-system) +- [@article@Multi-Agent Systems](https://huggingface.co/learn/agents-course/en/unit2/smolagents/multi_agent_systems) +- [@article@Guide to multi-agent systems (MAS)](https://cloud.google.com/discover/what-is-a-multi-agent-system) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/nanobanana-api@6y73FLjshnqxV8BTGUeiu.md b/src/data/roadmaps/ai-engineer/content/nanobanana-api@6y73FLjshnqxV8BTGUeiu.md new file mode 100644 index 000000000..3c467498f --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/nanobanana-api@6y73FLjshnqxV8BTGUeiu.md @@ -0,0 +1,7 @@ +# NanoBanana API + +The NanoBanana API provides programmatic access to Nano Banana, the popular nickname for Google's Gemini image generation and editing model. It lets developers generate new images from text prompts and edit existing images with natural-language instructions, handling authentication, request formatting, and image delivery so that image generation can be added to applications without managing the underlying model.
+ +Visit the following resources to learn more: + +- [@official@NanoBanana API](https://nanobananaapi.ai/) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/open-ai-embeddings-api@l6priWeJhbdUD5tJ7uHyG.md b/src/data/roadmaps/ai-engineer/content/open-ai-embeddings-api@l6priWeJhbdUD5tJ7uHyG.md index bf366d306..284205411 100644 --- a/src/data/roadmaps/ai-engineer/content/open-ai-embeddings-api@l6priWeJhbdUD5tJ7uHyG.md +++ b/src/data/roadmaps/ai-engineer/content/open-ai-embeddings-api@l6priWeJhbdUD5tJ7uHyG.md @@ -1,5 +1,7 @@ # OpenAI Embeddings API +The OpenAI Embeddings API provides a straightforward way to transform text into numerical vector representations called embeddings. These embeddings capture the semantic meaning of text, allowing you to perform tasks like semantic search, clustering, and similarity comparison by analyzing the relationships between these vectors. The API simplifies the process of generating these embeddings, abstracting away the complexities of training and managing embedding models. + Visit the following resources to learn more: - [@official@OpenAI Embeddings API](https://platform.openai.com/docs/api-reference/embeddings/create) diff --git a/src/data/roadmaps/ai-engineer/content/openai-agentkit@Sm0Ne5Nx72hcZCdAcC0C2.md b/src/data/roadmaps/ai-engineer/content/openai-agentkit@Sm0Ne5Nx72hcZCdAcC0C2.md new file mode 100644 index 000000000..f65e055e4 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/openai-agentkit@Sm0Ne5Nx72hcZCdAcC0C2.md @@ -0,0 +1,9 @@ +# OpenAI AgentKit + +OpenAI AgentKit is a framework designed to simplify the process of creating AI agents that can perform specific tasks by leveraging large language models (LLMs). It provides tools and abstractions for managing agent state, defining agent behavior, and connecting agents to various tools and resources, allowing developers to build more complex and capable AI systems without needing to manage all the low-level details. + +Visit the following resources to learn more: + +- [@official@Introducing AgentKit](https://openai.com/index/introducing-agentkit/) +- [@official@Build every step of agents on one platform](https://openai.com/agent-platform/) +- [@video@Build Hour: AgentKit](https://www.youtube.com/watch?v=sAitLFLbgDA) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/openai-compatible-apis@vnXtUupJUlyU_uCbZBbnk.md b/src/data/roadmaps/ai-engineer/content/openai-compatible-apis@vnXtUupJUlyU_uCbZBbnk.md new file mode 100644 index 000000000..5779f38fd --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/openai-compatible-apis@vnXtUupJUlyU_uCbZBbnk.md @@ -0,0 +1,8 @@ +# OpenAI-compatible APIs + +OpenAI-compatible APIs are interfaces that mimic the structure and functionality of OpenAI's API. These APIs allow developers to switch between different language models or providers with minimal code changes. This is particularly useful for testing different models, mitigating vendor lock-in, or building fallback mechanisms in case of service disruptions. 
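+Because the request and response shapes match, switching providers can be as small as changing `base_url` and the model name. Below is a minimal sketch using the official OpenAI Python SDK against Gemini's compatibility endpoint; the endpoint and model name follow the Gemini docs linked below and are shown here only as an illustration.
+
+```python
+# Sketch: pointing the OpenAI SDK at a non-OpenAI, compatible provider.
+from openai import OpenAI
+
+client = OpenAI(
+    base_url="https://generativelanguage.googleapis.com/v1beta/openai/",
+    api_key="YOUR_GEMINI_API_KEY",  # the provider's key, not an OpenAI key
+)
+
+response = client.chat.completions.create(
+    model="gemini-2.0-flash",  # provider-specific model name
+    messages=[{"role": "user", "content": "Say hello in one sentence."}],
+)
+print(response.choices[0].message.content)
+```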
+ +Visit the following resources to learn more: + +- [@article@OpenAI-compatible API](https://bentoml.com/llm/llm-inference-basics/openai-compatible-api) +- [@article@OpenAI compatibility - Gemini](https://ai.google.dev/gemini-api/docs/openai) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/openai-gpt-o-series@3PQVZbcr4neNMRr6CuNzS.md b/src/data/roadmaps/ai-engineer/content/openai-gpt-o-series@3PQVZbcr4neNMRr6CuNzS.md new file mode 100644 index 000000000..cb62cab93 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/openai-gpt-o-series@3PQVZbcr4neNMRr6CuNzS.md @@ -0,0 +1,9 @@ +# OpenAI Models (GPT, o-series) + +OpenAI models, including the GPT series and the "o-series" (such as o1 and o3), are pre-trained AI models offered by OpenAI, accessible via their API. GPT models excel at natural language tasks like text generation, translation, and question answering. The "o-series" models are reasoning models: they spend additional compute working through a problem step by step before answering, which makes them stronger at math, coding, and other complex reasoning tasks. + +Visit the following resources to learn more: + +- [@official@OpenAI Platform](https://openai.com/api/) +- [@official@OpenAI Models](https://developers.openai.com/api/docs/models) +- [@article@Your guide to the complete OpenAI models list](https://www.eesel.ai/blog/openai-models-list) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/openai-response-api@ro3vY_sp6xMQ-hfzO-rc1.md b/src/data/roadmaps/ai-engineer/content/openai-response-api@ro3vY_sp6xMQ-hfzO-rc1.md new file mode 100644 index 000000000..e10dcea3e --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/openai-response-api@ro3vY_sp6xMQ-hfzO-rc1.md @@ -0,0 +1,9 @@ +# OpenAI Response API + +The OpenAI Response API allows developers to interact with various AI models offered by OpenAI. It provides a structured way to send requests to these models and receive generated text, code, or other outputs. You can control aspects like the prompt, temperature, maximum length, and other parameters to tailor the response to your specific needs. It is intended to eventually replace the Assistants API and the Chat Completions API for new projects. + +Visit the following resources to learn more: + +- [@official@Responses API](https://developers.openai.com/api/reference/resources/responses/) +- [@official@Why we built the Responses API](https://developers.openai.com/blog/responses-api/) +- [@video@OpenAI Responses API Tutorial (2025)](https://www.youtube.com/playlist?list=PL4HikwTaYE0EWV3qieOYooyxb9osQLgou) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/openrouter@1GlpMjmdAWor0X_BnISGg.md b/src/data/roadmaps/ai-engineer/content/openrouter@1GlpMjmdAWor0X_BnISGg.md new file mode 100644 index 000000000..b6cffe04f --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/openrouter@1GlpMjmdAWor0X_BnISGg.md @@ -0,0 +1,9 @@ +# OpenRouter + +OpenRouter acts as a unified API, providing access to various large language models (LLMs) from different providers like OpenAI, Cohere, and Anthropic through a single platform. This allows developers to easily switch between models, compare their performance, and optimize their applications without needing to manage multiple API keys or integration processes. It essentially streamlines the process of experimenting with and deploying different AI models. + +Visit the following resources to learn more: + +- [@official@OpenRouter](https://openrouter.ai/) +- [@article@What is OpenRouter?
A Guide with Practical Examples](https://www.codecademy.com/article/what-is-openrouter) +- [@video@What is Open Router ?](https://www.youtube.com/watch?v=pfT6l0yMsB0) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/prompt-caching@bqqY0gsZkBpcHMZw1hcZ5.md b/src/data/roadmaps/ai-engineer/content/prompt-caching@bqqY0gsZkBpcHMZw1hcZ5.md new file mode 100644 index 000000000..8e7bd037a --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/prompt-caching@bqqY0gsZkBpcHMZw1hcZ5.md @@ -0,0 +1,8 @@ +# Prompt Caching + +Prompt caching is a technique where the provider stores the model's processed state for a repeated prompt prefix (for example, a long system prompt or shared few-shot examples) and reuses it on later requests instead of re-processing the same tokens every time. This can significantly reduce latency and cost when dealing with frequently used or computationally expensive prompts. + +Visit the following resources to learn more: + +- [@article@What is Prompt Caching?](https://www.ibm.com/think/topics/prompt-caching) +- [@video@What is Prompt Caching? Optimize LLM Latency with AI Transformers](https://www.youtube.com/watch?v=u57EnkQaUTY) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/prompt-engineering@VjXmSCdzi2ACv-W85Sy9D.md b/src/data/roadmaps/ai-engineer/content/prompt-engineering@VjXmSCdzi2ACv-W85Sy9D.md new file mode 100644 index 000000000..246189414 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/prompt-engineering@VjXmSCdzi2ACv-W85Sy9D.md @@ -0,0 +1,9 @@ +# Prompt Engineering + +Prompt engineering is the art and science of crafting effective prompts, which are the instructions or inputs given to a large language model (LLM). The goal is to design prompts that elicit the desired response from the LLM, guiding it to generate accurate, relevant, and creative outputs. This involves understanding the LLM's capabilities and limitations, and experimenting with different prompt structures, keywords, and contextual cues to achieve optimal results. + +Visit the following resources to learn more: + +- [@roadmap@Visit Dedicated Prompt Engineering Roadmap](https://roadmap.sh/prompt-engineering) +- [@article@What is Prompt Engineering? - AI Prompt Engineering Explained - AWS](https://aws.amazon.com/what-is/prompt-engineering/) +- [@video@What is Prompt Engineering?](https://www.youtube.com/watch?v=nf1e-55KKbg) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/prompt-vs-context-engineering@ozrR8IvjNFbHd44kZrExX.md b/src/data/roadmaps/ai-engineer/content/prompt-vs-context-engineering@ozrR8IvjNFbHd44kZrExX.md new file mode 100644 index 000000000..38705043d --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/prompt-vs-context-engineering@ozrR8IvjNFbHd44kZrExX.md @@ -0,0 +1,10 @@ +# Prompt vs. Context Engineering + +Prompt engineering focuses on crafting effective instructions (prompts) to elicit desired outputs from Large Language Models (LLMs). Context engineering, on the other hand, involves structuring and providing relevant background information (the context) to the LLM, enabling it to generate more accurate and informed responses based on a richer understanding of the subject matter. + +Visit the following resources to learn more: + +- [@article@Context engineering vs.
prompt engineering](https://www.elastic.co/search-labs/blog/context-engineering-vs-prompt-engineering) +- [@article@Effective context engineering for AI agents](https://www.anthropic.com/engineering/effective-context-engineering-for-ai-agents) +- [@article@Context Engineering vs Prompt Engineering](https://medium.com/data-science-in-your-pocket/context-engineering-vs-prompt-engineering-379e9622e19d) +- [@video@Context Engineering vs. Prompt Engineering: Smarter AI with RAG & Agents](https://www.youtube.com/watch?v=vD0E3EUb8-8) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/qwen@c0RPhpD00VIUgF4HJgN2T.md b/src/data/roadmaps/ai-engineer/content/qwen@c0RPhpD00VIUgF4HJgN2T.md new file mode 100644 index 000000000..268a0ef14 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/qwen@c0RPhpD00VIUgF4HJgN2T.md @@ -0,0 +1,8 @@ +# Qwen + +Qwen is a series of large language models (LLMs) developed by Alibaba Group. These models are open-source, meaning their code and architecture are publicly available, allowing researchers and developers to use, study, and modify them for their own purposes. Qwen models are designed to perform various natural language processing tasks, such as text generation, translation, and question answering, and come in different sizes, allowing for flexibility based on computational resources and specific application requirements. + +Visit the following resources to learn more: + +- [@official@Qwen](https://chat.qwen.ai/) +- [@article@What is Qwen AI?](https://zapier.com/blog/qwen/) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/rag-and-dynamic-filters@LnQ2AatMWpExUHcZhDIPd.md b/src/data/roadmaps/ai-engineer/content/rag-and-dynamic-filters@LnQ2AatMWpExUHcZhDIPd.md new file mode 100644 index 000000000..c9b88c9c9 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/rag-and-dynamic-filters@LnQ2AatMWpExUHcZhDIPd.md @@ -0,0 +1,8 @@ +# RAG and Dynamic Filters + +Retrieval-Augmented Generation (RAG) enhances Large Language Models (LLMs) by providing them with relevant, up-to-date information from external sources. Dynamic filters are techniques that selectively filter the information retrieved for RAG, ensuring that the LLM receives only the most pertinent context based on the specific query and user. This results in more accurate, focused, and contextually appropriate LLM responses. + +Visit the following resources to learn more: + +- [@article@4 context engineering strategies every AI engineer needs to know](https://newsletter.owainlewis.com/p/4-context-engineering-strategies) +- [@article@Context Engineering](https://blog.langchain.com/context-engineering-for-agents/) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/rag@IX1BJWGwGmB4L063g0Frf.md b/src/data/roadmaps/ai-engineer/content/rag@IX1BJWGwGmB4L063g0Frf.md new file mode 100644 index 000000000..3c9f87509 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/rag@IX1BJWGwGmB4L063g0Frf.md @@ -0,0 +1,8 @@ +# RAG + +Retrieval-Augmented Generation (RAG) is an AI approach that combines information retrieval with language generation to create more accurate, contextually relevant outputs. It works by first retrieving relevant data from a knowledge base or external source, then using a language model to generate a response based on that information. 
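+In code, that two-step flow can be sketched in a few lines. This is a toy, self-contained example: the keyword-overlap `retrieve` stands in for an embedding search against a vector store, and the returned prompt stands in for a real LLM call.
+
+```python
+# Toy retrieve-then-generate flow; real systems use embeddings + an LLM.
+DOCS = [
+    "The Eiffel Tower is 330 metres tall.",
+    "Paris is the capital of France.",
+]
+
+def retrieve(question: str, top_k: int = 1) -> list[str]:
+    # Crude stand-in retriever: rank documents by word overlap.
+    words = set(question.lower().split())
+    return sorted(DOCS, key=lambda d: -len(words & set(d.lower().split())))[:top_k]
+
+def answer(question: str) -> str:
+    context = "\n".join(retrieve(question))
+    # A real system would send this prompt to an LLM; here we just build it.
+    return f"Answer using only this context:\n{context}\n\nQuestion: {question}"
+
+print(answer("How tall is the Eiffel Tower?"))
+```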
This method enhances the accuracy of generative models by grounding their outputs in real-world data, making RAG ideal for tasks like question answering, summarization, and chatbots that require reliable, up-to-date information. + +Visit the following resources to learn more: + +- [@article@What is Retrieval-Augmented Generation? - Google](https://cloud.google.com/use-cases/retrieval-augmented-generation) +- [@video@What is Retrieval-Augmented Generation? - IBM](https://www.youtube.com/watch?v=T-D1OfcDW1M) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/react@Waox7xR_yUeSnOtQFzU4c.md b/src/data/roadmaps/ai-engineer/content/react@Waox7xR_yUeSnOtQFzU4c.md new file mode 100644 index 000000000..843c1fdc2 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/react@Waox7xR_yUeSnOtQFzU4c.md @@ -0,0 +1,8 @@ +# ReAct + +ReAct (Reason and Act) prompting enables LLMs to solve complex tasks by combining reasoning with external tool interactions. It follows a thought-action-observation loop: analyze the problem, perform actions using external APIs, review results, and iterate until solved. Useful for research, multi-step problems, and tasks requiring current data. + +Visit the following resources to learn more: + +- [@article@ReAct Prompting](https://www.promptingguide.ai/techniques/react) +- [@video@4 Methods of Prompt Engineering](https://youtu.be/vD0E3EUb8-8?si=Y6MCLPzjmhMB4jSu&t=203) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/repetition-penalties@0_pa739kMPWHfuSQV-VO7.md b/src/data/roadmaps/ai-engineer/content/repetition-penalties@0_pa739kMPWHfuSQV-VO7.md new file mode 100644 index 000000000..32a8a2723 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/repetition-penalties@0_pa739kMPWHfuSQV-VO7.md @@ -0,0 +1,8 @@ +# Repetition Penalties + +Repetition penalties discourage LLMs from repeating words or phrases by reducing the probability of selecting previously used tokens. This includes frequency penalty (scales with usage count) and presence penalty (applies equally to any used token). These parameters improve output quality by promoting vocabulary diversity and preventing redundant phrasing. + +Visit the following resources to learn more: + +- [@article@Stop the LLM From Rambling: Using Penalties to Control Repetition](https://dev.to/superorange0707/stop-the-llm-from-rambling-using-penalties-to-control-repetition-5h8) +- [@video@What are LLM Presence and Frequency Penalties?](https://www.youtube.com/watch?v=J66CRz6s734) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/replit@Ubk4GN0Z4XlDJ3EbRXdxg.md b/src/data/roadmaps/ai-engineer/content/replit@Ubk4GN0Z4XlDJ3EbRXdxg.md new file mode 100644 index 000000000..60f68cbc6 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/replit@Ubk4GN0Z4XlDJ3EbRXdxg.md @@ -0,0 +1,3 @@ +# Replit + +Replit is an online integrated development environment (IDE) that allows users to write and run code in various programming languages directly in their web browser. It provides a collaborative coding environment with features like real-time collaboration, version control, and package management, making it easy to build and deploy projects without needing to install software locally. Replit also incorporates AI features like code completion and generation to help streamline the coding process. 
\ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/role--behavior@N3TzWYxU0jgv1l99Ts58n.md b/src/data/roadmaps/ai-engineer/content/role--behavior@N3TzWYxU0jgv1l99Ts58n.md new file mode 100644 index 000000000..05e477949 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/role--behavior@N3TzWYxU0jgv1l99Ts58n.md @@ -0,0 +1,3 @@ +# Role & Behavior in System Prompting + +System prompting involves crafting instructions that define the AI model's role, personality, and overall behavior when interacting with users. This allows you to shape the AI's responses, ensuring they are consistent with desired guidelines, such as adopting a specific persona (e.g., a helpful assistant, an expert) or adhering to constraints on tone and style. By carefully defining these aspects, you can significantly influence the AI's output and guide it towards more relevant and effective interactions. \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/sampling-parameters@LbB2PeytxRSuU07Bk0KlJ.md b/src/data/roadmaps/ai-engineer/content/sampling-parameters@LbB2PeytxRSuU07Bk0KlJ.md new file mode 100644 index 000000000..89814ee06 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/sampling-parameters@LbB2PeytxRSuU07Bk0KlJ.md @@ -0,0 +1,3 @@ +# Sampling Parameters + +Sampling parameters are settings that control the randomness and creativity of the text generated by the LLM. They influence how the model chooses the next word in a sequence, impacting the overall coherence, diversity, and relevance of the output. Adjusting these parameters allows users to fine-tune the LLM's responses for specific applications, balancing between predictable, safe outputs and more imaginative, exploratory ones. \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/self-hosted-models@_qqITQ8O0Q0RWUeZsUXnY.md b/src/data/roadmaps/ai-engineer/content/self-hosted-models@_qqITQ8O0Q0RWUeZsUXnY.md new file mode 100644 index 000000000..ca74676c1 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/self-hosted-models@_qqITQ8O0Q0RWUeZsUXnY.md @@ -0,0 +1,8 @@ +# Self-Hosted AI Models + +Self-hosted AI models are machine learning models that are deployed and run on your own infrastructure, rather than relying on a third-party service or cloud provider. This means you have complete control over the hardware, software, and data used to run the model, allowing for greater customization, security, and potentially lower long-term costs, depending on your specific needs and scale. + +Visit the following resources to learn more: + +- [@article@Self-Hosted LLM: A Practical Guide for DevOps](https://www.plural.sh/blog/self-hosting-large-language-models/) +- [@article@Serverless vs. self-hosted LLM inference](https://bentoml.com/llm/llm-inference-basics/serverless-vs-self-hosted-llm-inference) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/streaming-responses@MUDBYjR7uCUZQ-kQxi2K_.md b/src/data/roadmaps/ai-engineer/content/streaming-responses@MUDBYjR7uCUZQ-kQxi2K_.md new file mode 100644 index 000000000..349b9b69a --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/streaming-responses@MUDBYjR7uCUZQ-kQxi2K_.md @@ -0,0 +1,9 @@ +# Streaming Responses + +A streamed response is one technique an AI agent can use to send its answer to the user. With a streamed response, the agent starts sending words as soon as it generates them. The user sees the text grow on the screen in real time, as in the sketch below.
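+Here is a minimal sketch with the OpenAI Python SDK; the model name is illustrative, and any chat endpoint that supports streaming behaves the same way.
+
+```python
+# Print a response token-by-token as it arrives instead of waiting.
+from openai import OpenAI
+
+client = OpenAI()  # reads OPENAI_API_KEY from the environment
+
+stream = client.chat.completions.create(
+    model="gpt-4o-mini",  # illustrative model name
+    messages=[{"role": "user", "content": "Explain streaming in one paragraph."}],
+    stream=True,  # request chunks instead of one final payload
+)
+
+for chunk in stream:
+    if chunk.choices:
+        print(chunk.choices[0].delta.content or "", end="", flush=True)
+```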
This feels fast and lets the user stop or change the request early. It is useful for long answers and chat-like apps. By contrast, an unstreamed response waits until the whole answer is ready, then sends it all at once. This makes the code on the client side simpler and is easier to cache or log, but the user must wait longer, especially for big outputs. + +Visit the following resources to learn more: + +- [@article@Streaming Responses in AI: How AI Outputs Are Generated in Real Time](https://dev.to/pranshu_kabra_fe98a73547a/streaming-responses-in-ai-how-ai-outputs-are-generated-in-real-time-18kb) +- [@article@Streaming vs Non-Streaming LLM Responses](https://medium.com/@vasanthancomrads/streaming-vs-non-streaming-llm-responses-db297ba5467e) +- [@article@AI for Web Devs: Faster Responses with HTTP Streaming](https://austingil.com/ai-for-web-devs-streaming/) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/structured-output@zqhmLzHsmDlrTFVHzhq6-.md b/src/data/roadmaps/ai-engineer/content/structured-output@zqhmLzHsmDlrTFVHzhq6-.md new file mode 100644 index 000000000..5e97db8d4 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/structured-output@zqhmLzHsmDlrTFVHzhq6-.md @@ -0,0 +1,9 @@ +# Structured Output in System Prompting + +Structured output in system prompting refers to designing prompts that guide a Large Language Model (LLM) to generate responses in a predefined format, such as JSON, XML, or a specific text-based structure. This approach focuses on crafting system prompts to elicit predictable and parseable outputs, making it easier to integrate LLM responses into downstream applications and workflows. By providing clear instructions and examples of the desired output structure, we can reliably extract information and automate processes. + +Visit the following resources to learn more: + +- [@article@Structured Output](https://abdullin.com/structured-output/) +- [@article@Generating Structured Outputs from LLMs](https://towardsdatascience.com/generating-structured-outputs-from-llms/?utm_source=roadmap&utm_medium=Referral&utm_campaign=TDS+roadmap+integration) +- [@article@How do Structured Outputs Work?](https://docs.cohere.com/docs/structured-outputs) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/system-prompting@S46Vaq8hYq6Ee1Id_-fSQ.md b/src/data/roadmaps/ai-engineer/content/system-prompting@S46Vaq8hYq6Ee1Id_-fSQ.md new file mode 100644 index 000000000..fad275e58 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/system-prompting@S46Vaq8hYq6Ee1Id_-fSQ.md @@ -0,0 +1,8 @@ +# System Prompting + +System prompting sets the overall context, purpose, and operational guidelines for LLMs. It defines the model's role, behavioral constraints, output format requirements, and safety guardrails. System prompts provide foundational parameters that influence all subsequent interactions, ensuring consistent, controlled, and structured AI responses throughout the session. + +Visit the following resources to learn more: + +- [@article@User prompts vs. 
system prompts: What’s the difference?](https://www.regie.ai/blog/user-prompts-vs-system-prompts) +- [@video@System Prompt Fundamentals](https://www.youtube.com/watch?v=RMR0Y8esSmE) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/temperature@_bPTciEA1GT1JwfXim19z.md b/src/data/roadmaps/ai-engineer/content/temperature@_bPTciEA1GT1JwfXim19z.md new file mode 100644 index 000000000..ef7db81b0 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/temperature@_bPTciEA1GT1JwfXim19z.md @@ -0,0 +1,10 @@ +# Temperature + +Temperature is a parameter used in language models that controls the randomness of the generated text. A higher temperature value (e.g., 1.0) leads to more diverse and unpredictable outputs, as the model is more likely to sample less probable words. Conversely, a lower temperature value (e.g., 0.2) yields more deterministic, conservative outputs, favoring the most likely words according to the model's training data. Essentially, it influences the probability distribution from which the next word is selected. + +Visit the following resources to learn more: + +- [@article@What Temperature Means in Natural Language Processing and AI](https://thenewstack.io/what-temperature-means-in-natural-language-processing-and-ai/) +- [@article@LLM Temperature: How It Works and When You Should Use It](https://www.vellum.ai/llm-parameters/temperature) +- [@article@What is LLM Temperature? - IBM](https://www.ibm.com/think/topics/llm-temperature) +- [@article@How Temperature Settings Transform Your AI Agent's Responses](https://docsbot.ai/article/how-temperature-settings-transform-your-ai-agents-responses) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/tokens@2WbVpRLqwi3Oeqk1JPui4.md b/src/data/roadmaps/ai-engineer/content/tokens@2WbVpRLqwi3Oeqk1JPui4.md new file mode 100644 index 000000000..d34c836af --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/tokens@2WbVpRLqwi3Oeqk1JPui4.md @@ -0,0 +1,9 @@ +# Tokens + +Tokens are the fundamental building blocks of large language models (LLMs). They are discrete units of text that the model processes and uses to understand and generate language. These units can be words, parts of words, or even individual characters, depending on the model's vocabulary. LLMs work by predicting the next token in a sequence, based on the preceding tokens and their learned patterns. + +Visit the following resources to learn more: + +- [@article@Explaining Tokens — the Language and Currency of AI](https://blogs.nvidia.com/blog/ai-tokens-explained/) +- [@article@Understanding Tokens and Parameters in Model Training: A Deep Dive](https://www.functionize.com/blog/understanding-tokens-and-parameters-in-model-training) +- [@video@Most devs don't understand how LLM tokens work](https://www.youtube.com/watch?v=nKSk_TiR8YA&t=33s) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/tools--function-calling@eOqCBgBTKM8CmY3nsWjre.md b/src/data/roadmaps/ai-engineer/content/tools--function-calling@eOqCBgBTKM8CmY3nsWjre.md new file mode 100644 index 000000000..c2e75f23e --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/tools--function-calling@eOqCBgBTKM8CmY3nsWjre.md @@ -0,0 +1,10 @@ +# Tools & Function Calling in AI Agents + +Tools and function calling equip AI agents with the ability to interact with the real world and access external information.
Instead of just providing text-based responses, the agent can use predefined functions, tools, or APIs to perform specific actions like searching the web, sending emails, or controlling devices. This extends the agent's capabilities and allows it to solve more complex tasks that require actions beyond simple language generation. + +Visit the following resources to learn more: + +- [@article@A Comprehensive Guide to Function Calling in LLMs](https://thenewstack.io/a-comprehensive-guide-to-function-calling-in-llms/) +- [@article@What are Tools? - Hugging Face](https://huggingface.co/learn/agents-course/en/unit1/tools) +- [@article@Compare 50+ AI Agent Tools in 2026](https://aimultiple.com/ai-agent-tools) +- [@article@AI Agents Explained in Simple Terms for Beginners](https://www.geeky-gadgets.com/ai-agents-explained-for-beginners/) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/top-k@qzvp6YxWDiGakA2mtspfh.md b/src/data/roadmaps/ai-engineer/content/top-k@qzvp6YxWDiGakA2mtspfh.md new file mode 100644 index 000000000..eda4b9aa4 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/top-k@qzvp6YxWDiGakA2mtspfh.md @@ -0,0 +1,8 @@ +# Top-K Sampling + +Top-K sampling is a method used by Large Language Models (LLMs) during text generation to select the next word. Instead of considering the entire vocabulary, it narrows down the choices to the K most probable words predicted by the model. Low values (1-10) produce conservative, factual outputs. Medium values (20-50) balance creativity and quality. High values (50+) enable diverse, creative outputs. Use low K for technical tasks, high K for creative writing. + +Visit the following resources to learn more: + +- [@article@Top-K Sampling: The Complete Token Selection Guide](https://www.dataannotation.tech/blog/top-k-sampling) +- [@video@What are the LLM’s Top-P + Top-K ?](https://www.youtube.com/watch?v=aDmp2Uim0zQ) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/top-p@FjV3oD7G2Ocq5HhUC17iH.md b/src/data/roadmaps/ai-engineer/content/top-p@FjV3oD7G2Ocq5HhUC17iH.md new file mode 100644 index 000000000..0a8afe8d6 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/top-p@FjV3oD7G2Ocq5HhUC17iH.md @@ -0,0 +1,8 @@ +# Top-P Sampling + +Top-P sampling, also known as nucleus sampling, is a technique used in language models to generate text. Instead of considering all possible next words, it focuses on the smallest set of words whose cumulative probability exceeds a threshold 'P'. Unlike Top-K's fixed number, Top-P dynamically adjusts based on the probability distribution. Low values (0.1-0.5) produce focused outputs, medium (0.6-0.9) balance creativity and coherence, and high (0.9-0.99) enable creative diversity. + +Visit the following resources to learn more: + +- [@article@Top-P Sampling: What Is It and Why Does It Matter?](https://www.dataannotation.tech/blog/top-p-sampling) +- [@video@What are the LLM’s Top-P + Top-K ?](https://www.youtube.com/watch?v=aDmp2Uim0zQ) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/type-of-models@2X0NDcq2ojBJ0RxY_U6bl.md b/src/data/roadmaps/ai-engineer/content/type-of-models@2X0NDcq2ojBJ0RxY_U6bl.md new file mode 100644 index 000000000..e6f13fc4b --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/type-of-models@2X0NDcq2ojBJ0RxY_U6bl.md @@ -0,0 +1,3 @@ +# Types of AI Models + +AI models come in various forms. 
Open models provide transparent access to their architecture and training data, fostering collaboration and customization, while closed models keep these details proprietary. Pre-trained models are trained on massive datasets and can be fine-tuned for specific tasks, saving time and resources. Self-hosted models, on the other hand, offer greater control and privacy as they are deployed and managed on your own infrastructure. \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/using-sdks-directly@WZVW8FQu6LyspSKm1C_sl.md b/src/data/roadmaps/ai-engineer/content/using-sdks-directly@WZVW8FQu6LyspSKm1C_sl.md index ebf2bf254..89d39dc7b 100644 --- a/src/data/roadmaps/ai-engineer/content/using-sdks-directly@WZVW8FQu6LyspSKm1C_sl.md +++ b/src/data/roadmaps/ai-engineer/content/using-sdks-directly@WZVW8FQu6LyspSKm1C_sl.md @@ -1,6 +1,6 @@ # Using SDKs Directly -While tools like Langchain and LlamaIndex make it easy to implement RAG, you don't have to necessarily learn and use them. If you know about the different steps of implementing RAG you can simply do it all yourself e.g. do the chunking using `@langchain/textsplitters` package, create embeddings using any LLM e.g. use OpenAI Embedding API through their SDK, save the embeddings to any vector database e.g. if you are using Supabase Vector DB, you can use their SDK and similarly you can use the relevant SDKs for the rest of the steps as well. +While tools like Langchain and LlamaIndex make it easy to implement RAG, you don't necessarily have to learn and use them. If you know the different steps involved in implementing RAG, you can do it all yourself: do the chunking with the `@langchain/textsplitters` package, create embeddings with any provider's SDK (e.g., the OpenAI Embeddings API), save the embeddings to any vector database (e.g., Supabase Vector through its SDK), and use the relevant SDKs for the remaining steps as well. Visit the following resources to learn more: diff --git a/src/data/roadmaps/ai-engineer/content/vector-dbs@dzPKW_fn82lY1OOVrggk3.md b/src/data/roadmaps/ai-engineer/content/vector-dbs@dzPKW_fn82lY1OOVrggk3.md new file mode 100644 index 000000000..938ed0d52 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/vector-dbs@dzPKW_fn82lY1OOVrggk3.md @@ -0,0 +1,8 @@ +# Vector Databases + +Vector databases are specialized systems designed to store, index, and retrieve high-dimensional vectors, often used as embeddings that represent data like text, images, or audio. Unlike traditional databases that handle structured data, vector databases excel at managing unstructured data by enabling fast similarity searches, where vectors are compared to find those that are most similar to a query. This makes them essential for tasks like semantic search, recommendation systems, and content discovery, where understanding relationships between items is crucial. Vector databases use indexing techniques such as approximate nearest neighbor (ANN) search to efficiently handle large datasets, ensuring quick and accurate retrieval even at scale.
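+The core operation a vector database optimizes is nearest-neighbour search over embeddings. Here is a self-contained sketch of that operation: brute force for clarity, where real systems swap this scan for ANN indexes, and the toy vectors stand in for model-generated embeddings.
+
+```python
+# Brute-force similarity search: the operation vector DBs accelerate with ANN.
+import math
+
+def cosine(a: list[float], b: list[float]) -> float:
+    dot = sum(x * y for x, y in zip(a, b))
+    return dot / (math.sqrt(sum(x * x for x in a)) * math.sqrt(sum(x * x for x in b)))
+
+index = {  # toy "index"; embeddings would come from an embedding model
+    "doc-about-cats": [0.9, 0.1, 0.0],
+    "doc-about-dogs": [0.6, 0.4, 0.2],
+    "doc-about-tax-law": [0.0, 0.1, 0.9],
+}
+
+query = [0.85, 0.15, 0.05]  # say, the embedding of "pets"
+best = max(index, key=lambda name: cosine(query, index[name]))
+print(best)  # -> doc-about-cats (nearest neighbour by cosine similarity)
+```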
+ +Visit the following resources to learn more: + +- [@article@Vector Databases](https://developers.cloudflare.com/vectorize/reference/what-is-a-vector-database/) +- [@article@What are Vector Databases?](https://www.mongodb.com/resources/basics/databases/vector-databases) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/vertex-ai-agent-builder@AxzTGDCC2Ftp4G66U4Uqr.md b/src/data/roadmaps/ai-engineer/content/vertex-ai-agent-builder@AxzTGDCC2Ftp4G66U4Uqr.md new file mode 100644 index 000000000..31128e188 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/vertex-ai-agent-builder@AxzTGDCC2Ftp4G66U4Uqr.md @@ -0,0 +1,9 @@ +# Vertex AI + +Vertex AI is Google Cloud's fully-managed, unified development platform for building, training, deploying, and managing machine learning (ML) models at scale. It provides tools for the entire ML lifecycle, from data preparation and custom training with AutoML to model monitoring and deployment. Vertex AI offers access to Google's foundation models, such as Gemini, along with custom training options and tools for building AI agents. It streamlines the ML workflow into a single interface, supporting both low-code and custom development on managed infrastructure. + +Visit the following resources to learn more: + +- [@official@Vertex AI](https://cloud.google.com/generative-ai-studio?hl=en) +- [@article@Vertex AI Tutorial: A Comprehensive Guide For Beginners](https://www.datacamp.com/tutorial/vertex-ai-tutorial) +- [@video@Introduction to Vertex AI Studio](https://www.youtube.com/watch?v=KWarqNq195M) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/what-are-embeddings@--ig0Ume_BnXb9K2U7HJN.md b/src/data/roadmaps/ai-engineer/content/what-are-embeddings@--ig0Ume_BnXb9K2U7HJN.md index 1405d51c5..986cddb32 100644 --- a/src/data/roadmaps/ai-engineer/content/what-are-embeddings@--ig0Ume_BnXb9K2U7HJN.md +++ b/src/data/roadmaps/ai-engineer/content/what-are-embeddings@--ig0Ume_BnXb9K2U7HJN.md @@ -4,5 +4,6 @@ Embeddings are dense, numerical vector representations of data, such as words, s Visit the following resources to learn more: -- [@official@Introducing Text and Code Embeddings](https://openai.com/index/introducing-text-and-code-embeddings/) -- [@article@What are Embeddings](https://www.cloudflare.com/learning/ai/what-are-embeddings/) \ No newline at end of file +- [@article@What is Embedding? - IBM](https://www.ibm.com/topics/embedding) +- [@article@What are Embeddings in Machine Learning?](https://www.cloudflare.com/en-gb/learning/ai/what-are-embeddings/) +- [@video@What are Word Embeddings](https://www.youtube.com/watch?v=wgfSDrqYMJ4) \ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/windsurf@Xsl8mx6J182TxPPtNP471.md b/src/data/roadmaps/ai-engineer/content/windsurf@Xsl8mx6J182TxPPtNP471.md new file mode 100644 index 000000000..b62fd8e5d --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/windsurf@Xsl8mx6J182TxPPtNP471.md @@ -0,0 +1,3 @@ +# Windsurf + +Windsurf is an AI-powered integrated development environment (IDE) from the team behind Codeium. It builds AI assistance directly into the editor, combining context-aware code completion with an agentic assistant that can search a large codebase, explain how different code elements relate, and propose multi-file changes. It's intended to reduce the time developers spend exploring and understanding code, enabling them to write more efficient and accurate code.
\ No newline at end of file diff --git a/src/data/roadmaps/ai-engineer/content/zero-shot@15XOFdVp0IC-kLYPXUJWh.md b/src/data/roadmaps/ai-engineer/content/zero-shot@15XOFdVp0IC-kLYPXUJWh.md new file mode 100644 index 000000000..a4db7f4a8 --- /dev/null +++ b/src/data/roadmaps/ai-engineer/content/zero-shot@15XOFdVp0IC-kLYPXUJWh.md @@ -0,0 +1,10 @@ +# Zero-Shot Prompting + +Zero-shot prompting is a prompt engineering method that relies on the pretraining of a large language model (LLM) to infer an appropriate response. In contrast to other prompt engineering methods, such as few-shot prompting, models aren’t provided with examples of output when prompting with the zero-shot technique. + +Visit the following resources to learn more: + +- [@article@What is zero-shot prompting?](https://www.ibm.com/think/topics/zero-shot-prompting) +- [@article@Zero-Shot Prompting](https://www.promptingguide.ai/techniques/zeroshot) +- [@article@Technique #3: Examples in Prompts: From Zero-Shot to Few-Shot](https://learnprompting.org/docs/basics/few_shot) +- [@video@Zero-shot, One-shot and Few-shot Prompting Explained | Prompt Engineering 101](https://www.youtube.com/watch?v=sW5xoicq5TY) \ No newline at end of file