diff --git a/.github/workflows/refresh-roadmap-content-json.yml b/.github/workflows/refresh-roadmap-content-json.yml deleted file mode 100644 index 8441eabda..000000000 --- a/.github/workflows/refresh-roadmap-content-json.yml +++ /dev/null @@ -1,52 +0,0 @@ -name: Refresh Roadmap Content JSON - -on: - workflow_dispatch: - schedule: - - cron: '0 0 * * *' - -jobs: - refresh-content: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - - name: Setup pnpm@v9 - uses: pnpm/action-setup@v4 - with: - version: 9 - run_install: false - - - name: Setup Node.js Version 20 (LTS) - uses: actions/setup-node@v4 - with: - node-version: 20 - cache: 'pnpm' - - - name: Install Dependencies and Generate Content JSON - run: | - pnpm install - npm run generate:roadmap-content-json - - - name: Create PR - uses: peter-evans/create-pull-request@v7 - with: - delete-branch: false - branch: "chore/update-content-json" - base: "master" - labels: | - dependencies - automated pr - reviewers: kamranahmedse - commit-message: "chore: update roadmap content json" - title: "Updated Roadmap Content JSON - Automated" - body: | - ## Updated Roadmap Content JSON - - > [!IMPORTANT] - > This PR Updates the Roadmap Content JSON files stored in the `public` directory. - > - > Commit: ${{ github.sha }} - > Workflow Path: ${{ github.workflow_ref }} - - **Please Review the Changes and Merge the PR if everything is fine.** \ No newline at end of file diff --git a/public/roadmap-content/ai-agents.json b/public/roadmap-content/ai-agents.json deleted file mode 100644 index a95f01130..000000000 --- a/public/roadmap-content/ai-agents.json +++ /dev/null @@ -1,2064 +0,0 @@ -{ - "VPI89s-m885r2YrXjYxdd": { - "title": "Basic Backend Development", - "description": "Before you start learning how to build AI agents, we would recommend you to have a basic knowledge of Backend development. 
This includes, programming language knowledge, interacting with database and basics of APIs at minimum.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Introduction to the server-side", - "url": "https://developer.mozilla.org/en-US/docs/Learn/Server-side/First_steps/Introduction", - "type": "article" - }, - { - "title": "What is a REST API? - Red Hat", - "url": "https://www.redhat.com/en/topics/api/what-is-a-rest-api", - "type": "article" - }, - { - "title": "What is a Database? - Oracle", - "url": "https://www.oracle.com/database/what-is-database/", - "type": "article" - } - ] - }, - "McREk2zHOlIrqbGSKbX-J": { - "title": "Git and Terminal Usage", - "description": "Git and the terminal are key tools for AI agents and developers. Git lets you track changes in code, work with branches, and collaborate safely with others. It stores snapshots of your work so you can undo mistakes or merge ideas. The terminal (command line) lets you move around files, run programs, set up servers, and control tools like Git quickly without a GUI.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Git Basics", - "url": "https://git-scm.com/doc", - "type": "article" - }, - { - "title": "Introduction to the Terminal", - "url": "https://ubuntu.com/tutorials/command-line-for-beginners#1-overview", - "type": "article" - }, - { - "title": "Git and Terminal Basics Crash Course (YouTube)", - "url": "https://www.youtube.com/watch?v=HVsySz-h9r4", - "type": "video" - } - ] - }, - "QtTwecLdvQa8pgELJ6i80": { - "title": "REST API Knowledge", - "description": "A **REST API** (Representational State Transfer) is an architectural style for designing networked applications. In AI agents, REST APIs enable communication between the agent and external systems, allowing for data exchange and integration. The agent can use REST APIs to retrieve data from external sources, send data to external systems, and interact with other AI agents or services. 
This provides a flexible and scalable way to integrate with various systems, enabling the agent to access a wide range of data and services. REST APIs in AI agents support a variety of functions, including data retrieval, data sending, and system interaction. They play a crucial role in facilitating communication between AI agents and external systems, making them a fundamental component of AI agent architecture.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "What is RESTful API? - RESTful API Explained - AWS", - "url": "https://aws.amazon.com/what-is/restful-api/", - "type": "article" - }, - { - "title": "What Is a REST API? Examples, Uses & Challenges ", - "url": "https://blog.postman.com/rest-api-examples/", - "type": "article" - } - ] - }, - "ZF5_5Y5zqa75Ov22JACX6": { - "title": "Transformer Models and LLMs", - "description": "Transformer models are a type of neural network that read input data—like words in a sentence—all at once instead of one piece at a time. They use “attention” to find which parts of the input matter most for each other part. This lets them learn patterns in language very well. When a transformer has been trained on a very large set of text, we call it a Large Language Model (LLM). An LLM can answer questions, write text, translate languages, and code because it has seen many examples during training. AI agents use these models as their “brains.” They feed tasks or prompts to the LLM, get back text or plans, and then act on those results. 
This structure helps agents understand goals, break them into steps, and adjust based on feedback, making them useful for chatbots, research helpers, and automation tools.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Exploring Open Source AI Models: LLMs and Transformer Architectures", - "url": "https://llmmodels.org/blog/exploring-open-source-ai-models-llms-and-transformer-architectures/", - "type": "article" - }, - { - "title": "How Transformer LLMs Work", - "url": "https://www.deeplearning.ai/short-courses/how-transformer-llms-work/", - "type": "article" - } - ] - }, - "GAjuWyJl9CI1nqXBp6XCf": { - "title": "Tokenization", - "description": "Tokenization is the step where raw text is broken into small pieces called tokens, and each token is given a unique number. A token can be a whole word, part of a word, a punctuation mark, or even a space. The list of all possible tokens is the model’s vocabulary. Once text is turned into these numbered tokens, the model can look up an embedding for each number and start its math. By working with tokens instead of full sentences, the model keeps the input size steady and can handle new or rare words by slicing them into familiar sub-pieces. After the model finishes its work, the numbered tokens are turned back into text through the same vocabulary map, letting the user read the result.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Explaining Tokens — the Language and Currency of AI", - "url": "https://blogs.nvidia.com/blog/ai-tokens-explained/", - "type": "article" - }, - { - "title": "What is Tokenization? Types, Use Cases, Implementation", - "url": "https://www.datacamp.com/blog/what-is-tokenization", - "type": "article" - } - ] - }, - "dyn1LSioema-Bf9lLTgUZ": { - "title": "Context Windows", - "description": "A context window is the chunk of text a large language model can read at one time. It is measured in tokens, which are pieces of words. 
If a model has a 4,000-token window, it can only “look at” up to about 3,000 words before it must forget or shorten earlier parts. New tokens push old ones out, like a sliding window moving over text. The window size sets hard limits on how long a prompt, chat history, or document can be. A small window forces you to keep inputs short or split them, while a large window lets the model follow longer stories and hold more facts. Choosing the right window size balances cost, speed, and how much detail the model can keep in mind at once.\n\nNew techniques, like retrieval-augmented generation (RAG) and long-context transformers (e.g., Claude 3, Gemini 1.5), aim to extend usable context without hitting model limits directly.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "What is a Context Window in AI?", - "url": "https://www.ibm.com/think/topics/context-window", - "type": "article" - }, - { - "title": "Scaling Language Models with Retrieval-Augmented Generation (RAG)", - "url": "https://arxiv.org/abs/2005.11401", - "type": "article" - }, - { - "title": "Long Context in Language Models - Anthropic's Claude 3", - "url": "https://www.anthropic.com/news/claude-3-family", - "type": "article" - } - ] - }, - "1fiWPBV99E2YncqdCgUw2": { - "title": "Token Based Pricing", - "description": "Token-based pricing is how many language-model services charge for use. A token is a small chunk of text, roughly four characters or part of a word. The service counts every token that goes into the model (your prompt) and every token that comes out (the reply). It then multiplies this total by a listed price per thousand tokens. Some plans set one price for input tokens and a higher or lower price for output tokens. 
Because the bill grows with each token, users often shorten prompts, trim extra words, or cap response length to spend less.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Explaining Tokens — the Language and Currency of AI", - "url": "https://blogs.nvidia.com/blog/ai-tokens-explained/", - "type": "article" - }, - { - "title": "What Are AI Tokens?", - "url": "https://methodshop.com/what-are-ai-tokens/", - "type": "article" - }, - { - "title": "Pricing - OpenAI", - "url": "https://openai.com/api/pricing/", - "type": "article" - } - ] - }, - "L1zL1GzqjSAjF06pIIXhy": { - "title": "Temperature", - "description": "Temperature is a setting that changes how random or predictable an AI model’s text output is. The value usually goes from 0 to 1, sometimes higher. A low temperature, close to 0, makes the model pick the most likely next word almost every time, so the answer is steady and safe but can feel dull or repetitive. A high temperature, like 0.9 or 1.0, lets the model explore less-likely word choices, which can give fresh and creative replies, but it may also add mistakes or drift off topic. By adjusting temperature, you balance reliability and creativity to fit the goal of your task.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "What Temperature Means in Natural Language Processing and AI", - "url": "https://thenewstack.io/what-temperature-means-in-natural-language-processing-and-ai/", - "type": "article" - }, - { - "title": "LLM Temperature: How It Works and When You Should Use It", - "url": "https://www.vellum.ai/llm-parameters/temperature", - "type": "article" - }, - { - "title": "What is LLM Temperature? 
- IBM", - "url": "https://www.ibm.com/think/topics/llm-temperature", - "type": "article" - }, - { - "title": "How Temperature Settings Transform Your AI Agent's Responses", - "url": "https://docsbot.ai/article/how-temperature-settings-transform-your-ai-agents-responses", - "type": "article" - } - ] - }, - "z_N-Y0zGkv8_qHPuVtimL": { - "title": "Frequency Penalty", - "description": "Frequency penalty is a setting that tells a language model, “Stop repeating yourself.” As the model writes, it keeps track of how many times it has already used each word. A positive frequency-penalty value lowers the chance of picking a word again if it has been seen many times in the current reply. This helps cut down on loops like “very very very” or long blocks that echo the same phrase. A value of 0 turns the rule off, while higher numbers make the model avoid repeats more strongly. If the penalty is too high, the text may miss common words that are still needed, so you often start low (for example 0.2) and adjust. Frequency penalty works together with other controls such as temperature and top-p to shape output that is clear, varied, and not boring.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Frequency Penalty Explanation", - "url": "https://platform.openai.com/docs/advanced-usage/advanced-usage#frequency-and-presence-penalties", - "type": "article" - }, - { - "title": "Understanding Frequency Penalty and Presence Penalty", - "url": "https://medium.com/@the_tori_report/understanding-frequency-penalty-and-presence-penalty-how-to-fine-tune-ai-generated-text-e5e4f5e779cd", - "type": "article" - } - ] - }, - "Vd8ycw8pW-ZKvg5WYFtoh": { - "title": "Presence Penalty", - "description": "Presence penalty is a setting you can adjust when you ask a large language model to write. It pushes the model to choose words it has not used yet. Each time a word has already appeared, the model gets a small score cut for picking it again. 
A higher penalty gives bigger cuts, so the model looks for new words and fresh ideas. A lower penalty lets the model reuse words more often, which can help with repeats like rhymes or bullet lists. Tuning this control helps you steer the output toward either more variety or more consistency.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Understanding Presence Penalty and Frequency Penalty", - "url": "https://medium.com/@pushparajgenai2025/understanding-presence-penalty-and-frequency-penalty-in-openai-chat-completion-api-calls-2e3a22547b48", - "type": "article" - }, - { - "title": "Difference between Frequency and Presence Penalties?", - "url": "https://community.openai.com/t/difference-between-frequency-and-presence-penalties/2777", - "type": "article" - }, - { - "title": "LLM Parameters Explained: A Practical Guide with Examples", - "url": "https://learnprompting.org/blog/llm-parameters", - "type": "article" - } - ] - }, - "icbp1NjurQfdM0dHnz6v2": { - "title": "Top-p", - "description": "Top-p, also called nucleus sampling, is a setting that guides how an LLM picks its next word. The model lists many possible words and sorts them by probability. It then finds the smallest group of top words whose combined chance adds up to the chosen p value, such as 0.9. Only words inside this group stay in the running; the rest are dropped. The model picks one word from the kept group at random, weighted by their original chances. A lower p keeps only the very likely words, so output is safer and more focused. 
A higher p lets in less likely words, adding surprise and creativity but also more risk of error.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Nucleus Sampling", - "url": "https://nn.labml.ai/sampling/nucleus.html", - "type": "article" - }, - { - "title": "Sampling Techniques in Large Language Models (LLMs)", - "url": "https://medium.com/@shashankag14/understanding-sampling-techniques-in-large-language-models-llms-dfc28b93f518", - "type": "article" - }, - { - "title": "Temperature, top_p and top_k for chatbot responses", - "url": "https://community.openai.com/t/temperature-top-p-and-top-k-for-chatbot-responses/295542", - "type": "article" - } - ] - }, - "K0G-Lw069jXUJwZqHtybd": { - "title": "Stopping Criteria", - "description": "Stopping criteria tell the language model when to stop writing more text. Without them, the model could keep adding words forever, waste time, or spill past the point we care about. Common rules include a maximum number of tokens, a special end-of-sequence token, or a custom string such as `“\\n\\n”`. We can also stop when the answer starts to repeat or reaches a score that means it is off topic. Good stopping rules save cost, speed up replies, and avoid nonsense or unsafe content.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Defining Stopping Criteria in Large Language Models", - "url": "https://www.metriccoders.com/post/defining-stopping-criteria-in-large-language-models-a-practical-guide", - "type": "article" - }, - { - "title": "Stopping Criteria for Decision Tree Algorithm and Tree Plots", - "url": "https://aieagle.in/stopping-criteria-for-decision-tree-algorithm-and-tree-plots/", - "type": "article" - } - ] - }, - "DSJAhQhc1dQmBHQ8ZkTau": { - "title": "Open Weight Models", - "description": "Open-weight models are neural networks whose trained parameters, also called weights, are shared with everyone. 
Anyone can download the files, run the model, fine-tune it, or build tools on top of it. The licence that comes with the model spells out what you are allowed to do. Some licences are very permissive and even let you use the model for commercial work. Others allow only research or personal projects. Because the weights are public, the community can inspect how the model works, check for bias, and suggest fixes. Open weights also lower costs, since teams do not have to train a large model from scratch. Well-known examples include BLOOM, Falcon, and Llama 2.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "BLOOM BigScience", - "url": "https://bigscience.huggingface.co/", - "type": "article" - }, - { - "title": "Falcon LLM – Technology Innovation Institute (TII)", - "url": "https://falconllm.tii.ae/", - "type": "article" - }, - { - "title": "Llama 2 – Meta's Official Announcement", - "url": "https://ai.meta.com/llama/", - "type": "article" - }, - { - "title": "Hugging Face – Open LLM Leaderboard (Top Open Models)", - "url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", - "type": "article" - }, - { - "title": "EleutherAI – Open Research Collective (GPT-Neo, GPT-J, etc.)", - "url": "https://www.eleuther.ai/", - "type": "article" - } - ] - }, - "tJYmEDDwK0LtEux-kwp9B": { - "title": "Closed Weight Models", - "description": "Closed-weight models are AI systems whose trained parameters—the numbers that hold what the model has learned—are not shared with the public. You can send prompts to these models through an online service or a software kit, but you cannot download the weights, inspect them, or fine-tune them on your own computer. The company that owns the model keeps control and sets the rules for use, often through paid APIs or tight licences. This approach helps the owner protect trade secrets, reduce misuse, and keep a steady income stream. 
The downside is less freedom for users, higher costs over time, and limited ability to audit or adapt the model. Well-known examples include GPT-4, Claude, and Gemini.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Open-Source LLMs vs Closed LLMs", - "url": "https://hatchworks.com/blog/gen-ai/open-source-vs-closed-llms-guide/", - "type": "article" - }, - { - "title": "2024 Comparison of Open-Source Vs Closed-Source LLMs", - "url": "https://blog.spheron.network/choosing-the-right-llm-2024-comparison-of-open-source-vs-closed-source-llms", - "type": "article" - }, - { - "title": "Open AI's GPT-4", - "url": "https://openai.com/gpt-4", - "type": "article" - }, - { - "title": "Claude", - "url": "https://www.anthropic.com/claude", - "type": "article" - }, - { - "title": "Gemini", - "url": "https://deepmind.google/technologies/gemini/", - "type": "article" - } - ] - }, - "i2NE6haX9-7mdoV5LQ3Ah": { - "title": "Streamed vs Unstreamed Responses", - "description": "Streamed and unstreamed responses describe how an AI agent sends its answer to the user. With a streamed response, the agent starts sending words as soon as it generates them. The user sees the text grow on the screen in real time. This feels fast and lets the user stop or change the request early. It is useful for long answers and chat-like apps.\n\nAn unstreamed response waits until the whole answer is ready, then sends it all at once. This makes the code on the client side simpler and is easier to cache or log, but the user must wait longer, especially for big outputs. 
Choosing between the two depends on the need for speed, the length of the answer, and how complex you want the client and server to be.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Streaming Responses in AI: How AI Outputs Are Generated in Real Time", - "url": "https://dev.to/pranshu_kabra_fe98a73547a/streaming-responses-in-ai-how-ai-outputs-are-generated-in-real-time-18kb", - "type": "article" - }, - { - "title": "AI for Web Devs: Faster Responses with HTTP Streaming", - "url": "https://austingil.com/ai-for-web-devs-streaming/", - "type": "article" - }, - { - "title": "Master the OpenAI API: Stream Responses", - "url": "https://www.toolify.ai/gpts/master-the-openai-api-stream-responses-139447", - "type": "article" - } - ] - }, - "N3yZfUxphxjiupqGpyaS9": { - "title": "Reasoning vs Standard Models", - "description": "Reasoning models break a task into clear steps and follow a line of logic, while standard models give an answer in one quick move. A reasoning model might write down short notes, check each note, and then combine them to reach the final reply. This helps it solve math problems, plan actions, and spot errors that simple pattern matching would miss. A standard model depends on patterns it learned during training and often guesses the most likely next word. That works well for everyday chat, summaries, or common facts, but it can fail on tricky puzzles or tasks with many linked parts. Reasoning takes more time and computer power, yet it brings higher accuracy and makes the agent easier to debug because you can see its thought steps. 
Many new AI agents mix both styles: they use quick pattern recall for simple parts and switch to step-by-step reasoning when a goal needs deeper thought.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "ReAct: Synergizing Reasoning and Acting in Language Models", - "url": "https://react-lm.github.io/", - "type": "article" - }, - { - "title": "ReAct Systems: Enhancing LLMs with Reasoning and Action", - "url": "https://learnprompting.org/docs/agents/react", - "type": "article" - } - ] - }, - "5OW_6o286mj470ElFyJ_5": { - "title": "Fine-tuning vs Prompt Engineering", - "description": "Fine-tuning and prompt engineering are two ways to get better outputs from a language model. Fine-tuning means training an existing model further with your own examples so it adapts to specific tasks. It needs extra data, computing power, and time but creates deeply specialized models. Prompt engineering, in contrast, leaves the model unchanged and focuses on crafting better instructions or examples in the prompt itself. It is faster, cheaper, and safer when no custom data is available. Fine-tuning suits deep domain needs; prompt engineering fits quick control and prototyping.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "OpenAI Fine Tuning", - "url": "https://platform.openai.com/docs/guides/fine-tuning", - "type": "article" - }, - { - "title": "Prompt Engineering Guide", - "url": "https://www.promptingguide.ai/", - "type": "article" - }, - { - "title": "Prompt Engineering vs Prompt Tuning: A Detailed Explanation", - "url": "https://medium.com/@aabhi02/prompt-engineering-vs-prompt-tuning-a-detailed-explanation-19ea8ce62ac4", - "type": "article" - } - ] - }, - "UIm54UmICKgep6s8Itcyv": { - "title": "Embeddings and Vector Search", - "description": "Embeddings turn words, pictures, or other data into lists of numbers called vectors. Each vector keeps the meaning of the original item. 
Things with similar meaning get vectors that sit close together in this number space. Vector search scans a large set of vectors and finds the ones nearest to a query vector, even if the exact words differ. This lets AI agents match questions with answers, suggest related items, and link ideas quickly.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "OpenAI Embeddings API Documentation", - "url": "https://platform.openai.com/docs/guides/embeddings/what-are-embeddings", - "type": "article" - }, - { - "title": "Understanding Embeddings and Vector Search (Pinecone Blog)", - "url": "https://www.pinecone.io/learn/vector-embeddings/", - "type": "article" - } - ] - }, - "qwVQOwBTLA2yUgRISzC8k": { - "title": "Understand the Basics of RAG", - "description": "RAG, short for Retrieval-Augmented Generation, is a way to make language models give better answers by letting them look things up before they reply. First, the system turns the user’s question into a search query and scans a knowledge source, such as a set of documents or a database. It then pulls back the most relevant passages, called “retrievals.” Next, the language model reads those passages and uses them, plus its own trained knowledge, to write the final answer. This mix of search and generation helps the model stay up to date, reduce guesswork, and cite real facts. 
Because it adds outside information on demand, RAG often needs less fine-tuning and can handle topics the base model never saw during training.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "What Is RAG in AI and How to Use It?", - "url": "https://www.v7labs.com/blog/what-is-rag", - "type": "article" - }, - { - "title": "An Introduction to RAG and Simple & Complex RAG", - "url": "https://medium.com/enterprise-rag/an-introduction-to-rag-and-simple-complex-rag-9c3aa9bd017b", - "type": "article" - }, - { - "title": "Learn RAG From Scratch", - "url": "https://www.youtube.com/watch?v=sVcwVQRHIc8", - "type": "video" - } - ] - }, - "B8dzg61TGaknuruBgkEJd": { - "title": "Pricing of Common Models", - "description": "When you use a large language model, you usually pay by the amount of text it reads and writes, counted in “tokens.” A token is about four characters or three-quarters of a word. Providers list a price per 1,000 tokens. For example, GPT-3.5 Turbo may cost around $0.002 per 1,000 tokens, while GPT-4 is much higher, such as $0.03 to $0.06 for prompts and $0.06 to $0.12 for replies. Smaller open-source models like Llama-2 can be free to use if you run them on your own computer, but you still pay for the hardware or cloud time. Vision or audio models often have extra fees because they use more compute. 
When planning costs, estimate the tokens in each call, multiply by the price, and add any hosting or storage charges.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "OpenAI Pricing", - "url": "https://openai.com/api/pricing/", - "type": "article" - }, - { - "title": "Executive Guide To AI Agent Pricing", - "url": "https://www.forbes.com/councils/forbesbusinesscouncil/2025/01/28/executive-guide-to-ai-agent-pricing-winning-strategies-and-models-to-drive-growth/", - "type": "article" - }, - { - "title": "AI Pricing: How Much Does Artificial Intelligence Cost In 2025?", - "url": "https://www.internetsearchinc.com/ai-pricing-how-much-does-artificial-intelligence-cost/", - "type": "article" - } - ] - }, - "aFZAm44nP5NefX_9TpT0A": { - "title": "What are AI Agents?", - "description": "An AI agent is a computer program or robot that can sense its surroundings, think about what it senses, and then act to reach a goal. It gathers data through cameras, microphones, or software inputs, decides what the data means using rules or learned patterns, and picks the best action to move closer to its goal. After acting, it checks the results and learns from them, so it can do better next time. Chatbots, self-driving cars, and game characters are all examples.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "What are AI Agents? - Agents in Artificial Intelligence Explained", - "url": "https://aws.amazon.com/what-is/ai-agents/", - "type": "article" - }, - { - "title": "AI Agents Explained in Simple Terms for Beginners", - "url": "https://www.geeky-gadgets.com/ai-agents-explained-for-beginners/", - "type": "article" - }, - { - "title": "What are AI Agents?", - "url": "https://www.youtube.com/watch?v=F8NKVhkZZWI", - "type": "video" - } - ] - }, - "2zsOUWJQ8e7wnoHmq1icG": { - "title": "What are Tools?", - "description": "Tools are extra skills or resources that an AI agent can call on to finish a job. 
A tool can be anything from a web search API to a calculator, a database, or a language-translation engine. The agent sends a request to the tool, gets the result, and then uses that result to move forward. Tools let a small core model handle tasks that would be hard or slow on its own. They also help keep answers current, accurate, and grounded in real data. Choosing the right tool and knowing when to use it are key parts of building a smart agent.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Compare 50+ AI Agent Tools in 2025 - AIMultiple", - "url": "https://research.aimultiple.com/ai-agent-tools/", - "type": "article" - }, - { - "title": "AI Agents Explained in Simple Terms for Beginners", - "url": "https://www.geeky-gadgets.com/ai-agents-explained-for-beginners/", - "type": "article" - } - ] - }, - "Eih4eybuYB3C2So8K0AT3": { - "title": "Agent Loop", - "description": "An agent loop is the cycle that lets an AI agent keep working toward a goal. First, the agent gathers fresh data from its tools, sensors, or memory. Next, it updates its internal state and decides what to do, often by running a planning or reasoning step. Then it carries out the chosen action, such as calling an API, writing to a file, or sending a message. After acting, it checks the result and stores new information. The loop starts again with the latest data, so the agent can adjust to changes and improve over time. 
This fast repeat of observe–decide–act gives the agent its power.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "What is an Agent Loop?", - "url": "https://huggingface.co/learn/agents-course/en/unit1/agent-steps-and-structure", - "type": "article" - }, - { - "title": "Let's Build your Own Agentic Loop", - "url": "https://www.reddit.com/r/AI_Agents/comments/1js1xjz/lets_build_our_own_agentic_loop_running_in_our/", - "type": "article" - } - ] - }, - "LU76AhCYDjxdBhpMQ4eMU": { - "title": "Perception / User Input", - "description": "Perception, also called user input, is the first step in an agent loop. The agent listens and gathers data from the outside world. This data can be text typed by a user, spoken words, camera images, sensor readings, or web content pulled through an API. The goal is to turn raw signals into a clear, usable form. The agent may clean the text, translate speech to text, resize an image, or drop noise from sensor values. Good perception means the agent starts its loop with facts, not guesses. If the input is wrong or unclear, later steps will also fail. So careful handling of perception keeps the whole agent loop on track.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Perception in AI: Understanding Its Types and Importance", - "url": "https://marktalks.com/perception-in-ai-understanding-its-types-and-importance/", - "type": "article" - }, - { - "title": "What Is AI Agent Perception? - IBM", - "url": "https://www.ibm.com/think/topics/ai-agent-perception", - "type": "article" - } - ] - }, - "ycPRgRYR4lEBQr_xxHKnM": { - "title": "Reason and Plan", - "description": "Reason and Plan is the moment when an AI agent thinks before it acts. The agent starts with a goal and the facts it already knows. It looks at these facts and asks, “What do I need to do next to reach the goal?” It breaks the goal into smaller steps, checks if each step makes sense, and orders them in a clear path. 
The agent may also guess what could go wrong and prepare backup steps. Once the plan feels solid, the agent is ready to move on and take the first action.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "ReAct: Synergizing Reasoning and Acting in Language Models", - "url": "https://react-lm.github.io/", - "type": "article" - }, - { - "title": "ReAct Systems: Enhancing LLMs with Reasoning and Action", - "url": "https://learnprompting.org/docs/agents/react", - "type": "article" - } - ] - }, - "sHYd4KsKlmw5Im3nQ19W8": { - "title": "Acting / Tool Invocation", - "description": "Acting, also called tool invocation, is the step where the AI chooses a tool and runs it to get real-world data or to change something. The agent looks at its current goal and the plan it just made. It then picks the best tool, such as a web search, a database query, or a calculator. The agent fills in the needed inputs and sends the call. The external system does the heavy work and returns a result. Acting ends when the agent stores that result so it can think about the next move.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "What are Tools in AI Agents?", - "url": "https://huggingface.co/learn/agents-course/en/unit1/tools", - "type": "article" - }, - { - "title": "What is Tool Calling in Agents?", - "url": "https://www.useparagon.com/blog/ai-building-blocks-what-is-tool-calling-a-guide-for-pms", - "type": "article" - } - ] - }, - "ZJTrun3jK3zBGOTm1jdMI": { - "title": "Observation & Reflection", - "description": "Observation and reflection form the thinking pause in an AI agent’s loop. First, the agent looks at the world around it, gathers fresh data, and sees what has changed. It then pauses to ask, “What does this new information mean for my goal?” During this short check, the agent updates its memory, spots errors, and ranks what matters most. These steps guide wiser plans and actions in the next cycle. 
Without careful observation and reflection, the agent would rely on old or wrong facts and soon drift off course.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Best Practices for Prompting and Self-checking", - "url": "https://platform.openai.com/docs/guides/prompt-engineering", - "type": "article" - }, - { - "title": "Self-Reflective AI: Building Agents That Learn by Observing Themselves", - "url": "https://arxiv.org/abs/2302.14045", - "type": "article" - } - ] - }, - "PPdAutqJF5G60Eg9lYBND": { - "title": "Personal assistant", - "description": "A personal assistant AI agent is a smart program that helps one person manage daily tasks. It can check a calendar, set reminders, and send alerts so you never miss a meeting. It can read emails, highlight key points, and even draft quick replies. If you ask a question, it searches trusted sources and gives a short answer. It can order food, book rides, or shop online when you give simple voice or text commands. Because it learns your habits, it suggests the best time to work, rest, or travel. All these actions run in the background, saving you time and reducing stress.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "A Complete Guide on AI-powered Personal Assistants", - "url": "https://medium.com/@alexander_clifford/a-complete-guide-on-ai-powered-personal-assistants-with-examples-2f5cd894d566", - "type": "article" - }, - { - "title": "9 Best AI Personal Assistants for Work, Chat and Home", - "url": "https://saner.ai/best-ai-personal-assistants/", - "type": "article" - } - ] - }, - "PK8w31GlvtmAuU92sHaqr": { - "title": "Code generation", - "description": "Code-generation agents take a plain language request, understand the goal, and then write or edit source code to meet it. They can build small apps, add features, fix bugs, refactor old code, write tests, or translate code from one language to another. 
This saves time for developers, helps beginners learn, and reduces human error. Teams use these agents inside code editors, chat tools, and automated pipelines. By handling routine coding tasks, the agents free people to focus on design, logic, and user needs.\n\nVisit the following resources to learn more:",
-    "links": [
-      {
-        "title": "Multi-Agent-based Code Generation",
-        "url": "https://arxiv.org/abs/2312.13010",
-        "type": "article"
-      },
-      {
-        "title": "From Prompt to Production: GitHub Blog",
-        "url": "https://github.blog/ai-and-ml/github-copilot/from-prompt-to-production-building-a-landing-page-with-copilot-agent-mode/",
-        "type": "article"
-      },
-      {
-        "title": "GitHub Copilot",
-        "url": "https://github.com/features/copilot",
-        "type": "article"
-      }
-    ]
-  },
-  "wKYEaPWNsR30TIpHaxSsq": {
-    "title": "Data analysis",
-    "description": "AI agents can automate data analysis by pulling information from files, databases, or live streams. They clean the data by spotting missing values, outliers, and making smart corrections. After cleaning, agents find patterns like sales spikes or sensor drops and can build charts or dashboards. Some run basic statistics, others apply machine learning to predict trends. Agents can also send alerts if numbers go beyond set limits, helping people stay informed without constant monitoring.\n\nVisit the following resources to learn more:",
-    "links": [
-      {
-        "title": "How AI Will Transform Data Analysis in 2025",
-        "url": "https://www.devfi.com/ai-transform-data-analysis-2025/",
-        "type": "article"
-      },
-      {
-        "title": "How AI Has Changed The World Of Analytics And Data Science",
-        "url": "https://www.forbes.com/councils/forbestechcouncil/2025/01/28/how-ai-has-changed-the-world-of-analytics-and-data-science/",
-        "type": "article"
-      }
-    ]
-  },
-  "5oLc-235bvKhApxzYFkEc": {
-    "title": "Web Scraping / Crawling",
-    "description": "Web scraping and crawling let an AI agent collect data from many web pages without human help. 
The agent sends a request to a page, reads the HTML, and pulls out parts you ask for, such as prices, news headlines, or product details. It can then follow links on the page to reach more pages and repeat the same steps. This loop builds a large, up-to-date dataset in minutes or hours instead of days. Companies use it to track market prices, researchers use it to gather facts or trends, and developers use it to feed fresh data into other AI models. Good scraping code also respects site rules like robots.txt and avoids hitting servers too fast, so it works smoothly and fairly.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Crawl AI - Build Your AI With One Prompt", - "url": "https://www.crawlai.org/", - "type": "article" - }, - { - "title": "AI-Powered Web Scraper with Crawl4AI and DeepSeek", - "url": "https://brightdata.com/blog/web-data/crawl4ai-and-deepseek-web-scraping", - "type": "article" - }, - { - "title": "Best Web Scraping Tools for AI Applications", - "url": "https://www.thetoolnerd.com/p/best-web-scraping-tools-for-ai-applications", - "type": "article" - }, - { - "title": "8 Best AI Web Scraping Tools I Tried - HubSpot Blog", - "url": "https://blog.hubspot.com/website/ai-web-scraping", - "type": "article" - } - ] - }, - "ok8vN7VtCgyef5x6aoQaL": { - "title": "NPC / Game AI", - "description": "Game studios use AI agents to control non-player characters (NPCs). The agent observes the game state and decides actions like moving, speaking, or fighting. It can shift tactics when the player changes strategy, keeping battles fresh instead of predictable. A quest giver might use an agent to offer hints that fit the player’s progress. In open-world games, agents guide crowds to move around obstacles, set new goals, and react to threats, making towns feel alive. Designers save time by writing broad rules and letting agents fill in details instead of hand-coding every scene. 
Smarter NPC behavior keeps players engaged and boosts replay value.\n\nVisit the following resources to learn more:",
-    "links": [
-      {
-        "title": "Unreal Engine – AI for NPCs",
-        "url": "https://dev.epicgames.com/documentation/en-us/unreal-engine/artificial-intelligence-in-unreal-engine?application_version=5.3",
-        "type": "article"
-      },
-      {
-        "title": "AI-Driven NPCs: The Future of Gaming Explained",
-        "url": "https://www.capermint.com/blog/everything-you-need-to-know-about-non-player-character-npc/",
-        "type": "article"
-      }
-    ]
-  },
-  "Bn_BkthrVX_vOuwQzvPZa": {
-    "title": "Max Length",
-    "description": "Max Length sets the maximum number of tokens a language model can generate in one reply. Tokens are pieces of text—roughly 100 tokens equals a short paragraph. A small limit saves time and cost but risks cutting answers short. A large limit allows full, detailed replies but needs more compute and can lose focus. Choose limits based on the task: short limits for tweets, longer ones for articles. 
Tuning Max Length carefully helps balance clarity, speed, and cost.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "OpenAI Token Usage", - "url": "https://platform.openai.com/docs/guides/gpt/managing-tokens", - "type": "article" - }, - { - "title": "Size and Max Token Limits", - "url": "https://docs.anthropic.com/claude/docs/size-and-token-limits", - "type": "article" - }, - { - "title": "Utilising Max Token Context Window of Anthropic Claude", - "url": "https://medium.com/@nampreetsingh/utilising-max-token-context-window-of-anthropic-claude-on-amazon-bedrock-7377d94b2dfa", - "type": "article" - }, - { - "title": "Controlling the Length of OpenAI Model Responses", - "url": "https://help.openai.com/en/articles/5072518-controlling-the-length-of-openai-model-responses", - "type": "article" - }, - { - "title": "Max Model Length in AI", - "url": "https://www.restack.io/p/ai-model-answer-max-model-length-cat-ai", - "type": "article" - }, - { - "title": "Understanding ChatGPT/OpenAI Tokens", - "url": "https://youtu.be/Mo3NV5n1yZk", - "type": "video" - } - ] - }, - "Y8EqzFx3qxtrSh7bWbbV8": { - "title": "What is Prompt Engineering", - "description": "Prompt engineering is the skill of writing clear questions or instructions so that an AI system gives the answer you want. It means choosing the right words, adding enough detail, and giving examples when needed. A good prompt tells the AI what role to play, what style to use, and what facts to include or avoid. By testing and refining the prompt, you can improve the quality, accuracy, and usefulness of the AI’s response. In short, prompt engineering is guiding the AI with well-designed text so it can help you better.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Visit Dedicated Prompt Engineering Roadmap", - "url": "https://roadmap.sh/prompt-engineering", - "type": "article" - }, - { - "title": "What is Prompt Engineering? 
- AI Prompt Engineering Explained - AWS", - "url": "https://aws.amazon.com/what-is/prompt-engineering/", - "type": "article" - }, - { - "title": "What is Prompt Engineering? A Detailed Guide For 2025", - "url": "https://www.datacamp.com/blog/what-is-prompt-engineering-the-future-of-ai-communication", - "type": "article" - } - ] - }, - "qFKFM2qNPEN7EoD0V-1SM": { - "title": "Be specific in what you want", - "description": "When you ask an AI to do something, clear and exact words help it give the answer you want. State the goal, the format, and any limits up front. Say who the answer is for, how long it should be, and what to leave out. If numbers, dates, or sources matter, name them. For example, rather than “Explain World War II,” try “List three key events of World War II with dates and one short fact for each.” Being this precise cuts down on guesswork, avoids unwanted extra detail, and saves time by reducing follow-up questions.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Prompt Engineering Guide", - "url": "https://www.promptingguide.ai/", - "type": "article" - }, - { - "title": "AI Prompting Examples, Templates, and Tips For Educators", - "url": "https://honorlock.com/blog/education-ai-prompt-writing/", - "type": "article" - }, - { - "title": "How to Ask AI for Anything: The Art of Prompting", - "url": "https://sixtyandme.com/using-ai-prompts/", - "type": "article" - } - ] - }, - "6I42CoeWX-kkFXTKAY7rw": { - "title": "Provide additional context", - "description": "Provide additional context means giving the AI enough background facts, constraints, and goals so it can reply in the way you need. Start by naming the topic and the purpose of the answer. Add who the answer is for, the tone you want, and any limits such as length, format, or style. List key facts, data, or examples that matter to the task. This extra detail stops the model from guessing and keeps replies on target. 
Think of it like guiding a new teammate: share the details they need, but keep them short and clear.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "What is Context in Prompt Engineering?", - "url": "https://www.godofprompt.ai/blog/what-is-context-in-prompt-engineering", - "type": "article" - }, - { - "title": "The Importance of Context for Reliable AI Systems", - "url": "https://medium.com/mathco-ai/the-importance-of-context-for-reliable-ai-systems-and-how-to-provide-context-009bd1ac7189/", - "type": "article" - }, - { - "title": "Context Engineering: Why Feeding AI the Right Context Matters", - "url": "https://inspirednonsense.com/context-engineering-why-feeding-ai-the-right-context-matters-353e8f87d6d3", - "type": "article" - } - ] - }, - "sUwdtOX550tSdceaeFPmF": { - "title": "Use relevant technical terms", - "description": "When a task involves a special field such as law, medicine, or computer science, include the correct domain words in your prompt so the AI knows exactly what you mean. Ask for “O(n log n) sorting algorithms” instead of just “fast sorts,” or “HTTP status code 404” instead of “page not found error.” The right term narrows the topic, removes guesswork, and points the model toward the knowledge base you need. It also keeps the answer at the right level, because the model sees you understand the field and will reply with matching depth. Check spelling and letter case; “SQL” and “sql” are seen the same, but “Sequel” is not. Do not overload the prompt with buzzwords—add only the words that truly matter. 
The goal is clear language plus the exact technical labels the subject uses.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "AI Terms Glossary: AI Terms To Know In 2024", - "url": "https://www.moveworks.com/us/en/resources/ai-terms-glossary", - "type": "article" - }, - { - "title": "15 Essential AI Agent Terms You Must Know", - "url": "https://shivammore.medium.com/15-essential-ai-agent-terms-you-must-know-6bfc2f332f6d", - "type": "article" - }, - { - "title": "AI Agent Examples & Use Cases: Real Applications in 2025", - "url": "https://eastgate-software.com/ai-agent-examples-use-cases-real-applications-in-2025/", - "type": "article" - } - ] - }, - "yulzE4ZNLhXOgHhG7BtZQ": { - "title": "Use Examples in your Prompt", - "description": "A clear way to guide an AI is to place one or two short samples inside your prompt. Show a small input and the exact output you expect. The AI studies these pairs and copies their pattern. Use plain words in the sample, keep the format steady, and label each part so the model knows which is which. If you need a list, show a list; if you need a table, include a small table. Good examples cut guesswork, reduce errors, and save you from writing long rules.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "10 Real-World AI Agent Examples in 2025", - "url": "https://www.chatbase.co/blog/ai-agent-examples", - "type": "article" - }, - { - "title": "GPT-4.1 Prompting Guide", - "url": "https://cookbook.openai.com/examples/gpt4-1_prompting_guide", - "type": "article" - }, - { - "title": "AI Agent Examples & Use Cases: Real Applications in 2025", - "url": "https://eastgate-software.com/ai-agent-examples-use-cases-real-applications-in-2025/", - "type": "article" - } - ] - }, - "noTuUFnHSBzn7GKG9UZEi": { - "title": "Iterate and Test your Prompts", - "description": "After you write a first prompt, treat it as a draft, not the final version. 
Run it with the AI, check the output, and note what is missing, wrong, or confusing. Change one thing at a time, such as adding an example, a limit on length, or a tone request. Test again and see if the result gets closer to what you want. Keep a record of each change and its effect, so you can learn patterns that work. Stop when the output is clear, correct, and repeatable. This loop of try, observe, adjust, and retry turns a rough prompt into a strong one.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Prompt Engineering Best Practices", - "url": "https://www.deeplearning.ai/short-courses/chatgpt-prompt-engineering-for-developers/", - "type": "course" - }, - { - "title": "Master Iterative Prompting: A Guide", - "url": "https://blogs.vreamer.space/master-iterative-prompting-a-guide-to-more-effective-interactions-with-ai-50a736eaec38", - "type": "article" - }, - { - "title": "Prompt Engineering: The Iterative Process", - "url": "https://www.youtube.com/watch?v=dOxUroR57xs", - "type": "video" - } - ] - }, - "wwHHlEoPAx0TLxbtY6nMA": { - "title": "Specify Length, format etc", - "description": "When you give a task to an AI, make clear how long the answer should be and what shape it must take. Say “Write 120 words” or “Give the steps as a numbered list.” If you need a table, state the column names and order. If you want bullet points, mention that. Telling the AI to use plain text, JSON, or markdown stops guesswork and saves time. Clear limits on length keep the reply focused. A fixed format makes it easier for people or other software to read and use the result. 
Always put these rules near the start of your prompt so the AI sees them as important.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Mastering Prompt Engineering: Format, Length, and Audience", - "url": "https://techlasi.com/savvy/mastering-prompt-engineering-format-length-and-audience-examples-for-2024/", - "type": "article" - }, - { - "title": "Ultimate Guide to Prompt Engineering", - "url": "https://promptdrive.ai/prompt-engineering/", - "type": "article" - } - ] - }, - "qakbxB8xe7Y8gejC5cZnK": { - "title": "Tool Definition", - "description": "A tool is any skill or function that an AI agent can call to get a job done. It can be as simple as a calculator for math or as complex as an API that fetches live weather data. Each tool has a name, a short description of what it does, and a clear list of the inputs it needs and the outputs it returns. The agent’s planner reads this definition to decide when to use the tool. Good tool definitions are precise and leave no room for doubt, so the agent will not guess or misuse them. They also set limits, like how many times a tool can be called or how much data can be pulled, which helps control cost and errors. Think of a tool definition as a recipe card the agent follows every time it needs that skill.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Understanding the Agent Function in AI: Key Roles and Responsibilities", - "url": "https://pingax.com/ai/agent/function/understanding-the-agent-function-in-ai-key-roles-and-responsibilities/", - "type": "article" - }, - { - "title": "What is an AI Tool?", - "url": "https://www.synthesia.io/glossary/ai-tool", - "type": "article" - } - ] - }, - "kBtqT8AduLoYDWopj-V9_": { - "title": "Web Search", - "description": "Web search lets an AI agent pull fresh facts, news, and examples from the internet while it is working. 
The agent turns a user request into search words, sends them to a search engine, and reads the list of results. It then follows the most promising links, grabs the page text, and picks out the parts that answer the task. This helps the agent handle topics that were not in its training data, update old knowledge, or double-check details. Web search covers almost any subject and is much faster than manual research, but the agent must watch for ads, bias, or wrong pages and cross-check sources to stay accurate.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "8 Best AI Search Engines for 2025", - "url": "https://usefulai.com/tools/ai-search-engines", - "type": "article" - }, - { - "title": "Web Search Agent - PraisonAI Documentation", - "url": "https://docs.praison.ai/agents/websearch", - "type": "article" - } - ] - }, - "mS0EVCkWuPN_GkVPng4A2": { - "title": "Code Execution / REPL", - "description": "Code Execution or REPL (Read-Eval-Print Loop) lets an AI agent run small pieces of code on demand, see the result right away, and use that result to decide what to do next. The agent “reads” the code, “evaluates” it in a safe sandbox, “prints” the output, and then loops back for more input. With this tool the agent can test ideas, perform math, transform text, call APIs, or inspect data without waiting for a full build or deployment. Python, JavaScript, or even shell commands are common choices because they start fast and have many libraries. Quick feedback helps the agent catch errors early and refine its plan step by step. Sandboxing keeps the host system safe by blocking dangerous actions such as deleting files or making forbidden network calls. 
Overall, a Code Execution / REPL tool gives the agent a fast, flexible workbench for problem-solving.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "What is a REPL?", - "url": "https://docs.replit.com/getting-started/intro-replit", - "type": "article" - }, - { - "title": "Code Execution AI Agent", - "url": "https://docs.praison.ai/features/codeagent", - "type": "article" - }, - { - "title": "Building an AI Agent's Code Execution Environment", - "url": "https://murraycole.com/posts/ai-code-execution-environment", - "type": "article" - }, - { - "title": "Python Code Tool", - "url": "https://python.langchain.com/docs/integrations/tools/python/", - "type": "article" - } - ] - }, - "sV1BnA2-qBnXoKpUn-8Ub": { - "title": "Database Queries", - "description": "Database queries let an AI agent fetch, add, change, or remove data stored in a database. The agent sends a request written in a query language, most often SQL. The database engine then looks through its tables and returns only the rows and columns that match the rules in the request. With this tool, the agent can answer questions that need up-to-date numbers, user records, or other stored facts. It can also write new entries or adjust old ones to keep the data current. Because queries work in real time and follow clear rules, they give the agent a reliable way to handle large sets of structured information.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Building Your Own Database Agent", - "url": "https://www.deeplearning.ai/short-courses/building-your-own-database-agent/", - "type": "article" - } - ] - }, - "52qxjZILV-X1isup6dazC": { - "title": "API Requests", - "description": "API requests let an AI agent ask another service for data or for an action. The agent builds a short message that follows the service’s rules, sends it over the internet, and waits for a reply. 
For example, it can call a weather API to get today’s forecast or a payment API to charge a customer. Each request has a method like GET or POST, a URL, and often a small block of JSON with needed details. The service answers with another JSON block that the agent reads and uses. Because API requests are fast and clear, they are a common tool for connecting the agent to many other systems without extra work.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Introduction to APIs - MDN Web Docs", - "url": "https://developer.mozilla.org/en-US/docs/Learn_web_development/Extensions/Client-side_APIs/Introduction", - "type": "article" - }, - { - "title": "How APIs Power AI Agents: A Comprehensive Guide", - "url": "https://blog.treblle.com/api-guide-for-ai-agents/", - "type": "article" - } - ] - }, - "qaNr5I-NQPnfrRH7ynGTl": { - "title": "Email / Slack / SMS", - "description": "Email, Slack, and SMS are message channels an AI agent can use to act on tasks and share updates. The agent writes and sends emails to give detailed reports or collect files. It posts to Slack to chat with a team, answer questions, or trigger alerts inside a workspace. It sends SMS texts for quick notices such as reminders, confirmations, or warnings when a fast response is needed. By picking the right channel, the agent reaches users where they already communicate, makes sure important information arrives on time, and can even gather replies to keep a task moving forward.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Twilio Messaging API", - "url": "https://www.twilio.com/docs/usage/api", - "type": "article" - }, - { - "title": "Slack AI Agents", - "url": "https://slack.com/ai-agents", - "type": "article" - } - ] - }, - "BoJqZvdGam4cd6G6yK2IV": { - "title": "File System Access", - "description": "File system access lets an AI agent read, create, change, or delete files and folders on a computer or server. 
With this power, the agent can open a text file to pull data, write a new report, save logs, or tidy up old files without human help. It can also move files between folders to keep things organized. This tool is useful for tasks such as data processing, report generation, and backup jobs. Strong safety checks are needed so the agent touches only the right files, avoids private data, and cannot harm the system by mistake.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Filesystem MCP server for AI Agents", - "url": "https://playbooks.com/mcp/mateicanavra-filesystem", - "type": "article" - }, - { - "title": "File System Access API", - "url": "https://developer.mozilla.org/en-US/docs/Web/API/File_System_Access_API", - "type": "article" - }, - { - "title": "Understanding File Permissions and Security", - "url": "https://linuxize.com/post/understanding-linux-file-permissions/", - "type": "article" - }, - { - "title": "How File Systems Work?", - "url": "https://www.youtube.com/watch?v=KN8YgJnShPM", - "type": "video" - } - ] - }, - "TBH_DZTAfR8Daoh-njNFC": { - "title": "What is Agent Memory?", - "description": "Agent memory is the part of an AI agent that keeps track of what has already happened. It stores past user messages, facts the agent has learned, and its own previous steps. This helps the agent remember goals, user likes and dislikes, and important details across turns or sessions. Memory can be short-term, lasting only for one conversation, or long-term, lasting across many. With a good memory the agent avoids repeating questions, stays consistent, and plans better actions. 
Without it, the agent would forget everything each time and feel unfocused.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Agentic Memory for LLM Agents", - "url": "https://arxiv.org/abs/2502.12110", - "type": "article" - }, - { - "title": "Memory Management in AI Agents", - "url": "https://python.langchain.com/docs/how_to/chatbots_memory/", - "type": "article" - }, - { - "title": "Storing and Retrieving Knowledge for Agents", - "url": "https://www.pinecone.io/learn/langchain-retrieval-augmentation/", - "type": "article" - }, - { - "title": "Short-Term vs Long-Term Memory in AI Agents", - "url": "https://adasci.org/short-term-vs-long-term-memory-in-ai-agents/", - "type": "article" - }, - { - "title": "Building Brain-Like Memory for AI Agents", - "url": "https://www.youtube.com/watch?v=VKPngyO0iKg", - "type": "video" - } - ] - }, - "M3U6RfIqaiut2nuOibY8W": { - "title": "Short Term Memory", - "description": "Short term memory are the facts which are passed as a part of the prompt to the LLM e.g. there might be a prompt like below:\n\n Users Profile:\n - name: {name}\n - age: {age}\n - expertise: {expertise}\n \n User is currently learning about {current_topic}. User has some goals in mind which are:\n - {goal_1}\n - {goal_2}\n - {goal_3}\n \n Help the user achieve the goals.\n \n\nNotice how we injected the user's profile, current topic and goals in the prompt. 
These are all short term memories.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Memory Management in AI Agents", - "url": "https://python.langchain.com/docs/how_to/chatbots_memory/", - "type": "article" - }, - { - "title": "Build Smarter AI Agents: Manage Short-term and Long-term Memory", - "url": "https://redis.io/blog/build-smarter-ai-agents-manage-short-term-and-long-term-memory-with-redis/", - "type": "article" - }, - { - "title": "Storing and Retrieving Knowledge for Agents", - "url": "https://www.pinecone.io/learn/langchain-retrieval-augmentation/", - "type": "article" - }, - { - "title": "Short-Term vs Long-Term Memory in AI Agents", - "url": "https://adasci.org/short-term-vs-long-term-memory-in-ai-agents/", - "type": "article" - }, - { - "title": "Building Brain-Like Memory for AI Agents", - "url": "https://www.youtube.com/watch?v=VKPngyO0iKg", - "type": "video" - } - ] - }, - "Ue633fz6Xu2wa2-KOAtdP": { - "title": "Long Term Memory", - "description": "Long term memory in an AI agent stores important information for future use, like a digital notebook. It saves facts, past events, user preferences, and learned skills so the agent can make smarter and more consistent decisions over time. Unlike short-term memory, this data survives across sessions. When a similar situation comes up, the agent can look back and use what it already knows. Long term memory usually lives in a database, file system, or vector store and may hold text, numbers, embeddings, or past conversation states. 
Good management of long-term memory is key for building agents that feel personalized and get better with experience.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Long Term Memory in AI Agents", - "url": "https://medium.com/@alozie_igbokwe/ai-101-long-term-memory-in-ai-agents-35f87f2d0ce0", - "type": "article" - }, - { - "title": "Memory Management in AI Agents", - "url": "https://python.langchain.com/docs/how_to/chatbots_memory/", - "type": "article" - }, - { - "title": "Storing and Retrieving Knowledge for Agents", - "url": "https://www.pinecone.io/learn/langchain-retrieval-augmentation/", - "type": "article" - }, - { - "title": "Short-Term vs Long-Term Memory in AI Agents", - "url": "https://adasci.org/short-term-vs-long-term-memory-in-ai-agents/", - "type": "article" - }, - { - "title": "Building Brain-Like Memory for AI Agents", - "url": "https://www.youtube.com/watch?v=VKPngyO0iKg", - "type": "video" - } - ] - }, - "EfCCNqLMJpWKKtamUa5gK": { - "title": "Episodic vs Semantic Memory", - "description": "Agent memory often has two parts. Episodic memory is relevant to the context of the current conversation and may be lost after the conversation ends. Semantic memory is relevant to the broader knowledge of the agent and is persistent.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "What Is AI Agent Memory? - IBM", - "url": "https://www.ibm.com/think/topics/ai-agent-memory", - "type": "article" - }, - { - "title": "Episodic Memory vs. 
Semantic Memory: The Key Differences", - "url": "https://www.magneticmemorymethod.com/episodic-vs-semantic-memory/", - "type": "article" - }, - { - "title": "Memory Systems in LangChain", - "url": "https://python.langchain.com/docs/how_to/chatbots_memory/", - "type": "article" - } - ] - }, - "wkS4yOJ3JdZQE_yBID8K7": { - "title": "RAG and Vector Databases", - "description": "RAG, short for Retrieval-Augmented Generation, lets an AI agent pull facts from stored data each time it answers. The data sits in a vector database. In that database, every text chunk is turned into a number list called a vector. Similar ideas create vectors that lie close together, so the agent can find related chunks fast. When the user asks a question, the agent turns the question into its own vector, finds the nearest chunks, and reads them. It then writes a reply that mixes the new prompt with those chunks. Because the data store can hold a lot of past chats, documents, or notes, this process gives the agent a working memory without stuffing everything into the prompt. 
It lowers token cost, keeps answers on topic, and allows the memory to grow over time.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Understanding Retrieval-Augmented Generation (RAG) and Vector Databases", - "url": "https://pureai.com/Articles/2025/03/03/Understanding-RAG.aspx", - "type": "article" - }, - { - "title": "Build Advanced Retrieval-Augmented Generation Systems", - "url": "https://learn.microsoft.com/en-us/azure/developer/ai/advanced-retrieval-augmented-generation", - "type": "article" - }, - { - "title": "What Is Retrieval-Augmented Generation, aka RAG?", - "url": "https://blogs.nvidia.com/blog/what-is-retrieval-augmented-generation/", - "type": "article" - } - ] - }, - "QJqXHV8VHPTnfYfmKPzW7": { - "title": "User Profile Storage", - "description": "User profile storage is the part of an AI agent’s memory that holds stable facts about each user, such as name, age group, language, past choices, and long-term goals. The agent saves this data in a file or small database so it can load it each time the same user returns. By keeping the profile separate from short-term conversation logs, the agent can remember preferences without mixing them with temporary chat history. 
The profile is updated only when the user states a new lasting preference or when old information changes, which helps prevent drift or bloat.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Storage Technology Explained: AI and Data Storage", - "url": "https://www.computerweekly.com/feature/Storage-technology-explained-AI-and-the-data-storage-it-needs", - "type": "article" - }, - { - "title": "The Architect's Guide to Storage for AI - The New Stack", - "url": "https://thenewstack.io/the-architects-guide-to-storage-for-ai/", - "type": "article" - } - ] - }, - "jTDC19BTWCqxqMizrIJHr": { - "title": "Summarization / Compression", - "description": "Summarization or compression lets an AI agent keep the gist of past chats without saving every line. After a talk, the agent runs a small model or rule set that pulls out key facts, goals, and feelings and writes them in a short note. This note goes into long-term memory, while the full chat can be dropped or stored elsewhere. Because the note is short, the agent spends fewer tokens when it loads memory into the next prompt, so costs stay low and speed stays high. Good summaries leave out side jokes and filler but keep names, dates, open tasks, and user preferences. The agent can update the note after each session, overwriting old points that are no longer true. 
This process lets the agent remember what matters even after hundreds of turns.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Evaluating LLMs for Text Summarization", - "url": "https://insights.sei.cmu.edu/blog/evaluating-llms-for-text-summarization-introduction/", - "type": "article" - }, - { - "title": "The Ultimate Guide to AI Document Summarization", - "url": "https://www.documentllm.com/blog/ai-document-summarization-guide", - "type": "article" - } - ] - }, - "m-97m7SI0XpBnhEE8-_1S": { - "title": "Forgetting / Aging Strategies", - "description": "Forgetting or aging strategies help an AI agent keep only the useful parts of its memory and drop the rest over time. The agent may tag each memory with a time stamp and lower its importance as it gets older, or it may remove items that have not been used for a while, much like a “least-recently-used” list. Some systems give each memory a relevance score; when space runs low, they erase the lowest-scoring items first. Others keep a fixed-length sliding window of the most recent events or create short summaries and store those instead of raw details. These methods stop the memory store from growing without limits, cut storage costs, and let the agent focus on current goals. 
Choosing the right mix of aging rules is a trade-off: forget too fast and the agent loses context, forget too slow and it wastes resources or reacts to outdated facts.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Memory Management", - "url": "https://python.langchain.com/docs/how_to/chatbots_memory/", - "type": "article" - }, - { - "title": "Memory Management for AI Agents", - "url": "https://techcommunity.microsoft.com/blog/azure-ai-services-blog/memory-management-for-ai-agents/4406359", - "type": "article" - } - ] - }, - "53xDks6JQ33fHMa3XcuCd": { - "title": "ReAct (Reason + Act)", - "description": "ReAct is an agent pattern that makes a model alternate between two simple steps: Reason and Act. First, the agent writes a short thought that sums up what it knows and what it should try next. Then it performs an action such as calling an API, running code, or searching a document. The result of that action is fed back, giving the agent fresh facts to think about. This loop repeats until the task is done. By showing its thoughts in plain text, the agent can be inspected, debugged, and even corrected on the fly. The clear split between thinking and doing also cuts wasted moves and guides the model toward steady progress. 
ReAct works well with large language models because they can both generate the chain of thoughts and choose the next tool in the very same response.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "ReAct: Synergizing Reasoning and Acting in Language Models", - "url": "https://react-lm.github.io/", - "type": "article" - }, - { - "title": "ReAct Systems: Enhancing LLMs with Reasoning and Action", - "url": "https://learnprompting.org/docs/agents/react", - "type": "article" - } - ] - }, - "1B0IqRNYdtbHDi1jHSXuI": { - "title": "Model Context Protocol (MCP)", - "description": "Model Context Protocol (MCP) is a rulebook that tells an AI agent how to pack background information before it sends a prompt to a language model. It lists what pieces go into the prompt—things like the system role, the user’s request, past memory, tool calls, or code snippets—and fixes their order. Clear tags mark each piece, so both humans and machines can see where one part ends and the next begins. Keeping the format steady cuts confusion, lets different tools work together, and makes it easier to test or swap models later. 
When agents follow MCP, the model gets a clean, complete prompt and can give better answers.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "MCP: Build Rich-Context AI Apps with Anthropic", - "url": "https://www.deeplearning.ai/short-courses/mcp-build-rich-context-ai-apps-with-anthropic/", - "type": "course" - }, - { - "title": "Model Context Protocol", - "url": "https://github.com/modelcontextprotocol/modelcontextprotocol", - "type": "opensource" - }, - { - "title": "Model Context Protocol", - "url": "https://modelcontextprotocol.io/introduction", - "type": "article" - }, - { - "title": "Introducing the Azure MCP Server ", - "url": "https://devblogs.microsoft.com/azure-sdk/introducing-the-azure-mcp-server/", - "type": "article" - }, - { - "title": "The Ultimate Guide to MCP", - "url": "https://guangzhengli.com/blog/en/model-context-protocol", - "type": "article" - } - ] - }, - "9FryAIrWRHh8YlzKX3et5": { - "title": "MCP Hosts", - "description": "MCP Hosts are computers or services that run the Model Context Protocol. They handle incoming calls, load the MCP manifest, check requests, and pass data between users, tools, and language models. Hosts may cache recent messages, track token usage, and add safety or billing checks before sending prompts to the model. They expose an API endpoint so apps can connect easily. You can run a host on your laptop for testing or deploy it on cloud platforms for scale. 
The host acts as the trusted bridge where agents, tools, and data meet.\n\nVisit the following resources to learn more:",
-    "links": [
-      {
-        "title": "punkpeye/awesome-mcp-servers",
-        "url": "https://github.com/punkpeye/awesome-mcp-servers",
-        "type": "opensource"
-      },
-      {
-        "title": "Vercel Serverless Hosting",
-        "url": "https://vercel.com/docs",
-        "type": "article"
-      },
-      {
-        "title": "The Ultimate Guide to MCP",
-        "url": "https://guangzhengli.com/blog/en/model-context-protocol",
-        "type": "article"
-      },
-      {
-        "title": "AWS MCP Servers for Code Assistants",
-        "url": "https://aws.amazon.com/blogs/machine-learning/introducing-aws-mcp-servers-for-code-assistants-part-1/",
-        "type": "article"
-      }
-    ]
-  },
-  "CGVstUxVXLJcYZrwk3iNQ": {
-    "title": "MCP Client",
-    "description": "The MCP Client is the part of an AI agent that talks to the language model API. It collects messages, files, and tool signals, packs them using the Model Context Protocol, and sends them to the model. When a reply comes back, it unpacks it, checks the format, and passes the result to other modules. It also tracks token usage, filters private data, retries failed calls, and logs important events for debugging.\n\nVisit the following resources to learn more:",
-    "links": [
-      {
-        "title": "Model Context Protocol",
-        "url": "https://github.com/modelcontextprotocol/modelcontextprotocol",
-        "type": "opensource"
-      },
-      {
-        "title": "Model Context Protocol",
-        "url": "https://modelcontextprotocol.io/introduction",
-        "type": "article"
-      },
-      {
-        "title": "OpenAI API Reference",
-        "url": "https://platform.openai.com/docs/api-reference",
-        "type": "article"
-      },
-      {
-        "title": "Anthropic API Documentation",
-        "url": "https://docs.anthropic.com/claude/reference",
-        "type": "article"
-      }
-    ]
-  },
-  "yv_-87FVM7WKn5iv6LW9q": {
-    "title": "MCP Servers",
-    "description": "An MCP Server is the main machine or cloud service that runs the Model Context Protocol. 
It keeps the shared “memory” that different AI agents need so they stay on the same page. When an agent sends a request, the server checks who is asking, pulls the right context from its store, and sends it back fast. It also saves new facts and task results so the next agent can use them. An MCP Server must handle many users at once, protect private data with strict access rules, and log every change for easy roll-back. Good servers break work into small tasks, spread them across many computers, and add backups so they never lose data. In short, the MCP Server is the hub that makes sure all agents share fresh, safe, and correct context.\n\nVisit the following resources to learn more:",
-    "links": [
-      {
-        "title": "punkpeye/awesome-mcp-servers",
-        "url": "https://github.com/punkpeye/awesome-mcp-servers",
-        "type": "opensource"
-      },
-      {
-        "title": "Introducing the Azure MCP Server",
-        "url": "https://devblogs.microsoft.com/azure-sdk/introducing-the-azure-mcp-server/",
-        "type": "article"
-      },
-      {
-        "title": "The Ultimate Guide to MCP",
-        "url": "https://guangzhengli.com/blog/en/model-context-protocol",
-        "type": "article"
-      },
-      {
-        "title": "AWS MCP Servers for Code Assistants",
-        "url": "https://aws.amazon.com/blogs/machine-learning/introducing-aws-mcp-servers-for-code-assistants-part-1/",
-        "type": "article"
-      }
-    ]
-  },
-  "1NXIN-Hbjl5rPy_mqxQYW": {
-    "title": "Creating MCP Servers",
-    "description": "An MCP server stores and shares conversation data for AI agents using the Model Context Protocol (MCP), a standard for agent memory management. Start by picking a language and web framework, then create REST endpoints like `/messages`, `/state`, and `/health`. Each endpoint exchanges JSON following the MCP schema. Store session logs with a session ID, role, and timestamp using a database or in-memory store. Add token-based authentication and filters so agents can fetch only what they need. Set limits on message size and request rates to avoid overload. 
Finally, write unit tests, add monitoring, and run load tests to ensure stability.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Model Context Protocol (MCP) Specification", - "url": "https://www.anthropic.com/news/model-context-protocol", - "type": "article" - }, - { - "title": "How to Build and Host Your Own MCP Servers in Easy Steps?", - "url": "https://collabnix.com/how-to-build-and-host-your-own-mcp-servers-in-easy-steps/", - "type": "article" - } - ] - }, - "iBtJp24F_kJE3YlBsW60s": { - "title": "Local Desktop", - "description": "A Local Desktop deployment means running the MCP server directly on your own computer instead of a remote cloud or server. You install the MCP software, needed runtimes, and model files onto your desktop or laptop. The server then listens on a local address like `127.0.0.1:8000`, accessible only from the same machine unless you open ports manually. This setup is great for fast tests, personal demos, or private experiments since you keep full control and avoid cloud costs. 
However, it's limited by your hardware's speed and memory, and others cannot access it without tunneling tools like ngrok or local port forwarding.\n\nVisit the following resources to learn more:",
-    "links": [
-      {
-        "title": "Build a Simple Local MCP Server",
-        "url": "https://blog.stackademic.com/build-simple-local-mcp-server-5434d19572a4",
-        "type": "article"
-      },
-      {
-        "title": "How to Build and Host Your Own MCP Servers in Easy Steps",
-        "url": "https://collabnix.com/how-to-build-and-host-your-own-mcp-servers-in-easy-steps/",
-        "type": "article"
-      },
-      {
-        "title": "Expose localhost to Internet",
-        "url": "https://ngrok.com/docs",
-        "type": "article"
-      },
-      {
-        "title": "Run a Local Server on Your Machine",
-        "url": "https://www.youtube.com/watch?v=ldGl6L4Vktk",
-        "type": "video"
-      }
-    ]
-  },
-  "dHNMX3_t1KSDdAWqgdJXv": {
-    "title": "Remote / Cloud",
-    "description": "Remote or cloud deployment places the MCP server on a cloud provider instead of a local machine. You package the server as a container or virtual machine, choose a service like AWS, Azure, or GCP, and give it compute, storage, and a public HTTPS address. A load balancer spreads traffic, while auto-scaling adds or removes copies of the server as demand changes. You secure the endpoint with TLS, API keys, and firewalls, and you send logs and metrics to the provider’s monitoring tools. This setup lets the server handle many users, updates are easier, and you avoid local hardware limits, though you must watch costs and protect sensitive data.\n\nVisit the following resources to learn more:",
-    "links": [
-      {
-        "title": "Edge AI vs. Cloud AI: Real-Time Intelligence Models",
-        "url": "https://medium.com/@hassaanidrees7/edge-ai-vs-cloud-ai-real-time-intelligence-vs-centralized-processing-df8c6e94fd11",
-        "type": "article"
-      },
-      {
-        "title": "Cloud AI vs. On-premises AI",
-        "url": "https://www.pluralsight.com/resources/blog/ai-and-data/ai-on-premises-vs-in-cloud",
-        "type": "article"
-      },
-      {
-        "title": "Cloud vs On-Premises AI Deployment",
-        "url": "https://toxigon.com/cloud-vs-on-premises-ai-deployment",
-        "type": "article"
-      }
-    ]
-  },
-  "qwdh5pkBbrF8LKPxbZp4F": {
-    "title": "Chain of Thought (CoT)",
-    "description": "Chain of Thought (CoT) is a way for an AI agent to think out loud. Before giving its final answer, the agent writes short notes that show each step it takes. These notes can list facts, name sub-tasks, or do small bits of math. By seeing the steps, the agent stays organized and is less likely to make a mistake. People who read the answer can also check the logic and spot any weak points. The same written steps can be fed back into the agent so it can plan, reflect, or fix itself. Because it is easy to use and boosts trust, CoT is one of the most common designs for language-based agents today.\n\nVisit the following resources to learn more:",
-    "links": [
-      {
-        "title": "Chain-of-Thought Prompting Elicits Reasoning in Large Language Models",
-        "url": "https://arxiv.org/abs/2201.11903",
-        "type": "article"
-      },
-      {
-        "title": "Evoking Chain of Thought Reasoning in LLMs - Prompting Guide",
-        "url": "https://www.promptingguide.ai/techniques/cot",
-        "type": "article"
-      }
-    ]
-  },
-  "cW8O4vLLKEG-Q0dE8E5Zp": {
-    "title": "RAG Agent",
-    "description": "A RAG (Retrieval-Augmented Generation) agent mixes search with language generation so it can answer questions using fresh and reliable facts. When a user sends a query, the agent first turns that query into an embedding—basically a number list that captures its meaning. It then looks up similar embeddings in a vector database that holds passages from web pages, PDFs, or other text. The best-matching passages come back as context. The agent puts the original question and those passages into a large language model. 
The model writes the final reply, grounding every sentence in the retrieved text. This setup keeps the model smaller, reduces wrong guesses, and lets the system update its knowledge just by adding new documents to the database. Common tools for building a RAG agent include an embedding model, a vector store like FAISS or Pinecone, and an LLM connected through a framework such as LangChain or LlamaIndex.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "What is RAG? - Retrieval-Augmented Generation AI Explained", - "url": "https://aws.amazon.com/what-is/retrieval-augmented-generation/", - "type": "article" - }, - { - "title": "What Is Retrieval-Augmented Generation, aka RAG?", - "url": "https://blogs.nvidia.com/blog/what-is-retrieval-augmented-generation/", - "type": "article" - } - ] - }, - "6YLCMWzystao6byCYCTPO": { - "title": "Planner Executor", - "description": "A **planner-executor agent** is a type of AI agent that splits its work into two clear parts: planning and execution. The **planner** thinks ahead, taking a goal and breaking it down into a sequence of steps, ordering them in a logical and efficient manner. The **executor**, on the other hand, takes each planned step and carries it out, monitoring the results and reporting back to the planner. If something fails or the world changes, the planner may update the plan, and the executor follows the new steps. 
This modular approach allows the agent to handle complex tasks by dividing them into manageable parts, making it easier to debug, reuse plans, and maintain clear and consistent behavior.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Plan-and-Execute Agents", - "url": "https://blog.langchain.dev/planning-agents/", - "type": "article" - }, - { - "title": "Plan and Execute: AI Agents Architecture", - "url": "https://medium.com/@shubham.ksingh.cer14/plan-and-execute-ai-agents-architecture-f6c60b5b9598", - "type": "article" - } - ] - }, - "Ep8RoZSy_Iq_zWXlGQLZo": { - "title": "DAG Agents", - "description": "A DAG (Directed Acyclic Graph) agent is made of small parts called nodes that form a one-way graph with no loops. Each node does a task and passes its result to the next. Because there are no cycles, data always moves forward, making workflows easy to follow and debug. Independent nodes can run in parallel, speeding up tasks. If a node fails, you can trace and fix that part without touching the rest. DAG agents are ideal for jobs like data cleaning, multi-step reasoning, or workflows where backtracking isn’t needed.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Airflow: Directed Acyclic Graphs Documentation", - "url": "https://airflow.apache.org/docs/apache-airflow/stable/concepts/dags.html", - "type": "article" - }, - { - "title": "What are DAGs in AI Systems?", - "url": "https://www.restack.io/p/version-control-for-ai-answer-what-is-dag-in-ai-cat-ai", - "type": "article" - }, - { - "title": "DAGs Explained Simply", - "url": "https://www.youtube.com/watch?v=1Yh5S-S6wsI", - "type": "video" - } - ] - }, - "Nmy1PoB32DcWZnPM8l8jT": { - "title": "Tree-of-Thought", - "description": "Tree-of-Thought is a way to organize an AI agent’s reasoning as a branching tree. At the root, the agent states the main problem. Each branch is a small idea, step, or guess that could lead to a solution. 
The agent expands the most promising branches, checks if they make sense, and prunes paths that look wrong or unhelpful. This setup helps the agent explore many possible answers while staying focused on the best ones. Because the agent can compare different branches side by side, it is less likely to get stuck on a bad line of thought. The result is more reliable and creative problem solving.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Tree of Thoughts (ToT) | Prompt Engineering Guide", - "url": "https://www.promptingguide.ai/techniques/tot", - "type": "article" - }, - { - "title": "What is tree-of-thoughts? - IBM", - "url": "https://www.ibm.com/think/topics/tree-of-thoughts", - "type": "article" - }, - { - "title": "The Revolutionary Approach of Tree-of-Thought Prompting in AI", - "url": "https://medium.com/@WeavePlatform/the-revolutionary-approach-of-tree-of-thought-prompting-in-ai-eb7c0872247b", - "type": "article" - } - ] - }, - "US6T5dXM8IY9V2qZnTOFW": { - "title": "Manual (from scratch)", - "description": "Building an AI agent from scratch means writing every part of the system yourself, without ready-made libraries. You define how the agent senses inputs, stores memory, makes decisions, and learns over time. First, you pick a clear goal, like solving puzzles or chatting. Then you code the inputs (keyboard, mouse, text), decision logic (rules or neural networks), and memory (saving facts from past events). Testing is critical: you run the agent, watch its actions, debug, and improve. 
Though it takes longer, this approach gives deep understanding and full control over how the agent works and evolves.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "A Step-by-Step Guide to Building an AI Agent From Scratch", - "url": "https://www.neurond.com/blog/how-to-build-an-ai-agent", - "type": "article" - }, - { - "title": "How to Build AI Agents", - "url": "https://wotnot.io/blog/build-ai-agents", - "type": "article" - }, - { - "title": "Build Your Own AI Agent from Scratch in 30 Minutes", - "url": "https://medium.com/@gurpartap.sandhu3/build-you-own-ai-agent-from-scratch-in-30-mins-using-simple-python-1458f8099da0", - "type": "article" - }, - { - "title": "Building an AI Agent From Scratch", - "url": "https://www.youtube.com/watch?v=bTMPwUgLZf0", - "type": "video" - } - ] - }, - "aafZxtjxiwzJH1lwHBODi": { - "title": "LLM Native \"Function Calling\"", - "description": "LLM native “function calling” lets a large language model decide when to run a piece of code and which inputs to pass to it. You first tell the model what functions are available. For each one you give a short name, a short description, and a list of arguments with their types. During a chat, the model can answer in JSON that matches this schema instead of plain text. Your wrapper program reads the JSON, calls the real function, and then feeds the result back to the model so it can keep going. This loop helps an agent search the web, look up data, send an email, or do any other task you expose. 
Because the output is structured, you get fewer mistakes than when the model tries to write raw code or natural-language commands.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "A Comprehensive Guide to Function Calling in LLMs", - "url": "https://thenewstack.io/a-comprehensive-guide-to-function-calling-in-llms/", - "type": "article" - }, - { - "title": "Function Calling with LLMs | Prompt Engineering Guide", - "url": "https://www.promptingguide.ai/applications/function_calling", - "type": "article" - }, - { - "title": "Function Calling with Open-Source LLMs", - "url": "https://medium.com/@rushing_andrei/function-calling-with-open-source-llms-594aa5b3a304", - "type": "article" - } - ] - }, - "AQtxTTxmBpfl8BMgJbGzc": { - "title": "OpenAI Functions Calling", - "description": "OpenAI Function Calling lets you give a language model a list of tools and have it decide which one to use and with what data. You describe each tool with a short name, what it does, and the shape of its inputs in a small JSON-like schema. You then pass the user message and this tool list to the model. Instead of normal text, the model can reply with a JSON block that names the tool and fills in the needed arguments. Your program reads this block, runs the real function, and can send the result back for the next step. This pattern makes agent actions clear, easy to parse, and hard to abuse, because the model cannot run code on its own and all calls go through your checks. 
It also cuts down on prompt hacks and wrong formats, so agents work faster and more safely.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "OpenAI Documentation – Function Calling", - "url": "https://platform.openai.com/docs/guides/function-calling", - "type": "article" - }, - { - "title": "OpenAI Cookbook – Using Functions with GPT Models", - "url": "https://github.com/openai/openai-cookbook/blob/main/examples/How_to_call_functions_with_chat_models.ipynb", - "type": "article" - }, - { - "title": "@officialOpenAI Blog – Announcing Function Calling and Other Updates", - "url": "https://openai.com/blog/function-calling-and-other-api-updates", - "type": "article" - }, - { - "title": "@officialOpenAI API Reference – Functions Section", - "url": "https://platform.openai.com/docs/api-reference/chat/create#functions", - "type": "article" - }, - { - "title": "@officialOpenAI Community – Discussions and Examples on Function Calling", - "url": "https://community.openai.com/tag/function-calling", - "type": "article" - } - ] - }, - "_iIsBJTVS6OBf_dsdmbVO": { - "title": "Gemini Function Calling", - "description": "Gemini function calling lets you hook the Gemini language model to real code in a safe and simple way. You first list the functions you want it to use, each with a name, a short note about what it does, and a JSON schema for the needed arguments. When the user speaks, Gemini checks this list and, if a match makes sense, answers with a tiny JSON block that holds the chosen function name and the filled-in arguments. Your program then runs that function, sends the result back, and the chat moves on. Because the reply is strict JSON and not free text, you do not have to guess at what the model means, and you avoid many errors. 
This flow lets you build agents that pull data, call APIs, or carry out long action chains while keeping control of business logic on your side.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Function Calling with the Gemini API", - "url": "https://ai.google.dev/gemini-api/docs/function-calling", - "type": "article" - }, - { - "title": "Understanding Function Calling in Gemini", - "url": "https://medium.com/google-cloud/understanding-function-calling-in-gemini-3097937f1905", - "type": "article" - } - ] - }, - "37GBFVZ2J2d5r8bd1ViHq": { - "title": "OpenAI Assistant API", - "description": "The OpenAI Assistants API lets you add clear, task-specific actions to a chat with a large language model. You first describe each action you want the model to use, giving it a name, a short purpose, and a list of inputs in JSON form. During the chat, the model may decide that one of these actions will help. It then returns the name of the action and a JSON object with the input values it thinks are right. Your code receives this call, runs real work such as a database query or a web request, and sends the result back to the model. The model reads the result and continues the chat, now armed with fresh facts. 
This loop lets you keep control of what real work happens while still letting the model plan and talk in natural language.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "OpenAI Documentation – Assistants API Overview", - "url": "https://platform.openai.com/docs/assistants/overview", - "type": "article" - }, - { - "title": "OpenAI Blog – Introducing the Assistants API", - "url": "https://openai.com/blog/assistants-api", - "type": "article" - }, - { - "title": "OpenAI Cookbook – Assistants API Example", - "url": "https://github.com/openai/openai-cookbook/blob/main/examples/Assistants_API_overview_python.ipynb", - "type": "article" - }, - { - "title": "OpenAI API Reference – Assistants Endpoints", - "url": "https://platform.openai.com/docs/api-reference/assistants", - "type": "article" - } - ] - }, - "Ka6VpCEnqABvwiF9vba7t": { - "title": "Langchain", - "description": "LangChain is a Python and JavaScript library that helps you put large language models to work in real products. It gives ready-made parts for common agent tasks such as talking to many tools, keeping short-term memory, and calling an external API when the model needs fresh data. You combine these parts like Lego blocks: pick a model, add a prompt template, chain the steps, then wrap the chain in an “agent” that can choose what step to run next. Built-in connectors link to OpenAI, Hugging Face, vector stores, and SQL databases, so you can search documents or pull company data without writing a lot of glue code. 
This lets you move fast from idea to working bot, while still letting you swap out parts if your needs change.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "langchain-ai/langchain", - "url": "https://github.com/langchain-ai/langchain", - "type": "opensource" - }, - { - "title": "LangChain Documentation", - "url": "https://python.langchain.com/docs/introduction/", - "type": "article" - }, - { - "title": "Building Applications with LLMs using LangChain", - "url": "https://www.pinecone.io/learn/series/langchain/", - "type": "article" - }, - { - "title": "AI Agents with LangChain and LangGraph", - "url": "https://www.udacity.com/course/ai-agents-with-langchain-and-langgraph--cd13764", - "type": "article" - }, - { - "title": "LangChain Crash Course - Build LLM Apps Fast (YouTube)", - "url": "https://www.youtube.com/watch?v=nAmC7SoVLd8", - "type": "video" - } - ] - }, - "iEHF-Jm3ck-Iu85EbCoDi": { - "title": "LlamaIndex", - "description": "LlamaIndex is an open-source Python toolkit that helps you give a language model access to your own data. You load files such as PDFs, web pages, or database rows. The toolkit breaks the text into chunks, turns them into vectors, and stores them in a chosen vector store like FAISS or Pinecone. When a user asks a question, LlamaIndex finds the best chunks, adds them to the prompt, and sends the prompt to the model. This flow is called retrieval-augmented generation and it lets an agent give answers grounded in your content. The library offers simple classes for loading, indexing, querying, and composing tools, so you write less boilerplate code. It also works with other frameworks, including LangChain, and supports models from OpenAI or Hugging Face. 
With a few lines of code you can build a chatbot, Q&A system, or other agent that knows your documents.\n\nVisit the following resources to learn more:",
-    "links": [
-      {
-        "title": "run-llama/llama_index",
-        "url": "https://github.com/run-llama/llama_index",
-        "type": "opensource"
-      },
-      {
-        "title": "LlamaIndex",
-        "url": "https://llamaindex.ai/",
-        "type": "article"
-      },
-      {
-        "title": "LlamaIndex Documentation",
-        "url": "https://docs.llamaindex.ai/",
-        "type": "article"
-      },
-      {
-        "title": "What is LlamaIndex.TS",
-        "url": "https://ts.llamaindex.ai/docs/llamaindex",
-        "type": "article"
-      },
-      {
-        "title": "What is LlamaIndex? - IBM",
-        "url": "https://www.ibm.com/think/topics/llamaindex",
-        "type": "article"
-      },
-      {
-        "title": "LlamaIndex - Hugging Face",
-        "url": "https://huggingface.co/llamaindex",
-        "type": "article"
-      }
-    ]
-  },
-  "XS-FsvtrXGZ8DPrwOsnlI": {
-    "title": "Haystack",
-    "description": "Haystack is an open-source Python framework that helps you build search and question-answering agents fast. You connect your data sources, pick a language model, and set up pipelines that find the best answer to a user’s query. Haystack handles tasks such as indexing documents, retrieving passages, running the model, and ranking results. It works with many back-ends like Elasticsearch, OpenSearch, FAISS, and Pinecone, so you can scale from a laptop to a cluster. You can add features like summarization, translation, and document chat by dropping extra nodes into the pipeline. 
The framework also offers REST APIs, a web UI, and clear tutorials, making it easy to test and deploy your agent in production.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "deepset-ai/haystack", - "url": "https://github.com/deepset-ai/haystack", - "type": "opensource" - }, - { - "title": "Haystack", - "url": "https://haystack.deepset.ai/", - "type": "article" - }, - { - "title": "Haystack Overview", - "url": "https://docs.haystack.deepset.ai/docs/intro", - "type": "article" - } - ] - }, - "7YtnQ9-KIvGPSpDzEDexl": { - "title": "AutoGen", - "description": "AutoGen is an open-source Python framework that helps you build AI agents without starting from scratch. It lets you define each agent with a role, goals, and tools, then handles the chat flow between them and a large language model such as GPT-4. You can chain several agents so they plan, code, review, and run tasks together. The library includes ready-made modules for memory, task planning, tool calling, and function execution, so you only write the parts that are unique to your app. AutoGen connects to OpenAI, Azure, or local models through a simple settings file. Logs, cost tracking, and step-by-step debugging come built in, which makes testing easy. Because the agents are plain Python objects, you can mix them with other libraries or your own code. 
AutoGen is still young, so expect fast changes and keep an eye on usage costs, but it is a strong choice when you want to turn a prompt into a working multi-agent system in hours instead of weeks.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "GitHub - microsoft/autogen", - "url": "https://github.com/microsoft/autogen", - "type": "opensource" - }, - { - "title": "AutoGen - Microsoft Research", - "url": "https://www.microsoft.com/en-us/research/project/autogen/", - "type": "article" - } - ] - }, - "uFPJqgU4qGvZyxTv-osZA": { - "title": "CrewAI", - "description": "CrewAI is an open-source Python framework for creating teams of AI agents, called a crew. Each agent is assigned a name, role, and set of tools, and the system manages planning, communication, and execution between them. To use it, install the package, define agents in code, connect them with a `Crew` object, and assign a mission prompt. CrewAI interacts with an LLM like GPT-4 or Claude, passes messages, runs tools, and returns a final output. You can also add web search, custom functions, or memory stores. Logs are built-in to help debug and optimize workflows.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "CrewAI", - "url": "https://crewai.com/", - "type": "article" - }, - { - "title": "CrewAI Documentation", - "url": "https://docs.crewai.com/", - "type": "article" - }, - { - "title": "Getting Started with CrewAI: Building AI Agents That Work Together", - "url": "https://medium.com/@cammilo/getting-started-with-crewai-building-ai-agents-that-work-together-9c1f47f185ca", - "type": "article" - }, - { - "title": "Crew AI Full Tutorial For Beginners", - "url": "https://www.youtube.com/watch?v=q6QLGS306d0", - "type": "video" - } - ] - }, - "eWxQiBrxIUG2JNcrdfIHS": { - "title": "Smol Depot", - "description": "Smol Depot is an open-source kit that lets you bundle all the parts of a small AI agent in one place. 
You keep prompts, settings, and code files together in a single folder, then point the Depot tool at that folder to spin the agent up. The tool handles tasks such as loading models, saving chat history, and calling outside APIs, so you do not have to write that glue code yourself. A simple command can copy a starter template, letting you focus on the logic and prompts that make your agent special. Because everything lives in plain files, you can track changes with Git and share the agent like any other project.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "smol.ai - Continuous Fine-tuning Platform for AI Engineers", - "url": "https://smol.candycode.dev/", - "type": "article" - }, - { - "title": "5-min Smol AI Tutorial", - "url": "https://www.ai-jason.com/learning-ai/smol-ai-tutorial", - "type": "article" - }, - { - "title": "Smol AI Full Beginner Course", - "url": "https://www.youtube.com/watch?v=d7qFVrpLh34", - "type": "video" - } - ] - }, - "1EZFbDHA5J5_5BPMLMxXb": { - "title": "Anthropic Tool Use", - "description": "Anthropic Tool Use lets you connect a Claude model to real software functions so the agent can do useful tasks on its own. You give Claude a list of tools, each with a name, a short description, and a strict JSON schema that shows the allowed input fields. During a chat you send user text plus this tool list. Claude decides if a tool should run, picks one, and returns a JSON block that matches the schema. Your code reads the JSON, calls the matching function, and sends the result back to Claude for the next step. This loop repeats until no more tool calls are needed. Clear schemas, small field sets, and helpful examples make the calls accurate. 
By keeping the model in charge of choosing tools while your code controls real actions, you gain both flexibility and safety.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Anthropic Tool Use", - "url": "https://docs.anthropic.com/en/docs/build-with-claude/tool-use/overview", - "type": "article" - } - ] - }, - "v8qLnyFRnEumodBYxQSXQ": { - "title": "Metrics to Track", - "description": "To judge how well an AI agent works, you need clear numbers. Track accuracy, precision, recall, and F1 score to measure correctness. For ranking tasks, use metrics like mean average precision or ROC-AUC. If users interact with the agent, monitor response time, latency, and failure rates. Safety metrics count toxic or biased outputs, while robustness tests check how the agent handles messy or tricky inputs. Resource metrics—memory, CPU, and energy—show if it can scale. Pick the metrics that match your goal, compare against a baseline, and track trends across versions.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Robustness Testing for AI", - "url": "https://mitibmwatsonailab.mit.edu/category/robustness/", - "type": "article" - }, - { - "title": "Complete Guide to Machine Learning Evaluation Metrics", - "url": "https://medium.com/analytics-vidhya/complete-guide-to-machine-learning-evaluation-metrics-615c2864d916", - "type": "article" - }, - { - "title": "Measuring Model Performance", - "url": "https://developers.google.com/machine-learning/crash-course/classification/accuracy", - "type": "article" - }, - { - "title": "A Practical Framework for (Gen)AI Value Measurement", - "url": "https://medium.com/google-cloud/a-practical-framework-for-gen-ai-value-measurement-5fccf3b66c43", - "type": "article" - } - ] - }, - "qo_O4YAe4-MTP_ZJoXJHR": { - "title": "Unit Testing for Individual Tools", - "description": "Unit testing checks that each tool an AI agent uses works as expected when it stands alone. 
You write small tests that feed the tool clear input and then compare its output to a known correct answer. If the tool is a function that parses dates, you test many date strings and see if the function gives the right results. Good tests cover normal cases, edge cases, and error cases. Run the tests every time you change the code. When a test fails, fix the tool before moving on. This habit keeps bugs from spreading into larger agent workflows and makes later debugging faster.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Unit Testing Agents", - "url": "https://docs.patronus.ai/docs/agent_evals/unit_testing", - "type": "article" - }, - { - "title": "Best AI Tools for Unit Testing: A Look at Top 14 AI Tools", - "url": "https://thetrendchaser.com/best-ai-tools-for-unit-testing/", - "type": "article" - }, - { - "title": "AI for Unit Testing: Revolutionizing Developer Productivity", - "url": "https://www.diffblue.com/resources/ai-for-unit-testing-revolutionizing-developer-productivity/", - "type": "article" - } - ] - }, - "P9-SiIda3TSjHsfkI5OUV": { - "title": "Integration Testing for Flows", - "description": "Integration testing for flows checks that an AI agent works well from the first user input to the final action, across every step in between. It joins all parts of the system—natural-language understanding, planning, memory, tools, and output—and runs them together in real scenarios. Test cases follow common and edge-case paths a user might take. The goal is to catch errors that only appear when parts interact, such as wrong data passed between modules or timing issues. Good practice includes building automated test suites, using real or mock services, and logging each step for easy debugging. 
When integration tests pass, you gain confidence that the whole flow feels smooth and reliable for users.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Integration Testing for AI-based Features with Humans", - "url": "https://www.microsoft.com/en-us/research/publication/hint-integration-testing-for-ai-based-features-with-humans-in-the-loop/", - "type": "article" - }, - { - "title": "Integration Testing and Unit Testing in AI", - "url": "https://www.aviator.co/blog/integration-testing-and-unit-testing-in-the-age-of-ai/", - "type": "article" - }, - { - "title": "Integration Testing", - "url": "https://www.guru99.com/integration-testing.html", - "type": "article" - } - ] - }, - "rHxdxN97ZcU7MPl8L1jzN": { - "title": "Human in the Loop Evaluation", - "description": "Human-in-the-loop evaluation checks an AI agent by letting real people judge its output and behavior. Instead of trusting only automated scores, testers invite users, domain experts, or crowd workers to watch tasks, label answers, flag errors, and rate clarity, fairness, or safety. Their feedback shows problems that numbers alone miss, such as hidden bias, confusing language, or actions that feel wrong to a person. Teams study these notes, adjust the model, and run another round, repeating until the agent meets quality and trust goals. 
Mixing human judgment with data leads to a system that is more accurate, useful, and safe for everyday use.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Human in the Loop · Cloudflare Agents", - "url": "https://developers.cloudflare.com/agents/concepts/human-in-the-loop/", - "type": "article" - }, - { - "title": "What is Human-in-the-Loop: A Guide", - "url": "https://logifusion.com/what-is-human-in-the-loop-htil/", - "type": "article" - }, - { - "title": "Human-in-the-Loop ML", - "url": "https://docs.aws.amazon.com/sagemaker/latest/dg/sms-human-review-workflow.html", - "type": "article" - }, - { - "title": "The Importance of Human Feedback in AI (Hugging Face Blog)", - "url": "https://huggingface.co/blog/rlhf", - "type": "article" - } - ] - }, - "xp7TCTRE9HP60_rGzTUF6": { - "title": "LangSmith", - "description": "LangSmith is a tool that helps you see how well your AI agents work. It lets you record every step the agent takes, from the first input to the final answer. You can replay these steps to find places where the agent goes wrong. LangSmith also lets you create test sets with real user prompts and compare new model versions against them. It shows clear numbers on speed, cost, and accuracy so you can spot trade-offs. Because LangSmith links to LangChain, you can add it with only a few extra lines of code. The web dashboard then gives charts, error logs, and side-by-side result views. 
This makes it easy to track progress, fix bugs, and prove that your agent is getting better over time.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "LangSmith", - "url": "https://smith.langchain.com/", - "type": "article" - }, - { - "title": "LangSmith Documentation", - "url": "https://docs.smith.langchain.com/", - "type": "article" - }, - { - "title": "Harden your application with LangSmith Evaluation", - "url": "https://www.langchain.com/evaluation", - "type": "article" - }, - { - "title": "What is LangSmith and Why should I care as a developer?", - "url": "https://medium.com/around-the-prompt/what-is-langsmith-and-why-should-i-care-as-a-developer-e5921deb54b5", - "type": "article" - } - ] - }, - "YzEDtGEaMaMWVt0W03HRt": { - "title": "Ragas", - "description": "Ragas is an open-source tool used to check how well a Retrieval-Augmented Generation (RAG) agent works. You give it the user question, the passages the agent pulled from a knowledge base, and the final answer. Ragas then scores the answer for things like correctness, relevance, and whether the cited passages really support the words in the answer. It uses large language models under the hood, so you do not need to write your own scoring rules. Results appear in a clear report that shows strong and weak spots in the pipeline. With this feedback you can change prompts, retriever settings, or model choices and quickly see if quality goes up. 
This makes testing RAG systems faster, repeatable, and less guess-based.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "explodinggradients/ragas", - "url": "https://github.com/explodinggradients/ragas", - "type": "opensource" - }, - { - "title": "Ragas Documentation", - "url": "https://docs.ragas.io/en/latest/", - "type": "article" - }, - { - "title": "Evaluating RAG Applications with RAGAs", - "url": "https://towardsdatascience.com/evaluating-rag-applications-with-ragas-81d67b0ee31a/n", - "type": "article" - } - ] - }, - "0924QUH1wV7Mp-Xu0FAhF": { - "title": "DeepEval", - "description": "DeepEval is an open-source tool that helps you test and score the answers your AI agent gives. You write small test cases that show an input and the reply you hope to get, or a rule the reply must follow. DeepEval runs the agent, checks the reply with built-in measures such as similarity, accuracy, or safety, and then marks each test as pass or fail. You can add your own checks, store tests in code or YAML files, and run them in a CI pipeline so every new model or prompt version gets the same quick audit. 
The fast feedback makes it easy to spot errors, cut down on hallucinations, and compare different models before you ship.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "DeepEval GitHub Repository", - "url": "https://github.com/confident-ai/deepeval", - "type": "opensource" - }, - { - "title": "DeepEval - The Open-Source LLM Evaluation Framework", - "url": "https://www.deepeval.com/", - "type": "article" - }, - { - "title": "Evaluate LLMs Effectively Using DeepEval: A Pratical Guide", - "url": "https://www.datacamp.com/tutorial/deepeval", - "type": "article" - }, - { - "title": "DeepEval - LLM Evaluation Framework", - "url": "https://www.youtube.com/watch?v=ZNs2dCXHlfo", - "type": "video" - } - ] - }, - "zs6LM8WEnb0ERWpiaQCgc": { - "title": "Structured logging & tracing", - "description": "Structured logging and tracing are ways to record what an AI agent does so you can find and fix problems fast. Instead of dumping plain text, the agent writes logs in a fixed key-value format, such as time, user\\_id, step, and message. Because every entry follows the same shape, search tools can filter, sort, and count events with ease. Tracing links those log lines into a chain that follows one request or task across many functions, threads, or microservices. By adding a unique trace ID to each step, you can see how long each part took and where errors happened. 
Together, structured logs and traces offer clear, machine-readable data that helps developers spot slow code paths, unusual behavior, and hidden bugs without endless manual scans.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Understanding Structured Logging: A Comprehensive Guide", - "url": "https://www.graphapp.ai/blog/understanding-structured-logging-a-comprehensive-guide", - "type": "article" - }, - { - "title": "Structured Logging & Cloud Logging", - "url": "https://cloud.google.com/logging/docs/structured-logging", - "type": "article" - }, - { - "title": "Best Practices for Logging in AI Applications", - "url": "https://www.restack.io/p/best-ai-practices-software-compliance-answer-logging-best-practices-cat-ai", - "type": "article" - } - ] - }, - "SS8mGqf9wfrNqenIWvN8Z": { - "title": "LangSmith", - "description": "LangSmith is a web tool that helps you see and fix what your AI agents are doing. It records each call that the agent makes to a language model, the input it used, and the answer it got back. You can replay any step, compare different prompts, measure cost, speed, and error rates, and tag runs for easy search. It also lets you store test sets and run quick checks so you know if new code makes the agent worse. 
By showing clear traces and charts, LangSmith makes it easier to debug, improve, and trust AI systems built with LangChain or other frameworks.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "LangSmith", - "url": "https://smith.langchain.com/", - "type": "article" - }, - { - "title": "LangSmith Documentation", - "url": "https://docs.smith.langchain.com/", - "type": "article" - }, - { - "title": "Harden your application with LangSmith Evaluation", - "url": "https://www.langchain.com/evaluation", - "type": "article" - }, - { - "title": "What is LangSmith and Why should I care as a developer?", - "url": "https://medium.com/around-the-prompt/what-is-langsmith-and-why-should-i-care-as-a-developer-e5921deb54b5", - "type": "article" - } - ] - }, - "MLxP5N0Vrmwh-kyvNeGXn": { - "title": "Helicone", - "description": "Helicone is an open-source tool that helps you watch and understand how your AI agents talk to large language models. You send your model calls through Helicone’s proxy, and it records each request and response without changing the result. A clear web dashboard then shows logs, latency, token counts, error rates, and cost for every call. You can filter, search, and trace a single user journey, which makes it easy to spot slow prompts or rising costs. 
Helicone also lets you set alerts and share traces with your team, so problems get fixed fast and future changes are safer.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Helicone/helicone", - "url": "https://github.com/Helicone/helicone", - "type": "opensource" - }, - { - "title": "Helicone", - "url": "https://www.helicone.ai/", - "type": "article" - }, - { - "title": "Helicone OSS LLM Observability", - "url": "https://docs.helicone.ai/getting-started/quick-start", - "type": "article" - } - ] - }, - "UoIheaJlShiceafrWALEH": { - "title": "LangFuse", - "description": "LangFuse is a free, open-source tool that lets you watch and debug AI agents while they run. You add a small code snippet to your agent, and LangFuse starts collecting every prompt, model response, and user input. It shows this data as neat timelines, so you can see each step the agent takes, how long the calls cost, and where errors happen. You can tag runs, search through them, and compare different prompt versions to find what works best. The dashboard also tracks token usage and latency, helping you cut cost and improve speed. Because LangFuse stores data in your own database, you keep full control of sensitive text. 
It works well with popular frameworks like LangChain and can send alerts to Slack or email when something breaks.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "langfuse/langfuse", - "url": "https://github.com/langfuse/langfuse", - "type": "opensource" - }, - { - "title": "LangFuse", - "url": "https://langfuse.com/", - "type": "article" - }, - { - "title": "LangFuse Documentation", - "url": "https://langfuse.com/docs", - "type": "article" - }, - { - "title": "Langfuse: Open Source LLM Engineering Platform", - "url": "https://www.ycombinator.com/companies/langfuse", - "type": "article" - } - ] - }, - "7UqPXUzqKYXklnB3x-tsv": { - "title": "openllmetry", - "description": "openllmetry is a small Python library that makes it easy to watch what your AI agent is doing and how well it is working. It wraps calls to large-language-model APIs, vector stores, and other tools, then sends logs, traces, and simple metrics to any backend that speaks the OpenTelemetry standard, such as Jaeger, Zipkin, or Grafana. You add one or two lines of code at start-up, and the library captures prompt text, model name, latency, token counts, and costs each time the agent asks the model for an answer. The data helps you spot slow steps, high spend, or bad answers, and it lets you play back full traces to debug agent chains. Because it follows OpenTelemetry, you can mix these AI traces with normal service traces and see the whole flow in one place.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "traceloop/openllmetry", - "url": "https://github.com/traceloop/openllmetry", - "type": "opensource" - }, - { - "title": "OpenTelemetry Documentation", - "url": "https://www.traceloop.com/blog/openllmetry", - "type": "article" - }, - { - "title": "What is OpenLLMetry? 
- traceloop", - "url": "https://www.traceloop.com/docs/openllmetry/introduction", - "type": "article" - }, - { - "title": "Use Traceloop with Python", - "url": "https://www.traceloop.com/docs/openllmetry/getting-started-python", - "type": "article" - } - ] - }, - "SU2RuicMUo8tiAsQtDI1k": { - "title": "Prompt Injection / Jailbreaks", - "description": "Prompt injection, also called a jailbreak, is a trick that makes an AI system break its own rules. An attacker hides special words or symbols inside normal-looking text. When the AI reads this text, it follows the hidden instructions instead of its safety rules. The attacker might force the AI to reveal private data, produce harmful content, or give wrong advice. This risk grows when the AI talks to other software or pulls text from the internet, because harmful prompts can slip in without warning. Good defenses include cleaning user input, setting strong guardrails inside the model, checking outputs for policy breaks, and keeping humans in the loop for high-risk tasks.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Prompt Injection vs. Jailbreaking: What's the Difference?", - "url": "https://learnprompting.org/blog/injection_jailbreaking", - "type": "article" - }, - { - "title": "Prompt Injection vs Prompt Jailbreak", - "url": "https://codoid.com/ai/prompt-injection-vs-prompt-jailbreak-a-detailed-comparison/", - "type": "article" - }, - { - "title": "How Prompt Attacks Exploit GenAI and How to Fight Back", - "url": "https://unit42.paloaltonetworks.com/new-frontier-of-genai-threats-a-comprehensive-guide-to-prompt-attacks/", - "type": "article" - } - ] - }, - "UVzLGXG6K7HQVHmw8ZAv2": { - "title": "Tool sandboxing / Permissioning", - "description": "Tool sandboxing keeps the AI agent inside a safe zone where it can only run approved actions and cannot touch the wider system. Permissioning sets clear rules that say which files, networks, or commands the agent may use. 
Together they stop errors, leaks, or abuse by limiting what the agent can reach and do. Developers grant the smallest set of rights, watch activity, and block anything outside the plan. If the agent needs new access, it must ask and get a fresh permit. This simple fence protects user data, reduces harm, and builds trust in the agent’s work.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "AI Sandbox | Harvard University Information Technology", - "url": "https://www.huit.harvard.edu/ai-sandbox", - "type": "article" - }, - { - "title": "How to Set Up AI Sandboxes to Maximize Adoption", - "url": "https://medium.com/@emilholmegaard/how-to-set-up-ai-sandboxes-to-maximize-adoption-without-compromising-ethics-and-values-637c70626130", - "type": "article" - }, - { - "title": "Sandboxes for AI - The Datasphere Initiative", - "url": "https://www.thedatasphere.org/datasphere-publish/sandboxes-for-ai/", - "type": "article" - } - ] - }, - "rdlYBJNNyZUshzsJawME4": { - "title": "Data Privacy + PII Redaction", - "description": "AI agents often process text, images, and logs that include personal data like names, phone numbers, or addresses. Leaks can cause fraud, stalking, or other harm, so laws like GDPR and CCPA require strict protections. A key method is PII redaction: scanning inputs and outputs to find and mask any personal details before storage or sharing. Redaction uses pattern rules, machine learning, or both. 
Teams should also keep audit logs, enforce access controls, and test their redaction flows often to prevent leaks.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "GDPR Compliance Overview", - "url": "https://gdpr.eu/", - "type": "article" - }, - { - "title": "Protect Sensitive Data with PII Redaction Software", - "url": "https://redactor.ai/blog/pii-redaction-software-guide", - "type": "article" - }, - { - "title": "A Complete Guide on PII Redaction", - "url": "https://enthu.ai/blog/what-is-pii-redaction/", - "type": "article" - } - ] - }, - "EyLo2j8IQsIK91SKaXkmK": { - "title": "Bias & Toxicity Guardrails", - "description": "Bias and toxicity guardrails keep an AI agent from giving unfair or harmful results. Bias shows up when training data favors certain groups or views. Toxicity is language that is hateful, violent, or rude. To stop this, start with clean and balanced data. Remove slurs, stereotypes, and spam. Add examples from many voices so the model learns fair patterns. During training, test the model often and adjust weights or rules that lean one way. After training, put filters in place that block toxic words or flag unfair answers before users see them. Keep logs, run audits, and ask users for feedback to catch new issues early. Write down every step so builders and users know the limits and risks. 
These actions protect people, follow laws, and help users trust the AI.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Define the Agent Guardrails", - "url": "https://trailhead.salesforce.com/content/learn/modules/agentforce-agent-planning/define-the-agent-guardrails", - "type": "article" - }, - { - "title": "How to Build Safe AI Agents: Best Practices for Guardrails", - "url": "https://medium.com/@sahin.samia/how-to-build-safe-ai-agents-best-practices-for-guardrails-and-oversight-a0085b50c022", - "type": "article" - } - ] - }, - "63nsfJFO1BwjLX_ZVaPFC": { - "title": "Safety + Red Team Testing", - "description": "Safety + Red Team Testing is the practice of checking an AI agent for harmful or risky behavior before and after release. Safety work sets rules, guardrails, and alarms so the agent follows laws, keeps data private, and treats people fairly. Red team testing sends skilled testers to act like attackers or troublemakers. They type tricky prompts, try to leak private data, force biased outputs, or cause the agent to give dangerous advice. Every weakness they find is logged and fixed by adding filters, better training data, stronger limits, or live monitoring. 
Running these tests often lowers the chance of real-world harm and builds trust with users and regulators.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Visit Dedicated AI Red Teaming Roadmap", - "url": "https://roadmap.sh/ai-red-teaming", - "type": "article" - }, - { - "title": "Enhancing AI safety: Insights and lessons from red teaming", - "url": "https://www.microsoft.com/en-us/microsoft-cloud/blog/2025/01/14/enhancing-ai-safety-insights-and-lessons-from-red-teaming/", - "type": "article" - }, - { - "title": "AI Safety Testing in the Absence of Regulations", - "url": "https://aisecuritycentral.com/ai-safety-testing/", - "type": "article" - }, - { - "title": "A Guide to AI Red Teaming - HiddenLayer", - "url": "https://hiddenlayer.com/innovation-hub/a-guide-to-ai-red-teaming/", - "type": "article" - } - ] - } -} \ No newline at end of file diff --git a/public/roadmap-content/ai-data-scientist.json b/public/roadmap-content/ai-data-scientist.json deleted file mode 100644 index a773b1e4d..000000000 --- a/public/roadmap-content/ai-data-scientist.json +++ /dev/null @@ -1,414 +0,0 @@ -{ - "aStaDENn5PhEa-cFvNzXa": { - "title": "Mathematics", - "description": "Mathematics is the foundation of AI and Data Science. It is essential to have a good understanding of mathematics to excel in these fields.", - "links": [] - }, - "4WZL_fzJ3cZdWLLDoWN8D": { - "title": "Statistics", - "description": "Statistics is the science of collecting, analyzing, interpreting, presenting, and organizing data. It is a branch of mathematics that deals with the collection, analysis, interpretation, presentation, and organization of data. It is used in a wide range of fields, including science, engineering, medicine, and social science. Statistics is used to make informed decisions, to predict future events, and to test hypotheses. 
It is also used to summarize data, to describe relationships between variables, and to make inferences about populations based on samples.", - "links": [] - }, - "gWMvD83hVXeTmCuHGIiOL": { - "title": "Linear Algebra, Calculus, Mathematical Analysis", - "description": "", - "links": [ - { - "title": "Mathematics for Machine Learning Specialization", - "url": "https://imp.i384100.net/baqMYv", - "type": "article" - }, - { - "title": "Explore top posts about Math", - "url": "https://app.daily.dev/tags/math?ref=roadmapsh", - "type": "article" - }, - { - "title": "Linear Algebra Youtube Course", - "url": "https://www.youtube.com/playlist?list=PLZHQObOWTQDPD3MizzM2xVFitgF8hE_ab", - "type": "video" - } - ] - }, - "mwPJh33MEUQ4Co_LiVEOb": { - "title": "Differential Calculus ", - "description": "", - "links": [ - { - "title": "Algebra and Differential Calculus for Data Science", - "url": "https://imp.i384100.net/LX5M7M", - "type": "article" - }, - { - "title": "Calculus Youtube Course", - "url": "https://www.youtube.com/playlist?list=PLZHQObOWTQDMsr9K-rj53DwVRMYO3t5Yr", - "type": "video" - } - ] - }, - "Y9YJdARIRqqCBCy3GVYdA": { - "title": "Statistics, CLT", - "description": "", - "links": [ - { - "title": "Introduction to Statistics", - "url": "https://imp.i384100.net/3eRv4v", - "type": "article" - } - ] - }, - "XJXIkWVDIrPJ-bVIvX0ZO": { - "title": "Hypothesis Testing", - "description": "", - "links": [ - { - "title": "Introduction to Statistical Analysis: Hypothesis Testing", - "url": "https://imp.i384100.net/vN0JAA", - "type": "article" - }, - { - "title": "Explore top posts about Testing", - "url": "https://app.daily.dev/tags/testing?ref=roadmapsh", - "type": "article" - } - ] - }, - "jxJtwbiCvxHqmkWkE7zdx": { - "title": "Probability and Sampling", - "description": "", - "links": [ - { - "title": "Probability and Statistics: To p or not to p?", - "url": "https://imp.i384100.net/daDM6Q", - "type": "article" - }, - { - "title": "Explore top posts about Statistics", - 
"url": "https://app.daily.dev/tags/statistics?ref=roadmapsh", - "type": "article" - } - ] - }, - "mJq9b50MJM9o9dLhx40iN": { - "title": "AB Testing", - "description": "", - "links": [ - { - "title": "Practitioner’s Guide to Statistical Tests", - "url": "https://vkteam.medium.com/practitioners-guide-to-statistical-tests-ed2d580ef04f#1e3b", - "type": "article" - }, - { - "title": "Step by Step Process for Planning an A/B Test", - "url": "https://medium.com/data-science/step-by-step-for-planning-an-a-b-test-ef3c93143c0b", - "type": "article" - }, - { - "title": "Explore top posts about A/B Testing", - "url": "https://app.daily.dev/tags/ab-testing?ref=roadmapsh", - "type": "article" - } - ] - }, - "v68nwX914qCwHDSwY_ZhG": { - "title": "Increasing Test Sensitivity", - "description": "", - "links": [ - { - "title": "Minimum Detectable Effect (MDE)", - "url": "https://splitmetrics.com/resources/minimum-detectable-effect-mde/", - "type": "article" - }, - { - "title": "Improving the Sensitivity of Online Controlled Experiments: Case Studies at Netflix", - "url": "https://kdd.org/kdd2016/papers/files/adp0945-xieA.pdf", - "type": "article" - }, - { - "title": "Improving the Sensitivity of Online Controlled Experiments by Utilizing Pre-Experiment Data", - "url": "https://exp-platform.com/Documents/2013-02-CUPED-ImprovingSensitivityOfControlledExperiments.pdf", - "type": "article" - }, - { - "title": "How Booking.com increases the power of online experiments with CUPED", - "url": "https://booking.ai/how-booking-com-increases-the-power-of-online-experiments-with-cuped-995d186fff1d", - "type": "article" - }, - { - "title": "Improving Experimental Power through Control Using Predictions as Covariate — CUPAC", - "url": "https://doordash.engineering/2020/06/08/improving-experimental-power-through-control-using-predictions-as-covariate-cupac/", - "type": "article" - }, - { - "title": "Improving the Sensitivity of Online Controlled Experiments: Case Studies at Netflix", - "url": 
"https://www.researchgate.net/publication/305997925_Improving_the_Sensitivity_of_Online_Controlled_Experiments_Case_Studies_at_Netflix", - "type": "article" - } - ] - }, - "n2JFGwFxTuOviW6kHO1Uv": { - "title": "Ratio Metrics", - "description": "", - "links": [ - { - "title": "Applying the Delta Method in Metric Analytics: A Practical Guide with Novel Ideas", - "url": "https://arxiv.org/pdf/1803.06336.pdf", - "type": "article" - }, - { - "title": "Approximations for Mean and Variance of a Ratio", - "url": "https://www.stat.cmu.edu/~hseltman/files/ratio.pdf", - "type": "article" - } - ] - }, - "Gd2egqKZPnbPW1W2jw4j8": { - "title": "Econometrics", - "description": "Econometrics is the application of statistical methods to economic data. It is a branch of economics that aims to give empirical content to economic relations. More precisely, it is \"the quantitative analysis of actual economic phenomena based on the concurrent development of theory and observation, related by appropriate methods of inference.\" Econometrics can be described as something that allows economists \"to sift through mountains of data to extract simple relationships.\"", - "links": [] - }, - "y6xXsc-uSAmRDnNuyhqH2": { - "title": "Pre-requisites of Econometrics", - "description": "", - "links": [ - { - "title": "10 Fundamental Theorems for Econometrics", - "url": "https://bookdown.org/ts_robinson1994/10EconometricTheorems/", - "type": "article" - } - ] - }, - "h19k9Fn5XPh3_pKEC8Ftp": { - "title": "Regression, Timeseries, Fitting Distributions", - "description": "", - "links": [ - { - "title": "Blockchain.com Data Scientist TakeHome Test", - "url": "https://github.com/stalkermustang/bcdc_ds_takehome", - "type": "opensource" - }, - { - "title": "10 Fundamental Theorems for Econometrics", - "url": "https://bookdown.org/ts_robinson1994/10EconometricTheorems/", - "type": "article" - }, - { - "title": "Dougherty Intro to Econometrics 4th edition", - "url": 
"https://www.academia.edu/33062577/Dougherty_Intro_to_Econometrics_4th_ed_small", - "type": "article" - }, - { - "title": "Econometrics: Methods and Applications", - "url": "https://imp.i384100.net/k0krYL", - "type": "article" - }, - { - "title": "Kaggle - Learn Time Series", - "url": "https://www.kaggle.com/learn/time-series", - "type": "article" - }, - { - "title": "Time series Basics : Exploring traditional TS", - "url": "https://www.kaggle.com/code/jagangupta/time-series-basics-exploring-traditional-ts#Hierarchical-time-series", - "type": "article" - }, - { - "title": "How to Create an ARIMA Model for Time Series Forecasting in Python", - "url": "https://machinelearningmastery.com/arima-for-time-series-forecasting-with-python", - "type": "article" - }, - { - "title": "11 Classical Time Series Forecasting Methods in Python", - "url": "https://machinelearningmastery.com/time-series-forecasting-methods-in-python-cheat-sheet/", - "type": "article" - }, - { - "title": "Linear Regression for Business Statistics", - "url": "https://imp.i384100.net/9g97Ke", - "type": "article" - } - ] - }, - "XLDWuSt4tI4gnmqMFdpmy": { - "title": "Coding", - "description": "Programming is a fundamental skill for data scientists. You need to be able to write code to manipulate data, build models, and deploy solutions. The most common programming languages used in data science are Python and R. Python is a general-purpose programming language that is easy to learn and has a large number of libraries for data manipulation and machine learning. R is a programming language and free software environment for statistical computing and graphics. 
It is widely used for statistical analysis and data visualization.", - "links": [] - }, - "MVrAqizgkoAs2aghN8TgV": { - "title": "Learn Python Programming Language", - "description": "", - "links": [ - { - "title": "Kaggle — Python", - "url": "https://www.kaggle.com/learn/python", - "type": "article" - }, - { - "title": "Google's Python Class", - "url": "https://developers.google.com/edu/python", - "type": "article" - }, - { - "title": "Explore top posts about Python", - "url": "https://app.daily.dev/tags/python?ref=roadmapsh", - "type": "article" - } - ] - }, - "StBCykpzpM4g9PRFeSNXa": { - "title": "Data Structures and Algorithms (Python)", - "description": "", - "links": [ - { - "title": "Learn Algorithms", - "url": "https://leetcode.com/explore/learn/", - "type": "article" - }, - { - "title": "Leetcode - Study Plans", - "url": "https://leetcode.com/studyplan/", - "type": "article" - }, - { - "title": "Algorithms Specialization", - "url": "https://imp.i384100.net/5gqv4n", - "type": "article" - } - ] - }, - "Im0tXXn3GC-FUq2aMHgwm": { - "title": "Learn SQL", - "description": "", - "links": [ - { - "title": "SQL Tutorial", - "url": "https://www.sqltutorial.org/", - "type": "article" - }, - { - "title": "Explore top posts about SQL", - "url": "https://app.daily.dev/tags/sql?ref=roadmapsh", - "type": "article" - } - ] - }, - "l1027SBZxTHKzqWw98Ee-": { - "title": "Exploratory Data Analysis", - "description": "Exploratory Data Analysis (EDA) is an approach to analyzing data sets to summarize their main characteristics, often with visual methods. EDA is used to understand what the data can tell us beyond the formal modeling or hypothesis testing task. 
It is a crucial step in the data analysis process.", - "links": [] - }, - "JaN8YhMeN3whAe2TCXvw9": { - "title": "Data understanding, Data Analysis and Visualization", - "description": "", - "links": [ - { - "title": "Exploratory Data Analysis With Python and Pandas", - "url": "https://imp.i384100.net/AWAv4R", - "type": "article" - }, - { - "title": "Exploratory Data Analysis for Machine Learning", - "url": "https://imp.i384100.net/GmQMLE", - "type": "article" - }, - { - "title": "Python for Data Visualization: Matplotlib & Seaborn", - "url": "https://imp.i384100.net/55xvzn", - "type": "article" - } - ] - }, - "kBdt_t2SvVsY3blfubWIz": { - "title": "Machine Learning", - "description": "Machine learning is a field of artificial intelligence that uses statistical techniques to give computer systems the ability to \"learn\" (e.g., progressively improve performance on a specific task) from data, without being explicitly programmed. The name machine learning was coined in 1959 by Arthur Samuel. Evolved from the study of pattern recognition and computational learning theory in artificial intelligence, machine learning explores the study and construction of algorithms that can learn from and make predictions on data – such algorithms overcome following strictly static program instructions by making data-driven predictions or decisions, through building a model from sample inputs. 
Machine learning is employed in a range of computing tasks where designing and programming explicit algorithms with good performance is difficult or infeasible; example applications include email filtering, detection of network intruders, and computer vision.", - "links": [] - }, - "FdBih8tlGPPy97YWq463y": { - "title": "Classic ML (Sup., Unsup.), Advanced ML (Ensembles, NNs)", - "description": "", - "links": [ - { - "title": "Repository of notes, code and notebooks in Python for the book Pattern Recognition and Machine Learning by Christopher Bishop", - "url": "https://github.com/gerdm/prml", - "type": "opensource" - }, - { - "title": "Open Machine Learning Course", - "url": "https://mlcourse.ai/book/topic01/topic01_intro.html", - "type": "article" - }, - { - "title": "Coursera: Machine Learning Specialization", - "url": "https://imp.i384100.net/oqGkrg", - "type": "article" - }, - { - "title": "Pattern Recognition and Machine Learning by Christopher Bishop", - "url": "https://www.microsoft.com/en-us/research/uploads/prod/2006/01/Bishop-Pattern-Recognition-and-Machine-Learning-2006.pdf", - "type": "article" - }, - { - "title": "Explore top posts about Machine Learning", - "url": "https://app.daily.dev/tags/machine-learning?ref=roadmapsh", - "type": "article" - } - ] - }, - "cjvVLN0XjrKPn6o20oMmc": { - "title": "Deep Learning", - "description": "Deep Learning\n-------------\n\nDeep learning is a subset of machine learning that deals with algorithms inspired by the structure and function of the brain called artificial neural networks. Deep learning is a key technology behind driverless cars, enabling them to recognize a stop sign, or to distinguish a pedestrian from a lamppost. It is the key to voice control in consumer devices like phones, tablets, TVs, and hands-free speakers. Deep learning is getting lots of attention lately and for good reason. 
It’s achieving results that were not possible before.", - "links": [] - }, - "eOFoGKveaHaBm_6ppJUtA": { - "title": "Fully Connected, CNN, RNN, LSTM, Transformers, TL", - "description": "", - "links": [ - { - "title": "The Illustrated Transformer", - "url": "https://jalammar.github.io/illustrated-transformer/", - "type": "article" - }, - { - "title": "Attention is All you Need", - "url": "https://arxiv.org/pdf/1706.03762.pdf", - "type": "article" - }, - { - "title": "Deep Learning Book", - "url": "https://www.deeplearningbook.org/", - "type": "article" - }, - { - "title": "Deep Learning Specialization", - "url": "https://imp.i384100.net/Wq9MV3", - "type": "article" - } - ] - }, - "Qa85hEVe2kz62k9Pj4QCA": { - "title": "MLOps", - "description": "MLOps is a practice for collaboration and communication between data scientists and operations professionals to help manage production ML lifecycle. It is a set of best practices that aims to automate the ML lifecycle, including training, deployment, and monitoring. 
MLOps helps organizations to scale ML models and deliver business value faster.", - "links": [] - }, - "uPzzUpI0--7OWDfNeBIjt": { - "title": "Deployment Models, CI/CD", - "description": "", - "links": [ - { - "title": "Machine Learning Engineering for Production (MLOps) Specialization", - "url": "https://imp.i384100.net/nLA5mx", - "type": "article" - }, - { - "title": "Full Stack Deep Learning", - "url": "https://fullstackdeeplearning.com/course/2022/", - "type": "article" - }, - { - "title": "Explore top posts about CI/CD", - "url": "https://app.daily.dev/tags/cicd?ref=roadmapsh", - "type": "article" - } - ] - } -} \ No newline at end of file diff --git a/public/roadmap-content/ai-engineer.json b/public/roadmap-content/ai-engineer.json deleted file mode 100644 index 762f66093..000000000 --- a/public/roadmap-content/ai-engineer.json +++ /dev/null @@ -1,1986 +0,0 @@ -{ - "_hYN0gEi9BL24nptEtXWU": { - "title": "Introduction", - "description": "AI Engineering is the process of designing and implementing AI systems using pre-trained models and existing AI tools to solve practical problems. AI Engineers focus on applying AI in real-world scenarios, improving user experiences, and automating tasks, without developing new models from scratch. 
They work to ensure AI systems are efficient, scalable, and can be seamlessly integrated into business applications, distinguishing their role from AI Researchers and ML Engineers, who concentrate more on creating new models or advancing AI theory.\n\nLearn more from the following resources:", - "links": [ - { - "title": "AI Engineering", - "url": "https://en.wikipedia.org/wiki/Artificial_intelligence_engineering", - "type": "article" - }, - { - "title": "AI vs Machine Learning", - "url": "https://www.youtube.com/watch?v=4RixMPF4xis", - "type": "video" - }, - { - "title": "AI vs Machine Learning vs Deep Learning vs GenAI", - "url": "https://youtu.be/qYNweeDHiyU?si=eRJXjtk8Q-RKQ8Ms", - "type": "video" - } - ] - }, - "GN6SnI7RXIeW8JeD-qORW": { - "title": "What is an AI Engineer?", - "description": "AI engineers are professionals who specialize in designing, developing, and implementing artificial intelligence (AI) systems. Their work is essential in various industries, as they create applications that enable machines to perform tasks that typically require human intelligence, such as problem-solving, learning, and decision-making.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "AI For Everyone", - "url": "https://www.coursera.org/learn/ai-for-everyone", - "type": "course" - }, - { - "title": "How to Become an AI Engineer: Duties, Skills, and Salary", - "url": "https://www.simplilearn.com/tutorials/artificial-intelligence-tutorial/how-to-become-an-ai-engineer", - "type": "article" - }, - { - "title": "AI Engineers: What they do and how to become one", - "url": "https://www.techtarget.com/whatis/feature/How-to-become-an-artificial-intelligence-engineer", - "type": "article" - } - ] - }, - "jSZ1LhPdhlkW-9QJhIvFs": { - "title": "AI Engineer vs ML Engineer", - "description": "An AI Engineer uses pre-trained models and existing AI tools to improve user experiences. 
They focus on applying AI in practical ways, without building models from scratch. This is different from AI Researchers and ML Engineers, who focus more on creating new models or developing AI theory.\n\nLearn more from the following resources:", - "links": [ - { - "title": "What does an AI Engineer do?", - "url": "https://www.codecademy.com/resources/blog/what-does-an-ai-engineer-do/", - "type": "article" - }, - { - "title": "What is an ML Engineer?", - "url": "https://www.coursera.org/articles/what-is-machine-learning-engineer", - "type": "article" - }, - { - "title": "AI vs ML", - "url": "https://www.youtube.com/watch?v=4RixMPF4xis", - "type": "video" - } - ] - }, - "wf2BSyUekr1S1q6l8kyq6": { - "title": "LLMs", - "description": "LLMs, or Large Language Models, are advanced AI models trained on vast datasets to understand and generate human-like text. They can perform a wide range of natural language processing tasks, such as text generation, translation, summarization, and question answering. Examples include GPT-4, BERT, and T5. LLMs are capable of understanding context, handling complex queries, and generating coherent responses, making them useful for applications like chatbots, content creation, and automated support. 
However, they require significant computational resources and may carry biases from their training data.\n\nLearn more from the following resources:", - "links": [ - { - "title": "What is a large language model (LLM)?", - "url": "https://www.cloudflare.com/en-gb/learning/ai/what-is-large-language-model/", - "type": "article" - }, - { - "title": "How Large Language Models Work", - "url": "https://www.youtube.com/watch?v=5sLYAQS9sWQ", - "type": "video" - }, - { - "title": "Large Language Models (LLMs) - Everything You NEED To Know", - "url": "https://www.youtube.com/watch?v=osKyvYJ3PRM", - "type": "video" - } - ] - }, - "KWjD4xEPhOOYS51dvRLd2": { - "title": "Inference", - "description": "In artificial intelligence (AI), inference refers to the process where a trained machine learning model makes predictions or draws conclusions from new, unseen data. Unlike training, inference involves the model applying what it has learned to make decisions without needing examples of the exact result. In essence, inference is the AI model actively functioning. For example, a self-driving car recognizing a stop sign on a road it has never encountered before demonstrates inference. The model identifies the stop sign in a new setting, using its learned knowledge to make a decision in real-time.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Inference vs Training", - "url": "https://www.cloudflare.com/learning/ai/inference-vs-training/", - "type": "article" - }, - { - "title": "What is Machine Learning Inference?", - "url": "https://hazelcast.com/glossary/machine-learning-inference/", - "type": "article" - }, - { - "title": "What is Machine Learning Inference? 
An Introduction to Inference Approaches", - "url": "https://www.datacamp.com/blog/what-is-machine-learning-inference", - "type": "article" - } - ] - }, - "xostGgoaYkqMO28iN2gx8": { - "title": "Training", - "description": "Training refers to the process of teaching a machine learning model to recognize patterns and make predictions by exposing it to a dataset. During training, the model learns from the data by adjusting its internal parameters to minimize errors between its predictions and the actual outcomes. This process involves iteratively feeding the model with input data, comparing its outputs to the correct answers, and refining its predictions through techniques like gradient descent. The goal is to enable the model to generalize well so that it can make accurate predictions on new, unseen data.\n\nLearn more from the following resources:", - "links": [ - { - "title": "What is Model Training?", - "url": "https://oden.io/glossary/model-training/", - "type": "article" - }, - { - "title": "Machine learning model training: What it is and why it’s important", - "url": "https://domino.ai/blog/what-is-machine-learning-model-training", - "type": "article" - }, - { - "title": "Training ML Models - Amazon", - "url": "https://docs.aws.amazon.com/machine-learning/latest/dg/training-ml-models.html", - "type": "article" - } - ] - }, - "XyEp6jnBSpCxMGwALnYfT": { - "title": "Embeddings", - "description": "Embeddings are dense, continuous vector representations of data, such as words, sentences, or images, in a lower-dimensional space. They capture the semantic relationships and patterns in the data, where similar items are placed closer together in the vector space. In machine learning, embeddings are used to convert complex data into numerical form that models can process more easily. For example, word embeddings represent words based on their meanings and contexts, allowing models to understand relationships like synonyms or analogies. 
Embeddings are widely used in tasks like natural language processing, recommendation systems, and image recognition to improve model performance and efficiency.\n\nLearn more from the following resources:", - "links": [ - { - "title": "What are Embeddings in Machine Learning?", - "url": "https://www.cloudflare.com/en-gb/learning/ai/what-are-embeddings/", - "type": "article" - }, - { - "title": "What is Embedding?", - "url": "https://www.ibm.com/topics/embedding", - "type": "article" - }, - { - "title": "What are Word Embeddings", - "url": "https://www.youtube.com/watch?v=wgfSDrqYMJ4", - "type": "video" - } - ] - }, - "LnQ2AatMWpExUHcZhDIPd": { - "title": "Vector Databases", - "description": "Vector databases are specialized systems designed to store, index, and retrieve high-dimensional vectors, often used as embeddings that represent data like text, images, or audio. Unlike traditional databases that handle structured data, vector databases excel at managing unstructured data by enabling fast similarity searches, where vectors are compared to find those that are most similar to a query. This makes them essential for tasks like semantic search, recommendation systems, and content discovery, where understanding relationships between items is crucial. 
Vector databases use indexing techniques such as approximate nearest neighbor (ANN) search to efficiently handle large datasets, ensuring quick and accurate retrieval even at scale.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Vector Databases", - "url": "https://developers.cloudflare.com/vectorize/reference/what-is-a-vector-database/", - "type": "article" - }, - { - "title": "What are Vector Databases?", - "url": "https://www.mongodb.com/resources/basics/databases/vector-databases", - "type": "article" - } - ] - }, - "9JwWIK0Z2MK8-6EQQJsCO": { - "title": "RAG", - "description": "Retrieval-Augmented Generation (RAG) is an AI approach that combines information retrieval with language generation to create more accurate, contextually relevant outputs. It works by first retrieving relevant data from a knowledge base or external source, then using a language model to generate a response based on that information. This method enhances the accuracy of generative models by grounding their outputs in real-world data, making RAG ideal for tasks like question answering, summarization, and chatbots that require reliable, up-to-date information.\n\nLearn more from the following resources:", - "links": [ - { - "title": "What is Retrieval Augmented Generation (RAG)? - Datacamp", - "url": "https://www.datacamp.com/blog/what-is-retrieval-augmented-generation-rag", - "type": "article" - }, - { - "title": "What is Retrieval-Augmented Generation? - Google", - "url": "https://cloud.google.com/use-cases/retrieval-augmented-generation", - "type": "article" - }, - { - "title": "What is Retrieval-Augmented Generation? - IBM", - "url": "https://www.youtube.com/watch?v=T-D1OfcDW1M", - "type": "video" - } - ] - }, - "Dc15ayFlzqMF24RqIF_-X": { - "title": "Prompt Engineering", - "description": "Prompt engineering is the process of crafting effective inputs (prompts) to guide AI models, like GPT, to generate desired outputs. 
It involves strategically designing prompts to optimize the model’s performance by providing clear instructions, context, and examples. Effective prompt engineering can improve the quality, relevance, and accuracy of responses, making it essential for applications like chatbots, content generation, and automated support. By refining prompts, developers can better control the model’s behavior, reduce ambiguity, and achieve more consistent results, enhancing the overall effectiveness of AI-driven systems.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Visit Dedicated Prompt Engineering Roadmap", - "url": "https://roadmap.sh/prompt-engineering", - "type": "article" - }, - { - "title": "What is Prompt Engineering?", - "url": "https://www.youtube.com/watch?v=nf1e-55KKbg", - "type": "video" - } - ] - }, - "9XCxilAQ7FRet7lHQr1gE": { - "title": "AI Agents", - "description": "In AI engineering, \"agents\" refer to autonomous systems or components that can perceive their environment, make decisions, and take actions to achieve specific goals. Agents often interact with external systems, users, or other agents to carry out complex tasks. 
They can vary in complexity, from simple rule-based bots to sophisticated AI-powered agents that leverage machine learning models, natural language processing, and reinforcement learning.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Building an AI Agent Tutorial - LangChain", - "url": "https://python.langchain.com/docs/tutorials/agents/", - "type": "article" - }, - { - "title": "AI Agents and Their Types", - "url": "https://www.digitalocean.com/resources/articles/types-of-ai-agents", - "type": "article" - }, - { - "title": "The Complete Guide to Building AI Agents for Beginners", - "url": "https://youtu.be/MOyl58VF2ak?si=-QjRD_5y3iViprJX", - "type": "video" - } - ] - }, - "5QdihE1lLpMc3DFrGy46M": { - "title": "AI vs AGI", - "description": "AI (Artificial Intelligence) refers to systems designed to perform specific tasks by mimicking aspects of human intelligence, such as pattern recognition, decision-making, and language processing. These systems, known as \"narrow AI,\" are highly specialized, excelling in defined areas like image classification or recommendation algorithms but lacking broader cognitive abilities. In contrast, AGI (Artificial General Intelligence) represents a theoretical form of intelligence that possesses the ability to understand, learn, and apply knowledge across a wide range of tasks at a human-like level. AGI would have the capacity for abstract thinking, reasoning, and adaptability similar to human cognitive abilities, making it far more versatile than today’s AI systems. 
While current AI technology is powerful, AGI remains a distant goal and presents complex challenges in safety, ethics, and technical feasibility.\n\nLearn more from the following resources:", - "links": [ - { - "title": "What is AGI?", - "url": "https://aws.amazon.com/what-is/artificial-general-intelligence/", - "type": "article" - }, - { - "title": "The crucial difference between AI and AGI", - "url": "https://www.forbes.com/sites/bernardmarr/2024/05/20/the-crucial-difference-between-ai-and-agi/", - "type": "article" - } - ] - }, - "qJVgKe9uBvXc-YPfvX_Y7": { - "title": "Impact on Product Development", - "description": "AI engineering transforms product development by automating tasks, enhancing data-driven decision-making, and enabling the creation of smarter, more personalized products. It speeds up design cycles, optimizes processes, and allows for predictive maintenance, quality control, and efficient resource management. By integrating AI, companies can innovate faster, reduce costs, and improve user experiences, giving them a competitive edge in the market.\n\nLearn more from the following resources:", - "links": [ - { - "title": "AI in Product Development: Netflix, BMW, and PepsiCo", - "url": "https://www.virtasant.com/ai-today/ai-in-product-development-netflix-bmw#:~:text=AI%20can%20help%20make%20product,and%20gain%20a%20competitive%20edge.", - "type": "article" - }, - { - "title": "AI Product Development: Why Are Founders So Fascinated By The Potential?", - "url": "https://www.techmagic.co/blog/ai-product-development/", - "type": "article" - } - ] - }, - "K9EiuFgPBFgeRxY4wxAmb": { - "title": "Roles and Responsibilities", - "description": "AI Engineers are responsible for designing, developing, and deploying AI systems that solve real-world problems. Their roles include building machine learning models, implementing data processing pipelines, and integrating AI solutions into existing software or platforms. 
They work on tasks like data collection, cleaning, and labeling, as well as model training, testing, and optimization to ensure high performance and accuracy. AI Engineers also focus on scaling models for production use, monitoring their performance, and troubleshooting issues. Additionally, they collaborate with data scientists, software developers, and other stakeholders to align AI projects with business goals, ensuring that solutions are reliable, efficient, and ethically sound.\n\nLearn more from the following resources:", - "links": [ - { - "title": "AI Engineer Job Description", - "url": "https://resources.workable.com/ai-engineer-job-description", - "type": "article" - }, - { - "title": "How To Become an AI Engineer (Plus Job Duties and Skills)", - "url": "https://www.indeed.com/career-advice/finding-a-job/ai-engineer", - "type": "article" - } - ] - }, - "d7fzv_ft12EopsQdmEsel": { - "title": "Pre-trained Models", - "description": "Pre-trained models are Machine Learning (ML) models that have been previously trained on a large dataset to solve a specific task or set of tasks. These models learn patterns, features, and representations from the training data, which can then be fine-tuned or adapted for other related tasks. 
Pre-training provides a good starting point, reducing the amount of data and computation required to train a new model from scratch.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Pre-trained Models: Past, Present and Future", - "url": "https://www.sciencedirect.com/science/article/pii/S2666651021000231", - "type": "article" - } - ] - }, - "1Ga6DbOPc6Crz7ilsZMYy": { - "title": "Benefits of Pre-trained Models", - "description": "Pre-trained models offer several benefits in AI engineering by significantly reducing development time and computational resources because these models are trained on large datasets and can be fine-tuned for specific tasks, which enables quicker deployment and better performance with less data. They help overcome the challenge of needing vast amounts of labeled data and computational power for training from scratch. Additionally, pre-trained models often demonstrate improved accuracy, generalization, and robustness across different tasks, making them ideal for applications in natural language processing, computer vision, and other AI domains.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Why Pre-Trained Models Matter For Machine Learning", - "url": "https://www.ahead.com/resources/why-pre-trained-models-matter-for-machine-learning/", - "type": "article" - }, - { - "title": "Why You Should Use Pre-Trained Models Versus Building Your Own", - "url": "https://cohere.com/blog/pre-trained-vs-in-house-nlp-models", - "type": "article" - } - ] - }, - "MXqbQGhNM3xpXlMC2ib_6": { - "title": "Limitations and Considerations", - "description": "Pre-trained models, while powerful, come with several limitations and considerations. They may carry biases present in the training data, leading to unintended or discriminatory outcomes, these models are also typically trained on general data, so they might not perform well on niche or domain-specific tasks without further fine-tuning. 
Another concern is the \"black-box\" nature of many pre-trained models, which can make their decision-making processes hard to interpret and explain.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Pre-trained Topic Models: Advantages and Limitation", - "url": "https://www.kaggle.com/code/amalsalilan/pretrained-topic-models-advantages-and-limitation", - "type": "article" - }, - { - "title": "Should You Use Open Source Large Language Models?", - "url": "https://www.youtube.com/watch?v=y9k-U9AuDeM", - "type": "video" - } - ] - }, - "2WbVpRLqwi3Oeqk1JPui4": { - "title": "Open AI Models", - "description": "OpenAI provides a variety of models designed for diverse tasks. GPT models like GPT-3 and GPT-4 handle text generation, conversation, and translation, offering context-aware responses, while Codex specializes in generating and debugging code across multiple languages. DALL-E creates images from text descriptions, supporting applications in design and content creation, and Whisper is a speech recognition model that converts spoken language to text for transcription and voice-to-text tasks.\n\nLearn more from the following resources:", - "links": [ - { - "title": "OpenAI Models Overview", - "url": "https://platform.openai.com/docs/models", - "type": "article" - }, - { - "title": "OpenAI’s new “deep-thinking” o1 model crushes coding benchmarks", - "url": "https://www.youtube.com/watch?v=6xlPJiNpCVw", - "type": "video" - } - ] - }, - "vvpYkmycH0_W030E-L12f": { - "title": "Capabilities / Context Length", - "description": "A key aspect of the OpenAI models is their context length, which refers to the amount of input text the model can process at once. Earlier models like GPT-3 had a context length of up to 4,096 tokens (words or word pieces), while more recent models like GPT-4 can handle significantly larger context lengths, some supporting up to 32,768 tokens. 
This extended context length enables the models to handle more complex tasks, such as maintaining long conversations or processing lengthy documents, which enhances their utility in real-world applications like legal document analysis or code generation.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Managing Context", - "url": "https://platform.openai.com/docs/guides/conversation-state?api-mode=responses#managing-context-for-text-generation", - "type": "article" - }, - { - "title": "Capabilities", - "url": "https://platform.openai.com/docs/guides/text-generation", - "type": "article" - } - ] - }, - "LbB2PeytxRSuU07Bk0KlJ": { - "title": "Cut-off Dates / Knowledge", - "description": "OpenAI models, such as GPT-3.5 and GPT-4, have a knowledge cutoff date, which refers to the last point in time when the model was trained on data. For instance, as of the current version of GPT-4, the knowledge cutoff is October 2023. This means the model does not have awareness or knowledge of events, advancements, or data that occurred after that date. Consequently, the model may lack information on more recent developments, research, or real-time events unless explicitly updated in future versions. This limitation is important to consider when using the models for time-sensitive tasks or inquiries involving recent knowledge.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Knowledge Cutoff Dates of all LLMs explained", - "url": "https://otterly.ai/blog/knowledge-cutoff/", - "type": "article" - }, - { - "title": "Knowledge Cutoff Dates For ChatGPT, Meta Ai, Copilot, Gemini, Claude", - "url": "https://computercity.com/artificial-intelligence/knowledge-cutoff-dates-llms", - "type": "article" - } - ] - }, - "hy6EyKiNxk1x84J63dhez": { - "title": "Anthropic's Claude", - "description": "Anthropic's Claude is an AI language model designed to facilitate safe and scalable AI systems. 
Named after Claude Shannon, the father of information theory, Claude focuses on responsible AI use, emphasizing safety, alignment with human intentions, and minimizing harmful outputs. Built as a competitor to models like OpenAI's GPT, Claude is designed to handle natural language tasks such as generating text, answering questions, and supporting conversations, with a strong focus on aligning AI behavior with user goals while maintaining transparency and avoiding harmful biases.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Claude", - "url": "https://claude.ai", - "type": "article" - }, - { - "title": "How To Use Claude Pro For Beginners", - "url": "https://www.youtube.com/watch?v=J3X_JWQkvo8", - "type": "video" - } - ] - }, - "oe8E6ZIQWuYvHVbYJHUc1": { - "title": "Google's Gemini", - "description": "Google Gemini is an advanced AI model by Google DeepMind, designed to integrate natural language processing with multimodal capabilities, enabling it to understand and generate not just text but also images, videos, and other data types. It combines generative AI with reasoning skills, making it effective for complex tasks requiring logical analysis and contextual understanding. Built on Google's extensive knowledge base and infrastructure, Gemini aims to offer high accuracy, efficiency, and safety, positioning it as a competitor to models like OpenAI's GPT-4.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Google Gemini", - "url": "https://gemini.google.com/", - "type": "article" - }, - { - "title": "Google's Gemini Documentation", - "url": "https://workspace.google.com/solutions/ai/", - "type": "article" - }, - { - "title": "Welcome to the Gemini era", - "url": "https://www.youtube.com/watch?v=_fuimO6ErKI", - "type": "video" - } - ] - }, - "3PQVZbcr4neNMRr6CuNzS": { - "title": "Azure AI", - "description": "Azure AI is a suite of AI services and tools provided by Microsoft through its Azure cloud platform. 
It includes pre-built AI models for natural language processing, computer vision, and speech, as well as tools for developing custom machine learning models using services like Azure Machine Learning. Azure AI enables developers to integrate AI capabilities into applications with APIs for tasks like sentiment analysis, image recognition, and language translation. It also supports responsible AI development with features for model monitoring, explainability, and fairness, aiming to make AI accessible, scalable, and secure across industries.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Azure AI", - "url": "https://azure.microsoft.com/en-gb/solutions/ai", - "type": "article" - }, - { - "title": "How to Choose the Right Models for Your Apps", - "url": "https://www.youtube.com/watch?v=sx_uGylH8eg", - "type": "video" - } - ] - }, - "OkYO-aSPiuVYuLXHswBCn": { - "title": "AWS Sagemaker", - "description": "AWS SageMaker is a fully managed machine learning service from Amazon Web Services that enables developers and data scientists to build, train, and deploy machine learning models at scale. It provides an integrated development environment, simplifying the entire ML workflow, from data preparation and model development to training, tuning, and inference. SageMaker supports popular ML frameworks like TensorFlow, PyTorch, and Scikit-learn, and offers features like automated model tuning, model monitoring, and one-click deployment. 
It's designed to make machine learning more accessible and scalable, even for large enterprise applications.\n\nLearn more from the following resources:", - "links": [ - { - "title": "AWS SageMaker", - "url": "https://aws.amazon.com/sagemaker/", - "type": "article" - }, - { - "title": "Introduction to Amazon SageMaker", - "url": "https://www.youtube.com/watch?v=Qv_Tr_BCFCQ", - "type": "video" - } - ] - }, - "8XjkRqHOdyH-DbXHYiBEt": { - "title": "Hugging Face Models", - "description": "Hugging Face models are a collection of pre-trained machine learning models available through the Hugging Face platform, covering a wide range of tasks like natural language processing, computer vision, and audio processing. The platform includes models for tasks such as text classification, translation, summarization, question answering, and more, with popular models like BERT, GPT, T5, and CLIP. Hugging Face provides easy-to-use tools and APIs that allow developers to access, fine-tune, and deploy these models, fostering a collaborative community where users can share, modify, and contribute models to improve AI research and application development.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Hugging Face Models", - "url": "https://huggingface.co/models", - "type": "article" - } - ] - }, - "n-Ud2dXkqIzK37jlKItN4": { - "title": "Mistral AI", - "description": "Mistral AI is a company focused on developing open-weight, large language models (LLMs) to provide high-performance AI solutions. Mistral aims to create models that are both efficient and versatile, making them suitable for a wide range of natural language processing tasks, including text generation, translation, and summarization. 
By releasing open-weight models, Mistral promotes transparency and accessibility, allowing developers to customize and deploy AI solutions more flexibly compared to proprietary models.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Mistral AI", - "url": "https://mistral.ai/", - "type": "article" - }, - { - "title": "Mistral AI: The Gen AI Start-up you did not know existed", - "url": "https://www.youtube.com/watch?v=vzrRGd18tAg", - "type": "video" - } - ] - }, - "a7qsvoauFe5u953I699ps": { - "title": "Cohere", - "description": "Cohere is an AI platform that specializes in natural language processing (NLP) by providing large language models designed to help developers build and deploy text-based applications. Cohere’s models are used for tasks such as text classification, language generation, semantic search, and sentiment analysis. Unlike some other providers, Cohere emphasizes simplicity and scalability, offering an easy-to-use API that allows developers to fine-tune models on custom data for specific use cases. 
Additionally, Cohere provides robust multilingual support and focuses on ensuring that its NLP solutions are both accessible and enterprise-ready, catering to a wide range of industries.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Cohere", - "url": "https://cohere.com/", - "type": "article" - }, - { - "title": "What Does Cohere Do?", - "url": "https://medium.com/geekculture/what-does-cohere-do-cdadf6d70435", - "type": "article" - } - ] - }, - "zdeuA4GbdBl2DwKgiOA4G": { - "title": "OpenAI API", - "description": "The OpenAI API provides access to powerful AI models like GPT, Codex, DALL-E, and Whisper, enabling developers to integrate capabilities such as text generation, code assistance, image creation, and speech recognition into their applications via a simple, scalable interface.\n\nLearn more from the following resources:", - "links": [ - { - "title": "OpenAI API", - "url": "https://openai.com/api/", - "type": "article" - } - ] - }, - "_bPTciEA1GT1JwfXim19z": { - "title": "Chat Completions API", - "description": "The OpenAI Chat Completions API is a powerful interface that allows developers to integrate conversational AI into applications by utilizing models like GPT-3.5 and GPT-4. It is designed to manage multi-turn conversations, keeping context across interactions, making it ideal for chatbots, virtual assistants, and interactive AI systems. 
With the API, users can structure conversations by providing messages in a specific format, where each message has a role (e.g., \"system\" to guide the model, \"user\" for input, and \"assistant\" for responses).\n\nLearn more from the following resources:", - "links": [ - { - "title": "Create Chat Completions", - "url": "https://platform.openai.com/docs/api-reference/chat/create", - "type": "article" - }, - { - "title": "Getting Started with Chat Completions API", - "url": "https://medium.com/the-ai-archives/getting-started-with-openais-chat-completions-api-in-2024-462aae00bf0a", - "type": "article" - } - ] - }, - "9-5DYeOnKJq9XvEMWP45A": { - "title": "Writing Prompts", - "description": "Prompts for the OpenAI API are carefully crafted inputs designed to guide the language model in generating specific, high-quality content. These prompts can be used to direct the model to create stories, articles, dialogue, or even detailed responses on particular topics. Effective prompts set clear expectations by providing context, specifying the format, or including examples, such as \"Write a short sci-fi story about a future where humans can communicate with animals,\" or \"Generate a detailed summary of the key benefits of using renewable energy.\" Well-designed prompts help ensure that the API produces coherent, relevant, and creative outputs, making it easier to achieve desired results across various applications.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Visit Dedicated Prompt Engineering Roadmap", - "url": "https://roadmap.sh/prompt-engineering", - "type": "article" - }, - { - "title": "How to Write AI prompts", - "url": "https://www.descript.com/blog/article/how-to-write-ai-prompts", - "type": "article" - }, - { - "title": "Prompt Engineering Guide", - "url": "https://www.promptingguide.ai/", - "type": "article" - } - ] - }, - "nyBgEHvUhwF-NANMwkRJW": { - "title": "Open AI Playground", - "description": "The OpenAI Playground is an 
interactive web interface that allows users to experiment with OpenAI's language models, such as GPT-3 and GPT-4, without needing to write code. It provides a user-friendly environment where you can input prompts, adjust parameters like temperature and token limits, and see how the models generate responses in real-time. The Playground helps users test different use cases, from text generation to question answering, and refine prompts for better outputs. It's a valuable tool for exploring the capabilities of OpenAI models, prototyping ideas, and understanding how the models behave before integrating them into applications.\n\nLearn more from the following resources:", - "links": [ - { - "title": "OpenAI Playground", - "url": "https://platform.openai.com/playground/chat", - "type": "article" - }, - { - "title": "How to Use OpenAI Playground Like a Pro", - "url": "https://www.youtube.com/watch?v=PLxpvtODiqs", - "type": "video" - } - ] - }, - "15XOFdVp0IC-kLYPXUJWh": { - "title": "Fine-tuning", - "description": "Fine-tuning the OpenAI API involves adapting pre-trained models, such as GPT, to specific use cases by training them on custom datasets. This process allows you to refine the model's behavior and improve its performance on specialized tasks, like generating domain-specific text or following particular patterns. By providing labeled examples of the desired input-output pairs, you guide the model to better understand and predict the appropriate responses for your use case.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Fine-tuning Documentation", - "url": "https://platform.openai.com/docs/guides/fine-tuning", - "type": "article" - }, - { - "title": "Fine-tuning ChatGPT with OpenAI Tutorial", - "url": "https://www.youtube.com/watch?v=VVKcSf6r3CM", - "type": "video" - } - ] - }, - "qzvp6YxWDiGakA2mtspfh": { - "title": "Maximum Tokens", - "description": "The OpenAI API has different maximum token limits depending on the model being used. 
For instance, GPT-3 has a limit of 4,096 tokens, while GPT-4 can support larger inputs, with some versions allowing up to 8,192 tokens, and extended versions reaching up to 32,768 tokens. Tokens include both the input text and the generated output, so longer inputs mean less space for responses. Managing token limits is crucial to ensure the model can handle the entire input and still generate a complete response, especially for tasks involving lengthy documents or multi-turn conversations.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Maximum Tokens", - "url": "https://platform.openai.com/docs/guides/rate-limits", - "type": "article" - }, - { - "title": "The Ins and Outs of GPT Token Limits", - "url": "https://www.supernormal.com/blog/gpt-token-limits", - "type": "article" - } - ] - }, - "FjV3oD7G2Ocq5HhUC17iH": { - "title": "Token Counting", - "description": "Token counting refers to tracking the number of tokens processed during interactions with language models, including both input and output text. Tokens are units of text that can be as short as a single character or as long as a word, and models like GPT process text by splitting it into these tokens. Knowing how many tokens are used is crucial because the API has token limits (e.g., 4,096 for GPT-3 and up to 32,768 for some versions of GPT-4), and costs are typically calculated based on the total number of tokens processed.\n\nLearn more from the following resources:", - "links": [ - { - "title": "OpenAI Tokenizer Tool", - "url": "https://platform.openai.com/tokenizer", - "type": "article" - }, - { - "title": "How to count tokens with Tiktoken", - "url": "https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken", - "type": "article" - } - ] - }, - "DZPM9zjCbYYWBPLmQImxQ": { - "title": "Pricing Considerations", - "description": "When using the OpenAI API, pricing considerations depend on factors like the model type, usage volume, and specific features utilized. 
Different models, such as GPT-3.5, GPT-4, or DALL-E, have varying cost structures based on the complexity of the model and the number of tokens processed (inputs and outputs). For cost efficiency, you should optimize prompt design, monitor usage, and consider rate limits or volume discounts offered by OpenAI for high usage.", - "links": [ - { - "title": "OpenAI API Pricing", - "url": "https://openai.com/api/pricing/", - "type": "article" - } - ] - }, - "8ndKHDJgL_gYwaXC7XMer": { - "title": "AI Safety and Ethics", - "description": "AI safety and ethics involve establishing guidelines and best practices to ensure that artificial intelligence systems are developed, deployed, and used in a manner that prioritizes human well-being, fairness, and transparency. This includes addressing risks such as bias, privacy violations, unintended consequences, and ensuring that AI operates reliably and predictably, even in complex environments. Ethical considerations focus on promoting accountability, avoiding discrimination, and aligning AI systems with human values and societal norms. Frameworks like explainability, human-in-the-loop design, and robust monitoring are often used to build systems that not only achieve technical objectives but also uphold ethical standards and mitigate potential harms.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Understanding Artificial Intelligence Ethics and Safety", - "url": "https://www.turing.ac.uk/news/publications/understanding-artificial-intelligence-ethics-and-safety", - "type": "article" - }, - { - "title": "What is AI Ethics?", - "url": "https://www.youtube.com/watch?v=aGwYtUzMQUk", - "type": "video" - } - ] - }, - "cUyLT6ctYQ1pgmodCKREq": { - "title": "Prompt Injection Attacks", - "description": "Prompt injection attacks are a type of security vulnerability where malicious inputs are crafted to manipulate or exploit AI models, like language models, to produce unintended or harmful outputs. 
These attacks involve injecting deceptive or adversarial content into the prompt to bypass filters, extract confidential information, or make the model respond in ways it shouldn't. For instance, a prompt injection could trick a model into revealing sensitive data or generating inappropriate responses by altering its expected behavior.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Prompt Injection in LLMs", - "url": "https://www.promptingguide.ai/prompts/adversarial-prompting/prompt-injection", - "type": "article" - }, - { - "title": "What is a Prompt Injection Attack?", - "url": "https://www.wiz.io/academy/prompt-injection-attack", - "type": "article" - } - ] - }, - "lhIU0ulpvDAn1Xc3ooYz_": { - "title": "Bias and Fairness", - "description": "Bias and fairness in AI refer to the challenges of ensuring that machine learning models do not produce discriminatory or skewed outcomes. Bias can arise from imbalanced training data, flawed assumptions, or biased algorithms, leading to unfair treatment of certain groups based on race, gender, or other factors. Fairness aims to address these issues by developing techniques to detect, mitigate, and prevent biases in AI systems. 
Ensuring fairness involves improving data diversity, applying fairness constraints during model training, and continuously monitoring models in production to avoid unintended consequences, promoting ethical and equitable AI use.\n\nLearn more from the following resources:", - "links": [ - { - "title": "What Do We Do About the Biases in AI?", - "url": "https://hbr.org/2019/10/what-do-we-do-about-the-biases-in-ai", - "type": "article" - }, - { - "title": "AI Bias - What Is It and How to Avoid It?", - "url": "https://levity.ai/blog/ai-bias-how-to-avoid", - "type": "article" - }, - { - "title": "What about fairness, bias and discrimination?", - "url": "https://ico.org.uk/for-organisations/uk-gdpr-guidance-and-resources/artificial-intelligence/guidance-on-ai-and-data-protection/how-do-we-ensure-fairness-in-ai/what-about-fairness-bias-and-discrimination/", - "type": "article" - } - ] - }, - "sWBT-j2cRuFqRFYtV_5TK": { - "title": "Security and Privacy Concerns", - "description": "Security and privacy concerns in AI revolve around the protection of data and the responsible use of models. Key issues include ensuring that sensitive data, such as personal information, is handled securely during collection, processing, and storage, to prevent unauthorized access and breaches. AI models can also inadvertently expose sensitive data if not properly designed, leading to privacy risks through data leakage or misuse. 
Additionally, there are concerns about model bias, data misuse, and ensuring transparency in how AI decisions are made.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Examining Privacy Risks in AI Systems", - "url": "https://transcend.io/blog/ai-and-privacy", - "type": "article" - }, - { - "title": "AI Is Dangerous, but Not for the Reasons You Think | Sasha Luccioni | TED", - "url": "https://www.youtube.com/watch?v=eXdVDhOGqoE", - "type": "video" - } - ] - }, - "Pt-AJmSJrOxKvolb5_HEv": { - "title": "Conducting adversarial testing", - "description": "Adversarial testing involves intentionally exposing machine learning models to deceptive, perturbed, or carefully crafted inputs to evaluate their robustness and identify vulnerabilities. The goal is to simulate potential attacks or edge cases where the model might fail, such as subtle manipulations in images, text, or data that cause the model to misclassify or produce incorrect outputs. This type of testing helps to improve model resilience, particularly in sensitive applications like cybersecurity, autonomous systems, and finance.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Adversarial Testing for Generative AI", - "url": "https://developers.google.com/machine-learning/resources/adv-testing", - "type": "article" - }, - { - "title": "Adversarial Testing: Definition, Examples and Resources", - "url": "https://www.leapwork.com/blog/adversarial-testing", - "type": "article" - } - ] - }, - "ljZLa3yjQpegiZWwtnn_q": { - "title": "OpenAI Moderation API", - "description": "The OpenAI Moderation API helps detect and filter harmful content by analyzing text for issues like hate speech, violence, self-harm, and adult content. It uses machine learning models to identify inappropriate or unsafe language, allowing developers to create safer online environments and maintain community guidelines. 
The API is designed to be integrated into applications, websites, and platforms, providing real-time content moderation to reduce the spread of harmful or offensive material.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Moderation", - "url": "https://platform.openai.com/docs/guides/moderation", - "type": "article" - }, - { - "title": "How to use the moderation API", - "url": "https://cookbook.openai.com/examples/how_to_use_moderation", - "type": "article" - } - ] - }, - "4Q5x2VCXedAWISBXUIyin": { - "title": "Adding end-user IDs in prompts", - "description": "Sending end-user IDs in your requests can be a useful tool to help OpenAI monitor and detect abuse. This allows OpenAI to provide your team with more actionable feedback in the event that we detect any policy violations in your application.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Sending End-user IDs - OpenAI", - "url": "https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids", - "type": "article" - } - ] - }, - "qmx6OHqx4_0JXVIv8dASp": { - "title": "Robust prompt engineering", - "description": "Robust prompt engineering involves carefully crafting inputs to guide AI models toward producing accurate, relevant, and reliable outputs. It focuses on minimizing ambiguity and maximizing clarity by providing specific instructions, examples, or structured formats. Effective prompts anticipate potential issues, such as misinterpretation or inappropriate responses, and address them through testing and refinement. 
This approach enhances the consistency and quality of the model's behavior, making it especially useful for complex tasks like multi-step reasoning, content generation, and interactive systems.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Building Robust Prompt Engineering Capability", - "url": "https://aimresearch.co/product/building-robust-prompt-engineering-capability", - "type": "article" - }, - { - "title": "Effective Prompt Engineering: A Comprehensive Guide", - "url": "https://medium.com/@nmurugs/effective-prompt-engineering-a-comprehensive-guide-803160c571ed", - "type": "article" - } - ] - }, - "t1SObMWkDZ1cKqNNlcd9L": { - "title": "Know your Customers / Usecases", - "description": "To know your customer means deeply understanding the needs, behaviors, and expectations of your target users. This ensures the tools you create are tailored precisely for their intended purpose, while also being designed to prevent misuse or unintended applications. By clearly defining the tool’s functionality and boundaries, you can align its features with the users’ goals while incorporating safeguards that limit its use in contexts it wasn’t designed for. This approach enhances both the tool’s effectiveness and safety, reducing the risk of improper use.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Assigning Roles", - "url": "https://learnprompting.org/docs/basics/roles", - "type": "article" - } - ] - }, - "ONLDyczNacGVZGojYyJrU": { - "title": "Constraining outputs and inputs", - "description": "Constraining outputs and inputs in AI models refers to implementing limits or rules that guide both the data the model processes (inputs) and the results it generates (outputs). Input constraints ensure that only valid, clean, and well-formed data enters the model, which helps to reduce errors and improve performance. This can include setting data type restrictions, value ranges, or specific formats. 
Output constraints, on the other hand, ensure that the model produces appropriate, safe, and relevant results, often by limiting output length, specifying answer formats, or applying filters to avoid harmful or biased responses. These constraints are crucial for improving model safety, alignment, and utility in practical applications.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Preventing Prompt Injection", - "url": "https://learnprompting.org/docs/prompt_hacking/defensive_measures/introduction", - "type": "article" - }, - { - "title": "Introducing Structured Outputs in the API - OpenAI", - "url": "https://openai.com/index/introducing-structured-outputs-in-the-api/", - "type": "article" - } - ] - }, - "a_3SabylVqzzOyw3tZN5f": { - "title": "OpenSource AI", - "description": "Open-source AI refers to AI models, tools, and frameworks that are freely available for anyone to use, modify, and distribute. Examples include TensorFlow, PyTorch, and models like BERT and Stable Diffusion. Open-source AI fosters transparency, collaboration, and innovation by allowing developers to inspect code, adapt models for specific needs, and contribute improvements. 
This approach accelerates the development of AI technologies, enabling faster experimentation and reducing dependency on proprietary solutions.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Open Source AI Is the Path Forward", - "url": "https://about.fb.com/news/2024/07/open-source-ai-is-the-path-forward/", - "type": "article" - }, - { - "title": "Should You Use Open Source Large Language Models?", - "url": "https://www.youtube.com/watch?v=y9k-U9AuDeM", - "type": "video" - } - ] - }, - "RBwGsq9DngUsl8PrrCbqx": { - "title": "Open vs Closed Source Models", - "description": "Open-source models are freely available for customization and collaboration, promoting transparency and flexibility, while closed-source models are proprietary, offering ease of use but limiting modification and transparency.\n\nLearn more from the following resources:", - "links": [ - { - "title": "OpenAI vs. Open Source LLM", - "url": "https://ubiops.com/openai-vs-open-source-llm/", - "type": "article" - }, - { - "title": "Open-Source vs Closed-Source LLMs", - "url": "https://www.youtube.com/watch?v=710PDpuLwOc", - "type": "video" - } - ] - }, - "97eu-XxYUH9pYbD_KjAtA": { - "title": "Popular Open Source Models", - "description": "Open-source large language models (LLMs) are models whose source code and architecture are publicly available for use, modification, and distribution. 
They are built using machine learning algorithms that process and generate human-like text, and being open-source, they promote transparency, innovation, and community collaboration in their development and application.\n\nLearn more from the following resources:", - "links": [ - { - "title": "The Best Large Language Models (LLMs) in 2024", - "url": "https://zapier.com/blog/best-llm/", - "type": "article" - }, - { - "title": "8 Top Open-Source LLMs for 2024 and Their Uses", - "url": "https://www.datacamp.com/blog/top-open-source-llms", - "type": "article" - } - ] - }, - "v99C5Bml2a6148LCJ9gy9": { - "title": "Hugging Face", - "description": "Hugging Face is a leading AI company and open-source platform that provides tools, models, and libraries for natural language processing (NLP), computer vision, and other machine learning tasks. It is best known for its \"Transformers\" library, which simplifies the use of pre-trained models like BERT, GPT, T5, and CLIP, making them accessible for tasks such as text classification, translation, summarization, and image recognition.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Hugging Face Official Video Course", - "url": "https://www.youtube.com/watch?v=00GKzGyWFEs&list=PLo2EIpI_JMQvWfQndUesu0nPBAtZ9gP1o", - "type": "course" - }, - { - "title": "Hugging Face", - "url": "https://huggingface.co", - "type": "article" - }, - { - "title": "What is Hugging Face? - Machine Learning Hub Explained", - "url": "https://www.youtube.com/watch?v=1AUjKfpRZVo", - "type": "video" - } - ] - }, - "YLOdOvLXa5Fa7_mmuvKEi": { - "title": "Hugging Face Hub", - "description": "The Hugging Face Hub is a comprehensive platform that hosts over 900,000 machine learning models, 200,000 datasets, and 300,000 demo applications, facilitating collaboration and sharing within the AI community. 
It serves as a central repository where users can discover, upload, and experiment with various models and datasets across multiple domains, including natural language processing, computer vision, and audio tasks. It also supports version control.\n\nLearn more from the following resources:", - "links": [ - { - "title": "nlp-official", - "url": "https://huggingface.co/learn/nlp-course/en/chapter4/1", - "type": "course" - }, - { - "title": "Hugging Face Documentation", - "url": "https://huggingface.co/docs/hub/en/index", - "type": "article" - } - ] - }, - "YKIPOiSj_FNtg0h8uaSMq": { - "title": "Hugging Face Tasks", - "description": "Hugging Face supports text classification, named entity recognition, question answering, summarization, and translation. It also extends to multimodal tasks that involve both text and images, such as visual question answering (VQA) and image-text matching. Each task is done by various pre-trained models that can be easily accessed and fine-tuned through the Hugging Face library.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Task and Model", - "url": "https://huggingface.co/learn/computer-vision-course/en/unit4/multimodal-models/tasks-models-part1", - "type": "article" - }, - { - "title": "Task Summary", - "url": "https://huggingface.co/docs/transformers/v4.14.1/en/task_summary", - "type": "article" - }, - { - "title": "Task Manager", - "url": "https://huggingface.co/docs/optimum/en/exporters/task_manager", - "type": "article" - } - ] - }, - "3kRTzlLNBnXdTsAEXVu_M": { - "title": "Inference SDK", - "description": "The Hugging Face Inference SDK is a powerful tool that allows developers to easily integrate and run inference on large language models hosted on the Hugging Face Hub. By using the `InferenceClient`, users can make API calls to various models for tasks such as text generation, image creation, and more. 
The SDK supports both synchronous and asynchronous operations thus compatible with existing workflows.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Inference", - "url": "https://huggingface.co/docs/huggingface_hub/en/package_reference/inference_client", - "type": "article" - }, - { - "title": "Endpoint Setup", - "url": "https://www.npmjs.com/package/@huggingface/inference", - "type": "article" - } - ] - }, - "bGLrbpxKgENe2xS1eQtdh": { - "title": "Transformers.js", - "description": "Transformers.js is a JavaScript library that enables transformer models, like those from Hugging Face, to run directly in the browser or Node.js, without needing cloud services. It supports tasks such as text generation, sentiment analysis, and translation within web apps or server-side scripts. Using WebAssembly (Wasm) and efficient JavaScript, Transformers.js offers powerful NLP capabilities with low latency, enhanced privacy, and offline functionality, making it ideal for real-time, interactive applications where local processing is essential for performance and security.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Transformers.js on Hugging Face", - "url": "https://huggingface.co/docs/transformers.js/en/index", - "type": "article" - }, - { - "title": "How Transformer.js Can Help You Create Smarter AI In Your Browser", - "url": "https://www.youtube.com/watch?v=MNJHu9zjpqg", - "type": "video" - } - ] - }, - "rTT2UnvqFO3GH6ThPLEjO": { - "title": "Ollama", - "description": "Ollama is a platform that offers large language models (LLMs) designed to run locally on personal devices, enabling AI functionality without relying on cloud services. It focuses on privacy, performance, and ease of use by allowing users to deploy models directly on laptops, desktops, or edge devices, providing fast, offline AI capabilities. 
With tools like the Ollama SDK, developers can integrate these models into their applications for tasks such as text generation, summarization, and more, benefiting from reduced latency, greater data control, and seamless local processing.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Ollama", - "url": "https://ollama.com/", - "type": "article" - }, - { - "title": "Ollama: Easily run LLMs locally", - "url": "https://klu.ai/glossary/ollama", - "type": "article" - }, - { - "title": "What is Ollama? Running Local LLMs Made Simple", - "url": "https://www.youtube.com/watch?v=5RIOQuHOihY", - "type": "video" - } - ] - }, - "ro3vY_sp6xMQ-hfzO-rc1": { - "title": "Ollama Models", - "description": "Ollama provides a collection of large language models (LLMs) designed to run locally on personal devices, enabling privacy-focused and efficient AI applications without relying on cloud services. These models can perform tasks like text generation, translation, summarization, and question answering, similar to popular models like GPT. Ollama emphasizes ease of use, offering models that are optimized for lower resource consumption, making it possible to deploy AI capabilities directly on laptops or edge devices.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Ollama Model Library", - "url": "https://ollama.com/library", - "type": "article" - }, - { - "title": "What are the different types of models? Ollama Course", - "url": "https://www.youtube.com/watch?v=f4tXwCNP1Ac", - "type": "video" - } - ] - }, - "TsG_I7FL-cOCSw8gvZH3r": { - "title": "Ollama SDK", - "description": "The Ollama SDK is a community-driven tool that allows developers to integrate and run large language models (LLMs) locally through a simple API. Enabling users to easily import the Ollama provider and create customized instances for various models, such as Llama 2 and Mistral. 
The SDK supports functionalities like `text generation` and `embeddings`, making it versatile for applications ranging from `chatbots` to `content generation`. Also Ollama SDK enhances privacy and control over data while offering seamless integration with existing workflows.\n\nLearn more from the following resources:", - "links": [ - { - "title": "SDK Provider", - "url": "https://sdk.vercel.ai/providers/community-providers/ollama", - "type": "article" - }, - { - "title": "Beginner's Guide", - "url": "https://dev.to/jayantaadhikary/using-the-ollama-api-to-run-llms-and-generate-responses-locally-18b7", - "type": "article" - }, - { - "title": "Setup", - "url": "https://klu.ai/glossary/ollama", - "type": "article" - } - ] - }, - "--ig0Ume_BnXb9K2U7HJN": { - "title": "What are Embeddings", - "description": "Embeddings are dense, numerical vector representations of data, such as words, sentences, images, or audio, that capture their semantic meaning and relationships. By converting data into fixed-length vectors, embeddings allow machine learning models to process and understand the data more effectively. For example, word embeddings represent similar words with similar vectors, enabling tasks like semantic search, recommendation systems, and clustering. 
Embeddings make it easier to compare, search, and analyze complex, unstructured data by mapping similar items close together in a high-dimensional space.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Introducing Text and Code Embeddings", - "url": "https://openai.com/index/introducing-text-and-code-embeddings/", - "type": "article" - }, - { - "title": "What are Embeddings", - "url": "https://www.cloudflare.com/learning/ai/what-are-embeddings/", - "type": "article" - } - ] - }, - "eMfcyBxnMY_l_5-8eg6sD": { - "title": "Semantic Search", - "description": "Embeddings are used for semantic search by converting text, such as queries and documents, into high-dimensional vectors that capture the underlying meaning and context, rather than just exact words. These embeddings represent the semantic relationships between words or phrases, allowing the system to understand the query’s intent and retrieve relevant information, even if the exact terms don’t match.\n\nLearn more from the following resources:", - "links": [ - { - "title": "What is Semantic Search?", - "url": "https://www.elastic.co/what-is/semantic-search", - "type": "article" - }, - { - "title": "What is Semantic Search? - Cohere", - "url": "https://www.youtube.com/watch?v=fFt4kR4ntAA", - "type": "video" - } - ] - }, - "HQe9GKy3p0kTUPxojIfSF": { - "title": "Recommendation Systems", - "description": "In the context of embeddings, recommendation systems use vector representations to capture similarities between items, such as products or content. By converting items and user preferences into embeddings, these systems can measure how closely related different items are based on vector proximity, allowing them to recommend similar products or content based on a user's past interactions. 
This approach improves recommendation accuracy and efficiency by enabling meaningful, scalable comparisons of complex data.\n\nLearn more from the following resources:", - "links": [ - { - "title": "What Role does AI Play in Recommendation Systems and Engines?", - "url": "https://www.algolia.com/blog/ai/what-role-does-ai-play-in-recommendation-systems-and-engines/", - "type": "article" - }, - { - "title": "What is a Recommendation Engine?", - "url": "https://www.ibm.com/think/topics/recommendation-engine", - "type": "article" - } - ] - }, - "AglWJ7gb9rTT2rMkstxtk": { - "title": "Anomaly Detection", - "description": "Anomaly detection with embeddings works by transforming data, such as text, images, or time-series data, into vector representations that capture their patterns and relationships. In this high-dimensional space, similar data points are positioned close together, while anomalies stand out as those that deviate significantly from the typical distribution. This approach is highly effective for detecting outliers in tasks like fraud detection, network security, and quality control.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Anomaly in Embeddings", - "url": "https://ai.google.dev/gemini-api/tutorials/anomaly_detection", - "type": "article" - } - ] - }, - "06Xta-OqSci05nV2QMFdF": { - "title": "Data Classification", - "description": "Once data is embedded, a classification algorithm, such as a neural network or a logistic regression model, can be trained on these embeddings to classify the data into different categories. 
The advantage of using embeddings is that they capture underlying relationships and similarities between data points, even if the raw data is complex or high-dimensional, improving classification accuracy in tasks like text classification, image categorization, and recommendation systems.\n\nLearn more from the following resources:", - "links": [ - { - "title": "What Is Data Classification?", - "url": "https://www.paloaltonetworks.com/cyberpedia/data-classification", - "type": "article" - }, - { - "title": "Text Embeddings, Classification, and Semantic Search (w/ Python Code)", - "url": "https://www.youtube.com/watch?v=sNa_uiqSlJo", - "type": "video" - } - ] - }, - "l6priWeJhbdUD5tJ7uHyG": { - "title": "Open AI Embeddings API", - "description": "The OpenAI Embeddings API allows developers to generate dense vector representations of text, which capture semantic meaning and relationships. These embeddings can be used for various tasks, such as semantic search, recommendation systems, and clustering, by enabling the comparison of text based on similarity in vector space. The API supports easy integration and scalability, making it possible to handle large datasets and perform tasks like finding similar documents, organizing content, or building recommendation engines. Learn more from the following resources:", - "links": [ - { - "title": "OpenAI Embeddings API", - "url": "https://platform.openai.com/docs/api-reference/embeddings/create", - "type": "article" - }, - { - "title": "Master OpenAI Embedding API", - "url": "https://www.youtube.com/watch?v=9oCS-VQupoc", - "type": "video" - } - ] - }, - "y0qD5Kb4Pf-ymIwW-tvhX": { - "title": "Open AI Embedding Models", - "description": "OpenAI's embedding models convert text into dense vector representations that capture semantic meaning, allowing for efficient similarity searches, clustering, and recommendations. 
These models are commonly used for tasks like semantic search, where similar phrases are mapped to nearby points in a vector space, and for building recommendation systems by comparing embeddings to find related content. OpenAI's embedding models offer versatility, supporting a range of applications from document retrieval to content classification, and can be easily integrated through the OpenAI API for scalable and efficient deployment.\n\nLearn more from the following resources:", - "links": [ - { - "title": "OpenAI Embedding Models", - "url": "https://platform.openai.com/docs/guides/embeddings/embedding-models", - "type": "article" - }, - { - "title": "OpenAI Embeddings Explained in 5 Minutes", - "url": "https://www.youtube.com/watch?v=8kJStTRuMcs", - "type": "video" - } - ] - }, - "4GArjDYipit4SLqKZAWDf": { - "title": "Pricing Considerations", - "description": "The pricing for the OpenAI Embedding API is based on the number of tokens processed and the specific embedding model used. Costs are determined by the total tokens needed to generate embeddings, so longer texts will result in higher charges. To manage costs, developers can optimize by shortening inputs or batching requests. Additionally, selecting the right embedding model for your performance and budget requirements, along with monitoring token usage, can help control expenses.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "OpenAI Pricing", - "url": "https://openai.com/api/pricing/", - "type": "article" - } - ] - }, - "apVYIV4EyejPft25oAvdI": { - "title": "Open-Source Embeddings", - "description": "Open-source embeddings are pre-trained vector representations of data, usually text, that are freely available for use and modification. These embeddings capture semantic meanings, making them useful for tasks like semantic search, text classification, and clustering. 
Examples include Word2Vec, GloVe, and FastText, which represent words as vectors based on their context in large corpora, and more advanced models like Sentence-BERT and CLIP that provide embeddings for sentences and images. Open-source embeddings allow developers to leverage pre-trained models without starting from scratch, enabling faster development and experimentation in natural language processing and other AI applications.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Embeddings", - "url": "https://platform.openai.com/docs/guides/embeddings", - "type": "article" - }, - { - "title": "A Guide to Open-Source Embedding Models", - "url": "https://www.bentoml.com/blog/a-guide-to-open-source-embedding-models", - "type": "article" - } - ] - }, - "ZV_V6sqOnRodgaw4mzokC": { - "title": "Sentence Transformers", - "description": "Sentence Transformers are a type of model designed to generate high-quality embeddings for sentences, allowing them to capture the semantic meaning of text. Unlike traditional word embeddings, which represent individual words, Sentence Transformers understand the context of entire sentences, making them ideal for tasks that require semantic similarity, such as sentence clustering, semantic search, and paraphrase detection. 
Built on top of transformer models like BERT and RoBERTa, they convert sentences into dense vectors, where similar sentences are placed closer together in vector space.\n\nLearn more from the following resources:", - "links": [ - { - "title": "What is BERT?", - "url": "https://h2o.ai/wiki/bert/", - "type": "article" - }, - { - "title": "SentenceTransformers Documentation", - "url": "https://sbert.net/", - "type": "article" - }, - { - "title": "Using Sentence Transformers at Hugging Face", - "url": "https://huggingface.co/docs/hub/sentence-transformers", - "type": "article" - } - ] - }, - "dLEg4IA3F5jgc44Bst9if": { - "title": "Models on Hugging Face", - "description": "Embedding models are used to convert raw data like text, code, or images into high-dimensional vectors that capture semantic meaning. These vector representations allow AI systems to compare, cluster, and retrieve information based on similarity rather than exact matches. Hugging Face provides a wide range of pretrained embedding models such as `all-MiniLM-L6-v2`, `gte-base`, `Qwen3-Embedding-8B` and `bge-base` which are commonly used for tasks like semantic search, recommendation systems, duplicate detection, and retrieval-augmented generation (RAG). 
These models can be accessed through libraries like transformers or sentence-transformers, making it easy to generate high-quality embeddings for both general-purpose and task-specific applications.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Hugging Face Embedding Models", - "url": "https://huggingface.co/models?pipeline_tag=feature-extraction", - "type": "article" - }, - { - "title": "Hugging Face - Text embeddings & semantic search", - "url": "https://www.youtube.com/watch?v=OATCgQtNX2o", - "type": "video" - } - ] - }, - "tt9u3oFlsjEMfPyojuqpc": { - "title": "Vector Databases", - "description": "Vector databases are systems specialized in storing, indexing, and retrieving high-dimensional vectors, often used as embeddings for data like text, images, or audio. Unlike traditional databases, they excel at managing unstructured data by enabling fast similarity searches, where vectors are compared to find the closest matches. This makes them essential for tasks like semantic search, recommendation systems, and content discovery. Using techniques like approximate nearest neighbor (ANN) search, vector databases handle large datasets efficiently, ensuring quick and accurate retrieval even at scale.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Vector Databases", - "url": "https://developers.cloudflare.com/vectorize/reference/what-is-a-vector-database/", - "type": "article" - }, - { - "title": "What are Vector Databases?", - "url": "https://www.mongodb.com/resources/basics/databases/vector-databases", - "type": "article" - } - ] - }, - "WcjX6p-V-Rdd77EL8Ega9": { - "title": "Purpose and Functionality", - "description": "A vector database is designed to store, manage, and retrieve high-dimensional vectors (embeddings) generated by AI models. Its primary purpose is to perform fast and efficient similarity searches, enabling applications to find data points that are semantically or visually similar to a given query. 
Unlike traditional databases, which handle structured data, vector databases excel at managing unstructured data like text, images, and audio by converting them into dense vector representations. They use indexing techniques, such as approximate nearest neighbor (ANN) algorithms, to quickly search large datasets and return relevant results. Vector databases are essential for applications like recommendation systems, semantic search, and content discovery, where understanding and retrieving similar items is crucial.\n\nLearn more from the following resources:", - "links": [ - { - "title": "What is a Vector Database? Top 12 Use Cases", - "url": "https://lakefs.io/blog/what-is-vector-databases/", - "type": "article" - }, - { - "title": "Vector Databases: Intro, Use Cases", - "url": "https://www.v7labs.com/blog/vector-databases", - "type": "article" - } - ] - }, - "dSd2C9lNl-ymmCRT9_ZC3": { - "title": "Chroma", - "description": "Chroma is an open-source vector database and AI-native embedding database designed to handle and store large-scale embeddings and semantic vectors. It is used in applications that require fast, efficient similarity searches, such as natural language processing (NLP), machine learning (ML), and AI systems dealing with text, images, and other high-dimensional data.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Chroma", - "url": "https://www.trychroma.com/", - "type": "article" - }, - { - "title": "Chroma Tutorials", - "url": "https://lablab.ai/tech/chroma", - "type": "article" - }, - { - "title": "Chroma - Chroma - Vector Database for LLM Applications", - "url": "https://youtu.be/Qs_y0lTJAp0?si=Z2-eSmhf6PKrEKCW", - "type": "video" - } - ] - }, - "_Cf7S1DCvX7p1_3-tP3C3": { - "title": "Pinecone", - "description": "Pinecone is a managed vector database designed for efficient similarity search and real-time retrieval of high-dimensional data, such as embeddings. 
It allows developers to store, index, and query vector representations, making it easy to build applications like recommendation systems, semantic search, and AI-driven content discovery. Pinecone is scalable, handles large datasets, and provides fast, low-latency searches using optimized indexing techniques.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Pinecone", - "url": "https://www.pinecone.io", - "type": "article" - }, - { - "title": "Everything you need to know about Pinecone", - "url": "https://www.packtpub.com/article-hub/everything-you-need-to-know-about-pinecone-a-vector-database?srsltid=AfmBOorXsy9WImpULoLjd-42ERvTzj3pQb7C2EFgamWlRobyGJVZKKdz", - "type": "article" - }, - { - "title": "Introducing Pinecone Serverless", - "url": "https://www.youtube.com/watch?v=iCuR6ihHQgc", - "type": "video" - } - ] - }, - "VgUnrZGKVjAAO4n_llq5-": { - "title": "Weaviate", - "description": "Weaviate is an open-source vector database that allows users to store, search, and manage high-dimensional vectors, often used for tasks like semantic search and recommendation systems. It enables efficient similarity searches by converting data (like text, images, or audio) into embeddings and indexing them for fast retrieval. Weaviate also supports integrating external data sources and schemas, making it easy to combine structured and unstructured data.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Weaviate", - "url": "https://weaviate.io/", - "type": "article" - }, - { - "title": "Advanced AI Agents with RAG", - "url": "https://www.youtube.com/watch?v=UoowC-hsaf0&list=PLTL2JUbrY6tVmVxY12e6vRDmY-maAXzR1", - "type": "video" - } - ] - }, - "JurLbOO1Z8r6C3yUqRNwf": { - "title": "FAISS", - "description": "FAISS (Facebook AI Similarity Search) is a library developed by Facebook AI for efficient similarity search and clustering of dense vectors, particularly useful for large-scale datasets. 
It is optimized to handle embeddings (vector representations) and enables fast nearest neighbor search, allowing you to retrieve similar items from a large collection of vectors based on distance or similarity metrics like cosine similarity or Euclidean distance. FAISS is widely used in applications such as image and text retrieval, recommendation systems, and large-scale search systems where embeddings are used to represent items. It offers several indexing methods and can scale to billions of vectors, making it a powerful tool for handling real-time, large-scale similarity search problems efficiently.\n\nLearn more from the following resources:", - "links": [ - { - "title": "FAISS", - "url": "https://ai.meta.com/tools/faiss/", - "type": "article" - }, - { - "title": "What Is Faiss (Facebook AI Similarity Search)?", - "url": "https://www.datacamp.com/blog/faiss-facebook-ai-similarity-search", - "type": "article" - }, - { - "title": "FAISS Vector Library with LangChain and OpenAI", - "url": "https://www.youtube.com/watch?v=ZCSsIkyCZk4", - "type": "video" - } - ] - }, - "rjaCNT3Li45kwu2gXckke": { - "title": "LanceDB", - "description": "LanceDB is a vector database designed for efficient storage, retrieval, and management of embeddings. It enables users to perform fast similarity searches, particularly useful in applications like recommendation systems, semantic search, and AI-driven content retrieval. LanceDB focuses on scalability and speed, allowing large-scale datasets of embeddings to be indexed and queried quickly, which is essential for real-time AI applications. 
It integrates well with machine learning workflows, making it easier to deploy models that rely on vector-based data processing, and helps manage the complexities of handling high-dimensional vector data efficiently.\n\nLearn more from the following resources:", - "links": [ - { - "title": "LanceDB on GitHub", - "url": "https://github.com/lancedb/lancedb", - "type": "opensource" - }, - { - "title": "LanceDB", - "url": "https://lancedb.com/", - "type": "article" - }, - { - "title": "LanceDB Documentation", - "url": "https://docs.lancedb.com/enterprise/introduction", - "type": "article" - } - ] - }, - "DwOAL5mOBgBiw-EQpAzQl": { - "title": "Qdrant", - "description": "Qdrant is an open-source vector database designed for efficient similarity search and real-time data retrieval. It specializes in storing and indexing high-dimensional vectors (embeddings) to enable fast and accurate searches across large datasets. Qdrant is particularly suited for applications like recommendation systems, semantic search, and AI-driven content discovery, where finding similar items quickly is essential. It supports advanced filtering, scalable indexing, and real-time updates, making it easy to integrate into machine learning workflows.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Qdrant on GitHub", - "url": "https://github.com/qdrant/qdrant", - "type": "opensource" - }, - { - "title": "Qdrant", - "url": "https://qdrant.tech/", - "type": "article" - }, - { - "title": "Getting started with Qdrant", - "url": "https://www.youtube.com/watch?v=LRcZ9pbGnno", - "type": "video" - } - ] - }, - "9kT7EEQsbeD2WDdN9ADx7": { - "title": "Supabase", - "description": "Supabase Vector is an extension of the Supabase platform, specifically designed for AI and machine learning applications that require vector operations. It leverages PostgreSQL's pgvector extension to provide efficient vector storage and similarity search capabilities. 
This makes Supabase Vector particularly useful for applications involving embeddings, semantic search, and recommendation systems. With Supabase Vector, developers can store and query high-dimensional vector data alongside regular relational data, all within the same PostgreSQL database.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Supabase Vector", - "url": "https://supabase.com/docs/guides/ai", - "type": "article" - }, - { - "title": "Supabase Vector: The Postgres Vector database", - "url": "https://www.youtube.com/watch?v=MDxEXKkxf2Q", - "type": "video" - } - ] - }, - "j6bkm0VUgLkHdMDDJFiMC": { - "title": "MongoDB Atlas", - "description": "MongoDB Atlas, traditionally known for its document database capabilities, now includes vector search functionality, making it a strong option as a vector database. This feature allows developers to store and query high-dimensional vector data alongside regular document data. With Atlas’s vector search, users can perform similarity searches on embeddings of text, images, or other complex data, making it ideal for AI and machine learning applications like recommendation systems, image similarity search, and natural language processing tasks. The seamless integration of vector search within the MongoDB ecosystem allows developers to leverage familiar tools and interfaces while benefiting from advanced vector-based operations for sophisticated data analysis and retrieval.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Vector Search in MongoDB Atlas", - "url": "https://www.mongodb.com/products/platform/atlas-vector-search", - "type": "article" - } - ] - }, - "5TQnO9B4_LTHwqjI7iHB1": { - "title": "Indexing Embeddings", - "description": "Embeddings are stored in a vector database by first converting data, such as text, images, or audio, into high-dimensional vectors using machine learning models. 
These vectors, also called embeddings, capture the semantic relationships and patterns within the data. Once generated, each embedding is indexed in the vector database along with its associated metadata, such as the original data (e.g., text or image) or an identifier. The vector database then organizes these embeddings to support efficient similarity searches, typically using techniques like approximate nearest neighbor (ANN) search.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Indexing & Embeddings", - "url": "https://docs.llamaindex.ai/en/stable/understanding/indexing/indexing/", - "type": "article" - }, - { - "title": "Vector Databases Simply Explained! (Embeddings & Indexes)", - "url": "https://www.youtube.com/watch?v=dN0lsF2cvm4", - "type": "video" - } - ] - }, - "ZcbRPtgaptqKqWBgRrEBU": { - "title": "Performing Similarity Search", - "description": "In a similarity search, the process begins by converting the user’s query (such as a piece of text or an image) into an embedding—a vector representation that captures the query’s semantic meaning. This embedding is generated using a pre-trained model, such as BERT for text or a neural network for images. Once the query is converted into a vector, it is compared to the embeddings stored in the vector database.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "What is Similarity Search & How Does it work?", - "url": "https://www.truefoundry.com/blog/similarity-search", - "type": "article" - } - ] - }, - "what-are-rags@lVhWhZGR558O-ljHobxIi.md": { - "title": "What are RAGs?", - "description": "", - "links": [] - }, - "GCn4LGNEtPI0NWYAZCRE-": { - "title": "RAG Usecases", - "description": "Retrieval-Augmented Generation (RAG) enhances applications like chatbots, customer support, and content summarization by combining information retrieval with language generation. 
It retrieves relevant data from a knowledge base and uses it to generate accurate, context-aware responses, making it ideal for tasks such as question answering, document generation, and semantic search. RAG’s ability to ground outputs in real-world information leads to more reliable and informative results, improving user experience across various domains.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Retrieval augmented generation use cases: Transforming data into insights", - "url": "https://www.glean.com/blog/retrieval-augmented-generation-use-cases", - "type": "article" - }, - { - "title": "Retrieval Augmented Generation (RAG) – 5 Use Cases", - "url": "https://theblue.ai/blog/rag-news/", - "type": "article" - }, - { - "title": "Introduction to RAG", - "url": "https://www.youtube.com/watch?v=LmiFeXH-kq8&list=PL-pTHQz4RcBbz78Z5QXsZhe9rHuCs1Jw-", - "type": "video" - } - ] - }, - "qlBEXrbV88e_wAGRwO9hW": { - "title": "RAG vs Fine-tuning", - "description": "RAG (Retrieval-Augmented Generation) and fine-tuning are two approaches to enhancing language models, but they differ in methodology and use cases. Fine-tuning involves training a pre-trained model on a specific dataset to adapt it to a particular task, making it more accurate for that context but limited to the knowledge present in the training data. RAG, on the other hand, combines real-time information retrieval with generation, enabling the model to access up-to-date external data and produce contextually relevant responses. 
While fine-tuning is ideal for specialized, static tasks, RAG is better suited for dynamic tasks that require real-time, fact-based responses.\n\nLearn more from the following resources:", - "links": [ - { - "title": "RAG vs Fine Tuning: How to Choose the Right Method", - "url": "https://www.montecarlodata.com/blog-rag-vs-fine-tuning/", - "type": "article" - }, - { - "title": "RAG vs Finetuning — Which Is the Best Tool to Boost Your LLM Application?", - "url": "https://towardsdatascience.com/rag-vs-finetuning-which-is-the-best-tool-to-boost-your-llm-application-94654b1eaba7", - "type": "article" - }, - { - "title": "RAG vs Fine-tuning", - "url": "https://www.youtube.com/watch?v=00Q0G84kq3M", - "type": "video" - } - ] - }, - "mX987wiZF7p3V_gExrPeX": { - "title": "Chunking", - "description": "The chunking step in Retrieval-Augmented Generation (RAG) involves breaking down large documents or data sources into smaller, manageable chunks. This is done to ensure that the retriever can efficiently search through large volumes of data while staying within the token or input limits of the model. Each chunk, typically a paragraph or section, is converted into an embedding, and these embeddings are stored in a vector database. 
When a query is made, the retriever searches for the most relevant chunks rather than the entire document, enabling faster and more accurate retrieval.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Understanding LangChain's RecursiveCharacterTextSplitter", - "url": "https://dev.to/eteimz/understanding-langchains-recursivecharactertextsplitter-2846", - "type": "article" - }, - { - "title": "Chunking Strategies for LLM Applications", - "url": "https://www.pinecone.io/learn/chunking-strategies/", - "type": "article" - }, - { - "title": "A Guide to Chunking Strategies for Retrieval Augmented Generation", - "url": "https://zilliz.com/learn/guide-to-chunking-strategies-for-rag", - "type": "article" - } - ] - }, - "grTcbzT7jKk_sIUwOTZTD": { - "title": "Embedding", - "description": "In Retrieval-Augmented Generation (RAG), embeddings are essential for linking information retrieval with natural language generation. Embeddings represent both the user query and documents as dense vectors in a shared space, enabling the system to retrieve relevant information based on similarity. This retrieved information is then fed into a generative model, such as GPT, to produce contextually informed and accurate responses. 
By using embeddings, RAG enhances the model's ability to generate content grounded in external knowledge, making it effective for tasks like question answering and summarization.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Understanding the role of embeddings in RAG LLMs", - "url": "https://www.aporia.com/learn/understanding-the-role-of-embeddings-in-rag-llms/", - "type": "article" - }, - { - "title": "Mastering RAG: How to Select an Embedding Model", - "url": "https://www.rungalileo.io/blog/mastering-rag-how-to-select-an-embedding-model", - "type": "article" - } - ] - }, - "zZA1FBhf1y4kCoUZ-hM4H": { - "title": "Vector Database", - "description": "When implementing Retrieval-Augmented Generation (RAG), a vector database is used to store and efficiently retrieve embeddings, which are vector representations of data like documents, images, or other knowledge sources. During the RAG process, when a query is made, the system converts it into an embedding and searches the vector database for the most relevant, similar embeddings (e.g., related documents or snippets). 
These retrieved pieces of information are then fed to a generative model, which uses them to produce a more accurate, context-aware response.\n\nLearn more from the following resources:", - "links": [ - { - "title": "How to Implement Graph RAG Using Knowledge Graphs and Vector Databases", - "url": "https://towardsdatascience.com/how-to-implement-graph-rag-using-knowledge-graphs-and-vector-databases-60bb69a22759", - "type": "article" - }, - { - "title": "Retrieval Augmented Generation (RAG) with Vector Databases: Expanding AI Capabilities", - "url": "https://objectbox.io/retrieval-augmented-generation-rag-with-vector-databases-expanding-ai-capabilities/", - "type": "article" - } - ] - }, - "OCGCzHQM2LQyUWmiqe6E0": { - "title": "Retrieval Process", - "description": "The retrieval process in Retrieval-Augmented Generation (RAG) involves finding relevant information from a large dataset or knowledge base to support the generation of accurate, context-aware responses. When a query is received, the system first converts it into a vector (embedding) and uses this vector to search a database of pre-indexed embeddings, identifying the most similar or relevant data points. Techniques like approximate nearest neighbor (ANN) search are often used to speed up this process.\n\nLearn more from the following resources:", - "links": [ - { - "title": "What is Retrieval-Augmented Generation (RAG)?", - "url": "https://cloud.google.com/use-cases/retrieval-augmented-generation", - "type": "article" - }, - { - "title": "What Is Retrieval-Augmented Generation, aka RAG?", - "url": "https://blogs.nvidia.com/blog/what-is-retrieval-augmented-generation/", - "type": "article" - } - ] - }, - "2jJnS9vRYhaS69d6OxrMh": { - "title": "Generation", - "description": "Generation refers to the process where a generative language model, such as GPT, creates a response based on the information retrieved during the retrieval phase. 
After relevant documents or data snippets are identified using embeddings, they are passed to the generative model, which uses this information to produce coherent, context-aware, and informative responses. The retrieved content helps the model stay grounded and factual, enhancing its ability to answer questions, provide summaries, or engage in dialogue by combining retrieved knowledge with its natural language generation capabilities. This synergy between retrieval and generation makes RAG systems effective for tasks that require detailed, accurate, and contextually relevant outputs.\n\nLearn more from the following resources:", - "links": [ - { - "title": "What is RAG (Retrieval-Augmented Generation)?", - "url": "https://aws.amazon.com/what-is/retrieval-augmented-generation/", - "type": "article" - }, - { - "title": "Retrieval Augmented Generation (RAG) Explained in 8 Minutes!", - "url": "https://www.youtube.com/watch?v=HREbdmOSQ18", - "type": "video" - } - ] - }, - "WZVW8FQu6LyspSKm1C_sl": { - "title": "Using SDKs Directly", - "description": "While tools like Langchain and LlamaIndex make it easy to implement RAG, you don't have to necessarily learn and use them. If you know about the different steps of implementing RAG you can simply do it all yourself e.g. do the chunking using `@langchain/textsplitters` package, create embeddings using any LLM e.g. use OpenAI Embedding API through their SDK, save the embeddings to any vector database e.g. 
if you are using Supabase Vector DB, you can use their SDK and similarly you can use the relevant SDKs for the rest of the steps as well.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Langchain Text Splitter Package", - "url": "https://www.npmjs.com/package/@langchain/textsplitters", - "type": "article" - }, - { - "title": "OpenAI Embedding API", - "url": "https://platform.openai.com/docs/guides/embeddings", - "type": "article" - }, - { - "title": "Supabase AI & Vector Documentation", - "url": "https://supabase.com/docs/guides/ai", - "type": "article" - } - ] - }, - "langchain@jM-Jbo0wUilhVY830hetJ.md": { - "title": "Langchain", - "description": "", - "links": [] - }, - "llama-index@JT4mBXOjvvrUnynA7yrqt.md": { - "title": "Llama Index", - "description": "", - "links": [] - }, - "open-ai-response-api@eOqCBgBTKM8CmY3nsWjre.md": { - "title": "Open AI Response API", - "description": "", - "links": [] - }, - "c0RPhpD00VIUgF4HJgN2T": { - "title": "Replicate", - "description": "Replicate is a platform that allows developers to run machine learning models in the cloud without needing to manage infrastructure. It provides a simple API for deploying and scaling models, making it easy to integrate AI capabilities like image generation, text processing, and more into applications. 
Users can select from a library of pre-trained models or deploy their own, with the platform handling tasks like scaling, monitoring, and versioning.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Replicate", - "url": "https://replicate.com/", - "type": "article" - }, - { - "title": "Replicate.com Beginners Tutorial", - "url": "https://www.youtube.com/watch?v=y0_GE5ErqY8", - "type": "video" - } - ] - }, - "ai-agents@4_ap0rD9Gl6Ep_4jMfPpG.md": { - "title": "AI Agents", - "description": "", - "links": [] - }, - "778HsQzTuJ_3c9OSn5DmH": { - "title": "Agents Usecases", - "description": "AI Agents have a variety of usecases ranging from customer support, workflow automation, cybersecurity, finance, marketing and sales, and more.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Top 15 Use Cases Of AI Agents In Business", - "url": "https://www.ampcome.com/post/15-use-cases-of-ai-agents-in-business", - "type": "article" - }, - { - "title": "A Brief Guide on AI Agents: Benefits and Use Cases", - "url": "https://www.codica.com/blog/brief-guide-on-ai-agents/", - "type": "article" - }, - { - "title": "The Complete Guide to Building AI Agents for Beginners", - "url": "https://youtu.be/MOyl58VF2ak?si=-QjRD_5y3iViprJX", - "type": "video" - } - ] - }, - "voDKcKvXtyLzeZdx2g3Qn": { - "title": "ReAct Prompting", - "description": "ReAct prompting is a technique that combines reasoning and action by guiding language models to think through a problem step-by-step and then take specific actions based on the reasoning. It encourages the model to break down tasks into logical steps (reasoning) and perform operations, such as calling APIs or retrieving information (actions), to reach a solution. 
This approach helps in scenarios where the model needs to process complex queries, interact with external systems, or handle tasks requiring a sequence of actions, improving the model's ability to provide accurate and context-aware responses.\n\nLearn more from the following resources:", - "links": [ - { - "title": "ReAct Prompting", - "url": "https://www.promptingguide.ai/techniques/react", - "type": "article" - }, - { - "title": "ReAct Prompting: How We Prompt for High-Quality Results from LLMs", - "url": "https://www.width.ai/post/react-prompting", - "type": "article" - } - ] - }, - "6xaRB34_g0HGt-y1dGYXR": { - "title": "Manual Implementation", - "description": "Services like Open AI functions and Tools or Vercel's AI SDK make it really easy to make SDK agents however it is a good idea to learn how these tools work under the hood. You can also create fully custom implementation of agents using by implementing custom loop.\n\nLearn more from the following resources:", - "links": [ - { - "title": "OpenAI Function Calling", - "url": "https://platform.openai.com/docs/guides/function-calling", - "type": "article" - }, - { - "title": "Vercel AI SDK", - "url": "https://sdk.vercel.ai/docs/foundations/tools", - "type": "article" - } - ] - }, - "Sm0Ne5Nx72hcZCdAcC0C2": { - "title": "OpenAI Functions / Tools", - "description": "OpenAI Functions, also known as tools, enable developers to extend the capabilities of language models by integrating external APIs and functionalities, allowing the models to perform specific actions, fetch real-time data, or interact with other software systems. 
This feature enhances the model's utility by bridging it with services like web searches, databases, and custom business applications, enabling more dynamic and task-oriented responses.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Function Calling", - "url": "https://platform.openai.com/docs/guides/function-calling", - "type": "article" - }, - { - "title": "How does OpenAI Function Calling work?", - "url": "https://www.youtube.com/watch?v=Qor2VZoBib0", - "type": "video" - } - ] - }, - "openai-response-api@xXLyuUNrKEc32XLQxMjgT.md": { - "title": "OpenAI Response API", - "description": "", - "links": [] - }, - "W7cKPt_UxcUgwp8J6hS4p": { - "title": "Multimodal AI", - "description": "Multimodal AI is an approach that combines and processes data from multiple sources, such as text, images, audio, and video, to understand and generate responses. By integrating different data types, it enables more comprehensive and accurate AI systems, allowing for tasks like visual question answering, interactive virtual assistants, and enhanced content understanding. This capability helps create richer, more context-aware applications that can analyze and respond to complex, real-world scenarios.\n\nLearn more from the following resources:", - "links": [ - { - "title": "A Multimodal World - Hugging Face", - "url": "https://huggingface.co/learn/computer-vision-course/en/unit4/multimodal-models/a_multimodal_world", - "type": "article" - }, - { - "title": "Multimodal AI - Google", - "url": "https://cloud.google.com/use-cases/multimodal-ai?hl=en", - "type": "article" - }, - { - "title": "What Is Multimodal AI? A Complete Introduction", - "url": "https://www.splunk.com/en_us/blog/learn/multimodal-ai.html", - "type": "article" - } - ] - }, - "sGR9qcro68KrzM8qWxcH8": { - "title": "Multimodal AI Usecases", - "description": "Multimodal AI powers applications like visual question answering, content moderation, and enhanced search engines. 
It drives smarter virtual assistants and interactive AR apps, combining text, images, and audio for richer, more intuitive user experiences across e-commerce, accessibility, and entertainment.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Hugging Face Multimodal Models", - "url": "https://huggingface.co/learn/computer-vision-course/en/unit4/multimodal-models/a_multimodal_world", - "type": "article" - } - ] - }, - "fzVq4hGoa2gdbIzoyY1Zp": { - "title": "Image Understanding", - "description": "Multimodal AI enhances image understanding by integrating visual data with other types of information, such as text or audio. By combining these inputs, AI models can interpret images more comprehensively, recognizing objects, scenes, and actions, while also understanding context and related concepts. For example, an AI system could analyze an image and generate descriptive captions, or provide explanations based on both visual content and accompanying text.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Low or High Fidelity Image Understanding - OpenAI", - "url": "https://platform.openai.com/docs/guides/images", - "type": "article" - } - ] - }, - "49BWxYVFpIgZCCqsikH7l": { - "title": "Image Generation", - "description": "Image generation is a process in artificial intelligence where models create new images based on input prompts or existing data. 
It involves using generative models like GANs (Generative Adversarial Networks), VAEs (Variational Autoencoders), or more recently, transformer-based models like DALL-E and Stable Diffusion.\n\nLearn more from the following resources:", - "links": [ - { - "title": "DALL-E", - "url": "https://openai.com/index/dall-e-2/", - "type": "article" - }, - { - "title": "How DALL-E 2 Actually Works", - "url": "https://www.assemblyai.com/blog/how-dall-e-2-actually-works/", - "type": "article" - }, - { - "title": "How AI Image Generators Work (Stable Diffusion / Dall-E)", - "url": "https://www.youtube.com/watch?v=1CIpzeNxIhU", - "type": "video" - } - ] - }, - "TxaZCtTCTUfwCxAJ2pmND": { - "title": "Video Understanding", - "description": "Video understanding with multimodal AI involves analyzing and interpreting both visual and audio content to provide a more comprehensive understanding of videos. Common use cases include video summarization, where AI extracts key scenes and generates summaries; content moderation, where the system detects inappropriate visuals or audio; and video indexing for easier search and retrieval of specific moments within a video. Other applications include enhancing video-based recommendations, security surveillance, and interactive entertainment, where video and audio are processed together for real-time user interaction.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Awesome LLM for Video Understanding", - "url": "https://github.com/yunlong10/Awesome-LLMs-for-Video-Understanding", - "type": "opensource" - }, - { - "title": "Video Understanding", - "url": "https://dl.acm.org/doi/10.1145/3503161.3551600", - "type": "article" - } - ] - }, - "mxQYB820447DC6kogyZIL": { - "title": "Audio Processing", - "description": "Audio processing in multimodal AI enables a wide range of use cases by combining sound with other data types, such as text, images, or video, to create more context-aware systems. 
Use cases include speech recognition paired with real-time transcription and visual analysis in meetings or video conferencing tools, voice-controlled virtual assistants that can interpret commands in conjunction with on-screen visuals, and multimedia content analysis where audio and visual elements are analyzed together for tasks like content moderation or video indexing.\n\nLearn more from the following resources:", - "links": [ - { - "title": "The State of Audio Processing", - "url": "https://appwrite.io/blog/post/state-of-audio-processing", - "type": "article" - }, - { - "title": "Audio Signal Processing for Machine Learning", - "url": "https://www.youtube.com/watch?v=iCwMQJnKk2c", - "type": "video" - } - ] - }, - "GCERpLz5BcRtWPpv-asUz": { - "title": "Text-to-Speech", - "description": "In the context of multimodal AI, text-to-speech (TTS) technology converts written text into natural-sounding spoken language, allowing AI systems to communicate verbally. When integrated with other modalities, such as visual or interactive elements, TTS can enhance user experiences in applications like virtual assistants, educational tools, and accessibility features. For example, a multimodal AI could read aloud text from an on-screen document while highlighting relevant sections, or narrate information about objects recognized in an image. 
By combining TTS with other forms of data processing, multimodal AI creates more engaging, accessible, and interactive systems for users.\n\nLearn more from the following resources:", - "links": [ - { - "title": "What is Text-to-Speech?", - "url": "https://aws.amazon.com/polly/what-is-text-to-speech/", - "type": "article" - }, - { - "title": "From Text to Speech: The Evolution of Synthetic Voices", - "url": "https://ignitetech.ai/about/blogs/text-speech-evolution-synthetic-voices", - "type": "article" - } - ] - }, - "jQX10XKd_QM5wdQweEkVJ": { - "title": "Speech-to-Text", - "description": "In the context of multimodal AI, speech-to-text technology converts spoken language into written text, enabling seamless integration with other data types like images and text. This allows AI systems to process audio input and combine it with visual or textual information, enhancing applications such as virtual assistants, interactive chatbots, and multimedia content analysis. For example, a multimodal AI can transcribe a video’s audio while simultaneously analyzing on-screen visuals and text, providing richer and more context-aware insights.\n\nLearn more from the following resources:", - "links": [ - { - "title": "What is Speech to Text?", - "url": "https://aws.amazon.com/what-is/speech-to-text/", - "type": "article" - }, - { - "title": "Turn Speech into Text using Google AI", - "url": "https://cloud.google.com/speech-to-text", - "type": "article" - }, - { - "title": "How is Speech to Text Used?", - "url": "https://h2o.ai/wiki/speech-to-text/", - "type": "article" - } - ] - }, - "CRrqa-dBw1LlOwVbrZhjK": { - "title": "OpenAI Vision API", - "description": "The OpenAI Vision API enables models to analyze and understand images, allowing them to identify objects, recognize text, and interpret visual content. It integrates image processing with natural language capabilities, enabling tasks like visual question answering, image captioning, and extracting information from photos. 
This API can be used for applications in accessibility, content moderation, and automation, providing a seamless way to combine visual understanding with text-based interactions.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Vision", - "url": "https://platform.openai.com/docs/guides/vision", - "type": "article" - }, - { - "title": "OpenAI Vision API Crash Course", - "url": "https://www.youtube.com/watch?v=ZjkS11DSeEk", - "type": "video" - } - ] - }, - "LKFwwjtcawJ4Z12X102Cb": { - "title": "DALL-E API", - "description": "The DALL-E API is a tool provided by OpenAI that allows developers to integrate the DALL-E image generation model into applications. DALL-E is an AI model designed to generate images from textual descriptions, capable of producing highly detailed and creative visuals. The API enables users to provide a descriptive prompt, and the model generates corresponding images, opening up possibilities in fields like design, advertising, content creation, and art.\n\nLearn more from the following resources:", - "links": [ - { - "title": "OpenAI Image Generation", - "url": "https://platform.openai.com/docs/guides/images", - "type": "article" - }, - { - "title": "DALL E API - Introduction (Generative AI Pictures from OpenAI)", - "url": "https://www.youtube.com/watch?v=Zr6vAWwjHN0", - "type": "video" - } - ] - }, - "OTBd6cPUayKaAM-fLWdSt": { - "title": "Whisper API", - "description": "The Whisper API by OpenAI enables developers to integrate speech-to-text capabilities into their applications. It uses OpenAI's Whisper model, a powerful speech recognition system, to convert spoken language into accurate, readable text. The API supports multiple languages and can handle various accents, making it ideal for tasks like transcription, voice commands, and automated captions. 
With the ability to process audio in real time or from pre-recorded files, the Whisper API simplifies adding robust speech recognition features to applications, enhancing accessibility and enabling new interactive experiences.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Whisper on GitHub", - "url": "https://github.com/openai/whisper", - "type": "opensource" - }, - { - "title": "OpenAI Whisper", - "url": "https://openai.com/index/whisper/", - "type": "article" - } - ] - }, - "EIDbwbdolR_qsNKVDla6V": { - "title": "Hugging Face Models", - "description": "Hugging Face models are a collection of pre-trained machine learning models available through the Hugging Face platform, covering a wide range of tasks like natural language processing, computer vision, and audio processing. The platform includes models for tasks such as text classification, translation, summarization, question answering, and more, with popular models like BERT, GPT, T5, and CLIP. Hugging Face provides easy-to-use tools and APIs that allow developers to access, fine-tune, and deploy these models, fostering a collaborative community where users can share, modify, and contribute models to improve AI research and application development.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Hugging Face Models", - "url": "https://huggingface.co/models", - "type": "article" - }, - { - "title": "How to Use Pretrained Models from Hugging Face in a Few Lines of Code", - "url": "https://www.youtube.com/watch?v=ntz160EnWIc", - "type": "video" - } - ] - }, - "j9zD3pHysB1CBhLfLjhpD": { - "title": "LangChain for Multimodal Apps", - "description": "LangChain is a framework designed to build applications that integrate multiple AI models, especially those focusing on language understanding, generation, and multimodal capabilities. 
For multimodal apps, LangChain facilitates seamless interaction between text, image, and even audio models, enabling developers to create complex workflows that can process and analyze different types of data.\n\nLearn more from the following resources:", - "links": [ - { - "title": "LangChain", - "url": "https://www.langchain.com/", - "type": "article" - }, - { - "title": "Build a Multimodal GenAI App with LangChain and Gemini LLMs", - "url": "https://www.youtube.com/watch?v=bToMzuiOMhg", - "type": "video" - } - ] - }, - "akQTCKuPRRelj2GORqvsh": { - "title": "LlamaIndex for Multimodal Apps", - "description": "LlamaIndex enables multi-modal apps by linking language models (LLMs) to diverse data sources, including text and images. It indexes and retrieves information across formats, allowing LLMs to process and integrate data from multiple modalities. This supports applications like visual question answering, content summarization, and interactive systems by providing structured, context-aware inputs from various content types.\n\nLearn more from the following resources:", - "links": [ - { - "title": "LlamaIndex Multi-modal", - "url": "https://docs.llamaindex.ai/en/stable/use_cases/multimodal/", - "type": "article" - }, - { - "title": "Multi-modal Retrieval Augmented Generation with LlamaIndex", - "url": "https://www.youtube.com/watch?v=35RlrrgYDyU", - "type": "video" - } - ] - }, - "NYge7PNtfI-y6QWefXJ4d": { - "title": "Development Tools", - "description": "AI has given rise to a collection of AI powered development tools of various different varieties. 
We have IDEs like Cursor that has AI baked into it, live context capturing tools such as Pieces and a variety of brower based tools like V0, Claude and more.\n\nLearn more from the following resources:", - "links": [ - { - "title": "v0 Website", - "url": "https://v0.dev", - "type": "article" - }, - { - "title": "Aider - AI Pair Programming in Terminal", - "url": "https://aider.chat/", - "type": "article" - }, - { - "title": "Replit AI", - "url": "https://replit.com/ai", - "type": "article" - }, - { - "title": "Pieces", - "url": "https://pieces.app", - "type": "article" - } - ] - }, - "XcKeQfpTA5ITgdX51I4y-": { - "title": "AI Code Editors", - "description": "AI code editors are development tools that leverage artificial intelligence to assist software developers in writing, debugging, and optimizing code. These editors go beyond traditional syntax highlighting and code completion by incorporating machine learning models, natural language processing, and data analysis to understand code context, generate suggestions, and even automate portions of the software development process.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Cursor - The AI Code Editor", - "url": "https://www.cursor.com/", - "type": "website" - }, - { - "title": "PearAI - The Open Source, Extendable AI Code Editor", - "url": "https://trypear.ai/", - "type": "website" - }, - { - "title": "Bolt - Prompt, run, edit, and deploy full-stack web apps", - "url": "https://bolt.new", - "type": "website" - }, - { - "title": "Replit - Build Apps using AI", - "url": "https://replit.com/ai", - "type": "website" - }, - { - "title": "v0 - Build Apps with AI", - "url": "https://v0.dev", - "type": "website" - }, - { - "title": "Claude Code - AI coding assistant in terminal", - "url": "https://www.claude.com/product/claude-code", - "type": "website" - }, - { - "title": "Gemini CLI - Google's AI coding assistant for command line", - "url": "https://github.com/google-gemini/gemini-cli", - 
"type": "official" - }, - { - "title": "OpenAI Codex - AI code generation via API and CLI", - "url": "https://openai.com/codex/", - "type": "official" - } - ] - }, - "TifVhqFm1zXNssA8QR3SM": { - "title": "Code Completion Tools", - "description": "Code completion tools are AI-powered development assistants designed to enhance productivity by automatically suggesting code snippets, functions, and entire blocks of code as developers type. These tools, such as GitHub Copilot and Tabnine, leverage machine learning models trained on vast code repositories to predict and generate contextually relevant code. They help reduce repetitive coding tasks, minimize errors, and accelerate the development process by offering real-time, intelligent suggestions.\n\nLearn more from the following resources:", - "links": [ - { - "title": "GitHub Copilot", - "url": "https://github.com/features/copilot", - "type": "article" - }, - { - "title": "Codeium", - "url": "https://codeium.com/", - "type": "article" - }, - { - "title": "Supermaven", - "url": "https://supermaven.com/", - "type": "article" - }, - { - "title": "Tabnine", - "url": "https://www.tabnine.com/", - "type": "article" - } - ] - }, - "haystack@ebXXEhNRROjbbof-Gym4p.md": { - "title": "Haystack", - "description": "", - "links": [] - }, - "ragflow@d0ontCII8KI8wfP-8Y45R.md": { - "title": "RAGFlow", - "description": "", - "links": [] - }, - "model-context-protocol-mcp@AeHkNU-uJ_gBdo5-xdpEu.md": { - "title": "Model Context Protocol (MCP)", - "description": "", - "links": [] - }, - "mcp-host@MabZ9jOrSj539C5qZrVBd.md": { - "title": "MCP Host", - "description": "", - "links": [] - }, - "mcp-server@8Xkd88EjX3GE_9DWQhr7G.md": { - "title": "MCP Server", - "description": "", - "links": [] - }, - "mcp-client@po0fIZYaFhRbNlza7sB37.md": { - "title": "MCP Client", - "description": "", - "links": [] - }, - "data-layer@Z0920V57_ziDhXbQJMN9O.md": { - "title": "Data Layer", - "description": "", - "links": [] - }, - 
"transport-layer@o4gHDZ5p9lyeHuCAPvAKz.md": { - "title": "Transport Layer", - "description": "", - "links": [] - }, - "building-an-mcp-server@oLGfKjcqBzJ3vd6Cg-T1B.md": { - "title": "Building an MCP Server", - "description": "", - "links": [] - }, - "building-an-mcp-client@0Rk0rCbmRFJT2GKwUibQS.md": { - "title": "Building an MCP Client", - "description": "", - "links": [] - }, - "connect-to-local-server@H-G93SsEgsA_NGL_v4hPv.md": { - "title": "Connect to Local Server", - "description": "", - "links": [] - }, - "connect-to-remote-server@2t4uINxmzfx8FUF-_i_2B.md": { - "title": "Connect to Remote Server", - "description": "", - "links": [] - }, - "vertex-ai@AxzTGDCC2Ftp4G66U4Uqr.md": { - "title": "Vertex AI", - "description": "", - "links": [] - }, - "google-adk@mbp2NoL-VZ5hZIIblNBXt.md": { - "title": "Google ADK", - "description": "", - "links": [] - } -} \ No newline at end of file diff --git a/public/roadmap-content/ai-red-teaming.json b/public/roadmap-content/ai-red-teaming.json deleted file mode 100644 index 5f1cfe560..000000000 --- a/public/roadmap-content/ai-red-teaming.json +++ /dev/null @@ -1,1401 +0,0 @@ -{ - "R9DQNc0AyAQ2HLpP4HOk6": { - "title": "AI Security Fundamentals", - "description": "This covers the foundational concepts essential for AI Red Teaming, bridging traditional cybersecurity with AI-specific threats. An AI Red Teamer must understand common vulnerabilities in ML models (like evasion or poisoning), security risks in the AI lifecycle (from data collection to deployment), and how AI capabilities can be misused. 
This knowledge forms the basis for designing effective tests against AI systems.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Building Trustworthy AI: Contending with Data Poisoning", - "url": "https://nisos.com/research/building-trustworthy-ai/", - "type": "article" - }, - { - "title": "What Is Adversarial AI in Machine Learning?", - "url": "https://www.paloaltonetworks.co.uk/cyberpedia/what-are-adversarial-attacks-on-AI-Machine-Learning", - "type": "article" - } - ] - }, - "fNTb9y3zs1HPYclAmu_Wv": { - "title": "Why Red Team AI Systems?", - "description": "AI systems introduce novel risks beyond traditional software, such as emergent unintended capabilities, complex failure modes, susceptibility to subtle data manipulations, and potential for large-scale misuse (e.g., generating disinformation). AI Red Teaming is necessary because standard testing methods often fail to uncover these unique AI vulnerabilities. It provides critical, adversary-focused insights needed to build genuinely safe, reliable, and secure AI before deployment.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Introduction to Prompt Hacking", - "url": "https://learnprompting.org/courses/intro-to-prompt-hacking", - "type": "course" - }, - { - "title": "Prompt Hacking Offensive Measures", - "url": "https://learnprompting.org/docs/prompt_hacking/offensive_measures/introduction", - "type": "article" - } - ] - }, - "HFJIYcI16OMyM77fAw9af": { - "title": "Introduction", - "description": "AI Red Teaming is the practice of simulating adversarial attacks against AI systems to proactively identify vulnerabilities, potential misuse scenarios, and failure modes before malicious actors do. Distinct from traditional cybersecurity red teaming, it focuses on the unique attack surfaces of AI models, such as prompt manipulation, data poisoning, model extraction, and evasion techniques. 
The primary goal for an AI Red Teamer is to test the robustness, safety, alignment, and fairness of AI systems, particularly complex ones like LLMs, by adopting an attacker's mindset to uncover hidden flaws and provide actionable feedback for improvement.\n\nLearn more from the following resources:", - "links": [ - { - "title": "A Guide to AI Red Teaming", - "url": "https://hiddenlayer.com/innovation-hub/a-guide-to-ai-red-teaming/", - "type": "article" - }, - { - "title": "What is AI Red Teaming? (Learn Prompting)", - "url": "https://learnprompting.org/blog/what-is-ai-red-teaming", - "type": "article" - }, - { - "title": "What is AI Red Teaming? The Complete Guide", - "url": "https://mindgard.ai/blog/what-is-ai-red-teaming", - "type": "article" - } - ] - }, - "1gyuEV519LjN-KpROoVwv": { - "title": "Ethical Considerations", - "description": "Ethical conduct is crucial for AI Red Teamers. While simulating attacks, they must operate within strict legal and ethical boundaries defined by rules of engagement, focusing on improving safety without causing real harm or enabling misuse. This includes respecting data privacy, obtaining consent where necessary, responsibly disclosing vulnerabilities, and carefully considering the potential negative impacts of both the testing process and the AI capabilities being tested. 
The goal is discovery for defense, not exploitation.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Red-Teaming in AI Testing: Stress Testing", - "url": "https://www.labelvisor.com/red-teaming-abstract-competitive-testing-data-selection/", - "type": "article" - }, - { - "title": "Responsible AI assessment - Responsible AI | Coursera", - "url": "https://www.coursera.org/learn/ai-security", - "type": "article" - }, - { - "title": "Responsible AI Principles (Microsoft)", - "url": "https://www.microsoft.com/en-us/ai/responsible-ai", - "type": "article" - } - ] - }, - "Irkc9DgBfqSn72WaJqXEt": { - "title": "Role of Red Teams", - "description": "The role of an AI Red Team is to rigorously challenge AI systems from an adversarial perspective. They design and execute tests to uncover vulnerabilities related to the model's logic, data dependencies, prompt interfaces, safety alignments, and interactions with surrounding infrastructure. They provide detailed reports on findings, potential impacts, and remediation advice, acting as a critical feedback loop for AI developers and stakeholders to improve system security and trustworthiness before and after deployment.\n\nLearn more from the following resources:", - "links": [ - { - "title": "The Complete Guide to Red Teaming: Process, Benefits & More", - "url": "https://mindgard.ai/blog/red-teaming", - "type": "article" - }, - { - "title": "The Complete Red Teaming Checklist [PDF]: 5 Key Steps - Mindgard AI", - "url": "https://mindgard.ai/blog/red-teaming-checklist", - "type": "article" - }, - { - "title": "Red Teaming in Defending AI Systems", - "url": "https://protectai.com/blog/expanding-role-red-teaming-defending-ai-systems", - "type": "article" - } - ] - }, - "NvOJIv36Utpm7_kOZyr79": { - "title": "Supervised Learning", - "description": "AI Red Teamers analyze systems built using supervised learning to probe for vulnerabilities like susceptibility to adversarial examples designed to cause 
misclassification, sensitivity to data distribution shifts, or potential for data leakage related to the labeled training data. Understanding how these models learn input-output mappings is key to devising tests that challenge their learned boundaries.\n\nLearn more from the following resources:", - "links": [ - { - "title": "AI and cybersecurity: a love-hate revolution", - "url": "https://www.alter-solutions.com/en-us/articles/ai-cybersecurity-love-hate-revolution", - "type": "article" - }, - { - "title": "What Is Supervised Learning?", - "url": "https://www.ibm.com/think/topics/supervised-learning", - "type": "article" - }, - { - "title": "What is Supervised Learning?", - "url": "https://cloud.google.com/discover/what-is-supervised-learning", - "type": "article" - } - ] - }, - "ZC0yKsu-CJC-LZKKo2pLD": { - "title": "Unsupervised Learning", - "description": "When red teaming AI systems using unsupervised learning (e.g., clustering algorithms), focus areas include assessing whether the discovered patterns reveal sensitive information, if the model can be manipulated to group data incorrectly, or if dimensionality reduction techniques obscure security-relevant features. Understanding these models helps identify risks associated with pattern discovery on unlabeled data.\n\nLearn more from the following resources:", - "links": [ - { - "title": "How Unsupervised Learning Works with Examples", - "url": "https://www.coursera.org/articles/unsupervised-learning", - "type": "article" - }, - { - "title": "Supervised vs. 
Unsupervised Learning: Which Approach is Best?", - "url": "https://www.digitalocean.com/resources/articles/supervised-vs-unsupervised-learning", - "type": "article" - } - ] - }, - "Xqzc4mOKsVzwaUxLGjHya": { - "title": "Reinforcement Learning", - "description": "Red teaming RL-based AI systems involves testing for vulnerabilities such as reward hacking (exploiting the reward function to induce unintended behavior), unsafe exploration (agent takes harmful actions during learning), or susceptibility to adversarial perturbations in the environment's state. Understanding the agent's policy and value functions is crucial for designing effective tests against RL agents.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Deep Reinforcement Learning Course by HuggingFace", - "url": "https://huggingface.co/learn/deep-rl-course/unit0/introduction", - "type": "course" - }, - { - "title": "Resources to Learn Reinforcement Learning", - "url": "https://towardsdatascience.com/best-free-courses-and-resources-to-learn-reinforcement-learning-ed6633608cb2/", - "type": "article" - }, - { - "title": "What is reinforcement learning?", - "url": "https://online.york.ac.uk/resources/what-is-reinforcement-learning/", - "type": "article" - }, - { - "title": "Diverse and Effective Red Teaming with Auto-generated Rewards and Multi-step Reinforcement Learning", - "url": "https://arxiv.org/html/2412.18693v1", - "type": "article" - } - ] - }, - "RuKzVhd1nZphCrlW1wZGL": { - "title": "Neural Networks", - "description": "Understanding neural network architectures (layers, nodes, activation functions) is vital for AI Red Teamers. This knowledge allows for targeted testing, such as crafting adversarial examples that exploit specific activation functions or identifying potential vulnerabilities related to network depth or connectivity. 
It provides insight into the 'black box' for more effective white/grey-box testing.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Neural Networks Explained: A Beginner's Guide", - "url": "https://www.skillcamper.com/blog/neural-networks-explained-a-beginners-guide", - "type": "article" - }, - { - "title": "Neural networks | Machine Learning", - "url": "https://developers.google.com/machine-learning/crash-course/neural-networks", - "type": "article" - }, - { - "title": "Red Teaming with Artificial Intelligence-Driven Cyberattacks: A Scoping Review", - "url": "https://arxiv.org/html/2503.19626", - "type": "article" - } - ] - }, - "3XJ-g0KvHP75U18mxCqgw": { - "title": "Generative Models", - "description": "AI Red Teamers focus heavily on generative models (like GANs and LLMs) due to their widespread use and unique risks. Understanding how they generate content is key to testing for issues like generating harmful/biased outputs, deepfakes, prompt injection vulnerabilities, or leaking sensitive information from their vast training data.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Introduction to Generative AI", - "url": "https://learnprompting.org/courses/intro-to-gen-ai", - "type": "course" - }, - { - "title": "What is Generative AI?", - "url": "https://learnprompting.org/docs/basics/generative_ai", - "type": "article" - }, - { - "title": "Generative AI beginner's guide", - "url": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/overview", - "type": "article" - } - ] - }, - "8K-wCn2cLc7Vs_V4sC3sE": { - "title": "Large Language Models", - "description": "LLMs are a primary target for AI Red Teaming. 
Understanding their architecture (often Transformer-based), training processes (pre-training, fine-tuning), and capabilities (text generation, summarization, Q&A) is essential for identifying vulnerabilities like prompt injection, jailbreaking, data regurgitation, and emergent harmful behaviors specific to these large-scale models.\n\nLearn more from the following resources:", - "links": [ - { - "title": "What is an LLM (large language model)?", - "url": "https://www.cloudflare.com/learning/ai/what-is-large-language-model/", - "type": "article" - }, - { - "title": "ChatGPT For Everyone", - "url": "https://learnprompting.org/courses/chatgpt-for-everyone", - "type": "article" - }, - { - "title": "What Are Large Language Models? A Beginner's Guide for 2025", - "url": "https://www.kdnuggets.com/large-language-models-beginners-guide-2025", - "type": "article" - } - ] - }, - "gx4KaFqKgJX9n9_ZGMqlZ": { - "title": "Prompt Engineering", - "description": "For AI Red Teamers, prompt engineering is both a tool and a target. It's a tool for crafting inputs to test model boundaries and vulnerabilities (e.g., creating jailbreak prompts). It's a target because understanding how prompts influence LLMs is key to identifying prompt injection vulnerabilities and designing defenses. 
Mastering prompt design is fundamental to effective LLM red teaming.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Introduction to Prompt Engineering", - "url": "https://learnprompting.org/courses/intro-to-prompt-engineering", - "type": "course" - }, - { - "title": "System Prompts - InjectPrompt", - "url": "https://www.injectprompt.com/t/system-prompts", - "type": "article" - }, - { - "title": "The Ultimate Guide to Red Teaming LLMs and Adversarial Prompts (Kili Technology)", - "url": "https://kili-technology.com/large-language-models-llms/red-teaming-llms-and-adversarial-prompts", - "type": "article" - } - ] - }, - "WZkIHZkV2qDYbYF9KBBRi": { - "title": "Confidentiality, Integrity, Availability", - "description": "The CIA Triad is directly applicable in AI Red Teaming. Confidentiality tests focus on preventing leakage of training data or proprietary model details. Integrity tests probe for susceptibility to data poisoning or model manipulation. Availability tests assess resilience against denial-of-service attacks targeting the AI model or its supporting infrastructure.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Confidentiality, Integrity, Availability: Key Examples", - "url": "https://www.datasunrise.com/knowledge-center/confidentiality-integrity-availability-examples/", - "type": "article" - }, - { - "title": "The CIA Triad: Confidentiality, Integrity, Availability", - "url": "https://www.veeam.com/blog/cybersecurity-cia-triad-explained.html", - "type": "article" - }, - { - "title": "What's The CIA Triad? 
Confidentiality, Integrity, & Availability, Explained", - "url": "https://www.splunk.com/en_us/blog/learn/cia-triad-confidentiality-integrity-availability.html", - "type": "article" - } - ] - }, - "RDOaTBWP3aIJPUp_kcafm": { - "title": "Threat Modeling", - "description": "AI Red Teams apply threat modeling to identify unique attack surfaces in AI systems, such as manipulating training data, exploiting prompt interfaces, attacking the model inference process, or compromising connected tools/APIs. Before attacking an AI system, red teamers perform threat modeling to map out possible adversaries (from curious users to state actors) and attack vectors, prioritizing tests based on likely impact and adversary capability.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Core Components of AI Red Team Exercises (Learn Prompting)", - "url": "https://learnprompting.org/blog/what-is-ai-red-teaming", - "type": "article" - }, - { - "title": "Threat Modeling Process", - "url": "https://owasp.org/www-community/Threat_Modeling_Process", - "type": "article" - }, - { - "title": "Threat Modeling", - "url": "https://owasp.org/www-community/Threat_Modeling", - "type": "article" - } - ] - }, - "MupRvk_8Io2Hn7yEvU663": { - "title": "Risk Management", - "description": "AI Red Teamers contribute to the AI risk management process by identifying and demonstrating concrete vulnerabilities. 
Findings from red team exercises inform risk assessments, helping organizations understand the likelihood and potential impact of specific AI threats and prioritize resources for mitigation based on demonstrated exploitability.\n\nLearn more from the following resources:", - "links": [ - { - "title": "NIST AI Risk Management Framework", - "url": "https://www.nist.gov/itl/ai-risk-management-framework", - "type": "article" - }, - { - "title": "A Beginner's Guide to Cybersecurity Risks and Vulnerabilities", - "url": "https://online.champlain.edu/blog/beginners-guide-cybersecurity-risk-management", - "type": "article" - }, - { - "title": "Cybersecurity Risk Management: Frameworks, Plans, and Best Practices", - "url": "https://hyperproof.io/resource/cybersecurity-risk-management-process/", - "type": "article" - } - ] - }, - "887lc3tWCRH-sOHSxWgWJ": { - "title": "Vulnerability Assessment", - "description": "While general vulnerability assessment scans infrastructure, AI Red Teaming extends this to assess vulnerabilities specific to the AI model and its unique interactions. 
This includes probing for prompt injection flaws, testing for adversarial example robustness, checking for data privacy leaks, and evaluating safety alignment failures – weaknesses not typically found by standard IT vulnerability scanners.\n\nLearn more from the following resources:", - "links": [ - { - "title": "AI red-teaming in critical infrastructure: Boosting security and trust in AI systems", - "url": "https://www.dnv.com/article/ai-red-teaming-for-critical-infrastructure-industries/", - "type": "article" - }, - { - "title": "The Ultimate Guide to Vulnerability Assessment", - "url": "https://strobes.co/blog/guide-vulnerability-assessment/", - "type": "article" - }, - { - "title": "Vulnerability Scanning Tools", - "url": "https://owasp.org/www-community/Vulnerability_Scanning_Tools", - "type": "article" - } - ] - }, - "Ds8pqn4y9Npo7z6ubunvc": { - "title": "Jailbreak Techniques", - "description": "Jailbreaking is a specific category of prompt hacking where the AI Red Teamer aims to bypass the LLM's safety and alignment training. They use techniques like creating fictional scenarios, asking the model to simulate an unrestricted AI, or using complex instructions to trick the model into generating content that violates its own policies (e.g., generating harmful code, hate speech, or illegal instructions).\n\nLearn more from the following resources:", - "links": [ - { - "title": "InjectPrompt", - "url": "https://injectprompt.com", - "type": "article" - }, - { - "title": "Jailbreaking Guide - Learn Prompting", - "url": "https://learnprompting.org/docs/prompt_hacking/jailbreaking", - "type": "article" - }, - { - "title": "Jailbroken: How Does LLM Safety Training Fail? (arXiv)", - "url": "https://arxiv.org/abs/2307.02483", - "type": "article" - } - ] - }, - "j7uLLpt8MkZ1rqM7UBPW4": { - "title": "Safety Filter Bypasses", - "description": "AI Red Teamers specifically target the safety mechanisms (filters, guardrails) implemented within or around an AI model. 
They test techniques like using synonyms for blocked words, employing different languages, embedding harmful requests within harmless text, or using character-level obfuscation to evade detection and induce the model to generate prohibited content, thereby assessing the robustness of the safety controls.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Bypassing AI Content Filters", - "url": "https://www.restack.io/p/ai-driven-content-moderation-answer-bypass-filters-cat-ai", - "type": "article" - }, - { - "title": "How to Bypass Azure AI Content Safety Guardrails", - "url": "https://mindgard.ai/blog/bypassing-azure-ai-content-safety-guardrails", - "type": "article" - }, - { - "title": "The Best Methods to Bypass AI Detection: Tips and Techniques", - "url": "https://www.popai.pro/resources/the-best-methods-to-bypass-ai-detection-tips-and-techniques/", - "type": "article" - } - ] - }, - "XOrAPDRhBvde9R-znEipH": { - "title": "Prompt Injection", - "description": "Prompt injection is a critical vulnerability tested by AI Red Teamers. They attempt to insert instructions into the LLM's input that override its intended system prompt or task, causing it to perform unauthorized actions, leak data, or generate malicious output. 
This tests the model's ability to distinguish trusted instructions from potentially harmful user/external input.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Advanced Prompt Hacking - Learn Prompting", - "url": "https://learnprompting.org/courses/advanced-prompt-hacking", - "type": "course" - }, - { - "title": "Prompt Injection & the Rise of Prompt Attacks", - "url": "https://www.lakera.ai/blog/guide-to-prompt-injection", - "type": "article" - }, - { - "title": "Prompt Injection (Learn Prompting)", - "url": "https://learnprompting.org/docs/prompt_hacking/injection", - "type": "article" - }, - { - "title": "Prompt Injection Attack Explanation (IBM)", - "url": "https://research.ibm.com/blog/prompt-injection-attacks-against-llms", - "type": "article" - }, - { - "title": "Prompt Injection: Impact, How It Works & 4 Defense Measures", - "url": "https://www.tigera.io/learn/guides/llm-security/prompt-injection/", - "type": "article" - } - ] - }, - "1Xr7mxVekeAHzTL7G4eAZ": { - "title": "Prompt Hacking", - "description": "Prompt hacking is a core technique for AI Red Teamers targeting LLMs. It involves crafting inputs (prompts) to manipulate the model into bypassing safety controls, revealing hidden information, or performing unintended actions. 
Red teamers systematically test various prompt hacking methods (like jailbreaking, role-playing, or instruction manipulation) to assess the LLM's resilience against adversarial user input.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Introduction to Prompt Hacking", - "url": "https://learnprompting.org/courses/intro-to-prompt-hacking", - "type": "course" - }, - { - "title": "Prompt Hacking Guide", - "url": "https://learnprompting.org/docs/prompt_hacking/introduction", - "type": "article" - }, - { - "title": "SoK: Prompt Hacking of LLMs (arXiv 2023)", - "url": "https://arxiv.org/abs/2311.05544", - "type": "article" - } - ] - }, - "5zHow4KZVpfhch5Aabeft": { - "title": "Direct", - "description": "Direct injection attacks occur when malicious instructions are inserted directly into the prompt input field by the user interacting with the LLM. AI Red Teamers use this technique to assess if basic instructions like \"Ignore previous prompt\" can immediately compromise the model's safety or intended function, testing the robustness of the system prompt's influence.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Prompt Injection", - "url": "https://learnprompting.org/docs/prompt_hacking/injection?srsltid=AfmBOooOKRzLT0Hn2PNdAa69Fietniztfds6Fo1PO8WuIyyXjbLb6XgI", - "type": "article" - }, - { - "title": "Prompt Injection & the Rise of Prompt Attacks", - "url": "https://www.lakera.ai/blog/guide-to-prompt-injection", - "type": "article" - }, - { - "title": "Prompt Injection Cheat Sheet (FlowGPT)", - "url": "https://flowgpt.com/p/prompt-injection-cheat-sheet", - "type": "article" - } - ] - }, - "3_gJRtJSdm2iAfkwmcv0e": { - "title": "Indirect", - "description": "Indirect injection involves embedding malicious prompts within external data sources that the LLM processes, such as websites, documents, or emails. 
AI Red Teamers test this by poisoning data sources the AI might interact with (e.g., adding hidden instructions to a webpage summarized by the AI) to see if the AI executes unintended commands or leaks data when processing that source.\n\nLearn more from the following resources:", - "links": [ - { - "title": "The Practical Application of Indirect Prompt Injection Attacks", - "url": "https://www.researchgate.net/publication/382692833_The_Practical_Application_of_Indirect_Prompt_Injection_Attacks_From_Academia_to_Industry", - "type": "article" - }, - { - "title": "How to Prevent Indirect Prompt Injection Attacks", - "url": "https://www.cobalt.io/blog/how-to-prevent-indirect-prompt-injection-attacks", - "type": "article" - }, - { - "title": "Indirect Prompt Injection Data Exfiltration", - "url": "https://embracethered.com/blog/posts/2024/chatgpt-macos-app-persistent-data-exfiltration/", - "type": "article" - } - ] - }, - "G1u_Kq4NeUsGX2qnUTuJU": { - "title": "Countermeasures", - "description": "AI Red Teamers must also understand and test defenses against prompt hacking. 
This includes evaluating the effectiveness of input sanitization, output filtering, instruction demarcation (e.g., XML tagging), contextual awareness checks, model fine-tuning for resistance, and applying the principle of least privilege to LLM capabilities and tool access.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Prompt Hacking Defensive Measures", - "url": "https://learnprompting.org/docs/prompt_hacking/defensive_measures/introduction", - "type": "article" - }, - { - "title": "Mitigating Prompt Injection Attacks (NCC Group Research)", - "url": "https://research.nccgroup.com/2023/12/01/mitigating-prompt-injection-attacks/", - "type": "article" - }, - { - "title": "Prompt Injection & the Rise of Prompt Attacks", - "url": "https://www.lakera.ai/blog/guide-to-prompt-injection", - "type": "article" - }, - { - "title": "Prompt Injection: Impact, How It Works & 4 Defense Measures", - "url": "https://www.tigera.io/learn/guides/llm-security/prompt-injection/", - "type": "article" - }, - { - "title": "OpenAI Best Practices for Prompt Security", - "url": "https://platform.openai.com/docs/guides/prompt-engineering/strategy-write-clear-instructions", - "type": "article" - } - ] - }, - "vhBu5x8INTtqvx6vcYAhE": { - "title": "Code Injection", - "description": "AI Red Teamers test for code injection vulnerabilities specifically in the context of AI applications. 
This involves probing whether user input, potentially manipulated via prompts, can lead to the execution of unintended code (e.g., SQL, OS commands, or script execution via generated code) within the application layer or connected systems, using the AI as a potential vector.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Code Injection in LLM Applications", - "url": "https://neuraltrust.ai/blog/code-injection-in-llms", - "type": "article" - }, - { - "title": "Code Injection", - "url": "https://learnprompting.org/docs/prompt_hacking/offensive_measures/code_injection", - "type": "article" - }, - { - "title": "Code Injection", - "url": "https://owasp.org/www-community/attacks/Code_Injection", - "type": "article" - } - ] - }, - "uBXrri2bXVsNiM8fIHHOv": { - "title": "Model Vulnerabilities", - "description": "This category covers attacks and tests targeting the AI model itself, beyond the prompt interface. AI Red Teamers investigate inherent weaknesses in the model's architecture, training data artifacts, or prediction mechanisms, such as susceptibility to data extraction, poisoning, or adversarial manipulation.\n\nLearn more from the following resources:", - "links": [ - { - "title": "AI Security Risks Uncovered: What You Must Know in 2025", - "url": "https://ttms.com/uk/ai-security-risks-explained-what-you-need-to-know-in-2025/", - "type": "article" - }, - { - "title": "Weaknesses in Modern AI", - "url": "https://insights.sei.cmu.edu/blog/weaknesses-and-vulnerabilities-in-modern-ai-why-security-and-safety-are-so-challenging/", - "type": "article" - }, - { - "title": "AI and ML Vulnerabilities (CNAS Report)", - "url": "https://www.cnas.org/publications/reports/understanding-and-mitigating-ai-vulnerabilities", - "type": "article" - } - ] - }, - "QFzLx5nc4rCCD8WVc20mo": { - "title": "Model Weight Stealing", - "description": "AI Red Teamers assess the risk of attackers reconstructing or stealing the proprietary weights of a trained model, often 
through API query-based attacks. Testing involves simulating such attacks to understand how easily the model's functionality can be replicated, which informs defenses like query rate limiting, watermarking, or differential privacy.\n\nLearn more from the following resources:", - "links": [ - { - "title": "A Playbook for Securing AI Model Weights", - "url": "https://www.rand.org/pubs/research_briefs/RBA2849-1.html", - "type": "article" - }, - { - "title": "How to Steal a Machine Learning Model (SkyCryptor)", - "url": "https://skycryptor.com/blog/how-to-steal-a-machine-learning-model", - "type": "article" - }, - { - "title": "On the Limitations of Model Stealing with Uncertainty Quantification Models", - "url": "https://openreview.net/pdf?id=ONRFHoUzNk", - "type": "article" - } - ] - }, - "DQeOavZCoXpF3k_qRDABs": { - "title": "Unauthorized Access", - "description": "AI Red Teamers test if vulnerabilities in the AI system or its interfaces allow attackers to gain unauthorized access to data, functionalities, or underlying infrastructure. 
This includes attempting privilege escalation via prompts, exploiting insecure API endpoints connected to the AI, or manipulating the AI to access restricted system resources.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Defending Model Files from Unauthorized Access", - "url": "https://developer.nvidia.com/blog/defending-ai-model-files-from-unauthorized-access-with-canaries/", - "type": "article" - }, - { - "title": "OWASP API Security Project", - "url": "https://owasp.org/www-project-api-security/", - "type": "article" - }, - { - "title": "Detecting Unauthorized Usage", - "url": "https://www.unr.edu/digital-learning/instructional-strategies/understanding-and-integrating-generative-ai-in-teaching/how-can-i-detect-unauthorized-ai-usage", - "type": "article" - } - ] - }, - "nD0_64ELEeJSN-0aZiR7i": { - "title": "Data Poisoning", - "description": "AI Red Teamers simulate data poisoning attacks by evaluating how introducing manipulated or mislabeled data into potential training or fine-tuning datasets could compromise the model. They assess the impact on model accuracy, fairness, or the potential creation of exploitable backdoors, informing defenses around data validation and provenance.\n\nLearn more from the following resources:", - "links": [ - { - "title": "AI Poisoning", - "url": "https://www.aiblade.net/p/ai-poisoning-is-it-really-a-threat", - "type": "article" - }, - { - "title": "Detecting and Preventing Data Poisoning Attacks on AI Models", - "url": "https://arxiv.org/abs/2503.09302", - "type": "article" - }, - { - "title": "Poisoning Web-Scale Training Data (arXiv)", - "url": "https://arxiv.org/abs/2310.12818", - "type": "article" - } - ] - }, - "xjlttOti-_laPRn8a2fVy": { - "title": "Adversarial Examples", - "description": "A core AI Red Teaming activity involves generating adversarial examples – inputs slightly perturbed to cause misclassification or bypass safety filters – to test model robustness. 
Red teamers use various techniques (gradient-based, optimization-based, or black-box methods) to find inputs that exploit model weaknesses, informing developers on how to harden the model.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Adversarial Examples – Interpretable Machine Learning Book", - "url": "https://christophm.github.io/interpretable-ml-book/adversarial.html", - "type": "article" - }, - { - "title": "Adversarial Testing for Generative AI", - "url": "https://developers.google.com/machine-learning/guides/adv-testing", - "type": "article" - }, - { - "title": "How AI Can Be Tricked With Adversarial Attacks", - "url": "https://www.youtube.com/watch?v=J3X_JWQkvo8?v=MPcfoQBDY0w", - "type": "video" - } - ] - }, - "iE5PcswBHnu_EBFIacib0": { - "title": "Model Inversion", - "description": "AI Red Teamers perform model inversion tests to assess if an attacker can reconstruct sensitive training data (like images, text snippets, or personal attributes) by repeatedly querying the model and analyzing its outputs. Success indicates privacy risks due to data memorization, requiring mitigation techniques like differential privacy or output filtering.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Model inversion and membership inference: Understanding new AI security risks", - "url": "https://www.hoganlovells.com/en/publications/model-inversion-and-membership-inference-understanding-new-ai-security-risks-and-mitigating-vulnerabilities", - "type": "article" - }, - { - "title": "Extracting Training Data from LLMs (arXiv)", - "url": "https://arxiv.org/abs/2012.07805", - "type": "article" - }, - { - "title": "Model Inversion Attacks: A Survey of Approaches and Countermeasures", - "url": "https://arxiv.org/html/2411.10023v1", - "type": "article" - } - ] - }, - "2Y0ZO-etpv3XIvunDLu-W": { - "title": "Adversarial Training", - "description": "AI Red Teamers evaluate the effectiveness of adversarial training as a defense. 
They test if models trained on adversarial examples are truly robust or if new, unseen adversarial attacks can still bypass the hardened defenses. This helps refine the adversarial training process itself.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Model Robustness: Building Reliable AI Models", - "url": "https://encord.com/blog/model-robustness-machine-learning-strategies/", - "type": "article" - }, - { - "title": "Adversarial Testing for Generative AI", - "url": "https://developers.google.com/machine-learning/guides/adv-testing", - "type": "article" - }, - { - "title": "Detecting and Preventing Data Poisoning Attacks on AI Models", - "url": "https://arxiv.org/abs/2503.09302", - "type": "article" - } - ] - }, - "6gEHMhh6BGJI-ZYN27YPW": { - "title": "Robust Model Design", - "description": "AI Red Teamers assess whether choices made during model design (architecture selection, regularization techniques, ensemble methods) effectively contribute to robustness against anticipated attacks. They test if these design choices actually prevent common failure modes identified during threat modeling.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Model Robustness: Building Reliable AI Models", - "url": "https://encord.com/blog/model-robustness-machine-learning-strategies/", - "type": "article" - }, - { - "title": "Understanding Robustness in Machine Learning", - "url": "https://www.alooba.com/skills/concepts/machine-learning/robustness/", - "type": "article" - }, - { - "title": "Towards Evaluating the Robustness of Neural Networks (arXiv by Goodfellow et al.)", - "url": "https://arxiv.org/abs/1608.04644", - "type": "article" - } - ] - }, - "7Km0mFpHguHYPs5UhHTsM": { - "title": "Continuous Monitoring", - "description": "AI Red Teamers assess the effectiveness of continuous monitoring systems by attempting attacks and observing if detection mechanisms trigger appropriate alerts and responses. 
They test if monitoring covers AI-specific anomalies (like sudden shifts in output toxicity or unexpected resource consumption by the model) in addition to standard infrastructure monitoring.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Cyber Security Monitoring: 5 Key Components", - "url": "https://www.bitsight.com/blog/5-things-to-consider-building-continuous-security-monitoring-strategy", - "type": "article" - }, - { - "title": "Cyber Security Monitoring: Definition and Best Practices", - "url": "https://www.sentinelone.com/cybersecurity-101/cybersecurity/cyber-security-monitoring/", - "type": "article" - }, - { - "title": "Cybersecurity Monitoring: Definition, Tools & Best Practices", - "url": "https://nordlayer.com/blog/cybersecurity-monitoring/", - "type": "article" - } - ] - }, - "aKzai0A8J55-OBXTnQih1": { - "title": "Insecure Deserialization", - "description": "AI Red Teamers investigate if serialized objects used by the AI system (e.g., for saving model states, configurations, or transmitting data) can be manipulated by an attacker. 
They test if crafting malicious serialized objects could lead to remote code execution or other exploits when the application deserializes the untrusted data.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Lightboard Lessons: OWASP Top 10 - Insecure Deserialization", - "url": "https://community.f5.com/kb/technicalarticles/lightboard-lessons-owasp-top-10---insecure-deserialization/281509", - "type": "article" - }, - { - "title": "How Hugging Face Was Ethically Hacked", - "url": "https://www.aiblade.net/p/how-hugging-face-was-ethically-hacked", - "type": "article" - }, - { - "title": "OWASP TOP 10: Insecure Deserialization", - "url": "https://blog.detectify.com/best-practices/owasp-top-10-insecure-deserialization/", - "type": "article" - }, - { - "title": "Insecure Deserialization", - "url": "https://owasp.org/www-community/vulnerabilities/Insecure_Deserialization", - "type": "article" - } - ] - }, - "kgDsDlBk8W2aM6LyWpFY8": { - "title": "Remote Code Execution", - "description": "AI Red Teamers attempt to achieve RCE on systems hosting or interacting with AI models. This could involve exploiting vulnerabilities in the AI framework itself, the web server, connected APIs, or tricking an AI agent with code execution capabilities into running malicious commands provided via prompts. 
RCE is often the ultimate goal of exploiting other vulnerabilities like code injection or insecure deserialization.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Exploiting LLMs with Code Execution (GitHub Gist)", - "url": "https://gist.github.com/coolaj86/6f4f7b30129b0251f61fa7baaa881516", - "type": "article" - }, - { - "title": "What is remote code execution?", - "url": "https://www.cloudflare.com/learning/security/what-is-remote-code-execution/", - "type": "article" - } - ] - }, - "nhUKKWyBH80nyKfGT8ErC": { - "title": "Infrastructure Security", - "description": "AI Red Teamers assess the security posture of the infrastructure hosting AI models (cloud environments, servers, containers). They look for misconfigurations, unpatched systems, insecure network setups, or inadequate access controls that could allow compromise of the AI system or leakage of sensitive data/models.\n\nLearn more from the following resources:", - "links": [ - { - "title": "AI Infrastructure Attacks (VentureBeat)", - "url": "https://venturebeat.com/ai/understanding-ai-infrastructure-attacks/", - "type": "article" - }, - { - "title": "Network Infrastructure Security - Best Practices and Strategies", - "url": "https://www.dataguard.com/blog/network-infrastructure-security-best-practices-and-strategies/", - "type": "article" - }, - { - "title": "Secure Deployment of ML Systems (NIST)", - "url": "https://csrc.nist.gov/publications/detail/sp/800-218/final", - "type": "article" - } - ] - }, - "Tszl26iNBnQBdBEWOueDA": { - "title": "API Protection", - "description": "AI Red Teamers rigorously test the security of APIs providing access to AI models. 
They probe for OWASP API Top 10 vulnerabilities like broken authentication/authorization, injection flaws, security misconfigurations, and lack of rate limiting, specifically evaluating how these could lead to misuse or compromise of the AI model itself.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Securing APIs with AI for Advanced Threat Protection", - "url": "https://adevait.com/artificial-intelligence/securing-apis-with-ai", - "type": "article" - }, - { - "title": "Securing Machine Learning APIs (IBM)", - "url": "https://developer.ibm.com/articles/se-securing-machine-learning-apis/", - "type": "article" - }, - { - "title": "OWASP API Security Project (Top 10 2023)", - "url": "https://owasp.org/www-project-api-security/", - "type": "article" - } - ] - }, - "J7gjlt2MBx7lOkOnfGvPF": { - "title": "Authentication", - "description": "AI Red Teamers test the authentication mechanisms controlling access to AI systems and APIs. They attempt to bypass logins, steal or replay API keys/tokens, exploit weak password policies, or find flaws in MFA implementations to gain unauthorized access to the AI model or its management interfaces.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Red-Teaming in AI Testing: Stress Testing", - "url": "https://www.labelvisor.com/red-teaming-abstract-competitive-testing-data-selection/", - "type": "article" - }, - { - "title": "What is Authentication vs Authorization?", - "url": "https://auth0.com/intro-to-iam/authentication-vs-authorization", - "type": "article" - }, - { - "title": "JWT Attacks", - "url": "https://portswigger.net/web-security/jwt", - "type": "article" - } - ] - }, - "JQ3bR8odXJfd-1RCEf3-Q": { - "title": "Authentication", - "description": "AI Red Teamers test authorization controls to ensure that authenticated users can only access the AI features and data permitted by their roles/permissions. 
They attempt privilege escalation, try to access other users' data via the AI, or manipulate the AI to perform actions beyond its authorized scope.\n\nLearn more from the following resources:", - "links": [ - { - "title": "What is Authentication vs Authorization?", - "url": "https://auth0.com/intro-to-iam/authentication-vs-authorization", - "type": "article" - }, - { - "title": "Identity and access management (IAM) fundamental concepts", - "url": "https://learn.microsoft.com/en-us/entra/fundamentals/identity-fundamental-concepts", - "type": "article" - }, - { - "title": "OWASP API Security Project", - "url": "https://owasp.org/www-project-api-security/", - "type": "article" - } - ] - }, - "0bApnJTt-Z2IUf0X3OCYf": { - "title": "Black Box Testing", - "description": "In AI Red Teaming, black-box testing involves probing the AI system with inputs and observing outputs without any knowledge of the model's architecture, training data, or internal logic. This simulates an external attacker and is crucial for finding vulnerabilities exploitable through publicly accessible interfaces, such as prompt injection or safety bypasses discoverable via API interaction.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Black-Box, Gray Box, and White-Box Penetration Testing", - "url": "https://www.eccouncil.org/cybersecurity-exchange/penetration-testing/black-box-gray-box-and-white-box-penetration-testing-importance-and-uses/", - "type": "article" - }, - { - "title": "What is Black Box Testing", - "url": "https://www.imperva.com/learn/application-security/black-box-testing/", - "type": "article" - }, - { - "title": "LLM red teaming guide (open source)", - "url": "https://www.promptfoo.dev/docs/red-team/", - "type": "article" - } - ] - }, - "Mrk_js5UVn4dRDw-Yco3Y": { - "title": "White Box Testing", - "description": "White-box testing in AI Red Teaming grants the tester full access to the model's internals (architecture, weights, training data, source code). 
This allows for highly targeted attacks, such as crafting precise adversarial examples using gradients, analyzing code for vulnerabilities, or directly examining training data for biases or PII leakage. It simulates insider threats or deep analysis scenarios.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Black-Box, Gray Box, and White-Box Penetration Testing", - "url": "https://www.eccouncil.org/cybersecurity-exchange/penetration-testing/black-box-gray-box-and-white-box-penetration-testing-importance-and-uses/", - "type": "article" - }, - { - "title": "What is White Box Penetration Testing", - "url": "https://www.getastra.com/blog/security-audit/white-box-penetration-testing/", - "type": "article" - }, - { - "title": "The Art of White Box Pentesting", - "url": "https://infosecwriteups.com/cracking-the-code-the-art-of-white-box-pentesting-de296bc22c67", - "type": "article" - } - ] - }, - "ZVNAMCP68XKRXVxF2-hBc": { - "title": "Grey Box Testing", - "description": "Grey-box AI Red Teaming involves testing with partial knowledge of the system, such as knowing the model type (e.g., GPT-4), having access to some documentation, or understanding the general system architecture but not having full model weights or source code. 
This allows for more targeted testing than black-box while still simulating realistic external attacker scenarios where some information might be gleaned.\n\nLearn more from the following resources:", - "links": [ - { - "title": "AI Transparency: Connecting AI Red Teaming and Compliance", - "url": "https://splx.ai/blog/ai-transparency-connecting-ai-red-teaming-and-compliance", - "type": "article" - }, - { - "title": "Black-Box, Gray Box, and White-Box Penetration Testing", - "url": "https://www.eccouncil.org/cybersecurity-exchange/penetration-testing/black-box-gray-box-and-white-box-penetration-testing-importance-and-uses/", - "type": "article" - }, - { - "title": "Understanding Black Box, White Box, and Grey Box Testing", - "url": "https://www.frugaltesting.com/blog/understanding-black-box-white-box-and-grey-box-testing-in-software-testing", - "type": "article" - } - ] - }, - "LVdYN9hyCyNPYn2Lz1y9b": { - "title": "Automated vs Manual", - "description": "AI Red Teaming typically employs a blend of automated tools (for large-scale scanning, fuzzing prompts, generating basic adversarial examples) and manual human testing (for creative jailbreaking, complex multi-stage attacks, evaluating nuanced safety issues like bias). Automation provides scale, while manual testing provides depth and creativity needed to find novel vulnerabilities.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Automation Testing vs. 
Manual Testing: Which is the better approach?", - "url": "https://www.opkey.com/blog/automation-testing-vs-manual-testing-which-is-better", - "type": "article" - }, - { - "title": "Manual Testing vs Automated Testing: What's the Difference?", - "url": "https://www.leapwork.com/blog/manual-vs-automated-testing", - "type": "article" - }, - { - "title": "Spikee", - "url": "https://spikee.ai", - "type": "article" - } - ] - }, - "65Lo60JQS5YlvvQ6KevXt": { - "title": "Continuous Testing", - "description": "Applying continuous testing principles to AI security involves integrating automated red teaming checks into the development pipeline (CI/CD). This allows for regular, automated assessment of model safety, robustness, and alignment as the model or application code evolves, catching regressions or new vulnerabilities early. Tools facilitating Continuous Automated Red Teaming (CART) are emerging.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Continuous Automated Red Teaming (CART)", - "url": "https://www.firecompass.com/continuous-automated-red-teaming/", - "type": "article" - }, - { - "title": "What is Continuous Penetration Testing? Process and Benefits", - "url": "https://qualysec.com/continuous-penetration-testing/", - "type": "article" - }, - { - "title": "What is Continuous Testing and How Does it Work?", - "url": "https://www.blackduck.com/glossary/what-is-continuous-testing.html", - "type": "article" - } - ] - }, - "c8n8FcYKDOgPLQvV9xF5J": { - "title": "Testing Platforms", - "description": "Platforms used by AI Red Teamers range from general penetration testing OS distributions like Kali Linux to specific AI red teaming tools/frameworks like Microsoft's PyRIT or Promptfoo, and vulnerability scanners like OWASP ZAP adapted for API testing of AI services. 
These platforms provide the toolsets needed to conduct assessments.\n\nLearn more from the following resources:", - "links": [ - { - "title": "AI Red Teaming Agent - Azure AI Foundry | Microsoft Learn", - "url": "https://learn.microsoft.com/en-us/azure/ai-foundry/concepts/ai-red-teaming-agent", - "type": "article" - }, - { - "title": "Kali Linux", - "url": "https://www.kali.org/", - "type": "article" - }, - { - "title": "OWASP Zed Attack Proxy (ZAP)", - "url": "https://owasp.org/www-project-zap/", - "type": "article" - }, - { - "title": "Promptfoo", - "url": "https://www.promptfoo.dev/", - "type": "article" - }, - { - "title": "PyRIT (Python Risk Identification Tool for generative AI)", - "url": "https://github.com/Azure/PyRIT", - "type": "article" - } - ] - }, - "59lkLcoqV4gq7f8Zm0X2p": { - "title": "Monitoring Solutions", - "description": "AI Red Teamers interact with monitoring tools primarily to test their effectiveness (evasion) or potentially exploit vulnerabilities within them. Understanding tools like IDS (Snort, Suricata), network analyzers (Wireshark), and SIEMs helps red teamers simulate attacks that might bypass or target these defensive systems.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Open Source IDS Tools: Comparing Suricata, Snort, Bro (Zeek), Linux", - "url": "https://levelblue.com/blogs/security-essentials/open-source-intrusion-detection-tools-a-quick-overview", - "type": "article" - }, - { - "title": "Snort", - "url": "https://www.snort.org/", - "type": "article" - }, - { - "title": "Suricata", - "url": "https://suricata.io/", - "type": "article" - }, - { - "title": "Wireshark", - "url": "https://www.wireshark.org/", - "type": "article" - }, - { - "title": "Zeek (formerly Bro)", - "url": "https://zeek.org/", - "type": "article" - } - ] - }, - "et1Xrr8ez-fmB0mAq8W_a": { - "title": "Benchmark Datasets", - "description": "AI Red Teamers may use or contribute to benchmark datasets specifically designed to evaluate AI 
security. These datasets (like HackAprompt, SecBench, NYU CTF Bench, CySecBench) contain prompts or scenarios targeting vulnerabilities, safety issues, or specific cybersecurity capabilities, allowing for standardized testing of models.\n\nLearn more from the following resources:", - "links": [ - { - "title": "HackAPrompt Dataset", - "url": "https://huggingface.co/datasets/hackaprompt/hackaprompt-dataset", - "type": "article" - }, - { - "title": "CySecBench: Generative AI-based CyberSecurity-focused Prompt Dataset", - "url": "https://github.com/cysecbench/dataset", - "type": "article" - }, - { - "title": "NYU CTF Bench: A Scalable Open-Source Benchmark Dataset for Evaluating LLMs in Offensive Security", - "url": "https://proceedings.neurips.cc/paper_files/paper/2024/hash/69d97a6493fbf016fff0a751f253ad18-Abstract-Datasets_and_Benchmarks_Track.html", - "type": "article" - }, - { - "title": "SecBench: A Comprehensive Multi-Dimensional Benchmarking Dataset for LLMs in Cybersecurity", - "url": "https://arxiv.org/abs/2412.20787", - "type": "article" - } - ] - }, - "C1zO2xC0AqyV53p2YEPWg": { - "title": "Custom Testing Scripts", - "description": "AI Red Teamers frequently write custom scripts (often in Python) to automate bespoke attacks, interact with specific AI APIs, generate complex prompt sequences, parse model outputs at scale, or implement novel exploit techniques not found in standard tools. 
Proficiency in scripting is essential for advanced AI red teaming.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Python for Cybersecurity: Key Use Cases and Tools", - "url": "https://panther.com/blog/python-for-cybersecurity-key-use-cases-and-tools", - "type": "article" - }, - { - "title": "Python for cybersecurity: use cases, tools and best practices", - "url": "https://softteco.com/blog/python-for-cybersecurity", - "type": "article" - }, - { - "title": "Scapy", - "url": "https://scapy.net/", - "type": "article" - } - ] - }, - "BLnfNlA0C4yzy1dvifjwx": { - "title": "Reporting Tools", - "description": "AI Red Teamers use reporting techniques and potentially tools to clearly document their findings, including discovered vulnerabilities, successful exploit steps (e.g., effective prompts), assessed impact, and actionable recommendations tailored to AI systems. Good reporting translates technical findings into understandable risks for stakeholders.\n\nLearn more from the following resources:", - "links": [ - { - "title": "The Complete Red Teaming Checklist [PDF]: 5 Key Steps - Mindgard AI", - "url": "https://mindgard.ai/blog/red-teaming-checklist", - "type": "article" - }, - { - "title": "Penetration Testing Report: 6 Key Sections and 4 Best Practices", - "url": "https://brightsec.com/blog/penetration-testing-report/", - "type": "article" - }, - { - "title": "Penetration testing best practices: Strategies for all test types", - "url": "https://www.strikegraph.com/blog/pen-testing-best-practices", - "type": "article" - } - ] - }, - "s1xKK8HL5-QGZpcutiuvj": { - "title": "Specialized Courses", - "description": "Targeted training is crucial for mastering AI Red Teaming. 
Look for courses covering adversarial ML, prompt hacking, LLM security, ethical hacking for AI, and specific red teaming methodologies applied to AI systems offered by platforms like Learn Prompting, Coursera, or security training providers.\n\nLearn more from the following resources:", - "links": [ - { - "title": "AI Red Teaming Courses - Learn Prompting", - "url": "https://learnprompting.org/blog/ai-red-teaming-courses", - "type": "course" - }, - { - "title": "AI Security | Coursera", - "url": "https://www.coursera.org/learn/ai-security", - "type": "course" - }, - { - "title": "Free Online Cyber Security Courses with Certificates in 2025", - "url": "https://www.eccouncil.org/cybersecurity-exchange/cyber-novice/free-cybersecurity-courses-beginners/", - "type": "course" - } - ] - }, - "HHjsFR6wRDqUd66PMDE_7": { - "title": "Industry Credentials", - "description": "Beyond formal certifications, recognition in the AI Red Teaming field comes from practical achievements like finding significant vulnerabilities (responsible disclosure), winning AI-focused CTFs or hackathons (like HackAPrompt), contributing to AI security research, or building open-source testing tools.\n\nLearn more from the following resources:", - "links": [ - { - "title": "HackAPrompt", - "url": "https://hackaprompt.com", - "type": "article" - }, - { - "title": "RedTeam Arena", - "url": "https://redarena.ai", - "type": "article" - } - ] - }, - "MmwwRK4I9aRH_ha7duPqf": { - "title": "Lab Environments", - "description": "AI Red Teamers need environments to practice attacking vulnerable systems safely. 
While traditional labs (HTB, THM, VulnHub) build general pentesting skills, platforms are emerging with labs specifically focused on AI/LLM vulnerabilities, prompt injection, or adversarial ML challenges.\n\nLearn more from the following resources:", - "links": [ - { - "title": "HackAPrompt Playground", - "url": "https://learnprompting.org/hackaprompt-playground", - "type": "article" - }, - { - "title": "InjectPrompt Playground", - "url": "https://playground.injectprompt.com/", - "type": "article" - }, - { - "title": "Gandalf AI Prompt Injection Lab", - "url": "https://gandalf.lakera.ai/", - "type": "article" - }, - { - "title": "Hack The Box: Hacking Labs", - "url": "https://www.hackthebox.com/hacker/hacking-labs", - "type": "article" - }, - { - "title": "TryHackMe: Learn Cyber Security", - "url": "https://tryhackme.com/", - "type": "article" - } - ] - }, - "2Imb64Px3ZQcBpSQjdc_G": { - "title": "CTF Challenges", - "description": "Capture The Flag competitions increasingly include AI/ML security challenges. Participating in CTFs (tracked on CTFtime) or platforms like picoCTF helps AI Red Teamers hone skills in reverse engineering, web exploitation, and cryptography applied to AI systems, including specialized AI safety CTFs.\n\nLearn more from the following resources:", - "links": [ - { - "title": "HackAPrompt", - "url": "https://www.hackaprompt.com/", - "type": "article" - }, - { - "title": "Progress from our Frontier Red Team", - "url": "https://www.anthropic.com/news/strategic-warning-for-ai-risk-progress-and-insights-from-our-frontier-red-team", - "type": "article" - }, - { - "title": "CTFtime.org", - "url": "https://ctftime.org/", - "type": "article" - } - ] - }, - "DpYsL0du37n40toH33fIr": { - "title": "Red Team Simulations", - "description": "Participating in or conducting structured red team simulations against AI systems (or components) provides the most realistic practice. 
This involves applying methodologies, TTPs (Tactics, Techniques, and Procedures), reconnaissance, exploitation, and reporting within a defined scope and objective, specifically targeting AI vulnerabilities.\n\nLearn more from the following resources:", - "links": [ - { - "title": "A Simple Guide to Successful Red Teaming", - "url": "https://www.cobaltstrike.com/resources/guides/a-simple-guide-to-successful-red-teaming", - "type": "article" - }, - { - "title": "The Complete Guide to Red Teaming: Process, Benefits & More", - "url": "https://mindgard.ai/blog/red-teaming", - "type": "article" - }, - { - "title": "The Complete Red Teaming Checklist [PDF]: 5 Key Steps - Mindgard AI", - "url": "https://mindgard.ai/blog/red-teaming-checklist", - "type": "article" - } - ] - }, - "LuKnmd9nSz9yLbTU_5Yp2": { - "title": "Conferences", - "description": "Attending major cybersecurity conferences (DEF CON, Black Hat, RSA) and increasingly specialized AI Safety/Security conferences allows AI Red Teamers to learn about cutting-edge research, network with peers, and discover new tools and attack/defense techniques.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Black Hat Events", - "url": "https://www.blackhat.com/", - "type": "article" - }, - { - "title": "DEF CON Hacking Conference", - "url": "https://defcon.org/", - "type": "article" - }, - { - "title": "Global Conference on AI, Security and Ethics 2025", - "url": "https://unidir.org/event/global-conference-on-ai-security-and-ethics-2025/", - "type": "article" - }, - { - "title": "RSA Conference", - "url": "https://www.rsaconference.com/", - "type": "article" - } - ] - }, - "ZlR03pM-sqVFZNhD1gMSJ": { - "title": "Research Groups", - "description": "Following and potentially contributing to research groups at universities (like CMU, Stanford, Oxford), non-profits (like OpenAI, Anthropic), or government bodies (like UK's AISI) focused on AI safety, security, and alignment provides deep insights into 
emerging threats and mitigation strategies relevant to AI Red Teaming.\n\nLearn more from the following resources:", - "links": [ - { - "title": "AI Cybersecurity | Global Cyber Security Capacity Centre (Oxford)", - "url": "https://gcscc.ox.ac.uk/ai-security", - "type": "article" - }, - { - "title": "Anthropic Research", - "url": "https://www.anthropic.com/research", - "type": "article" - }, - { - "title": "Center for AI Safety", - "url": "https://www.safe.ai/", - "type": "article" - }, - { - "title": "The AI Security Institute (AISI)", - "url": "https://www.aisi.gov.uk/", - "type": "article" - } - ] - }, - "Smncq-n1OlnLAY27AFQOO": { - "title": "Forums", - "description": "Engaging in online forums, mailing lists, Discord servers, or subreddits dedicated to AI security, adversarial ML, prompt engineering, or general cybersecurity helps AI Red Teamers exchange knowledge, ask questions, learn about new tools/techniques, and find collaboration opportunities.\n\nLearn more from the following resources:", - "links": [ - { - "title": "LearnPrompting Prompt Hacking Discord", - "url": "https://discord.com/channels/1046228027434086460/1349689482651369492", - "type": "article" - }, - { - "title": "Reddit - r/ChatGPTJailbreak", - "url": "https://www.reddit.com/r/ChatGPTJailbreak/", - "type": "article" - }, - { - "title": "Reddit - r/artificial", - "url": "https://www.reddit.com/r/artificial/", - "type": "article" - }, - { - "title": "Reddit - r/cybersecurity", - "url": "https://www.reddit.com/r/cybersecurity/", - "type": "article" - } - ] - }, - "xJYTRbPxMn0Xs5ea0Ygn6": { - "title": "LLM Security Testing", - "description": "The core application area for many AI Red Teamers today involves specifically testing Large Language Models for vulnerabilities like prompt injection, jailbreaking, harmful content generation, bias, and data privacy issues using specialized prompts and evaluation frameworks.\n\nLearn more from the following resources:", - "links": [ - { - "title": "AI Red 
Teaming Courses - Learn Prompting", - "url": "https://learnprompting.org/blog/ai-red-teaming-courses", - "type": "course" - }, - { - "title": "SecBench: A Comprehensive Multi-Dimensional Benchmarking Dataset for LLMs in Cybersecurity", - "url": "https://arxiv.org/abs/2412.20787", - "type": "article" - }, - { - "title": "The Ultimate Guide to Red Teaming LLMs and Adversarial Prompts (Kili Technology)", - "url": "https://kili-technology.com/large-language-models-llms/red-teaming-llms-and-adversarial-prompts", - "type": "article" - } - ] - }, - "FVsKivsJrIb82B0lpPmgw": { - "title": "Agentic AI Security", - "description": "As AI agents capable of autonomous action become more common, AI Red Teamers must test their unique security implications. This involves assessing risks related to goal hijacking, unintended actions through tool use, exploitation of planning mechanisms, and ensuring agents operate safely within their designated boundaries.\n\nLearn more from the following resources:", - "links": [ - { - "title": "AI Agents - Learn Prompting", - "url": "https://learnprompting.org/docs/intermediate/ai_agents", - "type": "article" - }, - { - "title": "EmbraceTheRed", - "url": "https://embracethered.com/", - "type": "article" - } - ] - }, - "KAcCZ3zcv25R6HwzAsfUG": { - "title": "Responsible Disclosure", - "description": "A critical practice for AI Red Teamers is responsible disclosure: privately reporting discovered AI vulnerabilities (e.g., a successful jailbreak, data leak method, or severe bias) to the model developers or system owners, allowing them time to remediate before any public discussion, thus preventing malicious exploitation.\n\nLearn more from the following resources:", - "links": [ - { - "title": "0din.ai Policy", - "url": "https://0din.ai/policy", - "type": "article" - }, - { - "title": "Huntr Guidelines", - "url": "https://huntr.com/guidelines", - "type": "article" - }, - { - "title": "Google Vulnerability Reward Program (VRP)", - "url": 
"https://bughunters.google.com/", - "type": "article" - } - ] - }, - "-G8v_CNa8wO_g-46_RFQo": { - "title": "Emerging Threats", - "description": "AI Red Teamers must stay informed about potential future threats enabled by more advanced AI, such as highly autonomous attack agents, AI-generated malware that evades detection, sophisticated deepfakes for social engineering, or large-scale exploitation of interconnected AI systems. Anticipating these helps shape current testing priorities.\n\nLearn more from the following resources:", - "links": [ - { - "title": "AI Security Risks Uncovered: What You Must Know in 2025", - "url": "https://ttms.com/uk/ai-security-risks-explained-what-you-need-to-know-in-2025/", - "type": "article" - }, - { - "title": "Why Artificial Intelligence is the Future of Cybersecurity", - "url": "https://www.darktrace.com/blog/why-artificial-intelligence-is-the-future-of-cybersecurity", - "type": "article" - }, - { - "title": "AI Index 2024", - "url": "https://aiindex.stanford.edu/report/", - "type": "article" - } - ] - }, - "soC-kcem1ISbnCQMa6BIB": { - "title": "Advanced Techniques", - "description": "The practice of AI Red Teaming itself will evolve. 
Future techniques may involve using AI adversaries to automatically discover complex vulnerabilities, developing more sophisticated methods for testing AI alignment and safety properties, simulating multi-agent system failures, and creating novel metrics for evaluating AI robustness against unknown future attacks.\n\nLearn more from the following resources:", - "links": [ - { - "title": "AI red-teaming in critical infrastructure: Boosting security and trust in AI systems", - "url": "https://www.dnv.com/article/ai-red-teaming-for-critical-infrastructure-industries/", - "type": "article" - }, - { - "title": "Advanced Techniques in AI Red Teaming for LLMs", - "url": "https://neuraltrust.ai/blog/advanced-techniques-in-ai-red-teaming", - "type": "article" - }, - { - "title": "Diverse and Effective Red Teaming with Auto-generated Rewards and Multi-step Reinforcement Learning", - "url": "https://arxiv.org/html/2412.18693v1", - "type": "article" - } - ] - }, - "VmaIHVsCpq2um_0cA33V3": { - "title": "Research Opportunities", - "description": "AI Red Teaming relies on ongoing research. 
Key areas needing further investigation include scalable methods for finding elusive vulnerabilities, understanding emergent behaviors in complex models, developing provable safety guarantees, creating better benchmarks for AI security, and exploring the socio-technical aspects of AI misuse and defense.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Cutting-Edge Research on AI Security bolstered with new Challenge Fund", - "url": "https://www.gov.uk/government/news/cutting-edge-research-on-ai-security-bolstered-with-new-challenge-fund-to-ramp-up-public-trust-and-adoption", - "type": "article" - }, - { - "title": "Careers | The AI Security Institute (AISI)", - "url": "https://www.aisi.gov.uk/careers", - "type": "article" - }, - { - "title": "Research - Anthropic", - "url": "https://www.anthropic.com/research", - "type": "article" - } - ] - }, - "WePO66_4-gNcSdE00WKmw": { - "title": "Industry Standards", - "description": "As AI matures, AI Red Teamers will increasingly need to understand and test against emerging industry standards and regulations for AI safety, security, and risk management, such as the NIST AI RMF, ISO/IEC 42001, and sector-specific guidelines, ensuring AI systems meet compliance requirements.\n\nLearn more from the following resources:", - "links": [ - { - "title": "ISO 42001: The New Compliance Standard for AI Management Systems", - "url": "https://www.brightdefense.com/resources/iso-42001-compliance/", - "type": "article" - }, - { - "title": "ISO 42001: What it is & why it matters for AI management", - "url": "https://www.itgovernance.co.uk/iso-42001", - "type": "article" - }, - { - "title": "NIST AI Risk Management Framework (AI RMF)", - "url": "https://www.nist.gov/itl/ai-risk-management-framework", - "type": "article" - }, - { - "title": "ISO/IEC 42001: Information technology — Artificial intelligence — Management system", - "url": "https://www.iso.org/standard/81230.html", - "type": "article" - } - ] - } -} \ No 
newline at end of file diff --git a/public/roadmap-content/android.json b/public/roadmap-content/android.json deleted file mode 100644 index c31837a71..000000000 --- a/public/roadmap-content/android.json +++ /dev/null @@ -1,1461 +0,0 @@ -{ - "Suws-7f_6Z1ChpfcnxX2M": { - "title": "Pick a Language", - "description": "When developing for Android, one crucial step is picking a programming language to use. There are multiple languages you can choose from, but the three most popular ones are Java, Kotlin, and C++.\n\nJava is the original language used for Android development and is widely used, making it a good choice for beginners due to the wealth of resources and developer communities. Kotlin is a newer option that is fully supported by Google and Android Studio, and addressing many of the drawbacks of Java which makes it a popular choice for many developers.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Pick a Language", - "url": "https://developer.android.com/studio/write/java8-support", - "type": "article" - }, - { - "title": "Kotlin vs Java", - "url": "https://developer.android.com/kotlin", - "type": "article" - } - ] - }, - "qIzUv8-GgQnkqChEdgD50": { - "title": "Kotlin", - "description": "`Kotlin` is a cross-platform, statically typed general-purpose programming language with type inference. Developed by JetBrains, the makers of the world’s leading IDEs, Kotlin has a syntax, which is more expressive and concise. This allows for more readable and maintainable code. It is fully interoperable with Java and comes with no limitations. It can be used almost everywhere Java is used today, for server-side development, Android apps, and much more. Kotlin introduces several improvements for programmers over Java, which makes it a preferred choice for many developers. 
With a more concise code base and modern programming concept support - it's certainly the future of Android app development.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Kotlin", - "url": "https://kotlinlang.org/", - "type": "article" - }, - { - "title": "Kotlin Documentation", - "url": "https://kotlinlang.org/docs/home.html", - "type": "article" - }, - { - "title": "Learn Kotlin Programming for Beginners - Free Code Camp", - "url": "https://youtu.be/EExSSotojVI?si=4VPW8ZHa2UMX0HH1", - "type": "video" - } - ] - }, - "RBABbkzD_uNFwEO-hssZO": { - "title": "Java", - "description": "Java is a popular programming language used for Android development due to its robustness and ease of use. Its object-oriented structure allows developers to create modular programs and reusable code. The language was built with the philosophy of \"write once, run anywhere\" (WORA), meaning compiled Java code can run on all platforms without the need for recompilation. Android’s API and core libraries are primarily written in Java, therefore understanding Java is fundamental in creating diverse and powerful Android apps.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Java Roadmap", - "url": "https://roadmap.sh/java", - "type": "article" - }, - { - "title": "Java", - "url": "https://www.oracle.com/java/technologies/javase-jdk11-downloads.html", - "type": "article" - }, - { - "title": "Java Documentation", - "url": "https://docs.oracle.com/en/java/javase/11/docs/api/", - "type": "article" - } - ] - }, - "HlUUGj3dOZ68t4gIjerXh": { - "title": "The Fundamentals", - "description": "\"The Fundamentals\" of Android primarily concentrate on 5 components; Activities, Services, Broadcast Receivers, Content Providers, and Intents. **Activities** are essentially what you see on your screen; each screen in an app is a separate activity. 
**Services** run in the background to perform long-running operations or to perform work for remote processes. They do not provide a user interface. **Broadcast Receivers** respond to broadcast messages from other applications or from the system itself. These messages are often in the form of Intents. **Content Providers** manage a shared set of app data that other apps can query or modify, through a structured interface. Finally, **Intents** are messaging objects which facilitate the communication between the aforementioned components. Understanding these five core concepts is key to mastering Android fundamentals.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "The Fundamentals", - "url": "https://developer.android.com/guide/components/fundamentals", - "type": "article" - } - ] - }, - "ZRGsokU313Ky-anWbWK6q": { - "title": "Development IDE", - "description": "Development IDE refers to Development Integrated Development Environment that is vital for Android App development. For Android, the primary IDE is **Android Studio**. This official IDE from Google includes everything you need to build an Android app, such as a code editor, code analysis tools, emulators for all of Android's supported OS versions and hardware configurations, and more. Other popular IDEs include **Eclipse** (with an Android Developer Tools plugin), **NetBeans**, and **IntelliJ IDEA**. Each of these IDEs tends to have its own set of specialized features, but all are designed to provide the tools and services needed for Android development. 
The choice of IDE often depends on the specific needs and preferences of the developer or development team.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Android Studio", - "url": "https://developer.android.com/studio", - "type": "article" - }, - { - "title": "Android Studio Documentation", - "url": "https://developer.android.com/studio/intro", - "type": "article" - }, - { - "title": "Android Studio Plugins", - "url": "https://developer.android.com/studio/intro/studio-plugins", - "type": "article" - } - ] - }, - "jl1FsQ5-WGKeFyaILNt_p": { - "title": "Basics of Kotlin", - "description": "Kotlin is a concise, multi-platform, and fun language developed by JetBrains. Learn how to use Kotlin for Android, server-side, and cross-platform development, and explore its features, news, and community. Kotlin is a statically-typed programming language that runs on the Java Virtual Machine (JVM) and can be used to develop all types of Android apps. It is Google's preferred language for Android app development.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Kotlin", - "url": "https://kotlinlang.org/", - "type": "article" - }, - { - "title": "Kotlin Docs", - "url": "https://kotlinlang.org/docs/getting-started.html", - "type": "article" - } - ] - }, - "j69erqfosSZMDlmKcnnn0": { - "title": "Basics of OOP", - "description": "Understanding the `Basics of Object-Oriented Programming (OOP)` is crucial. 
OOP is a programming paradigm that uses \"Objects\" - entities that contain both data and functions that manipulate the data.\n\nKey concepts include `Classes`, which are blueprints from which objects are created; `Objects`, instances of a class; `Inheritance`, where one class acquires properties from another; `Polymorphism`, the ability of an object to take many forms; `Abstraction`, showing only necessary details and hiding implementation from the user; and `Encapsulation`, the concept of wrapping data and the methods that work on data within one unit.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Object-Oriented Programming - Wikipedia", - "url": "https://en.wikipedia.org/wiki/Object-oriented_programming", - "type": "article" - }, - { - "title": "OOP in Kotlin", - "url": "https://developer.android.com/codelabs/basic-android-kotlin-compose-classes-and-objects", - "type": "article" - } - ] - }, - "cNeT1dJDfgn0ndPzSxhSL": { - "title": "Data Structures and Algorithms", - "description": "**Data Structures** are primarily used to collect, organize and perform operations on the stored data more effectively. They are essential for designing advanced-level Android applications. Examples include Array, Linked List, Stack, Queue, Hash Map, and Tree.\n\n**Algorithms** are a sequence of instructions or rules for performing a particular task. In Android, algorithms can be used for data searching, sorting, or performing complex business logic. Some commonly used algorithms are Binary Search, Bubble Sort, Selection Sort, etc. 
A deep understanding of data structures and algorithms is crucial in optimizing the performance and the memory consumption of the Android applications.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Data Structures and Algorithms in Android", - "url": "https://medium.com/@pranamsharma.1997/importance-of-data-structure-and-algorithms-in-android-app-development-75eed9f73909", - "type": "article" - } - ] - }, - "FVxNjbDBxgf6vkZWw1Awt": { - "title": "What is and how to use Gradle?", - "description": "Gradle is a powerful build system used in Android development that allows you to define your project and dependencies, and distinguish between different build types and flavors. Gradle uses a domain-specific language (DSL) which gives developers almost complete control over the build process. When you trigger a build in Android Studio, Gradle is the tool working behind the scenes to compile and package your app. It looks at the dependencies you declared in your build.gradle files and creates a build script accordingly. 
Using Gradle in Android development requires continuous editing of the build.gradle files to manage app dependencies, build variants, signing configurations, and other essential aspects related to building your app.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Gradle Build Tool", - "url": "https://gradle.org/", - "type": "article" - }, - { - "title": "Gradle Documentation", - "url": "https://docs.gradle.org/current/userguide/getting_started_eng.html", - "type": "article" - }, - { - "title": "Explore top posts about Gradle", - "url": "https://app.daily.dev/tags/gradle?ref=roadmapsh", - "type": "article" - }, - { - "title": "Gradle Course for Beginners - 55minutes", - "url": "https://www.youtube.com/watch?v=R6Z-Sxb837I", - "type": "video" - }, - { - "title": "Introduction to Gradle for Complete Beginners - 25minutes", - "url": "https://youtu.be/-dtcEMLNmn0?si=NuIP-3wNpUrxfTxA", - "type": "video" - } - ] - }, - "5m_7DvInF8C_4Ml1xVI6L": { - "title": "Create a Basic Hello World App", - "description": "The \"Hello World\" app is a simple project that you can build when you're getting started with Android development. It's often the first program that beginners learn to build in a new system. It's usually considered the simplest form of program that displays a message to the user - \"Hello, World!\" In Android, this involves creating a new project from the Android Studio and setting up the main activity. 
The main activity file is primarily written in Java or Kotlin where you can code for the display message, while the layout design view can be created in the XML file.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Create Your First Android Application", - "url": "https://developer.android.com/codelabs/basic-android-kotlin-compose-first-app", - "type": "article" - } - ] - }, - "5s1CqsYCOXjNroDHaGKGa": { - "title": "Version Control", - "description": "_Version Control_ is a system that records changes to a file or set of files over time so that you can recall specific versions later. An essential tool for software development, it helps to track changes, enhance collaboration, and manage different versions of a project. Two common types of version control systems are Centralized Version Control System (CVCS) and Distributed Version Control System (DVCS). CVCS uses a central server to store all versions of a project, with users getting snapshots from that server. Examples include SVN and Perforce. On the other hand, DVCS allows multiple developers to work on a single project simultaneously. Each user has a complete backup of all versions of the work. Examples include Git and Mercurial.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Version Control Systems", - "url": "https://en.wikipedia.org/wiki/Version_control", - "type": "article" - } - ] - }, - "rqSZ2ATeHbOdIQE9Jlb0B": { - "title": "Git", - "description": "`Git` is a highly efficient and flexible distributed version control system that was created by **Linus Torvalds**, the creator of Linux. It allows multiple developers to work on a project concurrently, providing tools for non-linear development and tracking changes in any set of files. Git has a local repository with a complete history and version-tracking capabilities, allowing offline operations, unlike SVN. 
It ensures data integrity and provides strong support for non-linear development with features such as branching and merging.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Git and GitHub Roadmap", - "url": "https://roadmap.sh/git-github", - "type": "article" - }, - { - "title": "Git", - "url": "https://git-scm.com/", - "type": "article" - }, - { - "title": "Git Documentation", - "url": "https://git-scm.com/docs", - "type": "article" - } - ] - }, - "H-2eb8fLwz8IKYXbeSVKK": { - "title": "GitHub", - "description": "**GitHub** is a cloud-based hosting service for managing software version control using Git. It provides a platform for enabling multiple developers to work together on the same project at the same time. With GitHub, codes can be stored publicly, allowing for collaboration with other developers or privately for individual projects. Key features of GitHub include code sharing, task management, and version control, among others. GitHub also offers functionalities such as bug tracking, feature requests, and task management for the project.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Git and GitHub Roadmap", - "url": "https://roadmap.sh/git-github", - "type": "article" - }, - { - "title": "GitHub", - "url": "https://github.com/", - "type": "article" - }, - { - "title": "GitHub Documentation", - "url": "https://docs.github.com/", - "type": "article" - } - ] - }, - "5LFZdUiFYYU_1sYsouyan": { - "title": "Bitbucket", - "description": "Bitbucket is a web-based hosting service that is owned by Atlassian. Bitbucket uses either Mercurial or Git revision control systems, allowing users to manage and maintain their code. This platform is mainly used for code and code review. Bitbucket provides both commercial plans and free accounts. It offers free accounts with an unlimited number of private repositories (which can have up to five users in the case of free accounts) as of September 2010. 
It originally offered only Mercurial support. Bitbucket integrates with other Atlassian software like JIRA, HipChat, Confluence and Bamboo.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Bitbucket", - "url": "https://bitbucket.org/", - "type": "article" - }, - { - "title": "Bitbucket for Developers", - "url": "https://www.atlassian.com/software/bitbucket", - "type": "article" - } - ] - }, - "Q47BtQphp59NkkZoeNXmP": { - "title": "GitLab", - "description": "`GitLab` is a web-based DevOps lifecycle tool which provides a Git-repository manager, along with continuous integration and deployment pipeline features, using an open-source license, developed by GitLab Inc. Users can manage and create their software projects and repositories, and collaborate on these projects with other members. `GitLab` also allows users to view analytics and open issues of their project. It stands next to other version control tools like `GitHub` and `Bitbucket`, but comes with its own set of additional features and nuances. For Android development, `GitLab` can be particularly useful owing to its continuous integration and deployment system which can automate large parts of the app testing and deployment.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "GitLab", - "url": "https://about.gitlab.com/", - "type": "article" - }, - { - "title": "GitLab Documentation", - "url": "https://docs.gitlab.com/", - "type": "article" - } - ] - }, - "5Li8J5iR_ZuyIlxX0LYei": { - "title": "App Components", - "description": "Android apps are primarily made up of five different types of components:\n\n1. **Activities**: These are individual screens that a user can interact with. Any UI action like touching a button or swiping a screen will usually take place within an activity.\n \n2. **Services**: Unlike activities, services run in the background and don't have a user interface. 
They’re used for repetitive or long running operations, like playing music or pulling in a feed of data from a server.\n \n3. **Broadcast Receivers**: These are event listeners. The Android operating system uses them to respond to system-wide events.\n \n4. **Content Providers**: They manage and share app data with other apps installed on the device. For security, data is not generally shared across apps.\n \n5. **Intents**: These serve as messages or commands to the Android system. They're used to signal to the Android system that certain events have occurred.\n \n\nVisit the following resources to learn more:", - "links": [ - { - "title": "App Fundamentals", - "url": "https://developer.android.com/guide/components/fundamentals", - "type": "article" - }, - { - "title": "Android App Components", - "url": "https://medium.com/android-hunger/android-app-components-activities-fragments-and-intents-ed7373455555", - "type": "article" - } - ] - }, - "nwuVlPmzwJ17mtVQ8Hi9w": { - "title": "Activity", - "description": "`Activity` in Android is a crucial component that represents a single screen with a user interface. It is just like a window in a desktop application. Android apps are typically made up of one or more activities, each having its interface which allows user interaction. When an app is launched, an instance of `Activity` is created, starting the lifecycle of that app. Every activity has its own lifecycle (create, start, resume, pause, stop, destroy) that keeps the state of a user's progress, and Android manages these states automatically. 
Activities can also have `Intent`, which allows them to interact with other components, such as starting another activity or getting a result from that activity.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Android Activity", - "url": "https://developer.android.com/reference/android/app/Activity", - "type": "article" - }, - { - "title": "Mastering Android Activities", - "url": "https://medium.com/deuk/mastering-the-android-activity-lifecycle-best-practices-1d10f15d060a", - "type": "article" - } - ] - }, - "PcHmU1c9hqKyzSjwlRPHk": { - "title": "Services", - "description": "**Services**: A service in Android is an app component that performs operations in the background without a user interface. It can be started by an application component, like an activity, and it will continue to run in the background even if the user switches to another application. There are two types of services in Android, namely, `Started Service` and `Bound Service`. A `Started Service` is used to perform a single operation, such as downloading a large file. On the other hand, a `Bound Service` offers a client-server interface that allows components to interact with the service, send requests, receive results, and even perform interprocess communication (IPC).\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Services", - "url": "https://developer.android.com/guide/components/services", - "type": "article" - } - ] - }, - "nkcdjrswv0WCzUs48BAt9": { - "title": "Content Provider", - "description": "A **Content Provider** in Android is a key component that allows applications to securely share data with other applications. They act as a layer between databases and applications to enhance data security. Content providers manage access to a structured set of data by handling data transactions, implementing data security, and maintaining isolation between applications. 
They provide an abstracted interface which is used to access data, while the underlying storage method (Like SQLite database, web, or any other method) remains hidden. This mechanism aids in retrieving data from a non-relational source in a structured way. They're used primarily when data needs to be shared between multiple applications, not just within a single application.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Content Provider", - "url": "https://developer.android.com/guide/topics/providers/content-providers", - "type": "article" - } - ] - }, - "tFuAToid1Fkmu96BDtW7K": { - "title": "Broadcast Receiver", - "description": "**Broadcast Receivers** in Android are components that respond to system-wide broadcast announcements. They can be registered to respond to a specific type of broadcasts or implement a user-defined broadcast. While you can initiate a broadcast from your app, they are generally used for receiving system notifications or communicating with other applications. However, keep in mind that they cannot display a user interface, but they can start activities if necessary, which do have a user interface. A `BroadcastReceiver` class must override the `onReceive()` method where each message is received as an `Intent` object parameter.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Broadcast Receiver", - "url": "https://developer.android.com/reference/android/content/BroadcastReceiver", - "type": "article" - } - ] - }, - "hv_9imIQpthxEaMLXEUHI": { - "title": "Intent", - "description": "Intent in Android is a software mechanism used for late runtime binding between components, such as activities, content providers, and services. 
It is essentially a passive data structure holding an abstract description of an operation that the Android system is requested to perform.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Intent", - "url": "https://developer.android.com/reference/android/content/Intent", - "type": "article" - } - ] - }, - "FVg438cVBBzqJFkGWVbQM": { - "title": "Activity LifeCycle", - "description": "The **Activity Lifecycle** in Android represents a series of states or events that an activity can go through from its creation to its destruction. The primary states or events are `onCreate()`, `onStart()`, `onResume()`, `onPause()`, `onStop()`, `onDestroy()`, and `onRestart()`. The method `onCreate()` is called when the activity is first created, followed by `onStart()` when the activity becomes visible to the user. The `onResume()` method executes when the user starts interacting with the application. `onPause()` and `onStop()` methods are invoked when the application is no longer in the foreground or visible to the user. The `onDestroy()` method is used when the activity is being completely removed from the memory. The `onRestart()` method is called after the system stops the activity and is about to start it again. The proper handling of these states ensures the efficient use of resources and a smooth user experience.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Activity Lifecycle", - "url": "https://developer.android.com/guide/components/activities/activity-lifecycle", - "type": "article" - } - ] - }, - "oUjetA2eduvQIeLcQlLcu": { - "title": "State Changes", - "description": "\"Activity\" is a crucial component that represents a single screen with a user interface. One or more active activities make up an Application. These activities can go through different states in their lifecycle, often due to user interaction or system interruption. 
The primary states of an Activity include `Created`, `Started`, `Resumed`, `Paused`, `Stopped`, `Restarted`, and `Destroyed`. The \"Created\" state occurs when an activity instance is being created. The \"Started\" state is when the activity is visible to the user, while \"Resumed\" is when the activity is interacting with the user. An activity is \"Paused\" when it loses focus but is partly visible, \"Stopped\" when it's not visible, \"Restarted\" when the activity is about to be started, and \"Destroyed\" when the activity is finished or the system is temporarily destroying it.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Activity Lifecycle", - "url": "https://developer.android.com/guide/components/activities/activity-lifecycle", - "type": "article" - } - ] - }, - "-O-G9bg36ut8NnZcdOaop": { - "title": "Tasks & Backstack", - "description": "The **tasks backstack** in Android refers to the way Android manages and arranges tasks in a stack-like structure. Every task has a stack of activities, which is referred to as the task's back stack. The activities are placed in the order they are opened. When a new activity is started, it is placed at the top of the stack and becomes the running activity, while the previous activity is paused and put into the back stack. When you press the back button, the current activity is destroyed and the activity at the top of the back stack becomes active again. Android defines how to navigate between tasks and activities using this back stack concept.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Tasks and Backstack", - "url": "https://developer.android.com/guide/components/activities/tasks-and-back-stack", - "type": "article" - } - ] - }, - "gGdz3j33x0gfrFDp_rw8Z": { - "title": "Implicit Intents", - "description": "**Implicit Intents** do not specify the target component explicitly like Explicit Intents. 
Instead, they allow the system to find a suitable component matching the Intent description to handle the request. The system will find an activity that can handle this intent by comparing the `` section in the `AndroidManifest.xml` of all apps installed on the device against the Implicit Intent.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Implicit Intents", - "url": "https://developer.android.com/guide/components/intents-filters#implicit", - "type": "article" - } - ] - }, - "TmIeCF3xVCe5Sy3ITmM31": { - "title": "Explicit Intents", - "description": "**Explicit Intents** are primarily used within an application's own boundaries. In explicit intents you specify the component that needs to be responded to the intent. Therefore, the target component must be specified by calling methods such as `setComponent(ComponentName)`, `setClass(Context, Class)`, or `setClassName(String, String)`. This means that explicit intents are typically used for launching activities, broadcasting messages, starting services within the app. Explicit intents are not resolved by the system but are passed to the component identified in the intent.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Explicit Intents", - "url": "https://developer.android.com/guide/components/intents-filters#explicit", - "type": "article" - } - ] - }, - "b-sfh6NoS-APqaNKm5L5S": { - "title": "Intent Filters", - "description": "`Intent Filters` in Android are essential components of the Android system where you can declare the capabilities of your activities, services, and broadcast receivers. An intent filter is an expression found in your app's manifest file, defined in the `` XML element. Android uses these filters to determine the appropriate components for incoming intents, which can be either explicit or implicit. Your app's ability to respond to intents depends on the filters you define. 
The filters are set of conditions comprised of `action`, `category`, and `data` which your activity or service is able to perform. If the incoming `Intent` matches with defined `Intent Filters`, Android system will permit that `Intent` to your Component (Activity, Service, or Broadcast Receiver).\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Intent Filters", - "url": "https://developer.android.com/guide/components/intents-filters", - "type": "article" - } - ] - }, - "jePGzTejFe4ryA5qFFmjl": { - "title": "Design & Architecture", - "description": "**Design Architecture** refers to structuring the code in a way that increases its readability, maintainability, and testability. There are several ways to design architecture like Model-View-Controller (MVC), Model-View-Intent (MVI). Each of these define the interaction between the data, the logic, and the UI layers. Google’s recommended architectural pattern is Android Architecture Components which follow the principles of the MVVM pattern. Bear in mind that an architecture is not a rigid structure that fits all solutions. 
Rather, it serves as a guideline and can be tweaked as necessary.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Android Architecture Components", - "url": "https://developer.android.com/topic/libraries/architecture", - "type": "article" - }, - { - "title": "Model-View-Controller (MVC)", - "url": "https://en.wikipedia.org/wiki/Model%E2%80%93view%E2%80%93controller", - "type": "article" - }, - { - "title": "Model-View-Presenter (MVP)", - "url": "https://en.wikipedia.org/wiki/Model%E2%80%93view%E2%80%93presenter", - "type": "article" - }, - { - "title": "Model-View-ViewModel (MVVM)", - "url": "https://en.wikipedia.org/wiki/Model%E2%80%93view%E2%80%93viewmodel", - "type": "article" - }, - { - "title": "Model-View-Intent (MVI)", - "url": "https://www.raywenderlich.com/817602-mvi-architecture-for-android-tutorial-getting-started", - "type": "article" - } - ] - }, - "Dp2DOX10u2xJUjB8Okhzh": { - "title": "Frame", - "description": "**FrameLayout** is a simple ViewGroup subclass in Android that is designed to hold a single child view or a stack of overlapping child views. It positions each child in the top-left corner by default and allows them to overlap on top of each other, which makes it useful for situations where you need to layer views on top of one another.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Android developers: Frame Layout", - "url": "https://developer.android.com/reference/android/widget/FrameLayout", - "type": "article" - } - ] - }, - "U8iMGGOd2EgPxSuwSG39Z": { - "title": "Linear", - "description": "**LinearLayout** is a view group that aligns all children in a single direction, vertically or horizontally. 
You can specify the layout direction with the `android:orientation` attribute.\n\n**LinearLayout** was commonly used in earlier Android development, but with the introduction of ConstraintLayout, it’s less frequently used in modern apps.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Linear Layout", - "url": "https://developer.android.com/develop/ui/views/layout/linear", - "type": "article" - } - ] - }, - "yE0qAQZiEC9R8WvCdskpr": { - "title": "Relative", - "description": "A **RelativeLayout** in Android is a type of ViewGroup that allows you to position child views relative to each other or relative to the parent layout. It's a flexible layout where you can arrange the child views in relation to one another based on certain rules, making it suitable for creating complex UI designs.\n\n**RelativeLayout** was commonly used in earlier Android development, but with the introduction of `ConstraintLayout`, it's less frequently used in modern apps.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Relative Layout", - "url": "https://developer.android.com/develop/ui/views/layout/relative", - "type": "article" - } - ] - }, - "3fFNMhQIuuh-NRzSXYpXO": { - "title": "Constraint", - "description": "Lets you create large, complex layouts with a flat view hierarchy—no nested view groups. It's similar to `RelativeLayout` in that all views are laid out according to relationships between sibling views and the parent layout, but it's more flexible than RelativeLayout and easier to use. 
Its available on xml and jetpack compose.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Android: ConstraintLayout in XML", - "url": "https://developer.android.com/develop/ui/views/layout/constraint-layout", - "type": "article" - }, - { - "title": "Android: ConstraintLayout in Compose", - "url": "https://developer.android.com/develop/ui/compose/layouts/constraintlayout", - "type": "article" - } - ] - }, - "recycleview@xIvplWfe-uDr9iHjPT1Mx.md": { - "title": "RecycleView", - "description": "", - "links": [] - }, - "znvZp24L-PcQwkSObtixs": { - "title": "TextView", - "description": "TextView is a widget that is seen in every android application. This widget is used to display simple text within the android application.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "TextView", - "url": "https://developer.android.com/reference/android/widget/TextView", - "type": "article" - } - ] - }, - "2iDJrxjXOt7o2fPp2HfRl": { - "title": "Fragments", - "description": "A `Fragment` represents a reusable portion of your app's UI. A fragment defines and manages its own layout, has its own lifecycle, and can handle its own input events. Fragments can't live on their own. They must be hosted by an activity or another fragment. The fragment’s view hierarchy becomes part of, or attaches to, the host’s view hierarchy.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Android Developers: Fragments", - "url": "https://developer.android.com/guide/fragments", - "type": "article" - } - ] - }, - "boMz0HZlMAsLdCZlpUo-H": { - "title": "EditText", - "description": "`EditText` is a fundamental UI element in Android Studio, used for allowing users to input and edit text within an application. 
It is a subclass of `TextView` that provides additional features to handle user input.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Android developers: EditText", - "url": "https://developer.android.com/reference/android/widget/EditText", - "type": "article" - } - ] - }, - "Mtx0bY0drmaTw8sCM5YTl": { - "title": "Dialogs", - "description": "A `DialogFragment` is a special fragment subclass that is designed for creating and hosting dialogs. Although you don't need to host your **dialog** within a fragment, doing so lets the `FragmentManager` manage the state of the dialog and automatically restore the dialog when a configuration change occurs. Learn more from the following resources:", - "links": [ - { - "title": "Android Developers: Dialogs", - "url": "https://developer.android.com/guide/fragments/dialogs", - "type": "article" - } - ] - }, - "WhfzFOUpm0DFEj7Oeq21R": { - "title": "Buttons", - "description": "A `button` consists of text or an icon, or both, that communicates what action occurs when the user taps it.\n\nButtons are UI widgets that allow users to interact with an application by tapping on them. A button typically consists of text, an icon, or a combination of both, and communicates what action will occur when the user taps it.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Android Developers: Button", - "url": "https://developer.android.com/develop/ui/views/components/button", - "type": "article" - }, - { - "title": "Create a Button - Android", - "url": "https://developer.android.com/quick-guides/content/create-button", - "type": "article" - } - ] - }, - "BVgO9n7tGlVdiS72-hFSd": { - "title": "Toast", - "description": "A `toast` provides simple feedback about an operation in a small popup. It only fills the amount of space required for the message and the current activity remains visible and interactive. 
Toasts automatically disappear after a timeout.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Toast", - "url": "https://developer.android.com/guide/topics/ui/notifiers/toasts", - "type": "article" - } - ] - }, - "A4rtNULX_MoV93IH1Lgqw": { - "title": "ImageView", - "description": "Displays image resources, for example Bitmap or Drawable resources. ImageView is also commonly used to apply tints to an image and handle image scaling.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Android Developers: ImageView", - "url": "https://developer.android.com/reference/android/widget/ImageView", - "type": "article" - } - ] - }, - "Z4Tbd5ClnqCXGPGG09F-G": { - "title": "Bottom Sheet", - "description": "`Bottom sheets` are surfaces containing supplementary content that are anchored to the bottom of the screen.\n\nThere are several attributes that can be used to adjust the behavior of both standard and modal bottom sheets.\n\nBehavior attributes can be applied to standard bottom sheets in xml by setting them on a child View set to `app:layout_behavior` or programmatically.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Android developers: Bottom Sheets", - "url": "https://developer.android.com/reference/com/google/android/material/bottomsheet/BottomSheetDialog", - "type": "article" - } - ] - }, - "EzLjX4iRT7AxkAOsJYnSU": { - "title": "ListView", - "description": "Displays a vertically-scrollable collection of views, where each view is positioned immediately below the previous view in the list.\n\nFor a more modern, flexible, and performant approach to displaying lists, use `RecyclerView`.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "ListView", - "url": "https://developer.android.com/reference/android/widget/ListView", - "type": "article" - } - ] - }, - "amTxz7mS98lkhOrNMJXG_": { - "title": "Drawer", - "description": "The **Navigation Drawer** in Android 
is a sliding menu from the left that simplifies navigation between important app links. It opens by sliding or via an icon in the `ActionBar`. It’s an overlay panel that replaces a screen dedicated to displaying options.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Android developers: DrawerLayout", - "url": "https://developer.android.com/reference/androidx/drawerlayout/widget/DrawerLayout", - "type": "article" - }, - { - "title": "Navigate Drawer Tutorial", - "url": "https://www.digitalocean.com/community/tutorials/android-navigation-drawer-example-tutorial", - "type": "article" - } - ] - }, - "pEBpXv3Jf1AzBNHlvVrG8": { - "title": "Tabs", - "description": "Tabs in Android Studio are a UI component used to organize content into multiple sections, allowing users to navigate between them by selecting the corresponding tab. This component is commonly used when there is a need to present different types of content in a single screen, like different categories, settings, or pages within an app.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Material Tabs", - "url": "https://developer.android.com/reference/com/google/android/material/tabs/package-summary", - "type": "article" - } - ] - }, - "Xn1VQ-xOT67ZfJJTM4r1p": { - "title": "Animations", - "description": "`Animations` can add visual cues that notify users about what's going on in your app. They are especially useful when the UI changes state, such as when new content loads or new actions become available. 
Animations also add a polished look to your app, which gives it a higher quality look and feel.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Google developers: Animations", - "url": "https://developer.android.com/develop/ui/views/animations/overview", - "type": "article" - }, - { - "title": "Google developers: Animations", - "url": "https://www.youtube.com/watch?v=N_x7SV3I3P0", - "type": "video" - } - ] - }, - "60Vm-77rseUqpMiFvp-dA": { - "title": "Jetpack Compose", - "description": "`Jetpack Compose` is a modern toolkit for building native Android UI. It simplifies and accelerates UI development on Android with less code, powerful tools, and intuitive Kotlin APIs. `Jetpack Compose` offers a declarative approach to designing UI, where you can simply describe what your UI should look like at any given point of your app’s state, and `Compose` takes care of updating the view hierarchy, making UI development more efficient. It also integrates well with existing Android apps, letting you adopt its benefits at your own pace.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Jetpack Compose", - "url": "https://developer.android.com/jetpack/compose", - "type": "article" - }, - { - "title": "Jetpack Compose Crash Course", - "url": "https://www.youtube.com/watch?v=6_wK_Ud8--0", - "type": "video" - } - ] - }, - "xV475jHTlLuHtpHZeXb7P": { - "title": "App Shortcuts", - "description": "App shortcuts in Android are designed to provide quick and convenient routes to specific actions or functions within your app from the device home screen. To use them, long-press an app's icon and a pop-up menu will appear with the available shortcuts. Depending on the app, you might be able to send a message, make a booking, navigate home, or perform some other specific task without having to first open the app and navigate to the desired function. 
These shortcuts can also be moved and placed individually on the home screen for even faster access.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "App Shortcuts", - "url": "https://developer.android.com/guide/topics/ui/shortcuts", - "type": "article" - } - ] - }, - "o5rzmnaQeiSh9ocvfJPpK": { - "title": "Navigation Components", - "description": "The **Navigation Components** are part of Android Jetpack and are designed to simplify the implementation of navigation in your Android app. These components help you follow best practices, handle deep linking, and provide a consistent user experience across deep and conditional navigation. They also automate many common tasks, such as handling Up and Back actions correctly across many different types of devices. The Navigation component consists of three key parts which are Navigation graph, NavHost, and NavController.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Navigation Components", - "url": "https://developer.android.com/guide/navigation", - "type": "article" - } - ] - }, - "Bz-BkfzsDHAbAw3HD7WCd": { - "title": "MVI", - "description": "The **MVI** `Model-View-Intent` pattern is a reactive architectural pattern, similar to **MVVM** and **MVP**, focusing on immutability and handling states in unidirectional cycles. The data flow is unidirectional: Intents update the Model's state through the `ViewModel`, and then the View reacts to the new state. This ensures a clear and predictable cycle between logic and the interface.\n\n* Model: Represents the UI state. 
It is immutable and contains all the necessary information to represent a screen.\n* View: Displays the UI state and receives the user's intentions.\n* Intent: The user's intentions trigger state updates, managed by the `ViewModel`.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "MVI with Kotlin", - "url": "https://proandroiddev.com/mvi-architecture-with-kotlin-flows-and-channels-d36820b2028d", - "type": "article" - } - ] - }, - "pSU-NZtjBh-u0WKTYfjk_": { - "title": "MVVM", - "description": "The `Model-View-ViewModel` (MVVM) pattern is a software architectural pattern commonly used in UI development. It is designed to separate the concerns of an application, making the code more modular, testable, and maintainable.\n\nComponents:\n\n* `Model`: Refers either to a domain model, which represents real state content (an object-oriented approach), or to the data access layer, which represents content.\n* `View`: The view is the structure, layout, and appearance of what a user sees on the screen.\n* `View model`: The view model is an abstraction of the view exposing public properties and commands. The view model has been described as a state of the data in the model.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Android Developers: View Model", - "url": "https://developer.android.com/topic/libraries/architecture/viewmodel", - "type": "article" - }, - { - "title": "Wikipedia", - "url": "https://en.wikipedia.org/wiki/Model%E2%80%93view%E2%80%93viewmodel", - "type": "article" - } - ] - }, - "aF_xFIqTjQbENtC7pkXvJ": { - "title": "MVP", - "description": "The MVP `Model View Presenter` pattern is a derivative of the well-known MVC `Model View Controller` pattern and is one of the most popular patterns for organizing the presentation layer in Android applications.\n\nMVP is divided into three components:\n\n* `Model`: Responsible for managing the data input to the app. 
This can often be an Interactor or UseCase, handling the business logic and data operations.\n* `View`: Takes care of updating the graphical part of the application. It acts as a passive view, only receiving data and requesting actions to be performed.\n* `Presenter`: Handles all the logic related to the graphical interface that the View requests. It provides the View with the data it needs to display on the screen.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Model View Presenter", - "url": "https://en.wikipedia.org/wiki/Model%E2%80%93view%E2%80%93presenter", - "type": "article" - } - ] - }, - "w1A6wPKSd3Yh2luuHV-aE": { - "title": "MVC", - "description": "MVC or `Model View Controller` is a software design pattern commonly used for developing user interfaces that divides the related program logic into three interconnected components.\n\nComponents:\n-----------\n\n* `Model`: The internal representations of information. This can often be an Interactor or UseCase\n* `View`: The interface that presents information to and accepts it from the user\n* `Controller`: The controller contains logic that updates the model and/or view in response to input from the users of the app.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "MVC", - "url": "https://developer.mozilla.org/en-US/docs/Glossary/MVC", - "type": "article" - }, - { - "title": "Model View Controller", - "url": "https://en.wikipedia.org/wiki/Model%E2%80%93view%E2%80%93controller", - "type": "article" - } - ] - }, - "ZF5XgO7I_J9928bD3CVXo": { - "title": "Repository Pattern", - "description": "The `Repository Pattern` separates the data sources from the rest of the application. It acts as a mediator between different data sources, such as persistent models, web services, or caches. Instead of having the network and database calls spread out throughout your ViewModel, they are encapsulated within a Repository class. 
This separation will make the code clean, easy to read and testable. It provides a simple API for data access, the rest of the app doesn't need to know where the data is coming from it just asks the repository.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Repository Pattern", - "url": "https://en.wikipedia.org/wiki/Repository_pattern", - "type": "article" - } - ] - }, - "784fiIdKrQDlIm3VIiJQN": { - "title": "Builder Pattern", - "description": "The **Builder Pattern** in Android is used to construct complex objects step by step. It separates the construction of an object from its representation. The pattern involves a `Builder` class which is used to construct the `Product` class. The Builder class typically has a method for each part that may be used to construct a Product. It also has a method for returning the final product. The Builder pattern is useful in situations where you might have multiple ways to create an object or when an object requires a large number of parameters for its constructor.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Builder Pattern", - "url": "https://developer.android.com/kotlin/style-guide#builder-pattern", - "type": "article" - }, - { - "title": "Android Builder Pattern", - "url": "https://medium.com/kayvan-kaseb/builder-design-pattern-in-android-a38dccb75485", - "type": "article" - } - ] - }, - "DeOxj6RzQBYfEWV-M1Ybm": { - "title": "Factory Pattern", - "description": "The **Factory Pattern** is part of the Creational Design Patterns. This pattern provides an interface for creating objects in a superclass, but allows subclasses to alter the type of objects that will be created. It introduces an abstraction layer between the client code and the concrete objects. Normally, this is achieved by using a factory method to create objects instead of using constructors. 
The instance of the class is usually created by a method, referred to as a `factory method`, which is either specified in an interface and implemented in implementing classes or implemented in a base class which may be optionally overridden by derived classes. The Factory Method is used when we want to provide users with a way to create an instance of a class from one of several possible classes that share a common super class.\n\nHere is a basic example of the Factory Pattern:\n\n public abstract class Animal {\n public abstract String makeSound();\n }\n \n public class Dog extends Animal {\n @override\n public String makeSound() {\n return \"Woof\";\n }\n }\n \n public class Cat extends Animal {\n @override\n public String makeSound() {\n return \"Meow\";\n }\n }\n \n public class AnimalFactory {\n public Animal createAnimal(String type) {\n if (\"Dog\".equals(type)) {\n return new Dog();\n } else if (\"Cat\".equals(type)) {\n return new Cat();\n }\n return null;\n }\n }\n \n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Android: Factory Pattern", - "url": "https://developer.android.com/guide/components/intents-filters#factory", - "type": "article" - } - ] - }, - "N_auRfGKkeIIc-qiHLkR_": { - "title": "Observer Pattern", - "description": "The **Observer Pattern** is a software design pattern in which an object, known as the subject, maintains a list of its dependents, called observers, and notifies them automatically of any state changes. This is usually done by calling one of their methods. It's mainly used for implementing distributed event handling systems and is viewed as a good practice to follow, making your design more robust, flexible, and scalable. The subject to be observed triggers events and observers react to the change or the event that they are listening to. 
In Android, observable libraries like `LiveData`, `RxJava`, `Flow`, and other reactive streams allow the implementation of observer pattern.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Observer Pattern", - "url": "https://en.wikipedia.org/wiki/Observer_pattern", - "type": "article" - }, - { - "title": "Observer Pattern", - "url": "https://refactoring.guru/design-patterns/observer", - "type": "article" - } - ] - }, - "W-WTIiQml8dLK6i_V69JK": { - "title": "Flow", - "description": "`Flow` in Android development is part of the Kotlin Coroutines library. It is a type that can emit multiple values sequentially, making it perfect for working with any data streams or any streams of events that aren't instantaneous. Like Observables, `Flow` is also based on the observer pattern, meaning it can emit values and these emissions can be observed and reacted to. However, `Flow` comes with built-in back pressure handling and the ability to transform, filter, or combine these flows in a sequence. Along with Coroutines, `Flow` encourages a more predictable and simplified concurrency design without callback hell problem.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Flow", - "url": "https://kotlinlang.org/docs/flow.html", - "type": "article" - }, - { - "title": "Flow: Coroutines", - "url": "https://kotlinlang.org/docs/flow-coroutines.html", - "type": "article" - } - ] - }, - "xk0vnWr7uESdzYRxwFjoK": { - "title": "RxJava", - "description": "RxJava, standing for Reactive Extensions for the JVM, is a library in the Java and Android ecosystem that allows developers to write asynchronous, event-based programs. It is developed according to reactive programming principles and it supports multi-threading operations. One can create data streams from just about anything - variables, data structures, inputs, etc. 
and these streams could be used with functional programming methods to process the data.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "RxJava on GitHub", - "url": "https://github.com/ReactiveX/RxJava", - "type": "opensource" - } - ] - }, - "7rbsp1o5bzIJP11BRIoeG": { - "title": "RxKotlin", - "description": "`RxKotlin` is a lightweight library that adds convenient extension functions to `RxJava`. It allows developers to use RxJava with Kotlin in a more idiomatic way, thus making code more readable and understandable. While `RxJava` is a Java-based implementation of Reactive Extensions, `RxKotlin` is essentially a wrapper that includes extension functions and other constructs to take full advantage of Kotlin's features, such as its syntactic simplicity and ease of use.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "RxKotlin", - "url": "https://github.com/ReactiveX/RxKotlin", - "type": "opensource" - } - ] - }, - "TiokceMGU9caqiR0lbFYL": { - "title": "LiveData", - "description": "`LiveData` is a data holder class that can be observed within a given lifecycle. This means that an `Observer` can be added in a pair with a `LifecycleOwner`, and this observer will be notified about modifications of the `LiveData` object only if the associated `LifecycleOwner` is in active state. `LiveData` respects the lifecycle state of app components, such as activities, fragments, or services, and it only updates app-component observers that are in an active lifecycle state. Furthermore, `LiveData` automatically removes the observers when their associated `LifecycleOwner` moves to the `Destroyed` state. 
This combination of `LiveData` and `LifecycleOwner` helps you to manage appropriate and efficient updates because `LiveData` takes into consideration the lifecycle state of your app components.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "LiveData", - "url": "https://developer.android.com/topic/libraries/architecture/livedata", - "type": "article" - } - ] - }, - "qtXM9K7wyjOFuEMlZrB3C": { - "title": "Dependency Injection", - "description": "`Dependency Injection` is a technique where an object does not need to create its own dependencies; instead, dependencies are provided (or injected) at runtime. This technique is highly beneficial in Android Development. It helps in creating loosely coupled and easily testable code. For example, the `Retrofit` instance that your application requires to make network calls can be created somewhere else and can be injected whenever required using libraries like `Dagger`, `Koin` or `Hilt`. The `ViewModel` instances can also be injected rather than being created in the required classes directly. Through dependency injection, plugins ensure the code becomes easier to change, understand, and maintain, hence, improving the quality of the code.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Dependency Injection", - "url": "https://developer.android.com/training/dependency-injection", - "type": "article" - } - ] - }, - "CK7Ce632fdTgxeFsRUVvd": { - "title": "Dagger", - "description": "Dagger is a fully static, compile-time dependency injection framework for both Java and Android. It is an adaptation of an earlier version created by Square that's focused on simplicity and speed. Dagger's primary focus is on compile-time analysis of dependencies, code simplicity, and clarity. It uses annotations to define dependencies, thus aiding in easy readability and understanding of the code. Dagger also eliminates the use of reflection to inject dependencies, thus boosting performance. 
It offers custom scopes to control the lifespan of instances and ensures that dependencies are Singleton across the entire lifespan of certain scopes.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Dagger", - "url": "https://dagger.dev/", - "type": "article" - } - ] - }, - "UMqZ-jmXKDXKuu8bzqDH_": { - "title": "Koin", - "description": "Koin is a lightweight dependency injection framework developed specifically for Kotlin developers. It uses functional resolution only - no proxy, no code generation, no reflection, and offers simplicity by leveraging Kotlin's language features. While Koin is not related to the Android platform, it provides specific extensions enabling an efficient integration into your Android applications, including Android Architecture Components and Kotlin Coroutines amongst others.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Koin", - "url": "https://insert-koin.io", - "type": "article" - }, - { - "title": "Android Architecture Components", - "url": "https://developer.android.com/topic/libraries/architecture/index.html", - "type": "article" - }, - { - "title": "@officialKotlin Coroutines", - "url": "https://kotlinlang.org/docs/reference/coroutines-overview.html", - "type": "article" - } - ] - }, - "ooo_k2k_vUBR_jQ7Ke6Et": { - "title": "Hilt", - "description": "Hilt is a dependency injection library for Android that reduces the boilerplate code that you would normally need to write when setting up manual dependency injection in your project. It is based on the Dagger library, but it simplifies the implementation process and streamlines the use of Dagger in Android apps. 
To set it up, you must annotate your Android classes and Hilt will automatically generate and provide the necessary dependencies for you.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Hilt", - "url": "https://developer.android.com/training/dependency-injection/hilt-android", - "type": "article" - } - ] - }, - "dc7k50PjCYZcElHhCk66p": { - "title": "Kodein", - "description": "`Kodein` is one of the recommended dependency injection frameworks suitable for Android development. This open-source Kotlin library simplifies the DI process by allowing developers to bind various types of dependencies, such as singleton, factory, or provider bindings into containers or `Kodein` modules. It promotes dependency declaration where used, instead of prior declaration. It follows a \"Define in Use\" principle that enables easy-to-use, easy-to-debug and very idiomatic Kotlin code. It's also worth noting that `Kodein` works hand in hand with Android's lifecycle and provides easy integration with popular libraries such as Android Architecture Components, leveraging their functionality.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Kodein", - "url": "https://insert-kodein.io/", - "type": "article" - } - ] - }, - "0fNQWRxst8xRstIfPaPO6": { - "title": "Storage", - "description": "On Android devices, storage refers to where your data such as apps, photos, videos, and music are saved. It can be categorized into two types: internal and external storage. Internal Storage is where data is stored that's tied directly to your app. This data is private by default and not accessible by other apps. External Storage, on the other hand, is a shared space where all apps can read and write data. It can be further sub-categorized into Public and Private directories. 
Public directories are shared among all apps while private directories are specific to your app but can be accessed by other apps if they have the appropriate permissions.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Storage", - "url": "https://developer.android.com/guide/topics/data/data-storage", - "type": "article" - } - ] - }, - "PKql1HY0PLMfp50FRELXL": { - "title": "Shared Preferences", - "description": "Shared Preferences in Android are used to store data in key-value pairs. It works similar to a tiny database where you can save small pieces of data such as settings or the state of an application. When data is saved to Shared Preferences, it persists across user sessions, even if your application is killed or gets deleted. Data in Shared Preferences is not typically used for large amounts of data. To perform actions such as saving, retrieving, or editing data in Shared Preferences, you use an instance of `SharedPreferences.Editor`.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Shared Preferences Documentation", - "url": "https://developer.android.com/training/data-storage/shared-preferences", - "type": "article" - }, - { - "title": "SharedPreferences in Android", - "url": "https://www.youtube.com/watch?v=rJ3uwqko9Ew", - "type": "video" - } - ] - }, - "GWq3s1iTxQOp1BstHscJ9": { - "title": "DataStore", - "description": "`DataStore` is a new and improved data storage solution by Android, meant to supersede `SharedPreferences`. It is important to understand that it comes in two different implementations: `Preferences DataStore` and `Proto DataStore`. `Preferences DataStore` uses key-value pairs similar to `SharedPreferences`, but it's more robust and handles runtime exceptions more efficiently. On the other hand, `Proto DataStore` uses custom data types to provide type safety. 
It lets you leverage the power of Protocol Buffers, a language-neutral, platform-neutral mechanism for serializing structured data, as the data storage format. Operating on data in `DataStore` is transactional, meaning that if an error occurs during an operation, all changes are rolled back, so the data remains in a consistent state.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Documentation", - "url": "https://developer.android.com/topic/libraries/architecture/datastore", - "type": "article" - }, - { - "title": "Introduction to DataStore", - "url": "https://www.youtube.com/watch?v=9ws-cJzlJkU&list=PLWz5rJ2EKKc8to3Ere-ePuco69yBUmQ9C", - "type": "video" - } - ] - }, - "Bfg4So5RlI09zFNcburJd": { - "title": "Room Database", - "description": "\"Room\" is a persistence library introduced by Google that provides an abstraction layer over SQLite to help with robust database access while harnessing the full power of SQLite. Room supports the creation of databases and defines queries in compile-time-checked SQL strings. These databases belong to the data classes that you create representing your app's data. Room comprises three main components: **Database**, a container that holds your app's data tables; **Entity**, representing a table within the database; and **DAO (Data Access Object)**, containing SQL query methods to interact with the database.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Room Database", - "url": "https://developer.android.com/training/data-storage/room", - "type": "article" - }, - { - "title": "The Full Beginner's Guide for Room in Android", - "url": "https://www.youtube.com/watch?v=bOd3wO0uFr8&t=10s", - "type": "video" - } - ] - }, - "A4kdaj6AFueUgPI7hwKi5": { - "title": "File System", - "description": "The Android operating system uses a specific File System structure to store and manage files. It’s primarily based on the Linux File system, with some specific Android features. 
The File System includes several key directories that are used for specific purposes. For instance, directories such as `/system` hold system apps and firmware, while `/data` contains user data, settings and installed applications, and `/sdcard` usually represents an internal or external SD card for additional storage. It's worth mentioning directories like `/proc`, `/dev`, and `/sys` which are virtual file systems and house important system files. As an Android developer, understanding these directories can help you interact with Android's file system more effectively. Note that access to some of these directories may be restricted depending on system permissions.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Android File System", - "url": "https://developer.android.com/training/data-storage/", - "type": "article" - }, - { - "title": "Android File System", - "url": "https://www.androidauthority.com/android-file-system-1010817/", - "type": "article" - } - ] - }, - "Yb6aKJMMCxU1QVltWg3Dr": { - "title": "Network", - "description": "In Android, the `Network` component offers vital capabilities that enable communication and interaction among users, between users and remote servers, and between users and cloud services. Android provides a variety of APIs and services to interact with networks. The primary APIs are the `ConnectivityManager`, `WifiManager`, `TelephonyManager`, and `BluetoothManager` APIs, among others. 
ConnectivityManager, for example, can tell you about network connectivity changes.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Network", - "url": "https://developer.android.com/guide/topics/connectivity", - "type": "article" - }, - { - "title": "ConnectivityManager", - "url": "https://developer.android.com/reference/android/net/ConnectivityManager", - "type": "article" - }, - { - "title": "WifiManager", - "url": "https://developer.android.com/reference/android/net/wifi/WifiManager", - "type": "article" - } - ] - }, - "dDMRYiqrKyOBnRRQc8zsp": { - "title": "Retro", - "description": "Retrofit is a type-safe HTTP client for Android. It's designed to connect your application with an API or a back-end web service. Retrofit uses annotations to encode details about the API's operations and requests, such as the HTTP method (`GET`, `POST`, `PUT`, `DELETE`, `HEAD`) and the query parameters. The main advantage of **Retrofit** over other similar libraries is in its simplicity and intuitiveness, and it efficiently handles all network calls.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Retrofit Documentation", - "url": "https://square.github.io/retrofit/", - "type": "article" - }, - { - "title": "Retrofit in Android Studio", - "url": "https://www.youtube.com/watch?v=KJSBsRKqNwU", - "type": "video" - } - ] - }, - "5pVuwOItAhUxxJX8ysAsn": { - "title": "OkHttp", - "description": "`OkHttp` is an HTTP client that's extremely efficient, enabling several advanced features in Android app or other platforms that use Java. Developed by Square, it's built for high efficiency and capacity, simplifying many networking tasks, including connection pooling, response caching, and request retries. OkHttp allows seamless recovery from network issues, minimizing the loss of data. 
The library ensures fewer errors and higher quality of service by using the modern TLS encryption, extensible request and response models, and a fluent API for ease of use and integration.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "OkHttp on GitHub", - "url": "https://github.com/square/okhttp", - "type": "opensource" - }, - { - "title": "OkHttp", - "url": "https://square.github.io/okhttp/", - "type": "article" - } - ] - }, - "ww0fTbdXwVr-QIOClU7ng": { - "title": "Apollo-Android", - "description": "**Apollo Android** is a set of tools for using GraphQL with Android, made by the Apollo community developers. It's fully written in Kotlin and it was designed to seamlessly integrate with any Android app, making fetching data across network and handling data in the client-side a breeze. Apollo Android runs your queries and mutations and returns results as generated Kotlin types. It also normalizes your data and caches your results for further speed enhancements. It operates both on Android and Kotlin/JVM backend environment. It's also coroutines-first making handling concurrency easy and effective. To use Apollo Android, you'll set up the plugin, point it at your GraphQL schema, and write GraphQL queries.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "apollographql/apollo-kotlin - GraphQL for Android", - "url": "https://github.com/apollographql/apollo-kotlin", - "type": "opensource" - }, - { - "title": "Apollo Android", - "url": "https://www.apollographql.com/docs/kotlin/v2/", - "type": "article" - }, - { - "title": "Apollo Docs", - "url": "https://www.apollographql.com/docs/kotlin/", - "type": "article" - } - ] - }, - "cFYZ2C7yNnY6NHKUNP2Z4": { - "title": "Asynchronism", - "description": "Asynchronism in Android is a practice that defines operations, which can run independently from the main operation without following the program's linear flow. 
The Android system uses threads to handle asynchronous processes. These threads function independently, ensuring that complex or time-consuming operations do not interfere with the user interface or other essential parts of the application. Android provides various tools for carrying out asynchronous tasks, such as `Handler`, `ThreadPoolExecutor`, `IntentService`, `AsyncTask`, and `Loader` etc. These tools provide ways to execute tasks on different threads and communicate the results back to the main thread.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Asynchronous Tasks", - "url": "https://developer.android.com/guide/background", - "type": "article" - }, - { - "title": "Asynchronous Task Execution", - "url": "https://medium.com/@cpvasani48/asynchronous-task-execution-in-android-a-guide-with-example-44732744f3b8", - "type": "article" - } - ] - }, - "i_cKmTnGAYw8xpHwZHjAd": { - "title": "Coroutines", - "description": "`Coroutines` refer to a concurrency design pattern that you can use on Android to simplify code that executes asynchronously. `Coroutines` provide a way to write asynchronous, non-blocking code in a natural, sequential manner. The fundamental building blocks of `coroutines` are `suspend` functions which are simply functions that can be paused and resumed at later times. They are the key to writing non-blocking asynchronous code and represent a single unit of asynchronous computation. This aspect of `coroutines` makes them useful for managing long-running tasks that might otherwise block the main thread and cause your application to become unresponsive.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Coroutines", - "url": "https://kotlinlang.org/docs/coroutines-overview.html", - "type": "article" - } - ] - }, - "BeGrA5BDBMZP1Jy7n-wl-": { - "title": "Threads", - "description": "In Android, a `Thread` is a concurrent unit of execution. 
It has its own call stack, but can share its state with other threads in the same process, i.e., they can share the same memory area. They're primarily used in Android to perform operations in the background. One important aspect to note is that Android UI operations are not thread-safe, meaning they should always be done on the UI thread. Operations on `Threads` are typically managed through `Handler`, `Looper` and `MessageQueue` classes. Android also provides high-level constructs like `AsyncTask` and `Loader` for managing threads in relation to the UI.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Threads", - "url": "https://developer.android.com/guide/components/processes-and-threads", - "type": "article" - } - ] - }, - "zXsNEyRbb8UpEOAUv6FpY": { - "title": "RxJava", - "description": "RxJava (Reactive Extensions for the JVM) is a powerful library for composing asynchronous and event-based programs using observable sequences in Java. If you are an Android developer, you might be familiar with callbacks and async tasks to perform long-running operations in the background. However, handling multiple async tasks and nested callbacks can produce code that's complicated to read and maintain. To alleviate such complexities, RxJava provides tools and methods to create, transform and chain Observable sequences in a clean and declarative manner.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "RxJava on GitHub", - "url": "https://github.com/ReactiveX/RxJava", - "type": "opensource" - } - ] - }, - "4h37WBpYxRRyw9oH8ge7o": { - "title": "RxKotlin", - "description": "`RxKotlin` is a lightweight language extension to Java for Android development, enabling Android apps to be built using Kotlin with Reactivex. It brings the power of reactive programming paradigm to Kotlin, extending its capabilities for processing asynchronous streams of data. 
It allows you to express static (e.g., already known) or dynamic (e.g., future unknown) data streams, and perform various operations on them easily. Key concepts of `RxKotlin` include Observables, Observers and Schedulers. Observables represent the data streams, Observers interact with the data stream, and Schedulers determine on which thread operations are performed. RxKotlin helps manage background tasks, handle asynchronous data streams, and implement complex UIs, among others.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "ReactiveX/RxKotlin - RxKotlin on GitHub", - "url": "https://github.com/ReactiveX/RxKotlin", - "type": "opensource" - } - ] - }, - "OAb_JD64uGm2tPoue7w6t": { - "title": "WorkManager", - "description": "`WorkManager` is an Android library introduced by Google to execute tasks in a predictable and reliable manner. It's designed for tasks that require guaranteed execution, even if the app has been closed or the device restarts. It is backwards compatible up to API 14 and uses JobScheduler for API 23 and above, whilst using a combination of BroadcastReceiver + AlarmManager for APIs 14 and up. Regardless of the device API level, WorkManager works for all Android devices. Three types of work are supported by WorkManager - OneTimeWorkRequest, PeriodicWorkRequest, and DelayedWorkRequest.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "WorkManager", - "url": "https://developer.android.com/topic/libraries/architecture/workmanager", - "type": "article" - } - ] - }, - "ZEdn2yy-IwHN3kOYr2ZbC": { - "title": "Common Services", - "description": "Common Services are functional units or components provided by the Android system for use by developers. These services include things such as Location Services (used to determine the device's geographical location), Notification Services (handles the display and management of user notifications), and Sensor Services (interacts with hardware sensors). 
Other common services are Network and Connectivity Services, Account Manager, and Data Storage Services among others. They simplify the development process by handling complex functionalities behind the scenes, allowing developers to focus on the application's specific needs.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Common Services", - "url": "https://developer.android.com/guide/components/services", - "type": "article" - } - ] - }, - "Xv0es_z64vh-QzivMeAT3": { - "title": "Authentication", - "description": "Firebase Authentication in Android provides backend services, easy-to-use SDKs, and ready-made UI libraries to authenticate users to your app. It supports authentication using passwords, popular federated identity providers like Google, Facebook and Twitter, and more. Firebase also facilitates integration of functionality to sign in, sign up, and reset password. Moreover, it can be used to secure your database by implementing role-based access to data and to provide personalized experience according to the user's unique identity.\n\nFirebase Authentication offers two methods to authenticate. These are using an `email/password` login provided by Firebase Authentication or a `federated identity provider` like Google or Facebook. It also covers token-based authentication by creating custom tokens or verifying ID tokens. 
In addition to this, Firebase Authentication works with Firebase's client SDKs for practical use and works for long-running server processes for some of your users.\n\nFirebase Authentication provides a full suite of capabilities even beyond authentication to make your life easier, which includes Security Rules for Cloud Storage and Cloud Firestore, Firebase Dynamic Links, and Firebase Invites.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Firebase Authentication", - "url": "https://firebase.google.com/docs/auth", - "type": "article" - }, - { - "title": "Firebase Authentication: Android", - "url": "https://firebase.google.com/docs/auth/android/start", - "type": "article" - } - ] - }, - "xB4evbD07n1VrHOIpowV4": { - "title": "Crashlytics", - "description": "`Crashlytics` is a lightweight, real-time crash reporter that helps you track, prioritize, and fix stability issues that dismantle your app's quality. It delivers fast and precise information about crashes and helps you gain insights into your app's performance so you can pinpoint the exact line of code your app crashed on. Crashlytics offers a comprehensive suite of features like crash analysis, issue aggregation, and tracking user activities that led to a crash. This tool is now a part of Google's `Firebase` platform but can also be used independently.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Crashlytics Documentation", - "url": "https://firebase.google.com/docs/crashlytics/get-started?platform=android", - "type": "article" - }, - { - "title": "Firebase: Crashlytics", - "url": "https://www.youtube.com/watch?v=LhjTAkifr6g", - "type": "video" - } - ] - }, - "1Tz-Shj_Tuz2U8llEAcLr": { - "title": "Remote Config", - "description": "Firebase Remote Config is a cloud service offered by Firebase. It lets you change the behavior and appearance of your app without requiring a new app release. 
By using Firebase Remote config, you can customize your app for different user segments, manage the core aspects of your app by modifying parameters externally, and conduct A/B tests to improve your app. It works efficiently by using default values that control the behavior and appearance of your app. When your app needs configuration information, it makes a request to the Firebase Remote Config server. If the server has updated values, these values replace the default ones, hence modifying the app's behavior or appearance according to your needs.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Remote Config", - "url": "https://firebase.google.com/docs/remote-config/get-started?platform=android", - "type": "article" - }, - { - "title": "Getting Started with Remote Config", - "url": "https://www.youtube.com/watch?v=pcnnbjAAIkI", - "type": "video" - } - ] - }, - "e3vHFaFFMV7kI9q6yf5e9": { - "title": "Cloud Messaging", - "description": "Firebase Cloud Messaging (FCM) is a powerful, battery-efficient messaging service that enables you to send messages reliably and securely to your Android applications. It enables you to send two types of messages: \"notification messages\" and \"data messages\". Notification messages are primarily meant for user notifications and will only be delivered when the application is in the foreground. On the other hand, data messages can be handled even when the app is in the background or killed and can be used to send custom key-value pairs. 
FCM also supports various additional features, such as topic messaging to send messages to multiple devices subscribed to a common topic, device group messaging for sending messages to groups of user devices, and upstream messaging for sending messages from the client application to the FCM server.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Documentation", - "url": "https://firebase.google.com/docs/cloud-messaging/android/client", - "type": "article" - }, - { - "title": "Firebase Cloud Messaging", - "url": "https://www.youtube.com/watch?v=sioEY4tWmLI&list=PLl-K7zZEsYLkuHRCtHTpi6JYHka8oHLft", - "type": "video" - } - ] - }, - "3EEfKAd-ppIQpdQSEhbA1": { - "title": "FireStore", - "description": "Firestore, often referred to as Firebase Firestore or Cloud Firestore, is a flexible, scalable database for mobile, web, and server development from Firebase and Google Cloud. Firestore comes with features like expressive querying, real-time updates, and automatic multi-region data replication. It is designed to offer seamless integration with other Firebase and Google Cloud products. It provides a cloud-based NoSQL database, which means the data is stored as collections of documents. Each document, in turn, contains a set of key-value pairs. Firestore ensures durable networking, so data syncs across client apps in real-time, even when the device is offline, making it easier for you to work with distributed data that can be kept in sync across various clients.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Firestore", - "url": "https://firebase.google.com/docs/firestore", - "type": "article" - } - ] - }, - "D4ZXQOKJkyFYNZIy-MJ9Y": { - "title": "Google Admob", - "description": "Google AdMob is a mobile advertising platform designed for app developers to monetize their apps by displaying ads from over 1 million Google advertisers. 
AdMob supports a wide range of ad formats, such as banner ads, interstitial ads, video ads, and more. It offers a powerful mediator that enables you to display ads from multiple sources, which includes the Google Mobile Ads SDK, third-party ad networks, and house ad campaigns. Furthermore, AdMob provides analytical tools to better understand your users and maximize ad revenue.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Google AdMob", - "url": "https://admob.google.com/home/", - "type": "article" - }, - { - "title": "Google AdMob Documentation", - "url": "https://admob.google.com/home/get-started/", - "type": "article" - } - ] - }, - "m5rumeynEbS8T27pelr0-": { - "title": "Google Play Services", - "description": "_Google Play Services_ is a proprietary background service and API package for Android devices from Google. Operated by Google, the service provides core functionalities like authentication for Google services, synchronized contacts, access to all the latest user privacy settings, and higher quality, lower-powered location-based services. It also speeds up offline searches, provides more immersive maps, and improves gaming experiences. Google Play Services play a crucial role in the operation of various other applications, including those not developed by Google. 
Moreover, it improves the overall Android experience by speeding up offline searches, providing more detailed maps, enhancing gaming experiences, and more.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Google Play Services", - "url": "https://developer.android.com/google/play-services", - "type": "article" - }, - { - "title": "Google Play Services Documentation", - "url": "https://developer.android.com/google/play-services/overview.html", - "type": "article" - } - ] - }, - "S5FVF9rMgVSSDKXJW2GYb": { - "title": "Google Maps", - "description": "Google Maps is a crucial service on Android, offering powerful, user-friendly mapping technology and local business information. Google Maps features include street maps, satellite imagery, 360° panoramic views of streets (Street View), real-time traffic conditions (Google Traffic), and route planning for traveling by foot, car, bicycle and air, or public transportation. The service's interface includes a function to overlay various layers such as traffic density, public transit lines, and cycling paths. Google Maps for Android also provides an API, which allows developers to interface with and control certain aspects of the Google Maps service in their applications. This capability is subject to certain usage limits and requirements set by Google.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Google Maps", - "url": "https://developers.google.com/maps/documentation/android-sdk/overview", - "type": "article" - }, - { - "title": "Google Maps Documentation", - "url": "https://developers.google.com/maps/documentation/android-sdk/intro", - "type": "article" - } - ] - }, - "77F9F3oI5CPgwgM_hxWfa": { - "title": "Linting", - "description": "`Linting` in Android is a tool that analyzes the source code of your application to identify potential errors, bugs, stylistic errors, and suspicious constructs. 
The term Linting came from a Unix utility that examined C language source code. In Android, it not only checks for potential Java issues but also for XML layout files and provides alerts for issues it encounters regarding usability, performance, and accessibility. For instance, it may scrutinize a draft of your AndroidManifest.xml file for potential duplication of elements or other anomalies that would cause the app to crash.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Linting", - "url": "https://developer.android.com/studio/write/lint", - "type": "article" - } - ] - }, - "zMbXQH17Q52opdbitPzj7": { - "title": "Ktlint", - "description": "`ktlint` is a static code analysis tool. It enforces a highly consistent style and adheres extensively to the official Kotlin coding conventions. `ktlint` does not have any configuration options (by design). The only exceptions are disabling specific rule(s) and specifying indentation size. `ktlint` can check, as well as automatically fix your code. Its main goal is to bring unified code style to your project. It works on the command line as well, so it can be hooked up into your continuous integration pipeline. It also has Ant, Gradle and Maven wrappers. You can use Ktlint on any Android/Kotlin project, as long as you have Gradle or Maven installed.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Ktlint: Static Code Analysis for Kotlin", - "url": "https://github.com/pinterest/ktlint", - "type": "opensource" - }, - { - "title": "Ktlint", - "url": "https://ktlint.github.io/", - "type": "article" - } - ] - }, - "RUvuCp_JK5MQQT13SSHUV": { - "title": "Detekt", - "description": "`Detekt` is a static code analysis tool for the Kotlin programming language. It operates on the abstract syntax tree provided by the Kotlin compiler and can run in the command line or as a task in your Gradle build script. 
Detekt provides complexity reports that can be used to identify overly complex code and help simplify it. It also checks for a variety of potential bugs and code smells, including issues with formatting, naming conventions, exception handling, and more. Moreover, Detekt is highly configurable, allowing you to enable, disable, or modify the behavior of its checks to suit your project's needs.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Detekt: Static Code Analysis for Kotlin", - "url": "https://github.com/detekt/detekt", - "type": "opensource" - }, - { - "title": "Detekt", - "url": "https://detekt.dev/", - "type": "article" - } - ] - }, - "6KbSUAoT_jTudFoIbwMpA": { - "title": "Debugging", - "description": "Debugging is a critical step in the app development process. In Android development, it includes identifying and fixing errors, or bugs, in your code. You can debug Android apps using several tools and techniques. For example, Android Studio, the primary integrated development environment (IDE) for Android, comes with a built-in debugging tool, the Android Debug Bridge (ADB). This command-line tool allows you to communicate with your device and perform various actions like installing and debugging apps. Android Studio also supports step-by-step debugging, where you can set breakpoints in your code and inspect the application state at those points.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Debugging", - "url": "https://developer.android.com/studio/debug", - "type": "article" - } - ] - }, - "VFOD4JrV8kZ2583G3oT95": { - "title": "Timber", - "description": "`Timber` is a logging utility tool that has been specifically extended from the `Log` class of Android. It has been built to simplify the logging process while aiming to reduce the amount of boilerplate code the developer has to write. It was designed and is maintained by Jake Wharton, a renowned contributor in the Android Developer community. 
In Timber, each log message is directed to the next available logger, reducing the redundancy of manually assigning log tags. The simplicity of Timber is highlighted by its ability to log without defining any tag. Most importantly, Timber only logs messages in debug builds by default, avoiding potential data leaks in your production application.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Timber on GitHub", - "url": "https://github.com/JakeWharton/timber", - "type": "opensource" - } - ] - }, - "3i4g9ZWgLxKb2UMgRJi4Q": { - "title": "Leak Canary", - "description": "LeakCanary is a powerful open-source memory leak detection library for Android and Java. It is integrated into your app, and once you run your app, LeakCanary immediately starts watching for memory leaks and captures a memory dump if it detects one. After investigation, it will present a full stack trace to help you pinpoint the exact location of the memory leak. With the LeakCanary's user-friendly interface, you can then analyze the memory leak right in your app. The most recent version of LeakCanary also includes other improvements like automatic detection of leaks in Activity, Fragment, View, ViewModel, LiveData, etc.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "LeakCanary on GitHub", - "url": "https://github.com/square/leakcanary", - "type": "opensource" - }, - { - "title": "Leak Canary", - "url": "https://square.github.io/leakcanary/", - "type": "article" - } - ] - }, - "7RKN1FNtRE_BE6QeAQrKb": { - "title": "Chucker", - "description": "`Chucker` is an open-source debugging library created for Android applications. It has been designed to be easy to use and convenient for developers. This library intercepts and records all HTTP requests and responses inside your application, which helps to visualize and share this information in an understandable and easy-to-read format. 
Using Chucker's distinct features, you can inspect all the HTTP and HTTPS traffic going in and out of your app directly. In addition, it provides other nifty features such as a user-friendly interface to view the server's raw response. It's like having a built-in network inspector in your debugging tool, enabling you to solve network-related issues more efficiently.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "ChuckerTeam/chucker - Chucker On GitHub", - "url": "https://github.com/ChuckerTeam/chucker", - "type": "opensource" - }, - { - "title": "Network Traffic Inspection with Chucker", - "url": "https://medium.com/tech-takeaways/simple-android-network-traffic-inspection-with-chucker-and-seismic-1e6162c51f64", - "type": "article" - } - ] - }, - "ACUJlDDR0jqEohsFzWEoQ": { - "title": "Jetpack Benchmark", - "description": "Jetpack Benchmark is a library within the Android Jetpack Suite that allows developers to quickly and accurately measure the performance of their apps. This library can help measure CPU, memory, and IO performance of code in Android apps. Developers can define some specific code paths to be benchmarked by wrapping the code in `BenchmarkRule.measureRepeated {}`. In addition, it automatically takes care of warmup, measures your code performance, and outputs benchmarking results to the Android Studio's logcat.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Jetpack Benchmark", - "url": "https://developer.android.com/studio/profile/benchmark", - "type": "article" - } - ] - }, - "ZOQm5OlzCA-h_yxywwDrW": { - "title": "Testing", - "description": "**Testing** is a crucial part of the app development process. It involves validating the functionality, performance, usability, and consistency of your app before deploying it to the Play Store. There are two types of testing methods notably used: **Unit testing** and **Instrumentation Testing**. 
Unit testing, as the name suggests, tests each unit or segment of your code separately. It doesn't require Android dependencies and hence, runs faster. Instrumentation testing, on another hand, requires Android dependencies and is slower. Instrumentation testing tests the UIs, simulates user interactions and validates the navigation between different parts of your app. Android provides built-in testing frameworks like `JUnit` for unit testing and `Espresso` for Instrumentation testing.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Testing", - "url": "https://developer.android.com/training/testing", - "type": "article" - } - ] - }, - "-ONSC-ImGSELbamKmjIlH": { - "title": "Espresso", - "description": "Espresso is a testing framework provided by Android to create UI tests for Android applications. It automatically synchronizes your test actions with the UI of your application, ensuring that your test will only proceed when the necessary UI activities have been completed. In Espresso, you can programmatically simulate user interactions like clicking buttons, typing text, or swiping screens, and then examine the UI's state to confirm it's as expected. Espresso tests can run on devices running Android 4.3 (API level 18) or higher.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Espresso", - "url": "https://developer.android.com/training/testing/espresso", - "type": "article" - } - ] - }, - "gvGAwjk_nhEgxzZ_c3f6b": { - "title": "JUnit", - "description": "JUnit is a popular testing framework for Java programming. It forms the basis for many other testing libraries and tools in the Android ecosystem, making it important for any Android developer to become familiar with. The basic use of JUnit involves annotations such as `@Test`, indicating methods that represent a single test case. Other useful features include `@Before` and `@After` which allow for setup and teardown processes to be defined clearly. 
Another powerful feature in JUnit is the ability to create parameterized tests, effectively running the same test multiple times with different inputs.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Documentation", - "url": "https://developer.android.com/training/testing/local-tests", - "type": "article" - }, - { - "title": "Junit for android", - "url": "https://www.youtube.com/watch?v=jE1vQGVHaQA", - "type": "video" - } - ] - }, - "kc6buUsLAeZeUb4Tk0apM": { - "title": "Distribution", - "description": "Distribution in Android refers to the methods and channels you can use to get your Android application into the hands of users. You can choose to distribute your app on the Google Play Store, which is the official app store for the Android operating system. This platform makes your app available to users in various countries around the world. Additionally, you can also opt to distribute your app through other third-party app stores or even your own website. Furthermore, Google provides a range of distribution options such as country targeting, device targeting, and staged roll-outs, which can be customized according to your distribution strategy. Remember, when you submit your application for distribution, you must adhere to the respective app store's policy and content guidelines.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Google Play Developer Distribution", - "url": "https://developer.android.com/distribute", - "type": "article" - } - ] - }, - "T7q_quNaIAuGi96OdnDT1": { - "title": "Firebase Distribution", - "description": "Firebase Distribution is a development tool within the Google Firebase suite that allows you to share pre-release versions of your Android apps with your development team. It enables you to distribute your app binaries to multiple testers and teams to get feedback before the app's official launch. 
Firebase Distribution supports both Android and iOS applications and works in tandem with other features, like Firebase Crashlytics, to effectively manage the testing and debugging aspects of your application's lifecycle.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Firebase Distribution", - "url": "https://firebase.google.com/docs/app-distribution", - "type": "article" - } - ] - }, - "HgRdgi2Hu4C8YLG5PXfoo": { - "title": "Google Playstore", - "description": "**Google Play Store** is the official distribution channel for Android apps and other digital media content. It is a global online software store developed and operated by Google. Developers submit their applications to Google Play through the Play Console where Google Play's automated systems scan for potentially malicious code and content violations, before they are published on the Play Store. Users can then browse, download, and use these applications on their Android devices or via the web. Purchases, downloads, and user feedback can be tracked via the Google Play Console. Owners of Android devices can also configure automatic updates for the applications they have installed from the store. This platform supports multiple languages and multiple forms of payment methods, making it accessible and customer-friendly.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Google Play Store", - "url": "https://play.google.com/store", - "type": "article" - }, - { - "title": "Google Play Store Documentation", - "url": "https://developer.android.com/distribute/googleplay", - "type": "article" - } - ] - }, - "_FSlD_qTz5Xo0x3pB6sZI": { - "title": "Signed APK", - "description": "A **Signed APK** is a version of your app that you prepare for distribution in the Play Store or other Android markets. When you sign your app using a private key, you authenticate your identity as the developer of the app. 
It is a required step by the Android system that ensures only updates to the APK that are from the original developer will be accepted. The Android system refuses to install an app if it's not signed appropriately, thereby protecting users from potential security risks.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Signing Your App", - "url": "https://developer.android.com/studio/publish/app-signing", - "type": "article" - } - ] - }, - "4_e76QafrB419S2INOeKd": { - "title": "Interface & Navigation", - "description": "In Android development, the concepts of \"Interface\" and \"Navigation\" are crucial. The \"Interface\" often refers to the Graphical User Interface (GUI) that users interact with. This includes buttons, text fields, image views, scroll views and other UI elements that the users can interact with to perform certain tasks. Tools like XML and Material Designs are used for interface design in Android.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Android Developers: Interface", - "url": "https://developer.android.com/guide/topics/ui/declaring-layout.html", - "type": "article" - }, - { - "title": "Android Developers: Navigation", - "url": "https://developer.android.com/guide/navigation", - "type": "article" - } - ] - } -} \ No newline at end of file diff --git a/public/roadmap-content/angular.json b/public/roadmap-content/angular.json deleted file mode 100644 index 965ce885b..000000000 --- a/public/roadmap-content/angular.json +++ /dev/null @@ -1,3231 +0,0 @@ -{ - "KDd40JOAvZ8O1mfhTYB3K": { - "title": "Introduction to Angular", - "description": "Angular is a popular open-source front-end web application framework developed by Google. It is written in TypeScript and allows developers to build dynamic, single-page web applications with ease. 
Angular provides a comprehensive set of features for creating interactive and responsive user interfaces, making it a powerful tool for modern web development.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Angular", - "url": "https://angular.dev/", - "type": "article" - }, - { - "title": "Angular Documentation", - "url": "https://angular.dev/overview", - "type": "article" - }, - { - "title": "Angular Playground", - "url": "https://angular.dev/playground", - "type": "article" - }, - { - "title": "Angular API Reference", - "url": "https://angular.dev/api", - "type": "article" - } - ] - }, - "DE3cMpeRYuUPw2ADtfS-3": { - "title": "Angular Architecture", - "description": "Angular follows a modular architecture pattern, dividing the application into distinct modules, components, services, and other elements, which enhances code organization and maintainability. The key building blocks include modules, which are containers grouping related components, services, directives, and other elements to ensure proper encapsulation and reusability. Components are the building blocks of Angular applications, representing parts of the user interface with associated logic, consisting of templates, styles, and a class defining behavior. 
Services encapsulate reusable business logic, data manipulation, and API communication, enabling data and functionality sharing across components.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Angular Coding Style Guide", - "url": "https://angular.dev/style-guide", - "type": "article" - }, - { - "title": "The Ultimate Guide to Angular Architecture", - "url": "https://angulardive.com/blog/the-ultimate-guide-to-angular-architecture-best-practices-for-efficient-coding-with-angular-framework/", - "type": "article" - }, - { - "title": "Modern Architectures with Angular Part 1: Strategic design with Sheriff and Standalone Components", - "url": "https://www.angulararchitects.io/en/blog/modern-architectures-with-angular-part-1-strategic-design-with-sheriff-and-standalone-components/", - "type": "article" - }, - { - "title": "Optimizing the architecture of large web applications with Angular", - "url": "https://albertobasalo.medium.com/optimizing-the-architecture-of-large-web-applications-with-angular-79d03b01a92b", - "type": "article" - }, - { - "title": "Angular Architecture Concepts and Patterns", - "url": "https://www.bigscal.com/blogs/frontend/angular-architecture-concepts-and-patterns/", - "type": "article" - }, - { - "title": "Top 10 Angular Architecture Mistakes", - "url": "https://angularexperts.io/blog/top-10-angular-architecture-mistakes", - "type": "article" - } - ] - }, - "EbFRcy4s6yzzIApBqU77Y": { - "title": "Setting up a New Project", - "description": "Setting up a new Angular project is streamlined by the **Angular CLI**, a command-line interface that automates the initial setup. First, ensure Node.js and npm are installed, then globally install the CLI itself via `npm install -g @angular/cli`. 
With the CLI in place, navigate to your desired directory and initiate a new project using `ng new your-project-name`, where you'll be prompted to configure options like routing and stylesheet format, with the `--standalone` flag being a common addition for modern projects. Once the project is scaffolded and dependencies are installed, change into your new project directory (`cd your-project-name`) and launch the development server with `ng serve`, making your new Angular application accessible in your browser, typically at `http://localhost:4200/`.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Installation", - "url": "https://angular.dev/installation", - "type": "article" - }, - { - "title": "Setting up the local environment and workspace", - "url": "https://angular.dev/tools/cli/setup-local", - "type": "article" - }, - { - "title": "Build your first Angular app", - "url": "https://angular.dev/tutorials/first-app", - "type": "article" - } - ] - }, - "hpShWwL0M57ZAzqkB4I8t": { - "title": "Angular and History", - "description": "Angular is a TypeScript-based open-source front-end web framework developed and maintained by Google. It is used for building dynamic, single-page web applications (SPAs). 
Angular provides comprehensive tools, including dependency injection, data binding, routing, and testing, to create robust and scalable web applications.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "What is Angular?", - "url": "https://angular.dev/overview", - "type": "article" - }, - { - "title": "Understanding Angular", - "url": "https://angular.io/guide/understanding-angular-overview", - "type": "article" - }, - { - "title": "Getting Started with Angular", - "url": "https://developer.mozilla.org/en-US/docs/Learn_web_development/Core/Frameworks_libraries/Angular_getting_started", - "type": "article" - } - ] - }, - "kGnKzCkQCNFEdgCBRtNuW": { - "title": "Components", - "description": "Components are the main building block for Angular applications. Each component consists of:\n\n* An HTML template that declares what renders on the page\n* A TypeScript class that defines the behavior\n* A CSS selector that defines how the component is used in a template\n* Optionally, CSS styles applied to the template\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Anatomy of a Component", - "url": "https://angular.dev/guide/components", - "type": "article" - }, - { - "title": "Composing with Components in Angular", - "url": "https://angular.dev/essentials/components", - "type": "article" - }, - { - "title": "Explore top posts about Angular", - "url": "https://app.daily.dev/tags/angular?ref=roadmapsh", - "type": "article" - }, - { - "title": "Standalone Components in Angular", - "url": "https://www.youtube.com/watch?v=x5PZwb4XurU", - "type": "video" - } - ] - }, - "Mp056kNnwsRWeEXuhGPy-": { - "title": "Component Anatomy", - "description": "Angular components are the foundational building blocks of Angular applications, designed to encapsulate both the UI and the business logic.\n\nEvery component must have:\n\n* A TypeScript class with behaviors\n* An HTML template\n* A CSS selector\n\nVisit the following resources to 
learn more:", - "links": [ - { - "title": "Anatomy of a Component", - "url": "https://angular.dev/guide/components", - "type": "article" - }, - { - "title": "Anatomy of a Component - Interactive Tutorial", - "url": "https://angular.dev/tutorials/learn-angular/1-components-in-angular", - "type": "article" - } - ] - }, - "dOMvz__EQjO-3p-Nzm-7P": { - "title": "Provider", - "description": "Configure the injector of component with a token that maps to a provider of a dependency.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Configuring Dependency Providers", - "url": "https://angular.dev/guide/di/dependency-injection-providers", - "type": "article" - }, - { - "title": "Component API", - "url": "https://angular.dev/api/core/Component#providers", - "type": "article" - } - ] - }, - "uYHy2yhtTm6fQkKpYx3lU": { - "title": "changeDetection", - "description": "The change-detection strategy to use for this component. When a component is instantiated, Angular creates a change detector, which is responsible for propagating the component's bindings. The strategy is one of:\n\n* `ChangeDetectionStrategy.OnPush` sets the strategy to CheckOnce (on demand).\n* `ChangeDetectionStrategy.Default` sets the strategy to CheckAlways.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Advanced Component Configuration", - "url": "https://angular.dev/guide/components/advanced-configuration#changedetectionstrategy", - "type": "article" - }, - { - "title": "Component - API", - "url": "https://angular.dev/api/core/Component#changeDetection", - "type": "article" - } - ] - }, - "-gUpm3OLUJl9iAyx6fmHN": { - "title": "Template", - "description": "`template` metadata is a property defined within the `@Component` decorator that specifies the HTML template for the component. 
It allows you to define the structure and layout of the component's view.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Template Syntax", - "url": "https://angular.dev/guide/templates", - "type": "article" - }, - { - "title": "Component Template API", - "url": "https://angular.dev/api/core/Component#template", - "type": "article" - } - ] - }, - "RcNHEh6kmbBK1PICbhAwr": { - "title": "Standalone", - "description": "A standalone component is a component that sets `standalone: true` in its component metadata. Standalone components directly import other components, directives, and pipes used in their templates\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Standalone Components", - "url": "https://angular.dev/guide/components/importing#standalone-components", - "type": "article" - }, - { - "title": "Component - API", - "url": "https://angular.dev/api/core/Component#standalone", - "type": "article" - } - ] - }, - "doHDoAgp7T59KGSXPpQzZ": { - "title": "viewProvider", - "description": "Defines the set of injectable objects that are visible to its view DOM children.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Using the viewProviders array", - "url": "https://angular.dev/guide/di/hierarchical-dependency-injection#using-the-viewproviders-array", - "type": "article" - }, - { - "title": "Component - API", - "url": "https://angular.dev/api/core/Component#viewProviders", - "type": "article" - } - ] - }, - "ctigvSYeFa77y3v7m11gk": { - "title": "Encapsulation", - "description": "An encapsulation policy for the component's styling. 
Possible values:\n\n* `ViewEncapsulation.Emulated`: Apply modified component styles in order to emulate a native Shadow DOM CSS encapsulation behavior.\n* `ViewEncapsulation.None`: Apply component styles globally without any sort of encapsulation.\n* `ViewEncapsulation.ShadowDom`: Use the browser's native Shadow DOM API to encapsulate styles.\n\nIf not supplied, the value is taken from the CompilerOptions which defaults to `ViewEncapsulation.Emulated`.\n\nIf the policy is `ViewEncapsulation.Emulated` and the component has no styles nor {@link Component#styleUrls styleUrls}, the policy is automatically switched to `ViewEncapsulation.None`.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Style Scoping", - "url": "https://angular.dev/guide/components/styling#style-scoping", - "type": "article" - }, - { - "title": "Component Encapsulation", - "url": "https://angular.dev/api/core/Component#encapsulation", - "type": "article" - } - ] - }, - "cDN0PGo-zkcLmttxCiAI-": { - "title": "Selector", - "description": "In Angular, the `selector` metadata is a crucial property defined within the `@Component` decorator that specifies how the component can be identified and used in HTML templates. 
It determines the way the component is rendered in the DOM, allowing developers to create reusable and easily identifiable components.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Component Selectors", - "url": "https://angular.dev/guide/components/selectors", - "type": "article" - }, - { - "title": "Component - API", - "url": "https://angular.dev/api/core/Component#selector", - "type": "article" - } - ] - }, - "4XJKEmSrQfPxggHlAP30w": { - "title": "Styles", - "description": "This metadata allows developers to apply CSS styles directly to a component, enhancing its appearance and ensuring that styles are scoped to that particular component.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Styling Components", - "url": "https://angular.dev/guide/components/styling", - "type": "article" - }, - { - "title": "Component Style - API", - "url": "https://angular.dev/api/core/Component#styles", - "type": "article" - } - ] - }, - "ghbrJhuGvscnNGCtVLh5_": { - "title": "Imports", - "description": "The `imports` property specifies the `standalone` component's template dependencies — those directives, components, and pipes that can be used within its template.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Importing and Using Components", - "url": "https://angular.dev/guide/components/importing", - "type": "article" - }, - { - "title": "Component - API", - "url": "https://angular.dev/api/core/Component#imports", - "type": "article" - } - ] - }, - "Szgr8dnZNi-z5i6raIJzW": { - "title": "Metadata", - "description": "Metadata in Angular components refers to the configuration information that is used to define and configure the behavior of a component. 
It is specified using decorators, which are functions that add metadata to classes, properties, and methods.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Component API", - "url": "https://angular.dev/api/core/Component", - "type": "article" - } - ] - }, - "19c7D-fWIJ3vYFT6h8ZfN": { - "title": "Communication", - "description": "Angular components can communicate with each other using `@Input()` and `@Output()` decorators. These decorators facilitate data exchange between parent and child components.\n\n* **@Input()**: This decorator allows a parent component to pass data to a child component, enabling the child to receive and use the data.\n* **@Output()**: This decorator allows a child component to emit events to a parent component, enabling the parent to respond to changes or actions within the child component.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Inputs", - "url": "https://angular.dev/guide/components/inputs", - "type": "article" - }, - { - "title": "Outputs", - "url": "https://angular.dev/guide/components/outputs", - "type": "article" - }, - { - "title": "Model Inputs", - "url": "https://angular.dev/guide/signals/model", - "type": "article" - }, - { - "title": "Custom events with outputs", - "url": "https://angular.dev/guide/components/outputs", - "type": "article" - }, - { - "title": "Non-Related Component Communication | Angular Component & Directives", - "url": "https://www.youtube.com/watch?v=aIkGXMJFTzM", - "type": "video" - } - ] - }, - "TDyFjKrIZJnCjEZsojPNQ": { - "title": "Parent-Child Interaction", - "description": "In angular parent-child communication is commonly used to share data between two components.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Component Interaction", - "url": "https://angular.io/guide/component-interaction", - "type": "article" - }, - { - "title": "Medium - Parent-Child Communication", - "url": 
"https://jaspritk.medium.com/parent-child-communication-in-angular-888373e0b69e", - "type": "article" - } - ] - }, - "v0XaLNZ-YrRqP-xv8wS43": { - "title": "ViewChild", - "description": "View queries retrieve results from the elements in the component's view — the elements defined in the component's own template.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "View queries", - "url": "https://angular.dev/guide/components/queries#view-queries", - "type": "article" - }, - { - "title": "viewChild - signal", - "url": "https://angular.dev/guide/signals/queries#viewchild", - "type": "article" - }, - { - "title": "viewChildren - signal", - "url": "https://angular.dev/guide/signals/queries#viewchildren", - "type": "article" - }, - { - "title": "viewChild - API", - "url": "https://angular.dev/api/core/viewChild", - "type": "article" - } - ] - }, - "oQl9etjoHiU2JgxieUOEH": { - "title": "ContentChild", - "description": "Content queries retrieve results from the elements in the component's content— the elements nested inside the component in the template where it's used.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Content Queries - Signal", - "url": "https://angular.dev/guide/signals/queries#content-queries", - "type": "article" - }, - { - "title": "Content Queries", - "url": "https://angular.dev/guide/components/queries#content-queries", - "type": "article" - }, - { - "title": "contentChild - API", - "url": "https://angular.dev/api/core/contentChild", - "type": "article" - } - ] - }, - "nCpfj_35ZvW-NTygg06XZ": { - "title": "Component Lifecycle", - "description": "A component instance has a lifecycle that starts when Angular instantiates the component class and renders the component view along with its child views. The lifecycle continues with change detection, as Angular checks to see when data-bound properties change, and updates both the view and the component instance as needed. 
The lifecycle ends when Angular destroys the component instance and removes its rendered template from the DOM. Directives have a similar lifecycle, as Angular creates, updates, and destroys instances in the course of execution.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Component Lifecycle", - "url": "https://angular.dev/guide/components/lifecycle", - "type": "article" - }, - { - "title": "The Life Cycle Hooks of Angular", - "url": "https://blog.logrocket.com/angular-lifecycle-hooks/", - "type": "article" - }, - { - "title": "Angular Lifecycle Hooks — Everything you need to know", - "url": "https://medium.com/@sinanozturk/angular-component-lifecycle-hooks-2f600c48dff3", - "type": "article" - }, - { - "title": "Explore top posts about Angular LifeCycle Hooks", - "url": "https://dev.to/search?utf8=%E2%9C%93&q=angular+hook", - "type": "article" - } - ] - }, - "tC5ETtOuuUcybj1jI4CuG": { - "title": "Dynamic Components", - "description": "In addition to using a component directly in a template, you can also dynamically render components. 
There are two main ways to dynamically render a component: in a template with `NgComponentOutlet`, or in your TypeScript code with `ViewContainerRef`.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Programmatically Rendering Components", - "url": "https://angular.dev/guide/components/programmatic-rendering", - "type": "article" - }, - { - "title": "New Input Binding for NgComponentOutlet", - "url": "https://medium.com/ngconf/new-input-binding-for-ngcomponentoutlet-cb18a86a739d", - "type": "article" - }, - { - "title": "Render dynamic components in Angular using ViewContainerRef", - "url": "https://dev.to/railsstudent/render-dynamic-components-in-angular-using-viewcontainerref-160h", - "type": "article" - }, - { - "title": "Dynamic Component in Angular (2024)", - "url": "https://www.youtube.com/watch?v=ncbftt3NWVo", - "type": "video" - }, - { - "title": "Mastering ViewContainerRef for dynamic component loading in Angular17", - "url": "https://www.youtube.com/watch?v=Ra4PITCt8m0", - "type": "video" - } - ] - }, - "b_kdNS9PDupcUftslkf9i": { - "title": "Modules", - "description": "Modules in Angular act like a container where we can group the components, directives, pipes, and services, related to the application.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Importing and Using Components", - "url": "https://angular.dev/guide/components/importing", - "type": "article" - }, - { - "title": "Introduction to Modules", - "url": "https://angular.dev/guide/ngmodules", - "type": "article" - }, - { - "title": "Explore top posts about Angular", - "url": "https://app.daily.dev/tags/angular?ref=roadmapsh", - "type": "article" - } - ] - }, - "BCq5sgWQLiw0f7u7ZSAd2": { - "title": "Module Architecture", - "description": "Angular's **module architecture** uses **NgModules** to organize applications into cohesive units. 
These modules group related components, directives, pipes, and services, promoting modular development. Key types include the **root module** (entry point), **feature modules** (specific functionality, often lazily loaded), and **shared modules** (reusable code). This approach significantly enhances code organization, scalability, and maintainability.\n\nVisit following resources to learn more:", - "links": [ - { - "title": "Angular Architecture", - "url": "https://dev.to/digitaldino/angular-architecture-39no", - "type": "article" - }, - { - "title": "Mastering Modular Architecture in Angular - Medium", - "url": "https://medium.com/@sehban.alam/mastering-modular-architecture-in-angular-4cc2632fc964", - "type": "article" - } - ] - }, - "ex8FOKrUlbu4MuEq2czyW": { - "title": "Creating Components", - "description": "You can either use Angular CLI to create the Angular components or create it manually.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Build your first Angular App", - "url": "https://angular.dev/tutorials/first-app", - "type": "article" - }, - { - "title": "Components", - "url": "https://angular.dev/essentials/components", - "type": "article" - }, - { - "title": "Angular CLI - ng generate components", - "url": "https://angular.dev/guide/components", - "type": "article" - } - ] - }, - "9YhTXybJw2gszlqFeBtW3": { - "title": "Creating Modules", - "description": "Creating modules in Angular helps organize your application into manageable, cohesive units. Each module can encapsulate related components, directives, pipes, and services. 
Here's a detailed guide on how to create and use modules in Angular.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Feature Modules", - "url": "https://angular.dev/guide/ngmodules/feature-modules", - "type": "article" - } - ] - }, - "w_BazXvINFyxDCHmlznfy": { - "title": "Feature Modules", - "description": "Feature modules are `NgModules` for the purpose of organizing code. With feature modules, you can keep code related to a specific functionality or feature separate from other code. Delineating areas of your application helps with collaboration between developers and teams, separating directives, and managing the size of the root module.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Feature Modules", - "url": "https://angular.dev/guide/ngmodules/feature-modules#how-to-make-a-feature-module", - "type": "article" - }, - { - "title": "Feature module with lazy loading in Angular 15", - "url": "https://medium.com/@jaydeepvpatil225/feature-module-with-lazy-loading-in-angular-15-53bb8e15d193", - "type": "article" - }, - { - "title": "Creating a Feature Module | Understanding Angular Modules", - "url": "https://www.youtube.com/watch?v=VaPhaexVa1U", - "type": "video" - } - ] - }, - "bLERvEERmNI5AgxtEYokZ": { - "title": "Lazy Loading Modules", - "description": "By default, NgModules are eagerly loaded. This means that as soon as the application loads, so do all the NgModules, whether they are immediately necessary or not. For large applications with lots of routes, consider lazy loading —a design pattern that loads NgModules as needed. 
Lazy loading helps keep initial bundle sizes smaller, which in turn helps decrease load times.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Lazy Loading", - "url": "https://angular.dev/guide/ngmodules/lazy-loading", - "type": "article" - }, - { - "title": "Angular Lazy Loading", - "url": "https://www.bairesdev.com/blog/angular-lazy-loading/", - "type": "article" - }, - { - "title": "Lazy Loading in Angular: Improving Performance and User Experience", - "url": "https://www.youtube.com/watch?v=mjhi27YfV8Y", - "type": "video" - } - ] - }, - "5b590c7s-2XJ0rgdCYxLa": { - "title": "Dependencies", - "description": "A provider is an instruction to the Dependency Injection system on how to obtain a value for a dependency. Most of the time, these dependencies are services that you create and provide.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Providing Dependencies in Modules", - "url": "https://angular.dev/guide/ngmodules/providers", - "type": "article" - }, - { - "title": "Providers in Angular", - "url": "https://www.scaler.com/topics/angular/providers-in-angular/", - "type": "article" - }, - { - "title": "Working with providers in Angular", - "url": "https://sergeygultyayev.medium.com/working-with-providers-in-angular-eeb493151446", - "type": "article" - } - ] - }, - "6fhe9xAi_RSVfa-KKbcbV": { - "title": "Templates", - "description": "A Template is a form of HTML which tells Angular to go towards another component. 
To create many Angular features, special syntax within the templates is used.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Template Syntax", - "url": "https://angular.dev/guide/templates", - "type": "article" - }, - { - "title": "Explore top posts about Angular", - "url": "https://app.daily.dev/tags/angular?ref=roadmapsh", - "type": "article" - } - ] - }, - "XHpfHRIlFh19FJIE07u7i": { - "title": "Interpolation", - "description": "Interpolation refers to embedding expressions into marked up text. By default, interpolation uses the double curly braces {{ and }} as delimiters. Angular replaces currentCustomer with the string value of the corresponding component property.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Interpolation", - "url": "https://angular.dev/guide/templates/interpolation", - "type": "article" - }, - { - "title": "Displaying values with interpolation", - "url": "https://angular.dev/guide/templates/interpolation", - "type": "article" - } - ] - }, - "t2YOeMONlcnKBrVAo0JDc": { - "title": "Template Statements", - "description": "Template statements are methods or properties that you can use in your HTML to respond to user events. With template statements, your application can engage users through actions such as displaying dynamic content or submitting forms. 
Enclose the event in `()` which causes Angular to evaluate the right hand side of the assignment as one or more template statements chained together using semicolon `;`.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Template Statements", - "url": "https://angular.dev/guide/templates/template-statements", - "type": "article" - }, - { - "title": "Understanding Template Statements", - "url": "https://angular.dev/guide/templates/template-statements#", - "type": "article" - } - ] - }, - "WH5wlyOtrqFHBJx7RFJwS": { - "title": "Understand Binding", - "description": "In an Angular template, a binding creates a live connection between view and the model and keeps them both in sync.\n\n* **property**: helps you set values for properties of HTML elements or directives.\n* **attributes**: helps you set values for attributes of HTML elements directly.\n* **event**: lets you listen for and respond to user actions such as keystrokes, mouse movements, clicks, and touches.\n* **data**: It's a combination of property and event binding and helps you share data between components.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Binding", - "url": "https://angular.dev/guide/templates/binding", - "type": "article" - } - ] - }, - "5vZkiH7HDwONIABLfNJ06": { - "title": "Data Binding", - "description": "In an Angular template, a binding creates a live connection between a part of the UI created from a template (a DOM element, directive, or component) and the model (the component instance to which the template belongs). This connection can be used to synchronize the view with the model, to notify the model when an event or user action takes place in the view, or both. Angular's Change Detection algorithm is responsible for keeping the view and the model in sync. 
Bindings always have two parts: a target which will receive the bound value, and a template expression which produces a value from the model.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Understand Binding", - "url": "https://angular.dev/guide/templates/binding", - "type": "article" - }, - { - "title": "Data Binding in Angular", - "url": "https://www.angularminds.com/blog/data-binding-in-angular", - "type": "article" - } - ] - }, - "TJOZfHtsLfwA0CZ2bd1b2": { - "title": "Property Binding", - "description": "Property binding helps you set values for properties of HTML elements or directives. To bind to an element's property, enclose it in square brackets `[]` which causes Angular to evaluate the right-hand side of the assignment as a dynamic expression.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Property Binding", - "url": "https://angular.dev/guide/templates/property-binding", - "type": "article" - } - ] - }, - "FgsSyM6To7irpbivtOLEE": { - "title": "Attribute Binding", - "description": "Attribute binding in Angular helps you set values for attributes directly. 
With attribute binding, you can improve accessibility, style your application dynamically, and manage multiple CSS classes or styles simultaneously.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Attribute Binding", - "url": "https://angular.dev/guide/templates/attribute-binding", - "type": "article" - }, - { - "title": "What is difference between binding to attribute and binding to property in Angular?", - "url": "https://stackoverflow.com/questions/76967327/what-is-difference-between-binding-to-attribute-and-binding-to-property-in-angul", - "type": "article" - } - ] - }, - "bKnpirSvex4oE4lAjiSSV": { - "title": "Event Binding", - "description": "Event binding lets you listen for and respond to user actions such as keystrokes, mouse movements, clicks, and touches.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Event Binding", - "url": "https://angular.dev/guide/templates/event-binding", - "type": "article" - }, - { - "title": "Event Handling in Angular", - "url": "https://medium.com/@theriyasharma24/event-handling-in-angular-a5854a61b4a5", - "type": "article" - } - ] - }, - "2UH79nCjgtY1Qz1YjUJYL": { - "title": "Two-way Binding", - "description": "Two-way binding gives components in your application a way to share data. Use two-way binding to listen for events and update values simultaneously between parent and child components. 
Angular's two-way binding syntax is a combination of square brackets and parentheses, `[()]`, commonly known as `banana in a box`.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Two-way binding", - "url": "https://angular.dev/guide/templates/two-way-binding", - "type": "article" - }, - { - "title": "How to implement two-way data binding in Angular", - "url": "https://www.angularminds.com/blog/how-to-implement-two-way-data-binding-in-angular", - "type": "article" - } - ] - }, - "VzvB_bads057YtG4ST4a2": { - "title": "Control Flow", - "description": "Angular templates support control flow blocks that let you conditionally show, hide, and repeat elements.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Built-in Control Flow", - "url": "https://angular.dev/guide/templates/control-flow", - "type": "article" - } - ] - }, - "VsU6713jeIjAOEZnF6gWx": { - "title": "@Input & @Output", - "description": "`@Input()` and `@Output()` give a child component a way to communicate with its parent component. `@Input()` lets a parent component update data in the child component. Conversely, `@Output()` lets the child send data to a parent component.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "inputs", - "url": "https://angular.dev/guide/components/inputs", - "type": "article" - }, - { - "title": "outputs", - "url": "https://angular.dev/guide/components/outputs", - "type": "article" - } - ] - }, - "nyDry6ZWyEUuTq4pw-lU3": { - "title": "Template Ref Vars", - "description": "Template reference variables help you use data from one part of a template in another part of the template. A template variable can refer to a DOM element within a template, component or directive. 
In the template, use the hash symbol, `#`, to declare a template reference variable.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Reference Variables", - "url": "https://angular.dev/guide/templates/reference-variables", - "type": "article" - } - ] - }, - "VsC7UmE_AumsBP8fC6to1": { - "title": "Template Syntax", - "description": "In Angular, a _template_ is a chunk of HTML. Use special syntax within a template to build on many of Angular's features. Extend the HTML vocabulary of your applications with special Angular syntax in your templates. For example, Angular helps you get and set DOM (Document Object Model) values dynamically with features such as built-in template functions, variables, event listening, and data binding.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Template Syntax", - "url": "https://angular.dev/guide/templates", - "type": "article" - }, - { - "title": "An Introduction to Angular Template Syntax", - "url": "https://angularstart.com/modules/basic-angular-concepts/3/", - "type": "article" - }, - { - "title": "Craft Dynamic Templates with Angular's Template Syntax", - "url": "https://www.youtube.com/watch?v=uSnUTcf8adI", - "type": "video" - } - ] - }, - "U1Zy2T-2ki9pDkXn9hn-I": { - "title": "@if", - "description": "The @if block conditionally displays its content when its condition expression is truthy. 
Content is added and removed from the DOM based on the evaluation of conditional expressions in the @if and @else blocks.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "@if", - "url": "https://angular.dev/api/core/@if", - "type": "article" - }, - { - "title": "Narrow Down signal value type within an if statement", - "url": "https://egghead.io/lessons/angular-narrow-down-angular-s-signal-value-type-within-an-if-statement", - "type": "video" - } - ] - }, - "ORdPDad4HWJAfcZuS-7yM": { - "title": "@else", - "description": "While the `@if` block can be helpful in many situations, it's common to also show fallback UI when the condition is not met. When you need a fallback, similar to JavaScript's else clause, add an `@else` block to accomplish the same effect.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Angular Official Docs - @else block", - "url": "https://angular.dev/guide/templates/control-flow#conditionally-display-content-with-if-else-if-and-else", - "type": "article" - }, - { - "title": "Angular If Else Control Flow Blocks Explained", - "url": "https://ultimatecourses.com/blog/angular-if-else-control-flow-blocks-explained", - "type": "article" - } - ] - }, - "ys5untkSppGMFK-VsfuRt": { - "title": "@else if", - "description": "With the new control flow syntax, you gain `@else if` conditional blocks, something that is not possible with `@ngIf`. 
This addition makes the control flow syntax close to what we would write with just plain JavaScript.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "@if", - "url": "https://angular.dev/api/core/@if", - "type": "article" - }, - { - "title": "Angular @if: Complete Guide", - "url": "https://blog.angular-university.io/angular-if/", - "type": "article" - } - ] - }, - "2kYS9w1UzQFZ1zhf01m9L": { - "title": "@for", - "description": "The @for block repeatedly renders content of a block for each item in a collection.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "@for", - "url": "https://angular.dev/api/core/@for", - "type": "article" - } - ] - }, - "nZuim4Fjq6jYOXcRTAEay": { - "title": "@switch", - "description": "The `@switch` blocks displays content selected by one of the cases matching against the conditional expression. The value of the conditional expression is compared to the case expression using the `===` operator. `@switch` does not have fallthrough, so you do not need an equivalent to a break or return statement.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "@switch", - "url": "https://angular.dev/guide/templates/control-flow#switch-block---selection", - "type": "article" - }, - { - "title": "Angular @switch: Complete Guide", - "url": "https://blog.angular-university.io/angular-switch/", - "type": "article" - } - ] - }, - "cHC2MH50CbUSMRZV4QGJI": { - "title": "@case", - "description": "If no `@case` matches the `@switch` condition and there is no `@default` block, nothing is shown. 
Otherwise, the content inside the `@case` that matches the condition will be displayed.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "@switch", - "url": "https://angular.dev/guide/templates/control-flow#switch-block---selection", - "type": "article" - }, - { - "title": "Angular @switch: Complete Guide", - "url": "https://blog.angular-university.io/angular-switch/", - "type": "article" - } - ] - }, - "h4MMn0_qUN3YXEdMUJOyd": { - "title": "@default", - "description": "The `@default` clause is used to render a template when none of the `@case` blocks matches the value of the `@switch` conditional. `@default` is optional and can be omitted.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "@switch", - "url": "https://angular.dev/api/core/@switch#description", - "type": "article" - }, - { - "title": "Angular @switch: Complete Guide", - "url": "https://blog.angular-university.io/angular-switch/", - "type": "article" - } - ] - }, - "AwOM0ucg6W7TohdUd7KWT": { - "title": "@let", - "description": "@let allows you to define a local variable and re-use it across the template.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "@let", - "url": "https://angular.dev/api/core/@let", - "type": "article" - }, - { - "title": "Angular's next feature let syntax", - "url": "https://nhannguyendevjs.medium.com/angulars-next-feature-let-syntax-afba6354112b", - "type": "article" - } - ] - }, - "ONy-0olujU_FGZM7Wvfr2": { - "title": "@defer", - "description": "A type of block that can be used to defer load the JavaScript for components, directives and pipes used inside a component template.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "@defer", - "url": "https://angular.dev/api/core/@defer", - "type": "article" - }, - { - "title": "Angular Defer Complete Guide", - "url": "https://blog.angular-university.io/angular-defer/", - "type": "article" - }, - { - "title": "How to 
use Angular's defer block to improve performance", - "url": "https://angular.love/en/how-to-use-angulars-defer-block-to-improve-performance", - "type": "article" - } - ] - }, - "j99WQxuTzGeBBVoReDp_y": { - "title": "Pipes", - "description": "Use pipes to transform strings, currency amounts, dates, and other data for display. Pipes are simple functions to use in template expressions to accept an input value and return a transformed value. Pipes are useful because you can use them throughout your application , some common pipes are `DatePipe` | `UpperCasePipe` | `LowerCasePipe` | `CurrencyPipe` | `DecimalPipe` | `PercentPipe`\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Understanding Pipes", - "url": "https://angular.dev/tutorials/learn-angular/22-pipes", - "type": "article" - }, - { - "title": "BuiltIn Pipes - examples", - "url": "https://codecraft.tv/courses/angular/pipes/built-in-pipes/", - "type": "article" - } - ] - }, - "_-mTs_FMeob-ZGK-bb3j-": { - "title": "Change Detection", - "description": "Change detection is the process through which Angular checks to see whether your application state has changed, and if any DOM needs to be updated. At a high level, Angular walks your components from top to bottom, looking for changes. Angular runs its change detection mechanism periodically so that changes to the data model are reflected in an application’s view. 
Change detection can be triggered either manually or through an asynchronous event\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Runtime Performance Optimization", - "url": "https://angular.dev/best-practices/runtime-performance", - "type": "article" - }, - { - "title": "ChangeDetectionStrategy", - "url": "https://angular.dev/guide/components/advanced-configuration#changedetectionstrategy", - "type": "article" - }, - { - "title": "4 Runtime Performance Optimizations ( Change detection )", - "url": "https://www.youtube.com/watch?v=f8sA-i6gkGQ", - "type": "video" - } - ] - }, - "i2taHzQ5KLHjkkpbH4Ytd": { - "title": "Common Pipes", - "description": "Angular provides built-in pipes for typical data transformations, including transformations for internationalization (i18n), which use locale information to format data. The following are commonly used built-in pipes for data formatting:\n\n* DatePipe: Formats a date value according to locale rules.\n* UpperCasePipe: Transforms text to all upper case.\n* LowerCasePipe: Transforms text to all lower case.\n* CurrencyPipe: Transforms a number to a currency string, formatted according to locale rules.\n* DecimalPipe: Transforms a number into a string with a decimal point, formatted according to locale rules.\n* PercentPipe: Transforms a number to a percentage string, formatted according to locale rules.\n* AsyncPipe: Subscribe and unsubscribe to an asynchronous source such as an observable.\n* JsonPipe: Display a component object property to the screen as JSON for debugging.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Pipes", - "url": "https://angular.dev/guide/pipes", - "type": "article" - }, - { - "title": "Pipes in Angular", - "url": "https://medium.com/@aqeelabbas3972/pipes-in-angular-6a871589299d", - "type": "article" - } - ] - }, - "nZxZnzbQg9dz-SI65UHq9": { - "title": "Pipes Precedence", - "description": "The pipe operator has a higher precedence than 
the JavaScript ternary operator.\n\nYou should always use parentheses to be sure Angular evaluates the expression as you intend.\n\n (condition ? a : b) | pipe\n \n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Precedence", - "url": "https://angular.dev/guide/pipes/precedence", - "type": "article" - }, - { - "title": "What is the precedence between pipe and ternary operators?", - "url": "https://iq.js.org/questions/angular/what-is-the-precedence-between-pipe-and-ternary-operators", - "type": "article" - } - ] - }, - "BOYXGfULJRiP-XOo_lNX3": { - "title": "Custom Pipes", - "description": "Use pipes to transform strings, currency amounts, dates, and other data for display. Pipes are simple functions in template expressions to accept an input value and return a transformed value. Pipes are helpful because you can use them throughout your application while only declaring each pipe once. For example, you would use a pipe to show the date as April 15, 1988, rather than the raw string format.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Custom Pipes for New Transforms", - "url": "https://angular.dev/guide/pipes/transform-data", - "type": "article" - }, - { - "title": "Create a custom pipe video for Beginners", - "url": "https://www.youtube.com/watch?v=P2587FN4Y0w", - "type": "video" - } - ] - }, - "kGzlumFdZFxTRZ3HnCGFO": { - "title": "Directives", - "description": "Directives are classes that add additional behavior to elements in your Angular applications. Use Angular's built-in directives to manage forms, lists, styles, and what users see.\n\n`NgClass` Adds and removes a set of CSS classes. | `NgStyle` Adds and removes a set of HTML styles. 
| `NgModel` Adds two-way data binding to an HTML form element.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Built-in directives", - "url": "https://angular.dev/guide/directives/", - "type": "article" - }, - { - "title": "BuiltIn Directives Types", - "url": "https://thinkster.io/tutorials/angular-2-directives", - "type": "article" - } - ] - }, - "xk3v8p6vf8ntGj5c-IU4U": { - "title": "Structural Directives", - "description": "Structural directives are directives applied to an `<ng-template>` element that conditionally or repeatedly renders the content of that `<ng-template>`. If you just wrap elements in an `<ng-template>` without applying a structural directive, those elements will not be rendered.\n\nIn Angular, there are three standard structural directives:\n\n* `*ngIf` – conditionally includes a template depending on the value of an expression returned by a Boolean.\n* `*ngFor` – makes it simple to iterate over an array.\n* `*ngSwitch` – renders each matching view.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Structural Directives", - "url": "https://angular.dev/guide/directives/structural-directives", - "type": "article" - }, - { - "title": "Structural Directives in Angular", - "url": "https://medium.com/@eugeniyoz/structural-directives-in-angular-61fe522f3427", - "type": "article" - }, - { - "title": "Angular Structural Directive Patterns: What they are and how to use them", - "url": "https://www.freecodecamp.org/news/angular-structural-directive-patterns-what-they-are-and-how-to-use-them/", - "type": "article" - } - ] - }, - "xvwby0FTdIolRrV2j88fY": { - "title": "Attribute Directives", - "description": "Change the appearance or behavior of DOM elements and Angular components with attribute directives.\n\nThe most used attribute directives are:\n\n* ngClass\n* ngStyle\n* ngModel\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Attribute Directives", - "url": 
"https://angular.dev/guide/directives/attribute-directives", - "type": "article" - }, - { - "title": "Angular Attribute Directive", - "url": "https://www.scaler.com/topics/angular/angular-attribute-directive/", - "type": "article" - } - ] - }, - "7GUvTMVzfdVEDBOz-tHUT": { - "title": "Custom Directives", - "description": "Directives are the functions that will execute whenever the Angular compiler finds them. Angular Directives enhance the capability of HTML elements by attaching custom behaviors to the DOM.\n\nFrom the core concept, Angular directives are categorized into three categories: Attribute Directives, Structural Directives, and Component Directives.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Built-in directives", - "url": "https://angular.dev/guide/directives#", - "type": "article" - }, - { - "title": "Demystifying Directives in Angular: A Beginner's Guide", - "url": "https://medium.com/@drissi.dalanda8/demystifying-directives-in-angular-a-beginners-guide-fdb6e199b80a", - "type": "article" - }, - { - "title": "Create a custom directive video for Beginners", - "url": "https://www.youtube.com/watch?v=AoN56g6UAsE", - "type": "video" - } - ] - }, - "a74v78SvGtWduZpXs7wSq": { - "title": "Routing", - "description": "Routing in Angular allows the users to create a single-page application with multiple views and allows navigation between them.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Angular Routing", - "url": "https://angular.dev/guide/routing", - "type": "article" - }, - { - "title": "Common Routing Tasks", - "url": "https://angular.dev/guide/routing/common-router-tasks", - "type": "article" - } - ] - }, - "dbAS-hN1hoCsNJhkxXcGq": { - "title": "Configuration", - "description": "The configuration of routes in an Angular application involves defining route mappings in an array and providing these routes to the Angular router.\n\nVisit the following resources to learn more:", - "links": [ - { - 
"title": "Router Reference - Configuration", - "url": "https://angular.dev/guide/routing/router-reference#configuration", - "type": "article" - }, - { - "title": "Routing Overview", - "url": "https://angular.dev/guide/routing", - "type": "article" - } - ] - }, - "ewbDdPYv2SJl_jW3RVHQs": { - "title": "Lazy Loading", - "description": "Lazy loading is a technique in Angular that allows you to load JavaScript components asynchronously when a specific route is activated. It improves the application load time speed by splitting the application into several bundles. The bundles are loaded as required when the user navigates through the app.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Lazy-loading Feature Modules", - "url": "https://angular.dev/guide/ngmodules/lazy-loading", - "type": "article" - }, - { - "title": "Angular Tutorial - Lazy Loading", - "url": "https://www.youtube.com/watch?v=JjIQq9lh-Bw", - "type": "video" - } - ] - }, - "1ZwdEL0Gx30Vv_Av3ZTGG": { - "title": "Router Outlets", - "description": "The router-outlet is a directive that's available from the @angular/router package and is used by the router to mark where in a template, a matched component should be inserted.\n\nThanks to the router outlet, your app will have multiple views/pages and the app template acts like a shell of your application. 
Any element you add to the shell will be rendered in each view; only the part marked by the router outlet will be changed between views.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Router Reference - Router Outlet", - "url": "https://angular.dev/guide/routing/router-reference#router-outlet", - "type": "article" - }, - { - "title": "Router Outlet - API", - "url": "https://angular.dev/api/router/RouterOutlet", - "type": "article" - } - ] - }, - "8lFyuSx4MUcYRY2L8bZrq": { - "title": "Router Links", - "description": "In Angular, routerLink, when applied to an element in a template, makes that element a link that initiates navigation to a route. Navigation opens one or more routed components in one or more `<router-outlet>` locations on the page.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Router Reference - Router links", - "url": "https://angular.dev/guide/routing/router-reference#router-links", - "type": "article" - }, - { - "title": "Router Link - API", - "url": "https://angular.dev/api/router/RouterLink", - "type": "article" - }, - { - "title": "Angular Router: Navigation Using RouterLink, Navigate, or NavigateByUrl", - "url": "https://www.digitalocean.com/community/tutorials/angular-navigation-routerlink-navigate-navigatebyurl", - "type": "article" - } - ] - }, - "YF_sG292HqawIX0siWhrv": { - "title": "Router Events", - "description": "The Angular Router raises events when it navigates from one route to another route. It raises several events such as `NavigationStart`, `NavigationEnd`, `NavigationCancel`, `NavigationError`, `ResolveStart`, etc. You can listen to these events and find out when the state of the route changes. 
Some of the useful events are route change start (NavigationStart) and route change end (NavigationEnd).\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Router Reference - Router events", - "url": "https://angular.dev/guide/routing/router-reference#router-events", - "type": "article" - }, - { - "title": "Router Event - API", - "url": "https://angular.dev/api/router/RouterEvent", - "type": "article" - }, - { - "title": "Router events in Angular", - "url": "https://medium.com/@gurunadhpukkalla/router-events-in-angular-3112a3968660", - "type": "article" - } - ] - }, - "PmC4zeaLpa5LoL4FhYXcG": { - "title": "Guards", - "description": "Use route guards to prevent users from navigating to parts of an application without authorization.\n\nAngular route guards are interfaces provided by Angular that, when implemented, allow us to control the accessibility of a route based on conditions provided in function implementation of that interface.\n\nSome types of angular guards are `CanActivate`, `CanActivateChild`, `CanDeactivate`, `CanMatch` and `Resolve`.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Preventing Unauthorized Access", - "url": "https://angular.dev/guide/routing/common-router-tasks#preventing-unauthorized-access", - "type": "article" - }, - { - "title": "Resolve", - "url": "https://angular.dev/api/router/Resolve", - "type": "article" - } - ] - }, - "CpsoIVoCKaZnM_-BbXbCh": { - "title": "Services & Remote Data", - "description": "Services let you define code or functionalities that are then accessible and reusable in many other components in the Angular project. 
It also helps you with the abstraction of logic and data that is hosted independently but can be shared across other components.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Creating an Injectable Service", - "url": "https://angular.dev/guide/di/creating-injectable-service", - "type": "article" - }, - { - "title": "Service for API Calls", - "url": "https://www.knowledgehut.com/blog/web-development/make-api-calls-angular", - "type": "article" - } - ] - }, - "8u9uHCRt9RU57erBy79PP": { - "title": "Dependency Injection", - "description": "Dependency Injection is one of the fundamental concepts in Angular. DI is wired into the Angular framework and allows classes with Angular decorators, such as Components, Directives, Pipes, and Injectables, to configure dependencies that they need.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Understanding Dependency Injection", - "url": "https://angular.dev/guide/di/dependency-injection", - "type": "article" - }, - { - "title": "DI in Action", - "url": "https://angular.dev/guide/di/di-in-action", - "type": "article" - }, - { - "title": "Explore top posts about Dependency Injection", - "url": "https://app.daily.dev/tags/dependency-injection?ref=roadmapsh", - "type": "article" - } - ] - }, - "Q36LQds8k_cSjijvXyWOM": { - "title": "Forms", - "description": "Forms are used to handle user inputs in many applications. 
It enables users to perform tasks ranging from entering sensitive information to completing several data entry tasks.\n\nAngular provides two approaches to handle user inputs through forms: reactive and template-driven forms.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Forms in Angular", - "url": "https://angular.dev/guide/forms", - "type": "article" - }, - { - "title": "Angular Forms Tutorial", - "url": "https://www.youtube.com/watch?v=-bGgjgx3fGs", - "type": "video" - }, - { - "title": "Building Forms in Angular Apps", - "url": "https://www.youtube.com/watch?v=hAaoPOx_oIw", - "type": "video" - } - ] - }, - "1d3Y4HVnqom8UOok-7EEf": { - "title": "Reactive Forms", - "description": "Reactive Forms in Angular are those which are used to handle the inputs coming from the user. We can define controls by using classes such as FormGroup and FormControl.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Reactive Forms - Angular", - "url": "https://angular.dev/guide/forms/reactive-forms", - "type": "article" - }, - { - "title": "How To Use Reactive Forms in Angular", - "url": "https://www.digitalocean.com/community/tutorials/angular-reactive-forms-introduction", - "type": "article" - }, - { - "title": "Explore top posts about General Programming", - "url": "https://app.daily.dev/tags/general-programming?ref=roadmapsh", - "type": "article" - }, - { - "title": "Reactive Form in Angular", - "url": "https://www.youtube.com/watch?v=8k4ctDmVn7w", - "type": "video" - } - ] - }, - "XC_K1Wahl2ySqOXoym4YU": { - "title": "Typed Forms", - "description": "Since Angular 14, reactive forms are strictly typed by default. You don't have to define extra custom types or add a ton of type annotations to your form declarations to benefit from this extra type safety, as Angular will infer types from the default value of a form control. Non-typed forms are still supported. 
To use them, you must import the `Untyped` symbols from `@angular/forms`.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Typed Forms", - "url": "https://angular.dev/guide/forms/typed-forms", - "type": "article" - }, - { - "title": "Angular Strictly Typed Forms (Complete Guide)", - "url": "https://blog.angular-university.io/angular-typed-forms/", - "type": "article" - }, - { - "title": "Getting started with Typed Reactive Forms in Angular", - "url": "https://www.youtube.com/watch?v=mT3UR0TdDnU", - "type": "video" - }, - { - "title": "Angular TYPED Forms: Are You Using Them Correctly?", - "url": "https://www.youtube.com/watch?v=it2BZoIvBPc", - "type": "video" - }, - { - "title": "Knowing this makes Angular typed forms WAY less awkward", - "url": "https://www.youtube.com/watch?v=xpRlijg6spo", - "type": "video" - } - ] - }, - "uDx4lPavwsJFBMzdQ70CS": { - "title": "Template-driven Forms", - "description": "A Template driven form is the simplest form we can build in Angular. 
It is mainly used for creating simple form application.\n\nIt uses two-way data-binding (ngModel) to create and handle the form components.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Building a Template-driven Form", - "url": "https://angular.dev/guide/forms/template-driven-forms", - "type": "article" - }, - { - "title": "Template-Driven Forms", - "url": "https://codecraft.tv/courses/angular/forms/template-driven/", - "type": "article" - }, - { - "title": "Template driven form", - "url": "https://www.youtube.com/watch?v=whr14XxB8-M", - "type": "video" - }, - { - "title": "Template driven form Validations", - "url": "https://www.youtube.com/watch?v=cVd4ZCIXprs", - "type": "video" - } - ] - }, - "CpufN6DAOj5UNab9vnH0k": { - "title": "Dynamic Forms", - "description": "Dynamic forms in Angular are a flexible way to create forms where the structure (such as form fields and validation rules) is generated at runtime, rather than being hardcoded. By using Angular's `FormBuilder` and `FormGroup`, you can dynamically add, remove, or modify form controls based on user input, data fetched from a server, or other logic. This approach allows for creating complex forms that can adapt to different user scenarios, reducing the need for multiple form templates and making the codebase more maintainable and scalable.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Dynamic Forms Documentation", - "url": "https://angular.dev/guide/forms/dynamic-forms", - "type": "article" - }, - { - "title": "Create a Dynamic Reactive Angular Form with JSON", - "url": "https://www.youtube.com/watch?v=ByHw_RMjkKM", - "type": "video" - } - ] - }, - "kxRtLsB3y_th8j-HjmJgK": { - "title": "Custom Validators", - "description": "Custom validators in Angular are functions that allow you to define your own validation logic for form controls. They are used when the built-in validators (like `required`, `minLength`, etc.) 
do not meet your specific validation requirements. A custom validator is a function that returns either `null` if the form control is valid, or an object that represents the validation error if it is invalid. This object typically contains a key-value pair where the key is the error name and the value is a boolean or some details about the error.\n\nLearn more from the following resources:", - "links": [ - { - "title": "Defining custom validators", - "url": "https://angular.dev/guide/forms/form-validation#defining-custom-validators", - "type": "article" - }, - { - "title": "How to create custom validator in Angular 17", - "url": "https://youtu.be/3TwmS0Gdg9I?si=1w4EX-HifJ70-CxT", - "type": "video" - } - ] - }, - "m5dgKgUR3ZqI9sBAzToev": { - "title": "Control Value Accessor", - "description": "Defines an interface that acts as a bridge between the Angular forms API and a native element in the DOM. Implement this interface to create a custom form control directive that integrates with Angular forms.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "ControlValueAccessor", - "url": "https://angular.dev/api/forms/ControlValueAccessor", - "type": "article" - }, - { - "title": "Mastering Angular Control Value Accessor: A guide for Angular Developer", - "url": "https://hackernoon.com/mastering-angular-control-value-accessor-a-guide-for-angular-developer", - "type": "article" - }, - { - "title": "Angular Custom Form Controls", - "url": "https://blog.angular-university.io/angular-custom-form-controls/", - "type": "article" - } - ] - }, - "8UY0HAvjY7bdbFpt-MM1u": { - "title": "HTTP Client", - "description": "Most front-end applications need to communicate with a server over the HTTP protocol, to download or upload data and access other back-end services. 
Angular provides a client HTTP API for Angular applications, the `HttpClient` service class in `@angular/common/http`.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "HTTP Client", - "url": "https://angular.dev/guide/http", - "type": "article" - }, - { - "title": "Angular HTTP Client - Quickstart Guide", - "url": "https://blog.angular-university.io/angular-http/", - "type": "article" - }, - { - "title": "Using HTTP Client in modern Angular applications", - "url": "https://www.thisdot.co/blog/using-httpclient-in-modern-angular-applications", - "type": "article" - } - ] - }, - "AKPhbg10xXjccO7UBh5eJ": { - "title": "Setting Up the Client", - "description": "Before you can use `HttpClient` in your app, you must configure it using dependency injection. `HttpClient` is provided using the `provideHttpClient` helper function, which most apps include in the application providers in `app.config.ts`. If your app is using NgModule-based bootstrap instead, you can include `provideHttpClient` in the providers of your app's `NgModule`.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Setting up HttpClient", - "url": "https://angular.dev/guide/http/setup", - "type": "article" - }, - { - "title": "Setting up HttpClient in Angular (NgModule)", - "url": "https://www.youtube.com/watch?v=hBFtim1vO3M", - "type": "video" - } - ] - }, - "HjGAv3aV-p4ijYJ8XYIw3": { - "title": "Making Requests", - "description": "`HttpClient` has methods corresponding to the different HTTP verbs used to make requests, both to load data and to apply mutations on the server. 
Each method returns an RxJS `Observable` which, when subscribed, sends the request and then emits the results when the server responds.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Making Requests", - "url": "https://angular.dev/guide/http/making-requests", - "type": "article" - }, - { - "title": "How to make HTTP request in Angular 18", - "url": "https://www.youtube.com/watch?v=3vQpYKlHmS0", - "type": "video" - } - ] - }, - "xG7iSVOGcbxJbNv3xbNfc": { - "title": "Writing Interceptors", - "description": "Interceptors are middleware that allows common patterns around retrying, caching, logging, and authentication to be abstracted away from individual requests.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Interceptors", - "url": "https://angular.dev/guide/http/interceptors", - "type": "article" - }, - { - "title": "Angular Interceptor", - "url": "https://www.scaler.com/topics/angular/angular-interceptor/", - "type": "article" - }, - { - "title": "Interceptors in Angular", - "url": "https://www.youtube.com/watch?v=w1_AmHv2LmA", - "type": "video" - } - ] - }, - "lfp7PIjwITU5gBITQdirD": { - "title": "RxJS Basics", - "description": "Reactive Extensions for JavaScript, or RxJS, is a reactive library used to implement reactive programming to deal with async implementation, callbacks, and event-based programs.\n\nThe reactive paradigm can be used in many different languages through the use of reactive libraries. These libraries are downloaded APIs that provide functionalities for reactive tools like observers and operators. 
It can be used in your browser or with Node.js.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Learn RxJS", - "url": "https://www.learnrxjs.io/", - "type": "article" - }, - { - "title": "RxJs and Observables for Beginners: A Beginner Friendly Introduction", - "url": "https://blog.angular-university.io/functional-reactive-programming-for-angular-2-developers-rxjs-and-observables/", - "type": "article" - }, - { - "title": "Beginner's RxJS Tutorial: Dive Deep with RxJS Crash Course!", - "url": "https://www.youtube.com/watch?v=yJdh1_FbtjU", - "type": "video" - } - ] - }, - "krXA6ua7E3m4IIpFkgQZe": { - "title": "Observable Pattern", - "description": "The observer pattern is a software design pattern in which an object, named the subject, maintains a list of its dependents, called observers, and notifies them automatically of any state changes, usually by calling one of their methods.\n\nAngular uses the Observer pattern which simply means — Observable objects are registered, and other objects observe (in Angular using the subscribe method) them and take action when the observable object is acted on in some way.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Angular and Observable", - "url": "https://medium.com/fuzzycloud/angular-and-observable-4bf890b2a282", - "type": "article" - } - ] - }, - "b06Y5YrqBbHhWkK6Ws_1c": { - "title": "Observable Lifecycle", - "description": "An observable is a function that acts as a wrapper for a data stream. They support to pass messages inside your application. An observable is useless until an observer subscribes to it. An observer is an object which consumes the data emitted by the observable. An observer keeps receiving data values from the observable until the observable is completed, or the observer unsubscribes from the observable. Otherwise observers can receive data values from the observable continuously and asynchronously. 
So we can perform various operations such as updating the user interface, or passing the JSON response.\n\nThere are 4 stages for a life cycle of an observable.\n\n* Creation\n* Subscription\n* Execution\n* Destruction\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Understanding Observable LifeCycle", - "url": "https://medium.com/analytics-vidhya/understanding-rxjs-observables-ad5b34d9607f", - "type": "article" - } - ] - }, - "e1ZmmxPZuogCNgtbPPWmd": { - "title": "RxJS vs Promises", - "description": "In a nutshell, the main differences between the Promise and the Observable are as follows:\n\n* The Promise is eager, whereas the Observable is lazy,\n* The Promise is always asynchronous, while the Observable can be either asynchronous or synchronous,\n* The Promise can provide a single value, whereas the Observable is a stream of values (from 0 to multiple values), you can apply RxJS operators to the Observable to get a new tailored stream.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Why RxJS? RxJS vs Promises", - "url": "https://javascript.plainenglish.io/why-rxjs-rxjs-vs-promises-b28962771d68", - "type": "article" - }, - { - "title": "Explore top posts about RxJS", - "url": "https://app.daily.dev/tags/rxjs?ref=roadmapsh", - "type": "article" - } - ] - }, - "ihsjIcF0tkhjs56458teE": { - "title": "Operators", - "description": "RxJS operators are crucial functions that enable the declarative composition of complex asynchronous code. There are two primary types: **Pipeable Operators**, such as `filter()` or `mergeMap()`, which are chained using the `observableInstance.pipe()` method to transform an existing Observable into a new one without altering the original; and **Creation Operators**, like `of()` or `interval()`, which are standalone functions used to generate new Observables from scratch. 
This modular approach to handling asynchronous data streams greatly enhances code readability and maintainability.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "List of Creation Operators", - "url": "https://www.learnrxjs.io/learn-rxjs/operators/creation", - "type": "article" - }, - { - "title": "Full RxJS Operators Documentation", - "url": "https://www.learnrxjs.io/learn-rxjs/operators/", - "type": "article" - } - ] - }, - "nxUbl0eu3LsSL-Z8X6nP5": { - "title": "Filtering", - "description": "RxJS provides a variety of filtering operators that you can use to filter and transform the data in a stream. You can use these operators in combination with other RxJS operators to create powerful and efficient data processing pipelines.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "RxJS Operators", - "url": "https://www.learnrxjs.io/learn-rxjs/operators", - "type": "article" - }, - { - "title": "Understanding RxJS Operators", - "url": "https://medium.com/@madhavi792002/understanding-rxjs-operators-a-friendly-guide-to-reactive-programming-part-1-305dbc0c6e72", - "type": "article" - } - ] - }, - "bJbbayFQ9WSJT9-qy0H5l": { - "title": "Rate Limiting", - "description": "Rate limiting in RxJS refers to the practice of restricting the rate at which events or data can be emitted from an observable. This can be useful in situations where the rate of incoming data is higher than the rate at which it can be processed, or where there are limits on the number of requests that can be made to a server. There are a few different operators in RxJS that can be used for rate limiting, such as throttleTime and sampleTime. These operators can be used to limit the rate of emissions from an observable by discarding emissions that occur too frequently. 
Another operator is auditTime it emits the last value from the source Observable during periodic time windows.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "throttleTime", - "url": "https://www.learnrxjs.io/learn-rxjs/operators/filtering/throttletime", - "type": "article" - }, - { - "title": "auditTime", - "url": "https://www.learnrxjs.io/learn-rxjs/operators/filtering/audittime", - "type": "article" - }, - { - "title": "Blogs and Tutorials on RxJS", - "url": "https://blog.angular-university.io/functional-reactive-programming-for-angular-2-developers-rxjs-and-observables/", - "type": "article" - } - ] - }, - "kdMJHljMzGA3oRlh8Zvos": { - "title": "Transformation", - "description": "In RxJS, \"transformation\" refers to the process of modifying or manipulating the data emitted by an Observable. There are a variety of methods available in RxJS that can be used to transform the data emitted by an Observable, including:\n\n* **map**: applies a function to each item emitted by the Observable and emits the resulting value\n* **mergeMap**: applies a function to each item emitted by the Observable, and then merges the resulting Observables into a single Observable\n* **switchMap**: applies a function to each item emitted by the Observable, and then switches to the latest resulting Observable\n* **concatMap**: applies a function to each item emitted by the Observable, and then concatenates the resulting Observables into a single Observable\n* **exhaustMap**: applies a function to each item emitted by the Observable, but ignores subsequent emissions until the current Observable completes\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "The RxJS Library", - "url": "https://v17.angular.io/guide/rx-library", - "type": "article" - }, - { - "title": "Merge", - "url": "https://www.learnrxjs.io/learn-rxjs/operators/combination/merge", - "type": "article" - }, - { - "title": "Concat", - "url": 
"https://www.learnrxjs.io/learn-rxjs/operators/combination/concat", - "type": "article" - }, - { - "title": "Zip", - "url": "https://www.learnrxjs.io/learn-rxjs/operators/combination/zip", - "type": "article" - }, - { - "title": "switchMap", - "url": "https://www.learnrxjs.io/learn-rxjs/operators/transformation/switchmap", - "type": "article" - }, - { - "title": "concatMap", - "url": "https://www.learnrxjs.io/learn-rxjs/operators/transformation/concatMap", - "type": "article" - }, - { - "title": "exhaustMap", - "url": "https://www.learnrxjs.io/learn-rxjs/operators/transformation/exhaustMap", - "type": "article" - }, - { - "title": "switchMap vs mergeMap vs concatMap vs exhaustMap practical guide", - "url": "https://youtu.be/40pC5wHowWw", - "type": "video" - } - ] - }, - "IgUHqfVhiGpwxT9tY8O88": { - "title": "Combination", - "description": "RxJS combination operators merge multiple observables into a single one using various strategies. Key operators include: `Merge` (emits items from all sources as they arrive), `Concat` (emits items from sources sequentially, one after another), `Zip` (pairs emissions from sources based on index), `CombineLatest` (emits based on the latest values from all sources whenever any source emits), `WithLatestFrom` (combines the value of one observable with the latest values of others when the first observable emits), and `ForkJoin` (emits the last value from each source only after all sources complete).\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "The RxJS Library", - "url": "https://v17.angular.io/guide/rx-library", - "type": "article" - }, - { - "title": "Merge", - "url": "https://www.learnrxjs.io/learn-rxjs/operators/combination/merge", - "type": "article" - }, - { - "title": "Concat", - "url": "https://www.learnrxjs.io/learn-rxjs/operators/combination/concat", - "type": "article" - }, - { - "title": "Zip", - "url": "https://www.learnrxjs.io/learn-rxjs/operators/combination/zip", - "type": "article" - 
}, - { - "title": "CombineLatest", - "url": "https://www.learnrxjs.io/learn-rxjs/operators/combination/combineLatest", - "type": "article" - }, - { - "title": "WithLatestFrom", - "url": "https://www.learnrxjs.io/learn-rxjs/operators/combination/withLatestFrom", - "type": "article" - }, - { - "title": "ForkJoin", - "url": "https://www.learnrxjs.io/learn-rxjs/operators/combination/forkJoin", - "type": "article" - } - ] - }, - "u1TG8i145o0RKhOR_5epf": { - "title": "Signals", - "description": "Angular Signals is a system that granularly tracks how and where your state is used throughout an application, allowing the framework to optimize rendering updates.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Signals", - "url": "https://angular.dev/guide/signals", - "type": "article" - }, - { - "title": "Angular Signals Complete Guide", - "url": "https://blog.angular-university.io/angular-signals/", - "type": "article" - }, - { - "title": "Signals Unleashed: The Full Guide", - "url": "https://www.youtube.com/watch?v=6W6gycuhiN0&t=169s", - "type": "video" - } - ] - }, - "KAdtebWvgvMifIwd52yc4": { - "title": "RxJS Interop", - "description": "Angular's `@angular/core/rxjs-interop` package provides useful utilities to integrate Angular Signals with RxJS Observables. Use a `toSignal` function to create a signal that tracks the value of an Observable. 
Use the `toObservable` utility to create an Observable which tracks the value of a signal.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "RxJS Interop", - "url": "https://angular.dev/guide/signals/rxjs-interop", - "type": "article" - }, - { - "title": "Angular Signals RxJS Interop from a Practical example", - "url": "https://angular.love/en/angular-signals-rxjs-interop-from-a-practical-example", - "type": "article" - }, - { - "title": "Angular Signals RxJs Interoperability: toObservable() (Guess the Behavior)", - "url": "https://www.youtube.com/watch?v=cam39UyVbpI", - "type": "video" - } - ] - }, - "LcJyAfv9hjyUNXUVyPRP4": { - "title": "Inputs as Signals", - "description": "Signal inputs allow values to be bound from parent components. Those values are exposed using a Signal and can change during the lifecycle of your component. Angular supports two variants of inputs: `Optional` and `Required`.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Inputs as Signals", - "url": "https://angular.dev/guide/signals/inputs", - "type": "article" - }, - { - "title": "Angular Signal Inputs: Complete Guide to input()", - "url": "https://blog.angular-university.io/angular-signal-inputs/", - "type": "article" - }, - { - "title": "Angular's New Signal Inputs", - "url": "https://www.youtube.com/watch?v=yjCeaiWXC0U", - "type": "video" - } - ] - }, - "9HS9C3yq9EUcUy0ZUZk_H": { - "title": "Queries as Signals", - "description": "A component or directive can define queries that find child elements and read values from their injectors. Developers most commonly use queries to retrieve references to components, directives, DOM elements, and more. 
There are two categories of query: view queries and content queries.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Queries as Signals", - "url": "https://angular.dev/guide/signals/queries", - "type": "article" - }, - { - "title": "Querying made easy: exploring Angular's query signals", - "url": "https://netbasal.com/querying-made-easy-exploring-angulars-query-signals-ca850b5db892", - "type": "article" - }, - { - "title": "Angular Signal Queries with the viewChild() and contentChild() functions", - "url": "https://www.youtube.com/watch?v=b35ts9OinBc", - "type": "video" - } - ] - }, - "IeU6ClS_yp6BYKdkQOJVf": { - "title": "Model Inputs", - "description": "Model inputs are a special type of input that enable a component to propagate new values back to another component. Use model inputs in components that exist to modify a value based on user interaction.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Model Inputs", - "url": "https://angular.dev/guide/signals/model", - "type": "article" - }, - { - "title": "Angular Model Inputs: Two-way binding inputs with signals", - "url": "https://www.codemotion.com/magazine/frontend/angular-model-inputs-two-way-binding-inputs-with-signals/", - "type": "article" - }, - { - "title": "Model Inputs: Reactive two-way binding ", - "url": "https://dev.to/this-is-angular/model-inputs-reactive-two-way-binding-2538", - "type": "article" - }, - { - "title": "Angular's New Model Inputs: Two-way Communication with your Child Components", - "url": "https://www.youtube.com/watch?v=frXIBKqzTK0", - "type": "video" - } - ] - }, - "Mqe_s-nwBqAL6X7OGRHEN": { - "title": "State Management", - "description": "Application state management is the process of maintaining knowledge of an application's inputs across multiple related data flows that form a complete business transaction -- or a session -- to understand the condition of the app at any given moment. 
In computer science, an input is information put into the program by the user and state refers to the condition of an application according to its stored inputs -- saved as variables or constants. State can also be described as the collection of preserved information that forms a complete session.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "What is State Management?", - "url": "https://www.techtarget.com/searchapparchitecture/definition/state-management", - "type": "article" - }, - { - "title": "Angular state management made simple with NgRx", - "url": "https://blog.logrocket.com/angular-state-management-made-simple-with-ngrx/", - "type": "article" - }, - { - "title": "Angular State Management with NgRx", - "url": "https://www.syncfusion.com/blogs/post/angular-state-management-with-ngrx.aspx", - "type": "article" - }, - { - "title": "State Management and the Facade pattern in Angular", - "url": "https://thefullstack.engineer/full-stack-development-series-part-10-state-management-and-the-facade-pattern-in-angular", - "type": "article" - } - ] - }, - "N9ZCPgFnFIUv4jMv1w5qK": { - "title": "NGXS", - "description": "Ngxs is a state management pattern for the Angular framework. It acts as a single source of truth for our application. Ngxs is very simple and easily implementable. It reduce lots of boilerplate code . It is a replacement for Ngrx. In Ngrx we are creating state, action, reducer, and effects but in Ngxs, we are creating only state and actions instead of all of this. 
Like Ngrx, Ngxs is also asynchronous and when we dispatch any action we can get a response back.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "What is NGXS?", - "url": "https://www.ngxs.io/", - "type": "article" - }, - { - "title": "Details about NGXS", - "url": "https://medium.com/@knoldus/introduction-to-ngxs-state-management-pattern-library-for-angular-ec76f681ceba", - "type": "article" - }, - { - "title": "Practice of NGXS", - "url": "https://www.youtube.com/watch?v=SGj11j4hxmg", - "type": "video" - } - ] - }, - "ir94IdkF1tVAA8ZTD9r0N": { - "title": "NgRx", - "description": "NgRx is a framework for building reactive applications in Angular. NgRx simplifies managing application state by enforcing unidirectional data flow and providing tools like NgRx Store, NgRx Effects, NgRx Router Store, NgRx Signals, NgRx Entity, and NgRx Operators.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "What is NGRX?", - "url": "https://ngrx.io/docs", - "type": "article" - }, - { - "title": "Angular NgRx Store and Effects Crash Course", - "url": "https://blog.angular-university.io/angular-ngrx-store-and-effects-crash-course/", - "type": "article" - }, - { - "title": "Angular State Management with NgRx", - "url": "https://www.youtube.com/watch?v=a3_GW3RBqn0", - "type": "video" - }, - { - "title": "Angular NgRx Signal Store Crash Course (For NgRx Beginners)", - "url": "https://www.youtube.com/watch?v=HqxY0JPlh54", - "type": "video" - }, - { - "title": "NgRx Best Practices", - "url": "https://youtu.be/yYiO-kjmLAc?si=7J_JkOdbyocfb5m_", - "type": "video" - }, - { - "title": "Angular Course with NgRx - Building Angular Project From Scratch", - "url": "https://www.youtube.com/watch?v=vcfZ0EQpYTA", - "type": "video" - } - ] - }, - "rgPUcSKxG9DvXicLfC2Ay": { - "title": "Elf", - "description": "Elf is a reactive immutable state management solution built on top of RxJS. 
It uses custom RxJS operators to query the state and pure functions to update it. Elf encourages simplicity. It saves you the hassle of creating boilerplate code and offers powerful tools with a moderate learning curve, suitable for experienced and inexperienced developers alike.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Elf", - "url": "https://github.com/ngneat/elf", - "type": "opensource" - }, - { - "title": "Elf NG Router Store", - "url": "https://github.com/ngneat/elf-ng-router-store", - "type": "opensource" - }, - { - "title": "What is Elf?", - "url": "https://ngneat.github.io/elf/", - "type": "article" - }, - { - "title": "Case Study: Elf", - "url": "https://medium.com/@gronichomer/case-study-elf-%EF%B8%8F%EF%B8%8F-part-1-fe5e87c31c89", - "type": "article" - } - ] - }, - "m4WBnx_9h01Jl6Q1sxi4Y": { - "title": "Zones", - "description": "Zone.js is a signaling mechanism that Angular uses to detect when an application state might have changed. It captures asynchronous operations like setTimeout, network requests, and event listeners. Angular schedules change detection based on signals from Zone.js.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Resolving Zone Pollution", - "url": "https://angular.dev/best-practices/zone-pollution", - "type": "article" - }, - { - "title": "Angular without ZoneJS (Zoneless)", - "url": "https://angular.dev/guide/experimental/zoneless", - "type": "article" - }, - { - "title": "NgZone - API", - "url": "https://angular.dev/api/core/NgZone", - "type": "article" - }, - { - "title": "WTF is \"Zone.js\" and is it making your app slow?", - "url": "https://www.youtube.com/watch?v=lmrf_gPIOZU", - "type": "video" - } - ] - }, - "1x5pT607aKE-S-NCWB810": { - "title": "Zoneless Applications", - "description": "Angular 18 introduced an experimental feature called zoneless change detection. 
This technology removes the need for `Zone.js`, a library that was previously used for change detection in Angular from the beginning. By eliminating `Zone.js`, Angular applications can have faster initial renders, smaller bundle sizes, and simpler debugging.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Zoneless", - "url": "https://angular.dev/guide/experimental/zoneless", - "type": "article" - }, - { - "title": "Zoneless Change Detection Angular 18", - "url": "https://blog.logrocket.com/zoneless-change-detection-angular-18/", - "type": "article" - }, - { - "title": "From Zone JS to Zoneless Angular and back: How it all works", - "url": "https://angularindepth.com/posts/1513/from-zone-js-to-zoneless-angular-and-back-how-it-all-works", - "type": "article" - }, - { - "title": "A New Era for Angular: Zoneless Change Detection", - "url": "https://justangular.com/blog/a-new-era-for-angular-zoneless-change-detection", - "type": "article" - }, - { - "title": "Zoneless Angular Applications in V18", - "url": "https://www.youtube.com/watch?v=MZ6s5EL7hKk", - "type": "video" - }, - { - "title": "I tested Angular 18 Zoneless mode and this is what I found out!", - "url": "https://www.youtube.com/watch?v=vHNeAqgNM4o", - "type": "video" - } - ] - }, - "EbJib-XfZFF9bpCtL3aBs": { - "title": "Developer Tools", - "description": "Angular offers a suite of powerful developer tools designed to streamline and enhance the development process. These include the Angular CLI for efficient project setup and management, the Angular DevTools extension for advanced debugging and profiling, and the Angular Language Service for improved code editing and completion. 
Leveraging these tools will significantly improve your ability to write high-quality Angular code.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "VS Code NG Language Service", - "url": "https://github.com/angular/vscode-ng-language-service", - "type": "opensource" - }, - { - "title": "Angular Devtools", - "url": "https://angular.dev/tools/devtools", - "type": "article" - }, - { - "title": "Angular CLI", - "url": "https://angular.dev/tools/cli", - "type": "article" - }, - { - "title": "Language Service Docs", - "url": "https://angular.dev/tools/language-service", - "type": "article" - } - ] - }, - "4YSk6I63Ew--zoXC3xmrC": { - "title": "Angular CLI", - "description": "The Angular CLI is a command-line interface tool that you use to initialize, develop, scaffold, and maintain Angular applications directly from a command shell. we can install angular latest CLI using the following command:\n\n`npm install -g @angular/cli`\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "CLI Reference", - "url": "https://angular.dev/cli", - "type": "article" - }, - { - "title": "The Angular CLI", - "url": "https://angular.dev/tools/cli", - "type": "article" - }, - { - "title": "Explore top posts about Angular", - "url": "https://app.daily.dev/tags/angular?ref=roadmapsh", - "type": "article" - }, - { - "title": "Angular CLI - Setup", - "url": "https://www.youtube.com/watch?v=mZnzX3J5XKI", - "type": "video" - } - ] - }, - "FVH0lnbIZ2m5EfF2EJ2DW": { - "title": "Local Setup", - "description": "To install Angular CLI on your local system, you need to install `Node.js`. Angular requires an active LTS or maintenance LTS version of Node. Angular CLI uses Node and its associated package manager, npm, to install and run JavaScript tools outside the browser. 
Once you have Node installed, you can run `npm install -g @angular/cli` to install the Angular CLI.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Local set-up", - "url": "https://angular.dev/tools/cli/setup-local", - "type": "article" - }, - { - "title": "Version Compatibility Guide", - "url": "https://angular.dev/reference/versions", - "type": "article" - }, - { - "title": "How To Install Angular CLI In Windows 10 | In Under 2 Minutes!", - "url": "https://www.youtube.com/watch?v=vjgACKkPENg", - "type": "video" - }, - { - "title": "How to Install Multiple Versions of Angular in Your Development Environment", - "url": "https://www.youtube.com/watch?v=LYNG3kcKRQ8", - "type": "video" - } - ] - }, - "1fVi9AK6aLjt5QgAFbnGX": { - "title": "Deployment", - "description": "The Angular CLI command `ng deploy` executes the deploy CLI builder associated with your project. A number of third-party builders implement deployment capabilities to different platforms. You can add any of them to your project with `ng add`.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "NGX AWS Deploy", - "url": "https://github.com/Jefiozie/ngx-aws-deploy", - "type": "opensource" - }, - { - "title": "Angular CLI GitHub Pages", - "url": "https://github.com/angular-schule/angular-cli-ghpages", - "type": "opensource" - }, - { - "title": "Deployment", - "url": "https://angular.dev/tools/cli/deployment", - "type": "article" - }, - { - "title": "Firebase Hosting", - "url": "https://firebase.google.com/docs/hosting", - "type": "article" - }, - { - "title": "Vercel: Angular Solutions", - "url": "https://vercel.com/solutions/angular", - "type": "article" - }, - { - "title": "Netlify", - "url": "https://docs.netlify.com/frameworks/angular/", - "type": "article" - }, - { - "title": "Cloudflare Pages", - "url": "https://developers.cloudflare.com/pages/framework-guides/deploy-an-angular-site/#create-a-new-project-using-the-create-cloudflare-cli-c3", - 
"type": "article" - }, - { - "title": "AWS Amplify", - "url": "https://docs.amplify.aws/angular/", - "type": "article" - } - ] - }, - "yhNGhduk__ow8VTLc6inZ": { - "title": "End-to-End Testing", - "description": "End-to-end or (E2E) testing is a form of testing used to assert your entire application works as expected from start to finish or \"end-to-end\". E2E testing differs from unit testing in that it is completely decoupled from the underlying implementation details of your code. It is typically used to validate an application in a way that mimics the way a user would interact with it. The `ng e2e` command will first check your project for the \"e2e\" target. If it can't locate it, the CLI will then prompt you which e2e package you would like to use and walk you through the setup.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "End to End Testing", - "url": "https://angular.dev/tools/cli/end-to-end", - "type": "article" - }, - { - "title": "Your First Test with Cypress", - "url": "https://docs.cypress.io/guides/end-to-end-testing/writing-your-first-end-to-end-test", - "type": "article" - }, - { - "title": "Writing Tests: Introduction", - "url": "https://nightwatchjs.org/guide/writing-tests/introduction.html", - "type": "article" - }, - { - "title": "Getting Started", - "url": "https://webdriver.io/docs/gettingstarted/", - "type": "article" - }, - { - "title": "Puppeteer Angular Schematic", - "url": "https://pptr.dev/guides/ng-schematics/#getting-started", - "type": "article" - } - ] - }, - "Uvr0pRk_fOzwRwqn0dQ6N": { - "title": "Schematics", - "description": "A schematic is a template-based code generator that supports complex logic. 
It is a set of instructions for transforming a software project by generating or modifying code.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Generating Code using Schematics", - "url": "https://angular.dev/tools/cli/schematics", - "type": "article" - }, - { - "title": "Angular Blog", - "url": "https://blog.angular.io/schematics-an-introduction-dc1dfbc2a2b2?gi=ad9571373944", - "type": "article" - } - ] - }, - "Ax-s_xw3FO3Ocv-AnLbQD": { - "title": "Build Environments", - "description": "You can define different named build configurations for your project, such as `development` and `production`, with different defaults. Each named configuration can have defaults for any of the options that apply to the various builder targets, such as `build`, `serve`, and `test`. The Angular CLI can replace files for each environment if you pass a `--configuration` flag with the named configuration when running a CLI command.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Build Environments", - "url": "https://angular.dev/tools/cli/environments#using-environment-specific-variables-in-your-app", - "type": "article" - }, - { - "title": "Building an Angular application in various environments using Angular CLI and server", - "url": "https://medium.com/yavar/building-an-angular-application-in-various-environments-using-angular-cli-and-server-18f94067154b", - "type": "article" - } - ] - }, - "TeWEy9I-hU6SH02Sy2S2S": { - "title": "CLI Builders", - "description": "A number of Angular CLI commands run a complex process on your code, such as building, testing, or serving your application. The commands use an internal tool called `Architect` to run CLI builders, which invoke another tool (bundler, test runner, server) to accomplish the desired task. 
Custom builders can perform an entirely new task or to change which third-party tool is used by an existing command.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Angular Builders", - "url": "https://github.com/just-jeb/angular-builders", - "type": "opensource" - }, - { - "title": "CLI Builders", - "url": "https://angular.dev/tools/cli/cli-builder", - "type": "article" - }, - { - "title": "Angular Builders – Creating Custom Builder from Scratch", - "url": "https://www.youtube.com/watch?v=QbDkDLnXAZE", - "type": "video" - } - ] - }, - "MwtM1UAIfj4FJ-Y4CKDsP": { - "title": "AoT Compilation", - "description": "Angular applications require a compilation process before they can run in a browser. The Angular ahead-of-time (AOT) compiler converts your Angular HTML and TypeScript code into efficient JavaScript code during the build phase before the browser downloads and runs that code. Compiling your application during the build process provides a faster rendering in the browser.\n\nVisit the following resources to learn to more:", - "links": [ - { - "title": "Angular Compiler Output", - "url": "https://github.com/JeanMeche/angular-compiler-output", - "type": "opensource" - }, - { - "title": "Ahead-of-time (AOT) compilation", - "url": "https://angular.dev/tools/cli/aot-compiler", - "type": "article" - }, - { - "title": "Understanding Angular's ahead of time compliation", - "url": "https://blog.nashtechglobal.com/understanding-angulars-ahead-of-time-aot-compilation/", - "type": "article" - } - ] - }, - "T3MmS3bvMMgCUbOk3ktU7": { - "title": "DevTools", - "description": "Angular DevTools is a browser extension that provides debugging and profiling capabilities for Angular applications. 
You can view component trees and change detection cycles.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Angular Devtools", - "url": "https://angular.dev/tools/devtools", - "type": "article" - }, - { - "title": "Chrome Web Store", - "url": "https://chromewebstore.google.com/detail/angular-devtools/ienfalfjdbdpebioblfackkekamfmbnh", - "type": "article" - }, - { - "title": "Firefox Add-ons", - "url": "https://addons.mozilla.org/en-US/firefox/addon/angular-devtools/", - "type": "article" - }, - { - "title": "Did you try the LATEST Angular developer tools?", - "url": "https://www.youtube.com/watch?v=tAfe33fVW4w", - "type": "video" - } - ] - }, - "ql7SyxrRmjpiXJ9hQeWPq": { - "title": "Language Service", - "description": "The Angular Language Service provides code editors with a way to get completions, errors, hints, and navigation inside Angular templates (external and in-line). Anytime you open an Angular application for the first time, an installation prompt will occur.\n\nVisit the following links to learn more:", - "links": [ - { - "title": "VS Code NG Language Service", - "url": "https://github.com/angular/vscode-ng-language-service", - "type": "opensource" - }, - { - "title": "Language Service Docs", - "url": "https://angular.dev/tools/language-service", - "type": "article" - } - ] - }, - "cl89U8atD6gw5rMGUm4Ix": { - "title": "Libraries", - "description": "Use the Angular CLI and the npm package manager to build and publish your library as an npm package.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Libraries", - "url": "https://angular.dev/tools/libraries/creating-libraries", - "type": "article" - }, - { - "title": "Angular CLI", - "url": "https://angular.dev/tools/cli", - "type": "article" - } - ] - }, - "YHV5oFwLwphXf1wJTDZuG": { - "title": "Using Libraries", - "description": "Libraries are published as `npm packages`, usually together with schematics that integrate them with the Angular CLI. 
To integrate reusable library code into an application, you need to install the package and import the provided functionality in the location you use it. For most published Angular libraries, use the `ng add ` Angular CLI command. A published library typically provides a `README` file or other documentation on how to add that library to your application. A library is able to be updated by the publisher, and also has individual dependencies which need to be kept current. To check for updates to your installed libraries, use the `ng update` Angular CLI command.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Using Libraries", - "url": "https://angular.dev/tools/libraries/using-libraries", - "type": "article" - }, - { - "title": "npm", - "url": "https://www.npmjs.com/", - "type": "article" - } - ] - }, - "A1mYMg7cbcj6p_VkDf-Tz": { - "title": "Creating Libraries", - "description": "If you have developed features that are suitable for reuse, you can create your own libraries. These libraries can be used locally in your workspace, or you can publish them as npm packages to share with other projects or other Angular developers. Putting code into a separate library is more complex than simply putting everything in one application. It requires more of an investment in time and thought for managing, maintaining, and updating the library. 
This complexity can pay off when the library is being used in multiple applications.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "NG Packagr", - "url": "https://github.com/ng-packagr/ng-packagr", - "type": "opensource" - }, - { - "title": "Creating Libraries", - "url": "https://angular.dev/tools/libraries/creating-libraries", - "type": "article" - }, - { - "title": "File Structure: Library project files", - "url": "https://angular.dev/reference/configs/file-structure#library-project-files", - "type": "article" - } - ] - }, - "jfHaS8TqE4tcAo59K8Nkn": { - "title": "SSR", - "description": "A normal Angular application executes in the browser, rendering pages in the DOM in response to user actions. Angular Universal executes on the server, generating static application pages that later get bootstrapped on the client. This means that the application generally renders more quickly, giving users a chance to view the application layout before it becomes fully interactive.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Server-side Rendering", - "url": "https://angular.dev/guide/ssr", - "type": "article" - }, - { - "title": "Rendering on the Web", - "url": "https://web.dev/rendering-on-the-web/", - "type": "article" - }, - { - "title": "Explore top posts about Angular", - "url": "https://app.daily.dev/tags/angular?ref=roadmapsh", - "type": "article" - } - ] - }, - "b-0yQ74zHtAxI9aRLBohc": { - "title": "SSG", - "description": "SSG (Static Site Generator) helps in building the HTML full website during the process of building and serving that HTML page. This method helps to generate the HTML website on the client side before it's served on the server side. Therefore, whenever a user requests a HTML page, the HTML page will be rendered, and secondly, the Angular app will be rendered. 
The SSG can be used only if your website is static or its content doesn't change frequently.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Angular Air with Alyssa Nicoll - SSR, SSG, ISR, & SOS", - "url": "https://www.youtube.com/watch?v=b0pUU7RJbBQ", - "type": "podcast" - }, - { - "title": "Prerendering (SSG)", - "url": "https://angular.dev/guide/prerendering", - "type": "article" - }, - { - "title": "Angular 16 Pre Rendering Static Pages - Static Site Generation SSG", - "url": "https://www.youtube.com/watch?v=vmOWJvm3apA", - "type": "video" - } - ] - }, - "kauQofxCmpktXPcnzid17": { - "title": "AnalogJS", - "description": "AnalogJS is a full-stack meta-framework powered by Vite and Nitro for Angular. Analog supports both Server-Side Rendering (SSR) and Static Site Generation (SSG). Analog uses file-based routing and supports API (server) routes.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "analogjs/analog", - "url": "https://github.com/analogjs/analog", - "type": "opensource" - }, - { - "title": "AnalogJS", - "url": "https://analogjs.org/", - "type": "article" - }, - { - "title": "AnalogJS Documentation", - "url": "https://analogjs.org/docs", - "type": "article" - }, - { - "title": "Robin Goetz - AnalogJS - The Vite powered Angular meta-framework", - "url": "https://www.youtube.com/watch?v=BSgpvP4eAGk", - "type": "video" - }, - { - "title": "Full-stack Angular (SSR, file-based routing, + more) with AnalogJS", - "url": "https://www.youtube.com/watch?v=VSCXOTCJpiI", - "type": "video" - }, - { - "title": "Is AnalogJS good enough for my blog site?", - "url": "https://www.youtube.com/watch?v=xTzEDQULo6s", - "type": "video" - } - ] - }, - "mm6c7GLQEwoQdAHdAYzGh": { - "title": "Security", - "description": "This topic describes Angular's built-in protections against common web-application vulnerabilities and attacks such as cross-site scripting attacks. 
It doesn't cover application-level security, such as authentication and authorization.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Security", - "url": "https://angular.dev/best-practices/security", - "type": "article" - }, - { - "title": "Open Web Application Security Project (OWASP)", - "url": "https://owasp.org/", - "type": "article" - } - ] - }, - "umUX4Hxk7srHlFR_Un-u7": { - "title": "Cross-site Scripting", - "description": "Cross-site scripting (XSS) enables attackers to inject malicious code into web pages. Such code can then, for example, steal user and login data, or perform actions that impersonate the user. This has been one of the biggest web security vulnerabilities for over a decade. To systematically block XSS bugs, Angular treats all values as untrusted by default. When a value is inserted into the DOM from a template binding, or interpolation, Angular sanitizes and escapes untrusted values.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Preventing cross-site Scripting (XSS)", - "url": "https://angular.dev/best-practices/security#preventing-cross-site-scripting-xss", - "type": "article" - }, - { - "title": "Mitigate cross-site scripting (XSS)", - "url": "https://web.dev/articles/strict-csp", - "type": "article" - } - ] - }, - "cgI9oeUHufA-ky_W1zENe": { - "title": "Sanitization", - "description": "Sanitization is the inspection of an untrusted value, turning it into a value that's safe to insert into the DOM. In many cases, sanitization doesn't change a value at all. Sanitization depends on context: A value that's harmless in CSS is potentially dangerous in a URL.\n\nAngular sanitizes untrusted values for HTML and URLs. Sanitizing resource URLs isn't possible because they contain arbitrary code. 
In development mode, Angular prints a console warning when it has to change a value during sanitization.\n\nVisit the following resources to learn more:", - "links": [ - { - "title": "Sanitization and Security Contexts", - "url": "https://angular.dev/best-practices/security#sanitization-and-security-contexts", - "type": "article" - } - ] - }, - "XoYSuv1salCCHoI1cJkxv": { - "title": "Trusting Safe Values", - "description": "Sometimes applications genuinely need to include executable code, display an `