// litellm/docs/my-website/sidebars.js
// Last commit: 558c8f92d1 by Sameer Kankute, 2025-12-05 22:23:45 +05:30
// Merge pull request #17519 from BerriAI/litellm_cursor_integration
// ("Add support for cursor BYOK with its own configuration")

/**
 * Creating a sidebar enables you to:
 *  - create an ordered group of docs
 *  - render a sidebar for each doc of that group
 *  - provide next/previous navigation
 *
 * The sidebars can be generated from the filesystem, or explicitly defined here.
 * Create as many sidebars as you want.
 */
// @ts-check
/** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */
const sidebars = {
// By default, Docusaurus generates a sidebar from the docs folder structure
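// For reference, a minimal sketch (not used by this site) of such a fully autogenerated sidebar,
// assuming the default docs/ folder layout; the sidebar name here is illustrative only:
// exampleAutogeneratedSidebar: [{ type: "autogenerated", dirName: "." }],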
integrationsSidebar: [
{ type: "doc", id: "integrations/index" },
{
type: "category",
label: "Observability",
items: [
{
type: "category",
label: "Contributing to Integrations",
items: [
{
type: "autogenerated",
dirName: "contribute_integration"
}
]
},
{
type: "autogenerated",
dirName: "observability"
}
],
},
{
type: "category",
label: "Guardrails",
items: [
"proxy/guardrails/quick_start",
{
type: "category",
"label": "Contributing to Guardrails",
items: [
"adding_provider/generic_guardrail_api",
"adding_provider/simple_guardrail_tutorial",
"adding_provider/adding_guardrail_support",
]
},
"proxy/guardrails/test_playground",
...[
"proxy/guardrails/aim_security",
"proxy/guardrails/aporia_api",
"proxy/guardrails/azure_content_guardrail",
"proxy/guardrails/bedrock",
"proxy/guardrails/enkryptai",
"proxy/guardrails/ibm_guardrails",
"proxy/guardrails/grayswan",
"proxy/guardrails/lasso_security",
"proxy/guardrails/litellm_content_filter",
"proxy/guardrails/guardrails_ai",
"proxy/guardrails/lakera_ai",
"proxy/guardrails/model_armor",
"proxy/guardrails/noma_security",
"proxy/guardrails/dynamoai",
"proxy/guardrails/openai_moderation",
"proxy/guardrails/pangea",
"proxy/guardrails/pillar_security",
"proxy/guardrails/pii_masking_v2",
"proxy/guardrails/panw_prisma_airs",
"proxy/guardrails/secret_detection",
"proxy/guardrails/custom_guardrail",
"proxy/guardrails/prompt_injection",
"proxy/guardrails/tool_permission",
"proxy/guardrails/zscaler_ai_guard",
"proxy/guardrails/javelin"
].sort(),
],
},
{
type: "category",
label: "Alerting & Monitoring",
items: [
"proxy/alerting",
"proxy/pagerduty",
"proxy/prometheus"
]
},
{
type: "category",
label: "[Beta] Prompt Management",
items: [
"proxy/litellm_prompt_management",
"proxy/custom_prompt_management",
"proxy/native_litellm_prompt",
"proxy/prompt_management"
]
},
{
type: "category",
label: "AI Tools (OpenWebUI, Claude Code, etc.)",
items: [
"tutorials/claude_responses_api",
"tutorials/cost_tracking_coding",
"tutorials/cursor_integration",
"tutorials/github_copilot_integration",
"tutorials/litellm_gemini_cli",
"tutorials/litellm_qwen_code_cli",
"tutorials/openai_codex",
"tutorials/openweb_ui"
]
},
],
// But you can create a sidebar manually
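// Note: plain string entries below are Docusaurus shorthand for { type: "doc", id: "<string>" }.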
tutorialSidebar: [
{ type: "doc", id: "index" }, // NEW
{
type: "category",
label: "LiteLLM AI Gateway",
link: {
type: "generated-index",
title: "LiteLLM AI Gateway (LLM Proxy)",
description: `OpenAI Proxy Server (LLM Gateway) to call 100+ LLMs in a unified interface & track spend, set budgets per virtual key/user`,
slug: "/simple_proxy",
},
items: [
"proxy/docker_quick_start",
{
"type": "category",
"label": "Config.yaml",
"items": ["proxy/configs", "proxy/config_management", "proxy/config_settings"]
},
{
type: "category",
label: "Setup & Deployment",
items: [
"proxy/quick_start",
"proxy/cli",
"proxy/debugging",
"proxy/error_diagnosis",
"proxy/deploy",
"proxy/health",
"proxy/master_key_rotations",
"proxy/model_management",
"proxy/prod",
"proxy/release_cycle",
],
},
{
"type": "link",
"label": "Demo LiteLLM Cloud",
"href": "https://www.litellm.ai/cloud"
},
{
type: "category",
label: "Admin UI",
items: [
"proxy/ui",
"proxy/admin_ui_sso",
"proxy/custom_root_ui",
"proxy/custom_sso",
"proxy/ai_hub",
"proxy/model_compare_ui",
"proxy/public_teams",
"proxy/self_serve",
"proxy/ui/bulk_edit_users",
"proxy/ui_credentials",
"tutorials/scim_litellm",
{
type: "category",
label: "UI Logs",
items: [
"proxy/ui_logs",
"proxy/ui_logs_sessions"
]
}
],
},
{
type: "category",
label: "Architecture",
items: [
"proxy/architecture",
"proxy/control_plane_and_data_plane",
"proxy/db_deadlocks",
"proxy/db_info",
"proxy/image_handling",
"proxy/jwt_auth_arch",
"proxy/spend_logs_deletion",
"proxy/user_management_heirarchy",
"router_architecture"
],
},
{
type: "link",
label: "All Endpoints (Swagger)",
href: "https://litellm-api.up.railway.app/",
},
"proxy/enterprise",
{
type: "category",
label: "Authentication",
items: [
"proxy/virtual_keys",
"proxy/token_auth",
"proxy/service_accounts",
"proxy/access_control",
"proxy/cli_sso",
"proxy/custom_auth",
"proxy/ip_address",
"proxy/multiple_admins",
],
},
{
type: "category",
label: "Budgets + Rate Limits",
items: [
"proxy/users",
"proxy/team_budgets",
"proxy/tag_budgets",
"proxy/customers",
"proxy/dynamic_rate_limit",
"proxy/rate_limit_tiers",
"proxy/temporary_budget_increase",
],
},
"proxy/caching",
{
type: "category",
label: "Create Custom Plugins",
description: "Modify requests, responses, and more",
items: [
"proxy/call_hooks",
"proxy/rules",
]
},
"proxy/management_cli",
{
type: "link",
label: "Load Balancing, Routing, Fallbacks",
href: "https://docs.litellm.ai/docs/routing-load-balancing",
},
{
type: "category",
label: "Logging, Alerting, Metrics",
items: [
"proxy/dynamic_logging",
"proxy/logging",
"proxy/logging_spec",
"proxy/team_logging",
"proxy/email",
],
},
{
type: "category",
label: "Making LLM Requests",
items: [
"proxy/user_keys",
"proxy/clientside_auth",
"proxy/request_headers",
"proxy/response_headers",
"proxy/forward_client_headers",
"proxy/model_discovery",
],
},
{
type: "category",
label: "Model Access",
items: [
"proxy/model_access_guide",
"proxy/model_access",
"proxy/model_access_groups",
"proxy/team_model_add"
]
},
{
type: "category",
label: "Secret Managers",
items: [
"secret_managers/overview",
"secret_managers/aws_secret_manager",
"secret_managers/aws_kms",
"secret_managers/azure_key_vault",
"secret_managers/cyberark",
"secret_managers/google_secret_manager",
"secret_managers/google_kms",
"secret_managers/hashicorp_vault",
"secret_managers/custom_secret_manager",
"oidc"
]
},
{
type: "category",
label: "Spend Tracking",
items: [
"proxy/cost_tracking",
"proxy/custom_pricing",
"proxy/sync_models_github",
"proxy/billing",
],
},
]
},
{
type: "category",
label: "Supported Endpoints",
link: {
type: "generated-index",
title: "Supported Endpoints",
description:
"Learn which LLM API endpoints LiteLLM supports and how to call them",
slug: "/supported_endpoints",
},
items: [
"assistants",
{
type: "category",
label: "/audio",
items: [
"audio_transcription",
"text_to_speech",
]
},
{
type: "category",
label: "/batches",
items: [
"batches",
"proxy/managed_batches",
]
},
"containers",
{
type: "category",
label: "/chat/completions",
link: {
type: "generated-index",
title: "Chat Completions",
description: "Details on the completion() function",
slug: "/completion",
},
items: [
"completion/input",
"completion/output",
"completion/usage",
"completion/http_handler_config",
],
},
"text_completion",
"bedrock_converse",
"embedding/supported_embedding",
{
type: "category",
label: "/files",
items: [
"files_endpoints",
"proxy/litellm_managed_files",
],
},
{
type: "category",
label: "/fine_tuning",
items: [
"fine_tuning",
"proxy/managed_finetuning",
]
},
"generateContent",
"apply_guardrail",
"bedrock_invoke",
{
type: "category",
label: "/images",
items: [
"image_edits",
"image_generation",
"image_variations",
]
},
"videos",
"vector_store_files",
{
type: "category",
label: "/mcp - Model Context Protocol",
items: [
"mcp",
"mcp_usage",
"mcp_control",
"mcp_cost",
"mcp_guardrail",
]
},
"anthropic_unified",
"moderation",
"ocr",
{
type: "category",
label: "Pass-through Endpoints (Anthropic SDK, etc.)",
items: [
"pass_through/intro",
"pass_through/anthropic_completion",
"pass_through/assembly_ai",
"pass_through/bedrock",
"pass_through/azure_passthrough",
"pass_through/cohere",
"pass_through/google_ai_studio",
"pass_through/langfuse",
"pass_through/mistral",
"pass_through/openai_passthrough",
{
type: "category",
label: "Vertex AI",
items: [
"pass_through/vertex_ai",
"pass_through/vertex_ai_live_websocket",
"pass_through/vertex_ai_search_datastores",
]
},
"pass_through/vllm",
"proxy/pass_through",
"proxy/pass_through_guardrails"
]
},
"rag_ingest",
"realtime",
"rerank",
"response_api",
{
type: "category",
label: "/search",
items: [
"search/index",
"search/perplexity",
"search/tavily",
"search/exa_ai",
"search/parallel_ai",
"search/google_pse",
"search/dataforseo",
"search/firecrawl",
"search/searxng",
]
},
"skills",
{
type: "category",
label: "/vector_stores",
items: [
"vector_stores/create",
"vector_stores/search",
]
},
],
},
{
type: "category",
label: "Supported Models & Providers",
link: {
type: "generated-index",
title: "Providers",
description:
"Learn how to deploy + call models from different providers on LiteLLM",
slug: "/providers",
},
items: [
{
type: "doc",
id: "provider_registration/index",
label: "Integrate as a Model Provider",
},
{
type: "doc",
id: "provider_registration/add_model_pricing",
label: "Add Model Pricing & Context Window",
},
{
type: "category",
label: "OpenAI",
items: [
"providers/openai",
"providers/openai/responses_api",
"providers/openai/text_to_speech",
"providers/openai/videos",
]
},
"providers/text_completion_openai",
"providers/openai_compatible",
{
type: "category",
label: "Azure OpenAI",
items: [
"providers/azure/azure",
"providers/azure/azure_responses",
"providers/azure/azure_embedding",
"providers/azure/azure_speech",
"providers/azure/videos",
]
},
{
type: "category",
label: "Azure AI",
items: [
"providers/azure_ai",
"providers/azure_ocr",
"providers/azure_document_intelligence",
"providers/azure_ai_speech",
"providers/azure_ai_img",
"providers/azure_ai_vector_stores",
"providers/azure_ai/azure_ai_vector_stores_passthrough",
]
},
{
type: "category",
label: "Vertex AI",
items: [
"providers/vertex",
"providers/vertex_ai/videos",
"providers/vertex_partner",
"providers/vertex_self_deployed",
"providers/vertex_embedding",
"providers/vertex_image",
"providers/vertex_speech",
"providers/vertex_batch",
"providers/vertex_ocr",
]
},
{
type: "category",
label: "Google AI Studio",
items: [
"providers/gemini",
"providers/gemini/videos",
"providers/google_ai_studio/files",
"providers/google_ai_studio/image_gen",
"providers/google_ai_studio/realtime",
]
},
"providers/anthropic",
"providers/aws_sagemaker",
{
type: "category",
label: "Bedrock",
items: [
"providers/bedrock",
"providers/bedrock_embedding",
"providers/bedrock_imported",
"providers/bedrock_image_gen",
"providers/bedrock_rerank",
"providers/bedrock_agentcore",
"providers/bedrock_agents",
"providers/bedrock_batches",
"providers/bedrock_vector_store",
]
},
"providers/litellm_proxy",
"providers/ai21",
"providers/aiml",
"providers/aleph_alpha",
"providers/anyscale",
"providers/baseten",
"providers/bytez",
"providers/cerebras",
"providers/clarifai",
"providers/cloudflare_workers",
"providers/codestral",
"providers/cohere",
"providers/cometapi",
"providers/compactifai",
"providers/custom_llm_server",
"providers/dashscope",
"providers/databricks",
"providers/datarobot",
"providers/deepgram",
"providers/deepinfra",
"providers/deepseek",
"providers/docker_model_runner",
"providers/elevenlabs",
"providers/fal_ai",
"providers/featherless_ai",
"providers/fireworks_ai",
"providers/friendliai",
"providers/galadriel",
"providers/github",
"providers/github_copilot",
"providers/gradient_ai",
"providers/groq",
"providers/heroku",
{
type: "category",
label: "HuggingFace",
items: [
"providers/huggingface",
"providers/huggingface_rerank",
]
},
"providers/hyperbolic",
"providers/infinity",
"providers/jina_ai",
"providers/lambda_ai",
"providers/lemonade",
"providers/llamafile",
"providers/lm_studio",
"providers/meta_llama",
"providers/milvus_vector_stores",
"providers/mistral",
"providers/moonshot",
"providers/morph",
"providers/nebius",
"providers/nlp_cloud",
"providers/novita",
{ type: "doc", id: "providers/nscale", label: "Nscale (EU Sovereign)" },
{
type: "category",
label: "Nvidia NIM",
items: [
"providers/nvidia_nim",
"providers/nvidia_nim_rerank",
]
},
"providers/oci",
"providers/ollama",
"providers/openrouter",
"providers/ovhcloud",
"providers/perplexity",
"providers/petals",
"providers/publicai",
"providers/predibase",
"providers/ragflow",
"providers/recraft",
"providers/replicate",
{
type: "category",
label: "RunwayML",
items: [
"providers/runwayml/images",
"providers/runwayml/videos",
]
},
"providers/sambanova",
"providers/snowflake",
"providers/togetherai",
"providers/topaz",
"providers/triton-inference-server",
"providers/v0",
"providers/vercel_ai_gateway",
"providers/vllm",
"providers/volcano",
"providers/voyage",
"providers/wandb_inference",
{
type: "category",
label: "WatsonX",
items: [
"providers/watsonx/index",
"providers/watsonx/audio_transcription",
]
},
"providers/xai",
"providers/xinference",
"providers/zai",
],
},
{
type: "category",
label: "Guides",
items: [
"completion/computer_use",
"completion/web_search",
"completion/web_fetch",
"completion/function_call",
"completion/audio",
"completion/document_understanding",
"completion/drop_params",
"completion/image_generation_chat",
"completion/json_mode",
"completion/knowledgebase",
"completion/message_trimming",
"completion/model_alias",
"completion/mock_requests",
"completion/predict_outputs",
"completion/prefix",
"completion/prompt_caching",
"completion/prompt_formatting",
"completion/reliable_completions",
"completion/stream",
"completion/provider_specific_params",
"completion/vision",
"exception_mapping",
"completion/batching",
"guides/finetuned_models",
"guides/security_settings",
"proxy/veo_video_generation",
"reasoning_content",
"extras/creating_adapters",
]
},
{
type: "category",
label: "Routing, Loadbalancing & Fallbacks",
link: {
type: "generated-index",
title: "Routing, Loadbalancing & Fallbacks",
description: "Learn how to load balance, route, and set fallbacks for your LLM requests",
slug: "/routing-load-balancing",
},
items: [
"routing",
"scheduler",
"proxy/auto_routing",
"proxy/load_balancing",
"proxy/provider_budget_routing",
"proxy/reliability",
"proxy/tag_routing",
"proxy/timeout",
"wildcard_routing"
],
},
{
type: "category",
label: "LiteLLM Python SDK",
items: [
"set_keys",
"budget_manager",
"caching/all_caches",
"completion/token_usage",
"sdk_custom_pricing",
"embedding/async_embedding",
"embedding/moderation",
"migration",
"sdk_custom_pricing",
{
type: "category",
label: "LangChain, LlamaIndex, Instructor Integration",
items: ["langchain/langchain", "tutorials/instructor"],
}
],
},
{
type: "category",
label: "Load Testing",
items: [
"benchmarks",
"load_test_advanced",
"load_test_sdk",
"load_test_rpm",
]
},
{
type: "category",
label: "Tutorials",
items: [
"tutorials/openweb_ui",
"tutorials/openai_codex",
"tutorials/litellm_gemini_cli",
"tutorials/litellm_qwen_code_cli",
"tutorials/anthropic_file_usage",
"tutorials/default_team_self_serve",
"tutorials/msft_sso",
"tutorials/prompt_caching",
"tutorials/tag_management",
"tutorials/litellm_proxy_aporia",
"tutorials/presidio_pii_masking",
"tutorials/elasticsearch_logging",
"tutorials/gemini_realtime_with_audio",
"tutorials/claude_responses_api",
{
type: "category",
label: "LiteLLM Python SDK Tutorials",
items: [
"tutorials/google_adk",
"tutorials/azure_openai",
"tutorials/instructor",
"tutorials/gradio_integration",
"tutorials/huggingface_codellama",
"tutorials/huggingface_tutorial",
"tutorials/TogetherAI_liteLLM",
"tutorials/finetuned_chat_gpt",
"tutorials/text_completion",
"tutorials/first_playground",
"tutorials/model_fallbacks",
],
},
]
},
{
type: "category",
label: "Contributing",
items: [
"extras/contributing_code",
{
type: "category",
label: "Adding Providers",
items: [
"adding_provider/directory_structure",
"adding_provider/new_rerank_provider",
]
},
"extras/contributing",
"contributing",
]
},
{
type: "category",
label: "Extras",
items: [
"data_security",
"data_retention",
"proxy/security_encryption_faq",
"migration_policy",
{
type: "category",
label: "❤️ 🚅 Projects built on LiteLLM",
link: {
type: "generated-index",
title: "Projects built on LiteLLM",
description:
"Explore projects built on top of LiteLLM",
slug: "/project",
},
items: [
"projects/smolagents",
"projects/mini-swe-agent",
"projects/openai-agents",
"projects/Google ADK",
"projects/Agent Lightning",
"projects/Harbor",
"projects/Docq.AI",
"projects/PDL",
"projects/OpenInterpreter",
"projects/Elroy",
"projects/dbally",
"projects/FastREPL",
"projects/PROMPTMETHEUS",
"projects/Codium PR Agent",
"projects/Prompt2Model",
"projects/SalesGPT",
"projects/Quivr",
"projects/Langstream",
"projects/Otter",
"projects/GPT Migrate",
"projects/YiVal",
"projects/LiteLLM Proxy",
"projects/llm_cord",
"projects/pgai",
"projects/GPTLocalhost",
"projects/HolmesGPT",
"projects/Railtracks",
],
},
"extras/code_quality",
"rules",
"proxy/team_based_routing",
"proxy/customer_routing",
"proxy_server",
],
},
"troubleshoot",
],
};
module.exports = sidebars;