SDK Documentation

Welcome to the Cachee.ai SDK documentation. Cachee.ai is an AI-powered enterprise caching platform that automatically optimizes your application's cache performance, reducing latency and infrastructure costs by up to 42%.

  • AI-Powered: Machine learning algorithms automatically optimize TTL, predict access patterns, and pre-fetch data.
  • Lightning Fast: Achieve 94%+ cache hit rates with sub-10ms latency for most operations.
  • Simple Integration: Add one decorator to your functions. No infrastructure changes required.
  • Real-Time Analytics: Monitor performance, cost savings, and AI predictions in real time.
  • Enterprise Security: SOC 2 Type II certified, AES-256 encryption, VPC deployment options.
  • Multi-Language: Full support for Python, Node.js, Go, and Java, with more coming soon.

Installation

Install the Cachee.ai SDK using your language's package manager:

Python:

# Install via pip
pip install cachee-sdk

# Or with Poetry
poetry add cachee-sdk

# Or with pipenv
pipenv install cachee-sdk

Node.js:

# Install via npm
npm install @cachee/sdk

# Or with Yarn
yarn add @cachee/sdk

# Or with pnpm
pnpm add @cachee/sdk

Go:

# Install via go get
go get github.com/appuix/cachee-go

# Import in your code
import "github.com/appuix/cachee-go"

Java:

<!-- Maven -->
<dependency>
    <groupId>com.appuix</groupId>
    <artifactId>cachee-sdk</artifactId>
    <version>1.0.0</version>
</dependency>

// Gradle
implementation 'com.appuix:cachee-sdk:1.0.0'

Quick Start

Get up and running with Cachee.ai in under 5 minutes:

Prerequisites
You'll need a Cachee.ai API key. Sign up for free at cachee.ai/signup to get your key instantly.

Python:

from cachee_sdk import cachee
import os

# Initialize Cachee with your API key
cachee.init(api_key=os.getenv('CACHEE_API_KEY'))

# Add caching to any function with a decorator
@cachee.cache(
    key="user:{user_id}",
    ttl_strategy="adaptive"  # AI handles TTL automatically
)
def get_user(user_id):
    # Your expensive operation (database query, API call, etc.)
    return db.query("SELECT * FROM users WHERE id = ?", user_id)

# Use it like normal - Cachee handles the rest
user = get_user(12345)
print(f"User: {user['name']}")

# That's it! You now have AI-powered caching 🚀

Node.js:

import { cachee } from '@cachee/sdk';

// Initialize Cachee with your API key
cachee.init({
  apiKey: process.env.CACHEE_API_KEY
});

// Add caching to any function
const getUser = cachee.cache(
  async (userId) => {
    // Your expensive operation
    return await db.query('SELECT * FROM users WHERE id = ?', userId);
  },
  {
    key: 'user:{userId}',
    ttlStrategy: 'adaptive'  // AI handles TTL automatically
  }
);

// Use it like normal - Cachee handles the rest
const user = await getUser(12345);
console.log(`User: ${user.name}`);

// That's it! You now have AI-powered caching 🚀

Go:

package main

import (
    "context"
    "fmt"
    "os"
    "github.com/appuix/cachee-go"
)

func main() {
    // Initialize Cachee with your API key
    c := cachee.New(cachee.Config{
        APIKey: os.Getenv("CACHEE_API_KEY"),
    })

    // Create a cached function
    getUser := c.Cache(
        "user:{userId}",
        cachee.Options{TTLStrategy: "adaptive"},
        func(ctx context.Context, userId int) (User, error) {
            // Your expensive operation
            return db.QueryUser(userId)
        },
    )

    // Use it like normal - Cachee handles the rest
    user, err := getUser(context.Background(), 12345)
    if err != nil {
        panic(err)
    }
    fmt.Printf("User: %s\n", user.Name)

    // That's it! You now have AI-powered caching 🚀
}

Java:

import com.appuix.cachee.Cachee;
import com.appuix.cachee.annotations.Cache;

public class Application {
    public static void main(String[] args) {
        // Initialize Cachee with your API key
        Cachee.init(System.getenv("CACHEE_API_KEY"));

        // Use it like normal - Cachee handles the rest
        User user = new Application().getUser(12345);
        System.out.println("User: " + user.getName());

        // That's it! You now have AI-powered caching 🚀
    }

    // Add caching with annotation
    @Cache(
        key = "user:{userId}",
        ttlStrategy = "adaptive"  // AI handles TTL automatically
    )
    public User getUser(int userId) {
        // Your expensive operation
        return db.query("SELECT * FROM users WHERE id = ?", userId);
    }
}
What Just Happened?
Cachee.ai is now:
  • Monitoring access patterns for your function
  • Using AI to predict when data will be requested
  • Automatically adjusting TTL based on usage
  • Pre-fetching related data
  • Tracking performance metrics
Check your dashboard at dashboard.cachee.ai to see real-time analytics!

Authentication

Cachee.ai uses API keys for authentication. You can pass your API key in multiple ways:

Python:

# Method 1: Environment variable (recommended)
import os
from cachee_sdk import cachee

cachee.init(api_key=os.getenv('CACHEE_API_KEY'))

# Method 2: Direct initialization
cachee.init(api_key='sk_live_your_api_key_here')

# Method 3: Configuration file
cachee.init(config_file='~/.cachee/config.yaml')

# Method 4: AWS Secrets Manager
import boto3
secrets = boto3.client('secretsmanager')
api_key = secrets.get_secret_value(SecretId='cachee-api-key')['SecretString']
cachee.init(api_key=api_key)

Node.js:

// Method 1: Environment variable (recommended)
import { cachee } from '@cachee/sdk';

cachee.init({
  apiKey: process.env.CACHEE_API_KEY
});

// Method 2: Direct initialization
cachee.init({
  apiKey: 'sk_live_your_api_key_here'
});

// Method 3: Configuration file
cachee.init({
  configFile: '~/.cachee/config.json'
});

// Method 4: AWS Secrets Manager
import { SecretsManager } from 'aws-sdk';
const secrets = new SecretsManager();
const { SecretString } = await secrets.getSecretValue({
  SecretId: 'cachee-api-key'
}).promise();
cachee.init({ apiKey: SecretString });

Go:

import (
    "os"
    "github.com/appuix/cachee-go"
)

// Method 1: Environment variable (recommended)
c := cachee.New(cachee.Config{
    APIKey: os.Getenv("CACHEE_API_KEY"),
})

// Method 2: Direct initialization
c := cachee.New(cachee.Config{
    APIKey: "sk_live_your_api_key_here",
})

// Method 3: Configuration file
c, err := cachee.NewFromConfig("~/.cachee/config.yaml")

// Method 4: AWS Secrets Manager (also requires the aws, aws/session,
// and service/secretsmanager packages from aws-sdk-go)
svc := secretsmanager.New(session.Must(session.NewSession()))
secret, err := svc.GetSecretValue(&secretsmanager.GetSecretValueInput{
    SecretId: aws.String("cachee-api-key"),
})
if err != nil {
    panic(err)
}
c := cachee.New(cachee.Config{
    APIKey: *secret.SecretString,
})

Java:

import com.appuix.cachee.Cachee;

// Method 1: Environment variable (recommended)
Cachee.init(System.getenv("CACHEE_API_KEY"));

// Method 2: Direct initialization
Cachee.init("sk_live_your_api_key_here");

// Method 3: Configuration file
Cachee.initFromConfig("~/.cachee/config.properties");

// Method 4: AWS Secrets Manager
AWSSecretsManager client = AWSSecretsManagerClientBuilder.standard().build();
GetSecretValueRequest request = new GetSecretValueRequest()
    .withSecretId("cachee-api-key");
String apiKey = client.getSecretValue(request).getSecretString();
Cachee.init(apiKey);
Security Best Practices
  • Never commit API keys to version control
  • Use environment variables or secret managers in production
  • Rotate API keys regularly (every 90 days recommended)
  • Use separate keys for development, staging, and production (see the sketch after this list)
  • Restrict key permissions to only what's needed
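
For the per-environment rule above, here is a minimal Python sketch. It assumes one key per environment stored under names like CACHEE_API_KEY_DEV; the APP_ENV variable and the naming convention are ours, not an SDK requirement:

import os
from cachee_sdk import cachee

# APP_ENV is assumed to be "dev", "staging", or "prod"
env = os.getenv("APP_ENV", "dev").upper()

# Fails fast with a KeyError if the environment's key is missing
cachee.init(api_key=os.environ[f"CACHEE_API_KEY_{env}"])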

Caching Basics

Understand the fundamentals of how Cachee.ai caches your data:

Cache Keys

Cache keys uniquely identify cached data. Use dynamic parameters in your keys:

# Simple key
@cachee.cache(key="users_list")
def get_all_users():
    return db.query("SELECT * FROM users")

# Dynamic key with parameter
@cachee.cache(key="user:{user_id}")
def get_user(user_id):
    return db.query("SELECT * FROM users WHERE id = ?", user_id)

# Composite key with multiple parameters
@cachee.cache(key="products:{category}:{page}")
def get_products(category, page):
    return db.query("SELECT * FROM products WHERE category = ? LIMIT ? OFFSET ?",
                    category, 20, page * 20)

# Complex key: hash non-primitive arguments before calling so the
# key template can interpolate {filters_hash} as a plain parameter
import hashlib, json

@cachee.cache(key="search:{query}:{filters_hash}")
def search_products(query, filters, filters_hash):
    return db.search(query, filters)

# Caller computes a stable hash of the filters dict
filters_hash = hashlib.md5(json.dumps(filters, sort_keys=True).encode()).hexdigest()

TTL (Time To Live)

TTL determines how long data stays in cache. Cachee.ai offers multiple strategies:

  • adaptive: AI automatically adjusts TTL based on access patterns. Best for most use cases (the default recommendation).
  • fixed: Set a specific TTL in seconds. Best for data with predictable freshness requirements.
  • stale-while-revalidate: Serve stale data while refreshing in the background. Best for high-traffic endpoints where stale data is acceptable.
  • time-based: Different TTL based on time of day. Best for data with predictable update schedules.
  • no-ttl: Cache indefinitely until manually invalidated. Best for immutable data (user IDs, product SKUs).
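
For example, the fixed and no-ttl strategies map directly onto the ttl_strategy and ttl parameters covered in the decorator reference below (a minimal sketch; the keys and queries are placeholders):

# Fixed TTL: prices may change, so refresh every 10 minutes
@cachee.cache(key="price:{sku}", ttl_strategy="fixed", ttl=600)
def get_price(sku):
    return db.query("SELECT price FROM products WHERE sku = ?", sku)

# No TTL: SKU metadata is immutable, so cache until manually invalidated
@cachee.cache(key="sku:{sku}", ttl_strategy="no-ttl")
def get_sku_metadata(sku):
    return db.query("SELECT * FROM skus WHERE sku = ?", sku)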

@cachee.cache() Decorator

Complete reference for the main caching decorator:

@cachee.cache(
    key: str,                          # Cache key with parameter interpolation
    ttl_strategy: str = "adaptive",    # TTL strategy: adaptive, fixed, stale-while-revalidate, time-based, no-ttl
    ttl: int = None,                   # TTL in seconds (required if ttl_strategy="fixed")
    namespace: str = None,             # Logical grouping for related cache entries
    version: str = "1",                # Cache version for schema changes
    warm_cache: bool = False,          # Pre-compute during off-peak hours
    fallback_on_error: bool = False,   # Serve stale data if function raises error
    compress: bool = True,             # Compress large payloads (>10KB)
    serialize: str = "json",           # Serialization: json, pickle, msgpack
    tags: List[str] = None,            # Tags for bulk invalidation
    conditions: Dict = None,           # Conditional caching rules
    ai_enabled: bool = True,           # Enable AI features
    metrics: bool = True,              # Track performance metrics
    local_cache: bool = True,          # Enable in-memory L1 cache
    distributed: bool = True,          # Use distributed cache (Redis/Memcached)
)

Parameter Reference

  • key (string, required): Cache key template. Use {param} for dynamic values.
  • ttl_strategy (string): TTL strategy. Default: "adaptive"
  • ttl (int): TTL in seconds (required if ttl_strategy="fixed")
  • namespace (string): Logical grouping for cache entries. Example: "users"
  • version (string): Cache version for schema changes. Default: "1"
  • warm_cache (bool): Pre-compute during off-peak hours. Default: False
  • fallback_on_error (bool): Serve stale data if the function fails. Default: False
  • compress (bool): Compress payloads >10KB. Default: True
  • serialize (string): Serialization format: "json", "pickle", "msgpack". Default: "json"
  • tags (List[str]): Tags for bulk invalidation. Example: ["users", "auth"]
  • conditions (Dict): Conditional caching rules. Default: None
  • ai_enabled (bool): Enable AI optimization. Default: True
  • metrics (bool): Track performance metrics. Default: True
  • local_cache (bool): Enable in-memory L1 cache. Default: True
  • distributed (bool): Use distributed cache (Redis/Memcached). Default: True

Advanced Examples

# Example 1: API calls with fallback
@cachee.cache(
    key="github:user:{username}",
    ttl_strategy="fixed",
    ttl=300,  # 5 minutes
    fallback_on_error=True,  # Serve stale if GitHub is down
    tags=["external_api", "github"]
)
def get_github_user(username):
    response = httpx.get(f"https://api.github.com/users/{username}")
    return response.json()

# Example 2: Expensive aggregation with cache warming
@cachee.cache(
    key="analytics:dashboard:{org_id}",
    ttl_strategy="adaptive",
    warm_cache=True,  # Pre-compute at 2 AM daily
    compress=True,
    namespace="analytics"
)
def get_dashboard_analytics(org_id):
    # Expensive multi-table aggregation
    return db.run_analytics_query(org_id)

# Example 3: User-specific data with namespace
@cachee.cache(
    key="user:{user_id}:preferences",
    namespace="users",
    version="2",  # Increment when schema changes
    tags=["users", "preferences"],
    serialize="json"
)
def get_user_preferences(user_id):
    return db.query("SELECT * FROM user_preferences WHERE user_id = ?", user_id)

# Example 4: Conditional caching based on request size
@cachee.cache(
    key="search:{query}",
    conditions={
        "cache_if": lambda result: len(result) > 0,  # Only cache non-empty results
        "skip_if": lambda query: len(query) < 3      # Don't cache short queries
    }
)
def search(query):
    return db.full_text_search(query)

AI Features

Cachee.ai's AI engine continuously learns from your application's behavior to optimize caching automatically:

  • Predictive Refresh: Predicts when data will be requested and pre-fetches it before the request arrives.
  • Adaptive TTL: Automatically adjusts TTL based on access frequency, data volatility, and time-of-day patterns.
  • Anomaly Detection: Detects unusual traffic patterns (traffic spikes, cache stampedes) and adapts automatically.
  • Related Data Pre-fetch: Learns relationships between data and pre-fetches related items (e.g., user → user_preferences).
  • Smart Cache Warming: Identifies high-value cache entries and warms them during off-peak hours.
  • Intelligent Eviction: Uses ML to predict which cache entries are least likely to be accessed next.
How It Works
The AI engine runs in real time alongside your application:
  1. Data Collection: Tracks every cache hit, miss, and access pattern
  2. Pattern Recognition: Identifies temporal, user, and behavioral patterns
  3. Prediction: Uses LSTM neural networks to predict future access
  4. Optimization: Automatically adjusts TTL, pre-fetches data, and warms the cache
  5. Feedback Loop: Continuously learns from outcomes to improve accuracy

All of this happens transparently. You don't need to configure anything. Just use the @cachee.cache() decorator and the AI does the rest.
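
AI features are on by default. If a particular function should opt out, the ai_enabled flag from the decorator reference above turns AI optimization off while keeping plain caching (a minimal sketch; the key and query are placeholders):

# Plain fixed-TTL caching with AI optimization disabled
@cachee.cache(
    key="legal:terms",
    ttl_strategy="fixed",
    ttl=86400,           # 24 hours
    ai_enabled=False     # no prediction, pre-fetching, or adaptive TTL
)
def get_terms_of_service():
    return db.query("SELECT body FROM legal_docs WHERE slug = 'tos'")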

Cache Warming

Pre-compute expensive operations during off-peak hours to ensure the cache is always hot:

# Enable automatic cache warming
@cachee.cache(
    key="analytics:{org_id}",
    warm_cache=True  # AI will determine best time to warm
)
def get_analytics(org_id):
    return expensive_aggregation(org_id)

# Manual cache warming (useful for migrations)
cachee.warm_cache(
    function=get_analytics,
    params=[{"org_id": id} for id in active_org_ids],
    schedule="0 2 * * *"  # Every day at 2 AM
)

# Warm cache for specific keys
cachee.warm_keys([
    "analytics:org_123",
    "analytics:org_456",
    "analytics:org_789"
])

# Warm entire namespace
cachee.warm_namespace("analytics", concurrency=10)

Monitoring & Observability

Track cache performance and AI predictions in real time:

# Get real-time metrics
metrics = cachee.get_metrics(
    namespace="users",
    time_range="last_24h"
)

print(f"Cache Hit Rate: {metrics['hit_rate']}%")
print(f"Avg Latency: {metrics['latency_p50']}ms")
print(f"Cost Savings: ${metrics['cost_savings']}")

# Get AI predictions
predictions = cachee.get_ai_predictions(
    key="user:{user_id}"
)

print(f"Predicted next access: {predictions['next_access_time']}")
print(f"Recommended TTL: {predictions['recommended_ttl']}s")
print(f"Confidence: {predictions['confidence']}%")

# Export metrics to Prometheus
cachee.enable_prometheus_exporter(port=9090)

# Send metrics to CloudWatch
cachee.enable_cloudwatch_metrics(
    namespace="Cachee",
    region="us-west-2"
)
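
You can also build lightweight alerting on top of get_metrics. The sketch below polls the same hit-rate figure shown above; the 80% threshold and the notify_oncall helper are illustrative, not part of the SDK:

# Alert when the 24-hour hit rate drops below a chosen threshold
metrics = cachee.get_metrics(namespace="users", time_range="last_24h")
if metrics["hit_rate"] < 80:
    notify_oncall(f"Cache hit rate dropped to {metrics['hit_rate']}%")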

Best Practices

Follow these guidelines to get the most out of Cachee.ai:

Do's
  • Start with adaptive TTL: Let the AI learn first, then optimize if needed
  • Use descriptive cache keys: user:{user_id} not u:{id}
  • Namespace related data: Group users, products, etc. for easier management
  • Enable compression for large payloads: >10KB should be compressed
  • Use tags for bulk invalidation: Tag related cache entries
  • Monitor metrics regularly: Check the dashboard weekly to identify optimization opportunities
  • Version your cache: Increment version when data schema changes
Don'ts
  • Don't cache user-specific sensitive data without encryption: Use encrypt=True parameter
  • Don't pre-format keys with f-strings: write key="user:{user_id}", not key=f"user:{user_id}", so Cachee can interpolate the parameter itself
  • Don't cache frequently changing data: If data changes >1x per second, caching may not help
  • Don't ignore cache invalidation: Stale data can cause bugs (see the invalidation sketch after this list)
  • Don't over-cache: Not everything needs caching. Focus on expensive operations
  • Don't use pickle serialization for untrusted data: Stick with JSON for external APIs
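
Tags and keys give you two levels of invalidation. The exact invalidation API isn't shown in this section, so the call names cachee.invalidate_key and cachee.invalidate_tags below are assumptions for illustration only:

# Hypothetical invalidation calls - confirm the real names in the API reference
# Invalidate a single entry after a write
db.update_user(user_id, data)
cachee.invalidate_key(f"user:{user_id}")

# Invalidate every entry tagged "users" after a bulk migration
cachee.invalidate_tags(["users"])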

Performance Optimization Tips

# 1. Use local L1 cache for hot data
@cachee.cache(
    key="config:app_settings",
    local_cache=True,          # In-memory cache
    local_ttl=60,              # 60 seconds in memory
    distributed_ttl=300        # 5 minutes in Redis
)
def get_app_config():
    return db.query("SELECT * FROM config")

# 2. Batch operations when possible
@cachee.cache_batch(
    key="user:{user_id}",
    batch_size=100
)
def get_users_batch(user_ids):
    return db.query("SELECT * FROM users WHERE id IN (?)", user_ids)

# 3. Use stale-while-revalidate for high-traffic endpoints
@cachee.cache(
    key="homepage:feed",
    ttl_strategy="stale-while-revalidate",
    ttl=300,                   # Serve stale after 5 minutes
    grace_period=3600          # But keep serving stale for up to 1 hour
)
def get_homepage_feed():
    return expensive_feed_query()

# 4. Conditional caching for variable-cost operations
@cachee.cache(
    key="search:{query}",
    conditions={
        "cache_if": lambda result: len(result) > 10,  # Only cache if many results
        "ttl_if": lambda result: 3600 if len(result) > 100 else 300  # Dynamic TTL
    }
)
def search(query):
    return search_engine.query(query)

AWS Integration

Deploy Cachee.ai in your AWS VPC with full control:

Deployment Options
  1. Managed SaaS: Fastest setup (5 minutes), Appuix manages infrastructure
  2. AWS Marketplace: One-click CloudFormation deployment (15 minutes)
  3. Self-Hosted: Full control, deploy via Docker/ECS/Terraform (30 minutes)

See our Onboarding Guide for detailed deployment instructions.

# Deploy via AWS Marketplace CloudFormation
aws cloudformation create-stack \
  --stack-name cachee-ai-production \
  --template-url https://s3.amazonaws.com/cachee-cf-templates/latest.yaml \
  --parameters \
      ParameterKey=VPC,ParameterValue=vpc-12345 \
      ParameterKey=Subnets,ParameterValue=subnet-abc\\,subnet-def \
      ParameterKey=CacheSize,ParameterValue=100 \
      ParameterKey=InstanceType,ParameterValue=cache.r6g.xlarge \
  --capabilities CAPABILITY_IAM

# Or use our CLI
cachee-cli deploy aws \
  --region us-west-2 \
  --vpc vpc-12345 \
  --subnets subnet-abc,subnet-def \
  --cache-size 100GB