Rate limiting protects APIs from abuse and ensures fair usage. Redis provides the speed and atomicity needed for production rate limiting.
Setup
pnpm add ioredis
// lib/redis.ts
import Redis from "ioredis";
// Shared ioredis client for the whole app.
// lazyConnect defers the TCP connection until the first command is issued.
const redisOptions = {
  maxRetriesPerRequest: 3,
  lazyConnect: true,
};

export const redis = new Redis(process.env.REDIS_URL!, redisOptions);
Token Bucket Algorithm
The token bucket algorithm allows bursts of traffic while enforcing an average rate.
// lib/rate-limit/token-bucket.ts
import { redis } from "@/lib/redis";
/** Configuration for a token-bucket rate limiter. */
interface TokenBucketConfig {
maxTokens: number; // bucket capacity — the maximum burst size
refillRate: number; // tokens per second
keyPrefix: string; // Redis key namespace, e.g. "rl:auth"
}
/** Outcome of a single rate-limit check. */
interface RateLimitResult {
allowed: boolean; // true when the request may proceed
remaining: number; // whole tokens/requests left after this check
retryAfter: number | null; // seconds to wait when blocked; null when allowed
}
/**
 * Creates a Redis-backed token-bucket rate limiter.
 *
 * The bucket starts full (maxTokens), refills continuously at refillRate
 * tokens per second, and allows bursts up to the bucket capacity. All
 * read/refill/decrement steps run inside one Lua script so concurrent
 * callers cannot race between them.
 */
export function createTokenBucket(config: TokenBucketConfig) {
  const { maxTokens, refillRate, keyPrefix } = config;

  // Atomic token-bucket step. Note: writes with HSET — HMSET is deprecated
  // since Redis 4.0 and HSET accepts multiple field/value pairs.
  const script = `
local key = KEYS[1]
local max_tokens = tonumber(ARGV[1])
local refill_rate = tonumber(ARGV[2])
local now = tonumber(ARGV[3])
local cost = tonumber(ARGV[4])
local data = redis.call('HMGET', key, 'tokens', 'last_refill')
local tokens = tonumber(data[1])
local last_refill = tonumber(data[2])
if tokens == nil then
tokens = max_tokens
last_refill = now
end
-- Refill tokens based on elapsed time
local elapsed = now - last_refill
local new_tokens = elapsed * refill_rate
tokens = math.min(max_tokens, tokens + new_tokens)
last_refill = now
local allowed = 0
if tokens >= cost then
tokens = tokens - cost
allowed = 1
end
redis.call('HSET', key, 'tokens', tokens, 'last_refill', last_refill)
redis.call('EXPIRE', key, math.ceil(max_tokens / refill_rate) + 1)
return { allowed, math.floor(tokens) }
`;

  /**
   * Attempts to consume `cost` tokens for `identifier`.
   *
   * @param identifier caller identity (e.g. IP or user id) — scoped by keyPrefix
   * @param cost number of tokens this request costs (default 1)
   * @returns allowed flag, whole tokens remaining, and — when blocked — an
   *   estimate in seconds of how long until enough tokens have refilled.
   */
  return async function consume(
    identifier: string,
    cost = 1,
  ): Promise<RateLimitResult> {
    const key = `${keyPrefix}:${identifier}`;
    // Fractional seconds, matching refillRate's tokens-per-second unit.
    const now = Date.now() / 1000;
    // ioredis eval signature: (script, numKeys, ...keys, ...argv).
    const result = (await redis.eval(
      script,
      1,
      key,
      maxTokens,
      refillRate,
      now,
      cost,
    )) as [number, number];
    const allowed = result[0] === 1;
    const remaining = result[1];
    return {
      allowed,
      remaining,
      // Time needed to accumulate the missing tokens at refillRate tokens/sec.
      retryAfter: allowed ? null : Math.ceil((cost - remaining) / refillRate),
    };
  };
}
Sliding Window Counter
More precise than a fixed-window counter: it weights the previous window's count by how much of that window still overlaps the present moment and adds the current window's count, smoothing out the burst-at-the-boundary problem.
// lib/rate-limit/sliding-window.ts
import { redis } from "@/lib/redis";
/** Configuration for a sliding-window-counter rate limiter. */
interface SlidingWindowConfig {
maxRequests: number; // maximum requests allowed per window
windowMs: number; // window length in milliseconds
keyPrefix: string; // Redis key namespace, e.g. "rl:api"
}
// Sliding-window counter: approximates a true sliding window by weighting the
// previous fixed window's count by the fraction of that window that still
// overlaps the current moment, then adding the current window's count.
export function createSlidingWindow(config: SlidingWindowConfig) {
const { maxRequests, windowMs, keyPrefix } = config;
// Atomic Lua script: reads both window counters, computes
// weighted = floor(previous * overlap_fraction) + current, rejects when
// weighted >= max_requests (third return value = ms until the window rolls
// over), otherwise increments the current counter and sets its TTL to two
// window lengths so it survives into the next window as "previous".
// Rejected requests are NOT counted toward the window.
const script = `
local current_key = KEYS[1]
local previous_key = KEYS[2]
local max_requests = tonumber(ARGV[1])
local window_ms = tonumber(ARGV[2])
local now = tonumber(ARGV[3])
local current_count = tonumber(redis.call('GET', current_key) or '0')
local previous_count = tonumber(redis.call('GET', previous_key) or '0')
-- Calculate window position
local window_start = math.floor(now / window_ms) * window_ms
local elapsed = now - window_start
local weight = 1 - (elapsed / window_ms)
-- Weighted count
local weighted_count = math.floor(previous_count * weight) + current_count
if weighted_count >= max_requests then
return { 0, max_requests - weighted_count, math.ceil(window_ms - elapsed) }
end
redis.call('INCR', current_key)
redis.call('PEXPIRE', current_key, window_ms * 2)
return { 1, max_requests - weighted_count - 1, 0 }
`;
// check(identifier): one rate-limit decision per call.
// Returns { allowed, remaining (clamped to >= 0), retryAfter in whole
// seconds or null when allowed }.
return async function check(identifier: string) {
const now = Date.now();
// Window ids are integers (floor(now / windowMs)); embedding the id in the
// key means stale windows simply expire rather than needing cleanup.
const currentWindow = Math.floor(now / windowMs);
const previousWindow = currentWindow - 1;
const currentKey = `${keyPrefix}:${identifier}:${currentWindow}`;
const previousKey = `${keyPrefix}:${identifier}:${previousWindow}`;
// ioredis eval signature: (script, numKeys, ...keys, ...argv).
const result = (await redis.eval(
script,
2,
currentKey,
previousKey,
maxRequests,
windowMs,
now,
)) as [number, number, number];
return {
allowed: result[0] === 1,
// The script returns a negative "remaining" when already over the limit.
remaining: Math.max(0, result[1]),
retryAfter: result[2] > 0 ? Math.ceil(result[2] / 1000) : null,
};
};
}
Rate Limiter Instances
// lib/rate-limit/index.ts
import { createTokenBucket } from "./token-bucket";
import { createSlidingWindow } from "./sliding-window";
// General API rate limit: 100 requests per minute
// Shared limiter instances. Each keyPrefix namespaces its counters in Redis
// so the limiters never collide with one another.

// General API traffic: 100 requests per rolling minute.
export const apiLimiter = createSlidingWindow({
  maxRequests: 100,
  windowMs: 60 * 1000,
  keyPrefix: "rl:api",
});

// Login/signup endpoints: bucket of 5 attempts refilling over 15 minutes.
export const authLimiter = createTokenBucket({
  maxTokens: 5,
  refillRate: 5 / (15 * 60),
  keyPrefix: "rl:auth",
});

// Contact form: 3 submissions per rolling hour.
export const contactLimiter = createSlidingWindow({
  maxRequests: 3,
  windowMs: 60 * 60 * 1000,
  keyPrefix: "rl:contact",
});
Middleware Integration
// middleware.ts
import { NextResponse } from "next/server";
import type { NextRequest } from "next/server";
import { apiLimiter, authLimiter } from "@/lib/rate-limit";
// Best-effort client IP: first hop of X-Forwarded-For (set by the proxy),
// then X-Real-IP, falling back to "unknown" when neither header is usable.
function getIP(request: NextRequest): string {
  const forwarded = request.headers.get("x-forwarded-for");
  const firstHop = forwarded?.split(",")[0]?.trim();
  if (firstHop) {
    return firstHop;
  }
  return request.headers.get("x-real-ip") || "unknown";
}
/**
 * Rate-limiting middleware for all /api routes.
 *
 * /api/auth/* is checked against the strict token-bucket limiter first; every
 * /api/* request (including auth, deliberately) then passes through the
 * general sliding-window limiter. Blocked requests get a 429 with Retry-After.
 * If Redis is unreachable we fail open and let the request through.
 *
 * Fix: the original built NextResponse.next() and set headers on it *before*
 * checking `allowed`, discarding that response on the 429 path. The
 * pass-through response is now only constructed once the request is allowed.
 */
export async function middleware(request: NextRequest) {
  const ip = getIP(request);
  const path = request.nextUrl.pathname;

  try {
    // Auth endpoints use strict limiting.
    if (path.startsWith("/api/auth")) {
      const result = await authLimiter(ip);
      if (!result.allowed) {
        return NextResponse.json(
          { error: "Too many attempts. Please try again later." },
          {
            status: 429,
            headers: {
              "Retry-After": String(result.retryAfter ?? 60),
              "X-RateLimit-Remaining": "0",
            },
          },
        );
      }
    }

    // General API rate limiting.
    if (path.startsWith("/api/")) {
      const result = await apiLimiter(ip);
      if (!result.allowed) {
        return NextResponse.json(
          { error: "Rate limit exceeded." },
          {
            status: 429,
            headers: {
              "Retry-After": String(result.retryAfter ?? 60),
              "X-RateLimit-Remaining": "0",
            },
          },
        );
      }
      // Build the pass-through response only once we know it's allowed.
      const response = NextResponse.next();
      response.headers.set("X-RateLimit-Remaining", String(result.remaining));
      return response;
    }
  } catch {
    // If Redis is down, allow the request (fail open).
    return NextResponse.next();
  }
  return NextResponse.next();
}
// Run this middleware only for API routes; all other paths bypass rate limiting.
export const config = {
matcher: "/api/:path*",
};
API Route Usage
// app/api/contact/route.ts
import { NextResponse } from "next/server";
import type { NextRequest } from "next/server";
import { contactLimiter } from "@/lib/rate-limit";
/**
 * Contact-form submission endpoint, limited to 3 submissions per hour per IP.
 *
 * Fix: the original called `await request.json()` unguarded, so a malformed
 * body produced an unhandled exception (HTTP 500). Malformed JSON now returns
 * a 400 instead.
 */
export async function POST(request: NextRequest) {
  // First hop of X-Forwarded-For (set by the proxy), else "unknown".
  const ip =
    request.headers.get("x-forwarded-for")?.split(",")[0]?.trim() || "unknown";

  const result = await contactLimiter(ip);
  if (!result.allowed) {
    return NextResponse.json(
      { error: "You have submitted too many messages. Please wait before trying again." },
      {
        status: 429,
        headers: { "Retry-After": String(result.retryAfter ?? 3600) },
      },
    );
  }

  // Reject malformed JSON explicitly rather than crashing with a 500.
  // NOTE(review): body is not yet schema-validated — add zod/valibot before use.
  let body: unknown;
  try {
    body = await request.json();
  } catch {
    return NextResponse.json({ error: "Invalid JSON body." }, { status: 400 });
  }

  // Process the contact form...
  return NextResponse.json({ success: true });
}
Testing Rate Limits
import { describe, it, expect, beforeEach } from "vitest";
import { createSlidingWindow } from "@/lib/rate-limit/sliding-window";
// Integration tests for the sliding-window limiter (requires a running Redis).
describe("Sliding window rate limiter", () => {
  const limiter = createSlidingWindow({
    maxRequests: 3,
    windowMs: 1000,
    keyPrefix: "test:rl",
  });

  beforeEach(async () => {
    // Clear test keys before each test
  });

  it("allows requests within limit", async () => {
    const results = [];
    for (let i = 0; i < 3; i++) {
      results.push(await limiter("test-user"));
    }
    for (const r of results) {
      expect(r.allowed).toBe(true);
    }
    expect(results[2].remaining).toBe(0);
  });

  it("blocks requests over limit", async () => {
    for (let i = 0; i < 3; i++) {
      await limiter("test-user-2");
    }
    const blocked = await limiter("test-user-2");
    expect(blocked.allowed).toBe(false);
    expect(blocked.retryAfter).toBeGreaterThan(0);
  });
});
Need API Security?
We build secure, scalable APIs with proper rate limiting. Contact us to discuss your project.