package middleware
import (
	"context"
	"fmt"
	"net/http"
	"strings"
	"time"

	"github.com/MicahParks/keyfunc"
	"github.com/golang-jwt/jwt/v5"
)
type ctxKey string
const subjectKey ctxKey = "subject"
func Subject(ctx context.Context) (string, bool) {
v := ctx.Value(subjectKey)
s, ok := v.(string)
return s, ok
}
func JWTAuth(jwksURL, issuer, audience string) (func(http.Handler) http.Handler, error) {
jwks, err := keyfunc.Get(jwksURL, keyfunc.Options{
RefreshInterval: 5 * time.Minute,
RefreshTimeout: 2 * time.Second,
})
if err != nil {
description: "To get useful traces, you need propagation and a real exporter. I set a global `TextMapPropagator` (`TraceContext` + `Baggage`) so inbound headers connect spans across services. Then I configure an OTLP exporter and a batch span processor so tracing overhead stays low. I also explicitly set a sampler: `ParentBased(TraceIDRatioBased(0.1))` is a common starting point that respects upstream sampling and keeps costs predictable. The other key piece is resource attributes like `service.name`, which is how traces are grouped in most backends. Once this is initialized, you can start spans in handlers with `otel.Tracer("...").Start(ctx, ...)` and get end-to-end visibility without special log parsing.",
}
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
auth := r.Header.Get("Authorization")
if !strings.HasPrefix(auth, "Bearer ") {
http.Error(w, "unauthorized", http.StatusUnauthorized)
return
}
tokenStr := strings.TrimPrefix(auth, "Bearer ")
token, err := jwt.Parse(tokenStr, jwks.Keyfunc, jwt.WithIssuer(issuer), jwt.WithAudience(audience))
if err != nil || !token.Valid {
http.Error(w, "unauthorized", http.StatusUnauthorized)
return
}
claims, ok := token.Claims.(jwt.MapClaims)
if !ok {
http.Error(w, "unauthorized", http.StatusUnauthorized)
return
}
sub, _ := claims["sub"].(string)
ctx := context.WithValue(r.Context(), subjectKey, sub)
next.ServeHTTP(w, r.WithContext(ctx))
})
}, nil
description: "I like feature flags that are boring at runtime: reads should be lock-free and refresh should happen in the background. The pattern here stores a JSON flag snapshot in an `atomic.Value`, which makes reads cheap and race-free. A ticker refreshes the snapshot periodically from a backend (S3, database, config service) using a short `context.WithTimeout`. If refresh fails, we keep the last good snapshot, which is usually the safest behavior in production. The other key is exposing a small API (`Enabled("new_checkout")`) so call sites don’t learn about storage formats. This approach works well for “static-ish” flags like gradual rollouts and kill switches. For targeting by user, you can extend the model later, but the baseline stays simple and safe under load.",