package gitlab

import (
	"crypto/sha256"
	"encoding/json"
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"sort"
	"sync"
	"time"
)

// CacheConfig defines cache behavior settings.
type CacheConfig struct {
	CacheDir      string        // Base directory for cache files (e.g., ~/.mcp/gitlab)
	TTL           time.Duration // Time-to-live for cached entries
	MaxEntries    int           // Maximum number of entries per cache type
	EnableOffline bool          // Whether to return stale data when offline
	CompressData  bool          // Whether to compress cached data
}

// CacheEntry represents a single cached item as persisted on disk.
type CacheEntry struct {
	Key          string          `json:"key"`
	Data         json.RawMessage `json:"data"`
	Timestamp    time.Time       `json:"timestamp"`
	LastAccessed time.Time       `json:"last_accessed"`
	HitCount     int             `json:"hit_count"`
	Size         int             `json:"size"`
	ETag         string          `json:"etag,omitempty"`
	StatusCode   int             `json:"status_code,omitempty"`
}

// Cache provides thread-safe, file-backed caching functionality.
// All exported methods acquire c.mu; unexported helpers assume the
// caller already holds it.
type Cache struct {
	mu       sync.RWMutex
	config   CacheConfig
	metadata map[string]*CacheMetadata
}

// CacheMetadata tracks cache statistics and metadata per cache type.
type CacheMetadata struct {
	TotalHits   int64     `json:"total_hits"`
	TotalMisses int64     `json:"total_misses"`
	LastUpdated time.Time `json:"last_updated"`
	EntryCount  int       `json:"entry_count"`
	TotalSize   int64     `json:"total_size"`
}

// NewCache creates a new cache instance, applying defaults for any
// zero-valued config fields and creating the cache directory on disk.
func NewCache(config CacheConfig) (*Cache, error) {
	// Set default values.
	if config.CacheDir == "" {
		homeDir, err := os.UserHomeDir()
		if err != nil {
			return nil, fmt.Errorf("failed to get home directory: %w", err)
		}
		config.CacheDir = filepath.Join(homeDir, ".mcp", "gitlab")
	}
	if config.TTL == 0 {
		config.TTL = 5 * time.Minute // Default 5 minute TTL
	}
	if config.MaxEntries == 0 {
		config.MaxEntries = 1000 // Default max entries
	}

	// Create cache directory structure.
	if err := os.MkdirAll(config.CacheDir, 0755); err != nil {
		return nil, fmt.Errorf("failed to create cache directory: %w", err)
	}

	cache := &Cache{
		config:   config,
		metadata: make(map[string]*CacheMetadata),
	}

	// Load existing metadata. A missing file is normal on first run and
	// returns nil; any other failure is non-fatal, so just warn.
	if err := cache.loadMetadata(); err != nil {
		fmt.Fprintf(os.Stderr, "Warning: failed to load cache metadata: %v\n", err)
	}

	return cache, nil
}

// generateCacheKey creates a deterministic cache key from request parameters.
// Param keys are sorted before hashing — Go map iteration order is random,
// so hashing in range order would yield a different key for identical
// requests. A NUL byte separates fields so ("ab","c") and ("a","bc")
// cannot collide.
func (c *Cache) generateCacheKey(endpoint string, params map[string]string) string {
	keys := make([]string, 0, len(params))
	for k := range params {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	h := sha256.New()
	h.Write([]byte(endpoint))
	for _, k := range keys {
		h.Write([]byte{0})
		h.Write([]byte(k))
		h.Write([]byte{0})
		h.Write([]byte(params[k]))
	}
	return fmt.Sprintf("%x", h.Sum(nil))
}

// getCachePath returns the file path for a cache entry, creating the
// type subdirectory and shard directory as a side effect. Directory
// creation errors are intentionally ignored here: a later read or write
// on the returned path will surface the failure.
func (c *Cache) getCachePath(cacheType, key string) string {
	// Create subdirectories for different cache types (issues, projects, etc.)
	dir := filepath.Join(c.config.CacheDir, cacheType)
	_ = os.MkdirAll(dir, 0755)

	// Use first 2 chars of key for sharding to keep directories small.
	if len(key) >= 2 {
		shardDir := filepath.Join(dir, key[:2])
		_ = os.MkdirAll(shardDir, 0755)
		return filepath.Join(shardDir, key+".json")
	}
	return filepath.Join(dir, key+".json")
}

// Get retrieves a cached entry if it exists and is valid. Expired entries
// are still returned when EnableOffline is set. The second return value
// reports whether usable data was found.
func (c *Cache) Get(cacheType, endpoint string, params map[string]string) ([]byte, bool) {
	// A write lock is required here (not RLock): both the hit/miss
	// counters and the persisted access stats are mutated below.
	c.mu.Lock()
	defer c.mu.Unlock()

	key := c.generateCacheKey(endpoint, params)
	cachePath := c.getCachePath(cacheType, key)

	// Read cache file.
	data, err := os.ReadFile(cachePath)
	if err != nil {
		c.recordMiss(cacheType)
		return nil, false
	}

	var entry CacheEntry
	if err := json.Unmarshal(data, &entry); err != nil {
		c.recordMiss(cacheType)
		return nil, false
	}

	// Check if entry is still valid.
	if time.Since(entry.Timestamp) > c.config.TTL {
		if !c.config.EnableOffline {
			c.recordMiss(cacheType)
			return nil, false
		}
		// Stale data is acceptable in offline mode; fall through.
	}

	// Update access time and hit count, then persist the bookkeeping.
	// Best-effort: a failed write only loses statistics, not data.
	entry.LastAccessed = time.Now()
	entry.HitCount++
	if updated, err := json.MarshalIndent(entry, "", " "); err == nil {
		_ = os.WriteFile(cachePath, updated, 0644)
	}

	c.recordHit(cacheType)
	return entry.Data, true
}

// Set stores data in the cache under the key derived from endpoint+params.
func (c *Cache) Set(cacheType, endpoint string, params map[string]string, data []byte, statusCode int) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	key := c.generateCacheKey(endpoint, params)
	cachePath := c.getCachePath(cacheType, key)

	now := time.Now()
	entry := CacheEntry{
		Key:          key,
		Data:         json.RawMessage(data),
		Timestamp:    now,
		LastAccessed: now,
		HitCount:     0,
		Size:         len(data),
		StatusCode:   statusCode,
	}

	// Marshal and save entry.
	entryData, err := json.MarshalIndent(entry, "", " ")
	if err != nil {
		return fmt.Errorf("failed to marshal cache entry: %w", err)
	}
	if err := os.WriteFile(cachePath, entryData, 0644); err != nil {
		return fmt.Errorf("failed to write cache file: %w", err)
	}

	// Update metadata.
	c.updateMetadata(cacheType, len(data))

	// Enforce max entries limit. Called synchronously: it is currently a
	// no-op (see TODO), and spawning it as a goroutine while the deferred
	// unlock is pending would race once it is implemented.
	c.enforceMaxEntries(cacheType)

	return nil
}

// Delete removes a specific cache entry. Deleting an entry that does not
// exist is not an error (the operation is idempotent).
func (c *Cache) Delete(cacheType, endpoint string, params map[string]string) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	key := c.generateCacheKey(endpoint, params)
	cachePath := c.getCachePath(cacheType, key)

	if err := os.Remove(cachePath); err != nil && !errors.Is(err, os.ErrNotExist) {
		return err
	}
	return nil
}

// Clear removes all cached data for a specific type, or for all types
// when cacheType is empty. The base cache directory itself is preserved.
func (c *Cache) Clear(cacheType string) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	if cacheType == "" {
		// Clear all cache but preserve the base directory.
		entries, err := os.ReadDir(c.config.CacheDir)
		if err != nil {
			return err
		}
		for _, entry := range entries {
			if err := os.RemoveAll(filepath.Join(c.config.CacheDir, entry.Name())); err != nil {
				return err
			}
		}
		// Reset metadata.
		c.metadata = make(map[string]*CacheMetadata)
		return c.saveMetadata()
	}

	// Clear specific cache type.
	cacheDir := filepath.Join(c.config.CacheDir, cacheType)
	if err := os.RemoveAll(cacheDir); err != nil {
		return err
	}

	// Update metadata.
	delete(c.metadata, cacheType)
	return c.saveMetadata()
}

// GetStats returns a deep copy of the per-type cache statistics, so
// callers cannot mutate the cache's internal state.
func (c *Cache) GetStats() map[string]*CacheMetadata {
	c.mu.RLock()
	defer c.mu.RUnlock()

	stats := make(map[string]*CacheMetadata, len(c.metadata))
	for k, v := range c.metadata {
		stats[k] = &CacheMetadata{
			TotalHits:   v.TotalHits,
			TotalMisses: v.TotalMisses,
			LastUpdated: v.LastUpdated,
			EntryCount:  v.EntryCount,
			TotalSize:   v.TotalSize,
		}
	}
	return stats
}

// meta returns the metadata record for cacheType, lazily creating it.
// Caller must hold c.mu.
func (c *Cache) meta(cacheType string) *CacheMetadata {
	m, ok := c.metadata[cacheType]
	if !ok {
		m = &CacheMetadata{}
		c.metadata[cacheType] = m
	}
	return m
}

// recordHit increments the hit counter. Caller must hold c.mu.
func (c *Cache) recordHit(cacheType string) {
	c.meta(cacheType).TotalHits++
}

// recordMiss increments the miss counter. Caller must hold c.mu.
func (c *Cache) recordMiss(cacheType string) {
	c.meta(cacheType).TotalMisses++
}

// updateMetadata records a newly stored entry and persists the metadata.
// Caller must hold c.mu.
func (c *Cache) updateMetadata(cacheType string, sizeAdded int) {
	m := c.meta(cacheType)
	m.LastUpdated = time.Now()
	m.EntryCount++
	m.TotalSize += int64(sizeAdded)

	// Best-effort persistence; statistics loss is acceptable.
	_ = c.saveMetadata()
}

// loadMetadata restores statistics from disk. A missing metadata file is
// normal on first run and is not treated as an error.
func (c *Cache) loadMetadata() error {
	metaPath := filepath.Join(c.config.CacheDir, "metadata.json")
	data, err := os.ReadFile(metaPath)
	if err != nil {
		if errors.Is(err, os.ErrNotExist) {
			return nil
		}
		return err
	}
	return json.Unmarshal(data, &c.metadata)
}

// saveMetadata writes the statistics map to disk. Caller must hold c.mu.
func (c *Cache) saveMetadata() error {
	metaPath := filepath.Join(c.config.CacheDir, "metadata.json")
	data, err := json.MarshalIndent(c.metadata, "", " ")
	if err != nil {
		return err
	}
	return os.WriteFile(metaPath, data, 0644)
}

// enforceMaxEntries removes oldest entries when the limit is exceeded.
// TODO: scan the cache directory and evict entries by LastAccessed when
// the count exceeds MaxEntries. The implementation must acquire c.mu
// (or be called with it held) before touching metadata.
func (c *Cache) enforceMaxEntries(cacheType string) {
}

// MergeStrategy defines how to merge cached data with fresh API data.
type MergeStrategy int

const (
	MergeReplace MergeStrategy = iota // Replace cache with new data
	MergeAppend                       // Append new items to existing
	MergeDiff                         // Only add/update changed items
)

// MergeAPIResponse merges fresh API data with cached data based on strategy.
// Unknown cache types default to returning the fresh payload unchanged.
func (c *Cache) MergeAPIResponse(cacheType string, cached, fresh []byte, strategy MergeStrategy) ([]byte, error) {
	switch cacheType {
	case "issues":
		return c.mergeIssues(cached, fresh, strategy)
	case "projects":
		return c.mergeProjects(cached, fresh, strategy)
	default:
		// Default to replace strategy.
		return fresh, nil
	}
}

// marshalIssueMap flattens an ID-keyed issue map back to a JSON array.
// NOTE: map iteration order is random, so the output order is
// unspecified — callers must not rely on element order.
func marshalIssueMap(m map[int]GitLabIssue) ([]byte, error) {
	merged := make([]GitLabIssue, 0, len(m))
	for _, issue := range m {
		merged = append(merged, issue)
	}
	return json.Marshal(merged)
}

// mergeIssues merges cached and fresh issue lists. Invalid cached JSON
// falls back to the fresh payload; invalid fresh JSON is an error.
func (c *Cache) mergeIssues(cached, fresh []byte, strategy MergeStrategy) ([]byte, error) {
	var cachedIssues, freshIssues []GitLabIssue
	if err := json.Unmarshal(cached, &cachedIssues); err != nil {
		return fresh, nil // Return fresh data if cached is invalid
	}
	if err := json.Unmarshal(fresh, &freshIssues); err != nil {
		return nil, err
	}

	switch strategy {
	case MergeReplace:
		return fresh, nil

	case MergeAppend:
		// Union of cached and fresh by ID; fresh data overwrites cached.
		issueMap := make(map[int]GitLabIssue, len(cachedIssues)+len(freshIssues))
		for _, issue := range cachedIssues {
			issueMap[issue.ID] = issue
		}
		for _, issue := range freshIssues {
			issueMap[issue.ID] = issue
		}
		return marshalIssueMap(issueMap)

	case MergeDiff:
		// Take a fresh issue only when it is new or strictly newer than
		// the cached copy (by UpdatedAt).
		issueMap := make(map[int]GitLabIssue, len(cachedIssues))
		for _, issue := range cachedIssues {
			issueMap[issue.ID] = issue
		}
		for _, freshIssue := range freshIssues {
			cachedIssue, exists := issueMap[freshIssue.ID]
			if !exists || freshIssue.UpdatedAt.After(cachedIssue.UpdatedAt) {
				issueMap[freshIssue.ID] = freshIssue
			}
		}
		return marshalIssueMap(issueMap)
	}

	return fresh, nil
}

// mergeProjects merges cached and fresh project lists.
// TODO: implement per-strategy merging analogous to mergeIssues; for now
// the fresh payload always wins.
func (c *Cache) mergeProjects(cached, fresh []byte, strategy MergeStrategy) ([]byte, error) {
	return fresh, nil
}