1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
|
package gitlab
import (
	"crypto/sha256"
	"encoding/json"
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"sort"
	"sync"
	"time"
)
// CacheConfig defines cache behavior settings. Zero values are replaced with
// defaults by NewCache (see that function for the concrete defaults).
type CacheConfig struct {
	CacheDir      string        // Base directory for cache files (e.g., ~/.mcp/gitlab); defaulted in NewCache when empty
	TTL           time.Duration // Time-to-live for cached entries; defaulted in NewCache when zero
	MaxEntries    int           // Maximum number of entries per cache type; defaulted in NewCache when zero
	EnableOffline bool          // Whether to return stale (past-TTL) data instead of a miss
	CompressData  bool          // Whether to compress cached data — NOTE(review): no code in this file reads this flag yet
}
// CacheEntry represents a single cached item as persisted on disk
// (one JSON file per entry).
type CacheEntry struct {
	Key          string          `json:"key"`                   // SHA-256 cache key derived from endpoint+params
	Data         json.RawMessage `json:"data"`                  // the cached payload, stored verbatim
	Timestamp    time.Time       `json:"timestamp"`             // when the entry was written; compared against TTL
	LastAccessed time.Time       `json:"last_accessed"`         // updated on every cache hit
	HitCount     int             `json:"hit_count"`             // number of times this entry has been served
	Size         int             `json:"size"`                  // payload size in bytes at write time
	ETag         string          `json:"etag,omitempty"`        // optional HTTP ETag — NOTE(review): never set in this file; presumably set by callers
	StatusCode   int             `json:"status_code,omitempty"` // HTTP status of the cached response
}
// Cache provides thread-safe caching functionality. Entries are stored on
// disk under config.CacheDir; only per-type statistics live in memory.
type Cache struct {
	mu       sync.RWMutex              // guards metadata and serializes entry-file writes
	config   CacheConfig               // effectively immutable after NewCache
	metadata map[string]*CacheMetadata // per-cache-type statistics, keyed by cache type (e.g. "issues")
}
// CacheMetadata tracks cache statistics for one cache type. It is persisted
// to metadata.json in the cache root by saveMetadata.
type CacheMetadata struct {
	TotalHits   int64     `json:"total_hits"`   // cumulative Get hits
	TotalMisses int64     `json:"total_misses"` // cumulative Get misses
	LastUpdated time.Time `json:"last_updated"` // last time an entry was written for this type
	EntryCount  int       `json:"entry_count"`  // number of Set calls — NOTE(review): incremented even when overwriting an existing key
	TotalSize   int64     `json:"total_size"`   // cumulative bytes written (not decremented on delete/clear)
}
// NewCache creates a cache instance, filling in defaults for any zero-valued
// config fields and ensuring the cache directory exists on disk.
//
// Defaults: CacheDir = ~/.mcp/gitlab, TTL = 5m, MaxEntries = 1000.
// A failure to load previously persisted metadata is non-fatal (only
// statistics are lost) and is reported as a warning on stderr.
func NewCache(config CacheConfig) (*Cache, error) {
	if config.CacheDir == "" {
		home, err := os.UserHomeDir()
		if err != nil {
			return nil, fmt.Errorf("failed to get home directory: %w", err)
		}
		config.CacheDir = filepath.Join(home, ".mcp", "gitlab")
	}
	if config.TTL == 0 {
		config.TTL = 5 * time.Minute
	}
	if config.MaxEntries == 0 {
		config.MaxEntries = 1000
	}

	if err := os.MkdirAll(config.CacheDir, 0755); err != nil {
		return nil, fmt.Errorf("failed to create cache directory: %w", err)
	}

	c := &Cache{
		config:   config,
		metadata: make(map[string]*CacheMetadata),
	}
	// Best-effort restore of persisted statistics.
	if err := c.loadMetadata(); err != nil {
		fmt.Fprintf(os.Stderr, "Warning: failed to load cache metadata: %v\n", err)
	}
	return c, nil
}
// generateCacheKey creates a deterministic cache key (hex SHA-256) from the
// endpoint and its query parameters.
//
// BUG FIX: the previous implementation ranged over the params map directly,
// but Go map iteration order is randomized per iteration — identical
// requests produced different keys, so lookups effectively never hit and
// duplicate entries accumulated. Keys are now sorted before hashing, making
// the digest stable for a given (endpoint, params) pair.
func (c *Cache) generateCacheKey(endpoint string, params map[string]string) string {
	h := sha256.New()
	h.Write([]byte(endpoint))
	// Sort param names so identical requests always hash identically.
	keys := make([]string, 0, len(params))
	for k := range params {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, k := range keys {
		h.Write([]byte(k))
		h.Write([]byte(params[k]))
	}
	return fmt.Sprintf("%x", h.Sum(nil))
}
// getCachePath returns the on-disk location for a cache entry, creating the
// needed directories as a side effect. Entries are sharded into
// subdirectories named after the first two characters of the key so no
// single directory grows unbounded.
func (c *Cache) getCachePath(cacheType, key string) string {
	typeDir := filepath.Join(c.config.CacheDir, cacheType)
	// MkdirAll failures are deliberately ignored here; the subsequent
	// read/write of the entry file will surface any real problem.
	os.MkdirAll(typeDir, 0755)

	if len(key) < 2 {
		// Key too short to shard — store directly under the type directory.
		return filepath.Join(typeDir, key+".json")
	}
	shardDir := filepath.Join(typeDir, key[:2])
	os.MkdirAll(shardDir, 0755)
	return filepath.Join(shardDir, key+".json")
}
// Get retrieves a cached entry if it exists and is still within TTL.
// When the entry has expired and EnableOffline is set, the stale data is
// returned anyway (offline fallback). Returns (data, true) on a usable hit.
//
// BUG FIX: the previous version held only the read lock while recordHit /
// recordMiss mutated c.metadata — a data race under concurrent Gets. Get now
// takes the write lock. The bookkeeping writeback stays asynchronous so the
// caller is not blocked on disk I/O; the goroutine receives its own copies
// of the entry and path and re-acquires the lock before writing.
func (c *Cache) Get(cacheType, endpoint string, params map[string]string) ([]byte, bool) {
	c.mu.Lock()
	defer c.mu.Unlock()

	key := c.generateCacheKey(endpoint, params)
	cachePath := c.getCachePath(cacheType, key)

	// Read and decode the entry file; any failure counts as a miss.
	data, err := os.ReadFile(cachePath)
	if err != nil {
		c.recordMiss(cacheType)
		return nil, false
	}
	var entry CacheEntry
	if err := json.Unmarshal(data, &entry); err != nil {
		c.recordMiss(cacheType)
		return nil, false
	}

	// Expired entries are misses unless offline fallback is enabled,
	// in which case stale data is better than nothing.
	if time.Since(entry.Timestamp) > c.config.TTL && !c.config.EnableOffline {
		c.recordMiss(cacheType)
		return nil, false
	}

	// Update access bookkeeping on the hit.
	entry.LastAccessed = time.Now()
	entry.HitCount++

	// Persist the updated bookkeeping asynchronously; copies are passed in
	// so the goroutine does not share state with this call frame.
	go func(e CacheEntry, path string) {
		c.mu.Lock()
		defer c.mu.Unlock()
		if updated, err := json.MarshalIndent(e, "", " "); err == nil {
			// Best effort: a failed writeback only loses hit statistics.
			os.WriteFile(path, updated, 0644)
		}
	}(entry, cachePath)

	c.recordHit(cacheType)
	return entry.Data, true
}
// Set stores a response payload in the cache under the given
// type/endpoint/params, recording its size and HTTP status code.
// Metadata is updated synchronously; eviction runs in the background.
func (c *Cache) Set(cacheType, endpoint string, params map[string]string, data []byte, statusCode int) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	now := time.Now()
	key := c.generateCacheKey(endpoint, params)
	entry := CacheEntry{
		Key:          key,
		Data:         json.RawMessage(data),
		Timestamp:    now,
		LastAccessed: now,
		Size:         len(data),
		StatusCode:   statusCode,
	}

	encoded, err := json.MarshalIndent(entry, "", " ")
	if err != nil {
		return fmt.Errorf("failed to marshal cache entry: %w", err)
	}
	if err := os.WriteFile(c.getCachePath(cacheType, key), encoded, 0644); err != nil {
		return fmt.Errorf("failed to write cache file: %w", err)
	}

	c.updateMetadata(cacheType, len(data))
	// Enforce the entry limit without blocking the caller.
	go c.enforceMaxEntries(cacheType)
	return nil
}
// Delete removes a specific cache entry.
//
// Deleting an entry that does not exist is treated as success, making Delete
// idempotent (the previous version returned the os.Remove "file does not
// exist" error, forcing every caller to special-case it).
func (c *Cache) Delete(cacheType, endpoint string, params map[string]string) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	key := c.generateCacheKey(endpoint, params)
	cachePath := c.getCachePath(cacheType, key)
	if err := os.Remove(cachePath); err != nil && !errors.Is(err, os.ErrNotExist) {
		return err
	}
	return nil
}
// Clear removes cached data. With an empty cacheType every cache type is
// wiped (the base directory itself is preserved); otherwise only the named
// type is removed. In-memory metadata is reset to match and persisted.
func (c *Cache) Clear(cacheType string) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	if cacheType != "" {
		// Drop a single cache type along with its statistics.
		if err := os.RemoveAll(filepath.Join(c.config.CacheDir, cacheType)); err != nil {
			return err
		}
		delete(c.metadata, cacheType)
		return c.saveMetadata()
	}

	// Wipe every child of the base directory, keeping the directory itself.
	children, err := os.ReadDir(c.config.CacheDir)
	if err != nil {
		return err
	}
	for _, child := range children {
		if err := os.RemoveAll(filepath.Join(c.config.CacheDir, child.Name())); err != nil {
			return err
		}
	}
	c.metadata = make(map[string]*CacheMetadata)
	return c.saveMetadata()
}
// GetStats returns a snapshot of per-cache-type statistics. Each
// CacheMetadata is copied so callers can use the result without holding any
// lock or racing with subsequent cache operations.
func (c *Cache) GetStats() map[string]*CacheMetadata {
	c.mu.RLock()
	defer c.mu.RUnlock()

	snapshot := make(map[string]*CacheMetadata, len(c.metadata))
	for name, meta := range c.metadata {
		copied := *meta
		snapshot[name] = &copied
	}
	return snapshot
}
// Helper methods
// recordHit bumps the hit counter for cacheType, lazily creating the
// metadata record on first use. Caller must hold c.mu for writing.
func (c *Cache) recordHit(cacheType string) {
	meta, ok := c.metadata[cacheType]
	if !ok {
		meta = &CacheMetadata{}
		c.metadata[cacheType] = meta
	}
	meta.TotalHits++
}
// recordMiss bumps the miss counter for cacheType, lazily creating the
// metadata record on first use. Caller must hold c.mu for writing.
func (c *Cache) recordMiss(cacheType string) {
	meta, ok := c.metadata[cacheType]
	if !ok {
		meta = &CacheMetadata{}
		c.metadata[cacheType] = meta
	}
	meta.TotalMisses++
}
// updateMetadata records a newly written entry of sizeAdded bytes for
// cacheType and persists the statistics. Caller must hold c.mu for writing.
//
// FIX: the previous version silently discarded saveMetadata's error. A
// metadata persistence failure must not fail the Set that triggered it, but
// it should not be swallowed either — it is now reported on stderr, matching
// the warning convention used by NewCache.
func (c *Cache) updateMetadata(cacheType string, sizeAdded int) {
	meta := c.metadata[cacheType]
	if meta == nil {
		meta = &CacheMetadata{}
		c.metadata[cacheType] = meta
	}
	meta.LastUpdated = time.Now()
	meta.EntryCount++
	meta.TotalSize += int64(sizeAdded)

	if err := c.saveMetadata(); err != nil {
		fmt.Fprintf(os.Stderr, "Warning: failed to save cache metadata: %v\n", err)
	}
}
// loadMetadata restores persisted statistics from metadata.json in the cache
// root into c.metadata.
//
// FIX: a missing metadata file is normal on the very first run; the previous
// version returned the read error, causing NewCache to print a spurious
// warning on every fresh install. os.ErrNotExist is now treated as a clean
// empty state.
func (c *Cache) loadMetadata() error {
	metaPath := filepath.Join(c.config.CacheDir, "metadata.json")
	data, err := os.ReadFile(metaPath)
	if errors.Is(err, os.ErrNotExist) {
		return nil // first run: nothing persisted yet
	}
	if err != nil {
		return err
	}
	return json.Unmarshal(data, &c.metadata)
}
// saveMetadata writes the in-memory statistics map to metadata.json in the
// cache root. Caller must hold c.mu (or be running before the cache is
// shared, as in NewCache).
func (c *Cache) saveMetadata() error {
	encoded, err := json.MarshalIndent(c.metadata, "", " ")
	if err != nil {
		return err
	}
	metaPath := filepath.Join(c.config.CacheDir, "metadata.json")
	return os.WriteFile(metaPath, encoded, 0644)
}
// enforceMaxEntries removes oldest entries when limit is exceeded.
//
// Intended behavior (not yet implemented): scan the cacheType subdirectory,
// and when the entry count exceeds c.config.MaxEntries, evict entries in
// LastAccessed order (oldest first). Invoked asynchronously from Set.
// TODO: implement eviction; currently a no-op stub.
func (c *Cache) enforceMaxEntries(cacheType string) {
}
// MergeStrategy defines how to merge cached data with fresh API data.
type MergeStrategy int

const (
	MergeReplace MergeStrategy = iota // Replace cache with new data wholesale
	MergeAppend                       // Union of cached and fresh items; fresh wins on ID collision
	MergeDiff                         // Keep cached items; take fresh ones only when new or more recently updated
)
// MergeAPIResponse merges fresh API data with cached data based on strategy,
// dispatching to a type-specific merger. Cache types without a dedicated
// merger fall back to replacing the cache with the fresh payload.
func (c *Cache) MergeAPIResponse(cacheType string, cached, fresh []byte, strategy MergeStrategy) ([]byte, error) {
	switch cacheType {
	case "issues":
		return c.mergeIssues(cached, fresh, strategy)
	case "projects":
		return c.mergeProjects(cached, fresh, strategy)
	}
	// No type-specific merge: behave like MergeReplace.
	return fresh, nil
}
// mergeIssues combines cached and fresh issue lists according to strategy.
//
// An unparsable cached payload is discarded and the fresh data is returned
// as-is; an unparsable fresh payload is an error.
//
// FIX: the merged slice was previously built by ranging over a map, so Go's
// randomized iteration order made the output bytes nondeterministic —
// identical inputs could produce different cached JSON on every merge. The
// merged output is now sorted by issue ID for stable, reproducible results.
func (c *Cache) mergeIssues(cached, fresh []byte, strategy MergeStrategy) ([]byte, error) {
	var cachedIssues, freshIssues []GitLabIssue
	if err := json.Unmarshal(cached, &cachedIssues); err != nil {
		return fresh, nil // corrupt cache: fall back to fresh data
	}
	if err := json.Unmarshal(fresh, &freshIssues); err != nil {
		return nil, err
	}

	switch strategy {
	case MergeAppend:
		byID := make(map[int]GitLabIssue, len(cachedIssues)+len(freshIssues))
		for _, issue := range cachedIssues {
			byID[issue.ID] = issue
		}
		for _, issue := range freshIssues {
			byID[issue.ID] = issue // fresh always overwrites cached
		}
		return marshalIssuesByID(byID)

	case MergeDiff:
		byID := make(map[int]GitLabIssue, len(cachedIssues)+len(freshIssues))
		for _, issue := range cachedIssues {
			byID[issue.ID] = issue
		}
		for _, freshIssue := range freshIssues {
			cachedIssue, exists := byID[freshIssue.ID]
			// Take the fresh copy only when it is new or strictly newer.
			if !exists || freshIssue.UpdatedAt.After(cachedIssue.UpdatedAt) {
				byID[freshIssue.ID] = freshIssue
			}
		}
		return marshalIssuesByID(byID)
	}

	// MergeReplace and any unknown strategy: fresh data wins outright.
	return fresh, nil
}

// marshalIssuesByID flattens an ID-keyed issue map into a JSON array sorted
// by issue ID, giving deterministic output regardless of map iteration order.
func marshalIssuesByID(byID map[int]GitLabIssue) ([]byte, error) {
	merged := make([]GitLabIssue, 0, len(byID))
	for _, issue := range byID {
		merged = append(merged, issue)
	}
	sort.Slice(merged, func(i, j int) bool { return merged[i].ID < merged[j].ID })
	return json.Marshal(merged)
}
// mergeProjects merges cached and fresh project payloads.
//
// TODO: implement per-strategy merging (mirroring mergeIssues). Until then
// the fresh payload always replaces the cache, regardless of strategy, and
// the cached argument is ignored.
func (c *Cache) mergeProjects(cached, fresh []byte, strategy MergeStrategy) ([]byte, error) {
	return fresh, nil
}
|