github.com/pietrocarrara/hugo@v0.47.1/resource/resource_cache.go

// Copyright 2018 The Hugo Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package resource

import (
	"encoding/json"
	"io/ioutil"
	"path"
	"path/filepath"
	"strings"
	"sync"

	"github.com/gohugoio/hugo/helpers"
	"github.com/spf13/afero"

	"github.com/BurntSushi/locker"
)

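// Special cache partition names: CACHE_CLEAR_ALL clears the entire cache when
// passed to DeletePartitions, and CACHE_OTHER is the partition used for
// resources without a file extension.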
const (
	CACHE_CLEAR_ALL = "clear_all"
	CACHE_OTHER     = "other"
)

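// ResourceCache caches created resources in memory, keyed by partition and
// resource key, and provides helpers for the file-backed cache used for
// transformed resources.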
type ResourceCache struct {
	rs *Spec

	cache map[string]Resource
	sync.RWMutex

	// Provides named resource locks.
	nlocker *locker.Locker
}

// ResourceKeyPartition returns a partition name
// to allow for more fine-grained cache flushes.
// It returns the file extension without the leading ".". If there is no
// extension, it returns "other".
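//
// Illustrative examples (filenames are hypothetical):
//
//	ResourceKeyPartition("images/sunset.jpg") // "jpg"
//	ResourceKeyPartition("data/authors")      // "other"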
func ResourceKeyPartition(filename string) string {
	ext := strings.TrimPrefix(path.Ext(filepath.ToSlash(filename)), ".")
	if ext == "" {
		ext = CACHE_OTHER
	}
	return ext
}

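// newResourceCache creates an empty ResourceCache for the given Spec.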
func newResourceCache(rs *Spec) *ResourceCache {
	return &ResourceCache{
		rs:      rs,
		cache:   make(map[string]Resource),
		nlocker: locker.NewLocker(),
	}
}

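// clear drops every cached resource and resets the named locks.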
func (c *ResourceCache) clear() {
	c.Lock()
	defer c.Unlock()

	c.cache = make(map[string]Resource)
	c.nlocker = locker.NewLocker()
}

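// Contains reports whether a resource with the given key is in the in-memory cache.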
func (c *ResourceCache) Contains(key string) bool {
	key = c.cleanKey(filepath.ToSlash(key))
	_, found := c.get(key)
	return found
}

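// cleanKey normalizes the key with path.Clean and strips any leading slash.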
func (c *ResourceCache) cleanKey(key string) string {
	return strings.TrimPrefix(path.Clean(key), "/")
}

func (c *ResourceCache) get(key string) (Resource, bool) {
	c.RLock()
	defer c.RUnlock()
	r, found := c.cache[key]
	return r, found
}

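// GetOrCreate returns the resource cached under the given partition and key.
// If it is not cached yet, f is called to create it and the result is cached
// for later lookups. A named lock per key keeps f from running more than once
// for concurrent callers asking for the same key.
//
// A rough usage sketch (receiver and factory are hypothetical, for
// illustration only):
//
//	r, err := cache.GetOrCreate("json", "data/site.json", func() (Resource, error) {
//		return buildResource() // hypothetical factory
//	})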
func (c *ResourceCache) GetOrCreate(partition, key string, f func() (Resource, error)) (Resource, error) {
	key = c.cleanKey(path.Join(partition, key))
	// First check in-memory cache.
	r, found := c.get(key)
	if found {
		return r, nil
	}
	// This is a potentially long-running operation, so get a named lock.
	c.nlocker.Lock(key)

	// Double check in-memory cache.
	r, found = c.get(key)
	if found {
		c.nlocker.Unlock(key)
		return r, nil
	}

	defer c.nlocker.Unlock(key)

	r, err := f()
	if err != nil {
		return nil, err
	}

	c.set(key, r)

	return r, nil
}

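// getFilenames returns the two file names used by the file cache for the given
// key: one for the JSON metadata and one for the content, both rooted in the
// generated assets directory.
//
// For example (hypothetical key and path), a key of "scss/main.css" maps to
// "<GenAssetsPath>/scss/main.css.json" and "<GenAssetsPath>/scss/main.css.content".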
func (c *ResourceCache) getFilenames(key string) (string, string) {
	filenameBase := filepath.Join(c.rs.GenAssetsPath, key)
	filenameMeta := filenameBase + ".json"
	filenameContent := filenameBase + ".content"

	return filenameMeta, filenameContent
}

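// getFromFile opens the cached content file for the given key and decodes its
// JSON metadata from the file cache. It returns false as its last value if
// either file is missing or the metadata cannot be decoded; otherwise the
// caller is responsible for closing the returned file.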
func (c *ResourceCache) getFromFile(key string) (afero.File, transformedResourceMetadata, bool) {
	c.RLock()
	defer c.RUnlock()

	var meta transformedResourceMetadata
	filenameMeta, filenameContent := c.getFilenames(key)
	fMeta, err := c.rs.Resources.Fs.Open(filenameMeta)
	if err != nil {
		return nil, meta, false
	}
	defer fMeta.Close()

	jsonContent, err := ioutil.ReadAll(fMeta)
	if err != nil {
		return nil, meta, false
	}

	if err := json.Unmarshal(jsonContent, &meta); err != nil {
		return nil, meta, false
	}

	fContent, err := c.rs.Resources.Fs.Open(filenameContent)
	if err != nil {
		return nil, meta, false
	}

	return fContent, meta, true
}

// writeMeta writes the metadata to file and returns a writer for the content part.
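// The caller is expected to write the transformed content to the returned
// file and close it, roughly like this (hypothetical caller, for illustration
// only):
//
//	f, err := c.writeMeta(key, meta)
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	_, err = f.Write(transformedContent) // transformedContent is assumed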
func (c *ResourceCache) writeMeta(key string, meta transformedResourceMetadata) (afero.File, error) {
	filenameMeta, filenameContent := c.getFilenames(key)
	raw, err := json.Marshal(meta)
	if err != nil {
		return nil, err
	}

	fm, err := c.openResourceFileForWriting(filenameMeta)
	if err != nil {
		return nil, err
	}

	if _, err := fm.Write(raw); err != nil {
		return nil, err
	}

	return c.openResourceFileForWriting(filenameContent)
}

func (c *ResourceCache) openResourceFileForWriting(filename string) (afero.File, error) {
	return helpers.OpenFileForWriting(c.rs.Resources.Fs, filename)
}

func (c *ResourceCache) set(key string, r Resource) {
	c.Lock()
	defer c.Unlock()
	c.cache[key] = r
}

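// DeletePartitions removes all cached resources belonging to the given
// partitions. Passing CACHE_CLEAR_ALL clears the whole cache. Resources in
// the CACHE_OTHER partition, and keys without a partition prefix, are always
// removed.
//
// Illustrative call: c.DeletePartitions("scss", "json") drops every cached
// entry whose key starts with "scss/" or "json/".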
func (c *ResourceCache) DeletePartitions(partitions ...string) {
	partitionsSet := map[string]bool{
		// Always clear out the resources not matching the partition.
		"other": true,
	}
	for _, p := range partitions {
		partitionsSet[p] = true
	}

	if partitionsSet[CACHE_CLEAR_ALL] {
		c.clear()
		return
	}

	c.Lock()
	defer c.Unlock()

	for k := range c.cache {
		clear := false
		partIdx := strings.Index(k, "/")
		if partIdx == -1 {
			clear = true
		} else {
			partition := k[:partIdx]
			if partitionsSet[partition] {
				clear = true
			}
		}

		if clear {
			delete(c.cache, k)
		}
	}
}