code.gitea.io/gitea@v1.22.3/modules/charset/charset.go

// Copyright 2014 The Gogs Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package charset

import (
	"bytes"
	"fmt"
	"io"
	"strings"
	"unicode/utf8"

	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/setting"
	"code.gitea.io/gitea/modules/util"

	"github.com/gogs/chardet"
	"golang.org/x/net/html/charset"
	"golang.org/x/text/transform"
)

// UTF8BOM is the utf-8 byte-order marker
var UTF8BOM = []byte{'\xef', '\xbb', '\xbf'}

type ConvertOpts struct {
	KeepBOM bool
}

// ToUTF8WithFallbackReader detects the encoding of content and converts to UTF-8 reader if possible
func ToUTF8WithFallbackReader(rd io.Reader, opts ConvertOpts) io.Reader {
	buf := make([]byte, 2048)
	n, err := util.ReadAtMost(rd, buf)
	if err != nil {
		return io.MultiReader(bytes.NewReader(MaybeRemoveBOM(buf[:n], opts)), rd)
	}

	charsetLabel, err := DetectEncoding(buf[:n])
	if err != nil || charsetLabel == "UTF-8" {
		return io.MultiReader(bytes.NewReader(MaybeRemoveBOM(buf[:n], opts)), rd)
	}

	encoding, _ := charset.Lookup(charsetLabel)
	if encoding == nil {
		return io.MultiReader(bytes.NewReader(buf[:n]), rd)
	}

	return transform.NewReader(
		io.MultiReader(
			bytes.NewReader(MaybeRemoveBOM(buf[:n], opts)),
			rd,
		),
		encoding.NewDecoder(),
	)
}

// ToUTF8 converts content to UTF8 encoding
func ToUTF8(content []byte, opts ConvertOpts) (string, error) {
	charsetLabel, err := DetectEncoding(content)
	if err != nil {
		return "", err
	} else if charsetLabel == "UTF-8" {
		return string(MaybeRemoveBOM(content, opts)), nil
	}

	encoding, _ := charset.Lookup(charsetLabel)
	if encoding == nil {
		return string(content), fmt.Errorf("Unknown encoding: %s", charsetLabel)
	}

	// If there is an error, we concatenate the nicely decoded part and the
	// original left over. This way we won't lose much data.
	result, n, err := transform.Bytes(encoding.NewDecoder(), content)
	if err != nil {
		result = append(result, content[n:]...)
	}

	result = MaybeRemoveBOM(result, opts)

	return string(result), err
}

// ToUTF8WithFallback detects the encoding of content and converts to UTF-8 if possible
func ToUTF8WithFallback(content []byte, opts ConvertOpts) []byte {
	bs, _ := io.ReadAll(ToUTF8WithFallbackReader(bytes.NewReader(content), opts))
	return bs
}

// ToUTF8DropErrors makes sure the return string is valid utf-8; attempts conversion if possible
func ToUTF8DropErrors(content []byte, opts ConvertOpts) []byte {
	charsetLabel, err := DetectEncoding(content)
	if err != nil || charsetLabel == "UTF-8" {
		return MaybeRemoveBOM(content, opts)
	}

	encoding, _ := charset.Lookup(charsetLabel)
	if encoding == nil {
		return content
	}

	// We ignore any non-decodable parts from the file.
	// Some parts might be lost
	var decoded []byte
	decoder := encoding.NewDecoder()
	idx := 0
	for {
		result, n, err := transform.Bytes(decoder, content[idx:])
		decoded = append(decoded, result...)
		if err == nil {
			break
		}
		decoded = append(decoded, ' ')
		idx = idx + n + 1
		if idx >= len(content) {
			break
		}
	}

	return MaybeRemoveBOM(decoded, opts)
}

// MaybeRemoveBOM removes a UTF-8 BOM from a []byte when opts.KeepBOM is false
func MaybeRemoveBOM(content []byte, opts ConvertOpts) []byte {
	if opts.KeepBOM {
		return content
	}
	if len(content) > 2 && bytes.Equal(content[0:3], UTF8BOM) {
		return content[3:]
	}
	return content
}

// DetectEncoding detects the encoding of content
func DetectEncoding(content []byte) (string, error) {
	// First we check if the content represents valid utf8 content except for a possibly truncated character at the end.

	// Now we could decode all the runes in turn but this is not necessarily the cheapest thing to do
	// instead we walk backwards from the end to trim off the incomplete character
	toValidate := content
	end := len(toValidate) - 1

	if end < 0 {
		// no-op
	} else if toValidate[end]>>5 == 0b110 {
		// Incomplete 1 byte extension e.g. © <c2><a9> which has been truncated to <c2>
		toValidate = toValidate[:end]
	} else if end > 0 && toValidate[end]>>6 == 0b10 && toValidate[end-1]>>4 == 0b1110 {
		// Incomplete 2 byte extension e.g. ⛔ <e2><9b><94> which has been truncated to <e2><9b>
		toValidate = toValidate[:end-1]
	} else if end > 1 && toValidate[end]>>6 == 0b10 && toValidate[end-1]>>6 == 0b10 && toValidate[end-2]>>3 == 0b11110 {
		// Incomplete 3 byte extension e.g. 💩 <f0><9f><92><a9> which has been truncated to <f0><9f><92>
		toValidate = toValidate[:end-2]
	}
	if utf8.Valid(toValidate) {
		log.Debug("Detected encoding: utf-8 (fast)")
		return "UTF-8", nil
	}

	textDetector := chardet.NewTextDetector()
	var detectContent []byte
	if len(content) < 1024 {
		// Check if original content is valid
		if _, err := textDetector.DetectBest(content); err != nil {
			return "", err
		}
		times := 1024 / len(content)
		detectContent = make([]byte, 0, times*len(content))
		for i := 0; i < times; i++ {
			detectContent = append(detectContent, content...)
		}
	} else {
		detectContent = content
	}

	// Now we can't use DetectBest or just results[0] because the result isn't stable - so we need a tie-break
	results, err := textDetector.DetectAll(detectContent)
	if err != nil {
		if err == chardet.NotDetectedError && len(setting.Repository.AnsiCharset) > 0 {
			log.Debug("Using default AnsiCharset: %s", setting.Repository.AnsiCharset)
			return setting.Repository.AnsiCharset, nil
		}
		return "", err
	}

	topConfidence := results[0].Confidence
	topResult := results[0]
	priority, has := setting.Repository.DetectedCharsetScore[strings.ToLower(strings.TrimSpace(topResult.Charset))]
	for _, result := range results {
		// As results are sorted in confidence order - if we have a different confidence
		// we know it's less than the current confidence and can break out of the loop early
		if result.Confidence != topConfidence {
			break
		}

		// Otherwise check if this result is earlier in the DetectedCharsetOrder than our current top guess
		resultPriority, resultHas := setting.Repository.DetectedCharsetScore[strings.ToLower(strings.TrimSpace(result.Charset))]
		if resultHas && (!has || resultPriority < priority) {
			topResult = result
			priority = resultPriority
			has = true
		}
	}

	// FIXME: to properly decouple this function the fallback ANSI charset should be passed as an argument
	if topResult.Charset != "UTF-8" && len(setting.Repository.AnsiCharset) > 0 {
		log.Debug("Using default AnsiCharset: %s", setting.Repository.AnsiCharset)
		return setting.Repository.AnsiCharset, err
	}

	log.Debug("Detected encoding: %s", topResult.Charset)
	return topResult.Charset, err
}
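
Below is a minimal usage sketch, not part of the file above. It assumes the package can be imported from an external module as code.gitea.io/gitea/modules/charset (the path shown in the header) and only calls the exported functions defined above; the sample inputs and the expected outputs noted in the comments are illustrative, not taken from the original source.

package main

import (
	"fmt"
	"io"
	"strings"

	"code.gitea.io/gitea/modules/charset"
)

func main() {
	// Valid UTF-8 input takes the fast path in DetectEncoding.
	label, err := charset.DetectEncoding([]byte("héllo wörld"))
	fmt.Println(label, err) // expected: UTF-8 <nil>

	// With the zero value ConvertOpts{}, KeepBOM is false, so a leading UTF-8 BOM is stripped.
	s, err := charset.ToUTF8([]byte("\xef\xbb\xbfwith BOM"), charset.ConvertOpts{})
	fmt.Println(s, err) // expected: with BOM <nil>

	// Wrap a reader so its contents are converted to UTF-8 on the fly.
	rd := charset.ToUTF8WithFallbackReader(strings.NewReader("streamed content"), charset.ConvertOpts{})
	converted, _ := io.ReadAll(rd)
	fmt.Println(string(converted)) // expected: streamed content
}

For input that is not valid UTF-8, DetectEncoding falls back to chardet and consults the Repository.AnsiCharset and Repository.DetectedCharsetScore settings, so results in a running Gitea instance depend on that configuration.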