
Commit 29057ea

lunny and wxiaoguang authored
Fix bug when viewing the commit diff page with non-ANSI files (#36149)
Fix #35504 --------- Co-authored-by: wxiaoguang <[email protected]>
1 parent ac8308b commit 29057ea

File tree

11 files changed (+217, -314 lines)


modules/charset/charset.go

Lines changed: 65 additions & 83 deletions
@@ -5,12 +5,10 @@ package charset
 
 import (
 	"bytes"
-	"fmt"
 	"io"
 	"strings"
 	"unicode/utf8"
 
-	"code.gitea.io/gitea/modules/log"
 	"code.gitea.io/gitea/modules/setting"
 	"code.gitea.io/gitea/modules/util"
 
@@ -23,135 +21,125 @@ import (
 var UTF8BOM = []byte{'\xef', '\xbb', '\xbf'}
 
 type ConvertOpts struct {
-	KeepBOM bool
+	KeepBOM           bool
+	ErrorReplacement  []byte
+	ErrorReturnOrigin bool
 }
 
+var ToUTF8WithFallbackReaderPrefetchSize = 16 * 1024
+
 // ToUTF8WithFallbackReader detects the encoding of content and converts to UTF-8 reader if possible
 func ToUTF8WithFallbackReader(rd io.Reader, opts ConvertOpts) io.Reader {
-	buf := make([]byte, 2048)
+	buf := make([]byte, ToUTF8WithFallbackReaderPrefetchSize)
 	n, err := util.ReadAtMost(rd, buf)
 	if err != nil {
-		return io.MultiReader(bytes.NewReader(MaybeRemoveBOM(buf[:n], opts)), rd)
+		// read error occurs, don't do any processing
+		return io.MultiReader(bytes.NewReader(buf[:n]), rd)
 	}
 
-	charsetLabel, err := DetectEncoding(buf[:n])
-	if err != nil || charsetLabel == "UTF-8" {
-		return io.MultiReader(bytes.NewReader(MaybeRemoveBOM(buf[:n], opts)), rd)
+	charsetLabel, _ := DetectEncoding(buf[:n])
+	if charsetLabel == "UTF-8" {
+		// is utf-8, try to remove BOM and read it as-is
+		return io.MultiReader(bytes.NewReader(maybeRemoveBOM(buf[:n], opts)), rd)
 	}
 
 	encoding, _ := charset.Lookup(charsetLabel)
 	if encoding == nil {
+		// unknown charset, don't do any processing
 		return io.MultiReader(bytes.NewReader(buf[:n]), rd)
 	}
 
+	// convert from charset to utf-8
 	return transform.NewReader(
-		io.MultiReader(
-			bytes.NewReader(MaybeRemoveBOM(buf[:n], opts)),
-			rd,
-		),
+		io.MultiReader(bytes.NewReader(buf[:n]), rd),
 		encoding.NewDecoder(),
 	)
 }
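Note (illustrative, not part of this commit): a minimal sketch of how the reworked fallback reader might be exercised. The sample bytes and program wrapper are assumptions; the outcome depends on what DetectEncoding guesses for such a short input. The new ToUTF8WithFallbackReaderPrefetchSize package variable (16 KiB, up from the hard-coded 2048 bytes) controls how much is buffered for detection, presumably so tests can override it.

// sketch_fallback_reader.go — illustrative only, not part of this commit.
package main

import (
	"bytes"
	"fmt"
	"io"

	"code.gitea.io/gitea/modules/charset"
)

func main() {
	// "café" encoded as ISO-8859-1; the trailing 0xE9 byte is not valid UTF-8 on its own.
	latin1 := []byte("caf\xe9")
	rd := charset.ToUTF8WithFallbackReader(bytes.NewReader(latin1), charset.ConvertOpts{})
	out, _ := io.ReadAll(rd)
	// If the sample is detected as a non-UTF-8 charset, out holds the transcoded
	// UTF-8 bytes; if detection is inconclusive, the original bytes pass through.
	fmt.Printf("%q\n", out)
}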
 
-// ToUTF8 converts content to UTF8 encoding
-func ToUTF8(content []byte, opts ConvertOpts) (string, error) {
-	charsetLabel, err := DetectEncoding(content)
-	if err != nil {
-		return "", err
-	} else if charsetLabel == "UTF-8" {
-		return string(MaybeRemoveBOM(content, opts)), nil
-	}
-
-	encoding, _ := charset.Lookup(charsetLabel)
-	if encoding == nil {
-		return string(content), fmt.Errorf("Unknown encoding: %s", charsetLabel)
-	}
-
-	// If there is an error, we concatenate the nicely decoded part and the
-	// original left over. This way we won't lose much data.
-	result, n, err := transform.Bytes(encoding.NewDecoder(), content)
-	if err != nil {
-		result = append(result, content[n:]...)
-	}
-
-	result = MaybeRemoveBOM(result, opts)
-
-	return string(result), err
-}
-
 // ToUTF8WithFallback detects the encoding of content and converts to UTF-8 if possible
 func ToUTF8WithFallback(content []byte, opts ConvertOpts) []byte {
 	bs, _ := io.ReadAll(ToUTF8WithFallbackReader(bytes.NewReader(content), opts))
 	return bs
 }
 
-// ToUTF8DropErrors makes sure the return string is valid utf-8; attempts conversion if possible
-func ToUTF8DropErrors(content []byte, opts ConvertOpts) []byte {
-	charsetLabel, err := DetectEncoding(content)
-	if err != nil || charsetLabel == "UTF-8" {
-		return MaybeRemoveBOM(content, opts)
+func ToUTF8DropErrors(content []byte) []byte {
+	return ToUTF8(content, ConvertOpts{ErrorReplacement: []byte{' '}})
+}
+
+func ToUTF8(content []byte, opts ConvertOpts) []byte {
+	charsetLabel, _ := DetectEncoding(content)
+	if charsetLabel == "UTF-8" {
+		return maybeRemoveBOM(content, opts)
 	}
 
 	encoding, _ := charset.Lookup(charsetLabel)
 	if encoding == nil {
+		setting.PanicInDevOrTesting("unsupported detected charset %q, it shouldn't happen", charsetLabel)
 		return content
 	}
 
-	// We ignore any non-decodable parts from the file.
-	// Some parts might be lost
 	var decoded []byte
 	decoder := encoding.NewDecoder()
 	idx := 0
-	for {
+	for idx < len(content) {
 		result, n, err := transform.Bytes(decoder, content[idx:])
 		decoded = append(decoded, result...)
 		if err == nil {
 			break
 		}
-		decoded = append(decoded, ' ')
-		idx = idx + n + 1
-		if idx >= len(content) {
-			break
+		if opts.ErrorReturnOrigin {
+			return content
+		}
+		if opts.ErrorReplacement == nil {
+			decoded = append(decoded, content[idx+n])
+		} else {
+			decoded = append(decoded, opts.ErrorReplacement...)
 		}
+		idx += n + 1
 	}
-
-	return MaybeRemoveBOM(decoded, opts)
+	return maybeRemoveBOM(decoded, opts)
 }
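Note (illustrative, not part of this commit): the rewritten ToUTF8 now resolves decode errors through ConvertOpts instead of silently dropping bytes. A rough sketch of the three behaviours, assuming the charset package is imported and the detected decoder actually fails on some byte of the sample:

	// Illustrative only: behaviour on a decode error, per the loop above.
	raw := []byte("abc\xff\xfe")

	// ErrorReturnOrigin: give up and hand back the original bytes unchanged.
	_ = charset.ToUTF8(raw, charset.ConvertOpts{ErrorReturnOrigin: true})

	// ErrorReplacement: substitute these bytes for each undecodable byte
	// (ToUTF8DropErrors is now just this call with a single space).
	_ = charset.ToUTF8(raw, charset.ConvertOpts{ErrorReplacement: []byte(" ")})

	// Neither set: keep the offending byte as-is and continue decoding.
	_ = charset.ToUTF8(raw, charset.ConvertOpts{})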
 
-// MaybeRemoveBOM removes a UTF-8 BOM from a []byte when opts.KeepBOM is false
-func MaybeRemoveBOM(content []byte, opts ConvertOpts) []byte {
+// maybeRemoveBOM removes a UTF-8 BOM from a []byte when opts.KeepBOM is false
+func maybeRemoveBOM(content []byte, opts ConvertOpts) []byte {
 	if opts.KeepBOM {
 		return content
 	}
-	if len(content) > 2 && bytes.Equal(content[0:3], UTF8BOM) {
-		return content[3:]
-	}
-	return content
+	return bytes.TrimPrefix(content, UTF8BOM)
}
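Note (illustrative, not part of this commit): bytes.TrimPrefix is a no-op when the prefix is absent, so the explicit length check and bytes.Equal call are no longer needed; assuming the usual imports:

	// Illustrative only.
	_ = bytes.TrimPrefix([]byte{0xef, 0xbb, 0xbf, 'h', 'i'}, charset.UTF8BOM) // -> []byte("hi")
	_ = bytes.TrimPrefix([]byte("hi"), charset.UTF8BOM)                       // -> []byte("hi"), unchanged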
 
 // DetectEncoding detect the encoding of content
-func DetectEncoding(content []byte) (string, error) {
+// it always returns a detected or guessed "encoding" string, no matter error happens or not
+func DetectEncoding(content []byte) (encoding string, _ error) {
 	// First we check if the content represents valid utf8 content excepting a truncated character at the end.
 
 	// Now we could decode all the runes in turn but this is not necessarily the cheapest thing to do
-	// instead we walk backwards from the end to trim off a the incomplete character
+	// instead we walk backwards from the end to trim off the incomplete character
 	toValidate := content
 	end := len(toValidate) - 1
 
-	if end < 0 {
-		// no-op
-	} else if toValidate[end]>>5 == 0b110 {
-		// Incomplete 1 byte extension e.g. © <c2><a9> which has been truncated to <c2>
-		toValidate = toValidate[:end]
-	} else if end > 0 && toValidate[end]>>6 == 0b10 && toValidate[end-1]>>4 == 0b1110 {
-		// Incomplete 2 byte extension e.g. ⛔ <e2><9b><94> which has been truncated to <e2><9b>
-		toValidate = toValidate[:end-1]
-	} else if end > 1 && toValidate[end]>>6 == 0b10 && toValidate[end-1]>>6 == 0b10 && toValidate[end-2]>>3 == 0b11110 {
-		// Incomplete 3 byte extension e.g. 💩 <f0><9f><92><a9> which has been truncated to <f0><9f><92>
-		toValidate = toValidate[:end-2]
+	// U+0000   U+007F   0yyyzzzz
+	// U+0080   U+07FF   110xxxyy 10yyzzzz
+	// U+0800   U+FFFF   1110wwww 10xxxxyy 10yyzzzz
+	// U+010000 U+10FFFF 11110uvv 10vvwwww 10xxxxyy 10yyzzzz
+	cnt := 0
+	for end >= 0 && cnt < 4 {
+		c := toValidate[end]
+		if c>>5 == 0b110 || c>>4 == 0b1110 || c>>3 == 0b11110 {
+			// a leading byte
+			toValidate = toValidate[:end]
+			break
+		} else if c>>6 == 0b10 {
+			// a continuation byte
+			end--
+		} else {
+			// not an utf-8 byte
+			break
+		}
+		cnt++
 	}
+
 	if utf8.Valid(toValidate) {
-		log.Debug("Detected encoding: utf-8 (fast)")
 		return "UTF-8", nil
 	}
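Note (illustrative, not part of this commit): the backwards walk generalises the three hard-coded truncation cases it replaces. For example, a 4-byte rune cut short still counts as UTF-8 once the incomplete tail is trimmed (fragment, assuming the unicode/utf8 import):

	// Illustrative only: 💩 is f0 9f 92 a9; cut after three bytes it is invalid UTF-8.
	full := []byte("💩")
	cut := full[:3]     // f0 9f 92
	_ = utf8.Valid(cut) // false
	// The loop walks back over the continuation bytes 92 and 9f, finds the
	// leading byte f0, trims from there, and the remainder validates, so a
	// file that merely ends mid-rune is still reported as "UTF-8".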
 
@@ -160,7 +148,7 @@ func DetectEncoding(content []byte) (string, error) {
 	if len(content) < 1024 {
 		// Check if original content is valid
 		if _, err := textDetector.DetectBest(content); err != nil {
-			return "", err
+			return util.IfZero(setting.Repository.AnsiCharset, "UTF-8"), err
 		}
 		times := 1024 / len(content)
 		detectContent = make([]byte, 0, times*len(content))
@@ -171,14 +159,10 @@ func DetectEncoding(content []byte) (string, error) {
 		detectContent = content
 	}
 
-	// Now we can't use DetectBest or just results[0] because the result isn't stable - so we need a tie break
+	// Now we can't use DetectBest or just results[0] because the result isn't stable - so we need a tie-break
 	results, err := textDetector.DetectAll(detectContent)
 	if err != nil {
-		if err == chardet.NotDetectedError && len(setting.Repository.AnsiCharset) > 0 {
-			log.Debug("Using default AnsiCharset: %s", setting.Repository.AnsiCharset)
-			return setting.Repository.AnsiCharset, nil
-		}
-		return "", err
+		return util.IfZero(setting.Repository.AnsiCharset, "UTF-8"), err
 	}
 
 	topConfidence := results[0].Confidence
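Note (illustrative, not part of this commit): the error paths now fall back through util.IfZero instead of returning an empty charset label, which is what previously broke the commit diff page for non-ANSI files. A sketch of the assumed helper semantics, with ifZero standing in for Gitea's util.IfZero:

	// Illustrative only: assumed behaviour of the helper used above.
	func ifZero[T comparable](value, defaultValue T) T {
		var zero T
		if value == zero {
			return defaultValue
		}
		return value
	}

	// ifZero(setting.Repository.AnsiCharset, "UTF-8") therefore yields the
	// configured AnsiCharset repository setting when it is non-empty, else
	// "UTF-8", so DetectEncoding always returns a usable label on errors.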
@@ -201,11 +185,9 @@ func DetectEncoding(content []byte) (string, error) {
 	}
 
 	// FIXME: to properly decouple this function the fallback ANSI charset should be passed as an argument
-	if topResult.Charset != "UTF-8" && len(setting.Repository.AnsiCharset) > 0 {
-		log.Debug("Using default AnsiCharset: %s", setting.Repository.AnsiCharset)
+	if topResult.Charset != "UTF-8" && setting.Repository.AnsiCharset != "" {
 		return setting.Repository.AnsiCharset, err
 	}
 
-	log.Debug("Detected encoding: %s", topResult.Charset)
-	return topResult.Charset, err
+	return topResult.Charset, nil
 }

0 commit comments
