github.com/hasnat/dolt/go@v0.0.0-20210628190320-9eb5d843fbb7/libraries/doltcore/sqle/schema_util_test.go

// Copyright 2020 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package sqle

import (
	"fmt"
	"strconv"

	"github.com/dolthub/dolt/go/libraries/utils/set"

	"github.com/dolthub/dolt/go/libraries/doltcore/row"
	"github.com/dolthub/dolt/go/libraries/doltcore/schema"
	"github.com/dolthub/dolt/go/store/types"
)

// NewResultSetSchema creates a new schema for a result set specified by the given pairs of column names and types.
// Column names are strings, types are NomsKinds.
func NewResultSetSchema(colNamesAndTypes ...interface{}) schema.Schema {
	if len(colNamesAndTypes)%2 != 0 {
		panic("Non-even number of inputs passed to NewResultSetSchema")
	}

	cols := make([]schema.Column, len(colNamesAndTypes)/2)
	for i := 0; i < len(colNamesAndTypes); i += 2 {
		name := colNamesAndTypes[i].(string)
		nomsKind := colNamesAndTypes[i+1].(types.NomsKind)
		cols[i/2] = schema.NewColumn(name, uint64(i/2), nomsKind, false)
	}

	collection := schema.NewColCollection(cols...)
	return schema.UnkeyedSchemaFromCols(collection)
}

// NewResultSetRow creates a new row for a result set specified by the given values.
func NewResultSetRow(colVals ...types.Value) row.Row {
	taggedVals := make(row.TaggedValues)
	cols := make([]schema.Column, len(colVals))
	for i := 0; i < len(colVals); i++ {
		taggedVals[uint64(i)] = colVals[i]
		nomsKind := colVals[i].Kind()
		cols[i] = schema.NewColumn(fmt.Sprintf("%v", i), uint64(i), nomsKind, false)
	}

	collection := schema.NewColCollection(cols...)
	sch := schema.UnkeyedSchemaFromCols(collection)

	r, err := row.New(types.Format_Default, sch, taggedVals)

	if err != nil {
		panic(err)
	}

	return r
}

// NewRow creates a new row with the values given, using ascending tag numbers starting at 0.
// Uses the first value as the primary key.
func NewRow(colVals ...types.Value) row.Row {
	return NewRowWithPks(colVals[0:1], colVals[1:]...)
}

// NewRowWithPks creates a new row with the values given, using ascending tag numbers starting at 0.
func NewRowWithPks(pkColVals []types.Value, nonPkVals ...types.Value) row.Row {
	var cols []schema.Column
	taggedVals := make(row.TaggedValues)
	var tag int64

	for _, val := range pkColVals {
		var constraints []schema.ColConstraint
		constraints = append(constraints, schema.NotNullConstraint{})
		cols = append(cols, schema.NewColumn(strconv.FormatInt(tag, 10), uint64(tag), val.Kind(), true, constraints...))
		taggedVals[uint64(tag)] = val
		tag++
	}

	for _, val := range nonPkVals {
		cols = append(cols, schema.NewColumn(strconv.FormatInt(tag, 10), uint64(tag), val.Kind(), false))
		taggedVals[uint64(tag)] = val
		tag++
	}

	colColl := schema.NewColCollection(cols...)
	sch := schema.MustSchemaFromCols(colColl)

	r, err := row.New(types.Format_Default, sch, taggedVals)

	if err != nil {
		panic(err)
	}

	return r
}
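
// Illustrative usage (a sketch for orientation, not referenced by any test): the result set helpers above pair up
// when the value kinds match the declared column kinds. The value wrappers types.Int and types.String are assumed to
// be the noms value types from the types package imported above; the function name here is hypothetical.
func exampleResultSetHelpers() (schema.Schema, row.Row) {
	// Unkeyed two-column schema: "id" gets tag 0, "name" gets tag 1.
	sch := NewResultSetSchema("id", types.IntKind, "name", types.StringKind)

	// Row values are tagged 0, 1, ... in the order given, lining up with the tags of the schema above.
	r := NewResultSetRow(types.Int(1), types.String("first"))

	return sch, r
}
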
// NewRowWithSchema creates a new row with the values given, using the provided schema.
func NewRowWithSchema(sch schema.Schema, vals ...types.Value) row.Row {
	tv := make(row.TaggedValues)
	var i int
	sch.GetAllCols().Iter(func(tag uint64, col schema.Column) (stop bool, err error) {
		tv[tag] = vals[i]
		i++
		return false, nil
	})

	r, err := row.New(types.Format_Default, sch, tv)
	if err != nil {
		panic(err)
	}

	return r
}

// NewSchema creates a new schema with the pairs of column names and types given.
// Uses the first column as the primary key.
func NewSchema(colNamesAndTypes ...interface{}) schema.Schema {
	return NewSchemaForTable("", colNamesAndTypes...)
}

// NewSchemaForTable creates a new schema for the table with the name given with the pairs of column names and types
// given. Uses the first column as the primary key.
func NewSchemaForTable(tableName string, colNamesAndTypes ...interface{}) schema.Schema {
	if len(colNamesAndTypes)%2 != 0 {
		panic("Non-even number of inputs passed to NewSchema")
	}

	// Tags and kinds of the columns created so far, fed to schema.AutoGenerateTag below.
	nomsKinds := make([]types.NomsKind, 0)
	tags := set.NewUint64Set(nil)

	cols := make([]schema.Column, len(colNamesAndTypes)/2)
	for i := 0; i < len(colNamesAndTypes); i += 2 {
		name := colNamesAndTypes[i].(string)
		nomsKind := colNamesAndTypes[i+1].(types.NomsKind)

		tag := schema.AutoGenerateTag(tags, tableName, nomsKinds, name, nomsKind)
		tags.Add(tag)
		nomsKinds = append(nomsKinds, nomsKind)

		isPk := i/2 == 0
		var constraints []schema.ColConstraint
		if isPk {
			constraints = append(constraints, schema.NotNullConstraint{})
		}
		cols[i/2] = schema.NewColumn(name, tag, nomsKind, isPk, constraints...)
	}

	colColl := schema.NewColCollection(cols...)
	return schema.MustSchemaFromCols(colColl)
}

// ConcatRows returns the logical concatenation of the schemas and rows given, rewriting all tag numbers to begin at
// zero. The row returned will have a new schema identical to the result of CompressSchema.
func ConcatRows(schemasAndRows ...interface{}) row.Row {
	if len(schemasAndRows)%2 != 0 {
		panic("Non-even number of inputs passed to ConcatRows")
	}

	taggedVals := make(row.TaggedValues)
	cols := make([]schema.Column, 0)
	var itag uint64
	for i := 0; i < len(schemasAndRows); i += 2 {
		sch := schemasAndRows[i].(schema.Schema)
		r := schemasAndRows[i+1].(row.Row)
		sch.GetAllCols().IterInSortedOrder(func(tag uint64, col schema.Column) (stop bool) {
			val, ok := r.GetColVal(tag)
			if ok {
				taggedVals[itag] = val
			}

			col.Tag = itag
			cols = append(cols, col)
			itag++

			return false
		})
	}

	colCol := schema.NewColCollection(cols...)
	r, err := row.New(types.Format_Default, schema.UnkeyedSchemaFromCols(colCol), taggedVals)

	if err != nil {
		panic(err)
	}

	return r
}
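
// Illustrative usage (a sketch, not referenced by any test): NewSchema and NewSchemaForTable auto-generate column
// tags, so rows for those schemas are best built with NewRowWithSchema, which reads the tags back out of the schema
// rather than assuming they start at zero. The function name is hypothetical; types.Int and types.String are assumed
// noms value wrappers.
func exampleKeyedSchemaAndRow() (schema.Schema, row.Row) {
	// "id" becomes the not-null primary key; both columns receive auto-generated tags.
	sch := NewSchemaForTable("people", "id", types.IntKind, "name", types.StringKind)

	// Values are matched to columns in the schema's iteration order, here id then name.
	r := NewRowWithSchema(sch, types.Int(1), types.String("first"))

	return sch, r
}
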
// CompressRow rewrites the tag numbers for the row given to begin at zero and be contiguous, just like result set
// schemas. We don't want to just use the field mappings in the result set schema used by sqlselect, since that would
// only demonstrate that the code was consistent with itself, not actually correct.
func CompressRow(sch schema.Schema, r row.Row) row.Row {
	var itag uint64
	compressedRow := make(row.TaggedValues)

	// TODO: this is probably incorrect and will break for schemas where the tag numbering doesn't match the declared order
	sch.GetAllCols().IterInSortedOrder(func(tag uint64, col schema.Column) (stop bool) {
		if val, ok := r.GetColVal(tag); ok {
			compressedRow[itag] = val
		}
		itag++
		return false
	})

	// The call to CompressSchema is a no-op in most cases.
	r, err := row.New(types.Format_Default, CompressSchema(sch), compressedRow)

	if err != nil {
		panic(err)
	}

	return r
}

// CompressRows compresses each of the rows given, as in CompressRow.
func CompressRows(sch schema.Schema, rs ...row.Row) []row.Row {
	compressed := make([]row.Row, len(rs))
	for i := range rs {
		compressed[i] = CompressRow(sch, rs[i])
	}
	return compressed
}

// CompressSchema rewrites the tag numbers for the schema given to start at 0, just like result set schemas. If one or
// more column names are given, only those column names are included in the compressed schema. The column list can
// also be used to reorder the columns as necessary.
func CompressSchema(sch schema.Schema, colNames ...string) schema.Schema {
	var itag uint64
	var cols []schema.Column

	if len(colNames) > 0 {
		cols = make([]schema.Column, len(colNames))
		for _, colName := range colNames {
			column, ok := sch.GetAllCols().GetByName(colName)
			if !ok {
				panic("No column found for column name " + colName)
			}
			column.Tag = itag
			cols[itag] = column
			itag++
		}
	} else {
		cols = make([]schema.Column, sch.GetAllCols().Size())
		sch.GetAllCols().Iter(func(tag uint64, col schema.Column) (stop bool, err error) {
			col.Tag = itag
			cols[itag] = col
			itag++
			return false, nil
		})
	}

	colCol := schema.NewColCollection(cols...)
	return schema.UnkeyedSchemaFromCols(colCol)
}

// CompressSchemas rewrites the tag numbers for the schemas given to start at 0, just like result set schemas.
func CompressSchemas(schs ...schema.Schema) schema.Schema {
	var itag uint64
	var cols []schema.Column

	cols = make([]schema.Column, 0)
	for _, sch := range schs {
		sch.GetAllCols().IterInSortedOrder(func(tag uint64, col schema.Column) (stop bool) {
			col.Tag = itag
			cols = append(cols, col)
			itag++
			return false
		})
	}

	colCol := schema.NewColCollection(cols...)
	return schema.UnkeyedSchemaFromCols(colCol)
}
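
// Illustrative usage (a sketch, not referenced by any test): compressing a schema and its rows rewrites tags to
// 0, 1, ... so that results built from schemas with different auto-generated tags can be compared directly. The
// function name is hypothetical.
func exampleCompress(sch schema.Schema, rs ...row.Row) (schema.Schema, []row.Row) {
	// The compressed schema keeps the column order but renumbers tags from zero.
	compressedSch := CompressSchema(sch)

	// Each row is rewritten against the same contiguous tag numbering.
	compressedRows := CompressRows(sch, rs...)

	return compressedSch, compressedRows
}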