github.com/milvus-io/milvus-sdk-go/v2@v2.4.1/test/testcases/configure_test.go

//go:build L3

package testcases

import (
	"log"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/milvus-io/milvus-sdk-go/v2/client"
	"github.com/milvus-io/milvus-sdk-go/v2/entity"
	"github.com/milvus-io/milvus-sdk-go/v2/test/common"
)

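// The two tests below assume a Milvus deployment whose configuration differs
// from the defaults, as their comments note. A minimal sketch of the assumed
// settings (key names per milvus.yaml / the helm chart; exact spelling may
// vary by deployment):
//
//	queryNode:
//	  replicas: 2           # TestLoadCollectionReplicas needs >= 2 query nodes
//	common:
//	  retentionDuration: 40 # seconds; TestCompactAfterDelete waits out this window
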
// config queryNode.replicas >= 2 -> test loading a collection with multiple replicas (see the search sketch after this test)
func TestLoadCollectionReplicas(t *testing.T) {
	ctx := createContext(t, time.Second*common.DefaultTimeout)
	// connect
	mc := createMilvusClient(ctx, t)

	collName, _ := createCollectionWithDataIndex(ctx, t, mc, true, true)

	// load two replicas
	errLoad := mc.LoadCollection(ctx, collName, false, client.WithReplicaNumber(2))
	common.CheckErr(t, errLoad, true)

	// check replicas and segment info
	replicas, errReplicas := mc.GetReplicas(ctx, collName)
	// check the call succeeded before asserting on the result
	common.CheckErr(t, errReplicas, true)
	require.Len(t, replicas, 2)
	for _, replica := range replicas {
		require.Len(t, replica.ShardReplicas, int(common.DefaultShards))
		for _, shard := range replica.ShardReplicas {
			log.Printf("ReplicaID: %v, NodeIDs: %v, LeaderID: %v, NodesIDs: %v, DmChannelName: %v",
				replica.ReplicaID, replica.NodeIDs, shard.LeaderID, shard.NodesIDs, shard.DmChannelName)
		}
	}

	// check segment info
	segments, _ := mc.GetPersistentSegmentInfo(ctx, collName)
	common.CheckPersistentSegments(t, segments, int64(common.DefaultNb))
	for _, segment := range segments {
		// note: ParititionID is the field's actual (misspelled) name in the SDK's entity.Segment
		log.Printf("segmentId: %d, NumRows: %d, CollectionID: %d, PartitionID: %d, IndexID: %d, State: %v",
			segment.ID, segment.NumRows, segment.CollectionID, segment.ParititionID, segment.IndexID, segment.State)
	}
}
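
// A hypothetical follow-up (a sketch, not part of the upstream suite): once the
// collection is loaded with two replicas, the server balances searches across
// them transparently, so a plain search should succeed. The function name, the
// ef value, and the zero placeholder query vector are all assumptions.
func searchMultiReplicaCollection(t *testing.T, mc client.Client, collName string) {
	ctx := createContext(t, time.Second*common.DefaultTimeout)

	// HNSW search params (ef=64); assumes the collection's index is HNSW,
	// as it is elsewhere in this suite
	sp, _ := entity.NewIndexHNSWSearchParam(64)
	queryVec := entity.FloatVector(make([]float32, common.DefaultDim)) // placeholder zero vector

	// empty expr and nil output fields: no filtering, return only ids/scores
	_, errSearch := mc.Search(ctx, collName, nil, "", nil, []entity.Vector{queryVec},
		common.DefaultFloatVecFieldName, entity.L2, 10, sp)
	common.CheckErr(t, errSearch, true)
}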

// config common.retentionDuration=40 -> test compaction after deleting half of the data (see the row-count sketch after this test)
func TestCompactAfterDelete(t *testing.T) {
	ctx := createContext(t, time.Second*common.DefaultTimeout)
	// connect
	mc := createMilvusClient(ctx, t)

	// create collection with 1 shard
	collName := createDefaultCollection(ctx, t, mc, true, 1)

	// insert
	_, floatColumn, vecColumn := common.GenDefaultColumnData(0, common.DefaultNb, common.DefaultDim)
	ids, errInsert := mc.Insert(ctx, collName, common.DefaultPartition, floatColumn, vecColumn)
	common.CheckErr(t, errInsert, true)

	// flush
	errFlush := mc.Flush(ctx, collName, false)
	common.CheckErr(t, errFlush, true)

	segments, _ := mc.GetPersistentSegmentInfo(ctx, collName)
	require.Len(t, segments, 1)

	// index
	indexHnsw, _ := entity.NewIndexHNSW(entity.L2, 8, 96)
	err := mc.CreateIndex(ctx, collName, common.DefaultFloatVecFieldName, indexHnsw, false)
	common.CheckErr(t, err, true)

	// delete half ids
	deleteIds := ids.Slice(0, common.DefaultNb/2)
	errDelete := mc.DeleteByPks(ctx, collName, "", deleteIds)
	common.CheckErr(t, errDelete, true)

	// compact after the retention window (common.retentionDuration, in seconds)
	// has passed, so the deleted rows are eligible to be dropped
	time.Sleep(time.Second * (common.RetentionDuration + 1))
	compactionID, _ := mc.Compact(ctx, collName, 0)
	err = mc.WaitForCompactionCompleted(ctx, compactionID)
	common.CheckErr(t, err, true)

	// get compaction plans
	state, plans, _ := mc.GetCompactionStateWithPlans(ctx, compactionID)
	require.Equal(t, entity.CompactionStateCompleted, state)
	require.Len(t, plans, 1)
	require.ElementsMatch(t, []int64{segments[0].ID}, plans[0].Source)
}
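
// A hypothetical extra assertion (a sketch, not in the original test): after
// compaction, the persistent segments should hold only the non-deleted half of
// the rows. The helper name is an assumption, and exact counts can depend on
// server-side timing, so treat this as illustrative.
func checkRowCountAfterCompaction(t *testing.T, mc client.Client, collName string) {
	ctx := createContext(t, time.Second*common.DefaultTimeout)

	segments, errSeg := mc.GetPersistentSegmentInfo(ctx, collName)
	common.CheckErr(t, errSeg, true)

	// sum rows across all persisted segments of the collection
	var totalRows int64
	for _, segment := range segments {
		totalRows += segment.NumRows
	}
	require.Equal(t, int64(common.DefaultNb/2), totalRows)
}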