Error fetching schema from server: : while fetching schema: rpc error: code = Unknown desc = This server doesn't serve group id: 2

We deployed a Dgraph cluster and Ratel using k8s.

The Ratel schema page shows this error message,

and this is the /state API result:

{
    "counter": "20",
    "groups": {
        "1": {
            "members": {
                "6": {
                    "id": "6",
                    "groupId": 1,
                    "addr": "dgraph-alpha-0.dgraph-alpha.dgraph.svc.cluster.local:7080",
                    "leader": true,
                    "amDead": false,
                    "lastUpdate": "1660786861",
                    "learner": false,
                    "clusterInfoOnly": false,
                    "forceGroupId": false
                },
                "7": {
                    "id": "7",
                    "groupId": 1,
                    "addr": "dgraph-alpha-1.dgraph-alpha.dgraph.svc.cluster.local:7080",
                    "leader": false,
                    "amDead": false,
                    "lastUpdate": "0",
                    "learner": false,
                    "clusterInfoOnly": false,
                    "forceGroupId": false
                },
                "8": {
                    "id": "8",
                    "groupId": 1,
                    "addr": "dgraph-alpha-2.dgraph-alpha.dgraph.svc.cluster.local:7080",
                    "leader": false,
                    "amDead": false,
                    "lastUpdate": "0",
                    "learner": false,
                    "clusterInfoOnly": false,
                    "forceGroupId": false
                }
            },
            "tablets": {
                "0-dgraph.drop.op": {
                    "groupId": 1,
                    "predicate": "0-dgraph.drop.op",
                    "force": false,
                    "onDiskBytes": "0",
                    "remove": false,
                    "readOnly": false,
                    "moveTs": "0",
                    "uncompressedBytes": "0"
                },
                "0-dgraph.graphql.p_query": {
                    "groupId": 1,
                    "predicate": "0-dgraph.graphql.p_query",
                    "force": false,
                    "onDiskBytes": "0",
                    "remove": false,
                    "readOnly": false,
                    "moveTs": "0",
                    "uncompressedBytes": "0"
                },
                "0-dgraph.graphql.schema": {
                    "groupId": 1,
                    "predicate": "0-dgraph.graphql.schema",
                    "force": false,
                    "onDiskBytes": "0",
                    "remove": false,
                    "readOnly": false,
                    "moveTs": "0",
                    "uncompressedBytes": "0"
                },
                "0-dgraph.graphql.xid": {
                    "groupId": 1,
                    "predicate": "0-dgraph.graphql.xid",
                    "force": false,
                    "onDiskBytes": "0",
                    "remove": false,
                    "readOnly": false,
                    "moveTs": "0",
                    "uncompressedBytes": "0"
                },
                "0-dgraph.type": {
                    "groupId": 1,
                    "predicate": "0-dgraph.type",
                    "force": false,
                    "onDiskBytes": "0",
                    "remove": false,
                    "readOnly": false,
                    "moveTs": "0",
                    "uncompressedBytes": "0"
                }
            },
            "snapshotTs": "0",
            "checksum": "14965571037763386389",
            "checkpointTs": "0"
        },
        "2": {
            "members": {
                "9": {
                    "id": "9",
                    "groupId": 2,
                    "addr": "localhost:7080",
                    "leader": true,
                    "amDead": false,
                    "lastUpdate": "1660886897",
                    "learner": false,
                    "clusterInfoOnly": false,
                    "forceGroupId": false
                }
            },
            "tablets": {},
            "snapshotTs": "0",
            "checksum": "0",
            "checkpointTs": "0"
        }
    },
    "zeros": {
        "1": {
            "id": "1",
            "groupId": 0,
            "addr": "dgraph-zero-0.dgraph-zero.dgraph.svc.cluster.local:5080",
            "leader": true,
            "amDead": false,
            "lastUpdate": "0",
            "learner": false,
            "clusterInfoOnly": false,
            "forceGroupId": false
        },
        "2": {
            "id": "2",
            "groupId": 0,
            "addr": "dgraph-zero-1.dgraph-zero.dgraph.svc.cluster.local:5080",
            "leader": false,
            "amDead": false,
            "lastUpdate": "0",
            "learner": false,
            "clusterInfoOnly": false,
            "forceGroupId": false
        },
        "3": {
            "id": "3",
            "groupId": 0,
            "addr": "dgraph-zero-2.dgraph-zero.dgraph.svc.cluster.local:5080",
            "leader": false,
            "amDead": false,
            "lastUpdate": "0",
            "learner": false,
            "clusterInfoOnly": false,
            "forceGroupId": false
        }
    },
    "maxUID": "0",
    "maxTxnTs": "10000",
    "maxNsID": "0",
    "maxRaftId": "9",
    "removed": [],
    "cid": "a84a779c-ef5b-4757-8e99-cab66fda33fe",
    "license": {
        "user": "",
        "maxNodes": "18446744073709551615",
        "expiryTs": "1663378863",
        "enabled": true
    }
}

How can we solve this problem?
Thanks a lot.

This feels like a complex issue to solve from the outside, with imagination only. If you have a good environment and pristine/untouched k8s YAML: delete the deployment, make sure the volumes are erased, and start again from scratch.
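A minimal cleanup sketch, assuming the StatefulSets are named dgraph-alpha and dgraph-zero in a dgraph namespace (inferred from the pod DNS names in your /state output) and that the PVCs carry an app=dgraph label — verify both against your own manifests before running anything:

# Delete the Dgraph workloads (names inferred from the member addresses above)
kubectl -n dgraph delete statefulset dgraph-alpha dgraph-zero

# PVCs survive StatefulSet deletion, so remove them explicitly;
# check the actual names/labels first with: kubectl -n dgraph get pvc
kubectl -n dgraph delete pvc -l app=dgraph

# Re-apply your manifests from scratch (dgraph.yaml is a placeholder name)
kubectl apply -f dgraph.yaml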

See this member 9 in group 2, registered at localhost:7080? This is the result of mixing environments and configs. You were testing something, you didn’t clean up the paths/directories correctly, and now this instance expects the same context as before.
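If you want to confirm the leftover state before wiping, look at the data directories on the volumes; by default Alpha keeps its data in p/ and w/, and Zero keeps its Raft state in zw/. The /dgraph mount path below is an assumption taken from the stock Dgraph manifests — adjust to wherever your PVCs are mounted:

# Inspect the Alpha volume (leftover p/ and w/ dirs mean old cluster state)
kubectl -n dgraph exec dgraph-alpha-0 -- ls -la /dgraph

# Same for Zero (its Raft state lives in zw/)
kubectl -n dgraph exec dgraph-zero-0 -- ls -la /dgraph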

And there is no way for k8s to properly work with “localhost” as an advertised address. Technically it is possible, but instances in other pods will have a hard time figuring out which node you actually mean.
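That advertised address comes from Alpha’s --my flag. The stock Dgraph k8s manifests derive it from the pod’s FQDN so that peers can resolve it back to the right pod; roughly like this (the Zero address here matches your service names, but double-check against your own YAML):

# Inside the Alpha container command: advertise the pod's own FQDN, never localhost
dgraph alpha --my=$(hostname -f):7080 \
  --zero dgraph-zero-0.dgraph-zero.dgraph.svc.cluster.local:5080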