confluentinc / cp-docker-images

[DEPRECATED] Docker images for Confluent Platform.
Apache License 2.0
1.14k stars 704 forks source link

[OCP] 4.1.0 fails on startup #461

Closed angelbarrera92 closed 6 years ago

angelbarrera92 commented 6 years ago

Hi

We're trying to deploy zookeeper in openshift. Version 4.0.0 works perfectly for us but trying to upgrade to version 4.1.0 fails to start.

Paste the log output:

+ '[' -z '' ']'
+ ID_OFFSET=1
+ export ZOOKEEPER_SERVER_ID=1
+ ZOOKEEPER_SERVER_ID=1
++ cat /var/run/secrets/kubernetes.io/serviceaccount/namespace
+ export PROJECT=confluent
+ PROJECT=confluent
+ export 'ZOOKEEPER_SERVERS=ox-zookeeper-410-0.ox-zookeeper-410-headless.confluent.svc:2888:3888;ox-zookeeper-410-1.ox-zookeeper-410-headless.confluent.svc:2888:3888;ox-zookeeper-410-2.ox-zookeeper-410-headless.confluent.svc:2888:3888'
+ ZOOKEEPER_SERVERS='ox-zookeeper-410-0.ox-zookeeper-410-headless.confluent.svc:2888:3888;ox-zookeeper-410-1.ox-zookeeper-410-headless.confluent.svc:2888:3888;ox-zookeeper-410-2.ox-zookeeper-410-headless.confluent.svc:2888:3888'
+ /etc/confluent/docker/run

echo "===> ENV Variables ..."
+ echo '===> ENV Variables ...'
env | sort
===> ENV Variables ...
+ env
+ sort
ALLOW_UNSIGNED=false
APPLICATION_NAME=ox-zookeeper-410
COMPONENT=zookeeper
CONFLUENT_DEB_VERSION=1
CONFLUENT_MAJOR_VERSION=4
CONFLUENT_MINOR_VERSION=1
CONFLUENT_MVN_LABEL=
CONFLUENT_PATCH_VERSION=0
CONFLUENT_PLATFORM_LABEL=
CONFLUENT_VERSION=4.1.0
CUB_CLASSPATH=/etc/confluent/docker/docker-utils.jar
HOME=/
HOSTNAME=ox-zookeeper-410-0
KAFKA_VERSION=1.1.0
KUBERNETES_PORT=tcp://172.30.0.1:443
KUBERNETES_PORT_443_TCP=tcp://172.30.0.1:443
KUBERNETES_PORT_443_TCP_ADDR=172.30.0.1
KUBERNETES_PORT_443_TCP_PORT=443
KUBERNETES_PORT_443_TCP_PROTO=tcp
KUBERNETES_PORT_53_TCP=tcp://172.30.0.1:53
KUBERNETES_PORT_53_TCP_ADDR=172.30.0.1
KUBERNETES_PORT_53_TCP_PORT=53
KUBERNETES_PORT_53_TCP_PROTO=tcp
KUBERNETES_PORT_53_UDP=udp://172.30.0.1:53
KUBERNETES_PORT_53_UDP_ADDR=172.30.0.1
KUBERNETES_PORT_53_UDP_PORT=53
KUBERNETES_PORT_53_UDP_PROTO=udp
KUBERNETES_SERVICE_HOST=172.30.0.1
KUBERNETES_SERVICE_PORT=443
KUBERNETES_SERVICE_PORT_DNS=53
KUBERNETES_SERVICE_PORT_DNS_TCP=53
KUBERNETES_SERVICE_PORT_HTTPS=443
LANG=C.UTF-8
OX_KAFKA_CONTROL_CENTER_PORT=tcp://172.30.201.6:9021
OX_KAFKA_CONTROL_CENTER_PORT_9021_TCP=tcp://172.30.201.6:9021
OX_KAFKA_CONTROL_CENTER_PORT_9021_TCP_ADDR=172.30.201.6
OX_KAFKA_CONTROL_CENTER_PORT_9021_TCP_PORT=9021
OX_KAFKA_CONTROL_CENTER_PORT_9021_TCP_PROTO=tcp
OX_KAFKA_CONTROL_CENTER_SERVICE_HOST=172.30.201.6
OX_KAFKA_CONTROL_CENTER_SERVICE_PORT=9021
OX_KAFKA_CONTROL_CENTER_SERVICE_PORT_SERVER=9021
OX_KAFKA_PORT=tcp://172.30.227.70:9092
OX_KAFKA_PORT_9092_TCP=tcp://172.30.227.70:9092
OX_KAFKA_PORT_9092_TCP_ADDR=172.30.227.70
OX_KAFKA_PORT_9092_TCP_PORT=9092
OX_KAFKA_PORT_9092_TCP_PROTO=tcp
OX_KAFKA_REST_PROXY_PORT=tcp://172.30.145.15:8082
OX_KAFKA_REST_PROXY_PORT_8082_TCP=tcp://172.30.145.15:8082
OX_KAFKA_REST_PROXY_PORT_8082_TCP_ADDR=172.30.145.15
OX_KAFKA_REST_PROXY_PORT_8082_TCP_PORT=8082
OX_KAFKA_REST_PROXY_PORT_8082_TCP_PROTO=tcp
OX_KAFKA_REST_PROXY_SERVICE_HOST=172.30.145.15

echo "===> User"
+ echo '===> User'
id
+ id
OX_KAFKA_REST_PROXY_SERVICE_PORT=8082
OX_KAFKA_REST_PROXY_SERVICE_PORT_SERVER=8082
OX_KAFKA_SCHEMA_REGISTRY_PORT=tcp://172.30.74.255:8081
OX_KAFKA_SCHEMA_REGISTRY_PORT_8081_TCP=tcp://172.30.74.255:8081
OX_KAFKA_SCHEMA_REGISTRY_PORT_8081_TCP_ADDR=172.30.74.255
OX_KAFKA_SCHEMA_REGISTRY_PORT_8081_TCP_PORT=8081
OX_KAFKA_SCHEMA_REGISTRY_PORT_8081_TCP_PROTO=tcp
OX_KAFKA_SCHEMA_REGISTRY_SERVICE_HOST=172.30.74.255
OX_KAFKA_SCHEMA_REGISTRY_SERVICE_PORT=8081
OX_KAFKA_SCHEMA_REGISTRY_SERVICE_PORT_SERVER=8081
OX_KAFKA_SERVICE_HOST=172.30.227.70
OX_KAFKA_SERVICE_PORT=9092
OX_KAFKA_SERVICE_PORT_CLIENT=9092
OX_ZOOKEEPER400_PORT=tcp://172.30.236.246:2181
OX_ZOOKEEPER400_PORT_2181_TCP=tcp://172.30.236.246:2181
OX_ZOOKEEPER400_PORT_2181_TCP_ADDR=172.30.236.246
OX_ZOOKEEPER400_PORT_2181_TCP_PORT=2181
OX_ZOOKEEPER400_PORT_2181_TCP_PROTO=tcp
OX_ZOOKEEPER400_SERVICE_HOST=172.30.236.246
OX_ZOOKEEPER400_SERVICE_PORT=2181
OX_ZOOKEEPER400_SERVICE_PORT_CLIENT=2181
OX_ZOOKEEPER410_PORT=tcp://172.30.196.238:2181
OX_ZOOKEEPER410_PORT_2181_TCP=tcp://172.30.196.238:2181
OX_ZOOKEEPER410_PORT_2181_TCP_ADDR=172.30.196.238
OX_ZOOKEEPER410_PORT_2181_TCP_PORT=2181
OX_ZOOKEEPER410_PORT_2181_TCP_PROTO=tcp
OX_ZOOKEEPER410_SERVICE_HOST=172.30.196.238
OX_ZOOKEEPER410_SERVICE_PORT=2181
OX_ZOOKEEPER410_SERVICE_PORT_CLIENT=2181
OX_ZOOKEEPER_410_2_PORT=tcp://172.30.233.82:2181
OX_ZOOKEEPER_410_2_PORT_2181_TCP=tcp://172.30.233.82:2181
OX_ZOOKEEPER_410_2_PORT_2181_TCP_ADDR=172.30.233.82
OX_ZOOKEEPER_410_2_PORT_2181_TCP_PORT=2181
OX_ZOOKEEPER_410_2_PORT_2181_TCP_PROTO=tcp
OX_ZOOKEEPER_410_2_SERVICE_HOST=172.30.233.82
OX_ZOOKEEPER_410_2_SERVICE_PORT=2181
OX_ZOOKEEPER_410_2_SERVICE_PORT_CLIENT=2181
OX_ZOOKEEPER_410_PORT=tcp://172.30.232.125:2181
OX_ZOOKEEPER_410_PORT_2181_TCP=tcp://172.30.232.125:2181
OX_ZOOKEEPER_410_PORT_2181_TCP_ADDR=172.30.232.125
OX_ZOOKEEPER_410_PORT_2181_TCP_PORT=2181
OX_ZOOKEEPER_410_PORT_2181_TCP_PROTO=tcp
OX_ZOOKEEPER_410_SERVICE_HOST=172.30.232.125
OX_ZOOKEEPER_410_SERVICE_PORT=2181
OX_ZOOKEEPER_410_SERVICE_PORT_CLIENT=2181
OX_ZOOKEEPER_PORT=tcp://172.30.58.139:2181
OX_ZOOKEEPER_PORT_2181_TCP=tcp://172.30.58.139:2181
OX_ZOOKEEPER_PORT_2181_TCP_ADDR=172.30.58.139
OX_ZOOKEEPER_PORT_2181_TCP_PORT=2181
OX_ZOOKEEPER_PORT_2181_TCP_PROTO=tcp
OX_ZOOKEEPER_SERVICE_HOST=172.30.58.139
OX_ZOOKEEPER_SERVICE_PORT=2181
OX_ZOOKEEPER_SERVICE_PORT_CLIENT=2181
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
PROJECT=confluent
PWD=/
PYTHON_PIP_VERSION=8.1.2
PYTHON_VERSION=2.7.9-1
SCALA_VERSION=2.11
SHLVL=2
ZOOKEEPER_CLIENT_PORT=2181
ZOOKEEPER_INIT_LIMIT=5
ZOOKEEPER_SERVERS=ox-zookeeper-410-0.ox-zookeeper-410-headless.confluent.svc:2888:3888;ox-zookeeper-410-1.ox-zookeeper-410-headless.confluent.svc:2888:3888;ox-zookeeper-410-2.ox-zookeeper-410-headless.confluent.svc:2888:3888
ZOOKEEPER_SERVER_ID=1
ZOOKEEPER_SYNC_LIMIT=2
ZOOKEEPER_TICK_TIME=2000
ZULU_OPENJDK_VERSION=8=8.17.0.3
_=/usr/bin/env
===> User
uid=1000140000 gid=0(root) groups=0(root),1000140000

echo "===> Configuring ..."
+ echo '===> Configuring ...'
/etc/confluent/docker/configure
+ /etc/confluent/docker/configure
===> Configuring ...

dub ensure ZOOKEEPER_CLIENT_PORT
+ dub ensure ZOOKEEPER_CLIENT_PORT

dub path /etc/kafka/ writable
+ dub path /etc/kafka/ writable

# myid is required for clusters
if [[ -n "${ZOOKEEPER_SERVERS-}" ]]
then
  dub ensure ZOOKEEPER_SERVER_ID
  export ZOOKEEPER_INIT_LIMIT=${ZOOKEEPER_INIT_LIMIT:-"10"}
  export ZOOKEEPER_SYNC_LIMIT=${ZOOKEEPER_SYNC_LIMIT:-"5"}
fi
+ [[ -n ox-zookeeper-410-0.ox-zookeeper-410-headless.confluent.svc:2888:3888;ox-zookeeper-410-1.ox-zookeeper-410-headless.confluent.svc:2888:3888;ox-zookeeper-410-2.ox-zookeeper-410-headless.confluent.svc:2888:3888 ]]
+ dub ensure ZOOKEEPER_SERVER_ID
+ export ZOOKEEPER_INIT_LIMIT=5
+ ZOOKEEPER_INIT_LIMIT=5
+ export ZOOKEEPER_SYNC_LIMIT=2
+ ZOOKEEPER_SYNC_LIMIT=2

if [[ -n "${ZOOKEEPER_SERVER_ID-}" ]]
then
  dub template "/etc/confluent/docker/myid.template" "/var/lib/${COMPONENT}/data/myid"
fi
+ [[ -n 1 ]]
+ dub template /etc/confluent/docker/myid.template /var/lib/zookeeper/data/myid
[Errno 13] Permission denied: '/var/lib/zookeeper/data/myid'
Command [/usr/local/bin/dub template /etc/confluent/docker/myid.template /var/lib/zookeeper/data/myid] FAILED !

With version 4.0.0 we have no problems. Is there anything different between these versions that might affect the boot of the containers?

Best regards!

gAmUssA commented 6 years ago

@angelbarrera92 could you share yml file?

angelbarrera92 commented 6 years ago

Sure!

apiVersion: v1
kind: Template
labels:
  template: zookeeper
metadata:
  annotations:
    description: Zookeeper Deployment and Runtime Components
    iconClass: icon-java
    tags: java,zookeeper
  name: zookeeper
objects:
- apiVersion: v1
  kind: Service
  metadata:
    labels:
      application: ${APPLICATION_NAME}
    name: ${APPLICATION_NAME}-headless
  spec:
    clusterIP: None
    portalIP: None
    ports:
    - name: server
      port: 2888
      protocol: TCP
      targetPort: 2888
    - name: leader-election
      port: 3888
      protocol: TCP
      targetPort: 3888
    selector:
      application: ${APPLICATION_NAME}
    sessionAffinity: None
    type: ClusterIP
- apiVersion: v1
  kind: Service
  metadata:
    labels:
      application: ${APPLICATION_NAME}
    name: ${APPLICATION_NAME}
  spec:
    ports:
    - name: client
      port: 2181
      protocol: TCP
      targetPort: 2181
    selector:
      application: ${APPLICATION_NAME}
    sessionAffinity: None
    type: ClusterIP
- apiVersion: v1
  data:
    init.sh: |-
      #!/bin/bash
      set -x

      [ -z "$ID_OFFSET" ] && ID_OFFSET=1
      export ZOOKEEPER_SERVER_ID=$((${HOSTNAME##*-} + $ID_OFFSET))
      export PROJECT=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace)
      export ZOOKEEPER_SERVERS="${APPLICATION_NAME}-0.${APPLICATION_NAME}-headless.${PROJECT}.svc:2888:3888;${APPLICATION_NAME}-1.${APPLICATION_NAME}-headless.${PROJECT}.svc:2888:3888;${APPLICATION_NAME}-2.${APPLICATION_NAME}-headless.${PROJECT}.svc:2888:3888"
      /etc/confluent/docker/run
    client_port: '2181'
    tick_time: '2000'
    init_limit: '5'
    sync_limit: '2'
  kind: ConfigMap
  metadata:
    labels:
      application: ${APPLICATION_NAME}
    name: ${APPLICATION_NAME}-config
- apiVersion: apps/v1beta1
  kind: StatefulSet
  metadata:
    labels:
      application: ${APPLICATION_NAME}
    name: ${APPLICATION_NAME}
  spec:
    replicas: 3
    selector:
      matchLabels:
        application: ${APPLICATION_NAME}
    serviceName: ${APPLICATION_NAME}-headless
    template:
      metadata:
        creationTimestamp: null
        labels:
          application: ${APPLICATION_NAME}
      spec:
        containers:
        - command: ['/bin/bash', '/etc/scripts/init.sh']
          env:
          - name: APPLICATION_NAME
            value: ${APPLICATION_NAME}
          - name: ZOOKEEPER_CLIENT_PORT
            valueFrom:
              configMapKeyRef:
                key: client_port
                name: ${APPLICATION_NAME}-config
          - name: ZOOKEEPER_TICK_TIME
            valueFrom:
              configMapKeyRef:
                key: tick_time
                name: ${APPLICATION_NAME}-config
          - name: ZOOKEEPER_INIT_LIMIT
            valueFrom:
              configMapKeyRef:
                key: init_limit
                name: ${APPLICATION_NAME}-config
          - name: ZOOKEEPER_SYNC_LIMIT
            valueFrom:
              configMapKeyRef:
                key: sync_limit
                name: ${APPLICATION_NAME}-config
          image: docker-registry.default.svc:5000/confluent/cp-zookeeper:${ZOOKEEPER_VERSION}
          imagePullPolicy: Always
          name: ${APPLICATION_NAME}
          ports:
          - containerPort: 2181
            name: client
            protocol: TCP
          - containerPort: 2888
            name: server
            protocol: TCP
          - containerPort: 3888
            name: leader-election
            protocol: TCP
          resources:
            requests:
              cpu: 256m
              memory: 512Mi
          terminationMessagePath: /dev/termination-log
          volumeMounts:
          - mountPath: /var/lib/zookeeper/data
            name: datadir
          - mountPath: /etc/scripts
            name: config
        volumes:
        - name: config
          configMap:
            name: ${APPLICATION_NAME}-config
        dnsPolicy: ClusterFirst
        restartPolicy: Always
        terminationGracePeriodSeconds: 30
    volumeClaimTemplates:
    - metadata:
        labels:
          application: ${APPLICATION_NAME}
        name: datadir
      spec:
        accessModes:
        - ReadWriteOnce
        resources:
          requests:
            storage: 10Gi
parameters:
- description: The name for the application.
  name: APPLICATION_NAME
  required: true
  value: zookeeper
- description: Zookeeper ImageStream Tag
  name: ZOOKEEPER_VERSION
  required: true
  value: 4.0.0

oc apply -f template.yml
oc new-app --template=zookeeper --param=APPLICATION_NAME=ox-zookeeper --param=ZOOKEEPER_VERSION=4.1.0

Thanks in advance @gAmUssA

sagarising commented 6 years ago

I am also facing the same issue with Version 4.1.0(latest). The 4.0.0 version works perfectly.

angelbarrera92 commented 6 years ago

Has the cause of the error been identified? Thanks!

sagarising commented 6 years ago

The error is that the myid file is owned by the root user. If I run my DeploymentConfig as root it works fine, but that is not a good thing to do.

angelbarrera92 commented 6 years ago

So... I imagine the myid file should be generated with different permissions. Am I right? Will there be a fix branch?

sagarising commented 6 years ago

I am also facing the same issue. I am not a core member of confluent development team. Let's see if the core development team resolves this issue.

angelbarrera92 commented 6 years ago

Ouch ok @sagarising . We hope to have news soon, we will see...

dcamach0 commented 6 years ago

+1

angelbarrera92 commented 6 years ago

We've done a little more digging into this problem.

It seems that since version 4.1.0 a couple of new users (cp-kafka and cp-kafka-connect) are created. Also, the directory /var/lib/${COMPONENT} is now owned by the cp-kafka user, which belongs to the confluent group, and has 750 permissions.

In previous versions (<4.1.0) this directory belonged to the root user with 777 permissions.

So when you start these containers in openshift, they are started by a random user belonging to the root group. This scenario causes the container to fail in openshift.

To reproduce the problem you only have to start the containers with a specific random user.

Old ones

docker run -d \
   --net=host \
   --name=zk-1 \
   -e ZOOKEEPER_SERVER_ID=1 \
   -e ZOOKEEPER_CLIENT_PORT=2181 \
   -e ZOOKEEPER_INIT_LIMIT=5 \
   -u 10003 \
   confluentinc/cp-zookeeper:4.0.2-1

new images...

docker run -d \
   --net=host \
   --name=zk-1 \
   -e ZOOKEEPER_SERVER_ID=1 \
   -e ZOOKEEPER_CLIENT_PORT=2181 \
   -e ZOOKEEPER_INIT_LIMIT=5 \
   -u 10003 \
   confluentinc/cp-zookeeper:4.1.0

The old images work, while the new ones don't.

Evidence: New images:

$ docker run -it --rm confluentinc/cp-zookeeper:4.1.0 /bin/bash
$ root@a4026c13fa79:/# ls -lrta /var/lib/zookeeper/
total 16
drwxr-xr-x 1 root     root      4096 Apr 16 22:59 ..
drwxrwxrwx 2 root     root      4096 Apr 16 22:59 log
drwxrwxrwx 2 root     root      4096 Apr 16 22:59 data
drwxr-x--- 4 cp-kafka confluent 4096 Apr 16 22:59 .
$ root@a4026c13fa79:/# cat /etc/passwd
root:x:0:0:root:/root:/bin/bash
daemon:x:1:1:daemon:/usr/sbin:/usr/sbin/nologin
bin:x:2:2:bin:/bin:/usr/sbin/nologin
sys:x:3:3:sys:/dev:/usr/sbin/nologin
sync:x:4:65534:sync:/bin:/bin/sync
games:x:5:60:games:/usr/games:/usr/sbin/nologin
man:x:6:12:man:/var/cache/man:/usr/sbin/nologin
lp:x:7:7:lp:/var/spool/lpd:/usr/sbin/nologin
mail:x:8:8:mail:/var/mail:/usr/sbin/nologin
news:x:9:9:news:/var/spool/news:/usr/sbin/nologin
uucp:x:10:10:uucp:/var/spool/uucp:/usr/sbin/nologin
proxy:x:13:13:proxy:/bin:/usr/sbin/nologin
www-data:x:33:33:www-data:/var/www:/usr/sbin/nologin
backup:x:34:34:backup:/var/backups:/usr/sbin/nologin
list:x:38:38:Mailing List Manager:/var/list:/usr/sbin/nologin
irc:x:39:39:ircd:/var/run/ircd:/usr/sbin/nologin
gnats:x:41:41:Gnats Bug-Reporting System (admin):/var/lib/gnats:/usr/sbin/nologin
nobody:x:65534:65534:nobody:/nonexistent:/usr/sbin/nologin
systemd-timesync:x:100:103:systemd Time Synchronization,,,:/run/systemd:/bin/false
systemd-network:x:101:104:systemd Network Management,,,:/run/systemd/netif:/bin/false
systemd-resolve:x:102:105:systemd Resolver,,,:/run/systemd/resolve:/bin/false
systemd-bus-proxy:x:103:106:systemd Bus Proxy,,,:/run/systemd:/bin/false
cp-kafka:x:104:108::/var/empty:/bin/false
cp-kafka-connect:x:105:108::/var/empty:/bin/false
root@a4026c13fa79:/# 

Old image:

$ docker run -it --rm confluentinc/cp-zookeeper:4.0.2-1 /bin/bash
$ root@63672c5e1f90:/# ls -lrta /var/lib/zookeeper/
total 16
drwxrwxrwx 2 root root 4096 Jul 17 23:21 log
drwxrwxrwx 2 root root 4096 Jul 17 23:21 data
drwxr-xr-x 1 root root 4096 Jul 17 23:21 ..
drwxr-xr-x 4 root root 4096 Jul 17 23:21 .
$ root@63672c5e1f90:/# cat /etc/passwd
root:x:0:0:root:/root:/bin/bash
daemon:x:1:1:daemon:/usr/sbin:/usr/sbin/nologin
bin:x:2:2:bin:/bin:/usr/sbin/nologin
sys:x:3:3:sys:/dev:/usr/sbin/nologin
sync:x:4:65534:sync:/bin:/bin/sync
games:x:5:60:games:/usr/games:/usr/sbin/nologin
man:x:6:12:man:/var/cache/man:/usr/sbin/nologin
lp:x:7:7:lp:/var/spool/lpd:/usr/sbin/nologin
mail:x:8:8:mail:/var/mail:/usr/sbin/nologin
news:x:9:9:news:/var/spool/news:/usr/sbin/nologin
uucp:x:10:10:uucp:/var/spool/uucp:/usr/sbin/nologin
proxy:x:13:13:proxy:/bin:/usr/sbin/nologin
www-data:x:33:33:www-data:/var/www:/usr/sbin/nologin
backup:x:34:34:backup:/var/backups:/usr/sbin/nologin
list:x:38:38:Mailing List Manager:/var/list:/usr/sbin/nologin
irc:x:39:39:ircd:/var/run/ircd:/usr/sbin/nologin
gnats:x:41:41:Gnats Bug-Reporting System (admin):/var/lib/gnats:/usr/sbin/nologin
nobody:x:65534:65534:nobody:/nonexistent:/usr/sbin/nologin
systemd-timesync:x:100:103:systemd Time Synchronization,,,:/run/systemd:/bin/false
systemd-network:x:101:104:systemd Network Management,,,:/run/systemd/netif:/bin/false
systemd-resolve:x:102:105:systemd Resolver,,,:/run/systemd/resolve:/bin/false
systemd-bus-proxy:x:103:106:systemd Bus Proxy,,,:/run/systemd:/bin/false

So.. now... it will change?

maxzheng commented 6 years ago

@angelbarrera92 @sagarising @kmxillo We have updated the ownership of those directories to be the same as before -- by root -- in 4.1.2. Can you try that and let me know how it goes? Thx.