Products | Versions
---|---
TIBCO ModelOps | 1.2.0
Log out of any existing session and configure an AWS SSO profile. In this example the `manage` role is selected and the profile is named `manage`; repeat `aws configure sso` to create a profile for the `view` role as well:

```
aws sso logout
aws configure sso
SSO start URL [None]: https://companycloud.awsapps.com/start
SSO Region [None]: us-west-2
Attempting to automatically open the SSO authorization page in your default browser.
If the browser does not open or you wish to use a different device to authorize this request, open the following URL:

https://device.sso.us-west-2.amazonaws.com/

Then enter the code:

DTFW-GZZF
The only AWS account available to you is: 012345678910
Using the account ID 012345678910
There are 3 roles available to you.
* view
* use
* manage    <--- Using the role name "manage"
CLI default client Region [None]: us-west-1
CLI default output format [None]: json
CLI profile name [manage-012345678910]: manage

To use this profile, specify the profile name using --profile, as shown:

aws s3 ls --profile manage
```
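The profiles are written to `~/.aws/config`, and `aws configure list-profiles` should now list both of them. As a rough sketch (the exact layout depends on your AWS CLI v2 version; newer releases may emit an `sso-session` section instead), the `manage` entry should look something like this:

```
# Sketch of the generated ~/.aws/config entry for the manage profile.
[profile manage]
sso_start_url = https://companycloud.awsapps.com/start
sso_region = us-west-2
sso_account_id = 012345678910
sso_role_name = manage
region = us-west-1
output = json
```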
Verify that each profile resolves to the expected SSO role:

```
aws sts get-caller-identity --profile manage
{
    "UserId": "AROAQGCIOINQYSW7ZGODO:ssouser@company.com",
    "Account": "012345678910",
    "Arn": "arn:aws:sts::012345678910:assumed-role/AWSReservedSSO_manage_95e0eb61e8ieb6f8/ssouser@company.com"
}

aws sts get-caller-identity --profile view
{
    "UserId": "AROAQGCIOINQYSW7ZGODO:ssouser@company.com",
    "Account": "012345678910",
    "Arn": "arn:aws:sts::012345678910:assumed-role/AWSReservedSSO_view_58e0eb8mh1ienu39/ssouser@company.com"
}
```
Log in with both profiles, capture the ARN of the `view` role, and create a custom role that the `view` role is trusted to assume. The role is created and its policies attached using the `manage` profile:

```
aws sso login --profile manage
aws sso login --profile view

export AWS_SSO_READ_ACCESS_ROLE_ARN=$(aws sts get-caller-identity \
    --profile view | jq -r '.Arn')
echo $AWS_SSO_READ_ACCESS_ROLE_ARN

# Build the initial trust policy allowing the view role to assume the custom role.
rm -f trust-relationship.json
cat > trust-relationship.json <<EOF
{
  "Version": "2012-10-17",
  "Statement": {
    "Effect": "Allow",
    "Principal": {
      "AWS": "$AWS_SSO_READ_ACCESS_ROLE_ARN"
    },
    "Action": "sts:AssumeRole"
  }
}
EOF

export AWS_CUSTOM_ROLE_NAME=TIBCOAppsInstaller
aws iam create-role \
    --role-name $AWS_CUSTOM_ROLE_NAME \
    --assume-role-policy-document file://trust-relationship.json \
    --profile manage
aws iam attach-role-policy \
    --role-name $AWS_CUSTOM_ROLE_NAME \
    --policy-arn arn:aws:iam::aws:policy/AdministratorAccess \
    --profile manage
aws iam attach-role-policy \
    --role-name $AWS_CUSTOM_ROLE_NAME \
    --policy-arn arn:aws:iam::aws:policy/PowerUserAccess \
    --profile manage
```
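Because the trust policy file is generated inline, it is worth validating it before relying on it. A minimal check (using `jq`, which this procedure already assumes is installed), plus a confirmation that the role was created:

```
# Fails with a parse error if trust-relationship.json is not valid JSON.
jq . trust-relationship.json

# Confirm the role exists and shows the expected trust policy.
aws iam get-role --role-name $AWS_CUSTOM_ROLE_NAME --profile manage
```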
Assume the custom role from the `view` profile and export the temporary credentials:

```
export AWS_ACCOUNT_ID=012345678910
export ASSUMED_ROLE_SESSION_NAME=ssouser-assuming-tibcoappsinstaller
export ASSUMED_AWS_ROLE_JSON=$(aws sts assume-role \
    --role-arn arn:aws:iam::$AWS_ACCOUNT_ID:role/$AWS_CUSTOM_ROLE_NAME \
    --role-session-name $ASSUMED_ROLE_SESSION_NAME --profile view \
    --duration-seconds 3600 | jq .)
export AWS_ACCESS_KEY_ID=$(echo $ASSUMED_AWS_ROLE_JSON | \
    jq -r .Credentials.AccessKeyId)
export AWS_SECRET_ACCESS_KEY=$(echo $ASSUMED_AWS_ROLE_JSON | \
    jq -r .Credentials.SecretAccessKey)
export AWS_SESSION_TOKEN=$(echo $ASSUMED_AWS_ROLE_JSON | \
    jq -r .Credentials.SessionToken)

# The HELM_* variables below will be used later to pass the necessary AWS
# credentials to the 'helm upgrade' command. However, 'helm upgrade' will fail
# if the AWS_* variables are still set in your environment, so the AWS_*
# variables are unset before 'helm upgrade' is run.
export HELM_AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID
export HELM_AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY
export HELM_AWS_SESSION_TOKEN=$AWS_SESSION_TOKEN

aws sts get-caller-identity
{
    "UserId": "ABOAOKNIUSEQQPKBFAC63:ssouser-assuming-tibcoappsinstaller",
    "Account": "012345678910",
    "Arn": "arn:aws:sts::012345678910:assumed-role/TIBCOAppsInstaller/ssouser-assuming-tibcoappsinstaller"
}
```
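These credentials expire after the requested `--duration-seconds` (one hour here). If a later step fails with an expired-token error, re-run the assume-role block above. A quick check of the remaining lifetime:

```
# Print the expiration timestamp of the assumed-role credentials.
echo $ASSUMED_AWS_ROLE_JSON | jq -r '.Credentials.Expiration'
```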
Create the EKS cluster and a node group for it:

```
export EKS_CLUSTER_NAME=epapps
export EKS_CLUSTER_REGION=us-east-2
eksctl create cluster \
    --name $EKS_CLUSTER_NAME \
    --version 1.21 \
    --region $EKS_CLUSTER_REGION \
    --without-nodegroup \
    --max-pods-per-node 100
eksctl create nodegroup \
    --name $EKS_CLUSTER_NAME-nodes \
    --nodes-min 2 \
    --nodes-max 4 \
    --node-volume-size 200 \
    --cluster $EKS_CLUSTER_NAME \
    --node-type t3.2xlarge \
    --region $EKS_CLUSTER_REGION
```
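To sanity-check the result before continuing (a hedged check; `eksctl create cluster` normally writes a kubeconfig entry for you, so `kubectl` should already point at the new cluster):

```
# List the cluster and confirm the worker nodes registered.
eksctl get cluster --region $EKS_CLUSTER_REGION
kubectl get nodes
```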
Associate an IAM OIDC provider with the cluster and capture the provider ID for later use in the trust policy:

```
eksctl utils associate-iam-oidc-provider \
    --cluster $EKS_CLUSTER_NAME \
    --approve \
    --region $EKS_CLUSTER_REGION
export OIDC_PROVIDER_ID=$(aws eks describe-cluster \
    --name $EKS_CLUSTER_NAME \
    --region $EKS_CLUSTER_REGION \
    --query "cluster.identity.oidc.issuer" \
    --output json | tr -d '"' | grep -o "id/.*")
```
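A quick verification that the provider was registered and the ID captured (this runs as the assumed TIBCOAppsInstaller role, which at this point still has AdministratorAccess attached):

```
# The new provider should appear with the id/... suffix captured above.
aws iam list-open-id-connect-providers
echo $OIDC_PROVIDER_ID
```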
```
# Unset the AWS_* variables for the assumed custom role session.
# This allows the 'manage' role to be assumed again.
# Then detach the AdministratorAccess policy.
unset AWS_ACCESS_KEY_ID
unset AWS_SECRET_ACCESS_KEY
unset AWS_SESSION_TOKEN
aws iam detach-role-policy \
    --role-name $AWS_CUSTOM_ROLE_NAME \
    --policy-arn arn:aws:iam::aws:policy/AdministratorAccess \
    --profile manage
```
Update the custom role's trust policy so that both the `view` role and the cluster's OIDC provider can assume it:

```
rm -f trust-relationship.json
cat > trust-relationship.json <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Sid": "Statement1",
      "Effect": "Allow",
      "Principal": {
        "AWS": "$AWS_SSO_READ_ACCESS_ROLE_ARN"
      },
      "Action": "sts:AssumeRole"
    },
    {
      "Sid": "Statement2",
      "Effect": "Allow",
      "Principal": {
        "Federated": "arn:aws:iam::$AWS_ACCOUNT_ID:oidc-provider/oidc.eks.$EKS_CLUSTER_REGION.amazonaws.com/$OIDC_PROVIDER_ID"
      },
      "Action": "sts:AssumeRoleWithWebIdentity"
    }
  ]
}
EOF

aws iam update-assume-role-policy \
    --role-name $AWS_CUSTOM_ROLE_NAME \
    --policy-document file://trust-relationship.json \
    --profile manage
```
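To confirm the new trust policy took effect:

```
# Show the role's current trust policy (returned URL-decoded as JSON).
aws iam get-role \
    --role-name $AWS_CUSTOM_ROLE_NAME \
    --query 'Role.AssumeRolePolicyDocument' \
    --profile manage
```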
Log in to Azure and collect the tenant ID and service principal (application) ID of the Azure AD application used for OAuth2:

```
az login
export AZ_APP_DISPLAY_NAME=ModelOpsAzApp
export AZ_TENANT_ID=$(az ad sp list \
    --display-name $AZ_APP_DISPLAY_NAME | jq -r '.[].appOwnerTenantId')
export AZ_SERVICE_PRINCIPAL_ID=$(az ad sp list \
    --display-name $AZ_APP_DISPLAY_NAME | jq -r '.[].appId')
```
Set the application's client secret (value redacted here):

```
export AZ_APP_SECRET=*******
```
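If the application does not yet have a client secret, one way to generate one is `az ad app credential reset`. This is an assumption about your Azure setup rather than part of the original procedure, and it requires sufficient Azure AD permissions:

```
# Hedged example: create an additional client secret for the app (--append
# avoids invalidating existing secrets) and export it.
export AZ_APP_SECRET=$(az ad app credential reset \
    --id $AZ_SERVICE_PRINCIPAL_ID \
    --append | jq -r '.password')
```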
Switch back to the custom role using the saved HELM_* credentials, then create the ECR repositories that the installation requires:

```
# Switch back to your custom role using the AWS_* variables
export AWS_ACCESS_KEY_ID=$HELM_AWS_ACCESS_KEY_ID
export AWS_SECRET_ACCESS_KEY=$HELM_AWS_SECRET_ACCESS_KEY
export AWS_SESSION_TOKEN=$HELM_AWS_SESSION_TOKEN

aws ecr create-repository --repository-name install-pipeline --region $EKS_CLUSTER_REGION
aws ecr create-repository --repository-name tools --region $EKS_CLUSTER_REGION
aws ecr create-repository --repository-name data-channel-registry --region $EKS_CLUSTER_REGION
aws ecr create-repository --repository-name file-datasink --region $EKS_CLUSTER_REGION
aws ecr create-repository --repository-name file-datasource --region $EKS_CLUSTER_REGION
aws ecr create-repository --repository-name git-server --region $EKS_CLUSTER_REGION
aws ecr create-repository --repository-name kafka-datasink --region $EKS_CLUSTER_REGION
aws ecr create-repository --repository-name kafka-datasource --region $EKS_CLUSTER_REGION
aws ecr create-repository --repository-name modelops-metrics --region $EKS_CLUSTER_REGION
aws ecr create-repository --repository-name modelops-server --region $EKS_CLUSTER_REGION
aws ecr create-repository --repository-name pmml --region $EKS_CLUSTER_REGION
aws ecr create-repository --repository-name python --region $EKS_CLUSTER_REGION
aws ecr create-repository --repository-name sbrt-base --region $EKS_CLUSTER_REGION
aws ecr create-repository --repository-name scheduling-server --region $EKS_CLUSTER_REGION
aws ecr create-repository --repository-name scoring-flow --region $EKS_CLUSTER_REGION
aws ecr create-repository --repository-name tensorflow --region $EKS_CLUSTER_REGION
aws ecr create-repository --repository-name test-datasink --region $EKS_CLUSTER_REGION
aws ecr create-repository --repository-name test-datasource --region $EKS_CLUSTER_REGION
aws ecr create-repository --repository-name rest-datasink --region $EKS_CLUSTER_REGION
aws ecr create-repository --repository-name rest-datasource --region $EKS_CLUSTER_REGION
aws ecr create-repository --repository-name statistica --region $EKS_CLUSTER_REGION
aws ecr create-repository --repository-name jdbc-datasource --region $EKS_CLUSTER_REGION
aws ecr create-repository --repository-name spark --region $EKS_CLUSTER_REGION
aws ecr create-repository --repository-name jdbc-datasink --region $EKS_CLUSTER_REGION
aws ecr create-repository --repository-name rest-request-response-datachannel --region $EKS_CLUSTER_REGION
```
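Equivalently, the repository creation can be written as a loop; a minimal sketch over the same names:

```
# Same 25 repositories as above, created in a loop.
for repo in install-pipeline tools data-channel-registry file-datasink \
    file-datasource git-server kafka-datasink kafka-datasource \
    modelops-metrics modelops-server pmml python sbrt-base \
    scheduling-server scoring-flow tensorflow test-datasink \
    test-datasource rest-datasink rest-datasource statistica \
    jdbc-datasource spark jdbc-datasink rest-request-response-datachannel
do
    aws ecr create-repository --repository-name "$repo" --region $EKS_CLUSTER_REGION
done
```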
Unset the assumed-role credentials again, locate the node group's NodeInstanceRole, and attach the registry access policies to it:

```
unset AWS_ACCESS_KEY_ID
unset AWS_SECRET_ACCESS_KEY
unset AWS_SESSION_TOKEN
export NODE_INSTANCE_ROLE_NAME=$(aws iam list-roles --profile manage | \
    jq -r '.Roles | .[] | .RoleName' | grep $EKS_CLUSTER_NAME | \
    grep NodeInstanceRole)
aws iam attach-role-policy \
    --policy-arn arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryFullAccess \
    --role-name $NODE_INSTANCE_ROLE_NAME \
    --profile manage
aws iam attach-role-policy \
    --policy-arn arn:aws:iam::aws:policy/AmazonElasticContainerRegistryPublicFullAccess \
    --role-name $NODE_INSTANCE_ROLE_NAME \
    --profile manage
aws iam attach-role-policy \
    --policy-arn arn:aws:iam::aws:policy/PowerUserAccess \
    --role-name $NODE_INSTANCE_ROLE_NAME \
    --profile manage
```
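To confirm all three policies landed on the role:

```
# Should list AmazonEC2ContainerRegistryFullAccess,
# AmazonElasticContainerRegistryPublicFullAccess, and PowerUserAccess.
aws iam list-attached-role-policies \
    --role-name $NODE_INSTANCE_ROLE_NAME \
    --profile manage
```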
Update your kubeconfig to access the cluster as the custom role, then create the ModelOps namespace and the secrets the installation expects:

```
aws eks update-kubeconfig \
    --name $EKS_CLUSTER_NAME \
    --region $EKS_CLUSTER_REGION \
    --role-arn arn:aws:iam::$AWS_ACCOUNT_ID:role/$AWS_CUSTOM_ROLE_NAME \
    --profile view

export MODELOPS_NAMESPACE=modelops12
export MODELOPS_ROOT_PASSWORD=tibco123
export EKS_ES_PASSWORD=ElAsticPW123
export EKS_GIT_PASSWORD=G1tPW123
export EKS_NEXUS_PASSWORD=NxtPW123
export EKS_SCORING_PASSWORD=Sc0rePW123
export ARTIFACT_MGMT_PASSWORD=Art1f@ct123

kubectl create namespace $MODELOPS_NAMESPACE
kubectl create secret generic elasticsearch-es-elastic-user \
    --from-literal=elastic=$EKS_ES_PASSWORD \
    --namespace $MODELOPS_NAMESPACE
kubectl create secret generic git-server \
    --from-literal=modelops=$EKS_GIT_PASSWORD \
    --namespace $MODELOPS_NAMESPACE
kubectl create secret generic nexus-server \
    --from-literal=admin=$EKS_NEXUS_PASSWORD \
    --namespace $MODELOPS_NAMESPACE
kubectl create secret generic modelops-server \
    --from-literal=admin=$MODELOPS_ROOT_PASSWORD \
    --namespace $MODELOPS_NAMESPACE
kubectl create secret generic scoring-admin \
    --from-literal=admin=$EKS_SCORING_PASSWORD \
    --namespace $MODELOPS_NAMESPACE
kubectl create secret generic artifact-management \
    --from-literal=admin=$ARTIFACT_MGMT_PASSWORD \
    --namespace $MODELOPS_NAMESPACE
kubectl create secret generic oauth2 \
    --from-literal=TENANT_ID=$AZ_TENANT_ID \
    --from-literal=CLIENT_ID=$AZ_SERVICE_PRINCIPAL_ID \
    --from-literal=CLIENT_SECRET=$AZ_APP_SECRET \
    --namespace $MODELOPS_NAMESPACE
```
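A quick check that all of the secrets exist in the namespace:

```
# Expect elasticsearch-es-elastic-user, git-server, nexus-server,
# modelops-server, scoring-admin, artifact-management, and oauth2.
kubectl get secrets --namespace $MODELOPS_NAMESPACE
```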
Install ModelOps with Helm:

```
export MODELOPS_HOME=/opt/tibco/modelops/1.2
export AWS_HOSTED_ZONE=eksapps.companycloud.com

# NOTE: the externalDNS.aws.eksRoleArn option shown below is intentionally
# set to an empty value. Since you added the PowerUserAccess policy to the
# NodeInstanceRole earlier, you do not need to specify a value for the
# eksRoleArn (i.e. the NodeInstanceRole will be the default value).
# However, the option must still be set in the 'helm upgrade' command,
# otherwise the external-dns pod will not start properly.
helm upgrade \
    --install modelops12 $MODELOPS_HOME/helm-charts/kubernetes-installer-1.0.2.tgz \
    --atomic \
    --set cloud=eks \
    --set eks.externalDNS=aws \
    --set externalDNS.aws.eksRoleArn= \
    --set eks.containerRegistry=$AWS_ACCOUNT_ID.dkr.ecr.$EKS_CLUSTER_REGION.amazonaws.com \
    --namespace $MODELOPS_NAMESPACE \
    --set eks.networkExposure=ingress \
    --set eks.ingressDomain=$AWS_HOSTED_ZONE \
    --set eks.oauth2=azure \
    --set eks.awsAccessKeyID=$HELM_AWS_ACCESS_KEY_ID \
    --set eks.awsSecretAccessKey=$HELM_AWS_SECRET_ACCESS_KEY \
    --set eks.awsTokenId=$HELM_AWS_SESSION_TOKEN \
    --set medium.nexus.memory=3Gi \
    --timeout 10m0s
```
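To check on the release and the pods it created:

```
# Review the release status and watch the pods come up.
helm status modelops12 --namespace $MODELOPS_NAMESPACE
kubectl get pods --namespace $MODELOPS_NAMESPACE
```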
When the `helm upgrade` command completes, it instructs you to copy the Maven artifacts into the repository pod. To do that, run:
```
cd $MODELOPS_HOME/maven-repository-artifacts/
kubectl cp modelops-repo-1.2.0-mavenrepo.zip mavenrepo-0:/tmp/ -n $MODELOPS_NAMESPACE
```
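A quick check that the archive arrived in the pod:

```
# List the copied archive inside the mavenrepo-0 pod.
kubectl exec mavenrepo-0 --namespace $MODELOPS_NAMESPACE -- ls -l /tmp/
```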
Finally, follow the installation pipeline logs until both pipelines complete:

```
tkn pipelinerun logs bootstrap --follow --namespace $MODELOPS_NAMESPACE
tkn pipelinerun logs modelops-server --follow --namespace $MODELOPS_NAMESPACE
```
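If a log command reports that the pipelinerun cannot be found yet, you can see which runs exist and their current states with:

```
# List all pipelineruns in the namespace with their status.
tkn pipelinerun list --namespace $MODELOPS_NAMESPACE
```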