
Google

Machine Learning Free Tier

| Product | Description | Free Tier |
| --- | --- | --- |
| Cloud Machine Learning Engine | Managed Machine Learning (TensorFlow) | - |
| Cloud Job Discovery | ML Job Search/Discovery | - |
| Cloud Natural Language | Text Parsing and Analysis | 5000 text operations per month |
| Cloud Speech | Convert Speech to Text | 60 minutes of audio per month |
| Cloud Translation | Language Detection and Translation | - |
| Cloud Vision | Image Recognition and Classification | 1000 image operations per month |
| Cloud Video Intelligence | Scene-level Video Annotation | First 1000 minutes of video free |

Internet of Things Free Tier

| Product | Description | Free Tier |
| --- | --- | --- |
| Cloud IoT Core | Data Ingestion/Device Management | 250MB network traffic |

Big Data Free Tier

| Product | Description | Free Tier |
| --- | --- | --- |
| BigQuery | Data Warehouse/Analytics | 1TB queries/month; 10GB storage |
| Cloud Dataflow | Stream/Batch Data Processing | - |
| Cloud Dataproc | Managed Spark and Hadoop | - |
| Cloud Datalab | Visualize and Explore Data | - |
| Cloud Dataprep | Transform/Clean Raw Data | - |
| Cloud Pub/Sub | Global Real-time Messaging | 10GB messages/month |
| Genomics | Managed Genomics Platform | - |
| Data Studio | Collaborative Dashboards | - |

Databases Free Tier

| Product | Description | Free Tier |
| --- | --- | --- |
| Cloud SQL | Managed MySQL and PostgreSQL | - |
| Cloud Bigtable | HBase-compatible Nonrelational | - |
| Cloud Datastore | Horizontally Scalable Nonrelational | 1GB storage |
| Cloud Spanner | Horizontally Scalable Relational | - |

Storage Free Tier

| Product | Description | Free Tier |
| --- | --- | --- |
| Cloud Storage | Object Storage and Serving | 5GB regional storage |
| Nearline | Archival Storage | 5GB regional storage |
| Coldline | Archival Cold Storage | 5GB regional storage |
| Persistent Disk | VM-attached Disks | - |

Data Transfer Free Tier

| Product | Description | Free Tier |
| --- | --- | --- |
| Google Transfer Appliance | Hardware for Data Migration | - |
| Cloud Storage Transfer Service | Cloud-to-Cloud Transfers | - |
| BigQuery Data Transfer Service | Bulk Import Analytics Data | - |

Compute Free Tier

| Product | Description | Free Tier |
| --- | --- | --- |
| Compute Engine | Virtual Machines, Disks, Network | 1 f1-micro instance; 30GB HDD |
| App Engine | Managed App Platform | 28 instance hours per day; 5GB storage |
| Kubernetes Engine | Managed Kubernetes/Containers | Unlimited nodes free |
| Cloud Functions | Event-driven Serverless Applications | 2 million invocations per month |

Networking Free Tier

| Product | Description | Free Tier |
| --- | --- | --- |
| Virtual Private Cloud | Software-Defined Networking | - |
| Cloud Load Balancing | Multi-region Load Distribution | - |
| Cloud CDN | Content Delivery Network | - |
| Cloud DNS | Programmable DNS Serving | - |
| Dedicated Interconnect | Dedicated Private Network Connection | - |
| IPsec VPN | Virtual Private Network Connection | - |
| Direct Peering | Peer with GCP | - |
| Carrier Peering | Peer with a Carrier | - |

API Platform and Ecosystems Free Tier

| Product | Description | Free Tier |
| --- | --- | --- |
| Apigee API Platform | Develop, Secure, Monitor APIs | - |
| Apigee Sense | API Protection from Attacks | - |
| API Monetization | Monetize APIs | - |
| API Analytics | API Metrics | - |
| Cloud Endpoints | Cloud API Gateway | - |

Identity and Security Free Tier

| Product | Description | Free Tier |
| --- | --- | --- |
| Cloud IAM | Resource Access Control | - |
| Cloud Identity-Aware Proxy | Identity-based App Sign-in | - |
| Cloud Data Loss Prevention API | Classify, Redact Sensitive Data | - |
| Cloud Key Management Service | Hosted Key Management Service | - |
| Cloud Resource Manager | Cloud Project Metadata Management | - |
| Cloud Security Scanner | App Engine Security Scanner | - |
| Security Key Enforcement | Two-step Key Verification | - |

Management Tools Free Tier

| Product | Description | Free Tier |
| --- | --- | --- |
| Stackdriver Monitoring | Infrastructure and Application Monitoring | Free for GCP |
| Stackdriver Logging | Centralized Logging | 5GB per project per month; 7-day retention |
| Stackdriver Error Reporting | App Error Reporting | Free for App Engine or if using Logging |
| Stackdriver Trace | App Performance Insights | Free |
| Stackdriver Debugger | Live Production Debugging | Free |
| Cloud Deployment Manager | Templated Infrastructure Deployment | - |
| Cloud Console | Web-based Management Console | - |
| Cloud Shell | Browser-based Terminal/CLI | Free |
| Cloud Mobile App | iOS/Android GCP Manager App | - |
| Cloud Billing API | Programmatically Manage GCP Billing | - |
| Cloud APIs | APIs for Cloud Services | - |

Developer Tools Free Tier

| Product | Description | Free Tier |
| --- | --- | --- |
| Cloud SDK | CLI for GCP | - |
| Container Registry | Private Container Registry/Storage | - |
| Container Builder | Build/Package Container Artifacts | 120 build minutes/day |
| Cloud Source Repositories | Hosted Private Git Repos | 1GB private hosting |
| Cloud Tools for Android Studio | Android Studio GCP Tools | - |
| Cloud Tools for IntelliJ | IntelliJ GCP Tools | - |
| Cloud Tools for PowerShell | PowerShell GCP Tools | - |
| Cloud Tools for Visual Studio | Visual Studio GCP Tools | - |
| Cloud Tools for Eclipse | Eclipse GCP Tools | - |
| Gradle App Engine Plugin | Gradle App Engine Plugin | - |
| Maven App Engine Plugin | Maven App Engine Plugin | - |

Mobile (Firebase) Free Tier

| Product | Description | Free Tier |
| --- | --- | --- |
| Realtime Database | Real-time Data Synchronization | 100 connections; 1GB stored |
| Cloud Firestore | Document Store and Sync | 1GB stored |
| Cloud Storage | File Storage and Serving | 5GB stored |
| Hosting | Web Hosting with CDN/SSL | 1GB stored; 10GB transfer per month |
| Authentication | Drop-in Authentication | Free |
| Cloud Functions | Event-driven Serverless Applications | 125K invocations/month |
| Test Lab for Android | Mobile Device Testing Service | 5 physical / 10 virtual device tests per day |
| Performance Monitoring | App Performance Monitoring | Free |
| Crashlytics | Crash Reporting and Analytics | Free |
| Cloud Messaging | Send Messages to Devices | Free |
Commands

References

Other cheatsheets

multiple gcloud config configurations

switch gcloud context with gcloud config

gcloud config list
gcloud config set account pythonrocks@gmail.com
gcloud config set project mygcp-demo
gcloud config set compute/region us-west1
gcloud config set compute/zone us-west1-a
alias demo='gcloud config set account pythonrocks@gmail.com && gcloud config set project mygcp-demo && gcloud config set compute/region us-west1 && gcloud config set compute/zone us-west1-a'
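
gcloud also supports named configurations for keeping whole contexts separate; a minimal sketch (the configuration name demo-west is just an example):

# create, populate and switch between named configurations
gcloud config configurations create demo-west
gcloud config set project mygcp-demo
gcloud config set compute/region us-west1
gcloud config configurations list
gcloud config configurations activate default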


cluster=$(gcloud config get-value container/cluster 2> /dev/null)
zone=$(gcloud config get-value compute/zone 2> /dev/null)
project=$(gcloud config get-value core/project 2> /dev/null)

# switch project based on the name
gcloud config set project $(gcloud projects list --filter='name:wordpress-dev' --format='value(project_id)')

command -v gcloud >/dev/null 2>&1 || { \
 echo >&2 "I require gcloud but it's not installed.  Aborting."; exit 1; }

REGION=$(gcloud config get-value compute/region)
if [[ -z "${REGION}" ]]; then
    echo "https://cloud.google.com/compute/docs/regions-zones/changing-default-zone-region" 1>&2
    echo "gcloud cli must be configured with a default region." 1>&2
    echo "run 'gcloud config set compute/region REGION'." 1>&2
    echo "replace 'REGION' with the region name like us-west1." 1>&2
    exit 1;
fi

auth

kubectl uses an OAuth access token generated by one of the following (see the example below):

  • gcloud config config-helper --format json
  • gcloud config config-helper --format='value(credential.access_token)'
  • gcloud auth print-access-token (generates a new token)
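
The token can also be passed to kubectl directly; a minimal sketch (assumes kubectl already points at the cluster):

kubectl --token="$(gcloud auth print-access-token)" get nodes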

info

gcloud info --format flattened
export PROJECT=$(gcloud info --format='value(config.project)')

projects

# various way to get project_id
PROJECT_ID=$(gcloud config get-value core/project 2>/dev/null)
PROJECT_ID=$(gcloud config list project --format='value(core.project)')
PROJECT_ID=$(gcloud info --format='value(config.project)')

# get project_number given project_id or name
gcloud projects list --filter="project_id:${PROJECT_ID}"  --format='value(project_number)'
gcloud projects list --filter="name:${project_name}"  --format='value(project_number)'

zones & regions

To return a list of zones given a region

gcloud compute zones list --filter=region:us-central1
# list regions
gcloud compute regions list

organization

ORG_ID=$(gcloud organizations list --format 'value(ID)')
# list top level folders
gcloud resource-manager folders list --organization=$ORG_ID
# list sub folders given upper level folder id
gcloud resource-manager folders list --organization=$FOLDER_ID

# grant roles to a user
ORGANIZATION_ADMIN_ADDRESS='user:developer1@example.com'
gcloud resource-manager folders add-iam-policy-binding ${folder_id} \
  --member=${ORGANIZATION_ADMIN_ADDRESS} \
  --role=roles/resourcemanager.folderAdmin
gcloud resource-manager folders add-iam-policy-binding ${folder_id} \
  --member=${ORGANIZATION_ADMIN_ADDRESS} \
  --role=roles/storage.admin
gcloud resource-manager folders add-iam-policy-binding ${folder_id} \
  --member=${ORGANIZATION_ADMIN_ADDRESS} \
  --role=roles/billing.projectManager

billing

gcloud organizations list
gcloud beta billing accounts list
# link a billing account with a project, assuming the user or svc account has "Billing Account User" role.
gcloud beta billing projects link ${project_id} \
            --billing-account ${ORGANIZATION_BILLING_ACCOUNT}

IAM list permission and roles for a given resource

# list permissions that can be tested on a resource
gcloud iam list-testable-permissions <uri>
# e.g.
gcloud iam list-testable-permissions //cloudresourcemanager.googleapis.com/projects/$PROJECT_ID

# list roles that can be granted on a resource
gcloud iam list-grantable-roles <uri>
# e.g.
gcloud iam list-grantable-roles //cloudresourcemanager.googleapis.com/projects/$PROJECT_ID
gcloud iam list-grantable-roles https://www.googleapis.com/compute/v1/projects/$PROJECT_ID/zones/us-central1-a/instances/iowa1

# get the resource uri, e.g.
gcloud projects list --uri

IAM service account

export SA_EMAIL=$(gcloud iam service-accounts list \
    --filter="displayName:jenkins" --format='value(email)')
export PROJECT=$(gcloud info --format='value(config.project)')

# create and list sa
gcloud iam service-accounts create jenkins --display-name jenkins
gcloud iam service-accounts list
gcloud iam service-accounts list   --filter='email ~ [0-9]*-compute@.*'   --format='table(email)'

# create & list sa key  
gcloud iam service-accounts keys create jenkins-sa.json --iam-account $SA_EMAIL    
gcloud iam service-accounts keys list --iam-account=vault-admin@<project_id>.iam.gserviceaccount.com

# project level: get a list of roles assigned to a given sa such as terraform
gcloud projects get-iam-policy ${PROJECT_ID} --flatten="bindings[].members" --filter="bindings.members:serviceAccount:terraform@${PROJECT_ID}.iam.gserviceaccount.com"

# project level: grant roles to a given sa
gcloud projects get-iam-policy $PROJECT
gcloud projects add-iam-policy-binding $PROJECT  --role roles/storage.admin \
    --member serviceAccount:$SA_EMAIL
gcloud projects add-iam-policy-binding $PROJECT --role roles/compute.instanceAdmin.v1 \
    --member serviceAccount:$SA_EMAIL
gcloud projects add-iam-policy-binding $PROJECT --role roles/compute.networkAdmin \
    --member serviceAccount:$SA_EMAIL
gcloud projects add-iam-policy-binding $PROJECT --role roles/compute.securityAdmin \
    --member serviceAccount:$SA_EMAIL
gcloud projects add-iam-policy-binding $PROJECT --role roles/iam.serviceAccountActor \
    --member serviceAccount:$SA_EMAIL

GCS bucket level

COMPUTE_ENGINE_SA_EMAIL=$(gcloud iam service-accounts list --filter="name:Compute Engine default service account" --format "value(email)")
gsutil iam ch serviceAccount:${COMPUTE_ENGINE_SA_EMAIL}:objectViewer gs://bucket-name
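
To verify the binding, the bucket policy can be read back (same placeholder bucket name):

gsutil iam get gs://bucket-name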

Custom Roles

# list predefined roles
gcloud iam roles list
# list custom roles
gcloud iam roles list --project $PROJECT_ID

# create a custom role at the project level (--project [PROJECT_ID]) or org level (--organization [ORGANIZATION_ID]), either:
# 1. from a YAML definition file
gcloud iam roles create editor --project $PROJECT_ID --file role-definition.yaml
# 2. from flags
gcloud iam roles create viewer --project $PROJECT_ID --title "Role Viewer" --description "Custom role description." --permissions compute.instances.get,compute.instances.list --stage ALPHA
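
A minimal sketch of what role-definition.yaml could look like (the title, description and permissions here are illustrative):

cat > role-definition.yaml << 'EOF'
title: "Role Editor"
description: "Custom role with read-only compute instance permissions."
stage: "ALPHA"
includedPermissions:
- compute.instances.get
- compute.instances.list
EOF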

app engine
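
A few common commands as a hedged starting point (assumes an app.yaml in the current directory and App Engine already enabled for the project):

gcloud app deploy app.yaml --quiet
gcloud app browse
gcloud app versions list
gcloud app logs tail -s default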

cloud build

# user defined
gcloud builds submit --config=cloudbuild.yaml --substitutions=_BRANCH_NAME=foo,_BUILD_NUMBER=1 .

# override built in TAG_NAME
gcloud builds submit --config=cloudbuild.yaml --substitutions=TAG_NAME=v1.0.1

Cloud build trigger GCE rolling replace/start
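
One hedged way to wire this up: have a Cloud Build step call a rolling action on the managed instance group after pushing a new image/template (the group name nat-1-group and zone are assumptions):

# replace instances with new ones built from the (updated) instance template
gcloud compute instance-groups managed rolling-action replace nat-1-group --zone us-west1-a --max-unavailable 1
# or just restart the existing instances in place
gcloud compute instance-groups managed rolling-action restart nat-1-group --zone us-west1-a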

kms

  • cloud-encrypt-with-kms
  • Integrated with cloud build

    # list all keyrings
    gcloud kms keyrings list --location global
    # list all keys in my_key_ring
    gcloud kms keys list --keyring my_key_ring --location global
    
    # grant KMS IAM roles to a user $USER_EMAIL
    gcloud kms keyrings add-iam-policy-binding $KEYRING_NAME \
    --location global \
    --member user:$USER_EMAIL \
    --role roles/cloudkms.admin
    gcloud kms keyrings add-iam-policy-binding $KEYRING_NAME \
    --location global \
    --member user:$USER_EMAIL \
    --role roles/cloudkms.cryptoKeyEncrypterDecrypter
    
    # Encrypt and Decrypt in REST API
    curl -v "https://cloudkms.googleapis.com/v1/projects/$DEVSHELL_PROJECT_ID/locations/global/keyRings/$KEYRING_NAME/cryptoKeys/$CRYPTOKEY_NAME:encrypt" \
    -d "{\"plaintext\":\"$PLAINTEXT\"}" \
    -H "Authorization:Bearer $(gcloud auth application-default print-access-token)"\
    -H "Content-Type:application/json" \
    | jq .ciphertext -r > 1.encrypted
    
    curl -v "https://cloudkms.googleapis.com/v1/projects/$DEVSHELL_PROJECT_ID/locations/global/keyRings/$KEYRING_NAME/cryptoKeys/$CRYPTOKEY_NAME:decrypt" \
    -d "{\"ciphertext\":\"$(cat 1.encrypted)\"}" \
    -H "Authorization:Bearer $(gcloud auth application-default print-access-token)"\
    -H "Content-Type:application/json" \
    | jq .plaintext -r | base64 -d    
    

secret manager

  • https://blog.scalesec.com/gcp-secret-manager-first-look-eaa9b0620da1

    # create a secret
    gcloud beta secrets create SECRET_NAME --replication-policy="automatic"
    #create a secret version
    gcloud beta secrets versions add "SECRET_NAME" --data-file="/path/to/file.txt"
    # list
    gcloud beta secrets list
    # read
    gcloud beta secrets versions access latest --secret=my_ssh_private_key
    #update the labels (metadata) of a secret
    gcloud beta secrets update SECRET_NAME --update-labels=KEY=VALUE
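    # a hedged usage sketch: read the secret value into a shell variable (variable name is illustrative)
    export MY_SECRET="$(gcloud beta secrets versions access latest --secret=SECRET_NAME)"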
    

compute engine

gcloud command for creating an instance

The short form below is the generic template; the long command after it is the equivalent command line generated by the web console.

gcloud compute instances create [INSTANCE_NAME] \
  --image-family [IMAGE_FAMILY] \
  --image-project [IMAGE_PROJECT] \
  --create-disk image=[DISK_IMAGE],image-project=[DISK_IMAGE_PROJECT],size=[SIZE_GB],type=[DISK_TYPE]

gcloud compute instances create micro1 --zone=us-west1-a --machine-type=f1-micro --subnet=default --network-tier=PREMIUM --maintenance-policy=MIGRATE --service-account=398028291895-compute@developer.gserviceaccount.com --scopes=https://www.googleapis.com/auth/devstorage.read_only,https://www.googleapis.com/auth/logging.write,https://www.googleapis.com/auth/monitoring.write,https://www.googleapis.com/auth/servicecontrol,https://www.googleapis.com/auth/service.management.readonly,https://www.googleapis.com/auth/trace.append --min-cpu-platform=Automatic --image=debian-9-stretch-v20180510 --image-project=debian-cloud --boot-disk-size=10GB --boot-disk-type=pd-standard --boot-disk-device-name=micro1

list compute images

gcloud compute images list --filter=name:debian --uri
https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20180109
https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-9-stretch-v20180105

# Use the following command to see available non-Shielded VM Windows Server images
gcloud compute images list --project windows-cloud --no-standard-images
# Use the following command to see a list of available Shielded VM images, including Windows images
gcloud compute images list --project gce-uefi-images --no-standard-images

list an instance

  • filters
  • resource-keys

    gcloud compute instances list --filter="zone:us-central1-a"
    gcloud compute instances list --project=dev --filter="name~^es"
    gcloud compute instances list --project=dev --filter=name:kafka --format="value(name,INTERNAL_IP)"
    gcloud compute instances list --filter=tags:kafka-node
    gcloud compute instances list --filter='machineType:g1-small'
    

move instance

gcloud compute instances move <instance_wanna_move> --destination-zone=us-central1-a --zone=us-central1-c

ssh & scp

#--verbosity=debug is great for debugging, showing the underlying SSH command
# the following is a real-world example for running a bastion server that talks to a GKE cluster (master authorized network)
gcloud compute ssh --verbosity=debug <instance_name> --command "kubectl get nodes"

gcloud compute scp  --recurse ../manifest <instance_name>:

SSH via IAP

  • https://cloud.google.com/iap/docs/using-tcp-forwarding

    # find out access-config-name's name
    gcloud compute instances describe oregon1
    # remove the external IP
    gcloud compute instances delete-access-config  oregon1 --access-config-name "External NAT"
    # connect via IAP, assuming the IAP is granted to the account used for login.
    gcloud beta compute ssh oregon1 --tunnel-through-iap
    

ssh port forwarding for elasticsearch

gcloud compute --project "foo" ssh --zone "us-central1-c" "elasticsearch-1"  --ssh-flag="-L localhost:9200:localhost:9200"

The second localhost is relative to elasticsearch-1.

ssh reverse port forwarding

for example, how to connect to home server's flask server (tcp port 5000) for a demo or a local game server in development

GOOGLE_CLOUD_PROJECT=$(gcloud config get-value project)
gcloud compute --project "${GOOGLE_CLOUD_PROJECT}" ssh --zone "us-west1-c" --ssh-flag="-v -N -R :5000:localhost:5000" "google_cloud_bastion_server"

generate ssh config

gcloud compute config-ssh
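
config-ssh writes Host entries of the form INSTANCE.ZONE.PROJECT into ~/.ssh/config, so afterwards plain ssh works (names below are hypothetical):

ssh micro1.us-west1-a.mygcp-demo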

Windows RDP: reset Windows password

Returns the IP address and password for creating the RDP connection.

gcloud compute reset-windows-password qa-iceberg-instance --user=jdoe

ip_address: 104.199.119.166
password:   Ks(;_gx7Bf2d.NP
username:   jdoe

debugging

# show the underlying API calls made by gcloud
gcloud compute instances list --log-http
# debug boot/OS issues via the serial port output
gcloud compute instances get-serial-port-output <instance_name>

instance level metadata

curl -s "http://metadata.google.internal/computeMetadata/v1/instance/?recursive=true&alt=text" -H "Metadata-Flavor: Google"
leader=$(curl -s "http://metadata.google.internal/computeMetadata/v1/instance/attributes/leader" -H "Metadata-Flavor: Google")

project level metadata

gcloud compute project-info describe
gcloud compute project-info describe --flatten="commonInstanceMetadata[]"
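
Project-wide metadata can also be set and removed; a hedged example with an arbitrary key (foo=bar is only an illustration):

gcloud compute project-info add-metadata --metadata foo=bar
gcloud compute project-info remove-metadata --keys foo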

instances, template, target-pool and instance group

cat << EOF > startup.sh
#! /bin/bash
apt-get update
apt-get install -y nginx
service nginx start
sed -i -- 's/nginx/Google Cloud Platform - '"\$HOSTNAME"'/' /var/www/html/index.nginx-debian.html
EOF

gcloud compute instance-templates create nginx-template  --metadata-from-file startup-script=startup.sh
gcloud compute target-pools create nginx-pool
gcloud compute instance-groups managed create nginx-group \
         --base-instance-name nginx \
         --size 2 \
         --template nginx-template \
         --target-pool nginx-pool

MIG with startup and shutdown scripts

https://cloud.google.com/vpc/docs/special-configurations#multiple-natgateways

gsutil cp gs://nat-gw-template/startup.sh .

gcloud compute instance-templates create nat-1 \
    --machine-type n1-standard-2 --can-ip-forward --tags natgw \
    --metadata-from-file=startup-script=startup.sh --address $nat_1_ip

gcloud compute instance-templates create nat-2 \
    --machine-type n1-standard-2 --can-ip-forward --tags natgw \
    --metadata-from-file=startup-script=startup.sh  --address $nat_2_ip

disk snapshot

gcloud compute disks snapshot kafka-data1-1 --async --snapshot-names=kafka-data-1 --project project_a --zone us-west1-a
# output: Use [gcloud compute operations describe URI] command to check the status of the operation(s).
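
To follow up on the async snapshot, the zonal operations can be listed and then described by URI (a sketch; zone taken from the command above):

gcloud compute operations list --zones=us-west1-a --limit=5
gcloud compute operations describe <operation_uri>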

regional disk

gcloud beta compute instances attach-disk micro1 --disk pd-west1 --disk-scope regional
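
The regional disk itself would be created along these lines (a sketch; disk name, size and replica zones are assumptions):

gcloud beta compute disks create pd-west1 --region us-west1 --replica-zones us-west1-a,us-west1-b --size 200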

Networking

network and subnets

 gcloud compute networks create privatenet --subnet-mode=custom
 gcloud compute networks subnets create privatesubnet-us --network=privatenet --region=us-central1 --range=172.16.0.0/24
 gcloud compute networks subnets create privatesubnet-eu --network=privatenet --region=europe-west1 --range=172.20.0.0/20
 gcloud compute networks subnets list --sort-by=NETWORK

route

tag the instances that have no external IPs with the no-ip tag, then create a route that sends their internet-bound traffic through the NAT gateway

gcloud compute instances add-tags existing-instance --tags no-ip
gcloud compute routes create no-ip-internet-route \
    --network custom-network1 \
    --destination-range 0.0.0.0/0 \
    --next-hop-instance nat-gateway \
    --next-hop-instance-zone us-central1-a \
    --tags no-ip --priority 800

firewall rules

  • https://medium.com/@swongra/protect-your-google-cloud-instances-with-firewall-rules-69cce960fba

    # allow SSH, RDP and ICMP for the given network
    gcloud compute firewall-rules create managementnet-allow-icmp-ssh-rdp --direction=INGRESS --priority=1000 --network=managementnet --action=ALLOW --rules=tcp:22,3389,icmp --source-ranges=0.0.0.0/0
    # allow internal from given source range
    gcloud compute firewall-rules create mynetwork-allow-internal --network \
    mynetwork --action ALLOW --direction INGRESS --rules all \
    --source-ranges 10.128.0.0/9
    gcloud compute firewall-rules list --filter="network:mynetwork"
    
    ## DENY
    gcloud compute firewall-rules create mynetwork-deny-icmp \
    --network mynetwork --action DENY --direction EGRESS --rules icmp \
    --destination-ranges 10.132.0.2 --priority 500
    gcloud compute firewall-rules list \
    --filter="network:mynetwork AND name=mynetwork-deny-icmp"
    
    # sort-by
    gcloud compute firewall-rules list --sort-by=NETWORK
    
    

layer 4 network lb

gcloud compute firewall-rules create www-firewall --allow tcp:80
gcloud compute forwarding-rules create nginx-lb \
         --region us-central1 \
         --ports=80 \
         --target-pool nginx-pool

gcloud compute firewall-rules list --sort-by=NETWORK

layer 7 http lb

  • https://cloud.google.com/solutions/scalable-and-resilient-apps

    gcloud compute http-health-checks create http-basic-check
    gcloud compute instance-groups managed \
       set-named-ports nginx-group \
       --named-ports http:80
    
    gcloud compute backend-services create nginx-backend \
      --protocol HTTP --http-health-checks http-basic-check --global
    
    gcloud compute backend-services add-backend nginx-backend \
    --instance-group nginx-group \
    --instance-group-zone us-central1-a \
    --global  
    
    gcloud compute url-maps create web-map \
    --default-service nginx-backend
    
    gcloud compute target-http-proxies create http-lb-proxy \
    --url-map web-map
    
    gcloud compute forwarding-rules create http-content-rule \
        --global \
        --target-http-proxy http-lb-proxy \
        --ports 80
    gcloud compute forwarding-rules list
    
    

forwarding-rules

gcloud compute forwarding-rules list --filter=$(dig +short <dns_name>)
gcloud compute forwarding-rules describe my-forwardingrule --region us-central1
gcloud compute forwarding-rules describe my-http-forwardingrule --global

address

# get the external IP address of the instance
gcloud compute instances describe single-node \
     --format='value(networkInterfaces[0].accessConfigs[0].natIP)'

gcloud compute addresses describe https-lb --global --format json

# list all IP addresses
gcloud projects list --format='value(project_id)' | xargs -I {} gcloud compute addresses list --format='value(address)' --project {}  2>/dev/null | sort | uniq -c

interconnect

# list Google Compute Engine interconnect locations
gcloud compute interconnects locations list

GCP managed ssl certificate

gcloud beta compute ssl-certificates create example-mydomain --domains example.mydomain.com
gcloud beta compute ssl-certificates list
gcloud beta compute ssl-certificates describe example-mydomain
# It can take 30+ minutes to provision the certificate; one of the conditions is that a target HTTPS proxy must be associated with the cert.
gcloud beta compute target-https-proxies list
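
Associating the certificate with an existing target HTTPS proxy is what lets provisioning complete; a hedged example (the proxy name is hypothetical):

gcloud compute target-https-proxies update my-https-proxy --ssl-certificates example-mydomain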

StackDriver logging

gcloud logging read "timestamp >= \"2018-04-19T00:30:00Z\"  and logName=projects/${project_id}/logs/requests and resource.type=http_load_balancer" --format="csv(httpRequest.remoteIp,httpRequest.requestUrl,timestamp)" --project=${project_id}

Service

list available services

gcloud services list --available

Enable Service

# enable several services in one command
gcloud services enable cloudapis.googleapis.com \
  cloudresourcemanager.googleapis.com \
  compute.googleapis.com

# or enable them one at a time
gcloud services enable container.googleapis.com
gcloud services enable containerregistry.googleapis.com
gcloud services enable cloudbuild.googleapis.com
gcloud services enable iam.googleapis.com
gcloud services enable logging.googleapis.com
gcloud services enable monitoring.googleapis.com
gcloud services enable storage-api.googleapis.com
gcloud services enable storage-component.googleapis.com
gcloud services enable sourcerepo.googleapis.com
function enable-service() {
  SERVICE=$1
  if [[ $(gcloud services list --format="value(serviceConfig.name)" \
                                --filter="serviceConfig.name:$SERVICE" 2>&1) != \
                                "$SERVICE" ]]; then
    echo "Enabling $SERVICE"
    gcloud services enable $SERVICE
  else
    echo "$SERVICE is already enabled"
  fi
}

enable-service container.googleapis.com

Client libraries you can use to connect to Google APIs

chaining gcloud commands

gcloud compute forwarding-rules list --format 'value(NAME)' \
| xargs -I {}  gcloud compute forwarding-rules delete {}  --region us-west1 -q

gcloud projects list --format='value(project_id)' \
| xargs -I {} gcloud compute addresses list --format='value(address)' --project {}  2>/dev/null | sort | uniq -c

gcloud compute instances list --filter=elasticsearch --format='value(NAME)' \
| xargs -I {} -p gcloud compute instances stop {}
gcloud compute instances list --filter=elasticsearch --format='value(INTERNAL_IP)' \
| xargs -I {} ssh {} "sudo chef-client"

# delete non default routes
gcloud compute routes list --filter="NOT network=default" --format='value(NAME)' \
| xargs -I {} gcloud compute routes delete -q {}

one-liner to purge untagged GCR images older than a given date

DATE=2018-10-01
IMAGE=<project_id>/<image_name>
gcloud container images list-tags gcr.io/$IMAGE --limit=unlimited --sort-by=TIMESTAMP   \
--filter="NOT tags:* AND timestamp.datetime < '${DATE}'" --format='get(digest)' | \
while read digest;do gcloud container images delete -q --force-delete-tags gcr.io/$IMAGE@$digest ;done

GKE

# create a private cluster
gcloud beta container clusters create private-cluster \
    --private-cluster \
    --master-ipv4-cidr 172.16.0.16/28 \
    --enable-ip-alias \
    --create-subnetwork ""


gcloud compute networks subnets create my-subnet \
    --network default \
    --range 10.0.4.0/22 \
    --enable-private-ip-google-access \
    --region us-central1 \
    --secondary-range my-svc-range=10.0.32.0/20,my-pod-range=10.4.0.0/14

gcloud beta container clusters create private-cluster2 \
    --private-cluster \
    --enable-ip-alias \
    --master-ipv4-cidr 172.16.0.32/28 \
    --subnetwork my-subnet \
    --services-secondary-range-name my-svc-range \
    --cluster-secondary-range-name my-pod-range

 gcloud container clusters update private-cluster2 \
    --enable-master-authorized-networks \
    --master-authorized-networks <external_ip_of_kubectl_instance>
# create a GKE cluster with the Cloud Run, Istio and HPA add-ons enabled
gcloud beta container clusters create run-gke \
  --addons HorizontalPodAutoscaling,HttpLoadBalancing,Istio,CloudRun \
  --scopes cloud-platform \
  --zone us-central1-a \
  --machine-type n1-standard-4 \
  --enable-stackdriver-kubernetes \
  --no-enable-ip-alias
# create a VPC native cluster
gcloud container clusters create k1 \
--network custom-ip-vpc --subnetwork subnet-alias \
--enable-ip-alias --cluster-ipv4-cidr=/16   --services-ipv4-cidr=/22
# get the GKE endpoint
gcloud container clusters describe mycluster --format='get(endpoint)'
# generate a ~/.kube/config for private cluster with private endpoint
gcloud container clusters get-credentials private-cluster --zone us-central1-a --internal-ip

create a GKE cluster with label and query it later

gcloud container clusters create example-cluster --labels env=dev
gcloud container clusters list --filter resourceLabels.env=dev

Cloud Run

# deploy a service on Cloud Run in us-central1 and allow unauthenticated users
gcloud beta run deploy --image gcr.io/${PROJECT_ID}/helloworld --platform managed --region us-central1 --allow-unauthenticated

# list services
gcloud beta run services list
# get endpoint url for a service
gcloud beta run services describe <service_name> --format="get(status.url)"

Machine Learning

brew install bat  # optional: bat only pretty-prints the JSON output below
gcloud ml language analyze-entities --content="Michelangelo Caravaggio, Italian painter, is known for 'The Calling of Saint Matthew'." | bat  -l json

Deployment Manager