Yandex v0.13.0 published on Tuesday, Feb 22, 2022 by Pulumi

yandex.MdbKafkaCluster


Manages a Kafka cluster within Yandex.Cloud. For more information, see the official documentation.

Example Usage

Example of creating a single-node Kafka cluster.

using Pulumi;
using Yandex = Pulumi.Yandex;

class MyStack : Stack
{
    public MyStack()
    {
        var fooVpcNetwork = new Yandex.VpcNetwork("fooVpcNetwork", new Yandex.VpcNetworkArgs
        {
        });
        var fooVpcSubnet = new Yandex.VpcSubnet("fooVpcSubnet", new Yandex.VpcSubnetArgs
        {
            NetworkId = fooVpcNetwork.Id,
            V4CidrBlocks = 
            {
                "10.5.0.0/24",
            },
            Zone = "ru-central1-a",
        });
        var fooMdbKafkaCluster = new Yandex.MdbKafkaCluster("fooMdbKafkaCluster", new Yandex.MdbKafkaClusterArgs
        {
            Config = new Yandex.Inputs.MdbKafkaClusterConfigArgs
            {
                AssignPublicIp = false,
                BrokersCount = 1,
                Kafka = new Yandex.Inputs.MdbKafkaClusterConfigKafkaArgs
                {
                    KafkaConfig = new Yandex.Inputs.MdbKafkaClusterConfigKafkaKafkaConfigArgs
                    {
                        CompressionType = "COMPRESSION_TYPE_ZSTD",
                        DefaultReplicationFactor = "1",
                        LogFlushIntervalMessages = "1024",
                        LogFlushIntervalMs = "1000",
                        LogFlushSchedulerIntervalMs = "1000",
                        LogPreallocate = true,
                        LogRetentionBytes = "1073741824",
                        LogRetentionHours = "168",
                        LogRetentionMinutes = "10080",
                        LogRetentionMs = "86400000",
                        LogSegmentBytes = "134217728",
                        NumPartitions = "10",
                    },
                    Resources = new Yandex.Inputs.MdbKafkaClusterConfigKafkaResourcesArgs
                    {
                        DiskSize = 32,
                        DiskTypeId = "network-ssd",
                        ResourcePresetId = "s2.micro",
                    },
                },
                SchemaRegistry = false,
                UnmanagedTopics = false,
                Version = "2.8",
                Zones = 
                {
                    "ru-central1-a",
                },
            },
            Environment = "PRESTABLE",
            NetworkId = fooVpcNetwork.Id,
            SubnetIds = 
            {
                fooVpcSubnet.Id,
            },
            Users = 
            {
                new Yandex.Inputs.MdbKafkaClusterUserArgs
                {
                    Name = "producer-application",
                    Password = "password",
                    Permissions = 
                    {
                        new Yandex.Inputs.MdbKafkaClusterUserPermissionArgs
                        {
                            Role = "ACCESS_ROLE_PRODUCER",
                            TopicName = "input",
                        },
                    },
                },
                new Yandex.Inputs.MdbKafkaClusterUserArgs
                {
                    Name = "worker",
                    Password = "password",
                    Permissions = 
                    {
                        new Yandex.Inputs.MdbKafkaClusterUserPermissionArgs
                        {
                            Role = "ACCESS_ROLE_CONSUMER",
                            TopicName = "input",
                        },
                        new Yandex.Inputs.MdbKafkaClusterUserPermissionArgs
                        {
                            Role = "ACCESS_ROLE_PRODUCER",
                            TopicName = "output",
                        },
                    },
                },
            },
        });
    }

}
package main

import (
	"github.com/pulumi/pulumi-yandex/sdk/go/yandex"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		fooVpcNetwork, err := yandex.NewVpcNetwork(ctx, "fooVpcNetwork", nil)
		if err != nil {
			return err
		}
		fooVpcSubnet, err := yandex.NewVpcSubnet(ctx, "fooVpcSubnet", &yandex.VpcSubnetArgs{
			NetworkId: fooVpcNetwork.ID(),
			V4CidrBlocks: pulumi.StringArray{
				pulumi.String("10.5.0.0/24"),
			},
			Zone: pulumi.String("ru-central1-a"),
		})
		if err != nil {
			return err
		}
		_, err = yandex.NewMdbKafkaCluster(ctx, "fooMdbKafkaCluster", &yandex.MdbKafkaClusterArgs{
			Config: &yandex.MdbKafkaClusterConfigArgs{
				AssignPublicIp: pulumi.Bool(false),
				BrokersCount:   pulumi.Int(1),
				Kafka: &yandex.MdbKafkaClusterConfigKafkaArgs{
					KafkaConfig: &yandex.MdbKafkaClusterConfigKafkaKafkaConfigArgs{
						CompressionType:             pulumi.String("COMPRESSION_TYPE_ZSTD"),
						DefaultReplicationFactor:    pulumi.String("1"),
						LogFlushIntervalMessages:    pulumi.String("1024"),
						LogFlushIntervalMs:          pulumi.String("1000"),
						LogFlushSchedulerIntervalMs: pulumi.String("1000"),
						LogPreallocate:              pulumi.Bool(true),
						LogRetentionBytes:           pulumi.String("1073741824"),
						LogRetentionHours:           pulumi.String("168"),
						LogRetentionMinutes:         pulumi.String("10080"),
						LogRetentionMs:              pulumi.String("86400000"),
						LogSegmentBytes:             pulumi.String("134217728"),
						NumPartitions:               pulumi.String("10"),
					},
					Resources: &yandex.MdbKafkaClusterConfigKafkaResourcesArgs{
						DiskSize:         pulumi.Int(32),
						DiskTypeId:       pulumi.String("network-ssd"),
						ResourcePresetId: pulumi.String("s2.micro"),
					},
				},
				SchemaRegistry:  pulumi.Bool(false),
				UnmanagedTopics: pulumi.Bool(false),
				Version:         pulumi.String("2.8"),
				Zones: pulumi.StringArray{
					pulumi.String("ru-central1-a"),
				},
			},
			Environment: pulumi.String("PRESTABLE"),
			NetworkId:   fooVpcNetwork.ID(),
			SubnetIds: pulumi.StringArray{
				fooVpcSubnet.ID(),
			},
			Users: yandex.MdbKafkaClusterUserArray{
				&yandex.MdbKafkaClusterUserArgs{
					Name:     pulumi.String("producer-application"),
					Password: pulumi.String("password"),
					Permissions: yandex.MdbKafkaClusterUserPermissionArray{
						&yandex.MdbKafkaClusterUserPermissionArgs{
							Role:      pulumi.String("ACCESS_ROLE_PRODUCER"),
							TopicName: pulumi.String("input"),
						},
					},
				},
				&yandex.MdbKafkaClusterUserArgs{
					Name:     pulumi.String("worker"),
					Password: pulumi.String("password"),
					Permissions: yandex.MdbKafkaClusterUserPermissionArray{
						&yandex.MdbKafkaClusterUserPermissionArgs{
							Role:      pulumi.String("ACCESS_ROLE_CONSUMER"),
							TopicName: pulumi.String("input"),
						},
						&yandex.MdbKafkaClusterUserPermissionArgs{
							Role:      pulumi.String("ACCESS_ROLE_PRODUCER"),
							TopicName: pulumi.String("output"),
						},
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}

import * as pulumi from "@pulumi/pulumi";
import * as yandex from "@pulumi/yandex";

const fooVpcNetwork = new yandex.VpcNetwork("foo", {});
const fooVpcSubnet = new yandex.VpcSubnet("foo", {
    networkId: fooVpcNetwork.id,
    v4CidrBlocks: ["10.5.0.0/24"],
    zone: "ru-central1-a",
});
const fooMdbKafkaCluster = new yandex.MdbKafkaCluster("foo", {
    config: {
        assignPublicIp: false,
        brokersCount: 1,
        kafka: {
            kafkaConfig: {
                compressionType: "COMPRESSION_TYPE_ZSTD",
                defaultReplicationFactor: "1",
                logFlushIntervalMessages: "1024",
                logFlushIntervalMs: "1000",
                logFlushSchedulerIntervalMs: "1000",
                logPreallocate: true,
                logRetentionBytes: "1.073741824e+09",
                logRetentionHours: "168",
                logRetentionMinutes: "10080",
                logRetentionMs: "8.64e+07",
                logSegmentBytes: "1.34217728e+08",
                numPartitions: "10",
            },
            resources: {
                diskSize: 32,
                diskTypeId: "network-ssd",
                resourcePresetId: "s2.micro",
            },
        },
        schemaRegistry: false,
        unmanagedTopics: false,
        version: "2.8",
        zones: ["ru-central1-a"],
    },
    environment: "PRESTABLE",
    networkId: fooVpcNetwork.id,
    subnetIds: [fooVpcSubnet.id],
    users: [
        {
            name: "producer-application",
            password: "password",
            permissions: [{
                role: "ACCESS_ROLE_PRODUCER",
                topicName: "input",
            }],
        },
        {
            name: "worker",
            password: "password",
            permissions: [
                {
                    role: "ACCESS_ROLE_CONSUMER",
                    topicName: "input",
                },
                {
                    role: "ACCESS_ROLE_PRODUCER",
                    topicName: "output",
                },
            ],
        },
    ],
});
import pulumi
import pulumi_yandex as yandex

foo_vpc_network = yandex.VpcNetwork("fooVpcNetwork")
foo_vpc_subnet = yandex.VpcSubnet("fooVpcSubnet",
    network_id=foo_vpc_network.id,
    v4_cidr_blocks=["10.5.0.0/24"],
    zone="ru-central1-a")
foo_mdb_kafka_cluster = yandex.MdbKafkaCluster("fooMdbKafkaCluster",
    config=yandex.MdbKafkaClusterConfigArgs(
        assign_public_ip=False,
        brokers_count=1,
        kafka=yandex.MdbKafkaClusterConfigKafkaArgs(
            kafka_config=yandex.MdbKafkaClusterConfigKafkaKafkaConfigArgs(
                compression_type="COMPRESSION_TYPE_ZSTD",
                default_replication_factor="1",
                log_flush_interval_messages="1024",
                log_flush_interval_ms="1000",
                log_flush_scheduler_interval_ms="1000",
                log_preallocate=True,
                log_retention_bytes="1073741824",
                log_retention_hours="168",
                log_retention_minutes="10080",
                log_retention_ms="86400000",
                log_segment_bytes="134217728",
                num_partitions="10",
            ),
            resources=yandex.MdbKafkaClusterConfigKafkaResourcesArgs(
                disk_size=32,
                disk_type_id="network-ssd",
                resource_preset_id="s2.micro",
            ),
        ),
        schema_registry=False,
        unmanaged_topics=False,
        version="2.8",
        zones=["ru-central1-a"],
    ),
    environment="PRESTABLE",
    network_id=foo_vpc_network.id,
    subnet_ids=[foo_vpc_subnet.id],
    users=[
        yandex.MdbKafkaClusterUserArgs(
            name="producer-application",
            password="password",
            permissions=[yandex.MdbKafkaClusterUserPermissionArgs(
                role="ACCESS_ROLE_PRODUCER",
                topic_name="input",
            )],
        ),
        yandex.MdbKafkaClusterUserArgs(
            name="worker",
            password="password",
            permissions=[
                yandex.MdbKafkaClusterUserPermissionArgs(
                    role="ACCESS_ROLE_CONSUMER",
                    topic_name="input",
                ),
                yandex.MdbKafkaClusterUserPermissionArgs(
                    role="ACCESS_ROLE_PRODUCER",
                    topic_name="output",
                ),
            ],
        ),
    ])

Example of creating a highly available Kafka cluster with two brokers per availability zone (6 brokers + 3 ZooKeeper hosts).

using Pulumi;
using Yandex = Pulumi.Yandex;

class MyStack : Stack
{
    public MyStack()
    {
        var fooVpcNetwork = new Yandex.VpcNetwork("fooVpcNetwork", new Yandex.VpcNetworkArgs
        {
        });
        var fooVpcSubnet = new Yandex.VpcSubnet("fooVpcSubnet", new Yandex.VpcSubnetArgs
        {
            NetworkId = fooVpcNetwork.Id,
            V4CidrBlocks = 
            {
                "10.1.0.0/24",
            },
            Zone = "ru-central1-a",
        });
        var bar = new Yandex.VpcSubnet("bar", new Yandex.VpcSubnetArgs
        {
            NetworkId = fooVpcNetwork.Id,
            V4CidrBlocks = 
            {
                "10.2.0.0/24",
            },
            Zone = "ru-central1-b",
        });
        var baz = new Yandex.VpcSubnet("baz", new Yandex.VpcSubnetArgs
        {
            NetworkId = fooVpcNetwork.Id,
            V4CidrBlocks = 
            {
                "10.3.0.0/24",
            },
            Zone = "ru-central1-c",
        });
        var fooMdbKafkaCluster = new Yandex.MdbKafkaCluster("fooMdbKafkaCluster", new Yandex.MdbKafkaClusterArgs
        {
            Config = new Yandex.Inputs.MdbKafkaClusterConfigArgs
            {
                AssignPublicIp = true,
                BrokersCount = 2,
                Kafka = new Yandex.Inputs.MdbKafkaClusterConfigKafkaArgs
                {
                    KafkaConfig = new Yandex.Inputs.MdbKafkaClusterConfigKafkaKafkaConfigArgs
                    {
                        CompressionType = "COMPRESSION_TYPE_ZSTD",
                        DefaultReplicationFactor = "6",
                        LogFlushIntervalMessages = "1024",
                        LogFlushIntervalMs = "1000",
                        LogFlushSchedulerIntervalMs = "1000",
                        LogPreallocate = true,
                        LogRetentionBytes = "1073741824",
                        LogRetentionHours = "168",
                        LogRetentionMinutes = "10080",
                        LogRetentionMs = "86400000",
                        LogSegmentBytes = "134217728",
                        NumPartitions = "10",
                    },
                    Resources = new Yandex.Inputs.MdbKafkaClusterConfigKafkaResourcesArgs
                    {
                        DiskSize = 128,
                        DiskTypeId = "network-ssd",
                        ResourcePresetId = "s2.medium",
                    },
                },
                SchemaRegistry = false,
                UnmanagedTopics = false,
                Version = "2.8",
                Zones = 
                {
                    "ru-central1-a",
                    "ru-central1-b",
                    "ru-central1-c",
                },
                Zookeeper = new Yandex.Inputs.MdbKafkaClusterConfigZookeeperArgs
                {
                    Resources = new Yandex.Inputs.MdbKafkaClusterConfigZookeeperResourcesArgs
                    {
                        DiskSize = 20,
                        DiskTypeId = "network-ssd",
                        ResourcePresetId = "s2.micro",
                    },
                },
            },
            Environment = "PRESTABLE",
            NetworkId = fooVpcNetwork.Id,
            SubnetIds = 
            {
                fooVpcSubnet.Id,
                bar.Id,
                baz.Id,
            },
            Users = 
            {
                new Yandex.Inputs.MdbKafkaClusterUserArgs
                {
                    Name = "producer-application",
                    Password = "password",
                    Permissions = 
                    {
                        new Yandex.Inputs.MdbKafkaClusterUserPermissionArgs
                        {
                            Role = "ACCESS_ROLE_PRODUCER",
                            TopicName = "input",
                        },
                    },
                },
                new Yandex.Inputs.MdbKafkaClusterUserArgs
                {
                    Name = "worker",
                    Password = "password",
                    Permissions = 
                    {
                        new Yandex.Inputs.MdbKafkaClusterUserPermissionArgs
                        {
                            Role = "ACCESS_ROLE_CONSUMER",
                            TopicName = "input",
                        },
                        new Yandex.Inputs.MdbKafkaClusterUserPermissionArgs
                        {
                            Role = "ACCESS_ROLE_PRODUCER",
                            TopicName = "output",
                        },
                    },
                },
            },
        });
    }

}
package main

import (
	"github.com/pulumi/pulumi-yandex/sdk/go/yandex"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		fooVpcNetwork, err := yandex.NewVpcNetwork(ctx, "fooVpcNetwork", nil)
		if err != nil {
			return err
		}
		fooVpcSubnet, err := yandex.NewVpcSubnet(ctx, "fooVpcSubnet", &yandex.VpcSubnetArgs{
			NetworkId: fooVpcNetwork.ID(),
			V4CidrBlocks: pulumi.StringArray{
				pulumi.String("10.1.0.0/24"),
			},
			Zone: pulumi.String("ru-central1-a"),
		})
		if err != nil {
			return err
		}
		bar, err := yandex.NewVpcSubnet(ctx, "bar", &yandex.VpcSubnetArgs{
			NetworkId: fooVpcNetwork.ID(),
			V4CidrBlocks: pulumi.StringArray{
				pulumi.String("10.2.0.0/24"),
			},
			Zone: pulumi.String("ru-central1-b"),
		})
		if err != nil {
			return err
		}
		baz, err := yandex.NewVpcSubnet(ctx, "baz", &yandex.VpcSubnetArgs{
			NetworkId: fooVpcNetwork.ID(),
			V4CidrBlocks: pulumi.StringArray{
				pulumi.String("10.3.0.0/24"),
			},
			Zone: pulumi.String("ru-central1-c"),
		})
		if err != nil {
			return err
		}
		_, err = yandex.NewMdbKafkaCluster(ctx, "fooMdbKafkaCluster", &yandex.MdbKafkaClusterArgs{
			Config: &yandex.MdbKafkaClusterConfigArgs{
				AssignPublicIp: pulumi.Bool(true),
				BrokersCount:   pulumi.Int(2),
				Kafka: &yandex.MdbKafkaClusterConfigKafkaArgs{
					KafkaConfig: &yandex.MdbKafkaClusterConfigKafkaKafkaConfigArgs{
						CompressionType:             pulumi.String("COMPRESSION_TYPE_ZSTD"),
						DefaultReplicationFactor:    pulumi.String("6"),
						LogFlushIntervalMessages:    pulumi.String("1024"),
						LogFlushIntervalMs:          pulumi.String("1000"),
						LogFlushSchedulerIntervalMs: pulumi.String("1000"),
						LogPreallocate:              pulumi.Bool(true),
						LogRetentionBytes:           pulumi.String("1073741824"),
						LogRetentionHours:           pulumi.String("168"),
						LogRetentionMinutes:         pulumi.String("10080"),
						LogRetentionMs:              pulumi.String("86400000"),
						LogSegmentBytes:             pulumi.String("134217728"),
						NumPartitions:               pulumi.String("10"),
					},
					Resources: &yandex.MdbKafkaClusterConfigKafkaResourcesArgs{
						DiskSize:         pulumi.Int(128),
						DiskTypeId:       pulumi.String("network-ssd"),
						ResourcePresetId: pulumi.String("s2.medium"),
					},
				},
				SchemaRegistry:  pulumi.Bool(false),
				UnmanagedTopics: pulumi.Bool(false),
				Version:         pulumi.String("2.8"),
				Zones: pulumi.StringArray{
					pulumi.String("ru-central1-a"),
					pulumi.String("ru-central1-b"),
					pulumi.String("ru-central1-c"),
				},
				Zookeeper: &yandex.MdbKafkaClusterConfigZookeeperArgs{
					Resources: &yandex.MdbKafkaClusterConfigZookeeperResourcesArgs{
						DiskSize:         pulumi.Int(20),
						DiskTypeId:       pulumi.String("network-ssd"),
						ResourcePresetId: pulumi.String("s2.micro"),
					},
				},
			},
			Environment: pulumi.String("PRESTABLE"),
			NetworkId:   fooVpcNetwork.ID(),
			SubnetIds: pulumi.StringArray{
				fooVpcSubnet.ID(),
				bar.ID(),
				baz.ID(),
			},
			Users: yandex.MdbKafkaClusterUserArray{
				&yandex.MdbKafkaClusterUserArgs{
					Name:     pulumi.String("producer-application"),
					Password: pulumi.String("password"),
					Permissions: yandex.MdbKafkaClusterUserPermissionArray{
						&yandex.MdbKafkaClusterUserPermissionArgs{
							Role:      pulumi.String("ACCESS_ROLE_PRODUCER"),
							TopicName: pulumi.String("input"),
						},
					},
				},
				&yandex.MdbKafkaClusterUserArgs{
					Name:     pulumi.String("worker"),
					Password: pulumi.String("password"),
					Permissions: yandex.MdbKafkaClusterUserPermissionArray{
						&yandex.MdbKafkaClusterUserPermissionArgs{
							Role:      pulumi.String("ACCESS_ROLE_CONSUMER"),
							TopicName: pulumi.String("input"),
						},
						&yandex.MdbKafkaClusterUserPermissionArgs{
							Role:      pulumi.String("ACCESS_ROLE_PRODUCER"),
							TopicName: pulumi.String("output"),
						},
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}

import * as pulumi from "@pulumi/pulumi";
import * as yandex from "@pulumi/yandex";

const fooVpcNetwork = new yandex.VpcNetwork("foo", {});
const fooVpcSubnet = new yandex.VpcSubnet("foo", {
    networkId: fooVpcNetwork.id,
    v4CidrBlocks: ["10.1.0.0/24"],
    zone: "ru-central1-a",
});
const bar = new yandex.VpcSubnet("bar", {
    networkId: fooVpcNetwork.id,
    v4CidrBlocks: ["10.2.0.0/24"],
    zone: "ru-central1-b",
});
const baz = new yandex.VpcSubnet("baz", {
    networkId: fooVpcNetwork.id,
    v4CidrBlocks: ["10.3.0.0/24"],
    zone: "ru-central1-c",
});
const fooMdbKafkaCluster = new yandex.MdbKafkaCluster("foo", {
    config: {
        assignPublicIp: true,
        brokersCount: 2,
        kafka: {
            kafkaConfig: {
                compressionType: "COMPRESSION_TYPE_ZSTD",
                defaultReplicationFactor: "6",
                logFlushIntervalMessages: "1024",
                logFlushIntervalMs: "1000",
                logFlushSchedulerIntervalMs: "1000",
                logPreallocate: true,
                logRetentionBytes: "1.073741824e+09",
                logRetentionHours: "168",
                logRetentionMinutes: "10080",
                logRetentionMs: "8.64e+07",
                logSegmentBytes: "1.34217728e+08",
                numPartitions: "10",
            },
            resources: {
                diskSize: 128,
                diskTypeId: "network-ssd",
                resourcePresetId: "s2.medium",
            },
        },
        schemaRegistry: false,
        unmanagedTopics: false,
        version: "2.8",
        zones: [
            "ru-central1-a",
            "ru-central1-b",
            "ru-central1-c",
        ],
        zookeeper: {
            resources: {
                diskSize: 20,
                diskTypeId: "network-ssd",
                resourcePresetId: "s2.micro",
            },
        },
    },
    environment: "PRESTABLE",
    networkId: fooVpcNetwork.id,
    subnetIds: [
        fooVpcSubnet.id,
        bar.id,
        baz.id,
    ],
    users: [
        {
            name: "producer-application",
            password: "password",
            permissions: [{
                role: "ACCESS_ROLE_PRODUCER",
                topicName: "input",
            }],
        },
        {
            name: "worker",
            password: "password",
            permissions: [
                {
                    role: "ACCESS_ROLE_CONSUMER",
                    topicName: "input",
                },
                {
                    role: "ACCESS_ROLE_PRODUCER",
                    topicName: "output",
                },
            ],
        },
    ],
});
import pulumi
import pulumi_yandex as yandex

foo_vpc_network = yandex.VpcNetwork("fooVpcNetwork")
foo_vpc_subnet = yandex.VpcSubnet("fooVpcSubnet",
    network_id=foo_vpc_network.id,
    v4_cidr_blocks=["10.1.0.0/24"],
    zone="ru-central1-a")
bar = yandex.VpcSubnet("bar",
    network_id=foo_vpc_network.id,
    v4_cidr_blocks=["10.2.0.0/24"],
    zone="ru-central1-b")
baz = yandex.VpcSubnet("baz",
    network_id=foo_vpc_network.id,
    v4_cidr_blocks=["10.3.0.0/24"],
    zone="ru-central1-c")
foo_mdb_kafka_cluster = yandex.MdbKafkaCluster("fooMdbKafkaCluster",
    config=yandex.MdbKafkaClusterConfigArgs(
        assign_public_ip=True,
        brokers_count=2,
        kafka=yandex.MdbKafkaClusterConfigKafkaArgs(
            kafka_config=yandex.MdbKafkaClusterConfigKafkaKafkaConfigArgs(
                compression_type="COMPRESSION_TYPE_ZSTD",
                default_replication_factor="6",
                log_flush_interval_messages="1024",
                log_flush_interval_ms="1000",
                log_flush_scheduler_interval_ms="1000",
                log_preallocate=True,
                log_retention_bytes="1073741824",
                log_retention_hours="168",
                log_retention_minutes="10080",
                log_retention_ms="86400000",
                log_segment_bytes="134217728",
                num_partitions="10",
            ),
            resources=yandex.MdbKafkaClusterConfigKafkaResourcesArgs(
                disk_size=128,
                disk_type_id="network-ssd",
                resource_preset_id="s2.medium",
            ),
        ),
        schema_registry=False,
        unmanaged_topics=False,
        version="2.8",
        zones=[
            "ru-central1-a",
            "ru-central1-b",
            "ru-central1-c",
        ],
        zookeeper=yandex.MdbKafkaClusterConfigZookeeperArgs(
            resources=yandex.MdbKafkaClusterConfigZookeeperResourcesArgs(
                disk_size=20,
                disk_type_id="network-ssd",
                resource_preset_id="s2.micro",
            ),
        ),
    ),
    environment="PRESTABLE",
    network_id=foo_vpc_network.id,
    subnet_ids=[
        foo_vpc_subnet.id,
        bar.id,
        baz.id,
    ],
    users=[
        yandex.MdbKafkaClusterUserArgs(
            name="producer-application",
            password="password",
            permissions=[yandex.MdbKafkaClusterUserPermissionArgs(
                role="ACCESS_ROLE_PRODUCER",
                topic_name="input",
            )],
        ),
        yandex.MdbKafkaClusterUserArgs(
            name="worker",
            password="password",
            permissions=[
                yandex.MdbKafkaClusterUserPermissionArgs(
                    role="ACCESS_ROLE_CONSUMER",
                    topic_name="input",
                ),
                yandex.MdbKafkaClusterUserPermissionArgs(
                    role="ACCESS_ROLE_PRODUCER",
                    topic_name="output",
                ),
            ],
        ),
    ])

Create MdbKafkaCluster Resource

Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

Constructor syntax

new MdbKafkaCluster(name: string, args: MdbKafkaClusterArgs, opts?: CustomResourceOptions);
@overload
def MdbKafkaCluster(resource_name: str,
                    args: MdbKafkaClusterArgs,
                    opts: Optional[ResourceOptions] = None)

@overload
def MdbKafkaCluster(resource_name: str,
                    opts: Optional[ResourceOptions] = None,
                    config: Optional[MdbKafkaClusterConfigArgs] = None,
                    network_id: Optional[str] = None,
                    environment: Optional[str] = None,
                    description: Optional[str] = None,
                    folder_id: Optional[str] = None,
                    host_group_ids: Optional[Sequence[str]] = None,
                    labels: Optional[Mapping[str, str]] = None,
                    maintenance_window: Optional[MdbKafkaClusterMaintenanceWindowArgs] = None,
                    name: Optional[str] = None,
                    deletion_protection: Optional[bool] = None,
                    security_group_ids: Optional[Sequence[str]] = None,
                    subnet_ids: Optional[Sequence[str]] = None,
                    topics: Optional[Sequence[MdbKafkaClusterTopicArgs]] = None,
                    users: Optional[Sequence[MdbKafkaClusterUserArgs]] = None)
func NewMdbKafkaCluster(ctx *Context, name string, args MdbKafkaClusterArgs, opts ...ResourceOption) (*MdbKafkaCluster, error)
public MdbKafkaCluster(string name, MdbKafkaClusterArgs args, CustomResourceOptions? opts = null)
public MdbKafkaCluster(String name, MdbKafkaClusterArgs args)
public MdbKafkaCluster(String name, MdbKafkaClusterArgs args, CustomResourceOptions options)
type: yandex:MdbKafkaCluster
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.

Parameters

name This property is required. string
The unique name of the resource.
args This property is required. MdbKafkaClusterArgs
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.
resource_name This property is required. str
The unique name of the resource.
args This property is required. MdbKafkaClusterArgs
The arguments to resource properties.
opts ResourceOptions
Bag of options to control resource's behavior.
ctx Context
Context object for the current deployment.
name This property is required. string
The unique name of the resource.
args This property is required. MdbKafkaClusterArgs
The arguments to resource properties.
opts ResourceOption
Bag of options to control resource's behavior.
name This property is required. string
The unique name of the resource.
args This property is required. MdbKafkaClusterArgs
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.
name This property is required. String
The unique name of the resource.
args This property is required. MdbKafkaClusterArgs
The arguments to resource properties.
options CustomResourceOptions
Bag of options to control resource's behavior.

Constructor example

The following reference example uses placeholder values for all input properties.

var mdbKafkaClusterResource = new Yandex.MdbKafkaCluster("mdbKafkaClusterResource", new()
{
    Config = new Yandex.Inputs.MdbKafkaClusterConfigArgs
    {
        Kafka = new Yandex.Inputs.MdbKafkaClusterConfigKafkaArgs
        {
            Resources = new Yandex.Inputs.MdbKafkaClusterConfigKafkaResourcesArgs
            {
                DiskSize = 0,
                DiskTypeId = "string",
                ResourcePresetId = "string",
            },
            KafkaConfig = new Yandex.Inputs.MdbKafkaClusterConfigKafkaKafkaConfigArgs
            {
                AutoCreateTopicsEnable = false,
                CompressionType = "string",
                DefaultReplicationFactor = "string",
                LogFlushIntervalMessages = "string",
                LogFlushIntervalMs = "string",
                LogFlushSchedulerIntervalMs = "string",
                LogPreallocate = false,
                LogRetentionBytes = "string",
                LogRetentionHours = "string",
                LogRetentionMinutes = "string",
                LogRetentionMs = "string",
                LogSegmentBytes = "string",
                NumPartitions = "string",
                SocketReceiveBufferBytes = "string",
                SocketSendBufferBytes = "string",
            },
        },
        Version = "string",
        Zones = new[]
        {
            "string",
        },
        AssignPublicIp = false,
        BrokersCount = 0,
        SchemaRegistry = false,
        UnmanagedTopics = false,
        Zookeeper = new Yandex.Inputs.MdbKafkaClusterConfigZookeeperArgs
        {
            Resources = new Yandex.Inputs.MdbKafkaClusterConfigZookeeperResourcesArgs
            {
                DiskSize = 0,
                DiskTypeId = "string",
                ResourcePresetId = "string",
            },
        },
    },
    NetworkId = "string",
    Environment = "string",
    Description = "string",
    FolderId = "string",
    HostGroupIds = new[]
    {
        "string",
    },
    Labels = 
    {
        { "string", "string" },
    },
    MaintenanceWindow = new Yandex.Inputs.MdbKafkaClusterMaintenanceWindowArgs
    {
        Type = "string",
        Day = "string",
        Hour = 0,
    },
    Name = "string",
    DeletionProtection = false,
    SecurityGroupIds = new[]
    {
        "string",
    },
    SubnetIds = new[]
    {
        "string",
    },
    Users = new[]
    {
        new Yandex.Inputs.MdbKafkaClusterUserArgs
        {
            Name = "string",
            Password = "string",
            Permissions = new[]
            {
                new Yandex.Inputs.MdbKafkaClusterUserPermissionArgs
                {
                    Role = "string",
                    TopicName = "string",
                },
            },
        },
    },
});
example, err := yandex.NewMdbKafkaCluster(ctx, "mdbKafkaClusterResource", &yandex.MdbKafkaClusterArgs{
	Config: &yandex.MdbKafkaClusterConfigArgs{
		Kafka: &yandex.MdbKafkaClusterConfigKafkaArgs{
			Resources: &yandex.MdbKafkaClusterConfigKafkaResourcesArgs{
				DiskSize:         pulumi.Int(0),
				DiskTypeId:       pulumi.String("string"),
				ResourcePresetId: pulumi.String("string"),
			},
			KafkaConfig: &yandex.MdbKafkaClusterConfigKafkaKafkaConfigArgs{
				AutoCreateTopicsEnable:      pulumi.Bool(false),
				CompressionType:             pulumi.String("string"),
				DefaultReplicationFactor:    pulumi.String("string"),
				LogFlushIntervalMessages:    pulumi.String("string"),
				LogFlushIntervalMs:          pulumi.String("string"),
				LogFlushSchedulerIntervalMs: pulumi.String("string"),
				LogPreallocate:              pulumi.Bool(false),
				LogRetentionBytes:           pulumi.String("string"),
				LogRetentionHours:           pulumi.String("string"),
				LogRetentionMinutes:         pulumi.String("string"),
				LogRetentionMs:              pulumi.String("string"),
				LogSegmentBytes:             pulumi.String("string"),
				NumPartitions:               pulumi.String("string"),
				SocketReceiveBufferBytes:    pulumi.String("string"),
				SocketSendBufferBytes:       pulumi.String("string"),
			},
		},
		Version: pulumi.String("string"),
		Zones: pulumi.StringArray{
			pulumi.String("string"),
		},
		AssignPublicIp:  pulumi.Bool(false),
		BrokersCount:    pulumi.Int(0),
		SchemaRegistry:  pulumi.Bool(false),
		UnmanagedTopics: pulumi.Bool(false),
		Zookeeper: &yandex.MdbKafkaClusterConfigZookeeperArgs{
			Resources: &yandex.MdbKafkaClusterConfigZookeeperResourcesArgs{
				DiskSize:         pulumi.Int(0),
				DiskTypeId:       pulumi.String("string"),
				ResourcePresetId: pulumi.String("string"),
			},
		},
	},
	NetworkId:   pulumi.String("string"),
	Environment: pulumi.String("string"),
	Description: pulumi.String("string"),
	FolderId:    pulumi.String("string"),
	HostGroupIds: pulumi.StringArray{
		pulumi.String("string"),
	},
	Labels: pulumi.StringMap{
		"string": pulumi.String("string"),
	},
	MaintenanceWindow: &yandex.MdbKafkaClusterMaintenanceWindowArgs{
		Type: pulumi.String("string"),
		Day:  pulumi.String("string"),
		Hour: pulumi.Int(0),
	},
	Name:               pulumi.String("string"),
	DeletionProtection: pulumi.Bool(false),
	SecurityGroupIds: pulumi.StringArray{
		pulumi.String("string"),
	},
	SubnetIds: pulumi.StringArray{
		pulumi.String("string"),
	},
	Users: yandex.MdbKafkaClusterUserArray{
		&yandex.MdbKafkaClusterUserArgs{
			Name:     pulumi.String("string"),
			Password: pulumi.String("string"),
			Permissions: yandex.MdbKafkaClusterUserPermissionArray{
				&yandex.MdbKafkaClusterUserPermissionArgs{
					Role:      pulumi.String("string"),
					TopicName: pulumi.String("string"),
				},
			},
		},
	},
})
var mdbKafkaClusterResource = new MdbKafkaCluster("mdbKafkaClusterResource", MdbKafkaClusterArgs.builder()
    .config(MdbKafkaClusterConfigArgs.builder()
        .kafka(MdbKafkaClusterConfigKafkaArgs.builder()
            .resources(MdbKafkaClusterConfigKafkaResourcesArgs.builder()
                .diskSize(0)
                .diskTypeId("string")
                .resourcePresetId("string")
                .build())
            .kafkaConfig(MdbKafkaClusterConfigKafkaKafkaConfigArgs.builder()
                .autoCreateTopicsEnable(false)
                .compressionType("string")
                .defaultReplicationFactor("string")
                .logFlushIntervalMessages("string")
                .logFlushIntervalMs("string")
                .logFlushSchedulerIntervalMs("string")
                .logPreallocate(false)
                .logRetentionBytes("string")
                .logRetentionHours("string")
                .logRetentionMinutes("string")
                .logRetentionMs("string")
                .logSegmentBytes("string")
                .numPartitions("string")
                .socketReceiveBufferBytes("string")
                .socketSendBufferBytes("string")
                .build())
            .build())
        .version("string")
        .zones("string")
        .assignPublicIp(false)
        .brokersCount(0)
        .schemaRegistry(false)
        .unmanagedTopics(false)
        .zookeeper(MdbKafkaClusterConfigZookeeperArgs.builder()
            .resources(MdbKafkaClusterConfigZookeeperResourcesArgs.builder()
                .diskSize(0)
                .diskTypeId("string")
                .resourcePresetId("string")
                .build())
            .build())
        .build())
    .networkId("string")
    .environment("string")
    .description("string")
    .folderId("string")
    .hostGroupIds("string")
    .labels(Map.of("string", "string"))
    .maintenanceWindow(MdbKafkaClusterMaintenanceWindowArgs.builder()
        .type("string")
        .day("string")
        .hour(0)
        .build())
    .name("string")
    .deletionProtection(false)
    .securityGroupIds("string")
    .subnetIds("string")
    .users(MdbKafkaClusterUserArgs.builder()
        .name("string")
        .password("string")
        .permissions(MdbKafkaClusterUserPermissionArgs.builder()
            .role("string")
            .topicName("string")
            .build())
        .build())
    .build());
mdb_kafka_cluster_resource = yandex.MdbKafkaCluster("mdbKafkaClusterResource",
    config={
        "kafka": {
            "resources": {
                "disk_size": 0,
                "disk_type_id": "string",
                "resource_preset_id": "string",
            },
            "kafka_config": {
                "auto_create_topics_enable": False,
                "compression_type": "string",
                "default_replication_factor": "string",
                "log_flush_interval_messages": "string",
                "log_flush_interval_ms": "string",
                "log_flush_scheduler_interval_ms": "string",
                "log_preallocate": False,
                "log_retention_bytes": "string",
                "log_retention_hours": "string",
                "log_retention_minutes": "string",
                "log_retention_ms": "string",
                "log_segment_bytes": "string",
                "num_partitions": "string",
                "socket_receive_buffer_bytes": "string",
                "socket_send_buffer_bytes": "string",
            },
        },
        "version": "string",
        "zones": ["string"],
        "assign_public_ip": False,
        "brokers_count": 0,
        "schema_registry": False,
        "unmanaged_topics": False,
        "zookeeper": {
            "resources": {
                "disk_size": 0,
                "disk_type_id": "string",
                "resource_preset_id": "string",
            },
        },
    },
    network_id="string",
    environment="string",
    description="string",
    folder_id="string",
    host_group_ids=["string"],
    labels={
        "string": "string",
    },
    maintenance_window={
        "type": "string",
        "day": "string",
        "hour": 0,
    },
    name="string",
    deletion_protection=False,
    security_group_ids=["string"],
    subnet_ids=["string"],
    users=[{
        "name": "string",
        "password": "string",
        "permissions": [{
            "role": "string",
            "topic_name": "string",
        }],
    }])
const mdbKafkaClusterResource = new yandex.MdbKafkaCluster("mdbKafkaClusterResource", {
    config: {
        kafka: {
            resources: {
                diskSize: 0,
                diskTypeId: "string",
                resourcePresetId: "string",
            },
            kafkaConfig: {
                autoCreateTopicsEnable: false,
                compressionType: "string",
                defaultReplicationFactor: "string",
                logFlushIntervalMessages: "string",
                logFlushIntervalMs: "string",
                logFlushSchedulerIntervalMs: "string",
                logPreallocate: false,
                logRetentionBytes: "string",
                logRetentionHours: "string",
                logRetentionMinutes: "string",
                logRetentionMs: "string",
                logSegmentBytes: "string",
                numPartitions: "string",
                socketReceiveBufferBytes: "string",
                socketSendBufferBytes: "string",
            },
        },
        version: "string",
        zones: ["string"],
        assignPublicIp: false,
        brokersCount: 0,
        schemaRegistry: false,
        unmanagedTopics: false,
        zookeeper: {
            resources: {
                diskSize: 0,
                diskTypeId: "string",
                resourcePresetId: "string",
            },
        },
    },
    networkId: "string",
    environment: "string",
    description: "string",
    folderId: "string",
    hostGroupIds: ["string"],
    labels: {
        string: "string",
    },
    maintenanceWindow: {
        type: "string",
        day: "string",
        hour: 0,
    },
    name: "string",
    deletionProtection: false,
    securityGroupIds: ["string"],
    subnetIds: ["string"],
    users: [{
        name: "string",
        password: "string",
        permissions: [{
            role: "string",
            topicName: "string",
        }],
    }],
});
type: yandex:MdbKafkaCluster
properties:
    config:
        assignPublicIp: false
        brokersCount: 0
        kafka:
            kafkaConfig:
                autoCreateTopicsEnable: false
                compressionType: string
                defaultReplicationFactor: string
                logFlushIntervalMessages: string
                logFlushIntervalMs: string
                logFlushSchedulerIntervalMs: string
                logPreallocate: false
                logRetentionBytes: string
                logRetentionHours: string
                logRetentionMinutes: string
                logRetentionMs: string
                logSegmentBytes: string
                numPartitions: string
                socketReceiveBufferBytes: string
                socketSendBufferBytes: string
            resources:
                diskSize: 0
                diskTypeId: string
                resourcePresetId: string
        schemaRegistry: false
        unmanagedTopics: false
        version: string
        zones:
            - string
        zookeeper:
            resources:
                diskSize: 0
                diskTypeId: string
                resourcePresetId: string
    deletionProtection: false
    description: string
    environment: string
    folderId: string
    hostGroupIds:
        - string
    labels:
        string: string
    maintenanceWindow:
        day: string
        hour: 0
        type: string
    name: string
    networkId: string
    securityGroupIds:
        - string
    subnetIds:
        - string
    users:
        - name: string
          password: string
          permissions:
            - role: string
              topicName: string

MdbKafkaCluster Resource Properties

To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

Inputs

In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
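For instance, here is a minimal sketch of the two equivalent spellings of the maintenance_window input; the WEEKLY/MON/hour values are illustrative assumptions, not required settings:

import pulumi_yandex as yandex

# Typed argument class:
maintenance_window_args = yandex.MdbKafkaClusterMaintenanceWindowArgs(
    type="WEEKLY",
    day="MON",
    hour=1,
)

# Equivalent dictionary literal with the same keys:
maintenance_window_dict = {
    "type": "WEEKLY",
    "day": "MON",
    "hour": 1,
}

# Either value can be passed as maintenance_window= when constructing yandex.MdbKafkaCluster.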

The MdbKafkaCluster resource accepts the following input properties:

Config This property is required. MdbKafkaClusterConfig
Configuration of the Kafka cluster. The structure is documented below.
NetworkId This property is required. string
ID of the network, to which the Kafka cluster belongs.
DeletionProtection bool
Inhibits deletion of the cluster. Can be either true or false.
Description string
Description of the Kafka cluster.
Environment string
Deployment environment of the Kafka cluster. Can be either PRESTABLE or PRODUCTION. The default is PRODUCTION.
FolderId string
The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
HostGroupIds List<string>
A list of IDs of the host groups to place VMs of the cluster on.
Labels Dictionary<string, string>
A set of key/value label pairs to assign to the Kafka cluster.
MaintenanceWindow MdbKafkaClusterMaintenanceWindow
Maintenance policy of the Kafka cluster. The structure is documented below.
Name string
Name of the Kafka cluster.
SecurityGroupIds List<string>
Security group ids, to which the Kafka cluster belongs.
SubnetIds List<string>
IDs of the subnets, to which the Kafka cluster belongs.
Topics List<MdbKafkaClusterTopic>
To manage topics, please switch to using a separate resource type yandex.MdbKafkaTopic.

Deprecated: to manage topics, please switch to using a separate resource type yandex_mdb_kafka_topic

Users List<MdbKafkaClusterUser>
A user of the Kafka cluster. The structure is documented below.
Config This property is required. MdbKafkaClusterConfigArgs
Configuration of the Kafka cluster. The structure is documented below.
NetworkId This property is required. string
ID of the network, to which the Kafka cluster belongs.
DeletionProtection bool
Inhibits deletion of the cluster. Can be either true or false.
Description string
Description of the Kafka cluster.
Environment string
Deployment environment of the Kafka cluster. Can be either PRESTABLE or PRODUCTION. The default is PRODUCTION.
FolderId string
The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
HostGroupIds []string
A list of IDs of the host groups to place VMs of the cluster on.
Labels map[string]string
A set of key/value label pairs to assign to the Kafka cluster.
MaintenanceWindow MdbKafkaClusterMaintenanceWindowArgs
Maintenance policy of the Kafka cluster. The structure is documented below.
Name string
Name of the Kafka cluster.
SecurityGroupIds []string
Security group ids, to which the Kafka cluster belongs.
SubnetIds []string
IDs of the subnets, to which the Kafka cluster belongs.
Topics []MdbKafkaClusterTopicArgs
To manage topics, please switch to using a separate resource type yandex.MdbKafkaTopic.

Deprecated: to manage topics, please switch to using a separate resource type yandex_mdb_kafka_topic

Users []MdbKafkaClusterUserArgs
A user of the Kafka cluster. The structure is documented below.
config This property is required. MdbKafkaClusterConfig
Configuration of the Kafka cluster. The structure is documented below.
networkId This property is required. String
ID of the network, to which the Kafka cluster belongs.
deletionProtection Boolean
Inhibits deletion of the cluster. Can be either true or false.
description String
Description of the Kafka cluster.
environment String
Deployment environment of the Kafka cluster. Can be either PRESTABLE or PRODUCTION. The default is PRODUCTION.
folderId String
The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
hostGroupIds List<String>
A list of IDs of the host groups to place VMs of the cluster on.
labels Map<String,String>
A set of key/value label pairs to assign to the Kafka cluster.
maintenanceWindow MdbKafkaClusterMaintenanceWindow
Maintenance policy of the Kafka cluster. The structure is documented below.
name String
Name of the Kafka cluster.
securityGroupIds List<String>
Security group ids, to which the Kafka cluster belongs.
subnetIds List<String>
IDs of the subnets, to which the Kafka cluster belongs.
topics List<MdbKafkaClusterTopic>
To manage topics, please switch to using a separate resource type yandex.MdbKafkaTopic.

Deprecated: to manage topics, please switch to using a separate resource type yandex_mdb_kafka_topic

users List<MdbKafkaClusterUser>
A user of the Kafka cluster. The structure is documented below.
config This property is required. MdbKafkaClusterConfig
Configuration of the Kafka cluster. The structure is documented below.
networkId This property is required. string
ID of the network, to which the Kafka cluster belongs.
deletionProtection boolean
Inhibits deletion of the cluster. Can be either true or false.
description string
Description of the Kafka cluster.
environment string
Deployment environment of the Kafka cluster. Can be either PRESTABLE or PRODUCTION. The default is PRODUCTION.
folderId string
The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
hostGroupIds string[]
A list of IDs of the host groups to place VMs of the cluster on.
labels {[key: string]: string}
A set of key/value label pairs to assign to the Kafka cluster.
maintenanceWindow MdbKafkaClusterMaintenanceWindow
Maintenance policy of the Kafka cluster. The structure is documented below.
name string
Name of the Kafka cluster.
securityGroupIds string[]
Security group ids, to which the Kafka cluster belongs.
subnetIds string[]
IDs of the subnets, to which the Kafka cluster belongs.
topics MdbKafkaClusterTopic[]
To manage topics, please switch to using a separate resource type yandex.MdbKafkaTopic.

Deprecated: to manage topics, please switch to using a separate resource type yandex_mdb_kafka_topic

users MdbKafkaClusterUser[]
A user of the Kafka cluster. The structure is documented below.
config This property is required. MdbKafkaClusterConfigArgs
Configuration of the Kafka cluster. The structure is documented below.
network_id This property is required. str
ID of the network, to which the Kafka cluster belongs.
deletion_protection bool
Inhibits deletion of the cluster. Can be either true or false.
description str
Description of the Kafka cluster.
environment str
Deployment environment of the Kafka cluster. Can be either PRESTABLE or PRODUCTION. The default is PRODUCTION.
folder_id str
The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
host_group_ids Sequence[str]
A list of IDs of the host groups to place VMs of the cluster on.
labels Mapping[str, str]
A set of key/value label pairs to assign to the Kafka cluster.
maintenance_window MdbKafkaClusterMaintenanceWindowArgs
Maintenance policy of the Kafka cluster. The structure is documented below.
name str
Name of the Kafka cluster.
security_group_ids Sequence[str]
Security group ids, to which the Kafka cluster belongs.
subnet_ids Sequence[str]
IDs of the subnets, to which the Kafka cluster belongs.
topics Sequence[MdbKafkaClusterTopicArgs]
To manage topics, please switch to using a separate resource type yandex.MdbKafkaTopic.

Deprecated: to manage topics, please switch to using a separate resource type yandex_mdb_kafka_topic

users Sequence[MdbKafkaClusterUserArgs]
A user of the Kafka cluster. The structure is documented below.
config This property is required. Property Map
Configuration of the Kafka cluster. The structure is documented below.
networkId This property is required. String
ID of the network, to which the Kafka cluster belongs.
deletionProtection Boolean
Inhibits deletion of the cluster. Can be either true or false.
description String
Description of the Kafka cluster.
environment String
Deployment environment of the Kafka cluster. Can be either PRESTABLE or PRODUCTION. The default is PRODUCTION.
folderId String
The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
hostGroupIds List<String>
A list of IDs of the host groups to place VMs of the cluster on.
labels Map<String>
A set of key/value label pairs to assign to the Kafka cluster.
maintenanceWindow Property Map
Maintenance policy of the Kafka cluster. The structure is documented below.
name String
Name of the Kafka cluster.
securityGroupIds List<String>
Security group ids, to which the Kafka cluster belongs.
subnetIds List<String>
IDs of the subnets, to which the Kafka cluster belongs.
topics List<Property Map>
To manage topics, please switch to using a separate resource type yandex.MdbKafkaTopic.

Deprecated: to manage topics, please switch to using a separate resource type yandex_mdb_kafka_topic

users List<Property Map>
A user of the Kafka cluster. The structure is documented below.
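Note that, since the topics argument is deprecated, topics are better managed with the separate yandex.MdbKafkaTopic resource. The following is a hedged Python sketch that continues the single-node example above and assumes MdbKafkaTopic accepts cluster_id, name, partitions and replication_factor, mirroring the underlying yandex_mdb_kafka_topic resource:

import pulumi_yandex as yandex

# Create the "input" topic on the cluster defined earlier (foo_mdb_kafka_cluster);
# the partition and replication settings below are illustrative.
input_topic = yandex.MdbKafkaTopic("input",
    cluster_id=foo_mdb_kafka_cluster.id,
    name="input",
    partitions=6,
    replication_factor=1)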

Outputs

All input properties are implicitly available as output properties. Additionally, the MdbKafkaCluster resource produces the following output properties:

CreatedAt string
Timestamp of cluster creation.
Health string
Aggregated health of the cluster.
Hosts List<MdbKafkaClusterHost>
A host of the Kafka cluster. The structure is documented below.
Id string
The provider-assigned unique ID for this managed resource.
Status string
Status of the cluster. Can be either CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation.
CreatedAt string
Timestamp of cluster creation.
Health string
Aggregated health of the cluster.
Hosts []MdbKafkaClusterHost
A host of the Kafka cluster. The structure is documented below.
Id string
The provider-assigned unique ID for this managed resource.
Status string
Status of the cluster. Can be either CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation.
createdAt String
Timestamp of cluster creation.
health String
Aggregated health of the cluster.
hosts List<MdbKafkaClusterHost>
A host of the Kafka cluster. The structure is documented below.
id String
The provider-assigned unique ID for this managed resource.
status String
Status of the cluster. Can be either CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation.
createdAt string
Timestamp of cluster creation.
health string
Aggregated health of the cluster.
hosts MdbKafkaClusterHost[]
A host of the Kafka cluster. The structure is documented below.
id string
The provider-assigned unique ID for this managed resource.
status string
Status of the cluster. Can be either CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation.
created_at str
Timestamp of cluster creation.
health str
Aggregated health of the cluster.
hosts Sequence[MdbKafkaClusterHost]
A host of the Kafka cluster. The structure is documented below.
id str
The provider-assigned unique ID for this managed resource.
status str
Status of the cluster. Can be either CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation.
createdAt String
Timestamp of cluster creation.
health String
Aggregated health of the cluster.
hosts List<Property Map>
A host of the Kafka cluster. The structure is documented below.
id String
The provider-assigned unique ID for this managed resource.
status String
Status of the cluster. Can be either CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation.
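Output properties can be read and exported like any other Pulumi outputs. For example, continuing the Python example above:

import pulumi

# Export the cluster's status and aggregated health once the deployment completes.
pulumi.export("kafka_status", foo_mdb_kafka_cluster.status)
pulumi.export("kafka_health", foo_mdb_kafka_cluster.health)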

Look up Existing MdbKafkaCluster Resource

Get an existing MdbKafkaCluster resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

public static get(name: string, id: Input<ID>, state?: MdbKafkaClusterState, opts?: CustomResourceOptions): MdbKafkaCluster
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        config: Optional[MdbKafkaClusterConfigArgs] = None,
        created_at: Optional[str] = None,
        deletion_protection: Optional[bool] = None,
        description: Optional[str] = None,
        environment: Optional[str] = None,
        folder_id: Optional[str] = None,
        health: Optional[str] = None,
        host_group_ids: Optional[Sequence[str]] = None,
        hosts: Optional[Sequence[MdbKafkaClusterHostArgs]] = None,
        labels: Optional[Mapping[str, str]] = None,
        maintenance_window: Optional[MdbKafkaClusterMaintenanceWindowArgs] = None,
        name: Optional[str] = None,
        network_id: Optional[str] = None,
        security_group_ids: Optional[Sequence[str]] = None,
        status: Optional[str] = None,
        subnet_ids: Optional[Sequence[str]] = None,
        topics: Optional[Sequence[MdbKafkaClusterTopicArgs]] = None,
        users: Optional[Sequence[MdbKafkaClusterUserArgs]] = None) -> MdbKafkaCluster
func GetMdbKafkaCluster(ctx *Context, name string, id IDInput, state *MdbKafkaClusterState, opts ...ResourceOption) (*MdbKafkaCluster, error)
public static MdbKafkaCluster Get(string name, Input<string> id, MdbKafkaClusterState? state, CustomResourceOptions? opts = null)
public static MdbKafkaCluster get(String name, Output<String> id, MdbKafkaClusterState state, CustomResourceOptions options)
resources:
  _:
    type: yandex:MdbKafkaCluster
    get:
      id: ${id}
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to look up.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
resource_name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to look up.
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to look up.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to look up.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to look up.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
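For example, a minimal sketch of looking up an existing cluster by its provider-assigned ID in C# and exporting its computed status; the ID value is a placeholder, and the state and options arguments are left unset:

using Pulumi;
using Yandex = Pulumi.Yandex;

class LookupStack : Stack
{
    [Output("kafkaStatus")]
    public Output<string> KafkaStatus { get; set; }

    public LookupStack()
    {
        // Look up an already-provisioned cluster by ID (placeholder value).
        var existing = Yandex.MdbKafkaCluster.Get("existing", "your-cluster-id", null);

        // Export the cluster's aggregated status (CREATING, RUNNING, ...).
        this.KafkaStatus = existing.Status;
    }
}
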
The following state arguments are supported:
Config MdbKafkaClusterConfig
Configuration of the Kafka cluster. The structure is documented below.
CreatedAt string
Timestamp of cluster creation.
DeletionProtection bool
Inhibits deletion of the cluster. Can be either true or false.
Description string
Description of the Kafka cluster.
Environment string
Deployment environment of the Kafka cluster. Can be either PRESTABLE or PRODUCTION. The default is PRODUCTION.
FolderId string
The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
Health string
Aggregated health of the cluster. Can be either ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN.
HostGroupIds List<string>
A list of IDs of the host groups to place VMs of the cluster on.
Hosts List<MdbKafkaClusterHost>
A host of the Kafka cluster. The structure is documented below.
Labels Dictionary<string, string>
A set of key/value label pairs to assign to the Kafka cluster.
MaintenanceWindow MdbKafkaClusterMaintenanceWindow
Maintenance policy of the Kafka cluster. The structure is documented below.
Name string
The name of the Kafka cluster.
NetworkId string
ID of the network, to which the Kafka cluster belongs.
SecurityGroupIds List<string>
Security group ids, to which the Kafka cluster belongs.
Status string
Status of the cluster. Can be either CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation.
SubnetIds List<string>
IDs of the subnets, to which the Kafka cluster belongs.
Topics List<MdbKafkaClusterTopic>
To manage topics, please switch to using a separate resource type yandex.MdbKafkaTopic.

Deprecated: to manage topics, please switch to using a separate resource type yandex_mdb_kafka_topic

Users List<MdbKafkaClusterUser>
A user of the Kafka cluster. The structure is documented below.
Config MdbKafkaClusterConfigArgs
Configuration of the Kafka cluster. The structure is documented below.
CreatedAt string
Timestamp of cluster creation.
DeletionProtection bool
Inhibits deletion of the cluster. Can be either true or false.
Description string
Description of the Kafka cluster.
Environment string
Deployment environment of the Kafka cluster. Can be either PRESTABLE or PRODUCTION. The default is PRODUCTION.
FolderId string
The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
Health string
Aggregated health of the cluster. Can be either ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN.
HostGroupIds []string
A list of IDs of the host groups to place VMs of the cluster on.
Hosts []MdbKafkaClusterHostArgs
A host of the Kafka cluster. The structure is documented below.
Labels map[string]string
A set of key/value label pairs to assign to the Kafka cluster.
MaintenanceWindow MdbKafkaClusterMaintenanceWindowArgs
Maintenance policy of the Kafka cluster. The structure is documented below.
Name string
The name of the Kafka cluster.
NetworkId string
ID of the network, to which the Kafka cluster belongs.
SecurityGroupIds []string
Security group ids, to which the Kafka cluster belongs.
Status string
Status of the cluster. Can be either CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation.
SubnetIds []string
IDs of the subnets, to which the Kafka cluster belongs.
Topics []MdbKafkaClusterTopicArgs
To manage topics, please switch to using a separate resource type yandex.MdbKafkaTopic.

Deprecated: to manage topics, please switch to using a separate resource type yandex_mdb_kafka_topic

Users []MdbKafkaClusterUserArgs
A user of the Kafka cluster. The structure is documented below.
config MdbKafkaClusterConfig
Configuration of the Kafka cluster. The structure is documented below.
createdAt String
Timestamp of cluster creation.
deletionProtection Boolean
Inhibits deletion of the cluster. Can be either true or false.
description String
Description of the Kafka cluster.
environment String
Deployment environment of the Kafka cluster. Can be either PRESTABLE or PRODUCTION. The default is PRODUCTION.
folderId String
The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
health String
Aggregated health of the cluster. Can be either ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN.
hostGroupIds List<String>
A list of IDs of the host groups to place VMs of the cluster on.
hosts List<MdbKafkaClusterHost>
A host of the Kafka cluster. The structure is documented below.
labels Map<String,String>
A set of key/value label pairs to assign to the Kafka cluster.
maintenanceWindow MdbKafkaClusterMaintenanceWindow
Maintenance policy of the Kafka cluster. The structure is documented below.
name String
The name of the Kafka cluster.
networkId String
ID of the network, to which the Kafka cluster belongs.
securityGroupIds List<String>
Security group ids, to which the Kafka cluster belongs.
status String
Status of the cluster. Can be either CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation.
subnetIds List<String>
IDs of the subnets, to which the Kafka cluster belongs.
topics List<MdbKafkaClusterTopic>
To manage topics, please switch to using a separate resource type yandex.MdbKafkaTopic.

Deprecated: to manage topics, please switch to using a separate resource type yandex_mdb_kafka_topic

users List<MdbKafkaClusterUser>
A user of the Kafka cluster. The structure is documented below.
config MdbKafkaClusterConfig
Configuration of the Kafka cluster. The structure is documented below.
createdAt string
Timestamp of cluster creation.
deletionProtection boolean
Inhibits deletion of the cluster. Can be either true or false.
description string
Description of the Kafka cluster.
environment string
Deployment environment of the Kafka cluster. Can be either PRESTABLE or PRODUCTION. The default is PRODUCTION.
folderId string
The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
health string
Aggregated health of the cluster. Can be either ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN.
hostGroupIds string[]
A list of IDs of the host groups to place VMs of the cluster on.
hosts MdbKafkaClusterHost[]
A host of the Kafka cluster. The structure is documented below.
labels {[key: string]: string}
A set of key/value label pairs to assign to the Kafka cluster.
maintenanceWindow MdbKafkaClusterMaintenanceWindow
Maintenance policy of the Kafka cluster. The structure is documented below.
name string
The name of the Kafka cluster.
networkId string
ID of the network, to which the Kafka cluster belongs.
securityGroupIds string[]
Security group ids, to which the Kafka cluster belongs.
status string
Status of the cluster. Can be either CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation.
subnetIds string[]
IDs of the subnets, to which the Kafka cluster belongs.
topics MdbKafkaClusterTopic[]
To manage topics, please switch to using a separate resource type yandex.MdbKafkaTopic.

Deprecated: to manage topics, please switch to using a separate resource type yandex_mdb_kafka_topic

users MdbKafkaClusterUser[]
A user of the Kafka cluster. The structure is documented below.
config MdbKafkaClusterConfigArgs
Configuration of the Kafka cluster. The structure is documented below.
created_at str
Timestamp of cluster creation.
deletion_protection bool
Inhibits deletion of the cluster. Can be either true or false.
description str
Description of the Kafka cluster.
environment str
Deployment environment of the Kafka cluster. Can be either PRESTABLE or PRODUCTION. The default is PRODUCTION.
folder_id str
The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
health str
Aggregated health of the cluster. Can be either ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN.
host_group_ids Sequence[str]
A list of IDs of the host groups to place VMs of the cluster on.
hosts Sequence[MdbKafkaClusterHostArgs]
A host of the Kafka cluster. The structure is documented below.
labels Mapping[str, str]
A set of key/value label pairs to assign to the Kafka cluster.
maintenance_window MdbKafkaClusterMaintenanceWindowArgs
Maintenance policy of the Kafka cluster. The structure is documented below.
name str
The name of the Kafka cluster.
network_id str
ID of the network, to which the Kafka cluster belongs.
security_group_ids Sequence[str]
Security group ids, to which the Kafka cluster belongs.
status str
Status of the cluster. Can be either CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation.
subnet_ids Sequence[str]
IDs of the subnets, to which the Kafka cluster belongs.
topics Sequence[MdbKafkaClusterTopicArgs]
To manage topics, please switch to using a separate resource type yandex.MdbKafkaTopic.

Deprecated: to manage topics, please switch to using a separate resource type yandex_mdb_kafka_topic

users Sequence[MdbKafkaClusterUserArgs]
A user of the Kafka cluster. The structure is documented below.
config Property Map
Configuration of the Kafka cluster. The structure is documented below.
createdAt String
Timestamp of cluster creation.
deletionProtection Boolean
Inhibits deletion of the cluster. Can be either true or false.
description String
Description of the Kafka cluster.
environment String
Deployment environment of the Kafka cluster. Can be either PRESTABLE or PRODUCTION. The default is PRODUCTION.
folderId String
The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
health String
Aggregated health of the cluster. Can be either ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN.
hostGroupIds List<String>
A list of IDs of the host groups to place VMs of the cluster on.
hosts List<Property Map>
A host of the Kafka cluster. The structure is documented below.
labels Map<String>
A set of key/value label pairs to assign to the Kafka cluster.
maintenanceWindow Property Map
Maintenance policy of the Kafka cluster. The structure is documented below.
name String
The name of the Kafka cluster.
networkId String
ID of the network, to which the Kafka cluster belongs.
securityGroupIds List<String>
Security group ids, to which the Kafka cluster belongs.
status String
Status of the cluster. Can be either CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation.
subnetIds List<String>
IDs of the subnets, to which the Kafka cluster belongs.
topics List<Property Map>
To manage topics, please switch to using a separate resource type yandex.MdbKafkaTopic.

Deprecated: to manage topics, please switch to using a separate resource type yandex_mdb_kafka_topic

users List<Property Map>
A user of the Kafka cluster. The structure is documented below.

Supporting Types

MdbKafkaClusterConfig
, MdbKafkaClusterConfigArgs

Kafka This property is required. MdbKafkaClusterConfigKafka
Configuration of the Kafka subcluster. The structure is documented below.
Version This property is required. string
Version of the Kafka server software.
Zones This property is required. List<string>
List of availability zones.
AssignPublicIp bool
Determines whether each broker will be assigned a public IP address. The default is false.
BrokersCount int
Count of brokers per availability zone. The default is 1.
SchemaRegistry bool
Enables managed schema registry on cluster. The default is false.
UnmanagedTopics bool
Allows using the Kafka Admin API to manage topics. The default is false.
Zookeeper MdbKafkaClusterConfigZookeeper
Configuration of the ZooKeeper subcluster. The structure is documented below.
Kafka This property is required. MdbKafkaClusterConfigKafka
Configuration of the Kafka subcluster. The structure is documented below.
Version This property is required. string
Version of the Kafka server software.
Zones This property is required. []string
List of availability zones.
AssignPublicIp bool
Determines whether each broker will be assigned a public IP address. The default is false.
BrokersCount int
Count of brokers per availability zone. The default is 1.
SchemaRegistry bool
Enables managed schema registry on cluster. The default is false.
UnmanagedTopics bool
Allows using the Kafka Admin API to manage topics. The default is false.
Zookeeper MdbKafkaClusterConfigZookeeper
Configuration of the ZooKeeper subcluster. The structure is documented below.
kafka This property is required. MdbKafkaClusterConfigKafka
Configuration of the Kafka subcluster. The structure is documented below.
version This property is required. String
Version of the Kafka server software.
zones This property is required. List<String>
List of availability zones.
assignPublicIp Boolean
Determines whether each broker will be assigned a public IP address. The default is false.
brokersCount Integer
Count of brokers per availability zone. The default is 1.
schemaRegistry Boolean
Enables managed schema registry on cluster. The default is false.
unmanagedTopics Boolean
Allows using the Kafka Admin API to manage topics. The default is false.
zookeeper MdbKafkaClusterConfigZookeeper
Configuration of the ZooKeeper subcluster. The structure is documented below.
kafka This property is required. MdbKafkaClusterConfigKafka
Configuration of the Kafka subcluster. The structure is documented below.
version This property is required. string
Version of the Kafka server software.
zones This property is required. string[]
List of availability zones.
assignPublicIp boolean
Determines whether each broker will be assigned a public IP address. The default is false.
brokersCount number
Count of brokers per availability zone. The default is 1.
schemaRegistry boolean
Enables managed schema registry on cluster. The default is false.
unmanagedTopics boolean
Allows using the Kafka Admin API to manage topics. The default is false.
zookeeper MdbKafkaClusterConfigZookeeper
Configuration of the ZooKeeper subcluster. The structure is documented below.
kafka This property is required. MdbKafkaClusterConfigKafka
Configuration of the Kafka subcluster. The structure is documented below.
version This property is required. str
Version of the Kafka server software.
zones This property is required. Sequence[str]
List of availability zones.
assign_public_ip bool
Determines whether each broker will be assigned a public IP address. The default is false.
brokers_count int
Count of brokers per availability zone. The default is 1.
schema_registry bool
Enables managed schema registry on cluster. The default is false.
unmanaged_topics bool
Allows using the Kafka Admin API to manage topics. The default is false.
zookeeper MdbKafkaClusterConfigZookeeper
Configuration of the ZooKeeper subcluster. The structure is documented below.
kafka This property is required. Property Map
Configuration of the Kafka subcluster. The structure is documented below.
version This property is required. String
Version of the Kafka server software.
zones This property is required. List<String>
List of availability zones.
assignPublicIp Boolean
Determines whether each broker will be assigned a public IP address. The default is false.
brokersCount Number
Count of brokers per availability zone. The default is 1.
schemaRegistry Boolean
Enables managed schema registry on cluster. The default is false.
unmanagedTopics Boolean
Allows using the Kafka Admin API to manage topics. The default is false.
zookeeper Property Map
Configuration of the ZooKeeper subcluster. The structure is documented below.
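When a cluster spans several zones or runs more than one broker per zone, a ZooKeeper subcluster is provisioned alongside the brokers, and its resources can be set through the zookeeper block. A sketch of such a config value in C#, meant to be passed as the cluster's Config argument; the zone names, preset, disk type and sizes below are illustrative assumptions, not recommendations:

var haConfig = new Yandex.Inputs.MdbKafkaClusterConfigArgs
{
    Version = "2.8",
    BrokersCount = 2,
    Zones =
    {
        "ru-central1-a",
        "ru-central1-b",
        "ru-central1-c",
    },
    Kafka = new Yandex.Inputs.MdbKafkaClusterConfigKafkaArgs
    {
        Resources = new Yandex.Inputs.MdbKafkaClusterConfigKafkaResourcesArgs
        {
            ResourcePresetId = "s2.micro",
            DiskTypeId = "network-ssd",
            DiskSize = 128,
        },
    },
    // Sizing for the ZooKeeper subcluster created for multi-broker clusters.
    Zookeeper = new Yandex.Inputs.MdbKafkaClusterConfigZookeeperArgs
    {
        Resources = new Yandex.Inputs.MdbKafkaClusterConfigZookeeperResourcesArgs
        {
            ResourcePresetId = "s2.micro",
            DiskTypeId = "network-ssd",
            DiskSize = 20,
        },
    },
};
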

MdbKafkaClusterConfigKafka
, MdbKafkaClusterConfigKafkaArgs

Resources This property is required. MdbKafkaClusterConfigKafkaResources
Resources allocated to hosts of the Kafka subcluster. The structure is documented below.
KafkaConfig MdbKafkaClusterConfigKafkaKafkaConfig
User-defined settings for the Kafka cluster. The structure is documented below.
Resources This property is required. MdbKafkaClusterConfigKafkaResources
Resources allocated to hosts of the Kafka subcluster. The structure is documented below.
KafkaConfig MdbKafkaClusterConfigKafkaKafkaConfig
User-defined settings for the Kafka cluster. The structure is documented below.
resources This property is required. MdbKafkaClusterConfigKafkaResources
Resources allocated to hosts of the Kafka subcluster. The structure is documented below.
kafkaConfig MdbKafkaClusterConfigKafkaKafkaConfig
User-defined settings for the Kafka cluster. The structure is documented below.
resources This property is required. MdbKafkaClusterConfigKafkaResources
Resources allocated to hosts of the Kafka subcluster. The structure is documented below.
kafkaConfig MdbKafkaClusterConfigKafkaKafkaConfig
User-defined settings for the Kafka cluster. The structure is documented below.
resources This property is required. MdbKafkaClusterConfigKafkaResources
Resources allocated to hosts of the Kafka subcluster. The structure is documented below.
kafka_config MdbKafkaClusterConfigKafkaKafkaConfig
User-defined settings for the Kafka cluster. The structure is documented below.
resources This property is required. Property Map
Resources allocated to hosts of the Kafka subcluster. The structure is documented below.
kafkaConfig Property Map
User-defined settings for the Kafka cluster. The structure is documented below.

MdbKafkaClusterConfigKafkaKafkaConfig
, MdbKafkaClusterConfigKafkaKafkaConfigArgs

MdbKafkaClusterConfigKafkaResources
, MdbKafkaClusterConfigKafkaResourcesArgs

DiskSize This property is required. int
Volume of the storage available to a Kafka host, in gigabytes.
DiskTypeId This property is required. string
Type of the storage of Kafka hosts. For more information see the official documentation.
ResourcePresetId This property is required. string
DiskSize This property is required. int
Volume of the storage available to a Kafka host, in gigabytes.
DiskTypeId This property is required. string
Type of the storage of Kafka hosts. For more information see the official documentation.
ResourcePresetId This property is required. string
diskSize This property is required. Integer
Volume of the storage available to a Kafka host, in gigabytes.
diskTypeId This property is required. String
Type of the storage of Kafka hosts. For more information see the official documentation.
resourcePresetId This property is required. String
diskSize This property is required. number
Volume of the storage available to a Kafka host, in gigabytes.
diskTypeId This property is required. string
Type of the storage of Kafka hosts. For more information see the official documentation.
resourcePresetId This property is required. string
disk_size This property is required. int
Volume of the storage available to a Kafka host, in gigabytes.
disk_type_id This property is required. str
Type of the storage of Kafka hosts. For more information see the official documentation.
resource_preset_id This property is required. str
diskSize This property is required. Number
Volume of the storage available to a Kafka host, in gigabytes.
diskTypeId This property is required. String
Type of the storage of Kafka hosts. For more information see the official documentation.
resourcePresetId This property is required. String

MdbKafkaClusterConfigZookeeper
, MdbKafkaClusterConfigZookeeperArgs

Resources MdbKafkaClusterConfigZookeeperResources
Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
Resources MdbKafkaClusterConfigZookeeperResources
Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
resources MdbKafkaClusterConfigZookeeperResources
Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
resources MdbKafkaClusterConfigZookeeperResources
Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
resources MdbKafkaClusterConfigZookeeperResources
Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
resources Property Map
Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.

MdbKafkaClusterConfigZookeeperResources
, MdbKafkaClusterConfigZookeeperResourcesArgs

DiskSize int
Volume of the storage available to a ZooKeeper host, in gigabytes.
DiskTypeId string
Type of the storage of ZooKeeper hosts. For more information see the official documentation.
ResourcePresetId string
DiskSize int
Volume of the storage available to a ZooKeeper host, in gigabytes.
DiskTypeId string
Type of the storage of ZooKeeper hosts. For more information see the official documentation.
ResourcePresetId string
diskSize Integer
Volume of the storage available to a ZooKeeper host, in gigabytes.
diskTypeId String
Type of the storage of ZooKeeper hosts. For more information see the official documentation.
resourcePresetId String
diskSize number
Volume of the storage available to a ZooKeeper host, in gigabytes.
diskTypeId string
Type of the storage of ZooKeeper hosts. For more information see the official documentation.
resourcePresetId string
disk_size int
Volume of the storage available to a ZooKeeper host, in gigabytes.
disk_type_id str
Type of the storage of ZooKeeper hosts. For more information see the official documentation.
resource_preset_id str
diskSize Number
Volume of the storage available to a ZooKeeper host, in gigabytes.
diskTypeId String
Type of the storage of ZooKeeper hosts. For more information see the official documentation.
resourcePresetId String

MdbKafkaClusterHost
, MdbKafkaClusterHostArgs

AssignPublicIp bool
Determines whether each broker will be assigned a public IP address. The default is false.
Health string
Health of the host.
Name string
The fully qualified domain name of the host.
Role string
Role of the host in the cluster.
SubnetId string
The ID of the subnet, to which the host belongs.
ZoneId string
The availability zone where the Kafka host was created.
AssignPublicIp bool
Determines whether each broker will be assigned a public IP address. The default is false.
Health string
Health of the host.
Name string
The fully qualified domain name of the host.
Role string
Role of the host in the cluster.
SubnetId string
The ID of the subnet, to which the host belongs.
ZoneId string
The availability zone where the Kafka host was created.
assignPublicIp Boolean
Determines whether each broker will be assigned a public IP address. The default is false.
health String
Health of the host.
name String
The fully qualified domain name of the host.
role String
Role of the host in the cluster.
subnetId String
The ID of the subnet, to which the host belongs.
zoneId String
The availability zone where the Kafka host was created.
assignPublicIp boolean
Determines whether each broker will be assigned a public IP address. The default is false.
health string
Health of the host.
name string
The fully qualified domain name of the host.
role string
Role of the host in the cluster.
subnetId string
The ID of the subnet, to which the host belongs.
zoneId string
The availability zone where the Kafka host was created.
assign_public_ip bool
Determines whether each broker will be assigned a public IP address. The default is false.
health str
Health of the host.
name str
The fully qualified domain name of the host.
role str
Role of the host in the cluster.
subnet_id str
The ID of the subnet, to which the host belongs.
zone_id str
The availability zone where the Kafka host was created.
assignPublicIp Boolean
Determines whether each broker will be assigned a public IP address. The default is false.
health String
Health of the host.
name String
The fully qualified domain name of the host.
role String
Role of the host in the cluster.
subnetId String
The ID of the subnet, to which the host belongs.
zoneId String
The availability zone where the Kafka host was created.
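Because hosts is a computed output, values derived from it, such as a bootstrap-servers string, have to be built inside Apply. A minimal C# sketch, assuming an MdbKafkaCluster resource already bound to the variable cluster, that brokers are identified by the role value "KAFKA", and that clients connect on port 9091; the role value and port are assumptions, not guarantees:

using System.Linq;
using Pulumi;

// Inside the stack, given an MdbKafkaCluster resource `cluster`:
var bootstrapServers = cluster.Hosts.Apply(hosts =>
    string.Join(",", hosts
        // Skip ZooKeeper hosts; keep only Kafka brokers (role value is assumed).
        .Where(h => h.Role == "KAFKA")
        .Select(h => $"{h.Name}:9091")));
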

MdbKafkaClusterMaintenanceWindow
, MdbKafkaClusterMaintenanceWindowArgs

Type This property is required. string
Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
Day string
Day of the week (in DDD format). Allowed values: "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"
Hour int
Hour of the day in UTC (in HH format). Allowed value is between 1 and 24.
Type This property is required. string
Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
Day string
Day of the week (in DDD format). Allowed values: "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"
Hour int
Hour of the day in UTC (in HH format). Allowed value is between 1 and 24.
type This property is required. String
Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
day String
Day of the week (in DDD format). Allowed values: "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"
hour Integer
Hour of the day in UTC (in HH format). Allowed value is between 1 and 24.
type This property is required. string
Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
day string
Day of the week (in DDD format). Allowed values: "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"
hour number
Hour of the day in UTC (in HH format). Allowed value is between 1 and 24.
type This property is required. str
Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
day str
Day of the week (in DDD format). Allowed values: "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"
hour int
Hour of the day in UTC (in HH format). Allowed value is between 1 and 24.
type This property is required. String
Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
day String
Day of the week (in DDD format). Allowed values: "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"
hour Number
Hour of the day in UTC (in HH format). Allowed value is between 1 and 24.
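As a sketch, a weekly maintenance window built in C# and meant to be passed as the cluster's MaintenanceWindow argument; the day and hour chosen here are arbitrary examples:

var maintenance = new Yandex.Inputs.MdbKafkaClusterMaintenanceWindowArgs
{
    Type = "WEEKLY",
    Day = "SAT",   // one of MON..SUN
    Hour = 23,     // 1..24, in UTC
};

With Type set to "ANYTIME", the Day and Hour arguments are simply omitted.
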

MdbKafkaClusterTopic
, MdbKafkaClusterTopicArgs

Name This property is required. string
The name of the topic.
Partitions This property is required. int
The number of the topic's partitions.
ReplicationFactor This property is required. int
Amount of data copies (replicas) for the topic in the cluster.
TopicConfig MdbKafkaClusterTopicTopicConfig
User-defined settings for the topic. The structure is documented below.
Name This property is required. string
The name of the topic.
Partitions This property is required. int
The number of the topic's partitions.
ReplicationFactor This property is required. int
Amount of data copies (replicas) for the topic in the cluster.
TopicConfig MdbKafkaClusterTopicTopicConfig
User-defined settings for the topic. The structure is documented below.
name This property is required. String
The name of the topic.
partitions This property is required. Integer
The number of the topic's partitions.
replicationFactor This property is required. Integer
Amount of data copies (replicas) for the topic in the cluster.
topicConfig MdbKafkaClusterTopicTopicConfig
User-defined settings for the topic. The structure is documented below.
name This property is required. string
The name of the topic.
partitions This property is required. number
The number of the topic's partitions.
replicationFactor This property is required. number
Amount of data copies (replicas) for the topic in the cluster.
topicConfig MdbKafkaClusterTopicTopicConfig
User-defined settings for the topic. The structure is documented below.
name This property is required. str
The name of the topic.
partitions This property is required. int
The number of the topic's partitions.
replication_factor This property is required. int
Amount of data copies (replicas) for the topic in the cluster.
topic_config MdbKafkaClusterTopicTopicConfig
User-defined settings for the topic. The structure is documented below.
name This property is required. String
The name of the topic.
partitions This property is required. Number
The number of the topic's partitions.
replicationFactor This property is required. Number
Amount of data copies (replicas) for the topic in the cluster.
topicConfig Property Map
User-defined settings for the topic. The structure is documented below.
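Since the inline topics block is deprecated, new topics are better declared with the separate yandex.MdbKafkaTopic resource. A hedged sketch in C#, assuming a cluster resource bound to the variable cluster; the argument names (ClusterId, Partitions, ReplicationFactor) mirror the fields above but are assumptions about that resource's API, so check its own reference page:

// Hypothetical standalone topic resource; argument names are assumed to mirror
// the inline topic fields documented above.
var inputTopic = new Yandex.MdbKafkaTopic("input", new Yandex.MdbKafkaTopicArgs
{
    ClusterId = cluster.Id,
    Name = "input",
    Partitions = 6,
    ReplicationFactor = 2,
});
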

MdbKafkaClusterTopicTopicConfig
, MdbKafkaClusterTopicTopicConfigArgs

MdbKafkaClusterUser
, MdbKafkaClusterUserArgs

Name This property is required. string
The name of the user.
Password This property is required. string
The password of the user.
Permissions List<MdbKafkaClusterUserPermission>
Set of permissions granted to the user. The structure is documented below.
Name This property is required. string
The name of the user.
Password This property is required. string
The password of the user.
Permissions []MdbKafkaClusterUserPermission
Set of permissions granted to the user. The structure is documented below.
name This property is required. String
The name of the user.
password This property is required. String
The password of the user.
permissions List<MdbKafkaClusterUserPermission>
Set of permissions granted to the user. The structure is documented below.
name This property is required. string
The name of the user.
password This property is required. string
The password of the user.
permissions MdbKafkaClusterUserPermission[]
Set of permissions granted to the user. The structure is documented below.
name This property is required. str
The name of the user.
password This property is required. str
The password of the user.
permissions Sequence[MdbKafkaClusterUserPermission]
Set of permissions granted to the user. The structure is documented below.
name This property is required. String
The name of the user.
password This property is required. String
The password of the user.
permissions List<Property Map>
Set of permissions granted to the user. The structure is documented below.

MdbKafkaClusterUserPermission
, MdbKafkaClusterUserPermissionArgs

Role This property is required. string
The role type to grant to the topic.
TopicName This property is required. string
The name of the topic that the permission grants access to.
Role This property is required. string
The role type to grant to the topic.
TopicName This property is required. string
The name of the topic that the permission grants access to.
role This property is required. String
The role type to grant to the topic.
topicName This property is required. String
The name of the topic that the permission grants access to.
role This property is required. string
The role type to grant to the topic.
topicName This property is required. string
The name of the topic that the permission grants access to.
role This property is required. str
The role type to grant to the topic.
topic_name This property is required. str
The name of the topic that the permission grants access to.
role This property is required. String
The role type to grant to the topic.
topicName This property is required. String
The name of the topic that the permission grants access to.

Import

A cluster can be imported using the id of the resource, e.g.

 $ pulumi import yandex:index/mdbKafkaCluster:MdbKafkaCluster foo cluster_id

To learn more about importing existing cloud resources, see Importing resources.

Package Details

Repository
Yandex pulumi/pulumi-yandex
License
Apache-2.0
Notes
This Pulumi package is based on the yandex Terraform Provider.