hydro_deploy/azure.rs

use std::any::Any;
use std::collections::HashMap;
use std::fmt::Debug;
use std::sync::{Arc, Mutex, OnceLock};

use anyhow::Result;
use async_trait::async_trait;
use nanoid::nanoid;
use serde_json::json;

use super::terraform::{TERRAFORM_ALPHABET, TerraformOutput, TerraformProvider};
use super::{ClientStrategy, Host, HostTargetType, LaunchedHost, ResourceBatch, ResourceResult};
use crate::ssh::LaunchedSshHost;
use crate::{BaseServerStrategy, HostStrategyGetter, PortNetworkHint};

pub struct LaunchedVirtualMachine {
    resource_result: Arc<ResourceResult>,
    user: String,
    pub internal_ip: String,
    pub external_ip: Option<String>,
}

impl LaunchedSshHost for LaunchedVirtualMachine {
    fn get_external_ip(&self) -> Option<String> {
        self.external_ip.clone()
    }

    fn get_internal_ip(&self) -> String {
        self.internal_ip.clone()
    }

    fn get_cloud_provider(&self) -> String {
        "Azure".to_string()
    }

    fn resource_result(&self) -> &Arc<ResourceResult> {
        &self.resource_result
    }

    fn ssh_user(&self) -> &str {
        self.user.as_str()
    }
}

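/// An Azure virtual machine host. Its resources are declared through
/// Terraform's `azurerm` provider and the launched VM is reached over SSH.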
pub struct AzureHost {
    /// ID from [`crate::Deployment::add_host`].
    id: usize,

    project: String,
    os_type: String, // linux or windows
    machine_size: String,
    image: Option<HashMap<String, String>>,
    target_type: HostTargetType,
    region: String,
    user: Option<String>,
    pub launched: OnceLock<Arc<LaunchedVirtualMachine>>, // TODO(mingwei): fix pub
    external_ports: Mutex<Vec<u16>>,
}

impl AzureHost {
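    /// Creates a new `AzureHost` from its configuration; nothing is
    /// provisioned until the deployment is launched.
    ///
    /// A minimal construction sketch. The resource group, VM size, and region
    /// below are illustrative values, and `HostTargetType::Linux` is assumed
    /// to be the appropriate target variant here:
    ///
    /// ```ignore
    /// let host = AzureHost::new(
    ///     0,                                // id from `Deployment::add_host`
    ///     "my-resource-group".to_string(),  // project (Azure resource group name)
    ///     "linux".to_string(),              // os_type: "linux" or "windows"
    ///     "Standard_B2s".to_string(),       // machine_size
    ///     None,                             // image: defaults to Ubuntu 22.04 LTS
    ///     HostTargetType::Linux,
    ///     "East US".to_string(),            // region
    ///     None,                             // user: defaults to "hydro"
    /// );
    /// ```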
    #[expect(clippy::too_many_arguments, reason = "used via builder pattern")]
    pub fn new(
        id: usize,
        project: String,
        os_type: String, // linux or windows
        machine_size: String,
        image: Option<HashMap<String, String>>,
        target_type: HostTargetType,
        region: String,
        user: Option<String>,
    ) -> Self {
        Self {
            id,
            project,
            os_type,
            machine_size,
            image,
            target_type,
            region,
            user,
            launched: OnceLock::new(),
            external_ports: Mutex::new(Vec::new()),
        }
    }
}

impl Debug for AzureHost {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_fmt(format_args!("AzureHost({})", self.id))
    }
}

#[async_trait]
impl Host for AzureHost {
    fn target_type(&self) -> HostTargetType {
        self.target_type
    }

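    /// Records a request for a port before launch. Only externally reachable
    /// TCP ports need tracking here (Unix sockets and internal TCP ports need
    /// no firewall changes), and they cannot be added once the VM is launched.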
    fn request_port_base(&self, bind_type: &BaseServerStrategy) {
        match bind_type {
            BaseServerStrategy::UnixSocket => {}
            BaseServerStrategy::InternalTcpPort(_) => {}
            BaseServerStrategy::ExternalTcpPort(port) => {
                let mut external_ports = self.external_ports.lock().unwrap();
                if !external_ports.contains(port) {
                    if self.launched.get().is_some() {
                        todo!("Cannot adjust firewall after host has been launched");
                    }
                    external_ports.push(*port);
                }
            }
        }
    }

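    /// Custom binaries are copied to the VM over SSH, so external TCP port 22
    /// must be reachable.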
    fn request_custom_binary(&self) {
        self.request_port_base(&BaseServerStrategy::ExternalTcpPort(22));
    }

    fn id(&self) -> usize {
        self.id
    }

    fn collect_resources(&self, resource_batch: &mut ResourceBatch) {
        if self.launched.get().is_some() {
            return;
        }

        let project = self.project.as_str();

        // first, we import the providers we need
        resource_batch
            .terraform
            .terraform
            .required_providers
            .insert(
                "azurerm".to_string(),
                TerraformProvider {
                    source: "hashicorp/azurerm".to_string(),
                    version: "3.67.0".to_string(),
                },
            );

        resource_batch
            .terraform
            .terraform
            .required_providers
            .insert(
                "local".to_string(),
                TerraformProvider {
                    source: "hashicorp/local".to_string(),
                    version: "2.3.0".to_string(),
                },
            );

        resource_batch
            .terraform
            .terraform
            .required_providers
            .insert(
                "tls".to_string(),
                TerraformProvider {
                    source: "hashicorp/tls".to_string(),
                    version: "4.0.4".to_string(),
                },
            );

        // we use a single SSH key for all VMs
        resource_batch
            .terraform
            .resource
            .entry("tls_private_key".to_string())
            .or_default()
            .insert(
                "vm_instance_ssh_key".to_string(),
                json!({
                    "algorithm": "RSA",
                    "rsa_bits": 4096
                }),
            );

        resource_batch
            .terraform
            .resource
            .entry("local_file".to_string())
            .or_default()
            .insert(
                "vm_instance_ssh_key_pem".to_string(),
                json!({
                    "content": "${tls_private_key.vm_instance_ssh_key.private_key_pem}",
                    "filename": ".ssh/vm_instance_ssh_key_pem",
                    "file_permission": "0600"
                }),
            );

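        // `vm_key` is the stable Terraform resource key for this host, derived
        // from its deployment id; `vm_name` carries a random suffix so repeated
        // deployments do not collide on Azure resource names.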
        let vm_key = format!("vm-instance-{}", self.id);
        let vm_name = format!("hydro-vm-instance-{}", nanoid!(8, &TERRAFORM_ALPHABET));

        // Handle provider configuration
        resource_batch.terraform.provider.insert(
            "azurerm".to_string(),
            json!({
                "skip_provider_registration": "true",
                "features": {},
            }),
        );

        // Handle resources
        resource_batch
            .terraform
            .resource
            .entry("azurerm_resource_group".to_string())
            .or_default()
            .insert(
                vm_key.to_string(),
                json!({
                    "name": project,
                    "location": self.region.clone(),
                }),
            );

        resource_batch
            .terraform
            .resource
            .entry("azurerm_virtual_network".to_string())
            .or_default()
            .insert(
                vm_key.to_string(),
                json!({
                    "name": format!("{vm_key}-network"),
                    "address_space": ["10.0.0.0/16"],
                    "location": self.region.clone(),
                    "resource_group_name": format!("${{azurerm_resource_group.{vm_key}.name}}")
                }),
            );

        resource_batch
            .terraform
            .resource
            .entry("azurerm_subnet".to_string())
            .or_default()
            .insert(
                vm_key.to_string(),
                json!({
                    "name": "internal",
                    "resource_group_name": format!("${{azurerm_resource_group.{vm_key}.name}}"),
                    "virtual_network_name": format!("${{azurerm_virtual_network.{vm_key}.name}}"),
                    "address_prefixes": ["10.0.2.0/24"]
                }),
            );

        resource_batch
            .terraform
            .resource
            .entry("azurerm_public_ip".to_string())
            .or_default()
            .insert(
                vm_key.to_string(),
                json!({
                    "name": "hydropubip",
                    "resource_group_name": format!("${{azurerm_resource_group.{vm_key}.name}}"),
                    "location": format!("${{azurerm_resource_group.{vm_key}.location}}"),
                    "allocation_method": "Static",
                }),
            );

        resource_batch
            .terraform
            .resource
            .entry("azurerm_network_interface".to_string())
            .or_default()
            .insert(
                vm_key.to_string(),
                json!({
                    "name": format!("{vm_key}-nic"),
                    "location": format!("${{azurerm_resource_group.{vm_key}.location}}"),
                    "resource_group_name": format!("${{azurerm_resource_group.{vm_key}.name}}"),
                    "ip_configuration": {
                        "name": "internal",
                        "subnet_id": format!("${{azurerm_subnet.{vm_key}.id}}"),
                        "private_ip_address_allocation": "Dynamic",
                        "public_ip_address_id": format!("${{azurerm_public_ip.{vm_key}.id}}"),
                    }
                }),
            );

        // Define network security rules - for now, accept all connections
        resource_batch
            .terraform
            .resource
            .entry("azurerm_network_security_group".to_string())
            .or_default()
            .insert(
                vm_key.to_string(),
                json!({
                    "name": "primary_security_group",
                    "location": format!("${{azurerm_resource_group.{vm_key}.location}}"),
                    "resource_group_name": format!("${{azurerm_resource_group.{vm_key}.name}}"),
                }),
            );

        resource_batch
            .terraform
            .resource
            .entry("azurerm_network_security_rule".to_string())
            .or_default()
            .insert(
                vm_key.to_string(),
                json!({
                    "name": "allowall",
                    "priority": 100,
                    "direction": "Inbound",
                    "access": "Allow",
                    "protocol": "Tcp",
                    "source_port_range": "*",
                    "destination_port_range": "*",
                    "source_address_prefix": "*",
                    "destination_address_prefix": "*",
                    "resource_group_name": format!("${{azurerm_resource_group.{vm_key}.name}}"),
                    "network_security_group_name": format!("${{azurerm_network_security_group.{vm_key}.name}}"),
                })
            );

        resource_batch
            .terraform
            .resource
            .entry("azurerm_subnet_network_security_group_association".to_string())
            .or_default()
            .insert(
                vm_key.to_string(),
                json!({
                    "subnet_id": format!("${{azurerm_subnet.{vm_key}.id}}"),
                    "network_security_group_id": format!("${{azurerm_network_security_group.{vm_key}.id}}"),
                })
            );

        let user = self.user.as_ref().cloned().unwrap_or("hydro".to_string());
        let os_type = format!("azurerm_{}_virtual_machine", self.os_type.clone());
        let image = self.image.as_ref().cloned().unwrap_or(HashMap::from([
            ("publisher".to_string(), "Canonical".to_string()),
            (
                "offer".to_string(),
                "0001-com-ubuntu-server-jammy".to_string(),
            ),
            ("sku".to_string(), "22_04-lts".to_string()),
            ("version".to_string(), "latest".to_string()),
        ]));

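        // `os_type` selects the Terraform resource type (e.g. "linux" yields
        // `azurerm_linux_virtual_machine`); when no image is given, the Ubuntu
        // 22.04 LTS marketplace image defined above is used.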
        resource_batch
            .terraform
            .resource
            .entry(os_type.clone())
            .or_default()
            .insert(
                vm_key.clone(),
                json!({
                    "name": vm_name,
                    "resource_group_name": format!("${{azurerm_resource_group.{vm_key}.name}}"),
                    "location": format!("${{azurerm_resource_group.{vm_key}.location}}"),
                    "size": self.machine_size.clone(),
                    "network_interface_ids": [format!("${{azurerm_network_interface.{vm_key}.id}}")],
                    "admin_ssh_key": {
                        "username": user,
                        "public_key": "${tls_private_key.vm_instance_ssh_key.public_key_openssh}",
                    },
                    "admin_username": user,
                    "os_disk": {
                        "caching": "ReadWrite",
                        "storage_account_type": "Standard_LRS",
                    },
                    "source_image_reference": image,
                }),
            );

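        // Export the VM's public and internal IPs as Terraform outputs;
        // `provision` reads them back under the same keys once the resources
        // have been created.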
        resource_batch.terraform.output.insert(
            format!("{vm_key}-public-ip"),
            TerraformOutput {
                value: format!("${{azurerm_public_ip.{vm_key}.ip_address}}"),
            },
        );

        resource_batch.terraform.output.insert(
            format!("{vm_key}-internal-ip"),
            TerraformOutput {
                value: format!("${{azurerm_network_interface.{vm_key}.private_ip_address}}"),
            },
        );
    }

    fn launched(&self) -> Option<Arc<dyn LaunchedHost>> {
        self.launched
            .get()
            .map(|a| a.clone() as Arc<dyn LaunchedHost>)
    }

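    /// Resolves this host's IPs from the Terraform outputs emitted by
    /// `collect_resources`, caching the resulting `LaunchedVirtualMachine` so
    /// repeated calls return the same instance.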
    fn provision(&self, resource_result: &Arc<ResourceResult>) -> Arc<dyn LaunchedHost> {
        self.launched
            .get_or_init(|| {
                let id = self.id;

                let internal_ip = resource_result
                    .terraform
                    .outputs
                    .get(&format!("vm-instance-{id}-internal-ip"))
                    .unwrap()
                    .value
                    .clone();

                let external_ip = resource_result
                    .terraform
                    .outputs
                    .get(&format!("vm-instance-{id}-public-ip"))
                    .map(|v| v.value.clone());

                Arc::new(LaunchedVirtualMachine {
                    resource_result: resource_result.clone(),
                    user: self.user.as_ref().cloned().unwrap_or("hydro".to_string()),
                    internal_ip,
                    external_ip,
                })
            })
            .clone()
    }

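    /// Chooses how `client_host` should reach this host, preferring a Unix
    /// socket (same host), then a direct internal TCP connection (same Azure
    /// project), and finally a TCP port forwarded over SSH, which requires
    /// external port 22 to be open.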
    fn strategy_as_server<'a>(
        &'a self,
        client_host: &dyn Host,
        network_hint: PortNetworkHint,
    ) -> Result<(ClientStrategy<'a>, HostStrategyGetter)> {
        if matches!(network_hint, PortNetworkHint::Auto)
            && client_host.can_connect_to(ClientStrategy::UnixSocket(self.id))
        {
            Ok((
                ClientStrategy::UnixSocket(self.id),
                Box::new(|_| BaseServerStrategy::UnixSocket),
            ))
        } else if matches!(
            network_hint,
            PortNetworkHint::Auto | PortNetworkHint::TcpPort(_)
        ) && client_host.can_connect_to(ClientStrategy::InternalTcpPort(self))
        {
            Ok((
                ClientStrategy::InternalTcpPort(self),
                Box::new(move |_| {
                    BaseServerStrategy::InternalTcpPort(match network_hint {
                        PortNetworkHint::Auto => None,
                        PortNetworkHint::TcpPort(port) => port,
                    })
                }),
            ))
        } else if matches!(network_hint, PortNetworkHint::Auto)
            && client_host.can_connect_to(ClientStrategy::ForwardedTcpPort(self))
        {
            Ok((
                ClientStrategy::ForwardedTcpPort(self),
                Box::new(|me| {
                    me.downcast_ref::<AzureHost>()
                        .unwrap()
                        .request_port_base(&BaseServerStrategy::ExternalTcpPort(22)); // needed to forward
                    BaseServerStrategy::InternalTcpPort(None)
                }),
            ))
        } else {
            anyhow::bail!("Could not find a strategy to connect to Azure instance")
        }
    }

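    /// A Unix socket is only usable from this same host (and only on Unix
    /// platforms); internal TCP is only usable between Azure hosts in the same
    /// project; this host never acts as a client over a forwarded port.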
    fn can_connect_to(&self, typ: ClientStrategy) -> bool {
        match typ {
            ClientStrategy::UnixSocket(id) => {
                #[cfg(unix)]
                {
                    self.id == id
                }

                #[cfg(not(unix))]
                {
                    let _ = id;
                    false
                }
            }
            ClientStrategy::InternalTcpPort(target_host) => {
                if let Some(provider_target) = <dyn Any>::downcast_ref::<AzureHost>(target_host) {
                    self.project == provider_target.project
                } else {
                    false
                }
            }
            ClientStrategy::ForwardedTcpPort(_) => false,
        }
    }
}