$ terraform --version
Terraform v0.11.14
+ provider.azurerm v1.30.1
+ provider.template v2.1.2
# Resource group that holds every resource in this reproduction case.
# (The original first line had the token "azurerm_virtual_machine_scale_set"
# accidentally fused onto the `resource` keyword, which is not valid HCL.)
resource "azurerm_resource_group" "test" {
  name     = "alex-test"
  location = "eastus2"
}
# Virtual network the scale set NICs live in; /16 leaves room for extra subnets.
resource "azurerm_virtual_network" "test" {
  name                = "acctvn"
  address_space       = ["10.0.0.0/16"]
  location            = "${azurerm_resource_group.test.location}"
  resource_group_name = "${azurerm_resource_group.test.name}"
}
# Subnet carved out of the VNet above; referenced by the VMSS ip_configuration.
resource "azurerm_subnet" "test" {
  name                 = "acctsub"
  resource_group_name  = "${azurerm_resource_group.test.name}"
  virtual_network_name = "${azurerm_virtual_network.test.name}"
  address_prefix       = "10.0.2.0/24"
}
# Static public IP used as the load balancer's frontend address.
# NOTE(review): domain_name_label reuses the resource group name, so the RG
# name must satisfy DNS-label rules (lowercase, no special chars) — confirm.
resource "azurerm_public_ip" "test" {
  name                = "test"
  location            = "${azurerm_resource_group.test.location}"
  resource_group_name = "${azurerm_resource_group.test.name}"
  allocation_method   = "Static"
  domain_name_label   = "${azurerm_resource_group.test.name}"
  tags = {
    environment = "staging"
  }
}
# Load balancer fronting the scale set. The frontend configuration name
# "PublicIPAddress" is referenced by name from the NAT pool below.
resource "azurerm_lb" "test" {
  name                = "test"
  location            = "${azurerm_resource_group.test.location}"
  resource_group_name = "${azurerm_resource_group.test.name}"
  frontend_ip_configuration {
    name                 = "PublicIPAddress"
    public_ip_address_id = "${azurerm_public_ip.test.id}"
  }
}
# Backend pool the VMSS instances are registered into (see
# load_balancer_backend_address_pool_ids in the scale set's ip_configuration).
resource "azurerm_lb_backend_address_pool" "bpepool" {
  resource_group_name = "${azurerm_resource_group.test.name}"
  loadbalancer_id     = "${azurerm_lb.test.id}"
  name                = "BackEndAddressPool"
}
# Inbound NAT pool: maps frontend ports 50000-50119 to port 22 on individual
# instances, giving per-instance SSH access through the load balancer.
resource "azurerm_lb_nat_pool" "lbnatpool" {
  resource_group_name            = "${azurerm_resource_group.test.name}"
  name                           = "ssh"
  loadbalancer_id                = "${azurerm_lb.test.id}"
  protocol                       = "Tcp"
  frontend_port_start            = 50000
  frontend_port_end              = 50119
  backend_port                   = 22
  frontend_ip_configuration_name = "PublicIPAddress"
}
# HTTP health probe against /health on port 8080.
# NOTE(review): nothing in this config provisions a service listening on
# 8080, so instances will probe unhealthy until one is deployed — confirm
# that is intentional for this repro.
resource "azurerm_lb_probe" "test" {
  resource_group_name = "${azurerm_resource_group.test.name}"
  loadbalancer_id     = "${azurerm_lb.test.id}"
  name                = "http-probe"
  protocol            = "Http"
  request_path        = "/health"
  port                = 8080
}
# Linux VM scale set (2 x Standard_F2) attached to the load balancer above.
resource "azurerm_virtual_machine_scale_set" "test" {
  name                = "mytestscaleset-1"
  location            = "${azurerm_resource_group.test.location}"
  resource_group_name = "${azurerm_resource_group.test.name}"

  # Upgrades are applied manually and automatic OS image upgrade is disabled
  # (the original comment claimed "automatic rolling upgrade", which the
  # settings below contradict).
  automatic_os_upgrade = false
  upgrade_policy_mode  = "Manual"

  sku {
    name     = "Standard_F2"
    tier     = "Standard"
    capacity = 2
  }

  storage_profile_image_reference {
    publisher = "Canonical"
    offer     = "UbuntuServer"
    sku       = "16.04-LTS"
    version   = "latest"
  }

  storage_profile_os_disk {
    # Empty name is required when the OS disk is a managed disk.
    name              = ""
    caching           = "ReadWrite"
    create_option     = "FromImage"
    managed_disk_type = "Standard_LRS"
  }

  storage_profile_data_disk {
    lun           = 0
    caching       = "ReadWrite"
    create_option = "Empty"
    disk_size_gb  = 10
  }

  os_profile {
    computer_name_prefix = "testvm"
    admin_username       = "myadmin"
  }

  os_profile_linux_config {
    disable_password_authentication = true
    ssh_keys {
      path     = "/home/myadmin/.ssh/authorized_keys"
      key_data = "${file("~/.ssh/id_rsa_terraform.pub")}"
    }
  }

  network_profile {
    name    = "terraformnetworkprofile"
    primary = true
    ip_configuration {
      name                                   = "TestIPConfiguration"
      primary                                = true
      subnet_id                              = "${azurerm_subnet.test.id}"
      load_balancer_backend_address_pool_ids = ["${azurerm_lb_backend_address_pool.bpepool.id}"]
      # BUG FIX: the original used element(azurerm_lb_nat_pool.lbnatpool.*.id,
      # count.index), but this resource declares no `count`, so `count.index`
      # is not valid here and Terraform 0.11 errors on it. The NAT pool is a
      # single resource; reference its id directly.
      load_balancer_inbound_nat_rules_ids    = ["${azurerm_lb_nat_pool.lbnatpool.id}"]
    }
  }

  tags = {
    environment = "staging"
  }
}
Here is a diff of an acceptance test that can illustrate the issue in a repeatable manner.
diff --git a/azurerm/resource_arm_virtual_machine_scale_set_test.go b/azurerm/resource_arm_virtual_machine_scale_set_test.go
index 07a3f311..e08abe9c 100644
--- a/azurerm/resource_arm_virtual_machine_scale_set_test.go
+++ b/azurerm/resource_arm_virtual_machine_scale_set_test.go
@@ -174,6 +174,35 @@ func TestUnitAzureRMVirtualMachineScaleSet_basicPublicIP_simpleUpdate(t *testing
    })
 }
+func TestUnitAzureRMVirtualMachineScaleSet_updateNetworkProfile(t *testing.T) {
+   resourceName := "azurerm_virtual_machine_scale_set.test"
+   ri := tf.AccRandTimeInt()
+   location := testLocation()
+   config := testAccAzureRMVirtualMachineScaleSet_basicEmptyPublicIP(ri, location)
+   updatedConfig := testAccAzureRMVirtualMachineScaleSet_basicEmptyNetworkProfile_true_ipforwarding(ri, location)
+
+   resource.ParallelTest(t, resource.TestCase{
+       PreCheck:     func() { testAccPreCheck(t) },
+       Providers:    testAccProviders,
+       CheckDestroy: testCheckAzureRMVirtualMachineScaleSetDestroy,
+       Steps: []resource.TestStep{
+           {
+               Config: config,
+               Check: resource.ComposeTestCheckFunc(
+                   testCheckAzureRMVirtualMachineScaleSetExists(resourceName),
+                   testCheckAzureRMVirtualMachineScaleSetIPForwarding(resourceName, false),
+               ),
+           },
+           {
+               Config: updatedConfig,
+               Check: resource.ComposeTestCheckFunc(
+                   testCheckAzureRMVirtualMachineScaleSetIPForwarding(resourceName, true),
+               ),
+           },
+       },
+   })
+}
+
 func TestAccAzureRMVirtualMachineScaleSet_basicApplicationSecurity(t *testing.T) {
    resourceName := "azurerm_virtual_machine_scale_set.test"
    ri := tf.AccRandTimeInt()
@@ -1266,6 +1295,28 @@ func testCheckAzureRMVirtualMachineScaleSetAcceleratedNetworking(name string, bo
    }
 }
+func testCheckAzureRMVirtualMachineScaleSetIPForwarding(name string, boolean bool) resource.TestCheckFunc {
+   return func(s *terraform.State) error {
+       resp, err := testGetAzureRMVirtualMachineScaleSet(s, name)
+       if err != nil {
+           return err
+       }
+
+       n := resp.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations
+       if n == nil || len(*n) == 0 {
+           return fmt.Errorf("Bad: Could not get network interface configurations for scale set %v", name)
+       }
+
+       ipForwarding := *(*n)[0].EnableIPForwarding
+       if ipForwarding != boolean {
+           return fmt.Errorf("Bad: IP forwarding set incorrectly for scale set %v\n Wanted: %+v Received: %+v", name, boolean, ipForwarding)
+       }
+
+       return nil
+   }
+}
+
+
 func testCheckAzureRMVirtualMachineScaleSetOverprovision(name string) resource.TestCheckFunc {
    return func(s *terraform.State) error {
        resp, err := testGetAzureRMVirtualMachineScaleSet(s, name)
@@ -1943,6 +1994,103 @@ resource "azurerm_virtual_machine_scale_set" "test" {
 `, rInt, location)
 }
+func testAccAzureRMVirtualMachineScaleSet_basicEmptyNetworkProfile_true_ipforwarding(rInt int, location string) string {
+   return fmt.Sprintf(`
+resource "azurerm_resource_group" "test" {
+  name     = "acctestRG-%[1]d"
+  location = "%[2]s"
+}
+
+resource "azurerm_virtual_network" "test" {
+  name                = "acctvn-%[1]d"
+  address_space       = ["10.0.0.0/16"]
+  location            = "${azurerm_resource_group.test.location}"
+  resource_group_name = "${azurerm_resource_group.test.name}"
+}
+
+resource "azurerm_subnet" "test" {
+  name                 = "acctsub-%[1]d"
+  resource_group_name  = "${azurerm_resource_group.test.name}"
+  virtual_network_name = "${azurerm_virtual_network.test.name}"
+  address_prefix       = "10.0.2.0/24"
+}
+
+resource "azurerm_storage_account" "test" {
+  name                     = "accsa%[1]d"
+  resource_group_name      = "${azurerm_resource_group.test.name}"
+  location                 = "${azurerm_resource_group.test.location}"
+  account_tier             = "Standard"
+  account_replication_type = "LRS"
+
+  tags = {
+    environment = "staging"
+  }
+}
+
+resource "azurerm_storage_container" "test" {
+  name                  = "vhds"
+  resource_group_name   = "${azurerm_resource_group.test.name}"
+  storage_account_name  = "${azurerm_storage_account.test.name}"
+  container_access_type = "private"
+}
+
+resource "azurerm_virtual_machine_scale_set" "test" {
+  name                = "acctvmss-%[1]d"
+  location            = "${azurerm_resource_group.test.location}"
+  resource_group_name = "${azurerm_resource_group.test.name}"
+  upgrade_policy_mode = "Manual"
+  
+  tags = {
+   state  = "update"
+  }
+  
+  sku {
+    name     = "Standard_D1_v2"
+    tier     = "Standard"
+    capacity = 0
+  }
+
+  os_profile {
+    computer_name_prefix = "testvm-%[1]d"
+    admin_username       = "myadmin"
+    admin_password       = "Passwword1234"
+  }
+
+  network_profile {
+    name    = "TestNetworkProfile-%[1]d"
+    primary = true
+    ip_forwarding = true
+
+    ip_configuration {
+      name      = "TestIPConfiguration"
+      subnet_id = "${azurerm_subnet.test.id}"
+      primary   = true
+
+      public_ip_address_configuration {
+        name              = "TestPublicIPConfiguration"
+        domain_name_label = "test-domain-label-%[1]d"
+        idle_timeout      = 4
+      }
+    }
+  }
+
+  storage_profile_os_disk {
+    name           = "osDiskProfile"
+    caching        = "ReadWrite"
+    create_option  = "FromImage"
+    vhd_containers = ["${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}"]
+  }
+
+  storage_profile_image_reference {
+    publisher = "Canonical"
+    offer     = "UbuntuServer"
+    sku       = "16.04-LTS"
+    version   = "latest"
+  }
+}
+`, rInt, location)
+}
+
 func testAccAzureRMVirtualMachineScaleSet_basicApplicationSecurity(rInt int, location string) string {
    return fmt.Sprintf(`
 resource "azurerm_resource_group" "test" {
When I create a scale set and then need to alter anything under the network_profile, I cannot: every time I run `terraform plan` it reports that there are no changes.
For example, if I create a new scale-set and I want to update the ip_forwarding I am obligated to create a new scale-set with that setting instead of updating the scale-set definition and then update the existing provisioned VM's.
This might be related to the way the provider is doing the flattenAzureRmVirtualMachineScaleSetNetworkProfile. 
The same applies to other changes under the network_profile; here I illustrate one use case.
When I update the definition of the scale set, I should be able to update its settings and then later apply that new definition to the provisioned instances (if any).
create a simple scale-set as (seen with the above tf code), no instance are required
terraform apply
Update a setting under the network_profile for example change the ip_forwarding 
terraform apply
I am experiencing this issue as well.
I was talking with @kwilczynski about this, and I might have a fix for it.
I will open a PR so we can discuss.
I have created another PR #3821 to try and address the issue, as the problem turned out not to be flattenAzureRmVirtualMachineScaleSetNetworkProfile but rather the custom hashcode method, at least to my understanding.
For the VMSS I found another case of the hashcode, I have created a new tracking issue https://github.com/terraform-providers/terraform-provider-azurerm/issues/3836
I already have a solution since I addressed this one. So I will try to PR that as well.
This has been released in version 1.32.1 of the provider. Please see the Terraform documentation on provider versioning or reach out if you need any assistance upgrading. As an example:
provider "azurerm" {
    version = "~> 1.32.1"
}
# ... other configuration ...
                    I'm going to lock this issue because it has been closed for _30 days_ ⏳. This helps our maintainers find and focus on the active issues.
If you feel this issue should be reopened, we encourage creating a new issue linking back to this one for added context. If you feel I made an error 🤖 🙉 , please reach out to my human friends 👉 [email protected]. Thanks!